/*
 * mini-arm.c: ARM backend for the Mono code generator
 *
 * Authors:
 *   Paolo Molaro (lupus@ximian.com)
 *   Dietmar Maurer (dietmar@ximian.com)
 *
 * (C) 2003 Ximian, Inc.
 * Copyright 2003-2011 Novell, Inc (http://www.novell.com)
 * Copyright 2011 Xamarin, Inc (http://www.xamarin.com)
 */
15 #include <mono/metadata/appdomain.h>
16 #include <mono/metadata/debug-helpers.h>
17 #include <mono/metadata/gc-internal.h>
18 #include <mono/utils/mono-mmap.h>
19 #include <mono/utils/mono-hwcap-arm.h>
25 #include "debugger-agent.h"
27 #include "mono/arch/arm/arm-vfp-codegen.h"
/*
 * Sanity check: a build cannot both disable the FPU (ARM_FPU_NONE) and
 * select one of the VFP variants at the same time.
 */
#if defined(ARM_FPU_NONE) && (defined(ARM_FPU_VFP) || defined(ARM_FPU_VFP_HARD))
#error "ARM_FPU_NONE is defined while one of ARM_FPU_VFP/ARM_FPU_VFP_HARD is defined"
#endif
/*
 * IS_SOFT_FLOAT: Is full software floating point used?
 * IS_HARD_FLOAT: Is full hardware floating point used?
 * IS_VFP: Is hardware floating point with software ABI used?
 *
 * These are not necessarily constants, e.g. IS_SOFT_FLOAT and
 * IS_VFP may delegate to mono_arch_is_soft_float ().
 */
#if defined(ARM_FPU_VFP_HARD)
#define IS_SOFT_FLOAT (FALSE)
#define IS_HARD_FLOAT (TRUE)
#define IS_VFP (TRUE)
#elif defined(ARM_FPU_NONE)
/* Soft float fallback build: the decision is made at runtime based on hwcaps. */
#define IS_SOFT_FLOAT (mono_arch_is_soft_float ())
#define IS_HARD_FLOAT (FALSE)
#define IS_VFP (!mono_arch_is_soft_float ())
#else
/* Plain VFP with the soft-float calling convention. */
#define IS_SOFT_FLOAT (FALSE)
#define IS_HARD_FLOAT (FALSE)
#define IS_VFP (TRUE)
#endif
57 #if defined(__ARM_EABI__) && defined(__linux__) && !defined(PLATFORM_ANDROID) && !defined(__native_client__)
58 #define HAVE_AEABI_READ_TP 1
61 #ifdef __native_client_codegen__
62 const guint kNaClAlignment = kNaClAlignmentARM;
63 const guint kNaClAlignmentMask = kNaClAlignmentMaskARM;
64 gint8 nacl_align_byte = -1; /* 0xff */
67 mono_arch_nacl_pad (guint8 *code, int pad)
69 /* Not yet properly implemented. */
70 g_assert_not_reached ();
75 mono_arch_nacl_skip_nops (guint8 *code)
77 /* Not yet properly implemented. */
78 g_assert_not_reached ();
82 #endif /* __native_client_codegen__ */
/* Round VAL up to the next multiple of ALIGN (ALIGN must be a power of two). */
#define ALIGN_TO(val,align) ((((guint64)(val)) + ((align) - 1)) & ~((align) - 1))
87 void sys_icache_invalidate (void *start, size_t len);
90 static gint lmf_tls_offset = -1;
91 static gint lmf_addr_tls_offset = -1;
93 /* This mutex protects architecture specific caches */
94 #define mono_mini_arch_lock() EnterCriticalSection (&mini_arch_mutex)
95 #define mono_mini_arch_unlock() LeaveCriticalSection (&mini_arch_mutex)
96 static CRITICAL_SECTION mini_arch_mutex;
98 static gboolean v5_supported = FALSE;
99 static gboolean v6_supported = FALSE;
100 static gboolean v7_supported = FALSE;
101 static gboolean v7s_supported = FALSE;
102 static gboolean thumb_supported = FALSE;
103 static gboolean thumb2_supported = FALSE;
105 * Whenever to use the ARM EABI
107 static gboolean eabi_supported = FALSE;
110 * Whenever to use the iphone ABI extensions:
111 * http://developer.apple.com/library/ios/documentation/Xcode/Conceptual/iPhoneOSABIReference/index.html
112 * Basically, r7 is used as a frame pointer and it should point to the saved r7 + lr.
113 * This is required for debugging/profiling tools to work, but it has some overhead so it should
114 * only be turned on in debug builds.
116 static gboolean iphone_abi = FALSE;
119 * The FPU we are generating code for. This is NOT runtime configurable right now,
120 * since some things like MONO_ARCH_CALLEE_FREGS still depend on defines.
122 static MonoArmFPU arm_fpu;
124 #if defined(ARM_FPU_VFP_HARD)
125 static int vfp_scratch1 = ARM_VFP_F28;
126 static int vfp_scratch2 = ARM_VFP_F30;
128 static int vfp_scratch1 = ARM_VFP_D0;
129 static int vfp_scratch2 = ARM_VFP_D1;
134 static volatile int ss_trigger_var = 0;
136 static gpointer single_step_func_wrapper;
137 static gpointer breakpoint_func_wrapper;
140 * The code generated for sequence points reads from this location, which is
141 * made read-only when single stepping is enabled.
143 static gpointer ss_trigger_page;
145 /* Enabled breakpoints read from this trigger page */
146 static gpointer bp_trigger_page;
148 /* Structure used by the sequence points in AOTed code */
150 gpointer ss_trigger_page;
151 gpointer bp_trigger_page;
152 guint8* bp_addrs [MONO_ZERO_LEN_ARRAY];
157 * floating point support: on ARM it is a mess, there are at least 3
158 * different setups, each of which binary incompat with the other.
159 * 1) FPA: old and ugly, but unfortunately what current distros use
160 * the double binary format has the two words swapped. 8 double registers.
161 * Implemented usually by kernel emulation.
162 * 2) softfloat: the compiler emulates all the fp ops. Usually uses the
163 * ugly swapped double format (I guess a softfloat-vfp exists, too, though).
164 * 3) VFP: the new and actually sensible and useful FP support. Implemented
165 * in HW or kernel-emulated, requires new tools. I think this is what symbian uses.
167 * We do not care about FPA. We will support soft float and VFP.
169 int mono_exc_esp_offset = 0;
/*
 * Immediate-range predicates for the ARM load/store encodings:
 * - arm_is_imm12: 12 bit signed offset of LDR/STR (word/byte forms)
 * - arm_is_imm8: 8 bit signed offset of the halfword/doubleword forms
 * - arm_is_fpimm8: VFP load/store offset (8 bit word-scaled, i.e. multiples
 *   of 4 up to +-1020)
 */
#define arm_is_imm12(v) ((v) > -4096 && (v) < 4096)
#define arm_is_imm8(v) ((v) > -256 && (v) < 256)
#define arm_is_fpimm8(v) ((v) >= -1020 && (v) <= 1020)
/*
 * Mask/value pair used to recognize an "ldr pc, [...]" instruction
 * (condition field wildcarded by the mask, destination register = pc).
 */
#define LDR_MASK ((0xf << ARMCOND_SHIFT) | (3 << 26) | (1 << 22) | (1 << 20) | (15 << 12))
#define LDR_PC_VAL ((ARMCOND_AL << ARMCOND_SHIFT) | (1 << 26) | (0 << 22) | (1 << 20) | (15 << 12))
#define IS_LDR_PC(val) (((val) & LDR_MASK) == LDR_PC_VAL)

/* Pre-encoded instructions used in call sequences: "add lr, pc, #4" and "mov lr, pc". */
#define ADD_LR_PC_4 ((ARMCOND_AL << ARMCOND_SHIFT) | (1 << 25) | (1 << 23) | (ARMREG_PC << 16) | (ARMREG_LR << 12) | 4)
#define MOV_LR_PC ((ARMCOND_AL << ARMCOND_SHIFT) | (1 << 24) | (0xa << 20) | (ARMREG_LR << 12) | ARMREG_PC)
181 //#define DEBUG_IMT 0
/*
 * A variant of ARM_LDR_IMM which can handle large offsets: if OFFSET does not
 * fit in the 12 bit immediate field, it is first materialized in SCRATCH_REG
 * and a register-offset load is emitted instead.
 */
#define ARM_LDR_IMM_GENERAL(code, dreg, basereg, offset, scratch_reg) do { \
	if (arm_is_imm12 ((offset))) { \
		ARM_LDR_IMM (code, (dreg), (basereg), (offset)); \
	} else { \
		g_assert ((scratch_reg) != (basereg)); \
		code = mono_arm_emit_load_imm (code, (scratch_reg), (offset)); \
		ARM_LDR_REG_REG (code, (dreg), (basereg), (scratch_reg)); \
	} \
} while (0)
/*
 * Store counterpart of ARM_LDR_IMM_GENERAL: falls back to a register-offset
 * store through SCRATCH_REG when OFFSET exceeds the 12 bit immediate range.
 */
#define ARM_STR_IMM_GENERAL(code, dreg, basereg, offset, scratch_reg) do { \
	if (arm_is_imm12 ((offset))) { \
		ARM_STR_IMM (code, (dreg), (basereg), (offset)); \
	} else { \
		g_assert ((scratch_reg) != (basereg)); \
		code = mono_arm_emit_load_imm (code, (scratch_reg), (offset)); \
		ARM_STR_REG_REG (code, (dreg), (basereg), (scratch_reg)); \
	} \
} while (0)
204 static void mono_arch_compute_omit_fp (MonoCompile *cfg);
/*
 * mono_arch_regname:
 *
 *   Return a human-readable name for integer register number REG
 * (0-15; v1-v7 are the ARM callee-saved aliases for r4-r10), or
 * "unknown" for anything out of range.
 */
const char*
mono_arch_regname (int reg)
{
	static const char * rnames[] = {
		"arm_r0", "arm_r1", "arm_r2", "arm_r3", "arm_v1",
		"arm_v2", "arm_v3", "arm_v4", "arm_v5", "arm_v6",
		"arm_v7", "arm_fp", "arm_ip", "arm_sp", "arm_lr",
		"arm_pc"
	};
	if (reg >= 0 && reg < 16)
		return rnames [reg];
	return "unknown";
}
/*
 * mono_arch_fregname:
 *
 *   Return a human-readable name for single-precision VFP register
 * number REG (0-31), or "unknown" for anything out of range.
 */
const char*
mono_arch_fregname (int reg)
{
	static const char * rnames[] = {
		"arm_f0", "arm_f1", "arm_f2", "arm_f3", "arm_f4",
		"arm_f5", "arm_f6", "arm_f7", "arm_f8", "arm_f9",
		"arm_f10", "arm_f11", "arm_f12", "arm_f13", "arm_f14",
		"arm_f15", "arm_f16", "arm_f17", "arm_f18", "arm_f19",
		"arm_f20", "arm_f21", "arm_f22", "arm_f23", "arm_f24",
		"arm_f25", "arm_f26", "arm_f27", "arm_f28", "arm_f29",
		"arm_f30", "arm_f31"
	};
	if (reg >= 0 && reg < 32)
		return rnames [reg];
	return "unknown";
}
240 emit_big_add (guint8 *code, int dreg, int sreg, int imm)
242 int imm8, rot_amount;
243 if ((imm8 = mono_arm_is_rotated_imm8 (imm, &rot_amount)) >= 0) {
244 ARM_ADD_REG_IMM (code, dreg, sreg, imm8, rot_amount);
247 g_assert (dreg != sreg);
248 code = mono_arm_emit_load_imm (code, dreg, imm);
249 ARM_ADD_REG_REG (code, dreg, dreg, sreg);
/*
 * emit_memcpy:
 *
 *   Emit code copying SIZE bytes from SREG+SOFFSET to DREG+DOFFSET.
 * NOTE(review): this fragment is missing lines (return type, braces, loop
 * structure) — the comments below only describe what is visible here.
 */
254 emit_memcpy (guint8 *code, int size, int dreg, int doffset, int sreg, int soffset)
256 /* we can use r0-r3, since this is called only for incoming args on the stack */
257 if (size > sizeof (gpointer) * 4) {
/* Large copy: word-at-a-time loop using r0 = src, r1 = dst, r2 = count, r3 = tmp. */
259 code = emit_big_add (code, ARMREG_R0, sreg, soffset);
260 code = emit_big_add (code, ARMREG_R1, dreg, doffset);
261 start_loop = code = mono_arm_emit_load_imm (code, ARMREG_R2, size);
262 ARM_LDR_IMM (code, ARMREG_R3, ARMREG_R0, 0);
263 ARM_STR_IMM (code, ARMREG_R3, ARMREG_R1, 0);
264 ARM_ADD_REG_IMM8 (code, ARMREG_R0, ARMREG_R0, 4);
265 ARM_ADD_REG_IMM8 (code, ARMREG_R1, ARMREG_R1, 4);
266 ARM_SUBS_REG_IMM8 (code, ARMREG_R2, ARMREG_R2, 4);
/* Branch back to loop start while the SUBS above did not reach zero. */
267 ARM_B_COND (code, ARMCOND_NE, 0);
268 arm_patch (code - 4, start_loop);
/* Small copy with offsets in the imm12 range: unrolled LDR/STR via lr as scratch. */
271 if (arm_is_imm12 (doffset) && arm_is_imm12 (doffset + size) &&
272 arm_is_imm12 (soffset) && arm_is_imm12 (soffset + size)) {
274 ARM_LDR_IMM (code, ARMREG_LR, sreg, soffset);
275 ARM_STR_IMM (code, ARMREG_LR, dreg, doffset);
/* Offsets too large for imm12: compute the absolute addresses into r0/r1 first. */
281 code = emit_big_add (code, ARMREG_R0, sreg, soffset);
282 code = emit_big_add (code, ARMREG_R1, dreg, doffset);
283 doffset = soffset = 0;
285 ARM_LDR_IMM (code, ARMREG_LR, ARMREG_R0, soffset);
286 ARM_STR_IMM (code, ARMREG_LR, ARMREG_R1, doffset);
/* The unrolled paths must have consumed the size exactly (word-multiple copy). */
292 g_assert (size == 0);
/*
 * emit_call_reg:
 *
 *   Emit an indirect call through REG. Uses BLX where available; otherwise
 * sets up lr manually and branches by writing pc.
 * NOTE(review): fragment — surrounding #if/#else structure is partially elided.
 */
297 emit_call_reg (guint8 *code, int reg)
300 ARM_BLX_REG (code, reg);
302 #ifdef USE_JUMP_TABLES
303 g_assert_not_reached ();
/* No BLX: emulate it — lr = pc (points two instructions ahead), then pc = reg. */
305 ARM_MOV_REG_REG (code, ARMREG_LR, ARMREG_PC);
309 ARM_MOV_REG_REG (code, ARMREG_PC, reg);
/*
 * emit_call_seq:
 *
 *   Emit a patchable call sequence. With jump tables the call goes through a
 * patchable bl; for dynamic methods the target is embedded inline as a
 * constant (initially NULL, patched later) and called through ip.
 */
315 emit_call_seq (MonoCompile *cfg, guint8 *code)
317 #ifdef USE_JUMP_TABLES
318 code = mono_arm_patchable_bl (code, ARMCOND_AL);
320 if (cfg->method->dynamic) {
/* Load the inline constant slot that follows this instruction into ip. */
321 ARM_LDR_IMM (code, ARMREG_IP, ARMREG_PC, 0);
/* Placeholder for the call target; filled in when the call site is patched. */
323 *(gpointer*)code = NULL;
325 code = emit_call_reg (code, ARMREG_IP);
/*
 * mono_arm_patchable_b:
 *
 *   Emit a patchable conditional branch. With jump tables, the target address
 * lives in a jump table entry loaded into ip; otherwise a plain B with a zero
 * displacement is emitted and later fixed up by arm_patch ().
 */
334 mono_arm_patchable_b (guint8 *code, int cond)
336 #ifdef USE_JUMP_TABLES
339 jte = mono_jumptable_add_entry ();
340 code = mono_arm_load_jumptable_entry (code, jte, ARMREG_IP);
341 ARM_BX_COND (code, cond, ARMREG_IP);
343 ARM_B_COND (code, cond, 0);
/*
 * mono_arm_patchable_bl:
 *
 *   Like mono_arm_patchable_b () but for calls: emits BLX through ip with
 * jump tables, or a plain BL placeholder that is patched later.
 */
349 mono_arm_patchable_bl (guint8 *code, int cond)
351 #ifdef USE_JUMP_TABLES
354 jte = mono_jumptable_add_entry ();
355 code = mono_arm_load_jumptable_entry (code, jte, ARMREG_IP);
356 ARM_BLX_REG_COND (code, cond, ARMREG_IP);
358 ARM_BL_COND (code, cond, 0);
363 #ifdef USE_JUMP_TABLES
/*
 * mono_arm_load_jumptable_entry_addr:
 *
 *   Load the address of jump table entry JTE into REG using a movw/movt pair
 * (requires ARMv7-style 16 bit immediate moves).
 */
365 mono_arm_load_jumptable_entry_addr (guint8 *code, gpointer *jte, ARMReg reg)
367 ARM_MOVW_REG_IMM (code, reg, GPOINTER_TO_UINT(jte) & 0xffff);
368 ARM_MOVT_REG_IMM (code, reg, (GPOINTER_TO_UINT(jte) >> 16) & 0xffff);
/*
 * mono_arm_load_jumptable_entry:
 *
 *   Load the VALUE stored in jump table entry JTE into REG
 * (address load followed by a dereference).
 */
373 mono_arm_load_jumptable_entry (guint8 *code, gpointer* jte, ARMReg reg)
375 code = mono_arm_load_jumptable_entry_addr (code, jte, reg);
376 ARM_LDR_IMM (code, reg, reg, 0);
/*
 * emit_aotconst:
 *
 *   Emit code loading an AOT constant into DREG: a patch entry is recorded,
 * the GOT offset is embedded inline after the load (initially NULL), and the
 * actual value is then fetched pc-relative from the GOT.
 */
382 emit_aotconst (MonoCompile *cfg, guint8 *start, guint8 *code, int dreg, int tramp_type, gconstpointer target)
384 /* Load the GOT offset */
385 mono_add_patch_info (cfg, code - start, tramp_type, target);
386 ARM_LDR_IMM (code, dreg, ARMREG_PC, 0);
/* Inline slot holding the GOT offset; patched at load/AOT-compile time. */
388 *(gpointer*)code = NULL;
390 /* Load the value from the GOT */
391 ARM_LDR_REG_REG (code, dreg, ARMREG_PC, dreg);
/*
 * emit_move_return_value:
 *
 *   Emit code moving the native return value of a call into INS->dreg.
 * For FP returns this bridges the ABI difference: hard-float returns in
 * d0/s0, soft-float ABI returns in r0/r1 and must be moved into a VFP reg.
 * NOTE(review): fragment — the enclosing switch cases are partially elided.
 */
397 emit_move_return_value (MonoCompile *cfg, MonoInst *ins, guint8 *code)
399 switch (ins->opcode) {
402 case OP_FCALL_MEMBASE:
404 if (((MonoCallInst*)ins)->signature->ret->type == MONO_TYPE_R4) {
/* R4 result already in a VFP reg (hard-float): widen single to double. */
406 ARM_CVTS (code, ins->dreg, ARM_VFP_F0);
/* Soft-float ABI: move the single from r0 to a VFP reg, then widen. */
408 ARM_FMSR (code, ins->dreg, ARMREG_R0);
409 ARM_CVTS (code, ins->dreg, ins->dreg);
/* R8 result in d0 (hard-float): plain double copy. */
413 ARM_CPYD (code, ins->dreg, ARM_VFP_D0);
/* Soft-float ABI: double comes back in the r0/r1 pair. */
415 ARM_FMDRR (code, ARMREG_R0, ARMREG_R1, ins->dreg);
428 * Emit code to push an LMF structure on the LMF stack.
429 * On arm, this is intermixed with the initialization of other fields of the structure.
/*
 * NOTE(review): fragment — branch structure and some statements are elided.
 * The function obtains the thread's lmf_addr (via __aeabi_read_tp TLS read,
 * an inlined pthread_getspecific, or a mono_get_lmf_addr call), then links a
 * new MonoLMF built at SP+lmf_offset into the list.
 */
432 emit_save_lmf (MonoCompile *cfg, guint8 *code, gint32 lmf_offset)
434 gboolean get_lmf_fast = FALSE;
437 #ifdef HAVE_AEABI_READ_TP
438 gint32 lmf_addr_tls_offset = mono_get_lmf_addr_tls_offset ();
440 if (lmf_addr_tls_offset != -1) {
/* Fast path: read the TLS base via __aeabi_read_tp, then load lmf_addr at its TLS offset. */
443 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_INTERNAL_METHOD,
444 (gpointer)"__aeabi_read_tp");
445 code = emit_call_seq (cfg, code);
447 ARM_LDR_IMM (code, ARMREG_R0, ARMREG_R0, lmf_addr_tls_offset);
453 if (cfg->method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) {
456 /* Inline mono_get_lmf_addr () */
457 /* jit_tls = pthread_getspecific (mono_jit_tls_id); lmf_addr = &jit_tls->lmf; */
459 /* Load mono_jit_tls_id */
461 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_JIT_TLS_ID, NULL);
462 ARM_LDR_IMM (code, ARMREG_R0, ARMREG_PC, 0);
/* Inline constant slot for mono_jit_tls_id, patched at load time. */
464 *(gpointer*)code = NULL;
466 ARM_LDR_REG_REG (code, ARMREG_R0, ARMREG_PC, ARMREG_R0);
467 /* call pthread_getspecific () */
468 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_INTERNAL_METHOD,
469 (gpointer)"pthread_getspecific");
470 code = emit_call_seq (cfg, code);
471 /* lmf_addr = &jit_tls->lmf */
/* Note: lmf_offset parameter is repurposed here as the offset of lmf inside MonoJitTlsData. */
472 lmf_offset = G_STRUCT_OFFSET (MonoJitTlsData, lmf);
473 g_assert (arm_is_imm8 (lmf_offset));
474 ARM_ADD_REG_IMM (code, ARMREG_R0, ARMREG_R0, lmf_offset, 0);
/* Slow path: plain call to mono_get_lmf_addr (). */
481 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_INTERNAL_METHOD,
482 (gpointer)"mono_get_lmf_addr");
483 code = emit_call_seq (cfg, code);
485 /* we build the MonoLMF structure on the stack - see mini-arm.h */
486 /* lmf_offset is the offset from the previous stack pointer,
487 * alloc_size is the total stack space allocated, so the offset
488 * of MonoLMF from the current stack ptr is alloc_size - lmf_offset.
489 * The pointer to the struct is put in r1 (new_lmf).
490 * ip is used as scratch
491 * The callee-saved registers are already in the MonoLMF structure
493 code = emit_big_add (code, ARMREG_R1, ARMREG_SP, lmf_offset);
494 /* r0 is the result from mono_get_lmf_addr () */
495 ARM_STR_IMM (code, ARMREG_R0, ARMREG_R1, G_STRUCT_OFFSET (MonoLMF, lmf_addr));
496 /* new_lmf->previous_lmf = *lmf_addr */
497 ARM_LDR_IMM (code, ARMREG_IP, ARMREG_R0, G_STRUCT_OFFSET (MonoLMF, previous_lmf));
498 ARM_STR_IMM (code, ARMREG_IP, ARMREG_R1, G_STRUCT_OFFSET (MonoLMF, previous_lmf));
499 /* *(lmf_addr) = r1 */
/* NOTE(review): r0 here is lmf_addr itself, yet the store uses the previous_lmf
 * field offset — looks intentional only if that offset is 0; verify against mini-arm.h. */
500 ARM_STR_IMM (code, ARMREG_R1, ARMREG_R0, G_STRUCT_OFFSET (MonoLMF, previous_lmf));
501 /* Skip method (only needed for trampoline LMF frames) */
502 ARM_STR_IMM (code, ARMREG_SP, ARMREG_R1, G_STRUCT_OFFSET (MonoLMF, sp));
503 ARM_STR_IMM (code, ARMREG_FP, ARMREG_R1, G_STRUCT_OFFSET (MonoLMF, fp));
504 /* save the current IP */
505 ARM_MOV_REG_REG (code, ARMREG_IP, ARMREG_PC);
506 ARM_STR_IMM (code, ARMREG_IP, ARMREG_R1, G_STRUCT_OFFSET (MonoLMF, ip));
/* Tell the precise GC that none of the LMF slots hold managed references. */
508 for (i = 0; i < sizeof (MonoLMF); i += sizeof (mgreg_t))
509 mini_gc_set_slot_type_from_fp (cfg, lmf_offset + i, SLOT_NOREF);
/*
 * emit_float_args:
 *
 *   For each queued float argument of the call INST, emit a VFP load from the
 * argument variable's stack slot into the assigned hardware register.
 * Grows cfg->native_code if the upcoming instructions might not fit.
 */
520 emit_float_args (MonoCompile *cfg, MonoCallInst *inst, guint8 *code, int *max_len, guint *offset)
524 for (list = inst->float_args; list; list = list->next) {
525 FloatArgData *fad = list->data;
526 MonoInst *var = get_vreg_to_inst (cfg, fad->vreg);
/* Make sure the buffer is large enough, then re-derive code after a possible realloc. */
530 if (*offset + *max_len > cfg->code_size) {
531 cfg->code_size += *max_len;
532 cfg->native_code = g_realloc (cfg->native_code, cfg->code_size);
534 code = cfg->native_code + *offset;
537 ARM_FLDS (code, fad->hreg, var->inst_basereg, var->inst_offset);
539 *offset = code - cfg->native_code;
548 * Emit code to pop an LMF structure from the LMF stack.
/*
 * NOTE(review): fragment — the basereg/offset selection branch is partially
 * elided. Small lmf_offset uses frame_reg directly; otherwise the LMF address
 * is computed into r2 first.
 */
551 emit_restore_lmf (MonoCompile *cfg, guint8 *code, gint32 lmf_offset)
555 if (lmf_offset < 32) {
556 basereg = cfg->frame_reg;
561 code = emit_big_add (code, ARMREG_R2, cfg->frame_reg, lmf_offset);
564 /* ip = previous_lmf */
565 ARM_LDR_IMM (code, ARMREG_IP, basereg, offset + G_STRUCT_OFFSET (MonoLMF, previous_lmf));
/* lr = lmf_addr */
567 ARM_LDR_IMM (code, ARMREG_LR, basereg, offset + G_STRUCT_OFFSET (MonoLMF, lmf_addr));
568 /* *(lmf_addr) = previous_lmf */
569 ARM_STR_IMM (code, ARMREG_IP, ARMREG_LR, G_STRUCT_OFFSET (MonoLMF, previous_lmf));
574 #endif /* #ifndef DISABLE_JIT */
577 * mono_arch_get_argument_info:
578 * @csig: a method signature
579 * @param_count: the number of parameters to consider
580 * @arg_info: an array to store the result infos
582 * Gathers information on parameters such as size, alignment and
583 * padding. arg_info should be large enought to hold param_count + 1 entries.
585 * Returns the size of the activation frame.
/* NOTE(review): fragment — some statements (offset updates, return) are elided. */
588 mono_arch_get_argument_info (MonoGenericSharingContext *gsctx, MonoMethodSignature *csig, int param_count, MonoJitArgumentInfo *arg_info)
590 int k, frame_size = 0;
591 guint32 size, align, pad;
/* A struct return value is passed by hidden pointer, taking one extra slot. */
595 t = mini_type_get_underlying_type (gsctx, csig->ret);
596 if (MONO_TYPE_ISSTRUCT (t)) {
597 frame_size += sizeof (gpointer);
601 arg_info [0].offset = offset;
/* 'this' pointer occupies a slot before the explicit parameters. */
604 frame_size += sizeof (gpointer);
608 arg_info [0].size = frame_size;
610 for (k = 0; k < param_count; k++) {
611 size = mini_type_stack_size_full (NULL, csig->params [k], &align, csig->pinvoke);
613 /* ignore alignment for now */
/* Insert padding so the argument starts at its required alignment. */
616 frame_size += pad = (align - (frame_size & (align - 1))) & (align - 1);
617 arg_info [k].pad = pad;
619 arg_info [k + 1].pad = 0;
620 arg_info [k + 1].size = size;
622 arg_info [k + 1].offset = offset;
/* Round the whole frame up to the platform frame alignment. */
626 align = MONO_ARCH_FRAME_ALIGNMENT;
627 frame_size += pad = (align - (frame_size & (align - 1))) & (align - 1);
628 arg_info [k].pad = pad;
633 #define MAX_ARCH_DELEGATE_PARAMS 3
/*
 * get_delegate_invoke_impl:
 *
 *   Generate a small delegate invoke thunk. With HAS_TARGET, the thunk
 * replaces 'this' (r0) with delegate->target and jumps to method_ptr;
 * without a target it shifts the register arguments down by one instead.
 * NOTE(review): fragment — branch structure and return are elided.
 */
636 get_delegate_invoke_impl (gboolean has_target, gboolean param_count, guint32 *code_size)
638 guint8 *code, *start;
641 start = code = mono_global_codeman_reserve (12);
643 /* Replace the this argument with the target */
644 ARM_LDR_IMM (code, ARMREG_IP, ARMREG_R0, G_STRUCT_OFFSET (MonoDelegate, method_ptr));
645 ARM_LDR_IMM (code, ARMREG_R0, ARMREG_R0, G_STRUCT_OFFSET (MonoDelegate, target));
/* Tail-jump to the method through ip. */
646 ARM_MOV_REG_REG (code, ARMREG_PC, ARMREG_IP);
648 g_assert ((code - start) <= 12);
650 mono_arch_flush_icache (start, 12);
/* No-target case: 8 bytes plus one mov per parameter to slide args down. */
654 size = 8 + param_count * 4;
655 start = code = mono_global_codeman_reserve (size);
657 ARM_LDR_IMM (code, ARMREG_IP, ARMREG_R0, G_STRUCT_OFFSET (MonoDelegate, method_ptr));
658 /* slide down the arguments */
659 for (i = 0; i < param_count; ++i) {
660 ARM_MOV_REG_REG (code, (ARMREG_R0 + i), (ARMREG_R0 + i + 1));
662 ARM_MOV_REG_REG (code, ARMREG_PC, ARMREG_IP);
664 g_assert ((code - start) <= size);
666 mono_arch_flush_icache (start, size);
/* NOTE(review): param_count is declared gboolean but used as a count — presumably should be int. */
670 *code_size = code - start;
676 * mono_arch_get_delegate_invoke_impls:
678 * Return a list of MonoAotTrampInfo structures for the delegate invoke impl
682 mono_arch_get_delegate_invoke_impls (void)
690 code = get_delegate_invoke_impl (TRUE, 0, &code_len);
691 res = g_slist_prepend (res, mono_tramp_info_create ("delegate_invoke_impl_has_target", code, code_len, NULL, NULL));
693 for (i = 0; i <= MAX_ARCH_DELEGATE_PARAMS; ++i) {
694 code = get_delegate_invoke_impl (FALSE, i, &code_len);
695 tramp_name = g_strdup_printf ("delegate_invoke_impl_target_%d", i);
696 res = g_slist_prepend (res, mono_tramp_info_create (tramp_name, code, code_len, NULL, NULL));
704 mono_arch_get_delegate_invoke_impl (MonoMethodSignature *sig, gboolean has_target)
706 guint8 *code, *start;
708 /* FIXME: Support more cases */
709 if (MONO_TYPE_ISSTRUCT (sig->ret))
713 static guint8* cached = NULL;
714 mono_mini_arch_lock ();
716 mono_mini_arch_unlock ();
721 start = mono_aot_get_trampoline ("delegate_invoke_impl_has_target");
723 start = get_delegate_invoke_impl (TRUE, 0, NULL);
725 mono_mini_arch_unlock ();
728 static guint8* cache [MAX_ARCH_DELEGATE_PARAMS + 1] = {NULL};
731 if (sig->param_count > MAX_ARCH_DELEGATE_PARAMS)
733 for (i = 0; i < sig->param_count; ++i)
734 if (!mono_is_regsize_var (sig->params [i]))
737 mono_mini_arch_lock ();
738 code = cache [sig->param_count];
740 mono_mini_arch_unlock ();
745 char *name = g_strdup_printf ("delegate_invoke_impl_target_%d", sig->param_count);
746 start = mono_aot_get_trampoline (name);
749 start = get_delegate_invoke_impl (FALSE, sig->param_count, NULL);
751 cache [sig->param_count] = start;
752 mono_mini_arch_unlock ();
760 mono_arch_get_this_arg_from_call (mgreg_t *regs, guint8 *code)
762 return (gpointer)regs [ARMREG_R0];
766 * Initialize the cpu to execute managed code.
769 mono_arch_cpu_init (void)
771 #if defined(__APPLE__)
774 i8_align = __alignof__ (gint64);
/*
 * create_function_wrapper:
 *
 *   Generate a trampoline that builds a MonoContext on the stack, calls
 * FUNCTION with a pointer to it in r0, and then restores all registers
 * (including pc) from the possibly-modified context. Used by the soft
 * debugger for single-step/breakpoint entry points.
 */
779 create_function_wrapper (gpointer function)
781 guint8 *start, *code;
783 start = code = mono_global_codeman_reserve (96);
786 * Construct the MonoContext structure on the stack.
789 ARM_SUB_REG_IMM8 (code, ARMREG_SP, ARMREG_SP, sizeof (MonoContext));
791 /* save ip, lr and pc into their correspodings ctx.regs slots. */
/* NOTE(review): mixes sizeof (mgreg_t) and a literal 4 for the slot stride —
 * equivalent only on 32 bit; should be consistent. Also note the pc slot is
 * filled with lr (the return address), which is the wrapper's caller pc. */
792 ARM_STR_IMM (code, ARMREG_IP, ARMREG_SP, G_STRUCT_OFFSET (MonoContext, regs) + sizeof (mgreg_t) * ARMREG_IP);
793 ARM_STR_IMM (code, ARMREG_LR, ARMREG_SP, G_STRUCT_OFFSET (MonoContext, regs) + 4 * ARMREG_LR);
794 ARM_STR_IMM (code, ARMREG_LR, ARMREG_SP, G_STRUCT_OFFSET (MonoContext, regs) + 4 * ARMREG_PC);
796 /* save r0..r10 and fp */
797 ARM_ADD_REG_IMM8 (code, ARMREG_IP, ARMREG_SP, G_STRUCT_OFFSET (MonoContext, regs));
798 ARM_STM (code, ARMREG_IP, 0x0fff);
800 /* now we can update fp. */
801 ARM_MOV_REG_REG (code, ARMREG_FP, ARMREG_SP);
803 /* make ctx.esp hold the actual value of sp at the beginning of this method. */
804 ARM_ADD_REG_IMM8 (code, ARMREG_R0, ARMREG_FP, sizeof (MonoContext));
805 ARM_STR_IMM (code, ARMREG_R0, ARMREG_IP, 4 * ARMREG_SP);
806 ARM_STR_IMM (code, ARMREG_R0, ARMREG_FP, G_STRUCT_OFFSET (MonoContext, regs) + 4 * ARMREG_SP);
808 /* make ctx.eip hold the address of the call. */
809 ARM_SUB_REG_IMM8 (code, ARMREG_LR, ARMREG_LR, 4);
810 ARM_STR_IMM (code, ARMREG_LR, ARMREG_SP, G_STRUCT_OFFSET (MonoContext, pc));
812 /* r0 now points to the MonoContext */
813 ARM_MOV_REG_REG (code, ARMREG_R0, ARMREG_FP);
/* Load the target function address: via jump table, or inline constant slot. */
816 #ifdef USE_JUMP_TABLES
818 gpointer *jte = mono_jumptable_add_entry ();
819 code = mono_arm_load_jumptable_entry (code, jte, ARMREG_IP);
823 ARM_LDR_IMM (code, ARMREG_IP, ARMREG_PC, 0);
825 *(gpointer*)code = function;
828 ARM_BLX_REG (code, ARMREG_IP);
830 /* we're back; save ctx.eip and ctx.esp into the corresponding regs slots. */
831 ARM_LDR_IMM (code, ARMREG_R0, ARMREG_FP, G_STRUCT_OFFSET (MonoContext, pc));
832 ARM_STR_IMM (code, ARMREG_R0, ARMREG_FP, G_STRUCT_OFFSET (MonoContext, regs) + 4 * ARMREG_LR);
833 ARM_STR_IMM (code, ARMREG_R0, ARMREG_FP, G_STRUCT_OFFSET (MonoContext, regs) + 4 * ARMREG_PC);
835 /* make ip point to the regs array, then restore everything, including pc. */
836 ARM_ADD_REG_IMM8 (code, ARMREG_IP, ARMREG_FP, G_STRUCT_OFFSET (MonoContext, regs));
837 ARM_LDM (code, ARMREG_IP, 0xffff);
839 mono_arch_flush_icache (start, code - start);
845 * Initialize architecture specific code.
848 mono_arch_init (void)
850 const char *cpu_arch;
852 InitializeCriticalSection (&mini_arch_mutex);
853 #ifdef MONO_ARCH_SOFT_DEBUG_SUPPORTED
854 if (mini_get_debug_options ()->soft_breakpoints) {
855 single_step_func_wrapper = create_function_wrapper (debugger_agent_single_step_from_context);
856 breakpoint_func_wrapper = create_function_wrapper (debugger_agent_breakpoint_from_context);
861 ss_trigger_page = mono_valloc (NULL, mono_pagesize (), MONO_MMAP_READ|MONO_MMAP_32BIT);
862 bp_trigger_page = mono_valloc (NULL, mono_pagesize (), MONO_MMAP_READ|MONO_MMAP_32BIT);
863 mono_mprotect (bp_trigger_page, mono_pagesize (), 0);
866 mono_aot_register_jit_icall ("mono_arm_throw_exception", mono_arm_throw_exception);
867 mono_aot_register_jit_icall ("mono_arm_throw_exception_by_token", mono_arm_throw_exception_by_token);
868 mono_aot_register_jit_icall ("mono_arm_resume_unwind", mono_arm_resume_unwind);
869 #if defined(ENABLE_GSHAREDVT)
870 mono_aot_register_jit_icall ("mono_arm_start_gsharedvt_call", mono_arm_start_gsharedvt_call);
873 #if defined(__ARM_EABI__)
874 eabi_supported = TRUE;
877 #if defined(ARM_FPU_VFP_HARD)
878 arm_fpu = MONO_ARM_FPU_VFP_HARD;
880 arm_fpu = MONO_ARM_FPU_VFP;
882 #if defined(ARM_FPU_NONE) && !defined(__APPLE__)
883 /* If we're compiling with a soft float fallback and it
884 turns out that no VFP unit is available, we need to
885 switch to soft float. We don't do this for iOS, since
886 iOS devices always have a VFP unit. */
887 if (!mono_hwcap_arm_has_vfp)
888 arm_fpu = MONO_ARM_FPU_NONE;
892 v5_supported = mono_hwcap_arm_is_v5;
893 v6_supported = mono_hwcap_arm_is_v6;
894 v7_supported = mono_hwcap_arm_is_v7;
895 v7s_supported = mono_hwcap_arm_is_v7s;
897 #if defined(__APPLE__)
898 /* iOS is special-cased here because we don't yet
899 have a way to properly detect CPU features on it. */
900 thumb_supported = TRUE;
903 thumb_supported = mono_hwcap_arm_has_thumb;
904 thumb2_supported = mono_hwcap_arm_has_thumb2;
907 /* Format: armv(5|6|7[s])[-thumb[2]] */
908 cpu_arch = g_getenv ("MONO_CPU_ARCH");
910 /* Do this here so it overrides any detection. */
912 if (strncmp (cpu_arch, "armv", 4) == 0) {
913 v5_supported = cpu_arch [4] >= '5';
914 v6_supported = cpu_arch [4] >= '6';
915 v7_supported = cpu_arch [4] >= '7';
916 v7s_supported = strncmp (cpu_arch, "armv7s", 6) == 0;
919 thumb_supported = strstr (cpu_arch, "thumb") != NULL;
920 thumb2_supported = strstr (cpu_arch, "thumb2") != NULL;
925 * Cleanup architecture specific code.
928 mono_arch_cleanup (void)
933 * This function returns the optimizations supported on this cpu.
936 mono_arch_cpu_optimizations (guint32 *exclude_mask)
938 /* no arm-specific optimizations yet */
944 * This function test for all SIMD functions supported.
946 * Returns a bitmask corresponding to all supported versions.
950 mono_arch_cpu_enumerate_simd_versions (void)
952 /* SIMD is currently unimplemented */
960 mono_arch_opcode_needs_emulation (MonoCompile *cfg, int opcode)
976 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
978 mono_arch_is_soft_float (void)
980 return arm_fpu == MONO_ARM_FPU_NONE;
985 mono_arm_is_hard_float (void)
987 return arm_fpu == MONO_ARM_FPU_VFP_HARD;
991 is_regsize_var (MonoGenericSharingContext *gsctx, MonoType *t) {
994 t = mini_type_get_underlying_type (gsctx, t);
1001 case MONO_TYPE_FNPTR:
1003 case MONO_TYPE_OBJECT:
1004 case MONO_TYPE_STRING:
1005 case MONO_TYPE_CLASS:
1006 case MONO_TYPE_SZARRAY:
1007 case MONO_TYPE_ARRAY:
1009 case MONO_TYPE_GENERICINST:
1010 if (!mono_type_generic_inst_is_valuetype (t))
1013 case MONO_TYPE_VALUETYPE:
1020 mono_arch_get_allocatable_int_vars (MonoCompile *cfg)
1025 for (i = 0; i < cfg->num_varinfo; i++) {
1026 MonoInst *ins = cfg->varinfo [i];
1027 MonoMethodVar *vmv = MONO_VARINFO (cfg, i);
1030 if (vmv->range.first_use.abs_pos >= vmv->range.last_use.abs_pos)
1033 if (ins->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT) || (ins->opcode != OP_LOCAL && ins->opcode != OP_ARG))
1036 /* we can only allocate 32 bit values */
1037 if (is_regsize_var (cfg->generic_sharing_context, ins->inst_vtype)) {
1038 g_assert (MONO_VARINFO (cfg, i)->reg == -1);
1039 g_assert (i == vmv->idx);
1040 vars = mono_varlist_insert_sorted (cfg, vars, vmv, FALSE);
1047 #define USE_EXTRA_TEMPS 0
1050 mono_arch_get_global_int_regs (MonoCompile *cfg)
1054 mono_arch_compute_omit_fp (cfg);
1057 * FIXME: Interface calls might go through a static rgctx trampoline which
1058 * sets V5, but it doesn't save it, so we need to save it ourselves, and
1061 if (cfg->flags & MONO_CFG_HAS_CALLS)
1062 cfg->uses_rgctx_reg = TRUE;
1064 if (cfg->arch.omit_fp)
1065 regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_FP));
1066 regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_V1));
1067 regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_V2));
1068 regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_V3));
1070 /* V4=R7 is used as a frame pointer, but V7=R10 is preserved */
1071 regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_V7));
1073 regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_V4));
1074 if (!(cfg->compile_aot || cfg->uses_rgctx_reg || COMPILE_LLVM (cfg)))
1075 /* V5 is reserved for passing the vtable/rgctx/IMT method */
1076 regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_V5));
1077 /*regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_V6));*/
1078 /*regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_V7));*/
1084 * mono_arch_regalloc_cost:
1086 * Return the cost, in number of memory references, of the action of
1087 * allocating the variable VMV into a register during global register
1091 mono_arch_regalloc_cost (MonoCompile *cfg, MonoMethodVar *vmv)
1097 #endif /* #ifndef DISABLE_JIT */
1099 #ifndef __GNUC_PREREQ
1100 #define __GNUC_PREREQ(maj, min) (0)
1104 mono_arch_flush_icache (guint8 *code, gint size)
1106 #if defined(__native_client__)
1107 // For Native Client we don't have to flush i-cache here,
1108 // as it's being done by dyncode interface.
1111 #ifdef MONO_CROSS_COMPILE
1113 sys_icache_invalidate (code, size);
1114 #elif __GNUC_PREREQ(4, 1)
1115 __clear_cache (code, code + size);
1116 #elif defined(PLATFORM_ANDROID)
1117 const int syscall = 0xf0002;
1125 : "r" (code), "r" (code + size), "r" (syscall)
1126 : "r0", "r1", "r7", "r2"
1129 __asm __volatile ("mov r0, %0\n"
1132 "swi 0x9f0002 @ sys_cacheflush"
1134 : "r" (code), "r" (code + size), "r" (0)
1135 : "r0", "r1", "r3" );
1137 #endif /* !__native_client__ */
1148 RegTypeStructByAddr,
1149 /* gsharedvt argument passed by addr in greg */
1150 RegTypeGSharedVtInReg,
1151 /* gsharedvt argument passed by addr on stack */
1152 RegTypeGSharedVtOnStack,
1157 guint16 vtsize; /* in param area */
1161 guint8 size : 4; /* 1, 2, 4, 8, or regs used by RegTypeStructByVal */
1166 guint32 stack_usage;
1167 gboolean vtype_retaddr;
1168 /* The index of the vret arg in the argument list */
1178 /*#define __alignof__(a) sizeof(a)*/
1179 #define __alignof__(type) G_STRUCT_OFFSET(struct { char c; type x; }, x)
/* Number of integer registers available for argument passing (r0-r3). */
1182 #define PARAM_REGS 4
/*
 * add_general:
 *
 *   Assign the next argument location: an integer register while r0-r3 last,
 * otherwise a stack slot at *stack_size. SIMPLE selects the single-word path;
 * the non-simple path handles 64 bit values, including the EABI rule that a
 * pair must start in an even register and the r3+stack split case.
 * NOTE(review): fragment — register/stack increments are partially elided.
 */
1185 add_general (guint *gr, guint *stack_size, ArgInfo *ainfo, gboolean simple)
1188 if (*gr > ARMREG_R3) {
1190 ainfo->offset = *stack_size;
1191 ainfo->reg = ARMREG_SP; /* in the caller */
1192 ainfo->storage = RegTypeBase;
1195 ainfo->storage = RegTypeGeneral;
/* 64 bit argument: whether it may be split across r3 and the stack depends on i8 alignment. */
1202 split = i8_align == 4;
1207 if (*gr == ARMREG_R3 && split) {
1208 /* first word in r3 and the second on the stack */
1209 ainfo->offset = *stack_size;
1210 ainfo->reg = ARMREG_SP; /* in the caller */
1211 ainfo->storage = RegTypeBaseGen;
1213 } else if (*gr >= ARMREG_R3) {
1214 if (eabi_supported) {
1215 /* darwin aligns longs to 4 byte only */
1216 if (i8_align == 8) {
1221 ainfo->offset = *stack_size;
1222 ainfo->reg = ARMREG_SP; /* in the caller */
1223 ainfo->storage = RegTypeBase;
/* EABI: a 64 bit value in registers must start on an even register number. */
1226 if (eabi_supported) {
1227 if (i8_align == 8 && ((*gr) & 1))
1230 ainfo->storage = RegTypeIRegPair;
1239 add_float (guint *fpr, guint *stack_size, ArgInfo *ainfo, gboolean is_double, gint *float_spare)
1242 * If we're calling a function like this:
1244 * void foo(float a, double b, float c)
1246 * We pass a in s0 and b in d1. That leaves us
1247 * with s1 being unused. The armhf ABI recognizes
1248 * this and requires register assignment to then
1249 * use that for the next single-precision arg,
1250 * i.e. c in this example. So float_spare either
1251 * tells us which reg to use for the next single-
1252 * precision arg, or it's -1, meaning use *fpr.
1254 * Note that even though most of the JIT speaks
1255 * double-precision, fpr represents single-
1256 * precision registers.
1258 * See parts 5.5 and 6.1.2 of the AAPCS for how
1262 if (*fpr < ARM_VFP_F16 || (!is_double && *float_spare >= 0)) {
1263 ainfo->storage = RegTypeFP;
1267 * If we're passing a double-precision value
1268 * and *fpr is odd (e.g. it's s1, s3, ...)
1269 * we need to use the next even register. So
1270 * we mark the current *fpr as a spare that
1271 * can be used for the next single-precision
1275 *float_spare = *fpr;
1280 * At this point, we have an even register
1281 * so we assign that and move along.
1285 } else if (*float_spare >= 0) {
1287 * We're passing a single-precision value
1288 * and it looks like a spare single-
1289 * precision register is available. Let's
1293 ainfo->reg = *float_spare;
1297 * If we hit this branch, we're passing a
1298 * single-precision value and we can simply
1299 * use the next available register.
1307 * We've exhausted available floating point
1308 * regs, so pass the rest on the stack.
1316 ainfo->offset = *stack_size;
1317 ainfo->reg = ARMREG_SP;
1318 ainfo->storage = RegTypeBase;
/*
 * get_call_info:
 *
 *   Compute how each argument and the return value of SIG are passed under
 * the ARM calling convention, filling in a freshly allocated CallInfo
 * (from MP when given, otherwise via g_malloc0).  Integer args go through
 * add_general (), FP args through add_float (); vtypes may be split
 * between r0-r3 and the stack (RegTypeStructByVal).
 *
 * NOTE(review): this listing is sampled — lines are elided between the
 * numbered rows, so some declarations and closing braces are not visible.
 */
1325 get_call_info (MonoGenericSharingContext *gsctx, MonoMemPool *mp, MonoMethodSignature *sig)
1327 guint i, gr, fpr, pstart;
1329 int n = sig->hasthis + sig->param_count;
1330 MonoType *simpletype;
1331 guint32 stack_size = 0;
1333 gboolean is_pinvoke = sig->pinvoke;
/* Allocate CallInfo + one trailing ArgInfo per argument; mempool path vs heap path. */
1337 cinfo = mono_mempool_alloc0 (mp, sizeof (CallInfo) + (sizeof (ArgInfo) * n));
1339 cinfo = g_malloc0 (sizeof (CallInfo) + (sizeof (ArgInfo) * n));
/* Decide whether a struct return needs a hidden return-address argument. */
1346 t = mini_type_get_underlying_type (gsctx, sig->ret);
1347 if (MONO_TYPE_ISSTRUCT (t)) {
/* Small pinvoke structs (<= pointer size) are returned by value in a register. */
1350 if (is_pinvoke && mono_class_native_size (mono_class_from_mono_type (t), &align) <= sizeof (gpointer)) {
1351 cinfo->ret.storage = RegTypeStructByVal;
1353 cinfo->vtype_retaddr = TRUE;
1355 } else if (!(t->type == MONO_TYPE_GENERICINST && !mono_type_generic_inst_is_valuetype (t)) && mini_is_gsharedvt_type_gsctx (gsctx, t)) {
1356 cinfo->vtype_retaddr = TRUE;
1362 * To simplify get_this_arg_reg () and LLVM integration, emit the vret arg after
1363 * the first argument, allowing 'this' to be always passed in the first arg reg.
1364 * Also do this if the first argument is a reference type, since virtual calls
1365 * are sometimes made using calli without sig->hasthis set, like in the delegate
1368 if (cinfo->vtype_retaddr && !is_pinvoke && (sig->hasthis || (sig->param_count > 0 && MONO_TYPE_IS_REFERENCE (mini_type_get_underlying_type (gsctx, sig->params [0]))))) {
1370 add_general (&gr, &stack_size, cinfo->args + 0, TRUE);
1372 add_general (&gr, &stack_size, &cinfo->args [sig->hasthis + 0], TRUE);
/* vret arg is assigned after the first argument in this path. */
1376 add_general (&gr, &stack_size, &cinfo->ret, TRUE);
1377 cinfo->vret_arg_index = 1;
1381 add_general (&gr, &stack_size, cinfo->args + 0, TRUE);
1385 if (cinfo->vtype_retaddr)
1386 add_general (&gr, &stack_size, &cinfo->ret, TRUE);
1389 DEBUG(printf("params: %d\n", sig->param_count));
1390 for (i = pstart; i < sig->param_count; ++i) {
1391 ArgInfo *ainfo = &cinfo->args [n];
1393 if ((sig->call_convention == MONO_CALL_VARARG) && (i == sig->sentinelpos)) {
1394 /* Prevent implicit arguments and sig_cookie from
1395 being passed in registers */
1398 /* Emit the signature cookie just before the implicit arguments */
1399 add_general (&gr, &stack_size, &cinfo->sig_cookie, TRUE);
1401 DEBUG(printf("param %d: ", i));
1402 if (sig->params [i]->byref) {
1403 DEBUG(printf("byref\n"));
1404 add_general (&gr, &stack_size, ainfo, TRUE);
/* Non-byref: dispatch on the underlying (unaliased) type. */
1408 simpletype = mini_type_get_underlying_type (gsctx, sig->params [i]);
1409 switch (simpletype->type) {
1410 case MONO_TYPE_BOOLEAN:
1413 cinfo->args [n].size = 1;
1414 add_general (&gr, &stack_size, ainfo, TRUE);
1417 case MONO_TYPE_CHAR:
1420 cinfo->args [n].size = 2;
1421 add_general (&gr, &stack_size, ainfo, TRUE);
1426 cinfo->args [n].size = 4;
1427 add_general (&gr, &stack_size, ainfo, TRUE);
1433 case MONO_TYPE_FNPTR:
1434 case MONO_TYPE_CLASS:
1435 case MONO_TYPE_OBJECT:
1436 case MONO_TYPE_STRING:
1437 case MONO_TYPE_SZARRAY:
1438 case MONO_TYPE_ARRAY:
1439 cinfo->args [n].size = sizeof (gpointer);
1440 add_general (&gr, &stack_size, ainfo, TRUE);
1443 case MONO_TYPE_GENERICINST:
1444 if (!mono_type_generic_inst_is_valuetype (simpletype)) {
/* Reference-type generic instance: passed like any other pointer. */
1445 cinfo->args [n].size = sizeof (gpointer);
1446 add_general (&gr, &stack_size, ainfo, TRUE);
1450 if (mini_is_gsharedvt_type_gsctx (gsctx, simpletype)) {
1451 /* gsharedvt arguments are passed by ref */
1452 g_assert (mini_is_gsharedvt_type_gsctx (gsctx, simpletype));
1453 add_general (&gr, &stack_size, ainfo, TRUE);
/* Re-tag the storage chosen by add_general () as the gsharedvt variant. */
1454 switch (ainfo->storage) {
1455 case RegTypeGeneral:
1456 ainfo->storage = RegTypeGSharedVtInReg;
1459 ainfo->storage = RegTypeGSharedVtOnStack;
1462 g_assert_not_reached ();
1468 case MONO_TYPE_TYPEDBYREF:
1469 case MONO_TYPE_VALUETYPE: {
1475 if (simpletype->type == MONO_TYPE_TYPEDBYREF) {
1476 size = sizeof (MonoTypedRef);
1477 align = sizeof (gpointer);
1479 MonoClass *klass = mono_class_from_mono_type (sig->params [i]);
1481 size = mono_class_native_size (klass, &align);
1483 size = mini_type_stack_size_full (gsctx, simpletype, &align, FALSE);
1485 DEBUG(printf ("load %d bytes struct\n", size));
/* Round the struct size up to a whole number of pointer-sized words. */
1488 align_size += (sizeof (gpointer) - 1);
1489 align_size &= ~(sizeof (gpointer) - 1);
1490 nwords = (align_size + sizeof (gpointer) -1 ) / sizeof (gpointer);
1491 ainfo->storage = RegTypeStructByVal;
1492 ainfo->struct_size = size;
1493 /* FIXME: align stack_size if needed */
1494 if (eabi_supported) {
/* EABI: 8-byte-aligned structs must start in an even register pair. */
1495 if (align >= 8 && (gr & 1))
1498 if (gr > ARMREG_R3) {
/* No registers left: the whole struct goes on the stack. */
1500 ainfo->vtsize = nwords;
1502 int rest = ARMREG_R3 - gr + 1;
1503 int n_in_regs = rest >= nwords? nwords: rest;
/* Split: first n_in_regs words in r0-r3, the rest (vtsize) on the stack. */
1505 ainfo->size = n_in_regs;
1506 ainfo->vtsize = nwords - n_in_regs;
1509 nwords -= n_in_regs;
1511 ainfo->offset = stack_size;
1512 /*g_print ("offset for arg %d at %d\n", n, stack_size);*/
1513 stack_size += nwords * sizeof (gpointer);
1520 add_general (&gr, &stack_size, ainfo, FALSE);
/* Float/double: hardfp uses add_float (), otherwise args travel in core regs. */
1527 add_float (&fpr, &stack_size, ainfo, FALSE, &float_spare);
1529 add_general (&gr, &stack_size, ainfo, TRUE);
1537 add_float (&fpr, &stack_size, ainfo, TRUE, &float_spare);
1539 add_general (&gr, &stack_size, ainfo, FALSE);
1544 case MONO_TYPE_MVAR:
1545 /* gsharedvt arguments are passed by ref */
1546 g_assert (mini_is_gsharedvt_type_gsctx (gsctx, simpletype));
1547 add_general (&gr, &stack_size, ainfo, TRUE);
1548 switch (ainfo->storage) {
1549 case RegTypeGeneral:
1550 ainfo->storage = RegTypeGSharedVtInReg;
1553 ainfo->storage = RegTypeGSharedVtOnStack;
1556 g_assert_not_reached ();
1561 g_error ("Can't trampoline 0x%x", sig->params [i]->type);
1565 /* Handle the case where there are no implicit arguments */
1566 if ((sig->call_convention == MONO_CALL_VARARG) && (i == sig->sentinelpos)) {
1567 /* Prevent implicit arguments and sig_cookie from
1568 being passed in registers */
1571 /* Emit the signature cookie just before the implicit arguments */
1572 add_general (&gr, &stack_size, &cinfo->sig_cookie, TRUE);
/* Classify the return value. */
1576 simpletype = mini_type_get_underlying_type (gsctx, sig->ret);
1577 switch (simpletype->type) {
1578 case MONO_TYPE_BOOLEAN:
1583 case MONO_TYPE_CHAR:
1589 case MONO_TYPE_FNPTR:
1590 case MONO_TYPE_CLASS:
1591 case MONO_TYPE_OBJECT:
1592 case MONO_TYPE_SZARRAY:
1593 case MONO_TYPE_ARRAY:
1594 case MONO_TYPE_STRING:
1595 cinfo->ret.storage = RegTypeGeneral;
1596 cinfo->ret.reg = ARMREG_R0;
/* 64-bit integers come back in the r0/r1 pair. */
1600 cinfo->ret.storage = RegTypeIRegPair;
1601 cinfo->ret.reg = ARMREG_R0;
1605 cinfo->ret.storage = RegTypeFP;
/* hardfp ABI returns FP values in d0/s0; softfp returns them in core regs. */
1607 if (IS_HARD_FLOAT) {
1608 cinfo->ret.reg = ARM_VFP_F0;
1610 cinfo->ret.reg = ARMREG_R0;
1614 case MONO_TYPE_GENERICINST:
1615 if (!mono_type_generic_inst_is_valuetype (simpletype)) {
1616 cinfo->ret.storage = RegTypeGeneral;
1617 cinfo->ret.reg = ARMREG_R0;
1620 // FIXME: Only for variable types
1621 if (mini_is_gsharedvt_type_gsctx (gsctx, simpletype)) {
1622 cinfo->ret.storage = RegTypeStructByAddr;
1623 g_assert (cinfo->vtype_retaddr);
1627 case MONO_TYPE_VALUETYPE:
1628 case MONO_TYPE_TYPEDBYREF:
/* Don't clobber a RegTypeStructByVal decision made earlier for small pinvoke structs. */
1629 if (cinfo->ret.storage != RegTypeStructByVal)
1630 cinfo->ret.storage = RegTypeStructByAddr;
1633 case MONO_TYPE_MVAR:
1634 g_assert (mini_is_gsharedvt_type_gsctx (gsctx, simpletype));
1635 cinfo->ret.storage = RegTypeStructByAddr;
1636 g_assert (cinfo->vtype_retaddr);
1638 case MONO_TYPE_VOID:
1641 g_error ("Can't handle as return value 0x%x", sig->ret->type);
1645 /* align stack size to 8 */
/* NOTE(review): the DEBUG printf below rounds with ~15 (16-byte) while the
 * code rounds with ~7 (8-byte) — the debug output can disagree with the
 * actual stack_usage. Pre-existing inconsistency; confirm before changing. */
1646 DEBUG (printf (" stack size: %d (%d)\n", (stack_size + 15) & ~15, stack_size));
1647 stack_size = (stack_size + 7) & ~7;
1649 cinfo->stack_usage = stack_size;
/*
 * debug_omit_fp:
 *
 *   Debug gate for frame-pointer elimination: delegates to
 * mono_debug_count () so omission can be bisected at runtime.
 */
1656 debug_omit_fp (void)
1659 return mono_debug_count ();
1666 * mono_arch_compute_omit_fp:
1668 * Determine whether the frame pointer can be eliminated.
1671 mono_arch_compute_omit_fp (MonoCompile *cfg)
1673 MonoMethodSignature *sig;
1674 MonoMethodHeader *header;
/* Computed once per compile; bail out if already done. */
1678 if (cfg->arch.omit_fp_computed)
1681 header = cfg->header;
1683 sig = mono_method_signature (cfg->method);
/* Lazily compute and cache the calling-convention info. */
1685 if (!cfg->arch.cinfo)
1686 cfg->arch.cinfo = get_call_info (cfg->generic_sharing_context, cfg->mempool, sig);
1687 cinfo = cfg->arch.cinfo;
1690 * FIXME: Remove some of the restrictions.
/* Start optimistic, then veto omission for each disqualifying condition. */
1692 cfg->arch.omit_fp = TRUE;
1693 cfg->arch.omit_fp_computed = TRUE;
1695 if (cfg->disable_omit_fp)
1696 cfg->arch.omit_fp = FALSE;
1697 if (!debug_omit_fp ())
1698 cfg->arch.omit_fp = FALSE;
1700 if (cfg->method->save_lmf)
1701 cfg->arch.omit_fp = FALSE;
1703 if (cfg->flags & MONO_CFG_HAS_ALLOCA)
1704 cfg->arch.omit_fp = FALSE;
1705 if (header->num_clauses)
1706 cfg->arch.omit_fp = FALSE;
1707 if (cfg->param_area)
1708 cfg->arch.omit_fp = FALSE;
1709 if (!sig->pinvoke && (sig->call_convention == MONO_CALL_VARARG))
1710 cfg->arch.omit_fp = FALSE;
1711 if ((mono_jit_trace_calls != NULL && mono_trace_eval (cfg->method)) ||
1712 (cfg->prof_options & MONO_PROFILE_ENTER_LEAVE))
1713 cfg->arch.omit_fp = FALSE;
1714 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
1715 ArgInfo *ainfo = &cinfo->args [i];
1717 if (ainfo->storage == RegTypeBase || ainfo->storage == RegTypeBaseGen || ainfo->storage == RegTypeStructByVal) {
1719 * The stack offset can only be determined when the frame
1722 cfg->arch.omit_fp = FALSE;
/* Accumulate the size of the locals (presumably feeds a size-based
 * heuristic — the use of locals_size is not visible in this listing). */
1727 for (i = cfg->locals_start; i < cfg->num_varinfo; i++) {
1728 MonoInst *ins = cfg->varinfo [i];
1731 locals_size += mono_type_size (ins->inst_vtype, &ialign);
1736 * Set var information according to the calling convention. arm version.
1737 * The locals var stuff should most likely be split in another method.
1740 mono_arch_allocate_vars (MonoCompile *cfg)
1742 MonoMethodSignature *sig;
1743 MonoMethodHeader *header;
1745 int i, offset, size, align, curinst;
1749 sig = mono_method_signature (cfg->method);
/* Lazily compute and cache the calling-convention info. */
1751 if (!cfg->arch.cinfo)
1752 cfg->arch.cinfo = get_call_info (cfg->generic_sharing_context, cfg->mempool, sig);
1753 cinfo = cfg->arch.cinfo;
1755 mono_arch_compute_omit_fp (cfg);
/* Frame is addressed off SP when the frame pointer is omitted, else off FP. */
1757 if (cfg->arch.omit_fp)
1758 cfg->frame_reg = ARMREG_SP;
1760 cfg->frame_reg = ARMREG_FP;
1762 cfg->flags |= MONO_CFG_HAS_SPILLUP;
1764 /* allow room for the vararg method args: void* and long/double */
1765 if (mono_jit_trace_calls != NULL && mono_trace_eval (cfg->method))
1766 cfg->param_area = MAX (cfg->param_area, sizeof (gpointer)*8);
1768 header = cfg->header;
1770 /* See mono_arch_get_global_int_regs () */
1771 if (cfg->flags & MONO_CFG_HAS_CALLS)
1772 cfg->uses_rgctx_reg = TRUE;
1774 if (cfg->frame_reg != ARMREG_SP)
1775 cfg->used_int_regs |= 1 << cfg->frame_reg;
1777 if (cfg->compile_aot || cfg->uses_rgctx_reg || COMPILE_LLVM (cfg))
1778 /* V5 is reserved for passing the vtable/rgctx/IMT method */
1779 cfg->used_int_regs |= (1 << ARMREG_V5);
/* Scalar returns live directly in r0 as a register variable. */
1783 if (!MONO_TYPE_ISSTRUCT (sig->ret) && !cinfo->vtype_retaddr) {
1784 if (sig->ret->type != MONO_TYPE_VOID) {
1785 cfg->ret->opcode = OP_REGVAR;
1786 cfg->ret->inst_c0 = ARMREG_R0;
1789 /* local vars are at a positive offset from the stack pointer */
1791 * also note that if the function uses alloca, we use FP
1792 * to point at the local variables.
1794 offset = 0; /* linkage area */
1795 /* align the offset to 16 bytes: not sure this is needed here */
1797 //offset &= ~(8 - 1);
1799 /* add parameter area size for called functions */
1800 offset += cfg->param_area;
1803 if (cfg->flags & MONO_CFG_HAS_FPOUT)
1806 /* allow room to save the return value */
1807 if (mono_jit_trace_calls != NULL && mono_trace_eval (cfg->method))
1810 /* the MonoLMF structure is stored just below the stack pointer */
1811 if (cinfo->ret.storage == RegTypeStructByVal) {
/* Small struct returned in registers: give it a pointer-aligned frame slot. */
1812 cfg->ret->opcode = OP_REGOFFSET;
1813 cfg->ret->inst_basereg = cfg->frame_reg;
1814 offset += sizeof (gpointer) - 1;
1815 offset &= ~(sizeof (gpointer) - 1);
1816 cfg->ret->inst_offset = - offset;
1817 offset += sizeof(gpointer);
1818 } else if (cinfo->vtype_retaddr) {
/* Hidden return-address argument: allocate a slot for vret_addr. */
1819 ins = cfg->vret_addr;
1820 offset += sizeof(gpointer) - 1;
1821 offset &= ~(sizeof(gpointer) - 1);
1822 ins->inst_offset = offset;
1823 ins->opcode = OP_REGOFFSET;
1824 ins->inst_basereg = cfg->frame_reg;
1825 if (G_UNLIKELY (cfg->verbose_level > 1)) {
1826 printf ("vret_addr =");
1827 mono_print_ins (cfg->vret_addr);
1829 offset += sizeof(gpointer);
1832 /* Allocate these first so they have a small offset, OP_SEQ_POINT depends on this */
1833 if (cfg->arch.seq_point_info_var) {
1836 ins = cfg->arch.seq_point_info_var;
1840 offset += align - 1;
1841 offset &= ~(align - 1);
1842 ins->opcode = OP_REGOFFSET;
1843 ins->inst_basereg = cfg->frame_reg;
1844 ins->inst_offset = offset;
1847 ins = cfg->arch.ss_trigger_page_var;
1850 offset += align - 1;
1851 offset &= ~(align - 1);
1852 ins->opcode = OP_REGOFFSET;
1853 ins->inst_basereg = cfg->frame_reg;
1854 ins->inst_offset = offset;
/* Soft-breakpoint sequence-point variables get slots the same way. */
1858 if (cfg->arch.seq_point_read_var) {
1861 ins = cfg->arch.seq_point_read_var;
1865 offset += align - 1;
1866 offset &= ~(align - 1);
1867 ins->opcode = OP_REGOFFSET;
1868 ins->inst_basereg = cfg->frame_reg;
1869 ins->inst_offset = offset;
1872 ins = cfg->arch.seq_point_ss_method_var;
1875 offset += align - 1;
1876 offset &= ~(align - 1);
1877 ins->opcode = OP_REGOFFSET;
1878 ins->inst_basereg = cfg->frame_reg;
1879 ins->inst_offset = offset;
1882 ins = cfg->arch.seq_point_bp_method_var;
1885 offset += align - 1;
1886 offset &= ~(align - 1);
1887 ins->opcode = OP_REGOFFSET;
1888 ins->inst_basereg = cfg->frame_reg;
1889 ins->inst_offset = offset;
1893 cfg->locals_min_stack_offset = offset;
/* Assign frame slots to every live local that isn't already in a register. */
1895 curinst = cfg->locals_start;
1896 for (i = curinst; i < cfg->num_varinfo; ++i) {
1899 ins = cfg->varinfo [i];
1900 if ((ins->flags & MONO_INST_IS_DEAD) || ins->opcode == OP_REGVAR || ins->opcode == OP_REGOFFSET)
1903 t = ins->inst_vtype;
1904 if (cfg->gsharedvt && mini_is_gsharedvt_variable_type (cfg, t))
1907 /* inst->backend.is_pinvoke indicates native sized value types, this is used by the
1908 * pinvoke wrappers when they call functions returning structure */
1909 if (ins->backend.is_pinvoke && MONO_TYPE_ISSTRUCT (t) && t->type != MONO_TYPE_TYPEDBYREF) {
1910 size = mono_class_native_size (mono_class_from_mono_type (t), &ualign);
1914 size = mono_type_size (t, &align);
1916 /* FIXME: if a structure is misaligned, our memcpy doesn't work,
1917 * since it loads/stores misaligned words, which don't do the right thing.
1919 if (align < 4 && size >= 4)
/* Padding introduced by over-alignment must not be scanned as a GC reference. */
1921 if (ALIGN_TO (offset, align) > ALIGN_TO (offset, 4))
1922 mini_gc_set_slot_type_from_fp (cfg, ALIGN_TO (offset, 4), SLOT_NOREF);
1923 offset += align - 1;
1924 offset &= ~(align - 1);
1925 ins->opcode = OP_REGOFFSET;
1926 ins->inst_offset = offset;
1927 ins->inst_basereg = cfg->frame_reg;
1929 //g_print ("allocating local %d to %d\n", i, inst->inst_offset);
1932 cfg->locals_max_stack_offset = offset;
/* The 'this' argument (when not a REGVAR) gets a pointer-sized slot. */
1936 ins = cfg->args [curinst];
1937 if (ins->opcode != OP_REGVAR) {
1938 ins->opcode = OP_REGOFFSET;
1939 ins->inst_basereg = cfg->frame_reg;
1940 offset += sizeof (gpointer) - 1;
1941 offset &= ~(sizeof (gpointer) - 1);
1942 ins->inst_offset = offset;
1943 offset += sizeof (gpointer);
1948 if (sig->call_convention == MONO_CALL_VARARG) {
1952 /* Allocate a local slot to hold the sig cookie address */
1953 offset += align - 1;
1954 offset &= ~(align - 1);
1955 cfg->sig_cookie = offset;
/* Assign frame slots for the declared parameters. */
1959 for (i = 0; i < sig->param_count; ++i) {
1960 ins = cfg->args [curinst];
1962 if (ins->opcode != OP_REGVAR) {
1963 ins->opcode = OP_REGOFFSET;
1964 ins->inst_basereg = cfg->frame_reg;
1965 size = mini_type_stack_size_full (cfg->generic_sharing_context, sig->params [i], &ualign, sig->pinvoke);
1967 /* FIXME: if a structure is misaligned, our memcpy doesn't work,
1968 * since it loads/stores misaligned words, which don't do the right thing.
1970 if (align < 4 && size >= 4)
1972 /* The code in the prolog () stores words when storing vtypes received in a register */
1973 if (MONO_TYPE_ISSTRUCT (sig->params [i]))
1975 if (ALIGN_TO (offset, align) > ALIGN_TO (offset, 4))
1976 mini_gc_set_slot_type_from_fp (cfg, ALIGN_TO (offset, 4), SLOT_NOREF);
1977 offset += align - 1;
1978 offset &= ~(align - 1);
1979 ins->inst_offset = offset;
1985 /* align the offset to 8 bytes */
1986 if (ALIGN_TO (offset, 8) > ALIGN_TO (offset, 4))
1987 mini_gc_set_slot_type_from_fp (cfg, ALIGN_TO (offset, 4), SLOT_NOREF);
1992 cfg->stack_offset = offset;
/*
 * mono_arch_create_vars:
 *
 *   Create the architecture-specific variables needed by this method:
 * the vret address argument (for by-address struct returns) and the
 * sequence-point bookkeeping variables used by the debugger.
 */
1996 mono_arch_create_vars (MonoCompile *cfg)
1998 MonoMethodSignature *sig;
2001 sig = mono_method_signature (cfg->method);
/* Lazily compute and cache the calling-convention info. */
2003 if (!cfg->arch.cinfo)
2004 cfg->arch.cinfo = get_call_info (cfg->generic_sharing_context, cfg->mempool, sig);
2005 cinfo = cfg->arch.cinfo;
2007 if (cinfo->ret.storage == RegTypeStructByVal)
2008 cfg->ret_var_is_local = TRUE;
2010 if (cinfo->vtype_retaddr) {
2011 cfg->vret_addr = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_ARG);
2012 if (G_UNLIKELY (cfg->verbose_level > 1)) {
2013 printf ("vret_addr = ");
2014 mono_print_ins (cfg->vret_addr);
2018 if (cfg->gen_seq_points) {
2019 if (cfg->soft_breakpoints) {
/* Soft breakpoints: three volatile locals so the debugger can read/patch
 * them; marked VOLATILE so the optimizer keeps them in memory. */
2020 MonoInst *ins = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
2021 ins->flags |= MONO_INST_VOLATILE;
2022 cfg->arch.seq_point_read_var = ins;
2024 ins = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
2025 ins->flags |= MONO_INST_VOLATILE;
2026 cfg->arch.seq_point_ss_method_var = ins;
2028 ins = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
2029 ins->flags |= MONO_INST_VOLATILE;
2030 cfg->arch.seq_point_bp_method_var = ins;
2032 g_assert (!cfg->compile_aot);
2033 } else if (cfg->compile_aot) {
2034 MonoInst *ins = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
2035 ins->flags |= MONO_INST_VOLATILE;
2036 cfg->arch.seq_point_info_var = ins;
2038 /* Allocate a separate variable for this to save 1 load per seq point */
2039 ins = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
2040 ins->flags |= MONO_INST_VOLATILE;
2041 cfg->arch.ss_trigger_page_var = ins;
/*
 * emit_sig_cookie:
 *
 *   Emit the vararg signature cookie store for CALL: build a trimmed
 * signature (params after the sentinel) and store its address at the
 * cookie's stack slot. No-op for tail calls.
 */
2047 emit_sig_cookie (MonoCompile *cfg, MonoCallInst *call, CallInfo *cinfo)
2049 MonoMethodSignature *tmp_sig;
2052 if (call->tail_call)
/* The cookie is always passed on the stack on ARM. */
2055 g_assert (cinfo->sig_cookie.storage == RegTypeBase);
2058 * mono_ArgIterator_Setup assumes the signature cookie is
2059 * passed first and all the arguments which were before it are
2060 * passed on the stack after the signature. So compensate by
2061 * passing a different signature.
2063 tmp_sig = mono_metadata_signature_dup (call->signature);
2064 tmp_sig->param_count -= call->signature->sentinelpos;
2065 tmp_sig->sentinelpos = 0;
2066 memcpy (tmp_sig->params, call->signature->params + call->signature->sentinelpos, tmp_sig->param_count * sizeof (MonoType*));
2068 sig_reg = mono_alloc_ireg (cfg);
2069 MONO_EMIT_NEW_SIGNATURECONST (cfg, sig_reg, tmp_sig);
2071 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, ARMREG_SP, cinfo->sig_cookie.offset, sig_reg);
/*
 * mono_arch_get_llvm_call_info:
 *
 *   Translate our CallInfo for SIG into the LLVMCallInfo the LLVM
 * backend consumes. Unsupported conventions set cfg->disable_llvm with
 * an explanatory message instead of failing hard.
 */
2076 mono_arch_get_llvm_call_info (MonoCompile *cfg, MonoMethodSignature *sig)
2081 LLVMCallInfo *linfo;
2083 n = sig->param_count + sig->hasthis;
2085 cinfo = get_call_info (cfg->generic_sharing_context, cfg->mempool, sig);
2087 linfo = mono_mempool_alloc0 (cfg->mempool, sizeof (LLVMCallInfo) + (sizeof (LLVMArgInfo) * n));
2090 * LLVM always uses the native ABI while we use our own ABI, the
2091 * only difference is the handling of vtypes:
2092 * - we only pass/receive them in registers in some cases, and only
2093 * in 1 or 2 integer registers.
2095 if (cinfo->vtype_retaddr) {
2096 /* Vtype returned using a hidden argument */
2097 linfo->ret.storage = LLVMArgVtypeRetAddr;
2098 linfo->vret_arg_index = cinfo->vret_arg_index;
2099 } else if (cinfo->ret.storage != RegTypeGeneral && cinfo->ret.storage != RegTypeNone && cinfo->ret.storage != RegTypeFP && cinfo->ret.storage != RegTypeIRegPair) {
2100 cfg->exception_message = g_strdup ("unknown ret conv");
2101 cfg->disable_llvm = TRUE;
2105 for (i = 0; i < n; ++i) {
2106 ainfo = cinfo->args + i;
2108 linfo->args [i].storage = LLVMArgNone;
2110 switch (ainfo->storage) {
2111 case RegTypeGeneral:
2112 case RegTypeIRegPair:
2114 linfo->args [i].storage = LLVMArgInIReg;
2116 case RegTypeStructByVal:
2117 // FIXME: Passing entirely on the stack or split reg/stack
2118 if (ainfo->vtsize == 0 && ainfo->size <= 2) {
/* Vtype passed fully in at most two integer registers. */
2119 linfo->args [i].storage = LLVMArgVtypeInReg;
2120 linfo->args [i].pair_storage [0] = LLVMArgInIReg;
2121 if (ainfo->size == 2)
2122 linfo->args [i].pair_storage [1] = LLVMArgInIReg;
2124 linfo->args [i].pair_storage [1] = LLVMArgNone;
2126 cfg->exception_message = g_strdup_printf ("vtype-by-val on stack");
2127 cfg->disable_llvm = TRUE;
2131 cfg->exception_message = g_strdup_printf ("ainfo->storage (%d)", ainfo->storage);
2132 cfg->disable_llvm = TRUE;
/*
 * mono_arch_emit_call:
 *
 *   Emit the IR that marshals CALL's arguments into their ABI locations
 * (registers and/or stack slots), as dictated by get_call_info ().
 * Handles i8 pairs, soft-float, VFP, split vtypes and the vararg
 * signature cookie.
 */
2142 mono_arch_emit_call (MonoCompile *cfg, MonoCallInst *call)
2145 MonoMethodSignature *sig;
2149 sig = call->signature;
2150 n = sig->param_count + sig->hasthis;
2152 cinfo = get_call_info (cfg->generic_sharing_context, NULL, sig);
2154 for (i = 0; i < n; ++i) {
2155 ArgInfo *ainfo = cinfo->args + i;
/* 'this' (i < hasthis) is treated as an int-sized argument. */
2158 if (i >= sig->hasthis)
2159 t = sig->params [i - sig->hasthis];
2161 t = &mono_defaults.int_class->byval_arg;
2162 t = mini_type_get_underlying_type (cfg->generic_sharing_context, t);
2164 if ((sig->call_convention == MONO_CALL_VARARG) && (i == sig->sentinelpos)) {
2165 /* Emit the signature cookie just before the implicit arguments */
2166 emit_sig_cookie (cfg, call, cinfo);
2169 in = call->args [i];
2171 switch (ainfo->storage) {
2172 case RegTypeGeneral:
2173 case RegTypeIRegPair:
2174 if (!t->byref && ((t->type == MONO_TYPE_I8) || (t->type == MONO_TYPE_U8))) {
/* 64-bit value: move each half (dreg+1 / dreg+2) into consecutive regs. */
2175 MONO_INST_NEW (cfg, ins, OP_MOVE);
2176 ins->dreg = mono_alloc_ireg (cfg);
2177 ins->sreg1 = in->dreg + 1;
2178 MONO_ADD_INS (cfg->cbb, ins);
2179 mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, ainfo->reg, FALSE);
2181 MONO_INST_NEW (cfg, ins, OP_MOVE);
2182 ins->dreg = mono_alloc_ireg (cfg);
2183 ins->sreg1 = in->dreg + 2;
2184 MONO_ADD_INS (cfg->cbb, ins);
2185 mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, ainfo->reg + 1, FALSE);
2186 } else if (!t->byref && ((t->type == MONO_TYPE_R8) || (t->type == MONO_TYPE_R4))) {
2187 if (ainfo->size == 4) {
2188 if (IS_SOFT_FLOAT) {
2189 /* mono_emit_call_args () have already done the r8->r4 conversion */
2190 /* The converted value is in an int vreg */
2191 MONO_INST_NEW (cfg, ins, OP_MOVE);
2192 ins->dreg = mono_alloc_ireg (cfg);
2193 ins->sreg1 = in->dreg;
2194 MONO_ADD_INS (cfg->cbb, ins);
2195 mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, ainfo->reg, FALSE);
/* VFP: bounce the float through the param area to get it into a core reg. */
2199 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORER4_MEMBASE_REG, ARMREG_SP, (cfg->param_area - 8), in->dreg);
2200 creg = mono_alloc_ireg (cfg);
2201 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOAD_MEMBASE, creg, ARMREG_SP, (cfg->param_area - 8));
2202 mono_call_inst_add_outarg_reg (cfg, call, creg, ainfo->reg, FALSE);
2205 if (IS_SOFT_FLOAT) {
/* Soft-float double: split into low/high 32-bit halves. */
2206 MONO_INST_NEW (cfg, ins, OP_FGETLOW32);
2207 ins->dreg = mono_alloc_ireg (cfg);
2208 ins->sreg1 = in->dreg;
2209 MONO_ADD_INS (cfg->cbb, ins);
2210 mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, ainfo->reg, FALSE);
2212 MONO_INST_NEW (cfg, ins, OP_FGETHIGH32);
2213 ins->dreg = mono_alloc_ireg (cfg);
2214 ins->sreg1 = in->dreg;
2215 MONO_ADD_INS (cfg->cbb, ins);
2216 mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, ainfo->reg + 1, FALSE);
/* VFP double: bounce through the param area into a core register pair. */
2220 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORER8_MEMBASE_REG, ARMREG_SP, (cfg->param_area - 8), in->dreg);
2221 creg = mono_alloc_ireg (cfg);
2222 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOAD_MEMBASE, creg, ARMREG_SP, (cfg->param_area - 8));
2223 mono_call_inst_add_outarg_reg (cfg, call, creg, ainfo->reg, FALSE);
2224 creg = mono_alloc_ireg (cfg);
2225 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOAD_MEMBASE, creg, ARMREG_SP, (cfg->param_area - 8 + 4));
2226 mono_call_inst_add_outarg_reg (cfg, call, creg, ainfo->reg + 1, FALSE);
2229 cfg->flags |= MONO_CFG_HAS_FPOUT;
2231 MONO_INST_NEW (cfg, ins, OP_MOVE);
2232 ins->dreg = mono_alloc_ireg (cfg);
2233 ins->sreg1 = in->dreg;
2234 MONO_ADD_INS (cfg->cbb, ins);
2236 mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, ainfo->reg, FALSE);
2239 case RegTypeStructByAddr:
2242 /* FIXME: where is the data allocated? */
2243 arg->backend.reg3 = ainfo->reg;
2244 call->used_iregs |= 1 << ainfo->reg;
2245 g_assert_not_reached ();
2248 case RegTypeStructByVal:
2249 case RegTypeGSharedVtInReg:
2250 case RegTypeGSharedVtOnStack:
/* Delegate vtype marshalling to mono_arch_emit_outarg_vt () via OP_OUTARG_VT. */
2251 MONO_INST_NEW (cfg, ins, OP_OUTARG_VT);
2252 ins->opcode = OP_OUTARG_VT;
2253 ins->sreg1 = in->dreg;
2254 ins->klass = in->klass;
2255 ins->inst_p0 = call;
2256 ins->inst_p1 = mono_mempool_alloc (cfg->mempool, sizeof (ArgInfo));
2257 memcpy (ins->inst_p1, ainfo, sizeof (ArgInfo));
2258 mono_call_inst_add_outarg_vt (cfg, call, ins);
2259 MONO_ADD_INS (cfg->cbb, ins);
/* Stack-passed argument (RegTypeBase, presumably — case label elided): store at its offset. */
2262 if (!t->byref && ((t->type == MONO_TYPE_I8) || (t->type == MONO_TYPE_U8))) {
2263 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, ARMREG_SP, ainfo->offset, in->dreg);
2264 } else if (!t->byref && ((t->type == MONO_TYPE_R4) || (t->type == MONO_TYPE_R8))) {
2265 if (t->type == MONO_TYPE_R8) {
2266 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORER8_MEMBASE_REG, ARMREG_SP, ainfo->offset, in->dreg);
2269 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, ARMREG_SP, ainfo->offset, in->dreg);
2271 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORER4_MEMBASE_REG, ARMREG_SP, ainfo->offset, in->dreg);
2274 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, ARMREG_SP, ainfo->offset, in->dreg);
2277 case RegTypeBaseGen:
/* Argument split between r3 and the stack (the reg/stack boundary). */
2278 if (!t->byref && ((t->type == MONO_TYPE_I8) || (t->type == MONO_TYPE_U8))) {
2279 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, ARMREG_SP, ainfo->offset, (G_BYTE_ORDER == G_BIG_ENDIAN) ? in->dreg + 1 : in->dreg + 2);
2280 MONO_INST_NEW (cfg, ins, OP_MOVE);
2281 ins->dreg = mono_alloc_ireg (cfg);
2282 ins->sreg1 = G_BYTE_ORDER == G_BIG_ENDIAN ? in->dreg + 2 : in->dreg + 1;
2283 MONO_ADD_INS (cfg->cbb, ins);
2284 mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, ARMREG_R3, FALSE);
2285 } else if (!t->byref && (t->type == MONO_TYPE_R8)) {
2288 /* This should work for soft-float as well */
2290 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORER8_MEMBASE_REG, ARMREG_SP, (cfg->param_area - 8), in->dreg);
2291 creg = mono_alloc_ireg (cfg);
2292 mono_call_inst_add_outarg_reg (cfg, call, creg, ARMREG_R3, FALSE);
2293 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOAD_MEMBASE, creg, ARMREG_SP, (cfg->param_area - 8));
2294 creg = mono_alloc_ireg (cfg);
2295 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOAD_MEMBASE, creg, ARMREG_SP, (cfg->param_area - 4));
2296 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, ARMREG_SP, ainfo->offset, creg);
2297 cfg->flags |= MONO_CFG_HAS_FPOUT;
2299 g_assert_not_reached ();
2303 int fdreg = mono_alloc_freg (cfg);
2305 if (ainfo->size == 8) {
/* hardfp double: a plain FMOVE into the target VFP register works. */
2306 MONO_INST_NEW (cfg, ins, OP_FMOVE);
2307 ins->sreg1 = in->dreg;
2309 MONO_ADD_INS (cfg->cbb, ins);
2311 mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, ainfo->reg, TRUE);
2316 * Mono's register allocator doesn't speak single-precision registers that
2317 * overlap double-precision registers (i.e. armhf). So we have to work around
2318 * the register allocator and load the value from memory manually.
2320 * So we create a variable for the float argument and an instruction to store
2321 * the argument into the variable. We then store the list of these arguments
2322 * in cfg->float_args. This list is then used by emit_float_args later to
2323 * pass the arguments in the various call opcodes.
2325 * This is not very nice, and we should really try to fix the allocator.
2328 MonoInst *float_arg = mono_compile_create_var (cfg, &mono_defaults.single_class->byval_arg, OP_LOCAL);
2330 /* Make sure the instruction isn't seen as pointless and removed.
2332 float_arg->flags |= MONO_INST_VOLATILE;
2334 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, float_arg->dreg, in->dreg);
2336 /* We use the dreg to look up the instruction later. The hreg is used to
2337 * emit the instruction that loads the value into the FP reg.
2339 fad = mono_mempool_alloc0 (cfg->mempool, sizeof (FloatArgData));
2340 fad->vreg = float_arg->dreg;
2341 fad->hreg = ainfo->reg;
2343 call->float_args = g_slist_append_mempool (cfg->mempool, call->float_args, fad);
2346 call->used_iregs |= 1 << ainfo->reg;
2347 cfg->flags |= MONO_CFG_HAS_FPOUT;
2351 g_assert_not_reached ();
2355 /* Handle the case where there are no implicit arguments */
2356 if (!sig->pinvoke && (sig->call_convention == MONO_CALL_VARARG) && (n == sig->sentinelpos))
2357 emit_sig_cookie (cfg, call, cinfo);
2359 if (cinfo->ret.storage == RegTypeStructByVal) {
2360 /* The JIT will transform this into a normal call */
2361 call->vret_in_reg = TRUE;
2362 } else if (cinfo->vtype_retaddr) {
/* Pass the vret buffer address in its assigned register. */
2364 MONO_INST_NEW (cfg, vtarg, OP_MOVE);
2365 vtarg->sreg1 = call->vret_var->dreg;
2366 vtarg->dreg = mono_alloc_preg (cfg);
2367 MONO_ADD_INS (cfg->cbb, vtarg);
2369 mono_call_inst_add_outarg_reg (cfg, call, vtarg->dreg, cinfo->ret.reg, FALSE);
2372 call->stack_usage = cinfo->stack_usage;
/*
 * mono_arch_emit_outarg_vt:
 *
 *   Emit the loads/stores that pass a vtype argument (OP_OUTARG_VT):
 * register words are loaded from SRC (byte/halfword assembly for
 * sub-word structs), the stack overflow portion is copied with
 * mini_emit_memcpy (). gsharedvt arguments pass the address instead.
 */
2378 mono_arch_emit_outarg_vt (MonoCompile *cfg, MonoInst *ins, MonoInst *src)
2380 MonoCallInst *call = (MonoCallInst*)ins->inst_p0;
2381 ArgInfo *ainfo = ins->inst_p1;
2382 int ovf_size = ainfo->vtsize;
2383 int doffset = ainfo->offset;
2384 int struct_size = ainfo->struct_size;
2385 int i, soffset, dreg, tmpreg;
2387 if (ainfo->storage == RegTypeGSharedVtInReg) {
/* Pass by addr in a register. */
2389 mono_call_inst_add_outarg_reg (cfg, call, src->dreg, ainfo->reg, FALSE);
2392 if (ainfo->storage == RegTypeGSharedVtOnStack) {
2393 /* Pass by addr on stack */
2394 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, ARMREG_SP, ainfo->offset, src->dreg);
/* Load the in-register portion word by word; sub-word tails are
 * assembled from byte/halfword loads to avoid reading past the struct. */
2399 for (i = 0; i < ainfo->size; ++i) {
2400 dreg = mono_alloc_ireg (cfg);
2401 switch (struct_size) {
2403 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, dreg, src->dreg, soffset);
2406 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, dreg, src->dreg, soffset);
2409 tmpreg = mono_alloc_ireg (cfg);
/* 3 bytes: OR together three byte loads shifted into place. */
2410 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, dreg, src->dreg, soffset);
2411 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, tmpreg, src->dreg, soffset + 1);
2412 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, tmpreg, tmpreg, 8);
2413 MONO_EMIT_NEW_BIALU (cfg, OP_IOR, dreg, dreg, tmpreg);
2414 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, tmpreg, src->dreg, soffset + 2);
2415 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, tmpreg, tmpreg, 16);
2416 MONO_EMIT_NEW_BIALU (cfg, OP_IOR, dreg, dreg, tmpreg);
2419 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, dreg, src->dreg, soffset);
2422 mono_call_inst_add_outarg_reg (cfg, call, dreg, ainfo->reg + i, FALSE);
2423 soffset += sizeof (gpointer);
2424 struct_size -= sizeof (gpointer);
2426 //g_print ("vt size: %d at R%d + %d\n", doffset, vt->inst_basereg, vt->inst_offset);
/* Copy the remainder (ovf_size words) to the outgoing stack area. */
2428 mini_emit_memcpy (cfg, ARMREG_SP, doffset, src->dreg, soffset, MIN (ovf_size * sizeof (gpointer), struct_size), struct_size < 4 ? 1 : 4);
/*
 * mono_arch_emit_setret:
 *
 *   Emit the IR that moves VAL into METHOD's return location:
 * OP_SETLRET for 64-bit pairs, OP_SETFRET for FP returns (per the
 * configured FPU mode), plain OP_MOVE otherwise.
 */
2432 mono_arch_emit_setret (MonoCompile *cfg, MonoMethod *method, MonoInst *val)
2434 MonoType *ret = mini_type_get_underlying_type (cfg->generic_sharing_context, mono_method_signature (method)->ret);
2437 if (ret->type == MONO_TYPE_I8 || ret->type == MONO_TYPE_U8) {
2440 if (COMPILE_LLVM (cfg)) {
2441 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, cfg->ret->dreg, val->dreg);
/* Non-LLVM: set both halves of the long return via OP_SETLRET. */
2443 MONO_INST_NEW (cfg, ins, OP_SETLRET);
2444 ins->sreg1 = val->dreg + 1;
2445 ins->sreg2 = val->dreg + 2;
2446 MONO_ADD_INS (cfg->cbb, ins);
2451 case MONO_ARM_FPU_NONE:
2452 if (ret->type == MONO_TYPE_R8) {
2455 MONO_INST_NEW (cfg, ins, OP_SETFRET);
2456 ins->dreg = cfg->ret->dreg;
2457 ins->sreg1 = val->dreg;
2458 MONO_ADD_INS (cfg->cbb, ins);
2461 if (ret->type == MONO_TYPE_R4) {
2462 /* Already converted to an int in method_to_ir () */
2463 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, cfg->ret->dreg, val->dreg);
2467 case MONO_ARM_FPU_VFP:
2468 case MONO_ARM_FPU_VFP_HARD:
2469 if (ret->type == MONO_TYPE_R8 || ret->type == MONO_TYPE_R4) {
2472 MONO_INST_NEW (cfg, ins, OP_SETFRET);
2473 ins->dreg = cfg->ret->dreg;
2474 ins->sreg1 = val->dreg;
2475 MONO_ADD_INS (cfg->cbb, ins);
2480 g_assert_not_reached ();
/* Default: plain register move into the return vreg. */
2484 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, cfg->ret->dreg, val->dreg);
2487 #endif /* #ifndef DISABLE_JIT */
/* mono_arch_is_inst_imm: can IMM be encoded as an instruction immediate?
 * (Body elided in this listing.) */
2490 mono_arch_is_inst_imm (gint64 imm)
/* Dynamic-call support: up to 6 stack argument slots beyond r0-r3. */
2495 #define DYN_CALL_STACK_ARGS 6
/* Fields of the dyn-call argument buffer (struct header elided in this listing). */
2498 MonoMethodSignature *sig;
2503 mgreg_t regs [PARAM_REGS + DYN_CALL_STACK_ARGS];
/*
 * dyn_call_supported:
 *
 *   Return whether SIG can go through the dynamic-call (start_dyn_call)
 * path: argument count must fit in PARAM_REGS + DYN_CALL_STACK_ARGS and
 * every arg/return storage kind must be one this path can marshal.
 */
2509 dyn_call_supported (CallInfo *cinfo, MonoMethodSignature *sig)
2513 if (sig->hasthis + sig->param_count > PARAM_REGS + DYN_CALL_STACK_ARGS)
2516 switch (cinfo->ret.storage) {
2518 case RegTypeGeneral:
2519 case RegTypeIRegPair:
2520 case RegTypeStructByAddr:
2531 for (i = 0; i < cinfo->nargs; ++i) {
2532 ArgInfo *ainfo = &cinfo->args [i];
2535 switch (ainfo->storage) {
2536 case RegTypeGeneral:
2538 case RegTypeIRegPair:
/* Stack args must fit inside the fixed regs[] buffer. */
2541 if (ainfo->offset >= (DYN_CALL_STACK_ARGS * sizeof (gpointer)))
2544 case RegTypeStructByVal:
2545 if (ainfo->size == 0)
2546 last_slot = PARAM_REGS + (ainfo->offset / 4) + ainfo->vtsize;
2548 last_slot = ainfo->reg + ainfo->size + ainfo->vtsize;
2549 if (last_slot >= PARAM_REGS + DYN_CALL_STACK_ARGS)
2557 // FIXME: Can't use cinfo only as it doesn't contain info about I8/float */
2558 for (i = 0; i < sig->param_count; ++i) {
2559 MonoType *t = sig->params [i];
/*
 * mono_arch_dyn_call_prepare:
 * Build (or refuse to build) the per-signature dyn-call descriptor.
 * Returns NULL when dyn_call_supported() rejects the signature; the
 * returned ArchDynCallInfo owns CINFO (freed in mono_arch_dyn_call_free).
 * NOTE(review): fragmentary listing; code left byte-identical.
 */
2585 mono_arch_dyn_call_prepare (MonoMethodSignature *sig)
2587 ArchDynCallInfo *info;
2590 cinfo = get_call_info (NULL, NULL, sig);
2592 if (!dyn_call_supported (cinfo, sig)) {
2597 info = g_new0 (ArchDynCallInfo, 1);
2598 // FIXME: Preprocess the info to speed up start_dyn_call ()
2600 info->cinfo = cinfo;
2602 return (MonoDynCallInfo*)info;
/* mono_arch_dyn_call_free: release the descriptor and the CallInfo it owns. */
2606 mono_arch_dyn_call_free (MonoDynCallInfo *info)
2608 ArchDynCallInfo *ainfo = (ArchDynCallInfo*)info;
2610 g_free (ainfo->cinfo);
/*
 * mono_arch_start_dyn_call:
 * Marshal ARGS into BUF (a DynCallArgs) according to the precomputed
 * CallInfo: the first PARAM_REGS slots of p->regs map to r0-r3, the rest
 * to outgoing stack slots. RET receives the hidden vtype-return pointer
 * when the signature needs one.
 * NOTE(review): fragmentary listing; code left byte-identical.
 */
2615 mono_arch_start_dyn_call (MonoDynCallInfo *info, gpointer **args, guint8 *ret, guint8 *buf, int buf_len)
2617 ArchDynCallInfo *dinfo = (ArchDynCallInfo*)info;
2618 DynCallArgs *p = (DynCallArgs*)buf;
2619 int arg_index, greg, i, j, pindex;
2620 MonoMethodSignature *sig = dinfo->sig;
2622 g_assert (buf_len >= sizeof (DynCallArgs));
/* 'this' and/or the vtype-return address occupy the leading registers. */
2631 if (sig->hasthis || dinfo->cinfo->vret_arg_index == 1) {
2632 p->regs [greg ++] = (mgreg_t)*(args [arg_index ++]);
2637 if (dinfo->cinfo->vtype_retaddr)
2638 p->regs [greg ++] = (mgreg_t)ret;
2640 for (i = pindex; i < sig->param_count; i++) {
2641 MonoType *t = mono_type_get_underlying_type (sig->params [i]);
2642 gpointer *arg = args [arg_index ++];
2643 ArgInfo *ainfo = &dinfo->cinfo->args [i + sig->hasthis];
/* Map the ABI storage class to a slot index into p->regs. */
2646 if (ainfo->storage == RegTypeGeneral || ainfo->storage == RegTypeIRegPair || ainfo->storage == RegTypeStructByVal)
2648 else if (ainfo->storage == RegTypeBase)
2649 slot = PARAM_REGS + (ainfo->offset / 4);
2651 g_assert_not_reached ();
2654 p->regs [slot] = (mgreg_t)*arg;
2659 case MONO_TYPE_STRING:
2660 case MONO_TYPE_CLASS:
2661 case MONO_TYPE_ARRAY:
2662 case MONO_TYPE_SZARRAY:
2663 case MONO_TYPE_OBJECT:
2667 p->regs [slot] = (mgreg_t)*arg;
2669 case MONO_TYPE_BOOLEAN:
2671 p->regs [slot] = *(guint8*)arg;
2674 p->regs [slot] = *(gint8*)arg;
2677 p->regs [slot] = *(gint16*)arg;
2680 case MONO_TYPE_CHAR:
2681 p->regs [slot] = *(guint16*)arg;
2684 p->regs [slot] = *(gint32*)arg;
2687 p->regs [slot] = *(guint32*)arg;
/* 64-bit values consume two consecutive slots. */
2691 p->regs [slot ++] = (mgreg_t)arg [0];
2692 p->regs [slot] = (mgreg_t)arg [1];
2695 p->regs [slot] = *(mgreg_t*)arg;
2698 p->regs [slot ++] = (mgreg_t)arg [0];
2699 p->regs [slot] = (mgreg_t)arg [1];
2701 case MONO_TYPE_GENERICINST:
2702 if (MONO_TYPE_IS_REFERENCE (t)) {
2703 p->regs [slot] = (mgreg_t)*arg;
2708 case MONO_TYPE_VALUETYPE:
2709 g_assert (ainfo->storage == RegTypeStructByVal);
2711 if (ainfo->size == 0)
2712 slot = PARAM_REGS + (ainfo->offset / 4);
/* Copy the struct word by word into its register/stack slots. */
2716 for (j = 0; j < ainfo->size + ainfo->vtsize; ++j)
2717 p->regs [slot ++] = ((mgreg_t*)arg) [j];
2720 g_assert_not_reached ();
/*
 * mono_arch_finish_dyn_call:
 * Copy the raw result of a dynamic call (res/res2, i.e. r0/r1) out of BUF
 * into the caller-supplied return buffer, widening/narrowing according to
 * the signature's return type.
 * NOTE(review): fragmentary listing with stray line-number prefixes; the only
 * code change here is repairing the mojibake on the R8 store — "®s" is the
 * HTML entity residue of "&regs" (the two-word buffer built from res/res2).
 */
2726 mono_arch_finish_dyn_call (MonoDynCallInfo *info, guint8 *buf)
2728 ArchDynCallInfo *ainfo = (ArchDynCallInfo*)info;
2729 MonoMethodSignature *sig = ((ArchDynCallInfo*)info)->sig;
2730 guint8 *ret = ((DynCallArgs*)buf)->ret;
2731 mgreg_t res = ((DynCallArgs*)buf)->res;
2732 mgreg_t res2 = ((DynCallArgs*)buf)->res2;
2734 switch (mono_type_get_underlying_type (sig->ret)->type) {
2735 case MONO_TYPE_VOID:
2736 *(gpointer*)ret = NULL;
2738 case MONO_TYPE_STRING:
2739 case MONO_TYPE_CLASS:
2740 case MONO_TYPE_ARRAY:
2741 case MONO_TYPE_SZARRAY:
2742 case MONO_TYPE_OBJECT:
2746 *(gpointer*)ret = (gpointer)res;
2752 case MONO_TYPE_BOOLEAN:
2753 *(guint8*)ret = res;
2756 *(gint16*)ret = res;
2759 case MONO_TYPE_CHAR:
2760 *(guint16*)ret = res;
2763 *(gint32*)ret = res;
2766 *(guint32*)ret = res;
2770 /* This handles endianness as well */
2771 ((gint32*)ret) [0] = res;
2772 ((gint32*)ret) [1] = res2;
2774 case MONO_TYPE_GENERICINST:
2775 if (MONO_TYPE_IS_REFERENCE (sig->ret)) {
2776 *(gpointer*)ret = (gpointer)res;
2781 case MONO_TYPE_VALUETYPE:
2782 g_assert (ainfo->cinfo->vtype_retaddr);
/* R4: the 32-bit float bit pattern arrives in res. */
2787 *(float*)ret = *(float*)&res;
2789 case MONO_TYPE_R8: {
/* was "*(double*)®s;" — "&reg" collapsed to the (R) entity; restore "&regs" */
2796 *(double*)ret = *(double*)&regs;
2800 g_assert_not_reached ();
2807 * Allow tracing to work with this interface (with an optional argument)
/*
 * mono_arch_instrument_prolog:
 * Emit the enter-trace call: r0 = method, r1 = NULL frame, then call FUNC.
 * NOTE(review): fragmentary listing with stray line-number prefixes.
 */
2811 mono_arch_instrument_prolog (MonoCompile *cfg, void *func, void *p, gboolean enable_arguments)
2815 code = mono_arm_emit_load_imm (code, ARMREG_R0, (guint32)cfg->method);
2816 ARM_MOV_REG_IMM8 (code, ARMREG_R1, 0); /* NULL ebp for now */
2817 code = mono_arm_emit_load_imm (code, ARMREG_R2, (guint32)func);
2818 code = emit_call_reg (code, ARMREG_R2);
/*
 * mono_arch_instrument_epilog_full:
 * Emit the leave-trace call: save the return value around the call to FUNC,
 * choosing the save strategy from the method's return type.
 */
2831 mono_arch_instrument_epilog_full (MonoCompile *cfg, void *func, void *p, gboolean enable_arguments, gboolean preserve_argument_registers)
2834 int save_mode = SAVE_NONE;
2836 MonoMethod *method = cfg->method;
2837 int rtype = mini_type_get_underlying_type (cfg->generic_sharing_context, mono_method_signature (method)->ret)->type;
2838 int save_offset = cfg->param_area;
2842 offset = code - cfg->native_code;
2843 /* we need about 16 instructions */
2844 if (offset > (cfg->code_size - 16 * 4)) {
2845 cfg->code_size *= 2;
2846 cfg->native_code = g_realloc (cfg->native_code, cfg->code_size);
2847 code = cfg->native_code + offset;
2850 case MONO_TYPE_VOID:
2851 /* special case string .ctor icall */
/* FIX(review): bare strcmp() is truthy when the name is NOT ".ctor",
 * contradicting the comment above — the negation had been lost. */
2852 if (!strcmp (".ctor", method->name) && method->klass == mono_defaults.string_class)
2853 save_mode = SAVE_ONE;
2855 save_mode = SAVE_NONE;
2859 save_mode = SAVE_TWO;
2863 save_mode = SAVE_FP;
2865 case MONO_TYPE_VALUETYPE:
2866 save_mode = SAVE_STRUCT;
2869 save_mode = SAVE_ONE;
/* Spill the live return value before clobbering r0-r2 for the call. */
2873 switch (save_mode) {
2875 ARM_STR_IMM (code, ARMREG_R0, cfg->frame_reg, save_offset);
2876 ARM_STR_IMM (code, ARMREG_R1, cfg->frame_reg, save_offset + 4);
2877 if (enable_arguments) {
2878 ARM_MOV_REG_REG (code, ARMREG_R2, ARMREG_R1);
2879 ARM_MOV_REG_REG (code, ARMREG_R1, ARMREG_R0);
2883 ARM_STR_IMM (code, ARMREG_R0, cfg->frame_reg, save_offset);
2884 if (enable_arguments) {
2885 ARM_MOV_REG_REG (code, ARMREG_R1, ARMREG_R0);
2889 /* FIXME: what reg? */
2890 if (enable_arguments) {
2891 /* FIXME: what reg? */
2895 if (enable_arguments) {
2896 /* FIXME: get the actual address */
2897 ARM_MOV_REG_REG (code, ARMREG_R1, ARMREG_R0);
2905 code = mono_arm_emit_load_imm (code, ARMREG_R0, (guint32)cfg->method);
2906 code = mono_arm_emit_load_imm (code, ARMREG_IP, (guint32)func);
2907 code = emit_call_reg (code, ARMREG_IP);
/* Restore the saved return value after the trace call. */
2909 switch (save_mode) {
2911 ARM_LDR_IMM (code, ARMREG_R0, cfg->frame_reg, save_offset);
2912 ARM_LDR_IMM (code, ARMREG_R1, cfg->frame_reg, save_offset + 4);
2915 ARM_LDR_IMM (code, ARMREG_R0, cfg->frame_reg, save_offset);
/* NOTE(review): conditional-branch / implicit-exception emission macros.
 * Fragmentary listing — no comments are inserted between the backslash-
 * continued macro lines below, since that would alter preprocessing. */
2929 * The immediate field for cond branches is big enough for all reasonable methods
2931 #define EMIT_COND_BRANCH_FLAGS(ins,condcode) \
2932 if (0 && ins->inst_true_bb->native_offset) { \
2933 ARM_B_COND (code, (condcode), (code - cfg->native_code + ins->inst_true_bb->native_offset) & 0xffffff); \
2935 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_BB, ins->inst_true_bb); \
2936 ARM_B_COND (code, (condcode), 0); \
2939 #define EMIT_COND_BRANCH(ins,cond) EMIT_COND_BRANCH_FLAGS(ins, branch_cc_table [(cond)])
2941 /* emit an exception if condition is fail
2943 * We assign the extra code used to throw the implicit exceptions
2944 * to cfg->bb_exit as far as the big branch handling is concerned
2946 #define EMIT_COND_SYSTEM_EXCEPTION_FLAGS(condcode,exc_name) \
2948 mono_add_patch_info (cfg, code - cfg->native_code, \
2949 MONO_PATCH_INFO_EXC, exc_name); \
2950 ARM_BL_COND (code, (condcode), 0); \
2953 #define EMIT_COND_SYSTEM_EXCEPTION(cond,exc_name) EMIT_COND_SYSTEM_EXCEPTION_FLAGS(branch_cc_table [(cond)], (exc_name))
/* mono_arch_peephole_pass_1: no ARM-specific work before regalloc. */
2956 mono_arch_peephole_pass_1 (MonoCompile *cfg, MonoBasicBlock *bb)
/*
 * mono_arch_peephole_pass_2:
 * Classic store/load forwarding peepholes over one basic block:
 * a load that follows a store (or load) of the same [basereg+offset]
 * becomes a move/const, or is deleted outright when redundant.
 * NOTE(review): fragmentary listing; code left byte-identical.
 */
2961 mono_arch_peephole_pass_2 (MonoCompile *cfg, MonoBasicBlock *bb)
2963 MonoInst *ins, *n, *last_ins = NULL;
2965 MONO_BB_FOR_EACH_INS_SAFE (bb, n, ins) {
2966 switch (ins->opcode) {
2969 /* Already done by an arch-independent pass */
2971 case OP_LOAD_MEMBASE:
2972 case OP_LOADI4_MEMBASE:
2974 * OP_STORE_MEMBASE_REG reg, offset(basereg)
2975 * OP_LOAD_MEMBASE offset(basereg), reg
2977 if (last_ins && (last_ins->opcode == OP_STOREI4_MEMBASE_REG
2978 || last_ins->opcode == OP_STORE_MEMBASE_REG) &&
2979 ins->inst_basereg == last_ins->inst_destbasereg &&
2980 ins->inst_offset == last_ins->inst_offset) {
2981 if (ins->dreg == last_ins->sreg1) {
2982 MONO_DELETE_INS (bb, ins);
2985 //static int c = 0; printf ("MATCHX %s %d\n", cfg->method->name,c++);
2986 ins->opcode = OP_MOVE;
2987 ins->sreg1 = last_ins->sreg1;
2991 * Note: reg1 must be different from the basereg in the second load
2992 * OP_LOAD_MEMBASE offset(basereg), reg1
2993 * OP_LOAD_MEMBASE offset(basereg), reg2
2995 * OP_LOAD_MEMBASE offset(basereg), reg1
2996 * OP_MOVE reg1, reg2
2998 } if (last_ins && (last_ins->opcode == OP_LOADI4_MEMBASE
2999 || last_ins->opcode == OP_LOAD_MEMBASE) &&
3000 ins->inst_basereg != last_ins->dreg &&
3001 ins->inst_basereg == last_ins->inst_basereg &&
3002 ins->inst_offset == last_ins->inst_offset) {
3004 if (ins->dreg == last_ins->dreg) {
3005 MONO_DELETE_INS (bb, ins);
3008 ins->opcode = OP_MOVE;
3009 ins->sreg1 = last_ins->dreg;
3012 //g_assert_not_reached ();
3016 * OP_STORE_MEMBASE_IMM imm, offset(basereg)
3017 * OP_LOAD_MEMBASE offset(basereg), reg
3019 * OP_STORE_MEMBASE_IMM imm, offset(basereg)
3020 * OP_ICONST reg, imm
3022 } else if (last_ins && (last_ins->opcode == OP_STOREI4_MEMBASE_IMM
3023 || last_ins->opcode == OP_STORE_MEMBASE_IMM) &&
3024 ins->inst_basereg == last_ins->inst_destbasereg &&
3025 ins->inst_offset == last_ins->inst_offset) {
3026 //static int c = 0; printf ("MATCHX %s %d\n", cfg->method->name,c++);
3027 ins->opcode = OP_ICONST;
3028 ins->inst_c0 = last_ins->inst_imm;
3029 g_assert_not_reached (); // check this rule
/* Narrow loads after narrow stores become sign/zero extensions. */
3033 case OP_LOADU1_MEMBASE:
3034 case OP_LOADI1_MEMBASE:
3035 if (last_ins && (last_ins->opcode == OP_STOREI1_MEMBASE_REG) &&
3036 ins->inst_basereg == last_ins->inst_destbasereg &&
3037 ins->inst_offset == last_ins->inst_offset) {
3038 ins->opcode = (ins->opcode == OP_LOADI1_MEMBASE) ? OP_ICONV_TO_I1 : OP_ICONV_TO_U1;
3039 ins->sreg1 = last_ins->sreg1;
3042 case OP_LOADU2_MEMBASE:
3043 case OP_LOADI2_MEMBASE:
3044 if (last_ins && (last_ins->opcode == OP_STOREI2_MEMBASE_REG) &&
3045 ins->inst_basereg == last_ins->inst_destbasereg &&
3046 ins->inst_offset == last_ins->inst_offset) {
3047 ins->opcode = (ins->opcode == OP_LOADI2_MEMBASE) ? OP_ICONV_TO_I2 : OP_ICONV_TO_U2;
3048 ins->sreg1 = last_ins->sreg1;
3052 ins->opcode = OP_MOVE;
/* Self-moves and move/counter-move pairs are dead. */
3056 if (ins->dreg == ins->sreg1) {
3057 MONO_DELETE_INS (bb, ins);
3061 * OP_MOVE sreg, dreg
3062 * OP_MOVE dreg, sreg
3064 if (last_ins && last_ins->opcode == OP_MOVE &&
3065 ins->sreg1 == last_ins->dreg &&
3066 ins->dreg == last_ins->sreg1) {
3067 MONO_DELETE_INS (bb, ins);
3075 bb->last_ins = last_ins;
/* NOTE(review): fragmentary listing — branch condition-code table, the
 * ADD_NEW_INS helper macro (continuation lines left uninterrupted), and
 * map_to_reg_reg_op(), which converts an immediate-form opcode into its
 * register-register / register-index equivalent. */
3079 * the branch_cc_table should maintain the order of these
3093 branch_cc_table [] = {
3107 #define ADD_NEW_INS(cfg,dest,op) do { \
3108 MONO_INST_NEW ((cfg), (dest), (op)); \
3109 mono_bblock_insert_before_ins (bb, ins, (dest)); \
3113 map_to_reg_reg_op (int op)
3122 case OP_COMPARE_IMM:
3124 case OP_ICOMPARE_IMM:
3138 case OP_LOAD_MEMBASE:
3139 return OP_LOAD_MEMINDEX;
3140 case OP_LOADI4_MEMBASE:
3141 return OP_LOADI4_MEMINDEX;
3142 case OP_LOADU4_MEMBASE:
3143 return OP_LOADU4_MEMINDEX;
3144 case OP_LOADU1_MEMBASE:
3145 return OP_LOADU1_MEMINDEX;
3146 case OP_LOADI2_MEMBASE:
3147 return OP_LOADI2_MEMINDEX;
3148 case OP_LOADU2_MEMBASE:
3149 return OP_LOADU2_MEMINDEX;
3150 case OP_LOADI1_MEMBASE:
3151 return OP_LOADI1_MEMINDEX;
3152 case OP_STOREI1_MEMBASE_REG:
3153 return OP_STOREI1_MEMINDEX;
3154 case OP_STOREI2_MEMBASE_REG:
3155 return OP_STOREI2_MEMINDEX;
3156 case OP_STOREI4_MEMBASE_REG:
3157 return OP_STOREI4_MEMINDEX;
3158 case OP_STORE_MEMBASE_REG:
3159 return OP_STORE_MEMINDEX;
3160 case OP_STORER4_MEMBASE_REG:
3161 return OP_STORER4_MEMINDEX;
3162 case OP_STORER8_MEMBASE_REG:
3163 return OP_STORER8_MEMINDEX;
/* _IMM stores lower to their _REG form (the imm goes to a vreg first). */
3164 case OP_STORE_MEMBASE_IMM:
3165 return OP_STORE_MEMBASE_REG;
3166 case OP_STOREI1_MEMBASE_IMM:
3167 return OP_STOREI1_MEMBASE_REG;
3168 case OP_STOREI2_MEMBASE_IMM:
3169 return OP_STOREI2_MEMBASE_REG;
3170 case OP_STOREI4_MEMBASE_IMM:
3171 return OP_STOREI4_MEMBASE_REG;
3173 g_assert_not_reached ();
/* NOTE(review): mono_arch_lowering_pass — rewrite IR so every instruction
 * fits ARM's addressing-mode and immediate-encoding limits (rotated imm8,
 * imm12 offsets, fp imm8 offsets). Fragmentary listing; code byte-identical. */
3177 * Remove from the instruction list the instructions that can't be
3178 * represented with very simple instructions with no register
3182 mono_arch_lowering_pass (MonoCompile *cfg, MonoBasicBlock *bb)
3184 MonoInst *ins, *temp, *last_ins = NULL;
3185 int rot_amount, imm8, low_imm;
3187 MONO_BB_FOR_EACH_INS (bb, ins) {
3189 switch (ins->opcode) {
3193 case OP_COMPARE_IMM:
3194 case OP_ICOMPARE_IMM:
/* Immediate doesn't encode as a rotated imm8: materialize it in a vreg. */
3208 if ((imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount)) < 0) {
3209 ADD_NEW_INS (cfg, temp, OP_ICONST);
3210 temp->inst_c0 = ins->inst_imm;
3211 temp->dreg = mono_alloc_ireg (cfg);
3212 ins->sreg2 = temp->dreg;
3213 ins->opcode = mono_op_imm_to_op (ins->opcode);
3215 if (ins->opcode == OP_SBB || ins->opcode == OP_ISBB || ins->opcode == OP_SUBCC)
/* Multiply by 1 / 0 / power-of-two strength reduction. */
3221 if (ins->inst_imm == 1) {
3222 ins->opcode = OP_MOVE;
3225 if (ins->inst_imm == 0) {
3226 ins->opcode = OP_ICONST;
3230 imm8 = mono_is_power_of_two (ins->inst_imm);
3232 ins->opcode = OP_SHL_IMM;
3233 ins->inst_imm = imm8;
3236 ADD_NEW_INS (cfg, temp, OP_ICONST);
3237 temp->inst_c0 = ins->inst_imm;
3238 temp->dreg = mono_alloc_ireg (cfg);
3239 ins->sreg2 = temp->dreg;
3240 ins->opcode = OP_IMUL;
3246 if (ins->next && (ins->next->opcode == OP_COND_EXC_C || ins->next->opcode == OP_COND_EXC_IC))
3247 /* ARM sets the C flag to 1 if there was _no_ overflow */
3248 ins->next->opcode = OP_COND_EXC_NC;
3251 case OP_IDIV_UN_IMM:
3253 case OP_IREM_UN_IMM:
3254 ADD_NEW_INS (cfg, temp, OP_ICONST);
3255 temp->inst_c0 = ins->inst_imm;
3256 temp->dreg = mono_alloc_ireg (cfg);
3257 ins->sreg2 = temp->dreg;
3258 ins->opcode = mono_op_imm_to_op (ins->opcode);
3260 case OP_LOCALLOC_IMM:
3261 ADD_NEW_INS (cfg, temp, OP_ICONST);
3262 temp->inst_c0 = ins->inst_imm;
3263 temp->dreg = mono_alloc_ireg (cfg);
3264 ins->sreg1 = temp->dreg;
3265 ins->opcode = OP_LOCALLOC;
3267 case OP_LOAD_MEMBASE:
3268 case OP_LOADI4_MEMBASE:
3269 case OP_LOADU4_MEMBASE:
3270 case OP_LOADU1_MEMBASE:
3271 /* we can do two things: load the immed in a register
3272 * and use an indexed load, or see if the immed can be
3273 * represented as an ad_imm + a load with a smaller offset
3274 * that fits. We just do the first for now, optimize later.
3276 if (arm_is_imm12 (ins->inst_offset))
3278 ADD_NEW_INS (cfg, temp, OP_ICONST);
3279 temp->inst_c0 = ins->inst_offset;
3280 temp->dreg = mono_alloc_ireg (cfg);
3281 ins->sreg2 = temp->dreg;
3282 ins->opcode = map_to_reg_reg_op (ins->opcode);
3284 case OP_LOADI2_MEMBASE:
3285 case OP_LOADU2_MEMBASE:
3286 case OP_LOADI1_MEMBASE:
3287 if (arm_is_imm8 (ins->inst_offset))
3289 ADD_NEW_INS (cfg, temp, OP_ICONST);
3290 temp->inst_c0 = ins->inst_offset;
3291 temp->dreg = mono_alloc_ireg (cfg);
3292 ins->sreg2 = temp->dreg;
3293 ins->opcode = map_to_reg_reg_op (ins->opcode);
/* FP loads: try base+rotated-imm8 split before falling back to an IADD. */
3295 case OP_LOADR4_MEMBASE:
3296 case OP_LOADR8_MEMBASE:
3297 if (arm_is_fpimm8 (ins->inst_offset))
3299 low_imm = ins->inst_offset & 0x1ff;
3300 if ((imm8 = mono_arm_is_rotated_imm8 (ins->inst_offset & ~0x1ff, &rot_amount)) >= 0) {
3301 ADD_NEW_INS (cfg, temp, OP_ADD_IMM);
3302 temp->inst_imm = ins->inst_offset & ~0x1ff;
3303 temp->sreg1 = ins->inst_basereg;
3304 temp->dreg = mono_alloc_ireg (cfg);
3305 ins->inst_basereg = temp->dreg;
3306 ins->inst_offset = low_imm;
3310 ADD_NEW_INS (cfg, temp, OP_ICONST);
3311 temp->inst_c0 = ins->inst_offset;
3312 temp->dreg = mono_alloc_ireg (cfg);
3314 ADD_NEW_INS (cfg, add_ins, OP_IADD);
3315 add_ins->sreg1 = ins->inst_basereg;
3316 add_ins->sreg2 = temp->dreg;
3317 add_ins->dreg = mono_alloc_ireg (cfg);
3319 ins->inst_basereg = add_ins->dreg;
3320 ins->inst_offset = 0;
3323 case OP_STORE_MEMBASE_REG:
3324 case OP_STOREI4_MEMBASE_REG:
3325 case OP_STOREI1_MEMBASE_REG:
3326 if (arm_is_imm12 (ins->inst_offset))
3328 ADD_NEW_INS (cfg, temp, OP_ICONST);
3329 temp->inst_c0 = ins->inst_offset;
3330 temp->dreg = mono_alloc_ireg (cfg);
3331 ins->sreg2 = temp->dreg;
3332 ins->opcode = map_to_reg_reg_op (ins->opcode);
3334 case OP_STOREI2_MEMBASE_REG:
3335 if (arm_is_imm8 (ins->inst_offset))
3337 ADD_NEW_INS (cfg, temp, OP_ICONST);
3338 temp->inst_c0 = ins->inst_offset;
3339 temp->dreg = mono_alloc_ireg (cfg);
3340 ins->sreg2 = temp->dreg;
3341 ins->opcode = map_to_reg_reg_op (ins->opcode);
3343 case OP_STORER4_MEMBASE_REG:
3344 case OP_STORER8_MEMBASE_REG:
3345 if (arm_is_fpimm8 (ins->inst_offset))
3347 low_imm = ins->inst_offset & 0x1ff;
3348 if ((imm8 = mono_arm_is_rotated_imm8 (ins->inst_offset & ~ 0x1ff, &rot_amount)) >= 0 && arm_is_fpimm8 (low_imm)) {
3349 ADD_NEW_INS (cfg, temp, OP_ADD_IMM);
3350 temp->inst_imm = ins->inst_offset & ~0x1ff;
3351 temp->sreg1 = ins->inst_destbasereg;
3352 temp->dreg = mono_alloc_ireg (cfg);
3353 ins->inst_destbasereg = temp->dreg;
3354 ins->inst_offset = low_imm;
3358 ADD_NEW_INS (cfg, temp, OP_ICONST);
3359 temp->inst_c0 = ins->inst_offset;
3360 temp->dreg = mono_alloc_ireg (cfg);
3362 ADD_NEW_INS (cfg, add_ins, OP_IADD);
3363 add_ins->sreg1 = ins->inst_destbasereg;
3364 add_ins->sreg2 = temp->dreg;
3365 add_ins->dreg = mono_alloc_ireg (cfg);
3367 ins->inst_destbasereg = add_ins->dreg;
3368 ins->inst_offset = 0;
3371 case OP_STORE_MEMBASE_IMM:
3372 case OP_STOREI1_MEMBASE_IMM:
3373 case OP_STOREI2_MEMBASE_IMM:
3374 case OP_STOREI4_MEMBASE_IMM:
3375 ADD_NEW_INS (cfg, temp, OP_ICONST);
3376 temp->inst_c0 = ins->inst_imm;
3377 temp->dreg = mono_alloc_ireg (cfg);
3378 ins->sreg1 = temp->dreg;
3379 ins->opcode = map_to_reg_reg_op (ins->opcode);
3381 goto loop_start; /* make it handle the possibly big ins->inst_offset */
3383 gboolean swap = FALSE;
3387 /* Optimized away */
3392 /* Some fp compares require swapped operands */
3393 switch (ins->next->opcode) {
3395 ins->next->opcode = OP_FBLT;
3399 ins->next->opcode = OP_FBLT_UN;
3403 ins->next->opcode = OP_FBGE;
3407 ins->next->opcode = OP_FBGE_UN;
3415 ins->sreg1 = ins->sreg2;
3424 bb->last_ins = last_ins;
3425 bb->max_vreg = cfg->next_vreg;
/* mono_arch_decompose_long_opts: lower OP_LNEG to RSBS/RSC on the vreg pair.
 * NOTE(review): fragmentary listing; code left byte-identical. */
3429 mono_arch_decompose_long_opts (MonoCompile *cfg, MonoInst *long_ins)
3433 if (long_ins->opcode == OP_LNEG) {
3435 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ARM_RSBS_IMM, ins->dreg + 1, ins->sreg1 + 1, 0);
3436 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ARM_RSC_IMM, ins->dreg + 2, ins->sreg1 + 2, 0);
/*
 * emit_float_to_int:
 * Convert the VFP value in SREG to an integer in DREG, then truncate /
 * sign-extend to SIZE bytes via shift pairs (or a mask for unsigned bytes).
 */
3442 emit_float_to_int (MonoCompile *cfg, guchar *code, int dreg, int sreg, int size, gboolean is_signed)
3444 /* sreg is a float, dreg is an integer reg */
3447 ARM_TOSIZD (code, vfp_scratch1, sreg);
3449 ARM_TOUIZD (code, vfp_scratch1, sreg);
3450 ARM_FMRS (code, dreg, vfp_scratch1);
3454 ARM_AND_REG_IMM8 (code, dreg, dreg, 0xff);
3455 else if (size == 2) {
3456 ARM_SHL_IMM (code, dreg, dreg, 16);
3457 ARM_SHR_IMM (code, dreg, dreg, 16);
3461 ARM_SHL_IMM (code, dreg, dreg, 24);
3462 ARM_SAR_IMM (code, dreg, dreg, 24);
3463 } else if (size == 2) {
3464 ARM_SHL_IMM (code, dreg, dreg, 16);
3465 ARM_SAR_IMM (code, dreg, dreg, 16);
/* NOTE(review): PatchData fields + is_call_imm (BL's signed 24-bit word
 * offset covers ±32 MB) + search_thunk_slot. Fragmentary listing. */
3475 const guchar *target;
3480 #define is_call_imm(diff) ((gint)(diff) >= -33554432 && (gint)(diff) <= 33554431)
/*
 * search_thunk_slot:
 * mono_code_manager_foreach callback: within one code chunk, either find an
 * existing 3-word thunk targeting pdata->target, or claim a free slot and
 * emit one, then retarget the call site at pdata->code to the thunk.
 */
3483 search_thunk_slot (void *data, int csize, int bsize, void *user_data) {
3484 PatchData *pdata = (PatchData*)user_data;
3485 guchar *code = data;
3486 guint32 *thunks = data;
3487 guint32 *endthunks = (guint32*)(code + bsize);
3489 int difflow, diffhigh;
3491 /* always ensure a call from pdata->code can reach to the thunks without further thunks */
3492 difflow = (char*)pdata->code - (char*)thunks;
3493 diffhigh = (char*)pdata->code - (char*)endthunks;
3494 if (!((is_call_imm (thunks) && is_call_imm (endthunks)) || (is_call_imm (difflow) && is_call_imm (diffhigh))))
3498 * The thunk is composed of 3 words:
3499 * load constant from thunks [2] into ARM_IP
3502 * Note that the LR register is already setup
3504 //g_print ("thunk nentries: %d\n", ((char*)endthunks - (char*)thunks)/16);
3505 if ((pdata->found == 2) || (pdata->code >= code && pdata->code <= code + csize)) {
3506 while (thunks < endthunks) {
3507 //g_print ("looking for target: %p at %p (%08x-%08x)\n", pdata->target, thunks, thunks [0], thunks [1]);
3508 if (thunks [2] == (guint32)pdata->target) {
/* Reuse: an existing thunk already jumps to the target. */
3509 arm_patch (pdata->code, (guchar*)thunks);
3510 mono_arch_flush_icache (pdata->code, 4);
3513 } else if ((thunks [0] == 0) && (thunks [1] == 0) && (thunks [2] == 0)) {
3514 /* found a free slot instead: emit thunk */
3515 /* ARMREG_IP is fine to use since this can't be an IMT call
3518 code = (guchar*)thunks;
3519 ARM_LDR_IMM (code, ARMREG_IP, ARMREG_PC, 0);
3520 if (thumb_supported)
3521 ARM_BX (code, ARMREG_IP);
3523 ARM_MOV_REG_REG (code, ARMREG_PC, ARMREG_IP);
3524 thunks [2] = (guint32)pdata->target;
3525 mono_arch_flush_icache ((guchar*)thunks, 12);
3527 arm_patch (pdata->code, (guchar*)thunks);
3528 mono_arch_flush_icache (pdata->code, 4);
3532 /* skip 12 bytes, the size of the thunk */
3536 //g_print ("failed thunk lookup for %p from %p at %p (%d entries)\n", pdata->target, pdata->code, data, count);
/*
 * handle_thunk:
 * Find or create a jump thunk for TARGET reachable from CODE. Searches, in
 * order: the dynamic-method code manager (if given), the domain's code
 * managers, and finally every dynamic method's code manager; asserts if no
 * slot can be found. NOTE(review): fragmentary listing; code byte-identical.
 */
3542 handle_thunk (MonoDomain *domain, int absolute, guchar *code, const guchar *target, MonoCodeManager *dyn_code_mp)
3547 domain = mono_domain_get ();
3550 pdata.target = target;
3551 pdata.absolute = absolute;
3555 mono_code_manager_foreach (dyn_code_mp, search_thunk_slot, &pdata);
3558 if (pdata.found != 1) {
3559 mono_domain_lock (domain);
3560 mono_domain_code_foreach (domain, search_thunk_slot, &pdata);
3563 /* this uses the first available slot */
3565 mono_domain_code_foreach (domain, search_thunk_slot, &pdata);
3567 mono_domain_unlock (domain);
3570 if (pdata.found != 1) {
3572 GHashTableIter iter;
3573 MonoJitDynamicMethodInfo *ji;
3576 * This might be a dynamic method, search its code manager. We can only
3577 * use the dynamic method containing CODE, since the others might be freed later.
3581 mono_domain_lock (domain);
3582 hash = domain_jit_info (domain)->dynamic_code_hash;
3584 /* FIXME: Speed this up */
3585 g_hash_table_iter_init (&iter, hash);
3586 while (g_hash_table_iter_next (&iter, NULL, (gpointer*)&ji)) {
3587 mono_code_manager_foreach (ji->code_mp, search_thunk_slot, &pdata);
3588 if (pdata.found == 1)
3592 mono_domain_unlock (domain);
3594 if (pdata.found != 1)
3595 g_print ("thunk failed for %p from %p\n", target, code);
3596 g_assert (pdata.found == 1);
/*
 * arm_patch_general:
 * Retarget the call/branch instruction at CODE to TARGET. Handles direct
 * B/BL (rewriting the 24-bit displacement, upgrading BL->BLX for Thumb
 * targets), the ldr-ip constant-pool call sequences (patching the embedded
 * address word), and falls back to a thunk when the branch is out of range.
 * NOTE(review): fragmentary listing; code left byte-identical.
 */
3600 arm_patch_general (MonoDomain *domain, guchar *code, const guchar *target, MonoCodeManager *dyn_code_mp)
3602 guint32 *code32 = (void*)code;
3603 guint32 ins = *code32;
3604 guint32 prim = (ins >> 25) & 7;
3605 guint32 tval = GPOINTER_TO_UINT (target);
3607 //g_print ("patching 0x%08x (0x%08x) to point to 0x%08x\n", code, ins, target);
3608 if (prim == 5) { /* 101b */
3609 /* the diff starts 8 bytes from the branch opcode */
3610 gint diff = target - code - 8;
3612 gint tmask = 0xffffffff;
3613 if (tval & 1) { /* entering thumb mode */
3614 diff = target - 1 - code - 8;
3615 g_assert (thumb_supported);
3616 tbits = 0xf << 28; /* bl->blx bit pattern */
3617 g_assert ((ins & (1 << 24))); /* it must be a bl, not b instruction */
3618 /* this low bit of the displacement is moved to bit 24 in the instruction encoding */
3622 tmask = ~(1 << 24); /* clear the link bit */
3623 /*g_print ("blx to thumb: target: %p, code: %p, diff: %d, mask: %x\n", target, code, diff, tmask);*/
/* In-range forward branch: splice the new displacement in. */
3628 if (diff <= 33554431) {
3630 ins = (ins & 0xff000000) | diff;
3632 *code32 = ins | tbits;
3636 /* diff between 0 and -33554432 */
3637 if (diff >= -33554432) {
3639 ins = (ins & 0xff000000) | (diff & ~0xff000000);
3641 *code32 = ins | tbits;
/* Out of ±32MB range: route through a thunk instead. */
3646 handle_thunk (domain, TRUE, code, target, dyn_code_mp);
3650 #ifdef USE_JUMP_TABLES
3652 gpointer *jte = mono_jumptable_get_entry (code);
3654 jte [0] = (gpointer) target;
3658 * The alternative call sequences looks like this:
3660 * ldr ip, [pc] // loads the address constant
3661 * b 1f // jumps around the constant
3662 * address constant embedded in the code
3667 * There are two cases for patching:
3668 * a) at the end of method emission: in this case code points to the start
3669 * of the call sequence
3670 * b) during runtime patching of the call site: in this case code points
3671 * to the mov pc, ip instruction
3673 * We have to handle also the thunk jump code sequence:
3677 * address constant // execution never reaches here
3679 if ((ins & 0x0ffffff0) == 0x12fff10) {
3680 /* Branch and exchange: the address is constructed in a reg
3681 * We can patch BX when the code sequence is the following:
3682 * ldr ip, [pc, #0] ; 0x8
3689 guint8 *emit = (guint8*)ccode;
3690 ARM_LDR_IMM (emit, ARMREG_IP, ARMREG_PC, 0);
3692 ARM_MOV_REG_REG (emit, ARMREG_LR, ARMREG_PC);
3693 ARM_BX (emit, ARMREG_IP);
3695 /*patching from magic trampoline*/
3696 if (ins == ccode [3]) {
3697 g_assert (code32 [-4] == ccode [0]);
3698 g_assert (code32 [-3] == ccode [1]);
3699 g_assert (code32 [-1] == ccode [2]);
3700 code32 [-2] = (guint32)target;
3703 /*patching from JIT*/
3704 if (ins == ccode [0]) {
3705 g_assert (code32 [1] == ccode [1]);
3706 g_assert (code32 [3] == ccode [2]);
3707 g_assert (code32 [4] == ccode [3]);
3708 code32 [2] = (guint32)target;
3711 g_assert_not_reached ();
3712 } else if ((ins & 0x0ffffff0) == 0x12fff30) {
3720 guint8 *emit = (guint8*)ccode;
3721 ARM_LDR_IMM (emit, ARMREG_IP, ARMREG_PC, 0);
3723 ARM_BLX_REG (emit, ARMREG_IP);
3725 g_assert (code32 [-3] == ccode [0]);
3726 g_assert (code32 [-2] == ccode [1]);
3727 g_assert (code32 [0] == ccode [2]);
3729 code32 [-1] = (guint32)target;
3732 guint32 *tmp = ccode;
3733 guint8 *emit = (guint8*)tmp;
3734 ARM_LDR_IMM (emit, ARMREG_IP, ARMREG_PC, 0);
3735 ARM_MOV_REG_REG (emit, ARMREG_LR, ARMREG_PC);
3736 ARM_MOV_REG_REG (emit, ARMREG_PC, ARMREG_IP);
3737 ARM_BX (emit, ARMREG_IP);
3738 if (ins == ccode [2]) {
3739 g_assert_not_reached (); // should be -2 ...
3740 code32 [-1] = (guint32)target;
3743 if (ins == ccode [0]) {
3744 /* handles both thunk jump code and the far call sequence */
3745 code32 [2] = (guint32)target;
3748 g_assert_not_reached ();
3750 // g_print ("patched with 0x%08x\n", ins);
/* arm_patch: convenience wrapper — no domain, no dynamic code manager. */
3755 arm_patch (guchar *code, const guchar *target)
3757 arm_patch_general (NULL, code, target, NULL);
/* NOTE(review): immediate-encoding helpers. Fragmentary listing; code is
 * left byte-identical below. */
3761 * Return the >= 0 uimm8 value if val can be represented with a byte + rotation
3762 * (with the rotation amount in *rot_amount. rot_amount is already adjusted
3763 * to be used with the emit macros.
3764 * Return -1 otherwise.
3767 mono_arm_is_rotated_imm8 (guint32 val, gint *rot_amount)
/* Try every even rotation (ARM data-processing immediates rotate by 2s). */
3770 for (i = 0; i < 31; i+= 2) {
3771 res = (val << (32 - i)) | (val >> i);
3774 *rot_amount = i? 32 - i: 0;
3781 * Emits in code a sequence of instructions that load the value 'val'
3782 * into the dreg register. Uses at most 4 instructions.
3785 mono_arm_emit_load_imm (guint8 *code, int dreg, guint32 val)
3787 int imm8, rot_amount;
/* Constant-pool path: pc-relative load, then skip over the literal. */
3789 ARM_LDR_IMM (code, dreg, ARMREG_PC, 0);
3790 /* skip the constant pool */
/* Fast paths: single MOV/MVN with a rotated imm8, or MOVW/MOVT on v7. */
3796 if ((imm8 = mono_arm_is_rotated_imm8 (val, &rot_amount)) >= 0) {
3797 ARM_MOV_REG_IMM (code, dreg, imm8, rot_amount);
3798 } else if ((imm8 = mono_arm_is_rotated_imm8 (~val, &rot_amount)) >= 0) {
3799 ARM_MVN_REG_IMM (code, dreg, imm8, rot_amount);
3802 ARM_MOVW_REG_IMM (code, dreg, val & 0xffff);
3804 ARM_MOVT_REG_IMM (code, dreg, (val >> 16) & 0xffff);
/* Fallback: build the value byte by byte with MOV + up to three ADDs. */
3808 ARM_MOV_REG_IMM8 (code, dreg, (val & 0xFF));
3810 ARM_ADD_REG_IMM (code, dreg, dreg, (val & 0xFF00) >> 8, 24);
3812 if (val & 0xFF0000) {
3813 ARM_ADD_REG_IMM (code, dreg, dreg, (val & 0xFF0000) >> 16, 16);
3815 if (val & 0xFF000000) {
3816 ARM_ADD_REG_IMM (code, dreg, dreg, (val & 0xFF000000) >> 24, 8);
3818 } else if (val & 0xFF00) {
3819 ARM_MOV_REG_IMM (code, dreg, (val & 0xFF00) >> 8, 24);
3820 if (val & 0xFF0000) {
3821 ARM_ADD_REG_IMM (code, dreg, dreg, (val & 0xFF0000) >> 16, 16);
3823 if (val & 0xFF000000) {
3824 ARM_ADD_REG_IMM (code, dreg, dreg, (val & 0xFF000000) >> 24, 8);
3826 } else if (val & 0xFF0000) {
3827 ARM_MOV_REG_IMM (code, dreg, (val & 0xFF0000) >> 16, 16);
3828 if (val & 0xFF000000) {
3829 ARM_ADD_REG_IMM (code, dreg, dreg, (val & 0xFF000000) >> 24, 8);
3832 //g_assert_not_reached ();
/* mono_arm_thumb_supported: report whether BX/BLX interworking is usable. */
3838 mono_arm_thumb_supported (void)
3840 return thumb_supported;
3846 * emit_load_volatile_arguments:
3848 * Load volatile arguments from the stack to the original input registers.
3849 * Required before a tail call.
/* NOTE(review): inverse of the prolog's argument spilling — reloads each
 * incoming argument from its stack home back into its ABI register.
 * Fragmentary listing; code left byte-identical. */
3852 emit_load_volatile_arguments (MonoCompile *cfg, guint8 *code)
3854 MonoMethod *method = cfg->method;
3855 MonoMethodSignature *sig;
3860 /* FIXME: Generate intermediate code instead */
3862 sig = mono_method_signature (method);
3864 /* This is the opposite of the code in emit_prolog */
3868 cinfo = get_call_info (cfg->generic_sharing_context, NULL, sig);
3870 if (cinfo->vtype_retaddr) {
3871 ArgInfo *ainfo = &cinfo->ret;
3872 inst = cfg->vret_addr;
3873 g_assert (arm_is_imm12 (inst->inst_offset));
3874 ARM_LDR_IMM (code, ainfo->reg, inst->inst_basereg, inst->inst_offset);
3876 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
3877 ArgInfo *ainfo = cinfo->args + i;
3878 inst = cfg->args [pos];
3880 if (cfg->verbose_level > 2)
3881 g_print ("Loading argument %d (type: %d)\n", i, ainfo->storage);
3882 if (inst->opcode == OP_REGVAR) {
3883 if (ainfo->storage == RegTypeGeneral)
3884 ARM_MOV_REG_REG (code, inst->dreg, ainfo->reg);
3885 else if (ainfo->storage == RegTypeFP) {
3886 g_assert_not_reached ();
3887 } else if (ainfo->storage == RegTypeBase) {
3891 if (arm_is_imm12 (prev_sp_offset + ainfo->offset)) {
3892 ARM_LDR_IMM (code, inst->dreg, ARMREG_SP, (prev_sp_offset + ainfo->offset));
3894 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
3895 ARM_LDR_REG_REG (code, inst->dreg, ARMREG_SP, ARMREG_IP);
3899 g_assert_not_reached ();
3901 if (ainfo->storage == RegTypeGeneral || ainfo->storage == RegTypeIRegPair) {
3902 switch (ainfo->size) {
/* 8-byte arguments reload into a consecutive register pair. */
3909 g_assert (arm_is_imm12 (inst->inst_offset));
3910 ARM_LDR_IMM (code, ainfo->reg, inst->inst_basereg, inst->inst_offset);
3911 g_assert (arm_is_imm12 (inst->inst_offset + 4));
3912 ARM_LDR_IMM (code, ainfo->reg + 1, inst->inst_basereg, inst->inst_offset + 4);
3915 if (arm_is_imm12 (inst->inst_offset)) {
3916 ARM_LDR_IMM (code, ainfo->reg, inst->inst_basereg, inst->inst_offset);
3918 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
3919 ARM_LDR_REG_REG (code, ainfo->reg, inst->inst_basereg, ARMREG_IP);
3923 } else if (ainfo->storage == RegTypeBaseGen) {
3926 } else if (ainfo->storage == RegTypeBase) {
3928 } else if (ainfo->storage == RegTypeFP) {
3929 g_assert_not_reached ();
3930 } else if (ainfo->storage == RegTypeStructByVal) {
3931 int doffset = inst->inst_offset;
3935 if (mono_class_from_mono_type (inst->inst_vtype))
3936 size = mono_class_native_size (mono_class_from_mono_type (inst->inst_vtype), NULL);
3937 for (cur_reg = 0; cur_reg < ainfo->size; ++cur_reg) {
3938 if (arm_is_imm12 (doffset)) {
3939 ARM_LDR_IMM (code, ainfo->reg + cur_reg, inst->inst_basereg, doffset);
3941 code = mono_arm_emit_load_imm (code, ARMREG_IP, doffset);
3942 ARM_LDR_REG_REG (code, ainfo->reg + cur_reg, inst->inst_basereg, ARMREG_IP);
3944 soffset += sizeof (gpointer);
3945 doffset += sizeof (gpointer);
3950 } else if (ainfo->storage == RegTypeStructByAddr) {
3965 mono_arch_output_basic_block (MonoCompile *cfg, MonoBasicBlock *bb)
3970 guint8 *code = cfg->native_code + cfg->code_len;
3971 MonoInst *last_ins = NULL;
3972 guint last_offset = 0;
3974 int imm8, rot_amount;
3976 /* we don't align basic blocks of loops on arm */
3978 if (cfg->verbose_level > 2)
3979 g_print ("Basic block %d starting at offset 0x%x\n", bb->block_num, bb->native_offset);
3981 cpos = bb->max_offset;
3983 if (cfg->prof_options & MONO_PROFILE_COVERAGE) {
3984 //MonoCoverageInfo *cov = mono_get_coverage_info (cfg->method);
3985 //g_assert (!mono_compile_aot);
3988 // cov->data [bb->dfn].iloffset = bb->cil_code - cfg->cil_code;
3989 /* this is not thread safe, but good enough */
3990 /* FIXME: how to handle overflows? */
3991 //x86_inc_mem (code, &cov->data [bb->dfn].count);
3994 if (mono_break_at_bb_method && mono_method_desc_full_match (mono_break_at_bb_method, cfg->method) && bb->block_num == mono_break_at_bb_bb_num) {
3995 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_INTERNAL_METHOD,
3996 (gpointer)"mono_break");
3997 code = emit_call_seq (cfg, code);
4000 MONO_BB_FOR_EACH_INS (bb, ins) {
4001 offset = code - cfg->native_code;
4003 max_len = ((guint8 *)ins_get_spec (ins->opcode))[MONO_INST_LEN];
4005 if (offset > (cfg->code_size - max_len - 16)) {
4006 cfg->code_size *= 2;
4007 cfg->native_code = g_realloc (cfg->native_code, cfg->code_size);
4008 code = cfg->native_code + offset;
4010 // if (ins->cil_code)
4011 // g_print ("cil code\n");
4012 mono_debug_record_line_number (cfg, ins, offset);
4014 switch (ins->opcode) {
4015 case OP_MEMORY_BARRIER:
4017 ARM_MOV_REG_IMM8 (code, ARMREG_R0, 0);
4018 ARM_MCR (code, 15, 0, ARMREG_R0, 7, 10, 5);
4022 #ifdef HAVE_AEABI_READ_TP
4023 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_INTERNAL_METHOD,
4024 (gpointer)"__aeabi_read_tp");
4025 code = emit_call_seq (cfg, code);
4027 ARM_LDR_IMM (code, ins->dreg, ARMREG_R0, ins->inst_offset);
4029 g_assert_not_reached ();
4032 case OP_CARD_TABLE_WBARRIER: {
4033 int card_table_shift;
4034 gpointer card_table_mask;
4035 gboolean card_table_nursery_check = mono_gc_card_table_nursery_check ();
4036 int ptr = ins->sreg1;
4037 int value = ins->sreg2;
4040 mono_gc_get_card_table (&card_table_shift, &card_table_mask);
4042 if (card_table_nursery_check) {
4043 code = emit_aotconst (cfg, cfg->native_code, code, ARMREG_LR, MONO_PATCH_INFO_NURSERY_START_SHIFTED, NULL);
4044 code = emit_aotconst (cfg, cfg->native_code, code, ARMREG_IP, MONO_PATCH_INFO_NURSERY_SHIFT, NULL);
4045 ARM_SHR_REG (code, ARMREG_IP, value, ARMREG_IP);
4046 ARM_CMP_REG_REG (code, ARMREG_LR, ARMREG_IP);
4048 ARM_B_COND (code, ARMCOND_NE, 0);
4052 code = emit_aotconst (cfg, cfg->native_code, code, ARMREG_LR, MONO_PATCH_INFO_GC_CARD_TABLE_ADDR, NULL);
4053 ARM_SHR_IMM (code, ARMREG_IP, ptr, card_table_shift);
4054 if (card_table_mask) {
4055 imm8 = mono_arm_is_rotated_imm8 ((gsize)card_table_mask, &rot_amount);
4056 g_assert (imm8 >= 0);
4057 ARM_AND_REG_IMM (code, ARMREG_IP, ARMREG_IP, imm8, rot_amount);
4059 ARM_ADD_REG_REG (code, ARMREG_LR, ARMREG_LR, ARMREG_IP);
4060 code = mono_arm_emit_load_imm (code, ARMREG_IP, 1);
4061 ARM_STRB_IMM (code, ARMREG_IP, 0, ARMREG_LR);
4063 arm_patch (br, code);
4067 ppc_mullw (code, ppc_r4, ins->sreg1, ins->sreg2);
4068 ppc_mulhw (code, ppc_r3, ins->sreg1, ins->sreg2);
4071 ppc_mullw (code, ppc_r4, ins->sreg1, ins->sreg2);
4072 ppc_mulhwu (code, ppc_r3, ins->sreg1, ins->sreg2);
4074 case OP_STOREI1_MEMBASE_IMM:
4075 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_imm & 0xFF);
4076 g_assert (arm_is_imm12 (ins->inst_offset));
4077 ARM_STRB_IMM (code, ARMREG_LR, ins->inst_destbasereg, ins->inst_offset);
4079 case OP_STOREI2_MEMBASE_IMM:
4080 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_imm & 0xFFFF);
4081 g_assert (arm_is_imm8 (ins->inst_offset));
4082 ARM_STRH_IMM (code, ARMREG_LR, ins->inst_destbasereg, ins->inst_offset);
4084 case OP_STORE_MEMBASE_IMM:
4085 case OP_STOREI4_MEMBASE_IMM:
4086 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_imm);
4087 g_assert (arm_is_imm12 (ins->inst_offset));
4088 ARM_STR_IMM (code, ARMREG_LR, ins->inst_destbasereg, ins->inst_offset);
4090 case OP_STOREI1_MEMBASE_REG:
4091 g_assert (arm_is_imm12 (ins->inst_offset));
4092 ARM_STRB_IMM (code, ins->sreg1, ins->inst_destbasereg, ins->inst_offset);
4094 case OP_STOREI2_MEMBASE_REG:
4095 g_assert (arm_is_imm8 (ins->inst_offset));
4096 ARM_STRH_IMM (code, ins->sreg1, ins->inst_destbasereg, ins->inst_offset);
4098 case OP_STORE_MEMBASE_REG:
4099 case OP_STOREI4_MEMBASE_REG:
4100 /* this case is special, since it happens for spill code after lowering has been called */
4101 if (arm_is_imm12 (ins->inst_offset)) {
4102 ARM_STR_IMM (code, ins->sreg1, ins->inst_destbasereg, ins->inst_offset);
4104 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_offset);
4105 ARM_STR_REG_REG (code, ins->sreg1, ins->inst_destbasereg, ARMREG_LR);
4108 case OP_STOREI1_MEMINDEX:
4109 ARM_STRB_REG_REG (code, ins->sreg1, ins->inst_destbasereg, ins->sreg2);
4111 case OP_STOREI2_MEMINDEX:
4112 ARM_STRH_REG_REG (code, ins->sreg1, ins->inst_destbasereg, ins->sreg2);
4114 case OP_STORE_MEMINDEX:
4115 case OP_STOREI4_MEMINDEX:
4116 ARM_STR_REG_REG (code, ins->sreg1, ins->inst_destbasereg, ins->sreg2);
4119 g_assert_not_reached ();
4121 case OP_LOAD_MEMINDEX:
4122 case OP_LOADI4_MEMINDEX:
4123 case OP_LOADU4_MEMINDEX:
4124 ARM_LDR_REG_REG (code, ins->dreg, ins->inst_basereg, ins->sreg2);
4126 case OP_LOADI1_MEMINDEX:
4127 ARM_LDRSB_REG_REG (code, ins->dreg, ins->inst_basereg, ins->sreg2);
4129 case OP_LOADU1_MEMINDEX:
4130 ARM_LDRB_REG_REG (code, ins->dreg, ins->inst_basereg, ins->sreg2);
4132 case OP_LOADI2_MEMINDEX:
4133 ARM_LDRSH_REG_REG (code, ins->dreg, ins->inst_basereg, ins->sreg2);
4135 case OP_LOADU2_MEMINDEX:
4136 ARM_LDRH_REG_REG (code, ins->dreg, ins->inst_basereg, ins->sreg2);
4138 case OP_LOAD_MEMBASE:
4139 case OP_LOADI4_MEMBASE:
4140 case OP_LOADU4_MEMBASE:
4141 /* this case is special, since it happens for spill code after lowering has been called */
4142 if (arm_is_imm12 (ins->inst_offset)) {
4143 ARM_LDR_IMM (code, ins->dreg, ins->inst_basereg, ins->inst_offset);
4145 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_offset);
4146 ARM_LDR_REG_REG (code, ins->dreg, ins->inst_basereg, ARMREG_LR);
4149 case OP_LOADI1_MEMBASE:
4150 g_assert (arm_is_imm8 (ins->inst_offset));
4151 ARM_LDRSB_IMM (code, ins->dreg, ins->inst_basereg, ins->inst_offset);
4153 case OP_LOADU1_MEMBASE:
4154 g_assert (arm_is_imm12 (ins->inst_offset));
4155 ARM_LDRB_IMM (code, ins->dreg, ins->inst_basereg, ins->inst_offset);
4157 case OP_LOADU2_MEMBASE:
4158 g_assert (arm_is_imm8 (ins->inst_offset));
4159 ARM_LDRH_IMM (code, ins->dreg, ins->inst_basereg, ins->inst_offset);
4161 case OP_LOADI2_MEMBASE:
4162 g_assert (arm_is_imm8 (ins->inst_offset));
4163 ARM_LDRSH_IMM (code, ins->dreg, ins->inst_basereg, ins->inst_offset);
4165 case OP_ICONV_TO_I1:
4166 ARM_SHL_IMM (code, ins->dreg, ins->sreg1, 24);
4167 ARM_SAR_IMM (code, ins->dreg, ins->dreg, 24);
4169 case OP_ICONV_TO_I2:
4170 ARM_SHL_IMM (code, ins->dreg, ins->sreg1, 16);
4171 ARM_SAR_IMM (code, ins->dreg, ins->dreg, 16);
4173 case OP_ICONV_TO_U1:
4174 ARM_AND_REG_IMM8 (code, ins->dreg, ins->sreg1, 0xff);
4176 case OP_ICONV_TO_U2:
4177 ARM_SHL_IMM (code, ins->dreg, ins->sreg1, 16);
4178 ARM_SHR_IMM (code, ins->dreg, ins->dreg, 16);
4182 ARM_CMP_REG_REG (code, ins->sreg1, ins->sreg2);
4184 case OP_COMPARE_IMM:
4185 case OP_ICOMPARE_IMM:
4186 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
4187 g_assert (imm8 >= 0);
4188 ARM_CMP_REG_IMM (code, ins->sreg1, imm8, rot_amount);
4192 * gdb does not like encountering the hw breakpoint ins in the debugged code.
4193 * So instead of emitting a trap, we emit a call to a C function and place a
4196 //*(int*)code = 0xef9f0001;
4199 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_INTERNAL_METHOD,
4200 (gpointer)"mono_break");
4201 code = emit_call_seq (cfg, code);
4203 case OP_RELAXED_NOP:
4208 case OP_DUMMY_STORE:
4209 case OP_NOT_REACHED:
4212 case OP_SEQ_POINT: {
4214 MonoInst *info_var = cfg->arch.seq_point_info_var;
4215 MonoInst *ss_trigger_page_var = cfg->arch.ss_trigger_page_var;
4216 MonoInst *ss_read_var = cfg->arch.seq_point_read_var;
4217 MonoInst *ss_method_var = cfg->arch.seq_point_ss_method_var;
4218 MonoInst *bp_method_var = cfg->arch.seq_point_bp_method_var;
4220 int dreg = ARMREG_LR;
4222 if (cfg->soft_breakpoints) {
4223 g_assert (!cfg->compile_aot);
4227 * For AOT, we use one got slot per method, which will point to a
4228 * SeqPointInfo structure, containing all the information required
4229 * by the code below.
4231 if (cfg->compile_aot) {
4232 g_assert (info_var);
4233 g_assert (info_var->opcode == OP_REGOFFSET);
4234 g_assert (arm_is_imm12 (info_var->inst_offset));
4237 if (!cfg->soft_breakpoints) {
4239 * Read from the single stepping trigger page. This will cause a
4240 * SIGSEGV when single stepping is enabled.
4241 * We do this _before_ the breakpoint, so single stepping after
4242 * a breakpoint is hit will step to the next IL offset.
4244 g_assert (((guint64)(gsize)ss_trigger_page >> 32) == 0);
4247 if (ins->flags & MONO_INST_SINGLE_STEP_LOC) {
4248 if (cfg->soft_breakpoints) {
4249 /* Load the address of the sequence point trigger variable. */
4252 g_assert (var->opcode == OP_REGOFFSET);
4253 g_assert (arm_is_imm12 (var->inst_offset));
4254 ARM_LDR_IMM (code, dreg, var->inst_basereg, var->inst_offset);
4256 /* Read the value and check whether it is non-zero. */
4257 ARM_LDR_IMM (code, dreg, dreg, 0);
4258 ARM_CMP_REG_IMM (code, dreg, 0, 0);
4260 /* Load the address of the sequence point method. */
4261 var = ss_method_var;
4263 g_assert (var->opcode == OP_REGOFFSET);
4264 g_assert (arm_is_imm12 (var->inst_offset));
4265 ARM_LDR_IMM (code, dreg, var->inst_basereg, var->inst_offset);
4267 /* Call it conditionally. */
4268 ARM_BLX_REG_COND (code, ARMCOND_NE, dreg);
4270 if (cfg->compile_aot) {
4271 /* Load the trigger page addr from the variable initialized in the prolog */
4272 var = ss_trigger_page_var;
4274 g_assert (var->opcode == OP_REGOFFSET);
4275 g_assert (arm_is_imm12 (var->inst_offset));
4276 ARM_LDR_IMM (code, dreg, var->inst_basereg, var->inst_offset);
4278 #ifdef USE_JUMP_TABLES
4279 gpointer *jte = mono_jumptable_add_entry ();
4280 code = mono_arm_load_jumptable_entry (code, jte, dreg);
4281 jte [0] = ss_trigger_page;
4283 ARM_LDR_IMM (code, dreg, ARMREG_PC, 0);
4285 *(int*)code = (int)ss_trigger_page;
4289 ARM_LDR_IMM (code, dreg, dreg, 0);
4293 mono_add_seq_point (cfg, bb, ins, code - cfg->native_code);
4295 if (cfg->soft_breakpoints) {
4296 /* Load the address of the breakpoint method into ip. */
4297 var = bp_method_var;
4299 g_assert (var->opcode == OP_REGOFFSET);
4300 g_assert (arm_is_imm12 (var->inst_offset));
4301 ARM_LDR_IMM (code, dreg, var->inst_basereg, var->inst_offset);
4304 * A placeholder for a possible breakpoint inserted by
4305 * mono_arch_set_breakpoint ().
4308 } else if (cfg->compile_aot) {
4309 guint32 offset = code - cfg->native_code;
4312 ARM_LDR_IMM (code, dreg, info_var->inst_basereg, info_var->inst_offset);
4313 /* Add the offset */
4314 val = ((offset / 4) * sizeof (guint8*)) + G_STRUCT_OFFSET (SeqPointInfo, bp_addrs);
4315 /* Load the info->bp_addrs [offset], which is either 0 or the address of a trigger page */
4316 if (arm_is_imm12 ((int)val)) {
4317 ARM_LDR_IMM (code, dreg, dreg, val);
4319 ARM_ADD_REG_IMM (code, dreg, dreg, (val & 0xFF), 0);
4321 ARM_ADD_REG_IMM (code, dreg, dreg, (val & 0xFF00) >> 8, 24);
4323 ARM_ADD_REG_IMM (code, dreg, dreg, (val & 0xFF0000) >> 16, 16);
4324 g_assert (!(val & 0xFF000000));
4326 ARM_LDR_IMM (code, dreg, dreg, 0);
4328 /* What is faster, a branch or a load ? */
4329 ARM_CMP_REG_IMM (code, dreg, 0, 0);
4330 /* The breakpoint instruction */
4331 ARM_LDR_IMM_COND (code, dreg, dreg, 0, ARMCOND_NE);
4334 * A placeholder for a possible breakpoint inserted by
4335 * mono_arch_set_breakpoint ().
4337 for (i = 0; i < 4; ++i)
4344 ARM_ADDS_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4347 ARM_ADD_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4351 ARM_ADCS_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4354 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
4355 g_assert (imm8 >= 0);
4356 ARM_ADDS_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
4360 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
4361 g_assert (imm8 >= 0);
4362 ARM_ADD_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
4366 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
4367 g_assert (imm8 >= 0);
4368 ARM_ADCS_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
4371 ARM_ADD_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4372 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, "OverflowException");
4374 case OP_IADD_OVF_UN:
4375 ARM_ADD_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4376 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, "OverflowException");
4379 ARM_SUB_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4380 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, "OverflowException");
4382 case OP_ISUB_OVF_UN:
4383 ARM_SUB_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4384 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_TRUE, PPC_BR_EQ, "OverflowException");
4386 case OP_ADD_OVF_CARRY:
4387 ARM_ADCS_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4388 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, "OverflowException");
4390 case OP_ADD_OVF_UN_CARRY:
4391 ARM_ADCS_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4392 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, "OverflowException");
4394 case OP_SUB_OVF_CARRY:
4395 ARM_SBCS_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4396 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, "OverflowException");
4398 case OP_SUB_OVF_UN_CARRY:
4399 ARM_SBCS_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4400 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_TRUE, PPC_BR_EQ, "OverflowException");
4404 ARM_SUBS_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4407 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
4408 g_assert (imm8 >= 0);
4409 ARM_SUBS_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
4412 ARM_SUB_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4416 ARM_SBCS_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4420 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
4421 g_assert (imm8 >= 0);
4422 ARM_SUB_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
4426 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
4427 g_assert (imm8 >= 0);
4428 ARM_SBCS_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
4430 case OP_ARM_RSBS_IMM:
4431 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
4432 g_assert (imm8 >= 0);
4433 ARM_RSBS_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
4435 case OP_ARM_RSC_IMM:
4436 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
4437 g_assert (imm8 >= 0);
4438 ARM_RSC_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
4441 ARM_AND_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4445 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
4446 g_assert (imm8 >= 0);
4447 ARM_AND_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
4450 g_assert (v7s_supported);
4451 ARM_SDIV (code, ins->dreg, ins->sreg1, ins->sreg2);
4454 g_assert (v7s_supported);
4455 ARM_UDIV (code, ins->dreg, ins->sreg1, ins->sreg2);
4458 g_assert (v7s_supported);
4459 ARM_SDIV (code, ARMREG_LR, ins->sreg1, ins->sreg2);
4460 ARM_MLS (code, ins->dreg, ARMREG_LR, ins->sreg2, ins->sreg1);
4463 g_assert (v7s_supported);
4464 ARM_UDIV (code, ARMREG_LR, ins->sreg1, ins->sreg2);
4465 ARM_MLS (code, ins->dreg, ARMREG_LR, ins->sreg2, ins->sreg1);
4469 g_assert_not_reached ();
4471 ARM_ORR_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4475 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
4476 g_assert (imm8 >= 0);
4477 ARM_ORR_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
4480 ARM_EOR_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4484 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
4485 g_assert (imm8 >= 0);
4486 ARM_EOR_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
4489 ARM_SHL_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4494 ARM_SHL_IMM (code, ins->dreg, ins->sreg1, (ins->inst_imm & 0x1f));
4495 else if (ins->dreg != ins->sreg1)
4496 ARM_MOV_REG_REG (code, ins->dreg, ins->sreg1);
4499 ARM_SAR_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4504 ARM_SAR_IMM (code, ins->dreg, ins->sreg1, (ins->inst_imm & 0x1f));
4505 else if (ins->dreg != ins->sreg1)
4506 ARM_MOV_REG_REG (code, ins->dreg, ins->sreg1);
4509 case OP_ISHR_UN_IMM:
4511 ARM_SHR_IMM (code, ins->dreg, ins->sreg1, (ins->inst_imm & 0x1f));
4512 else if (ins->dreg != ins->sreg1)
4513 ARM_MOV_REG_REG (code, ins->dreg, ins->sreg1);
4516 ARM_SHR_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4519 ARM_MVN_REG_REG (code, ins->dreg, ins->sreg1);
4522 ARM_RSB_REG_IMM8 (code, ins->dreg, ins->sreg1, 0);
4525 if (ins->dreg == ins->sreg2)
4526 ARM_MUL_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4528 ARM_MUL_REG_REG (code, ins->dreg, ins->sreg2, ins->sreg1);
4531 g_assert_not_reached ();
4534 /* FIXME: handle ovf/ sreg2 != dreg */
4535 ARM_MUL_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4536 /* FIXME: MUL doesn't set the C/O flags on ARM */
4538 case OP_IMUL_OVF_UN:
4539 /* FIXME: handle ovf/ sreg2 != dreg */
4540 ARM_MUL_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4541 /* FIXME: MUL doesn't set the C/O flags on ARM */
4544 code = mono_arm_emit_load_imm (code, ins->dreg, ins->inst_c0);
4547 /* Load the GOT offset */
4548 mono_add_patch_info (cfg, offset, (MonoJumpInfoType)ins->inst_i1, ins->inst_p0);
4549 ARM_LDR_IMM (code, ins->dreg, ARMREG_PC, 0);
4551 *(gpointer*)code = NULL;
4553 /* Load the value from the GOT */
4554 ARM_LDR_REG_REG (code, ins->dreg, ARMREG_PC, ins->dreg);
4556 case OP_OBJC_GET_SELECTOR:
4557 mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_OBJC_SELECTOR_REF, ins->inst_p0);
4558 ARM_LDR_IMM (code, ins->dreg, ARMREG_PC, 0);
4560 *(gpointer*)code = NULL;
4562 ARM_LDR_REG_REG (code, ins->dreg, ARMREG_PC, ins->dreg);
4564 case OP_ICONV_TO_I4:
4565 case OP_ICONV_TO_U4:
4567 if (ins->dreg != ins->sreg1)
4568 ARM_MOV_REG_REG (code, ins->dreg, ins->sreg1);
4571 int saved = ins->sreg2;
4572 if (ins->sreg2 == ARM_LSW_REG) {
4573 ARM_MOV_REG_REG (code, ARMREG_LR, ins->sreg2);
4576 if (ins->sreg1 != ARM_LSW_REG)
4577 ARM_MOV_REG_REG (code, ARM_LSW_REG, ins->sreg1);
4578 if (saved != ARM_MSW_REG)
4579 ARM_MOV_REG_REG (code, ARM_MSW_REG, saved);
4584 ARM_CPYD (code, ins->dreg, ins->sreg1);
4586 case OP_FCONV_TO_R4:
4588 ARM_CVTD (code, ins->dreg, ins->sreg1);
4589 ARM_CVTS (code, ins->dreg, ins->dreg);
4594 * Keep in sync with mono_arch_emit_epilog
4596 g_assert (!cfg->method->save_lmf);
4598 code = emit_load_volatile_arguments (cfg, code);
4600 code = emit_big_add (code, ARMREG_SP, cfg->frame_reg, cfg->stack_usage);
4602 if (cfg->used_int_regs)
4603 ARM_POP (code, cfg->used_int_regs);
4604 ARM_POP (code, (1 << ARMREG_R7) | (1 << ARMREG_LR));
4606 ARM_POP (code, cfg->used_int_regs | (1 << ARMREG_LR));
4608 mono_add_patch_info (cfg, (guint8*) code - cfg->native_code, MONO_PATCH_INFO_METHOD_JUMP, ins->inst_p0);
4609 if (cfg->compile_aot) {
4610 ARM_LDR_IMM (code, ARMREG_IP, ARMREG_PC, 0);
4612 *(gpointer*)code = NULL;
4614 ARM_LDR_REG_REG (code, ARMREG_PC, ARMREG_PC, ARMREG_IP);
4616 code = mono_arm_patchable_b (code, ARMCOND_AL);
4620 /* ensure ins->sreg1 is not NULL */
4621 ARM_LDRB_IMM (code, ARMREG_LR, ins->sreg1, 0);
4624 g_assert (cfg->sig_cookie < 128);
4625 ARM_LDR_IMM (code, ARMREG_IP, cfg->frame_reg, cfg->sig_cookie);
4626 ARM_STR_IMM (code, ARMREG_IP, ins->sreg1, 0);
4635 call = (MonoCallInst*)ins;
4638 code = emit_float_args (cfg, call, code, &max_len, &offset);
4640 if (ins->flags & MONO_INST_HAS_METHOD)
4641 mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_METHOD, call->method);
4643 mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_ABS, call->fptr);
4644 code = emit_call_seq (cfg, code);
4645 ins->flags |= MONO_INST_GC_CALLSITE;
4646 ins->backend.pc_offset = code - cfg->native_code;
4647 code = emit_move_return_value (cfg, ins, code);
4653 case OP_VOIDCALL_REG:
4656 code = emit_float_args (cfg, (MonoCallInst *)ins, code, &max_len, &offset);
4658 code = emit_call_reg (code, ins->sreg1);
4659 ins->flags |= MONO_INST_GC_CALLSITE;
4660 ins->backend.pc_offset = code - cfg->native_code;
4661 code = emit_move_return_value (cfg, ins, code);
4663 case OP_FCALL_MEMBASE:
4664 case OP_LCALL_MEMBASE:
4665 case OP_VCALL_MEMBASE:
4666 case OP_VCALL2_MEMBASE:
4667 case OP_VOIDCALL_MEMBASE:
4668 case OP_CALL_MEMBASE: {
4669 gboolean imt_arg = FALSE;
4671 g_assert (ins->sreg1 != ARMREG_LR);
4672 call = (MonoCallInst*)ins;
4675 code = emit_float_args (cfg, call, code, &max_len, &offset);
4677 if (call->dynamic_imt_arg || call->method->klass->flags & TYPE_ATTRIBUTE_INTERFACE)
4679 if (!arm_is_imm12 (ins->inst_offset))
4680 code = mono_arm_emit_load_imm (code, ARMREG_IP, ins->inst_offset);
4681 #ifdef USE_JUMP_TABLES
4687 ARM_ADD_REG_IMM8 (code, ARMREG_LR, ARMREG_PC, LR_BIAS);
4689 ARM_MOV_REG_REG (code, ARMREG_LR, ARMREG_PC);
4691 if (!arm_is_imm12 (ins->inst_offset))
4692 ARM_LDR_REG_REG (code, ARMREG_PC, ins->sreg1, ARMREG_IP);
4694 ARM_LDR_IMM (code, ARMREG_PC, ins->sreg1, ins->inst_offset);
4697 * We can't embed the method in the code stream in PIC code, or
4699 * Instead, we put it in V5 in code emitted by
4700 * mono_arch_emit_imt_argument (), and embed NULL here to
4701 * signal the IMT thunk that the value is in V5.
4703 #ifdef USE_JUMP_TABLES
4704 /* In case of jumptables we always use value in V5. */
4707 if (call->dynamic_imt_arg)
4708 *((gpointer*)code) = NULL;
4710 *((gpointer*)code) = (gpointer)call->method;
4714 ins->flags |= MONO_INST_GC_CALLSITE;
4715 ins->backend.pc_offset = code - cfg->native_code;
4716 code = emit_move_return_value (cfg, ins, code);
4720 /* keep alignment */
4721 int alloca_waste = cfg->param_area;
4724 /* round the size to 8 bytes */
4725 ARM_ADD_REG_IMM8 (code, ins->dreg, ins->sreg1, 7);
4726 ARM_BIC_REG_IMM8 (code, ins->dreg, ins->dreg, 7);
4728 ARM_ADD_REG_IMM8 (code, ins->dreg, ins->dreg, alloca_waste);
4729 ARM_SUB_REG_REG (code, ARMREG_SP, ARMREG_SP, ins->dreg);
4730 /* memzero the area: dreg holds the size, sp is the pointer */
4731 if (ins->flags & MONO_INST_INIT) {
4732 guint8 *start_loop, *branch_to_cond;
4733 ARM_MOV_REG_IMM8 (code, ARMREG_LR, 0);
4734 branch_to_cond = code;
4737 ARM_STR_REG_REG (code, ARMREG_LR, ARMREG_SP, ins->dreg);
4738 arm_patch (branch_to_cond, code);
4739 /* decrement by 4 and set flags */
4740 ARM_SUBS_REG_IMM8 (code, ins->dreg, ins->dreg, sizeof (mgreg_t));
4741 ARM_B_COND (code, ARMCOND_GE, 0);
4742 arm_patch (code - 4, start_loop);
4744 ARM_ADD_REG_IMM8 (code, ins->dreg, ARMREG_SP, alloca_waste);
4749 MonoInst *var = cfg->dyn_call_var;
4751 g_assert (var->opcode == OP_REGOFFSET);
4752 g_assert (arm_is_imm12 (var->inst_offset));
4754 /* lr = args buffer filled by mono_arch_get_dyn_call_args () */
4755 ARM_MOV_REG_REG( code, ARMREG_LR, ins->sreg1);
4757 ARM_MOV_REG_REG( code, ARMREG_IP, ins->sreg2);
4759 /* Save args buffer */
4760 ARM_STR_IMM (code, ARMREG_LR, var->inst_basereg, var->inst_offset);
4762 /* Set stack slots using R0 as scratch reg */
4763 /* MONO_ARCH_DYN_CALL_PARAM_AREA gives the size of stack space available */
4764 for (i = 0; i < DYN_CALL_STACK_ARGS; ++i) {
4765 ARM_LDR_IMM (code, ARMREG_R0, ARMREG_LR, (PARAM_REGS + i) * sizeof (mgreg_t));
4766 ARM_STR_IMM (code, ARMREG_R0, ARMREG_SP, i * sizeof (mgreg_t));
4769 /* Set argument registers */
4770 for (i = 0; i < PARAM_REGS; ++i)
4771 ARM_LDR_IMM (code, i, ARMREG_LR, i * sizeof (mgreg_t));
4774 ARM_MOV_REG_REG (code, ARMREG_LR, ARMREG_PC);
4775 ARM_MOV_REG_REG (code, ARMREG_PC, ARMREG_IP);
4778 ARM_LDR_IMM (code, ARMREG_IP, var->inst_basereg, var->inst_offset);
4779 ARM_STR_IMM (code, ARMREG_R0, ARMREG_IP, G_STRUCT_OFFSET (DynCallArgs, res));
4780 ARM_STR_IMM (code, ARMREG_R1, ARMREG_IP, G_STRUCT_OFFSET (DynCallArgs, res2));
4784 if (ins->sreg1 != ARMREG_R0)
4785 ARM_MOV_REG_REG (code, ARMREG_R0, ins->sreg1);
4786 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_INTERNAL_METHOD,
4787 (gpointer)"mono_arch_throw_exception");
4788 code = emit_call_seq (cfg, code);
4792 if (ins->sreg1 != ARMREG_R0)
4793 ARM_MOV_REG_REG (code, ARMREG_R0, ins->sreg1);
4794 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_INTERNAL_METHOD,
4795 (gpointer)"mono_arch_rethrow_exception");
4796 code = emit_call_seq (cfg, code);
4799 case OP_START_HANDLER: {
4800 MonoInst *spvar = mono_find_spvar_for_region (cfg, bb->region);
4803 /* Reserve a param area, see filter-stack.exe */
4804 if (cfg->param_area) {
4805 if ((i = mono_arm_is_rotated_imm8 (cfg->param_area, &rot_amount)) >= 0) {
4806 ARM_SUB_REG_IMM (code, ARMREG_SP, ARMREG_SP, i, rot_amount);
4808 code = mono_arm_emit_load_imm (code, ARMREG_IP, cfg->param_area);
4809 ARM_SUB_REG_REG (code, ARMREG_SP, ARMREG_SP, ARMREG_IP);
4813 if (arm_is_imm12 (spvar->inst_offset)) {
4814 ARM_STR_IMM (code, ARMREG_LR, spvar->inst_basereg, spvar->inst_offset);
4816 code = mono_arm_emit_load_imm (code, ARMREG_IP, spvar->inst_offset);
4817 ARM_STR_REG_REG (code, ARMREG_LR, spvar->inst_basereg, ARMREG_IP);
4821 case OP_ENDFILTER: {
4822 MonoInst *spvar = mono_find_spvar_for_region (cfg, bb->region);
4825 /* Free the param area */
4826 if (cfg->param_area) {
4827 if ((i = mono_arm_is_rotated_imm8 (cfg->param_area, &rot_amount)) >= 0) {
4828 ARM_ADD_REG_IMM (code, ARMREG_SP, ARMREG_SP, i, rot_amount);
4830 code = mono_arm_emit_load_imm (code, ARMREG_IP, cfg->param_area);
4831 ARM_ADD_REG_REG (code, ARMREG_SP, ARMREG_SP, ARMREG_IP);
4835 if (ins->sreg1 != ARMREG_R0)
4836 ARM_MOV_REG_REG (code, ARMREG_R0, ins->sreg1);
4837 if (arm_is_imm12 (spvar->inst_offset)) {
4838 ARM_LDR_IMM (code, ARMREG_IP, spvar->inst_basereg, spvar->inst_offset);
4840 g_assert (ARMREG_IP != spvar->inst_basereg);
4841 code = mono_arm_emit_load_imm (code, ARMREG_IP, spvar->inst_offset);
4842 ARM_LDR_REG_REG (code, ARMREG_IP, spvar->inst_basereg, ARMREG_IP);
4844 ARM_MOV_REG_REG (code, ARMREG_PC, ARMREG_IP);
4847 case OP_ENDFINALLY: {
4848 MonoInst *spvar = mono_find_spvar_for_region (cfg, bb->region);
4851 /* Free the param area */
4852 if (cfg->param_area) {
4853 if ((i = mono_arm_is_rotated_imm8 (cfg->param_area, &rot_amount)) >= 0) {
4854 ARM_ADD_REG_IMM (code, ARMREG_SP, ARMREG_SP, i, rot_amount);
4856 code = mono_arm_emit_load_imm (code, ARMREG_IP, cfg->param_area);
4857 ARM_ADD_REG_REG (code, ARMREG_SP, ARMREG_SP, ARMREG_IP);
4861 if (arm_is_imm12 (spvar->inst_offset)) {
4862 ARM_LDR_IMM (code, ARMREG_IP, spvar->inst_basereg, spvar->inst_offset);
4864 g_assert (ARMREG_IP != spvar->inst_basereg);
4865 code = mono_arm_emit_load_imm (code, ARMREG_IP, spvar->inst_offset);
4866 ARM_LDR_REG_REG (code, ARMREG_IP, spvar->inst_basereg, ARMREG_IP);
4868 ARM_MOV_REG_REG (code, ARMREG_PC, ARMREG_IP);
4871 case OP_CALL_HANDLER:
4872 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_BB, ins->inst_target_bb);
4873 code = mono_arm_patchable_bl (code, ARMCOND_AL);
4874 mono_cfg_add_try_hole (cfg, ins->inst_eh_block, code, bb);
4877 ins->inst_c0 = code - cfg->native_code;
4880 /*if (ins->inst_target_bb->native_offset) {
4882 //x86_jump_code (code, cfg->native_code + ins->inst_target_bb->native_offset);
4884 mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_BB, ins->inst_target_bb);
4885 code = mono_arm_patchable_b (code, ARMCOND_AL);
4889 ARM_MOV_REG_REG (code, ARMREG_PC, ins->sreg1);
4893 * In the normal case we have:
4894 * ldr pc, [pc, ins->sreg1 << 2]
4897 * ldr lr, [pc, ins->sreg1 << 2]
4899 * After follows the data.
4900 * FIXME: add aot support.
4902 mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_SWITCH, ins->inst_p0);
4903 #ifdef USE_JUMP_TABLES
4905 gpointer *jte = mono_jumptable_add_entries (GPOINTER_TO_INT (ins->klass));
4906 code = mono_arm_load_jumptable_entry_addr (code, jte, ARMREG_IP);
4907 ARM_LDR_REG_REG_SHIFT (code, ARMREG_PC, ARMREG_IP, ins->sreg1, ARMSHIFT_LSL, 2);
4911 max_len += 4 * GPOINTER_TO_INT (ins->klass);
4912 if (offset + max_len > (cfg->code_size - 16)) {
4913 cfg->code_size += max_len;
4914 cfg->code_size *= 2;
4915 cfg->native_code = g_realloc (cfg->native_code, cfg->code_size);
4916 code = cfg->native_code + offset;
4918 ARM_LDR_REG_REG_SHIFT (code, ARMREG_PC, ARMREG_PC, ins->sreg1, ARMSHIFT_LSL, 2);
4920 code += 4 * GPOINTER_TO_INT (ins->klass);
4925 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 0, ARMCOND_NE);
4926 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_EQ);
4930 ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
4931 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_LT);
4935 ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
4936 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_LO);
4940 ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
4941 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_GT);
4945 ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
4946 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_HI);
4948 case OP_COND_EXC_EQ:
4949 case OP_COND_EXC_NE_UN:
4950 case OP_COND_EXC_LT:
4951 case OP_COND_EXC_LT_UN:
4952 case OP_COND_EXC_GT:
4953 case OP_COND_EXC_GT_UN:
4954 case OP_COND_EXC_GE:
4955 case OP_COND_EXC_GE_UN:
4956 case OP_COND_EXC_LE:
4957 case OP_COND_EXC_LE_UN:
4958 EMIT_COND_SYSTEM_EXCEPTION (ins->opcode - OP_COND_EXC_EQ, ins->inst_p1);
4960 case OP_COND_EXC_IEQ:
4961 case OP_COND_EXC_INE_UN:
4962 case OP_COND_EXC_ILT:
4963 case OP_COND_EXC_ILT_UN:
4964 case OP_COND_EXC_IGT:
4965 case OP_COND_EXC_IGT_UN:
4966 case OP_COND_EXC_IGE:
4967 case OP_COND_EXC_IGE_UN:
4968 case OP_COND_EXC_ILE:
4969 case OP_COND_EXC_ILE_UN:
4970 EMIT_COND_SYSTEM_EXCEPTION (ins->opcode - OP_COND_EXC_IEQ, ins->inst_p1);
4973 case OP_COND_EXC_IC:
4974 EMIT_COND_SYSTEM_EXCEPTION_FLAGS (ARMCOND_CS, ins->inst_p1);
4976 case OP_COND_EXC_OV:
4977 case OP_COND_EXC_IOV:
4978 EMIT_COND_SYSTEM_EXCEPTION_FLAGS (ARMCOND_VS, ins->inst_p1);
4980 case OP_COND_EXC_NC:
4981 case OP_COND_EXC_INC:
4982 EMIT_COND_SYSTEM_EXCEPTION_FLAGS (ARMCOND_CC, ins->inst_p1);
4984 case OP_COND_EXC_NO:
4985 case OP_COND_EXC_INO:
4986 EMIT_COND_SYSTEM_EXCEPTION_FLAGS (ARMCOND_VC, ins->inst_p1);
4998 EMIT_COND_BRANCH (ins, ins->opcode - OP_IBEQ);
5001 /* floating point opcodes */
5003 if (cfg->compile_aot) {
5004 ARM_FLDD (code, ins->dreg, ARMREG_PC, 0);
5006 *(guint32*)code = ((guint32*)(ins->inst_p0))[0];
5008 *(guint32*)code = ((guint32*)(ins->inst_p0))[1];
5011 /* FIXME: we can optimize the imm load by dealing with part of
5012 * the displacement in LDFD (aligning to 512).
5014 code = mono_arm_emit_load_imm (code, ARMREG_LR, (guint32)ins->inst_p0);
5015 ARM_FLDD (code, ins->dreg, ARMREG_LR, 0);
5019 if (cfg->compile_aot) {
5020 ARM_FLDS (code, ins->dreg, ARMREG_PC, 0);
5022 *(guint32*)code = ((guint32*)(ins->inst_p0))[0];
5024 ARM_CVTS (code, ins->dreg, ins->dreg);
5026 code = mono_arm_emit_load_imm (code, ARMREG_LR, (guint32)ins->inst_p0);
5027 ARM_FLDS (code, ins->dreg, ARMREG_LR, 0);
5028 ARM_CVTS (code, ins->dreg, ins->dreg);
5031 case OP_STORER8_MEMBASE_REG:
5032 /* This is generated by the local regalloc pass which runs after the lowering pass */
5033 if (!arm_is_fpimm8 (ins->inst_offset)) {
5034 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_offset);
5035 ARM_ADD_REG_REG (code, ARMREG_LR, ARMREG_LR, ins->inst_destbasereg);
5036 ARM_FSTD (code, ins->sreg1, ARMREG_LR, 0);
5038 ARM_FSTD (code, ins->sreg1, ins->inst_destbasereg, ins->inst_offset);
5041 case OP_LOADR8_MEMBASE:
5042 /* This is generated by the local regalloc pass which runs after the lowering pass */
5043 if (!arm_is_fpimm8 (ins->inst_offset)) {
5044 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_offset);
5045 ARM_ADD_REG_REG (code, ARMREG_LR, ARMREG_LR, ins->inst_basereg);
5046 ARM_FLDD (code, ins->dreg, ARMREG_LR, 0);
5048 ARM_FLDD (code, ins->dreg, ins->inst_basereg, ins->inst_offset);
5051 case OP_STORER4_MEMBASE_REG:
5052 g_assert (arm_is_fpimm8 (ins->inst_offset));
5053 ARM_CVTD (code, vfp_scratch1, ins->sreg1);
5054 ARM_FSTS (code, vfp_scratch1, ins->inst_destbasereg, ins->inst_offset);
5056 case OP_LOADR4_MEMBASE:
5057 g_assert (arm_is_fpimm8 (ins->inst_offset));
5058 ARM_FLDS (code, vfp_scratch1, ins->inst_basereg, ins->inst_offset);
5059 ARM_CVTS (code, ins->dreg, vfp_scratch1);
5061 case OP_ICONV_TO_R_UN: {
5062 g_assert_not_reached ();
5065 case OP_ICONV_TO_R4:
5066 ARM_FMSR (code, vfp_scratch1, ins->sreg1);
5067 ARM_FSITOS (code, vfp_scratch1, vfp_scratch1);
5068 ARM_CVTS (code, ins->dreg, vfp_scratch1);
5070 case OP_ICONV_TO_R8:
5071 ARM_FMSR (code, vfp_scratch1, ins->sreg1);
5072 ARM_FSITOD (code, ins->dreg, vfp_scratch1);
5076 if (mono_method_signature (cfg->method)->ret->type == MONO_TYPE_R4) {
5077 ARM_CVTD (code, ARM_VFP_F0, ins->sreg1);
5079 if (!IS_HARD_FLOAT) {
5080 ARM_FMRS (code, ARMREG_R0, ARM_VFP_F0);
5083 if (IS_HARD_FLOAT) {
5084 ARM_CPYD (code, ARM_VFP_D0, ins->sreg1);
5086 ARM_FMRRD (code, ARMREG_R0, ARMREG_R1, ins->sreg1);
5090 case OP_FCONV_TO_I1:
5091 code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 1, TRUE);
5093 case OP_FCONV_TO_U1:
5094 code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 1, FALSE);
5096 case OP_FCONV_TO_I2:
5097 code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 2, TRUE);
5099 case OP_FCONV_TO_U2:
5100 code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 2, FALSE);
5102 case OP_FCONV_TO_I4:
5104 code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 4, TRUE);
5106 case OP_FCONV_TO_U4:
5108 code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 4, FALSE);
5110 case OP_FCONV_TO_I8:
5111 case OP_FCONV_TO_U8:
5112 g_assert_not_reached ();
5113 /* Implemented as helper calls */
5115 case OP_LCONV_TO_R_UN:
5116 g_assert_not_reached ();
5117 /* Implemented as helper calls */
5119 case OP_LCONV_TO_OVF_I4_2: {
5120 guint8 *high_bit_not_set, *valid_negative, *invalid_negative, *valid_positive;
5122 * Valid ints: 0xffffffff:8000000 to 00000000:0x7f000000
5125 ARM_CMP_REG_IMM8 (code, ins->sreg1, 0);
5126 high_bit_not_set = code;
5127 ARM_B_COND (code, ARMCOND_GE, 0); /*branch if bit 31 of the lower part is not set*/
5129 ARM_CMN_REG_IMM8 (code, ins->sreg2, 1); /*This have the same effect as CMP reg, 0xFFFFFFFF */
5130 valid_negative = code;
5131 ARM_B_COND (code, ARMCOND_EQ, 0); /*branch if upper part == 0xFFFFFFFF (lower part has bit 31 set) */
5132 invalid_negative = code;
5133 ARM_B_COND (code, ARMCOND_AL, 0);
5135 arm_patch (high_bit_not_set, code);
5137 ARM_CMP_REG_IMM8 (code, ins->sreg2, 0);
5138 valid_positive = code;
5139 ARM_B_COND (code, ARMCOND_EQ, 0); /*branch if upper part == 0 (lower part has bit 31 clear)*/
5141 arm_patch (invalid_negative, code);
5142 EMIT_COND_SYSTEM_EXCEPTION_FLAGS (ARMCOND_AL, "OverflowException");
5144 arm_patch (valid_negative, code);
5145 arm_patch (valid_positive, code);
5147 if (ins->dreg != ins->sreg1)
5148 ARM_MOV_REG_REG (code, ins->dreg, ins->sreg1);
5152 ARM_VFP_ADDD (code, ins->dreg, ins->sreg1, ins->sreg2);
5155 ARM_VFP_SUBD (code, ins->dreg, ins->sreg1, ins->sreg2);
5158 ARM_VFP_MULD (code, ins->dreg, ins->sreg1, ins->sreg2);
5161 ARM_VFP_DIVD (code, ins->dreg, ins->sreg1, ins->sreg2);
5164 ARM_NEGD (code, ins->dreg, ins->sreg1);
5168 g_assert_not_reached ();
5172 ARM_CMPD (code, ins->sreg1, ins->sreg2);
5178 ARM_CMPD (code, ins->sreg1, ins->sreg2);
5181 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 0, ARMCOND_NE);
5182 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_EQ);
5186 ARM_CMPD (code, ins->sreg1, ins->sreg2);
5189 ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
5190 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_MI);
5194 ARM_CMPD (code, ins->sreg1, ins->sreg2);
5197 ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
5198 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_MI);
5199 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_VS);
5203 ARM_CMPD (code, ins->sreg2, ins->sreg1);
5206 ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
5207 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_MI);
5211 ARM_CMPD (code, ins->sreg2, ins->sreg1);
5214 ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
5215 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_MI);
5216 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_VS);
5218 /* ARM FPA flags table:
5219 * N Less than ARMCOND_MI
5220 * Z Equal ARMCOND_EQ
5221 * C Greater Than or Equal ARMCOND_CS
5222 * V Unordered ARMCOND_VS
5225 EMIT_COND_BRANCH (ins, OP_IBEQ - OP_IBEQ);
5228 EMIT_COND_BRANCH (ins, OP_IBNE_UN - OP_IBEQ);
5231 EMIT_COND_BRANCH_FLAGS (ins, ARMCOND_MI); /* N set */
5234 EMIT_COND_BRANCH_FLAGS (ins, ARMCOND_VS); /* V set */
5235 EMIT_COND_BRANCH_FLAGS (ins, ARMCOND_MI); /* N set */
5241 g_assert_not_reached ();
5245 EMIT_COND_BRANCH_FLAGS (ins, ARMCOND_GE);
5247 /* FPA requires EQ even thou the docs suggests that just CS is enough */
5248 EMIT_COND_BRANCH_FLAGS (ins, ARMCOND_EQ);
5249 EMIT_COND_BRANCH_FLAGS (ins, ARMCOND_CS);
5253 EMIT_COND_BRANCH_FLAGS (ins, ARMCOND_VS); /* V set */
5254 EMIT_COND_BRANCH_FLAGS (ins, ARMCOND_GE);
5259 #ifdef USE_JUMP_TABLES
5261 gpointer *jte = mono_jumptable_add_entries (2);
5262 jte [0] = GUINT_TO_POINTER (0xffffffff);
5263 jte [1] = GUINT_TO_POINTER (0x7fefffff);
5264 code = mono_arm_load_jumptable_entry_addr (code, jte, ARMREG_IP);
5265 ARM_FLDD (code, vfp_scratch1, ARMREG_IP, 0);
5268 ARM_ABSD (code, vfp_scratch2, ins->sreg1);
5269 ARM_FLDD (code, vfp_scratch1, ARMREG_PC, 0);
5271 *(guint32*)code = 0xffffffff;
5273 *(guint32*)code = 0x7fefffff;
5276 ARM_CMPD (code, vfp_scratch2, vfp_scratch1);
5278 EMIT_COND_SYSTEM_EXCEPTION_FLAGS (ARMCOND_GT, "ArithmeticException");
5279 ARM_CMPD (code, ins->sreg1, ins->sreg1);
5281 EMIT_COND_SYSTEM_EXCEPTION_FLAGS (ARMCOND_VS, "ArithmeticException");
5282 ARM_CPYD (code, ins->dreg, ins->sreg1);
5287 case OP_GC_LIVENESS_DEF:
5288 case OP_GC_LIVENESS_USE:
5289 case OP_GC_PARAM_SLOT_LIVENESS_DEF:
5290 ins->backend.pc_offset = code - cfg->native_code;
5292 case OP_GC_SPILL_SLOT_LIVENESS_DEF:
5293 ins->backend.pc_offset = code - cfg->native_code;
5294 bb->spill_slot_defs = g_slist_prepend_mempool (cfg->mempool, bb->spill_slot_defs, ins);
5298 g_warning ("unknown opcode %s in %s()\n", mono_inst_name (ins->opcode), __FUNCTION__);
5299 g_assert_not_reached ();
5302 if ((cfg->opt & MONO_OPT_BRANCH) && ((code - cfg->native_code - offset) > max_len)) {
5303 g_warning ("wrong maximal instruction length of instruction %s (expected %d, got %d)",
5304 mono_inst_name (ins->opcode), max_len, code - cfg->native_code - offset);
5305 g_assert_not_reached ();
5311 last_offset = offset;
5314 cfg->code_len = code - cfg->native_code;
5317 #endif /* DISABLE_JIT */
5319 #ifdef HAVE_AEABI_READ_TP
5320 void __aeabi_read_tp (void);
5324 mono_arch_register_lowlevel_calls (void)
5326 /* The signature doesn't matter */
5327 mono_register_jit_icall (mono_arm_throw_exception, "mono_arm_throw_exception", mono_create_icall_signature ("void"), TRUE);
5328 mono_register_jit_icall (mono_arm_throw_exception_by_token, "mono_arm_throw_exception_by_token", mono_create_icall_signature ("void"), TRUE);
5330 #ifndef MONO_CROSS_COMPILE
5331 #ifdef HAVE_AEABI_READ_TP
5332 mono_register_jit_icall (__aeabi_read_tp, "__aeabi_read_tp", mono_create_icall_signature ("void"), TRUE);
5337 #define patch_lis_ori(ip,val) do {\
5338 guint16 *__lis_ori = (guint16*)(ip); \
5339 __lis_ori [1] = (((guint32)(val)) >> 16) & 0xffff; \
5340 __lis_ori [3] = ((guint32)(val)) & 0xffff; \
5344 mono_arch_patch_code (MonoMethod *method, MonoDomain *domain, guint8 *code, MonoJumpInfo *ji, MonoCodeManager *dyn_code_mp, gboolean run_cctors)
5346 MonoJumpInfo *patch_info;
5347 gboolean compile_aot = !run_cctors;
5349 for (patch_info = ji; patch_info; patch_info = patch_info->next) {
5350 unsigned char *ip = patch_info->ip.i + code;
5351 const unsigned char *target;
5353 if (patch_info->type == MONO_PATCH_INFO_SWITCH && !compile_aot) {
5354 #ifdef USE_JUMP_TABLES
5355 gpointer *jt = mono_jumptable_get_entry (ip);
5357 gpointer *jt = (gpointer*)(ip + 8);
5360 /* jt is the inlined jump table, 2 instructions after ip
5361 * In the normal case we store the absolute addresses,
5362 * otherwise the displacements.
5364 for (i = 0; i < patch_info->data.table->table_size; i++)
5365 jt [i] = code + (int)patch_info->data.table->table [i];
5370 switch (patch_info->type) {
5371 case MONO_PATCH_INFO_BB:
5372 case MONO_PATCH_INFO_LABEL:
5375 /* No need to patch these */
5380 target = mono_resolve_patch_target (method, domain, code, patch_info, run_cctors);
5382 switch (patch_info->type) {
5383 case MONO_PATCH_INFO_IP:
5384 g_assert_not_reached ();
5385 patch_lis_ori (ip, ip);
5387 case MONO_PATCH_INFO_METHOD_REL:
5388 g_assert_not_reached ();
5389 *((gpointer *)(ip)) = code + patch_info->data.offset;
5391 case MONO_PATCH_INFO_METHODCONST:
5392 case MONO_PATCH_INFO_CLASS:
5393 case MONO_PATCH_INFO_IMAGE:
5394 case MONO_PATCH_INFO_FIELD:
5395 case MONO_PATCH_INFO_VTABLE:
5396 case MONO_PATCH_INFO_IID:
5397 case MONO_PATCH_INFO_SFLDA:
5398 case MONO_PATCH_INFO_LDSTR:
5399 case MONO_PATCH_INFO_TYPE_FROM_HANDLE:
5400 case MONO_PATCH_INFO_LDTOKEN:
5401 g_assert_not_reached ();
5402 /* from OP_AOTCONST : lis + ori */
5403 patch_lis_ori (ip, target);
5405 case MONO_PATCH_INFO_R4:
5406 case MONO_PATCH_INFO_R8:
5407 g_assert_not_reached ();
5408 *((gconstpointer *)(ip + 2)) = patch_info->data.target;
5410 case MONO_PATCH_INFO_EXC_NAME:
5411 g_assert_not_reached ();
5412 *((gconstpointer *)(ip + 1)) = patch_info->data.name;
5414 case MONO_PATCH_INFO_NONE:
5415 case MONO_PATCH_INFO_BB_OVF:
5416 case MONO_PATCH_INFO_EXC_OVF:
5417 /* everything is dealt with at epilog output time */
5422 arm_patch_general (domain, ip, target, dyn_code_mp);
5429 * Stack frame layout:
5431 * ------------------- fp
5432 * MonoLMF structure or saved registers
5433 * -------------------
5435 * -------------------
5437 * -------------------
5438 * optional 8 bytes for tracing
5439 * -------------------
5440 * param area size is cfg->param_area
5441 * ------------------- sp
5444 mono_arch_emit_prolog (MonoCompile *cfg)
5446 MonoMethod *method = cfg->method;
5448 MonoMethodSignature *sig;
5450 int alloc_size, orig_alloc_size, pos, max_offset, i, rot_amount;
5455 int prev_sp_offset, reg_offset;
5457 if (mono_jit_trace_calls != NULL && mono_trace_eval (method))
5460 sig = mono_method_signature (method);
5461 cfg->code_size = 256 + sig->param_count * 64;
5462 code = cfg->native_code = g_malloc (cfg->code_size);
5464 mono_emit_unwind_op_def_cfa (cfg, code, ARMREG_SP, 0);
5466 alloc_size = cfg->stack_offset;
5472 * The iphone uses R7 as the frame pointer, and it points at the saved
5477 * We can't use r7 as a frame pointer since it points into the middle of
5478 * the frame, so we keep using our own frame pointer.
5479 * FIXME: Optimize this.
5481 ARM_PUSH (code, (1 << ARMREG_R7) | (1 << ARMREG_LR));
5482 ARM_MOV_REG_REG (code, ARMREG_R7, ARMREG_SP);
5483 prev_sp_offset += 8; /* r7 and lr */
5484 mono_emit_unwind_op_def_cfa_offset (cfg, code, prev_sp_offset);
5485 mono_emit_unwind_op_offset (cfg, code, ARMREG_R7, (- prev_sp_offset) + 0);
5488 if (!method->save_lmf) {
5490 /* No need to push LR again */
5491 if (cfg->used_int_regs)
5492 ARM_PUSH (code, cfg->used_int_regs);
5494 ARM_PUSH (code, cfg->used_int_regs | (1 << ARMREG_LR));
5495 prev_sp_offset += 4;
5497 for (i = 0; i < 16; ++i) {
5498 if (cfg->used_int_regs & (1 << i))
5499 prev_sp_offset += 4;
5501 mono_emit_unwind_op_def_cfa_offset (cfg, code, prev_sp_offset);
5503 for (i = 0; i < 16; ++i) {
5504 if ((cfg->used_int_regs & (1 << i))) {
5505 mono_emit_unwind_op_offset (cfg, code, i, (- prev_sp_offset) + reg_offset);
5506 mini_gc_set_slot_type_from_cfa (cfg, (- prev_sp_offset) + reg_offset, SLOT_NOREF);
5511 mono_emit_unwind_op_offset (cfg, code, ARMREG_LR, -4);
5512 mini_gc_set_slot_type_from_cfa (cfg, -4, SLOT_NOREF);
5514 mono_emit_unwind_op_offset (cfg, code, ARMREG_LR, -4);
5515 mini_gc_set_slot_type_from_cfa (cfg, -4, SLOT_NOREF);
5518 ARM_MOV_REG_REG (code, ARMREG_IP, ARMREG_SP);
5519 ARM_PUSH (code, 0x5ff0);
5520 prev_sp_offset += 4 * 10; /* all but r0-r3, sp and pc */
5521 mono_emit_unwind_op_def_cfa_offset (cfg, code, prev_sp_offset);
5523 for (i = 0; i < 16; ++i) {
5524 if ((i > ARMREG_R3) && (i != ARMREG_SP) && (i != ARMREG_PC)) {
5525 /* The original r7 is saved at the start */
5526 if (!(iphone_abi && i == ARMREG_R7))
5527 mono_emit_unwind_op_offset (cfg, code, i, (- prev_sp_offset) + reg_offset);
5531 g_assert (reg_offset == 4 * 10);
5532 pos += sizeof (MonoLMF) - (4 * 10);
5536 orig_alloc_size = alloc_size;
5537 // align to MONO_ARCH_FRAME_ALIGNMENT bytes
5538 if (alloc_size & (MONO_ARCH_FRAME_ALIGNMENT - 1)) {
5539 alloc_size += MONO_ARCH_FRAME_ALIGNMENT - 1;
5540 alloc_size &= ~(MONO_ARCH_FRAME_ALIGNMENT - 1);
5543 /* the stack used in the pushed regs */
5544 if (prev_sp_offset & 4)
5546 cfg->stack_usage = alloc_size;
5548 if ((i = mono_arm_is_rotated_imm8 (alloc_size, &rot_amount)) >= 0) {
5549 ARM_SUB_REG_IMM (code, ARMREG_SP, ARMREG_SP, i, rot_amount);
5551 code = mono_arm_emit_load_imm (code, ARMREG_IP, alloc_size);
5552 ARM_SUB_REG_REG (code, ARMREG_SP, ARMREG_SP, ARMREG_IP);
5554 mono_emit_unwind_op_def_cfa_offset (cfg, code, prev_sp_offset + alloc_size);
5556 if (cfg->frame_reg != ARMREG_SP) {
5557 ARM_MOV_REG_REG (code, cfg->frame_reg, ARMREG_SP);
5558 mono_emit_unwind_op_def_cfa_reg (cfg, code, cfg->frame_reg);
5560 //g_print ("prev_sp_offset: %d, alloc_size:%d\n", prev_sp_offset, alloc_size);
5561 prev_sp_offset += alloc_size;
5563 for (i = 0; i < alloc_size - orig_alloc_size; i += 4)
5564 mini_gc_set_slot_type_from_cfa (cfg, (- prev_sp_offset) + orig_alloc_size + i, SLOT_NOREF);
5566 /* compute max_offset in order to use short forward jumps
5567 * we could skip do it on arm because the immediate displacement
5568 * for jumps is large enough, it may be useful later for constant pools
5571 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
5572 MonoInst *ins = bb->code;
5573 bb->max_offset = max_offset;
5575 if (cfg->prof_options & MONO_PROFILE_COVERAGE)
5578 MONO_BB_FOR_EACH_INS (bb, ins)
5579 max_offset += ((guint8 *)ins_get_spec (ins->opcode))[MONO_INST_LEN];
5582 /* store runtime generic context */
5583 if (cfg->rgctx_var) {
5584 MonoInst *ins = cfg->rgctx_var;
5586 g_assert (ins->opcode == OP_REGOFFSET);
5588 if (arm_is_imm12 (ins->inst_offset)) {
5589 ARM_STR_IMM (code, MONO_ARCH_RGCTX_REG, ins->inst_basereg, ins->inst_offset);
5591 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_offset);
5592 ARM_STR_REG_REG (code, MONO_ARCH_RGCTX_REG, ins->inst_basereg, ARMREG_LR);
5596 /* load arguments allocated to register from the stack */
5599 cinfo = get_call_info (cfg->generic_sharing_context, NULL, sig);
5601 if (cinfo->vtype_retaddr) {
5602 ArgInfo *ainfo = &cinfo->ret;
5603 inst = cfg->vret_addr;
5604 g_assert (arm_is_imm12 (inst->inst_offset));
5605 ARM_STR_IMM (code, ainfo->reg, inst->inst_basereg, inst->inst_offset);
5608 if (sig->call_convention == MONO_CALL_VARARG) {
5609 ArgInfo *cookie = &cinfo->sig_cookie;
5611 /* Save the sig cookie address */
5612 g_assert (cookie->storage == RegTypeBase);
5614 g_assert (arm_is_imm12 (prev_sp_offset + cookie->offset));
5615 g_assert (arm_is_imm12 (cfg->sig_cookie));
5616 ARM_ADD_REG_IMM8 (code, ARMREG_IP, cfg->frame_reg, prev_sp_offset + cookie->offset);
5617 ARM_STR_IMM (code, ARMREG_IP, cfg->frame_reg, cfg->sig_cookie);
5620 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
5621 ArgInfo *ainfo = cinfo->args + i;
5622 inst = cfg->args [pos];
5624 if (cfg->verbose_level > 2)
5625 g_print ("Saving argument %d (type: %d)\n", i, ainfo->storage);
5626 if (inst->opcode == OP_REGVAR) {
5627 if (ainfo->storage == RegTypeGeneral)
5628 ARM_MOV_REG_REG (code, inst->dreg, ainfo->reg);
5629 else if (ainfo->storage == RegTypeFP) {
5630 g_assert_not_reached ();
5631 } else if (ainfo->storage == RegTypeBase) {
5632 if (arm_is_imm12 (prev_sp_offset + ainfo->offset)) {
5633 ARM_LDR_IMM (code, inst->dreg, ARMREG_SP, (prev_sp_offset + ainfo->offset));
5635 code = mono_arm_emit_load_imm (code, ARMREG_IP, prev_sp_offset + ainfo->offset);
5636 ARM_LDR_REG_REG (code, inst->dreg, ARMREG_SP, ARMREG_IP);
5639 g_assert_not_reached ();
5641 if (cfg->verbose_level > 2)
5642 g_print ("Argument %d assigned to register %s\n", pos, mono_arch_regname (inst->dreg));
5644 /* the argument should be put on the stack: FIXME handle size != word */
5645 if (ainfo->storage == RegTypeGeneral || ainfo->storage == RegTypeIRegPair || ainfo->storage == RegTypeGSharedVtInReg) {
5646 switch (ainfo->size) {
5648 if (arm_is_imm12 (inst->inst_offset))
5649 ARM_STRB_IMM (code, ainfo->reg, inst->inst_basereg, inst->inst_offset);
5651 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
5652 ARM_STRB_REG_REG (code, ainfo->reg, inst->inst_basereg, ARMREG_IP);
5656 if (arm_is_imm8 (inst->inst_offset)) {
5657 ARM_STRH_IMM (code, ainfo->reg, inst->inst_basereg, inst->inst_offset);
5659 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
5660 ARM_STRH_REG_REG (code, ainfo->reg, inst->inst_basereg, ARMREG_IP);
5664 if (arm_is_imm12 (inst->inst_offset)) {
5665 ARM_STR_IMM (code, ainfo->reg, inst->inst_basereg, inst->inst_offset);
5667 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
5668 ARM_STR_REG_REG (code, ainfo->reg, inst->inst_basereg, ARMREG_IP);
5670 if (arm_is_imm12 (inst->inst_offset + 4)) {
5671 ARM_STR_IMM (code, ainfo->reg + 1, inst->inst_basereg, inst->inst_offset + 4);
5673 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset + 4);
5674 ARM_STR_REG_REG (code, ainfo->reg + 1, inst->inst_basereg, ARMREG_IP);
5678 if (arm_is_imm12 (inst->inst_offset)) {
5679 ARM_STR_IMM (code, ainfo->reg, inst->inst_basereg, inst->inst_offset);
5681 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
5682 ARM_STR_REG_REG (code, ainfo->reg, inst->inst_basereg, ARMREG_IP);
5686 } else if (ainfo->storage == RegTypeBaseGen) {
5687 if (arm_is_imm12 (prev_sp_offset + ainfo->offset)) {
5688 ARM_LDR_IMM (code, ARMREG_LR, ARMREG_SP, (prev_sp_offset + ainfo->offset));
5690 code = mono_arm_emit_load_imm (code, ARMREG_IP, prev_sp_offset + ainfo->offset);
5691 ARM_LDR_REG_REG (code, ARMREG_LR, ARMREG_SP, ARMREG_IP);
5693 if (arm_is_imm12 (inst->inst_offset + 4)) {
5694 ARM_STR_IMM (code, ARMREG_LR, inst->inst_basereg, inst->inst_offset + 4);
5695 ARM_STR_IMM (code, ARMREG_R3, inst->inst_basereg, inst->inst_offset);
5697 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset + 4);
5698 ARM_STR_REG_REG (code, ARMREG_LR, inst->inst_basereg, ARMREG_IP);
5699 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
5700 ARM_STR_REG_REG (code, ARMREG_R3, inst->inst_basereg, ARMREG_IP);
5702 } else if (ainfo->storage == RegTypeBase || ainfo->storage == RegTypeGSharedVtOnStack) {
5703 if (arm_is_imm12 (prev_sp_offset + ainfo->offset)) {
5704 ARM_LDR_IMM (code, ARMREG_LR, ARMREG_SP, (prev_sp_offset + ainfo->offset));
5706 code = mono_arm_emit_load_imm (code, ARMREG_IP, prev_sp_offset + ainfo->offset);
5707 ARM_LDR_REG_REG (code, ARMREG_LR, ARMREG_SP, ARMREG_IP);
5710 switch (ainfo->size) {
5712 if (arm_is_imm8 (inst->inst_offset)) {
5713 ARM_STRB_IMM (code, ARMREG_LR, inst->inst_basereg, inst->inst_offset);
5715 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
5716 ARM_STRB_REG_REG (code, ARMREG_LR, inst->inst_basereg, ARMREG_IP);
5720 if (arm_is_imm8 (inst->inst_offset)) {
5721 ARM_STRH_IMM (code, ARMREG_LR, inst->inst_basereg, inst->inst_offset);
5723 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
5724 ARM_STRH_REG_REG (code, ARMREG_LR, inst->inst_basereg, ARMREG_IP);
5728 if (arm_is_imm12 (inst->inst_offset)) {
5729 ARM_STR_IMM (code, ARMREG_LR, inst->inst_basereg, inst->inst_offset);
5731 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
5732 ARM_STR_REG_REG (code, ARMREG_LR, inst->inst_basereg, ARMREG_IP);
5734 if (arm_is_imm12 (prev_sp_offset + ainfo->offset + 4)) {
5735 ARM_LDR_IMM (code, ARMREG_LR, ARMREG_SP, (prev_sp_offset + ainfo->offset + 4));
5737 code = mono_arm_emit_load_imm (code, ARMREG_IP, prev_sp_offset + ainfo->offset + 4);
5738 ARM_LDR_REG_REG (code, ARMREG_LR, ARMREG_SP, ARMREG_IP);
5740 if (arm_is_imm12 (inst->inst_offset + 4)) {
5741 ARM_STR_IMM (code, ARMREG_LR, inst->inst_basereg, inst->inst_offset + 4);
5743 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset + 4);
5744 ARM_STR_REG_REG (code, ARMREG_LR, inst->inst_basereg, ARMREG_IP);
5748 if (arm_is_imm12 (inst->inst_offset)) {
5749 ARM_STR_IMM (code, ARMREG_LR, inst->inst_basereg, inst->inst_offset);
5751 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
5752 ARM_STR_REG_REG (code, ARMREG_LR, inst->inst_basereg, ARMREG_IP);
5756 } else if (ainfo->storage == RegTypeFP) {
5757 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
5758 ARM_ADD_REG_REG (code, ARMREG_IP, ARMREG_IP, inst->inst_basereg);
5760 if (ainfo->size == 8)
5761 ARM_FSTD (code, ainfo->reg, ARMREG_IP, 0);
5763 ARM_FSTS (code, ainfo->reg, ARMREG_IP, 0);
5764 } else if (ainfo->storage == RegTypeStructByVal) {
5765 int doffset = inst->inst_offset;
5769 size = mini_type_stack_size_full (cfg->generic_sharing_context, inst->inst_vtype, NULL, sig->pinvoke);
5770 for (cur_reg = 0; cur_reg < ainfo->size; ++cur_reg) {
5771 if (arm_is_imm12 (doffset)) {
5772 ARM_STR_IMM (code, ainfo->reg + cur_reg, inst->inst_basereg, doffset);
5774 code = mono_arm_emit_load_imm (code, ARMREG_IP, doffset);
5775 ARM_STR_REG_REG (code, ainfo->reg + cur_reg, inst->inst_basereg, ARMREG_IP);
5777 soffset += sizeof (gpointer);
5778 doffset += sizeof (gpointer);
5780 if (ainfo->vtsize) {
5781 /* FIXME: handle overrun! with struct sizes not multiple of 4 */
5782 //g_print ("emit_memcpy (prev_sp_ofs: %d, ainfo->offset: %d, soffset: %d)\n", prev_sp_offset, ainfo->offset, soffset);
5783 code = emit_memcpy (code, ainfo->vtsize * sizeof (gpointer), inst->inst_basereg, doffset, ARMREG_SP, prev_sp_offset + ainfo->offset);
5785 } else if (ainfo->storage == RegTypeStructByAddr) {
5786 g_assert_not_reached ();
5787 /* FIXME: handle overrun! with struct sizes not multiple of 4 */
5788 code = emit_memcpy (code, ainfo->vtsize * sizeof (gpointer), inst->inst_basereg, inst->inst_offset, ainfo->reg, 0);
5790 g_assert_not_reached ();
5795 if (method->save_lmf)
5796 code = emit_save_lmf (cfg, code, alloc_size - lmf_offset);
5799 code = mono_arch_instrument_prolog (cfg, mono_trace_enter_method, code, TRUE);
5801 if (cfg->arch.seq_point_info_var) {
5802 MonoInst *ins = cfg->arch.seq_point_info_var;
5804 /* Initialize the variable from a GOT slot */
5805 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_SEQ_POINT_INFO, cfg->method);
5806 #ifdef USE_JUMP_TABLES
5808 gpointer *jte = mono_jumptable_add_entry ();
5809 code = mono_arm_load_jumptable_entry (code, jte, ARMREG_IP);
5810 ARM_LDR_IMM (code, ARMREG_R0, ARMREG_IP, 0);
5812 /** XXX: is it correct? */
5814 ARM_LDR_IMM (code, ARMREG_R0, ARMREG_PC, 0);
5816 *(gpointer*)code = NULL;
5819 ARM_LDR_REG_REG (code, ARMREG_R0, ARMREG_PC, ARMREG_R0);
5821 g_assert (ins->opcode == OP_REGOFFSET);
5823 if (arm_is_imm12 (ins->inst_offset)) {
5824 ARM_STR_IMM (code, ARMREG_R0, ins->inst_basereg, ins->inst_offset);
5826 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_offset);
5827 ARM_STR_REG_REG (code, ARMREG_R0, ins->inst_basereg, ARMREG_LR);
5831 /* Initialize ss_trigger_page_var */
5832 if (!cfg->soft_breakpoints) {
5833 MonoInst *info_var = cfg->arch.seq_point_info_var;
5834 MonoInst *ss_trigger_page_var = cfg->arch.ss_trigger_page_var;
5835 int dreg = ARMREG_LR;
5838 g_assert (info_var->opcode == OP_REGOFFSET);
5839 g_assert (arm_is_imm12 (info_var->inst_offset));
5841 ARM_LDR_IMM (code, dreg, info_var->inst_basereg, info_var->inst_offset);
5842 /* Load the trigger page addr */
5843 ARM_LDR_IMM (code, dreg, dreg, G_STRUCT_OFFSET (SeqPointInfo, ss_trigger_page));
5844 ARM_STR_IMM (code, dreg, ss_trigger_page_var->inst_basereg, ss_trigger_page_var->inst_offset);
5848 if (cfg->arch.seq_point_read_var) {
5849 MonoInst *read_ins = cfg->arch.seq_point_read_var;
5850 MonoInst *ss_method_ins = cfg->arch.seq_point_ss_method_var;
5851 MonoInst *bp_method_ins = cfg->arch.seq_point_bp_method_var;
5852 #ifdef USE_JUMP_TABLES
5855 g_assert (read_ins->opcode == OP_REGOFFSET);
5856 g_assert (arm_is_imm12 (read_ins->inst_offset));
5857 g_assert (ss_method_ins->opcode == OP_REGOFFSET);
5858 g_assert (arm_is_imm12 (ss_method_ins->inst_offset));
5859 g_assert (bp_method_ins->opcode == OP_REGOFFSET);
5860 g_assert (arm_is_imm12 (bp_method_ins->inst_offset));
5862 #ifdef USE_JUMP_TABLES
5863 jte = mono_jumptable_add_entries (3);
5864 jte [0] = (gpointer)&ss_trigger_var;
5865 jte [1] = single_step_func_wrapper;
5866 jte [2] = breakpoint_func_wrapper;
5867 code = mono_arm_load_jumptable_entry_addr (code, jte, ARMREG_LR);
5869 ARM_MOV_REG_REG (code, ARMREG_LR, ARMREG_PC);
5871 *(volatile int **)code = &ss_trigger_var;
5873 *(gpointer*)code = single_step_func_wrapper;
5875 *(gpointer*)code = breakpoint_func_wrapper;
5879 ARM_LDR_IMM (code, ARMREG_IP, ARMREG_LR, 0);
5880 ARM_STR_IMM (code, ARMREG_IP, read_ins->inst_basereg, read_ins->inst_offset);
5881 ARM_LDR_IMM (code, ARMREG_IP, ARMREG_LR, 4);
5882 ARM_STR_IMM (code, ARMREG_IP, ss_method_ins->inst_basereg, ss_method_ins->inst_offset);
5883 ARM_LDR_IMM (code, ARMREG_IP, ARMREG_LR, 8);
5884 ARM_STR_IMM (code, ARMREG_IP, bp_method_ins->inst_basereg, bp_method_ins->inst_offset);
5887 cfg->code_len = code - cfg->native_code;
5888 g_assert (cfg->code_len < cfg->code_size);
5895 mono_arch_emit_epilog (MonoCompile *cfg)
5897 MonoMethod *method = cfg->method;
5898 int pos, i, rot_amount;
5899 int max_epilog_size = 16 + 20*4;
5903 if (cfg->method->save_lmf)
5904 max_epilog_size += 128;
5906 if (mono_jit_trace_calls != NULL)
5907 max_epilog_size += 50;
5909 if (cfg->prof_options & MONO_PROFILE_ENTER_LEAVE)
5910 max_epilog_size += 50;
5912 while (cfg->code_len + max_epilog_size > (cfg->code_size - 16)) {
5913 cfg->code_size *= 2;
5914 cfg->native_code = g_realloc (cfg->native_code, cfg->code_size);
5915 cfg->stat_code_reallocs++;
5919 * Keep in sync with OP_JMP
5921 code = cfg->native_code + cfg->code_len;
5923 if (mono_jit_trace_calls != NULL && mono_trace_eval (method)) {
5924 code = mono_arch_instrument_epilog (cfg, mono_trace_leave_method, code, TRUE);
5928 /* Load returned vtypes into registers if needed */
5929 cinfo = cfg->arch.cinfo;
5930 if (cinfo->ret.storage == RegTypeStructByVal) {
5931 MonoInst *ins = cfg->ret;
5933 if (arm_is_imm12 (ins->inst_offset)) {
5934 ARM_LDR_IMM (code, ARMREG_R0, ins->inst_basereg, ins->inst_offset);
5936 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_offset);
5937 ARM_LDR_REG_REG (code, ARMREG_R0, ins->inst_basereg, ARMREG_LR);
5941 if (method->save_lmf) {
5942 int lmf_offset, reg, sp_adj, regmask;
5943 /* all but r0-r3, sp and pc */
5944 pos += sizeof (MonoLMF) - (MONO_ARM_NUM_SAVED_REGS * sizeof (mgreg_t));
5947 code = emit_restore_lmf (cfg, code, cfg->stack_usage - lmf_offset);
5949 /* This points to r4 inside MonoLMF->iregs */
5950 sp_adj = (sizeof (MonoLMF) - MONO_ARM_NUM_SAVED_REGS * sizeof (mgreg_t));
5952 regmask = 0x9ff0; /* restore lr to pc */
5953 /* Skip caller saved registers not used by the method */
5954 while (!(cfg->used_int_regs & (1 << reg)) && reg < ARMREG_FP) {
5955 regmask &= ~(1 << reg);
5960 /* Restored later */
5961 regmask &= ~(1 << ARMREG_PC);
5962 /* point sp at the registers to restore: 10 is 14 -4, because we skip r0-r3 */
5963 code = emit_big_add (code, ARMREG_SP, cfg->frame_reg, cfg->stack_usage - lmf_offset + sp_adj);
5965 ARM_POP (code, regmask);
5967 /* Restore saved r7, restore LR to PC */
5968 /* Skip lr from the lmf */
5969 ARM_ADD_REG_IMM (code, ARMREG_SP, ARMREG_SP, sizeof (gpointer), 0);
5970 ARM_POP (code, (1 << ARMREG_R7) | (1 << ARMREG_PC));
5973 if ((i = mono_arm_is_rotated_imm8 (cfg->stack_usage, &rot_amount)) >= 0) {
5974 ARM_ADD_REG_IMM (code, ARMREG_SP, cfg->frame_reg, i, rot_amount);
5976 code = mono_arm_emit_load_imm (code, ARMREG_IP, cfg->stack_usage);
5977 ARM_ADD_REG_REG (code, ARMREG_SP, cfg->frame_reg, ARMREG_IP);
5981 /* Restore saved gregs */
5982 if (cfg->used_int_regs)
5983 ARM_POP (code, cfg->used_int_regs);
5984 /* Restore saved r7, restore LR to PC */
5985 ARM_POP (code, (1 << ARMREG_R7) | (1 << ARMREG_PC));
5987 ARM_POP (code, cfg->used_int_regs | (1 << ARMREG_PC));
5991 cfg->code_len = code - cfg->native_code;
5993 g_assert (cfg->code_len < cfg->code_size);
5998 mono_arch_emit_exceptions (MonoCompile *cfg)
6000 MonoJumpInfo *patch_info;
6003 guint8* exc_throw_pos [MONO_EXC_INTRINS_NUM];
6004 guint8 exc_throw_found [MONO_EXC_INTRINS_NUM];
6005 int max_epilog_size = 50;
6007 for (i = 0; i < MONO_EXC_INTRINS_NUM; i++) {
6008 exc_throw_pos [i] = NULL;
6009 exc_throw_found [i] = 0;
6012 /* count the number of exception infos */
6015 * make sure we have enough space for exceptions
6017 for (patch_info = cfg->patch_info; patch_info; patch_info = patch_info->next) {
6018 if (patch_info->type == MONO_PATCH_INFO_EXC) {
6019 i = mini_exception_id_by_name (patch_info->data.target);
6020 if (!exc_throw_found [i]) {
6021 max_epilog_size += 32;
6022 exc_throw_found [i] = TRUE;
6027 while (cfg->code_len + max_epilog_size > (cfg->code_size - 16)) {
6028 cfg->code_size *= 2;
6029 cfg->native_code = g_realloc (cfg->native_code, cfg->code_size);
6030 cfg->stat_code_reallocs++;
6033 code = cfg->native_code + cfg->code_len;
6035 /* add code to raise exceptions */
6036 for (patch_info = cfg->patch_info; patch_info; patch_info = patch_info->next) {
6037 switch (patch_info->type) {
6038 case MONO_PATCH_INFO_EXC: {
6039 MonoClass *exc_class;
6040 unsigned char *ip = patch_info->ip.i + cfg->native_code;
6042 i = mini_exception_id_by_name (patch_info->data.target);
6043 if (exc_throw_pos [i]) {
6044 arm_patch (ip, exc_throw_pos [i]);
6045 patch_info->type = MONO_PATCH_INFO_NONE;
6048 exc_throw_pos [i] = code;
6050 arm_patch (ip, code);
6052 exc_class = mono_class_from_name (mono_defaults.corlib, "System", patch_info->data.name);
6053 g_assert (exc_class);
6055 ARM_MOV_REG_REG (code, ARMREG_R1, ARMREG_LR);
6056 #ifdef USE_JUMP_TABLES
6058 gpointer *jte = mono_jumptable_add_entries (2);
6059 patch_info->type = MONO_PATCH_INFO_INTERNAL_METHOD;
6060 patch_info->data.name = "mono_arch_throw_corlib_exception";
6061 patch_info->ip.i = code - cfg->native_code;
6062 code = mono_arm_load_jumptable_entry_addr (code, jte, ARMREG_R0);
6063 ARM_LDR_IMM (code, ARMREG_IP, ARMREG_R0, 0);
6064 ARM_LDR_IMM (code, ARMREG_R0, ARMREG_R0, 4);
6065 ARM_BLX_REG (code, ARMREG_IP);
6066 jte [1] = GUINT_TO_POINTER (exc_class->type_token);
6069 ARM_LDR_IMM (code, ARMREG_R0, ARMREG_PC, 0);
6070 patch_info->type = MONO_PATCH_INFO_INTERNAL_METHOD;
6071 patch_info->data.name = "mono_arch_throw_corlib_exception";
6072 patch_info->ip.i = code - cfg->native_code;
6074 *(guint32*)(gpointer)code = exc_class->type_token;
6085 cfg->code_len = code - cfg->native_code;
6087 g_assert (cfg->code_len < cfg->code_size);
6091 #endif /* #ifndef DISABLE_JIT */
6094 mono_arch_finish_init (void)
6096 lmf_tls_offset = mono_get_lmf_tls_offset ();
6097 lmf_addr_tls_offset = mono_get_lmf_addr_tls_offset ();
6101 mono_arch_free_jit_tls_data (MonoJitTlsData *tls)
6106 mono_arch_emit_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
6113 mono_arch_print_tree (MonoInst *tree, int arity)
6123 mono_arch_get_patch_offset (guint8 *code)
6130 mono_arch_flush_register_windows (void)
6134 #ifdef MONO_ARCH_HAVE_IMT
/*
 * mono_arch_emit_imt_argument:
 * Arrange for the IMT/method argument of CALL to be passed in register
 * V5 (the ARM IMT register). In AOT mode the method is materialized via
 * an OP_AOTCONST; otherwise either the supplied IMT_ARG or an OP_PCONST
 * with the callee method is used.
 */
6139 mono_arch_emit_imt_argument (MonoCompile *cfg, MonoCallInst *call, MonoInst *imt_arg)
6141 int method_reg = mono_alloc_ireg (cfg);
6142 #ifdef USE_JUMP_TABLES
6143 int use_jumptables = TRUE;
6145 int use_jumptables = FALSE;
6148 if (cfg->compile_aot) {
6151 call->dynamic_imt_arg = TRUE;
6154 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, method_reg, imt_arg->dreg);
/* No imt_arg: load the method as an AOT constant (patched at load time). */
6156 MONO_INST_NEW (cfg, ins, OP_AOTCONST);
6157 ins->dreg = method_reg;
6158 ins->inst_p0 = call->method;
6159 ins->inst_c1 = MONO_PATCH_INFO_METHODCONST;
6160 MONO_ADD_INS (cfg->cbb, ins);
6162 mono_call_inst_add_outarg_reg (cfg, call, method_reg, ARMREG_V5, FALSE);
6163 } else if (cfg->generic_context || imt_arg || mono_use_llvm || use_jumptables) {
6164 /* Always pass in a register for simplicity */
6165 call->dynamic_imt_arg = TRUE;
6167 cfg->uses_rgctx_reg = TRUE;
6170 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, method_reg, imt_arg->dreg);
/* No imt_arg: pass the callee method itself as a pointer constant. */
6174 MONO_INST_NEW (cfg, ins, OP_PCONST);
6175 ins->inst_p0 = call->method;
6176 ins->dreg = method_reg;
6177 MONO_ADD_INS (cfg->cbb, ins);
6180 mono_call_inst_add_outarg_reg (cfg, call, method_reg, ARMREG_V5, FALSE);
6184 #endif /* DISABLE_JIT */
/*
 * mono_arch_find_imt_method:
 * Recover the IMT method at an IMT call site: from register V5 (jumptable
 * builds, LLVM/AOT code) or from the word embedded in the code stream
 * right after the PC-relative LDR at CODE.
 */
6187 mono_arch_find_imt_method (mgreg_t *regs, guint8 *code)
6189 #ifdef USE_JUMP_TABLES
/* With jumptables the IMT method is always passed in V5. */
6190 return (MonoMethod*)regs [ARMREG_V5];
6193 guint32 *code_ptr = (guint32*)code;
6195 method = GUINT_TO_POINTER (code_ptr [1]);
/* NOTE(review): an elided condition above appears to select this early
 * return (the LLVM path per the V5 convention) — confirm against the
 * full source. */
6199 return (MonoMethod*)regs [ARMREG_V5];
6201 /* The IMT value is stored in the code stream right after the LDC instruction. */
6202 /* This is no longer true for the gsharedvt_in trampoline */
6204 if (!IS_LDR_PC (code_ptr [0])) {
6205 g_warning ("invalid code stream, instruction before IMT value is not a LDC in %s() (code %p value 0: 0x%x -1: 0x%x -2: 0x%x)", __FUNCTION__, code, code_ptr [2], code_ptr [1], code_ptr [0]);
6206 g_assert (IS_LDR_PC (code_ptr [0]));
6210 /* This is AOTed code, or the gsharedvt trampoline, the IMT method is in V5 */
6211 return (MonoMethod*)regs [ARMREG_V5];
6213 return (MonoMethod*) method;
/*
 * mono_arch_find_static_call_vtable:
 * The vtable of a static rgctx call is passed in the RGCTX register.
 */
6218 mono_arch_find_static_call_vtable (mgreg_t *regs, guint8 *code)
6220 return (MonoVTable*) regs [MONO_ARCH_RGCTX_REG];
6223 /* #define ENABLE_WRONG_METHOD_CHECK 1 */
/* Code-size budgets (in bytes; each ARM instruction is 4 bytes) used to
 * pre-compute the IMT thunk allocation in mono_arch_build_imt_thunk. */
6224 #define BASE_SIZE (6 * 4)
6225 #define BSEARCH_ENTRY_SIZE (4 * 4)
6226 #define CMP_SIZE (3 * 4)
6227 #define BRANCH_SIZE (1 * 4)
6228 #define CALL_SIZE (2 * 4)
6229 #define WMC_SIZE (8 * 4)
/* Byte distance between two pointers, truncated to 32 bits. */
6230 #define DISTANCE(A, B) (((gint32)(B)) - ((gint32)(A)))
6232 #ifdef USE_JUMP_TABLES
/* Store VALUE into jumptable slot INDEX; asserts the slot is unset,
 * i.e. each slot may only be filled once. */
6234 set_jumptable_element (gpointer *base, guint32 index, gpointer value)
6236 g_assert (base [index] == NULL);
6237 base [index] = value;
/* Emit a (conditional) load of jumptable entry JTI into DREG, with BASE
 * holding the jumptable base address. Uses a single LDR when the byte
 * offset fits in an imm12, otherwise materializes it with MOVW/MOVT
 * (which requires ARMv7 MOVW/MOVT support) and a register-offset LDR. */
6240 load_element_with_regbase_cond (arminstr_t *code, ARMReg dreg, ARMReg base, guint32 jti, int cond)
6242 if (arm_is_imm12 (jti * 4)) {
6243 ARM_LDR_IMM_COND (code, dreg, base, jti * 4, cond);
6245 ARM_MOVW_REG_IMM_COND (code, dreg, (jti * 4) & 0xffff, cond);
6246 if ((jti * 4) >> 16)
6247 ARM_MOVT_REG_IMM_COND (code, dreg, ((jti * 4) >> 16) & 0xffff, cond);
6248 ARM_LDR_REG_REG_SHIFT_COND (code, dreg, base, dreg, ARMSHIFT_LSL, 0, cond);
/* Emit VALUE into the code stream at CODE and back-patch the imm12
 * displacement of the PC-relative LDR previously emitted at TARGET so
 * it loads the value just written. */
6254 arm_emit_value_and_patch_ldr (arminstr_t *code, arminstr_t *target, guint32 value)
6256 guint32 delta = DISTANCE (target, code);
/* NOTE(review): delta is guint32, so `delta >= 0` is vacuously true —
 * the assert only enforces the 0xFFF (imm12) upper bound. A negative
 * DISTANCE would wrap to a huge unsigned value and still trip the
 * upper-bound check, but the first clause is dead code. */
6258 g_assert (delta >= 0 && delta <= 0xFFF);
6259 *target = *target | delta;
6265 #ifdef ENABLE_WRONG_METHOD_CHECK
/* Debug helper invoked from the (normally disabled) wrong-method check
 * emitted into IMT thunks: reports a failed IMT comparison. */
6267 mini_dump_bad_imt (int input_imt, int compared_imt, int pc)
6269 g_print ("BAD IMT comparing %x with expected %x at ip %x", input_imt, compared_imt, pc);
/*
 * mono_arch_build_imt_thunk:
 * Build the native IMT dispatch thunk for VTABLE: a sequence of
 * compare-and-branch entries (linear for equals-entries, binary-search
 * branches otherwise) that routes an interface call, identified by the
 * IMT method in R0/V5, to the right vtable slot or to FAIL_TRAMP.
 * Structured as three passes: (1) size computation, (2) code emission,
 * (3) branch/jumptable patching and constant-pool flushing.
 * NOTE(review): closing braces and some statements are elided in this
 * excerpt; comments describe only the visible code.
 */
6275 mono_arch_build_imt_thunk (MonoVTable *vtable, MonoDomain *domain, MonoIMTCheckItem **imt_entries, int count,
6276 gpointer fail_tramp)
6279 arminstr_t *code, *start;
6280 #ifdef USE_JUMP_TABLES
6283 gboolean large_offsets = FALSE;
6284 guint32 **constant_pool_starts;
6285 arminstr_t *vtable_target = NULL;
6286 int extra_space = 0;
6288 #ifdef ENABLE_WRONG_METHOD_CHECK
/* Pass 1: compute each entry's chunk_size and the total thunk size. */
6293 #ifdef USE_JUMP_TABLES
/* Jumptable builds use a fixed, generous per-entry budget. */
6294 for (i = 0; i < count; ++i) {
6295 MonoIMTCheckItem *item = imt_entries [i];
6296 item->chunk_size += 4 * 16;
6297 if (!item->is_equals)
6298 imt_entries [item->check_target_idx]->compare_done = TRUE;
6299 size += item->chunk_size;
6302 constant_pool_starts = g_new0 (guint32*, count);
6304 for (i = 0; i < count; ++i) {
6305 MonoIMTCheckItem *item = imt_entries [i];
6306 if (item->is_equals) {
6307 gboolean fail_case = !item->check_target_idx && fail_tramp;
/* Large vtable offsets (or explicit target code) need the long
 * push/LDM sequence, so budget extra space. */
6309 if (item->has_target_code || !arm_is_imm12 (DISTANCE (vtable, &vtable->vtable[item->value.vtable_slot]))) {
6310 item->chunk_size += 32;
6311 large_offsets = TRUE;
6314 if (item->check_target_idx || fail_case) {
6315 if (!item->compare_done || fail_case)
6316 item->chunk_size += CMP_SIZE;
6317 item->chunk_size += BRANCH_SIZE;
6319 #ifdef ENABLE_WRONG_METHOD_CHECK
6320 item->chunk_size += WMC_SIZE;
6324 item->chunk_size += 16;
6325 large_offsets = TRUE;
6327 item->chunk_size += CALL_SIZE;
/* Binary-search (non-equals) entry. */
6329 item->chunk_size += BSEARCH_ENTRY_SIZE;
6330 imt_entries [item->check_target_idx]->compare_done = TRUE;
6332 size += item->chunk_size;
6336 size += 4 * count; /* The ARM_ADD_REG_IMM to pop the stack */
/* Allocate the thunk: special allocator when a fail trampoline is used
 * (generic virtual thunks are freeable), plain code reservation otherwise. */
6340 code = mono_method_alloc_generic_virtual_thunk (domain, size);
6342 code = mono_domain_code_reserve (domain, size);
6346 g_print ("Building IMT thunk for class %s %s entries %d code size %d code at %p end %p vtable %p fail_tramp %p\n", vtable->klass->name_space, vtable->klass->name, count, size, start, ((guint8*)start) + size, vtable, fail_tramp);
6347 for (i = 0; i < count; ++i) {
6348 MonoIMTCheckItem *item = imt_entries [i];
6349 g_print ("method %d (%p) %s vtable slot %p is_equals %d chunk size %d\n", i, item->key, ((MonoMethod*)item->key)->name, &vtable->vtable [item->value.vtable_slot], item->is_equals, item->chunk_size);
/* Emit the thunk prologue: save scratch registers and normalize the
 * IMT method into R0. */
6353 #ifdef USE_JUMP_TABLES
6354 ARM_PUSH3 (code, ARMREG_R0, ARMREG_R1, ARMREG_R2);
6355 /* If jumptables we always pass the IMT method in R5 */
6356 ARM_MOV_REG_REG (code, ARMREG_R0, ARMREG_V5);
/* Jumptable layout: slot 0 = vtable, then 3 slots per entry
 * (IMT method, target code, jump target). */
6357 #define VTABLE_JTI 0
6358 #define IMT_METHOD_OFFSET 0
6359 #define TARGET_CODE_OFFSET 1
6360 #define JUMP_CODE_OFFSET 2
6361 #define RECORDS_PER_ENTRY 3
6362 #define IMT_METHOD_JTI(idx) (1 + idx * RECORDS_PER_ENTRY + IMT_METHOD_OFFSET)
6363 #define TARGET_CODE_JTI(idx) (1 + idx * RECORDS_PER_ENTRY + TARGET_CODE_OFFSET)
6364 #define JUMP_CODE_JTI(idx) (1 + idx * RECORDS_PER_ENTRY + JUMP_CODE_OFFSET)
6366 jte = mono_jumptable_add_entries (RECORDS_PER_ENTRY * count + 1 /* vtable */);
6367 code = (arminstr_t *) mono_arm_load_jumptable_entry_addr ((guint8 *) code, jte, ARMREG_R2);
6368 ARM_LDR_IMM (code, ARMREG_IP, ARMREG_R2, VTABLE_JTI);
6369 set_jumptable_element (jte, VTABLE_JTI, vtable);
6372 ARM_PUSH4 (code, ARMREG_R0, ARMREG_R1, ARMREG_IP, ARMREG_PC);
6374 ARM_PUSH2 (code, ARMREG_R0, ARMREG_R1);
/* The IMT method was embedded in the code stream just before the call
 * site; LR-4 points at it. */
6375 ARM_LDR_IMM (code, ARMREG_R0, ARMREG_LR, -4);
6376 vtable_target = code;
6377 ARM_LDR_IMM (code, ARMREG_IP, ARMREG_PC, 0);
6379 if (mono_use_llvm) {
6380 /* LLVM always passes the IMT method in R5 */
6381 ARM_MOV_REG_REG (code, ARMREG_R0, ARMREG_V5);
6383 /* R0 == 0 means we are called from AOT code. In this case, V5 contains the IMT method */
6384 ARM_CMP_REG_IMM8 (code, ARMREG_R0, 0);
6385 ARM_MOV_REG_REG_COND (code, ARMREG_R0, ARMREG_V5, ARMCOND_EQ);
/* Pass 2: emit the per-entry compare/branch/dispatch code. */
6389 for (i = 0; i < count; ++i) {
6390 MonoIMTCheckItem *item = imt_entries [i];
6391 #ifdef USE_JUMP_TABLES
6392 guint32 imt_method_jti = 0, target_code_jti = 0;
6394 arminstr_t *imt_method = NULL, *vtable_offset_ins = NULL, *target_code_ins = NULL;
6396 gint32 vtable_offset;
6398 item->code_target = (guint8*)code;
6400 if (item->is_equals) {
6401 gboolean fail_case = !item->check_target_idx && fail_tramp;
6403 if (item->check_target_idx || fail_case) {
6404 if (!item->compare_done || fail_case) {
/* Compare R0 (IMT method) with this entry's key. */
6405 #ifdef USE_JUMP_TABLES
6406 imt_method_jti = IMT_METHOD_JTI (i);
6407 code = load_element_with_regbase_cond (code, ARMREG_R1, ARMREG_R2, imt_method_jti, ARMCOND_AL);
6410 ARM_LDR_IMM (code, ARMREG_R1, ARMREG_PC, 0);
6412 ARM_CMP_REG_REG (code, ARMREG_R0, ARMREG_R1);
/* On mismatch, branch to the next check (patched in pass 3). */
6414 #ifdef USE_JUMP_TABLES
6415 code = load_element_with_regbase_cond (code, ARMREG_R1, ARMREG_R2, JUMP_CODE_JTI (i), ARMCOND_NE);
6416 ARM_BX_COND (code, ARMCOND_NE, ARMREG_R1);
6417 item->jmp_code = GUINT_TO_POINTER (JUMP_CODE_JTI (i));
6419 item->jmp_code = (guint8*)code;
6420 ARM_B_COND (code, ARMCOND_NE, 0);
6423 /*Enable the commented code to assert on wrong method*/
6424 #ifdef ENABLE_WRONG_METHOD_CHECK
6425 #ifdef USE_JUMP_TABLES
6426 imt_method_jti = IMT_METHOD_JTI (i);
6427 code = load_element_with_regbase_cond (code, ARMREG_R1, ARMREG_R2, imt_method_jti, ARMCOND_AL);
6430 ARM_LDR_IMM (code, ARMREG_R1, ARMREG_PC, 0);
6432 ARM_CMP_REG_REG (code, ARMREG_R0, ARMREG_R1);
6434 ARM_B_COND (code, ARMCOND_EQ, 0);
6436 /* Define this if your system is so bad that gdb is failing. */
6437 #ifdef BROKEN_DEV_ENV
6438 ARM_MOV_REG_REG (code, ARMREG_R2, ARMREG_PC);
6440 arm_patch (code - 1, mini_dump_bad_imt);
6444 arm_patch (cond, code);
6448 if (item->has_target_code) {
6449 /* Load target address */
6450 #ifdef USE_JUMP_TABLES
6451 target_code_jti = TARGET_CODE_JTI (i);
6452 code = load_element_with_regbase_cond (code, ARMREG_R1, ARMREG_R2, target_code_jti, ARMCOND_AL);
6453 /* Restore registers */
6454 ARM_POP3 (code, ARMREG_R0, ARMREG_R1, ARMREG_R2);
6456 ARM_BX (code, ARMREG_R1);
6457 set_jumptable_element (jte, target_code_jti, item->value.target_code);
6459 target_code_ins = code;
6460 ARM_LDR_IMM (code, ARMREG_R1, ARMREG_PC, 0);
6461 /* Save it to the fourth slot */
6462 ARM_STR_IMM (code, ARMREG_R1, ARMREG_SP, 3 * sizeof (gpointer));
6463 /* Restore registers and branch */
6464 ARM_POP4 (code, ARMREG_R0, ARMREG_R1, ARMREG_IP, ARMREG_PC);
6466 code = arm_emit_value_and_patch_ldr (code, target_code_ins, (gsize)item->value.target_code);
/* Dispatch through the vtable slot. */
6469 vtable_offset = DISTANCE (vtable, &vtable->vtable[item->value.vtable_slot]);
6470 if (!arm_is_imm12 (vtable_offset)) {
6472 * We need to branch to a computed address but we don't have
6473 * a free register to store it, since IP must contain the
6474 * vtable address. So we push the two values to the stack, and
6475 * load them both using LDM.
6477 /* Compute target address */
6478 #ifdef USE_JUMP_TABLES
6479 ARM_MOVW_REG_IMM (code, ARMREG_R1, vtable_offset & 0xffff);
6480 if (vtable_offset >> 16)
6481 ARM_MOVT_REG_IMM (code, ARMREG_R1, (vtable_offset >> 16) & 0xffff);
6482 /* IP had vtable base. */
6483 ARM_LDR_REG_REG (code, ARMREG_IP, ARMREG_IP, ARMREG_R1);
6484 /* Restore registers and branch */
6485 ARM_POP3 (code, ARMREG_R0, ARMREG_R1, ARMREG_R2);
6486 ARM_BX (code, ARMREG_IP);
6488 vtable_offset_ins = code;
6489 ARM_LDR_IMM (code, ARMREG_R1, ARMREG_PC, 0);
6490 ARM_LDR_REG_REG (code, ARMREG_R1, ARMREG_IP, ARMREG_R1);
6491 /* Save it to the fourth slot */
6492 ARM_STR_IMM (code, ARMREG_R1, ARMREG_SP, 3 * sizeof (gpointer));
6493 /* Restore registers and branch */
6494 ARM_POP4 (code, ARMREG_R0, ARMREG_R1, ARMREG_IP, ARMREG_PC);
6496 code = arm_emit_value_and_patch_ldr (code, vtable_offset_ins, vtable_offset);
/* Small (imm12) vtable offset: load the target PC directly. */
6499 #ifdef USE_JUMP_TABLES
6500 ARM_LDR_IMM (code, ARMREG_IP, ARMREG_IP, vtable_offset);
6501 ARM_POP3 (code, ARMREG_R0, ARMREG_R1, ARMREG_R2);
6502 ARM_BX (code, ARMREG_IP);
6504 ARM_POP2 (code, ARMREG_R0, ARMREG_R1);
6506 ARM_ADD_REG_IMM8 (code, ARMREG_SP, ARMREG_SP, 2 * sizeof (gpointer));
6507 ARM_LDR_IMM (code, ARMREG_PC, ARMREG_IP, vtable_offset);
/* fail_case: route the mismatch branch to FAIL_TRAMP. */
6513 #ifdef USE_JUMP_TABLES
6514 set_jumptable_element (jte, GPOINTER_TO_UINT (item->jmp_code), code);
6515 target_code_jti = TARGET_CODE_JTI (i);
6516 /* Load target address */
6517 code = load_element_with_regbase_cond (code, ARMREG_R1, ARMREG_R2, target_code_jti, ARMCOND_AL);
6518 /* Restore registers */
6519 ARM_POP3 (code, ARMREG_R0, ARMREG_R1, ARMREG_R2);
6521 ARM_BX (code, ARMREG_R1);
6522 set_jumptable_element (jte, target_code_jti, fail_tramp);
6524 arm_patch (item->jmp_code, (guchar*)code);
6526 target_code_ins = code;
6527 /* Load target address */
6528 ARM_LDR_IMM (code, ARMREG_R1, ARMREG_PC, 0);
6529 /* Save it to the fourth slot */
6530 ARM_STR_IMM (code, ARMREG_R1, ARMREG_SP, 3 * sizeof (gpointer));
6531 /* Restore registers and branch */
6532 ARM_POP4 (code, ARMREG_R0, ARMREG_R1, ARMREG_IP, ARMREG_PC);
6534 code = arm_emit_value_and_patch_ldr (code, target_code_ins, (gsize)fail_tramp);
6536 item->jmp_code = NULL;
/* Record this entry's key (jumptable slot or inline constant). */
6539 #ifdef USE_JUMP_TABLES
6541 set_jumptable_element (jte, imt_method_jti, item->key);
6544 code = arm_emit_value_and_patch_ldr (code, imt_method, (guint32)item->key);
6546 /*must emit after unconditional branch*/
6547 if (vtable_target) {
6548 code = arm_emit_value_and_patch_ldr (code, vtable_target, (guint32)vtable);
6549 item->chunk_size += 4;
6550 vtable_target = NULL;
6553 /*We reserve the space for bsearch IMT values after the first entry with an absolute jump*/
6554 constant_pool_starts [i] = code;
6556 code += extra_space;
/* Binary-search entry: branch when R0 >= this entry's key. */
6561 #ifdef USE_JUMP_TABLES
6562 code = load_element_with_regbase_cond (code, ARMREG_R1, ARMREG_R2, IMT_METHOD_JTI (i), ARMCOND_AL);
6563 ARM_CMP_REG_REG (code, ARMREG_R0, ARMREG_R1);
6564 code = load_element_with_regbase_cond (code, ARMREG_R1, ARMREG_R2, JUMP_CODE_JTI (i), ARMCOND_HS);
6565 ARM_BX_COND (code, ARMCOND_HS, ARMREG_R1);
6566 item->jmp_code = GUINT_TO_POINTER (JUMP_CODE_JTI (i));
6568 ARM_LDR_IMM (code, ARMREG_R1, ARMREG_PC, 0);
6569 ARM_CMP_REG_REG (code, ARMREG_R0, ARMREG_R1);
6571 item->jmp_code = (guint8*)code;
6572 ARM_B_COND (code, ARMCOND_HS, 0);
/* Pass 3: patch pending branches to their resolved targets and emit the
 * bsearch key constants into the reserved constant-pool space. */
6578 for (i = 0; i < count; ++i) {
6579 MonoIMTCheckItem *item = imt_entries [i];
6580 if (item->jmp_code) {
6581 if (item->check_target_idx)
6582 #ifdef USE_JUMP_TABLES
6583 set_jumptable_element (jte, GPOINTER_TO_UINT (item->jmp_code), imt_entries [item->check_target_idx]->code_target);
6585 arm_patch (item->jmp_code, imt_entries [item->check_target_idx]->code_target);
6588 if (i > 0 && item->is_equals) {
6590 #ifdef USE_JUMP_TABLES
6591 for (j = i - 1; j >= 0 && !imt_entries [j]->is_equals; --j)
6592 set_jumptable_element (jte, IMT_METHOD_JTI (j), imt_entries [j]->key)
6594 arminstr_t *space_start = constant_pool_starts [i];
6595 for (j = i - 1; j >= 0 && !imt_entries [j]->is_equals; --j) {
6596 space_start = arm_emit_value_and_patch_ldr (space_start, (arminstr_t*)imt_entries [j]->code_target, (guint32)imt_entries [j]->key);
6604 char *buff = g_strdup_printf ("thunk_for_class_%s_%s_entries_%d", vtable->klass->name_space, vtable->klass->name, count);
6605 mono_disassemble_code (NULL, (guint8*)start, size, buff);
6610 #ifndef USE_JUMP_TABLES
6611 g_free (constant_pool_starts);
/* Make the new code visible to the instruction stream and verify the
 * size estimate from pass 1 was not exceeded. */
6614 mono_arch_flush_icache ((guint8*)start, size);
6615 mono_stats.imt_thunks_size += code - start;
6617 g_assert (DISTANCE (start, code) <= size);
/* Read general-purpose register REG out of the saved context CTX. */
6624 mono_arch_context_get_int_reg (MonoContext *ctx, int reg)
6626 return ctx->regs [reg];
/* Write VAL into general-purpose register REG of the saved context CTX. */
6630 mono_arch_context_set_int_reg (MonoContext *ctx, int reg, mgreg_t val)
6632 ctx->regs [reg] = val;
6636 * mono_arch_get_trampolines:
6638 * Return a list of MonoTrampInfo structures describing arch specific trampolines
/* Delegates to the ARM exception-trampoline factory; AOT selects the
 * AOT-compatible variants. */
6642 mono_arch_get_trampolines (gboolean aot)
6644 return mono_arm_get_exception_trampolines (aot);
6648 #ifdef MONO_ARCH_SOFT_DEBUG_SUPPORTED
6650 * mono_arch_set_breakpoint:
6652 * Set a breakpoint at the native code corresponding to JI at NATIVE_OFFSET.
6653 * The location should contain code emitted by OP_SEQ_POINT.
6656 mono_arch_set_breakpoint (MonoJitInfo *ji, guint8 *ip)
6659 guint32 native_offset = ip - (guint8*)ji->code_start;
6660 MonoDebugOptions *opt = mini_get_debug_options ();
6662 if (opt->soft_breakpoints) {
/* Soft breakpoints: turn the seq-point site into a call through LR. */
6663 g_assert (!ji->from_aot);
6665 ARM_BLX_REG (code, ARMREG_LR);
6666 mono_arch_flush_icache (code - 4, 4);
6667 } else if (ji->from_aot) {
/* AOT: flip the per-offset slot to point at the breakpoint trigger
 * page; the AOTed seq-point code reads this table. */
6668 SeqPointInfo *info = mono_arch_get_seq_point_info (mono_domain_get (), ji->code_start);
6670 g_assert (native_offset % 4 == 0);
6671 g_assert (info->bp_addrs [native_offset / 4] == 0);
6672 info->bp_addrs [native_offset / 4] = bp_trigger_page;
6674 int dreg = ARMREG_LR;
6676 /* Read from another trigger page */
/* JITted code: patch in a load from the breakpoint trigger page so
 * execution faults there, which the debugger agent intercepts. */
6677 #ifdef USE_JUMP_TABLES
6678 gpointer *jte = mono_jumptable_add_entry ();
6679 code = mono_arm_load_jumptable_entry (code, jte, dreg);
6680 jte [0] = bp_trigger_page;
6682 ARM_LDR_IMM (code, dreg, ARMREG_PC, 0);
6684 *(int*)code = (int)bp_trigger_page;
6687 ARM_LDR_IMM (code, dreg, dreg, 0);
6689 mono_arch_flush_icache (code - 16, 16);
6692 /* This is currently implemented by emitting an SWI instruction, which
6693 * qemu/linux seems to convert to a SIGILL.
6695 *(int*)code = (0xef << 24) | 8;
6697 mono_arch_flush_icache (code - 4, 4);
6703 * mono_arch_clear_breakpoint:
6705 * Clear the breakpoint at IP.
6708 mono_arch_clear_breakpoint (MonoJitInfo *ji, guint8 *ip)
6710 MonoDebugOptions *opt = mini_get_debug_options ();
6714 if (opt->soft_breakpoints) {
/* Soft breakpoints: restore the original instruction (elided here). */
6715 g_assert (!ji->from_aot);
6718 mono_arch_flush_icache (code - 4, 4);
6719 } else if (ji->from_aot) {
/* AOT: reset the per-offset trigger-page slot set by set_breakpoint. */
6720 guint32 native_offset = ip - (guint8*)ji->code_start;
6721 SeqPointInfo *info = mono_arch_get_seq_point_info (mono_domain_get (), ji->code_start);
6723 g_assert (native_offset % 4 == 0);
6724 g_assert (info->bp_addrs [native_offset / 4] == bp_trigger_page);
6725 info->bp_addrs [native_offset / 4] = 0;
/* JITted code: overwrite the 4-instruction breakpoint sequence
 * (loop body elided in this excerpt). */
6727 for (i = 0; i < 4; ++i)
6730 mono_arch_flush_icache (ip, code - ip);
6735 * mono_arch_start_single_stepping:
6737 * Start single stepping.
/* Revoke all access to the single-step trigger page so every seq-point
 * read faults, signalling a single-step event. */
6740 mono_arch_start_single_stepping (void)
6742 if (ss_trigger_page)
6743 mono_mprotect (ss_trigger_page, mono_pagesize (), 0);
6749 * mono_arch_stop_single_stepping:
6751 * Stop single stepping.
/* Re-enable reads of the trigger page so seq-point loads succeed again. */
6754 mono_arch_stop_single_stepping (void)
6756 if (ss_trigger_page)
6757 mono_mprotect (ss_trigger_page, mono_pagesize (), MONO_MMAP_READ);
/* Signal delivered for trigger-page faults; platform-dependent
 * (#ifdef condition elided in this excerpt). */
6763 #define DBG_SIGNAL SIGBUS
6765 #define DBG_SIGNAL SIGSEGV
6769 * mono_arch_is_single_step_event:
6771 * Return whenever the machine state in SIGCTX corresponds to a single
/* True when the faulting address falls inside the single-step trigger
 * page (with slack — see comment below). */
6775 mono_arch_is_single_step_event (void *info, void *sigctx)
6777 siginfo_t *sinfo = info;
6779 if (!ss_trigger_page)
6782 /* Sometimes the address is off by 4 */
6783 if (sinfo->si_addr >= ss_trigger_page && (guint8*)sinfo->si_addr <= (guint8*)ss_trigger_page + 128)
6790 * mono_arch_is_breakpoint_event:
6792 * Return whenever the machine state in SIGCTX corresponds to a breakpoint event.
6795 mono_arch_is_breakpoint_event (void *info, void *sigctx)
6797 siginfo_t *sinfo = info;
6799 if (!ss_trigger_page)
/* Breakpoint events additionally require the expected signal number. */
6802 if (sinfo->si_signo == DBG_SIGNAL) {
6803 /* Sometimes the address is off by 4 */
6804 if (sinfo->si_addr >= bp_trigger_page && (guint8*)sinfo->si_addr <= (guint8*)bp_trigger_page + 128)
6814 * mono_arch_skip_breakpoint:
6816 * See mini-amd64.c for docs.
/* Resume past the 4-byte faulting load that implements the breakpoint. */
6819 mono_arch_skip_breakpoint (MonoContext *ctx, MonoJitInfo *ji)
6821 MONO_CONTEXT_SET_IP (ctx, (guint8*)MONO_CONTEXT_GET_IP (ctx) + 4);
6825 * mono_arch_skip_single_step:
6827 * See mini-amd64.c for docs.
/* Resume past the 4-byte faulting load that implements the seq point. */
6830 mono_arch_skip_single_step (MonoContext *ctx)
6832 MONO_CONTEXT_SET_IP (ctx, (guint8*)MONO_CONTEXT_GET_IP (ctx) + 4);
6835 #endif /* MONO_ARCH_SOFT_DEBUG_SUPPORTED */
6838 * mono_arch_get_seq_point_info:
6840 * See mini-amd64.c for docs.
/* Lazily build (and cache per-domain, under the domain lock) the
 * SeqPointInfo for the method whose code starts at CODE; used by AOTed
 * code to locate the trigger pages and breakpoint slots. */
6843 mono_arch_get_seq_point_info (MonoDomain *domain, guint8 *code)
6848 // FIXME: Add a free function
6850 mono_domain_lock (domain);
6851 info = g_hash_table_lookup (domain_jit_info (domain)->arch_seq_points,
6853 mono_domain_unlock (domain);
6856 ji = mono_jit_info_table_find (domain, (char*)code);
/* One slot per 4-byte instruction: the trailing array is sized by
 * the method's code size. */
6859 info = g_malloc0 (sizeof (SeqPointInfo) + ji->code_size);
6861 info->ss_trigger_page = ss_trigger_page;
6862 info->bp_trigger_page = bp_trigger_page;
6864 mono_domain_lock (domain);
6865 g_hash_table_insert (domain_jit_info (domain)->arch_seq_points,
6867 mono_domain_unlock (domain);
/* Initialize an extended LMF frame: link it to PREV_LMF and tag the
 * low bits of previous_lmf with 2 so unwinders can tell a MonoLMFExt
 * apart from a plain MonoLMF. */
6874 mono_arch_init_lmf_ext (MonoLMFExt *ext, gpointer prev_lmf)
6876 ext->lmf.previous_lmf = prev_lmf;
6877 /* Mark that this is a MonoLMFExt */
6878 ext->lmf.previous_lmf = (gpointer)(((gssize)ext->lmf.previous_lmf) | 2);
6879 ext->lmf.sp = (gssize)ext;
6883 * mono_arch_set_target:
6885 * Set the target architecture the JIT backend should generate code for, in the form
6886 * of a GNU target triplet. Only used in AOT mode.
6889 mono_arch_set_target (char *mtriple)
6891 /* The GNU target triple format is not very well documented */
/* Note: strstr means "armv7" also matches "armv7s" triples, so those
 * get the v5/v6/v7 flags here plus v7s below. */
6892 if (strstr (mtriple, "armv7")) {
6893 v5_supported = TRUE;
6894 v6_supported = TRUE;
6895 v7_supported = TRUE;
6897 if (strstr (mtriple, "armv6")) {
6898 v5_supported = TRUE;
6899 v6_supported = TRUE;
6901 if (strstr (mtriple, "armv7s")) {
6902 v7s_supported = TRUE;
6904 if (strstr (mtriple, "thumbv7s")) {
6905 v5_supported = TRUE;
6906 v6_supported = TRUE;
6907 v7_supported = TRUE;
6908 v7s_supported = TRUE;
6909 thumb_supported = TRUE;
6910 thumb2_supported = TRUE;
/* Apple targets always have at least ARMv6 + Thumb. */
6912 if (strstr (mtriple, "darwin") || strstr (mtriple, "ios")) {
6913 v5_supported = TRUE;
6914 v6_supported = TRUE;
6915 thumb_supported = TRUE;
6918 if (strstr (mtriple, "gnueabi"))
6919 eabi_supported = TRUE;
6922 #if defined(ENABLE_GSHAREDVT)
6924 #include "../../../mono-extensions/mono/mini/mini-arm-gsharedvt.c"
6926 #endif /* !MONOTOUCH */