2 * mini-arm.c: ARM backend for the Mono code generator
5 * Paolo Molaro (lupus@ximian.com)
6 * Dietmar Maurer (dietmar@ximian.com)
8 * (C) 2003 Ximian, Inc.
9 * Copyright 2003-2011 Novell, Inc (http://www.novell.com)
10 * Copyright 2011 Xamarin, Inc (http://www.xamarin.com)
15 #include <mono/metadata/abi-details.h>
16 #include <mono/metadata/appdomain.h>
17 #include <mono/metadata/debug-helpers.h>
18 #include <mono/utils/mono-mmap.h>
19 #include <mono/utils/mono-hwcap-arm.h>
25 #include "debugger-agent.h"
27 #include "mono/arch/arm/arm-vfp-codegen.h"
/*
 * NOTE(review): this chunk is an extraction artifact — each line carries its
 * original line number, and lines (e.g. the #else/#endif of these
 * conditionals, and the ARM_FPU_VFP branch) are missing from this view.
 */
/* Compile-time sanity check: soft-float and a VFP variant are mutually exclusive. */
29 /* Sanity check: This makes no sense */
30 #if defined(ARM_FPU_NONE) && (defined(ARM_FPU_VFP) || defined(ARM_FPU_VFP_HARD))
31 #error "ARM_FPU_NONE is defined while one of ARM_FPU_VFP/ARM_FPU_VFP_HARD is defined"
/*
 * IS_SOFT_FLOAT / IS_HARD_FLOAT / IS_VFP classify the floating point setup
 * the backend targets. They are compile-time constants except in the
 * ARM_FPU_NONE (soft-float fallback) build, where they dispatch at runtime
 * through mono_arch_is_soft_float ().
 */
35 * IS_SOFT_FLOAT: Is full software floating point used?
36 * IS_HARD_FLOAT: Is full hardware floating point used?
37 * IS_VFP: Is hardware floating point with software ABI used?
39 * These are not necessarily constants, e.g. IS_SOFT_FLOAT and
40 * IS_VFP may delegate to mono_arch_is_soft_float ().
43 #if defined(ARM_FPU_VFP_HARD)
44 #define IS_SOFT_FLOAT (FALSE)
45 #define IS_HARD_FLOAT (TRUE)
47 #elif defined(ARM_FPU_NONE)
48 #define IS_SOFT_FLOAT (mono_arch_is_soft_float ())
49 #define IS_HARD_FLOAT (FALSE)
50 #define IS_VFP (!mono_arch_is_soft_float ())
/* presumably the #else (plain ARM_FPU_VFP) branch — the #elif/#else line is not visible here */
52 #define IS_SOFT_FLOAT (FALSE)
53 #define IS_HARD_FLOAT (FALSE)
/* __aeabi_read_tp is a fast TLS accessor only available on EABI Linux (not Android/NaCl). */
57 #if defined(__ARM_EABI__) && defined(__linux__) && !defined(PLATFORM_ANDROID) && !defined(__native_client__)
58 #define HAVE_AEABI_READ_TP 1
/*
 * Native Client (NaCl) code generation support. Only the alignment
 * constants are real; the pad/skip-nops entry points are unimplemented
 * stubs that abort if reached.
 */
61 #ifdef __native_client_codegen__
62 const guint kNaClAlignment = kNaClAlignmentARM;
63 const guint kNaClAlignmentMask = kNaClAlignmentMaskARM;
64 gint8 nacl_align_byte = -1; /* 0xff */
/* Stub: NaCl bundle padding is not implemented for ARM. */
67 mono_arch_nacl_pad (guint8 *code, int pad)
69 /* Not yet properly implemented. */
70 g_assert_not_reached ();
/* Stub: skipping NaCl nop padding is not implemented for ARM. */
75 mono_arch_nacl_skip_nops (guint8 *code)
77 /* Not yet properly implemented. */
78 g_assert_not_reached ();
82 #endif /* __native_client_codegen__ */
/* Round val up to the next multiple of align (align must be a power of two). */
84 #define ALIGN_TO(val,align) ((((guint64)val) + ((align) - 1)) & ~((align) - 1))
/* Darwin icache-flush primitive; prototype given here since no public header exposes it. */
87 void sys_icache_invalidate (void *start, size_t len);
90 /* This mutex protects architecture specific caches */
91 #define mono_mini_arch_lock() EnterCriticalSection (&mini_arch_mutex)
92 #define mono_mini_arch_unlock() LeaveCriticalSection (&mini_arch_mutex)
93 static CRITICAL_SECTION mini_arch_mutex;
/* CPU feature flags, filled in by mono_arch_init () from hwcap detection
 * and optionally overridden by the MONO_CPU_ARCH environment variable. */
95 static gboolean v5_supported = FALSE;
96 static gboolean v6_supported = FALSE;
97 static gboolean v7_supported = FALSE;
98 static gboolean v7s_supported = FALSE;
99 static gboolean thumb_supported = FALSE;
100 static gboolean thumb2_supported = FALSE;
102 * Whenever to use the ARM EABI
104 static gboolean eabi_supported = FALSE;
107 * Whenever to use the iphone ABI extensions:
108 * http://developer.apple.com/library/ios/documentation/Xcode/Conceptual/iPhoneOSABIReference/index.html
109 * Basically, r7 is used as a frame pointer and it should point to the saved r7 + lr.
110 * This is required for debugging/profiling tools to work, but it has some overhead so it should
111 * only be turned on in debug builds.
113 static gboolean iphone_abi = FALSE;
116 * The FPU we are generating code for. This is NOT runtime configurable right now,
117 * since some things like MONO_ARCH_CALLEE_FREGS still depend on defines.
119 static MonoArmFPU arm_fpu;
/*
 * VFP scratch register selection: armhf must spill d14/d15 around use
 * (see mono_arm_emit_vfp_scratch_save/_restore), armel can freely use d0/d1.
 */
121 #if defined(ARM_FPU_VFP_HARD)
123 * On armhf, d0-d7 are used for argument passing and d8-d15
124 * must be preserved across calls, which leaves us no room
125 * for scratch registers. So we use d14-d15 but back up their
126 * previous contents to a stack slot before using them - see
127 * mono_arm_emit_vfp_scratch_save/_restore ().
129 static int vfp_scratch1 = ARM_VFP_D14;
130 static int vfp_scratch2 = ARM_VFP_D15;
/* the #else separating the two branches is not visible in this chunk */
133 * On armel, d0-d7 do not need to be preserved, so we can
134 * freely make use of them as scratch registers.
136 static int vfp_scratch1 = ARM_VFP_D0;
137 static int vfp_scratch2 = ARM_VFP_D1;
/* Soft-debugger state: single-step trigger flag and wrapper entry points. */
142 static volatile int ss_trigger_var = 0;
144 static gpointer single_step_func_wrapper;
145 static gpointer breakpoint_func_wrapper;
148 * The code generated for sequence points reads from this location, which is
149 * made read-only when single stepping is enabled.
151 static gpointer ss_trigger_page;
153 /* Enabled breakpoints read from this trigger page */
154 static gpointer bp_trigger_page;
158 * floating point support: on ARM it is a mess, there are at least 3
159 * different setups, each of which binary incompat with the other.
160 * 1) FPA: old and ugly, but unfortunately what current distros use
161 * the double binary format has the two words swapped. 8 double registers.
162 * Implemented usually by kernel emulation.
163 * 2) softfloat: the compiler emulates all the fp ops. Usually uses the
164 * ugly swapped double format (I guess a softfloat-vfp exists, too, though).
165 * 3) VFP: the new and actually sensible and useful FP support. Implemented
166 * in HW or kernel-emulated, requires new tools. I think this is what symbian uses.
168 * We do not care about FPA. We will support soft float and VFP.
170 int mono_exc_esp_offset = 0;
/* Range predicates for ARM load/store immediate offsets:
 * 12-bit for LDR/STR, 8-bit for LDRH-class, and +/-1020 for VFP FLDS/FLDD. */
172 #define arm_is_imm12(v) ((v) > -4096 && (v) < 4096)
173 #define arm_is_imm8(v) ((v) > -256 && (v) < 256)
174 #define arm_is_fpimm8(v) ((v) >= -1020 && (v) <= 1020)
/* Bit patterns used to recognize/emit "ldr pc, [...]"-style instructions
 * when patching call sites. */
176 #define LDR_MASK ((0xf << ARMCOND_SHIFT) | (3 << 26) | (1 << 22) | (1 << 20) | (15 << 12))
177 #define LDR_PC_VAL ((ARMCOND_AL << ARMCOND_SHIFT) | (1 << 26) | (0 << 22) | (1 << 20) | (15 << 12))
178 #define IS_LDR_PC(val) (((val) & LDR_MASK) == LDR_PC_VAL)
180 #define ADD_LR_PC_4 ((ARMCOND_AL << ARMCOND_SHIFT) | (1 << 25) | (1 << 23) | (ARMREG_PC << 16) | (ARMREG_LR << 12) | 4)
181 #define MOV_LR_PC ((ARMCOND_AL << ARMCOND_SHIFT) | (1 << 24) | (0xa << 20) | (ARMREG_LR << 12) | ARMREG_PC)
182 //#define DEBUG_IMT 0
184 /* A variant of ARM_LDR_IMM which can handle large offsets */
/* If the offset fits in 12 bits, emit a single LDR; otherwise materialize
 * the offset into scratch_reg (which therefore must differ from basereg)
 * and use register-offset addressing. */
185 #define ARM_LDR_IMM_GENERAL(code, dreg, basereg, offset, scratch_reg) do { \
186 if (arm_is_imm12 ((offset))) { \
187 ARM_LDR_IMM (code, (dreg), (basereg), (offset)); \
189 g_assert ((scratch_reg) != (basereg)); \
190 code = mono_arm_emit_load_imm (code, (scratch_reg), (offset)); \
191 ARM_LDR_REG_REG (code, (dreg), (basereg), (scratch_reg)); \
/* Store counterpart of ARM_LDR_IMM_GENERAL, same offset/scratch rules. */
195 #define ARM_STR_IMM_GENERAL(code, dreg, basereg, offset, scratch_reg) do { \
196 if (arm_is_imm12 ((offset))) { \
197 ARM_STR_IMM (code, (dreg), (basereg), (offset)); \
199 g_assert ((scratch_reg) != (basereg)); \
200 code = mono_arm_emit_load_imm (code, (scratch_reg), (offset)); \
201 ARM_STR_REG_REG (code, (dreg), (basereg), (scratch_reg)); \
206 static void mono_arch_compute_omit_fp (MonoCompile *cfg);
/*
 * mono_arch_regname:
 * Return a human-readable name for integer register number REG
 * (0..15), used by the register allocator's debug output.
 */
210 mono_arch_regname (int reg)
212 static const char * rnames[] = {
213 "arm_r0", "arm_r1", "arm_r2", "arm_r3", "arm_v1",
214 "arm_v2", "arm_v3", "arm_v4", "arm_v5", "arm_v6",
215 "arm_v7", "arm_fp", "arm_ip", "arm_sp", "arm_lr",
/* "arm_pc" entry and the out-of-range fallback return are not visible in this chunk */
218 if (reg >= 0 && reg < 16)
/*
 * mono_arch_fregname:
 * Return a human-readable name for floating point register number REG
 * (0..31), used by the register allocator's debug output.
 */
224 mono_arch_fregname (int reg)
226 static const char * rnames[] = {
227 "arm_f0", "arm_f1", "arm_f2", "arm_f3", "arm_f4",
228 "arm_f5", "arm_f6", "arm_f7", "arm_f8", "arm_f9",
229 "arm_f10", "arm_f11", "arm_f12", "arm_f13", "arm_f14",
230 "arm_f15", "arm_f16", "arm_f17", "arm_f18", "arm_f19",
231 "arm_f20", "arm_f21", "arm_f22", "arm_f23", "arm_f24",
232 "arm_f25", "arm_f26", "arm_f27", "arm_f28", "arm_f29",
/* "arm_f30"/"arm_f31" entries and the fallback return are not visible here */
235 if (reg >= 0 && reg < 32)
/*
 * emit_big_add:
 * Emit dreg = sreg + imm. Uses a single ADD when imm fits an ARM
 * rotated 8-bit immediate; otherwise loads imm into dreg first and adds
 * sreg, which is why dreg must differ from sreg in that path.
 */
243 emit_big_add (guint8 *code, int dreg, int sreg, int imm)
245 int imm8, rot_amount;
246 if ((imm8 = mono_arm_is_rotated_imm8 (imm, &rot_amount)) >= 0) {
247 ARM_ADD_REG_IMM (code, dreg, sreg, imm8, rot_amount);
/* slow path: imm not encodable as rotated imm8 */
250 g_assert (dreg != sreg);
251 code = mono_arm_emit_load_imm (code, dreg, imm);
252 ARM_ADD_REG_REG (code, dreg, dreg, sreg);
/*
 * emit_memcpy:
 * Emit code copying SIZE bytes from sreg+soffset to dreg+doffset.
 * Large copies (> 4 words) use a loop clobbering r0-r3 (safe here
 * because this is only used for incoming stack arguments); small copies
 * are unrolled word loads/stores through lr.
 * NOTE(review): size is presumably word-aligned — the final
 * g_assert (size == 0) only holds if the unrolled tail decrements size
 * to exactly zero; confirm against callers.
 */
257 emit_memcpy (guint8 *code, int size, int dreg, int doffset, int sreg, int soffset)
259 /* we can use r0-r3, since this is called only for incoming args on the stack */
260 if (size > sizeof (gpointer) * 4) {
262 code = emit_big_add (code, ARMREG_R0, sreg, soffset);
263 code = emit_big_add (code, ARMREG_R1, dreg, doffset);
/* r2 = remaining byte count; loop copies one word per iteration */
264 start_loop = code = mono_arm_emit_load_imm (code, ARMREG_R2, size);
265 ARM_LDR_IMM (code, ARMREG_R3, ARMREG_R0, 0);
266 ARM_STR_IMM (code, ARMREG_R3, ARMREG_R1, 0);
267 ARM_ADD_REG_IMM8 (code, ARMREG_R0, ARMREG_R0, 4);
268 ARM_ADD_REG_IMM8 (code, ARMREG_R1, ARMREG_R1, 4);
269 ARM_SUBS_REG_IMM8 (code, ARMREG_R2, ARMREG_R2, 4);
270 ARM_B_COND (code, ARMCOND_NE, 0);
271 arm_patch (code - 4, start_loop);
/* small copy: unrolled word-by-word via lr while offsets stay encodable */
274 if (arm_is_imm12 (doffset) && arm_is_imm12 (doffset + size) &&
275 arm_is_imm12 (soffset) && arm_is_imm12 (soffset + size)) {
277 ARM_LDR_IMM (code, ARMREG_LR, sreg, soffset);
278 ARM_STR_IMM (code, ARMREG_LR, dreg, doffset);
/* offsets too large to encode: rebase into r0/r1 and copy from offset 0 */
284 code = emit_big_add (code, ARMREG_R0, sreg, soffset);
285 code = emit_big_add (code, ARMREG_R1, dreg, doffset);
286 doffset = soffset = 0;
288 ARM_LDR_IMM (code, ARMREG_LR, ARMREG_R0, soffset);
289 ARM_STR_IMM (code, ARMREG_LR, ARMREG_R1, doffset);
295 g_assert (size == 0);
/*
 * emit_call_reg:
 * Emit an indirect call through REG. Uses BLX where available
 * (presumably gated on v5_supported — the condition line is not visible
 * in this chunk); otherwise emulates it with mov lr, pc / mov pc, reg.
 */
300 emit_call_reg (guint8 *code, int reg)
303 ARM_BLX_REG (code, reg);
305 #ifdef USE_JUMP_TABLES
306 g_assert_not_reached ();
/* legacy sequence: lr = pc (address of next insn + 4), then jump */
308 ARM_MOV_REG_REG (code, ARMREG_LR, ARMREG_PC);
312 ARM_MOV_REG_REG (code, ARMREG_PC, reg);
/*
 * emit_call_seq:
 * Emit a patchable call sequence. With jump tables it is a patchable BL;
 * for dynamic methods the target is embedded as inline data loaded
 * pc-relatively into ip and called indirectly (the patch machinery
 * fills in the NULL slot later).
 */
318 emit_call_seq (MonoCompile *cfg, guint8 *code)
320 #ifdef USE_JUMP_TABLES
321 code = mono_arm_patchable_bl (code, ARMCOND_AL);
323 if (cfg->method->dynamic) {
324 ARM_LDR_IMM (code, ARMREG_IP, ARMREG_PC, 0);
/* inline data slot holding the call target, patched at link time */
326 *(gpointer*)code = NULL;
328 code = emit_call_reg (code, ARMREG_IP);
/*
 * mono_arm_patchable_b:
 * Emit a branch with condition COND that can later be retargeted:
 * via a jumptable entry + BX when USE_JUMP_TABLES, otherwise a plain
 * B whose displacement is patched in place.
 */
337 mono_arm_patchable_b (guint8 *code, int cond)
339 #ifdef USE_JUMP_TABLES
342 jte = mono_jumptable_add_entry ();
343 code = mono_arm_load_jumptable_entry (code, jte, ARMREG_IP);
344 ARM_BX_COND (code, cond, ARMREG_IP);
346 ARM_B_COND (code, cond, 0);
/*
 * mono_arm_patchable_bl:
 * Like mono_arm_patchable_b but for calls: jumptable entry + BLX
 * when USE_JUMP_TABLES, otherwise a patchable BL.
 */
352 mono_arm_patchable_bl (guint8 *code, int cond)
354 #ifdef USE_JUMP_TABLES
357 jte = mono_jumptable_add_entry ();
358 code = mono_arm_load_jumptable_entry (code, jte, ARMREG_IP);
359 ARM_BLX_REG_COND (code, cond, ARMREG_IP);
361 ARM_BL_COND (code, cond, 0);
366 #ifdef USE_JUMP_TABLES
/*
 * mono_arm_load_jumptable_entry_addr:
 * Materialize the 32-bit address of jumptable entry JTE into REG using
 * a movw/movt pair (requires ARMv7-class encodings).
 */
368 mono_arm_load_jumptable_entry_addr (guint8 *code, gpointer *jte, ARMReg reg)
370 ARM_MOVW_REG_IMM (code, reg, GPOINTER_TO_UINT(jte) & 0xffff);
371 ARM_MOVT_REG_IMM (code, reg, (GPOINTER_TO_UINT(jte) >> 16) & 0xffff);
/*
 * mono_arm_load_jumptable_entry:
 * Load the value stored in jumptable entry JTE into REG
 * (address first, then a dereferencing LDR).
 */
376 mono_arm_load_jumptable_entry (guint8 *code, gpointer* jte, ARMReg reg)
378 code = mono_arm_load_jumptable_entry_addr (code, jte, reg);
379 ARM_LDR_IMM (code, reg, reg, 0);
/*
 * emit_move_return_value:
 * After a call instruction, move the ABI return value into ins->dreg.
 * For float returns: hard-float values arrive in s0/d0 and are
 * converted/copied; soft-float values arrive in r0(/r1) and are moved
 * into a VFP register via FMSR/FMDRR.
 * NOTE(review): only fragments of the switch are visible here; the
 * surrounding case labels and #ifdef structure are missing from this chunk.
 */
385 emit_move_return_value (MonoCompile *cfg, MonoInst *ins, guint8 *code)
387 switch (ins->opcode) {
390 case OP_FCALL_MEMBASE:
392 if (((MonoCallInst*)ins)->signature->ret->type == MONO_TYPE_R4) {
/* R4 return: widen single to double in dreg */
394 ARM_CVTS (code, ins->dreg, ARM_VFP_F0);
/* soft-float path: r0 holds the single-precision bits */
396 ARM_FMSR (code, ins->dreg, ARMREG_R0);
397 ARM_CVTS (code, ins->dreg, ins->dreg);
/* R8 return: d0 (hard-float) or r0:r1 pair (soft-float ABI) */
401 ARM_CPYD (code, ins->dreg, ARM_VFP_D0);
403 ARM_FMDRR (code, ARMREG_R0, ARMREG_R1, ins->dreg);
416 * Emit code to push an LMF structure on the LMF stack.
417 * On arm, this is intermixed with the initialization of other fields of the structure.
/*
 * emit_save_lmf:
 * Obtains the thread's lmf_addr (via __aeabi_read_tp TLS fast path,
 * an inlined pthread_getspecific for managed-to-native wrappers, or a
 * call to mono_get_lmf_addr), then links the new stack-allocated MonoLMF
 * into the LMF list and fills in sp/fp/ip.
 */
420 emit_save_lmf (MonoCompile *cfg, guint8 *code, gint32 lmf_offset)
422 gboolean get_lmf_fast = FALSE;
425 #ifdef HAVE_AEABI_READ_TP
426 gint32 lmf_addr_tls_offset = mono_get_lmf_addr_tls_offset ();
428 if (lmf_addr_tls_offset != -1) {
/* fast path: TLS base from __aeabi_read_tp, then load lmf_addr at its offset */
431 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_INTERNAL_METHOD,
432 (gpointer)"__aeabi_read_tp");
433 code = emit_call_seq (cfg, code);
435 ARM_LDR_IMM (code, ARMREG_R0, ARMREG_R0, lmf_addr_tls_offset);
441 if (cfg->method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) {
444 /* Inline mono_get_lmf_addr () */
445 /* jit_tls = pthread_getspecific (mono_jit_tls_id); lmf_addr = &jit_tls->lmf; */
447 /* Load mono_jit_tls_id */
449 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_JIT_TLS_ID, NULL);
450 ARM_LDR_IMM (code, ARMREG_R0, ARMREG_PC, 0);
/* inline data slot for mono_jit_tls_id, filled by the patcher */
452 *(gpointer*)code = NULL;
454 ARM_LDR_REG_REG (code, ARMREG_R0, ARMREG_PC, ARMREG_R0);
455 /* call pthread_getspecific () */
456 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_INTERNAL_METHOD,
457 (gpointer)"pthread_getspecific");
458 code = emit_call_seq (cfg, code);
459 /* lmf_addr = &jit_tls->lmf */
/* NOTE(review): lmf_offset (the function parameter) is reused here as a
 * struct-field offset — confirm this matches upstream before relying on it */
460 lmf_offset = MONO_STRUCT_OFFSET (MonoJitTlsData, lmf);
461 g_assert (arm_is_imm8 (lmf_offset));
462 ARM_ADD_REG_IMM (code, ARMREG_R0, ARMREG_R0, lmf_offset, 0);
/* slow path: plain call to the runtime helper */
469 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_INTERNAL_METHOD,
470 (gpointer)"mono_get_lmf_addr");
471 code = emit_call_seq (cfg, code);
473 /* we build the MonoLMF structure on the stack - see mini-arm.h */
474 /* lmf_offset is the offset from the previous stack pointer,
475 * alloc_size is the total stack space allocated, so the offset
476 * of MonoLMF from the current stack ptr is alloc_size - lmf_offset.
477 * The pointer to the struct is put in r1 (new_lmf).
478 * ip is used as scratch
479 * The callee-saved registers are already in the MonoLMF structure
481 code = emit_big_add (code, ARMREG_R1, ARMREG_SP, lmf_offset);
482 /* r0 is the result from mono_get_lmf_addr () */
483 ARM_STR_IMM (code, ARMREG_R0, ARMREG_R1, MONO_STRUCT_OFFSET (MonoLMF, lmf_addr));
484 /* new_lmf->previous_lmf = *lmf_addr */
485 ARM_LDR_IMM (code, ARMREG_IP, ARMREG_R0, MONO_STRUCT_OFFSET (MonoLMF, previous_lmf));
486 ARM_STR_IMM (code, ARMREG_IP, ARMREG_R1, MONO_STRUCT_OFFSET (MonoLMF, previous_lmf));
487 /* *(lmf_addr) = r1 */
488 ARM_STR_IMM (code, ARMREG_R1, ARMREG_R0, MONO_STRUCT_OFFSET (MonoLMF, previous_lmf));
489 /* Skip method (only needed for trampoline LMF frames) */
490 ARM_STR_IMM (code, ARMREG_SP, ARMREG_R1, MONO_STRUCT_OFFSET (MonoLMF, sp));
491 ARM_STR_IMM (code, ARMREG_FP, ARMREG_R1, MONO_STRUCT_OFFSET (MonoLMF, fp));
492 /* save the current IP */
493 ARM_MOV_REG_REG (code, ARMREG_IP, ARMREG_PC);
494 ARM_STR_IMM (code, ARMREG_IP, ARMREG_R1, MONO_STRUCT_OFFSET (MonoLMF, ip));
/* tell the precise GC the LMF slots contain no managed references */
496 for (i = 0; i < sizeof (MonoLMF); i += sizeof (mgreg_t))
497 mini_gc_set_slot_type_from_fp (cfg, lmf_offset + i, SLOT_NOREF);
/*
 * emit_float_args:
 * For a hard-float call, load each queued single-precision argument
 * from its stack slot into the VFP register (fad->hreg) assigned by the
 * calling convention, growing the code buffer if needed.
 */
508 emit_float_args (MonoCompile *cfg, MonoCallInst *inst, guint8 *code, int *max_len, guint *offset)
512 for (list = inst->float_args; list; list = list->next) {
513 FloatArgData *fad = list->data;
514 MonoInst *var = get_vreg_to_inst (cfg, fad->vreg);
/* can the slot offset be encoded directly in an FLDS? */
515 gboolean imm = arm_is_fpimm8 (var->inst_offset);
517 /* 4+1 insns for emit_big_add () and 1 for FLDS. */
/* grow the native code buffer if this sequence might not fit */
523 if (*offset + *max_len > cfg->code_size) {
524 cfg->code_size += *max_len;
525 cfg->native_code = g_realloc (cfg->native_code, cfg->code_size);
527 code = cfg->native_code + *offset;
/* offset too large for FLDS: compute the address in lr first */
531 code = emit_big_add (code, ARMREG_LR, var->inst_basereg, var->inst_offset);
532 ARM_FLDS (code, fad->hreg, ARMREG_LR, 0);
534 ARM_FLDS (code, fad->hreg, var->inst_basereg, var->inst_offset);
536 *offset = code - cfg->native_code;
/*
 * mono_arm_emit_vfp_scratch_save:
 * Spill the VFP scratch double register REG (must be vfp_scratch1/2)
 * to its reserved stack slot so it can be clobbered; paired with
 * mono_arm_emit_vfp_scratch_restore. Needed on armhf where d14/d15
 * are callee-saved.
 */
543 mono_arm_emit_vfp_scratch_save (MonoCompile *cfg, guint8 *code, int reg)
547 g_assert (reg == vfp_scratch1 || reg == vfp_scratch2);
549 inst = (MonoInst *) cfg->arch.vfp_scratch_slots [reg == vfp_scratch1 ? 0 : 1];
/* slot offset may not fit an FSTD immediate; go through lr if so */
552 if (!arm_is_fpimm8 (inst->inst_offset)) {
553 code = emit_big_add (code, ARMREG_LR, inst->inst_basereg, inst->inst_offset);
554 ARM_FSTD (code, reg, ARMREG_LR, 0);
556 ARM_FSTD (code, reg, inst->inst_basereg, inst->inst_offset);
/*
 * mono_arm_emit_vfp_scratch_restore:
 * Reload the VFP scratch double register REG from the stack slot
 * written by mono_arm_emit_vfp_scratch_save.
 */
563 mono_arm_emit_vfp_scratch_restore (MonoCompile *cfg, guint8 *code, int reg)
567 g_assert (reg == vfp_scratch1 || reg == vfp_scratch2);
569 inst = (MonoInst *) cfg->arch.vfp_scratch_slots [reg == vfp_scratch1 ? 0 : 1];
/* mirror of the save path: large offsets go through lr */
572 if (!arm_is_fpimm8 (inst->inst_offset)) {
573 code = emit_big_add (code, ARMREG_LR, inst->inst_basereg, inst->inst_offset);
574 ARM_FLDD (code, reg, ARMREG_LR, 0);
576 ARM_FLDD (code, reg, inst->inst_basereg, inst->inst_offset);
585 * Emit code to pop an LMF structure from the LMF stack.
/*
 * emit_restore_lmf:
 * Unlink the current MonoLMF: *lmf_addr = lmf->previous_lmf.
 * Small lmf_offsets are addressed directly off the frame register;
 * otherwise the LMF address is computed into r2 first (the offset
 * rebasing lines are not visible in this chunk).
 */
588 emit_restore_lmf (MonoCompile *cfg, guint8 *code, gint32 lmf_offset)
592 if (lmf_offset < 32) {
593 basereg = cfg->frame_reg;
598 code = emit_big_add (code, ARMREG_R2, cfg->frame_reg, lmf_offset);
601 /* ip = previous_lmf */
602 ARM_LDR_IMM (code, ARMREG_IP, basereg, offset + MONO_STRUCT_OFFSET (MonoLMF, previous_lmf));
/* lr = lmf_addr */
604 ARM_LDR_IMM (code, ARMREG_LR, basereg, offset + MONO_STRUCT_OFFSET (MonoLMF, lmf_addr));
605 /* *(lmf_addr) = previous_lmf */
606 ARM_STR_IMM (code, ARMREG_IP, ARMREG_LR, MONO_STRUCT_OFFSET (MonoLMF, previous_lmf));
611 #endif /* #ifndef DISABLE_JIT */
614 * mono_arch_get_argument_info:
615 * @csig: a method signature
616 * @param_count: the number of parameters to consider
617 * @arg_info: an array to store the result infos
619 * Gathers information on parameters such as size, alignment and
620 * padding. arg_info should be large enought to hold param_count + 1 entries.
622 * Returns the size of the activation frame.
625 mono_arch_get_argument_info (MonoGenericSharingContext *gsctx, MonoMethodSignature *csig, int param_count, MonoJitArgumentInfo *arg_info)
627 int k, frame_size = 0;
628 guint32 size, align, pad;
/* a struct return is passed as a hidden pointer argument, so it consumes a slot */
632 t = mini_type_get_underlying_type (gsctx, csig->ret);
633 if (MONO_TYPE_ISSTRUCT (t)) {
634 frame_size += sizeof (gpointer);
638 arg_info [0].offset = offset;
/* the 'this' pointer also consumes a slot (its condition line is not visible here) */
641 frame_size += sizeof (gpointer);
645 arg_info [0].size = frame_size;
647 for (k = 0; k < param_count; k++) {
648 size = mini_type_stack_size_full (NULL, csig->params [k], &align, csig->pinvoke);
650 /* ignore alignment for now */
/* pad the frame up to this parameter's alignment, record pad + size + offset */
653 frame_size += pad = (align - (frame_size & (align - 1))) & (align - 1);
654 arg_info [k].pad = pad;
656 arg_info [k + 1].pad = 0;
657 arg_info [k + 1].size = size;
659 arg_info [k + 1].offset = offset;
/* final frame padding up to the ABI frame alignment */
663 align = MONO_ARCH_FRAME_ALIGNMENT;
664 frame_size += pad = (align - (frame_size & (align - 1))) & (align - 1);
665 arg_info [k].pad = pad;
/* ARM can pass at most r0-r3, so with 'this' consumed by the delegate
 * only 3 register parameters can be shifted down. */
670 #define MAX_ARCH_DELEGATE_PARAMS 3
/*
 * get_delegate_invoke_impl:
 * Build the tiny trampoline used for delegate Invoke. The has_target
 * variant replaces 'this' with delegate->target and jumps to method_ptr;
 * the no-target variant slides the register arguments down by one
 * before jumping. Returns the code start; optionally reports its size.
 * NOTE(review): param_count is declared gboolean here but used as a
 * count (0..3) — works because of C integer promotion, but the type
 * looks like an upstream slip; confirm before changing.
 */
673 get_delegate_invoke_impl (gboolean has_target, gboolean param_count, guint32 *code_size)
675 guint8 *code, *start;
678 start = code = mono_global_codeman_reserve (12);
680 /* Replace the this argument with the target */
681 ARM_LDR_IMM (code, ARMREG_IP, ARMREG_R0, MONO_STRUCT_OFFSET (MonoDelegate, method_ptr));
682 ARM_LDR_IMM (code, ARMREG_R0, ARMREG_R0, MONO_STRUCT_OFFSET (MonoDelegate, target));
683 ARM_MOV_REG_REG (code, ARMREG_PC, ARMREG_IP);
685 g_assert ((code - start) <= 12);
687 mono_arch_flush_icache (start, 12);
/* no-target variant: 2 fixed insns + one mov per parameter */
691 size = 8 + param_count * 4;
692 start = code = mono_global_codeman_reserve (size);
694 ARM_LDR_IMM (code, ARMREG_IP, ARMREG_R0, MONO_STRUCT_OFFSET (MonoDelegate, method_ptr));
695 /* slide down the arguments */
696 for (i = 0; i < param_count; ++i) {
697 ARM_MOV_REG_REG (code, (ARMREG_R0 + i), (ARMREG_R0 + i + 1));
699 ARM_MOV_REG_REG (code, ARMREG_PC, ARMREG_IP);
701 g_assert ((code - start) <= size);
703 mono_arch_flush_icache (start, size);
707 *code_size = code - start;
713 * mono_arch_get_delegate_invoke_impls:
715 * Return a list of MonoAotTrampInfo structures for the delegate invoke impl
/*
 * Builds the has-target variant plus one no-target variant per supported
 * parameter count (0..MAX_ARCH_DELEGATE_PARAMS), for AOT emission.
 */
719 mono_arch_get_delegate_invoke_impls (void)
727 code = get_delegate_invoke_impl (TRUE, 0, &code_len);
728 res = g_slist_prepend (res, mono_tramp_info_create ("delegate_invoke_impl_has_target", code, code_len, NULL, NULL));
730 for (i = 0; i <= MAX_ARCH_DELEGATE_PARAMS; ++i) {
731 code = get_delegate_invoke_impl (FALSE, i, &code_len);
732 tramp_name = g_strdup_printf ("delegate_invoke_impl_target_%d", i);
733 res = g_slist_prepend (res, mono_tramp_info_create (tramp_name, code, code_len, NULL, NULL));
/*
 * mono_arch_get_delegate_invoke_impl:
 * Return (and cache) the invoke trampoline matching SIG. Struct returns
 * and signatures with non-regsize or too many parameters are rejected
 * (return NULL — the return statements are not visible in this chunk).
 * Caches are guarded by the arch mutex; AOT builds fetch the
 * pre-generated trampoline instead of emitting one.
 */
741 mono_arch_get_delegate_invoke_impl (MonoMethodSignature *sig, gboolean has_target)
743 guint8 *code, *start;
746 /* FIXME: Support more cases */
747 sig_ret = mini_type_get_underlying_type (NULL, sig->ret);
748 if (MONO_TYPE_ISSTRUCT (sig_ret))
/* has_target path: single shared trampoline, cached once */
752 static guint8* cached = NULL;
753 mono_mini_arch_lock ();
755 mono_mini_arch_unlock ();
760 start = mono_aot_get_trampoline ("delegate_invoke_impl_has_target");
762 start = get_delegate_invoke_impl (TRUE, 0, NULL);
764 mono_mini_arch_unlock ();
/* no-target path: one cached trampoline per parameter count */
767 static guint8* cache [MAX_ARCH_DELEGATE_PARAMS + 1] = {NULL};
770 if (sig->param_count > MAX_ARCH_DELEGATE_PARAMS)
772 for (i = 0; i < sig->param_count; ++i)
773 if (!mono_is_regsize_var (sig->params [i]))
776 mono_mini_arch_lock ();
777 code = cache [sig->param_count];
779 mono_mini_arch_unlock ();
784 char *name = g_strdup_printf ("delegate_invoke_impl_target_%d", sig->param_count);
785 start = mono_aot_get_trampoline (name);
788 start = get_delegate_invoke_impl (FALSE, sig->param_count, NULL);
790 cache [sig->param_count] = start;
791 mono_mini_arch_unlock ();
/*
 * mono_arch_get_this_arg_from_call:
 * Recover the 'this' argument from a saved register state: on ARM it
 * is always passed in r0. CODE is unused here.
 */
799 mono_arch_get_this_arg_from_call (mgreg_t *regs, guint8 *code)
801 return (gpointer)regs [ARMREG_R0];
805 * Initialize the cpu to execute managed code.
/*
 * Records the ABI alignment of 64-bit integers; under cross-compilation
 * the target's alignment (e.g. 8 on Android targets) is forced into the
 * runtime type info since it can differ from the host's.
 */
808 mono_arch_cpu_init (void)
810 i8_align = MONO_ABI_ALIGNOF (gint64);
811 #ifdef MONO_CROSS_COMPILE
812 /* Need to set the alignment of i8 since it can different on the target */
813 #ifdef TARGET_ANDROID
/* the i8_align assignment for this branch is not visible in this chunk */
815 mono_type_set_alignment (MONO_TYPE_I8, i8_align);
/*
 * create_function_wrapper:
 * Generate a small assembly thunk that captures the full register state
 * into a MonoContext on the stack, calls FUNCTION (ctx) — used for the
 * soft-debugger single-step/breakpoint handlers — and then restores
 * every register (including pc) from the possibly-modified context.
 */
821 create_function_wrapper (gpointer function)
823 guint8 *start, *code;
825 start = code = mono_global_codeman_reserve (96);
828 * Construct the MonoContext structure on the stack.
831 ARM_SUB_REG_IMM8 (code, ARMREG_SP, ARMREG_SP, sizeof (MonoContext));
833 /* save ip, lr and pc into their correspodings ctx.regs slots. */
834 ARM_STR_IMM (code, ARMREG_IP, ARMREG_SP, MONO_STRUCT_OFFSET (MonoContext, regs) + sizeof (mgreg_t) * ARMREG_IP);
835 ARM_STR_IMM (code, ARMREG_LR, ARMREG_SP, MONO_STRUCT_OFFSET (MonoContext, regs) + 4 * ARMREG_LR);
836 ARM_STR_IMM (code, ARMREG_LR, ARMREG_SP, MONO_STRUCT_OFFSET (MonoContext, regs) + 4 * ARMREG_PC);
838 /* save r0..r10 and fp */
839 ARM_ADD_REG_IMM8 (code, ARMREG_IP, ARMREG_SP, MONO_STRUCT_OFFSET (MonoContext, regs));
840 ARM_STM (code, ARMREG_IP, 0x0fff);
842 /* now we can update fp. */
843 ARM_MOV_REG_REG (code, ARMREG_FP, ARMREG_SP);
845 /* make ctx.esp hold the actual value of sp at the beginning of this method. */
846 ARM_ADD_REG_IMM8 (code, ARMREG_R0, ARMREG_FP, sizeof (MonoContext));
847 ARM_STR_IMM (code, ARMREG_R0, ARMREG_IP, 4 * ARMREG_SP);
848 ARM_STR_IMM (code, ARMREG_R0, ARMREG_FP, MONO_STRUCT_OFFSET (MonoContext, regs) + 4 * ARMREG_SP);
850 /* make ctx.eip hold the address of the call. */
851 ARM_SUB_REG_IMM8 (code, ARMREG_LR, ARMREG_LR, 4);
852 ARM_STR_IMM (code, ARMREG_LR, ARMREG_SP, MONO_STRUCT_OFFSET (MonoContext, pc));
854 /* r0 now points to the MonoContext */
855 ARM_MOV_REG_REG (code, ARMREG_R0, ARMREG_FP);
/* load the target function address: jumptable entry or inline data word */
858 #ifdef USE_JUMP_TABLES
860 gpointer *jte = mono_jumptable_add_entry ();
861 code = mono_arm_load_jumptable_entry (code, jte, ARMREG_IP);
865 ARM_LDR_IMM (code, ARMREG_IP, ARMREG_PC, 0);
/* inline data slot holding FUNCTION, skipped over by the pc-relative load */
867 *(gpointer*)code = function;
870 ARM_BLX_REG (code, ARMREG_IP);
872 /* we're back; save ctx.eip and ctx.esp into the corresponding regs slots. */
873 ARM_LDR_IMM (code, ARMREG_R0, ARMREG_FP, MONO_STRUCT_OFFSET (MonoContext, pc));
874 ARM_STR_IMM (code, ARMREG_R0, ARMREG_FP, MONO_STRUCT_OFFSET (MonoContext, regs) + 4 * ARMREG_LR);
875 ARM_STR_IMM (code, ARMREG_R0, ARMREG_FP, MONO_STRUCT_OFFSET (MonoContext, regs) + 4 * ARMREG_PC);
877 /* make ip point to the regs array, then restore everything, including pc. */
878 ARM_ADD_REG_IMM8 (code, ARMREG_IP, ARMREG_FP, MONO_STRUCT_OFFSET (MonoContext, regs));
879 ARM_LDM (code, ARMREG_IP, 0xffff);
881 mono_arch_flush_icache (start, code - start);
887 * Initialize architecture specific code.
/*
 * One-time backend initialization: arch mutex, soft-debugger wrappers
 * and trigger pages, JIT icall registration, FPU selection (with
 * soft-float fallback when no VFP unit is detected), CPU feature
 * detection via hwcap, and the MONO_CPU_ARCH override.
 */
890 mono_arch_init (void)
892 const char *cpu_arch;
894 InitializeCriticalSection (&mini_arch_mutex);
895 #ifdef MONO_ARCH_SOFT_DEBUG_SUPPORTED
896 if (mini_get_debug_options ()->soft_breakpoints) {
897 single_step_func_wrapper = create_function_wrapper (debugger_agent_single_step_from_context);
898 breakpoint_func_wrapper = create_function_wrapper (debugger_agent_breakpoint_from_context);
/* hardware-assisted stepping: reads from these pages fault when armed */
903 ss_trigger_page = mono_valloc (NULL, mono_pagesize (), MONO_MMAP_READ|MONO_MMAP_32BIT);
904 bp_trigger_page = mono_valloc (NULL, mono_pagesize (), MONO_MMAP_READ|MONO_MMAP_32BIT);
905 mono_mprotect (bp_trigger_page, mono_pagesize (), 0);
908 mono_aot_register_jit_icall ("mono_arm_throw_exception", mono_arm_throw_exception);
909 mono_aot_register_jit_icall ("mono_arm_throw_exception_by_token", mono_arm_throw_exception_by_token);
910 mono_aot_register_jit_icall ("mono_arm_resume_unwind", mono_arm_resume_unwind);
911 #if defined(ENABLE_GSHAREDVT)
912 mono_aot_register_jit_icall ("mono_arm_start_gsharedvt_call", mono_arm_start_gsharedvt_call);
915 #if defined(__ARM_EABI__)
916 eabi_supported = TRUE;
919 #if defined(ARM_FPU_VFP_HARD)
920 arm_fpu = MONO_ARM_FPU_VFP_HARD;
922 arm_fpu = MONO_ARM_FPU_VFP;
924 #if defined(ARM_FPU_NONE) && !defined(__APPLE__)
925 /* If we're compiling with a soft float fallback and it
926 turns out that no VFP unit is available, we need to
927 switch to soft float. We don't do this for iOS, since
928 iOS devices always have a VFP unit. */
929 if (!mono_hwcap_arm_has_vfp)
930 arm_fpu = MONO_ARM_FPU_NONE;
934 v5_supported = mono_hwcap_arm_is_v5;
935 v6_supported = mono_hwcap_arm_is_v6;
936 v7_supported = mono_hwcap_arm_is_v7;
937 v7s_supported = mono_hwcap_arm_is_v7s;
939 #if defined(__APPLE__)
940 /* iOS is special-cased here because we don't yet
941 have a way to properly detect CPU features on it. */
942 thumb_supported = TRUE;
945 thumb_supported = mono_hwcap_arm_has_thumb;
946 thumb2_supported = mono_hwcap_arm_has_thumb2;
949 /* Format: armv(5|6|7[s])[-thumb[2]] */
950 cpu_arch = g_getenv ("MONO_CPU_ARCH");
952 /* Do this here so it overrides any detection. */
954 if (strncmp (cpu_arch, "armv", 4) == 0) {
955 v5_supported = cpu_arch [4] >= '5';
956 v6_supported = cpu_arch [4] >= '6';
957 v7_supported = cpu_arch [4] >= '7';
958 v7s_supported = strncmp (cpu_arch, "armv7s", 6) == 0;
961 thumb_supported = strstr (cpu_arch, "thumb") != NULL;
962 thumb2_supported = strstr (cpu_arch, "thumb2") != NULL;
967 * Cleanup architecture specific code.
/* Teardown counterpart of mono_arch_init (body not visible in this chunk). */
970 mono_arch_cleanup (void)
975 * This function returns the optimizations supported on this cpu.
978 mono_arch_cpu_optimizations (guint32 *exclude_mask)
980 /* no arm-specific optimizations yet */
986 * This function test for all SIMD functions supported.
988 * Returns a bitmask corresponding to all supported versions.
992 mono_arch_cpu_enumerate_simd_versions (void)
994 /* SIMD is currently unimplemented */
/* Decide whether OPCODE must be emulated in software on this CPU;
 * v7s adds hardware integer division, so fewer opcodes need emulation. */
1002 mono_arch_opcode_needs_emulation (MonoCompile *cfg, int opcode)
1004 if (v7s_supported) {
1018 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
/* TRUE when the runtime selected the soft-float fallback at startup. */
1020 mono_arch_is_soft_float (void)
1022 return arm_fpu == MONO_ARM_FPU_NONE;
/* TRUE when generating code for the armhf (VFP hard-float) ABI. */
1027 mono_arm_is_hard_float (void)
1029 return arm_fpu == MONO_ARM_FPU_VFP_HARD;
/*
 * is_regsize_var:
 * TRUE if type T fits in a single 32-bit integer register (ints,
 * pointers, references, non-valuetype generic instances), so the local
 * can be considered for global register allocation.
 * NOTE(review): most case labels and returns are missing from this
 * chunk; only the reference-type tail of the switch is visible.
 */
1033 is_regsize_var (MonoGenericSharingContext *gsctx, MonoType *t) {
1036 t = mini_type_get_underlying_type (gsctx, t);
1043 case MONO_TYPE_FNPTR:
1045 case MONO_TYPE_OBJECT:
1046 case MONO_TYPE_STRING:
1047 case MONO_TYPE_CLASS:
1048 case MONO_TYPE_SZARRAY:
1049 case MONO_TYPE_ARRAY:
1051 case MONO_TYPE_GENERICINST:
1052 if (!mono_type_generic_inst_is_valuetype (t))
1055 case MONO_TYPE_VALUETYPE:
/*
 * mono_arch_get_allocatable_int_vars:
 * Collect the locals/arguments eligible for global register allocation:
 * live (first use before last use), not volatile/indirect, and of a
 * register-sized type. Returns them sorted for the allocator.
 */
1062 mono_arch_get_allocatable_int_vars (MonoCompile *cfg)
1067 for (i = 0; i < cfg->num_varinfo; i++) {
1068 MonoInst *ins = cfg->varinfo [i];
1069 MonoMethodVar *vmv = MONO_VARINFO (cfg, i);
/* unused vars: no live range */
1072 if (vmv->range.first_use.abs_pos >= vmv->range.last_use.abs_pos)
1075 if (ins->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT) || (ins->opcode != OP_LOCAL && ins->opcode != OP_ARG))
1078 /* we can only allocate 32 bit values */
1079 if (is_regsize_var (cfg->generic_sharing_context, ins->inst_vtype)) {
1080 g_assert (MONO_VARINFO (cfg, i)->reg == -1);
1081 g_assert (i == vmv->idx);
1082 vars = mono_varlist_insert_sorted (cfg, vars, vmv, FALSE);
1089 #define USE_EXTRA_TEMPS 0
/*
 * mono_arch_get_global_int_regs:
 * Return the list of callee-saved integer registers the allocator may
 * assign globally: v1-v3, v4 (or v7 under the iphone ABI where v4=r7 is
 * the frame pointer), fp when the frame pointer is omitted, and v5
 * unless it is reserved for the rgctx/IMT argument.
 */
1092 mono_arch_get_global_int_regs (MonoCompile *cfg)
1096 mono_arch_compute_omit_fp (cfg);
1099 * FIXME: Interface calls might go through a static rgctx trampoline which
1100 * sets V5, but it doesn't save it, so we need to save it ourselves, and
1103 if (cfg->flags & MONO_CFG_HAS_CALLS)
1104 cfg->uses_rgctx_reg = TRUE;
1106 if (cfg->arch.omit_fp)
1107 regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_FP));
1108 regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_V1));
1109 regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_V2));
1110 regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_V3));
1112 /* V4=R7 is used as a frame pointer, but V7=R10 is preserved */
1113 regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_V7));
1115 regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_V4));
1116 if (!(cfg->compile_aot || cfg->uses_rgctx_reg || COMPILE_LLVM (cfg)))
1117 /* V5 is reserved for passing the vtable/rgctx/IMT method */
1118 regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_V5));
1119 /*regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_V6));*/
1120 /*regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_V7));*/
1126 * mono_arch_regalloc_cost:
1128 * Return the cost, in number of memory references, of the action of
1129 * allocating the variable VMV into a register during global register
/* body/return not visible in this chunk */
1133 mono_arch_regalloc_cost (MonoCompile *cfg, MonoMethodVar *vmv)
1139 #endif /* #ifndef DISABLE_JIT */
/* Fallback so __GNUC_PREREQ is usable on toolchains that don't define it. */
1141 #ifndef __GNUC_PREREQ
1142 #define __GNUC_PREREQ(maj, min) (0)
/*
 * mono_arch_flush_icache:
 * Flush the instruction cache for [code, code+size) after emitting or
 * patching native code — mandatory on ARM, whose I/D caches are not
 * coherent. Platform dispatch: no-op for NaCl and cross builds, Darwin
 * uses sys_icache_invalidate, GCC>=4.1 __clear_cache, Android the
 * cacheflush syscall, and the fallback the legacy 0x9f0002 swi.
 */
1146 mono_arch_flush_icache (guint8 *code, gint size)
1148 #if defined(__native_client__)
1149 // For Native Client we don't have to flush i-cache here,
1150 // as it's being done by dyncode interface.
1153 #ifdef MONO_CROSS_COMPILE
1155 sys_icache_invalidate (code, size);
1156 #elif __GNUC_PREREQ(4, 1)
1157 __clear_cache (code, code + size);
1158 #elif defined(PLATFORM_ANDROID)
1159 const int syscall = 0xf0002;
/* the asm body of the Android path is partially missing from this chunk */
1167 : "r" (code), "r" (code + size), "r" (syscall)
1168 : "r0", "r1", "r7", "r2"
1171 __asm __volatile ("mov r0, %0\n"
1174 "swi 0x9f0002 @ sys_cacheflush"
1176 : "r" (code), "r" (code + size), "r" (0)
1177 : "r0", "r1", "r3" );
1179 #endif /* !__native_client__ */
/*
 * Argument classification for the ARM calling convention (fragment of
 * the ArgStorage enum and the ArgInfo/CallInfo structs; the enum header
 * and several fields are not visible in this chunk).
 */
1190 RegTypeStructByAddr,
1191 /* gsharedvt argument passed by addr in greg */
1192 RegTypeGSharedVtInReg,
1193 /* gsharedvt argument passed by addr on stack */
1194 RegTypeGSharedVtOnStack,
1199 guint16 vtsize; /* in param area */
1203 guint8 size : 4; /* 1, 2, 4, 8, or regs used by RegTypeStructByVal */
/* CallInfo: aggregate result of get_call_info () for one signature */
1208 guint32 stack_usage;
1209 gboolean vtype_retaddr;
1210 /* The index of the vret arg in the argument list */
/*
 * add_general:
 * Assign the next integer argument slot. Simple (word-sized) values go
 * in r0-r3 then the stack; 64-bit values need a register pair — on
 * EABI with 8-byte i8 alignment the pair must start on an even
 * register, while older ABIs may split a long between r3 and the stack
 * (RegTypeBaseGen).
 */
1220 add_general (guint *gr, guint *stack_size, ArgInfo *ainfo, gboolean simple)
1223 if (*gr > ARMREG_R3) {
1225 ainfo->offset = *stack_size;
1226 ainfo->reg = ARMREG_SP; /* in the caller */
1227 ainfo->storage = RegTypeBase;
1230 ainfo->storage = RegTypeGeneral;
/* 64-bit path: whether a long may be split reg/stack depends on i8 alignment */
1237 split = i8_align == 4;
1242 if (*gr == ARMREG_R3 && split) {
1243 /* first word in r3 and the second on the stack */
1244 ainfo->offset = *stack_size;
1245 ainfo->reg = ARMREG_SP; /* in the caller */
1246 ainfo->storage = RegTypeBaseGen;
1248 } else if (*gr >= ARMREG_R3) {
1249 if (eabi_supported) {
1250 /* darwin aligns longs to 4 byte only */
1251 if (i8_align == 8) {
1256 ainfo->offset = *stack_size;
1257 ainfo->reg = ARMREG_SP; /* in the caller */
1258 ainfo->storage = RegTypeBase;
1261 if (eabi_supported) {
/* EABI: 64-bit values start in an even register; skip the odd one */
1262 if (i8_align == 8 && ((*gr) & 1))
1265 ainfo->storage = RegTypeIRegPair;
/*
 * add_float:
 * Assign the next VFP argument slot per the AAPCS-VFP (armhf) rules:
 * doubles take an even/odd s-register pair, singles may back-fill a
 * spare s-register left by a previous double, and once s0-s15 are
 * exhausted arguments go on the stack.
 */
1274 add_float (guint *fpr, guint *stack_size, ArgInfo *ainfo, gboolean is_double, gint *float_spare)
1277 * If we're calling a function like this:
1279 * void foo(float a, double b, float c)
1281 * We pass a in s0 and b in d1. That leaves us
1282 * with s1 being unused. The armhf ABI recognizes
1283 * this and requires register assignment to then
1284 * use that for the next single-precision arg,
1285 * i.e. c in this example. So float_spare either
1286 * tells us which reg to use for the next single-
1287 * precision arg, or it's -1, meaning use *fpr.
1289 * Note that even though most of the JIT speaks
1290 * double-precision, fpr represents single-
1291 * precision registers.
1293 * See parts 5.5 and 6.1.2 of the AAPCS for how
1297 if (*fpr < ARM_VFP_F16 || (!is_double && *float_spare >= 0)) {
1298 ainfo->storage = RegTypeFP;
1302 * If we're passing a double-precision value
1303 * and *fpr is odd (e.g. it's s1, s3, ...)
1304 * we need to use the next even register. So
1305 * we mark the current *fpr as a spare that
1306 * can be used for the next single-precision
1310 *float_spare = *fpr;
1315 * At this point, we have an even register
1316 * so we assign that and move along.
1320 } else if (*float_spare >= 0) {
1322 * We're passing a single-precision value
1323 * and it looks like a spare single-
1324 * precision register is available. Let's
1328 ainfo->reg = *float_spare;
1332 * If we hit this branch, we're passing a
1333 * single-precision value and we can simply
1334 * use the next available register.
1342 * We've exhausted available floating point
1343 * regs, so pass the rest on the stack.
/* stack fallback: record the caller-frame offset */
1351 ainfo->offset = *stack_size;
1352 ainfo->reg = ARMREG_SP;
1353 ainfo->storage = RegTypeBase;
/*
 * get_call_info:
 * Build a CallInfo describing how every argument and the return value of
 * SIG are passed under the ARM calling convention: which core/VFP register
 * or stack offset each one occupies, plus total stack usage.  Allocated
 * from MP when given, otherwise from the GC-less heap (g_malloc0) — the
 * caller then owns the result.  NOTE(review): this view is elided; loop
 * bodies, 'break's and closing braces between the visible lines are not
 * shown, so comments below only annotate what is visible.
 */
1360 get_call_info (MonoGenericSharingContext *gsctx, MonoMemPool *mp, MonoMethodSignature *sig)
1362 guint i, gr, fpr, pstart;
1364 int n = sig->hasthis + sig->param_count;
1365 MonoType *simpletype;
1366 guint32 stack_size = 0;
1368 gboolean is_pinvoke = sig->pinvoke;
/* ArgInfo array is allocated inline after the CallInfo header. */
1372 cinfo = mono_mempool_alloc0 (mp, sizeof (CallInfo) + (sizeof (ArgInfo) * n));
1374 cinfo = g_malloc0 (sizeof (CallInfo) + (sizeof (ArgInfo) * n));
/* --- classify the return value first --- */
1381 t = mini_type_get_underlying_type (gsctx, sig->ret);
1382 if (MONO_TYPE_ISSTRUCT (t)) {
/* Small native structs from pinvoke are returned in registers. */
1385 if (is_pinvoke && mono_class_native_size (mono_class_from_mono_type (t), &align) <= sizeof (gpointer)) {
1386 cinfo->ret.storage = RegTypeStructByVal;
1388 cinfo->vtype_retaddr = TRUE;
1390 } else if (!(t->type == MONO_TYPE_GENERICINST && !mono_type_generic_inst_is_valuetype (t)) && mini_is_gsharedvt_type_gsctx (gsctx, t)) {
1391 cinfo->vtype_retaddr = TRUE;
1397 * To simplify get_this_arg_reg () and LLVM integration, emit the vret arg after
1398 * the first argument, allowing 'this' to be always passed in the first arg reg.
1399 * Also do this if the first argument is a reference type, since virtual calls
1400 * are sometimes made using calli without sig->hasthis set, like in the delegate
1403 if (cinfo->vtype_retaddr && !is_pinvoke && (sig->hasthis || (sig->param_count > 0 && MONO_TYPE_IS_REFERENCE (mini_type_get_underlying_type (gsctx, sig->params [0]))))) {
1405 add_general (&gr, &stack_size, cinfo->args + 0, TRUE);
1407 add_general (&gr, &stack_size, &cinfo->args [sig->hasthis + 0], TRUE);
/* vret address goes in the *second* arg slot in this case. */
1411 add_general (&gr, &stack_size, &cinfo->ret, TRUE);
1412 cinfo->vret_arg_index = 1;
1416 add_general (&gr, &stack_size, cinfo->args + 0, TRUE);
1420 if (cinfo->vtype_retaddr)
1421 add_general (&gr, &stack_size, &cinfo->ret, TRUE);
/* --- classify each formal parameter --- */
1424 DEBUG(printf("params: %d\n", sig->param_count));
1425 for (i = pstart; i < sig->param_count; ++i) {
/* NOTE(review): 'n' is presumably reset/advanced in elided code so it
 * indexes the current arg here — confirm against the full source. */
1426 ArgInfo *ainfo = &cinfo->args [n];
1428 if ((sig->call_convention == MONO_CALL_VARARG) && (i == sig->sentinelpos)) {
1429 /* Prevent implicit arguments and sig_cookie from
1430 being passed in registers */
1433 /* Emit the signature cookie just before the implicit arguments */
1434 add_general (&gr, &stack_size, &cinfo->sig_cookie, TRUE);
1436 DEBUG(printf("param %d: ", i));
1437 if (sig->params [i]->byref) {
1438 DEBUG(printf("byref\n"));
1439 add_general (&gr, &stack_size, ainfo, TRUE);
1443 simpletype = mini_type_get_underlying_type (gsctx, sig->params [i]);
1444 switch (simpletype->type) {
1445 case MONO_TYPE_BOOLEAN:
1448 cinfo->args [n].size = 1;
1449 add_general (&gr, &stack_size, ainfo, TRUE);
1452 case MONO_TYPE_CHAR:
1455 cinfo->args [n].size = 2;
1456 add_general (&gr, &stack_size, ainfo, TRUE);
1461 cinfo->args [n].size = 4;
1462 add_general (&gr, &stack_size, ainfo, TRUE);
1468 case MONO_TYPE_FNPTR:
1469 case MONO_TYPE_CLASS:
1470 case MONO_TYPE_OBJECT:
1471 case MONO_TYPE_STRING:
1472 case MONO_TYPE_SZARRAY:
1473 case MONO_TYPE_ARRAY:
1474 cinfo->args [n].size = sizeof (gpointer);
1475 add_general (&gr, &stack_size, ainfo, TRUE);
1478 case MONO_TYPE_GENERICINST:
1479 if (!mono_type_generic_inst_is_valuetype (simpletype)) {
/* Reference-type instantiation: passed like any object pointer. */
1480 cinfo->args [n].size = sizeof (gpointer);
1481 add_general (&gr, &stack_size, ainfo, TRUE);
1485 if (mini_is_gsharedvt_type_gsctx (gsctx, simpletype)) {
1486 /* gsharedvt arguments are passed by ref */
1487 g_assert (mini_is_gsharedvt_type_gsctx (gsctx, simpletype));
1488 add_general (&gr, &stack_size, ainfo, TRUE);
/* Re-tag the storage chosen by add_general as a gsharedvt ref. */
1489 switch (ainfo->storage) {
1490 case RegTypeGeneral:
1491 ainfo->storage = RegTypeGSharedVtInReg;
1494 ainfo->storage = RegTypeGSharedVtOnStack;
1497 g_assert_not_reached ();
1503 case MONO_TYPE_TYPEDBYREF:
1504 case MONO_TYPE_VALUETYPE: {
1510 if (simpletype->type == MONO_TYPE_TYPEDBYREF) {
1511 size = sizeof (MonoTypedRef);
1512 align = sizeof (gpointer);
1514 MonoClass *klass = mono_class_from_mono_type (sig->params [i]);
1516 size = mono_class_native_size (klass, &align);
1518 size = mini_type_stack_size_full (gsctx, simpletype, &align, FALSE);
1520 DEBUG(printf ("load %d bytes struct\n", size));
/* Round the struct size up to a whole number of pointer-sized words. */
1523 align_size += (sizeof (gpointer) - 1);
1524 align_size &= ~(sizeof (gpointer) - 1);
1525 nwords = (align_size + sizeof (gpointer) -1 ) / sizeof (gpointer);
1526 ainfo->storage = RegTypeStructByVal;
1527 ainfo->struct_size = size;
1528 /* FIXME: align stack_size if needed */
1529 if (eabi_supported) {
/* 8-byte-aligned structs start in an even register (EABI). */
1530 if (align >= 8 && (gr & 1))
1533 if (gr > ARMREG_R3) {
/* No registers left: whole struct goes on the stack. */
1535 ainfo->vtsize = nwords;
1537 int rest = ARMREG_R3 - gr + 1;
1538 int n_in_regs = rest >= nwords? nwords: rest;
/* Split: first n_in_regs words in registers, remainder on stack. */
1540 ainfo->size = n_in_regs;
1541 ainfo->vtsize = nwords - n_in_regs;
1544 nwords -= n_in_regs;
1546 if (sig->call_convention == MONO_CALL_VARARG)
1547 /* This matches the alignment in mono_ArgIterator_IntGetNextArg () */
1548 stack_size = ALIGN_TO (stack_size, align);
1549 ainfo->offset = stack_size;
1550 /*g_print ("offset for arg %d at %d\n", n, stack_size);*/
1551 stack_size += nwords * sizeof (gpointer);
/* NOTE(review): the cases owning the following add_general/add_float
 * calls (U8/I8, R4, R8, soft-float paths) are elided from this view. */
1558 add_general (&gr, &stack_size, ainfo, FALSE);
1565 add_float (&fpr, &stack_size, ainfo, FALSE, &float_spare);
1567 add_general (&gr, &stack_size, ainfo, TRUE);
1575 add_float (&fpr, &stack_size, ainfo, TRUE, &float_spare);
1577 add_general (&gr, &stack_size, ainfo, FALSE);
1582 case MONO_TYPE_MVAR:
1583 /* gsharedvt arguments are passed by ref */
1584 g_assert (mini_is_gsharedvt_type_gsctx (gsctx, simpletype));
1585 add_general (&gr, &stack_size, ainfo, TRUE);
1586 switch (ainfo->storage) {
1587 case RegTypeGeneral:
1588 ainfo->storage = RegTypeGSharedVtInReg;
1591 ainfo->storage = RegTypeGSharedVtOnStack;
1594 g_assert_not_reached ();
1599 g_error ("Can't trampoline 0x%x", sig->params [i]->type);
1603 /* Handle the case where there are no implicit arguments */
1604 if ((sig->call_convention == MONO_CALL_VARARG) && (i == sig->sentinelpos)) {
1605 /* Prevent implicit arguments and sig_cookie from
1606 being passed in registers */
1609 /* Emit the signature cookie just before the implicit arguments */
1610 add_general (&gr, &stack_size, &cinfo->sig_cookie, TRUE);
/* --- fill in the return-value register/storage --- */
1614 simpletype = mini_type_get_underlying_type (gsctx, sig->ret);
1615 switch (simpletype->type) {
1616 case MONO_TYPE_BOOLEAN:
1621 case MONO_TYPE_CHAR:
1627 case MONO_TYPE_FNPTR:
1628 case MONO_TYPE_CLASS:
1629 case MONO_TYPE_OBJECT:
1630 case MONO_TYPE_SZARRAY:
1631 case MONO_TYPE_ARRAY:
1632 case MONO_TYPE_STRING:
1633 cinfo->ret.storage = RegTypeGeneral;
1634 cinfo->ret.reg = ARMREG_R0;
/* 64-bit integers come back in the r0/r1 pair. */
1638 cinfo->ret.storage = RegTypeIRegPair;
1639 cinfo->ret.reg = ARMREG_R0;
1643 cinfo->ret.storage = RegTypeFP;
1645 if (IS_HARD_FLOAT) {
1646 cinfo->ret.reg = ARM_VFP_F0;
1648 cinfo->ret.reg = ARMREG_R0;
1652 case MONO_TYPE_GENERICINST:
1653 if (!mono_type_generic_inst_is_valuetype (simpletype)) {
1654 cinfo->ret.storage = RegTypeGeneral;
1655 cinfo->ret.reg = ARMREG_R0;
1658 // FIXME: Only for variable types
1659 if (mini_is_gsharedvt_type_gsctx (gsctx, simpletype)) {
1660 cinfo->ret.storage = RegTypeStructByAddr;
1661 g_assert (cinfo->vtype_retaddr);
1665 case MONO_TYPE_VALUETYPE:
1666 case MONO_TYPE_TYPEDBYREF:
1667 if (cinfo->ret.storage != RegTypeStructByVal)
1668 cinfo->ret.storage = RegTypeStructByAddr;
1671 case MONO_TYPE_MVAR:
1672 g_assert (mini_is_gsharedvt_type_gsctx (gsctx, simpletype));
1673 cinfo->ret.storage = RegTypeStructByAddr;
1674 g_assert (cinfo->vtype_retaddr);
1676 case MONO_TYPE_VOID:
1679 g_error ("Can't handle as return value 0x%x", sig->ret->type);
1683 /* align stack size to 8 */
1684 DEBUG (printf (" stack size: %d (%d)\n", (stack_size + 15) & ~15, stack_size));
1685 stack_size = (stack_size + 7) & ~7;
1687 cinfo->stack_usage = stack_size;
/*
 * mono_arch_tail_call_supported:
 * Decide whether a tail call from CALLER_SIG to CALLEE_SIG is legal on
 * this backend by comparing the two computed CallInfos.  NOTE(review):
 * the tail of this function (result combination, freeing c1/c2, return)
 * is elided from this view.
 */
1693 mono_arch_tail_call_supported (MonoCompile *cfg, MonoMethodSignature *caller_sig, MonoMethodSignature *callee_sig)
1695 MonoType *callee_ret;
1699 if (cfg->compile_aot && !cfg->full_aot)
1700 /* OP_TAILCALL doesn't work with AOT */
/* NULL mempool => get_call_info g_malloc0s; presumably freed in the
 * elided epilogue of this function. */
1703 c1 = get_call_info (NULL, NULL, caller_sig);
1704 c2 = get_call_info (NULL, NULL, callee_sig);
1707 * Tail calls with more callee stack usage than the caller cannot be supported, since
1708 * the extra stack space would be left on the stack after the tail call.
1710 res = c1->stack_usage >= c2->stack_usage;
1711 callee_ret = mini_replace_type (callee_sig->ret);
1712 if (callee_ret && MONO_TYPE_ISSTRUCT (callee_ret) && c2->ret.storage != RegTypeStructByVal)
1713 /* An address on the callee's stack is passed as the first argument */
/* Large callee argument areas are also rejected (16 * 4 bytes). */
1716 if (c2->stack_usage > 16 * 4)
/*
 * debug_omit_fp:
 * Debug hook: delegates to mono_debug_count () so frame-pointer omission
 * can be bisected with the MONO_DEBUG count mechanism.
 */
1728 debug_omit_fp (void)
1731 return mono_debug_count ();
1738 * mono_arch_compute_omit_fp:
1740 * Determine whenever the frame pointer can be eliminated.
/* Computes cfg->arch.omit_fp once (guarded by omit_fp_computed) by
 * starting from TRUE and knocking it back to FALSE for every feature
 * that requires a fixed frame pointer.  NOTE(review): lines are elided
 * in this view (e.g. the early return, loop tail). */
1743 mono_arch_compute_omit_fp (MonoCompile *cfg)
1745 MonoMethodSignature *sig;
1746 MonoMethodHeader *header;
1750 if (cfg->arch.omit_fp_computed)
1753 header = cfg->header;
1755 sig = mono_method_signature (cfg->method);
/* Cache the CallInfo on the cfg so later phases reuse it. */
1757 if (!cfg->arch.cinfo)
1758 cfg->arch.cinfo = get_call_info (cfg->generic_sharing_context, cfg->mempool, sig);
1759 cinfo = cfg->arch.cinfo;
1762 * FIXME: Remove some of the restrictions.
1764 cfg->arch.omit_fp = TRUE;
1765 cfg->arch.omit_fp_computed = TRUE;
1767 if (cfg->disable_omit_fp)
1768 cfg->arch.omit_fp = FALSE;
1769 if (!debug_omit_fp ())
1770 cfg->arch.omit_fp = FALSE;
1772 if (cfg->method->save_lmf)
1773 cfg->arch.omit_fp = FALSE;
1775 if (cfg->flags & MONO_CFG_HAS_ALLOCA)
1776 cfg->arch.omit_fp = FALSE;
1777 if (header->num_clauses)
1778 cfg->arch.omit_fp = FALSE;
1779 if (cfg->param_area)
1780 cfg->arch.omit_fp = FALSE;
1781 if (!sig->pinvoke && (sig->call_convention == MONO_CALL_VARARG))
1782 cfg->arch.omit_fp = FALSE;
1783 if ((mono_jit_trace_calls != NULL && mono_trace_eval (cfg->method)) ||
1784 (cfg->prof_options & MONO_PROFILE_ENTER_LEAVE))
1785 cfg->arch.omit_fp = FALSE;
1786 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
1787 ArgInfo *ainfo = &cinfo->args [i];
/* Stack-resident incoming args need FP-relative addressing. */
1789 if (ainfo->storage == RegTypeBase || ainfo->storage == RegTypeBaseGen || ainfo->storage == RegTypeStructByVal) {
1791 * The stack offset can only be determined when the frame
1794 cfg->arch.omit_fp = FALSE;
/* Sums local sizes — use of locals_size is in elided code below. */
1799 for (i = cfg->locals_start; i < cfg->num_varinfo; i++) {
1800 MonoInst *ins = cfg->varinfo [i];
1803 locals_size += mono_type_size (ins->inst_vtype, &ialign);
1808 * Set var information according to the calling convention. arm version.
1809 * The locals var stuff should most likely be split in another method.
/* Assigns every variable (return value, locals, incoming arguments,
 * JIT-internal slots) a frame register + offset.  Offsets grow upward
 * from the frame register (MONO_CFG_HAS_SPILLUP).  NOTE(review): this
 * view is elided — offset bumps, else-branches and closing braces
 * between visible lines are missing. */
1812 mono_arch_allocate_vars (MonoCompile *cfg)
1814 MonoMethodSignature *sig;
1815 MonoMethodHeader *header;
1818 int i, offset, size, align, curinst;
1822 sig = mono_method_signature (cfg->method);
1824 if (!cfg->arch.cinfo)
1825 cfg->arch.cinfo = get_call_info (cfg->generic_sharing_context, cfg->mempool, sig);
1826 cinfo = cfg->arch.cinfo;
1827 sig_ret = mini_replace_type (sig->ret);
1829 mono_arch_compute_omit_fp (cfg);
/* With omit_fp the SP itself is the frame base, otherwise FP (r11). */
1831 if (cfg->arch.omit_fp)
1832 cfg->frame_reg = ARMREG_SP;
1834 cfg->frame_reg = ARMREG_FP;
1836 cfg->flags |= MONO_CFG_HAS_SPILLUP;
1838 /* allow room for the vararg method args: void* and long/double */
1839 if (mono_jit_trace_calls != NULL && mono_trace_eval (cfg->method))
1840 cfg->param_area = MAX (cfg->param_area, sizeof (gpointer)*8);
1842 header = cfg->header;
1844 /* See mono_arch_get_global_int_regs () */
1845 if (cfg->flags & MONO_CFG_HAS_CALLS)
1846 cfg->uses_rgctx_reg = TRUE;
1848 if (cfg->frame_reg != ARMREG_SP)
1849 cfg->used_int_regs |= 1 << cfg->frame_reg;
1851 if (cfg->compile_aot || cfg->uses_rgctx_reg || COMPILE_LLVM (cfg))
1852 /* V5 is reserved for passing the vtable/rgctx/IMT method */
1853 cfg->used_int_regs |= (1 << ARMREG_V5);
/* Scalar returns live directly in r0. */
1857 if (!MONO_TYPE_ISSTRUCT (sig_ret) && !cinfo->vtype_retaddr) {
1858 if (sig_ret->type != MONO_TYPE_VOID) {
1859 cfg->ret->opcode = OP_REGVAR;
1860 cfg->ret->inst_c0 = ARMREG_R0;
1863 /* local vars are at a positive offset from the stack pointer */
1865 * also note that if the function uses alloca, we use FP
1866 * to point at the local variables.
1868 offset = 0; /* linkage area */
1869 /* align the offset to 16 bytes: not sure this is needed here */
1871 //offset &= ~(8 - 1);
1873 /* add parameter area size for called functions */
1874 offset += cfg->param_area;
1877 if (cfg->flags & MONO_CFG_HAS_FPOUT)
1880 /* allow room to save the return value */
1881 if (mono_jit_trace_calls != NULL && mono_trace_eval (cfg->method))
1884 /* the MonoLMF structure is stored just below the stack pointer */
1885 if (cinfo->ret.storage == RegTypeStructByVal) {
/* Struct-in-registers return gets a pointer-aligned slot at a
 * NEGATIVE offset (note inst_offset = -offset below). */
1886 cfg->ret->opcode = OP_REGOFFSET;
1887 cfg->ret->inst_basereg = cfg->frame_reg;
1888 offset += sizeof (gpointer) - 1;
1889 offset &= ~(sizeof (gpointer) - 1);
1890 cfg->ret->inst_offset = - offset;
1891 offset += sizeof(gpointer);
1892 } else if (cinfo->vtype_retaddr) {
1893 ins = cfg->vret_addr;
1894 offset += sizeof(gpointer) - 1;
1895 offset &= ~(sizeof(gpointer) - 1);
1896 ins->inst_offset = offset;
1897 ins->opcode = OP_REGOFFSET;
1898 ins->inst_basereg = cfg->frame_reg;
1899 if (G_UNLIKELY (cfg->verbose_level > 1)) {
1900 printf ("vret_addr =");
1901 mono_print_ins (cfg->vret_addr);
1903 offset += sizeof(gpointer);
1906 /* Allocate these first so they have a small offset, OP_SEQ_POINT depends on this */
1907 if (cfg->arch.seq_point_info_var) {
1910 ins = cfg->arch.seq_point_info_var;
1914 offset += align - 1;
1915 offset &= ~(align - 1);
1916 ins->opcode = OP_REGOFFSET;
1917 ins->inst_basereg = cfg->frame_reg;
1918 ins->inst_offset = offset;
1921 ins = cfg->arch.ss_trigger_page_var;
1924 offset += align - 1;
1925 offset &= ~(align - 1);
1926 ins->opcode = OP_REGOFFSET;
1927 ins->inst_basereg = cfg->frame_reg;
1928 ins->inst_offset = offset;
/* Soft-breakpoint seq-point variables get slots the same way. */
1932 if (cfg->arch.seq_point_read_var) {
1935 ins = cfg->arch.seq_point_read_var;
1939 offset += align - 1;
1940 offset &= ~(align - 1);
1941 ins->opcode = OP_REGOFFSET;
1942 ins->inst_basereg = cfg->frame_reg;
1943 ins->inst_offset = offset;
1946 ins = cfg->arch.seq_point_ss_method_var;
1949 offset += align - 1;
1950 offset &= ~(align - 1);
1951 ins->opcode = OP_REGOFFSET;
1952 ins->inst_basereg = cfg->frame_reg;
1953 ins->inst_offset = offset;
1956 ins = cfg->arch.seq_point_bp_method_var;
1959 offset += align - 1;
1960 offset &= ~(align - 1);
1961 ins->opcode = OP_REGOFFSET;
1962 ins->inst_basereg = cfg->frame_reg;
1963 ins->inst_offset = offset;
1967 if (cfg->has_atomic_exchange_i4 || cfg->has_atomic_cas_i4 || cfg->has_atomic_add_i4) {
1968 /* Allocate a temporary used by the atomic ops */
1972 /* Allocate a local slot to hold the sig cookie address */
1973 offset += align - 1;
1974 offset &= ~(align - 1);
1975 cfg->arch.atomic_tmp_offset = offset;
1978 cfg->arch.atomic_tmp_offset = -1;
1981 cfg->locals_min_stack_offset = offset;
/* --- IL locals --- */
1983 curinst = cfg->locals_start;
1984 for (i = curinst; i < cfg->num_varinfo; ++i) {
1987 ins = cfg->varinfo [i];
1988 if ((ins->flags & MONO_INST_IS_DEAD) || ins->opcode == OP_REGVAR || ins->opcode == OP_REGOFFSET)
1991 t = ins->inst_vtype;
1992 if (cfg->gsharedvt && mini_is_gsharedvt_variable_type (cfg, t))
1995 /* inst->backend.is_pinvoke indicates native sized value types, this is used by the
1996 * pinvoke wrappers when they call functions returning structure */
1997 if (ins->backend.is_pinvoke && MONO_TYPE_ISSTRUCT (t) && t->type != MONO_TYPE_TYPEDBYREF) {
1998 size = mono_class_native_size (mono_class_from_mono_type (t), &ualign);
2002 size = mono_type_size (t, &align);
2004 /* FIXME: if a structure is misaligned, our memcpy doesn't work,
2005 * since it loads/stores misaligned words, which don't do the right thing.
2007 if (align < 4 && size >= 4)
/* Mark alignment padding as not containing GC references. */
2009 if (ALIGN_TO (offset, align) > ALIGN_TO (offset, 4))
2010 mini_gc_set_slot_type_from_fp (cfg, ALIGN_TO (offset, 4), SLOT_NOREF);
2011 offset += align - 1;
2012 offset &= ~(align - 1);
2013 ins->opcode = OP_REGOFFSET;
2014 ins->inst_offset = offset;
2015 ins->inst_basereg = cfg->frame_reg;
2017 //g_print ("allocating local %d to %d\n", i, inst->inst_offset);
2020 cfg->locals_max_stack_offset = offset;
/* --- 'this' / incoming arguments --- */
2024 ins = cfg->args [curinst];
2025 if (ins->opcode != OP_REGVAR) {
2026 ins->opcode = OP_REGOFFSET;
2027 ins->inst_basereg = cfg->frame_reg;
2028 offset += sizeof (gpointer) - 1;
2029 offset &= ~(sizeof (gpointer) - 1);
2030 ins->inst_offset = offset;
2031 offset += sizeof (gpointer);
2036 if (sig->call_convention == MONO_CALL_VARARG) {
2040 /* Allocate a local slot to hold the sig cookie address */
2041 offset += align - 1;
2042 offset &= ~(align - 1);
2043 cfg->sig_cookie = offset;
2047 for (i = 0; i < sig->param_count; ++i) {
2048 ins = cfg->args [curinst];
2050 if (ins->opcode != OP_REGVAR) {
2051 ins->opcode = OP_REGOFFSET;
2052 ins->inst_basereg = cfg->frame_reg;
2053 size = mini_type_stack_size_full (cfg->generic_sharing_context, sig->params [i], &ualign, sig->pinvoke);
2055 /* FIXME: if a structure is misaligned, our memcpy doesn't work,
2056 * since it loads/stores misaligned words, which don't do the right thing.
2058 if (align < 4 && size >= 4)
2060 /* The code in the prolog () stores words when storing vtypes received in a register */
2061 if (MONO_TYPE_ISSTRUCT (sig->params [i]))
2063 if (ALIGN_TO (offset, align) > ALIGN_TO (offset, 4))
2064 mini_gc_set_slot_type_from_fp (cfg, ALIGN_TO (offset, 4), SLOT_NOREF);
2065 offset += align - 1;
2066 offset &= ~(align - 1);
2067 ins->inst_offset = offset;
2073 /* align the offset to 8 bytes */
2074 if (ALIGN_TO (offset, 8) > ALIGN_TO (offset, 4))
2075 mini_gc_set_slot_type_from_fp (cfg, ALIGN_TO (offset, 4), SLOT_NOREF);
2080 cfg->stack_offset = offset;
/*
 * mono_arch_create_vars:
 * Create the ARM-specific JIT-internal variables for CFG before var
 * allocation: VFP scratch slots (hard-float), the vret address arg, and
 * the sequence-point support variables.  NOTE(review): some lines are
 * elided from this view.
 */
2084 mono_arch_create_vars (MonoCompile *cfg)
2086 MonoMethodSignature *sig;
2090 sig = mono_method_signature (cfg->method);
2092 if (!cfg->arch.cinfo)
2093 cfg->arch.cinfo = get_call_info (cfg->generic_sharing_context, cfg->mempool, sig);
2094 cinfo = cfg->arch.cinfo;
2096 if (IS_HARD_FLOAT) {
/* Two volatile double-sized scratch slots used when shuffling values
 * between core and VFP registers. */
2097 for (i = 0; i < 2; i++) {
2098 MonoInst *inst = mono_compile_create_var (cfg, &mono_defaults.double_class->byval_arg, OP_LOCAL);
2099 inst->flags |= MONO_INST_VOLATILE;
2101 cfg->arch.vfp_scratch_slots [i] = (gpointer) inst;
2105 if (cinfo->ret.storage == RegTypeStructByVal)
2106 cfg->ret_var_is_local = TRUE;
2108 if (cinfo->vtype_retaddr) {
/* Hidden argument holding the address where the vtype return is stored. */
2109 cfg->vret_addr = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_ARG);
2110 if (G_UNLIKELY (cfg->verbose_level > 1)) {
2111 printf ("vret_addr = ");
2112 mono_print_ins (cfg->vret_addr);
2116 if (cfg->gen_seq_points) {
2117 if (cfg->soft_breakpoints) {
/* Soft breakpoints: per-method read/ss/bp variables, JIT-only. */
2118 MonoInst *ins = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
2119 ins->flags |= MONO_INST_VOLATILE;
2120 cfg->arch.seq_point_read_var = ins;
2122 ins = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
2123 ins->flags |= MONO_INST_VOLATILE;
2124 cfg->arch.seq_point_ss_method_var = ins;
2126 ins = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
2127 ins->flags |= MONO_INST_VOLATILE;
2128 cfg->arch.seq_point_bp_method_var = ins;
2130 g_assert (!cfg->compile_aot);
2131 } else if (cfg->compile_aot) {
2132 MonoInst *ins = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
2133 ins->flags |= MONO_INST_VOLATILE;
2134 cfg->arch.seq_point_info_var = ins;
2136 /* Allocate a separate variable for this to save 1 load per seq point */
2137 ins = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
2138 ins->flags |= MONO_INST_VOLATILE;
2139 cfg->arch.ss_trigger_page_var = ins;
/*
 * emit_sig_cookie:
 * For a vararg call, store the "signature cookie" (a trimmed copy of the
 * call signature describing only the variadic part) on the stack at the
 * slot chosen by get_call_info, so mono_ArgIterator can walk the args.
 */
2145 emit_sig_cookie (MonoCompile *cfg, MonoCallInst *call, CallInfo *cinfo)
2147 MonoMethodSignature *tmp_sig;
/* Tail calls don't push a new frame, so no cookie is needed. */
2150 if (call->tail_call)
2153 g_assert (cinfo->sig_cookie.storage == RegTypeBase);
2156 * mono_ArgIterator_Setup assumes the signature cookie is
2157 * passed first and all the arguments which were before it are
2158 * passed on the stack after the signature. So compensate by
2159 * passing a different signature.
2161 tmp_sig = mono_metadata_signature_dup (call->signature);
2162 tmp_sig->param_count -= call->signature->sentinelpos;
2163 tmp_sig->sentinelpos = 0;
2164 memcpy (tmp_sig->params, call->signature->params + call->signature->sentinelpos, tmp_sig->param_count * sizeof (MonoType*));
2166 sig_reg = mono_alloc_ireg (cfg);
2167 MONO_EMIT_NEW_SIGNATURECONST (cfg, sig_reg, tmp_sig);
2169 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, ARMREG_SP, cinfo->sig_cookie.offset, sig_reg);
/*
 * mono_arch_get_llvm_call_info:
 * Translate the backend CallInfo for SIG into an LLVMCallInfo, marking
 * the compile as disable_llvm for any arg/ret convention LLVM can't
 * replicate.  NOTE(review): some lines (returns, closing braces) are
 * elided from this view.
 */
2174 mono_arch_get_llvm_call_info (MonoCompile *cfg, MonoMethodSignature *sig)
2179 LLVMCallInfo *linfo;
2181 n = sig->param_count + sig->hasthis;
2183 cinfo = get_call_info (cfg->generic_sharing_context, cfg->mempool, sig);
2185 linfo = mono_mempool_alloc0 (cfg->mempool, sizeof (LLVMCallInfo) + (sizeof (LLVMArgInfo) * n));
2188 * LLVM always uses the native ABI while we use our own ABI, the
2189 * only difference is the handling of vtypes:
2190 * - we only pass/receive them in registers in some cases, and only
2191 * in 1 or 2 integer registers.
2193 if (cinfo->vtype_retaddr) {
2194 /* Vtype returned using a hidden argument */
2195 linfo->ret.storage = LLVMArgVtypeRetAddr;
2196 linfo->vret_arg_index = cinfo->vret_arg_index;
2197 } else if (cinfo->ret.storage != RegTypeGeneral && cinfo->ret.storage != RegTypeNone && cinfo->ret.storage != RegTypeFP && cinfo->ret.storage != RegTypeIRegPair) {
2198 cfg->exception_message = g_strdup ("unknown ret conv");
2199 cfg->disable_llvm = TRUE;
2203 for (i = 0; i < n; ++i) {
2204 ainfo = cinfo->args + i;
2206 linfo->args [i].storage = LLVMArgNone;
2208 switch (ainfo->storage) {
2209 case RegTypeGeneral:
2210 case RegTypeIRegPair:
2212 linfo->args [i].storage = LLVMArgInIReg;
2214 case RegTypeStructByVal:
2215 // FIXME: Passing entirely on the stack or split reg/stack
2216 if (ainfo->vtsize == 0 && ainfo->size <= 2) {
/* Whole vtype fits in at most two integer registers. */
2217 linfo->args [i].storage = LLVMArgVtypeInReg;
2218 linfo->args [i].pair_storage [0] = LLVMArgInIReg;
2219 if (ainfo->size == 2)
2220 linfo->args [i].pair_storage [1] = LLVMArgInIReg;
2222 linfo->args [i].pair_storage [1] = LLVMArgNone;
2224 cfg->exception_message = g_strdup_printf ("vtype-by-val on stack");
2225 cfg->disable_llvm = TRUE;
2229 cfg->exception_message = g_strdup_printf ("ainfo->storage (%d)", ainfo->storage);
2230 cfg->disable_llvm = TRUE;
/*
 * mono_arch_emit_call:
 * Lower CALL's arguments into IR: moves into the registers / stack stores
 * dictated by the CallInfo for the signature, plus the sig cookie and the
 * hidden vtype-return address when needed.  NOTE(review): this view is
 * elided — many 'break's, else-branches and closing braces between the
 * visible lines are missing; comments annotate visible lines only.
 */
2240 mono_arch_emit_call (MonoCompile *cfg, MonoCallInst *call)
2243 MonoMethodSignature *sig;
2247 sig = call->signature;
2248 n = sig->param_count + sig->hasthis;
2250 cinfo = get_call_info (cfg->generic_sharing_context, NULL, sig);
2252 for (i = 0; i < n; ++i) {
2253 ArgInfo *ainfo = cinfo->args + i;
/* 'this' (i < hasthis) is typed as a plain pointer. */
2256 if (i >= sig->hasthis)
2257 t = sig->params [i - sig->hasthis];
2259 t = &mono_defaults.int_class->byval_arg;
2260 t = mini_type_get_underlying_type (cfg->generic_sharing_context, t);
2262 if ((sig->call_convention == MONO_CALL_VARARG) && (i == sig->sentinelpos)) {
2263 /* Emit the signature cookie just before the implicit arguments */
2264 emit_sig_cookie (cfg, call, cinfo);
2267 in = call->args [i];
2269 switch (ainfo->storage) {
2270 case RegTypeGeneral:
2271 case RegTypeIRegPair:
2272 if (!t->byref && ((t->type == MONO_TYPE_I8) || (t->type == MONO_TYPE_U8))) {
/* 64-bit int: the two component vregs (dreg+1/dreg+2) are moved
 * into consecutive core registers. */
2273 MONO_INST_NEW (cfg, ins, OP_MOVE);
2274 ins->dreg = mono_alloc_ireg (cfg);
2275 ins->sreg1 = in->dreg + 1;
2276 MONO_ADD_INS (cfg->cbb, ins);
2277 mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, ainfo->reg, FALSE);
2279 MONO_INST_NEW (cfg, ins, OP_MOVE);
2280 ins->dreg = mono_alloc_ireg (cfg);
2281 ins->sreg1 = in->dreg + 2;
2282 MONO_ADD_INS (cfg->cbb, ins);
2283 mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, ainfo->reg + 1, FALSE);
2284 } else if (!t->byref && ((t->type == MONO_TYPE_R8) || (t->type == MONO_TYPE_R4))) {
2285 if (ainfo->size == 4) {
2286 if (IS_SOFT_FLOAT) {
2287 /* mono_emit_call_args () have already done the r8->r4 conversion */
2288 /* The converted value is in an int vreg */
2289 MONO_INST_NEW (cfg, ins, OP_MOVE);
2290 ins->dreg = mono_alloc_ireg (cfg);
2291 ins->sreg1 = in->dreg;
2292 MONO_ADD_INS (cfg->cbb, ins);
2293 mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, ainfo->reg, FALSE);
/* VFP: bounce the float through the param area to move it from an
 * FP vreg into a core register. */
2297 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORER4_MEMBASE_REG, ARMREG_SP, (cfg->param_area - 8), in->dreg);
2298 creg = mono_alloc_ireg (cfg);
2299 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOAD_MEMBASE, creg, ARMREG_SP, (cfg->param_area - 8));
2300 mono_call_inst_add_outarg_reg (cfg, call, creg, ainfo->reg, FALSE);
2303 if (IS_SOFT_FLOAT) {
/* Soft-float double: split into low/high 32-bit halves. */
2304 MONO_INST_NEW (cfg, ins, OP_FGETLOW32);
2305 ins->dreg = mono_alloc_ireg (cfg);
2306 ins->sreg1 = in->dreg;
2307 MONO_ADD_INS (cfg->cbb, ins);
2308 mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, ainfo->reg, FALSE);
2310 MONO_INST_NEW (cfg, ins, OP_FGETHIGH32);
2311 ins->dreg = mono_alloc_ireg (cfg);
2312 ins->sreg1 = in->dreg;
2313 MONO_ADD_INS (cfg->cbb, ins);
2314 mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, ainfo->reg + 1, FALSE);
/* VFP double: bounce through the param area into a register pair. */
2318 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORER8_MEMBASE_REG, ARMREG_SP, (cfg->param_area - 8), in->dreg);
2319 creg = mono_alloc_ireg (cfg);
2320 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOAD_MEMBASE, creg, ARMREG_SP, (cfg->param_area - 8));
2321 mono_call_inst_add_outarg_reg (cfg, call, creg, ainfo->reg, FALSE);
2322 creg = mono_alloc_ireg (cfg);
2323 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOAD_MEMBASE, creg, ARMREG_SP, (cfg->param_area - 8 + 4));
2324 mono_call_inst_add_outarg_reg (cfg, call, creg, ainfo->reg + 1, FALSE);
2327 cfg->flags |= MONO_CFG_HAS_FPOUT;
/* Plain single-word value: simple move into the arg register. */
2329 MONO_INST_NEW (cfg, ins, OP_MOVE);
2330 ins->dreg = mono_alloc_ireg (cfg);
2331 ins->sreg1 = in->dreg;
2332 MONO_ADD_INS (cfg->cbb, ins);
2334 mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, ainfo->reg, FALSE);
2337 case RegTypeStructByAddr:
2340 /* FIXME: where si the data allocated? */
2341 arg->backend.reg3 = ainfo->reg;
2342 call->used_iregs |= 1 << ainfo->reg;
/* Path is unimplemented — deliberately unreachable. */
2343 g_assert_not_reached ();
2346 case RegTypeStructByVal:
2347 case RegTypeGSharedVtInReg:
2348 case RegTypeGSharedVtOnStack:
/* Deferred to mono_arch_emit_outarg_vt via OP_OUTARG_VT, carrying a
 * private copy of the ArgInfo. */
2349 MONO_INST_NEW (cfg, ins, OP_OUTARG_VT);
2350 ins->opcode = OP_OUTARG_VT;
2351 ins->sreg1 = in->dreg;
2352 ins->klass = in->klass;
2353 ins->inst_p0 = call;
2354 ins->inst_p1 = mono_mempool_alloc (cfg->mempool, sizeof (ArgInfo));
2355 memcpy (ins->inst_p1, ainfo, sizeof (ArgInfo));
2356 mono_call_inst_add_outarg_vt (cfg, call, ins);
2357 MONO_ADD_INS (cfg->cbb, ins);
/* RegTypeBase (stack) stores, by type width: */
2360 if (!t->byref && ((t->type == MONO_TYPE_I8) || (t->type == MONO_TYPE_U8))) {
2361 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, ARMREG_SP, ainfo->offset, in->dreg);
2362 } else if (!t->byref && ((t->type == MONO_TYPE_R4) || (t->type == MONO_TYPE_R8))) {
2363 if (t->type == MONO_TYPE_R8) {
2364 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORER8_MEMBASE_REG, ARMREG_SP, ainfo->offset, in->dreg);
2367 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, ARMREG_SP, ainfo->offset, in->dreg);
2369 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORER4_MEMBASE_REG, ARMREG_SP, ainfo->offset, in->dreg);
2372 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, ARMREG_SP, ainfo->offset, in->dreg);
2375 case RegTypeBaseGen:
/* Split i64: one half on the stack, the other in r3; which half is
 * which depends on endianness. */
2376 if (!t->byref && ((t->type == MONO_TYPE_I8) || (t->type == MONO_TYPE_U8))) {
2377 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, ARMREG_SP, ainfo->offset, (G_BYTE_ORDER == G_BIG_ENDIAN) ? in->dreg + 1 : in->dreg + 2);
2378 MONO_INST_NEW (cfg, ins, OP_MOVE);
2379 ins->dreg = mono_alloc_ireg (cfg);
2380 ins->sreg1 = G_BYTE_ORDER == G_BIG_ENDIAN ? in->dreg + 2 : in->dreg + 1;
2381 MONO_ADD_INS (cfg->cbb, ins);
2382 mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, ARMREG_R3, FALSE);
2383 } else if (!t->byref && (t->type == MONO_TYPE_R8)) {
2386 /* This should work for soft-float as well */
2388 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORER8_MEMBASE_REG, ARMREG_SP, (cfg->param_area - 8), in->dreg);
2389 creg = mono_alloc_ireg (cfg);
2390 mono_call_inst_add_outarg_reg (cfg, call, creg, ARMREG_R3, FALSE);
2391 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOAD_MEMBASE, creg, ARMREG_SP, (cfg->param_area - 8));
2392 creg = mono_alloc_ireg (cfg);
2393 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOAD_MEMBASE, creg, ARMREG_SP, (cfg->param_area - 4));
2394 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, ARMREG_SP, ainfo->offset, creg);
2395 cfg->flags |= MONO_CFG_HAS_FPOUT;
2397 g_assert_not_reached ();
/* RegTypeFP (hard-float VFP argument registers): */
2401 int fdreg = mono_alloc_freg (cfg);
2403 if (ainfo->size == 8) {
2404 MONO_INST_NEW (cfg, ins, OP_FMOVE);
2405 ins->sreg1 = in->dreg;
2407 MONO_ADD_INS (cfg->cbb, ins);
2409 mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, ainfo->reg, TRUE);
2414 * Mono's register allocator doesn't speak single-precision registers that
2415 * overlap double-precision registers (i.e. armhf). So we have to work around
2416 * the register allocator and load the value from memory manually.
2418 * So we create a variable for the float argument and an instruction to store
2419 * the argument into the variable. We then store the list of these arguments
2420 * in cfg->float_args. This list is then used by emit_float_args later to
2421 * pass the arguments in the various call opcodes.
2423 * This is not very nice, and we should really try to fix the allocator.
2426 MonoInst *float_arg = mono_compile_create_var (cfg, &mono_defaults.single_class->byval_arg, OP_LOCAL);
2428 /* Make sure the instruction isn't seen as pointless and removed.
2430 float_arg->flags |= MONO_INST_VOLATILE;
2432 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, float_arg->dreg, in->dreg);
2434 /* We use the dreg to look up the instruction later. The hreg is used to
2435 * emit the instruction that loads the value into the FP reg.
2437 fad = mono_mempool_alloc0 (cfg->mempool, sizeof (FloatArgData));
2438 fad->vreg = float_arg->dreg;
2439 fad->hreg = ainfo->reg;
2441 call->float_args = g_slist_append_mempool (cfg->mempool, call->float_args, fad);
2444 call->used_iregs |= 1 << ainfo->reg;
2445 cfg->flags |= MONO_CFG_HAS_FPOUT;
2449 g_assert_not_reached ();
2453 /* Handle the case where there are no implicit arguments */
2454 if (!sig->pinvoke && (sig->call_convention == MONO_CALL_VARARG) && (n == sig->sentinelpos))
2455 emit_sig_cookie (cfg, call, cinfo);
2457 if (cinfo->ret.storage == RegTypeStructByVal) {
2458 /* The JIT will transform this into a normal call */
2459 call->vret_in_reg = TRUE;
2460 } else if (cinfo->vtype_retaddr) {
/* Pass the address of the vret buffer as the hidden argument. */
2462 MONO_INST_NEW (cfg, vtarg, OP_MOVE);
2463 vtarg->sreg1 = call->vret_var->dreg;
2464 vtarg->dreg = mono_alloc_preg (cfg);
2465 MONO_ADD_INS (cfg->cbb, vtarg);
2467 mono_call_inst_add_outarg_reg (cfg, call, vtarg->dreg, cinfo->ret.reg, FALSE);
2470 call->stack_usage = cinfo->stack_usage;
/*
 * mono_arch_emit_outarg_vt:
 * Lower an OP_OUTARG_VT: copy a value-type argument (SRC) into the
 * registers and/or caller stack area described by the ArgInfo attached
 * to INS by mono_arch_emit_call.  NOTE(review): some lines (returns,
 * soffset init) are elided from this view.
 */
2476 mono_arch_emit_outarg_vt (MonoCompile *cfg, MonoInst *ins, MonoInst *src)
2478 MonoCallInst *call = (MonoCallInst*)ins->inst_p0;
2479 ArgInfo *ainfo = ins->inst_p1;
2480 int ovf_size = ainfo->vtsize;
2481 int doffset = ainfo->offset;
2482 int struct_size = ainfo->struct_size;
2483 int i, soffset, dreg, tmpreg;
2485 if (ainfo->storage == RegTypeGSharedVtInReg) {
/* gsharedvt: pass the address, not the value. */
2487 mono_call_inst_add_outarg_reg (cfg, call, src->dreg, ainfo->reg, FALSE);
2490 if (ainfo->storage == RegTypeGSharedVtOnStack) {
2491 /* Pass by addr on stack */
2492 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, ARMREG_SP, ainfo->offset, src->dreg);
/* First ainfo->size words go in registers; partial trailing words are
 * assembled byte-by-byte to avoid over-reading the struct. */
2497 for (i = 0; i < ainfo->size; ++i) {
2498 dreg = mono_alloc_ireg (cfg);
2499 switch (struct_size) {
2501 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, dreg, src->dreg, soffset);
2504 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, dreg, src->dreg, soffset);
/* 3-byte remainder: compose from three byte loads + shifts/ors. */
2507 tmpreg = mono_alloc_ireg (cfg);
2508 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, dreg, src->dreg, soffset);
2509 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, tmpreg, src->dreg, soffset + 1);
2510 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, tmpreg, tmpreg, 8);
2511 MONO_EMIT_NEW_BIALU (cfg, OP_IOR, dreg, dreg, tmpreg);
2512 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, tmpreg, src->dreg, soffset + 2);
2513 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, tmpreg, tmpreg, 16);
2514 MONO_EMIT_NEW_BIALU (cfg, OP_IOR, dreg, dreg, tmpreg);
2517 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, dreg, src->dreg, soffset);
2520 mono_call_inst_add_outarg_reg (cfg, call, dreg, ainfo->reg + i, FALSE);
2521 soffset += sizeof (gpointer);
2522 struct_size -= sizeof (gpointer);
2524 //g_print ("vt size: %d at R%d + %d\n", doffset, vt->inst_basereg, vt->inst_offset);
/* Remaining (overflow) words are memcpy'd to the stack area. */
2526 mini_emit_memcpy (cfg, ARMREG_SP, doffset, src->dreg, soffset, MIN (ovf_size * sizeof (gpointer), struct_size), struct_size < 4 ? 1 : 4);
/*
 * mono_arch_emit_setret:
 * Emit IR that moves VAL into the return-value location for METHOD.
 * 64-bit values use OP_SETLRET with the two halves of the vreg pair
 * (dreg + 1 = low, dreg + 2 = high by the JIT's long-pair convention —
 * established by the OP_SETLRET emission below); FP values use
 * OP_SETFRET or a plain move depending on the FPU configuration.
 * NOTE(review): elided listing — else branches, breaks and the
 * enclosing switch header are not visible here.
 */
2530 mono_arch_emit_setret (MonoCompile *cfg, MonoMethod *method, MonoInst *val)
2532 MonoType *ret = mini_type_get_underlying_type (cfg->generic_sharing_context, mono_method_signature (method)->ret);
2535 if (ret->type == MONO_TYPE_I8 || ret->type == MONO_TYPE_U8) {
2538 if (COMPILE_LLVM (cfg)) {
2539 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, cfg->ret->dreg, val->dreg);
/* Non-LLVM: set both halves of the 64-bit return pair. */
2541 MONO_INST_NEW (cfg, ins, OP_SETLRET);
2542 ins->sreg1 = val->dreg + 1;
2543 ins->sreg2 = val->dreg + 2;
2544 MONO_ADD_INS (cfg->cbb, ins);
/* Soft-float: R8 still needs OP_SETFRET; R4 is already an int. */
2549 case MONO_ARM_FPU_NONE:
2550 if (ret->type == MONO_TYPE_R8) {
2553 MONO_INST_NEW (cfg, ins, OP_SETFRET);
2554 ins->dreg = cfg->ret->dreg;
2555 ins->sreg1 = val->dreg;
2556 MONO_ADD_INS (cfg->cbb, ins);
2559 if (ret->type == MONO_TYPE_R4) {
2560 /* Already converted to an int in method_to_ir () */
2561 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, cfg->ret->dreg, val->dreg);
/* VFP (soft or hard ABI): both R4 and R8 go through OP_SETFRET. */
2565 case MONO_ARM_FPU_VFP:
2566 case MONO_ARM_FPU_VFP_HARD:
2567 if (ret->type == MONO_TYPE_R8 || ret->type == MONO_TYPE_R4) {
2570 MONO_INST_NEW (cfg, ins, OP_SETFRET);
2571 ins->dreg = cfg->ret->dreg;
2572 ins->sreg1 = val->dreg;
2573 MONO_ADD_INS (cfg->cbb, ins);
2578 g_assert_not_reached ();
/* Default: plain integer/pointer return. */
2582 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, cfg->ret->dreg, val->dreg);
2585 #endif /* #ifndef DISABLE_JIT */
2588 mono_arch_is_inst_imm (gint64 imm)
2594 MonoMethodSignature *sig;
2597 MonoType **param_types;
/*
 * dyn_call_supported:
 * Decide whether the dynamic-call (mono_arch_start_dyn_call) machinery
 * can handle a call with signature SIG / call info CINFO: all arguments
 * must fit into PARAM_REGS registers plus DYN_CALL_STACK_ARGS stack
 * slots, and the return/argument storage kinds must be among the
 * supported ones.  Returns a gboolean (return statements elided here).
 * NOTE(review): elided listing — the FALSE/TRUE returns and several
 * case labels are not visible.
 */
2601 dyn_call_supported (CallInfo *cinfo, MonoMethodSignature *sig)
/* Too many args to fit registers + reserved stack slots? */
2605 if (sig->hasthis + sig->param_count > PARAM_REGS + DYN_CALL_STACK_ARGS)
2608 switch (cinfo->ret.storage) {
2610 case RegTypeGeneral:
2611 case RegTypeIRegPair:
2612 case RegTypeStructByAddr:
/* Check that every argument lands inside the supported slot range. */
2623 for (i = 0; i < cinfo->nargs; ++i) {
2624 ArgInfo *ainfo = &cinfo->args [i];
2627 switch (ainfo->storage) {
2628 case RegTypeGeneral:
2630 case RegTypeIRegPair:
2633 if (ainfo->offset >= (DYN_CALL_STACK_ARGS * sizeof (gpointer)))
2636 case RegTypeStructByVal:
/* size == 0 means fully stack-passed; compute the last slot used. */
2637 if (ainfo->size == 0)
2638 last_slot = PARAM_REGS + (ainfo->offset / 4) + ainfo->vtsize;
2640 last_slot = ainfo->reg + ainfo->size + ainfo->vtsize;
2641 if (last_slot >= PARAM_REGS + DYN_CALL_STACK_ARGS)
2649 // FIXME: Can't use cinfo only as it doesn't contain info about I8/float */
2650 for (i = 0; i < sig->param_count; ++i) {
2651 MonoType *t = sig->params [i];
/*
 * mono_arch_dyn_call_prepare:
 * Build the arch-specific dynamic-call descriptor for SIG: computes the
 * CallInfo, verifies the signature is supported, and caches the
 * underlying return/parameter types for use by start_dyn_call ().
 * Returns NULL for unsupported signatures (the early return inside the
 * !dyn_call_supported branch is elided from this listing; presumably it
 * also frees cinfo — TODO confirm against the full source).
 * Ownership: the returned MonoDynCallInfo must be released with
 * mono_arch_dyn_call_free ().
 */
2677 mono_arch_dyn_call_prepare (MonoMethodSignature *sig)
2679 ArchDynCallInfo *info;
2683 cinfo = get_call_info (NULL, NULL, sig);
2685 if (!dyn_call_supported (cinfo, sig)) {
2690 info = g_new0 (ArchDynCallInfo, 1);
2691 // FIXME: Preprocess the info to speed up start_dyn_call ()
2693 info->cinfo = cinfo;
2694 info->rtype = mini_replace_type (sig->ret);
2695 info->param_types = g_new0 (MonoType*, sig->param_count);
2696 for (i = 0; i < sig->param_count; ++i)
2697 info->param_types [i] = mini_replace_type (sig->params [i]);
2699 return (MonoDynCallInfo*)info;
/*
 * mono_arch_dyn_call_free:
 * Release a descriptor allocated by mono_arch_dyn_call_prepare ().
 * NOTE(review): only the g_free of cinfo is visible; the param_types
 * array and the ArchDynCallInfo itself, both allocated in prepare, are
 * not freed in the visible lines — confirm the elided lines free them,
 * otherwise this leaks.
 */
2703 mono_arch_dyn_call_free (MonoDynCallInfo *info)
2705 ArchDynCallInfo *ainfo = (ArchDynCallInfo*)info;
2707 g_free (ainfo->cinfo);
/*
 * mono_arch_start_dyn_call:
 * Marshal ARGS into the DynCallArgs register/stack image in BUF so that
 * a generic trampoline can perform the call described by INFO.  The
 * first PARAM_REGS entries of p->regs map to r0-r3, later entries to
 * the reserved stack slots.  RET receives the vtype return address when
 * the callee returns a vtype by reference.
 * NOTE(review): elided listing — greg/pindex initialization, the type
 * switch header, several case labels and all breaks are not visible.
 */
2712 mono_arch_start_dyn_call (MonoDynCallInfo *info, gpointer **args, guint8 *ret, guint8 *buf, int buf_len)
2714 ArchDynCallInfo *dinfo = (ArchDynCallInfo*)info;
2715 DynCallArgs *p = (DynCallArgs*)buf;
2716 int arg_index, greg, i, j, pindex;
2717 MonoMethodSignature *sig = dinfo->sig;
/* Caller must supply a buffer big enough for the full register image. */
2719 g_assert (buf_len >= sizeof (DynCallArgs));
/* 'this' (or the vret arg when it is passed first) goes in the first reg. */
2728 if (sig->hasthis || dinfo->cinfo->vret_arg_index == 1) {
2729 p->regs [greg ++] = (mgreg_t)*(args [arg_index ++]);
2734 if (dinfo->cinfo->vtype_retaddr)
2735 p->regs [greg ++] = (mgreg_t)ret;
2737 for (i = pindex; i < sig->param_count; i++) {
2738 MonoType *t = dinfo->param_types [i];
2739 gpointer *arg = args [arg_index ++];
2740 ArgInfo *ainfo = &dinfo->cinfo->args [i + sig->hasthis];
/* Map the argument's storage to a slot index in p->regs. */
2743 if (ainfo->storage == RegTypeGeneral || ainfo->storage == RegTypeIRegPair || ainfo->storage == RegTypeStructByVal)
2745 else if (ainfo->storage == RegTypeBase)
2746 slot = PARAM_REGS + (ainfo->offset / 4);
2748 g_assert_not_reached ();
2751 p->regs [slot] = (mgreg_t)*arg;
/* Reference types: store the object pointer itself. */
2756 case MONO_TYPE_STRING:
2757 case MONO_TYPE_CLASS:
2758 case MONO_TYPE_ARRAY:
2759 case MONO_TYPE_SZARRAY:
2760 case MONO_TYPE_OBJECT:
2764 p->regs [slot] = (mgreg_t)*arg;
/* Small integers are widened to a full machine word. */
2766 case MONO_TYPE_BOOLEAN:
2768 p->regs [slot] = *(guint8*)arg;
2771 p->regs [slot] = *(gint8*)arg;
2774 p->regs [slot] = *(gint16*)arg;
2777 case MONO_TYPE_CHAR:
2778 p->regs [slot] = *(guint16*)arg;
2781 p->regs [slot] = *(gint32*)arg;
2784 p->regs [slot] = *(guint32*)arg;
/* 64-bit values occupy two consecutive slots. */
2788 p->regs [slot ++] = (mgreg_t)arg [0];
2789 p->regs [slot] = (mgreg_t)arg [1];
2792 p->regs [slot] = *(mgreg_t*)arg;
2795 p->regs [slot ++] = (mgreg_t)arg [0];
2796 p->regs [slot] = (mgreg_t)arg [1];
2798 case MONO_TYPE_GENERICINST:
2799 if (MONO_TYPE_IS_REFERENCE (t)) {
2800 p->regs [slot] = (mgreg_t)*arg;
2805 case MONO_TYPE_VALUETYPE:
2806 g_assert (ainfo->storage == RegTypeStructByVal);
2808 if (ainfo->size == 0)
2809 slot = PARAM_REGS + (ainfo->offset / 4);
/* Copy the struct word by word into its register/stack slots. */
2813 for (j = 0; j < ainfo->size + ainfo->vtsize; ++j)
2814 p->regs [slot ++] = ((mgreg_t*)arg) [j];
2817 g_assert_not_reached ();
/*
 * mono_arch_finish_dyn_call:
 * After a dynamic call, copy the raw register results (res = r0,
 * res2 = r1) out of BUF into the typed return buffer ret, widening or
 * narrowing according to the static return type in INFO.
 * NOTE(review): elided listing — several case labels, breaks and the
 * R8 local declarations are not visible.
 */
2823 mono_arch_finish_dyn_call (MonoDynCallInfo *info, guint8 *buf)
2825 ArchDynCallInfo *ainfo = (ArchDynCallInfo*)info;
2826 MonoType *ptype = ainfo->rtype;
2827 guint8 *ret = ((DynCallArgs*)buf)->ret;
2828 mgreg_t res = ((DynCallArgs*)buf)->res;
2829 mgreg_t res2 = ((DynCallArgs*)buf)->res2;
2831 switch (ptype->type) {
2832 case MONO_TYPE_VOID:
2833 *(gpointer*)ret = NULL;
2835 case MONO_TYPE_STRING:
2836 case MONO_TYPE_CLASS:
2837 case MONO_TYPE_ARRAY:
2838 case MONO_TYPE_SZARRAY:
2839 case MONO_TYPE_OBJECT:
2843 *(gpointer*)ret = (gpointer)res;
2849 case MONO_TYPE_BOOLEAN:
2850 *(guint8*)ret = res;
2853 *(gint16*)ret = res;
2856 case MONO_TYPE_CHAR:
2857 *(guint16*)ret = res;
2860 *(gint32*)ret = res;
2863 *(guint32*)ret = res;
/* 64-bit result: write both words. */
2867 /* This handles endianness as well */
2868 ((gint32*)ret) [0] = res;
2869 ((gint32*)ret) [1] = res2;
2871 case MONO_TYPE_GENERICINST:
2872 if (MONO_TYPE_IS_REFERENCE (ptype)) {
2873 *(gpointer*)ret = (gpointer)res;
2878 case MONO_TYPE_VALUETYPE:
/* Vtype results were written through the retaddr during the call. */
2879 g_assert (ainfo->cinfo->vtype_retaddr);
2884 *(float*)ret = *(float*)&res;
2886 case MONO_TYPE_R8: {
/* NOTE(review): "®s" below is a mis-encoded "&regs" (a local
 * mgreg_t regs[2] holding res/res2, declared on elided lines).
 * Restore "*(double*)&regs" from the upstream source. */
2893 *(double*)ret = *(double*)®s;
2897 g_assert_not_reached ();
2904 * Allow tracing to work with this interface (with an optional argument)
/*
 * mono_arch_instrument_prolog:
 * Emit a call to the tracing function FUNC at method entry, passing the
 * MonoMethod in r0, a (for now NULL) frame pointer in r1, and FUNC's
 * own address in r2 before calling through r2.
 * NOTE(review): the enclosing braces, 'code' initialization and return
 * are on elided lines.
 */
2908 mono_arch_instrument_prolog (MonoCompile *cfg, void *func, void *p, gboolean enable_arguments)
2912 code = mono_arm_emit_load_imm (code, ARMREG_R0, (guint32)cfg->method);
2913 ARM_MOV_REG_IMM8 (code, ARMREG_R1, 0); /* NULL ebp for now */
2914 code = mono_arm_emit_load_imm (code, ARMREG_R2, (guint32)func);
2915 code = emit_call_reg (code, ARMREG_R2);
/*
 * mono_arch_instrument_epilog_full:
 * Emit a call to the tracing function FUNC at method exit.  The return
 * value (in r0/r1 or d0/s0 depending on the type) is spilled to the
 * param area around the call and reloaded afterwards; when
 * enable_arguments is set it is also copied into the argument registers
 * of FUNC.
 * NOTE(review): elided listing — the save-mode switch header, breaks
 * and several case labels are not visible.
 */
2929 mono_arch_instrument_epilog_full (MonoCompile *cfg, void *func, void *p, gboolean enable_arguments, gboolean preserve_argument_registers)
2932 int save_mode = SAVE_NONE;
2934 MonoMethod *method = cfg->method;
2935 MonoType *ret_type = mini_type_get_underlying_type (cfg->generic_sharing_context, mono_method_signature (method)->ret);
2936 int rtype = ret_type->type;
2937 int save_offset = cfg->param_area;
/* Grow the code buffer if the ~16 instructions below might not fit. */
2941 offset = code - cfg->native_code;
2942 /* we need about 16 instructions */
2943 if (offset > (cfg->code_size - 16 * 4)) {
2944 cfg->code_size *= 2;
2945 cfg->native_code = g_realloc (cfg->native_code, cfg->code_size);
2946 code = cfg->native_code + offset;
2949 case MONO_TYPE_VOID:
2950 /* special case string .ctor icall */
/* NOTE(review): "strcmp (...) &&" is true when the name is NOT
 * ".ctor", which contradicts the comment above; other backends use
 * !strcmp here.  Confirm against upstream before changing. */
2951 if (strcmp (".ctor", method->name) && method->klass == mono_defaults.string_class)
2952 save_mode = SAVE_ONE;
2954 save_mode = SAVE_NONE;
2958 save_mode = SAVE_TWO;
2962 save_mode = SAVE_ONE_FP;
2964 save_mode = SAVE_ONE;
2968 save_mode = SAVE_TWO_FP;
2970 save_mode = SAVE_TWO;
2972 case MONO_TYPE_GENERICINST:
2973 if (!mono_type_generic_inst_is_valuetype (ret_type)) {
2974 save_mode = SAVE_ONE;
2978 case MONO_TYPE_VALUETYPE:
2979 save_mode = SAVE_STRUCT;
2982 save_mode = SAVE_ONE;
/* Spill the return value and set up FUNC's arguments. */
2986 switch (save_mode) {
2988 ARM_STR_IMM (code, ARMREG_R0, cfg->frame_reg, save_offset);
2989 ARM_STR_IMM (code, ARMREG_R1, cfg->frame_reg, save_offset + 4);
2990 if (enable_arguments) {
2991 ARM_MOV_REG_REG (code, ARMREG_R2, ARMREG_R1);
2992 ARM_MOV_REG_REG (code, ARMREG_R1, ARMREG_R0);
2996 ARM_STR_IMM (code, ARMREG_R0, cfg->frame_reg, save_offset);
2997 if (enable_arguments) {
2998 ARM_MOV_REG_REG (code, ARMREG_R1, ARMREG_R0);
3002 ARM_FSTS (code, ARM_VFP_F0, cfg->frame_reg, save_offset);
3003 if (enable_arguments) {
3004 ARM_FMRS (code, ARMREG_R1, ARM_VFP_F0);
3008 ARM_FSTD (code, ARM_VFP_D0, cfg->frame_reg, save_offset);
3009 if (enable_arguments) {
3010 ARM_FMDRR (code, ARMREG_R1, ARMREG_R2, ARM_VFP_D0);
3014 if (enable_arguments) {
3015 /* FIXME: get the actual address */
3016 ARM_MOV_REG_REG (code, ARMREG_R1, ARMREG_R0);
/* Call FUNC (method in r0, address loaded into IP). */
3024 code = mono_arm_emit_load_imm (code, ARMREG_R0, (guint32)cfg->method);
3025 code = mono_arm_emit_load_imm (code, ARMREG_IP, (guint32)func);
3026 code = emit_call_reg (code, ARMREG_IP);
/* Restore the spilled return value. */
3028 switch (save_mode) {
3030 ARM_LDR_IMM (code, ARMREG_R0, cfg->frame_reg, save_offset);
3031 ARM_LDR_IMM (code, ARMREG_R1, cfg->frame_reg, save_offset + 4);
3034 ARM_LDR_IMM (code, ARMREG_R0, cfg->frame_reg, save_offset);
3037 ARM_FLDS (code, ARM_VFP_F0, cfg->frame_reg, save_offset);
3040 ARM_FLDD (code, ARM_VFP_D0, cfg->frame_reg, save_offset);
3051 * The immediate field for cond branches is big enough for all reasonable methods
/*
 * Conditional-branch / implicit-exception emission macros.
 * EMIT_COND_BRANCH_FLAGS: the direct-offset fast path is disabled
 * ("if (0 && ...)"), so every conditional branch is emitted as a
 * 0-displacement ARM_B_COND plus a MONO_PATCH_INFO_BB patch that is
 * resolved later.  EMIT_COND_SYSTEM_EXCEPTION_FLAGS likewise records a
 * MONO_PATCH_INFO_EXC patch and emits a conditional BL to be patched.
 * (No comments are inserted between the backslash-continued lines —
 * a non-continued line would terminate the macro definitions.)
 */
3053 #define EMIT_COND_BRANCH_FLAGS(ins,condcode) \
3054 if (0 && ins->inst_true_bb->native_offset) { \
3055 ARM_B_COND (code, (condcode), (code - cfg->native_code + ins->inst_true_bb->native_offset) & 0xffffff); \
3057 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_BB, ins->inst_true_bb); \
3058 ARM_B_COND (code, (condcode), 0); \
3061 #define EMIT_COND_BRANCH(ins,cond) EMIT_COND_BRANCH_FLAGS(ins, branch_cc_table [(cond)])
3063 /* emit an exception if condition is fail
3065 * We assign the extra code used to throw the implicit exceptions
3066 * to cfg->bb_exit as far as the big branch handling is concerned
3068 #define EMIT_COND_SYSTEM_EXCEPTION_FLAGS(condcode,exc_name) \
3070 mono_add_patch_info (cfg, code - cfg->native_code, \
3071 MONO_PATCH_INFO_EXC, exc_name); \
3072 ARM_BL_COND (code, (condcode), 0); \
3075 #define EMIT_COND_SYSTEM_EXCEPTION(cond,exc_name) EMIT_COND_SYSTEM_EXCEPTION_FLAGS(branch_cc_table [(cond)], (exc_name))
3078 mono_arch_peephole_pass_1 (MonoCompile *cfg, MonoBasicBlock *bb)
/*
 * mono_arch_peephole_pass_2:
 * Local peephole pass over a basic block: collapses store/load and
 * load/load pairs on the same [basereg + offset] address into moves,
 * converts narrow store/load pairs into sign/zero extensions, and
 * removes redundant or mutually-cancelling OP_MOVEs.
 * NOTE(review): elided listing — case labels, breaks and the
 * last_ins bookkeeping lines are not all visible.
 */
3083 mono_arch_peephole_pass_2 (MonoCompile *cfg, MonoBasicBlock *bb)
3085 MonoInst *ins, *n, *last_ins = NULL;
3087 MONO_BB_FOR_EACH_INS_SAFE (bb, n, ins) {
3088 switch (ins->opcode) {
3091 /* Already done by an arch-independent pass */
3093 case OP_LOAD_MEMBASE:
3094 case OP_LOADI4_MEMBASE:
/* store reg -> [base+off]; load [base+off] -> reg  ==>  move */
3096 * OP_STORE_MEMBASE_REG reg, offset(basereg)
3097 * OP_LOAD_MEMBASE offset(basereg), reg
3099 if (last_ins && (last_ins->opcode == OP_STOREI4_MEMBASE_REG
3100 || last_ins->opcode == OP_STORE_MEMBASE_REG) &&
3101 ins->inst_basereg == last_ins->inst_destbasereg &&
3102 ins->inst_offset == last_ins->inst_offset) {
3103 if (ins->dreg == last_ins->sreg1) {
3104 MONO_DELETE_INS (bb, ins);
3107 //static int c = 0; printf ("MATCHX %s %d\n", cfg->method->name,c++);
3108 ins->opcode = OP_MOVE;
3109 ins->sreg1 = last_ins->sreg1;
/* two loads of the same address  ==>  second becomes a move */
3113 * Note: reg1 must be different from the basereg in the second load
3114 * OP_LOAD_MEMBASE offset(basereg), reg1
3115 * OP_LOAD_MEMBASE offset(basereg), reg2
3117 * OP_LOAD_MEMBASE offset(basereg), reg1
3118 * OP_MOVE reg1, reg2
3120 } if (last_ins && (last_ins->opcode == OP_LOADI4_MEMBASE
3121 || last_ins->opcode == OP_LOAD_MEMBASE) &&
3122 ins->inst_basereg != last_ins->dreg &&
3123 ins->inst_basereg == last_ins->inst_basereg &&
3124 ins->inst_offset == last_ins->inst_offset) {
3126 if (ins->dreg == last_ins->dreg) {
3127 MONO_DELETE_INS (bb, ins);
3130 ins->opcode = OP_MOVE;
3131 ins->sreg1 = last_ins->dreg;
3134 //g_assert_not_reached ();
/* store imm; load same address  ==>  load becomes ICONST
 * (guarded by g_assert_not_reached below: rule never validated) */
3138 * OP_STORE_MEMBASE_IMM imm, offset(basereg)
3139 * OP_LOAD_MEMBASE offset(basereg), reg
3141 * OP_STORE_MEMBASE_IMM imm, offset(basereg)
3142 * OP_ICONST reg, imm
3144 } else if (last_ins && (last_ins->opcode == OP_STOREI4_MEMBASE_IMM
3145 || last_ins->opcode == OP_STORE_MEMBASE_IMM) &&
3146 ins->inst_basereg == last_ins->inst_destbasereg &&
3147 ins->inst_offset == last_ins->inst_offset) {
3148 //static int c = 0; printf ("MATCHX %s %d\n", cfg->method->name,c++);
3149 ins->opcode = OP_ICONST;
3150 ins->inst_c0 = last_ins->inst_imm;
3151 g_assert_not_reached (); // check this rule
/* narrow store then narrow load  ==>  in-register extension */
3155 case OP_LOADU1_MEMBASE:
3156 case OP_LOADI1_MEMBASE:
3157 if (last_ins && (last_ins->opcode == OP_STOREI1_MEMBASE_REG) &&
3158 ins->inst_basereg == last_ins->inst_destbasereg &&
3159 ins->inst_offset == last_ins->inst_offset) {
3160 ins->opcode = (ins->opcode == OP_LOADI1_MEMBASE) ? OP_ICONV_TO_I1 : OP_ICONV_TO_U1;
3161 ins->sreg1 = last_ins->sreg1;
3164 case OP_LOADU2_MEMBASE:
3165 case OP_LOADI2_MEMBASE:
3166 if (last_ins && (last_ins->opcode == OP_STOREI2_MEMBASE_REG) &&
3167 ins->inst_basereg == last_ins->inst_destbasereg &&
3168 ins->inst_offset == last_ins->inst_offset) {
3169 ins->opcode = (ins->opcode == OP_LOADI2_MEMBASE) ? OP_ICONV_TO_I2 : OP_ICONV_TO_U2;
3170 ins->sreg1 = last_ins->sreg1;
3174 ins->opcode = OP_MOVE;
/* move r, r  ==>  delete */
3178 if (ins->dreg == ins->sreg1) {
3179 MONO_DELETE_INS (bb, ins);
/* move a->b; move b->a  ==>  delete the second */
3183 * OP_MOVE sreg, dreg
3184 * OP_MOVE dreg, sreg
3186 if (last_ins && last_ins->opcode == OP_MOVE &&
3187 ins->sreg1 == last_ins->dreg &&
3188 ins->dreg == last_ins->sreg1) {
3189 MONO_DELETE_INS (bb, ins);
3197 bb->last_ins = last_ins;
3201 * the branch_cc_table should maintain the order of these
3215 branch_cc_table [] = {
/*
 * ADD_NEW_INS: allocate a new MonoInst with opcode OP into DEST and
 * insert it into the current basic block immediately before 'ins'
 * (used by the lowering pass to materialize constants/addresses).
 */
3229 #define ADD_NEW_INS(cfg,dest,op) do { \
3230 MONO_INST_NEW ((cfg), (dest), (op)); \
3231 mono_bblock_insert_before_ins (bb, ins, (dest)); \
/*
 * map_to_reg_reg_op:
 * Map a membase (reg + immediate offset) or immediate-operand opcode to
 * its register-register equivalent: *_MEMBASE -> *_MEMINDEX, and
 * *_MEMBASE_IMM -> *_MEMBASE_REG.  Used by the lowering pass once the
 * immediate has been loaded into a register.  Aborts on opcodes with no
 * mapping.
 */
3235 map_to_reg_reg_op (int op)
3244 case OP_COMPARE_IMM:
3246 case OP_ICOMPARE_IMM:
3260 case OP_LOAD_MEMBASE:
3261 return OP_LOAD_MEMINDEX;
3262 case OP_LOADI4_MEMBASE:
3263 return OP_LOADI4_MEMINDEX;
3264 case OP_LOADU4_MEMBASE:
3265 return OP_LOADU4_MEMINDEX;
3266 case OP_LOADU1_MEMBASE:
3267 return OP_LOADU1_MEMINDEX;
3268 case OP_LOADI2_MEMBASE:
3269 return OP_LOADI2_MEMINDEX;
3270 case OP_LOADU2_MEMBASE:
3271 return OP_LOADU2_MEMINDEX;
3272 case OP_LOADI1_MEMBASE:
3273 return OP_LOADI1_MEMINDEX;
3274 case OP_STOREI1_MEMBASE_REG:
3275 return OP_STOREI1_MEMINDEX;
3276 case OP_STOREI2_MEMBASE_REG:
3277 return OP_STOREI2_MEMINDEX;
3278 case OP_STOREI4_MEMBASE_REG:
3279 return OP_STOREI4_MEMINDEX;
3280 case OP_STORE_MEMBASE_REG:
3281 return OP_STORE_MEMINDEX;
3282 case OP_STORER4_MEMBASE_REG:
3283 return OP_STORER4_MEMINDEX;
3284 case OP_STORER8_MEMBASE_REG:
3285 return OP_STORER8_MEMINDEX;
/* Immediate stores become register stores (imm goes in a reg). */
3286 case OP_STORE_MEMBASE_IMM:
3287 return OP_STORE_MEMBASE_REG;
3288 case OP_STOREI1_MEMBASE_IMM:
3289 return OP_STOREI1_MEMBASE_REG;
3290 case OP_STOREI2_MEMBASE_IMM:
3291 return OP_STOREI2_MEMBASE_REG;
3292 case OP_STOREI4_MEMBASE_IMM:
3293 return OP_STOREI4_MEMBASE_REG;
3295 g_assert_not_reached ();
3299 * Remove from the instruction list the instructions that can't be
3300 * represented with very simple instructions with no register
/*
 * mono_arch_lowering_pass:
 * Rewrite instructions whose immediates/offsets cannot be encoded in
 * ARM instructions: immediates that are not valid rotated 8-bit values
 * are materialized into a vreg with OP_ICONST (via ADD_NEW_INS) and the
 * opcode switched to its register-register form; over-range load/store
 * offsets are folded into the base register.  MUL_IMM is strength-
 * reduced (x*1 -> move, x*0 -> 0, power of two -> shift).
 * NOTE(review): elided listing — the loop_start label, case labels for
 * ADD/SUB/AND/OR/XOR/MUL_IMM etc., breaks and else arms are not
 * visible.
 */
3304 mono_arch_lowering_pass (MonoCompile *cfg, MonoBasicBlock *bb)
3306 MonoInst *ins, *temp, *last_ins = NULL;
3307 int rot_amount, imm8, low_imm;
3309 MONO_BB_FOR_EACH_INS (bb, ins) {
3311 switch (ins->opcode) {
3315 case OP_COMPARE_IMM:
3316 case OP_ICOMPARE_IMM:
/* Immediate not encodable as a rotated imm8: load it into a vreg
 * and switch to the reg-reg form of the opcode. */
3330 if ((imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount)) < 0) {
3331 ADD_NEW_INS (cfg, temp, OP_ICONST);
3332 temp->inst_c0 = ins->inst_imm;
3333 temp->dreg = mono_alloc_ireg (cfg);
3334 ins->sreg2 = temp->dreg;
3335 ins->opcode = mono_op_imm_to_op (ins->opcode);
3337 if (ins->opcode == OP_SBB || ins->opcode == OP_ISBB || ins->opcode == OP_SUBCC)
/* Strength-reduce multiplications by constants. */
3343 if (ins->inst_imm == 1) {
3344 ins->opcode = OP_MOVE;
3347 if (ins->inst_imm == 0) {
3348 ins->opcode = OP_ICONST;
3352 imm8 = mono_is_power_of_two (ins->inst_imm);
3354 ins->opcode = OP_SHL_IMM;
3355 ins->inst_imm = imm8;
3358 ADD_NEW_INS (cfg, temp, OP_ICONST);
3359 temp->inst_c0 = ins->inst_imm;
3360 temp->dreg = mono_alloc_ireg (cfg);
3361 ins->sreg2 = temp->dreg;
3362 ins->opcode = OP_IMUL;
3368 if (ins->next && (ins->next->opcode == OP_COND_EXC_C || ins->next->opcode == OP_COND_EXC_IC))
3369 /* ARM sets the C flag to 1 if there was _no_ overflow */
3370 ins->next->opcode = OP_COND_EXC_NC;
/* Division/remainder immediates always go through a register. */
3373 case OP_IDIV_UN_IMM:
3375 case OP_IREM_UN_IMM:
3376 ADD_NEW_INS (cfg, temp, OP_ICONST);
3377 temp->inst_c0 = ins->inst_imm;
3378 temp->dreg = mono_alloc_ireg (cfg);
3379 ins->sreg2 = temp->dreg;
3380 ins->opcode = mono_op_imm_to_op (ins->opcode);
3382 case OP_LOCALLOC_IMM:
3383 ADD_NEW_INS (cfg, temp, OP_ICONST);
3384 temp->inst_c0 = ins->inst_imm;
3385 temp->dreg = mono_alloc_ireg (cfg);
3386 ins->sreg1 = temp->dreg;
3387 ins->opcode = OP_LOCALLOC;
/* Word/byte loads: ARM encodes a 12-bit offset. */
3389 case OP_LOAD_MEMBASE:
3390 case OP_LOADI4_MEMBASE:
3391 case OP_LOADU4_MEMBASE:
3392 case OP_LOADU1_MEMBASE:
3393 /* we can do two things: load the immed in a register
3394 * and use an indexed load, or see if the immed can be
3395 * represented as an ad_imm + a load with a smaller offset
3396 * that fits. We just do the first for now, optimize later.
3398 if (arm_is_imm12 (ins->inst_offset))
3400 ADD_NEW_INS (cfg, temp, OP_ICONST);
3401 temp->inst_c0 = ins->inst_offset;
3402 temp->dreg = mono_alloc_ireg (cfg);
3403 ins->sreg2 = temp->dreg;
3404 ins->opcode = map_to_reg_reg_op (ins->opcode);
/* Halfword/signed-byte loads: only an 8-bit offset is encodable. */
3406 case OP_LOADI2_MEMBASE:
3407 case OP_LOADU2_MEMBASE:
3408 case OP_LOADI1_MEMBASE:
3409 if (arm_is_imm8 (ins->inst_offset))
3411 ADD_NEW_INS (cfg, temp, OP_ICONST);
3412 temp->inst_c0 = ins->inst_offset;
3413 temp->dreg = mono_alloc_ireg (cfg);
3414 ins->sreg2 = temp->dreg;
3415 ins->opcode = map_to_reg_reg_op (ins->opcode);
/* VFP loads: 8-bit scaled offset; try base+high-part ADD first.
 * NOTE(review): unlike the store case at 3470 below, this condition
 * does not also require arm_is_fpimm8 (low_imm) — confirm the elided
 * text; upstream checks both. */
3417 case OP_LOADR4_MEMBASE:
3418 case OP_LOADR8_MEMBASE:
3419 if (arm_is_fpimm8 (ins->inst_offset))
3421 low_imm = ins->inst_offset & 0x1ff;
3422 if ((imm8 = mono_arm_is_rotated_imm8 (ins->inst_offset & ~0x1ff, &rot_amount)) >= 0) {
3423 ADD_NEW_INS (cfg, temp, OP_ADD_IMM);
3424 temp->inst_imm = ins->inst_offset & ~0x1ff;
3425 temp->sreg1 = ins->inst_basereg;
3426 temp->dreg = mono_alloc_ireg (cfg);
3427 ins->inst_basereg = temp->dreg;
3428 ins->inst_offset = low_imm;
/* Fallback: compute the full address in a register. */
3432 ADD_NEW_INS (cfg, temp, OP_ICONST);
3433 temp->inst_c0 = ins->inst_offset;
3434 temp->dreg = mono_alloc_ireg (cfg);
3436 ADD_NEW_INS (cfg, add_ins, OP_IADD);
3437 add_ins->sreg1 = ins->inst_basereg;
3438 add_ins->sreg2 = temp->dreg;
3439 add_ins->dreg = mono_alloc_ireg (cfg);
3441 ins->inst_basereg = add_ins->dreg;
3442 ins->inst_offset = 0;
3445 case OP_STORE_MEMBASE_REG:
3446 case OP_STOREI4_MEMBASE_REG:
3447 case OP_STOREI1_MEMBASE_REG:
3448 if (arm_is_imm12 (ins->inst_offset))
3450 ADD_NEW_INS (cfg, temp, OP_ICONST);
3451 temp->inst_c0 = ins->inst_offset;
3452 temp->dreg = mono_alloc_ireg (cfg);
3453 ins->sreg2 = temp->dreg;
3454 ins->opcode = map_to_reg_reg_op (ins->opcode);
3456 case OP_STOREI2_MEMBASE_REG:
3457 if (arm_is_imm8 (ins->inst_offset))
3459 ADD_NEW_INS (cfg, temp, OP_ICONST);
3460 temp->inst_c0 = ins->inst_offset;
3461 temp->dreg = mono_alloc_ireg (cfg);
3462 ins->sreg2 = temp->dreg;
3463 ins->opcode = map_to_reg_reg_op (ins->opcode);
3465 case OP_STORER4_MEMBASE_REG:
3466 case OP_STORER8_MEMBASE_REG:
3467 if (arm_is_fpimm8 (ins->inst_offset))
3469 low_imm = ins->inst_offset & 0x1ff;
3470 if ((imm8 = mono_arm_is_rotated_imm8 (ins->inst_offset & ~ 0x1ff, &rot_amount)) >= 0 && arm_is_fpimm8 (low_imm)) {
3471 ADD_NEW_INS (cfg, temp, OP_ADD_IMM);
3472 temp->inst_imm = ins->inst_offset & ~0x1ff;
3473 temp->sreg1 = ins->inst_destbasereg;
3474 temp->dreg = mono_alloc_ireg (cfg);
3475 ins->inst_destbasereg = temp->dreg;
3476 ins->inst_offset = low_imm;
3480 ADD_NEW_INS (cfg, temp, OP_ICONST);
3481 temp->inst_c0 = ins->inst_offset;
3482 temp->dreg = mono_alloc_ireg (cfg);
3484 ADD_NEW_INS (cfg, add_ins, OP_IADD);
3485 add_ins->sreg1 = ins->inst_destbasereg;
3486 add_ins->sreg2 = temp->dreg;
3487 add_ins->dreg = mono_alloc_ireg (cfg);
3489 ins->inst_destbasereg = add_ins->dreg;
3490 ins->inst_offset = 0;
/* Immediate stores: load the value, then re-process the (possibly
 * large) offset by jumping back to loop_start. */
3493 case OP_STORE_MEMBASE_IMM:
3494 case OP_STOREI1_MEMBASE_IMM:
3495 case OP_STOREI2_MEMBASE_IMM:
3496 case OP_STOREI4_MEMBASE_IMM:
3497 ADD_NEW_INS (cfg, temp, OP_ICONST);
3498 temp->inst_c0 = ins->inst_imm;
3499 temp->dreg = mono_alloc_ireg (cfg);
3500 ins->sreg1 = temp->dreg;
3501 ins->opcode = map_to_reg_reg_op (ins->opcode);
3503 goto loop_start; /* make it handle the possibly big ins->inst_offset */
3505 gboolean swap = FALSE;
3509 /* Optimized away */
/* FP compares that need operand swapping: rewrite the following
 * branch to the mirrored condition and swap sreg1/sreg2. */
3514 /* Some fp compares require swapped operands */
3515 switch (ins->next->opcode) {
3517 ins->next->opcode = OP_FBLT;
3521 ins->next->opcode = OP_FBLT_UN;
3525 ins->next->opcode = OP_FBGE;
3529 ins->next->opcode = OP_FBGE_UN;
3537 ins->sreg1 = ins->sreg2;
3546 bb->last_ins = last_ins;
3547 bb->max_vreg = cfg->next_vreg;
/*
 * mono_arch_decompose_long_opts:
 * Decompose 64-bit operations into 32-bit pairs.  For OP_LNEG, negate
 * via rsbs/rsc on the low (dreg/sreg + 1) and high (dreg/sreg + 2)
 * halves of the long vreg pair so the borrow propagates correctly.
 * NOTE(review): the NULLIFY_INS/return lines are elided.
 */
3551 mono_arch_decompose_long_opts (MonoCompile *cfg, MonoInst *long_ins)
3555 if (long_ins->opcode == OP_LNEG) {
3557 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ARM_RSBS_IMM, ins->dreg + 1, ins->sreg1 + 1, 0);
3558 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ARM_RSC_IMM, ins->dreg + 2, ins->sreg1 + 2, 0);
/*
 * emit_float_to_int:
 * Emit VFP code converting the double in SREG to an integer in DREG:
 * tosizd/touizd into a scratch VFP register, move to the core register,
 * then mask/shift to the requested SIZE with the requested signedness.
 * NOTE(review): the is_signed branches around the conversion and the
 * final size==1/2 blocks' if/else structure are partially elided.
 */
3564 emit_float_to_int (MonoCompile *cfg, guchar *code, int dreg, int sreg, int size, gboolean is_signed)
3566 /* sreg is a float, dreg is an integer reg */
3568 code = mono_arm_emit_vfp_scratch_save (cfg, code, vfp_scratch1);
3570 ARM_TOSIZD (code, vfp_scratch1, sreg);
3572 ARM_TOUIZD (code, vfp_scratch1, sreg);
3573 ARM_FMRS (code, dreg, vfp_scratch1);
3574 code = mono_arm_emit_vfp_scratch_restore (cfg, code, vfp_scratch1);
/* Unsigned narrowing: mask (size 1) or shift-pair (size 2). */
3578 ARM_AND_REG_IMM8 (code, dreg, dreg, 0xff);
3579 else if (size == 2) {
3580 ARM_SHL_IMM (code, dreg, dreg, 16);
3581 ARM_SHR_IMM (code, dreg, dreg, 16);
/* Signed narrowing: shift left then arithmetic shift right. */
3585 ARM_SHL_IMM (code, dreg, dreg, 24);
3586 ARM_SAR_IMM (code, dreg, dreg, 24);
3587 } else if (size == 2) {
3588 ARM_SHL_IMM (code, dreg, dreg, 16);
3589 ARM_SAR_IMM (code, dreg, dreg, 16);
3595 #endif /* #ifndef DISABLE_JIT */
3599 const guchar *target;
3604 #define is_call_imm(diff) ((gint)(diff) >= -33554432 && (gint)(diff) <= 33554431)
/*
 * search_thunk_slot:
 * mono_code_manager/domain-code iteration callback used by
 * handle_thunk ().  Scans the thunk area of a code chunk for either an
 * existing thunk that already jumps to pdata->target (reuse it) or a
 * free 3-word slot (emit a new ldr-ip / branch / constant thunk), then
 * patches the original call site to branch to the thunk.  Skips chunks
 * the call site cannot reach with a 24-bit branch displacement.
 * NOTE(review): pdata->found assignments and the loop-advance line
 * ("thunks += 3") are elided.
 */
3607 search_thunk_slot (void *data, int csize, int bsize, void *user_data) {
3608 PatchData *pdata = (PatchData*)user_data;
3609 guchar *code = data;
3610 guint32 *thunks = data;
3611 guint32 *endthunks = (guint32*)(code + bsize);
3613 int difflow, diffhigh;
3615 /* always ensure a call from pdata->code can reach to the thunks without further thunks */
3616 difflow = (char*)pdata->code - (char*)thunks;
3617 diffhigh = (char*)pdata->code - (char*)endthunks;
3618 if (!((is_call_imm (thunks) && is_call_imm (endthunks)) || (is_call_imm (difflow) && is_call_imm (diffhigh))))
3622 * The thunk is composed of 3 words:
3623 * load constant from thunks [2] into ARM_IP
3626 * Note that the LR register is already setup
3628 //g_print ("thunk nentries: %d\n", ((char*)endthunks - (char*)thunks)/16);
/* found == 2 means "search everywhere"; otherwise only the chunk
 * containing the call site is considered. */
3629 if ((pdata->found == 2) || (pdata->code >= code && pdata->code <= code + csize)) {
3630 while (thunks < endthunks) {
3631 //g_print ("looking for target: %p at %p (%08x-%08x)\n", pdata->target, thunks, thunks [0], thunks [1]);
3632 if (thunks [2] == (guint32)pdata->target) {
/* Existing thunk for this target: just retarget the call site. */
3633 arm_patch (pdata->code, (guchar*)thunks);
3634 mono_arch_flush_icache (pdata->code, 4);
3637 } else if ((thunks [0] == 0) && (thunks [1] == 0) && (thunks [2] == 0)) {
3638 /* found a free slot instead: emit thunk */
3639 /* ARMREG_IP is fine to use since this can't be an IMT call
3642 code = (guchar*)thunks;
3643 ARM_LDR_IMM (code, ARMREG_IP, ARMREG_PC, 0);
3644 if (thumb_supported)
3645 ARM_BX (code, ARMREG_IP);
3647 ARM_MOV_REG_REG (code, ARMREG_PC, ARMREG_IP);
3648 thunks [2] = (guint32)pdata->target;
3649 mono_arch_flush_icache ((guchar*)thunks, 12);
3651 arm_patch (pdata->code, (guchar*)thunks);
3652 mono_arch_flush_icache (pdata->code, 4);
3656 /* skip 12 bytes, the size of the thunk */
3660 //g_print ("failed thunk lookup for %p from %p at %p (%d entries)\n", pdata->target, pdata->code, data, count);
/*
 * handle_thunk:
 * Patch CODE to reach TARGET through a branch thunk when the direct
 * displacement does not fit.  Searches, in order: the dynamic-method
 * code manager (if given), the domain's code chunks near the call site,
 * then any chunk (pdata.found == 2 widens the search), and finally the
 * code managers of all dynamic methods.  Aborts if no slot is found.
 */
3666 handle_thunk (MonoDomain *domain, int absolute, guchar *code, const guchar *target, MonoCodeManager *dyn_code_mp)
3671 domain = mono_domain_get ();
3674 pdata.target = target;
3675 pdata.absolute = absolute;
3679 mono_code_manager_foreach (dyn_code_mp, search_thunk_slot, &pdata);
3682 if (pdata.found != 1) {
3683 mono_domain_lock (domain);
3684 mono_domain_code_foreach (domain, search_thunk_slot, &pdata);
3687 /* this uses the first available slot */
3689 mono_domain_code_foreach (domain, search_thunk_slot, &pdata);
3691 mono_domain_unlock (domain);
3694 if (pdata.found != 1) {
3696 GHashTableIter iter;
3697 MonoJitDynamicMethodInfo *ji;
3700 * This might be a dynamic method, search its code manager. We can only
3701 * use the dynamic method containing CODE, since the others might be freed later.
3705 mono_domain_lock (domain);
3706 hash = domain_jit_info (domain)->dynamic_code_hash;
3708 /* FIXME: Speed this up */
3709 g_hash_table_iter_init (&iter, hash);
3710 while (g_hash_table_iter_next (&iter, NULL, (gpointer*)&ji)) {
3711 mono_code_manager_foreach (ji->code_mp, search_thunk_slot, &pdata);
3712 if (pdata.found == 1)
3716 mono_domain_unlock (domain);
3718 if (pdata.found != 1)
3719 g_print ("thunk failed for %p from %p\n", target, code);
3720 g_assert (pdata.found == 1);
/*
 * arm_patch_general:
 * Patch the instruction(s) at CODE to transfer control to TARGET.
 * Handles: direct B/BL (rewrite the 24-bit displacement, upgrading BL
 * to BLX when the target's low bit marks Thumb); jump-table entries
 * (USE_JUMP_TABLES); and the indirect ldr-ip/bx call sequences, where
 * the embedded address constant next to the code is rewritten instead.
 * Falls back to handle_thunk () when the displacement does not fit.
 * NOTE(review): elided listing — the tbits declaration, overflow
 * fallthroughs and some braces are not visible.
 */
3724 arm_patch_general (MonoDomain *domain, guchar *code, const guchar *target, MonoCodeManager *dyn_code_mp)
3726 guint32 *code32 = (void*)code;
3727 guint32 ins = *code32;
3728 guint32 prim = (ins >> 25) & 7;
3729 guint32 tval = GPOINTER_TO_UINT (target);
3731 //g_print ("patching 0x%08x (0x%08x) to point to 0x%08x\n", code, ins, target);
3732 if (prim == 5) { /* 101b */
3733 /* the diff starts 8 bytes from the branch opcode */
3734 gint diff = target - code - 8;
3736 gint tmask = 0xffffffff;
3737 if (tval & 1) { /* entering thumb mode */
3738 diff = target - 1 - code - 8;
3739 g_assert (thumb_supported);
3740 tbits = 0xf << 28; /* bl->blx bit pattern */
3741 g_assert ((ins & (1 << 24))); /* it must be a bl, not b instruction */
3742 /* this low bit of the displacement is moved to bit 24 in the instruction encoding */
3746 tmask = ~(1 << 24); /* clear the link bit */
3747 /*g_print ("blx to thumb: target: %p, code: %p, diff: %d, mask: %x\n", target, code, diff, tmask);*/
/* Forward branch: displacement must fit in 24 bits (<< 2). */
3752 if (diff <= 33554431) {
3754 ins = (ins & 0xff000000) | diff;
3756 *code32 = ins | tbits;
3760 /* diff between 0 and -33554432 */
3761 if (diff >= -33554432) {
3763 ins = (ins & 0xff000000) | (diff & ~0xff000000);
3765 *code32 = ins | tbits;
/* Out of direct-branch range: go through a thunk. */
3770 handle_thunk (domain, TRUE, code, target, dyn_code_mp);
3774 #ifdef USE_JUMP_TABLES
3776 gpointer *jte = mono_jumptable_get_entry (code);
3778 jte [0] = (gpointer) target;
3782 * The alternative call sequences looks like this:
3784 * ldr ip, [pc] // loads the address constant
3785 * b 1f // jumps around the constant
3786 * address constant embedded in the code
3791 * There are two cases for patching:
3792 * a) at the end of method emission: in this case code points to the start
3793 * of the call sequence
3794 * b) during runtime patching of the call site: in this case code points
3795 * to the mov pc, ip instruction
3797 * We have to handle also the thunk jump code sequence:
3801 * address constant // execution never reaches here
3803 if ((ins & 0x0ffffff0) == 0x12fff10) {
3804 /* Branch and exchange: the address is constructed in a reg
3805 * We can patch BX when the code sequence is the following:
3806 * ldr ip, [pc, #0] ; 0x8
/* Re-emit the expected sequence into ccode[] and compare it with
 * the memory around CODE to locate the address constant. */
3813 guint8 *emit = (guint8*)ccode;
3814 ARM_LDR_IMM (emit, ARMREG_IP, ARMREG_PC, 0);
3816 ARM_MOV_REG_REG (emit, ARMREG_LR, ARMREG_PC);
3817 ARM_BX (emit, ARMREG_IP);
3819 /*patching from magic trampoline*/
3820 if (ins == ccode [3]) {
3821 g_assert (code32 [-4] == ccode [0]);
3822 g_assert (code32 [-3] == ccode [1]);
3823 g_assert (code32 [-1] == ccode [2]);
3824 code32 [-2] = (guint32)target;
3827 /*patching from JIT*/
3828 if (ins == ccode [0]) {
3829 g_assert (code32 [1] == ccode [1]);
3830 g_assert (code32 [3] == ccode [2]);
3831 g_assert (code32 [4] == ccode [3]);
3832 code32 [2] = (guint32)target;
3835 g_assert_not_reached ();
3836 } else if ((ins & 0x0ffffff0) == 0x12fff30) {
/* BLX reg sequence: ldr ip, [pc]; blx ip — constant sits at [-1]. */
3844 guint8 *emit = (guint8*)ccode;
3845 ARM_LDR_IMM (emit, ARMREG_IP, ARMREG_PC, 0);
3847 ARM_BLX_REG (emit, ARMREG_IP);
3849 g_assert (code32 [-3] == ccode [0]);
3850 g_assert (code32 [-2] == ccode [1]);
3851 g_assert (code32 [0] == ccode [2]);
3853 code32 [-1] = (guint32)target;
/* Legacy mov-pc sequence (and the thunk jump sequence). */
3856 guint32 *tmp = ccode;
3857 guint8 *emit = (guint8*)tmp;
3858 ARM_LDR_IMM (emit, ARMREG_IP, ARMREG_PC, 0);
3859 ARM_MOV_REG_REG (emit, ARMREG_LR, ARMREG_PC);
3860 ARM_MOV_REG_REG (emit, ARMREG_PC, ARMREG_IP);
3861 ARM_BX (emit, ARMREG_IP);
3862 if (ins == ccode [2]) {
3863 g_assert_not_reached (); // should be -2 ...
3864 code32 [-1] = (guint32)target;
3867 if (ins == ccode [0]) {
3868 /* handles both thunk jump code and the far call sequence */
3869 code32 [2] = (guint32)target;
3872 g_assert_not_reached ();
3874 // g_print ("patched with 0x%08x\n", ins);
/*
 * arm_patch:
 * Convenience wrapper: patch CODE to branch to TARGET with no domain
 * or dynamic-method code manager (see arm_patch_general ()).
 */
3879 arm_patch (guchar *code, const guchar *target)
3881 arm_patch_general (NULL, code, target, NULL);
3885 * Return the >= 0 uimm8 value if val can be represented with a byte + rotation
3886 * (with the rotation amount in *rot_amount. rot_amount is already adjusted
3887 * to be used with the emit macros.
3888 * Return -1 otherwise.
/*
 * mono_arm_is_rotated_imm8:
 * Test whether VAL is an ARM data-processing immediate (an 8-bit value
 * rotated right by an even amount).  Tries every even rotation 0..30;
 * on success stores the emit-macro-adjusted rotation in *rot_amount and
 * returns the >= 0 byte value (returns elided here; -1 on failure per
 * the comment above this function in the file).
 */
3891 mono_arm_is_rotated_imm8 (guint32 val, gint *rot_amount)
3894 for (i = 0; i < 31; i+= 2) {
/* Rotate left by i == rotate right by (32 - i). */
3895 res = (val << (32 - i)) | (val >> i);
3898 *rot_amount = i? 32 - i: 0;
3905 * Emits in code a sequence of instructions that load the value 'val'
3906 * into the dreg register. Uses at most 4 instructions.
/*
 * mono_arm_emit_load_imm:
 * Emit at most 4 instructions loading the 32-bit constant VAL into
 * DREG.  Preference order (branch structure partially elided): a
 * PC-relative constant-pool load, a rotated-imm8 MOV, a rotated-imm8
 * MVN of the complement, MOVW/MOVT on v6T2+, and finally a MOV of the
 * low byte followed by up to three ADDs of the remaining bytes.
 * Returns the advanced code pointer.
 */
3909 mono_arm_emit_load_imm (guint8 *code, int dreg, guint32 val)
3911 int imm8, rot_amount;
/* Constant-pool path: ldr dreg, [pc] then jump over the constant. */
3913 ARM_LDR_IMM (code, dreg, ARMREG_PC, 0);
3914 /* skip the constant pool */
3920 if ((imm8 = mono_arm_is_rotated_imm8 (val, &rot_amount)) >= 0) {
3921 ARM_MOV_REG_IMM (code, dreg, imm8, rot_amount);
3922 } else if ((imm8 = mono_arm_is_rotated_imm8 (~val, &rot_amount)) >= 0) {
/* Complement is encodable: use MVN. */
3923 ARM_MVN_REG_IMM (code, dreg, imm8, rot_amount);
3926 ARM_MOVW_REG_IMM (code, dreg, val & 0xffff);
3928 ARM_MOVT_REG_IMM (code, dreg, (val >> 16) & 0xffff);
/* Byte-by-byte fallback: mov low byte, add the higher bytes with
 * the appropriate rotations (24/16/8 encode the byte position). */
3932 ARM_MOV_REG_IMM8 (code, dreg, (val & 0xFF));
3934 ARM_ADD_REG_IMM (code, dreg, dreg, (val & 0xFF00) >> 8, 24);
3936 if (val & 0xFF0000) {
3937 ARM_ADD_REG_IMM (code, dreg, dreg, (val & 0xFF0000) >> 16, 16);
3939 if (val & 0xFF000000) {
3940 ARM_ADD_REG_IMM (code, dreg, dreg, (val & 0xFF000000) >> 24, 8);
3942 } else if (val & 0xFF00) {
3943 ARM_MOV_REG_IMM (code, dreg, (val & 0xFF00) >> 8, 24);
3944 if (val & 0xFF0000) {
3945 ARM_ADD_REG_IMM (code, dreg, dreg, (val & 0xFF0000) >> 16, 16);
3947 if (val & 0xFF000000) {
3948 ARM_ADD_REG_IMM (code, dreg, dreg, (val & 0xFF000000) >> 24, 8);
3950 } else if (val & 0xFF0000) {
3951 ARM_MOV_REG_IMM (code, dreg, (val & 0xFF0000) >> 16, 16);
3952 if (val & 0xFF000000) {
3953 ARM_ADD_REG_IMM (code, dreg, dreg, (val & 0xFF000000) >> 24, 8);
3956 //g_assert_not_reached ();
/*
 * mono_arm_thumb_supported:
 * Return whether the runtime detected Thumb support on this CPU
 * (simply reports the file-scope thumb_supported flag).
 */
3962 mono_arm_thumb_supported (void)
3964 return thumb_supported;
3970 * emit_load_volatile_arguments:
3972 * Load volatile arguments from the stack to the original input registers.
3973 * Required before a tail call.
3976 emit_load_volatile_arguments (MonoCompile *cfg, guint8 *code)
3978 MonoMethod *method = cfg->method;
3979 MonoMethodSignature *sig;
3984 /* FIXME: Generate intermediate code instead */
3986 sig = mono_method_signature (method);
3988 /* This is the opposite of the code in emit_prolog */
/* Recompute the calling convention layout so we know where the prolog
 * spilled each incoming argument. */
3992 cinfo = get_call_info (cfg->generic_sharing_context, NULL, sig);
/* If a valuetype is returned by hidden address, reload that address into
 * its designated register from the vret_addr stack slot. */
3994 if (cinfo->vtype_retaddr) {
3995 ArgInfo *ainfo = &cinfo->ret;
3996 inst = cfg->vret_addr;
3997 g_assert (arm_is_imm12 (inst->inst_offset));
3998 ARM_LDR_IMM (code, ainfo->reg, inst->inst_basereg, inst->inst_offset);
/* Walk every formal argument (plus the implicit this) and undo the
 * prolog's spill according to its storage class. */
4000 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
4001 ArgInfo *ainfo = cinfo->args + i;
4002 inst = cfg->args [pos];
4004 if (cfg->verbose_level > 2)
4005 g_print ("Loading argument %d (type: %d)\n", i, ainfo->storage);
/* Argument lives in a register for the method body: move/reload it back
 * into the register the calling convention expects. */
4006 if (inst->opcode == OP_REGVAR) {
4007 if (ainfo->storage == RegTypeGeneral)
4008 ARM_MOV_REG_REG (code, inst->dreg, ainfo->reg);
4009 else if (ainfo->storage == RegTypeFP) {
4010 g_assert_not_reached ();
4011 } else if (ainfo->storage == RegTypeBase) {
/* NOTE(review): the fast path loads from prev_sp_offset + ainfo->offset
 * but the imm12-overflow path below loads inst->inst_offset instead --
 * these look inconsistent; confirm against the full source (elided lines
 * may explain it). */
4015 if (arm_is_imm12 (prev_sp_offset + ainfo->offset)) {
4016 ARM_LDR_IMM (code, inst->dreg, ARMREG_SP, (prev_sp_offset + ainfo->offset));
4018 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
4019 ARM_LDR_REG_REG (code, inst->dreg, ARMREG_SP, ARMREG_IP);
4023 g_assert_not_reached ();
/* Argument lives on the stack for the method body: reload it into the
 * incoming register (or register pair). */
4025 if (ainfo->storage == RegTypeGeneral || ainfo->storage == RegTypeIRegPair) {
4026 switch (ainfo->size) {
/* 8-byte case: reload both halves of the register pair. */
4033 g_assert (arm_is_imm12 (inst->inst_offset));
4034 ARM_LDR_IMM (code, ainfo->reg, inst->inst_basereg, inst->inst_offset);
4035 g_assert (arm_is_imm12 (inst->inst_offset + 4));
4036 ARM_LDR_IMM (code, ainfo->reg + 1, inst->inst_basereg, inst->inst_offset + 4);
/* Default case: single word, with an IP-indexed fallback when the
 * offset does not fit in a 12-bit immediate. */
4039 if (arm_is_imm12 (inst->inst_offset)) {
4040 ARM_LDR_IMM (code, ainfo->reg, inst->inst_basereg, inst->inst_offset);
4042 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
4043 ARM_LDR_REG_REG (code, ainfo->reg, inst->inst_basereg, ARMREG_IP);
4047 } else if (ainfo->storage == RegTypeBaseGen) {
4050 } else if (ainfo->storage == RegTypeBase) {
4052 } else if (ainfo->storage == RegTypeFP) {
4053 g_assert_not_reached ();
/* Struct passed by value in consecutive registers: reload each word. */
4054 } else if (ainfo->storage == RegTypeStructByVal) {
4055 int doffset = inst->inst_offset;
4059 if (mono_class_from_mono_type (inst->inst_vtype))
4060 size = mono_class_native_size (mono_class_from_mono_type (inst->inst_vtype), NULL);
4061 for (cur_reg = 0; cur_reg < ainfo->size; ++cur_reg) {
4062 if (arm_is_imm12 (doffset)) {
4063 ARM_LDR_IMM (code, ainfo->reg + cur_reg, inst->inst_basereg, doffset);
4065 code = mono_arm_emit_load_imm (code, ARMREG_IP, doffset);
4066 ARM_LDR_REG_REG (code, ainfo->reg + cur_reg, inst->inst_basereg, ARMREG_IP);
4068 soffset += sizeof (gpointer);
4069 doffset += sizeof (gpointer);
4074 } else if (ainfo->storage == RegTypeStructByAddr) {
4089 mono_arch_output_basic_block (MonoCompile *cfg, MonoBasicBlock *bb)
4094 guint8 *code = cfg->native_code + cfg->code_len;
4095 MonoInst *last_ins = NULL;
4096 guint last_offset = 0;
4098 int imm8, rot_amount;
4100 /* we don't align basic blocks of loops on arm */
4102 if (cfg->verbose_level > 2)
4103 g_print ("Basic block %d starting at offset 0x%x\n", bb->block_num, bb->native_offset);
4105 cpos = bb->max_offset;
4107 if (cfg->prof_options & MONO_PROFILE_COVERAGE) {
4108 //MonoCoverageInfo *cov = mono_get_coverage_info (cfg->method);
4109 //g_assert (!mono_compile_aot);
4112 // cov->data [bb->dfn].iloffset = bb->cil_code - cfg->cil_code;
4113 /* this is not thread safe, but good enough */
4114 /* fixme: how to handle overflows? */
4115 //x86_inc_mem (code, &cov->data [bb->dfn].count);
4118 if (mono_break_at_bb_method && mono_method_desc_full_match (mono_break_at_bb_method, cfg->method) && bb->block_num == mono_break_at_bb_bb_num) {
4119 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_INTERNAL_METHOD,
4120 (gpointer)"mono_break");
4121 code = emit_call_seq (cfg, code);
4124 MONO_BB_FOR_EACH_INS (bb, ins) {
4125 offset = code - cfg->native_code;
4127 max_len = ((guint8 *)ins_get_spec (ins->opcode))[MONO_INST_LEN];
4129 if (offset > (cfg->code_size - max_len - 16)) {
4130 cfg->code_size *= 2;
4131 cfg->native_code = g_realloc (cfg->native_code, cfg->code_size);
4132 code = cfg->native_code + offset;
4134 // if (ins->cil_code)
4135 // g_print ("cil code\n");
4136 mono_debug_record_line_number (cfg, ins, offset);
4138 switch (ins->opcode) {
4139 case OP_MEMORY_BARRIER:
4141 ARM_MOV_REG_IMM8 (code, ARMREG_R0, 0);
4142 ARM_MCR (code, 15, 0, ARMREG_R0, 7, 10, 5);
4146 #ifdef HAVE_AEABI_READ_TP
4147 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_INTERNAL_METHOD,
4148 (gpointer)"__aeabi_read_tp");
4149 code = emit_call_seq (cfg, code);
4151 ARM_LDR_IMM (code, ins->dreg, ARMREG_R0, ins->inst_offset);
4153 g_assert_not_reached ();
4156 case OP_ATOMIC_EXCHANGE_I4:
4157 case OP_ATOMIC_CAS_I4:
4158 case OP_ATOMIC_ADD_I4: {
4162 g_assert (v7_supported);
4165 if (ins->sreg1 != ARMREG_IP && ins->sreg2 != ARMREG_IP && ins->sreg3 != ARMREG_IP)
4167 else if (ins->sreg1 != ARMREG_R0 && ins->sreg2 != ARMREG_R0 && ins->sreg3 != ARMREG_R0)
4169 else if (ins->sreg1 != ARMREG_R1 && ins->sreg2 != ARMREG_R1 && ins->sreg3 != ARMREG_R1)
4173 g_assert (cfg->arch.atomic_tmp_offset != -1);
4174 ARM_STR_IMM (code, tmpreg, cfg->frame_reg, cfg->arch.atomic_tmp_offset);
4176 switch (ins->opcode) {
4177 case OP_ATOMIC_EXCHANGE_I4:
4179 ARM_DMB (code, ARM_DMB_SY);
4180 ARM_LDREX_REG (code, ARMREG_LR, ins->sreg1);
4181 ARM_STREX_REG (code, tmpreg, ins->sreg2, ins->sreg1);
4182 ARM_CMP_REG_IMM (code, tmpreg, 0, 0);
4184 ARM_B_COND (code, ARMCOND_NE, 0);
4185 arm_patch (buf [1], buf [0]);
4187 case OP_ATOMIC_CAS_I4:
4188 ARM_DMB (code, ARM_DMB_SY);
4190 ARM_LDREX_REG (code, ARMREG_LR, ins->sreg1);
4191 ARM_CMP_REG_REG (code, ARMREG_LR, ins->sreg3);
4193 ARM_B_COND (code, ARMCOND_NE, 0);
4194 ARM_STREX_REG (code, tmpreg, ins->sreg2, ins->sreg1);
4195 ARM_CMP_REG_IMM (code, tmpreg, 0, 0);
4197 ARM_B_COND (code, ARMCOND_NE, 0);
4198 arm_patch (buf [2], buf [0]);
4199 arm_patch (buf [1], code);
4201 case OP_ATOMIC_ADD_I4:
4203 ARM_DMB (code, ARM_DMB_SY);
4204 ARM_LDREX_REG (code, ARMREG_LR, ins->sreg1);
4205 ARM_ADD_REG_REG (code, ARMREG_LR, ARMREG_LR, ins->sreg2);
4206 ARM_STREX_REG (code, tmpreg, ARMREG_LR, ins->sreg1);
4207 ARM_CMP_REG_IMM (code, tmpreg, 0, 0);
4209 ARM_B_COND (code, ARMCOND_NE, 0);
4210 arm_patch (buf [1], buf [0]);
4213 g_assert_not_reached ();
4216 ARM_DMB (code, ARM_DMB_SY);
4217 if (tmpreg != ins->dreg)
4218 ARM_LDR_IMM (code, tmpreg, cfg->frame_reg, cfg->arch.atomic_tmp_offset);
4219 ARM_MOV_REG_REG (code, ins->dreg, ARMREG_LR);
4224 ppc_mullw (code, ppc_r4, ins->sreg1, ins->sreg2);
4225 ppc_mulhw (code, ppc_r3, ins->sreg1, ins->sreg2);
4228 ppc_mullw (code, ppc_r4, ins->sreg1, ins->sreg2);
4229 ppc_mulhwu (code, ppc_r3, ins->sreg1, ins->sreg2);
4231 case OP_STOREI1_MEMBASE_IMM:
4232 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_imm & 0xFF);
4233 g_assert (arm_is_imm12 (ins->inst_offset));
4234 ARM_STRB_IMM (code, ARMREG_LR, ins->inst_destbasereg, ins->inst_offset);
4236 case OP_STOREI2_MEMBASE_IMM:
4237 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_imm & 0xFFFF);
4238 g_assert (arm_is_imm8 (ins->inst_offset));
4239 ARM_STRH_IMM (code, ARMREG_LR, ins->inst_destbasereg, ins->inst_offset);
4241 case OP_STORE_MEMBASE_IMM:
4242 case OP_STOREI4_MEMBASE_IMM:
4243 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_imm);
4244 g_assert (arm_is_imm12 (ins->inst_offset));
4245 ARM_STR_IMM (code, ARMREG_LR, ins->inst_destbasereg, ins->inst_offset);
4247 case OP_STOREI1_MEMBASE_REG:
4248 g_assert (arm_is_imm12 (ins->inst_offset));
4249 ARM_STRB_IMM (code, ins->sreg1, ins->inst_destbasereg, ins->inst_offset);
4251 case OP_STOREI2_MEMBASE_REG:
4252 g_assert (arm_is_imm8 (ins->inst_offset));
4253 ARM_STRH_IMM (code, ins->sreg1, ins->inst_destbasereg, ins->inst_offset);
4255 case OP_STORE_MEMBASE_REG:
4256 case OP_STOREI4_MEMBASE_REG:
4257 /* this case is special, since it happens for spill code after lowering has been called */
4258 if (arm_is_imm12 (ins->inst_offset)) {
4259 ARM_STR_IMM (code, ins->sreg1, ins->inst_destbasereg, ins->inst_offset);
4261 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_offset);
4262 ARM_STR_REG_REG (code, ins->sreg1, ins->inst_destbasereg, ARMREG_LR);
4265 case OP_STOREI1_MEMINDEX:
4266 ARM_STRB_REG_REG (code, ins->sreg1, ins->inst_destbasereg, ins->sreg2);
4268 case OP_STOREI2_MEMINDEX:
4269 ARM_STRH_REG_REG (code, ins->sreg1, ins->inst_destbasereg, ins->sreg2);
4271 case OP_STORE_MEMINDEX:
4272 case OP_STOREI4_MEMINDEX:
4273 ARM_STR_REG_REG (code, ins->sreg1, ins->inst_destbasereg, ins->sreg2);
4276 g_assert_not_reached ();
4278 case OP_LOAD_MEMINDEX:
4279 case OP_LOADI4_MEMINDEX:
4280 case OP_LOADU4_MEMINDEX:
4281 ARM_LDR_REG_REG (code, ins->dreg, ins->inst_basereg, ins->sreg2);
4283 case OP_LOADI1_MEMINDEX:
4284 ARM_LDRSB_REG_REG (code, ins->dreg, ins->inst_basereg, ins->sreg2);
4286 case OP_LOADU1_MEMINDEX:
4287 ARM_LDRB_REG_REG (code, ins->dreg, ins->inst_basereg, ins->sreg2);
4289 case OP_LOADI2_MEMINDEX:
4290 ARM_LDRSH_REG_REG (code, ins->dreg, ins->inst_basereg, ins->sreg2);
4292 case OP_LOADU2_MEMINDEX:
4293 ARM_LDRH_REG_REG (code, ins->dreg, ins->inst_basereg, ins->sreg2);
4295 case OP_LOAD_MEMBASE:
4296 case OP_LOADI4_MEMBASE:
4297 case OP_LOADU4_MEMBASE:
4298 /* this case is special, since it happens for spill code after lowering has been called */
4299 if (arm_is_imm12 (ins->inst_offset)) {
4300 ARM_LDR_IMM (code, ins->dreg, ins->inst_basereg, ins->inst_offset);
4302 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_offset);
4303 ARM_LDR_REG_REG (code, ins->dreg, ins->inst_basereg, ARMREG_LR);
4306 case OP_LOADI1_MEMBASE:
4307 g_assert (arm_is_imm8 (ins->inst_offset));
4308 ARM_LDRSB_IMM (code, ins->dreg, ins->inst_basereg, ins->inst_offset);
4310 case OP_LOADU1_MEMBASE:
4311 g_assert (arm_is_imm12 (ins->inst_offset));
4312 ARM_LDRB_IMM (code, ins->dreg, ins->inst_basereg, ins->inst_offset);
4314 case OP_LOADU2_MEMBASE:
4315 g_assert (arm_is_imm8 (ins->inst_offset));
4316 ARM_LDRH_IMM (code, ins->dreg, ins->inst_basereg, ins->inst_offset);
4318 case OP_LOADI2_MEMBASE:
4319 g_assert (arm_is_imm8 (ins->inst_offset));
4320 ARM_LDRSH_IMM (code, ins->dreg, ins->inst_basereg, ins->inst_offset);
4322 case OP_ICONV_TO_I1:
4323 ARM_SHL_IMM (code, ins->dreg, ins->sreg1, 24);
4324 ARM_SAR_IMM (code, ins->dreg, ins->dreg, 24);
4326 case OP_ICONV_TO_I2:
4327 ARM_SHL_IMM (code, ins->dreg, ins->sreg1, 16);
4328 ARM_SAR_IMM (code, ins->dreg, ins->dreg, 16);
4330 case OP_ICONV_TO_U1:
4331 ARM_AND_REG_IMM8 (code, ins->dreg, ins->sreg1, 0xff);
4333 case OP_ICONV_TO_U2:
4334 ARM_SHL_IMM (code, ins->dreg, ins->sreg1, 16);
4335 ARM_SHR_IMM (code, ins->dreg, ins->dreg, 16);
4339 ARM_CMP_REG_REG (code, ins->sreg1, ins->sreg2);
4341 case OP_COMPARE_IMM:
4342 case OP_ICOMPARE_IMM:
4343 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
4344 g_assert (imm8 >= 0);
4345 ARM_CMP_REG_IMM (code, ins->sreg1, imm8, rot_amount);
4349 * gdb does not like encountering the hw breakpoint ins in the debugged code.
4350 * So instead of emitting a trap, we emit a call to a C function and place a
4353 //*(int*)code = 0xef9f0001;
4356 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_INTERNAL_METHOD,
4357 (gpointer)"mono_break");
4358 code = emit_call_seq (cfg, code);
4360 case OP_RELAXED_NOP:
4365 case OP_DUMMY_STORE:
4366 case OP_DUMMY_ICONST:
4367 case OP_DUMMY_R8CONST:
4368 case OP_NOT_REACHED:
4371 case OP_SEQ_POINT: {
4373 MonoInst *info_var = cfg->arch.seq_point_info_var;
4374 MonoInst *ss_trigger_page_var = cfg->arch.ss_trigger_page_var;
4375 MonoInst *ss_read_var = cfg->arch.seq_point_read_var;
4376 MonoInst *ss_method_var = cfg->arch.seq_point_ss_method_var;
4377 MonoInst *bp_method_var = cfg->arch.seq_point_bp_method_var;
4379 int dreg = ARMREG_LR;
4381 if (cfg->soft_breakpoints) {
4382 g_assert (!cfg->compile_aot);
4386 * For AOT, we use one got slot per method, which will point to a
4387 * SeqPointInfo structure, containing all the information required
4388 * by the code below.
4390 if (cfg->compile_aot) {
4391 g_assert (info_var);
4392 g_assert (info_var->opcode == OP_REGOFFSET);
4393 g_assert (arm_is_imm12 (info_var->inst_offset));
4396 if (!cfg->soft_breakpoints) {
4398 * Read from the single stepping trigger page. This will cause a
4399 * SIGSEGV when single stepping is enabled.
4400 * We do this _before_ the breakpoint, so single stepping after
4401 * a breakpoint is hit will step to the next IL offset.
4403 g_assert (((guint64)(gsize)ss_trigger_page >> 32) == 0);
4406 if (ins->flags & MONO_INST_SINGLE_STEP_LOC) {
4407 if (cfg->soft_breakpoints) {
4408 /* Load the address of the sequence point trigger variable. */
4411 g_assert (var->opcode == OP_REGOFFSET);
4412 g_assert (arm_is_imm12 (var->inst_offset));
4413 ARM_LDR_IMM (code, dreg, var->inst_basereg, var->inst_offset);
4415 /* Read the value and check whether it is non-zero. */
4416 ARM_LDR_IMM (code, dreg, dreg, 0);
4417 ARM_CMP_REG_IMM (code, dreg, 0, 0);
4419 /* Load the address of the sequence point method. */
4420 var = ss_method_var;
4422 g_assert (var->opcode == OP_REGOFFSET);
4423 g_assert (arm_is_imm12 (var->inst_offset));
4424 ARM_LDR_IMM (code, dreg, var->inst_basereg, var->inst_offset);
4426 /* Call it conditionally. */
4427 ARM_BLX_REG_COND (code, ARMCOND_NE, dreg);
4429 if (cfg->compile_aot) {
4430 /* Load the trigger page addr from the variable initialized in the prolog */
4431 var = ss_trigger_page_var;
4433 g_assert (var->opcode == OP_REGOFFSET);
4434 g_assert (arm_is_imm12 (var->inst_offset));
4435 ARM_LDR_IMM (code, dreg, var->inst_basereg, var->inst_offset);
4437 #ifdef USE_JUMP_TABLES
4438 gpointer *jte = mono_jumptable_add_entry ();
4439 code = mono_arm_load_jumptable_entry (code, jte, dreg);
4440 jte [0] = ss_trigger_page;
4442 ARM_LDR_IMM (code, dreg, ARMREG_PC, 0);
4444 *(int*)code = (int)ss_trigger_page;
4448 ARM_LDR_IMM (code, dreg, dreg, 0);
4452 mono_add_seq_point (cfg, bb, ins, code - cfg->native_code);
4454 if (cfg->soft_breakpoints) {
4455 /* Load the address of the breakpoint method into ip. */
4456 var = bp_method_var;
4458 g_assert (var->opcode == OP_REGOFFSET);
4459 g_assert (arm_is_imm12 (var->inst_offset));
4460 ARM_LDR_IMM (code, dreg, var->inst_basereg, var->inst_offset);
4463 * A placeholder for a possible breakpoint inserted by
4464 * mono_arch_set_breakpoint ().
4467 } else if (cfg->compile_aot) {
4468 guint32 offset = code - cfg->native_code;
4471 ARM_LDR_IMM (code, dreg, info_var->inst_basereg, info_var->inst_offset);
4472 /* Add the offset */
4473 val = ((offset / 4) * sizeof (guint8*)) + MONO_STRUCT_OFFSET (SeqPointInfo, bp_addrs);
4474 /* Load the info->bp_addrs [offset], which is either 0 or the address of a trigger page */
4475 if (arm_is_imm12 ((int)val)) {
4476 ARM_LDR_IMM (code, dreg, dreg, val);
4478 ARM_ADD_REG_IMM (code, dreg, dreg, (val & 0xFF), 0);
4480 ARM_ADD_REG_IMM (code, dreg, dreg, (val & 0xFF00) >> 8, 24);
4482 ARM_ADD_REG_IMM (code, dreg, dreg, (val & 0xFF0000) >> 16, 16);
4483 g_assert (!(val & 0xFF000000));
4485 ARM_LDR_IMM (code, dreg, dreg, 0);
4487 /* What is faster, a branch or a load ? */
4488 ARM_CMP_REG_IMM (code, dreg, 0, 0);
4489 /* The breakpoint instruction */
4490 ARM_LDR_IMM_COND (code, dreg, dreg, 0, ARMCOND_NE);
4493 * A placeholder for a possible breakpoint inserted by
4494 * mono_arch_set_breakpoint ().
4496 for (i = 0; i < 4; ++i)
4503 ARM_ADDS_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4506 ARM_ADD_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4510 ARM_ADCS_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4513 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
4514 g_assert (imm8 >= 0);
4515 ARM_ADDS_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
4519 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
4520 g_assert (imm8 >= 0);
4521 ARM_ADD_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
4525 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
4526 g_assert (imm8 >= 0);
4527 ARM_ADCS_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
4530 ARM_ADD_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4531 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, "OverflowException");
4533 case OP_IADD_OVF_UN:
4534 ARM_ADD_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4535 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, "OverflowException");
4538 ARM_SUB_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4539 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, "OverflowException");
4541 case OP_ISUB_OVF_UN:
4542 ARM_SUB_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4543 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_TRUE, PPC_BR_EQ, "OverflowException");
4545 case OP_ADD_OVF_CARRY:
4546 ARM_ADCS_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4547 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, "OverflowException");
4549 case OP_ADD_OVF_UN_CARRY:
4550 ARM_ADCS_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4551 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, "OverflowException");
4553 case OP_SUB_OVF_CARRY:
4554 ARM_SBCS_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4555 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, "OverflowException");
4557 case OP_SUB_OVF_UN_CARRY:
4558 ARM_SBCS_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4559 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_TRUE, PPC_BR_EQ, "OverflowException");
4563 ARM_SUBS_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4566 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
4567 g_assert (imm8 >= 0);
4568 ARM_SUBS_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
4571 ARM_SUB_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4575 ARM_SBCS_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4579 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
4580 g_assert (imm8 >= 0);
4581 ARM_SUB_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
4585 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
4586 g_assert (imm8 >= 0);
4587 ARM_SBCS_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
4589 case OP_ARM_RSBS_IMM:
4590 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
4591 g_assert (imm8 >= 0);
4592 ARM_RSBS_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
4594 case OP_ARM_RSC_IMM:
4595 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
4596 g_assert (imm8 >= 0);
4597 ARM_RSC_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
4600 ARM_AND_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4604 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
4605 g_assert (imm8 >= 0);
4606 ARM_AND_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
4609 g_assert (v7s_supported);
4610 ARM_SDIV (code, ins->dreg, ins->sreg1, ins->sreg2);
4613 g_assert (v7s_supported);
4614 ARM_UDIV (code, ins->dreg, ins->sreg1, ins->sreg2);
4617 g_assert (v7s_supported);
4618 ARM_SDIV (code, ARMREG_LR, ins->sreg1, ins->sreg2);
4619 ARM_MLS (code, ins->dreg, ARMREG_LR, ins->sreg2, ins->sreg1);
4622 g_assert (v7s_supported);
4623 ARM_UDIV (code, ARMREG_LR, ins->sreg1, ins->sreg2);
4624 ARM_MLS (code, ins->dreg, ARMREG_LR, ins->sreg2, ins->sreg1);
4628 g_assert_not_reached ();
4630 ARM_ORR_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4634 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
4635 g_assert (imm8 >= 0);
4636 ARM_ORR_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
4639 ARM_EOR_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4643 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
4644 g_assert (imm8 >= 0);
4645 ARM_EOR_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
4648 ARM_SHL_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4653 ARM_SHL_IMM (code, ins->dreg, ins->sreg1, (ins->inst_imm & 0x1f));
4654 else if (ins->dreg != ins->sreg1)
4655 ARM_MOV_REG_REG (code, ins->dreg, ins->sreg1);
4658 ARM_SAR_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4663 ARM_SAR_IMM (code, ins->dreg, ins->sreg1, (ins->inst_imm & 0x1f));
4664 else if (ins->dreg != ins->sreg1)
4665 ARM_MOV_REG_REG (code, ins->dreg, ins->sreg1);
4668 case OP_ISHR_UN_IMM:
4670 ARM_SHR_IMM (code, ins->dreg, ins->sreg1, (ins->inst_imm & 0x1f));
4671 else if (ins->dreg != ins->sreg1)
4672 ARM_MOV_REG_REG (code, ins->dreg, ins->sreg1);
4675 ARM_SHR_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4678 ARM_MVN_REG_REG (code, ins->dreg, ins->sreg1);
4681 ARM_RSB_REG_IMM8 (code, ins->dreg, ins->sreg1, 0);
4684 if (ins->dreg == ins->sreg2)
4685 ARM_MUL_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4687 ARM_MUL_REG_REG (code, ins->dreg, ins->sreg2, ins->sreg1);
4690 g_assert_not_reached ();
4693 /* FIXME: handle ovf/ sreg2 != dreg */
4694 ARM_MUL_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4695 /* FIXME: MUL doesn't set the C/O flags on ARM */
4697 case OP_IMUL_OVF_UN:
4698 /* FIXME: handle ovf/ sreg2 != dreg */
4699 ARM_MUL_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4700 /* FIXME: MUL doesn't set the C/O flags on ARM */
4703 code = mono_arm_emit_load_imm (code, ins->dreg, ins->inst_c0);
4706 /* Load the GOT offset */
4707 mono_add_patch_info (cfg, offset, (MonoJumpInfoType)ins->inst_i1, ins->inst_p0);
4708 ARM_LDR_IMM (code, ins->dreg, ARMREG_PC, 0);
4710 *(gpointer*)code = NULL;
4712 /* Load the value from the GOT */
4713 ARM_LDR_REG_REG (code, ins->dreg, ARMREG_PC, ins->dreg);
4715 case OP_OBJC_GET_SELECTOR:
4716 mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_OBJC_SELECTOR_REF, ins->inst_p0);
4717 ARM_LDR_IMM (code, ins->dreg, ARMREG_PC, 0);
4719 *(gpointer*)code = NULL;
4721 ARM_LDR_REG_REG (code, ins->dreg, ARMREG_PC, ins->dreg);
4723 case OP_ICONV_TO_I4:
4724 case OP_ICONV_TO_U4:
4726 if (ins->dreg != ins->sreg1)
4727 ARM_MOV_REG_REG (code, ins->dreg, ins->sreg1);
4730 int saved = ins->sreg2;
4731 if (ins->sreg2 == ARM_LSW_REG) {
4732 ARM_MOV_REG_REG (code, ARMREG_LR, ins->sreg2);
4735 if (ins->sreg1 != ARM_LSW_REG)
4736 ARM_MOV_REG_REG (code, ARM_LSW_REG, ins->sreg1);
4737 if (saved != ARM_MSW_REG)
4738 ARM_MOV_REG_REG (code, ARM_MSW_REG, saved);
4743 ARM_CPYD (code, ins->dreg, ins->sreg1);
4745 case OP_FCONV_TO_R4:
4747 ARM_CVTD (code, ins->dreg, ins->sreg1);
4748 ARM_CVTS (code, ins->dreg, ins->dreg);
4753 * Keep in sync with mono_arch_emit_epilog
4755 g_assert (!cfg->method->save_lmf);
4757 code = emit_load_volatile_arguments (cfg, code);
4759 code = emit_big_add (code, ARMREG_SP, cfg->frame_reg, cfg->stack_usage);
4761 if (cfg->used_int_regs)
4762 ARM_POP (code, cfg->used_int_regs);
4763 ARM_POP (code, (1 << ARMREG_R7) | (1 << ARMREG_LR));
4765 ARM_POP (code, cfg->used_int_regs | (1 << ARMREG_LR));
4767 mono_add_patch_info (cfg, (guint8*) code - cfg->native_code, MONO_PATCH_INFO_METHOD_JUMP, ins->inst_p0);
4768 if (cfg->compile_aot) {
4769 ARM_LDR_IMM (code, ARMREG_IP, ARMREG_PC, 0);
4771 *(gpointer*)code = NULL;
4773 ARM_LDR_REG_REG (code, ARMREG_PC, ARMREG_PC, ARMREG_IP);
4775 code = mono_arm_patchable_b (code, ARMCOND_AL);
4779 MonoCallInst *call = (MonoCallInst*)ins;
4782 * The stack looks like the following:
4783 * <caller argument area>
4786 * <callee argument area>
4787 * Need to copy the arguments from the callee argument area to
4788 * the caller argument area, and pop the frame.
4790 if (call->stack_usage) {
4791 int i, prev_sp_offset = 0;
4793 /* Compute size of saved registers restored below */
4795 prev_sp_offset = 2 * 4;
4797 prev_sp_offset = 1 * 4;
4798 for (i = 0; i < 16; ++i) {
4799 if (cfg->used_int_regs & (1 << i))
4800 prev_sp_offset += 4;
4803 code = emit_big_add (code, ARMREG_IP, cfg->frame_reg, cfg->stack_usage + prev_sp_offset);
4805 /* Copy arguments on the stack to our argument area */
4806 for (i = 0; i < call->stack_usage; i += sizeof (mgreg_t)) {
4807 ARM_LDR_IMM (code, ARMREG_LR, ARMREG_SP, i);
4808 ARM_STR_IMM (code, ARMREG_LR, ARMREG_IP, i);
4813 * Keep in sync with mono_arch_emit_epilog
4815 g_assert (!cfg->method->save_lmf);
4817 code = emit_big_add (code, ARMREG_SP, cfg->frame_reg, cfg->stack_usage);
4819 if (cfg->used_int_regs)
4820 ARM_POP (code, cfg->used_int_regs);
4821 ARM_POP (code, (1 << ARMREG_R7) | (1 << ARMREG_LR));
4823 ARM_POP (code, cfg->used_int_regs | (1 << ARMREG_LR));
4826 mono_add_patch_info (cfg, (guint8*) code - cfg->native_code, MONO_PATCH_INFO_METHOD_JUMP, call->method);
4827 if (cfg->compile_aot) {
4828 ARM_LDR_IMM (code, ARMREG_IP, ARMREG_PC, 0);
4830 *(gpointer*)code = NULL;
4832 ARM_LDR_REG_REG (code, ARMREG_PC, ARMREG_PC, ARMREG_IP);
4834 code = mono_arm_patchable_b (code, ARMCOND_AL);
4839 /* ensure ins->sreg1 is not NULL */
4840 ARM_LDRB_IMM (code, ARMREG_LR, ins->sreg1, 0);
4843 g_assert (cfg->sig_cookie < 128);
4844 ARM_LDR_IMM (code, ARMREG_IP, cfg->frame_reg, cfg->sig_cookie);
4845 ARM_STR_IMM (code, ARMREG_IP, ins->sreg1, 0);
4854 call = (MonoCallInst*)ins;
4857 code = emit_float_args (cfg, call, code, &max_len, &offset);
4859 if (ins->flags & MONO_INST_HAS_METHOD)
4860 mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_METHOD, call->method);
4862 mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_ABS, call->fptr);
4863 code = emit_call_seq (cfg, code);
4864 ins->flags |= MONO_INST_GC_CALLSITE;
4865 ins->backend.pc_offset = code - cfg->native_code;
4866 code = emit_move_return_value (cfg, ins, code);
4872 case OP_VOIDCALL_REG:
4875 code = emit_float_args (cfg, (MonoCallInst *)ins, code, &max_len, &offset);
4877 code = emit_call_reg (code, ins->sreg1);
4878 ins->flags |= MONO_INST_GC_CALLSITE;
4879 ins->backend.pc_offset = code - cfg->native_code;
4880 code = emit_move_return_value (cfg, ins, code);
4882 case OP_FCALL_MEMBASE:
4883 case OP_LCALL_MEMBASE:
4884 case OP_VCALL_MEMBASE:
4885 case OP_VCALL2_MEMBASE:
4886 case OP_VOIDCALL_MEMBASE:
4887 case OP_CALL_MEMBASE: {
4888 gboolean imt_arg = FALSE;
4890 g_assert (ins->sreg1 != ARMREG_LR);
4891 call = (MonoCallInst*)ins;
4894 code = emit_float_args (cfg, call, code, &max_len, &offset);
4896 if (call->dynamic_imt_arg || call->method->klass->flags & TYPE_ATTRIBUTE_INTERFACE)
4898 if (!arm_is_imm12 (ins->inst_offset))
4899 code = mono_arm_emit_load_imm (code, ARMREG_IP, ins->inst_offset);
4900 #ifdef USE_JUMP_TABLES
4906 ARM_ADD_REG_IMM8 (code, ARMREG_LR, ARMREG_PC, LR_BIAS);
4908 ARM_MOV_REG_REG (code, ARMREG_LR, ARMREG_PC);
4910 if (!arm_is_imm12 (ins->inst_offset))
4911 ARM_LDR_REG_REG (code, ARMREG_PC, ins->sreg1, ARMREG_IP);
4913 ARM_LDR_IMM (code, ARMREG_PC, ins->sreg1, ins->inst_offset);
4916 * We can't embed the method in the code stream in PIC code, or
4918 * Instead, we put it in V5 in code emitted by
4919 * mono_arch_emit_imt_argument (), and embed NULL here to
4920 * signal the IMT thunk that the value is in V5.
4922 #ifdef USE_JUMP_TABLES
4923 /* In case of jumptables we always use value in V5. */
4926 if (call->dynamic_imt_arg)
4927 *((gpointer*)code) = NULL;
4929 *((gpointer*)code) = (gpointer)call->method;
4933 ins->flags |= MONO_INST_GC_CALLSITE;
4934 ins->backend.pc_offset = code - cfg->native_code;
4935 code = emit_move_return_value (cfg, ins, code);
4939 /* keep alignment */
4940 int alloca_waste = cfg->param_area;
4943 /* round the size to 8 bytes */
4944 ARM_ADD_REG_IMM8 (code, ins->dreg, ins->sreg1, 7);
4945 ARM_BIC_REG_IMM8 (code, ins->dreg, ins->dreg, 7);
4947 ARM_ADD_REG_IMM8 (code, ins->dreg, ins->dreg, alloca_waste);
4948 ARM_SUB_REG_REG (code, ARMREG_SP, ARMREG_SP, ins->dreg);
4949 /* memzero the area: dreg holds the size, sp is the pointer */
4950 if (ins->flags & MONO_INST_INIT) {
4951 guint8 *start_loop, *branch_to_cond;
4952 ARM_MOV_REG_IMM8 (code, ARMREG_LR, 0);
4953 branch_to_cond = code;
4956 ARM_STR_REG_REG (code, ARMREG_LR, ARMREG_SP, ins->dreg);
4957 arm_patch (branch_to_cond, code);
4958 /* decrement by 4 and set flags */
4959 ARM_SUBS_REG_IMM8 (code, ins->dreg, ins->dreg, sizeof (mgreg_t));
4960 ARM_B_COND (code, ARMCOND_GE, 0);
4961 arm_patch (code - 4, start_loop);
4963 ARM_ADD_REG_IMM8 (code, ins->dreg, ARMREG_SP, alloca_waste);
4968 MonoInst *var = cfg->dyn_call_var;
4970 g_assert (var->opcode == OP_REGOFFSET);
4971 g_assert (arm_is_imm12 (var->inst_offset));
4973 /* lr = args buffer filled by mono_arch_get_dyn_call_args () */
4974 ARM_MOV_REG_REG( code, ARMREG_LR, ins->sreg1);
4976 ARM_MOV_REG_REG( code, ARMREG_IP, ins->sreg2);
4978 /* Save args buffer */
4979 ARM_STR_IMM (code, ARMREG_LR, var->inst_basereg, var->inst_offset);
4981 /* Set stack slots using R0 as scratch reg */
4982 /* MONO_ARCH_DYN_CALL_PARAM_AREA gives the size of stack space available */
4983 for (i = 0; i < DYN_CALL_STACK_ARGS; ++i) {
4984 ARM_LDR_IMM (code, ARMREG_R0, ARMREG_LR, (PARAM_REGS + i) * sizeof (mgreg_t));
4985 ARM_STR_IMM (code, ARMREG_R0, ARMREG_SP, i * sizeof (mgreg_t));
4988 /* Set argument registers */
4989 for (i = 0; i < PARAM_REGS; ++i)
4990 ARM_LDR_IMM (code, i, ARMREG_LR, i * sizeof (mgreg_t));
4993 ARM_MOV_REG_REG (code, ARMREG_LR, ARMREG_PC);
4994 ARM_MOV_REG_REG (code, ARMREG_PC, ARMREG_IP);
4997 ARM_LDR_IMM (code, ARMREG_IP, var->inst_basereg, var->inst_offset);
4998 ARM_STR_IMM (code, ARMREG_R0, ARMREG_IP, MONO_STRUCT_OFFSET (DynCallArgs, res));
4999 ARM_STR_IMM (code, ARMREG_R1, ARMREG_IP, MONO_STRUCT_OFFSET (DynCallArgs, res2));
5003 if (ins->sreg1 != ARMREG_R0)
5004 ARM_MOV_REG_REG (code, ARMREG_R0, ins->sreg1);
5005 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_INTERNAL_METHOD,
5006 (gpointer)"mono_arch_throw_exception");
5007 code = emit_call_seq (cfg, code);
5011 if (ins->sreg1 != ARMREG_R0)
5012 ARM_MOV_REG_REG (code, ARMREG_R0, ins->sreg1);
5013 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_INTERNAL_METHOD,
5014 (gpointer)"mono_arch_rethrow_exception");
5015 code = emit_call_seq (cfg, code);
5018 case OP_START_HANDLER: {
5019 MonoInst *spvar = mono_find_spvar_for_region (cfg, bb->region);
5022 /* Reserve a param area, see filter-stack.exe */
5023 if (cfg->param_area) {
5024 if ((i = mono_arm_is_rotated_imm8 (cfg->param_area, &rot_amount)) >= 0) {
5025 ARM_SUB_REG_IMM (code, ARMREG_SP, ARMREG_SP, i, rot_amount);
5027 code = mono_arm_emit_load_imm (code, ARMREG_IP, cfg->param_area);
5028 ARM_SUB_REG_REG (code, ARMREG_SP, ARMREG_SP, ARMREG_IP);
5032 if (arm_is_imm12 (spvar->inst_offset)) {
5033 ARM_STR_IMM (code, ARMREG_LR, spvar->inst_basereg, spvar->inst_offset);
5035 code = mono_arm_emit_load_imm (code, ARMREG_IP, spvar->inst_offset);
5036 ARM_STR_REG_REG (code, ARMREG_LR, spvar->inst_basereg, ARMREG_IP);
5040 case OP_ENDFILTER: {
5041 MonoInst *spvar = mono_find_spvar_for_region (cfg, bb->region);
5044 /* Free the param area */
5045 if (cfg->param_area) {
5046 if ((i = mono_arm_is_rotated_imm8 (cfg->param_area, &rot_amount)) >= 0) {
5047 ARM_ADD_REG_IMM (code, ARMREG_SP, ARMREG_SP, i, rot_amount);
5049 code = mono_arm_emit_load_imm (code, ARMREG_IP, cfg->param_area);
5050 ARM_ADD_REG_REG (code, ARMREG_SP, ARMREG_SP, ARMREG_IP);
5054 if (ins->sreg1 != ARMREG_R0)
5055 ARM_MOV_REG_REG (code, ARMREG_R0, ins->sreg1);
5056 if (arm_is_imm12 (spvar->inst_offset)) {
5057 ARM_LDR_IMM (code, ARMREG_IP, spvar->inst_basereg, spvar->inst_offset);
5059 g_assert (ARMREG_IP != spvar->inst_basereg);
5060 code = mono_arm_emit_load_imm (code, ARMREG_IP, spvar->inst_offset);
5061 ARM_LDR_REG_REG (code, ARMREG_IP, spvar->inst_basereg, ARMREG_IP);
5063 ARM_MOV_REG_REG (code, ARMREG_PC, ARMREG_IP);
5066 case OP_ENDFINALLY: {
5067 MonoInst *spvar = mono_find_spvar_for_region (cfg, bb->region);
5070 /* Free the param area */
5071 if (cfg->param_area) {
5072 if ((i = mono_arm_is_rotated_imm8 (cfg->param_area, &rot_amount)) >= 0) {
5073 ARM_ADD_REG_IMM (code, ARMREG_SP, ARMREG_SP, i, rot_amount);
5075 code = mono_arm_emit_load_imm (code, ARMREG_IP, cfg->param_area);
5076 ARM_ADD_REG_REG (code, ARMREG_SP, ARMREG_SP, ARMREG_IP);
5080 if (arm_is_imm12 (spvar->inst_offset)) {
5081 ARM_LDR_IMM (code, ARMREG_IP, spvar->inst_basereg, spvar->inst_offset);
5083 g_assert (ARMREG_IP != spvar->inst_basereg);
5084 code = mono_arm_emit_load_imm (code, ARMREG_IP, spvar->inst_offset);
5085 ARM_LDR_REG_REG (code, ARMREG_IP, spvar->inst_basereg, ARMREG_IP);
5087 ARM_MOV_REG_REG (code, ARMREG_PC, ARMREG_IP);
5090 case OP_CALL_HANDLER:
5091 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_BB, ins->inst_target_bb);
5092 code = mono_arm_patchable_bl (code, ARMCOND_AL);
5093 mono_cfg_add_try_hole (cfg, ins->inst_eh_block, code, bb);
5096 ins->inst_c0 = code - cfg->native_code;
5099 /*if (ins->inst_target_bb->native_offset) {
5101 //x86_jump_code (code, cfg->native_code + ins->inst_target_bb->native_offset);
5103 mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_BB, ins->inst_target_bb);
5104 code = mono_arm_patchable_b (code, ARMCOND_AL);
5108 ARM_MOV_REG_REG (code, ARMREG_PC, ins->sreg1);
5112 * In the normal case we have:
5113 * ldr pc, [pc, ins->sreg1 << 2]
5116 * ldr lr, [pc, ins->sreg1 << 2]
5118 * After follows the data.
5119 * FIXME: add aot support.
5121 mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_SWITCH, ins->inst_p0);
5122 #ifdef USE_JUMP_TABLES
5124 gpointer *jte = mono_jumptable_add_entries (GPOINTER_TO_INT (ins->klass));
5125 code = mono_arm_load_jumptable_entry_addr (code, jte, ARMREG_IP);
5126 ARM_LDR_REG_REG_SHIFT (code, ARMREG_PC, ARMREG_IP, ins->sreg1, ARMSHIFT_LSL, 2);
5130 max_len += 4 * GPOINTER_TO_INT (ins->klass);
5131 if (offset + max_len > (cfg->code_size - 16)) {
5132 cfg->code_size += max_len;
5133 cfg->code_size *= 2;
5134 cfg->native_code = g_realloc (cfg->native_code, cfg->code_size);
5135 code = cfg->native_code + offset;
5137 ARM_LDR_REG_REG_SHIFT (code, ARMREG_PC, ARMREG_PC, ins->sreg1, ARMSHIFT_LSL, 2);
5139 code += 4 * GPOINTER_TO_INT (ins->klass);
5144 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 0, ARMCOND_NE);
5145 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_EQ);
5149 ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
5150 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_LT);
5154 ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
5155 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_LO);
5159 ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
5160 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_GT);
5164 ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
5165 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_HI);
5168 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_NE);
5169 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 0, ARMCOND_EQ);
5172 ARM_MOV_REG_IMM8 (code, ins->dreg, 1);
5173 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 0, ARMCOND_LT);
5176 ARM_MOV_REG_IMM8 (code, ins->dreg, 1);
5177 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 0, ARMCOND_GT);
5181 ARM_MOV_REG_IMM8 (code, ins->dreg, 1);
5182 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 0, ARMCOND_LO);
5184 case OP_COND_EXC_EQ:
5185 case OP_COND_EXC_NE_UN:
5186 case OP_COND_EXC_LT:
5187 case OP_COND_EXC_LT_UN:
5188 case OP_COND_EXC_GT:
5189 case OP_COND_EXC_GT_UN:
5190 case OP_COND_EXC_GE:
5191 case OP_COND_EXC_GE_UN:
5192 case OP_COND_EXC_LE:
5193 case OP_COND_EXC_LE_UN:
5194 EMIT_COND_SYSTEM_EXCEPTION (ins->opcode - OP_COND_EXC_EQ, ins->inst_p1);
5196 case OP_COND_EXC_IEQ:
5197 case OP_COND_EXC_INE_UN:
5198 case OP_COND_EXC_ILT:
5199 case OP_COND_EXC_ILT_UN:
5200 case OP_COND_EXC_IGT:
5201 case OP_COND_EXC_IGT_UN:
5202 case OP_COND_EXC_IGE:
5203 case OP_COND_EXC_IGE_UN:
5204 case OP_COND_EXC_ILE:
5205 case OP_COND_EXC_ILE_UN:
5206 EMIT_COND_SYSTEM_EXCEPTION (ins->opcode - OP_COND_EXC_IEQ, ins->inst_p1);
5209 case OP_COND_EXC_IC:
5210 EMIT_COND_SYSTEM_EXCEPTION_FLAGS (ARMCOND_CS, ins->inst_p1);
5212 case OP_COND_EXC_OV:
5213 case OP_COND_EXC_IOV:
5214 EMIT_COND_SYSTEM_EXCEPTION_FLAGS (ARMCOND_VS, ins->inst_p1);
5216 case OP_COND_EXC_NC:
5217 case OP_COND_EXC_INC:
5218 EMIT_COND_SYSTEM_EXCEPTION_FLAGS (ARMCOND_CC, ins->inst_p1);
5220 case OP_COND_EXC_NO:
5221 case OP_COND_EXC_INO:
5222 EMIT_COND_SYSTEM_EXCEPTION_FLAGS (ARMCOND_VC, ins->inst_p1);
5234 EMIT_COND_BRANCH (ins, ins->opcode - OP_IBEQ);
5237 /* floating point opcodes */
5239 if (cfg->compile_aot) {
5240 ARM_FLDD (code, ins->dreg, ARMREG_PC, 0);
5242 *(guint32*)code = ((guint32*)(ins->inst_p0))[0];
5244 *(guint32*)code = ((guint32*)(ins->inst_p0))[1];
5247 /* FIXME: we can optimize the imm load by dealing with part of
5248 * the displacement in LDFD (aligning to 512).
5250 code = mono_arm_emit_load_imm (code, ARMREG_LR, (guint32)ins->inst_p0);
5251 ARM_FLDD (code, ins->dreg, ARMREG_LR, 0);
5255 if (cfg->compile_aot) {
5256 ARM_FLDS (code, ins->dreg, ARMREG_PC, 0);
5258 *(guint32*)code = ((guint32*)(ins->inst_p0))[0];
5260 ARM_CVTS (code, ins->dreg, ins->dreg);
5262 code = mono_arm_emit_load_imm (code, ARMREG_LR, (guint32)ins->inst_p0);
5263 ARM_FLDS (code, ins->dreg, ARMREG_LR, 0);
5264 ARM_CVTS (code, ins->dreg, ins->dreg);
5267 case OP_STORER8_MEMBASE_REG:
5268 /* This is generated by the local regalloc pass which runs after the lowering pass */
5269 if (!arm_is_fpimm8 (ins->inst_offset)) {
5270 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_offset);
5271 ARM_ADD_REG_REG (code, ARMREG_LR, ARMREG_LR, ins->inst_destbasereg);
5272 ARM_FSTD (code, ins->sreg1, ARMREG_LR, 0);
5274 ARM_FSTD (code, ins->sreg1, ins->inst_destbasereg, ins->inst_offset);
5277 case OP_LOADR8_MEMBASE:
5278 /* This is generated by the local regalloc pass which runs after the lowering pass */
5279 if (!arm_is_fpimm8 (ins->inst_offset)) {
5280 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_offset);
5281 ARM_ADD_REG_REG (code, ARMREG_LR, ARMREG_LR, ins->inst_basereg);
5282 ARM_FLDD (code, ins->dreg, ARMREG_LR, 0);
5284 ARM_FLDD (code, ins->dreg, ins->inst_basereg, ins->inst_offset);
5287 case OP_STORER4_MEMBASE_REG:
5288 g_assert (arm_is_fpimm8 (ins->inst_offset));
5289 code = mono_arm_emit_vfp_scratch_save (cfg, code, vfp_scratch1);
5290 ARM_CVTD (code, vfp_scratch1, ins->sreg1);
5291 ARM_FSTS (code, vfp_scratch1, ins->inst_destbasereg, ins->inst_offset);
5292 code = mono_arm_emit_vfp_scratch_restore (cfg, code, vfp_scratch1);
5294 case OP_LOADR4_MEMBASE:
5295 g_assert (arm_is_fpimm8 (ins->inst_offset));
5296 code = mono_arm_emit_vfp_scratch_save (cfg, code, vfp_scratch1);
5297 ARM_FLDS (code, vfp_scratch1, ins->inst_basereg, ins->inst_offset);
5298 ARM_CVTS (code, ins->dreg, vfp_scratch1);
5299 code = mono_arm_emit_vfp_scratch_restore (cfg, code, vfp_scratch1);
5301 case OP_ICONV_TO_R_UN: {
5302 g_assert_not_reached ();
5305 case OP_ICONV_TO_R4:
5306 code = mono_arm_emit_vfp_scratch_save (cfg, code, vfp_scratch1);
5307 ARM_FMSR (code, vfp_scratch1, ins->sreg1);
5308 ARM_FSITOS (code, vfp_scratch1, vfp_scratch1);
5309 ARM_CVTS (code, ins->dreg, vfp_scratch1);
5310 code = mono_arm_emit_vfp_scratch_restore (cfg, code, vfp_scratch1);
5312 case OP_ICONV_TO_R8:
5313 code = mono_arm_emit_vfp_scratch_save (cfg, code, vfp_scratch1);
5314 ARM_FMSR (code, vfp_scratch1, ins->sreg1);
5315 ARM_FSITOD (code, ins->dreg, vfp_scratch1);
5316 code = mono_arm_emit_vfp_scratch_restore (cfg, code, vfp_scratch1);
5320 if (mono_method_signature (cfg->method)->ret->type == MONO_TYPE_R4) {
5321 ARM_CVTD (code, ARM_VFP_F0, ins->sreg1);
5323 if (!IS_HARD_FLOAT) {
5324 ARM_FMRS (code, ARMREG_R0, ARM_VFP_F0);
5327 if (IS_HARD_FLOAT) {
5328 ARM_CPYD (code, ARM_VFP_D0, ins->sreg1);
5330 ARM_FMRRD (code, ARMREG_R0, ARMREG_R1, ins->sreg1);
5334 case OP_FCONV_TO_I1:
5335 code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 1, TRUE);
5337 case OP_FCONV_TO_U1:
5338 code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 1, FALSE);
5340 case OP_FCONV_TO_I2:
5341 code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 2, TRUE);
5343 case OP_FCONV_TO_U2:
5344 code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 2, FALSE);
5346 case OP_FCONV_TO_I4:
5348 code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 4, TRUE);
5350 case OP_FCONV_TO_U4:
5352 code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 4, FALSE);
5354 case OP_FCONV_TO_I8:
5355 case OP_FCONV_TO_U8:
5356 g_assert_not_reached ();
5357 /* Implemented as helper calls */
5359 case OP_LCONV_TO_R_UN:
5360 g_assert_not_reached ();
5361 /* Implemented as helper calls */
5363 case OP_LCONV_TO_OVF_I4_2: {
5364 guint8 *high_bit_not_set, *valid_negative, *invalid_negative, *valid_positive;
5366 * Valid ints: 0xFFFFFFFF:0x80000000 to 0x00000000:0x7FFFFFFF
5369 ARM_CMP_REG_IMM8 (code, ins->sreg1, 0);
5370 high_bit_not_set = code;
5371 ARM_B_COND (code, ARMCOND_GE, 0); /*branch if bit 31 of the lower part is not set*/
5373 ARM_CMN_REG_IMM8 (code, ins->sreg2, 1); /*This have the same effect as CMP reg, 0xFFFFFFFF */
5374 valid_negative = code;
5375 ARM_B_COND (code, ARMCOND_EQ, 0); /*branch if upper part == 0xFFFFFFFF (lower part has bit 31 set) */
5376 invalid_negative = code;
5377 ARM_B_COND (code, ARMCOND_AL, 0);
5379 arm_patch (high_bit_not_set, code);
5381 ARM_CMP_REG_IMM8 (code, ins->sreg2, 0);
5382 valid_positive = code;
5383 ARM_B_COND (code, ARMCOND_EQ, 0); /*branch if upper part == 0 (lower part has bit 31 clear)*/
5385 arm_patch (invalid_negative, code);
5386 EMIT_COND_SYSTEM_EXCEPTION_FLAGS (ARMCOND_AL, "OverflowException");
5388 arm_patch (valid_negative, code);
5389 arm_patch (valid_positive, code);
5391 if (ins->dreg != ins->sreg1)
5392 ARM_MOV_REG_REG (code, ins->dreg, ins->sreg1);
5396 ARM_VFP_ADDD (code, ins->dreg, ins->sreg1, ins->sreg2);
5399 ARM_VFP_SUBD (code, ins->dreg, ins->sreg1, ins->sreg2);
5402 ARM_VFP_MULD (code, ins->dreg, ins->sreg1, ins->sreg2);
5405 ARM_VFP_DIVD (code, ins->dreg, ins->sreg1, ins->sreg2);
5408 ARM_NEGD (code, ins->dreg, ins->sreg1);
5412 g_assert_not_reached ();
5416 ARM_CMPD (code, ins->sreg1, ins->sreg2);
5422 ARM_CMPD (code, ins->sreg1, ins->sreg2);
5425 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 0, ARMCOND_NE);
5426 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_EQ);
5430 ARM_CMPD (code, ins->sreg1, ins->sreg2);
5433 ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
5434 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_MI);
5438 ARM_CMPD (code, ins->sreg1, ins->sreg2);
5441 ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
5442 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_MI);
5443 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_VS);
5447 ARM_CMPD (code, ins->sreg2, ins->sreg1);
5450 ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
5451 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_MI);
5455 ARM_CMPD (code, ins->sreg2, ins->sreg1);
5458 ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
5459 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_MI);
5460 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_VS);
5464 ARM_CMPD (code, ins->sreg1, ins->sreg2);
5467 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_NE);
5468 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 0, ARMCOND_EQ);
5472 ARM_CMPD (code, ins->sreg1, ins->sreg2);
5475 ARM_MOV_REG_IMM8 (code, ins->dreg, 1);
5476 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 0, ARMCOND_MI);
5480 ARM_CMPD (code, ins->sreg2, ins->sreg1);
5483 ARM_MOV_REG_IMM8 (code, ins->dreg, 1);
5484 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 0, ARMCOND_MI);
5487 /* ARM FPA flags table:
5488 * N Less than ARMCOND_MI
5489 * Z Equal ARMCOND_EQ
5490 * C Greater Than or Equal ARMCOND_CS
5491 * V Unordered ARMCOND_VS
5494 EMIT_COND_BRANCH (ins, OP_IBEQ - OP_IBEQ);
5497 EMIT_COND_BRANCH (ins, OP_IBNE_UN - OP_IBEQ);
5500 EMIT_COND_BRANCH_FLAGS (ins, ARMCOND_MI); /* N set */
5503 EMIT_COND_BRANCH_FLAGS (ins, ARMCOND_VS); /* V set */
5504 EMIT_COND_BRANCH_FLAGS (ins, ARMCOND_MI); /* N set */
5510 g_assert_not_reached ();
5514 EMIT_COND_BRANCH_FLAGS (ins, ARMCOND_GE);
5516 /* FPA requires EQ even though the docs suggest that just CS is enough */
5517 EMIT_COND_BRANCH_FLAGS (ins, ARMCOND_EQ);
5518 EMIT_COND_BRANCH_FLAGS (ins, ARMCOND_CS);
5522 EMIT_COND_BRANCH_FLAGS (ins, ARMCOND_VS); /* V set */
5523 EMIT_COND_BRANCH_FLAGS (ins, ARMCOND_GE);
5528 code = mono_arm_emit_vfp_scratch_save (cfg, code, vfp_scratch1);
5529 code = mono_arm_emit_vfp_scratch_save (cfg, code, vfp_scratch2);
5531 #ifdef USE_JUMP_TABLES
5533 gpointer *jte = mono_jumptable_add_entries (2);
5534 jte [0] = GUINT_TO_POINTER (0xffffffff);
5535 jte [1] = GUINT_TO_POINTER (0x7fefffff);
5536 code = mono_arm_load_jumptable_entry_addr (code, jte, ARMREG_IP);
5537 ARM_FLDD (code, vfp_scratch1, ARMREG_IP, 0);
5540 ARM_ABSD (code, vfp_scratch2, ins->sreg1);
5541 ARM_FLDD (code, vfp_scratch1, ARMREG_PC, 0);
5543 *(guint32*)code = 0xffffffff;
5545 *(guint32*)code = 0x7fefffff;
5548 ARM_CMPD (code, vfp_scratch2, vfp_scratch1);
5550 EMIT_COND_SYSTEM_EXCEPTION_FLAGS (ARMCOND_GT, "ArithmeticException");
5551 ARM_CMPD (code, ins->sreg1, ins->sreg1);
5553 EMIT_COND_SYSTEM_EXCEPTION_FLAGS (ARMCOND_VS, "ArithmeticException");
5554 ARM_CPYD (code, ins->dreg, ins->sreg1);
5556 code = mono_arm_emit_vfp_scratch_restore (cfg, code, vfp_scratch1);
5557 code = mono_arm_emit_vfp_scratch_restore (cfg, code, vfp_scratch2);
5562 case OP_GC_LIVENESS_DEF:
5563 case OP_GC_LIVENESS_USE:
5564 case OP_GC_PARAM_SLOT_LIVENESS_DEF:
5565 ins->backend.pc_offset = code - cfg->native_code;
5567 case OP_GC_SPILL_SLOT_LIVENESS_DEF:
5568 ins->backend.pc_offset = code - cfg->native_code;
5569 bb->spill_slot_defs = g_slist_prepend_mempool (cfg->mempool, bb->spill_slot_defs, ins);
5573 g_warning ("unknown opcode %s in %s()\n", mono_inst_name (ins->opcode), __FUNCTION__);
5574 g_assert_not_reached ();
5577 if ((cfg->opt & MONO_OPT_BRANCH) && ((code - cfg->native_code - offset) > max_len)) {
5578 g_warning ("wrong maximal instruction length of instruction %s (expected %d, got %d)",
5579 mono_inst_name (ins->opcode), max_len, code - cfg->native_code - offset);
5580 g_assert_not_reached ();
5586 last_offset = offset;
5589 cfg->code_len = code - cfg->native_code;
5592 #endif /* DISABLE_JIT */
5594 #ifdef HAVE_AEABI_READ_TP
5595 void __aeabi_read_tp (void);
/*
 * mono_arch_register_lowlevel_calls:
 *
 *   Register the ARM-specific low-level runtime helpers as JIT icalls so
 * generated code can reference them by name through the patching machinery.
 */
5599 mono_arch_register_lowlevel_calls (void)
5601 /* The signature doesn't matter */
5602 mono_register_jit_icall (mono_arm_throw_exception, "mono_arm_throw_exception", mono_create_icall_signature ("void"), TRUE);
5603 mono_register_jit_icall (mono_arm_throw_exception_by_token, "mono_arm_throw_exception_by_token", mono_create_icall_signature ("void"), TRUE);
5605 #ifndef MONO_CROSS_COMPILE
5606 #ifdef HAVE_AEABI_READ_TP
/* EABI TLS-pointer read helper; only available on the targets where
 * HAVE_AEABI_READ_TP is defined (see the check near the top of the file). */
5607 mono_register_jit_icall (__aeabi_read_tp, "__aeabi_read_tp", mono_create_icall_signature ("void"), TRUE);
/*
 * patch_lis_ori:
 *   Patch the two 16-bit immediate halves of a lis/ori instruction pair at
 *   IP with VAL (high half first, then low half).
 *   NOTE(review): lis/ori are PowerPC mnemonics, not ARM; every use of this
 *   macro in mono_arch_patch_code () below sits after g_assert_not_reached (),
 *   so this appears to be dead code inherited from another backend — confirm
 *   before relying on it.
 */
5612 #define patch_lis_ori(ip,val) do {\
5613 guint16 *__lis_ori = (guint16*)(ip); \
5614 __lis_ori [1] = (((guint32)(val)) >> 16) & 0xffff; \
5615 __lis_ori [3] = ((guint32)(val)) & 0xffff; \
/*
 * mono_arch_patch_code:
 *
 *   Walk the MonoJumpInfo list JI for the method whose native code starts at
 * CODE and patch each recorded site. For each entry, IP is the absolute
 * address of the instruction to patch; the target is resolved via
 * mono_resolve_patch_target () and applied with arm_patch_general ().
 * SWITCH tables are handled specially (filled in-line or via jump tables)
 * and several patch kinds are either no-ops or marked unreachable below.
 */
5619 mono_arch_patch_code (MonoMethod *method, MonoDomain *domain, guint8 *code, MonoJumpInfo *ji, MonoCodeManager *dyn_code_mp, gboolean run_cctors)
5621 MonoJumpInfo *patch_info;
/* run_cctors is FALSE exactly when we are compiling AOT code */
5622 gboolean compile_aot = !run_cctors;
5624 for (patch_info = ji; patch_info; patch_info = patch_info->next) {
/* ip.i is an offset into the method body; make it absolute */
5625 unsigned char *ip = patch_info->ip.i + code;
5626 const unsigned char *target;
/* JIT-only fast path: fill the switch jump table directly with
 * absolute basic-block addresses */
5628 if (patch_info->type == MONO_PATCH_INFO_SWITCH && !compile_aot) {
5629 #ifdef USE_JUMP_TABLES
5630 gpointer *jt = mono_jumptable_get_entry (ip);
5632 gpointer *jt = (gpointer*)(ip + 8);
5635 /* jt is the inlined jump table, 2 instructions after ip
5636 * In the normal case we store the absolute addresses,
5637 * otherwise the displacements.
5639 for (i = 0; i < patch_info->data.table->table_size; i++)
5640 jt [i] = code + (int)patch_info->data.table->table [i];
5645 switch (patch_info->type) {
5646 case MONO_PATCH_INFO_BB:
5647 case MONO_PATCH_INFO_LABEL:
5650 /* No need to patch these */
5655 target = mono_resolve_patch_target (method, domain, code, patch_info, run_cctors);
5657 switch (patch_info->type) {
/* The following cases up to MONO_PATCH_INFO_EXC_NAME are all guarded by
 * g_assert_not_reached (): they are not emitted by this backend and the
 * patch code after the assert (lis/ori style) is dead. */
5658 case MONO_PATCH_INFO_IP:
5659 g_assert_not_reached ();
5660 patch_lis_ori (ip, ip);
5662 case MONO_PATCH_INFO_METHOD_REL:
5663 g_assert_not_reached ();
5664 *((gpointer *)(ip)) = code + patch_info->data.offset;
5666 case MONO_PATCH_INFO_METHODCONST:
5667 case MONO_PATCH_INFO_CLASS:
5668 case MONO_PATCH_INFO_IMAGE:
5669 case MONO_PATCH_INFO_FIELD:
5670 case MONO_PATCH_INFO_VTABLE:
5671 case MONO_PATCH_INFO_IID:
5672 case MONO_PATCH_INFO_SFLDA:
5673 case MONO_PATCH_INFO_LDSTR:
5674 case MONO_PATCH_INFO_TYPE_FROM_HANDLE:
5675 case MONO_PATCH_INFO_LDTOKEN:
5676 g_assert_not_reached ();
5677 /* from OP_AOTCONST : lis + ori */
5678 patch_lis_ori (ip, target);
5680 case MONO_PATCH_INFO_R4:
5681 case MONO_PATCH_INFO_R8:
5682 g_assert_not_reached ();
5683 *((gconstpointer *)(ip + 2)) = patch_info->data.target;
5685 case MONO_PATCH_INFO_EXC_NAME:
5686 g_assert_not_reached ();
5687 *((gconstpointer *)(ip + 1)) = patch_info->data.name;
5689 case MONO_PATCH_INFO_NONE:
5690 case MONO_PATCH_INFO_BB_OVF:
5691 case MONO_PATCH_INFO_EXC_OVF:
5692 /* everything is dealt with at epilog output time */
/* Default: apply the resolved target to the branch/load at ip */
5697 arm_patch_general (domain, ip, target, dyn_code_mp);
5704 * Stack frame layout:
5706 * ------------------- fp
5707 * MonoLMF structure or saved registers
5708 * -------------------
5710 * -------------------
5712 * -------------------
5713 * optional 8 bytes for tracing
5714 * -------------------
5715 * param area size is cfg->param_area
5716 * ------------------- sp
/*
 * mono_arch_emit_prolog:
 *
 *   Emit the method prolog: allocate the initial native code buffer, push the
 * callee-saved registers (or the full LMF register set when method->save_lmf),
 * allocate and align the stack frame, record DWARF unwind info and GC slot
 * types along the way, then spill incoming arguments from their ABI locations
 * (registers or caller stack) into their frame slots. Finally it initializes
 * the sequence-point / single-stepping support variables used by the
 * soft-debugger. See the "Stack frame layout" comment above for the frame
 * shape. Returns (past the visible chunk) the updated code pointer.
 */
5719 mono_arch_emit_prolog (MonoCompile *cfg)
5721 MonoMethod *method = cfg->method;
5723 MonoMethodSignature *sig;
5725 int alloc_size, orig_alloc_size, pos, max_offset, i, rot_amount;
5730 int prev_sp_offset, reg_offset;
5732 if (mono_jit_trace_calls != NULL && mono_trace_eval (method))
/* Initial buffer size estimate; grown on demand while emitting */
5735 sig = mono_method_signature (method);
5736 cfg->code_size = 256 + sig->param_count * 64;
5737 code = cfg->native_code = g_malloc (cfg->code_size);
/* On entry the CFA is SP+0 */
5739 mono_emit_unwind_op_def_cfa (cfg, code, ARMREG_SP, 0);
5741 alloc_size = cfg->stack_offset;
5747 * The iphone uses R7 as the frame pointer, and it points at the saved
5752 * We can't use r7 as a frame pointer since it points into the middle of
5753 * the frame, so we keep using our own frame pointer.
5754 * FIXME: Optimize this.
5756 ARM_PUSH (code, (1 << ARMREG_R7) | (1 << ARMREG_LR));
5757 ARM_MOV_REG_REG (code, ARMREG_R7, ARMREG_SP);
5758 prev_sp_offset += 8; /* r7 and lr */
5759 mono_emit_unwind_op_def_cfa_offset (cfg, code, prev_sp_offset);
5760 mono_emit_unwind_op_offset (cfg, code, ARMREG_R7, (- prev_sp_offset) + 0);
/* Save callee-saved registers: either just the used ones, or (with
 * save_lmf) the whole r4-r12,lr set that makes up the LMF register area */
5763 if (!method->save_lmf) {
5765 /* No need to push LR again */
5766 if (cfg->used_int_regs)
5767 ARM_PUSH (code, cfg->used_int_regs);
5769 ARM_PUSH (code, cfg->used_int_regs | (1 << ARMREG_LR));
5770 prev_sp_offset += 4;
5772 for (i = 0; i < 16; ++i) {
5773 if (cfg->used_int_regs & (1 << i))
5774 prev_sp_offset += 4;
5776 mono_emit_unwind_op_def_cfa_offset (cfg, code, prev_sp_offset);
/* Record an unwind offset + a NOREF GC slot for each pushed register */
5778 for (i = 0; i < 16; ++i) {
5779 if ((cfg->used_int_regs & (1 << i))) {
5780 mono_emit_unwind_op_offset (cfg, code, i, (- prev_sp_offset) + reg_offset);
5781 mini_gc_set_slot_type_from_cfa (cfg, (- prev_sp_offset) + reg_offset, SLOT_NOREF);
5786 mono_emit_unwind_op_offset (cfg, code, ARMREG_LR, -4);
5787 mini_gc_set_slot_type_from_cfa (cfg, -4, SLOT_NOREF);
5789 mono_emit_unwind_op_offset (cfg, code, ARMREG_LR, -4);
5790 mini_gc_set_slot_type_from_cfa (cfg, -4, SLOT_NOREF);
/* save_lmf path: 0x5ff0 == mask for r4-r12 and lr (all but r0-r3, sp, pc) */
5793 ARM_MOV_REG_REG (code, ARMREG_IP, ARMREG_SP);
5794 ARM_PUSH (code, 0x5ff0);
5795 prev_sp_offset += 4 * 10; /* all but r0-r3, sp and pc */
5796 mono_emit_unwind_op_def_cfa_offset (cfg, code, prev_sp_offset);
5798 for (i = 0; i < 16; ++i) {
5799 if ((i > ARMREG_R3) && (i != ARMREG_SP) && (i != ARMREG_PC)) {
5800 /* The original r7 is saved at the start */
5801 if (!(iphone_abi && i == ARMREG_R7))
5802 mono_emit_unwind_op_offset (cfg, code, i, (- prev_sp_offset) + reg_offset);
5806 g_assert (reg_offset == 4 * 10);
/* Reserve room for the rest of the MonoLMF structure */
5807 pos += sizeof (MonoLMF) - (4 * 10);
5811 orig_alloc_size = alloc_size;
5812 // align to MONO_ARCH_FRAME_ALIGNMENT bytes
5813 if (alloc_size & (MONO_ARCH_FRAME_ALIGNMENT - 1)) {
5814 alloc_size += MONO_ARCH_FRAME_ALIGNMENT - 1;
5815 alloc_size &= ~(MONO_ARCH_FRAME_ALIGNMENT - 1);
5818 /* the stack used in the pushed regs */
5819 if (prev_sp_offset & 4)
5821 cfg->stack_usage = alloc_size;
/* SP -= alloc_size, using an immediate when it fits the rotated-imm8 form */
5823 if ((i = mono_arm_is_rotated_imm8 (alloc_size, &rot_amount)) >= 0) {
5824 ARM_SUB_REG_IMM (code, ARMREG_SP, ARMREG_SP, i, rot_amount);
5826 code = mono_arm_emit_load_imm (code, ARMREG_IP, alloc_size);
5827 ARM_SUB_REG_REG (code, ARMREG_SP, ARMREG_SP, ARMREG_IP);
5829 mono_emit_unwind_op_def_cfa_offset (cfg, code, prev_sp_offset + alloc_size);
5831 if (cfg->frame_reg != ARMREG_SP) {
5832 ARM_MOV_REG_REG (code, cfg->frame_reg, ARMREG_SP);
5833 mono_emit_unwind_op_def_cfa_reg (cfg, code, cfg->frame_reg);
5835 //g_print ("prev_sp_offset: %d, alloc_size:%d\n", prev_sp_offset, alloc_size);
5836 prev_sp_offset += alloc_size;
/* Mark the alignment padding as containing no GC references */
5838 for (i = 0; i < alloc_size - orig_alloc_size; i += 4)
5839 mini_gc_set_slot_type_from_cfa (cfg, (- prev_sp_offset) + orig_alloc_size + i, SLOT_NOREF);
5841 /* compute max_offset in order to use short forward jumps
5842 * we could skip doing it on arm because the immediate displacement
5843 * for jumps is large enough, it may be useful later for constant pools
5846 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
5847 MonoInst *ins = bb->code;
5848 bb->max_offset = max_offset;
5850 if (cfg->prof_options & MONO_PROFILE_COVERAGE)
5853 MONO_BB_FOR_EACH_INS (bb, ins)
5854 max_offset += ((guint8 *)ins_get_spec (ins->opcode))[MONO_INST_LEN];
5857 /* store runtime generic context */
5858 if (cfg->rgctx_var) {
5859 MonoInst *ins = cfg->rgctx_var;
5861 g_assert (ins->opcode == OP_REGOFFSET);
5863 if (arm_is_imm12 (ins->inst_offset)) {
5864 ARM_STR_IMM (code, MONO_ARCH_RGCTX_REG, ins->inst_basereg, ins->inst_offset);
5866 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_offset);
5867 ARM_STR_REG_REG (code, MONO_ARCH_RGCTX_REG, ins->inst_basereg, ARMREG_LR);
5871 /* load arguments allocated to register from the stack */
5874 cinfo = get_call_info (cfg->generic_sharing_context, NULL, sig);
/* Spill the hidden vtype-return address argument to its frame slot */
5876 if (cinfo->vtype_retaddr) {
5877 ArgInfo *ainfo = &cinfo->ret;
5878 inst = cfg->vret_addr;
5879 g_assert (arm_is_imm12 (inst->inst_offset));
5880 ARM_STR_IMM (code, ainfo->reg, inst->inst_basereg, inst->inst_offset);
5883 if (sig->call_convention == MONO_CALL_VARARG) {
5884 ArgInfo *cookie = &cinfo->sig_cookie;
5886 /* Save the sig cookie address */
5887 g_assert (cookie->storage == RegTypeBase);
5889 g_assert (arm_is_imm12 (prev_sp_offset + cookie->offset));
5890 g_assert (arm_is_imm12 (cfg->sig_cookie));
5891 ARM_ADD_REG_IMM8 (code, ARMREG_IP, cfg->frame_reg, prev_sp_offset + cookie->offset);
5892 ARM_STR_IMM (code, ARMREG_IP, cfg->frame_reg, cfg->sig_cookie);
/* Move each incoming argument from its ABI location (register or caller
 * stack, per ainfo->storage) to where the method body expects it */
5895 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
5896 ArgInfo *ainfo = cinfo->args + i;
5897 inst = cfg->args [pos];
5899 if (cfg->verbose_level > 2)
5900 g_print ("Saving argument %d (type: %d)\n", i, ainfo->storage);
5901 if (inst->opcode == OP_REGVAR) {
5902 if (ainfo->storage == RegTypeGeneral)
5903 ARM_MOV_REG_REG (code, inst->dreg, ainfo->reg);
5904 else if (ainfo->storage == RegTypeFP) {
5905 g_assert_not_reached ();
5906 } else if (ainfo->storage == RegTypeBase) {
5907 if (arm_is_imm12 (prev_sp_offset + ainfo->offset)) {
5908 ARM_LDR_IMM (code, inst->dreg, ARMREG_SP, (prev_sp_offset + ainfo->offset));
5910 code = mono_arm_emit_load_imm (code, ARMREG_IP, prev_sp_offset + ainfo->offset);
5911 ARM_LDR_REG_REG (code, inst->dreg, ARMREG_SP, ARMREG_IP);
5914 g_assert_not_reached ();
5916 if (cfg->verbose_level > 2)
5917 g_print ("Argument %d assigned to register %s\n", pos, mono_arch_regname (inst->dreg));
5919 /* the argument should be put on the stack: FIXME handle size != word */
5920 if (ainfo->storage == RegTypeGeneral || ainfo->storage == RegTypeIRegPair || ainfo->storage == RegTypeGSharedVtInReg) {
/* Store from the incoming register, width chosen by ainfo->size */
5921 switch (ainfo->size) {
5923 if (arm_is_imm12 (inst->inst_offset))
5924 ARM_STRB_IMM (code, ainfo->reg, inst->inst_basereg, inst->inst_offset);
5926 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
5927 ARM_STRB_REG_REG (code, ainfo->reg, inst->inst_basereg, ARMREG_IP);
5931 if (arm_is_imm8 (inst->inst_offset)) {
5932 ARM_STRH_IMM (code, ainfo->reg, inst->inst_basereg, inst->inst_offset);
5934 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
5935 ARM_STRH_REG_REG (code, ainfo->reg, inst->inst_basereg, ARMREG_IP);
/* 8-byte case: store both halves of the register pair */
5939 if (arm_is_imm12 (inst->inst_offset)) {
5940 ARM_STR_IMM (code, ainfo->reg, inst->inst_basereg, inst->inst_offset);
5942 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
5943 ARM_STR_REG_REG (code, ainfo->reg, inst->inst_basereg, ARMREG_IP);
5945 if (arm_is_imm12 (inst->inst_offset + 4)) {
5946 ARM_STR_IMM (code, ainfo->reg + 1, inst->inst_basereg, inst->inst_offset + 4);
5948 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset + 4);
5949 ARM_STR_REG_REG (code, ainfo->reg + 1, inst->inst_basereg, ARMREG_IP);
5953 if (arm_is_imm12 (inst->inst_offset)) {
5954 ARM_STR_IMM (code, ainfo->reg, inst->inst_basereg, inst->inst_offset);
5956 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
5957 ARM_STR_REG_REG (code, ainfo->reg, inst->inst_basereg, ARMREG_IP);
/* RegTypeBaseGen: 64-bit argument split between r3 and the caller stack */
5961 } else if (ainfo->storage == RegTypeBaseGen) {
5962 if (arm_is_imm12 (prev_sp_offset + ainfo->offset)) {
5963 ARM_LDR_IMM (code, ARMREG_LR, ARMREG_SP, (prev_sp_offset + ainfo->offset));
5965 code = mono_arm_emit_load_imm (code, ARMREG_IP, prev_sp_offset + ainfo->offset);
5966 ARM_LDR_REG_REG (code, ARMREG_LR, ARMREG_SP, ARMREG_IP);
5968 if (arm_is_imm12 (inst->inst_offset + 4)) {
5969 ARM_STR_IMM (code, ARMREG_LR, inst->inst_basereg, inst->inst_offset + 4);
5970 ARM_STR_IMM (code, ARMREG_R3, inst->inst_basereg, inst->inst_offset);
5972 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset + 4);
5973 ARM_STR_REG_REG (code, ARMREG_LR, inst->inst_basereg, ARMREG_IP);
5974 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
5975 ARM_STR_REG_REG (code, ARMREG_R3, inst->inst_basereg, ARMREG_IP);
/* Argument passed entirely on the caller stack: copy via LR */
5977 } else if (ainfo->storage == RegTypeBase || ainfo->storage == RegTypeGSharedVtOnStack) {
5978 if (arm_is_imm12 (prev_sp_offset + ainfo->offset)) {
5979 ARM_LDR_IMM (code, ARMREG_LR, ARMREG_SP, (prev_sp_offset + ainfo->offset));
5981 code = mono_arm_emit_load_imm (code, ARMREG_IP, prev_sp_offset + ainfo->offset);
5982 ARM_LDR_REG_REG (code, ARMREG_LR, ARMREG_SP, ARMREG_IP);
5985 switch (ainfo->size) {
5987 if (arm_is_imm8 (inst->inst_offset)) {
5988 ARM_STRB_IMM (code, ARMREG_LR, inst->inst_basereg, inst->inst_offset);
5990 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
5991 ARM_STRB_REG_REG (code, ARMREG_LR, inst->inst_basereg, ARMREG_IP);
5995 if (arm_is_imm8 (inst->inst_offset)) {
5996 ARM_STRH_IMM (code, ARMREG_LR, inst->inst_basereg, inst->inst_offset);
5998 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
5999 ARM_STRH_REG_REG (code, ARMREG_LR, inst->inst_basereg, ARMREG_IP);
/* 8-byte case: copy low word, then reload LR with the high word */
6003 if (arm_is_imm12 (inst->inst_offset)) {
6004 ARM_STR_IMM (code, ARMREG_LR, inst->inst_basereg, inst->inst_offset);
6006 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
6007 ARM_STR_REG_REG (code, ARMREG_LR, inst->inst_basereg, ARMREG_IP);
6009 if (arm_is_imm12 (prev_sp_offset + ainfo->offset + 4)) {
6010 ARM_LDR_IMM (code, ARMREG_LR, ARMREG_SP, (prev_sp_offset + ainfo->offset + 4));
6012 code = mono_arm_emit_load_imm (code, ARMREG_IP, prev_sp_offset + ainfo->offset + 4);
6013 ARM_LDR_REG_REG (code, ARMREG_LR, ARMREG_SP, ARMREG_IP);
6015 if (arm_is_imm12 (inst->inst_offset + 4)) {
6016 ARM_STR_IMM (code, ARMREG_LR, inst->inst_basereg, inst->inst_offset + 4);
6018 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset + 4);
6019 ARM_STR_REG_REG (code, ARMREG_LR, inst->inst_basereg, ARMREG_IP);
6023 if (arm_is_imm12 (inst->inst_offset)) {
6024 ARM_STR_IMM (code, ARMREG_LR, inst->inst_basereg, inst->inst_offset);
6026 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
6027 ARM_STR_REG_REG (code, ARMREG_LR, inst->inst_basereg, ARMREG_IP);
/* Floating point argument in a VFP register: store via computed address */
6031 } else if (ainfo->storage == RegTypeFP) {
6032 int imm8, rot_amount;
6034 if ((imm8 = mono_arm_is_rotated_imm8 (inst->inst_offset, &rot_amount)) == -1) {
6035 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
6036 ARM_ADD_REG_REG (code, ARMREG_IP, ARMREG_IP, inst->inst_basereg);
6038 ARM_ADD_REG_IMM (code, ARMREG_IP, inst->inst_basereg, imm8, rot_amount);
6040 if (ainfo->size == 8)
6041 ARM_FSTD (code, ainfo->reg, ARMREG_IP, 0);
6043 ARM_FSTS (code, ainfo->reg, ARMREG_IP, 0);
/* Struct passed by value: spill the register part word-by-word, then
 * memcpy any remainder that arrived on the caller stack */
6044 } else if (ainfo->storage == RegTypeStructByVal) {
6045 int doffset = inst->inst_offset;
6049 size = mini_type_stack_size_full (cfg->generic_sharing_context, inst->inst_vtype, NULL, sig->pinvoke);
6050 for (cur_reg = 0; cur_reg < ainfo->size; ++cur_reg) {
6051 if (arm_is_imm12 (doffset)) {
6052 ARM_STR_IMM (code, ainfo->reg + cur_reg, inst->inst_basereg, doffset);
6054 code = mono_arm_emit_load_imm (code, ARMREG_IP, doffset);
6055 ARM_STR_REG_REG (code, ainfo->reg + cur_reg, inst->inst_basereg, ARMREG_IP);
6057 soffset += sizeof (gpointer);
6058 doffset += sizeof (gpointer);
6060 if (ainfo->vtsize) {
6061 /* FIXME: handle overrun! with struct sizes not multiple of 4 */
6062 //g_print ("emit_memcpy (prev_sp_ofs: %d, ainfo->offset: %d, soffset: %d)\n", prev_sp_offset, ainfo->offset, soffset);
6063 code = emit_memcpy (code, ainfo->vtsize * sizeof (gpointer), inst->inst_basereg, doffset, ARMREG_SP, prev_sp_offset + ainfo->offset);
6065 } else if (ainfo->storage == RegTypeStructByAddr) {
6066 g_assert_not_reached ();
6067 /* FIXME: handle overrun! with struct sizes not multiple of 4 */
6068 code = emit_memcpy (code, ainfo->vtsize * sizeof (gpointer), inst->inst_basereg, inst->inst_offset, ainfo->reg, 0);
6070 g_assert_not_reached ();
/* Fill in the LMF now that all argument registers have been saved */
6075 if (method->save_lmf)
6076 code = emit_save_lmf (cfg, code, alloc_size - lmf_offset);
6079 code = mono_arch_instrument_prolog (cfg, mono_trace_enter_method, code, TRUE);
/* --- soft-debugger support: initialize the sequence point variables --- */
6081 if (cfg->arch.seq_point_info_var) {
6082 MonoInst *ins = cfg->arch.seq_point_info_var;
6084 /* Initialize the variable from a GOT slot */
6085 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_SEQ_POINT_INFO, cfg->method);
6086 #ifdef USE_JUMP_TABLES
6088 gpointer *jte = mono_jumptable_add_entry ();
6089 code = mono_arm_load_jumptable_entry (code, jte, ARMREG_IP);
6090 ARM_LDR_IMM (code, ARMREG_R0, ARMREG_IP, 0);
6092 /** XXX: is it correct? */
6094 ARM_LDR_IMM (code, ARMREG_R0, ARMREG_PC, 0);
6096 *(gpointer*)code = NULL;
6099 ARM_LDR_REG_REG (code, ARMREG_R0, ARMREG_PC, ARMREG_R0);
6101 g_assert (ins->opcode == OP_REGOFFSET);
6103 if (arm_is_imm12 (ins->inst_offset)) {
6104 ARM_STR_IMM (code, ARMREG_R0, ins->inst_basereg, ins->inst_offset);
6106 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_offset);
6107 ARM_STR_REG_REG (code, ARMREG_R0, ins->inst_basereg, ARMREG_LR);
6111 /* Initialize ss_trigger_page_var */
6112 if (!cfg->soft_breakpoints) {
6113 MonoInst *info_var = cfg->arch.seq_point_info_var;
6114 MonoInst *ss_trigger_page_var = cfg->arch.ss_trigger_page_var;
6115 int dreg = ARMREG_LR;
6118 g_assert (info_var->opcode == OP_REGOFFSET);
6119 g_assert (arm_is_imm12 (info_var->inst_offset));
6121 ARM_LDR_IMM (code, dreg, info_var->inst_basereg, info_var->inst_offset);
6122 /* Load the trigger page addr */
6123 ARM_LDR_IMM (code, dreg, dreg, MONO_STRUCT_OFFSET (SeqPointInfo, ss_trigger_page));
6124 ARM_STR_IMM (code, dreg, ss_trigger_page_var->inst_basereg, ss_trigger_page_var->inst_offset);
/* Soft breakpoints: cache &ss_trigger_var and the single-step/breakpoint
 * wrapper addresses in their frame slots (inline data or jump table) */
6128 if (cfg->arch.seq_point_read_var) {
6129 MonoInst *read_ins = cfg->arch.seq_point_read_var;
6130 MonoInst *ss_method_ins = cfg->arch.seq_point_ss_method_var;
6131 MonoInst *bp_method_ins = cfg->arch.seq_point_bp_method_var;
6132 #ifdef USE_JUMP_TABLES
6135 g_assert (read_ins->opcode == OP_REGOFFSET);
6136 g_assert (arm_is_imm12 (read_ins->inst_offset));
6137 g_assert (ss_method_ins->opcode == OP_REGOFFSET);
6138 g_assert (arm_is_imm12 (ss_method_ins->inst_offset));
6139 g_assert (bp_method_ins->opcode == OP_REGOFFSET);
6140 g_assert (arm_is_imm12 (bp_method_ins->inst_offset));
6142 #ifdef USE_JUMP_TABLES
6143 jte = mono_jumptable_add_entries (3);
6144 jte [0] = (gpointer)&ss_trigger_var;
6145 jte [1] = single_step_func_wrapper;
6146 jte [2] = breakpoint_func_wrapper;
6147 code = mono_arm_load_jumptable_entry_addr (code, jte, ARMREG_LR);
6149 ARM_MOV_REG_REG (code, ARMREG_LR, ARMREG_PC);
6151 *(volatile int **)code = &ss_trigger_var;
6153 *(gpointer*)code = single_step_func_wrapper;
6155 *(gpointer*)code = breakpoint_func_wrapper;
6159 ARM_LDR_IMM (code, ARMREG_IP, ARMREG_LR, 0);
6160 ARM_STR_IMM (code, ARMREG_IP, read_ins->inst_basereg, read_ins->inst_offset);
6161 ARM_LDR_IMM (code, ARMREG_IP, ARMREG_LR, 4);
6162 ARM_STR_IMM (code, ARMREG_IP, ss_method_ins->inst_basereg, ss_method_ins->inst_offset);
6163 ARM_LDR_IMM (code, ARMREG_IP, ARMREG_LR, 8);
6164 ARM_STR_IMM (code, ARMREG_IP, bp_method_ins->inst_basereg, bp_method_ins->inst_offset);
6167 cfg->code_len = code - cfg->native_code;
6168 g_assert (cfg->code_len < cfg->code_size);
/*
 * mono_arch_emit_epilog:
 *
 *   Emit the method epilog: grow the native code buffer if needed, optionally
 * emit trace-leave instrumentation, reload small by-value struct returns into
 * r0, and restore callee-saved registers (returning by popping LR into PC),
 * either from the LMF save area or from the normal register save area.
 *
 * NOTE(review): this extract is missing several original lines (the
 * declarations of `code`/`cinfo`, some braces and #else branches), so the
 * comments below describe only the code that is visible here.
 */
6175 mono_arch_emit_epilog (MonoCompile *cfg)
6177 MonoMethod *method = cfg->method;
6178 int pos, i, rot_amount;
/* Conservative upper bound on the epilog size; refined by the checks below. */
6179 int max_epilog_size = 16 + 20*4;
6183 if (cfg->method->save_lmf)
6184 max_epilog_size += 128;
6186 if (mono_jit_trace_calls != NULL)
6187 max_epilog_size += 50;
6189 if (cfg->prof_options & MONO_PROFILE_ENTER_LEAVE)
6190 max_epilog_size += 50;
/* Double the buffer until the worst-case epilog is guaranteed to fit. */
6192 while (cfg->code_len + max_epilog_size > (cfg->code_size - 16)) {
6193 cfg->code_size *= 2;
6194 cfg->native_code = g_realloc (cfg->native_code, cfg->code_size);
6195 cfg->stat_code_reallocs++;
6199 * Keep in sync with OP_JMP
6201 code = cfg->native_code + cfg->code_len;
6203 if (mono_jit_trace_calls != NULL && mono_trace_eval (method)) {
6204 code = mono_arch_instrument_epilog (cfg, mono_trace_leave_method, code, TRUE);
6208 /* Load returned vtypes into registers if needed */
6209 cinfo = cfg->arch.cinfo;
6210 if (cinfo->ret.storage == RegTypeStructByVal) {
6211 MonoInst *ins = cfg->ret;
6213 if (arm_is_imm12 (ins->inst_offset)) {
6214 ARM_LDR_IMM (code, ARMREG_R0, ins->inst_basereg, ins->inst_offset);
/* Offset does not fit a 12-bit immediate: materialize it in LR first. */
6216 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_offset);
6217 ARM_LDR_REG_REG (code, ARMREG_R0, ins->inst_basereg, ARMREG_LR);
/* LMF path: restore the saved registers out of the MonoLMF on the stack. */
6221 if (method->save_lmf) {
6222 int lmf_offset, reg, sp_adj, regmask;
6223 /* all but r0-r3, sp and pc */
6224 pos += sizeof (MonoLMF) - (MONO_ARM_NUM_SAVED_REGS * sizeof (mgreg_t));
6227 code = emit_restore_lmf (cfg, code, cfg->stack_usage - lmf_offset);
6229 /* This points to r4 inside MonoLMF->iregs */
6230 sp_adj = (sizeof (MonoLMF) - MONO_ARM_NUM_SAVED_REGS * sizeof (mgreg_t));
6232 regmask = 0x9ff0; /* restore lr to pc */
6233 /* Skip caller saved registers not used by the method */
6234 while (!(cfg->used_int_regs & (1 << reg)) && reg < ARMREG_FP) {
6235 regmask &= ~(1 << reg);
6240 /* Restored later */
6241 regmask &= ~(1 << ARMREG_PC);
6242 /* point sp at the registers to restore: 10 is 14 -4, because we skip r0-r3 */
6243 code = emit_big_add (code, ARMREG_SP, cfg->frame_reg, cfg->stack_usage - lmf_offset + sp_adj);
6245 ARM_POP (code, regmask);
6247 /* Restore saved r7, restore LR to PC */
6248 /* Skip lr from the lmf */
6249 ARM_ADD_REG_IMM (code, ARMREG_SP, ARMREG_SP, sizeof (gpointer), 0);
6250 ARM_POP (code, (1 << ARMREG_R7) | (1 << ARMREG_PC));
/* Non-LMF path: reset SP to the frame base, preferring a rotated-imm8 ADD. */
6253 if ((i = mono_arm_is_rotated_imm8 (cfg->stack_usage, &rot_amount)) >= 0) {
6254 ARM_ADD_REG_IMM (code, ARMREG_SP, cfg->frame_reg, i, rot_amount);
6256 code = mono_arm_emit_load_imm (code, ARMREG_IP, cfg->stack_usage);
6257 ARM_ADD_REG_REG (code, ARMREG_SP, cfg->frame_reg, ARMREG_IP);
6261 /* Restore saved gregs */
6262 if (cfg->used_int_regs)
6263 ARM_POP (code, cfg->used_int_regs);
6264 /* Restore saved r7, restore LR to PC */
6265 ARM_POP (code, (1 << ARMREG_R7) | (1 << ARMREG_PC));
6267 ARM_POP (code, cfg->used_int_regs | (1 << ARMREG_PC));
/* Record the final length and sanity-check it against the buffer size. */
6271 cfg->code_len = code - cfg->native_code;
6273 g_assert (cfg->code_len < cfg->code_size);
/*
 * mono_arch_emit_exceptions:
 *
 *   Emit the out-of-line exception-throwing stubs for a compiled method.
 * Each distinct corlib exception class gets one shared stub; branches in the
 * method body recorded as MONO_PATCH_INFO_EXC are patched to jump to it, and
 * the stub itself is retargeted to call mono_arch_throw_corlib_exception.
 *
 * NOTE(review): several original lines (declarations of `code`/`i`, braces,
 * #else branches) are elided in this extract; comments cover visible code only.
 */
6278 mono_arch_emit_exceptions (MonoCompile *cfg)
6280 MonoJumpInfo *patch_info;
/* One slot per intrinsic exception id: stub address + "already counted" flag. */
6283 guint8* exc_throw_pos [MONO_EXC_INTRINS_NUM];
6284 guint8 exc_throw_found [MONO_EXC_INTRINS_NUM];
6285 int max_epilog_size = 50;
6287 for (i = 0; i < MONO_EXC_INTRINS_NUM; i++) {
6288 exc_throw_pos [i] = NULL;
6289 exc_throw_found [i] = 0;
6292 /* count the number of exception infos */
6295 * make sure we have enough space for exceptions
6297 for (patch_info = cfg->patch_info; patch_info; patch_info = patch_info->next) {
6298 if (patch_info->type == MONO_PATCH_INFO_EXC) {
6299 i = mini_exception_id_by_name (patch_info->data.target);
6300 if (!exc_throw_found [i]) {
/* Reserve space only once per distinct exception class. */
6301 max_epilog_size += 32;
6302 exc_throw_found [i] = TRUE;
/* Grow the native code buffer until the stubs are guaranteed to fit. */
6307 while (cfg->code_len + max_epilog_size > (cfg->code_size - 16)) {
6308 cfg->code_size *= 2;
6309 cfg->native_code = g_realloc (cfg->native_code, cfg->code_size);
6310 cfg->stat_code_reallocs++;
6313 code = cfg->native_code + cfg->code_len;
6315 /* add code to raise exceptions */
6316 for (patch_info = cfg->patch_info; patch_info; patch_info = patch_info->next) {
6317 switch (patch_info->type) {
6318 case MONO_PATCH_INFO_EXC: {
6319 MonoClass *exc_class;
6320 unsigned char *ip = patch_info->ip.i + cfg->native_code;
6322 i = mini_exception_id_by_name (patch_info->data.target);
/* A stub for this exception already exists: just patch the branch to it. */
6323 if (exc_throw_pos [i]) {
6324 arm_patch (ip, exc_throw_pos [i]);
6325 patch_info->type = MONO_PATCH_INFO_NONE;
6328 exc_throw_pos [i] = code;
6330 arm_patch (ip, code);
6332 exc_class = mono_class_from_name (mono_defaults.corlib, "System", patch_info->data.name);
6333 g_assert (exc_class);
/* r1 = throw-site address (comes in via LR from the patched branch). */
6335 ARM_MOV_REG_REG (code, ARMREG_R1, ARMREG_LR);
6336 #ifdef USE_JUMP_TABLES
/* Jumptable variant: entry 0 = throw helper, entry 1 = exception token. */
6338 gpointer *jte = mono_jumptable_add_entries (2);
6339 patch_info->type = MONO_PATCH_INFO_INTERNAL_METHOD;
6340 patch_info->data.name = "mono_arch_throw_corlib_exception";
6341 patch_info->ip.i = code - cfg->native_code;
6342 code = mono_arm_load_jumptable_entry_addr (code, jte, ARMREG_R0);
6343 ARM_LDR_IMM (code, ARMREG_IP, ARMREG_R0, 0);
6344 ARM_LDR_IMM (code, ARMREG_R0, ARMREG_R0, 4);
6345 ARM_BLX_REG (code, ARMREG_IP);
6346 jte [1] = GUINT_TO_POINTER (exc_class->type_token);
/* Constant-pool variant: load the token from the code stream via PC. */
6349 ARM_LDR_IMM (code, ARMREG_R0, ARMREG_PC, 0);
6350 patch_info->type = MONO_PATCH_INFO_INTERNAL_METHOD;
6351 patch_info->data.name = "mono_arch_throw_corlib_exception";
6352 patch_info->ip.i = code - cfg->native_code;
6354 *(guint32*)(gpointer)code = exc_class->type_token;
6365 cfg->code_len = code - cfg->native_code;
6367 g_assert (cfg->code_len < cfg->code_size);
6371 #endif /* #ifndef DISABLE_JIT */
/* Arch-specific JIT init hook, run after global init (body elided in this extract). */
6374 mono_arch_finish_init (void)
/* Free arch-specific per-thread JIT TLS data (body elided in this extract). */
6379 mono_arch_free_jit_tls_data (MonoJitTlsData *tls)
/* Intrinsic expansion hook for known methods; body elided here — presumably
 * returns NULL when no ARM intrinsic applies (TODO confirm against full file). */
6384 mono_arch_emit_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
/* Debug pretty-printer hook for instruction trees (body elided in this extract). */
6391 mono_arch_print_tree (MonoInst *tree, int arity)
/* Offset of the patchable constant inside a code sequence (body elided here). */
6401 mono_arch_get_patch_offset (guint8 *code)
/* Register-window flush: no-op on ARM, which has no register windows (body elided). */
6408 mono_arch_flush_register_windows (void)
/*
 * mono_arch_emit_imt_argument:
 *
 *   Arrange for the IMT method (or the RGCTX/method constant) to be passed to
 * an interface call in register V5. Under AOT (or when a generic context, an
 * explicit imt_arg, LLVM or jumptables are in use) the value is always passed
 * in a register and call->dynamic_imt_arg is set; otherwise the elided branch
 * presumably embeds the method pointer in the code stream (TODO confirm —
 * lines are missing from this extract).
 */
6415 mono_arch_emit_imt_argument (MonoCompile *cfg, MonoCallInst *call, MonoInst *imt_arg)
6417 int method_reg = mono_alloc_ireg (cfg);
/* Jumptables force the register-passing path unconditionally. */
6418 #ifdef USE_JUMP_TABLES
6419 int use_jumptables = TRUE;
6421 int use_jumptables = FALSE;
6424 if (cfg->compile_aot) {
6427 call->dynamic_imt_arg = TRUE;
6430 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, method_reg, imt_arg->dreg);
/* No explicit imt_arg: load the method as an AOT constant instead. */
6432 MONO_INST_NEW (cfg, ins, OP_AOTCONST);
6433 ins->dreg = method_reg;
6434 ins->inst_p0 = call->method;
6435 ins->inst_c1 = MONO_PATCH_INFO_METHODCONST;
6436 MONO_ADD_INS (cfg->cbb, ins);
6438 mono_call_inst_add_outarg_reg (cfg, call, method_reg, ARMREG_V5, FALSE);
6439 } else if (cfg->generic_context || imt_arg || mono_use_llvm || use_jumptables) {
6440 /* Always pass in a register for simplicity */
6441 call->dynamic_imt_arg = TRUE;
6443 cfg->uses_rgctx_reg = TRUE;
6446 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, method_reg, imt_arg->dreg);
/* JIT case: the method pointer is a plain pointer-sized constant. */
6450 MONO_INST_NEW (cfg, ins, OP_PCONST);
6451 ins->inst_p0 = call->method;
6452 ins->dreg = method_reg;
6453 MONO_ADD_INS (cfg->cbb, ins);
6456 mono_call_inst_add_outarg_reg (cfg, call, method_reg, ARMREG_V5, FALSE);
6460 #endif /* DISABLE_JIT */
/*
 * mono_arch_find_imt_method:
 *
 *   Recover the IMT method for an interface call site. With jumptables the
 * method is always passed in register V5. Otherwise it is read from the code
 * stream right after the LDR-PC instruction at the call site, falling back to
 * V5 for AOTed code / the gsharedvt trampoline.
 */
6463 mono_arch_find_imt_method (mgreg_t *regs, guint8 *code)
6465 #ifdef USE_JUMP_TABLES
6466 return (MonoMethod*)regs [ARMREG_V5];
6469 guint32 *code_ptr = (guint32*)code;
/* The word following the call instruction holds the IMT method pointer. */
6471 method = GUINT_TO_POINTER (code_ptr [1]);
6475 return (MonoMethod*)regs [ARMREG_V5];
6477 /* The IMT value is stored in the code stream right after the LDC instruction. */
6478 /* This is no longer true for the gsharedvt_in trampoline */
6480 if (!IS_LDR_PC (code_ptr [0])) {
6481 g_warning ("invalid code stream, instruction before IMT value is not a LDC in %s() (code %p value 0: 0x%x -1: 0x%x -2: 0x%x)", __FUNCTION__, code, code_ptr [2], code_ptr [1], code_ptr [0]);
6482 g_assert (IS_LDR_PC (code_ptr [0]));
6486 /* This is AOTed code, or the gsharedvt trampoline, the IMT method is in V5 */
6487 return (MonoMethod*)regs [ARMREG_V5];
6489 return (MonoMethod*) method;
/* Recover the vtable for a static rgctx call: it is passed in the RGCTX register. */
6494 mono_arch_find_static_call_vtable (mgreg_t *regs, guint8 *code)
6496 return (MonoVTable*) regs [MONO_ARCH_RGCTX_REG];
6499 /* #define ENABLE_WRONG_METHOD_CHECK 1 */
6500 #define BASE_SIZE (6 * 4)
6501 #define BSEARCH_ENTRY_SIZE (4 * 4)
6502 #define CMP_SIZE (3 * 4)
6503 #define BRANCH_SIZE (1 * 4)
6504 #define CALL_SIZE (2 * 4)
6505 #define WMC_SIZE (8 * 4)
6506 #define DISTANCE(A, B) (((gint32)(B)) - ((gint32)(A)))
6508 #ifdef USE_JUMP_TABLES
/* Store VALUE into jumptable slot INDEX, asserting the slot was still empty
 * (each jumptable entry must be filled exactly once). */
6510 set_jumptable_element (gpointer *base, guint32 index, gpointer value)
6512 g_assert (base [index] == NULL);
6513 base [index] = value;
/*
 * load_element_with_regbase_cond:
 *
 *   Emit a conditional load of jumptable entry JTI (byte offset jti*4 from
 * BASE) into DREG. Uses a single LDR when the offset fits in a 12-bit
 * immediate, otherwise materializes the offset with MOVW/MOVT and does a
 * register-offset load. Returns the advanced code pointer (return statement
 * elided in this extract).
 */
6516 load_element_with_regbase_cond (arminstr_t *code, ARMReg dreg, ARMReg base, guint32 jti, int cond)
6518 if (arm_is_imm12 (jti * 4)) {
6519 ARM_LDR_IMM_COND (code, dreg, base, jti * 4, cond);
6521 ARM_MOVW_REG_IMM_COND (code, dreg, (jti * 4) & 0xffff, cond);
/* Only emit MOVT when the offset has bits above the low 16. */
6522 if ((jti * 4) >> 16)
6523 ARM_MOVT_REG_IMM_COND (code, dreg, ((jti * 4) >> 16) & 0xffff, cond);
6524 ARM_LDR_REG_REG_SHIFT_COND (code, dreg, base, dreg, ARMSHIFT_LSL, 0, cond);
/*
 * arm_emit_value_and_patch_ldr:
 *
 *   Append VALUE to the code stream at CODE and patch the earlier PC-relative
 * LDR at TARGET so its 12-bit immediate offset points at it.
 *
 * NOTE(review): `delta` is guint32, so the `delta >= 0` half of the assert is
 * always true — only the `<= 0xFFF` bound is effective. A negative distance
 * would wrap to a huge unsigned value and still trip the assert, but the
 * condition should arguably use a signed type.
 */
6530 arm_emit_value_and_patch_ldr (arminstr_t *code, arminstr_t *target, guint32 value)
6532 guint32 delta = DISTANCE (target, code);
6534 g_assert (delta >= 0 && delta <= 0xFFF);
/* OR the byte offset into the LDR's immediate field. */
6535 *target = *target | delta;
6541 #ifdef ENABLE_WRONG_METHOD_CHECK
/* Diagnostic helper for ENABLE_WRONG_METHOD_CHECK: report an IMT mismatch. */
6543 mini_dump_bad_imt (int input_imt, int compared_imt, int pc)
6545 g_print ("BAD IMT comparing %x with expected %x at ip %x", input_imt, compared_imt, pc);
/*
 * mono_arch_build_imt_thunk:
 *
 *   Build the native IMT thunk for VTABLE: a binary-search / compare chain
 * over IMT_ENTRIES that dispatches on the IMT method (in r0/V5) to the right
 * vtable slot or target code, falling back to FAIL_TRAMP when given. Two code
 * shapes exist: a jumptable-based one (USE_JUMP_TABLES) that keeps constants
 * in a jumptable addressed via r2, and a constant-pool one that embeds values
 * in the code stream and patches PC-relative LDRs.
 *
 * NOTE(review): many original lines (declarations of `size`/`jte`/`i`/`j`,
 * braces, #else branches) are elided in this extract; the comments below are
 * limited to what the visible lines establish.
 */
6551 mono_arch_build_imt_thunk (MonoVTable *vtable, MonoDomain *domain, MonoIMTCheckItem **imt_entries, int count,
6552 gpointer fail_tramp)
6555 arminstr_t *code, *start;
6556 #ifdef USE_JUMP_TABLES
6559 gboolean large_offsets = FALSE;
6560 guint32 **constant_pool_starts;
6561 arminstr_t *vtable_target = NULL;
6562 int extra_space = 0;
6564 #ifdef ENABLE_WRONG_METHOD_CHECK
/* Pass 1: size estimation. Jumptable mode uses a flat per-entry estimate;
 * the constant-pool mode below accounts per-case. */
6569 #ifdef USE_JUMP_TABLES
6570 for (i = 0; i < count; ++i) {
6571 MonoIMTCheckItem *item = imt_entries [i];
6572 item->chunk_size += 4 * 16;
6573 if (!item->is_equals)
6574 imt_entries [item->check_target_idx]->compare_done = TRUE;
6575 size += item->chunk_size;
6578 constant_pool_starts = g_new0 (guint32*, count);
6580 for (i = 0; i < count; ++i) {
6581 MonoIMTCheckItem *item = imt_entries [i];
6582 if (item->is_equals) {
6583 gboolean fail_case = !item->check_target_idx && fail_tramp;
/* Slot offsets that don't fit imm12 need the long LDM-based sequence. */
6585 if (item->has_target_code || !arm_is_imm12 (DISTANCE (vtable, &vtable->vtable[item->value.vtable_slot]))) {
6586 item->chunk_size += 32;
6587 large_offsets = TRUE;
6590 if (item->check_target_idx || fail_case) {
6591 if (!item->compare_done || fail_case)
6592 item->chunk_size += CMP_SIZE;
6593 item->chunk_size += BRANCH_SIZE;
6595 #ifdef ENABLE_WRONG_METHOD_CHECK
6596 item->chunk_size += WMC_SIZE;
6600 item->chunk_size += 16;
6601 large_offsets = TRUE;
6603 item->chunk_size += CALL_SIZE;
6605 item->chunk_size += BSEARCH_ENTRY_SIZE;
6606 imt_entries [item->check_target_idx]->compare_done = TRUE;
6608 size += item->chunk_size;
6612 size += 4 * count; /* The ARM_ADD_REG_IMM to pop the stack */
/* Allocate the thunk: generic-virtual thunks come from a dedicated pool. */
6616 code = mono_method_alloc_generic_virtual_thunk (domain, size);
6618 code = mono_domain_code_reserve (domain, size);
6622 g_print ("Building IMT thunk for class %s %s entries %d code size %d code at %p end %p vtable %p fail_tramp %p\n", vtable->klass->name_space, vtable->klass->name, count, size, start, ((guint8*)start) + size, vtable, fail_tramp);
6623 for (i = 0; i < count; ++i) {
6624 MonoIMTCheckItem *item = imt_entries [i];
6625 g_print ("method %d (%p) %s vtable slot %p is_equals %d chunk size %d\n", i, item->key, ((MonoMethod*)item->key)->name, &vtable->vtable [item->value.vtable_slot], item->is_equals, item->chunk_size);
/* Thunk prologue. Jumptable mode: save r0-r2, normalize the IMT method into
 * r0, and load the jumptable base into r2. */
6629 #ifdef USE_JUMP_TABLES
6630 ARM_PUSH3 (code, ARMREG_R0, ARMREG_R1, ARMREG_R2);
6631 /* If jumptables we always pass the IMT method in R5 */
6632 ARM_MOV_REG_REG (code, ARMREG_R0, ARMREG_V5);
/* Jumptable layout: slot 0 = vtable, then 3 slots per entry. */
6633 #define VTABLE_JTI 0
6634 #define IMT_METHOD_OFFSET 0
6635 #define TARGET_CODE_OFFSET 1
6636 #define JUMP_CODE_OFFSET 2
6637 #define RECORDS_PER_ENTRY 3
6638 #define IMT_METHOD_JTI(idx) (1 + idx * RECORDS_PER_ENTRY + IMT_METHOD_OFFSET)
6639 #define TARGET_CODE_JTI(idx) (1 + idx * RECORDS_PER_ENTRY + TARGET_CODE_OFFSET)
6640 #define JUMP_CODE_JTI(idx) (1 + idx * RECORDS_PER_ENTRY + JUMP_CODE_OFFSET)
6642 jte = mono_jumptable_add_entries (RECORDS_PER_ENTRY * count + 1 /* vtable */);
6643 code = (arminstr_t *) mono_arm_load_jumptable_entry_addr ((guint8 *) code, jte, ARMREG_R2);
6644 ARM_LDR_IMM (code, ARMREG_IP, ARMREG_R2, VTABLE_JTI);
6645 set_jumptable_element (jte, VTABLE_JTI, vtable);
/* Constant-pool prologue: large-offset thunks save an extra slot for the
 * computed branch target (popped into PC later). */
6648 ARM_PUSH4 (code, ARMREG_R0, ARMREG_R1, ARMREG_IP, ARMREG_PC);
6650 ARM_PUSH2 (code, ARMREG_R0, ARMREG_R1);
6651 ARM_LDR_IMM (code, ARMREG_R0, ARMREG_LR, -4);
6652 vtable_target = code;
6653 ARM_LDR_IMM (code, ARMREG_IP, ARMREG_PC, 0);
6655 if (mono_use_llvm) {
6656 /* LLVM always passes the IMT method in R5 */
6657 ARM_MOV_REG_REG (code, ARMREG_R0, ARMREG_V5);
6659 /* R0 == 0 means we are called from AOT code. In this case, V5 contains the IMT method */
6660 ARM_CMP_REG_IMM8 (code, ARMREG_R0, 0);
6661 ARM_MOV_REG_REG_COND (code, ARMREG_R0, ARMREG_V5, ARMCOND_EQ);
/* Pass 2: emit the compare/dispatch code for every entry. */
6665 for (i = 0; i < count; ++i) {
6666 MonoIMTCheckItem *item = imt_entries [i];
6667 #ifdef USE_JUMP_TABLES
6668 guint32 imt_method_jti = 0, target_code_jti = 0;
6670 arminstr_t *imt_method = NULL, *vtable_offset_ins = NULL, *target_code_ins = NULL;
6672 gint32 vtable_offset;
6674 item->code_target = (guint8*)code;
6676 if (item->is_equals) {
6677 gboolean fail_case = !item->check_target_idx && fail_tramp;
6679 if (item->check_target_idx || fail_case) {
6680 if (!item->compare_done || fail_case) {
6681 #ifdef USE_JUMP_TABLES
6682 imt_method_jti = IMT_METHOD_JTI (i);
6683 code = load_element_with_regbase_cond (code, ARMREG_R1, ARMREG_R2, imt_method_jti, ARMCOND_AL);
6686 ARM_LDR_IMM (code, ARMREG_R1, ARMREG_PC, 0);
6688 ARM_CMP_REG_REG (code, ARMREG_R0, ARMREG_R1);
/* On mismatch jump to the next check; patched/filled in pass 3. */
6690 #ifdef USE_JUMP_TABLES
6691 code = load_element_with_regbase_cond (code, ARMREG_R1, ARMREG_R2, JUMP_CODE_JTI (i), ARMCOND_NE);
6692 ARM_BX_COND (code, ARMCOND_NE, ARMREG_R1);
6693 item->jmp_code = GUINT_TO_POINTER (JUMP_CODE_JTI (i));
6695 item->jmp_code = (guint8*)code;
6696 ARM_B_COND (code, ARMCOND_NE, 0);
6699 /*Enable the commented code to assert on wrong method*/
6700 #ifdef ENABLE_WRONG_METHOD_CHECK
6701 #ifdef USE_JUMP_TABLES
6702 imt_method_jti = IMT_METHOD_JTI (i);
6703 code = load_element_with_regbase_cond (code, ARMREG_R1, ARMREG_R2, imt_method_jti, ARMCOND_AL);
6706 ARM_LDR_IMM (code, ARMREG_R1, ARMREG_PC, 0);
6708 ARM_CMP_REG_REG (code, ARMREG_R0, ARMREG_R1);
6710 ARM_B_COND (code, ARMCOND_EQ, 0);
6712 /* Define this if your system is so bad that gdb is failing. */
6713 #ifdef BROKEN_DEV_ENV
6714 ARM_MOV_REG_REG (code, ARMREG_R2, ARMREG_PC);
6716 arm_patch (code - 1, mini_dump_bad_imt);
6720 arm_patch (cond, code);
/* Match found: dispatch either to explicit target code or via vtable slot. */
6724 if (item->has_target_code) {
6725 /* Load target address */
6726 #ifdef USE_JUMP_TABLES
6727 target_code_jti = TARGET_CODE_JTI (i);
6728 code = load_element_with_regbase_cond (code, ARMREG_R1, ARMREG_R2, target_code_jti, ARMCOND_AL);
6729 /* Restore registers */
6730 ARM_POP3 (code, ARMREG_R0, ARMREG_R1, ARMREG_R2);
6732 ARM_BX (code, ARMREG_R1);
6733 set_jumptable_element (jte, target_code_jti, item->value.target_code);
6735 target_code_ins = code;
6736 ARM_LDR_IMM (code, ARMREG_R1, ARMREG_PC, 0);
6737 /* Save it to the fourth slot */
6738 ARM_STR_IMM (code, ARMREG_R1, ARMREG_SP, 3 * sizeof (gpointer));
6739 /* Restore registers and branch */
6740 ARM_POP4 (code, ARMREG_R0, ARMREG_R1, ARMREG_IP, ARMREG_PC);
6742 code = arm_emit_value_and_patch_ldr (code, target_code_ins, (gsize)item->value.target_code);
6745 vtable_offset = DISTANCE (vtable, &vtable->vtable[item->value.vtable_slot]);
6746 if (!arm_is_imm12 (vtable_offset)) {
6748 * We need to branch to a computed address but we don't have
6749 * a free register to store it, since IP must contain the
6750 * vtable address. So we push the two values to the stack, and
6751 * load them both using LDM.
6753 /* Compute target address */
6754 #ifdef USE_JUMP_TABLES
6755 ARM_MOVW_REG_IMM (code, ARMREG_R1, vtable_offset & 0xffff);
6756 if (vtable_offset >> 16)
6757 ARM_MOVT_REG_IMM (code, ARMREG_R1, (vtable_offset >> 16) & 0xffff);
6758 /* IP had vtable base. */
6759 ARM_LDR_REG_REG (code, ARMREG_IP, ARMREG_IP, ARMREG_R1);
6760 /* Restore registers and branch */
6761 ARM_POP3 (code, ARMREG_R0, ARMREG_R1, ARMREG_R2);
6762 ARM_BX (code, ARMREG_IP);
6764 vtable_offset_ins = code;
6765 ARM_LDR_IMM (code, ARMREG_R1, ARMREG_PC, 0);
6766 ARM_LDR_REG_REG (code, ARMREG_R1, ARMREG_IP, ARMREG_R1);
6767 /* Save it to the fourth slot */
6768 ARM_STR_IMM (code, ARMREG_R1, ARMREG_SP, 3 * sizeof (gpointer));
6769 /* Restore registers and branch */
6770 ARM_POP4 (code, ARMREG_R0, ARMREG_R1, ARMREG_IP, ARMREG_PC);
6772 code = arm_emit_value_and_patch_ldr (code, vtable_offset_ins, vtable_offset);
/* Small offset: single LDR through the vtable slot. */
6775 #ifdef USE_JUMP_TABLES
6776 ARM_LDR_IMM (code, ARMREG_IP, ARMREG_IP, vtable_offset);
6777 ARM_POP3 (code, ARMREG_R0, ARMREG_R1, ARMREG_R2);
6778 ARM_BX (code, ARMREG_IP);
6780 ARM_POP2 (code, ARMREG_R0, ARMREG_R1);
6782 ARM_ADD_REG_IMM8 (code, ARMREG_SP, ARMREG_SP, 2 * sizeof (gpointer));
6783 ARM_LDR_IMM (code, ARMREG_PC, ARMREG_IP, vtable_offset);
/* Fail case: route the mismatch branch to the fail trampoline. */
6789 #ifdef USE_JUMP_TABLES
6790 set_jumptable_element (jte, GPOINTER_TO_UINT (item->jmp_code), code);
6791 target_code_jti = TARGET_CODE_JTI (i);
6792 /* Load target address */
6793 code = load_element_with_regbase_cond (code, ARMREG_R1, ARMREG_R2, target_code_jti, ARMCOND_AL);
6794 /* Restore registers */
6795 ARM_POP3 (code, ARMREG_R0, ARMREG_R1, ARMREG_R2);
6797 ARM_BX (code, ARMREG_R1);
6798 set_jumptable_element (jte, target_code_jti, fail_tramp);
6800 arm_patch (item->jmp_code, (guchar*)code);
6802 target_code_ins = code;
6803 /* Load target address */
6804 ARM_LDR_IMM (code, ARMREG_R1, ARMREG_PC, 0);
6805 /* Save it to the fourth slot */
6806 ARM_STR_IMM (code, ARMREG_R1, ARMREG_SP, 3 * sizeof (gpointer));
6807 /* Restore registers and branch */
6808 ARM_POP4 (code, ARMREG_R0, ARMREG_R1, ARMREG_IP, ARMREG_PC);
6810 code = arm_emit_value_and_patch_ldr (code, target_code_ins, (gsize)fail_tramp);
6812 item->jmp_code = NULL;
/* Emit/record the IMT key constant for this entry. */
6815 #ifdef USE_JUMP_TABLES
6817 set_jumptable_element (jte, imt_method_jti, item->key);
6820 code = arm_emit_value_and_patch_ldr (code, imt_method, (guint32)item->key);
6822 /*must emit after unconditional branch*/
6823 if (vtable_target) {
6824 code = arm_emit_value_and_patch_ldr (code, vtable_target, (guint32)vtable);
6825 item->chunk_size += 4;
6826 vtable_target = NULL;
6829 /*We reserve the space for bsearch IMT values after the first entry with an absolute jump*/
6830 constant_pool_starts [i] = code;
6832 code += extra_space;
/* Non-equals (bsearch) entry: compare and branch on unsigned >= (HS). */
6837 #ifdef USE_JUMP_TABLES
6838 code = load_element_with_regbase_cond (code, ARMREG_R1, ARMREG_R2, IMT_METHOD_JTI (i), ARMCOND_AL);
6839 ARM_CMP_REG_REG (code, ARMREG_R0, ARMREG_R1);
6840 code = load_element_with_regbase_cond (code, ARMREG_R1, ARMREG_R2, JUMP_CODE_JTI (i), ARMCOND_HS);
6841 ARM_BX_COND (code, ARMCOND_HS, ARMREG_R1);
6842 item->jmp_code = GUINT_TO_POINTER (JUMP_CODE_JTI (i));
6844 ARM_LDR_IMM (code, ARMREG_R1, ARMREG_PC, 0);
6845 ARM_CMP_REG_REG (code, ARMREG_R0, ARMREG_R1);
6847 item->jmp_code = (guint8*)code;
6848 ARM_B_COND (code, ARMCOND_HS, 0);
/* Pass 3: fix up forward branches and fill the bsearch constant pools. */
6854 for (i = 0; i < count; ++i) {
6855 MonoIMTCheckItem *item = imt_entries [i];
6856 if (item->jmp_code) {
6857 if (item->check_target_idx)
6858 #ifdef USE_JUMP_TABLES
6859 set_jumptable_element (jte, GPOINTER_TO_UINT (item->jmp_code), imt_entries [item->check_target_idx]->code_target);
6861 arm_patch (item->jmp_code, imt_entries [item->check_target_idx]->code_target);
6864 if (i > 0 && item->is_equals) {
6866 #ifdef USE_JUMP_TABLES
6867 for (j = i - 1; j >= 0 && !imt_entries [j]->is_equals; --j)
6868 set_jumptable_element (jte, IMT_METHOD_JTI (j), imt_entries [j]->key);
6870 arminstr_t *space_start = constant_pool_starts [i];
6871 for (j = i - 1; j >= 0 && !imt_entries [j]->is_equals; --j) {
6872 space_start = arm_emit_value_and_patch_ldr (space_start, (arminstr_t*)imt_entries [j]->code_target, (guint32)imt_entries [j]->key);
/* Optional disassembly dump for debugging the generated thunk. */
6880 char *buff = g_strdup_printf ("thunk_for_class_%s_%s_entries_%d", vtable->klass->name_space, vtable->klass->name, count);
6881 mono_disassemble_code (NULL, (guint8*)start, size, buff);
6886 #ifndef USE_JUMP_TABLES
6887 g_free (constant_pool_starts);
6890 mono_arch_flush_icache ((guint8*)start, size);
6891 mono_stats.imt_thunks_size += code - start;
6893 g_assert (DISTANCE (start, code) <= size);
/* Read general-purpose register REG out of a saved MonoContext. */
6898 mono_arch_context_get_int_reg (MonoContext *ctx, int reg)
6900 return ctx->regs [reg];
/* Write VAL into general-purpose register REG of a saved MonoContext. */
6904 mono_arch_context_set_int_reg (MonoContext *ctx, int reg, mgreg_t val)
6906 ctx->regs [reg] = val;
6910 * mono_arch_get_trampolines:
6912 * Return a list of MonoTrampInfo structures describing arch specific trampolines
/* Return the list of arch-specific trampolines; ARM only has the exception ones. */
6916 mono_arch_get_trampolines (gboolean aot)
6918 return mono_arm_get_exception_trampolines (aot);
6921 #if defined(MONO_ARCH_SOFT_DEBUG_SUPPORTED)
6923 * mono_arch_set_breakpoint:
6925 * Set a breakpoint at the native code corresponding to JI at NATIVE_OFFSET.
6926 * The location should contain code emitted by OP_SEQ_POINT.
/*
 * mono_arch_set_breakpoint:
 *
 *   Activate the breakpoint at IP (a location emitted by OP_SEQ_POINT).
 * Three strategies: soft breakpoints patch in a BLX through LR; AOT code
 * records bp_trigger_page in the per-method SeqPointInfo table; otherwise a
 * load from the (protected) breakpoint trigger page is patched in so
 * execution faults there. The final branch emits an SWI-based trap.
 *
 * NOTE(review): the declaration of `code` and some braces/#else lines are
 * elided in this extract.
 */
6929 mono_arch_set_breakpoint (MonoJitInfo *ji, guint8 *ip)
6932 guint32 native_offset = ip - (guint8*)ji->code_start;
6933 MonoDebugOptions *opt = mini_get_debug_options ();
6935 if (opt->soft_breakpoints) {
6936 g_assert (!ji->from_aot);
/* Soft bp: call the breakpoint handler through LR (set up by the prolog). */
6938 ARM_BLX_REG (code, ARMREG_LR);
6939 mono_arch_flush_icache (code - 4, 4);
6940 } else if (ji->from_aot) {
6941 SeqPointInfo *info = mono_arch_get_seq_point_info (mono_domain_get (), ji->code_start);
/* AOT: flip the per-instruction slot from "no bp" to the trigger page. */
6943 g_assert (native_offset % 4 == 0);
6944 g_assert (info->bp_addrs [native_offset / 4] == 0);
6945 info->bp_addrs [native_offset / 4] = bp_trigger_page;
6947 int dreg = ARMREG_LR;
6949 /* Read from another trigger page */
6950 #ifdef USE_JUMP_TABLES
6951 gpointer *jte = mono_jumptable_add_entry ();
6952 code = mono_arm_load_jumptable_entry (code, jte, dreg);
6953 jte [0] = bp_trigger_page;
6955 ARM_LDR_IMM (code, dreg, ARMREG_PC, 0);
6957 *(int*)code = (int)bp_trigger_page;
/* The faulting load: dereference the trigger page. */
6960 ARM_LDR_IMM (code, dreg, dreg, 0);
6962 mono_arch_flush_icache (code - 16, 16);
6965 /* This is currently implemented by emitting an SWI instruction, which
6966 * qemu/linux seems to convert to a SIGILL.
6968 *(int*)code = (0xef << 24) | 8;
6970 mono_arch_flush_icache (code - 4, 4);
6976 * mono_arch_clear_breakpoint:
6978 * Clear the breakpoint at IP.
/*
 * mono_arch_clear_breakpoint:
 *
 *   Undo mono_arch_set_breakpoint at IP: soft breakpoints restore the
 * original word; AOT code clears the SeqPointInfo slot; otherwise the
 * four patched instructions are overwritten (NOP emission lines are elided
 * in this extract) and the icache flushed.
 */
6981 mono_arch_clear_breakpoint (MonoJitInfo *ji, guint8 *ip)
6983 MonoDebugOptions *opt = mini_get_debug_options ();
6987 if (opt->soft_breakpoints) {
6988 g_assert (!ji->from_aot);
6991 mono_arch_flush_icache (code - 4, 4);
6992 } else if (ji->from_aot) {
6993 guint32 native_offset = ip - (guint8*)ji->code_start;
6994 SeqPointInfo *info = mono_arch_get_seq_point_info (mono_domain_get (), ji->code_start);
/* Must currently hold the trigger page, i.e. the bp was actually set. */
6996 g_assert (native_offset % 4 == 0);
6997 g_assert (info->bp_addrs [native_offset / 4] == bp_trigger_page);
6998 info->bp_addrs [native_offset / 4] = 0;
/* JIT case: rewrite the 4 words patched in by set_breakpoint. */
7000 for (i = 0; i < 4; ++i)
7003 mono_arch_flush_icache (ip, code - ip);
7008 * mono_arch_start_single_stepping:
7010 * Start single stepping.
/* Begin single stepping: revoke all access to the single-step trigger page so
 * the loads emitted at sequence points fault. */
7013 mono_arch_start_single_stepping (void)
7015 if (ss_trigger_page)
7016 mono_mprotect (ss_trigger_page, mono_pagesize (), 0);
7022 * mono_arch_stop_single_stepping:
7024 * Stop single stepping.
/* Stop single stepping: make the trigger page readable again so sequence
 * point loads no longer fault. */
7027 mono_arch_stop_single_stepping (void)
7029 if (ss_trigger_page)
7030 mono_mprotect (ss_trigger_page, mono_pagesize (), MONO_MMAP_READ);
7036 #define DBG_SIGNAL SIGBUS
7038 #define DBG_SIGNAL SIGSEGV
7042 * mono_arch_is_single_step_event:
7044 * Return whenever the machine state in SIGCTX corresponds to a single
/*
 * mono_arch_is_single_step_event:
 *
 *   Return whether the fault described by INFO/SIGCTX was caused by a read
 * of the single-step trigger page (return statements are elided in this
 * extract). The 128-byte slack tolerates the reported address being off.
 */
7048 mono_arch_is_single_step_event (void *info, void *sigctx)
7050 siginfo_t *sinfo = info;
/* Soft-breakpoint mode has no trigger page; never a hw single-step event. */
7052 if (!ss_trigger_page)
7055 /* Sometimes the address is off by 4 */
7056 if (sinfo->si_addr >= ss_trigger_page && (guint8*)sinfo->si_addr <= (guint8*)ss_trigger_page + 128)
7063 * mono_arch_is_breakpoint_event:
7065 * Return whenever the machine state in SIGCTX corresponds to a breakpoint event.
/*
 * mono_arch_is_breakpoint_event:
 *
 *   Return whether the signal described by INFO/SIGCTX is a fault on the
 * breakpoint trigger page with the expected signal number (return statements
 * are elided in this extract).
 */
7068 mono_arch_is_breakpoint_event (void *info, void *sigctx)
7070 siginfo_t *sinfo = info;
/* Trigger pages are only used in the non-soft-breakpoint mode. */
7072 if (!ss_trigger_page)
7075 if (sinfo->si_signo == DBG_SIGNAL) {
7076 /* Sometimes the address is off by 4 */
7077 if (sinfo->si_addr >= bp_trigger_page && (guint8*)sinfo->si_addr <= (guint8*)bp_trigger_page + 128)
7087 * mono_arch_skip_breakpoint:
7089 * See mini-amd64.c for docs.
/* Resume past a breakpoint by advancing the PC over the 4-byte faulting load. */
7092 mono_arch_skip_breakpoint (MonoContext *ctx, MonoJitInfo *ji)
7094 MONO_CONTEXT_SET_IP (ctx, (guint8*)MONO_CONTEXT_GET_IP (ctx) + 4);
7098 * mono_arch_skip_single_step:
7100 * See mini-amd64.c for docs.
/* Resume past a single-step trap by advancing the PC over the faulting load. */
7103 mono_arch_skip_single_step (MonoContext *ctx)
7105 MONO_CONTEXT_SET_IP (ctx, (guint8*)MONO_CONTEXT_GET_IP (ctx) + 4);
7108 #endif /* MONO_ARCH_SOFT_DEBUG_SUPPORTED */
7111 * mono_arch_get_seq_point_info:
7113 * See mini-amd64.c for docs.
/*
 * mono_arch_get_seq_point_info:
 *
 *   Return (creating and caching on first use) the per-method SeqPointInfo
 * for CODE, keyed by code address in the domain's arch_seq_points table.
 * The structure carries the trigger pages plus a bp_addrs slot per 4-byte
 * instruction (sized from ji->code_size).
 */
7116 mono_arch_get_seq_point_info (MonoDomain *domain, guint8 *code)
7121 // FIXME: Add a free function
/* Fast path: look up an existing entry under the domain lock. */
7123 mono_domain_lock (domain);
7124 info = g_hash_table_lookup (domain_jit_info (domain)->arch_seq_points,
7126 mono_domain_unlock (domain);
7129 ji = mono_jit_info_table_find (domain, (char*)code);
/* One bp_addrs byte-slot region sized to the method's code. */
7132 info = g_malloc0 (sizeof (SeqPointInfo) + ji->code_size);
7134 info->ss_trigger_page = ss_trigger_page;
7135 info->bp_trigger_page = bp_trigger_page;
7137 mono_domain_lock (domain);
7138 g_hash_table_insert (domain_jit_info (domain)->arch_seq_points,
7140 mono_domain_unlock (domain);
/* Initialize an extended LMF frame: link to the previous LMF, tag bit 2 of
 * the pointer to mark it as a MonoLMFExt, and record the frame address. */
7147 mono_arch_init_lmf_ext (MonoLMFExt *ext, gpointer prev_lmf)
7149 ext->lmf.previous_lmf = prev_lmf;
7150 /* Mark that this is a MonoLMFExt */
7151 ext->lmf.previous_lmf = (gpointer)(((gssize)ext->lmf.previous_lmf) | 2);
7152 ext->lmf.sp = (gssize)ext;
7156 * mono_arch_set_target:
7158 * Set the target architecture the JIT backend should generate code for, in the form
7159 * of a GNU target triplet. Only used in AOT mode.
/*
 * mono_arch_set_target:
 *
 *   Parse a GNU target triple (AOT mode only) and set the ARM feature flags
 * the backend keys code generation off of. Substring checks are cumulative:
 * e.g. "armv7s" also matches the "armv7" test above it, so v7s implies v7.
 */
7162 mono_arch_set_target (char *mtriple)
7164 /* The GNU target triple format is not very well documented */
7165 if (strstr (mtriple, "armv7")) {
7166 v5_supported = TRUE;
7167 v6_supported = TRUE;
7168 v7_supported = TRUE;
7170 if (strstr (mtriple, "armv6")) {
7171 v5_supported = TRUE;
7172 v6_supported = TRUE;
7174 if (strstr (mtriple, "armv7s")) {
7175 v7s_supported = TRUE;
7177 if (strstr (mtriple, "thumbv7s")) {
7178 v5_supported = TRUE;
7179 v6_supported = TRUE;
7180 v7_supported = TRUE;
7181 v7s_supported = TRUE;
7182 thumb_supported = TRUE;
7183 thumb2_supported = TRUE;
/* Apple targets are at least ARMv6 with Thumb. */
7185 if (strstr (mtriple, "darwin") || strstr (mtriple, "ios")) {
7186 v5_supported = TRUE;
7187 v6_supported = TRUE;
7188 thumb_supported = TRUE;
7191 if (strstr (mtriple, "gnueabi"))
7192 eabi_supported = TRUE;
/*
 * mono_arch_opcode_supported:
 *
 *   Report whether an atomic opcode can be lowered natively: the 32-bit
 * atomics require ARMv7 (LDREX/STREX based). The default branch for other
 * opcodes is elided in this extract.
 */
7196 mono_arch_opcode_supported (int opcode)
7199 case OP_ATOMIC_ADD_I4:
7200 case OP_ATOMIC_EXCHANGE_I4:
7201 case OP_ATOMIC_CAS_I4:
7202 return v7_supported;
7208 #if defined(ENABLE_GSHAREDVT)
7210 #include "../../../mono-extensions/mono/mini/mini-arm-gsharedvt.c"
7212 #endif /* !MONOTOUCH */