2 * mini-arm.c: ARM backend for the Mono code generator
5 * Paolo Molaro (lupus@ximian.com)
6 * Dietmar Maurer (dietmar@ximian.com)
8 * (C) 2003 Ximian, Inc.
9 * Copyright 2003-2011 Novell, Inc (http://www.novell.com)
10 * Copyright 2011 Xamarin, Inc (http://www.xamarin.com)
15 #include <mono/metadata/abi-details.h>
16 #include <mono/metadata/appdomain.h>
17 #include <mono/metadata/debug-helpers.h>
18 #include <mono/utils/mono-mmap.h>
19 #include <mono/utils/mono-hwcap-arm.h>
25 #include "debugger-agent.h"
27 #include "mono/arch/arm/arm-vfp-codegen.h"
/* NOTE(review): this chunk is a partial extraction — the leading number on each
 * line is the ORIGINAL file's line number, and several original lines (braces,
 * returns, #else/#endif) are missing from this view. Only comments were edited. */
29 /* Sanity check: This makes no sense */
30 #if defined(ARM_FPU_NONE) && (defined(ARM_FPU_VFP) || defined(ARM_FPU_VFP_HARD))
31 #error "ARM_FPU_NONE is defined while one of ARM_FPU_VFP/ARM_FPU_VFP_HARD is defined"
35 * IS_SOFT_FLOAT: Is full software floating point used?
36 * IS_HARD_FLOAT: Is full hardware floating point used?
37 * IS_VFP: Is hardware floating point with software ABI used?
39 * These are not necessarily constants, e.g. IS_SOFT_FLOAT and
40 * IS_VFP may delegate to mono_arch_is_soft_float ().
/* ARM_FPU_VFP_HARD: hardware FP with the hard-float (armhf) calling convention. */
43 #if defined(ARM_FPU_VFP_HARD)
44 #define IS_SOFT_FLOAT (FALSE)
45 #define IS_HARD_FLOAT (TRUE)
/* ARM_FPU_NONE: soft-float fallback build; the choice is made at runtime. */
47 #elif defined(ARM_FPU_NONE)
48 #define IS_SOFT_FLOAT (mono_arch_is_soft_float ())
49 #define IS_HARD_FLOAT (FALSE)
50 #define IS_VFP (!mono_arch_is_soft_float ())
/* Default case: VFP hardware FP with the soft-float ABI. */
52 #define IS_SOFT_FLOAT (FALSE)
53 #define IS_HARD_FLOAT (FALSE)
/* __aeabi_read_tp is only usable on non-Android, non-NaCl EABI Linux. */
57 #if defined(__ARM_EABI__) && defined(__linux__) && !defined(PLATFORM_ANDROID) && !defined(__native_client__)
58 #define HAVE_AEABI_READ_TP 1
/* Native Client (NaCl) support: alignment constants and unimplemented stubs. */
61 #ifdef __native_client_codegen__
62 const guint kNaClAlignment = kNaClAlignmentARM;
63 const guint kNaClAlignmentMask = kNaClAlignmentMaskARM;
64 gint8 nacl_align_byte = -1; /* 0xff */
67 mono_arch_nacl_pad (guint8 *code, int pad)
69 /* Not yet properly implemented. */
70 g_assert_not_reached ();
75 mono_arch_nacl_skip_nops (guint8 *code)
77 /* Not yet properly implemented. */
78 g_assert_not_reached ();
82 #endif /* __native_client_codegen__ */
/* Round val up to the next multiple of align (align must be a power of two). */
84 #define ALIGN_TO(val,align) ((((guint64)val) + ((align) - 1)) & ~((align) - 1))
/* Darwin cache-flush primitive, declared here since no public header provides it. */
87 void sys_icache_invalidate (void *start, size_t len);
90 /* This mutex protects architecture specific caches */
91 #define mono_mini_arch_lock() mono_mutex_lock (&mini_arch_mutex)
92 #define mono_mini_arch_unlock() mono_mutex_unlock (&mini_arch_mutex)
93 static mono_mutex_t mini_arch_mutex;
/* CPU capability flags, filled in by mono_arch_init () from hwcap detection
 * (and possibly overridden by the MONO_CPU_ARCH environment variable). */
95 static gboolean v5_supported = FALSE;
96 static gboolean v6_supported = FALSE;
97 static gboolean v7_supported = FALSE;
98 static gboolean v7s_supported = FALSE;
99 static gboolean thumb_supported = FALSE;
100 static gboolean thumb2_supported = FALSE;
102 * Whether to use the ARM EABI
104 static gboolean eabi_supported = FALSE;
107 * Whether to use the iphone ABI extensions:
108 * http://developer.apple.com/library/ios/documentation/Xcode/Conceptual/iPhoneOSABIReference/index.html
109 * Basically, r7 is used as a frame pointer and it should point to the saved r7 + lr.
110 * This is required for debugging/profiling tools to work, but it has some overhead so it should
111 * only be turned on in debug builds.
113 static gboolean iphone_abi = FALSE;
116 * The FPU we are generating code for. This is NOT runtime configurable right now,
117 * since some things like MONO_ARCH_CALLEE_FREGS still depend on defines.
119 static MonoArmFPU arm_fpu;
121 #if defined(ARM_FPU_VFP_HARD)
123 * On armhf, d0-d7 are used for argument passing and d8-d15
124 * must be preserved across calls, which leaves us no room
125 * for scratch registers. So we use d14-d15 but back up their
126 * previous contents to a stack slot before using them - see
127 * mono_arm_emit_vfp_scratch_save/_restore ().
129 static int vfp_scratch1 = ARM_VFP_D14;
130 static int vfp_scratch2 = ARM_VFP_D15;
133 * On armel, d0-d7 do not need to be preserved, so we can
134 * freely make use of them as scratch registers.
136 static int vfp_scratch1 = ARM_VFP_D0;
137 static int vfp_scratch2 = ARM_VFP_D1;
/* Soft-debugger single-step state; see mono_arch_init () for the wrappers. */
142 static volatile int ss_trigger_var = 0;
144 static gpointer single_step_func_wrapper;
145 static gpointer breakpoint_func_wrapper;
148 * The code generated for sequence points reads from this location, which is
149 * made read-only when single stepping is enabled.
151 static gpointer ss_trigger_page;
153 /* Enabled breakpoints read from this trigger page */
154 static gpointer bp_trigger_page;
158 * floating point support: on ARM it is a mess, there are at least 3
159 * different setups, each of which binary incompatible with the others.
160 * 1) FPA: old and ugly, but unfortunately what current distros use
161 * the double binary format has the two words swapped. 8 double registers.
162 * Implemented usually by kernel emulation.
163 * 2) softfloat: the compiler emulates all the fp ops. Usually uses the
164 * ugly swapped double format (I guess a softfloat-vfp exists, too, though).
165 * 3) VFP: the new and actually sensible and useful FP support. Implemented
166 * in HW or kernel-emulated, requires new tools. I think this is what symbian uses.
168 * We do not care about FPA. We will support soft float and VFP.
170 int mono_exc_esp_offset = 0;
/* Immediate-range predicates for ARM load/store encodings:
 * imm12 for LDR/STR, imm8 for LDRD-style offsets, fpimm8 for VFP FLDS/FLDD. */
172 #define arm_is_imm12(v) ((v) > -4096 && (v) < 4096)
173 #define arm_is_imm8(v) ((v) > -256 && (v) < 256)
174 #define arm_is_fpimm8(v) ((v) >= -1020 && (v) <= 1020)
/* Mask/value pair used to recognize a "ldr pc, [...]" instruction word. */
176 #define LDR_MASK ((0xf << ARMCOND_SHIFT) | (3 << 26) | (1 << 22) | (1 << 20) | (15 << 12))
177 #define LDR_PC_VAL ((ARMCOND_AL << ARMCOND_SHIFT) | (1 << 26) | (0 << 22) | (1 << 20) | (15 << 12))
178 #define IS_LDR_PC(val) (((val) & LDR_MASK) == LDR_PC_VAL)
/* Pre-encoded instruction words: "add lr, pc, #4" and "mov lr, pc". */
180 #define ADD_LR_PC_4 ((ARMCOND_AL << ARMCOND_SHIFT) | (1 << 25) | (1 << 23) | (ARMREG_PC << 16) | (ARMREG_LR << 12) | 4)
181 #define MOV_LR_PC ((ARMCOND_AL << ARMCOND_SHIFT) | (1 << 24) | (0xa << 20) | (ARMREG_LR << 12) | ARMREG_PC)
182 //#define DEBUG_IMT 0
184 /* A variant of ARM_LDR_IMM which can handle large offsets */
185 #define ARM_LDR_IMM_GENERAL(code, dreg, basereg, offset, scratch_reg) do { \
186 if (arm_is_imm12 ((offset))) { \
187 ARM_LDR_IMM (code, (dreg), (basereg), (offset)); \
189 g_assert ((scratch_reg) != (basereg)); \
190 code = mono_arm_emit_load_imm (code, (scratch_reg), (offset)); \
191 ARM_LDR_REG_REG (code, (dreg), (basereg), (scratch_reg)); \
/* Store counterpart of ARM_LDR_IMM_GENERAL: STR with a possibly-large offset. */
195 #define ARM_STR_IMM_GENERAL(code, dreg, basereg, offset, scratch_reg) do { \
196 if (arm_is_imm12 ((offset))) { \
197 ARM_STR_IMM (code, (dreg), (basereg), (offset)); \
199 g_assert ((scratch_reg) != (basereg)); \
200 code = mono_arm_emit_load_imm (code, (scratch_reg), (offset)); \
201 ARM_STR_REG_REG (code, (dreg), (basereg), (scratch_reg)); \
/* Forward declaration; defined later in this file. */
206 static void mono_arch_compute_omit_fp (MonoCompile *cfg);
/* Returns a human-readable name for integer register 'reg' (0-15),
 * used in debug dumps of the register allocator. */
210 mono_arch_regname (int reg)
212 static const char * rnames[] = {
213 "arm_r0", "arm_r1", "arm_r2", "arm_r3", "arm_v1",
214 "arm_v2", "arm_v3", "arm_v4", "arm_v5", "arm_v6",
215 "arm_v7", "arm_fp", "arm_ip", "arm_sp", "arm_lr",
218 if (reg >= 0 && reg < 16)
/* Returns a human-readable name for VFP register 'reg' (0-31). */
224 mono_arch_fregname (int reg)
226 static const char * rnames[] = {
227 "arm_f0", "arm_f1", "arm_f2", "arm_f3", "arm_f4",
228 "arm_f5", "arm_f6", "arm_f7", "arm_f8", "arm_f9",
229 "arm_f10", "arm_f11", "arm_f12", "arm_f13", "arm_f14",
230 "arm_f15", "arm_f16", "arm_f17", "arm_f18", "arm_f19",
231 "arm_f20", "arm_f21", "arm_f22", "arm_f23", "arm_f24",
232 "arm_f25", "arm_f26", "arm_f27", "arm_f28", "arm_f29",
235 if (reg >= 0 && reg < 32)
/* Emits "dreg = sreg + imm" at 'code'. Uses a single ADD when imm fits the
 * rotated-imm8 encoding; otherwise materializes imm into dreg first, which is
 * why dreg must differ from sreg in that case. Returns the updated code ptr. */
243 emit_big_add (guint8 *code, int dreg, int sreg, int imm)
245 int imm8, rot_amount;
246 if ((imm8 = mono_arm_is_rotated_imm8 (imm, &rot_amount)) >= 0) {
247 ARM_ADD_REG_IMM (code, dreg, sreg, imm8, rot_amount);
/* imm does not fit an ARM immediate: load it into dreg, then add sreg. */
250 g_assert (dreg != sreg);
251 code = mono_arm_emit_load_imm (code, dreg, imm);
252 ARM_ADD_REG_REG (code, dreg, dreg, sreg);
/* Emits a word-by-word copy of 'size' bytes from sreg+soffset to dreg+doffset.
 * Large copies (> 4 pointers) use a countdown loop; smaller ones are unrolled.
 * Clobbers r0-r3 and lr, which is safe only for its single use: copying
 * incoming stack arguments in the prolog (see comment below). */
257 emit_memcpy (guint8 *code, int size, int dreg, int doffset, int sreg, int soffset)
259 /* we can use r0-r3, since this is called only for incoming args on the stack */
260 if (size > sizeof (gpointer) * 4) {
262 code = emit_big_add (code, ARMREG_R0, sreg, soffset);
263 code = emit_big_add (code, ARMREG_R1, dreg, doffset);
/* r2 = remaining byte count; loop copies one word per iteration. */
264 start_loop = code = mono_arm_emit_load_imm (code, ARMREG_R2, size);
265 ARM_LDR_IMM (code, ARMREG_R3, ARMREG_R0, 0);
266 ARM_STR_IMM (code, ARMREG_R3, ARMREG_R1, 0);
267 ARM_ADD_REG_IMM8 (code, ARMREG_R0, ARMREG_R0, 4);
268 ARM_ADD_REG_IMM8 (code, ARMREG_R1, ARMREG_R1, 4);
269 ARM_SUBS_REG_IMM8 (code, ARMREG_R2, ARMREG_R2, 4);
/* Branch back while r2 != 0; target patched immediately below. */
270 ARM_B_COND (code, ARMCOND_NE, 0);
271 arm_patch (code - 4, start_loop);
/* Small copy, offsets in imm12 range: unrolled LDR/STR pairs through lr. */
274 if (arm_is_imm12 (doffset) && arm_is_imm12 (doffset + size) &&
275 arm_is_imm12 (soffset) && arm_is_imm12 (soffset + size)) {
277 ARM_LDR_IMM (code, ARMREG_LR, sreg, soffset);
278 ARM_STR_IMM (code, ARMREG_LR, dreg, doffset);
/* Offsets too large for imm12: compute base addresses into r0/r1 first. */
284 code = emit_big_add (code, ARMREG_R0, sreg, soffset);
285 code = emit_big_add (code, ARMREG_R1, dreg, doffset);
286 doffset = soffset = 0;
288 ARM_LDR_IMM (code, ARMREG_LR, ARMREG_R0, soffset);
289 ARM_STR_IMM (code, ARMREG_LR, ARMREG_R1, doffset);
/* All unrolled paths must consume the size exactly (word multiples). */
295 g_assert (size == 0);
/* Emits an indirect call through 'reg'. Uses BLX where available; otherwise
 * emulates it with "mov lr, pc; mov pc, reg" (pre-v5 sequence). */
300 emit_call_reg (guint8 *code, int reg)
303 ARM_BLX_REG (code, reg);
305 #ifdef USE_JUMP_TABLES
/* Jump-table builds never take this path. */
306 g_assert_not_reached ();
308 ARM_MOV_REG_REG (code, ARMREG_LR, ARMREG_PC);
312 ARM_MOV_REG_REG (code, ARMREG_PC, reg);
/* Emits a patchable call sequence. For dynamic methods the callee address is
 * embedded as inline data loaded pc-relative into ip, then called via
 * emit_call_reg (); otherwise a patchable BL is used. */
318 emit_call_seq (MonoCompile *cfg, guint8 *code)
320 #ifdef USE_JUMP_TABLES
321 code = mono_arm_patchable_bl (code, ARMCOND_AL);
323 if (cfg->method->dynamic) {
324 ARM_LDR_IMM (code, ARMREG_IP, ARMREG_PC, 0);
/* Placeholder callee address, filled in when the call target is patched. */
326 *(gpointer*)code = NULL;
328 code = emit_call_reg (code, ARMREG_IP);
/* Emits a patchable conditional branch. With jump tables, the target is an
 * indirect jump through a jumptable entry loaded into ip; otherwise a plain
 * B with a zero displacement to be fixed up by arm_patch (). */
337 mono_arm_patchable_b (guint8 *code, int cond)
339 #ifdef USE_JUMP_TABLES
342 jte = mono_jumptable_add_entry ();
343 code = mono_arm_load_jumptable_entry (code, jte, ARMREG_IP);
344 ARM_BX_COND (code, cond, ARMREG_IP);
346 ARM_B_COND (code, cond, 0);
/* Same as mono_arm_patchable_b () but for calls (BL/BLX), preserving lr. */
352 mono_arm_patchable_bl (guint8 *code, int cond)
354 #ifdef USE_JUMP_TABLES
357 jte = mono_jumptable_add_entry ();
358 code = mono_arm_load_jumptable_entry (code, jte, ARMREG_IP);
359 ARM_BLX_REG_COND (code, cond, ARMREG_IP);
361 ARM_BL_COND (code, cond, 0);
366 #ifdef USE_JUMP_TABLES
/* Loads the ADDRESS of jumptable entry 'jte' into 'reg' via movw/movt. */
368 mono_arm_load_jumptable_entry_addr (guint8 *code, gpointer *jte, ARMReg reg)
370 ARM_MOVW_REG_IMM (code, reg, GPOINTER_TO_UINT(jte) & 0xffff);
371 ARM_MOVT_REG_IMM (code, reg, (GPOINTER_TO_UINT(jte) >> 16) & 0xffff);
/* Loads the CONTENTS of jumptable entry 'jte' into 'reg'. */
376 mono_arm_load_jumptable_entry (guint8 *code, gpointer* jte, ARMReg reg)
378 code = mono_arm_load_jumptable_entry_addr (code, jte, reg);
379 ARM_LDR_IMM (code, reg, reg, 0);
/* Emits code after a call to move the return value from the ABI return
 * registers into ins->dreg, for FP call opcodes. R4 results are converted
 * to double (the JIT computes in double precision internally). */
385 emit_move_return_value (MonoCompile *cfg, MonoInst *ins, guint8 *code)
387 switch (ins->opcode) {
390 case OP_FCALL_MEMBASE:
392 if (((MonoCallInst*)ins)->signature->ret->type == MONO_TYPE_R4) {
/* hard-float: float result arrives in s0/d0; widen it to double. */
394 ARM_CVTS (code, ins->dreg, ARM_VFP_F0);
/* soft-float ABI: float result arrives in r0; move to VFP, then widen. */
396 ARM_FMSR (code, ins->dreg, ARMREG_R0);
397 ARM_CVTS (code, ins->dreg, ins->dreg);
/* double result in d0 (hard-float)... */
401 ARM_CPYD (code, ins->dreg, ARM_VFP_D0);
/* ...or in the r0/r1 pair (soft-float ABI). */
403 ARM_FMDRR (code, ARMREG_R0, ARMREG_R1, ins->dreg);
416 * Emit code to push an LMF structure on the LMF stack.
417 * On arm, this is intermixed with the initialization of other fields of the structure.
420 emit_save_lmf (MonoCompile *cfg, guint8 *code, gint32 lmf_offset)
422 gboolean get_lmf_fast = FALSE;
425 #ifdef HAVE_AEABI_READ_TP
/* Fast path: read the LMF address directly from TLS via __aeabi_read_tp. */
426 gint32 lmf_addr_tls_offset = mono_get_lmf_addr_tls_offset ();
428 if (lmf_addr_tls_offset != -1) {
431 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_INTERNAL_METHOD,
432 (gpointer)"__aeabi_read_tp");
433 code = emit_call_seq (cfg, code);
/* r0 = thread pointer; index it with the cached TLS offset. */
435 ARM_LDR_IMM (code, ARMREG_R0, ARMREG_R0, lmf_addr_tls_offset);
441 if (cfg->method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) {
444 /* Inline mono_get_lmf_addr () */
445 /* jit_tls = pthread_getspecific (mono_jit_tls_id); lmf_addr = &jit_tls->lmf; */
447 /* Load mono_jit_tls_id */
449 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_JIT_TLS_ID, NULL);
450 ARM_LDR_IMM (code, ARMREG_R0, ARMREG_PC, 0);
/* Inline data slot patched with the TLS key at link time. */
452 *(gpointer*)code = NULL;
454 ARM_LDR_REG_REG (code, ARMREG_R0, ARMREG_PC, ARMREG_R0);
455 /* call pthread_getspecific () */
456 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_INTERNAL_METHOD,
457 (gpointer)"pthread_getspecific");
458 code = emit_call_seq (cfg, code);
459 /* lmf_addr = &jit_tls->lmf */
460 lmf_offset = MONO_STRUCT_OFFSET (MonoJitTlsData, lmf);
461 g_assert (arm_is_imm8 (lmf_offset));
462 ARM_ADD_REG_IMM (code, ARMREG_R0, ARMREG_R0, lmf_offset, 0);
/* Slow path: plain call to mono_get_lmf_addr (). */
469 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_INTERNAL_METHOD,
470 (gpointer)"mono_get_lmf_addr");
471 code = emit_call_seq (cfg, code);
473 /* we build the MonoLMF structure on the stack - see mini-arm.h */
474 /* lmf_offset is the offset from the previous stack pointer,
475 * alloc_size is the total stack space allocated, so the offset
476 * of MonoLMF from the current stack ptr is alloc_size - lmf_offset.
477 * The pointer to the struct is put in r1 (new_lmf).
478 * ip is used as scratch
479 * The callee-saved registers are already in the MonoLMF structure
481 code = emit_big_add (code, ARMREG_R1, ARMREG_SP, lmf_offset);
482 /* r0 is the result from mono_get_lmf_addr () */
483 ARM_STR_IMM (code, ARMREG_R0, ARMREG_R1, MONO_STRUCT_OFFSET (MonoLMF, lmf_addr));
484 /* new_lmf->previous_lmf = *lmf_addr */
485 ARM_LDR_IMM (code, ARMREG_IP, ARMREG_R0, MONO_STRUCT_OFFSET (MonoLMF, previous_lmf));
486 ARM_STR_IMM (code, ARMREG_IP, ARMREG_R1, MONO_STRUCT_OFFSET (MonoLMF, previous_lmf));
487 /* *(lmf_addr) = r1 */
488 ARM_STR_IMM (code, ARMREG_R1, ARMREG_R0, MONO_STRUCT_OFFSET (MonoLMF, previous_lmf))_;
489 /* Skip method (only needed for trampoline LMF frames) */
490 ARM_STR_IMM (code, ARMREG_SP, ARMREG_R1, MONO_STRUCT_OFFSET (MonoLMF, sp));
491 ARM_STR_IMM (code, ARMREG_FP, ARMREG_R1, MONO_STRUCT_OFFSET (MonoLMF, fp));
492 /* save the current IP */
493 ARM_MOV_REG_REG (code, ARMREG_IP, ARMREG_PC);
494 ARM_STR_IMM (code, ARMREG_IP, ARMREG_R1, MONO_STRUCT_OFFSET (MonoLMF, ip));
/* Mark every LMF slot as holding no GC references for precise stack scanning. */
496 for (i = 0; i < sizeof (MonoLMF); i += sizeof (mgreg_t))
497 mini_gc_set_slot_type_from_fp (cfg, lmf_offset + i, SLOT_NOREF);
/* Emits FLDS loads for the single-precision (armhf) float arguments of a call,
 * loading each from its stack slot into the assigned hardware register.
 * Grows cfg->native_code if the reserved space may be exceeded. */
508 emit_float_args (MonoCompile *cfg, MonoCallInst *inst, guint8 *code, int *max_len, guint *offset)
512 for (list = inst->float_args; list; list = list->next) {
513 FloatArgData *fad = list->data;
514 MonoInst *var = get_vreg_to_inst (cfg, fad->vreg);
/* FLDS only encodes offsets in the +/-1020 range; otherwise go via lr. */
515 gboolean imm = arm_is_fpimm8 (var->inst_offset);
517 /* 4+1 insns for emit_big_add () and 1 for FLDS. */
/* Reallocate the code buffer if this sequence might not fit. */
523 if (*offset + *max_len > cfg->code_size) {
524 cfg->code_size += *max_len;
525 cfg->native_code = g_realloc (cfg->native_code, cfg->code_size);
527 code = cfg->native_code + *offset;
531 code = emit_big_add (code, ARMREG_LR, var->inst_basereg, var->inst_offset);
532 ARM_FLDS (code, fad->hreg, ARMREG_LR, 0);
534 ARM_FLDS (code, fad->hreg, var->inst_basereg, var->inst_offset);
536 *offset = code - cfg->native_code;
/* Spills VFP scratch register 'reg' (vfp_scratch1/2, i.e. d14/d15 on armhf)
 * to its pre-allocated stack slot before the JIT clobbers it. */
543 mono_arm_emit_vfp_scratch_save (MonoCompile *cfg, guint8 *code, int reg)
547 g_assert (reg == vfp_scratch1 || reg == vfp_scratch2);
549 inst = (MonoInst *) cfg->arch.vfp_scratch_slots [reg == vfp_scratch1 ? 0 : 1];
/* FSTD offset range is limited; go via lr when the slot is too far. */
552 if (!arm_is_fpimm8 (inst->inst_offset)) {
553 code = emit_big_add (code, ARMREG_LR, inst->inst_basereg, inst->inst_offset);
554 ARM_FSTD (code, reg, ARMREG_LR, 0);
556 ARM_FSTD (code, reg, inst->inst_basereg, inst->inst_offset);
/* Reloads VFP scratch register 'reg' from the slot saved by the function above. */
563 mono_arm_emit_vfp_scratch_restore (MonoCompile *cfg, guint8 *code, int reg)
567 g_assert (reg == vfp_scratch1 || reg == vfp_scratch2);
569 inst = (MonoInst *) cfg->arch.vfp_scratch_slots [reg == vfp_scratch1 ? 0 : 1];
572 if (!arm_is_fpimm8 (inst->inst_offset)) {
573 code = emit_big_add (code, ARMREG_LR, inst->inst_basereg, inst->inst_offset);
574 ARM_FLDD (code, reg, ARMREG_LR, 0);
576 ARM_FLDD (code, reg, inst->inst_basereg, inst->inst_offset);
585 * Emit code to pop an LMF structure from the LMF stack.
/* Unlinks the current LMF: *lmf_addr = lmf->previous_lmf. Small offsets are
 * addressed off the frame register directly; large ones via a base in r2. */
588 emit_restore_lmf (MonoCompile *cfg, guint8 *code, gint32 lmf_offset)
592 if (lmf_offset < 32) {
593 basereg = cfg->frame_reg;
/* Offset too large for the imm forms below: compute lmf base into r2. */
598 code = emit_big_add (code, ARMREG_R2, cfg->frame_reg, lmf_offset);
601 /* ip = previous_lmf */
602 ARM_LDR_IMM (code, ARMREG_IP, basereg, offset + MONO_STRUCT_OFFSET (MonoLMF, previous_lmf));
604 ARM_LDR_IMM (code, ARMREG_LR, basereg, offset + MONO_STRUCT_OFFSET (MonoLMF, lmf_addr));
605 /* *(lmf_addr) = previous_lmf */
606 ARM_STR_IMM (code, ARMREG_IP, ARMREG_LR, MONO_STRUCT_OFFSET (MonoLMF, previous_lmf));
611 #endif /* #ifndef DISABLE_JIT */
614 * mono_arch_get_argument_info:
615 * @csig: a method signature
616 * @param_count: the number of parameters to consider
617 * @arg_info: an array to store the result infos
619 * Gathers information on parameters such as size, alignment and
620 * padding. arg_info should be large enough to hold param_count + 1 entries.
622 * Returns the size of the activation frame.
625 mono_arch_get_argument_info (MonoGenericSharingContext *gsctx, MonoMethodSignature *csig, int param_count, MonoJitArgumentInfo *arg_info)
627 int k, frame_size = 0;
628 guint32 size, align, pad;
/* A struct return value is passed as a hidden pointer argument. */
632 t = mini_type_get_underlying_type (gsctx, csig->ret);
633 if (MONO_TYPE_ISSTRUCT (t)) {
634 frame_size += sizeof (gpointer);
638 arg_info [0].offset = offset;
/* 'this' pointer, when present, occupies one more pointer slot. */
641 frame_size += sizeof (gpointer);
645 arg_info [0].size = frame_size;
647 for (k = 0; k < param_count; k++) {
648 size = mini_type_stack_size_full (NULL, csig->params [k], &align, csig->pinvoke);
650 /* ignore alignment for now */
/* Pad frame_size up to 'align' and record the padding for this arg. */
653 frame_size += pad = (align - (frame_size & (align - 1))) & (align - 1);
654 arg_info [k].pad = pad;
656 arg_info [k + 1].pad = 0;
657 arg_info [k + 1].size = size;
659 arg_info [k + 1].offset = offset;
/* Final frame size is rounded up to the ABI frame alignment. */
663 align = MONO_ARCH_FRAME_ALIGNMENT;
664 frame_size += pad = (align - (frame_size & (align - 1))) & (align - 1);
665 arg_info [k].pad = pad;
/* Delegate invokes with more parameters than this fall back to the generic path. */
670 #define MAX_ARCH_DELEGATE_PARAMS 3
/* Generates the tiny delegate-invoke thunk. has_target variant: load
 * method_ptr into ip, replace 'this' (r0) with the delegate target, jump.
 * No-target variant: shift the register args down by one and jump. */
673 get_delegate_invoke_impl (gboolean has_target, gboolean param_count, guint32 *code_size)
675 guint8 *code, *start;
678 start = code = mono_global_codeman_reserve (12);
680 /* Replace the this argument with the target */
681 ARM_LDR_IMM (code, ARMREG_IP, ARMREG_R0, MONO_STRUCT_OFFSET (MonoDelegate, method_ptr));
682 ARM_LDR_IMM (code, ARMREG_R0, ARMREG_R0, MONO_STRUCT_OFFSET (MonoDelegate, target));
683 ARM_MOV_REG_REG (code, ARMREG_PC, ARMREG_IP);
685 g_assert ((code - start) <= 12);
687 mono_arch_flush_icache (start, 12);
/* No-target case: 2 insns + one MOV per parameter. */
691 size = 8 + param_count * 4;
692 start = code = mono_global_codeman_reserve (size);
694 ARM_LDR_IMM (code, ARMREG_IP, ARMREG_R0, MONO_STRUCT_OFFSET (MonoDelegate, method_ptr));
695 /* slide down the arguments */
696 for (i = 0; i < param_count; ++i) {
697 ARM_MOV_REG_REG (code, (ARMREG_R0 + i), (ARMREG_R0 + i + 1));
699 ARM_MOV_REG_REG (code, ARMREG_PC, ARMREG_IP);
701 g_assert ((code - start) <= size);
703 mono_arch_flush_icache (start, size);
707 *code_size = code - start;
713 * mono_arch_get_delegate_invoke_impls:
715 * Return a list of MonoAotTrampInfo structures for the delegate invoke impl
719 mono_arch_get_delegate_invoke_impls (void)
727 code = get_delegate_invoke_impl (TRUE, 0, &code_len);
728 res = g_slist_prepend (res, mono_tramp_info_create ("delegate_invoke_impl_has_target", code, code_len, NULL, NULL));
/* One no-target variant per supported parameter count. */
730 for (i = 0; i <= MAX_ARCH_DELEGATE_PARAMS; ++i) {
731 code = get_delegate_invoke_impl (FALSE, i, &code_len);
732 tramp_name = g_strdup_printf ("delegate_invoke_impl_target_%d", i);
733 res = g_slist_prepend (res, mono_tramp_info_create (tramp_name, code, code_len, NULL, NULL));
/* Runtime entry point: returns (and caches) the invoke thunk matching 'sig'.
 * Cache access is guarded by the mini-arch lock. */
741 mono_arch_get_delegate_invoke_impl (MonoMethodSignature *sig, gboolean has_target)
743 guint8 *code, *start;
746 /* FIXME: Support more cases */
747 sig_ret = mini_type_get_underlying_type (NULL, sig->ret);
748 if (MONO_TYPE_ISSTRUCT (sig_ret))
752 static guint8* cached = NULL;
753 mono_mini_arch_lock ();
755 mono_mini_arch_unlock ();
/* AOT builds fetch the precompiled trampoline instead of generating code. */
760 start = mono_aot_get_trampoline ("delegate_invoke_impl_has_target");
762 start = get_delegate_invoke_impl (TRUE, 0, NULL);
764 mono_mini_arch_unlock ();
767 static guint8* cache [MAX_ARCH_DELEGATE_PARAMS + 1] = {NULL};
/* Only register-sized parameters are supported by the sliding thunk. */
770 if (sig->param_count > MAX_ARCH_DELEGATE_PARAMS)
772 for (i = 0; i < sig->param_count; ++i)
773 if (!mono_is_regsize_var (sig->params [i]))
776 mono_mini_arch_lock ();
777 code = cache [sig->param_count];
779 mono_mini_arch_unlock ();
784 char *name = g_strdup_printf ("delegate_invoke_impl_target_%d", sig->param_count);
785 start = mono_aot_get_trampoline (name);
788 start = get_delegate_invoke_impl (FALSE, sig->param_count, NULL);
790 cache [sig->param_count] = start;
791 mono_mini_arch_unlock ();
/* Virtual delegate invoke is not implemented on this backend in this view. */
799 mono_arch_get_delegate_virtual_invoke_impl (MonoMethodSignature *sig, MonoMethod *method, int offset, gboolean load_imt_reg)
/* Returns the 'this' argument of the current call: always passed in r0. */
805 mono_arch_get_this_arg_from_call (mgreg_t *regs, guint8 *code)
807 return (gpointer)regs [ARMREG_R0];
811 * Initialize the cpu to execute managed code.
814 mono_arch_cpu_init (void)
816 i8_align = MONO_ABI_ALIGNOF (gint64);
817 #ifdef MONO_CROSS_COMPILE
818 /* Need to set the alignment of i8 since it can different on the target */
819 #ifdef TARGET_ANDROID
/* Android targets align 64-bit ints differently than the build host. */
821 mono_type_set_alignment (MONO_TYPE_I8, i8_align);
/* Builds a small thunk that captures the caller's register state into a
 * MonoContext on the stack, calls 'function' with a pointer to that context,
 * then restores every register (including pc) from the possibly-modified
 * context. Used for the soft debugger's single-step/breakpoint entry points. */
827 create_function_wrapper (gpointer function)
829 guint8 *start, *code;
831 start = code = mono_global_codeman_reserve (96);
834 * Construct the MonoContext structure on the stack.
837 ARM_SUB_REG_IMM8 (code, ARMREG_SP, ARMREG_SP, sizeof (MonoContext));
839 /* save ip, lr and pc into their corresponding ctx.regs slots. */
840 ARM_STR_IMM (code, ARMREG_IP, ARMREG_SP, MONO_STRUCT_OFFSET (MonoContext, regs) + sizeof (mgreg_t) * ARMREG_IP);
841 ARM_STR_IMM (code, ARMREG_LR, ARMREG_SP, MONO_STRUCT_OFFSET (MonoContext, regs) + 4 * ARMREG_LR);
842 ARM_STR_IMM (code, ARMREG_LR, ARMREG_SP, MONO_STRUCT_OFFSET (MonoContext, regs) + 4 * ARMREG_PC);
844 /* save r0..r10 and fp */
845 ARM_ADD_REG_IMM8 (code, ARMREG_IP, ARMREG_SP, MONO_STRUCT_OFFSET (MonoContext, regs));
846 ARM_STM (code, ARMREG_IP, 0x0fff);
848 /* now we can update fp. */
849 ARM_MOV_REG_REG (code, ARMREG_FP, ARMREG_SP);
851 /* make ctx.esp hold the actual value of sp at the beginning of this method. */
852 ARM_ADD_REG_IMM8 (code, ARMREG_R0, ARMREG_FP, sizeof (MonoContext));
853 ARM_STR_IMM (code, ARMREG_R0, ARMREG_IP, 4 * ARMREG_SP);
854 ARM_STR_IMM (code, ARMREG_R0, ARMREG_FP, MONO_STRUCT_OFFSET (MonoContext, regs) + 4 * ARMREG_SP);
856 /* make ctx.eip hold the address of the call. */
857 ARM_SUB_REG_IMM8 (code, ARMREG_LR, ARMREG_LR, 4);
858 ARM_STR_IMM (code, ARMREG_LR, ARMREG_SP, MONO_STRUCT_OFFSET (MonoContext, pc));
860 /* r0 now points to the MonoContext */
861 ARM_MOV_REG_REG (code, ARMREG_R0, ARMREG_FP);
/* Load the callee address: via jumptable entry, or inline pc-relative data. */
864 #ifdef USE_JUMP_TABLES
866 gpointer *jte = mono_jumptable_add_entry ();
867 code = mono_arm_load_jumptable_entry (code, jte, ARMREG_IP);
871 ARM_LDR_IMM (code, ARMREG_IP, ARMREG_PC, 0);
873 *(gpointer*)code = function;
876 ARM_BLX_REG (code, ARMREG_IP);
878 /* we're back; save ctx.eip and ctx.esp into the corresponding regs slots. */
879 ARM_LDR_IMM (code, ARMREG_R0, ARMREG_FP, MONO_STRUCT_OFFSET (MonoContext, pc));
880 ARM_STR_IMM (code, ARMREG_R0, ARMREG_FP, MONO_STRUCT_OFFSET (MonoContext, regs) + 4 * ARMREG_LR);
881 ARM_STR_IMM (code, ARMREG_R0, ARMREG_FP, MONO_STRUCT_OFFSET (MonoContext, regs) + 4 * ARMREG_PC);
883 /* make ip point to the regs array, then restore everything, including pc. */
884 ARM_ADD_REG_IMM8 (code, ARMREG_IP, ARMREG_FP, MONO_STRUCT_OFFSET (MonoContext, regs));
885 ARM_LDM (code, ARMREG_IP, 0xffff);
887 mono_arch_flush_icache (start, code - start);
893 * Initialize architecture specific code.
896 mono_arch_init (void)
898 const char *cpu_arch;
900 mono_mutex_init_recursive (&mini_arch_mutex);
901 #ifdef MONO_ARCH_SOFT_DEBUG_SUPPORTED
/* Soft breakpoints: build the context-capturing debugger entry thunks. */
902 if (mini_get_debug_options ()->soft_breakpoints) {
903 single_step_func_wrapper = create_function_wrapper (debugger_agent_single_step_from_context);
904 breakpoint_func_wrapper = create_function_wrapper (debugger_agent_breakpoint_from_context);
/* Hardware breakpoints: trigger pages; the bp page is made inaccessible so
 * reads from it fault, which is how enabled breakpoints fire. */
909 ss_trigger_page = mono_valloc (NULL, mono_pagesize (), MONO_MMAP_READ|MONO_MMAP_32BIT);
910 bp_trigger_page = mono_valloc (NULL, mono_pagesize (), MONO_MMAP_READ|MONO_MMAP_32BIT);
911 mono_mprotect (bp_trigger_page, mono_pagesize (), 0);
/* Register the arch-specific icalls needed by AOT images. */
914 mono_aot_register_jit_icall ("mono_arm_throw_exception", mono_arm_throw_exception);
915 mono_aot_register_jit_icall ("mono_arm_throw_exception_by_token", mono_arm_throw_exception_by_token);
916 mono_aot_register_jit_icall ("mono_arm_resume_unwind", mono_arm_resume_unwind);
917 #if defined(ENABLE_GSHAREDVT)
918 mono_aot_register_jit_icall ("mono_arm_start_gsharedvt_call", mono_arm_start_gsharedvt_call);
921 #if defined(__ARM_EABI__)
922 eabi_supported = TRUE;
925 #if defined(ARM_FPU_VFP_HARD)
926 arm_fpu = MONO_ARM_FPU_VFP_HARD;
928 arm_fpu = MONO_ARM_FPU_VFP;
930 #if defined(ARM_FPU_NONE) && !defined(__APPLE__)
931 /* If we're compiling with a soft float fallback and it
932 turns out that no VFP unit is available, we need to
933 switch to soft float. We don't do this for iOS, since
934 iOS devices always have a VFP unit. */
935 if (!mono_hwcap_arm_has_vfp)
936 arm_fpu = MONO_ARM_FPU_NONE;
/* Architecture-level feature flags come from hwcap probing. */
940 v5_supported = mono_hwcap_arm_is_v5;
941 v6_supported = mono_hwcap_arm_is_v6;
942 v7_supported = mono_hwcap_arm_is_v7;
943 v7s_supported = mono_hwcap_arm_is_v7s;
945 #if defined(__APPLE__)
946 /* iOS is special-cased here because we don't yet
947 have a way to properly detect CPU features on it. */
948 thumb_supported = TRUE;
951 thumb_supported = mono_hwcap_arm_has_thumb;
952 thumb2_supported = mono_hwcap_arm_has_thumb2;
955 /* Format: armv(5|6|7[s])[-thumb[2]] */
956 cpu_arch = g_getenv ("MONO_CPU_ARCH");
958 /* Do this here so it overrides any detection. */
960 if (strncmp (cpu_arch, "armv", 4) == 0) {
961 v5_supported = cpu_arch [4] >= '5';
962 v6_supported = cpu_arch [4] >= '6';
963 v7_supported = cpu_arch [4] >= '7';
964 v7s_supported = strncmp (cpu_arch, "armv7s", 6) == 0;
967 thumb_supported = strstr (cpu_arch, "thumb") != NULL;
968 thumb2_supported = strstr (cpu_arch, "thumb2") != NULL;
973 * Cleanup architecture specific code.
976 mono_arch_cleanup (void)
981 * This function returns the optimizations supported on this cpu.
984 mono_arch_cpu_optimizations (guint32 *exclude_mask)
986 /* no arm-specific optimizations yet */
992 * This function test for all SIMD functions supported.
994 * Returns a bitmask corresponding to all supported versions.
998 mono_arch_cpu_enumerate_simd_versions (void)
1000 /* SIMD is currently unimplemented */
/* Returns whether 'opcode' must be emulated in software on this CPU
 * (e.g. division opcodes on cores without the v7s hardware divider). */
1008 mono_arch_opcode_needs_emulation (MonoCompile *cfg, int opcode)
1010 if (v7s_supported) {
1024 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
/* TRUE when the runtime fell back to full software floating point. */
1026 mono_arch_is_soft_float (void)
1028 return arm_fpu == MONO_ARM_FPU_NONE;
/* TRUE when generating code for the hard-float (armhf) ABI. */
1033 mono_arm_is_hard_float (void)
1035 return arm_fpu == MONO_ARM_FPU_VFP_HARD;
/* Returns whether 't' fits in a single 32-bit integer register, and thus can
 * be considered for global register allocation. */
1039 is_regsize_var (MonoGenericSharingContext *gsctx, MonoType *t) {
1042 t = mini_type_get_underlying_type (gsctx, t);
1049 case MONO_TYPE_FNPTR:
1051 case MONO_TYPE_OBJECT:
1052 case MONO_TYPE_STRING:
1053 case MONO_TYPE_CLASS:
1054 case MONO_TYPE_SZARRAY:
1055 case MONO_TYPE_ARRAY:
/* Generic instances count only when they are reference types. */
1057 case MONO_TYPE_GENERICINST:
1058 if (!mono_type_generic_inst_is_valuetype (t))
1061 case MONO_TYPE_VALUETYPE:
/* Collects the local variables eligible for allocation to integer registers:
 * live, non-volatile, non-indirect locals/args of register size. */
1068 mono_arch_get_allocatable_int_vars (MonoCompile *cfg)
1073 for (i = 0; i < cfg->num_varinfo; i++) {
1074 MonoInst *ins = cfg->varinfo [i];
1075 MonoMethodVar *vmv = MONO_VARINFO (cfg, i);
/* unused vars: empty live range. */
1078 if (vmv->range.first_use.abs_pos >= vmv->range.last_use.abs_pos)
1081 if (ins->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT) || (ins->opcode != OP_LOCAL && ins->opcode != OP_ARG))
1084 /* we can only allocate 32 bit values */
1085 if (is_regsize_var (cfg->generic_sharing_context, ins->inst_vtype)) {
1086 g_assert (MONO_VARINFO (cfg, i)->reg == -1);
1087 g_assert (i == vmv->idx);
1088 vars = mono_varlist_insert_sorted (cfg, vars, vmv, FALSE);
1095 #define USE_EXTRA_TEMPS 0
/* Returns the list of callee-saved integer registers available for global
 * register allocation, shaped by the iphone ABI and rgctx register usage. */
1098 mono_arch_get_global_int_regs (MonoCompile *cfg)
1102 mono_arch_compute_omit_fp (cfg);
1105 * FIXME: Interface calls might go through a static rgctx trampoline which
1106 * sets V5, but it doesn't save it, so we need to save it ourselves, and
1109 if (cfg->flags & MONO_CFG_HAS_CALLS)
1110 cfg->uses_rgctx_reg = TRUE;
/* fp is only allocatable when the frame pointer is omitted. */
1112 if (cfg->arch.omit_fp)
1113 regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_FP));
1114 regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_V1));
1115 regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_V2));
1116 regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_V3));
1118 /* V4=R7 is used as a frame pointer, but V7=R10 is preserved */
1119 regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_V7));
1121 regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_V4));
1122 if (!(cfg->compile_aot || cfg->uses_rgctx_reg || COMPILE_LLVM (cfg)))
1123 /* V5 is reserved for passing the vtable/rgctx/IMT method */
1124 regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_V5));
1125 /*regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_V6));*/
1126 /*regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_V7));*/
1132 * mono_arch_regalloc_cost:
1134 * Return the cost, in number of memory references, of the action of
1135 * allocating the variable VMV into a register during global register
1139 mono_arch_regalloc_cost (MonoCompile *cfg, MonoMethodVar *vmv)
1145 #endif /* #ifndef DISABLE_JIT */
/* Fallback so the __GNUC_PREREQ test below compiles on non-glibc toolchains. */
1147 #ifndef __GNUC_PREREQ
1148 #define __GNUC_PREREQ(maj, min) (0)
/* Flushes the instruction cache for freshly generated code, per platform:
 * no-op on NaCl/cross builds, sys_icache_invalidate / __clear_cache /
 * the cacheflush syscall elsewhere. Must be called after every codegen. */
1152 mono_arch_flush_icache (guint8 *code, gint size)
1154 #if defined(__native_client__)
1155 // For Native Client we don't have to flush i-cache here,
1156 // as it's being done by dyncode interface.
1159 #ifdef MONO_CROSS_COMPILE
1161 sys_icache_invalidate (code, size);
1162 #elif __GNUC_PREREQ(4, 1)
1163 __clear_cache (code, code + size);
1164 #elif defined(PLATFORM_ANDROID)
/* Android: invoke the ARM cacheflush syscall directly. */
1165 const int syscall = 0xf0002;
1173 : "r" (code), "r" (code + size), "r" (syscall)
1174 : "r0", "r1", "r7", "r2"
/* Generic Linux: legacy swi-based sys_cacheflush. */
1177 __asm __volatile ("mov r0, %0\n"
1180 "swi 0x9f0002 @ sys_cacheflush"
1182 : "r" (code), "r" (code + size), "r" (0)
1183 : "r0", "r1", "r3" );
1185 #endif /* !__native_client__ */
/* NOTE(review): fragments of the argument-classification types (the ArgStorage
 * enum plus the ArgInfo/CallInfo structs) — the surrounding declarations are
 * missing from this extraction. */
1196 RegTypeStructByAddr,
1197 /* gsharedvt argument passed by addr in greg */
1198 RegTypeGSharedVtInReg,
1199 /* gsharedvt argument passed by addr on stack */
1200 RegTypeGSharedVtOnStack,
1205 guint16 vtsize; /* in param area */
1209 guint8 size : 4; /* 1, 2, 4, 8, or regs used by RegTypeStructByVal */
1214 guint32 stack_usage;
1215 gboolean vtype_retaddr;
1216 /* The index of the vret arg in the argument list */
/* Assigns the next integer argument to a register or stack slot, advancing
 * *gr / *stack_size. 'simple' means a single-word value; otherwise a 64-bit
 * value which may need register-pair alignment or an r3+stack split. */
1226 add_general (guint *gr, guint *stack_size, ArgInfo *ainfo, gboolean simple)
1229 if (*gr > ARMREG_R3) {
1231 ainfo->offset = *stack_size;
1232 ainfo->reg = ARMREG_SP; /* in the caller */
1233 ainfo->storage = RegTypeBase;
1236 ainfo->storage = RegTypeGeneral;
/* 64-bit values may be split across r3 and the stack only when i8 is
 * 4-byte aligned (non-EABI/darwin layout). */
1243 split = i8_align == 4;
1248 if (*gr == ARMREG_R3 && split) {
1249 /* first word in r3 and the second on the stack */
1250 ainfo->offset = *stack_size;
1251 ainfo->reg = ARMREG_SP; /* in the caller */
1252 ainfo->storage = RegTypeBaseGen;
1254 } else if (*gr >= ARMREG_R3) {
1255 if (eabi_supported) {
1256 /* darwin aligns longs to 4 byte only */
1257 if (i8_align == 8) {
1262 ainfo->offset = *stack_size;
1263 ainfo->reg = ARMREG_SP; /* in the caller */
1264 ainfo->storage = RegTypeBase;
/* EABI requires 64-bit values to start in an even register. */
1267 if (eabi_supported) {
1268 if (i8_align == 8 && ((*gr) & 1))
1271 ainfo->storage = RegTypeIRegPair;
/* Assigns the next floating point argument under the armhf (VFP) ABI,
 * implementing the AAPCS back-filling rule described below. */
1280 add_float (guint *fpr, guint *stack_size, ArgInfo *ainfo, gboolean is_double, gint *float_spare)
1283 * If we're calling a function like this:
1285 * void foo(float a, double b, float c)
1287 * We pass a in s0 and b in d1. That leaves us
1288 * with s1 being unused. The armhf ABI recognizes
1289 * this and requires register assignment to then
1290 * use that for the next single-precision arg,
1291 * i.e. c in this example. So float_spare either
1292 * tells us which reg to use for the next single-
1293 * precision arg, or it's -1, meaning use *fpr.
1295 * Note that even though most of the JIT speaks
1296 * double-precision, fpr represents single-
1297 * precision registers.
1299 * See parts 5.5 and 6.1.2 of the AAPCS for how
1303 if (*fpr < ARM_VFP_F16 || (!is_double && *float_spare >= 0)) {
1304 ainfo->storage = RegTypeFP;
1308 * If we're passing a double-precision value
1309 * and *fpr is odd (e.g. it's s1, s3, ...)
1310 * we need to use the next even register. So
1311 * we mark the current *fpr as a spare that
1312 * can be used for the next single-precision
1316 *float_spare = *fpr;
1321 * At this point, we have an even register
1322 * so we assign that and move along.
1326 } else if (*float_spare >= 0) {
1328 * We're passing a single-precision value
1329 * and it looks like a spare single-
1330 * precision register is available. Let's
1334 ainfo->reg = *float_spare;
1338 * If we hit this branch, we're passing a
1339 * single-precision value and we can simply
1340 * use the next available register.
1348 * We've exhausted available floating point
1349 * regs, so pass the rest on the stack.
1357 ainfo->offset = *stack_size;
1358 ainfo->reg = ARMREG_SP;
1359 ainfo->storage = RegTypeBase;
/*
 * get_call_info:
 *
 *   Build a CallInfo describing how each argument and the return value of SIG
 * are passed under the ARM calling convention: which register(s), or which
 * stack offset, and with which RegType* storage class.  The result is
 * allocated from MP when given, otherwise with g_malloc0, and callers read
 * cinfo->stack_usage for the total out-args stack space.
 * NOTE(review): many interior lines are elided in this view; the statements
 * below show the decision structure but not every assignment between them.
 */
1366 get_call_info (MonoGenericSharingContext *gsctx, MonoMemPool *mp, MonoMethodSignature *sig)
1368 guint i, gr, fpr, pstart;
1370 int n = sig->hasthis + sig->param_count;
1371 MonoType *simpletype;
1372 guint32 stack_size = 0;
1374 gboolean is_pinvoke = sig->pinvoke;
1378 cinfo = mono_mempool_alloc0 (mp, sizeof (CallInfo) + (sizeof (ArgInfo) * n));
1380 cinfo = g_malloc0 (sizeof (CallInfo) + (sizeof (ArgInfo) * n));
/* Decide up-front how a struct return is conveyed: small pinvoke structs are
 * returned by value in registers, everything else through a hidden
 * return-address argument (vtype_retaddr). */
1387 t = mini_type_get_underlying_type (gsctx, sig->ret);
1388 if (MONO_TYPE_ISSTRUCT (t)) {
1391 if (is_pinvoke && mono_class_native_size (mono_class_from_mono_type (t), &align) <= sizeof (gpointer)) {
1392 cinfo->ret.storage = RegTypeStructByVal;
1394 cinfo->vtype_retaddr = TRUE;
1396 } else if (!(t->type == MONO_TYPE_GENERICINST && !mono_type_generic_inst_is_valuetype (t)) && mini_is_gsharedvt_type_gsctx (gsctx, t)) {
1397 cinfo->vtype_retaddr = TRUE;
1403 * To simplify get_this_arg_reg () and LLVM integration, emit the vret arg after
1404 * the first argument, allowing 'this' to be always passed in the first arg reg.
1405 * Also do this if the first argument is a reference type, since virtual calls
1406 * are sometimes made using calli without sig->hasthis set, like in the delegate
1409 if (cinfo->vtype_retaddr && !is_pinvoke && (sig->hasthis || (sig->param_count > 0 && MONO_TYPE_IS_REFERENCE (mini_type_get_underlying_type (gsctx, sig->params [0]))))) {
1411 add_general (&gr, &stack_size, cinfo->args + 0, TRUE);
1413 add_general (&gr, &stack_size, &cinfo->args [sig->hasthis + 0], TRUE);
1417 add_general (&gr, &stack_size, &cinfo->ret, TRUE);
1418 cinfo->vret_arg_index = 1;
1422 add_general (&gr, &stack_size, cinfo->args + 0, TRUE);
1426 if (cinfo->vtype_retaddr)
1427 add_general (&gr, &stack_size, &cinfo->ret, TRUE);
/* Main per-parameter loop: classify each argument by its underlying type. */
1430 DEBUG(printf("params: %d\n", sig->param_count));
1431 for (i = pstart; i < sig->param_count; ++i) {
1432 ArgInfo *ainfo = &cinfo->args [n];
1434 if ((sig->call_convention == MONO_CALL_VARARG) && (i == sig->sentinelpos)) {
1435 /* Prevent implicit arguments and sig_cookie from
1436 being passed in registers */
1439 /* Emit the signature cookie just before the implicit arguments */
1440 add_general (&gr, &stack_size, &cinfo->sig_cookie, TRUE);
1442 DEBUG(printf("param %d: ", i));
1443 if (sig->params [i]->byref) {
1444 DEBUG(printf("byref\n"));
1445 add_general (&gr, &stack_size, ainfo, TRUE);
1449 simpletype = mini_type_get_underlying_type (gsctx, sig->params [i]);
1450 switch (simpletype->type) {
1451 case MONO_TYPE_BOOLEAN:
1454 cinfo->args [n].size = 1;
1455 add_general (&gr, &stack_size, ainfo, TRUE);
1458 case MONO_TYPE_CHAR:
1461 cinfo->args [n].size = 2;
1462 add_general (&gr, &stack_size, ainfo, TRUE);
1467 cinfo->args [n].size = 4;
1468 add_general (&gr, &stack_size, ainfo, TRUE);
/* All reference/pointer-like types are a single machine word. */
1474 case MONO_TYPE_FNPTR:
1475 case MONO_TYPE_CLASS:
1476 case MONO_TYPE_OBJECT:
1477 case MONO_TYPE_STRING:
1478 case MONO_TYPE_SZARRAY:
1479 case MONO_TYPE_ARRAY:
1480 cinfo->args [n].size = sizeof (gpointer);
1481 add_general (&gr, &stack_size, ainfo, TRUE);
1484 case MONO_TYPE_GENERICINST:
1485 if (!mono_type_generic_inst_is_valuetype (simpletype)) {
1486 cinfo->args [n].size = sizeof (gpointer);
1487 add_general (&gr, &stack_size, ainfo, TRUE);
1491 if (mini_is_gsharedvt_type_gsctx (gsctx, simpletype)) {
1492 /* gsharedvt arguments are passed by ref */
1493 g_assert (mini_is_gsharedvt_type_gsctx (gsctx, simpletype));
1494 add_general (&gr, &stack_size, ainfo, TRUE);
/* Re-tag the generic storage class with its gsharedvt flavor. */
1495 switch (ainfo->storage) {
1496 case RegTypeGeneral:
1497 ainfo->storage = RegTypeGSharedVtInReg;
1500 ainfo->storage = RegTypeGSharedVtOnStack;
1503 g_assert_not_reached ();
1509 case MONO_TYPE_TYPEDBYREF:
1510 case MONO_TYPE_VALUETYPE: {
1516 if (simpletype->type == MONO_TYPE_TYPEDBYREF) {
1517 size = sizeof (MonoTypedRef);
1518 align = sizeof (gpointer);
1520 MonoClass *klass = mono_class_from_mono_type (sig->params [i]);
1522 size = mono_class_native_size (klass, &align);
1524 size = mini_type_stack_size_full (gsctx, simpletype, &align, FALSE);
1526 DEBUG(printf ("load %d bytes struct\n", size));
/* Round the struct size up to whole words; nwords is how many words it
 * occupies across registers and/or stack. */
1529 align_size += (sizeof (gpointer) - 1);
1530 align_size &= ~(sizeof (gpointer) - 1);
1531 nwords = (align_size + sizeof (gpointer) -1 ) / sizeof (gpointer);
1532 ainfo->storage = RegTypeStructByVal;
1533 ainfo->struct_size = size;
1534 /* FIXME: align stack_size if needed */
1535 if (eabi_supported) {
1536 if (align >= 8 && (gr & 1))
1539 if (gr > ARMREG_R3) {
1541 ainfo->vtsize = nwords;
1543 int rest = ARMREG_R3 - gr + 1;
/* Split the struct: n_in_regs words in r0-r3, the remainder on the stack. */
1544 int n_in_regs = rest >= nwords? nwords: rest;
1546 ainfo->size = n_in_regs;
1547 ainfo->vtsize = nwords - n_in_regs;
1550 nwords -= n_in_regs;
1552 if (sig->call_convention == MONO_CALL_VARARG)
1553 /* This matches the alignment in mono_ArgIterator_IntGetNextArg () */
1554 stack_size = ALIGN_TO (stack_size, align);
1555 ainfo->offset = stack_size;
1556 /*g_print ("offset for arg %d at %d\n", n, stack_size);*/
1557 stack_size += nwords * sizeof (gpointer);
1564 add_general (&gr, &stack_size, ainfo, FALSE);
/* Float/double arguments: hard-float uses VFP regs via add_float, soft-float
 * falls back to core registers (elided branches select between these). */
1571 add_float (&fpr, &stack_size, ainfo, FALSE, &float_spare);
1573 add_general (&gr, &stack_size, ainfo, TRUE);
1581 add_float (&fpr, &stack_size, ainfo, TRUE, &float_spare);
1583 add_general (&gr, &stack_size, ainfo, FALSE);
1588 case MONO_TYPE_MVAR:
1589 /* gsharedvt arguments are passed by ref */
1590 g_assert (mini_is_gsharedvt_type_gsctx (gsctx, simpletype));
1591 add_general (&gr, &stack_size, ainfo, TRUE);
1592 switch (ainfo->storage) {
1593 case RegTypeGeneral:
1594 ainfo->storage = RegTypeGSharedVtInReg;
1597 ainfo->storage = RegTypeGSharedVtOnStack;
1600 g_assert_not_reached ();
1605 g_error ("Can't trampoline 0x%x", sig->params [i]->type);
1609 /* Handle the case where there are no implicit arguments */
1610 if ((sig->call_convention == MONO_CALL_VARARG) && (i == sig->sentinelpos)) {
1611 /* Prevent implicit arguments and sig_cookie from
1612 being passed in registers */
1615 /* Emit the signature cookie just before the implicit arguments */
1616 add_general (&gr, &stack_size, &cinfo->sig_cookie, TRUE);
/* Classify the return value; scalars come back in r0 (or r0/r1 for 64-bit,
 * d0/s0 for hard-float FP). */
1620 simpletype = mini_type_get_underlying_type (gsctx, sig->ret);
1621 switch (simpletype->type) {
1622 case MONO_TYPE_BOOLEAN:
1627 case MONO_TYPE_CHAR:
1633 case MONO_TYPE_FNPTR:
1634 case MONO_TYPE_CLASS:
1635 case MONO_TYPE_OBJECT:
1636 case MONO_TYPE_SZARRAY:
1637 case MONO_TYPE_ARRAY:
1638 case MONO_TYPE_STRING:
1639 cinfo->ret.storage = RegTypeGeneral;
1640 cinfo->ret.reg = ARMREG_R0;
1644 cinfo->ret.storage = RegTypeIRegPair;
1645 cinfo->ret.reg = ARMREG_R0;
1649 cinfo->ret.storage = RegTypeFP;
1651 if (IS_HARD_FLOAT) {
1652 cinfo->ret.reg = ARM_VFP_F0;
1654 cinfo->ret.reg = ARMREG_R0;
1658 case MONO_TYPE_GENERICINST:
1659 if (!mono_type_generic_inst_is_valuetype (simpletype)) {
1660 cinfo->ret.storage = RegTypeGeneral;
1661 cinfo->ret.reg = ARMREG_R0;
1664 // FIXME: Only for variable types
1665 if (mini_is_gsharedvt_type_gsctx (gsctx, simpletype)) {
1666 cinfo->ret.storage = RegTypeStructByAddr;
1667 g_assert (cinfo->vtype_retaddr);
1671 case MONO_TYPE_VALUETYPE:
1672 case MONO_TYPE_TYPEDBYREF:
1673 if (cinfo->ret.storage != RegTypeStructByVal)
1674 cinfo->ret.storage = RegTypeStructByAddr;
1677 case MONO_TYPE_MVAR:
1678 g_assert (mini_is_gsharedvt_type_gsctx (gsctx, simpletype));
1679 cinfo->ret.storage = RegTypeStructByAddr;
1680 g_assert (cinfo->vtype_retaddr);
1682 case MONO_TYPE_VOID:
1685 g_error ("Can't handle as return value 0x%x", sig->ret->type);
1689 /* align stack size to 8 */
1690 DEBUG (printf (" stack size: %d (%d)\n", (stack_size + 15) & ~15, stack_size));
1691 stack_size = (stack_size + 7) & ~7;
1693 cinfo->stack_usage = stack_size;
/*
 * mono_arch_tail_call_supported:
 *
 *   Return whether a tail call from CALLER_SIG to CALLEE_SIG can be emitted.
 * Compares the stack usage of the two call conventions and rejects callees
 * that need more caller stack space, return vtypes through a hidden address,
 * or use more than 16 words of argument stack.
 */
1699 mono_arch_tail_call_supported (MonoCompile *cfg, MonoMethodSignature *caller_sig, MonoMethodSignature *callee_sig)
1701 MonoType *callee_ret;
1705 if (cfg->compile_aot && !cfg->full_aot)
1706 /* OP_TAILCALL doesn't work with AOT */
/* Compute both conventions without a mempool; the CallInfos are heap
 * allocated here (freeing is on an elided line — confirm in full source). */
1709 c1 = get_call_info (NULL, NULL, caller_sig);
1710 c2 = get_call_info (NULL, NULL, callee_sig);
1713 * Tail calls with more callee stack usage than the caller cannot be supported, since
1714 * the extra stack space would be left on the stack after the tail call.
1716 res = c1->stack_usage >= c2->stack_usage;
1717 callee_ret = mini_replace_type (callee_sig->ret);
1718 if (callee_ret && MONO_TYPE_ISSTRUCT (callee_ret) && c2->ret.storage != RegTypeStructByVal)
1719 /* An address on the callee's stack is passed as the first argument */
/* Arbitrary limit: reject callees using more than 16 words of argument
 * stack. */
1722 if (c2->stack_usage > 16 * 4)
/*
 * debug_omit_fp:
 *
 *   Debug helper gating frame-pointer omission: delegates to
 * mono_debug_count () so FP elimination can be bisected at runtime.
 */
1734 debug_omit_fp (void)
1737 return mono_debug_count ();
1744 * mono_arch_compute_omit_fp:
1746 * Determine whenever the frame pointer can be eliminated.
/*
 * mono_arch_compute_omit_fp:
 *
 *   Decide once per method whether the frame pointer can be eliminated
 * (cfg->arch.omit_fp), caching the result in cfg->arch.omit_fp_computed.
 * Starts optimistic (TRUE) and then disables omission for every feature that
 * requires a stable frame: LMF saving, alloca, exception clauses, a param
 * area, managed varargs, tracing/profiling, and any argument whose stack
 * offset depends on the final frame layout.
 */
1749 mono_arch_compute_omit_fp (MonoCompile *cfg)
1751 MonoMethodSignature *sig;
1752 MonoMethodHeader *header;
/* Memoized: bail out if a previous call already decided. */
1756 if (cfg->arch.omit_fp_computed)
1759 header = cfg->header;
1761 sig = mono_method_signature (cfg->method);
1763 if (!cfg->arch.cinfo)
1764 cfg->arch.cinfo = get_call_info (cfg->generic_sharing_context, cfg->mempool, sig);
1765 cinfo = cfg->arch.cinfo;
1768 * FIXME: Remove some of the restrictions.
1770 cfg->arch.omit_fp = TRUE;
1771 cfg->arch.omit_fp_computed = TRUE;
1773 if (cfg->disable_omit_fp)
1774 cfg->arch.omit_fp = FALSE;
1775 if (!debug_omit_fp ())
1776 cfg->arch.omit_fp = FALSE;
1778 if (cfg->method->save_lmf)
1779 cfg->arch.omit_fp = FALSE;
1781 if (cfg->flags & MONO_CFG_HAS_ALLOCA)
1782 cfg->arch.omit_fp = FALSE;
1783 if (header->num_clauses)
1784 cfg->arch.omit_fp = FALSE;
1785 if (cfg->param_area)
1786 cfg->arch.omit_fp = FALSE;
1787 if (!sig->pinvoke && (sig->call_convention == MONO_CALL_VARARG))
1788 cfg->arch.omit_fp = FALSE;
1789 if ((mono_jit_trace_calls != NULL && mono_trace_eval (cfg->method)) ||
1790 (cfg->prof_options & MONO_PROFILE_ENTER_LEAVE))
1791 cfg->arch.omit_fp = FALSE;
/* Stack-passed arguments are addressed relative to the frame; their offsets
 * can't be finalized without a frame pointer. */
1792 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
1793 ArgInfo *ainfo = &cinfo->args [i];
1795 if (ainfo->storage == RegTypeBase || ainfo->storage == RegTypeBaseGen || ainfo->storage == RegTypeStructByVal) {
1797 * The stack offset can only be determined when the frame
1800 cfg->arch.omit_fp = FALSE;
/* Sum the sizes of the locals (used by elided code below this view). */
1805 for (i = cfg->locals_start; i < cfg->num_varinfo; i++) {
1806 MonoInst *ins = cfg->varinfo [i];
1809 locals_size += mono_type_size (ins->inst_vtype, &ialign);
1814 * Set var information according to the calling convention. arm version.
1815 * The locals var stuff should most likely be split in another method.
/*
 * mono_arch_allocate_vars:
 *
 *   Assign a stack offset or register to every variable of the method being
 * compiled: the return value, the hidden vret address, sequence-point and
 * atomic-op scratch slots, locals, 'this', the vararg sig cookie, and each
 * parameter.  Offsets grow upward from sp (MONO_CFG_HAS_SPILLUP); the frame
 * register is sp when the FP was omitted, fp otherwise.
 * NOTE(review): interior lines are elided; alignment constants and some
 * offset increments between the visible statements are not shown here.
 */
1818 mono_arch_allocate_vars (MonoCompile *cfg)
1820 MonoMethodSignature *sig;
1821 MonoMethodHeader *header;
1824 int i, offset, size, align, curinst;
1828 sig = mono_method_signature (cfg->method);
1830 if (!cfg->arch.cinfo)
1831 cfg->arch.cinfo = get_call_info (cfg->generic_sharing_context, cfg->mempool, sig);
1832 cinfo = cfg->arch.cinfo;
1833 sig_ret = mini_replace_type (sig->ret);
1835 mono_arch_compute_omit_fp (cfg);
1837 if (cfg->arch.omit_fp)
1838 cfg->frame_reg = ARMREG_SP;
1840 cfg->frame_reg = ARMREG_FP;
1842 cfg->flags |= MONO_CFG_HAS_SPILLUP;
1844 /* allow room for the vararg method args: void* and long/double */
1845 if (mono_jit_trace_calls != NULL && mono_trace_eval (cfg->method))
1846 cfg->param_area = MAX (cfg->param_area, sizeof (gpointer)*8);
1848 header = cfg->header;
1850 /* See mono_arch_get_global_int_regs () */
1851 if (cfg->flags & MONO_CFG_HAS_CALLS)
1852 cfg->uses_rgctx_reg = TRUE;
1854 if (cfg->frame_reg != ARMREG_SP)
1855 cfg->used_int_regs |= 1 << cfg->frame_reg;
1857 if (cfg->compile_aot || cfg->uses_rgctx_reg || COMPILE_LLVM (cfg))
1858 /* V5 is reserved for passing the vtable/rgctx/IMT method */
1859 cfg->used_int_regs |= (1 << ARMREG_V5);
/* Scalar returns live in r0 as a register variable. */
1863 if (!MONO_TYPE_ISSTRUCT (sig_ret) && !cinfo->vtype_retaddr) {
1864 if (sig_ret->type != MONO_TYPE_VOID) {
1865 cfg->ret->opcode = OP_REGVAR;
1866 cfg->ret->inst_c0 = ARMREG_R0;
1869 /* local vars are at a positive offset from the stack pointer */
1871 * also note that if the function uses alloca, we use FP
1872 * to point at the local variables.
1874 offset = 0; /* linkage area */
1875 /* align the offset to 16 bytes: not sure this is needed here */
1877 //offset &= ~(8 - 1);
1879 /* add parameter area size for called functions */
1880 offset += cfg->param_area;
1883 if (cfg->flags & MONO_CFG_HAS_FPOUT)
1886 /* allow room to save the return value */
1887 if (mono_jit_trace_calls != NULL && mono_trace_eval (cfg->method))
1890 /* the MonoLMF structure is stored just below the stack pointer */
/* Struct-by-val returns get a word-aligned slot below the frame; hidden vret
 * addresses get a word-aligned slot above it. */
1891 if (cinfo->ret.storage == RegTypeStructByVal) {
1892 cfg->ret->opcode = OP_REGOFFSET;
1893 cfg->ret->inst_basereg = cfg->frame_reg;
1894 offset += sizeof (gpointer) - 1;
1895 offset &= ~(sizeof (gpointer) - 1);
1896 cfg->ret->inst_offset = - offset;
1897 offset += sizeof(gpointer);
1898 } else if (cinfo->vtype_retaddr) {
1899 ins = cfg->vret_addr;
1900 offset += sizeof(gpointer) - 1;
1901 offset &= ~(sizeof(gpointer) - 1);
1902 ins->inst_offset = offset;
1903 ins->opcode = OP_REGOFFSET;
1904 ins->inst_basereg = cfg->frame_reg;
1905 if (G_UNLIKELY (cfg->verbose_level > 1)) {
1906 printf ("vret_addr =");
1907 mono_print_ins (cfg->vret_addr);
1909 offset += sizeof(gpointer);
1912 /* Allocate these first so they have a small offset, OP_SEQ_POINT depends on this */
1913 if (cfg->arch.seq_point_info_var) {
1916 ins = cfg->arch.seq_point_info_var;
1920 offset += align - 1;
1921 offset &= ~(align - 1);
1922 ins->opcode = OP_REGOFFSET;
1923 ins->inst_basereg = cfg->frame_reg;
1924 ins->inst_offset = offset;
1927 ins = cfg->arch.ss_trigger_page_var;
1930 offset += align - 1;
1931 offset &= ~(align - 1);
1932 ins->opcode = OP_REGOFFSET;
1933 ins->inst_basereg = cfg->frame_reg;
1934 ins->inst_offset = offset;
/* Soft-breakpoint variables (read/ss-method/bp-method) each get an aligned
 * frame slot, same pattern as above. */
1938 if (cfg->arch.seq_point_read_var) {
1941 ins = cfg->arch.seq_point_read_var;
1945 offset += align - 1;
1946 offset &= ~(align - 1);
1947 ins->opcode = OP_REGOFFSET;
1948 ins->inst_basereg = cfg->frame_reg;
1949 ins->inst_offset = offset;
1952 ins = cfg->arch.seq_point_ss_method_var;
1955 offset += align - 1;
1956 offset &= ~(align - 1);
1957 ins->opcode = OP_REGOFFSET;
1958 ins->inst_basereg = cfg->frame_reg;
1959 ins->inst_offset = offset;
1962 ins = cfg->arch.seq_point_bp_method_var;
1965 offset += align - 1;
1966 offset &= ~(align - 1);
1967 ins->opcode = OP_REGOFFSET;
1968 ins->inst_basereg = cfg->frame_reg;
1969 ins->inst_offset = offset;
1973 if (cfg->has_atomic_exchange_i4 || cfg->has_atomic_cas_i4 || cfg->has_atomic_add_i4) {
1974 /* Allocate a temporary used by the atomic ops */
1978 /* Allocate a local slot to hold the sig cookie address */
1979 offset += align - 1;
1980 offset &= ~(align - 1);
1981 cfg->arch.atomic_tmp_offset = offset;
1984 cfg->arch.atomic_tmp_offset = -1;
1987 cfg->locals_min_stack_offset = offset;
/* Lay out the method's locals. */
1989 curinst = cfg->locals_start;
1990 for (i = curinst; i < cfg->num_varinfo; ++i) {
1993 ins = cfg->varinfo [i];
1994 if ((ins->flags & MONO_INST_IS_DEAD) || ins->opcode == OP_REGVAR || ins->opcode == OP_REGOFFSET)
1997 t = ins->inst_vtype;
1998 if (cfg->gsharedvt && mini_is_gsharedvt_variable_type (cfg, t))
2001 /* inst->backend.is_pinvoke indicates native sized value types, this is used by the
2002 * pinvoke wrappers when they call functions returning structure */
2003 if (ins->backend.is_pinvoke && MONO_TYPE_ISSTRUCT (t) && t->type != MONO_TYPE_TYPEDBYREF) {
2004 size = mono_class_native_size (mono_class_from_mono_type (t), &ualign);
2008 size = mono_type_size (t, &align);
2010 /* FIXME: if a structure is misaligned, our memcpy doesn't work,
2011 * since it loads/stores misaligned words, which don't do the right thing.
2013 if (align < 4 && size >= 4)
2015 if (ALIGN_TO (offset, align) > ALIGN_TO (offset, 4))
2016 mini_gc_set_slot_type_from_fp (cfg, ALIGN_TO (offset, 4), SLOT_NOREF);
2017 offset += align - 1;
2018 offset &= ~(align - 1);
2019 ins->opcode = OP_REGOFFSET;
2020 ins->inst_offset = offset;
2021 ins->inst_basereg = cfg->frame_reg;
2023 //g_print ("allocating local %d to %d\n", i, inst->inst_offset);
2026 cfg->locals_max_stack_offset = offset;
/* 'this' (when present and not a regvar) gets a word-aligned slot. */
2030 ins = cfg->args [curinst];
2031 if (ins->opcode != OP_REGVAR) {
2032 ins->opcode = OP_REGOFFSET;
2033 ins->inst_basereg = cfg->frame_reg;
2034 offset += sizeof (gpointer) - 1;
2035 offset &= ~(sizeof (gpointer) - 1);
2036 ins->inst_offset = offset;
2037 offset += sizeof (gpointer);
2042 if (sig->call_convention == MONO_CALL_VARARG) {
2046 /* Allocate a local slot to hold the sig cookie address */
2047 offset += align - 1;
2048 offset &= ~(align - 1);
2049 cfg->sig_cookie = offset;
/* Lay out the remaining parameters. */
2053 for (i = 0; i < sig->param_count; ++i) {
2054 ins = cfg->args [curinst];
2056 if (ins->opcode != OP_REGVAR) {
2057 ins->opcode = OP_REGOFFSET;
2058 ins->inst_basereg = cfg->frame_reg;
2059 size = mini_type_stack_size_full (cfg->generic_sharing_context, sig->params [i], &ualign, sig->pinvoke);
2061 /* FIXME: if a structure is misaligned, our memcpy doesn't work,
2062 * since it loads/stores misaligned words, which don't do the right thing.
2064 if (align < 4 && size >= 4)
2066 /* The code in the prolog () stores words when storing vtypes received in a register */
2067 if (MONO_TYPE_ISSTRUCT (sig->params [i]))
2069 if (ALIGN_TO (offset, align) > ALIGN_TO (offset, 4))
2070 mini_gc_set_slot_type_from_fp (cfg, ALIGN_TO (offset, 4), SLOT_NOREF);
2071 offset += align - 1;
2072 offset &= ~(align - 1);
2073 ins->inst_offset = offset;
2079 /* align the offset to 8 bytes */
2080 if (ALIGN_TO (offset, 8) > ALIGN_TO (offset, 4))
2081 mini_gc_set_slot_type_from_fp (cfg, ALIGN_TO (offset, 4), SLOT_NOREF);
2086 cfg->stack_offset = offset;
/*
 * mono_arch_create_vars:
 *
 *   Create the architecture-specific compile-time variables this backend
 * needs before variable allocation: VFP scratch slots for hard-float, the
 * hidden vret address argument, and the sequence-point support variables
 * used by the soft debugger (soft breakpoints or AOT/trigger-page based).
 */
2090 mono_arch_create_vars (MonoCompile *cfg)
2092 MonoMethodSignature *sig;
2096 sig = mono_method_signature (cfg->method);
2098 if (!cfg->arch.cinfo)
2099 cfg->arch.cinfo = get_call_info (cfg->generic_sharing_context, cfg->mempool, sig);
2100 cinfo = cfg->arch.cinfo;
/* Two volatile double-sized locals used as VFP spill scratch space. */
2102 if (IS_HARD_FLOAT) {
2103 for (i = 0; i < 2; i++) {
2104 MonoInst *inst = mono_compile_create_var (cfg, &mono_defaults.double_class->byval_arg, OP_LOCAL);
2105 inst->flags |= MONO_INST_VOLATILE;
2107 cfg->arch.vfp_scratch_slots [i] = (gpointer) inst;
2111 if (cinfo->ret.storage == RegTypeStructByVal)
2112 cfg->ret_var_is_local = TRUE;
2114 if (cinfo->vtype_retaddr) {
/* Hidden argument carrying the address where the vtype result is stored. */
2115 cfg->vret_addr = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_ARG);
2116 if (G_UNLIKELY (cfg->verbose_level > 1)) {
2117 printf ("vret_addr = ");
2118 mono_print_ins (cfg->vret_addr);
2122 if (cfg->gen_seq_points) {
2123 if (cfg->soft_breakpoints) {
2124 MonoInst *ins = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
2125 ins->flags |= MONO_INST_VOLATILE;
2126 cfg->arch.seq_point_read_var = ins;
2128 ins = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
2129 ins->flags |= MONO_INST_VOLATILE;
2130 cfg->arch.seq_point_ss_method_var = ins;
2132 ins = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
2133 ins->flags |= MONO_INST_VOLATILE;
2134 cfg->arch.seq_point_bp_method_var = ins;
/* Soft breakpoints and AOT are mutually exclusive here. */
2136 g_assert (!cfg->compile_aot);
2137 } else if (cfg->compile_aot) {
2138 MonoInst *ins = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
2139 ins->flags |= MONO_INST_VOLATILE;
2140 cfg->arch.seq_point_info_var = ins;
2142 /* Allocate a separate variable for this to save 1 load per seq point */
2143 ins = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
2144 ins->flags |= MONO_INST_VOLATILE;
2145 cfg->arch.ss_trigger_page_var = ins;
/*
 * emit_sig_cookie:
 *
 *   Emit the IR that stores the vararg signature cookie at its stack slot for
 * CALL.  A trimmed copy of the signature (implicit args stripped, sentinel at
 * 0) is what mono_ArgIterator_Setup expects to find there.  No-op for tail
 * calls.
 */
2151 emit_sig_cookie (MonoCompile *cfg, MonoCallInst *call, CallInfo *cinfo)
2153 MonoMethodSignature *tmp_sig;
2156 if (call->tail_call)
/* The cookie is always passed on the stack (see get_call_info). */
2159 g_assert (cinfo->sig_cookie.storage == RegTypeBase);
2162 * mono_ArgIterator_Setup assumes the signature cookie is
2163 * passed first and all the arguments which were before it are
2164 * passed on the stack after the signature. So compensate by
2165 * passing a different signature.
2167 tmp_sig = mono_metadata_signature_dup (call->signature);
2168 tmp_sig->param_count -= call->signature->sentinelpos;
2169 tmp_sig->sentinelpos = 0;
2170 memcpy (tmp_sig->params, call->signature->params + call->signature->sentinelpos, tmp_sig->param_count * sizeof (MonoType*));
2172 sig_reg = mono_alloc_ireg (cfg);
2173 MONO_EMIT_NEW_SIGNATURECONST (cfg, sig_reg, tmp_sig);
2175 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, ARMREG_SP, cinfo->sig_cookie.offset, sig_reg);
/*
 * mono_arch_get_llvm_call_info:
 *
 *   Translate this backend's CallInfo for SIG into the LLVMCallInfo the LLVM
 * back end understands.  Conventions LLVM cannot express (vtype-by-val on the
 * stack, unknown storage classes) disable LLVM compilation for the method via
 * cfg->disable_llvm and record a human-readable reason.
 */
2180 mono_arch_get_llvm_call_info (MonoCompile *cfg, MonoMethodSignature *sig)
2185 LLVMCallInfo *linfo;
2187 n = sig->param_count + sig->hasthis;
2189 cinfo = get_call_info (cfg->generic_sharing_context, cfg->mempool, sig);
2191 linfo = mono_mempool_alloc0 (cfg->mempool, sizeof (LLVMCallInfo) + (sizeof (LLVMArgInfo) * n));
2194 * LLVM always uses the native ABI while we use our own ABI, the
2195 * only difference is the handling of vtypes:
2196 * - we only pass/receive them in registers in some cases, and only
2197 * in 1 or 2 integer registers.
2199 if (cinfo->vtype_retaddr) {
2200 /* Vtype returned using a hidden argument */
2201 linfo->ret.storage = LLVMArgVtypeRetAddr;
2202 linfo->vret_arg_index = cinfo->vret_arg_index;
2203 } else if (cinfo->ret.storage != RegTypeGeneral && cinfo->ret.storage != RegTypeNone && cinfo->ret.storage != RegTypeFP && cinfo->ret.storage != RegTypeIRegPair) {
2204 cfg->exception_message = g_strdup ("unknown ret conv");
2205 cfg->disable_llvm = TRUE;
2209 for (i = 0; i < n; ++i) {
2210 ainfo = cinfo->args + i;
2212 linfo->args [i].storage = LLVMArgNone;
2214 switch (ainfo->storage) {
2215 case RegTypeGeneral:
2216 case RegTypeIRegPair:
2218 linfo->args [i].storage = LLVMArgInIReg;
2220 case RegTypeStructByVal:
2221 // FIXME: Passing entirely on the stack or split reg/stack
2222 if (ainfo->vtsize == 0 && ainfo->size <= 2) {
/* Small struct entirely in 1-2 integer registers. */
2223 linfo->args [i].storage = LLVMArgVtypeInReg;
2224 linfo->args [i].pair_storage [0] = LLVMArgInIReg;
2225 if (ainfo->size == 2)
2226 linfo->args [i].pair_storage [1] = LLVMArgInIReg;
2228 linfo->args [i].pair_storage [1] = LLVMArgNone;
2230 cfg->exception_message = g_strdup_printf ("vtype-by-val on stack");
2231 cfg->disable_llvm = TRUE;
2235 cfg->exception_message = g_strdup_printf ("ainfo->storage (%d)", ainfo->storage);
2236 cfg->disable_llvm = TRUE;
/*
 * mono_arch_emit_call:
 *
 *   Emit the IR that moves every argument of CALL into the location the
 * calling convention assigned it (see get_call_info): register moves for
 * core-register args, FGETLOW32/FGETHIGH32 or spill-and-reload sequences for
 * floats on soft-float/softfp, stack stores for stack args, OP_OUTARG_VT for
 * vtypes, and the hidden vret address when a vtype is returned by reference.
 * NOTE(review): interior lines are elided; some branch conditions between the
 * visible cases are not shown here.
 */
2246 mono_arch_emit_call (MonoCompile *cfg, MonoCallInst *call)
2249 MonoMethodSignature *sig;
2253 sig = call->signature;
2254 n = sig->param_count + sig->hasthis;
2256 cinfo = get_call_info (cfg->generic_sharing_context, NULL, sig);
2258 for (i = 0; i < n; ++i) {
2259 ArgInfo *ainfo = cinfo->args + i;
/* For the implicit 'this', classify as a plain pointer. */
2262 if (i >= sig->hasthis)
2263 t = sig->params [i - sig->hasthis];
2265 t = &mono_defaults.int_class->byval_arg;
2266 t = mini_type_get_underlying_type (cfg->generic_sharing_context, t);
2268 if ((sig->call_convention == MONO_CALL_VARARG) && (i == sig->sentinelpos)) {
2269 /* Emit the signature cookie just before the implicit arguments */
2270 emit_sig_cookie (cfg, call, cinfo);
2273 in = call->args [i];
2275 switch (ainfo->storage) {
2276 case RegTypeGeneral:
2277 case RegTypeIRegPair:
/* 64-bit integer: move both halves (dreg+1/dreg+2) into consecutive regs. */
2278 if (!t->byref && ((t->type == MONO_TYPE_I8) || (t->type == MONO_TYPE_U8))) {
2279 MONO_INST_NEW (cfg, ins, OP_MOVE);
2280 ins->dreg = mono_alloc_ireg (cfg);
2281 ins->sreg1 = in->dreg + 1;
2282 MONO_ADD_INS (cfg->cbb, ins);
2283 mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, ainfo->reg, FALSE);
2285 MONO_INST_NEW (cfg, ins, OP_MOVE);
2286 ins->dreg = mono_alloc_ireg (cfg);
2287 ins->sreg1 = in->dreg + 2;
2288 MONO_ADD_INS (cfg->cbb, ins);
2289 mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, ainfo->reg + 1, FALSE);
2290 } else if (!t->byref && ((t->type == MONO_TYPE_R8) || (t->type == MONO_TYPE_R4))) {
2291 if (ainfo->size == 4) {
2292 if (IS_SOFT_FLOAT) {
2293 /* mono_emit_call_args () have already done the r8->r4 conversion */
2294 /* The converted value is in an int vreg */
2295 MONO_INST_NEW (cfg, ins, OP_MOVE);
2296 ins->dreg = mono_alloc_ireg (cfg);
2297 ins->sreg1 = in->dreg;
2298 MONO_ADD_INS (cfg->cbb, ins);
2299 mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, ainfo->reg, FALSE);
/* softfp: spill the FP value to the param area, reload as an int. */
2303 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORER4_MEMBASE_REG, ARMREG_SP, (cfg->param_area - 8), in->dreg);
2304 creg = mono_alloc_ireg (cfg);
2305 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOAD_MEMBASE, creg, ARMREG_SP, (cfg->param_area - 8));
2306 mono_call_inst_add_outarg_reg (cfg, call, creg, ainfo->reg, FALSE);
2309 if (IS_SOFT_FLOAT) {
/* Soft float double: extract low/high 32-bit words into two core regs. */
2310 MONO_INST_NEW (cfg, ins, OP_FGETLOW32);
2311 ins->dreg = mono_alloc_ireg (cfg);
2312 ins->sreg1 = in->dreg;
2313 MONO_ADD_INS (cfg->cbb, ins);
2314 mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, ainfo->reg, FALSE);
2316 MONO_INST_NEW (cfg, ins, OP_FGETHIGH32);
2317 ins->dreg = mono_alloc_ireg (cfg);
2318 ins->sreg1 = in->dreg;
2319 MONO_ADD_INS (cfg->cbb, ins);
2320 mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, ainfo->reg + 1, FALSE);
/* softfp double: spill to memory and reload both words as ints. */
2324 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORER8_MEMBASE_REG, ARMREG_SP, (cfg->param_area - 8), in->dreg);
2325 creg = mono_alloc_ireg (cfg);
2326 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOAD_MEMBASE, creg, ARMREG_SP, (cfg->param_area - 8));
2327 mono_call_inst_add_outarg_reg (cfg, call, creg, ainfo->reg, FALSE);
2328 creg = mono_alloc_ireg (cfg);
2329 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOAD_MEMBASE, creg, ARMREG_SP, (cfg->param_area - 8 + 4));
2330 mono_call_inst_add_outarg_reg (cfg, call, creg, ainfo->reg + 1, FALSE);
2333 cfg->flags |= MONO_CFG_HAS_FPOUT;
/* Default: single-word value, plain register move. */
2335 MONO_INST_NEW (cfg, ins, OP_MOVE);
2336 ins->dreg = mono_alloc_ireg (cfg);
2337 ins->sreg1 = in->dreg;
2338 MONO_ADD_INS (cfg->cbb, ins);
2340 mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, ainfo->reg, FALSE);
2343 case RegTypeStructByAddr:
2346 /* FIXME: where si the data allocated? */
2347 arg->backend.reg3 = ainfo->reg;
2348 call->used_iregs |= 1 << ainfo->reg;
2349 g_assert_not_reached ();
/* Vtypes: delegate to mono_arch_emit_outarg_vt through OP_OUTARG_VT, with a
 * private copy of the ArgInfo attached to the instruction. */
2352 case RegTypeStructByVal:
2353 case RegTypeGSharedVtInReg:
2354 case RegTypeGSharedVtOnStack:
2355 MONO_INST_NEW (cfg, ins, OP_OUTARG_VT);
2356 ins->opcode = OP_OUTARG_VT;
2357 ins->sreg1 = in->dreg;
2358 ins->klass = in->klass;
2359 ins->inst_p0 = call;
2360 ins->inst_p1 = mono_mempool_alloc (cfg->mempool, sizeof (ArgInfo));
2361 memcpy (ins->inst_p1, ainfo, sizeof (ArgInfo));
2362 mono_call_inst_add_outarg_vt (cfg, call, ins);
2363 MONO_ADD_INS (cfg->cbb, ins);
/* Stack-passed argument (RegTypeBase, per the elided case label): store at
 * its sp-relative offset with a width matching the type. */
2366 if (!t->byref && ((t->type == MONO_TYPE_I8) || (t->type == MONO_TYPE_U8))) {
2367 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, ARMREG_SP, ainfo->offset, in->dreg);
2368 } else if (!t->byref && ((t->type == MONO_TYPE_R4) || (t->type == MONO_TYPE_R8))) {
2369 if (t->type == MONO_TYPE_R8) {
2370 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORER8_MEMBASE_REG, ARMREG_SP, ainfo->offset, in->dreg);
2373 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, ARMREG_SP, ainfo->offset, in->dreg);
2375 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORER4_MEMBASE_REG, ARMREG_SP, ainfo->offset, in->dreg);
2378 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, ARMREG_SP, ainfo->offset, in->dreg);
2381 case RegTypeBaseGen:
/* 64-bit value split between r3 and the stack (byte-order dependent). */
2382 if (!t->byref && ((t->type == MONO_TYPE_I8) || (t->type == MONO_TYPE_U8))) {
2383 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, ARMREG_SP, ainfo->offset, (G_BYTE_ORDER == G_BIG_ENDIAN) ? in->dreg + 1 : in->dreg + 2);
2384 MONO_INST_NEW (cfg, ins, OP_MOVE);
2385 ins->dreg = mono_alloc_ireg (cfg);
2386 ins->sreg1 = G_BYTE_ORDER == G_BIG_ENDIAN ? in->dreg + 2 : in->dreg + 1;
2387 MONO_ADD_INS (cfg->cbb, ins);
2388 mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, ARMREG_R3, FALSE);
2389 } else if (!t->byref && (t->type == MONO_TYPE_R8)) {
2392 /* This should work for soft-float as well */
2394 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORER8_MEMBASE_REG, ARMREG_SP, (cfg->param_area - 8), in->dreg);
2395 creg = mono_alloc_ireg (cfg);
2396 mono_call_inst_add_outarg_reg (cfg, call, creg, ARMREG_R3, FALSE);
2397 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOAD_MEMBASE, creg, ARMREG_SP, (cfg->param_area - 8));
2398 creg = mono_alloc_ireg (cfg);
2399 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOAD_MEMBASE, creg, ARMREG_SP, (cfg->param_area - 4));
2400 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, ARMREG_SP, ainfo->offset, creg);
2401 cfg->flags |= MONO_CFG_HAS_FPOUT;
2403 g_assert_not_reached ();
/* RegTypeFP (hard-float): doubles can use FMOVE directly... */
2407 int fdreg = mono_alloc_freg (cfg);
2409 if (ainfo->size == 8) {
2410 MONO_INST_NEW (cfg, ins, OP_FMOVE);
2411 ins->sreg1 = in->dreg;
2413 MONO_ADD_INS (cfg->cbb, ins);
2415 mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, ainfo->reg, TRUE);
2420 * Mono's register allocator doesn't speak single-precision registers that
2421 * overlap double-precision registers (i.e. armhf). So we have to work around
2422 * the register allocator and load the value from memory manually.
2424 * So we create a variable for the float argument and an instruction to store
2425 * the argument into the variable. We then store the list of these arguments
2426 * in cfg->float_args. This list is then used by emit_float_args later to
2427 * pass the arguments in the various call opcodes.
2429 * This is not very nice, and we should really try to fix the allocator.
2432 MonoInst *float_arg = mono_compile_create_var (cfg, &mono_defaults.single_class->byval_arg, OP_LOCAL);
2434 /* Make sure the instruction isn't seen as pointless and removed.
2436 float_arg->flags |= MONO_INST_VOLATILE;
2438 MONO_EMIT_NEW_UNALU (cfg, OP_FMOVE, float_arg->dreg, in->dreg);
2440 /* We use the dreg to look up the instruction later. The hreg is used to
2441 * emit the instruction that loads the value into the FP reg.
2443 fad = mono_mempool_alloc0 (cfg->mempool, sizeof (FloatArgData));
2444 fad->vreg = float_arg->dreg;
2445 fad->hreg = ainfo->reg;
2447 call->float_args = g_slist_append_mempool (cfg->mempool, call->float_args, fad);
2450 call->used_iregs |= 1 << ainfo->reg;
2451 cfg->flags |= MONO_CFG_HAS_FPOUT;
2455 g_assert_not_reached ();
2459 /* Handle the case where there are no implicit arguments */
2460 if (!sig->pinvoke && (sig->call_convention == MONO_CALL_VARARG) && (n == sig->sentinelpos))
2461 emit_sig_cookie (cfg, call, cinfo);
2463 if (cinfo->ret.storage == RegTypeStructByVal) {
2464 /* The JIT will transform this into a normal call */
2465 call->vret_in_reg = TRUE;
2466 } else if (cinfo->vtype_retaddr) {
/* Pass the address of the return buffer in the designated register. */
2468 MONO_INST_NEW (cfg, vtarg, OP_MOVE);
2469 vtarg->sreg1 = call->vret_var->dreg;
2470 vtarg->dreg = mono_alloc_preg (cfg);
2471 MONO_ADD_INS (cfg->cbb, vtarg);
2473 mono_call_inst_add_outarg_reg (cfg, call, vtarg->dreg, cinfo->ret.reg, FALSE);
2476 call->stack_usage = cinfo->stack_usage;
/*
 * mono_arch_emit_outarg_vt:
 *
 *   Lower an OP_OUTARG_VT created by mono_arch_emit_call: gsharedvt vtypes
 * pass their address (in a register or on the stack); by-val structs load
 * ainfo->size words from SRC into consecutive argument registers — using
 * byte/short loads and shifts for sub-word structs to avoid misaligned
 * accesses — and memcpy the overflow (ainfo->vtsize words) to the stack area.
 */
2482 mono_arch_emit_outarg_vt (MonoCompile *cfg, MonoInst *ins, MonoInst *src)
2484 MonoCallInst *call = (MonoCallInst*)ins->inst_p0;
2485 ArgInfo *ainfo = ins->inst_p1;
2486 int ovf_size = ainfo->vtsize;
2487 int doffset = ainfo->offset;
2488 int struct_size = ainfo->struct_size;
2489 int i, soffset, dreg, tmpreg;
2491 if (ainfo->storage == RegTypeGSharedVtInReg) {
/* Pass the vtype address itself in a core register. */
2493 mono_call_inst_add_outarg_reg (cfg, call, src->dreg, ainfo->reg, FALSE);
2496 if (ainfo->storage == RegTypeGSharedVtOnStack) {
2497 /* Pass by addr on stack */
2498 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, ARMREG_SP, ainfo->offset, src->dreg);
/* Load the register-resident part of the struct, one word per arg reg. */
2503 for (i = 0; i < ainfo->size; ++i) {
2504 dreg = mono_alloc_ireg (cfg);
2505 switch (struct_size) {
2507 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, dreg, src->dreg, soffset);
2510 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, dreg, src->dreg, soffset);
/* 3-byte struct: assemble the word from three byte loads (little-endian
 * ordering via the shift amounts) to avoid a misaligned word load. */
2513 tmpreg = mono_alloc_ireg (cfg);
2514 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, dreg, src->dreg, soffset);
2515 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, tmpreg, src->dreg, soffset + 1);
2516 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, tmpreg, tmpreg, 8);
2517 MONO_EMIT_NEW_BIALU (cfg, OP_IOR, dreg, dreg, tmpreg);
2518 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, tmpreg, src->dreg, soffset + 2);
2519 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, tmpreg, tmpreg, 16);
2520 MONO_EMIT_NEW_BIALU (cfg, OP_IOR, dreg, dreg, tmpreg);
2523 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, dreg, src->dreg, soffset);
2526 mono_call_inst_add_outarg_reg (cfg, call, dreg, ainfo->reg + i, FALSE);
2527 soffset += sizeof (gpointer);
2528 struct_size -= sizeof (gpointer);
2530 //g_print ("vt size: %d at R%d + %d\n", doffset, vt->inst_basereg, vt->inst_offset);
/* Copy whatever did not fit in registers to the stack argument area. */
2532 mini_emit_memcpy (cfg, ARMREG_SP, doffset, src->dreg, soffset, MIN (ovf_size * sizeof (gpointer), struct_size), struct_size < 4 ? 1 : 4);
/*
 * mono_arch_emit_setret:
 *
 *   Emit IR that moves the method's return value VAL into the ABI return
 * location. I8/U8 pairs use OP_SETLRET (two sregs), floats use OP_SETFRET or
 * a plain move depending on the FPU mode; everything else is an OP_MOVE into
 * cfg->ret. NOTE(review): elided listing — the switch header selecting on the
 * FPU mode and several break/return lines are not visible here.
 */
2536 mono_arch_emit_setret (MonoCompile *cfg, MonoMethod *method, MonoInst *val)
2538 MonoType *ret = mini_type_get_underlying_type (cfg->generic_sharing_context, mono_method_signature (method)->ret);
2541 if (ret->type == MONO_TYPE_I8 || ret->type == MONO_TYPE_U8) {
2544 if (COMPILE_LLVM (cfg)) {
2545 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, cfg->ret->dreg, val->dreg);
/* Non-LLVM: a 64-bit value lives in a vreg pair (dreg+1 = low, dreg+2 = high). */
2547 MONO_INST_NEW (cfg, ins, OP_SETLRET);
2548 ins->sreg1 = val->dreg + 1;
2549 ins->sreg2 = val->dreg + 2;
2550 MONO_ADD_INS (cfg->cbb, ins);
2555 case MONO_ARM_FPU_NONE:
2556 if (ret->type == MONO_TYPE_R8) {
2559 MONO_INST_NEW (cfg, ins, OP_SETFRET);
2560 ins->dreg = cfg->ret->dreg;
2561 ins->sreg1 = val->dreg;
2562 MONO_ADD_INS (cfg->cbb, ins);
2565 if (ret->type == MONO_TYPE_R4) {
2566 /* Already converted to an int in method_to_ir () */
2567 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, cfg->ret->dreg, val->dreg);
2571 case MONO_ARM_FPU_VFP:
2572 case MONO_ARM_FPU_VFP_HARD:
2573 if (ret->type == MONO_TYPE_R8 || ret->type == MONO_TYPE_R4) {
2576 MONO_INST_NEW (cfg, ins, OP_SETFRET);
2577 ins->dreg = cfg->ret->dreg;
2578 ins->sreg1 = val->dreg;
2579 MONO_ADD_INS (cfg->cbb, ins);
2584 g_assert_not_reached ();
/* Default: plain integer/pointer return in r0. */
2588 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, cfg->ret->dreg, val->dreg);
2591 #endif /* #ifndef DISABLE_JIT */
2594 mono_arch_is_inst_imm (gint64 imm)
2600 MonoMethodSignature *sig;
2603 MonoType **param_types;
/*
 * dyn_call_supported:
 *
 *   Decide whether the dynamic-call (mono_arch_start_dyn_call) fast path can
 * handle SIG given its computed CallInfo: all arguments must fit in the
 * PARAM_REGS registers plus DYN_CALL_STACK_ARGS stack slots, and both the
 * return storage and each argument storage must be one of the supported
 * kinds. NOTE(review): the FALSE/TRUE returns and several case labels are in
 * elided lines — confirm against the full source.
 */
2607 dyn_call_supported (CallInfo *cinfo, MonoMethodSignature *sig)
2611 if (sig->hasthis + sig->param_count > PARAM_REGS + DYN_CALL_STACK_ARGS)
2614 switch (cinfo->ret.storage) {
2616 case RegTypeGeneral:
2617 case RegTypeIRegPair:
2618 case RegTypeStructByAddr:
2629 for (i = 0; i < cinfo->nargs; ++i) {
2630 ArgInfo *ainfo = &cinfo->args [i];
2633 switch (ainfo->storage) {
2634 case RegTypeGeneral:
2636 case RegTypeIRegPair:
/* Stack-passed args must land inside the small fixed DynCallArgs area. */
2639 if (ainfo->offset >= (DYN_CALL_STACK_ARGS * sizeof (gpointer)))
2642 case RegTypeStructByVal:
2643 if (ainfo->size == 0)
2644 last_slot = PARAM_REGS + (ainfo->offset / 4) + ainfo->vtsize;
2646 last_slot = ainfo->reg + ainfo->size + ainfo->vtsize;
2647 if (last_slot >= PARAM_REGS + DYN_CALL_STACK_ARGS)
2655 // FIXME: Can't use cinfo only as it doesn't contain info about I8/float */
2656 for (i = 0; i < sig->param_count; ++i) {
2657 MonoType *t = sig->params [i];
/*
 * mono_arch_dyn_call_prepare:
 *
 *   Build the per-signature ArchDynCallInfo used by start/finish_dyn_call:
 * computes the CallInfo, bails out (returning NULL in an elided line,
 * presumably after freeing cinfo — confirm) when the signature is not
 * supported, and caches the replaced return/parameter types.
 * Caller owns the result and releases it with mono_arch_dyn_call_free ().
 */
2683 mono_arch_dyn_call_prepare (MonoMethodSignature *sig)
2685 ArchDynCallInfo *info;
2689 cinfo = get_call_info (NULL, NULL, sig);
2691 if (!dyn_call_supported (cinfo, sig)) {
2696 info = g_new0 (ArchDynCallInfo, 1);
2697 // FIXME: Preprocess the info to speed up start_dyn_call ()
2699 info->cinfo = cinfo;
2700 info->rtype = mini_replace_type (sig->ret);
2701 info->param_types = g_new0 (MonoType*, sig->param_count);
2702 for (i = 0; i < sig->param_count; ++i)
2703 info->param_types [i] = mini_replace_type (sig->params [i]);
2705 return (MonoDynCallInfo*)info;
/*
 * mono_arch_dyn_call_free:
 *
 *   Release an ArchDynCallInfo created by mono_arch_dyn_call_prepare ().
 * Only the g_free of cinfo is visible here; the frees of param_types and of
 * ainfo itself are presumably in elided lines — TODO confirm no leak against
 * the full source.
 */
2709 mono_arch_dyn_call_free (MonoDynCallInfo *info)
2711 ArchDynCallInfo *ainfo = (ArchDynCallInfo*)info;
2713 g_free (ainfo->cinfo);
/*
 * mono_arch_start_dyn_call:
 *
 *   Marshal ARGS (an array of pointers to the actual argument values) into
 * the DynCallArgs structure in BUF, which the dyn-call trampoline later loads
 * into r0-r3 and the stack. The `this` pointer and/or the vtype return
 * address go into the first register slot(s); each remaining argument is
 * widened/copied into p->regs according to its MonoType. NOTE(review):
 * elided listing — greg/pindex initialization, the switch header on t->type,
 * many case labels and break statements are not visible here.
 */
2718 mono_arch_start_dyn_call (MonoDynCallInfo *info, gpointer **args, guint8 *ret, guint8 *buf, int buf_len)
2720 ArchDynCallInfo *dinfo = (ArchDynCallInfo*)info;
2721 DynCallArgs *p = (DynCallArgs*)buf;
2722 int arg_index, greg, i, j, pindex;
2723 MonoMethodSignature *sig = dinfo->sig;
2725 g_assert (buf_len >= sizeof (DynCallArgs));
/* `this` (or the vret address when it comes first) occupies the first reg. */
2734 if (sig->hasthis || dinfo->cinfo->vret_arg_index == 1) {
2735 p->regs [greg ++] = (mgreg_t)*(args [arg_index ++]);
2740 if (dinfo->cinfo->vtype_retaddr)
2741 p->regs [greg ++] = (mgreg_t)ret;
2743 for (i = pindex; i < sig->param_count; i++) {
2744 MonoType *t = dinfo->param_types [i];
2745 gpointer *arg = args [arg_index ++];
2746 ArgInfo *ainfo = &dinfo->cinfo->args [i + sig->hasthis];
/* Map the ABI storage to a slot index in p->regs (register slots first,
 * then the DYN_CALL_STACK_ARGS stack slots). */
2749 if (ainfo->storage == RegTypeGeneral || ainfo->storage == RegTypeIRegPair || ainfo->storage == RegTypeStructByVal)
2751 else if (ainfo->storage == RegTypeBase)
2752 slot = PARAM_REGS + (ainfo->offset / 4);
2754 g_assert_not_reached ();
/* Byref arguments: pass the pointer itself. */
2757 p->regs [slot] = (mgreg_t)*arg;
2762 case MONO_TYPE_STRING:
2763 case MONO_TYPE_CLASS:
2764 case MONO_TYPE_ARRAY:
2765 case MONO_TYPE_SZARRAY:
2766 case MONO_TYPE_OBJECT:
2770 p->regs [slot] = (mgreg_t)*arg;
2772 case MONO_TYPE_BOOLEAN:
2774 p->regs [slot] = *(guint8*)arg;
2777 p->regs [slot] = *(gint8*)arg;
2780 p->regs [slot] = *(gint16*)arg;
2783 case MONO_TYPE_CHAR:
2784 p->regs [slot] = *(guint16*)arg;
2787 p->regs [slot] = *(gint32*)arg;
2790 p->regs [slot] = *(guint32*)arg;
/* 64-bit values take two consecutive slots. */
2794 p->regs [slot ++] = (mgreg_t)arg [0];
2795 p->regs [slot] = (mgreg_t)arg [1];
2798 p->regs [slot] = *(mgreg_t*)arg;
2801 p->regs [slot ++] = (mgreg_t)arg [0];
2802 p->regs [slot] = (mgreg_t)arg [1];
2804 case MONO_TYPE_GENERICINST:
2805 if (MONO_TYPE_IS_REFERENCE (t)) {
2806 p->regs [slot] = (mgreg_t)*arg;
2811 case MONO_TYPE_VALUETYPE:
2812 g_assert (ainfo->storage == RegTypeStructByVal);
2814 if (ainfo->size == 0)
2815 slot = PARAM_REGS + (ainfo->offset / 4);
/* Copy the struct word-by-word into consecutive slots. */
2819 for (j = 0; j < ainfo->size + ainfo->vtsize; ++j)
2820 p->regs [slot ++] = ((mgreg_t*)arg) [j];
2823 g_assert_not_reached ();
/*
 * mono_arch_finish_dyn_call:
 *
 *   After the dyn-call trampoline returns, copy the raw result words
 * (res/res2, captured from r0/r1) from BUF into the caller-provided return
 * buffer, narrowing/reinterpreting according to the cached return type.
 * NOTE(review): elided listing — several case labels (I1, U2, I4/U4, I8/U8,
 * R4) and break statements fall in missing lines.
 */
2829 mono_arch_finish_dyn_call (MonoDynCallInfo *info, guint8 *buf)
2831 ArchDynCallInfo *ainfo = (ArchDynCallInfo*)info;
2832 MonoType *ptype = ainfo->rtype;
2833 guint8 *ret = ((DynCallArgs*)buf)->ret;
2834 mgreg_t res = ((DynCallArgs*)buf)->res;
2835 mgreg_t res2 = ((DynCallArgs*)buf)->res2;
2837 switch (ptype->type) {
2838 case MONO_TYPE_VOID:
2839 *(gpointer*)ret = NULL;
2841 case MONO_TYPE_STRING:
2842 case MONO_TYPE_CLASS:
2843 case MONO_TYPE_ARRAY:
2844 case MONO_TYPE_SZARRAY:
2845 case MONO_TYPE_OBJECT:
2849 *(gpointer*)ret = (gpointer)res;
2855 case MONO_TYPE_BOOLEAN:
2856 *(guint8*)ret = res;
2859 *(gint16*)ret = res;
2862 case MONO_TYPE_CHAR:
2863 *(guint16*)ret = res;
2866 *(gint32*)ret = res;
2869 *(guint32*)ret = res;
2873 /* This handles endianness as well */
2874 ((gint32*)ret) [0] = res;
2875 ((gint32*)ret) [1] = res2;
2877 case MONO_TYPE_GENERICINST:
2878 if (MONO_TYPE_IS_REFERENCE (ptype)) {
2879 *(gpointer*)ret = (gpointer)res;
2884 case MONO_TYPE_VALUETYPE:
2885 g_assert (ainfo->cinfo->vtype_retaddr);
2890 *(float*)ret = *(float*)&res;
/* NOTE(review): the next line's "®s" looks like mojibake for "&regs"
 * (a local double buffer filled in elided lines) — confirm upstream. */
2892 case MONO_TYPE_R8: {
2899 *(double*)ret = *(double*)®s;
2903 g_assert_not_reached ();
2910 * Allow tracing to work with this interface (with an optional argument)
/*
 * mono_arch_instrument_prolog:
 *
 *   Emit the enter-trace call at method entry: r0 = method, r1 = NULL
 * (placeholder frame pointer), then call FUNC through r2.
 */
2914 mono_arch_instrument_prolog (MonoCompile *cfg, void *func, void *p, gboolean enable_arguments)
2918 code = mono_arm_emit_load_imm (code, ARMREG_R0, (guint32)cfg->method);
2919 ARM_MOV_REG_IMM8 (code, ARMREG_R1, 0); /* NULL ebp for now */
2920 code = mono_arm_emit_load_imm (code, ARMREG_R2, (guint32)func);
2921 code = emit_call_reg (code, ARMREG_R2);
/*
 * mono_arch_instrument_epilog_full:
 *
 *   Emit the leave-trace call at method exit. The return value (in r0/r1 or
 * VFP d0/s0 depending on the return type) must survive the call to FUNC, so
 * it is spilled to the param area (save_offset), optionally copied into the
 * argument registers for FUNC, and reloaded afterwards. NOTE(review): elided
 * listing — the switch header on rtype, several case labels and breaks, and
 * the SAVE_* labels of both save/restore switches fall in missing lines.
 */
2935 mono_arch_instrument_epilog_full (MonoCompile *cfg, void *func, void *p, gboolean enable_arguments, gboolean preserve_argument_registers)
2938 int save_mode = SAVE_NONE;
2940 MonoMethod *method = cfg->method;
2941 MonoType *ret_type = mini_type_get_underlying_type (cfg->generic_sharing_context, mono_method_signature (method)->ret);
2942 int rtype = ret_type->type;
2943 int save_offset = cfg->param_area;
/* Grow the native code buffer if the ~16 instructions below might not fit. */
2947 offset = code - cfg->native_code;
2948 /* we need about 16 instructions */
2949 if (offset > (cfg->code_size - 16 * 4)) {
2950 cfg->code_size *= 2;
2951 cfg->native_code = g_realloc (cfg->native_code, cfg->code_size);
2952 code = cfg->native_code + offset;
/* Pick how much of the return value must be saved across the trace call. */
2955 case MONO_TYPE_VOID:
2956 /* special case string .ctor icall */
2957 if (strcmp (".ctor", method->name) && method->klass == mono_defaults.string_class)
2958 save_mode = SAVE_ONE;
2960 save_mode = SAVE_NONE;
2964 save_mode = SAVE_TWO;
2968 save_mode = SAVE_ONE_FP;
2970 save_mode = SAVE_ONE;
2974 save_mode = SAVE_TWO_FP;
2976 save_mode = SAVE_TWO;
2978 case MONO_TYPE_GENERICINST:
2979 if (!mono_type_generic_inst_is_valuetype (ret_type)) {
2980 save_mode = SAVE_ONE;
2984 case MONO_TYPE_VALUETYPE:
2985 save_mode = SAVE_STRUCT;
2988 save_mode = SAVE_ONE;
/* Spill the return value and, when tracing arguments, shuffle it into
 * r1(/r2) so FUNC receives it after method goes into r0. */
2992 switch (save_mode) {
2994 ARM_STR_IMM (code, ARMREG_R0, cfg->frame_reg, save_offset);
2995 ARM_STR_IMM (code, ARMREG_R1, cfg->frame_reg, save_offset + 4);
2996 if (enable_arguments) {
2997 ARM_MOV_REG_REG (code, ARMREG_R2, ARMREG_R1);
2998 ARM_MOV_REG_REG (code, ARMREG_R1, ARMREG_R0);
3002 ARM_STR_IMM (code, ARMREG_R0, cfg->frame_reg, save_offset);
3003 if (enable_arguments) {
3004 ARM_MOV_REG_REG (code, ARMREG_R1, ARMREG_R0);
3008 ARM_FSTS (code, ARM_VFP_F0, cfg->frame_reg, save_offset);
3009 if (enable_arguments) {
3010 ARM_FMRS (code, ARMREG_R1, ARM_VFP_F0);
3014 ARM_FSTD (code, ARM_VFP_D0, cfg->frame_reg, save_offset);
3015 if (enable_arguments) {
3016 ARM_FMDRR (code, ARMREG_R1, ARMREG_R2, ARM_VFP_D0);
3020 if (enable_arguments) {
3021 /* FIXME: get the actual address */
3022 ARM_MOV_REG_REG (code, ARMREG_R1, ARMREG_R0);
/* Call FUNC (r0 = method, address in IP). */
3030 code = mono_arm_emit_load_imm (code, ARMREG_R0, (guint32)cfg->method);
3031 code = mono_arm_emit_load_imm (code, ARMREG_IP, (guint32)func);
3032 code = emit_call_reg (code, ARMREG_IP);
/* Reload the saved return value into its ABI location. */
3034 switch (save_mode) {
3036 ARM_LDR_IMM (code, ARMREG_R0, cfg->frame_reg, save_offset);
3037 ARM_LDR_IMM (code, ARMREG_R1, cfg->frame_reg, save_offset + 4);
3040 ARM_LDR_IMM (code, ARMREG_R0, cfg->frame_reg, save_offset);
3043 ARM_FLDS (code, ARM_VFP_F0, cfg->frame_reg, save_offset);
3046 ARM_FLDD (code, ARM_VFP_D0, cfg->frame_reg, save_offset);
3057 * The immediate field for cond branches is big enough for all reasonable methods
3059 #define EMIT_COND_BRANCH_FLAGS(ins,condcode) \
3060 if (0 && ins->inst_true_bb->native_offset) { \
3061 ARM_B_COND (code, (condcode), (code - cfg->native_code + ins->inst_true_bb->native_offset) & 0xffffff); \
3063 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_BB, ins->inst_true_bb); \
3064 ARM_B_COND (code, (condcode), 0); \
3067 #define EMIT_COND_BRANCH(ins,cond) EMIT_COND_BRANCH_FLAGS(ins, branch_cc_table [(cond)])
3069 /* emit an exception if condition is fail
3071 * We assign the extra code used to throw the implicit exceptions
3072 * to cfg->bb_exit as far as the big branch handling is concerned
3074 #define EMIT_COND_SYSTEM_EXCEPTION_FLAGS(condcode,exc_name) \
3076 mono_add_patch_info (cfg, code - cfg->native_code, \
3077 MONO_PATCH_INFO_EXC, exc_name); \
3078 ARM_BL_COND (code, (condcode), 0); \
3081 #define EMIT_COND_SYSTEM_EXCEPTION(cond,exc_name) EMIT_COND_SYSTEM_EXCEPTION_FLAGS(branch_cc_table [(cond)], (exc_name))
3084 mono_arch_peephole_pass_1 (MonoCompile *cfg, MonoBasicBlock *bb)
/*
 * mono_arch_peephole_pass_2:
 *
 *   Late peephole pass over one basic block: forwards stored values into
 * following loads, folds store-imm/load pairs into OP_ICONST, narrows
 * load-after-narrow-store into sign/zero extensions, and deletes no-op and
 * mutually-cancelling moves. NOTE(review): elided listing — several case
 * labels (e.g. the OP_MOVE case and the narrowing-load conversion opcodes)
 * and break statements are in missing lines.
 */
3089 mono_arch_peephole_pass_2 (MonoCompile *cfg, MonoBasicBlock *bb)
3091 MonoInst *ins, *n, *last_ins = NULL;
3093 MONO_BB_FOR_EACH_INS_SAFE (bb, n, ins) {
3094 switch (ins->opcode) {
3097 /* Already done by an arch-independent pass */
3099 case OP_LOAD_MEMBASE:
3100 case OP_LOADI4_MEMBASE:
3102 * OP_STORE_MEMBASE_REG reg, offset(basereg)
3103 * OP_LOAD_MEMBASE offset(basereg), reg
3105 if (last_ins && (last_ins->opcode == OP_STOREI4_MEMBASE_REG
3106 || last_ins->opcode == OP_STORE_MEMBASE_REG) &&
3107 ins->inst_basereg == last_ins->inst_destbasereg &&
3108 ins->inst_offset == last_ins->inst_offset) {
/* Load of the value just stored: drop the load (same reg) or turn it
 * into a register move. */
3109 if (ins->dreg == last_ins->sreg1) {
3110 MONO_DELETE_INS (bb, ins);
3113 //static int c = 0; printf ("MATCHX %s %d\n", cfg->method->name,c++);
3114 ins->opcode = OP_MOVE;
3115 ins->sreg1 = last_ins->sreg1;
3119 * Note: reg1 must be different from the basereg in the second load
3120 * OP_LOAD_MEMBASE offset(basereg), reg1
3121 * OP_LOAD_MEMBASE offset(basereg), reg2
3123 * OP_LOAD_MEMBASE offset(basereg), reg1
3124 * OP_MOVE reg1, reg2
3126 } if (last_ins && (last_ins->opcode == OP_LOADI4_MEMBASE
3127 || last_ins->opcode == OP_LOAD_MEMBASE) &&
3128 ins->inst_basereg != last_ins->dreg &&
3129 ins->inst_basereg == last_ins->inst_basereg &&
3130 ins->inst_offset == last_ins->inst_offset) {
3132 if (ins->dreg == last_ins->dreg) {
3133 MONO_DELETE_INS (bb, ins);
3136 ins->opcode = OP_MOVE;
3137 ins->sreg1 = last_ins->dreg;
3140 //g_assert_not_reached ();
3144 * OP_STORE_MEMBASE_IMM imm, offset(basereg)
3145 * OP_LOAD_MEMBASE offset(basereg), reg
3147 * OP_STORE_MEMBASE_IMM imm, offset(basereg)
3148 * OP_ICONST reg, imm
3150 } else if (last_ins && (last_ins->opcode == OP_STOREI4_MEMBASE_IMM
3151 || last_ins->opcode == OP_STORE_MEMBASE_IMM) &&
3152 ins->inst_basereg == last_ins->inst_destbasereg &&
3153 ins->inst_offset == last_ins->inst_offset) {
3154 //static int c = 0; printf ("MATCHX %s %d\n", cfg->method->name,c++);
3155 ins->opcode = OP_ICONST;
3156 ins->inst_c0 = last_ins->inst_imm;
3157 g_assert_not_reached (); // check this rule
3161 case OP_LOADU1_MEMBASE:
3162 case OP_LOADI1_MEMBASE:
/* Byte load of a just-stored byte: re-extend the stored register instead. */
3163 if (last_ins && (last_ins->opcode == OP_STOREI1_MEMBASE_REG) &&
3164 ins->inst_basereg == last_ins->inst_destbasereg &&
3165 ins->inst_offset == last_ins->inst_offset) {
3166 ins->opcode = (ins->opcode == OP_LOADI1_MEMBASE) ? OP_ICONV_TO_I1 : OP_ICONV_TO_U1;
3167 ins->sreg1 = last_ins->sreg1;
3170 case OP_LOADU2_MEMBASE:
3171 case OP_LOADI2_MEMBASE:
3172 if (last_ins && (last_ins->opcode == OP_STOREI2_MEMBASE_REG) &&
3173 ins->inst_basereg == last_ins->inst_destbasereg &&
3174 ins->inst_offset == last_ins->inst_offset) {
3175 ins->opcode = (ins->opcode == OP_LOADI2_MEMBASE) ? OP_ICONV_TO_I2 : OP_ICONV_TO_U2;
3176 ins->sreg1 = last_ins->sreg1;
3180 ins->opcode = OP_MOVE;
/* Self-move: delete. */
3184 if (ins->dreg == ins->sreg1) {
3185 MONO_DELETE_INS (bb, ins);
3189 * OP_MOVE sreg, dreg
3190 * OP_MOVE dreg, sreg
3192 if (last_ins && last_ins->opcode == OP_MOVE &&
3193 ins->sreg1 == last_ins->dreg &&
3194 ins->dreg == last_ins->sreg1) {
3195 MONO_DELETE_INS (bb, ins);
3203 bb->last_ins = last_ins;
3207 * the branch_cc_table should maintain the order of these
3221 branch_cc_table [] = {
3235 #define ADD_NEW_INS(cfg,dest,op) do { \
3236 MONO_INST_NEW ((cfg), (dest), (op)); \
3237 mono_bblock_insert_before_ins (bb, ins, (dest)); \
/*
 * map_to_reg_reg_op:
 *
 *   Map an opcode with an immediate or membase operand to its register-based
 * equivalent, used by the lowering pass once the immediate/offset has been
 * materialized into a register: *_MEMBASE -> *_MEMINDEX, *_MEMBASE_IMM ->
 * *_MEMBASE_REG. NOTE(review): elided listing — the imm->reg arithmetic
 * mappings between lines 3250-3266 are mostly missing here.
 */
3241 map_to_reg_reg_op (int op)
3250 case OP_COMPARE_IMM:
3252 case OP_ICOMPARE_IMM:
3266 case OP_LOAD_MEMBASE:
3267 return OP_LOAD_MEMINDEX;
3268 case OP_LOADI4_MEMBASE:
3269 return OP_LOADI4_MEMINDEX;
3270 case OP_LOADU4_MEMBASE:
3271 return OP_LOADU4_MEMINDEX;
3272 case OP_LOADU1_MEMBASE:
3273 return OP_LOADU1_MEMINDEX;
3274 case OP_LOADI2_MEMBASE:
3275 return OP_LOADI2_MEMINDEX;
3276 case OP_LOADU2_MEMBASE:
3277 return OP_LOADU2_MEMINDEX;
3278 case OP_LOADI1_MEMBASE:
3279 return OP_LOADI1_MEMINDEX;
3280 case OP_STOREI1_MEMBASE_REG:
3281 return OP_STOREI1_MEMINDEX;
3282 case OP_STOREI2_MEMBASE_REG:
3283 return OP_STOREI2_MEMINDEX;
3284 case OP_STOREI4_MEMBASE_REG:
3285 return OP_STOREI4_MEMINDEX;
3286 case OP_STORE_MEMBASE_REG:
3287 return OP_STORE_MEMINDEX;
3288 case OP_STORER4_MEMBASE_REG:
3289 return OP_STORER4_MEMINDEX;
3290 case OP_STORER8_MEMBASE_REG:
3291 return OP_STORER8_MEMINDEX;
3292 case OP_STORE_MEMBASE_IMM:
3293 return OP_STORE_MEMBASE_REG;
3294 case OP_STOREI1_MEMBASE_IMM:
3295 return OP_STOREI1_MEMBASE_REG;
3296 case OP_STOREI2_MEMBASE_IMM:
3297 return OP_STOREI2_MEMBASE_REG;
3298 case OP_STOREI4_MEMBASE_IMM:
3299 return OP_STOREI4_MEMBASE_REG;
/* Unknown opcode: hard failure rather than silent miscompile. */
3301 g_assert_not_reached ();
3305 * Remove from the instruction list the instructions that can't be
3306 * represented with very simple instructions with no register
/*
 * mono_arch_lowering_pass:
 *
 *   Rewrite per-basic-block IR into forms the ARM code emitter can handle:
 * immediates that don't fit ARM's rotated-imm8 encoding are materialized with
 * OP_ICONST into a fresh vreg, membase offsets that exceed the addressing-mode
 * range (imm12 for word/byte, imm8 for halfword, fpimm8 for VFP) are either
 * folded into an ADD of the high part or fully computed into a base register,
 * and some fp compares get swapped operands. NOTE(review): elided listing —
 * the loop_start label, many case labels and break statements, and parts of
 * the fcompare handling are in missing lines.
 */
3310 mono_arch_lowering_pass (MonoCompile *cfg, MonoBasicBlock *bb)
3312 MonoInst *ins, *temp, *last_ins = NULL;
3313 int rot_amount, imm8, low_imm;
3315 MONO_BB_FOR_EACH_INS (bb, ins) {
3317 switch (ins->opcode) {
3321 case OP_COMPARE_IMM:
3322 case OP_ICOMPARE_IMM:
/* Immediate doesn't fit a rotated imm8: load it into a vreg and switch to
 * the register-register form of the opcode. */
3336 if ((imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount)) < 0) {
3337 ADD_NEW_INS (cfg, temp, OP_ICONST);
3338 temp->inst_c0 = ins->inst_imm;
3339 temp->dreg = mono_alloc_ireg (cfg);
3340 ins->sreg2 = temp->dreg;
3341 ins->opcode = mono_op_imm_to_op (ins->opcode);
3343 if (ins->opcode == OP_SBB || ins->opcode == OP_ISBB || ins->opcode == OP_SUBCC)
/* Multiply by constant: strength-reduce *1 -> move, *0 -> const 0,
 * power-of-two -> shift, otherwise fall back to a real IMUL. */
3349 if (ins->inst_imm == 1) {
3350 ins->opcode = OP_MOVE;
3353 if (ins->inst_imm == 0) {
3354 ins->opcode = OP_ICONST;
3358 imm8 = mono_is_power_of_two (ins->inst_imm);
3360 ins->opcode = OP_SHL_IMM;
3361 ins->inst_imm = imm8;
3364 ADD_NEW_INS (cfg, temp, OP_ICONST);
3365 temp->inst_c0 = ins->inst_imm;
3366 temp->dreg = mono_alloc_ireg (cfg);
3367 ins->sreg2 = temp->dreg;
3368 ins->opcode = OP_IMUL;
3374 if (ins->next && (ins->next->opcode == OP_COND_EXC_C || ins->next->opcode == OP_COND_EXC_IC))
3375 /* ARM sets the C flag to 1 if there was _no_ overflow */
3376 ins->next->opcode = OP_COND_EXC_NC;
3379 case OP_IDIV_UN_IMM:
3381 case OP_IREM_UN_IMM:
3382 ADD_NEW_INS (cfg, temp, OP_ICONST);
3383 temp->inst_c0 = ins->inst_imm;
3384 temp->dreg = mono_alloc_ireg (cfg);
3385 ins->sreg2 = temp->dreg;
3386 ins->opcode = mono_op_imm_to_op (ins->opcode);
3388 case OP_LOCALLOC_IMM:
3389 ADD_NEW_INS (cfg, temp, OP_ICONST);
3390 temp->inst_c0 = ins->inst_imm;
3391 temp->dreg = mono_alloc_ireg (cfg);
3392 ins->sreg1 = temp->dreg;
3393 ins->opcode = OP_LOCALLOC;
3395 case OP_LOAD_MEMBASE:
3396 case OP_LOADI4_MEMBASE:
3397 case OP_LOADU4_MEMBASE:
3398 case OP_LOADU1_MEMBASE:
3399 /* we can do two things: load the immed in a register
3400 * and use an indexed load, or see if the immed can be
3401 * represented as an ad_imm + a load with a smaller offset
3402 * that fits. We just do the first for now, optimize later.
3404 if (arm_is_imm12 (ins->inst_offset))
3406 ADD_NEW_INS (cfg, temp, OP_ICONST);
3407 temp->inst_c0 = ins->inst_offset;
3408 temp->dreg = mono_alloc_ireg (cfg);
3409 ins->sreg2 = temp->dreg;
3410 ins->opcode = map_to_reg_reg_op (ins->opcode);
3412 case OP_LOADI2_MEMBASE:
3413 case OP_LOADU2_MEMBASE:
3414 case OP_LOADI1_MEMBASE:
/* Halfword/signed-byte loads only have an 8-bit offset encoding. */
3415 if (arm_is_imm8 (ins->inst_offset))
3417 ADD_NEW_INS (cfg, temp, OP_ICONST);
3418 temp->inst_c0 = ins->inst_offset;
3419 temp->dreg = mono_alloc_ireg (cfg);
3420 ins->sreg2 = temp->dreg;
3421 ins->opcode = map_to_reg_reg_op (ins->opcode);
3423 case OP_LOADR4_MEMBASE:
3424 case OP_LOADR8_MEMBASE:
/* VFP has no reg+reg addressing: split the offset into a rotated-imm8
 * high part (added to the base) and a small low part, else compute the
 * full address with an IADD. */
3425 if (arm_is_fpimm8 (ins->inst_offset))
3427 low_imm = ins->inst_offset & 0x1ff;
3428 if ((imm8 = mono_arm_is_rotated_imm8 (ins->inst_offset & ~0x1ff, &rot_amount)) >= 0) {
3429 ADD_NEW_INS (cfg, temp, OP_ADD_IMM);
3430 temp->inst_imm = ins->inst_offset & ~0x1ff;
3431 temp->sreg1 = ins->inst_basereg;
3432 temp->dreg = mono_alloc_ireg (cfg);
3433 ins->inst_basereg = temp->dreg;
3434 ins->inst_offset = low_imm;
3438 ADD_NEW_INS (cfg, temp, OP_ICONST);
3439 temp->inst_c0 = ins->inst_offset;
3440 temp->dreg = mono_alloc_ireg (cfg);
3442 ADD_NEW_INS (cfg, add_ins, OP_IADD);
3443 add_ins->sreg1 = ins->inst_basereg;
3444 add_ins->sreg2 = temp->dreg;
3445 add_ins->dreg = mono_alloc_ireg (cfg);
3447 ins->inst_basereg = add_ins->dreg;
3448 ins->inst_offset = 0;
3451 case OP_STORE_MEMBASE_REG:
3452 case OP_STOREI4_MEMBASE_REG:
3453 case OP_STOREI1_MEMBASE_REG:
3454 if (arm_is_imm12 (ins->inst_offset))
3456 ADD_NEW_INS (cfg, temp, OP_ICONST);
3457 temp->inst_c0 = ins->inst_offset;
3458 temp->dreg = mono_alloc_ireg (cfg);
3459 ins->sreg2 = temp->dreg;
3460 ins->opcode = map_to_reg_reg_op (ins->opcode);
3462 case OP_STOREI2_MEMBASE_REG:
3463 if (arm_is_imm8 (ins->inst_offset))
3465 ADD_NEW_INS (cfg, temp, OP_ICONST);
3466 temp->inst_c0 = ins->inst_offset;
3467 temp->dreg = mono_alloc_ireg (cfg);
3468 ins->sreg2 = temp->dreg;
3469 ins->opcode = map_to_reg_reg_op (ins->opcode);
3471 case OP_STORER4_MEMBASE_REG:
3472 case OP_STORER8_MEMBASE_REG:
3473 if (arm_is_fpimm8 (ins->inst_offset))
3475 low_imm = ins->inst_offset & 0x1ff;
3476 if ((imm8 = mono_arm_is_rotated_imm8 (ins->inst_offset & ~ 0x1ff, &rot_amount)) >= 0 && arm_is_fpimm8 (low_imm)) {
3477 ADD_NEW_INS (cfg, temp, OP_ADD_IMM);
3478 temp->inst_imm = ins->inst_offset & ~0x1ff;
3479 temp->sreg1 = ins->inst_destbasereg;
3480 temp->dreg = mono_alloc_ireg (cfg);
3481 ins->inst_destbasereg = temp->dreg;
3482 ins->inst_offset = low_imm;
3486 ADD_NEW_INS (cfg, temp, OP_ICONST);
3487 temp->inst_c0 = ins->inst_offset;
3488 temp->dreg = mono_alloc_ireg (cfg);
3490 ADD_NEW_INS (cfg, add_ins, OP_IADD);
3491 add_ins->sreg1 = ins->inst_destbasereg;
3492 add_ins->sreg2 = temp->dreg;
3493 add_ins->dreg = mono_alloc_ireg (cfg);
3495 ins->inst_destbasereg = add_ins->dreg;
3496 ins->inst_offset = 0;
3499 case OP_STORE_MEMBASE_IMM:
3500 case OP_STOREI1_MEMBASE_IMM:
3501 case OP_STOREI2_MEMBASE_IMM:
3502 case OP_STOREI4_MEMBASE_IMM:
/* First turn the stored immediate into a register, then re-process the
 * instruction so a large offset is also handled (goto loop_start). */
3503 ADD_NEW_INS (cfg, temp, OP_ICONST);
3504 temp->inst_c0 = ins->inst_imm;
3505 temp->dreg = mono_alloc_ireg (cfg);
3506 ins->sreg1 = temp->dreg;
3507 ins->opcode = map_to_reg_reg_op (ins->opcode);
3509 goto loop_start; /* make it handle the possibly big ins->inst_offset */
3511 gboolean swap = FALSE;
3515 /* Optimized away */
3520 /* Some fp compares require swapped operands */
3521 switch (ins->next->opcode) {
3523 ins->next->opcode = OP_FBLT;
3527 ins->next->opcode = OP_FBLT_UN;
3531 ins->next->opcode = OP_FBGE;
3535 ins->next->opcode = OP_FBGE_UN;
3543 ins->sreg1 = ins->sreg2;
3552 bb->last_ins = last_ins;
3553 bb->max_vreg = cfg->next_vreg;
/*
 * mono_arch_decompose_long_opts:
 *
 *   Decompose 64-bit IR ops into 32-bit pairs. Only OP_LNEG is handled here:
 * negate via RSBS (low word, sets carry) + RSC (high word, consumes borrow).
 * Vreg pairs use the dreg+1 (low) / dreg+2 (high) convention.
 */
3557 mono_arch_decompose_long_opts (MonoCompile *cfg, MonoInst *long_ins)
3561 if (long_ins->opcode == OP_LNEG) {
3563 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ARM_RSBS_IMM, ins->dreg + 1, ins->sreg1 + 1, 0);
3564 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ARM_RSC_IMM, ins->dreg + 2, ins->sreg1 + 2, 0);
/*
 * emit_float_to_int:
 *
 *   Emit VFP code converting the double in SREG to an integer in core
 * register DREG: TOSIZD/TOUIZD into a scratch VFP register (signed vs.
 * unsigned), move to DREG with FMRS, then mask/shift to the requested SIZE
 * (1 or 2 bytes, signed or unsigned). Size 4 needs no narrowing.
 */
3570 emit_float_to_int (MonoCompile *cfg, guchar *code, int dreg, int sreg, int size, gboolean is_signed)
3572 /* sreg is a float, dreg is an integer reg */
3574 code = mono_arm_emit_vfp_scratch_save (cfg, code, vfp_scratch1);
3576 ARM_TOSIZD (code, vfp_scratch1, sreg);
3578 ARM_TOUIZD (code, vfp_scratch1, sreg);
3579 ARM_FMRS (code, dreg, vfp_scratch1);
3580 code = mono_arm_emit_vfp_scratch_restore (cfg, code, vfp_scratch1);
/* Unsigned narrowing: mask to 8 bits, or shift-left/logical-shift-right
 * for 16 bits. */
3584 ARM_AND_REG_IMM8 (code, dreg, dreg, 0xff);
3585 else if (size == 2) {
3586 ARM_SHL_IMM (code, dreg, dreg, 16);
3587 ARM_SHR_IMM (code, dreg, dreg, 16);
/* Signed narrowing: shift-left then arithmetic-shift-right to sign-extend. */
3591 ARM_SHL_IMM (code, dreg, dreg, 24);
3592 ARM_SAR_IMM (code, dreg, dreg, 24);
3593 } else if (size == 2) {
3594 ARM_SHL_IMM (code, dreg, dreg, 16);
3595 ARM_SAR_IMM (code, dreg, dreg, 16);
3601 #endif /* #ifndef DISABLE_JIT */
3605 const guchar *target;
3610 #define is_call_imm(diff) ((gint)(diff) >= -33554432 && (gint)(diff) <= 33554431)
/*
 * search_thunk_slot:
 *
 *   Callback for mono_domain_code_foreach/mono_code_manager_foreach: scan the
 * thunk area after one code chunk for either an existing 12-byte thunk that
 * already jumps to pdata->target (reuse it) or an all-zero free slot (emit a
 * new thunk there), then patch the original call site to branch to the thunk.
 * A thunk is: ldr ip, [pc] / bx ip (or mov pc, ip) / target-address word.
 * Must return a foreach "stop" value once found — the returns are in elided
 * lines; confirm against the full source.
 */
3613 search_thunk_slot (void *data, int csize, int bsize, void *user_data) {
3614 PatchData *pdata = (PatchData*)user_data;
3615 guchar *code = data;
3616 guint32 *thunks = data;
3617 guint32 *endthunks = (guint32*)(code + bsize);
3619 int difflow, diffhigh;
3621 /* always ensure a call from pdata->code can reach to the thunks without further thunks */
3622 difflow = (char*)pdata->code - (char*)thunks;
3623 diffhigh = (char*)pdata->code - (char*)endthunks;
3624 if (!((is_call_imm (thunks) && is_call_imm (endthunks)) || (is_call_imm (difflow) && is_call_imm (diffhigh))))
3628 * The thunk is composed of 3 words:
3629 * load constant from thunks [2] into ARM_IP
3632 * Note that the LR register is already setup
3634 //g_print ("thunk nentries: %d\n", ((char*)endthunks - (char*)thunks)/16);
3635 if ((pdata->found == 2) || (pdata->code >= code && pdata->code <= code + csize)) {
3636 while (thunks < endthunks) {
3637 //g_print ("looking for target: %p at %p (%08x-%08x)\n", pdata->target, thunks, thunks [0], thunks [1]);
3638 if (thunks [2] == (guint32)pdata->target) {
/* Existing thunk for this target: just patch the call site to it. */
3639 arm_patch (pdata->code, (guchar*)thunks);
3640 mono_arch_flush_icache (pdata->code, 4);
3643 } else if ((thunks [0] == 0) && (thunks [1] == 0) && (thunks [2] == 0)) {
3644 /* found a free slot instead: emit thunk */
3645 /* ARMREG_IP is fine to use since this can't be an IMT call
3648 code = (guchar*)thunks;
3649 ARM_LDR_IMM (code, ARMREG_IP, ARMREG_PC, 0);
3650 if (thumb_supported)
3651 ARM_BX (code, ARMREG_IP);
3653 ARM_MOV_REG_REG (code, ARMREG_PC, ARMREG_IP);
3654 thunks [2] = (guint32)pdata->target;
3655 mono_arch_flush_icache ((guchar*)thunks, 12);
3657 arm_patch (pdata->code, (guchar*)thunks);
3658 mono_arch_flush_icache (pdata->code, 4);
3662 /* skip 12 bytes, the size of the thunk */
3666 //g_print ("failed thunk lookup for %p from %p at %p (%d entries)\n", pdata->target, pdata->code, data, count);
/*
 * handle_thunk:
 *
 *   Patch CODE to reach TARGET via a branch thunk when the direct branch
 * displacement is out of range. Searches, in order: the dynamic-method code
 * manager (if given), the domain's code chunks (first for a matching thunk,
 * then — pdata.found == 2 — for any free slot), and finally every dynamic
 * method's own code manager. Aborts if no slot is found.
 */
3672 handle_thunk (MonoDomain *domain, int absolute, guchar *code, const guchar *target, MonoCodeManager *dyn_code_mp)
3677 domain = mono_domain_get ();
3680 pdata.target = target;
3681 pdata.absolute = absolute;
3685 mono_code_manager_foreach (dyn_code_mp, search_thunk_slot, &pdata);
3688 if (pdata.found != 1) {
3689 mono_domain_lock (domain);
3690 mono_domain_code_foreach (domain, search_thunk_slot, &pdata);
3693 /* this uses the first available slot */
3695 mono_domain_code_foreach (domain, search_thunk_slot, &pdata);
3697 mono_domain_unlock (domain);
3700 if (pdata.found != 1) {
3702 GHashTableIter iter;
3703 MonoJitDynamicMethodInfo *ji;
3706 * This might be a dynamic method, search its code manager. We can only
3707 * use the dynamic method containing CODE, since the others might be freed later.
3711 mono_domain_lock (domain);
3712 hash = domain_jit_info (domain)->dynamic_code_hash;
3714 /* FIXME: Speed this up */
3715 g_hash_table_iter_init (&iter, hash);
3716 while (g_hash_table_iter_next (&iter, NULL, (gpointer*)&ji)) {
3717 mono_code_manager_foreach (ji->code_mp, search_thunk_slot, &pdata);
3718 if (pdata.found == 1)
3722 mono_domain_unlock (domain);
3724 if (pdata.found != 1)
3725 g_print ("thunk failed for %p from %p\n", target, code);
3726 g_assert (pdata.found == 1);
/*
 * arm_patch_general:
 *
 *   Patch the branch/call instruction(s) at CODE to transfer to TARGET.
 * Handles: (1) B/BL (top bits 101) by rewriting the signed 24-bit word
 * displacement in place — converting BL to BLX when the target is Thumb —
 * and falling back to handle_thunk () when out of range; (2) the indirect
 * sequences the backend emits (ldr ip, [pc] / branch-around / inline address
 * constant, terminated by bx ip or blx ip or mov pc, ip) by locating the
 * address-constant word relative to CODE and overwriting it. The expected
 * instruction words are re-generated locally into ccode[] and matched with
 * asserts before anything is written.
 */
3730 arm_patch_general (MonoDomain *domain, guchar *code, const guchar *target, MonoCodeManager *dyn_code_mp)
3732 guint32 *code32 = (void*)code;
3733 guint32 ins = *code32;
3734 guint32 prim = (ins >> 25) & 7;
3735 guint32 tval = GPOINTER_TO_UINT (target);
3737 //g_print ("patching 0x%08x (0x%08x) to point to 0x%08x\n", code, ins, target);
3738 if (prim == 5) { /* 101b */
3739 /* the diff starts 8 bytes from the branch opcode */
3740 gint diff = target - code - 8;
3742 gint tmask = 0xffffffff;
3743 if (tval & 1) { /* entering thumb mode */
3744 diff = target - 1 - code - 8;
3745 g_assert (thumb_supported);
3746 tbits = 0xf << 28; /* bl->blx bit pattern */
3747 g_assert ((ins & (1 << 24))); /* it must be a bl, not b instruction */
3748 /* this low bit of the displacement is moved to bit 24 in the instruction encoding */
3752 tmask = ~(1 << 24); /* clear the link bit */
3753 /*g_print ("blx to thumb: target: %p, code: %p, diff: %d, mask: %x\n", target, code, diff, tmask);*/
/* Forward branch: displacement must fit in the signed 24-bit word field. */
3758 if (diff <= 33554431) {
3760 ins = (ins & 0xff000000) | diff;
3762 *code32 = ins | tbits;
3766 /* diff between 0 and -33554432 */
3767 if (diff >= -33554432) {
3769 ins = (ins & 0xff000000) | (diff & ~0xff000000);
3771 *code32 = ins | tbits;
/* Out of direct-branch range: route through a 12-byte thunk. */
3776 handle_thunk (domain, TRUE, code, target, dyn_code_mp);
3780 #ifdef USE_JUMP_TABLES
3782 gpointer *jte = mono_jumptable_get_entry (code);
3784 jte [0] = (gpointer) target;
3788 * The alternative call sequences looks like this:
3790 * ldr ip, [pc] // loads the address constant
3791 * b 1f // jumps around the constant
3792 * address constant embedded in the code
3797 * There are two cases for patching:
3798 * a) at the end of method emission: in this case code points to the start
3799 * of the call sequence
3800 * b) during runtime patching of the call site: in this case code points
3801 * to the mov pc, ip instruction
3803 * We have to handle also the thunk jump code sequence:
3807 * address constant // execution never reaches here
3809 if ((ins & 0x0ffffff0) == 0x12fff10) {
3810 /* Branch and exchange: the address is constructed in a reg
3811 * We can patch BX when the code sequence is the following:
3812 * ldr ip, [pc, #0] ; 0x8
3819 guint8 *emit = (guint8*)ccode;
3820 ARM_LDR_IMM (emit, ARMREG_IP, ARMREG_PC, 0);
3822 ARM_MOV_REG_REG (emit, ARMREG_LR, ARMREG_PC);
3823 ARM_BX (emit, ARMREG_IP);
3825 /*patching from magic trampoline*/
3826 if (ins == ccode [3]) {
3827 g_assert (code32 [-4] == ccode [0]);
3828 g_assert (code32 [-3] == ccode [1]);
3829 g_assert (code32 [-1] == ccode [2]);
3830 code32 [-2] = (guint32)target;
3833 /*patching from JIT*/
3834 if (ins == ccode [0]) {
3835 g_assert (code32 [1] == ccode [1]);
3836 g_assert (code32 [3] == ccode [2]);
3837 g_assert (code32 [4] == ccode [3]);
3838 code32 [2] = (guint32)target;
3841 g_assert_not_reached ();
3842 } else if ((ins & 0x0ffffff0) == 0x12fff30) {
/* BLX reg sequence: ldr ip, [pc] / b around / constant / blx ip. */
3850 guint8 *emit = (guint8*)ccode;
3851 ARM_LDR_IMM (emit, ARMREG_IP, ARMREG_PC, 0);
3853 ARM_BLX_REG (emit, ARMREG_IP);
3855 g_assert (code32 [-3] == ccode [0]);
3856 g_assert (code32 [-2] == ccode [1]);
3857 g_assert (code32 [0] == ccode [2]);
3859 code32 [-1] = (guint32)target;
/* Remaining case: mov lr, pc / mov pc, ip style sequence. */
3862 guint32 *tmp = ccode;
3863 guint8 *emit = (guint8*)tmp;
3864 ARM_LDR_IMM (emit, ARMREG_IP, ARMREG_PC, 0);
3865 ARM_MOV_REG_REG (emit, ARMREG_LR, ARMREG_PC);
3866 ARM_MOV_REG_REG (emit, ARMREG_PC, ARMREG_IP);
3867 ARM_BX (emit, ARMREG_IP);
3868 if (ins == ccode [2]) {
3869 g_assert_not_reached (); // should be -2 ...
3870 code32 [-1] = (guint32)target;
3873 if (ins == ccode [0]) {
3874 /* handles both thunk jump code and the far call sequence */
3875 code32 [2] = (guint32)target;
3878 g_assert_not_reached ();
3880 // g_print ("patched with 0x%08x\n", ins);
/*
 * arm_patch:
 *
 *   Convenience wrapper around arm_patch_general () with no domain and no
 * dynamic-method code manager (so out-of-range patches use the current
 * domain's thunk area).
 */
3885 arm_patch (guchar *code, const guchar *target)
3887 arm_patch_general (NULL, code, target, NULL);
3891 * Return the >= 0 uimm8 value if val can be represented with a byte + rotation
3892 * (with the rotation amount in *rot_amount. rot_amount is already adjusted
3893 * to be used with the emit macros.
3894 * Return -1 otherwise.
/* Tries every even rotation 0..30, checking whether VAL rotated left by i
 * fits in 8 bits (ARM data-processing immediates are an 8-bit value rotated
 * right by an even amount). The fit test and return are in elided lines. */
3897 mono_arm_is_rotated_imm8 (guint32 val, gint *rot_amount)
3900 for (i = 0; i < 31; i+= 2) {
3901 res = (val << (32 - i)) | (val >> i);
3904 *rot_amount = i? 32 - i: 0;
3911 * Emits in code a sequence of instructions that load the value 'val'
3912 * into the dreg register. Uses at most 4 instructions.
/* Strategies, in order of preference (guarded by elided #ifdefs — the PC-
 * relative constant-pool load and the MOVW/MOVT path are only used on the
 * configurations that support them):
 *   1. ldr dreg, [pc] + skip over an inline constant;
 *   2. single MOV (rotated imm8) or MVN of the complement;
 *   3. ARMv7 MOVW + MOVT halves;
 *   4. fallback: build the value byte-by-byte with MOV + up to 3 ADDs. */
3915 mono_arm_emit_load_imm (guint8 *code, int dreg, guint32 val)
3917 int imm8, rot_amount;
3919 ARM_LDR_IMM (code, dreg, ARMREG_PC, 0);
3920 /* skip the constant pool */
3926 if ((imm8 = mono_arm_is_rotated_imm8 (val, &rot_amount)) >= 0) {
3927 ARM_MOV_REG_IMM (code, dreg, imm8, rot_amount);
3928 } else if ((imm8 = mono_arm_is_rotated_imm8 (~val, &rot_amount)) >= 0) {
3929 ARM_MVN_REG_IMM (code, dreg, imm8, rot_amount);
3932 ARM_MOVW_REG_IMM (code, dreg, val & 0xffff);
3934 ARM_MOVT_REG_IMM (code, dreg, (val >> 16) & 0xffff);
3938 ARM_MOV_REG_IMM8 (code, dreg, (val & 0xFF));
3940 ARM_ADD_REG_IMM (code, dreg, dreg, (val & 0xFF00) >> 8, 24);
3942 if (val & 0xFF0000) {
3943 ARM_ADD_REG_IMM (code, dreg, dreg, (val & 0xFF0000) >> 16, 16);
3945 if (val & 0xFF000000) {
3946 ARM_ADD_REG_IMM (code, dreg, dreg, (val & 0xFF000000) >> 24, 8);
3948 } else if (val & 0xFF00) {
3949 ARM_MOV_REG_IMM (code, dreg, (val & 0xFF00) >> 8, 24);
3950 if (val & 0xFF0000) {
3951 ARM_ADD_REG_IMM (code, dreg, dreg, (val & 0xFF0000) >> 16, 16);
3953 if (val & 0xFF000000) {
3954 ARM_ADD_REG_IMM (code, dreg, dreg, (val & 0xFF000000) >> 24, 8);
3956 } else if (val & 0xFF0000) {
3957 ARM_MOV_REG_IMM (code, dreg, (val & 0xFF0000) >> 16, 16);
3958 if (val & 0xFF000000) {
3959 ARM_ADD_REG_IMM (code, dreg, dreg, (val & 0xFF000000) >> 24, 8);
3962 //g_assert_not_reached ();
/*
 * mono_arm_thumb_supported:
 *
 * Return whether Thumb instructions can be used on the current CPU.
 * Simply reads the file-scope thumb_supported flag (its initialization
 * site — presumably hwcap detection — is not visible in this chunk).
 */
mono_arm_thumb_supported (void)
	return thumb_supported;
/*
 * emit_load_volatile_arguments:
 *
 * Load volatile arguments from the stack to the original input registers.
 * Required before a tail call, which reuses the caller's register/stack
 * argument layout.
 *
 * NOTE(review): this view of the file is elided — several declarations,
 * closing braces and some arms of the storage switch from the original
 * body are not shown below.
 */
emit_load_volatile_arguments (MonoCompile *cfg, guint8 *code)
	MonoMethod *method = cfg->method;
	MonoMethodSignature *sig;
	/* FIXME: Generate intermediate code instead */
	sig = mono_method_signature (method);
	/* This is the opposite of the code in emit_prolog */
	cinfo = get_call_info (cfg->generic_sharing_context, NULL, sig);
	/* First restore the hidden valuetype-return-address argument */
	if (cinfo->vtype_retaddr) {
		ArgInfo *ainfo = &cinfo->ret;
		inst = cfg->vret_addr;
		g_assert (arm_is_imm12 (inst->inst_offset));
		ARM_LDR_IMM (code, ainfo->reg, inst->inst_basereg, inst->inst_offset);
	/* Walk every formal argument (plus the implicit 'this') and move it
	 * back to where the calling convention originally placed it */
	for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
		ArgInfo *ainfo = cinfo->args + i;
		inst = cfg->args [pos];
		if (cfg->verbose_level > 2)
			g_print ("Loading argument %d (type: %d)\n", i, ainfo->storage);
		/* Argument was assigned a hard register by the register allocator */
		if (inst->opcode == OP_REGVAR) {
			if (ainfo->storage == RegTypeGeneral)
				ARM_MOV_REG_REG (code, inst->dreg, ainfo->reg);
			else if (ainfo->storage == RegTypeFP) {
				/* FP arguments in regvars are not expected on this path */
				g_assert_not_reached ();
			} else if (ainfo->storage == RegTypeBase) {
				/* Caller passed it on the stack: reload relative to SP */
				if (arm_is_imm12 (prev_sp_offset + ainfo->offset)) {
					ARM_LDR_IMM (code, inst->dreg, ARMREG_SP, (prev_sp_offset + ainfo->offset));
					/* Displacement does not fit the 12 bit LDR immediate:
					 * materialize the offset in IP and index with it */
					code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
					ARM_LDR_REG_REG (code, inst->dreg, ARMREG_SP, ARMREG_IP);
				g_assert_not_reached ();
			/* Argument lives in a stack slot: reload into its input reg(s) */
			if (ainfo->storage == RegTypeGeneral || ainfo->storage == RegTypeIRegPair) {
				switch (ainfo->size) {
				/* 8 byte case: two word loads into reg and reg + 1 */
				g_assert (arm_is_imm12 (inst->inst_offset));
				ARM_LDR_IMM (code, ainfo->reg, inst->inst_basereg, inst->inst_offset);
				g_assert (arm_is_imm12 (inst->inst_offset + 4));
				ARM_LDR_IMM (code, ainfo->reg + 1, inst->inst_basereg, inst->inst_offset + 4);
				/* Word sized case, with the same imm12-overflow fallback */
				if (arm_is_imm12 (inst->inst_offset)) {
					ARM_LDR_IMM (code, ainfo->reg, inst->inst_basereg, inst->inst_offset);
					code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
					ARM_LDR_REG_REG (code, ainfo->reg, inst->inst_basereg, ARMREG_IP);
			} else if (ainfo->storage == RegTypeBaseGen) {
			} else if (ainfo->storage == RegTypeBase) {
			} else if (ainfo->storage == RegTypeFP) {
				g_assert_not_reached ();
			} else if (ainfo->storage == RegTypeStructByVal) {
				/* Struct passed (partially) in registers: reload each
				 * pointer-sized chunk into consecutive registers */
				int doffset = inst->inst_offset;
				if (mono_class_from_mono_type (inst->inst_vtype))
					size = mono_class_native_size (mono_class_from_mono_type (inst->inst_vtype), NULL);
				for (cur_reg = 0; cur_reg < ainfo->size; ++cur_reg) {
					if (arm_is_imm12 (doffset)) {
						ARM_LDR_IMM (code, ainfo->reg + cur_reg, inst->inst_basereg, doffset);
						code = mono_arm_emit_load_imm (code, ARMREG_IP, doffset);
						ARM_LDR_REG_REG (code, ainfo->reg + cur_reg, inst->inst_basereg, ARMREG_IP);
					soffset += sizeof (gpointer);
					doffset += sizeof (gpointer);
			} else if (ainfo->storage == RegTypeStructByAddr) {
4095 mono_arch_output_basic_block (MonoCompile *cfg, MonoBasicBlock *bb)
4100 guint8 *code = cfg->native_code + cfg->code_len;
4101 MonoInst *last_ins = NULL;
4102 guint last_offset = 0;
4104 int imm8, rot_amount;
4106 /* we don't align basic blocks of loops on arm */
4108 if (cfg->verbose_level > 2)
4109 g_print ("Basic block %d starting at offset 0x%x\n", bb->block_num, bb->native_offset);
4111 cpos = bb->max_offset;
4113 if (cfg->prof_options & MONO_PROFILE_COVERAGE) {
4114 //MonoCoverageInfo *cov = mono_get_coverage_info (cfg->method);
4115 //g_assert (!mono_compile_aot);
4118 // cov->data [bb->dfn].iloffset = bb->cil_code - cfg->cil_code;
4119 /* this is not thread save, but good enough */
4120 /* fixme: howto handle overflows? */
4121 //x86_inc_mem (code, &cov->data [bb->dfn].count);
4124 if (mono_break_at_bb_method && mono_method_desc_full_match (mono_break_at_bb_method, cfg->method) && bb->block_num == mono_break_at_bb_bb_num) {
4125 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_INTERNAL_METHOD,
4126 (gpointer)"mono_break");
4127 code = emit_call_seq (cfg, code);
4130 MONO_BB_FOR_EACH_INS (bb, ins) {
4131 offset = code - cfg->native_code;
4133 max_len = ((guint8 *)ins_get_spec (ins->opcode))[MONO_INST_LEN];
4135 if (offset > (cfg->code_size - max_len - 16)) {
4136 cfg->code_size *= 2;
4137 cfg->native_code = g_realloc (cfg->native_code, cfg->code_size);
4138 code = cfg->native_code + offset;
4140 // if (ins->cil_code)
4141 // g_print ("cil code\n");
4142 mono_debug_record_line_number (cfg, ins, offset);
4144 switch (ins->opcode) {
4145 case OP_MEMORY_BARRIER:
4147 ARM_MOV_REG_IMM8 (code, ARMREG_R0, 0);
4148 ARM_MCR (code, 15, 0, ARMREG_R0, 7, 10, 5);
4152 #ifdef HAVE_AEABI_READ_TP
4153 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_INTERNAL_METHOD,
4154 (gpointer)"__aeabi_read_tp");
4155 code = emit_call_seq (cfg, code);
4157 ARM_LDR_IMM (code, ins->dreg, ARMREG_R0, ins->inst_offset);
4159 g_assert_not_reached ();
4162 case OP_ATOMIC_EXCHANGE_I4:
4163 case OP_ATOMIC_CAS_I4:
4164 case OP_ATOMIC_ADD_I4: {
4168 g_assert (v7_supported);
4171 if (ins->sreg1 != ARMREG_IP && ins->sreg2 != ARMREG_IP && ins->sreg3 != ARMREG_IP)
4173 else if (ins->sreg1 != ARMREG_R0 && ins->sreg2 != ARMREG_R0 && ins->sreg3 != ARMREG_R0)
4175 else if (ins->sreg1 != ARMREG_R1 && ins->sreg2 != ARMREG_R1 && ins->sreg3 != ARMREG_R1)
4179 g_assert (cfg->arch.atomic_tmp_offset != -1);
4180 ARM_STR_IMM (code, tmpreg, cfg->frame_reg, cfg->arch.atomic_tmp_offset);
4182 switch (ins->opcode) {
4183 case OP_ATOMIC_EXCHANGE_I4:
4185 ARM_DMB (code, ARM_DMB_SY);
4186 ARM_LDREX_REG (code, ARMREG_LR, ins->sreg1);
4187 ARM_STREX_REG (code, tmpreg, ins->sreg2, ins->sreg1);
4188 ARM_CMP_REG_IMM (code, tmpreg, 0, 0);
4190 ARM_B_COND (code, ARMCOND_NE, 0);
4191 arm_patch (buf [1], buf [0]);
4193 case OP_ATOMIC_CAS_I4:
4194 ARM_DMB (code, ARM_DMB_SY);
4196 ARM_LDREX_REG (code, ARMREG_LR, ins->sreg1);
4197 ARM_CMP_REG_REG (code, ARMREG_LR, ins->sreg3);
4199 ARM_B_COND (code, ARMCOND_NE, 0);
4200 ARM_STREX_REG (code, tmpreg, ins->sreg2, ins->sreg1);
4201 ARM_CMP_REG_IMM (code, tmpreg, 0, 0);
4203 ARM_B_COND (code, ARMCOND_NE, 0);
4204 arm_patch (buf [2], buf [0]);
4205 arm_patch (buf [1], code);
4207 case OP_ATOMIC_ADD_I4:
4209 ARM_DMB (code, ARM_DMB_SY);
4210 ARM_LDREX_REG (code, ARMREG_LR, ins->sreg1);
4211 ARM_ADD_REG_REG (code, ARMREG_LR, ARMREG_LR, ins->sreg2);
4212 ARM_STREX_REG (code, tmpreg, ARMREG_LR, ins->sreg1);
4213 ARM_CMP_REG_IMM (code, tmpreg, 0, 0);
4215 ARM_B_COND (code, ARMCOND_NE, 0);
4216 arm_patch (buf [1], buf [0]);
4219 g_assert_not_reached ();
4222 ARM_DMB (code, ARM_DMB_SY);
4223 if (tmpreg != ins->dreg)
4224 ARM_LDR_IMM (code, tmpreg, cfg->frame_reg, cfg->arch.atomic_tmp_offset);
4225 ARM_MOV_REG_REG (code, ins->dreg, ARMREG_LR);
4230 ppc_mullw (code, ppc_r4, ins->sreg1, ins->sreg2);
4231 ppc_mulhw (code, ppc_r3, ins->sreg1, ins->sreg2);
4234 ppc_mullw (code, ppc_r4, ins->sreg1, ins->sreg2);
4235 ppc_mulhwu (code, ppc_r3, ins->sreg1, ins->sreg2);
4237 case OP_STOREI1_MEMBASE_IMM:
4238 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_imm & 0xFF);
4239 g_assert (arm_is_imm12 (ins->inst_offset));
4240 ARM_STRB_IMM (code, ARMREG_LR, ins->inst_destbasereg, ins->inst_offset);
4242 case OP_STOREI2_MEMBASE_IMM:
4243 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_imm & 0xFFFF);
4244 g_assert (arm_is_imm8 (ins->inst_offset));
4245 ARM_STRH_IMM (code, ARMREG_LR, ins->inst_destbasereg, ins->inst_offset);
4247 case OP_STORE_MEMBASE_IMM:
4248 case OP_STOREI4_MEMBASE_IMM:
4249 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_imm);
4250 g_assert (arm_is_imm12 (ins->inst_offset));
4251 ARM_STR_IMM (code, ARMREG_LR, ins->inst_destbasereg, ins->inst_offset);
4253 case OP_STOREI1_MEMBASE_REG:
4254 g_assert (arm_is_imm12 (ins->inst_offset));
4255 ARM_STRB_IMM (code, ins->sreg1, ins->inst_destbasereg, ins->inst_offset);
4257 case OP_STOREI2_MEMBASE_REG:
4258 g_assert (arm_is_imm8 (ins->inst_offset));
4259 ARM_STRH_IMM (code, ins->sreg1, ins->inst_destbasereg, ins->inst_offset);
4261 case OP_STORE_MEMBASE_REG:
4262 case OP_STOREI4_MEMBASE_REG:
4263 /* this case is special, since it happens for spill code after lowering has been called */
4264 if (arm_is_imm12 (ins->inst_offset)) {
4265 ARM_STR_IMM (code, ins->sreg1, ins->inst_destbasereg, ins->inst_offset);
4267 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_offset);
4268 ARM_STR_REG_REG (code, ins->sreg1, ins->inst_destbasereg, ARMREG_LR);
4271 case OP_STOREI1_MEMINDEX:
4272 ARM_STRB_REG_REG (code, ins->sreg1, ins->inst_destbasereg, ins->sreg2);
4274 case OP_STOREI2_MEMINDEX:
4275 ARM_STRH_REG_REG (code, ins->sreg1, ins->inst_destbasereg, ins->sreg2);
4277 case OP_STORE_MEMINDEX:
4278 case OP_STOREI4_MEMINDEX:
4279 ARM_STR_REG_REG (code, ins->sreg1, ins->inst_destbasereg, ins->sreg2);
4282 g_assert_not_reached ();
4284 case OP_LOAD_MEMINDEX:
4285 case OP_LOADI4_MEMINDEX:
4286 case OP_LOADU4_MEMINDEX:
4287 ARM_LDR_REG_REG (code, ins->dreg, ins->inst_basereg, ins->sreg2);
4289 case OP_LOADI1_MEMINDEX:
4290 ARM_LDRSB_REG_REG (code, ins->dreg, ins->inst_basereg, ins->sreg2);
4292 case OP_LOADU1_MEMINDEX:
4293 ARM_LDRB_REG_REG (code, ins->dreg, ins->inst_basereg, ins->sreg2);
4295 case OP_LOADI2_MEMINDEX:
4296 ARM_LDRSH_REG_REG (code, ins->dreg, ins->inst_basereg, ins->sreg2);
4298 case OP_LOADU2_MEMINDEX:
4299 ARM_LDRH_REG_REG (code, ins->dreg, ins->inst_basereg, ins->sreg2);
4301 case OP_LOAD_MEMBASE:
4302 case OP_LOADI4_MEMBASE:
4303 case OP_LOADU4_MEMBASE:
4304 /* this case is special, since it happens for spill code after lowering has been called */
4305 if (arm_is_imm12 (ins->inst_offset)) {
4306 ARM_LDR_IMM (code, ins->dreg, ins->inst_basereg, ins->inst_offset);
4308 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_offset);
4309 ARM_LDR_REG_REG (code, ins->dreg, ins->inst_basereg, ARMREG_LR);
4312 case OP_LOADI1_MEMBASE:
4313 g_assert (arm_is_imm8 (ins->inst_offset));
4314 ARM_LDRSB_IMM (code, ins->dreg, ins->inst_basereg, ins->inst_offset);
4316 case OP_LOADU1_MEMBASE:
4317 g_assert (arm_is_imm12 (ins->inst_offset));
4318 ARM_LDRB_IMM (code, ins->dreg, ins->inst_basereg, ins->inst_offset);
4320 case OP_LOADU2_MEMBASE:
4321 g_assert (arm_is_imm8 (ins->inst_offset));
4322 ARM_LDRH_IMM (code, ins->dreg, ins->inst_basereg, ins->inst_offset);
4324 case OP_LOADI2_MEMBASE:
4325 g_assert (arm_is_imm8 (ins->inst_offset));
4326 ARM_LDRSH_IMM (code, ins->dreg, ins->inst_basereg, ins->inst_offset);
4328 case OP_ICONV_TO_I1:
4329 ARM_SHL_IMM (code, ins->dreg, ins->sreg1, 24);
4330 ARM_SAR_IMM (code, ins->dreg, ins->dreg, 24);
4332 case OP_ICONV_TO_I2:
4333 ARM_SHL_IMM (code, ins->dreg, ins->sreg1, 16);
4334 ARM_SAR_IMM (code, ins->dreg, ins->dreg, 16);
4336 case OP_ICONV_TO_U1:
4337 ARM_AND_REG_IMM8 (code, ins->dreg, ins->sreg1, 0xff);
4339 case OP_ICONV_TO_U2:
4340 ARM_SHL_IMM (code, ins->dreg, ins->sreg1, 16);
4341 ARM_SHR_IMM (code, ins->dreg, ins->dreg, 16);
4345 ARM_CMP_REG_REG (code, ins->sreg1, ins->sreg2);
4347 case OP_COMPARE_IMM:
4348 case OP_ICOMPARE_IMM:
4349 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
4350 g_assert (imm8 >= 0);
4351 ARM_CMP_REG_IMM (code, ins->sreg1, imm8, rot_amount);
4355 * gdb does not like encountering the hw breakpoint ins in the debugged code.
4356 * So instead of emitting a trap, we emit a call a C function and place a
4359 //*(int*)code = 0xef9f0001;
4362 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_INTERNAL_METHOD,
4363 (gpointer)"mono_break");
4364 code = emit_call_seq (cfg, code);
4366 case OP_RELAXED_NOP:
4371 case OP_DUMMY_STORE:
4372 case OP_DUMMY_ICONST:
4373 case OP_DUMMY_R8CONST:
4374 case OP_NOT_REACHED:
4377 case OP_SEQ_POINT: {
4379 MonoInst *info_var = cfg->arch.seq_point_info_var;
4380 MonoInst *ss_trigger_page_var = cfg->arch.ss_trigger_page_var;
4381 MonoInst *ss_read_var = cfg->arch.seq_point_read_var;
4382 MonoInst *ss_method_var = cfg->arch.seq_point_ss_method_var;
4383 MonoInst *bp_method_var = cfg->arch.seq_point_bp_method_var;
4385 int dreg = ARMREG_LR;
4387 if (cfg->soft_breakpoints) {
4388 g_assert (!cfg->compile_aot);
4392 * For AOT, we use one got slot per method, which will point to a
4393 * SeqPointInfo structure, containing all the information required
4394 * by the code below.
4396 if (cfg->compile_aot) {
4397 g_assert (info_var);
4398 g_assert (info_var->opcode == OP_REGOFFSET);
4399 g_assert (arm_is_imm12 (info_var->inst_offset));
4402 if (!cfg->soft_breakpoints) {
4404 * Read from the single stepping trigger page. This will cause a
4405 * SIGSEGV when single stepping is enabled.
4406 * We do this _before_ the breakpoint, so single stepping after
4407 * a breakpoint is hit will step to the next IL offset.
4409 g_assert (((guint64)(gsize)ss_trigger_page >> 32) == 0);
4412 if (ins->flags & MONO_INST_SINGLE_STEP_LOC) {
4413 if (cfg->soft_breakpoints) {
4414 /* Load the address of the sequence point trigger variable. */
4417 g_assert (var->opcode == OP_REGOFFSET);
4418 g_assert (arm_is_imm12 (var->inst_offset));
4419 ARM_LDR_IMM (code, dreg, var->inst_basereg, var->inst_offset);
4421 /* Read the value and check whether it is non-zero. */
4422 ARM_LDR_IMM (code, dreg, dreg, 0);
4423 ARM_CMP_REG_IMM (code, dreg, 0, 0);
4425 /* Load the address of the sequence point method. */
4426 var = ss_method_var;
4428 g_assert (var->opcode == OP_REGOFFSET);
4429 g_assert (arm_is_imm12 (var->inst_offset));
4430 ARM_LDR_IMM (code, dreg, var->inst_basereg, var->inst_offset);
4432 /* Call it conditionally. */
4433 ARM_BLX_REG_COND (code, ARMCOND_NE, dreg);
4435 if (cfg->compile_aot) {
4436 /* Load the trigger page addr from the variable initialized in the prolog */
4437 var = ss_trigger_page_var;
4439 g_assert (var->opcode == OP_REGOFFSET);
4440 g_assert (arm_is_imm12 (var->inst_offset));
4441 ARM_LDR_IMM (code, dreg, var->inst_basereg, var->inst_offset);
4443 #ifdef USE_JUMP_TABLES
4444 gpointer *jte = mono_jumptable_add_entry ();
4445 code = mono_arm_load_jumptable_entry (code, jte, dreg);
4446 jte [0] = ss_trigger_page;
4448 ARM_LDR_IMM (code, dreg, ARMREG_PC, 0);
4450 *(int*)code = (int)ss_trigger_page;
4454 ARM_LDR_IMM (code, dreg, dreg, 0);
4458 mono_add_seq_point (cfg, bb, ins, code - cfg->native_code);
4460 if (cfg->soft_breakpoints) {
4461 /* Load the address of the breakpoint method into ip. */
4462 var = bp_method_var;
4464 g_assert (var->opcode == OP_REGOFFSET);
4465 g_assert (arm_is_imm12 (var->inst_offset));
4466 ARM_LDR_IMM (code, dreg, var->inst_basereg, var->inst_offset);
4469 * A placeholder for a possible breakpoint inserted by
4470 * mono_arch_set_breakpoint ().
4473 } else if (cfg->compile_aot) {
4474 guint32 offset = code - cfg->native_code;
4477 ARM_LDR_IMM (code, dreg, info_var->inst_basereg, info_var->inst_offset);
4478 /* Add the offset */
4479 val = ((offset / 4) * sizeof (guint8*)) + MONO_STRUCT_OFFSET (SeqPointInfo, bp_addrs);
4480 /* Load the info->bp_addrs [offset], which is either 0 or the address of a trigger page */
4481 if (arm_is_imm12 ((int)val)) {
4482 ARM_LDR_IMM (code, dreg, dreg, val);
4484 ARM_ADD_REG_IMM (code, dreg, dreg, (val & 0xFF), 0);
4486 ARM_ADD_REG_IMM (code, dreg, dreg, (val & 0xFF00) >> 8, 24);
4488 ARM_ADD_REG_IMM (code, dreg, dreg, (val & 0xFF0000) >> 16, 16);
4489 g_assert (!(val & 0xFF000000));
4491 ARM_LDR_IMM (code, dreg, dreg, 0);
4493 /* What is faster, a branch or a load ? */
4494 ARM_CMP_REG_IMM (code, dreg, 0, 0);
4495 /* The breakpoint instruction */
4496 ARM_LDR_IMM_COND (code, dreg, dreg, 0, ARMCOND_NE);
4499 * A placeholder for a possible breakpoint inserted by
4500 * mono_arch_set_breakpoint ().
4502 for (i = 0; i < 4; ++i)
4509 ARM_ADDS_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4512 ARM_ADD_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4516 ARM_ADCS_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4519 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
4520 g_assert (imm8 >= 0);
4521 ARM_ADDS_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
4525 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
4526 g_assert (imm8 >= 0);
4527 ARM_ADD_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
4531 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
4532 g_assert (imm8 >= 0);
4533 ARM_ADCS_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
4536 ARM_ADD_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4537 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, "OverflowException");
4539 case OP_IADD_OVF_UN:
4540 ARM_ADD_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4541 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, "OverflowException");
4544 ARM_SUB_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4545 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, "OverflowException");
4547 case OP_ISUB_OVF_UN:
4548 ARM_SUB_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4549 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_TRUE, PPC_BR_EQ, "OverflowException");
4551 case OP_ADD_OVF_CARRY:
4552 ARM_ADCS_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4553 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, "OverflowException");
4555 case OP_ADD_OVF_UN_CARRY:
4556 ARM_ADCS_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4557 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, "OverflowException");
4559 case OP_SUB_OVF_CARRY:
4560 ARM_SBCS_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4561 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, "OverflowException");
4563 case OP_SUB_OVF_UN_CARRY:
4564 ARM_SBCS_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4565 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_TRUE, PPC_BR_EQ, "OverflowException");
4569 ARM_SUBS_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4572 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
4573 g_assert (imm8 >= 0);
4574 ARM_SUBS_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
4577 ARM_SUB_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4581 ARM_SBCS_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4585 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
4586 g_assert (imm8 >= 0);
4587 ARM_SUB_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
4591 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
4592 g_assert (imm8 >= 0);
4593 ARM_SBCS_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
4595 case OP_ARM_RSBS_IMM:
4596 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
4597 g_assert (imm8 >= 0);
4598 ARM_RSBS_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
4600 case OP_ARM_RSC_IMM:
4601 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
4602 g_assert (imm8 >= 0);
4603 ARM_RSC_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
4606 ARM_AND_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4610 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
4611 g_assert (imm8 >= 0);
4612 ARM_AND_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
4615 g_assert (v7s_supported);
4616 ARM_SDIV (code, ins->dreg, ins->sreg1, ins->sreg2);
4619 g_assert (v7s_supported);
4620 ARM_UDIV (code, ins->dreg, ins->sreg1, ins->sreg2);
4623 g_assert (v7s_supported);
4624 ARM_SDIV (code, ARMREG_LR, ins->sreg1, ins->sreg2);
4625 ARM_MLS (code, ins->dreg, ARMREG_LR, ins->sreg2, ins->sreg1);
4628 g_assert (v7s_supported);
4629 ARM_UDIV (code, ARMREG_LR, ins->sreg1, ins->sreg2);
4630 ARM_MLS (code, ins->dreg, ARMREG_LR, ins->sreg2, ins->sreg1);
4634 g_assert_not_reached ();
4636 ARM_ORR_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4640 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
4641 g_assert (imm8 >= 0);
4642 ARM_ORR_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
4645 ARM_EOR_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4649 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
4650 g_assert (imm8 >= 0);
4651 ARM_EOR_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
4654 ARM_SHL_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4659 ARM_SHL_IMM (code, ins->dreg, ins->sreg1, (ins->inst_imm & 0x1f));
4660 else if (ins->dreg != ins->sreg1)
4661 ARM_MOV_REG_REG (code, ins->dreg, ins->sreg1);
4664 ARM_SAR_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4669 ARM_SAR_IMM (code, ins->dreg, ins->sreg1, (ins->inst_imm & 0x1f));
4670 else if (ins->dreg != ins->sreg1)
4671 ARM_MOV_REG_REG (code, ins->dreg, ins->sreg1);
4674 case OP_ISHR_UN_IMM:
4676 ARM_SHR_IMM (code, ins->dreg, ins->sreg1, (ins->inst_imm & 0x1f));
4677 else if (ins->dreg != ins->sreg1)
4678 ARM_MOV_REG_REG (code, ins->dreg, ins->sreg1);
4681 ARM_SHR_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4684 ARM_MVN_REG_REG (code, ins->dreg, ins->sreg1);
4687 ARM_RSB_REG_IMM8 (code, ins->dreg, ins->sreg1, 0);
4690 if (ins->dreg == ins->sreg2)
4691 ARM_MUL_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4693 ARM_MUL_REG_REG (code, ins->dreg, ins->sreg2, ins->sreg1);
4696 g_assert_not_reached ();
4699 /* FIXME: handle ovf/ sreg2 != dreg */
4700 ARM_MUL_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4701 /* FIXME: MUL doesn't set the C/O flags on ARM */
4703 case OP_IMUL_OVF_UN:
4704 /* FIXME: handle ovf/ sreg2 != dreg */
4705 ARM_MUL_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4706 /* FIXME: MUL doesn't set the C/O flags on ARM */
4709 code = mono_arm_emit_load_imm (code, ins->dreg, ins->inst_c0);
4712 /* Load the GOT offset */
4713 mono_add_patch_info (cfg, offset, (MonoJumpInfoType)ins->inst_i1, ins->inst_p0);
4714 ARM_LDR_IMM (code, ins->dreg, ARMREG_PC, 0);
4716 *(gpointer*)code = NULL;
4718 /* Load the value from the GOT */
4719 ARM_LDR_REG_REG (code, ins->dreg, ARMREG_PC, ins->dreg);
4721 case OP_OBJC_GET_SELECTOR:
4722 mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_OBJC_SELECTOR_REF, ins->inst_p0);
4723 ARM_LDR_IMM (code, ins->dreg, ARMREG_PC, 0);
4725 *(gpointer*)code = NULL;
4727 ARM_LDR_REG_REG (code, ins->dreg, ARMREG_PC, ins->dreg);
4729 case OP_ICONV_TO_I4:
4730 case OP_ICONV_TO_U4:
4732 if (ins->dreg != ins->sreg1)
4733 ARM_MOV_REG_REG (code, ins->dreg, ins->sreg1);
4736 int saved = ins->sreg2;
4737 if (ins->sreg2 == ARM_LSW_REG) {
4738 ARM_MOV_REG_REG (code, ARMREG_LR, ins->sreg2);
4741 if (ins->sreg1 != ARM_LSW_REG)
4742 ARM_MOV_REG_REG (code, ARM_LSW_REG, ins->sreg1);
4743 if (saved != ARM_MSW_REG)
4744 ARM_MOV_REG_REG (code, ARM_MSW_REG, saved);
4749 ARM_CPYD (code, ins->dreg, ins->sreg1);
4751 case OP_FCONV_TO_R4:
4753 ARM_CVTD (code, ins->dreg, ins->sreg1);
4754 ARM_CVTS (code, ins->dreg, ins->dreg);
4759 * Keep in sync with mono_arch_emit_epilog
4761 g_assert (!cfg->method->save_lmf);
4763 code = emit_load_volatile_arguments (cfg, code);
4765 code = emit_big_add (code, ARMREG_SP, cfg->frame_reg, cfg->stack_usage);
4767 if (cfg->used_int_regs)
4768 ARM_POP (code, cfg->used_int_regs);
4769 ARM_POP (code, (1 << ARMREG_R7) | (1 << ARMREG_LR));
4771 ARM_POP (code, cfg->used_int_regs | (1 << ARMREG_LR));
4773 mono_add_patch_info (cfg, (guint8*) code - cfg->native_code, MONO_PATCH_INFO_METHOD_JUMP, ins->inst_p0);
4774 if (cfg->compile_aot) {
4775 ARM_LDR_IMM (code, ARMREG_IP, ARMREG_PC, 0);
4777 *(gpointer*)code = NULL;
4779 ARM_LDR_REG_REG (code, ARMREG_PC, ARMREG_PC, ARMREG_IP);
4781 code = mono_arm_patchable_b (code, ARMCOND_AL);
4785 MonoCallInst *call = (MonoCallInst*)ins;
4788 * The stack looks like the following:
4789 * <caller argument area>
4792 * <callee argument area>
4793 * Need to copy the arguments from the callee argument area to
4794 * the caller argument area, and pop the frame.
4796 if (call->stack_usage) {
4797 int i, prev_sp_offset = 0;
4799 /* Compute size of saved registers restored below */
4801 prev_sp_offset = 2 * 4;
4803 prev_sp_offset = 1 * 4;
4804 for (i = 0; i < 16; ++i) {
4805 if (cfg->used_int_regs & (1 << i))
4806 prev_sp_offset += 4;
4809 code = emit_big_add (code, ARMREG_IP, cfg->frame_reg, cfg->stack_usage + prev_sp_offset);
4811 /* Copy arguments on the stack to our argument area */
4812 for (i = 0; i < call->stack_usage; i += sizeof (mgreg_t)) {
4813 ARM_LDR_IMM (code, ARMREG_LR, ARMREG_SP, i);
4814 ARM_STR_IMM (code, ARMREG_LR, ARMREG_IP, i);
4819 * Keep in sync with mono_arch_emit_epilog
4821 g_assert (!cfg->method->save_lmf);
4823 code = emit_big_add (code, ARMREG_SP, cfg->frame_reg, cfg->stack_usage);
4825 if (cfg->used_int_regs)
4826 ARM_POP (code, cfg->used_int_regs);
4827 ARM_POP (code, (1 << ARMREG_R7) | (1 << ARMREG_LR));
4829 ARM_POP (code, cfg->used_int_regs | (1 << ARMREG_LR));
4832 mono_add_patch_info (cfg, (guint8*) code - cfg->native_code, MONO_PATCH_INFO_METHOD_JUMP, call->method);
4833 if (cfg->compile_aot) {
4834 ARM_LDR_IMM (code, ARMREG_IP, ARMREG_PC, 0);
4836 *(gpointer*)code = NULL;
4838 ARM_LDR_REG_REG (code, ARMREG_PC, ARMREG_PC, ARMREG_IP);
4840 code = mono_arm_patchable_b (code, ARMCOND_AL);
4845 /* ensure ins->sreg1 is not NULL */
4846 ARM_LDRB_IMM (code, ARMREG_LR, ins->sreg1, 0);
4849 g_assert (cfg->sig_cookie < 128);
4850 ARM_LDR_IMM (code, ARMREG_IP, cfg->frame_reg, cfg->sig_cookie);
4851 ARM_STR_IMM (code, ARMREG_IP, ins->sreg1, 0);
4860 call = (MonoCallInst*)ins;
4863 code = emit_float_args (cfg, call, code, &max_len, &offset);
4865 if (ins->flags & MONO_INST_HAS_METHOD)
4866 mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_METHOD, call->method);
4868 mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_ABS, call->fptr);
4869 code = emit_call_seq (cfg, code);
4870 ins->flags |= MONO_INST_GC_CALLSITE;
4871 ins->backend.pc_offset = code - cfg->native_code;
4872 code = emit_move_return_value (cfg, ins, code);
4878 case OP_VOIDCALL_REG:
4881 code = emit_float_args (cfg, (MonoCallInst *)ins, code, &max_len, &offset);
4883 code = emit_call_reg (code, ins->sreg1);
4884 ins->flags |= MONO_INST_GC_CALLSITE;
4885 ins->backend.pc_offset = code - cfg->native_code;
4886 code = emit_move_return_value (cfg, ins, code);
4888 case OP_FCALL_MEMBASE:
4889 case OP_LCALL_MEMBASE:
4890 case OP_VCALL_MEMBASE:
4891 case OP_VCALL2_MEMBASE:
4892 case OP_VOIDCALL_MEMBASE:
4893 case OP_CALL_MEMBASE: {
4894 gboolean imt_arg = FALSE;
4896 g_assert (ins->sreg1 != ARMREG_LR);
4897 call = (MonoCallInst*)ins;
4900 code = emit_float_args (cfg, call, code, &max_len, &offset);
4902 if (call->dynamic_imt_arg || call->method->klass->flags & TYPE_ATTRIBUTE_INTERFACE)
4904 if (!arm_is_imm12 (ins->inst_offset))
4905 code = mono_arm_emit_load_imm (code, ARMREG_IP, ins->inst_offset);
4906 #ifdef USE_JUMP_TABLES
4912 ARM_ADD_REG_IMM8 (code, ARMREG_LR, ARMREG_PC, LR_BIAS);
4914 ARM_MOV_REG_REG (code, ARMREG_LR, ARMREG_PC);
4916 if (!arm_is_imm12 (ins->inst_offset))
4917 ARM_LDR_REG_REG (code, ARMREG_PC, ins->sreg1, ARMREG_IP);
4919 ARM_LDR_IMM (code, ARMREG_PC, ins->sreg1, ins->inst_offset);
4922 * We can't embed the method in the code stream in PIC code, or
4924 * Instead, we put it in V5 in code emitted by
4925 * mono_arch_emit_imt_argument (), and embed NULL here to
4926 * signal the IMT thunk that the value is in V5.
4928 #ifdef USE_JUMP_TABLES
4929 /* In case of jumptables we always use value in V5. */
4932 if (call->dynamic_imt_arg)
4933 *((gpointer*)code) = NULL;
4935 *((gpointer*)code) = (gpointer)call->method;
4939 ins->flags |= MONO_INST_GC_CALLSITE;
4940 ins->backend.pc_offset = code - cfg->native_code;
4941 code = emit_move_return_value (cfg, ins, code);
4945 /* keep alignment */
4946 int alloca_waste = cfg->param_area;
4949 /* round the size to 8 bytes */
4950 ARM_ADD_REG_IMM8 (code, ins->dreg, ins->sreg1, 7);
4951 ARM_BIC_REG_IMM8 (code, ins->dreg, ins->dreg, 7);
4953 ARM_ADD_REG_IMM8 (code, ins->dreg, ins->dreg, alloca_waste);
4954 ARM_SUB_REG_REG (code, ARMREG_SP, ARMREG_SP, ins->dreg);
4955 /* memzero the area: dreg holds the size, sp is the pointer */
4956 if (ins->flags & MONO_INST_INIT) {
4957 guint8 *start_loop, *branch_to_cond;
4958 ARM_MOV_REG_IMM8 (code, ARMREG_LR, 0);
4959 branch_to_cond = code;
4962 ARM_STR_REG_REG (code, ARMREG_LR, ARMREG_SP, ins->dreg);
4963 arm_patch (branch_to_cond, code);
4964 /* decrement by 4 and set flags */
4965 ARM_SUBS_REG_IMM8 (code, ins->dreg, ins->dreg, sizeof (mgreg_t));
4966 ARM_B_COND (code, ARMCOND_GE, 0);
4967 arm_patch (code - 4, start_loop);
4969 ARM_ADD_REG_IMM8 (code, ins->dreg, ARMREG_SP, alloca_waste);
4974 MonoInst *var = cfg->dyn_call_var;
4976 g_assert (var->opcode == OP_REGOFFSET);
4977 g_assert (arm_is_imm12 (var->inst_offset));
4979 /* lr = args buffer filled by mono_arch_get_dyn_call_args () */
4980 ARM_MOV_REG_REG( code, ARMREG_LR, ins->sreg1);
4982 ARM_MOV_REG_REG( code, ARMREG_IP, ins->sreg2);
4984 /* Save args buffer */
4985 ARM_STR_IMM (code, ARMREG_LR, var->inst_basereg, var->inst_offset);
4987 /* Set stack slots using R0 as scratch reg */
4988 /* MONO_ARCH_DYN_CALL_PARAM_AREA gives the size of stack space available */
4989 for (i = 0; i < DYN_CALL_STACK_ARGS; ++i) {
4990 ARM_LDR_IMM (code, ARMREG_R0, ARMREG_LR, (PARAM_REGS + i) * sizeof (mgreg_t));
4991 ARM_STR_IMM (code, ARMREG_R0, ARMREG_SP, i * sizeof (mgreg_t));
4994 /* Set argument registers */
4995 for (i = 0; i < PARAM_REGS; ++i)
4996 ARM_LDR_IMM (code, i, ARMREG_LR, i * sizeof (mgreg_t));
4999 ARM_MOV_REG_REG (code, ARMREG_LR, ARMREG_PC);
5000 ARM_MOV_REG_REG (code, ARMREG_PC, ARMREG_IP);
5003 ARM_LDR_IMM (code, ARMREG_IP, var->inst_basereg, var->inst_offset);
5004 ARM_STR_IMM (code, ARMREG_R0, ARMREG_IP, MONO_STRUCT_OFFSET (DynCallArgs, res));
5005 ARM_STR_IMM (code, ARMREG_R1, ARMREG_IP, MONO_STRUCT_OFFSET (DynCallArgs, res2));
5009 if (ins->sreg1 != ARMREG_R0)
5010 ARM_MOV_REG_REG (code, ARMREG_R0, ins->sreg1);
5011 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_INTERNAL_METHOD,
5012 (gpointer)"mono_arch_throw_exception");
5013 code = emit_call_seq (cfg, code);
5017 if (ins->sreg1 != ARMREG_R0)
5018 ARM_MOV_REG_REG (code, ARMREG_R0, ins->sreg1);
5019 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_INTERNAL_METHOD,
5020 (gpointer)"mono_arch_rethrow_exception");
5021 code = emit_call_seq (cfg, code);
5024 case OP_START_HANDLER: {
5025 MonoInst *spvar = mono_find_spvar_for_region (cfg, bb->region);
5028 /* Reserve a param area, see filter-stack.exe */
5029 if (cfg->param_area) {
5030 if ((i = mono_arm_is_rotated_imm8 (cfg->param_area, &rot_amount)) >= 0) {
5031 ARM_SUB_REG_IMM (code, ARMREG_SP, ARMREG_SP, i, rot_amount);
5033 code = mono_arm_emit_load_imm (code, ARMREG_IP, cfg->param_area);
5034 ARM_SUB_REG_REG (code, ARMREG_SP, ARMREG_SP, ARMREG_IP);
5038 if (arm_is_imm12 (spvar->inst_offset)) {
5039 ARM_STR_IMM (code, ARMREG_LR, spvar->inst_basereg, spvar->inst_offset);
5041 code = mono_arm_emit_load_imm (code, ARMREG_IP, spvar->inst_offset);
5042 ARM_STR_REG_REG (code, ARMREG_LR, spvar->inst_basereg, ARMREG_IP);
5046 case OP_ENDFILTER: {
5047 MonoInst *spvar = mono_find_spvar_for_region (cfg, bb->region);
5050 /* Free the param area */
5051 if (cfg->param_area) {
5052 if ((i = mono_arm_is_rotated_imm8 (cfg->param_area, &rot_amount)) >= 0) {
5053 ARM_ADD_REG_IMM (code, ARMREG_SP, ARMREG_SP, i, rot_amount);
5055 code = mono_arm_emit_load_imm (code, ARMREG_IP, cfg->param_area);
5056 ARM_ADD_REG_REG (code, ARMREG_SP, ARMREG_SP, ARMREG_IP);
5060 if (ins->sreg1 != ARMREG_R0)
5061 ARM_MOV_REG_REG (code, ARMREG_R0, ins->sreg1);
5062 if (arm_is_imm12 (spvar->inst_offset)) {
5063 ARM_LDR_IMM (code, ARMREG_IP, spvar->inst_basereg, spvar->inst_offset);
5065 g_assert (ARMREG_IP != spvar->inst_basereg);
5066 code = mono_arm_emit_load_imm (code, ARMREG_IP, spvar->inst_offset);
5067 ARM_LDR_REG_REG (code, ARMREG_IP, spvar->inst_basereg, ARMREG_IP);
5069 ARM_MOV_REG_REG (code, ARMREG_PC, ARMREG_IP);
5072 case OP_ENDFINALLY: {
5073 MonoInst *spvar = mono_find_spvar_for_region (cfg, bb->region);
5076 /* Free the param area */
5077 if (cfg->param_area) {
5078 if ((i = mono_arm_is_rotated_imm8 (cfg->param_area, &rot_amount)) >= 0) {
5079 ARM_ADD_REG_IMM (code, ARMREG_SP, ARMREG_SP, i, rot_amount);
5081 code = mono_arm_emit_load_imm (code, ARMREG_IP, cfg->param_area);
5082 ARM_ADD_REG_REG (code, ARMREG_SP, ARMREG_SP, ARMREG_IP);
5086 if (arm_is_imm12 (spvar->inst_offset)) {
5087 ARM_LDR_IMM (code, ARMREG_IP, spvar->inst_basereg, spvar->inst_offset);
5089 g_assert (ARMREG_IP != spvar->inst_basereg);
5090 code = mono_arm_emit_load_imm (code, ARMREG_IP, spvar->inst_offset);
5091 ARM_LDR_REG_REG (code, ARMREG_IP, spvar->inst_basereg, ARMREG_IP);
5093 ARM_MOV_REG_REG (code, ARMREG_PC, ARMREG_IP);
5096 case OP_CALL_HANDLER:
5097 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_BB, ins->inst_target_bb);
5098 code = mono_arm_patchable_bl (code, ARMCOND_AL);
5099 mono_cfg_add_try_hole (cfg, ins->inst_eh_block, code, bb);
5102 ins->inst_c0 = code - cfg->native_code;
5105 /*if (ins->inst_target_bb->native_offset) {
5107 //x86_jump_code (code, cfg->native_code + ins->inst_target_bb->native_offset);
5109 mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_BB, ins->inst_target_bb);
5110 code = mono_arm_patchable_b (code, ARMCOND_AL);
5114 ARM_MOV_REG_REG (code, ARMREG_PC, ins->sreg1);
5118 * In the normal case we have:
5119 * ldr pc, [pc, ins->sreg1 << 2]
5122 * ldr lr, [pc, ins->sreg1 << 2]
5124 * After follows the data.
5125 * FIXME: add aot support.
5127 mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_SWITCH, ins->inst_p0);
5128 #ifdef USE_JUMP_TABLES
5130 gpointer *jte = mono_jumptable_add_entries (GPOINTER_TO_INT (ins->klass));
5131 code = mono_arm_load_jumptable_entry_addr (code, jte, ARMREG_IP);
5132 ARM_LDR_REG_REG_SHIFT (code, ARMREG_PC, ARMREG_IP, ins->sreg1, ARMSHIFT_LSL, 2);
5136 max_len += 4 * GPOINTER_TO_INT (ins->klass);
5137 if (offset + max_len > (cfg->code_size - 16)) {
5138 cfg->code_size += max_len;
5139 cfg->code_size *= 2;
5140 cfg->native_code = g_realloc (cfg->native_code, cfg->code_size);
5141 code = cfg->native_code + offset;
5143 ARM_LDR_REG_REG_SHIFT (code, ARMREG_PC, ARMREG_PC, ins->sreg1, ARMSHIFT_LSL, 2);
5145 code += 4 * GPOINTER_TO_INT (ins->klass);
5150 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 0, ARMCOND_NE);
5151 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_EQ);
5155 ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
5156 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_LT);
5160 ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
5161 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_LO);
5165 ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
5166 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_GT);
5170 ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
5171 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_HI);
5174 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_NE);
5175 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 0, ARMCOND_EQ);
5178 ARM_MOV_REG_IMM8 (code, ins->dreg, 1);
5179 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 0, ARMCOND_LT);
5182 ARM_MOV_REG_IMM8 (code, ins->dreg, 1);
5183 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 0, ARMCOND_GT);
5187 ARM_MOV_REG_IMM8 (code, ins->dreg, 1);
5188 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 0, ARMCOND_LO);
5190 case OP_COND_EXC_EQ:
5191 case OP_COND_EXC_NE_UN:
5192 case OP_COND_EXC_LT:
5193 case OP_COND_EXC_LT_UN:
5194 case OP_COND_EXC_GT:
5195 case OP_COND_EXC_GT_UN:
5196 case OP_COND_EXC_GE:
5197 case OP_COND_EXC_GE_UN:
5198 case OP_COND_EXC_LE:
5199 case OP_COND_EXC_LE_UN:
5200 EMIT_COND_SYSTEM_EXCEPTION (ins->opcode - OP_COND_EXC_EQ, ins->inst_p1);
5202 case OP_COND_EXC_IEQ:
5203 case OP_COND_EXC_INE_UN:
5204 case OP_COND_EXC_ILT:
5205 case OP_COND_EXC_ILT_UN:
5206 case OP_COND_EXC_IGT:
5207 case OP_COND_EXC_IGT_UN:
5208 case OP_COND_EXC_IGE:
5209 case OP_COND_EXC_IGE_UN:
5210 case OP_COND_EXC_ILE:
5211 case OP_COND_EXC_ILE_UN:
5212 EMIT_COND_SYSTEM_EXCEPTION (ins->opcode - OP_COND_EXC_IEQ, ins->inst_p1);
5215 case OP_COND_EXC_IC:
5216 EMIT_COND_SYSTEM_EXCEPTION_FLAGS (ARMCOND_CS, ins->inst_p1);
5218 case OP_COND_EXC_OV:
5219 case OP_COND_EXC_IOV:
5220 EMIT_COND_SYSTEM_EXCEPTION_FLAGS (ARMCOND_VS, ins->inst_p1);
5222 case OP_COND_EXC_NC:
5223 case OP_COND_EXC_INC:
5224 EMIT_COND_SYSTEM_EXCEPTION_FLAGS (ARMCOND_CC, ins->inst_p1);
5226 case OP_COND_EXC_NO:
5227 case OP_COND_EXC_INO:
5228 EMIT_COND_SYSTEM_EXCEPTION_FLAGS (ARMCOND_VC, ins->inst_p1);
5240 EMIT_COND_BRANCH (ins, ins->opcode - OP_IBEQ);
5243 /* floating point opcodes */
5245 if (cfg->compile_aot) {
5246 ARM_FLDD (code, ins->dreg, ARMREG_PC, 0);
5248 *(guint32*)code = ((guint32*)(ins->inst_p0))[0];
5250 *(guint32*)code = ((guint32*)(ins->inst_p0))[1];
5253 /* FIXME: we can optimize the imm load by dealing with part of
5254 * the displacement in LDFD (aligning to 512).
5256 code = mono_arm_emit_load_imm (code, ARMREG_LR, (guint32)ins->inst_p0);
5257 ARM_FLDD (code, ins->dreg, ARMREG_LR, 0);
5261 if (cfg->compile_aot) {
5262 ARM_FLDS (code, ins->dreg, ARMREG_PC, 0);
5264 *(guint32*)code = ((guint32*)(ins->inst_p0))[0];
5266 ARM_CVTS (code, ins->dreg, ins->dreg);
5268 code = mono_arm_emit_load_imm (code, ARMREG_LR, (guint32)ins->inst_p0);
5269 ARM_FLDS (code, ins->dreg, ARMREG_LR, 0);
5270 ARM_CVTS (code, ins->dreg, ins->dreg);
5273 case OP_STORER8_MEMBASE_REG:
5274 /* This is generated by the local regalloc pass which runs after the lowering pass */
5275 if (!arm_is_fpimm8 (ins->inst_offset)) {
5276 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_offset);
5277 ARM_ADD_REG_REG (code, ARMREG_LR, ARMREG_LR, ins->inst_destbasereg);
5278 ARM_FSTD (code, ins->sreg1, ARMREG_LR, 0);
5280 ARM_FSTD (code, ins->sreg1, ins->inst_destbasereg, ins->inst_offset);
5283 case OP_LOADR8_MEMBASE:
5284 /* This is generated by the local regalloc pass which runs after the lowering pass */
5285 if (!arm_is_fpimm8 (ins->inst_offset)) {
5286 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_offset);
5287 ARM_ADD_REG_REG (code, ARMREG_LR, ARMREG_LR, ins->inst_basereg);
5288 ARM_FLDD (code, ins->dreg, ARMREG_LR, 0);
5290 ARM_FLDD (code, ins->dreg, ins->inst_basereg, ins->inst_offset);
5293 case OP_STORER4_MEMBASE_REG:
5294 g_assert (arm_is_fpimm8 (ins->inst_offset));
5295 code = mono_arm_emit_vfp_scratch_save (cfg, code, vfp_scratch1);
5296 ARM_CVTD (code, vfp_scratch1, ins->sreg1);
5297 ARM_FSTS (code, vfp_scratch1, ins->inst_destbasereg, ins->inst_offset);
5298 code = mono_arm_emit_vfp_scratch_restore (cfg, code, vfp_scratch1);
5300 case OP_LOADR4_MEMBASE:
5301 g_assert (arm_is_fpimm8 (ins->inst_offset));
5302 code = mono_arm_emit_vfp_scratch_save (cfg, code, vfp_scratch1);
5303 ARM_FLDS (code, vfp_scratch1, ins->inst_basereg, ins->inst_offset);
5304 ARM_CVTS (code, ins->dreg, vfp_scratch1);
5305 code = mono_arm_emit_vfp_scratch_restore (cfg, code, vfp_scratch1);
5307 case OP_ICONV_TO_R_UN: {
5308 g_assert_not_reached ();
5311 case OP_ICONV_TO_R4:
5312 code = mono_arm_emit_vfp_scratch_save (cfg, code, vfp_scratch1);
5313 ARM_FMSR (code, vfp_scratch1, ins->sreg1);
5314 ARM_FSITOS (code, vfp_scratch1, vfp_scratch1);
5315 ARM_CVTS (code, ins->dreg, vfp_scratch1);
5316 code = mono_arm_emit_vfp_scratch_restore (cfg, code, vfp_scratch1);
5318 case OP_ICONV_TO_R8:
5319 code = mono_arm_emit_vfp_scratch_save (cfg, code, vfp_scratch1);
5320 ARM_FMSR (code, vfp_scratch1, ins->sreg1);
5321 ARM_FSITOD (code, ins->dreg, vfp_scratch1);
5322 code = mono_arm_emit_vfp_scratch_restore (cfg, code, vfp_scratch1);
5326 if (mono_method_signature (cfg->method)->ret->type == MONO_TYPE_R4) {
5327 ARM_CVTD (code, ARM_VFP_F0, ins->sreg1);
5329 if (!IS_HARD_FLOAT) {
5330 ARM_FMRS (code, ARMREG_R0, ARM_VFP_F0);
5333 if (IS_HARD_FLOAT) {
5334 ARM_CPYD (code, ARM_VFP_D0, ins->sreg1);
5336 ARM_FMRRD (code, ARMREG_R0, ARMREG_R1, ins->sreg1);
5340 case OP_FCONV_TO_I1:
5341 code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 1, TRUE);
5343 case OP_FCONV_TO_U1:
5344 code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 1, FALSE);
5346 case OP_FCONV_TO_I2:
5347 code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 2, TRUE);
5349 case OP_FCONV_TO_U2:
5350 code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 2, FALSE);
5352 case OP_FCONV_TO_I4:
5354 code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 4, TRUE);
5356 case OP_FCONV_TO_U4:
5358 code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 4, FALSE);
5360 case OP_FCONV_TO_I8:
5361 case OP_FCONV_TO_U8:
5362 g_assert_not_reached ();
5363 /* Implemented as helper calls */
5365 case OP_LCONV_TO_R_UN:
5366 g_assert_not_reached ();
5367 /* Implemented as helper calls */
5369 case OP_LCONV_TO_OVF_I4_2: {
5370 guint8 *high_bit_not_set, *valid_negative, *invalid_negative, *valid_positive;
* Valid ints: 0xffffffff:0x80000000 to 0x00000000:0x7fffffff (high word:low word)
5375 ARM_CMP_REG_IMM8 (code, ins->sreg1, 0);
5376 high_bit_not_set = code;
5377 ARM_B_COND (code, ARMCOND_GE, 0); /*branch if bit 31 of the lower part is not set*/
5379 ARM_CMN_REG_IMM8 (code, ins->sreg2, 1); /*This have the same effect as CMP reg, 0xFFFFFFFF */
5380 valid_negative = code;
5381 ARM_B_COND (code, ARMCOND_EQ, 0); /*branch if upper part == 0xFFFFFFFF (lower part has bit 31 set) */
5382 invalid_negative = code;
5383 ARM_B_COND (code, ARMCOND_AL, 0);
5385 arm_patch (high_bit_not_set, code);
5387 ARM_CMP_REG_IMM8 (code, ins->sreg2, 0);
5388 valid_positive = code;
5389 ARM_B_COND (code, ARMCOND_EQ, 0); /*branch if upper part == 0 (lower part has bit 31 clear)*/
5391 arm_patch (invalid_negative, code);
5392 EMIT_COND_SYSTEM_EXCEPTION_FLAGS (ARMCOND_AL, "OverflowException");
5394 arm_patch (valid_negative, code);
5395 arm_patch (valid_positive, code);
5397 if (ins->dreg != ins->sreg1)
5398 ARM_MOV_REG_REG (code, ins->dreg, ins->sreg1);
5402 ARM_VFP_ADDD (code, ins->dreg, ins->sreg1, ins->sreg2);
5405 ARM_VFP_SUBD (code, ins->dreg, ins->sreg1, ins->sreg2);
5408 ARM_VFP_MULD (code, ins->dreg, ins->sreg1, ins->sreg2);
5411 ARM_VFP_DIVD (code, ins->dreg, ins->sreg1, ins->sreg2);
5414 ARM_NEGD (code, ins->dreg, ins->sreg1);
5418 g_assert_not_reached ();
5422 ARM_CMPD (code, ins->sreg1, ins->sreg2);
5428 ARM_CMPD (code, ins->sreg1, ins->sreg2);
5431 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 0, ARMCOND_NE);
5432 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_EQ);
5436 ARM_CMPD (code, ins->sreg1, ins->sreg2);
5439 ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
5440 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_MI);
5444 ARM_CMPD (code, ins->sreg1, ins->sreg2);
5447 ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
5448 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_MI);
5449 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_VS);
5453 ARM_CMPD (code, ins->sreg2, ins->sreg1);
5456 ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
5457 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_MI);
5461 ARM_CMPD (code, ins->sreg2, ins->sreg1);
5464 ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
5465 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_MI);
5466 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_VS);
5470 ARM_CMPD (code, ins->sreg1, ins->sreg2);
5473 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_NE);
5474 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 0, ARMCOND_EQ);
5478 ARM_CMPD (code, ins->sreg1, ins->sreg2);
5481 ARM_MOV_REG_IMM8 (code, ins->dreg, 1);
5482 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 0, ARMCOND_MI);
5486 ARM_CMPD (code, ins->sreg2, ins->sreg1);
5489 ARM_MOV_REG_IMM8 (code, ins->dreg, 1);
5490 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 0, ARMCOND_MI);
5493 /* ARM FPA flags table:
5494 * N Less than ARMCOND_MI
5495 * Z Equal ARMCOND_EQ
5496 * C Greater Than or Equal ARMCOND_CS
5497 * V Unordered ARMCOND_VS
5500 EMIT_COND_BRANCH (ins, OP_IBEQ - OP_IBEQ);
5503 EMIT_COND_BRANCH (ins, OP_IBNE_UN - OP_IBEQ);
5506 EMIT_COND_BRANCH_FLAGS (ins, ARMCOND_MI); /* N set */
5509 EMIT_COND_BRANCH_FLAGS (ins, ARMCOND_VS); /* V set */
5510 EMIT_COND_BRANCH_FLAGS (ins, ARMCOND_MI); /* N set */
5516 g_assert_not_reached ();
5520 EMIT_COND_BRANCH_FLAGS (ins, ARMCOND_GE);
/* FPA requires EQ even though the docs suggest that just CS is enough */
5523 EMIT_COND_BRANCH_FLAGS (ins, ARMCOND_EQ);
5524 EMIT_COND_BRANCH_FLAGS (ins, ARMCOND_CS);
5528 EMIT_COND_BRANCH_FLAGS (ins, ARMCOND_VS); /* V set */
5529 EMIT_COND_BRANCH_FLAGS (ins, ARMCOND_GE);
5534 code = mono_arm_emit_vfp_scratch_save (cfg, code, vfp_scratch1);
5535 code = mono_arm_emit_vfp_scratch_save (cfg, code, vfp_scratch2);
5537 #ifdef USE_JUMP_TABLES
5539 gpointer *jte = mono_jumptable_add_entries (2);
5540 jte [0] = GUINT_TO_POINTER (0xffffffff);
5541 jte [1] = GUINT_TO_POINTER (0x7fefffff);
5542 code = mono_arm_load_jumptable_entry_addr (code, jte, ARMREG_IP);
5543 ARM_FLDD (code, vfp_scratch1, ARMREG_IP, 0);
5546 ARM_ABSD (code, vfp_scratch2, ins->sreg1);
5547 ARM_FLDD (code, vfp_scratch1, ARMREG_PC, 0);
5549 *(guint32*)code = 0xffffffff;
5551 *(guint32*)code = 0x7fefffff;
5554 ARM_CMPD (code, vfp_scratch2, vfp_scratch1);
5556 EMIT_COND_SYSTEM_EXCEPTION_FLAGS (ARMCOND_GT, "ArithmeticException");
5557 ARM_CMPD (code, ins->sreg1, ins->sreg1);
5559 EMIT_COND_SYSTEM_EXCEPTION_FLAGS (ARMCOND_VS, "ArithmeticException");
5560 ARM_CPYD (code, ins->dreg, ins->sreg1);
5562 code = mono_arm_emit_vfp_scratch_restore (cfg, code, vfp_scratch1);
5563 code = mono_arm_emit_vfp_scratch_restore (cfg, code, vfp_scratch2);
5568 case OP_GC_LIVENESS_DEF:
5569 case OP_GC_LIVENESS_USE:
5570 case OP_GC_PARAM_SLOT_LIVENESS_DEF:
5571 ins->backend.pc_offset = code - cfg->native_code;
5573 case OP_GC_SPILL_SLOT_LIVENESS_DEF:
5574 ins->backend.pc_offset = code - cfg->native_code;
5575 bb->spill_slot_defs = g_slist_prepend_mempool (cfg->mempool, bb->spill_slot_defs, ins);
5579 g_warning ("unknown opcode %s in %s()\n", mono_inst_name (ins->opcode), __FUNCTION__);
5580 g_assert_not_reached ();
5583 if ((cfg->opt & MONO_OPT_BRANCH) && ((code - cfg->native_code - offset) > max_len)) {
5584 g_warning ("wrong maximal instruction length of instruction %s (expected %d, got %d)",
5585 mono_inst_name (ins->opcode), max_len, code - cfg->native_code - offset);
5586 g_assert_not_reached ();
5592 last_offset = offset;
5595 cfg->code_len = code - cfg->native_code;
5598 #endif /* DISABLE_JIT */
5600 #ifdef HAVE_AEABI_READ_TP
5601 void __aeabi_read_tp (void);
5605 mono_arch_register_lowlevel_calls (void)
5607 /* The signature doesn't matter */
5608 mono_register_jit_icall (mono_arm_throw_exception, "mono_arm_throw_exception", mono_create_icall_signature ("void"), TRUE);
5609 mono_register_jit_icall (mono_arm_throw_exception_by_token, "mono_arm_throw_exception_by_token", mono_create_icall_signature ("void"), TRUE);
5611 #ifndef MONO_CROSS_COMPILE
5612 #ifdef HAVE_AEABI_READ_TP
5613 mono_register_jit_icall (__aeabi_read_tp, "__aeabi_read_tp", mono_create_icall_signature ("void"), TRUE);
5618 #define patch_lis_ori(ip,val) do {\
5619 guint16 *__lis_ori = (guint16*)(ip); \
5620 __lis_ori [1] = (((guint32)(val)) >> 16) & 0xffff; \
5621 __lis_ori [3] = ((guint32)(val)) & 0xffff; \
5625 mono_arch_patch_code (MonoMethod *method, MonoDomain *domain, guint8 *code, MonoJumpInfo *ji, MonoCodeManager *dyn_code_mp, gboolean run_cctors)
5627 MonoJumpInfo *patch_info;
5628 gboolean compile_aot = !run_cctors;
5630 for (patch_info = ji; patch_info; patch_info = patch_info->next) {
5631 unsigned char *ip = patch_info->ip.i + code;
5632 const unsigned char *target;
5634 if (patch_info->type == MONO_PATCH_INFO_SWITCH && !compile_aot) {
5635 #ifdef USE_JUMP_TABLES
5636 gpointer *jt = mono_jumptable_get_entry (ip);
5638 gpointer *jt = (gpointer*)(ip + 8);
5641 /* jt is the inlined jump table, 2 instructions after ip
5642 * In the normal case we store the absolute addresses,
5643 * otherwise the displacements.
5645 for (i = 0; i < patch_info->data.table->table_size; i++)
5646 jt [i] = code + (int)patch_info->data.table->table [i];
5651 switch (patch_info->type) {
5652 case MONO_PATCH_INFO_BB:
5653 case MONO_PATCH_INFO_LABEL:
5656 /* No need to patch these */
5661 target = mono_resolve_patch_target (method, domain, code, patch_info, run_cctors);
5663 switch (patch_info->type) {
5664 case MONO_PATCH_INFO_IP:
5665 g_assert_not_reached ();
5666 patch_lis_ori (ip, ip);
5668 case MONO_PATCH_INFO_METHOD_REL:
5669 g_assert_not_reached ();
5670 *((gpointer *)(ip)) = code + patch_info->data.offset;
5672 case MONO_PATCH_INFO_METHODCONST:
5673 case MONO_PATCH_INFO_CLASS:
5674 case MONO_PATCH_INFO_IMAGE:
5675 case MONO_PATCH_INFO_FIELD:
5676 case MONO_PATCH_INFO_VTABLE:
5677 case MONO_PATCH_INFO_IID:
5678 case MONO_PATCH_INFO_SFLDA:
5679 case MONO_PATCH_INFO_LDSTR:
5680 case MONO_PATCH_INFO_TYPE_FROM_HANDLE:
5681 case MONO_PATCH_INFO_LDTOKEN:
5682 g_assert_not_reached ();
5683 /* from OP_AOTCONST : lis + ori */
5684 patch_lis_ori (ip, target);
5686 case MONO_PATCH_INFO_R4:
5687 case MONO_PATCH_INFO_R8:
5688 g_assert_not_reached ();
5689 *((gconstpointer *)(ip + 2)) = patch_info->data.target;
5691 case MONO_PATCH_INFO_EXC_NAME:
5692 g_assert_not_reached ();
5693 *((gconstpointer *)(ip + 1)) = patch_info->data.name;
5695 case MONO_PATCH_INFO_NONE:
5696 case MONO_PATCH_INFO_BB_OVF:
5697 case MONO_PATCH_INFO_EXC_OVF:
5698 /* everything is dealt with at epilog output time */
5703 arm_patch_general (domain, ip, target, dyn_code_mp);
5710 * Stack frame layout:
5712 * ------------------- fp
5713 * MonoLMF structure or saved registers
5714 * -------------------
5716 * -------------------
5718 * -------------------
5719 * optional 8 bytes for tracing
5720 * -------------------
5721 * param area size is cfg->param_area
5722 * ------------------- sp
5725 mono_arch_emit_prolog (MonoCompile *cfg)
5727 MonoMethod *method = cfg->method;
5729 MonoMethodSignature *sig;
5731 int alloc_size, orig_alloc_size, pos, max_offset, i, rot_amount;
5736 int prev_sp_offset, reg_offset;
5738 if (mono_jit_trace_calls != NULL && mono_trace_eval (method))
5741 sig = mono_method_signature (method);
5742 cfg->code_size = 256 + sig->param_count * 64;
5743 code = cfg->native_code = g_malloc (cfg->code_size);
5745 mono_emit_unwind_op_def_cfa (cfg, code, ARMREG_SP, 0);
5747 alloc_size = cfg->stack_offset;
5753 * The iphone uses R7 as the frame pointer, and it points at the saved
5758 * We can't use r7 as a frame pointer since it points into the middle of
5759 * the frame, so we keep using our own frame pointer.
5760 * FIXME: Optimize this.
5762 ARM_PUSH (code, (1 << ARMREG_R7) | (1 << ARMREG_LR));
5763 ARM_MOV_REG_REG (code, ARMREG_R7, ARMREG_SP);
5764 prev_sp_offset += 8; /* r7 and lr */
5765 mono_emit_unwind_op_def_cfa_offset (cfg, code, prev_sp_offset);
5766 mono_emit_unwind_op_offset (cfg, code, ARMREG_R7, (- prev_sp_offset) + 0);
5769 if (!method->save_lmf) {
5771 /* No need to push LR again */
5772 if (cfg->used_int_regs)
5773 ARM_PUSH (code, cfg->used_int_regs);
5775 ARM_PUSH (code, cfg->used_int_regs | (1 << ARMREG_LR));
5776 prev_sp_offset += 4;
5778 for (i = 0; i < 16; ++i) {
5779 if (cfg->used_int_regs & (1 << i))
5780 prev_sp_offset += 4;
5782 mono_emit_unwind_op_def_cfa_offset (cfg, code, prev_sp_offset);
5784 for (i = 0; i < 16; ++i) {
5785 if ((cfg->used_int_regs & (1 << i))) {
5786 mono_emit_unwind_op_offset (cfg, code, i, (- prev_sp_offset) + reg_offset);
5787 mini_gc_set_slot_type_from_cfa (cfg, (- prev_sp_offset) + reg_offset, SLOT_NOREF);
5792 mono_emit_unwind_op_offset (cfg, code, ARMREG_LR, -4);
5793 mini_gc_set_slot_type_from_cfa (cfg, -4, SLOT_NOREF);
5795 mono_emit_unwind_op_offset (cfg, code, ARMREG_LR, -4);
5796 mini_gc_set_slot_type_from_cfa (cfg, -4, SLOT_NOREF);
5799 ARM_MOV_REG_REG (code, ARMREG_IP, ARMREG_SP);
5800 ARM_PUSH (code, 0x5ff0);
5801 prev_sp_offset += 4 * 10; /* all but r0-r3, sp and pc */
5802 mono_emit_unwind_op_def_cfa_offset (cfg, code, prev_sp_offset);
5804 for (i = 0; i < 16; ++i) {
5805 if ((i > ARMREG_R3) && (i != ARMREG_SP) && (i != ARMREG_PC)) {
5806 /* The original r7 is saved at the start */
5807 if (!(iphone_abi && i == ARMREG_R7))
5808 mono_emit_unwind_op_offset (cfg, code, i, (- prev_sp_offset) + reg_offset);
5812 g_assert (reg_offset == 4 * 10);
5813 pos += sizeof (MonoLMF) - (4 * 10);
5817 orig_alloc_size = alloc_size;
5818 // align to MONO_ARCH_FRAME_ALIGNMENT bytes
5819 if (alloc_size & (MONO_ARCH_FRAME_ALIGNMENT - 1)) {
5820 alloc_size += MONO_ARCH_FRAME_ALIGNMENT - 1;
5821 alloc_size &= ~(MONO_ARCH_FRAME_ALIGNMENT - 1);
5824 /* the stack used in the pushed regs */
5825 if (prev_sp_offset & 4)
5827 cfg->stack_usage = alloc_size;
5829 if ((i = mono_arm_is_rotated_imm8 (alloc_size, &rot_amount)) >= 0) {
5830 ARM_SUB_REG_IMM (code, ARMREG_SP, ARMREG_SP, i, rot_amount);
5832 code = mono_arm_emit_load_imm (code, ARMREG_IP, alloc_size);
5833 ARM_SUB_REG_REG (code, ARMREG_SP, ARMREG_SP, ARMREG_IP);
5835 mono_emit_unwind_op_def_cfa_offset (cfg, code, prev_sp_offset + alloc_size);
5837 if (cfg->frame_reg != ARMREG_SP) {
5838 ARM_MOV_REG_REG (code, cfg->frame_reg, ARMREG_SP);
5839 mono_emit_unwind_op_def_cfa_reg (cfg, code, cfg->frame_reg);
5841 //g_print ("prev_sp_offset: %d, alloc_size:%d\n", prev_sp_offset, alloc_size);
5842 prev_sp_offset += alloc_size;
5844 for (i = 0; i < alloc_size - orig_alloc_size; i += 4)
5845 mini_gc_set_slot_type_from_cfa (cfg, (- prev_sp_offset) + orig_alloc_size + i, SLOT_NOREF);
5847 /* compute max_offset in order to use short forward jumps
5848 * we could skip do it on arm because the immediate displacement
5849 * for jumps is large enough, it may be useful later for constant pools
5852 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
5853 MonoInst *ins = bb->code;
5854 bb->max_offset = max_offset;
5856 if (cfg->prof_options & MONO_PROFILE_COVERAGE)
5859 MONO_BB_FOR_EACH_INS (bb, ins)
5860 max_offset += ((guint8 *)ins_get_spec (ins->opcode))[MONO_INST_LEN];
5863 /* store runtime generic context */
5864 if (cfg->rgctx_var) {
5865 MonoInst *ins = cfg->rgctx_var;
5867 g_assert (ins->opcode == OP_REGOFFSET);
5869 if (arm_is_imm12 (ins->inst_offset)) {
5870 ARM_STR_IMM (code, MONO_ARCH_RGCTX_REG, ins->inst_basereg, ins->inst_offset);
5872 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_offset);
5873 ARM_STR_REG_REG (code, MONO_ARCH_RGCTX_REG, ins->inst_basereg, ARMREG_LR);
5877 /* load arguments allocated to register from the stack */
5880 cinfo = get_call_info (cfg->generic_sharing_context, NULL, sig);
5882 if (cinfo->vtype_retaddr) {
5883 ArgInfo *ainfo = &cinfo->ret;
5884 inst = cfg->vret_addr;
5885 g_assert (arm_is_imm12 (inst->inst_offset));
5886 ARM_STR_IMM (code, ainfo->reg, inst->inst_basereg, inst->inst_offset);
5889 if (sig->call_convention == MONO_CALL_VARARG) {
5890 ArgInfo *cookie = &cinfo->sig_cookie;
5892 /* Save the sig cookie address */
5893 g_assert (cookie->storage == RegTypeBase);
5895 g_assert (arm_is_imm12 (prev_sp_offset + cookie->offset));
5896 g_assert (arm_is_imm12 (cfg->sig_cookie));
5897 ARM_ADD_REG_IMM8 (code, ARMREG_IP, cfg->frame_reg, prev_sp_offset + cookie->offset);
5898 ARM_STR_IMM (code, ARMREG_IP, cfg->frame_reg, cfg->sig_cookie);
5901 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
5902 ArgInfo *ainfo = cinfo->args + i;
5903 inst = cfg->args [pos];
5905 if (cfg->verbose_level > 2)
5906 g_print ("Saving argument %d (type: %d)\n", i, ainfo->storage);
5907 if (inst->opcode == OP_REGVAR) {
5908 if (ainfo->storage == RegTypeGeneral)
5909 ARM_MOV_REG_REG (code, inst->dreg, ainfo->reg);
5910 else if (ainfo->storage == RegTypeFP) {
5911 g_assert_not_reached ();
5912 } else if (ainfo->storage == RegTypeBase) {
5913 if (arm_is_imm12 (prev_sp_offset + ainfo->offset)) {
5914 ARM_LDR_IMM (code, inst->dreg, ARMREG_SP, (prev_sp_offset + ainfo->offset));
5916 code = mono_arm_emit_load_imm (code, ARMREG_IP, prev_sp_offset + ainfo->offset);
5917 ARM_LDR_REG_REG (code, inst->dreg, ARMREG_SP, ARMREG_IP);
5920 g_assert_not_reached ();
5922 if (cfg->verbose_level > 2)
5923 g_print ("Argument %d assigned to register %s\n", pos, mono_arch_regname (inst->dreg));
5925 /* the argument should be put on the stack: FIXME handle size != word */
5926 if (ainfo->storage == RegTypeGeneral || ainfo->storage == RegTypeIRegPair || ainfo->storage == RegTypeGSharedVtInReg) {
5927 switch (ainfo->size) {
5929 if (arm_is_imm12 (inst->inst_offset))
5930 ARM_STRB_IMM (code, ainfo->reg, inst->inst_basereg, inst->inst_offset);
5932 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
5933 ARM_STRB_REG_REG (code, ainfo->reg, inst->inst_basereg, ARMREG_IP);
5937 if (arm_is_imm8 (inst->inst_offset)) {
5938 ARM_STRH_IMM (code, ainfo->reg, inst->inst_basereg, inst->inst_offset);
5940 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
5941 ARM_STRH_REG_REG (code, ainfo->reg, inst->inst_basereg, ARMREG_IP);
5945 if (arm_is_imm12 (inst->inst_offset)) {
5946 ARM_STR_IMM (code, ainfo->reg, inst->inst_basereg, inst->inst_offset);
5948 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
5949 ARM_STR_REG_REG (code, ainfo->reg, inst->inst_basereg, ARMREG_IP);
5951 if (arm_is_imm12 (inst->inst_offset + 4)) {
5952 ARM_STR_IMM (code, ainfo->reg + 1, inst->inst_basereg, inst->inst_offset + 4);
5954 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset + 4);
5955 ARM_STR_REG_REG (code, ainfo->reg + 1, inst->inst_basereg, ARMREG_IP);
5959 if (arm_is_imm12 (inst->inst_offset)) {
5960 ARM_STR_IMM (code, ainfo->reg, inst->inst_basereg, inst->inst_offset);
5962 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
5963 ARM_STR_REG_REG (code, ainfo->reg, inst->inst_basereg, ARMREG_IP);
5967 } else if (ainfo->storage == RegTypeBaseGen) {
5968 if (arm_is_imm12 (prev_sp_offset + ainfo->offset)) {
5969 ARM_LDR_IMM (code, ARMREG_LR, ARMREG_SP, (prev_sp_offset + ainfo->offset));
5971 code = mono_arm_emit_load_imm (code, ARMREG_IP, prev_sp_offset + ainfo->offset);
5972 ARM_LDR_REG_REG (code, ARMREG_LR, ARMREG_SP, ARMREG_IP);
5974 if (arm_is_imm12 (inst->inst_offset + 4)) {
5975 ARM_STR_IMM (code, ARMREG_LR, inst->inst_basereg, inst->inst_offset + 4);
5976 ARM_STR_IMM (code, ARMREG_R3, inst->inst_basereg, inst->inst_offset);
5978 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset + 4);
5979 ARM_STR_REG_REG (code, ARMREG_LR, inst->inst_basereg, ARMREG_IP);
5980 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
5981 ARM_STR_REG_REG (code, ARMREG_R3, inst->inst_basereg, ARMREG_IP);
5983 } else if (ainfo->storage == RegTypeBase || ainfo->storage == RegTypeGSharedVtOnStack) {
5984 if (arm_is_imm12 (prev_sp_offset + ainfo->offset)) {
5985 ARM_LDR_IMM (code, ARMREG_LR, ARMREG_SP, (prev_sp_offset + ainfo->offset));
5987 code = mono_arm_emit_load_imm (code, ARMREG_IP, prev_sp_offset + ainfo->offset);
5988 ARM_LDR_REG_REG (code, ARMREG_LR, ARMREG_SP, ARMREG_IP);
5991 switch (ainfo->size) {
5993 if (arm_is_imm8 (inst->inst_offset)) {
5994 ARM_STRB_IMM (code, ARMREG_LR, inst->inst_basereg, inst->inst_offset);
5996 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
5997 ARM_STRB_REG_REG (code, ARMREG_LR, inst->inst_basereg, ARMREG_IP);
6001 if (arm_is_imm8 (inst->inst_offset)) {
6002 ARM_STRH_IMM (code, ARMREG_LR, inst->inst_basereg, inst->inst_offset);
6004 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
6005 ARM_STRH_REG_REG (code, ARMREG_LR, inst->inst_basereg, ARMREG_IP);
6009 if (arm_is_imm12 (inst->inst_offset)) {
6010 ARM_STR_IMM (code, ARMREG_LR, inst->inst_basereg, inst->inst_offset);
6012 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
6013 ARM_STR_REG_REG (code, ARMREG_LR, inst->inst_basereg, ARMREG_IP);
6015 if (arm_is_imm12 (prev_sp_offset + ainfo->offset + 4)) {
6016 ARM_LDR_IMM (code, ARMREG_LR, ARMREG_SP, (prev_sp_offset + ainfo->offset + 4));
6018 code = mono_arm_emit_load_imm (code, ARMREG_IP, prev_sp_offset + ainfo->offset + 4);
6019 ARM_LDR_REG_REG (code, ARMREG_LR, ARMREG_SP, ARMREG_IP);
6021 if (arm_is_imm12 (inst->inst_offset + 4)) {
6022 ARM_STR_IMM (code, ARMREG_LR, inst->inst_basereg, inst->inst_offset + 4);
6024 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset + 4);
6025 ARM_STR_REG_REG (code, ARMREG_LR, inst->inst_basereg, ARMREG_IP);
6029 if (arm_is_imm12 (inst->inst_offset)) {
6030 ARM_STR_IMM (code, ARMREG_LR, inst->inst_basereg, inst->inst_offset);
6032 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
6033 ARM_STR_REG_REG (code, ARMREG_LR, inst->inst_basereg, ARMREG_IP);
6037 } else if (ainfo->storage == RegTypeFP) {
6038 int imm8, rot_amount;
6040 if ((imm8 = mono_arm_is_rotated_imm8 (inst->inst_offset, &rot_amount)) == -1) {
6041 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
6042 ARM_ADD_REG_REG (code, ARMREG_IP, ARMREG_IP, inst->inst_basereg);
6044 ARM_ADD_REG_IMM (code, ARMREG_IP, inst->inst_basereg, imm8, rot_amount);
6046 if (ainfo->size == 8)
6047 ARM_FSTD (code, ainfo->reg, ARMREG_IP, 0);
6049 ARM_FSTS (code, ainfo->reg, ARMREG_IP, 0);
6050 } else if (ainfo->storage == RegTypeStructByVal) {
6051 int doffset = inst->inst_offset;
6055 size = mini_type_stack_size_full (cfg->generic_sharing_context, inst->inst_vtype, NULL, sig->pinvoke);
6056 for (cur_reg = 0; cur_reg < ainfo->size; ++cur_reg) {
6057 if (arm_is_imm12 (doffset)) {
6058 ARM_STR_IMM (code, ainfo->reg + cur_reg, inst->inst_basereg, doffset);
6060 code = mono_arm_emit_load_imm (code, ARMREG_IP, doffset);
6061 ARM_STR_REG_REG (code, ainfo->reg + cur_reg, inst->inst_basereg, ARMREG_IP);
6063 soffset += sizeof (gpointer);
6064 doffset += sizeof (gpointer);
6066 if (ainfo->vtsize) {
6067 /* FIXME: handle overrun! with struct sizes not multiple of 4 */
6068 //g_print ("emit_memcpy (prev_sp_ofs: %d, ainfo->offset: %d, soffset: %d)\n", prev_sp_offset, ainfo->offset, soffset);
6069 code = emit_memcpy (code, ainfo->vtsize * sizeof (gpointer), inst->inst_basereg, doffset, ARMREG_SP, prev_sp_offset + ainfo->offset);
6071 } else if (ainfo->storage == RegTypeStructByAddr) {
6072 g_assert_not_reached ();
6073 /* FIXME: handle overrun! with struct sizes not multiple of 4 */
6074 code = emit_memcpy (code, ainfo->vtsize * sizeof (gpointer), inst->inst_basereg, inst->inst_offset, ainfo->reg, 0);
6076 g_assert_not_reached ();
6081 if (method->save_lmf)
6082 code = emit_save_lmf (cfg, code, alloc_size - lmf_offset);
6085 code = mono_arch_instrument_prolog (cfg, mono_trace_enter_method, code, TRUE);
6087 if (cfg->arch.seq_point_info_var) {
6088 MonoInst *ins = cfg->arch.seq_point_info_var;
6090 /* Initialize the variable from a GOT slot */
6091 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_SEQ_POINT_INFO, cfg->method);
6092 #ifdef USE_JUMP_TABLES
6094 gpointer *jte = mono_jumptable_add_entry ();
6095 code = mono_arm_load_jumptable_entry (code, jte, ARMREG_IP);
6096 ARM_LDR_IMM (code, ARMREG_R0, ARMREG_IP, 0);
6098 /** XXX: is it correct? */
6100 ARM_LDR_IMM (code, ARMREG_R0, ARMREG_PC, 0);
6102 *(gpointer*)code = NULL;
6105 ARM_LDR_REG_REG (code, ARMREG_R0, ARMREG_PC, ARMREG_R0);
6107 g_assert (ins->opcode == OP_REGOFFSET);
6109 if (arm_is_imm12 (ins->inst_offset)) {
6110 ARM_STR_IMM (code, ARMREG_R0, ins->inst_basereg, ins->inst_offset);
6112 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_offset);
6113 ARM_STR_REG_REG (code, ARMREG_R0, ins->inst_basereg, ARMREG_LR);
6117 /* Initialize ss_trigger_page_var */
6118 if (!cfg->soft_breakpoints) {
6119 MonoInst *info_var = cfg->arch.seq_point_info_var;
6120 MonoInst *ss_trigger_page_var = cfg->arch.ss_trigger_page_var;
6121 int dreg = ARMREG_LR;
6124 g_assert (info_var->opcode == OP_REGOFFSET);
6125 g_assert (arm_is_imm12 (info_var->inst_offset));
6127 ARM_LDR_IMM (code, dreg, info_var->inst_basereg, info_var->inst_offset);
6128 /* Load the trigger page addr */
6129 ARM_LDR_IMM (code, dreg, dreg, MONO_STRUCT_OFFSET (SeqPointInfo, ss_trigger_page));
6130 ARM_STR_IMM (code, dreg, ss_trigger_page_var->inst_basereg, ss_trigger_page_var->inst_offset);
6134 if (cfg->arch.seq_point_read_var) {
6135 MonoInst *read_ins = cfg->arch.seq_point_read_var;
6136 MonoInst *ss_method_ins = cfg->arch.seq_point_ss_method_var;
6137 MonoInst *bp_method_ins = cfg->arch.seq_point_bp_method_var;
6138 #ifdef USE_JUMP_TABLES
6141 g_assert (read_ins->opcode == OP_REGOFFSET);
6142 g_assert (arm_is_imm12 (read_ins->inst_offset));
6143 g_assert (ss_method_ins->opcode == OP_REGOFFSET);
6144 g_assert (arm_is_imm12 (ss_method_ins->inst_offset));
6145 g_assert (bp_method_ins->opcode == OP_REGOFFSET);
6146 g_assert (arm_is_imm12 (bp_method_ins->inst_offset));
6148 #ifdef USE_JUMP_TABLES
6149 jte = mono_jumptable_add_entries (3);
6150 jte [0] = (gpointer)&ss_trigger_var;
6151 jte [1] = single_step_func_wrapper;
6152 jte [2] = breakpoint_func_wrapper;
6153 code = mono_arm_load_jumptable_entry_addr (code, jte, ARMREG_LR);
6155 ARM_MOV_REG_REG (code, ARMREG_LR, ARMREG_PC);
6157 *(volatile int **)code = &ss_trigger_var;
6159 *(gpointer*)code = single_step_func_wrapper;
6161 *(gpointer*)code = breakpoint_func_wrapper;
6165 ARM_LDR_IMM (code, ARMREG_IP, ARMREG_LR, 0);
6166 ARM_STR_IMM (code, ARMREG_IP, read_ins->inst_basereg, read_ins->inst_offset);
6167 ARM_LDR_IMM (code, ARMREG_IP, ARMREG_LR, 4);
6168 ARM_STR_IMM (code, ARMREG_IP, ss_method_ins->inst_basereg, ss_method_ins->inst_offset);
6169 ARM_LDR_IMM (code, ARMREG_IP, ARMREG_LR, 8);
6170 ARM_STR_IMM (code, ARMREG_IP, bp_method_ins->inst_basereg, bp_method_ins->inst_offset);
6173 cfg->code_len = code - cfg->native_code;
6174 g_assert (cfg->code_len < cfg->code_size);
/*
 * mono_arch_emit_epilog:
 *
 *   Emit the method epilog: grow the code buffer if needed, optionally emit
 * trace-leave instrumentation, load struct-by-value returns into r0, and
 * restore callee-saved registers / LMF before returning (LR popped into PC).
 * NOTE(review): this chunk was extracted with interior lines dropped (the
 * leading numbers are the original file's line numbers); the visible code is
 * kept verbatim.
 */
6181 mono_arch_emit_epilog (MonoCompile *cfg)
6183 MonoMethod *method = cfg->method;
6184 int pos, i, rot_amount;
/* Worst-case epilog size estimate, enlarged below for LMF/tracing/profiling */
6185 int max_epilog_size = 16 + 20*4;
6189 if (cfg->method->save_lmf)
6190 max_epilog_size += 128;
6192 if (mono_jit_trace_calls != NULL)
6193 max_epilog_size += 50;
6195 if (cfg->prof_options & MONO_PROFILE_ENTER_LEAVE)
6196 max_epilog_size += 50;
/* Double the buffer until the epilog is guaranteed to fit (16-byte slack) */
6198 while (cfg->code_len + max_epilog_size > (cfg->code_size - 16)) {
6199 cfg->code_size *= 2;
6200 cfg->native_code = g_realloc (cfg->native_code, cfg->code_size);
6201 cfg->stat_code_reallocs++;
6205 * Keep in sync with OP_JMP
6207 code = cfg->native_code + cfg->code_len;
6209 if (mono_jit_trace_calls != NULL && mono_trace_eval (method)) {
6210 code = mono_arch_instrument_epilog (cfg, mono_trace_leave_method, code, TRUE);
6214 /* Load returned vtypes into registers if needed */
6215 cinfo = cfg->arch.cinfo;
6216 if (cinfo->ret.storage == RegTypeStructByVal) {
6217 MonoInst *ins = cfg->ret;
/* Use the short-form LDR when the offset fits in 12 bits, else load it via LR */
6219 if (arm_is_imm12 (ins->inst_offset)) {
6220 ARM_LDR_IMM (code, ARMREG_R0, ins->inst_basereg, ins->inst_offset);
6222 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_offset);
6223 ARM_LDR_REG_REG (code, ARMREG_R0, ins->inst_basereg, ARMREG_LR);
6227 if (method->save_lmf) {
6228 int lmf_offset, reg, sp_adj, regmask;
6229 /* all but r0-r3, sp and pc */
6230 pos += sizeof (MonoLMF) - (MONO_ARM_NUM_SAVED_REGS * sizeof (mgreg_t));
6233 code = emit_restore_lmf (cfg, code, cfg->stack_usage - lmf_offset);
6235 /* This points to r4 inside MonoLMF->iregs */
6236 sp_adj = (sizeof (MonoLMF) - MONO_ARM_NUM_SAVED_REGS * sizeof (mgreg_t));
6238 regmask = 0x9ff0; /* restore lr to pc */
6239 /* Skip caller saved registers not used by the method */
6240 while (!(cfg->used_int_regs & (1 << reg)) && reg < ARMREG_FP) {
6241 regmask &= ~(1 << reg);
6246 /* Restored later */
6247 regmask &= ~(1 << ARMREG_PC);
6248 /* point sp at the registers to restore: 10 is 14 -4, because we skip r0-r3 */
6249 code = emit_big_add (code, ARMREG_SP, cfg->frame_reg, cfg->stack_usage - lmf_offset + sp_adj);
6251 ARM_POP (code, regmask);
6253 /* Restore saved r7, restore LR to PC */
6254 /* Skip lr from the lmf */
6255 ARM_ADD_REG_IMM (code, ARMREG_SP, ARMREG_SP, sizeof (gpointer), 0);
6256 ARM_POP (code, (1 << ARMREG_R7) | (1 << ARMREG_PC));
/* Non-LMF path: unwind the frame, preferring a rotated-imm8 ADD when possible */
6259 if ((i = mono_arm_is_rotated_imm8 (cfg->stack_usage, &rot_amount)) >= 0) {
6260 ARM_ADD_REG_IMM (code, ARMREG_SP, cfg->frame_reg, i, rot_amount);
6262 code = mono_arm_emit_load_imm (code, ARMREG_IP, cfg->stack_usage);
6263 ARM_ADD_REG_REG (code, ARMREG_SP, cfg->frame_reg, ARMREG_IP);
6267 /* Restore saved gregs */
6268 if (cfg->used_int_regs)
6269 ARM_POP (code, cfg->used_int_regs);
6270 /* Restore saved r7, restore LR to PC */
6271 ARM_POP (code, (1 << ARMREG_R7) | (1 << ARMREG_PC));
6273 ARM_POP (code, cfg->used_int_regs | (1 << ARMREG_PC));
6277 cfg->code_len = code - cfg->native_code;
6279 g_assert (cfg->code_len < cfg->code_size);
/*
 * mono_arch_emit_exceptions:
 *
 *   Emit the out-of-line exception-throwing stubs referenced by MONO_PATCH_INFO_EXC
 * patch entries: each distinct corlib exception gets one stub that loads the
 * exception type token and calls mono_arch_throw_corlib_exception.
 * NOTE(review): extraction dropped interior lines; visible code kept verbatim.
 */
6284 mono_arch_emit_exceptions (MonoCompile *cfg)
6286 MonoJumpInfo *patch_info;
6289 guint8* exc_throw_pos [MONO_EXC_INTRINS_NUM];
6290 guint8 exc_throw_found [MONO_EXC_INTRINS_NUM];
6291 int max_epilog_size = 50;
/* No stubs emitted yet for any intrinsic exception id */
6293 for (i = 0; i < MONO_EXC_INTRINS_NUM; i++) {
6294 exc_throw_pos [i] = NULL;
6295 exc_throw_found [i] = 0;
6298 /* count the number of exception infos */
6301 * make sure we have enough space for exceptions
6303 for (patch_info = cfg->patch_info; patch_info; patch_info = patch_info->next) {
6304 if (patch_info->type == MONO_PATCH_INFO_EXC) {
6305 i = mini_exception_id_by_name (patch_info->data.target);
/* Reserve space only once per distinct exception kind */
6306 if (!exc_throw_found [i]) {
6307 max_epilog_size += 32;
6308 exc_throw_found [i] = TRUE;
6313 while (cfg->code_len + max_epilog_size > (cfg->code_size - 16)) {
6314 cfg->code_size *= 2;
6315 cfg->native_code = g_realloc (cfg->native_code, cfg->code_size);
6316 cfg->stat_code_reallocs++;
6319 code = cfg->native_code + cfg->code_len;
6321 /* add code to raise exceptions */
6322 for (patch_info = cfg->patch_info; patch_info; patch_info = patch_info->next) {
6323 switch (patch_info->type) {
6324 case MONO_PATCH_INFO_EXC: {
6325 MonoClass *exc_class;
6326 unsigned char *ip = patch_info->ip.i + cfg->native_code;
6328 i = mini_exception_id_by_name (patch_info->data.target);
/* Re-use an already emitted stub for the same exception kind */
6329 if (exc_throw_pos [i]) {
6330 arm_patch (ip, exc_throw_pos [i]);
6331 patch_info->type = MONO_PATCH_INFO_NONE;
6334 exc_throw_pos [i] = code;
6336 arm_patch (ip, code);
6338 exc_class = mono_class_from_name (mono_defaults.corlib, "System", patch_info->data.name);
6339 g_assert (exc_class);
/* Pass the faulting address (LR) as the second argument of the thrower */
6341 ARM_MOV_REG_REG (code, ARMREG_R1, ARMREG_LR);
6342 #ifdef USE_JUMP_TABLES
6344 gpointer *jte = mono_jumptable_add_entries (2);
/* Repurpose the patch entry to resolve the internal thrower method */
6345 patch_info->type = MONO_PATCH_INFO_INTERNAL_METHOD;
6346 patch_info->data.name = "mono_arch_throw_corlib_exception";
6347 patch_info->ip.i = code - cfg->native_code;
6348 code = mono_arm_load_jumptable_entry_addr (code, jte, ARMREG_R0);
6349 ARM_LDR_IMM (code, ARMREG_IP, ARMREG_R0, 0);
6350 ARM_LDR_IMM (code, ARMREG_R0, ARMREG_R0, 4);
6351 ARM_BLX_REG (code, ARMREG_IP);
6352 jte [1] = GUINT_TO_POINTER (exc_class->type_token);
/* Non-jumptable path: token is embedded in the code stream after the LDR */
6355 ARM_LDR_IMM (code, ARMREG_R0, ARMREG_PC, 0);
6356 patch_info->type = MONO_PATCH_INFO_INTERNAL_METHOD;
6357 patch_info->data.name = "mono_arch_throw_corlib_exception";
6358 patch_info->ip.i = code - cfg->native_code;
6360 *(guint32*)(gpointer)code = exc_class->type_token;
6371 cfg->code_len = code - cfg->native_code;
6373 g_assert (cfg->code_len < cfg->code_size);
6377 #endif /* #ifndef DISABLE_JIT */
/* One-time arch-specific initialization hook (body not visible in this chunk). */
6380 mono_arch_finish_init (void)
/* Free arch-specific per-thread JIT TLS data (body not visible in this chunk). */
6385 mono_arch_free_jit_tls_data (MonoJitTlsData *tls)
/* Arch hook to replace a method call with an intrinsic instruction, if any. */
6390 mono_arch_emit_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
/* Debug helper for printing an instruction tree (body not visible). */
6397 mono_arch_print_tree (MonoInst *tree, int arity)
/* Return the offset of the patchable location inside a call site (body not visible). */
6407 mono_arch_get_patch_offset (guint8 *code)
/* No register windows on ARM; hook exists for architectures that have them. */
6414 mono_arch_flush_register_windows (void)
/*
 * mono_arch_emit_imt_argument:
 *
 *   Arrange for the IMT/method argument of an interface or generic call to be
 * passed in ARMREG_V5. Under AOT, jumptables, LLVM or generic sharing the
 * argument is always passed in the register (dynamic_imt_arg).
 * NOTE(review): extraction dropped interior lines; visible code kept verbatim.
 */
6421 mono_arch_emit_imt_argument (MonoCompile *cfg, MonoCallInst *call, MonoInst *imt_arg)
6423 int method_reg = mono_alloc_ireg (cfg);
/* With jumptables the register-based convention is always used */
6424 #ifdef USE_JUMP_TABLES
6425 int use_jumptables = TRUE;
6427 int use_jumptables = FALSE;
6430 if (cfg->compile_aot) {
6433 call->dynamic_imt_arg = TRUE;
6436 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, method_reg, imt_arg->dreg);
/* No imt_arg: materialize the method itself via an AOT constant */
6438 MONO_INST_NEW (cfg, ins, OP_AOTCONST);
6439 ins->dreg = method_reg;
6440 ins->inst_p0 = call->method;
6441 ins->inst_c1 = MONO_PATCH_INFO_METHODCONST;
6442 MONO_ADD_INS (cfg->cbb, ins);
6444 mono_call_inst_add_outarg_reg (cfg, call, method_reg, ARMREG_V5, FALSE);
6445 } else if (cfg->generic_context || imt_arg || mono_use_llvm || use_jumptables) {
6446 /* Always pass in a register for simplicity */
6447 call->dynamic_imt_arg = TRUE;
6449 cfg->uses_rgctx_reg = TRUE;
6452 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, method_reg, imt_arg->dreg);
/* JIT case without imt_arg: the method pointer is a plain constant */
6456 MONO_INST_NEW (cfg, ins, OP_PCONST);
6457 ins->inst_p0 = call->method;
6458 ins->dreg = method_reg;
6459 MONO_ADD_INS (cfg->cbb, ins);
6462 mono_call_inst_add_outarg_reg (cfg, call, method_reg, ARMREG_V5, FALSE);
6466 #endif /* DISABLE_JIT */
/*
 * mono_arch_find_imt_method:
 *
 *   Recover the IMT method at an interface call site: with jumptables / AOT /
 * the gsharedvt trampoline it is in register V5, otherwise it is embedded in
 * the code stream right after the LDR PC-relative instruction.
 * NOTE(review): extraction dropped interior lines; visible code kept verbatim.
 */
6469 mono_arch_find_imt_method (mgreg_t *regs, guint8 *code)
6471 #ifdef USE_JUMP_TABLES
6472 return (MonoMethod*)regs [ARMREG_V5];
6475 guint32 *code_ptr = (guint32*)code;
6477 method = GUINT_TO_POINTER (code_ptr [1]);
6481 return (MonoMethod*)regs [ARMREG_V5];
6483 /* The IMT value is stored in the code stream right after the LDC instruction. */
6484 /* This is no longer true for the gsharedvt_in trampoline */
/* Sanity check: the preceding instruction must be a PC-relative LDR */
6486 if (!IS_LDR_PC (code_ptr [0])) {
6487 g_warning ("invalid code stream, instruction before IMT value is not a LDC in %s() (code %p value 0: 0x%x -1: 0x%x -2: 0x%x)", __FUNCTION__, code, code_ptr [2], code_ptr [1], code_ptr [0]);
6488 g_assert (IS_LDR_PC (code_ptr [0]));
6492 /* This is AOTed code, or the gsharedvt trampoline, the IMT method is in V5 */
6493 return (MonoMethod*)regs [ARMREG_V5];
6495 return (MonoMethod*) method;
/* Return the vtable of a static rgctx call site: it is kept in the RGCTX register. */
6500 mono_arch_find_static_call_vtable (mgreg_t *regs, guint8 *code)
6502 return (MonoVTable*) regs [MONO_ARCH_RGCTX_REG];
6505 /* #define ENABLE_WRONG_METHOD_CHECK 1 */
6506 #define BASE_SIZE (6 * 4)
6507 #define BSEARCH_ENTRY_SIZE (4 * 4)
6508 #define CMP_SIZE (3 * 4)
6509 #define BRANCH_SIZE (1 * 4)
6510 #define CALL_SIZE (2 * 4)
6511 #define WMC_SIZE (8 * 4)
6512 #define DISTANCE(A, B) (((gint32)(B)) - ((gint32)(A)))
6514 #ifdef USE_JUMP_TABLES
/* Store VALUE into jumptable slot INDEX; asserts the slot is written only once. */
6516 set_jumptable_element (gpointer *base, guint32 index, gpointer value)
6518 g_assert (base [index] == NULL);
6519 base [index] = value;
/*
 * Emit a conditional load of jumptable entry JTI (scaled by 4) into DREG from
 * BASE. Uses a single LDR when the byte offset fits in 12 bits, otherwise
 * materializes the offset with MOVW/MOVT and does a register-offset LDR.
 */
6522 load_element_with_regbase_cond (arminstr_t *code, ARMReg dreg, ARMReg base, guint32 jti, int cond)
6524 if (arm_is_imm12 (jti * 4)) {
6525 ARM_LDR_IMM_COND (code, dreg, base, jti * 4, cond);
6527 ARM_MOVW_REG_IMM_COND (code, dreg, (jti * 4) & 0xffff, cond);
/* Only emit MOVT when the offset needs the upper half-word */
6528 if ((jti * 4) >> 16)
6529 ARM_MOVT_REG_IMM_COND (code, dreg, ((jti * 4) >> 16) & 0xffff, cond);
6530 ARM_LDR_REG_REG_SHIFT_COND (code, dreg, base, dreg, ARMSHIFT_LSL, 0, cond);
/*
 * Emit VALUE into the code stream and patch the earlier PC-relative LDR at
 * TARGET so its 12-bit immediate reaches it.
 * NOTE(review): delta is guint32, so the `delta >= 0` half of the assert is
 * vacuously true; only the upper bound check is effective. Extraction dropped
 * interior lines here, so a delta adjustment may exist in the full file —
 * verify before changing the type.
 */
6536 arm_emit_value_and_patch_ldr (arminstr_t *code, arminstr_t *target, guint32 value)
6538 guint32 delta = DISTANCE (target, code);
6540 g_assert (delta >= 0 && delta <= 0xFFF);
6541 *target = *target | delta;
6547 #ifdef ENABLE_WRONG_METHOD_CHECK
/* Diagnostic helper called from generated code when the WRONG_METHOD_CHECK fires. */
6549 mini_dump_bad_imt (int input_imt, int compared_imt, int pc)
6551 g_print ("BAD IMT comparing %x with expected %x at ip %x", input_imt, compared_imt, pc);
/*
 * mono_arch_build_imt_thunk:
 *
 *   Build the native IMT thunk for VTABLE: a linear/binary-search dispatch
 * over IMT_ENTRIES that compares the incoming IMT method (r0/V5) against each
 * entry's key and jumps to the matching vtable slot, target code, or
 * FAIL_TRAMP. Two code shapes exist: a jumptable-based one (USE_JUMP_TABLES)
 * and one embedding constants in the instruction stream.
 * NOTE(review): this chunk was extracted with interior lines dropped (leading
 * numbers are original line numbers); the visible code is kept verbatim.
 */
6557 mono_arch_build_imt_thunk (MonoVTable *vtable, MonoDomain *domain, MonoIMTCheckItem **imt_entries, int count,
6558 gpointer fail_tramp)
6561 arminstr_t *code, *start;
6562 #ifdef USE_JUMP_TABLES
6565 gboolean large_offsets = FALSE;
6566 guint32 **constant_pool_starts;
6567 arminstr_t *vtable_target = NULL;
6568 int extra_space = 0;
6570 #ifdef ENABLE_WRONG_METHOD_CHECK
/* First pass: compute the per-entry chunk sizes and the total thunk size */
6575 #ifdef USE_JUMP_TABLES
6576 for (i = 0; i < count; ++i) {
6577 MonoIMTCheckItem *item = imt_entries [i];
6578 item->chunk_size += 4 * 16;
6579 if (!item->is_equals)
6580 imt_entries [item->check_target_idx]->compare_done = TRUE;
6581 size += item->chunk_size;
6584 constant_pool_starts = g_new0 (guint32*, count);
6586 for (i = 0; i < count; ++i) {
6587 MonoIMTCheckItem *item = imt_entries [i];
6588 if (item->is_equals) {
6589 gboolean fail_case = !item->check_target_idx && fail_tramp;
/* Large vtable offsets / explicit target code need the long code shape */
6591 if (item->has_target_code || !arm_is_imm12 (DISTANCE (vtable, &vtable->vtable[item->value.vtable_slot]))) {
6592 item->chunk_size += 32;
6593 large_offsets = TRUE;
6596 if (item->check_target_idx || fail_case) {
6597 if (!item->compare_done || fail_case)
6598 item->chunk_size += CMP_SIZE;
6599 item->chunk_size += BRANCH_SIZE;
6601 #ifdef ENABLE_WRONG_METHOD_CHECK
6602 item->chunk_size += WMC_SIZE;
6606 item->chunk_size += 16;
6607 large_offsets = TRUE;
6609 item->chunk_size += CALL_SIZE;
6611 item->chunk_size += BSEARCH_ENTRY_SIZE;
6612 imt_entries [item->check_target_idx]->compare_done = TRUE;
6614 size += item->chunk_size;
6618 size += 4 * count; /* The ARM_ADD_REG_IMM to pop the stack */
/* Allocate the thunk from the generic-virtual area or the domain code area */
6622 code = mono_method_alloc_generic_virtual_thunk (domain, size);
6624 code = mono_domain_code_reserve (domain, size);
6628 g_print ("Building IMT thunk for class %s %s entries %d code size %d code at %p end %p vtable %p fail_tramp %p\n", vtable->klass->name_space, vtable->klass->name, count, size, start, ((guint8*)start) + size, vtable, fail_tramp);
6629 for (i = 0; i < count; ++i) {
6630 MonoIMTCheckItem *item = imt_entries [i];
6631 g_print ("method %d (%p) %s vtable slot %p is_equals %d chunk size %d\n", i, item->key, ((MonoMethod*)item->key)->name, &vtable->vtable [item->value.vtable_slot], item->is_equals, item->chunk_size);
/* Thunk prolog: save scratch registers, normalize the IMT method into r0 */
6635 #ifdef USE_JUMP_TABLES
6636 ARM_PUSH3 (code, ARMREG_R0, ARMREG_R1, ARMREG_R2);
6637 /* If jumptables we always pass the IMT method in R5 */
6638 ARM_MOV_REG_REG (code, ARMREG_R0, ARMREG_V5);
/* Jumptable slot layout: slot 0 = vtable, then 3 slots per IMT entry */
6639 #define VTABLE_JTI 0
6640 #define IMT_METHOD_OFFSET 0
6641 #define TARGET_CODE_OFFSET 1
6642 #define JUMP_CODE_OFFSET 2
6643 #define RECORDS_PER_ENTRY 3
6644 #define IMT_METHOD_JTI(idx) (1 + idx * RECORDS_PER_ENTRY + IMT_METHOD_OFFSET)
6645 #define TARGET_CODE_JTI(idx) (1 + idx * RECORDS_PER_ENTRY + TARGET_CODE_OFFSET)
6646 #define JUMP_CODE_JTI(idx) (1 + idx * RECORDS_PER_ENTRY + JUMP_CODE_OFFSET)
6648 jte = mono_jumptable_add_entries (RECORDS_PER_ENTRY * count + 1 /* vtable */);
6649 code = (arminstr_t *) mono_arm_load_jumptable_entry_addr ((guint8 *) code, jte, ARMREG_R2);
6650 ARM_LDR_IMM (code, ARMREG_IP, ARMREG_R2, VTABLE_JTI);
6651 set_jumptable_element (jte, VTABLE_JTI, vtable);
6654 ARM_PUSH4 (code, ARMREG_R0, ARMREG_R1, ARMREG_IP, ARMREG_PC);
6656 ARM_PUSH2 (code, ARMREG_R0, ARMREG_R1);
6657 ARM_LDR_IMM (code, ARMREG_R0, ARMREG_LR, -4);
6658 vtable_target = code;
6659 ARM_LDR_IMM (code, ARMREG_IP, ARMREG_PC, 0);
6661 if (mono_use_llvm) {
6662 /* LLVM always passes the IMT method in R5 */
6663 ARM_MOV_REG_REG (code, ARMREG_R0, ARMREG_V5);
6665 /* R0 == 0 means we are called from AOT code. In this case, V5 contains the IMT method */
6666 ARM_CMP_REG_IMM8 (code, ARMREG_R0, 0);
6667 ARM_MOV_REG_REG_COND (code, ARMREG_R0, ARMREG_V5, ARMCOND_EQ);
/* Second pass: emit the comparison / dispatch code for each entry */
6671 for (i = 0; i < count; ++i) {
6672 MonoIMTCheckItem *item = imt_entries [i];
6673 #ifdef USE_JUMP_TABLES
6674 guint32 imt_method_jti = 0, target_code_jti = 0;
6676 arminstr_t *imt_method = NULL, *vtable_offset_ins = NULL, *target_code_ins = NULL;
6678 gint32 vtable_offset;
6680 item->code_target = (guint8*)code;
6682 if (item->is_equals) {
6683 gboolean fail_case = !item->check_target_idx && fail_tramp;
6685 if (item->check_target_idx || fail_case) {
6686 if (!item->compare_done || fail_case) {
6687 #ifdef USE_JUMP_TABLES
6688 imt_method_jti = IMT_METHOD_JTI (i);
6689 code = load_element_with_regbase_cond (code, ARMREG_R1, ARMREG_R2, imt_method_jti, ARMCOND_AL);
6692 ARM_LDR_IMM (code, ARMREG_R1, ARMREG_PC, 0);
6694 ARM_CMP_REG_REG (code, ARMREG_R0, ARMREG_R1);
/* On mismatch branch to the next check; target patched in the third pass */
6696 #ifdef USE_JUMP_TABLES
6697 code = load_element_with_regbase_cond (code, ARMREG_R1, ARMREG_R2, JUMP_CODE_JTI (i), ARMCOND_NE);
6698 ARM_BX_COND (code, ARMCOND_NE, ARMREG_R1);
6699 item->jmp_code = GUINT_TO_POINTER (JUMP_CODE_JTI (i));
6701 item->jmp_code = (guint8*)code;
6702 ARM_B_COND (code, ARMCOND_NE, 0);
6705 /*Enable the commented code to assert on wrong method*/
6706 #ifdef ENABLE_WRONG_METHOD_CHECK
6707 #ifdef USE_JUMP_TABLES
6708 imt_method_jti = IMT_METHOD_JTI (i);
6709 code = load_element_with_regbase_cond (code, ARMREG_R1, ARMREG_R2, imt_method_jti, ARMCOND_AL);
6712 ARM_LDR_IMM (code, ARMREG_R1, ARMREG_PC, 0);
6714 ARM_CMP_REG_REG (code, ARMREG_R0, ARMREG_R1);
6716 ARM_B_COND (code, ARMCOND_EQ, 0);
6718 /* Define this if your system is so bad that gdb is failing. */
6719 #ifdef BROKEN_DEV_ENV
6720 ARM_MOV_REG_REG (code, ARMREG_R2, ARMREG_PC);
6722 arm_patch (code - 1, mini_dump_bad_imt);
6726 arm_patch (cond, code);
6730 if (item->has_target_code) {
6731 /* Load target address */
6732 #ifdef USE_JUMP_TABLES
6733 target_code_jti = TARGET_CODE_JTI (i);
6734 code = load_element_with_regbase_cond (code, ARMREG_R1, ARMREG_R2, target_code_jti, ARMCOND_AL);
6735 /* Restore registers */
6736 ARM_POP3 (code, ARMREG_R0, ARMREG_R1, ARMREG_R2);
6738 ARM_BX (code, ARMREG_R1);
6739 set_jumptable_element (jte, target_code_jti, item->value.target_code);
6741 target_code_ins = code;
6742 ARM_LDR_IMM (code, ARMREG_R1, ARMREG_PC, 0);
6743 /* Save it to the fourth slot */
6744 ARM_STR_IMM (code, ARMREG_R1, ARMREG_SP, 3 * sizeof (gpointer));
6745 /* Restore registers and branch */
6746 ARM_POP4 (code, ARMREG_R0, ARMREG_R1, ARMREG_IP, ARMREG_PC);
6748 code = arm_emit_value_and_patch_ldr (code, target_code_ins, (gsize)item->value.target_code);
/* No explicit target code: dispatch through the vtable slot */
6751 vtable_offset = DISTANCE (vtable, &vtable->vtable[item->value.vtable_slot]);
6752 if (!arm_is_imm12 (vtable_offset)) {
6754 * We need to branch to a computed address but we don't have
6755 * a free register to store it, since IP must contain the
6756 * vtable address. So we push the two values to the stack, and
6757 * load them both using LDM.
6759 /* Compute target address */
6760 #ifdef USE_JUMP_TABLES
6761 ARM_MOVW_REG_IMM (code, ARMREG_R1, vtable_offset & 0xffff);
6762 if (vtable_offset >> 16)
6763 ARM_MOVT_REG_IMM (code, ARMREG_R1, (vtable_offset >> 16) & 0xffff);
6764 /* IP had vtable base. */
6765 ARM_LDR_REG_REG (code, ARMREG_IP, ARMREG_IP, ARMREG_R1);
6766 /* Restore registers and branch */
6767 ARM_POP3 (code, ARMREG_R0, ARMREG_R1, ARMREG_R2);
6768 ARM_BX (code, ARMREG_IP);
6770 vtable_offset_ins = code;
6771 ARM_LDR_IMM (code, ARMREG_R1, ARMREG_PC, 0);
6772 ARM_LDR_REG_REG (code, ARMREG_R1, ARMREG_IP, ARMREG_R1);
6773 /* Save it to the fourth slot */
6774 ARM_STR_IMM (code, ARMREG_R1, ARMREG_SP, 3 * sizeof (gpointer));
6775 /* Restore registers and branch */
6776 ARM_POP4 (code, ARMREG_R0, ARMREG_R1, ARMREG_IP, ARMREG_PC);
6778 code = arm_emit_value_and_patch_ldr (code, vtable_offset_ins, vtable_offset);
/* Short vtable offset: a direct LDR into PC performs the tail dispatch */
6781 #ifdef USE_JUMP_TABLES
6782 ARM_LDR_IMM (code, ARMREG_IP, ARMREG_IP, vtable_offset);
6783 ARM_POP3 (code, ARMREG_R0, ARMREG_R1, ARMREG_R2);
6784 ARM_BX (code, ARMREG_IP);
6786 ARM_POP2 (code, ARMREG_R0, ARMREG_R1);
6788 ARM_ADD_REG_IMM8 (code, ARMREG_SP, ARMREG_SP, 2 * sizeof (gpointer));
6789 ARM_LDR_IMM (code, ARMREG_PC, ARMREG_IP, vtable_offset);
/* fail_case: the not-equal branch falls through to the fail trampoline */
6795 #ifdef USE_JUMP_TABLES
6796 set_jumptable_element (jte, GPOINTER_TO_UINT (item->jmp_code), code);
6797 target_code_jti = TARGET_CODE_JTI (i);
6798 /* Load target address */
6799 code = load_element_with_regbase_cond (code, ARMREG_R1, ARMREG_R2, target_code_jti, ARMCOND_AL);
6800 /* Restore registers */
6801 ARM_POP3 (code, ARMREG_R0, ARMREG_R1, ARMREG_R2);
6803 ARM_BX (code, ARMREG_R1);
6804 set_jumptable_element (jte, target_code_jti, fail_tramp);
6806 arm_patch (item->jmp_code, (guchar*)code);
6808 target_code_ins = code;
6809 /* Load target address */
6810 ARM_LDR_IMM (code, ARMREG_R1, ARMREG_PC, 0);
6811 /* Save it to the fourth slot */
6812 ARM_STR_IMM (code, ARMREG_R1, ARMREG_SP, 3 * sizeof (gpointer));
6813 /* Restore registers and branch */
6814 ARM_POP4 (code, ARMREG_R0, ARMREG_R1, ARMREG_IP, ARMREG_PC);
6816 code = arm_emit_value_and_patch_ldr (code, target_code_ins, (gsize)fail_tramp);
6818 item->jmp_code = NULL;
/* Emit the entry's IMT method key into the jumptable / constant pool */
6821 #ifdef USE_JUMP_TABLES
6823 set_jumptable_element (jte, imt_method_jti, item->key);
6826 code = arm_emit_value_and_patch_ldr (code, imt_method, (guint32)item->key);
6828 /*must emit after unconditional branch*/
6829 if (vtable_target) {
6830 code = arm_emit_value_and_patch_ldr (code, vtable_target, (guint32)vtable);
6831 item->chunk_size += 4;
6832 vtable_target = NULL;
6835 /*We reserve the space for bsearch IMT values after the first entry with an absolute jump*/
6836 constant_pool_starts [i] = code;
6838 code += extra_space;
/* Non-equals (range check) entry of the binary search tree */
6843 #ifdef USE_JUMP_TABLES
6844 code = load_element_with_regbase_cond (code, ARMREG_R1, ARMREG_R2, IMT_METHOD_JTI (i), ARMCOND_AL);
6845 ARM_CMP_REG_REG (code, ARMREG_R0, ARMREG_R1);
6846 code = load_element_with_regbase_cond (code, ARMREG_R1, ARMREG_R2, JUMP_CODE_JTI (i), ARMCOND_HS);
6847 ARM_BX_COND (code, ARMCOND_HS, ARMREG_R1);
6848 item->jmp_code = GUINT_TO_POINTER (JUMP_CODE_JTI (i));
6850 ARM_LDR_IMM (code, ARMREG_R1, ARMREG_PC, 0);
6851 ARM_CMP_REG_REG (code, ARMREG_R0, ARMREG_R1);
6853 item->jmp_code = (guint8*)code;
6854 ARM_B_COND (code, ARMCOND_HS, 0);
/* Third pass: patch forward branches and fill the constant pools */
6860 for (i = 0; i < count; ++i) {
6861 MonoIMTCheckItem *item = imt_entries [i];
6862 if (item->jmp_code) {
6863 if (item->check_target_idx)
6864 #ifdef USE_JUMP_TABLES
6865 set_jumptable_element (jte, GPOINTER_TO_UINT (item->jmp_code), imt_entries [item->check_target_idx]->code_target);
6867 arm_patch (item->jmp_code, imt_entries [item->check_target_idx]->code_target);
6870 if (i > 0 && item->is_equals) {
6872 #ifdef USE_JUMP_TABLES
6873 for (j = i - 1; j >= 0 && !imt_entries [j]->is_equals; --j)
6874 set_jumptable_element (jte, IMT_METHOD_JTI (j), imt_entries [j]->key);
6876 arminstr_t *space_start = constant_pool_starts [i];
6877 for (j = i - 1; j >= 0 && !imt_entries [j]->is_equals; --j) {
6878 space_start = arm_emit_value_and_patch_ldr (space_start, (arminstr_t*)imt_entries [j]->code_target, (guint32)imt_entries [j]->key);
6886 char *buff = g_strdup_printf ("thunk_for_class_%s_%s_entries_%d", vtable->klass->name_space, vtable->klass->name, count);
6887 mono_disassemble_code (NULL, (guint8*)start, size, buff);
6892 #ifndef USE_JUMP_TABLES
6893 g_free (constant_pool_starts);
/* Make the freshly written thunk executable and account its size */
6896 mono_arch_flush_icache ((guint8*)start, size);
6897 mono_stats.imt_thunks_size += code - start;
6899 g_assert (DISTANCE (start, code) <= size);
/* Read integer register REG from the saved machine context. */
6904 mono_arch_context_get_int_reg (MonoContext *ctx, int reg)
6906 return ctx->regs [reg];
/* Write VAL into integer register REG of the saved machine context. */
6910 mono_arch_context_set_int_reg (MonoContext *ctx, int reg, mgreg_t val)
6912 ctx->regs [reg] = val;
6916 * mono_arch_get_trampolines:
6918 * Return a list of MonoTrampInfo structures describing arch specific trampolines
6922 mono_arch_get_trampolines (gboolean aot)
/* Delegates to the ARM exception-trampoline builder. */
6924 return mono_arm_get_exception_trampolines (aot);
6927 #if defined(MONO_ARCH_SOFT_DEBUG_SUPPORTED)
6929 * mono_arch_set_breakpoint:
6931 * Set a breakpoint at the native code corresponding to JI at NATIVE_OFFSET.
6932 * The location should contain code emitted by OP_SEQ_POINT.
/*
 * Three strategies: soft breakpoints patch in a BLX through LR; AOT code
 * records bp_trigger_page in the per-method SeqPointInfo table; JITted code
 * gets an inline load from the breakpoint trigger page.
 * NOTE(review): extraction dropped interior lines; visible code kept verbatim.
 */
6935 mono_arch_set_breakpoint (MonoJitInfo *ji, guint8 *ip)
6938 guint32 native_offset = ip - (guint8*)ji->code_start;
6939 MonoDebugOptions *opt = mini_get_debug_options ();
6941 if (opt->soft_breakpoints) {
6942 g_assert (!ji->from_aot);
6944 ARM_BLX_REG (code, ARMREG_LR);
6945 mono_arch_flush_icache (code - 4, 4);
6946 } else if (ji->from_aot) {
6947 SeqPointInfo *info = mono_arch_get_seq_point_info (mono_domain_get (), ji->code_start);
/* Seq points are 4-byte aligned; the slot must not already hold a breakpoint */
6949 g_assert (native_offset % 4 == 0);
6950 g_assert (info->bp_addrs [native_offset / 4] == 0);
6951 info->bp_addrs [native_offset / 4] = bp_trigger_page;
6953 int dreg = ARMREG_LR;
6955 /* Read from another trigger page */
6956 #ifdef USE_JUMP_TABLES
6957 gpointer *jte = mono_jumptable_add_entry ();
6958 code = mono_arm_load_jumptable_entry (code, jte, dreg);
6959 jte [0] = bp_trigger_page;
6961 ARM_LDR_IMM (code, dreg, ARMREG_PC, 0);
6963 *(int*)code = (int)bp_trigger_page;
/* Faulting load from the (protected) trigger page raises the debug signal */
6966 ARM_LDR_IMM (code, dreg, dreg, 0);
6968 mono_arch_flush_icache (code - 16, 16);
6971 /* This is currently implemented by emitting an SWI instruction, which
6972 * qemu/linux seems to convert to a SIGILL.
6974 *(int*)code = (0xef << 24) | 8;
6976 mono_arch_flush_icache (code - 4, 4);
6982 * mono_arch_clear_breakpoint:
6984 * Clear the breakpoint at IP.
/*
 * Mirror of mono_arch_set_breakpoint: undo the soft-breakpoint patch, clear
 * the AOT SeqPointInfo slot, or overwrite the inline breakpoint sequence.
 * NOTE(review): extraction dropped interior lines; visible code kept verbatim.
 */
6987 mono_arch_clear_breakpoint (MonoJitInfo *ji, guint8 *ip)
6989 MonoDebugOptions *opt = mini_get_debug_options ();
6993 if (opt->soft_breakpoints) {
6994 g_assert (!ji->from_aot);
6997 mono_arch_flush_icache (code - 4, 4);
6998 } else if (ji->from_aot) {
6999 guint32 native_offset = ip - (guint8*)ji->code_start;
7000 SeqPointInfo *info = mono_arch_get_seq_point_info (mono_domain_get (), ji->code_start);
/* The slot must hold the previously installed trigger page */
7002 g_assert (native_offset % 4 == 0);
7003 g_assert (info->bp_addrs [native_offset / 4] == bp_trigger_page);
7004 info->bp_addrs [native_offset / 4] = 0;
/* JIT case: rewrite the 4-instruction breakpoint sequence (loop body dropped
 * by extraction — presumably NOPs each word; verify against the full file) */
7006 for (i = 0; i < 4; ++i)
7009 mono_arch_flush_icache (ip, code - ip);
7014 * mono_arch_start_single_stepping:
7016 * Start single stepping.
7019 mono_arch_start_single_stepping (void)
/* Revoke all access so reads of the trigger page fault and signal the debugger. */
7021 if (ss_trigger_page)
7022 mono_mprotect (ss_trigger_page, mono_pagesize (), 0);
7028 * mono_arch_stop_single_stepping:
7030 * Stop single stepping.
7033 mono_arch_stop_single_stepping (void)
/* Restore read access so the trigger-page loads no longer fault. */
7035 if (ss_trigger_page)
7036 mono_mprotect (ss_trigger_page, mono_pagesize (), MONO_MMAP_READ);
7042 #define DBG_SIGNAL SIGBUS
7044 #define DBG_SIGNAL SIGSEGV
7048 * mono_arch_is_single_step_event:
7050 * Return whenever the machine state in SIGCTX corresponds to a single
7054 mono_arch_is_single_step_event (void *info, void *sigctx)
7056 siginfo_t *sinfo = info;
/* No trigger page means single stepping is not in use */
7058 if (!ss_trigger_page)
7061 /* Sometimes the address is off by 4 */
7062 if (sinfo->si_addr >= ss_trigger_page && (guint8*)sinfo->si_addr <= (guint8*)ss_trigger_page + 128)
7069 * mono_arch_is_breakpoint_event:
7071 * Return whenever the machine state in SIGCTX corresponds to a breakpoint event.
7074 mono_arch_is_breakpoint_event (void *info, void *sigctx)
7076 siginfo_t *sinfo = info;
/* NOTE(review): guards on ss_trigger_page even though the check below is
 * against bp_trigger_page — both pages are allocated together, so this is
 * presumably an availability check; confirm before "fixing". */
7078 if (!ss_trigger_page)
7081 if (sinfo->si_signo == DBG_SIGNAL) {
7082 /* Sometimes the address is off by 4 */
7083 if (sinfo->si_addr >= bp_trigger_page && (guint8*)sinfo->si_addr <= (guint8*)bp_trigger_page + 128)
7093 * mono_arch_skip_breakpoint:
7095 * See mini-amd64.c for docs.
7098 mono_arch_skip_breakpoint (MonoContext *ctx, MonoJitInfo *ji)
/* Advance past the single 4-byte faulting instruction. */
7100 MONO_CONTEXT_SET_IP (ctx, (guint8*)MONO_CONTEXT_GET_IP (ctx) + 4);
7104 * mono_arch_skip_single_step:
7106 * See mini-amd64.c for docs.
7109 mono_arch_skip_single_step (MonoContext *ctx)
/* Advance past the single 4-byte faulting instruction. */
7111 MONO_CONTEXT_SET_IP (ctx, (guint8*)MONO_CONTEXT_GET_IP (ctx) + 4);
7114 #endif /* MONO_ARCH_SOFT_DEBUG_SUPPORTED */
7117 * mono_arch_get_seq_point_info:
7119 * See mini-amd64.c for docs.
/*
 * Lazily create and cache (per domain, keyed by code start) the SeqPointInfo
 * used by AOT breakpoints; sized with one slot per 4-byte instruction.
 * NOTE(review): extraction dropped interior lines; visible code kept verbatim.
 */
7122 mono_arch_get_seq_point_info (MonoDomain *domain, guint8 *code)
7127 // FIXME: Add a free function
7129 mono_domain_lock (domain);
7130 info = g_hash_table_lookup (domain_jit_info (domain)->arch_seq_points,
7132 mono_domain_unlock (domain);
/* Cache miss: build a new entry from the method's JIT info */
7135 ji = mono_jit_info_table_find (domain, (char*)code);
7138 info = g_malloc0 (sizeof (SeqPointInfo) + ji->code_size);
7140 info->ss_trigger_page = ss_trigger_page;
7141 info->bp_trigger_page = bp_trigger_page;
7143 mono_domain_lock (domain);
7144 g_hash_table_insert (domain_jit_info (domain)->arch_seq_points,
7146 mono_domain_unlock (domain);
/* Initialize an extended LMF frame, tagging previous_lmf (bit 2) so the
 * unwinder can recognize it as a MonoLMFExt. */
7153 mono_arch_init_lmf_ext (MonoLMFExt *ext, gpointer prev_lmf)
7155 ext->lmf.previous_lmf = prev_lmf;
7156 /* Mark that this is a MonoLMFExt */
7157 ext->lmf.previous_lmf = (gpointer)(((gssize)ext->lmf.previous_lmf) | 2);
7158 ext->lmf.sp = (gssize)ext;
7162 * mono_arch_set_target:
7164 * Set the target architecture the JIT backend should generate code for, in the form
7165 * of a GNU target triplet. Only used in AOT mode.
/*
 * Feature flags are cumulative: matching a newer architecture substring also
 * enables the older ISA levels (e.g. armv7 implies v5/v6). Note strstr makes
 * "armv7" also match "armv7s", so the v7s branch only needs to add v7s.
 */
7168 mono_arch_set_target (char *mtriple)
7170 /* The GNU target triple format is not very well documented */
7171 if (strstr (mtriple, "armv7")) {
7172 v5_supported = TRUE;
7173 v6_supported = TRUE;
7174 v7_supported = TRUE;
7176 if (strstr (mtriple, "armv6")) {
7177 v5_supported = TRUE;
7178 v6_supported = TRUE;
7180 if (strstr (mtriple, "armv7s")) {
7181 v7s_supported = TRUE;
7183 if (strstr (mtriple, "thumbv7s")) {
7184 v5_supported = TRUE;
7185 v6_supported = TRUE;
7186 v7_supported = TRUE;
7187 v7s_supported = TRUE;
7188 thumb_supported = TRUE;
7189 thumb2_supported = TRUE;
/* Darwin/iOS targets always have at least v6 + Thumb */
7191 if (strstr (mtriple, "darwin") || strstr (mtriple, "ios")) {
7192 v5_supported = TRUE;
7193 v6_supported = TRUE;
7194 thumb_supported = TRUE;
7197 if (strstr (mtriple, "gnueabi"))
7198 eabi_supported = TRUE;
/* Report whether an atomic opcode can be emitted natively: the listed atomics
 * require ARMv7 (LDREX/STREX-based sequences). */
7202 mono_arch_opcode_supported (int opcode)
7205 case OP_ATOMIC_ADD_I4:
7206 case OP_ATOMIC_EXCHANGE_I4:
7207 case OP_ATOMIC_CAS_I4:
7208 return v7_supported;
7214 #if defined(ENABLE_GSHAREDVT)
7216 #include "../../../mono-extensions/mono/mini/mini-arm-gsharedvt.c"
7218 #endif /* !MONOTOUCH */