2 * mini-arm.c: ARM backend for the Mono code generator
5 * Paolo Molaro (lupus@ximian.com)
6 * Dietmar Maurer (dietmar@ximian.com)
8 * (C) 2003 Ximian, Inc.
9 * Copyright 2003-2011 Novell, Inc (http://www.novell.com)
10 * Copyright 2011 Xamarin, Inc (http://www.xamarin.com)
15 #include <mono/metadata/abi-details.h>
16 #include <mono/metadata/appdomain.h>
17 #include <mono/metadata/debug-helpers.h>
18 #include <mono/utils/mono-mmap.h>
19 #include <mono/utils/mono-hwcap-arm.h>
25 #include "debugger-agent.h"
27 #include "mono/arch/arm/arm-vfp-codegen.h"
29 /* Sanity check: This makes no sense */
30 #if defined(ARM_FPU_NONE) && (defined(ARM_FPU_VFP) || defined(ARM_FPU_VFP_HARD))
31 #error "ARM_FPU_NONE is defined while one of ARM_FPU_VFP/ARM_FPU_VFP_HARD is defined"
35 * IS_SOFT_FLOAT: Is full software floating point used?
36 * IS_HARD_FLOAT: Is full hardware floating point used?
37 * IS_VFP: Is hardware floating point with software ABI used?
39 * These are not necessarily constants, e.g. IS_SOFT_FLOAT and
40 * IS_VFP may delegate to mono_arch_is_soft_float ().
43 #if defined(ARM_FPU_VFP_HARD)
44 #define IS_SOFT_FLOAT (FALSE)
45 #define IS_HARD_FLOAT (TRUE)
47 #elif defined(ARM_FPU_NONE)
48 #define IS_SOFT_FLOAT (mono_arch_is_soft_float ())
49 #define IS_HARD_FLOAT (FALSE)
50 #define IS_VFP (!mono_arch_is_soft_float ())
52 #define IS_SOFT_FLOAT (FALSE)
53 #define IS_HARD_FLOAT (FALSE)
57 #if defined(__ARM_EABI__) && defined(__linux__) && !defined(PLATFORM_ANDROID) && !defined(__native_client__)
58 #define HAVE_AEABI_READ_TP 1
61 #ifdef __native_client_codegen__
62 const guint kNaClAlignment = kNaClAlignmentARM;
63 const guint kNaClAlignmentMask = kNaClAlignmentMaskARM;
64 gint8 nacl_align_byte = -1; /* 0xff */
67 mono_arch_nacl_pad (guint8 *code, int pad)
69 /* Not yet properly implemented. */
70 g_assert_not_reached ();
75 mono_arch_nacl_skip_nops (guint8 *code)
77 /* Not yet properly implemented. */
78 g_assert_not_reached ();
82 #endif /* __native_client_codegen__ */
84 #define ALIGN_TO(val,align) ((((guint64)val) + ((align) - 1)) & ~((align) - 1))
87 void sys_icache_invalidate (void *start, size_t len);
90 /* This mutex protects architecture specific caches */
91 #define mono_mini_arch_lock() mono_mutex_lock (&mini_arch_mutex)
92 #define mono_mini_arch_unlock() mono_mutex_unlock (&mini_arch_mutex)
93 static mono_mutex_t mini_arch_mutex;
95 static gboolean v5_supported = FALSE;
96 static gboolean v6_supported = FALSE;
97 static gboolean v7_supported = FALSE;
98 static gboolean v7s_supported = FALSE;
99 static gboolean thumb_supported = FALSE;
100 static gboolean thumb2_supported = FALSE;
102 * Whenever to use the ARM EABI
104 static gboolean eabi_supported = FALSE;
107 * Whenever to use the iphone ABI extensions:
108 * http://developer.apple.com/library/ios/documentation/Xcode/Conceptual/iPhoneOSABIReference/index.html
109 * Basically, r7 is used as a frame pointer and it should point to the saved r7 + lr.
110 * This is required for debugging/profiling tools to work, but it has some overhead so it should
111 * only be turned on in debug builds.
113 static gboolean iphone_abi = FALSE;
116 * The FPU we are generating code for. This is NOT runtime configurable right now,
117 * since some things like MONO_ARCH_CALLEE_FREGS still depend on defines.
119 static MonoArmFPU arm_fpu;
121 #if defined(ARM_FPU_VFP_HARD)
123 * On armhf, d0-d7 are used for argument passing and d8-d15
124 * must be preserved across calls, which leaves us no room
125 * for scratch registers. So we use d14-d15 but back up their
126 * previous contents to a stack slot before using them - see
127 * mono_arm_emit_vfp_scratch_save/_restore ().
129 static int vfp_scratch1 = ARM_VFP_D14;
130 static int vfp_scratch2 = ARM_VFP_D15;
133 * On armel, d0-d7 do not need to be preserved, so we can
134 * freely make use of them as scratch registers.
136 static int vfp_scratch1 = ARM_VFP_D0;
137 static int vfp_scratch2 = ARM_VFP_D1;
142 static volatile int ss_trigger_var = 0;
144 static gpointer single_step_func_wrapper;
145 static gpointer breakpoint_func_wrapper;
148 * The code generated for sequence points reads from this location, which is
149 * made read-only when single stepping is enabled.
151 static gpointer ss_trigger_page;
153 /* Enabled breakpoints read from this trigger page */
154 static gpointer bp_trigger_page;
158 * floating point support: on ARM it is a mess, there are at least 3
159 * different setups, each of which binary incompat with the other.
160 * 1) FPA: old and ugly, but unfortunately what current distros use
161 * the double binary format has the two words swapped. 8 double registers.
162 * Implemented usually by kernel emulation.
163 * 2) softfloat: the compiler emulates all the fp ops. Usually uses the
164 * ugly swapped double format (I guess a softfloat-vfp exists, too, though).
165 * 3) VFP: the new and actually sensible and useful FP support. Implemented
166 * in HW or kernel-emulated, requires new tools. I think this is what symbian uses.
168 * We do not care about FPA. We will support soft float and VFP.
170 int mono_exc_esp_offset = 0;
172 #define arm_is_imm12(v) ((v) > -4096 && (v) < 4096)
173 #define arm_is_imm8(v) ((v) > -256 && (v) < 256)
174 #define arm_is_fpimm8(v) ((v) >= -1020 && (v) <= 1020)
176 #define LDR_MASK ((0xf << ARMCOND_SHIFT) | (3 << 26) | (1 << 22) | (1 << 20) | (15 << 12))
177 #define LDR_PC_VAL ((ARMCOND_AL << ARMCOND_SHIFT) | (1 << 26) | (0 << 22) | (1 << 20) | (15 << 12))
178 #define IS_LDR_PC(val) (((val) & LDR_MASK) == LDR_PC_VAL)
180 #define ADD_LR_PC_4 ((ARMCOND_AL << ARMCOND_SHIFT) | (1 << 25) | (1 << 23) | (ARMREG_PC << 16) | (ARMREG_LR << 12) | 4)
181 #define MOV_LR_PC ((ARMCOND_AL << ARMCOND_SHIFT) | (1 << 24) | (0xa << 20) | (ARMREG_LR << 12) | ARMREG_PC)
182 //#define DEBUG_IMT 0
184 /* A variant of ARM_LDR_IMM which can handle large offsets */
185 #define ARM_LDR_IMM_GENERAL(code, dreg, basereg, offset, scratch_reg) do { \
186 if (arm_is_imm12 ((offset))) { \
187 ARM_LDR_IMM (code, (dreg), (basereg), (offset)); \
189 g_assert ((scratch_reg) != (basereg)); \
190 code = mono_arm_emit_load_imm (code, (scratch_reg), (offset)); \
191 ARM_LDR_REG_REG (code, (dreg), (basereg), (scratch_reg)); \
195 #define ARM_STR_IMM_GENERAL(code, dreg, basereg, offset, scratch_reg) do { \
196 if (arm_is_imm12 ((offset))) { \
197 ARM_STR_IMM (code, (dreg), (basereg), (offset)); \
199 g_assert ((scratch_reg) != (basereg)); \
200 code = mono_arm_emit_load_imm (code, (scratch_reg), (offset)); \
201 ARM_STR_REG_REG (code, (dreg), (basereg), (scratch_reg)); \
206 static void mono_arch_compute_omit_fp (MonoCompile *cfg);
/*
 * mono_arch_regname:
 * Return a printable name for integer register number REG (0..15).
 * NOTE(review): lines are elided in this view — the return statements and
 * the "unknown" fallback are not visible here.
 */
210 mono_arch_regname (int reg)
212 	static const char * rnames[] = {
213 		"arm_r0", "arm_r1", "arm_r2", "arm_r3", "arm_v1",
214 		"arm_v2", "arm_v3", "arm_v4", "arm_v5", "arm_v6",
215 		"arm_v7", "arm_fp", "arm_ip", "arm_sp", "arm_lr",
	/* Only 16 integer registers exist on ARM; guard the table lookup. */
218 	if (reg >= 0 && reg < 16)
/*
 * mono_arch_fregname:
 * Return a printable name for floating point register number REG (0..31).
 * NOTE(review): return statements are elided from this view.
 */
224 mono_arch_fregname (int reg)
226 	static const char * rnames[] = {
227 		"arm_f0", "arm_f1", "arm_f2", "arm_f3", "arm_f4",
228 		"arm_f5", "arm_f6", "arm_f7", "arm_f8", "arm_f9",
229 		"arm_f10", "arm_f11", "arm_f12", "arm_f13", "arm_f14",
230 		"arm_f15", "arm_f16", "arm_f17", "arm_f18", "arm_f19",
231 		"arm_f20", "arm_f21", "arm_f22", "arm_f23", "arm_f24",
232 		"arm_f25", "arm_f26", "arm_f27", "arm_f28", "arm_f29",
	/* 32 single-precision VFP registers. */
235 	if (reg >= 0 && reg < 32)
/*
 * emit_big_add:
 * Emit code computing dreg = sreg + imm, handling immediates that do not
 * fit an ARM rotated 8-bit encoding. Returns the updated code pointer.
 * NOTE(review): the branch structure between the three strategies is
 * elided in this view; presumably the IP-scratch path is taken when
 * dreg == sreg — confirm against the full source.
 */
243 emit_big_add (guint8 *code, int dreg, int sreg, int imm)
245 	int imm8, rot_amount;
	/* Fast path: imm is encodable as an ARM rotated 8-bit immediate. */
246 	if ((imm8 = mono_arm_is_rotated_imm8 (imm, &rot_amount)) >= 0) {
247 		ARM_ADD_REG_IMM (code, dreg, sreg, imm8, rot_amount);
	/* Materialize imm in IP scratch, then add register-to-register. */
251 		code = mono_arm_emit_load_imm (code, ARMREG_IP, imm);
252 		ARM_ADD_REG_REG (code, dreg, sreg, ARMREG_IP);
	/* Load imm directly into dreg and add sreg (avoids clobbering IP). */
254 		code = mono_arm_emit_load_imm (code, dreg, imm);
255 		ARM_ADD_REG_REG (code, dreg, dreg, sreg);
260 /* If dreg == sreg, this clobbers IP */
/*
 * emit_sub_imm:
 * Emit code computing dreg = sreg - imm, mirroring emit_big_add for
 * immediates too large for a single SUB encoding.
 */
262 emit_sub_imm (guint8 *code, int dreg, int sreg, int imm)
264 	int imm8, rot_amount;
	/* Fast path: imm fits an ARM rotated 8-bit immediate. */
265 	if ((imm8 = mono_arm_is_rotated_imm8 (imm, &rot_amount)) >= 0) {
266 		ARM_SUB_REG_IMM (code, dreg, sreg, imm8, rot_amount);
	/* Large imm: load into IP scratch, then subtract register. */
270 		code = mono_arm_emit_load_imm (code, ARMREG_IP, imm);
271 		ARM_SUB_REG_REG (code, dreg, sreg, ARMREG_IP);
	/* Load imm into dreg first, then compute dreg - sreg.
	 * NOTE(review): surrounding branch lines are elided; verify the
	 * operand order in the full source (SUB is not commutative). */
273 		code = mono_arm_emit_load_imm (code, dreg, imm);
274 		ARM_SUB_REG_REG (code, dreg, dreg, sreg);
/*
 * emit_memcpy:
 * Emit code copying SIZE bytes from sreg+soffset to dreg+doffset.
 * Large copies (> 4 pointers) use an emitted word-copy loop through
 * r0-r3; smaller copies are unrolled using LR as the transfer register.
 * Clobbers r0-r3 / LR accordingly.
 */
280 emit_memcpy (guint8 *code, int size, int dreg, int doffset, int sreg, int soffset)
282 	/* we can use r0-r3, since this is called only for incoming args on the stack */
283 	if (size > sizeof (gpointer) * 4) {
		/* Set up r0 = src cursor, r1 = dst cursor, r2 = remaining count. */
285 		code = emit_big_add (code, ARMREG_R0, sreg, soffset);
286 		code = emit_big_add (code, ARMREG_R1, dreg, doffset);
287 		start_loop = code = mono_arm_emit_load_imm (code, ARMREG_R2, size);
		/* Loop body: copy one word and advance both cursors. */
288 		ARM_LDR_IMM (code, ARMREG_R3, ARMREG_R0, 0);
289 		ARM_STR_IMM (code, ARMREG_R3, ARMREG_R1, 0);
290 		ARM_ADD_REG_IMM8 (code, ARMREG_R0, ARMREG_R0, 4);
291 		ARM_ADD_REG_IMM8 (code, ARMREG_R1, ARMREG_R1, 4);
292 		ARM_SUBS_REG_IMM8 (code, ARMREG_R2, ARMREG_R2, 4);
		/* Branch back while r2 != 0; displacement patched just below. */
293 		ARM_B_COND (code, ARMCOND_NE, 0);
294 		arm_patch (code - 4, start_loop);
	/* Small copy, offsets reachable with imm12 addressing: unrolled moves. */
297 	if (arm_is_imm12 (doffset) && arm_is_imm12 (doffset + size) &&
298 			arm_is_imm12 (soffset) && arm_is_imm12 (soffset + size)) {
300 			ARM_LDR_IMM (code, ARMREG_LR, sreg, soffset);
301 			ARM_STR_IMM (code, ARMREG_LR, dreg, doffset);
	/* Offsets too large for imm12: rebase into r0/r1 and copy from offset 0. */
307 		code = emit_big_add (code, ARMREG_R0, sreg, soffset);
308 		code = emit_big_add (code, ARMREG_R1, dreg, doffset);
309 		doffset = soffset = 0;
311 			ARM_LDR_IMM (code, ARMREG_LR, ARMREG_R0, soffset);
312 			ARM_STR_IMM (code, ARMREG_LR, ARMREG_R1, doffset);
	/* All bytes must have been consumed by the paths above. */
318 	g_assert (size == 0);
/*
 * emit_call_reg:
 * Emit an indirect call through REG. Uses BLX when available
 * (presumably gated on v5_supported — the condition line is elided here),
 * otherwise the classic mov lr, pc / mov pc, reg sequence.
 */
323 emit_call_reg (guint8 *code, int reg)
326 		ARM_BLX_REG (code, reg);
328 #ifdef USE_JUMP_TABLES
329 		g_assert_not_reached ();
	/* Pre-v5 fallback: manually set the return address, then jump. */
331 		ARM_MOV_REG_REG (code, ARMREG_LR, ARMREG_PC);
335 			ARM_MOV_REG_REG (code, ARMREG_PC, reg);
/*
 * emit_call_seq:
 * Emit a patchable call sequence. With jump tables, a patchable BL is
 * used; otherwise, for dynamic methods, the target address is embedded
 * inline after a PC-relative load into IP (the NULL slot is filled in
 * later by the patcher) and the call goes through emit_call_reg ().
 */
341 emit_call_seq (MonoCompile *cfg, guint8 *code)
343 #ifdef USE_JUMP_TABLES
344 	code = mono_arm_patchable_bl (code, ARMCOND_AL);
346 	if (cfg->method->dynamic) {
		/* ldr ip, [pc, #0] — loads the word embedded right below. */
347 		ARM_LDR_IMM (code, ARMREG_IP, ARMREG_PC, 0);
		/* Placeholder for the call target, patched at link time. */
349 		*(gpointer*)code = NULL;
351 		code = emit_call_reg (code, ARMREG_IP);
/*
 * mono_arm_patchable_b:
 * Emit a conditional branch whose target can be patched later.
 * With jump tables, the target lives in a jumptable entry loaded into IP;
 * otherwise a plain B with a zero displacement is emitted for arm_patch ().
 */
360 mono_arm_patchable_b (guint8 *code, int cond)
362 #ifdef USE_JUMP_TABLES
365 	jte = mono_jumptable_add_entry ();
366 	code = mono_arm_load_jumptable_entry (code, jte, ARMREG_IP);
367 	ARM_BX_COND (code, cond, ARMREG_IP);
369 	ARM_B_COND (code, cond, 0);
/*
 * mono_arm_patchable_bl:
 * Like mono_arm_patchable_b () but emits a call (sets LR): BLX through a
 * jumptable entry when jump tables are in use, else a patchable BL.
 */
375 mono_arm_patchable_bl (guint8 *code, int cond)
377 #ifdef USE_JUMP_TABLES
380 	jte = mono_jumptable_add_entry ();
381 	code = mono_arm_load_jumptable_entry (code, jte, ARMREG_IP);
382 	ARM_BLX_REG_COND (code, cond, ARMREG_IP);
384 	ARM_BL_COND (code, cond, 0);
389 #ifdef USE_JUMP_TABLES
/*
 * mono_arm_load_jumptable_entry_addr:
 * Load the address of jumptable entry JTE into REG using the MOVW/MOVT
 * pair (low 16 bits, then high 16 bits), which requires ARMv7-class
 * encodings.
 */
391 mono_arm_load_jumptable_entry_addr (guint8 *code, gpointer *jte, ARMReg reg)
393 	ARM_MOVW_REG_IMM (code, reg, GPOINTER_TO_UINT(jte) & 0xffff);
394 	ARM_MOVT_REG_IMM (code, reg, (GPOINTER_TO_UINT(jte) >> 16) & 0xffff);
/*
 * mono_arm_load_jumptable_entry:
 * Load the VALUE stored in jumptable entry JTE into REG: first the
 * entry's address, then a dereference.
 */
399 mono_arm_load_jumptable_entry (guint8 *code, gpointer* jte, ARMReg reg)
401 	code = mono_arm_load_jumptable_entry_addr (code, jte, reg);
402 	ARM_LDR_IMM (code, reg, reg, 0);
/*
 * emit_move_return_value:
 * After a call instruction, move the return value from the ABI return
 * location into ins->dreg. Float results arrive either in VFP registers
 * (hard-float ABI) or in r0/r1 (softfp), depending on configuration;
 * the #ifdef/case lines selecting between them are elided in this view.
 */
408 emit_move_return_value (MonoCompile *cfg, MonoInst *ins, guint8 *code)
410 	switch (ins->opcode) {
413 	case OP_FCALL_MEMBASE:
415 		MonoType *sig_ret = mini_type_get_underlying_type (NULL, ((MonoCallInst*)ins)->signature->ret);
416 		if (sig_ret->type == MONO_TYPE_R4) {
			/* R4 result in s0: widen to double precision in dreg. */
418 				ARM_CVTS (code, ins->dreg, ARM_VFP_F0);
			/* Softfp R4 result in r0: move to VFP reg, then widen. */
420 				ARM_FMSR (code, ins->dreg, ARMREG_R0);
421 				ARM_CVTS (code, ins->dreg, ins->dreg);
			/* R8 result in d0 (hard-float): plain double copy. */
425 				ARM_CPYD (code, ins->dreg, ARM_VFP_D0);
			/* Softfp R8 result split across r0/r1: pack into a dreg. */
427 				ARM_FMDRR (code, ARMREG_R0, ARMREG_R1, ins->dreg);
440  * Emit code to push an LMF structure on the LMF stack.
441  * On arm, this is intermixed with the initialization of other fields of the structure.
/*
 * emit_save_lmf:
 * Emit prologue code that links a MonoLMF (Last Managed Frame) built at
 * SP+lmf_offset into the thread's LMF list. The lmf_addr is obtained via
 * one of three strategies, fastest first:
 *   1. __aeabi_read_tp + known TLS offset (HAVE_AEABI_READ_TP),
 *   2. inlined pthread_getspecific (managed-to-native wrappers),
 *   3. a call to mono_get_lmf_addr ().
 */
444 emit_save_lmf (MonoCompile *cfg, guint8 *code, gint32 lmf_offset)
446 	gboolean get_lmf_fast = FALSE;
449 #ifdef HAVE_AEABI_READ_TP
450 	gint32 lmf_addr_tls_offset = mono_get_lmf_addr_tls_offset ();
452 	if (lmf_addr_tls_offset != -1) {
		/* r0 = TLS base via __aeabi_read_tp, then load lmf_addr slot. */
455 		mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_INTERNAL_METHOD,
456 							 (gpointer)"__aeabi_read_tp");
457 		code = emit_call_seq (cfg, code);
459 		ARM_LDR_IMM (code, ARMREG_R0, ARMREG_R0, lmf_addr_tls_offset);
465 	if (cfg->method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) {
468 		/* Inline mono_get_lmf_addr () */
469 		/* jit_tls = pthread_getspecific (mono_jit_tls_id); lmf_addr = &jit_tls->lmf; */
471 		/* Load mono_jit_tls_id */
473 		mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_JIT_TLS_ID, NULL);
474 		ARM_LDR_IMM (code, ARMREG_R0, ARMREG_PC, 0);
		/* Inline slot patched with the TLS key at link time. */
476 		*(gpointer*)code = NULL;
478 		ARM_LDR_REG_REG (code, ARMREG_R0, ARMREG_PC, ARMREG_R0);
479 		/* call pthread_getspecific () */
480 		mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_INTERNAL_METHOD,
481 							 (gpointer)"pthread_getspecific");
482 		code = emit_call_seq (cfg, code);
483 		/* lmf_addr = &jit_tls->lmf */
		/* NOTE(review): lmf_offset is reused here as a struct-field
		 * offset, shadowing the stack offset parameter — confirm the
		 * parameter is not needed afterwards in the full source. */
484 		lmf_offset = MONO_STRUCT_OFFSET (MonoJitTlsData, lmf);
485 		g_assert (arm_is_imm8 (lmf_offset));
486 		ARM_ADD_REG_IMM (code, ARMREG_R0, ARMREG_R0, lmf_offset, 0);
	/* Slow path: plain call to mono_get_lmf_addr (), result in r0. */
493 		mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_INTERNAL_METHOD,
494 							 (gpointer)"mono_get_lmf_addr");
495 		code = emit_call_seq (cfg, code);
497 	/* we build the MonoLMF structure on the stack - see mini-arm.h */
498 	/* lmf_offset is the offset from the previous stack pointer,
499 	 * alloc_size is the total stack space allocated, so the offset
500 	 * of MonoLMF from the current stack ptr is alloc_size - lmf_offset.
501 	 * The pointer to the struct is put in r1 (new_lmf).
502 	 * ip is used as scratch
503 	 * The callee-saved registers are already in the MonoLMF structure
505 	code = emit_big_add (code, ARMREG_R1, ARMREG_SP, lmf_offset);
506 	/* r0 is the result from mono_get_lmf_addr () */
507 	ARM_STR_IMM (code, ARMREG_R0, ARMREG_R1, MONO_STRUCT_OFFSET (MonoLMF, lmf_addr));
508 	/* new_lmf->previous_lmf = *lmf_addr */
509 	ARM_LDR_IMM (code, ARMREG_IP, ARMREG_R0, MONO_STRUCT_OFFSET (MonoLMF, previous_lmf));
510 	ARM_STR_IMM (code, ARMREG_IP, ARMREG_R1, MONO_STRUCT_OFFSET (MonoLMF, previous_lmf));
511 	/* *(lmf_addr) = r1 */
512 	ARM_STR_IMM (code, ARMREG_R1, ARMREG_R0, MONO_STRUCT_OFFSET (MonoLMF, previous_lmf));
513 	/* Skip method (only needed for trampoline LMF frames) */
514 	ARM_STR_IMM (code, ARMREG_SP, ARMREG_R1, MONO_STRUCT_OFFSET (MonoLMF, sp));
515 	ARM_STR_IMM (code, ARMREG_FP, ARMREG_R1, MONO_STRUCT_OFFSET (MonoLMF, fp));
516 	/* save the current IP */
517 	ARM_MOV_REG_REG (code, ARMREG_IP, ARMREG_PC);
518 	ARM_STR_IMM (code, ARMREG_IP, ARMREG_R1, MONO_STRUCT_OFFSET (MonoLMF, ip));
	/* Tell the precise GC there are no managed references in the LMF. */
520 	for (i = 0; i < sizeof (MonoLMF); i += sizeof (mgreg_t))
521 		mini_gc_set_slot_type_from_fp (cfg, lmf_offset + i, SLOT_NOREF);
/*
 * emit_float_args:
 * For a call under the hard-float ABI, load each queued single-precision
 * argument from its stack slot into the VFP register the ABI assigned
 * (fad->hreg). Grows cfg->native_code if the emitted sequence might not
 * fit. Clobbers LR when the slot offset exceeds the fpimm8 range.
 */
532 emit_float_args (MonoCompile *cfg, MonoCallInst *inst, guint8 *code, int *max_len, guint *offset)
536 	for (list = inst->float_args; list; list = list->next) {
537 		FloatArgData *fad = list->data;
538 		MonoInst *var = get_vreg_to_inst (cfg, fad->vreg);
		/* Can FLDS address the slot directly with an 8-bit offset? */
539 		gboolean imm = arm_is_fpimm8 (var->inst_offset);
541 		/* 4+1 insns for emit_big_add () and 1 for FLDS. */
		/* Ensure the buffer can hold the worst-case sequence. */
547 		if (*offset + *max_len > cfg->code_size) {
548 			cfg->code_size += *max_len;
549 			cfg->native_code = g_realloc (cfg->native_code, cfg->code_size);
551 			code = cfg->native_code + *offset;
		/* Offset out of range: compute the address in LR first. */
555 			code = emit_big_add (code, ARMREG_LR, var->inst_basereg, var->inst_offset);
556 			ARM_FLDS (code, fad->hreg, ARMREG_LR, 0);
558 			ARM_FLDS (code, fad->hreg, var->inst_basereg, var->inst_offset);
560 		*offset = code - cfg->native_code;
/*
 * mono_arm_emit_vfp_scratch_save:
 * Spill VFP scratch register REG (must be vfp_scratch1/2) to its
 * dedicated stack slot so its previous contents survive scratch use.
 * Needed on armhf where d14/d15 are callee-saved (see comment at the
 * vfp_scratch definitions). Clobbers LR for out-of-range offsets.
 */
567 mono_arm_emit_vfp_scratch_save (MonoCompile *cfg, guint8 *code, int reg)
571 	g_assert (reg == vfp_scratch1 || reg == vfp_scratch2);
573 	inst = (MonoInst *) cfg->arch.vfp_scratch_slots [reg == vfp_scratch1 ? 0 : 1];
	/* FSTD's immediate is limited; go through LR when the slot is far. */
576 	if (!arm_is_fpimm8 (inst->inst_offset)) {
577 		code = emit_big_add (code, ARMREG_LR, inst->inst_basereg, inst->inst_offset);
578 		ARM_FSTD (code, reg, ARMREG_LR, 0);
580 		ARM_FSTD (code, reg, inst->inst_basereg, inst->inst_offset);
/*
 * mono_arm_emit_vfp_scratch_restore:
 * Reload VFP scratch register REG from the slot saved by
 * mono_arm_emit_vfp_scratch_save (). Mirror of the save path.
 */
587 mono_arm_emit_vfp_scratch_restore (MonoCompile *cfg, guint8 *code, int reg)
591 	g_assert (reg == vfp_scratch1 || reg == vfp_scratch2);
593 	inst = (MonoInst *) cfg->arch.vfp_scratch_slots [reg == vfp_scratch1 ? 0 : 1];
	/* FLDD's immediate is limited; go through LR when the slot is far. */
596 	if (!arm_is_fpimm8 (inst->inst_offset)) {
597 		code = emit_big_add (code, ARMREG_LR, inst->inst_basereg, inst->inst_offset);
598 		ARM_FLDD (code, reg, ARMREG_LR, 0);
600 		ARM_FLDD (code, reg, inst->inst_basereg, inst->inst_offset);
609  * Emit code to pop an LMF structure from the LMF stack.
/*
 * emit_restore_lmf:
 * Epilogue counterpart of emit_save_lmf (): unlink this frame's MonoLMF
 * by writing previous_lmf back through lmf_addr. When the LMF is too far
 * from the frame register for immediate addressing, its address is first
 * computed into r2. Clobbers IP, LR (and possibly r2).
 */
612 emit_restore_lmf (MonoCompile *cfg, guint8 *code, gint32 lmf_offset)
616 	if (lmf_offset < 32) {
617 		basereg = cfg->frame_reg;
	/* Far LMF: rebase into r2 so the field offsets below stay small. */
622 		code = emit_big_add (code, ARMREG_R2, cfg->frame_reg, lmf_offset);
625 	/* ip = previous_lmf */
626 	ARM_LDR_IMM (code, ARMREG_IP, basereg, offset + MONO_STRUCT_OFFSET (MonoLMF, previous_lmf));
628 	ARM_LDR_IMM (code, ARMREG_LR, basereg, offset + MONO_STRUCT_OFFSET (MonoLMF, lmf_addr));
629 	/* *(lmf_addr) = previous_lmf */
630 	ARM_STR_IMM (code, ARMREG_IP, ARMREG_LR, MONO_STRUCT_OFFSET (MonoLMF, previous_lmf));
635 #endif /* #ifndef DISABLE_JIT */
638  * mono_arch_get_argument_info:
639  * @csig: a method signature
640  * @param_count: the number of parameters to consider
641  * @arg_info: an array to store the result infos
643  * Gathers information on parameters such as size, alignment and
644  * padding. arg_info should be large enought to hold param_count + 1 entries.
646  * Returns the size of the activation frame.
649 mono_arch_get_argument_info (MonoGenericSharingContext *gsctx, MonoMethodSignature *csig, int param_count, MonoJitArgumentInfo *arg_info)
651 	int k, frame_size = 0;
652 	guint32 size, align, pad;
	/* A struct return is passed as a hidden pointer: reserve a slot. */
656 	t = mini_type_get_underlying_type (gsctx, csig->ret);
657 	if (MONO_TYPE_ISSTRUCT (t)) {
658 		frame_size += sizeof (gpointer);
662 	arg_info [0].offset = offset;
	/* 'this' pointer, when present (condition line elided in this view). */
665 		frame_size += sizeof (gpointer);
669 	arg_info [0].size = frame_size;
671 	for (k = 0; k < param_count; k++) {
672 		size = mini_type_stack_size_full (NULL, csig->params [k], &align, csig->pinvoke);
674 		/* ignore alignment for now */
		/* Pad frame up to this parameter's alignment. */
677 		frame_size += pad = (align - (frame_size & (align - 1))) & (align - 1);
678 		arg_info [k].pad = pad;
680 		arg_info [k + 1].pad = 0;
681 		arg_info [k + 1].size = size;
683 		arg_info [k + 1].offset = offset;
	/* Round the whole frame up to the ABI frame alignment. */
687 	align = MONO_ARCH_FRAME_ALIGNMENT;
688 	frame_size += pad = (align - (frame_size & (align - 1))) & (align - 1);
689 	arg_info [k].pad = pad;
694 #define MAX_ARCH_DELEGATE_PARAMS 3
/*
 * get_delegate_invoke_impl:
 * Generate a small trampoline implementing delegate invocation.
 * has_target: replace 'this' (r0) with delegate->target and jump to
 * method_ptr. Otherwise: shift the register arguments down by one to
 * drop the delegate argument, then jump to method_ptr.
 * code_size, when non-NULL, receives the generated length.
 * NOTE(review): param_count is declared gboolean but used as a count —
 * works because callers pass <= MAX_ARCH_DELEGATE_PARAMS, but the type
 * looks like an oversight; confirm against the full source.
 */
697 get_delegate_invoke_impl (gboolean has_target, gboolean param_count, guint32 *code_size)
699 	guint8 *code, *start;
702 		start = code = mono_global_codeman_reserve (12);
704 		/* Replace the this argument with the target */
705 		ARM_LDR_IMM (code, ARMREG_IP, ARMREG_R0, MONO_STRUCT_OFFSET (MonoDelegate, method_ptr));
706 		ARM_LDR_IMM (code, ARMREG_R0, ARMREG_R0, MONO_STRUCT_OFFSET (MonoDelegate, target));
707 		ARM_MOV_REG_REG (code, ARMREG_PC, ARMREG_IP);
709 		g_assert ((code - start) <= 12);
711 		mono_arch_flush_icache (start, 12);
	/* No-target variant: 2 insns + 1 register move per parameter. */
715 		size = 8 + param_count * 4;
716 		start = code = mono_global_codeman_reserve (size);
718 		ARM_LDR_IMM (code, ARMREG_IP, ARMREG_R0, MONO_STRUCT_OFFSET (MonoDelegate, method_ptr));
719 		/* slide down the arguments */
720 		for (i = 0; i < param_count; ++i) {
721 			ARM_MOV_REG_REG (code, (ARMREG_R0 + i), (ARMREG_R0 + i + 1));
723 		ARM_MOV_REG_REG (code, ARMREG_PC, ARMREG_IP);
725 		g_assert ((code - start) <= size);
727 		mono_arch_flush_icache (start, size);
731 		*code_size = code - start;
737  * mono_arch_get_delegate_invoke_impls:
739  * Return a list of MonoAotTrampInfo structures for the delegate invoke impl
/*
 * Builds the has-target variant plus one no-target variant per supported
 * parameter count (0..MAX_ARCH_DELEGATE_PARAMS), for AOT compilation.
 */
743 mono_arch_get_delegate_invoke_impls (void)
751 	code = get_delegate_invoke_impl (TRUE, 0, &code_len);
752 	res = g_slist_prepend (res, mono_tramp_info_create ("delegate_invoke_impl_has_target", code, code_len, NULL, NULL));
754 	for (i = 0; i <= MAX_ARCH_DELEGATE_PARAMS; ++i) {
755 		code = get_delegate_invoke_impl (FALSE, i, &code_len);
756 		tramp_name = g_strdup_printf ("delegate_invoke_impl_target_%d", i);
757 		res = g_slist_prepend (res, mono_tramp_info_create (tramp_name, code, code_len, NULL, NULL));
/*
 * mono_arch_get_delegate_invoke_impl:
 * Return (and cache) a delegate-invoke trampoline matching SIG.
 * Returns NULL for unsupported signatures (struct returns, more than
 * MAX_ARCH_DELEGATE_PARAMS args, non-regsize parameters). Under AOT the
 * precompiled trampolines from mono_arch_get_delegate_invoke_impls ()
 * are looked up by name instead of generating code.
 * Caches are guarded by the mini-arch mutex.
 */
765 mono_arch_get_delegate_invoke_impl (MonoMethodSignature *sig, gboolean has_target)
767 	guint8 *code, *start;
770 	/* FIXME: Support more cases */
771 	sig_ret = mini_type_get_underlying_type (NULL, sig->ret);
772 	if (MONO_TYPE_ISSTRUCT (sig_ret))
	/* Has-target case: one shared trampoline, cached in a static. */
776 		static guint8* cached = NULL;
777 		mono_mini_arch_lock ();
779 			mono_mini_arch_unlock ();
784 			start = mono_aot_get_trampoline ("delegate_invoke_impl_has_target");
786 			start = get_delegate_invoke_impl (TRUE, 0, NULL);
788 		mono_mini_arch_unlock ();
	/* No-target case: one cached trampoline per parameter count. */
791 		static guint8* cache [MAX_ARCH_DELEGATE_PARAMS + 1] = {NULL};
794 		if (sig->param_count > MAX_ARCH_DELEGATE_PARAMS)
	/* All parameters must fit in a single integer register. */
796 		for (i = 0; i < sig->param_count; ++i)
797 			if (!mono_is_regsize_var (sig->params [i]))
800 		mono_mini_arch_lock ();
801 		code = cache [sig->param_count];
803 			mono_mini_arch_unlock ();
808 			char *name = g_strdup_printf ("delegate_invoke_impl_target_%d", sig->param_count);
809 			start = mono_aot_get_trampoline (name);
812 			start = get_delegate_invoke_impl (FALSE, sig->param_count, NULL);
814 		cache [sig->param_count] = start;
815 		mono_mini_arch_unlock ();
823 mono_arch_get_delegate_virtual_invoke_impl (MonoMethodSignature *sig, MonoMethod *method, int offset, gboolean load_imt_reg)
829 mono_arch_get_this_arg_from_call (mgreg_t *regs, guint8 *code)
831 return (gpointer)regs [ARMREG_R0];
835 * Initialize the cpu to execute managed code.
838 mono_arch_cpu_init (void)
840 i8_align = MONO_ABI_ALIGNOF (gint64);
841 #ifdef MONO_CROSS_COMPILE
842 /* Need to set the alignment of i8 since it can different on the target */
843 #ifdef TARGET_ANDROID
845 mono_type_set_alignment (MONO_TYPE_I8, i8_align);
/*
 * create_function_wrapper:
 * Generate a wrapper that captures the full register state into a
 * MonoContext on the stack, calls FUNCTION (ctx) with it, then restores
 * all registers (including PC) from the possibly-modified context.
 * Used for the soft-debugger single-step/breakpoint entry points
 * (see mono_arch_init).
 */
851 create_function_wrapper (gpointer function)
853 	guint8 *start, *code;
855 	start = code = mono_global_codeman_reserve (96);
858 	 * Construct the MonoContext structure on the stack.
861 	ARM_SUB_REG_IMM8 (code, ARMREG_SP, ARMREG_SP, sizeof (MonoContext));
863 	/* save ip, lr and pc into their correspodings ctx.regs slots. */
864 	ARM_STR_IMM (code, ARMREG_IP, ARMREG_SP, MONO_STRUCT_OFFSET (MonoContext, regs) + sizeof (mgreg_t) * ARMREG_IP);
865 	ARM_STR_IMM (code, ARMREG_LR, ARMREG_SP, MONO_STRUCT_OFFSET (MonoContext, regs) + 4 * ARMREG_LR);
	/* LR also stands in for PC: the context "pc" is the return site. */
866 	ARM_STR_IMM (code, ARMREG_LR, ARMREG_SP, MONO_STRUCT_OFFSET (MonoContext, regs) + 4 * ARMREG_PC);
868 	/* save r0..r10 and fp */
869 	ARM_ADD_REG_IMM8 (code, ARMREG_IP, ARMREG_SP, MONO_STRUCT_OFFSET (MonoContext, regs));
870 	ARM_STM (code, ARMREG_IP, 0x0fff);
872 	/* now we can update fp. */
873 	ARM_MOV_REG_REG (code, ARMREG_FP, ARMREG_SP);
875 	/* make ctx.esp hold the actual value of sp at the beginning of this method. */
876 	ARM_ADD_REG_IMM8 (code, ARMREG_R0, ARMREG_FP, sizeof (MonoContext));
877 	ARM_STR_IMM (code, ARMREG_R0, ARMREG_IP, 4 * ARMREG_SP);
878 	ARM_STR_IMM (code, ARMREG_R0, ARMREG_FP, MONO_STRUCT_OFFSET (MonoContext, regs) + 4 * ARMREG_SP);
880 	/* make ctx.eip hold the address of the call. */
881 	ARM_SUB_REG_IMM8 (code, ARMREG_LR, ARMREG_LR, 4);
882 	ARM_STR_IMM (code, ARMREG_LR, ARMREG_SP, MONO_STRUCT_OFFSET (MonoContext, pc));
884 	/* r0 now points to the MonoContext */
885 	ARM_MOV_REG_REG (code, ARMREG_R0, ARMREG_FP);
	/* Load the target FUNCTION address (jumptable or inline literal). */
888 #ifdef USE_JUMP_TABLES
890 		gpointer *jte = mono_jumptable_add_entry ();
891 		code = mono_arm_load_jumptable_entry (code, jte, ARMREG_IP);
895 	ARM_LDR_IMM (code, ARMREG_IP, ARMREG_PC, 0);
897 	*(gpointer*)code = function;
900 	ARM_BLX_REG (code, ARMREG_IP);
902 	/* we're back; save ctx.eip and ctx.esp into the corresponding regs slots. */
903 	ARM_LDR_IMM (code, ARMREG_R0, ARMREG_FP, MONO_STRUCT_OFFSET (MonoContext, pc));
904 	ARM_STR_IMM (code, ARMREG_R0, ARMREG_FP, MONO_STRUCT_OFFSET (MonoContext, regs) + 4 * ARMREG_LR);
905 	ARM_STR_IMM (code, ARMREG_R0, ARMREG_FP, MONO_STRUCT_OFFSET (MonoContext, regs) + 4 * ARMREG_PC);
907 	/* make ip point to the regs array, then restore everything, including pc. */
908 	ARM_ADD_REG_IMM8 (code, ARMREG_IP, ARMREG_FP, MONO_STRUCT_OFFSET (MonoContext, regs));
909 	ARM_LDM (code, ARMREG_IP, 0xffff);
911 	mono_arch_flush_icache (start, code - start);
917  * Initialize architecture specific code.
/*
 * mono_arch_init:
 * One-time backend initialization: mutex, soft-debugger wrappers and
 * trigger pages, exception-throwing icall registration, and CPU/FPU
 * feature detection (hwcap-based, overridable via MONO_CPU_ARCH).
 */
920 mono_arch_init (void)
922 	const char *cpu_arch;
924 	mono_mutex_init_recursive (&mini_arch_mutex);
925 #ifdef MONO_ARCH_SOFT_DEBUG_SUPPORTED
926 	if (mini_get_debug_options ()->soft_breakpoints) {
927 		single_step_func_wrapper = create_function_wrapper (debugger_agent_single_step_from_context);
928 		breakpoint_func_wrapper = create_function_wrapper (debugger_agent_breakpoint_from_context);
	/* Hardware-assisted stepping/breakpoints: sequence points read
	 * these pages; protection changes turn reads into faults. */
933 		ss_trigger_page = mono_valloc (NULL, mono_pagesize (), MONO_MMAP_READ|MONO_MMAP_32BIT);
934 		bp_trigger_page = mono_valloc (NULL, mono_pagesize (), MONO_MMAP_READ|MONO_MMAP_32BIT);
935 		mono_mprotect (bp_trigger_page, mono_pagesize (), 0);
938 	mono_aot_register_jit_icall ("mono_arm_throw_exception", mono_arm_throw_exception);
939 	mono_aot_register_jit_icall ("mono_arm_throw_exception_by_token", mono_arm_throw_exception_by_token);
940 	mono_aot_register_jit_icall ("mono_arm_resume_unwind", mono_arm_resume_unwind);
941 #if defined(ENABLE_GSHAREDVT)
942 	mono_aot_register_jit_icall ("mono_arm_start_gsharedvt_call", mono_arm_start_gsharedvt_call);
945 #if defined(__ARM_EABI__)
946 	eabi_supported = TRUE;
	/* Select the FPU mode the backend was compiled for. */
949 #if defined(ARM_FPU_VFP_HARD)
950 	arm_fpu = MONO_ARM_FPU_VFP_HARD;
952 	arm_fpu = MONO_ARM_FPU_VFP;
954 #if defined(ARM_FPU_NONE) && !defined(__APPLE__)
955 	/* If we're compiling with a soft float fallback and it
956 	   turns out that no VFP unit is available, we need to
957 	   switch to soft float. We don't do this for iOS, since
958 	   iOS devices always have a VFP unit. */
959 	if (!mono_hwcap_arm_has_vfp)
960 		arm_fpu = MONO_ARM_FPU_NONE;
964 	v5_supported = mono_hwcap_arm_is_v5;
965 	v6_supported = mono_hwcap_arm_is_v6;
966 	v7_supported = mono_hwcap_arm_is_v7;
967 	v7s_supported = mono_hwcap_arm_is_v7s;
969 #if defined(__APPLE__)
970 	/* iOS is special-cased here because we don't yet
971 	   have a way to properly detect CPU features on it. */
972 	thumb_supported = TRUE;
975 	thumb_supported = mono_hwcap_arm_has_thumb;
976 	thumb2_supported = mono_hwcap_arm_has_thumb2;
979 	/* Format: armv(5|6|7[s])[-thumb[2]] */
980 	cpu_arch = g_getenv ("MONO_CPU_ARCH");
982 	/* Do this here so it overrides any detection. */
984 		if (strncmp (cpu_arch, "armv", 4) == 0) {
			/* Version flags are cumulative: "armv7" implies v5/v6. */
985 			v5_supported = cpu_arch [4] >= '5';
986 			v6_supported = cpu_arch [4] >= '6';
987 			v7_supported = cpu_arch [4] >= '7';
988 			v7s_supported = strncmp (cpu_arch, "armv7s", 6) == 0;
		/* "thumb2" also matches the "thumb" substring test. */
991 		thumb_supported = strstr (cpu_arch, "thumb") != NULL;
992 		thumb2_supported = strstr (cpu_arch, "thumb2") != NULL;
997 * Cleanup architecture specific code.
1000 mono_arch_cleanup (void)
1005 * This function returns the optimizations supported on this cpu.
1008 mono_arch_cpu_optimizations (guint32 *exclude_mask)
1010 /* no arm-specific optimizations yet */
1016 * This function test for all SIMD functions supported.
1018 * Returns a bitmask corresponding to all supported versions.
1022 mono_arch_cpu_enumerate_simd_versions (void)
1024 /* SIMD is currently unimplemented */
1032 mono_arch_opcode_needs_emulation (MonoCompile *cfg, int opcode)
1034 if (v7s_supported) {
1048 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
1050 mono_arch_is_soft_float (void)
1052 return arm_fpu == MONO_ARM_FPU_NONE;
1057 mono_arm_is_hard_float (void)
1059 return arm_fpu == MONO_ARM_FPU_VFP_HARD;
/*
 * is_regsize_var:
 * Return whether a variable of type T fits in a single 32-bit integer
 * register (and may therefore be globally register-allocated).
 * Pointers, object references and non-valuetype generic instances
 * qualify; value types do not. (Several case labels and return
 * statements are elided in this view.)
 */
1063 is_regsize_var (MonoGenericSharingContext *gsctx, MonoType *t) {
1066 	t = mini_type_get_underlying_type (gsctx, t);
1073 	case MONO_TYPE_FNPTR:
1075 	case MONO_TYPE_OBJECT:
1076 	case MONO_TYPE_STRING:
1077 	case MONO_TYPE_CLASS:
1078 	case MONO_TYPE_SZARRAY:
1079 	case MONO_TYPE_ARRAY:
1081 	case MONO_TYPE_GENERICINST:
		/* Reference-type instantiations are pointer-sized. */
1082 		if (!mono_type_generic_inst_is_valuetype (t))
1085 	case MONO_TYPE_VALUETYPE:
/*
 * mono_arch_get_allocatable_int_vars:
 * Collect the method variables eligible for global integer register
 * allocation: live (first use before last use), not volatile/indirect,
 * an OP_LOCAL or OP_ARG, and of register size. Returns them as a sorted
 * GList for the register allocator.
 */
1092 mono_arch_get_allocatable_int_vars (MonoCompile *cfg)
1097 	for (i = 0; i < cfg->num_varinfo; i++) {
1098 		MonoInst *ins = cfg->varinfo [i];
1099 		MonoMethodVar *vmv = MONO_VARINFO (cfg, i);
		/* unused vars: first use at/after last use means no live range */
1102 		if (vmv->range.first_use.abs_pos >= vmv->range.last_use.abs_pos)
1105 		if (ins->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT) || (ins->opcode != OP_LOCAL && ins->opcode != OP_ARG))
1108 		/* we can only allocate 32 bit values */
1109 		if (is_regsize_var (cfg->generic_sharing_context, ins->inst_vtype)) {
1110 			g_assert (MONO_VARINFO (cfg, i)->reg == -1);
1111 			g_assert (i == vmv->idx);
1112 			vars = mono_varlist_insert_sorted (cfg, vars, vmv, FALSE);
1119 #define USE_EXTRA_TEMPS 0
1122 mono_arch_get_global_int_regs (MonoCompile *cfg)
1126 mono_arch_compute_omit_fp (cfg);
1129 * FIXME: Interface calls might go through a static rgctx trampoline which
1130 * sets V5, but it doesn't save it, so we need to save it ourselves, and
1133 if (cfg->flags & MONO_CFG_HAS_CALLS)
1134 cfg->uses_rgctx_reg = TRUE;
1136 if (cfg->arch.omit_fp)
1137 regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_FP));
1138 regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_V1));
1139 regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_V2));
1140 regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_V3));
1142 /* V4=R7 is used as a frame pointer, but V7=R10 is preserved */
1143 regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_V7));
1145 regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_V4));
1146 if (!(cfg->compile_aot || cfg->uses_rgctx_reg || COMPILE_LLVM (cfg)))
1147 /* V5 is reserved for passing the vtable/rgctx/IMT method */
1148 regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_V5));
1149 /*regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_V6));*/
1150 /*regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_V7));*/
1156 * mono_arch_regalloc_cost:
1158 * Return the cost, in number of memory references, of the action of
1159 * allocating the variable VMV into a register during global register
1163 mono_arch_regalloc_cost (MonoCompile *cfg, MonoMethodVar *vmv)
1169 #endif /* #ifndef DISABLE_JIT */
1171 #ifndef __GNUC_PREREQ
1172 #define __GNUC_PREREQ(maj, min) (0)
/*
 * mono_arch_flush_icache:
 * Flush the instruction cache for [code, code+size) after emitting or
 * patching code. Platform-dispatched: no-op for NaCl and cross builds,
 * sys_icache_invalidate on Apple (presumably — the #elif guard line is
 * elided here), GCC builtins when available, and raw cacheflush
 * syscalls as the last resort on Linux/Android.
 */
1176 mono_arch_flush_icache (guint8 *code, gint size)
1178 #if defined(__native_client__)
1179 	// For Native Client we don't have to flush i-cache here,
1180 	// as it's being done by dyncode interface.
1183 #ifdef MONO_CROSS_COMPILE
1185 	sys_icache_invalidate (code, size);
1186 #elif __GNUC_PREREQ(4, 3)
1187 	__builtin___clear_cache (code, code + size);
1188 #elif __GNUC_PREREQ(4, 1)
1189 	__clear_cache (code, code + size);
1190 #elif defined(PLATFORM_ANDROID)
	/* Android: invoke the private __ARM_NR_cacheflush syscall directly. */
1191 	const int syscall = 0xf0002;
1199 		      : "r" (code), "r" (code + size), "r" (syscall)
1200 		      : "r0", "r1", "r7", "r2"
	/* Generic Linux: legacy sys_cacheflush via swi. */
1203 	__asm __volatile ("mov r0, %0\n"
1206 			"swi 0x9f0002       @ sys_cacheflush"
1208 			: "r" (code), "r" (code + size), "r" (0)
1209 			: "r0", "r1", "r3" );
1211 #endif /* !__native_client__ */
1222 RegTypeStructByAddr,
1223 /* gsharedvt argument passed by addr in greg */
1224 RegTypeGSharedVtInReg,
1225 /* gsharedvt argument passed by addr on stack */
1226 RegTypeGSharedVtOnStack,
1231 guint16 vtsize; /* in param area */
1235 guint8 size : 4; /* 1, 2, 4, 8, or regs used by RegTypeStructByVal */
1240 guint32 stack_usage;
1241 gboolean vtype_retaddr;
1242 /* The index of the vret arg in the argument list */
/*
 * add_general:
 * Assign the next integer argument per the ARM calling convention.
 * simple == TRUE: one word, goes in r0-r3 or on the stack once r3 is
 * used. simple == FALSE: a 64-bit value, placed in an (aligned, on EABI)
 * register pair, split across r3 and the stack, or fully on the stack.
 * Updates *gr and *stack_size as locations are consumed.
 */
1252 add_general (guint *gr, guint *stack_size, ArgInfo *ainfo, gboolean simple)
1255 		if (*gr > ARMREG_R3) {
			/* Registers exhausted: caller passes this arg on the stack. */
1257 			ainfo->offset = *stack_size;
1258 			ainfo->reg = ARMREG_SP; /* in the caller */
1259 			ainfo->storage = RegTypeBase;
1262 			ainfo->storage = RegTypeGeneral;
		/* 64-bit path: splitting allowed only with 4-byte i8 alignment. */
1269 		split = i8_align == 4;
1274 		if (*gr == ARMREG_R3 && split) {
1275 			/* first word in r3 and the second on the stack */
1276 			ainfo->offset = *stack_size;
1277 			ainfo->reg = ARMREG_SP; /* in the caller */
1278 			ainfo->storage = RegTypeBaseGen;
1280 		} else if (*gr >= ARMREG_R3) {
1281 			if (eabi_supported) {
1282 				/* darwin aligns longs to 4 byte only */
1283 				if (i8_align == 8) {
			/* Whole value on the stack. */
1288 			ainfo->offset = *stack_size;
1289 			ainfo->reg = ARMREG_SP; /* in the caller */
1290 			ainfo->storage = RegTypeBase;
1293 			if (eabi_supported) {
				/* EABI: register pairs for i8 start at an even reg. */
1294 				if (i8_align == 8 && ((*gr) & 1))
1297 			ainfo->storage = RegTypeIRegPair;
/*
 * add_float:
 * Assign the next floating point argument under the VFP (hard-float)
 * calling convention, implementing the AAPCS back-filling rule via
 * *float_spare (see the long comment below). Updates *fpr, *float_spare
 * and *stack_size.
 */
1306 add_float (guint *fpr, guint *stack_size, ArgInfo *ainfo, gboolean is_double, gint *float_spare)
1309 	 * If we're calling a function like this:
1311 	 * void foo(float a, double b, float c)
1313 	 * We pass a in s0 and b in d1. That leaves us
1314 	 * with s1 being unused. The armhf ABI recognizes
1315 	 * this and requires register assignment to then
1316 	 * use that for the next single-precision arg,
1317 	 * i.e. c in this example. So float_spare either
1318 	 * tells us which reg to use for the next single-
1319 	 * precision arg, or it's -1, meaning use *fpr.
1321 	 * Note that even though most of the JIT speaks
1322 	 * double-precision, fpr represents single-
1323 	 * precision registers.
1325 	 * See parts 5.5 and 6.1.2 of the AAPCS for how
1329 	if (*fpr < ARM_VFP_F16 || (!is_double && *float_spare >= 0)) {
1330 		ainfo->storage = RegTypeFP;
1334 			 * If we're passing a double-precision value
1335 			 * and *fpr is odd (e.g. it's s1, s3, ...)
1336 			 * we need to use the next even register. So
1337 			 * we mark the current *fpr as a spare that
1338 			 * can be used for the next single-precision
1342 				*float_spare = *fpr;
1347 			 * At this point, we have an even register
1348 			 * so we assign that and move along.
1352 		} else if (*float_spare >= 0) {
1354 			 * We're passing a single-precision value
1355 			 * and it looks like a spare single-
1356 			 * precision register is available. Let's
1360 			ainfo->reg = *float_spare;
1364 			 * If we hit this branch, we're passing a
1365 			 * single-precision value and we can simply
1366 			 * use the next available register.
1374 		 * We've exhausted available floating point
1375 		 * regs, so pass the rest on the stack.
		/* Stack fallback: record the caller-frame offset. */
1383 		ainfo->offset = *stack_size;
1384 		ainfo->reg = ARMREG_SP;
1385 		ainfo->storage = RegTypeBase;
/*
 * get_call_info:
 * Build a CallInfo describing where each argument of @sig — and its return
 * value — is passed (core register, VFP register, register pair, stack, or
 * by hidden address) under the ARM calling conventions used by this
 * backend. The CallInfo is allocated from @mp when non-NULL, otherwise
 * from the GLib heap (the caller presumably frees it — TODO confirm, the
 * free path is not visible here).
 * NOTE(review): this chunk of the file is elided — many statements are
 * missing between the visible lines below; comments describe only what is
 * visible.
 */
1392 get_call_info (MonoGenericSharingContext *gsctx, MonoMemPool *mp, MonoMethodSignature *sig)
1394 guint i, gr, fpr, pstart;
1396 int n = sig->hasthis + sig->param_count;
1397 MonoType *simpletype;
1398 guint32 stack_size = 0;
1400 gboolean is_pinvoke = sig->pinvoke;
/* The ArgInfo array for all n arguments is allocated inline after the
 * CallInfo header (mempool vs. heap depending on @mp). */
1404 cinfo = mono_mempool_alloc0 (mp, sizeof (CallInfo) + (sizeof (ArgInfo) * n));
1406 cinfo = g_malloc0 (sizeof (CallInfo) + (sizeof (ArgInfo) * n));
/* Decide up front whether the return value needs a hidden return-address
 * argument: small pinvoke structs can be returned by value in registers,
 * everything else struct-like sets vtype_retaddr. */
1413 t = mini_type_get_underlying_type (gsctx, sig->ret);
1414 if (MONO_TYPE_ISSTRUCT (t)) {
1417 if (is_pinvoke && mono_class_native_size (mono_class_from_mono_type (t), &align) <= sizeof (gpointer)) {
1418 cinfo->ret.storage = RegTypeStructByVal;
1420 cinfo->vtype_retaddr = TRUE;
1422 } else if (!(t->type == MONO_TYPE_GENERICINST && !mono_type_generic_inst_is_valuetype (t)) && mini_is_gsharedvt_type_gsctx (gsctx, t)) {
1423 cinfo->vtype_retaddr = TRUE;
1429 * To simplify get_this_arg_reg () and LLVM integration, emit the vret arg after
1430 * the first argument, allowing 'this' to be always passed in the first arg reg.
1431 * Also do this if the first argument is a reference type, since virtual calls
1432 * are sometimes made using calli without sig->hasthis set, like in the delegate
1435 if (cinfo->vtype_retaddr && !is_pinvoke && (sig->hasthis || (sig->param_count > 0 && MONO_TYPE_IS_REFERENCE (mini_type_get_underlying_type (gsctx, sig->params [0]))))) {
1437 add_general (&gr, &stack_size, cinfo->args + 0, TRUE);
1439 add_general (&gr, &stack_size, &cinfo->args [sig->hasthis + 0], TRUE);
/* vret arg goes in the second position (index 1), after 'this'. */
1443 add_general (&gr, &stack_size, &cinfo->ret, TRUE);
1444 cinfo->vret_arg_index = 1;
1448 add_general (&gr, &stack_size, cinfo->args + 0, TRUE);
1452 if (cinfo->vtype_retaddr)
1453 add_general (&gr, &stack_size, &cinfo->ret, TRUE);
/* Main loop: assign a location to each formal parameter, dispatching on
 * its underlying type. NOTE(review): the index used into cinfo->args is
 * 'n' here, presumably re-purposed as a running argument counter by
 * elided lines — TODO confirm. */
1456 DEBUG(printf("params: %d\n", sig->param_count));
1457 for (i = pstart; i < sig->param_count; ++i) {
1458 ArgInfo *ainfo = &cinfo->args [n];
1460 if ((sig->call_convention == MONO_CALL_VARARG) && (i == sig->sentinelpos)) {
1461 /* Prevent implicit arguments and sig_cookie from
1462 being passed in registers */
1465 /* Emit the signature cookie just before the implicit arguments */
1466 add_general (&gr, &stack_size, &cinfo->sig_cookie, TRUE);
1468 DEBUG(printf("param %d: ", i));
1469 if (sig->params [i]->byref) {
1470 DEBUG(printf("byref\n"));
1471 add_general (&gr, &stack_size, ainfo, TRUE);
1475 simpletype = mini_type_get_underlying_type (gsctx, sig->params [i]);
1476 switch (simpletype->type) {
1477 case MONO_TYPE_BOOLEAN:
1480 cinfo->args [n].size = 1;
1481 add_general (&gr, &stack_size, ainfo, TRUE);
1484 case MONO_TYPE_CHAR:
1487 cinfo->args [n].size = 2;
1488 add_general (&gr, &stack_size, ainfo, TRUE);
1493 cinfo->args [n].size = 4;
1494 add_general (&gr, &stack_size, ainfo, TRUE);
/* All pointer-sized reference/pointer types share one path. */
1500 case MONO_TYPE_FNPTR:
1501 case MONO_TYPE_CLASS:
1502 case MONO_TYPE_OBJECT:
1503 case MONO_TYPE_STRING:
1504 case MONO_TYPE_SZARRAY:
1505 case MONO_TYPE_ARRAY:
1506 cinfo->args [n].size = sizeof (gpointer);
1507 add_general (&gr, &stack_size, ainfo, TRUE);
1510 case MONO_TYPE_GENERICINST:
1511 if (!mono_type_generic_inst_is_valuetype (simpletype)) {
1512 cinfo->args [n].size = sizeof (gpointer);
1513 add_general (&gr, &stack_size, ainfo, TRUE);
1517 if (mini_is_gsharedvt_type_gsctx (gsctx, simpletype)) {
1518 /* gsharedvt arguments are passed by ref */
1519 g_assert (mini_is_gsharedvt_type_gsctx (gsctx, simpletype));
1520 add_general (&gr, &stack_size, ainfo, TRUE);
/* Rewrite the generic storage into its gsharedvt-by-address variant. */
1521 switch (ainfo->storage) {
1522 case RegTypeGeneral:
1523 ainfo->storage = RegTypeGSharedVtInReg;
1526 ainfo->storage = RegTypeGSharedVtOnStack;
1529 g_assert_not_reached ();
1535 case MONO_TYPE_TYPEDBYREF:
1536 case MONO_TYPE_VALUETYPE: {
/* Value types: compute native vs. managed size/alignment, then pass the
 * first words in r0-r3 (RegTypeStructByVal) and the rest on the stack. */
1542 if (simpletype->type == MONO_TYPE_TYPEDBYREF) {
1543 size = sizeof (MonoTypedRef);
1544 align = sizeof (gpointer);
1546 MonoClass *klass = mono_class_from_mono_type (sig->params [i]);
1548 size = mono_class_native_size (klass, &align);
1550 size = mini_type_stack_size_full (gsctx, simpletype, &align, FALSE);
1552 DEBUG(printf ("load %d bytes struct\n", size));
/* Round the struct size up to a whole number of pointer-sized words. */
1555 align_size += (sizeof (gpointer) - 1);
1556 align_size &= ~(sizeof (gpointer) - 1);
1557 nwords = (align_size + sizeof (gpointer) -1 ) / sizeof (gpointer);
1558 ainfo->storage = RegTypeStructByVal;
1559 ainfo->struct_size = size;
1560 /* FIXME: align stack_size if needed */
1561 if (eabi_supported) {
1562 if (align >= 8 && (gr & 1))
1565 if (gr > ARMREG_R3) {
1567 ainfo->vtsize = nwords;
/* Split: as many words as fit go in the remaining registers, the rest
 * (vtsize words) spill to the stack. */
1569 int rest = ARMREG_R3 - gr + 1;
1570 int n_in_regs = rest >= nwords? nwords: rest;
1572 ainfo->size = n_in_regs;
1573 ainfo->vtsize = nwords - n_in_regs;
1576 nwords -= n_in_regs;
1578 if (sig->call_convention == MONO_CALL_VARARG)
1579 /* This matches the alignment in mono_ArgIterator_IntGetNextArg () */
1580 stack_size = ALIGN_TO (stack_size, align);
1581 ainfo->offset = stack_size;
1582 /*g_print ("offset for arg %d at %d\n", n, stack_size);*/
1583 stack_size += nwords * sizeof (gpointer);
/* NOTE(review): the surrounding cases for I8/U8, R4 and R8 are elided;
 * the calls below are their soft-float (add_general) vs. hard-float
 * (add_float) halves — TODO confirm against the full file. */
1590 add_general (&gr, &stack_size, ainfo, FALSE);
1597 add_float (&fpr, &stack_size, ainfo, FALSE, &float_spare);
1599 add_general (&gr, &stack_size, ainfo, TRUE);
1607 add_float (&fpr, &stack_size, ainfo, TRUE, &float_spare);
1609 add_general (&gr, &stack_size, ainfo, FALSE);
1614 case MONO_TYPE_MVAR:
1615 /* gsharedvt arguments are passed by ref */
1616 g_assert (mini_is_gsharedvt_type_gsctx (gsctx, simpletype));
1617 add_general (&gr, &stack_size, ainfo, TRUE);
1618 switch (ainfo->storage) {
1619 case RegTypeGeneral:
1620 ainfo->storage = RegTypeGSharedVtInReg;
1623 ainfo->storage = RegTypeGSharedVtOnStack;
1626 g_assert_not_reached ();
1631 g_error ("Can't trampoline 0x%x", sig->params [i]->type);
1635 /* Handle the case where there are no implicit arguments */
1636 if ((sig->call_convention == MONO_CALL_VARARG) && (i == sig->sentinelpos)) {
1637 /* Prevent implicit arguments and sig_cookie from
1638 being passed in registers */
1641 /* Emit the signature cookie just before the implicit arguments */
1642 add_general (&gr, &stack_size, &cinfo->sig_cookie, TRUE);
/* Finally, record where the return value lives. */
1646 simpletype = mini_type_get_underlying_type (gsctx, sig->ret);
1647 switch (simpletype->type) {
1648 case MONO_TYPE_BOOLEAN:
1653 case MONO_TYPE_CHAR:
1659 case MONO_TYPE_FNPTR:
1660 case MONO_TYPE_CLASS:
1661 case MONO_TYPE_OBJECT:
1662 case MONO_TYPE_SZARRAY:
1663 case MONO_TYPE_ARRAY:
1664 case MONO_TYPE_STRING:
1665 cinfo->ret.storage = RegTypeGeneral;
1666 cinfo->ret.reg = ARMREG_R0;
/* 64-bit integers are returned in the r0/r1 pair. */
1670 cinfo->ret.storage = RegTypeIRegPair;
1671 cinfo->ret.reg = ARMREG_R0;
/* FP returns: d0/s0 under the hard-float ABI, core regs otherwise. */
1675 cinfo->ret.storage = RegTypeFP;
1677 if (IS_HARD_FLOAT) {
1678 cinfo->ret.reg = ARM_VFP_F0;
1680 cinfo->ret.reg = ARMREG_R0;
1684 case MONO_TYPE_GENERICINST:
1685 if (!mono_type_generic_inst_is_valuetype (simpletype)) {
1686 cinfo->ret.storage = RegTypeGeneral;
1687 cinfo->ret.reg = ARMREG_R0;
1690 // FIXME: Only for variable types
1691 if (mini_is_gsharedvt_type_gsctx (gsctx, simpletype)) {
1692 cinfo->ret.storage = RegTypeStructByAddr;
1693 g_assert (cinfo->vtype_retaddr);
1697 case MONO_TYPE_VALUETYPE:
1698 case MONO_TYPE_TYPEDBYREF:
1699 if (cinfo->ret.storage != RegTypeStructByVal)
1700 cinfo->ret.storage = RegTypeStructByAddr;
1703 case MONO_TYPE_MVAR:
1704 g_assert (mini_is_gsharedvt_type_gsctx (gsctx, simpletype));
1705 cinfo->ret.storage = RegTypeStructByAddr;
1706 g_assert (cinfo->vtype_retaddr);
1708 case MONO_TYPE_VOID:
1711 g_error ("Can't handle as return value 0x%x", sig->ret->type);
1715 /* align stack size to 8 */
1716 DEBUG (printf (" stack size: %d (%d)\n", (stack_size + 15) & ~15, stack_size));
1717 stack_size = (stack_size + 7) & ~7;
1719 cinfo->stack_usage = stack_size;
/*
 * mono_arch_tail_call_supported:
 * Decide whether a tail call from @caller_sig to @callee_sig can be
 * emitted on this backend. Visible constraints: no tail calls under plain
 * AOT, the callee must not use more stack than the caller, vtype returns
 * through a hidden address argument are rejected, and callee stack usage
 * is capped at 16 words.
 * NOTE(review): this chunk of the file is elided — the final return and
 * the cleanup of c1/c2 are not visible here.
 */
1725 mono_arch_tail_call_supported (MonoCompile *cfg, MonoMethodSignature *caller_sig, MonoMethodSignature *callee_sig)
1727 MonoType *callee_ret;
1731 if (cfg->compile_aot && !cfg->full_aot)
1732 /* OP_TAILCALL doesn't work with AOT */
/* Compute the ABI layout of both signatures to compare stack usage.
 * NOTE(review): get_call_info is called with mp == NULL, so c1/c2 are
 * heap-allocated; presumably freed by elided lines — TODO confirm. */
1735 c1 = get_call_info (NULL, NULL, caller_sig);
1736 c2 = get_call_info (NULL, NULL, callee_sig);
1739 * Tail calls with more callee stack usage than the caller cannot be supported, since
1740 * the extra stack space would be left on the stack after the tail call.
1742 res = c1->stack_usage >= c2->stack_usage;
1743 callee_ret = mini_replace_type (callee_sig->ret);
1744 if (callee_ret && MONO_TYPE_ISSTRUCT (callee_ret) && c2->ret.storage != RegTypeStructByVal)
1745 /* An address on the callee's stack is passed as the first argument */
/* Cap on callee stack arguments (16 words); presumably matches a limit in
 * the tail-call emitter — TODO confirm. */
1748 if (c2->stack_usage > 16 * 4)
/*
 * debug_omit_fp:
 * Debug helper gating frame-pointer elimination on mono_debug_count (),
 * so omit-fp decisions can be bisected at runtime.
 */
1760 debug_omit_fp (void)
1763 return mono_debug_count ();
1770 * mono_arch_compute_omit_fp:
1772 * Determine whenever the frame pointer can be eliminated.
/*
 * mono_arch_compute_omit_fp:
 * Determine (once per MonoCompile, cached in cfg->arch.omit_fp_computed)
 * whether the frame pointer can be eliminated for this method. Starts
 * optimistic (omit_fp = TRUE) and clears it on each disqualifying
 * condition below.
 * NOTE(review): this chunk of the file is elided — lines are missing
 * between the visible statements.
 */
1775 mono_arch_compute_omit_fp (MonoCompile *cfg)
1777 MonoMethodSignature *sig;
1778 MonoMethodHeader *header;
/* Already decided for this cfg; the early return itself is elided. */
1782 if (cfg->arch.omit_fp_computed)
1785 header = cfg->header;
1787 sig = mono_method_signature (cfg->method);
/* Lazily compute and cache the ABI layout for this method's signature. */
1789 if (!cfg->arch.cinfo)
1790 cfg->arch.cinfo = get_call_info (cfg->generic_sharing_context, cfg->mempool, sig);
1791 cinfo = cfg->arch.cinfo;
1794 * FIXME: Remove some of the restrictions.
1796 cfg->arch.omit_fp = TRUE;
1797 cfg->arch.omit_fp_computed = TRUE;
/* Each of the following conditions forces keeping the frame pointer. */
1799 if (cfg->disable_omit_fp)
1800 cfg->arch.omit_fp = FALSE;
1801 if (!debug_omit_fp ())
1802 cfg->arch.omit_fp = FALSE;
1804 if (cfg->method->save_lmf)
1805 cfg->arch.omit_fp = FALSE;
1807 if (cfg->flags & MONO_CFG_HAS_ALLOCA)
1808 cfg->arch.omit_fp = FALSE;
1809 if (header->num_clauses)
1810 cfg->arch.omit_fp = FALSE;
1811 if (cfg->param_area)
1812 cfg->arch.omit_fp = FALSE;
1813 if (!sig->pinvoke && (sig->call_convention == MONO_CALL_VARARG))
1814 cfg->arch.omit_fp = FALSE;
1815 if ((mono_jit_trace_calls != NULL && mono_trace_eval (cfg->method)) ||
1816 (cfg->prof_options & MONO_PROFILE_ENTER_LEAVE))
1817 cfg->arch.omit_fp = FALSE;
/* Stack-relative argument locations can only be resolved with a fixed
 * frame, so any stack-passed argument keeps the frame pointer. */
1818 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
1819 ArgInfo *ainfo = &cinfo->args [i];
1821 if (ainfo->storage == RegTypeBase || ainfo->storage == RegTypeBaseGen || ainfo->storage == RegTypeStructByVal) {
1823 * The stack offset can only be determined when the frame
1826 cfg->arch.omit_fp = FALSE;
/* Estimate total locals size; how it is used afterwards is elided. */
1831 for (i = cfg->locals_start; i < cfg->num_varinfo; i++) {
1832 MonoInst *ins = cfg->varinfo [i];
1835 locals_size += mono_type_size (ins->inst_vtype, &ialign);
1840 * Set var information according to the calling convention. arm version.
1841 * The locals var stuff should most likely be split in another method.
/*
 * mono_arch_allocate_vars:
 * Assign stack offsets / registers to the return value, special JIT
 * variables (seq points, atomic temp, sig cookie), locals and incoming
 * arguments. Offsets grow upwards from the frame register (SP when the
 * frame pointer is omitted, FP otherwise) — see MONO_CFG_HAS_SPILLUP.
 * NOTE(review): this chunk of the file is elided — lines are missing
 * between the visible statements; comments describe only what is visible.
 */
1844 mono_arch_allocate_vars (MonoCompile *cfg)
1846 MonoMethodSignature *sig;
1847 MonoMethodHeader *header;
1850 int i, offset, size, align, curinst;
1854 sig = mono_method_signature (cfg->method);
1856 if (!cfg->arch.cinfo)
1857 cfg->arch.cinfo = get_call_info (cfg->generic_sharing_context, cfg->mempool, sig);
1858 cinfo = cfg->arch.cinfo;
1859 sig_ret = mini_replace_type (sig->ret);
1861 mono_arch_compute_omit_fp (cfg);
1863 if (cfg->arch.omit_fp)
1864 cfg->frame_reg = ARMREG_SP;
1866 cfg->frame_reg = ARMREG_FP;
/* Locals live at positive offsets from the frame register. */
1868 cfg->flags |= MONO_CFG_HAS_SPILLUP;
1870 /* allow room for the vararg method args: void* and long/double */
1871 if (mono_jit_trace_calls != NULL && mono_trace_eval (cfg->method))
1872 cfg->param_area = MAX (cfg->param_area, sizeof (gpointer)*8);
1874 header = cfg->header;
1876 /* See mono_arch_get_global_int_regs () */
1877 if (cfg->flags & MONO_CFG_HAS_CALLS)
1878 cfg->uses_rgctx_reg = TRUE;
1880 if (cfg->frame_reg != ARMREG_SP)
1881 cfg->used_int_regs |= 1 << cfg->frame_reg;
1883 if (cfg->compile_aot || cfg->uses_rgctx_reg || COMPILE_LLVM (cfg))
1884 /* V5 is reserved for passing the vtable/rgctx/IMT method */
1885 cfg->used_int_regs |= (1 << ARMREG_V5);
/* Scalar returns simply live in r0. */
1889 if (!MONO_TYPE_ISSTRUCT (sig_ret) && !cinfo->vtype_retaddr) {
1890 if (sig_ret->type != MONO_TYPE_VOID) {
1891 cfg->ret->opcode = OP_REGVAR;
1892 cfg->ret->inst_c0 = ARMREG_R0;
1895 /* local vars are at a positive offset from the stack pointer */
1897 * also note that if the function uses alloca, we use FP
1898 * to point at the local variables.
1900 offset = 0; /* linkage area */
1901 /* align the offset to 16 bytes: not sure this is needed here */
1903 //offset &= ~(8 - 1);
1905 /* add parameter area size for called functions */
1906 offset += cfg->param_area;
1909 if (cfg->flags & MONO_CFG_HAS_FPOUT)
1912 /* allow room to save the return value */
1913 if (mono_jit_trace_calls != NULL && mono_trace_eval (cfg->method))
1916 /* the MonoLMF structure is stored just below the stack pointer */
/* By-value struct return: allocate a pointer-aligned slot at a negative
 * offset; vtype-by-address return: place the vret address variable. */
1917 if (cinfo->ret.storage == RegTypeStructByVal) {
1918 cfg->ret->opcode = OP_REGOFFSET;
1919 cfg->ret->inst_basereg = cfg->frame_reg;
1920 offset += sizeof (gpointer) - 1;
1921 offset &= ~(sizeof (gpointer) - 1);
1922 cfg->ret->inst_offset = - offset;
1923 offset += sizeof(gpointer);
1924 } else if (cinfo->vtype_retaddr) {
1925 ins = cfg->vret_addr;
1926 offset += sizeof(gpointer) - 1;
1927 offset &= ~(sizeof(gpointer) - 1);
1928 ins->inst_offset = offset;
1929 ins->opcode = OP_REGOFFSET;
1930 ins->inst_basereg = cfg->frame_reg;
1931 if (G_UNLIKELY (cfg->verbose_level > 1)) {
1932 printf ("vret_addr =");
1933 mono_print_ins (cfg->vret_addr);
1935 offset += sizeof(gpointer);
1938 /* Allocate these first so they have a small offset, OP_SEQ_POINT depends on this */
1939 if (cfg->arch.seq_point_info_var) {
1942 ins = cfg->arch.seq_point_info_var;
1946 offset += align - 1;
1947 offset &= ~(align - 1);
1948 ins->opcode = OP_REGOFFSET;
1949 ins->inst_basereg = cfg->frame_reg;
1950 ins->inst_offset = offset;
1953 ins = cfg->arch.ss_trigger_page_var;
1956 offset += align - 1;
1957 offset &= ~(align - 1);
1958 ins->opcode = OP_REGOFFSET;
1959 ins->inst_basereg = cfg->frame_reg;
1960 ins->inst_offset = offset;
/* Soft-breakpoint variant of the sequence-point variables. */
1964 if (cfg->arch.seq_point_read_var) {
1967 ins = cfg->arch.seq_point_read_var;
1971 offset += align - 1;
1972 offset &= ~(align - 1);
1973 ins->opcode = OP_REGOFFSET;
1974 ins->inst_basereg = cfg->frame_reg;
1975 ins->inst_offset = offset;
1978 ins = cfg->arch.seq_point_ss_method_var;
1981 offset += align - 1;
1982 offset &= ~(align - 1);
1983 ins->opcode = OP_REGOFFSET;
1984 ins->inst_basereg = cfg->frame_reg;
1985 ins->inst_offset = offset;
1988 ins = cfg->arch.seq_point_bp_method_var;
1991 offset += align - 1;
1992 offset &= ~(align - 1);
1993 ins->opcode = OP_REGOFFSET;
1994 ins->inst_basereg = cfg->frame_reg;
1995 ins->inst_offset = offset;
/* Scratch slot shared by the atomic-op opcodes; -1 when unused. */
1999 if (cfg->has_atomic_exchange_i4 || cfg->has_atomic_cas_i4 || cfg->has_atomic_add_i4) {
2000 /* Allocate a temporary used by the atomic ops */
2004 /* Allocate a local slot to hold the sig cookie address */
2005 offset += align - 1;
2006 offset &= ~(align - 1);
2007 cfg->arch.atomic_tmp_offset = offset;
2010 cfg->arch.atomic_tmp_offset = -1;
2013 cfg->locals_min_stack_offset = offset;
/* Lay out the method's locals. */
2015 curinst = cfg->locals_start;
2016 for (i = curinst; i < cfg->num_varinfo; ++i) {
2019 ins = cfg->varinfo [i];
2020 if ((ins->flags & MONO_INST_IS_DEAD) || ins->opcode == OP_REGVAR || ins->opcode == OP_REGOFFSET)
2023 t = ins->inst_vtype;
2024 if (cfg->gsharedvt && mini_is_gsharedvt_variable_type (cfg, t))
2027 /* inst->backend.is_pinvoke indicates native sized value types, this is used by the
2028 * pinvoke wrappers when they call functions returning structure */
2029 if (ins->backend.is_pinvoke && MONO_TYPE_ISSTRUCT (t) && t->type != MONO_TYPE_TYPEDBYREF) {
2030 size = mono_class_native_size (mono_class_from_mono_type (t), &ualign);
2034 size = mono_type_size (t, &align);
2036 /* FIXME: if a structure is misaligned, our memcpy doesn't work,
2037 * since it loads/stores misaligned words, which don't do the right thing.
2039 if (align < 4 && size >= 4)
/* Padding introduced by alignment holds no references. */
2041 if (ALIGN_TO (offset, align) > ALIGN_TO (offset, 4))
2042 mini_gc_set_slot_type_from_fp (cfg, ALIGN_TO (offset, 4), SLOT_NOREF);
2043 offset += align - 1;
2044 offset &= ~(align - 1);
2045 ins->opcode = OP_REGOFFSET;
2046 ins->inst_offset = offset;
2047 ins->inst_basereg = cfg->frame_reg;
2049 //g_print ("allocating local %d to %d\n", i, inst->inst_offset);
2052 cfg->locals_max_stack_offset = offset;
/* Lay out 'this' (and below, the formal parameters). */
2056 ins = cfg->args [curinst];
2057 if (ins->opcode != OP_REGVAR) {
2058 ins->opcode = OP_REGOFFSET;
2059 ins->inst_basereg = cfg->frame_reg;
2060 offset += sizeof (gpointer) - 1;
2061 offset &= ~(sizeof (gpointer) - 1);
2062 ins->inst_offset = offset;
2063 offset += sizeof (gpointer);
2068 if (sig->call_convention == MONO_CALL_VARARG) {
2072 /* Allocate a local slot to hold the sig cookie address */
2073 offset += align - 1;
2074 offset &= ~(align - 1);
2075 cfg->sig_cookie = offset;
2079 for (i = 0; i < sig->param_count; ++i) {
2080 ins = cfg->args [curinst];
2082 if (ins->opcode != OP_REGVAR) {
2083 ins->opcode = OP_REGOFFSET;
2084 ins->inst_basereg = cfg->frame_reg;
2085 size = mini_type_stack_size_full (cfg->generic_sharing_context, sig->params [i], &ualign, sig->pinvoke);
2087 /* FIXME: if a structure is misaligned, our memcpy doesn't work,
2088 * since it loads/stores misaligned words, which don't do the right thing.
2090 if (align < 4 && size >= 4)
2092 /* The code in the prolog () stores words when storing vtypes received in a register */
2093 if (MONO_TYPE_ISSTRUCT (sig->params [i]))
2095 if (ALIGN_TO (offset, align) > ALIGN_TO (offset, 4))
2096 mini_gc_set_slot_type_from_fp (cfg, ALIGN_TO (offset, 4), SLOT_NOREF);
2097 offset += align - 1;
2098 offset &= ~(align - 1);
2099 ins->inst_offset = offset;
2105 /* align the offset to 8 bytes */
2106 if (ALIGN_TO (offset, 8) > ALIGN_TO (offset, 4))
2107 mini_gc_set_slot_type_from_fp (cfg, ALIGN_TO (offset, 4), SLOT_NOREF);
2112 cfg->stack_offset = offset;
/*
 * mono_arch_create_vars:
 * Create the architecture-specific MonoInst variables this backend needs
 * before register allocation: VFP scratch slots (hard-float), the vret
 * address variable, and the sequence-point support variables.
 * NOTE(review): this chunk of the file is elided — lines are missing
 * between the visible statements.
 */
2116 mono_arch_create_vars (MonoCompile *cfg)
2118 MonoMethodSignature *sig;
2122 sig = mono_method_signature (cfg->method);
2124 if (!cfg->arch.cinfo)
2125 cfg->arch.cinfo = get_call_info (cfg->generic_sharing_context, cfg->mempool, sig);
2126 cinfo = cfg->arch.cinfo;
/* Hard-float needs two double-sized scratch stack slots for moving
 * values between core and VFP registers. */
2128 if (IS_HARD_FLOAT) {
2129 for (i = 0; i < 2; i++) {
2130 MonoInst *inst = mono_compile_create_var (cfg, &mono_defaults.double_class->byval_arg, OP_LOCAL);
2131 inst->flags |= MONO_INST_VOLATILE;
2133 cfg->arch.vfp_scratch_slots [i] = (gpointer) inst;
2137 if (cinfo->ret.storage == RegTypeStructByVal)
2138 cfg->ret_var_is_local = TRUE;
/* Hidden-address vtype return: the address arrives as an argument. */
2140 if (cinfo->vtype_retaddr) {
2141 cfg->vret_addr = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_ARG);
2142 if (G_UNLIKELY (cfg->verbose_level > 1)) {
2143 printf ("vret_addr = ");
2144 mono_print_ins (cfg->vret_addr);
/* Sequence points: soft breakpoints use three method-local variables;
 * AOT uses a seq_point_info variable; otherwise a trigger-page variable. */
2148 if (cfg->gen_seq_points) {
2149 if (cfg->soft_breakpoints) {
2150 MonoInst *ins = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
2151 ins->flags |= MONO_INST_VOLATILE;
2152 cfg->arch.seq_point_read_var = ins;
2154 ins = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
2155 ins->flags |= MONO_INST_VOLATILE;
2156 cfg->arch.seq_point_ss_method_var = ins;
2158 ins = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
2159 ins->flags |= MONO_INST_VOLATILE;
2160 cfg->arch.seq_point_bp_method_var = ins;
2162 g_assert (!cfg->compile_aot);
2163 } else if (cfg->compile_aot) {
2164 MonoInst *ins = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
2165 ins->flags |= MONO_INST_VOLATILE;
2166 cfg->arch.seq_point_info_var = ins;
2168 /* Allocate a separate variable for this to save 1 load per seq point */
2169 ins = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
2170 ins->flags |= MONO_INST_VOLATILE;
2171 cfg->arch.ss_trigger_page_var = ins;
/*
 * emit_sig_cookie:
 * Emit the varargs signature cookie: store a signature constant (trimmed
 * to the arguments after the sentinel position) into its assigned stack
 * slot. Skipped for tail calls.
 * NOTE(review): this chunk of the file is elided — lines are missing
 * between the visible statements.
 */
2177 emit_sig_cookie (MonoCompile *cfg, MonoCallInst *call, CallInfo *cinfo)
2179 MonoMethodSignature *tmp_sig;
2182 if (call->tail_call)
/* The cookie is always laid out on the stack by get_call_info. */
2185 g_assert (cinfo->sig_cookie.storage == RegTypeBase);
2188 * mono_ArgIterator_Setup assumes the signature cookie is
2189 * passed first and all the arguments which were before it are
2190 * passed on the stack after the signature. So compensate by
2191 * passing a different signature.
2193 tmp_sig = mono_metadata_signature_dup (call->signature);
2194 tmp_sig->param_count -= call->signature->sentinelpos;
2195 tmp_sig->sentinelpos = 0;
2196 memcpy (tmp_sig->params, call->signature->params + call->signature->sentinelpos, tmp_sig->param_count * sizeof (MonoType*));
2198 sig_reg = mono_alloc_ireg (cfg);
2199 MONO_EMIT_NEW_SIGNATURECONST (cfg, sig_reg, tmp_sig);
2201 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, ARMREG_SP, cinfo->sig_cookie.offset, sig_reg);
/*
 * mono_arch_get_llvm_call_info:
 * Translate this backend's CallInfo into an LLVMCallInfo for the LLVM
 * code path. Argument/return conventions LLVM cannot express here set
 * cfg->disable_llvm with an explanatory message instead of failing.
 * NOTE(review): this chunk of the file is elided — lines are missing
 * between the visible statements.
 */
2206 mono_arch_get_llvm_call_info (MonoCompile *cfg, MonoMethodSignature *sig)
2211 LLVMCallInfo *linfo;
2213 n = sig->param_count + sig->hasthis;
2215 cinfo = get_call_info (cfg->generic_sharing_context, cfg->mempool, sig);
2217 linfo = mono_mempool_alloc0 (cfg->mempool, sizeof (LLVMCallInfo) + (sizeof (LLVMArgInfo) * n));
2220 * LLVM always uses the native ABI while we use our own ABI, the
2221 * only difference is the handling of vtypes:
2222 * - we only pass/receive them in registers in some cases, and only
2223 * in 1 or 2 integer registers.
2225 if (cinfo->vtype_retaddr) {
2226 /* Vtype returned using a hidden argument */
2227 linfo->ret.storage = LLVMArgVtypeRetAddr;
2228 linfo->vret_arg_index = cinfo->vret_arg_index;
2229 } else if (cinfo->ret.storage != RegTypeGeneral && cinfo->ret.storage != RegTypeNone && cinfo->ret.storage != RegTypeFP && cinfo->ret.storage != RegTypeIRegPair) {
2230 cfg->exception_message = g_strdup ("unknown ret conv");
2231 cfg->disable_llvm = TRUE;
/* Map every argument's storage to the closest LLVM equivalent. */
2235 for (i = 0; i < n; ++i) {
2236 ainfo = cinfo->args + i;
2238 linfo->args [i].storage = LLVMArgNone;
2240 switch (ainfo->storage) {
2241 case RegTypeGeneral:
2242 case RegTypeIRegPair:
2244 linfo->args [i].storage = LLVMArgInIReg;
2246 case RegTypeStructByVal:
2247 // FIXME: Passing entirely on the stack or split reg/stack
2248 if (ainfo->vtsize == 0 && ainfo->size <= 2) {
2249 linfo->args [i].storage = LLVMArgVtypeInReg;
2250 linfo->args [i].pair_storage [0] = LLVMArgInIReg;
2251 if (ainfo->size == 2)
2252 linfo->args [i].pair_storage [1] = LLVMArgInIReg;
2254 linfo->args [i].pair_storage [1] = LLVMArgNone;
2256 cfg->exception_message = g_strdup_printf ("vtype-by-val on stack");
2257 cfg->disable_llvm = TRUE;
2261 cfg->exception_message = g_strdup_printf ("ainfo->storage (%d)", ainfo->storage);
2262 cfg->disable_llvm = TRUE;
/*
 * mono_arch_emit_call:
 * Lower a MonoCallInst: for every argument, emit the instructions that
 * place it where the ABI layout (get_call_info) says it must go — moves
 * into core registers, stores into the outgoing stack area, soft-float
 * register splits, or OP_OUTARG_VT for value types. Also handles the
 * varargs sig cookie and the vtype return address.
 * NOTE(review): this chunk of the file is elided — lines are missing
 * between the visible statements; comments describe only what is visible.
 */
2272 mono_arch_emit_call (MonoCompile *cfg, MonoCallInst *call)
2275 MonoMethodSignature *sig;
2279 sig = call->signature;
2280 n = sig->param_count + sig->hasthis;
2282 cinfo = get_call_info (cfg->generic_sharing_context, NULL, sig);
2284 for (i = 0; i < n; ++i) {
2285 ArgInfo *ainfo = cinfo->args + i;
/* 'this' (when i < sig->hasthis) is treated as a native int. */
2288 if (i >= sig->hasthis)
2289 t = sig->params [i - sig->hasthis];
2291 t = &mono_defaults.int_class->byval_arg;
2292 t = mini_type_get_underlying_type (cfg->generic_sharing_context, t);
2294 if ((sig->call_convention == MONO_CALL_VARARG) && (i == sig->sentinelpos)) {
2295 /* Emit the signature cookie just before the implicit arguments */
2296 emit_sig_cookie (cfg, call, cinfo);
2299 in = call->args [i];
2301 switch (ainfo->storage) {
2302 case RegTypeGeneral:
2303 case RegTypeIRegPair:
/* 64-bit ints: move each half (vreg+1 / vreg+2) into consecutive regs. */
2304 if (!t->byref && ((t->type == MONO_TYPE_I8) || (t->type == MONO_TYPE_U8))) {
2305 MONO_INST_NEW (cfg, ins, OP_MOVE);
2306 ins->dreg = mono_alloc_ireg (cfg);
2307 ins->sreg1 = in->dreg + 1;
2308 MONO_ADD_INS (cfg->cbb, ins);
2309 mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, ainfo->reg, FALSE);
2311 MONO_INST_NEW (cfg, ins, OP_MOVE);
2312 ins->dreg = mono_alloc_ireg (cfg);
2313 ins->sreg1 = in->dreg + 2;
2314 MONO_ADD_INS (cfg->cbb, ins);
2315 mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, ainfo->reg + 1, FALSE);
2316 } else if (!t->byref && ((t->type == MONO_TYPE_R8) || (t->type == MONO_TYPE_R4))) {
2317 if (ainfo->size == 4) {
2318 if (IS_SOFT_FLOAT) {
2319 /* mono_emit_call_args () have already done the r8->r4 conversion */
2320 /* The converted value is in an int vreg */
2321 MONO_INST_NEW (cfg, ins, OP_MOVE);
2322 ins->dreg = mono_alloc_ireg (cfg);
2323 ins->sreg1 = in->dreg;
2324 MONO_ADD_INS (cfg->cbb, ins);
2325 mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, ainfo->reg, FALSE);
/* VFP-with-softfp-ABI: bounce the float through the param area to get
 * it into a core register. */
2329 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORER4_MEMBASE_REG, ARMREG_SP, (cfg->param_area - 8), in->dreg);
2330 creg = mono_alloc_ireg (cfg);
2331 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOAD_MEMBASE, creg, ARMREG_SP, (cfg->param_area - 8));
2332 mono_call_inst_add_outarg_reg (cfg, call, creg, ainfo->reg, FALSE);
/* R8: split the double into two core registers. */
2335 if (IS_SOFT_FLOAT) {
2336 MONO_INST_NEW (cfg, ins, OP_FGETLOW32);
2337 ins->dreg = mono_alloc_ireg (cfg);
2338 ins->sreg1 = in->dreg;
2339 MONO_ADD_INS (cfg->cbb, ins);
2340 mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, ainfo->reg, FALSE);
2342 MONO_INST_NEW (cfg, ins, OP_FGETHIGH32);
2343 ins->dreg = mono_alloc_ireg (cfg);
2344 ins->sreg1 = in->dreg;
2345 MONO_ADD_INS (cfg->cbb, ins);
2346 mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, ainfo->reg + 1, FALSE);
2350 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORER8_MEMBASE_REG, ARMREG_SP, (cfg->param_area - 8), in->dreg);
2351 creg = mono_alloc_ireg (cfg);
2352 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOAD_MEMBASE, creg, ARMREG_SP, (cfg->param_area - 8));
2353 mono_call_inst_add_outarg_reg (cfg, call, creg, ainfo->reg, FALSE);
2354 creg = mono_alloc_ireg (cfg);
2355 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOAD_MEMBASE, creg, ARMREG_SP, (cfg->param_area - 8 + 4));
2356 mono_call_inst_add_outarg_reg (cfg, call, creg, ainfo->reg + 1, FALSE);
2359 cfg->flags |= MONO_CFG_HAS_FPOUT;
/* Plain pointer-sized value: one move into the assigned register. */
2361 MONO_INST_NEW (cfg, ins, OP_MOVE);
2362 ins->dreg = mono_alloc_ireg (cfg);
2363 ins->sreg1 = in->dreg;
2364 MONO_ADD_INS (cfg->cbb, ins);
2366 mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, ainfo->reg, FALSE);
2369 case RegTypeStructByAddr:
2372 /* FIXME: where si the data allocated? */
2373 arg->backend.reg3 = ainfo->reg;
2374 call->used_iregs |= 1 << ainfo->reg;
2375 g_assert_not_reached ();
/* Value types and gsharedvt args are handled by mono_arch_emit_outarg_vt
 * via an OP_OUTARG_VT instruction carrying a copy of the ArgInfo. */
2378 case RegTypeStructByVal:
2379 case RegTypeGSharedVtInReg:
2380 case RegTypeGSharedVtOnStack:
2381 MONO_INST_NEW (cfg, ins, OP_OUTARG_VT);
2382 ins->opcode = OP_OUTARG_VT;
2383 ins->sreg1 = in->dreg;
2384 ins->klass = in->klass;
2385 ins->inst_p0 = call;
2386 ins->inst_p1 = mono_mempool_alloc (cfg->mempool, sizeof (ArgInfo));
2387 memcpy (ins->inst_p1, ainfo, sizeof (ArgInfo));
2388 mono_call_inst_add_outarg_vt (cfg, call, ins);
2389 MONO_ADD_INS (cfg->cbb, ins);
/* Stack-passed arguments: store into the outgoing area at the ABI offset. */
2392 if (!t->byref && ((t->type == MONO_TYPE_I8) || (t->type == MONO_TYPE_U8))) {
2393 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, ARMREG_SP, ainfo->offset, in->dreg);
2394 } else if (!t->byref && ((t->type == MONO_TYPE_R4) || (t->type == MONO_TYPE_R8))) {
2395 if (t->type == MONO_TYPE_R8) {
2396 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORER8_MEMBASE_REG, ARMREG_SP, ainfo->offset, in->dreg);
2399 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, ARMREG_SP, ainfo->offset, in->dreg);
2401 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORER4_MEMBASE_REG, ARMREG_SP, ainfo->offset, in->dreg);
2404 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, ARMREG_SP, ainfo->offset, in->dreg);
/* Split case: low word on the stack, high word in r3 (byte-order aware). */
2407 case RegTypeBaseGen:
2408 if (!t->byref && ((t->type == MONO_TYPE_I8) || (t->type == MONO_TYPE_U8))) {
2409 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, ARMREG_SP, ainfo->offset, (G_BYTE_ORDER == G_BIG_ENDIAN) ? in->dreg + 1 : in->dreg + 2);
2410 MONO_INST_NEW (cfg, ins, OP_MOVE);
2411 ins->dreg = mono_alloc_ireg (cfg);
2412 ins->sreg1 = G_BYTE_ORDER == G_BIG_ENDIAN ? in->dreg + 2 : in->dreg + 1;
2413 MONO_ADD_INS (cfg->cbb, ins);
2414 mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, ARMREG_R3, FALSE);
2415 } else if (!t->byref && (t->type == MONO_TYPE_R8)) {
2418 /* This should work for soft-float as well */
2420 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORER8_MEMBASE_REG, ARMREG_SP, (cfg->param_area - 8), in->dreg);
2421 creg = mono_alloc_ireg (cfg);
2422 mono_call_inst_add_outarg_reg (cfg, call, creg, ARMREG_R3, FALSE);
2423 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOAD_MEMBASE, creg, ARMREG_SP, (cfg->param_area - 8));
2424 creg = mono_alloc_ireg (cfg);
2425 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOAD_MEMBASE, creg, ARMREG_SP, (cfg->param_area - 4));
2426 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, ARMREG_SP, ainfo->offset, creg);
2427 cfg->flags |= MONO_CFG_HAS_FPOUT;
2429 g_assert_not_reached ();
/* RegTypeFP (hard-float): see the comment below about the register
 * allocator and single-precision registers. */
2433 int fdreg = mono_alloc_freg (cfg);
2435 if (ainfo->size == 8) {
2436 MONO_INST_NEW (cfg, ins, OP_FMOVE);
2437 ins->sreg1 = in->dreg;
2439 MONO_ADD_INS (cfg->cbb, ins);
2441 mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, ainfo->reg, TRUE);
2446 * Mono's register allocator doesn't speak single-precision registers that
2447 * overlap double-precision registers (i.e. armhf). So we have to work around
2448 * the register allocator and load the value from memory manually.
2450 * So we create a variable for the float argument and an instruction to store
2451 * the argument into the variable. We then store the list of these arguments
2452 * in cfg->float_args. This list is then used by emit_float_args later to
2453 * pass the arguments in the various call opcodes.
2455 * This is not very nice, and we should really try to fix the allocator.
2458 MonoInst *float_arg = mono_compile_create_var (cfg, &mono_defaults.single_class->byval_arg, OP_LOCAL);
2460 /* Make sure the instruction isn't seen as pointless and removed.
2462 float_arg->flags |= MONO_INST_VOLATILE;
2464 MONO_EMIT_NEW_UNALU (cfg, OP_FMOVE, float_arg->dreg, in->dreg);
2466 /* We use the dreg to look up the instruction later. The hreg is used to
2467 * emit the instruction that loads the value into the FP reg.
2469 fad = mono_mempool_alloc0 (cfg->mempool, sizeof (FloatArgData));
2470 fad->vreg = float_arg->dreg;
2471 fad->hreg = ainfo->reg;
2473 call->float_args = g_slist_append_mempool (cfg->mempool, call->float_args, fad);
2476 call->used_iregs |= 1 << ainfo->reg;
2477 cfg->flags |= MONO_CFG_HAS_FPOUT;
2481 g_assert_not_reached ();
2485 /* Handle the case where there are no implicit arguments */
2486 if (!sig->pinvoke && (sig->call_convention == MONO_CALL_VARARG) && (n == sig->sentinelpos))
2487 emit_sig_cookie (cfg, call, cinfo);
2489 if (cinfo->ret.storage == RegTypeStructByVal) {
2490 /* The JIT will transform this into a normal call */
2491 call->vret_in_reg = TRUE;
2492 } else if (cinfo->vtype_retaddr) {
/* Pass the address of the return buffer in its assigned register. */
2494 MONO_INST_NEW (cfg, vtarg, OP_MOVE);
2495 vtarg->sreg1 = call->vret_var->dreg;
2496 vtarg->dreg = mono_alloc_preg (cfg);
2497 MONO_ADD_INS (cfg->cbb, vtarg);
2499 mono_call_inst_add_outarg_reg (cfg, call, vtarg->dreg, cinfo->ret.reg, FALSE);
2502 call->stack_usage = cinfo->stack_usage;
/*
 * mono_arch_emit_outarg_vt:
 * Lower an OP_OUTARG_VT: copy a value-type argument to its destination —
 * by address for gsharedvt (register or stack), otherwise word-by-word
 * into r0-r3 for the in-register portion, with any overflow memcpy'd into
 * the outgoing stack area.
 * NOTE(review): this chunk of the file is elided — lines are missing
 * between the visible statements.
 */
2508 mono_arch_emit_outarg_vt (MonoCompile *cfg, MonoInst *ins, MonoInst *src)
2510 MonoCallInst *call = (MonoCallInst*)ins->inst_p0;
2511 ArgInfo *ainfo = ins->inst_p1;
2512 int ovf_size = ainfo->vtsize;
2513 int doffset = ainfo->offset;
2514 int struct_size = ainfo->struct_size;
2515 int i, soffset, dreg, tmpreg;
2517 if (ainfo->storage == RegTypeGSharedVtInReg) {
/* gsharedvt by address in a register: pass the source address directly. */
2519 mono_call_inst_add_outarg_reg (cfg, call, src->dreg, ainfo->reg, FALSE);
2522 if (ainfo->storage == RegTypeGSharedVtOnStack) {
2523 /* Pass by addr on stack */
2524 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, ARMREG_SP, ainfo->offset, src->dreg);
/* In-register portion: ainfo->size words go into consecutive registers.
 * Loads are sized by the bytes remaining so small/odd-sized structs are
 * not over-read (the 3-byte case assembles the word from byte loads). */
2529 for (i = 0; i < ainfo->size; ++i) {
2530 dreg = mono_alloc_ireg (cfg);
2531 switch (struct_size) {
2533 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, dreg, src->dreg, soffset);
2536 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, dreg, src->dreg, soffset);
2539 tmpreg = mono_alloc_ireg (cfg);
2540 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, dreg, src->dreg, soffset);
2541 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, tmpreg, src->dreg, soffset + 1);
2542 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, tmpreg, tmpreg, 8);
2543 MONO_EMIT_NEW_BIALU (cfg, OP_IOR, dreg, dreg, tmpreg);
2544 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, tmpreg, src->dreg, soffset + 2);
2545 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, tmpreg, tmpreg, 16);
2546 MONO_EMIT_NEW_BIALU (cfg, OP_IOR, dreg, dreg, tmpreg);
2549 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, dreg, src->dreg, soffset);
2552 mono_call_inst_add_outarg_reg (cfg, call, dreg, ainfo->reg + i, FALSE);
2553 soffset += sizeof (gpointer);
2554 struct_size -= sizeof (gpointer);
2556 //g_print ("vt size: %d at R%d + %d\n", doffset, vt->inst_basereg, vt->inst_offset);
/* Overflow portion: copy the remaining ovf_size words to the stack. */
2558 mini_emit_memcpy (cfg, ARMREG_SP, doffset, src->dreg, soffset, MIN (ovf_size * sizeof (gpointer), struct_size), struct_size < 4 ? 1 : 4);
// Emit the IR that moves the method's return value VAL into the ABI return
// location (r0/r1 for i8 pairs, VFP d0 / int regs for floats depending on the
// configured FPU mode, r0 otherwise).
2562 mono_arch_emit_setret (MonoCompile *cfg, MonoMethod *method, MonoInst *val)
2564 MonoType *ret = mini_type_get_underlying_type (cfg->generic_sharing_context, mono_method_signature (method)->ret);
2567 if (ret->type == MONO_TYPE_I8 || ret->type == MONO_TYPE_U8) {
2570 if (COMPILE_LLVM (cfg)) {
2571 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, cfg->ret->dreg, val->dreg);
// 64-bit values live in a vreg pair (dreg+1 = low, dreg+2 = high by Mono's
// long-decomposition convention); OP_SETLRET moves both halves.
2573 MONO_INST_NEW (cfg, ins, OP_SETLRET);
2574 ins->sreg1 = val->dreg + 1;
2575 ins->sreg2 = val->dreg + 2;
2576 MONO_ADD_INS (cfg->cbb, ins);
2581 case MONO_ARM_FPU_NONE:
2582 if (ret->type == MONO_TYPE_R8) {
2585 MONO_INST_NEW (cfg, ins, OP_SETFRET);
2586 ins->dreg = cfg->ret->dreg;
2587 ins->sreg1 = val->dreg;
2588 MONO_ADD_INS (cfg->cbb, ins);
2591 if (ret->type == MONO_TYPE_R4) {
2592 /* Already converted to an int in method_to_ir () */
2593 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, cfg->ret->dreg, val->dreg);
2597 case MONO_ARM_FPU_VFP:
2598 case MONO_ARM_FPU_VFP_HARD:
2599 if (ret->type == MONO_TYPE_R8 || ret->type == MONO_TYPE_R4) {
2602 MONO_INST_NEW (cfg, ins, OP_SETFRET);
2603 ins->dreg = cfg->ret->dreg;
2604 ins->sreg1 = val->dreg;
2605 MONO_ADD_INS (cfg->cbb, ins);
// Unknown FPU configuration — should be unreachable.
2610 g_assert_not_reached ();
// Default: integer/pointer return in r0.
2614 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, cfg->ret->dreg, val->dreg);
2617 #endif /* #ifndef DISABLE_JIT */
2620 mono_arch_is_inst_imm (gint64 imm)
2626 MonoMethodSignature *sig;
2629 MonoType **param_types;
// Decide whether a signature can be invoked through the dyn-call (runtime
// invoke without generated wrappers) path: all arguments must fit in the
// PARAM_REGS argument registers plus DYN_CALL_STACK_ARGS stack slots, and the
// return/argument storage kinds must be ones start_dyn_call knows how to fill.
2633 dyn_call_supported (CallInfo *cinfo, MonoMethodSignature *sig)
2637 if (sig->hasthis + sig->param_count > PARAM_REGS + DYN_CALL_STACK_ARGS)
2640 switch (cinfo->ret.storage) {
2642 case RegTypeGeneral:
2643 case RegTypeIRegPair:
2644 case RegTypeStructByAddr:
2655 for (i = 0; i < cinfo->nargs; ++i) {
2656 ArgInfo *ainfo = &cinfo->args [i];
2659 switch (ainfo->storage) {
2660 case RegTypeGeneral:
2662 case RegTypeIRegPair:
// Stack-passed args beyond the reserved dyn-call stack area are unsupported.
2665 if (ainfo->offset >= (DYN_CALL_STACK_ARGS * sizeof (gpointer)))
2668 case RegTypeStructByVal:
2669 if (ainfo->size == 0)
2670 last_slot = PARAM_REGS + (ainfo->offset / 4) + ainfo->vtsize;
2672 last_slot = ainfo->reg + ainfo->size + ainfo->vtsize;
2673 if (last_slot >= PARAM_REGS + DYN_CALL_STACK_ARGS)
2681 // FIXME: Can't use cinfo only as it doesn't contain info about I8/float */
2682 for (i = 0; i < sig->param_count; ++i) {
2683 MonoType *t = sig->params [i];
2688 t = mini_replace_type (t);
// Build the per-signature data used by mono_arch_start_dyn_call /
// mono_arch_finish_dyn_call. Returns NULL-equivalent (elided branch) when the
// signature is not dyn-call capable. Caller owns the returned info and frees
// it with mono_arch_dyn_call_free.
2711 mono_arch_dyn_call_prepare (MonoMethodSignature *sig)
2713 ArchDynCallInfo *info;
2717 cinfo = get_call_info (NULL, NULL, sig);
2719 if (!dyn_call_supported (cinfo, sig)) {
2724 info = g_new0 (ArchDynCallInfo, 1);
2725 // FIXME: Preprocess the info to speed up start_dyn_call ()
2727 info->cinfo = cinfo;
// Cache the underlying (de-aliased) return and parameter types so the hot
// start/finish paths don't have to resolve them on every invocation.
2728 info->rtype = mini_replace_type (sig->ret);
2729 info->param_types = g_new0 (MonoType*, sig->param_count);
2730 for (i = 0; i < sig->param_count; ++i)
2731 info->param_types [i] = mini_replace_type (sig->params [i]);
2733 return (MonoDynCallInfo*)info;
// Release the data allocated by mono_arch_dyn_call_prepare.
// NOTE(review): prepare also g_new0's ainfo->param_types; no g_free of it is
// visible here (nearby lines are elided) — verify it is freed, else it leaks.
2737 mono_arch_dyn_call_free (MonoDynCallInfo *info)
2739 ArchDynCallInfo *ainfo = (ArchDynCallInfo*)info;
2741 g_free (ainfo->cinfo);
// Marshal the boxed argument array ARGS into the register/stack image BUF
// (a DynCallArgs) that the dyn-call trampoline will load before branching to
// the callee. Layout: p->regs [0..PARAM_REGS) are r0-r3, followed by
// DYN_CALL_STACK_ARGS stack slots.
2746 mono_arch_start_dyn_call (MonoDynCallInfo *info, gpointer **args, guint8 *ret, guint8 *buf, int buf_len)
2748 ArchDynCallInfo *dinfo = (ArchDynCallInfo*)info;
2749 DynCallArgs *p = (DynCallArgs*)buf;
2750 int arg_index, greg, i, j, pindex;
2751 MonoMethodSignature *sig = dinfo->sig;
2753 g_assert (buf_len >= sizeof (DynCallArgs));
// 'this' (or an early vtype-return address) occupies the first register slot.
2762 if (sig->hasthis || dinfo->cinfo->vret_arg_index == 1) {
2763 p->regs [greg ++] = (mgreg_t)*(args [arg_index ++]);
2768 if (dinfo->cinfo->vtype_retaddr)
2769 p->regs [greg ++] = (mgreg_t)ret;
2771 for (i = pindex; i < sig->param_count; i++) {
2772 MonoType *t = dinfo->param_types [i];
2773 gpointer *arg = args [arg_index ++];
2774 ArgInfo *ainfo = &dinfo->cinfo->args [i + sig->hasthis];
// Compute the destination slot from the arg's storage kind: register args use
// their register index, stack args land after the PARAM_REGS slots.
2777 if (ainfo->storage == RegTypeGeneral || ainfo->storage == RegTypeIRegPair || ainfo->storage == RegTypeStructByVal)
2779 else if (ainfo->storage == RegTypeBase)
2780 slot = PARAM_REGS + (ainfo->offset / 4);
2782 g_assert_not_reached ();
2785 p->regs [slot] = (mgreg_t)*arg;
2790 case MONO_TYPE_STRING:
2791 case MONO_TYPE_CLASS:
2792 case MONO_TYPE_ARRAY:
2793 case MONO_TYPE_SZARRAY:
2794 case MONO_TYPE_OBJECT:
2798 p->regs [slot] = (mgreg_t)*arg;
// Narrow integer types are widened to a full machine word with the
// signedness implied by the ECMA type.
2800 case MONO_TYPE_BOOLEAN:
2802 p->regs [slot] = *(guint8*)arg;
2805 p->regs [slot] = *(gint8*)arg;
2808 p->regs [slot] = *(gint16*)arg;
2811 case MONO_TYPE_CHAR:
2812 p->regs [slot] = *(guint16*)arg;
2815 p->regs [slot] = *(gint32*)arg;
2818 p->regs [slot] = *(guint32*)arg;
// 64-bit values take two consecutive slots.
2822 p->regs [slot ++] = (mgreg_t)arg [0];
2823 p->regs [slot] = (mgreg_t)arg [1];
2826 p->regs [slot] = *(mgreg_t*)arg;
2829 p->regs [slot ++] = (mgreg_t)arg [0];
2830 p->regs [slot] = (mgreg_t)arg [1];
2832 case MONO_TYPE_GENERICINST:
2833 if (MONO_TYPE_IS_REFERENCE (t)) {
2834 p->regs [slot] = (mgreg_t)*arg;
2839 case MONO_TYPE_VALUETYPE:
2840 g_assert (ainfo->storage == RegTypeStructByVal);
2842 if (ainfo->size == 0)
2843 slot = PARAM_REGS + (ainfo->offset / 4);
// Copy the vtype contents word by word (register part + stack spill part).
2847 for (j = 0; j < ainfo->size + ainfo->vtsize; ++j)
2848 p->regs [slot ++] = ((mgreg_t*)arg) [j];
2851 g_assert_not_reached ();
// After the dyn-call trampoline returns, copy the raw result (r0 in res,
// r1 in res2 for 64-bit values) from BUF into the caller-provided return
// buffer, narrowing/reinterpreting according to the declared return type.
2857 mono_arch_finish_dyn_call (MonoDynCallInfo *info, guint8 *buf)
2859 ArchDynCallInfo *ainfo = (ArchDynCallInfo*)info;
2860 MonoType *ptype = ainfo->rtype;
2861 guint8 *ret = ((DynCallArgs*)buf)->ret;
2862 mgreg_t res = ((DynCallArgs*)buf)->res;
2863 mgreg_t res2 = ((DynCallArgs*)buf)->res2;
2865 switch (ptype->type) {
2866 case MONO_TYPE_VOID:
2867 *(gpointer*)ret = NULL;
2869 case MONO_TYPE_STRING:
2870 case MONO_TYPE_CLASS:
2871 case MONO_TYPE_ARRAY:
2872 case MONO_TYPE_SZARRAY:
2873 case MONO_TYPE_OBJECT:
2877 *(gpointer*)ret = (gpointer)res;
2883 case MONO_TYPE_BOOLEAN:
2884 *(guint8*)ret = res;
2887 *(gint16*)ret = res;
2890 case MONO_TYPE_CHAR:
2891 *(guint16*)ret = res;
2894 *(gint32*)ret = res;
2897 *(guint32*)ret = res;
2901 /* This handles endianness as well */
2902 ((gint32*)ret) [0] = res;
2903 ((gint32*)ret) [1] = res2;
2905 case MONO_TYPE_GENERICINST:
2906 if (MONO_TYPE_IS_REFERENCE (ptype)) {
2907 *(gpointer*)ret = (gpointer)res;
2912 case MONO_TYPE_VALUETYPE:
// By-val vtype returns are not supported here; the callee already wrote
// through the ret address passed in start_dyn_call.
2913 g_assert (ainfo->cinfo->vtype_retaddr);
2918 *(float*)ret = *(float*)&res;
2920 case MONO_TYPE_R8: {
// NOTE(review): "®s" below is a mis-encoded "&regs" (U+00AE swallowed the
// "&r") — restore the original bytes when fixing the file's encoding.
2927 *(double*)ret = *(double*)®s;
2931 g_assert_not_reached ();
2938 * Allow tracing to work with this interface (with an optional argument)
// Emit the tracing-hook call at method entry: r0 = MonoMethod, r1 = frame
// pointer placeholder (currently NULL), then call FUNC through r2.
2942 mono_arch_instrument_prolog (MonoCompile *cfg, void *func, void *p, gboolean enable_arguments)
2946 code = mono_arm_emit_load_imm (code, ARMREG_R0, (guint32)cfg->method);
2947 ARM_MOV_REG_IMM8 (code, ARMREG_R1, 0); /* NULL ebp for now */
2948 code = mono_arm_emit_load_imm (code, ARMREG_R2, (guint32)func);
2949 code = emit_call_reg (code, ARMREG_R2);
// Emit the tracing-hook call at method exit. The live return value (in
// r0/r1 or VFP d0/s0 depending on the return type) is spilled to the frame's
// param area, the hook FUNC is called, and the value is reloaded afterwards.
2963 mono_arch_instrument_epilog_full (MonoCompile *cfg, void *func, void *p, gboolean enable_arguments, gboolean preserve_argument_registers)
2966 int save_mode = SAVE_NONE;
2968 MonoMethod *method = cfg->method;
2969 MonoType *ret_type = mini_type_get_underlying_type (cfg->generic_sharing_context, mono_method_signature (method)->ret);
2970 int rtype = ret_type->type;
2971 int save_offset = cfg->param_area;
// Grow the native code buffer if the ~16 instructions we emit might not fit.
2975 offset = code - cfg->native_code;
2976 /* we need about 16 instructions */
2977 if (offset > (cfg->code_size - 16 * 4)) {
2978 cfg->code_size *= 2;
2979 cfg->native_code = g_realloc (cfg->native_code, cfg->code_size);
2980 code = cfg->native_code + offset;
// Pick which registers must be preserved across the hook call, keyed off the
// return type.
2983 case MONO_TYPE_VOID:
2984 /* special case string .ctor icall */
// NOTE(review): the comment says "special case string .ctor", but
// strcmp (...) is non-zero when the name is NOT ".ctor" — this looks like it
// was meant to be !strcmp; verify against the other backends.
2985 if (strcmp (".ctor", method->name) && method->klass == mono_defaults.string_class)
2986 save_mode = SAVE_ONE;
2988 save_mode = SAVE_NONE;
2992 save_mode = SAVE_TWO;
2996 save_mode = SAVE_ONE_FP;
2998 save_mode = SAVE_ONE;
3002 save_mode = SAVE_TWO_FP;
3004 save_mode = SAVE_TWO;
3006 case MONO_TYPE_GENERICINST:
3007 if (!mono_type_generic_inst_is_valuetype (ret_type)) {
3008 save_mode = SAVE_ONE;
3012 case MONO_TYPE_VALUETYPE:
3013 save_mode = SAVE_STRUCT;
3016 save_mode = SAVE_ONE;
// Spill the return value and, when argument reporting is on, also copy it
// into the hook's argument registers (r1/r2).
3020 switch (save_mode) {
3022 ARM_STR_IMM (code, ARMREG_R0, cfg->frame_reg, save_offset);
3023 ARM_STR_IMM (code, ARMREG_R1, cfg->frame_reg, save_offset + 4);
3024 if (enable_arguments) {
3025 ARM_MOV_REG_REG (code, ARMREG_R2, ARMREG_R1);
3026 ARM_MOV_REG_REG (code, ARMREG_R1, ARMREG_R0);
3030 ARM_STR_IMM (code, ARMREG_R0, cfg->frame_reg, save_offset);
3031 if (enable_arguments) {
3032 ARM_MOV_REG_REG (code, ARMREG_R1, ARMREG_R0);
3036 ARM_FSTS (code, ARM_VFP_F0, cfg->frame_reg, save_offset);
3037 if (enable_arguments) {
3038 ARM_FMRS (code, ARMREG_R1, ARM_VFP_F0);
3042 ARM_FSTD (code, ARM_VFP_D0, cfg->frame_reg, save_offset);
3043 if (enable_arguments) {
3044 ARM_FMDRR (code, ARMREG_R1, ARMREG_R2, ARM_VFP_D0);
3048 if (enable_arguments) {
3049 /* FIXME: get the actual address */
3050 ARM_MOV_REG_REG (code, ARMREG_R1, ARMREG_R0);
// Call the hook: r0 = MonoMethod, target loaded into IP.
3058 code = mono_arm_emit_load_imm (code, ARMREG_R0, (guint32)cfg->method);
3059 code = mono_arm_emit_load_imm (code, ARMREG_IP, (guint32)func);
3060 code = emit_call_reg (code, ARMREG_IP);
// Restore the spilled return value.
3062 switch (save_mode) {
3064 ARM_LDR_IMM (code, ARMREG_R0, cfg->frame_reg, save_offset);
3065 ARM_LDR_IMM (code, ARMREG_R1, cfg->frame_reg, save_offset + 4);
3068 ARM_LDR_IMM (code, ARMREG_R0, cfg->frame_reg, save_offset);
3071 ARM_FLDS (code, ARM_VFP_F0, cfg->frame_reg, save_offset);
3074 ARM_FLDD (code, ARM_VFP_D0, cfg->frame_reg, save_offset);
3085 * The immediate field for cond branches is big enough for all reasonable methods
// Emit a conditional branch to ins->inst_true_bb: branch directly when the
// target's native offset is already known (path currently disabled by the
// "0 &&"), otherwise emit a placeholder B<cond> and record a BB patch.
3087 #define EMIT_COND_BRANCH_FLAGS(ins,condcode) \
3088 if (0 && ins->inst_true_bb->native_offset) { \
3089 ARM_B_COND (code, (condcode), (code - cfg->native_code + ins->inst_true_bb->native_offset) & 0xffffff); \
3091 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_BB, ins->inst_true_bb); \
3092 ARM_B_COND (code, (condcode), 0); \
3095 #define EMIT_COND_BRANCH(ins,cond) EMIT_COND_BRANCH_FLAGS(ins, branch_cc_table [(cond)])
3097 /* emit an exception if condition is fail
3099 * We assign the extra code used to throw the implicit exceptions
3100 * to cfg->bb_exit as far as the big branch handling is concerned
3102 #define EMIT_COND_SYSTEM_EXCEPTION_FLAGS(condcode,exc_name) \
3104 mono_add_patch_info (cfg, code - cfg->native_code, \
3105 MONO_PATCH_INFO_EXC, exc_name); \
3106 ARM_BL_COND (code, (condcode), 0); \
3109 #define EMIT_COND_SYSTEM_EXCEPTION(cond,exc_name) EMIT_COND_SYSTEM_EXCEPTION_FLAGS(branch_cc_table [(cond)], (exc_name))
// First arch-specific peephole pass over BB; body not visible here —
// presumably empty/minimal on ARM, with the real work done in pass 2.
3112 mono_arch_peephole_pass_1 (MonoCompile *cfg, MonoBasicBlock *bb)
// Second arch-specific peephole pass: forwards stored values into subsequent
// loads of the same [basereg + offset], collapses redundant load/load and
// move/move pairs, and deletes no-op moves. Uses the SAFE iterator since
// instructions are deleted while walking.
3117 mono_arch_peephole_pass_2 (MonoCompile *cfg, MonoBasicBlock *bb)
3119 MonoInst *ins, *n, *last_ins = NULL;
3121 MONO_BB_FOR_EACH_INS_SAFE (bb, n, ins) {
3122 switch (ins->opcode) {
3125 /* Already done by an arch-independent pass */
3127 case OP_LOAD_MEMBASE:
3128 case OP_LOADI4_MEMBASE:
// Store-to-load forwarding: a load that follows a store to the same slot
// becomes a register move (or is deleted when src == dst).
3130 * OP_STORE_MEMBASE_REG reg, offset(basereg)
3131 * OP_LOAD_MEMBASE offset(basereg), reg
3133 if (last_ins && (last_ins->opcode == OP_STOREI4_MEMBASE_REG
3134 || last_ins->opcode == OP_STORE_MEMBASE_REG) &&
3135 ins->inst_basereg == last_ins->inst_destbasereg &&
3136 ins->inst_offset == last_ins->inst_offset) {
3137 if (ins->dreg == last_ins->sreg1) {
3138 MONO_DELETE_INS (bb, ins);
3141 //static int c = 0; printf ("MATCHX %s %d\n", cfg->method->name,c++);
3142 ins->opcode = OP_MOVE;
3143 ins->sreg1 = last_ins->sreg1;
3147 * Note: reg1 must be different from the basereg in the second load
3148 * OP_LOAD_MEMBASE offset(basereg), reg1
3149 * OP_LOAD_MEMBASE offset(basereg), reg2
3151 * OP_LOAD_MEMBASE offset(basereg), reg1
3152 * OP_MOVE reg1, reg2
// NOTE(review): "} if (" below looks like it was meant to be "} else if (" —
// as written this test also runs after the branch above; verify intent.
3154 } if (last_ins && (last_ins->opcode == OP_LOADI4_MEMBASE
3155 || last_ins->opcode == OP_LOAD_MEMBASE) &&
3156 ins->inst_basereg != last_ins->dreg &&
3157 ins->inst_basereg == last_ins->inst_basereg &&
3158 ins->inst_offset == last_ins->inst_offset) {
3160 if (ins->dreg == last_ins->dreg) {
3161 MONO_DELETE_INS (bb, ins);
3164 ins->opcode = OP_MOVE;
3165 ins->sreg1 = last_ins->dreg;
3168 //g_assert_not_reached ();
3172 * OP_STORE_MEMBASE_IMM imm, offset(basereg)
3173 * OP_LOAD_MEMBASE offset(basereg), reg
3175 * OP_STORE_MEMBASE_IMM imm, offset(basereg)
3176 * OP_ICONST reg, imm
3178 } else if (last_ins && (last_ins->opcode == OP_STOREI4_MEMBASE_IMM
3179 || last_ins->opcode == OP_STORE_MEMBASE_IMM) &&
3180 ins->inst_basereg == last_ins->inst_destbasereg &&
3181 ins->inst_offset == last_ins->inst_offset) {
3182 //static int c = 0; printf ("MATCHX %s %d\n", cfg->method->name,c++);
3183 ins->opcode = OP_ICONST;
3184 ins->inst_c0 = last_ins->inst_imm;
// This rule has never been observed to fire; the assert documents that.
3185 g_assert_not_reached (); // check this rule
3189 case OP_LOADU1_MEMBASE:
3190 case OP_LOADI1_MEMBASE:
// Narrow store followed by narrow load of the same slot: replace the load
// with the matching sign/zero-extending conversion of the stored register.
3191 if (last_ins && (last_ins->opcode == OP_STOREI1_MEMBASE_REG) &&
3192 ins->inst_basereg == last_ins->inst_destbasereg &&
3193 ins->inst_offset == last_ins->inst_offset) {
3194 ins->opcode = (ins->opcode == OP_LOADI1_MEMBASE) ? OP_ICONV_TO_I1 : OP_ICONV_TO_U1;
3195 ins->sreg1 = last_ins->sreg1;
3198 case OP_LOADU2_MEMBASE:
3199 case OP_LOADI2_MEMBASE:
3200 if (last_ins && (last_ins->opcode == OP_STOREI2_MEMBASE_REG) &&
3201 ins->inst_basereg == last_ins->inst_destbasereg &&
3202 ins->inst_offset == last_ins->inst_offset) {
3203 ins->opcode = (ins->opcode == OP_LOADI2_MEMBASE) ? OP_ICONV_TO_I2 : OP_ICONV_TO_U2;
3204 ins->sreg1 = last_ins->sreg1;
3208 ins->opcode = OP_MOVE;
// Delete moves whose source equals their destination.
3212 if (ins->dreg == ins->sreg1) {
3213 MONO_DELETE_INS (bb, ins);
3217 * OP_MOVE sreg, dreg
3218 * OP_MOVE dreg, sreg
3220 if (last_ins && last_ins->opcode == OP_MOVE &&
3221 ins->sreg1 == last_ins->dreg &&
3222 ins->dreg == last_ins->sreg1) {
3223 MONO_DELETE_INS (bb, ins);
3231 bb->last_ins = last_ins;
3235 * the branch_cc_table should maintain the order of these
// Maps Mono's condition codes (CMP_EQ, CMP_LT, ...) to ARM condition-field
// encodings; indexed by the EMIT_COND_* macros above, so entry order matters.
3249 branch_cc_table [] = {
// Allocate a new MonoInst and insert it before the current 'ins' of the
// enclosing lowering loop (captures 'bb' and 'ins' from the call site).
3263 #define ADD_NEW_INS(cfg,dest,op) do { \
3264 MONO_INST_NEW ((cfg), (dest), (op)); \
3265 mono_bblock_insert_before_ins (bb, ins, (dest)); \
// Map a membase/immediate opcode to its two-register form, used by the
// lowering pass when an offset or immediate doesn't fit the instruction's
// immediate field: loads/stores become *_MEMINDEX, and *_MEMBASE_IMM stores
// become *_MEMBASE_REG (the immediate is materialized separately first).
3269 map_to_reg_reg_op (int op)
3278 case OP_COMPARE_IMM:
3280 case OP_ICOMPARE_IMM:
3294 case OP_LOAD_MEMBASE:
3295 return OP_LOAD_MEMINDEX;
3296 case OP_LOADI4_MEMBASE:
3297 return OP_LOADI4_MEMINDEX;
3298 case OP_LOADU4_MEMBASE:
3299 return OP_LOADU4_MEMINDEX;
3300 case OP_LOADU1_MEMBASE:
3301 return OP_LOADU1_MEMINDEX;
3302 case OP_LOADI2_MEMBASE:
3303 return OP_LOADI2_MEMINDEX;
3304 case OP_LOADU2_MEMBASE:
3305 return OP_LOADU2_MEMINDEX;
3306 case OP_LOADI1_MEMBASE:
3307 return OP_LOADI1_MEMINDEX;
3308 case OP_STOREI1_MEMBASE_REG:
3309 return OP_STOREI1_MEMINDEX;
3310 case OP_STOREI2_MEMBASE_REG:
3311 return OP_STOREI2_MEMINDEX;
3312 case OP_STOREI4_MEMBASE_REG:
3313 return OP_STOREI4_MEMINDEX;
3314 case OP_STORE_MEMBASE_REG:
3315 return OP_STORE_MEMINDEX;
3316 case OP_STORER4_MEMBASE_REG:
3317 return OP_STORER4_MEMINDEX;
3318 case OP_STORER8_MEMBASE_REG:
3319 return OP_STORER8_MEMINDEX;
// Immediate stores are lowered in two steps: first to the _REG form here,
// then (via the lowering pass's goto loop_start) the offset is handled.
3320 case OP_STORE_MEMBASE_IMM:
3321 return OP_STORE_MEMBASE_REG;
3322 case OP_STOREI1_MEMBASE_IMM:
3323 return OP_STOREI1_MEMBASE_REG;
3324 case OP_STOREI2_MEMBASE_IMM:
3325 return OP_STOREI2_MEMBASE_REG;
3326 case OP_STOREI4_MEMBASE_IMM:
3327 return OP_STOREI4_MEMBASE_REG;
3329 g_assert_not_reached ();
3333 * Remove from the instruction list the instructions that can't be
3334 * represented with very simple instructions with no register
// Lower IR to forms directly encodable on ARM: immediates that don't fit the
// rotated-imm8 encoding are materialized into registers, membase offsets that
// exceed the addressing-mode range are rewritten to indexed or base+offset
// forms, and some opcodes (mul-by-imm, div/rem-imm, fp compares) are
// strength-reduced or re-shaped.
3338 mono_arch_lowering_pass (MonoCompile *cfg, MonoBasicBlock *bb)
3340 MonoInst *ins, *temp, *last_ins = NULL;
3341 int rot_amount, imm8, low_imm;
3343 MONO_BB_FOR_EACH_INS (bb, ins) {
3345 switch (ins->opcode) {
3349 case OP_COMPARE_IMM:
3350 case OP_ICOMPARE_IMM:
// ALU immediate doesn't fit ARM's 8-bit-rotated encoding: load it into a
// temp vreg and switch to the register-register form of the opcode.
3364 if ((imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount)) < 0) {
3365 ADD_NEW_INS (cfg, temp, OP_ICONST);
3366 temp->inst_c0 = ins->inst_imm;
3367 temp->dreg = mono_alloc_ireg (cfg);
3368 ins->sreg2 = temp->dreg;
3369 ins->opcode = mono_op_imm_to_op (ins->opcode);
3371 if (ins->opcode == OP_SBB || ins->opcode == OP_ISBB || ins->opcode == OP_SUBCC)
// Multiply by immediate: strength-reduce x*1 -> move, x*0 -> const 0,
// x*2^k -> shift; otherwise materialize the constant and use IMUL.
3377 if (ins->inst_imm == 1) {
3378 ins->opcode = OP_MOVE;
3381 if (ins->inst_imm == 0) {
3382 ins->opcode = OP_ICONST;
3386 imm8 = mono_is_power_of_two (ins->inst_imm);
3388 ins->opcode = OP_SHL_IMM;
3389 ins->inst_imm = imm8;
3392 ADD_NEW_INS (cfg, temp, OP_ICONST);
3393 temp->inst_c0 = ins->inst_imm;
3394 temp->dreg = mono_alloc_ireg (cfg);
3395 ins->sreg2 = temp->dreg;
3396 ins->opcode = OP_IMUL;
3402 if (ins->next && (ins->next->opcode == OP_COND_EXC_C || ins->next->opcode == OP_COND_EXC_IC))
3403 /* ARM sets the C flag to 1 if there was _no_ overflow */
3404 ins->next->opcode = OP_COND_EXC_NC;
3407 case OP_IDIV_UN_IMM:
3409 case OP_IREM_UN_IMM:
3410 ADD_NEW_INS (cfg, temp, OP_ICONST);
3411 temp->inst_c0 = ins->inst_imm;
3412 temp->dreg = mono_alloc_ireg (cfg);
3413 ins->sreg2 = temp->dreg;
3414 ins->opcode = mono_op_imm_to_op (ins->opcode);
3416 case OP_LOCALLOC_IMM:
3417 ADD_NEW_INS (cfg, temp, OP_ICONST);
3418 temp->inst_c0 = ins->inst_imm;
3419 temp->dreg = mono_alloc_ireg (cfg);
3420 ins->sreg1 = temp->dreg;
3421 ins->opcode = OP_LOCALLOC;
3423 case OP_LOAD_MEMBASE:
3424 case OP_LOADI4_MEMBASE:
3425 case OP_LOADU4_MEMBASE:
3426 case OP_LOADU1_MEMBASE:
3427 /* we can do two things: load the immed in a register
3428 * and use an indexed load, or see if the immed can be
3429 * represented as an ad_imm + a load with a smaller offset
3430 * that fits. We just do the first for now, optimize later.
3432 if (arm_is_imm12 (ins->inst_offset))
3434 ADD_NEW_INS (cfg, temp, OP_ICONST);
3435 temp->inst_c0 = ins->inst_offset;
3436 temp->dreg = mono_alloc_ireg (cfg);
3437 ins->sreg2 = temp->dreg;
3438 ins->opcode = map_to_reg_reg_op (ins->opcode);
// Halfword/signed-byte loads only have an 8-bit offset range.
3440 case OP_LOADI2_MEMBASE:
3441 case OP_LOADU2_MEMBASE:
3442 case OP_LOADI1_MEMBASE:
3443 if (arm_is_imm8 (ins->inst_offset))
3445 ADD_NEW_INS (cfg, temp, OP_ICONST);
3446 temp->inst_c0 = ins->inst_offset;
3447 temp->dreg = mono_alloc_ireg (cfg);
3448 ins->sreg2 = temp->dreg;
3449 ins->opcode = map_to_reg_reg_op (ins->opcode);
// VFP loads have no indexed form: split the offset into a rotated-imm8 high
// part (added to the base) and a small low part that fits the instruction.
3451 case OP_LOADR4_MEMBASE:
3452 case OP_LOADR8_MEMBASE:
3453 if (arm_is_fpimm8 (ins->inst_offset))
3455 low_imm = ins->inst_offset & 0x1ff;
// NOTE(review): unlike the STORER4/R8 case below, this branch does not also
// require arm_is_fpimm8 (low_imm) — verify low_imm can't violate the fp
// offset encoding here (e.g. non-word-aligned offsets).
3456 if ((imm8 = mono_arm_is_rotated_imm8 (ins->inst_offset & ~0x1ff, &rot_amount)) >= 0) {
3457 ADD_NEW_INS (cfg, temp, OP_ADD_IMM);
3458 temp->inst_imm = ins->inst_offset & ~0x1ff;
3459 temp->sreg1 = ins->inst_basereg;
3460 temp->dreg = mono_alloc_ireg (cfg);
3461 ins->inst_basereg = temp->dreg;
3462 ins->inst_offset = low_imm;
// Fallback: compute base+offset into a fresh register, then use offset 0.
3466 ADD_NEW_INS (cfg, temp, OP_ICONST);
3467 temp->inst_c0 = ins->inst_offset;
3468 temp->dreg = mono_alloc_ireg (cfg);
3470 ADD_NEW_INS (cfg, add_ins, OP_IADD);
3471 add_ins->sreg1 = ins->inst_basereg;
3472 add_ins->sreg2 = temp->dreg;
3473 add_ins->dreg = mono_alloc_ireg (cfg);
3475 ins->inst_basereg = add_ins->dreg;
3476 ins->inst_offset = 0;
3479 case OP_STORE_MEMBASE_REG:
3480 case OP_STOREI4_MEMBASE_REG:
3481 case OP_STOREI1_MEMBASE_REG:
3482 if (arm_is_imm12 (ins->inst_offset))
3484 ADD_NEW_INS (cfg, temp, OP_ICONST);
3485 temp->inst_c0 = ins->inst_offset;
3486 temp->dreg = mono_alloc_ireg (cfg);
3487 ins->sreg2 = temp->dreg;
3488 ins->opcode = map_to_reg_reg_op (ins->opcode);
3490 case OP_STOREI2_MEMBASE_REG:
3491 if (arm_is_imm8 (ins->inst_offset))
3493 ADD_NEW_INS (cfg, temp, OP_ICONST);
3494 temp->inst_c0 = ins->inst_offset;
3495 temp->dreg = mono_alloc_ireg (cfg);
3496 ins->sreg2 = temp->dreg;
3497 ins->opcode = map_to_reg_reg_op (ins->opcode);
3499 case OP_STORER4_MEMBASE_REG:
3500 case OP_STORER8_MEMBASE_REG:
3501 if (arm_is_fpimm8 (ins->inst_offset))
3503 low_imm = ins->inst_offset & 0x1ff;
3504 if ((imm8 = mono_arm_is_rotated_imm8 (ins->inst_offset & ~ 0x1ff, &rot_amount)) >= 0 && arm_is_fpimm8 (low_imm)) {
3505 ADD_NEW_INS (cfg, temp, OP_ADD_IMM);
3506 temp->inst_imm = ins->inst_offset & ~0x1ff;
3507 temp->sreg1 = ins->inst_destbasereg;
3508 temp->dreg = mono_alloc_ireg (cfg);
3509 ins->inst_destbasereg = temp->dreg;
3510 ins->inst_offset = low_imm;
3514 ADD_NEW_INS (cfg, temp, OP_ICONST);
3515 temp->inst_c0 = ins->inst_offset;
3516 temp->dreg = mono_alloc_ireg (cfg);
3518 ADD_NEW_INS (cfg, add_ins, OP_IADD);
3519 add_ins->sreg1 = ins->inst_destbasereg;
3520 add_ins->sreg2 = temp->dreg;
3521 add_ins->dreg = mono_alloc_ireg (cfg);
3523 ins->inst_destbasereg = add_ins->dreg;
3524 ins->inst_offset = 0;
// Immediate stores: materialize the value, switch to the _REG opcode, then
// loop again so the (possibly large) offset is also lowered.
3527 case OP_STORE_MEMBASE_IMM:
3528 case OP_STOREI1_MEMBASE_IMM:
3529 case OP_STOREI2_MEMBASE_IMM:
3530 case OP_STOREI4_MEMBASE_IMM:
3531 ADD_NEW_INS (cfg, temp, OP_ICONST);
3532 temp->inst_c0 = ins->inst_imm;
3533 temp->dreg = mono_alloc_ireg (cfg);
3534 ins->sreg1 = temp->dreg;
3535 ins->opcode = map_to_reg_reg_op (ins->opcode);
3537 goto loop_start; /* make it handle the possibly big ins->inst_offset */
3539 gboolean swap = FALSE;
3543 /* Optimized away */
3548 /* Some fp compares require swapped operands */
3549 switch (ins->next->opcode) {
3551 ins->next->opcode = OP_FBLT;
3555 ins->next->opcode = OP_FBLT_UN;
3559 ins->next->opcode = OP_FBGE;
3563 ins->next->opcode = OP_FBGE_UN;
3571 ins->sreg1 = ins->sreg2;
3580 bb->last_ins = last_ins;
3581 bb->max_vreg = cfg->next_vreg;
// Decompose 64-bit opcodes into 32-bit register-pair operations.
// OP_LNEG becomes RSBS/RSC (0 - low with carry into 0 - high); vreg+1 is the
// low word, vreg+2 the high word per Mono's long-pair convention.
3585 mono_arch_decompose_long_opts (MonoCompile *cfg, MonoInst *long_ins)
3589 if (long_ins->opcode == OP_LNEG) {
3591 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ARM_RSBS_IMM, ins->dreg + 1, ins->sreg1 + 1, 0);
3592 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ARM_RSC_IMM, ins->dreg + 2, ins->sreg1 + 2, 0);
// Emit VFP code converting the double in SREG to an integer in DREG, then
// truncate/extend to SIZE bytes with the requested signedness. Uses a saved
// VFP scratch register for the intermediate conversion result.
3598 emit_float_to_int (MonoCompile *cfg, guchar *code, int dreg, int sreg, int size, gboolean is_signed)
3600 /* sreg is a float, dreg is an integer reg */
3602 code = mono_arm_emit_vfp_scratch_save (cfg, code, vfp_scratch1);
// TOSIZD/TOUIZD: convert-to-int with round-toward-zero (signed/unsigned).
3604 ARM_TOSIZD (code, vfp_scratch1, sreg);
3606 ARM_TOUIZD (code, vfp_scratch1, sreg);
3607 ARM_FMRS (code, dreg, vfp_scratch1);
3608 code = mono_arm_emit_vfp_scratch_restore (cfg, code, vfp_scratch1);
// Unsigned narrowing: mask (1 byte) or shift-pair zero-extend (2 bytes).
3612 ARM_AND_REG_IMM8 (code, dreg, dreg, 0xff);
3613 else if (size == 2) {
3614 ARM_SHL_IMM (code, dreg, dreg, 16);
3615 ARM_SHR_IMM (code, dreg, dreg, 16);
// Signed narrowing: shift left then arithmetic shift right to sign-extend.
3619 ARM_SHL_IMM (code, dreg, dreg, 24);
3620 ARM_SAR_IMM (code, dreg, dreg, 24);
3621 } else if (size == 2) {
3622 ARM_SHL_IMM (code, dreg, dreg, 16);
3623 ARM_SAR_IMM (code, dreg, dreg, 16);
3629 #endif /* #ifndef DISABLE_JIT */
3633 const guchar *target;
// True when DIFF fits ARM's B/BL signed 24-bit word displacement, i.e. a
// +/-32MB byte range (-2^25 .. 2^25 - 1).
3638 #define is_call_imm(diff) ((gint)(diff) >= -33554432 && (gint)(diff) <= 33554431)
// mono_code_manager_foreach callback: scan a code chunk's thunk area for
// either an existing thunk to pdata->target (reuse it) or an empty 3-word
// slot (emit a new ldr-ip/bx thunk there), then patch the call site to branch
// to the thunk. Skips chunks the call site can't reach with a direct branch.
3641 search_thunk_slot (void *data, int csize, int bsize, void *user_data) {
3642 PatchData *pdata = (PatchData*)user_data;
3643 guchar *code = data;
3644 guint32 *thunks = data;
3645 guint32 *endthunks = (guint32*)(code + bsize);
3647 int difflow, diffhigh;
3649 /* always ensure a call from pdata->code can reach to the thunks without further thunks */
3650 difflow = (char*)pdata->code - (char*)thunks;
3651 diffhigh = (char*)pdata->code - (char*)endthunks;
3652 if (!((is_call_imm (thunks) && is_call_imm (endthunks)) || (is_call_imm (difflow) && is_call_imm (diffhigh))))
3656 * The thunk is composed of 3 words:
3657 * load constant from thunks [2] into ARM_IP
3660 * Note that the LR register is already setup
3662 //g_print ("thunk nentries: %d\n", ((char*)endthunks - (char*)thunks)/16);
3663 if ((pdata->found == 2) || (pdata->code >= code && pdata->code <= code + csize)) {
3664 while (thunks < endthunks) {
3665 //g_print ("looking for target: %p at %p (%08x-%08x)\n", pdata->target, thunks, thunks [0], thunks [1]);
// thunks [2] holds the thunk's target address constant.
3666 if (thunks [2] == (guint32)pdata->target) {
3667 arm_patch (pdata->code, (guchar*)thunks);
3668 mono_arch_flush_icache (pdata->code, 4);
3671 } else if ((thunks [0] == 0) && (thunks [1] == 0) && (thunks [2] == 0)) {
3672 /* found a free slot instead: emit thunk */
3673 /* ARMREG_IP is fine to use since this can't be an IMT call
3676 code = (guchar*)thunks;
// Thunk body: load the target from the trailing constant into IP, then
// branch to it (BX to support Thumb interworking when available).
3677 ARM_LDR_IMM (code, ARMREG_IP, ARMREG_PC, 0);
3678 if (thumb_supported)
3679 ARM_BX (code, ARMREG_IP);
3681 ARM_MOV_REG_REG (code, ARMREG_PC, ARMREG_IP);
3682 thunks [2] = (guint32)pdata->target;
3683 mono_arch_flush_icache ((guchar*)thunks, 12);
3685 arm_patch (pdata->code, (guchar*)thunks);
3686 mono_arch_flush_icache (pdata->code, 4);
3690 /* skip 12 bytes, the size of the thunk */
3694 //g_print ("failed thunk lookup for %p from %p at %p (%d entries)\n", pdata->target, pdata->code, data, count);
// Route a branch whose target is out of direct-branch range through a thunk:
// search the dynamic-method code manager (if given), then the domain's code
// chunks (twice: once matching only the chunk containing CODE, then any free
// slot), then every dynamic method's code manager. Aborts if no slot exists.
3700 handle_thunk (MonoDomain *domain, int absolute, guchar *code, const guchar *target, MonoCodeManager *dyn_code_mp)
3705 domain = mono_domain_get ();
3708 pdata.target = target;
3709 pdata.absolute = absolute;
3713 mono_code_manager_foreach (dyn_code_mp, search_thunk_slot, &pdata);
3716 if (pdata.found != 1) {
3717 mono_domain_lock (domain);
3718 mono_domain_code_foreach (domain, search_thunk_slot, &pdata);
3721 /* this uses the first available slot */
// Second pass (pdata.found == 2 mode, per search_thunk_slot): accept a slot
// in any reachable chunk, not just the one containing the call site.
3723 mono_domain_code_foreach (domain, search_thunk_slot, &pdata);
3725 mono_domain_unlock (domain);
3728 if (pdata.found != 1) {
3730 GHashTableIter iter;
3731 MonoJitDynamicMethodInfo *ji;
3734 * This might be a dynamic method, search its code manager. We can only
3735 * use the dynamic method containing CODE, since the others might be freed later.
3739 mono_domain_lock (domain);
3740 hash = domain_jit_info (domain)->dynamic_code_hash;
3742 /* FIXME: Speed this up */
3743 g_hash_table_iter_init (&iter, hash);
3744 while (g_hash_table_iter_next (&iter, NULL, (gpointer*)&ji)) {
3745 mono_code_manager_foreach (ji->code_mp, search_thunk_slot, &pdata);
3746 if (pdata.found == 1)
3750 mono_domain_unlock (domain);
// A failed lookup is fatal: the call site cannot be patched.
3752 if (pdata.found != 1)
3753 g_print ("thunk failed for %p from %p\n", target, code);
3754 g_assert (pdata.found == 1);
// Patch the branch/call at CODE to transfer to TARGET. Handles: direct B/BL
// (rewrite the 24-bit displacement, or BLX + bit shuffling when entering
// Thumb, falling back to a thunk when out of range), jump-table entries, and
// the several ldr-ip-based indirect call/jump sequences the JIT emits.
3758 arm_patch_general (MonoDomain *domain, guchar *code, const guchar *target, MonoCodeManager *dyn_code_mp)
3760 guint32 *code32 = (void*)code;
3761 guint32 ins = *code32;
// Bits 27-25 == 101b identify the B/BL instruction class.
3762 guint32 prim = (ins >> 25) & 7;
3763 guint32 tval = GPOINTER_TO_UINT (target);
3765 //g_print ("patching 0x%08x (0x%08x) to point to 0x%08x\n", code, ins, target);
3766 if (prim == 5) { /* 101b */
3767 /* the diff starts 8 bytes from the branch opcode */
3768 gint diff = target - code - 8;
3770 gint tmask = 0xffffffff;
3771 if (tval & 1) { /* entering thumb mode */
3772 diff = target - 1 - code - 8;
3773 g_assert (thumb_supported);
3774 tbits = 0xf << 28; /* bl->blx bit pattern */
3775 g_assert ((ins & (1 << 24))); /* it must be a bl, not b instruction */
3776 /* this low bit of the displacement is moved to bit 24 in the instruction encoding */
3780 tmask = ~(1 << 24); /* clear the link bit */
3781 /*g_print ("blx to thumb: target: %p, code: %p, diff: %d, mask: %x\n", target, code, diff, tmask);*/
// Forward branch within the +/-32MB displacement range.
3786 if (diff <= 33554431) {
3788 ins = (ins & 0xff000000) | diff;
3790 *code32 = ins | tbits;
3794 /* diff between 0 and -33554432 */
3795 if (diff >= -33554432) {
3797 ins = (ins & 0xff000000) | (diff & ~0xff000000);
3799 *code32 = ins | tbits;
// Out of direct range: reroute through a thunk.
3804 handle_thunk (domain, TRUE, code, target, dyn_code_mp);
3808 #ifdef USE_JUMP_TABLES
3810 gpointer *jte = mono_jumptable_get_entry (code);
3812 jte [0] = (gpointer) target;
3816 * The alternative call sequences looks like this:
3818 * ldr ip, [pc] // loads the address constant
3819 * b 1f // jumps around the constant
3820 * address constant embedded in the code
3825 * There are two cases for patching:
3826 * a) at the end of method emission: in this case code points to the start
3827 * of the call sequence
3828 * b) during runtime patching of the call site: in this case code points
3829 * to the mov pc, ip instruction
3831 * We have to handle also the thunk jump code sequence:
3835 * address constant // execution never reaches here
3837 if ((ins & 0x0ffffff0) == 0x12fff10) {
3838 /* Branch and exchange: the address is constructed in a reg
3839 * We can patch BX when the code sequence is the following:
3840 * ldr ip, [pc, #0] ; 0x8
// Re-emit the expected 4-word sequence into ccode and compare it against the
// words around CODE to locate the embedded address constant to overwrite.
3847 guint8 *emit = (guint8*)ccode;
3848 ARM_LDR_IMM (emit, ARMREG_IP, ARMREG_PC, 0);
3850 ARM_MOV_REG_REG (emit, ARMREG_LR, ARMREG_PC);
3851 ARM_BX (emit, ARMREG_IP);
3853 /*patching from magic trampoline*/
3854 if (ins == ccode [3]) {
3855 g_assert (code32 [-4] == ccode [0]);
3856 g_assert (code32 [-3] == ccode [1]);
3857 g_assert (code32 [-1] == ccode [2]);
3858 code32 [-2] = (guint32)target;
3861 /*patching from JIT*/
3862 if (ins == ccode [0]) {
3863 g_assert (code32 [1] == ccode [1]);
3864 g_assert (code32 [3] == ccode [2]);
3865 g_assert (code32 [4] == ccode [3]);
3866 code32 [2] = (guint32)target;
3869 g_assert_not_reached ();
// BLX-register variant of the same call sequence.
3870 } else if ((ins & 0x0ffffff0) == 0x12fff30) {
3878 guint8 *emit = (guint8*)ccode;
3879 ARM_LDR_IMM (emit, ARMREG_IP, ARMREG_PC, 0);
3881 ARM_BLX_REG (emit, ARMREG_IP);
3883 g_assert (code32 [-3] == ccode [0]);
3884 g_assert (code32 [-2] == ccode [1]);
3885 g_assert (code32 [0] == ccode [2]);
3887 code32 [-1] = (guint32)target;
// Legacy mov-pc sequence (and the thunk jump sequence).
3890 guint32 *tmp = ccode;
3891 guint8 *emit = (guint8*)tmp;
3892 ARM_LDR_IMM (emit, ARMREG_IP, ARMREG_PC, 0);
3893 ARM_MOV_REG_REG (emit, ARMREG_LR, ARMREG_PC);
3894 ARM_MOV_REG_REG (emit, ARMREG_PC, ARMREG_IP);
3895 ARM_BX (emit, ARMREG_IP);
3896 if (ins == ccode [2]) {
3897 g_assert_not_reached (); // should be -2 ...
3898 code32 [-1] = (guint32)target;
3901 if (ins == ccode [0]) {
3902 /* handles both thunk jump code and the far call sequence */
3903 code32 [2] = (guint32)target;
3906 g_assert_not_reached ();
3908 // g_print ("patched with 0x%08x\n", ins);
// Convenience wrapper: patch CODE to branch to TARGET with no domain or
// dynamic-method code manager context.
3913 arm_patch (guchar *code, const guchar *target)
3915 arm_patch_general (NULL, code, target, NULL);
3919 * Return the >= 0 uimm8 value if val can be represented with a byte + rotation
3920 * (with the rotation amount in *rot_amount. rot_amount is already adjusted
3921 * to be used with the emit macros.
3922 * Return -1 otherwise.
// Test whether VAL is encodable as an ARM data-processing immediate (an
// 8-bit value rotated right by an even amount). On success returns the byte
// and stores the emit-macro-adjusted rotation in *rot_amount; -1 otherwise.
3925 mono_arm_is_rotated_imm8 (guint32 val, gint *rot_amount)
3928 for (i = 0; i < 31; i+= 2) {
// NOTE(review): when i == 0, (val << (32 - i)) shifts a 32-bit value by 32,
// which is undefined behavior in C (works by accident on common targets) —
// consider special-casing i == 0.
3929 res = (val << (32 - i)) | (val >> i);
3932 *rot_amount = i? 32 - i: 0;
3939 * Emits in code a sequence of instructions that load the value 'val'
3940 * into the dreg register. Uses at most 4 instructions.
// Emits in code a sequence of instructions that load the 32-bit constant VAL
// into DREG, choosing the shortest available form: a PC-relative constant
// pool load, a single MOV/MVN with a rotated imm8, MOVW/MOVT (ARMv6T2+), or
// a MOV plus up to three ADDs of the remaining bytes. Returns the advanced
// code pointer; uses at most 4 instructions.
3943 mono_arm_emit_load_imm (guint8 *code, int dreg, guint32 val)
3945 int imm8, rot_amount;
3947 ARM_LDR_IMM (code, dreg, ARMREG_PC, 0);
3948 /* skip the constant pool */
3954 if ((imm8 = mono_arm_is_rotated_imm8 (val, &rot_amount)) >= 0) {
3955 ARM_MOV_REG_IMM (code, dreg, imm8, rot_amount);
// MVN covers values whose complement is encodable.
3956 } else if ((imm8 = mono_arm_is_rotated_imm8 (~val, &rot_amount)) >= 0) {
3957 ARM_MVN_REG_IMM (code, dreg, imm8, rot_amount);
3960 ARM_MOVW_REG_IMM (code, dreg, val & 0xffff);
3962 ARM_MOVT_REG_IMM (code, dreg, (val >> 16) & 0xffff);
// Byte-at-a-time fallback: MOV the lowest non-zero byte, ADD the rest with
// the appropriate rotations (24/16/8 place the byte at bits 8/16/24).
3966 ARM_MOV_REG_IMM8 (code, dreg, (val & 0xFF));
3968 ARM_ADD_REG_IMM (code, dreg, dreg, (val & 0xFF00) >> 8, 24);
3970 if (val & 0xFF0000) {
3971 ARM_ADD_REG_IMM (code, dreg, dreg, (val & 0xFF0000) >> 16, 16);
3973 if (val & 0xFF000000) {
3974 ARM_ADD_REG_IMM (code, dreg, dreg, (val & 0xFF000000) >> 24, 8);
3976 } else if (val & 0xFF00) {
3977 ARM_MOV_REG_IMM (code, dreg, (val & 0xFF00) >> 8, 24);
3978 if (val & 0xFF0000) {
3979 ARM_ADD_REG_IMM (code, dreg, dreg, (val & 0xFF0000) >> 16, 16);
3981 if (val & 0xFF000000) {
3982 ARM_ADD_REG_IMM (code, dreg, dreg, (val & 0xFF000000) >> 24, 8);
3984 } else if (val & 0xFF0000) {
3985 ARM_MOV_REG_IMM (code, dreg, (val & 0xFF0000) >> 16, 16);
3986 if (val & 0xFF000000) {
3987 ARM_ADD_REG_IMM (code, dreg, dreg, (val & 0xFF000000) >> 24, 8);
3990 //g_assert_not_reached ();
/*
 * mono_arm_thumb_supported:
 *   Returns whether the Thumb instruction set is available, as cached in
 *   the thumb_supported flag (presumably initialized at startup from CPU
 *   capability detection — TODO confirm where the flag is set).
 */
3996 mono_arm_thumb_supported (void)
3998 return thumb_supported;
4004 * emit_load_volatile_arguments:
4006 * Load volatile arguments from the stack to the original input registers.
4007 * Required before a tail call.
 *
 * Mirrors (in reverse) the argument-spilling done by the prolog: for each
 * incoming argument, reload the value from its stack home back into the
 * register(s) the calling convention assigned to it.  Returns the advanced
 * code pointer.
4010 emit_load_volatile_arguments (MonoCompile *cfg, guint8 *code)
4012 MonoMethod *method = cfg->method;
4013 MonoMethodSignature *sig;
4018 /* FIXME: Generate intermediate code instead */
4020 sig = mono_method_signature (method);
4022 /* This is the opposite of the code in emit_prolog */
/* Recompute the call-convention layout for this signature. */
4026 get_call_info (cfg->generic_sharing_context, NULL, sig);
/* Reload the hidden valuetype-return address argument, if any. */
4028 if (cinfo->vtype_retaddr) {
4029 ArgInfo *ainfo = &cinfo->ret;
4030 inst = cfg->vret_addr;
4031 g_assert (arm_is_imm12 (inst->inst_offset));
4032 ARM_LDR_IMM (code, ainfo->reg, inst->inst_basereg, inst->inst_offset);
/* Walk every formal argument (plus the implicit 'this', if present). */
4034 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
4035 ArgInfo *ainfo = cinfo->args + i;
4036 inst = cfg->args [pos];
4038 if (cfg->verbose_level > 2)
4039 g_print ("Loading argument %d (type: %d)\n", i, ainfo->storage);
/* Argument was register-allocated: move it back to its ABI register. */
4040 if (inst->opcode == OP_REGVAR) {
4041 if (ainfo->storage == RegTypeGeneral)
4042 ARM_MOV_REG_REG (code, inst->dreg, ainfo->reg);
4043 else if (ainfo->storage == RegTypeFP) {
4044 g_assert_not_reached ();
/* Caller passed it on the stack: reload relative to SP. */
4045 } else if (ainfo->storage == RegTypeBase) {
4049 if (arm_is_imm12 (prev_sp_offset + ainfo->offset)) {
4050 ARM_LDR_IMM (code, inst->dreg, ARMREG_SP, (prev_sp_offset + ainfo->offset));
/* Offset too large for an imm12: materialize it in IP first. */
4052 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
4053 ARM_LDR_REG_REG (code, inst->dreg, ARMREG_SP, ARMREG_IP);
4057 g_assert_not_reached ();
/* Argument lives in a stack slot; reload into its register(s). */
4059 if (ainfo->storage == RegTypeGeneral || ainfo->storage == RegTypeIRegPair) {
4060 switch (ainfo->size) {
/* 8-byte case: reload both halves into the register pair. */
4067 g_assert (arm_is_imm12 (inst->inst_offset));
4068 ARM_LDR_IMM (code, ainfo->reg, inst->inst_basereg, inst->inst_offset);
4069 g_assert (arm_is_imm12 (inst->inst_offset + 4));
4070 ARM_LDR_IMM (code, ainfo->reg + 1, inst->inst_basereg, inst->inst_offset + 4);
/* Default word-sized case. */
4073 if (arm_is_imm12 (inst->inst_offset)) {
4074 ARM_LDR_IMM (code, ainfo->reg, inst->inst_basereg, inst->inst_offset);
4076 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
4077 ARM_LDR_REG_REG (code, ainfo->reg, inst->inst_basereg, ARMREG_IP);
/* Remaining storage kinds (bodies partially elided in this view). */
4081 } else if (ainfo->storage == RegTypeBaseGen) {
4084 } else if (ainfo->storage == RegTypeBase) {
4086 } else if (ainfo->storage == RegTypeFP) {
4087 g_assert_not_reached ();
/* Struct passed by value in consecutive registers: reload each word. */
4088 } else if (ainfo->storage == RegTypeStructByVal) {
4089 int doffset = inst->inst_offset;
4093 if (mono_class_from_mono_type (inst->inst_vtype))
4094 size = mono_class_native_size (mono_class_from_mono_type (inst->inst_vtype), NULL);
4095 for (cur_reg = 0; cur_reg < ainfo->size; ++cur_reg) {
4096 if (arm_is_imm12 (doffset)) {
4097 ARM_LDR_IMM (code, ainfo->reg + cur_reg, inst->inst_basereg, doffset);
4099 code = mono_arm_emit_load_imm (code, ARMREG_IP, doffset);
4100 ARM_LDR_REG_REG (code, ainfo->reg + cur_reg, inst->inst_basereg, ARMREG_IP);
4102 soffset += sizeof (gpointer);
4103 doffset += sizeof (gpointer);
4108 } else if (ainfo->storage == RegTypeStructByAddr) {
4123 mono_arch_output_basic_block (MonoCompile *cfg, MonoBasicBlock *bb)
4128 guint8 *code = cfg->native_code + cfg->code_len;
4129 MonoInst *last_ins = NULL;
4130 guint last_offset = 0;
4132 int imm8, rot_amount;
4134 /* we don't align basic blocks of loops on arm */
4136 if (cfg->verbose_level > 2)
4137 g_print ("Basic block %d starting at offset 0x%x\n", bb->block_num, bb->native_offset);
4139 cpos = bb->max_offset;
4141 if (cfg->prof_options & MONO_PROFILE_COVERAGE) {
4142 //MonoCoverageInfo *cov = mono_get_coverage_info (cfg->method);
4143 //g_assert (!mono_compile_aot);
4146 // cov->data [bb->dfn].iloffset = bb->cil_code - cfg->cil_code;
4147 /* this is not thread safe, but good enough */
4148 /* fixme: howto handle overflows? */
4149 //x86_inc_mem (code, &cov->data [bb->dfn].count);
4152 if (mono_break_at_bb_method && mono_method_desc_full_match (mono_break_at_bb_method, cfg->method) && bb->block_num == mono_break_at_bb_bb_num) {
4153 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_INTERNAL_METHOD,
4154 (gpointer)"mono_break");
4155 code = emit_call_seq (cfg, code);
4158 MONO_BB_FOR_EACH_INS (bb, ins) {
4159 offset = code - cfg->native_code;
4161 max_len = ((guint8 *)ins_get_spec (ins->opcode))[MONO_INST_LEN];
4163 if (offset > (cfg->code_size - max_len - 16)) {
4164 cfg->code_size *= 2;
4165 cfg->native_code = g_realloc (cfg->native_code, cfg->code_size);
4166 code = cfg->native_code + offset;
4168 // if (ins->cil_code)
4169 // g_print ("cil code\n");
4170 mono_debug_record_line_number (cfg, ins, offset);
4172 switch (ins->opcode) {
4173 case OP_MEMORY_BARRIER:
4175 ARM_MOV_REG_IMM8 (code, ARMREG_R0, 0);
4176 ARM_MCR (code, 15, 0, ARMREG_R0, 7, 10, 5);
4180 #ifdef HAVE_AEABI_READ_TP
4181 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_INTERNAL_METHOD,
4182 (gpointer)"__aeabi_read_tp");
4183 code = emit_call_seq (cfg, code);
4185 ARM_LDR_IMM (code, ins->dreg, ARMREG_R0, ins->inst_offset);
4187 g_assert_not_reached ();
4190 case OP_ATOMIC_EXCHANGE_I4:
4191 case OP_ATOMIC_CAS_I4:
4192 case OP_ATOMIC_ADD_I4: {
4196 g_assert (v7_supported);
4199 if (ins->sreg1 != ARMREG_IP && ins->sreg2 != ARMREG_IP && ins->sreg3 != ARMREG_IP)
4201 else if (ins->sreg1 != ARMREG_R0 && ins->sreg2 != ARMREG_R0 && ins->sreg3 != ARMREG_R0)
4203 else if (ins->sreg1 != ARMREG_R1 && ins->sreg2 != ARMREG_R1 && ins->sreg3 != ARMREG_R1)
4207 g_assert (cfg->arch.atomic_tmp_offset != -1);
4208 ARM_STR_IMM (code, tmpreg, cfg->frame_reg, cfg->arch.atomic_tmp_offset);
4210 switch (ins->opcode) {
4211 case OP_ATOMIC_EXCHANGE_I4:
4213 ARM_DMB (code, ARM_DMB_SY);
4214 ARM_LDREX_REG (code, ARMREG_LR, ins->sreg1);
4215 ARM_STREX_REG (code, tmpreg, ins->sreg2, ins->sreg1);
4216 ARM_CMP_REG_IMM (code, tmpreg, 0, 0);
4218 ARM_B_COND (code, ARMCOND_NE, 0);
4219 arm_patch (buf [1], buf [0]);
4221 case OP_ATOMIC_CAS_I4:
4222 ARM_DMB (code, ARM_DMB_SY);
4224 ARM_LDREX_REG (code, ARMREG_LR, ins->sreg1);
4225 ARM_CMP_REG_REG (code, ARMREG_LR, ins->sreg3);
4227 ARM_B_COND (code, ARMCOND_NE, 0);
4228 ARM_STREX_REG (code, tmpreg, ins->sreg2, ins->sreg1);
4229 ARM_CMP_REG_IMM (code, tmpreg, 0, 0);
4231 ARM_B_COND (code, ARMCOND_NE, 0);
4232 arm_patch (buf [2], buf [0]);
4233 arm_patch (buf [1], code);
4235 case OP_ATOMIC_ADD_I4:
4237 ARM_DMB (code, ARM_DMB_SY);
4238 ARM_LDREX_REG (code, ARMREG_LR, ins->sreg1);
4239 ARM_ADD_REG_REG (code, ARMREG_LR, ARMREG_LR, ins->sreg2);
4240 ARM_STREX_REG (code, tmpreg, ARMREG_LR, ins->sreg1);
4241 ARM_CMP_REG_IMM (code, tmpreg, 0, 0);
4243 ARM_B_COND (code, ARMCOND_NE, 0);
4244 arm_patch (buf [1], buf [0]);
4247 g_assert_not_reached ();
4250 ARM_DMB (code, ARM_DMB_SY);
4251 if (tmpreg != ins->dreg)
4252 ARM_LDR_IMM (code, tmpreg, cfg->frame_reg, cfg->arch.atomic_tmp_offset);
4253 ARM_MOV_REG_REG (code, ins->dreg, ARMREG_LR);
4258 ppc_mullw (code, ppc_r4, ins->sreg1, ins->sreg2);
4259 ppc_mulhw (code, ppc_r3, ins->sreg1, ins->sreg2);
4262 ppc_mullw (code, ppc_r4, ins->sreg1, ins->sreg2);
4263 ppc_mulhwu (code, ppc_r3, ins->sreg1, ins->sreg2);
4265 case OP_STOREI1_MEMBASE_IMM:
4266 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_imm & 0xFF);
4267 g_assert (arm_is_imm12 (ins->inst_offset));
4268 ARM_STRB_IMM (code, ARMREG_LR, ins->inst_destbasereg, ins->inst_offset);
4270 case OP_STOREI2_MEMBASE_IMM:
4271 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_imm & 0xFFFF);
4272 g_assert (arm_is_imm8 (ins->inst_offset));
4273 ARM_STRH_IMM (code, ARMREG_LR, ins->inst_destbasereg, ins->inst_offset);
4275 case OP_STORE_MEMBASE_IMM:
4276 case OP_STOREI4_MEMBASE_IMM:
4277 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_imm);
4278 g_assert (arm_is_imm12 (ins->inst_offset));
4279 ARM_STR_IMM (code, ARMREG_LR, ins->inst_destbasereg, ins->inst_offset);
4281 case OP_STOREI1_MEMBASE_REG:
4282 g_assert (arm_is_imm12 (ins->inst_offset));
4283 ARM_STRB_IMM (code, ins->sreg1, ins->inst_destbasereg, ins->inst_offset);
4285 case OP_STOREI2_MEMBASE_REG:
4286 g_assert (arm_is_imm8 (ins->inst_offset));
4287 ARM_STRH_IMM (code, ins->sreg1, ins->inst_destbasereg, ins->inst_offset);
4289 case OP_STORE_MEMBASE_REG:
4290 case OP_STOREI4_MEMBASE_REG:
4291 /* this case is special, since it happens for spill code after lowering has been called */
4292 if (arm_is_imm12 (ins->inst_offset)) {
4293 ARM_STR_IMM (code, ins->sreg1, ins->inst_destbasereg, ins->inst_offset);
4295 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_offset);
4296 ARM_STR_REG_REG (code, ins->sreg1, ins->inst_destbasereg, ARMREG_LR);
4299 case OP_STOREI1_MEMINDEX:
4300 ARM_STRB_REG_REG (code, ins->sreg1, ins->inst_destbasereg, ins->sreg2);
4302 case OP_STOREI2_MEMINDEX:
4303 ARM_STRH_REG_REG (code, ins->sreg1, ins->inst_destbasereg, ins->sreg2);
4305 case OP_STORE_MEMINDEX:
4306 case OP_STOREI4_MEMINDEX:
4307 ARM_STR_REG_REG (code, ins->sreg1, ins->inst_destbasereg, ins->sreg2);
4310 g_assert_not_reached ();
4312 case OP_LOAD_MEMINDEX:
4313 case OP_LOADI4_MEMINDEX:
4314 case OP_LOADU4_MEMINDEX:
4315 ARM_LDR_REG_REG (code, ins->dreg, ins->inst_basereg, ins->sreg2);
4317 case OP_LOADI1_MEMINDEX:
4318 ARM_LDRSB_REG_REG (code, ins->dreg, ins->inst_basereg, ins->sreg2);
4320 case OP_LOADU1_MEMINDEX:
4321 ARM_LDRB_REG_REG (code, ins->dreg, ins->inst_basereg, ins->sreg2);
4323 case OP_LOADI2_MEMINDEX:
4324 ARM_LDRSH_REG_REG (code, ins->dreg, ins->inst_basereg, ins->sreg2);
4326 case OP_LOADU2_MEMINDEX:
4327 ARM_LDRH_REG_REG (code, ins->dreg, ins->inst_basereg, ins->sreg2);
4329 case OP_LOAD_MEMBASE:
4330 case OP_LOADI4_MEMBASE:
4331 case OP_LOADU4_MEMBASE:
4332 /* this case is special, since it happens for spill code after lowering has been called */
4333 if (arm_is_imm12 (ins->inst_offset)) {
4334 ARM_LDR_IMM (code, ins->dreg, ins->inst_basereg, ins->inst_offset);
4336 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_offset);
4337 ARM_LDR_REG_REG (code, ins->dreg, ins->inst_basereg, ARMREG_LR);
4340 case OP_LOADI1_MEMBASE:
4341 g_assert (arm_is_imm8 (ins->inst_offset));
4342 ARM_LDRSB_IMM (code, ins->dreg, ins->inst_basereg, ins->inst_offset);
4344 case OP_LOADU1_MEMBASE:
4345 g_assert (arm_is_imm12 (ins->inst_offset));
4346 ARM_LDRB_IMM (code, ins->dreg, ins->inst_basereg, ins->inst_offset);
4348 case OP_LOADU2_MEMBASE:
4349 g_assert (arm_is_imm8 (ins->inst_offset));
4350 ARM_LDRH_IMM (code, ins->dreg, ins->inst_basereg, ins->inst_offset);
4352 case OP_LOADI2_MEMBASE:
4353 g_assert (arm_is_imm8 (ins->inst_offset));
4354 ARM_LDRSH_IMM (code, ins->dreg, ins->inst_basereg, ins->inst_offset);
4356 case OP_ICONV_TO_I1:
4357 ARM_SHL_IMM (code, ins->dreg, ins->sreg1, 24);
4358 ARM_SAR_IMM (code, ins->dreg, ins->dreg, 24);
4360 case OP_ICONV_TO_I2:
4361 ARM_SHL_IMM (code, ins->dreg, ins->sreg1, 16);
4362 ARM_SAR_IMM (code, ins->dreg, ins->dreg, 16);
4364 case OP_ICONV_TO_U1:
4365 ARM_AND_REG_IMM8 (code, ins->dreg, ins->sreg1, 0xff);
4367 case OP_ICONV_TO_U2:
4368 ARM_SHL_IMM (code, ins->dreg, ins->sreg1, 16);
4369 ARM_SHR_IMM (code, ins->dreg, ins->dreg, 16);
4373 ARM_CMP_REG_REG (code, ins->sreg1, ins->sreg2);
4375 case OP_COMPARE_IMM:
4376 case OP_ICOMPARE_IMM:
4377 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
4378 g_assert (imm8 >= 0);
4379 ARM_CMP_REG_IMM (code, ins->sreg1, imm8, rot_amount);
4383 * gdb does not like encountering the hw breakpoint ins in the debugged code.
4384 * So instead of emitting a trap, we emit a call a C function and place a
4387 //*(int*)code = 0xef9f0001;
4390 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_INTERNAL_METHOD,
4391 (gpointer)"mono_break");
4392 code = emit_call_seq (cfg, code);
4394 case OP_RELAXED_NOP:
4399 case OP_DUMMY_STORE:
4400 case OP_DUMMY_ICONST:
4401 case OP_DUMMY_R8CONST:
4402 case OP_NOT_REACHED:
4405 case OP_SEQ_POINT: {
4407 MonoInst *info_var = cfg->arch.seq_point_info_var;
4408 MonoInst *ss_trigger_page_var = cfg->arch.ss_trigger_page_var;
4409 MonoInst *ss_read_var = cfg->arch.seq_point_read_var;
4410 MonoInst *ss_method_var = cfg->arch.seq_point_ss_method_var;
4411 MonoInst *bp_method_var = cfg->arch.seq_point_bp_method_var;
4413 int dreg = ARMREG_LR;
4415 if (cfg->soft_breakpoints) {
4416 g_assert (!cfg->compile_aot);
4420 * For AOT, we use one got slot per method, which will point to a
4421 * SeqPointInfo structure, containing all the information required
4422 * by the code below.
4424 if (cfg->compile_aot) {
4425 g_assert (info_var);
4426 g_assert (info_var->opcode == OP_REGOFFSET);
4427 g_assert (arm_is_imm12 (info_var->inst_offset));
4430 if (!cfg->soft_breakpoints) {
4432 * Read from the single stepping trigger page. This will cause a
4433 * SIGSEGV when single stepping is enabled.
4434 * We do this _before_ the breakpoint, so single stepping after
4435 * a breakpoint is hit will step to the next IL offset.
4437 g_assert (((guint64)(gsize)ss_trigger_page >> 32) == 0);
4440 if (ins->flags & MONO_INST_SINGLE_STEP_LOC) {
4441 if (cfg->soft_breakpoints) {
4442 /* Load the address of the sequence point trigger variable. */
4445 g_assert (var->opcode == OP_REGOFFSET);
4446 g_assert (arm_is_imm12 (var->inst_offset));
4447 ARM_LDR_IMM (code, dreg, var->inst_basereg, var->inst_offset);
4449 /* Read the value and check whether it is non-zero. */
4450 ARM_LDR_IMM (code, dreg, dreg, 0);
4451 ARM_CMP_REG_IMM (code, dreg, 0, 0);
4453 /* Load the address of the sequence point method. */
4454 var = ss_method_var;
4456 g_assert (var->opcode == OP_REGOFFSET);
4457 g_assert (arm_is_imm12 (var->inst_offset));
4458 ARM_LDR_IMM (code, dreg, var->inst_basereg, var->inst_offset);
4460 /* Call it conditionally. */
4461 ARM_BLX_REG_COND (code, ARMCOND_NE, dreg);
4463 if (cfg->compile_aot) {
4464 /* Load the trigger page addr from the variable initialized in the prolog */
4465 var = ss_trigger_page_var;
4467 g_assert (var->opcode == OP_REGOFFSET);
4468 g_assert (arm_is_imm12 (var->inst_offset));
4469 ARM_LDR_IMM (code, dreg, var->inst_basereg, var->inst_offset);
4471 #ifdef USE_JUMP_TABLES
4472 gpointer *jte = mono_jumptable_add_entry ();
4473 code = mono_arm_load_jumptable_entry (code, jte, dreg);
4474 jte [0] = ss_trigger_page;
4476 ARM_LDR_IMM (code, dreg, ARMREG_PC, 0);
4478 *(int*)code = (int)ss_trigger_page;
4482 ARM_LDR_IMM (code, dreg, dreg, 0);
4486 mono_add_seq_point (cfg, bb, ins, code - cfg->native_code);
4488 if (cfg->soft_breakpoints) {
4489 /* Load the address of the breakpoint method into ip. */
4490 var = bp_method_var;
4492 g_assert (var->opcode == OP_REGOFFSET);
4493 g_assert (arm_is_imm12 (var->inst_offset));
4494 ARM_LDR_IMM (code, dreg, var->inst_basereg, var->inst_offset);
4497 * A placeholder for a possible breakpoint inserted by
4498 * mono_arch_set_breakpoint ().
4501 } else if (cfg->compile_aot) {
4502 guint32 offset = code - cfg->native_code;
4505 ARM_LDR_IMM (code, dreg, info_var->inst_basereg, info_var->inst_offset);
4506 /* Add the offset */
4507 val = ((offset / 4) * sizeof (guint8*)) + MONO_STRUCT_OFFSET (SeqPointInfo, bp_addrs);
4508 /* Load the info->bp_addrs [offset], which is either 0 or the address of a trigger page */
4509 if (arm_is_imm12 ((int)val)) {
4510 ARM_LDR_IMM (code, dreg, dreg, val);
4512 ARM_ADD_REG_IMM (code, dreg, dreg, (val & 0xFF), 0);
4514 ARM_ADD_REG_IMM (code, dreg, dreg, (val & 0xFF00) >> 8, 24);
4516 ARM_ADD_REG_IMM (code, dreg, dreg, (val & 0xFF0000) >> 16, 16);
4517 g_assert (!(val & 0xFF000000));
4519 ARM_LDR_IMM (code, dreg, dreg, 0);
4521 /* What is faster, a branch or a load ? */
4522 ARM_CMP_REG_IMM (code, dreg, 0, 0);
4523 /* The breakpoint instruction */
4524 ARM_LDR_IMM_COND (code, dreg, dreg, 0, ARMCOND_NE);
4527 * A placeholder for a possible breakpoint inserted by
4528 * mono_arch_set_breakpoint ().
4530 for (i = 0; i < 4; ++i)
4537 ARM_ADDS_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4540 ARM_ADD_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4544 ARM_ADCS_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4547 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
4548 g_assert (imm8 >= 0);
4549 ARM_ADDS_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
4553 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
4554 g_assert (imm8 >= 0);
4555 ARM_ADD_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
4559 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
4560 g_assert (imm8 >= 0);
4561 ARM_ADCS_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
4564 ARM_ADD_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4565 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, "OverflowException");
4567 case OP_IADD_OVF_UN:
4568 ARM_ADD_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4569 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, "OverflowException");
4572 ARM_SUB_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4573 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, "OverflowException");
4575 case OP_ISUB_OVF_UN:
4576 ARM_SUB_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4577 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_TRUE, PPC_BR_EQ, "OverflowException");
4579 case OP_ADD_OVF_CARRY:
4580 ARM_ADCS_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4581 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, "OverflowException");
4583 case OP_ADD_OVF_UN_CARRY:
4584 ARM_ADCS_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4585 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, "OverflowException");
4587 case OP_SUB_OVF_CARRY:
4588 ARM_SBCS_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4589 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, "OverflowException");
4591 case OP_SUB_OVF_UN_CARRY:
4592 ARM_SBCS_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4593 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_TRUE, PPC_BR_EQ, "OverflowException");
4597 ARM_SUBS_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4600 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
4601 g_assert (imm8 >= 0);
4602 ARM_SUBS_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
4605 ARM_SUB_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4609 ARM_SBCS_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4613 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
4614 g_assert (imm8 >= 0);
4615 ARM_SUB_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
4619 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
4620 g_assert (imm8 >= 0);
4621 ARM_SBCS_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
4623 case OP_ARM_RSBS_IMM:
4624 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
4625 g_assert (imm8 >= 0);
4626 ARM_RSBS_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
4628 case OP_ARM_RSC_IMM:
4629 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
4630 g_assert (imm8 >= 0);
4631 ARM_RSC_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
4634 ARM_AND_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4638 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
4639 g_assert (imm8 >= 0);
4640 ARM_AND_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
4643 g_assert (v7s_supported);
4644 ARM_SDIV (code, ins->dreg, ins->sreg1, ins->sreg2);
4647 g_assert (v7s_supported);
4648 ARM_UDIV (code, ins->dreg, ins->sreg1, ins->sreg2);
4651 g_assert (v7s_supported);
4652 ARM_SDIV (code, ARMREG_LR, ins->sreg1, ins->sreg2);
4653 ARM_MLS (code, ins->dreg, ARMREG_LR, ins->sreg2, ins->sreg1);
4656 g_assert (v7s_supported);
4657 ARM_UDIV (code, ARMREG_LR, ins->sreg1, ins->sreg2);
4658 ARM_MLS (code, ins->dreg, ARMREG_LR, ins->sreg2, ins->sreg1);
4662 g_assert_not_reached ();
4664 ARM_ORR_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4668 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
4669 g_assert (imm8 >= 0);
4670 ARM_ORR_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
4673 ARM_EOR_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4677 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
4678 g_assert (imm8 >= 0);
4679 ARM_EOR_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
4682 ARM_SHL_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4687 ARM_SHL_IMM (code, ins->dreg, ins->sreg1, (ins->inst_imm & 0x1f));
4688 else if (ins->dreg != ins->sreg1)
4689 ARM_MOV_REG_REG (code, ins->dreg, ins->sreg1);
4692 ARM_SAR_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4697 ARM_SAR_IMM (code, ins->dreg, ins->sreg1, (ins->inst_imm & 0x1f));
4698 else if (ins->dreg != ins->sreg1)
4699 ARM_MOV_REG_REG (code, ins->dreg, ins->sreg1);
4702 case OP_ISHR_UN_IMM:
4704 ARM_SHR_IMM (code, ins->dreg, ins->sreg1, (ins->inst_imm & 0x1f));
4705 else if (ins->dreg != ins->sreg1)
4706 ARM_MOV_REG_REG (code, ins->dreg, ins->sreg1);
4709 ARM_SHR_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4712 ARM_MVN_REG_REG (code, ins->dreg, ins->sreg1);
4715 ARM_RSB_REG_IMM8 (code, ins->dreg, ins->sreg1, 0);
4718 if (ins->dreg == ins->sreg2)
4719 ARM_MUL_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4721 ARM_MUL_REG_REG (code, ins->dreg, ins->sreg2, ins->sreg1);
4724 g_assert_not_reached ();
4727 /* FIXME: handle ovf/ sreg2 != dreg */
4728 ARM_MUL_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4729 /* FIXME: MUL doesn't set the C/O flags on ARM */
4731 case OP_IMUL_OVF_UN:
4732 /* FIXME: handle ovf/ sreg2 != dreg */
4733 ARM_MUL_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4734 /* FIXME: MUL doesn't set the C/O flags on ARM */
4737 code = mono_arm_emit_load_imm (code, ins->dreg, ins->inst_c0);
4740 /* Load the GOT offset */
4741 mono_add_patch_info (cfg, offset, (MonoJumpInfoType)ins->inst_i1, ins->inst_p0);
4742 ARM_LDR_IMM (code, ins->dreg, ARMREG_PC, 0);
4744 *(gpointer*)code = NULL;
4746 /* Load the value from the GOT */
4747 ARM_LDR_REG_REG (code, ins->dreg, ARMREG_PC, ins->dreg);
4749 case OP_OBJC_GET_SELECTOR:
4750 mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_OBJC_SELECTOR_REF, ins->inst_p0);
4751 ARM_LDR_IMM (code, ins->dreg, ARMREG_PC, 0);
4753 *(gpointer*)code = NULL;
4755 ARM_LDR_REG_REG (code, ins->dreg, ARMREG_PC, ins->dreg);
4757 case OP_ICONV_TO_I4:
4758 case OP_ICONV_TO_U4:
4760 if (ins->dreg != ins->sreg1)
4761 ARM_MOV_REG_REG (code, ins->dreg, ins->sreg1);
4764 int saved = ins->sreg2;
4765 if (ins->sreg2 == ARM_LSW_REG) {
4766 ARM_MOV_REG_REG (code, ARMREG_LR, ins->sreg2);
4769 if (ins->sreg1 != ARM_LSW_REG)
4770 ARM_MOV_REG_REG (code, ARM_LSW_REG, ins->sreg1);
4771 if (saved != ARM_MSW_REG)
4772 ARM_MOV_REG_REG (code, ARM_MSW_REG, saved);
4777 ARM_CPYD (code, ins->dreg, ins->sreg1);
4779 case OP_FCONV_TO_R4:
4781 ARM_CVTD (code, ins->dreg, ins->sreg1);
4782 ARM_CVTS (code, ins->dreg, ins->dreg);
4787 * Keep in sync with mono_arch_emit_epilog
4789 g_assert (!cfg->method->save_lmf);
4791 code = emit_load_volatile_arguments (cfg, code);
4793 code = emit_big_add (code, ARMREG_SP, cfg->frame_reg, cfg->stack_usage);
4795 if (cfg->used_int_regs)
4796 ARM_POP (code, cfg->used_int_regs);
4797 ARM_POP (code, (1 << ARMREG_R7) | (1 << ARMREG_LR));
4799 ARM_POP (code, cfg->used_int_regs | (1 << ARMREG_LR));
4801 mono_add_patch_info (cfg, (guint8*) code - cfg->native_code, MONO_PATCH_INFO_METHOD_JUMP, ins->inst_p0);
4802 if (cfg->compile_aot) {
4803 ARM_LDR_IMM (code, ARMREG_IP, ARMREG_PC, 0);
4805 *(gpointer*)code = NULL;
4807 ARM_LDR_REG_REG (code, ARMREG_PC, ARMREG_PC, ARMREG_IP);
4809 code = mono_arm_patchable_b (code, ARMCOND_AL);
4813 MonoCallInst *call = (MonoCallInst*)ins;
4816 * The stack looks like the following:
4817 * <caller argument area>
4820 * <callee argument area>
4821 * Need to copy the arguments from the callee argument area to
4822 * the caller argument area, and pop the frame.
4824 if (call->stack_usage) {
4825 int i, prev_sp_offset = 0;
4827 /* Compute size of saved registers restored below */
4829 prev_sp_offset = 2 * 4;
4831 prev_sp_offset = 1 * 4;
4832 for (i = 0; i < 16; ++i) {
4833 if (cfg->used_int_regs & (1 << i))
4834 prev_sp_offset += 4;
4837 code = emit_big_add (code, ARMREG_IP, cfg->frame_reg, cfg->stack_usage + prev_sp_offset);
4839 /* Copy arguments on the stack to our argument area */
4840 for (i = 0; i < call->stack_usage; i += sizeof (mgreg_t)) {
4841 ARM_LDR_IMM (code, ARMREG_LR, ARMREG_SP, i);
4842 ARM_STR_IMM (code, ARMREG_LR, ARMREG_IP, i);
4847 * Keep in sync with mono_arch_emit_epilog
4849 g_assert (!cfg->method->save_lmf);
4851 code = emit_big_add (code, ARMREG_SP, cfg->frame_reg, cfg->stack_usage);
4853 if (cfg->used_int_regs)
4854 ARM_POP (code, cfg->used_int_regs);
4855 ARM_POP (code, (1 << ARMREG_R7) | (1 << ARMREG_LR));
4857 ARM_POP (code, cfg->used_int_regs | (1 << ARMREG_LR));
4860 mono_add_patch_info (cfg, (guint8*) code - cfg->native_code, MONO_PATCH_INFO_METHOD_JUMP, call->method);
4861 if (cfg->compile_aot) {
4862 ARM_LDR_IMM (code, ARMREG_IP, ARMREG_PC, 0);
4864 *(gpointer*)code = NULL;
4866 ARM_LDR_REG_REG (code, ARMREG_PC, ARMREG_PC, ARMREG_IP);
4868 code = mono_arm_patchable_b (code, ARMCOND_AL);
4873 /* ensure ins->sreg1 is not NULL */
4874 ARM_LDRB_IMM (code, ARMREG_LR, ins->sreg1, 0);
4877 g_assert (cfg->sig_cookie < 128);
4878 ARM_LDR_IMM (code, ARMREG_IP, cfg->frame_reg, cfg->sig_cookie);
4879 ARM_STR_IMM (code, ARMREG_IP, ins->sreg1, 0);
4888 call = (MonoCallInst*)ins;
4891 code = emit_float_args (cfg, call, code, &max_len, &offset);
4893 if (ins->flags & MONO_INST_HAS_METHOD)
4894 mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_METHOD, call->method);
4896 mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_ABS, call->fptr);
4897 code = emit_call_seq (cfg, code);
4898 ins->flags |= MONO_INST_GC_CALLSITE;
4899 ins->backend.pc_offset = code - cfg->native_code;
4900 code = emit_move_return_value (cfg, ins, code);
4906 case OP_VOIDCALL_REG:
4909 code = emit_float_args (cfg, (MonoCallInst *)ins, code, &max_len, &offset);
4911 code = emit_call_reg (code, ins->sreg1);
4912 ins->flags |= MONO_INST_GC_CALLSITE;
4913 ins->backend.pc_offset = code - cfg->native_code;
4914 code = emit_move_return_value (cfg, ins, code);
4916 case OP_FCALL_MEMBASE:
4917 case OP_LCALL_MEMBASE:
4918 case OP_VCALL_MEMBASE:
4919 case OP_VCALL2_MEMBASE:
4920 case OP_VOIDCALL_MEMBASE:
4921 case OP_CALL_MEMBASE: {
4922 gboolean imt_arg = FALSE;
4924 g_assert (ins->sreg1 != ARMREG_LR);
4925 call = (MonoCallInst*)ins;
4928 code = emit_float_args (cfg, call, code, &max_len, &offset);
4930 if (call->dynamic_imt_arg || call->method->klass->flags & TYPE_ATTRIBUTE_INTERFACE)
4932 if (!arm_is_imm12 (ins->inst_offset))
4933 code = mono_arm_emit_load_imm (code, ARMREG_IP, ins->inst_offset);
4934 #ifdef USE_JUMP_TABLES
4940 ARM_ADD_REG_IMM8 (code, ARMREG_LR, ARMREG_PC, LR_BIAS);
4942 ARM_MOV_REG_REG (code, ARMREG_LR, ARMREG_PC);
4944 if (!arm_is_imm12 (ins->inst_offset))
4945 ARM_LDR_REG_REG (code, ARMREG_PC, ins->sreg1, ARMREG_IP);
4947 ARM_LDR_IMM (code, ARMREG_PC, ins->sreg1, ins->inst_offset);
4950 * We can't embed the method in the code stream in PIC code, or
4952 * Instead, we put it in V5 in code emitted by
4953 * mono_arch_emit_imt_argument (), and embed NULL here to
4954 * signal the IMT thunk that the value is in V5.
4956 #ifdef USE_JUMP_TABLES
4957 /* In case of jumptables we always use value in V5. */
4960 if (call->dynamic_imt_arg)
4961 *((gpointer*)code) = NULL;
4963 *((gpointer*)code) = (gpointer)call->method;
4967 ins->flags |= MONO_INST_GC_CALLSITE;
4968 ins->backend.pc_offset = code - cfg->native_code;
4969 code = emit_move_return_value (cfg, ins, code);
4973 /* round the size to 8 bytes */
4974 ARM_ADD_REG_IMM8 (code, ins->dreg, ins->sreg1, 7);
4975 ARM_BIC_REG_IMM8 (code, ins->dreg, ins->dreg, 7);
4976 ARM_SUB_REG_REG (code, ARMREG_SP, ARMREG_SP, ins->dreg);
4977 /* memzero the area: dreg holds the size, sp is the pointer */
4978 if (ins->flags & MONO_INST_INIT) {
4979 guint8 *start_loop, *branch_to_cond;
4980 ARM_MOV_REG_IMM8 (code, ARMREG_LR, 0);
4981 branch_to_cond = code;
4984 ARM_STR_REG_REG (code, ARMREG_LR, ARMREG_SP, ins->dreg);
4985 arm_patch (branch_to_cond, code);
4986 /* decrement by 4 and set flags */
4987 ARM_SUBS_REG_IMM8 (code, ins->dreg, ins->dreg, sizeof (mgreg_t));
4988 ARM_B_COND (code, ARMCOND_GE, 0);
4989 arm_patch (code - 4, start_loop);
4991 ARM_MOV_REG_REG (code, ins->dreg, ARMREG_SP);
4992 if (cfg->param_area)
4993 code = emit_sub_imm (code, ARMREG_SP, ARMREG_SP, ALIGN_TO (cfg->param_area, MONO_ARCH_FRAME_ALIGNMENT));
4998 MonoInst *var = cfg->dyn_call_var;
5000 g_assert (var->opcode == OP_REGOFFSET);
5001 g_assert (arm_is_imm12 (var->inst_offset));
5003 /* lr = args buffer filled by mono_arch_get_dyn_call_args () */
5004 ARM_MOV_REG_REG( code, ARMREG_LR, ins->sreg1);
5006 ARM_MOV_REG_REG( code, ARMREG_IP, ins->sreg2);
5008 /* Save args buffer */
5009 ARM_STR_IMM (code, ARMREG_LR, var->inst_basereg, var->inst_offset);
5011 /* Set stack slots using R0 as scratch reg */
5012 /* MONO_ARCH_DYN_CALL_PARAM_AREA gives the size of stack space available */
5013 for (i = 0; i < DYN_CALL_STACK_ARGS; ++i) {
5014 ARM_LDR_IMM (code, ARMREG_R0, ARMREG_LR, (PARAM_REGS + i) * sizeof (mgreg_t));
5015 ARM_STR_IMM (code, ARMREG_R0, ARMREG_SP, i * sizeof (mgreg_t));
5018 /* Set argument registers */
5019 for (i = 0; i < PARAM_REGS; ++i)
5020 ARM_LDR_IMM (code, i, ARMREG_LR, i * sizeof (mgreg_t));
5023 ARM_MOV_REG_REG (code, ARMREG_LR, ARMREG_PC);
5024 ARM_MOV_REG_REG (code, ARMREG_PC, ARMREG_IP);
5027 ARM_LDR_IMM (code, ARMREG_IP, var->inst_basereg, var->inst_offset);
5028 ARM_STR_IMM (code, ARMREG_R0, ARMREG_IP, MONO_STRUCT_OFFSET (DynCallArgs, res));
5029 ARM_STR_IMM (code, ARMREG_R1, ARMREG_IP, MONO_STRUCT_OFFSET (DynCallArgs, res2));
5033 if (ins->sreg1 != ARMREG_R0)
5034 ARM_MOV_REG_REG (code, ARMREG_R0, ins->sreg1);
5035 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_INTERNAL_METHOD,
5036 (gpointer)"mono_arch_throw_exception");
5037 code = emit_call_seq (cfg, code);
5041 if (ins->sreg1 != ARMREG_R0)
5042 ARM_MOV_REG_REG (code, ARMREG_R0, ins->sreg1);
5043 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_INTERNAL_METHOD,
5044 (gpointer)"mono_arch_rethrow_exception");
5045 code = emit_call_seq (cfg, code);
5048 case OP_START_HANDLER: {
5049 MonoInst *spvar = mono_find_spvar_for_region (cfg, bb->region);
5052 /* Reserve a param area, see filter-stack.exe */
5053 if (cfg->param_area) {
5054 if ((i = mono_arm_is_rotated_imm8 (cfg->param_area, &rot_amount)) >= 0) {
5055 ARM_SUB_REG_IMM (code, ARMREG_SP, ARMREG_SP, i, rot_amount);
5057 code = mono_arm_emit_load_imm (code, ARMREG_IP, cfg->param_area);
5058 ARM_SUB_REG_REG (code, ARMREG_SP, ARMREG_SP, ARMREG_IP);
5062 if (arm_is_imm12 (spvar->inst_offset)) {
5063 ARM_STR_IMM (code, ARMREG_LR, spvar->inst_basereg, spvar->inst_offset);
5065 code = mono_arm_emit_load_imm (code, ARMREG_IP, spvar->inst_offset);
5066 ARM_STR_REG_REG (code, ARMREG_LR, spvar->inst_basereg, ARMREG_IP);
5070 case OP_ENDFILTER: {
5071 MonoInst *spvar = mono_find_spvar_for_region (cfg, bb->region);
5074 /* Free the param area */
5075 if (cfg->param_area) {
5076 if ((i = mono_arm_is_rotated_imm8 (cfg->param_area, &rot_amount)) >= 0) {
5077 ARM_ADD_REG_IMM (code, ARMREG_SP, ARMREG_SP, i, rot_amount);
5079 code = mono_arm_emit_load_imm (code, ARMREG_IP, cfg->param_area);
5080 ARM_ADD_REG_REG (code, ARMREG_SP, ARMREG_SP, ARMREG_IP);
5084 if (ins->sreg1 != ARMREG_R0)
5085 ARM_MOV_REG_REG (code, ARMREG_R0, ins->sreg1);
5086 if (arm_is_imm12 (spvar->inst_offset)) {
5087 ARM_LDR_IMM (code, ARMREG_IP, spvar->inst_basereg, spvar->inst_offset);
5089 g_assert (ARMREG_IP != spvar->inst_basereg);
5090 code = mono_arm_emit_load_imm (code, ARMREG_IP, spvar->inst_offset);
5091 ARM_LDR_REG_REG (code, ARMREG_IP, spvar->inst_basereg, ARMREG_IP);
5093 ARM_MOV_REG_REG (code, ARMREG_PC, ARMREG_IP);
5096 case OP_ENDFINALLY: {
5097 MonoInst *spvar = mono_find_spvar_for_region (cfg, bb->region);
5100 /* Free the param area */
5101 if (cfg->param_area) {
5102 if ((i = mono_arm_is_rotated_imm8 (cfg->param_area, &rot_amount)) >= 0) {
5103 ARM_ADD_REG_IMM (code, ARMREG_SP, ARMREG_SP, i, rot_amount);
5105 code = mono_arm_emit_load_imm (code, ARMREG_IP, cfg->param_area);
5106 ARM_ADD_REG_REG (code, ARMREG_SP, ARMREG_SP, ARMREG_IP);
5110 if (arm_is_imm12 (spvar->inst_offset)) {
5111 ARM_LDR_IMM (code, ARMREG_IP, spvar->inst_basereg, spvar->inst_offset);
5113 g_assert (ARMREG_IP != spvar->inst_basereg);
5114 code = mono_arm_emit_load_imm (code, ARMREG_IP, spvar->inst_offset);
5115 ARM_LDR_REG_REG (code, ARMREG_IP, spvar->inst_basereg, ARMREG_IP);
5117 ARM_MOV_REG_REG (code, ARMREG_PC, ARMREG_IP);
5120 case OP_CALL_HANDLER:
5121 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_BB, ins->inst_target_bb);
5122 code = mono_arm_patchable_bl (code, ARMCOND_AL);
5123 mono_cfg_add_try_hole (cfg, ins->inst_eh_block, code, bb);
5126 ins->inst_c0 = code - cfg->native_code;
5129 /*if (ins->inst_target_bb->native_offset) {
5131 //x86_jump_code (code, cfg->native_code + ins->inst_target_bb->native_offset);
5133 mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_BB, ins->inst_target_bb);
5134 code = mono_arm_patchable_b (code, ARMCOND_AL);
5138 ARM_MOV_REG_REG (code, ARMREG_PC, ins->sreg1);
5142 * In the normal case we have:
5143 * ldr pc, [pc, ins->sreg1 << 2]
5146 * ldr lr, [pc, ins->sreg1 << 2]
5148 	 * The jump table data follows immediately after.
5149 * FIXME: add aot support.
5151 mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_SWITCH, ins->inst_p0);
5152 #ifdef USE_JUMP_TABLES
5154 gpointer *jte = mono_jumptable_add_entries (GPOINTER_TO_INT (ins->klass));
5155 code = mono_arm_load_jumptable_entry_addr (code, jte, ARMREG_IP);
5156 ARM_LDR_REG_REG_SHIFT (code, ARMREG_PC, ARMREG_IP, ins->sreg1, ARMSHIFT_LSL, 2);
5160 max_len += 4 * GPOINTER_TO_INT (ins->klass);
5161 if (offset + max_len > (cfg->code_size - 16)) {
5162 cfg->code_size += max_len;
5163 cfg->code_size *= 2;
5164 cfg->native_code = g_realloc (cfg->native_code, cfg->code_size);
5165 code = cfg->native_code + offset;
5167 ARM_LDR_REG_REG_SHIFT (code, ARMREG_PC, ARMREG_PC, ins->sreg1, ARMSHIFT_LSL, 2);
5169 code += 4 * GPOINTER_TO_INT (ins->klass);
5174 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 0, ARMCOND_NE);
5175 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_EQ);
5179 ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
5180 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_LT);
5184 ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
5185 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_LO);
5189 ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
5190 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_GT);
5194 ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
5195 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_HI);
5198 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_NE);
5199 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 0, ARMCOND_EQ);
5202 ARM_MOV_REG_IMM8 (code, ins->dreg, 1);
5203 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 0, ARMCOND_LT);
5206 ARM_MOV_REG_IMM8 (code, ins->dreg, 1);
5207 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 0, ARMCOND_GT);
5211 ARM_MOV_REG_IMM8 (code, ins->dreg, 1);
5212 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 0, ARMCOND_LO);
5214 case OP_COND_EXC_EQ:
5215 case OP_COND_EXC_NE_UN:
5216 case OP_COND_EXC_LT:
5217 case OP_COND_EXC_LT_UN:
5218 case OP_COND_EXC_GT:
5219 case OP_COND_EXC_GT_UN:
5220 case OP_COND_EXC_GE:
5221 case OP_COND_EXC_GE_UN:
5222 case OP_COND_EXC_LE:
5223 case OP_COND_EXC_LE_UN:
5224 EMIT_COND_SYSTEM_EXCEPTION (ins->opcode - OP_COND_EXC_EQ, ins->inst_p1);
5226 case OP_COND_EXC_IEQ:
5227 case OP_COND_EXC_INE_UN:
5228 case OP_COND_EXC_ILT:
5229 case OP_COND_EXC_ILT_UN:
5230 case OP_COND_EXC_IGT:
5231 case OP_COND_EXC_IGT_UN:
5232 case OP_COND_EXC_IGE:
5233 case OP_COND_EXC_IGE_UN:
5234 case OP_COND_EXC_ILE:
5235 case OP_COND_EXC_ILE_UN:
5236 EMIT_COND_SYSTEM_EXCEPTION (ins->opcode - OP_COND_EXC_IEQ, ins->inst_p1);
5239 case OP_COND_EXC_IC:
5240 EMIT_COND_SYSTEM_EXCEPTION_FLAGS (ARMCOND_CS, ins->inst_p1);
5242 case OP_COND_EXC_OV:
5243 case OP_COND_EXC_IOV:
5244 EMIT_COND_SYSTEM_EXCEPTION_FLAGS (ARMCOND_VS, ins->inst_p1);
5246 case OP_COND_EXC_NC:
5247 case OP_COND_EXC_INC:
5248 EMIT_COND_SYSTEM_EXCEPTION_FLAGS (ARMCOND_CC, ins->inst_p1);
5250 case OP_COND_EXC_NO:
5251 case OP_COND_EXC_INO:
5252 EMIT_COND_SYSTEM_EXCEPTION_FLAGS (ARMCOND_VC, ins->inst_p1);
5264 EMIT_COND_BRANCH (ins, ins->opcode - OP_IBEQ);
5267 /* floating point opcodes */
5269 if (cfg->compile_aot) {
5270 ARM_FLDD (code, ins->dreg, ARMREG_PC, 0);
5272 *(guint32*)code = ((guint32*)(ins->inst_p0))[0];
5274 *(guint32*)code = ((guint32*)(ins->inst_p0))[1];
5277 /* FIXME: we can optimize the imm load by dealing with part of
5278 * the displacement in LDFD (aligning to 512).
5280 code = mono_arm_emit_load_imm (code, ARMREG_LR, (guint32)ins->inst_p0);
5281 ARM_FLDD (code, ins->dreg, ARMREG_LR, 0);
5285 if (cfg->compile_aot) {
5286 ARM_FLDS (code, ins->dreg, ARMREG_PC, 0);
5288 *(guint32*)code = ((guint32*)(ins->inst_p0))[0];
5290 ARM_CVTS (code, ins->dreg, ins->dreg);
5292 code = mono_arm_emit_load_imm (code, ARMREG_LR, (guint32)ins->inst_p0);
5293 ARM_FLDS (code, ins->dreg, ARMREG_LR, 0);
5294 ARM_CVTS (code, ins->dreg, ins->dreg);
5297 case OP_STORER8_MEMBASE_REG:
5298 /* This is generated by the local regalloc pass which runs after the lowering pass */
5299 if (!arm_is_fpimm8 (ins->inst_offset)) {
5300 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_offset);
5301 ARM_ADD_REG_REG (code, ARMREG_LR, ARMREG_LR, ins->inst_destbasereg);
5302 ARM_FSTD (code, ins->sreg1, ARMREG_LR, 0);
5304 ARM_FSTD (code, ins->sreg1, ins->inst_destbasereg, ins->inst_offset);
5307 case OP_LOADR8_MEMBASE:
5308 /* This is generated by the local regalloc pass which runs after the lowering pass */
5309 if (!arm_is_fpimm8 (ins->inst_offset)) {
5310 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_offset);
5311 ARM_ADD_REG_REG (code, ARMREG_LR, ARMREG_LR, ins->inst_basereg);
5312 ARM_FLDD (code, ins->dreg, ARMREG_LR, 0);
5314 ARM_FLDD (code, ins->dreg, ins->inst_basereg, ins->inst_offset);
5317 case OP_STORER4_MEMBASE_REG:
5318 g_assert (arm_is_fpimm8 (ins->inst_offset));
5319 code = mono_arm_emit_vfp_scratch_save (cfg, code, vfp_scratch1);
5320 ARM_CVTD (code, vfp_scratch1, ins->sreg1);
5321 ARM_FSTS (code, vfp_scratch1, ins->inst_destbasereg, ins->inst_offset);
5322 code = mono_arm_emit_vfp_scratch_restore (cfg, code, vfp_scratch1);
5324 case OP_LOADR4_MEMBASE:
5325 g_assert (arm_is_fpimm8 (ins->inst_offset));
5326 code = mono_arm_emit_vfp_scratch_save (cfg, code, vfp_scratch1);
5327 ARM_FLDS (code, vfp_scratch1, ins->inst_basereg, ins->inst_offset);
5328 ARM_CVTS (code, ins->dreg, vfp_scratch1);
5329 code = mono_arm_emit_vfp_scratch_restore (cfg, code, vfp_scratch1);
5331 case OP_ICONV_TO_R_UN: {
5332 g_assert_not_reached ();
5335 case OP_ICONV_TO_R4:
5336 code = mono_arm_emit_vfp_scratch_save (cfg, code, vfp_scratch1);
5337 ARM_FMSR (code, vfp_scratch1, ins->sreg1);
5338 ARM_FSITOS (code, vfp_scratch1, vfp_scratch1);
5339 ARM_CVTS (code, ins->dreg, vfp_scratch1);
5340 code = mono_arm_emit_vfp_scratch_restore (cfg, code, vfp_scratch1);
5342 case OP_ICONV_TO_R8:
5343 code = mono_arm_emit_vfp_scratch_save (cfg, code, vfp_scratch1);
5344 ARM_FMSR (code, vfp_scratch1, ins->sreg1);
5345 ARM_FSITOD (code, ins->dreg, vfp_scratch1);
5346 code = mono_arm_emit_vfp_scratch_restore (cfg, code, vfp_scratch1);
5350 MonoType *sig_ret = mini_type_get_underlying_type (NULL, mono_method_signature (cfg->method)->ret);
5351 if (sig_ret->type == MONO_TYPE_R4) {
5352 ARM_CVTD (code, ARM_VFP_F0, ins->sreg1);
5354 if (!IS_HARD_FLOAT) {
5355 ARM_FMRS (code, ARMREG_R0, ARM_VFP_F0);
5358 if (IS_HARD_FLOAT) {
5359 ARM_CPYD (code, ARM_VFP_D0, ins->sreg1);
5361 ARM_FMRRD (code, ARMREG_R0, ARMREG_R1, ins->sreg1);
5366 case OP_FCONV_TO_I1:
5367 code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 1, TRUE);
5369 case OP_FCONV_TO_U1:
5370 code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 1, FALSE);
5372 case OP_FCONV_TO_I2:
5373 code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 2, TRUE);
5375 case OP_FCONV_TO_U2:
5376 code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 2, FALSE);
5378 case OP_FCONV_TO_I4:
5380 code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 4, TRUE);
5382 case OP_FCONV_TO_U4:
5384 code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 4, FALSE);
5386 case OP_FCONV_TO_I8:
5387 case OP_FCONV_TO_U8:
5388 g_assert_not_reached ();
5389 /* Implemented as helper calls */
5391 case OP_LCONV_TO_R_UN:
5392 g_assert_not_reached ();
5393 /* Implemented as helper calls */
5395 case OP_LCONV_TO_OVF_I4_2: {
5396 guint8 *high_bit_not_set, *valid_negative, *invalid_negative, *valid_positive;
5398 	 * Valid ints: 0xffffffff:80000000 to 0x00000000:7fffffff
5401 ARM_CMP_REG_IMM8 (code, ins->sreg1, 0);
5402 high_bit_not_set = code;
5403 ARM_B_COND (code, ARMCOND_GE, 0); /*branch if bit 31 of the lower part is not set*/
5405 ARM_CMN_REG_IMM8 (code, ins->sreg2, 1); /*This have the same effect as CMP reg, 0xFFFFFFFF */
5406 valid_negative = code;
5407 ARM_B_COND (code, ARMCOND_EQ, 0); /*branch if upper part == 0xFFFFFFFF (lower part has bit 31 set) */
5408 invalid_negative = code;
5409 ARM_B_COND (code, ARMCOND_AL, 0);
5411 arm_patch (high_bit_not_set, code);
5413 ARM_CMP_REG_IMM8 (code, ins->sreg2, 0);
5414 valid_positive = code;
5415 ARM_B_COND (code, ARMCOND_EQ, 0); /*branch if upper part == 0 (lower part has bit 31 clear)*/
5417 arm_patch (invalid_negative, code);
5418 EMIT_COND_SYSTEM_EXCEPTION_FLAGS (ARMCOND_AL, "OverflowException");
5420 arm_patch (valid_negative, code);
5421 arm_patch (valid_positive, code);
5423 if (ins->dreg != ins->sreg1)
5424 ARM_MOV_REG_REG (code, ins->dreg, ins->sreg1);
5428 ARM_VFP_ADDD (code, ins->dreg, ins->sreg1, ins->sreg2);
5431 ARM_VFP_SUBD (code, ins->dreg, ins->sreg1, ins->sreg2);
5434 ARM_VFP_MULD (code, ins->dreg, ins->sreg1, ins->sreg2);
5437 ARM_VFP_DIVD (code, ins->dreg, ins->sreg1, ins->sreg2);
5440 ARM_NEGD (code, ins->dreg, ins->sreg1);
5444 g_assert_not_reached ();
5448 ARM_CMPD (code, ins->sreg1, ins->sreg2);
5454 ARM_CMPD (code, ins->sreg1, ins->sreg2);
5457 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 0, ARMCOND_NE);
5458 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_EQ);
5462 ARM_CMPD (code, ins->sreg1, ins->sreg2);
5465 ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
5466 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_MI);
5470 ARM_CMPD (code, ins->sreg1, ins->sreg2);
5473 ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
5474 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_MI);
5475 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_VS);
5479 ARM_CMPD (code, ins->sreg2, ins->sreg1);
5482 ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
5483 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_MI);
5487 ARM_CMPD (code, ins->sreg2, ins->sreg1);
5490 ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
5491 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_MI);
5492 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_VS);
5496 ARM_CMPD (code, ins->sreg1, ins->sreg2);
5499 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_NE);
5500 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 0, ARMCOND_EQ);
5504 ARM_CMPD (code, ins->sreg1, ins->sreg2);
5507 ARM_MOV_REG_IMM8 (code, ins->dreg, 1);
5508 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 0, ARMCOND_MI);
5512 ARM_CMPD (code, ins->sreg2, ins->sreg1);
5515 ARM_MOV_REG_IMM8 (code, ins->dreg, 1);
5516 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 0, ARMCOND_MI);
5519 		/* ARM VFP comparison flags (FPSCR NZCV after a compare):
5520 * N Less than ARMCOND_MI
5521 * Z Equal ARMCOND_EQ
5522 * C Greater Than or Equal ARMCOND_CS
5523 * V Unordered ARMCOND_VS
5526 EMIT_COND_BRANCH (ins, OP_IBEQ - OP_IBEQ);
5529 EMIT_COND_BRANCH (ins, OP_IBNE_UN - OP_IBEQ);
5532 EMIT_COND_BRANCH_FLAGS (ins, ARMCOND_MI); /* N set */
5535 EMIT_COND_BRANCH_FLAGS (ins, ARMCOND_VS); /* V set */
5536 EMIT_COND_BRANCH_FLAGS (ins, ARMCOND_MI); /* N set */
5542 g_assert_not_reached ();
5546 EMIT_COND_BRANCH_FLAGS (ins, ARMCOND_GE);
5548 		/* VFP requires EQ even though the docs suggest that just CS is enough */
5549 EMIT_COND_BRANCH_FLAGS (ins, ARMCOND_EQ);
5550 EMIT_COND_BRANCH_FLAGS (ins, ARMCOND_CS);
5554 EMIT_COND_BRANCH_FLAGS (ins, ARMCOND_VS); /* V set */
5555 EMIT_COND_BRANCH_FLAGS (ins, ARMCOND_GE);
5560 code = mono_arm_emit_vfp_scratch_save (cfg, code, vfp_scratch1);
5561 code = mono_arm_emit_vfp_scratch_save (cfg, code, vfp_scratch2);
5563 #ifdef USE_JUMP_TABLES
5565 gpointer *jte = mono_jumptable_add_entries (2);
5566 jte [0] = GUINT_TO_POINTER (0xffffffff);
5567 jte [1] = GUINT_TO_POINTER (0x7fefffff);
5568 code = mono_arm_load_jumptable_entry_addr (code, jte, ARMREG_IP);
5569 ARM_FLDD (code, vfp_scratch1, ARMREG_IP, 0);
5572 ARM_ABSD (code, vfp_scratch2, ins->sreg1);
5573 ARM_FLDD (code, vfp_scratch1, ARMREG_PC, 0);
5575 *(guint32*)code = 0xffffffff;
5577 *(guint32*)code = 0x7fefffff;
5580 ARM_CMPD (code, vfp_scratch2, vfp_scratch1);
5582 EMIT_COND_SYSTEM_EXCEPTION_FLAGS (ARMCOND_GT, "ArithmeticException");
5583 ARM_CMPD (code, ins->sreg1, ins->sreg1);
5585 EMIT_COND_SYSTEM_EXCEPTION_FLAGS (ARMCOND_VS, "ArithmeticException");
5586 ARM_CPYD (code, ins->dreg, ins->sreg1);
5588 code = mono_arm_emit_vfp_scratch_restore (cfg, code, vfp_scratch1);
5589 code = mono_arm_emit_vfp_scratch_restore (cfg, code, vfp_scratch2);
5594 case OP_GC_LIVENESS_DEF:
5595 case OP_GC_LIVENESS_USE:
5596 case OP_GC_PARAM_SLOT_LIVENESS_DEF:
5597 ins->backend.pc_offset = code - cfg->native_code;
5599 case OP_GC_SPILL_SLOT_LIVENESS_DEF:
5600 ins->backend.pc_offset = code - cfg->native_code;
5601 bb->spill_slot_defs = g_slist_prepend_mempool (cfg->mempool, bb->spill_slot_defs, ins);
5605 g_warning ("unknown opcode %s in %s()\n", mono_inst_name (ins->opcode), __FUNCTION__);
5606 g_assert_not_reached ();
5609 if ((cfg->opt & MONO_OPT_BRANCH) && ((code - cfg->native_code - offset) > max_len)) {
5610 g_warning ("wrong maximal instruction length of instruction %s (expected %d, got %d)",
5611 mono_inst_name (ins->opcode), max_len, code - cfg->native_code - offset);
5612 g_assert_not_reached ();
5618 last_offset = offset;
5621 cfg->code_len = code - cfg->native_code;
5624 #endif /* DISABLE_JIT */
5626 #ifdef HAVE_AEABI_READ_TP
5627 void __aeabi_read_tp (void);
5631 mono_arch_register_lowlevel_calls (void)
5633 /* The signature doesn't matter */
5634 mono_register_jit_icall (mono_arm_throw_exception, "mono_arm_throw_exception", mono_create_icall_signature ("void"), TRUE);
5635 mono_register_jit_icall (mono_arm_throw_exception_by_token, "mono_arm_throw_exception_by_token", mono_create_icall_signature ("void"), TRUE);
5637 #ifndef MONO_CROSS_COMPILE
5638 #ifdef HAVE_AEABI_READ_TP
5639 mono_register_jit_icall (__aeabi_read_tp, "__aeabi_read_tp", mono_create_icall_signature ("void"), TRUE);
5644 #define patch_lis_ori(ip,val) do {\
5645 guint16 *__lis_ori = (guint16*)(ip); \
5646 __lis_ori [1] = (((guint32)(val)) >> 16) & 0xffff; \
5647 __lis_ori [3] = ((guint32)(val)) & 0xffff; \
5651 mono_arch_patch_code (MonoMethod *method, MonoDomain *domain, guint8 *code, MonoJumpInfo *ji, MonoCodeManager *dyn_code_mp, gboolean run_cctors)
5653 MonoJumpInfo *patch_info;
5654 gboolean compile_aot = !run_cctors;
5656 for (patch_info = ji; patch_info; patch_info = patch_info->next) {
5657 unsigned char *ip = patch_info->ip.i + code;
5658 const unsigned char *target;
5660 if (patch_info->type == MONO_PATCH_INFO_SWITCH && !compile_aot) {
5661 #ifdef USE_JUMP_TABLES
5662 gpointer *jt = mono_jumptable_get_entry (ip);
5664 gpointer *jt = (gpointer*)(ip + 8);
5667 /* jt is the inlined jump table, 2 instructions after ip
5668 * In the normal case we store the absolute addresses,
5669 * otherwise the displacements.
5671 for (i = 0; i < patch_info->data.table->table_size; i++)
5672 jt [i] = code + (int)patch_info->data.table->table [i];
5677 switch (patch_info->type) {
5678 case MONO_PATCH_INFO_BB:
5679 case MONO_PATCH_INFO_LABEL:
5682 /* No need to patch these */
5687 target = mono_resolve_patch_target (method, domain, code, patch_info, run_cctors);
5689 switch (patch_info->type) {
5690 case MONO_PATCH_INFO_IP:
5691 g_assert_not_reached ();
5692 patch_lis_ori (ip, ip);
5694 case MONO_PATCH_INFO_METHOD_REL:
5695 g_assert_not_reached ();
5696 *((gpointer *)(ip)) = code + patch_info->data.offset;
5698 case MONO_PATCH_INFO_METHODCONST:
5699 case MONO_PATCH_INFO_CLASS:
5700 case MONO_PATCH_INFO_IMAGE:
5701 case MONO_PATCH_INFO_FIELD:
5702 case MONO_PATCH_INFO_VTABLE:
5703 case MONO_PATCH_INFO_IID:
5704 case MONO_PATCH_INFO_SFLDA:
5705 case MONO_PATCH_INFO_LDSTR:
5706 case MONO_PATCH_INFO_TYPE_FROM_HANDLE:
5707 case MONO_PATCH_INFO_LDTOKEN:
5708 g_assert_not_reached ();
5709 /* from OP_AOTCONST : lis + ori */
5710 patch_lis_ori (ip, target);
5712 case MONO_PATCH_INFO_R4:
5713 case MONO_PATCH_INFO_R8:
5714 g_assert_not_reached ();
5715 *((gconstpointer *)(ip + 2)) = patch_info->data.target;
5717 case MONO_PATCH_INFO_EXC_NAME:
5718 g_assert_not_reached ();
5719 *((gconstpointer *)(ip + 1)) = patch_info->data.name;
5721 case MONO_PATCH_INFO_NONE:
5722 case MONO_PATCH_INFO_BB_OVF:
5723 case MONO_PATCH_INFO_EXC_OVF:
5724 /* everything is dealt with at epilog output time */
5729 arm_patch_general (domain, ip, target, dyn_code_mp);
5736 * Stack frame layout:
5738 * ------------------- fp
5739 * MonoLMF structure or saved registers
5740 * -------------------
5742 * -------------------
5744 * -------------------
5745 * optional 8 bytes for tracing
5746 * -------------------
5747 * param area size is cfg->param_area
5748 * ------------------- sp
5751 mono_arch_emit_prolog (MonoCompile *cfg)
5753 MonoMethod *method = cfg->method;
5755 MonoMethodSignature *sig;
5757 int alloc_size, orig_alloc_size, pos, max_offset, i, rot_amount;
5762 int prev_sp_offset, reg_offset;
5764 if (mono_jit_trace_calls != NULL && mono_trace_eval (method))
5767 sig = mono_method_signature (method);
5768 cfg->code_size = 256 + sig->param_count * 64;
5769 code = cfg->native_code = g_malloc (cfg->code_size);
5771 mono_emit_unwind_op_def_cfa (cfg, code, ARMREG_SP, 0);
5773 alloc_size = cfg->stack_offset;
5779 * The iphone uses R7 as the frame pointer, and it points at the saved
5784 * We can't use r7 as a frame pointer since it points into the middle of
5785 * the frame, so we keep using our own frame pointer.
5786 * FIXME: Optimize this.
5788 ARM_PUSH (code, (1 << ARMREG_R7) | (1 << ARMREG_LR));
5789 ARM_MOV_REG_REG (code, ARMREG_R7, ARMREG_SP);
5790 prev_sp_offset += 8; /* r7 and lr */
5791 mono_emit_unwind_op_def_cfa_offset (cfg, code, prev_sp_offset);
5792 mono_emit_unwind_op_offset (cfg, code, ARMREG_R7, (- prev_sp_offset) + 0);
5795 if (!method->save_lmf) {
5797 /* No need to push LR again */
5798 if (cfg->used_int_regs)
5799 ARM_PUSH (code, cfg->used_int_regs);
5801 ARM_PUSH (code, cfg->used_int_regs | (1 << ARMREG_LR));
5802 prev_sp_offset += 4;
5804 for (i = 0; i < 16; ++i) {
5805 if (cfg->used_int_regs & (1 << i))
5806 prev_sp_offset += 4;
5808 mono_emit_unwind_op_def_cfa_offset (cfg, code, prev_sp_offset);
5810 for (i = 0; i < 16; ++i) {
5811 if ((cfg->used_int_regs & (1 << i))) {
5812 mono_emit_unwind_op_offset (cfg, code, i, (- prev_sp_offset) + reg_offset);
5813 mini_gc_set_slot_type_from_cfa (cfg, (- prev_sp_offset) + reg_offset, SLOT_NOREF);
5818 mono_emit_unwind_op_offset (cfg, code, ARMREG_LR, -4);
5819 mini_gc_set_slot_type_from_cfa (cfg, -4, SLOT_NOREF);
5821 mono_emit_unwind_op_offset (cfg, code, ARMREG_LR, -4);
5822 mini_gc_set_slot_type_from_cfa (cfg, -4, SLOT_NOREF);
5825 ARM_MOV_REG_REG (code, ARMREG_IP, ARMREG_SP);
5826 ARM_PUSH (code, 0x5ff0);
5827 prev_sp_offset += 4 * 10; /* all but r0-r3, sp and pc */
5828 mono_emit_unwind_op_def_cfa_offset (cfg, code, prev_sp_offset);
5830 for (i = 0; i < 16; ++i) {
5831 if ((i > ARMREG_R3) && (i != ARMREG_SP) && (i != ARMREG_PC)) {
5832 /* The original r7 is saved at the start */
5833 if (!(iphone_abi && i == ARMREG_R7))
5834 mono_emit_unwind_op_offset (cfg, code, i, (- prev_sp_offset) + reg_offset);
5838 g_assert (reg_offset == 4 * 10);
5839 pos += sizeof (MonoLMF) - (4 * 10);
5843 orig_alloc_size = alloc_size;
5844 // align to MONO_ARCH_FRAME_ALIGNMENT bytes
5845 if (alloc_size & (MONO_ARCH_FRAME_ALIGNMENT - 1)) {
5846 alloc_size += MONO_ARCH_FRAME_ALIGNMENT - 1;
5847 alloc_size &= ~(MONO_ARCH_FRAME_ALIGNMENT - 1);
5850 /* the stack used in the pushed regs */
5851 if (prev_sp_offset & 4)
5853 cfg->stack_usage = alloc_size;
5855 if ((i = mono_arm_is_rotated_imm8 (alloc_size, &rot_amount)) >= 0) {
5856 ARM_SUB_REG_IMM (code, ARMREG_SP, ARMREG_SP, i, rot_amount);
5858 code = mono_arm_emit_load_imm (code, ARMREG_IP, alloc_size);
5859 ARM_SUB_REG_REG (code, ARMREG_SP, ARMREG_SP, ARMREG_IP);
5861 mono_emit_unwind_op_def_cfa_offset (cfg, code, prev_sp_offset + alloc_size);
5863 if (cfg->frame_reg != ARMREG_SP) {
5864 ARM_MOV_REG_REG (code, cfg->frame_reg, ARMREG_SP);
5865 mono_emit_unwind_op_def_cfa_reg (cfg, code, cfg->frame_reg);
5867 //g_print ("prev_sp_offset: %d, alloc_size:%d\n", prev_sp_offset, alloc_size);
5868 prev_sp_offset += alloc_size;
5870 for (i = 0; i < alloc_size - orig_alloc_size; i += 4)
5871 mini_gc_set_slot_type_from_cfa (cfg, (- prev_sp_offset) + orig_alloc_size + i, SLOT_NOREF);
5873 	/* compute max_offset in order to use short forward jumps;
5874 	 * we could skip doing this on arm because the immediate displacement
5875 	 * for jumps is large enough, but it may be useful later for constant pools.
5878 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
5879 MonoInst *ins = bb->code;
5880 bb->max_offset = max_offset;
5882 if (cfg->prof_options & MONO_PROFILE_COVERAGE)
5885 MONO_BB_FOR_EACH_INS (bb, ins)
5886 max_offset += ((guint8 *)ins_get_spec (ins->opcode))[MONO_INST_LEN];
5889 /* store runtime generic context */
5890 if (cfg->rgctx_var) {
5891 MonoInst *ins = cfg->rgctx_var;
5893 g_assert (ins->opcode == OP_REGOFFSET);
5895 if (arm_is_imm12 (ins->inst_offset)) {
5896 ARM_STR_IMM (code, MONO_ARCH_RGCTX_REG, ins->inst_basereg, ins->inst_offset);
5898 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_offset);
5899 ARM_STR_REG_REG (code, MONO_ARCH_RGCTX_REG, ins->inst_basereg, ARMREG_LR);
5903 /* load arguments allocated to register from the stack */
5906 cinfo = get_call_info (cfg->generic_sharing_context, NULL, sig);
5908 if (cinfo->vtype_retaddr) {
5909 ArgInfo *ainfo = &cinfo->ret;
5910 inst = cfg->vret_addr;
5911 g_assert (arm_is_imm12 (inst->inst_offset));
5912 ARM_STR_IMM (code, ainfo->reg, inst->inst_basereg, inst->inst_offset);
5915 if (sig->call_convention == MONO_CALL_VARARG) {
5916 ArgInfo *cookie = &cinfo->sig_cookie;
5918 /* Save the sig cookie address */
5919 g_assert (cookie->storage == RegTypeBase);
5921 g_assert (arm_is_imm12 (prev_sp_offset + cookie->offset));
5922 g_assert (arm_is_imm12 (cfg->sig_cookie));
5923 ARM_ADD_REG_IMM8 (code, ARMREG_IP, cfg->frame_reg, prev_sp_offset + cookie->offset);
5924 ARM_STR_IMM (code, ARMREG_IP, cfg->frame_reg, cfg->sig_cookie);
5927 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
5928 ArgInfo *ainfo = cinfo->args + i;
5929 inst = cfg->args [pos];
5931 if (cfg->verbose_level > 2)
5932 g_print ("Saving argument %d (type: %d)\n", i, ainfo->storage);
5933 if (inst->opcode == OP_REGVAR) {
5934 if (ainfo->storage == RegTypeGeneral)
5935 ARM_MOV_REG_REG (code, inst->dreg, ainfo->reg);
5936 else if (ainfo->storage == RegTypeFP) {
5937 g_assert_not_reached ();
5938 } else if (ainfo->storage == RegTypeBase) {
5939 if (arm_is_imm12 (prev_sp_offset + ainfo->offset)) {
5940 ARM_LDR_IMM (code, inst->dreg, ARMREG_SP, (prev_sp_offset + ainfo->offset));
5942 code = mono_arm_emit_load_imm (code, ARMREG_IP, prev_sp_offset + ainfo->offset);
5943 ARM_LDR_REG_REG (code, inst->dreg, ARMREG_SP, ARMREG_IP);
5946 g_assert_not_reached ();
5948 if (cfg->verbose_level > 2)
5949 g_print ("Argument %d assigned to register %s\n", pos, mono_arch_regname (inst->dreg));
5951 /* the argument should be put on the stack: FIXME handle size != word */
5952 if (ainfo->storage == RegTypeGeneral || ainfo->storage == RegTypeIRegPair || ainfo->storage == RegTypeGSharedVtInReg) {
5953 switch (ainfo->size) {
5955 if (arm_is_imm12 (inst->inst_offset))
5956 ARM_STRB_IMM (code, ainfo->reg, inst->inst_basereg, inst->inst_offset);
5958 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
5959 ARM_STRB_REG_REG (code, ainfo->reg, inst->inst_basereg, ARMREG_IP);
5963 if (arm_is_imm8 (inst->inst_offset)) {
5964 ARM_STRH_IMM (code, ainfo->reg, inst->inst_basereg, inst->inst_offset);
5966 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
5967 ARM_STRH_REG_REG (code, ainfo->reg, inst->inst_basereg, ARMREG_IP);
5971 if (arm_is_imm12 (inst->inst_offset)) {
5972 ARM_STR_IMM (code, ainfo->reg, inst->inst_basereg, inst->inst_offset);
5974 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
5975 ARM_STR_REG_REG (code, ainfo->reg, inst->inst_basereg, ARMREG_IP);
5977 if (arm_is_imm12 (inst->inst_offset + 4)) {
5978 ARM_STR_IMM (code, ainfo->reg + 1, inst->inst_basereg, inst->inst_offset + 4);
5980 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset + 4);
5981 ARM_STR_REG_REG (code, ainfo->reg + 1, inst->inst_basereg, ARMREG_IP);
5985 if (arm_is_imm12 (inst->inst_offset)) {
5986 ARM_STR_IMM (code, ainfo->reg, inst->inst_basereg, inst->inst_offset);
5988 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
5989 ARM_STR_REG_REG (code, ainfo->reg, inst->inst_basereg, ARMREG_IP);
5993 } else if (ainfo->storage == RegTypeBaseGen) {
5994 if (arm_is_imm12 (prev_sp_offset + ainfo->offset)) {
5995 ARM_LDR_IMM (code, ARMREG_LR, ARMREG_SP, (prev_sp_offset + ainfo->offset));
5997 code = mono_arm_emit_load_imm (code, ARMREG_IP, prev_sp_offset + ainfo->offset);
5998 ARM_LDR_REG_REG (code, ARMREG_LR, ARMREG_SP, ARMREG_IP);
6000 if (arm_is_imm12 (inst->inst_offset + 4)) {
6001 ARM_STR_IMM (code, ARMREG_LR, inst->inst_basereg, inst->inst_offset + 4);
6002 ARM_STR_IMM (code, ARMREG_R3, inst->inst_basereg, inst->inst_offset);
6004 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset + 4);
6005 ARM_STR_REG_REG (code, ARMREG_LR, inst->inst_basereg, ARMREG_IP);
6006 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
6007 ARM_STR_REG_REG (code, ARMREG_R3, inst->inst_basereg, ARMREG_IP);
6009 } else if (ainfo->storage == RegTypeBase || ainfo->storage == RegTypeGSharedVtOnStack) {
6010 if (arm_is_imm12 (prev_sp_offset + ainfo->offset)) {
6011 ARM_LDR_IMM (code, ARMREG_LR, ARMREG_SP, (prev_sp_offset + ainfo->offset));
6013 code = mono_arm_emit_load_imm (code, ARMREG_IP, prev_sp_offset + ainfo->offset);
6014 ARM_LDR_REG_REG (code, ARMREG_LR, ARMREG_SP, ARMREG_IP);
6017 switch (ainfo->size) {
6019 if (arm_is_imm8 (inst->inst_offset)) {
6020 ARM_STRB_IMM (code, ARMREG_LR, inst->inst_basereg, inst->inst_offset);
6022 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
6023 ARM_STRB_REG_REG (code, ARMREG_LR, inst->inst_basereg, ARMREG_IP);
6027 if (arm_is_imm8 (inst->inst_offset)) {
6028 ARM_STRH_IMM (code, ARMREG_LR, inst->inst_basereg, inst->inst_offset);
6030 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
6031 ARM_STRH_REG_REG (code, ARMREG_LR, inst->inst_basereg, ARMREG_IP);
6035 if (arm_is_imm12 (inst->inst_offset)) {
6036 ARM_STR_IMM (code, ARMREG_LR, inst->inst_basereg, inst->inst_offset);
6038 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
6039 ARM_STR_REG_REG (code, ARMREG_LR, inst->inst_basereg, ARMREG_IP);
6041 if (arm_is_imm12 (prev_sp_offset + ainfo->offset + 4)) {
6042 ARM_LDR_IMM (code, ARMREG_LR, ARMREG_SP, (prev_sp_offset + ainfo->offset + 4));
6044 code = mono_arm_emit_load_imm (code, ARMREG_IP, prev_sp_offset + ainfo->offset + 4);
6045 ARM_LDR_REG_REG (code, ARMREG_LR, ARMREG_SP, ARMREG_IP);
6047 if (arm_is_imm12 (inst->inst_offset + 4)) {
6048 ARM_STR_IMM (code, ARMREG_LR, inst->inst_basereg, inst->inst_offset + 4);
6050 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset + 4);
6051 ARM_STR_REG_REG (code, ARMREG_LR, inst->inst_basereg, ARMREG_IP);
6055 if (arm_is_imm12 (inst->inst_offset)) {
6056 ARM_STR_IMM (code, ARMREG_LR, inst->inst_basereg, inst->inst_offset);
6058 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
6059 ARM_STR_REG_REG (code, ARMREG_LR, inst->inst_basereg, ARMREG_IP);
6063 } else if (ainfo->storage == RegTypeFP) {
6064 int imm8, rot_amount;
6066 if ((imm8 = mono_arm_is_rotated_imm8 (inst->inst_offset, &rot_amount)) == -1) {
6067 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
6068 ARM_ADD_REG_REG (code, ARMREG_IP, ARMREG_IP, inst->inst_basereg);
6070 ARM_ADD_REG_IMM (code, ARMREG_IP, inst->inst_basereg, imm8, rot_amount);
6072 if (ainfo->size == 8)
6073 ARM_FSTD (code, ainfo->reg, ARMREG_IP, 0);
6075 ARM_FSTS (code, ainfo->reg, ARMREG_IP, 0);
6076 } else if (ainfo->storage == RegTypeStructByVal) {
6077 int doffset = inst->inst_offset;
6081 size = mini_type_stack_size_full (cfg->generic_sharing_context, inst->inst_vtype, NULL, sig->pinvoke);
6082 for (cur_reg = 0; cur_reg < ainfo->size; ++cur_reg) {
6083 if (arm_is_imm12 (doffset)) {
6084 ARM_STR_IMM (code, ainfo->reg + cur_reg, inst->inst_basereg, doffset);
6086 code = mono_arm_emit_load_imm (code, ARMREG_IP, doffset);
6087 ARM_STR_REG_REG (code, ainfo->reg + cur_reg, inst->inst_basereg, ARMREG_IP);
6089 soffset += sizeof (gpointer);
6090 doffset += sizeof (gpointer);
6092 if (ainfo->vtsize) {
6093 /* FIXME: handle overrun! with struct sizes not multiple of 4 */
6094 //g_print ("emit_memcpy (prev_sp_ofs: %d, ainfo->offset: %d, soffset: %d)\n", prev_sp_offset, ainfo->offset, soffset);
6095 code = emit_memcpy (code, ainfo->vtsize * sizeof (gpointer), inst->inst_basereg, doffset, ARMREG_SP, prev_sp_offset + ainfo->offset);
6097 } else if (ainfo->storage == RegTypeStructByAddr) {
6098 g_assert_not_reached ();
6099 /* FIXME: handle overrun! with struct sizes not multiple of 4 */
6100 code = emit_memcpy (code, ainfo->vtsize * sizeof (gpointer), inst->inst_basereg, inst->inst_offset, ainfo->reg, 0);
6102 g_assert_not_reached ();
6107 if (method->save_lmf)
6108 code = emit_save_lmf (cfg, code, alloc_size - lmf_offset);
6111 code = mono_arch_instrument_prolog (cfg, mono_trace_enter_method, code, TRUE);
6113 if (cfg->arch.seq_point_info_var) {
6114 MonoInst *ins = cfg->arch.seq_point_info_var;
6116 /* Initialize the variable from a GOT slot */
6117 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_SEQ_POINT_INFO, cfg->method);
6118 #ifdef USE_JUMP_TABLES
6120 gpointer *jte = mono_jumptable_add_entry ();
6121 code = mono_arm_load_jumptable_entry (code, jte, ARMREG_IP);
6122 ARM_LDR_IMM (code, ARMREG_R0, ARMREG_IP, 0);
6124 /** XXX: is it correct? */
6126 ARM_LDR_IMM (code, ARMREG_R0, ARMREG_PC, 0);
6128 *(gpointer*)code = NULL;
6131 ARM_LDR_REG_REG (code, ARMREG_R0, ARMREG_PC, ARMREG_R0);
6133 g_assert (ins->opcode == OP_REGOFFSET);
6135 if (arm_is_imm12 (ins->inst_offset)) {
6136 ARM_STR_IMM (code, ARMREG_R0, ins->inst_basereg, ins->inst_offset);
6138 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_offset);
6139 ARM_STR_REG_REG (code, ARMREG_R0, ins->inst_basereg, ARMREG_LR);
6143 /* Initialize ss_trigger_page_var */
6144 if (!cfg->soft_breakpoints) {
6145 MonoInst *info_var = cfg->arch.seq_point_info_var;
6146 MonoInst *ss_trigger_page_var = cfg->arch.ss_trigger_page_var;
6147 int dreg = ARMREG_LR;
6150 g_assert (info_var->opcode == OP_REGOFFSET);
6151 g_assert (arm_is_imm12 (info_var->inst_offset));
6153 ARM_LDR_IMM (code, dreg, info_var->inst_basereg, info_var->inst_offset);
6154 /* Load the trigger page addr */
6155 ARM_LDR_IMM (code, dreg, dreg, MONO_STRUCT_OFFSET (SeqPointInfo, ss_trigger_page));
6156 ARM_STR_IMM (code, dreg, ss_trigger_page_var->inst_basereg, ss_trigger_page_var->inst_offset);
6160 if (cfg->arch.seq_point_read_var) {
6161 MonoInst *read_ins = cfg->arch.seq_point_read_var;
6162 MonoInst *ss_method_ins = cfg->arch.seq_point_ss_method_var;
6163 MonoInst *bp_method_ins = cfg->arch.seq_point_bp_method_var;
6164 #ifdef USE_JUMP_TABLES
6167 g_assert (read_ins->opcode == OP_REGOFFSET);
6168 g_assert (arm_is_imm12 (read_ins->inst_offset));
6169 g_assert (ss_method_ins->opcode == OP_REGOFFSET);
6170 g_assert (arm_is_imm12 (ss_method_ins->inst_offset));
6171 g_assert (bp_method_ins->opcode == OP_REGOFFSET);
6172 g_assert (arm_is_imm12 (bp_method_ins->inst_offset));
6174 #ifdef USE_JUMP_TABLES
6175 jte = mono_jumptable_add_entries (3);
6176 jte [0] = (gpointer)&ss_trigger_var;
6177 jte [1] = single_step_func_wrapper;
6178 jte [2] = breakpoint_func_wrapper;
6179 code = mono_arm_load_jumptable_entry_addr (code, jte, ARMREG_LR);
6181 ARM_MOV_REG_REG (code, ARMREG_LR, ARMREG_PC);
6183 *(volatile int **)code = &ss_trigger_var;
6185 *(gpointer*)code = single_step_func_wrapper;
6187 *(gpointer*)code = breakpoint_func_wrapper;
6191 ARM_LDR_IMM (code, ARMREG_IP, ARMREG_LR, 0);
6192 ARM_STR_IMM (code, ARMREG_IP, read_ins->inst_basereg, read_ins->inst_offset);
6193 ARM_LDR_IMM (code, ARMREG_IP, ARMREG_LR, 4);
6194 ARM_STR_IMM (code, ARMREG_IP, ss_method_ins->inst_basereg, ss_method_ins->inst_offset);
6195 ARM_LDR_IMM (code, ARMREG_IP, ARMREG_LR, 8);
6196 ARM_STR_IMM (code, ARMREG_IP, bp_method_ins->inst_basereg, bp_method_ins->inst_offset);
6199 cfg->code_len = code - cfg->native_code;
6200 g_assert (cfg->code_len < cfg->code_size);
/*
 * mono_arch_emit_epilog:
 * Emit the ARM method epilog: grow the native code buffer if the worst-case
 * epilog would not fit, emit the leave-method trace hook when tracing is on,
 * reload a struct returned by value into r0, then restore callee-saved state
 * and return by popping the saved LR directly into PC.
 * NOTE(review): intermediate source lines are elided in this view; comments
 * describe only the code shown.
 */
6207 mono_arch_emit_epilog (MonoCompile *cfg)
6209 	MonoMethod *method = cfg->method;
6210 	int pos, i, rot_amount;
/* Worst-case epilog size estimate; refined below for LMF/tracing/profiling. */
6211 	int max_epilog_size = 16 + 20*4;
6215 	if (cfg->method->save_lmf)
6216 		max_epilog_size += 128;
6218 	if (mono_jit_trace_calls != NULL)
6219 		max_epilog_size += 50;
6221 	if (cfg->prof_options & MONO_PROFILE_ENTER_LEAVE)
6222 		max_epilog_size += 50;
/* Double the buffer until the estimated epilog fits (16 bytes of slack kept). */
6224 	while (cfg->code_len + max_epilog_size > (cfg->code_size - 16)) {
6225 		cfg->code_size *= 2;
6226 		cfg->native_code = g_realloc (cfg->native_code, cfg->code_size);
6227 		cfg->stat_code_reallocs++;
6231 	 * Keep in sync with OP_JMP
6233 	code = cfg->native_code + cfg->code_len;
6235 	if (mono_jit_trace_calls != NULL && mono_trace_eval (method)) {
6236 		code = mono_arch_instrument_epilog (cfg, mono_trace_leave_method, code, TRUE);
6240 	/* Load returned vtypes into registers if needed */
6241 	cinfo = cfg->arch.cinfo;
6242 	if (cinfo->ret.storage == RegTypeStructByVal) {
6243 		MonoInst *ins = cfg->ret;
/* Use the 12-bit immediate form when the offset fits, otherwise go through LR. */
6245 		if (arm_is_imm12 (ins->inst_offset)) {
6246 			ARM_LDR_IMM (code, ARMREG_R0, ins->inst_basereg, ins->inst_offset);
6248 			code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_offset);
6249 			ARM_LDR_REG_REG (code, ARMREG_R0, ins->inst_basereg, ARMREG_LR);
/* LMF path: restore the saved registers out of the MonoLMF on the stack. */
6253 	if (method->save_lmf) {
6254 		int lmf_offset, reg, sp_adj, regmask;
6255 		/* all but r0-r3, sp and pc */
6256 		pos += sizeof (MonoLMF) - (MONO_ARM_NUM_SAVED_REGS * sizeof (mgreg_t));
6259 		code = emit_restore_lmf (cfg, code, cfg->stack_usage - lmf_offset);
6261 		/* This points to r4 inside MonoLMF->iregs */
6262 		sp_adj = (sizeof (MonoLMF) - MONO_ARM_NUM_SAVED_REGS * sizeof (mgreg_t));
6264 		regmask = 0x9ff0; /* restore lr to pc */
6265 		/* Skip caller saved registers not used by the method */
6266 		while (!(cfg->used_int_regs & (1 << reg)) && reg < ARMREG_FP) {
6267 			regmask &= ~(1 << reg);
6272 			/* Restored later */
6273 			regmask &= ~(1 << ARMREG_PC);
6274 		/* point sp at the registers to restore: 10 is 14 -4, because we skip r0-r3 */
6275 		code = emit_big_add (code, ARMREG_SP, cfg->frame_reg, cfg->stack_usage - lmf_offset + sp_adj);
6277 		ARM_POP (code, regmask);
6279 			/* Restore saved r7, restore LR to PC */
6280 			/* Skip lr from the lmf */
6281 			ARM_ADD_REG_IMM (code, ARMREG_SP, ARMREG_SP, sizeof (gpointer), 0);
6282 			ARM_POP (code, (1 << ARMREG_R7) | (1 << ARMREG_PC));
/* Non-LMF path: unwind the frame, then pop the callee-saved regs + PC. */
6285 		if ((i = mono_arm_is_rotated_imm8 (cfg->stack_usage, &rot_amount)) >= 0) {
6286 			ARM_ADD_REG_IMM (code, ARMREG_SP, cfg->frame_reg, i, rot_amount);
6288 			code = mono_arm_emit_load_imm (code, ARMREG_IP, cfg->stack_usage);
6289 			ARM_ADD_REG_REG (code, ARMREG_SP, cfg->frame_reg, ARMREG_IP);
6293 			/* Restore saved gregs */
6294 			if (cfg->used_int_regs)
6295 				ARM_POP (code, cfg->used_int_regs);
6296 			/* Restore saved r7, restore LR to PC */
6297 			ARM_POP (code, (1 << ARMREG_R7) | (1 << ARMREG_PC));
6299 			ARM_POP (code, cfg->used_int_regs | (1 << ARMREG_PC));
6303 	cfg->code_len = code - cfg->native_code;
6305 	g_assert (cfg->code_len < cfg->code_size);
/*
 * mono_arch_emit_exceptions:
 * Append the out-of-line exception-throwing stubs for this method. Throw
 * sites of the same exception class share one stub: the first occurrence
 * emits the stub, later ones are just branch-patched to it. Each stub loads
 * the exception type token into r0, the return address into r1, and calls
 * mono_arch_throw_corlib_exception.
 * NOTE(review): intermediate source lines are elided in this view.
 */
6310 mono_arch_emit_exceptions (MonoCompile *cfg)
6312 	MonoJumpInfo *patch_info;
/* One slot per intrinsic exception class; pos = stub address once emitted. */
6315 	guint8* exc_throw_pos [MONO_EXC_INTRINS_NUM];
6316 	guint8 exc_throw_found [MONO_EXC_INTRINS_NUM];
6317 	int max_epilog_size = 50;
6319 	for (i = 0; i < MONO_EXC_INTRINS_NUM; i++) {
6320 		exc_throw_pos [i] = NULL;
6321 		exc_throw_found [i] = 0;
6324 	/* count the number of exception infos */
6327 	 * make sure we have enough space for exceptions
/* First pass: reserve 32 bytes per distinct exception class that is thrown. */
6329 	for (patch_info = cfg->patch_info; patch_info; patch_info = patch_info->next) {
6330 		if (patch_info->type == MONO_PATCH_INFO_EXC) {
6331 			i = mini_exception_id_by_name (patch_info->data.target);
6332 			if (!exc_throw_found [i]) {
6333 				max_epilog_size += 32;
6334 				exc_throw_found [i] = TRUE;
6339 	while (cfg->code_len + max_epilog_size > (cfg->code_size - 16)) {
6340 		cfg->code_size *= 2;
6341 		cfg->native_code = g_realloc (cfg->native_code, cfg->code_size);
6342 		cfg->stat_code_reallocs++;
6345 	code = cfg->native_code + cfg->code_len;
6347 	/* add code to raise exceptions */
6348 	for (patch_info = cfg->patch_info; patch_info; patch_info = patch_info->next) {
6349 		switch (patch_info->type) {
6350 		case MONO_PATCH_INFO_EXC: {
6351 			MonoClass *exc_class;
6352 			unsigned char *ip = patch_info->ip.i + cfg->native_code;
6354 			i = mini_exception_id_by_name (patch_info->data.target);
/* Stub already emitted for this class: just patch the branch and retire the patch. */
6355 			if (exc_throw_pos [i]) {
6356 				arm_patch (ip, exc_throw_pos [i]);
6357 				patch_info->type = MONO_PATCH_INFO_NONE;
6360 				exc_throw_pos [i] = code;
6362 			arm_patch (ip, code);
6364 			exc_class = mono_class_from_name (mono_defaults.corlib, "System", patch_info->data.name);
6365 			g_assert (exc_class);
/* r1 = caller's return address, used by the throw helper for stack walking. */
6367 			ARM_MOV_REG_REG (code, ARMREG_R1, ARMREG_LR);
6368 #ifdef USE_JUMP_TABLES
/* Jumptable variant: slot 0 = throw helper, slot 1 = exception type token. */
6370 				gpointer *jte = mono_jumptable_add_entries (2);
6371 				patch_info->type = MONO_PATCH_INFO_INTERNAL_METHOD;
6372 				patch_info->data.name = "mono_arch_throw_corlib_exception";
6373 				patch_info->ip.i = code - cfg->native_code;
6374 				code = mono_arm_load_jumptable_entry_addr (code, jte, ARMREG_R0);
6375 				ARM_LDR_IMM (code, ARMREG_IP, ARMREG_R0, 0);
6376 				ARM_LDR_IMM (code, ARMREG_R0, ARMREG_R0, 4);
6377 				ARM_BLX_REG (code, ARMREG_IP);
6378 				jte [1] = GUINT_TO_POINTER (exc_class->type_token);
/* Non-jumptable variant: token is embedded in the code stream, loaded PC-relative. */
6381 			ARM_LDR_IMM (code, ARMREG_R0, ARMREG_PC, 0);
6382 			patch_info->type = MONO_PATCH_INFO_INTERNAL_METHOD;
6383 			patch_info->data.name = "mono_arch_throw_corlib_exception";
6384 			patch_info->ip.i = code - cfg->native_code;
6386 			*(guint32*)(gpointer)code = exc_class->type_token;
6397 	cfg->code_len = code - cfg->native_code;
6399 	g_assert (cfg->code_len < cfg->code_size);
6403 #endif /* #ifndef DISABLE_JIT */
/* The following arch hooks are no-ops / trivial on ARM (bodies elided in this view). */
/* Called once at JIT init after all backends are registered. */
6406 mono_arch_finish_init (void)
/* Free per-thread JIT TLS data; nothing ARM-specific to release. */
6411 mono_arch_free_jit_tls_data (MonoJitTlsData *tls)
/* Arch-specific intrinsic lowering hook; returns NULL when no intrinsic applies. */
6416 mono_arch_emit_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
/* Debug pretty-printer hook for the old tree-based IR. */
6423 mono_arch_print_tree (MonoInst *tree, int arity)
/* Offset of the patchable location inside a call sequence at CODE. */
6433 mono_arch_get_patch_offset (guint8 *code)
/* Register-window flush; meaningless on ARM (SPARC-ism), so a no-op. */
6440 mono_arch_flush_register_windows (void)
/*
 * mono_arch_emit_imt_argument:
 * Arrange for the IMT/interface method to be passed to the callee in V5
 * (MONO_ARCH_IMT_REG). AOT compilation always materializes the method via an
 * AOT constant; otherwise a register is used whenever generics, LLVM or
 * jumptables require it. NOTE(review): some source lines are elided here.
 */
6447 mono_arch_emit_imt_argument (MonoCompile *cfg, MonoCallInst *call, MonoInst *imt_arg)
6449 	int method_reg = mono_alloc_ireg (cfg);
6450 #ifdef USE_JUMP_TABLES
/* Jumptable builds always take the register path below. */
6451 	int use_jumptables = TRUE;
6453 	int use_jumptables = FALSE;
6456 	if (cfg->compile_aot) {
6459 		call->dynamic_imt_arg = TRUE;
6462 			MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, method_reg, imt_arg->dreg);
/* No explicit imt_arg: load the method as an AOT constant. */
6464 			MONO_INST_NEW (cfg, ins, OP_AOTCONST);
6465 			ins->dreg = method_reg;
6466 			ins->inst_p0 = call->method;
6467 			ins->inst_c1 = MONO_PATCH_INFO_METHODCONST;
6468 			MONO_ADD_INS (cfg->cbb, ins);
6470 		mono_call_inst_add_outarg_reg (cfg, call, method_reg, ARMREG_V5, FALSE);
6471 	} else if (cfg->generic_context || imt_arg || mono_use_llvm || use_jumptables) {
6472 		/* Always pass in a register for simplicity */
6473 		call->dynamic_imt_arg = TRUE;
6475 		cfg->uses_rgctx_reg = TRUE;
6478 			MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, method_reg, imt_arg->dreg);
/* Non-AOT, no imt_arg: the method pointer is a plain runtime constant. */
6482 			MONO_INST_NEW (cfg, ins, OP_PCONST);
6483 			ins->inst_p0 = call->method;
6484 			ins->dreg = method_reg;
6485 			MONO_ADD_INS (cfg->cbb, ins);
6488 		mono_call_inst_add_outarg_reg (cfg, call, method_reg, ARMREG_V5, FALSE);
6492 #endif /* DISABLE_JIT */
/*
 * mono_arch_find_imt_method:
 * Recover the IMT method at an interface call site. With jumptables (or AOT /
 * gsharedvt code) the method is simply in register V5; otherwise it is the
 * word embedded in the code stream right after the LDR PC-relative load at
 * the call site. NOTE(review): some source lines are elided in this view.
 */
6495 mono_arch_find_imt_method (mgreg_t *regs, guint8 *code)
6497 #ifdef USE_JUMP_TABLES
6498 	return (MonoMethod*)regs [ARMREG_V5];
6501 	guint32 *code_ptr = (guint32*)code;
6503 	method = GUINT_TO_POINTER (code_ptr [1]);
6507 		return (MonoMethod*)regs [ARMREG_V5];
6509 	/* The IMT value is stored in the code stream right after the LDC instruction. */
6510 	/* This is no longer true for the gsharedvt_in trampoline */
/* Sanity check: the instruction preceding the embedded value must be the LDR. */
6512 	if (!IS_LDR_PC (code_ptr [0])) {
6513 		g_warning ("invalid code stream, instruction before IMT value is not a LDC in %s() (code %p value 0: 0x%x -1: 0x%x -2: 0x%x)", __FUNCTION__, code, code_ptr [2], code_ptr [1], code_ptr [0]);
6514 		g_assert (IS_LDR_PC (code_ptr [0]));
6518 		/* This is AOTed code, or the gsharedvt trampoline, the IMT method is in V5 */
6519 		return (MonoMethod*)regs [ARMREG_V5];
6521 		return (MonoMethod*) method;
/* Return the vtable for a static rgctx call: it is passed in the RGCTX register. */
6526 mono_arch_find_static_call_vtable (mgreg_t *regs, guint8 *code)
6528 	return (MonoVTable*) regs [MONO_ARCH_RGCTX_REG];
6531 /* #define ENABLE_WRONG_METHOD_CHECK 1 */
/* Per-piece code-size budgets (in bytes, 4 bytes per ARM instruction) used to
 * pre-compute the allocation size of an IMT thunk in mono_arch_build_imt_thunk. */
6532 #define BASE_SIZE (6 * 4)
6533 #define BSEARCH_ENTRY_SIZE (4 * 4)
6534 #define CMP_SIZE (3 * 4)
6535 #define BRANCH_SIZE (1 * 4)
6536 #define CALL_SIZE (2 * 4)
6537 #define WMC_SIZE (8 * 4)
/* Byte distance from A to B; truncates pointers to 32 bits (fine on 32-bit ARM). */
6538 #define DISTANCE(A, B) (((gint32)(B)) - ((gint32)(A)))
6540 #ifdef USE_JUMP_TABLES
/* Store VALUE into jumptable slot INDEX; asserts the slot is still unset so
 * each slot is filled exactly once. */
6542 set_jumptable_element (gpointer *base, guint32 index, gpointer value)
6544 	g_assert (base [index] == NULL);
6545 	base [index] = value;
/*
 * load_element_with_regbase_cond:
 * Emit a conditional load of jumptable entry JTI (word index) from BASE into
 * DREG. Small offsets use a single LDR with a 12-bit immediate; larger ones
 * build the byte offset in DREG with MOVW/MOVT first (requires ARMv7 MOVW/MOVT
 * support) and then do a register-offset LDR.
 */
6548 load_element_with_regbase_cond (arminstr_t *code, ARMReg dreg, ARMReg base, guint32 jti, int cond)
6550 	if (arm_is_imm12 (jti * 4)) {
6551 		ARM_LDR_IMM_COND (code, dreg, base, jti * 4, cond);
6553 		ARM_MOVW_REG_IMM_COND (code, dreg, (jti * 4) & 0xffff, cond);
/* Only emit MOVT when the offset has bits above the low 16. */
6554 		if ((jti * 4) >> 16)
6555 			ARM_MOVT_REG_IMM_COND (code, dreg, ((jti * 4) >> 16) & 0xffff, cond);
6556 		ARM_LDR_REG_REG_SHIFT_COND (code, dreg, base, dreg, ARMSHIFT_LSL, 0, cond);
/*
 * arm_emit_value_and_patch_ldr:
 * Emit VALUE into the code stream at CODE and patch the earlier PC-relative
 * LDR at TARGET so its 12-bit immediate points at the emitted word.
 */
6562 arm_emit_value_and_patch_ldr (arminstr_t *code, arminstr_t *target, guint32 value)
6564 	guint32 delta = DISTANCE (target, code);
/* NOTE(review): delta is guint32, so "delta >= 0" is always true; only the
 * upper bound (fits the LDR 12-bit offset field) is actually checked. If
 * target were ever past code, the wrapped value would still need to trip the
 * <= 0xFFF check — consider a signed comparison. */
6566 	g_assert (delta >= 0 && delta <= 0xFFF);
6567 	*target = *target | delta;
6573 #ifdef ENABLE_WRONG_METHOD_CHECK
/* Debug helper (only built with ENABLE_WRONG_METHOD_CHECK): report an IMT
 * method mismatch detected inside a generated thunk. */
6575 mini_dump_bad_imt (int input_imt, int compared_imt, int pc)
6577 	g_print ("BAD IMT comparing %x with expected %x at ip %x", input_imt, compared_imt, pc);
/*
 * mono_arch_build_imt_thunk:
 * Build the native IMT dispatch thunk for VTABLE: a sequence of compare /
 * branch / jump chunks (one per IMT entry) that compares the incoming IMT
 * method against each entry's key and jumps to the matching vtable slot,
 * target code, or FAIL_TRAMP. Two code-generation strategies coexist:
 * with USE_JUMP_TABLES all constants (vtable, keys, targets) live in a
 * jumptable addressed through R2; without it they are embedded in the code
 * stream as PC-relative literal pool entries patched afterwards via
 * arm_emit_value_and_patch_ldr. The function first sizes every chunk, then
 * allocates the code, emits it, patches branches, and flushes the icache.
 * NOTE(review): many intermediate source lines are elided in this view;
 * comments describe only the code shown.
 */
6583 mono_arch_build_imt_thunk (MonoVTable *vtable, MonoDomain *domain, MonoIMTCheckItem **imt_entries, int count,
6584 	gpointer fail_tramp)
6587 	arminstr_t *code, *start;
6588 #ifdef USE_JUMP_TABLES
6591 	gboolean large_offsets = FALSE;
6592 	guint32 **constant_pool_starts;
6593 	arminstr_t *vtable_target = NULL;
6594 	int extra_space = 0;
6596 #ifdef ENABLE_WRONG_METHOD_CHECK
/* --- Pass 1: compute the chunk size of every entry to size the allocation. --- */
6601 #ifdef USE_JUMP_TABLES
/* Jumptable sizing is simpler: a fixed 16-instruction budget per entry. */
6602 	for (i = 0; i < count; ++i) {
6603 		MonoIMTCheckItem *item = imt_entries [i];
6604 		item->chunk_size += 4 * 16;
6605 		if (!item->is_equals)
6606 			imt_entries [item->check_target_idx]->compare_done = TRUE;
6607 		size += item->chunk_size;
6610 	constant_pool_starts = g_new0 (guint32*, count);
6612 	for (i = 0; i < count; ++i) {
6613 		MonoIMTCheckItem *item = imt_entries [i];
6614 		if (item->is_equals) {
6615 			gboolean fail_case = !item->check_target_idx && fail_tramp;
/* Large vtable offsets / direct target code need the push-and-LDM path (32 bytes). */
6617 			if (item->has_target_code || !arm_is_imm12 (DISTANCE (vtable, &vtable->vtable[item->value.vtable_slot]))) {
6618 				item->chunk_size += 32;
6619 				large_offsets = TRUE;
6622 			if (item->check_target_idx || fail_case) {
6623 				if (!item->compare_done || fail_case)
6624 					item->chunk_size += CMP_SIZE;
6625 				item->chunk_size += BRANCH_SIZE;
6627 #ifdef ENABLE_WRONG_METHOD_CHECK
6628 				item->chunk_size += WMC_SIZE;
6632 				item->chunk_size += 16;
6633 				large_offsets = TRUE;
6635 			item->chunk_size += CALL_SIZE;
6637 			item->chunk_size += BSEARCH_ENTRY_SIZE;
6638 			imt_entries [item->check_target_idx]->compare_done = TRUE;
6640 		size += item->chunk_size;
6644 		size += 4 * count; /* The ARM_ADD_REG_IMM to pop the stack */
/* Fail-tramp thunks come from the generic-virtual pool so they can be freed. */
6648 		code = mono_method_alloc_generic_virtual_thunk (domain, size);
6650 		code = mono_domain_code_reserve (domain, size);
6654 	g_print ("Building IMT thunk for class %s %s entries %d code size %d code at %p end %p vtable %p fail_tramp %p\n", vtable->klass->name_space, vtable->klass->name, count, size, start, ((guint8*)start) + size, vtable, fail_tramp);
6655 	for (i = 0; i < count; ++i) {
6656 		MonoIMTCheckItem *item = imt_entries [i];
6657 		g_print ("method %d (%p) %s vtable slot %p is_equals %d chunk size %d\n", i, item->key, ((MonoMethod*)item->key)->name, &vtable->vtable [item->value.vtable_slot], item->is_equals, item->chunk_size);
/* --- Pass 2: emit the thunk prologue. --- */
6661 #ifdef USE_JUMP_TABLES
6662 	ARM_PUSH3 (code, ARMREG_R0, ARMREG_R1, ARMREG_R2);
6663 	/* If jumptables we always pass the IMT method in R5 */
6664 	ARM_MOV_REG_REG (code, ARMREG_R0, ARMREG_V5);
/* Jumptable layout: slot 0 = vtable, then 3 slots per entry (key, target, jump). */
6665 #define VTABLE_JTI 0
6666 #define IMT_METHOD_OFFSET 0
6667 #define TARGET_CODE_OFFSET 1
6668 #define JUMP_CODE_OFFSET 2
6669 #define RECORDS_PER_ENTRY 3
6670 #define IMT_METHOD_JTI(idx) (1 + idx * RECORDS_PER_ENTRY + IMT_METHOD_OFFSET)
6671 #define TARGET_CODE_JTI(idx) (1 + idx * RECORDS_PER_ENTRY + TARGET_CODE_OFFSET)
6672 #define JUMP_CODE_JTI(idx) (1 + idx * RECORDS_PER_ENTRY + JUMP_CODE_OFFSET)
6674 	jte = mono_jumptable_add_entries (RECORDS_PER_ENTRY * count + 1 /* vtable */);
6675 	code = (arminstr_t *) mono_arm_load_jumptable_entry_addr ((guint8 *) code, jte, ARMREG_R2);
6676 	ARM_LDR_IMM (code, ARMREG_IP, ARMREG_R2, VTABLE_JTI);
6677 	set_jumptable_element (jte, VTABLE_JTI, vtable);
/* Non-jumptable prologue: extra PC push reserves the 4th stack slot used as
 * the branch target by the POP4-into-PC sequences below. */
6680 		ARM_PUSH4 (code, ARMREG_R0, ARMREG_R1, ARMREG_IP, ARMREG_PC);
6682 		ARM_PUSH2 (code, ARMREG_R0, ARMREG_R1);
6683 	ARM_LDR_IMM (code, ARMREG_R0, ARMREG_LR, -4);
6684 	vtable_target = code;
6685 	ARM_LDR_IMM (code, ARMREG_IP, ARMREG_PC, 0);
6687 	if (mono_use_llvm) {
6688 		/* LLVM always passes the IMT method in R5 */
6689 		ARM_MOV_REG_REG (code, ARMREG_R0, ARMREG_V5);
6691 		/* R0 == 0 means we are called from AOT code. In this case, V5 contains the IMT method */
6692 		ARM_CMP_REG_IMM8 (code, ARMREG_R0, 0);
6693 		ARM_MOV_REG_REG_COND (code, ARMREG_R0, ARMREG_V5, ARMCOND_EQ);
/* --- Pass 3: emit each entry's compare/dispatch chunk. --- */
6697 	for (i = 0; i < count; ++i) {
6698 		MonoIMTCheckItem *item = imt_entries [i];
6699 #ifdef USE_JUMP_TABLES
6700 		guint32 imt_method_jti = 0, target_code_jti = 0;
6702 		arminstr_t *imt_method = NULL, *vtable_offset_ins = NULL, *target_code_ins = NULL;
6704 		gint32 vtable_offset;
6706 		item->code_target = (guint8*)code;
6708 		if (item->is_equals) {
6709 			gboolean fail_case = !item->check_target_idx && fail_tramp;
6711 			if (item->check_target_idx || fail_case) {
6712 				if (!item->compare_done || fail_case) {
6713 #ifdef USE_JUMP_TABLES
6714 					imt_method_jti = IMT_METHOD_JTI (i);
6715 					code = load_element_with_regbase_cond (code, ARMREG_R1, ARMREG_R2, imt_method_jti, ARMCOND_AL);
6718 					ARM_LDR_IMM (code, ARMREG_R1, ARMREG_PC, 0);
6720 					ARM_CMP_REG_REG (code, ARMREG_R0, ARMREG_R1);
/* On mismatch, branch to the next entry; the branch target is patched later. */
6722 #ifdef USE_JUMP_TABLES
6723 				code = load_element_with_regbase_cond (code, ARMREG_R1, ARMREG_R2, JUMP_CODE_JTI (i), ARMCOND_NE);
6724 				ARM_BX_COND (code, ARMCOND_NE, ARMREG_R1);
6725 				item->jmp_code = GUINT_TO_POINTER (JUMP_CODE_JTI (i));
6727 				item->jmp_code = (guint8*)code;
6728 				ARM_B_COND (code, ARMCOND_NE, 0);
6731 				/*Enable the commented code to assert on wrong method*/
6732 #ifdef ENABLE_WRONG_METHOD_CHECK
6733 #ifdef USE_JUMP_TABLES
6734 				imt_method_jti = IMT_METHOD_JTI (i);
6735 				code = load_element_with_regbase_cond (code, ARMREG_R1, ARMREG_R2, imt_method_jti, ARMCOND_AL);
6738 				ARM_LDR_IMM (code, ARMREG_R1, ARMREG_PC, 0);
6740 				ARM_CMP_REG_REG (code, ARMREG_R0, ARMREG_R1);
6742 				ARM_B_COND (code, ARMCOND_EQ, 0);
6744 				/* Define this if your system is so bad that gdb is failing. */
6745 #ifdef BROKEN_DEV_ENV
6746 				ARM_MOV_REG_REG (code, ARMREG_R2, ARMREG_PC);
6748 				arm_patch (code - 1, mini_dump_bad_imt);
6752 				arm_patch (cond, code);
/* Dispatch: either jump straight to known target code... */
6756 			if (item->has_target_code) {
6757 				/* Load target address */
6758 #ifdef USE_JUMP_TABLES
6759 				target_code_jti = TARGET_CODE_JTI (i);
6760 				code = load_element_with_regbase_cond (code, ARMREG_R1, ARMREG_R2, target_code_jti, ARMCOND_AL);
6761 				/* Restore registers */
6762 				ARM_POP3 (code, ARMREG_R0, ARMREG_R1, ARMREG_R2);
6764 				ARM_BX (code, ARMREG_R1);
6765 				set_jumptable_element (jte, target_code_jti, item->value.target_code);
6767 				target_code_ins = code;
6768 				ARM_LDR_IMM (code, ARMREG_R1, ARMREG_PC, 0);
6769 				/* Save it to the fourth slot */
6770 				ARM_STR_IMM (code, ARMREG_R1, ARMREG_SP, 3 * sizeof (gpointer));
6771 				/* Restore registers and branch */
6772 				ARM_POP4 (code, ARMREG_R0, ARMREG_R1, ARMREG_IP, ARMREG_PC);
6774 				code = arm_emit_value_and_patch_ldr (code, target_code_ins, (gsize)item->value.target_code);
/* ...or load the method out of the vtable slot and jump there. */
6777 				vtable_offset = DISTANCE (vtable, &vtable->vtable[item->value.vtable_slot]);
6778 				if (!arm_is_imm12 (vtable_offset)) {
6780 					 * We need to branch to a computed address but we don't have
6781 					 * a free register to store it, since IP must contain the
6782 					 * vtable address. So we push the two values to the stack, and
6783 					 * load them both using LDM.
6785 					/* Compute target address */
6786 #ifdef USE_JUMP_TABLES
6787 					ARM_MOVW_REG_IMM (code, ARMREG_R1, vtable_offset & 0xffff);
6788 					if (vtable_offset >> 16)
6789 						ARM_MOVT_REG_IMM (code, ARMREG_R1, (vtable_offset >> 16) & 0xffff);
6790 					/* IP had vtable base. */
6791 					ARM_LDR_REG_REG (code, ARMREG_IP, ARMREG_IP, ARMREG_R1);
6792 					/* Restore registers and branch */
6793 					ARM_POP3 (code, ARMREG_R0, ARMREG_R1, ARMREG_R2);
6794 					ARM_BX (code, ARMREG_IP);
6796 					vtable_offset_ins = code;
6797 					ARM_LDR_IMM (code, ARMREG_R1, ARMREG_PC, 0);
6798 					ARM_LDR_REG_REG (code, ARMREG_R1, ARMREG_IP, ARMREG_R1);
6799 					/* Save it to the fourth slot */
6800 					ARM_STR_IMM (code, ARMREG_R1, ARMREG_SP, 3 * sizeof (gpointer));
6801 					/* Restore registers and branch */
6802 					ARM_POP4 (code, ARMREG_R0, ARMREG_R1, ARMREG_IP, ARMREG_PC);
6804 					code = arm_emit_value_and_patch_ldr (code, vtable_offset_ins, vtable_offset);
/* Small vtable offset: single LDR straight into PC (or IP + BX). */
6807 #ifdef USE_JUMP_TABLES
6808 					ARM_LDR_IMM (code, ARMREG_IP, ARMREG_IP, vtable_offset);
6809 					ARM_POP3 (code, ARMREG_R0, ARMREG_R1, ARMREG_R2);
6810 					ARM_BX (code, ARMREG_IP);
6812 					ARM_POP2 (code, ARMREG_R0, ARMREG_R1);
6814 						ARM_ADD_REG_IMM8 (code, ARMREG_SP, ARMREG_SP, 2 * sizeof (gpointer));
6815 					ARM_LDR_IMM (code, ARMREG_PC, ARMREG_IP, vtable_offset);
/* Fail case: the NE branch above falls through here to the fail trampoline. */
6821 #ifdef USE_JUMP_TABLES
6822 				set_jumptable_element (jte, GPOINTER_TO_UINT (item->jmp_code), code);
6823 				target_code_jti = TARGET_CODE_JTI (i);
6824 				/* Load target address */
6825 				code = load_element_with_regbase_cond (code, ARMREG_R1, ARMREG_R2, target_code_jti, ARMCOND_AL);
6826 				/* Restore registers */
6827 				ARM_POP3 (code, ARMREG_R0, ARMREG_R1, ARMREG_R2);
6829 				ARM_BX (code, ARMREG_R1);
6830 				set_jumptable_element (jte, target_code_jti, fail_tramp);
6832 				arm_patch (item->jmp_code, (guchar*)code);
6834 				target_code_ins = code;
6835 				/* Load target address */
6836 				ARM_LDR_IMM (code, ARMREG_R1, ARMREG_PC, 0);
6837 				/* Save it to the fourth slot */
6838 				ARM_STR_IMM (code, ARMREG_R1, ARMREG_SP, 3 * sizeof (gpointer));
6839 				/* Restore registers and branch */
6840 				ARM_POP4 (code, ARMREG_R0, ARMREG_R1, ARMREG_IP, ARMREG_PC);
6842 				code = arm_emit_value_and_patch_ldr (code, target_code_ins, (gsize)fail_tramp);
6844 				item->jmp_code = NULL;
6847 #ifdef USE_JUMP_TABLES
6849 				set_jumptable_element (jte, imt_method_jti, item->key);
6852 				code = arm_emit_value_and_patch_ldr (code, imt_method, (guint32)item->key);
6854 			/*must emit after unconditional branch*/
6855 			if (vtable_target) {
6856 				code = arm_emit_value_and_patch_ldr (code, vtable_target, (guint32)vtable);
6857 				item->chunk_size += 4;
6858 				vtable_target = NULL;
6861 			/*We reserve the space for bsearch IMT values after the first entry with an absolute jump*/
6862 				constant_pool_starts [i] = code;
6864 					code += extra_space;
/* Non-equals entry: binary-search comparison (unsigned >=) against the key. */
6869 #ifdef USE_JUMP_TABLES
6870 			code = load_element_with_regbase_cond (code, ARMREG_R1, ARMREG_R2, IMT_METHOD_JTI (i), ARMCOND_AL);
6871 			ARM_CMP_REG_REG (code, ARMREG_R0, ARMREG_R1);
6872 			code = load_element_with_regbase_cond (code, ARMREG_R1, ARMREG_R2, JUMP_CODE_JTI (i), ARMCOND_HS);
6873 			ARM_BX_COND (code, ARMCOND_HS, ARMREG_R1);
6874 			item->jmp_code = GUINT_TO_POINTER (JUMP_CODE_JTI (i));
6876 			ARM_LDR_IMM (code, ARMREG_R1, ARMREG_PC, 0);
6877 			ARM_CMP_REG_REG (code, ARMREG_R0, ARMREG_R1);
6879 			item->jmp_code = (guint8*)code;
6880 			ARM_B_COND (code, ARMCOND_HS, 0);
/* --- Pass 4: patch inter-chunk branches and fill the constant pools. --- */
6886 	for (i = 0; i < count; ++i) {
6887 		MonoIMTCheckItem *item = imt_entries [i];
6888 		if (item->jmp_code) {
6889 			if (item->check_target_idx)
6890 #ifdef USE_JUMP_TABLES
6891 				set_jumptable_element (jte, GPOINTER_TO_UINT (item->jmp_code), imt_entries [item->check_target_idx]->code_target);
6893 				arm_patch (item->jmp_code, imt_entries [item->check_target_idx]->code_target);
6896 		if (i > 0 && item->is_equals) {
6898 #ifdef USE_JUMP_TABLES
6899 			for (j = i - 1; j >= 0 && !imt_entries [j]->is_equals; --j)
6900 				set_jumptable_element (jte, IMT_METHOD_JTI (j), imt_entries [j]->key);
6902 			arminstr_t *space_start = constant_pool_starts [i];
6903 			for (j = i - 1; j >= 0 && !imt_entries [j]->is_equals; --j) {
6904 				space_start = arm_emit_value_and_patch_ldr (space_start, (arminstr_t*)imt_entries [j]->code_target, (guint32)imt_entries [j]->key);
6912 		char *buff = g_strdup_printf ("thunk_for_class_%s_%s_entries_%d", vtable->klass->name_space, vtable->klass->name, count);
6913 		mono_disassemble_code (NULL, (guint8*)start, size, buff);
6918 #ifndef USE_JUMP_TABLES
6919 	g_free (constant_pool_starts);
/* Generated code must be icache-flushed before it is executed. */
6922 	mono_arch_flush_icache ((guint8*)start, size);
6923 	mono_stats.imt_thunks_size += code - start;
6925 	g_assert (DISTANCE (start, code) <= size);
/* Read integer register REG out of a saved MonoContext. */
6930 mono_arch_context_get_int_reg (MonoContext *ctx, int reg)
6932 	return ctx->regs [reg];
/* Write VAL into integer register REG of a saved MonoContext. */
6936 mono_arch_context_set_int_reg (MonoContext *ctx, int reg, mgreg_t val)
6938 	ctx->regs [reg] = val;
6942 * mono_arch_get_trampolines:
6944 * Return a list of MonoTrampInfo structures describing arch specific trampolines
/* Delegate to the ARM exception-trampoline builder; AOT selects the AOT-safe variants. */
6948 mono_arch_get_trampolines (gboolean aot)
6950 	return mono_arm_get_exception_trampolines (aot);
/*
 * mono_arch_install_handler_block_guard:
 * Redirect the saved return address of a finally/handler block (stored in the
 * clause's exvar stack slot) to NEW_VALUE, so the handler returns into the
 * guard instead of the method body. The overwrite only happens if the stored
 * value still points inside the method's code, i.e. the handler has not
 * already been redirected. NOTE(review): the return statements are elided in
 * this view; presumably TRUE/FALSE indicating whether the guard was installed.
 */
6954 mono_arch_install_handler_block_guard (MonoJitInfo *ji, MonoJitExceptionInfo *clause, MonoContext *ctx, gpointer new_value)
6961 	bp = MONO_CONTEXT_GET_BP (ctx);
6962 	lr_loc = (gpointer*)(bp + clause->exvar_offset);
6964 	old_value = *lr_loc;
/* Bail out when the stored address is not within this method's code range. */
6965 	if ((char*)old_value < (char*)ji->code_start || (char*)old_value > ((char*)ji->code_start + ji->code_size))
6968 	*lr_loc = new_value;
6973 #if defined(MONO_ARCH_SOFT_DEBUG_SUPPORTED)
6975 * mono_arch_set_breakpoint:
6977 * Set a breakpoint at the native code corresponding to JI at NATIVE_OFFSET.
6978 * The location should contain code emitted by OP_SEQ_POINT.
/*
 * mono_arch_set_breakpoint:
 * Activate the breakpoint at IP (a location emitted by OP_SEQ_POINT).
 * Three mechanisms, chosen by configuration:
 *  - soft breakpoints: patch in a BLX through LR to call the breakpoint hook;
 *  - AOT code: record the trigger page in the method's SeqPointInfo table,
 *    indexed by instruction (word) offset;
 *  - JIT code: patch in a load from the bp trigger page, whose read faults
 *    and raises the debugger signal.
 * NOTE(review): some source lines are elided in this view.
 */
6981 mono_arch_set_breakpoint (MonoJitInfo *ji, guint8 *ip)
6984 	guint32 native_offset = ip - (guint8*)ji->code_start;
6985 	MonoDebugOptions *opt = mini_get_debug_options ();
6987 	if (opt->soft_breakpoints) {
6988 		g_assert (!ji->from_aot);
6990 		ARM_BLX_REG (code, ARMREG_LR);
6991 		mono_arch_flush_icache (code - 4, 4);
6992 	} else if (ji->from_aot) {
6993 		SeqPointInfo *info = mono_arch_get_seq_point_info (mono_domain_get (), ji->code_start);
/* bp_addrs is indexed by 4-byte instruction slot; must not already be set. */
6995 		g_assert (native_offset % 4 == 0);
6996 		g_assert (info->bp_addrs [native_offset / 4] == 0);
6997 		info->bp_addrs [native_offset / 4] = bp_trigger_page;
6999 		int dreg = ARMREG_LR;
7001 		/* Read from another trigger page */
7002 #ifdef USE_JUMP_TABLES
7003 		gpointer *jte = mono_jumptable_add_entry ();
7004 		code = mono_arm_load_jumptable_entry (code, jte, dreg);
7005 		jte [0] = bp_trigger_page;
7007 		ARM_LDR_IMM (code, dreg, ARMREG_PC, 0);
7009 		*(int*)code = (int)bp_trigger_page;
/* The faulting load: reading the protected trigger page raises the signal. */
7012 		ARM_LDR_IMM (code, dreg, dreg, 0);
7014 		mono_arch_flush_icache (code - 16, 16);
7017 		/* This is currently implemented by emitting an SWI instruction, which
7018 		 * qemu/linux seems to convert to a SIGILL.
7020 		*(int*)code = (0xef << 24) | 8;
7022 		mono_arch_flush_icache (code - 4, 4);
7028 * mono_arch_clear_breakpoint:
7030 * Clear the breakpoint at IP.
/*
 * mono_arch_clear_breakpoint:
 * Undo mono_arch_set_breakpoint at IP: restore the original instruction(s)
 * for soft breakpoints, clear the SeqPointInfo slot for AOT code, or rewrite
 * the patched instruction sequence (4 words) for JIT code. Mirrors the three
 * cases of mono_arch_set_breakpoint.
 * NOTE(review): some source lines are elided in this view.
 */
7033 mono_arch_clear_breakpoint (MonoJitInfo *ji, guint8 *ip)
7035 	MonoDebugOptions *opt = mini_get_debug_options ();
7039 	if (opt->soft_breakpoints) {
7040 		g_assert (!ji->from_aot);
7043 		mono_arch_flush_icache (code - 4, 4);
7044 	} else if (ji->from_aot) {
7045 		guint32 native_offset = ip - (guint8*)ji->code_start;
7046 		SeqPointInfo *info = mono_arch_get_seq_point_info (mono_domain_get (), ji->code_start);
/* Slot must currently hold the trigger page set by mono_arch_set_breakpoint. */
7048 		g_assert (native_offset % 4 == 0);
7049 		g_assert (info->bp_addrs [native_offset / 4] == bp_trigger_page);
7050 		info->bp_addrs [native_offset / 4] = 0;
7052 		for (i = 0; i < 4; ++i)
7055 		mono_arch_flush_icache (ip, code - ip);
7060 * mono_arch_start_single_stepping:
7062 * Start single stepping.
/* Enable single stepping globally by revoking all access to the single-step
 * trigger page, so the loads emitted at sequence points fault. */
7065 mono_arch_start_single_stepping (void)
7067 	if (ss_trigger_page)
7068 		mono_mprotect (ss_trigger_page, mono_pagesize (), 0);
7074 * mono_arch_stop_single_stepping:
7076 * Stop single stepping.
/* Disable single stepping by making the single-step trigger page readable
 * again, so sequence-point loads no longer fault. */
7079 mono_arch_stop_single_stepping (void)
7081 	if (ss_trigger_page)
7082 		mono_mprotect (ss_trigger_page, mono_pagesize (), MONO_MMAP_READ);
/* Signal delivered when a trigger-page load faults; platform-dependent
 * (SIGBUS on some systems, SIGSEGV elsewhere). */
7088 #define DBG_SIGNAL SIGBUS
7090 #define DBG_SIGNAL SIGSEGV
7094 * mono_arch_is_single_step_event:
7096 * Return whenever the machine state in SIGCTX corresponds to a single
/* Return whether the fault described by INFO/SIGCTX was caused by a read of
 * the single-step trigger page (i.e. is a single-step event). */
7100 mono_arch_is_single_step_event (void *info, void *sigctx)
7102 	siginfo_t *sinfo = info;
7104 	if (!ss_trigger_page)
7107 	/* Sometimes the address is off by 4 */
/* Accept a small window past the page start to tolerate the skewed si_addr. */
7108 	if (sinfo->si_addr >= ss_trigger_page && (guint8*)sinfo->si_addr <= (guint8*)ss_trigger_page + 128)
7115 * mono_arch_is_breakpoint_event:
7117 * Return whenever the machine state in SIGCTX corresponds to a breakpoint event.
/* Return whether the fault described by INFO/SIGCTX was caused by a read of
 * the breakpoint trigger page (i.e. is a breakpoint event). */
7120 mono_arch_is_breakpoint_event (void *info, void *sigctx)
7122 	siginfo_t *sinfo = info;
7124 	if (!ss_trigger_page)
/* Only the expected debugger signal qualifies; others are unrelated faults. */
7127 	if (sinfo->si_signo == DBG_SIGNAL) {
7128 		/* Sometimes the address is off by 4 */
7129 		if (sinfo->si_addr >= bp_trigger_page && (guint8*)sinfo->si_addr <= (guint8*)bp_trigger_page + 128)
7139 * mono_arch_skip_breakpoint:
7141 * See mini-amd64.c for docs.
/* Resume after a breakpoint by advancing the PC past the 4-byte faulting instruction. */
7144 mono_arch_skip_breakpoint (MonoContext *ctx, MonoJitInfo *ji)
7146 	MONO_CONTEXT_SET_IP (ctx, (guint8*)MONO_CONTEXT_GET_IP (ctx) + 4);
7150 * mono_arch_skip_single_step:
7152 * See mini-amd64.c for docs.
/* Resume after a single-step fault by advancing the PC past the 4-byte faulting instruction. */
7155 mono_arch_skip_single_step (MonoContext *ctx)
7157 	MONO_CONTEXT_SET_IP (ctx, (guint8*)MONO_CONTEXT_GET_IP (ctx) + 4);
7160 #endif /* MONO_ARCH_SOFT_DEBUG_SUPPORTED */
7163 * mono_arch_get_seq_point_info:
7165 * See mini-amd64.c for docs.
/*
 * mono_arch_get_seq_point_info:
 * Return (creating and caching on first use) the per-method SeqPointInfo for
 * the method starting at CODE. The info is keyed by code address in the
 * domain's arch_seq_points hash, sized by the method's code length (one
 * bp_addrs slot per instruction), and seeded with the current trigger pages.
 * NOTE(review): some source lines are elided in this view.
 */
7168 mono_arch_get_seq_point_info (MonoDomain *domain, guint8 *code)
7173 	// FIXME: Add a free function
/* Domain lock guards the arch_seq_points hash table for both lookup and insert. */
7175 	mono_domain_lock (domain);
7176 	info = g_hash_table_lookup (domain_jit_info (domain)->arch_seq_points,
7178 	mono_domain_unlock (domain);
7181 		ji = mono_jit_info_table_find (domain, (char*)code);
/* Trailing flexible array: one slot per 4-byte instruction of the method. */
7184 		info = g_malloc0 (sizeof (SeqPointInfo) + ji->code_size);
7186 		info->ss_trigger_page = ss_trigger_page;
7187 		info->bp_trigger_page = bp_trigger_page;
7189 		mono_domain_lock (domain);
7190 		g_hash_table_insert (domain_jit_info (domain)->arch_seq_points,
7192 		mono_domain_unlock (domain);
/* Initialize an extended LMF: link it to the previous LMF and tag the link
 * pointer's bit 1 so stack walkers recognize it as a MonoLMFExt. */
7199 mono_arch_init_lmf_ext (MonoLMFExt *ext, gpointer prev_lmf)
7201 	ext->lmf.previous_lmf = prev_lmf;
7202 	/* Mark that this is a MonoLMFExt */
7203 	ext->lmf.previous_lmf = (gpointer)(((gssize)ext->lmf.previous_lmf) | 2);
7204 	ext->lmf.sp = (gssize)ext;
7208 * mono_arch_set_target:
7210 * Set the target architecture the JIT backend should generate code for, in the form
7211 * of a GNU target triplet. Only used in AOT mode.
/*
 * mono_arch_set_target:
 * Parse a GNU target triple (AOT mode only) and set the ARM ISA feature flags
 * (v5/v6/v7/v7s, Thumb, EABI) the backend should target. Substring checks are
 * cumulative: e.g. "armv7s" also matches the "armv7" test, so the v7s branch
 * only needs to add the v7s flag.
 */
7214 mono_arch_set_target (char *mtriple)
7216 	/* The GNU target triple format is not very well documented */
7217 	if (strstr (mtriple, "armv7")) {
7218 		v5_supported = TRUE;
7219 		v6_supported = TRUE;
7220 		v7_supported = TRUE;
7222 	if (strstr (mtriple, "armv6")) {
7223 		v5_supported = TRUE;
7224 		v6_supported = TRUE;
7226 	if (strstr (mtriple, "armv7s")) {
7227 		v7s_supported = TRUE;
/* "thumbv7s" does not contain "armv7", so all base flags are set here too. */
7229 	if (strstr (mtriple, "thumbv7s")) {
7230 		v5_supported = TRUE;
7231 		v6_supported = TRUE;
7232 		v7_supported = TRUE;
7233 		v7s_supported = TRUE;
7234 		thumb_supported = TRUE;
7235 		thumb2_supported = TRUE;
/* Apple targets always have at least ARMv6 + Thumb. */
7237 	if (strstr (mtriple, "darwin") || strstr (mtriple, "ios")) {
7238 		v5_supported = TRUE;
7239 		v6_supported = TRUE;
7240 		thumb_supported = TRUE;
7243 	if (strstr (mtriple, "gnueabi"))
7244 		eabi_supported = TRUE;
/* Report whether OPCODE can be emitted natively. The 32-bit atomic ops need
 * the ARMv7 LDREX/STREX-based sequences, so they require v7 support. */
7248 mono_arch_opcode_supported (int opcode)
7251 	case OP_ATOMIC_ADD_I4:
7252 	case OP_ATOMIC_EXCHANGE_I4:
7253 	case OP_ATOMIC_CAS_I4:
7254 		return v7_supported;
7260 #if defined(ENABLE_GSHAREDVT)
7262 #include "../../../mono-extensions/mono/mini/mini-arm-gsharedvt.c"
7264 #endif /* !MONOTOUCH */