2 * mini-arm.c: ARM backend for the Mono code generator
5 * Paolo Molaro (lupus@ximian.com)
6 * Dietmar Maurer (dietmar@ximian.com)
8 * (C) 2003 Ximian, Inc.
9 * Copyright 2003-2011 Novell, Inc (http://www.novell.com)
10 * Copyright 2011 Xamarin, Inc (http://www.xamarin.com)
15 #include <mono/metadata/abi-details.h>
16 #include <mono/metadata/appdomain.h>
17 #include <mono/metadata/debug-helpers.h>
18 #include <mono/utils/mono-mmap.h>
19 #include <mono/utils/mono-hwcap-arm.h>
25 #include "debugger-agent.h"
27 #include "mono/arch/arm/arm-vfp-codegen.h"
29 /* Sanity check: This makes no sense */
30 #if defined(ARM_FPU_NONE) && (defined(ARM_FPU_VFP) || defined(ARM_FPU_VFP_HARD))
31 #error "ARM_FPU_NONE is defined while one of ARM_FPU_VFP/ARM_FPU_VFP_HARD is defined"
35 * IS_SOFT_FLOAT: Is full software floating point used?
36 * IS_HARD_FLOAT: Is full hardware floating point used?
37 * IS_VFP: Is hardware floating point with software ABI used?
39 * These are not necessarily constants, e.g. IS_SOFT_FLOAT and
40 * IS_VFP may delegate to mono_arch_is_soft_float ().
43 #if defined(ARM_FPU_VFP_HARD)
44 #define IS_SOFT_FLOAT (FALSE)
45 #define IS_HARD_FLOAT (TRUE)
47 #elif defined(ARM_FPU_NONE)
48 #define IS_SOFT_FLOAT (mono_arch_is_soft_float ())
49 #define IS_HARD_FLOAT (FALSE)
50 #define IS_VFP (!mono_arch_is_soft_float ())
52 #define IS_SOFT_FLOAT (FALSE)
53 #define IS_HARD_FLOAT (FALSE)
57 #if defined(__ARM_EABI__) && defined(__linux__) && !defined(PLATFORM_ANDROID) && !defined(__native_client__)
58 #define HAVE_AEABI_READ_TP 1
/*
 * Native Client (NaCl) support stubs. Both pad/skip-nops entry points
 * immediately assert, so NaCl codegen is declared but not functional here.
 */
61 #ifdef __native_client_codegen__
62 const guint kNaClAlignment = kNaClAlignmentARM;
63 const guint kNaClAlignmentMask = kNaClAlignmentMaskARM;
/* Byte used to pad code to the NaCl bundle alignment. */
64 gint8 nacl_align_byte = -1; /* 0xff */
/* Pad CODE with PAD bytes of alignment filler — unimplemented, asserts. */
67 mono_arch_nacl_pad (guint8 *code, int pad)
69 /* Not yet properly implemented. */
70 g_assert_not_reached ();
/* Skip over NaCl alignment nops at CODE — unimplemented, asserts. */
75 mono_arch_nacl_skip_nops (guint8 *code)
77 /* Not yet properly implemented. */
78 g_assert_not_reached ();
82 #endif /* __native_client_codegen__ */
84 #define ALIGN_TO(val,align) ((((guint64)val) + ((align) - 1)) & ~((align) - 1))
87 void sys_icache_invalidate (void *start, size_t len);
90 /* This mutex protects architecture specific caches */
91 #define mono_mini_arch_lock() mono_mutex_lock (&mini_arch_mutex)
92 #define mono_mini_arch_unlock() mono_mutex_unlock (&mini_arch_mutex)
93 static mono_mutex_t mini_arch_mutex;
95 static gboolean v5_supported = FALSE;
96 static gboolean v6_supported = FALSE;
97 static gboolean v7_supported = FALSE;
98 static gboolean v7s_supported = FALSE;
99 static gboolean thumb_supported = FALSE;
100 static gboolean thumb2_supported = FALSE;
102 * Whether to use the ARM EABI
104 static gboolean eabi_supported = FALSE;
107 * Whether to use the iphone ABI extensions:
108 * http://developer.apple.com/library/ios/documentation/Xcode/Conceptual/iPhoneOSABIReference/index.html
109 * Basically, r7 is used as a frame pointer and it should point to the saved r7 + lr.
110 * This is required for debugging/profiling tools to work, but it has some overhead so it should
111 * only be turned on in debug builds.
113 static gboolean iphone_abi = FALSE;
116 * The FPU we are generating code for. This is NOT runtime configurable right now,
117 * since some things like MONO_ARCH_CALLEE_FREGS still depend on defines.
119 static MonoArmFPU arm_fpu;
121 #if defined(ARM_FPU_VFP_HARD)
123 * On armhf, d0-d7 are used for argument passing and d8-d15
124 * must be preserved across calls, which leaves us no room
125 * for scratch registers. So we use d14-d15 but back up their
126 * previous contents to a stack slot before using them - see
127 * mono_arm_emit_vfp_scratch_save/_restore ().
129 static int vfp_scratch1 = ARM_VFP_D14;
130 static int vfp_scratch2 = ARM_VFP_D15;
133 * On armel, d0-d7 do not need to be preserved, so we can
134 * freely make use of them as scratch registers.
136 static int vfp_scratch1 = ARM_VFP_D0;
137 static int vfp_scratch2 = ARM_VFP_D1;
142 static volatile int ss_trigger_var = 0;
144 static gpointer single_step_func_wrapper;
145 static gpointer breakpoint_func_wrapper;
148 * The code generated for sequence points reads from this location, which is
149 * made read-only when single stepping is enabled.
151 static gpointer ss_trigger_page;
153 /* Enabled breakpoints read from this trigger page */
154 static gpointer bp_trigger_page;
158 * floating point support: on ARM it is a mess, there are at least 3
159 * different setups, each of which is binary-incompatible with the others.
160 * 1) FPA: old and ugly, but unfortunately what current distros use
161 * the double binary format has the two words swapped. 8 double registers.
162 * Implemented usually by kernel emulation.
163 * 2) softfloat: the compiler emulates all the fp ops. Usually uses the
164 * ugly swapped double format (I guess a softfloat-vfp exists, too, though).
165 * 3) VFP: the new and actually sensible and useful FP support. Implemented
166 * in HW or kernel-emulated, requires new tools. I think this is what symbian uses.
168 * We do not care about FPA. We will support soft float and VFP.
170 int mono_exc_esp_offset = 0;
/* Immediate-range predicates for ARM load/store encodings:
 * imm12 for LDR/STR, imm8 for LDRH/LDRSB-class, fpimm8 for VFP
 * (byte offset, must also be word-scaled by the encoder). */
172 #define arm_is_imm12(v) ((v) > -4096 && (v) < 4096)
173 #define arm_is_imm8(v) ((v) > -256 && (v) < 256)
174 #define arm_is_fpimm8(v) ((v) >= -1020 && (v) <= 1020)
/* Mask/value pair to recognize an "ldr pc, [...]" instruction word. */
176 #define LDR_MASK ((0xf << ARMCOND_SHIFT) | (3 << 26) | (1 << 22) | (1 << 20) | (15 << 12))
177 #define LDR_PC_VAL ((ARMCOND_AL << ARMCOND_SHIFT) | (1 << 26) | (0 << 22) | (1 << 20) | (15 << 12))
178 #define IS_LDR_PC(val) (((val) & LDR_MASK) == LDR_PC_VAL)
/* Pre-encoded instruction words: "add lr, pc, #4" and "mov lr, pc". */
180 #define ADD_LR_PC_4 ((ARMCOND_AL << ARMCOND_SHIFT) | (1 << 25) | (1 << 23) | (ARMREG_PC << 16) | (ARMREG_LR << 12) | 4)
181 #define MOV_LR_PC ((ARMCOND_AL << ARMCOND_SHIFT) | (1 << 24) | (0xa << 20) | (ARMREG_LR << 12) | ARMREG_PC)
182 //#define DEBUG_IMT 0
184 /* A variant of ARM_LDR_IMM which can handle large offsets */
/* When the offset fits imm12 emit a direct LDR, otherwise materialize the
 * offset into SCRATCH_REG (which must differ from BASEREG) and use a
 * register-offset load. */
185 #define ARM_LDR_IMM_GENERAL(code, dreg, basereg, offset, scratch_reg) do { \
186 if (arm_is_imm12 ((offset))) { \
187 ARM_LDR_IMM (code, (dreg), (basereg), (offset)); \
189 g_assert ((scratch_reg) != (basereg)); \
190 code = mono_arm_emit_load_imm (code, (scratch_reg), (offset)); \
191 ARM_LDR_REG_REG (code, (dreg), (basereg), (scratch_reg)); \
/* Store counterpart of ARM_LDR_IMM_GENERAL, same offset strategy. */
195 #define ARM_STR_IMM_GENERAL(code, dreg, basereg, offset, scratch_reg) do { \
196 if (arm_is_imm12 ((offset))) { \
197 ARM_STR_IMM (code, (dreg), (basereg), (offset)); \
199 g_assert ((scratch_reg) != (basereg)); \
200 code = mono_arm_emit_load_imm (code, (scratch_reg), (offset)); \
201 ARM_STR_REG_REG (code, (dreg), (basereg), (scratch_reg)); \
/* Forward declaration; defined later in the file. */
206 static void mono_arch_compute_omit_fp (MonoCompile *cfg);
/*
 * mono_arch_regname:
 * Map integer register number REG (0..15) to a printable ARM register
 * name for debug/trace output, via a static lookup table.
 */
210 mono_arch_regname (int reg)
212 static const char * rnames[] = {
213 "arm_r0", "arm_r1", "arm_r2", "arm_r3", "arm_v1",
214 "arm_v2", "arm_v3", "arm_v4", "arm_v5", "arm_v6",
215 "arm_v7", "arm_fp", "arm_ip", "arm_sp", "arm_lr",
/* Bounds check before indexing the table. */
218 if (reg >= 0 && reg < 16)
/*
 * mono_arch_fregname:
 * Map floating point register number REG (0..31) to a printable name
 * for debug/trace output, via a static lookup table.
 */
224 mono_arch_fregname (int reg)
226 static const char * rnames[] = {
227 "arm_f0", "arm_f1", "arm_f2", "arm_f3", "arm_f4",
228 "arm_f5", "arm_f6", "arm_f7", "arm_f8", "arm_f9",
229 "arm_f10", "arm_f11", "arm_f12", "arm_f13", "arm_f14",
230 "arm_f15", "arm_f16", "arm_f17", "arm_f18", "arm_f19",
231 "arm_f20", "arm_f21", "arm_f22", "arm_f23", "arm_f24",
232 "arm_f25", "arm_f26", "arm_f27", "arm_f28", "arm_f29",
/* Bounds check before indexing the table. */
235 if (reg >= 0 && reg < 32)
/*
 * emit_big_add:
 * Emit DREG = SREG + IMM for an arbitrary 32-bit immediate.
 * Uses a single ADD when IMM encodes as a rotated imm8; otherwise the
 * immediate is materialized into IP (or DREG when DREG != SREG — TODO
 * confirm against the full source, the branch condition is not visible
 * in this excerpt) and added with a register-register ADD.
 */
243 emit_big_add (guint8 *code, int dreg, int sreg, int imm)
245 int imm8, rot_amount;
246 if ((imm8 = mono_arm_is_rotated_imm8 (imm, &rot_amount)) >= 0) {
247 ARM_ADD_REG_IMM (code, dreg, sreg, imm8, rot_amount);
/* Immediate not encodable: go through the IP scratch register. */
251 code = mono_arm_emit_load_imm (code, ARMREG_IP, imm);
252 ARM_ADD_REG_REG (code, dreg, sreg, ARMREG_IP);
/* Alternative path: build the constant directly in DREG, then add SREG. */
254 code = mono_arm_emit_load_imm (code, dreg, imm);
255 ARM_ADD_REG_REG (code, dreg, dreg, sreg);
/*
 * emit_sub_imm:
 * Emit DREG = SREG - IMM, mirroring emit_big_add: single SUB when IMM
 * encodes as a rotated imm8, otherwise materialize the constant first.
 */
260 /* If dreg == sreg, this clobbers IP */
262 emit_sub_imm (guint8 *code, int dreg, int sreg, int imm)
264 int imm8, rot_amount;
265 if ((imm8 = mono_arm_is_rotated_imm8 (imm, &rot_amount)) >= 0) {
266 ARM_SUB_REG_IMM (code, dreg, sreg, imm8, rot_amount);
/* Immediate not encodable: go through the IP scratch register. */
270 code = mono_arm_emit_load_imm (code, ARMREG_IP, imm);
271 ARM_SUB_REG_REG (code, dreg, sreg, ARMREG_IP);
/* Alternative path: build the constant in DREG, then subtract SREG. */
273 code = mono_arm_emit_load_imm (code, dreg, imm);
274 ARM_SUB_REG_REG (code, dreg, dreg, sreg);
/*
 * emit_memcpy:
 * Emit a word-by-word copy of SIZE bytes from SREG+SOFFSET to
 * DREG+DOFFSET. Large copies (> 4 pointers) use an in-line countdown
 * loop clobbering r0-r3; smaller copies are fully unrolled using LR
 * as the transfer register. SIZE is expected to be word-aligned
 * (the trailing g_assert checks the residue is 0 — sub-word tail
 * handling, if any, is outside this excerpt).
 */
280 emit_memcpy (guint8 *code, int size, int dreg, int doffset, int sreg, int soffset)
282 /* we can use r0-r3, since this is called only for incoming args on the stack */
283 if (size > sizeof (gpointer) * 4) {
/* r0 = src cursor, r1 = dst cursor, r2 = remaining byte count. */
285 code = emit_big_add (code, ARMREG_R0, sreg, soffset);
286 code = emit_big_add (code, ARMREG_R1, dreg, doffset);
287 start_loop = code = mono_arm_emit_load_imm (code, ARMREG_R2, size);
288 ARM_LDR_IMM (code, ARMREG_R3, ARMREG_R0, 0);
289 ARM_STR_IMM (code, ARMREG_R3, ARMREG_R1, 0);
290 ARM_ADD_REG_IMM8 (code, ARMREG_R0, ARMREG_R0, 4);
291 ARM_ADD_REG_IMM8 (code, ARMREG_R1, ARMREG_R1, 4);
/* SUBS sets flags; loop back while the counter is non-zero. */
292 ARM_SUBS_REG_IMM8 (code, ARMREG_R2, ARMREG_R2, 4);
293 ARM_B_COND (code, ARMCOND_NE, 0);
294 arm_patch (code - 4, start_loop);
/* Unrolled path: only valid while every offset fits an imm12 encoding. */
297 if (arm_is_imm12 (doffset) && arm_is_imm12 (doffset + size) &&
298 arm_is_imm12 (soffset) && arm_is_imm12 (soffset + size)) {
300 ARM_LDR_IMM (code, ARMREG_LR, sreg, soffset);
301 ARM_STR_IMM (code, ARMREG_LR, dreg, doffset);
/* Offsets too large: rebase both cursors into r0/r1 and restart at 0. */
307 code = emit_big_add (code, ARMREG_R0, sreg, soffset);
308 code = emit_big_add (code, ARMREG_R1, dreg, doffset);
309 doffset = soffset = 0;
311 ARM_LDR_IMM (code, ARMREG_LR, ARMREG_R0, soffset);
312 ARM_STR_IMM (code, ARMREG_LR, ARMREG_R1, doffset);
/* Everything must have been copied in word-sized chunks. */
318 g_assert (size == 0);
/*
 * emit_call_reg:
 * Emit an indirect call through REG. Uses BLX where available;
 * the fallback sequence manually loads LR from PC and branches by
 * writing REG into PC (pre-ARMv5 style).
 */
323 emit_call_reg (guint8 *code, int reg)
326 ARM_BLX_REG (code, reg);
328 #ifdef USE_JUMP_TABLES
329 g_assert_not_reached ();
/* Fallback: lr = pc (points two instructions ahead), then pc = reg. */
331 ARM_MOV_REG_REG (code, ARMREG_LR, ARMREG_PC);
335 ARM_MOV_REG_REG (code, ARMREG_PC, reg);
/*
 * emit_call_seq:
 * Emit a patchable call sequence. With jump tables a patchable BL is
 * used; otherwise, for dynamic methods, the target is loaded from an
 * inline NULL slot (filled in later by the patcher) and called via IP.
 */
341 emit_call_seq (MonoCompile *cfg, guint8 *code)
343 #ifdef USE_JUMP_TABLES
344 code = mono_arm_patchable_bl (code, ARMCOND_AL);
346 if (cfg->method->dynamic) {
/* PC-relative load of the target address stored inline after the code. */
347 ARM_LDR_IMM (code, ARMREG_IP, ARMREG_PC, 0);
/* Placeholder target, patched at link time. */
349 *(gpointer*)code = NULL;
351 code = emit_call_reg (code, ARMREG_IP);
/*
 * mono_arm_patchable_b:
 * Emit a conditional branch whose target can be patched later.
 * With jump tables: load the target from a fresh jumptable entry and
 * BX through IP; otherwise emit a plain B with a zero displacement
 * for arm_patch () to fill in.
 */
360 mono_arm_patchable_b (guint8 *code, int cond)
362 #ifdef USE_JUMP_TABLES
365 jte = mono_jumptable_add_entry ();
366 code = mono_arm_load_jumptable_entry (code, jte, ARMREG_IP);
367 ARM_BX_COND (code, cond, ARMREG_IP);
369 ARM_B_COND (code, cond, 0);
/*
 * mono_arm_patchable_bl:
 * Emit a conditional call whose target can be patched later; the
 * jump-table variant calls through IP with BLX, the plain variant
 * emits a BL with a zero displacement for arm_patch ().
 */
375 mono_arm_patchable_bl (guint8 *code, int cond)
377 #ifdef USE_JUMP_TABLES
380 jte = mono_jumptable_add_entry ();
381 code = mono_arm_load_jumptable_entry (code, jte, ARMREG_IP);
382 ARM_BLX_REG_COND (code, cond, ARMREG_IP);
384 ARM_BL_COND (code, cond, 0);
389 #ifdef USE_JUMP_TABLES
/*
 * mono_arm_load_jumptable_entry_addr:
 * Load the address of jumptable entry JTE into REG using a
 * MOVW/MOVT pair (requires ARMv7-class movw/movt support).
 */
391 mono_arm_load_jumptable_entry_addr (guint8 *code, gpointer *jte, ARMReg reg)
393 ARM_MOVW_REG_IMM (code, reg, GPOINTER_TO_UINT(jte) & 0xffff);
394 ARM_MOVT_REG_IMM (code, reg, (GPOINTER_TO_UINT(jte) >> 16) & 0xffff);
/*
 * mono_arm_load_jumptable_entry:
 * Load the value stored in jumptable entry JTE into REG
 * (address first, then a dereferencing LDR).
 */
399 mono_arm_load_jumptable_entry (guint8 *code, gpointer* jte, ARMReg reg)
401 code = mono_arm_load_jumptable_entry_addr (code, jte, reg);
402 ARM_LDR_IMM (code, reg, reg, 0);
/*
 * emit_move_return_value:
 * After a call instruction INS, move the ABI return value into
 * INS->dreg. For float calls the handling depends on the FPU mode:
 * hard-float returns in d0/s0, soft ABI returns in r0/r1 and must be
 * transferred to a VFP register (FMSR/FMDRR) before use.
 */
408 emit_move_return_value (MonoCompile *cfg, MonoInst *ins, guint8 *code)
410 switch (ins->opcode) {
413 case OP_FCALL_MEMBASE:
415 MonoType *sig_ret = mini_type_get_underlying_type (NULL, ((MonoCallInst*)ins)->signature->ret);
416 if (sig_ret->type == MONO_TYPE_R4) {
/* R4 result: widen s0 to double precision in dreg. */
418 ARM_CVTS (code, ins->dreg, ARM_VFP_F0);
/* Soft ABI: r0 holds the raw float bits; move to VFP then widen. */
420 ARM_FMSR (code, ins->dreg, ARMREG_R0);
421 ARM_CVTS (code, ins->dreg, ins->dreg);
/* R8 result, hard-float: copy d0. */
425 ARM_CPYD (code, ins->dreg, ARM_VFP_D0);
/* R8 result, soft ABI: combine r0/r1 into the destination d-reg. */
427 ARM_FMDRR (code, ARMREG_R0, ARMREG_R1, ins->dreg);
440 * Emit code to push an LMF structure on the LMF stack.
441 * On arm, this is intermixed with the initialization of other fields of the structure.
/*
 * Three strategies for obtaining lmf_addr, fastest first:
 * 1) __aeabi_read_tp + known TLS offset (HAVE_AEABI_READ_TP),
 * 2) inlined pthread_getspecific for managed-to-native wrappers,
 * 3) a call to mono_get_lmf_addr ().
 * The result is expected in r0 in all three cases.
 */
444 emit_save_lmf (MonoCompile *cfg, guint8 *code, gint32 lmf_offset)
446 gboolean get_lmf_fast = FALSE;
449 #ifdef HAVE_AEABI_READ_TP
450 gint32 lmf_addr_tls_offset = mono_get_lmf_addr_tls_offset ();
452 if (lmf_addr_tls_offset != -1) {
455 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_INTERNAL_METHOD,
456 (gpointer)"__aeabi_read_tp");
457 code = emit_call_seq (cfg, code);
/* r0 = TLS base; index it with the cached LMF-addr slot offset. */
459 ARM_LDR_IMM (code, ARMREG_R0, ARMREG_R0, lmf_addr_tls_offset);
465 if (cfg->method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) {
468 /* Inline mono_get_lmf_addr () */
469 /* jit_tls = pthread_getspecific (mono_jit_tls_id); lmf_addr = &jit_tls->lmf; */
471 /* Load mono_jit_tls_id */
473 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_JIT_TLS_ID, NULL);
/* PC-relative load of the TLS key from an inline (patched) slot. */
474 ARM_LDR_IMM (code, ARMREG_R0, ARMREG_PC, 0);
476 *(gpointer*)code = NULL;
478 ARM_LDR_REG_REG (code, ARMREG_R0, ARMREG_PC, ARMREG_R0);
479 /* call pthread_getspecific () */
480 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_INTERNAL_METHOD,
481 (gpointer)"pthread_getspecific");
482 code = emit_call_seq (cfg, code);
483 /* lmf_addr = &jit_tls->lmf */
/* NOTE(review): the parameter lmf_offset is reassigned here for the
 * struct-offset computation — presumably a distinct local in the full
 * source; verify against the complete function. */
484 lmf_offset = MONO_STRUCT_OFFSET (MonoJitTlsData, lmf);
485 g_assert (arm_is_imm8 (lmf_offset));
486 ARM_ADD_REG_IMM (code, ARMREG_R0, ARMREG_R0, lmf_offset, 0);
/* Slow path: plain call, lmf_addr returned in r0. */
493 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_INTERNAL_METHOD,
494 (gpointer)"mono_get_lmf_addr");
495 code = emit_call_seq (cfg, code);
497 /* we build the MonoLMF structure on the stack - see mini-arm.h */
498 /* lmf_offset is the offset from the previous stack pointer,
499 * alloc_size is the total stack space allocated, so the offset
500 * of MonoLMF from the current stack ptr is alloc_size - lmf_offset.
501 * The pointer to the struct is put in r1 (new_lmf).
502 * ip is used as scratch
503 * The callee-saved registers are already in the MonoLMF structure
505 code = emit_big_add (code, ARMREG_R1, ARMREG_SP, lmf_offset);
506 /* r0 is the result from mono_get_lmf_addr () */
507 ARM_STR_IMM (code, ARMREG_R0, ARMREG_R1, MONO_STRUCT_OFFSET (MonoLMF, lmf_addr));
508 /* new_lmf->previous_lmf = *lmf_addr */
509 ARM_LDR_IMM (code, ARMREG_IP, ARMREG_R0, MONO_STRUCT_OFFSET (MonoLMF, previous_lmf));
510 ARM_STR_IMM (code, ARMREG_IP, ARMREG_R1, MONO_STRUCT_OFFSET (MonoLMF, previous_lmf))
511 /* *(lmf_addr) = r1 */
512 ARM_STR_IMM (code, ARMREG_R1, ARMREG_R0, MONO_STRUCT_OFFSET (MonoLMF, previous_lmf));
513 /* Skip method (only needed for trampoline LMF frames) */
514 ARM_STR_IMM (code, ARMREG_SP, ARMREG_R1, MONO_STRUCT_OFFSET (MonoLMF, sp));
515 ARM_STR_IMM (code, ARMREG_FP, ARMREG_R1, MONO_STRUCT_OFFSET (MonoLMF, fp));
516 /* save the current IP */
517 ARM_MOV_REG_REG (code, ARMREG_IP, ARMREG_PC);
518 ARM_STR_IMM (code, ARMREG_IP, ARMREG_R1, MONO_STRUCT_OFFSET (MonoLMF, ip));
/* Tell the precise GC the whole LMF area holds no managed references. */
520 for (i = 0; i < sizeof (MonoLMF); i += sizeof (mgreg_t))
521 mini_gc_set_slot_type_from_fp (cfg, lmf_offset + i, SLOT_NOREF);
/*
 * emit_float_args:
 * Load each recorded single-precision call argument from its stack
 * slot into its assigned hardware VFP register, growing the native
 * code buffer if needed. Updates *offset to the new code position.
 */
532 emit_float_args (MonoCompile *cfg, MonoCallInst *inst, guint8 *code, int *max_len, guint *offset)
536 for (list = inst->float_args; list; list = list->next) {
537 FloatArgData *fad = list->data;
538 MonoInst *var = get_vreg_to_inst (cfg, fad->vreg);
/* Does the slot offset fit directly in the FLDS immediate field? */
539 gboolean imm = arm_is_fpimm8 (var->inst_offset);
541 /* 4+1 insns for emit_big_add () and 1 for FLDS. */
/* Ensure the code buffer has room for the worst-case sequence. */
547 if (*offset + *max_len > cfg->code_size) {
548 cfg->code_size += *max_len;
549 cfg->native_code = g_realloc (cfg->native_code, cfg->code_size);
/* Re-derive the write cursor: the buffer may have moved on realloc. */
551 code = cfg->native_code + *offset;
/* Offset too large for FLDS: compute the address in LR first. */
555 code = emit_big_add (code, ARMREG_LR, var->inst_basereg, var->inst_offset);
556 ARM_FLDS (code, fad->hreg, ARMREG_LR, 0);
558 ARM_FLDS (code, fad->hreg, var->inst_basereg, var->inst_offset);
560 *offset = code - cfg->native_code;
/*
 * mono_arm_emit_vfp_scratch_save:
 * Spill VFP scratch register REG (must be vfp_scratch1/2) to its
 * reserved stack slot so its previous contents survive — needed on
 * armhf where d14/d15 are callee-saved (see comment at the
 * vfp_scratch1/2 definitions).
 */
567 mono_arm_emit_vfp_scratch_save (MonoCompile *cfg, guint8 *code, int reg)
571 g_assert (reg == vfp_scratch1 || reg == vfp_scratch2);
573 inst = (MonoInst *) cfg->arch.vfp_scratch_slots [reg == vfp_scratch1 ? 0 : 1];
/* Slot offset outside FSTD's immediate range: address it via LR. */
576 if (!arm_is_fpimm8 (inst->inst_offset)) {
577 code = emit_big_add (code, ARMREG_LR, inst->inst_basereg, inst->inst_offset);
578 ARM_FSTD (code, reg, ARMREG_LR, 0);
580 ARM_FSTD (code, reg, inst->inst_basereg, inst->inst_offset);
/*
 * mono_arm_emit_vfp_scratch_restore:
 * Reload VFP scratch register REG from the stack slot written by
 * mono_arm_emit_vfp_scratch_save (); exact mirror of the save path.
 */
587 mono_arm_emit_vfp_scratch_restore (MonoCompile *cfg, guint8 *code, int reg)
591 g_assert (reg == vfp_scratch1 || reg == vfp_scratch2);
593 inst = (MonoInst *) cfg->arch.vfp_scratch_slots [reg == vfp_scratch1 ? 0 : 1];
/* Slot offset outside FLDD's immediate range: address it via LR. */
596 if (!arm_is_fpimm8 (inst->inst_offset)) {
597 code = emit_big_add (code, ARMREG_LR, inst->inst_basereg, inst->inst_offset);
598 ARM_FLDD (code, reg, ARMREG_LR, 0);
600 ARM_FLDD (code, reg, inst->inst_basereg, inst->inst_offset);
609 * Emit code to pop an LMF structure from the LMF stack.
/*
 * Unlinks the current frame's MonoLMF: *lmf_addr = lmf->previous_lmf.
 * Small offsets address the LMF directly off the frame register;
 * otherwise its address is first computed into r2.
 */
612 emit_restore_lmf (MonoCompile *cfg, guint8 *code, gint32 lmf_offset)
616 if (lmf_offset < 32) {
617 basereg = cfg->frame_reg;
/* Offset too large for the imm forms below: rebase into r2. */
622 code = emit_big_add (code, ARMREG_R2, cfg->frame_reg, lmf_offset);
625 /* ip = previous_lmf */
626 ARM_LDR_IMM (code, ARMREG_IP, basereg, offset + MONO_STRUCT_OFFSET (MonoLMF, previous_lmf));
/* lr = lmf_addr */
628 ARM_LDR_IMM (code, ARMREG_LR, basereg, offset + MONO_STRUCT_OFFSET (MonoLMF, lmf_addr));
629 /* *(lmf_addr) = previous_lmf */
630 ARM_STR_IMM (code, ARMREG_IP, ARMREG_LR, MONO_STRUCT_OFFSET (MonoLMF, previous_lmf));
635 #endif /* #ifndef DISABLE_JIT */
638 * mono_arch_get_argument_info:
639 * @csig: a method signature
640 * @param_count: the number of parameters to consider
641 * @arg_info: an array to store the result infos
643 * Gathers information on parameters such as size, alignment and
644 * padding. arg_info should be large enough to hold param_count + 1 entries.
646 * Returns the size of the activation frame.
/*
 * Walks the signature accumulating per-argument size/alignment/padding
 * into ARG_INFO (entry 0 describes the return/this area, entries 1..n
 * the parameters) and returns the total frame size, padded to
 * MONO_ARCH_FRAME_ALIGNMENT.
 */
649 mono_arch_get_argument_info (MonoGenericSharingContext *gsctx, MonoMethodSignature *csig, int param_count, MonoJitArgumentInfo *arg_info)
651 int k, frame_size = 0;
652 guint32 size, align, pad;
/* A struct return is passed via a hidden pointer: reserve one word. */
656 t = mini_type_get_underlying_type (gsctx, csig->ret);
657 if (MONO_TYPE_ISSTRUCT (t)) {
658 frame_size += sizeof (gpointer);
662 arg_info [0].offset = offset;
/* Reserve a word for the implicit 'this' argument. */
665 frame_size += sizeof (gpointer);
669 arg_info [0].size = frame_size;
671 for (k = 0; k < param_count; k++) {
672 size = mini_type_stack_size_full (NULL, csig->params [k], &align, csig->pinvoke);
674 /* ignore alignment for now */
/* Round frame_size up to this argument's alignment; record the pad. */
677 frame_size += pad = (align - (frame_size & (align - 1))) & (align - 1);
678 arg_info [k].pad = pad;
680 arg_info [k + 1].pad = 0;
681 arg_info [k + 1].size = size;
683 arg_info [k + 1].offset = offset;
/* Final round-up of the whole frame to the ABI frame alignment. */
687 align = MONO_ARCH_FRAME_ALIGNMENT;
688 frame_size += pad = (align - (frame_size & (align - 1))) & (align - 1);
689 arg_info [k].pad = pad;
694 #define MAX_ARCH_DELEGATE_PARAMS 3
/*
 * get_delegate_invoke_impl:
 * Generate the tiny delegate-invoke thunk. has_target variant: load
 * method_ptr into ip, replace 'this' (r0) with delegate->target, jump.
 * No-target variant: load method_ptr, shift the register arguments
 * down by one slot (dropping the delegate), jump.
 * NOTE(review): param_count is declared gboolean but used as an int
 * count below — works because gboolean is an int typedef, but the
 * type is misleading; later upstream changed it. Confirm before fixing.
 */
697 get_delegate_invoke_impl (gboolean has_target, gboolean param_count, guint32 *code_size)
699 guint8 *code, *start;
702 start = code = mono_global_codeman_reserve (12);
704 /* Replace the this argument with the target */
705 ARM_LDR_IMM (code, ARMREG_IP, ARMREG_R0, MONO_STRUCT_OFFSET (MonoDelegate, method_ptr));
706 ARM_LDR_IMM (code, ARMREG_R0, ARMREG_R0, MONO_STRUCT_OFFSET (MonoDelegate, target));
/* Tail-jump to the target method. */
707 ARM_MOV_REG_REG (code, ARMREG_PC, ARMREG_IP);
709 g_assert ((code - start) <= 12);
711 mono_arch_flush_icache (start, 12);
/* No-target case: 2 fixed insns + 1 reg-shuffle insn per parameter. */
715 size = 8 + param_count * 4;
716 start = code = mono_global_codeman_reserve (size);
718 ARM_LDR_IMM (code, ARMREG_IP, ARMREG_R0, MONO_STRUCT_OFFSET (MonoDelegate, method_ptr));
719 /* slide down the arguments */
720 for (i = 0; i < param_count; ++i) {
721 ARM_MOV_REG_REG (code, (ARMREG_R0 + i), (ARMREG_R0 + i + 1));
723 ARM_MOV_REG_REG (code, ARMREG_PC, ARMREG_IP);
725 g_assert ((code - start) <= size);
727 mono_arch_flush_icache (start, size);
/* Report actual generated length to the caller (AOT bookkeeping). */
731 *code_size = code - start;
737 * mono_arch_get_delegate_invoke_impls:
739 * Return a list of MonoAotTrampInfo structures for the delegate invoke impl
/*
 * Builds the has-target thunk plus one no-target thunk per arity
 * (0..MAX_ARCH_DELEGATE_PARAMS), each wrapped with its AOT name.
 */
743 mono_arch_get_delegate_invoke_impls (void)
751 code = get_delegate_invoke_impl (TRUE, 0, &code_len);
752 res = g_slist_prepend (res, mono_tramp_info_create ("delegate_invoke_impl_has_target", code, code_len, NULL, NULL));
754 for (i = 0; i <= MAX_ARCH_DELEGATE_PARAMS; ++i) {
755 code = get_delegate_invoke_impl (FALSE, i, &code_len);
756 tramp_name = g_strdup_printf ("delegate_invoke_impl_target_%d", i);
757 res = g_slist_prepend (res, mono_tramp_info_create (tramp_name, code, code_len, NULL, NULL));
/*
 * mono_arch_get_delegate_invoke_impl:
 * Return (and cache) the delegate-invoke thunk matching SIG. Falls
 * back to NULL for unsupported shapes (struct returns, too many or
 * non-regsize parameters). Thunks come from the AOT image when
 * available, otherwise they are JITted via get_delegate_invoke_impl ().
 * Caches are guarded by the arch mutex.
 */
765 mono_arch_get_delegate_invoke_impl (MonoMethodSignature *sig, gboolean has_target)
767 guint8 *code, *start;
770 /* FIXME: Support more cases */
771 sig_ret = mini_type_get_underlying_type (NULL, sig->ret);
772 if (MONO_TYPE_ISSTRUCT (sig_ret))
/* has_target path: a single shared thunk, cached in a static. */
776 static guint8* cached = NULL;
777 mono_mini_arch_lock ();
779 mono_mini_arch_unlock ();
784 start = mono_aot_get_trampoline ("delegate_invoke_impl_has_target");
786 start = get_delegate_invoke_impl (TRUE, 0, NULL);
788 mono_mini_arch_unlock ();
/* No-target path: one cached thunk per parameter count. */
791 static guint8* cache [MAX_ARCH_DELEGATE_PARAMS + 1] = {NULL};
794 if (sig->param_count > MAX_ARCH_DELEGATE_PARAMS)
/* All parameters must fit in a core register for the reg-slide thunk. */
796 for (i = 0; i < sig->param_count; ++i)
797 if (!mono_is_regsize_var (sig->params [i]))
800 mono_mini_arch_lock ();
801 code = cache [sig->param_count];
803 mono_mini_arch_unlock ();
808 char *name = g_strdup_printf ("delegate_invoke_impl_target_%d", sig->param_count);
809 start = mono_aot_get_trampoline (name);
812 start = get_delegate_invoke_impl (FALSE, sig->param_count, NULL);
814 cache [sig->param_count] = start;
815 mono_mini_arch_unlock ();
/*
 * mono_arch_get_delegate_virtual_invoke_impl:
 * Virtual-delegate invoke thunk factory; body not visible in this
 * excerpt (presumably returns NULL / unsupported on ARM — confirm).
 */
823 mono_arch_get_delegate_virtual_invoke_impl (MonoMethodSignature *sig, MonoMethod *method, int offset, gboolean load_imt_reg)
/*
 * mono_arch_get_this_arg_from_call:
 * Recover the managed 'this' pointer at a call site: on ARM it is
 * always passed in r0, so just read it from the saved register set.
 */
829 mono_arch_get_this_arg_from_call (mgreg_t *regs, guint8 *code)
831 return (gpointer)regs [ARMREG_R0];
835 * Initialize the cpu to execute managed code.
/* Records the ABI alignment of 64-bit integers; when cross-compiling,
 * the metadata layout code is told the target's i8 alignment too. */
838 mono_arch_cpu_init (void)
840 i8_align = MONO_ABI_ALIGNOF (gint64);
841 #ifdef MONO_CROSS_COMPILE
842 /* Need to set the alignment of i8 since it can be different on the target */
843 #ifdef TARGET_ANDROID
845 mono_type_set_alignment (MONO_TYPE_I8, i8_align);
/*
 * create_function_wrapper:
 * JIT a small thunk that captures the full register state into a
 * MonoContext built on the stack, calls FUNCTION (ctx) — used for the
 * soft-debugger single-step/breakpoint callbacks — and then restores
 * every register (including pc) from the possibly-modified context.
 */
851 create_function_wrapper (gpointer function)
853 guint8 *start, *code;
855 start = code = mono_global_codeman_reserve (96);
858 * Construct the MonoContext structure on the stack.
861 ARM_SUB_REG_IMM8 (code, ARMREG_SP, ARMREG_SP, sizeof (MonoContext));
863 /* save ip, lr and pc into their corresponding ctx.regs slots. */
864 ARM_STR_IMM (code, ARMREG_IP, ARMREG_SP, MONO_STRUCT_OFFSET (MonoContext, regs) + sizeof (mgreg_t) * ARMREG_IP);
865 ARM_STR_IMM (code, ARMREG_LR, ARMREG_SP, MONO_STRUCT_OFFSET (MonoContext, regs) + 4 * ARMREG_LR);
/* pc slot also gets lr: the wrapper "returns" to the interrupted insn. */
866 ARM_STR_IMM (code, ARMREG_LR, ARMREG_SP, MONO_STRUCT_OFFSET (MonoContext, regs) + 4 * ARMREG_PC);
868 /* save r0..r10 and fp */
869 ARM_ADD_REG_IMM8 (code, ARMREG_IP, ARMREG_SP, MONO_STRUCT_OFFSET (MonoContext, regs));
870 ARM_STM (code, ARMREG_IP, 0x0fff);
872 /* now we can update fp. */
873 ARM_MOV_REG_REG (code, ARMREG_FP, ARMREG_SP);
875 /* make ctx.esp hold the actual value of sp at the beginning of this method. */
876 ARM_ADD_REG_IMM8 (code, ARMREG_R0, ARMREG_FP, sizeof (MonoContext));
877 ARM_STR_IMM (code, ARMREG_R0, ARMREG_IP, 4 * ARMREG_SP);
878 ARM_STR_IMM (code, ARMREG_R0, ARMREG_FP, MONO_STRUCT_OFFSET (MonoContext, regs) + 4 * ARMREG_SP);
880 /* make ctx.eip hold the address of the call. */
881 ARM_SUB_REG_IMM8 (code, ARMREG_LR, ARMREG_LR, 4);
882 ARM_STR_IMM (code, ARMREG_LR, ARMREG_SP, MONO_STRUCT_OFFSET (MonoContext, pc));
884 /* r0 now points to the MonoContext */
885 ARM_MOV_REG_REG (code, ARMREG_R0, ARMREG_FP);
/* Load the callee address: via jumptable entry, or an inline slot. */
888 #ifdef USE_JUMP_TABLES
890 gpointer *jte = mono_jumptable_add_entry ();
891 code = mono_arm_load_jumptable_entry (code, jte, ARMREG_IP);
895 ARM_LDR_IMM (code, ARMREG_IP, ARMREG_PC, 0);
897 *(gpointer*)code = function;
900 ARM_BLX_REG (code, ARMREG_IP);
902 /* we're back; save ctx.eip and ctx.esp into the corresponding regs slots. */
903 ARM_LDR_IMM (code, ARMREG_R0, ARMREG_FP, MONO_STRUCT_OFFSET (MonoContext, pc));
904 ARM_STR_IMM (code, ARMREG_R0, ARMREG_FP, MONO_STRUCT_OFFSET (MonoContext, regs) + 4 * ARMREG_LR);
905 ARM_STR_IMM (code, ARMREG_R0, ARMREG_FP, MONO_STRUCT_OFFSET (MonoContext, regs) + 4 * ARMREG_PC);
907 /* make ip point to the regs array, then restore everything, including pc. */
908 ARM_ADD_REG_IMM8 (code, ARMREG_IP, ARMREG_FP, MONO_STRUCT_OFFSET (MonoContext, regs));
909 ARM_LDM (code, ARMREG_IP, 0xffff);
911 mono_arch_flush_icache (start, code - start);
917 * Initialize architecture specific code.
/*
 * One-time backend setup: arch mutex, soft-debugger wrappers and
 * trigger pages, AOT icall registration, then CPU/FPU feature
 * detection (hwcap-based, overridable via MONO_CPU_ARCH).
 */
920 mono_arch_init (void)
922 const char *cpu_arch;
924 mono_mutex_init_recursive (&mini_arch_mutex);
925 #ifdef MONO_ARCH_SOFT_DEBUG_SUPPORTED
926 if (mini_get_debug_options ()->soft_breakpoints) {
927 single_step_func_wrapper = create_function_wrapper (debugger_agent_single_step_from_context);
928 breakpoint_func_wrapper = create_function_wrapper (debugger_agent_breakpoint_from_context);
/* Hardware-assisted stepping: reads from these pages fault when the
 * pages are protected, which is how breakpoints/single-step trigger. */
933 ss_trigger_page = mono_valloc (NULL, mono_pagesize (), MONO_MMAP_READ|MONO_MMAP_32BIT);
934 bp_trigger_page = mono_valloc (NULL, mono_pagesize (), MONO_MMAP_READ|MONO_MMAP_32BIT);
/* bp page starts inaccessible: any enabled breakpoint read faults. */
935 mono_mprotect (bp_trigger_page, mono_pagesize (), 0);
938 mono_aot_register_jit_icall ("mono_arm_throw_exception", mono_arm_throw_exception);
939 mono_aot_register_jit_icall ("mono_arm_throw_exception_by_token", mono_arm_throw_exception_by_token);
940 mono_aot_register_jit_icall ("mono_arm_resume_unwind", mono_arm_resume_unwind);
941 #if defined(ENABLE_GSHAREDVT)
942 mono_aot_register_jit_icall ("mono_arm_start_gsharedvt_call", mono_arm_start_gsharedvt_call);
945 #if defined(__ARM_EABI__)
946 eabi_supported = TRUE;
/* Select the FPU mode baked in at configure time. */
949 #if defined(ARM_FPU_VFP_HARD)
950 arm_fpu = MONO_ARM_FPU_VFP_HARD;
952 arm_fpu = MONO_ARM_FPU_VFP;
954 #if defined(ARM_FPU_NONE) && !defined(__APPLE__)
955 /* If we're compiling with a soft float fallback and it
956 turns out that no VFP unit is available, we need to
957 switch to soft float. We don't do this for iOS, since
958 iOS devices always have a VFP unit. */
959 if (!mono_hwcap_arm_has_vfp)
960 arm_fpu = MONO_ARM_FPU_NONE;
964 v5_supported = mono_hwcap_arm_is_v5;
965 v6_supported = mono_hwcap_arm_is_v6;
966 v7_supported = mono_hwcap_arm_is_v7;
967 v7s_supported = mono_hwcap_arm_is_v7s;
969 #if defined(__APPLE__)
970 /* iOS is special-cased here because we don't yet
971 have a way to properly detect CPU features on it. */
972 thumb_supported = TRUE;
975 thumb_supported = mono_hwcap_arm_has_thumb;
976 thumb2_supported = mono_hwcap_arm_has_thumb2;
979 /* Format: armv(5|6|7[s])[-thumb[2]] */
980 cpu_arch = g_getenv ("MONO_CPU_ARCH");
982 /* Do this here so it overrides any detection. */
984 if (strncmp (cpu_arch, "armv", 4) == 0) {
/* Version digit comparison: "armv6" implies v5 support, etc. */
985 v5_supported = cpu_arch [4] >= '5';
986 v6_supported = cpu_arch [4] >= '6';
987 v7_supported = cpu_arch [4] >= '7';
988 v7s_supported = strncmp (cpu_arch, "armv7s", 6) == 0;
991 thumb_supported = strstr (cpu_arch, "thumb") != NULL;
992 thumb2_supported = strstr (cpu_arch, "thumb2") != NULL;
997 * Cleanup architecture specific code.
/* Teardown counterpart of mono_arch_init (); body not visible here. */
1000 mono_arch_cleanup (void)
1005 * This function returns the optimizations supported on this cpu.
1008 mono_arch_cpu_optimizations (guint32 *exclude_mask)
1010 /* no arm-specific optimizations yet */
1016 * This function tests for all SIMD functions supported.
1018 * Returns a bitmask corresponding to all supported versions.
1022 mono_arch_cpu_enumerate_simd_versions (void)
1024 /* SIMD is currently unimplemented */
/* Decide whether OPCODE must be emulated in software on this CPU
 * (v7s adds hardware integer division — presumably what the elided
 * switch checks; confirm against the full source). */
1032 mono_arch_opcode_needs_emulation (MonoCompile *cfg, int opcode)
1034 if (v7s_supported) {
1048 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
/* TRUE when the runtime selected full software floating point. */
1050 mono_arch_is_soft_float (void)
1052 return arm_fpu == MONO_ARM_FPU_NONE;
/* TRUE when the hard-float (VFP argument registers) ABI is in use. */
1057 mono_arm_is_hard_float (void)
1059 return arm_fpu == MONO_ARM_FPU_VFP_HARD;
/*
 * is_regsize_var:
 * TRUE when a variable of type T fits in a single 32-bit core
 * register (pointers, object references, small ints, non-valuetype
 * generic instances), so the register allocator may place it in one.
 */
1063 is_regsize_var (MonoGenericSharingContext *gsctx, MonoType *t) {
1066 t = mini_type_get_underlying_type (gsctx, t);
1073 case MONO_TYPE_FNPTR:
1075 case MONO_TYPE_OBJECT:
1076 case MONO_TYPE_STRING:
1077 case MONO_TYPE_CLASS:
1078 case MONO_TYPE_SZARRAY:
1079 case MONO_TYPE_ARRAY:
/* Generic instances are register-sized only when reference types. */
1081 case MONO_TYPE_GENERICINST:
1082 if (!mono_type_generic_inst_is_valuetype (t))
1085 case MONO_TYPE_VALUETYPE:
/*
 * mono_arch_get_allocatable_int_vars:
 * Collect the method's variables that are eligible for global
 * register allocation: live (non-degenerate range), not volatile or
 * address-taken, plain locals/arguments, and register-sized.
 * Returns them sorted for the allocator.
 */
1092 mono_arch_get_allocatable_int_vars (MonoCompile *cfg)
1097 for (i = 0; i < cfg->num_varinfo; i++) {
1098 MonoInst *ins = cfg->varinfo [i];
1099 MonoMethodVar *vmv = MONO_VARINFO (cfg, i);
/* Skip unused/dead variables (empty live range). */
1102 if (vmv->range.first_use.abs_pos >= vmv->range.last_use.abs_pos)
1105 if (ins->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT) || (ins->opcode != OP_LOCAL && ins->opcode != OP_ARG))
1108 /* we can only allocate 32 bit values */
1109 if (is_regsize_var (cfg->generic_sharing_context, ins->inst_vtype)) {
1110 g_assert (MONO_VARINFO (cfg, i)->reg == -1);
1111 g_assert (i == vmv->idx);
1112 vars = mono_varlist_insert_sorted (cfg, vars, vmv, FALSE);
1119 #define USE_EXTRA_TEMPS 0
/*
 * mono_arch_get_global_int_regs:
 * Return the list of callee-saved core registers available to the
 * global register allocator, honoring frame-pointer omission, the
 * iphone ABI (r7 is the frame pointer there), and the reservation of
 * V5 for the vtable/rgctx/IMT argument.
 */
1122 mono_arch_get_global_int_regs (MonoCompile *cfg)
1126 mono_arch_compute_omit_fp (cfg);
1129 * FIXME: Interface calls might go through a static rgctx trampoline which
1130 * sets V5, but it doesn't save it, so we need to save it ourselves, and
1133 if (cfg->flags & MONO_CFG_HAS_CALLS)
1134 cfg->uses_rgctx_reg = TRUE;
/* FP becomes a general-purpose register when the frame pointer is omitted. */
1136 if (cfg->arch.omit_fp)
1137 regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_FP));
1138 regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_V1));
1139 regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_V2));
1140 regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_V3));
1142 /* V4=R7 is used as a frame pointer, but V7=R10 is preserved */
1143 regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_V7));
1145 regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_V4));
1146 if (!(cfg->compile_aot || cfg->uses_rgctx_reg || COMPILE_LLVM (cfg)))
1147 /* V5 is reserved for passing the vtable/rgctx/IMT method */
1148 regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_V5));
1149 /*regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_V6));*/
1150 /*regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_V7));*/
1156 * mono_arch_regalloc_cost:
1158 * Return the cost, in number of memory references, of the action of
1159 * allocating the variable VMV into a register during global register
/* allocation (body elided in this excerpt). */
1163 mono_arch_regalloc_cost (MonoCompile *cfg, MonoMethodVar *vmv)
1169 #endif /* #ifndef DISABLE_JIT */
/* Fallback so the __GNUC_PREREQ version checks below compile on
 * toolchains that do not define the macro. */
1171 #ifndef __GNUC_PREREQ
1172 #define __GNUC_PREREQ(maj, min) (0)
/*
 * mono_arch_flush_icache:
 * Make freshly generated code at CODE (SIZE bytes) visible to the
 * instruction stream. Platform-dependent: Darwin's
 * sys_icache_invalidate, GCC's __builtin___clear_cache /
 * __clear_cache, or a raw cacheflush syscall on Android/Linux.
 */
1176 mono_arch_flush_icache (guint8 *code, gint size)
1178 #if defined(__native_client__)
1179 // For Native Client we don't have to flush i-cache here,
1180 // as it's being done by dyncode interface.
1183 #ifdef MONO_CROSS_COMPILE
1185 sys_icache_invalidate (code, size);
1186 #elif __GNUC_PREREQ(4, 3)
1187 __builtin___clear_cache (code, code + size);
1188 #elif __GNUC_PREREQ(4, 1)
1189 __clear_cache (code, code + size);
1190 #elif defined(PLATFORM_ANDROID)
/* ARM EABI cacheflush syscall number. */
1191 const int syscall = 0xf0002;
1199 : "r" (code), "r" (code + size), "r" (syscall)
1200 : "r0", "r1", "r7", "r2"
/* Legacy Linux path: OABI sys_cacheflush via swi. */
1203 __asm __volatile ("mov r0, %0\n"
1206 "swi 0x9f0002 @ sys_cacheflush"
1208 : "r" (code), "r" (code + size), "r" (0)
1209 : "r0", "r1", "r3" );
1211 #endif /* !__native_client__ */
/* Argument-passing classification (ArgStorage) plus the per-argument
 * (ArgInfo) and per-signature (CallInfo) descriptors used by
 * get_call_info (); declarations are partially elided in this excerpt. */
1222 RegTypeStructByAddr,
1223 /* gsharedvt argument passed by addr in greg */
1224 RegTypeGSharedVtInReg,
1225 /* gsharedvt argument passed by addr on stack */
1226 RegTypeGSharedVtOnStack,
1231 guint16 vtsize; /* in param area */
1235 guint8 size : 4; /* 1, 2, 4, 8, or regs used by RegTypeStructByVal */
/* Total stack space consumed by the outgoing arguments. */
1240 guint32 stack_usage;
/* TRUE when a valuetype return is passed via a hidden address arg. */
1241 gboolean vtype_retaddr;
1242 /* The index of the vret arg in the argument list */
/*
 * add_general:
 * Assign the next integer argument to a core register or the stack.
 * SIMPLE (word-sized) arguments take the next free register r0-r3 or
 * fall back to the caller's stack frame. 64-bit arguments may be
 * split across r3 + stack (EABI with 4-byte i8 alignment), pushed
 * entirely to the stack, or placed in an (aligned) register pair.
 * GR and STACK_SIZE are advanced accordingly.
 */
1252 add_general (guint *gr, guint *stack_size, ArgInfo *ainfo, gboolean simple)
1255 if (*gr > ARMREG_R3) {
1257 ainfo->offset = *stack_size;
1258 ainfo->reg = ARMREG_SP; /* in the caller */
1259 ainfo->storage = RegTypeBase;
1262 ainfo->storage = RegTypeGeneral;
/* i8 aligned to 4 means a 64-bit value may straddle reg/stack. */
1269 split = i8_align == 4;
1274 if (*gr == ARMREG_R3 && split) {
1275 /* first word in r3 and the second on the stack */
1276 ainfo->offset = *stack_size;
1277 ainfo->reg = ARMREG_SP; /* in the caller */
1278 ainfo->storage = RegTypeBaseGen;
1280 } else if (*gr >= ARMREG_R3) {
1281 if (eabi_supported) {
1282 /* darwin aligns longs to 4 byte only */
1283 if (i8_align == 8) {
1288 ainfo->offset = *stack_size;
1289 ainfo->reg = ARMREG_SP; /* in the caller */
1290 ainfo->storage = RegTypeBase;
/* EABI requires 64-bit values in an even/odd register pair. */
1293 if (eabi_supported) {
1294 if (i8_align == 8 && ((*gr) & 1))
1297 ainfo->storage = RegTypeIRegPair;
/*
 * add_float:
 * Assign the next floating point argument under the VFP (hard-float)
 * rules: singles may back-fill a spare s-register left by double
 * alignment, doubles consume an even-aligned s-register pair, and
 * once s0-s15 are exhausted arguments go to the stack.
 */
1306 add_float (guint *fpr, guint *stack_size, ArgInfo *ainfo, gboolean is_double, gint *float_spare)
1309 * If we're calling a function like this:
1311 * void foo(float a, double b, float c)
1313 * We pass a in s0 and b in d1. That leaves us
1314 * with s1 being unused. The armhf ABI recognizes
1315 * this and requires register assignment to then
1316 * use that for the next single-precision arg,
1317 * i.e. c in this example. So float_spare either
1318 * tells us which reg to use for the next single-
1319 * precision arg, or it's -1, meaning use *fpr.
1321 * Note that even though most of the JIT speaks
1322 * double-precision, fpr represents single-
1323 * precision registers.
1325 * See parts 5.5 and 6.1.2 of the AAPCS for how
1329 if (*fpr < ARM_VFP_F16 || (!is_double && *float_spare >= 0)) {
1330 ainfo->storage = RegTypeFP;
1334 * If we're passing a double-precision value
1335 * and *fpr is odd (e.g. it's s1, s3, ...)
1336 * we need to use the next even register. So
1337 * we mark the current *fpr as a spare that
1338 * can be used for the next single-precision
1342 *float_spare = *fpr;
1347 * At this point, we have an even register
1348 * so we assign that and move along.
1352 } else if (*float_spare >= 0) {
1354 * We're passing a single-precision value
1355 * and it looks like a spare single-
1356 * precision register is available. Let's
1360 ainfo->reg = *float_spare;
1364 * If we hit this branch, we're passing a
1365 * single-precision value and we can simply
1366 * use the next available register.
1374 * We've exhausted available floating point
1375 * regs, so pass the rest on the stack.
1383 ainfo->offset = *stack_size;
1384 ainfo->reg = ARMREG_SP;
1385 ainfo->storage = RegTypeBase;
/*
 * get_call_info:
 *   Compute the ARM calling-convention layout for 'sig': where the return
 *   value and each argument live (core register, register pair, VFP
 *   register, by-value struct partially/fully in registers, gsharedvt
 *   by-ref, or caller stack), plus the total caller stack usage.
 *   The CallInfo is allocated from 'mp' when one is supplied, otherwise
 *   from the heap (the caller then owns and frees it).
 * NOTE(review): intermediate source lines are missing from this extract;
 * the comments below only describe the visible code.
 */
1392 get_call_info (MonoGenericSharingContext *gsctx, MonoMemPool *mp, MonoMethodSignature *sig)
1394 guint i, gr, fpr, pstart;
1396 int n = sig->hasthis + sig->param_count;
1397 MonoType *simpletype;
1398 guint32 stack_size = 0;
1400 gboolean is_pinvoke = sig->pinvoke;
/* One trailing ArgInfo per formal argument (plus 'this'). */
1404 cinfo = mono_mempool_alloc0 (mp, sizeof (CallInfo) + (sizeof (ArgInfo) * n));
1406 cinfo = g_malloc0 (sizeof (CallInfo) + (sizeof (ArgInfo) * n));
/* Decide first whether the return value needs a hidden return-address
 * argument (vtype_retaddr) or fits by value in registers. */
1413 t = mini_type_get_underlying_type (gsctx, sig->ret);
1414 if (MONO_TYPE_ISSTRUCT (t)) {
1417 if (is_pinvoke && mono_class_native_size (mono_class_from_mono_type (t), &align) <= sizeof (gpointer)) {
1418 cinfo->ret.storage = RegTypeStructByVal;
1420 cinfo->vtype_retaddr = TRUE;
1422 } else if (!(t->type == MONO_TYPE_GENERICINST && !mono_type_generic_inst_is_valuetype (t)) && mini_is_gsharedvt_type_gsctx (gsctx, t)) {
1423 cinfo->vtype_retaddr = TRUE;
1429 * To simplify get_this_arg_reg () and LLVM integration, emit the vret arg after
1430 * the first argument, allowing 'this' to be always passed in the first arg reg.
1431 * Also do this if the first argument is a reference type, since virtual calls
1432 * are sometimes made using calli without sig->hasthis set, like in the delegate
1435 if (cinfo->vtype_retaddr && !is_pinvoke && (sig->hasthis || (sig->param_count > 0 && MONO_TYPE_IS_REFERENCE (mini_type_get_underlying_type (gsctx, sig->params [0]))))) {
1437 add_general (&gr, &stack_size, cinfo->args + 0, TRUE);
1439 add_general (&gr, &stack_size, &cinfo->args [sig->hasthis + 0], TRUE);
/* vret arg goes second, after 'this'/the first reference argument. */
1443 add_general (&gr, &stack_size, &cinfo->ret, TRUE);
1444 cinfo->vret_arg_index = 1;
1448 add_general (&gr, &stack_size, cinfo->args + 0, TRUE);
1452 if (cinfo->vtype_retaddr)
1453 add_general (&gr, &stack_size, &cinfo->ret, TRUE);
/* Lay out the remaining formal parameters. */
1456 DEBUG(printf("params: %d\n", sig->param_count));
1457 for (i = pstart; i < sig->param_count; ++i) {
1458 ArgInfo *ainfo = &cinfo->args [n];
1460 if ((sig->call_convention == MONO_CALL_VARARG) && (i == sig->sentinelpos)) {
1461 /* Prevent implicit arguments and sig_cookie from
1462 being passed in registers */
1465 /* Emit the signature cookie just before the implicit arguments */
1466 add_general (&gr, &stack_size, &cinfo->sig_cookie, TRUE);
1468 DEBUG(printf("param %d: ", i));
1469 if (sig->params [i]->byref) {
1470 DEBUG(printf("byref\n"));
1471 add_general (&gr, &stack_size, ainfo, TRUE);
1475 simpletype = mini_type_get_underlying_type (gsctx, sig->params [i]);
1476 switch (simpletype->type) {
1477 case MONO_TYPE_BOOLEAN:
1480 cinfo->args [n].size = 1;
1481 add_general (&gr, &stack_size, ainfo, TRUE);
1484 case MONO_TYPE_CHAR:
1487 cinfo->args [n].size = 2;
1488 add_general (&gr, &stack_size, ainfo, TRUE);
1493 cinfo->args [n].size = 4;
1494 add_general (&gr, &stack_size, ainfo, TRUE);
1500 case MONO_TYPE_FNPTR:
1501 case MONO_TYPE_CLASS:
1502 case MONO_TYPE_OBJECT:
1503 case MONO_TYPE_STRING:
1504 case MONO_TYPE_SZARRAY:
1505 case MONO_TYPE_ARRAY:
1506 cinfo->args [n].size = sizeof (gpointer);
1507 add_general (&gr, &stack_size, ainfo, TRUE);
1510 case MONO_TYPE_GENERICINST:
1511 if (!mono_type_generic_inst_is_valuetype (simpletype)) {
/* Reference-typed generic instance: passed like any pointer. */
1512 cinfo->args [n].size = sizeof (gpointer);
1513 add_general (&gr, &stack_size, ainfo, TRUE);
1517 if (mini_is_gsharedvt_type_gsctx (gsctx, simpletype)) {
1518 /* gsharedvt arguments are passed by ref */
1519 g_assert (mini_is_gsharedvt_type_gsctx (gsctx, simpletype));
1520 add_general (&gr, &stack_size, ainfo, TRUE);
1521 switch (ainfo->storage) {
1522 case RegTypeGeneral:
1523 ainfo->storage = RegTypeGSharedVtInReg;
1526 ainfo->storage = RegTypeGSharedVtOnStack;
1529 g_assert_not_reached ();
1535 case MONO_TYPE_TYPEDBYREF:
1536 case MONO_TYPE_VALUETYPE: {
/* Value type: compute its (native) size, round up to whole words,
 * and place the leading words in registers with the overflow on the
 * stack (RegTypeStructByVal). */
1542 if (simpletype->type == MONO_TYPE_TYPEDBYREF) {
1543 size = sizeof (MonoTypedRef);
1544 align = sizeof (gpointer);
1546 MonoClass *klass = mono_class_from_mono_type (sig->params [i]);
1548 size = mono_class_native_size (klass, &align);
1550 size = mini_type_stack_size_full (gsctx, simpletype, &align, FALSE);
1552 DEBUG(printf ("load %d bytes struct\n", size));
1555 align_size += (sizeof (gpointer) - 1);
1556 align_size &= ~(sizeof (gpointer) - 1);
1557 nwords = (align_size + sizeof (gpointer) -1 ) / sizeof (gpointer);
1558 ainfo->storage = RegTypeStructByVal;
1559 ainfo->struct_size = size;
1560 /* FIXME: align stack_size if needed */
1561 if (eabi_supported) {
1562 if (align >= 8 && (gr & 1))
1565 if (gr > ARMREG_R3) {
1567 ainfo->vtsize = nwords;
1569 int rest = ARMREG_R3 - gr + 1;
1570 int n_in_regs = rest >= nwords? nwords: rest;
1572 ainfo->size = n_in_regs;
1573 ainfo->vtsize = nwords - n_in_regs;
1576 nwords -= n_in_regs;
1578 if (sig->call_convention == MONO_CALL_VARARG)
1579 /* This matches the alignment in mono_ArgIterator_IntGetNextArg () */
1580 stack_size = ALIGN_TO (stack_size, align);
1581 ainfo->offset = stack_size;
1582 /*g_print ("offset for arg %d at %d\n", n, stack_size);*/
1583 stack_size += nwords * sizeof (gpointer);
1590 add_general (&gr, &stack_size, ainfo, FALSE);
/* FP arguments: VFP registers on hard-float, core registers otherwise
 * (NOTE(review): the surrounding IS_HARD_FLOAT conditionals are among
 * the lines missing from this extract). */
1597 add_float (&fpr, &stack_size, ainfo, FALSE, &float_spare);
1599 add_general (&gr, &stack_size, ainfo, TRUE);
1607 add_float (&fpr, &stack_size, ainfo, TRUE, &float_spare);
1609 add_general (&gr, &stack_size, ainfo, FALSE);
1614 case MONO_TYPE_MVAR:
1615 /* gsharedvt arguments are passed by ref */
1616 g_assert (mini_is_gsharedvt_type_gsctx (gsctx, simpletype));
1617 add_general (&gr, &stack_size, ainfo, TRUE);
1618 switch (ainfo->storage) {
1619 case RegTypeGeneral:
1620 ainfo->storage = RegTypeGSharedVtInReg;
1623 ainfo->storage = RegTypeGSharedVtOnStack;
1626 g_assert_not_reached ();
1631 g_error ("Can't trampoline 0x%x", sig->params [i]->type);
1635 /* Handle the case where there are no implicit arguments */
1636 if ((sig->call_convention == MONO_CALL_VARARG) && (i == sig->sentinelpos)) {
1637 /* Prevent implicit arguments and sig_cookie from
1638 being passed in registers */
1641 /* Emit the signature cookie just before the implicit arguments */
1642 add_general (&gr, &stack_size, &cinfo->sig_cookie, TRUE);
/* Finally, pick the register class used for the return value. */
1646 simpletype = mini_type_get_underlying_type (gsctx, sig->ret);
1647 switch (simpletype->type) {
1648 case MONO_TYPE_BOOLEAN:
1653 case MONO_TYPE_CHAR:
1659 case MONO_TYPE_FNPTR:
1660 case MONO_TYPE_CLASS:
1661 case MONO_TYPE_OBJECT:
1662 case MONO_TYPE_SZARRAY:
1663 case MONO_TYPE_ARRAY:
1664 case MONO_TYPE_STRING:
1665 cinfo->ret.storage = RegTypeGeneral;
1666 cinfo->ret.reg = ARMREG_R0;
/* 64-bit returns use the r0/r1 pair. */
1670 cinfo->ret.storage = RegTypeIRegPair;
1671 cinfo->ret.reg = ARMREG_R0;
1675 cinfo->ret.storage = RegTypeFP;
1677 if (IS_HARD_FLOAT) {
1678 cinfo->ret.reg = ARM_VFP_F0;
1680 cinfo->ret.reg = ARMREG_R0;
1684 case MONO_TYPE_GENERICINST:
1685 if (!mono_type_generic_inst_is_valuetype (simpletype)) {
1686 cinfo->ret.storage = RegTypeGeneral;
1687 cinfo->ret.reg = ARMREG_R0;
1690 // FIXME: Only for variable types
1691 if (mini_is_gsharedvt_type_gsctx (gsctx, simpletype)) {
1692 cinfo->ret.storage = RegTypeStructByAddr;
1693 g_assert (cinfo->vtype_retaddr);
1697 case MONO_TYPE_VALUETYPE:
1698 case MONO_TYPE_TYPEDBYREF:
1699 if (cinfo->ret.storage != RegTypeStructByVal)
1700 cinfo->ret.storage = RegTypeStructByAddr;
1703 case MONO_TYPE_MVAR:
1704 g_assert (mini_is_gsharedvt_type_gsctx (gsctx, simpletype));
1705 cinfo->ret.storage = RegTypeStructByAddr;
1706 g_assert (cinfo->vtype_retaddr);
1708 case MONO_TYPE_VOID:
1711 g_error ("Can't handle as return value 0x%x", sig->ret->type);
1715 /* align stack size to 8 */
1716 DEBUG (printf (" stack size: %d (%d)\n", (stack_size + 15) & ~15, stack_size));
1717 stack_size = (stack_size + 7) & ~7;
1719 cinfo->stack_usage = stack_size;
/*
 * mono_arch_tail_call_supported:
 *   Decide whether a tail call from 'caller_sig' to 'callee_sig' can be
 *   emitted on ARM. The callee must not use more caller stack than the
 *   caller (the extra space would be left behind), must not return a
 *   struct through a hidden address argument (unless returned by value in
 *   registers), and its argument area must fit in 16 words.
 * NOTE(review): intermediate source lines (including the freeing of the
 * heap-allocated CallInfos and the return) are missing from this extract.
 */
1725 mono_arch_tail_call_supported (MonoCompile *cfg, MonoMethodSignature *caller_sig, MonoMethodSignature *callee_sig)
1727 MonoType *callee_ret;
1731 if (cfg->compile_aot && !cfg->full_aot)
1732 /* OP_TAILCALL doesn't work with AOT */
/* No mempool given: these CallInfos are heap-allocated. */
1735 c1 = get_call_info (NULL, NULL, caller_sig);
1736 c2 = get_call_info (NULL, NULL, callee_sig);
1739 * Tail calls with more callee stack usage than the caller cannot be supported, since
1740 * the extra stack space would be left on the stack after the tail call.
1742 res = c1->stack_usage >= c2->stack_usage;
1743 callee_ret = mini_replace_type (callee_sig->ret);
1744 if (callee_ret && MONO_TYPE_ISSTRUCT (callee_ret) && c2->ret.storage != RegTypeStructByVal)
1745 /* An address on the callee's stack is passed as the first argument */
1748 if (c2->stack_usage > 16 * 4)
/*
 * debug_omit_fp:
 *   Debug-count gate for frame-pointer elimination: returns whether the
 *   current method is still within the count, letting a developer bisect
 *   which methods receive FP omission.
 */
1760 debug_omit_fp (void)
1763 return mono_debug_count ();
1770 * mono_arch_compute_omit_fp:
1772 * Determine whenever the frame pointer can be eliminated.
/*
 * Result is cached in cfg->arch.omit_fp / omit_fp_computed. FP omission
 * starts as TRUE and is vetoed by each condition that needs a stable
 * frame pointer (LMF saving, alloca, exception clauses, a param area,
 * varargs, tracing/profiling, or any argument addressed off the stack).
 * NOTE(review): intermediate source lines are missing from this extract.
 */
1775 mono_arch_compute_omit_fp (MonoCompile *cfg)
1777 MonoMethodSignature *sig;
1778 MonoMethodHeader *header;
/* Already computed once for this method: nothing to do. */
1782 if (cfg->arch.omit_fp_computed)
1785 header = cfg->header;
1787 sig = mono_method_signature (cfg->method);
1789 if (!cfg->arch.cinfo)
1790 cfg->arch.cinfo = get_call_info (cfg->generic_sharing_context, cfg->mempool, sig);
1791 cinfo = cfg->arch.cinfo;
1794 * FIXME: Remove some of the restrictions.
1796 cfg->arch.omit_fp = TRUE;
1797 cfg->arch.omit_fp_computed = TRUE;
1799 if (cfg->disable_omit_fp)
1800 cfg->arch.omit_fp = FALSE;
1801 if (!debug_omit_fp ())
1802 cfg->arch.omit_fp = FALSE;
1804 if (cfg->method->save_lmf)
1805 cfg->arch.omit_fp = FALSE;
1807 if (cfg->flags & MONO_CFG_HAS_ALLOCA)
1808 cfg->arch.omit_fp = FALSE;
1809 if (header->num_clauses)
1810 cfg->arch.omit_fp = FALSE;
1811 if (cfg->param_area)
1812 cfg->arch.omit_fp = FALSE;
1813 if (!sig->pinvoke && (sig->call_convention == MONO_CALL_VARARG))
1814 cfg->arch.omit_fp = FALSE;
1815 if ((mono_jit_trace_calls != NULL && mono_trace_eval (cfg->method)) ||
1816 (cfg->prof_options & MONO_PROFILE_ENTER_LEAVE))
1817 cfg->arch.omit_fp = FALSE;
/* Arguments reachable only through the incoming stack need FP. */
1818 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
1819 ArgInfo *ainfo = &cinfo->args [i];
1821 if (ainfo->storage == RegTypeBase || ainfo->storage == RegTypeBaseGen || ainfo->storage == RegTypeStructByVal) {
1823 * The stack offset can only be determined when the frame
1826 cfg->arch.omit_fp = FALSE;
/* Accumulate the size of the method's locals. */
1831 for (i = cfg->locals_start; i < cfg->num_varinfo; i++) {
1832 MonoInst *ins = cfg->varinfo [i];
1835 locals_size += mono_type_size (ins->inst_vtype, &ialign);
1840 * Set var information according to the calling convention. arm version.
1841 * The locals var stuff should most likely be split in another method.
/*
 * mono_arch_allocate_vars:
 *   Assign a home (register or frame-relative stack slot) to the return
 *   value, every local, and every incoming argument. Offsets grow upward
 *   from SP (MONO_CFG_HAS_SPILLUP); FP is used instead when the frame
 *   pointer cannot be omitted.
 * NOTE(review): intermediate source lines are missing from this extract;
 * comments describe only the visible code.
 */
1844 mono_arch_allocate_vars (MonoCompile *cfg)
1846 MonoMethodSignature *sig;
1847 MonoMethodHeader *header;
1850 int i, offset, size, align, curinst;
1854 sig = mono_method_signature (cfg->method);
1856 if (!cfg->arch.cinfo)
1857 cfg->arch.cinfo = get_call_info (cfg->generic_sharing_context, cfg->mempool, sig);
1858 cinfo = cfg->arch.cinfo;
1859 sig_ret = mini_replace_type (sig->ret);
1861 mono_arch_compute_omit_fp (cfg);
/* Frame base register: SP when FP is omitted, FP otherwise. */
1863 if (cfg->arch.omit_fp)
1864 cfg->frame_reg = ARMREG_SP;
1866 cfg->frame_reg = ARMREG_FP;
1868 cfg->flags |= MONO_CFG_HAS_SPILLUP;
1870 /* allow room for the vararg method args: void* and long/double */
1871 if (mono_jit_trace_calls != NULL && mono_trace_eval (cfg->method))
1872 cfg->param_area = MAX (cfg->param_area, sizeof (gpointer)*8);
1874 header = cfg->header;
1876 /* See mono_arch_get_global_int_regs () */
1877 if (cfg->flags & MONO_CFG_HAS_CALLS)
1878 cfg->uses_rgctx_reg = TRUE;
1880 if (cfg->frame_reg != ARMREG_SP)
1881 cfg->used_int_regs |= 1 << cfg->frame_reg;
1883 if (cfg->compile_aot || cfg->uses_rgctx_reg || COMPILE_LLVM (cfg))
1884 /* V5 is reserved for passing the vtable/rgctx/IMT method */
1885 cfg->used_int_regs |= (1 << ARMREG_V5);
/* Scalar return values live in r0. */
1889 if (!MONO_TYPE_ISSTRUCT (sig_ret) && !cinfo->vtype_retaddr) {
1890 if (sig_ret->type != MONO_TYPE_VOID) {
1891 cfg->ret->opcode = OP_REGVAR;
1892 cfg->ret->inst_c0 = ARMREG_R0;
1895 /* local vars are at a positive offset from the stack pointer */
1897 * also note that if the function uses alloca, we use FP
1898 * to point at the local variables.
1900 offset = 0; /* linkage area */
1901 /* align the offset to 16 bytes: not sure this is needed here */
1903 //offset &= ~(8 - 1);
1905 /* add parameter area size for called functions */
1906 offset += cfg->param_area;
1909 if (cfg->flags & MONO_CFG_HAS_FPOUT)
1912 /* allow room to save the return value */
1913 if (mono_jit_trace_calls != NULL && mono_trace_eval (cfg->method))
1916 /* the MonoLMF structure is stored just below the stack pointer */
1917 if (cinfo->ret.storage == RegTypeStructByVal) {
1918 cfg->ret->opcode = OP_REGOFFSET;
1919 cfg->ret->inst_basereg = cfg->frame_reg;
/* Word-align and place the by-value struct return slot. */
1920 offset += sizeof (gpointer) - 1;
1921 offset &= ~(sizeof (gpointer) - 1);
1922 cfg->ret->inst_offset = - offset;
1923 offset += sizeof(gpointer);
1924 } else if (cinfo->vtype_retaddr) {
1925 ins = cfg->vret_addr;
1926 offset += sizeof(gpointer) - 1;
1927 offset &= ~(sizeof(gpointer) - 1);
1928 ins->inst_offset = offset;
1929 ins->opcode = OP_REGOFFSET;
1930 ins->inst_basereg = cfg->frame_reg;
1931 if (G_UNLIKELY (cfg->verbose_level > 1)) {
1932 printf ("vret_addr =");
1933 mono_print_ins (cfg->vret_addr);
1935 offset += sizeof(gpointer);
1938 /* Allocate these first so they have a small offset, OP_SEQ_POINT depends on this */
1939 if (cfg->arch.seq_point_info_var) {
1942 ins = cfg->arch.seq_point_info_var;
1946 offset += align - 1;
1947 offset &= ~(align - 1);
1948 ins->opcode = OP_REGOFFSET;
1949 ins->inst_basereg = cfg->frame_reg;
1950 ins->inst_offset = offset;
1953 ins = cfg->arch.ss_trigger_page_var;
1956 offset += align - 1;
1957 offset &= ~(align - 1);
1958 ins->opcode = OP_REGOFFSET;
1959 ins->inst_basereg = cfg->frame_reg;
1960 ins->inst_offset = offset;
/* Soft-breakpoint sequence-point variables, allocated the same way. */
1964 if (cfg->arch.seq_point_read_var) {
1967 ins = cfg->arch.seq_point_read_var;
1971 offset += align - 1;
1972 offset &= ~(align - 1);
1973 ins->opcode = OP_REGOFFSET;
1974 ins->inst_basereg = cfg->frame_reg;
1975 ins->inst_offset = offset;
1978 ins = cfg->arch.seq_point_ss_method_var;
1981 offset += align - 1;
1982 offset &= ~(align - 1);
1983 ins->opcode = OP_REGOFFSET;
1984 ins->inst_basereg = cfg->frame_reg;
1985 ins->inst_offset = offset;
1988 ins = cfg->arch.seq_point_bp_method_var;
1991 offset += align - 1;
1992 offset &= ~(align - 1);
1993 ins->opcode = OP_REGOFFSET;
1994 ins->inst_basereg = cfg->frame_reg;
1995 ins->inst_offset = offset;
1999 if (cfg->has_atomic_exchange_i4 || cfg->has_atomic_cas_i4 || cfg->has_atomic_add_i4) {
2000 /* Allocate a temporary used by the atomic ops */
2004 /* Allocate a local slot to hold the sig cookie address */
2005 offset += align - 1;
2006 offset &= ~(align - 1);
2007 cfg->arch.atomic_tmp_offset = offset;
2010 cfg->arch.atomic_tmp_offset = -1;
2013 cfg->locals_min_stack_offset = offset;
/* Assign stack slots to the method's local variables. */
2015 curinst = cfg->locals_start;
2016 for (i = curinst; i < cfg->num_varinfo; ++i) {
2019 ins = cfg->varinfo [i];
2020 if ((ins->flags & MONO_INST_IS_DEAD) || ins->opcode == OP_REGVAR || ins->opcode == OP_REGOFFSET)
2023 t = ins->inst_vtype;
2024 if (cfg->gsharedvt && mini_is_gsharedvt_variable_type (cfg, t))
2027 /* inst->backend.is_pinvoke indicates native sized value types, this is used by the
2028 * pinvoke wrappers when they call functions returning structure */
2029 if (ins->backend.is_pinvoke && MONO_TYPE_ISSTRUCT (t) && t->type != MONO_TYPE_TYPEDBYREF) {
2030 size = mono_class_native_size (mono_class_from_mono_type (t), &ualign);
2034 size = mono_type_size (t, &align);
2036 /* FIXME: if a structure is misaligned, our memcpy doesn't work,
2037 * since it loads/stores misaligned words, which don't do the right thing.
2039 if (align < 4 && size >= 4)
/* Padding introduced by alignment never holds GC references. */
2041 if (ALIGN_TO (offset, align) > ALIGN_TO (offset, 4))
2042 mini_gc_set_slot_type_from_fp (cfg, ALIGN_TO (offset, 4), SLOT_NOREF);
2043 offset += align - 1;
2044 offset &= ~(align - 1);
2045 ins->opcode = OP_REGOFFSET;
2046 ins->inst_offset = offset;
2047 ins->inst_basereg = cfg->frame_reg;
2049 //g_print ("allocating local %d to %d\n", i, inst->inst_offset);
2052 cfg->locals_max_stack_offset = offset;
/* Home the 'this' argument (when not already in a register). */
2056 ins = cfg->args [curinst];
2057 if (ins->opcode != OP_REGVAR) {
2058 ins->opcode = OP_REGOFFSET;
2059 ins->inst_basereg = cfg->frame_reg;
2060 offset += sizeof (gpointer) - 1;
2061 offset &= ~(sizeof (gpointer) - 1);
2062 ins->inst_offset = offset;
2063 offset += sizeof (gpointer);
2068 if (sig->call_convention == MONO_CALL_VARARG) {
2072 /* Allocate a local slot to hold the sig cookie address */
2073 offset += align - 1;
2074 offset &= ~(align - 1);
2075 cfg->sig_cookie = offset;
/* Home the remaining incoming arguments. */
2079 for (i = 0; i < sig->param_count; ++i) {
2080 ins = cfg->args [curinst];
2082 if (ins->opcode != OP_REGVAR) {
2083 ins->opcode = OP_REGOFFSET;
2084 ins->inst_basereg = cfg->frame_reg;
2085 size = mini_type_stack_size_full (cfg->generic_sharing_context, sig->params [i], &ualign, sig->pinvoke);
2087 /* FIXME: if a structure is misaligned, our memcpy doesn't work,
2088 * since it loads/stores misaligned words, which don't do the right thing.
2090 if (align < 4 && size >= 4)
2092 /* The code in the prolog () stores words when storing vtypes received in a register */
2093 if (MONO_TYPE_ISSTRUCT (sig->params [i]))
2095 if (ALIGN_TO (offset, align) > ALIGN_TO (offset, 4))
2096 mini_gc_set_slot_type_from_fp (cfg, ALIGN_TO (offset, 4), SLOT_NOREF);
2097 offset += align - 1;
2098 offset &= ~(align - 1);
2099 ins->inst_offset = offset;
2105 /* align the offset to 8 bytes */
2106 if (ALIGN_TO (offset, 8) > ALIGN_TO (offset, 4))
2107 mini_gc_set_slot_type_from_fp (cfg, ALIGN_TO (offset, 4), SLOT_NOREF);
2112 cfg->stack_offset = offset;
/*
 * mono_arch_create_vars:
 *   Create the architecture-specific compile-time variables: VFP scratch
 *   slots on hard-float, the hidden vret address argument for struct
 *   returns, and the sequence-point support variables used by the
 *   soft-debugger (soft breakpoints or AOT/trigger-page variants).
 * NOTE(review): intermediate source lines are missing from this extract.
 */
2116 mono_arch_create_vars (MonoCompile *cfg)
2118 MonoMethodSignature *sig;
2122 sig = mono_method_signature (cfg->method);
2124 if (!cfg->arch.cinfo)
2125 cfg->arch.cinfo = get_call_info (cfg->generic_sharing_context, cfg->mempool, sig);
2126 cinfo = cfg->arch.cinfo;
2128 if (IS_HARD_FLOAT) {
/* Two volatile double-sized scratch slots for VFP spills. */
2129 for (i = 0; i < 2; i++) {
2130 MonoInst *inst = mono_compile_create_var (cfg, &mono_defaults.double_class->byval_arg, OP_LOCAL);
2131 inst->flags |= MONO_INST_VOLATILE;
2133 cfg->arch.vfp_scratch_slots [i] = (gpointer) inst;
2137 if (cinfo->ret.storage == RegTypeStructByVal)
2138 cfg->ret_var_is_local = TRUE;
2140 if (cinfo->vtype_retaddr) {
/* Hidden argument carrying the address the struct return is stored to. */
2141 cfg->vret_addr = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_ARG);
2142 if (G_UNLIKELY (cfg->verbose_level > 1)) {
2143 printf ("vret_addr = ");
2144 mono_print_ins (cfg->vret_addr);
2148 if (cfg->gen_seq_points) {
2149 if (cfg->soft_breakpoints) {
2150 MonoInst *ins = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
2151 ins->flags |= MONO_INST_VOLATILE;
2152 cfg->arch.seq_point_read_var = ins;
2154 ins = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
2155 ins->flags |= MONO_INST_VOLATILE;
2156 cfg->arch.seq_point_ss_method_var = ins;
2158 ins = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
2159 ins->flags |= MONO_INST_VOLATILE;
2160 cfg->arch.seq_point_bp_method_var = ins;
2162 g_assert (!cfg->compile_aot);
2163 } else if (cfg->compile_aot) {
2164 MonoInst *ins = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
2165 ins->flags |= MONO_INST_VOLATILE;
2166 cfg->arch.seq_point_info_var = ins;
2168 /* Allocate a separate variable for this to save 1 load per seq point */
2169 ins = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
2170 ins->flags |= MONO_INST_VOLATILE;
2171 cfg->arch.ss_trigger_page_var = ins;
/*
 * emit_sig_cookie:
 *   For MONO_CALL_VARARG calls, store the "signature cookie" — a
 *   truncated copy of the call signature describing only the implicit
 *   (post-sentinel) arguments — at its assigned stack offset so
 *   mono_ArgIterator_Setup can walk the varargs at runtime.
 *   No-op for tail calls.
 */
2177 emit_sig_cookie (MonoCompile *cfg, MonoCallInst *call, CallInfo *cinfo)
2179 MonoMethodSignature *tmp_sig;
2182 if (call->tail_call)
/* get_call_info () always places the cookie on the stack. */
2185 g_assert (cinfo->sig_cookie.storage == RegTypeBase);
2188 * mono_ArgIterator_Setup assumes the signature cookie is
2189 * passed first and all the arguments which were before it are
2190 * passed on the stack after the signature. So compensate by
2191 * passing a different signature.
2193 tmp_sig = mono_metadata_signature_dup (call->signature);
2194 tmp_sig->param_count -= call->signature->sentinelpos;
2195 tmp_sig->sentinelpos = 0;
2196 memcpy (tmp_sig->params, call->signature->params + call->signature->sentinelpos, tmp_sig->param_count * sizeof (MonoType*));
2198 sig_reg = mono_alloc_ireg (cfg);
2199 MONO_EMIT_NEW_SIGNATURECONST (cfg, sig_reg, tmp_sig);
2201 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, ARMREG_SP, cinfo->sig_cookie.offset, sig_reg);
/*
 * mono_arch_get_llvm_call_info:
 *   Translate our CallInfo layout for 'sig' into the LLVMCallInfo the
 *   LLVM backend consumes. Argument/return conventions LLVM cannot
 *   express with our vtype handling set cfg->disable_llvm with an
 *   explanatory message instead of failing hard.
 * NOTE(review): intermediate source lines are missing from this extract.
 */
2206 mono_arch_get_llvm_call_info (MonoCompile *cfg, MonoMethodSignature *sig)
2211 LLVMCallInfo *linfo;
2213 n = sig->param_count + sig->hasthis;
2215 cinfo = get_call_info (cfg->generic_sharing_context, cfg->mempool, sig);
2217 linfo = mono_mempool_alloc0 (cfg->mempool, sizeof (LLVMCallInfo) + (sizeof (LLVMArgInfo) * n));
2220 * LLVM always uses the native ABI while we use our own ABI, the
2221 * only difference is the handling of vtypes:
2222 * - we only pass/receive them in registers in some cases, and only
2223 * in 1 or 2 integer registers.
2225 if (cinfo->vtype_retaddr) {
2226 /* Vtype returned using a hidden argument */
2227 linfo->ret.storage = LLVMArgVtypeRetAddr;
2228 linfo->vret_arg_index = cinfo->vret_arg_index;
2229 } else if (cinfo->ret.storage != RegTypeGeneral && cinfo->ret.storage != RegTypeNone && cinfo->ret.storage != RegTypeFP && cinfo->ret.storage != RegTypeIRegPair) {
2230 cfg->exception_message = g_strdup ("unknown ret conv");
2231 cfg->disable_llvm = TRUE;
2235 for (i = 0; i < n; ++i) {
2236 ainfo = cinfo->args + i;
2238 linfo->args [i].storage = LLVMArgNone;
2240 switch (ainfo->storage) {
2241 case RegTypeGeneral:
2242 case RegTypeIRegPair:
2244 linfo->args [i].storage = LLVMArgInIReg;
2246 case RegTypeStructByVal:
2247 // FIXME: Passing entirely on the stack or split reg/stack
/* Only fully-in-register vtypes of at most 2 words are supported. */
2248 if (ainfo->vtsize == 0 && ainfo->size <= 2) {
2249 linfo->args [i].storage = LLVMArgVtypeInReg;
2250 linfo->args [i].pair_storage [0] = LLVMArgInIReg;
2251 if (ainfo->size == 2)
2252 linfo->args [i].pair_storage [1] = LLVMArgInIReg;
2254 linfo->args [i].pair_storage [1] = LLVMArgNone;
2256 cfg->exception_message = g_strdup_printf ("vtype-by-val on stack");
2257 cfg->disable_llvm = TRUE;
2261 cfg->exception_message = g_strdup_printf ("ainfo->storage (%d)", ainfo->storage);
2262 cfg->disable_llvm = TRUE;
/*
 * mono_arch_emit_call:
 *   Emit the IR that moves each call argument into the location chosen by
 *   get_call_info (): moves into core registers (with pair handling for
 *   i8/u8 and soft-float doubles), stores to the outgoing stack area,
 *   OP_OUTARG_VT for by-value structs, and VFP register loads on hard
 *   float. Also emits the vararg sig cookie and the hidden vret argument.
 * NOTE(review): intermediate source lines are missing from this extract;
 * comments describe only the visible code.
 */
2272 mono_arch_emit_call (MonoCompile *cfg, MonoCallInst *call)
2275 MonoMethodSignature *sig;
2279 sig = call->signature;
2280 n = sig->param_count + sig->hasthis;
2282 cinfo = get_call_info (cfg->generic_sharing_context, NULL, sig)
2284 for (i = 0; i < n; ++i) {
2285 ArgInfo *ainfo = cinfo->args + i;
/* 'this' has no entry in sig->params; treat it as a native int. */
2288 if (i >= sig->hasthis)
2289 t = sig->params [i - sig->hasthis];
2291 t = &mono_defaults.int_class->byval_arg;
2292 t = mini_type_get_underlying_type (cfg->generic_sharing_context, t);
2294 if ((sig->call_convention == MONO_CALL_VARARG) && (i == sig->sentinelpos)) {
2295 /* Emit the signature cookie just before the implicit arguments */
2296 emit_sig_cookie (cfg, call, cinfo);
2299 in = call->args [i];
2301 switch (ainfo->storage) {
2302 case RegTypeGeneral:
2303 case RegTypeIRegPair:
/* 64-bit integer: move both halves (vreg+1/vreg+2) into the pair. */
2304 if (!t->byref && ((t->type == MONO_TYPE_I8) || (t->type == MONO_TYPE_U8))) {
2305 MONO_INST_NEW (cfg, ins, OP_MOVE);
2306 ins->dreg = mono_alloc_ireg (cfg);
2307 ins->sreg1 = in->dreg + 1;
2308 MONO_ADD_INS (cfg->cbb, ins);
2309 mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, ainfo->reg, FALSE);
2311 MONO_INST_NEW (cfg, ins, OP_MOVE);
2312 ins->dreg = mono_alloc_ireg (cfg);
2313 ins->sreg1 = in->dreg + 2;
2314 MONO_ADD_INS (cfg->cbb, ins);
2315 mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, ainfo->reg + 1, FALSE);
2316 } else if (!t->byref && ((t->type == MONO_TYPE_R8) || (t->type == MONO_TYPE_R4))) {
2317 if (ainfo->size == 4) {
2318 if (IS_SOFT_FLOAT) {
2319 /* mono_emit_call_args () have already done the r8->r4 conversion */
2320 /* The converted value is in an int vreg */
2321 MONO_INST_NEW (cfg, ins, OP_MOVE);
2322 ins->dreg = mono_alloc_ireg (cfg);
2323 ins->sreg1 = in->dreg;
2324 MONO_ADD_INS (cfg->cbb, ins);
2325 mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, ainfo->reg, FALSE);
/* VFP-with-softfp ABI: bounce the float through the param area to
 * get it into a core register. */
2329 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORER4_MEMBASE_REG, ARMREG_SP, (cfg->param_area - 8), in->dreg);
2330 creg = mono_alloc_ireg (cfg);
2331 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOAD_MEMBASE, creg, ARMREG_SP, (cfg->param_area - 8));
2332 mono_call_inst_add_outarg_reg (cfg, call, creg, ainfo->reg, FALSE);
2335 if (IS_SOFT_FLOAT) {
/* Soft float double: split into low/high 32-bit halves. */
2336 MONO_INST_NEW (cfg, ins, OP_FGETLOW32);
2337 ins->dreg = mono_alloc_ireg (cfg);
2338 ins->sreg1 = in->dreg;
2339 MONO_ADD_INS (cfg->cbb, ins);
2340 mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, ainfo->reg, FALSE);
2342 MONO_INST_NEW (cfg, ins, OP_FGETHIGH32);
2343 ins->dreg = mono_alloc_ireg (cfg);
2344 ins->sreg1 = in->dreg;
2345 MONO_ADD_INS (cfg->cbb, ins);
2346 mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, ainfo->reg + 1, FALSE);
2350 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORER8_MEMBASE_REG, ARMREG_SP, (cfg->param_area - 8), in->dreg);
2351 creg = mono_alloc_ireg (cfg);
2352 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOAD_MEMBASE, creg, ARMREG_SP, (cfg->param_area - 8));
2353 mono_call_inst_add_outarg_reg (cfg, call, creg, ainfo->reg, FALSE);
2354 creg = mono_alloc_ireg (cfg);
2355 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOAD_MEMBASE, creg, ARMREG_SP, (cfg->param_area - 8 + 4));
2356 mono_call_inst_add_outarg_reg (cfg, call, creg, ainfo->reg + 1, FALSE);
2359 cfg->flags |= MONO_CFG_HAS_FPOUT;
/* Plain single-word argument: one move into the target register. */
2361 MONO_INST_NEW (cfg, ins, OP_MOVE);
2362 ins->dreg = mono_alloc_ireg (cfg);
2363 ins->sreg1 = in->dreg;
2364 MONO_ADD_INS (cfg->cbb, ins);
2366 mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, ainfo->reg, FALSE);
2369 case RegTypeStructByAddr:
2372 /* FIXME: where si the data allocated? */
2373 arg->backend.reg3 = ainfo->reg;
2374 call->used_iregs |= 1 << ainfo->reg;
2375 g_assert_not_reached ();
2378 case RegTypeStructByVal:
2379 case RegTypeGSharedVtInReg:
2380 case RegTypeGSharedVtOnStack:
/* Deferred to mono_arch_emit_outarg_vt () via OP_OUTARG_VT, with a
 * copy of the ArgInfo attached. */
2381 MONO_INST_NEW (cfg, ins, OP_OUTARG_VT);
2382 ins->opcode = OP_OUTARG_VT;
2383 ins->sreg1 = in->dreg;
2384 ins->klass = in->klass;
2385 ins->inst_p0 = call;
2386 ins->inst_p1 = mono_mempool_alloc (cfg->mempool, sizeof (ArgInfo));
2387 memcpy (ins->inst_p1, ainfo, sizeof (ArgInfo));
2388 mono_call_inst_add_outarg_vt (cfg, call, ins);
2389 MONO_ADD_INS (cfg->cbb, ins);
/* Stack-passed argument: store at its outgoing offset off SP. */
2392 if (!t->byref && ((t->type == MONO_TYPE_I8) || (t->type == MONO_TYPE_U8))) {
2393 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, ARMREG_SP, ainfo->offset, in->dreg);
2394 } else if (!t->byref && ((t->type == MONO_TYPE_R4) || (t->type == MONO_TYPE_R8))) {
2395 if (t->type == MONO_TYPE_R8) {
2396 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORER8_MEMBASE_REG, ARMREG_SP, ainfo->offset, in->dreg);
2399 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, ARMREG_SP, ainfo->offset, in->dreg);
2401 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORER4_MEMBASE_REG, ARMREG_SP, ainfo->offset, in->dreg);
2404 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, ARMREG_SP, ainfo->offset, in->dreg);
2407 case RegTypeBaseGen:
/* 64-bit value split across r3 and the stack; half selection is
 * byte-order dependent. */
2408 if (!t->byref && ((t->type == MONO_TYPE_I8) || (t->type == MONO_TYPE_U8))) {
2409 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, ARMREG_SP, ainfo->offset, (G_BYTE_ORDER == G_BIG_ENDIAN) ? in->dreg + 1 : in->dreg + 2);
2410 MONO_INST_NEW (cfg, ins, OP_MOVE);
2411 ins->dreg = mono_alloc_ireg (cfg);
2412 ins->sreg1 = G_BYTE_ORDER == G_BIG_ENDIAN ? in->dreg + 2 : in->dreg + 1;
2413 MONO_ADD_INS (cfg->cbb, ins);
2414 mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, ARMREG_R3, FALSE);
2415 } else if (!t->byref && (t->type == MONO_TYPE_R8)) {
2418 /* This should work for soft-float as well */
2420 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORER8_MEMBASE_REG, ARMREG_SP, (cfg->param_area - 8), in->dreg);
2421 creg = mono_alloc_ireg (cfg);
2422 mono_call_inst_add_outarg_reg (cfg, call, creg, ARMREG_R3, FALSE);
2423 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOAD_MEMBASE, creg, ARMREG_SP, (cfg->param_area - 8));
2424 creg = mono_alloc_ireg (cfg);
2425 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOAD_MEMBASE, creg, ARMREG_SP, (cfg->param_area - 4));
2426 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, ARMREG_SP, ainfo->offset, creg);
2427 cfg->flags |= MONO_CFG_HAS_FPOUT;
2429 g_assert_not_reached ();
2433 int fdreg = mono_alloc_freg (cfg);
/* Hard-float: doubles go straight into a VFP register. */
2435 if (ainfo->size == 8) {
2436 MONO_INST_NEW (cfg, ins, OP_FMOVE);
2437 ins->sreg1 = in->dreg;
2439 MONO_ADD_INS (cfg->cbb, ins);
2441 mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, ainfo->reg, TRUE);
2446 * Mono's register allocator doesn't speak single-precision registers that
2447 * overlap double-precision registers (i.e. armhf). So we have to work around
2448 * the register allocator and load the value from memory manually.
2450 * So we create a variable for the float argument and an instruction to store
2451 * the argument into the variable. We then store the list of these arguments
2452 * in cfg->float_args. This list is then used by emit_float_args later to
2453 * pass the arguments in the various call opcodes.
2455 * This is not very nice, and we should really try to fix the allocator.
2458 MonoInst *float_arg = mono_compile_create_var (cfg, &mono_defaults.single_class->byval_arg, OP_LOCAL);
2460 /* Make sure the instruction isn't seen as pointless and removed.
2462 float_arg->flags |= MONO_INST_VOLATILE;
2464 MONO_EMIT_NEW_UNALU (cfg, OP_FMOVE, float_arg->dreg, in->dreg);
2466 /* We use the dreg to look up the instruction later. The hreg is used to
2467 * emit the instruction that loads the value into the FP reg.
2469 fad = mono_mempool_alloc0 (cfg->mempool, sizeof (FloatArgData));
2470 fad->vreg = float_arg->dreg;
2471 fad->hreg = ainfo->reg;
2473 call->float_args = g_slist_append_mempool (cfg->mempool, call->float_args, fad);
2476 call->used_iregs |= 1 << ainfo->reg;
2477 cfg->flags |= MONO_CFG_HAS_FPOUT;
2481 g_assert_not_reached ();
2485 /* Handle the case where there are no implicit arguments */
2486 if (!sig->pinvoke && (sig->call_convention == MONO_CALL_VARARG) && (n == sig->sentinelpos))
2487 emit_sig_cookie (cfg, call, cinfo);
2489 if (cinfo->ret.storage == RegTypeStructByVal) {
2490 /* The JIT will transform this into a normal call */
2491 call->vret_in_reg = TRUE;
2492 } else if (cinfo->vtype_retaddr) {
/* Pass the vret buffer address in its assigned register. */
2494 MONO_INST_NEW (cfg, vtarg, OP_MOVE);
2495 vtarg->sreg1 = call->vret_var->dreg;
2496 vtarg->dreg = mono_alloc_preg (cfg);
2497 MONO_ADD_INS (cfg->cbb, vtarg);
2499 mono_call_inst_add_outarg_reg (cfg, call, vtarg->dreg, cinfo->ret.reg, FALSE);
2502 call->stack_usage = cinfo->stack_usage;
/*
 * mono_arch_emit_outarg_vt:
 *   Expand an OP_OUTARG_VT: for gsharedvt args pass the address (in a
 *   register or on the stack); for RegTypeStructByVal load the leading
 *   'ainfo->size' words of the struct into consecutive registers
 *   (byte-assembling sub-word structs) and memcpy any overflow words
 *   ('ainfo->vtsize') into the outgoing stack area.
 * NOTE(review): intermediate source lines (e.g. the 'soffset' init and
 * some case labels) are missing from this extract.
 */
2508 mono_arch_emit_outarg_vt (MonoCompile *cfg, MonoInst *ins, MonoInst *src)
2510 MonoCallInst *call = (MonoCallInst*)ins->inst_p0;
2511 ArgInfo *ainfo = ins->inst_p1;
2512 int ovf_size = ainfo->vtsize;
2513 int doffset = ainfo->offset;
2514 int struct_size = ainfo->struct_size;
2515 int i, soffset, dreg, tmpreg;
2517 if (ainfo->storage == RegTypeGSharedVtInReg) {
/* Pass by addr in a core register. */
2519 mono_call_inst_add_outarg_reg (cfg, call, src->dreg, ainfo->reg, FALSE);
2522 if (ainfo->storage == RegTypeGSharedVtOnStack) {
2523 /* Pass by addr on stack */
2524 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, ARMREG_SP, ainfo->offset, src->dreg);
/* Load the in-register portion word by word; sub-word structs are
 * assembled from bytes/halfwords to avoid over-reading the source. */
2529 for (i = 0; i < ainfo->size; ++i) {
2530 dreg = mono_alloc_ireg (cfg);
2531 switch (struct_size) {
2533 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, dreg, src->dreg, soffset);
2536 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, dreg, src->dreg, soffset);
/* 3-byte struct: combine three byte loads with shifts and ors. */
2539 tmpreg = mono_alloc_ireg (cfg);
2540 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, dreg, src->dreg, soffset);
2541 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, tmpreg, src->dreg, soffset + 1);
2542 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, tmpreg, tmpreg, 8);
2543 MONO_EMIT_NEW_BIALU (cfg, OP_IOR, dreg, dreg, tmpreg);
2544 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, tmpreg, src->dreg, soffset + 2);
2545 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, tmpreg, tmpreg, 16);
2546 MONO_EMIT_NEW_BIALU (cfg, OP_IOR, dreg, dreg, tmpreg);
2549 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, dreg, src->dreg, soffset);
2552 mono_call_inst_add_outarg_reg (cfg, call, dreg, ainfo->reg + i, FALSE);
2553 soffset += sizeof (gpointer);
2554 struct_size -= sizeof (gpointer);
2556 //g_print ("vt size: %d at R%d + %d\n", doffset, vt->inst_basereg, vt->inst_offset);
/* Copy the remaining words to the outgoing stack area. */
2558 mini_emit_memcpy (cfg, ARMREG_SP, doffset, src->dreg, soffset, MIN (ovf_size * sizeof (gpointer), struct_size), struct_size < 4 ? 1 : 4);
/*
 * mono_arch_emit_setret:
 *
 *   Emit IR to move VAL into the method's return register(s), per the ARM
 * calling convention and the configured FPU mode.
 * NOTE(review): some original lines are missing from this chunk; comments only.
 */
2562 mono_arch_emit_setret (MonoCompile *cfg, MonoMethod *method, MonoInst *val)
2564 MonoType *ret = mini_type_get_underlying_type (cfg->generic_sharing_context, mono_method_signature (method)->ret);
/* 64-bit integer return: goes in the R0/R1 pair via OP_SETLRET. */
2567 if (ret->type == MONO_TYPE_I8 || ret->type == MONO_TYPE_U8) {
2570 if (COMPILE_LLVM (cfg)) {
2571 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, cfg->ret->dreg, val->dreg);
2573 MONO_INST_NEW (cfg, ins, OP_SETLRET);
/* vreg + 1 / vreg + 2 address the low/high halves of the long vreg pair. */
2574 ins->sreg1 = val->dreg + 1;
2575 ins->sreg2 = val->dreg + 2;
2576 MONO_ADD_INS (cfg->cbb, ins);
2581 case MONO_ARM_FPU_NONE:
2582 if (ret->type == MONO_TYPE_R8) {
2585 MONO_INST_NEW (cfg, ins, OP_SETFRET);
2586 ins->dreg = cfg->ret->dreg;
2587 ins->sreg1 = val->dreg;
2588 MONO_ADD_INS (cfg->cbb, ins);
2591 if (ret->type == MONO_TYPE_R4) {
2592 /* Already converted to an int in method_to_ir () */
2593 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, cfg->ret->dreg, val->dreg);
/* VFP (soft or hard ABI): both R4 and R8 returns go through OP_SETFRET. */
2597 case MONO_ARM_FPU_VFP:
2598 case MONO_ARM_FPU_VFP_HARD:
2599 if (ret->type == MONO_TYPE_R8 || ret->type == MONO_TYPE_R4) {
2602 MONO_INST_NEW (cfg, ins, OP_SETFRET);
2603 ins->dreg = cfg->ret->dreg;
2604 ins->sreg1 = val->dreg;
2605 MONO_ADD_INS (cfg->cbb, ins);
2610 g_assert_not_reached ();
/* All remaining (word-sized) returns: plain move into the return vreg. */
2614 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, cfg->ret->dreg, val->dreg);
2617 #endif /* #ifndef DISABLE_JIT */
2620 mono_arch_is_inst_imm (gint64 imm)
2626 MonoMethodSignature *sig;
2629 MonoType **param_types;
/*
 * dyn_call_supported:
 *
 *   Return whether the dyn-call machinery can handle a call with the given
 * signature/call info: all arguments must fit in PARAM_REGS plus
 * DYN_CALL_STACK_ARGS stack slots, and only the listed storage/return kinds
 * are supported.  NOTE(review): gaps in this chunk; comments only.
 */
2633 dyn_call_supported (CallInfo *cinfo, MonoMethodSignature *sig)
/* Quick reject: more args than the register + stack-slot budget. */
2637 if (sig->hasthis + sig->param_count > PARAM_REGS + DYN_CALL_STACK_ARGS)
2640 switch (cinfo->ret.storage) {
2642 case RegTypeGeneral:
2643 case RegTypeIRegPair:
2644 case RegTypeStructByAddr:
2655 for (i = 0; i < cinfo->nargs; ++i) {
2656 ArgInfo *ainfo = &cinfo->args [i];
2659 switch (ainfo->storage) {
2660 case RegTypeGeneral:
2662 case RegTypeIRegPair:
/* Stack args must land inside the DYN_CALL_STACK_ARGS window. */
2665 if (ainfo->offset >= (DYN_CALL_STACK_ARGS * sizeof (gpointer)))
2668 case RegTypeStructByVal:
2669 if (ainfo->size == 0)
2670 last_slot = PARAM_REGS + (ainfo->offset / 4) + ainfo->vtsize;
2672 last_slot = ainfo->reg + ainfo->size + ainfo->vtsize;
2673 if (last_slot >= PARAM_REGS + DYN_CALL_STACK_ARGS)
2681 // FIXME: Can't use cinfo only as it doesn't contain info about I8/float */
2682 for (i = 0; i < sig->param_count; ++i) {
2683 MonoType *t = sig->params [i];
2688 t = mini_replace_type (t);
/*
 * mono_arch_dyn_call_prepare:
 *
 *   Precompute the per-signature data needed by mono_arch_start_dyn_call ().
 * Returns NULL (after freeing cinfo — in the elided lines) when the signature
 * is not supported; otherwise a heap-allocated ArchDynCallInfo that the
 * caller must release with mono_arch_dyn_call_free ().
 */
2711 mono_arch_dyn_call_prepare (MonoMethodSignature *sig)
2713 ArchDynCallInfo *info;
2717 cinfo = get_call_info (NULL, NULL, sig);
2719 if (!dyn_call_supported (cinfo, sig)) {
2724 info = g_new0 (ArchDynCallInfo, 1);
2725 // FIXME: Preprocess the info to speed up start_dyn_call ()
2727 info->cinfo = cinfo;
2728 info->rtype = mini_replace_type (sig->ret);
/* Cache the underlying param types so start_dyn_call avoids repeated lookups. */
2729 info->param_types = g_new0 (MonoType*, sig->param_count);
2730 for (i = 0; i < sig->param_count; ++i)
2731 info->param_types [i] = mini_replace_type (sig->params [i]);
2733 return (MonoDynCallInfo*)info;
/*
 * mono_arch_dyn_call_free:
 *
 *   Release the data allocated by mono_arch_dyn_call_prepare ().
 * NOTE(review): only the cinfo free is visible here; the frees of
 * ainfo->param_types and ainfo itself should follow in the elided lines —
 * confirm, otherwise this leaks.
 */
2737 mono_arch_dyn_call_free (MonoDynCallInfo *info)
2739 ArchDynCallInfo *ainfo = (ArchDynCallInfo*)info;
2741 g_free (ainfo->cinfo);
/*
 * mono_arch_start_dyn_call:
 *
 *   Marshal ARGS into the DynCallArgs structure in BUF so the dyn-call
 * trampoline can place them in registers/stack slots.  RET receives the
 * return-value buffer address for by-address vtype returns.
 * NOTE(review): gaps in this chunk (several case labels and braces elided);
 * comments only, code untouched.
 */
2746 mono_arch_start_dyn_call (MonoDynCallInfo *info, gpointer **args, guint8 *ret, guint8 *buf, int buf_len)
2748 ArchDynCallInfo *dinfo = (ArchDynCallInfo*)info;
2749 DynCallArgs *p = (DynCallArgs*)buf;
2750 int arg_index, greg, i, j, pindex;
2751 MonoMethodSignature *sig = dinfo->sig;
2753 g_assert (buf_len >= sizeof (DynCallArgs));
/* 'this' (or the vret arg when it comes first) occupies the first greg. */
2762 if (sig->hasthis || dinfo->cinfo->vret_arg_index == 1) {
2763 p->regs [greg ++] = (mgreg_t)*(args [arg_index ++]);
/* By-address vtype return: pass the return buffer as a hidden argument. */
2768 if (dinfo->cinfo->vtype_retaddr)
2769 p->regs [greg ++] = (mgreg_t)ret;
2771 for (i = pindex; i < sig->param_count; i++) {
2772 MonoType *t = dinfo->param_types [i];
2773 gpointer *arg = args [arg_index ++];
2774 ArgInfo *ainfo = &dinfo->cinfo->args [i + sig->hasthis];
/* Compute the destination slot: register slots first, then the stack window. */
2777 if (ainfo->storage == RegTypeGeneral || ainfo->storage == RegTypeIRegPair || ainfo->storage == RegTypeStructByVal)
2779 else if (ainfo->storage == RegTypeBase)
2780 slot = PARAM_REGS + (ainfo->offset / 4);
2782 g_assert_not_reached ();
2785 p->regs [slot] = (mgreg_t)*arg;
2790 case MONO_TYPE_STRING:
2791 case MONO_TYPE_CLASS:
2792 case MONO_TYPE_ARRAY:
2793 case MONO_TYPE_SZARRAY:
2794 case MONO_TYPE_OBJECT:
2798 p->regs [slot] = (mgreg_t)*arg;
2800 case MONO_TYPE_BOOLEAN:
2802 p->regs [slot] = *(guint8*)arg;
2805 p->regs [slot] = *(gint8*)arg;
2808 p->regs [slot] = *(gint16*)arg;
2811 case MONO_TYPE_CHAR:
2812 p->regs [slot] = *(guint16*)arg;
2815 p->regs [slot] = *(gint32*)arg;
2818 p->regs [slot] = *(guint32*)arg;
/* 64-bit values occupy two consecutive slots (low word first). */
2822 p->regs [slot ++] = (mgreg_t)arg [0];
2823 p->regs [slot] = (mgreg_t)arg [1];
2826 p->regs [slot] = *(mgreg_t*)arg;
2829 p->regs [slot ++] = (mgreg_t)arg [0];
2830 p->regs [slot] = (mgreg_t)arg [1];
2832 case MONO_TYPE_GENERICINST:
2833 if (MONO_TYPE_IS_REFERENCE (t)) {
2834 p->regs [slot] = (mgreg_t)*arg;
2839 case MONO_TYPE_VALUETYPE:
2840 g_assert (ainfo->storage == RegTypeStructByVal);
2842 if (ainfo->size == 0)
2843 slot = PARAM_REGS + (ainfo->offset / 4);
/* Copy the whole struct word-by-word into consecutive slots. */
2847 for (j = 0; j < ainfo->size + ainfo->vtsize; ++j)
2848 p->regs [slot ++] = ((mgreg_t*)arg) [j];
2851 g_assert_not_reached ();
/*
 * mono_arch_finish_dyn_call:
 *
 *   Store the raw return value(s) captured in BUF (res in R0, res2 in R1)
 * into the caller-supplied return buffer, converting per the signature's
 * return type.  NOTE(review): gaps in this chunk; comments only.
 */
2857 mono_arch_finish_dyn_call (MonoDynCallInfo *info, guint8 *buf)
2859 ArchDynCallInfo *ainfo = (ArchDynCallInfo*)info;
2860 MonoType *ptype = ainfo->rtype;
2861 guint8 *ret = ((DynCallArgs*)buf)->ret;
2862 mgreg_t res = ((DynCallArgs*)buf)->res;
2863 mgreg_t res2 = ((DynCallArgs*)buf)->res2;
2865 switch (ptype->type) {
2866 case MONO_TYPE_VOID:
2867 *(gpointer*)ret = NULL;
2869 case MONO_TYPE_STRING:
2870 case MONO_TYPE_CLASS:
2871 case MONO_TYPE_ARRAY:
2872 case MONO_TYPE_SZARRAY:
2873 case MONO_TYPE_OBJECT:
2877 *(gpointer*)ret = (gpointer)res;
2883 case MONO_TYPE_BOOLEAN:
2884 *(guint8*)ret = res;
2887 *(gint16*)ret = res;
2890 case MONO_TYPE_CHAR:
2891 *(guint16*)ret = res;
2894 *(gint32*)ret = res;
2897 *(guint32*)ret = res;
2901 /* This handles endianness as well */
2902 ((gint32*)ret) [0] = res;
2903 ((gint32*)ret) [1] = res2;
2905 case MONO_TYPE_GENERICINST:
2906 if (MONO_TYPE_IS_REFERENCE (ptype)) {
2907 *(gpointer*)ret = (gpointer)res;
2912 case MONO_TYPE_VALUETYPE:
/* By-value vtype returns already wrote through the hidden ret pointer. */
2913 g_assert (ainfo->cinfo->vtype_retaddr);
/* R4: reinterpret the R0 bits as a float (type-pun via cast). */
2918 *(float*)ret = *(float*)&res;
2920 case MONO_TYPE_R8: {
/* NOTE(review): '®s' below is an HTML-entity mis-encoding — '&reg;' was
 * rendered as '®'.  The original line reads: *(double*)ret = *(double*)&regs;
 * where 'regs' is a local mgreg_t[2] = { res, res2 } (declared in the elided
 * lines).  Restore when fixing up this file. */
2927 *(double*)ret = *(double*)®s;
2931 g_assert_not_reached ();
2938 * Allow tracing to work with this interface (with an optional argument)
/*
 * mono_arch_instrument_prolog:
 *
 *   Emit a call to FUNC (method, NULL) at method entry for tracing:
 * R0 = method, R1 = 0 (frame pointer placeholder), call via R2.
 */
2942 mono_arch_instrument_prolog (MonoCompile *cfg, void *func, void *p, gboolean enable_arguments)
2946 code = mono_arm_emit_load_imm (code, ARMREG_R0, (guint32)cfg->method);
2947 ARM_MOV_REG_IMM8 (code, ARMREG_R1, 0); /* NULL ebp for now */
2948 code = mono_arm_emit_load_imm (code, ARMREG_R2, (guint32)func);
2949 code = emit_call_reg (code, ARMREG_R2);
/*
 * mono_arch_instrument_epilog_full:
 *
 *   Emit a call to FUNC at method exit for tracing.  The return value
 * (R0, R0/R1, or a VFP register, depending on the return type) is spilled
 * to the param area around the call and reloaded afterwards.
 * NOTE(review): gaps in this chunk; comments only, code untouched.
 */
2963 mono_arch_instrument_epilog_full (MonoCompile *cfg, void *func, void *p, gboolean enable_arguments, gboolean preserve_argument_registers)
2966 int save_mode = SAVE_NONE;
2968 MonoMethod *method = cfg->method;
2969 MonoType *ret_type = mini_type_get_underlying_type (cfg->generic_sharing_context, mono_method_signature (method)->ret);
2970 int rtype = ret_type->type;
2971 int save_offset = cfg->param_area;
/* Ensure the output buffer has room for the ~16 instructions emitted below. */
2975 offset = code - cfg->native_code;
2976 /* we need about 16 instructions */
2977 if (offset > (cfg->code_size - 16 * 4)) {
2978 cfg->code_size *= 2;
2979 cfg->native_code = g_realloc (cfg->native_code, cfg->code_size);
2980 code = cfg->native_code + offset;
2983 case MONO_TYPE_VOID:
2984 /* special case string .ctor icall */
/* NOTE(review): other Mono backends use !strcmp here — the string .ctor
 * icall actually returns the new string in R0 despite the void signature,
 * so SAVE_ONE should apply when the name IS ".ctor".  As written, the
 * condition matches non-".ctor" void methods of String instead.  Suspected
 * missing '!'; confirm against upstream mini-arm.c before changing. */
2985 if (strcmp (".ctor", method->name) && method->klass == mono_defaults.string_class)
2986 save_mode = SAVE_ONE;
2988 save_mode = SAVE_NONE;
2992 save_mode = SAVE_TWO;
2996 save_mode = SAVE_ONE_FP;
2998 save_mode = SAVE_ONE;
3002 save_mode = SAVE_TWO_FP;
3004 save_mode = SAVE_TWO;
3006 case MONO_TYPE_GENERICINST:
3007 if (!mono_type_generic_inst_is_valuetype (ret_type)) {
3008 save_mode = SAVE_ONE;
3012 case MONO_TYPE_VALUETYPE:
3013 save_mode = SAVE_STRUCT;
3016 save_mode = SAVE_ONE;
/* Spill the live return value before clobbering R0-R2 for the call. */
3020 switch (save_mode) {
3022 ARM_STR_IMM (code, ARMREG_R0, cfg->frame_reg, save_offset);
3023 ARM_STR_IMM (code, ARMREG_R1, cfg->frame_reg, save_offset + 4);
3024 if (enable_arguments) {
3025 ARM_MOV_REG_REG (code, ARMREG_R2, ARMREG_R1);
3026 ARM_MOV_REG_REG (code, ARMREG_R1, ARMREG_R0);
3030 ARM_STR_IMM (code, ARMREG_R0, cfg->frame_reg, save_offset);
3031 if (enable_arguments) {
3032 ARM_MOV_REG_REG (code, ARMREG_R1, ARMREG_R0);
3036 ARM_FSTS (code, ARM_VFP_F0, cfg->frame_reg, save_offset);
3037 if (enable_arguments) {
3038 ARM_FMRS (code, ARMREG_R1, ARM_VFP_F0);
3042 ARM_FSTD (code, ARM_VFP_D0, cfg->frame_reg, save_offset);
3043 if (enable_arguments) {
/* Move the double D0 into the R1/R2 pair for the tracer's varargs. */
3044 ARM_FMDRR (code, ARMREG_R1, ARMREG_R2, ARM_VFP_D0);
3048 if (enable_arguments) {
3049 /* FIXME: get the actual address */
3050 ARM_MOV_REG_REG (code, ARMREG_R1, ARMREG_R0);
/* Call FUNC (method, ...) via IP. */
3058 code = mono_arm_emit_load_imm (code, ARMREG_R0, (guint32)cfg->method);
3059 code = mono_arm_emit_load_imm (code, ARMREG_IP, (guint32)func);
3060 code = emit_call_reg (code, ARMREG_IP);
/* Reload the spilled return value. */
3062 switch (save_mode) {
3064 ARM_LDR_IMM (code, ARMREG_R0, cfg->frame_reg, save_offset);
3065 ARM_LDR_IMM (code, ARMREG_R1, cfg->frame_reg, save_offset + 4);
3068 ARM_LDR_IMM (code, ARMREG_R0, cfg->frame_reg, save_offset);
3071 ARM_FLDS (code, ARM_VFP_F0, cfg->frame_reg, save_offset);
3074 ARM_FLDD (code, ARM_VFP_D0, cfg->frame_reg, save_offset);
/* Branch/exception emission helpers.  EMIT_COND_BRANCH_FLAGS emits a
 * conditional branch to a basic block, always via a patch record (the
 * direct-offset fast path is disabled with 'if (0 ...)').
 * EMIT_COND_SYSTEM_EXCEPTION_FLAGS emits a conditional BL to an exception
 * throw stub, resolved later through a MONO_PATCH_INFO_EXC patch.
 * NOTE(review): continuation lines are elided in this chunk, so no comments
 * are inserted inside the backslash-continued macro bodies. */
3085 * The immediate field for cond branches is big enough for all reasonable methods
3087 #define EMIT_COND_BRANCH_FLAGS(ins,condcode) \
3088 if (0 && ins->inst_true_bb->native_offset) { \
3089 ARM_B_COND (code, (condcode), (code - cfg->native_code + ins->inst_true_bb->native_offset) & 0xffffff); \
3091 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_BB, ins->inst_true_bb); \
3092 ARM_B_COND (code, (condcode), 0); \
3095 #define EMIT_COND_BRANCH(ins,cond) EMIT_COND_BRANCH_FLAGS(ins, branch_cc_table [(cond)])
3097 /* emit an exception if condition is fail
3099 * We assign the extra code used to throw the implicit exceptions
3100 * to cfg->bb_exit as far as the big branch handling is concerned
3102 #define EMIT_COND_SYSTEM_EXCEPTION_FLAGS(condcode,exc_name) \
3104 mono_add_patch_info (cfg, code - cfg->native_code, \
3105 MONO_PATCH_INFO_EXC, exc_name); \
3106 ARM_BL_COND (code, (condcode), 0); \
3109 #define EMIT_COND_SYSTEM_EXCEPTION(cond,exc_name) EMIT_COND_SYSTEM_EXCEPTION_FLAGS(branch_cc_table [(cond)], (exc_name))
3112 mono_arch_peephole_pass_1 (MonoCompile *cfg, MonoBasicBlock *bb)
/*
 * mono_arch_peephole_pass_2:
 *
 *   Late peephole pass over one basic block: folds redundant load-after-store,
 * load-after-load, and move patterns.  NOTE(review): gaps in this chunk;
 * comments only, code untouched.
 */
3117 mono_arch_peephole_pass_2 (MonoCompile *cfg, MonoBasicBlock *bb)
3119 MonoInst *ins, *n, *last_ins = NULL;
3121 MONO_BB_FOR_EACH_INS_SAFE (bb, n, ins) {
3122 switch (ins->opcode) {
3125 /* Already done by an arch-independent pass */
3127 case OP_LOAD_MEMBASE:
3128 case OP_LOADI4_MEMBASE:
/* Store followed by load of the same slot: reuse the stored register. */
3130 * OP_STORE_MEMBASE_REG reg, offset(basereg)
3131 * OP_LOAD_MEMBASE offset(basereg), reg
3133 if (last_ins && (last_ins->opcode == OP_STOREI4_MEMBASE_REG
3134 || last_ins->opcode == OP_STORE_MEMBASE_REG) &&
3135 ins->inst_basereg == last_ins->inst_destbasereg &&
3136 ins->inst_offset == last_ins->inst_offset) {
3137 if (ins->dreg == last_ins->sreg1) {
3138 MONO_DELETE_INS (bb, ins);
3141 //static int c = 0; printf ("MATCHX %s %d\n", cfg->method->name,c++);
3142 ins->opcode = OP_MOVE;
3143 ins->sreg1 = last_ins->sreg1;
3147 * Note: reg1 must be different from the basereg in the second load
3148 * OP_LOAD_MEMBASE offset(basereg), reg1
3149 * OP_LOAD_MEMBASE offset(basereg), reg2
3151 * OP_LOAD_MEMBASE offset(basereg), reg1
3152 * OP_MOVE reg1, reg2
/* Two identical loads in a row: turn the second into a register move. */
3154 } if (last_ins && (last_ins->opcode == OP_LOADI4_MEMBASE
3155 || last_ins->opcode == OP_LOAD_MEMBASE) &&
3156 ins->inst_basereg != last_ins->dreg &&
3157 ins->inst_basereg == last_ins->inst_basereg &&
3158 ins->inst_offset == last_ins->inst_offset) {
3160 if (ins->dreg == last_ins->dreg) {
3161 MONO_DELETE_INS (bb, ins);
3164 ins->opcode = OP_MOVE;
3165 ins->sreg1 = last_ins->dreg;
3168 //g_assert_not_reached ();
3172 * OP_STORE_MEMBASE_IMM imm, offset(basereg)
3173 * OP_LOAD_MEMBASE offset(basereg), reg
3175 * OP_STORE_MEMBASE_IMM imm, offset(basereg)
3176 * OP_ICONST reg, imm
3178 } else if (last_ins && (last_ins->opcode == OP_STOREI4_MEMBASE_IMM
3179 || last_ins->opcode == OP_STORE_MEMBASE_IMM) &&
3180 ins->inst_basereg == last_ins->inst_destbasereg &&
3181 ins->inst_offset == last_ins->inst_offset) {
3182 //static int c = 0; printf ("MATCHX %s %d\n", cfg->method->name,c++);
3183 ins->opcode = OP_ICONST;
3184 ins->inst_c0 = last_ins->inst_imm;
/* This rule has apparently never fired in practice — the assert guards it. */
3185 g_assert_not_reached (); // check this rule
3189 case OP_LOADU1_MEMBASE:
3190 case OP_LOADI1_MEMBASE:
/* Narrow load after narrow store: replace with a sign/zero extension. */
3191 if (last_ins && (last_ins->opcode == OP_STOREI1_MEMBASE_REG) &&
3192 ins->inst_basereg == last_ins->inst_destbasereg &&
3193 ins->inst_offset == last_ins->inst_offset) {
3194 ins->opcode = (ins->opcode == OP_LOADI1_MEMBASE) ? OP_ICONV_TO_I1 : OP_ICONV_TO_U1;
3195 ins->sreg1 = last_ins->sreg1;
3198 case OP_LOADU2_MEMBASE:
3199 case OP_LOADI2_MEMBASE:
3200 if (last_ins && (last_ins->opcode == OP_STOREI2_MEMBASE_REG) &&
3201 ins->inst_basereg == last_ins->inst_destbasereg &&
3202 ins->inst_offset == last_ins->inst_offset) {
3203 ins->opcode = (ins->opcode == OP_LOADI2_MEMBASE) ? OP_ICONV_TO_I2 : OP_ICONV_TO_U2;
3204 ins->sreg1 = last_ins->sreg1;
3208 ins->opcode = OP_MOVE;
/* Self-move: dead, delete it. */
3212 if (ins->dreg == ins->sreg1) {
3213 MONO_DELETE_INS (bb, ins);
3217 * OP_MOVE sreg, dreg
3218 * OP_MOVE dreg, sreg
/* Move immediately undone by the previous move: delete. */
3220 if (last_ins && last_ins->opcode == OP_MOVE &&
3221 ins->sreg1 == last_ins->dreg &&
3222 ins->dreg == last_ins->sreg1) {
3223 MONO_DELETE_INS (bb, ins);
3231 bb->last_ins = last_ins;
3235 * the branch_cc_table should maintain the order of these
3249 branch_cc_table [] = {
/* Allocate a new instruction DEST with opcode OP and insert it before the
 * current 'ins' in basic block 'bb' (used by the lowering pass below).
 * NOTE(review): the closing of the do/while(0) wrapper is in elided lines,
 * so no comment is inserted inside the continuation. */
3263 #define ADD_NEW_INS(cfg,dest,op) do { \
3264 MONO_INST_NEW ((cfg), (dest), (op)); \
3265 mono_bblock_insert_before_ins (bb, ins, (dest)); \
/*
 * map_to_reg_reg_op:
 *
 *   Map a reg+imm opcode to its reg+reg counterpart: _IMM compares/ALU ops to
 * plain ops, _MEMBASE loads/stores to _MEMINDEX, and _MEMBASE_IMM stores to
 * _MEMBASE_REG (the immediate is first materialized into a register by the
 * lowering pass).  Asserts on anything unmapped.
 */
3269 map_to_reg_reg_op (int op)
3278 case OP_COMPARE_IMM:
3280 case OP_ICOMPARE_IMM:
3294 case OP_LOAD_MEMBASE:
3295 return OP_LOAD_MEMINDEX;
3296 case OP_LOADI4_MEMBASE:
3297 return OP_LOADI4_MEMINDEX;
3298 case OP_LOADU4_MEMBASE:
3299 return OP_LOADU4_MEMINDEX;
3300 case OP_LOADU1_MEMBASE:
3301 return OP_LOADU1_MEMINDEX;
3302 case OP_LOADI2_MEMBASE:
3303 return OP_LOADI2_MEMINDEX;
3304 case OP_LOADU2_MEMBASE:
3305 return OP_LOADU2_MEMINDEX;
3306 case OP_LOADI1_MEMBASE:
3307 return OP_LOADI1_MEMINDEX;
3308 case OP_STOREI1_MEMBASE_REG:
3309 return OP_STOREI1_MEMINDEX;
3310 case OP_STOREI2_MEMBASE_REG:
3311 return OP_STOREI2_MEMINDEX;
3312 case OP_STOREI4_MEMBASE_REG:
3313 return OP_STOREI4_MEMINDEX;
3314 case OP_STORE_MEMBASE_REG:
3315 return OP_STORE_MEMINDEX;
3316 case OP_STORER4_MEMBASE_REG:
3317 return OP_STORER4_MEMINDEX;
3318 case OP_STORER8_MEMBASE_REG:
3319 return OP_STORER8_MEMINDEX;
/* Immediate stores lower to register stores (value loaded separately). */
3320 case OP_STORE_MEMBASE_IMM:
3321 return OP_STORE_MEMBASE_REG;
3322 case OP_STOREI1_MEMBASE_IMM:
3323 return OP_STOREI1_MEMBASE_REG;
3324 case OP_STOREI2_MEMBASE_IMM:
3325 return OP_STOREI2_MEMBASE_REG;
3326 case OP_STOREI4_MEMBASE_IMM:
3327 return OP_STOREI4_MEMBASE_REG;
3329 g_assert_not_reached ();
3333 * Remove from the instruction list the instructions that can't be
3334 * represented with very simple instructions with no register
/*
 * mono_arch_lowering_pass:
 *
 *   Rewrite instructions whose immediates/offsets cannot be encoded in ARM
 * instruction fields: materialize big constants into vregs and switch to the
 * reg+reg opcode forms (see map_to_reg_reg_op).  ARM data-processing
 * immediates must be an 8-bit value rotated by an even amount
 * (mono_arm_is_rotated_imm8); loads/stores have 12-bit (word/byte), 8-bit
 * (halfword), or 8-bit-scaled (VFP) offset fields.
 * NOTE(review): gaps in this chunk; comments only, code untouched.
 */
3338 mono_arch_lowering_pass (MonoCompile *cfg, MonoBasicBlock *bb)
3340 MonoInst *ins, *temp, *last_ins = NULL;
3341 int rot_amount, imm8, low_imm;
3343 MONO_BB_FOR_EACH_INS (bb, ins) {
3345 switch (ins->opcode) {
3349 case OP_COMPARE_IMM:
3350 case OP_ICOMPARE_IMM:
/* Immediate not encodable as rotated imm8: load it into a vreg and use the
 * register form of the opcode. */
3364 if ((imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount)) < 0) {
3365 ADD_NEW_INS (cfg, temp, OP_ICONST);
3366 temp->inst_c0 = ins->inst_imm;
3367 temp->dreg = mono_alloc_ireg (cfg);
3368 ins->sreg2 = temp->dreg;
3369 ins->opcode = mono_op_imm_to_op (ins->opcode);
3371 if (ins->opcode == OP_SBB || ins->opcode == OP_ISBB || ins->opcode == OP_SUBCC)
/* Multiply-by-immediate strength reduction: *1 -> move, *0 -> const 0,
 * power of two -> shift, otherwise load the constant and use IMUL. */
3377 if (ins->inst_imm == 1) {
3378 ins->opcode = OP_MOVE;
3381 if (ins->inst_imm == 0) {
3382 ins->opcode = OP_ICONST;
3386 imm8 = mono_is_power_of_two (ins->inst_imm);
3388 ins->opcode = OP_SHL_IMM;
3389 ins->inst_imm = imm8;
3392 ADD_NEW_INS (cfg, temp, OP_ICONST);
3393 temp->inst_c0 = ins->inst_imm;
3394 temp->dreg = mono_alloc_ireg (cfg);
3395 ins->sreg2 = temp->dreg;
3396 ins->opcode = OP_IMUL;
3402 if (ins->next && (ins->next->opcode == OP_COND_EXC_C || ins->next->opcode == OP_COND_EXC_IC))
3403 /* ARM sets the C flag to 1 if there was _no_ overflow */
3404 ins->next->opcode = OP_COND_EXC_NC;
3407 case OP_IDIV_UN_IMM:
3409 case OP_IREM_UN_IMM:
/* No div-by-immediate on ARM: always go through a register. */
3410 ADD_NEW_INS (cfg, temp, OP_ICONST);
3411 temp->inst_c0 = ins->inst_imm;
3412 temp->dreg = mono_alloc_ireg (cfg);
3413 ins->sreg2 = temp->dreg;
3414 ins->opcode = mono_op_imm_to_op (ins->opcode);
3416 case OP_LOCALLOC_IMM:
3417 ADD_NEW_INS (cfg, temp, OP_ICONST);
3418 temp->inst_c0 = ins->inst_imm;
3419 temp->dreg = mono_alloc_ireg (cfg);
3420 ins->sreg1 = temp->dreg;
3421 ins->opcode = OP_LOCALLOC;
3423 case OP_LOAD_MEMBASE:
3424 case OP_LOADI4_MEMBASE:
3425 case OP_LOADU4_MEMBASE:
3426 case OP_LOADU1_MEMBASE:
3427 /* we can do two things: load the immed in a register
3428 * and use an indexed load, or see if the immed can be
3429 * represented as an ad_imm + a load with a smaller offset
3430 * that fits. We just do the first for now, optimize later.
3432 if (arm_is_imm12 (ins->inst_offset))
3434 ADD_NEW_INS (cfg, temp, OP_ICONST);
3435 temp->inst_c0 = ins->inst_offset;
3436 temp->dreg = mono_alloc_ireg (cfg);
3437 ins->sreg2 = temp->dreg;
3438 ins->opcode = map_to_reg_reg_op (ins->opcode);
3440 case OP_LOADI2_MEMBASE:
3441 case OP_LOADU2_MEMBASE:
3442 case OP_LOADI1_MEMBASE:
/* Halfword/signed-byte loads only have an 8-bit offset field. */
3443 if (arm_is_imm8 (ins->inst_offset))
3445 ADD_NEW_INS (cfg, temp, OP_ICONST);
3446 temp->inst_c0 = ins->inst_offset;
3447 temp->dreg = mono_alloc_ireg (cfg);
3448 ins->sreg2 = temp->dreg;
3449 ins->opcode = map_to_reg_reg_op (ins->opcode);
3451 case OP_LOADR4_MEMBASE:
3452 case OP_LOADR8_MEMBASE:
/* VFP loads have no reg+reg form: fold the high part of the offset into a
 * temporary base register and keep the low 9 bits as the offset. */
3453 if (arm_is_fpimm8 (ins->inst_offset))
3455 low_imm = ins->inst_offset & 0x1ff;
3456 if ((imm8 = mono_arm_is_rotated_imm8 (ins->inst_offset & ~0x1ff, &rot_amount)) >= 0) {
3457 ADD_NEW_INS (cfg, temp, OP_ADD_IMM);
3458 temp->inst_imm = ins->inst_offset & ~0x1ff;
3459 temp->sreg1 = ins->inst_basereg;
3460 temp->dreg = mono_alloc_ireg (cfg);
3461 ins->inst_basereg = temp->dreg;
3462 ins->inst_offset = low_imm;
/* Fallback: full add of base + offset into a fresh base register. */
3466 ADD_NEW_INS (cfg, temp, OP_ICONST);
3467 temp->inst_c0 = ins->inst_offset;
3468 temp->dreg = mono_alloc_ireg (cfg);
3470 ADD_NEW_INS (cfg, add_ins, OP_IADD);
3471 add_ins->sreg1 = ins->inst_basereg;
3472 add_ins->sreg2 = temp->dreg;
3473 add_ins->dreg = mono_alloc_ireg (cfg);
3475 ins->inst_basereg = add_ins->dreg;
3476 ins->inst_offset = 0;
3479 case OP_STORE_MEMBASE_REG:
3480 case OP_STOREI4_MEMBASE_REG:
3481 case OP_STOREI1_MEMBASE_REG:
3482 if (arm_is_imm12 (ins->inst_offset))
3484 ADD_NEW_INS (cfg, temp, OP_ICONST);
3485 temp->inst_c0 = ins->inst_offset;
3486 temp->dreg = mono_alloc_ireg (cfg);
3487 ins->sreg2 = temp->dreg;
3488 ins->opcode = map_to_reg_reg_op (ins->opcode);
3490 case OP_STOREI2_MEMBASE_REG:
3491 if (arm_is_imm8 (ins->inst_offset))
3493 ADD_NEW_INS (cfg, temp, OP_ICONST);
3494 temp->inst_c0 = ins->inst_offset;
3495 temp->dreg = mono_alloc_ireg (cfg);
3496 ins->sreg2 = temp->dreg;
3497 ins->opcode = map_to_reg_reg_op (ins->opcode);
3499 case OP_STORER4_MEMBASE_REG:
3500 case OP_STORER8_MEMBASE_REG:
/* Same base-register trick as the R4/R8 loads above. */
3501 if (arm_is_fpimm8 (ins->inst_offset))
3503 low_imm = ins->inst_offset & 0x1ff;
3504 if ((imm8 = mono_arm_is_rotated_imm8 (ins->inst_offset & ~ 0x1ff, &rot_amount)) >= 0 && arm_is_fpimm8 (low_imm)) {
3505 ADD_NEW_INS (cfg, temp, OP_ADD_IMM);
3506 temp->inst_imm = ins->inst_offset & ~0x1ff;
3507 temp->sreg1 = ins->inst_destbasereg;
3508 temp->dreg = mono_alloc_ireg (cfg);
3509 ins->inst_destbasereg = temp->dreg;
3510 ins->inst_offset = low_imm;
3514 ADD_NEW_INS (cfg, temp, OP_ICONST);
3515 temp->inst_c0 = ins->inst_offset;
3516 temp->dreg = mono_alloc_ireg (cfg);
3518 ADD_NEW_INS (cfg, add_ins, OP_IADD);
3519 add_ins->sreg1 = ins->inst_destbasereg;
3520 add_ins->sreg2 = temp->dreg;
3521 add_ins->dreg = mono_alloc_ireg (cfg);
3523 ins->inst_destbasereg = add_ins->dreg;
3524 ins->inst_offset = 0;
3527 case OP_STORE_MEMBASE_IMM:
3528 case OP_STOREI1_MEMBASE_IMM:
3529 case OP_STOREI2_MEMBASE_IMM:
3530 case OP_STOREI4_MEMBASE_IMM:
/* Materialize the stored immediate, then re-lower the resulting
 * STORE*_MEMBASE_REG (the offset may itself be out of range). */
3531 ADD_NEW_INS (cfg, temp, OP_ICONST);
3532 temp->inst_c0 = ins->inst_imm;
3533 temp->dreg = mono_alloc_ireg (cfg);
3534 ins->sreg1 = temp->dreg;
3535 ins->opcode = map_to_reg_reg_op (ins->opcode);
3537 goto loop_start; /* make it handle the possibly big ins->inst_offset */
3539 gboolean swap = FALSE;
3543 /* Optimized away */
3548 /* Some fp compares require swapped operands */
3549 switch (ins->next->opcode) {
3551 ins->next->opcode = OP_FBLT;
3555 ins->next->opcode = OP_FBLT_UN;
3559 ins->next->opcode = OP_FBGE;
3563 ins->next->opcode = OP_FBGE_UN;
3571 ins->sreg1 = ins->sreg2;
3580 bb->last_ins = last_ins;
3581 bb->max_vreg = cfg->next_vreg;
/*
 * mono_arch_decompose_long_opts:
 *
 *   Decompose 64-bit opcodes into 32-bit pairs.  OP_LNEG becomes a
 * reverse-subtract-with-borrow sequence: RSBS low = 0 - low (setting carry),
 * then RSC high = 0 - high - !carry.  vreg + 1 / + 2 are the low/high halves.
 */
3585 mono_arch_decompose_long_opts (MonoCompile *cfg, MonoInst *long_ins)
3589 if (long_ins->opcode == OP_LNEG) {
3591 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ARM_RSBS_IMM, ins->dreg + 1, ins->sreg1 + 1, 0);
3592 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ARM_RSC_IMM, ins->dreg + 2, ins->sreg1 + 2, 0);
/*
 * emit_float_to_int:
 *
 *   Emit VFP code to convert the double in SREG to an integer of SIZE bytes
 * (signed if IS_SIGNED) in DREG: TOSIZD/TOUIZD into a scratch VFP register,
 * FMRS to the core register, then mask or shift to truncate/extend to SIZE.
 */
3598 emit_float_to_int (MonoCompile *cfg, guchar *code, int dreg, int sreg, int size, gboolean is_signed)
3600 /* sreg is a float, dreg is an integer reg */
3602 code = mono_arm_emit_vfp_scratch_save (cfg, code, vfp_scratch1);
3604 ARM_TOSIZD (code, vfp_scratch1, sreg);
3606 ARM_TOUIZD (code, vfp_scratch1, sreg);
3607 ARM_FMRS (code, dreg, vfp_scratch1);
3608 code = mono_arm_emit_vfp_scratch_restore (cfg, code, vfp_scratch1);
/* Unsigned narrowing: mask (u1) or shift left/right logical (u2). */
3612 ARM_AND_REG_IMM8 (code, dreg, dreg, 0xff);
3613 else if (size == 2) {
3614 ARM_SHL_IMM (code, dreg, dreg, 16);
3615 ARM_SHR_IMM (code, dreg, dreg, 16);
/* Signed narrowing: shift left then arithmetic shift right to sign-extend. */
3619 ARM_SHL_IMM (code, dreg, dreg, 24);
3620 ARM_SAR_IMM (code, dreg, dreg, 24);
3621 } else if (size == 2) {
3622 ARM_SHL_IMM (code, dreg, dreg, 16);
3623 ARM_SAR_IMM (code, dreg, dreg, 16);
3629 #endif /* #ifndef DISABLE_JIT */
3633 const guchar *target;
/* True if DIFF fits in the signed 26-bit byte displacement of an ARM BL
 * (+-32 MB), i.e. the target is reachable with a direct branch. */
3638 #define is_call_imm(diff) ((gint)(diff) >= -33554432 && (gint)(diff) <= 33554431)
/*
 * search_thunk_slot:
 *
 *   mono_code_manager_foreach callback: find (or allocate) a 3-word branch
 * thunk for pdata->target inside one code chunk, reachable from pdata->code,
 * and patch the call site to branch to it.  Sets pdata->found on success.
 * NOTE(review): gaps in this chunk; comments only.
 */
3641 search_thunk_slot (void *data, int csize, int bsize, void *user_data) {
3642 PatchData *pdata = (PatchData*)user_data;
3643 guchar *code = data;
3644 guint32 *thunks = data;
3645 guint32 *endthunks = (guint32*)(code + bsize);
3647 int difflow, diffhigh;
3649 /* always ensure a call from pdata->code can reach to the thunks without further thunks */
3650 difflow = (char*)pdata->code - (char*)thunks;
3651 diffhigh = (char*)pdata->code - (char*)endthunks;
3652 if (!((is_call_imm (thunks) && is_call_imm (endthunks)) || (is_call_imm (difflow) && is_call_imm (diffhigh))))
3656 * The thunk is composed of 3 words:
3657 * load constant from thunks [2] into ARM_IP
3660 * Note that the LR register is already setup
3662 //g_print ("thunk nentries: %d\n", ((char*)endthunks - (char*)thunks)/16);
3663 if ((pdata->found == 2) || (pdata->code >= code && pdata->code <= code + csize)) {
3664 while (thunks < endthunks) {
3665 //g_print ("looking for target: %p at %p (%08x-%08x)\n", pdata->target, thunks, thunks [0], thunks [1]);
/* Existing thunk for this target: reuse it. */
3666 if (thunks [2] == (guint32)pdata->target) {
3667 arm_patch (pdata->code, (guchar*)thunks);
3668 mono_arch_flush_icache (pdata->code, 4);
/* All-zero slot: emit a fresh thunk (ldr ip, [pc]; bx/mov pc; target). */
3671 } else if ((thunks [0] == 0) && (thunks [1] == 0) && (thunks [2] == 0)) {
3672 /* found a free slot instead: emit thunk */
3673 /* ARMREG_IP is fine to use since this can't be an IMT call
3676 code = (guchar*)thunks;
3677 ARM_LDR_IMM (code, ARMREG_IP, ARMREG_PC, 0);
3678 if (thumb_supported)
3679 ARM_BX (code, ARMREG_IP);
3681 ARM_MOV_REG_REG (code, ARMREG_PC, ARMREG_IP);
3682 thunks [2] = (guint32)pdata->target;
3683 mono_arch_flush_icache ((guchar*)thunks, 12);
3685 arm_patch (pdata->code, (guchar*)thunks);
3686 mono_arch_flush_icache (pdata->code, 4);
3690 /* skip 12 bytes, the size of the thunk */
3694 //g_print ("failed thunk lookup for %p from %p at %p (%d entries)\n", pdata->target, pdata->code, data, count);
/*
 * handle_thunk:
 *
 *   Patch CODE to reach TARGET through a branch thunk when the displacement
 * does not fit in a direct branch.  Searches, in order: the dynamic-method
 * code manager (if given), the domain's code chunks, and finally every
 * dynamic method's private code manager.  Aborts if no thunk slot is found.
 * NOTE(review): gaps in this chunk; comments only.
 */
3700 handle_thunk (MonoDomain *domain, int absolute, guchar *code, const guchar *target, MonoCodeManager *dyn_code_mp)
3705 domain = mono_domain_get ();
3708 pdata.target = target;
3709 pdata.absolute = absolute;
3713 mono_code_manager_foreach (dyn_code_mp, search_thunk_slot, &pdata);
3716 if (pdata.found != 1) {
3717 mono_domain_lock (domain);
3718 mono_domain_code_foreach (domain, search_thunk_slot, &pdata);
3721 /* this uses the first available slot */
/* Second pass with relaxed placement (pdata.found == 2 mode). */
3723 mono_domain_code_foreach (domain, search_thunk_slot, &pdata);
3725 mono_domain_unlock (domain);
3728 if (pdata.found != 1) {
3730 GHashTableIter iter;
3731 MonoJitDynamicMethodInfo *ji;
3734 * This might be a dynamic method, search its code manager. We can only
3735 * use the dynamic method containing CODE, since the others might be freed later.
3739 mono_domain_lock (domain);
3740 hash = domain_jit_info (domain)->dynamic_code_hash;
3742 /* FIXME: Speed this up */
3743 g_hash_table_iter_init (&iter, hash);
3744 while (g_hash_table_iter_next (&iter, NULL, (gpointer*)&ji)) {
3745 mono_code_manager_foreach (ji->code_mp, search_thunk_slot, &pdata);
3746 if (pdata.found == 1)
3750 mono_domain_unlock (domain);
3752 if (pdata.found != 1)
3753 g_print ("thunk failed for %p from %p\n", target, code);
3754 g_assert (pdata.found == 1);
/*
 * arm_patch_general:
 *
 *   Patch the branch/call at CODE to transfer to TARGET.  Handles direct
 * B/BL (rewriting the 24-bit displacement, upgrading BL to BLX for Thumb
 * targets, or falling back to a thunk when out of range), and the indirect
 * ldr-ip/constant-pool call sequences by rewriting the embedded address word.
 * NOTE(review): gaps in this chunk; comments only, code untouched.
 */
3758 arm_patch_general (MonoDomain *domain, guchar *code, const guchar *target, MonoCodeManager *dyn_code_mp)
3760 guint32 *code32 = (void*)code;
3761 guint32 ins = *code32;
3762 guint32 prim = (ins >> 25) & 7;
3763 guint32 tval = GPOINTER_TO_UINT (target);
3765 //g_print ("patching 0x%08x (0x%08x) to point to 0x%08x\n", code, ins, target);
3766 if (prim == 5) { /* 101b */
3767 /* the diff starts 8 bytes from the branch opcode */
3768 gint diff = target - code - 8;
3770 gint tmask = 0xffffffff;
3771 if (tval & 1) { /* entering thumb mode */
3772 diff = target - 1 - code - 8;
3773 g_assert (thumb_supported);
3774 tbits = 0xf << 28; /* bl->blx bit pattern */
3775 g_assert ((ins & (1 << 24))); /* it must be a bl, not b instruction */
3776 /* this low bit of the displacement is moved to bit 24 in the instruction encoding */
3780 tmask = ~(1 << 24); /* clear the link bit */
3781 /*g_print ("blx to thumb: target: %p, code: %p, diff: %d, mask: %x\n", target, code, diff, tmask);*/
/* Forward branch within the +32MB direct-branch range. */
3786 if (diff <= 33554431) {
3788 ins = (ins & 0xff000000) | diff;
3790 *code32 = ins | tbits;
3794 /* diff between 0 and -33554432 */
/* Backward branch within the -32MB direct-branch range. */
3795 if (diff >= -33554432) {
3797 ins = (ins & 0xff000000) | (diff & ~0xff000000);
3799 *code32 = ins | tbits;
/* Out of direct range: route through a branch thunk. */
3804 handle_thunk (domain, TRUE, code, target, dyn_code_mp);
3808 #ifdef USE_JUMP_TABLES
3810 gpointer *jte = mono_jumptable_get_entry (code);
3812 jte [0] = (gpointer) target;
3816 * The alternative call sequences looks like this:
3818 * ldr ip, [pc] // loads the address constant
3819 * b 1f // jumps around the constant
3820 * address constant embedded in the code
3825 * There are two cases for patching:
3826 * a) at the end of method emission: in this case code points to the start
3827 * of the call sequence
3828 * b) during runtime patching of the call site: in this case code points
3829 * to the mov pc, ip instruction
3831 * We have to handle also the thunk jump code sequence:
3835 * address constant // execution never reaches here
3837 if ((ins & 0x0ffffff0) == 0x12fff10) {
3838 /* Branch and exchange: the address is constructed in a reg
3839 * We can patch BX when the code sequence is the following:
3840 * ldr ip, [pc, #0] ; 0x8
/* Re-emit the expected 4-word sequence into ccode[] so we can locate where
 * in the sequence CODE points and thus where the constant word lives. */
3847 guint8 *emit = (guint8*)ccode;
3848 ARM_LDR_IMM (emit, ARMREG_IP, ARMREG_PC, 0);
3850 ARM_MOV_REG_REG (emit, ARMREG_LR, ARMREG_PC);
3851 ARM_BX (emit, ARMREG_IP);
3853 /*patching from magic trampoline*/
3854 if (ins == ccode [3]) {
3855 g_assert (code32 [-4] == ccode [0]);
3856 g_assert (code32 [-3] == ccode [1]);
3857 g_assert (code32 [-1] == ccode [2]);
3858 code32 [-2] = (guint32)target;
3861 /*patching from JIT*/
3862 if (ins == ccode [0]) {
3863 g_assert (code32 [1] == ccode [1]);
3864 g_assert (code32 [3] == ccode [2]);
3865 g_assert (code32 [4] == ccode [3]);
3866 code32 [2] = (guint32)target;
3869 g_assert_not_reached ();
3870 } else if ((ins & 0x0ffffff0) == 0x12fff30) {
/* BLX register form: 3-word sequence, constant precedes the BLX. */
3878 guint8 *emit = (guint8*)ccode;
3879 ARM_LDR_IMM (emit, ARMREG_IP, ARMREG_PC, 0);
3881 ARM_BLX_REG (emit, ARMREG_IP);
3883 g_assert (code32 [-3] == ccode [0]);
3884 g_assert (code32 [-2] == ccode [1]);
3885 g_assert (code32 [0] == ccode [2]);
3887 code32 [-1] = (guint32)target;
/* mov pc, ip style sequence (also covers the thunk jump code). */
3890 guint32 *tmp = ccode;
3891 guint8 *emit = (guint8*)tmp;
3892 ARM_LDR_IMM (emit, ARMREG_IP, ARMREG_PC, 0);
3893 ARM_MOV_REG_REG (emit, ARMREG_LR, ARMREG_PC);
3894 ARM_MOV_REG_REG (emit, ARMREG_PC, ARMREG_IP);
3895 ARM_BX (emit, ARMREG_IP);
3896 if (ins == ccode [2]) {
3897 g_assert_not_reached (); // should be -2 ...
3898 code32 [-1] = (guint32)target;
3901 if (ins == ccode [0]) {
3902 /* handles both thunk jump code and the far call sequence */
3903 code32 [2] = (guint32)target;
3906 g_assert_not_reached ();
3908 // g_print ("patched with 0x%08x\n", ins);
/* Convenience wrapper: patch without domain/dynamic-method context. */
3913 arm_patch (guchar *code, const guchar *target)
3915 arm_patch_general (NULL, code, target, NULL);
3919 * Return the >= 0 uimm8 value if val can be represented with a byte + rotation
3920 * (with the rotation amount in *rot_amount. rot_amount is already adjusted
3921 * to be used with the emit macros.
3922 * Return -1 otherwise.
3925 mono_arm_is_rotated_imm8 (guint32 val, gint *rot_amount)
/* Try every even rotation (ARM immediates rotate right by 2*n). */
3928 for (i = 0; i < 31; i+= 2) {
3929 res = (val << (32 - i)) | (val >> i);
3932 *rot_amount = i? 32 - i: 0;
3939 * Emits in code a sequence of instructions that load the value 'val'
3940 * into the dreg register. Uses at most 4 instructions.
3943 mono_arm_emit_load_imm (guint8 *code, int dreg, guint32 val)
3945 int imm8, rot_amount;
/* Constant-pool path (elided #ifdef): load from a PC-relative pool. */
3947 ARM_LDR_IMM (code, dreg, ARMREG_PC, 0);
3948 /* skip the constant pool */
/* Single-instruction forms: rotated imm8 via MOV, or its complement via MVN. */
3954 if ((imm8 = mono_arm_is_rotated_imm8 (val, &rot_amount)) >= 0) {
3955 ARM_MOV_REG_IMM (code, dreg, imm8, rot_amount);
3956 } else if ((imm8 = mono_arm_is_rotated_imm8 (~val, &rot_amount)) >= 0) {
3957 ARM_MVN_REG_IMM (code, dreg, imm8, rot_amount);
/* ARMv7 MOVW/MOVT pair builds any 32-bit value in two instructions. */
3960 ARM_MOVW_REG_IMM (code, dreg, val & 0xffff);
3962 ARM_MOVT_REG_IMM (code, dreg, (val >> 16) & 0xffff);
/* Pre-v7 fallback: build the value one byte-field at a time (MOV + up to
 * three ADDs), starting from the lowest non-zero byte. */
3966 ARM_MOV_REG_IMM8 (code, dreg, (val & 0xFF));
3968 ARM_ADD_REG_IMM (code, dreg, dreg, (val & 0xFF00) >> 8, 24);
3970 if (val & 0xFF0000) {
3971 ARM_ADD_REG_IMM (code, dreg, dreg, (val & 0xFF0000) >> 16, 16);
3973 if (val & 0xFF000000) {
3974 ARM_ADD_REG_IMM (code, dreg, dreg, (val & 0xFF000000) >> 24, 8);
3976 } else if (val & 0xFF00) {
3977 ARM_MOV_REG_IMM (code, dreg, (val & 0xFF00) >> 8, 24);
3978 if (val & 0xFF0000) {
3979 ARM_ADD_REG_IMM (code, dreg, dreg, (val & 0xFF0000) >> 16, 16);
3981 if (val & 0xFF000000) {
3982 ARM_ADD_REG_IMM (code, dreg, dreg, (val & 0xFF000000) >> 24, 8);
3984 } else if (val & 0xFF0000) {
3985 ARM_MOV_REG_IMM (code, dreg, (val & 0xFF0000) >> 16, 16);
3986 if (val & 0xFF000000) {
3987 ARM_ADD_REG_IMM (code, dreg, dreg, (val & 0xFF000000) >> 24, 8);
3990 //g_assert_not_reached ();
/*
 * mono_arm_thumb_supported:
 *
 *   Report whether the Thumb instruction set is available, as recorded in
 *   the file-scope `thumb_supported` flag (set elsewhere in this file,
 *   presumably during CPU-feature initialization — not visible here).
 *   NOTE(review): return type and braces were elided by extraction.
 */
3996 mono_arm_thumb_supported (void)
3998 return thumb_supported;
/*
 * NOTE(review): this function is shown with many interior lines elided by
 * extraction (locals, else branches, closing braces, the final return).
 * The visible code is reproduced byte-identically; comments annotate only
 * what the visible lines establish.
 */
4004  * emit_load_volatile_arguments:
4006  * Load volatile arguments from the stack to the original input registers.
4007  * Required before a tail call.
4010 emit_load_volatile_arguments (MonoCompile *cfg, guint8 *code)
4012 MonoMethod *method = cfg->method;
4013 MonoMethodSignature *sig;
4018 /* FIXME: Generate intermediate code instead */
4020 sig = mono_method_signature (method);
4022 /* This is the opposite of the code in emit_prolog */
/* Recompute the call info describing where each argument lives on entry. */
4026 cinfo = get_call_info (cfg->generic_sharing_context, NULL, sig);
/* Valuetype-return: reload the hidden return-address argument into its register. */
4028 if (cinfo->vtype_retaddr) {
4029 ArgInfo *ainfo = &cinfo->ret;
4030 inst = cfg->vret_addr;
4031 g_assert (arm_is_imm12 (inst->inst_offset));
4032 ARM_LDR_IMM (code, ainfo->reg, inst->inst_basereg, inst->inst_offset);
/* Walk every formal argument (plus the implicit `this` when present). */
4034 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
4035 ArgInfo *ainfo = cinfo->args + i;
4036 inst = cfg->args [pos];
4038 if (cfg->verbose_level > 2)
4039 g_print ("Loading argument %d (type: %d)\n", i, ainfo->storage);
/* Argument was allocated to a register by the JIT: */
4040 if (inst->opcode == OP_REGVAR) {
/* ...and arrived in a register: plain register-to-register move. */
4041 if (ainfo->storage == RegTypeGeneral)
4042 ARM_MOV_REG_REG (code, inst->dreg, ainfo->reg);
/* FP args in a REGVAR are not expected here. */
4043 else if (ainfo->storage == RegTypeFP) {
4044 g_assert_not_reached ();
/* ...and arrived on the caller's stack: reload relative to SP. */
4045 } else if (ainfo->storage == RegTypeBase) {
4049 if (arm_is_imm12 (prev_sp_offset + ainfo->offset)) {
4050 ARM_LDR_IMM (code, inst->dreg, ARMREG_SP, (prev_sp_offset + ainfo->offset));
/* Offset too large for LDR's 12-bit immediate: go through IP.
 * NOTE(review): this loads inst->inst_offset, not
 * prev_sp_offset + ainfo->offset as the imm12 branch uses — looks
 * inconsistent; confirm against upstream before relying on it. */
4052 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
4053 ARM_LDR_REG_REG (code, inst->dreg, ARMREG_SP, ARMREG_IP);
4057 g_assert_not_reached ();
/* Argument was spilled to the frame but arrived in register(s): reload it. */
4059 if (ainfo->storage == RegTypeGeneral || ainfo->storage == RegTypeIRegPair) {
4060 switch (ainfo->size) {
/* 8-byte case (presumably size == 8 — the case labels are elided):
 * reload both halves of the register pair. */
4067 g_assert (arm_is_imm12 (inst->inst_offset));
4068 ARM_LDR_IMM (code, ainfo->reg, inst->inst_basereg, inst->inst_offset);
4069 g_assert (arm_is_imm12 (inst->inst_offset + 4));
4070 ARM_LDR_IMM (code, ainfo->reg + 1, inst->inst_basereg, inst->inst_offset + 4);
/* Default word-sized case: single load, via IP when out of imm12 range. */
4073 if (arm_is_imm12 (inst->inst_offset)) {
4074 ARM_LDR_IMM (code, ainfo->reg, inst->inst_basereg, inst->inst_offset);
4076 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
4077 ARM_LDR_REG_REG (code, ainfo->reg, inst->inst_basereg, ARMREG_IP);
/* Remaining storage kinds: bodies elided in this extraction. */
4081 } else if (ainfo->storage == RegTypeBaseGen) {
4084 } else if (ainfo->storage == RegTypeBase) {
4086 } else if (ainfo->storage == RegTypeFP) {
4087 g_assert_not_reached ();
/* Small struct passed by value in consecutive registers: reload each
 * word from the frame into ainfo->reg, ainfo->reg + 1, ... */
4088 } else if (ainfo->storage == RegTypeStructByVal) {
4089 int doffset = inst->inst_offset;
4093 if (mono_class_from_mono_type (inst->inst_vtype))
4094 size = mono_class_native_size (mono_class_from_mono_type (inst->inst_vtype), NULL);
4095 for (cur_reg = 0; cur_reg < ainfo->size; ++cur_reg) {
4096 if (arm_is_imm12 (doffset)) {
4097 ARM_LDR_IMM (code, ainfo->reg + cur_reg, inst->inst_basereg, doffset);
4099 code = mono_arm_emit_load_imm (code, ARMREG_IP, doffset);
4100 ARM_LDR_REG_REG (code, ainfo->reg + cur_reg, inst->inst_basereg, ARMREG_IP);
4102 soffset += sizeof (gpointer);
4103 doffset += sizeof (gpointer);
4108 } else if (ainfo->storage == RegTypeStructByAddr) {
4123 mono_arch_output_basic_block (MonoCompile *cfg, MonoBasicBlock *bb)
4128 guint8 *code = cfg->native_code + cfg->code_len;
4129 MonoInst *last_ins = NULL;
4130 guint last_offset = 0;
4132 int imm8, rot_amount;
4134 /* we don't align basic blocks of loops on arm */
4136 if (cfg->verbose_level > 2)
4137 g_print ("Basic block %d starting at offset 0x%x\n", bb->block_num, bb->native_offset);
4139 cpos = bb->max_offset;
4141 if (cfg->prof_options & MONO_PROFILE_COVERAGE) {
4142 //MonoCoverageInfo *cov = mono_get_coverage_info (cfg->method);
4143 //g_assert (!mono_compile_aot);
4146 // cov->data [bb->dfn].iloffset = bb->cil_code - cfg->cil_code;
4147 /* this is not thread save, but good enough */
4148 /* fixme: howto handle overflows? */
4149 //x86_inc_mem (code, &cov->data [bb->dfn].count);
4152 if (mono_break_at_bb_method && mono_method_desc_full_match (mono_break_at_bb_method, cfg->method) && bb->block_num == mono_break_at_bb_bb_num) {
4153 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_INTERNAL_METHOD,
4154 (gpointer)"mono_break");
4155 code = emit_call_seq (cfg, code);
4158 MONO_BB_FOR_EACH_INS (bb, ins) {
4159 offset = code - cfg->native_code;
4161 max_len = ((guint8 *)ins_get_spec (ins->opcode))[MONO_INST_LEN];
4163 if (offset > (cfg->code_size - max_len - 16)) {
4164 cfg->code_size *= 2;
4165 cfg->native_code = g_realloc (cfg->native_code, cfg->code_size);
4166 code = cfg->native_code + offset;
4168 // if (ins->cil_code)
4169 // g_print ("cil code\n");
4170 mono_debug_record_line_number (cfg, ins, offset);
4172 switch (ins->opcode) {
4173 case OP_MEMORY_BARRIER:
4175 ARM_MOV_REG_IMM8 (code, ARMREG_R0, 0);
4176 ARM_MCR (code, 15, 0, ARMREG_R0, 7, 10, 5);
4180 #ifdef HAVE_AEABI_READ_TP
4181 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_INTERNAL_METHOD,
4182 (gpointer)"__aeabi_read_tp");
4183 code = emit_call_seq (cfg, code);
4185 ARM_LDR_IMM (code, ins->dreg, ARMREG_R0, ins->inst_offset);
4187 g_assert_not_reached ();
4190 case OP_ATOMIC_EXCHANGE_I4:
4191 case OP_ATOMIC_CAS_I4:
4192 case OP_ATOMIC_ADD_I4: {
4196 g_assert (v7_supported);
4199 if (ins->sreg1 != ARMREG_IP && ins->sreg2 != ARMREG_IP && ins->sreg3 != ARMREG_IP)
4201 else if (ins->sreg1 != ARMREG_R0 && ins->sreg2 != ARMREG_R0 && ins->sreg3 != ARMREG_R0)
4203 else if (ins->sreg1 != ARMREG_R1 && ins->sreg2 != ARMREG_R1 && ins->sreg3 != ARMREG_R1)
4207 g_assert (cfg->arch.atomic_tmp_offset != -1);
4208 ARM_STR_IMM (code, tmpreg, cfg->frame_reg, cfg->arch.atomic_tmp_offset);
4210 switch (ins->opcode) {
4211 case OP_ATOMIC_EXCHANGE_I4:
4213 ARM_DMB (code, ARM_DMB_SY);
4214 ARM_LDREX_REG (code, ARMREG_LR, ins->sreg1);
4215 ARM_STREX_REG (code, tmpreg, ins->sreg2, ins->sreg1);
4216 ARM_CMP_REG_IMM (code, tmpreg, 0, 0);
4218 ARM_B_COND (code, ARMCOND_NE, 0);
4219 arm_patch (buf [1], buf [0]);
4221 case OP_ATOMIC_CAS_I4:
4222 ARM_DMB (code, ARM_DMB_SY);
4224 ARM_LDREX_REG (code, ARMREG_LR, ins->sreg1);
4225 ARM_CMP_REG_REG (code, ARMREG_LR, ins->sreg3);
4227 ARM_B_COND (code, ARMCOND_NE, 0);
4228 ARM_STREX_REG (code, tmpreg, ins->sreg2, ins->sreg1);
4229 ARM_CMP_REG_IMM (code, tmpreg, 0, 0);
4231 ARM_B_COND (code, ARMCOND_NE, 0);
4232 arm_patch (buf [2], buf [0]);
4233 arm_patch (buf [1], code);
4235 case OP_ATOMIC_ADD_I4:
4237 ARM_DMB (code, ARM_DMB_SY);
4238 ARM_LDREX_REG (code, ARMREG_LR, ins->sreg1);
4239 ARM_ADD_REG_REG (code, ARMREG_LR, ARMREG_LR, ins->sreg2);
4240 ARM_STREX_REG (code, tmpreg, ARMREG_LR, ins->sreg1);
4241 ARM_CMP_REG_IMM (code, tmpreg, 0, 0);
4243 ARM_B_COND (code, ARMCOND_NE, 0);
4244 arm_patch (buf [1], buf [0]);
4247 g_assert_not_reached ();
4250 ARM_DMB (code, ARM_DMB_SY);
4251 if (tmpreg != ins->dreg)
4252 ARM_LDR_IMM (code, tmpreg, cfg->frame_reg, cfg->arch.atomic_tmp_offset);
4253 ARM_MOV_REG_REG (code, ins->dreg, ARMREG_LR);
4258 ppc_mullw (code, ppc_r4, ins->sreg1, ins->sreg2);
4259 ppc_mulhw (code, ppc_r3, ins->sreg1, ins->sreg2);
4262 ppc_mullw (code, ppc_r4, ins->sreg1, ins->sreg2);
4263 ppc_mulhwu (code, ppc_r3, ins->sreg1, ins->sreg2);
4265 case OP_STOREI1_MEMBASE_IMM:
4266 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_imm & 0xFF);
4267 g_assert (arm_is_imm12 (ins->inst_offset));
4268 ARM_STRB_IMM (code, ARMREG_LR, ins->inst_destbasereg, ins->inst_offset);
4270 case OP_STOREI2_MEMBASE_IMM:
4271 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_imm & 0xFFFF);
4272 g_assert (arm_is_imm8 (ins->inst_offset));
4273 ARM_STRH_IMM (code, ARMREG_LR, ins->inst_destbasereg, ins->inst_offset);
4275 case OP_STORE_MEMBASE_IMM:
4276 case OP_STOREI4_MEMBASE_IMM:
4277 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_imm);
4278 g_assert (arm_is_imm12 (ins->inst_offset));
4279 ARM_STR_IMM (code, ARMREG_LR, ins->inst_destbasereg, ins->inst_offset);
4281 case OP_STOREI1_MEMBASE_REG:
4282 g_assert (arm_is_imm12 (ins->inst_offset));
4283 ARM_STRB_IMM (code, ins->sreg1, ins->inst_destbasereg, ins->inst_offset);
4285 case OP_STOREI2_MEMBASE_REG:
4286 g_assert (arm_is_imm8 (ins->inst_offset));
4287 ARM_STRH_IMM (code, ins->sreg1, ins->inst_destbasereg, ins->inst_offset);
4289 case OP_STORE_MEMBASE_REG:
4290 case OP_STOREI4_MEMBASE_REG:
4291 /* this case is special, since it happens for spill code after lowering has been called */
4292 if (arm_is_imm12 (ins->inst_offset)) {
4293 ARM_STR_IMM (code, ins->sreg1, ins->inst_destbasereg, ins->inst_offset);
4295 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_offset);
4296 ARM_STR_REG_REG (code, ins->sreg1, ins->inst_destbasereg, ARMREG_LR);
4299 case OP_STOREI1_MEMINDEX:
4300 ARM_STRB_REG_REG (code, ins->sreg1, ins->inst_destbasereg, ins->sreg2);
4302 case OP_STOREI2_MEMINDEX:
4303 ARM_STRH_REG_REG (code, ins->sreg1, ins->inst_destbasereg, ins->sreg2);
4305 case OP_STORE_MEMINDEX:
4306 case OP_STOREI4_MEMINDEX:
4307 ARM_STR_REG_REG (code, ins->sreg1, ins->inst_destbasereg, ins->sreg2);
4310 g_assert_not_reached ();
4312 case OP_LOAD_MEMINDEX:
4313 case OP_LOADI4_MEMINDEX:
4314 case OP_LOADU4_MEMINDEX:
4315 ARM_LDR_REG_REG (code, ins->dreg, ins->inst_basereg, ins->sreg2);
4317 case OP_LOADI1_MEMINDEX:
4318 ARM_LDRSB_REG_REG (code, ins->dreg, ins->inst_basereg, ins->sreg2);
4320 case OP_LOADU1_MEMINDEX:
4321 ARM_LDRB_REG_REG (code, ins->dreg, ins->inst_basereg, ins->sreg2);
4323 case OP_LOADI2_MEMINDEX:
4324 ARM_LDRSH_REG_REG (code, ins->dreg, ins->inst_basereg, ins->sreg2);
4326 case OP_LOADU2_MEMINDEX:
4327 ARM_LDRH_REG_REG (code, ins->dreg, ins->inst_basereg, ins->sreg2);
4329 case OP_LOAD_MEMBASE:
4330 case OP_LOADI4_MEMBASE:
4331 case OP_LOADU4_MEMBASE:
4332 /* this case is special, since it happens for spill code after lowering has been called */
4333 if (arm_is_imm12 (ins->inst_offset)) {
4334 ARM_LDR_IMM (code, ins->dreg, ins->inst_basereg, ins->inst_offset);
4336 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_offset);
4337 ARM_LDR_REG_REG (code, ins->dreg, ins->inst_basereg, ARMREG_LR);
4340 case OP_LOADI1_MEMBASE:
4341 g_assert (arm_is_imm8 (ins->inst_offset));
4342 ARM_LDRSB_IMM (code, ins->dreg, ins->inst_basereg, ins->inst_offset);
4344 case OP_LOADU1_MEMBASE:
4345 g_assert (arm_is_imm12 (ins->inst_offset));
4346 ARM_LDRB_IMM (code, ins->dreg, ins->inst_basereg, ins->inst_offset);
4348 case OP_LOADU2_MEMBASE:
4349 g_assert (arm_is_imm8 (ins->inst_offset));
4350 ARM_LDRH_IMM (code, ins->dreg, ins->inst_basereg, ins->inst_offset);
4352 case OP_LOADI2_MEMBASE:
4353 g_assert (arm_is_imm8 (ins->inst_offset));
4354 ARM_LDRSH_IMM (code, ins->dreg, ins->inst_basereg, ins->inst_offset);
4356 case OP_ICONV_TO_I1:
4357 ARM_SHL_IMM (code, ins->dreg, ins->sreg1, 24);
4358 ARM_SAR_IMM (code, ins->dreg, ins->dreg, 24);
4360 case OP_ICONV_TO_I2:
4361 ARM_SHL_IMM (code, ins->dreg, ins->sreg1, 16);
4362 ARM_SAR_IMM (code, ins->dreg, ins->dreg, 16);
4364 case OP_ICONV_TO_U1:
4365 ARM_AND_REG_IMM8 (code, ins->dreg, ins->sreg1, 0xff);
4367 case OP_ICONV_TO_U2:
4368 ARM_SHL_IMM (code, ins->dreg, ins->sreg1, 16);
4369 ARM_SHR_IMM (code, ins->dreg, ins->dreg, 16);
4373 ARM_CMP_REG_REG (code, ins->sreg1, ins->sreg2);
4375 case OP_COMPARE_IMM:
4376 case OP_ICOMPARE_IMM:
4377 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
4378 g_assert (imm8 >= 0);
4379 ARM_CMP_REG_IMM (code, ins->sreg1, imm8, rot_amount);
4383 * gdb does not like encountering the hw breakpoint ins in the debugged code.
4384 * So instead of emitting a trap, we emit a call a C function and place a
4387 //*(int*)code = 0xef9f0001;
4390 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_INTERNAL_METHOD,
4391 (gpointer)"mono_break");
4392 code = emit_call_seq (cfg, code);
4394 case OP_RELAXED_NOP:
4399 case OP_DUMMY_STORE:
4400 case OP_DUMMY_ICONST:
4401 case OP_DUMMY_R8CONST:
4402 case OP_NOT_REACHED:
4405 case OP_IL_SEQ_POINT:
4406 mono_add_seq_point (cfg, bb, ins, code - cfg->native_code);
4408 case OP_SEQ_POINT: {
4410 MonoInst *info_var = cfg->arch.seq_point_info_var;
4411 MonoInst *ss_trigger_page_var = cfg->arch.ss_trigger_page_var;
4412 MonoInst *ss_read_var = cfg->arch.seq_point_read_var;
4413 MonoInst *ss_method_var = cfg->arch.seq_point_ss_method_var;
4414 MonoInst *bp_method_var = cfg->arch.seq_point_bp_method_var;
4416 int dreg = ARMREG_LR;
4418 if (cfg->soft_breakpoints) {
4419 g_assert (!cfg->compile_aot);
4423 * For AOT, we use one got slot per method, which will point to a
4424 * SeqPointInfo structure, containing all the information required
4425 * by the code below.
4427 if (cfg->compile_aot) {
4428 g_assert (info_var);
4429 g_assert (info_var->opcode == OP_REGOFFSET);
4430 g_assert (arm_is_imm12 (info_var->inst_offset));
4433 if (!cfg->soft_breakpoints) {
4435 * Read from the single stepping trigger page. This will cause a
4436 * SIGSEGV when single stepping is enabled.
4437 * We do this _before_ the breakpoint, so single stepping after
4438 * a breakpoint is hit will step to the next IL offset.
4440 g_assert (((guint64)(gsize)ss_trigger_page >> 32) == 0);
4443 if (ins->flags & MONO_INST_SINGLE_STEP_LOC) {
4444 if (cfg->soft_breakpoints) {
4445 /* Load the address of the sequence point trigger variable. */
4448 g_assert (var->opcode == OP_REGOFFSET);
4449 g_assert (arm_is_imm12 (var->inst_offset));
4450 ARM_LDR_IMM (code, dreg, var->inst_basereg, var->inst_offset);
4452 /* Read the value and check whether it is non-zero. */
4453 ARM_LDR_IMM (code, dreg, dreg, 0);
4454 ARM_CMP_REG_IMM (code, dreg, 0, 0);
4456 /* Load the address of the sequence point method. */
4457 var = ss_method_var;
4459 g_assert (var->opcode == OP_REGOFFSET);
4460 g_assert (arm_is_imm12 (var->inst_offset));
4461 ARM_LDR_IMM (code, dreg, var->inst_basereg, var->inst_offset);
4463 /* Call it conditionally. */
4464 ARM_BLX_REG_COND (code, ARMCOND_NE, dreg);
4466 if (cfg->compile_aot) {
4467 /* Load the trigger page addr from the variable initialized in the prolog */
4468 var = ss_trigger_page_var;
4470 g_assert (var->opcode == OP_REGOFFSET);
4471 g_assert (arm_is_imm12 (var->inst_offset));
4472 ARM_LDR_IMM (code, dreg, var->inst_basereg, var->inst_offset);
4474 #ifdef USE_JUMP_TABLES
4475 gpointer *jte = mono_jumptable_add_entry ();
4476 code = mono_arm_load_jumptable_entry (code, jte, dreg);
4477 jte [0] = ss_trigger_page;
4479 ARM_LDR_IMM (code, dreg, ARMREG_PC, 0);
4481 *(int*)code = (int)ss_trigger_page;
4485 ARM_LDR_IMM (code, dreg, dreg, 0);
4489 mono_add_seq_point (cfg, bb, ins, code - cfg->native_code);
4491 if (cfg->soft_breakpoints) {
4492 /* Load the address of the breakpoint method into ip. */
4493 var = bp_method_var;
4495 g_assert (var->opcode == OP_REGOFFSET);
4496 g_assert (arm_is_imm12 (var->inst_offset));
4497 ARM_LDR_IMM (code, dreg, var->inst_basereg, var->inst_offset);
4500 * A placeholder for a possible breakpoint inserted by
4501 * mono_arch_set_breakpoint ().
4504 } else if (cfg->compile_aot) {
4505 guint32 offset = code - cfg->native_code;
4508 ARM_LDR_IMM (code, dreg, info_var->inst_basereg, info_var->inst_offset);
4509 /* Add the offset */
4510 val = ((offset / 4) * sizeof (guint8*)) + MONO_STRUCT_OFFSET (SeqPointInfo, bp_addrs);
4511 /* Load the info->bp_addrs [offset], which is either 0 or the address of a trigger page */
4512 if (arm_is_imm12 ((int)val)) {
4513 ARM_LDR_IMM (code, dreg, dreg, val);
4515 ARM_ADD_REG_IMM (code, dreg, dreg, (val & 0xFF), 0);
4517 ARM_ADD_REG_IMM (code, dreg, dreg, (val & 0xFF00) >> 8, 24);
4519 ARM_ADD_REG_IMM (code, dreg, dreg, (val & 0xFF0000) >> 16, 16);
4520 g_assert (!(val & 0xFF000000));
4522 ARM_LDR_IMM (code, dreg, dreg, 0);
4524 /* What is faster, a branch or a load ? */
4525 ARM_CMP_REG_IMM (code, dreg, 0, 0);
4526 /* The breakpoint instruction */
4527 ARM_LDR_IMM_COND (code, dreg, dreg, 0, ARMCOND_NE);
4530 * A placeholder for a possible breakpoint inserted by
4531 * mono_arch_set_breakpoint ().
4533 for (i = 0; i < 4; ++i)
4540 ARM_ADDS_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4543 ARM_ADD_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4547 ARM_ADCS_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4550 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
4551 g_assert (imm8 >= 0);
4552 ARM_ADDS_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
4556 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
4557 g_assert (imm8 >= 0);
4558 ARM_ADD_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
4562 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
4563 g_assert (imm8 >= 0);
4564 ARM_ADCS_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
4567 ARM_ADD_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4568 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, "OverflowException");
4570 case OP_IADD_OVF_UN:
4571 ARM_ADD_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4572 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, "OverflowException");
4575 ARM_SUB_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4576 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, "OverflowException");
4578 case OP_ISUB_OVF_UN:
4579 ARM_SUB_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4580 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_TRUE, PPC_BR_EQ, "OverflowException");
4582 case OP_ADD_OVF_CARRY:
4583 ARM_ADCS_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4584 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, "OverflowException");
4586 case OP_ADD_OVF_UN_CARRY:
4587 ARM_ADCS_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4588 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, "OverflowException");
4590 case OP_SUB_OVF_CARRY:
4591 ARM_SBCS_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4592 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, "OverflowException");
4594 case OP_SUB_OVF_UN_CARRY:
4595 ARM_SBCS_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4596 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_TRUE, PPC_BR_EQ, "OverflowException");
4600 ARM_SUBS_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4603 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
4604 g_assert (imm8 >= 0);
4605 ARM_SUBS_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
4608 ARM_SUB_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4612 ARM_SBCS_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4616 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
4617 g_assert (imm8 >= 0);
4618 ARM_SUB_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
4622 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
4623 g_assert (imm8 >= 0);
4624 ARM_SBCS_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
4626 case OP_ARM_RSBS_IMM:
4627 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
4628 g_assert (imm8 >= 0);
4629 ARM_RSBS_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
4631 case OP_ARM_RSC_IMM:
4632 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
4633 g_assert (imm8 >= 0);
4634 ARM_RSC_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
4637 ARM_AND_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4641 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
4642 g_assert (imm8 >= 0);
4643 ARM_AND_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
4646 g_assert (v7s_supported);
4647 ARM_SDIV (code, ins->dreg, ins->sreg1, ins->sreg2);
4650 g_assert (v7s_supported);
4651 ARM_UDIV (code, ins->dreg, ins->sreg1, ins->sreg2);
4654 g_assert (v7s_supported);
4655 ARM_SDIV (code, ARMREG_LR, ins->sreg1, ins->sreg2);
4656 ARM_MLS (code, ins->dreg, ARMREG_LR, ins->sreg2, ins->sreg1);
4659 g_assert (v7s_supported);
4660 ARM_UDIV (code, ARMREG_LR, ins->sreg1, ins->sreg2);
4661 ARM_MLS (code, ins->dreg, ARMREG_LR, ins->sreg2, ins->sreg1);
4665 g_assert_not_reached ();
4667 ARM_ORR_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4671 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
4672 g_assert (imm8 >= 0);
4673 ARM_ORR_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
4676 ARM_EOR_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4680 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
4681 g_assert (imm8 >= 0);
4682 ARM_EOR_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
4685 ARM_SHL_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4690 ARM_SHL_IMM (code, ins->dreg, ins->sreg1, (ins->inst_imm & 0x1f));
4691 else if (ins->dreg != ins->sreg1)
4692 ARM_MOV_REG_REG (code, ins->dreg, ins->sreg1);
4695 ARM_SAR_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4700 ARM_SAR_IMM (code, ins->dreg, ins->sreg1, (ins->inst_imm & 0x1f));
4701 else if (ins->dreg != ins->sreg1)
4702 ARM_MOV_REG_REG (code, ins->dreg, ins->sreg1);
4705 case OP_ISHR_UN_IMM:
4707 ARM_SHR_IMM (code, ins->dreg, ins->sreg1, (ins->inst_imm & 0x1f));
4708 else if (ins->dreg != ins->sreg1)
4709 ARM_MOV_REG_REG (code, ins->dreg, ins->sreg1);
4712 ARM_SHR_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4715 ARM_MVN_REG_REG (code, ins->dreg, ins->sreg1);
4718 ARM_RSB_REG_IMM8 (code, ins->dreg, ins->sreg1, 0);
4721 if (ins->dreg == ins->sreg2)
4722 ARM_MUL_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4724 ARM_MUL_REG_REG (code, ins->dreg, ins->sreg2, ins->sreg1);
4727 g_assert_not_reached ();
4730 /* FIXME: handle ovf/ sreg2 != dreg */
4731 ARM_MUL_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4732 /* FIXME: MUL doesn't set the C/O flags on ARM */
4734 case OP_IMUL_OVF_UN:
4735 /* FIXME: handle ovf/ sreg2 != dreg */
4736 ARM_MUL_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4737 /* FIXME: MUL doesn't set the C/O flags on ARM */
4740 code = mono_arm_emit_load_imm (code, ins->dreg, ins->inst_c0);
4743 /* Load the GOT offset */
4744 mono_add_patch_info (cfg, offset, (MonoJumpInfoType)ins->inst_i1, ins->inst_p0);
4745 ARM_LDR_IMM (code, ins->dreg, ARMREG_PC, 0);
4747 *(gpointer*)code = NULL;
4749 /* Load the value from the GOT */
4750 ARM_LDR_REG_REG (code, ins->dreg, ARMREG_PC, ins->dreg);
4752 case OP_OBJC_GET_SELECTOR:
4753 mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_OBJC_SELECTOR_REF, ins->inst_p0);
4754 ARM_LDR_IMM (code, ins->dreg, ARMREG_PC, 0);
4756 *(gpointer*)code = NULL;
4758 ARM_LDR_REG_REG (code, ins->dreg, ARMREG_PC, ins->dreg);
4760 case OP_ICONV_TO_I4:
4761 case OP_ICONV_TO_U4:
4763 if (ins->dreg != ins->sreg1)
4764 ARM_MOV_REG_REG (code, ins->dreg, ins->sreg1);
4767 int saved = ins->sreg2;
4768 if (ins->sreg2 == ARM_LSW_REG) {
4769 ARM_MOV_REG_REG (code, ARMREG_LR, ins->sreg2);
4772 if (ins->sreg1 != ARM_LSW_REG)
4773 ARM_MOV_REG_REG (code, ARM_LSW_REG, ins->sreg1);
4774 if (saved != ARM_MSW_REG)
4775 ARM_MOV_REG_REG (code, ARM_MSW_REG, saved);
4780 ARM_CPYD (code, ins->dreg, ins->sreg1);
4782 case OP_FCONV_TO_R4:
4784 ARM_CVTD (code, ins->dreg, ins->sreg1);
4785 ARM_CVTS (code, ins->dreg, ins->dreg);
4790 * Keep in sync with mono_arch_emit_epilog
4792 g_assert (!cfg->method->save_lmf);
4794 code = emit_load_volatile_arguments (cfg, code);
4796 code = emit_big_add (code, ARMREG_SP, cfg->frame_reg, cfg->stack_usage);
4798 if (cfg->used_int_regs)
4799 ARM_POP (code, cfg->used_int_regs);
4800 ARM_POP (code, (1 << ARMREG_R7) | (1 << ARMREG_LR));
4802 ARM_POP (code, cfg->used_int_regs | (1 << ARMREG_LR));
4804 mono_add_patch_info (cfg, (guint8*) code - cfg->native_code, MONO_PATCH_INFO_METHOD_JUMP, ins->inst_p0);
4805 if (cfg->compile_aot) {
4806 ARM_LDR_IMM (code, ARMREG_IP, ARMREG_PC, 0);
4808 *(gpointer*)code = NULL;
4810 ARM_LDR_REG_REG (code, ARMREG_PC, ARMREG_PC, ARMREG_IP);
4812 code = mono_arm_patchable_b (code, ARMCOND_AL);
4816 MonoCallInst *call = (MonoCallInst*)ins;
4819 * The stack looks like the following:
4820 * <caller argument area>
4823 * <callee argument area>
4824 * Need to copy the arguments from the callee argument area to
4825 * the caller argument area, and pop the frame.
4827 if (call->stack_usage) {
4828 int i, prev_sp_offset = 0;
4830 /* Compute size of saved registers restored below */
4832 prev_sp_offset = 2 * 4;
4834 prev_sp_offset = 1 * 4;
4835 for (i = 0; i < 16; ++i) {
4836 if (cfg->used_int_regs & (1 << i))
4837 prev_sp_offset += 4;
4840 code = emit_big_add (code, ARMREG_IP, cfg->frame_reg, cfg->stack_usage + prev_sp_offset);
4842 /* Copy arguments on the stack to our argument area */
4843 for (i = 0; i < call->stack_usage; i += sizeof (mgreg_t)) {
4844 ARM_LDR_IMM (code, ARMREG_LR, ARMREG_SP, i);
4845 ARM_STR_IMM (code, ARMREG_LR, ARMREG_IP, i);
4850 * Keep in sync with mono_arch_emit_epilog
4852 g_assert (!cfg->method->save_lmf);
4854 code = emit_big_add (code, ARMREG_SP, cfg->frame_reg, cfg->stack_usage);
4856 if (cfg->used_int_regs)
4857 ARM_POP (code, cfg->used_int_regs);
4858 ARM_POP (code, (1 << ARMREG_R7) | (1 << ARMREG_LR));
4860 ARM_POP (code, cfg->used_int_regs | (1 << ARMREG_LR));
4863 mono_add_patch_info (cfg, (guint8*) code - cfg->native_code, MONO_PATCH_INFO_METHOD_JUMP, call->method);
4864 if (cfg->compile_aot) {
4865 ARM_LDR_IMM (code, ARMREG_IP, ARMREG_PC, 0);
4867 *(gpointer*)code = NULL;
4869 ARM_LDR_REG_REG (code, ARMREG_PC, ARMREG_PC, ARMREG_IP);
4871 code = mono_arm_patchable_b (code, ARMCOND_AL);
4876 /* ensure ins->sreg1 is not NULL */
4877 ARM_LDRB_IMM (code, ARMREG_LR, ins->sreg1, 0);
4880 g_assert (cfg->sig_cookie < 128);
4881 ARM_LDR_IMM (code, ARMREG_IP, cfg->frame_reg, cfg->sig_cookie);
4882 ARM_STR_IMM (code, ARMREG_IP, ins->sreg1, 0);
4891 call = (MonoCallInst*)ins;
4894 code = emit_float_args (cfg, call, code, &max_len, &offset);
4896 if (ins->flags & MONO_INST_HAS_METHOD)
4897 mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_METHOD, call->method);
4899 mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_ABS, call->fptr);
4900 code = emit_call_seq (cfg, code);
4901 ins->flags |= MONO_INST_GC_CALLSITE;
4902 ins->backend.pc_offset = code - cfg->native_code;
4903 code = emit_move_return_value (cfg, ins, code);
4909 case OP_VOIDCALL_REG:
4912 code = emit_float_args (cfg, (MonoCallInst *)ins, code, &max_len, &offset);
4914 code = emit_call_reg (code, ins->sreg1);
4915 ins->flags |= MONO_INST_GC_CALLSITE;
4916 ins->backend.pc_offset = code - cfg->native_code;
4917 code = emit_move_return_value (cfg, ins, code);
4919 case OP_FCALL_MEMBASE:
4920 case OP_LCALL_MEMBASE:
4921 case OP_VCALL_MEMBASE:
4922 case OP_VCALL2_MEMBASE:
4923 case OP_VOIDCALL_MEMBASE:
4924 case OP_CALL_MEMBASE: {
4925 gboolean imt_arg = FALSE;
4927 g_assert (ins->sreg1 != ARMREG_LR);
4928 call = (MonoCallInst*)ins;
4931 code = emit_float_args (cfg, call, code, &max_len, &offset);
4933 if (call->dynamic_imt_arg || call->method->klass->flags & TYPE_ATTRIBUTE_INTERFACE)
4935 if (!arm_is_imm12 (ins->inst_offset))
4936 code = mono_arm_emit_load_imm (code, ARMREG_IP, ins->inst_offset);
4937 #ifdef USE_JUMP_TABLES
4943 ARM_ADD_REG_IMM8 (code, ARMREG_LR, ARMREG_PC, LR_BIAS);
4945 ARM_MOV_REG_REG (code, ARMREG_LR, ARMREG_PC);
4947 if (!arm_is_imm12 (ins->inst_offset))
4948 ARM_LDR_REG_REG (code, ARMREG_PC, ins->sreg1, ARMREG_IP);
4950 ARM_LDR_IMM (code, ARMREG_PC, ins->sreg1, ins->inst_offset);
4953 * We can't embed the method in the code stream in PIC code, or
4955 * Instead, we put it in V5 in code emitted by
4956 * mono_arch_emit_imt_argument (), and embed NULL here to
4957 * signal the IMT thunk that the value is in V5.
4959 #ifdef USE_JUMP_TABLES
4960 /* In case of jumptables we always use value in V5. */
4963 if (call->dynamic_imt_arg)
4964 *((gpointer*)code) = NULL;
4966 *((gpointer*)code) = (gpointer)call->method;
4970 ins->flags |= MONO_INST_GC_CALLSITE;
4971 ins->backend.pc_offset = code - cfg->native_code;
4972 code = emit_move_return_value (cfg, ins, code);
4976 /* round the size to 8 bytes */
4977 ARM_ADD_REG_IMM8 (code, ins->dreg, ins->sreg1, 7);
4978 ARM_BIC_REG_IMM8 (code, ins->dreg, ins->dreg, 7);
4979 ARM_SUB_REG_REG (code, ARMREG_SP, ARMREG_SP, ins->dreg);
4980 /* memzero the area: dreg holds the size, sp is the pointer */
4981 if (ins->flags & MONO_INST_INIT) {
4982 guint8 *start_loop, *branch_to_cond;
4983 ARM_MOV_REG_IMM8 (code, ARMREG_LR, 0);
4984 branch_to_cond = code;
4987 ARM_STR_REG_REG (code, ARMREG_LR, ARMREG_SP, ins->dreg);
4988 arm_patch (branch_to_cond, code);
4989 /* decrement by 4 and set flags */
4990 ARM_SUBS_REG_IMM8 (code, ins->dreg, ins->dreg, sizeof (mgreg_t));
4991 ARM_B_COND (code, ARMCOND_GE, 0);
4992 arm_patch (code - 4, start_loop);
4994 ARM_MOV_REG_REG (code, ins->dreg, ARMREG_SP);
4995 if (cfg->param_area)
4996 code = emit_sub_imm (code, ARMREG_SP, ARMREG_SP, ALIGN_TO (cfg->param_area, MONO_ARCH_FRAME_ALIGNMENT));
5001 MonoInst *var = cfg->dyn_call_var;
5003 g_assert (var->opcode == OP_REGOFFSET);
5004 g_assert (arm_is_imm12 (var->inst_offset));
5006 /* lr = args buffer filled by mono_arch_get_dyn_call_args () */
5007 ARM_MOV_REG_REG( code, ARMREG_LR, ins->sreg1);
5009 ARM_MOV_REG_REG( code, ARMREG_IP, ins->sreg2);
5011 /* Save args buffer */
5012 ARM_STR_IMM (code, ARMREG_LR, var->inst_basereg, var->inst_offset);
5014 /* Set stack slots using R0 as scratch reg */
5015 /* MONO_ARCH_DYN_CALL_PARAM_AREA gives the size of stack space available */
5016 for (i = 0; i < DYN_CALL_STACK_ARGS; ++i) {
5017 ARM_LDR_IMM (code, ARMREG_R0, ARMREG_LR, (PARAM_REGS + i) * sizeof (mgreg_t));
5018 ARM_STR_IMM (code, ARMREG_R0, ARMREG_SP, i * sizeof (mgreg_t));
5021 /* Set argument registers */
5022 for (i = 0; i < PARAM_REGS; ++i)
5023 ARM_LDR_IMM (code, i, ARMREG_LR, i * sizeof (mgreg_t));
5026 ARM_MOV_REG_REG (code, ARMREG_LR, ARMREG_PC);
5027 ARM_MOV_REG_REG (code, ARMREG_PC, ARMREG_IP);
5030 ARM_LDR_IMM (code, ARMREG_IP, var->inst_basereg, var->inst_offset);
5031 ARM_STR_IMM (code, ARMREG_R0, ARMREG_IP, MONO_STRUCT_OFFSET (DynCallArgs, res));
5032 ARM_STR_IMM (code, ARMREG_R1, ARMREG_IP, MONO_STRUCT_OFFSET (DynCallArgs, res2));
5036 if (ins->sreg1 != ARMREG_R0)
5037 ARM_MOV_REG_REG (code, ARMREG_R0, ins->sreg1);
5038 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_INTERNAL_METHOD,
5039 (gpointer)"mono_arch_throw_exception");
5040 code = emit_call_seq (cfg, code);
5044 if (ins->sreg1 != ARMREG_R0)
5045 ARM_MOV_REG_REG (code, ARMREG_R0, ins->sreg1);
5046 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_INTERNAL_METHOD,
5047 (gpointer)"mono_arch_rethrow_exception");
5048 code = emit_call_seq (cfg, code);
/* Exception-handler entry/exit opcodes. Each handler region has a
 * "spvar" slot where the return address (lr) is parked on entry and
 * reloaded on exit; the optional cfg->param_area is carved out of /
 * returned to the stack around the handler body. */
5051 case OP_START_HANDLER: {
5052 MonoInst *spvar = mono_find_spvar_for_region (cfg, bb->region);
5055 /* Reserve a param area, see filter-stack.exe */
5056 if (cfg->param_area) {
/* Use a single SUB with a rotated imm8 when the size encodes,
 * otherwise materialize it in ip first. */
5057 if ((i = mono_arm_is_rotated_imm8 (cfg->param_area, &rot_amount)) >= 0) {
5058 ARM_SUB_REG_IMM (code, ARMREG_SP, ARMREG_SP, i, rot_amount);
5060 code = mono_arm_emit_load_imm (code, ARMREG_IP, cfg->param_area);
5061 ARM_SUB_REG_REG (code, ARMREG_SP, ARMREG_SP, ARMREG_IP);
/* Save lr (the handler's return address) into the spvar slot. */
5065 if (arm_is_imm12 (spvar->inst_offset)) {
5066 ARM_STR_IMM (code, ARMREG_LR, spvar->inst_basereg, spvar->inst_offset);
5068 code = mono_arm_emit_load_imm (code, ARMREG_IP, spvar->inst_offset);
5069 ARM_STR_REG_REG (code, ARMREG_LR, spvar->inst_basereg, ARMREG_IP);
5073 case OP_ENDFILTER: {
5074 MonoInst *spvar = mono_find_spvar_for_region (cfg, bb->region);
5077 /* Free the param area */
5078 if (cfg->param_area) {
5079 if ((i = mono_arm_is_rotated_imm8 (cfg->param_area, &rot_amount)) >= 0) {
5080 ARM_ADD_REG_IMM (code, ARMREG_SP, ARMREG_SP, i, rot_amount);
5082 code = mono_arm_emit_load_imm (code, ARMREG_IP, cfg->param_area);
5083 ARM_ADD_REG_REG (code, ARMREG_SP, ARMREG_SP, ARMREG_IP);
/* Filter result is returned in r0. */
5087 if (ins->sreg1 != ARMREG_R0)
5088 ARM_MOV_REG_REG (code, ARMREG_R0, ins->sreg1);
/* Reload the saved return address into ip and jump back through it. */
5089 if (arm_is_imm12 (spvar->inst_offset)) {
5090 ARM_LDR_IMM (code, ARMREG_IP, spvar->inst_basereg, spvar->inst_offset);
5092 g_assert (ARMREG_IP != spvar->inst_basereg);
5093 code = mono_arm_emit_load_imm (code, ARMREG_IP, spvar->inst_offset);
5094 ARM_LDR_REG_REG (code, ARMREG_IP, spvar->inst_basereg, ARMREG_IP);
5096 ARM_MOV_REG_REG (code, ARMREG_PC, ARMREG_IP);
/* OP_ENDFINALLY: identical to OP_ENDFILTER except no r0 result. */
5099 case OP_ENDFINALLY: {
5100 MonoInst *spvar = mono_find_spvar_for_region (cfg, bb->region);
5103 /* Free the param area */
5104 if (cfg->param_area) {
5105 if ((i = mono_arm_is_rotated_imm8 (cfg->param_area, &rot_amount)) >= 0) {
5106 ARM_ADD_REG_IMM (code, ARMREG_SP, ARMREG_SP, i, rot_amount);
5108 code = mono_arm_emit_load_imm (code, ARMREG_IP, cfg->param_area);
5109 ARM_ADD_REG_REG (code, ARMREG_SP, ARMREG_SP, ARMREG_IP);
5113 if (arm_is_imm12 (spvar->inst_offset)) {
5114 ARM_LDR_IMM (code, ARMREG_IP, spvar->inst_basereg, spvar->inst_offset);
5116 g_assert (ARMREG_IP != spvar->inst_basereg);
5117 code = mono_arm_emit_load_imm (code, ARMREG_IP, spvar->inst_offset);
5118 ARM_LDR_REG_REG (code, ARMREG_IP, spvar->inst_basereg, ARMREG_IP);
5120 ARM_MOV_REG_REG (code, ARMREG_PC, ARMREG_IP);
/* Branch-family opcodes: call-handler, label, unconditional branch,
 * indirect branch and the jump-table based OP_SWITCH. */
5123 case OP_CALL_HANDLER:
/* bl to the handler block; patched later via the BB patch info. */
5124 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_BB, ins->inst_target_bb);
5125 code = mono_arm_patchable_bl (code, ARMCOND_AL);
5126 mono_cfg_add_try_hole (cfg, ins->inst_eh_block, code, bb);
/* OP_LABEL: record the native offset of this point. */
5129 ins->inst_c0 = code - cfg->native_code;
5132 /*if (ins->inst_target_bb->native_offset) {
5134 //x86_jump_code (code, cfg->native_code + ins->inst_target_bb->native_offset);
/* OP_BR: patchable unconditional branch to the target basic block. */
5136 mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_BB, ins->inst_target_bb);
5137 code = mono_arm_patchable_b (code, ARMCOND_AL);
/* OP_BR_REG: jump through a register. */
5141 ARM_MOV_REG_REG (code, ARMREG_PC, ins->sreg1);
5145 * In the normal case we have:
5146 * ldr pc, [pc, ins->sreg1 << 2]
5149 * ldr lr, [pc, ins->sreg1 << 2]
5151 * After follows the data.
5152 * FIXME: add aot support.
5154 mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_SWITCH, ins->inst_p0);
5155 #ifdef USE_JUMP_TABLES
/* ins->klass abuses the field to carry the table size (entry count). */
5157 gpointer *jte = mono_jumptable_add_entries (GPOINTER_TO_INT (ins->klass));
5158 code = mono_arm_load_jumptable_entry_addr (code, jte, ARMREG_IP);
5159 ARM_LDR_REG_REG_SHIFT (code, ARMREG_PC, ARMREG_IP, ins->sreg1, ARMSHIFT_LSL, 2);
/* Inline jump table: make sure the code buffer can hold the table,
 * growing cfg->native_code if needed. */
5163 max_len += 4 * GPOINTER_TO_INT (ins->klass);
5164 if (offset + max_len > (cfg->code_size - 16)) {
5165 cfg->code_size += max_len;
5166 cfg->code_size *= 2;
5167 cfg->native_code = g_realloc (cfg->native_code, cfg->code_size);
5168 code = cfg->native_code + offset;
/* ldr pc, [pc, sreg1 lsl #2] indexes the table that follows. */
5170 ARM_LDR_REG_REG_SHIFT (code, ARMREG_PC, ARMREG_PC, ins->sreg1, ARMSHIFT_LSL, 2);
/* Skip over the inline table data (filled in at patch time). */
5172 code += 4 * GPOINTER_TO_INT (ins->klass);
/* Integer compare-result opcodes (ceq/clt/cgt and unsigned variants):
 * materialize 0/1 into dreg based on the flags set by a preceding
 * compare, using conditional MOVs. Case labels are elided in this
 * extract; each pair/triple below is one opcode's body. */
5177 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 0, ARMCOND_NE);
5178 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_EQ);
5182 ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
5183 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_LT);
/* Unsigned less-than uses LO (carry clear). */
5187 ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
5188 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_LO);
5192 ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
5193 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_GT);
/* Unsigned greater-than uses HI. */
5197 ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
5198 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_HI);
/* Negated forms: default to 1, clear on the opposite condition. */
5201 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_NE);
5202 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 0, ARMCOND_EQ);
5205 ARM_MOV_REG_IMM8 (code, ins->dreg, 1);
5206 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 0, ARMCOND_LT);
5209 ARM_MOV_REG_IMM8 (code, ins->dreg, 1);
5210 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 0, ARMCOND_GT);
5214 ARM_MOV_REG_IMM8 (code, ins->dreg, 1);
5215 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 0, ARMCOND_LO);
/* Conditional-exception opcodes: the condition index is derived from
 * the opcode's distance from the first opcode of its group. */
5217 case OP_COND_EXC_EQ:
5218 case OP_COND_EXC_NE_UN:
5219 case OP_COND_EXC_LT:
5220 case OP_COND_EXC_LT_UN:
5221 case OP_COND_EXC_GT:
5222 case OP_COND_EXC_GT_UN:
5223 case OP_COND_EXC_GE:
5224 case OP_COND_EXC_GE_UN:
5225 case OP_COND_EXC_LE:
5226 case OP_COND_EXC_LE_UN:
5227 EMIT_COND_SYSTEM_EXCEPTION (ins->opcode - OP_COND_EXC_EQ, ins->inst_p1);
5229 case OP_COND_EXC_IEQ:
5230 case OP_COND_EXC_INE_UN:
5231 case OP_COND_EXC_ILT:
5232 case OP_COND_EXC_ILT_UN:
5233 case OP_COND_EXC_IGT:
5234 case OP_COND_EXC_IGT_UN:
5235 case OP_COND_EXC_IGE:
5236 case OP_COND_EXC_IGE_UN:
5237 case OP_COND_EXC_ILE:
5238 case OP_COND_EXC_ILE_UN:
5239 EMIT_COND_SYSTEM_EXCEPTION (ins->opcode - OP_COND_EXC_IEQ, ins->inst_p1);
/* Carry/overflow based exception checks map directly to ARM flags:
 * C set -> CS, V set -> VS, C clear -> CC, V clear -> VC. */
5242 case OP_COND_EXC_IC:
5243 EMIT_COND_SYSTEM_EXCEPTION_FLAGS (ARMCOND_CS, ins->inst_p1);
5245 case OP_COND_EXC_OV:
5246 case OP_COND_EXC_IOV:
5247 EMIT_COND_SYSTEM_EXCEPTION_FLAGS (ARMCOND_VS, ins->inst_p1);
5249 case OP_COND_EXC_NC:
5250 case OP_COND_EXC_INC:
5251 EMIT_COND_SYSTEM_EXCEPTION_FLAGS (ARMCOND_CC, ins->inst_p1);
5253 case OP_COND_EXC_NO:
5254 case OP_COND_EXC_INO:
5255 EMIT_COND_SYSTEM_EXCEPTION_FLAGS (ARMCOND_VC, ins->inst_p1);
/* Conditional integer branches: condition derived from opcode index. */
5267 EMIT_COND_BRANCH (ins, ins->opcode - OP_IBEQ);
5270 /* floating point opcodes */
/* OP_R8CONST: under AOT the 8-byte constant is embedded right after a
 * pc-relative FLDD; otherwise its address is loaded into lr. */
5272 if (cfg->compile_aot) {
5273 ARM_FLDD (code, ins->dreg, ARMREG_PC, 0);
5275 *(guint32*)code = ((guint32*)(ins->inst_p0))[0];
5277 *(guint32*)code = ((guint32*)(ins->inst_p0))[1];
5280 /* FIXME: we can optimize the imm load by dealing with part of
5281 * the displacement in LDFD (aligning to 512).
5283 code = mono_arm_emit_load_imm (code, ARMREG_LR, (guint32)ins->inst_p0);
5284 ARM_FLDD (code, ins->dreg, ARMREG_LR, 0);
/* OP_R4CONST: load the single and widen to double (the backend keeps
 * R4 values in double registers, hence the CVTS). */
5288 if (cfg->compile_aot) {
5289 ARM_FLDS (code, ins->dreg, ARMREG_PC, 0);
5291 *(guint32*)code = ((guint32*)(ins->inst_p0))[0];
5293 ARM_CVTS (code, ins->dreg, ins->dreg);
5295 code = mono_arm_emit_load_imm (code, ARMREG_LR, (guint32)ins->inst_p0);
5296 ARM_FLDS (code, ins->dreg, ARMREG_LR, 0);
5297 ARM_CVTS (code, ins->dreg, ins->dreg);
5300 case OP_STORER8_MEMBASE_REG:
5301 /* This is generated by the local regalloc pass which runs after the lowering pass */
/* VFP load/store offsets are limited to imm8*4; compute the address
 * in lr when the offset does not fit. */
5302 if (!arm_is_fpimm8 (ins->inst_offset)) {
5303 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_offset);
5304 ARM_ADD_REG_REG (code, ARMREG_LR, ARMREG_LR, ins->inst_destbasereg);
5305 ARM_FSTD (code, ins->sreg1, ARMREG_LR, 0);
5307 ARM_FSTD (code, ins->sreg1, ins->inst_destbasereg, ins->inst_offset);
5310 case OP_LOADR8_MEMBASE:
5311 /* This is generated by the local regalloc pass which runs after the lowering pass */
5312 if (!arm_is_fpimm8 (ins->inst_offset)) {
5313 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_offset);
5314 ARM_ADD_REG_REG (code, ARMREG_LR, ARMREG_LR, ins->inst_basereg);
5315 ARM_FLDD (code, ins->dreg, ARMREG_LR, 0);
5317 ARM_FLDD (code, ins->dreg, ins->inst_basereg, ins->inst_offset);
/* R4 membase access: narrow through a scratch single register since
 * values live as doubles in registers. */
5320 case OP_STORER4_MEMBASE_REG:
5321 g_assert (arm_is_fpimm8 (ins->inst_offset));
5322 code = mono_arm_emit_vfp_scratch_save (cfg, code, vfp_scratch1);
5323 ARM_CVTD (code, vfp_scratch1, ins->sreg1);
5324 ARM_FSTS (code, vfp_scratch1, ins->inst_destbasereg, ins->inst_offset);
5325 code = mono_arm_emit_vfp_scratch_restore (cfg, code, vfp_scratch1);
5327 case OP_LOADR4_MEMBASE:
5328 g_assert (arm_is_fpimm8 (ins->inst_offset));
5329 code = mono_arm_emit_vfp_scratch_save (cfg, code, vfp_scratch1);
5330 ARM_FLDS (code, vfp_scratch1, ins->inst_basereg, ins->inst_offset);
5331 ARM_CVTS (code, ins->dreg, vfp_scratch1);
5332 code = mono_arm_emit_vfp_scratch_restore (cfg, code, vfp_scratch1);
/* Unsigned int -> float is lowered elsewhere; must not reach here. */
5334 case OP_ICONV_TO_R_UN: {
5335 g_assert_not_reached ();
/* Signed int -> R4/R8 via FMSR + FSITOS/FSITOD. */
5338 case OP_ICONV_TO_R4:
5339 code = mono_arm_emit_vfp_scratch_save (cfg, code, vfp_scratch1);
5340 ARM_FMSR (code, vfp_scratch1, ins->sreg1);
5341 ARM_FSITOS (code, vfp_scratch1, vfp_scratch1);
5342 ARM_CVTS (code, ins->dreg, vfp_scratch1);
5343 code = mono_arm_emit_vfp_scratch_restore (cfg, code, vfp_scratch1);
5345 case OP_ICONV_TO_R8:
5346 code = mono_arm_emit_vfp_scratch_save (cfg, code, vfp_scratch1);
5347 ARM_FMSR (code, vfp_scratch1, ins->sreg1);
5348 ARM_FSITOD (code, ins->dreg, vfp_scratch1);
5349 code = mono_arm_emit_vfp_scratch_restore (cfg, code, vfp_scratch1);
/* OP_SETFRET: move an FP result into the return location. R4 returns
 * are narrowed; the register depends on the float ABI: d0 for
 * hard-float, r0/r1 for the soft-float ABI. */
5353 MonoType *sig_ret = mini_type_get_underlying_type (NULL, mono_method_signature (cfg->method)->ret);
5354 if (sig_ret->type == MONO_TYPE_R4) {
5355 ARM_CVTD (code, ARM_VFP_F0, ins->sreg1);
5357 if (!IS_HARD_FLOAT) {
5358 ARM_FMRS (code, ARMREG_R0, ARM_VFP_F0);
5361 if (IS_HARD_FLOAT) {
5362 ARM_CPYD (code, ARM_VFP_D0, ins->sreg1);
5364 ARM_FMRRD (code, ARMREG_R0, ARMREG_R1, ins->sreg1);
/* float -> small int conversions share one helper; the last two
 * arguments are (size in bytes, is_signed). */
5369 case OP_FCONV_TO_I1:
5370 code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 1, TRUE);
5372 case OP_FCONV_TO_U1:
5373 code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 1, FALSE);
5375 case OP_FCONV_TO_I2:
5376 code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 2, TRUE);
5378 case OP_FCONV_TO_U2:
5379 code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 2, FALSE);
5381 case OP_FCONV_TO_I4:
5383 code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 4, TRUE);
5385 case OP_FCONV_TO_U4:
5387 code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 4, FALSE);
5389 case OP_FCONV_TO_I8:
5390 case OP_FCONV_TO_U8:
5391 g_assert_not_reached ();
5392 /* Implemented as helper calls */
5394 case OP_LCONV_TO_R_UN:
5395 g_assert_not_reached ();
5396 /* Implemented as helper calls */
/* long -> int with overflow check: sreg1 = low word, sreg2 = high
 * word. The value fits in an int32 iff high == 0 with low bit31
 * clear, or high == 0xffffffff with low bit31 set. */
5398 case OP_LCONV_TO_OVF_I4_2: {
5399 guint8 *high_bit_not_set, *valid_negative, *invalid_negative, *valid_positive;
5401 * Valid ints: 0xffffffff:80000000 to 00000000:0x7fffffff
5404 ARM_CMP_REG_IMM8 (code, ins->sreg1, 0);
5405 high_bit_not_set = code;
5406 ARM_B_COND (code, ARMCOND_GE, 0); /*branch if bit 31 of the lower part is not set*/
5408 ARM_CMN_REG_IMM8 (code, ins->sreg2, 1); /*This have the same effect as CMP reg, 0xFFFFFFFF */
5409 valid_negative = code;
5410 ARM_B_COND (code, ARMCOND_EQ, 0); /*branch if upper part == 0xFFFFFFFF (lower part has bit 31 set) */
5411 invalid_negative = code;
5412 ARM_B_COND (code, ARMCOND_AL, 0);
5414 arm_patch (high_bit_not_set, code);
5416 ARM_CMP_REG_IMM8 (code, ins->sreg2, 0);
5417 valid_positive = code;
5418 ARM_B_COND (code, ARMCOND_EQ, 0); /*branch if upper part == 0 (lower part has bit 31 clear)*/
5420 arm_patch (invalid_negative, code);
5421 EMIT_COND_SYSTEM_EXCEPTION_FLAGS (ARMCOND_AL, "OverflowException");
5423 arm_patch (valid_negative, code);
5424 arm_patch (valid_positive, code);
/* In range: the low word is the result. */
5426 if (ins->dreg != ins->sreg1)
5427 ARM_MOV_REG_REG (code, ins->dreg, ins->sreg1);
/* Double-precision VFP arithmetic (case labels elided in this
 * extract): add/sub/mul/div/neg. */
5431 ARM_VFP_ADDD (code, ins->dreg, ins->sreg1, ins->sreg2);
5434 ARM_VFP_SUBD (code, ins->dreg, ins->sreg1, ins->sreg2);
5437 ARM_VFP_MULD (code, ins->dreg, ins->sreg1, ins->sreg2);
5440 ARM_VFP_DIVD (code, ins->dreg, ins->sreg1, ins->sreg2);
5443 ARM_NEGD (code, ins->dreg, ins->sreg1);
5447 g_assert_not_reached ();
/* FP compare + setcc: CMPD sets the FPSCR flags (transferred to CPSR
 * elsewhere); MI encodes "less than", VS "unordered" (NaN). The
 * reversed-operand compares implement the "greater" variants, and the
 * extra VS moves make the _UN forms return 1 on NaN. */
5451 ARM_CMPD (code, ins->sreg1, ins->sreg2);
5457 ARM_CMPD (code, ins->sreg1, ins->sreg2);
5460 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 0, ARMCOND_NE);
5461 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_EQ);
5465 ARM_CMPD (code, ins->sreg1, ins->sreg2);
5468 ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
5469 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_MI);
5473 ARM_CMPD (code, ins->sreg1, ins->sreg2);
5476 ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
5477 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_MI);
5478 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_VS);
5482 ARM_CMPD (code, ins->sreg2, ins->sreg1);
5485 ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
5486 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_MI);
5490 ARM_CMPD (code, ins->sreg2, ins->sreg1);
5493 ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
5494 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_MI);
5495 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_VS);
5499 ARM_CMPD (code, ins->sreg1, ins->sreg2);
5502 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_NE);
5503 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 0, ARMCOND_EQ);
5507 ARM_CMPD (code, ins->sreg1, ins->sreg2);
5510 ARM_MOV_REG_IMM8 (code, ins->dreg, 1);
5511 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 0, ARMCOND_MI);
5515 ARM_CMPD (code, ins->sreg2, ins->sreg1);
5518 ARM_MOV_REG_IMM8 (code, ins->dreg, 1);
5519 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 0, ARMCOND_MI);
5522 /* ARM FPA flags table:
5523 * N Less than ARMCOND_MI
5524 * Z Equal ARMCOND_EQ
5525 * C Greater Than or Equal ARMCOND_CS
5526 * V Unordered ARMCOND_VS
/* FP conditional branches (FBEQ/FBNE/FBLT/... -- labels elided). */
5529 EMIT_COND_BRANCH (ins, OP_IBEQ - OP_IBEQ);
5532 EMIT_COND_BRANCH (ins, OP_IBNE_UN - OP_IBEQ);
5535 EMIT_COND_BRANCH_FLAGS (ins, ARMCOND_MI); /* N set */
5538 EMIT_COND_BRANCH_FLAGS (ins, ARMCOND_VS); /* V set */
5539 EMIT_COND_BRANCH_FLAGS (ins, ARMCOND_MI); /* N set */
5545 g_assert_not_reached ();
5549 EMIT_COND_BRANCH_FLAGS (ins, ARMCOND_GE);
5551 /* FPA requires EQ even thou the docs suggests that just CS is enough */
5552 EMIT_COND_BRANCH_FLAGS (ins, ARMCOND_EQ);
5553 EMIT_COND_BRANCH_FLAGS (ins, ARMCOND_CS);
5557 EMIT_COND_BRANCH_FLAGS (ins, ARMCOND_VS); /* V set */
5558 EMIT_COND_BRANCH_FLAGS (ins, ARMCOND_GE);
/* OP_CKFINITE: throw ArithmeticException when the value is NaN or
 * infinite. Compares |sreg1| against the largest finite double
 * (0x7fefffff:ffffffff) and checks for NaN via self-compare (VS). */
5563 code = mono_arm_emit_vfp_scratch_save (cfg, code, vfp_scratch1);
5564 code = mono_arm_emit_vfp_scratch_save (cfg, code, vfp_scratch2);
5566 #ifdef USE_JUMP_TABLES
5568 gpointer *jte = mono_jumptable_add_entries (2);
5569 jte [0] = GUINT_TO_POINTER (0xffffffff);
5570 jte [1] = GUINT_TO_POINTER (0x7fefffff);
5571 code = mono_arm_load_jumptable_entry_addr (code, jte, ARMREG_IP);
5572 ARM_FLDD (code, vfp_scratch1, ARMREG_IP, 0);
5575 ARM_ABSD (code, vfp_scratch2, ins->sreg1);
5576 ARM_FLDD (code, vfp_scratch1, ARMREG_PC, 0);
5578 *(guint32*)code = 0xffffffff;
5580 *(guint32*)code = 0x7fefffff;
5583 ARM_CMPD (code, vfp_scratch2, vfp_scratch1);
5585 EMIT_COND_SYSTEM_EXCEPTION_FLAGS (ARMCOND_GT, "ArithmeticException");
5586 ARM_CMPD (code, ins->sreg1, ins->sreg1);
5588 EMIT_COND_SYSTEM_EXCEPTION_FLAGS (ARMCOND_VS, "ArithmeticException");
5589 ARM_CPYD (code, ins->dreg, ins->sreg1);
5591 code = mono_arm_emit_vfp_scratch_restore (cfg, code, vfp_scratch1);
5592 code = mono_arm_emit_vfp_scratch_restore (cfg, code, vfp_scratch2);
/* GC liveness markers: just record the current native offset so the
 * GC maps can associate liveness info with this pc. */
5597 case OP_GC_LIVENESS_DEF:
5598 case OP_GC_LIVENESS_USE:
5599 case OP_GC_PARAM_SLOT_LIVENESS_DEF:
5600 ins->backend.pc_offset = code - cfg->native_code;
5602 case OP_GC_SPILL_SLOT_LIVENESS_DEF:
5603 ins->backend.pc_offset = code - cfg->native_code;
/* Spill-slot defs are also collected per-basic-block for later GC map
 * construction. */
5604 bb->spill_slot_defs = g_slist_prepend_mempool (cfg->mempool, bb->spill_slot_defs, ins);
/* default: an opcode this backend does not implement is a bug. */
5608 g_warning ("unknown opcode %s in %s()\n", mono_inst_name (ins->opcode), __FUNCTION__);
5609 g_assert_not_reached ();
/* Sanity check: the emitted code must not exceed the per-opcode
 * length estimate used when sizing the buffer. */
5612 if ((cfg->opt & MONO_OPT_BRANCH) && ((code - cfg->native_code - offset) > max_len)) {
5613 g_warning ("wrong maximal instruction length of instruction %s (expected %d, got %d)",
5614 mono_inst_name (ins->opcode), max_len, code - cfg->native_code - offset);
5615 g_assert_not_reached ();
5621 last_offset = offset;
/* End of the per-basic-block emit loop. */
5624 cfg->code_len = code - cfg->native_code;
5627 #endif /* DISABLE_JIT */
5629 #ifdef HAVE_AEABI_READ_TP
/* EABI runtime helper that returns the thread pointer; registered as
 * an icall below so generated code can call it. */
5630 void __aeabi_read_tp (void);
/*
 * Register the low-level (architecture-specific) helpers as JIT
 * icalls so patch infos referencing them by name can be resolved.
 */
5634 mono_arch_register_lowlevel_calls (void)
5636 /* The signature doesn't matter */
5637 mono_register_jit_icall (mono_arm_throw_exception, "mono_arm_throw_exception", mono_create_icall_signature ("void"), TRUE);
5638 mono_register_jit_icall (mono_arm_throw_exception_by_token, "mono_arm_throw_exception_by_token", mono_create_icall_signature ("void"), TRUE);
5640 #ifndef MONO_CROSS_COMPILE
5641 #ifdef HAVE_AEABI_READ_TP
5642 mono_register_jit_icall (__aeabi_read_tp, "__aeabi_read_tp", mono_create_icall_signature ("void"), TRUE);
/* Patch a lis/ori-style immediate pair with a 32-bit value.
 * NOTE(review): PPC-style leftover; only used from unreachable code
 * paths below (guarded by g_assert_not_reached). */
5647 #define patch_lis_ori(ip,val) do {\
5648 guint16 *__lis_ori = (guint16*)(ip); \
5649 __lis_ori [1] = (((guint32)(val)) >> 16) & 0xffff; \
5650 __lis_ori [3] = ((guint32)(val)) & 0xffff; \
/*
 * mono_arch_patch_code:
 * Walk the jump-info list of a freshly emitted method and resolve
 * every patch site to its final target. OP_SWITCH jump tables are
 * filled in directly; everything else goes through
 * mono_resolve_patch_target + arm_patch_general.
 */
5654 mono_arch_patch_code (MonoMethod *method, MonoDomain *domain, guint8 *code, MonoJumpInfo *ji, MonoCodeManager *dyn_code_mp, gboolean run_cctors)
5656 MonoJumpInfo *patch_info;
5657 gboolean compile_aot = !run_cctors;
5659 for (patch_info = ji; patch_info; patch_info = patch_info->next) {
5660 unsigned char *ip = patch_info->ip.i + code;
5661 const unsigned char *target;
5663 if (patch_info->type == MONO_PATCH_INFO_SWITCH && !compile_aot) {
5664 #ifdef USE_JUMP_TABLES
5665 gpointer *jt = mono_jumptable_get_entry (ip);
/* Inline table lives 8 bytes (2 insns) after the patch site. */
5667 gpointer *jt = (gpointer*)(ip + 8);
5670 /* jt is the inlined jump table, 2 instructions after ip
5671 * In the normal case we store the absolute addresses,
5672 * otherwise the displacements.
5674 for (i = 0; i < patch_info->data.table->table_size; i++)
5675 jt [i] = code + (int)patch_info->data.table->table [i];
5680 switch (patch_info->type) {
5681 case MONO_PATCH_INFO_BB:
5682 case MONO_PATCH_INFO_LABEL:
5685 /* No need to patch these */
5690 target = mono_resolve_patch_target (method, domain, code, patch_info, run_cctors);
5692 switch (patch_info->type) {
/* The following patch kinds are asserted unreachable on ARM; the
 * bodies after the asserts are dead PPC-era code kept for reference. */
5693 case MONO_PATCH_INFO_IP:
5694 g_assert_not_reached ();
5695 patch_lis_ori (ip, ip);
5697 case MONO_PATCH_INFO_METHOD_REL:
5698 g_assert_not_reached ();
5699 *((gpointer *)(ip)) = code + patch_info->data.offset;
5701 case MONO_PATCH_INFO_METHODCONST:
5702 case MONO_PATCH_INFO_CLASS:
5703 case MONO_PATCH_INFO_IMAGE:
5704 case MONO_PATCH_INFO_FIELD:
5705 case MONO_PATCH_INFO_VTABLE:
5706 case MONO_PATCH_INFO_IID:
5707 case MONO_PATCH_INFO_SFLDA:
5708 case MONO_PATCH_INFO_LDSTR:
5709 case MONO_PATCH_INFO_TYPE_FROM_HANDLE:
5710 case MONO_PATCH_INFO_LDTOKEN:
5711 g_assert_not_reached ();
5712 /* from OP_AOTCONST : lis + ori */
5713 patch_lis_ori (ip, target);
5715 case MONO_PATCH_INFO_R4:
5716 case MONO_PATCH_INFO_R8:
5717 g_assert_not_reached ();
5718 *((gconstpointer *)(ip + 2)) = patch_info->data.target;
5720 case MONO_PATCH_INFO_EXC_NAME:
5721 g_assert_not_reached ();
5722 *((gconstpointer *)(ip + 1)) = patch_info->data.name;
5724 case MONO_PATCH_INFO_NONE:
5725 case MONO_PATCH_INFO_BB_OVF:
5726 case MONO_PATCH_INFO_EXC_OVF:
5727 /* everything is dealt with at epilog output time */
/* Generic case: patch the call/branch/load at ip to target. */
5732 arm_patch_general (domain, ip, target, dyn_code_mp);
5739 * Stack frame layout:
5741 * ------------------- fp
5742 * MonoLMF structure or saved registers
5743 * -------------------
5745 * -------------------
5747 * -------------------
5748 * optional 8 bytes for tracing
5749 * -------------------
5750 * param area size is cfg->param_area
5751 * ------------------- sp
/*
 * mono_arch_emit_prolog:
 * Emit the method prolog: push callee-saved registers (or the LMF
 * register set), allocate the frame, set up the frame register, and
 * record unwind/GC-map info for every stack adjustment.
 * (Function continues past this extract.)
 */
5754 mono_arch_emit_prolog (MonoCompile *cfg)
5756 MonoMethod *method = cfg->method;
5758 MonoMethodSignature *sig;
5760 int alloc_size, orig_alloc_size, pos, max_offset, i, rot_amount;
5765 int prev_sp_offset, reg_offset;
5767 if (mono_jit_trace_calls != NULL && mono_trace_eval (method))
/* Initial code-buffer size estimate; grown on demand while emitting. */
5770 sig = mono_method_signature (method);
5771 cfg->code_size = 256 + sig->param_count * 64;
5772 code = cfg->native_code = g_malloc (cfg->code_size);
/* CFA starts at sp before anything is pushed. */
5774 mono_emit_unwind_op_def_cfa (cfg, code, ARMREG_SP, 0);
5776 alloc_size = cfg->stack_offset;
5782 * The iphone uses R7 as the frame pointer, and it points at the saved
5787 * We can't use r7 as a frame pointer since it points into the middle of
5788 * the frame, so we keep using our own frame pointer.
5789 * FIXME: Optimize this.
/* iphone ABI: push r7/lr pair first so native unwinders work. */
5791 ARM_PUSH (code, (1 << ARMREG_R7) | (1 << ARMREG_LR));
5792 ARM_MOV_REG_REG (code, ARMREG_R7, ARMREG_SP);
5793 prev_sp_offset += 8; /* r7 and lr */
5794 mono_emit_unwind_op_def_cfa_offset (cfg, code, prev_sp_offset);
5795 mono_emit_unwind_op_offset (cfg, code, ARMREG_R7, (- prev_sp_offset) + 0);
5798 if (!method->save_lmf) {
5800 /* No need to push LR again */
5801 if (cfg->used_int_regs)
5802 ARM_PUSH (code, cfg->used_int_regs);
5804 ARM_PUSH (code, cfg->used_int_regs | (1 << ARMREG_LR));
5805 prev_sp_offset += 4;
/* Account for each pushed callee-saved register and emit unwind +
 * GC-map (NOREF) info for its slot. */
5807 for (i = 0; i < 16; ++i) {
5808 if (cfg->used_int_regs & (1 << i))
5809 prev_sp_offset += 4;
5811 mono_emit_unwind_op_def_cfa_offset (cfg, code, prev_sp_offset);
5813 for (i = 0; i < 16; ++i) {
5814 if ((cfg->used_int_regs & (1 << i))) {
5815 mono_emit_unwind_op_offset (cfg, code, i, (- prev_sp_offset) + reg_offset);
5816 mini_gc_set_slot_type_from_cfa (cfg, (- prev_sp_offset) + reg_offset, SLOT_NOREF);
5821 mono_emit_unwind_op_offset (cfg, code, ARMREG_LR, -4);
5822 mini_gc_set_slot_type_from_cfa (cfg, -4, SLOT_NOREF);
5824 mono_emit_unwind_op_offset (cfg, code, ARMREG_LR, -4);
5825 mini_gc_set_slot_type_from_cfa (cfg, -4, SLOT_NOREF);
/* save_lmf path: push r4-r12,lr (mask 0x5ff0) to populate the
 * register area of the MonoLMF structure. */
5828 ARM_MOV_REG_REG (code, ARMREG_IP, ARMREG_SP);
5829 ARM_PUSH (code, 0x5ff0);
5830 prev_sp_offset += 4 * 10; /* all but r0-r3, sp and pc */
5831 mono_emit_unwind_op_def_cfa_offset (cfg, code, prev_sp_offset);
5833 for (i = 0; i < 16; ++i) {
5834 if ((i > ARMREG_R3) && (i != ARMREG_SP) && (i != ARMREG_PC)) {
5835 /* The original r7 is saved at the start */
5836 if (!(iphone_abi && i == ARMREG_R7))
5837 mono_emit_unwind_op_offset (cfg, code, i, (- prev_sp_offset) + reg_offset);
5841 g_assert (reg_offset == 4 * 10);
5842 pos += sizeof (MonoLMF) - (4 * 10);
5846 orig_alloc_size = alloc_size;
5847 // align to MONO_ARCH_FRAME_ALIGNMENT bytes
5848 if (alloc_size & (MONO_ARCH_FRAME_ALIGNMENT - 1)) {
5849 alloc_size += MONO_ARCH_FRAME_ALIGNMENT - 1;
5850 alloc_size &= ~(MONO_ARCH_FRAME_ALIGNMENT - 1);
5853 /* the stack used in the pushed regs */
5854 if (prev_sp_offset & 4)
5856 cfg->stack_usage = alloc_size;
/* Allocate the frame: single SUB when the size encodes as a rotated
 * imm8, otherwise via ip. */
5858 if ((i = mono_arm_is_rotated_imm8 (alloc_size, &rot_amount)) >= 0) {
5859 ARM_SUB_REG_IMM (code, ARMREG_SP, ARMREG_SP, i, rot_amount);
5861 code = mono_arm_emit_load_imm (code, ARMREG_IP, alloc_size);
5862 ARM_SUB_REG_REG (code, ARMREG_SP, ARMREG_SP, ARMREG_IP);
5864 mono_emit_unwind_op_def_cfa_offset (cfg, code, prev_sp_offset + alloc_size);
5866 if (cfg->frame_reg != ARMREG_SP) {
5867 ARM_MOV_REG_REG (code, cfg->frame_reg, ARMREG_SP);
5868 mono_emit_unwind_op_def_cfa_reg (cfg, code, cfg->frame_reg);
5870 //g_print ("prev_sp_offset: %d, alloc_size:%d\n", prev_sp_offset, alloc_size);
5871 prev_sp_offset += alloc_size;
/* Alignment padding holds no references. */
5873 for (i = 0; i < alloc_size - orig_alloc_size; i += 4)
5874 mini_gc_set_slot_type_from_cfa (cfg, (- prev_sp_offset) + orig_alloc_size + i, SLOT_NOREF);
5876 /* compute max_offset in order to use short forward jumps
5877 * we could skip do it on arm because the immediate displacement
5878 * for jumps is large enough, it may be useful later for constant pools
/* Sum per-opcode worst-case lengths to give every bb an offset bound. */
5881 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
5882 MonoInst *ins = bb->code;
5883 bb->max_offset = max_offset;
5885 if (cfg->prof_options & MONO_PROFILE_COVERAGE)
5888 MONO_BB_FOR_EACH_INS (bb, ins)
5889 max_offset += ((guint8 *)ins_get_spec (ins->opcode))[MONO_INST_LEN];
5892 /* store runtime generic context */
/* Spill the incoming rgctx register into its stack slot. */
5893 if (cfg->rgctx_var) {
5894 MonoInst *ins = cfg->rgctx_var;
5896 g_assert (ins->opcode == OP_REGOFFSET);
5898 if (arm_is_imm12 (ins->inst_offset)) {
5899 ARM_STR_IMM (code, MONO_ARCH_RGCTX_REG, ins->inst_basereg, ins->inst_offset);
5901 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_offset);
5902 ARM_STR_REG_REG (code, MONO_ARCH_RGCTX_REG, ins->inst_basereg, ARMREG_LR);
5906 /* load arguments allocated to register from the stack */
5909 cinfo = get_call_info (cfg->generic_sharing_context, NULL, sig);
/* Valuetype-return: store the hidden return-buffer address register
 * into the vret_addr variable's slot. */
5911 if (cinfo->vtype_retaddr) {
5912 ArgInfo *ainfo = &cinfo->ret;
5913 inst = cfg->vret_addr;
5914 g_assert (arm_is_imm12 (inst->inst_offset));
5915 ARM_STR_IMM (code, ainfo->reg, inst->inst_basereg, inst->inst_offset);
/* Vararg methods: compute and store the address of the signature
 * cookie (stack-passed) for later retrieval by ArgIterator. */
5918 if (sig->call_convention == MONO_CALL_VARARG) {
5919 ArgInfo *cookie = &cinfo->sig_cookie;
5921 /* Save the sig cookie address */
5922 g_assert (cookie->storage == RegTypeBase);
5924 g_assert (arm_is_imm12 (prev_sp_offset + cookie->offset));
5925 g_assert (arm_is_imm12 (cfg->sig_cookie));
5926 ARM_ADD_REG_IMM8 (code, ARMREG_IP, cfg->frame_reg, prev_sp_offset + cookie->offset);
5927 ARM_STR_IMM (code, ARMREG_IP, cfg->frame_reg, cfg->sig_cookie);
/* Save every incoming argument to its home location: either copy a
 * register-passed arg into its assigned vreg/stack slot, or move a
 * caller-stack arg into the callee frame. Sizes 1/2/8 get their own
 * store forms; everything else is word-sized. */
5930 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
5931 ArgInfo *ainfo = cinfo->args + i;
5932 inst = cfg->args [pos];
5934 if (cfg->verbose_level > 2)
5935 g_print ("Saving argument %d (type: %d)\n", i, ainfo->storage);
/* Argument lives in a register for the whole method. */
5936 if (inst->opcode == OP_REGVAR) {
5937 if (ainfo->storage == RegTypeGeneral)
5938 ARM_MOV_REG_REG (code, inst->dreg, ainfo->reg);
5939 else if (ainfo->storage == RegTypeFP) {
5940 g_assert_not_reached ();
5941 } else if (ainfo->storage == RegTypeBase) {
/* Incoming arg is on the caller's stack (above our saved regs). */
5942 if (arm_is_imm12 (prev_sp_offset + ainfo->offset)) {
5943 ARM_LDR_IMM (code, inst->dreg, ARMREG_SP, (prev_sp_offset + ainfo->offset));
5945 code = mono_arm_emit_load_imm (code, ARMREG_IP, prev_sp_offset + ainfo->offset);
5946 ARM_LDR_REG_REG (code, inst->dreg, ARMREG_SP, ARMREG_IP);
5949 g_assert_not_reached ();
5951 if (cfg->verbose_level > 2)
5952 g_print ("Argument %d assigned to register %s\n", pos, mono_arch_regname (inst->dreg));
5954 /* the argument should be put on the stack: FIXME handle size != word */
/* Register-passed argument stored to its stack slot. */
5955 if (ainfo->storage == RegTypeGeneral || ainfo->storage == RegTypeIRegPair || ainfo->storage == RegTypeGSharedVtInReg) {
5956 switch (ainfo->size) {
5958 if (arm_is_imm12 (inst->inst_offset))
5959 ARM_STRB_IMM (code, ainfo->reg, inst->inst_basereg, inst->inst_offset);
5961 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
5962 ARM_STRB_REG_REG (code, ainfo->reg, inst->inst_basereg, ARMREG_IP);
5966 if (arm_is_imm8 (inst->inst_offset)) {
5967 ARM_STRH_IMM (code, ainfo->reg, inst->inst_basereg, inst->inst_offset);
5969 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
5970 ARM_STRH_REG_REG (code, ainfo->reg, inst->inst_basereg, ARMREG_IP);
/* size 8: store the register pair (reg, reg+1) to two slots. */
5974 if (arm_is_imm12 (inst->inst_offset)) {
5975 ARM_STR_IMM (code, ainfo->reg, inst->inst_basereg, inst->inst_offset);
5977 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
5978 ARM_STR_REG_REG (code, ainfo->reg, inst->inst_basereg, ARMREG_IP);
5980 if (arm_is_imm12 (inst->inst_offset + 4)) {
5981 ARM_STR_IMM (code, ainfo->reg + 1, inst->inst_basereg, inst->inst_offset + 4);
5983 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset + 4);
5984 ARM_STR_REG_REG (code, ainfo->reg + 1, inst->inst_basereg, ARMREG_IP);
5988 if (arm_is_imm12 (inst->inst_offset)) {
5989 ARM_STR_IMM (code, ainfo->reg, inst->inst_basereg, inst->inst_offset);
5991 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
5992 ARM_STR_REG_REG (code, ainfo->reg, inst->inst_basereg, ARMREG_IP);
/* Split argument: low word on the stack, high word in r3. */
5996 } else if (ainfo->storage == RegTypeBaseGen) {
5997 if (arm_is_imm12 (prev_sp_offset + ainfo->offset)) {
5998 ARM_LDR_IMM (code, ARMREG_LR, ARMREG_SP, (prev_sp_offset + ainfo->offset));
6000 code = mono_arm_emit_load_imm (code, ARMREG_IP, prev_sp_offset + ainfo->offset);
6001 ARM_LDR_REG_REG (code, ARMREG_LR, ARMREG_SP, ARMREG_IP);
6003 if (arm_is_imm12 (inst->inst_offset + 4)) {
6004 ARM_STR_IMM (code, ARMREG_LR, inst->inst_basereg, inst->inst_offset + 4);
6005 ARM_STR_IMM (code, ARMREG_R3, inst->inst_basereg, inst->inst_offset);
6007 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset + 4);
6008 ARM_STR_REG_REG (code, ARMREG_LR, inst->inst_basereg, ARMREG_IP);
6009 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
6010 ARM_STR_REG_REG (code, ARMREG_R3, inst->inst_basereg, ARMREG_IP);
/* Fully stack-passed argument: load via lr, store to our frame. */
6012 } else if (ainfo->storage == RegTypeBase || ainfo->storage == RegTypeGSharedVtOnStack) {
6013 if (arm_is_imm12 (prev_sp_offset + ainfo->offset)) {
6014 ARM_LDR_IMM (code, ARMREG_LR, ARMREG_SP, (prev_sp_offset + ainfo->offset));
6016 code = mono_arm_emit_load_imm (code, ARMREG_IP, prev_sp_offset + ainfo->offset);
6017 ARM_LDR_REG_REG (code, ARMREG_LR, ARMREG_SP, ARMREG_IP);
6020 switch (ainfo->size) {
6022 if (arm_is_imm8 (inst->inst_offset)) {
6023 ARM_STRB_IMM (code, ARMREG_LR, inst->inst_basereg, inst->inst_offset);
6025 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
6026 ARM_STRB_REG_REG (code, ARMREG_LR, inst->inst_basereg, ARMREG_IP);
6030 if (arm_is_imm8 (inst->inst_offset)) {
6031 ARM_STRH_IMM (code, ARMREG_LR, inst->inst_basereg, inst->inst_offset);
6033 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
6034 ARM_STRH_REG_REG (code, ARMREG_LR, inst->inst_basereg, ARMREG_IP);
/* size 8: copy both words through lr, one at a time. */
6038 if (arm_is_imm12 (inst->inst_offset)) {
6039 ARM_STR_IMM (code, ARMREG_LR, inst->inst_basereg, inst->inst_offset);
6041 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
6042 ARM_STR_REG_REG (code, ARMREG_LR, inst->inst_basereg, ARMREG_IP);
6044 if (arm_is_imm12 (prev_sp_offset + ainfo->offset + 4)) {
6045 ARM_LDR_IMM (code, ARMREG_LR, ARMREG_SP, (prev_sp_offset + ainfo->offset + 4));
6047 code = mono_arm_emit_load_imm (code, ARMREG_IP, prev_sp_offset + ainfo->offset + 4);
6048 ARM_LDR_REG_REG (code, ARMREG_LR, ARMREG_SP, ARMREG_IP);
6050 if (arm_is_imm12 (inst->inst_offset + 4)) {
6051 ARM_STR_IMM (code, ARMREG_LR, inst->inst_basereg, inst->inst_offset + 4);
6053 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset + 4);
6054 ARM_STR_REG_REG (code, ARMREG_LR, inst->inst_basereg, ARMREG_IP);
6058 if (arm_is_imm12 (inst->inst_offset)) {
6059 ARM_STR_IMM (code, ARMREG_LR, inst->inst_basereg, inst->inst_offset);
6061 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
6062 ARM_STR_REG_REG (code, ARMREG_LR, inst->inst_basereg, ARMREG_IP);
/* Hard-float FP argument: compute the address in ip, FSTD/FSTS. */
6066 } else if (ainfo->storage == RegTypeFP) {
6067 int imm8, rot_amount;
6069 if ((imm8 = mono_arm_is_rotated_imm8 (inst->inst_offset, &rot_amount)) == -1) {
6070 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
6071 ARM_ADD_REG_REG (code, ARMREG_IP, ARMREG_IP, inst->inst_basereg);
6073 ARM_ADD_REG_IMM (code, ARMREG_IP, inst->inst_basereg, imm8, rot_amount);
6075 if (ainfo->size == 8)
6076 ARM_FSTD (code, ainfo->reg, ARMREG_IP, 0);
6078 ARM_FSTS (code, ainfo->reg, ARMREG_IP, 0);
/* Struct passed (partially) in registers: store the register part,
 * then memcpy any remaining words from the caller's stack. */
6079 } else if (ainfo->storage == RegTypeStructByVal) {
6080 int doffset = inst->inst_offset;
6084 size = mini_type_stack_size_full (cfg->generic_sharing_context, inst->inst_vtype, NULL, sig->pinvoke);
6085 for (cur_reg = 0; cur_reg < ainfo->size; ++cur_reg) {
6086 if (arm_is_imm12 (doffset)) {
6087 ARM_STR_IMM (code, ainfo->reg + cur_reg, inst->inst_basereg, doffset);
6089 code = mono_arm_emit_load_imm (code, ARMREG_IP, doffset);
6090 ARM_STR_REG_REG (code, ainfo->reg + cur_reg, inst->inst_basereg, ARMREG_IP);
6092 soffset += sizeof (gpointer);
6093 doffset += sizeof (gpointer);
6095 if (ainfo->vtsize) {
6096 /* FIXME: handle overrun! with struct sizes not multiple of 4 */
6097 //g_print ("emit_memcpy (prev_sp_ofs: %d, ainfo->offset: %d, soffset: %d)\n", prev_sp_offset, ainfo->offset, soffset);
6098 code = emit_memcpy (code, ainfo->vtsize * sizeof (gpointer), inst->inst_basereg, doffset, ARMREG_SP, prev_sp_offset + ainfo->offset);
6100 } else if (ainfo->storage == RegTypeStructByAddr) {
6101 g_assert_not_reached ();
6102 /* FIXME: handle overrun! with struct sizes not multiple of 4 */
6103 code = emit_memcpy (code, ainfo->vtsize * sizeof (gpointer), inst->inst_basereg, inst->inst_offset, ainfo->reg, 0);
6105 g_assert_not_reached ();
/* Link the LMF (Last Managed Frame) into the per-thread chain. */
6110 if (method->save_lmf)
6111 code = emit_save_lmf (cfg, code, alloc_size - lmf_offset);
6114 code = mono_arch_instrument_prolog (cfg, mono_trace_enter_method, code, TRUE);
/* Sequence-point support: initialize seq_point_info_var from a GOT
 * slot (AOT) resolved through a SEQ_POINT_INFO patch. */
6116 if (cfg->arch.seq_point_info_var) {
6117 MonoInst *ins = cfg->arch.seq_point_info_var;
6119 /* Initialize the variable from a GOT slot */
6120 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_SEQ_POINT_INFO, cfg->method);
6121 #ifdef USE_JUMP_TABLES
6123 gpointer *jte = mono_jumptable_add_entry ();
6124 code = mono_arm_load_jumptable_entry (code, jte, ARMREG_IP);
6125 ARM_LDR_IMM (code, ARMREG_R0, ARMREG_IP, 0);
6127 /** XXX: is it correct? */
/* Non-jumptable path: pc-relative load of an inline pointer slot
 * (patched later), then an extra indirection through r0. */
6129 ARM_LDR_IMM (code, ARMREG_R0, ARMREG_PC, 0);
6131 *(gpointer*)code = NULL;
6134 ARM_LDR_REG_REG (code, ARMREG_R0, ARMREG_PC, ARMREG_R0);
6136 g_assert (ins->opcode == OP_REGOFFSET);
6138 if (arm_is_imm12 (ins->inst_offset)) {
6139 ARM_STR_IMM (code, ARMREG_R0, ins->inst_basereg, ins->inst_offset);
6141 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_offset);
6142 ARM_STR_REG_REG (code, ARMREG_R0, ins->inst_basereg, ARMREG_LR);
6146 /* Initialize ss_trigger_page_var */
/* Hardware breakpoints: cache the single-step trigger page address
 * (read from SeqPointInfo) in a local so bb code can poll it. */
6147 if (!cfg->soft_breakpoints) {
6148 MonoInst *info_var = cfg->arch.seq_point_info_var;
6149 MonoInst *ss_trigger_page_var = cfg->arch.ss_trigger_page_var;
6150 int dreg = ARMREG_LR;
6153 g_assert (info_var->opcode == OP_REGOFFSET);
6154 g_assert (arm_is_imm12 (info_var->inst_offset));
6156 ARM_LDR_IMM (code, dreg, info_var->inst_basereg, info_var->inst_offset);
6157 /* Load the trigger page addr */
6158 ARM_LDR_IMM (code, dreg, dreg, MONO_STRUCT_OFFSET (SeqPointInfo, ss_trigger_page));
6159 ARM_STR_IMM (code, dreg, ss_trigger_page_var->inst_basereg, ss_trigger_page_var->inst_offset);
/* Soft breakpoints: stash pointers to the ss trigger variable and the
 * single-step/breakpoint wrapper functions into their locals. The
 * three pointers live in a jump table or inline after the mov lr,pc. */
6163 if (cfg->arch.seq_point_read_var) {
6164 MonoInst *read_ins = cfg->arch.seq_point_read_var;
6165 MonoInst *ss_method_ins = cfg->arch.seq_point_ss_method_var;
6166 MonoInst *bp_method_ins = cfg->arch.seq_point_bp_method_var;
6167 #ifdef USE_JUMP_TABLES
6170 g_assert (read_ins->opcode == OP_REGOFFSET);
6171 g_assert (arm_is_imm12 (read_ins->inst_offset));
6172 g_assert (ss_method_ins->opcode == OP_REGOFFSET);
6173 g_assert (arm_is_imm12 (ss_method_ins->inst_offset));
6174 g_assert (bp_method_ins->opcode == OP_REGOFFSET);
6175 g_assert (arm_is_imm12 (bp_method_ins->inst_offset));
6177 #ifdef USE_JUMP_TABLES
6178 jte = mono_jumptable_add_entries (3);
6179 jte [0] = (gpointer)&ss_trigger_var;
6180 jte [1] = single_step_func_wrapper;
6181 jte [2] = breakpoint_func_wrapper;
6182 code = mono_arm_load_jumptable_entry_addr (code, jte, ARMREG_LR);
/* lr <- pc points at the inline data that follows (pc = here + 8). */
6184 ARM_MOV_REG_REG (code, ARMREG_LR, ARMREG_PC);
6186 *(volatile int **)code = &ss_trigger_var;
6188 *(gpointer*)code = single_step_func_wrapper;
6190 *(gpointer*)code = breakpoint_func_wrapper;
/* Copy the three pointers into their respective locals via ip. */
6194 ARM_LDR_IMM (code, ARMREG_IP, ARMREG_LR, 0);
6195 ARM_STR_IMM (code, ARMREG_IP, read_ins->inst_basereg, read_ins->inst_offset);
6196 ARM_LDR_IMM (code, ARMREG_IP, ARMREG_LR, 4);
6197 ARM_STR_IMM (code, ARMREG_IP, ss_method_ins->inst_basereg, ss_method_ins->inst_offset);
6198 ARM_LDR_IMM (code, ARMREG_IP, ARMREG_LR, 8);
6199 ARM_STR_IMM (code, ARMREG_IP, bp_method_ins->inst_basereg, bp_method_ins->inst_offset);
6202 cfg->code_len = code - cfg->native_code;
6203 g_assert (cfg->code_len < cfg->code_size);
/*
 * mono_arch_emit_epilog:
 *
 *   Emit the method epilog: grow the code buffer if needed, optionally emit
 * method-leave tracing, reload a struct returned by value into r0, then tear
 * down either the LMF frame (method->save_lmf) or the plain frame and return
 * by popping the saved LR into PC.
 * NOTE(review): this listing elides some original lines (declarations of
 * `code`/`cinfo`/`pos`, several braces); comments describe only what is visible.
 */
6210 mono_arch_emit_epilog (MonoCompile *cfg)
6212 MonoMethod *method = cfg->method;
6213 int pos, i, rot_amount;
/* Worst-case epilog size estimate, grown below for LMF/tracing/profiling. */
6214 int max_epilog_size = 16 + 20*4;
6218 if (cfg->method->save_lmf)
6219 max_epilog_size += 128;
6221 if (mono_jit_trace_calls != NULL)
6222 max_epilog_size += 50;
6224 if (cfg->prof_options & MONO_PROFILE_ENTER_LEAVE)
6225 max_epilog_size += 50;
/* Double the buffer until the estimated epilog fits (16 bytes slack). */
6227 while (cfg->code_len + max_epilog_size > (cfg->code_size - 16)) {
6228 cfg->code_size *= 2;
6229 cfg->native_code = g_realloc (cfg->native_code, cfg->code_size);
6230 cfg->stat_code_reallocs++;
6234 * Keep in sync with OP_JMP
6236 code = cfg->native_code + cfg->code_len;
6238 if (mono_jit_trace_calls != NULL && mono_trace_eval (method)) {
6239 code = mono_arch_instrument_epilog (cfg, mono_trace_leave_method, code, TRUE);
6243 /* Load returned vtypes into registers if needed */
6244 cinfo = cfg->arch.cinfo;
6245 if (cinfo->ret.storage == RegTypeStructByVal) {
6246 MonoInst *ins = cfg->ret;
/* Use the short-form LDR when the stack offset fits in 12 bits. */
6248 if (arm_is_imm12 (ins->inst_offset)) {
6249 ARM_LDR_IMM (code, ARMREG_R0, ins->inst_basereg, ins->inst_offset);
6251 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_offset);
6252 ARM_LDR_REG_REG (code, ARMREG_R0, ins->inst_basereg, ARMREG_LR);
/* LMF teardown path: unlink the LMF, then pop the saved registers from
 * inside MonoLMF->iregs, restoring the saved LR directly into PC. */
6256 if (method->save_lmf) {
6257 int lmf_offset, reg, sp_adj, regmask;
6258 /* all but r0-r3, sp and pc */
6259 pos += sizeof (MonoLMF) - (MONO_ARM_NUM_SAVED_REGS * sizeof (mgreg_t));
6262 code = emit_restore_lmf (cfg, code, cfg->stack_usage - lmf_offset);
6264 /* This points to r4 inside MonoLMF->iregs */
6265 sp_adj = (sizeof (MonoLMF) - MONO_ARM_NUM_SAVED_REGS * sizeof (mgreg_t));
6267 regmask = 0x9ff0; /* restore lr to pc */
6268 /* Skip caller saved registers not used by the method */
6269 while (!(cfg->used_int_regs & (1 << reg)) && reg < ARMREG_FP) {
6270 regmask &= ~(1 << reg);
6275 /* Restored later */
6276 regmask &= ~(1 << ARMREG_PC);
6277 /* point sp at the registers to restore: 10 is 14 -4, because we skip r0-r3 */
6278 code = emit_big_add (code, ARMREG_SP, cfg->frame_reg, cfg->stack_usage - lmf_offset + sp_adj);
6280 ARM_POP (code, regmask);
6282 /* Restore saved r7, restore LR to PC */
6283 /* Skip lr from the lmf */
6284 ARM_ADD_REG_IMM (code, ARMREG_SP, ARMREG_SP, sizeof (gpointer), 0);
6285 ARM_POP (code, (1 << ARMREG_R7) | (1 << ARMREG_PC));
/* Non-LMF path: rewind SP past the frame (rotated-imm8 form if possible),
 * then pop callee-saved registers and return via PC. */
6288 if ((i = mono_arm_is_rotated_imm8 (cfg->stack_usage, &rot_amount)) >= 0) {
6289 ARM_ADD_REG_IMM (code, ARMREG_SP, cfg->frame_reg, i, rot_amount);
6291 code = mono_arm_emit_load_imm (code, ARMREG_IP, cfg->stack_usage);
6292 ARM_ADD_REG_REG (code, ARMREG_SP, cfg->frame_reg, ARMREG_IP);
6296 /* Restore saved gregs */
6297 if (cfg->used_int_regs)
6298 ARM_POP (code, cfg->used_int_regs);
6299 /* Restore saved r7, restore LR to PC */
6300 ARM_POP (code, (1 << ARMREG_R7) | (1 << ARMREG_PC));
6302 ARM_POP (code, cfg->used_int_regs | (1 << ARMREG_PC));
6306 cfg->code_len = code - cfg->native_code;
6308 g_assert (cfg->code_len < cfg->code_size);
/*
 * mono_arch_emit_exceptions:
 *
 *   Emit the out-of-line exception-throwing stubs for this method. Each
 * distinct corlib exception gets one stub; every MONO_PATCH_INFO_EXC branch
 * in the method body is patched to jump to the shared stub, which loads the
 * exception's type token into r0, the throw site's LR into r1, and calls
 * mono_arch_throw_corlib_exception.
 * NOTE(review): some original lines (declarations, braces, #else arms)
 * are elided in this listing.
 */
6313 mono_arch_emit_exceptions (MonoCompile *cfg)
6315 MonoJumpInfo *patch_info;
6318 guint8* exc_throw_pos [MONO_EXC_INTRINS_NUM];
6319 guint8 exc_throw_found [MONO_EXC_INTRINS_NUM];
6320 int max_epilog_size = 50;
6322 for (i = 0; i < MONO_EXC_INTRINS_NUM; i++) {
6323 exc_throw_pos [i] = NULL;
6324 exc_throw_found [i] = 0;
6327 /* count the number of exception infos */
6330 * make sure we have enough space for exceptions
/* First pass: reserve 32 bytes of stub space per distinct exception type. */
6332 for (patch_info = cfg->patch_info; patch_info; patch_info = patch_info->next) {
6333 if (patch_info->type == MONO_PATCH_INFO_EXC) {
6334 i = mini_exception_id_by_name (patch_info->data.target);
6335 if (!exc_throw_found [i]) {
6336 max_epilog_size += 32;
6337 exc_throw_found [i] = TRUE;
6342 while (cfg->code_len + max_epilog_size > (cfg->code_size - 16)) {
6343 cfg->code_size *= 2;
6344 cfg->native_code = g_realloc (cfg->native_code, cfg->code_size);
6345 cfg->stat_code_reallocs++;
6348 code = cfg->native_code + cfg->code_len;
6350 /* add code to raise exceptions */
6351 for (patch_info = cfg->patch_info; patch_info; patch_info = patch_info->next) {
6352 switch (patch_info->type) {
6353 case MONO_PATCH_INFO_EXC: {
6354 MonoClass *exc_class;
6355 unsigned char *ip = patch_info->ip.i + cfg->native_code;
6357 i = mini_exception_id_by_name (patch_info->data.target);
/* If a stub for this exception type already exists, just redirect the
 * branch to it and drop the patch entry. */
6358 if (exc_throw_pos [i]) {
6359 arm_patch (ip, exc_throw_pos [i]);
6360 patch_info->type = MONO_PATCH_INFO_NONE;
6363 exc_throw_pos [i] = code;
6365 arm_patch (ip, code);
6367 exc_class = mono_class_from_name (mono_defaults.corlib, "System", patch_info->data.name);
6368 g_assert (exc_class);
/* r1 = throw-site return address (used for stack trace / unwinding). */
6370 ARM_MOV_REG_REG (code, ARMREG_R1, ARMREG_LR);
6371 #ifdef USE_JUMP_TABLES
/* Jumptable variant: slot 0 = throw helper (filled via patch), slot 1 =
 * the exception's type token, loaded into r0 before the call. */
6373 gpointer *jte = mono_jumptable_add_entries (2);
6374 patch_info->type = MONO_PATCH_INFO_INTERNAL_METHOD;
6375 patch_info->data.name = "mono_arch_throw_corlib_exception";
6376 patch_info->ip.i = code - cfg->native_code;
6377 code = mono_arm_load_jumptable_entry_addr (code, jte, ARMREG_R0);
6378 ARM_LDR_IMM (code, ARMREG_IP, ARMREG_R0, 0);
6379 ARM_LDR_IMM (code, ARMREG_R0, ARMREG_R0, 4);
6380 ARM_BLX_REG (code, ARMREG_IP);
6381 jte [1] = GUINT_TO_POINTER (exc_class->type_token);
/* Constant-pool variant: the type token is embedded in the code stream
 * right after the PC-relative LDR that reads it. */
6384 ARM_LDR_IMM (code, ARMREG_R0, ARMREG_PC, 0);
6385 patch_info->type = MONO_PATCH_INFO_INTERNAL_METHOD;
6386 patch_info->data.name = "mono_arch_throw_corlib_exception";
6387 patch_info->ip.i = code - cfg->native_code;
6389 *(guint32*)(gpointer)code = exc_class->type_token;
6400 cfg->code_len = code - cfg->native_code;
6402 g_assert (cfg->code_len < cfg->code_size);
6406 #endif /* #ifndef DISABLE_JIT */
/* Arch-interface hooks. NOTE(review): only the signature lines are visible
 * in this listing; the (presumably trivial) bodies are elided. */
/* One-time backend initialization after the JIT is set up. */
6409 mono_arch_finish_init (void)
/* Release per-thread JIT TLS data; likely a no-op on ARM — TODO confirm. */
6414 mono_arch_free_jit_tls_data (MonoJitTlsData *tls)
/* Hook for replacing known methods with intrinsic instructions. */
6419 mono_arch_emit_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
/* Debug pretty-printer for an instruction tree. */
6426 mono_arch_print_tree (MonoInst *tree, int arity)
/* Offset inside CODE where a patch target is encoded. */
6436 mono_arch_get_patch_offset (guint8 *code)
/* Register-window flush; ARM has no register windows. */
6443 mono_arch_flush_register_windows (void)
/*
 * mono_arch_emit_imt_argument:
 *
 *   Arrange for the IMT/method argument of CALL to be passed in V5 (r8).
 * Under AOT the method is materialized with an OP_AOTCONST; otherwise, when
 * generics/LLVM/jumptables require it, the method pointer is passed as a
 * plain OP_PCONST. When IMT_ARG is provided its vreg is moved into place.
 * NOTE(review): some lines (braces, #else arms) are elided in this listing.
 */
6450 mono_arch_emit_imt_argument (MonoCompile *cfg, MonoCallInst *call, MonoInst *imt_arg)
6452 int method_reg = mono_alloc_ireg (cfg);
6453 #ifdef USE_JUMP_TABLES
6454 int use_jumptables = TRUE;
6456 int use_jumptables = FALSE;
6459 if (cfg->compile_aot) {
6462 call->dynamic_imt_arg = TRUE;
6465 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, method_reg, imt_arg->dreg);
/* No explicit imt_arg: load the method constant through an AOT patch. */
6467 MONO_INST_NEW (cfg, ins, OP_AOTCONST);
6468 ins->dreg = method_reg;
6469 ins->inst_p0 = call->method;
6470 ins->inst_c1 = MONO_PATCH_INFO_METHODCONST;
6471 MONO_ADD_INS (cfg->cbb, ins);
6473 mono_call_inst_add_outarg_reg (cfg, call, method_reg, ARMREG_V5, FALSE);
6474 } else if (cfg->generic_context || imt_arg || mono_use_llvm || use_jumptables) {
6475 /* Always pass in a register for simplicity */
6476 call->dynamic_imt_arg = TRUE;
6478 cfg->uses_rgctx_reg = TRUE;
6481 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, method_reg, imt_arg->dreg);
6485 MONO_INST_NEW (cfg, ins, OP_PCONST);
6486 ins->inst_p0 = call->method;
6487 ins->dreg = method_reg;
6488 MONO_ADD_INS (cfg->cbb, ins);
6491 mono_call_inst_add_outarg_reg (cfg, call, method_reg, ARMREG_V5, FALSE);
6495 #endif /* DISABLE_JIT */
/*
 * mono_arch_find_imt_method:
 *
 *   Recover the IMT method for a call at CODE. With jumptables (or AOT /
 * gsharedvt), the method is simply in register V5; otherwise it is read
 * from the code stream, where it is embedded right after a PC-relative LDR.
 * NOTE(review): several lines (branch structure, #else arms) are elided.
 */
6498 mono_arch_find_imt_method (mgreg_t *regs, guint8 *code)
6500 #ifdef USE_JUMP_TABLES
6501 return (MonoMethod*)regs [ARMREG_V5];
6504 guint32 *code_ptr = (guint32*)code;
6506 method = GUINT_TO_POINTER (code_ptr [1]);
6510 return (MonoMethod*)regs [ARMREG_V5];
6512 /* The IMT value is stored in the code stream right after the LDC instruction. */
6513 /* This is no longer true for the gsharedvt_in trampoline */
/* Sanity check: the word before the embedded value must be an LDR via PC. */
6515 if (!IS_LDR_PC (code_ptr [0])) {
6516 g_warning ("invalid code stream, instruction before IMT value is not a LDC in %s() (code %p value 0: 0x%x -1: 0x%x -2: 0x%x)", __FUNCTION__, code, code_ptr [2], code_ptr [1], code_ptr [0]);
6517 g_assert (IS_LDR_PC (code_ptr [0]));
6521 /* This is AOTed code, or the gsharedvt trampoline, the IMT method is in V5 */
6522 return (MonoMethod*)regs [ARMREG_V5];
6524 return (MonoMethod*) method;
/* Return the vtable for a static rgctx call: it is kept in the RGCTX register. */
6529 mono_arch_find_static_call_vtable (mgreg_t *regs, guint8 *code)
6531 return (MonoVTable*) regs [MONO_ARCH_RGCTX_REG];
/* Byte-size estimates (in 4-byte ARM instructions) for the pieces of an IMT
 * thunk, used by mono_arch_build_imt_thunk to size its allocation. */
6534 /* #define ENABLE_WRONG_METHOD_CHECK 1 */
6535 #define BASE_SIZE (6 * 4)
6536 #define BSEARCH_ENTRY_SIZE (4 * 4)
6537 #define CMP_SIZE (3 * 4)
6538 #define BRANCH_SIZE (1 * 4)
6539 #define CALL_SIZE (2 * 4)
6540 #define WMC_SIZE (8 * 4)
/* Signed byte distance from A to B; also used for vtable slot offsets. */
6541 #define DISTANCE(A, B) (((gint32)(B)) - ((gint32)(A)))
6543 #ifdef USE_JUMP_TABLES
/* Store VALUE into jumptable slot INDEX; asserts the slot is still empty
 * (each slot must be filled exactly once). */
6545 set_jumptable_element (gpointer *base, guint32 index, gpointer value)
6547 g_assert (base [index] == NULL);
6548 base [index] = value;
/* Emit a conditional load of jumptable element JTI (byte offset jti*4, from
 * BASE) into DREG. Uses a single LDR when the offset fits in 12 bits,
 * otherwise materializes it with MOVW/MOVT and an indexed LDR.
 * NOTE(review): the trailing `return code;` line is elided in this listing. */
6551 load_element_with_regbase_cond (arminstr_t *code, ARMReg dreg, ARMReg base, guint32 jti, int cond)
6553 if (arm_is_imm12 (jti * 4)) {
6554 ARM_LDR_IMM_COND (code, dreg, base, jti * 4, cond);
6556 ARM_MOVW_REG_IMM_COND (code, dreg, (jti * 4) & 0xffff, cond);
/* MOVT only needed when the offset has bits above the low 16. */
6557 if ((jti * 4) >> 16)
6558 ARM_MOVT_REG_IMM_COND (code, dreg, ((jti * 4) >> 16) & 0xffff, cond);
6559 ARM_LDR_REG_REG_SHIFT_COND (code, dreg, base, dreg, ARMSHIFT_LSL, 0, cond);
/* Emit VALUE into the code stream at CODE and patch the 12-bit immediate of
 * the earlier PC-relative LDR at TARGET so it addresses the emitted word.
 * NOTE(review): `delta` is unsigned, so the `delta >= 0` half of the assert
 * is always true; only the upper bound (imm12 range) is actually checked. */
6565 arm_emit_value_and_patch_ldr (arminstr_t *code, arminstr_t *target, guint32 value)
6567 guint32 delta = DISTANCE (target, code);
6569 g_assert (delta >= 0 && delta <= 0xFFF);
6570 *target = *target | delta;
6576 #ifdef ENABLE_WRONG_METHOD_CHECK
/* Debug helper (ENABLE_WRONG_METHOD_CHECK only): report an IMT mismatch. */
6578 mini_dump_bad_imt (int input_imt, int compared_imt, int pc)
6580 g_print ("BAD IMT comparing %x with expected %x at ip %x", input_imt, compared_imt, pc);
/*
 * mono_arch_build_imt_thunk:
 *
 *   Build the native IMT thunk for VTABLE: a dispatch routine that compares
 * the incoming IMT method (r0/V5) against each entry's key and jumps to the
 * matching vtable slot, a supplied target-code address, or FAIL_TRAMP.
 * Two code shapes exist: a jumptable-based one (USE_JUMP_TABLES) that keeps
 * keys/targets in an external table addressed via r2, and a constant-pool
 * one that embeds values in the code stream and back-patches PC-relative
 * LDRs. Returns the start of the generated thunk.
 * NOTE(review): many original lines (braces, #else arms, declarations such
 * as `size`, `jte`, `i`, `j`) are elided in this listing; comments describe
 * only what is visible.
 */
6586 mono_arch_build_imt_thunk (MonoVTable *vtable, MonoDomain *domain, MonoIMTCheckItem **imt_entries, int count,
6587 gpointer fail_tramp)
6590 arminstr_t *code, *start;
6591 #ifdef USE_JUMP_TABLES
6594 gboolean large_offsets = FALSE;
6595 guint32 **constant_pool_starts;
6596 arminstr_t *vtable_target = NULL;
6597 int extra_space = 0;
6599 #ifdef ENABLE_WRONG_METHOD_CHECK
/* --- Phase 1: estimate per-entry chunk sizes and the total allocation. --- */
6604 #ifdef USE_JUMP_TABLES
/* Jumptable sizing is coarse: a flat 16 instructions per entry. */
6605 for (i = 0; i < count; ++i) {
6606 MonoIMTCheckItem *item = imt_entries [i];
6607 item->chunk_size += 4 * 16;
6608 if (!item->is_equals)
6609 imt_entries [item->check_target_idx]->compare_done = TRUE;
6610 size += item->chunk_size;
6613 constant_pool_starts = g_new0 (guint32*, count);
/* Constant-pool sizing: account for compares, branches and wide offsets. */
6615 for (i = 0; i < count; ++i) {
6616 MonoIMTCheckItem *item = imt_entries [i];
6617 if (item->is_equals) {
6618 gboolean fail_case = !item->check_target_idx && fail_tramp;
/* Extra space when the vtable slot offset doesn't fit an imm12 LDR. */
6620 if (item->has_target_code || !arm_is_imm12 (DISTANCE (vtable, &vtable->vtable[item->value.vtable_slot]))) {
6621 item->chunk_size += 32;
6622 large_offsets = TRUE;
6625 if (item->check_target_idx || fail_case) {
6626 if (!item->compare_done || fail_case)
6627 item->chunk_size += CMP_SIZE;
6628 item->chunk_size += BRANCH_SIZE;
6630 #ifdef ENABLE_WRONG_METHOD_CHECK
6631 item->chunk_size += WMC_SIZE;
6635 item->chunk_size += 16;
6636 large_offsets = TRUE;
6638 item->chunk_size += CALL_SIZE;
/* Binary-search node: just a compare + conditional branch. */
6640 item->chunk_size += BSEARCH_ENTRY_SIZE;
6641 imt_entries [item->check_target_idx]->compare_done = TRUE;
6643 size += item->chunk_size;
6647 size += 4 * count; /* The ARM_ADD_REG_IMM to pop the stack */
/* --- Phase 2: allocate the code buffer. --- */
6651 code = mono_method_alloc_generic_virtual_thunk (domain, size);
6653 code = mono_domain_code_reserve (domain, size);
6657 g_print ("Building IMT thunk for class %s %s entries %d code size %d code at %p end %p vtable %p fail_tramp %p\n", vtable->klass->name_space, vtable->klass->name, count, size, start, ((guint8*)start) + size, vtable, fail_tramp);
6658 for (i = 0; i < count; ++i) {
6659 MonoIMTCheckItem *item = imt_entries [i];
6660 g_print ("method %d (%p) %s vtable slot %p is_equals %d chunk size %d\n", i, item->key, ((MonoMethod*)item->key)->name, &vtable->vtable [item->value.vtable_slot], item->is_equals, item->chunk_size);
/* --- Phase 3: emit the thunk prolog (save scratch regs, fetch vtable). --- */
6664 #ifdef USE_JUMP_TABLES
6665 ARM_PUSH3 (code, ARMREG_R0, ARMREG_R1, ARMREG_R2);
6666 /* If jumptables we always pass the IMT method in R5 */
6667 ARM_MOV_REG_REG (code, ARMREG_R0, ARMREG_V5);
/* Jumptable layout: slot 0 = vtable, then 3 slots per entry
 * (IMT method key, target code, jump target). */
6668 #define VTABLE_JTI 0
6669 #define IMT_METHOD_OFFSET 0
6670 #define TARGET_CODE_OFFSET 1
6671 #define JUMP_CODE_OFFSET 2
6672 #define RECORDS_PER_ENTRY 3
6673 #define IMT_METHOD_JTI(idx) (1 + idx * RECORDS_PER_ENTRY + IMT_METHOD_OFFSET)
6674 #define TARGET_CODE_JTI(idx) (1 + idx * RECORDS_PER_ENTRY + TARGET_CODE_OFFSET)
6675 #define JUMP_CODE_JTI(idx) (1 + idx * RECORDS_PER_ENTRY + JUMP_CODE_OFFSET)
6677 jte = mono_jumptable_add_entries (RECORDS_PER_ENTRY * count + 1 /* vtable */);
6678 code = (arminstr_t *) mono_arm_load_jumptable_entry_addr ((guint8 *) code, jte, ARMREG_R2);
6679 ARM_LDR_IMM (code, ARMREG_IP, ARMREG_R2, VTABLE_JTI);
6680 set_jumptable_element (jte, VTABLE_JTI, vtable);
6683 ARM_PUSH4 (code, ARMREG_R0, ARMREG_R1, ARMREG_IP, ARMREG_PC);
6685 ARM_PUSH2 (code, ARMREG_R0, ARMREG_R1);
/* Recover the IMT method embedded before the call site (LR - 4). */
6686 ARM_LDR_IMM (code, ARMREG_R0, ARMREG_LR, -4);
/* Remember this LDR so the vtable pointer can be back-patched into the
 * constant pool once an unconditional branch has been emitted. */
6687 vtable_target = code;
6688 ARM_LDR_IMM (code, ARMREG_IP, ARMREG_PC, 0);
6690 if (mono_use_llvm) {
6691 /* LLVM always passes the IMT method in R5 */
6692 ARM_MOV_REG_REG (code, ARMREG_R0, ARMREG_V5);
6694 /* R0 == 0 means we are called from AOT code. In this case, V5 contains the IMT method */
6695 ARM_CMP_REG_IMM8 (code, ARMREG_R0, 0);
6696 ARM_MOV_REG_REG_COND (code, ARMREG_R0, ARMREG_V5, ARMCOND_EQ);
/* --- Phase 4: emit the per-entry dispatch code. --- */
6700 for (i = 0; i < count; ++i) {
6701 MonoIMTCheckItem *item = imt_entries [i];
6702 #ifdef USE_JUMP_TABLES
6703 guint32 imt_method_jti = 0, target_code_jti = 0;
6705 arminstr_t *imt_method = NULL, *vtable_offset_ins = NULL, *target_code_ins = NULL;
6707 gint32 vtable_offset;
6709 item->code_target = (guint8*)code;
6711 if (item->is_equals) {
6712 gboolean fail_case = !item->check_target_idx && fail_tramp;
/* Compare r0 against this entry's key; branch away on mismatch. */
6714 if (item->check_target_idx || fail_case) {
6715 if (!item->compare_done || fail_case) {
6716 #ifdef USE_JUMP_TABLES
6717 imt_method_jti = IMT_METHOD_JTI (i);
6718 code = load_element_with_regbase_cond (code, ARMREG_R1, ARMREG_R2, imt_method_jti, ARMCOND_AL);
6721 ARM_LDR_IMM (code, ARMREG_R1, ARMREG_PC, 0);
6723 ARM_CMP_REG_REG (code, ARMREG_R0, ARMREG_R1);
6725 #ifdef USE_JUMP_TABLES
6726 code = load_element_with_regbase_cond (code, ARMREG_R1, ARMREG_R2, JUMP_CODE_JTI (i), ARMCOND_NE);
6727 ARM_BX_COND (code, ARMCOND_NE, ARMREG_R1);
/* jmp_code holds a jumptable index here, not a code address. */
6728 item->jmp_code = GUINT_TO_POINTER (JUMP_CODE_JTI (i));
6730 item->jmp_code = (guint8*)code;
6731 ARM_B_COND (code, ARMCOND_NE, 0);
6734 /*Enable the commented code to assert on wrong method*/
6735 #ifdef ENABLE_WRONG_METHOD_CHECK
6736 #ifdef USE_JUMP_TABLES
6737 imt_method_jti = IMT_METHOD_JTI (i);
6738 code = load_element_with_regbase_cond (code, ARMREG_R1, ARMREG_R2, imt_method_jti, ARMCOND_AL);
6741 ARM_LDR_IMM (code, ARMREG_R1, ARMREG_PC, 0);
6743 ARM_CMP_REG_REG (code, ARMREG_R0, ARMREG_R1);
6745 ARM_B_COND (code, ARMCOND_EQ, 0);
6747 /* Define this if your system is so bad that gdb is failing. */
6748 #ifdef BROKEN_DEV_ENV
6749 ARM_MOV_REG_REG (code, ARMREG_R2, ARMREG_PC);
6751 arm_patch (code - 1, mini_dump_bad_imt);
6755 arm_patch (cond, code);
/* Matched: jump to explicit target code if the entry carries one... */
6759 if (item->has_target_code) {
6760 /* Load target address */
6761 #ifdef USE_JUMP_TABLES
6762 target_code_jti = TARGET_CODE_JTI (i);
6763 code = load_element_with_regbase_cond (code, ARMREG_R1, ARMREG_R2, target_code_jti, ARMCOND_AL);
6764 /* Restore registers */
6765 ARM_POP3 (code, ARMREG_R0, ARMREG_R1, ARMREG_R2);
6767 ARM_BX (code, ARMREG_R1);
6768 set_jumptable_element (jte, target_code_jti, item->value.target_code);
6770 target_code_ins = code;
6771 ARM_LDR_IMM (code, ARMREG_R1, ARMREG_PC, 0);
6772 /* Save it to the fourth slot */
6773 ARM_STR_IMM (code, ARMREG_R1, ARMREG_SP, 3 * sizeof (gpointer));
6774 /* Restore registers and branch */
6775 ARM_POP4 (code, ARMREG_R0, ARMREG_R1, ARMREG_IP, ARMREG_PC);
6777 code = arm_emit_value_and_patch_ldr (code, target_code_ins, (gsize)item->value.target_code);
/* ...otherwise load the method out of the vtable slot and jump there. */
6780 vtable_offset = DISTANCE (vtable, &vtable->vtable[item->value.vtable_slot]);
6781 if (!arm_is_imm12 (vtable_offset)) {
6783 * We need to branch to a computed address but we don't have
6784 * a free register to store it, since IP must contain the
6785 * vtable address. So we push the two values to the stack, and
6786 * load them both using LDM.
6788 /* Compute target address */
6789 #ifdef USE_JUMP_TABLES
6790 ARM_MOVW_REG_IMM (code, ARMREG_R1, vtable_offset & 0xffff);
6791 if (vtable_offset >> 16)
6792 ARM_MOVT_REG_IMM (code, ARMREG_R1, (vtable_offset >> 16) & 0xffff);
6793 /* IP had vtable base. */
6794 ARM_LDR_REG_REG (code, ARMREG_IP, ARMREG_IP, ARMREG_R1);
6795 /* Restore registers and branch */
6796 ARM_POP3 (code, ARMREG_R0, ARMREG_R1, ARMREG_R2);
6797 ARM_BX (code, ARMREG_IP);
6799 vtable_offset_ins = code;
6800 ARM_LDR_IMM (code, ARMREG_R1, ARMREG_PC, 0);
6801 ARM_LDR_REG_REG (code, ARMREG_R1, ARMREG_IP, ARMREG_R1);
6802 /* Save it to the fourth slot */
6803 ARM_STR_IMM (code, ARMREG_R1, ARMREG_SP, 3 * sizeof (gpointer));
6804 /* Restore registers and branch */
6805 ARM_POP4 (code, ARMREG_R0, ARMREG_R1, ARMREG_IP, ARMREG_PC);
6807 code = arm_emit_value_and_patch_ldr (code, vtable_offset_ins, vtable_offset);
/* Short vtable offset: a single imm12 LDR reaches the slot. */
6810 #ifdef USE_JUMP_TABLES
6811 ARM_LDR_IMM (code, ARMREG_IP, ARMREG_IP, vtable_offset);
6812 ARM_POP3 (code, ARMREG_R0, ARMREG_R1, ARMREG_R2);
6813 ARM_BX (code, ARMREG_IP);
6815 ARM_POP2 (code, ARMREG_R0, ARMREG_R1);
6817 ARM_ADD_REG_IMM8 (code, ARMREG_SP, ARMREG_SP, 2 * sizeof (gpointer));
6818 ARM_LDR_IMM (code, ARMREG_PC, ARMREG_IP, vtable_offset);
/* fail_case: resolve the earlier mismatch branch to here, then jump to
 * FAIL_TRAMP. */
6824 #ifdef USE_JUMP_TABLES
6825 set_jumptable_element (jte, GPOINTER_TO_UINT (item->jmp_code), code);
6826 target_code_jti = TARGET_CODE_JTI (i);
6827 /* Load target address */
6828 code = load_element_with_regbase_cond (code, ARMREG_R1, ARMREG_R2, target_code_jti, ARMCOND_AL);
6829 /* Restore registers */
6830 ARM_POP3 (code, ARMREG_R0, ARMREG_R1, ARMREG_R2);
6832 ARM_BX (code, ARMREG_R1);
6833 set_jumptable_element (jte, target_code_jti, fail_tramp);
6835 arm_patch (item->jmp_code, (guchar*)code);
6837 target_code_ins = code;
6838 /* Load target address */
6839 ARM_LDR_IMM (code, ARMREG_R1, ARMREG_PC, 0);
6840 /* Save it to the fourth slot */
6841 ARM_STR_IMM (code, ARMREG_R1, ARMREG_SP, 3 * sizeof (gpointer));
6842 /* Restore registers and branch */
6843 ARM_POP4 (code, ARMREG_R0, ARMREG_R1, ARMREG_IP, ARMREG_PC);
6845 code = arm_emit_value_and_patch_ldr (code, target_code_ins, (gsize)fail_tramp);
6847 item->jmp_code = NULL;
/* Fill in this entry's key now that its code is laid out. */
6850 #ifdef USE_JUMP_TABLES
6852 set_jumptable_element (jte, imt_method_jti, item->key);
6855 code = arm_emit_value_and_patch_ldr (code, imt_method, (guint32)item->key);
6857 /*must emit after unconditional branch*/
6858 if (vtable_target) {
6859 code = arm_emit_value_and_patch_ldr (code, vtable_target, (guint32)vtable);
6860 item->chunk_size += 4;
6861 vtable_target = NULL;
6864 /*We reserve the space for bsearch IMT values after the first entry with an absolute jump*/
6865 constant_pool_starts [i] = code;
6867 code += extra_space;
/* Non-equals entry: binary-search node — branch on unsigned >= (HS). */
6872 #ifdef USE_JUMP_TABLES
6873 code = load_element_with_regbase_cond (code, ARMREG_R1, ARMREG_R2, IMT_METHOD_JTI (i), ARMCOND_AL);
6874 ARM_CMP_REG_REG (code, ARMREG_R0, ARMREG_R1);
6875 code = load_element_with_regbase_cond (code, ARMREG_R1, ARMREG_R2, JUMP_CODE_JTI (i), ARMCOND_HS);
6876 ARM_BX_COND (code, ARMCOND_HS, ARMREG_R1);
6877 item->jmp_code = GUINT_TO_POINTER (JUMP_CODE_JTI (i));
6879 ARM_LDR_IMM (code, ARMREG_R1, ARMREG_PC, 0);
6880 ARM_CMP_REG_REG (code, ARMREG_R0, ARMREG_R1);
6882 item->jmp_code = (guint8*)code;
6883 ARM_B_COND (code, ARMCOND_HS, 0);
/* --- Phase 5: back-patch branches and fill constant pools / key slots. --- */
6889 for (i = 0; i < count; ++i) {
6890 MonoIMTCheckItem *item = imt_entries [i];
6891 if (item->jmp_code) {
6892 if (item->check_target_idx)
6893 #ifdef USE_JUMP_TABLES
6894 set_jumptable_element (jte, GPOINTER_TO_UINT (item->jmp_code), imt_entries [item->check_target_idx]->code_target);
6896 arm_patch (item->jmp_code, imt_entries [item->check_target_idx]->code_target);
6899 if (i > 0 && item->is_equals) {
6901 #ifdef USE_JUMP_TABLES
6902 for (j = i - 1; j >= 0 && !imt_entries [j]->is_equals; --j)
6903 set_jumptable_element (jte, IMT_METHOD_JTI (j), imt_entries [j]->key)
6905 arminstr_t *space_start = constant_pool_starts [i];
6906 for (j = i - 1; j >= 0 && !imt_entries [j]->is_equals; --j) {
6907 space_start = arm_emit_value_and_patch_ldr (space_start, (arminstr_t*)imt_entries [j]->code_target, (guint32)imt_entries [j]->key);
6915 char *buff = g_strdup_printf ("thunk_for_class_%s_%s_entries_%d", vtable->klass->name_space, vtable->klass->name, count);
6916 mono_disassemble_code (NULL, (guint8*)start, size, buff);
6921 #ifndef USE_JUMP_TABLES
6922 g_free (constant_pool_starts);
/* --- Phase 6: make the new code visible to the instruction stream. --- */
6925 mono_arch_flush_icache ((guint8*)start, size);
6926 mono_stats.imt_thunks_size += code - start;
6928 g_assert (DISTANCE (start, code) <= size);
/* Read integer register REG out of a saved MonoContext. */
6933 mono_arch_context_get_int_reg (MonoContext *ctx, int reg)
6935 return ctx->regs [reg];
/* Write VAL into integer register REG of a saved MonoContext. */
6939 mono_arch_context_set_int_reg (MonoContext *ctx, int reg, mgreg_t val)
6941 ctx->regs [reg] = val;
6945 * mono_arch_get_trampolines:
6947 * Return a list of MonoTrampInfo structures describing arch specific trampolines
/* Delegates entirely to the exception-trampoline builder for ARM. */
6951 mono_arch_get_trampolines (gboolean aot)
6953 return mono_arm_get_exception_trampolines (aot);
/* Install a handler-block guard: overwrite the saved LR slot of CLAUSE's
 * exvar (at bp + exvar_offset) with NEW_VALUE, but only if the current
 * value still points into JI's code range (i.e. the frame is still live
 * in that method). NOTE(review): return statements are elided here. */
6957 mono_arch_install_handler_block_guard (MonoJitInfo *ji, MonoJitExceptionInfo *clause, MonoContext *ctx, gpointer new_value)
6964 bp = MONO_CONTEXT_GET_BP (ctx);
6965 lr_loc = (gpointer*)(bp + clause->exvar_offset);
6967 old_value = *lr_loc;
/* Bail out when the stored address is outside the method's code. */
6968 if ((char*)old_value < (char*)ji->code_start || (char*)old_value > ((char*)ji->code_start + ji->code_size))
6971 *lr_loc = new_value;
6976 #if defined(MONO_ARCH_SOFT_DEBUG_SUPPORTED)
6978 * mono_arch_set_breakpoint:
6980 * Set a breakpoint at the native code corresponding to JI at NATIVE_OFFSET.
6981 * The location should contain code emitted by OP_SEQ_POINT.
/*
 * mono_arch_set_breakpoint: (see header comment above)
 *   Activate the breakpoint at IP. Three mechanisms, in order:
 *   - soft breakpoints: rewrite the seq-point nop into BLX LR;
 *   - AOT code: flip the per-offset slot in SeqPointInfo->bp_addrs to the
 *     trigger page (read by the generated code);
 *   - JIT code: patch in a load from bp_trigger_page so the read faults.
 * NOTE(review): the declaration of `code` and some braces are elided.
 */
6984 mono_arch_set_breakpoint (MonoJitInfo *ji, guint8 *ip)
6987 guint32 native_offset = ip - (guint8*)ji->code_start;
6988 MonoDebugOptions *opt = mini_get_debug_options ();
6990 if (opt->soft_breakpoints) {
6991 g_assert (!ji->from_aot);
6993 ARM_BLX_REG (code, ARMREG_LR);
6994 mono_arch_flush_icache (code - 4, 4);
6995 } else if (ji->from_aot) {
6996 SeqPointInfo *info = mono_arch_get_seq_point_info (mono_domain_get (), ji->code_start);
/* bp_addrs is indexed by instruction word (offset / 4). */
6998 g_assert (native_offset % 4 == 0);
6999 g_assert (info->bp_addrs [native_offset / 4] == 0);
7000 info->bp_addrs [native_offset / 4] = bp_trigger_page;
7002 int dreg = ARMREG_LR;
7004 /* Read from another trigger page */
7005 #ifdef USE_JUMP_TABLES
7006 gpointer *jte = mono_jumptable_add_entry ();
7007 code = mono_arm_load_jumptable_entry (code, jte, dreg);
7008 jte [0] = bp_trigger_page;
7010 ARM_LDR_IMM (code, dreg, ARMREG_PC, 0);
7012 *(int*)code = (int)bp_trigger_page;
/* Dereference the trigger page: faults while breakpoints are armed. */
7015 ARM_LDR_IMM (code, dreg, dreg, 0);
7017 mono_arch_flush_icache (code - 16, 16);
7020 /* This is currently implemented by emitting an SWI instruction, which
7021 * qemu/linux seems to convert to a SIGILL.
7023 *(int*)code = (0xef << 24) | 8;
7025 mono_arch_flush_icache (code - 4, 4);
7031 * mono_arch_clear_breakpoint:
7033 * Clear the breakpoint at IP.
/*
 * mono_arch_clear_breakpoint:
 *   Undo mono_arch_set_breakpoint at IP: restore the soft-breakpoint nop,
 * zero the AOT bp_addrs slot, or rewrite the 4 patched words back to nops.
 * NOTE(review): the nop-emitting lines and some braces are elided here.
 */
7036 mono_arch_clear_breakpoint (MonoJitInfo *ji, guint8 *ip)
7038 MonoDebugOptions *opt = mini_get_debug_options ();
7042 if (opt->soft_breakpoints) {
7043 g_assert (!ji->from_aot);
7046 mono_arch_flush_icache (code - 4, 4);
7047 } else if (ji->from_aot) {
7048 guint32 native_offset = ip - (guint8*)ji->code_start;
7049 SeqPointInfo *info = mono_arch_get_seq_point_info (mono_domain_get (), ji->code_start);
7051 g_assert (native_offset % 4 == 0);
7052 g_assert (info->bp_addrs [native_offset / 4] == bp_trigger_page);
7053 info->bp_addrs [native_offset / 4] = 0;
/* JIT case: overwrite the 4 instruction words emitted by set_breakpoint. */
7055 for (i = 0; i < 4; ++i)
7058 mono_arch_flush_icache (ip, code - ip);
7063 * mono_arch_start_single_stepping:
7065 * Start single stepping.
/* Arm single stepping by revoking all access to the single-step trigger
 * page, so the per-seq-point reads of it fault. */
7068 mono_arch_start_single_stepping (void)
7070 if (ss_trigger_page)
7071 mono_mprotect (ss_trigger_page, mono_pagesize (), 0);
7077 * mono_arch_stop_single_stepping:
7079 * Stop single stepping.
/* Disarm single stepping by making the trigger page readable again. */
7082 mono_arch_stop_single_stepping (void)
7084 if (ss_trigger_page)
7085 mono_mprotect (ss_trigger_page, mono_pagesize (), MONO_MMAP_READ);
7091 #define DBG_SIGNAL SIGBUS
7093 #define DBG_SIGNAL SIGSEGV
7097 * mono_arch_is_single_step_event:
7099 * Return whenever the machine state in SIGCTX corresponds to a single
/* Decide whether the fault described by INFO hit the single-step trigger
 * page. NOTE(review): the return statements are elided in this listing. */
7103 mono_arch_is_single_step_event (void *info, void *sigctx)
7105 siginfo_t *sinfo = info;
7107 if (!ss_trigger_page)
7110 /* Sometimes the address is off by 4 */
7111 if (sinfo->si_addr >= ss_trigger_page && (guint8*)sinfo->si_addr <= (guint8*)ss_trigger_page + 128)
7118 * mono_arch_is_breakpoint_event:
7120 * Return whenever the machine state in SIGCTX corresponds to a breakpoint event.
/* Decide whether the fault described by INFO is a breakpoint hit: correct
 * signal (DBG_SIGNAL) and fault address inside the bp trigger page.
 * NOTE(review): the return statements are elided in this listing. */
7123 mono_arch_is_breakpoint_event (void *info, void *sigctx)
7125 siginfo_t *sinfo = info;
7127 if (!ss_trigger_page)
7130 if (sinfo->si_signo == DBG_SIGNAL) {
7131 /* Sometimes the address is off by 4 */
7132 if (sinfo->si_addr >= bp_trigger_page && (guint8*)sinfo->si_addr <= (guint8*)bp_trigger_page + 128)
7142 * mono_arch_skip_breakpoint:
7144 * See mini-amd64.c for docs.
/* Resume after a breakpoint fault by stepping the PC over the faulting
 * 4-byte load instruction. */
7147 mono_arch_skip_breakpoint (MonoContext *ctx, MonoJitInfo *ji)
7149 MONO_CONTEXT_SET_IP (ctx, (guint8*)MONO_CONTEXT_GET_IP (ctx) + 4);
7153 * mono_arch_skip_single_step:
7155 * See mini-amd64.c for docs.
/* Resume after a single-step fault by advancing the PC one instruction. */
7158 mono_arch_skip_single_step (MonoContext *ctx)
7160 MONO_CONTEXT_SET_IP (ctx, (guint8*)MONO_CONTEXT_GET_IP (ctx) + 4);
7163 #endif /* MONO_ARCH_SOFT_DEBUG_SUPPORTED */
7166 * mono_arch_get_seq_point_info:
7168 * See mini-amd64.c for docs.
/*
 * mono_arch_get_seq_point_info:
 *   Return (creating and caching on first use) the per-method SeqPointInfo
 * for CODE, keyed in the domain's arch_seq_points hash. The allocation is
 * sized with one bp_addrs slot per instruction word of the method.
 * NOTE(review): lookup-hit early return and some lines are elided here.
 */
7171 mono_arch_get_seq_point_info (MonoDomain *domain, guint8 *code)
7176 // FIXME: Add a free function
7178 mono_domain_lock (domain);
7179 info = g_hash_table_lookup (domain_jit_info (domain)->arch_seq_points,
7181 mono_domain_unlock (domain);
7184 ji = mono_jit_info_table_find (domain, (char*)code);
7187 info = g_malloc0 (sizeof (SeqPointInfo) + ji->code_size);
7189 info->ss_trigger_page = ss_trigger_page;
7190 info->bp_trigger_page = bp_trigger_page;
7192 mono_domain_lock (domain);
7193 g_hash_table_insert (domain_jit_info (domain)->arch_seq_points,
7195 mono_domain_unlock (domain);
/* Initialize an extended LMF frame: link to the previous LMF and tag the
 * pointer's bit 1 so unwinders recognize this as a MonoLMFExt. */
7202 mono_arch_init_lmf_ext (MonoLMFExt *ext, gpointer prev_lmf)
7204 ext->lmf.previous_lmf = prev_lmf;
7205 /* Mark that this is a MonoLMFExt */
7206 ext->lmf.previous_lmf = (gpointer)(((gssize)ext->lmf.previous_lmf) | 2);
7207 ext->lmf.sp = (gssize)ext;
7211 * mono_arch_set_target:
7213 * Set the target architecture the JIT backend should generate code for, in the form
7214 * of a GNU target triplet. Only used in AOT mode.
/*
 * mono_arch_set_target: (see header comment above)
 *   Parse the GNU target triple MTRIPLE and set the ARM feature flags
 * (v5/v6/v7/v7s, thumb, eabi) used by the AOT code generator. Checks use
 * strstr, so e.g. "armv7s" triples also satisfy the "armv7" test below
 * before the dedicated "armv7s" check adds v7s_supported.
 */
7217 mono_arch_set_target (char *mtriple)
7219 /* The GNU target triple format is not very well documented */
7220 if (strstr (mtriple, "armv7")) {
7221 v5_supported = TRUE;
7222 v6_supported = TRUE;
7223 v7_supported = TRUE;
7225 if (strstr (mtriple, "armv6")) {
7226 v5_supported = TRUE;
7227 v6_supported = TRUE;
7229 if (strstr (mtriple, "armv7s")) {
7230 v7s_supported = TRUE;
7232 if (strstr (mtriple, "thumbv7s")) {
7233 v5_supported = TRUE;
7234 v6_supported = TRUE;
7235 v7_supported = TRUE;
7236 v7s_supported = TRUE;
7237 thumb_supported = TRUE;
7238 thumb2_supported = TRUE;
/* Apple triples imply at least ARMv6 + Thumb. */
7240 if (strstr (mtriple, "darwin") || strstr (mtriple, "ios")) {
7241 v5_supported = TRUE;
7242 v6_supported = TRUE;
7243 thumb_supported = TRUE;
7246 if (strstr (mtriple, "gnueabi"))
7247 eabi_supported = TRUE;
/* Report whether OPCODE has a native implementation: the 32-bit atomic ops
 * require ARMv7 (LDREX/STREX based). NOTE(review): the default case and
 * closing lines are elided in this listing. */
7251 mono_arch_opcode_supported (int opcode)
7254 case OP_ATOMIC_ADD_I4:
7255 case OP_ATOMIC_EXCHANGE_I4:
7256 case OP_ATOMIC_CAS_I4:
7257 return v7_supported;
7263 #if defined(ENABLE_GSHAREDVT)
7265 #include "../../../mono-extensions/mono/mini/mini-arm-gsharedvt.c"
7267 #endif /* !MONOTOUCH */