2 * mini-arm.c: ARM backend for the Mono code generator
5 * Paolo Molaro (lupus@ximian.com)
6 * Dietmar Maurer (dietmar@ximian.com)
8 * (C) 2003 Ximian, Inc.
9 * Copyright 2003-2011 Novell, Inc (http://www.novell.com)
10 * Copyright 2011 Xamarin, Inc (http://www.xamarin.com)
15 #include <mono/metadata/abi-details.h>
16 #include <mono/metadata/appdomain.h>
17 #include <mono/metadata/debug-helpers.h>
18 #include <mono/utils/mono-mmap.h>
19 #include <mono/utils/mono-hwcap-arm.h>
25 #include "debugger-agent.h"
27 #include "mono/arch/arm/arm-vfp-codegen.h"
29 /* Sanity check: This makes no sense */
30 #if defined(ARM_FPU_NONE) && (defined(ARM_FPU_VFP) || defined(ARM_FPU_VFP_HARD))
31 #error "ARM_FPU_NONE is defined while one of ARM_FPU_VFP/ARM_FPU_VFP_HARD is defined"
35 * IS_SOFT_FLOAT: Is full software floating point used?
36 * IS_HARD_FLOAT: Is full hardware floating point used?
37 * IS_VFP: Is hardware floating point with software ABI used?
39 * These are not necessarily constants, e.g. IS_SOFT_FLOAT and
40 * IS_VFP may delegate to mono_arch_is_soft_float ().
43 #if defined(ARM_FPU_VFP_HARD)
44 #define IS_SOFT_FLOAT (FALSE)
45 #define IS_HARD_FLOAT (TRUE)
47 #elif defined(ARM_FPU_NONE)
48 #define IS_SOFT_FLOAT (mono_arch_is_soft_float ())
49 #define IS_HARD_FLOAT (FALSE)
50 #define IS_VFP (!mono_arch_is_soft_float ())
52 #define IS_SOFT_FLOAT (FALSE)
53 #define IS_HARD_FLOAT (FALSE)
57 #if defined(__ARM_EABI__) && defined(__linux__) && !defined(PLATFORM_ANDROID) && !defined(__native_client__)
58 #define HAVE_AEABI_READ_TP 1
61 #ifdef __native_client_codegen__
62 const guint kNaClAlignment = kNaClAlignmentARM;
63 const guint kNaClAlignmentMask = kNaClAlignmentMaskARM;
64 gint8 nacl_align_byte = -1; /* 0xff */
67 mono_arch_nacl_pad (guint8 *code, int pad)
69 /* Not yet properly implemented. */
70 g_assert_not_reached ();
75 mono_arch_nacl_skip_nops (guint8 *code)
77 /* Not yet properly implemented. */
78 g_assert_not_reached ();
82 #endif /* __native_client_codegen__ */
84 #define ALIGN_TO(val,align) ((((guint64)val) + ((align) - 1)) & ~((align) - 1))
87 void sys_icache_invalidate (void *start, size_t len);
90 /* This mutex protects architecture specific caches */
91 #define mono_mini_arch_lock() mono_mutex_lock (&mini_arch_mutex)
92 #define mono_mini_arch_unlock() mono_mutex_unlock (&mini_arch_mutex)
93 static mono_mutex_t mini_arch_mutex;
95 static gboolean v5_supported = FALSE;
96 static gboolean v6_supported = FALSE;
97 static gboolean v7_supported = FALSE;
98 static gboolean v7s_supported = FALSE;
99 static gboolean thumb_supported = FALSE;
100 static gboolean thumb2_supported = FALSE;
102 * Whenever to use the ARM EABI
104 static gboolean eabi_supported = FALSE;
107 * Whenever to use the iphone ABI extensions:
108 * http://developer.apple.com/library/ios/documentation/Xcode/Conceptual/iPhoneOSABIReference/index.html
109 * Basically, r7 is used as a frame pointer and it should point to the saved r7 + lr.
110 * This is required for debugging/profiling tools to work, but it has some overhead so it should
111 * only be turned on in debug builds.
113 static gboolean iphone_abi = FALSE;
116 * The FPU we are generating code for. This is NOT runtime configurable right now,
117 * since some things like MONO_ARCH_CALLEE_FREGS still depend on defines.
119 static MonoArmFPU arm_fpu;
121 #if defined(ARM_FPU_VFP_HARD)
123 * On armhf, d0-d7 are used for argument passing and d8-d15
124 * must be preserved across calls, which leaves us no room
125 * for scratch registers. So we use d14-d15 but back up their
126 * previous contents to a stack slot before using them - see
127 * mono_arm_emit_vfp_scratch_save/_restore ().
129 static int vfp_scratch1 = ARM_VFP_D14;
130 static int vfp_scratch2 = ARM_VFP_D15;
133 * On armel, d0-d7 do not need to be preserved, so we can
134 * freely make use of them as scratch registers.
136 static int vfp_scratch1 = ARM_VFP_D0;
137 static int vfp_scratch2 = ARM_VFP_D1;
142 static volatile int ss_trigger_var = 0;
144 static gpointer single_step_func_wrapper;
145 static gpointer breakpoint_func_wrapper;
148 * The code generated for sequence points reads from this location, which is
149 * made read-only when single stepping is enabled.
151 static gpointer ss_trigger_page;
153 /* Enabled breakpoints read from this trigger page */
154 static gpointer bp_trigger_page;
158 * floating point support: on ARM it is a mess, there are at least 3
159 * different setups, each of which binary incompat with the other.
160 * 1) FPA: old and ugly, but unfortunately what current distros use
161 * the double binary format has the two words swapped. 8 double registers.
162 * Implemented usually by kernel emulation.
163 * 2) softfloat: the compiler emulates all the fp ops. Usually uses the
164 * ugly swapped double format (I guess a softfloat-vfp exists, too, though).
165 * 3) VFP: the new and actually sensible and useful FP support. Implemented
166 * in HW or kernel-emulated, requires new tools. I think this is what symbian uses.
168 * We do not care about FPA. We will support soft float and VFP.
170 int mono_exc_esp_offset = 0;
172 #define arm_is_imm12(v) ((v) > -4096 && (v) < 4096)
173 #define arm_is_imm8(v) ((v) > -256 && (v) < 256)
174 #define arm_is_fpimm8(v) ((v) >= -1020 && (v) <= 1020)
176 #define LDR_MASK ((0xf << ARMCOND_SHIFT) | (3 << 26) | (1 << 22) | (1 << 20) | (15 << 12))
177 #define LDR_PC_VAL ((ARMCOND_AL << ARMCOND_SHIFT) | (1 << 26) | (0 << 22) | (1 << 20) | (15 << 12))
178 #define IS_LDR_PC(val) (((val) & LDR_MASK) == LDR_PC_VAL)
180 #define ADD_LR_PC_4 ((ARMCOND_AL << ARMCOND_SHIFT) | (1 << 25) | (1 << 23) | (ARMREG_PC << 16) | (ARMREG_LR << 12) | 4)
181 #define MOV_LR_PC ((ARMCOND_AL << ARMCOND_SHIFT) | (1 << 24) | (0xa << 20) | (ARMREG_LR << 12) | ARMREG_PC)
182 //#define DEBUG_IMT 0
184 /* A variant of ARM_LDR_IMM which can handle large offsets */
185 #define ARM_LDR_IMM_GENERAL(code, dreg, basereg, offset, scratch_reg) do { \
186 if (arm_is_imm12 ((offset))) { \
187 ARM_LDR_IMM (code, (dreg), (basereg), (offset)); \
189 g_assert ((scratch_reg) != (basereg)); \
190 code = mono_arm_emit_load_imm (code, (scratch_reg), (offset)); \
191 ARM_LDR_REG_REG (code, (dreg), (basereg), (scratch_reg)); \
195 #define ARM_STR_IMM_GENERAL(code, dreg, basereg, offset, scratch_reg) do { \
196 if (arm_is_imm12 ((offset))) { \
197 ARM_STR_IMM (code, (dreg), (basereg), (offset)); \
199 g_assert ((scratch_reg) != (basereg)); \
200 code = mono_arm_emit_load_imm (code, (scratch_reg), (offset)); \
201 ARM_STR_REG_REG (code, (dreg), (basereg), (scratch_reg)); \
206 static void mono_arch_compute_omit_fp (MonoCompile *cfg);
210 mono_arch_regname (int reg)
212 static const char * rnames[] = {
213 "arm_r0", "arm_r1", "arm_r2", "arm_r3", "arm_v1",
214 "arm_v2", "arm_v3", "arm_v4", "arm_v5", "arm_v6",
215 "arm_v7", "arm_fp", "arm_ip", "arm_sp", "arm_lr",
218 if (reg >= 0 && reg < 16)
224 mono_arch_fregname (int reg)
226 static const char * rnames[] = {
227 "arm_f0", "arm_f1", "arm_f2", "arm_f3", "arm_f4",
228 "arm_f5", "arm_f6", "arm_f7", "arm_f8", "arm_f9",
229 "arm_f10", "arm_f11", "arm_f12", "arm_f13", "arm_f14",
230 "arm_f15", "arm_f16", "arm_f17", "arm_f18", "arm_f19",
231 "arm_f20", "arm_f21", "arm_f22", "arm_f23", "arm_f24",
232 "arm_f25", "arm_f26", "arm_f27", "arm_f28", "arm_f29",
235 if (reg >= 0 && reg < 32)
243 emit_big_add (guint8 *code, int dreg, int sreg, int imm)
245 int imm8, rot_amount;
246 if ((imm8 = mono_arm_is_rotated_imm8 (imm, &rot_amount)) >= 0) {
247 ARM_ADD_REG_IMM (code, dreg, sreg, imm8, rot_amount);
251 code = mono_arm_emit_load_imm (code, ARMREG_IP, imm);
252 ARM_ADD_REG_REG (code, dreg, sreg, ARMREG_IP);
254 code = mono_arm_emit_load_imm (code, dreg, imm);
255 ARM_ADD_REG_REG (code, dreg, dreg, sreg);
260 /* If dreg == sreg, this clobbers IP */
262 emit_sub_imm (guint8 *code, int dreg, int sreg, int imm)
264 int imm8, rot_amount;
265 if ((imm8 = mono_arm_is_rotated_imm8 (imm, &rot_amount)) >= 0) {
266 ARM_SUB_REG_IMM (code, dreg, sreg, imm8, rot_amount);
270 code = mono_arm_emit_load_imm (code, ARMREG_IP, imm);
271 ARM_SUB_REG_REG (code, dreg, sreg, ARMREG_IP);
273 code = mono_arm_emit_load_imm (code, dreg, imm);
274 ARM_SUB_REG_REG (code, dreg, dreg, sreg);
280 emit_memcpy (guint8 *code, int size, int dreg, int doffset, int sreg, int soffset)
282 /* we can use r0-r3, since this is called only for incoming args on the stack */
283 if (size > sizeof (gpointer) * 4) {
285 code = emit_big_add (code, ARMREG_R0, sreg, soffset);
286 code = emit_big_add (code, ARMREG_R1, dreg, doffset);
287 start_loop = code = mono_arm_emit_load_imm (code, ARMREG_R2, size);
288 ARM_LDR_IMM (code, ARMREG_R3, ARMREG_R0, 0);
289 ARM_STR_IMM (code, ARMREG_R3, ARMREG_R1, 0);
290 ARM_ADD_REG_IMM8 (code, ARMREG_R0, ARMREG_R0, 4);
291 ARM_ADD_REG_IMM8 (code, ARMREG_R1, ARMREG_R1, 4);
292 ARM_SUBS_REG_IMM8 (code, ARMREG_R2, ARMREG_R2, 4);
293 ARM_B_COND (code, ARMCOND_NE, 0);
294 arm_patch (code - 4, start_loop);
297 if (arm_is_imm12 (doffset) && arm_is_imm12 (doffset + size) &&
298 arm_is_imm12 (soffset) && arm_is_imm12 (soffset + size)) {
300 ARM_LDR_IMM (code, ARMREG_LR, sreg, soffset);
301 ARM_STR_IMM (code, ARMREG_LR, dreg, doffset);
307 code = emit_big_add (code, ARMREG_R0, sreg, soffset);
308 code = emit_big_add (code, ARMREG_R1, dreg, doffset);
309 doffset = soffset = 0;
311 ARM_LDR_IMM (code, ARMREG_LR, ARMREG_R0, soffset);
312 ARM_STR_IMM (code, ARMREG_LR, ARMREG_R1, doffset);
318 g_assert (size == 0);
323 emit_call_reg (guint8 *code, int reg)
326 ARM_BLX_REG (code, reg);
328 #ifdef USE_JUMP_TABLES
329 g_assert_not_reached ();
331 ARM_MOV_REG_REG (code, ARMREG_LR, ARMREG_PC);
335 ARM_MOV_REG_REG (code, ARMREG_PC, reg);
341 emit_call_seq (MonoCompile *cfg, guint8 *code)
343 #ifdef USE_JUMP_TABLES
344 code = mono_arm_patchable_bl (code, ARMCOND_AL);
346 if (cfg->method->dynamic) {
347 ARM_LDR_IMM (code, ARMREG_IP, ARMREG_PC, 0);
349 *(gpointer*)code = NULL;
351 code = emit_call_reg (code, ARMREG_IP);
360 mono_arm_patchable_b (guint8 *code, int cond)
362 #ifdef USE_JUMP_TABLES
365 jte = mono_jumptable_add_entry ();
366 code = mono_arm_load_jumptable_entry (code, jte, ARMREG_IP);
367 ARM_BX_COND (code, cond, ARMREG_IP);
369 ARM_B_COND (code, cond, 0);
375 mono_arm_patchable_bl (guint8 *code, int cond)
377 #ifdef USE_JUMP_TABLES
380 jte = mono_jumptable_add_entry ();
381 code = mono_arm_load_jumptable_entry (code, jte, ARMREG_IP);
382 ARM_BLX_REG_COND (code, cond, ARMREG_IP);
384 ARM_BL_COND (code, cond, 0);
389 #ifdef USE_JUMP_TABLES
391 mono_arm_load_jumptable_entry_addr (guint8 *code, gpointer *jte, ARMReg reg)
393 ARM_MOVW_REG_IMM (code, reg, GPOINTER_TO_UINT(jte) & 0xffff);
394 ARM_MOVT_REG_IMM (code, reg, (GPOINTER_TO_UINT(jte) >> 16) & 0xffff);
399 mono_arm_load_jumptable_entry (guint8 *code, gpointer* jte, ARMReg reg)
401 code = mono_arm_load_jumptable_entry_addr (code, jte, reg);
402 ARM_LDR_IMM (code, reg, reg, 0);
408 emit_move_return_value (MonoCompile *cfg, MonoInst *ins, guint8 *code)
410 switch (ins->opcode) {
413 case OP_FCALL_MEMBASE:
415 MonoType *sig_ret = mini_type_get_underlying_type (NULL, ((MonoCallInst*)ins)->signature->ret);
416 if (sig_ret->type == MONO_TYPE_R4) {
418 ARM_CVTS (code, ins->dreg, ARM_VFP_F0);
420 ARM_FMSR (code, ins->dreg, ARMREG_R0);
421 ARM_CVTS (code, ins->dreg, ins->dreg);
425 ARM_CPYD (code, ins->dreg, ARM_VFP_D0);
427 ARM_FMDRR (code, ARMREG_R0, ARMREG_R1, ins->dreg);
440 * Emit code to push an LMF structure on the LMF stack.
441 * On arm, this is intermixed with the initialization of other fields of the structure.
444 emit_save_lmf (MonoCompile *cfg, guint8 *code, gint32 lmf_offset)
446 gboolean get_lmf_fast = FALSE;
449 #ifdef HAVE_AEABI_READ_TP
450 gint32 lmf_addr_tls_offset = mono_get_lmf_addr_tls_offset ();
452 if (lmf_addr_tls_offset != -1) {
455 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_INTERNAL_METHOD,
456 (gpointer)"__aeabi_read_tp");
457 code = emit_call_seq (cfg, code);
459 ARM_LDR_IMM (code, ARMREG_R0, ARMREG_R0, lmf_addr_tls_offset);
465 if (cfg->method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) {
468 /* Inline mono_get_lmf_addr () */
469 /* jit_tls = pthread_getspecific (mono_jit_tls_id); lmf_addr = &jit_tls->lmf; */
471 /* Load mono_jit_tls_id */
473 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_JIT_TLS_ID, NULL);
474 ARM_LDR_IMM (code, ARMREG_R0, ARMREG_PC, 0);
476 *(gpointer*)code = NULL;
478 ARM_LDR_REG_REG (code, ARMREG_R0, ARMREG_PC, ARMREG_R0);
479 /* call pthread_getspecific () */
480 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_INTERNAL_METHOD,
481 (gpointer)"pthread_getspecific");
482 code = emit_call_seq (cfg, code);
483 /* lmf_addr = &jit_tls->lmf */
484 lmf_offset = MONO_STRUCT_OFFSET (MonoJitTlsData, lmf);
485 g_assert (arm_is_imm8 (lmf_offset));
486 ARM_ADD_REG_IMM (code, ARMREG_R0, ARMREG_R0, lmf_offset, 0);
493 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_INTERNAL_METHOD,
494 (gpointer)"mono_get_lmf_addr");
495 code = emit_call_seq (cfg, code);
497 /* we build the MonoLMF structure on the stack - see mini-arm.h */
498 /* lmf_offset is the offset from the previous stack pointer,
499 * alloc_size is the total stack space allocated, so the offset
500 * of MonoLMF from the current stack ptr is alloc_size - lmf_offset.
501 * The pointer to the struct is put in r1 (new_lmf).
502 * ip is used as scratch
503 * The callee-saved registers are already in the MonoLMF structure
505 code = emit_big_add (code, ARMREG_R1, ARMREG_SP, lmf_offset);
506 /* r0 is the result from mono_get_lmf_addr () */
507 ARM_STR_IMM (code, ARMREG_R0, ARMREG_R1, MONO_STRUCT_OFFSET (MonoLMF, lmf_addr));
508 /* new_lmf->previous_lmf = *lmf_addr */
509 ARM_LDR_IMM (code, ARMREG_IP, ARMREG_R0, MONO_STRUCT_OFFSET (MonoLMF, previous_lmf));
510 ARM_STR_IMM (code, ARMREG_IP, ARMREG_R1, MONO_STRUCT_OFFSET (MonoLMF, previous_lmf));
511 /* *(lmf_addr) = r1 */
512 ARM_STR_IMM (code, ARMREG_R1, ARMREG_R0, MONO_STRUCT_OFFSET (MonoLMF, previous_lmf));
513 /* Skip method (only needed for trampoline LMF frames) */
514 ARM_STR_IMM (code, ARMREG_SP, ARMREG_R1, MONO_STRUCT_OFFSET (MonoLMF, sp));
515 ARM_STR_IMM (code, ARMREG_FP, ARMREG_R1, MONO_STRUCT_OFFSET (MonoLMF, fp));
516 /* save the current IP */
517 ARM_MOV_REG_REG (code, ARMREG_IP, ARMREG_PC);
518 ARM_STR_IMM (code, ARMREG_IP, ARMREG_R1, MONO_STRUCT_OFFSET (MonoLMF, ip));
520 for (i = 0; i < sizeof (MonoLMF); i += sizeof (mgreg_t))
521 mini_gc_set_slot_type_from_fp (cfg, lmf_offset + i, SLOT_NOREF);
532 emit_float_args (MonoCompile *cfg, MonoCallInst *inst, guint8 *code, int *max_len, guint *offset)
536 for (list = inst->float_args; list; list = list->next) {
537 FloatArgData *fad = list->data;
538 MonoInst *var = get_vreg_to_inst (cfg, fad->vreg);
539 gboolean imm = arm_is_fpimm8 (var->inst_offset);
541 /* 4+1 insns for emit_big_add () and 1 for FLDS. */
547 if (*offset + *max_len > cfg->code_size) {
548 cfg->code_size += *max_len;
549 cfg->native_code = g_realloc (cfg->native_code, cfg->code_size);
551 code = cfg->native_code + *offset;
555 code = emit_big_add (code, ARMREG_LR, var->inst_basereg, var->inst_offset);
556 ARM_FLDS (code, fad->hreg, ARMREG_LR, 0);
558 ARM_FLDS (code, fad->hreg, var->inst_basereg, var->inst_offset);
560 *offset = code - cfg->native_code;
567 mono_arm_emit_vfp_scratch_save (MonoCompile *cfg, guint8 *code, int reg)
571 g_assert (reg == vfp_scratch1 || reg == vfp_scratch2);
573 inst = (MonoInst *) cfg->arch.vfp_scratch_slots [reg == vfp_scratch1 ? 0 : 1];
576 if (!arm_is_fpimm8 (inst->inst_offset)) {
577 code = emit_big_add (code, ARMREG_LR, inst->inst_basereg, inst->inst_offset);
578 ARM_FSTD (code, reg, ARMREG_LR, 0);
580 ARM_FSTD (code, reg, inst->inst_basereg, inst->inst_offset);
587 mono_arm_emit_vfp_scratch_restore (MonoCompile *cfg, guint8 *code, int reg)
591 g_assert (reg == vfp_scratch1 || reg == vfp_scratch2);
593 inst = (MonoInst *) cfg->arch.vfp_scratch_slots [reg == vfp_scratch1 ? 0 : 1];
596 if (!arm_is_fpimm8 (inst->inst_offset)) {
597 code = emit_big_add (code, ARMREG_LR, inst->inst_basereg, inst->inst_offset);
598 ARM_FLDD (code, reg, ARMREG_LR, 0);
600 ARM_FLDD (code, reg, inst->inst_basereg, inst->inst_offset);
609 * Emit code to pop an LMF structure from the LMF stack.
612 emit_restore_lmf (MonoCompile *cfg, guint8 *code, gint32 lmf_offset)
616 if (lmf_offset < 32) {
617 basereg = cfg->frame_reg;
622 code = emit_big_add (code, ARMREG_R2, cfg->frame_reg, lmf_offset);
625 /* ip = previous_lmf */
626 ARM_LDR_IMM (code, ARMREG_IP, basereg, offset + MONO_STRUCT_OFFSET (MonoLMF, previous_lmf));
628 ARM_LDR_IMM (code, ARMREG_LR, basereg, offset + MONO_STRUCT_OFFSET (MonoLMF, lmf_addr));
629 /* *(lmf_addr) = previous_lmf */
630 ARM_STR_IMM (code, ARMREG_IP, ARMREG_LR, MONO_STRUCT_OFFSET (MonoLMF, previous_lmf));
635 #endif /* #ifndef DISABLE_JIT */
638 * mono_arch_get_argument_info:
639 * @csig: a method signature
640 * @param_count: the number of parameters to consider
641 * @arg_info: an array to store the result infos
643 * Gathers information on parameters such as size, alignment and
644 * padding. arg_info should be large enought to hold param_count + 1 entries.
646 * Returns the size of the activation frame.
649 mono_arch_get_argument_info (MonoGenericSharingContext *gsctx, MonoMethodSignature *csig, int param_count, MonoJitArgumentInfo *arg_info)
651 int k, frame_size = 0;
652 guint32 size, align, pad;
656 t = mini_type_get_underlying_type (gsctx, csig->ret);
657 if (MONO_TYPE_ISSTRUCT (t)) {
658 frame_size += sizeof (gpointer);
662 arg_info [0].offset = offset;
665 frame_size += sizeof (gpointer);
669 arg_info [0].size = frame_size;
671 for (k = 0; k < param_count; k++) {
672 size = mini_type_stack_size_full (NULL, csig->params [k], &align, csig->pinvoke);
674 /* ignore alignment for now */
677 frame_size += pad = (align - (frame_size & (align - 1))) & (align - 1);
678 arg_info [k].pad = pad;
680 arg_info [k + 1].pad = 0;
681 arg_info [k + 1].size = size;
683 arg_info [k + 1].offset = offset;
687 align = MONO_ARCH_FRAME_ALIGNMENT;
688 frame_size += pad = (align - (frame_size & (align - 1))) & (align - 1);
689 arg_info [k].pad = pad;
694 #define MAX_ARCH_DELEGATE_PARAMS 3
697 get_delegate_invoke_impl (gboolean has_target, gboolean param_count, guint32 *code_size)
699 guint8 *code, *start;
702 start = code = mono_global_codeman_reserve (12);
704 /* Replace the this argument with the target */
705 ARM_LDR_IMM (code, ARMREG_IP, ARMREG_R0, MONO_STRUCT_OFFSET (MonoDelegate, method_ptr));
706 ARM_LDR_IMM (code, ARMREG_R0, ARMREG_R0, MONO_STRUCT_OFFSET (MonoDelegate, target));
707 ARM_MOV_REG_REG (code, ARMREG_PC, ARMREG_IP);
709 g_assert ((code - start) <= 12);
711 mono_arch_flush_icache (start, 12);
715 size = 8 + param_count * 4;
716 start = code = mono_global_codeman_reserve (size);
718 ARM_LDR_IMM (code, ARMREG_IP, ARMREG_R0, MONO_STRUCT_OFFSET (MonoDelegate, method_ptr));
719 /* slide down the arguments */
720 for (i = 0; i < param_count; ++i) {
721 ARM_MOV_REG_REG (code, (ARMREG_R0 + i), (ARMREG_R0 + i + 1));
723 ARM_MOV_REG_REG (code, ARMREG_PC, ARMREG_IP);
725 g_assert ((code - start) <= size);
727 mono_arch_flush_icache (start, size);
731 *code_size = code - start;
737 * mono_arch_get_delegate_invoke_impls:
739 * Return a list of MonoAotTrampInfo structures for the delegate invoke impl
743 mono_arch_get_delegate_invoke_impls (void)
751 code = get_delegate_invoke_impl (TRUE, 0, &code_len);
752 res = g_slist_prepend (res, mono_tramp_info_create ("delegate_invoke_impl_has_target", code, code_len, NULL, NULL));
754 for (i = 0; i <= MAX_ARCH_DELEGATE_PARAMS; ++i) {
755 code = get_delegate_invoke_impl (FALSE, i, &code_len);
756 tramp_name = g_strdup_printf ("delegate_invoke_impl_target_%d", i);
757 res = g_slist_prepend (res, mono_tramp_info_create (tramp_name, code, code_len, NULL, NULL));
765 mono_arch_get_delegate_invoke_impl (MonoMethodSignature *sig, gboolean has_target)
767 guint8 *code, *start;
770 /* FIXME: Support more cases */
771 sig_ret = mini_type_get_underlying_type (NULL, sig->ret);
772 if (MONO_TYPE_ISSTRUCT (sig_ret))
776 static guint8* cached = NULL;
777 mono_mini_arch_lock ();
779 mono_mini_arch_unlock ();
784 start = mono_aot_get_trampoline ("delegate_invoke_impl_has_target");
786 start = get_delegate_invoke_impl (TRUE, 0, NULL);
788 mono_mini_arch_unlock ();
791 static guint8* cache [MAX_ARCH_DELEGATE_PARAMS + 1] = {NULL};
794 if (sig->param_count > MAX_ARCH_DELEGATE_PARAMS)
796 for (i = 0; i < sig->param_count; ++i)
797 if (!mono_is_regsize_var (sig->params [i]))
800 mono_mini_arch_lock ();
801 code = cache [sig->param_count];
803 mono_mini_arch_unlock ();
808 char *name = g_strdup_printf ("delegate_invoke_impl_target_%d", sig->param_count);
809 start = mono_aot_get_trampoline (name);
812 start = get_delegate_invoke_impl (FALSE, sig->param_count, NULL);
814 cache [sig->param_count] = start;
815 mono_mini_arch_unlock ();
823 mono_arch_get_delegate_virtual_invoke_impl (MonoMethodSignature *sig, MonoMethod *method, int offset, gboolean load_imt_reg)
829 mono_arch_get_this_arg_from_call (mgreg_t *regs, guint8 *code)
831 return (gpointer)regs [ARMREG_R0];
835 * Initialize the cpu to execute managed code.
838 mono_arch_cpu_init (void)
840 i8_align = MONO_ABI_ALIGNOF (gint64);
841 #ifdef MONO_CROSS_COMPILE
842 /* Need to set the alignment of i8 since it can different on the target */
843 #ifdef TARGET_ANDROID
845 mono_type_set_alignment (MONO_TYPE_I8, i8_align);
851 create_function_wrapper (gpointer function)
853 guint8 *start, *code;
855 start = code = mono_global_codeman_reserve (96);
858 * Construct the MonoContext structure on the stack.
861 ARM_SUB_REG_IMM8 (code, ARMREG_SP, ARMREG_SP, sizeof (MonoContext));
863 /* save ip, lr and pc into their correspodings ctx.regs slots. */
864 ARM_STR_IMM (code, ARMREG_IP, ARMREG_SP, MONO_STRUCT_OFFSET (MonoContext, regs) + sizeof (mgreg_t) * ARMREG_IP);
865 ARM_STR_IMM (code, ARMREG_LR, ARMREG_SP, MONO_STRUCT_OFFSET (MonoContext, regs) + 4 * ARMREG_LR);
866 ARM_STR_IMM (code, ARMREG_LR, ARMREG_SP, MONO_STRUCT_OFFSET (MonoContext, regs) + 4 * ARMREG_PC);
868 /* save r0..r10 and fp */
869 ARM_ADD_REG_IMM8 (code, ARMREG_IP, ARMREG_SP, MONO_STRUCT_OFFSET (MonoContext, regs));
870 ARM_STM (code, ARMREG_IP, 0x0fff);
872 /* now we can update fp. */
873 ARM_MOV_REG_REG (code, ARMREG_FP, ARMREG_SP);
875 /* make ctx.esp hold the actual value of sp at the beginning of this method. */
876 ARM_ADD_REG_IMM8 (code, ARMREG_R0, ARMREG_FP, sizeof (MonoContext));
877 ARM_STR_IMM (code, ARMREG_R0, ARMREG_IP, 4 * ARMREG_SP);
878 ARM_STR_IMM (code, ARMREG_R0, ARMREG_FP, MONO_STRUCT_OFFSET (MonoContext, regs) + 4 * ARMREG_SP);
880 /* make ctx.eip hold the address of the call. */
881 ARM_SUB_REG_IMM8 (code, ARMREG_LR, ARMREG_LR, 4);
882 ARM_STR_IMM (code, ARMREG_LR, ARMREG_SP, MONO_STRUCT_OFFSET (MonoContext, pc));
884 /* r0 now points to the MonoContext */
885 ARM_MOV_REG_REG (code, ARMREG_R0, ARMREG_FP);
888 #ifdef USE_JUMP_TABLES
890 gpointer *jte = mono_jumptable_add_entry ();
891 code = mono_arm_load_jumptable_entry (code, jte, ARMREG_IP);
895 ARM_LDR_IMM (code, ARMREG_IP, ARMREG_PC, 0);
897 *(gpointer*)code = function;
900 ARM_BLX_REG (code, ARMREG_IP);
902 /* we're back; save ctx.eip and ctx.esp into the corresponding regs slots. */
903 ARM_LDR_IMM (code, ARMREG_R0, ARMREG_FP, MONO_STRUCT_OFFSET (MonoContext, pc));
904 ARM_STR_IMM (code, ARMREG_R0, ARMREG_FP, MONO_STRUCT_OFFSET (MonoContext, regs) + 4 * ARMREG_LR);
905 ARM_STR_IMM (code, ARMREG_R0, ARMREG_FP, MONO_STRUCT_OFFSET (MonoContext, regs) + 4 * ARMREG_PC);
907 /* make ip point to the regs array, then restore everything, including pc. */
908 ARM_ADD_REG_IMM8 (code, ARMREG_IP, ARMREG_FP, MONO_STRUCT_OFFSET (MonoContext, regs));
909 ARM_LDM (code, ARMREG_IP, 0xffff);
911 mono_arch_flush_icache (start, code - start);
917 * Initialize architecture specific code.
920 mono_arch_init (void)
922 const char *cpu_arch;
924 mono_mutex_init_recursive (&mini_arch_mutex);
925 #ifdef MONO_ARCH_SOFT_DEBUG_SUPPORTED
926 if (mini_get_debug_options ()->soft_breakpoints) {
927 single_step_func_wrapper = create_function_wrapper (debugger_agent_single_step_from_context);
928 breakpoint_func_wrapper = create_function_wrapper (debugger_agent_breakpoint_from_context);
933 ss_trigger_page = mono_valloc (NULL, mono_pagesize (), MONO_MMAP_READ|MONO_MMAP_32BIT);
934 bp_trigger_page = mono_valloc (NULL, mono_pagesize (), MONO_MMAP_READ|MONO_MMAP_32BIT);
935 mono_mprotect (bp_trigger_page, mono_pagesize (), 0);
938 mono_aot_register_jit_icall ("mono_arm_throw_exception", mono_arm_throw_exception);
939 mono_aot_register_jit_icall ("mono_arm_throw_exception_by_token", mono_arm_throw_exception_by_token);
940 mono_aot_register_jit_icall ("mono_arm_resume_unwind", mono_arm_resume_unwind);
941 #if defined(ENABLE_GSHAREDVT)
942 mono_aot_register_jit_icall ("mono_arm_start_gsharedvt_call", mono_arm_start_gsharedvt_call);
945 #if defined(__ARM_EABI__)
946 eabi_supported = TRUE;
949 #if defined(ARM_FPU_VFP_HARD)
950 arm_fpu = MONO_ARM_FPU_VFP_HARD;
952 arm_fpu = MONO_ARM_FPU_VFP;
954 #if defined(ARM_FPU_NONE) && !defined(__APPLE__)
955 /* If we're compiling with a soft float fallback and it
956 turns out that no VFP unit is available, we need to
957 switch to soft float. We don't do this for iOS, since
958 iOS devices always have a VFP unit. */
959 if (!mono_hwcap_arm_has_vfp)
960 arm_fpu = MONO_ARM_FPU_NONE;
964 v5_supported = mono_hwcap_arm_is_v5;
965 v6_supported = mono_hwcap_arm_is_v6;
966 v7_supported = mono_hwcap_arm_is_v7;
967 v7s_supported = mono_hwcap_arm_is_v7s;
969 #if defined(__APPLE__)
970 /* iOS is special-cased here because we don't yet
971 have a way to properly detect CPU features on it. */
972 thumb_supported = TRUE;
975 thumb_supported = mono_hwcap_arm_has_thumb;
976 thumb2_supported = mono_hwcap_arm_has_thumb2;
979 /* Format: armv(5|6|7[s])[-thumb[2]] */
980 cpu_arch = g_getenv ("MONO_CPU_ARCH");
982 /* Do this here so it overrides any detection. */
984 if (strncmp (cpu_arch, "armv", 4) == 0) {
985 v5_supported = cpu_arch [4] >= '5';
986 v6_supported = cpu_arch [4] >= '6';
987 v7_supported = cpu_arch [4] >= '7';
988 v7s_supported = strncmp (cpu_arch, "armv7s", 6) == 0;
991 thumb_supported = strstr (cpu_arch, "thumb") != NULL;
992 thumb2_supported = strstr (cpu_arch, "thumb2") != NULL;
997 * Cleanup architecture specific code.
1000 mono_arch_cleanup (void)
1005 * This function returns the optimizations supported on this cpu.
1008 mono_arch_cpu_optimizations (guint32 *exclude_mask)
1010 /* no arm-specific optimizations yet */
1016 * This function test for all SIMD functions supported.
1018 * Returns a bitmask corresponding to all supported versions.
1022 mono_arch_cpu_enumerate_simd_versions (void)
1024 /* SIMD is currently unimplemented */
1032 mono_arch_opcode_needs_emulation (MonoCompile *cfg, int opcode)
1034 if (v7s_supported) {
1048 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
1050 mono_arch_is_soft_float (void)
1052 return arm_fpu == MONO_ARM_FPU_NONE;
1057 mono_arm_is_hard_float (void)
1059 return arm_fpu == MONO_ARM_FPU_VFP_HARD;
1063 is_regsize_var (MonoGenericSharingContext *gsctx, MonoType *t) {
1066 t = mini_type_get_underlying_type (gsctx, t);
1073 case MONO_TYPE_FNPTR:
1075 case MONO_TYPE_OBJECT:
1076 case MONO_TYPE_STRING:
1077 case MONO_TYPE_CLASS:
1078 case MONO_TYPE_SZARRAY:
1079 case MONO_TYPE_ARRAY:
1081 case MONO_TYPE_GENERICINST:
1082 if (!mono_type_generic_inst_is_valuetype (t))
1085 case MONO_TYPE_VALUETYPE:
1092 mono_arch_get_allocatable_int_vars (MonoCompile *cfg)
1097 for (i = 0; i < cfg->num_varinfo; i++) {
1098 MonoInst *ins = cfg->varinfo [i];
1099 MonoMethodVar *vmv = MONO_VARINFO (cfg, i);
1102 if (vmv->range.first_use.abs_pos >= vmv->range.last_use.abs_pos)
1105 if (ins->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT) || (ins->opcode != OP_LOCAL && ins->opcode != OP_ARG))
1108 /* we can only allocate 32 bit values */
1109 if (is_regsize_var (cfg->generic_sharing_context, ins->inst_vtype)) {
1110 g_assert (MONO_VARINFO (cfg, i)->reg == -1);
1111 g_assert (i == vmv->idx);
1112 vars = mono_varlist_insert_sorted (cfg, vars, vmv, FALSE);
1119 #define USE_EXTRA_TEMPS 0
1122 mono_arch_get_global_int_regs (MonoCompile *cfg)
1126 mono_arch_compute_omit_fp (cfg);
1129 * FIXME: Interface calls might go through a static rgctx trampoline which
1130 * sets V5, but it doesn't save it, so we need to save it ourselves, and
1133 if (cfg->flags & MONO_CFG_HAS_CALLS)
1134 cfg->uses_rgctx_reg = TRUE;
1136 if (cfg->arch.omit_fp)
1137 regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_FP));
1138 regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_V1));
1139 regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_V2));
1140 regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_V3));
1142 /* V4=R7 is used as a frame pointer, but V7=R10 is preserved */
1143 regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_V7));
1145 regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_V4));
1146 if (!(cfg->compile_aot || cfg->uses_rgctx_reg || COMPILE_LLVM (cfg)))
1147 /* V5 is reserved for passing the vtable/rgctx/IMT method */
1148 regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_V5));
1149 /*regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_V6));*/
1150 /*regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_V7));*/
1156 * mono_arch_regalloc_cost:
1158 * Return the cost, in number of memory references, of the action of
1159 * allocating the variable VMV into a register during global register
1163 mono_arch_regalloc_cost (MonoCompile *cfg, MonoMethodVar *vmv)
1169 #endif /* #ifndef DISABLE_JIT */
1171 #ifndef __GNUC_PREREQ
1172 #define __GNUC_PREREQ(maj, min) (0)
1176 mono_arch_flush_icache (guint8 *code, gint size)
1178 #if defined(__native_client__)
1179 // For Native Client we don't have to flush i-cache here,
1180 // as it's being done by dyncode interface.
1183 #ifdef MONO_CROSS_COMPILE
1185 sys_icache_invalidate (code, size);
1186 #elif __GNUC_PREREQ(4, 1)
1187 __clear_cache (code, code + size);
1188 #elif defined(PLATFORM_ANDROID)
1189 const int syscall = 0xf0002;
1197 : "r" (code), "r" (code + size), "r" (syscall)
1198 : "r0", "r1", "r7", "r2"
1201 __asm __volatile ("mov r0, %0\n"
1204 "swi 0x9f0002 @ sys_cacheflush"
1206 : "r" (code), "r" (code + size), "r" (0)
1207 : "r0", "r1", "r3" );
1209 #endif /* !__native_client__ */
1220 RegTypeStructByAddr,
1221 /* gsharedvt argument passed by addr in greg */
1222 RegTypeGSharedVtInReg,
1223 /* gsharedvt argument passed by addr on stack */
1224 RegTypeGSharedVtOnStack,
1229 guint16 vtsize; /* in param area */
1233 guint8 size : 4; /* 1, 2, 4, 8, or regs used by RegTypeStructByVal */
1238 guint32 stack_usage;
1239 gboolean vtype_retaddr;
1240 /* The index of the vret arg in the argument list */
1250 add_general (guint *gr, guint *stack_size, ArgInfo *ainfo, gboolean simple)
1253 if (*gr > ARMREG_R3) {
1255 ainfo->offset = *stack_size;
1256 ainfo->reg = ARMREG_SP; /* in the caller */
1257 ainfo->storage = RegTypeBase;
1260 ainfo->storage = RegTypeGeneral;
1267 split = i8_align == 4;
1272 if (*gr == ARMREG_R3 && split) {
1273 /* first word in r3 and the second on the stack */
1274 ainfo->offset = *stack_size;
1275 ainfo->reg = ARMREG_SP; /* in the caller */
1276 ainfo->storage = RegTypeBaseGen;
1278 } else if (*gr >= ARMREG_R3) {
1279 if (eabi_supported) {
1280 /* darwin aligns longs to 4 byte only */
1281 if (i8_align == 8) {
1286 ainfo->offset = *stack_size;
1287 ainfo->reg = ARMREG_SP; /* in the caller */
1288 ainfo->storage = RegTypeBase;
1291 if (eabi_supported) {
1292 if (i8_align == 8 && ((*gr) & 1))
1295 ainfo->storage = RegTypeIRegPair;
1304 add_float (guint *fpr, guint *stack_size, ArgInfo *ainfo, gboolean is_double, gint *float_spare)
1307 * If we're calling a function like this:
1309 * void foo(float a, double b, float c)
1311 * We pass a in s0 and b in d1. That leaves us
1312 * with s1 being unused. The armhf ABI recognizes
1313 * this and requires register assignment to then
1314 * use that for the next single-precision arg,
1315 * i.e. c in this example. So float_spare either
1316 * tells us which reg to use for the next single-
1317 * precision arg, or it's -1, meaning use *fpr.
1319 * Note that even though most of the JIT speaks
1320 * double-precision, fpr represents single-
1321 * precision registers.
1323 * See parts 5.5 and 6.1.2 of the AAPCS for how
1327 if (*fpr < ARM_VFP_F16 || (!is_double && *float_spare >= 0)) {
1328 ainfo->storage = RegTypeFP;
1332 * If we're passing a double-precision value
1333 * and *fpr is odd (e.g. it's s1, s3, ...)
1334 * we need to use the next even register. So
1335 * we mark the current *fpr as a spare that
1336 * can be used for the next single-precision
1340 *float_spare = *fpr;
1345 * At this point, we have an even register
1346 * so we assign that and move along.
1350 } else if (*float_spare >= 0) {
1352 * We're passing a single-precision value
1353 * and it looks like a spare single-
1354 * precision register is available. Let's
1358 ainfo->reg = *float_spare;
1362 * If we hit this branch, we're passing a
1363 * single-precision value and we can simply
1364 * use the next available register.
1372 * We've exhausted available floating point
1373 * regs, so pass the rest on the stack.
1381 ainfo->offset = *stack_size;
1382 ainfo->reg = ARMREG_SP;
1383 ainfo->storage = RegTypeBase;
1390 get_call_info (MonoGenericSharingContext *gsctx, MonoMemPool *mp, MonoMethodSignature *sig)
1392 guint i, gr, fpr, pstart;
1394 int n = sig->hasthis + sig->param_count;
1395 MonoType *simpletype;
1396 guint32 stack_size = 0;
1398 gboolean is_pinvoke = sig->pinvoke;
1402 cinfo = mono_mempool_alloc0 (mp, sizeof (CallInfo) + (sizeof (ArgInfo) * n));
1404 cinfo = g_malloc0 (sizeof (CallInfo) + (sizeof (ArgInfo) * n));
1411 t = mini_type_get_underlying_type (gsctx, sig->ret);
1412 if (MONO_TYPE_ISSTRUCT (t)) {
1415 if (is_pinvoke && mono_class_native_size (mono_class_from_mono_type (t), &align) <= sizeof (gpointer)) {
1416 cinfo->ret.storage = RegTypeStructByVal;
1418 cinfo->vtype_retaddr = TRUE;
1420 } else if (!(t->type == MONO_TYPE_GENERICINST && !mono_type_generic_inst_is_valuetype (t)) && mini_is_gsharedvt_type_gsctx (gsctx, t)) {
1421 cinfo->vtype_retaddr = TRUE;
1427 * To simplify get_this_arg_reg () and LLVM integration, emit the vret arg after
1428 * the first argument, allowing 'this' to be always passed in the first arg reg.
1429 * Also do this if the first argument is a reference type, since virtual calls
1430 * are sometimes made using calli without sig->hasthis set, like in the delegate
1433 if (cinfo->vtype_retaddr && !is_pinvoke && (sig->hasthis || (sig->param_count > 0 && MONO_TYPE_IS_REFERENCE (mini_type_get_underlying_type (gsctx, sig->params [0]))))) {
1435 add_general (&gr, &stack_size, cinfo->args + 0, TRUE);
1437 add_general (&gr, &stack_size, &cinfo->args [sig->hasthis + 0], TRUE);
1441 add_general (&gr, &stack_size, &cinfo->ret, TRUE);
1442 cinfo->vret_arg_index = 1;
1446 add_general (&gr, &stack_size, cinfo->args + 0, TRUE);
1450 if (cinfo->vtype_retaddr)
1451 add_general (&gr, &stack_size, &cinfo->ret, TRUE);
1454 DEBUG(printf("params: %d\n", sig->param_count));
1455 for (i = pstart; i < sig->param_count; ++i) {
1456 ArgInfo *ainfo = &cinfo->args [n];
1458 if ((sig->call_convention == MONO_CALL_VARARG) && (i == sig->sentinelpos)) {
1459 /* Prevent implicit arguments and sig_cookie from
1460 being passed in registers */
1463 /* Emit the signature cookie just before the implicit arguments */
1464 add_general (&gr, &stack_size, &cinfo->sig_cookie, TRUE);
1466 DEBUG(printf("param %d: ", i));
1467 if (sig->params [i]->byref) {
1468 DEBUG(printf("byref\n"));
1469 add_general (&gr, &stack_size, ainfo, TRUE);
1473 simpletype = mini_type_get_underlying_type (gsctx, sig->params [i]);
1474 switch (simpletype->type) {
1475 case MONO_TYPE_BOOLEAN:
1478 cinfo->args [n].size = 1;
1479 add_general (&gr, &stack_size, ainfo, TRUE);
1482 case MONO_TYPE_CHAR:
1485 cinfo->args [n].size = 2;
1486 add_general (&gr, &stack_size, ainfo, TRUE);
1491 cinfo->args [n].size = 4;
1492 add_general (&gr, &stack_size, ainfo, TRUE);
1498 case MONO_TYPE_FNPTR:
1499 case MONO_TYPE_CLASS:
1500 case MONO_TYPE_OBJECT:
1501 case MONO_TYPE_STRING:
1502 case MONO_TYPE_SZARRAY:
1503 case MONO_TYPE_ARRAY:
1504 cinfo->args [n].size = sizeof (gpointer);
1505 add_general (&gr, &stack_size, ainfo, TRUE);
1508 case MONO_TYPE_GENERICINST:
1509 if (!mono_type_generic_inst_is_valuetype (simpletype)) {
1510 cinfo->args [n].size = sizeof (gpointer);
1511 add_general (&gr, &stack_size, ainfo, TRUE);
1515 if (mini_is_gsharedvt_type_gsctx (gsctx, simpletype)) {
1516 /* gsharedvt arguments are passed by ref */
1517 g_assert (mini_is_gsharedvt_type_gsctx (gsctx, simpletype));
1518 add_general (&gr, &stack_size, ainfo, TRUE);
1519 switch (ainfo->storage) {
1520 case RegTypeGeneral:
1521 ainfo->storage = RegTypeGSharedVtInReg;
1524 ainfo->storage = RegTypeGSharedVtOnStack;
1527 g_assert_not_reached ();
1533 case MONO_TYPE_TYPEDBYREF:
1534 case MONO_TYPE_VALUETYPE: {
1540 if (simpletype->type == MONO_TYPE_TYPEDBYREF) {
1541 size = sizeof (MonoTypedRef);
1542 align = sizeof (gpointer);
1544 MonoClass *klass = mono_class_from_mono_type (sig->params [i]);
1546 size = mono_class_native_size (klass, &align);
1548 size = mini_type_stack_size_full (gsctx, simpletype, &align, FALSE);
1550 DEBUG(printf ("load %d bytes struct\n", size));
1553 align_size += (sizeof (gpointer) - 1);
1554 align_size &= ~(sizeof (gpointer) - 1);
1555 nwords = (align_size + sizeof (gpointer) -1 ) / sizeof (gpointer);
1556 ainfo->storage = RegTypeStructByVal;
1557 ainfo->struct_size = size;
1558 /* FIXME: align stack_size if needed */
1559 if (eabi_supported) {
1560 if (align >= 8 && (gr & 1))
1563 if (gr > ARMREG_R3) {
1565 ainfo->vtsize = nwords;
1567 int rest = ARMREG_R3 - gr + 1;
1568 int n_in_regs = rest >= nwords? nwords: rest;
1570 ainfo->size = n_in_regs;
1571 ainfo->vtsize = nwords - n_in_regs;
1574 nwords -= n_in_regs;
1576 if (sig->call_convention == MONO_CALL_VARARG)
1577 /* This matches the alignment in mono_ArgIterator_IntGetNextArg () */
1578 stack_size = ALIGN_TO (stack_size, align);
1579 ainfo->offset = stack_size;
1580 /*g_print ("offset for arg %d at %d\n", n, stack_size);*/
1581 stack_size += nwords * sizeof (gpointer);
1588 add_general (&gr, &stack_size, ainfo, FALSE);
1595 add_float (&fpr, &stack_size, ainfo, FALSE, &float_spare);
1597 add_general (&gr, &stack_size, ainfo, TRUE);
1605 add_float (&fpr, &stack_size, ainfo, TRUE, &float_spare);
1607 add_general (&gr, &stack_size, ainfo, FALSE);
1612 case MONO_TYPE_MVAR:
1613 /* gsharedvt arguments are passed by ref */
1614 g_assert (mini_is_gsharedvt_type_gsctx (gsctx, simpletype));
1615 add_general (&gr, &stack_size, ainfo, TRUE);
1616 switch (ainfo->storage) {
1617 case RegTypeGeneral:
1618 ainfo->storage = RegTypeGSharedVtInReg;
1621 ainfo->storage = RegTypeGSharedVtOnStack;
1624 g_assert_not_reached ();
1629 g_error ("Can't trampoline 0x%x", sig->params [i]->type);
1633 /* Handle the case where there are no implicit arguments */
1634 if ((sig->call_convention == MONO_CALL_VARARG) && (i == sig->sentinelpos)) {
1635 /* Prevent implicit arguments and sig_cookie from
1636 being passed in registers */
1639 /* Emit the signature cookie just before the implicit arguments */
1640 add_general (&gr, &stack_size, &cinfo->sig_cookie, TRUE);
1644 simpletype = mini_type_get_underlying_type (gsctx, sig->ret);
1645 switch (simpletype->type) {
1646 case MONO_TYPE_BOOLEAN:
1651 case MONO_TYPE_CHAR:
1657 case MONO_TYPE_FNPTR:
1658 case MONO_TYPE_CLASS:
1659 case MONO_TYPE_OBJECT:
1660 case MONO_TYPE_SZARRAY:
1661 case MONO_TYPE_ARRAY:
1662 case MONO_TYPE_STRING:
1663 cinfo->ret.storage = RegTypeGeneral;
1664 cinfo->ret.reg = ARMREG_R0;
1668 cinfo->ret.storage = RegTypeIRegPair;
1669 cinfo->ret.reg = ARMREG_R0;
1673 cinfo->ret.storage = RegTypeFP;
1675 if (IS_HARD_FLOAT) {
1676 cinfo->ret.reg = ARM_VFP_F0;
1678 cinfo->ret.reg = ARMREG_R0;
1682 case MONO_TYPE_GENERICINST:
1683 if (!mono_type_generic_inst_is_valuetype (simpletype)) {
1684 cinfo->ret.storage = RegTypeGeneral;
1685 cinfo->ret.reg = ARMREG_R0;
1688 // FIXME: Only for variable types
1689 if (mini_is_gsharedvt_type_gsctx (gsctx, simpletype)) {
1690 cinfo->ret.storage = RegTypeStructByAddr;
1691 g_assert (cinfo->vtype_retaddr);
1695 case MONO_TYPE_VALUETYPE:
1696 case MONO_TYPE_TYPEDBYREF:
1697 if (cinfo->ret.storage != RegTypeStructByVal)
1698 cinfo->ret.storage = RegTypeStructByAddr;
1701 case MONO_TYPE_MVAR:
1702 g_assert (mini_is_gsharedvt_type_gsctx (gsctx, simpletype));
1703 cinfo->ret.storage = RegTypeStructByAddr;
1704 g_assert (cinfo->vtype_retaddr);
1706 case MONO_TYPE_VOID:
1709 g_error ("Can't handle as return value 0x%x", sig->ret->type);
1713 /* align stack size to 8 */
1714 DEBUG (printf (" stack size: %d (%d)\n", (stack_size + 15) & ~15, stack_size));
1715 stack_size = (stack_size + 7) & ~7;
1717 cinfo->stack_usage = stack_size;
1723 mono_arch_tail_call_supported (MonoCompile *cfg, MonoMethodSignature *caller_sig, MonoMethodSignature *callee_sig)
1725 MonoType *callee_ret;
1729 if (cfg->compile_aot && !cfg->full_aot)
1730 /* OP_TAILCALL doesn't work with AOT */
1733 c1 = get_call_info (NULL, NULL, caller_sig);
1734 c2 = get_call_info (NULL, NULL, callee_sig);
1737 * Tail calls with more callee stack usage than the caller cannot be supported, since
1738 * the extra stack space would be left on the stack after the tail call.
1740 res = c1->stack_usage >= c2->stack_usage;
1741 callee_ret = mini_replace_type (callee_sig->ret);
1742 if (callee_ret && MONO_TYPE_ISSTRUCT (callee_ret) && c2->ret.storage != RegTypeStructByVal)
1743 /* An address on the callee's stack is passed as the first argument */
1746 if (c2->stack_usage > 16 * 4)
1758 debug_omit_fp (void)
1761 return mono_debug_count ();
1768 * mono_arch_compute_omit_fp:
1770 * Determine whenever the frame pointer can be eliminated.
1773 mono_arch_compute_omit_fp (MonoCompile *cfg)
1775 MonoMethodSignature *sig;
1776 MonoMethodHeader *header;
1780 if (cfg->arch.omit_fp_computed)
1783 header = cfg->header;
1785 sig = mono_method_signature (cfg->method);
1787 if (!cfg->arch.cinfo)
1788 cfg->arch.cinfo = get_call_info (cfg->generic_sharing_context, cfg->mempool, sig);
1789 cinfo = cfg->arch.cinfo;
1792 * FIXME: Remove some of the restrictions.
1794 cfg->arch.omit_fp = TRUE;
1795 cfg->arch.omit_fp_computed = TRUE;
1797 if (cfg->disable_omit_fp)
1798 cfg->arch.omit_fp = FALSE;
1799 if (!debug_omit_fp ())
1800 cfg->arch.omit_fp = FALSE;
1802 if (cfg->method->save_lmf)
1803 cfg->arch.omit_fp = FALSE;
1805 if (cfg->flags & MONO_CFG_HAS_ALLOCA)
1806 cfg->arch.omit_fp = FALSE;
1807 if (header->num_clauses)
1808 cfg->arch.omit_fp = FALSE;
1809 if (cfg->param_area)
1810 cfg->arch.omit_fp = FALSE;
1811 if (!sig->pinvoke && (sig->call_convention == MONO_CALL_VARARG))
1812 cfg->arch.omit_fp = FALSE;
1813 if ((mono_jit_trace_calls != NULL && mono_trace_eval (cfg->method)) ||
1814 (cfg->prof_options & MONO_PROFILE_ENTER_LEAVE))
1815 cfg->arch.omit_fp = FALSE;
1816 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
1817 ArgInfo *ainfo = &cinfo->args [i];
1819 if (ainfo->storage == RegTypeBase || ainfo->storage == RegTypeBaseGen || ainfo->storage == RegTypeStructByVal) {
1821 * The stack offset can only be determined when the frame
1824 cfg->arch.omit_fp = FALSE;
1829 for (i = cfg->locals_start; i < cfg->num_varinfo; i++) {
1830 MonoInst *ins = cfg->varinfo [i];
1833 locals_size += mono_type_size (ins->inst_vtype, &ialign);
1838 * Set var information according to the calling convention. arm version.
1839 * The locals var stuff should most likely be split in another method.
1842 mono_arch_allocate_vars (MonoCompile *cfg)
1844 MonoMethodSignature *sig;
1845 MonoMethodHeader *header;
1848 int i, offset, size, align, curinst;
1852 sig = mono_method_signature (cfg->method);
1854 if (!cfg->arch.cinfo)
1855 cfg->arch.cinfo = get_call_info (cfg->generic_sharing_context, cfg->mempool, sig);
1856 cinfo = cfg->arch.cinfo;
1857 sig_ret = mini_replace_type (sig->ret);
1859 mono_arch_compute_omit_fp (cfg);
1861 if (cfg->arch.omit_fp)
1862 cfg->frame_reg = ARMREG_SP;
1864 cfg->frame_reg = ARMREG_FP;
1866 cfg->flags |= MONO_CFG_HAS_SPILLUP;
1868 /* allow room for the vararg method args: void* and long/double */
1869 if (mono_jit_trace_calls != NULL && mono_trace_eval (cfg->method))
1870 cfg->param_area = MAX (cfg->param_area, sizeof (gpointer)*8);
1872 header = cfg->header;
1874 /* See mono_arch_get_global_int_regs () */
1875 if (cfg->flags & MONO_CFG_HAS_CALLS)
1876 cfg->uses_rgctx_reg = TRUE;
1878 if (cfg->frame_reg != ARMREG_SP)
1879 cfg->used_int_regs |= 1 << cfg->frame_reg;
1881 if (cfg->compile_aot || cfg->uses_rgctx_reg || COMPILE_LLVM (cfg))
1882 /* V5 is reserved for passing the vtable/rgctx/IMT method */
1883 cfg->used_int_regs |= (1 << ARMREG_V5);
1887 if (!MONO_TYPE_ISSTRUCT (sig_ret) && !cinfo->vtype_retaddr) {
1888 if (sig_ret->type != MONO_TYPE_VOID) {
1889 cfg->ret->opcode = OP_REGVAR;
1890 cfg->ret->inst_c0 = ARMREG_R0;
1893 /* local vars are at a positive offset from the stack pointer */
1895 * also note that if the function uses alloca, we use FP
1896 * to point at the local variables.
1898 offset = 0; /* linkage area */
1899 /* align the offset to 16 bytes: not sure this is needed here */
1901 //offset &= ~(8 - 1);
1903 /* add parameter area size for called functions */
1904 offset += cfg->param_area;
1907 if (cfg->flags & MONO_CFG_HAS_FPOUT)
1910 /* allow room to save the return value */
1911 if (mono_jit_trace_calls != NULL && mono_trace_eval (cfg->method))
1914 /* the MonoLMF structure is stored just below the stack pointer */
1915 if (cinfo->ret.storage == RegTypeStructByVal) {
1916 cfg->ret->opcode = OP_REGOFFSET;
1917 cfg->ret->inst_basereg = cfg->frame_reg;
1918 offset += sizeof (gpointer) - 1;
1919 offset &= ~(sizeof (gpointer) - 1);
1920 cfg->ret->inst_offset = - offset;
1921 offset += sizeof(gpointer);
1922 } else if (cinfo->vtype_retaddr) {
1923 ins = cfg->vret_addr;
1924 offset += sizeof(gpointer) - 1;
1925 offset &= ~(sizeof(gpointer) - 1);
1926 ins->inst_offset = offset;
1927 ins->opcode = OP_REGOFFSET;
1928 ins->inst_basereg = cfg->frame_reg;
1929 if (G_UNLIKELY (cfg->verbose_level > 1)) {
1930 printf ("vret_addr =");
1931 mono_print_ins (cfg->vret_addr);
1933 offset += sizeof(gpointer);
1936 /* Allocate these first so they have a small offset, OP_SEQ_POINT depends on this */
1937 if (cfg->arch.seq_point_info_var) {
1940 ins = cfg->arch.seq_point_info_var;
1944 offset += align - 1;
1945 offset &= ~(align - 1);
1946 ins->opcode = OP_REGOFFSET;
1947 ins->inst_basereg = cfg->frame_reg;
1948 ins->inst_offset = offset;
1951 ins = cfg->arch.ss_trigger_page_var;
1954 offset += align - 1;
1955 offset &= ~(align - 1);
1956 ins->opcode = OP_REGOFFSET;
1957 ins->inst_basereg = cfg->frame_reg;
1958 ins->inst_offset = offset;
1962 if (cfg->arch.seq_point_read_var) {
1965 ins = cfg->arch.seq_point_read_var;
1969 offset += align - 1;
1970 offset &= ~(align - 1);
1971 ins->opcode = OP_REGOFFSET;
1972 ins->inst_basereg = cfg->frame_reg;
1973 ins->inst_offset = offset;
1976 ins = cfg->arch.seq_point_ss_method_var;
1979 offset += align - 1;
1980 offset &= ~(align - 1);
1981 ins->opcode = OP_REGOFFSET;
1982 ins->inst_basereg = cfg->frame_reg;
1983 ins->inst_offset = offset;
1986 ins = cfg->arch.seq_point_bp_method_var;
1989 offset += align - 1;
1990 offset &= ~(align - 1);
1991 ins->opcode = OP_REGOFFSET;
1992 ins->inst_basereg = cfg->frame_reg;
1993 ins->inst_offset = offset;
1997 if (cfg->has_atomic_exchange_i4 || cfg->has_atomic_cas_i4 || cfg->has_atomic_add_i4) {
1998 /* Allocate a temporary used by the atomic ops */
2002 /* Allocate a local slot to hold the sig cookie address */
2003 offset += align - 1;
2004 offset &= ~(align - 1);
2005 cfg->arch.atomic_tmp_offset = offset;
2008 cfg->arch.atomic_tmp_offset = -1;
2011 cfg->locals_min_stack_offset = offset;
2013 curinst = cfg->locals_start;
2014 for (i = curinst; i < cfg->num_varinfo; ++i) {
2017 ins = cfg->varinfo [i];
2018 if ((ins->flags & MONO_INST_IS_DEAD) || ins->opcode == OP_REGVAR || ins->opcode == OP_REGOFFSET)
2021 t = ins->inst_vtype;
2022 if (cfg->gsharedvt && mini_is_gsharedvt_variable_type (cfg, t))
2025 /* inst->backend.is_pinvoke indicates native sized value types, this is used by the
2026 * pinvoke wrappers when they call functions returning structure */
2027 if (ins->backend.is_pinvoke && MONO_TYPE_ISSTRUCT (t) && t->type != MONO_TYPE_TYPEDBYREF) {
2028 size = mono_class_native_size (mono_class_from_mono_type (t), &ualign);
2032 size = mono_type_size (t, &align);
2034 /* FIXME: if a structure is misaligned, our memcpy doesn't work,
2035 * since it loads/stores misaligned words, which don't do the right thing.
2037 if (align < 4 && size >= 4)
2039 if (ALIGN_TO (offset, align) > ALIGN_TO (offset, 4))
2040 mini_gc_set_slot_type_from_fp (cfg, ALIGN_TO (offset, 4), SLOT_NOREF);
2041 offset += align - 1;
2042 offset &= ~(align - 1);
2043 ins->opcode = OP_REGOFFSET;
2044 ins->inst_offset = offset;
2045 ins->inst_basereg = cfg->frame_reg;
2047 //g_print ("allocating local %d to %d\n", i, inst->inst_offset);
2050 cfg->locals_max_stack_offset = offset;
2054 ins = cfg->args [curinst];
2055 if (ins->opcode != OP_REGVAR) {
2056 ins->opcode = OP_REGOFFSET;
2057 ins->inst_basereg = cfg->frame_reg;
2058 offset += sizeof (gpointer) - 1;
2059 offset &= ~(sizeof (gpointer) - 1);
2060 ins->inst_offset = offset;
2061 offset += sizeof (gpointer);
2066 if (sig->call_convention == MONO_CALL_VARARG) {
2070 /* Allocate a local slot to hold the sig cookie address */
2071 offset += align - 1;
2072 offset &= ~(align - 1);
2073 cfg->sig_cookie = offset;
2077 for (i = 0; i < sig->param_count; ++i) {
2078 ins = cfg->args [curinst];
2080 if (ins->opcode != OP_REGVAR) {
2081 ins->opcode = OP_REGOFFSET;
2082 ins->inst_basereg = cfg->frame_reg;
2083 size = mini_type_stack_size_full (cfg->generic_sharing_context, sig->params [i], &ualign, sig->pinvoke);
2085 /* FIXME: if a structure is misaligned, our memcpy doesn't work,
2086 * since it loads/stores misaligned words, which don't do the right thing.
2088 if (align < 4 && size >= 4)
2090 /* The code in the prolog () stores words when storing vtypes received in a register */
2091 if (MONO_TYPE_ISSTRUCT (sig->params [i]))
2093 if (ALIGN_TO (offset, align) > ALIGN_TO (offset, 4))
2094 mini_gc_set_slot_type_from_fp (cfg, ALIGN_TO (offset, 4), SLOT_NOREF);
2095 offset += align - 1;
2096 offset &= ~(align - 1);
2097 ins->inst_offset = offset;
2103 /* align the offset to 8 bytes */
2104 if (ALIGN_TO (offset, 8) > ALIGN_TO (offset, 4))
2105 mini_gc_set_slot_type_from_fp (cfg, ALIGN_TO (offset, 4), SLOT_NOREF);
2110 cfg->stack_offset = offset;
2114 mono_arch_create_vars (MonoCompile *cfg)
2116 MonoMethodSignature *sig;
2120 sig = mono_method_signature (cfg->method);
2122 if (!cfg->arch.cinfo)
2123 cfg->arch.cinfo = get_call_info (cfg->generic_sharing_context, cfg->mempool, sig);
2124 cinfo = cfg->arch.cinfo;
2126 if (IS_HARD_FLOAT) {
2127 for (i = 0; i < 2; i++) {
2128 MonoInst *inst = mono_compile_create_var (cfg, &mono_defaults.double_class->byval_arg, OP_LOCAL);
2129 inst->flags |= MONO_INST_VOLATILE;
2131 cfg->arch.vfp_scratch_slots [i] = (gpointer) inst;
2135 if (cinfo->ret.storage == RegTypeStructByVal)
2136 cfg->ret_var_is_local = TRUE;
2138 if (cinfo->vtype_retaddr) {
2139 cfg->vret_addr = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_ARG);
2140 if (G_UNLIKELY (cfg->verbose_level > 1)) {
2141 printf ("vret_addr = ");
2142 mono_print_ins (cfg->vret_addr);
2146 if (cfg->gen_seq_points) {
2147 if (cfg->soft_breakpoints) {
2148 MonoInst *ins = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
2149 ins->flags |= MONO_INST_VOLATILE;
2150 cfg->arch.seq_point_read_var = ins;
2152 ins = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
2153 ins->flags |= MONO_INST_VOLATILE;
2154 cfg->arch.seq_point_ss_method_var = ins;
2156 ins = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
2157 ins->flags |= MONO_INST_VOLATILE;
2158 cfg->arch.seq_point_bp_method_var = ins;
2160 g_assert (!cfg->compile_aot);
2161 } else if (cfg->compile_aot) {
2162 MonoInst *ins = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
2163 ins->flags |= MONO_INST_VOLATILE;
2164 cfg->arch.seq_point_info_var = ins;
2166 /* Allocate a separate variable for this to save 1 load per seq point */
2167 ins = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
2168 ins->flags |= MONO_INST_VOLATILE;
2169 cfg->arch.ss_trigger_page_var = ins;
2175 emit_sig_cookie (MonoCompile *cfg, MonoCallInst *call, CallInfo *cinfo)
2177 MonoMethodSignature *tmp_sig;
2180 if (call->tail_call)
2183 g_assert (cinfo->sig_cookie.storage == RegTypeBase);
2186 * mono_ArgIterator_Setup assumes the signature cookie is
2187 * passed first and all the arguments which were before it are
2188 * passed on the stack after the signature. So compensate by
2189 * passing a different signature.
2191 tmp_sig = mono_metadata_signature_dup (call->signature);
2192 tmp_sig->param_count -= call->signature->sentinelpos;
2193 tmp_sig->sentinelpos = 0;
2194 memcpy (tmp_sig->params, call->signature->params + call->signature->sentinelpos, tmp_sig->param_count * sizeof (MonoType*));
2196 sig_reg = mono_alloc_ireg (cfg);
2197 MONO_EMIT_NEW_SIGNATURECONST (cfg, sig_reg, tmp_sig);
2199 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, ARMREG_SP, cinfo->sig_cookie.offset, sig_reg);
2204 mono_arch_get_llvm_call_info (MonoCompile *cfg, MonoMethodSignature *sig)
2209 LLVMCallInfo *linfo;
2211 n = sig->param_count + sig->hasthis;
2213 cinfo = get_call_info (cfg->generic_sharing_context, cfg->mempool, sig);
2215 linfo = mono_mempool_alloc0 (cfg->mempool, sizeof (LLVMCallInfo) + (sizeof (LLVMArgInfo) * n));
2218 * LLVM always uses the native ABI while we use our own ABI, the
2219 * only difference is the handling of vtypes:
2220 * - we only pass/receive them in registers in some cases, and only
2221 * in 1 or 2 integer registers.
2223 if (cinfo->vtype_retaddr) {
2224 /* Vtype returned using a hidden argument */
2225 linfo->ret.storage = LLVMArgVtypeRetAddr;
2226 linfo->vret_arg_index = cinfo->vret_arg_index;
2227 } else if (cinfo->ret.storage != RegTypeGeneral && cinfo->ret.storage != RegTypeNone && cinfo->ret.storage != RegTypeFP && cinfo->ret.storage != RegTypeIRegPair) {
2228 cfg->exception_message = g_strdup ("unknown ret conv");
2229 cfg->disable_llvm = TRUE;
2233 for (i = 0; i < n; ++i) {
2234 ainfo = cinfo->args + i;
2236 linfo->args [i].storage = LLVMArgNone;
2238 switch (ainfo->storage) {
2239 case RegTypeGeneral:
2240 case RegTypeIRegPair:
2242 linfo->args [i].storage = LLVMArgInIReg;
2244 case RegTypeStructByVal:
2245 // FIXME: Passing entirely on the stack or split reg/stack
2246 if (ainfo->vtsize == 0 && ainfo->size <= 2) {
2247 linfo->args [i].storage = LLVMArgVtypeInReg;
2248 linfo->args [i].pair_storage [0] = LLVMArgInIReg;
2249 if (ainfo->size == 2)
2250 linfo->args [i].pair_storage [1] = LLVMArgInIReg;
2252 linfo->args [i].pair_storage [1] = LLVMArgNone;
2254 cfg->exception_message = g_strdup_printf ("vtype-by-val on stack");
2255 cfg->disable_llvm = TRUE;
2259 cfg->exception_message = g_strdup_printf ("ainfo->storage (%d)", ainfo->storage);
2260 cfg->disable_llvm = TRUE;
2270 mono_arch_emit_call (MonoCompile *cfg, MonoCallInst *call)
2273 MonoMethodSignature *sig;
2277 sig = call->signature;
2278 n = sig->param_count + sig->hasthis;
2280 cinfo = get_call_info (cfg->generic_sharing_context, NULL, sig);
2282 for (i = 0; i < n; ++i) {
2283 ArgInfo *ainfo = cinfo->args + i;
2286 if (i >= sig->hasthis)
2287 t = sig->params [i - sig->hasthis];
2289 t = &mono_defaults.int_class->byval_arg;
2290 t = mini_type_get_underlying_type (cfg->generic_sharing_context, t);
2292 if ((sig->call_convention == MONO_CALL_VARARG) && (i == sig->sentinelpos)) {
2293 /* Emit the signature cookie just before the implicit arguments */
2294 emit_sig_cookie (cfg, call, cinfo);
2297 in = call->args [i];
2299 switch (ainfo->storage) {
2300 case RegTypeGeneral:
2301 case RegTypeIRegPair:
2302 if (!t->byref && ((t->type == MONO_TYPE_I8) || (t->type == MONO_TYPE_U8))) {
2303 MONO_INST_NEW (cfg, ins, OP_MOVE);
2304 ins->dreg = mono_alloc_ireg (cfg);
2305 ins->sreg1 = in->dreg + 1;
2306 MONO_ADD_INS (cfg->cbb, ins);
2307 mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, ainfo->reg, FALSE);
2309 MONO_INST_NEW (cfg, ins, OP_MOVE);
2310 ins->dreg = mono_alloc_ireg (cfg);
2311 ins->sreg1 = in->dreg + 2;
2312 MONO_ADD_INS (cfg->cbb, ins);
2313 mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, ainfo->reg + 1, FALSE);
2314 } else if (!t->byref && ((t->type == MONO_TYPE_R8) || (t->type == MONO_TYPE_R4))) {
2315 if (ainfo->size == 4) {
2316 if (IS_SOFT_FLOAT) {
2317 /* mono_emit_call_args () have already done the r8->r4 conversion */
2318 /* The converted value is in an int vreg */
2319 MONO_INST_NEW (cfg, ins, OP_MOVE);
2320 ins->dreg = mono_alloc_ireg (cfg);
2321 ins->sreg1 = in->dreg;
2322 MONO_ADD_INS (cfg->cbb, ins);
2323 mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, ainfo->reg, FALSE);
2327 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORER4_MEMBASE_REG, ARMREG_SP, (cfg->param_area - 8), in->dreg);
2328 creg = mono_alloc_ireg (cfg);
2329 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOAD_MEMBASE, creg, ARMREG_SP, (cfg->param_area - 8));
2330 mono_call_inst_add_outarg_reg (cfg, call, creg, ainfo->reg, FALSE);
2333 if (IS_SOFT_FLOAT) {
2334 MONO_INST_NEW (cfg, ins, OP_FGETLOW32);
2335 ins->dreg = mono_alloc_ireg (cfg);
2336 ins->sreg1 = in->dreg;
2337 MONO_ADD_INS (cfg->cbb, ins);
2338 mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, ainfo->reg, FALSE);
2340 MONO_INST_NEW (cfg, ins, OP_FGETHIGH32);
2341 ins->dreg = mono_alloc_ireg (cfg);
2342 ins->sreg1 = in->dreg;
2343 MONO_ADD_INS (cfg->cbb, ins);
2344 mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, ainfo->reg + 1, FALSE);
2348 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORER8_MEMBASE_REG, ARMREG_SP, (cfg->param_area - 8), in->dreg);
2349 creg = mono_alloc_ireg (cfg);
2350 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOAD_MEMBASE, creg, ARMREG_SP, (cfg->param_area - 8));
2351 mono_call_inst_add_outarg_reg (cfg, call, creg, ainfo->reg, FALSE);
2352 creg = mono_alloc_ireg (cfg);
2353 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOAD_MEMBASE, creg, ARMREG_SP, (cfg->param_area - 8 + 4));
2354 mono_call_inst_add_outarg_reg (cfg, call, creg, ainfo->reg + 1, FALSE);
2357 cfg->flags |= MONO_CFG_HAS_FPOUT;
2359 MONO_INST_NEW (cfg, ins, OP_MOVE);
2360 ins->dreg = mono_alloc_ireg (cfg);
2361 ins->sreg1 = in->dreg;
2362 MONO_ADD_INS (cfg->cbb, ins);
2364 mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, ainfo->reg, FALSE);
2367 case RegTypeStructByAddr:
2370 /* FIXME: where si the data allocated? */
2371 arg->backend.reg3 = ainfo->reg;
2372 call->used_iregs |= 1 << ainfo->reg;
2373 g_assert_not_reached ();
2376 case RegTypeStructByVal:
2377 case RegTypeGSharedVtInReg:
2378 case RegTypeGSharedVtOnStack:
2379 MONO_INST_NEW (cfg, ins, OP_OUTARG_VT);
2380 ins->opcode = OP_OUTARG_VT;
2381 ins->sreg1 = in->dreg;
2382 ins->klass = in->klass;
2383 ins->inst_p0 = call;
2384 ins->inst_p1 = mono_mempool_alloc (cfg->mempool, sizeof (ArgInfo));
2385 memcpy (ins->inst_p1, ainfo, sizeof (ArgInfo));
2386 mono_call_inst_add_outarg_vt (cfg, call, ins);
2387 MONO_ADD_INS (cfg->cbb, ins);
2390 if (!t->byref && ((t->type == MONO_TYPE_I8) || (t->type == MONO_TYPE_U8))) {
2391 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, ARMREG_SP, ainfo->offset, in->dreg);
2392 } else if (!t->byref && ((t->type == MONO_TYPE_R4) || (t->type == MONO_TYPE_R8))) {
2393 if (t->type == MONO_TYPE_R8) {
2394 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORER8_MEMBASE_REG, ARMREG_SP, ainfo->offset, in->dreg);
2397 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, ARMREG_SP, ainfo->offset, in->dreg);
2399 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORER4_MEMBASE_REG, ARMREG_SP, ainfo->offset, in->dreg);
2402 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, ARMREG_SP, ainfo->offset, in->dreg);
2405 case RegTypeBaseGen:
2406 if (!t->byref && ((t->type == MONO_TYPE_I8) || (t->type == MONO_TYPE_U8))) {
2407 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, ARMREG_SP, ainfo->offset, (G_BYTE_ORDER == G_BIG_ENDIAN) ? in->dreg + 1 : in->dreg + 2);
2408 MONO_INST_NEW (cfg, ins, OP_MOVE);
2409 ins->dreg = mono_alloc_ireg (cfg);
2410 ins->sreg1 = G_BYTE_ORDER == G_BIG_ENDIAN ? in->dreg + 2 : in->dreg + 1;
2411 MONO_ADD_INS (cfg->cbb, ins);
2412 mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, ARMREG_R3, FALSE);
2413 } else if (!t->byref && (t->type == MONO_TYPE_R8)) {
2416 /* This should work for soft-float as well */
2418 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORER8_MEMBASE_REG, ARMREG_SP, (cfg->param_area - 8), in->dreg);
2419 creg = mono_alloc_ireg (cfg);
2420 mono_call_inst_add_outarg_reg (cfg, call, creg, ARMREG_R3, FALSE);
2421 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOAD_MEMBASE, creg, ARMREG_SP, (cfg->param_area - 8));
2422 creg = mono_alloc_ireg (cfg);
2423 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOAD_MEMBASE, creg, ARMREG_SP, (cfg->param_area - 4));
2424 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, ARMREG_SP, ainfo->offset, creg);
2425 cfg->flags |= MONO_CFG_HAS_FPOUT;
2427 g_assert_not_reached ();
2431 int fdreg = mono_alloc_freg (cfg);
2433 if (ainfo->size == 8) {
2434 MONO_INST_NEW (cfg, ins, OP_FMOVE);
2435 ins->sreg1 = in->dreg;
2437 MONO_ADD_INS (cfg->cbb, ins);
2439 mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, ainfo->reg, TRUE);
2444 * Mono's register allocator doesn't speak single-precision registers that
2445 * overlap double-precision registers (i.e. armhf). So we have to work around
2446 * the register allocator and load the value from memory manually.
2448 * So we create a variable for the float argument and an instruction to store
2449 * the argument into the variable. We then store the list of these arguments
2450 * in cfg->float_args. This list is then used by emit_float_args later to
2451 * pass the arguments in the various call opcodes.
2453 * This is not very nice, and we should really try to fix the allocator.
2456 MonoInst *float_arg = mono_compile_create_var (cfg, &mono_defaults.single_class->byval_arg, OP_LOCAL);
2458 /* Make sure the instruction isn't seen as pointless and removed.
2460 float_arg->flags |= MONO_INST_VOLATILE;
2462 MONO_EMIT_NEW_UNALU (cfg, OP_FMOVE, float_arg->dreg, in->dreg);
2464 /* We use the dreg to look up the instruction later. The hreg is used to
2465 * emit the instruction that loads the value into the FP reg.
2467 fad = mono_mempool_alloc0 (cfg->mempool, sizeof (FloatArgData));
2468 fad->vreg = float_arg->dreg;
2469 fad->hreg = ainfo->reg;
2471 call->float_args = g_slist_append_mempool (cfg->mempool, call->float_args, fad);
2474 call->used_iregs |= 1 << ainfo->reg;
2475 cfg->flags |= MONO_CFG_HAS_FPOUT;
2479 g_assert_not_reached ();
2483 /* Handle the case where there are no implicit arguments */
2484 if (!sig->pinvoke && (sig->call_convention == MONO_CALL_VARARG) && (n == sig->sentinelpos))
2485 emit_sig_cookie (cfg, call, cinfo);
2487 if (cinfo->ret.storage == RegTypeStructByVal) {
2488 /* The JIT will transform this into a normal call */
2489 call->vret_in_reg = TRUE;
2490 } else if (cinfo->vtype_retaddr) {
2492 MONO_INST_NEW (cfg, vtarg, OP_MOVE);
2493 vtarg->sreg1 = call->vret_var->dreg;
2494 vtarg->dreg = mono_alloc_preg (cfg);
2495 MONO_ADD_INS (cfg->cbb, vtarg);
2497 mono_call_inst_add_outarg_reg (cfg, call, vtarg->dreg, cinfo->ret.reg, FALSE);
2500 call->stack_usage = cinfo->stack_usage;
2506 mono_arch_emit_outarg_vt (MonoCompile *cfg, MonoInst *ins, MonoInst *src)
2508 MonoCallInst *call = (MonoCallInst*)ins->inst_p0;
2509 ArgInfo *ainfo = ins->inst_p1;
2510 int ovf_size = ainfo->vtsize;
2511 int doffset = ainfo->offset;
2512 int struct_size = ainfo->struct_size;
2513 int i, soffset, dreg, tmpreg;
2515 if (ainfo->storage == RegTypeGSharedVtInReg) {
2517 mono_call_inst_add_outarg_reg (cfg, call, src->dreg, ainfo->reg, FALSE);
2520 if (ainfo->storage == RegTypeGSharedVtOnStack) {
2521 /* Pass by addr on stack */
2522 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, ARMREG_SP, ainfo->offset, src->dreg);
2527 for (i = 0; i < ainfo->size; ++i) {
2528 dreg = mono_alloc_ireg (cfg);
2529 switch (struct_size) {
2531 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, dreg, src->dreg, soffset);
2534 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, dreg, src->dreg, soffset);
2537 tmpreg = mono_alloc_ireg (cfg);
2538 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, dreg, src->dreg, soffset);
2539 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, tmpreg, src->dreg, soffset + 1);
2540 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, tmpreg, tmpreg, 8);
2541 MONO_EMIT_NEW_BIALU (cfg, OP_IOR, dreg, dreg, tmpreg);
2542 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, tmpreg, src->dreg, soffset + 2);
2543 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, tmpreg, tmpreg, 16);
2544 MONO_EMIT_NEW_BIALU (cfg, OP_IOR, dreg, dreg, tmpreg);
2547 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, dreg, src->dreg, soffset);
2550 mono_call_inst_add_outarg_reg (cfg, call, dreg, ainfo->reg + i, FALSE);
2551 soffset += sizeof (gpointer);
2552 struct_size -= sizeof (gpointer);
2554 //g_print ("vt size: %d at R%d + %d\n", doffset, vt->inst_basereg, vt->inst_offset);
2556 mini_emit_memcpy (cfg, ARMREG_SP, doffset, src->dreg, soffset, MIN (ovf_size * sizeof (gpointer), struct_size), struct_size < 4 ? 1 : 4);
2560 mono_arch_emit_setret (MonoCompile *cfg, MonoMethod *method, MonoInst *val)
2562 MonoType *ret = mini_type_get_underlying_type (cfg->generic_sharing_context, mono_method_signature (method)->ret);
2565 if (ret->type == MONO_TYPE_I8 || ret->type == MONO_TYPE_U8) {
2568 if (COMPILE_LLVM (cfg)) {
2569 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, cfg->ret->dreg, val->dreg);
2571 MONO_INST_NEW (cfg, ins, OP_SETLRET);
2572 ins->sreg1 = val->dreg + 1;
2573 ins->sreg2 = val->dreg + 2;
2574 MONO_ADD_INS (cfg->cbb, ins);
2579 case MONO_ARM_FPU_NONE:
2580 if (ret->type == MONO_TYPE_R8) {
2583 MONO_INST_NEW (cfg, ins, OP_SETFRET);
2584 ins->dreg = cfg->ret->dreg;
2585 ins->sreg1 = val->dreg;
2586 MONO_ADD_INS (cfg->cbb, ins);
2589 if (ret->type == MONO_TYPE_R4) {
2590 /* Already converted to an int in method_to_ir () */
2591 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, cfg->ret->dreg, val->dreg);
2595 case MONO_ARM_FPU_VFP:
2596 case MONO_ARM_FPU_VFP_HARD:
2597 if (ret->type == MONO_TYPE_R8 || ret->type == MONO_TYPE_R4) {
2600 MONO_INST_NEW (cfg, ins, OP_SETFRET);
2601 ins->dreg = cfg->ret->dreg;
2602 ins->sreg1 = val->dreg;
2603 MONO_ADD_INS (cfg->cbb, ins);
2608 g_assert_not_reached ();
2612 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, cfg->ret->dreg, val->dreg);
2615 #endif /* #ifndef DISABLE_JIT */
2618 mono_arch_is_inst_imm (gint64 imm)
2624 MonoMethodSignature *sig;
2627 MonoType **param_types;
2631 dyn_call_supported (CallInfo *cinfo, MonoMethodSignature *sig)
2635 if (sig->hasthis + sig->param_count > PARAM_REGS + DYN_CALL_STACK_ARGS)
2638 switch (cinfo->ret.storage) {
2640 case RegTypeGeneral:
2641 case RegTypeIRegPair:
2642 case RegTypeStructByAddr:
2653 for (i = 0; i < cinfo->nargs; ++i) {
2654 ArgInfo *ainfo = &cinfo->args [i];
2657 switch (ainfo->storage) {
2658 case RegTypeGeneral:
2660 case RegTypeIRegPair:
2663 if (ainfo->offset >= (DYN_CALL_STACK_ARGS * sizeof (gpointer)))
2666 case RegTypeStructByVal:
2667 if (ainfo->size == 0)
2668 last_slot = PARAM_REGS + (ainfo->offset / 4) + ainfo->vtsize;
2670 last_slot = ainfo->reg + ainfo->size + ainfo->vtsize;
2671 if (last_slot >= PARAM_REGS + DYN_CALL_STACK_ARGS)
2679 // FIXME: Can't use cinfo only as it doesn't contain info about I8/float */
2680 for (i = 0; i < sig->param_count; ++i) {
2681 MonoType *t = sig->params [i];
2686 t = mini_replace_type (t);
2709 mono_arch_dyn_call_prepare (MonoMethodSignature *sig)
2711 ArchDynCallInfo *info;
2715 cinfo = get_call_info (NULL, NULL, sig);
2717 if (!dyn_call_supported (cinfo, sig)) {
2722 info = g_new0 (ArchDynCallInfo, 1);
2723 // FIXME: Preprocess the info to speed up start_dyn_call ()
2725 info->cinfo = cinfo;
2726 info->rtype = mini_replace_type (sig->ret);
2727 info->param_types = g_new0 (MonoType*, sig->param_count);
2728 for (i = 0; i < sig->param_count; ++i)
2729 info->param_types [i] = mini_replace_type (sig->params [i]);
2731 return (MonoDynCallInfo*)info;
2735 mono_arch_dyn_call_free (MonoDynCallInfo *info)
2737 ArchDynCallInfo *ainfo = (ArchDynCallInfo*)info;
2739 g_free (ainfo->cinfo);
2744 mono_arch_start_dyn_call (MonoDynCallInfo *info, gpointer **args, guint8 *ret, guint8 *buf, int buf_len)
2746 ArchDynCallInfo *dinfo = (ArchDynCallInfo*)info;
2747 DynCallArgs *p = (DynCallArgs*)buf;
2748 int arg_index, greg, i, j, pindex;
2749 MonoMethodSignature *sig = dinfo->sig;
2751 g_assert (buf_len >= sizeof (DynCallArgs));
2760 if (sig->hasthis || dinfo->cinfo->vret_arg_index == 1) {
2761 p->regs [greg ++] = (mgreg_t)*(args [arg_index ++]);
2766 if (dinfo->cinfo->vtype_retaddr)
2767 p->regs [greg ++] = (mgreg_t)ret;
2769 for (i = pindex; i < sig->param_count; i++) {
2770 MonoType *t = dinfo->param_types [i];
2771 gpointer *arg = args [arg_index ++];
2772 ArgInfo *ainfo = &dinfo->cinfo->args [i + sig->hasthis];
2775 if (ainfo->storage == RegTypeGeneral || ainfo->storage == RegTypeIRegPair || ainfo->storage == RegTypeStructByVal)
2777 else if (ainfo->storage == RegTypeBase)
2778 slot = PARAM_REGS + (ainfo->offset / 4);
2780 g_assert_not_reached ();
2783 p->regs [slot] = (mgreg_t)*arg;
2788 case MONO_TYPE_STRING:
2789 case MONO_TYPE_CLASS:
2790 case MONO_TYPE_ARRAY:
2791 case MONO_TYPE_SZARRAY:
2792 case MONO_TYPE_OBJECT:
2796 p->regs [slot] = (mgreg_t)*arg;
2798 case MONO_TYPE_BOOLEAN:
2800 p->regs [slot] = *(guint8*)arg;
2803 p->regs [slot] = *(gint8*)arg;
2806 p->regs [slot] = *(gint16*)arg;
2809 case MONO_TYPE_CHAR:
2810 p->regs [slot] = *(guint16*)arg;
2813 p->regs [slot] = *(gint32*)arg;
2816 p->regs [slot] = *(guint32*)arg;
2820 p->regs [slot ++] = (mgreg_t)arg [0];
2821 p->regs [slot] = (mgreg_t)arg [1];
2824 p->regs [slot] = *(mgreg_t*)arg;
2827 p->regs [slot ++] = (mgreg_t)arg [0];
2828 p->regs [slot] = (mgreg_t)arg [1];
2830 case MONO_TYPE_GENERICINST:
2831 if (MONO_TYPE_IS_REFERENCE (t)) {
2832 p->regs [slot] = (mgreg_t)*arg;
2837 case MONO_TYPE_VALUETYPE:
2838 g_assert (ainfo->storage == RegTypeStructByVal);
2840 if (ainfo->size == 0)
2841 slot = PARAM_REGS + (ainfo->offset / 4);
2845 for (j = 0; j < ainfo->size + ainfo->vtsize; ++j)
2846 p->regs [slot ++] = ((mgreg_t*)arg) [j];
2849 g_assert_not_reached ();
2855 mono_arch_finish_dyn_call (MonoDynCallInfo *info, guint8 *buf)
2857 ArchDynCallInfo *ainfo = (ArchDynCallInfo*)info;
2858 MonoType *ptype = ainfo->rtype;
2859 guint8 *ret = ((DynCallArgs*)buf)->ret;
2860 mgreg_t res = ((DynCallArgs*)buf)->res;
2861 mgreg_t res2 = ((DynCallArgs*)buf)->res2;
2863 switch (ptype->type) {
2864 case MONO_TYPE_VOID:
2865 *(gpointer*)ret = NULL;
2867 case MONO_TYPE_STRING:
2868 case MONO_TYPE_CLASS:
2869 case MONO_TYPE_ARRAY:
2870 case MONO_TYPE_SZARRAY:
2871 case MONO_TYPE_OBJECT:
2875 *(gpointer*)ret = (gpointer)res;
2881 case MONO_TYPE_BOOLEAN:
2882 *(guint8*)ret = res;
2885 *(gint16*)ret = res;
2888 case MONO_TYPE_CHAR:
2889 *(guint16*)ret = res;
2892 *(gint32*)ret = res;
2895 *(guint32*)ret = res;
2899 /* This handles endianness as well */
2900 ((gint32*)ret) [0] = res;
2901 ((gint32*)ret) [1] = res2;
2903 case MONO_TYPE_GENERICINST:
2904 if (MONO_TYPE_IS_REFERENCE (ptype)) {
2905 *(gpointer*)ret = (gpointer)res;
2910 case MONO_TYPE_VALUETYPE:
2911 g_assert (ainfo->cinfo->vtype_retaddr);
2916 *(float*)ret = *(float*)&res;
2918 case MONO_TYPE_R8: {
2925 *(double*)ret = *(double*)®s;
2929 g_assert_not_reached ();
2936 * Allow tracing to work with this interface (with an optional argument)
2940 mono_arch_instrument_prolog (MonoCompile *cfg, void *func, void *p, gboolean enable_arguments)
2944 code = mono_arm_emit_load_imm (code, ARMREG_R0, (guint32)cfg->method);
2945 ARM_MOV_REG_IMM8 (code, ARMREG_R1, 0); /* NULL ebp for now */
2946 code = mono_arm_emit_load_imm (code, ARMREG_R2, (guint32)func);
2947 code = emit_call_reg (code, ARMREG_R2);
2961 mono_arch_instrument_epilog_full (MonoCompile *cfg, void *func, void *p, gboolean enable_arguments, gboolean preserve_argument_registers)
2964 int save_mode = SAVE_NONE;
2966 MonoMethod *method = cfg->method;
2967 MonoType *ret_type = mini_type_get_underlying_type (cfg->generic_sharing_context, mono_method_signature (method)->ret);
2968 int rtype = ret_type->type;
2969 int save_offset = cfg->param_area;
2973 offset = code - cfg->native_code;
2974 /* we need about 16 instructions */
2975 if (offset > (cfg->code_size - 16 * 4)) {
2976 cfg->code_size *= 2;
2977 cfg->native_code = g_realloc (cfg->native_code, cfg->code_size);
2978 code = cfg->native_code + offset;
2981 case MONO_TYPE_VOID:
2982 /* special case string .ctor icall */
2983 if (strcmp (".ctor", method->name) && method->klass == mono_defaults.string_class)
2984 save_mode = SAVE_ONE;
2986 save_mode = SAVE_NONE;
2990 save_mode = SAVE_TWO;
2994 save_mode = SAVE_ONE_FP;
2996 save_mode = SAVE_ONE;
3000 save_mode = SAVE_TWO_FP;
3002 save_mode = SAVE_TWO;
3004 case MONO_TYPE_GENERICINST:
3005 if (!mono_type_generic_inst_is_valuetype (ret_type)) {
3006 save_mode = SAVE_ONE;
3010 case MONO_TYPE_VALUETYPE:
3011 save_mode = SAVE_STRUCT;
3014 save_mode = SAVE_ONE;
3018 switch (save_mode) {
3020 ARM_STR_IMM (code, ARMREG_R0, cfg->frame_reg, save_offset);
3021 ARM_STR_IMM (code, ARMREG_R1, cfg->frame_reg, save_offset + 4);
3022 if (enable_arguments) {
3023 ARM_MOV_REG_REG (code, ARMREG_R2, ARMREG_R1);
3024 ARM_MOV_REG_REG (code, ARMREG_R1, ARMREG_R0);
3028 ARM_STR_IMM (code, ARMREG_R0, cfg->frame_reg, save_offset);
3029 if (enable_arguments) {
3030 ARM_MOV_REG_REG (code, ARMREG_R1, ARMREG_R0);
3034 ARM_FSTS (code, ARM_VFP_F0, cfg->frame_reg, save_offset);
3035 if (enable_arguments) {
3036 ARM_FMRS (code, ARMREG_R1, ARM_VFP_F0);
3040 ARM_FSTD (code, ARM_VFP_D0, cfg->frame_reg, save_offset);
3041 if (enable_arguments) {
3042 ARM_FMDRR (code, ARMREG_R1, ARMREG_R2, ARM_VFP_D0);
3046 if (enable_arguments) {
3047 /* FIXME: get the actual address */
3048 ARM_MOV_REG_REG (code, ARMREG_R1, ARMREG_R0);
3056 code = mono_arm_emit_load_imm (code, ARMREG_R0, (guint32)cfg->method);
3057 code = mono_arm_emit_load_imm (code, ARMREG_IP, (guint32)func);
3058 code = emit_call_reg (code, ARMREG_IP);
3060 switch (save_mode) {
3062 ARM_LDR_IMM (code, ARMREG_R0, cfg->frame_reg, save_offset);
3063 ARM_LDR_IMM (code, ARMREG_R1, cfg->frame_reg, save_offset + 4);
3066 ARM_LDR_IMM (code, ARMREG_R0, cfg->frame_reg, save_offset);
3069 ARM_FLDS (code, ARM_VFP_F0, cfg->frame_reg, save_offset);
3072 ARM_FLDD (code, ARM_VFP_D0, cfg->frame_reg, save_offset);
3083 * The immediate field for cond branches is big enough for all reasonable methods
3085 #define EMIT_COND_BRANCH_FLAGS(ins,condcode) \
3086 if (0 && ins->inst_true_bb->native_offset) { \
3087 ARM_B_COND (code, (condcode), (code - cfg->native_code + ins->inst_true_bb->native_offset) & 0xffffff); \
3089 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_BB, ins->inst_true_bb); \
3090 ARM_B_COND (code, (condcode), 0); \
3093 #define EMIT_COND_BRANCH(ins,cond) EMIT_COND_BRANCH_FLAGS(ins, branch_cc_table [(cond)])
3095 /* emit an exception if condition is fail
3097 * We assign the extra code used to throw the implicit exceptions
3098 * to cfg->bb_exit as far as the big branch handling is concerned
3100 #define EMIT_COND_SYSTEM_EXCEPTION_FLAGS(condcode,exc_name) \
3102 mono_add_patch_info (cfg, code - cfg->native_code, \
3103 MONO_PATCH_INFO_EXC, exc_name); \
3104 ARM_BL_COND (code, (condcode), 0); \
3107 #define EMIT_COND_SYSTEM_EXCEPTION(cond,exc_name) EMIT_COND_SYSTEM_EXCEPTION_FLAGS(branch_cc_table [(cond)], (exc_name))
3110 mono_arch_peephole_pass_1 (MonoCompile *cfg, MonoBasicBlock *bb)
3115 mono_arch_peephole_pass_2 (MonoCompile *cfg, MonoBasicBlock *bb)
3117 MonoInst *ins, *n, *last_ins = NULL;
3119 MONO_BB_FOR_EACH_INS_SAFE (bb, n, ins) {
3120 switch (ins->opcode) {
3123 /* Already done by an arch-independent pass */
3125 case OP_LOAD_MEMBASE:
3126 case OP_LOADI4_MEMBASE:
3128 * OP_STORE_MEMBASE_REG reg, offset(basereg)
3129 * OP_LOAD_MEMBASE offset(basereg), reg
3131 if (last_ins && (last_ins->opcode == OP_STOREI4_MEMBASE_REG
3132 || last_ins->opcode == OP_STORE_MEMBASE_REG) &&
3133 ins->inst_basereg == last_ins->inst_destbasereg &&
3134 ins->inst_offset == last_ins->inst_offset) {
3135 if (ins->dreg == last_ins->sreg1) {
3136 MONO_DELETE_INS (bb, ins);
3139 //static int c = 0; printf ("MATCHX %s %d\n", cfg->method->name,c++);
3140 ins->opcode = OP_MOVE;
3141 ins->sreg1 = last_ins->sreg1;
3145 * Note: reg1 must be different from the basereg in the second load
3146 * OP_LOAD_MEMBASE offset(basereg), reg1
3147 * OP_LOAD_MEMBASE offset(basereg), reg2
3149 * OP_LOAD_MEMBASE offset(basereg), reg1
3150 * OP_MOVE reg1, reg2
3152 } if (last_ins && (last_ins->opcode == OP_LOADI4_MEMBASE
3153 || last_ins->opcode == OP_LOAD_MEMBASE) &&
3154 ins->inst_basereg != last_ins->dreg &&
3155 ins->inst_basereg == last_ins->inst_basereg &&
3156 ins->inst_offset == last_ins->inst_offset) {
3158 if (ins->dreg == last_ins->dreg) {
3159 MONO_DELETE_INS (bb, ins);
3162 ins->opcode = OP_MOVE;
3163 ins->sreg1 = last_ins->dreg;
3166 //g_assert_not_reached ();
3170 * OP_STORE_MEMBASE_IMM imm, offset(basereg)
3171 * OP_LOAD_MEMBASE offset(basereg), reg
3173 * OP_STORE_MEMBASE_IMM imm, offset(basereg)
3174 * OP_ICONST reg, imm
3176 } else if (last_ins && (last_ins->opcode == OP_STOREI4_MEMBASE_IMM
3177 || last_ins->opcode == OP_STORE_MEMBASE_IMM) &&
3178 ins->inst_basereg == last_ins->inst_destbasereg &&
3179 ins->inst_offset == last_ins->inst_offset) {
3180 //static int c = 0; printf ("MATCHX %s %d\n", cfg->method->name,c++);
3181 ins->opcode = OP_ICONST;
3182 ins->inst_c0 = last_ins->inst_imm;
3183 g_assert_not_reached (); // check this rule
3187 case OP_LOADU1_MEMBASE:
3188 case OP_LOADI1_MEMBASE:
3189 if (last_ins && (last_ins->opcode == OP_STOREI1_MEMBASE_REG) &&
3190 ins->inst_basereg == last_ins->inst_destbasereg &&
3191 ins->inst_offset == last_ins->inst_offset) {
3192 ins->opcode = (ins->opcode == OP_LOADI1_MEMBASE) ? OP_ICONV_TO_I1 : OP_ICONV_TO_U1;
3193 ins->sreg1 = last_ins->sreg1;
3196 case OP_LOADU2_MEMBASE:
3197 case OP_LOADI2_MEMBASE:
3198 if (last_ins && (last_ins->opcode == OP_STOREI2_MEMBASE_REG) &&
3199 ins->inst_basereg == last_ins->inst_destbasereg &&
3200 ins->inst_offset == last_ins->inst_offset) {
3201 ins->opcode = (ins->opcode == OP_LOADI2_MEMBASE) ? OP_ICONV_TO_I2 : OP_ICONV_TO_U2;
3202 ins->sreg1 = last_ins->sreg1;
3206 ins->opcode = OP_MOVE;
3210 if (ins->dreg == ins->sreg1) {
3211 MONO_DELETE_INS (bb, ins);
3215 * OP_MOVE sreg, dreg
3216 * OP_MOVE dreg, sreg
3218 if (last_ins && last_ins->opcode == OP_MOVE &&
3219 ins->sreg1 == last_ins->dreg &&
3220 ins->dreg == last_ins->sreg1) {
3221 MONO_DELETE_INS (bb, ins);
3229 bb->last_ins = last_ins;
3233 * the branch_cc_table should maintain the order of these
3247 branch_cc_table [] = {
3261 #define ADD_NEW_INS(cfg,dest,op) do { \
3262 MONO_INST_NEW ((cfg), (dest), (op)); \
3263 mono_bblock_insert_before_ins (bb, ins, (dest)); \
3267 map_to_reg_reg_op (int op)
3276 case OP_COMPARE_IMM:
3278 case OP_ICOMPARE_IMM:
3292 case OP_LOAD_MEMBASE:
3293 return OP_LOAD_MEMINDEX;
3294 case OP_LOADI4_MEMBASE:
3295 return OP_LOADI4_MEMINDEX;
3296 case OP_LOADU4_MEMBASE:
3297 return OP_LOADU4_MEMINDEX;
3298 case OP_LOADU1_MEMBASE:
3299 return OP_LOADU1_MEMINDEX;
3300 case OP_LOADI2_MEMBASE:
3301 return OP_LOADI2_MEMINDEX;
3302 case OP_LOADU2_MEMBASE:
3303 return OP_LOADU2_MEMINDEX;
3304 case OP_LOADI1_MEMBASE:
3305 return OP_LOADI1_MEMINDEX;
3306 case OP_STOREI1_MEMBASE_REG:
3307 return OP_STOREI1_MEMINDEX;
3308 case OP_STOREI2_MEMBASE_REG:
3309 return OP_STOREI2_MEMINDEX;
3310 case OP_STOREI4_MEMBASE_REG:
3311 return OP_STOREI4_MEMINDEX;
3312 case OP_STORE_MEMBASE_REG:
3313 return OP_STORE_MEMINDEX;
3314 case OP_STORER4_MEMBASE_REG:
3315 return OP_STORER4_MEMINDEX;
3316 case OP_STORER8_MEMBASE_REG:
3317 return OP_STORER8_MEMINDEX;
3318 case OP_STORE_MEMBASE_IMM:
3319 return OP_STORE_MEMBASE_REG;
3320 case OP_STOREI1_MEMBASE_IMM:
3321 return OP_STOREI1_MEMBASE_REG;
3322 case OP_STOREI2_MEMBASE_IMM:
3323 return OP_STOREI2_MEMBASE_REG;
3324 case OP_STOREI4_MEMBASE_IMM:
3325 return OP_STOREI4_MEMBASE_REG;
3327 g_assert_not_reached ();
3331 * Remove from the instruction list the instructions that can't be
3332 * represented with very simple instructions with no register
3336 mono_arch_lowering_pass (MonoCompile *cfg, MonoBasicBlock *bb)
3338 MonoInst *ins, *temp, *last_ins = NULL;
3339 int rot_amount, imm8, low_imm;
3341 MONO_BB_FOR_EACH_INS (bb, ins) {
3343 switch (ins->opcode) {
3347 case OP_COMPARE_IMM:
3348 case OP_ICOMPARE_IMM:
3362 if ((imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount)) < 0) {
3363 ADD_NEW_INS (cfg, temp, OP_ICONST);
3364 temp->inst_c0 = ins->inst_imm;
3365 temp->dreg = mono_alloc_ireg (cfg);
3366 ins->sreg2 = temp->dreg;
3367 ins->opcode = mono_op_imm_to_op (ins->opcode);
3369 if (ins->opcode == OP_SBB || ins->opcode == OP_ISBB || ins->opcode == OP_SUBCC)
3375 if (ins->inst_imm == 1) {
3376 ins->opcode = OP_MOVE;
3379 if (ins->inst_imm == 0) {
3380 ins->opcode = OP_ICONST;
3384 imm8 = mono_is_power_of_two (ins->inst_imm);
3386 ins->opcode = OP_SHL_IMM;
3387 ins->inst_imm = imm8;
3390 ADD_NEW_INS (cfg, temp, OP_ICONST);
3391 temp->inst_c0 = ins->inst_imm;
3392 temp->dreg = mono_alloc_ireg (cfg);
3393 ins->sreg2 = temp->dreg;
3394 ins->opcode = OP_IMUL;
3400 if (ins->next && (ins->next->opcode == OP_COND_EXC_C || ins->next->opcode == OP_COND_EXC_IC))
3401 /* ARM sets the C flag to 1 if there was _no_ overflow */
3402 ins->next->opcode = OP_COND_EXC_NC;
3405 case OP_IDIV_UN_IMM:
3407 case OP_IREM_UN_IMM:
3408 ADD_NEW_INS (cfg, temp, OP_ICONST);
3409 temp->inst_c0 = ins->inst_imm;
3410 temp->dreg = mono_alloc_ireg (cfg);
3411 ins->sreg2 = temp->dreg;
3412 ins->opcode = mono_op_imm_to_op (ins->opcode);
3414 case OP_LOCALLOC_IMM:
3415 ADD_NEW_INS (cfg, temp, OP_ICONST);
3416 temp->inst_c0 = ins->inst_imm;
3417 temp->dreg = mono_alloc_ireg (cfg);
3418 ins->sreg1 = temp->dreg;
3419 ins->opcode = OP_LOCALLOC;
3421 case OP_LOAD_MEMBASE:
3422 case OP_LOADI4_MEMBASE:
3423 case OP_LOADU4_MEMBASE:
3424 case OP_LOADU1_MEMBASE:
3425 /* we can do two things: load the immed in a register
3426 * and use an indexed load, or see if the immed can be
3427 * represented as an ad_imm + a load with a smaller offset
3428 * that fits. We just do the first for now, optimize later.
3430 if (arm_is_imm12 (ins->inst_offset))
3432 ADD_NEW_INS (cfg, temp, OP_ICONST);
3433 temp->inst_c0 = ins->inst_offset;
3434 temp->dreg = mono_alloc_ireg (cfg);
3435 ins->sreg2 = temp->dreg;
3436 ins->opcode = map_to_reg_reg_op (ins->opcode);
3438 case OP_LOADI2_MEMBASE:
3439 case OP_LOADU2_MEMBASE:
3440 case OP_LOADI1_MEMBASE:
3441 if (arm_is_imm8 (ins->inst_offset))
3443 ADD_NEW_INS (cfg, temp, OP_ICONST);
3444 temp->inst_c0 = ins->inst_offset;
3445 temp->dreg = mono_alloc_ireg (cfg);
3446 ins->sreg2 = temp->dreg;
3447 ins->opcode = map_to_reg_reg_op (ins->opcode);
3449 case OP_LOADR4_MEMBASE:
3450 case OP_LOADR8_MEMBASE:
3451 if (arm_is_fpimm8 (ins->inst_offset))
3453 low_imm = ins->inst_offset & 0x1ff;
3454 if ((imm8 = mono_arm_is_rotated_imm8 (ins->inst_offset & ~0x1ff, &rot_amount)) >= 0) {
3455 ADD_NEW_INS (cfg, temp, OP_ADD_IMM);
3456 temp->inst_imm = ins->inst_offset & ~0x1ff;
3457 temp->sreg1 = ins->inst_basereg;
3458 temp->dreg = mono_alloc_ireg (cfg);
3459 ins->inst_basereg = temp->dreg;
3460 ins->inst_offset = low_imm;
3464 ADD_NEW_INS (cfg, temp, OP_ICONST);
3465 temp->inst_c0 = ins->inst_offset;
3466 temp->dreg = mono_alloc_ireg (cfg);
3468 ADD_NEW_INS (cfg, add_ins, OP_IADD);
3469 add_ins->sreg1 = ins->inst_basereg;
3470 add_ins->sreg2 = temp->dreg;
3471 add_ins->dreg = mono_alloc_ireg (cfg);
3473 ins->inst_basereg = add_ins->dreg;
3474 ins->inst_offset = 0;
3477 case OP_STORE_MEMBASE_REG:
3478 case OP_STOREI4_MEMBASE_REG:
3479 case OP_STOREI1_MEMBASE_REG:
3480 if (arm_is_imm12 (ins->inst_offset))
3482 ADD_NEW_INS (cfg, temp, OP_ICONST);
3483 temp->inst_c0 = ins->inst_offset;
3484 temp->dreg = mono_alloc_ireg (cfg);
3485 ins->sreg2 = temp->dreg;
3486 ins->opcode = map_to_reg_reg_op (ins->opcode);
3488 case OP_STOREI2_MEMBASE_REG:
3489 if (arm_is_imm8 (ins->inst_offset))
3491 ADD_NEW_INS (cfg, temp, OP_ICONST);
3492 temp->inst_c0 = ins->inst_offset;
3493 temp->dreg = mono_alloc_ireg (cfg);
3494 ins->sreg2 = temp->dreg;
3495 ins->opcode = map_to_reg_reg_op (ins->opcode);
3497 case OP_STORER4_MEMBASE_REG:
3498 case OP_STORER8_MEMBASE_REG:
3499 if (arm_is_fpimm8 (ins->inst_offset))
3501 low_imm = ins->inst_offset & 0x1ff;
3502 if ((imm8 = mono_arm_is_rotated_imm8 (ins->inst_offset & ~ 0x1ff, &rot_amount)) >= 0 && arm_is_fpimm8 (low_imm)) {
3503 ADD_NEW_INS (cfg, temp, OP_ADD_IMM);
3504 temp->inst_imm = ins->inst_offset & ~0x1ff;
3505 temp->sreg1 = ins->inst_destbasereg;
3506 temp->dreg = mono_alloc_ireg (cfg);
3507 ins->inst_destbasereg = temp->dreg;
3508 ins->inst_offset = low_imm;
3512 ADD_NEW_INS (cfg, temp, OP_ICONST);
3513 temp->inst_c0 = ins->inst_offset;
3514 temp->dreg = mono_alloc_ireg (cfg);
3516 ADD_NEW_INS (cfg, add_ins, OP_IADD);
3517 add_ins->sreg1 = ins->inst_destbasereg;
3518 add_ins->sreg2 = temp->dreg;
3519 add_ins->dreg = mono_alloc_ireg (cfg);
3521 ins->inst_destbasereg = add_ins->dreg;
3522 ins->inst_offset = 0;
3525 case OP_STORE_MEMBASE_IMM:
3526 case OP_STOREI1_MEMBASE_IMM:
3527 case OP_STOREI2_MEMBASE_IMM:
3528 case OP_STOREI4_MEMBASE_IMM:
3529 ADD_NEW_INS (cfg, temp, OP_ICONST);
3530 temp->inst_c0 = ins->inst_imm;
3531 temp->dreg = mono_alloc_ireg (cfg);
3532 ins->sreg1 = temp->dreg;
3533 ins->opcode = map_to_reg_reg_op (ins->opcode);
3535 goto loop_start; /* make it handle the possibly big ins->inst_offset */
3537 gboolean swap = FALSE;
3541 /* Optimized away */
3546 /* Some fp compares require swapped operands */
3547 switch (ins->next->opcode) {
3549 ins->next->opcode = OP_FBLT;
3553 ins->next->opcode = OP_FBLT_UN;
3557 ins->next->opcode = OP_FBGE;
3561 ins->next->opcode = OP_FBGE_UN;
3569 ins->sreg1 = ins->sreg2;
3578 bb->last_ins = last_ins;
3579 bb->max_vreg = cfg->next_vreg;
3583 mono_arch_decompose_long_opts (MonoCompile *cfg, MonoInst *long_ins)
3587 if (long_ins->opcode == OP_LNEG) {
3589 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ARM_RSBS_IMM, ins->dreg + 1, ins->sreg1 + 1, 0);
3590 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ARM_RSC_IMM, ins->dreg + 2, ins->sreg1 + 2, 0);
3596 emit_float_to_int (MonoCompile *cfg, guchar *code, int dreg, int sreg, int size, gboolean is_signed)
3598 /* sreg is a float, dreg is an integer reg */
3600 code = mono_arm_emit_vfp_scratch_save (cfg, code, vfp_scratch1);
3602 ARM_TOSIZD (code, vfp_scratch1, sreg);
3604 ARM_TOUIZD (code, vfp_scratch1, sreg);
3605 ARM_FMRS (code, dreg, vfp_scratch1);
3606 code = mono_arm_emit_vfp_scratch_restore (cfg, code, vfp_scratch1);
3610 ARM_AND_REG_IMM8 (code, dreg, dreg, 0xff);
3611 else if (size == 2) {
3612 ARM_SHL_IMM (code, dreg, dreg, 16);
3613 ARM_SHR_IMM (code, dreg, dreg, 16);
3617 ARM_SHL_IMM (code, dreg, dreg, 24);
3618 ARM_SAR_IMM (code, dreg, dreg, 24);
3619 } else if (size == 2) {
3620 ARM_SHL_IMM (code, dreg, dreg, 16);
3621 ARM_SAR_IMM (code, dreg, dreg, 16);
3627 #endif /* #ifndef DISABLE_JIT */
3631 const guchar *target;
3636 #define is_call_imm(diff) ((gint)(diff) >= -33554432 && (gint)(diff) <= 33554431)
3639 search_thunk_slot (void *data, int csize, int bsize, void *user_data) {
3640 PatchData *pdata = (PatchData*)user_data;
3641 guchar *code = data;
3642 guint32 *thunks = data;
3643 guint32 *endthunks = (guint32*)(code + bsize);
3645 int difflow, diffhigh;
3647 /* always ensure a call from pdata->code can reach to the thunks without further thunks */
3648 difflow = (char*)pdata->code - (char*)thunks;
3649 diffhigh = (char*)pdata->code - (char*)endthunks;
3650 if (!((is_call_imm (thunks) && is_call_imm (endthunks)) || (is_call_imm (difflow) && is_call_imm (diffhigh))))
3654 * The thunk is composed of 3 words:
3655 * load constant from thunks [2] into ARM_IP
3658 * Note that the LR register is already setup
3660 //g_print ("thunk nentries: %d\n", ((char*)endthunks - (char*)thunks)/16);
3661 if ((pdata->found == 2) || (pdata->code >= code && pdata->code <= code + csize)) {
3662 while (thunks < endthunks) {
3663 //g_print ("looking for target: %p at %p (%08x-%08x)\n", pdata->target, thunks, thunks [0], thunks [1]);
3664 if (thunks [2] == (guint32)pdata->target) {
3665 arm_patch (pdata->code, (guchar*)thunks);
3666 mono_arch_flush_icache (pdata->code, 4);
3669 } else if ((thunks [0] == 0) && (thunks [1] == 0) && (thunks [2] == 0)) {
3670 /* found a free slot instead: emit thunk */
3671 /* ARMREG_IP is fine to use since this can't be an IMT call
3674 code = (guchar*)thunks;
3675 ARM_LDR_IMM (code, ARMREG_IP, ARMREG_PC, 0);
3676 if (thumb_supported)
3677 ARM_BX (code, ARMREG_IP);
3679 ARM_MOV_REG_REG (code, ARMREG_PC, ARMREG_IP);
3680 thunks [2] = (guint32)pdata->target;
3681 mono_arch_flush_icache ((guchar*)thunks, 12);
3683 arm_patch (pdata->code, (guchar*)thunks);
3684 mono_arch_flush_icache (pdata->code, 4);
3688 /* skip 12 bytes, the size of the thunk */
3692 //g_print ("failed thunk lookup for %p from %p at %p (%d entries)\n", pdata->target, pdata->code, data, count);
3698 handle_thunk (MonoDomain *domain, int absolute, guchar *code, const guchar *target, MonoCodeManager *dyn_code_mp)
3703 domain = mono_domain_get ();
3706 pdata.target = target;
3707 pdata.absolute = absolute;
3711 mono_code_manager_foreach (dyn_code_mp, search_thunk_slot, &pdata);
3714 if (pdata.found != 1) {
3715 mono_domain_lock (domain);
3716 mono_domain_code_foreach (domain, search_thunk_slot, &pdata);
3719 /* this uses the first available slot */
3721 mono_domain_code_foreach (domain, search_thunk_slot, &pdata);
3723 mono_domain_unlock (domain);
3726 if (pdata.found != 1) {
3728 GHashTableIter iter;
3729 MonoJitDynamicMethodInfo *ji;
3732 * This might be a dynamic method, search its code manager. We can only
3733 * use the dynamic method containing CODE, since the others might be freed later.
3737 mono_domain_lock (domain);
3738 hash = domain_jit_info (domain)->dynamic_code_hash;
3740 /* FIXME: Speed this up */
3741 g_hash_table_iter_init (&iter, hash);
3742 while (g_hash_table_iter_next (&iter, NULL, (gpointer*)&ji)) {
3743 mono_code_manager_foreach (ji->code_mp, search_thunk_slot, &pdata);
3744 if (pdata.found == 1)
3748 mono_domain_unlock (domain);
3750 if (pdata.found != 1)
3751 g_print ("thunk failed for %p from %p\n", target, code);
3752 g_assert (pdata.found == 1);
3756 arm_patch_general (MonoDomain *domain, guchar *code, const guchar *target, MonoCodeManager *dyn_code_mp)
3758 guint32 *code32 = (void*)code;
3759 guint32 ins = *code32;
3760 guint32 prim = (ins >> 25) & 7;
3761 guint32 tval = GPOINTER_TO_UINT (target);
3763 //g_print ("patching 0x%08x (0x%08x) to point to 0x%08x\n", code, ins, target);
3764 if (prim == 5) { /* 101b */
3765 /* the diff starts 8 bytes from the branch opcode */
3766 gint diff = target - code - 8;
3768 gint tmask = 0xffffffff;
3769 if (tval & 1) { /* entering thumb mode */
3770 diff = target - 1 - code - 8;
3771 g_assert (thumb_supported);
3772 tbits = 0xf << 28; /* bl->blx bit pattern */
3773 g_assert ((ins & (1 << 24))); /* it must be a bl, not b instruction */
3774 /* this low bit of the displacement is moved to bit 24 in the instruction encoding */
3778 tmask = ~(1 << 24); /* clear the link bit */
3779 /*g_print ("blx to thumb: target: %p, code: %p, diff: %d, mask: %x\n", target, code, diff, tmask);*/
3784 if (diff <= 33554431) {
3786 ins = (ins & 0xff000000) | diff;
3788 *code32 = ins | tbits;
3792 /* diff between 0 and -33554432 */
3793 if (diff >= -33554432) {
3795 ins = (ins & 0xff000000) | (diff & ~0xff000000);
3797 *code32 = ins | tbits;
3802 handle_thunk (domain, TRUE, code, target, dyn_code_mp);
3806 #ifdef USE_JUMP_TABLES
3808 gpointer *jte = mono_jumptable_get_entry (code);
3810 jte [0] = (gpointer) target;
3814 * The alternative call sequences looks like this:
3816 * ldr ip, [pc] // loads the address constant
3817 * b 1f // jumps around the constant
3818 * address constant embedded in the code
3823 * There are two cases for patching:
3824 * a) at the end of method emission: in this case code points to the start
3825 * of the call sequence
3826 * b) during runtime patching of the call site: in this case code points
3827 * to the mov pc, ip instruction
3829 * We have to handle also the thunk jump code sequence:
3833 * address constant // execution never reaches here
3835 if ((ins & 0x0ffffff0) == 0x12fff10) {
3836 /* Branch and exchange: the address is constructed in a reg
3837 * We can patch BX when the code sequence is the following:
3838 * ldr ip, [pc, #0] ; 0x8
3845 guint8 *emit = (guint8*)ccode;
3846 ARM_LDR_IMM (emit, ARMREG_IP, ARMREG_PC, 0);
3848 ARM_MOV_REG_REG (emit, ARMREG_LR, ARMREG_PC);
3849 ARM_BX (emit, ARMREG_IP);
3851 /*patching from magic trampoline*/
3852 if (ins == ccode [3]) {
3853 g_assert (code32 [-4] == ccode [0]);
3854 g_assert (code32 [-3] == ccode [1]);
3855 g_assert (code32 [-1] == ccode [2]);
3856 code32 [-2] = (guint32)target;
3859 /*patching from JIT*/
3860 if (ins == ccode [0]) {
3861 g_assert (code32 [1] == ccode [1]);
3862 g_assert (code32 [3] == ccode [2]);
3863 g_assert (code32 [4] == ccode [3]);
3864 code32 [2] = (guint32)target;
3867 g_assert_not_reached ();
3868 } else if ((ins & 0x0ffffff0) == 0x12fff30) {
3876 guint8 *emit = (guint8*)ccode;
3877 ARM_LDR_IMM (emit, ARMREG_IP, ARMREG_PC, 0);
3879 ARM_BLX_REG (emit, ARMREG_IP);
3881 g_assert (code32 [-3] == ccode [0]);
3882 g_assert (code32 [-2] == ccode [1]);
3883 g_assert (code32 [0] == ccode [2]);
3885 code32 [-1] = (guint32)target;
3888 guint32 *tmp = ccode;
3889 guint8 *emit = (guint8*)tmp;
3890 ARM_LDR_IMM (emit, ARMREG_IP, ARMREG_PC, 0);
3891 ARM_MOV_REG_REG (emit, ARMREG_LR, ARMREG_PC);
3892 ARM_MOV_REG_REG (emit, ARMREG_PC, ARMREG_IP);
3893 ARM_BX (emit, ARMREG_IP);
3894 if (ins == ccode [2]) {
3895 g_assert_not_reached (); // should be -2 ...
3896 code32 [-1] = (guint32)target;
3899 if (ins == ccode [0]) {
3900 /* handles both thunk jump code and the far call sequence */
3901 code32 [2] = (guint32)target;
3904 g_assert_not_reached ();
3906 // g_print ("patched with 0x%08x\n", ins);
3911 arm_patch (guchar *code, const guchar *target)
3913 arm_patch_general (NULL, code, target, NULL);
3917 * Return the >= 0 uimm8 value if val can be represented with a byte + rotation
3918 * (with the rotation amount in *rot_amount. rot_amount is already adjusted
3919 * to be used with the emit macros.
3920 * Return -1 otherwise.
3923 mono_arm_is_rotated_imm8 (guint32 val, gint *rot_amount)
3926 for (i = 0; i < 31; i+= 2) {
3927 res = (val << (32 - i)) | (val >> i);
3930 *rot_amount = i? 32 - i: 0;
3937 * Emits in code a sequence of instructions that load the value 'val'
3938 * into the dreg register. Uses at most 4 instructions.
3941 mono_arm_emit_load_imm (guint8 *code, int dreg, guint32 val)
3943 int imm8, rot_amount;
3945 ARM_LDR_IMM (code, dreg, ARMREG_PC, 0);
3946 /* skip the constant pool */
3952 if ((imm8 = mono_arm_is_rotated_imm8 (val, &rot_amount)) >= 0) {
3953 ARM_MOV_REG_IMM (code, dreg, imm8, rot_amount);
3954 } else if ((imm8 = mono_arm_is_rotated_imm8 (~val, &rot_amount)) >= 0) {
3955 ARM_MVN_REG_IMM (code, dreg, imm8, rot_amount);
3958 ARM_MOVW_REG_IMM (code, dreg, val & 0xffff);
3960 ARM_MOVT_REG_IMM (code, dreg, (val >> 16) & 0xffff);
3964 ARM_MOV_REG_IMM8 (code, dreg, (val & 0xFF));
3966 ARM_ADD_REG_IMM (code, dreg, dreg, (val & 0xFF00) >> 8, 24);
3968 if (val & 0xFF0000) {
3969 ARM_ADD_REG_IMM (code, dreg, dreg, (val & 0xFF0000) >> 16, 16);
3971 if (val & 0xFF000000) {
3972 ARM_ADD_REG_IMM (code, dreg, dreg, (val & 0xFF000000) >> 24, 8);
3974 } else if (val & 0xFF00) {
3975 ARM_MOV_REG_IMM (code, dreg, (val & 0xFF00) >> 8, 24);
3976 if (val & 0xFF0000) {
3977 ARM_ADD_REG_IMM (code, dreg, dreg, (val & 0xFF0000) >> 16, 16);
3979 if (val & 0xFF000000) {
3980 ARM_ADD_REG_IMM (code, dreg, dreg, (val & 0xFF000000) >> 24, 8);
3982 } else if (val & 0xFF0000) {
3983 ARM_MOV_REG_IMM (code, dreg, (val & 0xFF0000) >> 16, 16);
3984 if (val & 0xFF000000) {
3985 ARM_ADD_REG_IMM (code, dreg, dreg, (val & 0xFF000000) >> 24, 8);
3988 //g_assert_not_reached ();
3994 mono_arm_thumb_supported (void)
3996 return thumb_supported;
4002 * emit_load_volatile_arguments:
4004 * Load volatile arguments from the stack to the original input registers.
4005 * Required before a tail call.
4008 emit_load_volatile_arguments (MonoCompile *cfg, guint8 *code)
4010 MonoMethod *method = cfg->method;
4011 MonoMethodSignature *sig;
4016 /* FIXME: Generate intermediate code instead */
4018 sig = mono_method_signature (method);
4020 /* This is the opposite of the code in emit_prolog */
4024 cinfo = get_call_info (cfg->generic_sharing_context, NULL, sig);
4026 if (cinfo->vtype_retaddr) {
4027 ArgInfo *ainfo = &cinfo->ret;
4028 inst = cfg->vret_addr;
4029 g_assert (arm_is_imm12 (inst->inst_offset));
4030 ARM_LDR_IMM (code, ainfo->reg, inst->inst_basereg, inst->inst_offset);
4032 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
4033 ArgInfo *ainfo = cinfo->args + i;
4034 inst = cfg->args [pos];
4036 if (cfg->verbose_level > 2)
4037 g_print ("Loading argument %d (type: %d)\n", i, ainfo->storage);
4038 if (inst->opcode == OP_REGVAR) {
4039 if (ainfo->storage == RegTypeGeneral)
4040 ARM_MOV_REG_REG (code, inst->dreg, ainfo->reg);
4041 else if (ainfo->storage == RegTypeFP) {
4042 g_assert_not_reached ();
4043 } else if (ainfo->storage == RegTypeBase) {
4047 if (arm_is_imm12 (prev_sp_offset + ainfo->offset)) {
4048 ARM_LDR_IMM (code, inst->dreg, ARMREG_SP, (prev_sp_offset + ainfo->offset));
4050 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
4051 ARM_LDR_REG_REG (code, inst->dreg, ARMREG_SP, ARMREG_IP);
4055 g_assert_not_reached ();
4057 if (ainfo->storage == RegTypeGeneral || ainfo->storage == RegTypeIRegPair) {
4058 switch (ainfo->size) {
4065 g_assert (arm_is_imm12 (inst->inst_offset));
4066 ARM_LDR_IMM (code, ainfo->reg, inst->inst_basereg, inst->inst_offset);
4067 g_assert (arm_is_imm12 (inst->inst_offset + 4));
4068 ARM_LDR_IMM (code, ainfo->reg + 1, inst->inst_basereg, inst->inst_offset + 4);
4071 if (arm_is_imm12 (inst->inst_offset)) {
4072 ARM_LDR_IMM (code, ainfo->reg, inst->inst_basereg, inst->inst_offset);
4074 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
4075 ARM_LDR_REG_REG (code, ainfo->reg, inst->inst_basereg, ARMREG_IP);
4079 } else if (ainfo->storage == RegTypeBaseGen) {
4082 } else if (ainfo->storage == RegTypeBase) {
4084 } else if (ainfo->storage == RegTypeFP) {
4085 g_assert_not_reached ();
4086 } else if (ainfo->storage == RegTypeStructByVal) {
4087 int doffset = inst->inst_offset;
4091 if (mono_class_from_mono_type (inst->inst_vtype))
4092 size = mono_class_native_size (mono_class_from_mono_type (inst->inst_vtype), NULL);
4093 for (cur_reg = 0; cur_reg < ainfo->size; ++cur_reg) {
4094 if (arm_is_imm12 (doffset)) {
4095 ARM_LDR_IMM (code, ainfo->reg + cur_reg, inst->inst_basereg, doffset);
4097 code = mono_arm_emit_load_imm (code, ARMREG_IP, doffset);
4098 ARM_LDR_REG_REG (code, ainfo->reg + cur_reg, inst->inst_basereg, ARMREG_IP);
4100 soffset += sizeof (gpointer);
4101 doffset += sizeof (gpointer);
4106 } else if (ainfo->storage == RegTypeStructByAddr) {
4121 mono_arch_output_basic_block (MonoCompile *cfg, MonoBasicBlock *bb)
4126 guint8 *code = cfg->native_code + cfg->code_len;
4127 MonoInst *last_ins = NULL;
4128 guint last_offset = 0;
4130 int imm8, rot_amount;
4132 /* we don't align basic blocks of loops on arm */
4134 if (cfg->verbose_level > 2)
4135 g_print ("Basic block %d starting at offset 0x%x\n", bb->block_num, bb->native_offset);
4137 cpos = bb->max_offset;
4139 if (cfg->prof_options & MONO_PROFILE_COVERAGE) {
4140 //MonoCoverageInfo *cov = mono_get_coverage_info (cfg->method);
4141 //g_assert (!mono_compile_aot);
4144 // cov->data [bb->dfn].iloffset = bb->cil_code - cfg->cil_code;
4145 /* this is not thread save, but good enough */
4146 /* fixme: howto handle overflows? */
4147 //x86_inc_mem (code, &cov->data [bb->dfn].count);
4150 if (mono_break_at_bb_method && mono_method_desc_full_match (mono_break_at_bb_method, cfg->method) && bb->block_num == mono_break_at_bb_bb_num) {
4151 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_INTERNAL_METHOD,
4152 (gpointer)"mono_break");
4153 code = emit_call_seq (cfg, code);
4156 MONO_BB_FOR_EACH_INS (bb, ins) {
4157 offset = code - cfg->native_code;
4159 max_len = ((guint8 *)ins_get_spec (ins->opcode))[MONO_INST_LEN];
4161 if (offset > (cfg->code_size - max_len - 16)) {
4162 cfg->code_size *= 2;
4163 cfg->native_code = g_realloc (cfg->native_code, cfg->code_size);
4164 code = cfg->native_code + offset;
4166 // if (ins->cil_code)
4167 // g_print ("cil code\n");
4168 mono_debug_record_line_number (cfg, ins, offset);
4170 switch (ins->opcode) {
4171 case OP_MEMORY_BARRIER:
4173 ARM_MOV_REG_IMM8 (code, ARMREG_R0, 0);
4174 ARM_MCR (code, 15, 0, ARMREG_R0, 7, 10, 5);
4178 #ifdef HAVE_AEABI_READ_TP
4179 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_INTERNAL_METHOD,
4180 (gpointer)"__aeabi_read_tp");
4181 code = emit_call_seq (cfg, code);
4183 ARM_LDR_IMM (code, ins->dreg, ARMREG_R0, ins->inst_offset);
4185 g_assert_not_reached ();
4188 case OP_ATOMIC_EXCHANGE_I4:
4189 case OP_ATOMIC_CAS_I4:
4190 case OP_ATOMIC_ADD_I4: {
4194 g_assert (v7_supported);
4197 if (ins->sreg1 != ARMREG_IP && ins->sreg2 != ARMREG_IP && ins->sreg3 != ARMREG_IP)
4199 else if (ins->sreg1 != ARMREG_R0 && ins->sreg2 != ARMREG_R0 && ins->sreg3 != ARMREG_R0)
4201 else if (ins->sreg1 != ARMREG_R1 && ins->sreg2 != ARMREG_R1 && ins->sreg3 != ARMREG_R1)
4205 g_assert (cfg->arch.atomic_tmp_offset != -1);
4206 ARM_STR_IMM (code, tmpreg, cfg->frame_reg, cfg->arch.atomic_tmp_offset);
4208 switch (ins->opcode) {
4209 case OP_ATOMIC_EXCHANGE_I4:
4211 ARM_DMB (code, ARM_DMB_SY);
4212 ARM_LDREX_REG (code, ARMREG_LR, ins->sreg1);
4213 ARM_STREX_REG (code, tmpreg, ins->sreg2, ins->sreg1);
4214 ARM_CMP_REG_IMM (code, tmpreg, 0, 0);
4216 ARM_B_COND (code, ARMCOND_NE, 0);
4217 arm_patch (buf [1], buf [0]);
4219 case OP_ATOMIC_CAS_I4:
4220 ARM_DMB (code, ARM_DMB_SY);
4222 ARM_LDREX_REG (code, ARMREG_LR, ins->sreg1);
4223 ARM_CMP_REG_REG (code, ARMREG_LR, ins->sreg3);
4225 ARM_B_COND (code, ARMCOND_NE, 0);
4226 ARM_STREX_REG (code, tmpreg, ins->sreg2, ins->sreg1);
4227 ARM_CMP_REG_IMM (code, tmpreg, 0, 0);
4229 ARM_B_COND (code, ARMCOND_NE, 0);
4230 arm_patch (buf [2], buf [0]);
4231 arm_patch (buf [1], code);
4233 case OP_ATOMIC_ADD_I4:
4235 ARM_DMB (code, ARM_DMB_SY);
4236 ARM_LDREX_REG (code, ARMREG_LR, ins->sreg1);
4237 ARM_ADD_REG_REG (code, ARMREG_LR, ARMREG_LR, ins->sreg2);
4238 ARM_STREX_REG (code, tmpreg, ARMREG_LR, ins->sreg1);
4239 ARM_CMP_REG_IMM (code, tmpreg, 0, 0);
4241 ARM_B_COND (code, ARMCOND_NE, 0);
4242 arm_patch (buf [1], buf [0]);
4245 g_assert_not_reached ();
4248 ARM_DMB (code, ARM_DMB_SY);
4249 if (tmpreg != ins->dreg)
4250 ARM_LDR_IMM (code, tmpreg, cfg->frame_reg, cfg->arch.atomic_tmp_offset);
4251 ARM_MOV_REG_REG (code, ins->dreg, ARMREG_LR);
4256 ppc_mullw (code, ppc_r4, ins->sreg1, ins->sreg2);
4257 ppc_mulhw (code, ppc_r3, ins->sreg1, ins->sreg2);
4260 ppc_mullw (code, ppc_r4, ins->sreg1, ins->sreg2);
4261 ppc_mulhwu (code, ppc_r3, ins->sreg1, ins->sreg2);
4263 case OP_STOREI1_MEMBASE_IMM:
4264 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_imm & 0xFF);
4265 g_assert (arm_is_imm12 (ins->inst_offset));
4266 ARM_STRB_IMM (code, ARMREG_LR, ins->inst_destbasereg, ins->inst_offset);
4268 case OP_STOREI2_MEMBASE_IMM:
4269 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_imm & 0xFFFF);
4270 g_assert (arm_is_imm8 (ins->inst_offset));
4271 ARM_STRH_IMM (code, ARMREG_LR, ins->inst_destbasereg, ins->inst_offset);
4273 case OP_STORE_MEMBASE_IMM:
4274 case OP_STOREI4_MEMBASE_IMM:
4275 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_imm);
4276 g_assert (arm_is_imm12 (ins->inst_offset));
4277 ARM_STR_IMM (code, ARMREG_LR, ins->inst_destbasereg, ins->inst_offset);
4279 case OP_STOREI1_MEMBASE_REG:
4280 g_assert (arm_is_imm12 (ins->inst_offset));
4281 ARM_STRB_IMM (code, ins->sreg1, ins->inst_destbasereg, ins->inst_offset);
4283 case OP_STOREI2_MEMBASE_REG:
4284 g_assert (arm_is_imm8 (ins->inst_offset));
4285 ARM_STRH_IMM (code, ins->sreg1, ins->inst_destbasereg, ins->inst_offset);
4287 case OP_STORE_MEMBASE_REG:
4288 case OP_STOREI4_MEMBASE_REG:
4289 /* this case is special, since it happens for spill code after lowering has been called */
4290 if (arm_is_imm12 (ins->inst_offset)) {
4291 ARM_STR_IMM (code, ins->sreg1, ins->inst_destbasereg, ins->inst_offset);
4293 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_offset);
4294 ARM_STR_REG_REG (code, ins->sreg1, ins->inst_destbasereg, ARMREG_LR);
4297 case OP_STOREI1_MEMINDEX:
4298 ARM_STRB_REG_REG (code, ins->sreg1, ins->inst_destbasereg, ins->sreg2);
4300 case OP_STOREI2_MEMINDEX:
4301 ARM_STRH_REG_REG (code, ins->sreg1, ins->inst_destbasereg, ins->sreg2);
4303 case OP_STORE_MEMINDEX:
4304 case OP_STOREI4_MEMINDEX:
4305 ARM_STR_REG_REG (code, ins->sreg1, ins->inst_destbasereg, ins->sreg2);
4308 g_assert_not_reached ();
4310 case OP_LOAD_MEMINDEX:
4311 case OP_LOADI4_MEMINDEX:
4312 case OP_LOADU4_MEMINDEX:
4313 ARM_LDR_REG_REG (code, ins->dreg, ins->inst_basereg, ins->sreg2);
4315 case OP_LOADI1_MEMINDEX:
4316 ARM_LDRSB_REG_REG (code, ins->dreg, ins->inst_basereg, ins->sreg2);
4318 case OP_LOADU1_MEMINDEX:
4319 ARM_LDRB_REG_REG (code, ins->dreg, ins->inst_basereg, ins->sreg2);
4321 case OP_LOADI2_MEMINDEX:
4322 ARM_LDRSH_REG_REG (code, ins->dreg, ins->inst_basereg, ins->sreg2);
4324 case OP_LOADU2_MEMINDEX:
4325 ARM_LDRH_REG_REG (code, ins->dreg, ins->inst_basereg, ins->sreg2);
4327 case OP_LOAD_MEMBASE:
4328 case OP_LOADI4_MEMBASE:
4329 case OP_LOADU4_MEMBASE:
4330 /* this case is special, since it happens for spill code after lowering has been called */
4331 if (arm_is_imm12 (ins->inst_offset)) {
4332 ARM_LDR_IMM (code, ins->dreg, ins->inst_basereg, ins->inst_offset);
4334 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_offset);
4335 ARM_LDR_REG_REG (code, ins->dreg, ins->inst_basereg, ARMREG_LR);
4338 case OP_LOADI1_MEMBASE:
4339 g_assert (arm_is_imm8 (ins->inst_offset));
4340 ARM_LDRSB_IMM (code, ins->dreg, ins->inst_basereg, ins->inst_offset);
4342 case OP_LOADU1_MEMBASE:
4343 g_assert (arm_is_imm12 (ins->inst_offset));
4344 ARM_LDRB_IMM (code, ins->dreg, ins->inst_basereg, ins->inst_offset);
4346 case OP_LOADU2_MEMBASE:
4347 g_assert (arm_is_imm8 (ins->inst_offset));
4348 ARM_LDRH_IMM (code, ins->dreg, ins->inst_basereg, ins->inst_offset);
4350 case OP_LOADI2_MEMBASE:
4351 g_assert (arm_is_imm8 (ins->inst_offset));
4352 ARM_LDRSH_IMM (code, ins->dreg, ins->inst_basereg, ins->inst_offset);
4354 case OP_ICONV_TO_I1:
4355 ARM_SHL_IMM (code, ins->dreg, ins->sreg1, 24);
4356 ARM_SAR_IMM (code, ins->dreg, ins->dreg, 24);
4358 case OP_ICONV_TO_I2:
4359 ARM_SHL_IMM (code, ins->dreg, ins->sreg1, 16);
4360 ARM_SAR_IMM (code, ins->dreg, ins->dreg, 16);
4362 case OP_ICONV_TO_U1:
4363 ARM_AND_REG_IMM8 (code, ins->dreg, ins->sreg1, 0xff);
4365 case OP_ICONV_TO_U2:
4366 ARM_SHL_IMM (code, ins->dreg, ins->sreg1, 16);
4367 ARM_SHR_IMM (code, ins->dreg, ins->dreg, 16);
4371 ARM_CMP_REG_REG (code, ins->sreg1, ins->sreg2);
4373 case OP_COMPARE_IMM:
4374 case OP_ICOMPARE_IMM:
4375 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
4376 g_assert (imm8 >= 0);
4377 ARM_CMP_REG_IMM (code, ins->sreg1, imm8, rot_amount);
4381 * gdb does not like encountering the hw breakpoint ins in the debugged code.
4382 * So instead of emitting a trap, we emit a call a C function and place a
4385 //*(int*)code = 0xef9f0001;
4388 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_INTERNAL_METHOD,
4389 (gpointer)"mono_break");
4390 code = emit_call_seq (cfg, code);
4392 case OP_RELAXED_NOP:
4397 case OP_DUMMY_STORE:
4398 case OP_DUMMY_ICONST:
4399 case OP_DUMMY_R8CONST:
4400 case OP_NOT_REACHED:
4403 case OP_SEQ_POINT: {
4405 MonoInst *info_var = cfg->arch.seq_point_info_var;
4406 MonoInst *ss_trigger_page_var = cfg->arch.ss_trigger_page_var;
4407 MonoInst *ss_read_var = cfg->arch.seq_point_read_var;
4408 MonoInst *ss_method_var = cfg->arch.seq_point_ss_method_var;
4409 MonoInst *bp_method_var = cfg->arch.seq_point_bp_method_var;
4411 int dreg = ARMREG_LR;
4413 if (cfg->soft_breakpoints) {
4414 g_assert (!cfg->compile_aot);
4418 * For AOT, we use one got slot per method, which will point to a
4419 * SeqPointInfo structure, containing all the information required
4420 * by the code below.
4422 if (cfg->compile_aot) {
4423 g_assert (info_var);
4424 g_assert (info_var->opcode == OP_REGOFFSET);
4425 g_assert (arm_is_imm12 (info_var->inst_offset));
4428 if (!cfg->soft_breakpoints) {
4430 * Read from the single stepping trigger page. This will cause a
4431 * SIGSEGV when single stepping is enabled.
4432 * We do this _before_ the breakpoint, so single stepping after
4433 * a breakpoint is hit will step to the next IL offset.
4435 g_assert (((guint64)(gsize)ss_trigger_page >> 32) == 0);
4438 if (ins->flags & MONO_INST_SINGLE_STEP_LOC) {
4439 if (cfg->soft_breakpoints) {
4440 /* Load the address of the sequence point trigger variable. */
4443 g_assert (var->opcode == OP_REGOFFSET);
4444 g_assert (arm_is_imm12 (var->inst_offset));
4445 ARM_LDR_IMM (code, dreg, var->inst_basereg, var->inst_offset);
4447 /* Read the value and check whether it is non-zero. */
4448 ARM_LDR_IMM (code, dreg, dreg, 0);
4449 ARM_CMP_REG_IMM (code, dreg, 0, 0);
4451 /* Load the address of the sequence point method. */
4452 var = ss_method_var;
4454 g_assert (var->opcode == OP_REGOFFSET);
4455 g_assert (arm_is_imm12 (var->inst_offset));
4456 ARM_LDR_IMM (code, dreg, var->inst_basereg, var->inst_offset);
4458 /* Call it conditionally. */
4459 ARM_BLX_REG_COND (code, ARMCOND_NE, dreg);
4461 if (cfg->compile_aot) {
4462 /* Load the trigger page addr from the variable initialized in the prolog */
4463 var = ss_trigger_page_var;
4465 g_assert (var->opcode == OP_REGOFFSET);
4466 g_assert (arm_is_imm12 (var->inst_offset));
4467 ARM_LDR_IMM (code, dreg, var->inst_basereg, var->inst_offset);
4469 #ifdef USE_JUMP_TABLES
4470 gpointer *jte = mono_jumptable_add_entry ();
4471 code = mono_arm_load_jumptable_entry (code, jte, dreg);
4472 jte [0] = ss_trigger_page;
4474 ARM_LDR_IMM (code, dreg, ARMREG_PC, 0);
4476 *(int*)code = (int)ss_trigger_page;
4480 ARM_LDR_IMM (code, dreg, dreg, 0);
4484 mono_add_seq_point (cfg, bb, ins, code - cfg->native_code);
4486 if (cfg->soft_breakpoints) {
4487 /* Load the address of the breakpoint method into ip. */
4488 var = bp_method_var;
4490 g_assert (var->opcode == OP_REGOFFSET);
4491 g_assert (arm_is_imm12 (var->inst_offset));
4492 ARM_LDR_IMM (code, dreg, var->inst_basereg, var->inst_offset);
4495 * A placeholder for a possible breakpoint inserted by
4496 * mono_arch_set_breakpoint ().
4499 } else if (cfg->compile_aot) {
4500 guint32 offset = code - cfg->native_code;
4503 ARM_LDR_IMM (code, dreg, info_var->inst_basereg, info_var->inst_offset);
4504 /* Add the offset */
4505 val = ((offset / 4) * sizeof (guint8*)) + MONO_STRUCT_OFFSET (SeqPointInfo, bp_addrs);
4506 /* Load the info->bp_addrs [offset], which is either 0 or the address of a trigger page */
4507 if (arm_is_imm12 ((int)val)) {
4508 ARM_LDR_IMM (code, dreg, dreg, val);
4510 ARM_ADD_REG_IMM (code, dreg, dreg, (val & 0xFF), 0);
4512 ARM_ADD_REG_IMM (code, dreg, dreg, (val & 0xFF00) >> 8, 24);
4514 ARM_ADD_REG_IMM (code, dreg, dreg, (val & 0xFF0000) >> 16, 16);
4515 g_assert (!(val & 0xFF000000));
4517 ARM_LDR_IMM (code, dreg, dreg, 0);
4519 /* What is faster, a branch or a load ? */
4520 ARM_CMP_REG_IMM (code, dreg, 0, 0);
4521 /* The breakpoint instruction */
4522 ARM_LDR_IMM_COND (code, dreg, dreg, 0, ARMCOND_NE);
4525 * A placeholder for a possible breakpoint inserted by
4526 * mono_arch_set_breakpoint ().
4528 for (i = 0; i < 4; ++i)
4535 ARM_ADDS_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4538 ARM_ADD_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4542 ARM_ADCS_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4545 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
4546 g_assert (imm8 >= 0);
4547 ARM_ADDS_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
4551 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
4552 g_assert (imm8 >= 0);
4553 ARM_ADD_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
4557 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
4558 g_assert (imm8 >= 0);
4559 ARM_ADCS_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
4562 ARM_ADD_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4563 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, "OverflowException");
4565 case OP_IADD_OVF_UN:
4566 ARM_ADD_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4567 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, "OverflowException");
4570 ARM_SUB_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4571 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, "OverflowException");
4573 case OP_ISUB_OVF_UN:
4574 ARM_SUB_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4575 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_TRUE, PPC_BR_EQ, "OverflowException");
4577 case OP_ADD_OVF_CARRY:
4578 ARM_ADCS_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4579 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, "OverflowException");
4581 case OP_ADD_OVF_UN_CARRY:
4582 ARM_ADCS_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4583 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, "OverflowException");
4585 case OP_SUB_OVF_CARRY:
4586 ARM_SBCS_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4587 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, "OverflowException");
4589 case OP_SUB_OVF_UN_CARRY:
4590 ARM_SBCS_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4591 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_TRUE, PPC_BR_EQ, "OverflowException");
4595 ARM_SUBS_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4598 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
4599 g_assert (imm8 >= 0);
4600 ARM_SUBS_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
4603 ARM_SUB_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4607 ARM_SBCS_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4611 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
4612 g_assert (imm8 >= 0);
4613 ARM_SUB_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
4617 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
4618 g_assert (imm8 >= 0);
4619 ARM_SBCS_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
4621 case OP_ARM_RSBS_IMM:
4622 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
4623 g_assert (imm8 >= 0);
4624 ARM_RSBS_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
4626 case OP_ARM_RSC_IMM:
4627 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
4628 g_assert (imm8 >= 0);
4629 ARM_RSC_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
4632 ARM_AND_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4636 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
4637 g_assert (imm8 >= 0);
4638 ARM_AND_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
4641 g_assert (v7s_supported);
4642 ARM_SDIV (code, ins->dreg, ins->sreg1, ins->sreg2);
4645 g_assert (v7s_supported);
4646 ARM_UDIV (code, ins->dreg, ins->sreg1, ins->sreg2);
4649 g_assert (v7s_supported);
4650 ARM_SDIV (code, ARMREG_LR, ins->sreg1, ins->sreg2);
4651 ARM_MLS (code, ins->dreg, ARMREG_LR, ins->sreg2, ins->sreg1);
4654 g_assert (v7s_supported);
4655 ARM_UDIV (code, ARMREG_LR, ins->sreg1, ins->sreg2);
4656 ARM_MLS (code, ins->dreg, ARMREG_LR, ins->sreg2, ins->sreg1);
4660 g_assert_not_reached ();
4662 ARM_ORR_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4666 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
4667 g_assert (imm8 >= 0);
4668 ARM_ORR_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
4671 ARM_EOR_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4675 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
4676 g_assert (imm8 >= 0);
4677 ARM_EOR_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
4680 ARM_SHL_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4685 ARM_SHL_IMM (code, ins->dreg, ins->sreg1, (ins->inst_imm & 0x1f));
4686 else if (ins->dreg != ins->sreg1)
4687 ARM_MOV_REG_REG (code, ins->dreg, ins->sreg1);
4690 ARM_SAR_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4695 ARM_SAR_IMM (code, ins->dreg, ins->sreg1, (ins->inst_imm & 0x1f));
4696 else if (ins->dreg != ins->sreg1)
4697 ARM_MOV_REG_REG (code, ins->dreg, ins->sreg1);
4700 case OP_ISHR_UN_IMM:
4702 ARM_SHR_IMM (code, ins->dreg, ins->sreg1, (ins->inst_imm & 0x1f));
4703 else if (ins->dreg != ins->sreg1)
4704 ARM_MOV_REG_REG (code, ins->dreg, ins->sreg1);
4707 ARM_SHR_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4710 ARM_MVN_REG_REG (code, ins->dreg, ins->sreg1);
4713 ARM_RSB_REG_IMM8 (code, ins->dreg, ins->sreg1, 0);
4716 if (ins->dreg == ins->sreg2)
4717 ARM_MUL_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4719 ARM_MUL_REG_REG (code, ins->dreg, ins->sreg2, ins->sreg1);
4722 g_assert_not_reached ();
4725 /* FIXME: handle ovf/ sreg2 != dreg */
4726 ARM_MUL_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4727 /* FIXME: MUL doesn't set the C/O flags on ARM */
4729 case OP_IMUL_OVF_UN:
4730 /* FIXME: handle ovf/ sreg2 != dreg */
4731 ARM_MUL_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4732 /* FIXME: MUL doesn't set the C/O flags on ARM */
4735 code = mono_arm_emit_load_imm (code, ins->dreg, ins->inst_c0);
4738 /* Load the GOT offset */
4739 mono_add_patch_info (cfg, offset, (MonoJumpInfoType)ins->inst_i1, ins->inst_p0);
4740 ARM_LDR_IMM (code, ins->dreg, ARMREG_PC, 0);
4742 *(gpointer*)code = NULL;
4744 /* Load the value from the GOT */
4745 ARM_LDR_REG_REG (code, ins->dreg, ARMREG_PC, ins->dreg);
4747 case OP_OBJC_GET_SELECTOR:
4748 mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_OBJC_SELECTOR_REF, ins->inst_p0);
4749 ARM_LDR_IMM (code, ins->dreg, ARMREG_PC, 0);
4751 *(gpointer*)code = NULL;
4753 ARM_LDR_REG_REG (code, ins->dreg, ARMREG_PC, ins->dreg);
4755 case OP_ICONV_TO_I4:
4756 case OP_ICONV_TO_U4:
4758 if (ins->dreg != ins->sreg1)
4759 ARM_MOV_REG_REG (code, ins->dreg, ins->sreg1);
4762 int saved = ins->sreg2;
4763 if (ins->sreg2 == ARM_LSW_REG) {
4764 ARM_MOV_REG_REG (code, ARMREG_LR, ins->sreg2);
4767 if (ins->sreg1 != ARM_LSW_REG)
4768 ARM_MOV_REG_REG (code, ARM_LSW_REG, ins->sreg1);
4769 if (saved != ARM_MSW_REG)
4770 ARM_MOV_REG_REG (code, ARM_MSW_REG, saved);
4775 ARM_CPYD (code, ins->dreg, ins->sreg1);
4777 case OP_FCONV_TO_R4:
4779 ARM_CVTD (code, ins->dreg, ins->sreg1);
4780 ARM_CVTS (code, ins->dreg, ins->dreg);
4785 * Keep in sync with mono_arch_emit_epilog
4787 g_assert (!cfg->method->save_lmf);
4789 code = emit_load_volatile_arguments (cfg, code);
4791 code = emit_big_add (code, ARMREG_SP, cfg->frame_reg, cfg->stack_usage);
4793 if (cfg->used_int_regs)
4794 ARM_POP (code, cfg->used_int_regs);
4795 ARM_POP (code, (1 << ARMREG_R7) | (1 << ARMREG_LR));
4797 ARM_POP (code, cfg->used_int_regs | (1 << ARMREG_LR));
4799 mono_add_patch_info (cfg, (guint8*) code - cfg->native_code, MONO_PATCH_INFO_METHOD_JUMP, ins->inst_p0);
4800 if (cfg->compile_aot) {
4801 ARM_LDR_IMM (code, ARMREG_IP, ARMREG_PC, 0);
4803 *(gpointer*)code = NULL;
4805 ARM_LDR_REG_REG (code, ARMREG_PC, ARMREG_PC, ARMREG_IP);
4807 code = mono_arm_patchable_b (code, ARMCOND_AL);
4811 MonoCallInst *call = (MonoCallInst*)ins;
4814 * The stack looks like the following:
4815 * <caller argument area>
4818 * <callee argument area>
4819 * Need to copy the arguments from the callee argument area to
4820 * the caller argument area, and pop the frame.
4822 if (call->stack_usage) {
4823 int i, prev_sp_offset = 0;
4825 /* Compute size of saved registers restored below */
4827 prev_sp_offset = 2 * 4;
4829 prev_sp_offset = 1 * 4;
4830 for (i = 0; i < 16; ++i) {
4831 if (cfg->used_int_regs & (1 << i))
4832 prev_sp_offset += 4;
4835 code = emit_big_add (code, ARMREG_IP, cfg->frame_reg, cfg->stack_usage + prev_sp_offset);
4837 /* Copy arguments on the stack to our argument area */
4838 for (i = 0; i < call->stack_usage; i += sizeof (mgreg_t)) {
4839 ARM_LDR_IMM (code, ARMREG_LR, ARMREG_SP, i);
4840 ARM_STR_IMM (code, ARMREG_LR, ARMREG_IP, i);
4845 * Keep in sync with mono_arch_emit_epilog
4847 g_assert (!cfg->method->save_lmf);
4849 code = emit_big_add (code, ARMREG_SP, cfg->frame_reg, cfg->stack_usage);
4851 if (cfg->used_int_regs)
4852 ARM_POP (code, cfg->used_int_regs);
4853 ARM_POP (code, (1 << ARMREG_R7) | (1 << ARMREG_LR));
4855 ARM_POP (code, cfg->used_int_regs | (1 << ARMREG_LR));
4858 mono_add_patch_info (cfg, (guint8*) code - cfg->native_code, MONO_PATCH_INFO_METHOD_JUMP, call->method);
4859 if (cfg->compile_aot) {
4860 ARM_LDR_IMM (code, ARMREG_IP, ARMREG_PC, 0);
4862 *(gpointer*)code = NULL;
4864 ARM_LDR_REG_REG (code, ARMREG_PC, ARMREG_PC, ARMREG_IP);
4866 code = mono_arm_patchable_b (code, ARMCOND_AL);
4871 /* ensure ins->sreg1 is not NULL */
4872 ARM_LDRB_IMM (code, ARMREG_LR, ins->sreg1, 0);
4875 g_assert (cfg->sig_cookie < 128);
4876 ARM_LDR_IMM (code, ARMREG_IP, cfg->frame_reg, cfg->sig_cookie);
4877 ARM_STR_IMM (code, ARMREG_IP, ins->sreg1, 0);
4886 call = (MonoCallInst*)ins;
4889 code = emit_float_args (cfg, call, code, &max_len, &offset);
4891 if (ins->flags & MONO_INST_HAS_METHOD)
4892 mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_METHOD, call->method);
4894 mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_ABS, call->fptr);
4895 code = emit_call_seq (cfg, code);
4896 ins->flags |= MONO_INST_GC_CALLSITE;
4897 ins->backend.pc_offset = code - cfg->native_code;
4898 code = emit_move_return_value (cfg, ins, code);
4904 case OP_VOIDCALL_REG:
4907 code = emit_float_args (cfg, (MonoCallInst *)ins, code, &max_len, &offset);
4909 code = emit_call_reg (code, ins->sreg1);
4910 ins->flags |= MONO_INST_GC_CALLSITE;
4911 ins->backend.pc_offset = code - cfg->native_code;
4912 code = emit_move_return_value (cfg, ins, code);
4914 case OP_FCALL_MEMBASE:
4915 case OP_LCALL_MEMBASE:
4916 case OP_VCALL_MEMBASE:
4917 case OP_VCALL2_MEMBASE:
4918 case OP_VOIDCALL_MEMBASE:
4919 case OP_CALL_MEMBASE: {
4920 gboolean imt_arg = FALSE;
4922 g_assert (ins->sreg1 != ARMREG_LR);
4923 call = (MonoCallInst*)ins;
4926 code = emit_float_args (cfg, call, code, &max_len, &offset);
4928 if (call->dynamic_imt_arg || call->method->klass->flags & TYPE_ATTRIBUTE_INTERFACE)
4930 if (!arm_is_imm12 (ins->inst_offset))
4931 code = mono_arm_emit_load_imm (code, ARMREG_IP, ins->inst_offset);
4932 #ifdef USE_JUMP_TABLES
4938 ARM_ADD_REG_IMM8 (code, ARMREG_LR, ARMREG_PC, LR_BIAS);
4940 ARM_MOV_REG_REG (code, ARMREG_LR, ARMREG_PC);
4942 if (!arm_is_imm12 (ins->inst_offset))
4943 ARM_LDR_REG_REG (code, ARMREG_PC, ins->sreg1, ARMREG_IP);
4945 ARM_LDR_IMM (code, ARMREG_PC, ins->sreg1, ins->inst_offset);
4948 * We can't embed the method in the code stream in PIC code, or
4950 * Instead, we put it in V5 in code emitted by
4951 * mono_arch_emit_imt_argument (), and embed NULL here to
4952 * signal the IMT thunk that the value is in V5.
4954 #ifdef USE_JUMP_TABLES
4955 /* In case of jumptables we always use value in V5. */
4958 if (call->dynamic_imt_arg)
4959 *((gpointer*)code) = NULL;
4961 *((gpointer*)code) = (gpointer)call->method;
4965 ins->flags |= MONO_INST_GC_CALLSITE;
4966 ins->backend.pc_offset = code - cfg->native_code;
4967 code = emit_move_return_value (cfg, ins, code);
4971 /* round the size to 8 bytes */
4972 ARM_ADD_REG_IMM8 (code, ins->dreg, ins->sreg1, 7);
4973 ARM_BIC_REG_IMM8 (code, ins->dreg, ins->dreg, 7);
4974 ARM_SUB_REG_REG (code, ARMREG_SP, ARMREG_SP, ins->dreg);
4975 /* memzero the area: dreg holds the size, sp is the pointer */
4976 if (ins->flags & MONO_INST_INIT) {
4977 guint8 *start_loop, *branch_to_cond;
4978 ARM_MOV_REG_IMM8 (code, ARMREG_LR, 0);
4979 branch_to_cond = code;
4982 ARM_STR_REG_REG (code, ARMREG_LR, ARMREG_SP, ins->dreg);
4983 arm_patch (branch_to_cond, code);
4984 /* decrement by 4 and set flags */
4985 ARM_SUBS_REG_IMM8 (code, ins->dreg, ins->dreg, sizeof (mgreg_t));
4986 ARM_B_COND (code, ARMCOND_GE, 0);
4987 arm_patch (code - 4, start_loop);
4989 ARM_MOV_REG_REG (code, ins->dreg, ARMREG_SP);
4990 if (cfg->param_area)
4991 code = emit_sub_imm (code, ARMREG_SP, ARMREG_SP, ALIGN_TO (cfg->param_area, MONO_ARCH_FRAME_ALIGNMENT));
4996 MonoInst *var = cfg->dyn_call_var;
4998 g_assert (var->opcode == OP_REGOFFSET);
4999 g_assert (arm_is_imm12 (var->inst_offset));
5001 /* lr = args buffer filled by mono_arch_get_dyn_call_args () */
5002 ARM_MOV_REG_REG( code, ARMREG_LR, ins->sreg1);
5004 ARM_MOV_REG_REG( code, ARMREG_IP, ins->sreg2);
5006 /* Save args buffer */
5007 ARM_STR_IMM (code, ARMREG_LR, var->inst_basereg, var->inst_offset);
5009 /* Set stack slots using R0 as scratch reg */
5010 /* MONO_ARCH_DYN_CALL_PARAM_AREA gives the size of stack space available */
5011 for (i = 0; i < DYN_CALL_STACK_ARGS; ++i) {
5012 ARM_LDR_IMM (code, ARMREG_R0, ARMREG_LR, (PARAM_REGS + i) * sizeof (mgreg_t));
5013 ARM_STR_IMM (code, ARMREG_R0, ARMREG_SP, i * sizeof (mgreg_t));
5016 /* Set argument registers */
5017 for (i = 0; i < PARAM_REGS; ++i)
5018 ARM_LDR_IMM (code, i, ARMREG_LR, i * sizeof (mgreg_t));
5021 ARM_MOV_REG_REG (code, ARMREG_LR, ARMREG_PC);
5022 ARM_MOV_REG_REG (code, ARMREG_PC, ARMREG_IP);
5025 ARM_LDR_IMM (code, ARMREG_IP, var->inst_basereg, var->inst_offset);
5026 ARM_STR_IMM (code, ARMREG_R0, ARMREG_IP, MONO_STRUCT_OFFSET (DynCallArgs, res));
5027 ARM_STR_IMM (code, ARMREG_R1, ARMREG_IP, MONO_STRUCT_OFFSET (DynCallArgs, res2));
5031 if (ins->sreg1 != ARMREG_R0)
5032 ARM_MOV_REG_REG (code, ARMREG_R0, ins->sreg1);
5033 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_INTERNAL_METHOD,
5034 (gpointer)"mono_arch_throw_exception");
5035 code = emit_call_seq (cfg, code);
5039 if (ins->sreg1 != ARMREG_R0)
5040 ARM_MOV_REG_REG (code, ARMREG_R0, ins->sreg1);
5041 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_INTERNAL_METHOD,
5042 (gpointer)"mono_arch_rethrow_exception");
5043 code = emit_call_seq (cfg, code);
5046 case OP_START_HANDLER: {
5047 MonoInst *spvar = mono_find_spvar_for_region (cfg, bb->region);
5050 /* Reserve a param area, see filter-stack.exe */
5051 if (cfg->param_area) {
5052 if ((i = mono_arm_is_rotated_imm8 (cfg->param_area, &rot_amount)) >= 0) {
5053 ARM_SUB_REG_IMM (code, ARMREG_SP, ARMREG_SP, i, rot_amount);
5055 code = mono_arm_emit_load_imm (code, ARMREG_IP, cfg->param_area);
5056 ARM_SUB_REG_REG (code, ARMREG_SP, ARMREG_SP, ARMREG_IP);
5060 if (arm_is_imm12 (spvar->inst_offset)) {
5061 ARM_STR_IMM (code, ARMREG_LR, spvar->inst_basereg, spvar->inst_offset);
5063 code = mono_arm_emit_load_imm (code, ARMREG_IP, spvar->inst_offset);
5064 ARM_STR_REG_REG (code, ARMREG_LR, spvar->inst_basereg, ARMREG_IP);
5068 case OP_ENDFILTER: {
5069 MonoInst *spvar = mono_find_spvar_for_region (cfg, bb->region);
5072 /* Free the param area */
5073 if (cfg->param_area) {
5074 if ((i = mono_arm_is_rotated_imm8 (cfg->param_area, &rot_amount)) >= 0) {
5075 ARM_ADD_REG_IMM (code, ARMREG_SP, ARMREG_SP, i, rot_amount);
5077 code = mono_arm_emit_load_imm (code, ARMREG_IP, cfg->param_area);
5078 ARM_ADD_REG_REG (code, ARMREG_SP, ARMREG_SP, ARMREG_IP);
5082 if (ins->sreg1 != ARMREG_R0)
5083 ARM_MOV_REG_REG (code, ARMREG_R0, ins->sreg1);
5084 if (arm_is_imm12 (spvar->inst_offset)) {
5085 ARM_LDR_IMM (code, ARMREG_IP, spvar->inst_basereg, spvar->inst_offset);
5087 g_assert (ARMREG_IP != spvar->inst_basereg);
5088 code = mono_arm_emit_load_imm (code, ARMREG_IP, spvar->inst_offset);
5089 ARM_LDR_REG_REG (code, ARMREG_IP, spvar->inst_basereg, ARMREG_IP);
5091 ARM_MOV_REG_REG (code, ARMREG_PC, ARMREG_IP);
5094 case OP_ENDFINALLY: {
5095 MonoInst *spvar = mono_find_spvar_for_region (cfg, bb->region);
5098 /* Free the param area */
5099 if (cfg->param_area) {
5100 if ((i = mono_arm_is_rotated_imm8 (cfg->param_area, &rot_amount)) >= 0) {
5101 ARM_ADD_REG_IMM (code, ARMREG_SP, ARMREG_SP, i, rot_amount);
5103 code = mono_arm_emit_load_imm (code, ARMREG_IP, cfg->param_area);
5104 ARM_ADD_REG_REG (code, ARMREG_SP, ARMREG_SP, ARMREG_IP);
5108 if (arm_is_imm12 (spvar->inst_offset)) {
5109 ARM_LDR_IMM (code, ARMREG_IP, spvar->inst_basereg, spvar->inst_offset);
5111 g_assert (ARMREG_IP != spvar->inst_basereg);
5112 code = mono_arm_emit_load_imm (code, ARMREG_IP, spvar->inst_offset);
5113 ARM_LDR_REG_REG (code, ARMREG_IP, spvar->inst_basereg, ARMREG_IP);
5115 ARM_MOV_REG_REG (code, ARMREG_PC, ARMREG_IP);
5118 case OP_CALL_HANDLER:
5119 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_BB, ins->inst_target_bb);
5120 code = mono_arm_patchable_bl (code, ARMCOND_AL);
5121 mono_cfg_add_try_hole (cfg, ins->inst_eh_block, code, bb);
5124 ins->inst_c0 = code - cfg->native_code;
5127 /*if (ins->inst_target_bb->native_offset) {
5129 //x86_jump_code (code, cfg->native_code + ins->inst_target_bb->native_offset);
5131 mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_BB, ins->inst_target_bb);
5132 code = mono_arm_patchable_b (code, ARMCOND_AL);
5136 ARM_MOV_REG_REG (code, ARMREG_PC, ins->sreg1);
5140 * In the normal case we have:
5141 * ldr pc, [pc, ins->sreg1 << 2]
5144 * ldr lr, [pc, ins->sreg1 << 2]
5146 * After follows the data.
5147 * FIXME: add aot support.
5149 mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_SWITCH, ins->inst_p0);
5150 #ifdef USE_JUMP_TABLES
5152 gpointer *jte = mono_jumptable_add_entries (GPOINTER_TO_INT (ins->klass));
5153 code = mono_arm_load_jumptable_entry_addr (code, jte, ARMREG_IP);
5154 ARM_LDR_REG_REG_SHIFT (code, ARMREG_PC, ARMREG_IP, ins->sreg1, ARMSHIFT_LSL, 2);
5158 max_len += 4 * GPOINTER_TO_INT (ins->klass);
5159 if (offset + max_len > (cfg->code_size - 16)) {
5160 cfg->code_size += max_len;
5161 cfg->code_size *= 2;
5162 cfg->native_code = g_realloc (cfg->native_code, cfg->code_size);
5163 code = cfg->native_code + offset;
5165 ARM_LDR_REG_REG_SHIFT (code, ARMREG_PC, ARMREG_PC, ins->sreg1, ARMSHIFT_LSL, 2);
5167 code += 4 * GPOINTER_TO_INT (ins->klass);
5172 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 0, ARMCOND_NE);
5173 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_EQ);
5177 ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
5178 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_LT);
5182 ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
5183 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_LO);
5187 ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
5188 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_GT);
5192 ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
5193 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_HI);
5196 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_NE);
5197 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 0, ARMCOND_EQ);
5200 ARM_MOV_REG_IMM8 (code, ins->dreg, 1);
5201 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 0, ARMCOND_LT);
5204 ARM_MOV_REG_IMM8 (code, ins->dreg, 1);
5205 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 0, ARMCOND_GT);
5209 ARM_MOV_REG_IMM8 (code, ins->dreg, 1);
5210 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 0, ARMCOND_LO);
5212 case OP_COND_EXC_EQ:
5213 case OP_COND_EXC_NE_UN:
5214 case OP_COND_EXC_LT:
5215 case OP_COND_EXC_LT_UN:
5216 case OP_COND_EXC_GT:
5217 case OP_COND_EXC_GT_UN:
5218 case OP_COND_EXC_GE:
5219 case OP_COND_EXC_GE_UN:
5220 case OP_COND_EXC_LE:
5221 case OP_COND_EXC_LE_UN:
5222 EMIT_COND_SYSTEM_EXCEPTION (ins->opcode - OP_COND_EXC_EQ, ins->inst_p1);
5224 case OP_COND_EXC_IEQ:
5225 case OP_COND_EXC_INE_UN:
5226 case OP_COND_EXC_ILT:
5227 case OP_COND_EXC_ILT_UN:
5228 case OP_COND_EXC_IGT:
5229 case OP_COND_EXC_IGT_UN:
5230 case OP_COND_EXC_IGE:
5231 case OP_COND_EXC_IGE_UN:
5232 case OP_COND_EXC_ILE:
5233 case OP_COND_EXC_ILE_UN:
5234 EMIT_COND_SYSTEM_EXCEPTION (ins->opcode - OP_COND_EXC_IEQ, ins->inst_p1);
5237 case OP_COND_EXC_IC:
5238 EMIT_COND_SYSTEM_EXCEPTION_FLAGS (ARMCOND_CS, ins->inst_p1);
5240 case OP_COND_EXC_OV:
5241 case OP_COND_EXC_IOV:
5242 EMIT_COND_SYSTEM_EXCEPTION_FLAGS (ARMCOND_VS, ins->inst_p1);
5244 case OP_COND_EXC_NC:
5245 case OP_COND_EXC_INC:
5246 EMIT_COND_SYSTEM_EXCEPTION_FLAGS (ARMCOND_CC, ins->inst_p1);
5248 case OP_COND_EXC_NO:
5249 case OP_COND_EXC_INO:
5250 EMIT_COND_SYSTEM_EXCEPTION_FLAGS (ARMCOND_VC, ins->inst_p1);
5262 EMIT_COND_BRANCH (ins, ins->opcode - OP_IBEQ);
5265 /* floating point opcodes */
5267 if (cfg->compile_aot) {
5268 ARM_FLDD (code, ins->dreg, ARMREG_PC, 0);
5270 *(guint32*)code = ((guint32*)(ins->inst_p0))[0];
5272 *(guint32*)code = ((guint32*)(ins->inst_p0))[1];
5275 /* FIXME: we can optimize the imm load by dealing with part of
5276 * the displacement in LDFD (aligning to 512).
5278 code = mono_arm_emit_load_imm (code, ARMREG_LR, (guint32)ins->inst_p0);
5279 ARM_FLDD (code, ins->dreg, ARMREG_LR, 0);
5283 if (cfg->compile_aot) {
5284 ARM_FLDS (code, ins->dreg, ARMREG_PC, 0);
5286 *(guint32*)code = ((guint32*)(ins->inst_p0))[0];
5288 ARM_CVTS (code, ins->dreg, ins->dreg);
5290 code = mono_arm_emit_load_imm (code, ARMREG_LR, (guint32)ins->inst_p0);
5291 ARM_FLDS (code, ins->dreg, ARMREG_LR, 0);
5292 ARM_CVTS (code, ins->dreg, ins->dreg);
5295 case OP_STORER8_MEMBASE_REG:
5296 /* This is generated by the local regalloc pass which runs after the lowering pass */
5297 if (!arm_is_fpimm8 (ins->inst_offset)) {
5298 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_offset);
5299 ARM_ADD_REG_REG (code, ARMREG_LR, ARMREG_LR, ins->inst_destbasereg);
5300 ARM_FSTD (code, ins->sreg1, ARMREG_LR, 0);
5302 ARM_FSTD (code, ins->sreg1, ins->inst_destbasereg, ins->inst_offset);
5305 case OP_LOADR8_MEMBASE:
5306 /* This is generated by the local regalloc pass which runs after the lowering pass */
5307 if (!arm_is_fpimm8 (ins->inst_offset)) {
5308 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_offset);
5309 ARM_ADD_REG_REG (code, ARMREG_LR, ARMREG_LR, ins->inst_basereg);
5310 ARM_FLDD (code, ins->dreg, ARMREG_LR, 0);
5312 ARM_FLDD (code, ins->dreg, ins->inst_basereg, ins->inst_offset);
5315 case OP_STORER4_MEMBASE_REG:
5316 g_assert (arm_is_fpimm8 (ins->inst_offset));
5317 code = mono_arm_emit_vfp_scratch_save (cfg, code, vfp_scratch1);
5318 ARM_CVTD (code, vfp_scratch1, ins->sreg1);
5319 ARM_FSTS (code, vfp_scratch1, ins->inst_destbasereg, ins->inst_offset);
5320 code = mono_arm_emit_vfp_scratch_restore (cfg, code, vfp_scratch1);
5322 case OP_LOADR4_MEMBASE:
5323 g_assert (arm_is_fpimm8 (ins->inst_offset));
5324 code = mono_arm_emit_vfp_scratch_save (cfg, code, vfp_scratch1);
5325 ARM_FLDS (code, vfp_scratch1, ins->inst_basereg, ins->inst_offset);
5326 ARM_CVTS (code, ins->dreg, vfp_scratch1);
5327 code = mono_arm_emit_vfp_scratch_restore (cfg, code, vfp_scratch1);
5329 case OP_ICONV_TO_R_UN: {
5330 g_assert_not_reached ();
5333 case OP_ICONV_TO_R4:
5334 code = mono_arm_emit_vfp_scratch_save (cfg, code, vfp_scratch1);
5335 ARM_FMSR (code, vfp_scratch1, ins->sreg1);
5336 ARM_FSITOS (code, vfp_scratch1, vfp_scratch1);
5337 ARM_CVTS (code, ins->dreg, vfp_scratch1);
5338 code = mono_arm_emit_vfp_scratch_restore (cfg, code, vfp_scratch1);
5340 case OP_ICONV_TO_R8:
5341 code = mono_arm_emit_vfp_scratch_save (cfg, code, vfp_scratch1);
5342 ARM_FMSR (code, vfp_scratch1, ins->sreg1);
5343 ARM_FSITOD (code, ins->dreg, vfp_scratch1);
5344 code = mono_arm_emit_vfp_scratch_restore (cfg, code, vfp_scratch1);
5348 MonoType *sig_ret = mini_type_get_underlying_type (NULL, mono_method_signature (cfg->method)->ret);
5349 if (sig_ret->type == MONO_TYPE_R4) {
5350 ARM_CVTD (code, ARM_VFP_F0, ins->sreg1);
5352 if (!IS_HARD_FLOAT) {
5353 ARM_FMRS (code, ARMREG_R0, ARM_VFP_F0);
5356 if (IS_HARD_FLOAT) {
5357 ARM_CPYD (code, ARM_VFP_D0, ins->sreg1);
5359 ARM_FMRRD (code, ARMREG_R0, ARMREG_R1, ins->sreg1);
5364 case OP_FCONV_TO_I1:
5365 code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 1, TRUE);
5367 case OP_FCONV_TO_U1:
5368 code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 1, FALSE);
5370 case OP_FCONV_TO_I2:
5371 code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 2, TRUE);
5373 case OP_FCONV_TO_U2:
5374 code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 2, FALSE);
5376 case OP_FCONV_TO_I4:
5378 code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 4, TRUE);
5380 case OP_FCONV_TO_U4:
5382 code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 4, FALSE);
5384 case OP_FCONV_TO_I8:
5385 case OP_FCONV_TO_U8:
5386 g_assert_not_reached ();
5387 /* Implemented as helper calls */
5389 case OP_LCONV_TO_R_UN:
5390 g_assert_not_reached ();
5391 /* Implemented as helper calls */
5393 case OP_LCONV_TO_OVF_I4_2: {
5394 guint8 *high_bit_not_set, *valid_negative, *invalid_negative, *valid_positive;
5396 * Valid ints: 0xffffffff:8000000 to 00000000:0x7f000000
5399 ARM_CMP_REG_IMM8 (code, ins->sreg1, 0);
5400 high_bit_not_set = code;
5401 ARM_B_COND (code, ARMCOND_GE, 0); /*branch if bit 31 of the lower part is not set*/
5403 ARM_CMN_REG_IMM8 (code, ins->sreg2, 1); /*This have the same effect as CMP reg, 0xFFFFFFFF */
5404 valid_negative = code;
5405 ARM_B_COND (code, ARMCOND_EQ, 0); /*branch if upper part == 0xFFFFFFFF (lower part has bit 31 set) */
5406 invalid_negative = code;
5407 ARM_B_COND (code, ARMCOND_AL, 0);
5409 arm_patch (high_bit_not_set, code);
5411 ARM_CMP_REG_IMM8 (code, ins->sreg2, 0);
5412 valid_positive = code;
5413 ARM_B_COND (code, ARMCOND_EQ, 0); /*branch if upper part == 0 (lower part has bit 31 clear)*/
5415 arm_patch (invalid_negative, code);
5416 EMIT_COND_SYSTEM_EXCEPTION_FLAGS (ARMCOND_AL, "OverflowException");
5418 arm_patch (valid_negative, code);
5419 arm_patch (valid_positive, code);
5421 if (ins->dreg != ins->sreg1)
5422 ARM_MOV_REG_REG (code, ins->dreg, ins->sreg1);
5426 ARM_VFP_ADDD (code, ins->dreg, ins->sreg1, ins->sreg2);
5429 ARM_VFP_SUBD (code, ins->dreg, ins->sreg1, ins->sreg2);
5432 ARM_VFP_MULD (code, ins->dreg, ins->sreg1, ins->sreg2);
5435 ARM_VFP_DIVD (code, ins->dreg, ins->sreg1, ins->sreg2);
5438 ARM_NEGD (code, ins->dreg, ins->sreg1);
5442 g_assert_not_reached ();
5446 ARM_CMPD (code, ins->sreg1, ins->sreg2);
5452 ARM_CMPD (code, ins->sreg1, ins->sreg2);
5455 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 0, ARMCOND_NE);
5456 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_EQ);
5460 ARM_CMPD (code, ins->sreg1, ins->sreg2);
5463 ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
5464 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_MI);
5468 ARM_CMPD (code, ins->sreg1, ins->sreg2);
5471 ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
5472 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_MI);
5473 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_VS);
5477 ARM_CMPD (code, ins->sreg2, ins->sreg1);
5480 ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
5481 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_MI);
5485 ARM_CMPD (code, ins->sreg2, ins->sreg1);
5488 ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
5489 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_MI);
5490 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_VS);
5494 ARM_CMPD (code, ins->sreg1, ins->sreg2);
5497 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_NE);
5498 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 0, ARMCOND_EQ);
5502 ARM_CMPD (code, ins->sreg1, ins->sreg2);
5505 ARM_MOV_REG_IMM8 (code, ins->dreg, 1);
5506 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 0, ARMCOND_MI);
5510 ARM_CMPD (code, ins->sreg2, ins->sreg1);
5513 ARM_MOV_REG_IMM8 (code, ins->dreg, 1);
5514 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 0, ARMCOND_MI);
5517 /* ARM FPA flags table:
5518 * N Less than ARMCOND_MI
5519 * Z Equal ARMCOND_EQ
5520 * C Greater Than or Equal ARMCOND_CS
5521 * V Unordered ARMCOND_VS
5524 EMIT_COND_BRANCH (ins, OP_IBEQ - OP_IBEQ);
5527 EMIT_COND_BRANCH (ins, OP_IBNE_UN - OP_IBEQ);
5530 EMIT_COND_BRANCH_FLAGS (ins, ARMCOND_MI); /* N set */
5533 EMIT_COND_BRANCH_FLAGS (ins, ARMCOND_VS); /* V set */
5534 EMIT_COND_BRANCH_FLAGS (ins, ARMCOND_MI); /* N set */
5540 g_assert_not_reached ();
5544 EMIT_COND_BRANCH_FLAGS (ins, ARMCOND_GE);
5546 /* FPA requires EQ even thou the docs suggests that just CS is enough */
5547 EMIT_COND_BRANCH_FLAGS (ins, ARMCOND_EQ);
5548 EMIT_COND_BRANCH_FLAGS (ins, ARMCOND_CS);
5552 EMIT_COND_BRANCH_FLAGS (ins, ARMCOND_VS); /* V set */
5553 EMIT_COND_BRANCH_FLAGS (ins, ARMCOND_GE);
5558 code = mono_arm_emit_vfp_scratch_save (cfg, code, vfp_scratch1);
5559 code = mono_arm_emit_vfp_scratch_save (cfg, code, vfp_scratch2);
5561 #ifdef USE_JUMP_TABLES
5563 gpointer *jte = mono_jumptable_add_entries (2);
5564 jte [0] = GUINT_TO_POINTER (0xffffffff);
5565 jte [1] = GUINT_TO_POINTER (0x7fefffff);
5566 code = mono_arm_load_jumptable_entry_addr (code, jte, ARMREG_IP);
5567 ARM_FLDD (code, vfp_scratch1, ARMREG_IP, 0);
5570 ARM_ABSD (code, vfp_scratch2, ins->sreg1);
5571 ARM_FLDD (code, vfp_scratch1, ARMREG_PC, 0);
5573 *(guint32*)code = 0xffffffff;
5575 *(guint32*)code = 0x7fefffff;
5578 ARM_CMPD (code, vfp_scratch2, vfp_scratch1);
5580 EMIT_COND_SYSTEM_EXCEPTION_FLAGS (ARMCOND_GT, "ArithmeticException");
5581 ARM_CMPD (code, ins->sreg1, ins->sreg1);
5583 EMIT_COND_SYSTEM_EXCEPTION_FLAGS (ARMCOND_VS, "ArithmeticException");
5584 ARM_CPYD (code, ins->dreg, ins->sreg1);
5586 code = mono_arm_emit_vfp_scratch_restore (cfg, code, vfp_scratch1);
5587 code = mono_arm_emit_vfp_scratch_restore (cfg, code, vfp_scratch2);
5592 case OP_GC_LIVENESS_DEF:
5593 case OP_GC_LIVENESS_USE:
5594 case OP_GC_PARAM_SLOT_LIVENESS_DEF:
5595 ins->backend.pc_offset = code - cfg->native_code;
5597 case OP_GC_SPILL_SLOT_LIVENESS_DEF:
5598 ins->backend.pc_offset = code - cfg->native_code;
5599 bb->spill_slot_defs = g_slist_prepend_mempool (cfg->mempool, bb->spill_slot_defs, ins);
5603 g_warning ("unknown opcode %s in %s()\n", mono_inst_name (ins->opcode), __FUNCTION__);
5604 g_assert_not_reached ();
5607 if ((cfg->opt & MONO_OPT_BRANCH) && ((code - cfg->native_code - offset) > max_len)) {
5608 g_warning ("wrong maximal instruction length of instruction %s (expected %d, got %d)",
5609 mono_inst_name (ins->opcode), max_len, code - cfg->native_code - offset);
5610 g_assert_not_reached ();
5616 last_offset = offset;
5619 cfg->code_len = code - cfg->native_code;
5622 #endif /* DISABLE_JIT */
5624 #ifdef HAVE_AEABI_READ_TP
5625 void __aeabi_read_tp (void);
5629 mono_arch_register_lowlevel_calls (void)
5631 /* The signature doesn't matter */
5632 mono_register_jit_icall (mono_arm_throw_exception, "mono_arm_throw_exception", mono_create_icall_signature ("void"), TRUE);
5633 mono_register_jit_icall (mono_arm_throw_exception_by_token, "mono_arm_throw_exception_by_token", mono_create_icall_signature ("void"), TRUE);
5635 #ifndef MONO_CROSS_COMPILE
5636 #ifdef HAVE_AEABI_READ_TP
5637 mono_register_jit_icall (__aeabi_read_tp, "__aeabi_read_tp", mono_create_icall_signature ("void"), TRUE);
5642 #define patch_lis_ori(ip,val) do {\
5643 guint16 *__lis_ori = (guint16*)(ip); \
5644 __lis_ori [1] = (((guint32)(val)) >> 16) & 0xffff; \
5645 __lis_ori [3] = ((guint32)(val)) & 0xffff; \
5649 mono_arch_patch_code (MonoMethod *method, MonoDomain *domain, guint8 *code, MonoJumpInfo *ji, MonoCodeManager *dyn_code_mp, gboolean run_cctors)
5651 MonoJumpInfo *patch_info;
5652 gboolean compile_aot = !run_cctors;
5654 for (patch_info = ji; patch_info; patch_info = patch_info->next) {
5655 unsigned char *ip = patch_info->ip.i + code;
5656 const unsigned char *target;
5658 if (patch_info->type == MONO_PATCH_INFO_SWITCH && !compile_aot) {
5659 #ifdef USE_JUMP_TABLES
5660 gpointer *jt = mono_jumptable_get_entry (ip);
5662 gpointer *jt = (gpointer*)(ip + 8);
5665 /* jt is the inlined jump table, 2 instructions after ip
5666 * In the normal case we store the absolute addresses,
5667 * otherwise the displacements.
5669 for (i = 0; i < patch_info->data.table->table_size; i++)
5670 jt [i] = code + (int)patch_info->data.table->table [i];
5675 switch (patch_info->type) {
5676 case MONO_PATCH_INFO_BB:
5677 case MONO_PATCH_INFO_LABEL:
5680 /* No need to patch these */
5685 target = mono_resolve_patch_target (method, domain, code, patch_info, run_cctors);
5687 switch (patch_info->type) {
5688 case MONO_PATCH_INFO_IP:
5689 g_assert_not_reached ();
5690 patch_lis_ori (ip, ip);
5692 case MONO_PATCH_INFO_METHOD_REL:
5693 g_assert_not_reached ();
5694 *((gpointer *)(ip)) = code + patch_info->data.offset;
5696 case MONO_PATCH_INFO_METHODCONST:
5697 case MONO_PATCH_INFO_CLASS:
5698 case MONO_PATCH_INFO_IMAGE:
5699 case MONO_PATCH_INFO_FIELD:
5700 case MONO_PATCH_INFO_VTABLE:
5701 case MONO_PATCH_INFO_IID:
5702 case MONO_PATCH_INFO_SFLDA:
5703 case MONO_PATCH_INFO_LDSTR:
5704 case MONO_PATCH_INFO_TYPE_FROM_HANDLE:
5705 case MONO_PATCH_INFO_LDTOKEN:
5706 g_assert_not_reached ();
5707 /* from OP_AOTCONST : lis + ori */
5708 patch_lis_ori (ip, target);
5710 case MONO_PATCH_INFO_R4:
5711 case MONO_PATCH_INFO_R8:
5712 g_assert_not_reached ();
5713 *((gconstpointer *)(ip + 2)) = patch_info->data.target;
5715 case MONO_PATCH_INFO_EXC_NAME:
5716 g_assert_not_reached ();
5717 *((gconstpointer *)(ip + 1)) = patch_info->data.name;
5719 case MONO_PATCH_INFO_NONE:
5720 case MONO_PATCH_INFO_BB_OVF:
5721 case MONO_PATCH_INFO_EXC_OVF:
5722 /* everything is dealt with at epilog output time */
5727 arm_patch_general (domain, ip, target, dyn_code_mp);
5734 * Stack frame layout:
5736 * ------------------- fp
5737 * MonoLMF structure or saved registers
5738 * -------------------
5740 * -------------------
5742 * -------------------
5743 * optional 8 bytes for tracing
5744 * -------------------
5745 * param area size is cfg->param_area
5746 * ------------------- sp
5749 mono_arch_emit_prolog (MonoCompile *cfg)
5751 MonoMethod *method = cfg->method;
5753 MonoMethodSignature *sig;
5755 int alloc_size, orig_alloc_size, pos, max_offset, i, rot_amount;
5760 int prev_sp_offset, reg_offset;
5762 if (mono_jit_trace_calls != NULL && mono_trace_eval (method))
5765 sig = mono_method_signature (method);
5766 cfg->code_size = 256 + sig->param_count * 64;
5767 code = cfg->native_code = g_malloc (cfg->code_size);
5769 mono_emit_unwind_op_def_cfa (cfg, code, ARMREG_SP, 0);
5771 alloc_size = cfg->stack_offset;
5777 * The iphone uses R7 as the frame pointer, and it points at the saved
5782 * We can't use r7 as a frame pointer since it points into the middle of
5783 * the frame, so we keep using our own frame pointer.
5784 * FIXME: Optimize this.
5786 ARM_PUSH (code, (1 << ARMREG_R7) | (1 << ARMREG_LR));
5787 ARM_MOV_REG_REG (code, ARMREG_R7, ARMREG_SP);
5788 prev_sp_offset += 8; /* r7 and lr */
5789 mono_emit_unwind_op_def_cfa_offset (cfg, code, prev_sp_offset);
5790 mono_emit_unwind_op_offset (cfg, code, ARMREG_R7, (- prev_sp_offset) + 0);
5793 if (!method->save_lmf) {
5795 /* No need to push LR again */
5796 if (cfg->used_int_regs)
5797 ARM_PUSH (code, cfg->used_int_regs);
5799 ARM_PUSH (code, cfg->used_int_regs | (1 << ARMREG_LR));
5800 prev_sp_offset += 4;
5802 for (i = 0; i < 16; ++i) {
5803 if (cfg->used_int_regs & (1 << i))
5804 prev_sp_offset += 4;
5806 mono_emit_unwind_op_def_cfa_offset (cfg, code, prev_sp_offset);
5808 for (i = 0; i < 16; ++i) {
5809 if ((cfg->used_int_regs & (1 << i))) {
5810 mono_emit_unwind_op_offset (cfg, code, i, (- prev_sp_offset) + reg_offset);
5811 mini_gc_set_slot_type_from_cfa (cfg, (- prev_sp_offset) + reg_offset, SLOT_NOREF);
5816 mono_emit_unwind_op_offset (cfg, code, ARMREG_LR, -4);
5817 mini_gc_set_slot_type_from_cfa (cfg, -4, SLOT_NOREF);
5819 mono_emit_unwind_op_offset (cfg, code, ARMREG_LR, -4);
5820 mini_gc_set_slot_type_from_cfa (cfg, -4, SLOT_NOREF);
5823 ARM_MOV_REG_REG (code, ARMREG_IP, ARMREG_SP);
5824 ARM_PUSH (code, 0x5ff0);
5825 prev_sp_offset += 4 * 10; /* all but r0-r3, sp and pc */
5826 mono_emit_unwind_op_def_cfa_offset (cfg, code, prev_sp_offset);
5828 for (i = 0; i < 16; ++i) {
5829 if ((i > ARMREG_R3) && (i != ARMREG_SP) && (i != ARMREG_PC)) {
5830 /* The original r7 is saved at the start */
5831 if (!(iphone_abi && i == ARMREG_R7))
5832 mono_emit_unwind_op_offset (cfg, code, i, (- prev_sp_offset) + reg_offset);
5836 g_assert (reg_offset == 4 * 10);
5837 pos += sizeof (MonoLMF) - (4 * 10);
5841 orig_alloc_size = alloc_size;
5842 // align to MONO_ARCH_FRAME_ALIGNMENT bytes
5843 if (alloc_size & (MONO_ARCH_FRAME_ALIGNMENT - 1)) {
5844 alloc_size += MONO_ARCH_FRAME_ALIGNMENT - 1;
5845 alloc_size &= ~(MONO_ARCH_FRAME_ALIGNMENT - 1);
5848 /* the stack used in the pushed regs */
5849 if (prev_sp_offset & 4)
5851 cfg->stack_usage = alloc_size;
5853 if ((i = mono_arm_is_rotated_imm8 (alloc_size, &rot_amount)) >= 0) {
5854 ARM_SUB_REG_IMM (code, ARMREG_SP, ARMREG_SP, i, rot_amount);
5856 code = mono_arm_emit_load_imm (code, ARMREG_IP, alloc_size);
5857 ARM_SUB_REG_REG (code, ARMREG_SP, ARMREG_SP, ARMREG_IP);
5859 mono_emit_unwind_op_def_cfa_offset (cfg, code, prev_sp_offset + alloc_size);
5861 if (cfg->frame_reg != ARMREG_SP) {
5862 ARM_MOV_REG_REG (code, cfg->frame_reg, ARMREG_SP);
5863 mono_emit_unwind_op_def_cfa_reg (cfg, code, cfg->frame_reg);
5865 //g_print ("prev_sp_offset: %d, alloc_size:%d\n", prev_sp_offset, alloc_size);
5866 prev_sp_offset += alloc_size;
5868 for (i = 0; i < alloc_size - orig_alloc_size; i += 4)
5869 mini_gc_set_slot_type_from_cfa (cfg, (- prev_sp_offset) + orig_alloc_size + i, SLOT_NOREF);
5871 /* compute max_offset in order to use short forward jumps
5872 * we could skip do it on arm because the immediate displacement
5873 * for jumps is large enough, it may be useful later for constant pools
5876 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
5877 MonoInst *ins = bb->code;
5878 bb->max_offset = max_offset;
5880 if (cfg->prof_options & MONO_PROFILE_COVERAGE)
5883 MONO_BB_FOR_EACH_INS (bb, ins)
5884 max_offset += ((guint8 *)ins_get_spec (ins->opcode))[MONO_INST_LEN];
5887 /* store runtime generic context */
5888 if (cfg->rgctx_var) {
5889 MonoInst *ins = cfg->rgctx_var;
5891 g_assert (ins->opcode == OP_REGOFFSET);
5893 if (arm_is_imm12 (ins->inst_offset)) {
5894 ARM_STR_IMM (code, MONO_ARCH_RGCTX_REG, ins->inst_basereg, ins->inst_offset);
5896 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_offset);
5897 ARM_STR_REG_REG (code, MONO_ARCH_RGCTX_REG, ins->inst_basereg, ARMREG_LR);
5901 /* load arguments allocated to register from the stack */
5904 cinfo = get_call_info (cfg->generic_sharing_context, NULL, sig);
5906 if (cinfo->vtype_retaddr) {
5907 ArgInfo *ainfo = &cinfo->ret;
5908 inst = cfg->vret_addr;
5909 g_assert (arm_is_imm12 (inst->inst_offset));
5910 ARM_STR_IMM (code, ainfo->reg, inst->inst_basereg, inst->inst_offset);
5913 if (sig->call_convention == MONO_CALL_VARARG) {
5914 ArgInfo *cookie = &cinfo->sig_cookie;
5916 /* Save the sig cookie address */
5917 g_assert (cookie->storage == RegTypeBase);
5919 g_assert (arm_is_imm12 (prev_sp_offset + cookie->offset));
5920 g_assert (arm_is_imm12 (cfg->sig_cookie));
5921 ARM_ADD_REG_IMM8 (code, ARMREG_IP, cfg->frame_reg, prev_sp_offset + cookie->offset);
5922 ARM_STR_IMM (code, ARMREG_IP, cfg->frame_reg, cfg->sig_cookie);
5925 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
5926 ArgInfo *ainfo = cinfo->args + i;
5927 inst = cfg->args [pos];
5929 if (cfg->verbose_level > 2)
5930 g_print ("Saving argument %d (type: %d)\n", i, ainfo->storage);
5931 if (inst->opcode == OP_REGVAR) {
5932 if (ainfo->storage == RegTypeGeneral)
5933 ARM_MOV_REG_REG (code, inst->dreg, ainfo->reg);
5934 else if (ainfo->storage == RegTypeFP) {
5935 g_assert_not_reached ();
5936 } else if (ainfo->storage == RegTypeBase) {
5937 if (arm_is_imm12 (prev_sp_offset + ainfo->offset)) {
5938 ARM_LDR_IMM (code, inst->dreg, ARMREG_SP, (prev_sp_offset + ainfo->offset));
5940 code = mono_arm_emit_load_imm (code, ARMREG_IP, prev_sp_offset + ainfo->offset);
5941 ARM_LDR_REG_REG (code, inst->dreg, ARMREG_SP, ARMREG_IP);
5944 g_assert_not_reached ();
5946 if (cfg->verbose_level > 2)
5947 g_print ("Argument %d assigned to register %s\n", pos, mono_arch_regname (inst->dreg));
5949 /* the argument should be put on the stack: FIXME handle size != word */
5950 if (ainfo->storage == RegTypeGeneral || ainfo->storage == RegTypeIRegPair || ainfo->storage == RegTypeGSharedVtInReg) {
5951 switch (ainfo->size) {
5953 if (arm_is_imm12 (inst->inst_offset))
5954 ARM_STRB_IMM (code, ainfo->reg, inst->inst_basereg, inst->inst_offset);
5956 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
5957 ARM_STRB_REG_REG (code, ainfo->reg, inst->inst_basereg, ARMREG_IP);
5961 if (arm_is_imm8 (inst->inst_offset)) {
5962 ARM_STRH_IMM (code, ainfo->reg, inst->inst_basereg, inst->inst_offset);
5964 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
5965 ARM_STRH_REG_REG (code, ainfo->reg, inst->inst_basereg, ARMREG_IP);
5969 if (arm_is_imm12 (inst->inst_offset)) {
5970 ARM_STR_IMM (code, ainfo->reg, inst->inst_basereg, inst->inst_offset);
5972 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
5973 ARM_STR_REG_REG (code, ainfo->reg, inst->inst_basereg, ARMREG_IP);
5975 if (arm_is_imm12 (inst->inst_offset + 4)) {
5976 ARM_STR_IMM (code, ainfo->reg + 1, inst->inst_basereg, inst->inst_offset + 4);
5978 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset + 4);
5979 ARM_STR_REG_REG (code, ainfo->reg + 1, inst->inst_basereg, ARMREG_IP);
5983 if (arm_is_imm12 (inst->inst_offset)) {
5984 ARM_STR_IMM (code, ainfo->reg, inst->inst_basereg, inst->inst_offset);
5986 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
5987 ARM_STR_REG_REG (code, ainfo->reg, inst->inst_basereg, ARMREG_IP);
5991 } else if (ainfo->storage == RegTypeBaseGen) {
5992 if (arm_is_imm12 (prev_sp_offset + ainfo->offset)) {
5993 ARM_LDR_IMM (code, ARMREG_LR, ARMREG_SP, (prev_sp_offset + ainfo->offset));
5995 code = mono_arm_emit_load_imm (code, ARMREG_IP, prev_sp_offset + ainfo->offset);
5996 ARM_LDR_REG_REG (code, ARMREG_LR, ARMREG_SP, ARMREG_IP);
5998 if (arm_is_imm12 (inst->inst_offset + 4)) {
5999 ARM_STR_IMM (code, ARMREG_LR, inst->inst_basereg, inst->inst_offset + 4);
6000 ARM_STR_IMM (code, ARMREG_R3, inst->inst_basereg, inst->inst_offset);
6002 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset + 4);
6003 ARM_STR_REG_REG (code, ARMREG_LR, inst->inst_basereg, ARMREG_IP);
6004 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
6005 ARM_STR_REG_REG (code, ARMREG_R3, inst->inst_basereg, ARMREG_IP);
6007 } else if (ainfo->storage == RegTypeBase || ainfo->storage == RegTypeGSharedVtOnStack) {
6008 if (arm_is_imm12 (prev_sp_offset + ainfo->offset)) {
6009 ARM_LDR_IMM (code, ARMREG_LR, ARMREG_SP, (prev_sp_offset + ainfo->offset));
6011 code = mono_arm_emit_load_imm (code, ARMREG_IP, prev_sp_offset + ainfo->offset);
6012 ARM_LDR_REG_REG (code, ARMREG_LR, ARMREG_SP, ARMREG_IP);
6015 switch (ainfo->size) {
6017 if (arm_is_imm8 (inst->inst_offset)) {
6018 ARM_STRB_IMM (code, ARMREG_LR, inst->inst_basereg, inst->inst_offset);
6020 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
6021 ARM_STRB_REG_REG (code, ARMREG_LR, inst->inst_basereg, ARMREG_IP);
6025 if (arm_is_imm8 (inst->inst_offset)) {
6026 ARM_STRH_IMM (code, ARMREG_LR, inst->inst_basereg, inst->inst_offset);
6028 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
6029 ARM_STRH_REG_REG (code, ARMREG_LR, inst->inst_basereg, ARMREG_IP);
6033 if (arm_is_imm12 (inst->inst_offset)) {
6034 ARM_STR_IMM (code, ARMREG_LR, inst->inst_basereg, inst->inst_offset);
6036 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
6037 ARM_STR_REG_REG (code, ARMREG_LR, inst->inst_basereg, ARMREG_IP);
6039 if (arm_is_imm12 (prev_sp_offset + ainfo->offset + 4)) {
6040 ARM_LDR_IMM (code, ARMREG_LR, ARMREG_SP, (prev_sp_offset + ainfo->offset + 4));
6042 code = mono_arm_emit_load_imm (code, ARMREG_IP, prev_sp_offset + ainfo->offset + 4);
6043 ARM_LDR_REG_REG (code, ARMREG_LR, ARMREG_SP, ARMREG_IP);
6045 if (arm_is_imm12 (inst->inst_offset + 4)) {
6046 ARM_STR_IMM (code, ARMREG_LR, inst->inst_basereg, inst->inst_offset + 4);
6048 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset + 4);
6049 ARM_STR_REG_REG (code, ARMREG_LR, inst->inst_basereg, ARMREG_IP);
6053 if (arm_is_imm12 (inst->inst_offset)) {
6054 ARM_STR_IMM (code, ARMREG_LR, inst->inst_basereg, inst->inst_offset);
6056 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
6057 ARM_STR_REG_REG (code, ARMREG_LR, inst->inst_basereg, ARMREG_IP);
6061 } else if (ainfo->storage == RegTypeFP) {
6062 int imm8, rot_amount;
6064 if ((imm8 = mono_arm_is_rotated_imm8 (inst->inst_offset, &rot_amount)) == -1) {
6065 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
6066 ARM_ADD_REG_REG (code, ARMREG_IP, ARMREG_IP, inst->inst_basereg);
6068 ARM_ADD_REG_IMM (code, ARMREG_IP, inst->inst_basereg, imm8, rot_amount);
6070 if (ainfo->size == 8)
6071 ARM_FSTD (code, ainfo->reg, ARMREG_IP, 0);
6073 ARM_FSTS (code, ainfo->reg, ARMREG_IP, 0);
6074 } else if (ainfo->storage == RegTypeStructByVal) {
6075 int doffset = inst->inst_offset;
6079 size = mini_type_stack_size_full (cfg->generic_sharing_context, inst->inst_vtype, NULL, sig->pinvoke);
6080 for (cur_reg = 0; cur_reg < ainfo->size; ++cur_reg) {
6081 if (arm_is_imm12 (doffset)) {
6082 ARM_STR_IMM (code, ainfo->reg + cur_reg, inst->inst_basereg, doffset);
6084 code = mono_arm_emit_load_imm (code, ARMREG_IP, doffset);
6085 ARM_STR_REG_REG (code, ainfo->reg + cur_reg, inst->inst_basereg, ARMREG_IP);
6087 soffset += sizeof (gpointer);
6088 doffset += sizeof (gpointer);
6090 if (ainfo->vtsize) {
6091 /* FIXME: handle overrun! with struct sizes not multiple of 4 */
6092 //g_print ("emit_memcpy (prev_sp_ofs: %d, ainfo->offset: %d, soffset: %d)\n", prev_sp_offset, ainfo->offset, soffset);
6093 code = emit_memcpy (code, ainfo->vtsize * sizeof (gpointer), inst->inst_basereg, doffset, ARMREG_SP, prev_sp_offset + ainfo->offset);
6095 } else if (ainfo->storage == RegTypeStructByAddr) {
6096 g_assert_not_reached ();
6097 /* FIXME: handle overrun! with struct sizes not multiple of 4 */
6098 code = emit_memcpy (code, ainfo->vtsize * sizeof (gpointer), inst->inst_basereg, inst->inst_offset, ainfo->reg, 0);
6100 g_assert_not_reached ();
6105 if (method->save_lmf)
6106 code = emit_save_lmf (cfg, code, alloc_size - lmf_offset);
6109 code = mono_arch_instrument_prolog (cfg, mono_trace_enter_method, code, TRUE);
6111 if (cfg->arch.seq_point_info_var) {
6112 MonoInst *ins = cfg->arch.seq_point_info_var;
6114 /* Initialize the variable from a GOT slot */
6115 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_SEQ_POINT_INFO, cfg->method);
6116 #ifdef USE_JUMP_TABLES
6118 gpointer *jte = mono_jumptable_add_entry ();
6119 code = mono_arm_load_jumptable_entry (code, jte, ARMREG_IP);
6120 ARM_LDR_IMM (code, ARMREG_R0, ARMREG_IP, 0);
6122 /** XXX: is it correct? */
6124 ARM_LDR_IMM (code, ARMREG_R0, ARMREG_PC, 0);
6126 *(gpointer*)code = NULL;
6129 ARM_LDR_REG_REG (code, ARMREG_R0, ARMREG_PC, ARMREG_R0);
6131 g_assert (ins->opcode == OP_REGOFFSET);
6133 if (arm_is_imm12 (ins->inst_offset)) {
6134 ARM_STR_IMM (code, ARMREG_R0, ins->inst_basereg, ins->inst_offset);
6136 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_offset);
6137 ARM_STR_REG_REG (code, ARMREG_R0, ins->inst_basereg, ARMREG_LR);
6141 /* Initialize ss_trigger_page_var */
6142 if (!cfg->soft_breakpoints) {
6143 MonoInst *info_var = cfg->arch.seq_point_info_var;
6144 MonoInst *ss_trigger_page_var = cfg->arch.ss_trigger_page_var;
6145 int dreg = ARMREG_LR;
6148 g_assert (info_var->opcode == OP_REGOFFSET);
6149 g_assert (arm_is_imm12 (info_var->inst_offset));
6151 ARM_LDR_IMM (code, dreg, info_var->inst_basereg, info_var->inst_offset);
6152 /* Load the trigger page addr */
6153 ARM_LDR_IMM (code, dreg, dreg, MONO_STRUCT_OFFSET (SeqPointInfo, ss_trigger_page));
6154 ARM_STR_IMM (code, dreg, ss_trigger_page_var->inst_basereg, ss_trigger_page_var->inst_offset);
6158 if (cfg->arch.seq_point_read_var) {
6159 MonoInst *read_ins = cfg->arch.seq_point_read_var;
6160 MonoInst *ss_method_ins = cfg->arch.seq_point_ss_method_var;
6161 MonoInst *bp_method_ins = cfg->arch.seq_point_bp_method_var;
6162 #ifdef USE_JUMP_TABLES
6165 g_assert (read_ins->opcode == OP_REGOFFSET);
6166 g_assert (arm_is_imm12 (read_ins->inst_offset));
6167 g_assert (ss_method_ins->opcode == OP_REGOFFSET);
6168 g_assert (arm_is_imm12 (ss_method_ins->inst_offset));
6169 g_assert (bp_method_ins->opcode == OP_REGOFFSET);
6170 g_assert (arm_is_imm12 (bp_method_ins->inst_offset));
6172 #ifdef USE_JUMP_TABLES
6173 jte = mono_jumptable_add_entries (3);
6174 jte [0] = (gpointer)&ss_trigger_var;
6175 jte [1] = single_step_func_wrapper;
6176 jte [2] = breakpoint_func_wrapper;
6177 code = mono_arm_load_jumptable_entry_addr (code, jte, ARMREG_LR);
6179 ARM_MOV_REG_REG (code, ARMREG_LR, ARMREG_PC);
6181 *(volatile int **)code = &ss_trigger_var;
6183 *(gpointer*)code = single_step_func_wrapper;
6185 *(gpointer*)code = breakpoint_func_wrapper;
6189 ARM_LDR_IMM (code, ARMREG_IP, ARMREG_LR, 0);
6190 ARM_STR_IMM (code, ARMREG_IP, read_ins->inst_basereg, read_ins->inst_offset);
6191 ARM_LDR_IMM (code, ARMREG_IP, ARMREG_LR, 4);
6192 ARM_STR_IMM (code, ARMREG_IP, ss_method_ins->inst_basereg, ss_method_ins->inst_offset);
6193 ARM_LDR_IMM (code, ARMREG_IP, ARMREG_LR, 8);
6194 ARM_STR_IMM (code, ARMREG_IP, bp_method_ins->inst_basereg, bp_method_ins->inst_offset);
6197 cfg->code_len = code - cfg->native_code;
6198 g_assert (cfg->code_len < cfg->code_size);
6205 mono_arch_emit_epilog (MonoCompile *cfg)
6207 MonoMethod *method = cfg->method;
6208 int pos, i, rot_amount;
6209 int max_epilog_size = 16 + 20*4;
6213 if (cfg->method->save_lmf)
6214 max_epilog_size += 128;
6216 if (mono_jit_trace_calls != NULL)
6217 max_epilog_size += 50;
6219 if (cfg->prof_options & MONO_PROFILE_ENTER_LEAVE)
6220 max_epilog_size += 50;
6222 while (cfg->code_len + max_epilog_size > (cfg->code_size - 16)) {
6223 cfg->code_size *= 2;
6224 cfg->native_code = g_realloc (cfg->native_code, cfg->code_size);
6225 cfg->stat_code_reallocs++;
6229 * Keep in sync with OP_JMP
6231 code = cfg->native_code + cfg->code_len;
6233 if (mono_jit_trace_calls != NULL && mono_trace_eval (method)) {
6234 code = mono_arch_instrument_epilog (cfg, mono_trace_leave_method, code, TRUE);
6238 /* Load returned vtypes into registers if needed */
6239 cinfo = cfg->arch.cinfo;
6240 if (cinfo->ret.storage == RegTypeStructByVal) {
6241 MonoInst *ins = cfg->ret;
6243 if (arm_is_imm12 (ins->inst_offset)) {
6244 ARM_LDR_IMM (code, ARMREG_R0, ins->inst_basereg, ins->inst_offset);
6246 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_offset);
6247 ARM_LDR_REG_REG (code, ARMREG_R0, ins->inst_basereg, ARMREG_LR);
6251 if (method->save_lmf) {
6252 int lmf_offset, reg, sp_adj, regmask;
6253 /* all but r0-r3, sp and pc */
6254 pos += sizeof (MonoLMF) - (MONO_ARM_NUM_SAVED_REGS * sizeof (mgreg_t));
6257 code = emit_restore_lmf (cfg, code, cfg->stack_usage - lmf_offset);
6259 /* This points to r4 inside MonoLMF->iregs */
6260 sp_adj = (sizeof (MonoLMF) - MONO_ARM_NUM_SAVED_REGS * sizeof (mgreg_t));
6262 regmask = 0x9ff0; /* restore lr to pc */
6263 /* Skip caller saved registers not used by the method */
6264 while (!(cfg->used_int_regs & (1 << reg)) && reg < ARMREG_FP) {
6265 regmask &= ~(1 << reg);
6270 /* Restored later */
6271 regmask &= ~(1 << ARMREG_PC);
6272 /* point sp at the registers to restore: 10 is 14 -4, because we skip r0-r3 */
6273 code = emit_big_add (code, ARMREG_SP, cfg->frame_reg, cfg->stack_usage - lmf_offset + sp_adj);
6275 ARM_POP (code, regmask);
6277 /* Restore saved r7, restore LR to PC */
6278 /* Skip lr from the lmf */
6279 ARM_ADD_REG_IMM (code, ARMREG_SP, ARMREG_SP, sizeof (gpointer), 0);
6280 ARM_POP (code, (1 << ARMREG_R7) | (1 << ARMREG_PC));
6283 if ((i = mono_arm_is_rotated_imm8 (cfg->stack_usage, &rot_amount)) >= 0) {
6284 ARM_ADD_REG_IMM (code, ARMREG_SP, cfg->frame_reg, i, rot_amount);
6286 code = mono_arm_emit_load_imm (code, ARMREG_IP, cfg->stack_usage);
6287 ARM_ADD_REG_REG (code, ARMREG_SP, cfg->frame_reg, ARMREG_IP);
6291 /* Restore saved gregs */
6292 if (cfg->used_int_regs)
6293 ARM_POP (code, cfg->used_int_regs);
6294 /* Restore saved r7, restore LR to PC */
6295 ARM_POP (code, (1 << ARMREG_R7) | (1 << ARMREG_PC));
6297 ARM_POP (code, cfg->used_int_regs | (1 << ARMREG_PC));
6301 cfg->code_len = code - cfg->native_code;
6303 g_assert (cfg->code_len < cfg->code_size);
6308 mono_arch_emit_exceptions (MonoCompile *cfg)
6310 MonoJumpInfo *patch_info;
6313 guint8* exc_throw_pos [MONO_EXC_INTRINS_NUM];
6314 guint8 exc_throw_found [MONO_EXC_INTRINS_NUM];
6315 int max_epilog_size = 50;
6317 for (i = 0; i < MONO_EXC_INTRINS_NUM; i++) {
6318 exc_throw_pos [i] = NULL;
6319 exc_throw_found [i] = 0;
6322 /* count the number of exception infos */
6325 * make sure we have enough space for exceptions
6327 for (patch_info = cfg->patch_info; patch_info; patch_info = patch_info->next) {
6328 if (patch_info->type == MONO_PATCH_INFO_EXC) {
6329 i = mini_exception_id_by_name (patch_info->data.target);
6330 if (!exc_throw_found [i]) {
6331 max_epilog_size += 32;
6332 exc_throw_found [i] = TRUE;
6337 while (cfg->code_len + max_epilog_size > (cfg->code_size - 16)) {
6338 cfg->code_size *= 2;
6339 cfg->native_code = g_realloc (cfg->native_code, cfg->code_size);
6340 cfg->stat_code_reallocs++;
6343 code = cfg->native_code + cfg->code_len;
6345 /* add code to raise exceptions */
6346 for (patch_info = cfg->patch_info; patch_info; patch_info = patch_info->next) {
6347 switch (patch_info->type) {
6348 case MONO_PATCH_INFO_EXC: {
6349 MonoClass *exc_class;
6350 unsigned char *ip = patch_info->ip.i + cfg->native_code;
6352 i = mini_exception_id_by_name (patch_info->data.target);
6353 if (exc_throw_pos [i]) {
6354 arm_patch (ip, exc_throw_pos [i]);
6355 patch_info->type = MONO_PATCH_INFO_NONE;
6358 exc_throw_pos [i] = code;
6360 arm_patch (ip, code);
6362 exc_class = mono_class_from_name (mono_defaults.corlib, "System", patch_info->data.name);
6363 g_assert (exc_class);
6365 ARM_MOV_REG_REG (code, ARMREG_R1, ARMREG_LR);
6366 #ifdef USE_JUMP_TABLES
6368 gpointer *jte = mono_jumptable_add_entries (2);
6369 patch_info->type = MONO_PATCH_INFO_INTERNAL_METHOD;
6370 patch_info->data.name = "mono_arch_throw_corlib_exception";
6371 patch_info->ip.i = code - cfg->native_code;
6372 code = mono_arm_load_jumptable_entry_addr (code, jte, ARMREG_R0);
6373 ARM_LDR_IMM (code, ARMREG_IP, ARMREG_R0, 0);
6374 ARM_LDR_IMM (code, ARMREG_R0, ARMREG_R0, 4);
6375 ARM_BLX_REG (code, ARMREG_IP);
6376 jte [1] = GUINT_TO_POINTER (exc_class->type_token);
6379 ARM_LDR_IMM (code, ARMREG_R0, ARMREG_PC, 0);
6380 patch_info->type = MONO_PATCH_INFO_INTERNAL_METHOD;
6381 patch_info->data.name = "mono_arch_throw_corlib_exception";
6382 patch_info->ip.i = code - cfg->native_code;
6384 *(guint32*)(gpointer)code = exc_class->type_token;
6395 cfg->code_len = code - cfg->native_code;
6397 g_assert (cfg->code_len < cfg->code_size);
6401 #endif /* #ifndef DISABLE_JIT */
6404 mono_arch_finish_init (void)
6409 mono_arch_free_jit_tls_data (MonoJitTlsData *tls)
6414 mono_arch_emit_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
6421 mono_arch_print_tree (MonoInst *tree, int arity)
6431 mono_arch_get_patch_offset (guint8 *code)
6438 mono_arch_flush_register_windows (void)
6445 mono_arch_emit_imt_argument (MonoCompile *cfg, MonoCallInst *call, MonoInst *imt_arg)
6447 int method_reg = mono_alloc_ireg (cfg);
6448 #ifdef USE_JUMP_TABLES
6449 int use_jumptables = TRUE;
6451 int use_jumptables = FALSE;
6454 if (cfg->compile_aot) {
6457 call->dynamic_imt_arg = TRUE;
6460 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, method_reg, imt_arg->dreg);
6462 MONO_INST_NEW (cfg, ins, OP_AOTCONST);
6463 ins->dreg = method_reg;
6464 ins->inst_p0 = call->method;
6465 ins->inst_c1 = MONO_PATCH_INFO_METHODCONST;
6466 MONO_ADD_INS (cfg->cbb, ins);
6468 mono_call_inst_add_outarg_reg (cfg, call, method_reg, ARMREG_V5, FALSE);
6469 } else if (cfg->generic_context || imt_arg || mono_use_llvm || use_jumptables) {
6470 /* Always pass in a register for simplicity */
6471 call->dynamic_imt_arg = TRUE;
6473 cfg->uses_rgctx_reg = TRUE;
6476 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, method_reg, imt_arg->dreg);
6480 MONO_INST_NEW (cfg, ins, OP_PCONST);
6481 ins->inst_p0 = call->method;
6482 ins->dreg = method_reg;
6483 MONO_ADD_INS (cfg->cbb, ins);
6486 mono_call_inst_add_outarg_reg (cfg, call, method_reg, ARMREG_V5, FALSE);
6490 #endif /* DISABLE_JIT */
6493 mono_arch_find_imt_method (mgreg_t *regs, guint8 *code)
6495 #ifdef USE_JUMP_TABLES
6496 return (MonoMethod*)regs [ARMREG_V5];
6499 guint32 *code_ptr = (guint32*)code;
6501 method = GUINT_TO_POINTER (code_ptr [1]);
6505 return (MonoMethod*)regs [ARMREG_V5];
6507 /* The IMT value is stored in the code stream right after the LDC instruction. */
6508 /* This is no longer true for the gsharedvt_in trampoline */
6510 if (!IS_LDR_PC (code_ptr [0])) {
6511 g_warning ("invalid code stream, instruction before IMT value is not a LDC in %s() (code %p value 0: 0x%x -1: 0x%x -2: 0x%x)", __FUNCTION__, code, code_ptr [2], code_ptr [1], code_ptr [0]);
6512 g_assert (IS_LDR_PC (code_ptr [0]));
6516 /* This is AOTed code, or the gsharedvt trampoline, the IMT method is in V5 */
6517 return (MonoMethod*)regs [ARMREG_V5];
6519 return (MonoMethod*) method;
6524 mono_arch_find_static_call_vtable (mgreg_t *regs, guint8 *code)
6526 return (MonoVTable*) regs [MONO_ARCH_RGCTX_REG];
6529 /* #define ENABLE_WRONG_METHOD_CHECK 1 */
6530 #define BASE_SIZE (6 * 4)
6531 #define BSEARCH_ENTRY_SIZE (4 * 4)
6532 #define CMP_SIZE (3 * 4)
6533 #define BRANCH_SIZE (1 * 4)
6534 #define CALL_SIZE (2 * 4)
6535 #define WMC_SIZE (8 * 4)
6536 #define DISTANCE(A, B) (((gint32)(B)) - ((gint32)(A)))
6538 #ifdef USE_JUMP_TABLES
6540 set_jumptable_element (gpointer *base, guint32 index, gpointer value)
6542 g_assert (base [index] == NULL);
6543 base [index] = value;
6546 load_element_with_regbase_cond (arminstr_t *code, ARMReg dreg, ARMReg base, guint32 jti, int cond)
6548 if (arm_is_imm12 (jti * 4)) {
6549 ARM_LDR_IMM_COND (code, dreg, base, jti * 4, cond);
6551 ARM_MOVW_REG_IMM_COND (code, dreg, (jti * 4) & 0xffff, cond);
6552 if ((jti * 4) >> 16)
6553 ARM_MOVT_REG_IMM_COND (code, dreg, ((jti * 4) >> 16) & 0xffff, cond);
6554 ARM_LDR_REG_REG_SHIFT_COND (code, dreg, base, dreg, ARMSHIFT_LSL, 0, cond);
6560 arm_emit_value_and_patch_ldr (arminstr_t *code, arminstr_t *target, guint32 value)
6562 guint32 delta = DISTANCE (target, code);
6564 g_assert (delta >= 0 && delta <= 0xFFF);
6565 *target = *target | delta;
6571 #ifdef ENABLE_WRONG_METHOD_CHECK
6573 mini_dump_bad_imt (int input_imt, int compared_imt, int pc)
6575 g_print ("BAD IMT comparing %x with expected %x at ip %x", input_imt, compared_imt, pc);
6581 mono_arch_build_imt_thunk (MonoVTable *vtable, MonoDomain *domain, MonoIMTCheckItem **imt_entries, int count,
6582 gpointer fail_tramp)
6585 arminstr_t *code, *start;
6586 #ifdef USE_JUMP_TABLES
6589 gboolean large_offsets = FALSE;
6590 guint32 **constant_pool_starts;
6591 arminstr_t *vtable_target = NULL;
6592 int extra_space = 0;
6594 #ifdef ENABLE_WRONG_METHOD_CHECK
6599 #ifdef USE_JUMP_TABLES
6600 for (i = 0; i < count; ++i) {
6601 MonoIMTCheckItem *item = imt_entries [i];
6602 item->chunk_size += 4 * 16;
6603 if (!item->is_equals)
6604 imt_entries [item->check_target_idx]->compare_done = TRUE;
6605 size += item->chunk_size;
6608 constant_pool_starts = g_new0 (guint32*, count);
6610 for (i = 0; i < count; ++i) {
6611 MonoIMTCheckItem *item = imt_entries [i];
6612 if (item->is_equals) {
6613 gboolean fail_case = !item->check_target_idx && fail_tramp;
6615 if (item->has_target_code || !arm_is_imm12 (DISTANCE (vtable, &vtable->vtable[item->value.vtable_slot]))) {
6616 item->chunk_size += 32;
6617 large_offsets = TRUE;
6620 if (item->check_target_idx || fail_case) {
6621 if (!item->compare_done || fail_case)
6622 item->chunk_size += CMP_SIZE;
6623 item->chunk_size += BRANCH_SIZE;
6625 #ifdef ENABLE_WRONG_METHOD_CHECK
6626 item->chunk_size += WMC_SIZE;
6630 item->chunk_size += 16;
6631 large_offsets = TRUE;
6633 item->chunk_size += CALL_SIZE;
6635 item->chunk_size += BSEARCH_ENTRY_SIZE;
6636 imt_entries [item->check_target_idx]->compare_done = TRUE;
6638 size += item->chunk_size;
6642 size += 4 * count; /* The ARM_ADD_REG_IMM to pop the stack */
6646 code = mono_method_alloc_generic_virtual_thunk (domain, size);
6648 code = mono_domain_code_reserve (domain, size);
6652 g_print ("Building IMT thunk for class %s %s entries %d code size %d code at %p end %p vtable %p fail_tramp %p\n", vtable->klass->name_space, vtable->klass->name, count, size, start, ((guint8*)start) + size, vtable, fail_tramp);
6653 for (i = 0; i < count; ++i) {
6654 MonoIMTCheckItem *item = imt_entries [i];
6655 g_print ("method %d (%p) %s vtable slot %p is_equals %d chunk size %d\n", i, item->key, ((MonoMethod*)item->key)->name, &vtable->vtable [item->value.vtable_slot], item->is_equals, item->chunk_size);
6659 #ifdef USE_JUMP_TABLES
6660 ARM_PUSH3 (code, ARMREG_R0, ARMREG_R1, ARMREG_R2);
6661 /* If jumptables we always pass the IMT method in R5 */
6662 ARM_MOV_REG_REG (code, ARMREG_R0, ARMREG_V5);
6663 #define VTABLE_JTI 0
6664 #define IMT_METHOD_OFFSET 0
6665 #define TARGET_CODE_OFFSET 1
6666 #define JUMP_CODE_OFFSET 2
6667 #define RECORDS_PER_ENTRY 3
6668 #define IMT_METHOD_JTI(idx) (1 + idx * RECORDS_PER_ENTRY + IMT_METHOD_OFFSET)
6669 #define TARGET_CODE_JTI(idx) (1 + idx * RECORDS_PER_ENTRY + TARGET_CODE_OFFSET)
6670 #define JUMP_CODE_JTI(idx) (1 + idx * RECORDS_PER_ENTRY + JUMP_CODE_OFFSET)
6672 jte = mono_jumptable_add_entries (RECORDS_PER_ENTRY * count + 1 /* vtable */);
6673 code = (arminstr_t *) mono_arm_load_jumptable_entry_addr ((guint8 *) code, jte, ARMREG_R2);
6674 ARM_LDR_IMM (code, ARMREG_IP, ARMREG_R2, VTABLE_JTI);
6675 set_jumptable_element (jte, VTABLE_JTI, vtable);
6678 ARM_PUSH4 (code, ARMREG_R0, ARMREG_R1, ARMREG_IP, ARMREG_PC);
6680 ARM_PUSH2 (code, ARMREG_R0, ARMREG_R1);
6681 ARM_LDR_IMM (code, ARMREG_R0, ARMREG_LR, -4);
6682 vtable_target = code;
6683 ARM_LDR_IMM (code, ARMREG_IP, ARMREG_PC, 0);
6685 if (mono_use_llvm) {
6686 /* LLVM always passes the IMT method in R5 */
6687 ARM_MOV_REG_REG (code, ARMREG_R0, ARMREG_V5);
6689 /* R0 == 0 means we are called from AOT code. In this case, V5 contains the IMT method */
6690 ARM_CMP_REG_IMM8 (code, ARMREG_R0, 0);
6691 ARM_MOV_REG_REG_COND (code, ARMREG_R0, ARMREG_V5, ARMCOND_EQ);
6695 for (i = 0; i < count; ++i) {
6696 MonoIMTCheckItem *item = imt_entries [i];
6697 #ifdef USE_JUMP_TABLES
6698 guint32 imt_method_jti = 0, target_code_jti = 0;
6700 arminstr_t *imt_method = NULL, *vtable_offset_ins = NULL, *target_code_ins = NULL;
6702 gint32 vtable_offset;
6704 item->code_target = (guint8*)code;
6706 if (item->is_equals) {
6707 gboolean fail_case = !item->check_target_idx && fail_tramp;
6709 if (item->check_target_idx || fail_case) {
6710 if (!item->compare_done || fail_case) {
6711 #ifdef USE_JUMP_TABLES
6712 imt_method_jti = IMT_METHOD_JTI (i);
6713 code = load_element_with_regbase_cond (code, ARMREG_R1, ARMREG_R2, imt_method_jti, ARMCOND_AL);
6716 ARM_LDR_IMM (code, ARMREG_R1, ARMREG_PC, 0);
6718 ARM_CMP_REG_REG (code, ARMREG_R0, ARMREG_R1);
6720 #ifdef USE_JUMP_TABLES
6721 code = load_element_with_regbase_cond (code, ARMREG_R1, ARMREG_R2, JUMP_CODE_JTI (i), ARMCOND_NE);
6722 ARM_BX_COND (code, ARMCOND_NE, ARMREG_R1);
6723 item->jmp_code = GUINT_TO_POINTER (JUMP_CODE_JTI (i));
6725 item->jmp_code = (guint8*)code;
6726 ARM_B_COND (code, ARMCOND_NE, 0);
6729 /*Enable the commented code to assert on wrong method*/
6730 #ifdef ENABLE_WRONG_METHOD_CHECK
6731 #ifdef USE_JUMP_TABLES
6732 imt_method_jti = IMT_METHOD_JTI (i);
6733 code = load_element_with_regbase_cond (code, ARMREG_R1, ARMREG_R2, imt_method_jti, ARMCOND_AL);
6736 ARM_LDR_IMM (code, ARMREG_R1, ARMREG_PC, 0);
6738 ARM_CMP_REG_REG (code, ARMREG_R0, ARMREG_R1);
6740 ARM_B_COND (code, ARMCOND_EQ, 0);
6742 /* Define this if your system is so bad that gdb is failing. */
6743 #ifdef BROKEN_DEV_ENV
6744 ARM_MOV_REG_REG (code, ARMREG_R2, ARMREG_PC);
6746 arm_patch (code - 1, mini_dump_bad_imt);
6750 arm_patch (cond, code);
6754 if (item->has_target_code) {
6755 /* Load target address */
6756 #ifdef USE_JUMP_TABLES
6757 target_code_jti = TARGET_CODE_JTI (i);
6758 code = load_element_with_regbase_cond (code, ARMREG_R1, ARMREG_R2, target_code_jti, ARMCOND_AL);
6759 /* Restore registers */
6760 ARM_POP3 (code, ARMREG_R0, ARMREG_R1, ARMREG_R2);
6762 ARM_BX (code, ARMREG_R1);
6763 set_jumptable_element (jte, target_code_jti, item->value.target_code);
6765 target_code_ins = code;
6766 ARM_LDR_IMM (code, ARMREG_R1, ARMREG_PC, 0);
6767 /* Save it to the fourth slot */
6768 ARM_STR_IMM (code, ARMREG_R1, ARMREG_SP, 3 * sizeof (gpointer));
6769 /* Restore registers and branch */
6770 ARM_POP4 (code, ARMREG_R0, ARMREG_R1, ARMREG_IP, ARMREG_PC);
6772 code = arm_emit_value_and_patch_ldr (code, target_code_ins, (gsize)item->value.target_code);
6775 vtable_offset = DISTANCE (vtable, &vtable->vtable[item->value.vtable_slot]);
6776 if (!arm_is_imm12 (vtable_offset)) {
6778 * We need to branch to a computed address but we don't have
6779 * a free register to store it, since IP must contain the
6780 * vtable address. So we push the two values to the stack, and
6781 * load them both using LDM.
6783 /* Compute target address */
6784 #ifdef USE_JUMP_TABLES
6785 ARM_MOVW_REG_IMM (code, ARMREG_R1, vtable_offset & 0xffff);
6786 if (vtable_offset >> 16)
6787 ARM_MOVT_REG_IMM (code, ARMREG_R1, (vtable_offset >> 16) & 0xffff);
6788 /* IP had vtable base. */
6789 ARM_LDR_REG_REG (code, ARMREG_IP, ARMREG_IP, ARMREG_R1);
6790 /* Restore registers and branch */
6791 ARM_POP3 (code, ARMREG_R0, ARMREG_R1, ARMREG_R2);
6792 ARM_BX (code, ARMREG_IP);
6794 vtable_offset_ins = code;
6795 ARM_LDR_IMM (code, ARMREG_R1, ARMREG_PC, 0);
6796 ARM_LDR_REG_REG (code, ARMREG_R1, ARMREG_IP, ARMREG_R1);
6797 /* Save it to the fourth slot */
6798 ARM_STR_IMM (code, ARMREG_R1, ARMREG_SP, 3 * sizeof (gpointer));
6799 /* Restore registers and branch */
6800 ARM_POP4 (code, ARMREG_R0, ARMREG_R1, ARMREG_IP, ARMREG_PC);
6802 code = arm_emit_value_and_patch_ldr (code, vtable_offset_ins, vtable_offset);
6805 #ifdef USE_JUMP_TABLES
6806 ARM_LDR_IMM (code, ARMREG_IP, ARMREG_IP, vtable_offset);
6807 ARM_POP3 (code, ARMREG_R0, ARMREG_R1, ARMREG_R2);
6808 ARM_BX (code, ARMREG_IP);
6810 ARM_POP2 (code, ARMREG_R0, ARMREG_R1);
6812 ARM_ADD_REG_IMM8 (code, ARMREG_SP, ARMREG_SP, 2 * sizeof (gpointer));
6813 ARM_LDR_IMM (code, ARMREG_PC, ARMREG_IP, vtable_offset);
6819 #ifdef USE_JUMP_TABLES
6820 set_jumptable_element (jte, GPOINTER_TO_UINT (item->jmp_code), code);
6821 target_code_jti = TARGET_CODE_JTI (i);
6822 /* Load target address */
6823 code = load_element_with_regbase_cond (code, ARMREG_R1, ARMREG_R2, target_code_jti, ARMCOND_AL);
6824 /* Restore registers */
6825 ARM_POP3 (code, ARMREG_R0, ARMREG_R1, ARMREG_R2);
6827 ARM_BX (code, ARMREG_R1);
6828 set_jumptable_element (jte, target_code_jti, fail_tramp);
6830 arm_patch (item->jmp_code, (guchar*)code);
6832 target_code_ins = code;
6833 /* Load target address */
6834 ARM_LDR_IMM (code, ARMREG_R1, ARMREG_PC, 0);
6835 /* Save it to the fourth slot */
6836 ARM_STR_IMM (code, ARMREG_R1, ARMREG_SP, 3 * sizeof (gpointer));
6837 /* Restore registers and branch */
6838 ARM_POP4 (code, ARMREG_R0, ARMREG_R1, ARMREG_IP, ARMREG_PC);
6840 code = arm_emit_value_and_patch_ldr (code, target_code_ins, (gsize)fail_tramp);
6842 item->jmp_code = NULL;
6845 #ifdef USE_JUMP_TABLES
6847 set_jumptable_element (jte, imt_method_jti, item->key);
6850 code = arm_emit_value_and_patch_ldr (code, imt_method, (guint32)item->key);
6852 /*must emit after unconditional branch*/
6853 if (vtable_target) {
6854 code = arm_emit_value_and_patch_ldr (code, vtable_target, (guint32)vtable);
6855 item->chunk_size += 4;
6856 vtable_target = NULL;
6859 /*We reserve the space for bsearch IMT values after the first entry with an absolute jump*/
6860 constant_pool_starts [i] = code;
6862 code += extra_space;
6867 #ifdef USE_JUMP_TABLES
6868 code = load_element_with_regbase_cond (code, ARMREG_R1, ARMREG_R2, IMT_METHOD_JTI (i), ARMCOND_AL);
6869 ARM_CMP_REG_REG (code, ARMREG_R0, ARMREG_R1);
6870 code = load_element_with_regbase_cond (code, ARMREG_R1, ARMREG_R2, JUMP_CODE_JTI (i), ARMCOND_HS);
6871 ARM_BX_COND (code, ARMCOND_HS, ARMREG_R1);
6872 item->jmp_code = GUINT_TO_POINTER (JUMP_CODE_JTI (i));
6874 ARM_LDR_IMM (code, ARMREG_R1, ARMREG_PC, 0);
6875 ARM_CMP_REG_REG (code, ARMREG_R0, ARMREG_R1);
6877 item->jmp_code = (guint8*)code;
6878 ARM_B_COND (code, ARMCOND_HS, 0);
6884 for (i = 0; i < count; ++i) {
6885 MonoIMTCheckItem *item = imt_entries [i];
6886 if (item->jmp_code) {
6887 if (item->check_target_idx)
6888 #ifdef USE_JUMP_TABLES
6889 set_jumptable_element (jte, GPOINTER_TO_UINT (item->jmp_code), imt_entries [item->check_target_idx]->code_target);
6891 arm_patch (item->jmp_code, imt_entries [item->check_target_idx]->code_target);
6894 if (i > 0 && item->is_equals) {
6896 #ifdef USE_JUMP_TABLES
6897 for (j = i - 1; j >= 0 && !imt_entries [j]->is_equals; --j)
6898 set_jumptable_element (jte, IMT_METHOD_JTI (j), imt_entries [j]->key);
6900 arminstr_t *space_start = constant_pool_starts [i];
6901 for (j = i - 1; j >= 0 && !imt_entries [j]->is_equals; --j) {
6902 space_start = arm_emit_value_and_patch_ldr (space_start, (arminstr_t*)imt_entries [j]->code_target, (guint32)imt_entries [j]->key);
6910 char *buff = g_strdup_printf ("thunk_for_class_%s_%s_entries_%d", vtable->klass->name_space, vtable->klass->name, count);
6911 mono_disassemble_code (NULL, (guint8*)start, size, buff);
6916 #ifndef USE_JUMP_TABLES
6917 g_free (constant_pool_starts);
6920 mono_arch_flush_icache ((guint8*)start, size);
6921 mono_stats.imt_thunks_size += code - start;
6923 g_assert (DISTANCE (start, code) <= size);
6928 mono_arch_context_get_int_reg (MonoContext *ctx, int reg)
6930 return ctx->regs [reg];
6934 mono_arch_context_set_int_reg (MonoContext *ctx, int reg, mgreg_t val)
6936 ctx->regs [reg] = val;
6940 * mono_arch_get_trampolines:
6942 * Return a list of MonoTrampInfo structures describing arch specific trampolines
6946 mono_arch_get_trampolines (gboolean aot)
6948 return mono_arm_get_exception_trampolines (aot);
6952 mono_arch_install_handler_block_guard (MonoJitInfo *ji, MonoJitExceptionInfo *clause, MonoContext *ctx, gpointer new_value)
6959 bp = MONO_CONTEXT_GET_BP (ctx);
6960 lr_loc = (gpointer*)(bp + clause->exvar_offset);
6962 old_value = *lr_loc;
6963 if ((char*)old_value < (char*)ji->code_start || (char*)old_value > ((char*)ji->code_start + ji->code_size))
6966 *lr_loc = new_value;
6971 #if defined(MONO_ARCH_SOFT_DEBUG_SUPPORTED)
6973 * mono_arch_set_breakpoint:
6975 * Set a breakpoint at the native code corresponding to JI at NATIVE_OFFSET.
6976 * The location should contain code emitted by OP_SEQ_POINT.
6979 mono_arch_set_breakpoint (MonoJitInfo *ji, guint8 *ip)
6982 guint32 native_offset = ip - (guint8*)ji->code_start;
6983 MonoDebugOptions *opt = mini_get_debug_options ();
6985 if (opt->soft_breakpoints) {
6986 g_assert (!ji->from_aot);
6988 ARM_BLX_REG (code, ARMREG_LR);
6989 mono_arch_flush_icache (code - 4, 4);
6990 } else if (ji->from_aot) {
6991 SeqPointInfo *info = mono_arch_get_seq_point_info (mono_domain_get (), ji->code_start);
6993 g_assert (native_offset % 4 == 0);
6994 g_assert (info->bp_addrs [native_offset / 4] == 0);
6995 info->bp_addrs [native_offset / 4] = bp_trigger_page;
6997 int dreg = ARMREG_LR;
6999 /* Read from another trigger page */
7000 #ifdef USE_JUMP_TABLES
7001 gpointer *jte = mono_jumptable_add_entry ();
7002 code = mono_arm_load_jumptable_entry (code, jte, dreg);
7003 jte [0] = bp_trigger_page;
7005 ARM_LDR_IMM (code, dreg, ARMREG_PC, 0);
7007 *(int*)code = (int)bp_trigger_page;
7010 ARM_LDR_IMM (code, dreg, dreg, 0);
7012 mono_arch_flush_icache (code - 16, 16);
7015 /* This is currently implemented by emitting an SWI instruction, which
7016 * qemu/linux seems to convert to a SIGILL.
7018 *(int*)code = (0xef << 24) | 8;
7020 mono_arch_flush_icache (code - 4, 4);
7026 * mono_arch_clear_breakpoint:
7028 * Clear the breakpoint at IP.
7031 mono_arch_clear_breakpoint (MonoJitInfo *ji, guint8 *ip)
7033 MonoDebugOptions *opt = mini_get_debug_options ();
7037 if (opt->soft_breakpoints) {
7038 g_assert (!ji->from_aot);
7041 mono_arch_flush_icache (code - 4, 4);
7042 } else if (ji->from_aot) {
7043 guint32 native_offset = ip - (guint8*)ji->code_start;
7044 SeqPointInfo *info = mono_arch_get_seq_point_info (mono_domain_get (), ji->code_start);
7046 g_assert (native_offset % 4 == 0);
7047 g_assert (info->bp_addrs [native_offset / 4] == bp_trigger_page);
7048 info->bp_addrs [native_offset / 4] = 0;
7050 for (i = 0; i < 4; ++i)
7053 mono_arch_flush_icache (ip, code - ip);
7058 * mono_arch_start_single_stepping:
7060 * Start single stepping.
7063 mono_arch_start_single_stepping (void)
7065 if (ss_trigger_page)
7066 mono_mprotect (ss_trigger_page, mono_pagesize (), 0);
7072 * mono_arch_stop_single_stepping:
7074 * Stop single stepping.
7077 mono_arch_stop_single_stepping (void)
7079 if (ss_trigger_page)
7080 mono_mprotect (ss_trigger_page, mono_pagesize (), MONO_MMAP_READ);
7086 #define DBG_SIGNAL SIGBUS
7088 #define DBG_SIGNAL SIGSEGV
7092 * mono_arch_is_single_step_event:
7094 * Return whenever the machine state in SIGCTX corresponds to a single
7098 mono_arch_is_single_step_event (void *info, void *sigctx)
7100 siginfo_t *sinfo = info;
7102 if (!ss_trigger_page)
7105 /* Sometimes the address is off by 4 */
7106 if (sinfo->si_addr >= ss_trigger_page && (guint8*)sinfo->si_addr <= (guint8*)ss_trigger_page + 128)
7113 * mono_arch_is_breakpoint_event:
7115 * Return whenever the machine state in SIGCTX corresponds to a breakpoint event.
7118 mono_arch_is_breakpoint_event (void *info, void *sigctx)
7120 siginfo_t *sinfo = info;
7122 if (!ss_trigger_page)
7125 if (sinfo->si_signo == DBG_SIGNAL) {
7126 /* Sometimes the address is off by 4 */
7127 if (sinfo->si_addr >= bp_trigger_page && (guint8*)sinfo->si_addr <= (guint8*)bp_trigger_page + 128)
7137 * mono_arch_skip_breakpoint:
7139 * See mini-amd64.c for docs.
7142 mono_arch_skip_breakpoint (MonoContext *ctx, MonoJitInfo *ji)
7144 MONO_CONTEXT_SET_IP (ctx, (guint8*)MONO_CONTEXT_GET_IP (ctx) + 4);
7148 * mono_arch_skip_single_step:
7150 * See mini-amd64.c for docs.
7153 mono_arch_skip_single_step (MonoContext *ctx)
7155 MONO_CONTEXT_SET_IP (ctx, (guint8*)MONO_CONTEXT_GET_IP (ctx) + 4);
7158 #endif /* MONO_ARCH_SOFT_DEBUG_SUPPORTED */
7161 * mono_arch_get_seq_point_info:
7163 * See mini-amd64.c for docs.
7166 mono_arch_get_seq_point_info (MonoDomain *domain, guint8 *code)
7171 // FIXME: Add a free function
7173 mono_domain_lock (domain);
7174 info = g_hash_table_lookup (domain_jit_info (domain)->arch_seq_points,
7176 mono_domain_unlock (domain);
7179 ji = mono_jit_info_table_find (domain, (char*)code);
7182 info = g_malloc0 (sizeof (SeqPointInfo) + ji->code_size);
7184 info->ss_trigger_page = ss_trigger_page;
7185 info->bp_trigger_page = bp_trigger_page;
7187 mono_domain_lock (domain);
7188 g_hash_table_insert (domain_jit_info (domain)->arch_seq_points,
7190 mono_domain_unlock (domain);
7197 mono_arch_init_lmf_ext (MonoLMFExt *ext, gpointer prev_lmf)
7199 ext->lmf.previous_lmf = prev_lmf;
7200 /* Mark that this is a MonoLMFExt */
7201 ext->lmf.previous_lmf = (gpointer)(((gssize)ext->lmf.previous_lmf) | 2);
7202 ext->lmf.sp = (gssize)ext;
7206 * mono_arch_set_target:
7208 * Set the target architecture the JIT backend should generate code for, in the form
7209 * of a GNU target triplet. Only used in AOT mode.
7212 mono_arch_set_target (char *mtriple)
7214 /* The GNU target triple format is not very well documented */
7215 if (strstr (mtriple, "armv7")) {
7216 v5_supported = TRUE;
7217 v6_supported = TRUE;
7218 v7_supported = TRUE;
7220 if (strstr (mtriple, "armv6")) {
7221 v5_supported = TRUE;
7222 v6_supported = TRUE;
7224 if (strstr (mtriple, "armv7s")) {
7225 v7s_supported = TRUE;
7227 if (strstr (mtriple, "thumbv7s")) {
7228 v5_supported = TRUE;
7229 v6_supported = TRUE;
7230 v7_supported = TRUE;
7231 v7s_supported = TRUE;
7232 thumb_supported = TRUE;
7233 thumb2_supported = TRUE;
7235 if (strstr (mtriple, "darwin") || strstr (mtriple, "ios")) {
7236 v5_supported = TRUE;
7237 v6_supported = TRUE;
7238 thumb_supported = TRUE;
7241 if (strstr (mtriple, "gnueabi"))
7242 eabi_supported = TRUE;
7246 mono_arch_opcode_supported (int opcode)
7249 case OP_ATOMIC_ADD_I4:
7250 case OP_ATOMIC_EXCHANGE_I4:
7251 case OP_ATOMIC_CAS_I4:
7252 return v7_supported;
7258 #if defined(ENABLE_GSHAREDVT)
7260 #include "../../../mono-extensions/mono/mini/mini-arm-gsharedvt.c"
7262 #endif /* !MONOTOUCH */