2 * mini-arm.c: ARM backend for the Mono code generator
5 * Paolo Molaro (lupus@ximian.com)
6 * Dietmar Maurer (dietmar@ximian.com)
8 * (C) 2003 Ximian, Inc.
9 * Copyright 2003-2011 Novell, Inc (http://www.novell.com)
10 * Copyright 2011 Xamarin, Inc (http://www.xamarin.com)
15 #include <mono/metadata/abi-details.h>
16 #include <mono/metadata/appdomain.h>
17 #include <mono/metadata/profiler-private.h>
18 #include <mono/metadata/debug-helpers.h>
19 #include <mono/utils/mono-mmap.h>
20 #include <mono/utils/mono-hwcap-arm.h>
21 #include <mono/utils/mono-memory-model.h>
27 #include "debugger-agent.h"
29 #include "mono/arch/arm/arm-vfp-codegen.h"
31 /* Sanity check: This makes no sense */
32 #if defined(ARM_FPU_NONE) && (defined(ARM_FPU_VFP) || defined(ARM_FPU_VFP_HARD))
33 #error "ARM_FPU_NONE is defined while one of ARM_FPU_VFP/ARM_FPU_VFP_HARD is defined"
37 * IS_SOFT_FLOAT: Is full software floating point used?
38 * IS_HARD_FLOAT: Is full hardware floating point used?
39 * IS_VFP: Is hardware floating point with software ABI used?
41 * These are not necessarily constants, e.g. IS_SOFT_FLOAT and
42 * IS_VFP may delegate to mono_arch_is_soft_float ().
/* ARM_FPU_VFP_HARD: hard-float ABI, FP fully in hardware. */
45 #if defined(ARM_FPU_VFP_HARD)
46 #define IS_SOFT_FLOAT (FALSE)
47 #define IS_HARD_FLOAT (TRUE)
/* ARM_FPU_NONE: soft-float fallback build; the actual choice is made at runtime. */
49 #elif defined(ARM_FPU_NONE)
50 #define IS_SOFT_FLOAT (mono_arch_is_soft_float ())
51 #define IS_HARD_FLOAT (FALSE)
52 #define IS_VFP (!mono_arch_is_soft_float ())
/* Default case: VFP hardware FP with the softfp calling convention. */
54 #define IS_SOFT_FLOAT (FALSE)
55 #define IS_HARD_FLOAT (FALSE)
/* __aeabi_read_tp (TLS pointer read) is only usable on linux-eabi,
 * not on Android or Native Client. */
59 #if defined(__ARM_EABI__) && defined(__linux__) && !defined(PLATFORM_ANDROID) && !defined(__native_client__)
60 #define HAVE_AEABI_READ_TP 1
63 #ifdef __native_client_codegen__
64 const guint kNaClAlignment = kNaClAlignmentARM;
65 const guint kNaClAlignmentMask = kNaClAlignmentMaskARM;
66 gint8 nacl_align_byte = -1; /* 0xff */
/* Pad 'code' with 'pad' bytes of NaCl-safe filler. Stub: aborts if reached. */
69 mono_arch_nacl_pad (guint8 *code, int pad)
71 /* Not yet properly implemented. */
72 g_assert_not_reached ();
/* Skip over NaCl alignment nops at 'code'. Stub: aborts if reached. */
77 mono_arch_nacl_skip_nops (guint8 *code)
79 /* Not yet properly implemented. */
80 g_assert_not_reached ();
84 #endif /* __native_client_codegen__ */
86 #define ALIGN_TO(val,align) ((((guint64)val) + ((align) - 1)) & ~((align) - 1))
/* Provided by the OS on Darwin; used by mono_arch_flush_icache () below. */
89 void sys_icache_invalidate (void *start, size_t len);
92 /* This mutex protects architecture specific caches */
93 #define mono_mini_arch_lock() mono_mutex_lock (&mini_arch_mutex)
94 #define mono_mini_arch_unlock() mono_mutex_unlock (&mini_arch_mutex)
95 static mono_mutex_t mini_arch_mutex;
/* CPU feature flags, filled in by mono_arch_init () from hwcap detection
 * (and optionally overridden by the MONO_CPU_ARCH environment variable). */
97 static gboolean v5_supported = FALSE;
98 static gboolean v6_supported = FALSE;
99 static gboolean v7_supported = FALSE;
100 static gboolean v7s_supported = FALSE;
101 static gboolean thumb_supported = FALSE;
102 static gboolean thumb2_supported = FALSE;
104 * Whether to use the ARM EABI
106 static gboolean eabi_supported = FALSE;
109 * Whether to use the iphone ABI extensions:
110 * http://developer.apple.com/library/ios/documentation/Xcode/Conceptual/iPhoneOSABIReference/index.html
111 * Basically, r7 is used as a frame pointer and it should point to the saved r7 + lr.
112 * This is required for debugging/profiling tools to work, but it has some overhead so it should
113 * only be turned on in debug builds.
115 static gboolean iphone_abi = FALSE;
118 * The FPU we are generating code for. This is NOT runtime configurable right now,
119 * since some things like MONO_ARCH_CALLEE_FREGS still depend on defines.
121 static MonoArmFPU arm_fpu;
123 #if defined(ARM_FPU_VFP_HARD)
125 * On armhf, d0-d7 are used for argument passing and d8-d15
126 * must be preserved across calls, which leaves us no room
127 * for scratch registers. So we use d14-d15 but back up their
128 * previous contents to a stack slot before using them - see
129 * mono_arm_emit_vfp_scratch_save/_restore ().
131 static int vfp_scratch1 = ARM_VFP_D14;
132 static int vfp_scratch2 = ARM_VFP_D15;
135 * On armel, d0-d7 do not need to be preserved, so we can
136 * freely make use of them as scratch registers.
138 static int vfp_scratch1 = ARM_VFP_D0;
139 static int vfp_scratch2 = ARM_VFP_D1;
/* Soft-debugger support state; see mono_arch_init () for setup. */
144 static volatile int ss_trigger_var = 0;
146 static gpointer single_step_func_wrapper;
147 static gpointer breakpoint_func_wrapper;
150 * The code generated for sequence points reads from this location, which is
151 * made read-only when single stepping is enabled.
153 static gpointer ss_trigger_page;
155 /* Enabled breakpoints read from this trigger page */
156 static gpointer bp_trigger_page;
160 * floating point support: on ARM it is a mess, there are at least 3
161 * different setups, each of which is binary-incompatible with the others.
162 * 1) FPA: old and ugly, but unfortunately what current distros use
163 * the double binary format has the two words swapped. 8 double registers.
164 * Implemented usually by kernel emulation.
165 * 2) softfloat: the compiler emulates all the fp ops. Usually uses the
166 * ugly swapped double format (I guess a softfloat-vfp exists, too, though).
167 * 3) VFP: the new and actually sensible and useful FP support. Implemented
168 * in HW or kernel-emulated, requires new tools. I think this is what symbian uses.
170 * We do not care about FPA. We will support soft float and VFP.
172 int mono_exc_esp_offset = 0;
/* Immediate ranges encodable in the corresponding ARM load/store forms. */
174 #define arm_is_imm12(v) ((v) > -4096 && (v) < 4096)
175 #define arm_is_imm8(v) ((v) > -256 && (v) < 256)
176 #define arm_is_fpimm8(v) ((v) >= -1020 && (v) <= 1020)
/* Recognize "ldr pc, [...]" instructions (used when inspecting/patching call sites). */
178 #define LDR_MASK ((0xf << ARMCOND_SHIFT) | (3 << 26) | (1 << 22) | (1 << 20) | (15 << 12))
179 #define LDR_PC_VAL ((ARMCOND_AL << ARMCOND_SHIFT) | (1 << 26) | (0 << 22) | (1 << 20) | (15 << 12))
180 #define IS_LDR_PC(val) (((val) & LDR_MASK) == LDR_PC_VAL)
182 //#define DEBUG_IMT 0
185 static void mono_arch_compute_omit_fp (MonoCompile *cfg);
/*
 * mono_arch_regname:
 * Return a human-readable name for core register 'reg' (valid for 0-15).
 */
189 mono_arch_regname (int reg)
191 static const char * rnames[] = {
192 "arm_r0", "arm_r1", "arm_r2", "arm_r3", "arm_v1",
193 "arm_v2", "arm_v3", "arm_v4", "arm_v5", "arm_v6",
194 "arm_v7", "arm_fp", "arm_ip", "arm_sp", "arm_lr",
/* Out-of-range handling is below this view; only 0-15 index the table. */
197 if (reg >= 0 && reg < 16)
/*
 * mono_arch_fregname:
 * Return a human-readable name for VFP register 'reg' (valid for 0-31).
 */
203 mono_arch_fregname (int reg)
205 static const char * rnames[] = {
206 "arm_f0", "arm_f1", "arm_f2", "arm_f3", "arm_f4",
207 "arm_f5", "arm_f6", "arm_f7", "arm_f8", "arm_f9",
208 "arm_f10", "arm_f11", "arm_f12", "arm_f13", "arm_f14",
209 "arm_f15", "arm_f16", "arm_f17", "arm_f18", "arm_f19",
210 "arm_f20", "arm_f21", "arm_f22", "arm_f23", "arm_f24",
211 "arm_f25", "arm_f26", "arm_f27", "arm_f28", "arm_f29",
/* Out-of-range handling is below this view; only 0-31 index the table. */
214 if (reg >= 0 && reg < 32)
/*
 * emit_big_add:
 * Emit code computing dreg = sreg + imm for an arbitrary 32-bit immediate.
 * A single ADD is used when imm fits an ARM rotated 8-bit immediate;
 * otherwise imm is materialized in a scratch register first.
 * NOTE(review): the two multi-instruction tails appear to be alternative
 * branches (scratch in IP vs. scratch in dreg); the selecting condition
 * is not visible in this view — likely dreg == sreg clobbers IP.
 */
222 emit_big_add (guint8 *code, int dreg, int sreg, int imm)
224 int imm8, rot_amount;
225 if ((imm8 = mono_arm_is_rotated_imm8 (imm, &rot_amount)) >= 0) {
/* Fast path: one ADD with a rotated immediate. */
226 ARM_ADD_REG_IMM (code, dreg, sreg, imm8, rot_amount);
230 code = mono_arm_emit_load_imm (code, ARMREG_IP, imm);
231 ARM_ADD_REG_REG (code, dreg, sreg, ARMREG_IP);
233 code = mono_arm_emit_load_imm (code, dreg, imm);
234 ARM_ADD_REG_REG (code, dreg, dreg, sreg);
239 /* If dreg == sreg, this clobbers IP */
/*
 * emit_sub_imm:
 * Emit code computing dreg = sreg - imm; mirror of emit_big_add ().
 * One SUB when imm is a rotated imm8, otherwise the immediate is loaded
 * into a scratch register (IP or dreg) and subtracted.
 */
241 emit_sub_imm (guint8 *code, int dreg, int sreg, int imm)
243 int imm8, rot_amount;
244 if ((imm8 = mono_arm_is_rotated_imm8 (imm, &rot_amount)) >= 0) {
/* Fast path: one SUB with a rotated immediate. */
245 ARM_SUB_REG_IMM (code, dreg, sreg, imm8, rot_amount);
249 code = mono_arm_emit_load_imm (code, ARMREG_IP, imm);
250 ARM_SUB_REG_REG (code, dreg, sreg, ARMREG_IP);
252 code = mono_arm_emit_load_imm (code, dreg, imm);
253 ARM_SUB_REG_REG (code, dreg, dreg, sreg);
/*
 * emit_memcpy:
 * Emit code copying 'size' bytes from sreg+soffset to dreg+doffset.
 * Copies larger than 4 pointers use a word-copy loop through r0-r3;
 * smaller copies are unrolled LDR/STR pairs through LR.
 * NOTE(review): the loop decrements by 4 bytes per iteration, so it
 * assumes 'size' is word-aligned for the big-copy path — TODO confirm.
 */
259 emit_memcpy (guint8 *code, int size, int dreg, int doffset, int sreg, int soffset)
261 /* we can use r0-r3, since this is called only for incoming args on the stack */
262 if (size > sizeof (gpointer) * 4) {
264 code = emit_big_add (code, ARMREG_R0, sreg, soffset);
265 code = emit_big_add (code, ARMREG_R1, dreg, doffset);
/* r2 holds the remaining byte count; loop until it reaches zero. */
266 start_loop = code = mono_arm_emit_load_imm (code, ARMREG_R2, size);
267 ARM_LDR_IMM (code, ARMREG_R3, ARMREG_R0, 0);
268 ARM_STR_IMM (code, ARMREG_R3, ARMREG_R1, 0);
269 ARM_ADD_REG_IMM8 (code, ARMREG_R0, ARMREG_R0, 4);
270 ARM_ADD_REG_IMM8 (code, ARMREG_R1, ARMREG_R1, 4);
271 ARM_SUBS_REG_IMM8 (code, ARMREG_R2, ARMREG_R2, 4);
/* Backward branch to the loop head, patched immediately below. */
272 ARM_B_COND (code, ARMCOND_NE, 0);
273 arm_patch (code - 4, start_loop);
/* Small copy with immediate offsets: unrolled word moves through LR. */
276 if (arm_is_imm12 (doffset) && arm_is_imm12 (doffset + size) &&
277 arm_is_imm12 (soffset) && arm_is_imm12 (soffset + size)) {
279 ARM_LDR_IMM (code, ARMREG_LR, sreg, soffset);
280 ARM_STR_IMM (code, ARMREG_LR, dreg, doffset);
/* Offsets too large to encode: compute base addresses first. */
286 code = emit_big_add (code, ARMREG_R0, sreg, soffset);
287 code = emit_big_add (code, ARMREG_R1, dreg, doffset);
288 doffset = soffset = 0;
290 ARM_LDR_IMM (code, ARMREG_LR, ARMREG_R0, soffset);
291 ARM_STR_IMM (code, ARMREG_LR, ARMREG_R1, doffset);
/* All bytes must have been consumed by the word-sized moves above. */
297 g_assert (size == 0);
/*
 * emit_call_reg:
 * Emit an indirect call through 'reg'. Uses BLX where available,
 * otherwise simulates it with "mov lr, pc; mov pc, reg" (the guarding
 * condition between the two forms is outside this view — presumably a
 * v5+ check; BLX requires ARMv5T).
 */
302 emit_call_reg (guint8 *code, int reg)
305 ARM_BLX_REG (code, reg);
307 #ifdef USE_JUMP_TABLES
308 g_assert_not_reached ();
/* Manual link: lr = pc (which reads as current insn + 8), then jump. */
310 ARM_MOV_REG_REG (code, ARMREG_LR, ARMREG_PC);
314 ARM_MOV_REG_REG (code, ARMREG_PC, reg);
/*
 * emit_call_seq:
 * Emit a patchable call sequence. With jump tables the target comes from
 * a jumptable entry; for dynamic methods the target pointer is embedded
 * inline in the code stream (initially NULL, filled in when patched) and
 * loaded PC-relative into IP before calling through it.
 */
320 emit_call_seq (MonoCompile *cfg, guint8 *code)
322 #ifdef USE_JUMP_TABLES
323 code = mono_arm_patchable_bl (code, ARMCOND_AL);
325 if (cfg->method->dynamic) {
326 ARM_LDR_IMM (code, ARMREG_IP, ARMREG_PC, 0);
/* Inline slot for the call target; written by the patching code later. */
328 *(gpointer*)code = NULL;
330 code = emit_call_reg (code, ARMREG_IP);
/*
 * mono_arm_patchable_b:
 * Emit a conditional branch whose target can be patched later.
 * Jump-table builds load the target address from a fresh jumptable entry
 * and BX through IP; otherwise a B with a zero displacement is emitted
 * for arm_patch () to fix up.
 */
339 mono_arm_patchable_b (guint8 *code, int cond)
341 #ifdef USE_JUMP_TABLES
344 jte = mono_jumptable_add_entry ();
345 code = mono_arm_load_jumptable_entry (code, jte, ARMREG_IP);
346 ARM_BX_COND (code, cond, ARMREG_IP);
348 ARM_B_COND (code, cond, 0);
/*
 * mono_arm_patchable_bl:
 * Like mono_arm_patchable_b (), but emits a call (BLX/BL) instead of a
 * plain branch, so lr is set to the return address.
 */
354 mono_arm_patchable_bl (guint8 *code, int cond)
356 #ifdef USE_JUMP_TABLES
359 jte = mono_jumptable_add_entry ();
360 code = mono_arm_load_jumptable_entry (code, jte, ARMREG_IP);
361 ARM_BLX_REG_COND (code, cond, ARMREG_IP);
363 ARM_BL_COND (code, cond, 0);
368 #ifdef USE_JUMP_TABLES
/*
 * mono_arm_load_jumptable_entry_addr:
 * Materialize the 32-bit address of jumptable entry 'jte' into 'reg'
 * using a MOVW/MOVT pair (requires ARMv6T2/ARMv7 movw/movt support).
 */
370 mono_arm_load_jumptable_entry_addr (guint8 *code, gpointer *jte, ARMReg reg)
372 ARM_MOVW_REG_IMM (code, reg, GPOINTER_TO_UINT(jte) & 0xffff);
373 ARM_MOVT_REG_IMM (code, reg, (GPOINTER_TO_UINT(jte) >> 16) & 0xffff);
/*
 * mono_arm_load_jumptable_entry:
 * Load the CONTENTS of jumptable entry 'jte' (the stored target pointer)
 * into 'reg': materialize the entry address, then dereference it.
 */
378 mono_arm_load_jumptable_entry (guint8 *code, gpointer* jte, ARMReg reg)
380 code = mono_arm_load_jumptable_entry_addr (code, jte, reg);
381 ARM_LDR_IMM (code, reg, reg, 0);
/*
 * emit_move_return_value:
 * Emit code moving a call's native return value into ins->dreg.
 * FP results arrive either in VFP d0/s0 (hard-float ABI) or in the
 * r0/r1 core registers (softfp); the branches selecting between the two
 * are partly outside this view.
 */
387 emit_move_return_value (MonoCompile *cfg, MonoInst *ins, guint8 *code)
389 switch (ins->opcode) {
392 case OP_FCALL_MEMBASE:
394 MonoType *sig_ret = mini_type_get_underlying_type (NULL, ((MonoCallInst*)ins)->signature->ret);
395 if (sig_ret->type == MONO_TYPE_R4) {
/* R4 return: convert between the single-precision result and dreg
 * (from s0 directly, or via FMSR from r0 on softfp). */
397 ARM_CVTS (code, ins->dreg, ARM_VFP_F0);
399 ARM_FMSR (code, ins->dreg, ARMREG_R0);
400 ARM_CVTS (code, ins->dreg, ins->dreg);
/* R8 return: copy d0, or reassemble the double from the r0/r1 pair. */
404 ARM_CPYD (code, ins->dreg, ARM_VFP_D0);
406 ARM_FMDRR (code, ARMREG_R0, ARMREG_R1, ins->dreg);
/* OP_RCALL*: single-precision (r4fp) calls; only R4 returns are legal. */
413 case OP_RCALL_MEMBASE: {
418 sig_ret = mini_type_get_underlying_type (NULL, ((MonoCallInst*)ins)->signature->ret);
419 g_assert (sig_ret->type == MONO_TYPE_R4);
421 ARM_CPYS (code, ins->dreg, ARM_VFP_F0);
423 ARM_FMSR (code, ins->dreg, ARMREG_R0);
424 ARM_CPYS (code, ins->dreg, ins->dreg);
438 * Emit code to push an LMF structure on the LMF stack.
439 * On arm, this is intermixed with the initialization of other fields of the structure.
/*
 * emit_save_lmf:
 * @lmf_offset: offset of the MonoLMF slot from the current stack pointer.
 * First obtains the thread's lmf_addr (via __aeabi_read_tp TLS read,
 * an inlined pthread_getspecific, or a call to mono_get_lmf_addr (),
 * fastest applicable path first), then links the frame's MonoLMF into
 * the per-thread LMF list. Clobbers r0, r1 and ip.
 */
442 emit_save_lmf (MonoCompile *cfg, guint8 *code, gint32 lmf_offset)
444 gboolean get_lmf_fast = FALSE;
447 #ifdef HAVE_AEABI_READ_TP
448 gint32 lmf_addr_tls_offset = mono_get_lmf_addr_tls_offset ();
450 if (lmf_addr_tls_offset != -1) {
/* Fast path: read the TLS base with __aeabi_read_tp, then load
 * lmf_addr from its known TLS offset. */
453 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_INTERNAL_METHOD,
454 (gpointer)"__aeabi_read_tp");
455 code = emit_call_seq (cfg, code);
457 ARM_LDR_IMM (code, ARMREG_R0, ARMREG_R0, lmf_addr_tls_offset);
463 if (cfg->method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) {
466 /* Inline mono_get_lmf_addr () */
467 /* jit_tls = pthread_getspecific (mono_jit_tls_id); lmf_addr = &jit_tls->lmf; */
469 /* Load mono_jit_tls_id */
471 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_JIT_TLS_ID, NULL);
472 ARM_LDR_IMM (code, ARMREG_R0, ARMREG_PC, 0);
/* Inline slot holding mono_jit_tls_id, filled in by patching. */
474 *(gpointer*)code = NULL;
476 ARM_LDR_REG_REG (code, ARMREG_R0, ARMREG_PC, ARMREG_R0);
477 /* call pthread_getspecific () */
478 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_INTERNAL_METHOD,
479 (gpointer)"pthread_getspecific");
480 code = emit_call_seq (cfg, code);
481 /* lmf_addr = &jit_tls->lmf */
/* NOTE(review): 'lmf_offset' is reused here as the MonoJitTlsData
 * field offset; the original parameter value is dead on this path. */
482 lmf_offset = MONO_STRUCT_OFFSET (MonoJitTlsData, lmf);
483 g_assert (arm_is_imm8 (lmf_offset));
484 ARM_ADD_REG_IMM (code, ARMREG_R0, ARMREG_R0, lmf_offset, 0);
/* Slow path: plain call to mono_get_lmf_addr (). */
491 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_INTERNAL_METHOD,
492 (gpointer)"mono_get_lmf_addr");
493 code = emit_call_seq (cfg, code);
495 /* we build the MonoLMF structure on the stack - see mini-arm.h */
496 /* lmf_offset is the offset from the previous stack pointer,
497 * alloc_size is the total stack space allocated, so the offset
498 * of MonoLMF from the current stack ptr is alloc_size - lmf_offset.
499 * The pointer to the struct is put in r1 (new_lmf).
500 * ip is used as scratch
501 * The callee-saved registers are already in the MonoLMF structure
503 code = emit_big_add (code, ARMREG_R1, ARMREG_SP, lmf_offset);
504 /* r0 is the result from mono_get_lmf_addr () */
505 ARM_STR_IMM (code, ARMREG_R0, ARMREG_R1, MONO_STRUCT_OFFSET (MonoLMF, lmf_addr));
506 /* new_lmf->previous_lmf = *lmf_addr */
507 ARM_LDR_IMM (code, ARMREG_IP, ARMREG_R0, MONO_STRUCT_OFFSET (MonoLMF, previous_lmf));
508 ARM_STR_IMM (code, ARMREG_IP, ARMREG_R1, MONO_STRUCT_OFFSET (MonoLMF, previous_lmf));
509 /* *(lmf_addr) = r1 */
510 ARM_STR_IMM (code, ARMREG_R1, ARMREG_R0, MONO_STRUCT_OFFSET (MonoLMF, previous_lmf));
511 /* Skip method (only needed for trampoline LMF frames) */
512 ARM_STR_IMM (code, ARMREG_SP, ARMREG_R1, MONO_STRUCT_OFFSET (MonoLMF, sp))ht;
513 ARM_STR_IMM (code, ARMREG_FP, ARMREG_R1, MONO_STRUCT_OFFSET (MonoLMF, fp));
514 /* save the current IP */
515 ARM_MOV_REG_REG (code, ARMREG_IP, ARMREG_PC);
516 ARM_STR_IMM (code, ARMREG_IP, ARMREG_R1, MONO_STRUCT_OFFSET (MonoLMF, ip));
/* Tell the GC the whole LMF area holds no managed references. */
518 for (i = 0; i < sizeof (MonoLMF); i += sizeof (mgreg_t))
519 mini_gc_set_slot_type_from_fp (cfg, lmf_offset + i, SLOT_NOREF);
/*
 * emit_float_args:
 * For a call 'inst', load each queued single-precision argument from its
 * stack slot into its assigned VFP register (hard-float argument setup).
 * Grows cfg->native_code if the worst-case sequence would overflow it,
 * updating *offset to the new end of the emitted code.
 */
530 emit_float_args (MonoCompile *cfg, MonoCallInst *inst, guint8 *code, int *max_len, guint *offset)
534 g_assert (!cfg->r4fp);
536 for (list = inst->float_args; list; list = list->next) {
537 FloatArgData *fad = list->data;
538 MonoInst *var = get_vreg_to_inst (cfg, fad->vreg);
539 gboolean imm = arm_is_fpimm8 (var->inst_offset);
541 /* 4+1 insns for emit_big_add () and 1 for FLDS. */
/* Ensure the code buffer can hold the worst-case sequence. */
547 if (*offset + *max_len > cfg->code_size) {
548 cfg->code_size += *max_len;
549 cfg->native_code = g_realloc (cfg->native_code, cfg->code_size);
551 code = cfg->native_code + *offset;
/* Offset too large for FLDS: compute the address in LR first. */
555 code = emit_big_add (code, ARMREG_LR, var->inst_basereg, var->inst_offset);
556 ARM_FLDS (code, fad->hreg, ARMREG_LR, 0);
558 ARM_FLDS (code, fad->hreg, var->inst_basereg, var->inst_offset);
560 *offset = code - cfg->native_code;
/*
 * mono_arm_emit_vfp_scratch_save:
 * Spill VFP scratch double 'reg' (vfp_scratch1 or vfp_scratch2) to its
 * reserved stack slot so it can be clobbered; needed on armhf where
 * d14/d15 are callee-saved (see the vfp_scratch comments above).
 */
567 mono_arm_emit_vfp_scratch_save (MonoCompile *cfg, guint8 *code, int reg)
571 g_assert (reg == vfp_scratch1 || reg == vfp_scratch2);
573 inst = (MonoInst *) cfg->arch.vfp_scratch_slots [reg == vfp_scratch1 ? 0 : 1];
/* Slot offset may exceed the FSTD immediate range; go through LR then. */
576 if (!arm_is_fpimm8 (inst->inst_offset)) {
577 code = emit_big_add (code, ARMREG_LR, inst->inst_basereg, inst->inst_offset);
578 ARM_FSTD (code, reg, ARMREG_LR, 0);
580 ARM_FSTD (code, reg, inst->inst_basereg, inst->inst_offset);
/*
 * mono_arm_emit_vfp_scratch_restore:
 * Reload VFP scratch double 'reg' from the stack slot written by
 * mono_arm_emit_vfp_scratch_save ().
 */
587 mono_arm_emit_vfp_scratch_restore (MonoCompile *cfg, guint8 *code, int reg)
591 g_assert (reg == vfp_scratch1 || reg == vfp_scratch2);
593 inst = (MonoInst *) cfg->arch.vfp_scratch_slots [reg == vfp_scratch1 ? 0 : 1];
/* Slot offset may exceed the FLDD immediate range; go through LR then. */
596 if (!arm_is_fpimm8 (inst->inst_offset)) {
597 code = emit_big_add (code, ARMREG_LR, inst->inst_basereg, inst->inst_offset);
598 ARM_FLDD (code, reg, ARMREG_LR, 0);
600 ARM_FLDD (code, reg, inst->inst_basereg, inst->inst_offset);
609 * Emit code to pop an LMF structure from the LMF stack.
/*
 * emit_restore_lmf:
 * Unlink this frame's MonoLMF: *lmf_addr = lmf->previous_lmf.
 * When lmf_offset is small the frame register is addressed directly;
 * otherwise r2 is pointed at the LMF first (the offset/basereg setup
 * for the large case is partly outside this view).
 */
612 emit_restore_lmf (MonoCompile *cfg, guint8 *code, gint32 lmf_offset)
616 if (lmf_offset < 32) {
617 basereg = cfg->frame_reg;
622 code = emit_big_add (code, ARMREG_R2, cfg->frame_reg, lmf_offset);
625 /* ip = previous_lmf */
626 ARM_LDR_IMM (code, ARMREG_IP, basereg, offset + MONO_STRUCT_OFFSET (MonoLMF, previous_lmf));
628 ARM_LDR_IMM (code, ARMREG_LR, basereg, offset + MONO_STRUCT_OFFSET (MonoLMF, lmf_addr));
629 /* *(lmf_addr) = previous_lmf */
630 ARM_STR_IMM (code, ARMREG_IP, ARMREG_LR, MONO_STRUCT_OFFSET (MonoLMF, previous_lmf));
635 #endif /* #ifndef DISABLE_JIT */
638 * mono_arch_get_argument_info:
639 * @csig: a method signature
640 * @param_count: the number of parameters to consider
641 * @arg_info: an array to store the result infos
643 * Gathers information on parameters such as size, alignment and
644 * padding. arg_info should be large enough to hold param_count + 1 entries.
646 * Returns the size of the activation frame.
649 mono_arch_get_argument_info (MonoGenericSharingContext *gsctx, MonoMethodSignature *csig, int param_count, MonoJitArgumentInfo *arg_info)
651 int k, frame_size = 0;
652 guint32 size, align, pad;
/* Struct returns take a hidden return-address pointer slot. */
656 t = mini_type_get_underlying_type (gsctx, csig->ret);
657 if (MONO_TYPE_ISSTRUCT (t)) {
658 frame_size += sizeof (gpointer);
662 arg_info [0].offset = offset;
/* 'this' pointer, when present (guard is outside this view). */
665 frame_size += sizeof (gpointer);
669 arg_info [0].size = frame_size;
671 for (k = 0; k < param_count; k++) {
672 size = mini_type_stack_size_full (NULL, csig->params [k], &align, csig->pinvoke);
674 /* ignore alignment for now */
/* Round frame_size up to 'align' and record the padding inserted. */
677 frame_size += pad = (align - (frame_size & (align - 1))) & (align - 1);
678 arg_info [k].pad = pad;
680 arg_info [k + 1].pad = 0;
681 arg_info [k + 1].size = size;
683 arg_info [k + 1].offset = offset;
/* Final padding so the frame is a multiple of MONO_ARCH_FRAME_ALIGNMENT. */
687 align = MONO_ARCH_FRAME_ALIGNMENT;
688 frame_size += pad = (align - (frame_size & (align - 1))) & (align - 1);
689 arg_info [k].pad = pad;
694 #define MAX_ARCH_DELEGATE_PARAMS 3
/*
 * get_delegate_invoke_impl:
 * Generate the tiny thunk used to invoke a delegate directly.
 * has_target variant (12 bytes): replace 'this' (r0) with the delegate
 * target and jump to method_ptr. Non-target variant: shift the register
 * arguments down one slot (dropping the delegate) and jump. 'param_count'
 * must be <= MAX_ARCH_DELEGATE_PARAMS so all args fit in r0-r3.
 * NOTE(review): 'param_count' is declared gboolean but used as a count —
 * works since callers pass 0-3, but int would be the honest type.
 */
697 get_delegate_invoke_impl (gboolean has_target, gboolean param_count, guint32 *code_size)
699 guint8 *code, *start;
702 start = code = mono_global_codeman_reserve (12);
704 /* Replace the this argument with the target */
705 ARM_LDR_IMM (code, ARMREG_IP, ARMREG_R0, MONO_STRUCT_OFFSET (MonoDelegate, method_ptr));
706 ARM_LDR_IMM (code, ARMREG_R0, ARMREG_R0, MONO_STRUCT_OFFSET (MonoDelegate, target));
707 ARM_MOV_REG_REG (code, ARMREG_PC, ARMREG_IP);
709 g_assert ((code - start) <= 12);
711 mono_arch_flush_icache (start, 12);
/* Non-target case: 2 insns plus one reg-to-reg move per parameter. */
715 size = 8 + param_count * 4;
716 start = code = mono_global_codeman_reserve (size);
718 ARM_LDR_IMM (code, ARMREG_IP, ARMREG_R0, MONO_STRUCT_OFFSET (MonoDelegate, method_ptr));
719 /* slide down the arguments */
720 for (i = 0; i < param_count; ++i) {
721 ARM_MOV_REG_REG (code, (ARMREG_R0 + i), (ARMREG_R0 + i + 1));
723 ARM_MOV_REG_REG (code, ARMREG_PC, ARMREG_IP);
725 g_assert ((code - start) <= size);
727 mono_arch_flush_icache (start, size);
730 mono_profiler_code_buffer_new (start, code - start, MONO_PROFILER_CODE_BUFFER_DELEGATE_INVOKE, NULL);
732 *code_size = code - start;
738 * mono_arch_get_delegate_invoke_impls:
740 * Return a list of MonoAotTrampInfo structures for the delegate invoke impl
/*
 * Builds one has-target thunk plus one thunk per parameter count
 * (0..MAX_ARCH_DELEGATE_PARAMS) for AOT compilation.
 */
744 mono_arch_get_delegate_invoke_impls (void)
752 code = get_delegate_invoke_impl (TRUE, 0, &code_len);
753 res = g_slist_prepend (res, mono_tramp_info_create ("delegate_invoke_impl_has_target", code, code_len, NULL, NULL));
755 for (i = 0; i <= MAX_ARCH_DELEGATE_PARAMS; ++i) {
756 code = get_delegate_invoke_impl (FALSE, i, &code_len);
757 tramp_name = g_strdup_printf ("delegate_invoke_impl_target_%d", i);
758 res = g_slist_prepend (res, mono_tramp_info_create (tramp_name, code, code_len, NULL, NULL));
/*
 * mono_arch_get_delegate_invoke_impl:
 * Return (and cache) the delegate invoke thunk matching 'sig'.
 * Falls back to AOT trampolines when available, otherwise JITs the thunk
 * via get_delegate_invoke_impl (). Returns NULL for unsupported shapes
 * (struct returns, too many or non-regsize parameters).
 */
766 mono_arch_get_delegate_invoke_impl (MonoMethodSignature *sig, gboolean has_target)
768 guint8 *code, *start;
771 /* FIXME: Support more cases */
772 sig_ret = mini_type_get_underlying_type (NULL, sig->ret);
773 if (MONO_TYPE_ISSTRUCT (sig_ret))
/* Single cached thunk for the has-target case. */
777 static guint8* cached = NULL;
778 mono_mini_arch_lock ();
780 mono_mini_arch_unlock ();
785 start = mono_aot_get_trampoline ("delegate_invoke_impl_has_target");
787 start = get_delegate_invoke_impl (TRUE, 0, NULL);
789 mono_mini_arch_unlock ();
/* Per-parameter-count cache for the no-target case. */
792 static guint8* cache [MAX_ARCH_DELEGATE_PARAMS + 1] = {NULL};
795 if (sig->param_count > MAX_ARCH_DELEGATE_PARAMS)
/* All params must fit in core registers for the slide-down thunk. */
797 for (i = 0; i < sig->param_count; ++i)
798 if (!mono_is_regsize_var (sig->params [i]))
801 mono_mini_arch_lock ();
802 code = cache [sig->param_count];
804 mono_mini_arch_unlock ();
809 char *name = g_strdup_printf ("delegate_invoke_impl_target_%d", sig->param_count);
810 start = mono_aot_get_trampoline (name);
813 start = get_delegate_invoke_impl (FALSE, sig->param_count, NULL);
815 cache [sig->param_count] = start;
816 mono_mini_arch_unlock ();
/* Virtual delegate invoke thunk: not implemented in this backend view. */
824 mono_arch_get_delegate_virtual_invoke_impl (MonoMethodSignature *sig, MonoMethod *method, int offset, gboolean load_imt_reg)
/* The 'this' argument of a call is always passed in r0 on ARM. */
830 mono_arch_get_this_arg_from_call (mgreg_t *regs, guint8 *code)
832 return (gpointer)regs [ARMREG_R0];
836 * Initialize the cpu to execute managed code.
839 mono_arch_cpu_init (void)
/* Cache the ABI alignment of 64-bit ints; it differs between targets
 * (4 on older Android/iOS ABIs, 8 on AAPCS), so cross builds override it. */
841 i8_align = MONO_ABI_ALIGNOF (gint64);
842 #ifdef MONO_CROSS_COMPILE
843 /* Need to set the alignment of i8 since it can different on the target */
844 #ifdef TARGET_ANDROID
846 mono_type_set_alignment (MONO_TYPE_I8, i8_align);
/*
 * create_function_wrapper:
 * Generate a thunk that builds a full MonoContext on the stack, calls
 * 'function' (a void (*)(MonoContext*)) with it, then restores every
 * register — including pc — from the possibly-modified context. Used by
 * the soft debugger for single-step/breakpoint entry from managed code.
 */
852 create_function_wrapper (gpointer function)
854 guint8 *start, *code;
856 start = code = mono_global_codeman_reserve (96);
859 * Construct the MonoContext structure on the stack.
862 ARM_SUB_REG_IMM8 (code, ARMREG_SP, ARMREG_SP, sizeof (MonoContext));
864 /* save ip, lr and pc into their corresponding ctx.regs slots. */
865 ARM_STR_IMM (code, ARMREG_IP, ARMREG_SP, MONO_STRUCT_OFFSET (MonoContext, regs) + sizeof (mgreg_t) * ARMREG_IP);
/* lr is stored in BOTH the lr and pc slots: the saved pc is the
 * caller's return address. */
866 ARM_STR_IMM (code, ARMREG_LR, ARMREG_SP, MONO_STRUCT_OFFSET (MonoContext, regs) + 4 * ARMREG_LR);
867 ARM_STR_IMM (code, ARMREG_LR, ARMREG_SP, MONO_STRUCT_OFFSET (MonoContext, regs) + 4 * ARMREG_PC);
869 /* save r0..r10 and fp */
870 ARM_ADD_REG_IMM8 (code, ARMREG_IP, ARMREG_SP, MONO_STRUCT_OFFSET (MonoContext, regs));
871 ARM_STM (code, ARMREG_IP, 0x0fff);
873 /* now we can update fp. */
874 ARM_MOV_REG_REG (code, ARMREG_FP, ARMREG_SP);
876 /* make ctx.esp hold the actual value of sp at the beginning of this method. */
877 ARM_ADD_REG_IMM8 (code, ARMREG_R0, ARMREG_FP, sizeof (MonoContext));
878 ARM_STR_IMM (code, ARMREG_R0, ARMREG_IP, 4 * ARMREG_SP);
879 ARM_STR_IMM (code, ARMREG_R0, ARMREG_FP, MONO_STRUCT_OFFSET (MonoContext, regs) + 4 * ARMREG_SP);
881 /* make ctx.eip hold the address of the call. */
882 ARM_SUB_REG_IMM8 (code, ARMREG_LR, ARMREG_LR, 4);
883 ARM_STR_IMM (code, ARMREG_LR, ARMREG_SP, MONO_STRUCT_OFFSET (MonoContext, pc));
885 /* r0 now points to the MonoContext */
886 ARM_MOV_REG_REG (code, ARMREG_R0, ARMREG_FP);
/* Load the address of 'function' (jumptable entry or inline literal). */
889 #ifdef USE_JUMP_TABLES
891 gpointer *jte = mono_jumptable_add_entry ();
892 code = mono_arm_load_jumptable_entry (code, jte, ARMREG_IP);
896 ARM_LDR_IMM (code, ARMREG_IP, ARMREG_PC, 0);
/* Inline literal holding the callee address. */
898 *(gpointer*)code = function;
901 ARM_BLX_REG (code, ARMREG_IP);
903 /* we're back; save ctx.eip and ctx.esp into the corresponding regs slots. */
904 ARM_LDR_IMM (code, ARMREG_R0, ARMREG_FP, MONO_STRUCT_OFFSET (MonoContext, pc));
905 ARM_STR_IMM (code, ARMREG_R0, ARMREG_FP, MONO_STRUCT_OFFSET (MonoContext, regs) + 4 * ARMREG_LR);
906 ARM_STR_IMM (code, ARMREG_R0, ARMREG_FP, MONO_STRUCT_OFFSET (MonoContext, regs) + 4 * ARMREG_PC);
908 /* make ip point to the regs array, then restore everything, including pc. */
909 ARM_ADD_REG_IMM8 (code, ARMREG_IP, ARMREG_FP, MONO_STRUCT_OFFSET (MonoContext, regs));
910 ARM_LDM (code, ARMREG_IP, 0xffff);
912 mono_arch_flush_icache (start, code - start);
913 mono_profiler_code_buffer_new (start, code - start, MONO_PROFILER_CODE_BUFFER_HELPER, NULL);
919 * Initialize architecture specific code.
922 mono_arch_init (void)
924 const char *cpu_arch;
926 mono_mutex_init_recursive (&mini_arch_mutex);
927 #ifdef MONO_ARCH_SOFT_DEBUG_SUPPORTED
928 if (mini_get_debug_options ()->soft_breakpoints) {
/* Soft breakpoints: call into the debugger agent through wrappers. */
929 single_step_func_wrapper = create_function_wrapper (debugger_agent_single_step_from_context);
930 breakpoint_func_wrapper = create_function_wrapper (debugger_agent_breakpoint_from_context);
/* Hardware-assisted sequence points: faulting trigger pages.
 * bp_trigger_page is made inaccessible so reads from it trap. */
935 ss_trigger_page = mono_valloc (NULL, mono_pagesize (), MONO_MMAP_READ|MONO_MMAP_32BIT);
936 bp_trigger_page = mono_valloc (NULL, mono_pagesize (), MONO_MMAP_READ|MONO_MMAP_32BIT);
937 mono_mprotect (bp_trigger_page, mono_pagesize (), 0);
/* Register the arch-specific JIT icalls used by generated code. */
940 mono_aot_register_jit_icall ("mono_arm_throw_exception", mono_arm_throw_exception);
941 mono_aot_register_jit_icall ("mono_arm_throw_exception_by_token", mono_arm_throw_exception_by_token);
942 mono_aot_register_jit_icall ("mono_arm_resume_unwind", mono_arm_resume_unwind);
943 #if defined(ENABLE_GSHAREDVT)
944 mono_aot_register_jit_icall ("mono_arm_start_gsharedvt_call", mono_arm_start_gsharedvt_call);
947 #if defined(__ARM_EABI__)
948 eabi_supported = TRUE;
/* Pick the FPU model: compile-time default, with a runtime downgrade
 * to soft float when no VFP unit is detected (non-Apple only). */
951 #if defined(ARM_FPU_VFP_HARD)
952 arm_fpu = MONO_ARM_FPU_VFP_HARD;
954 arm_fpu = MONO_ARM_FPU_VFP;
956 #if defined(ARM_FPU_NONE) && !defined(__APPLE__)
957 /* If we're compiling with a soft float fallback and it
958 turns out that no VFP unit is available, we need to
959 switch to soft float. We don't do this for iOS, since
960 iOS devices always have a VFP unit. */
961 if (!mono_hwcap_arm_has_vfp)
962 arm_fpu = MONO_ARM_FPU_NONE;
/* Architecture level flags from hwcap detection. */
966 v5_supported = mono_hwcap_arm_is_v5;
967 v6_supported = mono_hwcap_arm_is_v6;
968 v7_supported = mono_hwcap_arm_is_v7;
969 v7s_supported = mono_hwcap_arm_is_v7s;
971 #if defined(__APPLE__)
972 /* iOS is special-cased here because we don't yet
973 have a way to properly detect CPU features on it. */
974 thumb_supported = TRUE;
977 thumb_supported = mono_hwcap_arm_has_thumb;
978 thumb2_supported = mono_hwcap_arm_has_thumb2;
981 /* Format: armv(5|6|7[s])[-thumb[2]] */
982 cpu_arch = g_getenv ("MONO_CPU_ARCH");
984 /* Do this here so it overrides any detection. */
986 if (strncmp (cpu_arch, "armv", 4) == 0) {
987 v5_supported = cpu_arch [4] >= '5';
988 v6_supported = cpu_arch [4] >= '6';
989 v7_supported = cpu_arch [4] >= '7';
990 v7s_supported = strncmp (cpu_arch, "armv7s", 6) == 0;
993 thumb_supported = strstr (cpu_arch, "thumb") != NULL;
994 thumb2_supported = strstr (cpu_arch, "thumb2") != NULL;
999 * Cleanup architecture specific code.
1002 mono_arch_cleanup (void)
1007 * This function returns the optimizations supported on this cpu.
1010 mono_arch_cpu_optimizations (guint32 *exclude_mask)
1012 /* no arm-specific optimizations yet */
1018 * This function test for all SIMD functions supported.
1020 * Returns a bitmask corresponding to all supported versions.
1024 mono_arch_cpu_enumerate_simd_versions (void)
1026 /* SIMD is currently unimplemented */
/* Decide whether 'opcode' must be emulated in software on this CPU
 * (e.g. division opcodes can run natively on v7s). */
1034 mono_arch_opcode_needs_emulation (MonoCompile *cfg, int opcode)
1036 if (v7s_supported) {
1050 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
/* TRUE when the runtime selected full software floating point. */
1052 mono_arch_is_soft_float (void)
1054 return arm_fpu == MONO_ARM_FPU_NONE;
/* TRUE when generating code for the hard-float (VFP argument) ABI. */
1059 mono_arm_is_hard_float (void)
1061 return arm_fpu == MONO_ARM_FPU_VFP_HARD;
/*
 * is_regsize_var:
 * Return whether values of type 't' fit in a single 32-bit core register
 * (ints up to 32 bits, pointers and reference types; value types do not).
 */
1065 is_regsize_var (MonoGenericSharingContext *gsctx, MonoType *t) {
1068 t = mini_type_get_underlying_type (gsctx, t);
1075 case MONO_TYPE_FNPTR:
1077 case MONO_TYPE_OBJECT:
1078 case MONO_TYPE_STRING:
1079 case MONO_TYPE_CLASS:
1080 case MONO_TYPE_SZARRAY:
1081 case MONO_TYPE_ARRAY:
/* Generic instances count only when they are reference types. */
1083 case MONO_TYPE_GENERICINST:
1084 if (!mono_type_generic_inst_is_valuetype (t))
1087 case MONO_TYPE_VALUETYPE:
/*
 * mono_arch_get_allocatable_int_vars:
 * Return the list of variables in 'cfg' that are candidates for global
 * register allocation: live, non-volatile, non-indirect locals/args whose
 * type fits a 32-bit register, sorted for the allocator.
 */
1094 mono_arch_get_allocatable_int_vars (MonoCompile *cfg)
1099 for (i = 0; i < cfg->num_varinfo; i++) {
1100 MonoInst *ins = cfg->varinfo [i];
1101 MonoMethodVar *vmv = MONO_VARINFO (cfg, i);
/* Skip variables with an empty (or inverted) live range. */
1104 if (vmv->range.first_use.abs_pos >= vmv->range.last_use.abs_pos)
1107 if (ins->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT) || (ins->opcode != OP_LOCAL && ins->opcode != OP_ARG))
1110 /* we can only allocate 32 bit values */
1111 if (is_regsize_var (cfg->generic_sharing_context, ins->inst_vtype)) {
1112 g_assert (MONO_VARINFO (cfg, i)->reg == -1);
1113 g_assert (i == vmv->idx);
1114 vars = mono_varlist_insert_sorted (cfg, vars, vmv, FALSE);
/*
 * mono_arch_get_global_int_regs:
 * Return the callee-saved core registers (v1-v7, fp) available to the
 * global register allocator for 'cfg', excluding registers reserved for
 * the frame pointer, the iphone ABI, or rgctx/IMT passing.
 */
1122 mono_arch_get_global_int_regs (MonoCompile *cfg)
1126 mono_arch_compute_omit_fp (cfg);
1129 * FIXME: Interface calls might go through a static rgctx trampoline which
1130 * sets V5, but it doesn't save it, so we need to save it ourselves, and
1133 if (cfg->flags & MONO_CFG_HAS_CALLS)
1134 cfg->uses_rgctx_reg = TRUE;
/* fp is only allocatable when the frame pointer is omitted. */
1136 if (cfg->arch.omit_fp)
1137 regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_FP));
1138 regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_V1));
1139 regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_V2));
1140 regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_V3));
1142 /* V4=R7 is used as a frame pointer, but V7=R10 is preserved */
1143 regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_V7));
1145 regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_V4));
1146 if (!(cfg->compile_aot || cfg->uses_rgctx_reg || COMPILE_LLVM (cfg)))
1147 /* V5 is reserved for passing the vtable/rgctx/IMT method */
1148 regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_V5));
1149 /*regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_V6));*/
1150 /*regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_V7));*/
1156 * mono_arch_regalloc_cost:
1158 * Return the cost, in number of memory references, of the action of
1159 * allocating the variable VMV into a register during global register
1163 mono_arch_regalloc_cost (MonoCompile *cfg, MonoMethodVar *vmv)
1169 #endif /* #ifndef DISABLE_JIT */
1171 #ifndef __GNUC_PREREQ
/* Fallback for non-GCC compilers: treat every version check as false. */
1172 #define __GNUC_PREREQ(maj, min) (0)
/*
 * mono_arch_flush_icache:
 * Flush/invalidate the instruction cache for [code, code+size) after
 * writing generated code. Platform selection, best first: NaCl (no-op),
 * cross-compile, Darwin sys_icache_invalidate, GCC builtins, and finally
 * the raw Linux cacheflush system call via inline assembly.
 */
1176 mono_arch_flush_icache (guint8 *code, gint size)
1178 #if defined(__native_client__)
1179 // For Native Client we don't have to flush i-cache here,
1180 // as it's being done by dyncode interface.
1183 #ifdef MONO_CROSS_COMPILE
1185 sys_icache_invalidate (code, size);
1186 #elif __GNUC_PREREQ(4, 3)
1187 __builtin___clear_cache (code, code + size);
1188 #elif __GNUC_PREREQ(4, 1)
1189 __clear_cache (code, code + size);
1190 #elif defined(PLATFORM_ANDROID)
/* Android: invoke the ARM cacheflush syscall directly. */
1191 const int syscall = 0xf0002;
1199 : "r" (code), "r" (code + size), "r" (syscall)
1200 : "r0", "r1", "r7", "r2"
/* Generic Linux: legacy swi-based sys_cacheflush. */
1203 __asm __volatile ("mov r0, %0\n"
1206 "swi 0x9f0002 @ sys_cacheflush"
1208 : "r" (code), "r" (code + size), "r" (0)
1209 : "r0", "r1", "r3" );
1211 #endif /* !__native_client__ */
/* Argument-passing classification (tail of the ArgStorage enum). */
1222 RegTypeStructByAddr,
1223 /* gsharedvt argument passed by addr in greg */
1224 RegTypeGSharedVtInReg,
1225 /* gsharedvt argument passed by addr on stack */
1226 RegTypeGSharedVtOnStack,
/* Per-argument location info (ArgInfo fields). */
1231 guint16 vtsize; /* in param area */
1235 guint8 size : 4; /* 1, 2, 4, 8, or regs used by RegTypeStructByVal */
/* Per-signature summary (CallInfo fields). */
1240 guint32 stack_usage;
1241 gboolean vtype_retaddr;
1242 /* The index of the vret arg in the argument list */
/*
 * add_general:
 * Assign the next integer argument location into 'ainfo'.
 * 'simple' selects a 32-bit value (one register or one stack slot)
 * versus a 64-bit value (an even-aligned register pair on EABI, a
 * split r3/stack pair when i8 alignment is 4, or two stack slots).
 * Advances *gr / *stack_size accordingly.
 */
1252 add_general (guint *gr, guint *stack_size, ArgInfo *ainfo, gboolean simple)
1255 if (*gr > ARMREG_R3) {
/* Registers exhausted: pass in the caller's stack frame. */
1257 ainfo->offset = *stack_size;
1258 ainfo->reg = ARMREG_SP; /* in the caller */
1259 ainfo->storage = RegTypeBase;
1262 ainfo->storage = RegTypeGeneral;
/* 64-bit case: a split assignment is possible when i8 aligns to 4. */
1269 split = i8_align == 4;
1274 if (*gr == ARMREG_R3 && split) {
1275 /* first word in r3 and the second on the stack */
1276 ainfo->offset = *stack_size;
1277 ainfo->reg = ARMREG_SP; /* in the caller */
1278 ainfo->storage = RegTypeBaseGen;
1280 } else if (*gr >= ARMREG_R3) {
1281 if (eabi_supported) {
1282 /* darwin aligns longs to 4 byte only */
1283 if (i8_align == 8) {
1288 ainfo->offset = *stack_size;
1289 ainfo->reg = ARMREG_SP; /* in the caller */
1290 ainfo->storage = RegTypeBase;
/* EABI requires the register pair to start on an even register. */
1293 if (eabi_supported) {
1294 if (i8_align == 8 && ((*gr) & 1))
1297 ainfo->storage = RegTypeIRegPair;
/*
 * add_float:
 * Assign the next floating point argument location into 'ainfo' per the
 * AAPCS VFP (hard-float) rules, tracking back-filling of a spare single-
 * precision register through *float_spare. Falls back to the stack when
 * the 16 single-precision argument registers are exhausted.
 */
1306 add_float (guint *fpr, guint *stack_size, ArgInfo *ainfo, gboolean is_double, gint *float_spare)
1309 * If we're calling a function like this:
1311 * void foo(float a, double b, float c)
1313 * We pass a in s0 and b in d1. That leaves us
1314 * with s1 being unused. The armhf ABI recognizes
1315 * this and requires register assignment to then
1316 * use that for the next single-precision arg,
1317 * i.e. c in this example. So float_spare either
1318 * tells us which reg to use for the next single-
1319 * precision arg, or it's -1, meaning use *fpr.
1321 * Note that even though most of the JIT speaks
1322 * double-precision, fpr represents single-
1323 * precision registers.
1325 * See parts 5.5 and 6.1.2 of the AAPCS for how
1329 if (*fpr < ARM_VFP_F16 || (!is_double && *float_spare >= 0)) {
1330 ainfo->storage = RegTypeFP;
1334 * If we're passing a double-precision value
1335 * and *fpr is odd (e.g. it's s1, s3, ...)
1336 * we need to use the next even register. So
1337 * we mark the current *fpr as a spare that
1338 * can be used for the next single-precision
1342 *float_spare = *fpr;
1347 * At this point, we have an even register
1348 * so we assign that and move along.
1352 } else if (*float_spare >= 0) {
1354 * We're passing a single-precision value
1355 * and it looks like a spare single-
1356 * precision register is available. Let's
1360 ainfo->reg = *float_spare;
1364 * If we hit this branch, we're passing a
1365 * single-precision value and we can simply
1366 * use the next available register.
1374 * We've exhausted available floating point
1375 * regs, so pass the rest on the stack.
1383 ainfo->offset = *stack_size;
1384 ainfo->reg = ARMREG_SP;
1385 ainfo->storage = RegTypeBase;
/*
 * get_call_info:
 * Build a CallInfo describing where each argument and the return value of
 * 'sig' live under the ARM calling convention (core regs, VFP regs, stack,
 * by-address vtypes, gsharedvt indirection). Allocated from 'mp' when
 * given, otherwise from the GLib heap (caller then owns/frees it —
 * TODO confirm against callers; the free is not visible here).
 * NOTE(review): this listing is missing intermediate lines (gaps in the
 * embedded numbering); comments below describe only what is visible.
 */
1392 get_call_info (MonoGenericSharingContext *gsctx, MonoMemPool *mp, MonoMethodSignature *sig)
1394 guint i, gr, fpr, pstart;
1396 int n = sig->hasthis + sig->param_count;
1397 MonoType *simpletype;
1398 guint32 stack_size = 0;
1400 gboolean is_pinvoke = sig->pinvoke;
1404 cinfo = mono_mempool_alloc0 (mp, sizeof (CallInfo) + (sizeof (ArgInfo) * n));
1406 cinfo = g_malloc0 (sizeof (CallInfo) + (sizeof (ArgInfo) * n));
/* Decide how a struct return travels: small pinvoke structs by value in
 * registers, otherwise via a hidden return-address argument. */
1413 t = mini_type_get_underlying_type (gsctx, sig->ret);
1414 if (MONO_TYPE_ISSTRUCT (t)) {
1417 if (is_pinvoke && mono_class_native_size (mono_class_from_mono_type (t), &align) <= sizeof (gpointer)) {
1418 cinfo->ret.storage = RegTypeStructByVal;
1420 cinfo->vtype_retaddr = TRUE;
1422 } else if (!(t->type == MONO_TYPE_GENERICINST && !mono_type_generic_inst_is_valuetype (t)) && mini_is_gsharedvt_type_gsctx (gsctx, t)) {
1423 cinfo->vtype_retaddr = TRUE;
1429 * To simplify get_this_arg_reg () and LLVM integration, emit the vret arg after
1430 * the first argument, allowing 'this' to be always passed in the first arg reg.
1431 * Also do this if the first argument is a reference type, since virtual calls
1432 * are sometimes made using calli without sig->hasthis set, like in the delegate
1435 if (cinfo->vtype_retaddr && !is_pinvoke && (sig->hasthis || (sig->param_count > 0 && MONO_TYPE_IS_REFERENCE (mini_type_get_underlying_type (gsctx, sig->params [0]))))) {
1437 add_general (&gr, &stack_size, cinfo->args + 0, TRUE);
1439 add_general (&gr, &stack_size, &cinfo->args [sig->hasthis + 0], TRUE);
1443 add_general (&gr, &stack_size, &cinfo->ret, TRUE);
1444 cinfo->vret_arg_index = 1;
1448 add_general (&gr, &stack_size, cinfo->args + 0, TRUE);
1452 if (cinfo->vtype_retaddr)
1453 add_general (&gr, &stack_size, &cinfo->ret, TRUE);
/* Main per-parameter loop: classify each formal parameter by its
 * underlying type and assign it a location. */
1456 DEBUG(printf("params: %d\n", sig->param_count));
1457 for (i = pstart; i < sig->param_count; ++i) {
1458 ArgInfo *ainfo = &cinfo->args [n];
1460 if ((sig->call_convention == MONO_CALL_VARARG) && (i == sig->sentinelpos)) {
1461 /* Prevent implicit arguments and sig_cookie from
1462 being passed in registers */
1465 /* Emit the signature cookie just before the implicit arguments */
1466 add_general (&gr, &stack_size, &cinfo->sig_cookie, TRUE);
1468 DEBUG(printf("param %d: ", i));
1469 if (sig->params [i]->byref) {
1470 DEBUG(printf("byref\n"));
1471 add_general (&gr, &stack_size, ainfo, TRUE);
1475 simpletype = mini_type_get_underlying_type (gsctx, sig->params [i]);
1476 switch (simpletype->type) {
1477 case MONO_TYPE_BOOLEAN:
1480 cinfo->args [n].size = 1;
1481 add_general (&gr, &stack_size, ainfo, TRUE);
1484 case MONO_TYPE_CHAR:
1487 cinfo->args [n].size = 2;
1488 add_general (&gr, &stack_size, ainfo, TRUE);
1493 cinfo->args [n].size = 4;
1494 add_general (&gr, &stack_size, ainfo, TRUE);
1500 case MONO_TYPE_FNPTR:
1501 case MONO_TYPE_CLASS:
1502 case MONO_TYPE_OBJECT:
1503 case MONO_TYPE_STRING:
1504 case MONO_TYPE_SZARRAY:
1505 case MONO_TYPE_ARRAY:
1506 cinfo->args [n].size = sizeof (gpointer);
1507 add_general (&gr, &stack_size, ainfo, TRUE);
1510 case MONO_TYPE_GENERICINST:
1511 if (!mono_type_generic_inst_is_valuetype (simpletype)) {
1512 cinfo->args [n].size = sizeof (gpointer);
1513 add_general (&gr, &stack_size, ainfo, TRUE);
1517 if (mini_is_gsharedvt_type_gsctx (gsctx, simpletype)) {
1518 /* gsharedvt arguments are passed by ref */
1519 g_assert (mini_is_gsharedvt_type_gsctx (gsctx, simpletype));
1520 add_general (&gr, &stack_size, ainfo, TRUE);
1521 switch (ainfo->storage) {
1522 case RegTypeGeneral:
1523 ainfo->storage = RegTypeGSharedVtInReg;
1526 ainfo->storage = RegTypeGSharedVtOnStack;
1529 g_assert_not_reached ();
/* Value types / typed references: passed by value, split between up to
 * 4 core registers (ainfo->size words) and the stack (ainfo->vtsize words). */
1535 case MONO_TYPE_TYPEDBYREF:
1536 case MONO_TYPE_VALUETYPE: {
1542 if (simpletype->type == MONO_TYPE_TYPEDBYREF) {
1543 size = sizeof (MonoTypedRef);
1544 align = sizeof (gpointer);
1546 MonoClass *klass = mono_class_from_mono_type (sig->params [i]);
1548 size = mono_class_native_size (klass, &align);
1550 size = mini_type_stack_size_full (gsctx, simpletype, &align, FALSE);
1552 DEBUG(printf ("load %d bytes struct\n", size));
1555 align_size += (sizeof (gpointer) - 1);
1556 align_size &= ~(sizeof (gpointer) - 1);
1557 nwords = (align_size + sizeof (gpointer) -1 ) / sizeof (gpointer);
1558 ainfo->storage = RegTypeStructByVal;
1559 ainfo->struct_size = size;
1560 /* FIXME: align stack_size if needed */
1561 if (eabi_supported) {
1562 if (align >= 8 && (gr & 1))
1565 if (gr > ARMREG_R3) {
1567 ainfo->vtsize = nwords;
1569 int rest = ARMREG_R3 - gr + 1;
1570 int n_in_regs = rest >= nwords? nwords: rest;
1572 ainfo->size = n_in_regs;
1573 ainfo->vtsize = nwords - n_in_regs;
1576 nwords -= n_in_regs;
1578 if (sig->call_convention == MONO_CALL_VARARG)
1579 /* This matches the alignment in mono_ArgIterator_IntGetNextArg () */
1580 stack_size = ALIGN_TO (stack_size, align);
1581 ainfo->offset = stack_size;
1582 /*g_print ("offset for arg %d at %d\n", n, stack_size);*/
1583 stack_size += nwords * sizeof (gpointer);
/* R4/R8 handling: VFP registers for hard-float, core regs otherwise
 * (the selecting conditionals are in elided lines). */
1590 add_general (&gr, &stack_size, ainfo, FALSE);
1597 add_float (&fpr, &stack_size, ainfo, FALSE, &float_spare);
1599 add_general (&gr, &stack_size, ainfo, TRUE);
1607 add_float (&fpr, &stack_size, ainfo, TRUE, &float_spare);
1609 add_general (&gr, &stack_size, ainfo, FALSE);
1614 case MONO_TYPE_MVAR:
1615 /* gsharedvt arguments are passed by ref */
1616 g_assert (mini_is_gsharedvt_type_gsctx (gsctx, simpletype));
1617 add_general (&gr, &stack_size, ainfo, TRUE);
1618 switch (ainfo->storage) {
1619 case RegTypeGeneral:
1620 ainfo->storage = RegTypeGSharedVtInReg;
1623 ainfo->storage = RegTypeGSharedVtOnStack;
1626 g_assert_not_reached ();
1631 g_error ("Can't trampoline 0x%x", sig->params [i]->type);
1635 /* Handle the case where there are no implicit arguments */
1636 if ((sig->call_convention == MONO_CALL_VARARG) && (i == sig->sentinelpos)) {
1637 /* Prevent implicit arguments and sig_cookie from
1638 being passed in registers */
1641 /* Emit the signature cookie just before the implicit arguments */
1642 add_general (&gr, &stack_size, &cinfo->sig_cookie, TRUE);
/* Classify the return value. */
1646 simpletype = mini_type_get_underlying_type (gsctx, sig->ret);
1647 switch (simpletype->type) {
1648 case MONO_TYPE_BOOLEAN:
1653 case MONO_TYPE_CHAR:
1659 case MONO_TYPE_FNPTR:
1660 case MONO_TYPE_CLASS:
1661 case MONO_TYPE_OBJECT:
1662 case MONO_TYPE_SZARRAY:
1663 case MONO_TYPE_ARRAY:
1664 case MONO_TYPE_STRING:
1665 cinfo->ret.storage = RegTypeGeneral;
1666 cinfo->ret.reg = ARMREG_R0;
1670 cinfo->ret.storage = RegTypeIRegPair;
1671 cinfo->ret.reg = ARMREG_R0;
1675 cinfo->ret.storage = RegTypeFP;
1677 if (IS_HARD_FLOAT) {
1678 cinfo->ret.reg = ARM_VFP_F0;
1680 cinfo->ret.reg = ARMREG_R0;
1684 case MONO_TYPE_GENERICINST:
1685 if (!mono_type_generic_inst_is_valuetype (simpletype)) {
1686 cinfo->ret.storage = RegTypeGeneral;
1687 cinfo->ret.reg = ARMREG_R0;
1690 // FIXME: Only for variable types
1691 if (mini_is_gsharedvt_type_gsctx (gsctx, simpletype)) {
1692 cinfo->ret.storage = RegTypeStructByAddr;
1693 g_assert (cinfo->vtype_retaddr);
1697 case MONO_TYPE_VALUETYPE:
1698 case MONO_TYPE_TYPEDBYREF:
1699 if (cinfo->ret.storage != RegTypeStructByVal)
1700 cinfo->ret.storage = RegTypeStructByAddr;
1703 case MONO_TYPE_MVAR:
1704 g_assert (mini_is_gsharedvt_type_gsctx (gsctx, simpletype));
1705 cinfo->ret.storage = RegTypeStructByAddr;
1706 g_assert (cinfo->vtype_retaddr);
1708 case MONO_TYPE_VOID:
1711 g_error ("Can't handle as return value 0x%x", sig->ret->type);
1715 /* align stack size to 8 */
/* NOTE(review): the DEBUG printf rounds to 16 ((x+15)&~15) while the code
 * below rounds to 8 ((x+7)&~7) — the printed size can disagree with the
 * actual one. Worth confirming which rounding is intended. */
1716 DEBUG (printf (" stack size: %d (%d)\n", (stack_size + 15) & ~15, stack_size));
1717 stack_size = (stack_size + 7) & ~7;
1719 cinfo->stack_usage = stack_size;
/*
 * mono_arch_tail_call_supported:
 * Decide whether a caller/callee signature pair can be compiled as an ARM
 * tail call. Rejects AOT (OP_TAILCALL unsupported there), callees needing
 * more stack than the caller, struct returns via hidden address, and large
 * stack-argument areas (> 16 words).
 * NOTE(review): get_call_info (NULL, NULL, ...) heap-allocates c1/c2; the
 * corresponding g_free calls are not visible in this listing — confirm they
 * exist in the elided tail, otherwise this leaks per query.
 */
1725 mono_arch_tail_call_supported (MonoCompile *cfg, MonoMethodSignature *caller_sig, MonoMethodSignature *callee_sig)
1727 MonoType *callee_ret;
1731 if (cfg->compile_aot && !cfg->full_aot)
1732 /* OP_TAILCALL doesn't work with AOT */
1735 c1 = get_call_info (NULL, NULL, caller_sig);
1736 c2 = get_call_info (NULL, NULL, callee_sig);
1739 * Tail calls with more callee stack usage than the caller cannot be supported, since
1740 * the extra stack space would be left on the stack after the tail call.
1742 res = c1->stack_usage >= c2->stack_usage;
1743 callee_ret = mini_replace_type (callee_sig->ret);
1744 if (callee_ret && MONO_TYPE_ISSTRUCT (callee_ret) && c2->ret.storage != RegTypeStructByVal)
1745 /* An address on the callee's stack is passed as the first argument */
1748 if (c2->stack_usage > 16 * 4)
/*
 * debug_omit_fp:
 * Debug helper gating frame-pointer omission via the global debug counter,
 * so FP elimination can be bisected with MONO_DEBUG-style counting.
 */
1760 debug_omit_fp (void)
1763 return mono_debug_count ();
1770 * mono_arch_compute_omit_fp:
1772 * Determine whenever the frame pointer can be eliminated.
/* Starts optimistic (omit_fp = TRUE) and disables omission for every
 * feature that requires a fixed frame: LMF saving, alloca, exception
 * clauses, a param area, managed varargs, tracing/profiling, and any
 * argument whose stack offset is only known with a frame pointer.
 * Caches the result in cfg->arch (omit_fp_computed guards recomputation).
 * NOTE(review): intermediate lines are elided from this listing. */
1775 mono_arch_compute_omit_fp (MonoCompile *cfg)
1777 MonoMethodSignature *sig;
1778 MonoMethodHeader *header;
1782 if (cfg->arch.omit_fp_computed)
1785 header = cfg->header;
1787 sig = mono_method_signature (cfg->method);
/* Lazily compute and cache the calling-convention info. */
1789 if (!cfg->arch.cinfo)
1790 cfg->arch.cinfo = get_call_info (cfg->generic_sharing_context, cfg->mempool, sig);
1791 cinfo = cfg->arch.cinfo;
1794 * FIXME: Remove some of the restrictions.
1796 cfg->arch.omit_fp = TRUE;
1797 cfg->arch.omit_fp_computed = TRUE;
1799 if (cfg->disable_omit_fp)
1800 cfg->arch.omit_fp = FALSE;
1801 if (!debug_omit_fp ())
1802 cfg->arch.omit_fp = FALSE;
1804 if (cfg->method->save_lmf)
1805 cfg->arch.omit_fp = FALSE;
1807 if (cfg->flags & MONO_CFG_HAS_ALLOCA)
1808 cfg->arch.omit_fp = FALSE;
1809 if (header->num_clauses)
1810 cfg->arch.omit_fp = FALSE;
1811 if (cfg->param_area)
1812 cfg->arch.omit_fp = FALSE;
1813 if (!sig->pinvoke && (sig->call_convention == MONO_CALL_VARARG))
1814 cfg->arch.omit_fp = FALSE;
1815 if ((mono_jit_trace_calls != NULL && mono_trace_eval (cfg->method)) ||
1816 (cfg->prof_options & MONO_PROFILE_ENTER_LEAVE))
1817 cfg->arch.omit_fp = FALSE;
1818 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
1819 ArgInfo *ainfo = &cinfo->args [i];
1821 if (ainfo->storage == RegTypeBase || ainfo->storage == RegTypeBaseGen || ainfo->storage == RegTypeStructByVal) {
1823 * The stack offset can only be determined when the frame
1826 cfg->arch.omit_fp = FALSE;
/* Sum local-variable sizes; the use of locals_size continues in elided
 * lines — presumably another omit_fp restriction, TODO confirm. */
1831 for (i = cfg->locals_start; i < cfg->num_varinfo; i++) {
1832 MonoInst *ins = cfg->varinfo [i];
1835 locals_size += mono_type_size (ins->inst_vtype, &ialign);
1840 * Set var information according to the calling convention. arm version.
1841 * The locals var stuff should most likely be split in another method.
/* Assigns a frame location (OP_REGOFFSET/OP_REGVAR) to the return value,
 * vret address, seq-point/debugger variables, atomic-op temporary, locals,
 * 'this', the sig cookie and each parameter, accumulating 'offset' upward
 * from SP/FP (MONO_CFG_HAS_SPILLUP). Final frame size -> cfg->stack_offset.
 * NOTE(review): intermediate lines are elided from this listing; comments
 * describe only the visible statements. */
1844 mono_arch_allocate_vars (MonoCompile *cfg)
1846 MonoMethodSignature *sig;
1847 MonoMethodHeader *header;
1850 int i, offset, size, align, curinst;
1854 sig = mono_method_signature (cfg->method);
1856 if (!cfg->arch.cinfo)
1857 cfg->arch.cinfo = get_call_info (cfg->generic_sharing_context, cfg->mempool, sig);
1858 cinfo = cfg->arch.cinfo;
1859 sig_ret = mini_replace_type (sig->ret);
1861 mono_arch_compute_omit_fp (cfg);
/* Frame register: SP when FP is omitted, FP otherwise. */
1863 if (cfg->arch.omit_fp)
1864 cfg->frame_reg = ARMREG_SP;
1866 cfg->frame_reg = ARMREG_FP;
1868 cfg->flags |= MONO_CFG_HAS_SPILLUP;
1870 /* allow room for the vararg method args: void* and long/double */
1871 if (mono_jit_trace_calls != NULL && mono_trace_eval (cfg->method))
1872 cfg->param_area = MAX (cfg->param_area, sizeof (gpointer)*8);
1874 header = cfg->header;
1876 /* See mono_arch_get_global_int_regs () */
1877 if (cfg->flags & MONO_CFG_HAS_CALLS)
1878 cfg->uses_rgctx_reg = TRUE;
1880 if (cfg->frame_reg != ARMREG_SP)
1881 cfg->used_int_regs |= 1 << cfg->frame_reg;
1883 if (cfg->compile_aot || cfg->uses_rgctx_reg || COMPILE_LLVM (cfg))
1884 /* V5 is reserved for passing the vtable/rgctx/IMT method */
1885 cfg->used_int_regs |= (1 << ARMREG_V5);
/* Scalar returns live in r0 (OP_REGVAR); struct returns handled below. */
1889 if (!MONO_TYPE_ISSTRUCT (sig_ret) && !cinfo->vtype_retaddr) {
1890 if (sig_ret->type != MONO_TYPE_VOID) {
1891 cfg->ret->opcode = OP_REGVAR;
1892 cfg->ret->inst_c0 = ARMREG_R0;
1895 /* local vars are at a positive offset from the stack pointer */
1897 * also note that if the function uses alloca, we use FP
1898 * to point at the local variables.
1900 offset = 0; /* linkage area */
1901 /* align the offset to 16 bytes: not sure this is needed here */
1903 //offset &= ~(8 - 1);
1905 /* add parameter area size for called functions */
1906 offset += cfg->param_area;
1909 if (cfg->flags & MONO_CFG_HAS_FPOUT)
1912 /* allow room to save the return value */
1913 if (mono_jit_trace_calls != NULL && mono_trace_eval (cfg->method))
1916 /* the MonoLMF structure is stored just below the stack pointer */
1917 if (cinfo->ret.storage == RegTypeStructByVal) {
1918 cfg->ret->opcode = OP_REGOFFSET;
1919 cfg->ret->inst_basereg = cfg->frame_reg;
1920 offset += sizeof (gpointer) - 1;
1921 offset &= ~(sizeof (gpointer) - 1);
/* NOTE(review): inst_offset is negative here, unlike every other slot —
 * presumably intentional for the by-val return area; confirm. */
1922 cfg->ret->inst_offset = - offset;
1923 offset += sizeof(gpointer);
1924 } else if (cinfo->vtype_retaddr) {
1925 ins = cfg->vret_addr;
1926 offset += sizeof(gpointer) - 1;
1927 offset &= ~(sizeof(gpointer) - 1);
1928 ins->inst_offset = offset;
1929 ins->opcode = OP_REGOFFSET;
1930 ins->inst_basereg = cfg->frame_reg;
1931 if (G_UNLIKELY (cfg->verbose_level > 1)) {
1932 printf ("vret_addr =");
1933 mono_print_ins (cfg->vret_addr);
1935 offset += sizeof(gpointer);
1938 /* Allocate these first so they have a small offset, OP_SEQ_POINT depends on this */
1939 if (cfg->arch.seq_point_info_var) {
1942 ins = cfg->arch.seq_point_info_var;
1946 offset += align - 1;
1947 offset &= ~(align - 1);
1948 ins->opcode = OP_REGOFFSET;
1949 ins->inst_basereg = cfg->frame_reg;
1950 ins->inst_offset = offset;
1953 ins = cfg->arch.ss_trigger_page_var;
1956 offset += align - 1;
1957 offset &= ~(align - 1);
1958 ins->opcode = OP_REGOFFSET;
1959 ins->inst_basereg = cfg->frame_reg;
1960 ins->inst_offset = offset;
/* Soft-breakpoint (debugger) variables get slots the same way. */
1964 if (cfg->arch.seq_point_read_var) {
1967 ins = cfg->arch.seq_point_read_var;
1971 offset += align - 1;
1972 offset &= ~(align - 1);
1973 ins->opcode = OP_REGOFFSET;
1974 ins->inst_basereg = cfg->frame_reg;
1975 ins->inst_offset = offset;
1978 ins = cfg->arch.seq_point_ss_method_var;
1981 offset += align - 1;
1982 offset &= ~(align - 1);
1983 ins->opcode = OP_REGOFFSET;
1984 ins->inst_basereg = cfg->frame_reg;
1985 ins->inst_offset = offset;
1988 ins = cfg->arch.seq_point_bp_method_var;
1991 offset += align - 1;
1992 offset &= ~(align - 1);
1993 ins->opcode = OP_REGOFFSET;
1994 ins->inst_basereg = cfg->frame_reg;
1995 ins->inst_offset = offset;
1999 if (cfg->has_atomic_exchange_i4 || cfg->has_atomic_cas_i4 || cfg->has_atomic_add_i4) {
2000 /* Allocate a temporary used by the atomic ops */
2004 /* Allocate a local slot to hold the sig cookie address */
2005 offset += align - 1;
2006 offset &= ~(align - 1);
2007 cfg->arch.atomic_tmp_offset = offset;
2010 cfg->arch.atomic_tmp_offset = -1;
2013 cfg->locals_min_stack_offset = offset;
/* Locals: skip dead/already-placed vars, size them (native size for
 * pinvoke vtypes), force >= 4-byte alignment for memcpy correctness,
 * and hand out aligned offsets. */
2015 curinst = cfg->locals_start;
2016 for (i = curinst; i < cfg->num_varinfo; ++i) {
2019 ins = cfg->varinfo [i];
2020 if ((ins->flags & MONO_INST_IS_DEAD) || ins->opcode == OP_REGVAR || ins->opcode == OP_REGOFFSET)
2023 t = ins->inst_vtype;
2024 if (cfg->gsharedvt && mini_is_gsharedvt_variable_type (cfg, t))
2027 /* inst->backend.is_pinvoke indicates native sized value types, this is used by the
2028 * pinvoke wrappers when they call functions returning structure */
2029 if (ins->backend.is_pinvoke && MONO_TYPE_ISSTRUCT (t) && t->type != MONO_TYPE_TYPEDBYREF) {
2030 size = mono_class_native_size (mono_class_from_mono_type (t), &ualign);
2034 size = mono_type_size (t, &align);
2036 /* FIXME: if a structure is misaligned, our memcpy doesn't work,
2037 * since it loads/stores misaligned words, which don't do the right thing.
2039 if (align < 4 && size >= 4)
2041 if (ALIGN_TO (offset, align) > ALIGN_TO (offset, 4))
2042 mini_gc_set_slot_type_from_fp (cfg, ALIGN_TO (offset, 4), SLOT_NOREF);
2043 offset += align - 1;
2044 offset &= ~(align - 1);
2045 ins->opcode = OP_REGOFFSET;
2046 ins->inst_offset = offset;
2047 ins->inst_basereg = cfg->frame_reg;
2049 //g_print ("allocating local %d to %d\n", i, inst->inst_offset);
2052 cfg->locals_max_stack_offset = offset;
/* 'this' argument (if present): give it a frame slot. */
2056 ins = cfg->args [curinst];
2057 if (ins->opcode != OP_REGVAR) {
2058 ins->opcode = OP_REGOFFSET;
2059 ins->inst_basereg = cfg->frame_reg;
2060 offset += sizeof (gpointer) - 1;
2061 offset &= ~(sizeof (gpointer) - 1);
2062 ins->inst_offset = offset;
2063 offset += sizeof (gpointer);
2068 if (sig->call_convention == MONO_CALL_VARARG) {
2072 /* Allocate a local slot to hold the sig cookie address */
2073 offset += align - 1;
2074 offset &= ~(align - 1);
2075 cfg->sig_cookie = offset;
/* Remaining parameters: same sizing/alignment rules as locals. */
2079 for (i = 0; i < sig->param_count; ++i) {
2080 ins = cfg->args [curinst];
2082 if (ins->opcode != OP_REGVAR) {
2083 ins->opcode = OP_REGOFFSET;
2084 ins->inst_basereg = cfg->frame_reg;
2085 size = mini_type_stack_size_full (cfg->generic_sharing_context, sig->params [i], &ualign, sig->pinvoke);
2087 /* FIXME: if a structure is misaligned, our memcpy doesn't work,
2088 * since it loads/stores misaligned words, which don't do the right thing.
2090 if (align < 4 && size >= 4)
2092 /* The code in the prolog () stores words when storing vtypes received in a register */
2093 if (MONO_TYPE_ISSTRUCT (sig->params [i]))
2095 if (ALIGN_TO (offset, align) > ALIGN_TO (offset, 4))
2096 mini_gc_set_slot_type_from_fp (cfg, ALIGN_TO (offset, 4), SLOT_NOREF);
2097 offset += align - 1;
2098 offset &= ~(align - 1);
2099 ins->inst_offset = offset;
2105 /* align the offset to 8 bytes */
2106 if (ALIGN_TO (offset, 8) > ALIGN_TO (offset, 4))
2107 mini_gc_set_slot_type_from_fp (cfg, ALIGN_TO (offset, 4), SLOT_NOREF);
2112 cfg->stack_offset = offset;
/*
 * mono_arch_create_vars:
 * Create the architecture-specific compile-time variables: VFP scratch
 * slots (hard-float), the hidden vret address argument, and the volatile
 * locals used by sequence points / soft breakpoints.
 * NOTE(review): intermediate lines are elided from this listing.
 */
2116 mono_arch_create_vars (MonoCompile *cfg)
2118 MonoMethodSignature *sig;
2122 sig = mono_method_signature (cfg->method);
2124 if (!cfg->arch.cinfo)
2125 cfg->arch.cinfo = get_call_info (cfg->generic_sharing_context, cfg->mempool, sig);
2126 cinfo = cfg->arch.cinfo;
/* Two volatile double-sized scratch slots for VFP<->core moves. */
2128 if (IS_HARD_FLOAT) {
2129 for (i = 0; i < 2; i++) {
2130 MonoInst *inst = mono_compile_create_var (cfg, &mono_defaults.double_class->byval_arg, OP_LOCAL);
2131 inst->flags |= MONO_INST_VOLATILE;
2133 cfg->arch.vfp_scratch_slots [i] = (gpointer) inst;
2137 if (cinfo->ret.storage == RegTypeStructByVal)
2138 cfg->ret_var_is_local = TRUE;
2140 if (cinfo->vtype_retaddr) {
2141 cfg->vret_addr = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_ARG);
2142 if (G_UNLIKELY (cfg->verbose_level > 1)) {
2143 printf ("vret_addr = ");
2144 mono_print_ins (cfg->vret_addr);
/* Sequence-point support: soft breakpoints need three vars (read page,
 * single-step method, breakpoint method); AOT needs the seq-point info
 * var; otherwise a cached ss_trigger_page var saves a load per point. */
2148 if (cfg->gen_seq_points_debug_data) {
2149 if (cfg->soft_breakpoints) {
2150 MonoInst *ins = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
2151 ins->flags |= MONO_INST_VOLATILE;
2152 cfg->arch.seq_point_read_var = ins;
2154 ins = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
2155 ins->flags |= MONO_INST_VOLATILE;
2156 cfg->arch.seq_point_ss_method_var = ins;
2158 ins = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
2159 ins->flags |= MONO_INST_VOLATILE;
2160 cfg->arch.seq_point_bp_method_var = ins;
2162 g_assert (!cfg->compile_aot);
2163 } else if (cfg->compile_aot) {
2164 MonoInst *ins = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
2165 ins->flags |= MONO_INST_VOLATILE;
2166 cfg->arch.seq_point_info_var = ins;
2168 /* Allocate a separate variable for this to save 1 load per seq point */
2169 ins = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
2170 ins->flags |= MONO_INST_VOLATILE;
2171 cfg->arch.ss_trigger_page_var = ins;
/*
 * emit_sig_cookie:
 * For MONO_CALL_VARARG calls, store the signature cookie (a truncated copy
 * of the call signature starting at the sentinel) at its stack slot so
 * mono_ArgIterator_Setup can locate the trailing varargs. No-op for tail
 * calls. The cookie must have been assigned a stack slot (RegTypeBase).
 */
2177 emit_sig_cookie (MonoCompile *cfg, MonoCallInst *call, CallInfo *cinfo)
2179 MonoMethodSignature *tmp_sig;
2182 if (call->tail_call)
2185 g_assert (cinfo->sig_cookie.storage == RegTypeBase);
2188 * mono_ArgIterator_Setup assumes the signature cookie is
2189 * passed first and all the arguments which were before it are
2190 * passed on the stack after the signature. So compensate by
2191 * passing a different signature.
/* Duplicate the signature, dropping everything before the sentinel. */
2193 tmp_sig = mono_metadata_signature_dup (call->signature);
2194 tmp_sig->param_count -= call->signature->sentinelpos;
2195 tmp_sig->sentinelpos = 0;
2196 memcpy (tmp_sig->params, call->signature->params + call->signature->sentinelpos, tmp_sig->param_count * sizeof (MonoType*));
2198 sig_reg = mono_alloc_ireg (cfg);
2199 MONO_EMIT_NEW_SIGNATURECONST (cfg, sig_reg, tmp_sig);
2201 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, ARMREG_SP, cinfo->sig_cookie.offset, sig_reg);
/*
 * mono_arch_get_llvm_call_info:
 * Translate this backend's CallInfo into LLVMCallInfo for the LLVM code
 * path. Unsupported return/argument conventions don't fail hard — they set
 * cfg->disable_llvm with a message so compilation falls back to the JIT.
 * NOTE(review): intermediate lines are elided from this listing.
 */
2206 mono_arch_get_llvm_call_info (MonoCompile *cfg, MonoMethodSignature *sig)
2211 LLVMCallInfo *linfo;
2213 n = sig->param_count + sig->hasthis;
2215 cinfo = get_call_info (cfg->generic_sharing_context, cfg->mempool, sig);
2217 linfo = mono_mempool_alloc0 (cfg->mempool, sizeof (LLVMCallInfo) + (sizeof (LLVMArgInfo) * n));
2220 * LLVM always uses the native ABI while we use our own ABI, the
2221 * only difference is the handling of vtypes:
2222 * - we only pass/receive them in registers in some cases, and only
2223 * in 1 or 2 integer registers.
2225 if (cinfo->vtype_retaddr) {
2226 /* Vtype returned using a hidden argument */
2227 linfo->ret.storage = LLVMArgVtypeRetAddr;
2228 linfo->vret_arg_index = cinfo->vret_arg_index;
2229 } else if (cinfo->ret.storage != RegTypeGeneral && cinfo->ret.storage != RegTypeNone && cinfo->ret.storage != RegTypeFP && cinfo->ret.storage != RegTypeIRegPair) {
2230 cfg->exception_message = g_strdup ("unknown ret conv");
2231 cfg->disable_llvm = TRUE;
/* Per-argument mapping: core regs -> LLVMArgInIReg, by-val structs ->
 * LLVMArgAsIArgs (whole words); anything else disables LLVM. */
2235 for (i = 0; i < n; ++i) {
2236 ainfo = cinfo->args + i;
2238 linfo->args [i].storage = LLVMArgNone;
2240 switch (ainfo->storage) {
2241 case RegTypeGeneral:
2242 case RegTypeIRegPair:
2244 linfo->args [i].storage = LLVMArgInIReg;
2246 case RegTypeStructByVal:
2247 linfo->args [i].storage = LLVMArgAsIArgs;
2248 linfo->args [i].nslots = ainfo->struct_size / sizeof (gpointer);
2251 cfg->exception_message = g_strdup_printf ("ainfo->storage (%d)", ainfo->storage);
2252 cfg->disable_llvm = TRUE;
/*
 * mono_arch_emit_call:
 * Lower a call instruction: for every argument, emit the IR that moves it
 * into the register(s) or stack slot chosen by get_call_info, handling
 * soft-float splitting (FGETLOW32/FGETHIGH32), by-val structs
 * (OP_OUTARG_VT), the vararg sig cookie and the hidden vret argument.
 * NOTE(review): this listing is missing intermediate lines (gaps in the
 * embedded numbering); comments describe only the visible statements.
 */
2262 mono_arch_emit_call (MonoCompile *cfg, MonoCallInst *call)
2265 MonoMethodSignature *sig;
2269 sig = call->signature;
2270 n = sig->param_count + sig->hasthis;
2272 cinfo = get_call_info (cfg->generic_sharing_context, NULL, sig);
2274 for (i = 0; i < n; ++i) {
2275 ArgInfo *ainfo = cinfo->args + i;
/* 'this' (when i < sig->hasthis) is treated as an int-sized argument. */
2278 if (i >= sig->hasthis)
2279 t = sig->params [i - sig->hasthis];
2281 t = &mono_defaults.int_class->byval_arg;
2282 t = mini_type_get_underlying_type (cfg->generic_sharing_context, t);
2284 if ((sig->call_convention == MONO_CALL_VARARG) && (i == sig->sentinelpos)) {
2285 /* Emit the signature cookie just before the implicit arguments */
2286 emit_sig_cookie (cfg, call, cinfo);
2289 in = call->args [i];
2291 switch (ainfo->storage) {
2292 case RegTypeGeneral:
2293 case RegTypeIRegPair:
/* I8/U8 in a register pair: low word (vreg+1) -> ainfo->reg,
 * high word (vreg+2) -> ainfo->reg + 1. */
2294 if (!t->byref && ((t->type == MONO_TYPE_I8) || (t->type == MONO_TYPE_U8))) {
2295 MONO_INST_NEW (cfg, ins, OP_MOVE);
2296 ins->dreg = mono_alloc_ireg (cfg);
2297 ins->sreg1 = in->dreg + 1;
2298 MONO_ADD_INS (cfg->cbb, ins);
2299 mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, ainfo->reg, FALSE);
2301 MONO_INST_NEW (cfg, ins, OP_MOVE);
2302 ins->dreg = mono_alloc_ireg (cfg);
2303 ins->sreg1 = in->dreg + 2;
2304 MONO_ADD_INS (cfg->cbb, ins);
2305 mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, ainfo->reg + 1, FALSE);
2306 } else if (!t->byref && ((t->type == MONO_TYPE_R8) || (t->type == MONO_TYPE_R4))) {
2307 if (ainfo->size == 4) {
2308 if (IS_SOFT_FLOAT) {
2309 /* mono_emit_call_args () have already done the r8->r4 conversion */
2310 /* The converted value is in an int vreg */
2311 MONO_INST_NEW (cfg, ins, OP_MOVE);
2312 ins->dreg = mono_alloc_ireg (cfg);
2313 ins->sreg1 = in->dreg;
2314 MONO_ADD_INS (cfg->cbb, ins);
2315 mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, ainfo->reg, FALSE);
/* VFP case: bounce the float through the param area to get it
 * into a core register (no direct VFP->core move emitted here). */
2319 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORER4_MEMBASE_REG, ARMREG_SP, (cfg->param_area - 8), in->dreg);
2320 creg = mono_alloc_ireg (cfg);
2321 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOAD_MEMBASE, creg, ARMREG_SP, (cfg->param_area - 8));
2322 mono_call_inst_add_outarg_reg (cfg, call, creg, ainfo->reg, FALSE);
2325 if (IS_SOFT_FLOAT) {
/* Soft-float double: split into two core registers. */
2326 MONO_INST_NEW (cfg, ins, OP_FGETLOW32);
2327 ins->dreg = mono_alloc_ireg (cfg);
2328 ins->sreg1 = in->dreg;
2329 MONO_ADD_INS (cfg->cbb, ins);
2330 mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, ainfo->reg, FALSE);
2332 MONO_INST_NEW (cfg, ins, OP_FGETHIGH32);
2333 ins->dreg = mono_alloc_ireg (cfg);
2334 ins->sreg1 = in->dreg;
2335 MONO_ADD_INS (cfg->cbb, ins);
2336 mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, ainfo->reg + 1, FALSE);
2340 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORER8_MEMBASE_REG, ARMREG_SP, (cfg->param_area - 8), in->dreg);
2341 creg = mono_alloc_ireg (cfg);
2342 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOAD_MEMBASE, creg, ARMREG_SP, (cfg->param_area - 8));
2343 mono_call_inst_add_outarg_reg (cfg, call, creg, ainfo->reg, FALSE);
2344 creg = mono_alloc_ireg (cfg);
2345 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOAD_MEMBASE, creg, ARMREG_SP, (cfg->param_area - 8 + 4));
2346 mono_call_inst_add_outarg_reg (cfg, call, creg, ainfo->reg + 1, FALSE);
2349 cfg->flags |= MONO_CFG_HAS_FPOUT;
2351 MONO_INST_NEW (cfg, ins, OP_MOVE);
2352 ins->dreg = mono_alloc_ireg (cfg);
2353 ins->sreg1 = in->dreg;
2354 MONO_ADD_INS (cfg->cbb, ins);
2356 mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, ainfo->reg, FALSE);
2359 case RegTypeStructByAddr:
2362 /* FIXME: where si the data allocated? */
2363 arg->backend.reg3 = ainfo->reg;
2364 call->used_iregs |= 1 << ainfo->reg;
2365 g_assert_not_reached ();
/* By-val structs and gsharedvt args: defer to mono_arch_emit_outarg_vt
 * via an OP_OUTARG_VT carrying a copy of the ArgInfo. */
2368 case RegTypeStructByVal:
2369 case RegTypeGSharedVtInReg:
2370 case RegTypeGSharedVtOnStack:
2371 MONO_INST_NEW (cfg, ins, OP_OUTARG_VT);
2372 ins->opcode = OP_OUTARG_VT;
2373 ins->sreg1 = in->dreg;
2374 ins->klass = in->klass;
2375 ins->inst_p0 = call;
2376 ins->inst_p1 = mono_mempool_alloc (cfg->mempool, sizeof (ArgInfo));
2377 memcpy (ins->inst_p1, ainfo, sizeof (ArgInfo));
2378 mono_call_inst_add_outarg_vt (cfg, call, ins);
2379 MONO_ADD_INS (cfg->cbb, ins);
/* RegTypeBase: argument lives on the outgoing stack area at ainfo->offset. */
2382 if (!t->byref && ((t->type == MONO_TYPE_I8) || (t->type == MONO_TYPE_U8))) {
2383 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, ARMREG_SP, ainfo->offset, in->dreg);
2384 } else if (!t->byref && ((t->type == MONO_TYPE_R4) || (t->type == MONO_TYPE_R8))) {
2385 if (t->type == MONO_TYPE_R8) {
2386 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORER8_MEMBASE_REG, ARMREG_SP, ainfo->offset, in->dreg);
2389 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, ARMREG_SP, ainfo->offset, in->dreg);
2391 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORER4_MEMBASE_REG, ARMREG_SP, ainfo->offset, in->dreg);
2394 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, ARMREG_SP, ainfo->offset, in->dreg);
/* RegTypeBaseGen: 64-bit value split between r3 and the stack
 * (byte-order dependent word selection). */
2397 case RegTypeBaseGen:
2398 if (!t->byref && ((t->type == MONO_TYPE_I8) || (t->type == MONO_TYPE_U8))) {
2399 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, ARMREG_SP, ainfo->offset, (G_BYTE_ORDER == G_BIG_ENDIAN) ? in->dreg + 1 : in->dreg + 2);
2400 MONO_INST_NEW (cfg, ins, OP_MOVE);
2401 ins->dreg = mono_alloc_ireg (cfg);
2402 ins->sreg1 = G_BYTE_ORDER == G_BIG_ENDIAN ? in->dreg + 2 : in->dreg + 1;
2403 MONO_ADD_INS (cfg->cbb, ins);
2404 mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, ARMREG_R3, FALSE);
2405 } else if (!t->byref && (t->type == MONO_TYPE_R8)) {
2408 /* This should work for soft-float as well */
2410 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORER8_MEMBASE_REG, ARMREG_SP, (cfg->param_area - 8), in->dreg);
2411 creg = mono_alloc_ireg (cfg);
2412 mono_call_inst_add_outarg_reg (cfg, call, creg, ARMREG_R3, FALSE);
2413 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOAD_MEMBASE, creg, ARMREG_SP, (cfg->param_area - 8));
2414 creg = mono_alloc_ireg (cfg);
2415 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOAD_MEMBASE, creg, ARMREG_SP, (cfg->param_area - 4));
2416 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, ARMREG_SP, ainfo->offset, creg);
2417 cfg->flags |= MONO_CFG_HAS_FPOUT;
2419 g_assert_not_reached ();
/* RegTypeFP (hard-float): doubles move directly; floats need the
 * float_args workaround described below. */
2423 int fdreg = mono_alloc_freg (cfg);
2425 if (ainfo->size == 8) {
2426 MONO_INST_NEW (cfg, ins, OP_FMOVE);
2427 ins->sreg1 = in->dreg;
2429 MONO_ADD_INS (cfg->cbb, ins);
2431 mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, ainfo->reg, TRUE);
2436 * Mono's register allocator doesn't speak single-precision registers that
2437 * overlap double-precision registers (i.e. armhf). So we have to work around
2438 * the register allocator and load the value from memory manually.
2440 * So we create a variable for the float argument and an instruction to store
2441 * the argument into the variable. We then store the list of these arguments
2442 * in cfg->float_args. This list is then used by emit_float_args later to
2443 * pass the arguments in the various call opcodes.
2445 * This is not very nice, and we should really try to fix the allocator.
2448 MonoInst *float_arg = mono_compile_create_var (cfg, &mono_defaults.single_class->byval_arg, OP_LOCAL);
2450 /* Make sure the instruction isn't seen as pointless and removed.
2452 float_arg->flags |= MONO_INST_VOLATILE;
2454 MONO_EMIT_NEW_UNALU (cfg, OP_FMOVE, float_arg->dreg, in->dreg);
2456 /* We use the dreg to look up the instruction later. The hreg is used to
2457 * emit the instruction that loads the value into the FP reg.
2459 fad = mono_mempool_alloc0 (cfg->mempool, sizeof (FloatArgData));
2460 fad->vreg = float_arg->dreg;
2461 fad->hreg = ainfo->reg;
2463 call->float_args = g_slist_append_mempool (cfg->mempool, call->float_args, fad);
2466 call->used_iregs |= 1 << ainfo->reg;
2467 cfg->flags |= MONO_CFG_HAS_FPOUT;
2471 g_assert_not_reached ();
2475 /* Handle the case where there are no implicit arguments */
2476 if (!sig->pinvoke && (sig->call_convention == MONO_CALL_VARARG) && (n == sig->sentinelpos))
2477 emit_sig_cookie (cfg, call, cinfo);
2479 if (cinfo->ret.storage == RegTypeStructByVal) {
2480 /* The JIT will transform this into a normal call */
2481 call->vret_in_reg = TRUE;
2482 } else if (cinfo->vtype_retaddr) {
2484 MONO_INST_NEW (cfg, vtarg, OP_MOVE);
2485 vtarg->sreg1 = call->vret_var->dreg;
2486 vtarg->dreg = mono_alloc_preg (cfg);
2487 MONO_ADD_INS (cfg->cbb, vtarg);
2489 mono_call_inst_add_outarg_reg (cfg, call, vtarg->dreg, cinfo->ret.reg, FALSE);
2492 call->stack_usage = cinfo->stack_usage;
/*
 * mono_arch_emit_outarg_vt:
 * Lower an OP_OUTARG_VT: pass a value type (or gsharedvt address) for the
 * call described in ins->inst_p0 using the ArgInfo copy in ins->inst_p1.
 * Register words are loaded from the source with width-appropriate loads
 * (1/2/3-byte structs assembled from byte loads); any overflow words are
 * memcpy'd to the outgoing stack area.
 * NOTE(review): intermediate lines are elided from this listing.
 */
2498 mono_arch_emit_outarg_vt (MonoCompile *cfg, MonoInst *ins, MonoInst *src)
2500 MonoCallInst *call = (MonoCallInst*)ins->inst_p0;
2501 ArgInfo *ainfo = ins->inst_p1;
2502 int ovf_size = ainfo->vtsize;
2503 int doffset = ainfo->offset;
2504 int struct_size = ainfo->struct_size;
2505 int i, soffset, dreg, tmpreg;
/* gsharedvt: only the address travels — in a register or on the stack. */
2507 if (ainfo->storage == RegTypeGSharedVtInReg) {
2509 mono_call_inst_add_outarg_reg (cfg, call, src->dreg, ainfo->reg, FALSE);
2512 if (ainfo->storage == RegTypeGSharedVtOnStack) {
2513 /* Pass by addr on stack */
2514 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, ARMREG_SP, ainfo->offset, src->dreg);
/* Load ainfo->size words into consecutive core registers; struct_size
 * selects how to read a partial trailing word. */
2519 for (i = 0; i < ainfo->size; ++i) {
2520 dreg = mono_alloc_ireg (cfg);
2521 switch (struct_size) {
2523 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, dreg, src->dreg, soffset);
2526 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, dreg, src->dreg, soffset);
/* 3-byte case: assemble the word from three byte loads to avoid an
 * out-of-bounds 4-byte read. */
2529 tmpreg = mono_alloc_ireg (cfg);
2530 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, dreg, src->dreg, soffset);
2531 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, tmpreg, src->dreg, soffset + 1);
2532 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, tmpreg, tmpreg, 8);
2533 MONO_EMIT_NEW_BIALU (cfg, OP_IOR, dreg, dreg, tmpreg);
2534 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, tmpreg, src->dreg, soffset + 2);
2535 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, tmpreg, tmpreg, 16);
2536 MONO_EMIT_NEW_BIALU (cfg, OP_IOR, dreg, dreg, tmpreg);
2539 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, dreg, src->dreg, soffset);
2542 mono_call_inst_add_outarg_reg (cfg, call, dreg, ainfo->reg + i, FALSE);
2543 soffset += sizeof (gpointer);
2544 struct_size -= sizeof (gpointer);
2546 //g_print ("vt size: %d at R%d + %d\n", doffset, vt->inst_basereg, vt->inst_offset);
/* Remainder of the struct goes on the stack. */
2548 mini_emit_memcpy (cfg, ARMREG_SP, doffset, src->dreg, soffset, MIN (ovf_size * sizeof (gpointer), struct_size), struct_size < 4 ? 1 : 4);
/*
 * mono_arch_emit_setret:
 *   Emit IR that moves VAL into the return-value location for METHOD.
 *   Longs use OP_SETLRET with the vreg pair (dreg+1 = low word, dreg+2 =
 *   high word, per the JIT's long-register convention); floats depend on
 *   the FPU mode; everything else is a plain OP_MOVE into cfg->ret.
 *   NOTE(review): listing is elided — the arm_fpu switch header and several
 *   braces are missing from the visible lines.
 */
2552 mono_arch_emit_setret (MonoCompile *cfg, MonoMethod *method, MonoInst *val)
2554 MonoType *ret = mini_type_get_underlying_type (cfg->generic_sharing_context, mono_method_signature (method)->ret);
2557 if (ret->type == MONO_TYPE_I8 || ret->type == MONO_TYPE_U8) {
2560 if (COMPILE_LLVM (cfg)) {
2561 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, cfg->ret->dreg, val->dreg);
/* JIT path: place the two halves of the long in R0/R1 via OP_SETLRET. */
2563 MONO_INST_NEW (cfg, ins, OP_SETLRET);
2564 ins->sreg1 = val->dreg + 1;
2565 ins->sreg2 = val->dreg + 2;
2566 MONO_ADD_INS (cfg->cbb, ins);
2571 case MONO_ARM_FPU_NONE:
2572 if (ret->type == MONO_TYPE_R8) {
2575 MONO_INST_NEW (cfg, ins, OP_SETFRET);
2576 ins->dreg = cfg->ret->dreg;
2577 ins->sreg1 = val->dreg;
2578 MONO_ADD_INS (cfg->cbb, ins);
2581 if (ret->type == MONO_TYPE_R4) {
2582 /* Already converted to an int in method_to_ir () */
2583 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, cfg->ret->dreg, val->dreg);
2587 case MONO_ARM_FPU_VFP:
2588 case MONO_ARM_FPU_VFP_HARD:
/* VFP: both float widths are returned through OP_SETFRET. */
2589 if (ret->type == MONO_TYPE_R8 || ret->type == MONO_TYPE_R4) {
2592 MONO_INST_NEW (cfg, ins, OP_SETFRET);
2593 ins->dreg = cfg->ret->dreg;
2594 ins->sreg1 = val->dreg;
2595 MONO_ADD_INS (cfg->cbb, ins);
2600 g_assert_not_reached ();
/* Default: integer/pointer return goes through cfg->ret with a move. */
2604 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, cfg->ret->dreg, val->dreg);
2607 #endif /* #ifndef DISABLE_JIT */
2610 mono_arch_is_inst_imm (gint64 imm)
2616 MonoMethodSignature *sig;
2619 MonoType **param_types;
/*
 * dyn_call_supported:
 *   Return whether SIG can be invoked through the dyn-call (generic
 *   trampoline-free invoke) path: all arguments must fit in the PARAM_REGS
 *   registers plus DYN_CALL_STACK_ARGS stack slots, and both the return
 *   storage and every argument storage kind must be one the start/finish
 *   helpers know how to marshal.
 *   NOTE(review): listing is elided — the `return FALSE`/`break` lines of
 *   most cases are missing from view.
 */
2623 dyn_call_supported (CallInfo *cinfo, MonoMethodSignature *sig)
/* Too many arguments to fit registers + reserved stack slots. */
2627 if (sig->hasthis + sig->param_count > PARAM_REGS + DYN_CALL_STACK_ARGS)
2630 switch (cinfo->ret.storage) {
2632 case RegTypeGeneral:
2633 case RegTypeIRegPair:
2634 case RegTypeStructByAddr:
2645 for (i = 0; i < cinfo->nargs; ++i) {
2646 ArgInfo *ainfo = &cinfo->args [i];
2649 switch (ainfo->storage) {
2650 case RegTypeGeneral:
2652 case RegTypeIRegPair:
/* Stack argument must land inside the reserved dyn-call slots. */
2655 if (ainfo->offset >= (DYN_CALL_STACK_ARGS * sizeof (gpointer)))
2658 case RegTypeStructByVal:
/* size == 0 means the struct is passed entirely on the stack. */
2659 if (ainfo->size == 0)
2660 last_slot = PARAM_REGS + (ainfo->offset / 4) + ainfo->vtsize;
2662 last_slot = ainfo->reg + ainfo->size + ainfo->vtsize;
2663 if (last_slot >= PARAM_REGS + DYN_CALL_STACK_ARGS)
2671 // FIXME: Can't use cinfo only as it doesn't contain info about I8/float */
2672 for (i = 0; i < sig->param_count; ++i) {
2673 MonoType *t = sig->params [i];
2678 t = mini_replace_type (t);
/*
 * mono_arch_dyn_call_prepare:
 *   Precompute the per-signature information needed by
 *   mono_arch_start_dyn_call ().  Returns a heap-allocated ArchDynCallInfo
 *   (cast to the opaque MonoDynCallInfo) owning the CallInfo and the
 *   param_types array; mono_arch_dyn_call_free () must release it.
 *   Returns NULL when the signature is not dyn-call capable.
 */
2701 mono_arch_dyn_call_prepare (MonoMethodSignature *sig)
2703 ArchDynCallInfo *info;
2707 cinfo = get_call_info (NULL, NULL, sig);
2709 if (!dyn_call_supported (cinfo, sig)) {
/* NOTE(review): the failure path is elided here — it must g_free (cinfo)
 * before returning NULL, otherwise the CallInfo leaks; confirm. */
2714 info = g_new0 (ArchDynCallInfo, 1);
2715 // FIXME: Preprocess the info to speed up start_dyn_call ()
2717 info->cinfo = cinfo;
2718 info->rtype = mini_replace_type (sig->ret);
/* Underlying types are cached so start_dyn_call need not re-resolve them. */
2719 info->param_types = g_new0 (MonoType*, sig->param_count);
2720 for (i = 0; i < sig->param_count; ++i)
2721 info->param_types [i] = mini_replace_type (sig->params [i]);
2723 return (MonoDynCallInfo*)info;
2727 mono_arch_dyn_call_free (MonoDynCallInfo *info)
2729 ArchDynCallInfo *ainfo = (ArchDynCallInfo*)info;
2731 g_free (ainfo->cinfo);
/*
 * mono_arch_start_dyn_call:
 *   Marshal ARGS (an array of pointers to the actual argument values) into
 *   the DynCallArgs structure at BUF according to the layout precomputed in
 *   INFO, so the dyn-call trampoline can load them straight into registers
 *   and reserved stack slots.  RET receives the address where the return
 *   value will be stored.
 *   NOTE(review): listing is elided — initialization of arg_index/greg/
 *   pindex, several case labels and break statements are missing from view.
 */
2736 mono_arch_start_dyn_call (MonoDynCallInfo *info, gpointer **args, guint8 *ret, guint8 *buf, int buf_len)
2738 ArchDynCallInfo *dinfo = (ArchDynCallInfo*)info;
2739 DynCallArgs *p = (DynCallArgs*)buf;
2740 int arg_index, greg, i, j, pindex;
2741 MonoMethodSignature *sig = dinfo->sig;
2743 g_assert (buf_len >= sizeof (DynCallArgs));
/* `this' (or an early vret pointer) occupies the first register slot. */
2752 if (sig->hasthis || dinfo->cinfo->vret_arg_index == 1) {
2753 p->regs [greg ++] = (mgreg_t)*(args [arg_index ++]);
/* Valuetype returns pass the destination buffer as a hidden argument. */
2758 if (dinfo->cinfo->vtype_retaddr)
2759 p->regs [greg ++] = (mgreg_t)ret;
2761 for (i = pindex; i < sig->param_count; i++) {
2762 MonoType *t = dinfo->param_types [i];
2763 gpointer *arg = args [arg_index ++];
2764 ArgInfo *ainfo = &dinfo->cinfo->args [i + sig->hasthis];
/* Map the argument's storage to a slot index in p->regs: register slots
 * first, then the reserved stack slots starting at PARAM_REGS. */
2767 if (ainfo->storage == RegTypeGeneral || ainfo->storage == RegTypeIRegPair || ainfo->storage == RegTypeStructByVal)
2769 else if (ainfo->storage == RegTypeBase)
2770 slot = PARAM_REGS + (ainfo->offset / 4);
2772 g_assert_not_reached ();
2775 p->regs [slot] = (mgreg_t)*arg;
2780 case MONO_TYPE_STRING:
2781 case MONO_TYPE_CLASS:
2782 case MONO_TYPE_ARRAY:
2783 case MONO_TYPE_SZARRAY:
2784 case MONO_TYPE_OBJECT:
2788 p->regs [slot] = (mgreg_t)*arg;
2790 case MONO_TYPE_BOOLEAN:
2792 p->regs [slot] = *(guint8*)arg;
2795 p->regs [slot] = *(gint8*)arg;
2798 p->regs [slot] = *(gint16*)arg;
2801 case MONO_TYPE_CHAR:
2802 p->regs [slot] = *(guint16*)arg;
2805 p->regs [slot] = *(gint32*)arg;
2808 p->regs [slot] = *(guint32*)arg;
/* 64-bit values occupy two consecutive slots. */
2812 p->regs [slot ++] = (mgreg_t)arg [0];
2813 p->regs [slot] = (mgreg_t)arg [1];
2816 p->regs [slot] = *(mgreg_t*)arg;
2819 p->regs [slot ++] = (mgreg_t)arg [0];
2820 p->regs [slot] = (mgreg_t)arg [1];
2822 case MONO_TYPE_GENERICINST:
2823 if (MONO_TYPE_IS_REFERENCE (t)) {
2824 p->regs [slot] = (mgreg_t)*arg;
2829 case MONO_TYPE_VALUETYPE:
2830 g_assert (ainfo->storage == RegTypeStructByVal);
2832 if (ainfo->size == 0)
2833 slot = PARAM_REGS + (ainfo->offset / 4);
/* Copy the whole struct word by word into its register/stack slots. */
2837 for (j = 0; j < ainfo->size + ainfo->vtsize; ++j)
2838 p->regs [slot ++] = ((mgreg_t*)arg) [j];
2841 g_assert_not_reached ();
2847 mono_arch_finish_dyn_call (MonoDynCallInfo *info, guint8 *buf)
2849 ArchDynCallInfo *ainfo = (ArchDynCallInfo*)info;
2850 MonoType *ptype = ainfo->rtype;
2851 guint8 *ret = ((DynCallArgs*)buf)->ret;
2852 mgreg_t res = ((DynCallArgs*)buf)->res;
2853 mgreg_t res2 = ((DynCallArgs*)buf)->res2;
2855 switch (ptype->type) {
2856 case MONO_TYPE_VOID:
2857 *(gpointer*)ret = NULL;
2859 case MONO_TYPE_STRING:
2860 case MONO_TYPE_CLASS:
2861 case MONO_TYPE_ARRAY:
2862 case MONO_TYPE_SZARRAY:
2863 case MONO_TYPE_OBJECT:
2867 *(gpointer*)ret = (gpointer)res;
2873 case MONO_TYPE_BOOLEAN:
2874 *(guint8*)ret = res;
2877 *(gint16*)ret = res;
2880 case MONO_TYPE_CHAR:
2881 *(guint16*)ret = res;
2884 *(gint32*)ret = res;
2887 *(guint32*)ret = res;
2891 /* This handles endianness as well */
2892 ((gint32*)ret) [0] = res;
2893 ((gint32*)ret) [1] = res2;
2895 case MONO_TYPE_GENERICINST:
2896 if (MONO_TYPE_IS_REFERENCE (ptype)) {
2897 *(gpointer*)ret = (gpointer)res;
2902 case MONO_TYPE_VALUETYPE:
2903 g_assert (ainfo->cinfo->vtype_retaddr);
2908 *(float*)ret = *(float*)&res;
2910 case MONO_TYPE_R8: {
2917 *(double*)ret = *(double*)®s;
2921 g_assert_not_reached ();
2928 * Allow tracing to work with this interface (with an optional argument)
/*
 * mono_arch_instrument_prolog:
 *   Emit the enter-method tracing call: FUNC (cfg->method, NULL).
 *   R0 = method, R1 = frame pointer placeholder (0 for now), the callee
 *   address is materialized in R2 and invoked indirectly.
 *   NOTE(review): the surrounding function body (code pointer setup and
 *   return) is elided from this listing.
 */
2932 mono_arch_instrument_prolog (MonoCompile *cfg, void *func, void *p, gboolean enable_arguments)
2936 code = mono_arm_emit_load_imm (code, ARMREG_R0, (guint32)cfg->method);
2937 ARM_MOV_REG_IMM8 (code, ARMREG_R1, 0); /* NULL ebp for now */
2938 code = mono_arm_emit_load_imm (code, ARMREG_R2, (guint32)func);
2939 code = emit_call_reg (code, ARMREG_R2);
/*
 * mono_arch_instrument_epilog_full:
 *   Emit the leave-method tracing call.  The return value (which lives in
 *   R0/R1 or VFP D0 depending on the type) is spilled to the param area at
 *   save_offset, FUNC (cfg->method) is invoked, then the return value is
 *   reloaded so the real epilog sees it unchanged.
 *   NOTE(review): listing is elided — the rtype switch header, SAVE_* case
 *   labels and breaks are missing from the visible lines.
 */
2953 mono_arch_instrument_epilog_full (MonoCompile *cfg, void *func, void *p, gboolean enable_arguments, gboolean preserve_argument_registers)
2956 int save_mode = SAVE_NONE;
2958 MonoMethod *method = cfg->method;
2959 MonoType *ret_type = mini_type_get_underlying_type (cfg->generic_sharing_context, mono_method_signature (method)->ret);
2960 int rtype = ret_type->type;
2961 int save_offset = cfg->param_area;
2965 offset = code - cfg->native_code;
2966 /* we need about 16 instructions */
2967 if (offset > (cfg->code_size - 16 * 4)) {
2968 cfg->code_size *= 2;
2969 cfg->native_code = g_realloc (cfg->native_code, cfg->code_size);
2970 code = cfg->native_code + offset;
2973 case MONO_TYPE_VOID:
2974 /* special case string .ctor icall */
/* NOTE(review): this condition looks inverted — bare strcmp() is non-zero
 * when the name is NOT ".ctor", contradicting the comment above; upstream
 * uses !strcmp/== 0 here.  Confirm against the unelided source. */
2975 if (strcmp (".ctor", method->name) && method->klass == mono_defaults.string_class)
2976 save_mode = SAVE_ONE;
2978 save_mode = SAVE_NONE;
2982 save_mode = SAVE_TWO;
2986 save_mode = SAVE_ONE_FP;
2988 save_mode = SAVE_ONE;
2992 save_mode = SAVE_TWO_FP;
2994 save_mode = SAVE_TWO;
2996 case MONO_TYPE_GENERICINST:
2997 if (!mono_type_generic_inst_is_valuetype (ret_type)) {
2998 save_mode = SAVE_ONE;
3002 case MONO_TYPE_VALUETYPE:
3003 save_mode = SAVE_STRUCT;
3006 save_mode = SAVE_ONE;
/* Spill the return value before the tracing call clobbers R0-R3/D0. */
3010 switch (save_mode) {
3012 ARM_STR_IMM (code, ARMREG_R0, cfg->frame_reg, save_offset);
3013 ARM_STR_IMM (code, ARMREG_R1, cfg->frame_reg, save_offset + 4);
3014 if (enable_arguments) {
3015 ARM_MOV_REG_REG (code, ARMREG_R2, ARMREG_R1);
3016 ARM_MOV_REG_REG (code, ARMREG_R1, ARMREG_R0);
3020 ARM_STR_IMM (code, ARMREG_R0, cfg->frame_reg, save_offset);
3021 if (enable_arguments) {
3022 ARM_MOV_REG_REG (code, ARMREG_R1, ARMREG_R0);
3026 ARM_FSTS (code, ARM_VFP_F0, cfg->frame_reg, save_offset);
3027 if (enable_arguments) {
3028 ARM_FMRS (code, ARMREG_R1, ARM_VFP_F0);
3032 ARM_FSTD (code, ARM_VFP_D0, cfg->frame_reg, save_offset);
3033 if (enable_arguments) {
3034 ARM_FMDRR (code, ARMREG_R1, ARMREG_R2, ARM_VFP_D0);
3038 if (enable_arguments) {
3039 /* FIXME: get the actual address */
3040 ARM_MOV_REG_REG (code, ARMREG_R1, ARMREG_R0);
/* Call FUNC (method) through IP. */
3048 code = mono_arm_emit_load_imm (code, ARMREG_R0, (guint32)cfg->method);
3049 code = mono_arm_emit_load_imm (code, ARMREG_IP, (guint32)func);
3050 code = emit_call_reg (code, ARMREG_IP);
/* Restore the spilled return value. */
3052 switch (save_mode) {
3054 ARM_LDR_IMM (code, ARMREG_R0, cfg->frame_reg, save_offset);
3055 ARM_LDR_IMM (code, ARMREG_R1, cfg->frame_reg, save_offset + 4);
3058 ARM_LDR_IMM (code, ARMREG_R0, cfg->frame_reg, save_offset);
3061 ARM_FLDS (code, ARM_VFP_F0, cfg->frame_reg, save_offset);
3064 ARM_FLDD (code, ARM_VFP_D0, cfg->frame_reg, save_offset);
3075 * The immediate field for cond branches is big enough for all reasonable methods
3077 #define EMIT_COND_BRANCH_FLAGS(ins,condcode) \
3078 if (0 && ins->inst_true_bb->native_offset) { \
3079 ARM_B_COND (code, (condcode), (code - cfg->native_code + ins->inst_true_bb->native_offset) & 0xffffff); \
3081 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_BB, ins->inst_true_bb); \
3082 ARM_B_COND (code, (condcode), 0); \
3085 #define EMIT_COND_BRANCH(ins,cond) EMIT_COND_BRANCH_FLAGS(ins, branch_cc_table [(cond)])
3087 /* emit an exception if condition is fail
3089 * We assign the extra code used to throw the implicit exceptions
3090 * to cfg->bb_exit as far as the big branch handling is concerned
3092 #define EMIT_COND_SYSTEM_EXCEPTION_FLAGS(condcode,exc_name) \
3094 mono_add_patch_info (cfg, code - cfg->native_code, \
3095 MONO_PATCH_INFO_EXC, exc_name); \
3096 ARM_BL_COND (code, (condcode), 0); \
3099 #define EMIT_COND_SYSTEM_EXCEPTION(cond,exc_name) EMIT_COND_SYSTEM_EXCEPTION_FLAGS(branch_cc_table [(cond)], (exc_name))
3102 mono_arch_peephole_pass_1 (MonoCompile *cfg, MonoBasicBlock *bb)
/*
 * mono_arch_peephole_pass_2:
 *   Arch-specific peephole pass over BB, run after register allocation.
 *   Folds redundant store/load and load/load pairs to the same
 *   base+offset into moves or deletes them, narrows store/load pairs into
 *   sign/zero extensions, and removes no-op and back-to-back inverse moves.
 *   NOTE(review): listing is elided — case labels, breaks and closing
 *   braces are missing from the visible lines.
 */
3107 mono_arch_peephole_pass_2 (MonoCompile *cfg, MonoBasicBlock *bb)
3109 MonoInst *ins, *n, *last_ins = NULL;
3111 MONO_BB_FOR_EACH_INS_SAFE (bb, n, ins) {
3112 switch (ins->opcode) {
3115 /* Already done by an arch-independent pass */
3117 case OP_LOAD_MEMBASE:
3118 case OP_LOADI4_MEMBASE:
/* store reg -> [base+off]; load [base+off] -> reg  ==>  drop/convert load */
3120 * OP_STORE_MEMBASE_REG reg, offset(basereg)
3121 * OP_LOAD_MEMBASE offset(basereg), reg
3123 if (last_ins && (last_ins->opcode == OP_STOREI4_MEMBASE_REG
3124 || last_ins->opcode == OP_STORE_MEMBASE_REG) &&
3125 ins->inst_basereg == last_ins->inst_destbasereg &&
3126 ins->inst_offset == last_ins->inst_offset) {
3127 if (ins->dreg == last_ins->sreg1) {
3128 MONO_DELETE_INS (bb, ins);
3131 //static int c = 0; printf ("MATCHX %s %d\n", cfg->method->name,c++);
3132 ins->opcode = OP_MOVE;
3133 ins->sreg1 = last_ins->sreg1;
3137 * Note: reg1 must be different from the basereg in the second load
3138 * OP_LOAD_MEMBASE offset(basereg), reg1
3139 * OP_LOAD_MEMBASE offset(basereg), reg2
3141 * OP_LOAD_MEMBASE offset(basereg), reg1
3142 * OP_MOVE reg1, reg2
/* NOTE(review): `} if` below looks like it is missing an `else`; as written
 * this clause can also run after the store/load clause above — confirm. */
3144 } if (last_ins && (last_ins->opcode == OP_LOADI4_MEMBASE
3145 || last_ins->opcode == OP_LOAD_MEMBASE) &&
3146 ins->inst_basereg != last_ins->dreg &&
3147 ins->inst_basereg == last_ins->inst_basereg &&
3148 ins->inst_offset == last_ins->inst_offset) {
3150 if (ins->dreg == last_ins->dreg) {
3151 MONO_DELETE_INS (bb, ins);
3154 ins->opcode = OP_MOVE;
3155 ins->sreg1 = last_ins->dreg;
3158 //g_assert_not_reached ();
3162 * OP_STORE_MEMBASE_IMM imm, offset(basereg)
3163 * OP_LOAD_MEMBASE offset(basereg), reg
3165 * OP_STORE_MEMBASE_IMM imm, offset(basereg)
3166 * OP_ICONST reg, imm
3168 } else if (last_ins && (last_ins->opcode == OP_STOREI4_MEMBASE_IMM
3169 || last_ins->opcode == OP_STORE_MEMBASE_IMM) &&
3170 ins->inst_basereg == last_ins->inst_destbasereg &&
3171 ins->inst_offset == last_ins->inst_offset) {
3172 //static int c = 0; printf ("MATCHX %s %d\n", cfg->method->name,c++);
3173 ins->opcode = OP_ICONST;
3174 ins->inst_c0 = last_ins->inst_imm;
/* Deliberately trips: this rewrite has never been validated on ARM. */
3175 g_assert_not_reached (); // check this rule
3179 case OP_LOADU1_MEMBASE:
3180 case OP_LOADI1_MEMBASE:
/* byte store followed by byte load of the same slot -> extension of sreg */
3181 if (last_ins && (last_ins->opcode == OP_STOREI1_MEMBASE_REG) &&
3182 ins->inst_basereg == last_ins->inst_destbasereg &&
3183 ins->inst_offset == last_ins->inst_offset) {
3184 ins->opcode = (ins->opcode == OP_LOADI1_MEMBASE) ? OP_ICONV_TO_I1 : OP_ICONV_TO_U1;
3185 ins->sreg1 = last_ins->sreg1;
3188 case OP_LOADU2_MEMBASE:
3189 case OP_LOADI2_MEMBASE:
3190 if (last_ins && (last_ins->opcode == OP_STOREI2_MEMBASE_REG) &&
3191 ins->inst_basereg == last_ins->inst_destbasereg &&
3192 ins->inst_offset == last_ins->inst_offset) {
3193 ins->opcode = (ins->opcode == OP_LOADI2_MEMBASE) ? OP_ICONV_TO_I2 : OP_ICONV_TO_U2;
3194 ins->sreg1 = last_ins->sreg1;
3198 ins->opcode = OP_MOVE;
/* Drop moves with identical source and destination. */
3202 if (ins->dreg == ins->sreg1) {
3203 MONO_DELETE_INS (bb, ins);
3207 * OP_MOVE sreg, dreg
3208 * OP_MOVE dreg, sreg
3210 if (last_ins && last_ins->opcode == OP_MOVE &&
3211 ins->sreg1 == last_ins->dreg &&
3212 ins->dreg == last_ins->sreg1) {
3213 MONO_DELETE_INS (bb, ins);
3221 bb->last_ins = last_ins;
3225 * the branch_cc_table should maintain the order of these
3239 branch_cc_table [] = {
3253 #define ADD_NEW_INS(cfg,dest,op) do { \
3254 MONO_INST_NEW ((cfg), (dest), (op)); \
3255 mono_bblock_insert_before_ins (bb, ins, (dest)); \
/*
 * map_to_reg_reg_op:
 *   Map an opcode with an immediate/membase operand to its register-operand
 *   counterpart, used by the lowering pass after it has materialized the
 *   immediate (or offset) into a register.  MEMBASE loads/stores become
 *   MEMINDEX forms; *_MEMBASE_IMM stores become *_MEMBASE_REG (the immediate
 *   is loaded separately and may need a second lowering round for the
 *   offset).  Asserts on any unmapped opcode.
 *   NOTE(review): listing is elided — several case bodies near the top are
 *   missing from view.
 */
3259 map_to_reg_reg_op (int op)
3268 case OP_COMPARE_IMM:
3270 case OP_ICOMPARE_IMM:
3284 case OP_LOAD_MEMBASE:
3285 return OP_LOAD_MEMINDEX;
3286 case OP_LOADI4_MEMBASE:
3287 return OP_LOADI4_MEMINDEX;
3288 case OP_LOADU4_MEMBASE:
3289 return OP_LOADU4_MEMINDEX;
3290 case OP_LOADU1_MEMBASE:
3291 return OP_LOADU1_MEMINDEX;
3292 case OP_LOADI2_MEMBASE:
3293 return OP_LOADI2_MEMINDEX;
3294 case OP_LOADU2_MEMBASE:
3295 return OP_LOADU2_MEMINDEX;
3296 case OP_LOADI1_MEMBASE:
3297 return OP_LOADI1_MEMINDEX;
3298 case OP_STOREI1_MEMBASE_REG:
3299 return OP_STOREI1_MEMINDEX;
3300 case OP_STOREI2_MEMBASE_REG:
3301 return OP_STOREI2_MEMINDEX;
3302 case OP_STOREI4_MEMBASE_REG:
3303 return OP_STOREI4_MEMINDEX;
3304 case OP_STORE_MEMBASE_REG:
3305 return OP_STORE_MEMINDEX;
3306 case OP_STORER4_MEMBASE_REG:
3307 return OP_STORER4_MEMINDEX;
3308 case OP_STORER8_MEMBASE_REG:
3309 return OP_STORER8_MEMINDEX;
3310 case OP_STORE_MEMBASE_IMM:
3311 return OP_STORE_MEMBASE_REG;
3312 case OP_STOREI1_MEMBASE_IMM:
3313 return OP_STOREI1_MEMBASE_REG;
3314 case OP_STOREI2_MEMBASE_IMM:
3315 return OP_STOREI2_MEMBASE_REG;
3316 case OP_STOREI4_MEMBASE_IMM:
3317 return OP_STOREI4_MEMBASE_REG;
3319 g_assert_not_reached ();
3323 * Remove from the instruction list the instructions that can't be
3324 * represented with very simple instructions with no register
/*
 * mono_arch_lowering_pass:
 *   Rewrite instructions in BB into forms the ARM code emitter can encode:
 *   immediates that don't fit the rotated-imm8 encoding are loaded with
 *   OP_ICONST into a fresh vreg, membase offsets outside the imm12/imm8/
 *   fpimm8 ranges are folded into the base register (via OP_ADD_IMM or an
 *   explicit OP_IADD), and some imm opcodes are strength-reduced
 *   (MUL_IMM by 0/1/power-of-two).  Fp compare successors are canonicalized
 *   by swapping operands.
 *   NOTE(review): listing is elided — case labels, breaks, `loop_start:`
 *   and many braces are missing from the visible lines.
 */
3328 mono_arch_lowering_pass (MonoCompile *cfg, MonoBasicBlock *bb)
3330 MonoInst *ins, *temp, *last_ins = NULL;
3331 int rot_amount, imm8, low_imm;
3333 MONO_BB_FOR_EACH_INS (bb, ins) {
3335 switch (ins->opcode) {
3339 case OP_COMPARE_IMM:
3340 case OP_ICOMPARE_IMM:
/* Immediate doesn't fit ARM's 8-bit-rotated encoding: load it into a
 * temp vreg and switch to the register form of the opcode. */
3354 if ((imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount)) < 0) {
3355 ADD_NEW_INS (cfg, temp, OP_ICONST);
3356 temp->inst_c0 = ins->inst_imm;
3357 temp->dreg = mono_alloc_ireg (cfg);
3358 ins->sreg2 = temp->dreg;
3359 ins->opcode = mono_op_imm_to_op (ins->opcode);
3361 if (ins->opcode == OP_SBB || ins->opcode == OP_ISBB || ins->opcode == OP_SUBCC)
/* MUL_IMM strength reduction: *1 -> move, *0 -> const 0, *2^n -> shift. */
3367 if (ins->inst_imm == 1) {
3368 ins->opcode = OP_MOVE;
3371 if (ins->inst_imm == 0) {
3372 ins->opcode = OP_ICONST;
3376 imm8 = mono_is_power_of_two (ins->inst_imm);
3378 ins->opcode = OP_SHL_IMM;
3379 ins->inst_imm = imm8;
3382 ADD_NEW_INS (cfg, temp, OP_ICONST);
3383 temp->inst_c0 = ins->inst_imm;
3384 temp->dreg = mono_alloc_ireg (cfg);
3385 ins->sreg2 = temp->dreg;
3386 ins->opcode = OP_IMUL;
3392 if (ins->next && (ins->next->opcode == OP_COND_EXC_C || ins->next->opcode == OP_COND_EXC_IC))
3393 /* ARM sets the C flag to 1 if there was _no_ overflow */
3394 ins->next->opcode = OP_COND_EXC_NC;
3397 case OP_IDIV_UN_IMM:
3399 case OP_IREM_UN_IMM:
/* Division/remainder have no immediate forms: force the register form. */
3400 ADD_NEW_INS (cfg, temp, OP_ICONST);
3401 temp->inst_c0 = ins->inst_imm;
3402 temp->dreg = mono_alloc_ireg (cfg);
3403 ins->sreg2 = temp->dreg;
3404 ins->opcode = mono_op_imm_to_op (ins->opcode);
3406 case OP_LOCALLOC_IMM:
3407 ADD_NEW_INS (cfg, temp, OP_ICONST);
3408 temp->inst_c0 = ins->inst_imm;
3409 temp->dreg = mono_alloc_ireg (cfg);
3410 ins->sreg1 = temp->dreg;
3411 ins->opcode = OP_LOCALLOC;
3413 case OP_LOAD_MEMBASE:
3414 case OP_LOADI4_MEMBASE:
3415 case OP_LOADU4_MEMBASE:
3416 case OP_LOADU1_MEMBASE:
3417 /* we can do two things: load the immed in a register
3418 * and use an indexed load, or see if the immed can be
3419 * represented as an ad_imm + a load with a smaller offset
3420 * that fits. We just do the first for now, optimize later.
3422 if (arm_is_imm12 (ins->inst_offset))
3424 ADD_NEW_INS (cfg, temp, OP_ICONST);
3425 temp->inst_c0 = ins->inst_offset;
3426 temp->dreg = mono_alloc_ireg (cfg);
3427 ins->sreg2 = temp->dreg;
3428 ins->opcode = map_to_reg_reg_op (ins->opcode);
3430 case OP_LOADI2_MEMBASE:
3431 case OP_LOADU2_MEMBASE:
3432 case OP_LOADI1_MEMBASE:
/* Halfword/signed-byte loads only take an 8-bit offset. */
3433 if (arm_is_imm8 (ins->inst_offset))
3435 ADD_NEW_INS (cfg, temp, OP_ICONST);
3436 temp->inst_c0 = ins->inst_offset;
3437 temp->dreg = mono_alloc_ireg (cfg);
3438 ins->sreg2 = temp->dreg;
3439 ins->opcode = map_to_reg_reg_op (ins->opcode);
3441 case OP_LOADR4_MEMBASE:
3442 case OP_LOADR8_MEMBASE:
/* VFP loads: try base + rotated-imm8 adjustment keeping a small
 * low offset, otherwise build the full address with an IADD. */
3443 if (arm_is_fpimm8 (ins->inst_offset))
3445 low_imm = ins->inst_offset & 0x1ff;
3446 if ((imm8 = mono_arm_is_rotated_imm8 (ins->inst_offset & ~0x1ff, &rot_amount)) >= 0) {
3447 ADD_NEW_INS (cfg, temp, OP_ADD_IMM);
3448 temp->inst_imm = ins->inst_offset & ~0x1ff;
3449 temp->sreg1 = ins->inst_basereg;
3450 temp->dreg = mono_alloc_ireg (cfg);
3451 ins->inst_basereg = temp->dreg;
3452 ins->inst_offset = low_imm;
3456 ADD_NEW_INS (cfg, temp, OP_ICONST);
3457 temp->inst_c0 = ins->inst_offset;
3458 temp->dreg = mono_alloc_ireg (cfg);
3460 ADD_NEW_INS (cfg, add_ins, OP_IADD);
3461 add_ins->sreg1 = ins->inst_basereg;
3462 add_ins->sreg2 = temp->dreg;
3463 add_ins->dreg = mono_alloc_ireg (cfg);
3465 ins->inst_basereg = add_ins->dreg;
3466 ins->inst_offset = 0;
3469 case OP_STORE_MEMBASE_REG:
3470 case OP_STOREI4_MEMBASE_REG:
3471 case OP_STOREI1_MEMBASE_REG:
3472 if (arm_is_imm12 (ins->inst_offset))
3474 ADD_NEW_INS (cfg, temp, OP_ICONST);
3475 temp->inst_c0 = ins->inst_offset;
3476 temp->dreg = mono_alloc_ireg (cfg);
3477 ins->sreg2 = temp->dreg;
3478 ins->opcode = map_to_reg_reg_op (ins->opcode);
3480 case OP_STOREI2_MEMBASE_REG:
3481 if (arm_is_imm8 (ins->inst_offset))
3483 ADD_NEW_INS (cfg, temp, OP_ICONST);
3484 temp->inst_c0 = ins->inst_offset;
3485 temp->dreg = mono_alloc_ireg (cfg);
3486 ins->sreg2 = temp->dreg;
3487 ins->opcode = map_to_reg_reg_op (ins->opcode);
3489 case OP_STORER4_MEMBASE_REG:
3490 case OP_STORER8_MEMBASE_REG:
3491 if (arm_is_fpimm8 (ins->inst_offset))
3493 low_imm = ins->inst_offset & 0x1ff;
3494 if ((imm8 = mono_arm_is_rotated_imm8 (ins->inst_offset & ~ 0x1ff, &rot_amount)) >= 0 && arm_is_fpimm8 (low_imm)) {
3495 ADD_NEW_INS (cfg, temp, OP_ADD_IMM);
3496 temp->inst_imm = ins->inst_offset & ~0x1ff;
3497 temp->sreg1 = ins->inst_destbasereg;
3498 temp->dreg = mono_alloc_ireg (cfg);
3499 ins->inst_destbasereg = temp->dreg;
3500 ins->inst_offset = low_imm;
3504 ADD_NEW_INS (cfg, temp, OP_ICONST);
3505 temp->inst_c0 = ins->inst_offset;
3506 temp->dreg = mono_alloc_ireg (cfg);
3508 ADD_NEW_INS (cfg, add_ins, OP_IADD);
3509 add_ins->sreg1 = ins->inst_destbasereg;
3510 add_ins->sreg2 = temp->dreg;
3511 add_ins->dreg = mono_alloc_ireg (cfg);
3513 ins->inst_destbasereg = add_ins->dreg;
3514 ins->inst_offset = 0;
3517 case OP_STORE_MEMBASE_IMM:
3518 case OP_STOREI1_MEMBASE_IMM:
3519 case OP_STOREI2_MEMBASE_IMM:
3520 case OP_STOREI4_MEMBASE_IMM:
/* First load the immediate, then re-lower as a *_MEMBASE_REG store. */
3521 ADD_NEW_INS (cfg, temp, OP_ICONST);
3522 temp->inst_c0 = ins->inst_imm;
3523 temp->dreg = mono_alloc_ireg (cfg);
3524 ins->sreg1 = temp->dreg;
3525 ins->opcode = map_to_reg_reg_op (ins->opcode);
3527 goto loop_start; /* make it handle the possibly big ins->inst_offset */
3530 gboolean swap = FALSE;
3534 /* Optimized away */
3539 /* Some fp compares require swapped operands */
3540 switch (ins->next->opcode) {
3542 ins->next->opcode = OP_FBLT;
3546 ins->next->opcode = OP_FBLT_UN;
3550 ins->next->opcode = OP_FBGE;
3554 ins->next->opcode = OP_FBGE_UN;
3562 ins->sreg1 = ins->sreg2;
3571 bb->last_ins = last_ins;
3572 bb->max_vreg = cfg->next_vreg;
/*
 * mono_arch_decompose_long_opts:
 *   Decompose 64-bit opcodes the generic decomposer leaves alone.  LNEG is
 *   expanded into RSBS/RSC against zero over the low (dreg+1/sreg1+1) and
 *   high (dreg+2/sreg1+2) word vregs, so the carry propagates correctly.
 *   NOTE(review): the NULLIFY_INS tail of the LNEG branch is elided here.
 */
3576 mono_arch_decompose_long_opts (MonoCompile *cfg, MonoInst *long_ins)
3580 if (long_ins->opcode == OP_LNEG) {
3582 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ARM_RSBS_IMM, ins->dreg + 1, ins->sreg1 + 1, 0);
3583 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ARM_RSC_IMM, ins->dreg + 2, ins->sreg1 + 2, 0);
/*
 * emit_float_to_int:
 *   Emit VFP code converting the double in SREG to an integer of SIZE bytes
 *   in DREG.  A scratch VFP register holds the truncated value
 *   (TOSIZD/TOUIZD), which is then moved to the core register and
 *   masked/sign-extended with shifts for sub-word sizes.
 *   NOTE(review): the is_signed branch structure is partially elided.
 */
3589 emit_float_to_int (MonoCompile *cfg, guchar *code, int dreg, int sreg, int size, gboolean is_signed)
3591 /* sreg is a float, dreg is an integer reg */
3593 code = mono_arm_emit_vfp_scratch_save (cfg, code, vfp_scratch1);
3595 ARM_TOSIZD (code, vfp_scratch1, sreg);
3597 ARM_TOUIZD (code, vfp_scratch1, sreg);
3598 ARM_FMRS (code, dreg, vfp_scratch1);
3599 code = mono_arm_emit_vfp_scratch_restore (cfg, code, vfp_scratch1);
/* Unsigned narrowing: mask to 8 bits, or zero-extend 16 bits via shifts. */
3603 ARM_AND_REG_IMM8 (code, dreg, dreg, 0xff);
3604 else if (size == 2) {
3605 ARM_SHL_IMM (code, dreg, dreg, 16);
3606 ARM_SHR_IMM (code, dreg, dreg, 16);
/* Signed narrowing: shift left then arithmetic shift right. */
3610 ARM_SHL_IMM (code, dreg, dreg, 24);
3611 ARM_SAR_IMM (code, dreg, dreg, 24);
3612 } else if (size == 2) {
3613 ARM_SHL_IMM (code, dreg, dreg, 16);
3614 ARM_SAR_IMM (code, dreg, dreg, 16);
/*
 * emit_r4_to_int:
 *   Single-precision variant of emit_float_to_int: convert the float in
 *   SREG to an integer of SIZE bytes in DREG using TOSIZS/TOUIZS, then
 *   narrow with the same mask/shift sequences.
 */
3621 emit_r4_to_int (MonoCompile *cfg, guchar *code, int dreg, int sreg, int size, gboolean is_signed)
3623 /* sreg is a float, dreg is an integer reg */
3625 code = mono_arm_emit_vfp_scratch_save (cfg, code, vfp_scratch1);
3627 ARM_TOSIZS (code, vfp_scratch1, sreg);
3629 ARM_TOUIZS (code, vfp_scratch1, sreg);
3630 ARM_FMRS (code, dreg, vfp_scratch1);
3631 code = mono_arm_emit_vfp_scratch_restore (cfg, code, vfp_scratch1);
/* Unsigned narrowing. */
3635 ARM_AND_REG_IMM8 (code, dreg, dreg, 0xff);
3636 else if (size == 2) {
3637 ARM_SHL_IMM (code, dreg, dreg, 16);
3638 ARM_SHR_IMM (code, dreg, dreg, 16);
/* Signed narrowing. */
3642 ARM_SHL_IMM (code, dreg, dreg, 24);
3643 ARM_SAR_IMM (code, dreg, dreg, 24);
3644 } else if (size == 2) {
3645 ARM_SHL_IMM (code, dreg, dreg, 16);
3646 ARM_SAR_IMM (code, dreg, dreg, 16);
3652 #endif /* #ifndef DISABLE_JIT */
3656 const guchar *target;
3661 #define is_call_imm(diff) ((gint)(diff) >= -33554432 && (gint)(diff) <= 33554431)
/*
 * search_thunk_slot:
 *   mono_code_manager_foreach callback.  Scan the code chunk at DATA for an
 *   existing 12-byte branch thunk targeting pdata->target, or an empty slot
 *   in which to emit one; on success patch pdata->code to branch through
 *   the thunk and record the result in pdata->found.  Returns early when
 *   this chunk is out of BL range of the call site.
 *   NOTE(review): listing is elided — returns, thunk-pointer advance and
 *   closing braces are missing from view.
 */
3664 search_thunk_slot (void *data, int csize, int bsize, void *user_data) {
3665 PatchData *pdata = (PatchData*)user_data;
3666 guchar *code = data;
3667 guint32 *thunks = data;
3668 guint32 *endthunks = (guint32*)(code + bsize);
3670 int difflow, diffhigh;
3672 /* always ensure a call from pdata->code can reach to the thunks without further thunks */
3673 difflow = (char*)pdata->code - (char*)thunks;
3674 diffhigh = (char*)pdata->code - (char*)endthunks;
3675 if (!((is_call_imm (thunks) && is_call_imm (endthunks)) || (is_call_imm (difflow) && is_call_imm (diffhigh))))
3679 * The thunk is composed of 3 words:
3680 * load constant from thunks [2] into ARM_IP
3683 * Note that the LR register is already setup
3685 //g_print ("thunk nentries: %d\n", ((char*)endthunks - (char*)thunks)/16);
/* found == 2 means "use any reachable chunk"; otherwise only the chunk
 * containing the call site is searched. */
3686 if ((pdata->found == 2) || (pdata->code >= code && pdata->code <= code + csize)) {
3687 while (thunks < endthunks) {
3688 //g_print ("looking for target: %p at %p (%08x-%08x)\n", pdata->target, thunks, thunks [0], thunks [1]);
3689 if (thunks [2] == (guint32)pdata->target) {
3690 arm_patch (pdata->code, (guchar*)thunks);
3691 mono_arch_flush_icache (pdata->code, 4);
3694 } else if ((thunks [0] == 0) && (thunks [1] == 0) && (thunks [2] == 0)) {
3695 /* found a free slot instead: emit thunk */
3696 /* ARMREG_IP is fine to use since this can't be an IMT call
3699 code = (guchar*)thunks;
3700 ARM_LDR_IMM (code, ARMREG_IP, ARMREG_PC, 0);
3701 if (thumb_supported)
3702 ARM_BX (code, ARMREG_IP);
3704 ARM_MOV_REG_REG (code, ARMREG_PC, ARMREG_IP);
3705 thunks [2] = (guint32)pdata->target;
3706 mono_arch_flush_icache ((guchar*)thunks, 12);
3708 arm_patch (pdata->code, (guchar*)thunks);
3709 mono_arch_flush_icache (pdata->code, 4);
3713 /* skip 12 bytes, the size of the thunk */
3717 //g_print ("failed thunk lookup for %p from %p at %p (%d entries)\n", pdata->target, pdata->code, data, count);
/*
 * handle_thunk:
 *   Patch CODE to reach TARGET through a branch thunk when the displacement
 *   does not fit a direct BL.  Searches, in order: the dynamic-method code
 *   manager (when given), the domain's code chunks near the call site, any
 *   reachable chunk (found == 2 retry), and finally every dynamic method's
 *   code manager.  Aborts if no thunk slot can be found or created.
 *   NOTE(review): listing is elided — pdata initialization, the found == 2
 *   retry setup and some braces are missing from view.
 */
3723 handle_thunk (MonoDomain *domain, int absolute, guchar *code, const guchar *target, MonoCodeManager *dyn_code_mp)
3728 domain = mono_domain_get ();
3731 pdata.target = target;
3732 pdata.absolute = absolute;
3736 mono_code_manager_foreach (dyn_code_mp, search_thunk_slot, &pdata);
3739 if (pdata.found != 1) {
3740 mono_domain_lock (domain);
3741 mono_domain_code_foreach (domain, search_thunk_slot, &pdata);
3744 /* this uses the first available slot */
3746 mono_domain_code_foreach (domain, search_thunk_slot, &pdata);
3748 mono_domain_unlock (domain);
3751 if (pdata.found != 1) {
3753 GHashTableIter iter;
3754 MonoJitDynamicMethodInfo *ji;
3757 * This might be a dynamic method, search its code manager. We can only
3758 * use the dynamic method containing CODE, since the others might be freed later.
3762 mono_domain_lock (domain);
3763 hash = domain_jit_info (domain)->dynamic_code_hash;
3765 /* FIXME: Speed this up */
3766 g_hash_table_iter_init (&iter, hash);
3767 while (g_hash_table_iter_next (&iter, NULL, (gpointer*)&ji)) {
3768 mono_code_manager_foreach (ji->code_mp, search_thunk_slot, &pdata);
3769 if (pdata.found == 1)
3773 mono_domain_unlock (domain);
3775 if (pdata.found != 1)
3776 g_print ("thunk failed for %p from %p\n", target, code);
3777 g_assert (pdata.found == 1);
/*
 * arm_patch_general:
 *   Patch the instruction (sequence) at CODE to transfer control to TARGET.
 *   Handles: direct B/BL (prim == 5, rewriting the 24-bit displacement and
 *   converting BL->BLX for Thumb targets, falling back to handle_thunk when
 *   out of range), jump-table entries (USE_JUMP_TABLES), and the several
 *   load-constant-then-BX/BLX/mov-pc call sequences, where the embedded
 *   address constant is overwritten instead.
 *   NOTE(review): listing is elided — declarations (tbits), the ccode[]
 *   template arrays, returns and some braces are missing from view.
 */
3781 arm_patch_general (MonoDomain *domain, guchar *code, const guchar *target, MonoCodeManager *dyn_code_mp)
3783 guint32 *code32 = (void*)code;
3784 guint32 ins = *code32;
3785 guint32 prim = (ins >> 25) & 7;
3786 guint32 tval = GPOINTER_TO_UINT (target);
3788 //g_print ("patching 0x%08x (0x%08x) to point to 0x%08x\n", code, ins, target);
3789 if (prim == 5) { /* 101b */
3790 /* the diff starts 8 bytes from the branch opcode */
3791 gint diff = target - code - 8;
3793 gint tmask = 0xffffffff;
3794 if (tval & 1) { /* entering thumb mode */
3795 diff = target - 1 - code - 8;
3796 g_assert (thumb_supported);
3797 tbits = 0xf << 28; /* bl->blx bit pattern */
3798 g_assert ((ins & (1 << 24))); /* it must be a bl, not b instruction */
3799 /* this low bit of the displacement is moved to bit 24 in the instruction encoding */
3803 tmask = ~(1 << 24); /* clear the link bit */
3804 /*g_print ("blx to thumb: target: %p, code: %p, diff: %d, mask: %x\n", target, code, diff, tmask);*/
/* Forward branch: displacement must fit 24 bits (+/-32MB range). */
3809 if (diff <= 33554431) {
3811 ins = (ins & 0xff000000) | diff;
3813 *code32 = ins | tbits;
3817 /* diff between 0 and -33554432 */
3818 if (diff >= -33554432) {
3820 ins = (ins & 0xff000000) | (diff & ~0xff000000);
3822 *code32 = ins | tbits;
/* Out of direct-branch range: route the call through a thunk. */
3827 handle_thunk (domain, TRUE, code, target, dyn_code_mp);
3831 #ifdef USE_JUMP_TABLES
3833 gpointer *jte = mono_jumptable_get_entry (code);
3835 jte [0] = (gpointer) target;
3839 * The alternative call sequences looks like this:
3841 * ldr ip, [pc] // loads the address constant
3842 * b 1f // jumps around the constant
3843 * address constant embedded in the code
3848 * There are two cases for patching:
3849 * a) at the end of method emission: in this case code points to the start
3850 * of the call sequence
3851 * b) during runtime patching of the call site: in this case code points
3852 * to the mov pc, ip instruction
3854 * We have to handle also the thunk jump code sequence:
3858 * address constant // execution never reaches here
3860 if ((ins & 0x0ffffff0) == 0x12fff10) {
3861 /* Branch and exchange: the address is constructed in a reg
3862 * We can patch BX when the code sequence is the following:
3863 * ldr ip, [pc, #0] ; 0x8
3870 guint8 *emit = (guint8*)ccode;
3871 ARM_LDR_IMM (emit, ARMREG_IP, ARMREG_PC, 0);
3873 ARM_MOV_REG_REG (emit, ARMREG_LR, ARMREG_PC);
3874 ARM_BX (emit, ARMREG_IP);
3876 /*patching from magic trampoline*/
3877 if (ins == ccode [3]) {
3878 g_assert (code32 [-4] == ccode [0]);
3879 g_assert (code32 [-3] == ccode [1]);
3880 g_assert (code32 [-1] == ccode [2]);
/* Overwrite the embedded address constant, not the instruction. */
3881 code32 [-2] = (guint32)target;
3884 /*patching from JIT*/
3885 if (ins == ccode [0]) {
3886 g_assert (code32 [1] == ccode [1]);
3887 g_assert (code32 [3] == ccode [2]);
3888 g_assert (code32 [4] == ccode [3]);
3889 code32 [2] = (guint32)target;
3892 g_assert_not_reached ();
3893 } else if ((ins & 0x0ffffff0) == 0x12fff30) {
/* BLX reg sequence: ldr ip, [pc, #0]; b 1f; constant; 1: blx ip */
3901 guint8 *emit = (guint8*)ccode;
3902 ARM_LDR_IMM (emit, ARMREG_IP, ARMREG_PC, 0);
3904 ARM_BLX_REG (emit, ARMREG_IP);
3906 g_assert (code32 [-3] == ccode [0]);
3907 g_assert (code32 [-2] == ccode [1]);
3908 g_assert (code32 [0] == ccode [2]);
3910 code32 [-1] = (guint32)target;
3913 guint32 *tmp = ccode;
3914 guint8 *emit = (guint8*)tmp;
3915 ARM_LDR_IMM (emit, ARMREG_IP, ARMREG_PC, 0);
3916 ARM_MOV_REG_REG (emit, ARMREG_LR, ARMREG_PC);
3917 ARM_MOV_REG_REG (emit, ARMREG_PC, ARMREG_IP);
3918 ARM_BX (emit, ARMREG_IP);
3919 if (ins == ccode [2]) {
3920 g_assert_not_reached (); // should be -2 ...
3921 code32 [-1] = (guint32)target;
3924 if (ins == ccode [0]) {
3925 /* handles both thunk jump code and the far call sequence */
3926 code32 [2] = (guint32)target;
3929 g_assert_not_reached ();
3931 // g_print ("patched with 0x%08x\n", ins);
3936 arm_patch (guchar *code, const guchar *target)
3938 arm_patch_general (NULL, code, target, NULL);
3942 * Return the >= 0 uimm8 value if val can be represented with a byte + rotation
3943 * (with the rotation amount in *rot_amount. rot_amount is already adjusted
3944 * to be used with the emit macros.
3945 * Return -1 otherwise.
3948 mono_arm_is_rotated_imm8 (guint32 val, gint *rot_amount)
3951 for (i = 0; i < 31; i+= 2) {
3952 res = (val << (32 - i)) | (val >> i);
3955 *rot_amount = i? 32 - i: 0;
3962 * Emits in code a sequence of instructions that load the value 'val'
3963 * into the dreg register. Uses at most 4 instructions.
/*
 * mono_arm_emit_load_imm:
 *   Emit at most 4 instructions loading the 32-bit constant VAL into DREG.
 *   Strategies, in order: single rotated-imm8 MOV, MVN of the complement,
 *   MOVW/MOVT pair (when available), else build the value byte by byte with
 *   MOV + up to three rotated ADDs.  Returns the advanced code pointer.
 *   NOTE(review): listing is elided — the #ifdef structure (constant-pool /
 *   MOVW-MOVT / byte-wise paths) and closing braces are missing from view.
 */
3966 mono_arm_emit_load_imm (guint8 *code, int dreg, guint32 val)
3968 int imm8, rot_amount;
/* Constant-pool variant: PC-relative load of the embedded word. */
3970 ARM_LDR_IMM (code, dreg, ARMREG_PC, 0);
3971 /* skip the constant pool */
3977 if ((imm8 = mono_arm_is_rotated_imm8 (val, &rot_amount)) >= 0) {
3978 ARM_MOV_REG_IMM (code, dreg, imm8, rot_amount);
3979 } else if ((imm8 = mono_arm_is_rotated_imm8 (~val, &rot_amount)) >= 0) {
3980 ARM_MVN_REG_IMM (code, dreg, imm8, rot_amount);
/* ARMv6T2+ MOVW/MOVT: two instructions for any 32-bit value. */
3983 ARM_MOVW_REG_IMM (code, dreg, val & 0xffff);
3985 ARM_MOVT_REG_IMM (code, dreg, (val >> 16) & 0xffff);
/* Byte-wise construction: MOV of the lowest set byte, then rotated ADDs. */
3989 ARM_MOV_REG_IMM8 (code, dreg, (val & 0xFF));
3991 ARM_ADD_REG_IMM (code, dreg, dreg, (val & 0xFF00) >> 8, 24);
3993 if (val & 0xFF0000) {
3994 ARM_ADD_REG_IMM (code, dreg, dreg, (val & 0xFF0000) >> 16, 16);
3996 if (val & 0xFF000000) {
3997 ARM_ADD_REG_IMM (code, dreg, dreg, (val & 0xFF000000) >> 24, 8);
3999 } else if (val & 0xFF00) {
4000 ARM_MOV_REG_IMM (code, dreg, (val & 0xFF00) >> 8, 24);
4001 if (val & 0xFF0000) {
4002 ARM_ADD_REG_IMM (code, dreg, dreg, (val & 0xFF0000) >> 16, 16);
4004 if (val & 0xFF000000) {
4005 ARM_ADD_REG_IMM (code, dreg, dreg, (val & 0xFF000000) >> 24, 8);
4007 } else if (val & 0xFF0000) {
4008 ARM_MOV_REG_IMM (code, dreg, (val & 0xFF0000) >> 16, 16);
4009 if (val & 0xFF000000) {
4010 ARM_ADD_REG_IMM (code, dreg, dreg, (val & 0xFF000000) >> 24, 8);
4013 //g_assert_not_reached ();
/*
 * mono_arm_thumb_supported:
 * Return whether the Thumb instruction set is available, as cached in the
 * file-scope thumb_supported flag (set elsewhere in this file).
 */
4019 mono_arm_thumb_supported (void)
4021 	return thumb_supported;
4027 * emit_load_volatile_arguments:
4029 * Load volatile arguments from the stack to the original input registers.
4030 * Required before a tail call.
/*
 * emit_load_volatile_arguments:
 * Reload METHOD's incoming arguments from their stack homes back into the
 * original input registers, mirroring (in reverse) what emit_prolog spilled.
 * Returns the advanced code pointer. Used before a tail call (see header
 * comment above).
 */
4033 emit_load_volatile_arguments (MonoCompile *cfg, guint8 *code)
4035 	MonoMethod *method = cfg->method;
4036 	MonoMethodSignature *sig;
4041 	/* FIXME: Generate intermediate code instead */
4043 	sig = mono_method_signature (method);
4045 	/* This is the opposite of the code in emit_prolog */
/* recompute the ABI classification of each argument for this signature */
4049 	cinfo = get_call_info (cfg->generic_sharing_context, NULL, sig);
/* reload the hidden valuetype-return address into its ABI register */
4051 	if (cinfo->vtype_retaddr) {
4052 		ArgInfo *ainfo = &cinfo->ret;
4053 		inst = cfg->vret_addr;
4054 		g_assert (arm_is_imm12 (inst->inst_offset));
4055 		ARM_LDR_IMM (code, ainfo->reg, inst->inst_basereg, inst->inst_offset);
/* walk every formal argument (plus the implicit this, if any) */
4057 	for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
4058 		ArgInfo *ainfo = cinfo->args + i;
4059 		inst = cfg->args [pos];
4061 		if (cfg->verbose_level > 2)
4062 			g_print ("Loading argument %d (type: %d)\n", i, ainfo->storage);
/* argument was allocated to a register by the regalloc */
4063 		if (inst->opcode == OP_REGVAR) {
4064 			if (ainfo->storage == RegTypeGeneral)
4065 				ARM_MOV_REG_REG (code, inst->dreg, ainfo->reg);
4066 			else if (ainfo->storage == RegTypeFP) {
4067 				g_assert_not_reached ();
/* incoming argument lives in the caller's outgoing stack area */
4068 			} else if (ainfo->storage == RegTypeBase) {
4072 				if (arm_is_imm12 (prev_sp_offset + ainfo->offset)) {
4073 					ARM_LDR_IMM (code, inst->dreg, ARMREG_SP, (prev_sp_offset + ainfo->offset));
/* offset too large for an imm12 — materialize it in IP first */
4075 					code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
4076 					ARM_LDR_REG_REG (code, inst->dreg, ARMREG_SP, ARMREG_IP);
4080 				g_assert_not_reached ();
/* argument was spilled to the frame: reload into its input register(s) */
4082 			if (ainfo->storage == RegTypeGeneral || ainfo->storage == RegTypeIRegPair) {
4083 				switch (ainfo->size) {
/* 8-byte case: reload both halves into the register pair */
4090 					g_assert (arm_is_imm12 (inst->inst_offset));
4091 					ARM_LDR_IMM (code, ainfo->reg, inst->inst_basereg, inst->inst_offset);
4092 					g_assert (arm_is_imm12 (inst->inst_offset + 4));
4093 					ARM_LDR_IMM (code, ainfo->reg + 1, inst->inst_basereg, inst->inst_offset + 4);
/* word-sized case, with the usual imm12 / scratch-IP split */
4096 					if (arm_is_imm12 (inst->inst_offset)) {
4097 						ARM_LDR_IMM (code, ainfo->reg, inst->inst_basereg, inst->inst_offset);
4099 						code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
4100 						ARM_LDR_REG_REG (code, ainfo->reg, inst->inst_basereg, ARMREG_IP);
/* bodies of the following storage kinds are elided in this view —
 * NOTE(review): confirm against emit_prolog's handling of the same kinds */
4104 			} else if (ainfo->storage == RegTypeBaseGen) {
4107 			} else if (ainfo->storage == RegTypeBase) {
4109 			} else if (ainfo->storage == RegTypeFP) {
4110 				g_assert_not_reached ();
/* struct passed by value in registers: copy it back word by word */
4111 			} else if (ainfo->storage == RegTypeStructByVal) {
4112 				int doffset = inst->inst_offset;
4116 				if (mono_class_from_mono_type (inst->inst_vtype))
4117 					size = mono_class_native_size (mono_class_from_mono_type (inst->inst_vtype), NULL);
4118 				for (cur_reg = 0; cur_reg < ainfo->size; ++cur_reg) {
4119 					if (arm_is_imm12 (doffset)) {
4120 						ARM_LDR_IMM (code, ainfo->reg + cur_reg, inst->inst_basereg, doffset);
4122 						code = mono_arm_emit_load_imm (code, ARMREG_IP, doffset);
4123 						ARM_LDR_REG_REG (code, ainfo->reg + cur_reg, inst->inst_basereg, ARMREG_IP);
4125 					soffset += sizeof (gpointer);
4126 					doffset += sizeof (gpointer);
4131 			} else if (ainfo->storage == RegTypeStructByAddr) {
4146 mono_arch_output_basic_block (MonoCompile *cfg, MonoBasicBlock *bb)
4151 guint8 *code = cfg->native_code + cfg->code_len;
4152 MonoInst *last_ins = NULL;
4153 guint last_offset = 0;
4155 int imm8, rot_amount;
4157 /* we don't align basic blocks of loops on arm */
4159 if (cfg->verbose_level > 2)
4160 g_print ("Basic block %d starting at offset 0x%x\n", bb->block_num, bb->native_offset);
4162 cpos = bb->max_offset;
4164 if (cfg->prof_options & MONO_PROFILE_COVERAGE) {
4165 //MonoCoverageInfo *cov = mono_get_coverage_info (cfg->method);
4166 //g_assert (!mono_compile_aot);
4169 // cov->data [bb->dfn].iloffset = bb->cil_code - cfg->cil_code;
4170 			/* this is not thread safe, but good enough */
4171 			/* FIXME: how to handle overflows? */
4172 //x86_inc_mem (code, &cov->data [bb->dfn].count);
4175 if (mono_break_at_bb_method && mono_method_desc_full_match (mono_break_at_bb_method, cfg->method) && bb->block_num == mono_break_at_bb_bb_num) {
4176 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_INTERNAL_METHOD,
4177 (gpointer)"mono_break");
4178 code = emit_call_seq (cfg, code);
4181 MONO_BB_FOR_EACH_INS (bb, ins) {
4182 offset = code - cfg->native_code;
4184 max_len = ((guint8 *)ins_get_spec (ins->opcode))[MONO_INST_LEN];
4186 if (offset > (cfg->code_size - max_len - 16)) {
4187 cfg->code_size *= 2;
4188 cfg->native_code = g_realloc (cfg->native_code, cfg->code_size);
4189 code = cfg->native_code + offset;
4191 // if (ins->cil_code)
4192 // g_print ("cil code\n");
4193 mono_debug_record_line_number (cfg, ins, offset);
4195 switch (ins->opcode) {
4196 case OP_MEMORY_BARRIER:
4198 ARM_MOV_REG_IMM8 (code, ARMREG_R0, 0);
4199 ARM_MCR (code, 15, 0, ARMREG_R0, 7, 10, 5);
4203 #ifdef HAVE_AEABI_READ_TP
4204 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_INTERNAL_METHOD,
4205 (gpointer)"__aeabi_read_tp");
4206 code = emit_call_seq (cfg, code);
4208 ARM_LDR_IMM (code, ins->dreg, ARMREG_R0, ins->inst_offset);
4210 g_assert_not_reached ();
4213 case OP_ATOMIC_EXCHANGE_I4:
4214 case OP_ATOMIC_CAS_I4:
4215 case OP_ATOMIC_ADD_I4: {
4219 g_assert (v7_supported);
4222 if (ins->sreg1 != ARMREG_IP && ins->sreg2 != ARMREG_IP && ins->sreg3 != ARMREG_IP)
4224 else if (ins->sreg1 != ARMREG_R0 && ins->sreg2 != ARMREG_R0 && ins->sreg3 != ARMREG_R0)
4226 else if (ins->sreg1 != ARMREG_R1 && ins->sreg2 != ARMREG_R1 && ins->sreg3 != ARMREG_R1)
4230 g_assert (cfg->arch.atomic_tmp_offset != -1);
4231 ARM_STR_IMM (code, tmpreg, cfg->frame_reg, cfg->arch.atomic_tmp_offset);
4233 switch (ins->opcode) {
4234 case OP_ATOMIC_EXCHANGE_I4:
4236 ARM_DMB (code, ARM_DMB_SY);
4237 ARM_LDREX_REG (code, ARMREG_LR, ins->sreg1);
4238 ARM_STREX_REG (code, tmpreg, ins->sreg2, ins->sreg1);
4239 ARM_CMP_REG_IMM (code, tmpreg, 0, 0);
4241 ARM_B_COND (code, ARMCOND_NE, 0);
4242 arm_patch (buf [1], buf [0]);
4244 case OP_ATOMIC_CAS_I4:
4245 ARM_DMB (code, ARM_DMB_SY);
4247 ARM_LDREX_REG (code, ARMREG_LR, ins->sreg1);
4248 ARM_CMP_REG_REG (code, ARMREG_LR, ins->sreg3);
4250 ARM_B_COND (code, ARMCOND_NE, 0);
4251 ARM_STREX_REG (code, tmpreg, ins->sreg2, ins->sreg1);
4252 ARM_CMP_REG_IMM (code, tmpreg, 0, 0);
4254 ARM_B_COND (code, ARMCOND_NE, 0);
4255 arm_patch (buf [2], buf [0]);
4256 arm_patch (buf [1], code);
4258 case OP_ATOMIC_ADD_I4:
4260 ARM_DMB (code, ARM_DMB_SY);
4261 ARM_LDREX_REG (code, ARMREG_LR, ins->sreg1);
4262 ARM_ADD_REG_REG (code, ARMREG_LR, ARMREG_LR, ins->sreg2);
4263 ARM_STREX_REG (code, tmpreg, ARMREG_LR, ins->sreg1);
4264 ARM_CMP_REG_IMM (code, tmpreg, 0, 0);
4266 ARM_B_COND (code, ARMCOND_NE, 0);
4267 arm_patch (buf [1], buf [0]);
4270 g_assert_not_reached ();
4273 ARM_DMB (code, ARM_DMB_SY);
4274 if (tmpreg != ins->dreg)
4275 ARM_LDR_IMM (code, tmpreg, cfg->frame_reg, cfg->arch.atomic_tmp_offset);
4276 ARM_MOV_REG_REG (code, ins->dreg, ARMREG_LR);
4279 case OP_ATOMIC_LOAD_I1:
4280 case OP_ATOMIC_LOAD_U1:
4281 case OP_ATOMIC_LOAD_I2:
4282 case OP_ATOMIC_LOAD_U2:
4283 case OP_ATOMIC_LOAD_I4:
4284 case OP_ATOMIC_LOAD_U4:
4285 case OP_ATOMIC_LOAD_R4:
4286 case OP_ATOMIC_LOAD_R8: {
4287 if (ins->backend.memory_barrier_kind == MONO_MEMORY_BARRIER_SEQ)
4288 ARM_DMB (code, ARM_DMB_SY);
4290 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_offset);
4292 switch (ins->opcode) {
4293 case OP_ATOMIC_LOAD_I1:
4294 ARM_LDRSB_REG_REG (code, ins->dreg, ins->inst_basereg, ARMREG_LR);
4296 case OP_ATOMIC_LOAD_U1:
4297 ARM_LDRB_REG_REG (code, ins->dreg, ins->inst_basereg, ARMREG_LR);
4299 case OP_ATOMIC_LOAD_I2:
4300 ARM_LDRSH_REG_REG (code, ins->dreg, ins->inst_basereg, ARMREG_LR);
4302 case OP_ATOMIC_LOAD_U2:
4303 ARM_LDRH_REG_REG (code, ins->dreg, ins->inst_basereg, ARMREG_LR);
4305 case OP_ATOMIC_LOAD_I4:
4306 case OP_ATOMIC_LOAD_U4:
4307 ARM_LDR_REG_REG (code, ins->dreg, ins->inst_basereg, ARMREG_LR);
4309 case OP_ATOMIC_LOAD_R4:
4310 code = mono_arm_emit_vfp_scratch_save (cfg, code, vfp_scratch1);
4311 ARM_FLDS (code, vfp_scratch1, ARMREG_LR, 0);
4312 ARM_CVTS (code, ins->dreg, vfp_scratch1);
4313 code = mono_arm_emit_vfp_scratch_restore (cfg, code, vfp_scratch1);
4315 case OP_ATOMIC_LOAD_R8:
4316 ARM_ADD_REG_REG (code, ARMREG_LR, ins->inst_basereg, ARMREG_LR);
4317 ARM_FLDD (code, ins->dreg, ARMREG_LR, 0);
4321 ARM_DMB (code, ARM_DMB_SY);
4324 case OP_ATOMIC_STORE_I1:
4325 case OP_ATOMIC_STORE_U1:
4326 case OP_ATOMIC_STORE_I2:
4327 case OP_ATOMIC_STORE_U2:
4328 case OP_ATOMIC_STORE_I4:
4329 case OP_ATOMIC_STORE_U4:
4330 case OP_ATOMIC_STORE_R4:
4331 case OP_ATOMIC_STORE_R8: {
4332 ARM_DMB (code, ARM_DMB_SY);
4334 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_offset);
4336 switch (ins->opcode) {
4337 case OP_ATOMIC_STORE_I1:
4338 case OP_ATOMIC_STORE_U1:
4339 ARM_STRB_REG_REG (code, ins->sreg1, ins->inst_destbasereg, ARMREG_LR);
4341 case OP_ATOMIC_STORE_I2:
4342 case OP_ATOMIC_STORE_U2:
4343 ARM_STRH_REG_REG (code, ins->sreg1, ins->inst_destbasereg, ARMREG_LR);
4345 case OP_ATOMIC_STORE_I4:
4346 case OP_ATOMIC_STORE_U4:
4347 ARM_STR_REG_REG (code, ins->sreg1, ins->inst_destbasereg, ARMREG_LR);
4349 case OP_ATOMIC_STORE_R4:
4350 code = mono_arm_emit_vfp_scratch_save (cfg, code, vfp_scratch1);
4351 ARM_CVTD (code, vfp_scratch1, ins->sreg1);
4352 ARM_FSTS (code, vfp_scratch1, ins->inst_destbasereg, ins->inst_offset);
4353 code = mono_arm_emit_vfp_scratch_restore (cfg, code, vfp_scratch1);
4355 case OP_ATOMIC_STORE_R8:
4356 ARM_ADD_REG_REG (code, ARMREG_LR, ins->inst_destbasereg, ARMREG_LR);
4357 ARM_FSTD (code, ins->sreg1, ARMREG_LR, 0);
4361 if (ins->backend.memory_barrier_kind == MONO_MEMORY_BARRIER_SEQ)
4362 ARM_DMB (code, ARM_DMB_SY);
4366 ppc_mullw (code, ppc_r4, ins->sreg1, ins->sreg2);
4367 ppc_mulhw (code, ppc_r3, ins->sreg1, ins->sreg2);
4370 ppc_mullw (code, ppc_r4, ins->sreg1, ins->sreg2);
4371 ppc_mulhwu (code, ppc_r3, ins->sreg1, ins->sreg2);
4373 case OP_STOREI1_MEMBASE_IMM:
4374 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_imm & 0xFF);
4375 g_assert (arm_is_imm12 (ins->inst_offset));
4376 ARM_STRB_IMM (code, ARMREG_LR, ins->inst_destbasereg, ins->inst_offset);
4378 case OP_STOREI2_MEMBASE_IMM:
4379 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_imm & 0xFFFF);
4380 g_assert (arm_is_imm8 (ins->inst_offset));
4381 ARM_STRH_IMM (code, ARMREG_LR, ins->inst_destbasereg, ins->inst_offset);
4383 case OP_STORE_MEMBASE_IMM:
4384 case OP_STOREI4_MEMBASE_IMM:
4385 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_imm);
4386 g_assert (arm_is_imm12 (ins->inst_offset));
4387 ARM_STR_IMM (code, ARMREG_LR, ins->inst_destbasereg, ins->inst_offset);
4389 case OP_STOREI1_MEMBASE_REG:
4390 g_assert (arm_is_imm12 (ins->inst_offset));
4391 ARM_STRB_IMM (code, ins->sreg1, ins->inst_destbasereg, ins->inst_offset);
4393 case OP_STOREI2_MEMBASE_REG:
4394 g_assert (arm_is_imm8 (ins->inst_offset));
4395 ARM_STRH_IMM (code, ins->sreg1, ins->inst_destbasereg, ins->inst_offset);
4397 case OP_STORE_MEMBASE_REG:
4398 case OP_STOREI4_MEMBASE_REG:
4399 /* this case is special, since it happens for spill code after lowering has been called */
4400 if (arm_is_imm12 (ins->inst_offset)) {
4401 ARM_STR_IMM (code, ins->sreg1, ins->inst_destbasereg, ins->inst_offset);
4403 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_offset);
4404 ARM_STR_REG_REG (code, ins->sreg1, ins->inst_destbasereg, ARMREG_LR);
4407 case OP_STOREI1_MEMINDEX:
4408 ARM_STRB_REG_REG (code, ins->sreg1, ins->inst_destbasereg, ins->sreg2);
4410 case OP_STOREI2_MEMINDEX:
4411 ARM_STRH_REG_REG (code, ins->sreg1, ins->inst_destbasereg, ins->sreg2);
4413 case OP_STORE_MEMINDEX:
4414 case OP_STOREI4_MEMINDEX:
4415 ARM_STR_REG_REG (code, ins->sreg1, ins->inst_destbasereg, ins->sreg2);
4418 g_assert_not_reached ();
4420 case OP_LOAD_MEMINDEX:
4421 case OP_LOADI4_MEMINDEX:
4422 case OP_LOADU4_MEMINDEX:
4423 ARM_LDR_REG_REG (code, ins->dreg, ins->inst_basereg, ins->sreg2);
4425 case OP_LOADI1_MEMINDEX:
4426 ARM_LDRSB_REG_REG (code, ins->dreg, ins->inst_basereg, ins->sreg2);
4428 case OP_LOADU1_MEMINDEX:
4429 ARM_LDRB_REG_REG (code, ins->dreg, ins->inst_basereg, ins->sreg2);
4431 case OP_LOADI2_MEMINDEX:
4432 ARM_LDRSH_REG_REG (code, ins->dreg, ins->inst_basereg, ins->sreg2);
4434 case OP_LOADU2_MEMINDEX:
4435 ARM_LDRH_REG_REG (code, ins->dreg, ins->inst_basereg, ins->sreg2);
4437 case OP_LOAD_MEMBASE:
4438 case OP_LOADI4_MEMBASE:
4439 case OP_LOADU4_MEMBASE:
4440 /* this case is special, since it happens for spill code after lowering has been called */
4441 if (arm_is_imm12 (ins->inst_offset)) {
4442 ARM_LDR_IMM (code, ins->dreg, ins->inst_basereg, ins->inst_offset);
4444 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_offset);
4445 ARM_LDR_REG_REG (code, ins->dreg, ins->inst_basereg, ARMREG_LR);
4448 case OP_LOADI1_MEMBASE:
4449 g_assert (arm_is_imm8 (ins->inst_offset));
4450 ARM_LDRSB_IMM (code, ins->dreg, ins->inst_basereg, ins->inst_offset);
4452 case OP_LOADU1_MEMBASE:
4453 g_assert (arm_is_imm12 (ins->inst_offset));
4454 ARM_LDRB_IMM (code, ins->dreg, ins->inst_basereg, ins->inst_offset);
4456 case OP_LOADU2_MEMBASE:
4457 g_assert (arm_is_imm8 (ins->inst_offset));
4458 ARM_LDRH_IMM (code, ins->dreg, ins->inst_basereg, ins->inst_offset);
4460 case OP_LOADI2_MEMBASE:
4461 g_assert (arm_is_imm8 (ins->inst_offset));
4462 ARM_LDRSH_IMM (code, ins->dreg, ins->inst_basereg, ins->inst_offset);
4464 case OP_ICONV_TO_I1:
4465 ARM_SHL_IMM (code, ins->dreg, ins->sreg1, 24);
4466 ARM_SAR_IMM (code, ins->dreg, ins->dreg, 24);
4468 case OP_ICONV_TO_I2:
4469 ARM_SHL_IMM (code, ins->dreg, ins->sreg1, 16);
4470 ARM_SAR_IMM (code, ins->dreg, ins->dreg, 16);
4472 case OP_ICONV_TO_U1:
4473 ARM_AND_REG_IMM8 (code, ins->dreg, ins->sreg1, 0xff);
4475 case OP_ICONV_TO_U2:
4476 ARM_SHL_IMM (code, ins->dreg, ins->sreg1, 16);
4477 ARM_SHR_IMM (code, ins->dreg, ins->dreg, 16);
4481 ARM_CMP_REG_REG (code, ins->sreg1, ins->sreg2);
4483 case OP_COMPARE_IMM:
4484 case OP_ICOMPARE_IMM:
4485 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
4486 g_assert (imm8 >= 0);
4487 ARM_CMP_REG_IMM (code, ins->sreg1, imm8, rot_amount);
4491 * gdb does not like encountering the hw breakpoint ins in the debugged code.
4492 			 * So instead of emitting a trap, we emit a call to a C function and place a
4495 //*(int*)code = 0xef9f0001;
4498 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_INTERNAL_METHOD,
4499 (gpointer)"mono_break");
4500 code = emit_call_seq (cfg, code);
4502 case OP_RELAXED_NOP:
4507 case OP_DUMMY_STORE:
4508 case OP_DUMMY_ICONST:
4509 case OP_DUMMY_R8CONST:
4510 case OP_NOT_REACHED:
4513 case OP_IL_SEQ_POINT:
4514 mono_add_seq_point (cfg, bb, ins, code - cfg->native_code);
4516 case OP_SEQ_POINT: {
4518 MonoInst *info_var = cfg->arch.seq_point_info_var;
4519 MonoInst *ss_trigger_page_var = cfg->arch.ss_trigger_page_var;
4520 MonoInst *ss_read_var = cfg->arch.seq_point_read_var;
4521 MonoInst *ss_method_var = cfg->arch.seq_point_ss_method_var;
4522 MonoInst *bp_method_var = cfg->arch.seq_point_bp_method_var;
4524 int dreg = ARMREG_LR;
4526 if (cfg->soft_breakpoints) {
4527 g_assert (!cfg->compile_aot);
4531 * For AOT, we use one got slot per method, which will point to a
4532 * SeqPointInfo structure, containing all the information required
4533 * by the code below.
4535 if (cfg->compile_aot) {
4536 g_assert (info_var);
4537 g_assert (info_var->opcode == OP_REGOFFSET);
4538 g_assert (arm_is_imm12 (info_var->inst_offset));
4541 if (!cfg->soft_breakpoints) {
4543 * Read from the single stepping trigger page. This will cause a
4544 * SIGSEGV when single stepping is enabled.
4545 * We do this _before_ the breakpoint, so single stepping after
4546 * a breakpoint is hit will step to the next IL offset.
4548 g_assert (((guint64)(gsize)ss_trigger_page >> 32) == 0);
4551 if (ins->flags & MONO_INST_SINGLE_STEP_LOC) {
4552 if (cfg->soft_breakpoints) {
4553 /* Load the address of the sequence point trigger variable. */
4556 g_assert (var->opcode == OP_REGOFFSET);
4557 g_assert (arm_is_imm12 (var->inst_offset));
4558 ARM_LDR_IMM (code, dreg, var->inst_basereg, var->inst_offset);
4560 /* Read the value and check whether it is non-zero. */
4561 ARM_LDR_IMM (code, dreg, dreg, 0);
4562 ARM_CMP_REG_IMM (code, dreg, 0, 0);
4564 /* Load the address of the sequence point method. */
4565 var = ss_method_var;
4567 g_assert (var->opcode == OP_REGOFFSET);
4568 g_assert (arm_is_imm12 (var->inst_offset));
4569 ARM_LDR_IMM (code, dreg, var->inst_basereg, var->inst_offset);
4571 /* Call it conditionally. */
4572 ARM_BLX_REG_COND (code, ARMCOND_NE, dreg);
4574 if (cfg->compile_aot) {
4575 /* Load the trigger page addr from the variable initialized in the prolog */
4576 var = ss_trigger_page_var;
4578 g_assert (var->opcode == OP_REGOFFSET);
4579 g_assert (arm_is_imm12 (var->inst_offset));
4580 ARM_LDR_IMM (code, dreg, var->inst_basereg, var->inst_offset);
4582 #ifdef USE_JUMP_TABLES
4583 gpointer *jte = mono_jumptable_add_entry ();
4584 code = mono_arm_load_jumptable_entry (code, jte, dreg);
4585 jte [0] = ss_trigger_page;
4587 ARM_LDR_IMM (code, dreg, ARMREG_PC, 0);
4589 *(int*)code = (int)ss_trigger_page;
4593 ARM_LDR_IMM (code, dreg, dreg, 0);
4597 mono_add_seq_point (cfg, bb, ins, code - cfg->native_code);
4599 if (cfg->soft_breakpoints) {
4600 /* Load the address of the breakpoint method into ip. */
4601 var = bp_method_var;
4603 g_assert (var->opcode == OP_REGOFFSET);
4604 g_assert (arm_is_imm12 (var->inst_offset));
4605 ARM_LDR_IMM (code, dreg, var->inst_basereg, var->inst_offset);
4608 * A placeholder for a possible breakpoint inserted by
4609 * mono_arch_set_breakpoint ().
4612 } else if (cfg->compile_aot) {
4613 guint32 offset = code - cfg->native_code;
4616 ARM_LDR_IMM (code, dreg, info_var->inst_basereg, info_var->inst_offset);
4617 /* Add the offset */
4618 val = ((offset / 4) * sizeof (guint8*)) + MONO_STRUCT_OFFSET (SeqPointInfo, bp_addrs);
4619 /* Load the info->bp_addrs [offset], which is either 0 or the address of a trigger page */
4620 if (arm_is_imm12 ((int)val)) {
4621 ARM_LDR_IMM (code, dreg, dreg, val);
4623 ARM_ADD_REG_IMM (code, dreg, dreg, (val & 0xFF), 0);
4625 ARM_ADD_REG_IMM (code, dreg, dreg, (val & 0xFF00) >> 8, 24);
4627 ARM_ADD_REG_IMM (code, dreg, dreg, (val & 0xFF0000) >> 16, 16);
4628 g_assert (!(val & 0xFF000000));
4630 ARM_LDR_IMM (code, dreg, dreg, 0);
4632 /* What is faster, a branch or a load ? */
4633 ARM_CMP_REG_IMM (code, dreg, 0, 0);
4634 /* The breakpoint instruction */
4635 ARM_LDR_IMM_COND (code, dreg, dreg, 0, ARMCOND_NE);
4638 * A placeholder for a possible breakpoint inserted by
4639 * mono_arch_set_breakpoint ().
4641 for (i = 0; i < 4; ++i)
4648 ARM_ADDS_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4651 ARM_ADD_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4655 ARM_ADCS_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4658 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
4659 g_assert (imm8 >= 0);
4660 ARM_ADDS_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
4664 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
4665 g_assert (imm8 >= 0);
4666 ARM_ADD_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
4670 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
4671 g_assert (imm8 >= 0);
4672 ARM_ADCS_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
4675 ARM_ADD_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4676 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, "OverflowException");
4678 case OP_IADD_OVF_UN:
4679 ARM_ADD_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4680 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, "OverflowException");
4683 ARM_SUB_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4684 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, "OverflowException");
4686 case OP_ISUB_OVF_UN:
4687 ARM_SUB_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4688 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_TRUE, PPC_BR_EQ, "OverflowException");
4690 case OP_ADD_OVF_CARRY:
4691 ARM_ADCS_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4692 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, "OverflowException");
4694 case OP_ADD_OVF_UN_CARRY:
4695 ARM_ADCS_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4696 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, "OverflowException");
4698 case OP_SUB_OVF_CARRY:
4699 ARM_SBCS_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4700 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, "OverflowException");
4702 case OP_SUB_OVF_UN_CARRY:
4703 ARM_SBCS_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4704 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_TRUE, PPC_BR_EQ, "OverflowException");
4708 ARM_SUBS_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4711 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
4712 g_assert (imm8 >= 0);
4713 ARM_SUBS_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
4716 ARM_SUB_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4720 ARM_SBCS_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4724 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
4725 g_assert (imm8 >= 0);
4726 ARM_SUB_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
4730 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
4731 g_assert (imm8 >= 0);
4732 ARM_SBCS_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
4734 case OP_ARM_RSBS_IMM:
4735 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
4736 g_assert (imm8 >= 0);
4737 ARM_RSBS_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
4739 case OP_ARM_RSC_IMM:
4740 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
4741 g_assert (imm8 >= 0);
4742 ARM_RSC_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
4745 ARM_AND_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4749 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
4750 g_assert (imm8 >= 0);
4751 ARM_AND_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
4754 g_assert (v7s_supported);
4755 ARM_SDIV (code, ins->dreg, ins->sreg1, ins->sreg2);
4758 g_assert (v7s_supported);
4759 ARM_UDIV (code, ins->dreg, ins->sreg1, ins->sreg2);
4762 g_assert (v7s_supported);
4763 ARM_SDIV (code, ARMREG_LR, ins->sreg1, ins->sreg2);
4764 ARM_MLS (code, ins->dreg, ARMREG_LR, ins->sreg2, ins->sreg1);
4767 g_assert (v7s_supported);
4768 ARM_UDIV (code, ARMREG_LR, ins->sreg1, ins->sreg2);
4769 ARM_MLS (code, ins->dreg, ARMREG_LR, ins->sreg2, ins->sreg1);
4773 g_assert_not_reached ();
4775 ARM_ORR_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4779 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
4780 g_assert (imm8 >= 0);
4781 ARM_ORR_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
4784 ARM_EOR_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4788 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
4789 g_assert (imm8 >= 0);
4790 ARM_EOR_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
4793 ARM_SHL_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4798 ARM_SHL_IMM (code, ins->dreg, ins->sreg1, (ins->inst_imm & 0x1f));
4799 else if (ins->dreg != ins->sreg1)
4800 ARM_MOV_REG_REG (code, ins->dreg, ins->sreg1);
4803 ARM_SAR_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4808 ARM_SAR_IMM (code, ins->dreg, ins->sreg1, (ins->inst_imm & 0x1f));
4809 else if (ins->dreg != ins->sreg1)
4810 ARM_MOV_REG_REG (code, ins->dreg, ins->sreg1);
4813 case OP_ISHR_UN_IMM:
4815 ARM_SHR_IMM (code, ins->dreg, ins->sreg1, (ins->inst_imm & 0x1f));
4816 else if (ins->dreg != ins->sreg1)
4817 ARM_MOV_REG_REG (code, ins->dreg, ins->sreg1);
4820 ARM_SHR_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4823 ARM_MVN_REG_REG (code, ins->dreg, ins->sreg1);
4826 ARM_RSB_REG_IMM8 (code, ins->dreg, ins->sreg1, 0);
4829 if (ins->dreg == ins->sreg2)
4830 ARM_MUL_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4832 ARM_MUL_REG_REG (code, ins->dreg, ins->sreg2, ins->sreg1);
4835 g_assert_not_reached ();
4838 /* FIXME: handle ovf/ sreg2 != dreg */
4839 ARM_MUL_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4840 /* FIXME: MUL doesn't set the C/O flags on ARM */
4842 case OP_IMUL_OVF_UN:
4843 /* FIXME: handle ovf/ sreg2 != dreg */
4844 ARM_MUL_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4845 /* FIXME: MUL doesn't set the C/O flags on ARM */
4848 code = mono_arm_emit_load_imm (code, ins->dreg, ins->inst_c0);
4851 /* Load the GOT offset */
4852 mono_add_patch_info (cfg, offset, (MonoJumpInfoType)ins->inst_i1, ins->inst_p0);
4853 ARM_LDR_IMM (code, ins->dreg, ARMREG_PC, 0);
4855 *(gpointer*)code = NULL;
4857 /* Load the value from the GOT */
4858 ARM_LDR_REG_REG (code, ins->dreg, ARMREG_PC, ins->dreg);
4860 case OP_OBJC_GET_SELECTOR:
4861 mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_OBJC_SELECTOR_REF, ins->inst_p0);
4862 ARM_LDR_IMM (code, ins->dreg, ARMREG_PC, 0);
4864 *(gpointer*)code = NULL;
4866 ARM_LDR_REG_REG (code, ins->dreg, ARMREG_PC, ins->dreg);
4868 case OP_ICONV_TO_I4:
4869 case OP_ICONV_TO_U4:
4871 if (ins->dreg != ins->sreg1)
4872 ARM_MOV_REG_REG (code, ins->dreg, ins->sreg1);
4875 int saved = ins->sreg2;
4876 if (ins->sreg2 == ARM_LSW_REG) {
4877 ARM_MOV_REG_REG (code, ARMREG_LR, ins->sreg2);
4880 if (ins->sreg1 != ARM_LSW_REG)
4881 ARM_MOV_REG_REG (code, ARM_LSW_REG, ins->sreg1);
4882 if (saved != ARM_MSW_REG)
4883 ARM_MOV_REG_REG (code, ARM_MSW_REG, saved);
4887 if (IS_VFP && ins->dreg != ins->sreg1)
4888 ARM_CPYD (code, ins->dreg, ins->sreg1);
4890 case OP_MOVE_F_TO_I4:
4891 code = mono_arm_emit_vfp_scratch_save (cfg, code, vfp_scratch1);
4892 ARM_CVTD (code, vfp_scratch1, ins->sreg1);
4893 ARM_FMRS (code, ins->dreg, vfp_scratch1);
4894 code = mono_arm_emit_vfp_scratch_restore (cfg, code, vfp_scratch1);
4896 case OP_MOVE_I4_TO_F:
4897 ARM_FMSR (code, ins->dreg, ins->sreg1);
4898 ARM_CVTS (code, ins->dreg, ins->dreg);
4900 case OP_FCONV_TO_R4:
4903 ARM_CVTD (code, ins->dreg, ins->sreg1);
4905 ARM_CVTD (code, ins->dreg, ins->sreg1);
4906 ARM_CVTS (code, ins->dreg, ins->dreg);
4912 * Keep in sync with mono_arch_emit_epilog
4914 g_assert (!cfg->method->save_lmf);
4916 code = emit_load_volatile_arguments (cfg, code);
4918 code = emit_big_add (code, ARMREG_SP, cfg->frame_reg, cfg->stack_usage);
4920 if (cfg->used_int_regs)
4921 ARM_POP (code, cfg->used_int_regs);
4922 ARM_POP (code, (1 << ARMREG_R7) | (1 << ARMREG_LR));
4924 ARM_POP (code, cfg->used_int_regs | (1 << ARMREG_LR));
4926 mono_add_patch_info (cfg, (guint8*) code - cfg->native_code, MONO_PATCH_INFO_METHOD_JUMP, ins->inst_p0);
4927 if (cfg->compile_aot) {
4928 ARM_LDR_IMM (code, ARMREG_IP, ARMREG_PC, 0);
4930 *(gpointer*)code = NULL;
4932 ARM_LDR_REG_REG (code, ARMREG_PC, ARMREG_PC, ARMREG_IP);
4934 code = mono_arm_patchable_b (code, ARMCOND_AL);
4938 MonoCallInst *call = (MonoCallInst*)ins;
4941 * The stack looks like the following:
4942 * <caller argument area>
4945 * <callee argument area>
4946 * Need to copy the arguments from the callee argument area to
4947 * the caller argument area, and pop the frame.
4949 if (call->stack_usage) {
4950 int i, prev_sp_offset = 0;
4952 /* Compute size of saved registers restored below */
4954 prev_sp_offset = 2 * 4;
4956 prev_sp_offset = 1 * 4;
4957 for (i = 0; i < 16; ++i) {
4958 if (cfg->used_int_regs & (1 << i))
4959 prev_sp_offset += 4;
4962 code = emit_big_add (code, ARMREG_IP, cfg->frame_reg, cfg->stack_usage + prev_sp_offset);
4964 /* Copy arguments on the stack to our argument area */
4965 for (i = 0; i < call->stack_usage; i += sizeof (mgreg_t)) {
4966 ARM_LDR_IMM (code, ARMREG_LR, ARMREG_SP, i);
4967 ARM_STR_IMM (code, ARMREG_LR, ARMREG_IP, i);
4972 * Keep in sync with mono_arch_emit_epilog
4974 g_assert (!cfg->method->save_lmf);
4976 code = emit_big_add (code, ARMREG_SP, cfg->frame_reg, cfg->stack_usage);
4978 if (cfg->used_int_regs)
4979 ARM_POP (code, cfg->used_int_regs);
4980 ARM_POP (code, (1 << ARMREG_R7) | (1 << ARMREG_LR));
4982 ARM_POP (code, cfg->used_int_regs | (1 << ARMREG_LR));
4985 mono_add_patch_info (cfg, (guint8*) code - cfg->native_code, MONO_PATCH_INFO_METHOD_JUMP, call->method);
4986 if (cfg->compile_aot) {
4987 ARM_LDR_IMM (code, ARMREG_IP, ARMREG_PC, 0);
4989 *(gpointer*)code = NULL;
4991 ARM_LDR_REG_REG (code, ARMREG_PC, ARMREG_PC, ARMREG_IP);
4993 code = mono_arm_patchable_b (code, ARMCOND_AL);
4998 /* ensure ins->sreg1 is not NULL */
4999 ARM_LDRB_IMM (code, ARMREG_LR, ins->sreg1, 0);
5002 g_assert (cfg->sig_cookie < 128);
5003 ARM_LDR_IMM (code, ARMREG_IP, cfg->frame_reg, cfg->sig_cookie);
5004 ARM_STR_IMM (code, ARMREG_IP, ins->sreg1, 0);
5014 call = (MonoCallInst*)ins;
5017 code = emit_float_args (cfg, call, code, &max_len, &offset);
5019 if (ins->flags & MONO_INST_HAS_METHOD)
5020 mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_METHOD, call->method);
5022 mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_ABS, call->fptr);
5023 code = emit_call_seq (cfg, code);
5024 ins->flags |= MONO_INST_GC_CALLSITE;
5025 ins->backend.pc_offset = code - cfg->native_code;
5026 code = emit_move_return_value (cfg, ins, code);
5033 case OP_VOIDCALL_REG:
5036 code = emit_float_args (cfg, (MonoCallInst *)ins, code, &max_len, &offset);
5038 code = emit_call_reg (code, ins->sreg1);
5039 ins->flags |= MONO_INST_GC_CALLSITE;
5040 ins->backend.pc_offset = code - cfg->native_code;
5041 code = emit_move_return_value (cfg, ins, code);
5043 case OP_FCALL_MEMBASE:
5044 case OP_RCALL_MEMBASE:
5045 case OP_LCALL_MEMBASE:
5046 case OP_VCALL_MEMBASE:
5047 case OP_VCALL2_MEMBASE:
5048 case OP_VOIDCALL_MEMBASE:
5049 case OP_CALL_MEMBASE: {
5050 gboolean imt_arg = FALSE;
5052 g_assert (ins->sreg1 != ARMREG_LR);
5053 call = (MonoCallInst*)ins;
5056 code = emit_float_args (cfg, call, code, &max_len, &offset);
5058 if (call->dynamic_imt_arg || call->method->klass->flags & TYPE_ATTRIBUTE_INTERFACE)
5060 if (!arm_is_imm12 (ins->inst_offset))
5061 code = mono_arm_emit_load_imm (code, ARMREG_IP, ins->inst_offset);
5062 #ifdef USE_JUMP_TABLES
5068 ARM_ADD_REG_IMM8 (code, ARMREG_LR, ARMREG_PC, LR_BIAS);
5070 ARM_MOV_REG_REG (code, ARMREG_LR, ARMREG_PC);
5072 if (!arm_is_imm12 (ins->inst_offset))
5073 ARM_LDR_REG_REG (code, ARMREG_PC, ins->sreg1, ARMREG_IP);
5075 ARM_LDR_IMM (code, ARMREG_PC, ins->sreg1, ins->inst_offset);
5078 * We can't embed the method in the code stream in PIC code, or
5080 * Instead, we put it in V5 in code emitted by
5081 * mono_arch_emit_imt_argument (), and embed NULL here to
5082 * signal the IMT thunk that the value is in V5.
5084 #ifdef USE_JUMP_TABLES
5085 /* In case of jumptables we always use value in V5. */
5088 if (call->dynamic_imt_arg)
5089 *((gpointer*)code) = NULL;
5091 *((gpointer*)code) = (gpointer)call->method;
5095 ins->flags |= MONO_INST_GC_CALLSITE;
5096 ins->backend.pc_offset = code - cfg->native_code;
5097 code = emit_move_return_value (cfg, ins, code);
5101 /* round the size to 8 bytes */
5102 ARM_ADD_REG_IMM8 (code, ins->dreg, ins->sreg1, 7);
5103 ARM_BIC_REG_IMM8 (code, ins->dreg, ins->dreg, 7);
5104 ARM_SUB_REG_REG (code, ARMREG_SP, ARMREG_SP, ins->dreg);
5105 /* memzero the area: dreg holds the size, sp is the pointer */
5106 if (ins->flags & MONO_INST_INIT) {
5107 guint8 *start_loop, *branch_to_cond;
5108 ARM_MOV_REG_IMM8 (code, ARMREG_LR, 0);
5109 branch_to_cond = code;
5112 ARM_STR_REG_REG (code, ARMREG_LR, ARMREG_SP, ins->dreg);
5113 arm_patch (branch_to_cond, code);
5114 /* decrement by 4 and set flags */
5115 ARM_SUBS_REG_IMM8 (code, ins->dreg, ins->dreg, sizeof (mgreg_t));
5116 ARM_B_COND (code, ARMCOND_GE, 0);
5117 arm_patch (code - 4, start_loop);
5119 ARM_MOV_REG_REG (code, ins->dreg, ARMREG_SP);
5120 if (cfg->param_area)
5121 code = emit_sub_imm (code, ARMREG_SP, ARMREG_SP, ALIGN_TO (cfg->param_area, MONO_ARCH_FRAME_ALIGNMENT));
5126 MonoInst *var = cfg->dyn_call_var;
5128 g_assert (var->opcode == OP_REGOFFSET);
5129 g_assert (arm_is_imm12 (var->inst_offset));
5131 /* lr = args buffer filled by mono_arch_get_dyn_call_args () */
5132 ARM_MOV_REG_REG( code, ARMREG_LR, ins->sreg1);
5134 ARM_MOV_REG_REG( code, ARMREG_IP, ins->sreg2);
5136 /* Save args buffer */
5137 ARM_STR_IMM (code, ARMREG_LR, var->inst_basereg, var->inst_offset);
5139 /* Set stack slots using R0 as scratch reg */
5140 /* MONO_ARCH_DYN_CALL_PARAM_AREA gives the size of stack space available */
5141 for (i = 0; i < DYN_CALL_STACK_ARGS; ++i) {
5142 ARM_LDR_IMM (code, ARMREG_R0, ARMREG_LR, (PARAM_REGS + i) * sizeof (mgreg_t));
5143 ARM_STR_IMM (code, ARMREG_R0, ARMREG_SP, i * sizeof (mgreg_t));
5146 /* Set argument registers */
5147 for (i = 0; i < PARAM_REGS; ++i)
5148 ARM_LDR_IMM (code, i, ARMREG_LR, i * sizeof (mgreg_t));
5151 ARM_MOV_REG_REG (code, ARMREG_LR, ARMREG_PC);
5152 ARM_MOV_REG_REG (code, ARMREG_PC, ARMREG_IP);
5155 ARM_LDR_IMM (code, ARMREG_IP, var->inst_basereg, var->inst_offset);
5156 ARM_STR_IMM (code, ARMREG_R0, ARMREG_IP, MONO_STRUCT_OFFSET (DynCallArgs, res));
5157 ARM_STR_IMM (code, ARMREG_R1, ARMREG_IP, MONO_STRUCT_OFFSET (DynCallArgs, res2));
5161 if (ins->sreg1 != ARMREG_R0)
5162 ARM_MOV_REG_REG (code, ARMREG_R0, ins->sreg1);
5163 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_INTERNAL_METHOD,
5164 (gpointer)"mono_arch_throw_exception");
5165 code = emit_call_seq (cfg, code);
5169 if (ins->sreg1 != ARMREG_R0)
5170 ARM_MOV_REG_REG (code, ARMREG_R0, ins->sreg1);
5171 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_INTERNAL_METHOD,
5172 (gpointer)"mono_arch_rethrow_exception");
5173 code = emit_call_seq (cfg, code);
5176 case OP_START_HANDLER: {
5177 MonoInst *spvar = mono_find_spvar_for_region (cfg, bb->region);
5180 /* Reserve a param area, see filter-stack.exe */
5181 if (cfg->param_area) {
5182 if ((i = mono_arm_is_rotated_imm8 (cfg->param_area, &rot_amount)) >= 0) {
5183 ARM_SUB_REG_IMM (code, ARMREG_SP, ARMREG_SP, i, rot_amount);
5185 code = mono_arm_emit_load_imm (code, ARMREG_IP, cfg->param_area);
5186 ARM_SUB_REG_REG (code, ARMREG_SP, ARMREG_SP, ARMREG_IP);
5190 if (arm_is_imm12 (spvar->inst_offset)) {
5191 ARM_STR_IMM (code, ARMREG_LR, spvar->inst_basereg, spvar->inst_offset);
5193 code = mono_arm_emit_load_imm (code, ARMREG_IP, spvar->inst_offset);
5194 ARM_STR_REG_REG (code, ARMREG_LR, spvar->inst_basereg, ARMREG_IP);
5198 case OP_ENDFILTER: {
5199 MonoInst *spvar = mono_find_spvar_for_region (cfg, bb->region);
5202 /* Free the param area */
5203 if (cfg->param_area) {
5204 if ((i = mono_arm_is_rotated_imm8 (cfg->param_area, &rot_amount)) >= 0) {
5205 ARM_ADD_REG_IMM (code, ARMREG_SP, ARMREG_SP, i, rot_amount);
5207 code = mono_arm_emit_load_imm (code, ARMREG_IP, cfg->param_area);
5208 ARM_ADD_REG_REG (code, ARMREG_SP, ARMREG_SP, ARMREG_IP);
5212 if (ins->sreg1 != ARMREG_R0)
5213 ARM_MOV_REG_REG (code, ARMREG_R0, ins->sreg1);
5214 if (arm_is_imm12 (spvar->inst_offset)) {
5215 ARM_LDR_IMM (code, ARMREG_IP, spvar->inst_basereg, spvar->inst_offset);
5217 g_assert (ARMREG_IP != spvar->inst_basereg);
5218 code = mono_arm_emit_load_imm (code, ARMREG_IP, spvar->inst_offset);
5219 ARM_LDR_REG_REG (code, ARMREG_IP, spvar->inst_basereg, ARMREG_IP);
5221 ARM_MOV_REG_REG (code, ARMREG_PC, ARMREG_IP);
5224 case OP_ENDFINALLY: {
5225 MonoInst *spvar = mono_find_spvar_for_region (cfg, bb->region);
5228 /* Free the param area */
5229 if (cfg->param_area) {
5230 if ((i = mono_arm_is_rotated_imm8 (cfg->param_area, &rot_amount)) >= 0) {
5231 ARM_ADD_REG_IMM (code, ARMREG_SP, ARMREG_SP, i, rot_amount);
5233 code = mono_arm_emit_load_imm (code, ARMREG_IP, cfg->param_area);
5234 ARM_ADD_REG_REG (code, ARMREG_SP, ARMREG_SP, ARMREG_IP);
5238 if (arm_is_imm12 (spvar->inst_offset)) {
5239 ARM_LDR_IMM (code, ARMREG_IP, spvar->inst_basereg, spvar->inst_offset);
5241 g_assert (ARMREG_IP != spvar->inst_basereg);
5242 code = mono_arm_emit_load_imm (code, ARMREG_IP, spvar->inst_offset);
5243 ARM_LDR_REG_REG (code, ARMREG_IP, spvar->inst_basereg, ARMREG_IP);
5245 ARM_MOV_REG_REG (code, ARMREG_PC, ARMREG_IP);
5248 case OP_CALL_HANDLER:
5249 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_BB, ins->inst_target_bb);
5250 code = mono_arm_patchable_bl (code, ARMCOND_AL);
5251 mono_cfg_add_try_hole (cfg, ins->inst_eh_block, code, bb);
5254 ins->inst_c0 = code - cfg->native_code;
5257 /*if (ins->inst_target_bb->native_offset) {
5259 //x86_jump_code (code, cfg->native_code + ins->inst_target_bb->native_offset);
5261 mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_BB, ins->inst_target_bb);
5262 code = mono_arm_patchable_b (code, ARMCOND_AL);
5266 ARM_MOV_REG_REG (code, ARMREG_PC, ins->sreg1);
5270 * In the normal case we have:
5271 * ldr pc, [pc, ins->sreg1 << 2]
5274 * ldr lr, [pc, ins->sreg1 << 2]
5276 * After follows the data.
5277 * FIXME: add aot support.
5279 mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_SWITCH, ins->inst_p0);
5280 #ifdef USE_JUMP_TABLES
5282 gpointer *jte = mono_jumptable_add_entries (GPOINTER_TO_INT (ins->klass));
5283 code = mono_arm_load_jumptable_entry_addr (code, jte, ARMREG_IP);
5284 ARM_LDR_REG_REG_SHIFT (code, ARMREG_PC, ARMREG_IP, ins->sreg1, ARMSHIFT_LSL, 2);
5288 max_len += 4 * GPOINTER_TO_INT (ins->klass);
5289 if (offset + max_len > (cfg->code_size - 16)) {
5290 cfg->code_size += max_len;
5291 cfg->code_size *= 2;
5292 cfg->native_code = g_realloc (cfg->native_code, cfg->code_size);
5293 code = cfg->native_code + offset;
5295 ARM_LDR_REG_REG_SHIFT (code, ARMREG_PC, ARMREG_PC, ins->sreg1, ARMSHIFT_LSL, 2);
5297 code += 4 * GPOINTER_TO_INT (ins->klass);
5302 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 0, ARMCOND_NE);
5303 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_EQ);
5307 ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
5308 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_LT);
5312 ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
5313 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_LO);
5317 ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
5318 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_GT);
5322 ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
5323 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_HI);
5326 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_NE);
5327 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 0, ARMCOND_EQ);
5330 ARM_MOV_REG_IMM8 (code, ins->dreg, 1);
5331 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 0, ARMCOND_LT);
5334 ARM_MOV_REG_IMM8 (code, ins->dreg, 1);
5335 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 0, ARMCOND_GT);
5338 ARM_MOV_REG_IMM8 (code, ins->dreg, 1);
5339 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 0, ARMCOND_LO);
5342 ARM_MOV_REG_IMM8 (code, ins->dreg, 1);
5343 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 0, ARMCOND_HI);
5345 case OP_COND_EXC_EQ:
5346 case OP_COND_EXC_NE_UN:
5347 case OP_COND_EXC_LT:
5348 case OP_COND_EXC_LT_UN:
5349 case OP_COND_EXC_GT:
5350 case OP_COND_EXC_GT_UN:
5351 case OP_COND_EXC_GE:
5352 case OP_COND_EXC_GE_UN:
5353 case OP_COND_EXC_LE:
5354 case OP_COND_EXC_LE_UN:
5355 EMIT_COND_SYSTEM_EXCEPTION (ins->opcode - OP_COND_EXC_EQ, ins->inst_p1);
5357 case OP_COND_EXC_IEQ:
5358 case OP_COND_EXC_INE_UN:
5359 case OP_COND_EXC_ILT:
5360 case OP_COND_EXC_ILT_UN:
5361 case OP_COND_EXC_IGT:
5362 case OP_COND_EXC_IGT_UN:
5363 case OP_COND_EXC_IGE:
5364 case OP_COND_EXC_IGE_UN:
5365 case OP_COND_EXC_ILE:
5366 case OP_COND_EXC_ILE_UN:
5367 EMIT_COND_SYSTEM_EXCEPTION (ins->opcode - OP_COND_EXC_IEQ, ins->inst_p1);
5370 case OP_COND_EXC_IC:
5371 EMIT_COND_SYSTEM_EXCEPTION_FLAGS (ARMCOND_CS, ins->inst_p1);
5373 case OP_COND_EXC_OV:
5374 case OP_COND_EXC_IOV:
5375 EMIT_COND_SYSTEM_EXCEPTION_FLAGS (ARMCOND_VS, ins->inst_p1);
5377 case OP_COND_EXC_NC:
5378 case OP_COND_EXC_INC:
5379 EMIT_COND_SYSTEM_EXCEPTION_FLAGS (ARMCOND_CC, ins->inst_p1);
5381 case OP_COND_EXC_NO:
5382 case OP_COND_EXC_INO:
5383 EMIT_COND_SYSTEM_EXCEPTION_FLAGS (ARMCOND_VC, ins->inst_p1);
5395 EMIT_COND_BRANCH (ins, ins->opcode - OP_IBEQ);
5398 /* floating point opcodes */
5400 if (cfg->compile_aot) {
5401 ARM_FLDD (code, ins->dreg, ARMREG_PC, 0);
5403 *(guint32*)code = ((guint32*)(ins->inst_p0))[0];
5405 *(guint32*)code = ((guint32*)(ins->inst_p0))[1];
5408 /* FIXME: we can optimize the imm load by dealing with part of
5409 * the displacement in LDFD (aligning to 512).
5411 code = mono_arm_emit_load_imm (code, ARMREG_LR, (guint32)ins->inst_p0);
5412 ARM_FLDD (code, ins->dreg, ARMREG_LR, 0);
5416 if (cfg->compile_aot) {
5417 ARM_FLDS (code, ins->dreg, ARMREG_PC, 0);
5419 *(guint32*)code = ((guint32*)(ins->inst_p0))[0];
5422 ARM_CVTS (code, ins->dreg, ins->dreg);
5424 code = mono_arm_emit_load_imm (code, ARMREG_LR, (guint32)ins->inst_p0);
5425 ARM_FLDS (code, ins->dreg, ARMREG_LR, 0);
5427 ARM_CVTS (code, ins->dreg, ins->dreg);
5430 case OP_STORER8_MEMBASE_REG:
5431 /* This is generated by the local regalloc pass which runs after the lowering pass */
5432 if (!arm_is_fpimm8 (ins->inst_offset)) {
5433 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_offset);
5434 ARM_ADD_REG_REG (code, ARMREG_LR, ARMREG_LR, ins->inst_destbasereg);
5435 ARM_FSTD (code, ins->sreg1, ARMREG_LR, 0);
5437 ARM_FSTD (code, ins->sreg1, ins->inst_destbasereg, ins->inst_offset);
5440 case OP_LOADR8_MEMBASE:
5441 /* This is generated by the local regalloc pass which runs after the lowering pass */
5442 if (!arm_is_fpimm8 (ins->inst_offset)) {
5443 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_offset);
5444 ARM_ADD_REG_REG (code, ARMREG_LR, ARMREG_LR, ins->inst_basereg);
5445 ARM_FLDD (code, ins->dreg, ARMREG_LR, 0);
5447 ARM_FLDD (code, ins->dreg, ins->inst_basereg, ins->inst_offset);
5450 case OP_STORER4_MEMBASE_REG:
5451 g_assert (arm_is_fpimm8 (ins->inst_offset));
5453 ARM_FSTS (code, ins->sreg1, ins->inst_destbasereg, ins->inst_offset);
5455 code = mono_arm_emit_vfp_scratch_save (cfg, code, vfp_scratch1);
5456 ARM_CVTD (code, vfp_scratch1, ins->sreg1);
5457 ARM_FSTS (code, vfp_scratch1, ins->inst_destbasereg, ins->inst_offset);
5458 code = mono_arm_emit_vfp_scratch_restore (cfg, code, vfp_scratch1);
5461 case OP_LOADR4_MEMBASE:
5463 ARM_FLDS (code, ins->dreg, ins->inst_basereg, ins->inst_offset);
5465 g_assert (arm_is_fpimm8 (ins->inst_offset));
5466 code = mono_arm_emit_vfp_scratch_save (cfg, code, vfp_scratch1);
5467 ARM_FLDS (code, vfp_scratch1, ins->inst_basereg, ins->inst_offset);
5468 ARM_CVTS (code, ins->dreg, vfp_scratch1);
5469 code = mono_arm_emit_vfp_scratch_restore (cfg, code, vfp_scratch1);
5472 case OP_ICONV_TO_R_UN: {
5473 g_assert_not_reached ();
5476 case OP_ICONV_TO_R4:
5478 ARM_FMSR (code, ins->dreg, ins->sreg1);
5479 ARM_FSITOS (code, ins->dreg, ins->dreg);
5481 code = mono_arm_emit_vfp_scratch_save (cfg, code, vfp_scratch1);
5482 ARM_FMSR (code, vfp_scratch1, ins->sreg1);
5483 ARM_FSITOS (code, vfp_scratch1, vfp_scratch1);
5484 ARM_CVTS (code, ins->dreg, vfp_scratch1);
5485 code = mono_arm_emit_vfp_scratch_restore (cfg, code, vfp_scratch1);
5488 case OP_ICONV_TO_R8:
5489 code = mono_arm_emit_vfp_scratch_save (cfg, code, vfp_scratch1);
5490 ARM_FMSR (code, vfp_scratch1, ins->sreg1);
5491 ARM_FSITOD (code, ins->dreg, vfp_scratch1);
5492 code = mono_arm_emit_vfp_scratch_restore (cfg, code, vfp_scratch1);
5496 MonoType *sig_ret = mini_type_get_underlying_type (NULL, mono_method_signature (cfg->method)->ret);
5497 if (sig_ret->type == MONO_TYPE_R4) {
5499 g_assert (!IS_HARD_FLOAT);
5500 ARM_FMRS (code, ARMREG_R0, ins->sreg1);
5502 ARM_CVTD (code, ARM_VFP_F0, ins->sreg1);
5505 ARM_FMRS (code, ARMREG_R0, ARM_VFP_F0);
5509 ARM_CPYD (code, ARM_VFP_D0, ins->sreg1);
5511 ARM_FMRRD (code, ARMREG_R0, ARMREG_R1, ins->sreg1);
5515 case OP_FCONV_TO_I1:
5516 code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 1, TRUE);
5518 case OP_FCONV_TO_U1:
5519 code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 1, FALSE);
5521 case OP_FCONV_TO_I2:
5522 code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 2, TRUE);
5524 case OP_FCONV_TO_U2:
5525 code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 2, FALSE);
5527 case OP_FCONV_TO_I4:
5529 code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 4, TRUE);
5531 case OP_FCONV_TO_U4:
5533 code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 4, FALSE);
5535 case OP_FCONV_TO_I8:
5536 case OP_FCONV_TO_U8:
5537 g_assert_not_reached ();
5538 /* Implemented as helper calls */
5540 case OP_LCONV_TO_R_UN:
5541 g_assert_not_reached ();
5542 /* Implemented as helper calls */
5544 case OP_LCONV_TO_OVF_I4_2: {
5545 guint8 *high_bit_not_set, *valid_negative, *invalid_negative, *valid_positive;
5547 * Valid ints: 0xffffffff:80000000 to 00000000:0x7fffffff
5550 ARM_CMP_REG_IMM8 (code, ins->sreg1, 0);
5551 high_bit_not_set = code;
5552 ARM_B_COND (code, ARMCOND_GE, 0); /*branch if bit 31 of the lower part is not set*/
5554 ARM_CMN_REG_IMM8 (code, ins->sreg2, 1); /*This have the same effect as CMP reg, 0xFFFFFFFF */
5555 valid_negative = code;
5556 ARM_B_COND (code, ARMCOND_EQ, 0); /*branch if upper part == 0xFFFFFFFF (lower part has bit 31 set) */
5557 invalid_negative = code;
5558 ARM_B_COND (code, ARMCOND_AL, 0);
5560 arm_patch (high_bit_not_set, code);
5562 ARM_CMP_REG_IMM8 (code, ins->sreg2, 0);
5563 valid_positive = code;
5564 ARM_B_COND (code, ARMCOND_EQ, 0); /*branch if upper part == 0 (lower part has bit 31 clear)*/
5566 arm_patch (invalid_negative, code);
5567 EMIT_COND_SYSTEM_EXCEPTION_FLAGS (ARMCOND_AL, "OverflowException");
5569 arm_patch (valid_negative, code);
5570 arm_patch (valid_positive, code);
5572 if (ins->dreg != ins->sreg1)
5573 ARM_MOV_REG_REG (code, ins->dreg, ins->sreg1);
5577 ARM_VFP_ADDD (code, ins->dreg, ins->sreg1, ins->sreg2);
5580 ARM_VFP_SUBD (code, ins->dreg, ins->sreg1, ins->sreg2);
5583 ARM_VFP_MULD (code, ins->dreg, ins->sreg1, ins->sreg2);
5586 ARM_VFP_DIVD (code, ins->dreg, ins->sreg1, ins->sreg2);
5589 ARM_NEGD (code, ins->dreg, ins->sreg1);
5593 g_assert_not_reached ();
5597 ARM_CMPD (code, ins->sreg1, ins->sreg2);
5603 ARM_CMPS (code, ins->sreg1, ins->sreg2);
5608 ARM_CMPD (code, ins->sreg1, ins->sreg2);
5611 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 0, ARMCOND_NE);
5612 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_EQ);
5616 ARM_CMPD (code, ins->sreg1, ins->sreg2);
5619 ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
5620 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_MI);
5624 ARM_CMPD (code, ins->sreg1, ins->sreg2);
5627 ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
5628 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_MI);
5629 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_VS);
5633 ARM_CMPD (code, ins->sreg2, ins->sreg1);
5636 ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
5637 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_MI);
5641 ARM_CMPD (code, ins->sreg2, ins->sreg1);
5644 ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
5645 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_MI);
5646 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_VS);
5650 ARM_CMPD (code, ins->sreg1, ins->sreg2);
5653 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_NE);
5654 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 0, ARMCOND_EQ);
5658 ARM_CMPD (code, ins->sreg1, ins->sreg2);
5661 ARM_MOV_REG_IMM8 (code, ins->dreg, 1);
5662 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 0, ARMCOND_MI);
5666 ARM_CMPD (code, ins->sreg2, ins->sreg1);
5669 ARM_MOV_REG_IMM8 (code, ins->dreg, 1);
5670 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 0, ARMCOND_MI);
5673 /* ARM FPA flags table:
5674 * N Less than ARMCOND_MI
5675 * Z Equal ARMCOND_EQ
5676 * C Greater Than or Equal ARMCOND_CS
5677 * V Unordered ARMCOND_VS
5680 EMIT_COND_BRANCH (ins, OP_IBEQ - OP_IBEQ);
5683 EMIT_COND_BRANCH (ins, OP_IBNE_UN - OP_IBEQ);
5686 EMIT_COND_BRANCH_FLAGS (ins, ARMCOND_MI); /* N set */
5689 EMIT_COND_BRANCH_FLAGS (ins, ARMCOND_VS); /* V set */
5690 EMIT_COND_BRANCH_FLAGS (ins, ARMCOND_MI); /* N set */
5696 g_assert_not_reached ();
5700 EMIT_COND_BRANCH_FLAGS (ins, ARMCOND_GE);
5702 /* FPA requires EQ even though the docs suggest that just CS is enough */
5703 EMIT_COND_BRANCH_FLAGS (ins, ARMCOND_EQ);
5704 EMIT_COND_BRANCH_FLAGS (ins, ARMCOND_CS);
5708 EMIT_COND_BRANCH_FLAGS (ins, ARMCOND_VS); /* V set */
5709 EMIT_COND_BRANCH_FLAGS (ins, ARMCOND_GE);
5714 code = mono_arm_emit_vfp_scratch_save (cfg, code, vfp_scratch1);
5715 code = mono_arm_emit_vfp_scratch_save (cfg, code, vfp_scratch2);
5717 #ifdef USE_JUMP_TABLES
5719 gpointer *jte = mono_jumptable_add_entries (2);
5720 jte [0] = GUINT_TO_POINTER (0xffffffff);
5721 jte [1] = GUINT_TO_POINTER (0x7fefffff);
5722 code = mono_arm_load_jumptable_entry_addr (code, jte, ARMREG_IP);
5723 ARM_FLDD (code, vfp_scratch1, ARMREG_IP, 0);
5726 ARM_ABSD (code, vfp_scratch2, ins->sreg1);
5727 ARM_FLDD (code, vfp_scratch1, ARMREG_PC, 0);
5729 *(guint32*)code = 0xffffffff;
5731 *(guint32*)code = 0x7fefffff;
5734 ARM_CMPD (code, vfp_scratch2, vfp_scratch1);
5736 EMIT_COND_SYSTEM_EXCEPTION_FLAGS (ARMCOND_GT, "ArithmeticException");
5737 ARM_CMPD (code, ins->sreg1, ins->sreg1);
5739 EMIT_COND_SYSTEM_EXCEPTION_FLAGS (ARMCOND_VS, "ArithmeticException");
5740 ARM_CPYD (code, ins->dreg, ins->sreg1);
5742 code = mono_arm_emit_vfp_scratch_restore (cfg, code, vfp_scratch1);
5743 code = mono_arm_emit_vfp_scratch_restore (cfg, code, vfp_scratch2);
5748 case OP_RCONV_TO_I1:
5749 code = emit_r4_to_int (cfg, code, ins->dreg, ins->sreg1, 1, TRUE);
5751 case OP_RCONV_TO_U1:
5752 code = emit_r4_to_int (cfg, code, ins->dreg, ins->sreg1, 1, FALSE);
5754 case OP_RCONV_TO_I2:
5755 code = emit_r4_to_int (cfg, code, ins->dreg, ins->sreg1, 2, TRUE);
5757 case OP_RCONV_TO_U2:
5758 code = emit_r4_to_int (cfg, code, ins->dreg, ins->sreg1, 2, FALSE);
5760 case OP_RCONV_TO_I4:
5761 code = emit_r4_to_int (cfg, code, ins->dreg, ins->sreg1, 4, TRUE);
5763 case OP_RCONV_TO_U4:
5764 code = emit_r4_to_int (cfg, code, ins->dreg, ins->sreg1, 4, FALSE);
5766 case OP_RCONV_TO_R4:
5768 if (ins->dreg != ins->sreg1)
5769 ARM_CPYS (code, ins->dreg, ins->sreg1);
5771 case OP_RCONV_TO_R8:
5773 ARM_CVTS (code, ins->dreg, ins->sreg1);
5776 ARM_VFP_ADDS (code, ins->dreg, ins->sreg1, ins->sreg2);
5779 ARM_VFP_SUBS (code, ins->dreg, ins->sreg1, ins->sreg2);
5782 ARM_VFP_MULS (code, ins->dreg, ins->sreg1, ins->sreg2);
5785 ARM_VFP_DIVS (code, ins->dreg, ins->sreg1, ins->sreg2);
5788 ARM_NEGS (code, ins->dreg, ins->sreg1);
5792 ARM_CMPS (code, ins->sreg1, ins->sreg2);
5795 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 0, ARMCOND_NE);
5796 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_EQ);
5800 ARM_CMPS (code, ins->sreg1, ins->sreg2);
5803 ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
5804 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_MI);
5808 ARM_CMPS (code, ins->sreg1, ins->sreg2);
5811 ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
5812 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_MI);
5813 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_VS);
5817 ARM_CMPS (code, ins->sreg2, ins->sreg1);
5820 ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
5821 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_MI);
5825 ARM_CMPS (code, ins->sreg2, ins->sreg1);
5828 ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
5829 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_MI);
5830 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_VS);
5834 ARM_CMPS (code, ins->sreg1, ins->sreg2);
5837 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_NE);
5838 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 0, ARMCOND_EQ);
5842 ARM_CMPS (code, ins->sreg1, ins->sreg2);
5845 ARM_MOV_REG_IMM8 (code, ins->dreg, 1);
5846 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 0, ARMCOND_MI);
5850 ARM_CMPS (code, ins->sreg2, ins->sreg1);
5853 ARM_MOV_REG_IMM8 (code, ins->dreg, 1);
5854 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 0, ARMCOND_MI);
5857 case OP_GC_LIVENESS_DEF:
5858 case OP_GC_LIVENESS_USE:
5859 case OP_GC_PARAM_SLOT_LIVENESS_DEF:
5860 ins->backend.pc_offset = code - cfg->native_code;
5862 case OP_GC_SPILL_SLOT_LIVENESS_DEF:
5863 ins->backend.pc_offset = code - cfg->native_code;
5864 bb->spill_slot_defs = g_slist_prepend_mempool (cfg->mempool, bb->spill_slot_defs, ins);
5868 g_warning ("unknown opcode %s in %s()\n", mono_inst_name (ins->opcode), __FUNCTION__);
5869 g_assert_not_reached ();
5872 if ((cfg->opt & MONO_OPT_BRANCH) && ((code - cfg->native_code - offset) > max_len)) {
5873 g_warning ("wrong maximal instruction length of instruction %s (expected %d, got %d)",
5874 mono_inst_name (ins->opcode), max_len, code - cfg->native_code - offset);
5875 g_assert_not_reached ();
5881 last_offset = offset;
5884 cfg->code_len = code - cfg->native_code;
5887 #endif /* DISABLE_JIT */
5889 #ifdef HAVE_AEABI_READ_TP
5890 void __aeabi_read_tp (void);
5894 mono_arch_register_lowlevel_calls (void)
5896 /* The signature doesn't matter */
5897 mono_register_jit_icall (mono_arm_throw_exception, "mono_arm_throw_exception", mono_create_icall_signature ("void"), TRUE);
5898 mono_register_jit_icall (mono_arm_throw_exception_by_token, "mono_arm_throw_exception_by_token", mono_create_icall_signature ("void"), TRUE);
5900 #ifndef MONO_CROSS_COMPILE
5901 #ifdef HAVE_AEABI_READ_TP
5902 mono_register_jit_icall (__aeabi_read_tp, "__aeabi_read_tp", mono_create_icall_signature ("void"), TRUE);
5907 #define patch_lis_ori(ip,val) do {\
5908 guint16 *__lis_ori = (guint16*)(ip); \
5909 __lis_ori [1] = (((guint32)(val)) >> 16) & 0xffff; \
5910 __lis_ori [3] = ((guint32)(val)) & 0xffff; \
5914 mono_arch_patch_code (MonoMethod *method, MonoDomain *domain, guint8 *code, MonoJumpInfo *ji, MonoCodeManager *dyn_code_mp, gboolean run_cctors)
5916 MonoJumpInfo *patch_info;
5917 gboolean compile_aot = !run_cctors;
5919 for (patch_info = ji; patch_info; patch_info = patch_info->next) {
5920 unsigned char *ip = patch_info->ip.i + code;
5921 const unsigned char *target;
5923 if (patch_info->type == MONO_PATCH_INFO_SWITCH && !compile_aot) {
5924 #ifdef USE_JUMP_TABLES
5925 gpointer *jt = mono_jumptable_get_entry (ip);
5927 gpointer *jt = (gpointer*)(ip + 8);
5930 /* jt is the inlined jump table, 2 instructions after ip
5931 * In the normal case we store the absolute addresses,
5932 * otherwise the displacements.
5934 for (i = 0; i < patch_info->data.table->table_size; i++)
5935 jt [i] = code + (int)patch_info->data.table->table [i];
5940 switch (patch_info->type) {
5941 case MONO_PATCH_INFO_BB:
5942 case MONO_PATCH_INFO_LABEL:
5945 /* No need to patch these */
5950 target = mono_resolve_patch_target (method, domain, code, patch_info, run_cctors);
5952 switch (patch_info->type) {
5953 case MONO_PATCH_INFO_IP:
5954 g_assert_not_reached ();
5955 patch_lis_ori (ip, ip);
5957 case MONO_PATCH_INFO_METHOD_REL:
5958 g_assert_not_reached ();
5959 *((gpointer *)(ip)) = code + patch_info->data.offset;
5961 case MONO_PATCH_INFO_METHODCONST:
5962 case MONO_PATCH_INFO_CLASS:
5963 case MONO_PATCH_INFO_IMAGE:
5964 case MONO_PATCH_INFO_FIELD:
5965 case MONO_PATCH_INFO_VTABLE:
5966 case MONO_PATCH_INFO_IID:
5967 case MONO_PATCH_INFO_SFLDA:
5968 case MONO_PATCH_INFO_LDSTR:
5969 case MONO_PATCH_INFO_TYPE_FROM_HANDLE:
5970 case MONO_PATCH_INFO_LDTOKEN:
5971 g_assert_not_reached ();
5972 /* from OP_AOTCONST : lis + ori */
5973 patch_lis_ori (ip, target);
5975 case MONO_PATCH_INFO_R4:
5976 case MONO_PATCH_INFO_R8:
5977 g_assert_not_reached ();
5978 *((gconstpointer *)(ip + 2)) = patch_info->data.target;
5980 case MONO_PATCH_INFO_EXC_NAME:
5981 g_assert_not_reached ();
5982 *((gconstpointer *)(ip + 1)) = patch_info->data.name;
5984 case MONO_PATCH_INFO_NONE:
5985 case MONO_PATCH_INFO_BB_OVF:
5986 case MONO_PATCH_INFO_EXC_OVF:
5987 /* everything is dealt with at epilog output time */
5992 arm_patch_general (domain, ip, target, dyn_code_mp);
5999 * Stack frame layout:
6001 * ------------------- fp
6002 * MonoLMF structure or saved registers
6003 * -------------------
6005 * -------------------
6007 * -------------------
6008 * optional 8 bytes for tracing
6009 * -------------------
6010 * param area size is cfg->param_area
6011 * ------------------- sp
6014 mono_arch_emit_prolog (MonoCompile *cfg)
6016 MonoMethod *method = cfg->method;
6018 MonoMethodSignature *sig;
6020 int alloc_size, orig_alloc_size, pos, max_offset, i, rot_amount;
6025 int prev_sp_offset, reg_offset;
6027 if (mono_jit_trace_calls != NULL && mono_trace_eval (method))
6030 sig = mono_method_signature (method);
6031 cfg->code_size = 256 + sig->param_count * 64;
6032 code = cfg->native_code = g_malloc (cfg->code_size);
6034 mono_emit_unwind_op_def_cfa (cfg, code, ARMREG_SP, 0);
6036 alloc_size = cfg->stack_offset;
6042 * The iphone uses R7 as the frame pointer, and it points at the saved
6047 * We can't use r7 as a frame pointer since it points into the middle of
6048 * the frame, so we keep using our own frame pointer.
6049 * FIXME: Optimize this.
6051 ARM_PUSH (code, (1 << ARMREG_R7) | (1 << ARMREG_LR));
6052 ARM_MOV_REG_REG (code, ARMREG_R7, ARMREG_SP);
6053 prev_sp_offset += 8; /* r7 and lr */
6054 mono_emit_unwind_op_def_cfa_offset (cfg, code, prev_sp_offset);
6055 mono_emit_unwind_op_offset (cfg, code, ARMREG_R7, (- prev_sp_offset) + 0);
6058 if (!method->save_lmf) {
6060 /* No need to push LR again */
6061 if (cfg->used_int_regs)
6062 ARM_PUSH (code, cfg->used_int_regs);
6064 ARM_PUSH (code, cfg->used_int_regs | (1 << ARMREG_LR));
6065 prev_sp_offset += 4;
6067 for (i = 0; i < 16; ++i) {
6068 if (cfg->used_int_regs & (1 << i))
6069 prev_sp_offset += 4;
6071 mono_emit_unwind_op_def_cfa_offset (cfg, code, prev_sp_offset);
6073 for (i = 0; i < 16; ++i) {
6074 if ((cfg->used_int_regs & (1 << i))) {
6075 mono_emit_unwind_op_offset (cfg, code, i, (- prev_sp_offset) + reg_offset);
6076 mini_gc_set_slot_type_from_cfa (cfg, (- prev_sp_offset) + reg_offset, SLOT_NOREF);
6081 mono_emit_unwind_op_offset (cfg, code, ARMREG_LR, -4);
6082 mini_gc_set_slot_type_from_cfa (cfg, -4, SLOT_NOREF);
6084 mono_emit_unwind_op_offset (cfg, code, ARMREG_LR, -4);
6085 mini_gc_set_slot_type_from_cfa (cfg, -4, SLOT_NOREF);
6088 ARM_MOV_REG_REG (code, ARMREG_IP, ARMREG_SP);
6089 ARM_PUSH (code, 0x5ff0);
6090 prev_sp_offset += 4 * 10; /* all but r0-r3, sp and pc */
6091 mono_emit_unwind_op_def_cfa_offset (cfg, code, prev_sp_offset);
6093 for (i = 0; i < 16; ++i) {
6094 if ((i > ARMREG_R3) && (i != ARMREG_SP) && (i != ARMREG_PC)) {
6095 /* The original r7 is saved at the start */
6096 if (!(iphone_abi && i == ARMREG_R7))
6097 mono_emit_unwind_op_offset (cfg, code, i, (- prev_sp_offset) + reg_offset);
6101 g_assert (reg_offset == 4 * 10);
6102 pos += sizeof (MonoLMF) - (4 * 10);
6106 orig_alloc_size = alloc_size;
6107 // align to MONO_ARCH_FRAME_ALIGNMENT bytes
6108 if (alloc_size & (MONO_ARCH_FRAME_ALIGNMENT - 1)) {
6109 alloc_size += MONO_ARCH_FRAME_ALIGNMENT - 1;
6110 alloc_size &= ~(MONO_ARCH_FRAME_ALIGNMENT - 1);
6113 /* the stack used in the pushed regs */
6114 if (prev_sp_offset & 4)
6116 cfg->stack_usage = alloc_size;
6118 if ((i = mono_arm_is_rotated_imm8 (alloc_size, &rot_amount)) >= 0) {
6119 ARM_SUB_REG_IMM (code, ARMREG_SP, ARMREG_SP, i, rot_amount);
6121 code = mono_arm_emit_load_imm (code, ARMREG_IP, alloc_size);
6122 ARM_SUB_REG_REG (code, ARMREG_SP, ARMREG_SP, ARMREG_IP);
6124 mono_emit_unwind_op_def_cfa_offset (cfg, code, prev_sp_offset + alloc_size);
6126 if (cfg->frame_reg != ARMREG_SP) {
6127 ARM_MOV_REG_REG (code, cfg->frame_reg, ARMREG_SP);
6128 mono_emit_unwind_op_def_cfa_reg (cfg, code, cfg->frame_reg);
6130 //g_print ("prev_sp_offset: %d, alloc_size:%d\n", prev_sp_offset, alloc_size);
6131 prev_sp_offset += alloc_size;
6133 for (i = 0; i < alloc_size - orig_alloc_size; i += 4)
6134 mini_gc_set_slot_type_from_cfa (cfg, (- prev_sp_offset) + orig_alloc_size + i, SLOT_NOREF);
6136 /* compute max_offset in order to use short forward jumps
6137 * we could skip doing it on arm because the immediate displacement
6138 * for jumps is large enough, it may be useful later for constant pools
6141 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
6142 MonoInst *ins = bb->code;
6143 bb->max_offset = max_offset;
6145 if (cfg->prof_options & MONO_PROFILE_COVERAGE)
6148 MONO_BB_FOR_EACH_INS (bb, ins)
6149 max_offset += ((guint8 *)ins_get_spec (ins->opcode))[MONO_INST_LEN];
6152 /* store runtime generic context */
6153 if (cfg->rgctx_var) {
6154 MonoInst *ins = cfg->rgctx_var;
6156 g_assert (ins->opcode == OP_REGOFFSET);
6158 if (arm_is_imm12 (ins->inst_offset)) {
6159 ARM_STR_IMM (code, MONO_ARCH_RGCTX_REG, ins->inst_basereg, ins->inst_offset);
6161 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_offset);
6162 ARM_STR_REG_REG (code, MONO_ARCH_RGCTX_REG, ins->inst_basereg, ARMREG_LR);
6166 /* load arguments allocated to register from the stack */
6169 cinfo = get_call_info (cfg->generic_sharing_context, NULL, sig);
6171 if (cinfo->vtype_retaddr) {
6172 ArgInfo *ainfo = &cinfo->ret;
6173 inst = cfg->vret_addr;
6174 g_assert (arm_is_imm12 (inst->inst_offset));
6175 ARM_STR_IMM (code, ainfo->reg, inst->inst_basereg, inst->inst_offset);
6178 if (sig->call_convention == MONO_CALL_VARARG) {
6179 ArgInfo *cookie = &cinfo->sig_cookie;
6181 /* Save the sig cookie address */
6182 g_assert (cookie->storage == RegTypeBase);
6184 g_assert (arm_is_imm12 (prev_sp_offset + cookie->offset));
6185 g_assert (arm_is_imm12 (cfg->sig_cookie));
6186 ARM_ADD_REG_IMM8 (code, ARMREG_IP, cfg->frame_reg, prev_sp_offset + cookie->offset);
6187 ARM_STR_IMM (code, ARMREG_IP, cfg->frame_reg, cfg->sig_cookie);
6190 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
6191 ArgInfo *ainfo = cinfo->args + i;
6192 inst = cfg->args [pos];
6194 if (cfg->verbose_level > 2)
6195 g_print ("Saving argument %d (type: %d)\n", i, ainfo->storage);
6196 if (inst->opcode == OP_REGVAR) {
6197 if (ainfo->storage == RegTypeGeneral)
6198 ARM_MOV_REG_REG (code, inst->dreg, ainfo->reg);
6199 else if (ainfo->storage == RegTypeFP) {
6200 g_assert_not_reached ();
6201 } else if (ainfo->storage == RegTypeBase) {
6202 if (arm_is_imm12 (prev_sp_offset + ainfo->offset)) {
6203 ARM_LDR_IMM (code, inst->dreg, ARMREG_SP, (prev_sp_offset + ainfo->offset));
6205 code = mono_arm_emit_load_imm (code, ARMREG_IP, prev_sp_offset + ainfo->offset);
6206 ARM_LDR_REG_REG (code, inst->dreg, ARMREG_SP, ARMREG_IP);
6209 g_assert_not_reached ();
6211 if (cfg->verbose_level > 2)
6212 g_print ("Argument %d assigned to register %s\n", pos, mono_arch_regname (inst->dreg));
6214 /* the argument should be put on the stack: FIXME handle size != word */
6215 if (ainfo->storage == RegTypeGeneral || ainfo->storage == RegTypeIRegPair || ainfo->storage == RegTypeGSharedVtInReg) {
6216 switch (ainfo->size) {
6218 if (arm_is_imm12 (inst->inst_offset))
6219 ARM_STRB_IMM (code, ainfo->reg, inst->inst_basereg, inst->inst_offset);
6221 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
6222 ARM_STRB_REG_REG (code, ainfo->reg, inst->inst_basereg, ARMREG_IP);
6226 if (arm_is_imm8 (inst->inst_offset)) {
6227 ARM_STRH_IMM (code, ainfo->reg, inst->inst_basereg, inst->inst_offset);
6229 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
6230 ARM_STRH_REG_REG (code, ainfo->reg, inst->inst_basereg, ARMREG_IP);
6234 if (arm_is_imm12 (inst->inst_offset)) {
6235 ARM_STR_IMM (code, ainfo->reg, inst->inst_basereg, inst->inst_offset);
6237 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
6238 ARM_STR_REG_REG (code, ainfo->reg, inst->inst_basereg, ARMREG_IP);
6240 if (arm_is_imm12 (inst->inst_offset + 4)) {
6241 ARM_STR_IMM (code, ainfo->reg + 1, inst->inst_basereg, inst->inst_offset + 4);
6243 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset + 4);
6244 ARM_STR_REG_REG (code, ainfo->reg + 1, inst->inst_basereg, ARMREG_IP);
6248 if (arm_is_imm12 (inst->inst_offset)) {
6249 ARM_STR_IMM (code, ainfo->reg, inst->inst_basereg, inst->inst_offset);
6251 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
6252 ARM_STR_REG_REG (code, ainfo->reg, inst->inst_basereg, ARMREG_IP);
6256 } else if (ainfo->storage == RegTypeBaseGen) {
6257 if (arm_is_imm12 (prev_sp_offset + ainfo->offset)) {
6258 ARM_LDR_IMM (code, ARMREG_LR, ARMREG_SP, (prev_sp_offset + ainfo->offset));
6260 code = mono_arm_emit_load_imm (code, ARMREG_IP, prev_sp_offset + ainfo->offset);
6261 ARM_LDR_REG_REG (code, ARMREG_LR, ARMREG_SP, ARMREG_IP);
6263 if (arm_is_imm12 (inst->inst_offset + 4)) {
6264 ARM_STR_IMM (code, ARMREG_LR, inst->inst_basereg, inst->inst_offset + 4);
6265 ARM_STR_IMM (code, ARMREG_R3, inst->inst_basereg, inst->inst_offset);
6267 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset + 4);
6268 ARM_STR_REG_REG (code, ARMREG_LR, inst->inst_basereg, ARMREG_IP);
6269 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
6270 ARM_STR_REG_REG (code, ARMREG_R3, inst->inst_basereg, ARMREG_IP);
6272 } else if (ainfo->storage == RegTypeBase || ainfo->storage == RegTypeGSharedVtOnStack) {
6273 if (arm_is_imm12 (prev_sp_offset + ainfo->offset)) {
6274 ARM_LDR_IMM (code, ARMREG_LR, ARMREG_SP, (prev_sp_offset + ainfo->offset));
6276 code = mono_arm_emit_load_imm (code, ARMREG_IP, prev_sp_offset + ainfo->offset);
6277 ARM_LDR_REG_REG (code, ARMREG_LR, ARMREG_SP, ARMREG_IP);
6280 switch (ainfo->size) {
6282 if (arm_is_imm8 (inst->inst_offset)) {
6283 ARM_STRB_IMM (code, ARMREG_LR, inst->inst_basereg, inst->inst_offset);
6285 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
6286 ARM_STRB_REG_REG (code, ARMREG_LR, inst->inst_basereg, ARMREG_IP);
6290 if (arm_is_imm8 (inst->inst_offset)) {
6291 ARM_STRH_IMM (code, ARMREG_LR, inst->inst_basereg, inst->inst_offset);
6293 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
6294 ARM_STRH_REG_REG (code, ARMREG_LR, inst->inst_basereg, ARMREG_IP);
6298 if (arm_is_imm12 (inst->inst_offset)) {
6299 ARM_STR_IMM (code, ARMREG_LR, inst->inst_basereg, inst->inst_offset);
6301 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
6302 ARM_STR_REG_REG (code, ARMREG_LR, inst->inst_basereg, ARMREG_IP);
6304 if (arm_is_imm12 (prev_sp_offset + ainfo->offset + 4)) {
6305 ARM_LDR_IMM (code, ARMREG_LR, ARMREG_SP, (prev_sp_offset + ainfo->offset + 4));
6307 code = mono_arm_emit_load_imm (code, ARMREG_IP, prev_sp_offset + ainfo->offset + 4);
6308 ARM_LDR_REG_REG (code, ARMREG_LR, ARMREG_SP, ARMREG_IP);
6310 if (arm_is_imm12 (inst->inst_offset + 4)) {
6311 ARM_STR_IMM (code, ARMREG_LR, inst->inst_basereg, inst->inst_offset + 4);
6313 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset + 4);
6314 ARM_STR_REG_REG (code, ARMREG_LR, inst->inst_basereg, ARMREG_IP);
6318 if (arm_is_imm12 (inst->inst_offset)) {
6319 ARM_STR_IMM (code, ARMREG_LR, inst->inst_basereg, inst->inst_offset);
6321 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
6322 ARM_STR_REG_REG (code, ARMREG_LR, inst->inst_basereg, ARMREG_IP);
6326 } else if (ainfo->storage == RegTypeFP) {
6327 int imm8, rot_amount;
6329 if ((imm8 = mono_arm_is_rotated_imm8 (inst->inst_offset, &rot_amount)) == -1) {
6330 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
6331 ARM_ADD_REG_REG (code, ARMREG_IP, ARMREG_IP, inst->inst_basereg);
6333 ARM_ADD_REG_IMM (code, ARMREG_IP, inst->inst_basereg, imm8, rot_amount);
6335 if (ainfo->size == 8)
6336 ARM_FSTD (code, ainfo->reg, ARMREG_IP, 0);
6338 ARM_FSTS (code, ainfo->reg, ARMREG_IP, 0);
6339 } else if (ainfo->storage == RegTypeStructByVal) {
6340 int doffset = inst->inst_offset;
6344 size = mini_type_stack_size_full (cfg->generic_sharing_context, inst->inst_vtype, NULL, sig->pinvoke);
6345 for (cur_reg = 0; cur_reg < ainfo->size; ++cur_reg) {
6346 if (arm_is_imm12 (doffset)) {
6347 ARM_STR_IMM (code, ainfo->reg + cur_reg, inst->inst_basereg, doffset);
6349 code = mono_arm_emit_load_imm (code, ARMREG_IP, doffset);
6350 ARM_STR_REG_REG (code, ainfo->reg + cur_reg, inst->inst_basereg, ARMREG_IP);
6352 soffset += sizeof (gpointer);
6353 doffset += sizeof (gpointer);
6355 if (ainfo->vtsize) {
6356 /* FIXME: handle overrun! with struct sizes not multiple of 4 */
6357 //g_print ("emit_memcpy (prev_sp_ofs: %d, ainfo->offset: %d, soffset: %d)\n", prev_sp_offset, ainfo->offset, soffset);
6358 code = emit_memcpy (code, ainfo->vtsize * sizeof (gpointer), inst->inst_basereg, doffset, ARMREG_SP, prev_sp_offset + ainfo->offset);
6360 } else if (ainfo->storage == RegTypeStructByAddr) {
6361 g_assert_not_reached ();
6362 /* FIXME: handle overrun! with struct sizes not multiple of 4 */
6363 code = emit_memcpy (code, ainfo->vtsize * sizeof (gpointer), inst->inst_basereg, inst->inst_offset, ainfo->reg, 0);
6365 g_assert_not_reached ();
6370 if (method->save_lmf)
6371 code = emit_save_lmf (cfg, code, alloc_size - lmf_offset);
6374 code = mono_arch_instrument_prolog (cfg, mono_trace_enter_method, code, TRUE);
6376 if (cfg->arch.seq_point_info_var) {
6377 MonoInst *ins = cfg->arch.seq_point_info_var;
6379 /* Initialize the variable from a GOT slot */
6380 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_SEQ_POINT_INFO, cfg->method);
6381 #ifdef USE_JUMP_TABLES
6383 gpointer *jte = mono_jumptable_add_entry ();
6384 code = mono_arm_load_jumptable_entry (code, jte, ARMREG_IP);
6385 ARM_LDR_IMM (code, ARMREG_R0, ARMREG_IP, 0);
6387 /** XXX: is it correct? */
6389 ARM_LDR_IMM (code, ARMREG_R0, ARMREG_PC, 0);
6391 *(gpointer*)code = NULL;
6394 ARM_LDR_REG_REG (code, ARMREG_R0, ARMREG_PC, ARMREG_R0);
6396 g_assert (ins->opcode == OP_REGOFFSET);
6398 if (arm_is_imm12 (ins->inst_offset)) {
6399 ARM_STR_IMM (code, ARMREG_R0, ins->inst_basereg, ins->inst_offset);
6401 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_offset);
6402 ARM_STR_REG_REG (code, ARMREG_R0, ins->inst_basereg, ARMREG_LR);
6406 /* Initialize ss_trigger_page_var */
6407 if (!cfg->soft_breakpoints) {
6408 MonoInst *info_var = cfg->arch.seq_point_info_var;
6409 MonoInst *ss_trigger_page_var = cfg->arch.ss_trigger_page_var;
6410 int dreg = ARMREG_LR;
6413 g_assert (info_var->opcode == OP_REGOFFSET);
6414 g_assert (arm_is_imm12 (info_var->inst_offset));
6416 ARM_LDR_IMM (code, dreg, info_var->inst_basereg, info_var->inst_offset);
6417 /* Load the trigger page addr */
6418 ARM_LDR_IMM (code, dreg, dreg, MONO_STRUCT_OFFSET (SeqPointInfo, ss_trigger_page));
6419 ARM_STR_IMM (code, dreg, ss_trigger_page_var->inst_basereg, ss_trigger_page_var->inst_offset);
6423 if (cfg->arch.seq_point_read_var) {
6424 MonoInst *read_ins = cfg->arch.seq_point_read_var;
6425 MonoInst *ss_method_ins = cfg->arch.seq_point_ss_method_var;
6426 MonoInst *bp_method_ins = cfg->arch.seq_point_bp_method_var;
6427 #ifdef USE_JUMP_TABLES
6430 g_assert (read_ins->opcode == OP_REGOFFSET);
6431 g_assert (arm_is_imm12 (read_ins->inst_offset));
6432 g_assert (ss_method_ins->opcode == OP_REGOFFSET);
6433 g_assert (arm_is_imm12 (ss_method_ins->inst_offset));
6434 g_assert (bp_method_ins->opcode == OP_REGOFFSET);
6435 g_assert (arm_is_imm12 (bp_method_ins->inst_offset));
6437 #ifdef USE_JUMP_TABLES
6438 jte = mono_jumptable_add_entries (3);
6439 jte [0] = (gpointer)&ss_trigger_var;
6440 jte [1] = single_step_func_wrapper;
6441 jte [2] = breakpoint_func_wrapper;
6442 code = mono_arm_load_jumptable_entry_addr (code, jte, ARMREG_LR);
6444 ARM_MOV_REG_REG (code, ARMREG_LR, ARMREG_PC);
6446 *(volatile int **)code = &ss_trigger_var;
6448 *(gpointer*)code = single_step_func_wrapper;
6450 *(gpointer*)code = breakpoint_func_wrapper;
6454 ARM_LDR_IMM (code, ARMREG_IP, ARMREG_LR, 0);
6455 ARM_STR_IMM (code, ARMREG_IP, read_ins->inst_basereg, read_ins->inst_offset);
6456 ARM_LDR_IMM (code, ARMREG_IP, ARMREG_LR, 4);
6457 ARM_STR_IMM (code, ARMREG_IP, ss_method_ins->inst_basereg, ss_method_ins->inst_offset);
6458 ARM_LDR_IMM (code, ARMREG_IP, ARMREG_LR, 8);
6459 ARM_STR_IMM (code, ARMREG_IP, bp_method_ins->inst_basereg, bp_method_ins->inst_offset);
6462 cfg->code_len = code - cfg->native_code;
6463 g_assert (cfg->code_len < cfg->code_size);
/*
 * mono_arch_emit_epilog:
 *
 *   Emit the native epilog for CFG: grow the code buffer if needed, optionally
 * instrument the method leave, reload a struct-returned-by-value into r0, then
 * restore the LMF and/or callee-saved registers and return by popping the
 * saved LR into PC.
 *
 * NOTE(review): several source lines of this function are elided in this view;
 * the comments below describe only the code that is visible.
 */
6470 mono_arch_emit_epilog (MonoCompile *cfg)
6472 MonoMethod *method = cfg->method;
6473 int pos, i, rot_amount;
/* Conservative upper bound (bytes) on the code emitted below. */
6474 int max_epilog_size = 16 + 20*4;
6478 if (cfg->method->save_lmf)
6479 max_epilog_size += 128;
6481 if (mono_jit_trace_calls != NULL)
6482 max_epilog_size += 50;
6484 if (cfg->prof_options & MONO_PROFILE_ENTER_LEAVE)
6485 max_epilog_size += 50;
/* Double the native code buffer until the worst-case epilog fits. */
6487 while (cfg->code_len + max_epilog_size > (cfg->code_size - 16)) {
6488 cfg->code_size *= 2;
6489 cfg->native_code = g_realloc (cfg->native_code, cfg->code_size);
6490 cfg->stat_code_reallocs++;
6494 * Keep in sync with OP_JMP
6496 code = cfg->native_code + cfg->code_len;
6498 if (mono_jit_trace_calls != NULL && mono_trace_eval (method)) {
6499 code = mono_arch_instrument_epilog (cfg, mono_trace_leave_method, code, TRUE);
6503 /* Load returned vtypes into registers if needed */
6504 cinfo = cfg->arch.cinfo;
6505 if (cinfo->ret.storage == RegTypeStructByVal) {
6506 MonoInst *ins = cfg->ret;
6508 if (arm_is_imm12 (ins->inst_offset)) {
6509 ARM_LDR_IMM (code, ARMREG_R0, ins->inst_basereg, ins->inst_offset);
/* Offset does not fit the 12-bit LDR immediate: materialize it in LR first. */
6511 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_offset);
6512 ARM_LDR_REG_REG (code, ARMREG_R0, ins->inst_basereg, ARMREG_LR);
6516 if (method->save_lmf) {
6517 int lmf_offset, reg, sp_adj, regmask;
6518 /* all but r0-r3, sp and pc */
6519 pos += sizeof (MonoLMF) - (MONO_ARM_NUM_SAVED_REGS * sizeof (mgreg_t));
6522 code = emit_restore_lmf (cfg, code, cfg->stack_usage - lmf_offset);
6524 /* This points to r4 inside MonoLMF->iregs */
6525 sp_adj = (sizeof (MonoLMF) - MONO_ARM_NUM_SAVED_REGS * sizeof (mgreg_t));
6527 regmask = 0x9ff0; /* restore lr to pc */
6528 /* Skip caller saved registers not used by the method */
6529 while (!(cfg->used_int_regs & (1 << reg)) && reg < ARMREG_FP) {
6530 regmask &= ~(1 << reg);
6535 /* Restored later */
6536 regmask &= ~(1 << ARMREG_PC);
6537 /* point sp at the registers to restore: 10 is 14 -4, because we skip r0-r3 */
6538 code = emit_big_add (code, ARMREG_SP, cfg->frame_reg, cfg->stack_usage - lmf_offset + sp_adj);
6540 ARM_POP (code, regmask);
6542 /* Restore saved r7, restore LR to PC */
6543 /* Skip lr from the lmf */
6544 ARM_ADD_REG_IMM (code, ARMREG_SP, ARMREG_SP, sizeof (gpointer), 0);
6545 ARM_POP (code, (1 << ARMREG_R7) | (1 << ARMREG_PC));
/* No LMF: unwind sp from the frame register, preferring an encodable
 * rotated 8-bit immediate over a load through IP. */
6548 if ((i = mono_arm_is_rotated_imm8 (cfg->stack_usage, &rot_amount)) >= 0) {
6549 ARM_ADD_REG_IMM (code, ARMREG_SP, cfg->frame_reg, i, rot_amount);
6551 code = mono_arm_emit_load_imm (code, ARMREG_IP, cfg->stack_usage);
6552 ARM_ADD_REG_REG (code, ARMREG_SP, cfg->frame_reg, ARMREG_IP);
6556 /* Restore saved gregs */
6557 if (cfg->used_int_regs)
6558 ARM_POP (code, cfg->used_int_regs);
6559 /* Restore saved r7, restore LR to PC */
6560 ARM_POP (code, (1 << ARMREG_R7) | (1 << ARMREG_PC));
6562 ARM_POP (code, cfg->used_int_regs | (1 << ARMREG_PC));
6566 cfg->code_len = code - cfg->native_code;
6568 g_assert (cfg->code_len < cfg->code_size);
/*
 * mono_arch_emit_exceptions:
 *
 *   Append out-of-line exception-throwing stubs for every MONO_PATCH_INFO_EXC
 * patch in CFG. Throw sites for the same exception class share one stub
 * (tracked in exc_throw_pos/exc_throw_found); each stub ends up calling
 * mono_arch_throw_corlib_exception with the class token and the throw IP.
 *
 * NOTE(review): some source lines are elided in this view; comments describe
 * only the visible code.
 */
6573 mono_arch_emit_exceptions (MonoCompile *cfg)
6575 MonoJumpInfo *patch_info;
/* One slot per intrinsic exception: stub address, and whether its size
 * has already been accounted for. */
6578 guint8* exc_throw_pos [MONO_EXC_INTRINS_NUM];
6579 guint8 exc_throw_found [MONO_EXC_INTRINS_NUM];
6580 int max_epilog_size = 50;
6582 for (i = 0; i < MONO_EXC_INTRINS_NUM; i++) {
6583 exc_throw_pos [i] = NULL;
6584 exc_throw_found [i] = 0;
6587 /* count the number of exception infos */
6590 * make sure we have enough space for exceptions
6592 for (patch_info = cfg->patch_info; patch_info; patch_info = patch_info->next) {
6593 if (patch_info->type == MONO_PATCH_INFO_EXC) {
6594 i = mini_exception_id_by_name (patch_info->data.target);
6595 if (!exc_throw_found [i]) {
6596 max_epilog_size += 32;
6597 exc_throw_found [i] = TRUE;
/* Grow the code buffer until the worst-case stub code fits. */
6602 while (cfg->code_len + max_epilog_size > (cfg->code_size - 16)) {
6603 cfg->code_size *= 2;
6604 cfg->native_code = g_realloc (cfg->native_code, cfg->code_size);
6605 cfg->stat_code_reallocs++;
6608 code = cfg->native_code + cfg->code_len;
6610 /* add code to raise exceptions */
6611 for (patch_info = cfg->patch_info; patch_info; patch_info = patch_info->next) {
6612 switch (patch_info->type) {
6613 case MONO_PATCH_INFO_EXC: {
6614 MonoClass *exc_class;
6615 unsigned char *ip = patch_info->ip.i + cfg->native_code;
6617 i = mini_exception_id_by_name (patch_info->data.target);
/* A stub for this exception already exists: just redirect the branch
 * at the throw site to it and drop the patch. */
6618 if (exc_throw_pos [i]) {
6619 arm_patch (ip, exc_throw_pos [i]);
6620 patch_info->type = MONO_PATCH_INFO_NONE;
6623 exc_throw_pos [i] = code;
6625 arm_patch (ip, code);
6627 exc_class = mono_class_from_name (mono_defaults.corlib, "System", patch_info->data.name);
6628 g_assert (exc_class);
/* r1 = throw IP (the LR at the branch site). */
6630 ARM_MOV_REG_REG (code, ARMREG_R1, ARMREG_LR);
6631 #ifdef USE_JUMP_TABLES
6633 gpointer *jte = mono_jumptable_add_entries (2);
/* Reuse this patch to resolve mono_arch_throw_corlib_exception. */
6634 patch_info->type = MONO_PATCH_INFO_INTERNAL_METHOD;
6635 patch_info->data.name = "mono_arch_throw_corlib_exception";
6636 patch_info->ip.i = code - cfg->native_code;
6637 code = mono_arm_load_jumptable_entry_addr (code, jte, ARMREG_R0);
6638 ARM_LDR_IMM (code, ARMREG_IP, ARMREG_R0, 0);
6639 ARM_LDR_IMM (code, ARMREG_R0, ARMREG_R0, 4);
6640 ARM_BLX_REG (code, ARMREG_IP);
6641 jte [1] = GUINT_TO_POINTER (exc_class->type_token);
/* Non-jumptable path: token is embedded in the instruction stream and
 * loaded PC-relative into r0. */
6644 ARM_LDR_IMM (code, ARMREG_R0, ARMREG_PC, 0);
6645 patch_info->type = MONO_PATCH_INFO_INTERNAL_METHOD;
6646 patch_info->data.name = "mono_arch_throw_corlib_exception";
6647 patch_info->ip.i = code - cfg->native_code;
6649 *(guint32*)(gpointer)code = exc_class->type_token;
6660 cfg->code_len = code - cfg->native_code;
6662 g_assert (cfg->code_len < cfg->code_size);
6666 #endif /* #ifndef DISABLE_JIT */
/* Arch-specific late-initialization hook (body elided in this view). */
6669 mono_arch_finish_init (void)
/* Free arch-specific per-thread JIT TLS data (body elided in this view). */
6674 mono_arch_free_jit_tls_data (MonoJitTlsData *tls)
/* Arch hook for emitting intrinsic IR for specific methods; body elided in
 * this view (presumably returns NULL when no intrinsic applies — confirm). */
6679 mono_arch_emit_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
/* Arch hook for custom printing of IR trees (body elided in this view). */
6686 mono_arch_print_tree (MonoInst *tree, int arity)
/* Return the offset of the patchable part of the instruction at CODE
 * (body elided in this view). */
6696 mono_arch_get_patch_offset (guint8 *code)
/* No register windows on ARM; this hook exists for SPARC-like targets
 * (body elided in this view). */
6703 mono_arch_flush_register_windows (void)
/*
 * mono_arch_emit_imt_argument:
 *
 *   Arrange for the IMT/method argument of CALL to be passed in register V5.
 * Under AOT or when a dynamic IMT argument is needed (generic context, an
 * explicit imt_arg, LLVM, or jump tables), the method is loaded into a vreg
 * and bound to ARMREG_V5 as an out-argument.
 */
6710 mono_arch_emit_imt_argument (MonoCompile *cfg, MonoCallInst *call, MonoInst *imt_arg)
6712 int method_reg = mono_alloc_ireg (cfg);
/* With jump tables the register-passing path is always taken. */
6713 #ifdef USE_JUMP_TABLES
6714 int use_jumptables = TRUE;
6716 int use_jumptables = FALSE;
6719 if (cfg->compile_aot) {
6722 call->dynamic_imt_arg = TRUE;
6725 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, method_reg, imt_arg->dreg);
/* No explicit imt_arg: load the method as an AOT constant. */
6727 MONO_INST_NEW (cfg, ins, OP_AOTCONST);
6728 ins->dreg = method_reg;
6729 ins->inst_p0 = call->method;
6730 ins->inst_c1 = MONO_PATCH_INFO_METHODCONST;
6731 MONO_ADD_INS (cfg->cbb, ins);
6733 mono_call_inst_add_outarg_reg (cfg, call, method_reg, ARMREG_V5, FALSE);
6734 } else if (cfg->generic_context || imt_arg || mono_use_llvm || use_jumptables) {
6735 /* Always pass in a register for simplicity */
6736 call->dynamic_imt_arg = TRUE;
6738 cfg->uses_rgctx_reg = TRUE;
6741 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, method_reg, imt_arg->dreg);
/* JIT case: the MonoMethod pointer is a plain pointer constant. */
6745 MONO_INST_NEW (cfg, ins, OP_PCONST);
6746 ins->inst_p0 = call->method;
6747 ins->dreg = method_reg;
6748 MONO_ADD_INS (cfg->cbb, ins);
6751 mono_call_inst_add_outarg_reg (cfg, call, method_reg, ARMREG_V5, FALSE);
6755 #endif /* DISABLE_JIT */
/*
 * mono_arch_find_imt_method:
 *
 *   Recover the MonoMethod used for an IMT call, given the saved register
 * state REGS and the call-site address CODE. With jump tables the method is
 * always in V5; otherwise it is normally embedded in the code stream right
 * after a PC-relative LDR, with V5 as the fallback for AOT/gsharedvt code.
 */
6758 mono_arch_find_imt_method (mgreg_t *regs, guint8 *code)
6760 #ifdef USE_JUMP_TABLES
6761 return (MonoMethod*)regs [ARMREG_V5];
6764 guint32 *code_ptr = (guint32*)code;
6766 method = GUINT_TO_POINTER (code_ptr [1]);
/* NULL in the code stream means the method was passed in V5 instead. */
6770 return (MonoMethod*)regs [ARMREG_V5];
6772 /* The IMT value is stored in the code stream right after the LDC instruction. */
6773 /* This is no longer true for the gsharedvt_in trampoline */
6775 if (!IS_LDR_PC (code_ptr [0])) {
6776 g_warning ("invalid code stream, instruction before IMT value is not a LDC in %s() (code %p value 0: 0x%x -1: 0x%x -2: 0x%x)", __FUNCTION__, code, code_ptr [2], code_ptr [1], code_ptr [0]);
6777 g_assert (IS_LDR_PC (code_ptr [0]));
6781 /* This is AOTed code, or the gsharedvt trampoline, the IMT method is in V5 */
6782 return (MonoMethod*)regs [ARMREG_V5];
6784 return (MonoMethod*) method;
/* Recover the vtable of a static rgctx call from the saved RGCTX register. */
6789 mono_arch_find_static_call_vtable (mgreg_t *regs, guint8 *code)
6791 return (MonoVTable*) regs [MONO_ARCH_RGCTX_REG];
6794 /* #define ENABLE_WRONG_METHOD_CHECK 1 */
6795 #define BASE_SIZE (6 * 4)
6796 #define BSEARCH_ENTRY_SIZE (4 * 4)
6797 #define CMP_SIZE (3 * 4)
6798 #define BRANCH_SIZE (1 * 4)
6799 #define CALL_SIZE (2 * 4)
6800 #define WMC_SIZE (8 * 4)
6801 #define DISTANCE(A, B) (((gint32)(B)) - ((gint32)(A)))
6803 #ifdef USE_JUMP_TABLES
/* Store VALUE into slot INDEX of the jump table BASE; each slot may only be
 * filled once (asserts the slot is still NULL). */
6805 set_jumptable_element (gpointer *base, guint32 index, gpointer value)
6807 g_assert (base [index] == NULL);
6808 base [index] = value;
/* Emit a conditional load of jump-table entry JTI (byte offset jti*4 from
 * BASE) into DREG. Uses a single LDR when the offset fits the 12-bit
 * immediate; otherwise materializes the offset with MOVW/MOVT and does a
 * register-offset LDR. */
6811 load_element_with_regbase_cond (arminstr_t *code, ARMReg dreg, ARMReg base, guint32 jti, int cond)
6813 if (arm_is_imm12 (jti * 4)) {
6814 ARM_LDR_IMM_COND (code, dreg, base, jti * 4, cond);
6816 ARM_MOVW_REG_IMM_COND (code, dreg, (jti * 4) & 0xffff, cond);
/* Only emit MOVT when the offset needs the upper half-word. */
6817 if ((jti * 4) >> 16)
6818 ARM_MOVT_REG_IMM_COND (code, dreg, ((jti * 4) >> 16) & 0xffff, cond);
6819 ARM_LDR_REG_REG_SHIFT_COND (code, dreg, base, dreg, ARMSHIFT_LSL, 0, cond);
/* Patch the PC-relative LDR at TARGET so its 12-bit offset reaches CODE,
 * where VALUE is emitted into the instruction stream.
 * NOTE(review): `delta` is guint32, so the `delta >= 0` half of the assert is
 * vacuously true; a CODE before TARGET would wrap around and be caught by the
 * `<= 0xFFF` half instead. Consider a signed type — confirm before changing. */
6825 arm_emit_value_and_patch_ldr (arminstr_t *code, arminstr_t *target, guint32 value)
6827 guint32 delta = DISTANCE (target, code);
6829 g_assert (delta >= 0 && delta <= 0xFFF);
6830 *target = *target | delta;
6836 #ifdef ENABLE_WRONG_METHOD_CHECK
/* Debug helper (ENABLE_WRONG_METHOD_CHECK only): report an IMT mismatch. */
6838 mini_dump_bad_imt (int input_imt, int compared_imt, int pc)
6840 g_print ("BAD IMT comparing %x with expected %x at ip %x", input_imt, compared_imt, pc);
/*
 * mono_arch_build_imt_thunk:
 *
 *   Build the native IMT dispatch thunk for VTABLE. Pass 1 sizes each
 * MonoIMTCheckItem chunk; pass 2 emits compare/branch or bsearch-style
 * entries, loading either the target code address or the vtable slot and
 * branching to it; pass 3 back-patches the forward jumps and fills in the
 * constant pools / jump-table slots. Two emission strategies coexist, chosen
 * at compile time by USE_JUMP_TABLES.
 *
 * NOTE(review): many source lines of this function are elided in this view;
 * comments describe only the visible code.
 */
6846 mono_arch_build_imt_thunk (MonoVTable *vtable, MonoDomain *domain, MonoIMTCheckItem **imt_entries, int count,
6847 gpointer fail_tramp)
6850 arminstr_t *code, *start;
6851 #ifdef USE_JUMP_TABLES
6854 gboolean large_offsets = FALSE;
6855 guint32 **constant_pool_starts;
6856 arminstr_t *vtable_target = NULL;
6857 int extra_space = 0;
6859 #ifdef ENABLE_WRONG_METHOD_CHECK
/* --- Pass 1: compute per-item chunk sizes and the total thunk size. --- */
6864 #ifdef USE_JUMP_TABLES
6865 for (i = 0; i < count; ++i) {
6866 MonoIMTCheckItem *item = imt_entries [i];
6867 item->chunk_size += 4 * 16;
6868 if (!item->is_equals)
6869 imt_entries [item->check_target_idx]->compare_done = TRUE;
6870 size += item->chunk_size;
6873 constant_pool_starts = g_new0 (guint32*, count);
6875 for (i = 0; i < count; ++i) {
6876 MonoIMTCheckItem *item = imt_entries [i];
6877 if (item->is_equals) {
6878 gboolean fail_case = !item->check_target_idx && fail_tramp;
/* Out-of-line constants are needed when the target is explicit code or
 * the vtable slot is beyond the 12-bit LDR immediate range. */
6880 if (item->has_target_code || !arm_is_imm12 (DISTANCE (vtable, &vtable->vtable[item->value.vtable_slot]))) {
6881 item->chunk_size += 32;
6882 large_offsets = TRUE;
6885 if (item->check_target_idx || fail_case) {
6886 if (!item->compare_done || fail_case)
6887 item->chunk_size += CMP_SIZE;
6888 item->chunk_size += BRANCH_SIZE;
6890 #ifdef ENABLE_WRONG_METHOD_CHECK
6891 item->chunk_size += WMC_SIZE;
6895 item->chunk_size += 16;
6896 large_offsets = TRUE;
6898 item->chunk_size += CALL_SIZE;
6900 item->chunk_size += BSEARCH_ENTRY_SIZE;
6901 imt_entries [item->check_target_idx]->compare_done = TRUE;
6903 size += item->chunk_size;
6907 size += 4 * count; /* The ARM_ADD_REG_IMM to pop the stack */
/* Allocate the thunk memory from the domain. */
6911 code = mono_method_alloc_generic_virtual_thunk (domain, size);
6913 code = mono_domain_code_reserve (domain, size);
6917 g_print ("Building IMT thunk for class %s %s entries %d code size %d code at %p end %p vtable %p fail_tramp %p\n", vtable->klass->name_space, vtable->klass->name, count, size, start, ((guint8*)start) + size, vtable, fail_tramp);
6918 for (i = 0; i < count; ++i) {
6919 MonoIMTCheckItem *item = imt_entries [i];
6920 g_print ("method %d (%p) %s vtable slot %p is_equals %d chunk size %d\n", i, item->key, ((MonoMethod*)item->key)->name, &vtable->vtable [item->value.vtable_slot], item->is_equals, item->chunk_size);
/* --- Pass 2: emit the thunk prologue, then one chunk per item. --- */
6924 #ifdef USE_JUMP_TABLES
6925 ARM_PUSH3 (code, ARMREG_R0, ARMREG_R1, ARMREG_R2);
6926 /* If jumptables we always pass the IMT method in R5 */
6927 ARM_MOV_REG_REG (code, ARMREG_R0, ARMREG_V5);
/* Jump-table slot layout: slot 0 = vtable, then 3 records per item. */
6928 #define VTABLE_JTI 0
6929 #define IMT_METHOD_OFFSET 0
6930 #define TARGET_CODE_OFFSET 1
6931 #define JUMP_CODE_OFFSET 2
6932 #define RECORDS_PER_ENTRY 3
6933 #define IMT_METHOD_JTI(idx) (1 + idx * RECORDS_PER_ENTRY + IMT_METHOD_OFFSET)
6934 #define TARGET_CODE_JTI(idx) (1 + idx * RECORDS_PER_ENTRY + TARGET_CODE_OFFSET)
6935 #define JUMP_CODE_JTI(idx) (1 + idx * RECORDS_PER_ENTRY + JUMP_CODE_OFFSET)
6937 jte = mono_jumptable_add_entries (RECORDS_PER_ENTRY * count + 1 /* vtable */);
6938 code = (arminstr_t *) mono_arm_load_jumptable_entry_addr ((guint8 *) code, jte, ARMREG_R2);
6939 ARM_LDR_IMM (code, ARMREG_IP, ARMREG_R2, VTABLE_JTI);
6940 set_jumptable_element (jte, VTABLE_JTI, vtable);
6943 ARM_PUSH4 (code, ARMREG_R0, ARMREG_R1, ARMREG_IP, ARMREG_PC);
6945 ARM_PUSH2 (code, ARMREG_R0, ARMREG_R1);
/* R0 = IMT method pointer fetched from the call site (LR - 4). */
6946 ARM_LDR_IMM (code, ARMREG_R0, ARMREG_LR, -4);
6947 vtable_target = code;
6948 ARM_LDR_IMM (code, ARMREG_IP, ARMREG_PC, 0);
6950 if (mono_use_llvm) {
6951 /* LLVM always passes the IMT method in R5 */
6952 ARM_MOV_REG_REG (code, ARMREG_R0, ARMREG_V5);
6954 /* R0 == 0 means we are called from AOT code. In this case, V5 contains the IMT method */
6955 ARM_CMP_REG_IMM8 (code, ARMREG_R0, 0);
6956 ARM_MOV_REG_REG_COND (code, ARMREG_R0, ARMREG_V5, ARMCOND_EQ);
6960 for (i = 0; i < count; ++i) {
6961 MonoIMTCheckItem *item = imt_entries [i];
6962 #ifdef USE_JUMP_TABLES
6963 guint32 imt_method_jti = 0, target_code_jti = 0;
6965 arminstr_t *imt_method = NULL, *vtable_offset_ins = NULL, *target_code_ins = NULL;
6967 gint32 vtable_offset;
6969 item->code_target = (guint8*)code;
6971 if (item->is_equals) {
6972 gboolean fail_case = !item->check_target_idx && fail_tramp;
/* Equality entry: compare R0 against this item's key, branch away on NE. */
6974 if (item->check_target_idx || fail_case) {
6975 if (!item->compare_done || fail_case) {
6976 #ifdef USE_JUMP_TABLES
6977 imt_method_jti = IMT_METHOD_JTI (i);
6978 code = load_element_with_regbase_cond (code, ARMREG_R1, ARMREG_R2, imt_method_jti, ARMCOND_AL);
6981 ARM_LDR_IMM (code, ARMREG_R1, ARMREG_PC, 0);
6983 ARM_CMP_REG_REG (code, ARMREG_R0, ARMREG_R1);
6985 #ifdef USE_JUMP_TABLES
6986 code = load_element_with_regbase_cond (code, ARMREG_R1, ARMREG_R2, JUMP_CODE_JTI (i), ARMCOND_NE);
6987 ARM_BX_COND (code, ARMCOND_NE, ARMREG_R1);
/* jmp_code holds the jump-table slot index (not a code address) here. */
6988 item->jmp_code = GUINT_TO_POINTER (JUMP_CODE_JTI (i));
6990 item->jmp_code = (guint8*)code;
6991 ARM_B_COND (code, ARMCOND_NE, 0);
6994 /*Enable the commented code to assert on wrong method*/
6995 #ifdef ENABLE_WRONG_METHOD_CHECK
6996 #ifdef USE_JUMP_TABLES
6997 imt_method_jti = IMT_METHOD_JTI (i);
6998 code = load_element_with_regbase_cond (code, ARMREG_R1, ARMREG_R2, imt_method_jti, ARMCOND_AL);
7001 ARM_LDR_IMM (code, ARMREG_R1, ARMREG_PC, 0);
7003 ARM_CMP_REG_REG (code, ARMREG_R0, ARMREG_R1);
7005 ARM_B_COND (code, ARMCOND_EQ, 0);
7007 /* Define this if your system is so bad that gdb is failing. */
7008 #ifdef BROKEN_DEV_ENV
7009 ARM_MOV_REG_REG (code, ARMREG_R2, ARMREG_PC);
7011 arm_patch (code - 1, mini_dump_bad_imt);
7015 arm_patch (cond, code);
7019 if (item->has_target_code) {
7020 /* Load target address */
7021 #ifdef USE_JUMP_TABLES
7022 target_code_jti = TARGET_CODE_JTI (i);
7023 code = load_element_with_regbase_cond (code, ARMREG_R1, ARMREG_R2, target_code_jti, ARMCOND_AL);
7024 /* Restore registers */
7025 ARM_POP3 (code, ARMREG_R0, ARMREG_R1, ARMREG_R2);
7027 ARM_BX (code, ARMREG_R1);
7028 set_jumptable_element (jte, target_code_jti, item->value.target_code);
7030 target_code_ins = code;
7031 ARM_LDR_IMM (code, ARMREG_R1, ARMREG_PC, 0);
7032 /* Save it to the fourth slot */
7033 ARM_STR_IMM (code, ARMREG_R1, ARMREG_SP, 3 * sizeof (gpointer));
7034 /* Restore registers and branch */
7035 ARM_POP4 (code, ARMREG_R0, ARMREG_R1, ARMREG_IP, ARMREG_PC);
7037 code = arm_emit_value_and_patch_ldr (code, target_code_ins, (gsize)item->value.target_code);
/* No explicit target code: branch through the vtable slot instead. */
7040 vtable_offset = DISTANCE (vtable, &vtable->vtable[item->value.vtable_slot]);
7041 if (!arm_is_imm12 (vtable_offset)) {
7043 * We need to branch to a computed address but we don't have
7044 * a free register to store it, since IP must contain the
7045 * vtable address. So we push the two values to the stack, and
7046 * load them both using LDM.
7048 /* Compute target address */
7049 #ifdef USE_JUMP_TABLES
7050 ARM_MOVW_REG_IMM (code, ARMREG_R1, vtable_offset & 0xffff);
7051 if (vtable_offset >> 16)
7052 ARM_MOVT_REG_IMM (code, ARMREG_R1, (vtable_offset >> 16) & 0xffff);
7053 /* IP had vtable base. */
7054 ARM_LDR_REG_REG (code, ARMREG_IP, ARMREG_IP, ARMREG_R1);
7055 /* Restore registers and branch */
7056 ARM_POP3 (code, ARMREG_R0, ARMREG_R1, ARMREG_R2);
7057 ARM_BX (code, ARMREG_IP);
7059 vtable_offset_ins = code;
7060 ARM_LDR_IMM (code, ARMREG_R1, ARMREG_PC, 0);
7061 ARM_LDR_REG_REG (code, ARMREG_R1, ARMREG_IP, ARMREG_R1);
7062 /* Save it to the fourth slot */
7063 ARM_STR_IMM (code, ARMREG_R1, ARMREG_SP, 3 * sizeof (gpointer));
7064 /* Restore registers and branch */
7065 ARM_POP4 (code, ARMREG_R0, ARMREG_R1, ARMREG_IP, ARMREG_PC);
7067 code = arm_emit_value_and_patch_ldr (code, vtable_offset_ins, vtable_offset);
/* Small vtable offset: a single LDR through IP suffices. */
7070 #ifdef USE_JUMP_TABLES
7071 ARM_LDR_IMM (code, ARMREG_IP, ARMREG_IP, vtable_offset);
7072 ARM_POP3 (code, ARMREG_R0, ARMREG_R1, ARMREG_R2);
7073 ARM_BX (code, ARMREG_IP);
7075 ARM_POP2 (code, ARMREG_R0, ARMREG_R1);
7077 ARM_ADD_REG_IMM8 (code, ARMREG_SP, ARMREG_SP, 2 * sizeof (gpointer));
7078 ARM_LDR_IMM (code, ARMREG_PC, ARMREG_IP, vtable_offset);
/* Fail case: route the NE branch of this entry to the fail trampoline. */
7084 #ifdef USE_JUMP_TABLES
7085 set_jumptable_element (jte, GPOINTER_TO_UINT (item->jmp_code), code);
7086 target_code_jti = TARGET_CODE_JTI (i);
7087 /* Load target address */
7088 code = load_element_with_regbase_cond (code, ARMREG_R1, ARMREG_R2, target_code_jti, ARMCOND_AL);
7089 /* Restore registers */
7090 ARM_POP3 (code, ARMREG_R0, ARMREG_R1, ARMREG_R2);
7092 ARM_BX (code, ARMREG_R1);
7093 set_jumptable_element (jte, target_code_jti, fail_tramp);
7095 arm_patch (item->jmp_code, (guchar*)code);
7097 target_code_ins = code;
7098 /* Load target address */
7099 ARM_LDR_IMM (code, ARMREG_R1, ARMREG_PC, 0);
7100 /* Save it to the fourth slot */
7101 ARM_STR_IMM (code, ARMREG_R1, ARMREG_SP, 3 * sizeof (gpointer));
7102 /* Restore registers and branch */
7103 ARM_POP4 (code, ARMREG_R0, ARMREG_R1, ARMREG_IP, ARMREG_PC);
7105 code = arm_emit_value_and_patch_ldr (code, target_code_ins, (gsize)fail_tramp);
7107 item->jmp_code = NULL;
7110 #ifdef USE_JUMP_TABLES
7112 set_jumptable_element (jte, imt_method_jti, item->key);
7115 code = arm_emit_value_and_patch_ldr (code, imt_method, (guint32)item->key);
7117 /*must emit after unconditional branch*/
7118 if (vtable_target) {
7119 code = arm_emit_value_and_patch_ldr (code, vtable_target, (guint32)vtable);
7120 item->chunk_size += 4;
7121 vtable_target = NULL;
7124 /*We reserve the space for bsearch IMT values after the first entry with an absolute jump*/
7125 constant_pool_starts [i] = code;
7127 code += extra_space;
/* Non-equality entry: binary-search comparison, branch on unsigned >=. */
7132 #ifdef USE_JUMP_TABLES
7133 code = load_element_with_regbase_cond (code, ARMREG_R1, ARMREG_R2, IMT_METHOD_JTI (i), ARMCOND_AL);
7134 ARM_CMP_REG_REG (code, ARMREG_R0, ARMREG_R1);
7135 code = load_element_with_regbase_cond (code, ARMREG_R1, ARMREG_R2, JUMP_CODE_JTI (i), ARMCOND_HS);
7136 ARM_BX_COND (code, ARMCOND_HS, ARMREG_R1);
7137 item->jmp_code = GUINT_TO_POINTER (JUMP_CODE_JTI (i));
7139 ARM_LDR_IMM (code, ARMREG_R1, ARMREG_PC, 0);
7140 ARM_CMP_REG_REG (code, ARMREG_R0, ARMREG_R1);
7142 item->jmp_code = (guint8*)code;
7143 ARM_B_COND (code, ARMCOND_HS, 0);
/* --- Pass 3: back-patch forward branches and fill constant pools. --- */
7149 for (i = 0; i < count; ++i) {
7150 MonoIMTCheckItem *item = imt_entries [i];
7151 if (item->jmp_code) {
7152 if (item->check_target_idx)
7153 #ifdef USE_JUMP_TABLES
7154 set_jumptable_element (jte, GPOINTER_TO_UINT (item->jmp_code), imt_entries [item->check_target_idx]->code_target);
7156 arm_patch (item->jmp_code, imt_entries [item->check_target_idx]->code_target);
7159 if (i > 0 && item->is_equals) {
7161 #ifdef USE_JUMP_TABLES
7162 for (j = i - 1; j >= 0 && !imt_entries [j]->is_equals; --j)
7163 set_jumptable_element (jte, IMT_METHOD_JTI (j), imt_entries [j]->key);
7165 arminstr_t *space_start = constant_pool_starts [i];
7166 for (j = i - 1; j >= 0 && !imt_entries [j]->is_equals; --j) {
7167 space_start = arm_emit_value_and_patch_ldr (space_start, (arminstr_t*)imt_entries [j]->code_target, (guint32)imt_entries [j]->key);
7175 char *buff = g_strdup_printf ("thunk_for_class_%s_%s_entries_%d", vtable->klass->name_space, vtable->klass->name, count);
7176 mono_disassemble_code (NULL, (guint8*)start, size, buff);
7181 #ifndef USE_JUMP_TABLES
7182 g_free (constant_pool_starts);
7185 mono_arch_flush_icache ((guint8*)start, size);
7186 mono_profiler_code_buffer_new (start, code - start, MONO_PROFILER_CODE_BUFFER_IMT_TRAMPOLINE, NULL);
7187 mono_stats.imt_thunks_size += code - start;
7189 g_assert (DISTANCE (start, code) <= size);
/* Read integer register REG from the saved machine context. */
7194 mono_arch_context_get_int_reg (MonoContext *ctx, int reg)
7196 return ctx->regs [reg];
/* Write VAL into integer register REG of the saved machine context. */
7200 mono_arch_context_set_int_reg (MonoContext *ctx, int reg, mgreg_t val)
7202 ctx->regs [reg] = val;
* mono_arch_get_trampolines:
7208 * Return a list of MonoTrampInfo structures describing arch specific trampolines
/* Delegates to the ARM exception-trampoline builder. */
7212 mono_arch_get_trampolines (gboolean aot)
7214 return mono_arm_get_exception_trampolines (aot);
/*
 * mono_arch_install_handler_block_guard:
 *
 *   Redirect the saved return address stored in the clause's exvar stack slot
 * to NEW_VALUE, but only if the old value still points inside JI's code
 * (i.e. the guard has not already fired or been replaced).
 */
7218 mono_arch_install_handler_block_guard (MonoJitInfo *ji, MonoJitExceptionInfo *clause, MonoContext *ctx, gpointer new_value)
7225 bp = MONO_CONTEXT_GET_BP (ctx);
7226 lr_loc = (gpointer*)(bp + clause->exvar_offset);
7228 old_value = *lr_loc;
/* Bail out when the stored address is not within the method's code range. */
7229 if ((char*)old_value < (char*)ji->code_start || (char*)old_value > ((char*)ji->code_start + ji->code_size))
7232 *lr_loc = new_value;
7237 #if defined(MONO_ARCH_SOFT_DEBUG_SUPPORTED)
* mono_arch_set_breakpoint:
7241 * Set a breakpoint at the native code corresponding to JI at NATIVE_OFFSET.
7242 * The location should contain code emitted by OP_SEQ_POINT.
/*
 * Three strategies, chosen by debug options and code origin:
 *  - soft breakpoints: patch in a BLX through LR;
 *  - AOT code: record the bp trigger page in the method's SeqPointInfo table;
 *  - JIT code: patch in a load from the bp trigger page (faults when armed).
 */
7245 mono_arch_set_breakpoint (MonoJitInfo *ji, guint8 *ip)
7248 guint32 native_offset = ip - (guint8*)ji->code_start;
7249 MonoDebugOptions *opt = mini_get_debug_options ();
7251 if (opt->soft_breakpoints) {
7252 g_assert (!ji->from_aot);
7254 ARM_BLX_REG (code, ARMREG_LR);
7255 mono_arch_flush_icache (code - 4, 4);
7256 } else if (ji->from_aot) {
7257 SeqPointInfo *info = mono_arch_get_seq_point_info (mono_domain_get (), ji->code_start);
7259 g_assert (native_offset % 4 == 0);
7260 g_assert (info->bp_addrs [native_offset / 4] == 0);
7261 info->bp_addrs [native_offset / 4] = bp_trigger_page;
7263 int dreg = ARMREG_LR;
7265 /* Read from another trigger page */
7266 #ifdef USE_JUMP_TABLES
7267 gpointer *jte = mono_jumptable_add_entry ();
7268 code = mono_arm_load_jumptable_entry (code, jte, dreg);
7269 jte [0] = bp_trigger_page;
/* Non-jumptable path: embed the trigger page address in the code stream. */
7271 ARM_LDR_IMM (code, dreg, ARMREG_PC, 0);
7273 *(int*)code = (int)bp_trigger_page;
7276 ARM_LDR_IMM (code, dreg, dreg, 0);
7278 mono_arch_flush_icache (code - 16, 16);
7281 /* This is currently implemented by emitting an SWI instruction, which
7282 * qemu/linux seems to convert to a SIGILL.
7284 *(int*)code = (0xef << 24) | 8;
7286 mono_arch_flush_icache (code - 4, 4);
* mono_arch_clear_breakpoint:
7294 * Clear the breakpoint at IP.
/* Mirrors mono_arch_set_breakpoint: soft breakpoints are overwritten,
 * AOT breakpoints are cleared in the SeqPointInfo table, and JIT
 * breakpoints have their patched instructions replaced (loop body elided
 * in this view — presumably NOPs; confirm against full source). */
7297 mono_arch_clear_breakpoint (MonoJitInfo *ji, guint8 *ip)
7299 MonoDebugOptions *opt = mini_get_debug_options ();
7303 if (opt->soft_breakpoints) {
7304 g_assert (!ji->from_aot);
7307 mono_arch_flush_icache (code - 4, 4);
7308 } else if (ji->from_aot) {
7309 guint32 native_offset = ip - (guint8*)ji->code_start;
7310 SeqPointInfo *info = mono_arch_get_seq_point_info (mono_domain_get (), ji->code_start);
7312 g_assert (native_offset % 4 == 0);
7313 g_assert (info->bp_addrs [native_offset / 4] == bp_trigger_page);
7314 info->bp_addrs [native_offset / 4] = 0;
7316 for (i = 0; i < 4; ++i)
7319 mono_arch_flush_icache (ip, code - ip);
* mono_arch_start_single_stepping:
7326 * Start single stepping.
/* Arm the single-step trigger page: removing all protections makes the
 * OP_SEQ_POINT loads fault, signalling a step event. */
7329 mono_arch_start_single_stepping (void)
7331 if (ss_trigger_page)
7332 mono_mprotect (ss_trigger_page, mono_pagesize (), 0);
* mono_arch_stop_single_stepping:
7340 * Stop single stepping.
/* Disarm the trigger page by making it readable again, so the sequence
 * point loads succeed silently. */
7343 mono_arch_stop_single_stepping (void)
7345 if (ss_trigger_page)
7346 mono_mprotect (ss_trigger_page, mono_pagesize (), MONO_MMAP_READ);
/* Signal raised when JITted code touches a protected trigger page.
 * NOTE(review): the #if/#else/#endif surrounding these two definitions is
 * elided in this listing; each preprocessor branch defines DBG_SIGNAL
 * exactly once (SIGBUS on some platforms, SIGSEGV elsewhere) — confirm
 * the condition against the full source. */
#define DBG_SIGNAL SIGBUS
#define DBG_SIGNAL SIGSEGV
 * mono_arch_is_single_step_event:
 * Return whenever the machine state in SIGCTX corresponds to a single
/* NOTE(review): the return type (presumably gboolean), braces and the
 * actual return statements are elided in this listing; only the guard
 * and the address-range test are visible. */
mono_arch_is_single_step_event (void *info, void *sigctx)
	siginfo_t *sinfo = info;
	/* No single stepping can be in progress if the trigger page was
	 * never allocated. */
	if (!ss_trigger_page)
	/* Sometimes the address is off by 4 */
	if (sinfo->si_addr >= ss_trigger_page && (guint8*)sinfo->si_addr <= (guint8*)ss_trigger_page + 128)
 * mono_arch_is_breakpoint_event:
 * Return whenever the machine state in SIGCTX corresponds to a breakpoint event.
/* NOTE(review): the return type (presumably gboolean), braces and the
 * return statements are elided in this listing. */
mono_arch_is_breakpoint_event (void *info, void *sigctx)
	siginfo_t *sinfo = info;
	/* Trigger-page breakpoints only exist when the pages were allocated. */
	if (!ss_trigger_page)
	/* Only faults delivered as DBG_SIGNAL can be trigger-page hits. */
	if (sinfo->si_signo == DBG_SIGNAL) {
		/* Sometimes the address is off by 4 */
		if (sinfo->si_addr >= bp_trigger_page && (guint8*)sinfo->si_addr <= (guint8*)bp_trigger_page + 128)
7403 * mono_arch_skip_breakpoint:
7405 * See mini-amd64.c for docs.
7408 mono_arch_skip_breakpoint (MonoContext *ctx, MonoJitInfo *ji)
7410 MONO_CONTEXT_SET_IP (ctx, (guint8*)MONO_CONTEXT_GET_IP (ctx) + 4);
7414 * mono_arch_skip_single_step:
7416 * See mini-amd64.c for docs.
7419 mono_arch_skip_single_step (MonoContext *ctx)
7421 MONO_CONTEXT_SET_IP (ctx, (guint8*)MONO_CONTEXT_GET_IP (ctx) + 4);
7424 #endif /* MONO_ARCH_SOFT_DEBUG_SUPPORTED */
 * mono_arch_get_seq_point_info:
 * See mini-amd64.c for docs.
/* NOTE(review): the return type (presumably SeqPointInfo*), local
 * declarations for 'info'/'ji', the hash lookup key argument, the
 * cache-miss test and the final 'return info;' are elided in this
 * listing. */
mono_arch_get_seq_point_info (MonoDomain *domain, guint8 *code)
	// FIXME: Add a free function
	/* Fast path: check the per-domain cache under the domain lock. */
	mono_domain_lock (domain);
	info = g_hash_table_lookup (domain_jit_info (domain)->arch_seq_points,
	mono_domain_unlock (domain);
	ji = mono_jit_info_table_find (domain, (char*)code);
	/* Trailing storage after the header — presumably one bp_addrs slot
	 * per code word, matching the native_offset/4 indexing used by the
	 * breakpoint set/clear paths. */
	info = g_malloc0 (sizeof (SeqPointInfo) + ji->code_size);
	info->ss_trigger_page = ss_trigger_page;
	info->bp_trigger_page = bp_trigger_page;
	/* Publish the newly built entry under the same lock. */
	mono_domain_lock (domain);
	g_hash_table_insert (domain_jit_info (domain)->arch_seq_points,
	mono_domain_unlock (domain);
7463 mono_arch_init_lmf_ext (MonoLMFExt *ext, gpointer prev_lmf)
7465 ext->lmf.previous_lmf = prev_lmf;
7466 /* Mark that this is a MonoLMFExt */
7467 ext->lmf.previous_lmf = (gpointer)(((gssize)ext->lmf.previous_lmf) | 2);
7468 ext->lmf.sp = (gssize)ext;
7472 * mono_arch_set_target:
7474 * Set the target architecture the JIT backend should generate code for, in the form
7475 * of a GNU target triplet. Only used in AOT mode.
7478 mono_arch_set_target (char *mtriple)
7480 /* The GNU target triple format is not very well documented */
7481 if (strstr (mtriple, "armv7")) {
7482 v5_supported = TRUE;
7483 v6_supported = TRUE;
7484 v7_supported = TRUE;
7486 if (strstr (mtriple, "armv6")) {
7487 v5_supported = TRUE;
7488 v6_supported = TRUE;
7490 if (strstr (mtriple, "armv7s")) {
7491 v7s_supported = TRUE;
7493 if (strstr (mtriple, "thumbv7s")) {
7494 v5_supported = TRUE;
7495 v6_supported = TRUE;
7496 v7_supported = TRUE;
7497 v7s_supported = TRUE;
7498 thumb_supported = TRUE;
7499 thumb2_supported = TRUE;
7501 if (strstr (mtriple, "darwin") || strstr (mtriple, "ios")) {
7502 v5_supported = TRUE;
7503 v6_supported = TRUE;
7504 thumb_supported = TRUE;
7507 if (strstr (mtriple, "gnueabi"))
7508 eabi_supported = TRUE;
/* NOTE(review): the return type (presumably gboolean), the
 * 'switch (opcode) {' header, the default branch and the closing braces
 * are elided in this listing; only the supported-opcode cases are
 * visible. */
mono_arch_opcode_supported (int opcode)
	/* 32-bit integer atomics require ARMv7 — presumably for the
	 * LDREX/STREX/DMB-based sequences; confirm against the emitter. */
	case OP_ATOMIC_ADD_I4:
	case OP_ATOMIC_EXCHANGE_I4:
	case OP_ATOMIC_CAS_I4:
	case OP_ATOMIC_LOAD_I1:
	case OP_ATOMIC_LOAD_I2:
	case OP_ATOMIC_LOAD_I4:
	case OP_ATOMIC_LOAD_U1:
	case OP_ATOMIC_LOAD_U2:
	case OP_ATOMIC_LOAD_U4:
	case OP_ATOMIC_STORE_I1:
	case OP_ATOMIC_STORE_I2:
	case OP_ATOMIC_STORE_I4:
	case OP_ATOMIC_STORE_U1:
	case OP_ATOMIC_STORE_U2:
	case OP_ATOMIC_STORE_U4:
		return v7_supported;
	/* Floating-point atomics additionally require a hardware FP unit
	 * (VFP with either soft or hard float ABI). */
	case OP_ATOMIC_LOAD_R4:
	case OP_ATOMIC_LOAD_R8:
	case OP_ATOMIC_STORE_R4:
	case OP_ATOMIC_STORE_R8:
		return v7_supported && IS_VFP;
7541 #if defined(ENABLE_GSHAREDVT)
7543 #include "../../../mono-extensions/mono/mini/mini-arm-gsharedvt.c"
7545 #endif /* !MONOTOUCH */