2 * mini-arm.c: ARM backend for the Mono code generator
5 * Paolo Molaro (lupus@ximian.com)
6 * Dietmar Maurer (dietmar@ximian.com)
8 * (C) 2003 Ximian, Inc.
9 * Copyright 2003-2011 Novell, Inc (http://www.novell.com)
10 * Copyright 2011 Xamarin, Inc (http://www.xamarin.com)
15 #include <mono/metadata/abi-details.h>
16 #include <mono/metadata/appdomain.h>
17 #include <mono/metadata/profiler-private.h>
18 #include <mono/metadata/debug-helpers.h>
19 #include <mono/utils/mono-mmap.h>
20 #include <mono/utils/mono-hwcap-arm.h>
21 #include <mono/utils/mono-memory-model.h>
27 #include "debugger-agent.h"
29 #include "mono/arch/arm/arm-vfp-codegen.h"
31 /* Sanity check: This makes no sense */
32 #if defined(ARM_FPU_NONE) && (defined(ARM_FPU_VFP) || defined(ARM_FPU_VFP_HARD))
33 #error "ARM_FPU_NONE is defined while one of ARM_FPU_VFP/ARM_FPU_VFP_HARD is defined"
37 * IS_SOFT_FLOAT: Is full software floating point used?
38 * IS_HARD_FLOAT: Is full hardware floating point used?
39 * IS_VFP: Is hardware floating point with software ABI used?
41 * These are not necessarily constants, e.g. IS_SOFT_FLOAT and
42 * IS_VFP may delegate to mono_arch_is_soft_float ().
45 #if defined(ARM_FPU_VFP_HARD)
46 #define IS_SOFT_FLOAT (FALSE)
47 #define IS_HARD_FLOAT (TRUE)
49 #elif defined(ARM_FPU_NONE)
50 #define IS_SOFT_FLOAT (mono_arch_is_soft_float ())
51 #define IS_HARD_FLOAT (FALSE)
52 #define IS_VFP (!mono_arch_is_soft_float ())
54 #define IS_SOFT_FLOAT (FALSE)
55 #define IS_HARD_FLOAT (FALSE)
59 #if defined(__ARM_EABI__) && defined(__linux__) && !defined(PLATFORM_ANDROID) && !defined(__native_client__)
60 #define HAVE_AEABI_READ_TP 1
63 #ifdef __native_client_codegen__
64 const guint kNaClAlignment = kNaClAlignmentARM;
65 const guint kNaClAlignmentMask = kNaClAlignmentMaskARM;
66 gint8 nacl_align_byte = -1; /* 0xff */
/* Pad CODE with PAD bytes for Native Client bundle alignment; currently a stub. */
69 mono_arch_nacl_pad (guint8 *code, int pad)
71 /* Not yet properly implemented. */
72 g_assert_not_reached ();
/* Skip over NaCl nop padding starting at CODE; currently a stub. */
77 mono_arch_nacl_skip_nops (guint8 *code)
79 /* Not yet properly implemented. */
80 g_assert_not_reached ();
84 #endif /* __native_client_codegen__ */
86 #define ALIGN_TO(val,align) ((((guint64)val) + ((align) - 1)) & ~((align) - 1))
89 void sys_icache_invalidate (void *start, size_t len);
92 /* This mutex protects architecture specific caches */
93 #define mono_mini_arch_lock() mono_mutex_lock (&mini_arch_mutex)
94 #define mono_mini_arch_unlock() mono_mutex_unlock (&mini_arch_mutex)
95 static mono_mutex_t mini_arch_mutex;
97 static gboolean v5_supported = FALSE;
98 static gboolean v6_supported = FALSE;
99 static gboolean v7_supported = FALSE;
100 static gboolean v7s_supported = FALSE;
101 static gboolean thumb_supported = FALSE;
102 static gboolean thumb2_supported = FALSE;
104 * Whether to use the ARM EABI
106 static gboolean eabi_supported = FALSE;
109 * Whether to use the iPhone ABI extensions:
110 * http://developer.apple.com/library/ios/documentation/Xcode/Conceptual/iPhoneOSABIReference/index.html
111 * Basically, r7 is used as a frame pointer and it should point to the saved r7 + lr.
112 * This is required for debugging/profiling tools to work, but it has some overhead so it should
113 * only be turned on in debug builds.
115 static gboolean iphone_abi = FALSE;
118 * The FPU we are generating code for. This is NOT runtime configurable right now,
119 * since some things like MONO_ARCH_CALLEE_FREGS still depend on defines.
121 static MonoArmFPU arm_fpu;
123 #if defined(ARM_FPU_VFP_HARD)
125 * On armhf, d0-d7 are used for argument passing and d8-d15
126 * must be preserved across calls, which leaves us no room
127 * for scratch registers. So we use d14-d15 but back up their
128 * previous contents to a stack slot before using them - see
129 * mono_arm_emit_vfp_scratch_save/_restore ().
131 static int vfp_scratch1 = ARM_VFP_D14;
132 static int vfp_scratch2 = ARM_VFP_D15;
135 * On armel, d0-d7 do not need to be preserved, so we can
136 * freely make use of them as scratch registers.
138 static int vfp_scratch1 = ARM_VFP_D0;
139 static int vfp_scratch2 = ARM_VFP_D1;
144 static volatile int ss_trigger_var = 0;
146 static gpointer single_step_func_wrapper;
147 static gpointer breakpoint_func_wrapper;
150 * The code generated for sequence points reads from this location, which is
151 * made read-only when single stepping is enabled.
153 static gpointer ss_trigger_page;
155 /* Enabled breakpoints read from this trigger page */
156 static gpointer bp_trigger_page;
160 * floating point support: on ARM it is a mess, there are at least 3
161 * different setups, each of which is binary-incompatible with the others.
162 * 1) FPA: old and ugly, but unfortunately what current distros use
163 * the double binary format has the two words swapped. 8 double registers.
164 * Implemented usually by kernel emulation.
165 * 2) softfloat: the compiler emulates all the fp ops. Usually uses the
166 * ugly swapped double format (I guess a softfloat-vfp exists, too, though).
167 * 3) VFP: the new and actually sensible and useful FP support. Implemented
168 * in HW or kernel-emulated, requires new tools. I think this is what symbian uses.
170 * We do not care about FPA. We will support soft float and VFP.
172 int mono_exc_esp_offset = 0;
174 #define arm_is_imm12(v) ((v) > -4096 && (v) < 4096)
175 #define arm_is_imm8(v) ((v) > -256 && (v) < 256)
176 #define arm_is_fpimm8(v) ((v) >= -1020 && (v) <= 1020)
178 #define LDR_MASK ((0xf << ARMCOND_SHIFT) | (3 << 26) | (1 << 22) | (1 << 20) | (15 << 12))
179 #define LDR_PC_VAL ((ARMCOND_AL << ARMCOND_SHIFT) | (1 << 26) | (0 << 22) | (1 << 20) | (15 << 12))
180 #define IS_LDR_PC(val) (((val) & LDR_MASK) == LDR_PC_VAL)
182 //#define DEBUG_IMT 0
185 static void mono_arch_compute_omit_fp (MonoCompile *cfg);
/*
 * mono_arch_regname:
 * Return a static, human-readable name for integer register number REG.
 */
189 mono_arch_regname (int reg)
191 static const char * rnames[] = {
192 "arm_r0", "arm_r1", "arm_r2", "arm_r3", "arm_v1",
193 "arm_v2", "arm_v3", "arm_v4", "arm_v5", "arm_v6",
194 "arm_v7", "arm_fp", "arm_ip", "arm_sp", "arm_lr",
/* Only r0..r15 have entries in the table above; guard the lookup. */
197 if (reg >= 0 && reg < 16)
/*
 * mono_arch_fregname:
 * Return a static, human-readable name for floating point register number REG.
 */
203 mono_arch_fregname (int reg)
205 static const char * rnames[] = {
206 "arm_f0", "arm_f1", "arm_f2", "arm_f3", "arm_f4",
207 "arm_f5", "arm_f6", "arm_f7", "arm_f8", "arm_f9",
208 "arm_f10", "arm_f11", "arm_f12", "arm_f13", "arm_f14",
209 "arm_f15", "arm_f16", "arm_f17", "arm_f18", "arm_f19",
210 "arm_f20", "arm_f21", "arm_f22", "arm_f23", "arm_f24",
211 "arm_f25", "arm_f26", "arm_f27", "arm_f28", "arm_f29",
/* 32 single-precision VFP registers are named; guard the lookup. */
214 if (reg >= 0 && reg < 32)
/*
 * emit_big_add:
 * Emit code computing DREG = SREG + IMM, picking the shortest encoding.
 * Returns the updated code pointer.
 * NOTE(review): the non-immediate path uses IP as scratch — callers must
 * not rely on IP surviving; confirm against the full body (lines elided here).
 */
222 emit_big_add (guint8 *code, int dreg, int sreg, int imm)
224 int imm8, rot_amount;
/* Fast path: IMM is encodable as an ARM rotated 8-bit immediate. */
225 if ((imm8 = mono_arm_is_rotated_imm8 (imm, &rot_amount)) >= 0) {
226 ARM_ADD_REG_IMM (code, dreg, sreg, imm8, rot_amount);
/* Materialize IMM in the IP scratch register, then add register-to-register. */
230 code = mono_arm_emit_load_imm (code, ARMREG_IP, imm);
231 ARM_ADD_REG_REG (code, dreg, sreg, ARMREG_IP);
/* Alternative: build the constant directly in DREG, then add SREG to it. */
233 code = mono_arm_emit_load_imm (code, dreg, imm);
234 ARM_ADD_REG_REG (code, dreg, dreg, sreg);
/*
 * emit_sub_imm:
 * Emit code computing DREG = SREG - IMM, mirroring emit_big_add ().
 * Returns the updated code pointer.
 */
239 /* If dreg == sreg, this clobbers IP */
241 emit_sub_imm (guint8 *code, int dreg, int sreg, int imm)
243 int imm8, rot_amount;
/* Fast path: IMM is encodable as an ARM rotated 8-bit immediate. */
244 if ((imm8 = mono_arm_is_rotated_imm8 (imm, &rot_amount)) >= 0) {
245 ARM_SUB_REG_IMM (code, dreg, sreg, imm8, rot_amount);
/* Materialize IMM in IP, then subtract register-to-register. */
249 code = mono_arm_emit_load_imm (code, ARMREG_IP, imm);
250 ARM_SUB_REG_REG (code, dreg, sreg, ARMREG_IP);
/* Build the constant directly in DREG, then subtract SREG from it. */
252 code = mono_arm_emit_load_imm (code, dreg, imm);
253 ARM_SUB_REG_REG (code, dreg, dreg, sreg);
/*
 * emit_memcpy:
 * Emit code copying SIZE bytes from SREG+SOFFSET to DREG+DOFFSET.
 * Large copies (> 4 pointer words) use a counted word-copy loop through
 * r0-r3; small copies are unrolled using LR as the temporary.
 */
259 emit_memcpy (guint8 *code, int size, int dreg, int doffset, int sreg, int soffset)
261 /* we can use r0-r3, since this is called only for incoming args on the stack */
262 if (size > sizeof (gpointer) * 4) {
/* Set up r0 = src cursor, r1 = dst cursor, r2 = remaining byte count. */
264 code = emit_big_add (code, ARMREG_R0, sreg, soffset);
265 code = emit_big_add (code, ARMREG_R1, dreg, doffset);
266 start_loop = code = mono_arm_emit_load_imm (code, ARMREG_R2, size);
/* Copy one word per iteration through r3, advancing both cursors. */
267 ARM_LDR_IMM (code, ARMREG_R3, ARMREG_R0, 0);
268 ARM_STR_IMM (code, ARMREG_R3, ARMREG_R1, 0);
269 ARM_ADD_REG_IMM8 (code, ARMREG_R0, ARMREG_R0, 4);
270 ARM_ADD_REG_IMM8 (code, ARMREG_R1, ARMREG_R1, 4);
/* SUBS sets the flags; loop back while the counter is non-zero. */
271 ARM_SUBS_REG_IMM8 (code, ARMREG_R2, ARMREG_R2, 4);
272 ARM_B_COND (code, ARMCOND_NE, 0);
273 arm_patch (code - 4, start_loop);
/* Small copy: if all offsets fit in a 12-bit immediate, copy in place. */
276 if (arm_is_imm12 (doffset) && arm_is_imm12 (doffset + size) &&
277 arm_is_imm12 (soffset) && arm_is_imm12 (soffset + size)) {
279 ARM_LDR_IMM (code, ARMREG_LR, sreg, soffset);
280 ARM_STR_IMM (code, ARMREG_LR, dreg, doffset);
/* Otherwise rebase src/dst into r0/r1 so offset 0 can be used below. */
286 code = emit_big_add (code, ARMREG_R0, sreg, soffset);
287 code = emit_big_add (code, ARMREG_R1, dreg, doffset);
288 doffset = soffset = 0;
290 ARM_LDR_IMM (code, ARMREG_LR, ARMREG_R0, soffset);
291 ARM_STR_IMM (code, ARMREG_LR, ARMREG_R1, doffset);
/* All visible paths copy whole words; SIZE must be consumed exactly. */
297 g_assert (size == 0);
/*
 * emit_call_reg:
 * Emit an indirect call through REG. Uses BLX where available; the
 * fallback sequence loads LR from PC manually before branching
 * (PC reads as current instruction + 8 on ARM).
 */
302 emit_call_reg (guint8 *code, int reg)
305 ARM_BLX_REG (code, reg);
307 #ifdef USE_JUMP_TABLES
308 g_assert_not_reached ();
/* Fallback: set up the return address in LR, then jump via PC. */
310 ARM_MOV_REG_REG (code, ARMREG_LR, ARMREG_PC);
314 ARM_MOV_REG_REG (code, ARMREG_PC, reg);
/*
 * emit_call_seq:
 * Emit a patchable call sequence. With jump tables, a patchable BL is
 * used; for dynamic methods, the target address is embedded inline in
 * the code stream (initially NULL, patched later) and loaded into IP.
 */
320 emit_call_seq (MonoCompile *cfg, guint8 *code)
322 #ifdef USE_JUMP_TABLES
323 code = mono_arm_patchable_bl (code, ARMCOND_AL);
325 if (cfg->method->dynamic) {
/* Load the inline constant slot (at PC+offset) into IP. */
326 ARM_LDR_IMM (code, ARMREG_IP, ARMREG_PC, 0);
/* Placeholder for the call target; filled in when the call is patched. */
328 *(gpointer*)code = NULL;
330 code = emit_call_reg (code, ARMREG_IP);
/*
 * mono_arm_patchable_b:
 * Emit a later-patchable conditional branch. With jump tables the target
 * comes from a fresh jumptable entry loaded into IP; otherwise a plain
 * B<cond> with a zero displacement is emitted for arm_patch () to fix up.
 */
339 mono_arm_patchable_b (guint8 *code, int cond)
341 #ifdef USE_JUMP_TABLES
344 jte = mono_jumptable_add_entry ();
345 code = mono_arm_load_jumptable_entry (code, jte, ARMREG_IP);
346 ARM_BX_COND (code, cond, ARMREG_IP);
348 ARM_B_COND (code, cond, 0);
/*
 * mono_arm_patchable_bl:
 * Like mono_arm_patchable_b (), but for calls: emits BLX via a jumptable
 * entry, or a plain BL<cond> with zero displacement to be patched later.
 */
354 mono_arm_patchable_bl (guint8 *code, int cond)
356 #ifdef USE_JUMP_TABLES
359 jte = mono_jumptable_add_entry ();
360 code = mono_arm_load_jumptable_entry (code, jte, ARMREG_IP);
361 ARM_BLX_REG_COND (code, cond, ARMREG_IP);
363 ARM_BL_COND (code, cond, 0);
368 #ifdef USE_JUMP_TABLES
/*
 * mono_arm_load_jumptable_entry_addr:
 * Load the 32-bit address of jumptable entry JTE into REG using a
 * MOVW/MOVT pair (low half-word, then high half-word).
 */
370 mono_arm_load_jumptable_entry_addr (guint8 *code, gpointer *jte, ARMReg reg)
372 ARM_MOVW_REG_IMM (code, reg, GPOINTER_TO_UINT(jte) & 0xffff);
373 ARM_MOVT_REG_IMM (code, reg, (GPOINTER_TO_UINT(jte) >> 16) & 0xffff);
/*
 * mono_arm_load_jumptable_entry:
 * Load the value stored in jumptable entry JTE into REG: first the
 * entry's address, then a dereferencing load.
 */
378 mono_arm_load_jumptable_entry (guint8 *code, gpointer* jte, ARMReg reg)
380 code = mono_arm_load_jumptable_entry_addr (code, jte, reg);
381 ARM_LDR_IMM (code, reg, reg, 0);
/*
 * emit_move_return_value:
 * Emit code moving a call's return value from the ABI return location
 * into INS->dreg. Visible here: the float-call cases, which move the
 * result from s0/d0 (hard-float) or r0/r0:r1 (soft ABI) into a VFP reg,
 * converting R4 results to the JIT's double-precision representation.
 */
387 emit_move_return_value (MonoCompile *cfg, MonoInst *ins, guint8 *code)
389 switch (ins->opcode) {
392 case OP_FCALL_MEMBASE:
394 MonoType *sig_ret = mini_type_get_underlying_type (NULL, ((MonoCallInst*)ins)->signature->ret);
395 if (sig_ret->type == MONO_TYPE_R4) {
/* Hard-float: widen the single-precision result in s0 to double. */
397 ARM_CVTS (code, ins->dreg, ARM_VFP_F0);
/* Soft ABI: move the raw bits from r0 into a VFP reg, then widen. */
399 ARM_FMSR (code, ins->dreg, ARMREG_R0);
400 ARM_CVTS (code, ins->dreg, ins->dreg);
/* R8 result: copy d0 (hard-float) ... */
404 ARM_CPYD (code, ins->dreg, ARM_VFP_D0);
/* ... or reassemble the double from the r0:r1 pair (soft ABI). */
406 ARM_FMDRR (code, ARMREG_R0, ARMREG_R1, ins->dreg);
/*
 * emit_save_lmf:
 * Emit code linking a new MonoLMF (built in this frame at LMF_OFFSET
 * from SP) onto the thread's LMF stack. The address of the thread's
 * lmf_addr is obtained by the fastest available means: __aeabi_read_tp
 * TLS access, an inlined pthread_getspecific () for managed-to-native
 * wrappers, or a full call to mono_get_lmf_addr () as the fallback.
 */
419 * Emit code to push an LMF structure on the LMF stack.
420 * On arm, this is intermixed with the initialization of other fields of the structure.
423 emit_save_lmf (MonoCompile *cfg, guint8 *code, gint32 lmf_offset)
425 gboolean get_lmf_fast = FALSE;
428 #ifdef HAVE_AEABI_READ_TP
429 gint32 lmf_addr_tls_offset = mono_get_lmf_addr_tls_offset ();
/* Fast path: read the TLS base via __aeabi_read_tp, then index to lmf_addr. */
431 if (lmf_addr_tls_offset != -1) {
434 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_INTERNAL_METHOD,
435 (gpointer)"__aeabi_read_tp");
436 code = emit_call_seq (cfg, code);
438 ARM_LDR_IMM (code, ARMREG_R0, ARMREG_R0, lmf_addr_tls_offset);
444 if (cfg->method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) {
447 /* Inline mono_get_lmf_addr () */
448 /* jit_tls = pthread_getspecific (mono_jit_tls_id); lmf_addr = &jit_tls->lmf; */
450 /* Load mono_jit_tls_id */
452 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_JIT_TLS_ID, NULL);
453 ARM_LDR_IMM (code, ARMREG_R0, ARMREG_PC, 0);
/* Inline constant slot for mono_jit_tls_id; patched at compile time. */
455 *(gpointer*)code = NULL;
457 ARM_LDR_REG_REG (code, ARMREG_R0, ARMREG_PC, ARMREG_R0);
458 /* call pthread_getspecific () */
459 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_INTERNAL_METHOD,
460 (gpointer)"pthread_getspecific");
461 code = emit_call_seq (cfg, code);
462 /* lmf_addr = &jit_tls->lmf */
463 lmf_offset = MONO_STRUCT_OFFSET (MonoJitTlsData, lmf);
464 g_assert (arm_is_imm8 (lmf_offset));
465 ARM_ADD_REG_IMM (code, ARMREG_R0, ARMREG_R0, lmf_offset, 0);
/* Fallback: plain call to mono_get_lmf_addr (); result arrives in r0. */
472 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_INTERNAL_METHOD,
473 (gpointer)"mono_get_lmf_addr");
474 code = emit_call_seq (cfg, code);
476 /* we build the MonoLMF structure on the stack - see mini-arm.h */
477 /* lmf_offset is the offset from the previous stack pointer,
478 * alloc_size is the total stack space allocated, so the offset
479 * of MonoLMF from the current stack ptr is alloc_size - lmf_offset.
480 * The pointer to the struct is put in r1 (new_lmf).
481 * ip is used as scratch
482 * The callee-saved registers are already in the MonoLMF structure
484 code = emit_big_add (code, ARMREG_R1, ARMREG_SP, lmf_offset);
485 /* r0 is the result from mono_get_lmf_addr () */
486 ARM_STR_IMM (code, ARMREG_R0, ARMREG_R1, MONO_STRUCT_OFFSET (MonoLMF, lmf_addr));
487 /* new_lmf->previous_lmf = *lmf_addr */
488 ARM_LDR_IMM (code, ARMREG_IP, ARMREG_R0, MONO_STRUCT_OFFSET (MonoLMF, previous_lmf));
489 ARM_STR_IMM (code, ARMREG_IP, ARMREG_R1, MONO_STRUCT_OFFSET (MonoLMF, previous_lmf));
490 /* *(lmf_addr) = r1 */
491 ARM_STR_IMM (code, ARMREG_R1, ARMREG_R0, MONO_STRUCT_OFFSET (MonoLMF, previous_lmf));
492 /* Skip method (only needed for trampoline LMF frames) */
493 ARM_STR_IMM (code, ARMREG_SP, ARMREG_R1, MONO_STRUCT_OFFSET (MonoLMF, sp));
494 ARM_STR_IMM (code, ARMREG_FP, ARMREG_R1, MONO_STRUCT_OFFSET (MonoLMF, fp));
495 /* save the current IP */
496 ARM_MOV_REG_REG (code, ARMREG_IP, ARMREG_PC);
497 ARM_STR_IMM (code, ARMREG_IP, ARMREG_R1, MONO_STRUCT_OFFSET (MonoLMF, ip));
/* Tell the precise GC the LMF slots never hold managed references. */
499 for (i = 0; i < sizeof (MonoLMF); i += sizeof (mgreg_t))
500 mini_gc_set_slot_type_from_fp (cfg, lmf_offset + i, SLOT_NOREF);
/*
 * emit_float_args:
 * Emit the deferred single-precision argument loads recorded on
 * INST->float_args, loading each value from its stack slot into the
 * assigned VFP hardware register. Grows the code buffer if needed and
 * keeps *offset in sync with the emitted code.
 */
511 emit_float_args (MonoCompile *cfg, MonoCallInst *inst, guint8 *code, int *max_len, guint *offset)
515 for (list = inst->float_args; list; list = list->next) {
516 FloatArgData *fad = list->data;
517 MonoInst *var = get_vreg_to_inst (cfg, fad->vreg);
/* Can the slot be addressed with an 8-bit VFP load offset? */
518 gboolean imm = arm_is_fpimm8 (var->inst_offset);
520 /* 4+1 insns for emit_big_add () and 1 for FLDS. */
/* Ensure there is room for the worst-case sequence before emitting. */
526 if (*offset + *max_len > cfg->code_size) {
527 cfg->code_size += *max_len;
528 cfg->native_code = g_realloc (cfg->native_code, cfg->code_size);
/* native_code may have moved; recompute the write cursor. */
530 code = cfg->native_code + *offset;
/* Offset too large for FLDS: compute the address in LR first. */
534 code = emit_big_add (code, ARMREG_LR, var->inst_basereg, var->inst_offset);
535 ARM_FLDS (code, fad->hreg, ARMREG_LR, 0);
537 ARM_FLDS (code, fad->hreg, var->inst_basereg, var->inst_offset);
539 *offset = code - cfg->native_code;
/*
 * mono_arm_emit_vfp_scratch_save:
 * Spill VFP scratch register REG (must be vfp_scratch1 or vfp_scratch2)
 * to its reserved stack slot so it can be used as a temporary. On armhf
 * these are d14/d15, which are callee-saved — see the comment at their
 * definition. Pair with mono_arm_emit_vfp_scratch_restore ().
 */
546 mono_arm_emit_vfp_scratch_save (MonoCompile *cfg, guint8 *code, int reg)
550 g_assert (reg == vfp_scratch1 || reg == vfp_scratch2);
/* Each scratch reg has a dedicated slot allocated on the frame. */
552 inst = (MonoInst *) cfg->arch.vfp_scratch_slots [reg == vfp_scratch1 ? 0 : 1];
/* Slot offset too large for FSTD's immediate: address it via LR. */
555 if (!arm_is_fpimm8 (inst->inst_offset)) {
556 code = emit_big_add (code, ARMREG_LR, inst->inst_basereg, inst->inst_offset);
557 ARM_FSTD (code, reg, ARMREG_LR, 0);
559 ARM_FSTD (code, reg, inst->inst_basereg, inst->inst_offset);
/*
 * mono_arm_emit_vfp_scratch_restore:
 * Reload VFP scratch register REG from the stack slot written by
 * mono_arm_emit_vfp_scratch_save (); exact mirror of that function.
 */
566 mono_arm_emit_vfp_scratch_restore (MonoCompile *cfg, guint8 *code, int reg)
570 g_assert (reg == vfp_scratch1 || reg == vfp_scratch2);
572 inst = (MonoInst *) cfg->arch.vfp_scratch_slots [reg == vfp_scratch1 ? 0 : 1];
/* Slot offset too large for FLDD's immediate: address it via LR. */
575 if (!arm_is_fpimm8 (inst->inst_offset)) {
576 code = emit_big_add (code, ARMREG_LR, inst->inst_basereg, inst->inst_offset);
577 ARM_FLDD (code, reg, ARMREG_LR, 0);
579 ARM_FLDD (code, reg, inst->inst_basereg, inst->inst_offset);
/*
 * emit_restore_lmf:
 * Unlink this frame's MonoLMF from the thread's LMF stack by storing
 * its previous_lmf back through lmf_addr. Small offsets are addressed
 * directly off the frame register; larger ones via a base built in r2.
 */
588 * Emit code to pop an LMF structure from the LMF stack.
591 emit_restore_lmf (MonoCompile *cfg, guint8 *code, gint32 lmf_offset)
/* Offset small enough to address the LMF directly off the frame reg. */
595 if (lmf_offset < 32) {
596 basereg = cfg->frame_reg;
/* Otherwise compute the LMF base address into r2. */
601 code = emit_big_add (code, ARMREG_R2, cfg->frame_reg, lmf_offset);
604 /* ip = previous_lmf */
605 ARM_LDR_IMM (code, ARMREG_IP, basereg, offset + MONO_STRUCT_OFFSET (MonoLMF, previous_lmf));
607 ARM_LDR_IMM (code, ARMREG_LR, basereg, offset + MONO_STRUCT_OFFSET (MonoLMF, lmf_addr));
608 /* *(lmf_addr) = previous_lmf */
609 ARM_STR_IMM (code, ARMREG_IP, ARMREG_LR, MONO_STRUCT_OFFSET (MonoLMF, previous_lmf));
614 #endif /* #ifndef DISABLE_JIT */
617 * mono_arch_get_argument_info:
618 * @csig: a method signature
619 * @param_count: the number of parameters to consider
620 * @arg_info: an array to store the result infos
622 * Gathers information on parameters such as size, alignment and
623 * padding. arg_info should be large enough to hold param_count + 1 entries.
625 * Returns the size of the activation frame.
628 mono_arch_get_argument_info (MonoGenericSharingContext *gsctx, MonoMethodSignature *csig, int param_count, MonoJitArgumentInfo *arg_info)
630 int k, frame_size = 0;
631 guint32 size, align, pad;
/* A struct return is passed by hidden pointer: reserve one word for it. */
635 t = mini_type_get_underlying_type (gsctx, csig->ret);
636 if (MONO_TYPE_ISSTRUCT (t)) {
637 frame_size += sizeof (gpointer);
641 arg_info [0].offset = offset;
/* One extra word when a 'this' pointer is present. */
644 frame_size += sizeof (gpointer);
648 arg_info [0].size = frame_size;
650 for (k = 0; k < param_count; k++) {
651 size = mini_type_stack_size_full (NULL, csig->params [k], &align, csig->pinvoke);
653 /* ignore alignment for now */
/* Pad frame_size up to the parameter's alignment and record the pad. */
656 frame_size += pad = (align - (frame_size & (align - 1))) & (align - 1);
657 arg_info [k].pad = pad;
659 arg_info [k + 1].pad = 0;
660 arg_info [k + 1].size = size;
662 arg_info [k + 1].offset = offset;
/* Round the final frame size up to the ABI frame alignment. */
666 align = MONO_ARCH_FRAME_ALIGNMENT;
667 frame_size += pad = (align - (frame_size & (align - 1))) & (align - 1);
668 arg_info [k].pad = pad;
673 #define MAX_ARCH_DELEGATE_PARAMS 3
/*
 * get_delegate_invoke_impl:
 * Generate the small trampoline used for delegate Invoke. The has_target
 * variant replaces 'this' (r0) with delegate->target and jumps to
 * method_ptr; the no-target variant slides the register arguments down
 * by one to drop the delegate argument, then jumps to method_ptr.
 * If CODE_SIZE is non-NULL it receives the generated code length.
 */
676 get_delegate_invoke_impl (gboolean has_target, gboolean param_count, guint32 *code_size)
678 guint8 *code, *start;
681 start = code = mono_global_codeman_reserve (12);
683 /* Replace the this argument with the target */
684 ARM_LDR_IMM (code, ARMREG_IP, ARMREG_R0, MONO_STRUCT_OFFSET (MonoDelegate, method_ptr));
685 ARM_LDR_IMM (code, ARMREG_R0, ARMREG_R0, MONO_STRUCT_OFFSET (MonoDelegate, target));
/* Tail-jump to the delegate's method_ptr. */
686 ARM_MOV_REG_REG (code, ARMREG_PC, ARMREG_IP);
688 g_assert ((code - start) <= 12);
690 mono_arch_flush_icache (start, 12);
/* No-target case: 2 insns + one MOV per parameter + the final jump. */
694 size = 8 + param_count * 4;
695 start = code = mono_global_codeman_reserve (size);
697 ARM_LDR_IMM (code, ARMREG_IP, ARMREG_R0, MONO_STRUCT_OFFSET (MonoDelegate, method_ptr));
698 /* slide down the arguments */
699 for (i = 0; i < param_count; ++i) {
700 ARM_MOV_REG_REG (code, (ARMREG_R0 + i), (ARMREG_R0 + i + 1));
702 ARM_MOV_REG_REG (code, ARMREG_PC, ARMREG_IP);
704 g_assert ((code - start) <= size);
706 mono_arch_flush_icache (start, size);
709 mono_profiler_code_buffer_new (start, code - start, MONO_PROFILER_CODE_BUFFER_DELEGATE_INVOKE, NULL);
711 *code_size = code - start;
717 * mono_arch_get_delegate_invoke_impls:
719 * Return a list of MonoAotTrampInfo structures for the delegate invoke impl
723 mono_arch_get_delegate_invoke_impls (void)
/* The has-target variant, plus one no-target variant per arity 0..MAX. */
731 code = get_delegate_invoke_impl (TRUE, 0, &code_len);
732 res = g_slist_prepend (res, mono_tramp_info_create ("delegate_invoke_impl_has_target", code, code_len, NULL, NULL));
734 for (i = 0; i <= MAX_ARCH_DELEGATE_PARAMS; ++i) {
735 code = get_delegate_invoke_impl (FALSE, i, &code_len);
/* Name format must match the lookup in mono_arch_get_delegate_invoke_impl (). */
736 tramp_name = g_strdup_printf ("delegate_invoke_impl_target_%d", i);
737 res = g_slist_prepend (res, mono_tramp_info_create (tramp_name, code, code_len, NULL, NULL));
/*
 * mono_arch_get_delegate_invoke_impl:
 * Return (and cache) the delegate invoke trampoline matching SIG.
 * Struct returns and non-regsize parameters are unsupported. Under AOT
 * the prebuilt named trampolines are used; otherwise the code is
 * generated on demand. Caches are guarded by the arch mutex.
 */
745 mono_arch_get_delegate_invoke_impl (MonoMethodSignature *sig, gboolean has_target)
747 guint8 *code, *start;
750 /* FIXME: Support more cases */
751 sig_ret = mini_type_get_underlying_type (NULL, sig->ret);
752 if (MONO_TYPE_ISSTRUCT (sig_ret))
/* has_target variant: a single cached instance serves all signatures. */
756 static guint8* cached = NULL;
757 mono_mini_arch_lock ();
759 mono_mini_arch_unlock ();
764 start = mono_aot_get_trampoline ("delegate_invoke_impl_has_target");
766 start = get_delegate_invoke_impl (TRUE, 0, NULL);
768 mono_mini_arch_unlock ();
/* No-target variant: one cache slot per supported parameter count. */
771 static guint8* cache [MAX_ARCH_DELEGATE_PARAMS + 1] = {NULL};
774 if (sig->param_count > MAX_ARCH_DELEGATE_PARAMS)
/* All parameters must fit in an integer register. */
776 for (i = 0; i < sig->param_count; ++i)
777 if (!mono_is_regsize_var (sig->params [i]))
780 mono_mini_arch_lock ();
781 code = cache [sig->param_count];
783 mono_mini_arch_unlock ();
788 char *name = g_strdup_printf ("delegate_invoke_impl_target_%d", sig->param_count);
789 start = mono_aot_get_trampoline (name);
792 start = get_delegate_invoke_impl (FALSE, sig->param_count, NULL);
794 cache [sig->param_count] = start;
795 mono_mini_arch_unlock ();
803 mono_arch_get_delegate_virtual_invoke_impl (MonoMethodSignature *sig, MonoMethod *method, int offset, gboolean load_imt_reg)
/* Return the 'this' argument of a call: on ARM it is always passed in r0. */
809 mono_arch_get_this_arg_from_call (mgreg_t *regs, guint8 *code)
811 return (gpointer)regs [ARMREG_R0];
815 * Initialize the cpu to execute managed code.
818 mono_arch_cpu_init (void)
/* Cache the host's gint64 alignment for the argument-marshalling code. */
820 i8_align = MONO_ABI_ALIGNOF (gint64);
821 #ifdef MONO_CROSS_COMPILE
822 /* Need to set the alignment of i8 since it can different on the target */
823 #ifdef TARGET_ANDROID
825 mono_type_set_alignment (MONO_TYPE_I8, i8_align);
/*
 * create_function_wrapper:
 * Generate a wrapper that builds a MonoContext snapshot of the caller's
 * register state on the stack, calls FUNCTION (ctx) with it, and then
 * restores all registers — including pc — from the (possibly modified)
 * context. Used for the soft-debugger single-step/breakpoint hooks.
 */
831 create_function_wrapper (gpointer function)
833 guint8 *start, *code;
835 start = code = mono_global_codeman_reserve (96);
838 * Construct the MonoContext structure on the stack.
/* Reserve the context area below the current sp. */
841 ARM_SUB_REG_IMM8 (code, ARMREG_SP, ARMREG_SP, sizeof (MonoContext));
843 /* save ip, lr and pc into their correspodings ctx.regs slots. */
844 ARM_STR_IMM (code, ARMREG_IP, ARMREG_SP, MONO_STRUCT_OFFSET (MonoContext, regs) + sizeof (mgreg_t) * ARMREG_IP);
845 ARM_STR_IMM (code, ARMREG_LR, ARMREG_SP, MONO_STRUCT_OFFSET (MonoContext, regs) + 4 * ARMREG_LR);
846 ARM_STR_IMM (code, ARMREG_LR, ARMREG_SP, MONO_STRUCT_OFFSET (MonoContext, regs) + 4 * ARMREG_PC);
848 /* save r0..r10 and fp */
849 ARM_ADD_REG_IMM8 (code, ARMREG_IP, ARMREG_SP, MONO_STRUCT_OFFSET (MonoContext, regs));
850 ARM_STM (code, ARMREG_IP, 0x0fff);
852 /* now we can update fp. */
853 ARM_MOV_REG_REG (code, ARMREG_FP, ARMREG_SP);
855 /* make ctx.esp hold the actual value of sp at the beginning of this method. */
856 ARM_ADD_REG_IMM8 (code, ARMREG_R0, ARMREG_FP, sizeof (MonoContext));
857 ARM_STR_IMM (code, ARMREG_R0, ARMREG_IP, 4 * ARMREG_SP);
858 ARM_STR_IMM (code, ARMREG_R0, ARMREG_FP, MONO_STRUCT_OFFSET (MonoContext, regs) + 4 * ARMREG_SP);
860 /* make ctx.eip hold the address of the call. */
861 ARM_SUB_REG_IMM8 (code, ARMREG_LR, ARMREG_LR, 4);
862 ARM_STR_IMM (code, ARMREG_LR, ARMREG_SP, MONO_STRUCT_OFFSET (MonoContext, pc));
864 /* r0 now points to the MonoContext */
865 ARM_MOV_REG_REG (code, ARMREG_R0, ARMREG_FP);
/* Load the callee address: jumptable entry, or inline constant below. */
868 #ifdef USE_JUMP_TABLES
870 gpointer *jte = mono_jumptable_add_entry ();
871 code = mono_arm_load_jumptable_entry (code, jte, ARMREG_IP);
875 ARM_LDR_IMM (code, ARMREG_IP, ARMREG_PC, 0);
/* Inline slot holding FUNCTION, skipped over by the instruction stream. */
877 *(gpointer*)code = function;
880 ARM_BLX_REG (code, ARMREG_IP);
882 /* we're back; save ctx.eip and ctx.esp into the corresponding regs slots. */
883 ARM_LDR_IMM (code, ARMREG_R0, ARMREG_FP, MONO_STRUCT_OFFSET (MonoContext, pc));
884 ARM_STR_IMM (code, ARMREG_R0, ARMREG_FP, MONO_STRUCT_OFFSET (MonoContext, regs) + 4 * ARMREG_LR);
885 ARM_STR_IMM (code, ARMREG_R0, ARMREG_FP, MONO_STRUCT_OFFSET (MonoContext, regs) + 4 * ARMREG_PC);
887 /* make ip point to the regs array, then restore everything, including pc. */
888 ARM_ADD_REG_IMM8 (code, ARMREG_IP, ARMREG_FP, MONO_STRUCT_OFFSET (MonoContext, regs));
889 ARM_LDM (code, ARMREG_IP, 0xffff);
891 mono_arch_flush_icache (start, code - start);
892 mono_profiler_code_buffer_new (start, code - start, MONO_PROFILER_CODE_BUFFER_HELPER, NULL);
898 * Initialize architecture specific code.
901 mono_arch_init (void)
903 const char *cpu_arch;
905 mono_mutex_init_recursive (&mini_arch_mutex);
906 #ifdef MONO_ARCH_SOFT_DEBUG_SUPPORTED
/* Soft breakpoints: build the debugger entry wrappers up front. */
907 if (mini_get_debug_options ()->soft_breakpoints) {
908 single_step_func_wrapper = create_function_wrapper (debugger_agent_single_step_from_context);
909 breakpoint_func_wrapper = create_function_wrapper (debugger_agent_breakpoint_from_context);
/* Hardware sequence points: trigger pages whose protection is toggled. */
914 ss_trigger_page = mono_valloc (NULL, mono_pagesize (), MONO_MMAP_READ|MONO_MMAP_32BIT);
915 bp_trigger_page = mono_valloc (NULL, mono_pagesize (), MONO_MMAP_READ|MONO_MMAP_32BIT);
/* Breakpoint page starts inaccessible so reads fault immediately. */
916 mono_mprotect (bp_trigger_page, mono_pagesize (), 0);
919 mono_aot_register_jit_icall ("mono_arm_throw_exception", mono_arm_throw_exception);
920 mono_aot_register_jit_icall ("mono_arm_throw_exception_by_token", mono_arm_throw_exception_by_token);
921 mono_aot_register_jit_icall ("mono_arm_resume_unwind", mono_arm_resume_unwind);
922 #if defined(ENABLE_GSHAREDVT)
923 mono_aot_register_jit_icall ("mono_arm_start_gsharedvt_call", mono_arm_start_gsharedvt_call);
926 #if defined(__ARM_EABI__)
927 eabi_supported = TRUE;
/* Select the FPU model: build-time default, ... */
930 #if defined(ARM_FPU_VFP_HARD)
931 arm_fpu = MONO_ARM_FPU_VFP_HARD;
933 arm_fpu = MONO_ARM_FPU_VFP;
935 #if defined(ARM_FPU_NONE) && !defined(__APPLE__)
936 /* If we're compiling with a soft float fallback and it
937 turns out that no VFP unit is available, we need to
938 switch to soft float. We don't do this for iOS, since
939 iOS devices always have a VFP unit. */
940 if (!mono_hwcap_arm_has_vfp)
941 arm_fpu = MONO_ARM_FPU_NONE;
/* Record the CPU features detected by mono-hwcap. */
945 v5_supported = mono_hwcap_arm_is_v5;
946 v6_supported = mono_hwcap_arm_is_v6;
947 v7_supported = mono_hwcap_arm_is_v7;
948 v7s_supported = mono_hwcap_arm_is_v7s;
950 #if defined(__APPLE__)
951 /* iOS is special-cased here because we don't yet
952 have a way to properly detect CPU features on it. */
953 thumb_supported = TRUE;
956 thumb_supported = mono_hwcap_arm_has_thumb;
957 thumb2_supported = mono_hwcap_arm_has_thumb2;
960 /* Format: armv(5|6|7[s])[-thumb[2]] */
961 cpu_arch = g_getenv ("MONO_CPU_ARCH");
963 /* Do this here so it overrides any detection. */
/* Environment override: parse the version digit out of "armvN...". */
965 if (strncmp (cpu_arch, "armv", 4) == 0) {
966 v5_supported = cpu_arch [4] >= '5';
967 v6_supported = cpu_arch [4] >= '6';
968 v7_supported = cpu_arch [4] >= '7';
969 v7s_supported = strncmp (cpu_arch, "armv7s", 6) == 0;
972 thumb_supported = strstr (cpu_arch, "thumb") != NULL;
973 thumb2_supported = strstr (cpu_arch, "thumb2") != NULL;
978 * Cleanup architecture specific code.
981 mono_arch_cleanup (void)
986 * This function returns the optimizations supported on this cpu.
989 mono_arch_cpu_optimizations (guint32 *exclude_mask)
991 /* no arm-specific optimizations yet */
997 * This function test for all SIMD functions supported.
999 * Returns a bitmask corresponding to all supported versions.
1003 mono_arch_cpu_enumerate_simd_versions (void)
1005 /* SIMD is currently unimplemented */
/*
 * mono_arch_opcode_needs_emulation:
 * Return whether OPCODE must be software-emulated on this CPU; v7s
 * provides hardware support for some opcodes (cases elided here).
 */
1013 mono_arch_opcode_needs_emulation (MonoCompile *cfg, int opcode)
1015 if (v7s_supported) {
1029 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
/* Whether full software floating point is in use (no usable FPU detected). */
1031 mono_arch_is_soft_float (void)
1033 return arm_fpu == MONO_ARM_FPU_NONE;
/* Whether the hard-float (armhf) calling convention is in use. */
1038 mono_arm_is_hard_float (void)
1040 return arm_fpu == MONO_ARM_FPU_VFP_HARD;
/*
 * is_regsize_var:
 * Return whether a variable of type T fits in a single 32-bit integer
 * register (pointers, object references, small integers, and non-valuetype
 * generic instances).
 */
1044 is_regsize_var (MonoGenericSharingContext *gsctx, MonoType *t) {
1047 t = mini_type_get_underlying_type (gsctx, t);
1054 case MONO_TYPE_FNPTR:
1056 case MONO_TYPE_OBJECT:
1057 case MONO_TYPE_STRING:
1058 case MONO_TYPE_CLASS:
1059 case MONO_TYPE_SZARRAY:
1060 case MONO_TYPE_ARRAY:
/* Generic instances are regsize only when they are reference types. */
1062 case MONO_TYPE_GENERICINST:
1063 if (!mono_type_generic_inst_is_valuetype (t))
1066 case MONO_TYPE_VALUETYPE:
/*
 * mono_arch_get_allocatable_int_vars:
 * Return the list of variables in CFG eligible for global register
 * allocation: live, non-volatile, non-indirect locals/args whose type
 * fits in a 32-bit register, sorted for the allocator.
 */
1073 mono_arch_get_allocatable_int_vars (MonoCompile *cfg)
1078 for (i = 0; i < cfg->num_varinfo; i++) {
1079 MonoInst *ins = cfg->varinfo [i];
1080 MonoMethodVar *vmv = MONO_VARINFO (cfg, i);
/* Skip variables with an empty (or inverted) live range. */
1083 if (vmv->range.first_use.abs_pos >= vmv->range.last_use.abs_pos)
1086 if (ins->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT) || (ins->opcode != OP_LOCAL && ins->opcode != OP_ARG))
1089 /* we can only allocate 32 bit values */
1090 if (is_regsize_var (cfg->generic_sharing_context, ins->inst_vtype)) {
1091 g_assert (MONO_VARINFO (cfg, i)->reg == -1);
1092 g_assert (i == vmv->idx);
1093 vars = mono_varlist_insert_sorted (cfg, vars, vmv, FALSE);
/*
 * mono_arch_get_global_int_regs:
 * Return the list of callee-saved integer registers (v1..v7, fp)
 * available to the global register allocator for CFG, excluding
 * registers reserved for the frame pointer or the rgctx/IMT argument.
 */
1101 mono_arch_get_global_int_regs (MonoCompile *cfg)
1105 mono_arch_compute_omit_fp (cfg);
1108 * FIXME: Interface calls might go through a static rgctx trampoline which
1109 * sets V5, but it doesn't save it, so we need to save it ourselves, and
1112 if (cfg->flags & MONO_CFG_HAS_CALLS)
1113 cfg->uses_rgctx_reg = TRUE;
/* fp is only allocatable when the frame pointer is omitted. */
1115 if (cfg->arch.omit_fp)
1116 regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_FP));
1117 regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_V1));
1118 regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_V2));
1119 regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_V3));
1121 /* V4=R7 is used as a frame pointer, but V7=R10 is preserved */
1122 regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_V7));
1124 regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_V4));
1125 if (!(cfg->compile_aot || cfg->uses_rgctx_reg || COMPILE_LLVM (cfg)))
1126 /* V5 is reserved for passing the vtable/rgctx/IMT method */
1127 regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_V5));
1128 /*regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_V6));*/
1129 /*regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_V7));*/
1135 * mono_arch_regalloc_cost:
1137 * Return the cost, in number of memory references, of the action of
1138 * allocating the variable VMV into a register during global register
1142 mono_arch_regalloc_cost (MonoCompile *cfg, MonoMethodVar *vmv)
1148 #endif /* #ifndef DISABLE_JIT */
1150 #ifndef __GNUC_PREREQ
1151 #define __GNUC_PREREQ(maj, min) (0)
/*
 * mono_arch_flush_icache:
 * Flush the instruction cache for SIZE bytes of freshly generated code
 * at CODE, using whichever mechanism the platform provides: Apple's
 * sys_icache_invalidate, GCC's cache-clear builtins, or a raw
 * cacheflush system call on Android/older Linux.
 */
1155 mono_arch_flush_icache (guint8 *code, gint size)
1157 #if defined(__native_client__)
1158 // For Native Client we don't have to flush i-cache here,
1159 // as it's being done by dyncode interface.
/* Cross compilers cannot (and need not) flush the host i-cache. */
1162 #ifdef MONO_CROSS_COMPILE
1164 sys_icache_invalidate (code, size);
1165 #elif __GNUC_PREREQ(4, 3)
1166 __builtin___clear_cache (code, code + size);
1167 #elif __GNUC_PREREQ(4, 1)
1168 __clear_cache (code, code + size);
1169 #elif defined(PLATFORM_ANDROID)
/* Android: invoke the ARM cacheflush syscall directly. */
1170 const int syscall = 0xf0002;
1178 : "r" (code), "r" (code + size), "r" (syscall)
1179 : "r0", "r1", "r7", "r2"
/* Generic Linux fallback: legacy sys_cacheflush SWI. */
1182 __asm __volatile ("mov r0, %0\n"
1185 "swi 0x9f0002 @ sys_cacheflush"
1187 : "r" (code), "r" (code + size), "r" (0)
1188 : "r0", "r1", "r3" );
1190 #endif /* !__native_client__ */
1201 RegTypeStructByAddr,
1202 /* gsharedvt argument passed by addr in greg */
1203 RegTypeGSharedVtInReg,
1204 /* gsharedvt argument passed by addr on stack */
1205 RegTypeGSharedVtOnStack,
1210 guint16 vtsize; /* in param area */
1214 guint8 size : 4; /* 1, 2, 4, 8, or regs used by RegTypeStructByVal */
1219 guint32 stack_usage;
1220 gboolean vtype_retaddr;
1221 /* The index of the vret arg in the argument list */
/*
 * add_general:
 * Assign the next integer argument to a register or stack slot per the
 * ARM calling convention, filling in AINFO and advancing *gr and
 * *stack_size. SIMPLE selects a single-word argument; otherwise a
 * 64-bit argument is placed (register pair, split r3/stack, or stack),
 * honoring the platform's 8-byte alignment rules under EABI.
 */
1231 add_general (guint *gr, guint *stack_size, ArgInfo *ainfo, gboolean simple)
/* Single word: r0-r3 first, then the caller's stack area. */
1234 if (*gr > ARMREG_R3) {
1236 ainfo->offset = *stack_size;
1237 ainfo->reg = ARMREG_SP; /* in the caller */
1238 ainfo->storage = RegTypeBase;
1241 ainfo->storage = RegTypeGeneral;
/* 64-bit values may be split across r3 and the stack when i8 is 4-aligned. */
1248 split = i8_align == 4;
1253 if (*gr == ARMREG_R3 && split) {
1254 /* first word in r3 and the second on the stack */
1255 ainfo->offset = *stack_size;
1256 ainfo->reg = ARMREG_SP; /* in the caller */
1257 ainfo->storage = RegTypeBaseGen;
1259 } else if (*gr >= ARMREG_R3) {
1260 if (eabi_supported) {
1261 /* darwin aligns longs to 4 byte only */
1262 if (i8_align == 8) {
1267 ainfo->offset = *stack_size;
1268 ainfo->reg = ARMREG_SP; /* in the caller */
1269 ainfo->storage = RegTypeBase;
/* EABI: a 64-bit value in registers must start at an even register. */
1272 if (eabi_supported) {
1273 if (i8_align == 8 && ((*gr) & 1))
1276 ainfo->storage = RegTypeIRegPair;
/*
 * add_float:
 * Assign the next floating point argument under the armhf (AAPCS VFP)
 * convention, using single-precision register numbering in *fpr and
 * tracking back-filled spare registers via *float_spare; overflows to
 * the stack once s0-s15 are exhausted.
 */
1285 add_float (guint *fpr, guint *stack_size, ArgInfo *ainfo, gboolean is_double, gint *float_spare)
1288 * If we're calling a function like this:
1290 * void foo(float a, double b, float c)
1292 * We pass a in s0 and b in d1. That leaves us
1293 * with s1 being unused. The armhf ABI recognizes
1294 * this and requires register assignment to then
1295 * use that for the next single-precision arg,
1296 * i.e. c in this example. So float_spare either
1297 * tells us which reg to use for the next single-
1298 * precision arg, or it's -1, meaning use *fpr.
1300 * Note that even though most of the JIT speaks
1301 * double-precision, fpr represents single-
1302 * precision registers.
1304 * See parts 5.5 and 6.1.2 of the AAPCS for how
1308 if (*fpr < ARM_VFP_F16 || (!is_double && *float_spare >= 0)) {
1309 ainfo->storage = RegTypeFP;
1313 * If we're passing a double-precision value
1314 * and *fpr is odd (e.g. it's s1, s3, ...)
1315 * we need to use the next even register. So
1316 * we mark the current *fpr as a spare that
1317 * can be used for the next single-precision
1321 *float_spare = *fpr;
1326 * At this point, we have an even register
1327 * so we assign that and move along.
1331 } else if (*float_spare >= 0) {
1333 * We're passing a single-precision value
1334 * and it looks like a spare single-
1335 * precision register is available. Let's
1339 ainfo->reg = *float_spare;
1343 * If we hit this branch, we're passing a
1344 * single-precision value and we can simply
1345 * use the next available register.
1353 * We've exhausted available floating point
1354 * regs, so pass the rest on the stack.
/* Stack overflow path: record the caller-frame offset. */
1362 ainfo->offset = *stack_size;
1363 ainfo->reg = ARMREG_SP;
1364 ainfo->storage = RegTypeBase;
/*
 * get_call_info:
 * Build a CallInfo describing where each argument and the return value of
 * SIG live under the ARM calling convention: core registers (via
 * add_general), VFP registers (via add_float), the stack, or — for
 * vtypes — by-value register words or a hidden return-address argument
 * (cinfo->vtype_retaddr). Allocates from MP when non-NULL, otherwise from
 * the GC-less heap (g_malloc0). Also computes the total outgoing stack
 * usage (cinfo->stack_usage, 8-byte aligned at the end).
 * NOTE(review): fragmentary extraction — interior lines are missing; the
 * leading numbers are original line numbers fused into the text.
 */
1371 get_call_info (MonoGenericSharingContext *gsctx, MonoMemPool *mp, MonoMethodSignature *sig)
1373 guint i, gr, fpr, pstart;
1375 int n = sig->hasthis + sig->param_count;
1376 MonoType *simpletype;
1377 guint32 stack_size = 0;
1379 gboolean is_pinvoke = sig->pinvoke;
1383 cinfo = mono_mempool_alloc0 (mp, sizeof (CallInfo) + (sizeof (ArgInfo) * n));
1385 cinfo = g_malloc0 (sizeof (CallInfo) + (sizeof (ArgInfo) * n));
/* Classify the return type first: small pinvoke structs come back by
 * value in registers, everything else struct-like needs a hidden
 * return-address argument. */
1392 t = mini_type_get_underlying_type (gsctx, sig->ret);
1393 if (MONO_TYPE_ISSTRUCT (t)) {
1396 if (is_pinvoke && mono_class_native_size (mono_class_from_mono_type (t), &align) <= sizeof (gpointer)) {
1397 cinfo->ret.storage = RegTypeStructByVal;
1399 cinfo->vtype_retaddr = TRUE;
1401 } else if (!(t->type == MONO_TYPE_GENERICINST && !mono_type_generic_inst_is_valuetype (t)) && mini_is_gsharedvt_type_gsctx (gsctx, t)) {
1402 cinfo->vtype_retaddr = TRUE;
1408 * To simplify get_this_arg_reg () and LLVM integration, emit the vret arg after
1409 * the first argument, allowing 'this' to be always passed in the first arg reg.
1410 * Also do this if the first argument is a reference type, since virtual calls
1411 * are sometimes made using calli without sig->hasthis set, like in the delegate
1414 if (cinfo->vtype_retaddr && !is_pinvoke && (sig->hasthis || (sig->param_count > 0 && MONO_TYPE_IS_REFERENCE (mini_type_get_underlying_type (gsctx, sig->params [0]))))) {
1416 add_general (&gr, &stack_size, cinfo->args + 0, TRUE);
1418 add_general (&gr, &stack_size, &cinfo->args [sig->hasthis + 0], TRUE);
1422 add_general (&gr, &stack_size, &cinfo->ret, TRUE);
1423 cinfo->vret_arg_index = 1;
1427 add_general (&gr, &stack_size, cinfo->args + 0, TRUE);
1431 if (cinfo->vtype_retaddr)
1432 add_general (&gr, &stack_size, &cinfo->ret, TRUE);
/* Main per-parameter classification loop. */
1435 DEBUG(printf("params: %d\n", sig->param_count));
1436 for (i = pstart; i < sig->param_count; ++i) {
1437 ArgInfo *ainfo = &cinfo->args [n];
1439 if ((sig->call_convention == MONO_CALL_VARARG) && (i == sig->sentinelpos)) {
1440 /* Prevent implicit arguments and sig_cookie from
1441 being passed in registers */
1444 /* Emit the signature cookie just before the implicit arguments */
1445 add_general (&gr, &stack_size, &cinfo->sig_cookie, TRUE);
1447 DEBUG(printf("param %d: ", i));
1448 if (sig->params [i]->byref) {
1449 DEBUG(printf("byref\n"));
1450 add_general (&gr, &stack_size, ainfo, TRUE);
1454 simpletype = mini_type_get_underlying_type (gsctx, sig->params [i]);
1455 switch (simpletype->type) {
1456 case MONO_TYPE_BOOLEAN:
1459 cinfo->args [n].size = 1;
1460 add_general (&gr, &stack_size, ainfo, TRUE);
1463 case MONO_TYPE_CHAR:
1466 cinfo->args [n].size = 2;
1467 add_general (&gr, &stack_size, ainfo, TRUE);
1472 cinfo->args [n].size = 4;
1473 add_general (&gr, &stack_size, ainfo, TRUE);
/* Pointer-sized reference and pointer types. */
1479 case MONO_TYPE_FNPTR:
1480 case MONO_TYPE_CLASS:
1481 case MONO_TYPE_OBJECT:
1482 case MONO_TYPE_STRING:
1483 case MONO_TYPE_SZARRAY:
1484 case MONO_TYPE_ARRAY:
1485 cinfo->args [n].size = sizeof (gpointer);
1486 add_general (&gr, &stack_size, ainfo, TRUE);
1489 case MONO_TYPE_GENERICINST:
1490 if (!mono_type_generic_inst_is_valuetype (simpletype)) {
1491 cinfo->args [n].size = sizeof (gpointer);
1492 add_general (&gr, &stack_size, ainfo, TRUE);
1496 if (mini_is_gsharedvt_type_gsctx (gsctx, simpletype)) {
1497 /* gsharedvt arguments are passed by ref */
1498 g_assert (mini_is_gsharedvt_type_gsctx (gsctx, simpletype));
1499 add_general (&gr, &stack_size, ainfo, TRUE);
1500 switch (ainfo->storage) {
1501 case RegTypeGeneral:
1502 ainfo->storage = RegTypeGSharedVtInReg;
1505 ainfo->storage = RegTypeGSharedVtOnStack;
1508 g_assert_not_reached ();
/* Value types / typedbyref: passed by value, possibly split across
 * registers (ainfo->size words) and the stack (ainfo->vtsize words). */
1514 case MONO_TYPE_TYPEDBYREF:
1515 case MONO_TYPE_VALUETYPE: {
1521 if (simpletype->type == MONO_TYPE_TYPEDBYREF) {
1522 size = sizeof (MonoTypedRef);
1523 align = sizeof (gpointer);
1525 MonoClass *klass = mono_class_from_mono_type (sig->params [i]);
1527 size = mono_class_native_size (klass, &align);
1529 size = mini_type_stack_size_full (gsctx, simpletype, &align, FALSE);
1531 DEBUG(printf ("load %d bytes struct\n", size));
/* Round the struct size up to a whole number of pointer words. */
1534 align_size += (sizeof (gpointer) - 1);
1535 align_size &= ~(sizeof (gpointer) - 1);
1536 nwords = (align_size + sizeof (gpointer) -1 ) / sizeof (gpointer);
1537 ainfo->storage = RegTypeStructByVal;
1538 ainfo->struct_size = size;
1539 /* FIXME: align stack_size if needed */
1540 if (eabi_supported) {
1541 if (align >= 8 && (gr & 1))
1544 if (gr > ARMREG_R3) {
1546 ainfo->vtsize = nwords;
1548 int rest = ARMREG_R3 - gr + 1;
1549 int n_in_regs = rest >= nwords? nwords: rest;
1551 ainfo->size = n_in_regs;
1552 ainfo->vtsize = nwords - n_in_regs;
1555 nwords -= n_in_regs;
1557 if (sig->call_convention == MONO_CALL_VARARG)
1558 /* This matches the alignment in mono_ArgIterator_IntGetNextArg () */
1559 stack_size = ALIGN_TO (stack_size, align);
1560 ainfo->offset = stack_size;
1561 /*g_print ("offset for arg %d at %d\n", n, stack_size);*/
1562 stack_size += nwords * sizeof (gpointer);
1569 add_general (&gr, &stack_size, ainfo, FALSE);
/* Floating point args: VFP register when hard-float, core regs or
 * stack otherwise (missing surrounding conditions in this extract). */
1576 add_float (&fpr, &stack_size, ainfo, FALSE, &float_spare);
1578 add_general (&gr, &stack_size, ainfo, TRUE);
1586 add_float (&fpr, &stack_size, ainfo, TRUE, &float_spare);
1588 add_general (&gr, &stack_size, ainfo, FALSE);
1593 case MONO_TYPE_MVAR:
1594 /* gsharedvt arguments are passed by ref */
1595 g_assert (mini_is_gsharedvt_type_gsctx (gsctx, simpletype));
1596 add_general (&gr, &stack_size, ainfo, TRUE);
1597 switch (ainfo->storage) {
1598 case RegTypeGeneral:
1599 ainfo->storage = RegTypeGSharedVtInReg;
1602 ainfo->storage = RegTypeGSharedVtOnStack;
1605 g_assert_not_reached ();
1610 g_error ("Can't trampoline 0x%x", sig->params [i]->type);
1614 /* Handle the case where there are no implicit arguments */
1615 if ((sig->call_convention == MONO_CALL_VARARG) && (i == sig->sentinelpos)) {
1616 /* Prevent implicit arguments and sig_cookie from
1617 being passed in registers */
1620 /* Emit the signature cookie just before the implicit arguments */
1621 add_general (&gr, &stack_size, &cinfo->sig_cookie, TRUE);
/* Finally classify the return value's register location. */
1625 simpletype = mini_type_get_underlying_type (gsctx, sig->ret);
1626 switch (simpletype->type) {
1627 case MONO_TYPE_BOOLEAN:
1632 case MONO_TYPE_CHAR:
1638 case MONO_TYPE_FNPTR:
1639 case MONO_TYPE_CLASS:
1640 case MONO_TYPE_OBJECT:
1641 case MONO_TYPE_SZARRAY:
1642 case MONO_TYPE_ARRAY:
1643 case MONO_TYPE_STRING:
1644 cinfo->ret.storage = RegTypeGeneral;
1645 cinfo->ret.reg = ARMREG_R0;
/* 64-bit returns use the R0/R1 pair. */
1649 cinfo->ret.storage = RegTypeIRegPair;
1650 cinfo->ret.reg = ARMREG_R0;
/* FP returns: d0/s0 under hard-float, R0 (and R1) otherwise. */
1654 cinfo->ret.storage = RegTypeFP;
1656 if (IS_HARD_FLOAT) {
1657 cinfo->ret.reg = ARM_VFP_F0;
1659 cinfo->ret.reg = ARMREG_R0;
1663 case MONO_TYPE_GENERICINST:
1664 if (!mono_type_generic_inst_is_valuetype (simpletype)) {
1665 cinfo->ret.storage = RegTypeGeneral;
1666 cinfo->ret.reg = ARMREG_R0;
1669 // FIXME: Only for variable types
1670 if (mini_is_gsharedvt_type_gsctx (gsctx, simpletype)) {
1671 cinfo->ret.storage = RegTypeStructByAddr;
1672 g_assert (cinfo->vtype_retaddr);
1676 case MONO_TYPE_VALUETYPE:
1677 case MONO_TYPE_TYPEDBYREF:
1678 if (cinfo->ret.storage != RegTypeStructByVal)
1679 cinfo->ret.storage = RegTypeStructByAddr;
1682 case MONO_TYPE_MVAR:
1683 g_assert (mini_is_gsharedvt_type_gsctx (gsctx, simpletype));
1684 cinfo->ret.storage = RegTypeStructByAddr;
1685 g_assert (cinfo->vtype_retaddr);
1687 case MONO_TYPE_VOID:
1690 g_error ("Can't handle as return value 0x%x", sig->ret->type);
1694 /* align stack size to 8 */
1695 DEBUG (printf (" stack size: %d (%d)\n", (stack_size + 15) & ~15, stack_size));
1696 stack_size = (stack_size + 7) & ~7;
1698 cinfo->stack_usage = stack_size;
/*
 * mono_arch_tail_call_supported:
 * Decide whether a tail call from CALLER_SIG to CALLEE_SIG can be emitted
 * on ARM, by comparing the stack usage computed by get_call_info () for
 * both signatures and rejecting cases the epilogue cannot handle.
 * NOTE(review): fragmentary extraction — the trailing lines of this
 * function (cleanup of c1/c2, final return) are missing from this view.
 */
1704 mono_arch_tail_call_supported (MonoCompile *cfg, MonoMethodSignature *caller_sig, MonoMethodSignature *callee_sig)
1706 MonoType *callee_ret;
1710 if (cfg->compile_aot && !cfg->full_aot)
1711 /* OP_TAILCALL doesn't work with AOT */
1714 c1 = get_call_info (NULL, NULL, caller_sig);
1715 c2 = get_call_info (NULL, NULL, callee_sig);
1718 * Tail calls with more callee stack usage than the caller cannot be supported, since
1719 * the extra stack space would be left on the stack after the tail call.
1721 res = c1->stack_usage >= c2->stack_usage;
1722 callee_ret = mini_replace_type (callee_sig->ret);
1723 if (callee_ret && MONO_TYPE_ISSTRUCT (callee_ret) && c2->ret.storage != RegTypeStructByVal)
1724 /* An address on the callee's stack is passed as the first argument */
/* 16 * 4 == the number of bytes that fit in four argument registers'
 * worth of stack copy handled by the tail-call path. */
1727 if (c2->stack_usage > 16 * 4)
/*
 * debug_omit_fp:
 * Debug hook: defers to mono_debug_count () so frame-pointer omission can
 * be bisected/disabled at runtime during debugging.
 */
1739 debug_omit_fp (void)
1742 return mono_debug_count ();
1749 * mono_arch_compute_omit_fp:
1751 * Determine whenever the frame pointer can be eliminated.
/*
 * Starts from omit_fp = TRUE and disables it for every condition that
 * requires a fixed frame pointer (LMF, alloca, exception clauses, vararg
 * callers, tracing/profiling, stack-passed arguments). Caches the result
 * in cfg->arch.omit_fp_computed. NOTE(review): fragmentary extraction —
 * interior lines are missing from this view.
 */
1754 mono_arch_compute_omit_fp (MonoCompile *cfg)
1756 MonoMethodSignature *sig;
1757 MonoMethodHeader *header;
/* Result is memoized; bail out if already computed. */
1761 if (cfg->arch.omit_fp_computed)
1764 header = cfg->header;
1766 sig = mono_method_signature (cfg->method);
1768 if (!cfg->arch.cinfo)
1769 cfg->arch.cinfo = get_call_info (cfg->generic_sharing_context, cfg->mempool, sig);
1770 cinfo = cfg->arch.cinfo;
1773 * FIXME: Remove some of the restrictions.
1775 cfg->arch.omit_fp = TRUE;
1776 cfg->arch.omit_fp_computed = TRUE;
1778 if (cfg->disable_omit_fp)
1779 cfg->arch.omit_fp = FALSE;
1780 if (!debug_omit_fp ())
1781 cfg->arch.omit_fp = FALSE;
1783 if (cfg->method->save_lmf)
1784 cfg->arch.omit_fp = FALSE;
1786 if (cfg->flags & MONO_CFG_HAS_ALLOCA)
1787 cfg->arch.omit_fp = FALSE;
1788 if (header->num_clauses)
1789 cfg->arch.omit_fp = FALSE;
1790 if (cfg->param_area)
1791 cfg->arch.omit_fp = FALSE;
1792 if (!sig->pinvoke && (sig->call_convention == MONO_CALL_VARARG))
1793 cfg->arch.omit_fp = FALSE;
1794 if ((mono_jit_trace_calls != NULL && mono_trace_eval (cfg->method)) ||
1795 (cfg->prof_options & MONO_PROFILE_ENTER_LEAVE))
1796 cfg->arch.omit_fp = FALSE;
/* Arguments living on the stack force a frame pointer: their offsets
 * depend on the final frame size, which isn't known yet. */
1797 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
1798 ArgInfo *ainfo = &cinfo->args [i];
1800 if (ainfo->storage == RegTypeBase || ainfo->storage == RegTypeBaseGen || ainfo->storage == RegTypeStructByVal) {
1802 * The stack offset can only be determined when the frame
1805 cfg->arch.omit_fp = FALSE;
/* Sum the local variable sizes (use of locals_size is in missing lines). */
1810 for (i = cfg->locals_start; i < cfg->num_varinfo; i++) {
1811 MonoInst *ins = cfg->varinfo [i];
1814 locals_size += mono_type_size (ins->inst_vtype, &ialign);
1819 * Set var information according to the calling convention. arm version.
1820 * The locals var stuff should most likely be split in another method.
/*
 * Assigns frame offsets (OP_REGOFFSET) or registers (OP_REGVAR) to the
 * return value, special JIT variables (seq points, atomic temp), locals
 * and incoming arguments, growing 'offset' upward from the frame register
 * (SP when the frame pointer is omitted, FP otherwise — MONO_CFG_HAS_SPILLUP).
 * NOTE(review): fragmentary extraction — interior lines are missing and
 * the leading numbers are original line numbers fused into the text.
 */
1823 mono_arch_allocate_vars (MonoCompile *cfg)
1825 MonoMethodSignature *sig;
1826 MonoMethodHeader *header;
1829 int i, offset, size, align, curinst;
1833 sig = mono_method_signature (cfg->method);
1835 if (!cfg->arch.cinfo)
1836 cfg->arch.cinfo = get_call_info (cfg->generic_sharing_context, cfg->mempool, sig);
1837 cinfo = cfg->arch.cinfo;
1838 sig_ret = mini_replace_type (sig->ret);
1840 mono_arch_compute_omit_fp (cfg);
/* Frame register choice follows the omit-fp decision just computed. */
1842 if (cfg->arch.omit_fp)
1843 cfg->frame_reg = ARMREG_SP;
1845 cfg->frame_reg = ARMREG_FP;
1847 cfg->flags |= MONO_CFG_HAS_SPILLUP;
1849 /* allow room for the vararg method args: void* and long/double */
1850 if (mono_jit_trace_calls != NULL && mono_trace_eval (cfg->method))
1851 cfg->param_area = MAX (cfg->param_area, sizeof (gpointer)*8);
1853 header = cfg->header;
1855 /* See mono_arch_get_global_int_regs () */
1856 if (cfg->flags & MONO_CFG_HAS_CALLS)
1857 cfg->uses_rgctx_reg = TRUE;
1859 if (cfg->frame_reg != ARMREG_SP)
1860 cfg->used_int_regs |= 1 << cfg->frame_reg;
1862 if (cfg->compile_aot || cfg->uses_rgctx_reg || COMPILE_LLVM (cfg))
1863 /* V5 is reserved for passing the vtable/rgctx/IMT method */
1864 cfg->used_int_regs |= (1 << ARMREG_V5);
/* Scalar returns live directly in R0. */
1868 if (!MONO_TYPE_ISSTRUCT (sig_ret) && !cinfo->vtype_retaddr) {
1869 if (sig_ret->type != MONO_TYPE_VOID) {
1870 cfg->ret->opcode = OP_REGVAR;
1871 cfg->ret->inst_c0 = ARMREG_R0;
1874 /* local vars are at a positive offset from the stack pointer */
1876 * also note that if the function uses alloca, we use FP
1877 * to point at the local variables.
1879 offset = 0; /* linkage area */
1880 /* align the offset to 16 bytes: not sure this is needed here */
1882 //offset &= ~(8 - 1);
1884 /* add parameter area size for called functions */
1885 offset += cfg->param_area;
1888 if (cfg->flags & MONO_CFG_HAS_FPOUT)
1891 /* allow room to save the return value */
1892 if (mono_jit_trace_calls != NULL && mono_trace_eval (cfg->method))
1895 /* the MonoLMF structure is stored just below the stack pointer */
/* By-val struct returns get a pointer-aligned slot at a negative offset;
 * hidden-vret-arg returns get a slot for the destination address. */
1896 if (cinfo->ret.storage == RegTypeStructByVal) {
1897 cfg->ret->opcode = OP_REGOFFSET;
1898 cfg->ret->inst_basereg = cfg->frame_reg;
1899 offset += sizeof (gpointer) - 1;
1900 offset &= ~(sizeof (gpointer) - 1);
1901 cfg->ret->inst_offset = - offset;
1902 offset += sizeof(gpointer);
1903 } else if (cinfo->vtype_retaddr) {
1904 ins = cfg->vret_addr;
1905 offset += sizeof(gpointer) - 1;
1906 offset &= ~(sizeof(gpointer) - 1);
1907 ins->inst_offset = offset;
1908 ins->opcode = OP_REGOFFSET;
1909 ins->inst_basereg = cfg->frame_reg;
1910 if (G_UNLIKELY (cfg->verbose_level > 1)) {
1911 printf ("vret_addr =");
1912 mono_print_ins (cfg->vret_addr);
1914 offset += sizeof(gpointer);
1917 /* Allocate these first so they have a small offset, OP_SEQ_POINT depends on this */
1918 if (cfg->arch.seq_point_info_var) {
1921 ins = cfg->arch.seq_point_info_var;
1925 offset += align - 1;
1926 offset &= ~(align - 1);
1927 ins->opcode = OP_REGOFFSET;
1928 ins->inst_basereg = cfg->frame_reg;
1929 ins->inst_offset = offset;
1932 ins = cfg->arch.ss_trigger_page_var;
1935 offset += align - 1;
1936 offset &= ~(align - 1);
1937 ins->opcode = OP_REGOFFSET;
1938 ins->inst_basereg = cfg->frame_reg;
1939 ins->inst_offset = offset;
/* Soft-breakpoint sequence-point variables (read/ss-method/bp-method). */
1943 if (cfg->arch.seq_point_read_var) {
1946 ins = cfg->arch.seq_point_read_var;
1950 offset += align - 1;
1951 offset &= ~(align - 1);
1952 ins->opcode = OP_REGOFFSET;
1953 ins->inst_basereg = cfg->frame_reg;
1954 ins->inst_offset = offset;
1957 ins = cfg->arch.seq_point_ss_method_var;
1960 offset += align - 1;
1961 offset &= ~(align - 1);
1962 ins->opcode = OP_REGOFFSET;
1963 ins->inst_basereg = cfg->frame_reg;
1964 ins->inst_offset = offset;
1967 ins = cfg->arch.seq_point_bp_method_var;
1970 offset += align - 1;
1971 offset &= ~(align - 1);
1972 ins->opcode = OP_REGOFFSET;
1973 ins->inst_basereg = cfg->frame_reg;
1974 ins->inst_offset = offset;
/* Scratch slot used by the inline atomic op implementations. */
1978 if (cfg->has_atomic_exchange_i4 || cfg->has_atomic_cas_i4 || cfg->has_atomic_add_i4) {
1979 /* Allocate a temporary used by the atomic ops */
1983 /* Allocate a local slot to hold the sig cookie address */
1984 offset += align - 1;
1985 offset &= ~(align - 1);
1986 cfg->arch.atomic_tmp_offset = offset;
1989 cfg->arch.atomic_tmp_offset = -1;
1992 cfg->locals_min_stack_offset = offset;
/* Lay out the method's local variables. */
1994 curinst = cfg->locals_start;
1995 for (i = curinst; i < cfg->num_varinfo; ++i) {
1998 ins = cfg->varinfo [i];
1999 if ((ins->flags & MONO_INST_IS_DEAD) || ins->opcode == OP_REGVAR || ins->opcode == OP_REGOFFSET)
2002 t = ins->inst_vtype;
2003 if (cfg->gsharedvt && mini_is_gsharedvt_variable_type (cfg, t))
2006 /* inst->backend.is_pinvoke indicates native sized value types, this is used by the
2007 * pinvoke wrappers when they call functions returning structure */
2008 if (ins->backend.is_pinvoke && MONO_TYPE_ISSTRUCT (t) && t->type != MONO_TYPE_TYPEDBYREF) {
2009 size = mono_class_native_size (mono_class_from_mono_type (t), &ualign);
2013 size = mono_type_size (t, &align);
2015 /* FIXME: if a structure is misaligned, our memcpy doesn't work,
2016 * since it loads/stores misaligned words, which don't do the right thing.
2018 if (align < 4 && size >= 4)
2020 if (ALIGN_TO (offset, align) > ALIGN_TO (offset, 4))
2021 mini_gc_set_slot_type_from_fp (cfg, ALIGN_TO (offset, 4), SLOT_NOREF)
2024 ins->opcode = OP_REGOFFSET;
2025 ins->inst_offset = offset;
2026 ins->inst_basereg = cfg->frame_reg;
2028 //g_print ("allocating local %d to %d\n", i, inst->inst_offset);
2031 cfg->locals_max_stack_offset = offset;
/* Arguments not kept in registers get frame slots as well. */
2035 ins = cfg->args [curinst];
2036 if (ins->opcode != OP_REGVAR) {
2037 ins->opcode = OP_REGOFFSET;
2038 ins->inst_basereg = cfg->frame_reg;
2039 offset += sizeof (gpointer) - 1;
2040 offset &= ~(sizeof (gpointer) - 1);
2041 ins->inst_offset = offset;
2042 offset += sizeof (gpointer);
2047 if (sig->call_convention == MONO_CALL_VARARG) {
2051 /* Allocate a local slot to hold the sig cookie address */
2052 offset += align - 1;
2053 offset &= ~(align - 1);
2054 cfg->sig_cookie = offset;
2058 for (i = 0; i < sig->param_count; ++i) {
2059 ins = cfg->args [curinst];
2061 if (ins->opcode != OP_REGVAR) {
2062 ins->opcode = OP_REGOFFSET;
2063 ins->inst_basereg = cfg->frame_reg;
2064 size = mini_type_stack_size_full (cfg->generic_sharing_context, sig->params [i], &ualign, sig->pinvoke);
2066 /* FIXME: if a structure is misaligned, our memcpy doesn't work,
2067 * since it loads/stores misaligned words, which don't do the right thing.
2069 if (align < 4 && size >= 4)
2071 /* The code in the prolog () stores words when storing vtypes received in a register */
2072 if (MONO_TYPE_ISSTRUCT (sig->params [i]))
2074 if (ALIGN_TO (offset, align) > ALIGN_TO (offset, 4))
2075 mini_gc_set_slot_type_from_fp (cfg, ALIGN_TO (offset, 4), SLOT_NOREF);
2076 offset += align - 1;
2077 offset &= ~(align - 1);
2078 ins->inst_offset = offset;
2084 /* align the offset to 8 bytes */
2085 if (ALIGN_TO (offset, 8) > ALIGN_TO (offset, 4))
2086 mini_gc_set_slot_type_from_fp (cfg, ALIGN_TO (offset, 4), SLOT_NOREF);
2091 cfg->stack_offset = offset;
/*
 * mono_arch_create_vars:
 * Create the ARM-specific compile-time variables: VFP scratch slots for
 * hard-float, the vret address variable for hidden-argument struct
 * returns, and the sequence-point support variables (soft breakpoints or
 * AOT seq-point info / single-step trigger page).
 * NOTE(review): fragmentary extraction — interior lines are missing.
 */
2095 mono_arch_create_vars (MonoCompile *cfg)
2097 MonoMethodSignature *sig;
2101 sig = mono_method_signature (cfg->method);
2103 if (!cfg->arch.cinfo)
2104 cfg->arch.cinfo = get_call_info (cfg->generic_sharing_context, cfg->mempool, sig);
2105 cinfo = cfg->arch.cinfo;
/* Two volatile double-sized scratch locals for hard-float register moves. */
2107 if (IS_HARD_FLOAT) {
2108 for (i = 0; i < 2; i++) {
2109 MonoInst *inst = mono_compile_create_var (cfg, &mono_defaults.double_class->byval_arg, OP_LOCAL);
2110 inst->flags |= MONO_INST_VOLATILE;
2112 cfg->arch.vfp_scratch_slots [i] = (gpointer) inst;
2116 if (cinfo->ret.storage == RegTypeStructByVal)
2117 cfg->ret_var_is_local = TRUE;
2119 if (cinfo->vtype_retaddr) {
2120 cfg->vret_addr = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_ARG);
2121 if (G_UNLIKELY (cfg->verbose_level > 1)) {
2122 printf ("vret_addr = ");
2123 mono_print_ins (cfg->vret_addr);
2127 if (cfg->gen_seq_points_debug_data) {
2128 if (cfg->soft_breakpoints) {
2129 MonoInst *ins = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
2130 ins->flags |= MONO_INST_VOLATILE;
2131 cfg->arch.seq_point_read_var = ins;
2133 ins = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
2134 ins->flags |= MONO_INST_VOLATILE;
2135 cfg->arch.seq_point_ss_method_var = ins;
2137 ins = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
2138 ins->flags |= MONO_INST_VOLATILE;
2139 cfg->arch.seq_point_bp_method_var = ins;
/* Soft breakpoints are incompatible with AOT compilation. */
2141 g_assert (!cfg->compile_aot);
2142 } else if (cfg->compile_aot) {
2143 MonoInst *ins = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
2144 ins->flags |= MONO_INST_VOLATILE;
2145 cfg->arch.seq_point_info_var = ins;
2147 /* Allocate a separate variable for this to save 1 load per seq point */
2148 ins = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
2149 ins->flags |= MONO_INST_VOLATILE;
2150 cfg->arch.ss_trigger_page_var = ins;
/*
 * emit_sig_cookie:
 * For vararg calls, store a "signature cookie" (a trimmed copy of the
 * call signature covering only the trailing implicit arguments) at its
 * stack slot so mono_ArgIterator_Setup () can walk the varargs.
 * No-op for tail calls.
 */
2156 emit_sig_cookie (MonoCompile *cfg, MonoCallInst *call, CallInfo *cinfo)
2158 MonoMethodSignature *tmp_sig;
2161 if (call->tail_call)
/* The cookie is always classified to a stack slot by get_call_info (). */
2164 g_assert (cinfo->sig_cookie.storage == RegTypeBase);
2167 * mono_ArgIterator_Setup assumes the signature cookie is
2168 * passed first and all the arguments which were before it are
2169 * passed on the stack after the signature. So compensate by
2170 * passing a different signature.
2172 tmp_sig = mono_metadata_signature_dup (call->signature);
2173 tmp_sig->param_count -= call->signature->sentinelpos;
2174 tmp_sig->sentinelpos = 0;
2175 memcpy (tmp_sig->params, call->signature->params + call->signature->sentinelpos, tmp_sig->param_count * sizeof (MonoType*));
2177 sig_reg = mono_alloc_ireg (cfg);
2178 MONO_EMIT_NEW_SIGNATURECONST (cfg, sig_reg, tmp_sig);
2180 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, ARMREG_SP, cinfo->sig_cookie.offset, sig_reg);
/*
 * mono_arch_get_llvm_call_info:
 * Translate the ARM CallInfo classification into the LLVMCallInfo the
 * LLVM backend consumes. Storage kinds LLVM cannot express set
 * cfg->disable_llvm with a diagnostic in cfg->exception_message, which
 * makes the method fall back to the non-LLVM JIT.
 * NOTE(review): fragmentary extraction — interior lines are missing.
 */
2185 mono_arch_get_llvm_call_info (MonoCompile *cfg, MonoMethodSignature *sig)
2190 LLVMCallInfo *linfo;
2192 n = sig->param_count + sig->hasthis;
2194 cinfo = get_call_info (cfg->generic_sharing_context, cfg->mempool, sig);
2196 linfo = mono_mempool_alloc0 (cfg->mempool, sizeof (LLVMCallInfo) + (sizeof (LLVMArgInfo) * n));
2199 * LLVM always uses the native ABI while we use our own ABI, the
2200 * only difference is the handling of vtypes:
2201 * - we only pass/receive them in registers in some cases, and only
2202 * in 1 or 2 integer registers.
2204 if (cinfo->vtype_retaddr) {
2205 /* Vtype returned using a hidden argument */
2206 linfo->ret.storage = LLVMArgVtypeRetAddr;
2207 linfo->vret_arg_index = cinfo->vret_arg_index;
2208 } else if (cinfo->ret.storage != RegTypeGeneral && cinfo->ret.storage != RegTypeNone && cinfo->ret.storage != RegTypeFP && cinfo->ret.storage != RegTypeIRegPair) {
2209 cfg->exception_message = g_strdup ("unknown ret conv");
2210 cfg->disable_llvm = TRUE;
/* Map each argument's storage to the closest LLVM argument kind. */
2214 for (i = 0; i < n; ++i) {
2215 ainfo = cinfo->args + i;
2217 linfo->args [i].storage = LLVMArgNone;
2219 switch (ainfo->storage) {
2220 case RegTypeGeneral:
2221 case RegTypeIRegPair:
2223 linfo->args [i].storage = LLVMArgInIReg;
2225 case RegTypeStructByVal:
2226 linfo->args [i].storage = LLVMArgAsIArgs;
2227 linfo->args [i].nslots = ainfo->struct_size / sizeof (gpointer);
2230 cfg->exception_message = g_strdup_printf ("ainfo->storage (%d)", ainfo->storage);
2231 cfg->disable_llvm = TRUE;
/*
 * mono_arch_emit_call:
 * Emit the IR that moves each argument of CALL into the location chosen
 * by get_call_info (): register moves for R0-R3 / VFP args, stores to the
 * outgoing SP-relative area for stack args, OP_OUTARG_VT for by-value
 * structs, plus the vararg sig cookie and the hidden vret argument.
 * NOTE(review): fragmentary extraction — interior lines are missing and
 * the leading numbers are original line numbers fused into the text.
 */
2241 mono_arch_emit_call (MonoCompile *cfg, MonoCallInst *call)
2244 MonoMethodSignature *sig;
2248 sig = call->signature;
2249 n = sig->param_count + sig->hasthis;
2251 cinfo = get_call_info (cfg->generic_sharing_context, NULL, sig);
2253 for (i = 0; i < n; ++i) {
2254 ArgInfo *ainfo = cinfo->args + i;
/* For the implicit 'this' slot the formal type is a plain pointer. */
2257 if (i >= sig->hasthis)
2258 t = sig->params [i - sig->hasthis];
2260 t = &mono_defaults.int_class->byval_arg;
2261 t = mini_type_get_underlying_type (cfg->generic_sharing_context, t);
2263 if ((sig->call_convention == MONO_CALL_VARARG) && (i == sig->sentinelpos)) {
2264 /* Emit the signature cookie just before the implicit arguments */
2265 emit_sig_cookie (cfg, call, cinfo);
2268 in = call->args [i];
2270 switch (ainfo->storage) {
2271 case RegTypeGeneral:
2272 case RegTypeIRegPair:
/* 64-bit values: move the two component vregs (dreg+1/dreg+2) into
 * consecutive core registers. */
2273 if (!t->byref && ((t->type == MONO_TYPE_I8) || (t->type == MONO_TYPE_U8))) {
2274 MONO_INST_NEW (cfg, ins, OP_MOVE);
2275 ins->dreg = mono_alloc_ireg (cfg);
2276 ins->sreg1 = in->dreg + 1;
2277 MONO_ADD_INS (cfg->cbb, ins);
2278 mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, ainfo->reg, FALSE);
2280 MONO_INST_NEW (cfg, ins, OP_MOVE);
2281 ins->dreg = mono_alloc_ireg (cfg);
2282 ins->sreg1 = in->dreg + 2;
2283 MONO_ADD_INS (cfg->cbb, ins);
2284 mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, ainfo->reg + 1, FALSE);
2285 } else if (!t->byref && ((t->type == MONO_TYPE_R8) || (t->type == MONO_TYPE_R4))) {
2286 if (ainfo->size == 4) {
2287 if (IS_SOFT_FLOAT) {
2288 /* mono_emit_call_args () have already done the r8->r4 conversion */
2289 /* The converted value is in an int vreg */
2290 MONO_INST_NEW (cfg, ins, OP_MOVE);
2291 ins->dreg = mono_alloc_ireg (cfg);
2292 ins->sreg1 = in->dreg;
2293 MONO_ADD_INS (cfg->cbb, ins);
2294 mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, ainfo->reg, FALSE);
/* VFP float passed in a core reg: bounce it through the param area
 * (store as r4, reload as an int word). */
2298 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORER4_MEMBASE_REG, ARMREG_SP, (cfg->param_area - 8), in->dreg);
2299 creg = mono_alloc_ireg (cfg);
2300 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOAD_MEMBASE, creg, ARMREG_SP, (cfg->param_area - 8));
2301 mono_call_inst_add_outarg_reg (cfg, call, creg, ainfo->reg, FALSE);
2304 if (IS_SOFT_FLOAT) {
/* Soft-float double: split into low/high 32-bit words. */
2305 MONO_INST_NEW (cfg, ins, OP_FGETLOW32);
2306 ins->dreg = mono_alloc_ireg (cfg);
2307 ins->sreg1 = in->dreg;
2308 MONO_ADD_INS (cfg->cbb, ins);
2309 mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, ainfo->reg, FALSE);
2311 MONO_INST_NEW (cfg, ins, OP_FGETHIGH32);
2312 ins->dreg = mono_alloc_ireg (cfg);
2313 ins->sreg1 = in->dreg;
2314 MONO_ADD_INS (cfg->cbb, ins);
2315 mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, ainfo->reg + 1, FALSE);
/* VFP double to a core reg pair: bounce through the param area. */
2319 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORER8_MEMBASE_REG, ARMREG_SP, (cfg->param_area - 8), in->dreg);
2320 creg = mono_alloc_ireg (cfg);
2321 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOAD_MEMBASE, creg, ARMREG_SP, (cfg->param_area - 8));
2322 mono_call_inst_add_outarg_reg (cfg, call, creg, ainfo->reg, FALSE);
2323 creg = mono_alloc_ireg (cfg);
2324 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOAD_MEMBASE, creg, ARMREG_SP, (cfg->param_area - 8 + 4));
2325 mono_call_inst_add_outarg_reg (cfg, call, creg, ainfo->reg + 1, FALSE);
2328 cfg->flags |= MONO_CFG_HAS_FPOUT;
/* Plain word-sized argument: simple register move. */
2330 MONO_INST_NEW (cfg, ins, OP_MOVE);
2331 ins->dreg = mono_alloc_ireg (cfg);
2332 ins->sreg1 = in->dreg;
2333 MONO_ADD_INS (cfg->cbb, ins);
2335 mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, ainfo->reg, FALSE);
2338 case RegTypeStructByAddr:
2341 /* FIXME: where si the data allocated? */
2342 arg->backend.reg3 = ainfo->reg;
2343 call->used_iregs |= 1 << ainfo->reg;
2344 g_assert_not_reached ();
2347 case RegTypeStructByVal:
2348 case RegTypeGSharedVtInReg:
2349 case RegTypeGSharedVtOnStack:
/* Defer vtype marshaling to mono_arch_emit_outarg_vt () via OP_OUTARG_VT,
 * carrying a private copy of the ArgInfo. */
2350 MONO_INST_NEW (cfg, ins, OP_OUTARG_VT);
2351 ins->opcode = OP_OUTARG_VT;
2352 ins->sreg1 = in->dreg;
2353 ins->klass = in->klass;
2354 ins->inst_p0 = call;
2355 ins->inst_p1 = mono_mempool_alloc (cfg->mempool, sizeof (ArgInfo));
2356 memcpy (ins->inst_p1, ainfo, sizeof (ArgInfo));
2357 mono_call_inst_add_outarg_vt (cfg, call, ins);
2358 MONO_ADD_INS (cfg->cbb, ins);
/* RegTypeBase: argument goes to the outgoing stack area at ainfo->offset. */
2361 if (!t->byref && ((t->type == MONO_TYPE_I8) || (t->type == MONO_TYPE_U8))) {
2362 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, ARMREG_SP, ainfo->offset, in->dreg);
2363 } else if (!t->byref && ((t->type == MONO_TYPE_R4) || (t->type == MONO_TYPE_R8))) {
2364 if (t->type == MONO_TYPE_R8) {
2365 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORER8_MEMBASE_REG, ARMREG_SP, ainfo->offset, in->dreg);
2368 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, ARMREG_SP, ainfo->offset, in->dreg);
2370 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORER4_MEMBASE_REG, ARMREG_SP, ainfo->offset, in->dreg);
2373 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, ARMREG_SP, ainfo->offset, in->dreg);
2376 case RegTypeBaseGen:
/* Split 64-bit value: one word on the stack, the other in R3
 * (word order depends on endianness). */
2377 if (!t->byref && ((t->type == MONO_TYPE_I8) || (t->type == MONO_TYPE_U8))) {
2378 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, ARMREG_SP, ainfo->offset, (G_BYTE_ORDER == G_BIG_ENDIAN) ? in->dreg + 1 : in->dreg + 2);
2379 MONO_INST_NEW (cfg, ins, OP_MOVE);
2380 ins->dreg = mono_alloc_ireg (cfg);
2381 ins->sreg1 = G_BYTE_ORDER == G_BIG_ENDIAN ? in->dreg + 2 : in->dreg + 1;
2382 MONO_ADD_INS (cfg->cbb, ins);
2383 mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, ARMREG_R3, FALSE);
2384 } else if (!t->byref && (t->type == MONO_TYPE_R8)) {
2387 /* This should work for soft-float as well */
2389 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORER8_MEMBASE_REG, ARMREG_SP, (cfg->param_area - 8), in->dreg);
2390 creg = mono_alloc_ireg (cfg);
2391 mono_call_inst_add_outarg_reg (cfg, call, creg, ARMREG_R3, FALSE);
2392 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOAD_MEMBASE, creg, ARMREG_SP, (cfg->param_area - 8));
2393 creg = mono_alloc_ireg (cfg);
2394 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOAD_MEMBASE, creg, ARMREG_SP, (cfg->param_area - 4));
2395 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, ARMREG_SP, ainfo->offset, creg);
2396 cfg->flags |= MONO_CFG_HAS_FPOUT;
2398 g_assert_not_reached ();
/* RegTypeFP (hard-float): doubles move directly into a VFP reg. */
2402 int fdreg = mono_alloc_freg (cfg);
2404 if (ainfo->size == 8) {
2405 MONO_INST_NEW (cfg, ins, OP_FMOVE);
2406 ins->sreg1 = in->dreg;
2408 MONO_ADD_INS (cfg->cbb, ins);
2410 mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, ainfo->reg, TRUE);
2415 * Mono's register allocator doesn't speak single-precision registers that
2416 * overlap double-precision registers (i.e. armhf). So we have to work around
2417 * the register allocator and load the value from memory manually.
2419 * So we create a variable for the float argument and an instruction to store
2420 * the argument into the variable. We then store the list of these arguments
2421 * in cfg->float_args. This list is then used by emit_float_args later to
2422 * pass the arguments in the various call opcodes.
2424 * This is not very nice, and we should really try to fix the allocator.
2427 MonoInst *float_arg = mono_compile_create_var (cfg, &mono_defaults.single_class->byval_arg, OP_LOCAL);
2429 /* Make sure the instruction isn't seen as pointless and removed.
2431 float_arg->flags |= MONO_INST_VOLATILE;
2433 MONO_EMIT_NEW_UNALU (cfg, OP_FMOVE, float_arg->dreg, in->dreg);
2435 /* We use the dreg to look up the instruction later. The hreg is used to
2436 * emit the instruction that loads the value into the FP reg.
2438 fad = mono_mempool_alloc0 (cfg->mempool, sizeof (FloatArgData));
2439 fad->vreg = float_arg->dreg;
2440 fad->hreg = ainfo->reg;
2442 call->float_args = g_slist_append_mempool (cfg->mempool, call->float_args, fad);
2445 call->used_iregs |= 1 << ainfo->reg;
2446 cfg->flags |= MONO_CFG_HAS_FPOUT;
2450 g_assert_not_reached ();
2454 /* Handle the case where there are no implicit arguments */
2455 if (!sig->pinvoke && (sig->call_convention == MONO_CALL_VARARG) && (n == sig->sentinelpos))
2456 emit_sig_cookie (cfg, call, cinfo);
2458 if (cinfo->ret.storage == RegTypeStructByVal) {
2459 /* The JIT will transform this into a normal call */
2460 call->vret_in_reg = TRUE;
2461 } else if (cinfo->vtype_retaddr) {
/* Pass the hidden return-buffer address in its assigned register. */
2463 MONO_INST_NEW (cfg, vtarg, OP_MOVE);
2464 vtarg->sreg1 = call->vret_var->dreg;
2465 vtarg->dreg = mono_alloc_preg (cfg);
2466 MONO_ADD_INS (cfg->cbb, vtarg);
2468 mono_call_inst_add_outarg_reg (cfg, call, vtarg->dreg, cinfo->ret.reg, FALSE);
2471 call->stack_usage = cinfo->stack_usage;
/*
 * mono_arch_emit_outarg_vt:
 * Lower an OP_OUTARG_VT: copy a by-value struct argument into the
 * registers (ainfo->size words, loaded piecewise for 1/2/3-byte structs)
 * and/or the outgoing stack area (ainfo->vtsize words, via
 * mini_emit_memcpy). GSharedVt arguments are instead passed by address,
 * either in a register or on the stack.
 * NOTE(review): fragmentary extraction — interior lines are missing.
 */
2477 mono_arch_emit_outarg_vt (MonoCompile *cfg, MonoInst *ins, MonoInst *src)
2479 MonoCallInst *call = (MonoCallInst*)ins->inst_p0;
2480 ArgInfo *ainfo = ins->inst_p1;
2481 int ovf_size = ainfo->vtsize;
2482 int doffset = ainfo->offset;
2483 int struct_size = ainfo->struct_size;
2484 int i, soffset, dreg, tmpreg;
2486 if (ainfo->storage == RegTypeGSharedVtInReg) {
2488 mono_call_inst_add_outarg_reg (cfg, call, src->dreg, ainfo->reg, FALSE);
2491 if (ainfo->storage == RegTypeGSharedVtOnStack) {
2492 /* Pass by addr on stack */
2493 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, ARMREG_SP, ainfo->offset, src->dreg);
/* Load each register-resident word of the struct; sub-word structs are
 * assembled byte-by-byte to avoid unaligned/overlong loads. */
2498 for (i = 0; i < ainfo->size; ++i) {
2499 dreg = mono_alloc_ireg (cfg);
2500 switch (struct_size) {
2502 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, dreg, src->dreg, soffset);
2505 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, dreg, src->dreg, soffset);
/* 3-byte struct: combine three byte loads with shifts/ors. */
2508 tmpreg = mono_alloc_ireg (cfg);
2509 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, dreg, src->dreg, soffset);
2510 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, tmpreg, src->dreg, soffset + 1);
2511 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, tmpreg, tmpreg, 8);
2512 MONO_EMIT_NEW_BIALU (cfg, OP_IOR, dreg, dreg, tmpreg);
2513 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, tmpreg, src->dreg, soffset + 2);
2514 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, tmpreg, tmpreg, 16);
2515 MONO_EMIT_NEW_BIALU (cfg, OP_IOR, dreg, dreg, tmpreg);
2518 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, dreg, src->dreg, soffset);
2521 mono_call_inst_add_outarg_reg (cfg, call, dreg, ainfo->reg + i, FALSE);
2522 soffset += sizeof (gpointer);
2523 struct_size -= sizeof (gpointer);
2525 //g_print ("vt size: %d at R%d + %d\n", doffset, vt->inst_basereg, vt->inst_offset);
/* Spill any remaining words to the outgoing stack area. */
2527 mini_emit_memcpy (cfg, ARMREG_SP, doffset, src->dreg, soffset, MIN (ovf_size * sizeof (gpointer), struct_size), struct_size < 4 ? 1 : 4);
/*
 * mono_arch_emit_setret:
 *
 *   Emit IR that moves VAL into the method's return location.
 * I8/U8 returns use OP_SETLRET with the vreg pair (dreg+1/dreg+2) unless
 * compiling with LLVM; float returns depend on the FPU configuration
 * (soft float vs. VFP); everything else is a plain OP_MOVE into
 * cfg->ret->dreg.  The switch over the FPU kind is partially cut in this
 * listing.
 */
2531 mono_arch_emit_setret (MonoCompile *cfg, MonoMethod *method, MonoInst *val)
2533 MonoType *ret = mini_type_get_underlying_type (cfg->generic_sharing_context, mono_method_signature (method)->ret);
2536 if (ret->type == MONO_TYPE_I8 || ret->type == MONO_TYPE_U8) {
2539 if (COMPILE_LLVM (cfg)) {
2540 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, cfg->ret->dreg, val->dreg);
/* 64-bit values live in a vreg pair: low word at dreg+1, high at dreg+2 */
2542 MONO_INST_NEW (cfg, ins, OP_SETLRET);
2543 ins->sreg1 = val->dreg + 1;
2544 ins->sreg2 = val->dreg + 2;
2545 MONO_ADD_INS (cfg->cbb, ins);
2550 case MONO_ARM_FPU_NONE:
2551 if (ret->type == MONO_TYPE_R8) {
2554 MONO_INST_NEW (cfg, ins, OP_SETFRET);
2555 ins->dreg = cfg->ret->dreg;
2556 ins->sreg1 = val->dreg;
2557 MONO_ADD_INS (cfg->cbb, ins);
2560 if (ret->type == MONO_TYPE_R4) {
2561 /* Already converted to an int in method_to_ir () */
2562 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, cfg->ret->dreg, val->dreg);
2566 case MONO_ARM_FPU_VFP:
2567 case MONO_ARM_FPU_VFP_HARD:
/* With VFP hardware both R4 and R8 go through OP_SETFRET */
2568 if (ret->type == MONO_TYPE_R8 || ret->type == MONO_TYPE_R4) {
2571 MONO_INST_NEW (cfg, ins, OP_SETFRET);
2572 ins->dreg = cfg->ret->dreg;
2573 ins->sreg1 = val->dreg;
2574 MONO_ADD_INS (cfg->cbb, ins);
2579 g_assert_not_reached ();
/* Non-float, non-long return value: a simple register move */
2583 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, cfg->ret->dreg, val->dreg);
2586 #endif /* #ifndef DISABLE_JIT */
/* mono_arch_is_inst_imm: whether IMM can be used as an instruction immediate
 * on this architecture.  Body not visible in this listing. */
2589 mono_arch_is_inst_imm (gint64 imm)
2595 MonoMethodSignature *sig;
2598 MonoType **param_types;
/*
 * dyn_call_supported:
 *
 *   Return whether SIG/CINFO can be handled by the dynamic call code
 * (mono_arch_start_dyn_call / mono_arch_finish_dyn_call).  Rejects
 * signatures whose arguments do not fit in PARAM_REGS registers plus
 * DYN_CALL_STACK_ARGS stack slots, and unsupported storage kinds.
 * Several early-return branches are not visible in this listing.
 */
2602 dyn_call_supported (CallInfo *cinfo, MonoMethodSignature *sig)
2606 if (sig->hasthis + sig->param_count > PARAM_REGS + DYN_CALL_STACK_ARGS)
2609 switch (cinfo->ret.storage) {
2611 case RegTypeGeneral:
2612 case RegTypeIRegPair:
2613 case RegTypeStructByAddr:
2624 for (i = 0; i < cinfo->nargs; ++i) {
2625 ArgInfo *ainfo = &cinfo->args [i];
2628 switch (ainfo->storage) {
2629 case RegTypeGeneral:
2631 case RegTypeIRegPair:
/* stack arguments beyond the fixed dyn-call stack area are unsupported */
2634 if (ainfo->offset >= (DYN_CALL_STACK_ARGS * sizeof (gpointer)))
2637 case RegTypeStructByVal:
2638 if (ainfo->size == 0)
2639 last_slot = PARAM_REGS + (ainfo->offset / 4) + ainfo->vtsize;
2641 last_slot = ainfo->reg + ainfo->size + ainfo->vtsize;
2642 if (last_slot >= PARAM_REGS + DYN_CALL_STACK_ARGS)
2650 // FIXME: Can't use cinfo only as it doesn't contain info about I8/float */
2651 for (i = 0; i < sig->param_count; ++i) {
2652 MonoType *t = sig->params [i];
2657 t = mini_replace_type (t);
/*
 * mono_arch_dyn_call_prepare:
 *
 *   Build an ArchDynCallInfo for SIG, to be used later by
 * mono_arch_start_dyn_call ().  Returns NULL-equivalent behavior when the
 * signature is not supported (the failure branch is not visible here).
 * The returned info owns CINFO and the param_types array; callers release
 * it with mono_arch_dyn_call_free ().
 */
2680 mono_arch_dyn_call_prepare (MonoMethodSignature *sig)
2682 ArchDynCallInfo *info;
2686 cinfo = get_call_info (NULL, NULL, sig);
2688 if (!dyn_call_supported (cinfo, sig)) {
2693 info = g_new0 (ArchDynCallInfo, 1);
2694 // FIXME: Preprocess the info to speed up start_dyn_call ()
2696 info->cinfo = cinfo;
2697 info->rtype = mini_replace_type (sig->ret);
/* cache the resolved parameter types so start_dyn_call avoids re-resolving */
2698 info->param_types = g_new0 (MonoType*, sig->param_count);
2699 for (i = 0; i < sig->param_count; ++i)
2700 info->param_types [i] = mini_replace_type (sig->params [i]);
2702 return (MonoDynCallInfo*)info;
/*
 * mono_arch_dyn_call_free:
 *
 *   Release an ArchDynCallInfo created by mono_arch_dyn_call_prepare ().
 * NOTE(review): only the g_free of cinfo is visible in this listing —
 * confirm param_types and the info struct itself are freed in the
 * missing lines.
 */
2706 mono_arch_dyn_call_free (MonoDynCallInfo *info)
2708 ArchDynCallInfo *ainfo = (ArchDynCallInfo*)info;
2710 g_free (ainfo->cinfo);
/*
 * mono_arch_start_dyn_call:
 *
 *   Marshal ARGS into the DynCallArgs structure in BUF according to INFO,
 * ready to be consumed by the dynamic-call trampoline.  'this' and/or the
 * vtype return address are placed in the first registers, then each
 * argument is stored into its register slot (or PARAM_REGS-based stack
 * slot) with the load width determined by its MonoType.  Parts of the
 * type switch are not visible in this listing.
 */
2715 mono_arch_start_dyn_call (MonoDynCallInfo *info, gpointer **args, guint8 *ret, guint8 *buf, int buf_len)
2717 ArchDynCallInfo *dinfo = (ArchDynCallInfo*)info;
2718 DynCallArgs *p = (DynCallArgs*)buf;
2719 int arg_index, greg, i, j, pindex;
2720 MonoMethodSignature *sig = dinfo->sig;
2722 g_assert (buf_len >= sizeof (DynCallArgs));
/* 'this' (or the vret address when it comes first) occupies the first reg */
2731 if (sig->hasthis || dinfo->cinfo->vret_arg_index == 1) {
2732 p->regs [greg ++] = (mgreg_t)*(args [arg_index ++]);
2737 if (dinfo->cinfo->vtype_retaddr)
2738 p->regs [greg ++] = (mgreg_t)ret;
2740 for (i = pindex; i < sig->param_count; i++) {
2741 MonoType *t = dinfo->param_types [i];
2742 gpointer *arg = args [arg_index ++];
2743 ArgInfo *ainfo = &dinfo->cinfo->args [i + sig->hasthis];
/* compute the regs[] slot: registers first, then the stack shadow area */
2746 if (ainfo->storage == RegTypeGeneral || ainfo->storage == RegTypeIRegPair || ainfo->storage == RegTypeStructByVal)
2748 else if (ainfo->storage == RegTypeBase)
2749 slot = PARAM_REGS + (ainfo->offset / 4);
2751 g_assert_not_reached ();
2754 p->regs [slot] = (mgreg_t)*arg;
2759 case MONO_TYPE_STRING:
2760 case MONO_TYPE_CLASS:
2761 case MONO_TYPE_ARRAY:
2762 case MONO_TYPE_SZARRAY:
2763 case MONO_TYPE_OBJECT:
2767 p->regs [slot] = (mgreg_t)*arg;
2769 case MONO_TYPE_BOOLEAN:
/* narrow types are widened to a full machine word here */
2771 p->regs [slot] = *(guint8*)arg;
2774 p->regs [slot] = *(gint8*)arg;
2777 p->regs [slot] = *(gint16*)arg;
2780 case MONO_TYPE_CHAR:
2781 p->regs [slot] = *(guint16*)arg;
2784 p->regs [slot] = *(gint32*)arg;
2787 p->regs [slot] = *(guint32*)arg;
/* 64-bit values take two consecutive slots */
2791 p->regs [slot ++] = (mgreg_t)arg [0];
2792 p->regs [slot] = (mgreg_t)arg [1];
2795 p->regs [slot] = *(mgreg_t*)arg;
2798 p->regs [slot ++] = (mgreg_t)arg [0];
2799 p->regs [slot] = (mgreg_t)arg [1];
2801 case MONO_TYPE_GENERICINST:
2802 if (MONO_TYPE_IS_REFERENCE (t)) {
2803 p->regs [slot] = (mgreg_t)*arg;
2808 case MONO_TYPE_VALUETYPE:
2809 g_assert (ainfo->storage == RegTypeStructByVal);
2811 if (ainfo->size == 0)
2812 slot = PARAM_REGS + (ainfo->offset / 4);
/* copy the struct word by word into its register/stack slots */
2816 for (j = 0; j < ainfo->size + ainfo->vtsize; ++j)
2817 p->regs [slot ++] = ((mgreg_t*)arg) [j];
2820 g_assert_not_reached ();
/*
 * mono_arch_finish_dyn_call:
 *
 *   After a dynamic call, copy the raw result (res/res2 from the
 * DynCallArgs in BUF) into the caller-provided return buffer, narrowing
 * or reinterpreting according to the return MonoType in INFO.  Parts of
 * the type switch (e.g. the I1/U1 split) are not visible in this listing.
 */
2826 mono_arch_finish_dyn_call (MonoDynCallInfo *info, guint8 *buf)
2828 ArchDynCallInfo *ainfo = (ArchDynCallInfo*)info;
2829 MonoType *ptype = ainfo->rtype;
2830 guint8 *ret = ((DynCallArgs*)buf)->ret;
2831 mgreg_t res = ((DynCallArgs*)buf)->res;
2832 mgreg_t res2 = ((DynCallArgs*)buf)->res2;
2834 switch (ptype->type) {
2835 case MONO_TYPE_VOID:
2836 *(gpointer*)ret = NULL;
2838 case MONO_TYPE_STRING:
2839 case MONO_TYPE_CLASS:
2840 case MONO_TYPE_ARRAY:
2841 case MONO_TYPE_SZARRAY:
2842 case MONO_TYPE_OBJECT:
2846 *(gpointer*)ret = (gpointer)res;
2852 case MONO_TYPE_BOOLEAN:
2853 *(guint8*)ret = res;
2856 *(gint16*)ret = res;
2859 case MONO_TYPE_CHAR:
2860 *(guint16*)ret = res;
2863 *(gint32*)ret = res;
2866 *(guint32*)ret = res;
2870 /* This handles endianness as well */
2871 ((gint32*)ret) [0] = res;
2872 ((gint32*)ret) [1] = res2;
2874 case MONO_TYPE_GENERICINST:
2875 if (MONO_TYPE_IS_REFERENCE (ptype)) {
2876 *(gpointer*)ret = (gpointer)res;
2881 case MONO_TYPE_VALUETYPE:
/* vtype results were written through the retaddr passed to the callee */
2882 g_assert (ainfo->cinfo->vtype_retaddr);
2887 *(float*)ret = *(float*)&res;
2889 case MONO_TYPE_R8: {
2896 *(double*)ret = *(double*)&regs;
2900 g_assert_not_reached ();
2907 * Allow tracing to work with this interface (with an optional argument)
/*
 * mono_arch_instrument_prolog:
 *
 *   Emit a call to FUNC (method, NULL) at method entry for tracing:
 * method pointer in R0, a placeholder NULL "ebp" in R1, the target in R2.
 */
2911 mono_arch_instrument_prolog (MonoCompile *cfg, void *func, void *p, gboolean enable_arguments)
2915 code = mono_arm_emit_load_imm (code, ARMREG_R0, (guint32)cfg->method);
2916 ARM_MOV_REG_IMM8 (code, ARMREG_R1, 0); /* NULL ebp for now */
2917 code = mono_arm_emit_load_imm (code, ARMREG_R2, (guint32)func);
2918 code = emit_call_reg (code, ARMREG_R2);
/*
 * mono_arch_instrument_epilog_full:
 *
 *   Emit a call to FUNC at method exit for tracing.  The return value is
 * saved to the param area (mode chosen from the return type: one reg, a
 * reg pair, one/two VFP regs, or a struct address), FUNC is invoked with
 * the method in R0, and the saved value is restored afterwards.  Case
 * labels of both switches are partially missing in this listing.
 */
2932 mono_arch_instrument_epilog_full (MonoCompile *cfg, void *func, void *p, gboolean enable_arguments, gboolean preserve_argument_registers)
2935 int save_mode = SAVE_NONE;
2937 MonoMethod *method = cfg->method;
2938 MonoType *ret_type = mini_type_get_underlying_type (cfg->generic_sharing_context, mono_method_signature (method)->ret);
2939 int rtype = ret_type->type;
2940 int save_offset = cfg->param_area;
/* grow the code buffer if the epilog instrumentation might not fit */
2944 offset = code - cfg->native_code;
2945 /* we need about 16 instructions */
2946 if (offset > (cfg->code_size - 16 * 4)) {
2947 cfg->code_size *= 2;
2948 cfg->native_code = g_realloc (cfg->native_code, cfg->code_size);
2949 code = cfg->native_code + offset;
2952 case MONO_TYPE_VOID:
2953 /* special case string .ctor icall */
2954 if (strcmp (".ctor", method->name) && method->klass == mono_defaults.string_class)
2955 save_mode = SAVE_ONE;
2957 save_mode = SAVE_NONE;
2961 save_mode = SAVE_TWO;
2965 save_mode = SAVE_ONE_FP;
2967 save_mode = SAVE_ONE;
2971 save_mode = SAVE_TWO_FP;
2973 save_mode = SAVE_TWO;
2975 case MONO_TYPE_GENERICINST:
2976 if (!mono_type_generic_inst_is_valuetype (ret_type)) {
2977 save_mode = SAVE_ONE;
2981 case MONO_TYPE_VALUETYPE:
2982 save_mode = SAVE_STRUCT;
2985 save_mode = SAVE_ONE;
/* spill the return value before the tracing call clobbers r0-r3 */
2989 switch (save_mode) {
2991 ARM_STR_IMM (code, ARMREG_R0, cfg->frame_reg, save_offset);
2992 ARM_STR_IMM (code, ARMREG_R1, cfg->frame_reg, save_offset + 4);
2993 if (enable_arguments) {
2994 ARM_MOV_REG_REG (code, ARMREG_R2, ARMREG_R1);
2995 ARM_MOV_REG_REG (code, ARMREG_R1, ARMREG_R0);
2999 ARM_STR_IMM (code, ARMREG_R0, cfg->frame_reg, save_offset);
3000 if (enable_arguments) {
3001 ARM_MOV_REG_REG (code, ARMREG_R1, ARMREG_R0);
3005 ARM_FSTS (code, ARM_VFP_F0, cfg->frame_reg, save_offset);
3006 if (enable_arguments) {
3007 ARM_FMRS (code, ARMREG_R1, ARM_VFP_F0);
3011 ARM_FSTD (code, ARM_VFP_D0, cfg->frame_reg, save_offset);
3012 if (enable_arguments) {
3013 ARM_FMDRR (code, ARMREG_R1, ARMREG_R2, ARM_VFP_D0);
3017 if (enable_arguments) {
3018 /* FIXME: get the actual address */
3019 ARM_MOV_REG_REG (code, ARMREG_R1, ARMREG_R0);
/* call the tracing function: method in R0, target in IP */
3027 code = mono_arm_emit_load_imm (code, ARMREG_R0, (guint32)cfg->method);
3028 code = mono_arm_emit_load_imm (code, ARMREG_IP, (guint32)func);
3029 code = emit_call_reg (code, ARMREG_IP);
/* restore the saved return value */
3031 switch (save_mode) {
3033 ARM_LDR_IMM (code, ARMREG_R0, cfg->frame_reg, save_offset);
3034 ARM_LDR_IMM (code, ARMREG_R1, cfg->frame_reg, save_offset + 4);
3037 ARM_LDR_IMM (code, ARMREG_R0, cfg->frame_reg, save_offset);
3040 ARM_FLDS (code, ARM_VFP_F0, cfg->frame_reg, save_offset);
3043 ARM_FLDD (code, ARM_VFP_D0, cfg->frame_reg, save_offset);
3054 * The immediate field for cond branches is big enough for all reasonable methods
/* Emit a conditional branch to ins->inst_true_bb.  The direct-offset
 * branch path is disabled ("if (0 && ...)"); in practice a patch record
 * is always added and a zero-displacement B<cond> is emitted, to be
 * fixed up later by arm_patch (). */
3056 #define EMIT_COND_BRANCH_FLAGS(ins,condcode) \
3057 if (0 && ins->inst_true_bb->native_offset) { \
3058 ARM_B_COND (code, (condcode), (code - cfg->native_code + ins->inst_true_bb->native_offset) & 0xffffff); \
3060 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_BB, ins->inst_true_bb); \
3061 ARM_B_COND (code, (condcode), 0); \
3064 #define EMIT_COND_BRANCH(ins,cond) EMIT_COND_BRANCH_FLAGS(ins, branch_cc_table [(cond)])
3066 /* emit an exception if condition is fail
3068 * We assign the extra code used to throw the implicit exceptions
3069 * to cfg->bb_exit as far as the big branch handling is concerned
/* Emit a conditional call to the named exception throw helper; the
 * displacement is patched in later via the MONO_PATCH_INFO_EXC record. */
3071 #define EMIT_COND_SYSTEM_EXCEPTION_FLAGS(condcode,exc_name) \
3073 mono_add_patch_info (cfg, code - cfg->native_code, \
3074 MONO_PATCH_INFO_EXC, exc_name); \
3075 ARM_BL_COND (code, (condcode), 0); \
3078 #define EMIT_COND_SYSTEM_EXCEPTION(cond,exc_name) EMIT_COND_SYSTEM_EXCEPTION_FLAGS(branch_cc_table [(cond)], (exc_name))
/* mono_arch_peephole_pass_1: first peephole pass over BB; body not
 * visible in this listing (appears to be empty or trivial). */
3081 mono_arch_peephole_pass_1 (MonoCompile *cfg, MonoBasicBlock *bb)
/*
 * mono_arch_peephole_pass_2:
 *
 *   Second peephole pass: walks the instructions of BB and folds
 * store-then-load, load-then-load, and move patterns into cheaper forms
 * (OP_MOVE, OP_ICONST, narrowing conversions) or deletes redundant
 * instructions outright.  Some break/else lines are missing from this
 * listing.
 */
3086 mono_arch_peephole_pass_2 (MonoCompile *cfg, MonoBasicBlock *bb)
3088 MonoInst *ins, *n, *last_ins = NULL;
3090 MONO_BB_FOR_EACH_INS_SAFE (bb, n, ins) {
3091 switch (ins->opcode) {
3094 /* Already done by an arch-independent pass */
3096 case OP_LOAD_MEMBASE:
3097 case OP_LOADI4_MEMBASE:
3099 * OP_STORE_MEMBASE_REG reg, offset(basereg)
3100 * OP_LOAD_MEMBASE offset(basereg), reg
/* store followed by load of the same slot: forward the stored register */
3102 if (last_ins && (last_ins->opcode == OP_STOREI4_MEMBASE_REG
3103 || last_ins->opcode == OP_STORE_MEMBASE_REG) &&
3104 ins->inst_basereg == last_ins->inst_destbasereg &&
3105 ins->inst_offset == last_ins->inst_offset) {
3106 if (ins->dreg == last_ins->sreg1) {
3107 MONO_DELETE_INS (bb, ins);
3110 //static int c = 0; printf ("MATCHX %s %d\n", cfg->method->name,c++);
3111 ins->opcode = OP_MOVE;
3112 ins->sreg1 = last_ins->sreg1;
3116 * Note: reg1 must be different from the basereg in the second load
3117 * OP_LOAD_MEMBASE offset(basereg), reg1
3118 * OP_LOAD_MEMBASE offset(basereg), reg2
3120 * OP_LOAD_MEMBASE offset(basereg), reg1
3121 * OP_MOVE reg1, reg2
/* two loads of the same slot: second becomes a move (or is deleted) */
3123 } if (last_ins && (last_ins->opcode == OP_LOADI4_MEMBASE
3124 || last_ins->opcode == OP_LOAD_MEMBASE) &&
3125 ins->inst_basereg != last_ins->dreg &&
3126 ins->inst_basereg == last_ins->inst_basereg &&
3127 ins->inst_offset == last_ins->inst_offset) {
3129 if (ins->dreg == last_ins->dreg) {
3130 MONO_DELETE_INS (bb, ins);
3133 ins->opcode = OP_MOVE;
3134 ins->sreg1 = last_ins->dreg;
3137 //g_assert_not_reached ();
3141 * OP_STORE_MEMBASE_IMM imm, offset(basereg)
3142 * OP_LOAD_MEMBASE offset(basereg), reg
3144 * OP_STORE_MEMBASE_IMM imm, offset(basereg)
3145 * OP_ICONST reg, imm
3147 } else if (last_ins && (last_ins->opcode == OP_STOREI4_MEMBASE_IMM
3148 || last_ins->opcode == OP_STORE_MEMBASE_IMM) &&
3149 ins->inst_basereg == last_ins->inst_destbasereg &&
3150 ins->inst_offset == last_ins->inst_offset) {
3151 //static int c = 0; printf ("MATCHX %s %d\n", cfg->method->name,c++);
3152 ins->opcode = OP_ICONST;
3153 ins->inst_c0 = last_ins->inst_imm;
/* intentionally asserting: this rule has never been exercised */
3154 g_assert_not_reached (); // check this rule
3158 case OP_LOADU1_MEMBASE:
3159 case OP_LOADI1_MEMBASE:
/* byte store + byte load of the same slot: replace with a conversion */
3160 if (last_ins && (last_ins->opcode == OP_STOREI1_MEMBASE_REG) &&
3161 ins->inst_basereg == last_ins->inst_destbasereg &&
3162 ins->inst_offset == last_ins->inst_offset) {
3163 ins->opcode = (ins->opcode == OP_LOADI1_MEMBASE) ? OP_ICONV_TO_I1 : OP_ICONV_TO_U1;
3164 ins->sreg1 = last_ins->sreg1;
3167 case OP_LOADU2_MEMBASE:
3168 case OP_LOADI2_MEMBASE:
3169 if (last_ins && (last_ins->opcode == OP_STOREI2_MEMBASE_REG) &&
3170 ins->inst_basereg == last_ins->inst_destbasereg &&
3171 ins->inst_offset == last_ins->inst_offset) {
3172 ins->opcode = (ins->opcode == OP_LOADI2_MEMBASE) ? OP_ICONV_TO_I2 : OP_ICONV_TO_U2;
3173 ins->sreg1 = last_ins->sreg1;
3177 ins->opcode = OP_MOVE;
/* a move onto itself is dead */
3181 if (ins->dreg == ins->sreg1) {
3182 MONO_DELETE_INS (bb, ins);
3186 * OP_MOVE sreg, dreg
3187 * OP_MOVE dreg, sreg
3189 if (last_ins && last_ins->opcode == OP_MOVE &&
3190 ins->sreg1 == last_ins->dreg &&
3191 ins->dreg == last_ins->sreg1) {
3192 MONO_DELETE_INS (bb, ins);
3200 bb->last_ins = last_ins;
3204 * the branch_cc_table should maintain the order of these
/* Maps Mono branch conditions to ARM condition codes; entries are not
 * visible in this listing. */
3218 branch_cc_table [] = {
/* Allocate a new instruction DEST with opcode OP and insert it into BB
 * immediately before the instruction 'ins' currently being lowered. */
3232 #define ADD_NEW_INS(cfg,dest,op) do { \
3233 MONO_INST_NEW ((cfg), (dest), (op)); \
3234 mono_bblock_insert_before_ins (bb, ins, (dest)); \
/*
 * map_to_reg_reg_op:
 *
 *   Map an opcode with an immediate or membase operand to its
 * register-register equivalent: _MEMBASE loads/stores become _MEMINDEX,
 * and _MEMBASE_IMM stores become _MEMBASE_REG (the immediate having been
 * materialized into a register by the lowering pass).
 */
3238 map_to_reg_reg_op (int op)
3247 case OP_COMPARE_IMM:
3249 case OP_ICOMPARE_IMM:
3263 case OP_LOAD_MEMBASE:
3264 return OP_LOAD_MEMINDEX;
3265 case OP_LOADI4_MEMBASE:
3266 return OP_LOADI4_MEMINDEX;
3267 case OP_LOADU4_MEMBASE:
3268 return OP_LOADU4_MEMINDEX;
3269 case OP_LOADU1_MEMBASE:
3270 return OP_LOADU1_MEMINDEX;
3271 case OP_LOADI2_MEMBASE:
3272 return OP_LOADI2_MEMINDEX;
3273 case OP_LOADU2_MEMBASE:
3274 return OP_LOADU2_MEMINDEX;
3275 case OP_LOADI1_MEMBASE:
3276 return OP_LOADI1_MEMINDEX;
3277 case OP_STOREI1_MEMBASE_REG:
3278 return OP_STOREI1_MEMINDEX;
3279 case OP_STOREI2_MEMBASE_REG:
3280 return OP_STOREI2_MEMINDEX;
3281 case OP_STOREI4_MEMBASE_REG:
3282 return OP_STOREI4_MEMINDEX;
3283 case OP_STORE_MEMBASE_REG:
3284 return OP_STORE_MEMINDEX;
3285 case OP_STORER4_MEMBASE_REG:
3286 return OP_STORER4_MEMINDEX;
3287 case OP_STORER8_MEMBASE_REG:
3288 return OP_STORER8_MEMINDEX;
/* immediate stores: the value moves to a register, opcode keeps membase form */
3289 case OP_STORE_MEMBASE_IMM:
3290 return OP_STORE_MEMBASE_REG;
3291 case OP_STOREI1_MEMBASE_IMM:
3292 return OP_STOREI1_MEMBASE_REG;
3293 case OP_STOREI2_MEMBASE_IMM:
3294 return OP_STOREI2_MEMBASE_REG;
3295 case OP_STOREI4_MEMBASE_IMM:
3296 return OP_STOREI4_MEMBASE_REG;
3298 g_assert_not_reached ();
3302 * Remove from the instruction list the instructions that can't be
3303 * represented with very simple instructions with no register
/*
 * mono_arch_lowering_pass:
 *
 *   Lower IR in BB to forms the ARM backend can emit directly: immediates
 * that do not fit the rotated-imm8 / imm12 / imm8 / fp-imm8 encodings are
 * materialized into registers via ADD_NEW_INS (OP_ICONST / OP_ADD_IMM)
 * and the instruction is remapped with map_to_reg_reg_op ().  Also
 * strength-reduces MUL_IMM and fixes up overflow-check condition codes.
 * Several case labels/breaks are missing in this listing.
 */
3307 mono_arch_lowering_pass (MonoCompile *cfg, MonoBasicBlock *bb)
3309 MonoInst *ins, *temp, *last_ins = NULL;
3310 int rot_amount, imm8, low_imm;
3312 MONO_BB_FOR_EACH_INS (bb, ins) {
3314 switch (ins->opcode) {
3318 case OP_COMPARE_IMM:
3319 case OP_ICOMPARE_IMM:
/* immediate does not fit an ARM rotated imm8: load it into a register */
3333 if ((imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount)) < 0) {
3334 ADD_NEW_INS (cfg, temp, OP_ICONST);
3335 temp->inst_c0 = ins->inst_imm;
3336 temp->dreg = mono_alloc_ireg (cfg);
3337 ins->sreg2 = temp->dreg;
3338 ins->opcode = mono_op_imm_to_op (ins->opcode);
3340 if (ins->opcode == OP_SBB || ins->opcode == OP_ISBB || ins->opcode == OP_SUBCC)
/* MUL_IMM strength reduction: *1 -> move, *0 -> const 0, power of two -> shift */
3346 if (ins->inst_imm == 1) {
3347 ins->opcode = OP_MOVE;
3350 if (ins->inst_imm == 0) {
3351 ins->opcode = OP_ICONST;
3355 imm8 = mono_is_power_of_two (ins->inst_imm);
3357 ins->opcode = OP_SHL_IMM;
3358 ins->inst_imm = imm8;
3361 ADD_NEW_INS (cfg, temp, OP_ICONST);
3362 temp->inst_c0 = ins->inst_imm;
3363 temp->dreg = mono_alloc_ireg (cfg);
3364 ins->sreg2 = temp->dreg;
3365 ins->opcode = OP_IMUL;
3371 if (ins->next && (ins->next->opcode == OP_COND_EXC_C || ins->next->opcode == OP_COND_EXC_IC))
3372 /* ARM sets the C flag to 1 if there was _no_ overflow */
3373 ins->next->opcode = OP_COND_EXC_NC;
3376 case OP_IDIV_UN_IMM:
3378 case OP_IREM_UN_IMM:
3379 ADD_NEW_INS (cfg, temp, OP_ICONST);
3380 temp->inst_c0 = ins->inst_imm;
3381 temp->dreg = mono_alloc_ireg (cfg);
3382 ins->sreg2 = temp->dreg;
3383 ins->opcode = mono_op_imm_to_op (ins->opcode);
3385 case OP_LOCALLOC_IMM:
3386 ADD_NEW_INS (cfg, temp, OP_ICONST);
3387 temp->inst_c0 = ins->inst_imm;
3388 temp->dreg = mono_alloc_ireg (cfg);
3389 ins->sreg1 = temp->dreg;
3390 ins->opcode = OP_LOCALLOC;
3392 case OP_LOAD_MEMBASE:
3393 case OP_LOADI4_MEMBASE:
3394 case OP_LOADU4_MEMBASE:
3395 case OP_LOADU1_MEMBASE:
3396 /* we can do two things: load the immed in a register
3397 * and use an indexed load, or see if the immed can be
3398 * represented as an ad_imm + a load with a smaller offset
3399 * that fits. We just do the first for now, optimize later.
3401 if (arm_is_imm12 (ins->inst_offset))
3403 ADD_NEW_INS (cfg, temp, OP_ICONST);
3404 temp->inst_c0 = ins->inst_offset;
3405 temp->dreg = mono_alloc_ireg (cfg);
3406 ins->sreg2 = temp->dreg;
3407 ins->opcode = map_to_reg_reg_op (ins->opcode);
3409 case OP_LOADI2_MEMBASE:
3410 case OP_LOADU2_MEMBASE:
3411 case OP_LOADI1_MEMBASE:
/* halfword/signed-byte loads only get an 8-bit offset encoding */
3412 if (arm_is_imm8 (ins->inst_offset))
3414 ADD_NEW_INS (cfg, temp, OP_ICONST);
3415 temp->inst_c0 = ins->inst_offset;
3416 temp->dreg = mono_alloc_ireg (cfg);
3417 ins->sreg2 = temp->dreg;
3418 ins->opcode = map_to_reg_reg_op (ins->opcode);
3420 case OP_LOADR4_MEMBASE:
3421 case OP_LOADR8_MEMBASE:
3422 if (arm_is_fpimm8 (ins->inst_offset))
/* try base + rotated-imm8 high part, keeping a small low offset */
3424 low_imm = ins->inst_offset & 0x1ff;
3425 if ((imm8 = mono_arm_is_rotated_imm8 (ins->inst_offset & ~0x1ff, &rot_amount)) >= 0) {
3426 ADD_NEW_INS (cfg, temp, OP_ADD_IMM);
3427 temp->inst_imm = ins->inst_offset & ~0x1ff;
3428 temp->sreg1 = ins->inst_basereg;
3429 temp->dreg = mono_alloc_ireg (cfg);
3430 ins->inst_basereg = temp->dreg;
3431 ins->inst_offset = low_imm;
/* fallback: compute the full address in a register */
3435 ADD_NEW_INS (cfg, temp, OP_ICONST);
3436 temp->inst_c0 = ins->inst_offset;
3437 temp->dreg = mono_alloc_ireg (cfg);
3439 ADD_NEW_INS (cfg, add_ins, OP_IADD);
3440 add_ins->sreg1 = ins->inst_basereg;
3441 add_ins->sreg2 = temp->dreg;
3442 add_ins->dreg = mono_alloc_ireg (cfg);
3444 ins->inst_basereg = add_ins->dreg;
3445 ins->inst_offset = 0;
3448 case OP_STORE_MEMBASE_REG:
3449 case OP_STOREI4_MEMBASE_REG:
3450 case OP_STOREI1_MEMBASE_REG:
3451 if (arm_is_imm12 (ins->inst_offset))
3453 ADD_NEW_INS (cfg, temp, OP_ICONST);
3454 temp->inst_c0 = ins->inst_offset;
3455 temp->dreg = mono_alloc_ireg (cfg);
3456 ins->sreg2 = temp->dreg;
3457 ins->opcode = map_to_reg_reg_op (ins->opcode);
3459 case OP_STOREI2_MEMBASE_REG:
3460 if (arm_is_imm8 (ins->inst_offset))
3462 ADD_NEW_INS (cfg, temp, OP_ICONST);
3463 temp->inst_c0 = ins->inst_offset;
3464 temp->dreg = mono_alloc_ireg (cfg);
3465 ins->sreg2 = temp->dreg;
3466 ins->opcode = map_to_reg_reg_op (ins->opcode);
3468 case OP_STORER4_MEMBASE_REG:
3469 case OP_STORER8_MEMBASE_REG:
3470 if (arm_is_fpimm8 (ins->inst_offset))
3472 low_imm = ins->inst_offset & 0x1ff;
3473 if ((imm8 = mono_arm_is_rotated_imm8 (ins->inst_offset & ~ 0x1ff, &rot_amount)) >= 0 && arm_is_fpimm8 (low_imm)) {
3474 ADD_NEW_INS (cfg, temp, OP_ADD_IMM);
3475 temp->inst_imm = ins->inst_offset & ~0x1ff;
3476 temp->sreg1 = ins->inst_destbasereg;
3477 temp->dreg = mono_alloc_ireg (cfg);
3478 ins->inst_destbasereg = temp->dreg;
3479 ins->inst_offset = low_imm;
3483 ADD_NEW_INS (cfg, temp, OP_ICONST);
3484 temp->inst_c0 = ins->inst_offset;
3485 temp->dreg = mono_alloc_ireg (cfg);
3487 ADD_NEW_INS (cfg, add_ins, OP_IADD);
3488 add_ins->sreg1 = ins->inst_destbasereg;
3489 add_ins->sreg2 = temp->dreg;
3490 add_ins->dreg = mono_alloc_ireg (cfg);
3492 ins->inst_destbasereg = add_ins->dreg;
3493 ins->inst_offset = 0;
3496 case OP_STORE_MEMBASE_IMM:
3497 case OP_STOREI1_MEMBASE_IMM:
3498 case OP_STOREI2_MEMBASE_IMM:
3499 case OP_STOREI4_MEMBASE_IMM:
/* materialize the stored immediate, then re-enter the loop so the
 * (possibly large) offset is handled by the _MEMBASE_REG cases */
3500 ADD_NEW_INS (cfg, temp, OP_ICONST);
3501 temp->inst_c0 = ins->inst_imm;
3502 temp->dreg = mono_alloc_ireg (cfg);
3503 ins->sreg1 = temp->dreg;
3504 ins->opcode = map_to_reg_reg_op (ins->opcode);
3506 goto loop_start; /* make it handle the possibly big ins->inst_offset */
3508 gboolean swap = FALSE;
3512 /* Optimized away */
3517 /* Some fp compares require swapped operands */
3518 switch (ins->next->opcode) {
3520 ins->next->opcode = OP_FBLT;
3524 ins->next->opcode = OP_FBLT_UN;
3528 ins->next->opcode = OP_FBGE;
3532 ins->next->opcode = OP_FBGE_UN;
3540 ins->sreg1 = ins->sreg2;
3549 bb->last_ins = last_ins;
3550 bb->max_vreg = cfg->next_vreg;
/*
 * mono_arch_decompose_long_opts:
 *
 *   Decompose 64-bit opcodes into 32-bit pairs.  OP_LNEG becomes
 * RSBS (low word) + RSC (high word), negating via reverse subtract
 * from 0 with carry.
 */
3554 mono_arch_decompose_long_opts (MonoCompile *cfg, MonoInst *long_ins)
3558 if (long_ins->opcode == OP_LNEG) {
3560 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ARM_RSBS_IMM, ins->dreg + 1, ins->sreg1 + 1, 0);
3561 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ARM_RSC_IMM, ins->dreg + 2, ins->sreg1 + 2, 0);
/*
 * emit_float_to_int:
 *
 *   Emit VFP code converting the double in SREG to an integer of SIZE
 * bytes in DREG: TOSIZD/TOUIZD into a scratch VFP register, move to the
 * core register with FMRS, then mask (unsigned) or shift-extend (signed)
 * to the requested width.
 */
3567 emit_float_to_int (MonoCompile *cfg, guchar *code, int dreg, int sreg, int size, gboolean is_signed)
3569 /* sreg is a float, dreg is an integer reg */
3571 code = mono_arm_emit_vfp_scratch_save (cfg, code, vfp_scratch1);
3573 ARM_TOSIZD (code, vfp_scratch1, sreg);
3575 ARM_TOUIZD (code, vfp_scratch1, sreg);
3576 ARM_FMRS (code, dreg, vfp_scratch1);
3577 code = mono_arm_emit_vfp_scratch_restore (cfg, code, vfp_scratch1);
/* unsigned narrowing: mask to 8 bits, or shift left+logical right for 16 */
3581 ARM_AND_REG_IMM8 (code, dreg, dreg, 0xff);
3582 else if (size == 2) {
3583 ARM_SHL_IMM (code, dreg, dreg, 16);
3584 ARM_SHR_IMM (code, dreg, dreg, 16);
/* signed narrowing: shift left then arithmetic shift right to sign-extend */
3588 ARM_SHL_IMM (code, dreg, dreg, 24);
3589 ARM_SAR_IMM (code, dreg, dreg, 24);
3590 } else if (size == 2) {
3591 ARM_SHL_IMM (code, dreg, dreg, 16);
3592 ARM_SAR_IMM (code, dreg, dreg, 16);
3598 #endif /* #ifndef DISABLE_JIT */
3602 const guchar *target;
/* True if DIFF fits in the signed 26-bit (+/-32MB) range reachable by an
 * ARM B/BL branch displacement. */
3607 #define is_call_imm(diff) ((gint)(diff) >= -33554432 && (gint)(diff) <= 33554431)
/*
 * search_thunk_slot:
 *
 *   mono_code_manager_foreach callback: scan the thunk area of a code
 * chunk for either an existing thunk targeting pdata->target (reuse it)
 * or a free 3-word slot (emit a new ldr ip, [pc] / bx|mov pc thunk there),
 * then patch pdata->code to branch to the thunk.  Only chunks reachable
 * from pdata->code within branch range are considered.
 */
3610 search_thunk_slot (void *data, int csize, int bsize, void *user_data) {
3611 PatchData *pdata = (PatchData*)user_data;
3612 guchar *code = data;
3613 guint32 *thunks = data;
3614 guint32 *endthunks = (guint32*)(code + bsize);
3616 int difflow, diffhigh;
3618 /* always ensure a call from pdata->code can reach to the thunks without further thunks */
3619 difflow = (char*)pdata->code - (char*)thunks;
3620 diffhigh = (char*)pdata->code - (char*)endthunks;
3621 if (!((is_call_imm (thunks) && is_call_imm (endthunks)) || (is_call_imm (difflow) && is_call_imm (diffhigh))))
3625 * The thunk is composed of 3 words:
3626 * load constant from thunks [2] into ARM_IP
3629 * Note that the LR register is already setup
3631 //g_print ("thunk nentries: %d\n", ((char*)endthunks - (char*)thunks)/16);
/* pdata->found == 2 means "use the first available slot anywhere" */
3632 if ((pdata->found == 2) || (pdata->code >= code && pdata->code <= code + csize)) {
3633 while (thunks < endthunks) {
3634 //g_print ("looking for target: %p at %p (%08x-%08x)\n", pdata->target, thunks, thunks [0], thunks [1]);
3635 if (thunks [2] == (guint32)pdata->target) {
3636 arm_patch (pdata->code, (guchar*)thunks);
3637 mono_arch_flush_icache (pdata->code, 4);
3640 } else if ((thunks [0] == 0) && (thunks [1] == 0) && (thunks [2] == 0)) {
3641 /* found a free slot instead: emit thunk */
3642 /* ARMREG_IP is fine to use since this can't be an IMT call
3645 code = (guchar*)thunks;
3646 ARM_LDR_IMM (code, ARMREG_IP, ARMREG_PC, 0);
3647 if (thumb_supported)
3648 ARM_BX (code, ARMREG_IP);
3650 ARM_MOV_REG_REG (code, ARMREG_PC, ARMREG_IP);
3651 thunks [2] = (guint32)pdata->target;
3652 mono_arch_flush_icache ((guchar*)thunks, 12);
3654 arm_patch (pdata->code, (guchar*)thunks);
3655 mono_arch_flush_icache (pdata->code, 4);
3659 /* skip 12 bytes, the size of the thunk */
3663 //g_print ("failed thunk lookup for %p from %p at %p (%d entries)\n", pdata->target, pdata->code, data, count);
/*
 * handle_thunk:
 *
 *   Find or create a branch thunk so that CODE can reach TARGET when the
 * direct branch displacement does not fit.  Searches, in order: the
 * dynamic-method code manager (if given), the domain's code manager, and
 * finally all dynamic-method code managers.  Asserts if no slot is found.
 */
3669 handle_thunk (MonoDomain *domain, int absolute, guchar *code, const guchar *target, MonoCodeManager *dyn_code_mp)
3674 domain = mono_domain_get ();
3677 pdata.target = target;
3678 pdata.absolute = absolute;
3682 mono_code_manager_foreach (dyn_code_mp, search_thunk_slot, &pdata);
3685 if (pdata.found != 1) {
3686 mono_domain_lock (domain);
3687 mono_domain_code_foreach (domain, search_thunk_slot, &pdata);
3690 /* this uses the first available slot */
3692 mono_domain_code_foreach (domain, search_thunk_slot, &pdata);
3694 mono_domain_unlock (domain);
3697 if (pdata.found != 1) {
3699 GHashTableIter iter;
3700 MonoJitDynamicMethodInfo *ji;
3703 * This might be a dynamic method, search its code manager. We can only
3704 * use the dynamic method containing CODE, since the others might be freed later.
3708 mono_domain_lock (domain);
3709 hash = domain_jit_info (domain)->dynamic_code_hash;
3711 /* FIXME: Speed this up */
3712 g_hash_table_iter_init (&iter, hash);
3713 while (g_hash_table_iter_next (&iter, NULL, (gpointer*)&ji)) {
3714 mono_code_manager_foreach (ji->code_mp, search_thunk_slot, &pdata);
3715 if (pdata.found == 1)
3719 mono_domain_unlock (domain);
3721 if (pdata.found != 1)
3722 g_print ("thunk failed for %p from %p\n", target, code);
3723 g_assert (pdata.found == 1);
/*
 * arm_patch_general:
 *
 *   Patch the instruction(s) at CODE to transfer control to TARGET.
 * For B/BL (prim == 5/101b) the 24-bit displacement is rewritten in
 * place (switching BL to BLX when the target address has the Thumb bit
 * set); if the displacement does not fit, a thunk is created via
 * handle_thunk ().  For the indirect BX / BLX / mov-pc sequences the
 * embedded address constant is located by matching the known code
 * patterns and overwritten.
 */
3727 arm_patch_general (MonoDomain *domain, guchar *code, const guchar *target, MonoCodeManager *dyn_code_mp)
3729 guint32 *code32 = (void*)code;
3730 guint32 ins = *code32;
3731 guint32 prim = (ins >> 25) & 7;
3732 guint32 tval = GPOINTER_TO_UINT (target);
3734 //g_print ("patching 0x%08x (0x%08x) to point to 0x%08x\n", code, ins, target);
3735 if (prim == 5) { /* 101b */
3736 /* the diff starts 8 bytes from the branch opcode */
3737 gint diff = target - code - 8;
3739 gint tmask = 0xffffffff;
3740 if (tval & 1) { /* entering thumb mode */
3741 diff = target - 1 - code - 8;
3742 g_assert (thumb_supported);
3743 tbits = 0xf << 28; /* bl->blx bit pattern */
3744 g_assert ((ins & (1 << 24))); /* it must be a bl, not b instruction */
3745 /* this low bit of the displacement is moved to bit 24 in the instruction encoding */
3749 tmask = ~(1 << 24); /* clear the link bit */
3750 /*g_print ("blx to thumb: target: %p, code: %p, diff: %d, mask: %x\n", target, code, diff, tmask);*/
/* forward branch: displacement must fit in the 24-bit immediate */
3755 if (diff <= 33554431) {
3757 ins = (ins & 0xff000000) | diff;
3759 *code32 = ins | tbits;
3763 /* diff between 0 and -33554432 */
3764 if (diff >= -33554432) {
3766 ins = (ins & 0xff000000) | (diff & ~0xff000000);
3768 *code32 = ins | tbits;
/* out of branch range: route through a thunk */
3773 handle_thunk (domain, TRUE, code, target, dyn_code_mp);
3777 #ifdef USE_JUMP_TABLES
3779 gpointer *jte = mono_jumptable_get_entry (code);
3781 jte [0] = (gpointer) target;
3785 * The alternative call sequences looks like this:
3787 * ldr ip, [pc] // loads the address constant
3788 * b 1f // jumps around the constant
3789 * address constant embedded in the code
3794 * There are two cases for patching:
3795 * a) at the end of method emission: in this case code points to the start
3796 * of the call sequence
3797 * b) during runtime patching of the call site: in this case code points
3798 * to the mov pc, ip instruction
3800 * We have to handle also the thunk jump code sequence:
3804 * address constant // execution never reaches here
3806 if ((ins & 0x0ffffff0) == 0x12fff10) {
3807 /* Branch and exchange: the address is constructed in a reg
3808 * We can patch BX when the code sequence is the following:
3809 * ldr ip, [pc, #0] ; 0x8
/* rebuild the expected instruction pattern to locate the constant slot */
3816 guint8 *emit = (guint8*)ccode;
3817 ARM_LDR_IMM (emit, ARMREG_IP, ARMREG_PC, 0);
3819 ARM_MOV_REG_REG (emit, ARMREG_LR, ARMREG_PC);
3820 ARM_BX (emit, ARMREG_IP);
3822 /*patching from magic trampoline*/
3823 if (ins == ccode [3]) {
3824 g_assert (code32 [-4] == ccode [0]);
3825 g_assert (code32 [-3] == ccode [1]);
3826 g_assert (code32 [-1] == ccode [2]);
3827 code32 [-2] = (guint32)target;
3830 /*patching from JIT*/
3831 if (ins == ccode [0]) {
3832 g_assert (code32 [1] == ccode [1]);
3833 g_assert (code32 [3] == ccode [2]);
3834 g_assert (code32 [4] == ccode [3]);
3835 code32 [2] = (guint32)target;
3838 g_assert_not_reached ();
3839 } else if ((ins & 0x0ffffff0) == 0x12fff30) {
/* BLX register form */
3847 guint8 *emit = (guint8*)ccode;
3848 ARM_LDR_IMM (emit, ARMREG_IP, ARMREG_PC, 0);
3850 ARM_BLX_REG (emit, ARMREG_IP);
3852 g_assert (code32 [-3] == ccode [0]);
3853 g_assert (code32 [-2] == ccode [1]);
3854 g_assert (code32 [0] == ccode [2]);
3856 code32 [-1] = (guint32)target;
/* mov pc, ip style sequences (including the thunk jump code) */
3859 guint32 *tmp = ccode;
3860 guint8 *emit = (guint8*)tmp;
3861 ARM_LDR_IMM (emit, ARMREG_IP, ARMREG_PC, 0);
3862 ARM_MOV_REG_REG (emit, ARMREG_LR, ARMREG_PC);
3863 ARM_MOV_REG_REG (emit, ARMREG_PC, ARMREG_IP);
3864 ARM_BX (emit, ARMREG_IP);
3865 if (ins == ccode [2]) {
3866 g_assert_not_reached (); // should be -2 ...
3867 code32 [-1] = (guint32)target;
3870 if (ins == ccode [0]) {
3871 /* handles both thunk jump code and the far call sequence */
3872 code32 [2] = (guint32)target;
3875 g_assert_not_reached ();
3877 // g_print ("patched with 0x%08x\n", ins);
/* arm_patch: convenience wrapper over arm_patch_general () with no
 * domain and no dynamic-method code manager. */
3882 arm_patch (guchar *code, const guchar *target)
3884 arm_patch_general (NULL, code, target, NULL);
3888 * Return the >= 0 uimm8 value if val can be represented with a byte + rotation
3889 * (with the rotation amount in *rot_amount. rot_amount is already adjusted
3890 * to be used with the emit macros.
3891 * Return -1 otherwise.
3894 mono_arm_is_rotated_imm8 (guint32 val, gint *rot_amount)
/* try every even rotation (ARM immediates rotate by multiples of 2) */
3897 for (i = 0; i < 31; i+= 2) {
3898 res = (val << (32 - i)) | (val >> i);
3901 *rot_amount = i? 32 - i: 0;
3908 * Emits in code a sequence of instructions that load the value 'val'
3909 * into the dreg register. Uses at most 4 instructions.
3912 mono_arm_emit_load_imm (guint8 *code, int dreg, guint32 val)
3914 int imm8, rot_amount;
/* PC-relative literal load path (constant pool follows the instruction) */
3916 ARM_LDR_IMM (code, dreg, ARMREG_PC, 0);
3917 /* skip the constant pool */
/* single-instruction paths: rotated imm8, or MVN of its complement */
3923 if ((imm8 = mono_arm_is_rotated_imm8 (val, &rot_amount)) >= 0) {
3924 ARM_MOV_REG_IMM (code, dreg, imm8, rot_amount);
3925 } else if ((imm8 = mono_arm_is_rotated_imm8 (~val, &rot_amount)) >= 0) {
3926 ARM_MVN_REG_IMM (code, dreg, imm8, rot_amount);
/* ARMv6T2+ MOVW/MOVT pair builds any 32-bit constant in two instructions */
3929 ARM_MOVW_REG_IMM (code, dreg, val & 0xffff);
3931 ARM_MOVT_REG_IMM (code, dreg, (val >> 16) & 0xffff);
/* legacy fallback: build the value byte by byte with MOV + up to 3 ADDs */
3935 ARM_MOV_REG_IMM8 (code, dreg, (val & 0xFF));
3937 ARM_ADD_REG_IMM (code, dreg, dreg, (val & 0xFF00) >> 8, 24);
3939 if (val & 0xFF0000) {
3940 ARM_ADD_REG_IMM (code, dreg, dreg, (val & 0xFF0000) >> 16, 16);
3942 if (val & 0xFF000000) {
3943 ARM_ADD_REG_IMM (code, dreg, dreg, (val & 0xFF000000) >> 24, 8);
3945 } else if (val & 0xFF00) {
3946 ARM_MOV_REG_IMM (code, dreg, (val & 0xFF00) >> 8, 24);
3947 if (val & 0xFF0000) {
3948 ARM_ADD_REG_IMM (code, dreg, dreg, (val & 0xFF0000) >> 16, 16);
3950 if (val & 0xFF000000) {
3951 ARM_ADD_REG_IMM (code, dreg, dreg, (val & 0xFF000000) >> 24, 8);
3953 } else if (val & 0xFF0000) {
3954 ARM_MOV_REG_IMM (code, dreg, (val & 0xFF0000) >> 16, 16);
3955 if (val & 0xFF000000) {
3956 ARM_ADD_REG_IMM (code, dreg, dreg, (val & 0xFF000000) >> 24, 8);
3959 //g_assert_not_reached ();
/* mono_arm_thumb_supported: whether the CPU supports Thumb interworking
 * (reads the file-level thumb_supported flag). */
3965 mono_arm_thumb_supported (void)
3967 return thumb_supported;
3973 * emit_load_volatile_arguments:
3975 * Load volatile arguments from the stack to the original input registers.
3976 * Required before a tail call.
3979 emit_load_volatile_arguments (MonoCompile *cfg, guint8 *code)
3981 MonoMethod *method = cfg->method;
3982 MonoMethodSignature *sig;
3987 /* FIXME: Generate intermediate code instead */
3989 sig = mono_method_signature (method);
3991 /* This is the opposite of the code in emit_prolog */
3995 cinfo = get_call_info (cfg->generic_sharing_context, NULL, sig);
/* If the callee returns a value type via a hidden return-address argument,
 * reload that address into its original register from the spill slot. */
3997 if (cinfo->vtype_retaddr) {
3998 ArgInfo *ainfo = &cinfo->ret;
3999 inst = cfg->vret_addr;
4000 g_assert (arm_is_imm12 (inst->inst_offset));
4001 ARM_LDR_IMM (code, ainfo->reg, inst->inst_basereg, inst->inst_offset);
/* Walk every formal argument (plus the implicit 'this') and move it back
 * to the location the calling convention assigned it. */
4003 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
4004 ArgInfo *ainfo = cinfo->args + i;
4005 inst = cfg->args [pos];
4007 if (cfg->verbose_level > 2)
4008 g_print ("Loading argument %d (type: %d)\n", i, ainfo->storage);
/* Argument was allocated to a register by the register allocator. */
4009 if (inst->opcode == OP_REGVAR) {
4010 if (ainfo->storage == RegTypeGeneral)
4011 ARM_MOV_REG_REG (code, inst->dreg, ainfo->reg);
4012 else if (ainfo->storage == RegTypeFP) {
4013 g_assert_not_reached ();
/* Caller passed this argument on the stack: reload it relative to SP,
 * using IP as a scratch register when the offset exceeds imm12 range. */
4014 } else if (ainfo->storage == RegTypeBase) {
4018 if (arm_is_imm12 (prev_sp_offset + ainfo->offset)) {
4019 ARM_LDR_IMM (code, inst->dreg, ARMREG_SP, (prev_sp_offset + ainfo->offset));
4021 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
4022 ARM_LDR_REG_REG (code, inst->dreg, ARMREG_SP, ARMREG_IP);
4026 g_assert_not_reached ();
/* Argument lives in a stack slot: reload it into the incoming register(s). */
4028 if (ainfo->storage == RegTypeGeneral || ainfo->storage == RegTypeIRegPair) {
4029 switch (ainfo->size) {
/* 8-byte case: reload both halves of the register pair. */
4036 g_assert (arm_is_imm12 (inst->inst_offset));
4037 ARM_LDR_IMM (code, ainfo->reg, inst->inst_basereg, inst->inst_offset);
4038 g_assert (arm_is_imm12 (inst->inst_offset + 4));
4039 ARM_LDR_IMM (code, ainfo->reg + 1, inst->inst_basereg, inst->inst_offset + 4);
/* Default word-sized case, with the usual imm12 / scratch-IP split. */
4042 if (arm_is_imm12 (inst->inst_offset)) {
4043 ARM_LDR_IMM (code, ainfo->reg, inst->inst_basereg, inst->inst_offset);
4045 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
4046 ARM_LDR_REG_REG (code, ainfo->reg, inst->inst_basereg, ARMREG_IP);
4050 } else if (ainfo->storage == RegTypeBaseGen) {
4053 } else if (ainfo->storage == RegTypeBase) {
4055 } else if (ainfo->storage == RegTypeFP) {
4056 g_assert_not_reached ();
/* Value type passed by value in consecutive registers: reload each word
 * of the struct from the stack into its register. */
4057 } else if (ainfo->storage == RegTypeStructByVal) {
4058 int doffset = inst->inst_offset;
4062 if (mono_class_from_mono_type (inst->inst_vtype))
4063 size = mono_class_native_size (mono_class_from_mono_type (inst->inst_vtype), NULL);
4064 for (cur_reg = 0; cur_reg < ainfo->size; ++cur_reg) {
4065 if (arm_is_imm12 (doffset)) {
4066 ARM_LDR_IMM (code, ainfo->reg + cur_reg, inst->inst_basereg, doffset);
4068 code = mono_arm_emit_load_imm (code, ARMREG_IP, doffset);
4069 ARM_LDR_REG_REG (code, ainfo->reg + cur_reg, inst->inst_basereg, ARMREG_IP);
4071 soffset += sizeof (gpointer);
4072 doffset += sizeof (gpointer);
4077 } else if (ainfo->storage == RegTypeStructByAddr) {
4092 mono_arch_output_basic_block (MonoCompile *cfg, MonoBasicBlock *bb)
4097 guint8 *code = cfg->native_code + cfg->code_len;
4098 MonoInst *last_ins = NULL;
4099 guint last_offset = 0;
4101 int imm8, rot_amount;
4103 /* we don't align basic blocks of loops on arm */
4105 if (cfg->verbose_level > 2)
4106 g_print ("Basic block %d starting at offset 0x%x\n", bb->block_num, bb->native_offset);
4108 cpos = bb->max_offset;
4110 if (cfg->prof_options & MONO_PROFILE_COVERAGE) {
4111 //MonoCoverageInfo *cov = mono_get_coverage_info (cfg->method);
4112 //g_assert (!mono_compile_aot);
4115 // cov->data [bb->dfn].iloffset = bb->cil_code - cfg->cil_code;
4116 /* this is not thread safe, but good enough */
4117 /* fixme: howto handle overflows? */
4118 //x86_inc_mem (code, &cov->data [bb->dfn].count);
4121 if (mono_break_at_bb_method && mono_method_desc_full_match (mono_break_at_bb_method, cfg->method) && bb->block_num == mono_break_at_bb_bb_num) {
4122 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_INTERNAL_METHOD,
4123 (gpointer)"mono_break");
4124 code = emit_call_seq (cfg, code);
4127 MONO_BB_FOR_EACH_INS (bb, ins) {
4128 offset = code - cfg->native_code;
4130 max_len = ((guint8 *)ins_get_spec (ins->opcode))[MONO_INST_LEN];
4132 if (offset > (cfg->code_size - max_len - 16)) {
4133 cfg->code_size *= 2;
4134 cfg->native_code = g_realloc (cfg->native_code, cfg->code_size);
4135 code = cfg->native_code + offset;
4137 // if (ins->cil_code)
4138 // g_print ("cil code\n");
4139 mono_debug_record_line_number (cfg, ins, offset);
4141 switch (ins->opcode) {
4142 case OP_MEMORY_BARRIER:
4144 ARM_MOV_REG_IMM8 (code, ARMREG_R0, 0);
4145 ARM_MCR (code, 15, 0, ARMREG_R0, 7, 10, 5);
4149 #ifdef HAVE_AEABI_READ_TP
4150 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_INTERNAL_METHOD,
4151 (gpointer)"__aeabi_read_tp");
4152 code = emit_call_seq (cfg, code);
4154 ARM_LDR_IMM (code, ins->dreg, ARMREG_R0, ins->inst_offset);
4156 g_assert_not_reached ();
4159 case OP_ATOMIC_EXCHANGE_I4:
4160 case OP_ATOMIC_CAS_I4:
4161 case OP_ATOMIC_ADD_I4: {
4165 g_assert (v7_supported);
4168 if (ins->sreg1 != ARMREG_IP && ins->sreg2 != ARMREG_IP && ins->sreg3 != ARMREG_IP)
4170 else if (ins->sreg1 != ARMREG_R0 && ins->sreg2 != ARMREG_R0 && ins->sreg3 != ARMREG_R0)
4172 else if (ins->sreg1 != ARMREG_R1 && ins->sreg2 != ARMREG_R1 && ins->sreg3 != ARMREG_R1)
4176 g_assert (cfg->arch.atomic_tmp_offset != -1);
4177 ARM_STR_IMM (code, tmpreg, cfg->frame_reg, cfg->arch.atomic_tmp_offset);
4179 switch (ins->opcode) {
4180 case OP_ATOMIC_EXCHANGE_I4:
4182 ARM_DMB (code, ARM_DMB_SY);
4183 ARM_LDREX_REG (code, ARMREG_LR, ins->sreg1);
4184 ARM_STREX_REG (code, tmpreg, ins->sreg2, ins->sreg1);
4185 ARM_CMP_REG_IMM (code, tmpreg, 0, 0);
4187 ARM_B_COND (code, ARMCOND_NE, 0);
4188 arm_patch (buf [1], buf [0]);
4190 case OP_ATOMIC_CAS_I4:
4191 ARM_DMB (code, ARM_DMB_SY);
4193 ARM_LDREX_REG (code, ARMREG_LR, ins->sreg1);
4194 ARM_CMP_REG_REG (code, ARMREG_LR, ins->sreg3);
4196 ARM_B_COND (code, ARMCOND_NE, 0);
4197 ARM_STREX_REG (code, tmpreg, ins->sreg2, ins->sreg1);
4198 ARM_CMP_REG_IMM (code, tmpreg, 0, 0);
4200 ARM_B_COND (code, ARMCOND_NE, 0);
4201 arm_patch (buf [2], buf [0]);
4202 arm_patch (buf [1], code);
4204 case OP_ATOMIC_ADD_I4:
4206 ARM_DMB (code, ARM_DMB_SY);
4207 ARM_LDREX_REG (code, ARMREG_LR, ins->sreg1);
4208 ARM_ADD_REG_REG (code, ARMREG_LR, ARMREG_LR, ins->sreg2);
4209 ARM_STREX_REG (code, tmpreg, ARMREG_LR, ins->sreg1);
4210 ARM_CMP_REG_IMM (code, tmpreg, 0, 0);
4212 ARM_B_COND (code, ARMCOND_NE, 0);
4213 arm_patch (buf [1], buf [0]);
4216 g_assert_not_reached ();
4219 ARM_DMB (code, ARM_DMB_SY);
4220 if (tmpreg != ins->dreg)
4221 ARM_LDR_IMM (code, tmpreg, cfg->frame_reg, cfg->arch.atomic_tmp_offset);
4222 ARM_MOV_REG_REG (code, ins->dreg, ARMREG_LR);
4225 case OP_ATOMIC_LOAD_I1:
4226 case OP_ATOMIC_LOAD_U1:
4227 case OP_ATOMIC_LOAD_I2:
4228 case OP_ATOMIC_LOAD_U2:
4229 case OP_ATOMIC_LOAD_I4:
4230 case OP_ATOMIC_LOAD_U4:
4231 case OP_ATOMIC_LOAD_R4:
4232 case OP_ATOMIC_LOAD_R8: {
4233 if (ins->backend.memory_barrier_kind == MONO_MEMORY_BARRIER_SEQ)
4234 ARM_DMB (code, ARM_DMB_SY);
4236 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_offset);
4238 switch (ins->opcode) {
4239 case OP_ATOMIC_LOAD_I1:
4240 ARM_LDRSB_REG_REG (code, ins->dreg, ins->inst_basereg, ARMREG_LR);
4242 case OP_ATOMIC_LOAD_U1:
4243 ARM_LDRB_REG_REG (code, ins->dreg, ins->inst_basereg, ARMREG_LR);
4245 case OP_ATOMIC_LOAD_I2:
4246 ARM_LDRSH_REG_REG (code, ins->dreg, ins->inst_basereg, ARMREG_LR);
4248 case OP_ATOMIC_LOAD_U2:
4249 ARM_LDRH_REG_REG (code, ins->dreg, ins->inst_basereg, ARMREG_LR);
4251 case OP_ATOMIC_LOAD_I4:
4252 case OP_ATOMIC_LOAD_U4:
4253 ARM_LDR_REG_REG (code, ins->dreg, ins->inst_basereg, ARMREG_LR);
4255 case OP_ATOMIC_LOAD_R4:
4256 code = mono_arm_emit_vfp_scratch_save (cfg, code, vfp_scratch1);
4257 ARM_FLDS (code, vfp_scratch1, ARMREG_LR, 0);
4258 ARM_CVTS (code, ins->dreg, vfp_scratch1);
4259 code = mono_arm_emit_vfp_scratch_restore (cfg, code, vfp_scratch1);
4261 case OP_ATOMIC_LOAD_R8:
4262 ARM_ADD_REG_REG (code, ARMREG_LR, ins->inst_basereg, ARMREG_LR);
4263 ARM_FLDD (code, ins->dreg, ARMREG_LR, 0);
4267 ARM_DMB (code, ARM_DMB_SY);
4270 case OP_ATOMIC_STORE_I1:
4271 case OP_ATOMIC_STORE_U1:
4272 case OP_ATOMIC_STORE_I2:
4273 case OP_ATOMIC_STORE_U2:
4274 case OP_ATOMIC_STORE_I4:
4275 case OP_ATOMIC_STORE_U4:
4276 case OP_ATOMIC_STORE_R4:
4277 case OP_ATOMIC_STORE_R8: {
4278 ARM_DMB (code, ARM_DMB_SY);
4280 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_offset);
4282 switch (ins->opcode) {
4283 case OP_ATOMIC_STORE_I1:
4284 case OP_ATOMIC_STORE_U1:
4285 ARM_STRB_REG_REG (code, ins->sreg1, ins->inst_destbasereg, ARMREG_LR);
4287 case OP_ATOMIC_STORE_I2:
4288 case OP_ATOMIC_STORE_U2:
4289 ARM_STRH_REG_REG (code, ins->sreg1, ins->inst_destbasereg, ARMREG_LR);
4291 case OP_ATOMIC_STORE_I4:
4292 case OP_ATOMIC_STORE_U4:
4293 ARM_STR_REG_REG (code, ins->sreg1, ins->inst_destbasereg, ARMREG_LR);
4295 case OP_ATOMIC_STORE_R4:
4296 code = mono_arm_emit_vfp_scratch_save (cfg, code, vfp_scratch1);
4297 ARM_CVTD (code, vfp_scratch1, ins->sreg1);
4298 ARM_FSTS (code, vfp_scratch1, ins->inst_destbasereg, ins->inst_offset);
4299 code = mono_arm_emit_vfp_scratch_restore (cfg, code, vfp_scratch1);
4301 case OP_ATOMIC_STORE_R8:
4302 ARM_ADD_REG_REG (code, ARMREG_LR, ins->inst_destbasereg, ARMREG_LR);
4303 ARM_FSTD (code, ins->sreg1, ARMREG_LR, 0);
4307 if (ins->backend.memory_barrier_kind == MONO_MEMORY_BARRIER_SEQ)
4308 ARM_DMB (code, ARM_DMB_SY);
4312 ppc_mullw (code, ppc_r4, ins->sreg1, ins->sreg2);
4313 ppc_mulhw (code, ppc_r3, ins->sreg1, ins->sreg2);
4316 ppc_mullw (code, ppc_r4, ins->sreg1, ins->sreg2);
4317 ppc_mulhwu (code, ppc_r3, ins->sreg1, ins->sreg2);
4319 case OP_STOREI1_MEMBASE_IMM:
4320 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_imm & 0xFF);
4321 g_assert (arm_is_imm12 (ins->inst_offset));
4322 ARM_STRB_IMM (code, ARMREG_LR, ins->inst_destbasereg, ins->inst_offset);
4324 case OP_STOREI2_MEMBASE_IMM:
4325 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_imm & 0xFFFF);
4326 g_assert (arm_is_imm8 (ins->inst_offset));
4327 ARM_STRH_IMM (code, ARMREG_LR, ins->inst_destbasereg, ins->inst_offset);
4329 case OP_STORE_MEMBASE_IMM:
4330 case OP_STOREI4_MEMBASE_IMM:
4331 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_imm);
4332 g_assert (arm_is_imm12 (ins->inst_offset));
4333 ARM_STR_IMM (code, ARMREG_LR, ins->inst_destbasereg, ins->inst_offset);
4335 case OP_STOREI1_MEMBASE_REG:
4336 g_assert (arm_is_imm12 (ins->inst_offset));
4337 ARM_STRB_IMM (code, ins->sreg1, ins->inst_destbasereg, ins->inst_offset);
4339 case OP_STOREI2_MEMBASE_REG:
4340 g_assert (arm_is_imm8 (ins->inst_offset));
4341 ARM_STRH_IMM (code, ins->sreg1, ins->inst_destbasereg, ins->inst_offset);
4343 case OP_STORE_MEMBASE_REG:
4344 case OP_STOREI4_MEMBASE_REG:
4345 /* this case is special, since it happens for spill code after lowering has been called */
4346 if (arm_is_imm12 (ins->inst_offset)) {
4347 ARM_STR_IMM (code, ins->sreg1, ins->inst_destbasereg, ins->inst_offset);
4349 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_offset);
4350 ARM_STR_REG_REG (code, ins->sreg1, ins->inst_destbasereg, ARMREG_LR);
4353 case OP_STOREI1_MEMINDEX:
4354 ARM_STRB_REG_REG (code, ins->sreg1, ins->inst_destbasereg, ins->sreg2);
4356 case OP_STOREI2_MEMINDEX:
4357 ARM_STRH_REG_REG (code, ins->sreg1, ins->inst_destbasereg, ins->sreg2);
4359 case OP_STORE_MEMINDEX:
4360 case OP_STOREI4_MEMINDEX:
4361 ARM_STR_REG_REG (code, ins->sreg1, ins->inst_destbasereg, ins->sreg2);
4364 g_assert_not_reached ();
4366 case OP_LOAD_MEMINDEX:
4367 case OP_LOADI4_MEMINDEX:
4368 case OP_LOADU4_MEMINDEX:
4369 ARM_LDR_REG_REG (code, ins->dreg, ins->inst_basereg, ins->sreg2);
4371 case OP_LOADI1_MEMINDEX:
4372 ARM_LDRSB_REG_REG (code, ins->dreg, ins->inst_basereg, ins->sreg2);
4374 case OP_LOADU1_MEMINDEX:
4375 ARM_LDRB_REG_REG (code, ins->dreg, ins->inst_basereg, ins->sreg2);
4377 case OP_LOADI2_MEMINDEX:
4378 ARM_LDRSH_REG_REG (code, ins->dreg, ins->inst_basereg, ins->sreg2);
4380 case OP_LOADU2_MEMINDEX:
4381 ARM_LDRH_REG_REG (code, ins->dreg, ins->inst_basereg, ins->sreg2);
4383 case OP_LOAD_MEMBASE:
4384 case OP_LOADI4_MEMBASE:
4385 case OP_LOADU4_MEMBASE:
4386 /* this case is special, since it happens for spill code after lowering has been called */
4387 if (arm_is_imm12 (ins->inst_offset)) {
4388 ARM_LDR_IMM (code, ins->dreg, ins->inst_basereg, ins->inst_offset);
4390 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_offset);
4391 ARM_LDR_REG_REG (code, ins->dreg, ins->inst_basereg, ARMREG_LR);
4394 case OP_LOADI1_MEMBASE:
4395 g_assert (arm_is_imm8 (ins->inst_offset));
4396 ARM_LDRSB_IMM (code, ins->dreg, ins->inst_basereg, ins->inst_offset);
4398 case OP_LOADU1_MEMBASE:
4399 g_assert (arm_is_imm12 (ins->inst_offset));
4400 ARM_LDRB_IMM (code, ins->dreg, ins->inst_basereg, ins->inst_offset);
4402 case OP_LOADU2_MEMBASE:
4403 g_assert (arm_is_imm8 (ins->inst_offset));
4404 ARM_LDRH_IMM (code, ins->dreg, ins->inst_basereg, ins->inst_offset);
4406 case OP_LOADI2_MEMBASE:
4407 g_assert (arm_is_imm8 (ins->inst_offset));
4408 ARM_LDRSH_IMM (code, ins->dreg, ins->inst_basereg, ins->inst_offset);
4410 case OP_ICONV_TO_I1:
4411 ARM_SHL_IMM (code, ins->dreg, ins->sreg1, 24);
4412 ARM_SAR_IMM (code, ins->dreg, ins->dreg, 24);
4414 case OP_ICONV_TO_I2:
4415 ARM_SHL_IMM (code, ins->dreg, ins->sreg1, 16);
4416 ARM_SAR_IMM (code, ins->dreg, ins->dreg, 16);
4418 case OP_ICONV_TO_U1:
4419 ARM_AND_REG_IMM8 (code, ins->dreg, ins->sreg1, 0xff);
4421 case OP_ICONV_TO_U2:
4422 ARM_SHL_IMM (code, ins->dreg, ins->sreg1, 16);
4423 ARM_SHR_IMM (code, ins->dreg, ins->dreg, 16);
4427 ARM_CMP_REG_REG (code, ins->sreg1, ins->sreg2);
4429 case OP_COMPARE_IMM:
4430 case OP_ICOMPARE_IMM:
4431 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
4432 g_assert (imm8 >= 0);
4433 ARM_CMP_REG_IMM (code, ins->sreg1, imm8, rot_amount);
4437 * gdb does not like encountering the hw breakpoint ins in the debugged code.
4438 * So instead of emitting a trap, we emit a call to a C function and place a
4441 //*(int*)code = 0xef9f0001;
4444 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_INTERNAL_METHOD,
4445 (gpointer)"mono_break");
4446 code = emit_call_seq (cfg, code);
4448 case OP_RELAXED_NOP:
4453 case OP_DUMMY_STORE:
4454 case OP_DUMMY_ICONST:
4455 case OP_DUMMY_R8CONST:
4456 case OP_NOT_REACHED:
4459 case OP_IL_SEQ_POINT:
4460 mono_add_seq_point (cfg, bb, ins, code - cfg->native_code);
4462 case OP_SEQ_POINT: {
4464 MonoInst *info_var = cfg->arch.seq_point_info_var;
4465 MonoInst *ss_trigger_page_var = cfg->arch.ss_trigger_page_var;
4466 MonoInst *ss_read_var = cfg->arch.seq_point_read_var;
4467 MonoInst *ss_method_var = cfg->arch.seq_point_ss_method_var;
4468 MonoInst *bp_method_var = cfg->arch.seq_point_bp_method_var;
4470 int dreg = ARMREG_LR;
4472 if (cfg->soft_breakpoints) {
4473 g_assert (!cfg->compile_aot);
4477 * For AOT, we use one got slot per method, which will point to a
4478 * SeqPointInfo structure, containing all the information required
4479 * by the code below.
4481 if (cfg->compile_aot) {
4482 g_assert (info_var);
4483 g_assert (info_var->opcode == OP_REGOFFSET);
4484 g_assert (arm_is_imm12 (info_var->inst_offset));
4487 if (!cfg->soft_breakpoints) {
4489 * Read from the single stepping trigger page. This will cause a
4490 * SIGSEGV when single stepping is enabled.
4491 * We do this _before_ the breakpoint, so single stepping after
4492 * a breakpoint is hit will step to the next IL offset.
4494 g_assert (((guint64)(gsize)ss_trigger_page >> 32) == 0);
4497 if (ins->flags & MONO_INST_SINGLE_STEP_LOC) {
4498 if (cfg->soft_breakpoints) {
4499 /* Load the address of the sequence point trigger variable. */
4502 g_assert (var->opcode == OP_REGOFFSET);
4503 g_assert (arm_is_imm12 (var->inst_offset));
4504 ARM_LDR_IMM (code, dreg, var->inst_basereg, var->inst_offset);
4506 /* Read the value and check whether it is non-zero. */
4507 ARM_LDR_IMM (code, dreg, dreg, 0);
4508 ARM_CMP_REG_IMM (code, dreg, 0, 0);
4510 /* Load the address of the sequence point method. */
4511 var = ss_method_var;
4513 g_assert (var->opcode == OP_REGOFFSET);
4514 g_assert (arm_is_imm12 (var->inst_offset));
4515 ARM_LDR_IMM (code, dreg, var->inst_basereg, var->inst_offset);
4517 /* Call it conditionally. */
4518 ARM_BLX_REG_COND (code, ARMCOND_NE, dreg);
4520 if (cfg->compile_aot) {
4521 /* Load the trigger page addr from the variable initialized in the prolog */
4522 var = ss_trigger_page_var;
4524 g_assert (var->opcode == OP_REGOFFSET);
4525 g_assert (arm_is_imm12 (var->inst_offset));
4526 ARM_LDR_IMM (code, dreg, var->inst_basereg, var->inst_offset);
4528 #ifdef USE_JUMP_TABLES
4529 gpointer *jte = mono_jumptable_add_entry ();
4530 code = mono_arm_load_jumptable_entry (code, jte, dreg);
4531 jte [0] = ss_trigger_page;
4533 ARM_LDR_IMM (code, dreg, ARMREG_PC, 0);
4535 *(int*)code = (int)ss_trigger_page;
4539 ARM_LDR_IMM (code, dreg, dreg, 0);
4543 mono_add_seq_point (cfg, bb, ins, code - cfg->native_code);
4545 if (cfg->soft_breakpoints) {
4546 /* Load the address of the breakpoint method into ip. */
4547 var = bp_method_var;
4549 g_assert (var->opcode == OP_REGOFFSET);
4550 g_assert (arm_is_imm12 (var->inst_offset));
4551 ARM_LDR_IMM (code, dreg, var->inst_basereg, var->inst_offset);
4554 * A placeholder for a possible breakpoint inserted by
4555 * mono_arch_set_breakpoint ().
4558 } else if (cfg->compile_aot) {
4559 guint32 offset = code - cfg->native_code;
4562 ARM_LDR_IMM (code, dreg, info_var->inst_basereg, info_var->inst_offset);
4563 /* Add the offset */
4564 val = ((offset / 4) * sizeof (guint8*)) + MONO_STRUCT_OFFSET (SeqPointInfo, bp_addrs);
4565 /* Load the info->bp_addrs [offset], which is either 0 or the address of a trigger page */
4566 if (arm_is_imm12 ((int)val)) {
4567 ARM_LDR_IMM (code, dreg, dreg, val);
4569 ARM_ADD_REG_IMM (code, dreg, dreg, (val & 0xFF), 0);
4571 ARM_ADD_REG_IMM (code, dreg, dreg, (val & 0xFF00) >> 8, 24);
4573 ARM_ADD_REG_IMM (code, dreg, dreg, (val & 0xFF0000) >> 16, 16);
4574 g_assert (!(val & 0xFF000000));
4576 ARM_LDR_IMM (code, dreg, dreg, 0);
4578 /* What is faster, a branch or a load ? */
4579 ARM_CMP_REG_IMM (code, dreg, 0, 0);
4580 /* The breakpoint instruction */
4581 ARM_LDR_IMM_COND (code, dreg, dreg, 0, ARMCOND_NE);
4584 * A placeholder for a possible breakpoint inserted by
4585 * mono_arch_set_breakpoint ().
4587 for (i = 0; i < 4; ++i)
4594 ARM_ADDS_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4597 ARM_ADD_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4601 ARM_ADCS_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4604 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
4605 g_assert (imm8 >= 0);
4606 ARM_ADDS_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
4610 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
4611 g_assert (imm8 >= 0);
4612 ARM_ADD_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
4616 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
4617 g_assert (imm8 >= 0);
4618 ARM_ADCS_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
4621 ARM_ADD_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4622 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, "OverflowException");
4624 case OP_IADD_OVF_UN:
4625 ARM_ADD_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4626 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, "OverflowException");
4629 ARM_SUB_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4630 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, "OverflowException");
4632 case OP_ISUB_OVF_UN:
4633 ARM_SUB_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4634 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_TRUE, PPC_BR_EQ, "OverflowException");
4636 case OP_ADD_OVF_CARRY:
4637 ARM_ADCS_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4638 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, "OverflowException");
4640 case OP_ADD_OVF_UN_CARRY:
4641 ARM_ADCS_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4642 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, "OverflowException");
4644 case OP_SUB_OVF_CARRY:
4645 ARM_SBCS_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4646 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, "OverflowException");
4648 case OP_SUB_OVF_UN_CARRY:
4649 ARM_SBCS_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4650 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_TRUE, PPC_BR_EQ, "OverflowException");
4654 ARM_SUBS_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4657 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
4658 g_assert (imm8 >= 0);
4659 ARM_SUBS_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
4662 ARM_SUB_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4666 ARM_SBCS_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4670 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
4671 g_assert (imm8 >= 0);
4672 ARM_SUB_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
4676 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
4677 g_assert (imm8 >= 0);
4678 ARM_SBCS_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
4680 case OP_ARM_RSBS_IMM:
4681 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
4682 g_assert (imm8 >= 0);
4683 ARM_RSBS_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
4685 case OP_ARM_RSC_IMM:
4686 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
4687 g_assert (imm8 >= 0);
4688 ARM_RSC_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
4691 ARM_AND_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4695 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
4696 g_assert (imm8 >= 0);
4697 ARM_AND_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
4700 g_assert (v7s_supported);
4701 ARM_SDIV (code, ins->dreg, ins->sreg1, ins->sreg2);
4704 g_assert (v7s_supported);
4705 ARM_UDIV (code, ins->dreg, ins->sreg1, ins->sreg2);
4708 g_assert (v7s_supported);
4709 ARM_SDIV (code, ARMREG_LR, ins->sreg1, ins->sreg2);
4710 ARM_MLS (code, ins->dreg, ARMREG_LR, ins->sreg2, ins->sreg1);
4713 g_assert (v7s_supported);
4714 ARM_UDIV (code, ARMREG_LR, ins->sreg1, ins->sreg2);
4715 ARM_MLS (code, ins->dreg, ARMREG_LR, ins->sreg2, ins->sreg1);
4719 g_assert_not_reached ();
4721 ARM_ORR_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4725 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
4726 g_assert (imm8 >= 0);
4727 ARM_ORR_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
4730 ARM_EOR_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4734 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
4735 g_assert (imm8 >= 0);
4736 ARM_EOR_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
4739 ARM_SHL_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4744 ARM_SHL_IMM (code, ins->dreg, ins->sreg1, (ins->inst_imm & 0x1f));
4745 else if (ins->dreg != ins->sreg1)
4746 ARM_MOV_REG_REG (code, ins->dreg, ins->sreg1);
4749 ARM_SAR_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4754 ARM_SAR_IMM (code, ins->dreg, ins->sreg1, (ins->inst_imm & 0x1f));
4755 else if (ins->dreg != ins->sreg1)
4756 ARM_MOV_REG_REG (code, ins->dreg, ins->sreg1);
4759 case OP_ISHR_UN_IMM:
4761 ARM_SHR_IMM (code, ins->dreg, ins->sreg1, (ins->inst_imm & 0x1f));
4762 else if (ins->dreg != ins->sreg1)
4763 ARM_MOV_REG_REG (code, ins->dreg, ins->sreg1);
4766 ARM_SHR_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4769 ARM_MVN_REG_REG (code, ins->dreg, ins->sreg1);
4772 ARM_RSB_REG_IMM8 (code, ins->dreg, ins->sreg1, 0);
4775 if (ins->dreg == ins->sreg2)
4776 ARM_MUL_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4778 ARM_MUL_REG_REG (code, ins->dreg, ins->sreg2, ins->sreg1);
4781 g_assert_not_reached ();
4784 /* FIXME: handle ovf/ sreg2 != dreg */
4785 ARM_MUL_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4786 /* FIXME: MUL doesn't set the C/O flags on ARM */
4788 case OP_IMUL_OVF_UN:
4789 /* FIXME: handle ovf/ sreg2 != dreg */
4790 ARM_MUL_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4791 /* FIXME: MUL doesn't set the C/O flags on ARM */
4794 code = mono_arm_emit_load_imm (code, ins->dreg, ins->inst_c0);
4797 /* Load the GOT offset */
4798 mono_add_patch_info (cfg, offset, (MonoJumpInfoType)ins->inst_i1, ins->inst_p0);
4799 ARM_LDR_IMM (code, ins->dreg, ARMREG_PC, 0);
4801 *(gpointer*)code = NULL;
4803 /* Load the value from the GOT */
4804 ARM_LDR_REG_REG (code, ins->dreg, ARMREG_PC, ins->dreg);
4806 case OP_OBJC_GET_SELECTOR:
4807 mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_OBJC_SELECTOR_REF, ins->inst_p0);
4808 ARM_LDR_IMM (code, ins->dreg, ARMREG_PC, 0);
4810 *(gpointer*)code = NULL;
4812 ARM_LDR_REG_REG (code, ins->dreg, ARMREG_PC, ins->dreg);
4814 case OP_ICONV_TO_I4:
4815 case OP_ICONV_TO_U4:
4817 if (ins->dreg != ins->sreg1)
4818 ARM_MOV_REG_REG (code, ins->dreg, ins->sreg1);
4821 int saved = ins->sreg2;
4822 if (ins->sreg2 == ARM_LSW_REG) {
4823 ARM_MOV_REG_REG (code, ARMREG_LR, ins->sreg2);
4826 if (ins->sreg1 != ARM_LSW_REG)
4827 ARM_MOV_REG_REG (code, ARM_LSW_REG, ins->sreg1);
4828 if (saved != ARM_MSW_REG)
4829 ARM_MOV_REG_REG (code, ARM_MSW_REG, saved);
4834 ARM_CPYD (code, ins->dreg, ins->sreg1);
4836 case OP_FCONV_TO_R4:
4838 ARM_CVTD (code, ins->dreg, ins->sreg1);
4839 ARM_CVTS (code, ins->dreg, ins->dreg);
4844 * Keep in sync with mono_arch_emit_epilog
4846 g_assert (!cfg->method->save_lmf);
4848 code = emit_load_volatile_arguments (cfg, code);
4850 code = emit_big_add (code, ARMREG_SP, cfg->frame_reg, cfg->stack_usage);
4852 if (cfg->used_int_regs)
4853 ARM_POP (code, cfg->used_int_regs);
4854 ARM_POP (code, (1 << ARMREG_R7) | (1 << ARMREG_LR));
4856 ARM_POP (code, cfg->used_int_regs | (1 << ARMREG_LR));
4858 mono_add_patch_info (cfg, (guint8*) code - cfg->native_code, MONO_PATCH_INFO_METHOD_JUMP, ins->inst_p0);
4859 if (cfg->compile_aot) {
4860 ARM_LDR_IMM (code, ARMREG_IP, ARMREG_PC, 0);
4862 *(gpointer*)code = NULL;
4864 ARM_LDR_REG_REG (code, ARMREG_PC, ARMREG_PC, ARMREG_IP);
4866 code = mono_arm_patchable_b (code, ARMCOND_AL);
4870 MonoCallInst *call = (MonoCallInst*)ins;
4873 * The stack looks like the following:
4874 * <caller argument area>
4877 * <callee argument area>
4878 * Need to copy the arguments from the callee argument area to
4879 * the caller argument area, and pop the frame.
4881 if (call->stack_usage) {
4882 int i, prev_sp_offset = 0;
4884 /* Compute size of saved registers restored below */
4886 prev_sp_offset = 2 * 4;
4888 prev_sp_offset = 1 * 4;
4889 for (i = 0; i < 16; ++i) {
4890 if (cfg->used_int_regs & (1 << i))
4891 prev_sp_offset += 4;
4894 code = emit_big_add (code, ARMREG_IP, cfg->frame_reg, cfg->stack_usage + prev_sp_offset);
4896 /* Copy arguments on the stack to our argument area */
4897 for (i = 0; i < call->stack_usage; i += sizeof (mgreg_t)) {
4898 ARM_LDR_IMM (code, ARMREG_LR, ARMREG_SP, i);
4899 ARM_STR_IMM (code, ARMREG_LR, ARMREG_IP, i);
4904 * Keep in sync with mono_arch_emit_epilog
4906 g_assert (!cfg->method->save_lmf);
4908 code = emit_big_add (code, ARMREG_SP, cfg->frame_reg, cfg->stack_usage);
4910 if (cfg->used_int_regs)
4911 ARM_POP (code, cfg->used_int_regs);
4912 ARM_POP (code, (1 << ARMREG_R7) | (1 << ARMREG_LR));
4914 ARM_POP (code, cfg->used_int_regs | (1 << ARMREG_LR));
4917 mono_add_patch_info (cfg, (guint8*) code - cfg->native_code, MONO_PATCH_INFO_METHOD_JUMP, call->method);
4918 if (cfg->compile_aot) {
4919 ARM_LDR_IMM (code, ARMREG_IP, ARMREG_PC, 0);
4921 *(gpointer*)code = NULL;
4923 ARM_LDR_REG_REG (code, ARMREG_PC, ARMREG_PC, ARMREG_IP);
4925 code = mono_arm_patchable_b (code, ARMCOND_AL);
4930 /* ensure ins->sreg1 is not NULL */
4931 ARM_LDRB_IMM (code, ARMREG_LR, ins->sreg1, 0);
4934 g_assert (cfg->sig_cookie < 128);
4935 ARM_LDR_IMM (code, ARMREG_IP, cfg->frame_reg, cfg->sig_cookie);
4936 ARM_STR_IMM (code, ARMREG_IP, ins->sreg1, 0);
4945 call = (MonoCallInst*)ins;
4948 code = emit_float_args (cfg, call, code, &max_len, &offset);
4950 if (ins->flags & MONO_INST_HAS_METHOD)
4951 mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_METHOD, call->method);
4953 mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_ABS, call->fptr);
4954 code = emit_call_seq (cfg, code);
4955 ins->flags |= MONO_INST_GC_CALLSITE;
4956 ins->backend.pc_offset = code - cfg->native_code;
4957 code = emit_move_return_value (cfg, ins, code);
4963 case OP_VOIDCALL_REG:
4966 code = emit_float_args (cfg, (MonoCallInst *)ins, code, &max_len, &offset);
4968 code = emit_call_reg (code, ins->sreg1);
4969 ins->flags |= MONO_INST_GC_CALLSITE;
4970 ins->backend.pc_offset = code - cfg->native_code;
4971 code = emit_move_return_value (cfg, ins, code);
4973 case OP_FCALL_MEMBASE:
4974 case OP_LCALL_MEMBASE:
4975 case OP_VCALL_MEMBASE:
4976 case OP_VCALL2_MEMBASE:
4977 case OP_VOIDCALL_MEMBASE:
4978 case OP_CALL_MEMBASE: {
4979 gboolean imt_arg = FALSE;
4981 g_assert (ins->sreg1 != ARMREG_LR);
4982 call = (MonoCallInst*)ins;
4985 code = emit_float_args (cfg, call, code, &max_len, &offset);
4987 if (call->dynamic_imt_arg || call->method->klass->flags & TYPE_ATTRIBUTE_INTERFACE)
4989 if (!arm_is_imm12 (ins->inst_offset))
4990 code = mono_arm_emit_load_imm (code, ARMREG_IP, ins->inst_offset);
4991 #ifdef USE_JUMP_TABLES
4997 ARM_ADD_REG_IMM8 (code, ARMREG_LR, ARMREG_PC, LR_BIAS);
4999 ARM_MOV_REG_REG (code, ARMREG_LR, ARMREG_PC);
5001 if (!arm_is_imm12 (ins->inst_offset))
5002 ARM_LDR_REG_REG (code, ARMREG_PC, ins->sreg1, ARMREG_IP);
5004 ARM_LDR_IMM (code, ARMREG_PC, ins->sreg1, ins->inst_offset);
5007 * We can't embed the method in the code stream in PIC code, or
5009 * Instead, we put it in V5 in code emitted by
5010 * mono_arch_emit_imt_argument (), and embed NULL here to
5011 * signal the IMT thunk that the value is in V5.
5013 #ifdef USE_JUMP_TABLES
5014 /* In case of jumptables we always use value in V5. */
5017 if (call->dynamic_imt_arg)
5018 *((gpointer*)code) = NULL;
5020 *((gpointer*)code) = (gpointer)call->method;
5024 ins->flags |= MONO_INST_GC_CALLSITE;
5025 ins->backend.pc_offset = code - cfg->native_code;
5026 code = emit_move_return_value (cfg, ins, code);
5030 /* round the size to 8 bytes */
5031 ARM_ADD_REG_IMM8 (code, ins->dreg, ins->sreg1, 7);
5032 ARM_BIC_REG_IMM8 (code, ins->dreg, ins->dreg, 7);
5033 ARM_SUB_REG_REG (code, ARMREG_SP, ARMREG_SP, ins->dreg);
5034 /* memzero the area: dreg holds the size, sp is the pointer */
5035 if (ins->flags & MONO_INST_INIT) {
5036 guint8 *start_loop, *branch_to_cond;
5037 ARM_MOV_REG_IMM8 (code, ARMREG_LR, 0);
5038 branch_to_cond = code;
5041 ARM_STR_REG_REG (code, ARMREG_LR, ARMREG_SP, ins->dreg);
5042 arm_patch (branch_to_cond, code);
5043 /* decrement by 4 and set flags */
5044 ARM_SUBS_REG_IMM8 (code, ins->dreg, ins->dreg, sizeof (mgreg_t));
5045 ARM_B_COND (code, ARMCOND_GE, 0);
5046 arm_patch (code - 4, start_loop);
5048 ARM_MOV_REG_REG (code, ins->dreg, ARMREG_SP);
5049 if (cfg->param_area)
5050 code = emit_sub_imm (code, ARMREG_SP, ARMREG_SP, ALIGN_TO (cfg->param_area, MONO_ARCH_FRAME_ALIGNMENT));
5055 MonoInst *var = cfg->dyn_call_var;
5057 g_assert (var->opcode == OP_REGOFFSET);
5058 g_assert (arm_is_imm12 (var->inst_offset));
5060 /* lr = args buffer filled by mono_arch_get_dyn_call_args () */
5061 ARM_MOV_REG_REG( code, ARMREG_LR, ins->sreg1);
5063 ARM_MOV_REG_REG( code, ARMREG_IP, ins->sreg2);
5065 /* Save args buffer */
5066 ARM_STR_IMM (code, ARMREG_LR, var->inst_basereg, var->inst_offset);
5068 /* Set stack slots using R0 as scratch reg */
5069 /* MONO_ARCH_DYN_CALL_PARAM_AREA gives the size of stack space available */
5070 for (i = 0; i < DYN_CALL_STACK_ARGS; ++i) {
5071 ARM_LDR_IMM (code, ARMREG_R0, ARMREG_LR, (PARAM_REGS + i) * sizeof (mgreg_t));
5072 ARM_STR_IMM (code, ARMREG_R0, ARMREG_SP, i * sizeof (mgreg_t));
5075 /* Set argument registers */
5076 for (i = 0; i < PARAM_REGS; ++i)
5077 ARM_LDR_IMM (code, i, ARMREG_LR, i * sizeof (mgreg_t));
5080 ARM_MOV_REG_REG (code, ARMREG_LR, ARMREG_PC);
5081 ARM_MOV_REG_REG (code, ARMREG_PC, ARMREG_IP);
5084 ARM_LDR_IMM (code, ARMREG_IP, var->inst_basereg, var->inst_offset);
5085 ARM_STR_IMM (code, ARMREG_R0, ARMREG_IP, MONO_STRUCT_OFFSET (DynCallArgs, res));
5086 ARM_STR_IMM (code, ARMREG_R1, ARMREG_IP, MONO_STRUCT_OFFSET (DynCallArgs, res2));
5090 if (ins->sreg1 != ARMREG_R0)
5091 ARM_MOV_REG_REG (code, ARMREG_R0, ins->sreg1);
5092 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_INTERNAL_METHOD,
5093 (gpointer)"mono_arch_throw_exception");
5094 code = emit_call_seq (cfg, code);
5098 if (ins->sreg1 != ARMREG_R0)
5099 ARM_MOV_REG_REG (code, ARMREG_R0, ins->sreg1);
5100 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_INTERNAL_METHOD,
5101 (gpointer)"mono_arch_rethrow_exception");
5102 code = emit_call_seq (cfg, code);
5105 case OP_START_HANDLER: {
5106 MonoInst *spvar = mono_find_spvar_for_region (cfg, bb->region);
5109 /* Reserve a param area, see filter-stack.exe */
5110 if (cfg->param_area) {
5111 if ((i = mono_arm_is_rotated_imm8 (cfg->param_area, &rot_amount)) >= 0) {
5112 ARM_SUB_REG_IMM (code, ARMREG_SP, ARMREG_SP, i, rot_amount);
5114 code = mono_arm_emit_load_imm (code, ARMREG_IP, cfg->param_area);
5115 ARM_SUB_REG_REG (code, ARMREG_SP, ARMREG_SP, ARMREG_IP);
5119 if (arm_is_imm12 (spvar->inst_offset)) {
5120 ARM_STR_IMM (code, ARMREG_LR, spvar->inst_basereg, spvar->inst_offset);
5122 code = mono_arm_emit_load_imm (code, ARMREG_IP, spvar->inst_offset);
5123 ARM_STR_REG_REG (code, ARMREG_LR, spvar->inst_basereg, ARMREG_IP);
5127 case OP_ENDFILTER: {
5128 MonoInst *spvar = mono_find_spvar_for_region (cfg, bb->region);
5131 /* Free the param area */
5132 if (cfg->param_area) {
5133 if ((i = mono_arm_is_rotated_imm8 (cfg->param_area, &rot_amount)) >= 0) {
5134 ARM_ADD_REG_IMM (code, ARMREG_SP, ARMREG_SP, i, rot_amount);
5136 code = mono_arm_emit_load_imm (code, ARMREG_IP, cfg->param_area);
5137 ARM_ADD_REG_REG (code, ARMREG_SP, ARMREG_SP, ARMREG_IP);
5141 if (ins->sreg1 != ARMREG_R0)
5142 ARM_MOV_REG_REG (code, ARMREG_R0, ins->sreg1);
5143 if (arm_is_imm12 (spvar->inst_offset)) {
5144 ARM_LDR_IMM (code, ARMREG_IP, spvar->inst_basereg, spvar->inst_offset);
5146 g_assert (ARMREG_IP != spvar->inst_basereg);
5147 code = mono_arm_emit_load_imm (code, ARMREG_IP, spvar->inst_offset);
5148 ARM_LDR_REG_REG (code, ARMREG_IP, spvar->inst_basereg, ARMREG_IP);
5150 ARM_MOV_REG_REG (code, ARMREG_PC, ARMREG_IP);
5153 case OP_ENDFINALLY: {
5154 MonoInst *spvar = mono_find_spvar_for_region (cfg, bb->region);
5157 /* Free the param area */
5158 if (cfg->param_area) {
5159 if ((i = mono_arm_is_rotated_imm8 (cfg->param_area, &rot_amount)) >= 0) {
5160 ARM_ADD_REG_IMM (code, ARMREG_SP, ARMREG_SP, i, rot_amount);
5162 code = mono_arm_emit_load_imm (code, ARMREG_IP, cfg->param_area);
5163 ARM_ADD_REG_REG (code, ARMREG_SP, ARMREG_SP, ARMREG_IP);
5167 if (arm_is_imm12 (spvar->inst_offset)) {
5168 ARM_LDR_IMM (code, ARMREG_IP, spvar->inst_basereg, spvar->inst_offset);
5170 g_assert (ARMREG_IP != spvar->inst_basereg);
5171 code = mono_arm_emit_load_imm (code, ARMREG_IP, spvar->inst_offset);
5172 ARM_LDR_REG_REG (code, ARMREG_IP, spvar->inst_basereg, ARMREG_IP);
5174 ARM_MOV_REG_REG (code, ARMREG_PC, ARMREG_IP);
5177 case OP_CALL_HANDLER:
5178 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_BB, ins->inst_target_bb);
5179 code = mono_arm_patchable_bl (code, ARMCOND_AL);
5180 mono_cfg_add_try_hole (cfg, ins->inst_eh_block, code, bb);
5183 ins->inst_c0 = code - cfg->native_code;
5186 /*if (ins->inst_target_bb->native_offset) {
5188 //x86_jump_code (code, cfg->native_code + ins->inst_target_bb->native_offset);
5190 mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_BB, ins->inst_target_bb);
5191 code = mono_arm_patchable_b (code, ARMCOND_AL);
5195 ARM_MOV_REG_REG (code, ARMREG_PC, ins->sreg1);
5199 * In the normal case we have:
5200 * ldr pc, [pc, ins->sreg1 << 2]
5203 * ldr lr, [pc, ins->sreg1 << 2]
5205 * After follows the data.
5206 * FIXME: add aot support.
5208 mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_SWITCH, ins->inst_p0);
5209 #ifdef USE_JUMP_TABLES
5211 gpointer *jte = mono_jumptable_add_entries (GPOINTER_TO_INT (ins->klass));
5212 code = mono_arm_load_jumptable_entry_addr (code, jte, ARMREG_IP);
5213 ARM_LDR_REG_REG_SHIFT (code, ARMREG_PC, ARMREG_IP, ins->sreg1, ARMSHIFT_LSL, 2);
5217 max_len += 4 * GPOINTER_TO_INT (ins->klass);
5218 if (offset + max_len > (cfg->code_size - 16)) {
5219 cfg->code_size += max_len;
5220 cfg->code_size *= 2;
5221 cfg->native_code = g_realloc (cfg->native_code, cfg->code_size);
5222 code = cfg->native_code + offset;
5224 ARM_LDR_REG_REG_SHIFT (code, ARMREG_PC, ARMREG_PC, ins->sreg1, ARMSHIFT_LSL, 2);
5226 code += 4 * GPOINTER_TO_INT (ins->klass);
5231 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 0, ARMCOND_NE);
5232 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_EQ);
5236 ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
5237 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_LT);
5241 ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
5242 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_LO);
5246 ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
5247 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_GT);
5251 ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
5252 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_HI);
5255 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_NE);
5256 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 0, ARMCOND_EQ);
5259 ARM_MOV_REG_IMM8 (code, ins->dreg, 1);
5260 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 0, ARMCOND_LT);
5263 ARM_MOV_REG_IMM8 (code, ins->dreg, 1);
5264 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 0, ARMCOND_GT);
5267 ARM_MOV_REG_IMM8 (code, ins->dreg, 1);
5268 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 0, ARMCOND_LO);
5271 ARM_MOV_REG_IMM8 (code, ins->dreg, 1);
5272 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 0, ARMCOND_HI);
5274 case OP_COND_EXC_EQ:
5275 case OP_COND_EXC_NE_UN:
5276 case OP_COND_EXC_LT:
5277 case OP_COND_EXC_LT_UN:
5278 case OP_COND_EXC_GT:
5279 case OP_COND_EXC_GT_UN:
5280 case OP_COND_EXC_GE:
5281 case OP_COND_EXC_GE_UN:
5282 case OP_COND_EXC_LE:
5283 case OP_COND_EXC_LE_UN:
5284 EMIT_COND_SYSTEM_EXCEPTION (ins->opcode - OP_COND_EXC_EQ, ins->inst_p1);
5286 case OP_COND_EXC_IEQ:
5287 case OP_COND_EXC_INE_UN:
5288 case OP_COND_EXC_ILT:
5289 case OP_COND_EXC_ILT_UN:
5290 case OP_COND_EXC_IGT:
5291 case OP_COND_EXC_IGT_UN:
5292 case OP_COND_EXC_IGE:
5293 case OP_COND_EXC_IGE_UN:
5294 case OP_COND_EXC_ILE:
5295 case OP_COND_EXC_ILE_UN:
5296 EMIT_COND_SYSTEM_EXCEPTION (ins->opcode - OP_COND_EXC_IEQ, ins->inst_p1);
5299 case OP_COND_EXC_IC:
5300 EMIT_COND_SYSTEM_EXCEPTION_FLAGS (ARMCOND_CS, ins->inst_p1);
5302 case OP_COND_EXC_OV:
5303 case OP_COND_EXC_IOV:
5304 EMIT_COND_SYSTEM_EXCEPTION_FLAGS (ARMCOND_VS, ins->inst_p1);
5306 case OP_COND_EXC_NC:
5307 case OP_COND_EXC_INC:
5308 EMIT_COND_SYSTEM_EXCEPTION_FLAGS (ARMCOND_CC, ins->inst_p1);
5310 case OP_COND_EXC_NO:
5311 case OP_COND_EXC_INO:
5312 EMIT_COND_SYSTEM_EXCEPTION_FLAGS (ARMCOND_VC, ins->inst_p1);
5324 EMIT_COND_BRANCH (ins, ins->opcode - OP_IBEQ);
5327 /* floating point opcodes */
5329 if (cfg->compile_aot) {
5330 ARM_FLDD (code, ins->dreg, ARMREG_PC, 0);
5332 *(guint32*)code = ((guint32*)(ins->inst_p0))[0];
5334 *(guint32*)code = ((guint32*)(ins->inst_p0))[1];
5337 /* FIXME: we can optimize the imm load by dealing with part of
5338 * the displacement in LDFD (aligning to 512).
5340 code = mono_arm_emit_load_imm (code, ARMREG_LR, (guint32)ins->inst_p0);
5341 ARM_FLDD (code, ins->dreg, ARMREG_LR, 0);
5345 if (cfg->compile_aot) {
5346 ARM_FLDS (code, ins->dreg, ARMREG_PC, 0);
5348 *(guint32*)code = ((guint32*)(ins->inst_p0))[0];
5350 ARM_CVTS (code, ins->dreg, ins->dreg);
5352 code = mono_arm_emit_load_imm (code, ARMREG_LR, (guint32)ins->inst_p0);
5353 ARM_FLDS (code, ins->dreg, ARMREG_LR, 0);
5354 ARM_CVTS (code, ins->dreg, ins->dreg);
5357 case OP_STORER8_MEMBASE_REG:
5358 /* This is generated by the local regalloc pass which runs after the lowering pass */
5359 if (!arm_is_fpimm8 (ins->inst_offset)) {
5360 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_offset);
5361 ARM_ADD_REG_REG (code, ARMREG_LR, ARMREG_LR, ins->inst_destbasereg);
5362 ARM_FSTD (code, ins->sreg1, ARMREG_LR, 0);
5364 ARM_FSTD (code, ins->sreg1, ins->inst_destbasereg, ins->inst_offset);
5367 case OP_LOADR8_MEMBASE:
5368 /* This is generated by the local regalloc pass which runs after the lowering pass */
5369 if (!arm_is_fpimm8 (ins->inst_offset)) {
5370 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_offset);
5371 ARM_ADD_REG_REG (code, ARMREG_LR, ARMREG_LR, ins->inst_basereg);
5372 ARM_FLDD (code, ins->dreg, ARMREG_LR, 0);
5374 ARM_FLDD (code, ins->dreg, ins->inst_basereg, ins->inst_offset);
5377 case OP_STORER4_MEMBASE_REG:
5378 g_assert (arm_is_fpimm8 (ins->inst_offset));
5379 code = mono_arm_emit_vfp_scratch_save (cfg, code, vfp_scratch1);
5380 ARM_CVTD (code, vfp_scratch1, ins->sreg1);
5381 ARM_FSTS (code, vfp_scratch1, ins->inst_destbasereg, ins->inst_offset);
5382 code = mono_arm_emit_vfp_scratch_restore (cfg, code, vfp_scratch1);
5384 case OP_LOADR4_MEMBASE:
5385 g_assert (arm_is_fpimm8 (ins->inst_offset));
5386 code = mono_arm_emit_vfp_scratch_save (cfg, code, vfp_scratch1);
5387 ARM_FLDS (code, vfp_scratch1, ins->inst_basereg, ins->inst_offset);
5388 ARM_CVTS (code, ins->dreg, vfp_scratch1);
5389 code = mono_arm_emit_vfp_scratch_restore (cfg, code, vfp_scratch1);
5391 case OP_ICONV_TO_R_UN: {
5392 g_assert_not_reached ();
5395 case OP_ICONV_TO_R4:
5396 code = mono_arm_emit_vfp_scratch_save (cfg, code, vfp_scratch1);
5397 ARM_FMSR (code, vfp_scratch1, ins->sreg1);
5398 ARM_FSITOS (code, vfp_scratch1, vfp_scratch1);
5399 ARM_CVTS (code, ins->dreg, vfp_scratch1);
5400 code = mono_arm_emit_vfp_scratch_restore (cfg, code, vfp_scratch1);
5402 case OP_ICONV_TO_R8:
5403 code = mono_arm_emit_vfp_scratch_save (cfg, code, vfp_scratch1);
5404 ARM_FMSR (code, vfp_scratch1, ins->sreg1);
5405 ARM_FSITOD (code, ins->dreg, vfp_scratch1);
5406 code = mono_arm_emit_vfp_scratch_restore (cfg, code, vfp_scratch1);
5410 MonoType *sig_ret = mini_type_get_underlying_type (NULL, mono_method_signature (cfg->method)->ret);
5411 if (sig_ret->type == MONO_TYPE_R4) {
5412 ARM_CVTD (code, ARM_VFP_F0, ins->sreg1);
5414 if (!IS_HARD_FLOAT) {
5415 ARM_FMRS (code, ARMREG_R0, ARM_VFP_F0);
5418 if (IS_HARD_FLOAT) {
5419 ARM_CPYD (code, ARM_VFP_D0, ins->sreg1);
5421 ARM_FMRRD (code, ARMREG_R0, ARMREG_R1, ins->sreg1);
5426 case OP_FCONV_TO_I1:
5427 code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 1, TRUE);
5429 case OP_FCONV_TO_U1:
5430 code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 1, FALSE);
5432 case OP_FCONV_TO_I2:
5433 code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 2, TRUE);
5435 case OP_FCONV_TO_U2:
5436 code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 2, FALSE);
5438 case OP_FCONV_TO_I4:
5440 code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 4, TRUE);
5442 case OP_FCONV_TO_U4:
5444 code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 4, FALSE);
5446 case OP_FCONV_TO_I8:
5447 case OP_FCONV_TO_U8:
5448 g_assert_not_reached ();
5449 /* Implemented as helper calls */
5451 case OP_LCONV_TO_R_UN:
5452 g_assert_not_reached ();
5453 /* Implemented as helper calls */
5455 case OP_LCONV_TO_OVF_I4_2: {
5456 guint8 *high_bit_not_set, *valid_negative, *invalid_negative, *valid_positive;
5458 * Valid ints: 0xFFFFFFFF:0x80000000 to 0x00000000:0x7FFFFFFF
5461 ARM_CMP_REG_IMM8 (code, ins->sreg1, 0);
5462 high_bit_not_set = code;
5463 ARM_B_COND (code, ARMCOND_GE, 0); /*branch if bit 31 of the lower part is not set*/
5465 ARM_CMN_REG_IMM8 (code, ins->sreg2, 1); /*This have the same effect as CMP reg, 0xFFFFFFFF */
5466 valid_negative = code;
5467 ARM_B_COND (code, ARMCOND_EQ, 0); /*branch if upper part == 0xFFFFFFFF (lower part has bit 31 set) */
5468 invalid_negative = code;
5469 ARM_B_COND (code, ARMCOND_AL, 0);
5471 arm_patch (high_bit_not_set, code);
5473 ARM_CMP_REG_IMM8 (code, ins->sreg2, 0);
5474 valid_positive = code;
5475 ARM_B_COND (code, ARMCOND_EQ, 0); /*branch if upper part == 0 (lower part has bit 31 clear)*/
5477 arm_patch (invalid_negative, code);
5478 EMIT_COND_SYSTEM_EXCEPTION_FLAGS (ARMCOND_AL, "OverflowException");
5480 arm_patch (valid_negative, code);
5481 arm_patch (valid_positive, code);
5483 if (ins->dreg != ins->sreg1)
5484 ARM_MOV_REG_REG (code, ins->dreg, ins->sreg1);
5488 ARM_VFP_ADDD (code, ins->dreg, ins->sreg1, ins->sreg2);
5491 ARM_VFP_SUBD (code, ins->dreg, ins->sreg1, ins->sreg2);
5494 ARM_VFP_MULD (code, ins->dreg, ins->sreg1, ins->sreg2);
5497 ARM_VFP_DIVD (code, ins->dreg, ins->sreg1, ins->sreg2);
5500 ARM_NEGD (code, ins->dreg, ins->sreg1);
5504 g_assert_not_reached ();
5508 ARM_CMPD (code, ins->sreg1, ins->sreg2);
5514 ARM_CMPD (code, ins->sreg1, ins->sreg2);
5517 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 0, ARMCOND_NE);
5518 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_EQ);
5522 ARM_CMPD (code, ins->sreg1, ins->sreg2);
5525 ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
5526 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_MI);
5530 ARM_CMPD (code, ins->sreg1, ins->sreg2);
5533 ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
5534 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_MI);
5535 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_VS);
5539 ARM_CMPD (code, ins->sreg2, ins->sreg1);
5542 ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
5543 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_MI);
5547 ARM_CMPD (code, ins->sreg2, ins->sreg1);
5550 ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
5551 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_MI);
5552 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_VS);
5556 ARM_CMPD (code, ins->sreg1, ins->sreg2);
5559 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_NE);
5560 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 0, ARMCOND_EQ);
5564 ARM_CMPD (code, ins->sreg1, ins->sreg2);
5567 ARM_MOV_REG_IMM8 (code, ins->dreg, 1);
5568 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 0, ARMCOND_MI);
5572 ARM_CMPD (code, ins->sreg2, ins->sreg1);
5575 ARM_MOV_REG_IMM8 (code, ins->dreg, 1);
5576 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 0, ARMCOND_MI);
5579 /* ARM FPA flags table:
5580 * N Less than ARMCOND_MI
5581 * Z Equal ARMCOND_EQ
5582 * C Greater Than or Equal ARMCOND_CS
5583 * V Unordered ARMCOND_VS
5586 EMIT_COND_BRANCH (ins, OP_IBEQ - OP_IBEQ);
5589 EMIT_COND_BRANCH (ins, OP_IBNE_UN - OP_IBEQ);
5592 EMIT_COND_BRANCH_FLAGS (ins, ARMCOND_MI); /* N set */
5595 EMIT_COND_BRANCH_FLAGS (ins, ARMCOND_VS); /* V set */
5596 EMIT_COND_BRANCH_FLAGS (ins, ARMCOND_MI); /* N set */
5602 g_assert_not_reached ();
5606 EMIT_COND_BRANCH_FLAGS (ins, ARMCOND_GE);
5608 /* FPA requires EQ even though the docs suggest that just CS is enough */
5609 EMIT_COND_BRANCH_FLAGS (ins, ARMCOND_EQ);
5610 EMIT_COND_BRANCH_FLAGS (ins, ARMCOND_CS);
5614 EMIT_COND_BRANCH_FLAGS (ins, ARMCOND_VS); /* V set */
5615 EMIT_COND_BRANCH_FLAGS (ins, ARMCOND_GE);
5620 code = mono_arm_emit_vfp_scratch_save (cfg, code, vfp_scratch1);
5621 code = mono_arm_emit_vfp_scratch_save (cfg, code, vfp_scratch2);
5623 #ifdef USE_JUMP_TABLES
5625 gpointer *jte = mono_jumptable_add_entries (2);
5626 jte [0] = GUINT_TO_POINTER (0xffffffff);
5627 jte [1] = GUINT_TO_POINTER (0x7fefffff);
5628 code = mono_arm_load_jumptable_entry_addr (code, jte, ARMREG_IP);
5629 ARM_FLDD (code, vfp_scratch1, ARMREG_IP, 0);
5632 ARM_ABSD (code, vfp_scratch2, ins->sreg1);
5633 ARM_FLDD (code, vfp_scratch1, ARMREG_PC, 0);
5635 *(guint32*)code = 0xffffffff;
5637 *(guint32*)code = 0x7fefffff;
5640 ARM_CMPD (code, vfp_scratch2, vfp_scratch1);
5642 EMIT_COND_SYSTEM_EXCEPTION_FLAGS (ARMCOND_GT, "ArithmeticException");
5643 ARM_CMPD (code, ins->sreg1, ins->sreg1);
5645 EMIT_COND_SYSTEM_EXCEPTION_FLAGS (ARMCOND_VS, "ArithmeticException");
5646 ARM_CPYD (code, ins->dreg, ins->sreg1);
5648 code = mono_arm_emit_vfp_scratch_restore (cfg, code, vfp_scratch1);
5649 code = mono_arm_emit_vfp_scratch_restore (cfg, code, vfp_scratch2);
5654 case OP_GC_LIVENESS_DEF:
5655 case OP_GC_LIVENESS_USE:
5656 case OP_GC_PARAM_SLOT_LIVENESS_DEF:
5657 ins->backend.pc_offset = code - cfg->native_code;
5659 case OP_GC_SPILL_SLOT_LIVENESS_DEF:
5660 ins->backend.pc_offset = code - cfg->native_code;
5661 bb->spill_slot_defs = g_slist_prepend_mempool (cfg->mempool, bb->spill_slot_defs, ins);
5665 g_warning ("unknown opcode %s in %s()\n", mono_inst_name (ins->opcode), __FUNCTION__);
5666 g_assert_not_reached ();
5669 if ((cfg->opt & MONO_OPT_BRANCH) && ((code - cfg->native_code - offset) > max_len)) {
5670 g_warning ("wrong maximal instruction length of instruction %s (expected %d, got %d)",
5671 mono_inst_name (ins->opcode), max_len, code - cfg->native_code - offset);
5672 g_assert_not_reached ();
5678 last_offset = offset;
5681 cfg->code_len = code - cfg->native_code;
5684 #endif /* DISABLE_JIT */
5686 #ifdef HAVE_AEABI_READ_TP
5687 void __aeabi_read_tp (void);
5691 mono_arch_register_lowlevel_calls (void)
5693 /* The signature doesn't matter */
5694 mono_register_jit_icall (mono_arm_throw_exception, "mono_arm_throw_exception", mono_create_icall_signature ("void"), TRUE);
5695 mono_register_jit_icall (mono_arm_throw_exception_by_token, "mono_arm_throw_exception_by_token", mono_create_icall_signature ("void"), TRUE);
5697 #ifndef MONO_CROSS_COMPILE
5698 #ifdef HAVE_AEABI_READ_TP
5699 mono_register_jit_icall (__aeabi_read_tp, "__aeabi_read_tp", mono_create_icall_signature ("void"), TRUE);
5704 #define patch_lis_ori(ip,val) do {\
5705 guint16 *__lis_ori = (guint16*)(ip); \
5706 __lis_ori [1] = (((guint32)(val)) >> 16) & 0xffff; \
5707 __lis_ori [3] = ((guint32)(val)) & 0xffff; \
5711 mono_arch_patch_code (MonoMethod *method, MonoDomain *domain, guint8 *code, MonoJumpInfo *ji, MonoCodeManager *dyn_code_mp, gboolean run_cctors)
5713 MonoJumpInfo *patch_info;
5714 gboolean compile_aot = !run_cctors;
5716 for (patch_info = ji; patch_info; patch_info = patch_info->next) {
5717 unsigned char *ip = patch_info->ip.i + code;
5718 const unsigned char *target;
5720 if (patch_info->type == MONO_PATCH_INFO_SWITCH && !compile_aot) {
5721 #ifdef USE_JUMP_TABLES
5722 gpointer *jt = mono_jumptable_get_entry (ip);
5724 gpointer *jt = (gpointer*)(ip + 8);
5727 /* jt is the inlined jump table, 2 instructions after ip
5728 * In the normal case we store the absolute addresses,
5729 * otherwise the displacements.
5731 for (i = 0; i < patch_info->data.table->table_size; i++)
5732 jt [i] = code + (int)patch_info->data.table->table [i];
5737 switch (patch_info->type) {
5738 case MONO_PATCH_INFO_BB:
5739 case MONO_PATCH_INFO_LABEL:
5742 /* No need to patch these */
5747 target = mono_resolve_patch_target (method, domain, code, patch_info, run_cctors);
5749 switch (patch_info->type) {
5750 case MONO_PATCH_INFO_IP:
5751 g_assert_not_reached ();
5752 patch_lis_ori (ip, ip);
5754 case MONO_PATCH_INFO_METHOD_REL:
5755 g_assert_not_reached ();
5756 *((gpointer *)(ip)) = code + patch_info->data.offset;
5758 case MONO_PATCH_INFO_METHODCONST:
5759 case MONO_PATCH_INFO_CLASS:
5760 case MONO_PATCH_INFO_IMAGE:
5761 case MONO_PATCH_INFO_FIELD:
5762 case MONO_PATCH_INFO_VTABLE:
5763 case MONO_PATCH_INFO_IID:
5764 case MONO_PATCH_INFO_SFLDA:
5765 case MONO_PATCH_INFO_LDSTR:
5766 case MONO_PATCH_INFO_TYPE_FROM_HANDLE:
5767 case MONO_PATCH_INFO_LDTOKEN:
5768 g_assert_not_reached ();
5769 /* from OP_AOTCONST : lis + ori */
5770 patch_lis_ori (ip, target);
5772 case MONO_PATCH_INFO_R4:
5773 case MONO_PATCH_INFO_R8:
5774 g_assert_not_reached ();
5775 *((gconstpointer *)(ip + 2)) = patch_info->data.target;
5777 case MONO_PATCH_INFO_EXC_NAME:
5778 g_assert_not_reached ();
5779 *((gconstpointer *)(ip + 1)) = patch_info->data.name;
5781 case MONO_PATCH_INFO_NONE:
5782 case MONO_PATCH_INFO_BB_OVF:
5783 case MONO_PATCH_INFO_EXC_OVF:
5784 /* everything is dealt with at epilog output time */
5789 arm_patch_general (domain, ip, target, dyn_code_mp);
5796 * Stack frame layout:
5798 * ------------------- fp
5799 * MonoLMF structure or saved registers
5800 * -------------------
5802 * -------------------
5804 * -------------------
5805 * optional 8 bytes for tracing
5806 * -------------------
5807 * param area size is cfg->param_area
5808 * ------------------- sp
5811 mono_arch_emit_prolog (MonoCompile *cfg)
5813 MonoMethod *method = cfg->method;
5815 MonoMethodSignature *sig;
5817 int alloc_size, orig_alloc_size, pos, max_offset, i, rot_amount;
5822 int prev_sp_offset, reg_offset;
5824 if (mono_jit_trace_calls != NULL && mono_trace_eval (method))
5827 sig = mono_method_signature (method);
5828 cfg->code_size = 256 + sig->param_count * 64;
5829 code = cfg->native_code = g_malloc (cfg->code_size);
5831 mono_emit_unwind_op_def_cfa (cfg, code, ARMREG_SP, 0);
5833 alloc_size = cfg->stack_offset;
5839 * The iphone uses R7 as the frame pointer, and it points at the saved
5844 * We can't use r7 as a frame pointer since it points into the middle of
5845 * the frame, so we keep using our own frame pointer.
5846 * FIXME: Optimize this.
5848 ARM_PUSH (code, (1 << ARMREG_R7) | (1 << ARMREG_LR));
5849 ARM_MOV_REG_REG (code, ARMREG_R7, ARMREG_SP);
5850 prev_sp_offset += 8; /* r7 and lr */
5851 mono_emit_unwind_op_def_cfa_offset (cfg, code, prev_sp_offset);
5852 mono_emit_unwind_op_offset (cfg, code, ARMREG_R7, (- prev_sp_offset) + 0);
5855 if (!method->save_lmf) {
5857 /* No need to push LR again */
5858 if (cfg->used_int_regs)
5859 ARM_PUSH (code, cfg->used_int_regs);
5861 ARM_PUSH (code, cfg->used_int_regs | (1 << ARMREG_LR));
5862 prev_sp_offset += 4;
5864 for (i = 0; i < 16; ++i) {
5865 if (cfg->used_int_regs & (1 << i))
5866 prev_sp_offset += 4;
5868 mono_emit_unwind_op_def_cfa_offset (cfg, code, prev_sp_offset);
5870 for (i = 0; i < 16; ++i) {
5871 if ((cfg->used_int_regs & (1 << i))) {
5872 mono_emit_unwind_op_offset (cfg, code, i, (- prev_sp_offset) + reg_offset);
5873 mini_gc_set_slot_type_from_cfa (cfg, (- prev_sp_offset) + reg_offset, SLOT_NOREF);
5878 mono_emit_unwind_op_offset (cfg, code, ARMREG_LR, -4);
5879 mini_gc_set_slot_type_from_cfa (cfg, -4, SLOT_NOREF);
5881 mono_emit_unwind_op_offset (cfg, code, ARMREG_LR, -4);
5882 mini_gc_set_slot_type_from_cfa (cfg, -4, SLOT_NOREF);
5885 ARM_MOV_REG_REG (code, ARMREG_IP, ARMREG_SP);
5886 ARM_PUSH (code, 0x5ff0);
5887 prev_sp_offset += 4 * 10; /* all but r0-r3, sp and pc */
5888 mono_emit_unwind_op_def_cfa_offset (cfg, code, prev_sp_offset);
5890 for (i = 0; i < 16; ++i) {
5891 if ((i > ARMREG_R3) && (i != ARMREG_SP) && (i != ARMREG_PC)) {
5892 /* The original r7 is saved at the start */
5893 if (!(iphone_abi && i == ARMREG_R7))
5894 mono_emit_unwind_op_offset (cfg, code, i, (- prev_sp_offset) + reg_offset);
5898 g_assert (reg_offset == 4 * 10);
5899 pos += sizeof (MonoLMF) - (4 * 10);
5903 orig_alloc_size = alloc_size;
5904 // align to MONO_ARCH_FRAME_ALIGNMENT bytes
5905 if (alloc_size & (MONO_ARCH_FRAME_ALIGNMENT - 1)) {
5906 alloc_size += MONO_ARCH_FRAME_ALIGNMENT - 1;
5907 alloc_size &= ~(MONO_ARCH_FRAME_ALIGNMENT - 1);
5910 /* the stack used in the pushed regs */
5911 if (prev_sp_offset & 4)
5913 cfg->stack_usage = alloc_size;
5915 if ((i = mono_arm_is_rotated_imm8 (alloc_size, &rot_amount)) >= 0) {
5916 ARM_SUB_REG_IMM (code, ARMREG_SP, ARMREG_SP, i, rot_amount);
5918 code = mono_arm_emit_load_imm (code, ARMREG_IP, alloc_size);
5919 ARM_SUB_REG_REG (code, ARMREG_SP, ARMREG_SP, ARMREG_IP);
5921 mono_emit_unwind_op_def_cfa_offset (cfg, code, prev_sp_offset + alloc_size);
5923 if (cfg->frame_reg != ARMREG_SP) {
5924 ARM_MOV_REG_REG (code, cfg->frame_reg, ARMREG_SP);
5925 mono_emit_unwind_op_def_cfa_reg (cfg, code, cfg->frame_reg);
5927 //g_print ("prev_sp_offset: %d, alloc_size:%d\n", prev_sp_offset, alloc_size);
5928 prev_sp_offset += alloc_size;
5930 for (i = 0; i < alloc_size - orig_alloc_size; i += 4)
5931 mini_gc_set_slot_type_from_cfa (cfg, (- prev_sp_offset) + orig_alloc_size + i, SLOT_NOREF);
5933 /* compute max_offset in order to use short forward jumps
5934 * we could skip doing it on arm because the immediate displacement
5935 * for jumps is large enough, it may be useful later for constant pools
5938 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
5939 MonoInst *ins = bb->code;
5940 bb->max_offset = max_offset;
5942 if (cfg->prof_options & MONO_PROFILE_COVERAGE)
5945 MONO_BB_FOR_EACH_INS (bb, ins)
5946 max_offset += ((guint8 *)ins_get_spec (ins->opcode))[MONO_INST_LEN];
5949 /* store runtime generic context */
5950 if (cfg->rgctx_var) {
5951 MonoInst *ins = cfg->rgctx_var;
5953 g_assert (ins->opcode == OP_REGOFFSET);
5955 if (arm_is_imm12 (ins->inst_offset)) {
5956 ARM_STR_IMM (code, MONO_ARCH_RGCTX_REG, ins->inst_basereg, ins->inst_offset);
5958 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_offset);
5959 ARM_STR_REG_REG (code, MONO_ARCH_RGCTX_REG, ins->inst_basereg, ARMREG_LR);
5963 /* load arguments allocated to register from the stack */
5966 cinfo = get_call_info (cfg->generic_sharing_context, NULL, sig);
5968 if (cinfo->vtype_retaddr) {
5969 ArgInfo *ainfo = &cinfo->ret;
5970 inst = cfg->vret_addr;
5971 g_assert (arm_is_imm12 (inst->inst_offset));
5972 ARM_STR_IMM (code, ainfo->reg, inst->inst_basereg, inst->inst_offset);
5975 if (sig->call_convention == MONO_CALL_VARARG) {
5976 ArgInfo *cookie = &cinfo->sig_cookie;
5978 /* Save the sig cookie address */
5979 g_assert (cookie->storage == RegTypeBase);
5981 g_assert (arm_is_imm12 (prev_sp_offset + cookie->offset));
5982 g_assert (arm_is_imm12 (cfg->sig_cookie));
5983 ARM_ADD_REG_IMM8 (code, ARMREG_IP, cfg->frame_reg, prev_sp_offset + cookie->offset);
5984 ARM_STR_IMM (code, ARMREG_IP, cfg->frame_reg, cfg->sig_cookie);
5987 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
5988 ArgInfo *ainfo = cinfo->args + i;
5989 inst = cfg->args [pos];
5991 if (cfg->verbose_level > 2)
5992 g_print ("Saving argument %d (type: %d)\n", i, ainfo->storage);
5993 if (inst->opcode == OP_REGVAR) {
5994 if (ainfo->storage == RegTypeGeneral)
5995 ARM_MOV_REG_REG (code, inst->dreg, ainfo->reg);
5996 else if (ainfo->storage == RegTypeFP) {
5997 g_assert_not_reached ();
5998 } else if (ainfo->storage == RegTypeBase) {
5999 if (arm_is_imm12 (prev_sp_offset + ainfo->offset)) {
6000 ARM_LDR_IMM (code, inst->dreg, ARMREG_SP, (prev_sp_offset + ainfo->offset));
6002 code = mono_arm_emit_load_imm (code, ARMREG_IP, prev_sp_offset + ainfo->offset);
6003 ARM_LDR_REG_REG (code, inst->dreg, ARMREG_SP, ARMREG_IP);
6006 g_assert_not_reached ();
6008 if (cfg->verbose_level > 2)
6009 g_print ("Argument %d assigned to register %s\n", pos, mono_arch_regname (inst->dreg));
6011 /* the argument should be put on the stack: FIXME handle size != word */
6012 if (ainfo->storage == RegTypeGeneral || ainfo->storage == RegTypeIRegPair || ainfo->storage == RegTypeGSharedVtInReg) {
6013 switch (ainfo->size) {
6015 if (arm_is_imm12 (inst->inst_offset))
6016 ARM_STRB_IMM (code, ainfo->reg, inst->inst_basereg, inst->inst_offset);
6018 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
6019 ARM_STRB_REG_REG (code, ainfo->reg, inst->inst_basereg, ARMREG_IP);
6023 if (arm_is_imm8 (inst->inst_offset)) {
6024 ARM_STRH_IMM (code, ainfo->reg, inst->inst_basereg, inst->inst_offset);
6026 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
6027 ARM_STRH_REG_REG (code, ainfo->reg, inst->inst_basereg, ARMREG_IP);
6031 if (arm_is_imm12 (inst->inst_offset)) {
6032 ARM_STR_IMM (code, ainfo->reg, inst->inst_basereg, inst->inst_offset);
6034 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
6035 ARM_STR_REG_REG (code, ainfo->reg, inst->inst_basereg, ARMREG_IP);
6037 if (arm_is_imm12 (inst->inst_offset + 4)) {
6038 ARM_STR_IMM (code, ainfo->reg + 1, inst->inst_basereg, inst->inst_offset + 4);
6040 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset + 4);
6041 ARM_STR_REG_REG (code, ainfo->reg + 1, inst->inst_basereg, ARMREG_IP);
6045 if (arm_is_imm12 (inst->inst_offset)) {
6046 ARM_STR_IMM (code, ainfo->reg, inst->inst_basereg, inst->inst_offset);
6048 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
6049 ARM_STR_REG_REG (code, ainfo->reg, inst->inst_basereg, ARMREG_IP);
6053 } else if (ainfo->storage == RegTypeBaseGen) {
6054 if (arm_is_imm12 (prev_sp_offset + ainfo->offset)) {
6055 ARM_LDR_IMM (code, ARMREG_LR, ARMREG_SP, (prev_sp_offset + ainfo->offset));
6057 code = mono_arm_emit_load_imm (code, ARMREG_IP, prev_sp_offset + ainfo->offset);
6058 ARM_LDR_REG_REG (code, ARMREG_LR, ARMREG_SP, ARMREG_IP);
6060 if (arm_is_imm12 (inst->inst_offset + 4)) {
6061 ARM_STR_IMM (code, ARMREG_LR, inst->inst_basereg, inst->inst_offset + 4);
6062 ARM_STR_IMM (code, ARMREG_R3, inst->inst_basereg, inst->inst_offset);
6064 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset + 4);
6065 ARM_STR_REG_REG (code, ARMREG_LR, inst->inst_basereg, ARMREG_IP);
6066 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
6067 ARM_STR_REG_REG (code, ARMREG_R3, inst->inst_basereg, ARMREG_IP);
6069 } else if (ainfo->storage == RegTypeBase || ainfo->storage == RegTypeGSharedVtOnStack) {
6070 if (arm_is_imm12 (prev_sp_offset + ainfo->offset)) {
6071 ARM_LDR_IMM (code, ARMREG_LR, ARMREG_SP, (prev_sp_offset + ainfo->offset));
6073 code = mono_arm_emit_load_imm (code, ARMREG_IP, prev_sp_offset + ainfo->offset);
6074 ARM_LDR_REG_REG (code, ARMREG_LR, ARMREG_SP, ARMREG_IP);
6077 switch (ainfo->size) {
6079 if (arm_is_imm8 (inst->inst_offset)) {
6080 ARM_STRB_IMM (code, ARMREG_LR, inst->inst_basereg, inst->inst_offset);
6082 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
6083 ARM_STRB_REG_REG (code, ARMREG_LR, inst->inst_basereg, ARMREG_IP);
6087 if (arm_is_imm8 (inst->inst_offset)) {
6088 ARM_STRH_IMM (code, ARMREG_LR, inst->inst_basereg, inst->inst_offset);
6090 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
6091 ARM_STRH_REG_REG (code, ARMREG_LR, inst->inst_basereg, ARMREG_IP);
6095 if (arm_is_imm12 (inst->inst_offset)) {
6096 ARM_STR_IMM (code, ARMREG_LR, inst->inst_basereg, inst->inst_offset);
6098 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
6099 ARM_STR_REG_REG (code, ARMREG_LR, inst->inst_basereg, ARMREG_IP);
6101 if (arm_is_imm12 (prev_sp_offset + ainfo->offset + 4)) {
6102 ARM_LDR_IMM (code, ARMREG_LR, ARMREG_SP, (prev_sp_offset + ainfo->offset + 4));
6104 code = mono_arm_emit_load_imm (code, ARMREG_IP, prev_sp_offset + ainfo->offset + 4);
6105 ARM_LDR_REG_REG (code, ARMREG_LR, ARMREG_SP, ARMREG_IP);
6107 if (arm_is_imm12 (inst->inst_offset + 4)) {
6108 ARM_STR_IMM (code, ARMREG_LR, inst->inst_basereg, inst->inst_offset + 4);
6110 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset + 4);
6111 ARM_STR_REG_REG (code, ARMREG_LR, inst->inst_basereg, ARMREG_IP);
6115 if (arm_is_imm12 (inst->inst_offset)) {
6116 ARM_STR_IMM (code, ARMREG_LR, inst->inst_basereg, inst->inst_offset);
6118 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
6119 ARM_STR_REG_REG (code, ARMREG_LR, inst->inst_basereg, ARMREG_IP);
6123 } else if (ainfo->storage == RegTypeFP) {
6124 int imm8, rot_amount;
6126 if ((imm8 = mono_arm_is_rotated_imm8 (inst->inst_offset, &rot_amount)) == -1) {
6127 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
6128 ARM_ADD_REG_REG (code, ARMREG_IP, ARMREG_IP, inst->inst_basereg);
6130 ARM_ADD_REG_IMM (code, ARMREG_IP, inst->inst_basereg, imm8, rot_amount);
6132 if (ainfo->size == 8)
6133 ARM_FSTD (code, ainfo->reg, ARMREG_IP, 0);
6135 ARM_FSTS (code, ainfo->reg, ARMREG_IP, 0);
6136 } else if (ainfo->storage == RegTypeStructByVal) {
6137 int doffset = inst->inst_offset;
6141 size = mini_type_stack_size_full (cfg->generic_sharing_context, inst->inst_vtype, NULL, sig->pinvoke);
6142 for (cur_reg = 0; cur_reg < ainfo->size; ++cur_reg) {
6143 if (arm_is_imm12 (doffset)) {
6144 ARM_STR_IMM (code, ainfo->reg + cur_reg, inst->inst_basereg, doffset);
6146 code = mono_arm_emit_load_imm (code, ARMREG_IP, doffset);
6147 ARM_STR_REG_REG (code, ainfo->reg + cur_reg, inst->inst_basereg, ARMREG_IP);
6149 soffset += sizeof (gpointer);
6150 doffset += sizeof (gpointer);
6152 if (ainfo->vtsize) {
6153 /* FIXME: handle overrun! with struct sizes not multiple of 4 */
6154 //g_print ("emit_memcpy (prev_sp_ofs: %d, ainfo->offset: %d, soffset: %d)\n", prev_sp_offset, ainfo->offset, soffset);
6155 code = emit_memcpy (code, ainfo->vtsize * sizeof (gpointer), inst->inst_basereg, doffset, ARMREG_SP, prev_sp_offset + ainfo->offset);
6157 } else if (ainfo->storage == RegTypeStructByAddr) {
6158 g_assert_not_reached ();
6159 /* FIXME: handle overrun! with struct sizes not multiple of 4 */
6160 code = emit_memcpy (code, ainfo->vtsize * sizeof (gpointer), inst->inst_basereg, inst->inst_offset, ainfo->reg, 0);
6162 g_assert_not_reached ();
6167 if (method->save_lmf)
6168 code = emit_save_lmf (cfg, code, alloc_size - lmf_offset);
6171 code = mono_arch_instrument_prolog (cfg, mono_trace_enter_method, code, TRUE);
6173 if (cfg->arch.seq_point_info_var) {
6174 MonoInst *ins = cfg->arch.seq_point_info_var;
6176 /* Initialize the variable from a GOT slot */
6177 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_SEQ_POINT_INFO, cfg->method);
6178 #ifdef USE_JUMP_TABLES
6180 gpointer *jte = mono_jumptable_add_entry ();
6181 code = mono_arm_load_jumptable_entry (code, jte, ARMREG_IP);
6182 ARM_LDR_IMM (code, ARMREG_R0, ARMREG_IP, 0);
6184 /** XXX: is it correct? */
6186 ARM_LDR_IMM (code, ARMREG_R0, ARMREG_PC, 0);
6188 *(gpointer*)code = NULL;
6191 ARM_LDR_REG_REG (code, ARMREG_R0, ARMREG_PC, ARMREG_R0);
6193 g_assert (ins->opcode == OP_REGOFFSET);
6195 if (arm_is_imm12 (ins->inst_offset)) {
6196 ARM_STR_IMM (code, ARMREG_R0, ins->inst_basereg, ins->inst_offset);
6198 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_offset);
6199 ARM_STR_REG_REG (code, ARMREG_R0, ins->inst_basereg, ARMREG_LR);
6203 /* Initialize ss_trigger_page_var */
6204 if (!cfg->soft_breakpoints) {
6205 MonoInst *info_var = cfg->arch.seq_point_info_var;
6206 MonoInst *ss_trigger_page_var = cfg->arch.ss_trigger_page_var;
6207 int dreg = ARMREG_LR;
6210 g_assert (info_var->opcode == OP_REGOFFSET);
6211 g_assert (arm_is_imm12 (info_var->inst_offset));
6213 ARM_LDR_IMM (code, dreg, info_var->inst_basereg, info_var->inst_offset);
6214 /* Load the trigger page addr */
6215 ARM_LDR_IMM (code, dreg, dreg, MONO_STRUCT_OFFSET (SeqPointInfo, ss_trigger_page));
6216 ARM_STR_IMM (code, dreg, ss_trigger_page_var->inst_basereg, ss_trigger_page_var->inst_offset);
6220 if (cfg->arch.seq_point_read_var) {
6221 MonoInst *read_ins = cfg->arch.seq_point_read_var;
6222 MonoInst *ss_method_ins = cfg->arch.seq_point_ss_method_var;
6223 MonoInst *bp_method_ins = cfg->arch.seq_point_bp_method_var;
6224 #ifdef USE_JUMP_TABLES
6227 g_assert (read_ins->opcode == OP_REGOFFSET);
6228 g_assert (arm_is_imm12 (read_ins->inst_offset));
6229 g_assert (ss_method_ins->opcode == OP_REGOFFSET);
6230 g_assert (arm_is_imm12 (ss_method_ins->inst_offset));
6231 g_assert (bp_method_ins->opcode == OP_REGOFFSET);
6232 g_assert (arm_is_imm12 (bp_method_ins->inst_offset));
6234 #ifdef USE_JUMP_TABLES
6235 jte = mono_jumptable_add_entries (3);
6236 jte [0] = (gpointer)&ss_trigger_var;
6237 jte [1] = single_step_func_wrapper;
6238 jte [2] = breakpoint_func_wrapper;
6239 code = mono_arm_load_jumptable_entry_addr (code, jte, ARMREG_LR);
6241 ARM_MOV_REG_REG (code, ARMREG_LR, ARMREG_PC);
6243 *(volatile int **)code = &ss_trigger_var;
6245 *(gpointer*)code = single_step_func_wrapper;
6247 *(gpointer*)code = breakpoint_func_wrapper;
6251 ARM_LDR_IMM (code, ARMREG_IP, ARMREG_LR, 0);
6252 ARM_STR_IMM (code, ARMREG_IP, read_ins->inst_basereg, read_ins->inst_offset);
6253 ARM_LDR_IMM (code, ARMREG_IP, ARMREG_LR, 4);
6254 ARM_STR_IMM (code, ARMREG_IP, ss_method_ins->inst_basereg, ss_method_ins->inst_offset);
6255 ARM_LDR_IMM (code, ARMREG_IP, ARMREG_LR, 8);
6256 ARM_STR_IMM (code, ARMREG_IP, bp_method_ins->inst_basereg, bp_method_ins->inst_offset);
6259 cfg->code_len = code - cfg->native_code;
6260 g_assert (cfg->code_len < cfg->code_size);
/*
 * mono_arch_emit_epilog:
 * Emit the native epilogue for CFG: optionally instrument the method leave,
 * copy a by-value struct return into r0, restore the LMF and callee-saved
 * registers, and return by popping the saved LR into PC.
 * NOTE(review): this chunk is a partial extract — several interior lines
 * (else branches, closing braces) are not visible here.
 */
6267 mono_arch_emit_epilog (MonoCompile *cfg)
6269 	MonoMethod *method = cfg->method;
6270 int pos, i, rot_amount;
/* Worst-case epilogue size estimate, used to grow the code buffer below. */
6271 int max_epilog_size = 16 + 20*4;
6275 if (cfg->method->save_lmf)
6276 max_epilog_size += 128;
6278 if (mono_jit_trace_calls != NULL)
6279 max_epilog_size += 50;
6281 if (cfg->prof_options & MONO_PROFILE_ENTER_LEAVE)
6282 max_epilog_size += 50;
/* Double the buffer until the estimated epilogue fits (16-byte slack). */
6284 while (cfg->code_len + max_epilog_size > (cfg->code_size - 16)) {
6285 cfg->code_size *= 2;
6286 cfg->native_code = g_realloc (cfg->native_code, cfg->code_size);
6287 cfg->stat_code_reallocs++;
6291 * Keep in sync with OP_JMP
6293 code = cfg->native_code + cfg->code_len;
6295 if (mono_jit_trace_calls != NULL && mono_trace_eval (method)) {
6296 code = mono_arch_instrument_epilog (cfg, mono_trace_leave_method, code, TRUE);
6300 /* Load returned vtypes into registers if needed */
6301 cinfo = cfg->arch.cinfo;
6302 if (cinfo->ret.storage == RegTypeStructByVal) {
6303 MonoInst *ins = cfg->ret;
/* Use the imm12 addressing form when the offset fits, else materialize it in LR. */
6305 if (arm_is_imm12 (ins->inst_offset)) {
6306 ARM_LDR_IMM (code, ARMREG_R0, ins->inst_basereg, ins->inst_offset);
6308 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_offset);
6309 ARM_LDR_REG_REG (code, ARMREG_R0, ins->inst_basereg, ARMREG_LR);
6313 if (method->save_lmf) {
6314 int lmf_offset, reg, sp_adj, regmask;
6315 /* all but r0-r3, sp and pc */
6316 pos += sizeof (MonoLMF) - (MONO_ARM_NUM_SAVED_REGS * sizeof (mgreg_t));
6319 code = emit_restore_lmf (cfg, code, cfg->stack_usage - lmf_offset);
6321 /* This points to r4 inside MonoLMF->iregs */
6322 sp_adj = (sizeof (MonoLMF) - MONO_ARM_NUM_SAVED_REGS * sizeof (mgreg_t));
6324 regmask = 0x9ff0; /* restore lr to pc */
6325 /* Skip caller saved registers not used by the method */
6326 while (!(cfg->used_int_regs & (1 << reg)) && reg < ARMREG_FP) {
6327 regmask &= ~(1 << reg);
6332 /* Restored later */
6333 regmask &= ~(1 << ARMREG_PC);
6334 /* point sp at the registers to restore: 10 is 14 -4, because we skip r0-r3 */
6335 code = emit_big_add (code, ARMREG_SP, cfg->frame_reg, cfg->stack_usage - lmf_offset + sp_adj);
6337 ARM_POP (code, regmask);
6339 /* Restore saved r7, restore LR to PC */
6340 /* Skip lr from the lmf */
6341 ARM_ADD_REG_IMM (code, ARMREG_SP, ARMREG_SP, sizeof (gpointer), 0);
6342 ARM_POP (code, (1 << ARMREG_R7) | (1 << ARMREG_PC));
/* No LMF: unwind SP by the frame size, using a rotated imm8 when possible. */
6345 if ((i = mono_arm_is_rotated_imm8 (cfg->stack_usage, &rot_amount)) >= 0) {
6346 ARM_ADD_REG_IMM (code, ARMREG_SP, cfg->frame_reg, i, rot_amount);
6348 code = mono_arm_emit_load_imm (code, ARMREG_IP, cfg->stack_usage);
6349 ARM_ADD_REG_REG (code, ARMREG_SP, cfg->frame_reg, ARMREG_IP);
6353 /* Restore saved gregs */
6354 if (cfg->used_int_regs)
6355 ARM_POP (code, cfg->used_int_regs);
6356 /* Restore saved r7, restore LR to PC */
6357 ARM_POP (code, (1 << ARMREG_R7) | (1 << ARMREG_PC));
6359 ARM_POP (code, cfg->used_int_regs | (1 << ARMREG_PC));
6363 cfg->code_len = code - cfg->native_code;
6365 g_assert (cfg->code_len < cfg->code_size);
/*
 * mono_arch_emit_exceptions:
 * Emit out-of-line exception-throwing stubs referenced by MONO_PATCH_INFO_EXC
 * patches, deduplicating one stub per exception type, and repoint the
 * in-method branches at them.
 */
6370 mono_arch_emit_exceptions (MonoCompile *cfg)
6372 MonoJumpInfo *patch_info;
6375 guint8* exc_throw_pos [MONO_EXC_INTRINS_NUM];
6376 guint8 exc_throw_found [MONO_EXC_INTRINS_NUM];
6377 int max_epilog_size = 50;
6379 for (i = 0; i < MONO_EXC_INTRINS_NUM; i++) {
6380 exc_throw_pos [i] = NULL;
6381 exc_throw_found [i] = 0;
6384 /* count the number of exception infos */
6387 * make sure we have enough space for exceptions
/* First pass: size estimate — 32 bytes per distinct exception type. */
6389 for (patch_info = cfg->patch_info; patch_info; patch_info = patch_info->next) {
6390 if (patch_info->type == MONO_PATCH_INFO_EXC) {
6391 i = mini_exception_id_by_name (patch_info->data.target);
6392 if (!exc_throw_found [i]) {
6393 max_epilog_size += 32;
6394 exc_throw_found [i] = TRUE;
6399 while (cfg->code_len + max_epilog_size > (cfg->code_size - 16)) {
6400 cfg->code_size *= 2;
6401 cfg->native_code = g_realloc (cfg->native_code, cfg->code_size);
6402 cfg->stat_code_reallocs++;
6405 code = cfg->native_code + cfg->code_len;
6407 /* add code to raise exceptions */
6408 for (patch_info = cfg->patch_info; patch_info; patch_info = patch_info->next) {
6409 switch (patch_info->type) {
6410 case MONO_PATCH_INFO_EXC: {
6411 MonoClass *exc_class;
6412 unsigned char *ip = patch_info->ip.i + cfg->native_code;
6414 i = mini_exception_id_by_name (patch_info->data.target);
/* Already emitted a stub for this exception type: just branch to it. */
6415 if (exc_throw_pos [i]) {
6416 arm_patch (ip, exc_throw_pos [i]);
6417 patch_info->type = MONO_PATCH_INFO_NONE;
6420 exc_throw_pos [i] = code;
6422 arm_patch (ip, code);
6424 exc_class = mono_class_from_name (mono_defaults.corlib, "System", patch_info->data.name);
6425 g_assert (exc_class);
/* r1 = throw-site return address (LR) for the corlib-exception helper. */
6427 ARM_MOV_REG_REG (code, ARMREG_R1, ARMREG_LR);
6428 #ifdef USE_JUMP_TABLES
6430 gpointer *jte = mono_jumptable_add_entries (2);
/* Repurpose this patch to call mono_arch_throw_corlib_exception. */
6431 patch_info->type = MONO_PATCH_INFO_INTERNAL_METHOD;
6432 patch_info->data.name = "mono_arch_throw_corlib_exception";
6433 patch_info->ip.i = code - cfg->native_code;
6434 code = mono_arm_load_jumptable_entry_addr (code, jte, ARMREG_R0);
6435 ARM_LDR_IMM (code, ARMREG_IP, ARMREG_R0, 0);
6436 ARM_LDR_IMM (code, ARMREG_R0, ARMREG_R0, 4);
6437 ARM_BLX_REG (code, ARMREG_IP);
6438 jte [1] = GUINT_TO_POINTER (exc_class->type_token);
/* Non-jumptable path: type token is embedded in the code stream after the load. */
6441 ARM_LDR_IMM (code, ARMREG_R0, ARMREG_PC, 0);
6442 patch_info->type = MONO_PATCH_INFO_INTERNAL_METHOD;
6443 patch_info->data.name = "mono_arch_throw_corlib_exception";
6444 patch_info->ip.i = code - cfg->native_code;
6446 *(guint32*)(gpointer)code = exc_class->type_token;
6457 cfg->code_len = code - cfg->native_code;
6459 g_assert (cfg->code_len < cfg->code_size);
6463 #endif /* #ifndef DISABLE_JIT */
/* One-time arch-specific initialization after the JIT starts up. */
6466 mono_arch_finish_init (void)
/* Free per-thread JIT TLS data (no ARM-specific state to release here). */
6471 mono_arch_free_jit_tls_data (MonoJitTlsData *tls)
/* Arch hook for intrinsic expansion of method calls; bodies not visible in this extract. */
6476 mono_arch_emit_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
/* Debug helper: print an instruction tree (arity = number of operands). */
6483 mono_arch_print_tree (MonoInst *tree, int arity)
/* Offset inside CODE where the patch target is located. */
6493 mono_arch_get_patch_offset (guint8 *code)
/* No register windows on ARM — nothing to flush. */
6500 mono_arch_flush_register_windows (void)
/*
 * mono_arch_emit_imt_argument:
 * Arrange for the IMT/method argument of CALL to be passed in V5.
 * Under AOT, jumptables, LLVM, or generic sharing the argument is always
 * passed in a register for simplicity.
 */
6507 mono_arch_emit_imt_argument (MonoCompile *cfg, MonoCallInst *call, MonoInst *imt_arg)
6509 int method_reg = mono_alloc_ireg (cfg);
6510 #ifdef USE_JUMP_TABLES
6511 int use_jumptables = TRUE;
6513 int use_jumptables = FALSE;
6516 if (cfg->compile_aot) {
6519 call->dynamic_imt_arg = TRUE;
/* If an explicit IMT argument exists, move it; otherwise load the method as an AOT constant. */
6522 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, method_reg, imt_arg->dreg);
6524 MONO_INST_NEW (cfg, ins, OP_AOTCONST);
6525 ins->dreg = method_reg;
6526 ins->inst_p0 = call->method;
6527 ins->inst_c1 = MONO_PATCH_INFO_METHODCONST;
6528 MONO_ADD_INS (cfg->cbb, ins);
6530 mono_call_inst_add_outarg_reg (cfg, call, method_reg, ARMREG_V5, FALSE);
6531 } else if (cfg->generic_context || imt_arg || mono_use_llvm || use_jumptables) {
6532 /* Always pass in a register for simplicity */
6533 call->dynamic_imt_arg = TRUE;
6535 cfg->uses_rgctx_reg = TRUE;
6538 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, method_reg, imt_arg->dreg);
/* No IMT arg: pass the method pointer itself as a constant. */
6542 MONO_INST_NEW (cfg, ins, OP_PCONST);
6543 ins->inst_p0 = call->method;
6544 ins->dreg = method_reg;
6545 MONO_ADD_INS (cfg->cbb, ins);
6548 mono_call_inst_add_outarg_reg (cfg, call, method_reg, ARMREG_V5, FALSE);
6552 #endif /* DISABLE_JIT */
/*
 * mono_arch_find_imt_method:
 * Recover the IMT method at an IMT call site: with jumptables it is always
 * in V5; otherwise it is embedded in the code stream right after the
 * PC-relative LDR, except for AOT/gsharedvt code where V5 is used instead.
 */
6555 mono_arch_find_imt_method (mgreg_t *regs, guint8 *code)
6557 #ifdef USE_JUMP_TABLES
6558 return (MonoMethod*)regs [ARMREG_V5];
6561 guint32 *code_ptr = (guint32*)code;
6563 method = GUINT_TO_POINTER (code_ptr [1]);
6567 return (MonoMethod*)regs [ARMREG_V5];
6569 /* The IMT value is stored in the code stream right after the LDC instruction. */
6570 /* This is no longer true for the gsharedvt_in trampoline */
6572 if (!IS_LDR_PC (code_ptr [0])) {
6573 g_warning ("invalid code stream, instruction before IMT value is not a LDC in %s() (code %p value 0: 0x%x -1: 0x%x -2: 0x%x)", __FUNCTION__, code, code_ptr [2], code_ptr [1], code_ptr [0]);
6574 g_assert (IS_LDR_PC (code_ptr [0]));
6578 /* This is AOTed code, or the gsharedvt trampoline, the IMT method is in V5 */
6579 return (MonoMethod*)regs [ARMREG_V5];
6581 return (MonoMethod*) method;
/* Return the vtable of a static rgctx call: it is passed in the RGCTX register. */
6586 mono_arch_find_static_call_vtable (mgreg_t *regs, guint8 *code)
6588 return (MonoVTable*) regs [MONO_ARCH_RGCTX_REG];
/* Per-fragment code-size constants (bytes) used to size IMT thunks below. */
6591 /* #define ENABLE_WRONG_METHOD_CHECK 1 */
6592 #define BASE_SIZE (6 * 4)
6593 #define BSEARCH_ENTRY_SIZE (4 * 4)
6594 #define CMP_SIZE (3 * 4)
6595 #define BRANCH_SIZE (1 * 4)
6596 #define CALL_SIZE (2 * 4)
6597 #define WMC_SIZE (8 * 4)
/* Byte distance from A to B (signed 32-bit). */
6598 #define DISTANCE(A, B) (((gint32)(B)) - ((gint32)(A)))
6600 #ifdef USE_JUMP_TABLES
/* Store VALUE into jumptable slot INDEX; asserts the slot was unset. */
6602 set_jumptable_element (gpointer *base, guint32 index, gpointer value)
6604 g_assert (base [index] == NULL);
6605 base [index] = value;
/*
 * Emit a conditional load of jumptable entry JTI (base-relative, entries are
 * 4 bytes) into DREG; falls back to MOVW/MOVT when the offset exceeds imm12.
 */
6608 load_element_with_regbase_cond (arminstr_t *code, ARMReg dreg, ARMReg base, guint32 jti, int cond)
6610 if (arm_is_imm12 (jti * 4)) {
6611 ARM_LDR_IMM_COND (code, dreg, base, jti * 4, cond);
6613 ARM_MOVW_REG_IMM_COND (code, dreg, (jti * 4) & 0xffff, cond);
6614 if ((jti * 4) >> 16)
6615 ARM_MOVT_REG_IMM_COND (code, dreg, ((jti * 4) >> 16) & 0xffff, cond);
6616 ARM_LDR_REG_REG_SHIFT_COND (code, dreg, base, dreg, ARMSHIFT_LSL, 0, cond);
/*
 * Emit VALUE at CODE and patch the earlier PC-relative LDR at TARGET with the
 * (imm12-bounded) displacement to it.
 */
6622 arm_emit_value_and_patch_ldr (arminstr_t *code, arminstr_t *target, guint32 value)
6624 guint32 delta = DISTANCE (target, code);
6626 g_assert (delta >= 0 && delta <= 0xFFF);
6627 *target = *target | delta;
6633 #ifdef ENABLE_WRONG_METHOD_CHECK
/* Debug aid (only with ENABLE_WRONG_METHOD_CHECK): report an IMT mismatch. */
6635 mini_dump_bad_imt (int input_imt, int compared_imt, int pc)
6637 g_print ("BAD IMT comparing %x with expected %x at ip %x", input_imt, compared_imt, pc);
/*
 * mono_arch_build_imt_thunk:
 * Build the native IMT thunk for VTABLE: a chain/bsearch of compares on the
 * IMT method (in r0/V5) that dispatches to the matching vtable slot, target
 * code, or FAIL_TRAMP. Two code shapes exist: a jumptable-based one
 * (USE_JUMP_TABLES) and one that embeds constants in the code stream and
 * patches PC-relative LDRs afterwards.
 * NOTE(review): partial extract — some interior lines are not visible here.
 */
6643 mono_arch_build_imt_thunk (MonoVTable *vtable, MonoDomain *domain, MonoIMTCheckItem **imt_entries, int count,
6644 gpointer fail_tramp)
6647 arminstr_t *code, *start;
6648 #ifdef USE_JUMP_TABLES
6651 gboolean large_offsets = FALSE;
6652 guint32 **constant_pool_starts;
6653 arminstr_t *vtable_target = NULL;
6654 int extra_space = 0;
6656 #ifdef ENABLE_WRONG_METHOD_CHECK
/* --- Pass 1: compute the chunk size of every entry and the total size. --- */
6661 #ifdef USE_JUMP_TABLES
6662 for (i = 0; i < count; ++i) {
6663 MonoIMTCheckItem *item = imt_entries [i];
6664 item->chunk_size += 4 * 16;
6665 if (!item->is_equals)
6666 imt_entries [item->check_target_idx]->compare_done = TRUE;
6667 size += item->chunk_size;
6670 constant_pool_starts = g_new0 (guint32*, count);
6672 for (i = 0; i < count; ++i) {
6673 MonoIMTCheckItem *item = imt_entries [i];
6674 if (item->is_equals) {
6675 gboolean fail_case = !item->check_target_idx && fail_tramp;
/* Large vtable offsets / target-code entries need the bigger code shape. */
6677 if (item->has_target_code || !arm_is_imm12 (DISTANCE (vtable, &vtable->vtable[item->value.vtable_slot]))) {
6678 item->chunk_size += 32;
6679 large_offsets = TRUE;
6682 if (item->check_target_idx || fail_case) {
6683 if (!item->compare_done || fail_case)
6684 item->chunk_size += CMP_SIZE;
6685 item->chunk_size += BRANCH_SIZE;
6687 #ifdef ENABLE_WRONG_METHOD_CHECK
6688 item->chunk_size += WMC_SIZE;
6692 item->chunk_size += 16;
6693 large_offsets = TRUE;
6695 item->chunk_size += CALL_SIZE;
6697 item->chunk_size += BSEARCH_ENTRY_SIZE;
6698 imt_entries [item->check_target_idx]->compare_done = TRUE;
6700 size += item->chunk_size;
6704 size += 4 * count; /* The ARM_ADD_REG_IMM to pop the stack */
/* fail_tramp implies a generic-virtual thunk allocation; else domain code. */
6708 code = mono_method_alloc_generic_virtual_thunk (domain, size);
6710 code = mono_domain_code_reserve (domain, size);
6714 g_print ("Building IMT thunk for class %s %s entries %d code size %d code at %p end %p vtable %p fail_tramp %p\n", vtable->klass->name_space, vtable->klass->name, count, size, start, ((guint8*)start) + size, vtable, fail_tramp);
6715 for (i = 0; i < count; ++i) {
6716 MonoIMTCheckItem *item = imt_entries [i];
6717 g_print ("method %d (%p) %s vtable slot %p is_equals %d chunk size %d\n", i, item->key, ((MonoMethod*)item->key)->name, &vtable->vtable [item->value.vtable_slot], item->is_equals, item->chunk_size);
/* --- Pass 2: emit the thunk prologue. --- */
6721 #ifdef USE_JUMP_TABLES
6722 ARM_PUSH3 (code, ARMREG_R0, ARMREG_R1, ARMREG_R2);
6723 /* If jumptables we always pass the IMT method in R5 */
6724 ARM_MOV_REG_REG (code, ARMREG_R0, ARMREG_V5);
/* Jumptable layout: slot 0 = vtable, then 3 records per entry. */
6725 #define VTABLE_JTI 0
6726 #define IMT_METHOD_OFFSET 0
6727 #define TARGET_CODE_OFFSET 1
6728 #define JUMP_CODE_OFFSET 2
6729 #define RECORDS_PER_ENTRY 3
6730 #define IMT_METHOD_JTI(idx) (1 + idx * RECORDS_PER_ENTRY + IMT_METHOD_OFFSET)
6731 #define TARGET_CODE_JTI(idx) (1 + idx * RECORDS_PER_ENTRY + TARGET_CODE_OFFSET)
6732 #define JUMP_CODE_JTI(idx) (1 + idx * RECORDS_PER_ENTRY + JUMP_CODE_OFFSET)
6734 jte = mono_jumptable_add_entries (RECORDS_PER_ENTRY * count + 1 /* vtable */);
6735 code = (arminstr_t *) mono_arm_load_jumptable_entry_addr ((guint8 *) code, jte, ARMREG_R2);
6736 ARM_LDR_IMM (code, ARMREG_IP, ARMREG_R2, VTABLE_JTI);
6737 set_jumptable_element (jte, VTABLE_JTI, vtable);
6740 ARM_PUSH4 (code, ARMREG_R0, ARMREG_R1, ARMREG_IP, ARMREG_PC);
6742 ARM_PUSH2 (code, ARMREG_R0, ARMREG_R1);
6743 ARM_LDR_IMM (code, ARMREG_R0, ARMREG_LR, -4);
6744 vtable_target = code;
6745 ARM_LDR_IMM (code, ARMREG_IP, ARMREG_PC, 0);
6747 if (mono_use_llvm) {
6748 /* LLVM always passes the IMT method in R5 */
6749 ARM_MOV_REG_REG (code, ARMREG_R0, ARMREG_V5);
6751 /* R0 == 0 means we are called from AOT code. In this case, V5 contains the IMT method */
6752 ARM_CMP_REG_IMM8 (code, ARMREG_R0, 0);
6753 ARM_MOV_REG_REG_COND (code, ARMREG_R0, ARMREG_V5, ARMCOND_EQ);
/* --- Pass 3: emit one compare/dispatch fragment per IMT entry. --- */
6757 for (i = 0; i < count; ++i) {
6758 MonoIMTCheckItem *item = imt_entries [i];
6759 #ifdef USE_JUMP_TABLES
6760 guint32 imt_method_jti = 0, target_code_jti = 0;
6762 arminstr_t *imt_method = NULL, *vtable_offset_ins = NULL, *target_code_ins = NULL;
6764 gint32 vtable_offset;
6766 item->code_target = (guint8*)code;
6768 if (item->is_equals) {
6769 gboolean fail_case = !item->check_target_idx && fail_tramp;
6771 if (item->check_target_idx || fail_case) {
6772 if (!item->compare_done || fail_case) {
6773 #ifdef USE_JUMP_TABLES
6774 imt_method_jti = IMT_METHOD_JTI (i);
6775 code = load_element_with_regbase_cond (code, ARMREG_R1, ARMREG_R2, imt_method_jti, ARMCOND_AL);
6778 ARM_LDR_IMM (code, ARMREG_R1, ARMREG_PC, 0);
6780 ARM_CMP_REG_REG (code, ARMREG_R0, ARMREG_R1);
/* On mismatch branch to the chain's next check (patched up later). */
6782 #ifdef USE_JUMP_TABLES
6783 code = load_element_with_regbase_cond (code, ARMREG_R1, ARMREG_R2, JUMP_CODE_JTI (i), ARMCOND_NE);
6784 ARM_BX_COND (code, ARMCOND_NE, ARMREG_R1);
6785 item->jmp_code = GUINT_TO_POINTER (JUMP_CODE_JTI (i));
6787 item->jmp_code = (guint8*)code;
6788 ARM_B_COND (code, ARMCOND_NE, 0);
6791 /*Enable the commented code to assert on wrong method*/
6792 #ifdef ENABLE_WRONG_METHOD_CHECK
6793 #ifdef USE_JUMP_TABLES
6794 imt_method_jti = IMT_METHOD_JTI (i);
6795 code = load_element_with_regbase_cond (code, ARMREG_R1, ARMREG_R2, imt_method_jti, ARMCOND_AL);
6798 ARM_LDR_IMM (code, ARMREG_R1, ARMREG_PC, 0);
6800 ARM_CMP_REG_REG (code, ARMREG_R0, ARMREG_R1);
6802 ARM_B_COND (code, ARMCOND_EQ, 0);
6804 /* Define this if your system is so bad that gdb is failing. */
6805 #ifdef BROKEN_DEV_ENV
6806 ARM_MOV_REG_REG (code, ARMREG_R2, ARMREG_PC);
6808 arm_patch (code - 1, mini_dump_bad_imt);
6812 arm_patch (cond, code);
/* Hit: dispatch to explicit target code, or through the vtable slot. */
6816 if (item->has_target_code) {
6817 /* Load target address */
6818 #ifdef USE_JUMP_TABLES
6819 target_code_jti = TARGET_CODE_JTI (i);
6820 code = load_element_with_regbase_cond (code, ARMREG_R1, ARMREG_R2, target_code_jti, ARMCOND_AL);
6821 /* Restore registers */
6822 ARM_POP3 (code, ARMREG_R0, ARMREG_R1, ARMREG_R2);
6824 ARM_BX (code, ARMREG_R1);
6825 set_jumptable_element (jte, target_code_jti, item->value.target_code);
6827 target_code_ins = code;
6828 ARM_LDR_IMM (code, ARMREG_R1, ARMREG_PC, 0);
6829 /* Save it to the fourth slot */
6830 ARM_STR_IMM (code, ARMREG_R1, ARMREG_SP, 3 * sizeof (gpointer));
6831 /* Restore registers and branch */
6832 ARM_POP4 (code, ARMREG_R0, ARMREG_R1, ARMREG_IP, ARMREG_PC);
6834 code = arm_emit_value_and_patch_ldr (code, target_code_ins, (gsize)item->value.target_code);
6837 vtable_offset = DISTANCE (vtable, &vtable->vtable[item->value.vtable_slot]);
6838 if (!arm_is_imm12 (vtable_offset)) {
6840 * We need to branch to a computed address but we don't have
6841 * a free register to store it, since IP must contain the
6842 * vtable address. So we push the two values to the stack, and
6843 * load them both using LDM.
6845 /* Compute target address */
6846 #ifdef USE_JUMP_TABLES
6847 ARM_MOVW_REG_IMM (code, ARMREG_R1, vtable_offset & 0xffff);
6848 if (vtable_offset >> 16)
6849 ARM_MOVT_REG_IMM (code, ARMREG_R1, (vtable_offset >> 16) & 0xffff);
6850 /* IP had vtable base. */
6851 ARM_LDR_REG_REG (code, ARMREG_IP, ARMREG_IP, ARMREG_R1);
6852 /* Restore registers and branch */
6853 ARM_POP3 (code, ARMREG_R0, ARMREG_R1, ARMREG_R2);
6854 ARM_BX (code, ARMREG_IP);
6856 vtable_offset_ins = code;
6857 ARM_LDR_IMM (code, ARMREG_R1, ARMREG_PC, 0);
6858 ARM_LDR_REG_REG (code, ARMREG_R1, ARMREG_IP, ARMREG_R1);
6859 /* Save it to the fourth slot */
6860 ARM_STR_IMM (code, ARMREG_R1, ARMREG_SP, 3 * sizeof (gpointer));
6861 /* Restore registers and branch */
6862 ARM_POP4 (code, ARMREG_R0, ARMREG_R1, ARMREG_IP, ARMREG_PC);
6864 code = arm_emit_value_and_patch_ldr (code, vtable_offset_ins, vtable_offset);
6867 #ifdef USE_JUMP_TABLES
6868 ARM_LDR_IMM (code, ARMREG_IP, ARMREG_IP, vtable_offset);
6869 ARM_POP3 (code, ARMREG_R0, ARMREG_R1, ARMREG_R2);
6870 ARM_BX (code, ARMREG_IP);
6872 ARM_POP2 (code, ARMREG_R0, ARMREG_R1);
6874 ARM_ADD_REG_IMM8 (code, ARMREG_SP, ARMREG_SP, 2 * sizeof (gpointer));
6875 ARM_LDR_IMM (code, ARMREG_PC, ARMREG_IP, vtable_offset);
/* fail_case: mismatch ends up at FAIL_TRAMP. */
6881 #ifdef USE_JUMP_TABLES
6882 set_jumptable_element (jte, GPOINTER_TO_UINT (item->jmp_code), code);
6883 target_code_jti = TARGET_CODE_JTI (i);
6884 /* Load target address */
6885 code = load_element_with_regbase_cond (code, ARMREG_R1, ARMREG_R2, target_code_jti, ARMCOND_AL);
6886 /* Restore registers */
6887 ARM_POP3 (code, ARMREG_R0, ARMREG_R1, ARMREG_R2);
6889 ARM_BX (code, ARMREG_R1);
6890 set_jumptable_element (jte, target_code_jti, fail_tramp);
6892 arm_patch (item->jmp_code, (guchar*)code);
6894 target_code_ins = code;
6895 /* Load target address */
6896 ARM_LDR_IMM (code, ARMREG_R1, ARMREG_PC, 0);
6897 /* Save it to the fourth slot */
6898 ARM_STR_IMM (code, ARMREG_R1, ARMREG_SP, 3 * sizeof (gpointer));
6899 /* Restore registers and branch */
6900 ARM_POP4 (code, ARMREG_R0, ARMREG_R1, ARMREG_IP, ARMREG_PC);
6902 code = arm_emit_value_and_patch_ldr (code, target_code_ins, (gsize)fail_tramp);
6904 item->jmp_code = NULL;
6907 #ifdef USE_JUMP_TABLES
6909 set_jumptable_element (jte, imt_method_jti, item->key);
6912 code = arm_emit_value_and_patch_ldr (code, imt_method, (guint32)item->key);
6914 /*must emit after unconditional branch*/
6915 if (vtable_target) {
6916 code = arm_emit_value_and_patch_ldr (code, vtable_target, (guint32)vtable);
6917 item->chunk_size += 4;
6918 vtable_target = NULL;
6921 /*We reserve the space for bsearch IMT values after the first entry with an absolute jump*/
6922 constant_pool_starts [i] = code;
6924 code += extra_space;
/* Non-equals entry: bsearch-style "branch if >=" node. */
6929 #ifdef USE_JUMP_TABLES
6930 code = load_element_with_regbase_cond (code, ARMREG_R1, ARMREG_R2, IMT_METHOD_JTI (i), ARMCOND_AL);
6931 ARM_CMP_REG_REG (code, ARMREG_R0, ARMREG_R1);
6932 code = load_element_with_regbase_cond (code, ARMREG_R1, ARMREG_R2, JUMP_CODE_JTI (i), ARMCOND_HS);
6933 ARM_BX_COND (code, ARMCOND_HS, ARMREG_R1);
6934 item->jmp_code = GUINT_TO_POINTER (JUMP_CODE_JTI (i));
6936 ARM_LDR_IMM (code, ARMREG_R1, ARMREG_PC, 0);
6937 ARM_CMP_REG_REG (code, ARMREG_R0, ARMREG_R1);
6939 item->jmp_code = (guint8*)code;
6940 ARM_B_COND (code, ARMCOND_HS, 0);
/* --- Pass 4: resolve pending branches and fill constant pools. --- */
6946 for (i = 0; i < count; ++i) {
6947 MonoIMTCheckItem *item = imt_entries [i];
6948 if (item->jmp_code) {
6949 if (item->check_target_idx)
6950 #ifdef USE_JUMP_TABLES
6951 set_jumptable_element (jte, GPOINTER_TO_UINT (item->jmp_code), imt_entries [item->check_target_idx]->code_target);
6953 arm_patch (item->jmp_code, imt_entries [item->check_target_idx]->code_target);
6956 if (i > 0 && item->is_equals) {
6958 #ifdef USE_JUMP_TABLES
6959 for (j = i - 1; j >= 0 && !imt_entries [j]->is_equals; --j)
6960 set_jumptable_element (jte, IMT_METHOD_JTI (j), imt_entries [j]->key);
6962 arminstr_t *space_start = constant_pool_starts [i];
6963 for (j = i - 1; j >= 0 && !imt_entries [j]->is_equals; --j) {
6964 space_start = arm_emit_value_and_patch_ldr (space_start, (arminstr_t*)imt_entries [j]->code_target, (guint32)imt_entries [j]->key);
6972 char *buff = g_strdup_printf ("thunk_for_class_%s_%s_entries_%d", vtable->klass->name_space, vtable->klass->name, count);
6973 mono_disassemble_code (NULL, (guint8*)start, size, buff);
6978 #ifndef USE_JUMP_TABLES
6979 g_free (constant_pool_starts);
/* Flush I-cache over the freshly-written thunk and record statistics. */
6982 mono_arch_flush_icache ((guint8*)start, size);
6983 mono_profiler_code_buffer_new (start, code - start, MONO_PROFILER_CODE_BUFFER_IMT_TRAMPOLINE, NULL);
6984 mono_stats.imt_thunks_size += code - start;
6986 g_assert (DISTANCE (start, code) <= size);
/* Read general-purpose register REG from a saved MonoContext. */
6991 mono_arch_context_get_int_reg (MonoContext *ctx, int reg)
6993 return ctx->regs [reg];
/* Write VAL into general-purpose register REG of a saved MonoContext. */
6997 mono_arch_context_set_int_reg (MonoContext *ctx, int reg, mgreg_t val)
6999 ctx->regs [reg] = val;
7003 * mono_arch_get_trampolines:
7005 * Return a list of MonoTrampInfo structures describing arch specific trampolines
7009 mono_arch_get_trampolines (gboolean aot)
7011 return mono_arm_get_exception_trampolines (aot);
/*
 * mono_arch_install_handler_block_guard:
 * Replace the saved LR in the handler-block exvar slot with NEW_VALUE so the
 * handler returns into the guard; bail out if the stored value does not point
 * inside the method's code range.
 */
7015 mono_arch_install_handler_block_guard (MonoJitInfo *ji, MonoJitExceptionInfo *clause, MonoContext *ctx, gpointer new_value)
7022 bp = MONO_CONTEXT_GET_BP (ctx);
7023 lr_loc = (gpointer*)(bp + clause->exvar_offset);
7025 old_value = *lr_loc;
/* Only redirect if the old value is a return address within this method. */
7026 if ((char*)old_value < (char*)ji->code_start || (char*)old_value > ((char*)ji->code_start + ji->code_size))
7029 *lr_loc = new_value;
7034 #if defined(MONO_ARCH_SOFT_DEBUG_SUPPORTED)
7036 * mono_arch_set_breakpoint:
7038 * Set a breakpoint at the native code corresponding to JI at NATIVE_OFFSET.
7039 * The location should contain code emitted by OP_SEQ_POINT.
7042 mono_arch_set_breakpoint (MonoJitInfo *ji, guint8 *ip)
7045 guint32 native_offset = ip - (guint8*)ji->code_start;
7046 MonoDebugOptions *opt = mini_get_debug_options ();
/* Soft breakpoints: patch in a BLX through LR (the bp helper is expected in LR). */
7048 if (opt->soft_breakpoints) {
7049 g_assert (!ji->from_aot);
7051 ARM_BLX_REG (code, ARMREG_LR);
7052 mono_arch_flush_icache (code - 4, 4);
/* AOT: arm the per-offset slot in SeqPointInfo with the trigger page. */
7053 } else if (ji->from_aot) {
7054 SeqPointInfo *info = mono_arch_get_seq_point_info (mono_domain_get (), ji->code_start);
7056 g_assert (native_offset % 4 == 0);
7057 g_assert (info->bp_addrs [native_offset / 4] == 0);
7058 info->bp_addrs [native_offset / 4] = bp_trigger_page;
7060 int dreg = ARMREG_LR;
7062 /* Read from another trigger page */
7063 #ifdef USE_JUMP_TABLES
7064 gpointer *jte = mono_jumptable_add_entry ();
7065 code = mono_arm_load_jumptable_entry (code, jte, dreg);
7066 jte [0] = bp_trigger_page;
7068 ARM_LDR_IMM (code, dreg, ARMREG_PC, 0);
7070 *(int*)code = (int)bp_trigger_page;
/* The load faults when the trigger page is protected, raising the bp signal. */
7073 ARM_LDR_IMM (code, dreg, dreg, 0);
7075 mono_arch_flush_icache (code - 16, 16);
7078 /* This is currently implemented by emitting an SWI instruction, which
7079 * qemu/linux seems to convert to a SIGILL.
7081 *(int*)code = (0xef << 24) | 8;
7083 mono_arch_flush_icache (code - 4, 4);
7089 * mono_arch_clear_breakpoint:
7091 * Clear the breakpoint at IP.
7094 mono_arch_clear_breakpoint (MonoJitInfo *ji, guint8 *ip)
7096 MonoDebugOptions *opt = mini_get_debug_options ();
7100 if (opt->soft_breakpoints) {
7101 g_assert (!ji->from_aot);
7104 mono_arch_flush_icache (code - 4, 4);
/* AOT: disarm the SeqPointInfo slot that mono_arch_set_breakpoint armed. */
7105 } else if (ji->from_aot) {
7106 guint32 native_offset = ip - (guint8*)ji->code_start;
7107 SeqPointInfo *info = mono_arch_get_seq_point_info (mono_domain_get (), ji->code_start);
7109 g_assert (native_offset % 4 == 0);
7110 g_assert (info->bp_addrs [native_offset / 4] == bp_trigger_page);
7111 info->bp_addrs [native_offset / 4] = 0;
/* JIT case: overwrite the 4-word breakpoint sequence (body not visible here). */
7113 for (i = 0; i < 4; ++i)
7116 mono_arch_flush_icache (ip, code - ip);
7121 * mono_arch_start_single_stepping:
7123 * Start single stepping.
7126 mono_arch_start_single_stepping (void)
/* Revoke all access so reads of the ss trigger page fault, signalling a step. */
7128 if (ss_trigger_page)
7129 mono_mprotect (ss_trigger_page, mono_pagesize (), 0);
7135 * mono_arch_stop_single_stepping:
7137 * Stop single stepping.
7140 mono_arch_stop_single_stepping (void)
/* Make the trigger page readable again so step probes no longer fault. */
7142 if (ss_trigger_page)
7143 mono_mprotect (ss_trigger_page, mono_pagesize (), MONO_MMAP_READ);
/* Signal raised by trigger-page faults (platform-dependent choice). */
7149 #define DBG_SIGNAL SIGBUS
7151 #define DBG_SIGNAL SIGSEGV
7155 * mono_arch_is_single_step_event:
7157 * Return whenever the machine state in SIGCTX corresponds to a single
7161 mono_arch_is_single_step_event (void *info, void *sigctx)
7163 siginfo_t *sinfo = info;
7165 if (!ss_trigger_page)
7168 /* Sometimes the address is off by 4 */
7169 if (sinfo->si_addr >= ss_trigger_page && (guint8*)sinfo->si_addr <= (guint8*)ss_trigger_page + 128)
7176 * mono_arch_is_breakpoint_event:
7178 * Return whenever the machine state in SIGCTX corresponds to a breakpoint event.
7181 mono_arch_is_breakpoint_event (void *info, void *sigctx)
7183 siginfo_t *sinfo = info;
7185 if (!ss_trigger_page)
/* Only DBG_SIGNAL faults on the bp trigger page count as breakpoint events. */
7188 if (sinfo->si_signo == DBG_SIGNAL) {
7189 /* Sometimes the address is off by 4 */
7190 if (sinfo->si_addr >= bp_trigger_page && (guint8*)sinfo->si_addr <= (guint8*)bp_trigger_page + 128)
7200 * mono_arch_skip_breakpoint:
7202 * See mini-amd64.c for docs.
7205 mono_arch_skip_breakpoint (MonoContext *ctx, MonoJitInfo *ji)
/* Advance PC past the 4-byte faulting instruction to resume execution. */
7207 MONO_CONTEXT_SET_IP (ctx, (guint8*)MONO_CONTEXT_GET_IP (ctx) + 4);
7211 * mono_arch_skip_single_step:
7213 * See mini-amd64.c for docs.
7216 mono_arch_skip_single_step (MonoContext *ctx)
/* Same: skip the single 4-byte trigger-load instruction. */
7218 MONO_CONTEXT_SET_IP (ctx, (guint8*)MONO_CONTEXT_GET_IP (ctx) + 4);
7221 #endif /* MONO_ARCH_SOFT_DEBUG_SUPPORTED */
7224 * mono_arch_get_seq_point_info:
7226 * See mini-amd64.c for docs.
7229 mono_arch_get_seq_point_info (MonoDomain *domain, guint8 *code)
7234 // FIXME: Add a free function
/* Look up a cached SeqPointInfo for CODE under the domain lock. */
7236 mono_domain_lock (domain);
7237 info = g_hash_table_lookup (domain_jit_info (domain)->arch_seq_points,
7239 mono_domain_unlock (domain);
/* Cache miss: allocate one sized by the method's code and register it. */
7242 ji = mono_jit_info_table_find (domain, (char*)code);
7245 info = g_malloc0 (sizeof (SeqPointInfo) + ji->code_size);
7247 info->ss_trigger_page = ss_trigger_page;
7248 info->bp_trigger_page = bp_trigger_page;
7250 mono_domain_lock (domain);
7251 g_hash_table_insert (domain_jit_info (domain)->arch_seq_points,
7253 mono_domain_unlock (domain);
/*
 * mono_arch_init_lmf_ext:
 * Initialize an extended LMF frame: link it to PREV_LMF, tag the link with
 * bit 1 so unwinders recognize a MonoLMFExt, and record the frame address.
 */
7260 mono_arch_init_lmf_ext (MonoLMFExt *ext, gpointer prev_lmf)
7262 ext->lmf.previous_lmf = prev_lmf;
7263 /* Mark that this is a MonoLMFExt */
7264 ext->lmf.previous_lmf = (gpointer)(((gssize)ext->lmf.previous_lmf) | 2);
7265 ext->lmf.sp = (gssize)ext;
7269 * mono_arch_set_target:
7271 * Set the target architecture the JIT backend should generate code for, in the form
7272 * of a GNU target triplet. Only used in AOT mode.
7275 mono_arch_set_target (char *mtriple)
7277 /* The GNU target triple format is not very well documented */
/* Substring checks are cumulative: "armv7" implies v5/v6 support as well. */
7278 if (strstr (mtriple, "armv7")) {
7279 v5_supported = TRUE;
7280 v6_supported = TRUE;
7281 v7_supported = TRUE;
7283 if (strstr (mtriple, "armv6")) {
7284 v5_supported = TRUE;
7285 v6_supported = TRUE;
7287 if (strstr (mtriple, "armv7s")) {
7288 v7s_supported = TRUE;
7290 if (strstr (mtriple, "thumbv7s")) {
7291 v5_supported = TRUE;
7292 v6_supported = TRUE;
7293 v7_supported = TRUE;
7294 v7s_supported = TRUE;
7295 thumb_supported = TRUE;
7296 thumb2_supported = TRUE;
/* Apple targets always have at least v6 + Thumb. */
7298 if (strstr (mtriple, "darwin") || strstr (mtriple, "ios")) {
7299 v5_supported = TRUE;
7300 v6_supported = TRUE;
7301 thumb_supported = TRUE;
7304 if (strstr (mtriple, "gnueabi"))
7305 eabi_supported = TRUE;
/*
 * mono_arch_opcode_supported:
 * Report whether OPCODE can be implemented natively on the current CPU.
 * Integer atomics need ARMv7; floating-point atomic load/store additionally
 * needs VFP hardware (IS_VFP).
 */
7309 mono_arch_opcode_supported (int opcode)
7312 case OP_ATOMIC_ADD_I4:
7313 case OP_ATOMIC_EXCHANGE_I4:
7314 case OP_ATOMIC_CAS_I4:
7315 case OP_ATOMIC_LOAD_I1:
7316 case OP_ATOMIC_LOAD_I2:
7317 case OP_ATOMIC_LOAD_I4:
7318 case OP_ATOMIC_LOAD_U1:
7319 case OP_ATOMIC_LOAD_U2:
7320 case OP_ATOMIC_LOAD_U4:
7321 case OP_ATOMIC_STORE_I1:
7322 case OP_ATOMIC_STORE_I2:
7323 case OP_ATOMIC_STORE_I4:
7324 case OP_ATOMIC_STORE_U1:
7325 case OP_ATOMIC_STORE_U2:
7326 case OP_ATOMIC_STORE_U4:
7327 return v7_supported;
7328 case OP_ATOMIC_LOAD_R4:
7329 case OP_ATOMIC_LOAD_R8:
7330 case OP_ATOMIC_STORE_R4:
7331 case OP_ATOMIC_STORE_R8:
7332 return v7_supported && IS_VFP;
7338 #if defined(ENABLE_GSHAREDVT)
7340 #include "../../../mono-extensions/mono/mini/mini-arm-gsharedvt.c"
7342 #endif /* !MONOTOUCH */