2 * mini-arm.c: ARM backend for the Mono code generator
5 * Paolo Molaro (lupus@ximian.com)
6 * Dietmar Maurer (dietmar@ximian.com)
8 * (C) 2003 Ximian, Inc.
9 * Copyright 2003-2011 Novell, Inc (http://www.novell.com)
10 * Copyright 2011 Xamarin, Inc (http://www.xamarin.com)
15 #include <mono/metadata/abi-details.h>
16 #include <mono/metadata/appdomain.h>
17 #include <mono/metadata/debug-helpers.h>
18 #include <mono/utils/mono-mmap.h>
19 #include <mono/utils/mono-hwcap-arm.h>
25 #include "debugger-agent.h"
27 #include "mono/arch/arm/arm-vfp-codegen.h"
29 /* Sanity check: This makes no sense */
30 #if defined(ARM_FPU_NONE) && (defined(ARM_FPU_VFP) || defined(ARM_FPU_VFP_HARD))
31 #error "ARM_FPU_NONE is defined while one of ARM_FPU_VFP/ARM_FPU_VFP_HARD is defined"
35 * IS_SOFT_FLOAT: Is full software floating point used?
36 * IS_HARD_FLOAT: Is full hardware floating point used?
37 * IS_VFP: Is hardware floating point with software ABI used?
39 * These are not necessarily constants, e.g. IS_SOFT_FLOAT and
40 * IS_VFP may delegate to mono_arch_is_soft_float ().
43 #if defined(ARM_FPU_VFP_HARD)
44 #define IS_SOFT_FLOAT (FALSE)
45 #define IS_HARD_FLOAT (TRUE)
47 #elif defined(ARM_FPU_NONE)
48 #define IS_SOFT_FLOAT (mono_arch_is_soft_float ())
49 #define IS_HARD_FLOAT (FALSE)
50 #define IS_VFP (!mono_arch_is_soft_float ())
52 #define IS_SOFT_FLOAT (FALSE)
53 #define IS_HARD_FLOAT (FALSE)
57 #if defined(__ARM_EABI__) && defined(__linux__) && !defined(PLATFORM_ANDROID) && !defined(__native_client__)
58 #define HAVE_AEABI_READ_TP 1
61 #ifdef __native_client_codegen__
62 const guint kNaClAlignment = kNaClAlignmentARM;
63 const guint kNaClAlignmentMask = kNaClAlignmentMaskARM;
64 gint8 nacl_align_byte = -1; /* 0xff */
67 mono_arch_nacl_pad (guint8 *code, int pad)
69 /* Not yet properly implemented. */
70 g_assert_not_reached ();
75 mono_arch_nacl_skip_nops (guint8 *code)
77 /* Not yet properly implemented. */
78 g_assert_not_reached ();
82 #endif /* __native_client_codegen__ */
84 #define ALIGN_TO(val,align) ((((guint64)val) + ((align) - 1)) & ~((align) - 1))
87 void sys_icache_invalidate (void *start, size_t len);
90 /* This mutex protects architecture specific caches */
91 #define mono_mini_arch_lock() mono_mutex_lock (&mini_arch_mutex)
92 #define mono_mini_arch_unlock() mono_mutex_unlock (&mini_arch_mutex)
93 static mono_mutex_t mini_arch_mutex;
95 static gboolean v5_supported = FALSE;
96 static gboolean v6_supported = FALSE;
97 static gboolean v7_supported = FALSE;
98 static gboolean v7s_supported = FALSE;
99 static gboolean thumb_supported = FALSE;
100 static gboolean thumb2_supported = FALSE;
102 * Whether to use the ARM EABI
104 static gboolean eabi_supported = FALSE;
107 * Whether to use the iphone ABI extensions:
108 * http://developer.apple.com/library/ios/documentation/Xcode/Conceptual/iPhoneOSABIReference/index.html
109 * Basically, r7 is used as a frame pointer and it should point to the saved r7 + lr.
110 * This is required for debugging/profiling tools to work, but it has some overhead so it should
111 * only be turned on in debug builds.
113 static gboolean iphone_abi = FALSE;
116 * The FPU we are generating code for. This is NOT runtime configurable right now,
117 * since some things like MONO_ARCH_CALLEE_FREGS still depend on defines.
119 static MonoArmFPU arm_fpu;
121 #if defined(ARM_FPU_VFP_HARD)
123 * On armhf, d0-d7 are used for argument passing and d8-d15
124 * must be preserved across calls, which leaves us no room
125 * for scratch registers. So we use d14-d15 but back up their
126 * previous contents to a stack slot before using them - see
127 * mono_arm_emit_vfp_scratch_save/_restore ().
129 static int vfp_scratch1 = ARM_VFP_D14;
130 static int vfp_scratch2 = ARM_VFP_D15;
133 * On armel, d0-d7 do not need to be preserved, so we can
134 * freely make use of them as scratch registers.
136 static int vfp_scratch1 = ARM_VFP_D0;
137 static int vfp_scratch2 = ARM_VFP_D1;
142 static volatile int ss_trigger_var = 0;
144 static gpointer single_step_func_wrapper;
145 static gpointer breakpoint_func_wrapper;
148 * The code generated for sequence points reads from this location, which is
149 * made read-only when single stepping is enabled.
151 static gpointer ss_trigger_page;
153 /* Enabled breakpoints read from this trigger page */
154 static gpointer bp_trigger_page;
158 * floating point support: on ARM it is a mess, there are at least 3
159 * different setups, each of which is binary-incompatible with the others.
160 * 1) FPA: old and ugly, but unfortunately what current distros use
161 * the double binary format has the two words swapped. 8 double registers.
162 * Implemented usually by kernel emulation.
163 * 2) softfloat: the compiler emulates all the fp ops. Usually uses the
164 * ugly swapped double format (I guess a softfloat-vfp exists, too, though).
165 * 3) VFP: the new and actually sensible and useful FP support. Implemented
166 * in HW or kernel-emulated, requires new tools. I think this is what symbian uses.
168 * We do not care about FPA. We will support soft float and VFP.
170 int mono_exc_esp_offset = 0;
172 #define arm_is_imm12(v) ((v) > -4096 && (v) < 4096)
173 #define arm_is_imm8(v) ((v) > -256 && (v) < 256)
174 #define arm_is_fpimm8(v) ((v) >= -1020 && (v) <= 1020)
176 #define LDR_MASK ((0xf << ARMCOND_SHIFT) | (3 << 26) | (1 << 22) | (1 << 20) | (15 << 12))
177 #define LDR_PC_VAL ((ARMCOND_AL << ARMCOND_SHIFT) | (1 << 26) | (0 << 22) | (1 << 20) | (15 << 12))
178 #define IS_LDR_PC(val) (((val) & LDR_MASK) == LDR_PC_VAL)
180 //#define DEBUG_IMT 0
183 static void mono_arch_compute_omit_fp (MonoCompile *cfg);
187 mono_arch_regname (int reg)
189 static const char * rnames[] = {
190 "arm_r0", "arm_r1", "arm_r2", "arm_r3", "arm_v1",
191 "arm_v2", "arm_v3", "arm_v4", "arm_v5", "arm_v6",
192 "arm_v7", "arm_fp", "arm_ip", "arm_sp", "arm_lr",
195 if (reg >= 0 && reg < 16)
201 mono_arch_fregname (int reg)
203 static const char * rnames[] = {
204 "arm_f0", "arm_f1", "arm_f2", "arm_f3", "arm_f4",
205 "arm_f5", "arm_f6", "arm_f7", "arm_f8", "arm_f9",
206 "arm_f10", "arm_f11", "arm_f12", "arm_f13", "arm_f14",
207 "arm_f15", "arm_f16", "arm_f17", "arm_f18", "arm_f19",
208 "arm_f20", "arm_f21", "arm_f22", "arm_f23", "arm_f24",
209 "arm_f25", "arm_f26", "arm_f27", "arm_f28", "arm_f29",
212 if (reg >= 0 && reg < 32)
/*
 * emit_big_add: emit ARM code computing dreg = sreg + imm for an
 * arbitrary 32-bit immediate.  A single ADD with a rotated 8-bit
 * immediate is used when the encoding allows; otherwise the immediate
 * is materialized in a scratch register first (IP, or dreg itself).
 * NOTE(review): this excerpt is missing interior lines (braces, the
 * else-branch selection, the final return) — confirm control flow
 * against the full source before editing.
 */
220 emit_big_add (guint8 *code, int dreg, int sreg, int imm)
222 int imm8, rot_amount;
/* Fast path: imm fits ARM's rotated-immediate encoding. */
223 if ((imm8 = mono_arm_is_rotated_imm8 (imm, &rot_amount)) >= 0) {
224 ARM_ADD_REG_IMM (code, dreg, sreg, imm8, rot_amount);
/* Load imm into IP, then register-register add (clobbers IP). */
228 code = mono_arm_emit_load_imm (code, ARMREG_IP, imm);
229 ARM_ADD_REG_REG (code, dreg, sreg, ARMREG_IP);
/* Alternative: build imm directly in dreg, then add sreg into it. */
231 code = mono_arm_emit_load_imm (code, dreg, imm);
232 ARM_ADD_REG_REG (code, dreg, dreg, sreg);
/*
 * emit_sub_imm: emit ARM code computing dreg = sreg - imm, mirroring
 * emit_big_add above.  Uses a single SUB with a rotated immediate when
 * encodable; otherwise goes through a scratch register.
 * NOTE(review): interior lines (braces/else selection/return) are
 * missing from this excerpt — verify against the full source.
 */
237 /* If dreg == sreg, this clobbers IP */
239 emit_sub_imm (guint8 *code, int dreg, int sreg, int imm)
241 int imm8, rot_amount;
/* Fast path: imm fits the rotated-immediate encoding. */
242 if ((imm8 = mono_arm_is_rotated_imm8 (imm, &rot_amount)) >= 0) {
243 ARM_SUB_REG_IMM (code, dreg, sreg, imm8, rot_amount);
/* Load imm into IP, then dreg = sreg - IP (this is the IP clobber). */
247 code = mono_arm_emit_load_imm (code, ARMREG_IP, imm);
248 ARM_SUB_REG_REG (code, dreg, sreg, ARMREG_IP);
/* Alternative path: build imm in dreg, then subtract via dreg. */
250 code = mono_arm_emit_load_imm (code, dreg, imm);
251 ARM_SUB_REG_REG (code, dreg, dreg, sreg);
/*
 * emit_memcpy: emit code copying 'size' bytes from sreg+soffset to
 * dreg+doffset.  Copies larger than four pointers use a word-copy loop
 * on r0-r3; smaller copies are emitted as unrolled LDR/STR pairs
 * through LR, falling back to rebasing into r0/r1 when the offsets do
 * not fit the imm12 addressing form.
 * NOTE(review): this excerpt omits interior lines (loop tail handling,
 * the unrolled-copy loop header, offset increments) — consult the full
 * source before relying on the exact sequence.
 */
257 emit_memcpy (guint8 *code, int size, int dreg, int doffset, int sreg, int soffset)
259 /* we can use r0-r3, since this is called only for incoming args on the stack */
260 if (size > sizeof (gpointer) * 4) {
/* Set up r0 = source, r1 = destination, r2 = byte count. */
262 code = emit_big_add (code, ARMREG_R0, sreg, soffset);
263 code = emit_big_add (code, ARMREG_R1, dreg, doffset);
264 start_loop = code = mono_arm_emit_load_imm (code, ARMREG_R2, size);
/* Copy one word per iteration, decrementing r2 until it hits zero. */
265 ARM_LDR_IMM (code, ARMREG_R3, ARMREG_R0, 0);
266 ARM_STR_IMM (code, ARMREG_R3, ARMREG_R1, 0);
267 ARM_ADD_REG_IMM8 (code, ARMREG_R0, ARMREG_R0, 4);
268 ARM_ADD_REG_IMM8 (code, ARMREG_R1, ARMREG_R1, 4);
269 ARM_SUBS_REG_IMM8 (code, ARMREG_R2, ARMREG_R2, 4);
/* Branch back while the SUBS above left NZ flags != zero. */
270 ARM_B_COND (code, ARMCOND_NE, 0);
271 arm_patch (code - 4, start_loop);
/* Small copy: offsets encodable as imm12 — unrolled LDR/STR via LR. */
274 if (arm_is_imm12 (doffset) && arm_is_imm12 (doffset + size) &&
275 arm_is_imm12 (soffset) && arm_is_imm12 (soffset + size)) {
277 ARM_LDR_IMM (code, ARMREG_LR, sreg, soffset);
278 ARM_STR_IMM (code, ARMREG_LR, dreg, doffset);
/* Offsets too large: rebase source/dest into r0/r1, then copy from 0. */
284 code = emit_big_add (code, ARMREG_R0, sreg, soffset);
285 code = emit_big_add (code, ARMREG_R1, dreg, doffset);
286 doffset = soffset = 0;
288 ARM_LDR_IMM (code, ARMREG_LR, ARMREG_R0, soffset);
289 ARM_STR_IMM (code, ARMREG_LR, ARMREG_R1, doffset);
/* All bytes must have been consumed by the paths above. */
295 g_assert (size == 0);
300 emit_call_reg (guint8 *code, int reg)
303 ARM_BLX_REG (code, reg);
305 #ifdef USE_JUMP_TABLES
306 g_assert_not_reached ();
308 ARM_MOV_REG_REG (code, ARMREG_LR, ARMREG_PC);
312 ARM_MOV_REG_REG (code, ARMREG_PC, reg);
318 emit_call_seq (MonoCompile *cfg, guint8 *code)
320 #ifdef USE_JUMP_TABLES
321 code = mono_arm_patchable_bl (code, ARMCOND_AL);
323 if (cfg->method->dynamic) {
324 ARM_LDR_IMM (code, ARMREG_IP, ARMREG_PC, 0);
326 *(gpointer*)code = NULL;
328 code = emit_call_reg (code, ARMREG_IP);
337 mono_arm_patchable_b (guint8 *code, int cond)
339 #ifdef USE_JUMP_TABLES
342 jte = mono_jumptable_add_entry ();
343 code = mono_arm_load_jumptable_entry (code, jte, ARMREG_IP);
344 ARM_BX_COND (code, cond, ARMREG_IP);
346 ARM_B_COND (code, cond, 0);
352 mono_arm_patchable_bl (guint8 *code, int cond)
354 #ifdef USE_JUMP_TABLES
357 jte = mono_jumptable_add_entry ();
358 code = mono_arm_load_jumptable_entry (code, jte, ARMREG_IP);
359 ARM_BLX_REG_COND (code, cond, ARMREG_IP);
361 ARM_BL_COND (code, cond, 0);
366 #ifdef USE_JUMP_TABLES
368 mono_arm_load_jumptable_entry_addr (guint8 *code, gpointer *jte, ARMReg reg)
370 ARM_MOVW_REG_IMM (code, reg, GPOINTER_TO_UINT(jte) & 0xffff);
371 ARM_MOVT_REG_IMM (code, reg, (GPOINTER_TO_UINT(jte) >> 16) & 0xffff);
376 mono_arm_load_jumptable_entry (guint8 *code, gpointer* jte, ARMReg reg)
378 code = mono_arm_load_jumptable_entry_addr (code, jte, reg);
379 ARM_LDR_IMM (code, reg, reg, 0);
385 emit_move_return_value (MonoCompile *cfg, MonoInst *ins, guint8 *code)
387 switch (ins->opcode) {
390 case OP_FCALL_MEMBASE:
392 MonoType *sig_ret = mini_type_get_underlying_type (NULL, ((MonoCallInst*)ins)->signature->ret);
393 if (sig_ret->type == MONO_TYPE_R4) {
395 ARM_CVTS (code, ins->dreg, ARM_VFP_F0);
397 ARM_FMSR (code, ins->dreg, ARMREG_R0);
398 ARM_CVTS (code, ins->dreg, ins->dreg);
402 ARM_CPYD (code, ins->dreg, ARM_VFP_D0);
404 ARM_FMDRR (code, ARMREG_R0, ARMREG_R1, ins->dreg);
/*
 * emit_save_lmf: emit the method-prologue code that links a new MonoLMF
 * (Last Managed Frame) frame into the thread's LMF list.  It first
 * obtains the address of the thread-local LMF pointer (via
 * __aeabi_read_tp + TLS offset, an inlined pthread_getspecific, or a
 * call to mono_get_lmf_addr depending on platform/wrapper type), then
 * builds the MonoLMF on the stack at lmf_offset and splices it in.
 * NOTE(review): interior lines are missing from this excerpt (braces,
 * some else/endif structure) — confirm against the full source.
 */
417 * Emit code to push an LMF structure on the LMF stack.
418 * On arm, this is intermixed with the initialization of other fields of the structure.
421 emit_save_lmf (MonoCompile *cfg, guint8 *code, gint32 lmf_offset)
423 gboolean get_lmf_fast = FALSE;
426 #ifdef HAVE_AEABI_READ_TP
/* Fast path: read the thread pointer via the EABI helper and index to
 * the cached lmf_addr TLS slot. */
427 gint32 lmf_addr_tls_offset = mono_get_lmf_addr_tls_offset ();
429 if (lmf_addr_tls_offset != -1) {
432 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_INTERNAL_METHOD,
433 (gpointer)"__aeabi_read_tp");
434 code = emit_call_seq (cfg, code);
436 ARM_LDR_IMM (code, ARMREG_R0, ARMREG_R0, lmf_addr_tls_offset);
442 if (cfg->method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) {
445 /* Inline mono_get_lmf_addr () */
446 /* jit_tls = pthread_getspecific (mono_jit_tls_id); lmf_addr = &jit_tls->lmf; */
448 /* Load mono_jit_tls_id */
450 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_JIT_TLS_ID, NULL);
/* PC-relative load of the TLS key, patched in at the literal below. */
451 ARM_LDR_IMM (code, ARMREG_R0, ARMREG_PC, 0);
453 *(gpointer*)code = NULL;
455 ARM_LDR_REG_REG (code, ARMREG_R0, ARMREG_PC, ARMREG_R0);
456 /* call pthread_getspecific () */
457 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_INTERNAL_METHOD,
458 (gpointer)"pthread_getspecific");
459 code = emit_call_seq (cfg, code);
460 /* lmf_addr = &jit_tls->lmf */
461 lmf_offset = MONO_STRUCT_OFFSET (MonoJitTlsData, lmf);
462 g_assert (arm_is_imm8 (lmf_offset));
463 ARM_ADD_REG_IMM (code, ARMREG_R0, ARMREG_R0, lmf_offset, 0);
/* Slow path: just call mono_get_lmf_addr (); lmf_addr comes back in r0. */
470 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_INTERNAL_METHOD,
471 (gpointer)"mono_get_lmf_addr");
472 code = emit_call_seq (cfg, code);
474 /* we build the MonoLMF structure on the stack - see mini-arm.h */
475 /* lmf_offset is the offset from the previous stack pointer,
476 * alloc_size is the total stack space allocated, so the offset
477 * of MonoLMF from the current stack ptr is alloc_size - lmf_offset.
478 * The pointer to the struct is put in r1 (new_lmf).
479 * ip is used as scratch
480 * The callee-saved registers are already in the MonoLMF structure
482 code = emit_big_add (code, ARMREG_R1, ARMREG_SP, lmf_offset);
483 /* r0 is the result from mono_get_lmf_addr () */
484 ARM_STR_IMM (code, ARMREG_R0, ARMREG_R1, MONO_STRUCT_OFFSET (MonoLMF, lmf_addr));
485 /* new_lmf->previous_lmf = *lmf_addr */
486 ARM_LDR_IMM (code, ARMREG_IP, ARMREG_R0, MONO_STRUCT_OFFSET (MonoLMF, previous_lmf));
487 ARM_STR_IMM (code, ARMREG_IP, ARMREG_R1, MONO_STRUCT_OFFSET (MonoLMF, previous_lmf));
488 /* *(lmf_addr) = r1 */
/* NOTE(review): this store addresses lmf_addr at the previous_lmf
 * offset — presumably previous_lmf is the first field of MonoLMF so
 * this equals *lmf_addr; confirm against mini-arm.h. */
489 ARM_STR_IMM (code, ARMREG_R1, ARMREG_R0, MONO_STRUCT_OFFSET (MonoLMF, previous_lmf));
490 /* Skip method (only needed for trampoline LMF frames) */
491 ARM_STR_IMM (code, ARMREG_SP, ARMREG_R1, MONO_STRUCT_OFFSET (MonoLMF, sp));
492 ARM_STR_IMM (code, ARMREG_FP, ARMREG_R1, MONO_STRUCT_OFFSET (MonoLMF, fp));
493 /* save the current IP */
494 ARM_MOV_REG_REG (code, ARMREG_IP, ARMREG_PC);
495 ARM_STR_IMM (code, ARMREG_IP, ARMREG_R1, MONO_STRUCT_OFFSET (MonoLMF, ip));
/* Tell the precise GC none of the LMF stack slots hold references. */
497 for (i = 0; i < sizeof (MonoLMF); i += sizeof (mgreg_t))
498 mini_gc_set_slot_type_from_fp (cfg, lmf_offset + i, SLOT_NOREF);
509 emit_float_args (MonoCompile *cfg, MonoCallInst *inst, guint8 *code, int *max_len, guint *offset)
513 for (list = inst->float_args; list; list = list->next) {
514 FloatArgData *fad = list->data;
515 MonoInst *var = get_vreg_to_inst (cfg, fad->vreg);
516 gboolean imm = arm_is_fpimm8 (var->inst_offset);
518 /* 4+1 insns for emit_big_add () and 1 for FLDS. */
524 if (*offset + *max_len > cfg->code_size) {
525 cfg->code_size += *max_len;
526 cfg->native_code = g_realloc (cfg->native_code, cfg->code_size);
528 code = cfg->native_code + *offset;
532 code = emit_big_add (code, ARMREG_LR, var->inst_basereg, var->inst_offset);
533 ARM_FLDS (code, fad->hreg, ARMREG_LR, 0);
535 ARM_FLDS (code, fad->hreg, var->inst_basereg, var->inst_offset);
537 *offset = code - cfg->native_code;
544 mono_arm_emit_vfp_scratch_save (MonoCompile *cfg, guint8 *code, int reg)
548 g_assert (reg == vfp_scratch1 || reg == vfp_scratch2);
550 inst = (MonoInst *) cfg->arch.vfp_scratch_slots [reg == vfp_scratch1 ? 0 : 1];
553 if (!arm_is_fpimm8 (inst->inst_offset)) {
554 code = emit_big_add (code, ARMREG_LR, inst->inst_basereg, inst->inst_offset);
555 ARM_FSTD (code, reg, ARMREG_LR, 0);
557 ARM_FSTD (code, reg, inst->inst_basereg, inst->inst_offset);
564 mono_arm_emit_vfp_scratch_restore (MonoCompile *cfg, guint8 *code, int reg)
568 g_assert (reg == vfp_scratch1 || reg == vfp_scratch2);
570 inst = (MonoInst *) cfg->arch.vfp_scratch_slots [reg == vfp_scratch1 ? 0 : 1];
573 if (!arm_is_fpimm8 (inst->inst_offset)) {
574 code = emit_big_add (code, ARMREG_LR, inst->inst_basereg, inst->inst_offset);
575 ARM_FLDD (code, reg, ARMREG_LR, 0);
577 ARM_FLDD (code, reg, inst->inst_basereg, inst->inst_offset);
586 * Emit code to pop an LMF structure from the LMF stack.
589 emit_restore_lmf (MonoCompile *cfg, guint8 *code, gint32 lmf_offset)
593 if (lmf_offset < 32) {
594 basereg = cfg->frame_reg;
599 code = emit_big_add (code, ARMREG_R2, cfg->frame_reg, lmf_offset);
602 /* ip = previous_lmf */
603 ARM_LDR_IMM (code, ARMREG_IP, basereg, offset + MONO_STRUCT_OFFSET (MonoLMF, previous_lmf));
605 ARM_LDR_IMM (code, ARMREG_LR, basereg, offset + MONO_STRUCT_OFFSET (MonoLMF, lmf_addr));
606 /* *(lmf_addr) = previous_lmf */
607 ARM_STR_IMM (code, ARMREG_IP, ARMREG_LR, MONO_STRUCT_OFFSET (MonoLMF, previous_lmf));
612 #endif /* #ifndef DISABLE_JIT */
615 * mono_arch_get_argument_info:
616 * @csig: a method signature
617 * @param_count: the number of parameters to consider
618 * @arg_info: an array to store the result infos
620 * Gathers information on parameters such as size, alignment and
621 * padding. arg_info should be large enough to hold param_count + 1 entries.
623 * Returns the size of the activation frame.
626 mono_arch_get_argument_info (MonoGenericSharingContext *gsctx, MonoMethodSignature *csig, int param_count, MonoJitArgumentInfo *arg_info)
628 int k, frame_size = 0;
629 guint32 size, align, pad;
633 t = mini_type_get_underlying_type (gsctx, csig->ret);
634 if (MONO_TYPE_ISSTRUCT (t)) {
635 frame_size += sizeof (gpointer);
639 arg_info [0].offset = offset;
642 frame_size += sizeof (gpointer);
646 arg_info [0].size = frame_size;
648 for (k = 0; k < param_count; k++) {
649 size = mini_type_stack_size_full (NULL, csig->params [k], &align, csig->pinvoke);
651 /* ignore alignment for now */
654 frame_size += pad = (align - (frame_size & (align - 1))) & (align - 1);
655 arg_info [k].pad = pad;
657 arg_info [k + 1].pad = 0;
658 arg_info [k + 1].size = size;
660 arg_info [k + 1].offset = offset;
664 align = MONO_ARCH_FRAME_ALIGNMENT;
665 frame_size += pad = (align - (frame_size & (align - 1))) & (align - 1);
666 arg_info [k].pad = pad;
671 #define MAX_ARCH_DELEGATE_PARAMS 3
674 get_delegate_invoke_impl (gboolean has_target, gboolean param_count, guint32 *code_size)
676 guint8 *code, *start;
679 start = code = mono_global_codeman_reserve (12);
681 /* Replace the this argument with the target */
682 ARM_LDR_IMM (code, ARMREG_IP, ARMREG_R0, MONO_STRUCT_OFFSET (MonoDelegate, method_ptr));
683 ARM_LDR_IMM (code, ARMREG_R0, ARMREG_R0, MONO_STRUCT_OFFSET (MonoDelegate, target));
684 ARM_MOV_REG_REG (code, ARMREG_PC, ARMREG_IP);
686 g_assert ((code - start) <= 12);
688 mono_arch_flush_icache (start, 12);
692 size = 8 + param_count * 4;
693 start = code = mono_global_codeman_reserve (size);
695 ARM_LDR_IMM (code, ARMREG_IP, ARMREG_R0, MONO_STRUCT_OFFSET (MonoDelegate, method_ptr));
696 /* slide down the arguments */
697 for (i = 0; i < param_count; ++i) {
698 ARM_MOV_REG_REG (code, (ARMREG_R0 + i), (ARMREG_R0 + i + 1));
700 ARM_MOV_REG_REG (code, ARMREG_PC, ARMREG_IP);
702 g_assert ((code - start) <= size);
704 mono_arch_flush_icache (start, size);
708 *code_size = code - start;
714 * mono_arch_get_delegate_invoke_impls:
716 * Return a list of MonoAotTrampInfo structures for the delegate invoke impl
720 mono_arch_get_delegate_invoke_impls (void)
728 code = get_delegate_invoke_impl (TRUE, 0, &code_len);
729 res = g_slist_prepend (res, mono_tramp_info_create ("delegate_invoke_impl_has_target", code, code_len, NULL, NULL));
731 for (i = 0; i <= MAX_ARCH_DELEGATE_PARAMS; ++i) {
732 code = get_delegate_invoke_impl (FALSE, i, &code_len);
733 tramp_name = g_strdup_printf ("delegate_invoke_impl_target_%d", i);
734 res = g_slist_prepend (res, mono_tramp_info_create (tramp_name, code, code_len, NULL, NULL));
742 mono_arch_get_delegate_invoke_impl (MonoMethodSignature *sig, gboolean has_target)
744 guint8 *code, *start;
747 /* FIXME: Support more cases */
748 sig_ret = mini_type_get_underlying_type (NULL, sig->ret);
749 if (MONO_TYPE_ISSTRUCT (sig_ret))
753 static guint8* cached = NULL;
754 mono_mini_arch_lock ();
756 mono_mini_arch_unlock ();
761 start = mono_aot_get_trampoline ("delegate_invoke_impl_has_target");
763 start = get_delegate_invoke_impl (TRUE, 0, NULL);
765 mono_mini_arch_unlock ();
768 static guint8* cache [MAX_ARCH_DELEGATE_PARAMS + 1] = {NULL};
771 if (sig->param_count > MAX_ARCH_DELEGATE_PARAMS)
773 for (i = 0; i < sig->param_count; ++i)
774 if (!mono_is_regsize_var (sig->params [i]))
777 mono_mini_arch_lock ();
778 code = cache [sig->param_count];
780 mono_mini_arch_unlock ();
785 char *name = g_strdup_printf ("delegate_invoke_impl_target_%d", sig->param_count);
786 start = mono_aot_get_trampoline (name);
789 start = get_delegate_invoke_impl (FALSE, sig->param_count, NULL);
791 cache [sig->param_count] = start;
792 mono_mini_arch_unlock ();
800 mono_arch_get_delegate_virtual_invoke_impl (MonoMethodSignature *sig, MonoMethod *method, int offset, gboolean load_imt_reg)
806 mono_arch_get_this_arg_from_call (mgreg_t *regs, guint8 *code)
808 return (gpointer)regs [ARMREG_R0];
812 * Initialize the cpu to execute managed code.
815 mono_arch_cpu_init (void)
817 i8_align = MONO_ABI_ALIGNOF (gint64);
818 #ifdef MONO_CROSS_COMPILE
819 /* Need to set the alignment of i8 since it can different on the target */
820 #ifdef TARGET_ANDROID
822 mono_type_set_alignment (MONO_TYPE_I8, i8_align);
/*
 * create_function_wrapper: generate a small trampoline that builds a
 * full MonoContext on the stack, calls 'function' with a pointer to it
 * (used for the soft-debugger single-step/breakpoint callbacks), then
 * restores every register — including pc — from the possibly-modified
 * context, so the debugger can redirect execution.
 * NOTE(review): interior lines are missing from this excerpt (braces,
 * the return of 'start') — confirm against the full source.
 */
828 create_function_wrapper (gpointer function)
830 guint8 *start, *code;
832 start = code = mono_global_codeman_reserve (96);
835 * Construct the MonoContext structure on the stack.
/* Reserve the context frame below sp. */
838 ARM_SUB_REG_IMM8 (code, ARMREG_SP, ARMREG_SP, sizeof (MonoContext));
840 /* save ip, lr and pc into their corresponding ctx.regs slots. */
841 ARM_STR_IMM (code, ARMREG_IP, ARMREG_SP, MONO_STRUCT_OFFSET (MonoContext, regs) + sizeof (mgreg_t) * ARMREG_IP);
842 ARM_STR_IMM (code, ARMREG_LR, ARMREG_SP, MONO_STRUCT_OFFSET (MonoContext, regs) + 4 * ARMREG_LR);
/* lr is also stored in the pc slot: it is the caller's resume address. */
843 ARM_STR_IMM (code, ARMREG_LR, ARMREG_SP, MONO_STRUCT_OFFSET (MonoContext, regs) + 4 * ARMREG_PC);
845 /* save r0..r10 and fp */
846 ARM_ADD_REG_IMM8 (code, ARMREG_IP, ARMREG_SP, MONO_STRUCT_OFFSET (MonoContext, regs));
847 ARM_STM (code, ARMREG_IP, 0x0fff);
849 /* now we can update fp. */
850 ARM_MOV_REG_REG (code, ARMREG_FP, ARMREG_SP);
852 /* make ctx.esp hold the actual value of sp at the beginning of this method. */
853 ARM_ADD_REG_IMM8 (code, ARMREG_R0, ARMREG_FP, sizeof (MonoContext));
854 ARM_STR_IMM (code, ARMREG_R0, ARMREG_IP, 4 * ARMREG_SP);
855 ARM_STR_IMM (code, ARMREG_R0, ARMREG_FP, MONO_STRUCT_OFFSET (MonoContext, regs) + 4 * ARMREG_SP);
857 /* make ctx.eip hold the address of the call. */
858 ARM_SUB_REG_IMM8 (code, ARMREG_LR, ARMREG_LR, 4);
859 ARM_STR_IMM (code, ARMREG_LR, ARMREG_SP, MONO_STRUCT_OFFSET (MonoContext, pc));
861 /* r0 now points to the MonoContext */
862 ARM_MOV_REG_REG (code, ARMREG_R0, ARMREG_FP);
/* Load the target function address into IP, either via a jump-table
 * entry or via a PC-relative literal embedded right after the load. */
865 #ifdef USE_JUMP_TABLES
867 gpointer *jte = mono_jumptable_add_entry ();
868 code = mono_arm_load_jumptable_entry (code, jte, ARMREG_IP);
872 ARM_LDR_IMM (code, ARMREG_IP, ARMREG_PC, 0);
874 *(gpointer*)code = function;
877 ARM_BLX_REG (code, ARMREG_IP);
879 /* we're back; save ctx.eip and ctx.esp into the corresponding regs slots. */
880 ARM_LDR_IMM (code, ARMREG_R0, ARMREG_FP, MONO_STRUCT_OFFSET (MonoContext, pc));
881 ARM_STR_IMM (code, ARMREG_R0, ARMREG_FP, MONO_STRUCT_OFFSET (MonoContext, regs) + 4 * ARMREG_LR);
882 ARM_STR_IMM (code, ARMREG_R0, ARMREG_FP, MONO_STRUCT_OFFSET (MonoContext, regs) + 4 * ARMREG_PC);
884 /* make ip point to the regs array, then restore everything, including pc. */
885 ARM_ADD_REG_IMM8 (code, ARMREG_IP, ARMREG_FP, MONO_STRUCT_OFFSET (MonoContext, regs));
886 ARM_LDM (code, ARMREG_IP, 0xffff);
888 mono_arch_flush_icache (start, code - start);
/*
 * mono_arch_init: one-time, process-wide ARM backend initialization.
 * Sets up the arch mutex, debugger wrappers and trigger pages,
 * registers the ARM-specific JIT icalls, selects the FPU mode
 * (VFP-hard / VFP / soft-float fallback), records CPU feature flags
 * from mono_hwcap, and finally lets the MONO_CPU_ARCH environment
 * variable override the detected architecture level.
 * NOTE(review): interior lines (braces, some #else/#endif structure)
 * are missing from this excerpt — confirm against the full source.
 */
894 * Initialize architecture specific code.
897 mono_arch_init (void)
899 const char *cpu_arch;
901 mono_mutex_init_recursive (&mini_arch_mutex);
902 #ifdef MONO_ARCH_SOFT_DEBUG_SUPPORTED
/* Soft breakpoints call into the debugger agent through generated
 * wrappers instead of faulting on trigger pages. */
903 if (mini_get_debug_options ()->soft_breakpoints) {
904 single_step_func_wrapper = create_function_wrapper (debugger_agent_single_step_from_context);
905 breakpoint_func_wrapper = create_function_wrapper (debugger_agent_breakpoint_from_context);
/* Hardware-assisted sequence points: ss page is readable until single
 * stepping flips it; bp page is always unreadable so reads fault. */
910 ss_trigger_page = mono_valloc (NULL, mono_pagesize (), MONO_MMAP_READ|MONO_MMAP_32BIT);
911 bp_trigger_page = mono_valloc (NULL, mono_pagesize (), MONO_MMAP_READ|MONO_MMAP_32BIT);
912 mono_mprotect (bp_trigger_page, mono_pagesize (), 0);
915 mono_aot_register_jit_icall ("mono_arm_throw_exception", mono_arm_throw_exception);
916 mono_aot_register_jit_icall ("mono_arm_throw_exception_by_token", mono_arm_throw_exception_by_token);
917 mono_aot_register_jit_icall ("mono_arm_resume_unwind", mono_arm_resume_unwind);
918 #if defined(ENABLE_GSHAREDVT)
919 mono_aot_register_jit_icall ("mono_arm_start_gsharedvt_call", mono_arm_start_gsharedvt_call);
922 #if defined(__ARM_EABI__)
923 eabi_supported = TRUE;
/* Compile-time FPU selection; ARM_FPU_NONE builds may still use VFP
 * at runtime when the hardware has it (see below). */
926 #if defined(ARM_FPU_VFP_HARD)
927 arm_fpu = MONO_ARM_FPU_VFP_HARD;
929 arm_fpu = MONO_ARM_FPU_VFP;
931 #if defined(ARM_FPU_NONE) && !defined(__APPLE__)
932 /* If we're compiling with a soft float fallback and it
933 turns out that no VFP unit is available, we need to
934 switch to soft float. We don't do this for iOS, since
935 iOS devices always have a VFP unit. */
936 if (!mono_hwcap_arm_has_vfp)
937 arm_fpu = MONO_ARM_FPU_NONE;
941 v5_supported = mono_hwcap_arm_is_v5;
942 v6_supported = mono_hwcap_arm_is_v6;
943 v7_supported = mono_hwcap_arm_is_v7;
944 v7s_supported = mono_hwcap_arm_is_v7s;
946 #if defined(__APPLE__)
947 /* iOS is special-cased here because we don't yet
948 have a way to properly detect CPU features on it. */
949 thumb_supported = TRUE;
952 thumb_supported = mono_hwcap_arm_has_thumb;
953 thumb2_supported = mono_hwcap_arm_has_thumb2;
956 /* Format: armv(5|6|7[s])[-thumb[2]] */
957 cpu_arch = g_getenv ("MONO_CPU_ARCH");
959 /* Do this here so it overrides any detection. */
961 if (strncmp (cpu_arch, "armv", 4) == 0) {
962 v5_supported = cpu_arch [4] >= '5';
963 v6_supported = cpu_arch [4] >= '6';
964 v7_supported = cpu_arch [4] >= '7';
965 v7s_supported = strncmp (cpu_arch, "armv7s", 6) == 0;
968 thumb_supported = strstr (cpu_arch, "thumb") != NULL;
969 thumb2_supported = strstr (cpu_arch, "thumb2") != NULL;
974 * Cleanup architecture specific code.
977 mono_arch_cleanup (void)
982 * This function returns the optimizations supported on this cpu.
985 mono_arch_cpu_optimizations (guint32 *exclude_mask)
987 /* no arm-specific optimizations yet */
993 * This function test for all SIMD functions supported.
995 * Returns a bitmask corresponding to all supported versions.
999 mono_arch_cpu_enumerate_simd_versions (void)
1001 /* SIMD is currently unimplemented */
1009 mono_arch_opcode_needs_emulation (MonoCompile *cfg, int opcode)
1011 if (v7s_supported) {
1025 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
1027 mono_arch_is_soft_float (void)
1029 return arm_fpu == MONO_ARM_FPU_NONE;
1034 mono_arm_is_hard_float (void)
1036 return arm_fpu == MONO_ARM_FPU_VFP_HARD;
1040 is_regsize_var (MonoGenericSharingContext *gsctx, MonoType *t) {
1043 t = mini_type_get_underlying_type (gsctx, t);
1050 case MONO_TYPE_FNPTR:
1052 case MONO_TYPE_OBJECT:
1053 case MONO_TYPE_STRING:
1054 case MONO_TYPE_CLASS:
1055 case MONO_TYPE_SZARRAY:
1056 case MONO_TYPE_ARRAY:
1058 case MONO_TYPE_GENERICINST:
1059 if (!mono_type_generic_inst_is_valuetype (t))
1062 case MONO_TYPE_VALUETYPE:
1069 mono_arch_get_allocatable_int_vars (MonoCompile *cfg)
1074 for (i = 0; i < cfg->num_varinfo; i++) {
1075 MonoInst *ins = cfg->varinfo [i];
1076 MonoMethodVar *vmv = MONO_VARINFO (cfg, i);
1079 if (vmv->range.first_use.abs_pos >= vmv->range.last_use.abs_pos)
1082 if (ins->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT) || (ins->opcode != OP_LOCAL && ins->opcode != OP_ARG))
1085 /* we can only allocate 32 bit values */
1086 if (is_regsize_var (cfg->generic_sharing_context, ins->inst_vtype)) {
1087 g_assert (MONO_VARINFO (cfg, i)->reg == -1);
1088 g_assert (i == vmv->idx);
1089 vars = mono_varlist_insert_sorted (cfg, vars, vmv, FALSE);
1097 mono_arch_get_global_int_regs (MonoCompile *cfg)
1101 mono_arch_compute_omit_fp (cfg);
1104 * FIXME: Interface calls might go through a static rgctx trampoline which
1105 * sets V5, but it doesn't save it, so we need to save it ourselves, and
1108 if (cfg->flags & MONO_CFG_HAS_CALLS)
1109 cfg->uses_rgctx_reg = TRUE;
1111 if (cfg->arch.omit_fp)
1112 regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_FP));
1113 regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_V1));
1114 regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_V2));
1115 regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_V3));
1117 /* V4=R7 is used as a frame pointer, but V7=R10 is preserved */
1118 regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_V7));
1120 regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_V4));
1121 if (!(cfg->compile_aot || cfg->uses_rgctx_reg || COMPILE_LLVM (cfg)))
1122 /* V5 is reserved for passing the vtable/rgctx/IMT method */
1123 regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_V5));
1124 /*regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_V6));*/
1125 /*regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_V7));*/
1131 * mono_arch_regalloc_cost:
1133 * Return the cost, in number of memory references, of the action of
1134 * allocating the variable VMV into a register during global register
1138 mono_arch_regalloc_cost (MonoCompile *cfg, MonoMethodVar *vmv)
1144 #endif /* #ifndef DISABLE_JIT */
1146 #ifndef __GNUC_PREREQ
1147 #define __GNUC_PREREQ(maj, min) (0)
1151 mono_arch_flush_icache (guint8 *code, gint size)
1153 #if defined(__native_client__)
1154 // For Native Client we don't have to flush i-cache here,
1155 // as it's being done by dyncode interface.
1158 #ifdef MONO_CROSS_COMPILE
1160 sys_icache_invalidate (code, size);
1161 #elif __GNUC_PREREQ(4, 3)
1162 __builtin___clear_cache (code, code + size);
1163 #elif __GNUC_PREREQ(4, 1)
1164 __clear_cache (code, code + size);
1165 #elif defined(PLATFORM_ANDROID)
1166 const int syscall = 0xf0002;
1174 : "r" (code), "r" (code + size), "r" (syscall)
1175 : "r0", "r1", "r7", "r2"
1178 __asm __volatile ("mov r0, %0\n"
1181 "swi 0x9f0002 @ sys_cacheflush"
1183 : "r" (code), "r" (code + size), "r" (0)
1184 : "r0", "r1", "r3" );
1186 #endif /* !__native_client__ */
1197 RegTypeStructByAddr,
1198 /* gsharedvt argument passed by addr in greg */
1199 RegTypeGSharedVtInReg,
1200 /* gsharedvt argument passed by addr on stack */
1201 RegTypeGSharedVtOnStack,
1206 guint16 vtsize; /* in param area */
1210 guint8 size : 4; /* 1, 2, 4, 8, or regs used by RegTypeStructByVal */
1215 guint32 stack_usage;
1216 gboolean vtype_retaddr;
1217 /* The index of the vret arg in the argument list */
1227 add_general (guint *gr, guint *stack_size, ArgInfo *ainfo, gboolean simple)
1230 if (*gr > ARMREG_R3) {
1232 ainfo->offset = *stack_size;
1233 ainfo->reg = ARMREG_SP; /* in the caller */
1234 ainfo->storage = RegTypeBase;
1237 ainfo->storage = RegTypeGeneral;
1244 split = i8_align == 4;
1249 if (*gr == ARMREG_R3 && split) {
1250 /* first word in r3 and the second on the stack */
1251 ainfo->offset = *stack_size;
1252 ainfo->reg = ARMREG_SP; /* in the caller */
1253 ainfo->storage = RegTypeBaseGen;
1255 } else if (*gr >= ARMREG_R3) {
1256 if (eabi_supported) {
1257 /* darwin aligns longs to 4 byte only */
1258 if (i8_align == 8) {
1263 ainfo->offset = *stack_size;
1264 ainfo->reg = ARMREG_SP; /* in the caller */
1265 ainfo->storage = RegTypeBase;
1268 if (eabi_supported) {
1269 if (i8_align == 8 && ((*gr) & 1))
1272 ainfo->storage = RegTypeIRegPair;
/*
 * add_float: assign an AAPCS-VFP (armhf) floating-point argument to a
 * register or stack slot.  Implements the back-filling rule: a single-
 * precision slot skipped while aligning a double is remembered in
 * *float_spare and reused for a later float.  *fpr counts in single-
 * precision (s-register) units even for doubles.
 * NOTE(review): interior lines (braces, register-number arithmetic,
 * stack-size accounting) are missing from this excerpt — confirm
 * against the full source.
 */
1281 add_float (guint *fpr, guint *stack_size, ArgInfo *ainfo, gboolean is_double, gint *float_spare)
1284 * If we're calling a function like this:
1286 * void foo(float a, double b, float c)
1288 * We pass a in s0 and b in d1. That leaves us
1289 * with s1 being unused. The armhf ABI recognizes
1290 * this and requires register assignment to then
1291 * use that for the next single-precision arg,
1292 * i.e. c in this example. So float_spare either
1293 * tells us which reg to use for the next single-
1294 * precision arg, or it's -1, meaning use *fpr.
1296 * Note that even though most of the JIT speaks
1297 * double-precision, fpr represents single-
1298 * precision registers.
1300 * See parts 5.5 and 6.1.2 of the AAPCS for how
/* Registers still available (or a spare exists for a float). */
1304 if (*fpr < ARM_VFP_F16 || (!is_double && *float_spare >= 0)) {
1305 ainfo->storage = RegTypeFP;
1309 * If we're passing a double-precision value
1310 * and *fpr is odd (e.g. it's s1, s3, ...)
1311 * we need to use the next even register. So
1312 * we mark the current *fpr as a spare that
1313 * can be used for the next single-precision
1317 *float_spare = *fpr;
1322 * At this point, we have an even register
1323 * so we assign that and move along.
1327 } else if (*float_spare >= 0) {
1329 * We're passing a single-precision value
1330 * and it looks like a spare single-
1331 * precision register is available. Let's
1335 ainfo->reg = *float_spare;
1339 * If we hit this branch, we're passing a
1340 * single-precision value and we can simply
1341 * use the next available register.
1349 * We've exhausted available floating point
1350 * regs, so pass the rest on the stack.
/* Stack case: record the caller-frame offset; reg is SP symbolically. */
1358 ainfo->offset = *stack_size;
1359 ainfo->reg = ARMREG_SP;
1360 ainfo->storage = RegTypeBase;
/*
 * get_call_info:
 * Compute the ARM calling-convention layout (a CallInfo) for SIG: where
 * each argument and the return value live (core register, register pair,
 * VFP register, stack, struct-by-val split, gsharedvt by-ref), plus the
 * total outgoing stack usage. Allocates the CallInfo from MP when given,
 * otherwise with g_malloc0 (caller frees).
 * NOTE(review): excerpt with elided original lines; code kept
 * byte-identical to the visible fragment.
 */
1367 get_call_info (MonoGenericSharingContext *gsctx, MonoMemPool *mp, MonoMethodSignature *sig)
1369 guint i, gr, fpr, pstart;
1371 int n = sig->hasthis + sig->param_count;
1372 MonoType *simpletype;
1373 guint32 stack_size = 0;
1375 gboolean is_pinvoke = sig->pinvoke;
1379 cinfo = mono_mempool_alloc0 (mp, sizeof (CallInfo) + (sizeof (ArgInfo) * n));
1381 cinfo = g_malloc0 (sizeof (CallInfo) + (sizeof (ArgInfo) * n));
/* Decide how the return value is conveyed before laying out arguments,
 * since a hidden vtype-return address may occupy an argument register. */
1388 t = mini_type_get_underlying_type (gsctx, sig->ret);
1389 if (MONO_TYPE_ISSTRUCT (t)) {
/* Small pinvoke structs (<= pointer size) come back in registers. */
1392 if (is_pinvoke && mono_class_native_size (mono_class_from_mono_type (t), &align) <= sizeof (gpointer)) {
1393 cinfo->ret.storage = RegTypeStructByVal;
1395 cinfo->vtype_retaddr = TRUE;
1397 } else if (!(t->type == MONO_TYPE_GENERICINST && !mono_type_generic_inst_is_valuetype (t)) && mini_is_gsharedvt_type_gsctx (gsctx, t)) {
1398 cinfo->vtype_retaddr = TRUE;
1404 * To simplify get_this_arg_reg () and LLVM integration, emit the vret arg after
1405 * the first argument, allowing 'this' to be always passed in the first arg reg.
1406 * Also do this if the first argument is a reference type, since virtual calls
1407 * are sometimes made using calli without sig->hasthis set, like in the delegate
1410 if (cinfo->vtype_retaddr && !is_pinvoke && (sig->hasthis || (sig->param_count > 0 && MONO_TYPE_IS_REFERENCE (mini_type_get_underlying_type (gsctx, sig->params [0]))))) {
1412 add_general (&gr, &stack_size, cinfo->args + 0, TRUE);
1414 add_general (&gr, &stack_size, &cinfo->args [sig->hasthis + 0], TRUE);
1418 add_general (&gr, &stack_size, &cinfo->ret, TRUE);
1419 cinfo->vret_arg_index = 1;
1423 add_general (&gr, &stack_size, cinfo->args + 0, TRUE);
1427 if (cinfo->vtype_retaddr)
1428 add_general (&gr, &stack_size, &cinfo->ret, TRUE);
/* Lay out each declared parameter in signature order. */
1431 DEBUG(printf("params: %d\n", sig->param_count));
1432 for (i = pstart; i < sig->param_count; ++i) {
1433 ArgInfo *ainfo = &cinfo->args [n];
1435 if ((sig->call_convention == MONO_CALL_VARARG) && (i == sig->sentinelpos)) {
1436 /* Prevent implicit arguments and sig_cookie from
1437 being passed in registers */
1440 /* Emit the signature cookie just before the implicit arguments */
1441 add_general (&gr, &stack_size, &cinfo->sig_cookie, TRUE);
1443 DEBUG(printf("param %d: ", i));
1444 if (sig->params [i]->byref) {
1445 DEBUG(printf("byref\n"));
1446 add_general (&gr, &stack_size, ainfo, TRUE);
1450 simpletype = mini_type_get_underlying_type (gsctx, sig->params [i]);
1451 switch (simpletype->type) {
1452 case MONO_TYPE_BOOLEAN:
1455 cinfo->args [n].size = 1;
1456 add_general (&gr, &stack_size, ainfo, TRUE);
1459 case MONO_TYPE_CHAR:
1462 cinfo->args [n].size = 2;
1463 add_general (&gr, &stack_size, ainfo, TRUE);
1468 cinfo->args [n].size = 4;
1469 add_general (&gr, &stack_size, ainfo, TRUE);
1475 case MONO_TYPE_FNPTR:
1476 case MONO_TYPE_CLASS:
1477 case MONO_TYPE_OBJECT:
1478 case MONO_TYPE_STRING:
1479 case MONO_TYPE_SZARRAY:
1480 case MONO_TYPE_ARRAY:
1481 cinfo->args [n].size = sizeof (gpointer);
1482 add_general (&gr, &stack_size, ainfo, TRUE);
1485 case MONO_TYPE_GENERICINST:
1486 if (!mono_type_generic_inst_is_valuetype (simpletype)) {
1487 cinfo->args [n].size = sizeof (gpointer);
1488 add_general (&gr, &stack_size, ainfo, TRUE);
1492 if (mini_is_gsharedvt_type_gsctx (gsctx, simpletype)) {
1493 /* gsharedvt arguments are passed by ref */
1494 g_assert (mini_is_gsharedvt_type_gsctx (gsctx, simpletype));
1495 add_general (&gr, &stack_size, ainfo, TRUE);
/* Re-tag the generic storage as the gsharedvt-specific variant. */
1496 switch (ainfo->storage) {
1497 case RegTypeGeneral:
1498 ainfo->storage = RegTypeGSharedVtInReg;
1501 ainfo->storage = RegTypeGSharedVtOnStack;
1504 g_assert_not_reached ();
1510 case MONO_TYPE_TYPEDBYREF:
1511 case MONO_TYPE_VALUETYPE: {
1517 if (simpletype->type == MONO_TYPE_TYPEDBYREF) {
1518 size = sizeof (MonoTypedRef);
1519 align = sizeof (gpointer);
1521 MonoClass *klass = mono_class_from_mono_type (sig->params [i]);
1523 size = mono_class_native_size (klass, &align);
1525 size = mini_type_stack_size_full (gsctx, simpletype, &align, FALSE);
1527 DEBUG(printf ("load %d bytes struct\n", size));
/* Round the struct size up to whole pointer-sized words. */
1530 align_size += (sizeof (gpointer) - 1);
1531 align_size &= ~(sizeof (gpointer) - 1);
1532 nwords = (align_size + sizeof (gpointer) -1 ) / sizeof (gpointer);
1533 ainfo->storage = RegTypeStructByVal;
1534 ainfo->struct_size = size;
1535 /* FIXME: align stack_size if needed */
1536 if (eabi_supported) {
1537 if (align >= 8 && (gr & 1))
/* Split: 'size' words go in r0-r3, 'vtsize' words overflow to the stack. */
1540 if (gr > ARMREG_R3) {
1542 ainfo->vtsize = nwords;
1544 int rest = ARMREG_R3 - gr + 1;
1545 int n_in_regs = rest >= nwords? nwords: rest;
1547 ainfo->size = n_in_regs;
1548 ainfo->vtsize = nwords - n_in_regs;
1551 nwords -= n_in_regs;
1553 if (sig->call_convention == MONO_CALL_VARARG)
1554 /* This matches the alignment in mono_ArgIterator_IntGetNextArg () */
1555 stack_size = ALIGN_TO (stack_size, align);
1556 ainfo->offset = stack_size;
1557 /*g_print ("offset for arg %d at %d\n", n, stack_size);*/
1558 stack_size += nwords * sizeof (gpointer);
1565 add_general (&gr, &stack_size, ainfo, FALSE);
/* Floating point args: VFP regs under hardfp, core regs otherwise
 * (simple=TRUE for 4-byte, FALSE i.e. pair-allocation for 8-byte). */
1572 add_float (&fpr, &stack_size, ainfo, FALSE, &float_spare);
1574 add_general (&gr, &stack_size, ainfo, TRUE);
1582 add_float (&fpr, &stack_size, ainfo, TRUE, &float_spare);
1584 add_general (&gr, &stack_size, ainfo, FALSE);
1589 case MONO_TYPE_MVAR:
1590 /* gsharedvt arguments are passed by ref */
1591 g_assert (mini_is_gsharedvt_type_gsctx (gsctx, simpletype));
1592 add_general (&gr, &stack_size, ainfo, TRUE);
1593 switch (ainfo->storage) {
1594 case RegTypeGeneral:
1595 ainfo->storage = RegTypeGSharedVtInReg;
1598 ainfo->storage = RegTypeGSharedVtOnStack;
1601 g_assert_not_reached ();
1606 g_error ("Can't trampoline 0x%x", sig->params [i]->type);
1610 /* Handle the case where there are no implicit arguments */
1611 if ((sig->call_convention == MONO_CALL_VARARG) && (i == sig->sentinelpos)) {
1612 /* Prevent implicit arguments and sig_cookie from
1613 being passed in registers */
1616 /* Emit the signature cookie just before the implicit arguments */
1617 add_general (&gr, &stack_size, &cinfo->sig_cookie, TRUE);
/* Finally classify the return value storage. */
1621 simpletype = mini_type_get_underlying_type (gsctx, sig->ret);
1622 switch (simpletype->type) {
1623 case MONO_TYPE_BOOLEAN:
1628 case MONO_TYPE_CHAR:
1634 case MONO_TYPE_FNPTR:
1635 case MONO_TYPE_CLASS:
1636 case MONO_TYPE_OBJECT:
1637 case MONO_TYPE_SZARRAY:
1638 case MONO_TYPE_ARRAY:
1639 case MONO_TYPE_STRING:
1640 cinfo->ret.storage = RegTypeGeneral;
1641 cinfo->ret.reg = ARMREG_R0;
/* 64-bit integers come back in the r0/r1 pair. */
1645 cinfo->ret.storage = RegTypeIRegPair;
1646 cinfo->ret.reg = ARMREG_R0;
1650 cinfo->ret.storage = RegTypeFP;
1652 if (IS_HARD_FLOAT) {
1653 cinfo->ret.reg = ARM_VFP_F0;
1655 cinfo->ret.reg = ARMREG_R0;
1659 case MONO_TYPE_GENERICINST:
1660 if (!mono_type_generic_inst_is_valuetype (simpletype)) {
1661 cinfo->ret.storage = RegTypeGeneral;
1662 cinfo->ret.reg = ARMREG_R0;
1665 // FIXME: Only for variable types
1666 if (mini_is_gsharedvt_type_gsctx (gsctx, simpletype)) {
1667 cinfo->ret.storage = RegTypeStructByAddr;
1668 g_assert (cinfo->vtype_retaddr);
1672 case MONO_TYPE_VALUETYPE:
1673 case MONO_TYPE_TYPEDBYREF:
1674 if (cinfo->ret.storage != RegTypeStructByVal)
1675 cinfo->ret.storage = RegTypeStructByAddr;
1678 case MONO_TYPE_MVAR:
1679 g_assert (mini_is_gsharedvt_type_gsctx (gsctx, simpletype));
1680 cinfo->ret.storage = RegTypeStructByAddr;
1681 g_assert (cinfo->vtype_retaddr);
1683 case MONO_TYPE_VOID:
1686 g_error ("Can't handle as return value 0x%x", sig->ret->type);
1690 /* align stack size to 8 */
1691 DEBUG (printf (" stack size: %d (%d)\n", (stack_size + 15) & ~15, stack_size));
1692 stack_size = (stack_size + 7) & ~7;
1694 cinfo->stack_usage = stack_size;
/*
 * mono_arch_tail_call_supported:
 * Decide whether a tail call from CALLER_SIG to CALLEE_SIG can be emitted
 * on ARM. Rejects the call when the callee needs more outgoing stack than
 * the caller has, when the callee returns a struct by hidden address, or
 * (per the visible check below) when the callee's stack usage exceeds
 * 16 words. Compares the two layouts via get_call_info ().
 * NOTE(review): excerpt with elided original lines; code kept
 * byte-identical to the visible fragment.
 */
1700 mono_arch_tail_call_supported (MonoCompile *cfg, MonoMethodSignature *caller_sig, MonoMethodSignature *callee_sig)
1702 MonoType *callee_ret;
1706 if (cfg->compile_aot && !cfg->full_aot)
1707 /* OP_TAILCALL doesn't work with AOT */
1710 c1 = get_call_info (NULL, NULL, caller_sig);
1711 c2 = get_call_info (NULL, NULL, callee_sig);
1714 * Tail calls with more callee stack usage than the caller cannot be supported, since
1715 * the extra stack space would be left on the stack after the tail call.
1717 res = c1->stack_usage >= c2->stack_usage;
1718 callee_ret = mini_replace_type (callee_sig->ret);
1719 if (callee_ret && MONO_TYPE_ISSTRUCT (callee_ret) && c2->ret.storage != RegTypeStructByVal)
1720 /* An address on the callee's stack is passed as the first argument */
1723 if (c2->stack_usage > 16 * 4)
/*
 * debug_omit_fp:
 * Debug hook gating frame-pointer omission: defers to the global
 * mono_debug_count () counter so FP omission can be bisected at runtime.
 */
1735 debug_omit_fp (void)
1738 return mono_debug_count ();
/*
 * mono_arch_compute_omit_fp:
 * Determine whether the frame pointer can be eliminated for CFG and cache
 * the answer in cfg->arch.omit_fp / omit_fp_computed. Starts optimistic
 * (omit_fp = TRUE) and disables omission whenever any feature requires a
 * stable FP: LMF saving, alloca, exception clauses, a param area, managed
 * varargs, tracing/profiling, or stack-resident arguments whose offsets
 * can only be resolved FP-relative.
 * NOTE(review): excerpt with elided original lines; code kept
 * byte-identical to the visible fragment.
 */
1745 * mono_arch_compute_omit_fp:
1747 * Determine whenever the frame pointer can be eliminated.
1750 mono_arch_compute_omit_fp (MonoCompile *cfg)
1752 MonoMethodSignature *sig;
1753 MonoMethodHeader *header;
/* The result is cached; bail out if already computed. */
1757 if (cfg->arch.omit_fp_computed)
1760 header = cfg->header;
1762 sig = mono_method_signature (cfg->method);
1764 if (!cfg->arch.cinfo)
1765 cfg->arch.cinfo = get_call_info (cfg->generic_sharing_context, cfg->mempool, sig);
1766 cinfo = cfg->arch.cinfo;
1769 * FIXME: Remove some of the restrictions.
1771 cfg->arch.omit_fp = TRUE;
1772 cfg->arch.omit_fp_computed = TRUE;
1774 if (cfg->disable_omit_fp)
1775 cfg->arch.omit_fp = FALSE;
1776 if (!debug_omit_fp ())
1777 cfg->arch.omit_fp = FALSE;
1779 if (cfg->method->save_lmf)
1780 cfg->arch.omit_fp = FALSE;
1782 if (cfg->flags & MONO_CFG_HAS_ALLOCA)
1783 cfg->arch.omit_fp = FALSE;
1784 if (header->num_clauses)
1785 cfg->arch.omit_fp = FALSE;
1786 if (cfg->param_area)
1787 cfg->arch.omit_fp = FALSE;
1788 if (!sig->pinvoke && (sig->call_convention == MONO_CALL_VARARG))
1789 cfg->arch.omit_fp = FALSE;
1790 if ((mono_jit_trace_calls != NULL && mono_trace_eval (cfg->method)) ||
1791 (cfg->prof_options & MONO_PROFILE_ENTER_LEAVE))
1792 cfg->arch.omit_fp = FALSE;
/* Arguments living on the caller's stack need FP-relative addressing. */
1793 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
1794 ArgInfo *ainfo = &cinfo->args [i];
1796 if (ainfo->storage == RegTypeBase || ainfo->storage == RegTypeBaseGen || ainfo->storage == RegTypeStructByVal) {
1798 * The stack offset can only be determined when the frame
1801 cfg->arch.omit_fp = FALSE;
/* Tally the locals' sizes (used by elided code below this excerpt). */
1806 for (i = cfg->locals_start; i < cfg->num_varinfo; i++) {
1807 MonoInst *ins = cfg->varinfo [i];
1810 locals_size += mono_type_size (ins->inst_vtype, &ialign);
/*
 * mono_arch_allocate_vars:
 * Assign storage to the method's return value, locals, and incoming
 * arguments according to the ARM calling convention: pick the frame
 * register (SP when FP is omitted, FP otherwise), then walk each variable
 * assigning either a register (OP_REGVAR) or an aligned frame offset
 * (OP_REGOFFSET). Offsets grow upward (MONO_CFG_HAS_SPILLUP). The final
 * frame size is left in cfg->stack_offset.
 * NOTE(review): excerpt with elided original lines; code kept
 * byte-identical to the visible fragment.
 */
1815 * Set var information according to the calling convention. arm version.
1816 * The locals var stuff should most likely be split in another method.
1819 mono_arch_allocate_vars (MonoCompile *cfg)
1821 MonoMethodSignature *sig;
1822 MonoMethodHeader *header;
1825 int i, offset, size, align, curinst;
1829 sig = mono_method_signature (cfg->method);
1831 if (!cfg->arch.cinfo)
1832 cfg->arch.cinfo = get_call_info (cfg->generic_sharing_context, cfg->mempool, sig);
1833 cinfo = cfg->arch.cinfo;
1834 sig_ret = mini_replace_type (sig->ret);
1836 mono_arch_compute_omit_fp (cfg);
1838 if (cfg->arch.omit_fp)
1839 cfg->frame_reg = ARMREG_SP;
1841 cfg->frame_reg = ARMREG_FP;
1843 cfg->flags |= MONO_CFG_HAS_SPILLUP;
1845 /* allow room for the vararg method args: void* and long/double */
1846 if (mono_jit_trace_calls != NULL && mono_trace_eval (cfg->method))
1847 cfg->param_area = MAX (cfg->param_area, sizeof (gpointer)*8);
1849 header = cfg->header;
1851 /* See mono_arch_get_global_int_regs () */
1852 if (cfg->flags & MONO_CFG_HAS_CALLS)
1853 cfg->uses_rgctx_reg = TRUE;
1855 if (cfg->frame_reg != ARMREG_SP)
1856 cfg->used_int_regs |= 1 << cfg->frame_reg;
1858 if (cfg->compile_aot || cfg->uses_rgctx_reg || COMPILE_LLVM (cfg))
1859 /* V5 is reserved for passing the vtable/rgctx/IMT method */
1860 cfg->used_int_regs |= (1 << ARMREG_V5);
/* Scalar returns stay in r0; struct/hidden-address returns are handled below. */
1864 if (!MONO_TYPE_ISSTRUCT (sig_ret) && !cinfo->vtype_retaddr) {
1865 if (sig_ret->type != MONO_TYPE_VOID) {
1866 cfg->ret->opcode = OP_REGVAR;
1867 cfg->ret->inst_c0 = ARMREG_R0;
1870 /* local vars are at a positive offset from the stack pointer */
1872 * also note that if the function uses alloca, we use FP
1873 * to point at the local variables.
1875 offset = 0; /* linkage area */
1876 /* align the offset to 16 bytes: not sure this is needed here */
1878 //offset &= ~(8 - 1);
1880 /* add parameter area size for called functions */
1881 offset += cfg->param_area;
1884 if (cfg->flags & MONO_CFG_HAS_FPOUT)
1887 /* allow room to save the return value */
1888 if (mono_jit_trace_calls != NULL && mono_trace_eval (cfg->method))
1891 /* the MonoLMF structure is stored just below the stack pointer */
1892 if (cinfo->ret.storage == RegTypeStructByVal) {
1893 cfg->ret->opcode = OP_REGOFFSET;
1894 cfg->ret->inst_basereg = cfg->frame_reg;
1895 offset += sizeof (gpointer) - 1;
1896 offset &= ~(sizeof (gpointer) - 1);
1897 cfg->ret->inst_offset = - offset;
1898 offset += sizeof(gpointer);
1899 } else if (cinfo->vtype_retaddr) {
1900 ins = cfg->vret_addr;
1901 offset += sizeof(gpointer) - 1;
1902 offset &= ~(sizeof(gpointer) - 1);
1903 ins->inst_offset = offset;
1904 ins->opcode = OP_REGOFFSET;
1905 ins->inst_basereg = cfg->frame_reg;
1906 if (G_UNLIKELY (cfg->verbose_level > 1)) {
1907 printf ("vret_addr =");
1908 mono_print_ins (cfg->vret_addr);
1910 offset += sizeof(gpointer);
1913 /* Allocate these first so they have a small offset, OP_SEQ_POINT depends on this */
1914 if (cfg->arch.seq_point_info_var) {
1917 ins = cfg->arch.seq_point_info_var;
1921 offset += align - 1;
1922 offset &= ~(align - 1);
1923 ins->opcode = OP_REGOFFSET;
1924 ins->inst_basereg = cfg->frame_reg;
1925 ins->inst_offset = offset;
1928 ins = cfg->arch.ss_trigger_page_var;
1931 offset += align - 1;
1932 offset &= ~(align - 1);
1933 ins->opcode = OP_REGOFFSET;
1934 ins->inst_basereg = cfg->frame_reg;
1935 ins->inst_offset = offset;
/* Soft-breakpoint sequence point variables get slots the same way. */
1939 if (cfg->arch.seq_point_read_var) {
1942 ins = cfg->arch.seq_point_read_var;
1946 offset += align - 1;
1947 offset &= ~(align - 1);
1948 ins->opcode = OP_REGOFFSET;
1949 ins->inst_basereg = cfg->frame_reg;
1950 ins->inst_offset = offset;
1953 ins = cfg->arch.seq_point_ss_method_var;
1956 offset += align - 1;
1957 offset &= ~(align - 1);
1958 ins->opcode = OP_REGOFFSET;
1959 ins->inst_basereg = cfg->frame_reg;
1960 ins->inst_offset = offset;
1963 ins = cfg->arch.seq_point_bp_method_var;
1966 offset += align - 1;
1967 offset &= ~(align - 1);
1968 ins->opcode = OP_REGOFFSET;
1969 ins->inst_basereg = cfg->frame_reg;
1970 ins->inst_offset = offset;
1974 if (cfg->has_atomic_exchange_i4 || cfg->has_atomic_cas_i4 || cfg->has_atomic_add_i4) {
1975 /* Allocate a temporary used by the atomic ops */
1979 /* Allocate a local slot to hold the sig cookie address */
1980 offset += align - 1;
1981 offset &= ~(align - 1);
1982 cfg->arch.atomic_tmp_offset = offset;
1985 cfg->arch.atomic_tmp_offset = -1;
1988 cfg->locals_min_stack_offset = offset;
/* Assign frame slots to every live local that isn't already placed. */
1990 curinst = cfg->locals_start;
1991 for (i = curinst; i < cfg->num_varinfo; ++i) {
1994 ins = cfg->varinfo [i];
1995 if ((ins->flags & MONO_INST_IS_DEAD) || ins->opcode == OP_REGVAR || ins->opcode == OP_REGOFFSET)
1998 t = ins->inst_vtype;
1999 if (cfg->gsharedvt && mini_is_gsharedvt_variable_type (cfg, t))
2002 /* inst->backend.is_pinvoke indicates native sized value types, this is used by the
2003 * pinvoke wrappers when they call functions returning structure */
2004 if (ins->backend.is_pinvoke && MONO_TYPE_ISSTRUCT (t) && t->type != MONO_TYPE_TYPEDBYREF) {
2005 size = mono_class_native_size (mono_class_from_mono_type (t), &ualign);
2009 size = mono_type_size (t, &align);
2011 /* FIXME: if a structure is misaligned, our memcpy doesn't work,
2012 * since it loads/stores misaligned words, which don't do the right thing.
2014 if (align < 4 && size >= 4)
2016 if (ALIGN_TO (offset, align) > ALIGN_TO (offset, 4))
2017 mini_gc_set_slot_type_from_fp (cfg, ALIGN_TO (offset, 4), SLOT_NOREF);
2018 offset += align - 1;
2019 offset &= ~(align - 1);
2020 ins->opcode = OP_REGOFFSET;
2021 ins->inst_offset = offset;
2022 ins->inst_basereg = cfg->frame_reg;
2024 //g_print ("allocating local %d to %d\n", i, inst->inst_offset);
2027 cfg->locals_max_stack_offset = offset;
/* Now place the incoming arguments ('this' first, then each parameter). */
2031 ins = cfg->args [curinst];
2032 if (ins->opcode != OP_REGVAR) {
2033 ins->opcode = OP_REGOFFSET;
2034 ins->inst_basereg = cfg->frame_reg;
2035 offset += sizeof (gpointer) - 1;
2036 offset &= ~(sizeof (gpointer) - 1);
2037 ins->inst_offset = offset;
2038 offset += sizeof (gpointer);
2043 if (sig->call_convention == MONO_CALL_VARARG) {
2047 /* Allocate a local slot to hold the sig cookie address */
2048 offset += align - 1;
2049 offset &= ~(align - 1);
2050 cfg->sig_cookie = offset;
2054 for (i = 0; i < sig->param_count; ++i) {
2055 ins = cfg->args [curinst];
2057 if (ins->opcode != OP_REGVAR) {
2058 ins->opcode = OP_REGOFFSET;
2059 ins->inst_basereg = cfg->frame_reg;
2060 size = mini_type_stack_size_full (cfg->generic_sharing_context, sig->params [i], &ualign, sig->pinvoke);
2062 /* FIXME: if a structure is misaligned, our memcpy doesn't work,
2063 * since it loads/stores misaligned words, which don't do the right thing.
2065 if (align < 4 && size >= 4)
2067 /* The code in the prolog () stores words when storing vtypes received in a register */
2068 if (MONO_TYPE_ISSTRUCT (sig->params [i]))
2070 if (ALIGN_TO (offset, align) > ALIGN_TO (offset, 4))
2071 mini_gc_set_slot_type_from_fp (cfg, ALIGN_TO (offset, 4), SLOT_NOREF);
2072 offset += align - 1;
2073 offset &= ~(align - 1);
2074 ins->inst_offset = offset;
2080 /* align the offset to 8 bytes */
2081 if (ALIGN_TO (offset, 8) > ALIGN_TO (offset, 4))
2082 mini_gc_set_slot_type_from_fp (cfg, ALIGN_TO (offset, 4), SLOT_NOREF);
2087 cfg->stack_offset = offset;
/*
 * mono_arch_create_vars:
 * Create the architecture-specific compile-time variables before variable
 * allocation: two volatile VFP scratch slots under hard-float, the hidden
 * vret address argument when the return is a vtype by address, and the
 * sequence-point bookkeeping variables (soft-breakpoint trio, or the AOT
 * seq_point_info / ss_trigger_page pair).
 * NOTE(review): excerpt with elided original lines; code kept
 * byte-identical to the visible fragment.
 */
2091 mono_arch_create_vars (MonoCompile *cfg)
2093 MonoMethodSignature *sig;
2097 sig = mono_method_signature (cfg->method);
2099 if (!cfg->arch.cinfo)
2100 cfg->arch.cinfo = get_call_info (cfg->generic_sharing_context, cfg->mempool, sig);
2101 cinfo = cfg->arch.cinfo;
2103 if (IS_HARD_FLOAT) {
2104 for (i = 0; i < 2; i++) {
2105 MonoInst *inst = mono_compile_create_var (cfg, &mono_defaults.double_class->byval_arg, OP_LOCAL);
2106 inst->flags |= MONO_INST_VOLATILE;
2108 cfg->arch.vfp_scratch_slots [i] = (gpointer) inst;
2112 if (cinfo->ret.storage == RegTypeStructByVal)
2113 cfg->ret_var_is_local = TRUE;
2115 if (cinfo->vtype_retaddr) {
2116 cfg->vret_addr = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_ARG);
2117 if (G_UNLIKELY (cfg->verbose_level > 1)) {
2118 printf ("vret_addr = ");
2119 mono_print_ins (cfg->vret_addr);
2123 if (cfg->gen_seq_points) {
2124 if (cfg->soft_breakpoints) {
2125 MonoInst *ins = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
2126 ins->flags |= MONO_INST_VOLATILE;
2127 cfg->arch.seq_point_read_var = ins;
2129 ins = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
2130 ins->flags |= MONO_INST_VOLATILE;
2131 cfg->arch.seq_point_ss_method_var = ins;
2133 ins = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
2134 ins->flags |= MONO_INST_VOLATILE;
2135 cfg->arch.seq_point_bp_method_var = ins;
2137 g_assert (!cfg->compile_aot);
2138 } else if (cfg->compile_aot) {
2139 MonoInst *ins = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
2140 ins->flags |= MONO_INST_VOLATILE;
2141 cfg->arch.seq_point_info_var = ins;
2143 /* Allocate a separate variable for this to save 1 load per seq point */
2144 ins = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
2145 ins->flags |= MONO_INST_VOLATILE;
2146 cfg->arch.ss_trigger_page_var = ins;
/*
 * emit_sig_cookie:
 * For managed varargs calls, emit IR that stores the signature cookie —
 * a truncated duplicate of the call signature describing only the
 * arguments after the sentinel — onto the outgoing stack at the slot
 * computed by get_call_info (cinfo->sig_cookie). Skipped for tail calls.
 * NOTE(review): excerpt with elided original lines; code kept
 * byte-identical to the visible fragment.
 */
2152 emit_sig_cookie (MonoCompile *cfg, MonoCallInst *call, CallInfo *cinfo)
2154 MonoMethodSignature *tmp_sig;
2157 if (call->tail_call)
2160 g_assert (cinfo->sig_cookie.storage == RegTypeBase);
2163 * mono_ArgIterator_Setup assumes the signature cookie is
2164 * passed first and all the arguments which were before it are
2165 * passed on the stack after the signature. So compensate by
2166 * passing a different signature.
2168 tmp_sig = mono_metadata_signature_dup (call->signature);
2169 tmp_sig->param_count -= call->signature->sentinelpos;
2170 tmp_sig->sentinelpos = 0;
2171 memcpy (tmp_sig->params, call->signature->params + call->signature->sentinelpos, tmp_sig->param_count * sizeof (MonoType*));
2173 sig_reg = mono_alloc_ireg (cfg);
2174 MONO_EMIT_NEW_SIGNATURECONST (cfg, sig_reg, tmp_sig);
2176 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, ARMREG_SP, cinfo->sig_cookie.offset, sig_reg);
/*
 * mono_arch_get_llvm_call_info:
 * Translate the native CallInfo produced by get_call_info () into the
 * LLVMCallInfo representation used by the LLVM backend. Storage kinds the
 * LLVM path cannot express (stack-passed or split vtypes, unusual return
 * conventions) disable LLVM compilation for this method by setting
 * cfg->disable_llvm with an explanatory cfg->exception_message.
 * NOTE(review): excerpt with elided original lines; code kept
 * byte-identical to the visible fragment.
 */
2181 mono_arch_get_llvm_call_info (MonoCompile *cfg, MonoMethodSignature *sig)
2186 LLVMCallInfo *linfo;
2188 n = sig->param_count + sig->hasthis;
2190 cinfo = get_call_info (cfg->generic_sharing_context, cfg->mempool, sig);
2192 linfo = mono_mempool_alloc0 (cfg->mempool, sizeof (LLVMCallInfo) + (sizeof (LLVMArgInfo) * n));
2195 * LLVM always uses the native ABI while we use our own ABI, the
2196 * only difference is the handling of vtypes:
2197 * - we only pass/receive them in registers in some cases, and only
2198 * in 1 or 2 integer registers.
2200 if (cinfo->vtype_retaddr) {
2201 /* Vtype returned using a hidden argument */
2202 linfo->ret.storage = LLVMArgVtypeRetAddr;
2203 linfo->vret_arg_index = cinfo->vret_arg_index;
2204 } else if (cinfo->ret.storage != RegTypeGeneral && cinfo->ret.storage != RegTypeNone && cinfo->ret.storage != RegTypeFP && cinfo->ret.storage != RegTypeIRegPair) {
2205 cfg->exception_message = g_strdup ("unknown ret conv");
2206 cfg->disable_llvm = TRUE;
2210 for (i = 0; i < n; ++i) {
2211 ainfo = cinfo->args + i;
2213 linfo->args [i].storage = LLVMArgNone;
2215 switch (ainfo->storage) {
2216 case RegTypeGeneral:
2217 case RegTypeIRegPair:
2219 linfo->args [i].storage = LLVMArgInIReg;
2221 case RegTypeStructByVal:
2222 // FIXME: Passing entirely on the stack or split reg/stack
/* Only fully-in-register vtypes of 1 or 2 words are representable. */
2223 if (ainfo->vtsize == 0 && ainfo->size <= 2) {
2224 linfo->args [i].storage = LLVMArgVtypeInReg;
2225 linfo->args [i].pair_storage [0] = LLVMArgInIReg;
2226 if (ainfo->size == 2)
2227 linfo->args [i].pair_storage [1] = LLVMArgInIReg;
2229 linfo->args [i].pair_storage [1] = LLVMArgNone;
2231 cfg->exception_message = g_strdup_printf ("vtype-by-val on stack");
2232 cfg->disable_llvm = TRUE;
2236 cfg->exception_message = g_strdup_printf ("ainfo->storage (%d)", ainfo->storage);
2237 cfg->disable_llvm = TRUE;
/*
 * mono_arch_emit_call:
 * Lower a managed call: for every argument, emit the IR that moves it to
 * the location chosen by get_call_info () — core register(s), VFP
 * register, the outgoing stack area, or the split forms (RegTypeBaseGen,
 * RegTypeStructByVal). Also emits the vararg signature cookie and wires
 * up the hidden vtype-return address. cfg->param_area scratch space is
 * used to shuffle float values between FP and integer registers where
 * the soft-float ABI requires it.
 * NOTE(review): excerpt with elided original lines; code kept
 * byte-identical to the visible fragment.
 */
2247 mono_arch_emit_call (MonoCompile *cfg, MonoCallInst *call)
2250 MonoMethodSignature *sig;
2254 sig = call->signature;
2255 n = sig->param_count + sig->hasthis;
2257 cinfo = get_call_info (cfg->generic_sharing_context, NULL, sig);
2259 for (i = 0; i < n; ++i) {
2260 ArgInfo *ainfo = cinfo->args + i;
/* 'this' (when present) has index < sig->hasthis and is int-sized. */
2263 if (i >= sig->hasthis)
2264 t = sig->params [i - sig->hasthis];
2266 t = &mono_defaults.int_class->byval_arg;
2267 t = mini_type_get_underlying_type (cfg->generic_sharing_context, t);
2269 if ((sig->call_convention == MONO_CALL_VARARG) && (i == sig->sentinelpos)) {
2270 /* Emit the signature cookie just before the implicit arguments */
2271 emit_sig_cookie (cfg, call, cinfo);
2274 in = call->args [i];
2276 switch (ainfo->storage) {
2277 case RegTypeGeneral:
2278 case RegTypeIRegPair:
/* 64-bit ints: move both vreg halves into consecutive core registers. */
2279 if (!t->byref && ((t->type == MONO_TYPE_I8) || (t->type == MONO_TYPE_U8))) {
2280 MONO_INST_NEW (cfg, ins, OP_MOVE);
2281 ins->dreg = mono_alloc_ireg (cfg);
2282 ins->sreg1 = in->dreg + 1;
2283 MONO_ADD_INS (cfg->cbb, ins);
2284 mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, ainfo->reg, FALSE);
2286 MONO_INST_NEW (cfg, ins, OP_MOVE);
2287 ins->dreg = mono_alloc_ireg (cfg);
2288 ins->sreg1 = in->dreg + 2;
2289 MONO_ADD_INS (cfg->cbb, ins);
2290 mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, ainfo->reg + 1, FALSE);
2291 } else if (!t->byref && ((t->type == MONO_TYPE_R8) || (t->type == MONO_TYPE_R4))) {
2292 if (ainfo->size == 4) {
2293 if (IS_SOFT_FLOAT) {
2294 /* mono_emit_call_args () have already done the r8->r4 conversion */
2295 /* The converted value is in an int vreg */
2296 MONO_INST_NEW (cfg, ins, OP_MOVE);
2297 ins->dreg = mono_alloc_ireg (cfg);
2298 ins->sreg1 = in->dreg;
2299 MONO_ADD_INS (cfg->cbb, ins);
2300 mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, ainfo->reg, FALSE);
/* VFP case: bounce the float through the param area to an int reg. */
2304 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORER4_MEMBASE_REG, ARMREG_SP, (cfg->param_area - 8), in->dreg);
2305 creg = mono_alloc_ireg (cfg);
2306 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOAD_MEMBASE, creg, ARMREG_SP, (cfg->param_area - 8));
2307 mono_call_inst_add_outarg_reg (cfg, call, creg, ainfo->reg, FALSE);
2310 if (IS_SOFT_FLOAT) {
2311 MONO_INST_NEW (cfg, ins, OP_FGETLOW32);
2312 ins->dreg = mono_alloc_ireg (cfg);
2313 ins->sreg1 = in->dreg;
2314 MONO_ADD_INS (cfg->cbb, ins);
2315 mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, ainfo->reg, FALSE);
2317 MONO_INST_NEW (cfg, ins, OP_FGETHIGH32);
2318 ins->dreg = mono_alloc_ireg (cfg);
2319 ins->sreg1 = in->dreg;
2320 MONO_ADD_INS (cfg->cbb, ins);
2321 mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, ainfo->reg + 1, FALSE);
/* Doubles to an int register pair via the param-area scratch slot. */
2325 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORER8_MEMBASE_REG, ARMREG_SP, (cfg->param_area - 8), in->dreg);
2326 creg = mono_alloc_ireg (cfg);
2327 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOAD_MEMBASE, creg, ARMREG_SP, (cfg->param_area - 8));
2328 mono_call_inst_add_outarg_reg (cfg, call, creg, ainfo->reg, FALSE);
2329 creg = mono_alloc_ireg (cfg);
2330 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOAD_MEMBASE, creg, ARMREG_SP, (cfg->param_area - 8 + 4));
2331 mono_call_inst_add_outarg_reg (cfg, call, creg, ainfo->reg + 1, FALSE);
2334 cfg->flags |= MONO_CFG_HAS_FPOUT;
2336 MONO_INST_NEW (cfg, ins, OP_MOVE);
2337 ins->dreg = mono_alloc_ireg (cfg);
2338 ins->sreg1 = in->dreg;
2339 MONO_ADD_INS (cfg->cbb, ins);
2341 mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, ainfo->reg, FALSE);
2344 case RegTypeStructByAddr:
2347 /* FIXME: where si the data allocated? */
2348 arg->backend.reg3 = ainfo->reg;
2349 call->used_iregs |= 1 << ainfo->reg;
2350 g_assert_not_reached ();
/* Vtype-style arguments go through OP_OUTARG_VT / mono_arch_emit_outarg_vt. */
2353 case RegTypeStructByVal:
2354 case RegTypeGSharedVtInReg:
2355 case RegTypeGSharedVtOnStack:
2356 MONO_INST_NEW (cfg, ins, OP_OUTARG_VT);
2357 ins->opcode = OP_OUTARG_VT;
2358 ins->sreg1 = in->dreg;
2359 ins->klass = in->klass;
2360 ins->inst_p0 = call;
2361 ins->inst_p1 = mono_mempool_alloc (cfg->mempool, sizeof (ArgInfo));
2362 memcpy (ins->inst_p1, ainfo, sizeof (ArgInfo));
2363 mono_call_inst_add_outarg_vt (cfg, call, ins);
2364 MONO_ADD_INS (cfg->cbb, ins);
/* RegTypeBase: store the value into the outgoing stack area. */
2367 if (!t->byref && ((t->type == MONO_TYPE_I8) || (t->type == MONO_TYPE_U8))) {
2368 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, ARMREG_SP, ainfo->offset, in->dreg);
2369 } else if (!t->byref && ((t->type == MONO_TYPE_R4) || (t->type == MONO_TYPE_R8))) {
2370 if (t->type == MONO_TYPE_R8) {
2371 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORER8_MEMBASE_REG, ARMREG_SP, ainfo->offset, in->dreg);
2374 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, ARMREG_SP, ainfo->offset, in->dreg);
2376 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORER4_MEMBASE_REG, ARMREG_SP, ainfo->offset, in->dreg);
2379 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, ARMREG_SP, ainfo->offset, in->dreg);
/* RegTypeBaseGen: 64-bit value split between r3 and the stack. */
2382 case RegTypeBaseGen:
2383 if (!t->byref && ((t->type == MONO_TYPE_I8) || (t->type == MONO_TYPE_U8))) {
2384 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, ARMREG_SP, ainfo->offset, (G_BYTE_ORDER == G_BIG_ENDIAN) ? in->dreg + 1 : in->dreg + 2);
2385 MONO_INST_NEW (cfg, ins, OP_MOVE);
2386 ins->dreg = mono_alloc_ireg (cfg);
2387 ins->sreg1 = G_BYTE_ORDER == G_BIG_ENDIAN ? in->dreg + 2 : in->dreg + 1;
2388 MONO_ADD_INS (cfg->cbb, ins);
2389 mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, ARMREG_R3, FALSE);
2390 } else if (!t->byref && (t->type == MONO_TYPE_R8)) {
2393 /* This should work for soft-float as well */
2395 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORER8_MEMBASE_REG, ARMREG_SP, (cfg->param_area - 8), in->dreg);
2396 creg = mono_alloc_ireg (cfg);
2397 mono_call_inst_add_outarg_reg (cfg, call, creg, ARMREG_R3, FALSE);
2398 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOAD_MEMBASE, creg, ARMREG_SP, (cfg->param_area - 8));
2399 creg = mono_alloc_ireg (cfg);
2400 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOAD_MEMBASE, creg, ARMREG_SP, (cfg->param_area - 4));
2401 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, ARMREG_SP, ainfo->offset, creg);
2402 cfg->flags |= MONO_CFG_HAS_FPOUT;
2404 g_assert_not_reached ();
/* RegTypeFP (hard-float): pass directly in a VFP register. */
2408 int fdreg = mono_alloc_freg (cfg);
2410 if (ainfo->size == 8) {
2411 MONO_INST_NEW (cfg, ins, OP_FMOVE);
2412 ins->sreg1 = in->dreg;
2414 MONO_ADD_INS (cfg->cbb, ins);
2416 mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, ainfo->reg, TRUE);
2421 * Mono's register allocator doesn't speak single-precision registers that
2422 * overlap double-precision registers (i.e. armhf). So we have to work around
2423 * the register allocator and load the value from memory manually.
2425 * So we create a variable for the float argument and an instruction to store
2426 * the argument into the variable. We then store the list of these arguments
2427 * in cfg->float_args. This list is then used by emit_float_args later to
2428 * pass the arguments in the various call opcodes.
2430 * This is not very nice, and we should really try to fix the allocator.
2433 MonoInst *float_arg = mono_compile_create_var (cfg, &mono_defaults.single_class->byval_arg, OP_LOCAL);
2435 /* Make sure the instruction isn't seen as pointless and removed.
2437 float_arg->flags |= MONO_INST_VOLATILE;
2439 MONO_EMIT_NEW_UNALU (cfg, OP_FMOVE, float_arg->dreg, in->dreg);
2441 /* We use the dreg to look up the instruction later. The hreg is used to
2442 * emit the instruction that loads the value into the FP reg.
2444 fad = mono_mempool_alloc0 (cfg->mempool, sizeof (FloatArgData));
2445 fad->vreg = float_arg->dreg;
2446 fad->hreg = ainfo->reg;
2448 call->float_args = g_slist_append_mempool (cfg->mempool, call->float_args, fad);
2451 call->used_iregs |= 1 << ainfo->reg;
2452 cfg->flags |= MONO_CFG_HAS_FPOUT;
2456 g_assert_not_reached ();
2460 /* Handle the case where there are no implicit arguments */
2461 if (!sig->pinvoke && (sig->call_convention == MONO_CALL_VARARG) && (n == sig->sentinelpos))
2462 emit_sig_cookie (cfg, call, cinfo);
2464 if (cinfo->ret.storage == RegTypeStructByVal) {
2465 /* The JIT will transform this into a normal call */
2466 call->vret_in_reg = TRUE;
2467 } else if (cinfo->vtype_retaddr) {
/* Pass the hidden return-buffer address in its designated register. */
2469 MONO_INST_NEW (cfg, vtarg, OP_MOVE);
2470 vtarg->sreg1 = call->vret_var->dreg;
2471 vtarg->dreg = mono_alloc_preg (cfg);
2472 MONO_ADD_INS (cfg->cbb, vtarg);
2474 mono_call_inst_add_outarg_reg (cfg, call, vtarg->dreg, cinfo->ret.reg, FALSE);
2477 call->stack_usage = cinfo->stack_usage;
/*
 * mono_arch_emit_outarg_vt:
 * Materialize an OP_OUTARG_VT created by mono_arch_emit_call: gsharedvt
 * arguments pass the address of SRC (in a register or on the stack);
 * struct-by-val arguments load up to ainfo->size pointer-words from SRC
 * into consecutive argument registers — sub-word structs (1-3 bytes) are
 * assembled byte-by-byte to avoid misaligned loads — and memcpy the
 * overflow (ainfo->vtsize words) to the outgoing stack at doffset.
 * NOTE(review): excerpt with elided original lines; code kept
 * byte-identical to the visible fragment.
 */
2483 mono_arch_emit_outarg_vt (MonoCompile *cfg, MonoInst *ins, MonoInst *src)
2485 MonoCallInst *call = (MonoCallInst*)ins->inst_p0;
2486 ArgInfo *ainfo = ins->inst_p1;
2487 int ovf_size = ainfo->vtsize;
2488 int doffset = ainfo->offset;
2489 int struct_size = ainfo->struct_size;
2490 int i, soffset, dreg, tmpreg;
2492 if (ainfo->storage == RegTypeGSharedVtInReg) {
2494 mono_call_inst_add_outarg_reg (cfg, call, src->dreg, ainfo->reg, FALSE);
2497 if (ainfo->storage == RegTypeGSharedVtOnStack) {
2498 /* Pass by addr on stack */
2499 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, ARMREG_SP, ainfo->offset, src->dreg);
2504 for (i = 0; i < ainfo->size; ++i) {
2505 dreg = mono_alloc_ireg (cfg);
2506 switch (struct_size) {
2508 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, dreg, src->dreg, soffset);
2511 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, dreg, src->dreg, soffset);
/* 3-byte case: combine three byte loads with shifts and ORs. */
2514 tmpreg = mono_alloc_ireg (cfg);
2515 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, dreg, src->dreg, soffset);
2516 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, tmpreg, src->dreg, soffset + 1);
2517 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, tmpreg, tmpreg, 8);
2518 MONO_EMIT_NEW_BIALU (cfg, OP_IOR, dreg, dreg, tmpreg);
2519 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, tmpreg, src->dreg, soffset + 2);
2520 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, tmpreg, tmpreg, 16);
2521 MONO_EMIT_NEW_BIALU (cfg, OP_IOR, dreg, dreg, tmpreg);
2524 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, dreg, src->dreg, soffset);
2527 mono_call_inst_add_outarg_reg (cfg, call, dreg, ainfo->reg + i, FALSE);
2528 soffset += sizeof (gpointer);
2529 struct_size -= sizeof (gpointer);
2531 //g_print ("vt size: %d at R%d + %d\n", doffset, vt->inst_basereg, vt->inst_offset);
2533 mini_emit_memcpy (cfg, ARMREG_SP, doffset, src->dreg, soffset, MIN (ovf_size * sizeof (gpointer), struct_size), struct_size < 4 ? 1 : 4);
/*
 * mono_arch_emit_setret:
 *   Emit IR that moves VAL into the method's return location.
 *   NOTE(review): elided extract — the switch header over the FPU kind and
 *   several returns/braces are missing from this view.
 */
2537 mono_arch_emit_setret (MonoCompile *cfg, MonoMethod *method, MonoInst *val)
2539 MonoType *ret = mini_type_get_underlying_type (cfg->generic_sharing_context, mono_method_signature (method)->ret);
/* 64-bit integer return: pair of registers (OP_SETLRET), except under LLVM
 * where a plain move suffices */
2542 if (ret->type == MONO_TYPE_I8 || ret->type == MONO_TYPE_U8) {
2545 if (COMPILE_LLVM (cfg)) {
2546 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, cfg->ret->dreg, val->dreg);
2548 MONO_INST_NEW (cfg, ins, OP_SETLRET);
/* +1 / +2 select the low/high halves of the vreg pair */
2549 ins->sreg1 = val->dreg + 1;
2550 ins->sreg2 = val->dreg + 2;
2551 MONO_ADD_INS (cfg->cbb, ins);
/* Soft-float: R8 goes through OP_SETFRET, R4 was already converted to an
 * int earlier so a move is enough */
2556 case MONO_ARM_FPU_NONE:
2557 if (ret->type == MONO_TYPE_R8) {
2560 MONO_INST_NEW (cfg, ins, OP_SETFRET);
2561 ins->dreg = cfg->ret->dreg;
2562 ins->sreg1 = val->dreg;
2563 MONO_ADD_INS (cfg->cbb, ins);
2566 if (ret->type == MONO_TYPE_R4) {
2567 /* Already converted to an int in method_to_ir () */
2568 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, cfg->ret->dreg, val->dreg);
/* VFP (soft or hard ABI): both float widths use OP_SETFRET */
2572 case MONO_ARM_FPU_VFP:
2573 case MONO_ARM_FPU_VFP_HARD:
2574 if (ret->type == MONO_TYPE_R8 || ret->type == MONO_TYPE_R4) {
2577 MONO_INST_NEW (cfg, ins, OP_SETFRET);
2578 ins->dreg = cfg->ret->dreg;
2579 ins->sreg1 = val->dreg;
2580 MONO_ADD_INS (cfg->cbb, ins);
2585 g_assert_not_reached ();
/* Fallback for all remaining return types: plain register move */
2589 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, cfg->ret->dreg, val->dreg);
2592 #endif /* #ifndef DISABLE_JIT */
2595 mono_arch_is_inst_imm (gint64 imm)
2601 MonoMethodSignature *sig;
2604 MonoType **param_types;
/*
 * dyn_call_supported:
 *   Decide whether the dyn-call (reflection-free invoke) machinery can
 *   handle a call with this CallInfo/signature. Presumably returns a
 *   gboolean — TODO confirm; the return statements are elided from this view.
 */
2608 dyn_call_supported (CallInfo *cinfo, MonoMethodSignature *sig)
/* All arguments must fit in the param registers plus the small fixed
 * stack-arg area */
2612 if (sig->hasthis + sig->param_count > PARAM_REGS + DYN_CALL_STACK_ARGS)
2615 switch (cinfo->ret.storage) {
2617 case RegTypeGeneral:
2618 case RegTypeIRegPair:
2619 case RegTypeStructByAddr:
/* Reject argument storage kinds / placements the dyn-call path can't model */
2630 for (i = 0; i < cinfo->nargs; ++i) {
2631 ArgInfo *ainfo = &cinfo->args [i];
2634 switch (ainfo->storage) {
2635 case RegTypeGeneral:
2637 case RegTypeIRegPair:
2640 if (ainfo->offset >= (DYN_CALL_STACK_ARGS * sizeof (gpointer)))
2643 case RegTypeStructByVal:
2644 if (ainfo->size == 0)
2645 last_slot = PARAM_REGS + (ainfo->offset / 4) + ainfo->vtsize;
2647 last_slot = ainfo->reg + ainfo->size + ainfo->vtsize;
2648 if (last_slot >= PARAM_REGS + DYN_CALL_STACK_ARGS)
2656 // FIXME: Can't use cinfo only as it doesn't contain info about I8/float */
2657 for (i = 0; i < sig->param_count; ++i) {
2658 MonoType *t = sig->params [i];
2663 t = mini_replace_type (t);
/*
 * mono_arch_dyn_call_prepare:
 *   Build the arch-specific ArchDynCallInfo for SIG, or bail out (elided
 *   branch) when dyn_call_supported () rejects it. Caller owns the returned
 *   info and releases it via mono_arch_dyn_call_free ().
 */
2686 mono_arch_dyn_call_prepare (MonoMethodSignature *sig)
2688 ArchDynCallInfo *info;
2692 cinfo = get_call_info (NULL, NULL, sig);
2694 if (!dyn_call_supported (cinfo, sig)) {
2699 info = g_new0 (ArchDynCallInfo, 1);
2700 // FIXME: Preprocess the info to speed up start_dyn_call ()
/* cinfo ownership transfers into info (freed in mono_arch_dyn_call_free) */
2702 info->cinfo = cinfo;
2703 info->rtype = mini_replace_type (sig->ret);
/* Cache the replaced param types so start_dyn_call need not recompute them */
2704 info->param_types = g_new0 (MonoType*, sig->param_count);
2705 for (i = 0; i < sig->param_count; ++i)
2706 info->param_types [i] = mini_replace_type (sig->params [i]);
2708 return (MonoDynCallInfo*)info;
/*
 * mono_arch_dyn_call_free:
 *   Release an ArchDynCallInfo created by mono_arch_dyn_call_prepare ().
 *   NOTE(review): only the cinfo free is visible here; the frees of
 *   param_types and the info struct itself are presumably in elided lines.
 */
2712 mono_arch_dyn_call_free (MonoDynCallInfo *info)
2714 ArchDynCallInfo *ainfo = (ArchDynCallInfo*)info;
2716 g_free (ainfo->cinfo);
/*
 * mono_arch_start_dyn_call:
 *   Marshal ARGS into the DynCallArgs structure in BUF, laying each argument
 *   into the register/stack slot computed by get_call_info (). RET receives
 *   the return-value buffer address when the callee returns a vtype by ref.
 *   NOTE(review): elided extract — case labels for several MONO_TYPE_*
 *   entries and loop/brace structure are missing from this view.
 */
2721 mono_arch_start_dyn_call (MonoDynCallInfo *info, gpointer **args, guint8 *ret, guint8 *buf, int buf_len)
2723 ArchDynCallInfo *dinfo = (ArchDynCallInfo*)info;
2724 DynCallArgs *p = (DynCallArgs*)buf;
2725 int arg_index, greg, i, j, pindex;
2726 MonoMethodSignature *sig = dinfo->sig;
2728 g_assert (buf_len >= sizeof (DynCallArgs));
/* 'this' (or a leading vret arg) occupies the first greg */
2737 if (sig->hasthis || dinfo->cinfo->vret_arg_index == 1) {
2738 p->regs [greg ++] = (mgreg_t)*(args [arg_index ++]);
/* Hidden return-address argument for by-ref vtype returns */
2743 if (dinfo->cinfo->vtype_retaddr)
2744 p->regs [greg ++] = (mgreg_t)ret;
2746 for (i = pindex; i < sig->param_count; i++) {
2747 MonoType *t = dinfo->param_types [i];
2748 gpointer *arg = args [arg_index ++];
2749 ArgInfo *ainfo = &dinfo->cinfo->args [i + sig->hasthis];
/* Map the ArgInfo storage kind to a slot index in p->regs; register args
 * occupy slots [0, PARAM_REGS), stack args follow */
2752 if (ainfo->storage == RegTypeGeneral || ainfo->storage == RegTypeIRegPair || ainfo->storage == RegTypeStructByVal)
2754 else if (ainfo->storage == RegTypeBase)
2755 slot = PARAM_REGS + (ainfo->offset / 4);
2757 g_assert_not_reached ();
2760 p->regs [slot] = (mgreg_t)*arg;
/* Reference types: store the pointer value directly */
2765 case MONO_TYPE_STRING:
2766 case MONO_TYPE_CLASS:
2767 case MONO_TYPE_ARRAY:
2768 case MONO_TYPE_SZARRAY:
2769 case MONO_TYPE_OBJECT:
2773 p->regs [slot] = (mgreg_t)*arg;
/* Narrow integers are widened (sign- or zero-extended) into a full slot */
2775 case MONO_TYPE_BOOLEAN:
2777 p->regs [slot] = *(guint8*)arg;
2780 p->regs [slot] = *(gint8*)arg;
2783 p->regs [slot] = *(gint16*)arg;
2786 case MONO_TYPE_CHAR:
2787 p->regs [slot] = *(guint16*)arg;
2790 p->regs [slot] = *(gint32*)arg;
2793 p->regs [slot] = *(guint32*)arg;
/* 64-bit values take two consecutive slots */
2797 p->regs [slot ++] = (mgreg_t)arg [0];
2798 p->regs [slot] = (mgreg_t)arg [1];
2801 p->regs [slot] = *(mgreg_t*)arg;
2804 p->regs [slot ++] = (mgreg_t)arg [0];
2805 p->regs [slot] = (mgreg_t)arg [1];
2807 case MONO_TYPE_GENERICINST:
2808 if (MONO_TYPE_IS_REFERENCE (t)) {
2809 p->regs [slot] = (mgreg_t)*arg;
/* Valuetypes: only the by-val layout is supported; copy word by word,
 * re-deriving the slot when the struct spilled to the stack (size == 0) */
2814 case MONO_TYPE_VALUETYPE:
2815 g_assert (ainfo->storage == RegTypeStructByVal);
2817 if (ainfo->size == 0)
2818 slot = PARAM_REGS + (ainfo->offset / 4);
2822 for (j = 0; j < ainfo->size + ainfo->vtsize; ++j)
2823 p->regs [slot ++] = ((mgreg_t*)arg) [j];
2826 g_assert_not_reached ();
/*
 * mono_arch_finish_dyn_call:
 *   Copy the raw call result (res/res2 captured in the DynCallArgs buffer)
 *   into the caller-supplied return buffer, widening/narrowing according to
 *   the signature's return type.
 *   NOTE(review): elided extract — several case labels and breaks are
 *   missing from this view.
 */
2832 mono_arch_finish_dyn_call (MonoDynCallInfo *info, guint8 *buf)
2834 ArchDynCallInfo *ainfo = (ArchDynCallInfo*)info;
2835 MonoType *ptype = ainfo->rtype;
2836 guint8 *ret = ((DynCallArgs*)buf)->ret;
2837 mgreg_t res = ((DynCallArgs*)buf)->res;
2838 mgreg_t res2 = ((DynCallArgs*)buf)->res2;
2840 switch (ptype->type) {
2841 case MONO_TYPE_VOID:
2842 *(gpointer*)ret = NULL;
2844 case MONO_TYPE_STRING:
2845 case MONO_TYPE_CLASS:
2846 case MONO_TYPE_ARRAY:
2847 case MONO_TYPE_SZARRAY:
2848 case MONO_TYPE_OBJECT:
2852 *(gpointer*)ret = (gpointer)res;
2858 case MONO_TYPE_BOOLEAN:
2859 *(guint8*)ret = res;
2862 *(gint16*)ret = res;
2865 case MONO_TYPE_CHAR:
2866 *(guint16*)ret = res;
2869 *(gint32*)ret = res;
2872 *(guint32*)ret = res;
/* 64-bit results come back as a register pair (res, res2) */
2876 /* This handles endianness as well */
2877 ((gint32*)ret) [0] = res;
2878 ((gint32*)ret) [1] = res2;
2880 case MONO_TYPE_GENERICINST:
2881 if (MONO_TYPE_IS_REFERENCE (ptype)) {
2882 *(gpointer*)ret = (gpointer)res;
/* Vtype results were written directly through the retaddr during the call */
2887 case MONO_TYPE_VALUETYPE:
2888 g_assert (ainfo->cinfo->vtype_retaddr);
/* Float results: reinterpret the integer register bits as float/double */
2893 *(float*)ret = *(float*)&res;
2895 case MONO_TYPE_R8: {
2902 *(double*)ret = *(double*)&regs;
2906 g_assert_not_reached ();
2913 * Allow tracing to work with this interface (with an optional argument)
/*
 * mono_arch_instrument_prolog:
 *   Emit a call to the tracing function FUNC at method entry, passing the
 *   MonoMethod in R0 and (for now) a NULL frame pointer in R1.
 *   NOTE(review): the trailing lines of this function (presumably the
 *   return of the updated code pointer) are elided from this view.
 */
2917 mono_arch_instrument_prolog (MonoCompile *cfg, void *func, void *p, gboolean enable_arguments)
2921 code = mono_arm_emit_load_imm (code, ARMREG_R0, (guint32)cfg->method);
2922 ARM_MOV_REG_IMM8 (code, ARMREG_R1, 0); /* NULL ebp for now */
2923 code = mono_arm_emit_load_imm (code, ARMREG_R2, (guint32)func);
2924 code = emit_call_reg (code, ARMREG_R2);
/*
 * mono_arch_instrument_epilog_full:
 *   Emit a call to the tracing function FUNC at method exit. The return
 *   value must survive the call, so it is spilled to the frame (save_offset
 *   inside the param area) before and reloaded after, with the save strategy
 *   chosen from the method's return type.
 *   NOTE(review): elided extract — SAVE_* case labels, the rtype switch
 *   header and several breaks are missing from this view.
 */
2938 mono_arch_instrument_epilog_full (MonoCompile *cfg, void *func, void *p, gboolean enable_arguments, gboolean preserve_argument_registers)
2941 int save_mode = SAVE_NONE;
2943 MonoMethod *method = cfg->method;
2944 MonoType *ret_type = mini_type_get_underlying_type (cfg->generic_sharing_context, mono_method_signature (method)->ret);
2945 int rtype = ret_type->type;
2946 int save_offset = cfg->param_area;
/* Grow the native code buffer if the ~16 instructions below might not fit */
2950 offset = code - cfg->native_code;
2951 /* we need about 16 instructions */
2952 if (offset > (cfg->code_size - 16 * 4)) {
2953 cfg->code_size *= 2;
2954 cfg->native_code = g_realloc (cfg->native_code, cfg->code_size);
2955 code = cfg->native_code + offset;
/* Pick the save strategy from the return type */
2958 case MONO_TYPE_VOID:
2959 /* special case string .ctor icall */
2960 if (strcmp (".ctor", method->name) && method->klass == mono_defaults.string_class)
2961 save_mode = SAVE_ONE;
2963 save_mode = SAVE_NONE;
2967 save_mode = SAVE_TWO;
2971 save_mode = SAVE_ONE_FP;
2973 save_mode = SAVE_ONE;
2977 save_mode = SAVE_TWO_FP;
2979 save_mode = SAVE_TWO;
2981 case MONO_TYPE_GENERICINST:
2982 if (!mono_type_generic_inst_is_valuetype (ret_type)) {
2983 save_mode = SAVE_ONE;
2987 case MONO_TYPE_VALUETYPE:
2988 save_mode = SAVE_STRUCT;
2991 save_mode = SAVE_ONE;
/* Spill the return value; when enable_arguments, also shuffle it into
 * R1(/R2) so the trace function receives it as an argument */
2995 switch (save_mode) {
2997 ARM_STR_IMM (code, ARMREG_R0, cfg->frame_reg, save_offset);
2998 ARM_STR_IMM (code, ARMREG_R1, cfg->frame_reg, save_offset + 4);
2999 if (enable_arguments) {
3000 ARM_MOV_REG_REG (code, ARMREG_R2, ARMREG_R1);
3001 ARM_MOV_REG_REG (code, ARMREG_R1, ARMREG_R0);
3005 ARM_STR_IMM (code, ARMREG_R0, cfg->frame_reg, save_offset);
3006 if (enable_arguments) {
3007 ARM_MOV_REG_REG (code, ARMREG_R1, ARMREG_R0);
3011 ARM_FSTS (code, ARM_VFP_F0, cfg->frame_reg, save_offset);
3012 if (enable_arguments) {
3013 ARM_FMRS (code, ARMREG_R1, ARM_VFP_F0);
3017 ARM_FSTD (code, ARM_VFP_D0, cfg->frame_reg, save_offset);
3018 if (enable_arguments) {
3019 ARM_FMDRR (code, ARMREG_R1, ARMREG_R2, ARM_VFP_D0);
3023 if (enable_arguments) {
3024 /* FIXME: get the actual address */
3025 ARM_MOV_REG_REG (code, ARMREG_R1, ARMREG_R0);
/* Call FUNC with the MonoMethod in R0 */
3033 code = mono_arm_emit_load_imm (code, ARMREG_R0, (guint32)cfg->method);
3034 code = mono_arm_emit_load_imm (code, ARMREG_IP, (guint32)func);
3035 code = emit_call_reg (code, ARMREG_IP);
/* Restore the saved return value */
3037 switch (save_mode) {
3039 ARM_LDR_IMM (code, ARMREG_R0, cfg->frame_reg, save_offset);
3040 ARM_LDR_IMM (code, ARMREG_R1, cfg->frame_reg, save_offset + 4);
3043 ARM_LDR_IMM (code, ARMREG_R0, cfg->frame_reg, save_offset);
3046 ARM_FLDS (code, ARM_VFP_F0, cfg->frame_reg, save_offset);
3049 ARM_FLDD (code, ARM_VFP_D0, cfg->frame_reg, save_offset);
3060 * The immediate field for cond branches is big enough for all reasonable methods
/* Emit a conditional branch to ins->inst_true_bb. The direct-offset fast
 * path is disabled ("if (0 && ...)"); the live path always records a BB
 * patch and emits a branch with a zero displacement to be fixed up later. */
3062 #define EMIT_COND_BRANCH_FLAGS(ins,condcode) \
3063 if (0 && ins->inst_true_bb->native_offset) { \
3064 ARM_B_COND (code, (condcode), (code - cfg->native_code + ins->inst_true_bb->native_offset) & 0xffffff); \
3066 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_BB, ins->inst_true_bb); \
3067 ARM_B_COND (code, (condcode), 0); \
3070 #define EMIT_COND_BRANCH(ins,cond) EMIT_COND_BRANCH_FLAGS(ins, branch_cc_table [(cond)])
3072 /* emit an exception if condition is fail
3074 * We assign the extra code used to throw the implicit exceptions
3075 * to cfg->bb_exit as far as the big branch handling is concerned
/* Conditional branch-and-link to an exception throw stub, resolved later
 * via a MONO_PATCH_INFO_EXC patch keyed by exception name. */
3077 #define EMIT_COND_SYSTEM_EXCEPTION_FLAGS(condcode,exc_name) \
3079 mono_add_patch_info (cfg, code - cfg->native_code, \
3080 MONO_PATCH_INFO_EXC, exc_name); \
3081 ARM_BL_COND (code, (condcode), 0); \
3084 #define EMIT_COND_SYSTEM_EXCEPTION(cond,exc_name) EMIT_COND_SYSTEM_EXCEPTION_FLAGS(branch_cc_table [(cond)], (exc_name))
/* mono_arch_peephole_pass_1: first peephole pass over BB.
 * NOTE(review): the body is elided from this view — presumably empty or
 * trivial on ARM; confirm against the full source. */
3087 mono_arch_peephole_pass_1 (MonoCompile *cfg, MonoBasicBlock *bb)
/*
 * mono_arch_peephole_pass_2:
 *   Second peephole pass: folds redundant load-after-store, load-after-load
 *   and move patterns within a basic block, using last_ins as a one-
 *   instruction lookback window.
 *   NOTE(review): elided extract — some case labels, breaks and the
 *   last_ins update are missing from this view.
 */
3092 mono_arch_peephole_pass_2 (MonoCompile *cfg, MonoBasicBlock *bb)
3094 MonoInst *ins, *n, *last_ins = NULL;
3096 MONO_BB_FOR_EACH_INS_SAFE (bb, n, ins) {
3097 switch (ins->opcode) {
3100 /* Already done by an arch-independent pass */
3102 case OP_LOAD_MEMBASE:
3103 case OP_LOADI4_MEMBASE:
/* store reg -> load from same [basereg+offset]: reuse the stored reg */
3105 * OP_STORE_MEMBASE_REG reg, offset(basereg)
3106 * OP_LOAD_MEMBASE offset(basereg), reg
3108 if (last_ins && (last_ins->opcode == OP_STOREI4_MEMBASE_REG
3109 || last_ins->opcode == OP_STORE_MEMBASE_REG) &&
3110 ins->inst_basereg == last_ins->inst_destbasereg &&
3111 ins->inst_offset == last_ins->inst_offset) {
3112 if (ins->dreg == last_ins->sreg1) {
3113 MONO_DELETE_INS (bb, ins);
3116 //static int c = 0; printf ("MATCHX %s %d\n", cfg->method->name,c++);
3117 ins->opcode = OP_MOVE;
3118 ins->sreg1 = last_ins->sreg1;
/* two identical loads in a row: drop or turn the second into a move */
3122 * Note: reg1 must be different from the basereg in the second load
3123 * OP_LOAD_MEMBASE offset(basereg), reg1
3124 * OP_LOAD_MEMBASE offset(basereg), reg2
3126 * OP_LOAD_MEMBASE offset(basereg), reg1
3127 * OP_MOVE reg1, reg2
3129 } if (last_ins && (last_ins->opcode == OP_LOADI4_MEMBASE
3130 || last_ins->opcode == OP_LOAD_MEMBASE) &&
3131 ins->inst_basereg != last_ins->dreg &&
3132 ins->inst_basereg == last_ins->inst_basereg &&
3133 ins->inst_offset == last_ins->inst_offset) {
3135 if (ins->dreg == last_ins->dreg) {
3136 MONO_DELETE_INS (bb, ins);
3139 ins->opcode = OP_MOVE;
3140 ins->sreg1 = last_ins->dreg;
3143 //g_assert_not_reached ();
/* store imm -> load: rewrite the load as an ICONST (rule still unverified:
 * guarded by g_assert_not_reached below) */
3147 * OP_STORE_MEMBASE_IMM imm, offset(basereg)
3148 * OP_LOAD_MEMBASE offset(basereg), reg
3150 * OP_STORE_MEMBASE_IMM imm, offset(basereg)
3151 * OP_ICONST reg, imm
3153 } else if (last_ins && (last_ins->opcode == OP_STOREI4_MEMBASE_IMM
3154 || last_ins->opcode == OP_STORE_MEMBASE_IMM) &&
3155 ins->inst_basereg == last_ins->inst_destbasereg &&
3156 ins->inst_offset == last_ins->inst_offset) {
3157 //static int c = 0; printf ("MATCHX %s %d\n", cfg->method->name,c++);
3158 ins->opcode = OP_ICONST;
3159 ins->inst_c0 = last_ins->inst_imm;
3160 g_assert_not_reached (); // check this rule
/* narrow load after narrow store of the same slot: replace with a
 * sign/zero-extending conversion of the stored register */
3164 case OP_LOADU1_MEMBASE:
3165 case OP_LOADI1_MEMBASE:
3166 if (last_ins && (last_ins->opcode == OP_STOREI1_MEMBASE_REG) &&
3167 ins->inst_basereg == last_ins->inst_destbasereg &&
3168 ins->inst_offset == last_ins->inst_offset) {
3169 ins->opcode = (ins->opcode == OP_LOADI1_MEMBASE) ? OP_ICONV_TO_I1 : OP_ICONV_TO_U1;
3170 ins->sreg1 = last_ins->sreg1;
3173 case OP_LOADU2_MEMBASE:
3174 case OP_LOADI2_MEMBASE:
3175 if (last_ins && (last_ins->opcode == OP_STOREI2_MEMBASE_REG) &&
3176 ins->inst_basereg == last_ins->inst_destbasereg &&
3177 ins->inst_offset == last_ins->inst_offset) {
3178 ins->opcode = (ins->opcode == OP_LOADI2_MEMBASE) ? OP_ICONV_TO_I2 : OP_ICONV_TO_U2;
3179 ins->sreg1 = last_ins->sreg1;
3183 ins->opcode = OP_MOVE;
/* self-move: delete */
3187 if (ins->dreg == ins->sreg1) {
3188 MONO_DELETE_INS (bb, ins);
/* move a->b followed by move b->a: the second is redundant */
3192 * OP_MOVE sreg, dreg
3193 * OP_MOVE dreg, sreg
3195 if (last_ins && last_ins->opcode == OP_MOVE &&
3196 ins->sreg1 == last_ins->dreg &&
3197 ins->dreg == last_ins->sreg1) {
3198 MONO_DELETE_INS (bb, ins);
3206 bb->last_ins = last_ins;
3210 * the branch_cc_table should maintain the order of these
3224 branch_cc_table [] = {
/* Allocate a new instruction DEST with opcode OP and insert it into the
 * current bb immediately before 'ins' (used by the lowering pass below). */
3238 #define ADD_NEW_INS(cfg,dest,op) do { \
3239 MONO_INST_NEW ((cfg), (dest), (op)); \
3240 mono_bblock_insert_before_ins (bb, ins, (dest)); \
/*
 * map_to_reg_reg_op:
 *   Map a MEMBASE (register + immediate offset) opcode to its MEMINDEX
 *   (register + register) equivalent, and a *_MEMBASE_IMM store to its
 *   *_MEMBASE_REG form, for use when an immediate does not fit the
 *   instruction encoding. Aborts on opcodes with no mapping.
 */
3244 map_to_reg_reg_op (int op)
3253 case OP_COMPARE_IMM:
3255 case OP_ICOMPARE_IMM:
3269 case OP_LOAD_MEMBASE:
3270 return OP_LOAD_MEMINDEX;
3271 case OP_LOADI4_MEMBASE:
3272 return OP_LOADI4_MEMINDEX;
3273 case OP_LOADU4_MEMBASE:
3274 return OP_LOADU4_MEMINDEX;
3275 case OP_LOADU1_MEMBASE:
3276 return OP_LOADU1_MEMINDEX;
3277 case OP_LOADI2_MEMBASE:
3278 return OP_LOADI2_MEMINDEX;
3279 case OP_LOADU2_MEMBASE:
3280 return OP_LOADU2_MEMINDEX;
3281 case OP_LOADI1_MEMBASE:
3282 return OP_LOADI1_MEMINDEX;
3283 case OP_STOREI1_MEMBASE_REG:
3284 return OP_STOREI1_MEMINDEX;
3285 case OP_STOREI2_MEMBASE_REG:
3286 return OP_STOREI2_MEMINDEX;
3287 case OP_STOREI4_MEMBASE_REG:
3288 return OP_STOREI4_MEMINDEX;
3289 case OP_STORE_MEMBASE_REG:
3290 return OP_STORE_MEMINDEX;
3291 case OP_STORER4_MEMBASE_REG:
3292 return OP_STORER4_MEMINDEX;
3293 case OP_STORER8_MEMBASE_REG:
3294 return OP_STORER8_MEMINDEX;
/* Immediate stores become register stores (the imm is materialized first) */
3295 case OP_STORE_MEMBASE_IMM:
3296 return OP_STORE_MEMBASE_REG;
3297 case OP_STOREI1_MEMBASE_IMM:
3298 return OP_STOREI1_MEMBASE_REG;
3299 case OP_STOREI2_MEMBASE_IMM:
3300 return OP_STOREI2_MEMBASE_REG;
3301 case OP_STOREI4_MEMBASE_IMM:
3302 return OP_STOREI4_MEMBASE_REG;
3304 g_assert_not_reached ();
3308 * Remove from the instruction list the instructions that can't be
3309 * represented with very simple instructions with no register
/*
 * mono_arch_lowering_pass:
 *   Rewrite instructions whose immediates don't fit ARM's encodings:
 *   materialize the constant into a vreg (OP_ICONST inserted before the
 *   instruction) and switch to the register-register form, or split large
 *   load/store offsets into an ADD plus a small-offset access.
 *   NOTE(review): elided extract — several case labels, breaks and braces
 *   are missing from this view.
 */
3313 mono_arch_lowering_pass (MonoCompile *cfg, MonoBasicBlock *bb)
3315 MonoInst *ins, *temp, *last_ins = NULL;
3316 int rot_amount, imm8, low_imm;
3318 MONO_BB_FOR_EACH_INS (bb, ins) {
3320 switch (ins->opcode) {
3324 case OP_COMPARE_IMM:
3325 case OP_ICOMPARE_IMM:
/* ALU immediates must be an 8-bit value rotated by an even amount;
 * otherwise load the constant into a register first */
3339 if ((imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount)) < 0) {
3340 ADD_NEW_INS (cfg, temp, OP_ICONST);
3341 temp->inst_c0 = ins->inst_imm;
3342 temp->dreg = mono_alloc_ireg (cfg);
3343 ins->sreg2 = temp->dreg;
3344 ins->opcode = mono_op_imm_to_op (ins->opcode);
3346 if (ins->opcode == OP_SBB || ins->opcode == OP_ISBB || ins->opcode == OP_SUBCC)
/* MUL by 1 -> move, by 0 -> const 0, by power of two -> shift;
 * anything else becomes a real IMUL with the constant in a register */
3352 if (ins->inst_imm == 1) {
3353 ins->opcode = OP_MOVE;
3356 if (ins->inst_imm == 0) {
3357 ins->opcode = OP_ICONST;
3361 imm8 = mono_is_power_of_two (ins->inst_imm);
3363 ins->opcode = OP_SHL_IMM;
3364 ins->inst_imm = imm8;
3367 ADD_NEW_INS (cfg, temp, OP_ICONST);
3368 temp->inst_c0 = ins->inst_imm;
3369 temp->dreg = mono_alloc_ireg (cfg);
3370 ins->sreg2 = temp->dreg;
3371 ins->opcode = OP_IMUL;
3377 if (ins->next && (ins->next->opcode == OP_COND_EXC_C || ins->next->opcode == OP_COND_EXC_IC))
3378 /* ARM sets the C flag to 1 if there was _no_ overflow */
3379 ins->next->opcode = OP_COND_EXC_NC;
/* div/rem immediates always go through a register */
3382 case OP_IDIV_UN_IMM:
3384 case OP_IREM_UN_IMM:
3385 ADD_NEW_INS (cfg, temp, OP_ICONST);
3386 temp->inst_c0 = ins->inst_imm;
3387 temp->dreg = mono_alloc_ireg (cfg);
3388 ins->sreg2 = temp->dreg;
3389 ins->opcode = mono_op_imm_to_op (ins->opcode);
3391 case OP_LOCALLOC_IMM:
3392 ADD_NEW_INS (cfg, temp, OP_ICONST);
3393 temp->inst_c0 = ins->inst_imm;
3394 temp->dreg = mono_alloc_ireg (cfg);
3395 ins->sreg1 = temp->dreg;
3396 ins->opcode = OP_LOCALLOC;
/* word/byte loads: 12-bit offset limit, else switch to indexed form */
3398 case OP_LOAD_MEMBASE:
3399 case OP_LOADI4_MEMBASE:
3400 case OP_LOADU4_MEMBASE:
3401 case OP_LOADU1_MEMBASE:
3402 /* we can do two things: load the immed in a register
3403 * and use an indexed load, or see if the immed can be
3404 * represented as an ad_imm + a load with a smaller offset
3405 * that fits. We just do the first for now, optimize later.
3407 if (arm_is_imm12 (ins->inst_offset))
3409 ADD_NEW_INS (cfg, temp, OP_ICONST);
3410 temp->inst_c0 = ins->inst_offset;
3411 temp->dreg = mono_alloc_ireg (cfg);
3412 ins->sreg2 = temp->dreg;
3413 ins->opcode = map_to_reg_reg_op (ins->opcode);
/* halfword/signed-byte loads: only an 8-bit offset is encodable */
3415 case OP_LOADI2_MEMBASE:
3416 case OP_LOADU2_MEMBASE:
3417 case OP_LOADI1_MEMBASE:
3418 if (arm_is_imm8 (ins->inst_offset))
3420 ADD_NEW_INS (cfg, temp, OP_ICONST);
3421 temp->inst_c0 = ins->inst_offset;
3422 temp->dreg = mono_alloc_ireg (cfg);
3423 ins->sreg2 = temp->dreg;
3424 ins->opcode = map_to_reg_reg_op (ins->opcode);
/* VFP loads have no reg+reg form: split the offset into a rotated-imm8
 * base adjustment plus a small residual, or fall back to an explicit ADD */
3426 case OP_LOADR4_MEMBASE:
3427 case OP_LOADR8_MEMBASE:
3428 if (arm_is_fpimm8 (ins->inst_offset))
3430 low_imm = ins->inst_offset & 0x1ff;
3431 if ((imm8 = mono_arm_is_rotated_imm8 (ins->inst_offset & ~0x1ff, &rot_amount)) >= 0) {
3432 ADD_NEW_INS (cfg, temp, OP_ADD_IMM);
3433 temp->inst_imm = ins->inst_offset & ~0x1ff;
3434 temp->sreg1 = ins->inst_basereg;
3435 temp->dreg = mono_alloc_ireg (cfg);
3436 ins->inst_basereg = temp->dreg;
3437 ins->inst_offset = low_imm;
3441 ADD_NEW_INS (cfg, temp, OP_ICONST);
3442 temp->inst_c0 = ins->inst_offset;
3443 temp->dreg = mono_alloc_ireg (cfg);
3445 ADD_NEW_INS (cfg, add_ins, OP_IADD);
3446 add_ins->sreg1 = ins->inst_basereg;
3447 add_ins->sreg2 = temp->dreg;
3448 add_ins->dreg = mono_alloc_ireg (cfg);
3450 ins->inst_basereg = add_ins->dreg;
3451 ins->inst_offset = 0;
/* stores mirror the loads above */
3454 case OP_STORE_MEMBASE_REG:
3455 case OP_STOREI4_MEMBASE_REG:
3456 case OP_STOREI1_MEMBASE_REG:
3457 if (arm_is_imm12 (ins->inst_offset))
3459 ADD_NEW_INS (cfg, temp, OP_ICONST);
3460 temp->inst_c0 = ins->inst_offset;
3461 temp->dreg = mono_alloc_ireg (cfg);
3462 ins->sreg2 = temp->dreg;
3463 ins->opcode = map_to_reg_reg_op (ins->opcode);
3465 case OP_STOREI2_MEMBASE_REG:
3466 if (arm_is_imm8 (ins->inst_offset))
3468 ADD_NEW_INS (cfg, temp, OP_ICONST);
3469 temp->inst_c0 = ins->inst_offset;
3470 temp->dreg = mono_alloc_ireg (cfg);
3471 ins->sreg2 = temp->dreg;
3472 ins->opcode = map_to_reg_reg_op (ins->opcode);
3474 case OP_STORER4_MEMBASE_REG:
3475 case OP_STORER8_MEMBASE_REG:
3476 if (arm_is_fpimm8 (ins->inst_offset))
3478 low_imm = ins->inst_offset & 0x1ff;
3479 if ((imm8 = mono_arm_is_rotated_imm8 (ins->inst_offset & ~ 0x1ff, &rot_amount)) >= 0 && arm_is_fpimm8 (low_imm)) {
3480 ADD_NEW_INS (cfg, temp, OP_ADD_IMM);
3481 temp->inst_imm = ins->inst_offset & ~0x1ff;
3482 temp->sreg1 = ins->inst_destbasereg;
3483 temp->dreg = mono_alloc_ireg (cfg);
3484 ins->inst_destbasereg = temp->dreg;
3485 ins->inst_offset = low_imm;
3489 ADD_NEW_INS (cfg, temp, OP_ICONST);
3490 temp->inst_c0 = ins->inst_offset;
3491 temp->dreg = mono_alloc_ireg (cfg);
3493 ADD_NEW_INS (cfg, add_ins, OP_IADD);
3494 add_ins->sreg1 = ins->inst_destbasereg;
3495 add_ins->sreg2 = temp->dreg;
3496 add_ins->dreg = mono_alloc_ireg (cfg);
3498 ins->inst_destbasereg = add_ins->dreg;
3499 ins->inst_offset = 0;
/* immediate stores: materialize the value, then re-lower the (possibly
 * oversized) offset via the goto below */
3502 case OP_STORE_MEMBASE_IMM:
3503 case OP_STOREI1_MEMBASE_IMM:
3504 case OP_STOREI2_MEMBASE_IMM:
3505 case OP_STOREI4_MEMBASE_IMM:
3506 ADD_NEW_INS (cfg, temp, OP_ICONST);
3507 temp->inst_c0 = ins->inst_imm;
3508 temp->dreg = mono_alloc_ireg (cfg);
3509 ins->sreg1 = temp->dreg;
3510 ins->opcode = map_to_reg_reg_op (ins->opcode);
3512 goto loop_start; /* make it handle the possibly big ins->inst_offset */
3514 gboolean swap = FALSE;
3518 /* Optimized away */
3523 /* Some fp compares require swapped operands */
3524 switch (ins->next->opcode) {
3526 ins->next->opcode = OP_FBLT;
3530 ins->next->opcode = OP_FBLT_UN;
3534 ins->next->opcode = OP_FBGE;
3538 ins->next->opcode = OP_FBGE_UN;
3546 ins->sreg1 = ins->sreg2;
3555 bb->last_ins = last_ins;
3556 bb->max_vreg = cfg->next_vreg;
/*
 * mono_arch_decompose_long_opts:
 *   Decompose 64-bit opcodes into 32-bit pairs. Only OP_LNEG is visible
 *   here: negate via reverse-subtract of the low word (setting flags) and
 *   reverse-subtract-with-carry of the high word.
 */
3560 mono_arch_decompose_long_opts (MonoCompile *cfg, MonoInst *long_ins)
3564 if (long_ins->opcode == OP_LNEG) {
3566 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ARM_RSBS_IMM, ins->dreg + 1, ins->sreg1 + 1, 0);
3567 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ARM_RSC_IMM, ins->dreg + 2, ins->sreg1 + 2, 0);
/*
 * emit_float_to_int:
 *   Emit VFP code converting the double in SREG to an integer in DREG,
 *   then truncate/extend DREG to SIZE bytes (zero-extend when !is_signed,
 *   sign-extend via shift pairs otherwise).
 */
3573 emit_float_to_int (MonoCompile *cfg, guchar *code, int dreg, int sreg, int size, gboolean is_signed)
3575 /* sreg is a float, dreg is an integer reg */
3577 code = mono_arm_emit_vfp_scratch_save (cfg, code, vfp_scratch1);
/* TOSIZD = to-signed-int, TOUIZD = to-unsigned-int, both round-to-zero */
3579 ARM_TOSIZD (code, vfp_scratch1, sreg);
3581 ARM_TOUIZD (code, vfp_scratch1, sreg);
3582 ARM_FMRS (code, dreg, vfp_scratch1);
3583 code = mono_arm_emit_vfp_scratch_restore (cfg, code, vfp_scratch1);
/* unsigned narrowing: mask (8-bit) or shift pair (16-bit) */
3587 ARM_AND_REG_IMM8 (code, dreg, dreg, 0xff);
3588 else if (size == 2) {
3589 ARM_SHL_IMM (code, dreg, dreg, 16);
3590 ARM_SHR_IMM (code, dreg, dreg, 16);
/* signed narrowing: shift left then arithmetic shift right */
3594 ARM_SHL_IMM (code, dreg, dreg, 24);
3595 ARM_SAR_IMM (code, dreg, dreg, 24);
3596 } else if (size == 2) {
3597 ARM_SHL_IMM (code, dreg, dreg, 16);
3598 ARM_SAR_IMM (code, dreg, dreg, 16);
3604 #endif /* #ifndef DISABLE_JIT */
3608 const guchar *target;
/* True when DIFF fits the signed 26-bit (+/-32MB) range of an ARM BL offset */
3613 #define is_call_imm(diff) ((gint)(diff) >= -33554432 && (gint)(diff) <= 33554431)
/*
 * search_thunk_slot:
 *   mono_code_manager_foreach/mono_domain_code_foreach callback. Scans a
 *   code chunk's thunk area for either an existing thunk targeting
 *   pdata->target or a free 3-word slot to emit one, then patches
 *   pdata->code to branch through it.
 */
3616 search_thunk_slot (void *data, int csize, int bsize, void *user_data) {
3617 PatchData *pdata = (PatchData*)user_data;
3618 guchar *code = data;
3619 guint32 *thunks = data;
3620 guint32 *endthunks = (guint32*)(code + bsize);
3622 int difflow, diffhigh;
3624 /* always ensure a call from pdata->code can reach to the thunks without further thunks */
3625 difflow = (char*)pdata->code - (char*)thunks;
3626 diffhigh = (char*)pdata->code - (char*)endthunks;
3627 if (!((is_call_imm (thunks) && is_call_imm (endthunks)) || (is_call_imm (difflow) && is_call_imm (diffhigh))))
3631 * The thunk is composed of 3 words:
3632 * load constant from thunks [2] into ARM_IP
3635 * Note that the LR register is already setup
3637 //g_print ("thunk nentries: %d\n", ((char*)endthunks - (char*)thunks)/16);
/* found == 2 presumably means "search all chunks" — TODO confirm against
 * the elided caller logic */
3638 if ((pdata->found == 2) || (pdata->code >= code && pdata->code <= code + csize)) {
3639 while (thunks < endthunks) {
3640 //g_print ("looking for target: %p at %p (%08x-%08x)\n", pdata->target, thunks, thunks [0], thunks [1]);
/* Reuse an existing thunk for the same target */
3641 if (thunks [2] == (guint32)pdata->target) {
3642 arm_patch (pdata->code, (guchar*)thunks);
3643 mono_arch_flush_icache (pdata->code, 4);
3646 } else if ((thunks [0] == 0) && (thunks [1] == 0) && (thunks [2] == 0)) {
3647 /* found a free slot instead: emit thunk */
3648 /* ARMREG_IP is fine to use since this can't be an IMT call
3651 code = (guchar*)thunks;
3652 ARM_LDR_IMM (code, ARMREG_IP, ARMREG_PC, 0);
3653 if (thumb_supported)
3654 ARM_BX (code, ARMREG_IP);
3656 ARM_MOV_REG_REG (code, ARMREG_PC, ARMREG_IP);
3657 thunks [2] = (guint32)pdata->target;
3658 mono_arch_flush_icache ((guchar*)thunks, 12);
3660 arm_patch (pdata->code, (guchar*)thunks);
3661 mono_arch_flush_icache (pdata->code, 4);
3665 /* skip 12 bytes, the size of the thunk */
3669 //g_print ("failed thunk lookup for %p from %p at %p (%d entries)\n", pdata->target, pdata->code, data, count);
/*
 * handle_thunk:
 *   Patch CODE to reach TARGET via a branch thunk when the direct branch
 *   displacement does not fit. Searches, in order: the dynamic-method code
 *   manager (if given), the domain's code chunks, and finally every dynamic
 *   method's code manager. Aborts if no slot can be found.
 */
3675 handle_thunk (MonoDomain *domain, int absolute, guchar *code, const guchar *target, MonoCodeManager *dyn_code_mp)
3680 domain = mono_domain_get ();
3683 pdata.target = target;
3684 pdata.absolute = absolute;
3688 mono_code_manager_foreach (dyn_code_mp, search_thunk_slot, &pdata);
3691 if (pdata.found != 1) {
3692 mono_domain_lock (domain);
3693 mono_domain_code_foreach (domain, search_thunk_slot, &pdata);
3696 /* this uses the first available slot */
/* second pass — presumably with pdata.found set to 2 ("any chunk") by an
 * elided line; confirm against the full source */
3698 mono_domain_code_foreach (domain, search_thunk_slot, &pdata);
3700 mono_domain_unlock (domain);
3703 if (pdata.found != 1) {
3705 GHashTableIter iter;
3706 MonoJitDynamicMethodInfo *ji;
3709 * This might be a dynamic method, search its code manager. We can only
3710 * use the dynamic method containing CODE, since the others might be freed later.
3714 mono_domain_lock (domain);
3715 hash = domain_jit_info (domain)->dynamic_code_hash;
3717 /* FIXME: Speed this up */
3718 g_hash_table_iter_init (&iter, hash);
3719 while (g_hash_table_iter_next (&iter, NULL, (gpointer*)&ji)) {
3720 mono_code_manager_foreach (ji->code_mp, search_thunk_slot, &pdata);
3721 if (pdata.found == 1)
3725 mono_domain_unlock (domain);
3727 if (pdata.found != 1)
3728 g_print ("thunk failed for %p from %p\n", target, code);
3729 g_assert (pdata.found == 1);
/*
 * arm_patch_general:
 *   Patch the branch/call instruction(s) at CODE to transfer to TARGET.
 *   Handles: direct B/BL (rewriting the 24-bit displacement, upgrading BL
 *   to BLX when the target is Thumb), jump tables (USE_JUMP_TABLES), and
 *   the constant-pool-based indirect call sequences (BX/BLX through IP),
 *   falling back to handle_thunk () when the displacement is out of range.
 *   NOTE(review): elided extract — some braces, returns and a few sequence
 *   lines are missing from this view.
 */
3733 arm_patch_general (MonoDomain *domain, guchar *code, const guchar *target, MonoCodeManager *dyn_code_mp)
3735 guint32 *code32 = (void*)code;
3736 guint32 ins = *code32;
/* bits 27:25 == 101b identify B/BL with immediate */
3737 guint32 prim = (ins >> 25) & 7;
3738 guint32 tval = GPOINTER_TO_UINT (target);
3740 //g_print ("patching 0x%08x (0x%08x) to point to 0x%08x\n", code, ins, target);
3741 if (prim == 5) { /* 101b */
3742 /* the diff starts 8 bytes from the branch opcode */
3743 gint diff = target - code - 8;
3745 gint tmask = 0xffffffff;
3746 if (tval & 1) { /* entering thumb mode */
3747 diff = target - 1 - code - 8;
3748 g_assert (thumb_supported);
3749 tbits = 0xf << 28; /* bl->blx bit pattern */
3750 g_assert ((ins & (1 << 24))); /* it must be a bl, not b instruction */
3751 /* this low bit of the displacement is moved to bit 24 in the instruction encoding */
3755 tmask = ~(1 << 24); /* clear the link bit */
3756 /*g_print ("blx to thumb: target: %p, code: %p, diff: %d, mask: %x\n", target, code, diff, tmask);*/
/* Forward branch: displacement must fit 26 bits (+/-32MB) */
3761 if (diff <= 33554431) {
3763 ins = (ins & 0xff000000) | diff;
3765 *code32 = ins | tbits;
3769 /* diff between 0 and -33554432 */
3770 if (diff >= -33554432) {
3772 ins = (ins & 0xff000000) | (diff & ~0xff000000);
3774 *code32 = ins | tbits;
/* Out of direct-branch range: route through a thunk */
3779 handle_thunk (domain, TRUE, code, target, dyn_code_mp);
3783 #ifdef USE_JUMP_TABLES
3785 gpointer *jte = mono_jumptable_get_entry (code);
3787 jte [0] = (gpointer) target;
3791 * The alternative call sequences looks like this:
3793 * ldr ip, [pc] // loads the address constant
3794 * b 1f // jumps around the constant
3795 * address constant embedded in the code
3800 * There are two cases for patching:
3801 * a) at the end of method emission: in this case code points to the start
3802 * of the call sequence
3803 * b) during runtime patching of the call site: in this case code points
3804 * to the mov pc, ip instruction
3806 * We have to handle also the thunk jump code sequence:
3810 * address constant // execution never reaches here
/* BX rt (0x12fff1x): rebuild the expected ldr/mov/bx sequence and rewrite
 * the embedded address constant */
3812 if ((ins & 0x0ffffff0) == 0x12fff10) {
3813 /* Branch and exchange: the address is constructed in a reg
3814 * We can patch BX when the code sequence is the following:
3815 * ldr ip, [pc, #0] ; 0x8
3822 guint8 *emit = (guint8*)ccode;
3823 ARM_LDR_IMM (emit, ARMREG_IP, ARMREG_PC, 0);
3825 ARM_MOV_REG_REG (emit, ARMREG_LR, ARMREG_PC);
3826 ARM_BX (emit, ARMREG_IP);
3828 /*patching from magic trampoline*/
3829 if (ins == ccode [3]) {
3830 g_assert (code32 [-4] == ccode [0]);
3831 g_assert (code32 [-3] == ccode [1]);
3832 g_assert (code32 [-1] == ccode [2]);
3833 code32 [-2] = (guint32)target;
3836 /*patching from JIT*/
3837 if (ins == ccode [0]) {
3838 g_assert (code32 [1] == ccode [1]);
3839 g_assert (code32 [3] == ccode [2]);
3840 g_assert (code32 [4] == ccode [3]);
3841 code32 [2] = (guint32)target;
3844 g_assert_not_reached ();
/* BLX rt (0x12fff3x): ldr/blx sequence, constant sits just before CODE */
3845 } else if ((ins & 0x0ffffff0) == 0x12fff30) {
3853 guint8 *emit = (guint8*)ccode;
3854 ARM_LDR_IMM (emit, ARMREG_IP, ARMREG_PC, 0);
3856 ARM_BLX_REG (emit, ARMREG_IP);
3858 g_assert (code32 [-3] == ccode [0]);
3859 g_assert (code32 [-2] == ccode [1]);
3860 g_assert (code32 [0] == ccode [2]);
3862 code32 [-1] = (guint32)target;
/* remaining case: ldr/mov lr,pc/mov pc,ip (or thunk jump) sequences */
3865 guint32 *tmp = ccode;
3866 guint8 *emit = (guint8*)tmp;
3867 ARM_LDR_IMM (emit, ARMREG_IP, ARMREG_PC, 0);
3868 ARM_MOV_REG_REG (emit, ARMREG_LR, ARMREG_PC);
3869 ARM_MOV_REG_REG (emit, ARMREG_PC, ARMREG_IP);
3870 ARM_BX (emit, ARMREG_IP);
3871 if (ins == ccode [2]) {
3872 g_assert_not_reached (); // should be -2 ...
3873 code32 [-1] = (guint32)target;
3876 if (ins == ccode [0]) {
3877 /* handles both thunk jump code and the far call sequence */
3878 code32 [2] = (guint32)target;
3881 g_assert_not_reached ();
3883 // g_print ("patched with 0x%08x\n", ins);
/* arm_patch: convenience wrapper — patch CODE to TARGET with no domain or
 * dynamic-method code manager context. */
3888 arm_patch (guchar *code, const guchar *target)
3890 arm_patch_general (NULL, code, target, NULL);
3894 * Return the >= 0 uimm8 value if val can be represented with a byte + rotation
3895 * (with the rotation amount in *rot_amount. rot_amount is already adjusted
3896 * to be used with the emit macros.
3897 * Return -1 otherwise.
/* Tries every even rotation (ARM data-processing immediates rotate right
 * by an even amount); the elided body presumably checks res <= 0xff. */
3900 mono_arm_is_rotated_imm8 (guint32 val, gint *rot_amount)
3903 for (i = 0; i < 31; i+= 2) {
3904 res = (val << (32 - i)) | (val >> i);
3907 *rot_amount = i? 32 - i: 0;
3914 * Emits in code a sequence of instructions that load the value 'val'
3915 * into the dreg register. Uses at most 4 instructions.
/* Strategy, in order of preference (branch structure partly elided):
 *   - PC-relative load from an inline constant pool (first path below)
 *   - single MOV/MVN of a rotated 8-bit immediate
 *   - MOVW/MOVT pair (ARMv6T2+)
 *   - build the value byte-by-byte with MOV + up to three ADDs */
3918 mono_arm_emit_load_imm (guint8 *code, int dreg, guint32 val)
3920 int imm8, rot_amount;
3922 ARM_LDR_IMM (code, dreg, ARMREG_PC, 0);
3923 /* skip the constant pool */
3929 if ((imm8 = mono_arm_is_rotated_imm8 (val, &rot_amount)) >= 0) {
3930 ARM_MOV_REG_IMM (code, dreg, imm8, rot_amount);
3931 } else if ((imm8 = mono_arm_is_rotated_imm8 (~val, &rot_amount)) >= 0) {
3932 ARM_MVN_REG_IMM (code, dreg, imm8, rot_amount);
3935 ARM_MOVW_REG_IMM (code, dreg, val & 0xffff);
3937 ARM_MOVT_REG_IMM (code, dreg, (val >> 16) & 0xffff);
/* byte-wise construction: each ADD inserts one byte at its position via
 * the rotate encoding (rotation amounts 24/16/8) */
3941 ARM_MOV_REG_IMM8 (code, dreg, (val & 0xFF));
3943 ARM_ADD_REG_IMM (code, dreg, dreg, (val & 0xFF00) >> 8, 24);
3945 if (val & 0xFF0000) {
3946 ARM_ADD_REG_IMM (code, dreg, dreg, (val & 0xFF0000) >> 16, 16);
3948 if (val & 0xFF000000) {
3949 ARM_ADD_REG_IMM (code, dreg, dreg, (val & 0xFF000000) >> 24, 8);
3951 } else if (val & 0xFF00) {
3952 ARM_MOV_REG_IMM (code, dreg, (val & 0xFF00) >> 8, 24);
3953 if (val & 0xFF0000) {
3954 ARM_ADD_REG_IMM (code, dreg, dreg, (val & 0xFF0000) >> 16, 16);
3956 if (val & 0xFF000000) {
3957 ARM_ADD_REG_IMM (code, dreg, dreg, (val & 0xFF000000) >> 24, 8);
3959 } else if (val & 0xFF0000) {
3960 ARM_MOV_REG_IMM (code, dreg, (val & 0xFF0000) >> 16, 16);
3961 if (val & 0xFF000000) {
3962 ARM_ADD_REG_IMM (code, dreg, dreg, (val & 0xFF000000) >> 24, 8);
3965 //g_assert_not_reached ();
/* mono_arm_thumb_supported: report whether the CPU supports Thumb
 * interworking (cached in the file-level 'thumb_supported' flag). */
3971 mono_arm_thumb_supported (void)
3973 return thumb_supported;
3979 * emit_load_volatile_arguments:
3981 * Load volatile arguments from the stack to the original input registers.
3982 * Required before a tail call.
/*
 * NOTE(review): chunk is elided — local declarations, some else-branches and
 * the function's closing lines between the numbered lines below are not
 * visible; only comments were added.
 *
 * Walks the method's call info (as computed by get_call_info, mirroring
 * emit_prolog) and re-materializes each incoming argument from its stack
 * home back into the register(s) the calling convention originally used,
 * so a tail call sees the arguments where the callee expects them.
 */
3985 emit_load_volatile_arguments (MonoCompile *cfg, guint8 *code)
3987 MonoMethod *method = cfg->method;
3988 MonoMethodSignature *sig;
3993 /* FIXME: Generate intermediate code instead */
3995 sig = mono_method_signature (method);
3997 /* This is the opposite of the code in emit_prolog */
4001 cinfo = get_call_info (cfg->generic_sharing_context, NULL, sig);
/* If a vtype is returned via a hidden return-address argument, reload that
 * address into its original register from the saved vret_addr slot. */
4003 if (cinfo->vtype_retaddr) {
4004 ArgInfo *ainfo = &cinfo->ret;
4005 inst = cfg->vret_addr;
4006 g_assert (arm_is_imm12 (inst->inst_offset));
4007 ARM_LDR_IMM (code, ainfo->reg, inst->inst_basereg, inst->inst_offset);
/* One iteration per formal argument, plus the implicit 'this' if present. */
4009 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
4010 ArgInfo *ainfo = cinfo->args + i;
4011 inst = cfg->args [pos];
4013 if (cfg->verbose_level > 2)
4014 g_print ("Loading argument %d (type: %d)\n", i, ainfo->storage);
/* Argument lives in a register variable: move/load it into the ABI reg. */
4015 if (inst->opcode == OP_REGVAR) {
4016 if (ainfo->storage == RegTypeGeneral)
4017 ARM_MOV_REG_REG (code, inst->dreg, ainfo->reg);
4018 else if (ainfo->storage == RegTypeFP) {
4019 g_assert_not_reached ();
4020 } else if (ainfo->storage == RegTypeBase) {
/* Stack-passed argument: load relative to SP; fall back to an IP-based
 * register offset when the displacement does not fit in imm12. */
4024 if (arm_is_imm12 (prev_sp_offset + ainfo->offset)) {
4025 ARM_LDR_IMM (code, inst->dreg, ARMREG_SP, (prev_sp_offset + ainfo->offset));
4027 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
4028 ARM_LDR_REG_REG (code, inst->dreg, ARMREG_SP, ARMREG_IP);
4032 g_assert_not_reached ();
/* Argument was spilled to a stack slot: reload into its ABI register(s). */
4034 if (ainfo->storage == RegTypeGeneral || ainfo->storage == RegTypeIRegPair) {
4035 switch (ainfo->size) {
/* 8-byte case (presumably size == 8 — the case label is elided):
 * reload both halves of the register pair. */
4042 g_assert (arm_is_imm12 (inst->inst_offset));
4043 ARM_LDR_IMM (code, ainfo->reg, inst->inst_basereg, inst->inst_offset);
4044 g_assert (arm_is_imm12 (inst->inst_offset + 4));
4045 ARM_LDR_IMM (code, ainfo->reg + 1, inst->inst_basereg, inst->inst_offset + 4);
/* Default word-sized reload, with the usual imm12 / IP-offset fallback. */
4048 if (arm_is_imm12 (inst->inst_offset)) {
4049 ARM_LDR_IMM (code, ainfo->reg, inst->inst_basereg, inst->inst_offset);
4051 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
4052 ARM_LDR_REG_REG (code, ainfo->reg, inst->inst_basereg, ARMREG_IP);
4056 } else if (ainfo->storage == RegTypeBaseGen) {
4059 } else if (ainfo->storage == RegTypeBase) {
4061 } else if (ainfo->storage == RegTypeFP) {
4062 g_assert_not_reached ();
/* Struct passed by value in registers: reload each word into the
 * consecutive registers assigned to it. */
4063 } else if (ainfo->storage == RegTypeStructByVal) {
4064 int doffset = inst->inst_offset;
4068 if (mono_class_from_mono_type (inst->inst_vtype))
4069 size = mono_class_native_size (mono_class_from_mono_type (inst->inst_vtype), NULL);
4070 for (cur_reg = 0; cur_reg < ainfo->size; ++cur_reg) {
4071 if (arm_is_imm12 (doffset)) {
4072 ARM_LDR_IMM (code, ainfo->reg + cur_reg, inst->inst_basereg, doffset);
4074 code = mono_arm_emit_load_imm (code, ARMREG_IP, doffset);
4075 ARM_LDR_REG_REG (code, ainfo->reg + cur_reg, inst->inst_basereg, ARMREG_IP);
4077 soffset += sizeof (gpointer);
4078 doffset += sizeof (gpointer);
4083 } else if (ainfo->storage == RegTypeStructByAddr) {
4098 mono_arch_output_basic_block (MonoCompile *cfg, MonoBasicBlock *bb)
4103 guint8 *code = cfg->native_code + cfg->code_len;
4104 MonoInst *last_ins = NULL;
4105 guint last_offset = 0;
4107 int imm8, rot_amount;
4109 /* we don't align basic blocks of loops on arm */
4111 if (cfg->verbose_level > 2)
4112 g_print ("Basic block %d starting at offset 0x%x\n", bb->block_num, bb->native_offset);
4114 cpos = bb->max_offset;
4116 if (cfg->prof_options & MONO_PROFILE_COVERAGE) {
4117 //MonoCoverageInfo *cov = mono_get_coverage_info (cfg->method);
4118 //g_assert (!mono_compile_aot);
4121 // cov->data [bb->dfn].iloffset = bb->cil_code - cfg->cil_code;
4122 /* this is not thread save, but good enough */
4123 /* fixme: howto handle overflows? */
4124 //x86_inc_mem (code, &cov->data [bb->dfn].count);
4127 if (mono_break_at_bb_method && mono_method_desc_full_match (mono_break_at_bb_method, cfg->method) && bb->block_num == mono_break_at_bb_bb_num) {
4128 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_INTERNAL_METHOD,
4129 (gpointer)"mono_break");
4130 code = emit_call_seq (cfg, code);
4133 MONO_BB_FOR_EACH_INS (bb, ins) {
4134 offset = code - cfg->native_code;
4136 max_len = ((guint8 *)ins_get_spec (ins->opcode))[MONO_INST_LEN];
4138 if (offset > (cfg->code_size - max_len - 16)) {
4139 cfg->code_size *= 2;
4140 cfg->native_code = g_realloc (cfg->native_code, cfg->code_size);
4141 code = cfg->native_code + offset;
4143 // if (ins->cil_code)
4144 // g_print ("cil code\n");
4145 mono_debug_record_line_number (cfg, ins, offset);
4147 switch (ins->opcode) {
4148 case OP_MEMORY_BARRIER:
4150 ARM_MOV_REG_IMM8 (code, ARMREG_R0, 0);
4151 ARM_MCR (code, 15, 0, ARMREG_R0, 7, 10, 5);
4155 #ifdef HAVE_AEABI_READ_TP
4156 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_INTERNAL_METHOD,
4157 (gpointer)"__aeabi_read_tp");
4158 code = emit_call_seq (cfg, code);
4160 ARM_LDR_IMM (code, ins->dreg, ARMREG_R0, ins->inst_offset);
4162 g_assert_not_reached ();
4165 case OP_ATOMIC_EXCHANGE_I4:
4166 case OP_ATOMIC_CAS_I4:
4167 case OP_ATOMIC_ADD_I4: {
4171 g_assert (v7_supported);
4174 if (ins->sreg1 != ARMREG_IP && ins->sreg2 != ARMREG_IP && ins->sreg3 != ARMREG_IP)
4176 else if (ins->sreg1 != ARMREG_R0 && ins->sreg2 != ARMREG_R0 && ins->sreg3 != ARMREG_R0)
4178 else if (ins->sreg1 != ARMREG_R1 && ins->sreg2 != ARMREG_R1 && ins->sreg3 != ARMREG_R1)
4182 g_assert (cfg->arch.atomic_tmp_offset != -1);
4183 ARM_STR_IMM (code, tmpreg, cfg->frame_reg, cfg->arch.atomic_tmp_offset);
4185 switch (ins->opcode) {
4186 case OP_ATOMIC_EXCHANGE_I4:
4188 ARM_DMB (code, ARM_DMB_SY);
4189 ARM_LDREX_REG (code, ARMREG_LR, ins->sreg1);
4190 ARM_STREX_REG (code, tmpreg, ins->sreg2, ins->sreg1);
4191 ARM_CMP_REG_IMM (code, tmpreg, 0, 0);
4193 ARM_B_COND (code, ARMCOND_NE, 0);
4194 arm_patch (buf [1], buf [0]);
4196 case OP_ATOMIC_CAS_I4:
4197 ARM_DMB (code, ARM_DMB_SY);
4199 ARM_LDREX_REG (code, ARMREG_LR, ins->sreg1);
4200 ARM_CMP_REG_REG (code, ARMREG_LR, ins->sreg3);
4202 ARM_B_COND (code, ARMCOND_NE, 0);
4203 ARM_STREX_REG (code, tmpreg, ins->sreg2, ins->sreg1);
4204 ARM_CMP_REG_IMM (code, tmpreg, 0, 0);
4206 ARM_B_COND (code, ARMCOND_NE, 0);
4207 arm_patch (buf [2], buf [0]);
4208 arm_patch (buf [1], code);
4210 case OP_ATOMIC_ADD_I4:
4212 ARM_DMB (code, ARM_DMB_SY);
4213 ARM_LDREX_REG (code, ARMREG_LR, ins->sreg1);
4214 ARM_ADD_REG_REG (code, ARMREG_LR, ARMREG_LR, ins->sreg2);
4215 ARM_STREX_REG (code, tmpreg, ARMREG_LR, ins->sreg1);
4216 ARM_CMP_REG_IMM (code, tmpreg, 0, 0);
4218 ARM_B_COND (code, ARMCOND_NE, 0);
4219 arm_patch (buf [1], buf [0]);
4222 g_assert_not_reached ();
4225 ARM_DMB (code, ARM_DMB_SY);
4226 if (tmpreg != ins->dreg)
4227 ARM_LDR_IMM (code, tmpreg, cfg->frame_reg, cfg->arch.atomic_tmp_offset);
4228 ARM_MOV_REG_REG (code, ins->dreg, ARMREG_LR);
4233 ppc_mullw (code, ppc_r4, ins->sreg1, ins->sreg2);
4234 ppc_mulhw (code, ppc_r3, ins->sreg1, ins->sreg2);
4237 ppc_mullw (code, ppc_r4, ins->sreg1, ins->sreg2);
4238 ppc_mulhwu (code, ppc_r3, ins->sreg1, ins->sreg2);
4240 case OP_STOREI1_MEMBASE_IMM:
4241 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_imm & 0xFF);
4242 g_assert (arm_is_imm12 (ins->inst_offset));
4243 ARM_STRB_IMM (code, ARMREG_LR, ins->inst_destbasereg, ins->inst_offset);
4245 case OP_STOREI2_MEMBASE_IMM:
4246 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_imm & 0xFFFF);
4247 g_assert (arm_is_imm8 (ins->inst_offset));
4248 ARM_STRH_IMM (code, ARMREG_LR, ins->inst_destbasereg, ins->inst_offset);
4250 case OP_STORE_MEMBASE_IMM:
4251 case OP_STOREI4_MEMBASE_IMM:
4252 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_imm);
4253 g_assert (arm_is_imm12 (ins->inst_offset));
4254 ARM_STR_IMM (code, ARMREG_LR, ins->inst_destbasereg, ins->inst_offset);
4256 case OP_STOREI1_MEMBASE_REG:
4257 g_assert (arm_is_imm12 (ins->inst_offset));
4258 ARM_STRB_IMM (code, ins->sreg1, ins->inst_destbasereg, ins->inst_offset);
4260 case OP_STOREI2_MEMBASE_REG:
4261 g_assert (arm_is_imm8 (ins->inst_offset));
4262 ARM_STRH_IMM (code, ins->sreg1, ins->inst_destbasereg, ins->inst_offset);
4264 case OP_STORE_MEMBASE_REG:
4265 case OP_STOREI4_MEMBASE_REG:
4266 /* this case is special, since it happens for spill code after lowering has been called */
4267 if (arm_is_imm12 (ins->inst_offset)) {
4268 ARM_STR_IMM (code, ins->sreg1, ins->inst_destbasereg, ins->inst_offset);
4270 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_offset);
4271 ARM_STR_REG_REG (code, ins->sreg1, ins->inst_destbasereg, ARMREG_LR);
4274 case OP_STOREI1_MEMINDEX:
4275 ARM_STRB_REG_REG (code, ins->sreg1, ins->inst_destbasereg, ins->sreg2);
4277 case OP_STOREI2_MEMINDEX:
4278 ARM_STRH_REG_REG (code, ins->sreg1, ins->inst_destbasereg, ins->sreg2);
4280 case OP_STORE_MEMINDEX:
4281 case OP_STOREI4_MEMINDEX:
4282 ARM_STR_REG_REG (code, ins->sreg1, ins->inst_destbasereg, ins->sreg2);
4285 g_assert_not_reached ();
4287 case OP_LOAD_MEMINDEX:
4288 case OP_LOADI4_MEMINDEX:
4289 case OP_LOADU4_MEMINDEX:
4290 ARM_LDR_REG_REG (code, ins->dreg, ins->inst_basereg, ins->sreg2);
4292 case OP_LOADI1_MEMINDEX:
4293 ARM_LDRSB_REG_REG (code, ins->dreg, ins->inst_basereg, ins->sreg2);
4295 case OP_LOADU1_MEMINDEX:
4296 ARM_LDRB_REG_REG (code, ins->dreg, ins->inst_basereg, ins->sreg2);
4298 case OP_LOADI2_MEMINDEX:
4299 ARM_LDRSH_REG_REG (code, ins->dreg, ins->inst_basereg, ins->sreg2);
4301 case OP_LOADU2_MEMINDEX:
4302 ARM_LDRH_REG_REG (code, ins->dreg, ins->inst_basereg, ins->sreg2);
4304 case OP_LOAD_MEMBASE:
4305 case OP_LOADI4_MEMBASE:
4306 case OP_LOADU4_MEMBASE:
4307 /* this case is special, since it happens for spill code after lowering has been called */
4308 if (arm_is_imm12 (ins->inst_offset)) {
4309 ARM_LDR_IMM (code, ins->dreg, ins->inst_basereg, ins->inst_offset);
4311 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_offset);
4312 ARM_LDR_REG_REG (code, ins->dreg, ins->inst_basereg, ARMREG_LR);
4315 case OP_LOADI1_MEMBASE:
4316 g_assert (arm_is_imm8 (ins->inst_offset));
4317 ARM_LDRSB_IMM (code, ins->dreg, ins->inst_basereg, ins->inst_offset);
4319 case OP_LOADU1_MEMBASE:
4320 g_assert (arm_is_imm12 (ins->inst_offset));
4321 ARM_LDRB_IMM (code, ins->dreg, ins->inst_basereg, ins->inst_offset);
4323 case OP_LOADU2_MEMBASE:
4324 g_assert (arm_is_imm8 (ins->inst_offset));
4325 ARM_LDRH_IMM (code, ins->dreg, ins->inst_basereg, ins->inst_offset);
4327 case OP_LOADI2_MEMBASE:
4328 g_assert (arm_is_imm8 (ins->inst_offset));
4329 ARM_LDRSH_IMM (code, ins->dreg, ins->inst_basereg, ins->inst_offset);
4331 case OP_ICONV_TO_I1:
4332 ARM_SHL_IMM (code, ins->dreg, ins->sreg1, 24);
4333 ARM_SAR_IMM (code, ins->dreg, ins->dreg, 24);
4335 case OP_ICONV_TO_I2:
4336 ARM_SHL_IMM (code, ins->dreg, ins->sreg1, 16);
4337 ARM_SAR_IMM (code, ins->dreg, ins->dreg, 16);
4339 case OP_ICONV_TO_U1:
4340 ARM_AND_REG_IMM8 (code, ins->dreg, ins->sreg1, 0xff);
4342 case OP_ICONV_TO_U2:
4343 ARM_SHL_IMM (code, ins->dreg, ins->sreg1, 16);
4344 ARM_SHR_IMM (code, ins->dreg, ins->dreg, 16);
4348 ARM_CMP_REG_REG (code, ins->sreg1, ins->sreg2);
4350 case OP_COMPARE_IMM:
4351 case OP_ICOMPARE_IMM:
4352 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
4353 g_assert (imm8 >= 0);
4354 ARM_CMP_REG_IMM (code, ins->sreg1, imm8, rot_amount);
4358 * gdb does not like encountering the hw breakpoint ins in the debugged code.
4359 * So instead of emitting a trap, we emit a call a C function and place a
4362 //*(int*)code = 0xef9f0001;
4365 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_INTERNAL_METHOD,
4366 (gpointer)"mono_break");
4367 code = emit_call_seq (cfg, code);
4369 case OP_RELAXED_NOP:
4374 case OP_DUMMY_STORE:
4375 case OP_DUMMY_ICONST:
4376 case OP_DUMMY_R8CONST:
4377 case OP_NOT_REACHED:
4380 case OP_IL_SEQ_POINT:
4381 mono_add_seq_point (cfg, bb, ins, code - cfg->native_code);
4383 case OP_SEQ_POINT: {
4385 MonoInst *info_var = cfg->arch.seq_point_info_var;
4386 MonoInst *ss_trigger_page_var = cfg->arch.ss_trigger_page_var;
4387 MonoInst *ss_read_var = cfg->arch.seq_point_read_var;
4388 MonoInst *ss_method_var = cfg->arch.seq_point_ss_method_var;
4389 MonoInst *bp_method_var = cfg->arch.seq_point_bp_method_var;
4391 int dreg = ARMREG_LR;
4393 if (cfg->soft_breakpoints) {
4394 g_assert (!cfg->compile_aot);
4398 * For AOT, we use one got slot per method, which will point to a
4399 * SeqPointInfo structure, containing all the information required
4400 * by the code below.
4402 if (cfg->compile_aot) {
4403 g_assert (info_var);
4404 g_assert (info_var->opcode == OP_REGOFFSET);
4405 g_assert (arm_is_imm12 (info_var->inst_offset));
4408 if (!cfg->soft_breakpoints) {
4410 * Read from the single stepping trigger page. This will cause a
4411 * SIGSEGV when single stepping is enabled.
4412 * We do this _before_ the breakpoint, so single stepping after
4413 * a breakpoint is hit will step to the next IL offset.
4415 g_assert (((guint64)(gsize)ss_trigger_page >> 32) == 0);
4418 if (ins->flags & MONO_INST_SINGLE_STEP_LOC) {
4419 if (cfg->soft_breakpoints) {
4420 /* Load the address of the sequence point trigger variable. */
4423 g_assert (var->opcode == OP_REGOFFSET);
4424 g_assert (arm_is_imm12 (var->inst_offset));
4425 ARM_LDR_IMM (code, dreg, var->inst_basereg, var->inst_offset);
4427 /* Read the value and check whether it is non-zero. */
4428 ARM_LDR_IMM (code, dreg, dreg, 0);
4429 ARM_CMP_REG_IMM (code, dreg, 0, 0);
4431 /* Load the address of the sequence point method. */
4432 var = ss_method_var;
4434 g_assert (var->opcode == OP_REGOFFSET);
4435 g_assert (arm_is_imm12 (var->inst_offset));
4436 ARM_LDR_IMM (code, dreg, var->inst_basereg, var->inst_offset);
4438 /* Call it conditionally. */
4439 ARM_BLX_REG_COND (code, ARMCOND_NE, dreg);
4441 if (cfg->compile_aot) {
4442 /* Load the trigger page addr from the variable initialized in the prolog */
4443 var = ss_trigger_page_var;
4445 g_assert (var->opcode == OP_REGOFFSET);
4446 g_assert (arm_is_imm12 (var->inst_offset));
4447 ARM_LDR_IMM (code, dreg, var->inst_basereg, var->inst_offset);
4449 #ifdef USE_JUMP_TABLES
4450 gpointer *jte = mono_jumptable_add_entry ();
4451 code = mono_arm_load_jumptable_entry (code, jte, dreg);
4452 jte [0] = ss_trigger_page;
4454 ARM_LDR_IMM (code, dreg, ARMREG_PC, 0);
4456 *(int*)code = (int)ss_trigger_page;
4460 ARM_LDR_IMM (code, dreg, dreg, 0);
4464 mono_add_seq_point (cfg, bb, ins, code - cfg->native_code);
4466 if (cfg->soft_breakpoints) {
4467 /* Load the address of the breakpoint method into ip. */
4468 var = bp_method_var;
4470 g_assert (var->opcode == OP_REGOFFSET);
4471 g_assert (arm_is_imm12 (var->inst_offset));
4472 ARM_LDR_IMM (code, dreg, var->inst_basereg, var->inst_offset);
4475 * A placeholder for a possible breakpoint inserted by
4476 * mono_arch_set_breakpoint ().
4479 } else if (cfg->compile_aot) {
4480 guint32 offset = code - cfg->native_code;
4483 ARM_LDR_IMM (code, dreg, info_var->inst_basereg, info_var->inst_offset);
4484 /* Add the offset */
4485 val = ((offset / 4) * sizeof (guint8*)) + MONO_STRUCT_OFFSET (SeqPointInfo, bp_addrs);
4486 /* Load the info->bp_addrs [offset], which is either 0 or the address of a trigger page */
4487 if (arm_is_imm12 ((int)val)) {
4488 ARM_LDR_IMM (code, dreg, dreg, val);
4490 ARM_ADD_REG_IMM (code, dreg, dreg, (val & 0xFF), 0);
4492 ARM_ADD_REG_IMM (code, dreg, dreg, (val & 0xFF00) >> 8, 24);
4494 ARM_ADD_REG_IMM (code, dreg, dreg, (val & 0xFF0000) >> 16, 16);
4495 g_assert (!(val & 0xFF000000));
4497 ARM_LDR_IMM (code, dreg, dreg, 0);
4499 /* What is faster, a branch or a load ? */
4500 ARM_CMP_REG_IMM (code, dreg, 0, 0);
4501 /* The breakpoint instruction */
4502 ARM_LDR_IMM_COND (code, dreg, dreg, 0, ARMCOND_NE);
4505 * A placeholder for a possible breakpoint inserted by
4506 * mono_arch_set_breakpoint ().
4508 for (i = 0; i < 4; ++i)
4515 ARM_ADDS_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4518 ARM_ADD_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4522 ARM_ADCS_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4525 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
4526 g_assert (imm8 >= 0);
4527 ARM_ADDS_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
4531 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
4532 g_assert (imm8 >= 0);
4533 ARM_ADD_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
4537 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
4538 g_assert (imm8 >= 0);
4539 ARM_ADCS_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
4542 ARM_ADD_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4543 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, "OverflowException");
4545 case OP_IADD_OVF_UN:
4546 ARM_ADD_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4547 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, "OverflowException");
4550 ARM_SUB_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4551 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, "OverflowException");
4553 case OP_ISUB_OVF_UN:
4554 ARM_SUB_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4555 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_TRUE, PPC_BR_EQ, "OverflowException");
4557 case OP_ADD_OVF_CARRY:
4558 ARM_ADCS_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4559 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, "OverflowException");
4561 case OP_ADD_OVF_UN_CARRY:
4562 ARM_ADCS_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4563 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, "OverflowException");
4565 case OP_SUB_OVF_CARRY:
4566 ARM_SBCS_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4567 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, "OverflowException");
4569 case OP_SUB_OVF_UN_CARRY:
4570 ARM_SBCS_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4571 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_TRUE, PPC_BR_EQ, "OverflowException");
4575 ARM_SUBS_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4578 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
4579 g_assert (imm8 >= 0);
4580 ARM_SUBS_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
4583 ARM_SUB_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4587 ARM_SBCS_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4591 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
4592 g_assert (imm8 >= 0);
4593 ARM_SUB_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
4597 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
4598 g_assert (imm8 >= 0);
4599 ARM_SBCS_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
4601 case OP_ARM_RSBS_IMM:
4602 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
4603 g_assert (imm8 >= 0);
4604 ARM_RSBS_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
4606 case OP_ARM_RSC_IMM:
4607 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
4608 g_assert (imm8 >= 0);
4609 ARM_RSC_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
4612 ARM_AND_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4616 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
4617 g_assert (imm8 >= 0);
4618 ARM_AND_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
4621 g_assert (v7s_supported);
4622 ARM_SDIV (code, ins->dreg, ins->sreg1, ins->sreg2);
4625 g_assert (v7s_supported);
4626 ARM_UDIV (code, ins->dreg, ins->sreg1, ins->sreg2);
4629 g_assert (v7s_supported);
4630 ARM_SDIV (code, ARMREG_LR, ins->sreg1, ins->sreg2);
4631 ARM_MLS (code, ins->dreg, ARMREG_LR, ins->sreg2, ins->sreg1);
4634 g_assert (v7s_supported);
4635 ARM_UDIV (code, ARMREG_LR, ins->sreg1, ins->sreg2);
4636 ARM_MLS (code, ins->dreg, ARMREG_LR, ins->sreg2, ins->sreg1);
4640 g_assert_not_reached ();
4642 ARM_ORR_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4646 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
4647 g_assert (imm8 >= 0);
4648 ARM_ORR_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
4651 ARM_EOR_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4655 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
4656 g_assert (imm8 >= 0);
4657 ARM_EOR_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
4660 ARM_SHL_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4665 ARM_SHL_IMM (code, ins->dreg, ins->sreg1, (ins->inst_imm & 0x1f));
4666 else if (ins->dreg != ins->sreg1)
4667 ARM_MOV_REG_REG (code, ins->dreg, ins->sreg1);
4670 ARM_SAR_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4675 ARM_SAR_IMM (code, ins->dreg, ins->sreg1, (ins->inst_imm & 0x1f));
4676 else if (ins->dreg != ins->sreg1)
4677 ARM_MOV_REG_REG (code, ins->dreg, ins->sreg1);
4680 case OP_ISHR_UN_IMM:
4682 ARM_SHR_IMM (code, ins->dreg, ins->sreg1, (ins->inst_imm & 0x1f));
4683 else if (ins->dreg != ins->sreg1)
4684 ARM_MOV_REG_REG (code, ins->dreg, ins->sreg1);
4687 ARM_SHR_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4690 ARM_MVN_REG_REG (code, ins->dreg, ins->sreg1);
4693 ARM_RSB_REG_IMM8 (code, ins->dreg, ins->sreg1, 0);
4696 if (ins->dreg == ins->sreg2)
4697 ARM_MUL_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4699 ARM_MUL_REG_REG (code, ins->dreg, ins->sreg2, ins->sreg1);
4702 g_assert_not_reached ();
4705 /* FIXME: handle ovf/ sreg2 != dreg */
4706 ARM_MUL_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4707 /* FIXME: MUL doesn't set the C/O flags on ARM */
4709 case OP_IMUL_OVF_UN:
4710 /* FIXME: handle ovf/ sreg2 != dreg */
4711 ARM_MUL_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4712 /* FIXME: MUL doesn't set the C/O flags on ARM */
4715 code = mono_arm_emit_load_imm (code, ins->dreg, ins->inst_c0);
4718 /* Load the GOT offset */
4719 mono_add_patch_info (cfg, offset, (MonoJumpInfoType)ins->inst_i1, ins->inst_p0);
4720 ARM_LDR_IMM (code, ins->dreg, ARMREG_PC, 0);
4722 *(gpointer*)code = NULL;
4724 /* Load the value from the GOT */
4725 ARM_LDR_REG_REG (code, ins->dreg, ARMREG_PC, ins->dreg);
4727 case OP_OBJC_GET_SELECTOR:
4728 mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_OBJC_SELECTOR_REF, ins->inst_p0);
4729 ARM_LDR_IMM (code, ins->dreg, ARMREG_PC, 0);
4731 *(gpointer*)code = NULL;
4733 ARM_LDR_REG_REG (code, ins->dreg, ARMREG_PC, ins->dreg);
4735 case OP_ICONV_TO_I4:
4736 case OP_ICONV_TO_U4:
4738 if (ins->dreg != ins->sreg1)
4739 ARM_MOV_REG_REG (code, ins->dreg, ins->sreg1);
4742 int saved = ins->sreg2;
4743 if (ins->sreg2 == ARM_LSW_REG) {
4744 ARM_MOV_REG_REG (code, ARMREG_LR, ins->sreg2);
4747 if (ins->sreg1 != ARM_LSW_REG)
4748 ARM_MOV_REG_REG (code, ARM_LSW_REG, ins->sreg1);
4749 if (saved != ARM_MSW_REG)
4750 ARM_MOV_REG_REG (code, ARM_MSW_REG, saved);
4755 ARM_CPYD (code, ins->dreg, ins->sreg1);
4757 case OP_FCONV_TO_R4:
4759 ARM_CVTD (code, ins->dreg, ins->sreg1);
4760 ARM_CVTS (code, ins->dreg, ins->dreg);
4765 * Keep in sync with mono_arch_emit_epilog
4767 g_assert (!cfg->method->save_lmf);
4769 code = emit_load_volatile_arguments (cfg, code);
4771 code = emit_big_add (code, ARMREG_SP, cfg->frame_reg, cfg->stack_usage);
4773 if (cfg->used_int_regs)
4774 ARM_POP (code, cfg->used_int_regs);
4775 ARM_POP (code, (1 << ARMREG_R7) | (1 << ARMREG_LR));
4777 ARM_POP (code, cfg->used_int_regs | (1 << ARMREG_LR));
4779 mono_add_patch_info (cfg, (guint8*) code - cfg->native_code, MONO_PATCH_INFO_METHOD_JUMP, ins->inst_p0);
4780 if (cfg->compile_aot) {
4781 ARM_LDR_IMM (code, ARMREG_IP, ARMREG_PC, 0);
4783 *(gpointer*)code = NULL;
4785 ARM_LDR_REG_REG (code, ARMREG_PC, ARMREG_PC, ARMREG_IP);
4787 code = mono_arm_patchable_b (code, ARMCOND_AL);
4791 MonoCallInst *call = (MonoCallInst*)ins;
4794 * The stack looks like the following:
4795 * <caller argument area>
4798 * <callee argument area>
4799 * Need to copy the arguments from the callee argument area to
4800 * the caller argument area, and pop the frame.
4802 if (call->stack_usage) {
4803 int i, prev_sp_offset = 0;
4805 /* Compute size of saved registers restored below */
4807 prev_sp_offset = 2 * 4;
4809 prev_sp_offset = 1 * 4;
4810 for (i = 0; i < 16; ++i) {
4811 if (cfg->used_int_regs & (1 << i))
4812 prev_sp_offset += 4;
4815 code = emit_big_add (code, ARMREG_IP, cfg->frame_reg, cfg->stack_usage + prev_sp_offset);
4817 /* Copy arguments on the stack to our argument area */
4818 for (i = 0; i < call->stack_usage; i += sizeof (mgreg_t)) {
4819 ARM_LDR_IMM (code, ARMREG_LR, ARMREG_SP, i);
4820 ARM_STR_IMM (code, ARMREG_LR, ARMREG_IP, i);
4825 * Keep in sync with mono_arch_emit_epilog
4827 g_assert (!cfg->method->save_lmf);
4829 code = emit_big_add (code, ARMREG_SP, cfg->frame_reg, cfg->stack_usage);
4831 if (cfg->used_int_regs)
4832 ARM_POP (code, cfg->used_int_regs);
4833 ARM_POP (code, (1 << ARMREG_R7) | (1 << ARMREG_LR));
4835 ARM_POP (code, cfg->used_int_regs | (1 << ARMREG_LR));
4838 mono_add_patch_info (cfg, (guint8*) code - cfg->native_code, MONO_PATCH_INFO_METHOD_JUMP, call->method);
4839 if (cfg->compile_aot) {
4840 ARM_LDR_IMM (code, ARMREG_IP, ARMREG_PC, 0);
4842 *(gpointer*)code = NULL;
4844 ARM_LDR_REG_REG (code, ARMREG_PC, ARMREG_PC, ARMREG_IP);
4846 code = mono_arm_patchable_b (code, ARMCOND_AL);
4851 /* ensure ins->sreg1 is not NULL */
4852 ARM_LDRB_IMM (code, ARMREG_LR, ins->sreg1, 0);
4855 g_assert (cfg->sig_cookie < 128);
4856 ARM_LDR_IMM (code, ARMREG_IP, cfg->frame_reg, cfg->sig_cookie);
4857 ARM_STR_IMM (code, ARMREG_IP, ins->sreg1, 0);
4866 call = (MonoCallInst*)ins;
4869 code = emit_float_args (cfg, call, code, &max_len, &offset);
4871 if (ins->flags & MONO_INST_HAS_METHOD)
4872 mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_METHOD, call->method);
4874 mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_ABS, call->fptr);
4875 code = emit_call_seq (cfg, code);
4876 ins->flags |= MONO_INST_GC_CALLSITE;
4877 ins->backend.pc_offset = code - cfg->native_code;
4878 code = emit_move_return_value (cfg, ins, code);
4884 case OP_VOIDCALL_REG:
4887 code = emit_float_args (cfg, (MonoCallInst *)ins, code, &max_len, &offset);
4889 code = emit_call_reg (code, ins->sreg1);
4890 ins->flags |= MONO_INST_GC_CALLSITE;
4891 ins->backend.pc_offset = code - cfg->native_code;
4892 code = emit_move_return_value (cfg, ins, code);
4894 case OP_FCALL_MEMBASE:
4895 case OP_LCALL_MEMBASE:
4896 case OP_VCALL_MEMBASE:
4897 case OP_VCALL2_MEMBASE:
4898 case OP_VOIDCALL_MEMBASE:
4899 case OP_CALL_MEMBASE: {
4900 gboolean imt_arg = FALSE;
4902 g_assert (ins->sreg1 != ARMREG_LR);
4903 call = (MonoCallInst*)ins;
4906 code = emit_float_args (cfg, call, code, &max_len, &offset);
4908 if (call->dynamic_imt_arg || call->method->klass->flags & TYPE_ATTRIBUTE_INTERFACE)
4910 if (!arm_is_imm12 (ins->inst_offset))
4911 code = mono_arm_emit_load_imm (code, ARMREG_IP, ins->inst_offset);
4912 #ifdef USE_JUMP_TABLES
4918 ARM_ADD_REG_IMM8 (code, ARMREG_LR, ARMREG_PC, LR_BIAS);
4920 ARM_MOV_REG_REG (code, ARMREG_LR, ARMREG_PC);
4922 if (!arm_is_imm12 (ins->inst_offset))
4923 ARM_LDR_REG_REG (code, ARMREG_PC, ins->sreg1, ARMREG_IP);
4925 ARM_LDR_IMM (code, ARMREG_PC, ins->sreg1, ins->inst_offset);
4928 * We can't embed the method in the code stream in PIC code, or
4930 * Instead, we put it in V5 in code emitted by
4931 * mono_arch_emit_imt_argument (), and embed NULL here to
4932 * signal the IMT thunk that the value is in V5.
4934 #ifdef USE_JUMP_TABLES
4935 /* In case of jumptables we always use value in V5. */
4938 if (call->dynamic_imt_arg)
4939 *((gpointer*)code) = NULL;
4941 *((gpointer*)code) = (gpointer)call->method;
4945 ins->flags |= MONO_INST_GC_CALLSITE;
4946 ins->backend.pc_offset = code - cfg->native_code;
4947 code = emit_move_return_value (cfg, ins, code);
4951 /* round the size to 8 bytes */
4952 ARM_ADD_REG_IMM8 (code, ins->dreg, ins->sreg1, 7);
4953 ARM_BIC_REG_IMM8 (code, ins->dreg, ins->dreg, 7);
4954 ARM_SUB_REG_REG (code, ARMREG_SP, ARMREG_SP, ins->dreg);
4955 /* memzero the area: dreg holds the size, sp is the pointer */
4956 if (ins->flags & MONO_INST_INIT) {
4957 guint8 *start_loop, *branch_to_cond;
4958 ARM_MOV_REG_IMM8 (code, ARMREG_LR, 0);
4959 branch_to_cond = code;
4962 ARM_STR_REG_REG (code, ARMREG_LR, ARMREG_SP, ins->dreg);
4963 arm_patch (branch_to_cond, code);
4964 /* decrement by 4 and set flags */
4965 ARM_SUBS_REG_IMM8 (code, ins->dreg, ins->dreg, sizeof (mgreg_t));
4966 ARM_B_COND (code, ARMCOND_GE, 0);
4967 arm_patch (code - 4, start_loop);
4969 ARM_MOV_REG_REG (code, ins->dreg, ARMREG_SP);
4970 if (cfg->param_area)
4971 code = emit_sub_imm (code, ARMREG_SP, ARMREG_SP, ALIGN_TO (cfg->param_area, MONO_ARCH_FRAME_ALIGNMENT));
4976 MonoInst *var = cfg->dyn_call_var;
4978 g_assert (var->opcode == OP_REGOFFSET);
4979 g_assert (arm_is_imm12 (var->inst_offset));
4981 /* lr = args buffer filled by mono_arch_get_dyn_call_args () */
4982 ARM_MOV_REG_REG( code, ARMREG_LR, ins->sreg1);
4984 ARM_MOV_REG_REG( code, ARMREG_IP, ins->sreg2);
4986 /* Save args buffer */
4987 ARM_STR_IMM (code, ARMREG_LR, var->inst_basereg, var->inst_offset);
4989 /* Set stack slots using R0 as scratch reg */
4990 /* MONO_ARCH_DYN_CALL_PARAM_AREA gives the size of stack space available */
4991 for (i = 0; i < DYN_CALL_STACK_ARGS; ++i) {
4992 ARM_LDR_IMM (code, ARMREG_R0, ARMREG_LR, (PARAM_REGS + i) * sizeof (mgreg_t));
4993 ARM_STR_IMM (code, ARMREG_R0, ARMREG_SP, i * sizeof (mgreg_t));
4996 /* Set argument registers */
4997 for (i = 0; i < PARAM_REGS; ++i)
4998 ARM_LDR_IMM (code, i, ARMREG_LR, i * sizeof (mgreg_t));
5001 ARM_MOV_REG_REG (code, ARMREG_LR, ARMREG_PC);
5002 ARM_MOV_REG_REG (code, ARMREG_PC, ARMREG_IP);
5005 ARM_LDR_IMM (code, ARMREG_IP, var->inst_basereg, var->inst_offset);
5006 ARM_STR_IMM (code, ARMREG_R0, ARMREG_IP, MONO_STRUCT_OFFSET (DynCallArgs, res));
5007 ARM_STR_IMM (code, ARMREG_R1, ARMREG_IP, MONO_STRUCT_OFFSET (DynCallArgs, res2));
5011 if (ins->sreg1 != ARMREG_R0)
5012 ARM_MOV_REG_REG (code, ARMREG_R0, ins->sreg1);
5013 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_INTERNAL_METHOD,
5014 (gpointer)"mono_arch_throw_exception");
5015 code = emit_call_seq (cfg, code);
5019 if (ins->sreg1 != ARMREG_R0)
5020 ARM_MOV_REG_REG (code, ARMREG_R0, ins->sreg1);
5021 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_INTERNAL_METHOD,
5022 (gpointer)"mono_arch_rethrow_exception");
5023 code = emit_call_seq (cfg, code);
5026 case OP_START_HANDLER: {
5027 MonoInst *spvar = mono_find_spvar_for_region (cfg, bb->region);
5030 /* Reserve a param area, see filter-stack.exe */
5031 if (cfg->param_area) {
5032 if ((i = mono_arm_is_rotated_imm8 (cfg->param_area, &rot_amount)) >= 0) {
5033 ARM_SUB_REG_IMM (code, ARMREG_SP, ARMREG_SP, i, rot_amount);
5035 code = mono_arm_emit_load_imm (code, ARMREG_IP, cfg->param_area);
5036 ARM_SUB_REG_REG (code, ARMREG_SP, ARMREG_SP, ARMREG_IP);
5040 if (arm_is_imm12 (spvar->inst_offset)) {
5041 ARM_STR_IMM (code, ARMREG_LR, spvar->inst_basereg, spvar->inst_offset);
5043 code = mono_arm_emit_load_imm (code, ARMREG_IP, spvar->inst_offset);
5044 ARM_STR_REG_REG (code, ARMREG_LR, spvar->inst_basereg, ARMREG_IP);
5048 case OP_ENDFILTER: {
5049 MonoInst *spvar = mono_find_spvar_for_region (cfg, bb->region);
5052 /* Free the param area */
5053 if (cfg->param_area) {
5054 if ((i = mono_arm_is_rotated_imm8 (cfg->param_area, &rot_amount)) >= 0) {
5055 ARM_ADD_REG_IMM (code, ARMREG_SP, ARMREG_SP, i, rot_amount);
5057 code = mono_arm_emit_load_imm (code, ARMREG_IP, cfg->param_area);
5058 ARM_ADD_REG_REG (code, ARMREG_SP, ARMREG_SP, ARMREG_IP);
5062 if (ins->sreg1 != ARMREG_R0)
5063 ARM_MOV_REG_REG (code, ARMREG_R0, ins->sreg1);
5064 if (arm_is_imm12 (spvar->inst_offset)) {
5065 ARM_LDR_IMM (code, ARMREG_IP, spvar->inst_basereg, spvar->inst_offset);
5067 g_assert (ARMREG_IP != spvar->inst_basereg);
5068 code = mono_arm_emit_load_imm (code, ARMREG_IP, spvar->inst_offset);
5069 ARM_LDR_REG_REG (code, ARMREG_IP, spvar->inst_basereg, ARMREG_IP);
5071 ARM_MOV_REG_REG (code, ARMREG_PC, ARMREG_IP);
/* OP_ENDFINALLY: like OP_ENDFILTER but with no result to place in R0. */
5074 case OP_ENDFINALLY: {
5075 MonoInst *spvar = mono_find_spvar_for_region (cfg, bb->region);
5078 /* Free the param area */
5079 if (cfg->param_area) {
5080 if ((i = mono_arm_is_rotated_imm8 (cfg->param_area, &rot_amount)) >= 0) {
5081 ARM_ADD_REG_IMM (code, ARMREG_SP, ARMREG_SP, i, rot_amount);
5083 code = mono_arm_emit_load_imm (code, ARMREG_IP, cfg->param_area);
5084 ARM_ADD_REG_REG (code, ARMREG_SP, ARMREG_SP, ARMREG_IP);
/* Reload the return address saved by OP_START_HANDLER and branch to it. */
5088 if (arm_is_imm12 (spvar->inst_offset)) {
5089 ARM_LDR_IMM (code, ARMREG_IP, spvar->inst_basereg, spvar->inst_offset);
5091 g_assert (ARMREG_IP != spvar->inst_basereg);
5092 code = mono_arm_emit_load_imm (code, ARMREG_IP, spvar->inst_offset);
5093 ARM_LDR_REG_REG (code, ARMREG_IP, spvar->inst_basereg, ARMREG_IP);
5095 ARM_MOV_REG_REG (code, ARMREG_PC, ARMREG_IP);
/* OP_CALL_HANDLER: patchable BL to the handler's basic block; record a
 * try hole so the EH tables cover the call site. */
5098 case OP_CALL_HANDLER:
5099 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_BB, ins->inst_target_bb);
5100 code = mono_arm_patchable_bl (code, ARMCOND_AL);
5101 mono_cfg_add_try_hole (cfg, ins->inst_eh_block, code, bb);
/* OP_LABEL: just record the current native offset. */
5104 ins->inst_c0 = code - cfg->native_code;
5107 /*if (ins->inst_target_bb->native_offset) {
5109 //x86_jump_code (code, cfg->native_code + ins->inst_target_bb->native_offset);
/* Unconditional branch to a basic block; target patched later. */
5111 mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_BB, ins->inst_target_bb);
5112 code = mono_arm_patchable_b (code, ARMCOND_AL);
/* Indirect branch through a register. */
5116 ARM_MOV_REG_REG (code, ARMREG_PC, ins->sreg1);
5120 * In the normal case we have:
5121 * ldr pc, [pc, ins->sreg1 << 2]
5124 * ldr lr, [pc, ins->sreg1 << 2]
5126 * After follows the data.
5127 * FIXME: add aot support.
/* Switch: either through the shared jump-table machinery, or an inline
 * table of targets placed immediately after the ldr-pc instruction. */
5129 mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_SWITCH, ins->inst_p0);
5130 #ifdef USE_JUMP_TABLES
5132 gpointer *jte = mono_jumptable_add_entries (GPOINTER_TO_INT (ins->klass));
5133 code = mono_arm_load_jumptable_entry_addr (code, jte, ARMREG_IP);
5134 ARM_LDR_REG_REG_SHIFT (code, ARMREG_PC, ARMREG_IP, ins->sreg1, ARMSHIFT_LSL, 2);
/* Inline table: grow the code buffer if the table will not fit. */
5138 max_len += 4 * GPOINTER_TO_INT (ins->klass);
5139 if (offset + max_len > (cfg->code_size - 16)) {
5140 cfg->code_size += max_len;
5141 cfg->code_size *= 2;
5142 cfg->native_code = g_realloc (cfg->native_code, cfg->code_size);
5143 code = cfg->native_code + offset;
5145 ARM_LDR_REG_REG_SHIFT (code, ARMREG_PC, ARMREG_PC, ins->sreg1, ARMSHIFT_LSL, 2);
/* Skip the emission pointer past the inline jump-table data. */
5147 code += 4 * GPOINTER_TO_INT (ins->klass);
/* Compare-result opcodes (ceq/clt/cgt families): materialize 0/1 in
 * dreg using conditional moves on the flags of a preceding compare. */
5152 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 0, ARMCOND_NE);
5153 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_EQ);
5157 ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
5158 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_LT);
/* Unsigned less-than uses LO (carry clear). */
5162 ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
5163 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_LO);
5167 ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
5168 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_GT);
/* Unsigned greater-than uses HI. */
5172 ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
5173 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_HI);
/* Negated forms: default to 1 (or set both arms) and clear on the
 * opposite condition. */
5176 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_NE);
5177 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 0, ARMCOND_EQ);
5180 ARM_MOV_REG_IMM8 (code, ins->dreg, 1);
5181 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 0, ARMCOND_LT);
5184 ARM_MOV_REG_IMM8 (code, ins->dreg, 1);
5185 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 0, ARMCOND_GT);
5189 ARM_MOV_REG_IMM8 (code, ins->dreg, 1);
5190 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 0, ARMCOND_LO);
/* Conditional-exception opcodes: throw the named exception (ins->inst_p1)
 * when the condition derived from the opcode offset holds. */
5192 case OP_COND_EXC_EQ:
5193 case OP_COND_EXC_NE_UN:
5194 case OP_COND_EXC_LT:
5195 case OP_COND_EXC_LT_UN:
5196 case OP_COND_EXC_GT:
5197 case OP_COND_EXC_GT_UN:
5198 case OP_COND_EXC_GE:
5199 case OP_COND_EXC_GE_UN:
5200 case OP_COND_EXC_LE:
5201 case OP_COND_EXC_LE_UN:
5202 EMIT_COND_SYSTEM_EXCEPTION (ins->opcode - OP_COND_EXC_EQ, ins->inst_p1);
5204 case OP_COND_EXC_IEQ:
5205 case OP_COND_EXC_INE_UN:
5206 case OP_COND_EXC_ILT:
5207 case OP_COND_EXC_ILT_UN:
5208 case OP_COND_EXC_IGT:
5209 case OP_COND_EXC_IGT_UN:
5210 case OP_COND_EXC_IGE:
5211 case OP_COND_EXC_IGE_UN:
5212 case OP_COND_EXC_ILE:
5213 case OP_COND_EXC_ILE_UN:
5214 EMIT_COND_SYSTEM_EXCEPTION (ins->opcode - OP_COND_EXC_IEQ, ins->inst_p1);
/* Flag-based variants: carry, overflow and their negations. */
5217 case OP_COND_EXC_IC:
5218 EMIT_COND_SYSTEM_EXCEPTION_FLAGS (ARMCOND_CS, ins->inst_p1);
5220 case OP_COND_EXC_OV:
5221 case OP_COND_EXC_IOV:
5222 EMIT_COND_SYSTEM_EXCEPTION_FLAGS (ARMCOND_VS, ins->inst_p1);
5224 case OP_COND_EXC_NC:
5225 case OP_COND_EXC_INC:
5226 EMIT_COND_SYSTEM_EXCEPTION_FLAGS (ARMCOND_CC, ins->inst_p1);
5228 case OP_COND_EXC_NO:
5229 case OP_COND_EXC_INO:
5230 EMIT_COND_SYSTEM_EXCEPTION_FLAGS (ARMCOND_VC, ins->inst_p1);
/* Integer conditional branches (OP_IBEQ..): condition index from opcode. */
5242 EMIT_COND_BRANCH (ins, ins->opcode - OP_IBEQ);
5245 /* floating point opcodes */
/* OP_R8CONST: in AOT mode the 8-byte constant is embedded inline after a
 * PC-relative FLDD; otherwise its address is materialized in LR first. */
5247 if (cfg->compile_aot) {
5248 ARM_FLDD (code, ins->dreg, ARMREG_PC, 0);
5250 *(guint32*)code = ((guint32*)(ins->inst_p0))[0];
5252 *(guint32*)code = ((guint32*)(ins->inst_p0))[1];
5255 /* FIXME: we can optimize the imm load by dealing with part of
5256 * the displacement in LDFD (aligning to 512).
5258 code = mono_arm_emit_load_imm (code, ARMREG_LR, (guint32)ins->inst_p0);
5259 ARM_FLDD (code, ins->dreg, ARMREG_LR, 0);
/* OP_R4CONST: load the single and widen to double (fp regs hold doubles). */
5263 if (cfg->compile_aot) {
5264 ARM_FLDS (code, ins->dreg, ARMREG_PC, 0);
5266 *(guint32*)code = ((guint32*)(ins->inst_p0))[0];
5268 ARM_CVTS (code, ins->dreg, ins->dreg);
5270 code = mono_arm_emit_load_imm (code, ARMREG_LR, (guint32)ins->inst_p0);
5271 ARM_FLDS (code, ins->dreg, ARMREG_LR, 0);
5272 ARM_CVTS (code, ins->dreg, ins->dreg);
5275 case OP_STORER8_MEMBASE_REG:
5276 /* This is generated by the local regalloc pass which runs after the lowering pass */
5277 if (!arm_is_fpimm8 (ins->inst_offset)) {
5278 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_offset);
5279 ARM_ADD_REG_REG (code, ARMREG_LR, ARMREG_LR, ins->inst_destbasereg);
5280 ARM_FSTD (code, ins->sreg1, ARMREG_LR, 0);
5282 ARM_FSTD (code, ins->sreg1, ins->inst_destbasereg, ins->inst_offset);
5285 case OP_LOADR8_MEMBASE:
5286 /* This is generated by the local regalloc pass which runs after the lowering pass */
5287 if (!arm_is_fpimm8 (ins->inst_offset)) {
5288 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_offset);
5289 ARM_ADD_REG_REG (code, ARMREG_LR, ARMREG_LR, ins->inst_basereg);
5290 ARM_FLDD (code, ins->dreg, ARMREG_LR, 0);
5292 ARM_FLDD (code, ins->dreg, ins->inst_basereg, ins->inst_offset);
/* R4 memory ops go through a VFP scratch register to narrow/widen
 * between the in-register double and the in-memory single. */
5295 case OP_STORER4_MEMBASE_REG:
5296 g_assert (arm_is_fpimm8 (ins->inst_offset));
5297 code = mono_arm_emit_vfp_scratch_save (cfg, code, vfp_scratch1);
5298 ARM_CVTD (code, vfp_scratch1, ins->sreg1);
5299 ARM_FSTS (code, vfp_scratch1, ins->inst_destbasereg, ins->inst_offset);
5300 code = mono_arm_emit_vfp_scratch_restore (cfg, code, vfp_scratch1);
5302 case OP_LOADR4_MEMBASE:
5303 g_assert (arm_is_fpimm8 (ins->inst_offset));
5304 code = mono_arm_emit_vfp_scratch_save (cfg, code, vfp_scratch1);
5305 ARM_FLDS (code, vfp_scratch1, ins->inst_basereg, ins->inst_offset);
5306 ARM_CVTS (code, ins->dreg, vfp_scratch1);
5307 code = mono_arm_emit_vfp_scratch_restore (cfg, code, vfp_scratch1);
5309 case OP_ICONV_TO_R_UN: {
5310 g_assert_not_reached ();
/* int -> float conversions via FMSR + FSITOS/FSITOD. */
5313 case OP_ICONV_TO_R4:
5314 code = mono_arm_emit_vfp_scratch_save (cfg, code, vfp_scratch1);
5315 ARM_FMSR (code, vfp_scratch1, ins->sreg1);
5316 ARM_FSITOS (code, vfp_scratch1, vfp_scratch1);
5317 ARM_CVTS (code, ins->dreg, vfp_scratch1);
5318 code = mono_arm_emit_vfp_scratch_restore (cfg, code, vfp_scratch1);
5320 case OP_ICONV_TO_R8:
5321 code = mono_arm_emit_vfp_scratch_save (cfg, code, vfp_scratch1);
5322 ARM_FMSR (code, vfp_scratch1, ins->sreg1);
5323 ARM_FSITOD (code, ins->dreg, vfp_scratch1);
5324 code = mono_arm_emit_vfp_scratch_restore (cfg, code, vfp_scratch1);
/* OP_SETFRET: place the FP return value in the ABI return location
 * (VFP D0 for hard-float, r0/r1 otherwise); R4 results are narrowed
 * to single precision first. */
5328 MonoType *sig_ret = mini_type_get_underlying_type (NULL, mono_method_signature (cfg->method)->ret);
5329 if (sig_ret->type == MONO_TYPE_R4) {
5330 ARM_CVTD (code, ARM_VFP_F0, ins->sreg1);
5332 if (!IS_HARD_FLOAT) {
5333 ARM_FMRS (code, ARMREG_R0, ARM_VFP_F0);
5336 if (IS_HARD_FLOAT) {
5337 ARM_CPYD (code, ARM_VFP_D0, ins->sreg1);
5339 ARM_FMRRD (code, ARMREG_R0, ARMREG_R1, ins->sreg1);
/* float -> small-int conversions all funnel through emit_float_to_int,
 * parameterized by target size and signedness. */
5344 case OP_FCONV_TO_I1:
5345 code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 1, TRUE);
5347 case OP_FCONV_TO_U1:
5348 code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 1, FALSE);
5350 case OP_FCONV_TO_I2:
5351 code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 2, TRUE);
5353 case OP_FCONV_TO_U2:
5354 code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 2, FALSE);
5356 case OP_FCONV_TO_I4:
5358 code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 4, TRUE);
5360 case OP_FCONV_TO_U4:
5362 code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 4, FALSE);
5364 case OP_FCONV_TO_I8:
5365 case OP_FCONV_TO_U8:
5366 g_assert_not_reached ();
5367 /* Implemented as helper calls */
5369 case OP_LCONV_TO_R_UN:
5370 g_assert_not_reached ();
5371 /* Implemented as helper calls */
/* Overflow-checked long -> int conversion on a register pair:
 * sreg1 = low word, sreg2 = high word. */
5373 case OP_LCONV_TO_OVF_I4_2: {
5374 guint8 *high_bit_not_set, *valid_negative, *invalid_negative, *valid_positive;
5376 * Valid ints: 0xffffffff:0x80000000 to 0x00000000:0x7fffffff
5379 ARM_CMP_REG_IMM8 (code, ins->sreg1, 0);
5380 high_bit_not_set = code;
5381 ARM_B_COND (code, ARMCOND_GE, 0); /*branch if bit 31 of the lower part is not set*/
5383 ARM_CMN_REG_IMM8 (code, ins->sreg2, 1); /* This has the same effect as CMP reg, 0xFFFFFFFF */
5384 valid_negative = code;
5385 ARM_B_COND (code, ARMCOND_EQ, 0); /*branch if upper part == 0xFFFFFFFF (lower part has bit 31 set) */
5386 invalid_negative = code;
5387 ARM_B_COND (code, ARMCOND_AL, 0);
5389 arm_patch (high_bit_not_set, code);
5391 ARM_CMP_REG_IMM8 (code, ins->sreg2, 0);
5392 valid_positive = code;
5393 ARM_B_COND (code, ARMCOND_EQ, 0); /*branch if upper part == 0 (lower part has bit 31 clear)*/
5395 arm_patch (invalid_negative, code);
5396 EMIT_COND_SYSTEM_EXCEPTION_FLAGS (ARMCOND_AL, "OverflowException");
5398 arm_patch (valid_negative, code);
5399 arm_patch (valid_positive, code);
5401 if (ins->dreg != ins->sreg1)
5402 ARM_MOV_REG_REG (code, ins->dreg, ins->sreg1);
/* VFP double-precision arithmetic. */
5406 ARM_VFP_ADDD (code, ins->dreg, ins->sreg1, ins->sreg2);
5409 ARM_VFP_SUBD (code, ins->dreg, ins->sreg1, ins->sreg2);
5412 ARM_VFP_MULD (code, ins->dreg, ins->sreg1, ins->sreg2);
5415 ARM_VFP_DIVD (code, ins->dreg, ins->sreg1, ins->sreg2);
5418 ARM_NEGD (code, ins->dreg, ins->sreg1);
5422 g_assert_not_reached ();
5426 ARM_CMPD (code, ins->sreg1, ins->sreg2);
/* FP compare-and-set: compare then conditionally move 0/1 into dreg.
 * MI selects "less than"; VS (unordered, i.e. NaN) is folded into the
 * unsigned variants; "greater than" swaps the compare operands. */
5432 ARM_CMPD (code, ins->sreg1, ins->sreg2);
5435 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 0, ARMCOND_NE);
5436 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_EQ);
5440 ARM_CMPD (code, ins->sreg1, ins->sreg2);
5443 ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
5444 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_MI);
5448 ARM_CMPD (code, ins->sreg1, ins->sreg2);
5451 ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
5452 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_MI);
5453 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_VS);
5457 ARM_CMPD (code, ins->sreg2, ins->sreg1);
5460 ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
5461 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_MI);
5465 ARM_CMPD (code, ins->sreg2, ins->sreg1);
5468 ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
5469 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_MI);
5470 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_VS);
5474 ARM_CMPD (code, ins->sreg1, ins->sreg2);
5477 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_NE);
5478 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 0, ARMCOND_EQ);
5482 ARM_CMPD (code, ins->sreg1, ins->sreg2);
5485 ARM_MOV_REG_IMM8 (code, ins->dreg, 1);
5486 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 0, ARMCOND_MI);
5490 ARM_CMPD (code, ins->sreg2, ins->sreg1);
5493 ARM_MOV_REG_IMM8 (code, ins->dreg, 1);
5494 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 0, ARMCOND_MI);
5497 /* ARM FPA flags table:
5498 * N Less than ARMCOND_MI
5499 * Z Equal ARMCOND_EQ
5500 * C Greater Than or Equal ARMCOND_CS
5501 * V Unordered ARMCOND_VS
/* FP conditional branches, mapped onto the flag table above. */
5504 EMIT_COND_BRANCH (ins, OP_IBEQ - OP_IBEQ);
5507 EMIT_COND_BRANCH (ins, OP_IBNE_UN - OP_IBEQ);
5510 EMIT_COND_BRANCH_FLAGS (ins, ARMCOND_MI); /* N set */
5513 EMIT_COND_BRANCH_FLAGS (ins, ARMCOND_VS); /* V set */
5514 EMIT_COND_BRANCH_FLAGS (ins, ARMCOND_MI); /* N set */
5520 g_assert_not_reached ();
5524 EMIT_COND_BRANCH_FLAGS (ins, ARMCOND_GE);
5526 /* FPA requires EQ even though the docs suggest that just CS is enough */
5527 EMIT_COND_BRANCH_FLAGS (ins, ARMCOND_EQ);
5528 EMIT_COND_BRANCH_FLAGS (ins, ARMCOND_CS);
5532 EMIT_COND_BRANCH_FLAGS (ins, ARMCOND_VS); /* V set */
5533 EMIT_COND_BRANCH_FLAGS (ins, ARMCOND_GE);
/* OP_CKFINITE: throw ArithmeticException unless sreg1 is finite.
 * Compares |sreg1| against the largest finite double
 * (bit pattern 0x7fefffff:ffffffff) and then uses a self-compare
 * (which sets VS for NaN) to catch NaNs. */
5538 code = mono_arm_emit_vfp_scratch_save (cfg, code, vfp_scratch1);
5539 code = mono_arm_emit_vfp_scratch_save (cfg, code, vfp_scratch2);
5541 #ifdef USE_JUMP_TABLES
5543 gpointer *jte = mono_jumptable_add_entries (2);
5544 jte [0] = GUINT_TO_POINTER (0xffffffff);
5545 jte [1] = GUINT_TO_POINTER (0x7fefffff);
5546 code = mono_arm_load_jumptable_entry_addr (code, jte, ARMREG_IP);
5547 ARM_FLDD (code, vfp_scratch1, ARMREG_IP, 0);
5550 ARM_ABSD (code, vfp_scratch2, ins->sreg1);
/* Non-jumptable path: the constant is embedded inline after the FLDD. */
5551 ARM_FLDD (code, vfp_scratch1, ARMREG_PC, 0);
5553 *(guint32*)code = 0xffffffff;
5555 *(guint32*)code = 0x7fefffff;
5558 ARM_CMPD (code, vfp_scratch2, vfp_scratch1);
5560 EMIT_COND_SYSTEM_EXCEPTION_FLAGS (ARMCOND_GT, "ArithmeticException");
5561 ARM_CMPD (code, ins->sreg1, ins->sreg1);
5563 EMIT_COND_SYSTEM_EXCEPTION_FLAGS (ARMCOND_VS, "ArithmeticException");
5564 ARM_CPYD (code, ins->dreg, ins->sreg1);
5566 code = mono_arm_emit_vfp_scratch_restore (cfg, code, vfp_scratch1);
5567 code = mono_arm_emit_vfp_scratch_restore (cfg, code, vfp_scratch2);
/* GC liveness markers: only record the current native offset for the
 * GC map; no code is emitted. */
5572 case OP_GC_LIVENESS_DEF:
5573 case OP_GC_LIVENESS_USE:
5574 case OP_GC_PARAM_SLOT_LIVENESS_DEF:
5575 ins->backend.pc_offset = code - cfg->native_code;
5577 case OP_GC_SPILL_SLOT_LIVENESS_DEF:
5578 ins->backend.pc_offset = code - cfg->native_code;
5579 bb->spill_slot_defs = g_slist_prepend_mempool (cfg->mempool, bb->spill_slot_defs, ins);
5583 g_warning ("unknown opcode %s in %s()\n", mono_inst_name (ins->opcode), __FUNCTION__);
5584 g_assert_not_reached ();
/* Sanity check: emitted code must not exceed the per-opcode maximum
 * length declared in the instruction descriptions. */
5587 if ((cfg->opt & MONO_OPT_BRANCH) && ((code - cfg->native_code - offset) > max_len)) {
5588 g_warning ("wrong maximal instruction length of instruction %s (expected %d, got %d)",
5589 mono_inst_name (ins->opcode), max_len, code - cfg->native_code - offset);
5590 g_assert_not_reached ();
5596 last_offset = offset;
5599 cfg->code_len = code - cfg->native_code;
5602 #endif /* DISABLE_JIT */
5604 #ifdef HAVE_AEABI_READ_TP
5605 void __aeabi_read_tp (void);
/* Register the ARM low-level exception-throw helpers (and
 * __aeabi_read_tp where available) as JIT icalls. */
5609 mono_arch_register_lowlevel_calls (void)
5611 /* The signature doesn't matter */
5612 mono_register_jit_icall (mono_arm_throw_exception, "mono_arm_throw_exception", mono_create_icall_signature ("void"), TRUE);
5613 mono_register_jit_icall (mono_arm_throw_exception_by_token, "mono_arm_throw_exception_by_token", mono_create_icall_signature ("void"), TRUE);
5615 #ifndef MONO_CROSS_COMPILE
5616 #ifdef HAVE_AEABI_READ_TP
5617 mono_register_jit_icall (__aeabi_read_tp, "__aeabi_read_tp", mono_create_icall_signature ("void"), TRUE);
/* patch_lis_ori: patch the high/low halfwords of a lis/ori immediate-load
 * pair with val. NOTE(review): looks inherited from the PPC backend; its
 * only uses below sit behind g_assert_not_reached () — confirm before use. */
5622 #define patch_lis_ori(ip,val) do {\
5623 guint16 *__lis_ori = (guint16*)(ip); \
5624 __lis_ori [1] = (((guint32)(val)) >> 16) & 0xffff; \
5625 __lis_ori [3] = ((guint32)(val)) & 0xffff; \
/* Apply the jump-info patches collected during code generation:
 * resolve each patch target and fix up the instruction(s) at ip.
 * Inline switch tables get filled in directly when not compiling AOT. */
5629 mono_arch_patch_code (MonoMethod *method, MonoDomain *domain, guint8 *code, MonoJumpInfo *ji, MonoCodeManager *dyn_code_mp, gboolean run_cctors)
5631 MonoJumpInfo *patch_info;
5632 gboolean compile_aot = !run_cctors;
5634 for (patch_info = ji; patch_info; patch_info = patch_info->next) {
5635 unsigned char *ip = patch_info->ip.i + code;
5636 const unsigned char *target;
5638 if (patch_info->type == MONO_PATCH_INFO_SWITCH && !compile_aot) {
5639 #ifdef USE_JUMP_TABLES
5640 gpointer *jt = mono_jumptable_get_entry (ip);
5642 gpointer *jt = (gpointer*)(ip + 8);
5645 /* jt is the inlined jump table, 2 instructions after ip
5646 * In the normal case we store the absolute addresses,
5647 * otherwise the displacements.
5649 for (i = 0; i < patch_info->data.table->table_size; i++)
5650 jt [i] = code + (int)patch_info->data.table->table [i];
5655 switch (patch_info->type) {
5656 case MONO_PATCH_INFO_BB:
5657 case MONO_PATCH_INFO_LABEL:
5660 /* No need to patch these */
5665 target = mono_resolve_patch_target (method, domain, code, patch_info, run_cctors);
5667 switch (patch_info->type) {
/* The cases below are dead on ARM (all guarded by g_assert_not_reached);
 * they appear to be inherited from the PPC backend. */
5668 case MONO_PATCH_INFO_IP:
5669 g_assert_not_reached ();
5670 patch_lis_ori (ip, ip);
5672 case MONO_PATCH_INFO_METHOD_REL:
5673 g_assert_not_reached ();
5674 *((gpointer *)(ip)) = code + patch_info->data.offset;
5676 case MONO_PATCH_INFO_METHODCONST:
5677 case MONO_PATCH_INFO_CLASS:
5678 case MONO_PATCH_INFO_IMAGE:
5679 case MONO_PATCH_INFO_FIELD:
5680 case MONO_PATCH_INFO_VTABLE:
5681 case MONO_PATCH_INFO_IID:
5682 case MONO_PATCH_INFO_SFLDA:
5683 case MONO_PATCH_INFO_LDSTR:
5684 case MONO_PATCH_INFO_TYPE_FROM_HANDLE:
5685 case MONO_PATCH_INFO_LDTOKEN:
5686 g_assert_not_reached ();
5687 /* from OP_AOTCONST : lis + ori */
5688 patch_lis_ori (ip, target);
5690 case MONO_PATCH_INFO_R4:
5691 case MONO_PATCH_INFO_R8:
5692 g_assert_not_reached ();
5693 *((gconstpointer *)(ip + 2)) = patch_info->data.target;
5695 case MONO_PATCH_INFO_EXC_NAME:
5696 g_assert_not_reached ();
5697 *((gconstpointer *)(ip + 1)) = patch_info->data.name;
5699 case MONO_PATCH_INFO_NONE:
5700 case MONO_PATCH_INFO_BB_OVF:
5701 case MONO_PATCH_INFO_EXC_OVF:
5702 /* everything is dealt with at epilog output time */
/* Default: general-purpose patch of the ARM call/branch at ip. */
5707 arm_patch_general (domain, ip, target, dyn_code_mp);
5714 * Stack frame layout:
5716 * ------------------- fp
5717 * MonoLMF structure or saved registers
5718 * -------------------
5720 * -------------------
5722 * -------------------
5723 * optional 8 bytes for tracing
5724 * -------------------
5725 * param area size is cfg->param_area
5726 * ------------------- sp
/* Emit the method prolog: allocate the code buffer, push the callee-saved
 * registers (or the full LMF register set), set up the frame pointer,
 * allocate the stack frame and spill incoming arguments to their home
 * locations. Unwind info is emitted alongside each step. */
5729 mono_arch_emit_prolog (MonoCompile *cfg)
5731 MonoMethod *method = cfg->method;
5733 MonoMethodSignature *sig;
5735 int alloc_size, orig_alloc_size, pos, max_offset, i, rot_amount;
5740 int prev_sp_offset, reg_offset;
5742 if (mono_jit_trace_calls != NULL && mono_trace_eval (method))
/* Initial size estimate; the buffer is grown on demand elsewhere. */
5745 sig = mono_method_signature (method);
5746 cfg->code_size = 256 + sig->param_count * 64;
5747 code = cfg->native_code = g_malloc (cfg->code_size);
5749 mono_emit_unwind_op_def_cfa (cfg, code, ARMREG_SP, 0);
5751 alloc_size = cfg->stack_offset;
5757 * The iphone uses R7 as the frame pointer, and it points at the saved
5762 * We can't use r7 as a frame pointer since it points into the middle of
5763 * the frame, so we keep using our own frame pointer.
5764 * FIXME: Optimize this.
/* iPhone ABI: push r7/lr and point r7 at the saved pair for backtraces. */
5766 ARM_PUSH (code, (1 << ARMREG_R7) | (1 << ARMREG_LR));
5767 ARM_MOV_REG_REG (code, ARMREG_R7, ARMREG_SP);
5768 prev_sp_offset += 8; /* r7 and lr */
5769 mono_emit_unwind_op_def_cfa_offset (cfg, code, prev_sp_offset);
5770 mono_emit_unwind_op_offset (cfg, code, ARMREG_R7, (- prev_sp_offset) + 0);
/* Save callee-saved registers. Without an LMF, push only the used
 * registers (plus LR); with an LMF, push r4-r12/lr (mask 0x5ff0) so the
 * LMF can hold the full register state. */
5773 if (!method->save_lmf) {
5775 /* No need to push LR again */
5776 if (cfg->used_int_regs)
5777 ARM_PUSH (code, cfg->used_int_regs);
5779 ARM_PUSH (code, cfg->used_int_regs | (1 << ARMREG_LR));
5780 prev_sp_offset += 4;
5782 for (i = 0; i < 16; ++i) {
5783 if (cfg->used_int_regs & (1 << i))
5784 prev_sp_offset += 4;
5786 mono_emit_unwind_op_def_cfa_offset (cfg, code, prev_sp_offset);
/* Record unwind offsets and mark the save slots as holding no GC refs. */
5788 for (i = 0; i < 16; ++i) {
5789 if ((cfg->used_int_regs & (1 << i))) {
5790 mono_emit_unwind_op_offset (cfg, code, i, (- prev_sp_offset) + reg_offset);
5791 mini_gc_set_slot_type_from_cfa (cfg, (- prev_sp_offset) + reg_offset, SLOT_NOREF);
5796 mono_emit_unwind_op_offset (cfg, code, ARMREG_LR, -4);
5797 mini_gc_set_slot_type_from_cfa (cfg, -4, SLOT_NOREF);
5799 mono_emit_unwind_op_offset (cfg, code, ARMREG_LR, -4);
5800 mini_gc_set_slot_type_from_cfa (cfg, -4, SLOT_NOREF);
/* LMF path: snapshot SP in IP, then push r4-r12 and lr. */
5803 ARM_MOV_REG_REG (code, ARMREG_IP, ARMREG_SP);
5804 ARM_PUSH (code, 0x5ff0);
5805 prev_sp_offset += 4 * 10; /* all but r0-r3, sp and pc */
5806 mono_emit_unwind_op_def_cfa_offset (cfg, code, prev_sp_offset);
5808 for (i = 0; i < 16; ++i) {
5809 if ((i > ARMREG_R3) && (i != ARMREG_SP) && (i != ARMREG_PC)) {
5810 /* The original r7 is saved at the start */
5811 if (!(iphone_abi && i == ARMREG_R7))
5812 mono_emit_unwind_op_offset (cfg, code, i, (- prev_sp_offset) + reg_offset);
5816 g_assert (reg_offset == 4 * 10);
5817 pos += sizeof (MonoLMF) - (4 * 10);
/* Align the frame, subtract it from SP, and establish the frame register. */
5821 orig_alloc_size = alloc_size;
5822 // align to MONO_ARCH_FRAME_ALIGNMENT bytes
5823 if (alloc_size & (MONO_ARCH_FRAME_ALIGNMENT - 1)) {
5824 alloc_size += MONO_ARCH_FRAME_ALIGNMENT - 1;
5825 alloc_size &= ~(MONO_ARCH_FRAME_ALIGNMENT - 1);
5828 /* the stack used in the pushed regs */
5829 if (prev_sp_offset & 4)
5831 cfg->stack_usage = alloc_size;
5833 if ((i = mono_arm_is_rotated_imm8 (alloc_size, &rot_amount)) >= 0) {
5834 ARM_SUB_REG_IMM (code, ARMREG_SP, ARMREG_SP, i, rot_amount);
5836 code = mono_arm_emit_load_imm (code, ARMREG_IP, alloc_size);
5837 ARM_SUB_REG_REG (code, ARMREG_SP, ARMREG_SP, ARMREG_IP);
5839 mono_emit_unwind_op_def_cfa_offset (cfg, code, prev_sp_offset + alloc_size);
5841 if (cfg->frame_reg != ARMREG_SP) {
5842 ARM_MOV_REG_REG (code, cfg->frame_reg, ARMREG_SP);
5843 mono_emit_unwind_op_def_cfa_reg (cfg, code, cfg->frame_reg);
5845 //g_print ("prev_sp_offset: %d, alloc_size:%d\n", prev_sp_offset, alloc_size);
5846 prev_sp_offset += alloc_size;
/* Alignment padding never holds GC references. */
5848 for (i = 0; i < alloc_size - orig_alloc_size; i += 4)
5849 mini_gc_set_slot_type_from_cfa (cfg, (- prev_sp_offset) + orig_alloc_size + i, SLOT_NOREF);
5851 /* compute max_offset in order to use short forward jumps
5852 * we could skip doing it on arm because the immediate displacement
5853 * for jumps is large enough, it may be useful later for constant pools
5856 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
5857 MonoInst *ins = bb->code;
5858 bb->max_offset = max_offset;
5860 if (cfg->prof_options & MONO_PROFILE_COVERAGE)
/* Accumulate the declared maximum length of every instruction. */
5863 MONO_BB_FOR_EACH_INS (bb, ins)
5864 max_offset += ((guint8 *)ins_get_spec (ins->opcode))[MONO_INST_LEN];
5867 /* store runtime generic context */
5868 if (cfg->rgctx_var) {
5869 MonoInst *ins = cfg->rgctx_var;
5871 g_assert (ins->opcode == OP_REGOFFSET);
5873 if (arm_is_imm12 (ins->inst_offset)) {
5874 ARM_STR_IMM (code, MONO_ARCH_RGCTX_REG, ins->inst_basereg, ins->inst_offset);
5876 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_offset);
5877 ARM_STR_REG_REG (code, MONO_ARCH_RGCTX_REG, ins->inst_basereg, ARMREG_LR);
5881 /* load arguments allocated to register from the stack */
5884 cinfo = get_call_info (cfg->generic_sharing_context, NULL, sig);
/* Spill the valuetype return-address register to its stack slot. */
5886 if (cinfo->vtype_retaddr) {
5887 ArgInfo *ainfo = &cinfo->ret;
5888 inst = cfg->vret_addr;
5889 g_assert (arm_is_imm12 (inst->inst_offset));
5890 ARM_STR_IMM (code, ainfo->reg, inst->inst_basereg, inst->inst_offset);
5893 if (sig->call_convention == MONO_CALL_VARARG) {
5894 ArgInfo *cookie = &cinfo->sig_cookie;
5896 /* Save the sig cookie address */
5897 g_assert (cookie->storage == RegTypeBase);
5899 g_assert (arm_is_imm12 (prev_sp_offset + cookie->offset));
5900 g_assert (arm_is_imm12 (cfg->sig_cookie));
5901 ARM_ADD_REG_IMM8 (code, ARMREG_IP, cfg->frame_reg, prev_sp_offset + cookie->offset);
5902 ARM_STR_IMM (code, ARMREG_IP, cfg->frame_reg, cfg->sig_cookie);
/* Spill each incoming argument from its ABI location (register or
 * caller stack) to its home slot (or copy it into its assigned reg). */
5905 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
5906 ArgInfo *ainfo = cinfo->args + i;
5907 inst = cfg->args [pos];
5909 if (cfg->verbose_level > 2)
5910 g_print ("Saving argument %d (type: %d)\n", i, ainfo->storage);
/* Argument lives in a register (OP_REGVAR): move it there. */
5911 if (inst->opcode == OP_REGVAR) {
5912 if (ainfo->storage == RegTypeGeneral)
5913 ARM_MOV_REG_REG (code, inst->dreg, ainfo->reg);
5914 else if (ainfo->storage == RegTypeFP) {
5915 g_assert_not_reached ();
5916 } else if (ainfo->storage == RegTypeBase) {
5917 if (arm_is_imm12 (prev_sp_offset + ainfo->offset)) {
5918 ARM_LDR_IMM (code, inst->dreg, ARMREG_SP, (prev_sp_offset + ainfo->offset));
5920 code = mono_arm_emit_load_imm (code, ARMREG_IP, prev_sp_offset + ainfo->offset);
5921 ARM_LDR_REG_REG (code, inst->dreg, ARMREG_SP, ARMREG_IP);
5924 g_assert_not_reached ();
5926 if (cfg->verbose_level > 2)
5927 g_print ("Argument %d assigned to register %s\n", pos, mono_arch_regname (inst->dreg));
5929 /* the argument should be put on the stack: FIXME handle size != word */
/* Register-passed argument stored to the stack; store width follows
 * ainfo->size (1/2/4/8 bytes). */
5930 if (ainfo->storage == RegTypeGeneral || ainfo->storage == RegTypeIRegPair || ainfo->storage == RegTypeGSharedVtInReg) {
5931 switch (ainfo->size) {
5933 if (arm_is_imm12 (inst->inst_offset))
5934 ARM_STRB_IMM (code, ainfo->reg, inst->inst_basereg, inst->inst_offset);
5936 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
5937 ARM_STRB_REG_REG (code, ainfo->reg, inst->inst_basereg, ARMREG_IP);
5941 if (arm_is_imm8 (inst->inst_offset)) {
5942 ARM_STRH_IMM (code, ainfo->reg, inst->inst_basereg, inst->inst_offset);
5944 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
5945 ARM_STRH_REG_REG (code, ainfo->reg, inst->inst_basereg, ARMREG_IP);
/* 8-byte case: store both registers of the pair. */
5949 if (arm_is_imm12 (inst->inst_offset)) {
5950 ARM_STR_IMM (code, ainfo->reg, inst->inst_basereg, inst->inst_offset);
5952 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
5953 ARM_STR_REG_REG (code, ainfo->reg, inst->inst_basereg, ARMREG_IP);
5955 if (arm_is_imm12 (inst->inst_offset + 4)) {
5956 ARM_STR_IMM (code, ainfo->reg + 1, inst->inst_basereg, inst->inst_offset + 4);
5958 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset + 4);
5959 ARM_STR_REG_REG (code, ainfo->reg + 1, inst->inst_basereg, ARMREG_IP);
5963 if (arm_is_imm12 (inst->inst_offset)) {
5964 ARM_STR_IMM (code, ainfo->reg, inst->inst_basereg, inst->inst_offset);
5966 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
5967 ARM_STR_REG_REG (code, ainfo->reg, inst->inst_basereg, ARMREG_IP);
/* RegTypeBaseGen: low word passed on the caller stack, high word in R3;
 * load the stacked half through LR and store both halves home. */
5971 } else if (ainfo->storage == RegTypeBaseGen) {
5972 if (arm_is_imm12 (prev_sp_offset + ainfo->offset)) {
5973 ARM_LDR_IMM (code, ARMREG_LR, ARMREG_SP, (prev_sp_offset + ainfo->offset));
5975 code = mono_arm_emit_load_imm (code, ARMREG_IP, prev_sp_offset + ainfo->offset);
5976 ARM_LDR_REG_REG (code, ARMREG_LR, ARMREG_SP, ARMREG_IP);
5978 if (arm_is_imm12 (inst->inst_offset + 4)) {
5979 ARM_STR_IMM (code, ARMREG_LR, inst->inst_basereg, inst->inst_offset + 4);
5980 ARM_STR_IMM (code, ARMREG_R3, inst->inst_basereg, inst->inst_offset);
5982 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset + 4);
5983 ARM_STR_REG_REG (code, ARMREG_LR, inst->inst_basereg, ARMREG_IP);
5984 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
5985 ARM_STR_REG_REG (code, ARMREG_R3, inst->inst_basereg, ARMREG_IP);
/* RegTypeBase: argument fully on the caller stack; copy via LR with a
 * width chosen by ainfo->size. */
5987 } else if (ainfo->storage == RegTypeBase || ainfo->storage == RegTypeGSharedVtOnStack) {
5988 if (arm_is_imm12 (prev_sp_offset + ainfo->offset)) {
5989 ARM_LDR_IMM (code, ARMREG_LR, ARMREG_SP, (prev_sp_offset + ainfo->offset));
5991 code = mono_arm_emit_load_imm (code, ARMREG_IP, prev_sp_offset + ainfo->offset);
5992 ARM_LDR_REG_REG (code, ARMREG_LR, ARMREG_SP, ARMREG_IP);
5995 switch (ainfo->size) {
5997 if (arm_is_imm8 (inst->inst_offset)) {
5998 ARM_STRB_IMM (code, ARMREG_LR, inst->inst_basereg, inst->inst_offset);
6000 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
6001 ARM_STRB_REG_REG (code, ARMREG_LR, inst->inst_basereg, ARMREG_IP);
6005 if (arm_is_imm8 (inst->inst_offset)) {
6006 ARM_STRH_IMM (code, ARMREG_LR, inst->inst_basereg, inst->inst_offset);
6008 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
6009 ARM_STRH_REG_REG (code, ARMREG_LR, inst->inst_basereg, ARMREG_IP);
/* 8-byte case: copy the second word separately. */
6013 if (arm_is_imm12 (inst->inst_offset)) {
6014 ARM_STR_IMM (code, ARMREG_LR, inst->inst_basereg, inst->inst_offset);
6016 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
6017 ARM_STR_REG_REG (code, ARMREG_LR, inst->inst_basereg, ARMREG_IP);
6019 if (arm_is_imm12 (prev_sp_offset + ainfo->offset + 4)) {
6020 ARM_LDR_IMM (code, ARMREG_LR, ARMREG_SP, (prev_sp_offset + ainfo->offset + 4));
6022 code = mono_arm_emit_load_imm (code, ARMREG_IP, prev_sp_offset + ainfo->offset + 4);
6023 ARM_LDR_REG_REG (code, ARMREG_LR, ARMREG_SP, ARMREG_IP);
6025 if (arm_is_imm12 (inst->inst_offset + 4)) {
6026 ARM_STR_IMM (code, ARMREG_LR, inst->inst_basereg, inst->inst_offset + 4);
6028 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset + 4);
6029 ARM_STR_REG_REG (code, ARMREG_LR, inst->inst_basereg, ARMREG_IP);
6033 if (arm_is_imm12 (inst->inst_offset)) {
6034 ARM_STR_IMM (code, ARMREG_LR, inst->inst_basereg, inst->inst_offset);
6036 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
6037 ARM_STR_REG_REG (code, ARMREG_LR, inst->inst_basereg, ARMREG_IP);
/* RegTypeFP: compute the destination address in IP and store the VFP
 * register (double or single depending on ainfo->size). */
6041 } else if (ainfo->storage == RegTypeFP) {
6042 int imm8, rot_amount;
6044 if ((imm8 = mono_arm_is_rotated_imm8 (inst->inst_offset, &rot_amount)) == -1) {
6045 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
6046 ARM_ADD_REG_REG (code, ARMREG_IP, ARMREG_IP, inst->inst_basereg);
6048 ARM_ADD_REG_IMM (code, ARMREG_IP, inst->inst_basereg, imm8, rot_amount);
6050 if (ainfo->size == 8)
6051 ARM_FSTD (code, ainfo->reg, ARMREG_IP, 0);
6053 ARM_FSTS (code, ainfo->reg, ARMREG_IP, 0);
/* RegTypeStructByVal: the leading words arrived in registers; store them
 * one by one, then memcpy any remainder from the caller's stack. */
6054 } else if (ainfo->storage == RegTypeStructByVal) {
6055 int doffset = inst->inst_offset;
6059 size = mini_type_stack_size_full (cfg->generic_sharing_context, inst->inst_vtype, NULL, sig->pinvoke);
6060 for (cur_reg = 0; cur_reg < ainfo->size; ++cur_reg) {
6061 if (arm_is_imm12 (doffset)) {
6062 ARM_STR_IMM (code, ainfo->reg + cur_reg, inst->inst_basereg, doffset);
6064 code = mono_arm_emit_load_imm (code, ARMREG_IP, doffset);
6065 ARM_STR_REG_REG (code, ainfo->reg + cur_reg, inst->inst_basereg, ARMREG_IP);
6067 soffset += sizeof (gpointer);
6068 doffset += sizeof (gpointer);
6070 if (ainfo->vtsize) {
6071 /* FIXME: handle overrun! with struct sizes not multiple of 4 */
6072 //g_print ("emit_memcpy (prev_sp_ofs: %d, ainfo->offset: %d, soffset: %d)\n", prev_sp_offset, ainfo->offset, soffset);
6073 code = emit_memcpy (code, ainfo->vtsize * sizeof (gpointer), inst->inst_basereg, doffset, ARMREG_SP, prev_sp_offset + ainfo->offset);
6075 } else if (ainfo->storage == RegTypeStructByAddr) {
6076 g_assert_not_reached ();
6077 /* FIXME: handle overrun! with struct sizes not multiple of 4 */
6078 code = emit_memcpy (code, ainfo->vtsize * sizeof (gpointer), inst->inst_basereg, inst->inst_offset, ainfo->reg, 0);
6080 g_assert_not_reached ();
/* Save the LMF (if requested), run the tracing prolog, and initialize
 * the sequence-point support variables. */
6085 if (method->save_lmf)
6086 code = emit_save_lmf (cfg, code, alloc_size - lmf_offset);
6089 code = mono_arch_instrument_prolog (cfg, mono_trace_enter_method, code, TRUE);
6091 if (cfg->arch.seq_point_info_var) {
6092 MonoInst *ins = cfg->arch.seq_point_info_var;
6094 /* Initialize the variable from a GOT slot */
6095 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_SEQ_POINT_INFO, cfg->method);
6096 #ifdef USE_JUMP_TABLES
6098 gpointer *jte = mono_jumptable_add_entry ();
6099 code = mono_arm_load_jumptable_entry (code, jte, ARMREG_IP);
6100 ARM_LDR_IMM (code, ARMREG_R0, ARMREG_IP, 0);
6102 /** XXX: is it correct? */
/* Non-jumptable path: the slot address is embedded after the load. */
6104 ARM_LDR_IMM (code, ARMREG_R0, ARMREG_PC, 0);
6106 *(gpointer*)code = NULL;
6109 ARM_LDR_REG_REG (code, ARMREG_R0, ARMREG_PC, ARMREG_R0);
6111 g_assert (ins->opcode == OP_REGOFFSET);
6113 if (arm_is_imm12 (ins->inst_offset)) {
6114 ARM_STR_IMM (code, ARMREG_R0, ins->inst_basereg, ins->inst_offset);
6116 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_offset);
6117 ARM_STR_REG_REG (code, ARMREG_R0, ins->inst_basereg, ARMREG_LR);
6121 /* Initialize ss_trigger_page_var */
6122 if (!cfg->soft_breakpoints) {
6123 MonoInst *info_var = cfg->arch.seq_point_info_var;
6124 MonoInst *ss_trigger_page_var = cfg->arch.ss_trigger_page_var;
6125 int dreg = ARMREG_LR;
6128 g_assert (info_var->opcode == OP_REGOFFSET);
6129 g_assert (arm_is_imm12 (info_var->inst_offset));
6131 ARM_LDR_IMM (code, dreg, info_var->inst_basereg, info_var->inst_offset);
6132 /* Load the trigger page addr */
6133 ARM_LDR_IMM (code, dreg, dreg, MONO_STRUCT_OFFSET (SeqPointInfo, ss_trigger_page));
6134 ARM_STR_IMM (code, dreg, ss_trigger_page_var->inst_basereg, ss_trigger_page_var->inst_offset);
/* Soft breakpoints: cache &ss_trigger_var and the single-step/breakpoint
 * wrapper addresses into their per-method variables. */
6138 if (cfg->arch.seq_point_read_var) {
6139 MonoInst *read_ins = cfg->arch.seq_point_read_var;
6140 MonoInst *ss_method_ins = cfg->arch.seq_point_ss_method_var;
6141 MonoInst *bp_method_ins = cfg->arch.seq_point_bp_method_var;
6142 #ifdef USE_JUMP_TABLES
6145 g_assert (read_ins->opcode == OP_REGOFFSET);
6146 g_assert (arm_is_imm12 (read_ins->inst_offset));
6147 g_assert (ss_method_ins->opcode == OP_REGOFFSET);
6148 g_assert (arm_is_imm12 (ss_method_ins->inst_offset));
6149 g_assert (bp_method_ins->opcode == OP_REGOFFSET);
6150 g_assert (arm_is_imm12 (bp_method_ins->inst_offset));
6152 #ifdef USE_JUMP_TABLES
6153 jte = mono_jumptable_add_entries (3);
6154 jte [0] = (gpointer)&ss_trigger_var;
6155 jte [1] = single_step_func_wrapper;
6156 jte [2] = breakpoint_func_wrapper;
6157 code = mono_arm_load_jumptable_entry_addr (code, jte, ARMREG_LR);
/* Non-jumptable path: the three pointers are embedded inline after
 * a PC-relative address computation into LR. */
6159 ARM_MOV_REG_REG (code, ARMREG_LR, ARMREG_PC);
6161 *(volatile int **)code = &ss_trigger_var;
6163 *(gpointer*)code = single_step_func_wrapper;
6165 *(gpointer*)code = breakpoint_func_wrapper;
6169 ARM_LDR_IMM (code, ARMREG_IP, ARMREG_LR, 0);
6170 ARM_STR_IMM (code, ARMREG_IP, read_ins->inst_basereg, read_ins->inst_offset);
6171 ARM_LDR_IMM (code, ARMREG_IP, ARMREG_LR, 4);
6172 ARM_STR_IMM (code, ARMREG_IP, ss_method_ins->inst_basereg, ss_method_ins->inst_offset);
6173 ARM_LDR_IMM (code, ARMREG_IP, ARMREG_LR, 8);
6174 ARM_STR_IMM (code, ARMREG_IP, bp_method_ins->inst_basereg, bp_method_ins->inst_offset);
6177 cfg->code_len = code - cfg->native_code;
6178 g_assert (cfg->code_len < cfg->code_size);
/*
 * mono_arch_emit_epilog:
 *
 *   Emit the method epilog: optional leave-method tracing, loading of
 * by-value struct returns into R0, then either LMF restoration or a plain
 * pop of the callee-saved registers, returning by loading LR into PC.
 * NOTE(review): some structural lines (declarations of `code`/`cinfo`,
 * braces) are not visible in this chunk; comments only, code untouched.
 */
mono_arch_emit_epilog (MonoCompile *cfg)
	MonoMethod *method = cfg->method;
	int pos, i, rot_amount;
	/* Conservative upper bound on the code emitted below */
	int max_epilog_size = 16 + 20*4;

	if (cfg->method->save_lmf)
		max_epilog_size += 128;

	if (mono_jit_trace_calls != NULL)
		max_epilog_size += 50;

	if (cfg->prof_options & MONO_PROFILE_ENTER_LEAVE)
		max_epilog_size += 50;

	/* Grow the code buffer until the epilog is guaranteed to fit */
	while (cfg->code_len + max_epilog_size > (cfg->code_size - 16)) {
		cfg->code_size *= 2;
		cfg->native_code = g_realloc (cfg->native_code, cfg->code_size);
		cfg->stat_code_reallocs++;

	/* Keep in sync with OP_JMP */
	code = cfg->native_code + cfg->code_len;

	if (mono_jit_trace_calls != NULL && mono_trace_eval (method)) {
		code = mono_arch_instrument_epilog (cfg, mono_trace_leave_method, code, TRUE);

	/* Load returned vtypes into registers if needed */
	cinfo = cfg->arch.cinfo;
	if (cinfo->ret.storage == RegTypeStructByVal) {
		MonoInst *ins = cfg->ret;

		if (arm_is_imm12 (ins->inst_offset)) {
			ARM_LDR_IMM (code, ARMREG_R0, ins->inst_basereg, ins->inst_offset);
			/* Offset does not fit an LDR immediate: go through LR */
			code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_offset);
			ARM_LDR_REG_REG (code, ARMREG_R0, ins->inst_basereg, ARMREG_LR);

	if (method->save_lmf) {
		int lmf_offset, reg, sp_adj, regmask;
		/* all but r0-r3, sp and pc */
		pos += sizeof (MonoLMF) - (MONO_ARM_NUM_SAVED_REGS * sizeof (mgreg_t));
		code = emit_restore_lmf (cfg, code, cfg->stack_usage - lmf_offset);
		/* This points to r4 inside MonoLMF->iregs */
		sp_adj = (sizeof (MonoLMF) - MONO_ARM_NUM_SAVED_REGS * sizeof (mgreg_t));
		regmask = 0x9ff0; /* restore lr to pc */
		/* Skip caller saved registers not used by the method */
		while (!(cfg->used_int_regs & (1 << reg)) && reg < ARMREG_FP) {
			regmask &= ~(1 << reg);
		/* Restored later */
		regmask &= ~(1 << ARMREG_PC);
		/* point sp at the registers to restore: 10 is 14 -4, because we skip r0-r3 */
		code = emit_big_add (code, ARMREG_SP, cfg->frame_reg, cfg->stack_usage - lmf_offset + sp_adj);
		ARM_POP (code, regmask);
		/* Restore saved r7, restore LR to PC */
		/* Skip lr from the lmf */
		ARM_ADD_REG_IMM (code, ARMREG_SP, ARMREG_SP, sizeof (gpointer), 0);
		ARM_POP (code, (1 << ARMREG_R7) | (1 << ARMREG_PC));
		/* No LMF: unwind the frame via the frame register */
		if ((i = mono_arm_is_rotated_imm8 (cfg->stack_usage, &rot_amount)) >= 0) {
			ARM_ADD_REG_IMM (code, ARMREG_SP, cfg->frame_reg, i, rot_amount);
			code = mono_arm_emit_load_imm (code, ARMREG_IP, cfg->stack_usage);
			ARM_ADD_REG_REG (code, ARMREG_SP, cfg->frame_reg, ARMREG_IP);
		/* Restore saved gregs */
		if (cfg->used_int_regs)
			ARM_POP (code, cfg->used_int_regs);
		/* Restore saved r7, restore LR to PC */
		ARM_POP (code, (1 << ARMREG_R7) | (1 << ARMREG_PC));
		ARM_POP (code, cfg->used_int_regs | (1 << ARMREG_PC));

	cfg->code_len = code - cfg->native_code;

	g_assert (cfg->code_len < cfg->code_size);
/*
 * mono_arch_emit_exceptions:
 *
 *   Emit the out-of-line exception throwing code at the end of the method:
 * one shared throw sequence per distinct corlib exception type, calling
 * mono_arch_throw_corlib_exception with the type token and the throwing IP.
 */
mono_arch_emit_exceptions (MonoCompile *cfg)
	MonoJumpInfo *patch_info;
	/* Per-exception-type dedup: first emitted throw site is reused */
	guint8* exc_throw_pos [MONO_EXC_INTRINS_NUM];
	guint8 exc_throw_found [MONO_EXC_INTRINS_NUM];
	int max_epilog_size = 50;

	for (i = 0; i < MONO_EXC_INTRINS_NUM; i++) {
		exc_throw_pos [i] = NULL;
		exc_throw_found [i] = 0;

	/* count the number of exception infos */

	/* make sure we have enough space for exceptions */
	for (patch_info = cfg->patch_info; patch_info; patch_info = patch_info->next) {
		if (patch_info->type == MONO_PATCH_INFO_EXC) {
			i = mini_exception_id_by_name (patch_info->data.target);
			if (!exc_throw_found [i]) {
				max_epilog_size += 32;
				exc_throw_found [i] = TRUE;

	/* Grow the code buffer until the throw sequences are guaranteed to fit */
	while (cfg->code_len + max_epilog_size > (cfg->code_size - 16)) {
		cfg->code_size *= 2;
		cfg->native_code = g_realloc (cfg->native_code, cfg->code_size);
		cfg->stat_code_reallocs++;

	code = cfg->native_code + cfg->code_len;

	/* add code to raise exceptions */
	for (patch_info = cfg->patch_info; patch_info; patch_info = patch_info->next) {
		switch (patch_info->type) {
		case MONO_PATCH_INFO_EXC: {
			MonoClass *exc_class;
			unsigned char *ip = patch_info->ip.i + cfg->native_code;

			i = mini_exception_id_by_name (patch_info->data.target);
			if (exc_throw_pos [i]) {
				/* Reuse the already-emitted throw site for this type */
				arm_patch (ip, exc_throw_pos [i]);
				patch_info->type = MONO_PATCH_INFO_NONE;
			exc_throw_pos [i] = code;

			arm_patch (ip, code);

			exc_class = mono_class_from_name (mono_defaults.corlib, "System", patch_info->data.name);
			g_assert (exc_class);

			/* R1 = the IP of the failed check (caller saved LR) */
			ARM_MOV_REG_REG (code, ARMREG_R1, ARMREG_LR);
#ifdef USE_JUMP_TABLES
			gpointer *jte = mono_jumptable_add_entries (2);
			patch_info->type = MONO_PATCH_INFO_INTERNAL_METHOD;
			patch_info->data.name = "mono_arch_throw_corlib_exception";
			patch_info->ip.i = code - cfg->native_code;
			code = mono_arm_load_jumptable_entry_addr (code, jte, ARMREG_R0);
			ARM_LDR_IMM (code, ARMREG_IP, ARMREG_R0, 0);
			ARM_LDR_IMM (code, ARMREG_R0, ARMREG_R0, 4);
			ARM_BLX_REG (code, ARMREG_IP);
			jte [1] = GUINT_TO_POINTER (exc_class->type_token);
			/* R0 = the type token, loaded from the literal emitted below */
			ARM_LDR_IMM (code, ARMREG_R0, ARMREG_PC, 0);
			patch_info->type = MONO_PATCH_INFO_INTERNAL_METHOD;
			patch_info->data.name = "mono_arch_throw_corlib_exception";
			patch_info->ip.i = code - cfg->native_code;
			*(guint32*)(gpointer)code = exc_class->type_token;

	cfg->code_len = code - cfg->native_code;

	g_assert (cfg->code_len < cfg->code_size);
6381 #endif /* #ifndef DISABLE_JIT */
/* Arch specific initialization run after the JIT is fully set up; body not visible in this chunk */
mono_arch_finish_init (void)
/* Free arch specific parts of the per-thread JIT TLS data; body not visible in this chunk */
mono_arch_free_jit_tls_data (MonoJitTlsData *tls)
/* Arch hook to replace a call to CMETHOD with an intrinsic instruction;
 * body not visible in this chunk — presumably returns NULL (no intrinsic). */
mono_arch_emit_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
/* Arch specific pretty-printing of an instruction tree; body not visible in this chunk */
mono_arch_print_tree (MonoInst *tree, int arity)
/* Return the offset of the patchable part of the instruction at CODE; body not visible in this chunk */
mono_arch_get_patch_offset (guint8 *code)
/* Register-window flush hook (SPARC-ism); ARM has no register windows, body not visible in this chunk */
mono_arch_flush_register_windows (void)
/*
 * mono_arch_emit_imt_argument:
 *
 *   Arrange for the IMT/method argument of CALL to be passed in V5.  Under
 * AOT, jump tables, LLVM or generic sharing the argument is always
 * materialized into a vreg; otherwise the method pointer constant is used.
 */
mono_arch_emit_imt_argument (MonoCompile *cfg, MonoCallInst *call, MonoInst *imt_arg)
	int method_reg = mono_alloc_ireg (cfg);
#ifdef USE_JUMP_TABLES
	int use_jumptables = TRUE;
	int use_jumptables = FALSE;

	if (cfg->compile_aot) {
		call->dynamic_imt_arg = TRUE;

		MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, method_reg, imt_arg->dreg);
		/* No explicit imt_arg: load the method via an AOT constant */
		MONO_INST_NEW (cfg, ins, OP_AOTCONST);
		ins->dreg = method_reg;
		ins->inst_p0 = call->method;
		ins->inst_c1 = MONO_PATCH_INFO_METHODCONST;
		MONO_ADD_INS (cfg->cbb, ins);

		mono_call_inst_add_outarg_reg (cfg, call, method_reg, ARMREG_V5, FALSE);
	} else if (cfg->generic_context || imt_arg || mono_use_llvm || use_jumptables) {
		/* Always pass in a register for simplicity */
		call->dynamic_imt_arg = TRUE;

		cfg->uses_rgctx_reg = TRUE;

		MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, method_reg, imt_arg->dreg);
		/* JITted, non-shared case: the method pointer is a plain constant */
		MONO_INST_NEW (cfg, ins, OP_PCONST);
		ins->inst_p0 = call->method;
		ins->dreg = method_reg;
		MONO_ADD_INS (cfg->cbb, ins);

		mono_call_inst_add_outarg_reg (cfg, call, method_reg, ARMREG_V5, FALSE);
6470 #endif /* DISABLE_JIT */
/*
 * mono_arch_find_imt_method:
 *
 *   Recover the IMT method for an interface call: from V5 with jump tables,
 * AOT or gsharedvt code, otherwise from the literal embedded in the code
 * stream right after the LDR PC instruction preceding the call site.
 */
mono_arch_find_imt_method (mgreg_t *regs, guint8 *code)
#ifdef USE_JUMP_TABLES
	/* With jump tables the IMT method is always passed in V5 */
	return (MonoMethod*)regs [ARMREG_V5];
	guint32 *code_ptr = (guint32*)code;
	method = GUINT_TO_POINTER (code_ptr [1]);
	/* NOTE(review): the guard (elided here) presumably checks method == 0,
	 * meaning the IMT method was passed in V5 instead */
	return (MonoMethod*)regs [ARMREG_V5];

	/* The IMT value is stored in the code stream right after the LDC instruction. */
	/* This is no longer true for the gsharedvt_in trampoline */
	if (!IS_LDR_PC (code_ptr [0])) {
		g_warning ("invalid code stream, instruction before IMT value is not a LDC in %s() (code %p value 0: 0x%x -1: 0x%x -2: 0x%x)", __FUNCTION__, code, code_ptr [2], code_ptr [1], code_ptr [0]);
		g_assert (IS_LDR_PC (code_ptr [0]));
	/* This is AOTed code, or the gsharedvt trampoline, the IMT method is in V5 */
	return (MonoMethod*)regs [ARMREG_V5];
	return (MonoMethod*) method;
/* For static rgctx calls the vtable is passed in MONO_ARCH_RGCTX_REG */
mono_arch_find_static_call_vtable (mgreg_t *regs, guint8 *code)
	return (MonoVTable*) regs [MONO_ARCH_RGCTX_REG];
6509 /* #define ENABLE_WRONG_METHOD_CHECK 1 */
6510 #define BASE_SIZE (6 * 4)
6511 #define BSEARCH_ENTRY_SIZE (4 * 4)
6512 #define CMP_SIZE (3 * 4)
6513 #define BRANCH_SIZE (1 * 4)
6514 #define CALL_SIZE (2 * 4)
6515 #define WMC_SIZE (8 * 4)
6516 #define DISTANCE(A, B) (((gint32)(B)) - ((gint32)(A)))
6518 #ifdef USE_JUMP_TABLES
/* Store VALUE into jumptable slot INDEX; each slot must be written exactly once */
set_jumptable_element (gpointer *base, guint32 index, gpointer value)
	g_assert (base [index] == NULL);
	base [index] = value;
/* Emit a conditional load of jumptable entry JTI (a word index off BASE) into DREG */
load_element_with_regbase_cond (arminstr_t *code, ARMReg dreg, ARMReg base, guint32 jti, int cond)
	if (arm_is_imm12 (jti * 4)) {
		ARM_LDR_IMM_COND (code, dreg, base, jti * 4, cond);
		/* Byte offset too large for an LDR immediate: materialize it with MOVW/MOVT */
		ARM_MOVW_REG_IMM_COND (code, dreg, (jti * 4) & 0xffff, cond);
		if ((jti * 4) >> 16)
			ARM_MOVT_REG_IMM_COND (code, dreg, ((jti * 4) >> 16) & 0xffff, cond);
		ARM_LDR_REG_REG_SHIFT_COND (code, dreg, base, dreg, ARMSHIFT_LSL, 0, cond);
6540 arm_emit_value_and_patch_ldr (arminstr_t *code, arminstr_t *target, guint32 value)
6542 guint32 delta = DISTANCE (target, code);
6544 g_assert (delta >= 0 && delta <= 0xFFF);
6545 *target = *target | delta;
6551 #ifdef ENABLE_WRONG_METHOD_CHECK
/* Debug helper called from thunk code (ENABLE_WRONG_METHOD_CHECK) when the
 * IMT comparison fails: print the mismatching values and the faulting ip. */
mini_dump_bad_imt (int input_imt, int compared_imt, int pc)
	g_print ("BAD IMT comparing %x with expected %x at ip %x", input_imt, compared_imt, pc);
/*
 * mono_arch_build_imt_thunk:
 *
 *   Build the IMT/interface dispatch thunk for VTABLE: a chain of
 * compare-and-branch chunks, one per IMT entry (non-equals entries form the
 * bsearch skeleton), dispatching to a vtable slot, item->value.target_code,
 * or FAIL_TRAMP.  A first pass computes the total code size so the thunk can
 * be reserved in one block; literals are emitted into per-chunk constant
 * pools (or a jump table when USE_JUMP_TABLES is defined).
 * NOTE(review): many structural lines (braces, #else/#endif, some
 * declarations) are not visible in this chunk; comments only, code untouched.
 */
mono_arch_build_imt_thunk (MonoVTable *vtable, MonoDomain *domain, MonoIMTCheckItem **imt_entries, int count,
	gpointer fail_tramp)
	arminstr_t *code, *start;
#ifdef USE_JUMP_TABLES
	gboolean large_offsets = FALSE;
	guint32 **constant_pool_starts;
	arminstr_t *vtable_target = NULL;
	int extra_space = 0;
#ifdef ENABLE_WRONG_METHOD_CHECK

#ifdef USE_JUMP_TABLES
	/* Size pass (jump tables): fixed worst-case chunk size per entry */
	for (i = 0; i < count; ++i) {
		MonoIMTCheckItem *item = imt_entries [i];
		item->chunk_size += 4 * 16;
		if (!item->is_equals)
			imt_entries [item->check_target_idx]->compare_done = TRUE;
		size += item->chunk_size;
	constant_pool_starts = g_new0 (guint32*, count);

	/* Size pass (no jump tables): compute each entry's chunk size exactly */
	for (i = 0; i < count; ++i) {
		MonoIMTCheckItem *item = imt_entries [i];
		if (item->is_equals) {
			gboolean fail_case = !item->check_target_idx && fail_tramp;

			if (item->has_target_code || !arm_is_imm12 (DISTANCE (vtable, &vtable->vtable[item->value.vtable_slot]))) {
				item->chunk_size += 32;
				large_offsets = TRUE;

			if (item->check_target_idx || fail_case) {
				if (!item->compare_done || fail_case)
					item->chunk_size += CMP_SIZE;
				item->chunk_size += BRANCH_SIZE;

#ifdef ENABLE_WRONG_METHOD_CHECK
			item->chunk_size += WMC_SIZE;

			item->chunk_size += 16;
			large_offsets = TRUE;

			item->chunk_size += CALL_SIZE;
			/* Non-equals entry: just a bsearch compare-and-branch */
			item->chunk_size += BSEARCH_ENTRY_SIZE;
			imt_entries [item->check_target_idx]->compare_done = TRUE;

		size += item->chunk_size;

	size += 4 * count; /* The ARM_ADD_REG_IMM to pop the stack */

	code = mono_method_alloc_generic_virtual_thunk (domain, size);
	code = mono_domain_code_reserve (domain, size);

	g_print ("Building IMT thunk for class %s %s entries %d code size %d code at %p end %p vtable %p fail_tramp %p\n", vtable->klass->name_space, vtable->klass->name, count, size, start, ((guint8*)start) + size, vtable, fail_tramp);
	for (i = 0; i < count; ++i) {
		MonoIMTCheckItem *item = imt_entries [i];
		g_print ("method %d (%p) %s vtable slot %p is_equals %d chunk size %d\n", i, item->key, ((MonoMethod*)item->key)->name, &vtable->vtable [item->value.vtable_slot], item->is_equals, item->chunk_size);

#ifdef USE_JUMP_TABLES
	ARM_PUSH3 (code, ARMREG_R0, ARMREG_R1, ARMREG_R2);
	/* If jumptables we always pass the IMT method in R5 */
	ARM_MOV_REG_REG (code, ARMREG_R0, ARMREG_V5);
/* Jump table layout: slot 0 holds the vtable, then 3 slots per entry */
#define VTABLE_JTI 0
#define IMT_METHOD_OFFSET 0
#define TARGET_CODE_OFFSET 1
#define JUMP_CODE_OFFSET 2
#define RECORDS_PER_ENTRY 3
#define IMT_METHOD_JTI(idx) (1 + idx * RECORDS_PER_ENTRY + IMT_METHOD_OFFSET)
#define TARGET_CODE_JTI(idx) (1 + idx * RECORDS_PER_ENTRY + TARGET_CODE_OFFSET)
#define JUMP_CODE_JTI(idx) (1 + idx * RECORDS_PER_ENTRY + JUMP_CODE_OFFSET)

	jte = mono_jumptable_add_entries (RECORDS_PER_ENTRY * count + 1 /* vtable */);
	code = (arminstr_t *) mono_arm_load_jumptable_entry_addr ((guint8 *) code, jte, ARMREG_R2);
	ARM_LDR_IMM (code, ARMREG_IP, ARMREG_R2, VTABLE_JTI);
	set_jumptable_element (jte, VTABLE_JTI, vtable);
	/* Large offsets need a fourth stack slot for the computed branch target */
	ARM_PUSH4 (code, ARMREG_R0, ARMREG_R1, ARMREG_IP, ARMREG_PC);
	ARM_PUSH2 (code, ARMREG_R0, ARMREG_R1);

	ARM_LDR_IMM (code, ARMREG_R0, ARMREG_LR, -4);
	vtable_target = code;
	ARM_LDR_IMM (code, ARMREG_IP, ARMREG_PC, 0);
	if (mono_use_llvm) {
		/* LLVM always passes the IMT method in R5 */
		ARM_MOV_REG_REG (code, ARMREG_R0, ARMREG_V5);
		/* R0 == 0 means we are called from AOT code. In this case, V5 contains the IMT method */
		ARM_CMP_REG_IMM8 (code, ARMREG_R0, 0);
		ARM_MOV_REG_REG_COND (code, ARMREG_R0, ARMREG_V5, ARMCOND_EQ);

	/* Emission pass: one chunk per entry */
	for (i = 0; i < count; ++i) {
		MonoIMTCheckItem *item = imt_entries [i];
#ifdef USE_JUMP_TABLES
		guint32 imt_method_jti = 0, target_code_jti = 0;
		arminstr_t *imt_method = NULL, *vtable_offset_ins = NULL, *target_code_ins = NULL;
		gint32 vtable_offset;

		item->code_target = (guint8*)code;

		if (item->is_equals) {
			gboolean fail_case = !item->check_target_idx && fail_tramp;

			if (item->check_target_idx || fail_case) {
				if (!item->compare_done || fail_case) {
#ifdef USE_JUMP_TABLES
					imt_method_jti = IMT_METHOD_JTI (i);
					code = load_element_with_regbase_cond (code, ARMREG_R1, ARMREG_R2, imt_method_jti, ARMCOND_AL);
					ARM_LDR_IMM (code, ARMREG_R1, ARMREG_PC, 0);
					ARM_CMP_REG_REG (code, ARMREG_R0, ARMREG_R1);

				/* Branch to the next chunk if the IMT method does not match */
#ifdef USE_JUMP_TABLES
				code = load_element_with_regbase_cond (code, ARMREG_R1, ARMREG_R2, JUMP_CODE_JTI (i), ARMCOND_NE);
				ARM_BX_COND (code, ARMCOND_NE, ARMREG_R1);
				item->jmp_code = GUINT_TO_POINTER (JUMP_CODE_JTI (i));
				item->jmp_code = (guint8*)code;
				ARM_B_COND (code, ARMCOND_NE, 0);
				/*Enable the commented code to assert on wrong method*/
#ifdef ENABLE_WRONG_METHOD_CHECK
#ifdef USE_JUMP_TABLES
				imt_method_jti = IMT_METHOD_JTI (i);
				code = load_element_with_regbase_cond (code, ARMREG_R1, ARMREG_R2, imt_method_jti, ARMCOND_AL);
				ARM_LDR_IMM (code, ARMREG_R1, ARMREG_PC, 0);
				ARM_CMP_REG_REG (code, ARMREG_R0, ARMREG_R1);
				ARM_B_COND (code, ARMCOND_EQ, 0);

				/* Define this if your system is so bad that gdb is failing. */
#ifdef BROKEN_DEV_ENV
				ARM_MOV_REG_REG (code, ARMREG_R2, ARMREG_PC);
				arm_patch (code - 1, mini_dump_bad_imt);
				arm_patch (cond, code);

			if (item->has_target_code) {
				/* Load target address */
#ifdef USE_JUMP_TABLES
				target_code_jti = TARGET_CODE_JTI (i);
				code = load_element_with_regbase_cond (code, ARMREG_R1, ARMREG_R2, target_code_jti, ARMCOND_AL);
				/* Restore registers */
				ARM_POP3 (code, ARMREG_R0, ARMREG_R1, ARMREG_R2);
				ARM_BX (code, ARMREG_R1);
				set_jumptable_element (jte, target_code_jti, item->value.target_code);
				target_code_ins = code;
				ARM_LDR_IMM (code, ARMREG_R1, ARMREG_PC, 0);
				/* Save it to the fourth slot */
				ARM_STR_IMM (code, ARMREG_R1, ARMREG_SP, 3 * sizeof (gpointer));
				/* Restore registers and branch */
				ARM_POP4 (code, ARMREG_R0, ARMREG_R1, ARMREG_IP, ARMREG_PC);

				code = arm_emit_value_and_patch_ldr (code, target_code_ins, (gsize)item->value.target_code);
				/* Dispatch through the vtable slot */
				vtable_offset = DISTANCE (vtable, &vtable->vtable[item->value.vtable_slot]);
				if (!arm_is_imm12 (vtable_offset)) {
					/*
					 * We need to branch to a computed address but we don't have
					 * a free register to store it, since IP must contain the
					 * vtable address. So we push the two values to the stack, and
					 * load them both using LDM.
					 */
					/* Compute target address */
#ifdef USE_JUMP_TABLES
					ARM_MOVW_REG_IMM (code, ARMREG_R1, vtable_offset & 0xffff);
					if (vtable_offset >> 16)
						ARM_MOVT_REG_IMM (code, ARMREG_R1, (vtable_offset >> 16) & 0xffff);
					/* IP had vtable base. */
					ARM_LDR_REG_REG (code, ARMREG_IP, ARMREG_IP, ARMREG_R1);
					/* Restore registers and branch */
					ARM_POP3 (code, ARMREG_R0, ARMREG_R1, ARMREG_R2);
					ARM_BX (code, ARMREG_IP);
					vtable_offset_ins = code;
					ARM_LDR_IMM (code, ARMREG_R1, ARMREG_PC, 0);
					ARM_LDR_REG_REG (code, ARMREG_R1, ARMREG_IP, ARMREG_R1);
					/* Save it to the fourth slot */
					ARM_STR_IMM (code, ARMREG_R1, ARMREG_SP, 3 * sizeof (gpointer));
					/* Restore registers and branch */
					ARM_POP4 (code, ARMREG_R0, ARMREG_R1, ARMREG_IP, ARMREG_PC);

					code = arm_emit_value_and_patch_ldr (code, vtable_offset_ins, vtable_offset);
					/* Small offset: load the target directly from the vtable */
#ifdef USE_JUMP_TABLES
					ARM_LDR_IMM (code, ARMREG_IP, ARMREG_IP, vtable_offset);
					ARM_POP3 (code, ARMREG_R0, ARMREG_R1, ARMREG_R2);
					ARM_BX (code, ARMREG_IP);
					ARM_POP2 (code, ARMREG_R0, ARMREG_R1);
					ARM_ADD_REG_IMM8 (code, ARMREG_SP, ARMREG_SP, 2 * sizeof (gpointer));
					ARM_LDR_IMM (code, ARMREG_PC, ARMREG_IP, vtable_offset);

			/* fail_case: route the not-equal branch to FAIL_TRAMP */
#ifdef USE_JUMP_TABLES
				set_jumptable_element (jte, GPOINTER_TO_UINT (item->jmp_code), code);
				target_code_jti = TARGET_CODE_JTI (i);
				/* Load target address */
				code = load_element_with_regbase_cond (code, ARMREG_R1, ARMREG_R2, target_code_jti, ARMCOND_AL);
				/* Restore registers */
				ARM_POP3 (code, ARMREG_R0, ARMREG_R1, ARMREG_R2);
				ARM_BX (code, ARMREG_R1);
				set_jumptable_element (jte, target_code_jti, fail_tramp);
				arm_patch (item->jmp_code, (guchar*)code);
				target_code_ins = code;
				/* Load target address */
				ARM_LDR_IMM (code, ARMREG_R1, ARMREG_PC, 0);
				/* Save it to the fourth slot */
				ARM_STR_IMM (code, ARMREG_R1, ARMREG_SP, 3 * sizeof (gpointer));
				/* Restore registers and branch */
				ARM_POP4 (code, ARMREG_R0, ARMREG_R1, ARMREG_IP, ARMREG_PC);

				code = arm_emit_value_and_patch_ldr (code, target_code_ins, (gsize)fail_tramp);
				item->jmp_code = NULL;

#ifdef USE_JUMP_TABLES
			set_jumptable_element (jte, imt_method_jti, item->key);
			code = arm_emit_value_and_patch_ldr (code, imt_method, (guint32)item->key);

			/*must emit after unconditional branch*/
			if (vtable_target) {
				code = arm_emit_value_and_patch_ldr (code, vtable_target, (guint32)vtable);
				item->chunk_size += 4;
				vtable_target = NULL;

			/*We reserve the space for bsearch IMT values after the first entry with an absolute jump*/
			constant_pool_starts [i] = code;
			code += extra_space;
			/* Non-equals entry: bsearch compare, branch if key >= item->key */
#ifdef USE_JUMP_TABLES
			code = load_element_with_regbase_cond (code, ARMREG_R1, ARMREG_R2, IMT_METHOD_JTI (i), ARMCOND_AL);
			ARM_CMP_REG_REG (code, ARMREG_R0, ARMREG_R1);
			code = load_element_with_regbase_cond (code, ARMREG_R1, ARMREG_R2, JUMP_CODE_JTI (i), ARMCOND_HS);
			ARM_BX_COND (code, ARMCOND_HS, ARMREG_R1);
			item->jmp_code = GUINT_TO_POINTER (JUMP_CODE_JTI (i));
			ARM_LDR_IMM (code, ARMREG_R1, ARMREG_PC, 0);
			ARM_CMP_REG_REG (code, ARMREG_R0, ARMREG_R1);
			item->jmp_code = (guint8*)code;
			ARM_B_COND (code, ARMCOND_HS, 0);

	/* Patch pass: resolve forward branches and fill the constant pools */
	for (i = 0; i < count; ++i) {
		MonoIMTCheckItem *item = imt_entries [i];
		if (item->jmp_code) {
			if (item->check_target_idx)
#ifdef USE_JUMP_TABLES
				set_jumptable_element (jte, GPOINTER_TO_UINT (item->jmp_code), imt_entries [item->check_target_idx]->code_target);
				arm_patch (item->jmp_code, imt_entries [item->check_target_idx]->code_target);
		if (i > 0 && item->is_equals) {
#ifdef USE_JUMP_TABLES
			for (j = i - 1; j >= 0 && !imt_entries [j]->is_equals; --j)
				set_jumptable_element (jte, IMT_METHOD_JTI (j), imt_entries [j]->key);
			arminstr_t *space_start = constant_pool_starts [i];
			for (j = i - 1; j >= 0 && !imt_entries [j]->is_equals; --j) {
				space_start = arm_emit_value_and_patch_ldr (space_start, (arminstr_t*)imt_entries [j]->code_target, (guint32)imt_entries [j]->key);

	char *buff = g_strdup_printf ("thunk_for_class_%s_%s_entries_%d", vtable->klass->name_space, vtable->klass->name, count);
	mono_disassemble_code (NULL, (guint8*)start, size, buff);

#ifndef USE_JUMP_TABLES
	g_free (constant_pool_starts);

	mono_arch_flush_icache ((guint8*)start, size);
	mono_stats.imt_thunks_size += code - start;

	g_assert (DISTANCE (start, code) <= size);
/* Return the value of integer register REG from the saved context CTX */
mono_arch_context_get_int_reg (MonoContext *ctx, int reg)
	return ctx->regs [reg];
/* Set integer register REG in the saved context CTX to VAL */
mono_arch_context_set_int_reg (MonoContext *ctx, int reg, mgreg_t val)
	ctx->regs [reg] = val;
/*
 * mono_arch_get_trampolines:
 *
 *   Return a list of MonoTrampInfo structures describing arch specific
 * trampolines for this platform.
 */
mono_arch_get_trampolines (gboolean aot)
	/* Only the exception trampolines are arch specific on ARM */
	return mono_arm_get_exception_trampolines (aot);
/*
 * mono_arch_install_handler_block_guard:
 *
 *   Redirect the saved return address of a finally/handler block (stored in
 * the clause's exvar slot) to NEW_VALUE, so execution is intercepted when
 * the block returns.
 */
mono_arch_install_handler_block_guard (MonoJitInfo *ji, MonoJitExceptionInfo *clause, MonoContext *ctx, gpointer new_value)
	bp = MONO_CONTEXT_GET_BP (ctx);
	lr_loc = (gpointer*)(bp + clause->exvar_offset);

	old_value = *lr_loc;
	/* Only patch if the stored address actually points into JI's code */
	if ((char*)old_value < (char*)ji->code_start || (char*)old_value > ((char*)ji->code_start + ji->code_size))

	*lr_loc = new_value;
6951 #if defined(MONO_ARCH_SOFT_DEBUG_SUPPORTED)
/*
 * mono_arch_set_breakpoint:
 *
 *   Set a breakpoint at the native code corresponding to JI at NATIVE_OFFSET.
 * The location should contain code emitted by OP_SEQ_POINT.
 */
mono_arch_set_breakpoint (MonoJitInfo *ji, guint8 *ip)
	guint32 native_offset = ip - (guint8*)ji->code_start;
	MonoDebugOptions *opt = mini_get_debug_options ();

	if (opt->soft_breakpoints) {
		/* Soft mode: patch in a BLX through LR at the seq point */
		g_assert (!ji->from_aot);
		ARM_BLX_REG (code, ARMREG_LR);
		mono_arch_flush_icache (code - 4, 4);
	} else if (ji->from_aot) {
		SeqPointInfo *info = mono_arch_get_seq_point_info (mono_domain_get (), ji->code_start);

		/* AOT code: arm the breakpoint through the per-method bp_addrs table */
		g_assert (native_offset % 4 == 0);
		g_assert (info->bp_addrs [native_offset / 4] == 0);
		info->bp_addrs [native_offset / 4] = bp_trigger_page;
		int dreg = ARMREG_LR;

		/* Read from another trigger page */
#ifdef USE_JUMP_TABLES
		gpointer *jte = mono_jumptable_add_entry ();
		code = mono_arm_load_jumptable_entry (code, jte, dreg);
		jte [0] = bp_trigger_page;
		ARM_LDR_IMM (code, dreg, ARMREG_PC, 0);
		*(int*)code = (int)bp_trigger_page;
		/* The faulting load: bp_trigger_page has no read access */
		ARM_LDR_IMM (code, dreg, dreg, 0);

		mono_arch_flush_icache (code - 16, 16);

		/* This is currently implemented by emitting an SWI instruction, which
		 * qemu/linux seems to convert to a SIGILL.
		 */
		*(int*)code = (0xef << 24) | 8;
		mono_arch_flush_icache (code - 4, 4);
/*
 * mono_arch_clear_breakpoint:
 *
 *   Clear the breakpoint at IP.
 */
mono_arch_clear_breakpoint (MonoJitInfo *ji, guint8 *ip)
	MonoDebugOptions *opt = mini_get_debug_options ();

	if (opt->soft_breakpoints) {
		g_assert (!ji->from_aot);
		mono_arch_flush_icache (code - 4, 4);
	} else if (ji->from_aot) {
		guint32 native_offset = ip - (guint8*)ji->code_start;
		SeqPointInfo *info = mono_arch_get_seq_point_info (mono_domain_get (), ji->code_start);

		/* Disarm by clearing the slot that mono_arch_set_breakpoint filled in */
		g_assert (native_offset % 4 == 0);
		g_assert (info->bp_addrs [native_offset / 4] == bp_trigger_page);
		info->bp_addrs [native_offset / 4] = 0;
		/* NOTE(review): loop body is elided in this chunk; presumably rewrites
		 * the 4 words emitted by mono_arch_set_breakpoint with nops */
		for (i = 0; i < 4; ++i)

		mono_arch_flush_icache (ip, code - ip);
/*
 * mono_arch_start_single_stepping:
 *
 *   Start single stepping.
 */
mono_arch_start_single_stepping (void)
	if (ss_trigger_page)
		/* Revoke all access so loads from the trigger page fault */
		mono_mprotect (ss_trigger_page, mono_pagesize (), 0);
/*
 * mono_arch_stop_single_stepping:
 *
 *   Stop single stepping.
 */
mono_arch_stop_single_stepping (void)
	if (ss_trigger_page)
		/* Restore read access so the seq point loads no longer fault */
		mono_mprotect (ss_trigger_page, mono_pagesize (), MONO_MMAP_READ);
7066 #define DBG_SIGNAL SIGBUS
7068 #define DBG_SIGNAL SIGSEGV
/*
 * mono_arch_is_single_step_event:
 *
 *   Return whether the machine state in SIGCTX corresponds to a single
 * step event (a fault on the single-step trigger page).
 */
mono_arch_is_single_step_event (void *info, void *sigctx)
	siginfo_t *sinfo = info;

	/* Single stepping only uses trigger pages when they exist */
	if (!ss_trigger_page)

	/* Sometimes the address is off by 4 */
	if (sinfo->si_addr >= ss_trigger_page && (guint8*)sinfo->si_addr <= (guint8*)ss_trigger_page + 128)
/*
 * mono_arch_is_breakpoint_event:
 *
 *   Return whether the machine state in SIGCTX corresponds to a breakpoint
 * event (DBG_SIGNAL with a fault address inside the breakpoint trigger page).
 */
mono_arch_is_breakpoint_event (void *info, void *sigctx)
	siginfo_t *sinfo = info;

	/* Hardware breakpoints only work with the trigger pages */
	if (!ss_trigger_page)

	if (sinfo->si_signo == DBG_SIGNAL) {
		/* Sometimes the address is off by 4 */
		if (sinfo->si_addr >= bp_trigger_page && (guint8*)sinfo->si_addr <= (guint8*)bp_trigger_page + 128)
/*
 * mono_arch_skip_breakpoint:
 *
 *   See mini-amd64.c for docs.
 */
mono_arch_skip_breakpoint (MonoContext *ctx, MonoJitInfo *ji)
	/* Skip over the 4-byte instruction which triggered the breakpoint */
	MONO_CONTEXT_SET_IP (ctx, (guint8*)MONO_CONTEXT_GET_IP (ctx) + 4);
/*
 * mono_arch_skip_single_step:
 *
 *   See mini-amd64.c for docs.
 */
mono_arch_skip_single_step (MonoContext *ctx)
	/* Skip over the 4-byte load from the single-step trigger page */
	MONO_CONTEXT_SET_IP (ctx, (guint8*)MONO_CONTEXT_GET_IP (ctx) + 4);
7138 #endif /* MONO_ARCH_SOFT_DEBUG_SUPPORTED */
/*
 * mono_arch_get_seq_point_info:
 *
 *   See mini-amd64.c for docs.  Returns (allocating and caching on first use)
 * the per-method SeqPointInfo used by AOT breakpoints.
 */
mono_arch_get_seq_point_info (MonoDomain *domain, guint8 *code)
	// FIXME: Add a free function

	/* Check the per-domain cache first */
	mono_domain_lock (domain);
	info = g_hash_table_lookup (domain_jit_info (domain)->arch_seq_points,
	mono_domain_unlock (domain);

	ji = mono_jit_info_table_find (domain, (char*)code);

	/* One bp_addrs slot per 4-byte instruction of the method */
	info = g_malloc0 (sizeof (SeqPointInfo) + ji->code_size);

	info->ss_trigger_page = ss_trigger_page;
	info->bp_trigger_page = bp_trigger_page;

	mono_domain_lock (domain);
	g_hash_table_insert (domain_jit_info (domain)->arch_seq_points,
	mono_domain_unlock (domain);
/* Initialize an extended LMF frame and link it into the LMF chain */
mono_arch_init_lmf_ext (MonoLMFExt *ext, gpointer prev_lmf)
	ext->lmf.previous_lmf = prev_lmf;
	/* Mark that this is a MonoLMFExt */
	ext->lmf.previous_lmf = (gpointer)(((gssize)ext->lmf.previous_lmf) | 2);
	ext->lmf.sp = (gssize)ext;
/*
 * mono_arch_set_target:
 *
 *   Set the target architecture the JIT backend should generate code for, in the form
 * of a GNU target triplet. Only used in AOT mode.
 */
mono_arch_set_target (char *mtriple)
	/* The GNU target triple format is not very well documented */
	/* NOTE: "armv7" also matches "armv7s"; the v7s flag is set separately below */
	if (strstr (mtriple, "armv7")) {
		v5_supported = TRUE;
		v6_supported = TRUE;
		v7_supported = TRUE;
	if (strstr (mtriple, "armv6")) {
		v5_supported = TRUE;
		v6_supported = TRUE;
	if (strstr (mtriple, "armv7s")) {
		v7s_supported = TRUE;
	if (strstr (mtriple, "thumbv7s")) {
		v5_supported = TRUE;
		v6_supported = TRUE;
		v7_supported = TRUE;
		v7s_supported = TRUE;
		thumb_supported = TRUE;
		thumb2_supported = TRUE;
	/* Apple targets always get at least v6 + Thumb */
	if (strstr (mtriple, "darwin") || strstr (mtriple, "ios")) {
		v5_supported = TRUE;
		v6_supported = TRUE;
		thumb_supported = TRUE;
	if (strstr (mtriple, "gnueabi"))
		eabi_supported = TRUE;
/* Return whether OPCODE can be implemented by this backend; the 32-bit
 * atomic ops are only available when the target supports ARMv7 */
mono_arch_opcode_supported (int opcode)
	case OP_ATOMIC_ADD_I4:
	case OP_ATOMIC_EXCHANGE_I4:
	case OP_ATOMIC_CAS_I4:
		return v7_supported;
7238 #if defined(ENABLE_GSHAREDVT)
7240 #include "../../../mono-extensions/mono/mini/mini-arm-gsharedvt.c"
7242 #endif /* !MONOTOUCH */