2 * mini-arm.c: ARM backend for the Mono code generator
5 * Paolo Molaro (lupus@ximian.com)
6 * Dietmar Maurer (dietmar@ximian.com)
8 * (C) 2003 Ximian, Inc.
9 * Copyright 2003-2011 Novell, Inc (http://www.novell.com)
10 * Copyright 2011 Xamarin, Inc (http://www.xamarin.com)
15 #include <mono/metadata/abi-details.h>
16 #include <mono/metadata/appdomain.h>
17 #include <mono/metadata/debug-helpers.h>
18 #include <mono/utils/mono-mmap.h>
19 #include <mono/utils/mono-hwcap-arm.h>
25 #include "debugger-agent.h"
27 #include "mono/arch/arm/arm-vfp-codegen.h"
29 /* Sanity check: This makes no sense */
30 #if defined(ARM_FPU_NONE) && (defined(ARM_FPU_VFP) || defined(ARM_FPU_VFP_HARD))
31 #error "ARM_FPU_NONE is defined while one of ARM_FPU_VFP/ARM_FPU_VFP_HARD is defined"
35 * IS_SOFT_FLOAT: Is full software floating point used?
36 * IS_HARD_FLOAT: Is full hardware floating point used?
37 * IS_VFP: Is hardware floating point with software ABI used?
39 * These are not necessarily constants, e.g. IS_SOFT_FLOAT and
40 * IS_VFP may delegate to mono_arch_is_soft_float ().
43 #if defined(ARM_FPU_VFP_HARD)
44 #define IS_SOFT_FLOAT (FALSE)
45 #define IS_HARD_FLOAT (TRUE)
47 #elif defined(ARM_FPU_NONE)
48 #define IS_SOFT_FLOAT (mono_arch_is_soft_float ())
49 #define IS_HARD_FLOAT (FALSE)
50 #define IS_VFP (!mono_arch_is_soft_float ())
52 #define IS_SOFT_FLOAT (FALSE)
53 #define IS_HARD_FLOAT (FALSE)
57 #if defined(__ARM_EABI__) && defined(__linux__) && !defined(PLATFORM_ANDROID) && !defined(__native_client__)
58 #define HAVE_AEABI_READ_TP 1
61 #ifdef __native_client_codegen__
62 const guint kNaClAlignment = kNaClAlignmentARM;
63 const guint kNaClAlignmentMask = kNaClAlignmentMaskARM;
64 gint8 nacl_align_byte = -1; /* 0xff */
67 mono_arch_nacl_pad (guint8 *code, int pad)
69 /* Not yet properly implemented. */
70 g_assert_not_reached ();
75 mono_arch_nacl_skip_nops (guint8 *code)
77 /* Not yet properly implemented. */
78 g_assert_not_reached ();
82 #endif /* __native_client_codegen__ */
84 #define ALIGN_TO(val,align) ((((guint64)val) + ((align) - 1)) & ~((align) - 1))
87 void sys_icache_invalidate (void *start, size_t len);
90 /* This mutex protects architecture specific caches */
91 #define mono_mini_arch_lock() mono_mutex_lock (&mini_arch_mutex)
92 #define mono_mini_arch_unlock() mono_mutex_unlock (&mini_arch_mutex)
93 static mono_mutex_t mini_arch_mutex;
95 static gboolean v5_supported = FALSE;
96 static gboolean v6_supported = FALSE;
97 static gboolean v7_supported = FALSE;
98 static gboolean v7s_supported = FALSE;
99 static gboolean thumb_supported = FALSE;
100 static gboolean thumb2_supported = FALSE;
102 * Whenever to use the ARM EABI
104 static gboolean eabi_supported = FALSE;
107 * Whenever to use the iphone ABI extensions:
108 * http://developer.apple.com/library/ios/documentation/Xcode/Conceptual/iPhoneOSABIReference/index.html
109 * Basically, r7 is used as a frame pointer and it should point to the saved r7 + lr.
110 * This is required for debugging/profiling tools to work, but it has some overhead so it should
111 * only be turned on in debug builds.
113 static gboolean iphone_abi = FALSE;
116 * The FPU we are generating code for. This is NOT runtime configurable right now,
117 * since some things like MONO_ARCH_CALLEE_FREGS still depend on defines.
119 static MonoArmFPU arm_fpu;
121 #if defined(ARM_FPU_VFP_HARD)
123 * On armhf, d0-d7 are used for argument passing and d8-d15
124 * must be preserved across calls, which leaves us no room
125 * for scratch registers. So we use d14-d15 but back up their
126 * previous contents to a stack slot before using them - see
127 * mono_arm_emit_vfp_scratch_save/_restore ().
129 static int vfp_scratch1 = ARM_VFP_D14;
130 static int vfp_scratch2 = ARM_VFP_D15;
133 * On armel, d0-d7 do not need to be preserved, so we can
134 * freely make use of them as scratch registers.
136 static int vfp_scratch1 = ARM_VFP_D0;
137 static int vfp_scratch2 = ARM_VFP_D1;
142 static volatile int ss_trigger_var = 0;
144 static gpointer single_step_func_wrapper;
145 static gpointer breakpoint_func_wrapper;
148 * The code generated for sequence points reads from this location, which is
149 * made read-only when single stepping is enabled.
151 static gpointer ss_trigger_page;
153 /* Enabled breakpoints read from this trigger page */
154 static gpointer bp_trigger_page;
158 * floating point support: on ARM it is a mess, there are at least 3
159 * different setups, each of which binary incompat with the other.
160 * 1) FPA: old and ugly, but unfortunately what current distros use
161 * the double binary format has the two words swapped. 8 double registers.
162 * Implemented usually by kernel emulation.
163 * 2) softfloat: the compiler emulates all the fp ops. Usually uses the
164 * ugly swapped double format (I guess a softfloat-vfp exists, too, though).
165 * 3) VFP: the new and actually sensible and useful FP support. Implemented
166 * in HW or kernel-emulated, requires new tools. I think this is what symbian uses.
168 * We do not care about FPA. We will support soft float and VFP.
170 int mono_exc_esp_offset = 0;
172 #define arm_is_imm12(v) ((v) > -4096 && (v) < 4096)
173 #define arm_is_imm8(v) ((v) > -256 && (v) < 256)
174 #define arm_is_fpimm8(v) ((v) >= -1020 && (v) <= 1020)
176 #define LDR_MASK ((0xf << ARMCOND_SHIFT) | (3 << 26) | (1 << 22) | (1 << 20) | (15 << 12))
177 #define LDR_PC_VAL ((ARMCOND_AL << ARMCOND_SHIFT) | (1 << 26) | (0 << 22) | (1 << 20) | (15 << 12))
178 #define IS_LDR_PC(val) (((val) & LDR_MASK) == LDR_PC_VAL)
180 #define ADD_LR_PC_4 ((ARMCOND_AL << ARMCOND_SHIFT) | (1 << 25) | (1 << 23) | (ARMREG_PC << 16) | (ARMREG_LR << 12) | 4)
181 #define MOV_LR_PC ((ARMCOND_AL << ARMCOND_SHIFT) | (1 << 24) | (0xa << 20) | (ARMREG_LR << 12) | ARMREG_PC)
182 //#define DEBUG_IMT 0
184 /* A variant of ARM_LDR_IMM which can handle large offsets */
185 #define ARM_LDR_IMM_GENERAL(code, dreg, basereg, offset, scratch_reg) do { \
186 if (arm_is_imm12 ((offset))) { \
187 ARM_LDR_IMM (code, (dreg), (basereg), (offset)); \
189 g_assert ((scratch_reg) != (basereg)); \
190 code = mono_arm_emit_load_imm (code, (scratch_reg), (offset)); \
191 ARM_LDR_REG_REG (code, (dreg), (basereg), (scratch_reg)); \
195 #define ARM_STR_IMM_GENERAL(code, dreg, basereg, offset, scratch_reg) do { \
196 if (arm_is_imm12 ((offset))) { \
197 ARM_STR_IMM (code, (dreg), (basereg), (offset)); \
199 g_assert ((scratch_reg) != (basereg)); \
200 code = mono_arm_emit_load_imm (code, (scratch_reg), (offset)); \
201 ARM_STR_REG_REG (code, (dreg), (basereg), (scratch_reg)); \
206 static void mono_arch_compute_omit_fp (MonoCompile *cfg);
/* Return the symbolic name for integer register 'reg' (0..15); used for disassembly/debug output. */
210 mono_arch_regname (int reg)
212 	static const char * rnames[] = {
213 		"arm_r0", "arm_r1", "arm_r2", "arm_r3", "arm_v1",
214 		"arm_v2", "arm_v3", "arm_v4", "arm_v5", "arm_v6",
215 		"arm_v7", "arm_fp", "arm_ip", "arm_sp", "arm_lr",
/* Only indices 0..15 are valid; the table is indexed directly by register number. */
218 	if (reg >= 0 && reg < 16)
/* Return the symbolic name for floating-point register 'reg' (0..31); used for disassembly/debug output. */
224 mono_arch_fregname (int reg)
226 	static const char * rnames[] = {
227 		"arm_f0", "arm_f1", "arm_f2", "arm_f3", "arm_f4",
228 		"arm_f5", "arm_f6", "arm_f7", "arm_f8", "arm_f9",
229 		"arm_f10", "arm_f11", "arm_f12", "arm_f13", "arm_f14",
230 		"arm_f15", "arm_f16", "arm_f17", "arm_f18", "arm_f19",
231 		"arm_f20", "arm_f21", "arm_f22", "arm_f23", "arm_f24",
232 		"arm_f25", "arm_f26", "arm_f27", "arm_f28", "arm_f29",
/* Only indices 0..31 are valid. */
235 	if (reg >= 0 && reg < 32)
/*
 * Emit code computing dreg = sreg + imm, for an 'imm' that may not fit an
 * ARM rotated 8-bit immediate. Returns the updated code pointer.
 */
243 emit_big_add (guint8 *code, int dreg, int sreg, int imm)
245 	int imm8, rot_amount;
/* Fast path: 'imm' is encodable as a rotated 8-bit immediate, use a single ADD. */
246 	if ((imm8 = mono_arm_is_rotated_imm8 (imm, &rot_amount)) >= 0) {
247 		ARM_ADD_REG_IMM (code, dreg, sreg, imm8, rot_amount);
/* Slow path: materialize 'imm' into dreg first, so dreg must differ from sreg. */
250 	g_assert (dreg != sreg);
251 	code = mono_arm_emit_load_imm (code, dreg, imm);
252 	ARM_ADD_REG_REG (code, dreg, dreg, sreg);
/*
 * Emit an inline memcpy of 'size' bytes from sreg+soffset to dreg+doffset.
 * Clobbers r0-r3 and lr; safe here because (per the comment below) this is
 * only used for incoming stack arguments during the prolog.
 */
257 emit_memcpy (guint8 *code, int size, int dreg, int doffset, int sreg, int soffset)
259 	/* we can use r0-r3, since this is called only for incoming args on the stack */
/* Large copies (> 4 pointer-sized words): emit a word-copy loop. */
260 	if (size > sizeof (gpointer) * 4) {
262 		code = emit_big_add (code, ARMREG_R0, sreg, soffset);
263 		code = emit_big_add (code, ARMREG_R1, dreg, doffset);
264 		start_loop = code = mono_arm_emit_load_imm (code, ARMREG_R2, size);
/* Loop body: copy one word, advance both pointers, decrement the counter. */
265 		ARM_LDR_IMM (code, ARMREG_R3, ARMREG_R0, 0);
266 		ARM_STR_IMM (code, ARMREG_R3, ARMREG_R1, 0);
267 		ARM_ADD_REG_IMM8 (code, ARMREG_R0, ARMREG_R0, 4);
268 		ARM_ADD_REG_IMM8 (code, ARMREG_R1, ARMREG_R1, 4);
269 		ARM_SUBS_REG_IMM8 (code, ARMREG_R2, ARMREG_R2, 4);
/* Branch back while the counter is non-zero; patch the branch target to the loop head. */
270 		ARM_B_COND (code, ARMCOND_NE, 0);
271 		arm_patch (code - 4, start_loop);
/* Small copies with 12-bit-encodable offsets: unrolled LDR/STR pairs through lr. */
274 	if (arm_is_imm12 (doffset) && arm_is_imm12 (doffset + size) &&
275 			arm_is_imm12 (soffset) && arm_is_imm12 (soffset + size)) {
277 			ARM_LDR_IMM (code, ARMREG_LR, sreg, soffset);
278 			ARM_STR_IMM (code, ARMREG_LR, dreg, doffset);
/* Offsets too large to encode: compute the base addresses into r0/r1 and copy from offset 0. */
284 		code = emit_big_add (code, ARMREG_R0, sreg, soffset);
285 		code = emit_big_add (code, ARMREG_R1, dreg, doffset);
286 		doffset = soffset = 0;
288 			ARM_LDR_IMM (code, ARMREG_LR, ARMREG_R0, soffset);
289 			ARM_STR_IMM (code, ARMREG_LR, ARMREG_R1, doffset);
/* By construction every byte must have been copied. */
295 	g_assert (size == 0);
/*
 * Emit an indirect call through 'reg'. Uses BLX when available; otherwise
 * sets lr manually and moves the target into pc.
 */
300 emit_call_reg (guint8 *code, int reg)
303 		ARM_BLX_REG (code, reg);
305 #ifdef USE_JUMP_TABLES
/* Jump-table mode is not expected to reach this path. */
306 		g_assert_not_reached ();
/* No BLX: emulate the call — lr = pc (points two insns ahead on ARM), then pc = reg. */
308 		ARM_MOV_REG_REG (code, ARMREG_LR, ARMREG_PC);
312 		ARM_MOV_REG_REG (code, ARMREG_PC, reg);
/*
 * Emit a patchable call sequence; the actual target is filled in later by the
 * patch machinery (a preceding mono_add_patch_info () call at the call sites).
 */
318 emit_call_seq (MonoCompile *cfg, guint8 *code)
320 #ifdef USE_JUMP_TABLES
321 	code = mono_arm_patchable_bl (code, ARMCOND_AL);
/* Dynamic methods: load the target from an inline literal pool slot into ip. */
323 	if (cfg->method->dynamic) {
324 		ARM_LDR_IMM (code, ARMREG_IP, ARMREG_PC, 0);
/* Placeholder literal; overwritten when the call target is patched in. */
326 		*(gpointer*)code = NULL;
328 		code = emit_call_reg (code, ARMREG_IP);
/*
 * Emit a patchable conditional branch. With jump tables the target goes
 * through a jumptable entry loaded into ip; otherwise a plain B with a
 * zero displacement that is fixed up later via arm_patch ().
 */
337 mono_arm_patchable_b (guint8 *code, int cond)
339 #ifdef USE_JUMP_TABLES
342 	jte = mono_jumptable_add_entry ();
343 	code = mono_arm_load_jumptable_entry (code, jte, ARMREG_IP);
344 	ARM_BX_COND (code, cond, ARMREG_IP);
346 	ARM_B_COND (code, cond, 0);
/*
 * Emit a patchable conditional call (branch-with-link). Mirrors
 * mono_arm_patchable_b () but uses BLX/BL so lr is set for the return.
 */
352 mono_arm_patchable_bl (guint8 *code, int cond)
354 #ifdef USE_JUMP_TABLES
357 	jte = mono_jumptable_add_entry ();
358 	code = mono_arm_load_jumptable_entry (code, jte, ARMREG_IP);
359 	ARM_BLX_REG_COND (code, cond, ARMREG_IP);
361 	ARM_BL_COND (code, cond, 0);
366 #ifdef USE_JUMP_TABLES
/* Load the *address* of jumptable entry 'jte' into 'reg' via a MOVW/MOVT pair (requires v7 movw/movt). */
368 mono_arm_load_jumptable_entry_addr (guint8 *code, gpointer *jte, ARMReg reg)
370 	ARM_MOVW_REG_IMM (code, reg, GPOINTER_TO_UINT(jte) & 0xffff);
371 	ARM_MOVT_REG_IMM (code, reg, (GPOINTER_TO_UINT(jte) >> 16) & 0xffff);
/* Load the *value* stored in jumptable entry 'jte' into 'reg': load its address, then dereference it. */
376 mono_arm_load_jumptable_entry (guint8 *code, gpointer* jte, ARMReg reg)
378 	code = mono_arm_load_jumptable_entry_addr (code, jte, reg);
379 	ARM_LDR_IMM (code, reg, reg, 0);
/*
 * After a call instruction, move the return value from the ABI return
 * location (r0/r1 or d0 depending on FP ABI) into ins->dreg.
 */
385 emit_move_return_value (MonoCompile *cfg, MonoInst *ins, guint8 *code)
387 	switch (ins->opcode) {
390 	case OP_FCALL_MEMBASE:
392 		MonoType *sig_ret = mini_type_get_underlying_type (NULL, ((MonoCallInst*)ins)->signature->ret);
/* R4 return: widen the single-precision result to the double-precision dreg. */
393 		if (sig_ret->type == MONO_TYPE_R4) {
395 				ARM_CVTS (code, ins->dreg, ARM_VFP_F0);
/* Soft-float ABI path: the float comes back in r0; move it into a VFP reg, then convert. */
397 				ARM_FMSR (code, ins->dreg, ARMREG_R0);
398 				ARM_CVTS (code, ins->dreg, ins->dreg);
/* R8 return with hard-float ABI: copy d0 directly. */
402 				ARM_CPYD (code, ins->dreg, ARM_VFP_D0);
/* R8 return with soft-float ABI: the double comes back in the r0/r1 pair. */
404 				ARM_FMDRR (code, ARMREG_R0, ARMREG_R1, ins->dreg);
417 * Emit code to push an LMF structure on the LMF stack.
418 * On arm, this is intermixed with the initialization of other fields of the structure.
421 emit_save_lmf (MonoCompile *cfg, guint8 *code, gint32 lmf_offset)
423 gboolean get_lmf_fast = FALSE;
426 #ifdef HAVE_AEABI_READ_TP
427 gint32 lmf_addr_tls_offset = mono_get_lmf_addr_tls_offset ();
429 if (lmf_addr_tls_offset != -1) {
432 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_INTERNAL_METHOD,
433 (gpointer)"__aeabi_read_tp");
434 code = emit_call_seq (cfg, code);
436 ARM_LDR_IMM (code, ARMREG_R0, ARMREG_R0, lmf_addr_tls_offset);
442 if (cfg->method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) {
445 /* Inline mono_get_lmf_addr () */
446 /* jit_tls = pthread_getspecific (mono_jit_tls_id); lmf_addr = &jit_tls->lmf; */
448 /* Load mono_jit_tls_id */
450 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_JIT_TLS_ID, NULL);
451 ARM_LDR_IMM (code, ARMREG_R0, ARMREG_PC, 0);
453 *(gpointer*)code = NULL;
455 ARM_LDR_REG_REG (code, ARMREG_R0, ARMREG_PC, ARMREG_R0);
456 /* call pthread_getspecific () */
457 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_INTERNAL_METHOD,
458 (gpointer)"pthread_getspecific");
459 code = emit_call_seq (cfg, code);
460 /* lmf_addr = &jit_tls->lmf */
461 lmf_offset = MONO_STRUCT_OFFSET (MonoJitTlsData, lmf);
462 g_assert (arm_is_imm8 (lmf_offset));
463 ARM_ADD_REG_IMM (code, ARMREG_R0, ARMREG_R0, lmf_offset, 0);
470 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_INTERNAL_METHOD,
471 (gpointer)"mono_get_lmf_addr");
472 code = emit_call_seq (cfg, code);
474 /* we build the MonoLMF structure on the stack - see mini-arm.h */
475 /* lmf_offset is the offset from the previous stack pointer,
476 * alloc_size is the total stack space allocated, so the offset
477 * of MonoLMF from the current stack ptr is alloc_size - lmf_offset.
478 * The pointer to the struct is put in r1 (new_lmf).
479 * ip is used as scratch
480 * The callee-saved registers are already in the MonoLMF structure
482 code = emit_big_add (code, ARMREG_R1, ARMREG_SP, lmf_offset);
483 /* r0 is the result from mono_get_lmf_addr () */
484 ARM_STR_IMM (code, ARMREG_R0, ARMREG_R1, MONO_STRUCT_OFFSET (MonoLMF, lmf_addr));
485 /* new_lmf->previous_lmf = *lmf_addr */
486 ARM_LDR_IMM (code, ARMREG_IP, ARMREG_R0, MONO_STRUCT_OFFSET (MonoLMF, previous_lmf));
487 ARM_STR_IMM (code, ARMREG_IP, ARMREG_R1, MONO_STRUCT_OFFSET (MonoLMF, previous_lmf));
488 /* *(lmf_addr) = r1 */
489 ARM_STR_IMM (code, ARMREG_R1, ARMREG_R0, MONO_STRUCT_OFFSET (MonoLMF, previous_lmf));
490 /* Skip method (only needed for trampoline LMF frames) */
491 ARM_STR_IMM (code, ARMREG_SP, ARMREG_R1, MONO_STRUCT_OFFSET (MonoLMF, sp));
492 ARM_STR_IMM (code, ARMREG_FP, ARMREG_R1, MONO_STRUCT_OFFSET (MonoLMF, fp));
493 /* save the current IP */
494 ARM_MOV_REG_REG (code, ARMREG_IP, ARMREG_PC);
495 ARM_STR_IMM (code, ARMREG_IP, ARMREG_R1, MONO_STRUCT_OFFSET (MonoLMF, ip));
497 for (i = 0; i < sizeof (MonoLMF); i += sizeof (mgreg_t))
498 mini_gc_set_slot_type_from_fp (cfg, lmf_offset + i, SLOT_NOREF);
/*
 * Emit FLDS loads for the pending single-precision call arguments recorded in
 * inst->float_args, growing the native code buffer if needed. *offset and
 * *max_len track the current code position / worst-case length.
 */
509 emit_float_args (MonoCompile *cfg, MonoCallInst *inst, guint8 *code, int *max_len, guint *offset)
513 	for (list = inst->float_args; list; list = list->next) {
514 		FloatArgData *fad = list->data;
515 		MonoInst *var = get_vreg_to_inst (cfg, fad->vreg);
/* Can the slot offset be encoded directly in the FLDS instruction (8-bit FP immediate)? */
516 		gboolean imm = arm_is_fpimm8 (var->inst_offset);
518 		/* 4+1 insns for emit_big_add () and 1 for FLDS. */
/* Grow the buffer when the worst-case expansion would overflow it. */
524 		if (*offset + *max_len > cfg->code_size) {
525 			cfg->code_size += *max_len;
526 			cfg->native_code = g_realloc (cfg->native_code, cfg->code_size);
528 			code = cfg->native_code + *offset;
/* Offset too large: compute the address into lr, then load from offset 0. */
532 			code = emit_big_add (code, ARMREG_LR, var->inst_basereg, var->inst_offset);
533 			ARM_FLDS (code, fad->hreg, ARMREG_LR, 0);
535 			ARM_FLDS (code, fad->hreg, var->inst_basereg, var->inst_offset);
537 		*offset = code - cfg->native_code;
/*
 * Spill VFP scratch register 'reg' (vfp_scratch1/2) to its reserved stack
 * slot so it can be clobbered and later restored — needed on armhf where
 * d14/d15 are callee-saved (see the comment at the vfp_scratch* definitions).
 */
544 mono_arm_emit_vfp_scratch_save (MonoCompile *cfg, guint8 *code, int reg)
548 	g_assert (reg == vfp_scratch1 || reg == vfp_scratch2);
550 	inst = (MonoInst *) cfg->arch.vfp_scratch_slots [reg == vfp_scratch1 ? 0 : 1];
/* If the slot offset doesn't fit the FSTD immediate, form the address in lr first. */
553 	if (!arm_is_fpimm8 (inst->inst_offset)) {
554 		code = emit_big_add (code, ARMREG_LR, inst->inst_basereg, inst->inst_offset);
555 		ARM_FSTD (code, reg, ARMREG_LR, 0);
557 		ARM_FSTD (code, reg, inst->inst_basereg, inst->inst_offset);
/*
 * Reload VFP scratch register 'reg' from the stack slot that
 * mono_arm_emit_vfp_scratch_save () spilled it to; exact mirror of the save.
 */
564 mono_arm_emit_vfp_scratch_restore (MonoCompile *cfg, guint8 *code, int reg)
568 	g_assert (reg == vfp_scratch1 || reg == vfp_scratch2);
570 	inst = (MonoInst *) cfg->arch.vfp_scratch_slots [reg == vfp_scratch1 ? 0 : 1];
/* Same addressing workaround as the save path for large offsets. */
573 	if (!arm_is_fpimm8 (inst->inst_offset)) {
574 		code = emit_big_add (code, ARMREG_LR, inst->inst_basereg, inst->inst_offset);
575 		ARM_FLDD (code, reg, ARMREG_LR, 0);
577 		ARM_FLDD (code, reg, inst->inst_basereg, inst->inst_offset);
586 * Emit code to pop an LMF structure from the LMF stack.
/*
 * Emit code to unlink the current MonoLMF from the LMF stack:
 * *lmf_addr = lmf->previous_lmf. Uses ip and lr as scratch.
 */
589 emit_restore_lmf (MonoCompile *cfg, guint8 *code, gint32 lmf_offset)
/* Small offsets can be addressed directly off the frame register ... */
593 	if (lmf_offset < 32) {
594 		basereg = cfg->frame_reg;
/* ... otherwise compute frame_reg + lmf_offset into r2 and address off that. */
599 		code = emit_big_add (code, ARMREG_R2, cfg->frame_reg, lmf_offset);
602 	/* ip = previous_lmf */
603 	ARM_LDR_IMM (code, ARMREG_IP, basereg, offset + MONO_STRUCT_OFFSET (MonoLMF, previous_lmf));
/* lr = lmf_addr */
605 	ARM_LDR_IMM (code, ARMREG_LR, basereg, offset + MONO_STRUCT_OFFSET (MonoLMF, lmf_addr));
606 	/* *(lmf_addr) = previous_lmf */
607 	ARM_STR_IMM (code, ARMREG_IP, ARMREG_LR, MONO_STRUCT_OFFSET (MonoLMF, previous_lmf));
612 #endif /* #ifndef DISABLE_JIT */
615 * mono_arch_get_argument_info:
616 * @csig: a method signature
617 * @param_count: the number of parameters to consider
618 * @arg_info: an array to store the result infos
620 * Gathers information on parameters such as size, alignment and
621 * padding. arg_info should be large enought to hold param_count + 1 entries.
623 * Returns the size of the activation frame.
626 mono_arch_get_argument_info (MonoGenericSharingContext *gsctx, MonoMethodSignature *csig, int param_count, MonoJitArgumentInfo *arg_info)
628 int k, frame_size = 0;
629 guint32 size, align, pad;
633 t = mini_type_get_underlying_type (gsctx, csig->ret);
634 if (MONO_TYPE_ISSTRUCT (t)) {
635 frame_size += sizeof (gpointer);
639 arg_info [0].offset = offset;
642 frame_size += sizeof (gpointer);
646 arg_info [0].size = frame_size;
648 for (k = 0; k < param_count; k++) {
649 size = mini_type_stack_size_full (NULL, csig->params [k], &align, csig->pinvoke);
651 /* ignore alignment for now */
654 frame_size += pad = (align - (frame_size & (align - 1))) & (align - 1);
655 arg_info [k].pad = pad;
657 arg_info [k + 1].pad = 0;
658 arg_info [k + 1].size = size;
660 arg_info [k + 1].offset = offset;
664 align = MONO_ARCH_FRAME_ALIGNMENT;
665 frame_size += pad = (align - (frame_size & (align - 1))) & (align - 1);
666 arg_info [k].pad = pad;
671 #define MAX_ARCH_DELEGATE_PARAMS 3
/*
 * Generate the small delegate-invoke thunk. With has_target the thunk loads
 * the method pointer and replaces 'this' (r0) with delegate->target; without
 * it, the first param_count register args are slid down one register to drop
 * the delegate argument. *code_size receives the generated length.
 */
674 get_delegate_invoke_impl (gboolean has_target, gboolean param_count, guint32 *code_size)
676 	guint8 *code, *start;
/* has_target variant: fixed 3-instruction (12-byte) sequence. */
679 		start = code = mono_global_codeman_reserve (12);
681 		/* Replace the this argument with the target */
682 		ARM_LDR_IMM (code, ARMREG_IP, ARMREG_R0, MONO_STRUCT_OFFSET (MonoDelegate, method_ptr));
683 		ARM_LDR_IMM (code, ARMREG_R0, ARMREG_R0, MONO_STRUCT_OFFSET (MonoDelegate, target));
/* Tail-jump to the target method (ip holds method_ptr). */
684 		ARM_MOV_REG_REG (code, ARMREG_PC, ARMREG_IP);
686 		g_assert ((code - start) <= 12);
688 		mono_arch_flush_icache (start, 12);
/* Non-target variant: 2 fixed insns plus one MOV per parameter. */
692 		size = 8 + param_count * 4;
693 		start = code = mono_global_codeman_reserve (size);
695 		ARM_LDR_IMM (code, ARMREG_IP, ARMREG_R0, MONO_STRUCT_OFFSET (MonoDelegate, method_ptr));
696 		/* slide down the arguments */
697 		for (i = 0; i < param_count; ++i) {
698 			ARM_MOV_REG_REG (code, (ARMREG_R0 + i), (ARMREG_R0 + i + 1));
700 		ARM_MOV_REG_REG (code, ARMREG_PC, ARMREG_IP);
702 		g_assert ((code - start) <= size);
704 		mono_arch_flush_icache (start, size);
/* Report the actual generated length to the caller (used for AOT tramp info). */
708 		*code_size = code - start;
714 * mono_arch_get_delegate_invoke_impls:
716 * Return a list of MonoAotTrampInfo structures for the delegate invoke impl
/*
 * Build the list of MonoTrampInfo entries for all delegate-invoke thunks:
 * the has_target variant plus one per supported parameter count, for AOT.
 */
720 mono_arch_get_delegate_invoke_impls (void)
728 	code = get_delegate_invoke_impl (TRUE, 0, &code_len);
729 	res = g_slist_prepend (res, mono_tramp_info_create ("delegate_invoke_impl_has_target", code, code_len, NULL, NULL));
/* One non-target thunk for each arity up to MAX_ARCH_DELEGATE_PARAMS. */
731 	for (i = 0; i <= MAX_ARCH_DELEGATE_PARAMS; ++i) {
732 		code = get_delegate_invoke_impl (FALSE, i, &code_len);
733 		tramp_name = g_strdup_printf ("delegate_invoke_impl_target_%d", i);
734 		res = g_slist_prepend (res, mono_tramp_info_create (tramp_name, code, code_len, NULL, NULL));
742 mono_arch_get_delegate_invoke_impl (MonoMethodSignature *sig, gboolean has_target)
744 guint8 *code, *start;
747 /* FIXME: Support more cases */
748 sig_ret = mini_type_get_underlying_type (NULL, sig->ret);
749 if (MONO_TYPE_ISSTRUCT (sig_ret))
753 static guint8* cached = NULL;
754 mono_mini_arch_lock ();
756 mono_mini_arch_unlock ();
761 start = mono_aot_get_trampoline ("delegate_invoke_impl_has_target");
763 start = get_delegate_invoke_impl (TRUE, 0, NULL);
765 mono_mini_arch_unlock ();
768 static guint8* cache [MAX_ARCH_DELEGATE_PARAMS + 1] = {NULL};
771 if (sig->param_count > MAX_ARCH_DELEGATE_PARAMS)
773 for (i = 0; i < sig->param_count; ++i)
774 if (!mono_is_regsize_var (sig->params [i]))
777 mono_mini_arch_lock ();
778 code = cache [sig->param_count];
780 mono_mini_arch_unlock ();
785 char *name = g_strdup_printf ("delegate_invoke_impl_target_%d", sig->param_count);
786 start = mono_aot_get_trampoline (name);
789 start = get_delegate_invoke_impl (FALSE, sig->param_count, NULL);
791 cache [sig->param_count] = start;
792 mono_mini_arch_unlock ();
800 mono_arch_get_delegate_virtual_invoke_impl (MonoMethodSignature *sig, MonoMethod *method, int offset, gboolean load_imt_reg)
/* Extract the 'this' argument from a call's saved register state: on ARM it is always in r0. */
806 mono_arch_get_this_arg_from_call (mgreg_t *regs, guint8 *code)
808 	return (gpointer)regs [ARMREG_R0];
812 * Initialize the cpu to execute managed code.
815 mono_arch_cpu_init (void)
817 i8_align = MONO_ABI_ALIGNOF (gint64);
818 #ifdef MONO_CROSS_COMPILE
819 /* Need to set the alignment of i8 since it can different on the target */
820 #ifdef TARGET_ANDROID
822 mono_type_set_alignment (MONO_TYPE_I8, i8_align);
828 create_function_wrapper (gpointer function)
830 guint8 *start, *code;
832 start = code = mono_global_codeman_reserve (96);
835 * Construct the MonoContext structure on the stack.
838 ARM_SUB_REG_IMM8 (code, ARMREG_SP, ARMREG_SP, sizeof (MonoContext));
840 /* save ip, lr and pc into their correspodings ctx.regs slots. */
841 ARM_STR_IMM (code, ARMREG_IP, ARMREG_SP, MONO_STRUCT_OFFSET (MonoContext, regs) + sizeof (mgreg_t) * ARMREG_IP);
842 ARM_STR_IMM (code, ARMREG_LR, ARMREG_SP, MONO_STRUCT_OFFSET (MonoContext, regs) + 4 * ARMREG_LR);
843 ARM_STR_IMM (code, ARMREG_LR, ARMREG_SP, MONO_STRUCT_OFFSET (MonoContext, regs) + 4 * ARMREG_PC);
845 /* save r0..r10 and fp */
846 ARM_ADD_REG_IMM8 (code, ARMREG_IP, ARMREG_SP, MONO_STRUCT_OFFSET (MonoContext, regs));
847 ARM_STM (code, ARMREG_IP, 0x0fff);
849 /* now we can update fp. */
850 ARM_MOV_REG_REG (code, ARMREG_FP, ARMREG_SP);
852 /* make ctx.esp hold the actual value of sp at the beginning of this method. */
853 ARM_ADD_REG_IMM8 (code, ARMREG_R0, ARMREG_FP, sizeof (MonoContext));
854 ARM_STR_IMM (code, ARMREG_R0, ARMREG_IP, 4 * ARMREG_SP);
855 ARM_STR_IMM (code, ARMREG_R0, ARMREG_FP, MONO_STRUCT_OFFSET (MonoContext, regs) + 4 * ARMREG_SP);
857 /* make ctx.eip hold the address of the call. */
858 ARM_SUB_REG_IMM8 (code, ARMREG_LR, ARMREG_LR, 4);
859 ARM_STR_IMM (code, ARMREG_LR, ARMREG_SP, MONO_STRUCT_OFFSET (MonoContext, pc));
861 /* r0 now points to the MonoContext */
862 ARM_MOV_REG_REG (code, ARMREG_R0, ARMREG_FP);
865 #ifdef USE_JUMP_TABLES
867 gpointer *jte = mono_jumptable_add_entry ();
868 code = mono_arm_load_jumptable_entry (code, jte, ARMREG_IP);
872 ARM_LDR_IMM (code, ARMREG_IP, ARMREG_PC, 0);
874 *(gpointer*)code = function;
877 ARM_BLX_REG (code, ARMREG_IP);
879 /* we're back; save ctx.eip and ctx.esp into the corresponding regs slots. */
880 ARM_LDR_IMM (code, ARMREG_R0, ARMREG_FP, MONO_STRUCT_OFFSET (MonoContext, pc));
881 ARM_STR_IMM (code, ARMREG_R0, ARMREG_FP, MONO_STRUCT_OFFSET (MonoContext, regs) + 4 * ARMREG_LR);
882 ARM_STR_IMM (code, ARMREG_R0, ARMREG_FP, MONO_STRUCT_OFFSET (MonoContext, regs) + 4 * ARMREG_PC);
884 /* make ip point to the regs array, then restore everything, including pc. */
885 ARM_ADD_REG_IMM8 (code, ARMREG_IP, ARMREG_FP, MONO_STRUCT_OFFSET (MonoContext, regs));
886 ARM_LDM (code, ARMREG_IP, 0xffff);
888 mono_arch_flush_icache (start, code - start);
894 * Initialize architecture specific code.
897 mono_arch_init (void)
899 const char *cpu_arch;
901 mono_mutex_init_recursive (&mini_arch_mutex);
902 #ifdef MONO_ARCH_SOFT_DEBUG_SUPPORTED
903 if (mini_get_debug_options ()->soft_breakpoints) {
904 single_step_func_wrapper = create_function_wrapper (debugger_agent_single_step_from_context);
905 breakpoint_func_wrapper = create_function_wrapper (debugger_agent_breakpoint_from_context);
910 ss_trigger_page = mono_valloc (NULL, mono_pagesize (), MONO_MMAP_READ|MONO_MMAP_32BIT);
911 bp_trigger_page = mono_valloc (NULL, mono_pagesize (), MONO_MMAP_READ|MONO_MMAP_32BIT);
912 mono_mprotect (bp_trigger_page, mono_pagesize (), 0);
915 mono_aot_register_jit_icall ("mono_arm_throw_exception", mono_arm_throw_exception);
916 mono_aot_register_jit_icall ("mono_arm_throw_exception_by_token", mono_arm_throw_exception_by_token);
917 mono_aot_register_jit_icall ("mono_arm_resume_unwind", mono_arm_resume_unwind);
918 #if defined(ENABLE_GSHAREDVT)
919 mono_aot_register_jit_icall ("mono_arm_start_gsharedvt_call", mono_arm_start_gsharedvt_call);
922 #if defined(__ARM_EABI__)
923 eabi_supported = TRUE;
926 #if defined(ARM_FPU_VFP_HARD)
927 arm_fpu = MONO_ARM_FPU_VFP_HARD;
929 arm_fpu = MONO_ARM_FPU_VFP;
931 #if defined(ARM_FPU_NONE) && !defined(__APPLE__)
932 /* If we're compiling with a soft float fallback and it
933 turns out that no VFP unit is available, we need to
934 switch to soft float. We don't do this for iOS, since
935 iOS devices always have a VFP unit. */
936 if (!mono_hwcap_arm_has_vfp)
937 arm_fpu = MONO_ARM_FPU_NONE;
941 v5_supported = mono_hwcap_arm_is_v5;
942 v6_supported = mono_hwcap_arm_is_v6;
943 v7_supported = mono_hwcap_arm_is_v7;
944 v7s_supported = mono_hwcap_arm_is_v7s;
946 #if defined(__APPLE__)
947 /* iOS is special-cased here because we don't yet
948 have a way to properly detect CPU features on it. */
949 thumb_supported = TRUE;
952 thumb_supported = mono_hwcap_arm_has_thumb;
953 thumb2_supported = mono_hwcap_arm_has_thumb2;
956 /* Format: armv(5|6|7[s])[-thumb[2]] */
957 cpu_arch = g_getenv ("MONO_CPU_ARCH");
959 /* Do this here so it overrides any detection. */
961 if (strncmp (cpu_arch, "armv", 4) == 0) {
962 v5_supported = cpu_arch [4] >= '5';
963 v6_supported = cpu_arch [4] >= '6';
964 v7_supported = cpu_arch [4] >= '7';
965 v7s_supported = strncmp (cpu_arch, "armv7s", 6) == 0;
968 thumb_supported = strstr (cpu_arch, "thumb") != NULL;
969 thumb2_supported = strstr (cpu_arch, "thumb2") != NULL;
974 * Cleanup architecture specific code.
977 mono_arch_cleanup (void)
982 * This function returns the optimizations supported on this cpu.
985 mono_arch_cpu_optimizations (guint32 *exclude_mask)
987 /* no arm-specific optimizations yet */
993 * This function test for all SIMD functions supported.
995 * Returns a bitmask corresponding to all supported versions.
999 mono_arch_cpu_enumerate_simd_versions (void)
1001 /* SIMD is currently unimplemented */
1009 mono_arch_opcode_needs_emulation (MonoCompile *cfg, int opcode)
1011 if (v7s_supported) {
1025 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
1027 mono_arch_is_soft_float (void)
1029 return arm_fpu == MONO_ARM_FPU_NONE;
1034 mono_arm_is_hard_float (void)
1036 return arm_fpu == MONO_ARM_FPU_VFP_HARD;
/*
 * Return whether 't' fits in a single 32-bit integer register (pointers,
 * object references, non-valuetype generic instances), i.e. is eligible
 * for global register allocation on this 32-bit target.
 */
1040 is_regsize_var (MonoGenericSharingContext *gsctx, MonoType *t) {
1043 	t = mini_type_get_underlying_type (gsctx, t);
1050 	case MONO_TYPE_FNPTR:
1052 	case MONO_TYPE_OBJECT:
1053 	case MONO_TYPE_STRING:
1054 	case MONO_TYPE_CLASS:
1055 	case MONO_TYPE_SZARRAY:
1056 	case MONO_TYPE_ARRAY:
/* Generic instances count only when they are reference types. */
1058 	case MONO_TYPE_GENERICINST:
1059 		if (!mono_type_generic_inst_is_valuetype (t))
1062 	case MONO_TYPE_VALUETYPE:
/*
 * Collect the method variables eligible for global register allocation:
 * live, non-volatile, non-indirect locals/args of register size, sorted
 * by liveness via mono_varlist_insert_sorted ().
 */
1069 mono_arch_get_allocatable_int_vars (MonoCompile *cfg)
1074 	for (i = 0; i < cfg->num_varinfo; i++) {
1075 		MonoInst *ins = cfg->varinfo [i];
1076 		MonoMethodVar *vmv = MONO_VARINFO (cfg, i);
/* Skip variables with an empty (or inverted) live range. */
1079 		if (vmv->range.first_use.abs_pos >= vmv->range.last_use.abs_pos)
/* Skip volatile/indirect variables and anything that is not a local or argument. */
1082 		if (ins->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT) || (ins->opcode != OP_LOCAL && ins->opcode != OP_ARG))
1085 		/* we can only allocate 32 bit values */
1086 		if (is_regsize_var (cfg->generic_sharing_context, ins->inst_vtype)) {
1087 			g_assert (MONO_VARINFO (cfg, i)->reg == -1);
1088 			g_assert (i == vmv->idx);
1089 			vars = mono_varlist_insert_sorted (cfg, vars, vmv, FALSE);
1096 #define USE_EXTRA_TEMPS 0
/*
 * Return the list of callee-saved integer registers available to the global
 * register allocator for this method (depends on omit-fp, iphone ABI and
 * whether V5 is reserved for rgctx/IMT passing).
 */
1099 mono_arch_get_global_int_regs (MonoCompile *cfg)
1103 	mono_arch_compute_omit_fp (cfg);
1106 	 * FIXME: Interface calls might go through a static rgctx trampoline which
1107 	 * sets V5, but it doesn't save it, so we need to save it ourselves, and
/* Conservatively mark V5 as used whenever the method makes calls (see FIXME above). */
1110 	if (cfg->flags & MONO_CFG_HAS_CALLS)
1111 		cfg->uses_rgctx_reg = TRUE;
/* With no frame pointer, FP itself becomes allocatable. */
1113 	if (cfg->arch.omit_fp)
1114 		regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_FP));
1115 	regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_V1));
1116 	regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_V2));
1117 	regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_V3));
1119 		/* V4=R7 is used as a frame pointer, but V7=R10 is preserved */
1120 		regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_V7));
1122 		regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_V4));
/* V5 is only allocatable when not needed for AOT/rgctx/LLVM (see next comment). */
1123 	if (!(cfg->compile_aot || cfg->uses_rgctx_reg || COMPILE_LLVM (cfg)))
1124 		/* V5 is reserved for passing the vtable/rgctx/IMT method */
1125 		regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_V5));
1126 	/*regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_V6));*/
1127 	/*regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_V7));*/
1133 * mono_arch_regalloc_cost:
1135 * Return the cost, in number of memory references, of the action of
1136 * allocating the variable VMV into a register during global register
1140 mono_arch_regalloc_cost (MonoCompile *cfg, MonoMethodVar *vmv)
1146 #endif /* #ifndef DISABLE_JIT */
1148 #ifndef __GNUC_PREREQ
1149 #define __GNUC_PREREQ(maj, min) (0)
1153 mono_arch_flush_icache (guint8 *code, gint size)
1155 #if defined(__native_client__)
1156 // For Native Client we don't have to flush i-cache here,
1157 // as it's being done by dyncode interface.
1160 #ifdef MONO_CROSS_COMPILE
1162 sys_icache_invalidate (code, size);
1163 #elif __GNUC_PREREQ(4, 1)
1164 __clear_cache (code, code + size);
1165 #elif defined(PLATFORM_ANDROID)
1166 const int syscall = 0xf0002;
1174 : "r" (code), "r" (code + size), "r" (syscall)
1175 : "r0", "r1", "r7", "r2"
1178 __asm __volatile ("mov r0, %0\n"
1181 "swi 0x9f0002 @ sys_cacheflush"
1183 : "r" (code), "r" (code + size), "r" (0)
1184 : "r0", "r1", "r3" );
1186 #endif /* !__native_client__ */
1197 RegTypeStructByAddr,
1198 /* gsharedvt argument passed by addr in greg */
1199 RegTypeGSharedVtInReg,
1200 /* gsharedvt argument passed by addr on stack */
1201 RegTypeGSharedVtOnStack,
1206 guint16 vtsize; /* in param area */
1210 guint8 size : 4; /* 1, 2, 4, 8, or regs used by RegTypeStructByVal */
1215 guint32 stack_usage;
1216 gboolean vtype_retaddr;
1217 /* The index of the vret arg in the argument list */
/*
 * Assign the next integer argument to a register or stack slot, following
 * the AAPCS/EABI rules. 'simple' selects single-word arguments; otherwise
 * an 8-byte value is placed in a register pair (with alignment/splitting
 * rules that depend on the platform's i8 alignment).
 */
1227 add_general (guint *gr, guint *stack_size, ArgInfo *ainfo, gboolean simple)
/* Single-word arg: r0-r3 first, then the stack. */
1230 		if (*gr > ARMREG_R3) {
1232 			ainfo->offset = *stack_size;
1233 			ainfo->reg = ARMREG_SP; /* in the caller */
1234 			ainfo->storage = RegTypeBase;
1237 			ainfo->storage = RegTypeGeneral;
/* 64-bit values may be split across r3 and the stack only when i8 is 4-aligned. */
1244 		split = i8_align == 4;
1249 		if (*gr == ARMREG_R3 && split) {
1250 			/* first word in r3 and the second on the stack */
1251 			ainfo->offset = *stack_size;
1252 			ainfo->reg = ARMREG_SP; /* in the caller */
1253 			ainfo->storage = RegTypeBaseGen;
1255 		} else if (*gr >= ARMREG_R3) {
/* Registers exhausted: the whole 64-bit value goes on the stack. */
1256 			if (eabi_supported) {
1257 				/* darwin aligns longs to 4 byte only */
1258 				if (i8_align == 8) {
1263 			ainfo->offset = *stack_size;
1264 			ainfo->reg = ARMREG_SP; /* in the caller */
1265 			ainfo->storage = RegTypeBase;
/* EABI requires 64-bit values to start in an even-numbered register. */
1268 			if (eabi_supported) {
1269 				if (i8_align == 8 && ((*gr) & 1))
1272 			ainfo->storage = RegTypeIRegPair;
/*
 * add_float:
 * Assign the next VFP argument slot for the armhf (hard-float) ABI.
 * FPR counts single-precision registers (s0..s15); FLOAT_SPARE is the
 * back-filling slot described below, or -1 when none is pending.
 * Falls back to the stack (RegTypeBase) once s0-s15 are exhausted.
 */
1281 add_float (guint *fpr, guint *stack_size, ArgInfo *ainfo, gboolean is_double, gint *float_spare)
1284 * If we're calling a function like this:
1286 * void foo(float a, double b, float c)
1288 * We pass a in s0 and b in d1. That leaves us
1289 * with s1 being unused. The armhf ABI recognizes
1290 * this and requires register assignment to then
1291 * use that for the next single-precision arg,
1292 * i.e. c in this example. So float_spare either
1293 * tells us which reg to use for the next single-
1294 * precision arg, or it's -1, meaning use *fpr.
1296 * Note that even though most of the JIT speaks
1297 * double-precision, fpr represents single-
1298 * precision registers.
1300 * See parts 5.5 and 6.1.2 of the AAPCS for how
1304 if (*fpr < ARM_VFP_F16 || (!is_double && *float_spare >= 0)) {
1305 ainfo->storage = RegTypeFP;
1309 * If we're passing a double-precision value
1310 * and *fpr is odd (e.g. it's s1, s3, ...)
1311 * we need to use the next even register. So
1312 * we mark the current *fpr as a spare that
1313 * can be used for the next single-precision
1317 *float_spare = *fpr;
1322 * At this point, we have an even register
1323 * so we assign that and move along.
1327 } else if (*float_spare >= 0) {
1329 * We're passing a single-precision value
1330 * and it looks like a spare single-
1331 * precision register is available. Let's
1335 ainfo->reg = *float_spare;
1339 * If we hit this branch, we're passing a
1340 * single-precision value and we can simply
1341 * use the next available register.
1349 * We've exhausted available floating point
1350 * regs, so pass the rest on the stack.
1358 ainfo->offset = *stack_size;
1359 ainfo->reg = ARMREG_SP;
1360 ainfo->storage = RegTypeBase;
/*
 * get_call_info:
 * Compute the ARM calling convention for SIG: where each argument and
 * the return value live (registers, register pairs, VFP regs, stack,
 * by-value structs, gsharedvt by-ref) and the total outgoing stack
 * usage. Allocates the CallInfo from MP when given, otherwise from the
 * GC-unaware heap (g_malloc0) — the caller then owns and frees it.
 * NOTE(review): many lines are elided in this extraction; comments
 * describe only the visible logic.
 */
1367 get_call_info (MonoGenericSharingContext *gsctx, MonoMemPool *mp, MonoMethodSignature *sig)
1369 guint i, gr, fpr, pstart;
1371 int n = sig->hasthis + sig->param_count;
1372 MonoType *simpletype;
1373 guint32 stack_size = 0;
1375 gboolean is_pinvoke = sig->pinvoke;
/* One trailing ArgInfo per argument is allocated together with CallInfo. */
1379 cinfo = mono_mempool_alloc0 (mp, sizeof (CallInfo) + (sizeof (ArgInfo) * n));
1381 cinfo = g_malloc0 (sizeof (CallInfo) + (sizeof (ArgInfo) * n));
/* Decide how the return value is passed before scanning the arguments,
 * since a hidden vret-address argument consumes a core register. */
1388 t = mini_type_get_underlying_type (gsctx, sig->ret);
1389 if (MONO_TYPE_ISSTRUCT (t)) {
/* Small pinvoke structs (<= pointer size) are returned in a register. */
1392 if (is_pinvoke && mono_class_native_size (mono_class_from_mono_type (t), &align) <= sizeof (gpointer)) {
1393 cinfo->ret.storage = RegTypeStructByVal;
1395 cinfo->vtype_retaddr = TRUE;
1397 } else if (!(t->type == MONO_TYPE_GENERICINST && !mono_type_generic_inst_is_valuetype (t)) && mini_is_gsharedvt_type_gsctx (gsctx, t)) {
1398 cinfo->vtype_retaddr = TRUE;
1404 * To simplify get_this_arg_reg () and LLVM integration, emit the vret arg after
1405 * the first argument, allowing 'this' to be always passed in the first arg reg.
1406 * Also do this if the first argument is a reference type, since virtual calls
1407 * are sometimes made using calli without sig->hasthis set, like in the delegate
1410 if (cinfo->vtype_retaddr && !is_pinvoke && (sig->hasthis || (sig->param_count > 0 && MONO_TYPE_IS_REFERENCE (mini_type_get_underlying_type (gsctx, sig->params [0]))))) {
1412 add_general (&gr, &stack_size, cinfo->args + 0, TRUE);
1414 add_general (&gr, &stack_size, &cinfo->args [sig->hasthis + 0], TRUE);
/* vret arg placed second: this/first-arg keeps the first register. */
1418 add_general (&gr, &stack_size, &cinfo->ret, TRUE);
1419 cinfo->vret_arg_index = 1;
1423 add_general (&gr, &stack_size, cinfo->args + 0, TRUE);
1427 if (cinfo->vtype_retaddr)
1428 add_general (&gr, &stack_size, &cinfo->ret, TRUE);
/* Assign storage to each managed argument in signature order. */
1431 DEBUG(printf("params: %d\n", sig->param_count));
1432 for (i = pstart; i < sig->param_count; ++i) {
1433 ArgInfo *ainfo = &cinfo->args [n];
1435 if ((sig->call_convention == MONO_CALL_VARARG) && (i == sig->sentinelpos)) {
1436 /* Prevent implicit arguments and sig_cookie from
1437 being passed in registers */
1440 /* Emit the signature cookie just before the implicit arguments */
1441 add_general (&gr, &stack_size, &cinfo->sig_cookie, TRUE);
1443 DEBUG(printf("param %d: ", i));
1444 if (sig->params [i]->byref) {
1445 DEBUG(printf("byref\n"));
1446 add_general (&gr, &stack_size, ainfo, TRUE);
1450 simpletype = mini_type_get_underlying_type (gsctx, sig->params [i]);
1451 switch (simpletype->type) {
1452 case MONO_TYPE_BOOLEAN:
1455 cinfo->args [n].size = 1;
1456 add_general (&gr, &stack_size, ainfo, TRUE);
1459 case MONO_TYPE_CHAR:
1462 cinfo->args [n].size = 2;
1463 add_general (&gr, &stack_size, ainfo, TRUE);
1468 cinfo->args [n].size = 4;
1469 add_general (&gr, &stack_size, ainfo, TRUE);
/* All reference/pointer-like types take one pointer-sized slot. */
1475 case MONO_TYPE_FNPTR:
1476 case MONO_TYPE_CLASS:
1477 case MONO_TYPE_OBJECT:
1478 case MONO_TYPE_STRING:
1479 case MONO_TYPE_SZARRAY:
1480 case MONO_TYPE_ARRAY:
1481 cinfo->args [n].size = sizeof (gpointer);
1482 add_general (&gr, &stack_size, ainfo, TRUE);
1485 case MONO_TYPE_GENERICINST:
1486 if (!mono_type_generic_inst_is_valuetype (simpletype)) {
1487 cinfo->args [n].size = sizeof (gpointer);
1488 add_general (&gr, &stack_size, ainfo, TRUE);
1492 if (mini_is_gsharedvt_type_gsctx (gsctx, simpletype)) {
1493 /* gsharedvt arguments are passed by ref */
1494 g_assert (mini_is_gsharedvt_type_gsctx (gsctx, simpletype));
1495 add_general (&gr, &stack_size, ainfo, TRUE);
/* Reclassify the plain register/stack slot as a gsharedvt by-ref slot. */
1496 switch (ainfo->storage) {
1497 case RegTypeGeneral:
1498 ainfo->storage = RegTypeGSharedVtInReg;
1501 ainfo->storage = RegTypeGSharedVtOnStack;
1504 g_assert_not_reached ();
1510 case MONO_TYPE_TYPEDBYREF:
1511 case MONO_TYPE_VALUETYPE: {
1517 if (simpletype->type == MONO_TYPE_TYPEDBYREF) {
1518 size = sizeof (MonoTypedRef);
1519 align = sizeof (gpointer);
1521 MonoClass *klass = mono_class_from_mono_type (sig->params [i]);
1523 size = mono_class_native_size (klass, &align);
1525 size = mini_type_stack_size_full (gsctx, simpletype, &align, FALSE);
1527 DEBUG(printf ("load %d bytes struct\n", size));
/* Round the struct size up to a whole number of pointer-sized words. */
1530 align_size += (sizeof (gpointer) - 1);
1531 align_size &= ~(sizeof (gpointer) - 1);
1532 nwords = (align_size + sizeof (gpointer) -1 ) / sizeof (gpointer);
1533 ainfo->storage = RegTypeStructByVal;
1534 ainfo->struct_size = size;
1535 /* FIXME: align stack_size if needed */
1536 if (eabi_supported) {
1537 if (align >= 8 && (gr & 1))
/* Split the struct: as many words in r0-r3 as fit, the rest on stack. */
1540 if (gr > ARMREG_R3) {
1542 ainfo->vtsize = nwords;
1544 int rest = ARMREG_R3 - gr + 1;
1545 int n_in_regs = rest >= nwords? nwords: rest;
1547 ainfo->size = n_in_regs;
1548 ainfo->vtsize = nwords - n_in_regs;
1551 nwords -= n_in_regs;
1553 if (sig->call_convention == MONO_CALL_VARARG)
1554 /* This matches the alignment in mono_ArgIterator_IntGetNextArg () */
1555 stack_size = ALIGN_TO (stack_size, align);
1556 ainfo->offset = stack_size;
1557 /*g_print ("offset for arg %d at %d\n", n, stack_size);*/
1558 stack_size += nwords * sizeof (gpointer);
1565 add_general (&gr, &stack_size, ainfo, FALSE);
1572 add_float (&fpr, &stack_size, ainfo, FALSE, &float_spare);
1574 add_general (&gr, &stack_size, ainfo, TRUE);
1582 add_float (&fpr, &stack_size, ainfo, TRUE, &float_spare);
1584 add_general (&gr, &stack_size, ainfo, FALSE);
1589 case MONO_TYPE_MVAR:
1590 /* gsharedvt arguments are passed by ref */
1591 g_assert (mini_is_gsharedvt_type_gsctx (gsctx, simpletype));
1592 add_general (&gr, &stack_size, ainfo, TRUE);
1593 switch (ainfo->storage) {
1594 case RegTypeGeneral:
1595 ainfo->storage = RegTypeGSharedVtInReg;
1598 ainfo->storage = RegTypeGSharedVtOnStack;
1601 g_assert_not_reached ();
1606 g_error ("Can't trampoline 0x%x", sig->params [i]->type);
1610 /* Handle the case where there are no implicit arguments */
1611 if ((sig->call_convention == MONO_CALL_VARARG) && (i == sig->sentinelpos)) {
1612 /* Prevent implicit arguments and sig_cookie from
1613 being passed in registers */
1616 /* Emit the signature cookie just before the implicit arguments */
1617 add_general (&gr, &stack_size, &cinfo->sig_cookie, TRUE);
/* Finally record where the return value lives. */
1621 simpletype = mini_type_get_underlying_type (gsctx, sig->ret);
1622 switch (simpletype->type) {
1623 case MONO_TYPE_BOOLEAN:
1628 case MONO_TYPE_CHAR:
1634 case MONO_TYPE_FNPTR:
1635 case MONO_TYPE_CLASS:
1636 case MONO_TYPE_OBJECT:
1637 case MONO_TYPE_SZARRAY:
1638 case MONO_TYPE_ARRAY:
1639 case MONO_TYPE_STRING:
1640 cinfo->ret.storage = RegTypeGeneral;
1641 cinfo->ret.reg = ARMREG_R0;
/* 64-bit results come back in the r0/r1 pair. */
1645 cinfo->ret.storage = RegTypeIRegPair;
1646 cinfo->ret.reg = ARMREG_R0;
1650 cinfo->ret.storage = RegTypeFP;
/* armhf returns FP values in d0/s0; soft ABI uses core registers. */
1652 if (IS_HARD_FLOAT) {
1653 cinfo->ret.reg = ARM_VFP_F0;
1655 cinfo->ret.reg = ARMREG_R0;
1659 case MONO_TYPE_GENERICINST:
1660 if (!mono_type_generic_inst_is_valuetype (simpletype)) {
1661 cinfo->ret.storage = RegTypeGeneral;
1662 cinfo->ret.reg = ARMREG_R0;
1665 // FIXME: Only for variable types
1666 if (mini_is_gsharedvt_type_gsctx (gsctx, simpletype)) {
1667 cinfo->ret.storage = RegTypeStructByAddr;
1668 g_assert (cinfo->vtype_retaddr);
1672 case MONO_TYPE_VALUETYPE:
1673 case MONO_TYPE_TYPEDBYREF:
/* Keep RegTypeStructByVal when chosen earlier for small pinvoke structs. */
1674 if (cinfo->ret.storage != RegTypeStructByVal)
1675 cinfo->ret.storage = RegTypeStructByAddr;
1678 case MONO_TYPE_MVAR:
1679 g_assert (mini_is_gsharedvt_type_gsctx (gsctx, simpletype));
1680 cinfo->ret.storage = RegTypeStructByAddr;
1681 g_assert (cinfo->vtype_retaddr);
1683 case MONO_TYPE_VOID:
1686 g_error ("Can't handle as return value 0x%x", sig->ret->type);
1690 /* align stack size to 8 */
1691 DEBUG (printf (" stack size: %d (%d)\n", (stack_size + 15) & ~15, stack_size));
1692 stack_size = (stack_size + 7) & ~7;
1694 cinfo->stack_usage = stack_size;
/*
 * mono_arch_tail_call_supported:
 * Decide whether a tail call from CALLER_SIG to CALLEE_SIG can be
 * emitted on ARM, by comparing the two calling conventions computed
 * with get_call_info (). The tail of this function (including freeing
 * the two heap-allocated CallInfos) is not visible in this extraction.
 */
1700 mono_arch_tail_call_supported (MonoCompile *cfg, MonoMethodSignature *caller_sig, MonoMethodSignature *callee_sig)
1702 MonoType *callee_ret;
1706 if (cfg->compile_aot && !cfg->full_aot)
1707 /* OP_TAILCALL doesn't work with AOT */
/* NULL mempool: these CallInfos are g_malloc0-ed and must be freed. */
1710 c1 = get_call_info (NULL, NULL, caller_sig);
1711 c2 = get_call_info (NULL, NULL, callee_sig);
1714 * Tail calls with more callee stack usage than the caller cannot be supported, since
1715 * the extra stack space would be left on the stack after the tail call.
1717 res = c1->stack_usage >= c2->stack_usage;
1718 callee_ret = mini_replace_type (callee_sig->ret);
1719 if (callee_ret && MONO_TYPE_ISSTRUCT (callee_ret) && c2->ret.storage != RegTypeStructByVal)
1720 /* An address on the callee's stack is passed as the first argument */
/* Cap on callee stack usage (16 words); rationale not visible here. */
1723 if (c2->stack_usage > 16 * 4)
/*
 * debug_omit_fp:
 * Debug hook gating frame-pointer omission via the mono_debug_count ()
 * bisection counter, so FP-omission bugs can be narrowed down per method.
 */
1735 debug_omit_fp (void)
1738 return mono_debug_count ();
1745 * mono_arch_compute_omit_fp:
1747 * Determine whether the frame pointer can be eliminated.
/*
 * mono_arch_compute_omit_fp:
 * Compute (once per compile; cached via cfg->arch.omit_fp_computed)
 * whether the frame pointer can be omitted. Starts optimistic and
 * disables omission for any feature that needs a fixed frame: LMF
 * saving, alloca, exception clauses, param area, managed varargs,
 * tracing/profiling, or any argument whose home is on the stack.
 */
1750 mono_arch_compute_omit_fp (MonoCompile *cfg)
1752 MonoMethodSignature *sig;
1753 MonoMethodHeader *header;
1757 if (cfg->arch.omit_fp_computed)
1760 header = cfg->header;
1762 sig = mono_method_signature (cfg->method);
/* The calling-convention info is computed lazily and cached on cfg. */
1764 if (!cfg->arch.cinfo)
1765 cfg->arch.cinfo = get_call_info (cfg->generic_sharing_context, cfg->mempool, sig);
1766 cinfo = cfg->arch.cinfo;
1769 * FIXME: Remove some of the restrictions.
1771 cfg->arch.omit_fp = TRUE;
1772 cfg->arch.omit_fp_computed = TRUE;
1774 if (cfg->disable_omit_fp)
1775 cfg->arch.omit_fp = FALSE;
1776 if (!debug_omit_fp ())
1777 cfg->arch.omit_fp = FALSE;
1779 if (cfg->method->save_lmf)
1780 cfg->arch.omit_fp = FALSE;
1782 if (cfg->flags & MONO_CFG_HAS_ALLOCA)
1783 cfg->arch.omit_fp = FALSE;
1784 if (header->num_clauses)
1785 cfg->arch.omit_fp = FALSE;
1786 if (cfg->param_area)
1787 cfg->arch.omit_fp = FALSE;
1788 if (!sig->pinvoke && (sig->call_convention == MONO_CALL_VARARG))
1789 cfg->arch.omit_fp = FALSE;
1790 if ((mono_jit_trace_calls != NULL && mono_trace_eval (cfg->method)) ||
1791 (cfg->prof_options & MONO_PROFILE_ENTER_LEAVE))
1792 cfg->arch.omit_fp = FALSE;
/* Incoming stack-homed arguments need FP to be addressable early. */
1793 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
1794 ArgInfo *ainfo = &cinfo->args [i];
1796 if (ainfo->storage == RegTypeBase || ainfo->storage == RegTypeBaseGen || ainfo->storage == RegTypeStructByVal) {
1798 * The stack offset can only be determined when the frame
1801 cfg->arch.omit_fp = FALSE;
/* Accumulate local sizes; how locals_size is used is elided here. */
1806 for (i = cfg->locals_start; i < cfg->num_varinfo; i++) {
1807 MonoInst *ins = cfg->varinfo [i];
1810 locals_size += mono_type_size (ins->inst_vtype, &ialign);
1815 * Set var information according to the calling convention. arm version.
1816 * The locals var stuff should most likely be split in another method.
/*
 * mono_arch_allocate_vars:
 * Assign stack offsets / registers to the return value, locals and
 * incoming arguments. On ARM the stack grows offsets upward from the
 * frame register (MONO_CFG_HAS_SPILLUP), which is SP when the FP is
 * omitted, FP otherwise. Also reserves slots for the param area,
 * sequence-point variables, atomic-op temporaries and the sig cookie.
 * NOTE(review): many lines are elided in this extraction; comments
 * describe only the visible logic.
 */
1819 mono_arch_allocate_vars (MonoCompile *cfg)
1821 MonoMethodSignature *sig;
1822 MonoMethodHeader *header;
1825 int i, offset, size, align, curinst;
1829 sig = mono_method_signature (cfg->method);
1831 if (!cfg->arch.cinfo)
1832 cfg->arch.cinfo = get_call_info (cfg->generic_sharing_context, cfg->mempool, sig);
1833 cinfo = cfg->arch.cinfo;
1834 sig_ret = mini_replace_type (sig->ret);
1836 mono_arch_compute_omit_fp (cfg);
1838 if (cfg->arch.omit_fp)
1839 cfg->frame_reg = ARMREG_SP;
1841 cfg->frame_reg = ARMREG_FP;
1843 cfg->flags |= MONO_CFG_HAS_SPILLUP;
1845 /* allow room for the vararg method args: void* and long/double */
1846 if (mono_jit_trace_calls != NULL && mono_trace_eval (cfg->method))
1847 cfg->param_area = MAX (cfg->param_area, sizeof (gpointer)*8);
1849 header = cfg->header;
1851 /* See mono_arch_get_global_int_regs () */
1852 if (cfg->flags & MONO_CFG_HAS_CALLS)
1853 cfg->uses_rgctx_reg = TRUE;
1855 if (cfg->frame_reg != ARMREG_SP)
1856 cfg->used_int_regs |= 1 << cfg->frame_reg;
1858 if (cfg->compile_aot || cfg->uses_rgctx_reg || COMPILE_LLVM (cfg))
1859 /* V5 is reserved for passing the vtable/rgctx/IMT method */
1860 cfg->used_int_regs |= (1 << ARMREG_V5);
/* Scalar returns live directly in r0 (no stack slot needed). */
1864 if (!MONO_TYPE_ISSTRUCT (sig_ret) && !cinfo->vtype_retaddr) {
1865 if (sig_ret->type != MONO_TYPE_VOID) {
1866 cfg->ret->opcode = OP_REGVAR;
1867 cfg->ret->inst_c0 = ARMREG_R0;
1870 /* local vars are at a positive offset from the stack pointer */
1872 * also note that if the function uses alloca, we use FP
1873 * to point at the local variables.
1875 offset = 0; /* linkage area */
1876 /* align the offset to 16 bytes: not sure this is needed here */
1878 //offset &= ~(8 - 1);
1880 /* add parameter area size for called functions */
1881 offset += cfg->param_area;
1884 if (cfg->flags & MONO_CFG_HAS_FPOUT)
1887 /* allow room to save the return value */
1888 if (mono_jit_trace_calls != NULL && mono_trace_eval (cfg->method))
1891 /* the MonoLMF structure is stored just below the stack pointer */
1892 if (cinfo->ret.storage == RegTypeStructByVal) {
1893 cfg->ret->opcode = OP_REGOFFSET;
1894 cfg->ret->inst_basereg = cfg->frame_reg;
1895 offset += sizeof (gpointer) - 1;
1896 offset &= ~(sizeof (gpointer) - 1);
/* NOTE(review): negative offset here, unlike other slots — presumably
 * intentional for the by-val return temp; confirm against prologue code. */
1897 cfg->ret->inst_offset = - offset;
1898 offset += sizeof(gpointer);
1899 } else if (cinfo->vtype_retaddr) {
1900 ins = cfg->vret_addr;
1901 offset += sizeof(gpointer) - 1;
1902 offset &= ~(sizeof(gpointer) - 1);
1903 ins->inst_offset = offset;
1904 ins->opcode = OP_REGOFFSET;
1905 ins->inst_basereg = cfg->frame_reg;
1906 if (G_UNLIKELY (cfg->verbose_level > 1)) {
1907 printf ("vret_addr =");
1908 mono_print_ins (cfg->vret_addr);
1910 offset += sizeof(gpointer);
1913 /* Allocate these first so they have a small offset, OP_SEQ_POINT depends on this */
1914 if (cfg->arch.seq_point_info_var) {
1917 ins = cfg->arch.seq_point_info_var;
1921 offset += align - 1;
1922 offset &= ~(align - 1);
1923 ins->opcode = OP_REGOFFSET;
1924 ins->inst_basereg = cfg->frame_reg;
1925 ins->inst_offset = offset;
1928 ins = cfg->arch.ss_trigger_page_var;
1931 offset += align - 1;
1932 offset &= ~(align - 1);
1933 ins->opcode = OP_REGOFFSET;
1934 ins->inst_basereg = cfg->frame_reg;
1935 ins->inst_offset = offset;
/* Soft-breakpoint sequence-point variables (see mono_arch_create_vars). */
1939 if (cfg->arch.seq_point_read_var) {
1942 ins = cfg->arch.seq_point_read_var;
1946 offset += align - 1;
1947 offset &= ~(align - 1);
1948 ins->opcode = OP_REGOFFSET;
1949 ins->inst_basereg = cfg->frame_reg;
1950 ins->inst_offset = offset;
1953 ins = cfg->arch.seq_point_ss_method_var;
1956 offset += align - 1;
1957 offset &= ~(align - 1);
1958 ins->opcode = OP_REGOFFSET;
1959 ins->inst_basereg = cfg->frame_reg;
1960 ins->inst_offset = offset;
1963 ins = cfg->arch.seq_point_bp_method_var;
1966 offset += align - 1;
1967 offset &= ~(align - 1);
1968 ins->opcode = OP_REGOFFSET;
1969 ins->inst_basereg = cfg->frame_reg;
1970 ins->inst_offset = offset;
1974 if (cfg->has_atomic_exchange_i4 || cfg->has_atomic_cas_i4 || cfg->has_atomic_add_i4) {
1975 /* Allocate a temporary used by the atomic ops */
1979 /* Allocate a local slot to hold the sig cookie address */
1980 offset += align - 1;
1981 offset &= ~(align - 1);
1982 cfg->arch.atomic_tmp_offset = offset;
1985 cfg->arch.atomic_tmp_offset = -1;
1988 cfg->locals_min_stack_offset = offset;
/* Lay out the method's local variables. */
1990 curinst = cfg->locals_start;
1991 for (i = curinst; i < cfg->num_varinfo; ++i) {
1994 ins = cfg->varinfo [i];
1995 if ((ins->flags & MONO_INST_IS_DEAD) || ins->opcode == OP_REGVAR || ins->opcode == OP_REGOFFSET)
1998 t = ins->inst_vtype;
1999 if (cfg->gsharedvt && mini_is_gsharedvt_variable_type (cfg, t))
2002 /* inst->backend.is_pinvoke indicates native sized value types, this is used by the
2003 * pinvoke wrappers when they call functions returning structure */
2004 if (ins->backend.is_pinvoke && MONO_TYPE_ISSTRUCT (t) && t->type != MONO_TYPE_TYPEDBYREF) {
2005 size = mono_class_native_size (mono_class_from_mono_type (t), &ualign);
2009 size = mono_type_size (t, &align);
2011 /* FIXME: if a structure is misaligned, our memcpy doesn't work,
2012 * since it loads/stores misaligned words, which don't do the right thing.
2014 if (align < 4 && size >= 4)
/* Padding introduced by alignment holds no references: tell the GC map. */
2016 if (ALIGN_TO (offset, align) > ALIGN_TO (offset, 4))
2017 mini_gc_set_slot_type_from_fp (cfg, ALIGN_TO (offset, 4), SLOT_NOREF);
2018 offset += align - 1;
2019 offset &= ~(align - 1);
2020 ins->opcode = OP_REGOFFSET;
2021 ins->inst_offset = offset;
2022 ins->inst_basereg = cfg->frame_reg;
2024 //g_print ("allocating local %d to %d\n", i, inst->inst_offset);
2027 cfg->locals_max_stack_offset = offset;
/* Give the 'this'/hidden arguments a spill slot if not register-homed. */
2031 ins = cfg->args [curinst];
2032 if (ins->opcode != OP_REGVAR) {
2033 ins->opcode = OP_REGOFFSET;
2034 ins->inst_basereg = cfg->frame_reg;
2035 offset += sizeof (gpointer) - 1;
2036 offset &= ~(sizeof (gpointer) - 1);
2037 ins->inst_offset = offset;
2038 offset += sizeof (gpointer);
2043 if (sig->call_convention == MONO_CALL_VARARG) {
2047 /* Allocate a local slot to hold the sig cookie address */
2048 offset += align - 1;
2049 offset &= ~(align - 1);
2050 cfg->sig_cookie = offset;
/* Lay out the remaining incoming parameters. */
2054 for (i = 0; i < sig->param_count; ++i) {
2055 ins = cfg->args [curinst];
2057 if (ins->opcode != OP_REGVAR) {
2058 ins->opcode = OP_REGOFFSET;
2059 ins->inst_basereg = cfg->frame_reg;
2060 size = mini_type_stack_size_full (cfg->generic_sharing_context, sig->params [i], &ualign, sig->pinvoke);
2062 /* FIXME: if a structure is misaligned, our memcpy doesn't work,
2063 * since it loads/stores misaligned words, which don't do the right thing.
2065 if (align < 4 && size >= 4)
2067 /* The code in the prolog () stores words when storing vtypes received in a register */
2068 if (MONO_TYPE_ISSTRUCT (sig->params [i]))
2070 if (ALIGN_TO (offset, align) > ALIGN_TO (offset, 4))
2071 mini_gc_set_slot_type_from_fp (cfg, ALIGN_TO (offset, 4), SLOT_NOREF);
2072 offset += align - 1;
2073 offset &= ~(align - 1);
2074 ins->inst_offset = offset;
2080 /* align the offset to 8 bytes */
2081 if (ALIGN_TO (offset, 8) > ALIGN_TO (offset, 4))
2082 mini_gc_set_slot_type_from_fp (cfg, ALIGN_TO (offset, 4), SLOT_NOREF);
2087 cfg->stack_offset = offset;
/*
 * mono_arch_create_vars:
 * Create the architecture-specific compile-time variables: VFP scratch
 * slots for hard-float, the vret address argument for vtype returns,
 * and the sequence-point bookkeeping variables used by the soft
 * debugger (soft breakpoints) or AOT single-stepping support.
 */
2091 mono_arch_create_vars (MonoCompile *cfg)
2093 MonoMethodSignature *sig;
2097 sig = mono_method_signature (cfg->method);
2099 if (!cfg->arch.cinfo)
2100 cfg->arch.cinfo = get_call_info (cfg->generic_sharing_context, cfg->mempool, sig);
2101 cinfo = cfg->arch.cinfo;
/* Two double-sized scratch slots used when shuffling VFP registers. */
2103 if (IS_HARD_FLOAT) {
2104 for (i = 0; i < 2; i++) {
2105 MonoInst *inst = mono_compile_create_var (cfg, &mono_defaults.double_class->byval_arg, OP_LOCAL);
2106 inst->flags |= MONO_INST_VOLATILE;
2108 cfg->arch.vfp_scratch_slots [i] = (gpointer) inst;
2112 if (cinfo->ret.storage == RegTypeStructByVal)
2113 cfg->ret_var_is_local = TRUE;
2115 if (cinfo->vtype_retaddr) {
2116 cfg->vret_addr = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_ARG);
2117 if (G_UNLIKELY (cfg->verbose_level > 1)) {
2118 printf ("vret_addr = ");
2119 mono_print_ins (cfg->vret_addr);
2123 if (cfg->gen_seq_points) {
2124 if (cfg->soft_breakpoints) {
/* MONO_INST_VOLATILE keeps these from being register-allocated away,
 * since the debugger reads/writes them from outside the method. */
2125 MonoInst *ins = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
2126 ins->flags |= MONO_INST_VOLATILE;
2127 cfg->arch.seq_point_read_var = ins;
2129 ins = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
2130 ins->flags |= MONO_INST_VOLATILE;
2131 cfg->arch.seq_point_ss_method_var = ins;
2133 ins = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
2134 ins->flags |= MONO_INST_VOLATILE;
2135 cfg->arch.seq_point_bp_method_var = ins;
2137 g_assert (!cfg->compile_aot);
2138 } else if (cfg->compile_aot) {
2139 MonoInst *ins = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
2140 ins->flags |= MONO_INST_VOLATILE;
2141 cfg->arch.seq_point_info_var = ins;
2143 /* Allocate a separate variable for this to save 1 load per seq point */
2144 ins = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
2145 ins->flags |= MONO_INST_VOLATILE;
2146 cfg->arch.ss_trigger_page_var = ins;
/*
 * emit_sig_cookie:
 * For managed varargs calls, store the signature cookie (a truncated
 * copy of the call signature starting at the sentinel) at its stack
 * slot just before the implicit arguments. No-op for tail calls.
 */
2152 emit_sig_cookie (MonoCompile *cfg, MonoCallInst *call, CallInfo *cinfo)
2154 MonoMethodSignature *tmp_sig;
2157 if (call->tail_call)
2160 g_assert (cinfo->sig_cookie.storage == RegTypeBase);
2163 * mono_ArgIterator_Setup assumes the signature cookie is
2164 * passed first and all the arguments which were before it are
2165 * passed on the stack after the signature. So compensate by
2166 * passing a different signature.
2168 tmp_sig = mono_metadata_signature_dup (call->signature);
2169 tmp_sig->param_count -= call->signature->sentinelpos;
2170 tmp_sig->sentinelpos = 0;
2171 memcpy (tmp_sig->params, call->signature->params + call->signature->sentinelpos, tmp_sig->param_count * sizeof (MonoType*));
2173 sig_reg = mono_alloc_ireg (cfg);
2174 MONO_EMIT_NEW_SIGNATURECONST (cfg, sig_reg, tmp_sig);
2176 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, ARMREG_SP, cinfo->sig_cookie.offset, sig_reg);
/*
 * mono_arch_get_llvm_call_info:
 * Translate the ARM CallInfo for SIG into LLVMCallInfo for the LLVM
 * backend. Conventions LLVM cannot express yet (vtype-by-val on the
 * stack, split reg/stack structs, exotic storages) disable LLVM
 * compilation for the method via cfg->disable_llvm.
 */
2181 mono_arch_get_llvm_call_info (MonoCompile *cfg, MonoMethodSignature *sig)
2186 LLVMCallInfo *linfo;
2188 n = sig->param_count + sig->hasthis;
2190 cinfo = get_call_info (cfg->generic_sharing_context, cfg->mempool, sig);
2192 linfo = mono_mempool_alloc0 (cfg->mempool, sizeof (LLVMCallInfo) + (sizeof (LLVMArgInfo) * n));
2195 * LLVM always uses the native ABI while we use our own ABI, the
2196 * only difference is the handling of vtypes:
2197 * - we only pass/receive them in registers in some cases, and only
2198 * in 1 or 2 integer registers.
2200 if (cinfo->vtype_retaddr) {
2201 /* Vtype returned using a hidden argument */
2202 linfo->ret.storage = LLVMArgVtypeRetAddr;
2203 linfo->vret_arg_index = cinfo->vret_arg_index;
2204 } else if (cinfo->ret.storage != RegTypeGeneral && cinfo->ret.storage != RegTypeNone && cinfo->ret.storage != RegTypeFP && cinfo->ret.storage != RegTypeIRegPair) {
2205 cfg->exception_message = g_strdup ("unknown ret conv");
2206 cfg->disable_llvm = TRUE;
2210 for (i = 0; i < n; ++i) {
2211 ainfo = cinfo->args + i;
2213 linfo->args [i].storage = LLVMArgNone;
2215 switch (ainfo->storage) {
2216 case RegTypeGeneral:
2217 case RegTypeIRegPair:
2219 linfo->args [i].storage = LLVMArgInIReg;
2221 case RegTypeStructByVal:
2222 // FIXME: Passing entirely on the stack or split reg/stack
/* Only structs fully in 1-2 integer registers are representable. */
2223 if (ainfo->vtsize == 0 && ainfo->size <= 2) {
2224 linfo->args [i].storage = LLVMArgVtypeInReg;
2225 linfo->args [i].pair_storage [0] = LLVMArgInIReg;
2226 if (ainfo->size == 2)
2227 linfo->args [i].pair_storage [1] = LLVMArgInIReg;
2229 linfo->args [i].pair_storage [1] = LLVMArgNone;
2231 cfg->exception_message = g_strdup_printf ("vtype-by-val on stack");
2232 cfg->disable_llvm = TRUE;
2236 cfg->exception_message = g_strdup_printf ("ainfo->storage (%d)", ainfo->storage);
2237 cfg->disable_llvm = TRUE;
/*
 * mono_arch_emit_call:
 * Emit the IR that moves each call argument into its ABI location
 * (core register, register pair, VFP register, stack slot, or
 * by-value struct via OP_OUTARG_VT) as computed by get_call_info (),
 * plus the sig cookie for varargs and the hidden vret argument.
 * NOTE(review): many lines are elided in this extraction; comments
 * describe only the visible logic.
 */
2247 mono_arch_emit_call (MonoCompile *cfg, MonoCallInst *call)
2250 MonoMethodSignature *sig;
2254 sig = call->signature;
2255 n = sig->param_count + sig->hasthis;
2257 cinfo = get_call_info (cfg->generic_sharing_context, NULL, sig);
2259 for (i = 0; i < n; ++i) {
2260 ArgInfo *ainfo = cinfo->args + i;
/* The implicit 'this' argument is treated as a native int. */
2263 if (i >= sig->hasthis)
2264 t = sig->params [i - sig->hasthis];
2266 t = &mono_defaults.int_class->byval_arg;
2267 t = mini_type_get_underlying_type (cfg->generic_sharing_context, t);
2269 if ((sig->call_convention == MONO_CALL_VARARG) && (i == sig->sentinelpos)) {
2270 /* Emit the signature cookie just before the implicit arguments */
2271 emit_sig_cookie (cfg, call, cinfo);
2274 in = call->args [i];
2276 switch (ainfo->storage) {
2277 case RegTypeGeneral:
2278 case RegTypeIRegPair:
/* 64-bit values: move both halves (vreg+1 = low, vreg+2 = high on LE). */
2279 if (!t->byref && ((t->type == MONO_TYPE_I8) || (t->type == MONO_TYPE_U8))) {
2280 MONO_INST_NEW (cfg, ins, OP_MOVE);
2281 ins->dreg = mono_alloc_ireg (cfg);
2282 ins->sreg1 = in->dreg + 1;
2283 MONO_ADD_INS (cfg->cbb, ins);
2284 mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, ainfo->reg, FALSE);
2286 MONO_INST_NEW (cfg, ins, OP_MOVE);
2287 ins->dreg = mono_alloc_ireg (cfg);
2288 ins->sreg1 = in->dreg + 2;
2289 MONO_ADD_INS (cfg->cbb, ins);
2290 mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, ainfo->reg + 1, FALSE);
2291 } else if (!t->byref && ((t->type == MONO_TYPE_R8) || (t->type == MONO_TYPE_R4))) {
2292 if (ainfo->size == 4) {
2293 if (IS_SOFT_FLOAT) {
2294 /* mono_emit_call_args () have already done the r8->r4 conversion */
2295 /* The converted value is in an int vreg */
2296 MONO_INST_NEW (cfg, ins, OP_MOVE);
2297 ins->dreg = mono_alloc_ireg (cfg);
2298 ins->sreg1 = in->dreg;
2299 MONO_ADD_INS (cfg->cbb, ins);
2300 mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, ainfo->reg, FALSE);
/* VFP->core transfer done through a scratch slot in the param area. */
2304 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORER4_MEMBASE_REG, ARMREG_SP, (cfg->param_area - 8), in->dreg);
2305 creg = mono_alloc_ireg (cfg);
2306 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOAD_MEMBASE, creg, ARMREG_SP, (cfg->param_area - 8));
2307 mono_call_inst_add_outarg_reg (cfg, call, creg, ainfo->reg, FALSE);
2310 if (IS_SOFT_FLOAT) {
2311 MONO_INST_NEW (cfg, ins, OP_FGETLOW32);
2312 ins->dreg = mono_alloc_ireg (cfg);
2313 ins->sreg1 = in->dreg;
2314 MONO_ADD_INS (cfg->cbb, ins);
2315 mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, ainfo->reg, FALSE);
2317 MONO_INST_NEW (cfg, ins, OP_FGETHIGH32);
2318 ins->dreg = mono_alloc_ireg (cfg);
2319 ins->sreg1 = in->dreg;
2320 MONO_ADD_INS (cfg->cbb, ins);
2321 mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, ainfo->reg + 1, FALSE);
/* Double in a register pair: spill to scratch slot, reload both words. */
2325 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORER8_MEMBASE_REG, ARMREG_SP, (cfg->param_area - 8), in->dreg);
2326 creg = mono_alloc_ireg (cfg);
2327 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOAD_MEMBASE, creg, ARMREG_SP, (cfg->param_area - 8));
2328 mono_call_inst_add_outarg_reg (cfg, call, creg, ainfo->reg, FALSE);
2329 creg = mono_alloc_ireg (cfg);
2330 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOAD_MEMBASE, creg, ARMREG_SP, (cfg->param_area - 8 + 4));
2331 mono_call_inst_add_outarg_reg (cfg, call, creg, ainfo->reg + 1, FALSE);
2334 cfg->flags |= MONO_CFG_HAS_FPOUT;
2336 MONO_INST_NEW (cfg, ins, OP_MOVE);
2337 ins->dreg = mono_alloc_ireg (cfg);
2338 ins->sreg1 = in->dreg;
2339 MONO_ADD_INS (cfg->cbb, ins);
2341 mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, ainfo->reg, FALSE);
2344 case RegTypeStructByAddr:
2347 /* FIXME: where is the data allocated? */
2348 arg->backend.reg3 = ainfo->reg;
2349 call->used_iregs |= 1 << ainfo->reg;
/* Dead path: kept for reference, asserts if ever reached. */
2350 g_assert_not_reached ();
2353 case RegTypeStructByVal:
2354 case RegTypeGSharedVtInReg:
2355 case RegTypeGSharedVtOnStack:
/* By-value structs are lowered later in mono_arch_emit_outarg_vt (),
 * which receives a private copy of the ArgInfo via inst_p1. */
2356 MONO_INST_NEW (cfg, ins, OP_OUTARG_VT);
2357 ins->opcode = OP_OUTARG_VT;
2358 ins->sreg1 = in->dreg;
2359 ins->klass = in->klass;
2360 ins->inst_p0 = call;
2361 ins->inst_p1 = mono_mempool_alloc (cfg->mempool, sizeof (ArgInfo));
2362 memcpy (ins->inst_p1, ainfo, sizeof (ArgInfo));
2363 mono_call_inst_add_outarg_vt (cfg, call, ins);
2364 MONO_ADD_INS (cfg->cbb, ins);
/* RegTypeBase: argument lives at [sp + offset] in the outgoing area. */
2367 if (!t->byref && ((t->type == MONO_TYPE_I8) || (t->type == MONO_TYPE_U8))) {
2368 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, ARMREG_SP, ainfo->offset, in->dreg);
2369 } else if (!t->byref && ((t->type == MONO_TYPE_R4) || (t->type == MONO_TYPE_R8))) {
2370 if (t->type == MONO_TYPE_R8) {
2371 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORER8_MEMBASE_REG, ARMREG_SP, ainfo->offset, in->dreg);
2374 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, ARMREG_SP, ainfo->offset, in->dreg);
2376 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORER4_MEMBASE_REG, ARMREG_SP, ainfo->offset, in->dreg);
2379 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, ARMREG_SP, ainfo->offset, in->dreg);
2382 case RegTypeBaseGen:
/* Split i8: one word on the stack, the other in r3 (endian-dependent). */
2383 if (!t->byref && ((t->type == MONO_TYPE_I8) || (t->type == MONO_TYPE_U8))) {
2384 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, ARMREG_SP, ainfo->offset, (G_BYTE_ORDER == G_BIG_ENDIAN) ? in->dreg + 1 : in->dreg + 2);
2385 MONO_INST_NEW (cfg, ins, OP_MOVE);
2386 ins->dreg = mono_alloc_ireg (cfg);
2387 ins->sreg1 = G_BYTE_ORDER == G_BIG_ENDIAN ? in->dreg + 2 : in->dreg + 1;
2388 MONO_ADD_INS (cfg->cbb, ins);
2389 mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, ARMREG_R3, FALSE);
2390 } else if (!t->byref && (t->type == MONO_TYPE_R8)) {
2393 /* This should work for soft-float as well */
2395 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORER8_MEMBASE_REG, ARMREG_SP, (cfg->param_area - 8), in->dreg);
2396 creg = mono_alloc_ireg (cfg);
2397 mono_call_inst_add_outarg_reg (cfg, call, creg, ARMREG_R3, FALSE);
2398 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOAD_MEMBASE, creg, ARMREG_SP, (cfg->param_area - 8));
2399 creg = mono_alloc_ireg (cfg);
2400 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOAD_MEMBASE, creg, ARMREG_SP, (cfg->param_area - 4));
2401 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, ARMREG_SP, ainfo->offset, creg);
2402 cfg->flags |= MONO_CFG_HAS_FPOUT;
2404 g_assert_not_reached ();
/* RegTypeFP (armhf): doubles move directly; floats need the workaround
 * below because the register allocator can't model overlapping s/d regs. */
2408 int fdreg = mono_alloc_freg (cfg);
2410 if (ainfo->size == 8) {
2411 MONO_INST_NEW (cfg, ins, OP_FMOVE);
2412 ins->sreg1 = in->dreg;
2414 MONO_ADD_INS (cfg->cbb, ins);
2416 mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, ainfo->reg, TRUE);
2421 * Mono's register allocator doesn't speak single-precision registers that
2422 * overlap double-precision registers (i.e. armhf). So we have to work around
2423 * the register allocator and load the value from memory manually.
2425 * So we create a variable for the float argument and an instruction to store
2426 * the argument into the variable. We then store the list of these arguments
2427 * in cfg->float_args. This list is then used by emit_float_args later to
2428 * pass the arguments in the various call opcodes.
2430 * This is not very nice, and we should really try to fix the allocator.
2433 MonoInst *float_arg = mono_compile_create_var (cfg, &mono_defaults.single_class->byval_arg, OP_LOCAL);
2435 /* Make sure the instruction isn't seen as pointless and removed.
2437 float_arg->flags |= MONO_INST_VOLATILE;
2439 MONO_EMIT_NEW_UNALU (cfg, OP_FMOVE, float_arg->dreg, in->dreg);
2441 /* We use the dreg to look up the instruction later. The hreg is used to
2442 * emit the instruction that loads the value into the FP reg.
2444 fad = mono_mempool_alloc0 (cfg->mempool, sizeof (FloatArgData));
2445 fad->vreg = float_arg->dreg;
2446 fad->hreg = ainfo->reg;
2448 call->float_args = g_slist_append_mempool (cfg->mempool, call->float_args, fad);
2451 call->used_iregs |= 1 << ainfo->reg;
2452 cfg->flags |= MONO_CFG_HAS_FPOUT;
2456 g_assert_not_reached ();
2460 /* Handle the case where there are no implicit arguments */
2461 if (!sig->pinvoke && (sig->call_convention == MONO_CALL_VARARG) && (n == sig->sentinelpos))
2462 emit_sig_cookie (cfg, call, cinfo);
2464 if (cinfo->ret.storage == RegTypeStructByVal) {
2465 /* The JIT will transform this into a normal call */
2466 call->vret_in_reg = TRUE;
2467 } else if (cinfo->vtype_retaddr) {
/* Pass the vret buffer address in the register chosen by get_call_info. */
2469 MONO_INST_NEW (cfg, vtarg, OP_MOVE);
2470 vtarg->sreg1 = call->vret_var->dreg;
2471 vtarg->dreg = mono_alloc_preg (cfg);
2472 MONO_ADD_INS (cfg->cbb, vtarg);
2474 mono_call_inst_add_outarg_reg (cfg, call, vtarg->dreg, cinfo->ret.reg, FALSE);
2477 call->stack_usage = cinfo->stack_usage;
/*
 * mono_arch_emit_outarg_vt:
 * Lower an OP_OUTARG_VT created by mono_arch_emit_call (): pass a
 * gsharedvt argument by address (register or stack), or copy a
 * by-value struct word-by-word into its argument registers and
 * memcpy any overflow words into the outgoing stack area.
 */
2483 mono_arch_emit_outarg_vt (MonoCompile *cfg, MonoInst *ins, MonoInst *src)
2485 MonoCallInst *call = (MonoCallInst*)ins->inst_p0;
2486 ArgInfo *ainfo = ins->inst_p1;
2487 int ovf_size = ainfo->vtsize;
2488 int doffset = ainfo->offset;
2489 int struct_size = ainfo->struct_size;
2490 int i, soffset, dreg, tmpreg;
2492 if (ainfo->storage == RegTypeGSharedVtInReg) {
2494 mono_call_inst_add_outarg_reg (cfg, call, src->dreg, ainfo->reg, FALSE);
2497 if (ainfo->storage == RegTypeGSharedVtOnStack) {
2498 /* Pass by addr on stack */
2499 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, ARMREG_SP, ainfo->offset, src->dreg);
/* Load each register-resident word of the struct; partial trailing
 * words (1-3 bytes) are assembled byte-by-byte to avoid reading past
 * the end of the source struct. */
2504 for (i = 0; i < ainfo->size; ++i) {
2505 dreg = mono_alloc_ireg (cfg);
2506 switch (struct_size) {
2508 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, dreg, src->dreg, soffset);
2511 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, dreg, src->dreg, soffset);
/* 3-byte remainder: combine three byte loads with shifts and ors. */
2514 tmpreg = mono_alloc_ireg (cfg);
2515 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, dreg, src->dreg, soffset);
2516 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, tmpreg, src->dreg, soffset + 1);
2517 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, tmpreg, tmpreg, 8);
2518 MONO_EMIT_NEW_BIALU (cfg, OP_IOR, dreg, dreg, tmpreg);
2519 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, tmpreg, src->dreg, soffset + 2);
2520 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, tmpreg, tmpreg, 16);
2521 MONO_EMIT_NEW_BIALU (cfg, OP_IOR, dreg, dreg, tmpreg);
2524 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, dreg, src->dreg, soffset);
2527 mono_call_inst_add_outarg_reg (cfg, call, dreg, ainfo->reg + i, FALSE);
2528 soffset += sizeof (gpointer);
2529 struct_size -= sizeof (gpointer);
2531 //g_print ("vt size: %d at R%d + %d\n", doffset, vt->inst_basereg, vt->inst_offset);
/* Remaining words go to the stack; byte-granular copy for misaligned tails. */
2533 mini_emit_memcpy (cfg, ARMREG_SP, doffset, src->dreg, soffset, MIN (ovf_size * sizeof (gpointer), struct_size), struct_size < 4 ? 1 : 4);
/*
 * mono_arch_emit_setret:
 *   Emit IR that moves VAL into the return location of METHOD.
 *   I8/U8 results use OP_SETLRET with the two halves of the vreg pair
 *   (LLVM gets a plain move); R4/R8 results depend on the FPU mode:
 *   with no FPU the value was already converted to integer form, with
 *   VFP an OP_SETFRET is emitted; everything else is a plain OP_MOVE.
 */
2537 mono_arch_emit_setret (MonoCompile *cfg, MonoMethod *method, MonoInst *val)
2539 MonoType *ret = mini_type_get_underlying_type (cfg->generic_sharing_context, mono_method_signature (method)->ret);
2542 if (ret->type == MONO_TYPE_I8 || ret->type == MONO_TYPE_U8) {
2545 if (COMPILE_LLVM (cfg)) {
2546 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, cfg->ret->dreg, val->dreg);
/* Longs live in a vreg pair: dreg + 1 = low word, dreg + 2 = high word. */
2548 MONO_INST_NEW (cfg, ins, OP_SETLRET);
2549 ins->sreg1 = val->dreg + 1;
2550 ins->sreg2 = val->dreg + 2;
2551 MONO_ADD_INS (cfg->cbb, ins);
2556 case MONO_ARM_FPU_NONE:
2557 if (ret->type == MONO_TYPE_R8) {
2560 MONO_INST_NEW (cfg, ins, OP_SETFRET);
2561 ins->dreg = cfg->ret->dreg;
2562 ins->sreg1 = val->dreg;
2563 MONO_ADD_INS (cfg->cbb, ins);
2566 if (ret->type == MONO_TYPE_R4) {
2567 /* Already converted to an int in method_to_ir () */
2568 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, cfg->ret->dreg, val->dreg);
2572 case MONO_ARM_FPU_VFP:
2573 case MONO_ARM_FPU_VFP_HARD:
2574 if (ret->type == MONO_TYPE_R8 || ret->type == MONO_TYPE_R4) {
2577 MONO_INST_NEW (cfg, ins, OP_SETFRET);
2578 ins->dreg = cfg->ret->dreg;
2579 ins->sreg1 = val->dreg;
2580 MONO_ADD_INS (cfg->cbb, ins);
2585 g_assert_not_reached ();
/* Default: integer/pointer result, plain move into the return vreg. */
2589 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, cfg->ret->dreg, val->dreg);
2592 #endif /* #ifndef DISABLE_JIT */
2595 mono_arch_is_inst_imm (gint64 imm)
2601 MonoMethodSignature *sig;
2604 MonoType **param_types;
/*
 * dyn_call_supported:
 *   Return whether the dynamic-call path (mono_arch_start_dyn_call)
 *   can handle SIG with the register layout in CINFO.  Rejects
 *   signatures whose arguments exceed PARAM_REGS plus
 *   DYN_CALL_STACK_ARGS slots, and unsupported storage kinds.
 */
2608 dyn_call_supported (CallInfo *cinfo, MonoMethodSignature *sig)
2612 if (sig->hasthis + sig->param_count > PARAM_REGS + DYN_CALL_STACK_ARGS)
2615 switch (cinfo->ret.storage) {
2617 case RegTypeGeneral:
2618 case RegTypeIRegPair:
2619 case RegTypeStructByAddr:
2630 for (i = 0; i < cinfo->nargs; ++i) {
2631 ArgInfo *ainfo = &cinfo->args [i];
2634 switch (ainfo->storage) {
2635 case RegTypeGeneral:
2637 case RegTypeIRegPair:
/* Stack arguments must fit in the DynCallArgs spill area. */
2640 if (ainfo->offset >= (DYN_CALL_STACK_ARGS * sizeof (gpointer)))
2643 case RegTypeStructByVal:
2644 if (ainfo->size == 0)
2645 last_slot = PARAM_REGS + (ainfo->offset / 4) + ainfo->vtsize;
2647 last_slot = ainfo->reg + ainfo->size + ainfo->vtsize;
2648 if (last_slot >= PARAM_REGS + DYN_CALL_STACK_ARGS)
2656 // FIXME: Can't use cinfo only as it doesn't contain info about I8/float */
2657 for (i = 0; i < sig->param_count; ++i) {
2658 MonoType *t = sig->params [i];
2663 t = mini_replace_type (t);
/*
 * mono_arch_dyn_call_prepare:
 *   Precompute the information mono_arch_start_dyn_call needs for SIG.
 *   Returns NULL when the signature is not supported.  The returned
 *   info owns a CallInfo and a param_types array, released by
 *   mono_arch_dyn_call_free.
 */
2686 mono_arch_dyn_call_prepare (MonoMethodSignature *sig)
2688 ArchDynCallInfo *info;
2692 cinfo = get_call_info (NULL, NULL, sig);
2694 if (!dyn_call_supported (cinfo, sig)) {
2699 info = g_new0 (ArchDynCallInfo, 1);
2700 // FIXME: Preprocess the info to speed up start_dyn_call ()
2702 info->cinfo = cinfo;
2703 info->rtype = mini_replace_type (sig->ret);
2704 info->param_types = g_new0 (MonoType*, sig->param_count);
2705 for (i = 0; i < sig->param_count; ++i)
2706 info->param_types [i] = mini_replace_type (sig->params [i]);
2708 return (MonoDynCallInfo*)info;
/*
 * mono_arch_dyn_call_free:
 *   Release the data allocated by mono_arch_dyn_call_prepare.
 *   NOTE(review): only the cinfo free is visible here; confirm that
 *   info->param_types (allocated in prepare) is freed as well,
 *   otherwise it leaks.
 */
2712 mono_arch_dyn_call_free (MonoDynCallInfo *info)
2714 ArchDynCallInfo *ainfo = (ArchDynCallInfo*)info;
2716 g_free (ainfo->cinfo);
/*
 * mono_arch_start_dyn_call:
 *   Marshal ARGS into the DynCallArgs structure in BUF according to
 *   the layout precomputed in INFO, so the dyn-call thunk can place
 *   them into registers/stack.  'this' and the vret address go into
 *   the first register slots; each remaining argument is widened or
 *   split into 32-bit register slots by type.
 */
2721 mono_arch_start_dyn_call (MonoDynCallInfo *info, gpointer **args, guint8 *ret, guint8 *buf, int buf_len)
2723 ArchDynCallInfo *dinfo = (ArchDynCallInfo*)info;
2724 DynCallArgs *p = (DynCallArgs*)buf;
2725 int arg_index, greg, i, j, pindex;
2726 MonoMethodSignature *sig = dinfo->sig;
2728 g_assert (buf_len >= sizeof (DynCallArgs));
/* 'this' (or the vret arg when it comes first) occupies the first slot. */
2737 if (sig->hasthis || dinfo->cinfo->vret_arg_index == 1) {
2738 p->regs [greg ++] = (mgreg_t)*(args [arg_index ++]);
2743 if (dinfo->cinfo->vtype_retaddr)
2744 p->regs [greg ++] = (mgreg_t)ret;
2746 for (i = pindex; i < sig->param_count; i++) {
2747 MonoType *t = dinfo->param_types [i];
2748 gpointer *arg = args [arg_index ++];
2749 ArgInfo *ainfo = &dinfo->cinfo->args [i + sig->hasthis];
/* Register args use their register index as the slot; stack args are
 * stored after the PARAM_REGS register slots. */
2752 if (ainfo->storage == RegTypeGeneral || ainfo->storage == RegTypeIRegPair || ainfo->storage == RegTypeStructByVal)
2754 else if (ainfo->storage == RegTypeBase)
2755 slot = PARAM_REGS + (ainfo->offset / 4);
2757 g_assert_not_reached ();
2760 p->regs [slot] = (mgreg_t)*arg;
2765 case MONO_TYPE_STRING:
2766 case MONO_TYPE_CLASS:
2767 case MONO_TYPE_ARRAY:
2768 case MONO_TYPE_SZARRAY:
2769 case MONO_TYPE_OBJECT:
2773 p->regs [slot] = (mgreg_t)*arg;
2775 case MONO_TYPE_BOOLEAN:
2777 p->regs [slot] = *(guint8*)arg;
2780 p->regs [slot] = *(gint8*)arg;
2783 p->regs [slot] = *(gint16*)arg;
2786 case MONO_TYPE_CHAR:
2787 p->regs [slot] = *(guint16*)arg;
2790 p->regs [slot] = *(gint32*)arg;
2793 p->regs [slot] = *(guint32*)arg;
/* 64-bit values take two consecutive slots. */
2797 p->regs [slot ++] = (mgreg_t)arg [0];
2798 p->regs [slot] = (mgreg_t)arg [1];
2801 p->regs [slot] = *(mgreg_t*)arg;
2804 p->regs [slot ++] = (mgreg_t)arg [0];
2805 p->regs [slot] = (mgreg_t)arg [1];
2807 case MONO_TYPE_GENERICINST:
2808 if (MONO_TYPE_IS_REFERENCE (t)) {
2809 p->regs [slot] = (mgreg_t)*arg;
2814 case MONO_TYPE_VALUETYPE:
2815 g_assert (ainfo->storage == RegTypeStructByVal);
2817 if (ainfo->size == 0)
2818 slot = PARAM_REGS + (ainfo->offset / 4);
/* Copy the struct word by word into register + overflow slots. */
2822 for (j = 0; j < ainfo->size + ainfo->vtsize; ++j)
2823 p->regs [slot ++] = ((mgreg_t*)arg) [j];
2826 g_assert_not_reached ();
/*
 * mono_arch_finish_dyn_call:
 *   Store the native return value left in BUF by the dyn-call thunk
 *   into the caller-provided return buffer, converting according to
 *   the declared return type.  res/res2 hold the raw contents of the
 *   two integer result registers.
 */
2832 mono_arch_finish_dyn_call (MonoDynCallInfo *info, guint8 *buf)
2834 ArchDynCallInfo *ainfo = (ArchDynCallInfo*)info;
2835 MonoType *ptype = ainfo->rtype;
2836 guint8 *ret = ((DynCallArgs*)buf)->ret;
2837 mgreg_t res = ((DynCallArgs*)buf)->res;
2838 mgreg_t res2 = ((DynCallArgs*)buf)->res2;
2840 switch (ptype->type) {
2841 case MONO_TYPE_VOID:
2842 *(gpointer*)ret = NULL;
2844 case MONO_TYPE_STRING:
2845 case MONO_TYPE_CLASS:
2846 case MONO_TYPE_ARRAY:
2847 case MONO_TYPE_SZARRAY:
2848 case MONO_TYPE_OBJECT:
2852 *(gpointer*)ret = (gpointer)res;
2858 case MONO_TYPE_BOOLEAN:
2859 *(guint8*)ret = res;
2862 *(gint16*)ret = res;
2865 case MONO_TYPE_CHAR:
2866 *(guint16*)ret = res;
2869 *(gint32*)ret = res;
2872 *(guint32*)ret = res;
2876 /* This handles endianness as well */
2877 ((gint32*)ret) [0] = res;
2878 ((gint32*)ret) [1] = res2;
2880 case MONO_TYPE_GENERICINST:
2881 if (MONO_TYPE_IS_REFERENCE (ptype)) {
2882 *(gpointer*)ret = (gpointer)res;
2887 case MONO_TYPE_VALUETYPE:
2888 g_assert (ainfo->cinfo->vtype_retaddr);
2893 *(float*)ret = *(float*)&res;
2895 case MONO_TYPE_R8: {
/* Fixed: '®s' was a mis-encoded '&regs' (the HTML entity '&reg;'
 * decoded as the (R) sign); reinterpret the two-word regs spill
 * buffer as a double. */
2902 *(double*)ret = *(double*)&regs;
2906 g_assert_not_reached ();
2913 * Allow tracing to work with this interface (with an optional argument)
/*
 * mono_arch_instrument_prolog:
 *   Emit a call to the tracing function FUNC at method entry; R0 gets
 *   the method, R1 is a placeholder NULL frame pointer, and the callee
 *   address is loaded into R2 and called indirectly.
 */
2917 mono_arch_instrument_prolog (MonoCompile *cfg, void *func, void *p, gboolean enable_arguments)
2921 code = mono_arm_emit_load_imm (code, ARMREG_R0, (guint32)cfg->method);
2922 ARM_MOV_REG_IMM8 (code, ARMREG_R1, 0); /* NULL ebp for now */
2923 code = mono_arm_emit_load_imm (code, ARMREG_R2, (guint32)func);
2924 code = emit_call_reg (code, ARMREG_R2);
/*
 * mono_arch_instrument_epilog_full:
 *   Emit a call to the tracing function FUNC at method exit.  The
 *   return value is saved to the param area before the call and
 *   restored afterwards; save_mode selects how many integer/FP
 *   registers hold the result for the method's return type.
 */
2938 mono_arch_instrument_epilog_full (MonoCompile *cfg, void *func, void *p, gboolean enable_arguments, gboolean preserve_argument_registers)
2941 int save_mode = SAVE_NONE;
2943 MonoMethod *method = cfg->method;
2944 MonoType *ret_type = mini_type_get_underlying_type (cfg->generic_sharing_context, mono_method_signature (method)->ret);
2945 int rtype = ret_type->type;
2946 int save_offset = cfg->param_area;
2950 offset = code - cfg->native_code;
2951 /* we need about 16 instructions */
2952 if (offset > (cfg->code_size - 16 * 4)) {
2953 cfg->code_size *= 2;
2954 cfg->native_code = g_realloc (cfg->native_code, cfg->code_size);
2955 code = cfg->native_code + offset;
2958 case MONO_TYPE_VOID:
2959 /* special case string .ctor icall */
/* Fixed: strcmp returns 0 on a match, so the original un-negated test
 * selected every String method EXCEPT .ctor; the special case must
 * fire when the name IS ".ctor" (the icall returns the new string in
 * R0 despite the void managed signature). */
2960 if (!strcmp (".ctor", method->name) && method->klass == mono_defaults.string_class)
2961 save_mode = SAVE_ONE;
2963 save_mode = SAVE_NONE;
2967 save_mode = SAVE_TWO;
2971 save_mode = SAVE_ONE_FP;
2973 save_mode = SAVE_ONE;
2977 save_mode = SAVE_TWO_FP;
2979 save_mode = SAVE_TWO;
2981 case MONO_TYPE_GENERICINST:
2982 if (!mono_type_generic_inst_is_valuetype (ret_type)) {
2983 save_mode = SAVE_ONE;
2987 case MONO_TYPE_VALUETYPE:
2988 save_mode = SAVE_STRUCT;
2991 save_mode = SAVE_ONE;
/* Spill the return value before clobbering the result registers with
 * the tracer's arguments. */
2995 switch (save_mode) {
2997 ARM_STR_IMM (code, ARMREG_R0, cfg->frame_reg, save_offset);
2998 ARM_STR_IMM (code, ARMREG_R1, cfg->frame_reg, save_offset + 4);
2999 if (enable_arguments) {
3000 ARM_MOV_REG_REG (code, ARMREG_R2, ARMREG_R1);
3001 ARM_MOV_REG_REG (code, ARMREG_R1, ARMREG_R0);
3005 ARM_STR_IMM (code, ARMREG_R0, cfg->frame_reg, save_offset);
3006 if (enable_arguments) {
3007 ARM_MOV_REG_REG (code, ARMREG_R1, ARMREG_R0);
3011 ARM_FSTS (code, ARM_VFP_F0, cfg->frame_reg, save_offset);
3012 if (enable_arguments) {
3013 ARM_FMRS (code, ARMREG_R1, ARM_VFP_F0);
3017 ARM_FSTD (code, ARM_VFP_D0, cfg->frame_reg, save_offset);
3018 if (enable_arguments) {
3019 ARM_FMDRR (code, ARMREG_R1, ARMREG_R2, ARM_VFP_D0);
3023 if (enable_arguments) {
3024 /* FIXME: get the actual address */
3025 ARM_MOV_REG_REG (code, ARMREG_R1, ARMREG_R0);
/* Call the tracer with (method, saved-result...) and restore the
 * spilled return value afterwards. */
3033 code = mono_arm_emit_load_imm (code, ARMREG_R0, (guint32)cfg->method);
3034 code = mono_arm_emit_load_imm (code, ARMREG_IP, (guint32)func);
3035 code = emit_call_reg (code, ARMREG_IP);
3037 switch (save_mode) {
3039 ARM_LDR_IMM (code, ARMREG_R0, cfg->frame_reg, save_offset);
3040 ARM_LDR_IMM (code, ARMREG_R1, cfg->frame_reg, save_offset + 4);
3043 ARM_LDR_IMM (code, ARMREG_R0, cfg->frame_reg, save_offset);
3046 ARM_FLDS (code, ARM_VFP_F0, cfg->frame_reg, save_offset);
3049 ARM_FLDD (code, ARM_VFP_D0, cfg->frame_reg, save_offset);
3060 * The immediate field for cond branches is big enough for all reasonable methods
/*
 * EMIT_COND_BRANCH_FLAGS: emit a conditional branch to ins->inst_true_bb.
 * The direct-offset fast path is disabled ("0 &&"); instead a patch-info
 * entry is recorded and the branch is emitted with a zero displacement to
 * be fixed up later by arm_patch.
 */
3062 #define EMIT_COND_BRANCH_FLAGS(ins,condcode) \
3063 if (0 && ins->inst_true_bb->native_offset) { \
3064 ARM_B_COND (code, (condcode), (code - cfg->native_code + ins->inst_true_bb->native_offset) & 0xffffff); \
3066 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_BB, ins->inst_true_bb); \
3067 ARM_B_COND (code, (condcode), 0); \
3070 #define EMIT_COND_BRANCH(ins,cond) EMIT_COND_BRANCH_FLAGS(ins, branch_cc_table [(cond)])
3072 /* emit an exception if condition is fail
3074 * We assign the extra code used to throw the implicit exceptions
3075 * to cfg->bb_exit as far as the big branch handling is concerned
3077 #define EMIT_COND_SYSTEM_EXCEPTION_FLAGS(condcode,exc_name) \
3079 mono_add_patch_info (cfg, code - cfg->native_code, \
3080 MONO_PATCH_INFO_EXC, exc_name); \
3081 ARM_BL_COND (code, (condcode), 0); \
3084 #define EMIT_COND_SYSTEM_EXCEPTION(cond,exc_name) EMIT_COND_SYSTEM_EXCEPTION_FLAGS(branch_cc_table [(cond)], (exc_name))
/* Pass 1: no ARM-specific peepholes are applied here. */
3087 mono_arch_peephole_pass_1 (MonoCompile *cfg, MonoBasicBlock *bb)
/*
 * mono_arch_peephole_pass_2:
 *   Local peephole optimizations over BB: folds a load that follows a
 *   store/load of the same [basereg + offset] into a move/conversion/
 *   constant, and removes redundant or mutually-cancelling moves.
 */
3092 mono_arch_peephole_pass_2 (MonoCompile *cfg, MonoBasicBlock *bb)
3094 MonoInst *ins, *n, *last_ins = NULL;
3096 MONO_BB_FOR_EACH_INS_SAFE (bb, n, ins) {
3097 switch (ins->opcode) {
3100 /* Already done by an arch-independent pass */
3102 case OP_LOAD_MEMBASE:
3103 case OP_LOADI4_MEMBASE:
3105 * OP_STORE_MEMBASE_REG reg, offset(basereg)
3106 * OP_LOAD_MEMBASE offset(basereg), reg
3108 if (last_ins && (last_ins->opcode == OP_STOREI4_MEMBASE_REG
3109 || last_ins->opcode == OP_STORE_MEMBASE_REG) &&
3110 ins->inst_basereg == last_ins->inst_destbasereg &&
3111 ins->inst_offset == last_ins->inst_offset) {
3112 if (ins->dreg == last_ins->sreg1) {
3113 MONO_DELETE_INS (bb, ins);
3116 //static int c = 0; printf ("MATCHX %s %d\n", cfg->method->name,c++);
3117 ins->opcode = OP_MOVE;
3118 ins->sreg1 = last_ins->sreg1;
3122 * Note: reg1 must be different from the basereg in the second load
3123 * OP_LOAD_MEMBASE offset(basereg), reg1
3124 * OP_LOAD_MEMBASE offset(basereg), reg2
3126 * OP_LOAD_MEMBASE offset(basereg), reg1
3127 * OP_MOVE reg1, reg2
/* NOTE(review): '} if (...)' below lacks an 'else' — the branch is
 * re-evaluated even when the previous one matched; confirm whether
 * this is intentional or a missing 'else'. */
3129 } if (last_ins && (last_ins->opcode == OP_LOADI4_MEMBASE
3130 || last_ins->opcode == OP_LOAD_MEMBASE) &&
3131 ins->inst_basereg != last_ins->dreg &&
3132 ins->inst_basereg == last_ins->inst_basereg &&
3133 ins->inst_offset == last_ins->inst_offset) {
3135 if (ins->dreg == last_ins->dreg) {
3136 MONO_DELETE_INS (bb, ins);
3139 ins->opcode = OP_MOVE;
3140 ins->sreg1 = last_ins->dreg;
3143 //g_assert_not_reached ();
3147 * OP_STORE_MEMBASE_IMM imm, offset(basereg)
3148 * OP_LOAD_MEMBASE offset(basereg), reg
3150 * OP_STORE_MEMBASE_IMM imm, offset(basereg)
3151 * OP_ICONST reg, imm
3153 } else if (last_ins && (last_ins->opcode == OP_STOREI4_MEMBASE_IMM
3154 || last_ins->opcode == OP_STORE_MEMBASE_IMM) &&
3155 ins->inst_basereg == last_ins->inst_destbasereg &&
3156 ins->inst_offset == last_ins->inst_offset) {
3157 //static int c = 0; printf ("MATCHX %s %d\n", cfg->method->name,c++);
3158 ins->opcode = OP_ICONST;
3159 ins->inst_c0 = last_ins->inst_imm;
/* Rule deliberately trips an assert until it has been validated. */
3160 g_assert_not_reached (); // check this rule
3164 case OP_LOADU1_MEMBASE:
3165 case OP_LOADI1_MEMBASE:
3166 if (last_ins && (last_ins->opcode == OP_STOREI1_MEMBASE_REG) &&
3167 ins->inst_basereg == last_ins->inst_destbasereg &&
3168 ins->inst_offset == last_ins->inst_offset) {
3169 ins->opcode = (ins->opcode == OP_LOADI1_MEMBASE) ? OP_ICONV_TO_I1 : OP_ICONV_TO_U1;
3170 ins->sreg1 = last_ins->sreg1;
3173 case OP_LOADU2_MEMBASE:
3174 case OP_LOADI2_MEMBASE:
3175 if (last_ins && (last_ins->opcode == OP_STOREI2_MEMBASE_REG) &&
3176 ins->inst_basereg == last_ins->inst_destbasereg &&
3177 ins->inst_offset == last_ins->inst_offset) {
3178 ins->opcode = (ins->opcode == OP_LOADI2_MEMBASE) ? OP_ICONV_TO_I2 : OP_ICONV_TO_U2;
3179 ins->sreg1 = last_ins->sreg1;
3183 ins->opcode = OP_MOVE;
/* Self-move: delete. */
3187 if (ins->dreg == ins->sreg1) {
3188 MONO_DELETE_INS (bb, ins);
3192 * OP_MOVE sreg, dreg
3193 * OP_MOVE dreg, sreg
3195 if (last_ins && last_ins->opcode == OP_MOVE &&
3196 ins->sreg1 == last_ins->dreg &&
3197 ins->dreg == last_ins->sreg1) {
3198 MONO_DELETE_INS (bb, ins);
3206 bb->last_ins = last_ins;
3210 * the branch_cc_table should maintain the order of these
/* Condition codes indexed in the CMP_* relation order noted above. */
3224 branch_cc_table [] = {
/* ADD_NEW_INS: allocate DEST with opcode OP and insert it before the
 * current 'ins' in bb (used throughout the lowering pass below). */
3238 #define ADD_NEW_INS(cfg,dest,op) do { \
3239 MONO_INST_NEW ((cfg), (dest), (op)); \
3240 mono_bblock_insert_before_ins (bb, ins, (dest)); \
/*
 * map_to_reg_reg_op:
 *   Map an opcode with an immediate or membase operand to its
 *   register-register / register-index equivalent, used when the
 *   immediate/offset does not fit the instruction encoding and has
 *   been materialized into a register.
 */
3244 map_to_reg_reg_op (int op)
3253 case OP_COMPARE_IMM:
3255 case OP_ICOMPARE_IMM:
3269 case OP_LOAD_MEMBASE:
3270 return OP_LOAD_MEMINDEX;
3271 case OP_LOADI4_MEMBASE:
3272 return OP_LOADI4_MEMINDEX;
3273 case OP_LOADU4_MEMBASE:
3274 return OP_LOADU4_MEMINDEX;
3275 case OP_LOADU1_MEMBASE:
3276 return OP_LOADU1_MEMINDEX;
3277 case OP_LOADI2_MEMBASE:
3278 return OP_LOADI2_MEMINDEX;
3279 case OP_LOADU2_MEMBASE:
3280 return OP_LOADU2_MEMINDEX;
3281 case OP_LOADI1_MEMBASE:
3282 return OP_LOADI1_MEMINDEX;
3283 case OP_STOREI1_MEMBASE_REG:
3284 return OP_STOREI1_MEMINDEX;
3285 case OP_STOREI2_MEMBASE_REG:
3286 return OP_STOREI2_MEMINDEX;
3287 case OP_STOREI4_MEMBASE_REG:
3288 return OP_STOREI4_MEMINDEX;
3289 case OP_STORE_MEMBASE_REG:
3290 return OP_STORE_MEMINDEX;
3291 case OP_STORER4_MEMBASE_REG:
3292 return OP_STORER4_MEMINDEX;
3293 case OP_STORER8_MEMBASE_REG:
3294 return OP_STORER8_MEMINDEX;
/* _IMM stores become _REG stores: the immediate is loaded first. */
3295 case OP_STORE_MEMBASE_IMM:
3296 return OP_STORE_MEMBASE_REG;
3297 case OP_STOREI1_MEMBASE_IMM:
3298 return OP_STOREI1_MEMBASE_REG;
3299 case OP_STOREI2_MEMBASE_IMM:
3300 return OP_STOREI2_MEMBASE_REG;
3301 case OP_STOREI4_MEMBASE_IMM:
3302 return OP_STOREI4_MEMBASE_REG;
3304 g_assert_not_reached ();
3308 * Remove from the instruction list the instructions that can't be
3309 * represented with very simple instructions with no register
/*
 * mono_arch_lowering_pass:
 *   Rewrite instructions whose immediates/offsets do not fit ARM
 *   encodings: immediates that are not valid rotated 8-bit values are
 *   materialized with OP_ICONST into a fresh vreg and the opcode is
 *   switched to its reg-reg form (see map_to_reg_reg_op); over-range
 *   load/store offsets are folded into the base register.
 */
3313 mono_arch_lowering_pass (MonoCompile *cfg, MonoBasicBlock *bb)
3315 MonoInst *ins, *temp, *last_ins = NULL;
3316 int rot_amount, imm8, low_imm;
3318 MONO_BB_FOR_EACH_INS (bb, ins) {
3320 switch (ins->opcode) {
3324 case OP_COMPARE_IMM:
3325 case OP_ICOMPARE_IMM:
/* Immediate not encodable as a rotated imm8: load it into a vreg. */
3339 if ((imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount)) < 0) {
3340 ADD_NEW_INS (cfg, temp, OP_ICONST);
3341 temp->inst_c0 = ins->inst_imm;
3342 temp->dreg = mono_alloc_ireg (cfg);
3343 ins->sreg2 = temp->dreg;
3344 ins->opcode = mono_op_imm_to_op (ins->opcode);
3346 if (ins->opcode == OP_SBB || ins->opcode == OP_ISBB || ins->opcode == OP_SUBCC)
/* Multiply-by-immediate strength reduction: *1 -> move, *0 -> const 0,
 * power of two -> shift; otherwise fall back to a real multiply. */
3352 if (ins->inst_imm == 1) {
3353 ins->opcode = OP_MOVE;
3356 if (ins->inst_imm == 0) {
3357 ins->opcode = OP_ICONST;
3361 imm8 = mono_is_power_of_two (ins->inst_imm);
3363 ins->opcode = OP_SHL_IMM;
3364 ins->inst_imm = imm8;
3367 ADD_NEW_INS (cfg, temp, OP_ICONST);
3368 temp->inst_c0 = ins->inst_imm;
3369 temp->dreg = mono_alloc_ireg (cfg);
3370 ins->sreg2 = temp->dreg;
3371 ins->opcode = OP_IMUL;
3377 if (ins->next && (ins->next->opcode == OP_COND_EXC_C || ins->next->opcode == OP_COND_EXC_IC))
3378 /* ARM sets the C flag to 1 if there was _no_ overflow */
3379 ins->next->opcode = OP_COND_EXC_NC;
3382 case OP_IDIV_UN_IMM:
3384 case OP_IREM_UN_IMM:
3385 ADD_NEW_INS (cfg, temp, OP_ICONST);
3386 temp->inst_c0 = ins->inst_imm;
3387 temp->dreg = mono_alloc_ireg (cfg);
3388 ins->sreg2 = temp->dreg;
3389 ins->opcode = mono_op_imm_to_op (ins->opcode);
3391 case OP_LOCALLOC_IMM:
3392 ADD_NEW_INS (cfg, temp, OP_ICONST);
3393 temp->inst_c0 = ins->inst_imm;
3394 temp->dreg = mono_alloc_ireg (cfg);
3395 ins->sreg1 = temp->dreg;
3396 ins->opcode = OP_LOCALLOC;
3398 case OP_LOAD_MEMBASE:
3399 case OP_LOADI4_MEMBASE:
3400 case OP_LOADU4_MEMBASE:
3401 case OP_LOADU1_MEMBASE:
3402 /* we can do two things: load the immed in a register
3403 * and use an indexed load, or see if the immed can be
3404 * represented as an ad_imm + a load with a smaller offset
3405 * that fits. We just do the first for now, optimize later.
3407 if (arm_is_imm12 (ins->inst_offset))
3409 ADD_NEW_INS (cfg, temp, OP_ICONST);
3410 temp->inst_c0 = ins->inst_offset;
3411 temp->dreg = mono_alloc_ireg (cfg);
3412 ins->sreg2 = temp->dreg;
3413 ins->opcode = map_to_reg_reg_op (ins->opcode);
/* Halfword/signed-byte loads only have an 8-bit offset field. */
3415 case OP_LOADI2_MEMBASE:
3416 case OP_LOADU2_MEMBASE:
3417 case OP_LOADI1_MEMBASE:
3418 if (arm_is_imm8 (ins->inst_offset))
3420 ADD_NEW_INS (cfg, temp, OP_ICONST);
3421 temp->inst_c0 = ins->inst_offset;
3422 temp->dreg = mono_alloc_ireg (cfg);
3423 ins->sreg2 = temp->dreg;
3424 ins->opcode = map_to_reg_reg_op (ins->opcode);
/* VFP loads have no register-indexed form: fold the high part of the
 * offset into a new base register and keep a small low offset. */
3426 case OP_LOADR4_MEMBASE:
3427 case OP_LOADR8_MEMBASE:
3428 if (arm_is_fpimm8 (ins->inst_offset))
3430 low_imm = ins->inst_offset & 0x1ff;
3431 if ((imm8 = mono_arm_is_rotated_imm8 (ins->inst_offset & ~0x1ff, &rot_amount)) >= 0) {
3432 ADD_NEW_INS (cfg, temp, OP_ADD_IMM);
3433 temp->inst_imm = ins->inst_offset & ~0x1ff;
3434 temp->sreg1 = ins->inst_basereg;
3435 temp->dreg = mono_alloc_ireg (cfg);
3436 ins->inst_basereg = temp->dreg;
3437 ins->inst_offset = low_imm;
3441 ADD_NEW_INS (cfg, temp, OP_ICONST);
3442 temp->inst_c0 = ins->inst_offset;
3443 temp->dreg = mono_alloc_ireg (cfg);
3445 ADD_NEW_INS (cfg, add_ins, OP_IADD);
3446 add_ins->sreg1 = ins->inst_basereg;
3447 add_ins->sreg2 = temp->dreg;
3448 add_ins->dreg = mono_alloc_ireg (cfg);
3450 ins->inst_basereg = add_ins->dreg;
3451 ins->inst_offset = 0;
3454 case OP_STORE_MEMBASE_REG:
3455 case OP_STOREI4_MEMBASE_REG:
3456 case OP_STOREI1_MEMBASE_REG:
3457 if (arm_is_imm12 (ins->inst_offset))
3459 ADD_NEW_INS (cfg, temp, OP_ICONST);
3460 temp->inst_c0 = ins->inst_offset;
3461 temp->dreg = mono_alloc_ireg (cfg);
3462 ins->sreg2 = temp->dreg;
3463 ins->opcode = map_to_reg_reg_op (ins->opcode);
3465 case OP_STOREI2_MEMBASE_REG:
3466 if (arm_is_imm8 (ins->inst_offset))
3468 ADD_NEW_INS (cfg, temp, OP_ICONST);
3469 temp->inst_c0 = ins->inst_offset;
3470 temp->dreg = mono_alloc_ireg (cfg);
3471 ins->sreg2 = temp->dreg;
3472 ins->opcode = map_to_reg_reg_op (ins->opcode);
3474 case OP_STORER4_MEMBASE_REG:
3475 case OP_STORER8_MEMBASE_REG:
3476 if (arm_is_fpimm8 (ins->inst_offset))
3478 low_imm = ins->inst_offset & 0x1ff;
3479 if ((imm8 = mono_arm_is_rotated_imm8 (ins->inst_offset & ~ 0x1ff, &rot_amount)) >= 0 && arm_is_fpimm8 (low_imm)) {
3480 ADD_NEW_INS (cfg, temp, OP_ADD_IMM);
3481 temp->inst_imm = ins->inst_offset & ~0x1ff;
3482 temp->sreg1 = ins->inst_destbasereg;
3483 temp->dreg = mono_alloc_ireg (cfg);
3484 ins->inst_destbasereg = temp->dreg;
3485 ins->inst_offset = low_imm;
3489 ADD_NEW_INS (cfg, temp, OP_ICONST);
3490 temp->inst_c0 = ins->inst_offset;
3491 temp->dreg = mono_alloc_ireg (cfg);
3493 ADD_NEW_INS (cfg, add_ins, OP_IADD);
3494 add_ins->sreg1 = ins->inst_destbasereg;
3495 add_ins->sreg2 = temp->dreg;
3496 add_ins->dreg = mono_alloc_ireg (cfg);
3498 ins->inst_destbasereg = add_ins->dreg;
3499 ins->inst_offset = 0;
/* Immediate stores: materialize the immediate, then re-run the offset
 * handling for the resulting _REG store (goto loop_start below). */
3502 case OP_STORE_MEMBASE_IMM:
3503 case OP_STOREI1_MEMBASE_IMM:
3504 case OP_STOREI2_MEMBASE_IMM:
3505 case OP_STOREI4_MEMBASE_IMM:
3506 ADD_NEW_INS (cfg, temp, OP_ICONST);
3507 temp->inst_c0 = ins->inst_imm;
3508 temp->dreg = mono_alloc_ireg (cfg);
3509 ins->sreg1 = temp->dreg;
3510 ins->opcode = map_to_reg_reg_op (ins->opcode);
3512 goto loop_start; /* make it handle the possibly big ins->inst_offset */
3514 gboolean swap = FALSE;
3518 /* Optimized away */
3523 /* Some fp compares require swapped operands */
3524 switch (ins->next->opcode) {
3526 ins->next->opcode = OP_FBLT;
3530 ins->next->opcode = OP_FBLT_UN;
3534 ins->next->opcode = OP_FBGE;
3538 ins->next->opcode = OP_FBGE_UN;
3546 ins->sreg1 = ins->sreg2;
3555 bb->last_ins = last_ins;
3556 bb->max_vreg = cfg->next_vreg;
/*
 * mono_arch_decompose_long_opts:
 *   Decompose 64-bit IR opcodes into 32-bit pairs.  Visible case:
 *   OP_LNEG becomes RSBS/RSC of the low and high vreg halves
 *   (dreg/sreg1 + 1 = low word, + 2 = high word).
 */
3560 mono_arch_decompose_long_opts (MonoCompile *cfg, MonoInst *long_ins)
3564 if (long_ins->opcode == OP_LNEG) {
3566 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ARM_RSBS_IMM, ins->dreg + 1, ins->sreg1 + 1, 0);
3567 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ARM_RSC_IMM, ins->dreg + 2, ins->sreg1 + 2, 0);
/*
 * emit_float_to_int:
 *   Emit VFP code converting the float in SREG to an integer in DREG,
 *   truncating to SIZE bytes with the given signedness.  Uses a saved
 *   VFP scratch register for the conversion, then masks/sign-extends
 *   the result with shifts for sub-word sizes.
 */
3573 emit_float_to_int (MonoCompile *cfg, guchar *code, int dreg, int sreg, int size, gboolean is_signed)
3575 /* sreg is a float, dreg is an integer reg */
3577 code = mono_arm_emit_vfp_scratch_save (cfg, code, vfp_scratch1);
3579 ARM_TOSIZD (code, vfp_scratch1, sreg);
3581 ARM_TOUIZD (code, vfp_scratch1, sreg);
3582 ARM_FMRS (code, dreg, vfp_scratch1);
3583 code = mono_arm_emit_vfp_scratch_restore (cfg, code, vfp_scratch1);
/* Unsigned narrowing: mask (byte) or shift-pair (halfword). */
3587 ARM_AND_REG_IMM8 (code, dreg, dreg, 0xff);
3588 else if (size == 2) {
3589 ARM_SHL_IMM (code, dreg, dreg, 16);
3590 ARM_SHR_IMM (code, dreg, dreg, 16);
/* Signed narrowing: shift left then arithmetic shift right. */
3594 ARM_SHL_IMM (code, dreg, dreg, 24);
3595 ARM_SAR_IMM (code, dreg, dreg, 24);
3596 } else if (size == 2) {
3597 ARM_SHL_IMM (code, dreg, dreg, 16);
3598 ARM_SAR_IMM (code, dreg, dreg, 16);
3608 const guchar *target;
/* is_call_imm: true when DIFF fits the signed +/-32MB displacement range
 * of an ARM branch/branch-and-link instruction. */
3613 #define is_call_imm(diff) ((gint)(diff) >= -33554432 && (gint)(diff) <= 33554431)
/*
 * search_thunk_slot:
 *   mono_code_manager_foreach callback that looks through a code
 *   chunk's thunk area for either an existing 12-byte thunk already
 *   pointing at pdata->target, or a free slot in which to emit one,
 *   then patches the call site to branch to the thunk.  Only chunks
 *   reachable from pdata->code with a direct branch are considered.
 */
3616 search_thunk_slot (void *data, int csize, int bsize, void *user_data) {
3617 PatchData *pdata = (PatchData*)user_data;
3618 guchar *code = data;
3619 guint32 *thunks = data;
3620 guint32 *endthunks = (guint32*)(code + bsize);
3622 int difflow, diffhigh;
3624 /* always ensure a call from pdata->code can reach to the thunks without further thunks */
3625 difflow = (char*)pdata->code - (char*)thunks;
3626 diffhigh = (char*)pdata->code - (char*)endthunks;
3627 if (!((is_call_imm (thunks) && is_call_imm (endthunks)) || (is_call_imm (difflow) && is_call_imm (diffhigh))))
3631 * The thunk is composed of 3 words:
3632 * load constant from thunks [2] into ARM_IP
3635 * Note that the LR register is already setup
3637 //g_print ("thunk nentries: %d\n", ((char*)endthunks - (char*)thunks)/16);
3638 if ((pdata->found == 2) || (pdata->code >= code && pdata->code <= code + csize)) {
3639 while (thunks < endthunks) {
3640 //g_print ("looking for target: %p at %p (%08x-%08x)\n", pdata->target, thunks, thunks [0], thunks [1]);
/* Reuse a thunk that already jumps to this target. */
3641 if (thunks [2] == (guint32)pdata->target) {
3642 arm_patch (pdata->code, (guchar*)thunks);
3643 mono_arch_flush_icache (pdata->code, 4);
3646 } else if ((thunks [0] == 0) && (thunks [1] == 0) && (thunks [2] == 0)) {
3647 /* found a free slot instead: emit thunk */
3648 /* ARMREG_IP is fine to use since this can't be an IMT call
3651 code = (guchar*)thunks;
3652 ARM_LDR_IMM (code, ARMREG_IP, ARMREG_PC, 0);
3653 if (thumb_supported)
3654 ARM_BX (code, ARMREG_IP);
3656 ARM_MOV_REG_REG (code, ARMREG_PC, ARMREG_IP);
3657 thunks [2] = (guint32)pdata->target;
3658 mono_arch_flush_icache ((guchar*)thunks, 12);
3660 arm_patch (pdata->code, (guchar*)thunks);
3661 mono_arch_flush_icache (pdata->code, 4);
3665 /* skip 12 bytes, the size of the thunk */
3669 //g_print ("failed thunk lookup for %p from %p at %p (%d entries)\n", pdata->target, pdata->code, data, count);
/*
 * handle_thunk:
 *   Patch CODE to reach TARGET through a thunk when the displacement
 *   does not fit a direct branch.  Searches, in order: the dynamic
 *   code manager (if given), the domain's code manager, and finally
 *   the code managers of dynamic methods.  Aborts if no slot is found.
 */
3675 handle_thunk (MonoDomain *domain, int absolute, guchar *code, const guchar *target, MonoCodeManager *dyn_code_mp)
3680 domain = mono_domain_get ();
3683 pdata.target = target;
3684 pdata.absolute = absolute;
3688 mono_code_manager_foreach (dyn_code_mp, search_thunk_slot, &pdata);
3691 if (pdata.found != 1) {
3692 mono_domain_lock (domain);
3693 mono_domain_code_foreach (domain, search_thunk_slot, &pdata);
3696 /* this uses the first available slot */
3698 mono_domain_code_foreach (domain, search_thunk_slot, &pdata);
3700 mono_domain_unlock (domain);
3703 if (pdata.found != 1) {
3705 GHashTableIter iter;
3706 MonoJitDynamicMethodInfo *ji;
3709 * This might be a dynamic method, search its code manager. We can only
3710 * use the dynamic method containing CODE, since the others might be freed later.
3714 mono_domain_lock (domain);
3715 hash = domain_jit_info (domain)->dynamic_code_hash;
3717 /* FIXME: Speed this up */
3718 g_hash_table_iter_init (&iter, hash);
3719 while (g_hash_table_iter_next (&iter, NULL, (gpointer*)&ji)) {
3720 mono_code_manager_foreach (ji->code_mp, search_thunk_slot, &pdata);
3721 if (pdata.found == 1)
3725 mono_domain_unlock (domain);
3727 if (pdata.found != 1)
3728 g_print ("thunk failed for %p from %p\n", target, code);
3729 g_assert (pdata.found == 1);
/*
 * arm_patch_general:
 *   Patch the branch or call sequence at CODE to transfer to TARGET.
 *   Direct B/BL instructions get their 24-bit displacement rewritten
 *   (switching BL to BLX when TARGET is Thumb); out-of-range targets
 *   go through handle_thunk; indirect sequences (ldr ip / mov or blx)
 *   have their embedded address constant rewritten in place.
 */
3733 arm_patch_general (MonoDomain *domain, guchar *code, const guchar *target, MonoCodeManager *dyn_code_mp)
3735 guint32 *code32 = (void*)code;
3736 guint32 ins = *code32;
3737 guint32 prim = (ins >> 25) & 7;
3738 guint32 tval = GPOINTER_TO_UINT (target);
3740 //g_print ("patching 0x%08x (0x%08x) to point to 0x%08x\n", code, ins, target);
3741 if (prim == 5) { /* 101b */
3742 /* the diff starts 8 bytes from the branch opcode */
3743 gint diff = target - code - 8;
3745 gint tmask = 0xffffffff;
3746 if (tval & 1) { /* entering thumb mode */
3747 diff = target - 1 - code - 8;
3748 g_assert (thumb_supported);
3749 tbits = 0xf << 28; /* bl->blx bit pattern */
3750 g_assert ((ins & (1 << 24))); /* it must be a bl, not b instruction */
3751 /* this low bit of the displacement is moved to bit 24 in the instruction encoding */
3755 tmask = ~(1 << 24); /* clear the link bit */
3756 /*g_print ("blx to thumb: target: %p, code: %p, diff: %d, mask: %x\n", target, code, diff, tmask);*/
/* Forward branch: displacement must fit in 24 bits (word-scaled). */
3761 if (diff <= 33554431) {
3763 ins = (ins & 0xff000000) | diff;
3765 *code32 = ins | tbits;
3769 /* diff between 0 and -33554432 */
3770 if (diff >= -33554432) {
3772 ins = (ins & 0xff000000) | (diff & ~0xff000000);
3774 *code32 = ins | tbits;
/* Out of direct-branch range: route through a thunk. */
3779 handle_thunk (domain, TRUE, code, target, dyn_code_mp);
3783 #ifdef USE_JUMP_TABLES
3785 gpointer *jte = mono_jumptable_get_entry (code);
3787 jte [0] = (gpointer) target;
3791 * The alternative call sequences looks like this:
3793 * ldr ip, [pc] // loads the address constant
3794 * b 1f // jumps around the constant
3795 * address constant embedded in the code
3800 * There are two cases for patching:
3801 * a) at the end of method emission: in this case code points to the start
3802 * of the call sequence
3803 * b) during runtime patching of the call site: in this case code points
3804 * to the mov pc, ip instruction
3806 * We have to handle also the thunk jump code sequence:
3810 * address constant // execution never reaches here
3812 if ((ins & 0x0ffffff0) == 0x12fff10) {
3813 /* Branch and exchange: the address is constructed in a reg
3814 * We can patch BX when the code sequence is the following:
3815 * ldr ip, [pc, #0] ; 0x8
/* Rebuild the expected instruction sequence and verify the words
 * around CODE match it before rewriting the embedded constant. */
3822 guint8 *emit = (guint8*)ccode;
3823 ARM_LDR_IMM (emit, ARMREG_IP, ARMREG_PC, 0);
3825 ARM_MOV_REG_REG (emit, ARMREG_LR, ARMREG_PC);
3826 ARM_BX (emit, ARMREG_IP);
3828 /*patching from magic trampoline*/
3829 if (ins == ccode [3]) {
3830 g_assert (code32 [-4] == ccode [0]);
3831 g_assert (code32 [-3] == ccode [1]);
3832 g_assert (code32 [-1] == ccode [2]);
3833 code32 [-2] = (guint32)target;
3836 /*patching from JIT*/
3837 if (ins == ccode [0]) {
3838 g_assert (code32 [1] == ccode [1]);
3839 g_assert (code32 [3] == ccode [2]);
3840 g_assert (code32 [4] == ccode [3]);
3841 code32 [2] = (guint32)target;
3844 g_assert_not_reached ();
3845 } else if ((ins & 0x0ffffff0) == 0x12fff30) {
3853 guint8 *emit = (guint8*)ccode;
3854 ARM_LDR_IMM (emit, ARMREG_IP, ARMREG_PC, 0);
3856 ARM_BLX_REG (emit, ARMREG_IP);
3858 g_assert (code32 [-3] == ccode [0]);
3859 g_assert (code32 [-2] == ccode [1]);
3860 g_assert (code32 [0] == ccode [2]);
3862 code32 [-1] = (guint32)target;
3865 guint32 *tmp = ccode;
3866 guint8 *emit = (guint8*)tmp;
3867 ARM_LDR_IMM (emit, ARMREG_IP, ARMREG_PC, 0);
3868 ARM_MOV_REG_REG (emit, ARMREG_LR, ARMREG_PC);
3869 ARM_MOV_REG_REG (emit, ARMREG_PC, ARMREG_IP);
3870 ARM_BX (emit, ARMREG_IP);
3871 if (ins == ccode [2]) {
3872 g_assert_not_reached (); // should be -2 ...
3873 code32 [-1] = (guint32)target;
3876 if (ins == ccode [0]) {
3877 /* handles both thunk jump code and the far call sequence */
3878 code32 [2] = (guint32)target;
3881 g_assert_not_reached ();
3883 // g_print ("patched with 0x%08x\n", ins);
/* arm_patch: convenience wrapper for arm_patch_general with no domain
 * and no dynamic code manager. */
3888 arm_patch (guchar *code, const guchar *target)
3890 arm_patch_general (NULL, code, target, NULL);
3894 * Return the >= 0 uimm8 value if val can be represented with a byte + rotation
3895 * (with the rotation amount in *rot_amount. rot_amount is already adjusted
3896 * to be used with the emit macros.
3897 * Return -1 otherwise.
3900 mono_arm_is_rotated_imm8 (guint32 val, gint *rot_amount)
/* Try every even rotation (ARM immediates rotate in steps of 2). */
3903 for (i = 0; i < 31; i+= 2) {
3904 res = (val << (32 - i)) | (val >> i);
3907 *rot_amount = i? 32 - i: 0;
3914 * Emits in code a sequence of instructions that load the value 'val'
3915 * into the dreg register. Uses at most 4 instructions.
3918 mono_arm_emit_load_imm (guint8 *code, int dreg, guint32 val)
3920 int imm8, rot_amount;
3922 ARM_LDR_IMM (code, dreg, ARMREG_PC, 0);
3923 /* skip the constant pool */
/* Single-instruction forms: rotated imm8 MOV, or MVN of its complement. */
3929 if ((imm8 = mono_arm_is_rotated_imm8 (val, &rot_amount)) >= 0) {
3930 ARM_MOV_REG_IMM (code, dreg, imm8, rot_amount);
3931 } else if ((imm8 = mono_arm_is_rotated_imm8 (~val, &rot_amount)) >= 0) {
3932 ARM_MVN_REG_IMM (code, dreg, imm8, rot_amount);
/* ARMv7 MOVW/MOVT pair loads any 32-bit constant in two instructions. */
3935 ARM_MOVW_REG_IMM (code, dreg, val & 0xffff);
3937 ARM_MOVT_REG_IMM (code, dreg, (val >> 16) & 0xffff);
/* Fallback: build the value byte by byte, starting at the lowest
 * non-zero byte, adding each higher byte with a rotated immediate. */
3941 ARM_MOV_REG_IMM8 (code, dreg, (val & 0xFF));
3943 ARM_ADD_REG_IMM (code, dreg, dreg, (val & 0xFF00) >> 8, 24);
3945 if (val & 0xFF0000) {
3946 ARM_ADD_REG_IMM (code, dreg, dreg, (val & 0xFF0000) >> 16, 16);
3948 if (val & 0xFF000000) {
3949 ARM_ADD_REG_IMM (code, dreg, dreg, (val & 0xFF000000) >> 24, 8);
3951 } else if (val & 0xFF00) {
3952 ARM_MOV_REG_IMM (code, dreg, (val & 0xFF00) >> 8, 24);
3953 if (val & 0xFF0000) {
3954 ARM_ADD_REG_IMM (code, dreg, dreg, (val & 0xFF0000) >> 16, 16);
3956 if (val & 0xFF000000) {
3957 ARM_ADD_REG_IMM (code, dreg, dreg, (val & 0xFF000000) >> 24, 8);
3959 } else if (val & 0xFF0000) {
3960 ARM_MOV_REG_IMM (code, dreg, (val & 0xFF0000) >> 16, 16);
3961 if (val & 0xFF000000) {
3962 ARM_ADD_REG_IMM (code, dreg, dreg, (val & 0xFF000000) >> 24, 8);
3965 //g_assert_not_reached ();
/*
 * mono_arm_thumb_supported:
 *
 * Return whether the current CPU supports the Thumb instruction set,
 * as detected earlier and cached in the file-scope `thumb_supported` flag.
 */
3971 mono_arm_thumb_supported (void)
3973 return thumb_supported;
3979 * emit_load_volatile_arguments:
3981 * Load volatile arguments from the stack to the original input registers.
3982 * Required before a tail call.
/* Reconstructs the ABI register state the callee expects: for each argument
 * that the prolog spilled to a stack slot, reload it into the register (or
 * register pair / register run) assigned by the calling convention.
 * Mirrors — in reverse — the argument-saving code in emit_prolog. */
3985 emit_load_volatile_arguments (MonoCompile *cfg, guint8 *code)
3987 MonoMethod *method = cfg->method;
3988 MonoMethodSignature *sig;
3993 /* FIXME: Generate intermediate code instead */
3995 sig = mono_method_signature (method);
3997 /* This is the opposite of the code in emit_prolog */
4001 cinfo = get_call_info (cfg->generic_sharing_context, NULL, sig);
/* Hidden valuetype-return address: reload it into its ABI register. */
4003 if (cinfo->vtype_retaddr) {
4004 ArgInfo *ainfo = &cinfo->ret;
4005 inst = cfg->vret_addr;
4006 g_assert (arm_is_imm12 (inst->inst_offset));
4007 ARM_LDR_IMM (code, ainfo->reg, inst->inst_basereg, inst->inst_offset);
/* Walk every formal argument (plus the implicit `this` if present). */
4009 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
4010 ArgInfo *ainfo = cinfo->args + i;
4011 inst = cfg->args [pos];
4013 if (cfg->verbose_level > 2)
4014 g_print ("Loading argument %d (type: %d)\n", i, ainfo->storage);
/* Argument lives in a register variable: move/reload into the ABI reg. */
4015 if (inst->opcode == OP_REGVAR) {
4016 if (ainfo->storage == RegTypeGeneral)
4017 ARM_MOV_REG_REG (code, inst->dreg, ainfo->reg);
4018 else if (ainfo->storage == RegTypeFP) {
4019 g_assert_not_reached ();
4020 } else if (ainfo->storage == RegTypeBase) {
/* Caller passed this arg on the stack; reload relative to SP.
 * NOTE(review): the fallback branch uses inst->inst_offset rather than
 * prev_sp_offset + ainfo->offset as in the imm12 branch — looks
 * inconsistent, confirm against emit_prolog (lines elided here). */
4024 if (arm_is_imm12 (prev_sp_offset + ainfo->offset)) {
4025 ARM_LDR_IMM (code, inst->dreg, ARMREG_SP, (prev_sp_offset + ainfo->offset));
4027 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
4028 ARM_LDR_REG_REG (code, inst->dreg, ARMREG_SP, ARMREG_IP);
4032 g_assert_not_reached ();
/* Argument was spilled to a frame slot: reload per its storage class. */
4034 if (ainfo->storage == RegTypeGeneral || ainfo->storage == RegTypeIRegPair) {
4035 switch (ainfo->size) {
/* 8-byte case (case labels elided): reload both halves of the pair. */
4042 g_assert (arm_is_imm12 (inst->inst_offset));
4043 ARM_LDR_IMM (code, ainfo->reg, inst->inst_basereg, inst->inst_offset);
4044 g_assert (arm_is_imm12 (inst->inst_offset + 4));
4045 ARM_LDR_IMM (code, ainfo->reg + 1, inst->inst_basereg, inst->inst_offset + 4);
/* Default word-sized case: single load, via IP when offset won't encode. */
4048 if (arm_is_imm12 (inst->inst_offset)) {
4049 ARM_LDR_IMM (code, ainfo->reg, inst->inst_basereg, inst->inst_offset);
4051 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
4052 ARM_LDR_REG_REG (code, ainfo->reg, inst->inst_basereg, ARMREG_IP);
4056 } else if (ainfo->storage == RegTypeBaseGen) {
4059 } else if (ainfo->storage == RegTypeBase) {
4061 } else if (ainfo->storage == RegTypeFP) {
4062 g_assert_not_reached ();
/* Struct passed (partially) in registers: reload each word into the
 * consecutive register run starting at ainfo->reg. */
4063 } else if (ainfo->storage == RegTypeStructByVal) {
4064 int doffset = inst->inst_offset;
4068 if (mono_class_from_mono_type (inst->inst_vtype))
4069 size = mono_class_native_size (mono_class_from_mono_type (inst->inst_vtype), NULL);
4070 for (cur_reg = 0; cur_reg < ainfo->size; ++cur_reg) {
4071 if (arm_is_imm12 (doffset)) {
4072 ARM_LDR_IMM (code, ainfo->reg + cur_reg, inst->inst_basereg, doffset);
4074 code = mono_arm_emit_load_imm (code, ARMREG_IP, doffset);
4075 ARM_LDR_REG_REG (code, ainfo->reg + cur_reg, inst->inst_basereg, ARMREG_IP);
4077 soffset += sizeof (gpointer);
4078 doffset += sizeof (gpointer);
4083 } else if (ainfo->storage == RegTypeStructByAddr) {
4098 mono_arch_output_basic_block (MonoCompile *cfg, MonoBasicBlock *bb)
4103 guint8 *code = cfg->native_code + cfg->code_len;
4104 MonoInst *last_ins = NULL;
4105 guint last_offset = 0;
4107 int imm8, rot_amount;
4109 /* we don't align basic blocks of loops on arm */
4111 if (cfg->verbose_level > 2)
4112 g_print ("Basic block %d starting at offset 0x%x\n", bb->block_num, bb->native_offset);
4114 cpos = bb->max_offset;
4116 if (cfg->prof_options & MONO_PROFILE_COVERAGE) {
4117 //MonoCoverageInfo *cov = mono_get_coverage_info (cfg->method);
4118 //g_assert (!mono_compile_aot);
4121 // cov->data [bb->dfn].iloffset = bb->cil_code - cfg->cil_code;
4122 /* this is not thread save, but good enough */
4123 /* fixme: howto handle overflows? */
4124 //x86_inc_mem (code, &cov->data [bb->dfn].count);
4127 if (mono_break_at_bb_method && mono_method_desc_full_match (mono_break_at_bb_method, cfg->method) && bb->block_num == mono_break_at_bb_bb_num) {
4128 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_INTERNAL_METHOD,
4129 (gpointer)"mono_break");
4130 code = emit_call_seq (cfg, code);
4133 MONO_BB_FOR_EACH_INS (bb, ins) {
4134 offset = code - cfg->native_code;
4136 max_len = ((guint8 *)ins_get_spec (ins->opcode))[MONO_INST_LEN];
4138 if (offset > (cfg->code_size - max_len - 16)) {
4139 cfg->code_size *= 2;
4140 cfg->native_code = g_realloc (cfg->native_code, cfg->code_size);
4141 code = cfg->native_code + offset;
4143 // if (ins->cil_code)
4144 // g_print ("cil code\n");
4145 mono_debug_record_line_number (cfg, ins, offset);
4147 switch (ins->opcode) {
4148 case OP_MEMORY_BARRIER:
4150 ARM_MOV_REG_IMM8 (code, ARMREG_R0, 0);
4151 ARM_MCR (code, 15, 0, ARMREG_R0, 7, 10, 5);
4155 #ifdef HAVE_AEABI_READ_TP
4156 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_INTERNAL_METHOD,
4157 (gpointer)"__aeabi_read_tp");
4158 code = emit_call_seq (cfg, code);
4160 ARM_LDR_IMM (code, ins->dreg, ARMREG_R0, ins->inst_offset);
4162 g_assert_not_reached ();
4165 case OP_ATOMIC_EXCHANGE_I4:
4166 case OP_ATOMIC_CAS_I4:
4167 case OP_ATOMIC_ADD_I4: {
4171 g_assert (v7_supported);
4174 if (ins->sreg1 != ARMREG_IP && ins->sreg2 != ARMREG_IP && ins->sreg3 != ARMREG_IP)
4176 else if (ins->sreg1 != ARMREG_R0 && ins->sreg2 != ARMREG_R0 && ins->sreg3 != ARMREG_R0)
4178 else if (ins->sreg1 != ARMREG_R1 && ins->sreg2 != ARMREG_R1 && ins->sreg3 != ARMREG_R1)
4182 g_assert (cfg->arch.atomic_tmp_offset != -1);
4183 ARM_STR_IMM (code, tmpreg, cfg->frame_reg, cfg->arch.atomic_tmp_offset);
4185 switch (ins->opcode) {
4186 case OP_ATOMIC_EXCHANGE_I4:
4188 ARM_DMB (code, ARM_DMB_SY);
4189 ARM_LDREX_REG (code, ARMREG_LR, ins->sreg1);
4190 ARM_STREX_REG (code, tmpreg, ins->sreg2, ins->sreg1);
4191 ARM_CMP_REG_IMM (code, tmpreg, 0, 0);
4193 ARM_B_COND (code, ARMCOND_NE, 0);
4194 arm_patch (buf [1], buf [0]);
4196 case OP_ATOMIC_CAS_I4:
4197 ARM_DMB (code, ARM_DMB_SY);
4199 ARM_LDREX_REG (code, ARMREG_LR, ins->sreg1);
4200 ARM_CMP_REG_REG (code, ARMREG_LR, ins->sreg3);
4202 ARM_B_COND (code, ARMCOND_NE, 0);
4203 ARM_STREX_REG (code, tmpreg, ins->sreg2, ins->sreg1);
4204 ARM_CMP_REG_IMM (code, tmpreg, 0, 0);
4206 ARM_B_COND (code, ARMCOND_NE, 0);
4207 arm_patch (buf [2], buf [0]);
4208 arm_patch (buf [1], code);
4210 case OP_ATOMIC_ADD_I4:
4212 ARM_DMB (code, ARM_DMB_SY);
4213 ARM_LDREX_REG (code, ARMREG_LR, ins->sreg1);
4214 ARM_ADD_REG_REG (code, ARMREG_LR, ARMREG_LR, ins->sreg2);
4215 ARM_STREX_REG (code, tmpreg, ARMREG_LR, ins->sreg1);
4216 ARM_CMP_REG_IMM (code, tmpreg, 0, 0);
4218 ARM_B_COND (code, ARMCOND_NE, 0);
4219 arm_patch (buf [1], buf [0]);
4222 g_assert_not_reached ();
4225 ARM_DMB (code, ARM_DMB_SY);
4226 if (tmpreg != ins->dreg)
4227 ARM_LDR_IMM (code, tmpreg, cfg->frame_reg, cfg->arch.atomic_tmp_offset);
4228 ARM_MOV_REG_REG (code, ins->dreg, ARMREG_LR);
4233 ppc_mullw (code, ppc_r4, ins->sreg1, ins->sreg2);
4234 ppc_mulhw (code, ppc_r3, ins->sreg1, ins->sreg2);
4237 ppc_mullw (code, ppc_r4, ins->sreg1, ins->sreg2);
4238 ppc_mulhwu (code, ppc_r3, ins->sreg1, ins->sreg2);
4240 case OP_STOREI1_MEMBASE_IMM:
4241 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_imm & 0xFF);
4242 g_assert (arm_is_imm12 (ins->inst_offset));
4243 ARM_STRB_IMM (code, ARMREG_LR, ins->inst_destbasereg, ins->inst_offset);
4245 case OP_STOREI2_MEMBASE_IMM:
4246 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_imm & 0xFFFF);
4247 g_assert (arm_is_imm8 (ins->inst_offset));
4248 ARM_STRH_IMM (code, ARMREG_LR, ins->inst_destbasereg, ins->inst_offset);
4250 case OP_STORE_MEMBASE_IMM:
4251 case OP_STOREI4_MEMBASE_IMM:
4252 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_imm);
4253 g_assert (arm_is_imm12 (ins->inst_offset));
4254 ARM_STR_IMM (code, ARMREG_LR, ins->inst_destbasereg, ins->inst_offset);
4256 case OP_STOREI1_MEMBASE_REG:
4257 g_assert (arm_is_imm12 (ins->inst_offset));
4258 ARM_STRB_IMM (code, ins->sreg1, ins->inst_destbasereg, ins->inst_offset);
4260 case OP_STOREI2_MEMBASE_REG:
4261 g_assert (arm_is_imm8 (ins->inst_offset));
4262 ARM_STRH_IMM (code, ins->sreg1, ins->inst_destbasereg, ins->inst_offset);
4264 case OP_STORE_MEMBASE_REG:
4265 case OP_STOREI4_MEMBASE_REG:
4266 /* this case is special, since it happens for spill code after lowering has been called */
4267 if (arm_is_imm12 (ins->inst_offset)) {
4268 ARM_STR_IMM (code, ins->sreg1, ins->inst_destbasereg, ins->inst_offset);
4270 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_offset);
4271 ARM_STR_REG_REG (code, ins->sreg1, ins->inst_destbasereg, ARMREG_LR);
4274 case OP_STOREI1_MEMINDEX:
4275 ARM_STRB_REG_REG (code, ins->sreg1, ins->inst_destbasereg, ins->sreg2);
4277 case OP_STOREI2_MEMINDEX:
4278 ARM_STRH_REG_REG (code, ins->sreg1, ins->inst_destbasereg, ins->sreg2);
4280 case OP_STORE_MEMINDEX:
4281 case OP_STOREI4_MEMINDEX:
4282 ARM_STR_REG_REG (code, ins->sreg1, ins->inst_destbasereg, ins->sreg2);
4285 g_assert_not_reached ();
4287 case OP_LOAD_MEMINDEX:
4288 case OP_LOADI4_MEMINDEX:
4289 case OP_LOADU4_MEMINDEX:
4290 ARM_LDR_REG_REG (code, ins->dreg, ins->inst_basereg, ins->sreg2);
4292 case OP_LOADI1_MEMINDEX:
4293 ARM_LDRSB_REG_REG (code, ins->dreg, ins->inst_basereg, ins->sreg2);
4295 case OP_LOADU1_MEMINDEX:
4296 ARM_LDRB_REG_REG (code, ins->dreg, ins->inst_basereg, ins->sreg2);
4298 case OP_LOADI2_MEMINDEX:
4299 ARM_LDRSH_REG_REG (code, ins->dreg, ins->inst_basereg, ins->sreg2);
4301 case OP_LOADU2_MEMINDEX:
4302 ARM_LDRH_REG_REG (code, ins->dreg, ins->inst_basereg, ins->sreg2);
4304 case OP_LOAD_MEMBASE:
4305 case OP_LOADI4_MEMBASE:
4306 case OP_LOADU4_MEMBASE:
4307 /* this case is special, since it happens for spill code after lowering has been called */
4308 if (arm_is_imm12 (ins->inst_offset)) {
4309 ARM_LDR_IMM (code, ins->dreg, ins->inst_basereg, ins->inst_offset);
4311 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_offset);
4312 ARM_LDR_REG_REG (code, ins->dreg, ins->inst_basereg, ARMREG_LR);
4315 case OP_LOADI1_MEMBASE:
4316 g_assert (arm_is_imm8 (ins->inst_offset));
4317 ARM_LDRSB_IMM (code, ins->dreg, ins->inst_basereg, ins->inst_offset);
4319 case OP_LOADU1_MEMBASE:
4320 g_assert (arm_is_imm12 (ins->inst_offset));
4321 ARM_LDRB_IMM (code, ins->dreg, ins->inst_basereg, ins->inst_offset);
4323 case OP_LOADU2_MEMBASE:
4324 g_assert (arm_is_imm8 (ins->inst_offset));
4325 ARM_LDRH_IMM (code, ins->dreg, ins->inst_basereg, ins->inst_offset);
4327 case OP_LOADI2_MEMBASE:
4328 g_assert (arm_is_imm8 (ins->inst_offset));
4329 ARM_LDRSH_IMM (code, ins->dreg, ins->inst_basereg, ins->inst_offset);
4331 case OP_ICONV_TO_I1:
4332 ARM_SHL_IMM (code, ins->dreg, ins->sreg1, 24);
4333 ARM_SAR_IMM (code, ins->dreg, ins->dreg, 24);
4335 case OP_ICONV_TO_I2:
4336 ARM_SHL_IMM (code, ins->dreg, ins->sreg1, 16);
4337 ARM_SAR_IMM (code, ins->dreg, ins->dreg, 16);
4339 case OP_ICONV_TO_U1:
4340 ARM_AND_REG_IMM8 (code, ins->dreg, ins->sreg1, 0xff);
4342 case OP_ICONV_TO_U2:
4343 ARM_SHL_IMM (code, ins->dreg, ins->sreg1, 16);
4344 ARM_SHR_IMM (code, ins->dreg, ins->dreg, 16);
4348 ARM_CMP_REG_REG (code, ins->sreg1, ins->sreg2);
4350 case OP_COMPARE_IMM:
4351 case OP_ICOMPARE_IMM:
4352 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
4353 g_assert (imm8 >= 0);
4354 ARM_CMP_REG_IMM (code, ins->sreg1, imm8, rot_amount);
4358 * gdb does not like encountering the hw breakpoint ins in the debugged code.
4359 * So instead of emitting a trap, we emit a call a C function and place a
4362 //*(int*)code = 0xef9f0001;
4365 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_INTERNAL_METHOD,
4366 (gpointer)"mono_break");
4367 code = emit_call_seq (cfg, code);
4369 case OP_RELAXED_NOP:
4374 case OP_DUMMY_STORE:
4375 case OP_DUMMY_ICONST:
4376 case OP_DUMMY_R8CONST:
4377 case OP_NOT_REACHED:
4380 case OP_SEQ_POINT: {
4382 MonoInst *info_var = cfg->arch.seq_point_info_var;
4383 MonoInst *ss_trigger_page_var = cfg->arch.ss_trigger_page_var;
4384 MonoInst *ss_read_var = cfg->arch.seq_point_read_var;
4385 MonoInst *ss_method_var = cfg->arch.seq_point_ss_method_var;
4386 MonoInst *bp_method_var = cfg->arch.seq_point_bp_method_var;
4388 int dreg = ARMREG_LR;
4390 if (cfg->soft_breakpoints) {
4391 g_assert (!cfg->compile_aot);
4395 * For AOT, we use one got slot per method, which will point to a
4396 * SeqPointInfo structure, containing all the information required
4397 * by the code below.
4399 if (cfg->compile_aot) {
4400 g_assert (info_var);
4401 g_assert (info_var->opcode == OP_REGOFFSET);
4402 g_assert (arm_is_imm12 (info_var->inst_offset));
4405 if (!cfg->soft_breakpoints) {
4407 * Read from the single stepping trigger page. This will cause a
4408 * SIGSEGV when single stepping is enabled.
4409 * We do this _before_ the breakpoint, so single stepping after
4410 * a breakpoint is hit will step to the next IL offset.
4412 g_assert (((guint64)(gsize)ss_trigger_page >> 32) == 0);
4415 if (ins->flags & MONO_INST_SINGLE_STEP_LOC) {
4416 if (cfg->soft_breakpoints) {
4417 /* Load the address of the sequence point trigger variable. */
4420 g_assert (var->opcode == OP_REGOFFSET);
4421 g_assert (arm_is_imm12 (var->inst_offset));
4422 ARM_LDR_IMM (code, dreg, var->inst_basereg, var->inst_offset);
4424 /* Read the value and check whether it is non-zero. */
4425 ARM_LDR_IMM (code, dreg, dreg, 0);
4426 ARM_CMP_REG_IMM (code, dreg, 0, 0);
4428 /* Load the address of the sequence point method. */
4429 var = ss_method_var;
4431 g_assert (var->opcode == OP_REGOFFSET);
4432 g_assert (arm_is_imm12 (var->inst_offset));
4433 ARM_LDR_IMM (code, dreg, var->inst_basereg, var->inst_offset);
4435 /* Call it conditionally. */
4436 ARM_BLX_REG_COND (code, ARMCOND_NE, dreg);
4438 if (cfg->compile_aot) {
4439 /* Load the trigger page addr from the variable initialized in the prolog */
4440 var = ss_trigger_page_var;
4442 g_assert (var->opcode == OP_REGOFFSET);
4443 g_assert (arm_is_imm12 (var->inst_offset));
4444 ARM_LDR_IMM (code, dreg, var->inst_basereg, var->inst_offset);
4446 #ifdef USE_JUMP_TABLES
4447 gpointer *jte = mono_jumptable_add_entry ();
4448 code = mono_arm_load_jumptable_entry (code, jte, dreg);
4449 jte [0] = ss_trigger_page;
4451 ARM_LDR_IMM (code, dreg, ARMREG_PC, 0);
4453 *(int*)code = (int)ss_trigger_page;
4457 ARM_LDR_IMM (code, dreg, dreg, 0);
4461 mono_add_seq_point (cfg, bb, ins, code - cfg->native_code);
4463 if (cfg->soft_breakpoints) {
4464 /* Load the address of the breakpoint method into ip. */
4465 var = bp_method_var;
4467 g_assert (var->opcode == OP_REGOFFSET);
4468 g_assert (arm_is_imm12 (var->inst_offset));
4469 ARM_LDR_IMM (code, dreg, var->inst_basereg, var->inst_offset);
4472 * A placeholder for a possible breakpoint inserted by
4473 * mono_arch_set_breakpoint ().
4476 } else if (cfg->compile_aot) {
4477 guint32 offset = code - cfg->native_code;
4480 ARM_LDR_IMM (code, dreg, info_var->inst_basereg, info_var->inst_offset);
4481 /* Add the offset */
4482 val = ((offset / 4) * sizeof (guint8*)) + MONO_STRUCT_OFFSET (SeqPointInfo, bp_addrs);
4483 /* Load the info->bp_addrs [offset], which is either 0 or the address of a trigger page */
4484 if (arm_is_imm12 ((int)val)) {
4485 ARM_LDR_IMM (code, dreg, dreg, val);
4487 ARM_ADD_REG_IMM (code, dreg, dreg, (val & 0xFF), 0);
4489 ARM_ADD_REG_IMM (code, dreg, dreg, (val & 0xFF00) >> 8, 24);
4491 ARM_ADD_REG_IMM (code, dreg, dreg, (val & 0xFF0000) >> 16, 16);
4492 g_assert (!(val & 0xFF000000));
4494 ARM_LDR_IMM (code, dreg, dreg, 0);
4496 /* What is faster, a branch or a load ? */
4497 ARM_CMP_REG_IMM (code, dreg, 0, 0);
4498 /* The breakpoint instruction */
4499 ARM_LDR_IMM_COND (code, dreg, dreg, 0, ARMCOND_NE);
4502 * A placeholder for a possible breakpoint inserted by
4503 * mono_arch_set_breakpoint ().
4505 for (i = 0; i < 4; ++i)
4512 ARM_ADDS_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4515 ARM_ADD_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4519 ARM_ADCS_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4522 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
4523 g_assert (imm8 >= 0);
4524 ARM_ADDS_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
4528 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
4529 g_assert (imm8 >= 0);
4530 ARM_ADD_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
4534 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
4535 g_assert (imm8 >= 0);
4536 ARM_ADCS_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
4539 ARM_ADD_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4540 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, "OverflowException");
4542 case OP_IADD_OVF_UN:
4543 ARM_ADD_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4544 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, "OverflowException");
4547 ARM_SUB_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4548 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, "OverflowException");
4550 case OP_ISUB_OVF_UN:
4551 ARM_SUB_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4552 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_TRUE, PPC_BR_EQ, "OverflowException");
4554 case OP_ADD_OVF_CARRY:
4555 ARM_ADCS_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4556 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, "OverflowException");
4558 case OP_ADD_OVF_UN_CARRY:
4559 ARM_ADCS_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4560 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, "OverflowException");
4562 case OP_SUB_OVF_CARRY:
4563 ARM_SBCS_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4564 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, "OverflowException");
4566 case OP_SUB_OVF_UN_CARRY:
4567 ARM_SBCS_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4568 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_TRUE, PPC_BR_EQ, "OverflowException");
4572 ARM_SUBS_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4575 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
4576 g_assert (imm8 >= 0);
4577 ARM_SUBS_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
4580 ARM_SUB_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4584 ARM_SBCS_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4588 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
4589 g_assert (imm8 >= 0);
4590 ARM_SUB_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
4594 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
4595 g_assert (imm8 >= 0);
4596 ARM_SBCS_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
4598 case OP_ARM_RSBS_IMM:
4599 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
4600 g_assert (imm8 >= 0);
4601 ARM_RSBS_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
4603 case OP_ARM_RSC_IMM:
4604 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
4605 g_assert (imm8 >= 0);
4606 ARM_RSC_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
4609 ARM_AND_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4613 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
4614 g_assert (imm8 >= 0);
4615 ARM_AND_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
4618 g_assert (v7s_supported);
4619 ARM_SDIV (code, ins->dreg, ins->sreg1, ins->sreg2);
4622 g_assert (v7s_supported);
4623 ARM_UDIV (code, ins->dreg, ins->sreg1, ins->sreg2);
4626 g_assert (v7s_supported);
4627 ARM_SDIV (code, ARMREG_LR, ins->sreg1, ins->sreg2);
4628 ARM_MLS (code, ins->dreg, ARMREG_LR, ins->sreg2, ins->sreg1);
4631 g_assert (v7s_supported);
4632 ARM_UDIV (code, ARMREG_LR, ins->sreg1, ins->sreg2);
4633 ARM_MLS (code, ins->dreg, ARMREG_LR, ins->sreg2, ins->sreg1);
4637 g_assert_not_reached ();
4639 ARM_ORR_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4643 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
4644 g_assert (imm8 >= 0);
4645 ARM_ORR_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
4648 ARM_EOR_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4652 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
4653 g_assert (imm8 >= 0);
4654 ARM_EOR_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
4657 ARM_SHL_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4662 ARM_SHL_IMM (code, ins->dreg, ins->sreg1, (ins->inst_imm & 0x1f));
4663 else if (ins->dreg != ins->sreg1)
4664 ARM_MOV_REG_REG (code, ins->dreg, ins->sreg1);
4667 ARM_SAR_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4672 ARM_SAR_IMM (code, ins->dreg, ins->sreg1, (ins->inst_imm & 0x1f));
4673 else if (ins->dreg != ins->sreg1)
4674 ARM_MOV_REG_REG (code, ins->dreg, ins->sreg1);
4677 case OP_ISHR_UN_IMM:
4679 ARM_SHR_IMM (code, ins->dreg, ins->sreg1, (ins->inst_imm & 0x1f));
4680 else if (ins->dreg != ins->sreg1)
4681 ARM_MOV_REG_REG (code, ins->dreg, ins->sreg1);
4684 ARM_SHR_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4687 ARM_MVN_REG_REG (code, ins->dreg, ins->sreg1);
4690 ARM_RSB_REG_IMM8 (code, ins->dreg, ins->sreg1, 0);
4693 if (ins->dreg == ins->sreg2)
4694 ARM_MUL_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4696 ARM_MUL_REG_REG (code, ins->dreg, ins->sreg2, ins->sreg1);
4699 g_assert_not_reached ();
4702 /* FIXME: handle ovf/ sreg2 != dreg */
4703 ARM_MUL_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4704 /* FIXME: MUL doesn't set the C/O flags on ARM */
4706 case OP_IMUL_OVF_UN:
4707 /* FIXME: handle ovf/ sreg2 != dreg */
4708 ARM_MUL_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4709 /* FIXME: MUL doesn't set the C/O flags on ARM */
4712 code = mono_arm_emit_load_imm (code, ins->dreg, ins->inst_c0);
4715 /* Load the GOT offset */
4716 mono_add_patch_info (cfg, offset, (MonoJumpInfoType)ins->inst_i1, ins->inst_p0);
4717 ARM_LDR_IMM (code, ins->dreg, ARMREG_PC, 0);
4719 *(gpointer*)code = NULL;
4721 /* Load the value from the GOT */
4722 ARM_LDR_REG_REG (code, ins->dreg, ARMREG_PC, ins->dreg);
4724 case OP_OBJC_GET_SELECTOR:
4725 mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_OBJC_SELECTOR_REF, ins->inst_p0);
4726 ARM_LDR_IMM (code, ins->dreg, ARMREG_PC, 0);
4728 *(gpointer*)code = NULL;
4730 ARM_LDR_REG_REG (code, ins->dreg, ARMREG_PC, ins->dreg);
4732 case OP_ICONV_TO_I4:
4733 case OP_ICONV_TO_U4:
4735 if (ins->dreg != ins->sreg1)
4736 ARM_MOV_REG_REG (code, ins->dreg, ins->sreg1);
4739 int saved = ins->sreg2;
4740 if (ins->sreg2 == ARM_LSW_REG) {
4741 ARM_MOV_REG_REG (code, ARMREG_LR, ins->sreg2);
4744 if (ins->sreg1 != ARM_LSW_REG)
4745 ARM_MOV_REG_REG (code, ARM_LSW_REG, ins->sreg1);
4746 if (saved != ARM_MSW_REG)
4747 ARM_MOV_REG_REG (code, ARM_MSW_REG, saved);
4752 ARM_CPYD (code, ins->dreg, ins->sreg1);
4754 case OP_FCONV_TO_R4:
4756 ARM_CVTD (code, ins->dreg, ins->sreg1);
4757 ARM_CVTS (code, ins->dreg, ins->dreg);
4762 * Keep in sync with mono_arch_emit_epilog
4764 g_assert (!cfg->method->save_lmf);
4766 code = emit_load_volatile_arguments (cfg, code);
4768 code = emit_big_add (code, ARMREG_SP, cfg->frame_reg, cfg->stack_usage);
4770 if (cfg->used_int_regs)
4771 ARM_POP (code, cfg->used_int_regs);
4772 ARM_POP (code, (1 << ARMREG_R7) | (1 << ARMREG_LR));
4774 ARM_POP (code, cfg->used_int_regs | (1 << ARMREG_LR));
4776 mono_add_patch_info (cfg, (guint8*) code - cfg->native_code, MONO_PATCH_INFO_METHOD_JUMP, ins->inst_p0);
4777 if (cfg->compile_aot) {
4778 ARM_LDR_IMM (code, ARMREG_IP, ARMREG_PC, 0);
4780 *(gpointer*)code = NULL;
4782 ARM_LDR_REG_REG (code, ARMREG_PC, ARMREG_PC, ARMREG_IP);
4784 code = mono_arm_patchable_b (code, ARMCOND_AL);
4788 MonoCallInst *call = (MonoCallInst*)ins;
4791 * The stack looks like the following:
4792 * <caller argument area>
4795 * <callee argument area>
4796 * Need to copy the arguments from the callee argument area to
4797 * the caller argument area, and pop the frame.
4799 if (call->stack_usage) {
4800 int i, prev_sp_offset = 0;
4802 /* Compute size of saved registers restored below */
4804 prev_sp_offset = 2 * 4;
4806 prev_sp_offset = 1 * 4;
4807 for (i = 0; i < 16; ++i) {
4808 if (cfg->used_int_regs & (1 << i))
4809 prev_sp_offset += 4;
4812 code = emit_big_add (code, ARMREG_IP, cfg->frame_reg, cfg->stack_usage + prev_sp_offset);
4814 /* Copy arguments on the stack to our argument area */
4815 for (i = 0; i < call->stack_usage; i += sizeof (mgreg_t)) {
4816 ARM_LDR_IMM (code, ARMREG_LR, ARMREG_SP, i);
4817 ARM_STR_IMM (code, ARMREG_LR, ARMREG_IP, i);
4822 * Keep in sync with mono_arch_emit_epilog
4824 g_assert (!cfg->method->save_lmf);
4826 code = emit_big_add (code, ARMREG_SP, cfg->frame_reg, cfg->stack_usage);
4828 if (cfg->used_int_regs)
4829 ARM_POP (code, cfg->used_int_regs);
4830 ARM_POP (code, (1 << ARMREG_R7) | (1 << ARMREG_LR));
4832 ARM_POP (code, cfg->used_int_regs | (1 << ARMREG_LR));
4835 mono_add_patch_info (cfg, (guint8*) code - cfg->native_code, MONO_PATCH_INFO_METHOD_JUMP, call->method);
4836 if (cfg->compile_aot) {
4837 ARM_LDR_IMM (code, ARMREG_IP, ARMREG_PC, 0);
4839 *(gpointer*)code = NULL;
4841 ARM_LDR_REG_REG (code, ARMREG_PC, ARMREG_PC, ARMREG_IP);
4843 code = mono_arm_patchable_b (code, ARMCOND_AL);
4848 /* ensure ins->sreg1 is not NULL */
4849 ARM_LDRB_IMM (code, ARMREG_LR, ins->sreg1, 0);
4852 g_assert (cfg->sig_cookie < 128);
4853 ARM_LDR_IMM (code, ARMREG_IP, cfg->frame_reg, cfg->sig_cookie);
4854 ARM_STR_IMM (code, ARMREG_IP, ins->sreg1, 0);
4863 call = (MonoCallInst*)ins;
4866 code = emit_float_args (cfg, call, code, &max_len, &offset);
4868 if (ins->flags & MONO_INST_HAS_METHOD)
4869 mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_METHOD, call->method);
4871 mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_ABS, call->fptr);
4872 code = emit_call_seq (cfg, code);
4873 ins->flags |= MONO_INST_GC_CALLSITE;
4874 ins->backend.pc_offset = code - cfg->native_code;
4875 code = emit_move_return_value (cfg, ins, code);
4881 case OP_VOIDCALL_REG:
4884 code = emit_float_args (cfg, (MonoCallInst *)ins, code, &max_len, &offset);
4886 code = emit_call_reg (code, ins->sreg1);
4887 ins->flags |= MONO_INST_GC_CALLSITE;
4888 ins->backend.pc_offset = code - cfg->native_code;
4889 code = emit_move_return_value (cfg, ins, code);
4891 case OP_FCALL_MEMBASE:
4892 case OP_LCALL_MEMBASE:
4893 case OP_VCALL_MEMBASE:
4894 case OP_VCALL2_MEMBASE:
4895 case OP_VOIDCALL_MEMBASE:
4896 case OP_CALL_MEMBASE: {
4897 gboolean imt_arg = FALSE;
4899 g_assert (ins->sreg1 != ARMREG_LR);
4900 call = (MonoCallInst*)ins;
4903 code = emit_float_args (cfg, call, code, &max_len, &offset);
4905 if (call->dynamic_imt_arg || call->method->klass->flags & TYPE_ATTRIBUTE_INTERFACE)
4907 if (!arm_is_imm12 (ins->inst_offset))
4908 code = mono_arm_emit_load_imm (code, ARMREG_IP, ins->inst_offset);
4909 #ifdef USE_JUMP_TABLES
4915 ARM_ADD_REG_IMM8 (code, ARMREG_LR, ARMREG_PC, LR_BIAS);
4917 ARM_MOV_REG_REG (code, ARMREG_LR, ARMREG_PC);
4919 if (!arm_is_imm12 (ins->inst_offset))
4920 ARM_LDR_REG_REG (code, ARMREG_PC, ins->sreg1, ARMREG_IP);
4922 ARM_LDR_IMM (code, ARMREG_PC, ins->sreg1, ins->inst_offset);
4925 * We can't embed the method in the code stream in PIC code, or
4927 * Instead, we put it in V5 in code emitted by
4928 * mono_arch_emit_imt_argument (), and embed NULL here to
4929 * signal the IMT thunk that the value is in V5.
4931 #ifdef USE_JUMP_TABLES
4932 /* In case of jumptables we always use value in V5. */
4935 if (call->dynamic_imt_arg)
4936 *((gpointer*)code) = NULL;
4938 *((gpointer*)code) = (gpointer)call->method;
4942 ins->flags |= MONO_INST_GC_CALLSITE;
4943 ins->backend.pc_offset = code - cfg->native_code;
4944 code = emit_move_return_value (cfg, ins, code);
4948 /* keep alignment */
4949 int alloca_waste = cfg->param_area;
4952 /* round the size to 8 bytes */
4953 ARM_ADD_REG_IMM8 (code, ins->dreg, ins->sreg1, 7);
4954 ARM_BIC_REG_IMM8 (code, ins->dreg, ins->dreg, 7);
4956 ARM_ADD_REG_IMM8 (code, ins->dreg, ins->dreg, alloca_waste);
4957 ARM_SUB_REG_REG (code, ARMREG_SP, ARMREG_SP, ins->dreg);
4958 /* memzero the area: dreg holds the size, sp is the pointer */
4959 if (ins->flags & MONO_INST_INIT) {
4960 guint8 *start_loop, *branch_to_cond;
4961 ARM_MOV_REG_IMM8 (code, ARMREG_LR, 0);
4962 branch_to_cond = code;
4965 ARM_STR_REG_REG (code, ARMREG_LR, ARMREG_SP, ins->dreg);
4966 arm_patch (branch_to_cond, code);
4967 /* decrement by 4 and set flags */
4968 ARM_SUBS_REG_IMM8 (code, ins->dreg, ins->dreg, sizeof (mgreg_t));
4969 ARM_B_COND (code, ARMCOND_GE, 0);
4970 arm_patch (code - 4, start_loop);
4972 ARM_ADD_REG_IMM8 (code, ins->dreg, ARMREG_SP, alloca_waste);
4977 MonoInst *var = cfg->dyn_call_var;
4979 g_assert (var->opcode == OP_REGOFFSET);
4980 g_assert (arm_is_imm12 (var->inst_offset));
4982 /* lr = args buffer filled by mono_arch_get_dyn_call_args () */
4983 ARM_MOV_REG_REG( code, ARMREG_LR, ins->sreg1);
4985 ARM_MOV_REG_REG( code, ARMREG_IP, ins->sreg2);
4987 /* Save args buffer */
4988 ARM_STR_IMM (code, ARMREG_LR, var->inst_basereg, var->inst_offset);
4990 /* Set stack slots using R0 as scratch reg */
4991 /* MONO_ARCH_DYN_CALL_PARAM_AREA gives the size of stack space available */
4992 for (i = 0; i < DYN_CALL_STACK_ARGS; ++i) {
4993 ARM_LDR_IMM (code, ARMREG_R0, ARMREG_LR, (PARAM_REGS + i) * sizeof (mgreg_t));
4994 ARM_STR_IMM (code, ARMREG_R0, ARMREG_SP, i * sizeof (mgreg_t));
4997 /* Set argument registers */
4998 for (i = 0; i < PARAM_REGS; ++i)
4999 ARM_LDR_IMM (code, i, ARMREG_LR, i * sizeof (mgreg_t));
5002 ARM_MOV_REG_REG (code, ARMREG_LR, ARMREG_PC);
5003 ARM_MOV_REG_REG (code, ARMREG_PC, ARMREG_IP);
5006 ARM_LDR_IMM (code, ARMREG_IP, var->inst_basereg, var->inst_offset);
5007 ARM_STR_IMM (code, ARMREG_R0, ARMREG_IP, MONO_STRUCT_OFFSET (DynCallArgs, res));
5008 ARM_STR_IMM (code, ARMREG_R1, ARMREG_IP, MONO_STRUCT_OFFSET (DynCallArgs, res2));
5012 if (ins->sreg1 != ARMREG_R0)
5013 ARM_MOV_REG_REG (code, ARMREG_R0, ins->sreg1);
5014 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_INTERNAL_METHOD,
5015 (gpointer)"mono_arch_throw_exception");
5016 code = emit_call_seq (cfg, code);
5020 if (ins->sreg1 != ARMREG_R0)
5021 ARM_MOV_REG_REG (code, ARMREG_R0, ins->sreg1);
5022 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_INTERNAL_METHOD,
5023 (gpointer)"mono_arch_rethrow_exception");
5024 code = emit_call_seq (cfg, code);
5027 case OP_START_HANDLER: {
5028 MonoInst *spvar = mono_find_spvar_for_region (cfg, bb->region);
5031 /* Reserve a param area, see filter-stack.exe */
5032 if (cfg->param_area) {
5033 if ((i = mono_arm_is_rotated_imm8 (cfg->param_area, &rot_amount)) >= 0) {
5034 ARM_SUB_REG_IMM (code, ARMREG_SP, ARMREG_SP, i, rot_amount);
5036 code = mono_arm_emit_load_imm (code, ARMREG_IP, cfg->param_area);
5037 ARM_SUB_REG_REG (code, ARMREG_SP, ARMREG_SP, ARMREG_IP);
5041 if (arm_is_imm12 (spvar->inst_offset)) {
5042 ARM_STR_IMM (code, ARMREG_LR, spvar->inst_basereg, spvar->inst_offset);
5044 code = mono_arm_emit_load_imm (code, ARMREG_IP, spvar->inst_offset);
5045 ARM_STR_REG_REG (code, ARMREG_LR, spvar->inst_basereg, ARMREG_IP);
5049 case OP_ENDFILTER: {
5050 MonoInst *spvar = mono_find_spvar_for_region (cfg, bb->region);
5053 /* Free the param area */
5054 if (cfg->param_area) {
5055 if ((i = mono_arm_is_rotated_imm8 (cfg->param_area, &rot_amount)) >= 0) {
5056 ARM_ADD_REG_IMM (code, ARMREG_SP, ARMREG_SP, i, rot_amount);
5058 code = mono_arm_emit_load_imm (code, ARMREG_IP, cfg->param_area);
5059 ARM_ADD_REG_REG (code, ARMREG_SP, ARMREG_SP, ARMREG_IP);
5063 if (ins->sreg1 != ARMREG_R0)
5064 ARM_MOV_REG_REG (code, ARMREG_R0, ins->sreg1);
5065 if (arm_is_imm12 (spvar->inst_offset)) {
5066 ARM_LDR_IMM (code, ARMREG_IP, spvar->inst_basereg, spvar->inst_offset);
5068 g_assert (ARMREG_IP != spvar->inst_basereg);
5069 code = mono_arm_emit_load_imm (code, ARMREG_IP, spvar->inst_offset);
5070 ARM_LDR_REG_REG (code, ARMREG_IP, spvar->inst_basereg, ARMREG_IP);
5072 ARM_MOV_REG_REG (code, ARMREG_PC, ARMREG_IP);
5075 case OP_ENDFINALLY: {
5076 MonoInst *spvar = mono_find_spvar_for_region (cfg, bb->region);
5079 /* Free the param area */
5080 if (cfg->param_area) {
5081 if ((i = mono_arm_is_rotated_imm8 (cfg->param_area, &rot_amount)) >= 0) {
5082 ARM_ADD_REG_IMM (code, ARMREG_SP, ARMREG_SP, i, rot_amount);
5084 code = mono_arm_emit_load_imm (code, ARMREG_IP, cfg->param_area);
5085 ARM_ADD_REG_REG (code, ARMREG_SP, ARMREG_SP, ARMREG_IP);
5089 if (arm_is_imm12 (spvar->inst_offset)) {
5090 ARM_LDR_IMM (code, ARMREG_IP, spvar->inst_basereg, spvar->inst_offset);
5092 g_assert (ARMREG_IP != spvar->inst_basereg);
5093 code = mono_arm_emit_load_imm (code, ARMREG_IP, spvar->inst_offset);
5094 ARM_LDR_REG_REG (code, ARMREG_IP, spvar->inst_basereg, ARMREG_IP);
5096 ARM_MOV_REG_REG (code, ARMREG_PC, ARMREG_IP);
5099 case OP_CALL_HANDLER:
5100 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_BB, ins->inst_target_bb);
5101 code = mono_arm_patchable_bl (code, ARMCOND_AL);
5102 mono_cfg_add_try_hole (cfg, ins->inst_eh_block, code, bb);
5105 ins->inst_c0 = code - cfg->native_code;
5108 /*if (ins->inst_target_bb->native_offset) {
5110 //x86_jump_code (code, cfg->native_code + ins->inst_target_bb->native_offset);
5112 mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_BB, ins->inst_target_bb);
5113 code = mono_arm_patchable_b (code, ARMCOND_AL);
5117 ARM_MOV_REG_REG (code, ARMREG_PC, ins->sreg1);
5121 * In the normal case we have:
5122 * ldr pc, [pc, ins->sreg1 << 2]
5125 * ldr lr, [pc, ins->sreg1 << 2]
5127 * After follows the data.
5128 * FIXME: add aot support.
5130 mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_SWITCH, ins->inst_p0);
5131 #ifdef USE_JUMP_TABLES
5133 gpointer *jte = mono_jumptable_add_entries (GPOINTER_TO_INT (ins->klass));
5134 code = mono_arm_load_jumptable_entry_addr (code, jte, ARMREG_IP);
5135 ARM_LDR_REG_REG_SHIFT (code, ARMREG_PC, ARMREG_IP, ins->sreg1, ARMSHIFT_LSL, 2);
5139 max_len += 4 * GPOINTER_TO_INT (ins->klass);
5140 if (offset + max_len > (cfg->code_size - 16)) {
5141 cfg->code_size += max_len;
5142 cfg->code_size *= 2;
5143 cfg->native_code = g_realloc (cfg->native_code, cfg->code_size);
5144 code = cfg->native_code + offset;
5146 ARM_LDR_REG_REG_SHIFT (code, ARMREG_PC, ARMREG_PC, ins->sreg1, ARMSHIFT_LSL, 2);
5148 code += 4 * GPOINTER_TO_INT (ins->klass);
5153 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 0, ARMCOND_NE);
5154 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_EQ);
5158 ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
5159 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_LT);
5163 ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
5164 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_LO);
5168 ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
5169 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_GT);
5173 ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
5174 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_HI);
5177 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_NE);
5178 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 0, ARMCOND_EQ);
5181 ARM_MOV_REG_IMM8 (code, ins->dreg, 1);
5182 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 0, ARMCOND_LT);
5185 ARM_MOV_REG_IMM8 (code, ins->dreg, 1);
5186 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 0, ARMCOND_GT);
5190 ARM_MOV_REG_IMM8 (code, ins->dreg, 1);
5191 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 0, ARMCOND_LO);
5193 case OP_COND_EXC_EQ:
5194 case OP_COND_EXC_NE_UN:
5195 case OP_COND_EXC_LT:
5196 case OP_COND_EXC_LT_UN:
5197 case OP_COND_EXC_GT:
5198 case OP_COND_EXC_GT_UN:
5199 case OP_COND_EXC_GE:
5200 case OP_COND_EXC_GE_UN:
5201 case OP_COND_EXC_LE:
5202 case OP_COND_EXC_LE_UN:
5203 EMIT_COND_SYSTEM_EXCEPTION (ins->opcode - OP_COND_EXC_EQ, ins->inst_p1);
5205 case OP_COND_EXC_IEQ:
5206 case OP_COND_EXC_INE_UN:
5207 case OP_COND_EXC_ILT:
5208 case OP_COND_EXC_ILT_UN:
5209 case OP_COND_EXC_IGT:
5210 case OP_COND_EXC_IGT_UN:
5211 case OP_COND_EXC_IGE:
5212 case OP_COND_EXC_IGE_UN:
5213 case OP_COND_EXC_ILE:
5214 case OP_COND_EXC_ILE_UN:
5215 EMIT_COND_SYSTEM_EXCEPTION (ins->opcode - OP_COND_EXC_IEQ, ins->inst_p1);
5218 case OP_COND_EXC_IC:
5219 EMIT_COND_SYSTEM_EXCEPTION_FLAGS (ARMCOND_CS, ins->inst_p1);
5221 case OP_COND_EXC_OV:
5222 case OP_COND_EXC_IOV:
5223 EMIT_COND_SYSTEM_EXCEPTION_FLAGS (ARMCOND_VS, ins->inst_p1);
5225 case OP_COND_EXC_NC:
5226 case OP_COND_EXC_INC:
5227 EMIT_COND_SYSTEM_EXCEPTION_FLAGS (ARMCOND_CC, ins->inst_p1);
5229 case OP_COND_EXC_NO:
5230 case OP_COND_EXC_INO:
5231 EMIT_COND_SYSTEM_EXCEPTION_FLAGS (ARMCOND_VC, ins->inst_p1);
5243 EMIT_COND_BRANCH (ins, ins->opcode - OP_IBEQ);
5246 /* floating point opcodes */
5248 if (cfg->compile_aot) {
5249 ARM_FLDD (code, ins->dreg, ARMREG_PC, 0);
5251 *(guint32*)code = ((guint32*)(ins->inst_p0))[0];
5253 *(guint32*)code = ((guint32*)(ins->inst_p0))[1];
5256 /* FIXME: we can optimize the imm load by dealing with part of
5257 * the displacement in LDFD (aligning to 512).
5259 code = mono_arm_emit_load_imm (code, ARMREG_LR, (guint32)ins->inst_p0);
5260 ARM_FLDD (code, ins->dreg, ARMREG_LR, 0);
5264 if (cfg->compile_aot) {
5265 ARM_FLDS (code, ins->dreg, ARMREG_PC, 0);
5267 *(guint32*)code = ((guint32*)(ins->inst_p0))[0];
5269 ARM_CVTS (code, ins->dreg, ins->dreg);
5271 code = mono_arm_emit_load_imm (code, ARMREG_LR, (guint32)ins->inst_p0);
5272 ARM_FLDS (code, ins->dreg, ARMREG_LR, 0);
5273 ARM_CVTS (code, ins->dreg, ins->dreg);
5276 case OP_STORER8_MEMBASE_REG:
5277 /* This is generated by the local regalloc pass which runs after the lowering pass */
5278 if (!arm_is_fpimm8 (ins->inst_offset)) {
5279 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_offset);
5280 ARM_ADD_REG_REG (code, ARMREG_LR, ARMREG_LR, ins->inst_destbasereg);
5281 ARM_FSTD (code, ins->sreg1, ARMREG_LR, 0);
5283 ARM_FSTD (code, ins->sreg1, ins->inst_destbasereg, ins->inst_offset);
5286 case OP_LOADR8_MEMBASE:
5287 /* This is generated by the local regalloc pass which runs after the lowering pass */
5288 if (!arm_is_fpimm8 (ins->inst_offset)) {
5289 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_offset);
5290 ARM_ADD_REG_REG (code, ARMREG_LR, ARMREG_LR, ins->inst_basereg);
5291 ARM_FLDD (code, ins->dreg, ARMREG_LR, 0);
5293 ARM_FLDD (code, ins->dreg, ins->inst_basereg, ins->inst_offset);
5296 case OP_STORER4_MEMBASE_REG:
5297 g_assert (arm_is_fpimm8 (ins->inst_offset));
5298 code = mono_arm_emit_vfp_scratch_save (cfg, code, vfp_scratch1);
5299 ARM_CVTD (code, vfp_scratch1, ins->sreg1);
5300 ARM_FSTS (code, vfp_scratch1, ins->inst_destbasereg, ins->inst_offset);
5301 code = mono_arm_emit_vfp_scratch_restore (cfg, code, vfp_scratch1);
5303 case OP_LOADR4_MEMBASE:
5304 g_assert (arm_is_fpimm8 (ins->inst_offset));
5305 code = mono_arm_emit_vfp_scratch_save (cfg, code, vfp_scratch1);
5306 ARM_FLDS (code, vfp_scratch1, ins->inst_basereg, ins->inst_offset);
5307 ARM_CVTS (code, ins->dreg, vfp_scratch1);
5308 code = mono_arm_emit_vfp_scratch_restore (cfg, code, vfp_scratch1);
5310 case OP_ICONV_TO_R_UN: {
5311 g_assert_not_reached ();
5314 case OP_ICONV_TO_R4:
5315 code = mono_arm_emit_vfp_scratch_save (cfg, code, vfp_scratch1);
5316 ARM_FMSR (code, vfp_scratch1, ins->sreg1);
5317 ARM_FSITOS (code, vfp_scratch1, vfp_scratch1);
5318 ARM_CVTS (code, ins->dreg, vfp_scratch1);
5319 code = mono_arm_emit_vfp_scratch_restore (cfg, code, vfp_scratch1);
5321 case OP_ICONV_TO_R8:
5322 code = mono_arm_emit_vfp_scratch_save (cfg, code, vfp_scratch1);
5323 ARM_FMSR (code, vfp_scratch1, ins->sreg1);
5324 ARM_FSITOD (code, ins->dreg, vfp_scratch1);
5325 code = mono_arm_emit_vfp_scratch_restore (cfg, code, vfp_scratch1);
5329 MonoType *sig_ret = mini_type_get_underlying_type (NULL, mono_method_signature (cfg->method)->ret);
5330 if (sig_ret->type == MONO_TYPE_R4) {
5331 ARM_CVTD (code, ARM_VFP_F0, ins->sreg1);
5333 if (!IS_HARD_FLOAT) {
5334 ARM_FMRS (code, ARMREG_R0, ARM_VFP_F0);
5337 if (IS_HARD_FLOAT) {
5338 ARM_CPYD (code, ARM_VFP_D0, ins->sreg1);
5340 ARM_FMRRD (code, ARMREG_R0, ARMREG_R1, ins->sreg1);
5345 case OP_FCONV_TO_I1:
5346 code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 1, TRUE);
5348 case OP_FCONV_TO_U1:
5349 code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 1, FALSE);
5351 case OP_FCONV_TO_I2:
5352 code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 2, TRUE);
5354 case OP_FCONV_TO_U2:
5355 code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 2, FALSE);
5357 case OP_FCONV_TO_I4:
5359 code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 4, TRUE);
5361 case OP_FCONV_TO_U4:
5363 code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 4, FALSE);
5365 case OP_FCONV_TO_I8:
5366 case OP_FCONV_TO_U8:
5367 g_assert_not_reached ();
5368 /* Implemented as helper calls */
5370 case OP_LCONV_TO_R_UN:
5371 g_assert_not_reached ();
5372 /* Implemented as helper calls */
5374 case OP_LCONV_TO_OVF_I4_2: {
5375 guint8 *high_bit_not_set, *valid_negative, *invalid_negative, *valid_positive;
5377 * Valid ints: 0xffffffff:8000000 to 00000000:0x7f000000
5380 ARM_CMP_REG_IMM8 (code, ins->sreg1, 0);
5381 high_bit_not_set = code;
5382 ARM_B_COND (code, ARMCOND_GE, 0); /*branch if bit 31 of the lower part is not set*/
5384 ARM_CMN_REG_IMM8 (code, ins->sreg2, 1); /*This have the same effect as CMP reg, 0xFFFFFFFF */
5385 valid_negative = code;
5386 ARM_B_COND (code, ARMCOND_EQ, 0); /*branch if upper part == 0xFFFFFFFF (lower part has bit 31 set) */
5387 invalid_negative = code;
5388 ARM_B_COND (code, ARMCOND_AL, 0);
5390 arm_patch (high_bit_not_set, code);
5392 ARM_CMP_REG_IMM8 (code, ins->sreg2, 0);
5393 valid_positive = code;
5394 ARM_B_COND (code, ARMCOND_EQ, 0); /*branch if upper part == 0 (lower part has bit 31 clear)*/
5396 arm_patch (invalid_negative, code);
5397 EMIT_COND_SYSTEM_EXCEPTION_FLAGS (ARMCOND_AL, "OverflowException");
5399 arm_patch (valid_negative, code);
5400 arm_patch (valid_positive, code);
5402 if (ins->dreg != ins->sreg1)
5403 ARM_MOV_REG_REG (code, ins->dreg, ins->sreg1);
5407 ARM_VFP_ADDD (code, ins->dreg, ins->sreg1, ins->sreg2);
5410 ARM_VFP_SUBD (code, ins->dreg, ins->sreg1, ins->sreg2);
5413 ARM_VFP_MULD (code, ins->dreg, ins->sreg1, ins->sreg2);
5416 ARM_VFP_DIVD (code, ins->dreg, ins->sreg1, ins->sreg2);
5419 ARM_NEGD (code, ins->dreg, ins->sreg1);
5423 g_assert_not_reached ();
5427 ARM_CMPD (code, ins->sreg1, ins->sreg2);
5433 ARM_CMPD (code, ins->sreg1, ins->sreg2);
5436 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 0, ARMCOND_NE);
5437 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_EQ);
5441 ARM_CMPD (code, ins->sreg1, ins->sreg2);
5444 ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
5445 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_MI);
5449 ARM_CMPD (code, ins->sreg1, ins->sreg2);
5452 ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
5453 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_MI);
5454 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_VS);
5458 ARM_CMPD (code, ins->sreg2, ins->sreg1);
5461 ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
5462 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_MI);
5466 ARM_CMPD (code, ins->sreg2, ins->sreg1);
5469 ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
5470 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_MI);
5471 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_VS);
5475 ARM_CMPD (code, ins->sreg1, ins->sreg2);
5478 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_NE);
5479 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 0, ARMCOND_EQ);
5483 ARM_CMPD (code, ins->sreg1, ins->sreg2);
5486 ARM_MOV_REG_IMM8 (code, ins->dreg, 1);
5487 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 0, ARMCOND_MI);
5491 ARM_CMPD (code, ins->sreg2, ins->sreg1);
5494 ARM_MOV_REG_IMM8 (code, ins->dreg, 1);
5495 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 0, ARMCOND_MI);
5498 /* ARM FPA flags table:
5499 * N Less than ARMCOND_MI
5500 * Z Equal ARMCOND_EQ
5501 * C Greater Than or Equal ARMCOND_CS
5502 * V Unordered ARMCOND_VS
5505 EMIT_COND_BRANCH (ins, OP_IBEQ - OP_IBEQ);
5508 EMIT_COND_BRANCH (ins, OP_IBNE_UN - OP_IBEQ);
5511 EMIT_COND_BRANCH_FLAGS (ins, ARMCOND_MI); /* N set */
5514 EMIT_COND_BRANCH_FLAGS (ins, ARMCOND_VS); /* V set */
5515 EMIT_COND_BRANCH_FLAGS (ins, ARMCOND_MI); /* N set */
5521 g_assert_not_reached ();
5525 EMIT_COND_BRANCH_FLAGS (ins, ARMCOND_GE);
5527 /* FPA requires EQ even thou the docs suggests that just CS is enough */
5528 EMIT_COND_BRANCH_FLAGS (ins, ARMCOND_EQ);
5529 EMIT_COND_BRANCH_FLAGS (ins, ARMCOND_CS);
5533 EMIT_COND_BRANCH_FLAGS (ins, ARMCOND_VS); /* V set */
5534 EMIT_COND_BRANCH_FLAGS (ins, ARMCOND_GE);
5539 code = mono_arm_emit_vfp_scratch_save (cfg, code, vfp_scratch1);
5540 code = mono_arm_emit_vfp_scratch_save (cfg, code, vfp_scratch2);
5542 #ifdef USE_JUMP_TABLES
5544 gpointer *jte = mono_jumptable_add_entries (2);
5545 jte [0] = GUINT_TO_POINTER (0xffffffff);
5546 jte [1] = GUINT_TO_POINTER (0x7fefffff);
5547 code = mono_arm_load_jumptable_entry_addr (code, jte, ARMREG_IP);
5548 ARM_FLDD (code, vfp_scratch1, ARMREG_IP, 0);
5551 ARM_ABSD (code, vfp_scratch2, ins->sreg1);
5552 ARM_FLDD (code, vfp_scratch1, ARMREG_PC, 0);
5554 *(guint32*)code = 0xffffffff;
5556 *(guint32*)code = 0x7fefffff;
5559 ARM_CMPD (code, vfp_scratch2, vfp_scratch1);
5561 EMIT_COND_SYSTEM_EXCEPTION_FLAGS (ARMCOND_GT, "ArithmeticException");
5562 ARM_CMPD (code, ins->sreg1, ins->sreg1);
5564 EMIT_COND_SYSTEM_EXCEPTION_FLAGS (ARMCOND_VS, "ArithmeticException");
5565 ARM_CPYD (code, ins->dreg, ins->sreg1);
5567 code = mono_arm_emit_vfp_scratch_restore (cfg, code, vfp_scratch1);
5568 code = mono_arm_emit_vfp_scratch_restore (cfg, code, vfp_scratch2);
5573 case OP_GC_LIVENESS_DEF:
5574 case OP_GC_LIVENESS_USE:
5575 case OP_GC_PARAM_SLOT_LIVENESS_DEF:
5576 ins->backend.pc_offset = code - cfg->native_code;
5578 case OP_GC_SPILL_SLOT_LIVENESS_DEF:
5579 ins->backend.pc_offset = code - cfg->native_code;
5580 bb->spill_slot_defs = g_slist_prepend_mempool (cfg->mempool, bb->spill_slot_defs, ins);
5584 g_warning ("unknown opcode %s in %s()\n", mono_inst_name (ins->opcode), __FUNCTION__);
5585 g_assert_not_reached ();
5588 if ((cfg->opt & MONO_OPT_BRANCH) && ((code - cfg->native_code - offset) > max_len)) {
5589 g_warning ("wrong maximal instruction length of instruction %s (expected %d, got %d)",
5590 mono_inst_name (ins->opcode), max_len, code - cfg->native_code - offset);
5591 g_assert_not_reached ();
5597 last_offset = offset;
5600 cfg->code_len = code - cfg->native_code;
5603 #endif /* DISABLE_JIT */
5605 #ifdef HAVE_AEABI_READ_TP
5606 void __aeabi_read_tp (void);
/*
 * mono_arch_register_lowlevel_calls:
 *
 *   Register the ARM-specific low-level helpers as JIT icalls so the runtime
 * can resolve references to them by name when patching generated code.
 */
mono_arch_register_lowlevel_calls (void)
	/* The signature doesn't matter */
	mono_register_jit_icall (mono_arm_throw_exception, "mono_arm_throw_exception", mono_create_icall_signature ("void"), TRUE);
	mono_register_jit_icall (mono_arm_throw_exception_by_token, "mono_arm_throw_exception_by_token", mono_create_icall_signature ("void"), TRUE);
#ifndef MONO_CROSS_COMPILE
#ifdef HAVE_AEABI_READ_TP
	/* EABI TLS accessor; only available on native linux-eabi builds (see HAVE_AEABI_READ_TP at the top of the file) */
	mono_register_jit_icall (__aeabi_read_tp, "__aeabi_read_tp", mono_create_icall_signature ("void"), TRUE);
/*
 * patch_lis_ori:
 *
 *   Write the high and low 16-bit halves of VAL into the immediate fields of a
 * two-instruction "load 32-bit constant" sequence at IP.
 * NOTE(review): lis/ori is a PowerPC idiom, and every use of this macro in
 * mono_arch_patch_code below sits behind g_assert_not_reached () — this looks
 * like dead code inherited from the PPC backend; confirm before relying on it.
 */
#define patch_lis_ori(ip,val) do {\
guint16 *__lis_ori = (guint16*)(ip); \
__lis_ori [1] = (((guint32)(val)) >> 16) & 0xffff; \
__lis_ori [3] = ((guint32)(val)) & 0xffff; \
/*
 * mono_arch_patch_code:
 *
 *   Walk the MonoJumpInfo list JI and resolve each patch site in the freshly
 * emitted native CODE: switch tables are filled in directly, trivially
 * resolvable kinds are skipped, and everything else is resolved via
 * mono_resolve_patch_target () and applied with arm_patch_general ().
 */
mono_arch_patch_code (MonoMethod *method, MonoDomain *domain, guint8 *code, MonoJumpInfo *ji, MonoCodeManager *dyn_code_mp, gboolean run_cctors)
	MonoJumpInfo *patch_info;
	/* run_cctors is only FALSE when AOT compiling, so its negation selects AOT mode */
	gboolean compile_aot = !run_cctors;

	for (patch_info = ji; patch_info; patch_info = patch_info->next) {
		unsigned char *ip = patch_info->ip.i + code;
		const unsigned char *target;

		/* Switch tables are materialized in-line (or in the jump table area) and
		 * filled here instead of going through the generic patching path. */
		if (patch_info->type == MONO_PATCH_INFO_SWITCH && !compile_aot) {
#ifdef USE_JUMP_TABLES
			gpointer *jt = mono_jumptable_get_entry (ip);
			gpointer *jt = (gpointer*)(ip + 8);
			/* jt is the inlined jump table, 2 instructions after ip
			 * In the normal case we store the absolute addresses,
			 * otherwise the displacements.
			for (i = 0; i < patch_info->data.table->table_size; i++)
				jt [i] = code + (int)patch_info->data.table->table [i];
		switch (patch_info->type) {
		case MONO_PATCH_INFO_BB:
		case MONO_PATCH_INFO_LABEL:
			/* No need to patch these */
		target = mono_resolve_patch_target (method, domain, code, patch_info, run_cctors);
		switch (patch_info->type) {
		/* NOTE(review): the cases below that start with g_assert_not_reached ()
		 * and use patch_lis_ori () / raw pointer stores appear to be dead
		 * PowerPC-era code paths kept for reference — confirm before removal. */
		case MONO_PATCH_INFO_IP:
			g_assert_not_reached ();
			patch_lis_ori (ip, ip);
		case MONO_PATCH_INFO_METHOD_REL:
			g_assert_not_reached ();
			*((gpointer *)(ip)) = code + patch_info->data.offset;
		case MONO_PATCH_INFO_METHODCONST:
		case MONO_PATCH_INFO_CLASS:
		case MONO_PATCH_INFO_IMAGE:
		case MONO_PATCH_INFO_FIELD:
		case MONO_PATCH_INFO_VTABLE:
		case MONO_PATCH_INFO_IID:
		case MONO_PATCH_INFO_SFLDA:
		case MONO_PATCH_INFO_LDSTR:
		case MONO_PATCH_INFO_TYPE_FROM_HANDLE:
		case MONO_PATCH_INFO_LDTOKEN:
			g_assert_not_reached ();
			/* from OP_AOTCONST : lis + ori */
			patch_lis_ori (ip, target);
		case MONO_PATCH_INFO_R4:
		case MONO_PATCH_INFO_R8:
			g_assert_not_reached ();
			*((gconstpointer *)(ip + 2)) = patch_info->data.target;
		case MONO_PATCH_INFO_EXC_NAME:
			g_assert_not_reached ();
			*((gconstpointer *)(ip + 1)) = patch_info->data.name;
		case MONO_PATCH_INFO_NONE:
		case MONO_PATCH_INFO_BB_OVF:
		case MONO_PATCH_INFO_EXC_OVF:
			/* everything is dealt with at epilog output time */
		/* Default path: let the generic ARM patcher rewrite the instruction(s) at ip */
		arm_patch_general (domain, ip, target, dyn_code_mp);
5715 * Stack frame layout:
5717 * ------------------- fp
5718 * MonoLMF structure or saved registers
5719 * -------------------
5721 * -------------------
5723 * -------------------
5724 * optional 8 bytes for tracing
5725 * -------------------
5726 * param area size is cfg->param_area
5727 * ------------------- sp
/*
 * mono_arch_emit_prolog:
 *
 *   Emit the method prologue: allocate the initial native code buffer, save
 * callee-saved registers (or the register portion of a MonoLMF when
 * method->save_lmf is set), allocate and align the stack frame, then spill
 * incoming arguments from their calling-convention locations (registers or
 * caller stack) into their home slots. DWARF unwind ops and GC stack-slot
 * types are recorded alongside each step, and sequence-point/debugger
 * variables are initialized at the end.
 * NOTE(review): this is an elided view of the function — several structural
 * lines (braces, else branches, case labels) are not visible here, so the
 * indentation below is approximate.
 */
mono_arch_emit_prolog (MonoCompile *cfg)
	MonoMethod *method = cfg->method;
	MonoMethodSignature *sig;
	int alloc_size, orig_alloc_size, pos, max_offset, i, rot_amount;
	int prev_sp_offset, reg_offset;

	if (mono_jit_trace_calls != NULL && mono_trace_eval (method))
	sig = mono_method_signature (method);
	/* Initial buffer size guess; grown later if the method outgrows it */
	cfg->code_size = 256 + sig->param_count * 64;
	code = cfg->native_code = g_malloc (cfg->code_size);
	/* Unwind: on entry the CFA is SP+0 */
	mono_emit_unwind_op_def_cfa (cfg, code, ARMREG_SP, 0);
	alloc_size = cfg->stack_offset;
	 * The iphone uses R7 as the frame pointer, and it points at the saved
	 * We can't use r7 as a frame pointer since it points into the middle of
	 * the frame, so we keep using our own frame pointer.
	 * FIXME: Optimize this.
	/* iphone ABI: keep an r7/lr pair at the frame top for native unwinders */
	ARM_PUSH (code, (1 << ARMREG_R7) | (1 << ARMREG_LR));
	ARM_MOV_REG_REG (code, ARMREG_R7, ARMREG_SP);
	prev_sp_offset += 8; /* r7 and lr */
	mono_emit_unwind_op_def_cfa_offset (cfg, code, prev_sp_offset);
	mono_emit_unwind_op_offset (cfg, code, ARMREG_R7, (- prev_sp_offset) + 0);

	if (!method->save_lmf) {
		/* No need to push LR again */
		if (cfg->used_int_regs)
			ARM_PUSH (code, cfg->used_int_regs);
		ARM_PUSH (code, cfg->used_int_regs | (1 << ARMREG_LR));
		prev_sp_offset += 4;
		for (i = 0; i < 16; ++i) {
			if (cfg->used_int_regs & (1 << i))
				prev_sp_offset += 4;
		mono_emit_unwind_op_def_cfa_offset (cfg, code, prev_sp_offset);
		for (i = 0; i < 16; ++i) {
			if ((cfg->used_int_regs & (1 << i))) {
				mono_emit_unwind_op_offset (cfg, code, i, (- prev_sp_offset) + reg_offset);
				/* saved integer register slots hold no GC references */
				mini_gc_set_slot_type_from_cfa (cfg, (- prev_sp_offset) + reg_offset, SLOT_NOREF);
		mono_emit_unwind_op_offset (cfg, code, ARMREG_LR, -4);
		mini_gc_set_slot_type_from_cfa (cfg, -4, SLOT_NOREF);
		mono_emit_unwind_op_offset (cfg, code, ARMREG_LR, -4);
		mini_gc_set_slot_type_from_cfa (cfg, -4, SLOT_NOREF);
	/* save_lmf path: snapshot SP in IP, then push the register set 0x5ff0 */
	ARM_MOV_REG_REG (code, ARMREG_IP, ARMREG_SP);
	ARM_PUSH (code, 0x5ff0);
	prev_sp_offset += 4 * 10; /* all but r0-r3, sp and pc */
	mono_emit_unwind_op_def_cfa_offset (cfg, code, prev_sp_offset);
	for (i = 0; i < 16; ++i) {
		if ((i > ARMREG_R3) && (i != ARMREG_SP) && (i != ARMREG_PC)) {
			/* The original r7 is saved at the start */
			if (!(iphone_abi && i == ARMREG_R7))
				mono_emit_unwind_op_offset (cfg, code, i, (- prev_sp_offset) + reg_offset);
	g_assert (reg_offset == 4 * 10);
	/* reserve the rest of the MonoLMF beyond the 10 pushed registers */
	pos += sizeof (MonoLMF) - (4 * 10);

	orig_alloc_size = alloc_size;
	// align to MONO_ARCH_FRAME_ALIGNMENT bytes
	if (alloc_size & (MONO_ARCH_FRAME_ALIGNMENT - 1)) {
		alloc_size += MONO_ARCH_FRAME_ALIGNMENT - 1;
		alloc_size &= ~(MONO_ARCH_FRAME_ALIGNMENT - 1);
	/* the stack used in the pushed regs */
	if (prev_sp_offset & 4)
	cfg->stack_usage = alloc_size;
	/* SP -= alloc_size, using an immediate form when it fits a rotated imm8 */
	if ((i = mono_arm_is_rotated_imm8 (alloc_size, &rot_amount)) >= 0) {
		ARM_SUB_REG_IMM (code, ARMREG_SP, ARMREG_SP, i, rot_amount);
	code = mono_arm_emit_load_imm (code, ARMREG_IP, alloc_size);
	ARM_SUB_REG_REG (code, ARMREG_SP, ARMREG_SP, ARMREG_IP);
	mono_emit_unwind_op_def_cfa_offset (cfg, code, prev_sp_offset + alloc_size);

	if (cfg->frame_reg != ARMREG_SP) {
		ARM_MOV_REG_REG (code, cfg->frame_reg, ARMREG_SP);
		mono_emit_unwind_op_def_cfa_reg (cfg, code, cfg->frame_reg);
	//g_print ("prev_sp_offset: %d, alloc_size:%d\n", prev_sp_offset, alloc_size);
	prev_sp_offset += alloc_size;

	/* alignment padding added above is never a GC reference */
	for (i = 0; i < alloc_size - orig_alloc_size; i += 4)
		mini_gc_set_slot_type_from_cfa (cfg, (- prev_sp_offset) + orig_alloc_size + i, SLOT_NOREF);

	/* compute max_offset in order to use short forward jumps
	 * we could skip do it on arm because the immediate displacement
	 * for jumps is large enough, it may be useful later for constant pools
	for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
		MonoInst *ins = bb->code;
		bb->max_offset = max_offset;
		if (cfg->prof_options & MONO_PROFILE_COVERAGE)
		MONO_BB_FOR_EACH_INS (bb, ins)
			max_offset += ((guint8 *)ins_get_spec (ins->opcode))[MONO_INST_LEN];

	/* store runtime generic context */
	if (cfg->rgctx_var) {
		MonoInst *ins = cfg->rgctx_var;
		g_assert (ins->opcode == OP_REGOFFSET);
		if (arm_is_imm12 (ins->inst_offset)) {
			ARM_STR_IMM (code, MONO_ARCH_RGCTX_REG, ins->inst_basereg, ins->inst_offset);
		code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_offset);
		ARM_STR_REG_REG (code, MONO_ARCH_RGCTX_REG, ins->inst_basereg, ARMREG_LR);

	/* load arguments allocated to register from the stack */
	cinfo = get_call_info (cfg->generic_sharing_context, NULL, sig);

	/* store the hidden valuetype-return address into its local slot */
	if (cinfo->vtype_retaddr) {
		ArgInfo *ainfo = &cinfo->ret;
		inst = cfg->vret_addr;
		g_assert (arm_is_imm12 (inst->inst_offset));
		ARM_STR_IMM (code, ainfo->reg, inst->inst_basereg, inst->inst_offset);

	if (sig->call_convention == MONO_CALL_VARARG) {
		ArgInfo *cookie = &cinfo->sig_cookie;

		/* Save the sig cookie address */
		g_assert (cookie->storage == RegTypeBase);

		g_assert (arm_is_imm12 (prev_sp_offset + cookie->offset));
		g_assert (arm_is_imm12 (cfg->sig_cookie));
		ARM_ADD_REG_IMM8 (code, ARMREG_IP, cfg->frame_reg, prev_sp_offset + cookie->offset);
		ARM_STR_IMM (code, ARMREG_IP, cfg->frame_reg, cfg->sig_cookie);

	/* Spill each incoming argument (including 'this') to its home location */
	for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
		ArgInfo *ainfo = cinfo->args + i;
		inst = cfg->args [pos];

		if (cfg->verbose_level > 2)
			g_print ("Saving argument %d (type: %d)\n", i, ainfo->storage);
		if (inst->opcode == OP_REGVAR) {
			if (ainfo->storage == RegTypeGeneral)
				ARM_MOV_REG_REG (code, inst->dreg, ainfo->reg);
			else if (ainfo->storage == RegTypeFP) {
				g_assert_not_reached ();
			} else if (ainfo->storage == RegTypeBase) {
				/* argument arrives on the caller's stack; load it into its register */
				if (arm_is_imm12 (prev_sp_offset + ainfo->offset)) {
					ARM_LDR_IMM (code, inst->dreg, ARMREG_SP, (prev_sp_offset + ainfo->offset));
				code = mono_arm_emit_load_imm (code, ARMREG_IP, prev_sp_offset + ainfo->offset);
				ARM_LDR_REG_REG (code, inst->dreg, ARMREG_SP, ARMREG_IP);
				g_assert_not_reached ();
			if (cfg->verbose_level > 2)
				g_print ("Argument %d assigned to register %s\n", pos, mono_arch_regname (inst->dreg));
			/* the argument should be put on the stack: FIXME handle size != word */
			if (ainfo->storage == RegTypeGeneral || ainfo->storage == RegTypeIRegPair || ainfo->storage == RegTypeGSharedVtInReg) {
				/* store the incoming register to the stack slot, sized by ainfo->size */
				switch (ainfo->size) {
					if (arm_is_imm12 (inst->inst_offset))
						ARM_STRB_IMM (code, ainfo->reg, inst->inst_basereg, inst->inst_offset);
					code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
					ARM_STRB_REG_REG (code, ainfo->reg, inst->inst_basereg, ARMREG_IP);
					if (arm_is_imm8 (inst->inst_offset)) {
						ARM_STRH_IMM (code, ainfo->reg, inst->inst_basereg, inst->inst_offset);
					code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
					ARM_STRH_REG_REG (code, ainfo->reg, inst->inst_basereg, ARMREG_IP);
					/* 8-byte case: store both registers of the pair */
					if (arm_is_imm12 (inst->inst_offset)) {
						ARM_STR_IMM (code, ainfo->reg, inst->inst_basereg, inst->inst_offset);
					code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
					ARM_STR_REG_REG (code, ainfo->reg, inst->inst_basereg, ARMREG_IP);
					if (arm_is_imm12 (inst->inst_offset + 4)) {
						ARM_STR_IMM (code, ainfo->reg + 1, inst->inst_basereg, inst->inst_offset + 4);
					code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset + 4);
					ARM_STR_REG_REG (code, ainfo->reg + 1, inst->inst_basereg, ARMREG_IP);
					if (arm_is_imm12 (inst->inst_offset)) {
						ARM_STR_IMM (code, ainfo->reg, inst->inst_basereg, inst->inst_offset);
					code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
					ARM_STR_REG_REG (code, ainfo->reg, inst->inst_basereg, ARMREG_IP);
			} else if (ainfo->storage == RegTypeBaseGen) {
				/* 64-bit value split between r3 (low word) and the caller stack (high word) */
				if (arm_is_imm12 (prev_sp_offset + ainfo->offset)) {
					ARM_LDR_IMM (code, ARMREG_LR, ARMREG_SP, (prev_sp_offset + ainfo->offset));
				code = mono_arm_emit_load_imm (code, ARMREG_IP, prev_sp_offset + ainfo->offset);
				ARM_LDR_REG_REG (code, ARMREG_LR, ARMREG_SP, ARMREG_IP);
				if (arm_is_imm12 (inst->inst_offset + 4)) {
					ARM_STR_IMM (code, ARMREG_LR, inst->inst_basereg, inst->inst_offset + 4);
					ARM_STR_IMM (code, ARMREG_R3, inst->inst_basereg, inst->inst_offset);
				code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset + 4);
				ARM_STR_REG_REG (code, ARMREG_LR, inst->inst_basereg, ARMREG_IP);
				code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
				ARM_STR_REG_REG (code, ARMREG_R3, inst->inst_basereg, ARMREG_IP);
			} else if (ainfo->storage == RegTypeBase || ainfo->storage == RegTypeGSharedVtOnStack) {
				/* argument lives on the caller's stack; copy it to the local slot via LR */
				if (arm_is_imm12 (prev_sp_offset + ainfo->offset)) {
					ARM_LDR_IMM (code, ARMREG_LR, ARMREG_SP, (prev_sp_offset + ainfo->offset));
				code = mono_arm_emit_load_imm (code, ARMREG_IP, prev_sp_offset + ainfo->offset);
				ARM_LDR_REG_REG (code, ARMREG_LR, ARMREG_SP, ARMREG_IP);
				switch (ainfo->size) {
					if (arm_is_imm8 (inst->inst_offset)) {
						ARM_STRB_IMM (code, ARMREG_LR, inst->inst_basereg, inst->inst_offset);
					code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
					ARM_STRB_REG_REG (code, ARMREG_LR, inst->inst_basereg, ARMREG_IP);
					if (arm_is_imm8 (inst->inst_offset)) {
						ARM_STRH_IMM (code, ARMREG_LR, inst->inst_basereg, inst->inst_offset);
					code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
					ARM_STRH_REG_REG (code, ARMREG_LR, inst->inst_basereg, ARMREG_IP);
					/* 8-byte case: copy low word, then reload LR with the high word */
					if (arm_is_imm12 (inst->inst_offset)) {
						ARM_STR_IMM (code, ARMREG_LR, inst->inst_basereg, inst->inst_offset);
					code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
					ARM_STR_REG_REG (code, ARMREG_LR, inst->inst_basereg, ARMREG_IP);
					if (arm_is_imm12 (prev_sp_offset + ainfo->offset + 4)) {
						ARM_LDR_IMM (code, ARMREG_LR, ARMREG_SP, (prev_sp_offset + ainfo->offset + 4));
					code = mono_arm_emit_load_imm (code, ARMREG_IP, prev_sp_offset + ainfo->offset + 4);
					ARM_LDR_REG_REG (code, ARMREG_LR, ARMREG_SP, ARMREG_IP);
					if (arm_is_imm12 (inst->inst_offset + 4)) {
						ARM_STR_IMM (code, ARMREG_LR, inst->inst_basereg, inst->inst_offset + 4);
					code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset + 4);
					ARM_STR_REG_REG (code, ARMREG_LR, inst->inst_basereg, ARMREG_IP);
					if (arm_is_imm12 (inst->inst_offset)) {
						ARM_STR_IMM (code, ARMREG_LR, inst->inst_basereg, inst->inst_offset);
					code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
					ARM_STR_REG_REG (code, ARMREG_LR, inst->inst_basereg, ARMREG_IP);
			} else if (ainfo->storage == RegTypeFP) {
				int imm8, rot_amount;

				/* compute the destination address in IP, then store the VFP register */
				if ((imm8 = mono_arm_is_rotated_imm8 (inst->inst_offset, &rot_amount)) == -1) {
					code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
					ARM_ADD_REG_REG (code, ARMREG_IP, ARMREG_IP, inst->inst_basereg);
				ARM_ADD_REG_IMM (code, ARMREG_IP, inst->inst_basereg, imm8, rot_amount);
				if (ainfo->size == 8)
					ARM_FSTD (code, ainfo->reg, ARMREG_IP, 0);
				ARM_FSTS (code, ainfo->reg, ARMREG_IP, 0);
			} else if (ainfo->storage == RegTypeStructByVal) {
				int doffset = inst->inst_offset;

				size = mini_type_stack_size_full (cfg->generic_sharing_context, inst->inst_vtype, NULL, sig->pinvoke);
				/* store the register-passed words of the struct... */
				for (cur_reg = 0; cur_reg < ainfo->size; ++cur_reg) {
					if (arm_is_imm12 (doffset)) {
						ARM_STR_IMM (code, ainfo->reg + cur_reg, inst->inst_basereg, doffset);
					code = mono_arm_emit_load_imm (code, ARMREG_IP, doffset);
					ARM_STR_REG_REG (code, ainfo->reg + cur_reg, inst->inst_basereg, ARMREG_IP);
					soffset += sizeof (gpointer);
					doffset += sizeof (gpointer);
				/* ...then memcpy the remainder from the caller's stack */
				if (ainfo->vtsize) {
					/* FIXME: handle overrun! with struct sizes not multiple of 4 */
					//g_print ("emit_memcpy (prev_sp_ofs: %d, ainfo->offset: %d, soffset: %d)\n", prev_sp_offset, ainfo->offset, soffset);
					code = emit_memcpy (code, ainfo->vtsize * sizeof (gpointer), inst->inst_basereg, doffset, ARMREG_SP, prev_sp_offset + ainfo->offset);
			} else if (ainfo->storage == RegTypeStructByAddr) {
				g_assert_not_reached ();
				/* FIXME: handle overrun! with struct sizes not multiple of 4 */
				code = emit_memcpy (code, ainfo->vtsize * sizeof (gpointer), inst->inst_basereg, inst->inst_offset, ainfo->reg, 0);
				g_assert_not_reached ();

	if (method->save_lmf)
		code = emit_save_lmf (cfg, code, alloc_size - lmf_offset);

	code = mono_arch_instrument_prolog (cfg, mono_trace_enter_method, code, TRUE);

	if (cfg->arch.seq_point_info_var) {
		MonoInst *ins = cfg->arch.seq_point_info_var;

		/* Initialize the variable from a GOT slot */
		mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_SEQ_POINT_INFO, cfg->method);
#ifdef USE_JUMP_TABLES
		gpointer *jte = mono_jumptable_add_entry ();
		code = mono_arm_load_jumptable_entry (code, jte, ARMREG_IP);
		ARM_LDR_IMM (code, ARMREG_R0, ARMREG_IP, 0);
		/** XXX: is it correct? */
		/* non-jump-table path: load the (patched-in) pointer from an inline literal */
		ARM_LDR_IMM (code, ARMREG_R0, ARMREG_PC, 0);
		*(gpointer*)code = NULL;
		ARM_LDR_REG_REG (code, ARMREG_R0, ARMREG_PC, ARMREG_R0);

		g_assert (ins->opcode == OP_REGOFFSET);

		if (arm_is_imm12 (ins->inst_offset)) {
			ARM_STR_IMM (code, ARMREG_R0, ins->inst_basereg, ins->inst_offset);
		code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_offset);
		ARM_STR_REG_REG (code, ARMREG_R0, ins->inst_basereg, ARMREG_LR);

	/* Initialize ss_trigger_page_var */
	if (!cfg->soft_breakpoints) {
		MonoInst *info_var = cfg->arch.seq_point_info_var;
		MonoInst *ss_trigger_page_var = cfg->arch.ss_trigger_page_var;
		int dreg = ARMREG_LR;

		g_assert (info_var->opcode == OP_REGOFFSET);
		g_assert (arm_is_imm12 (info_var->inst_offset));

		ARM_LDR_IMM (code, dreg, info_var->inst_basereg, info_var->inst_offset);
		/* Load the trigger page addr */
		ARM_LDR_IMM (code, dreg, dreg, MONO_STRUCT_OFFSET (SeqPointInfo, ss_trigger_page));
		ARM_STR_IMM (code, dreg, ss_trigger_page_var->inst_basereg, ss_trigger_page_var->inst_offset);

	/* soft-breakpoint support: cache the ss trigger flag and helper wrappers in locals */
	if (cfg->arch.seq_point_read_var) {
		MonoInst *read_ins = cfg->arch.seq_point_read_var;
		MonoInst *ss_method_ins = cfg->arch.seq_point_ss_method_var;
		MonoInst *bp_method_ins = cfg->arch.seq_point_bp_method_var;
#ifdef USE_JUMP_TABLES

		g_assert (read_ins->opcode == OP_REGOFFSET);
		g_assert (arm_is_imm12 (read_ins->inst_offset));
		g_assert (ss_method_ins->opcode == OP_REGOFFSET);
		g_assert (arm_is_imm12 (ss_method_ins->inst_offset));
		g_assert (bp_method_ins->opcode == OP_REGOFFSET);
		g_assert (arm_is_imm12 (bp_method_ins->inst_offset));

#ifdef USE_JUMP_TABLES
		jte = mono_jumptable_add_entries (3);
		jte [0] = (gpointer)&ss_trigger_var;
		jte [1] = single_step_func_wrapper;
		jte [2] = breakpoint_func_wrapper;
		code = mono_arm_load_jumptable_entry_addr (code, jte, ARMREG_LR);
		/* non-jump-table path: LR points past the pc to the 3 inline literals below */
		ARM_MOV_REG_REG (code, ARMREG_LR, ARMREG_PC);
		*(volatile int **)code = &ss_trigger_var;
		*(gpointer*)code = single_step_func_wrapper;
		*(gpointer*)code = breakpoint_func_wrapper;

		/* copy the three pointers into their respective local variables */
		ARM_LDR_IMM (code, ARMREG_IP, ARMREG_LR, 0);
		ARM_STR_IMM (code, ARMREG_IP, read_ins->inst_basereg, read_ins->inst_offset);
		ARM_LDR_IMM (code, ARMREG_IP, ARMREG_LR, 4);
		ARM_STR_IMM (code, ARMREG_IP, ss_method_ins->inst_basereg, ss_method_ins->inst_offset);
		ARM_LDR_IMM (code, ARMREG_IP, ARMREG_LR, 8);
		ARM_STR_IMM (code, ARMREG_IP, bp_method_ins->inst_basereg, bp_method_ins->inst_offset);

	cfg->code_len = code - cfg->native_code;
	g_assert (cfg->code_len < cfg->code_size);
/*
 * mono_arch_emit_epilog:
 *   Emit the ARM method epilog: optionally emit trace-leave instrumentation,
 * load a struct-returned-by-value into r0, then either restore the LMF and
 * pop the saved registers (lr popped into pc to return), or simply unwind
 * the frame and pop the callee-saved registers.
 */
6186 mono_arch_emit_epilog (MonoCompile *cfg)
6188 MonoMethod *method = cfg->method;
6189 int pos, i, rot_amount;
/* Conservative upper bound on the epilog size; refined by the cases below. */
6190 int max_epilog_size = 16 + 20*4;
6194 if (cfg->method->save_lmf)
6195 max_epilog_size += 128;
6197 if (mono_jit_trace_calls != NULL)
6198 max_epilog_size += 50;
6200 if (cfg->prof_options & MONO_PROFILE_ENTER_LEAVE)
6201 max_epilog_size += 50;
/* Grow the code buffer geometrically until the epilog fits (16 bytes of slack kept). */
6203 while (cfg->code_len + max_epilog_size > (cfg->code_size - 16)) {
6204 cfg->code_size *= 2;
6205 cfg->native_code = g_realloc (cfg->native_code, cfg->code_size);
6206 cfg->stat_code_reallocs++;
6210 * Keep in sync with OP_JMP
6212 code = cfg->native_code + cfg->code_len;
6214 if (mono_jit_trace_calls != NULL && mono_trace_eval (method)) {
6215 code = mono_arch_instrument_epilog (cfg, mono_trace_leave_method, code, TRUE);
6219 /* Load returned vtypes into registers if needed */
6220 cinfo = cfg->arch.cinfo;
6221 if (cinfo->ret.storage == RegTypeStructByVal) {
6222 MonoInst *ins = cfg->ret;
/* Prefer an LDR with a 12-bit immediate; otherwise materialize the offset in LR first. */
6224 if (arm_is_imm12 (ins->inst_offset)) {
6225 ARM_LDR_IMM (code, ARMREG_R0, ins->inst_basereg, ins->inst_offset);
6227 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_offset);
6228 ARM_LDR_REG_REG (code, ARMREG_R0, ins->inst_basereg, ARMREG_LR);
6232 if (method->save_lmf) {
6233 int lmf_offset, reg, sp_adj, regmask;
6234 /* all but r0-r3, sp and pc */
6235 pos += sizeof (MonoLMF) - (MONO_ARM_NUM_SAVED_REGS * sizeof (mgreg_t));
6238 code = emit_restore_lmf (cfg, code, cfg->stack_usage - lmf_offset);
6240 /* This points to r4 inside MonoLMF->iregs */
6241 sp_adj = (sizeof (MonoLMF) - MONO_ARM_NUM_SAVED_REGS * sizeof (mgreg_t));
6243 regmask = 0x9ff0; /* restore lr to pc */
6244 /* Skip caller saved registers not used by the method */
6245 while (!(cfg->used_int_regs & (1 << reg)) && reg < ARMREG_FP) {
6246 regmask &= ~(1 << reg);
6251 /* Restored later */
6252 regmask &= ~(1 << ARMREG_PC);
6253 /* point sp at the registers to restore: 10 is 14 -4, because we skip r0-r3 */
6254 code = emit_big_add (code, ARMREG_SP, cfg->frame_reg, cfg->stack_usage - lmf_offset + sp_adj);
6256 ARM_POP (code, regmask);
6258 /* Restore saved r7, restore LR to PC */
6259 /* Skip lr from the lmf */
6260 ARM_ADD_REG_IMM (code, ARMREG_SP, ARMREG_SP, sizeof (gpointer), 0);
6261 ARM_POP (code, (1 << ARMREG_R7) | (1 << ARMREG_PC));
/* No LMF: unwind sp back to the saved-register area, using a rotated imm8 when possible. */
6264 if ((i = mono_arm_is_rotated_imm8 (cfg->stack_usage, &rot_amount)) >= 0) {
6265 ARM_ADD_REG_IMM (code, ARMREG_SP, cfg->frame_reg, i, rot_amount);
6267 code = mono_arm_emit_load_imm (code, ARMREG_IP, cfg->stack_usage);
6268 ARM_ADD_REG_REG (code, ARMREG_SP, cfg->frame_reg, ARMREG_IP);
6272 /* Restore saved gregs */
6273 if (cfg->used_int_regs)
6274 ARM_POP (code, cfg->used_int_regs)
6275 /* Restore saved r7, restore LR to PC */
6276 ARM_POP (code, (1 << ARMREG_R7) | (1 << ARMREG_PC));
6278 ARM_POP (code, cfg->used_int_regs | (1 << ARMREG_PC));
6282 cfg->code_len = code - cfg->native_code;
/* The reallocation loop above must have guaranteed this. */
6284 g_assert (cfg->code_len < cfg->code_size);
/*
 * mono_arch_emit_exceptions:
 *   Emit the out-of-line exception throw stubs referenced by MONO_PATCH_INFO_EXC
 * patches. Throw sites for the same exception class share one stub: the first
 * occurrence emits the stub (recorded in exc_throw_pos), later occurrences are
 * just branch-patched to it.
 */
6289 mono_arch_emit_exceptions (MonoCompile *cfg)
6291 MonoJumpInfo *patch_info;
6294 guint8* exc_throw_pos [MONO_EXC_INTRINS_NUM];
6295 guint8 exc_throw_found [MONO_EXC_INTRINS_NUM];
6296 int max_epilog_size = 50;
6298 for (i = 0; i < MONO_EXC_INTRINS_NUM; i++) {
6299 exc_throw_pos [i] = NULL;
6300 exc_throw_found [i] = 0;
6303 /* count the number of exception infos */
6306 * make sure we have enough space for exceptions
6308 for (patch_info = cfg->patch_info; patch_info; patch_info = patch_info->next) {
6309 if (patch_info->type == MONO_PATCH_INFO_EXC) {
6310 i = mini_exception_id_by_name (patch_info->data.target);
/* Reserve stub space only once per distinct exception class. */
6311 if (!exc_throw_found [i]) {
6312 max_epilog_size += 32;
6313 exc_throw_found [i] = TRUE;
6318 while (cfg->code_len + max_epilog_size > (cfg->code_size - 16)) {
6319 cfg->code_size *= 2;
6320 cfg->native_code = g_realloc (cfg->native_code, cfg->code_size);
6321 cfg->stat_code_reallocs++;
6324 code = cfg->native_code + cfg->code_len;
6326 /* add code to raise exceptions */
6327 for (patch_info = cfg->patch_info; patch_info; patch_info = patch_info->next) {
6328 switch (patch_info->type) {
6329 case MONO_PATCH_INFO_EXC: {
6330 MonoClass *exc_class;
6331 unsigned char *ip = patch_info->ip.i + cfg->native_code;
6333 i = mini_exception_id_by_name (patch_info->data.target);
/* A stub for this exception class already exists: branch to it and retire the patch. */
6334 if (exc_throw_pos [i]) {
6335 arm_patch (ip, exc_throw_pos [i]);
6336 patch_info->type = MONO_PATCH_INFO_NONE;
6339 exc_throw_pos [i] = code;
6341 arm_patch (ip, code);
6343 exc_class = mono_class_from_name (mono_defaults.corlib, "System", patch_info->data.name);
6344 g_assert (exc_class);
/* r1 = throw-site return address (lr), consumed by the throw trampoline. */
6346 ARM_MOV_REG_REG (code, ARMREG_R1, ARMREG_LR);
6347 #ifdef USE_JUMP_TABLES
6349 gpointer *jte = mono_jumptable_add_entries (2);
/* Repurpose the patch to resolve mono_arch_throw_corlib_exception at patch time. */
6350 patch_info->type = MONO_PATCH_INFO_INTERNAL_METHOD;
6351 patch_info->data.name = "mono_arch_throw_corlib_exception";
6352 patch_info->ip.i = code - cfg->native_code;
6353 code = mono_arm_load_jumptable_entry_addr (code, jte, ARMREG_R0);
6354 ARM_LDR_IMM (code, ARMREG_IP, ARMREG_R0, 0);
6355 ARM_LDR_IMM (code, ARMREG_R0, ARMREG_R0, 4);
6356 ARM_BLX_REG (code, ARMREG_IP);
6357 jte [1] = GUINT_TO_POINTER (exc_class->type_token);
/* Non-jumptable path: the type token is stored inline in the code stream. */
6360 ARM_LDR_IMM (code, ARMREG_R0, ARMREG_PC, 0);
6361 patch_info->type = MONO_PATCH_INFO_INTERNAL_METHOD;
6362 patch_info->data.name = "mono_arch_throw_corlib_exception";
6363 patch_info->ip.i = code - cfg->native_code;
6365 *(guint32*)(gpointer)code = exc_class->type_token;
6376 cfg->code_len = code - cfg->native_code;
6378 g_assert (cfg->code_len < cfg->code_size);
6382 #endif /* #ifndef DISABLE_JIT */
6385 mono_arch_finish_init (void)
6390 mono_arch_free_jit_tls_data (MonoJitTlsData *tls)
6395 mono_arch_emit_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
6402 mono_arch_print_tree (MonoInst *tree, int arity)
6412 mono_arch_get_patch_offset (guint8 *code)
6419 mono_arch_flush_register_windows (void)
/*
 * mono_arch_emit_imt_argument:
 *   Arrange for the IMT/method argument of CALL to be passed in register V5.
 * Under AOT, LLVM, generic contexts or jumptables the value is always passed
 * dynamically in a register; the method pointer is materialized either as an
 * AOT constant or a plain pointer constant.
 */
6426 mono_arch_emit_imt_argument (MonoCompile *cfg, MonoCallInst *call, MonoInst *imt_arg)
6428 int method_reg = mono_alloc_ireg (cfg);
6429 #ifdef USE_JUMP_TABLES
6430 int use_jumptables = TRUE;
6432 int use_jumptables = FALSE;
6435 if (cfg->compile_aot) {
6438 call->dynamic_imt_arg = TRUE;
/* An explicit IMT argument is available: just move it into the chosen vreg. */
6441 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, method_reg, imt_arg->dreg);
/* Otherwise load the method as an AOT-resolved constant. */
6443 MONO_INST_NEW (cfg, ins, OP_AOTCONST);
6444 ins->dreg = method_reg;
6445 ins->inst_p0 = call->method;
6446 ins->inst_c1 = MONO_PATCH_INFO_METHODCONST;
6447 MONO_ADD_INS (cfg->cbb, ins);
6449 mono_call_inst_add_outarg_reg (cfg, call, method_reg, ARMREG_V5, FALSE);
6450 } else if (cfg->generic_context || imt_arg || mono_use_llvm || use_jumptables) {
6451 /* Always pass in a register for simplicity */
6452 call->dynamic_imt_arg = TRUE;
6454 cfg->uses_rgctx_reg = TRUE;
6457 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, method_reg, imt_arg->dreg);
/* JIT case: the method pointer is a plain compile-time constant. */
6461 MONO_INST_NEW (cfg, ins, OP_PCONST);
6462 ins->inst_p0 = call->method;
6463 ins->dreg = method_reg;
6464 MONO_ADD_INS (cfg->cbb, ins);
6467 mono_call_inst_add_outarg_reg (cfg, call, method_reg, ARMREG_V5, FALSE);
6471 #endif /* DISABLE_JIT */
/*
 * mono_arch_find_imt_method:
 *   Recover the IMT method at an IMT call site. With jumptables the value is
 * always in register V5; otherwise it is read from the code stream right after
 * the LDR that loaded it, falling back to V5 for AOT/gsharedvt code.
 */
6474 mono_arch_find_imt_method (mgreg_t *regs, guint8 *code)
6476 #ifdef USE_JUMP_TABLES
6477 return (MonoMethod*)regs [ARMREG_V5];
6480 guint32 *code_ptr = (guint32*)code;
/* The word following the call-site instruction holds the IMT method pointer. */
6482 method = GUINT_TO_POINTER (code_ptr [1]);
6486 return (MonoMethod*)regs [ARMREG_V5];
6488 /* The IMT value is stored in the code stream right after the LDC instruction. */
6489 /* This is no longer true for the gsharedvt_in trampoline */
6491 if (!IS_LDR_PC (code_ptr [0])) {
6492 g_warning ("invalid code stream, instruction before IMT value is not a LDC in %s() (code %p value 0: 0x%x -1: 0x%x -2: 0x%x)", __FUNCTION__, code, code_ptr [2], code_ptr [1], code_ptr [0]);
6493 g_assert (IS_LDR_PC (code_ptr [0]));
6497 /* This is AOTed code, or the gsharedvt trampoline, the IMT method is in V5 */
6498 return (MonoMethod*)regs [ARMREG_V5];
6500 return (MonoMethod*) method;
/* Return the vtable passed to a static rgctx call: it lives in the RGCTX register. */
6505 mono_arch_find_static_call_vtable (mgreg_t *regs, guint8 *code)
6507 return (MonoVTable*) regs [MONO_ARCH_RGCTX_REG];
6510 /* #define ENABLE_WRONG_METHOD_CHECK 1 */
6511 #define BASE_SIZE (6 * 4)
6512 #define BSEARCH_ENTRY_SIZE (4 * 4)
6513 #define CMP_SIZE (3 * 4)
6514 #define BRANCH_SIZE (1 * 4)
6515 #define CALL_SIZE (2 * 4)
6516 #define WMC_SIZE (8 * 4)
6517 #define DISTANCE(A, B) (((gint32)(B)) - ((gint32)(A)))
6519 #ifdef USE_JUMP_TABLES
/*
 * set_jumptable_element:
 *   Store VALUE into slot INDEX of jumptable BASE; asserts the slot is still
 * empty so each entry is only ever filled once.
 */
6521 set_jumptable_element (gpointer *base, guint32 index, gpointer value)
6523 g_assert (base [index] == NULL);
6524 base [index] = value;
/*
 * load_element_with_regbase_cond:
 *   Emit a conditional load of jumptable entry JTI (a word index, scaled by 4)
 * into DREG from BASE. Uses a single LDR when the byte offset fits the 12-bit
 * immediate field, otherwise builds the offset with MOVW/MOVT and does a
 * register-offset LDR.
 */
6527 load_element_with_regbase_cond (arminstr_t *code, ARMReg dreg, ARMReg base, guint32 jti, int cond)
6529 if (arm_is_imm12 (jti * 4)) {
6530 ARM_LDR_IMM_COND (code, dreg, base, jti * 4, cond);
6532 ARM_MOVW_REG_IMM_COND (code, dreg, (jti * 4) & 0xffff, cond);
/* MOVT only needed when the offset has bits above the low 16. */
6533 if ((jti * 4) >> 16)
6534 ARM_MOVT_REG_IMM_COND (code, dreg, ((jti * 4) >> 16) & 0xffff, cond);
6535 ARM_LDR_REG_REG_SHIFT_COND (code, dreg, base, dreg, ARMSHIFT_LSL, 0, cond);
/*
 * arm_emit_value_and_patch_ldr:
 *   Patch the LDR at TARGET so its 12-bit immediate offset addresses the
 * current constant-pool position at CODE, where VALUE is emitted.
 */
6541 arm_emit_value_and_patch_ldr (arminstr_t *code, arminstr_t *target, guint32 value)
6543 guint32 delta = DISTANCE (target, code);
/* delta is unsigned, so the old 'delta >= 0 &&' half of this assert was a
 * tautology (always true, flagged by -Wtype-limits); only the 12-bit LDR
 * offset-field bound is a real constraint. */
6545 g_assert (delta <= 0xFFF);
6546 *target = *target | delta;
6552 #ifdef ENABLE_WRONG_METHOD_CHECK
/* Debug hook called from generated code on an IMT mismatch: print the observed
 * and expected IMT values plus the faulting pc. Only compiled under
 * ENABLE_WRONG_METHOD_CHECK. */
6554 mini_dump_bad_imt (int input_imt, int compared_imt, int pc)
6556 g_print ("BAD IMT comparing %x with expected %x at ip %x", input_imt, compared_imt, pc);
/*
 * mono_arch_build_imt_thunk:
 *   Build the native IMT dispatch thunk for VTABLE from the sorted IMT_ENTRIES.
 * Two passes: the first sizes every entry (chunk_size) so the total code
 * reservation is known, the second emits compare/branch chains for equality
 * entries and binary-search partitions for the rest. Two code shapes exist:
 * the USE_JUMP_TABLES variant keeps keys/targets in an external jumptable
 * addressed via R2, the default variant embeds them in per-entry constant
 * pools patched with arm_emit_value_and_patch_ldr. FAIL_TRAMP, when given,
 * is branched to when no entry matches.
 */
6562 mono_arch_build_imt_thunk (MonoVTable *vtable, MonoDomain *domain, MonoIMTCheckItem **imt_entries, int count,
6563 gpointer fail_tramp)
6566 arminstr_t *code, *start;
6567 #ifdef USE_JUMP_TABLES
6570 gboolean large_offsets = FALSE;
6571 guint32 **constant_pool_starts;
6572 arminstr_t *vtable_target = NULL;
6573 int extra_space = 0;
6575 #ifdef ENABLE_WRONG_METHOD_CHECK
/* --- Pass 1: compute per-entry chunk sizes and the total thunk size. --- */
6580 #ifdef USE_JUMP_TABLES
6581 for (i = 0; i < count; ++i) {
6582 MonoIMTCheckItem *item = imt_entries [i];
6583 item->chunk_size += 4 * 16;
6584 if (!item->is_equals)
6585 imt_entries [item->check_target_idx]->compare_done = TRUE;
6586 size += item->chunk_size;
6589 constant_pool_starts = g_new0 (guint32*, count);
6591 for (i = 0; i < count; ++i) {
6592 MonoIMTCheckItem *item = imt_entries [i];
6593 if (item->is_equals) {
6594 gboolean fail_case = !item->check_target_idx && fail_tramp;
/* Targets loaded from a constant pool or big vtable offsets need the longer sequence. */
6596 if (item->has_target_code || !arm_is_imm12 (DISTANCE (vtable, &vtable->vtable[item->value.vtable_slot]))) {
6597 item->chunk_size += 32;
6598 large_offsets = TRUE;
6601 if (item->check_target_idx || fail_case) {
6602 if (!item->compare_done || fail_case)
6603 item->chunk_size += CMP_SIZE;
6604 item->chunk_size += BRANCH_SIZE;
6606 #ifdef ENABLE_WRONG_METHOD_CHECK
6607 item->chunk_size += WMC_SIZE;
6611 item->chunk_size += 16;
6612 large_offsets = TRUE;
6614 item->chunk_size += CALL_SIZE;
6616 item->chunk_size += BSEARCH_ENTRY_SIZE;
6617 imt_entries [item->check_target_idx]->compare_done = TRUE;
6619 size += item->chunk_size;
6623 size += 4 * count; /* The ARM_ADD_REG_IMM to pop the stack */
6627 code = mono_method_alloc_generic_virtual_thunk (domain, size);
6629 code = mono_domain_code_reserve (domain, size);
6633 g_print ("Building IMT thunk for class %s %s entries %d code size %d code at %p end %p vtable %p fail_tramp %p\n", vtable->klass->name_space, vtable->klass->name, count, size, start, ((guint8*)start) + size, vtable, fail_tramp);
6634 for (i = 0; i < count; ++i) {
6635 MonoIMTCheckItem *item = imt_entries [i];
6636 g_print ("method %d (%p) %s vtable slot %p is_equals %d chunk size %d\n", i, item->key, ((MonoMethod*)item->key)->name, &vtable->vtable [item->value.vtable_slot], item->is_equals, item->chunk_size);
/* --- Thunk prolog: save scratch registers and establish vtable/IMT-method regs. --- */
6640 #ifdef USE_JUMP_TABLES
6641 ARM_PUSH3 (code, ARMREG_R0, ARMREG_R1, ARMREG_R2);
6642 /* If jumptables we always pass the IMT method in R5 */
6643 ARM_MOV_REG_REG (code, ARMREG_R0, ARMREG_V5);
6644 #define VTABLE_JTI 0
6645 #define IMT_METHOD_OFFSET 0
6646 #define TARGET_CODE_OFFSET 1
6647 #define JUMP_CODE_OFFSET 2
6648 #define RECORDS_PER_ENTRY 3
6649 #define IMT_METHOD_JTI(idx) (1 + idx * RECORDS_PER_ENTRY + IMT_METHOD_OFFSET)
6650 #define TARGET_CODE_JTI(idx) (1 + idx * RECORDS_PER_ENTRY + TARGET_CODE_OFFSET)
6651 #define JUMP_CODE_JTI(idx) (1 + idx * RECORDS_PER_ENTRY + JUMP_CODE_OFFSET)
/* Jumptable layout: slot 0 = vtable, then 3 slots (key, target, jump) per entry. */
6653 jte = mono_jumptable_add_entries (RECORDS_PER_ENTRY * count + 1 /* vtable */);
6654 code = (arminstr_t *) mono_arm_load_jumptable_entry_addr ((guint8 *) code, jte, ARMREG_R2);
6655 ARM_LDR_IMM (code, ARMREG_IP, ARMREG_R2, VTABLE_JTI);
6656 set_jumptable_element (jte, VTABLE_JTI, vtable);
6659 ARM_PUSH4 (code, ARMREG_R0, ARMREG_R1, ARMREG_IP, ARMREG_PC);
6661 ARM_PUSH2 (code, ARMREG_R0, ARMREG_R1);
6662 ARM_LDR_IMM (code, ARMREG_R0, ARMREG_LR, -4);
6663 vtable_target = code;
6664 ARM_LDR_IMM (code, ARMREG_IP, ARMREG_PC, 0);
6666 if (mono_use_llvm) {
6667 /* LLVM always passes the IMT method in R5 */
6668 ARM_MOV_REG_REG (code, ARMREG_R0, ARMREG_V5);
6670 /* R0 == 0 means we are called from AOT code. In this case, V5 contains the IMT method */
6671 ARM_CMP_REG_IMM8 (code, ARMREG_R0, 0);
6672 ARM_MOV_REG_REG_COND (code, ARMREG_R0, ARMREG_V5, ARMCOND_EQ);
/* --- Pass 2: emit the code for each entry. --- */
6676 for (i = 0; i < count; ++i) {
6677 MonoIMTCheckItem *item = imt_entries [i];
6678 #ifdef USE_JUMP_TABLES
6679 guint32 imt_method_jti = 0, target_code_jti = 0;
6681 arminstr_t *imt_method = NULL, *vtable_offset_ins = NULL, *target_code_ins = NULL;
6683 gint32 vtable_offset;
6685 item->code_target = (guint8*)code;
6687 if (item->is_equals) {
6688 gboolean fail_case = !item->check_target_idx && fail_tramp;
6690 if (item->check_target_idx || fail_case) {
/* Compare the incoming IMT method (R0) against this entry's key (loaded into R1). */
6691 if (!item->compare_done || fail_case) {
6692 #ifdef USE_JUMP_TABLES
6693 imt_method_jti = IMT_METHOD_JTI (i);
6694 code = load_element_with_regbase_cond (code, ARMREG_R1, ARMREG_R2, imt_method_jti, ARMCOND_AL);
6697 ARM_LDR_IMM (code, ARMREG_R1, ARMREG_PC, 0);
6699 ARM_CMP_REG_REG (code, ARMREG_R0, ARMREG_R1);
6701 #ifdef USE_JUMP_TABLES
6702 code = load_element_with_regbase_cond (code, ARMREG_R1, ARMREG_R2, JUMP_CODE_JTI (i), ARMCOND_NE);
6703 ARM_BX_COND (code, ARMCOND_NE, ARMREG_R1);
6704 item->jmp_code = GUINT_TO_POINTER (JUMP_CODE_JTI (i));
6706 item->jmp_code = (guint8*)code;
6707 ARM_B_COND (code, ARMCOND_NE, 0);
6710 /*Enable the commented code to assert on wrong method*/
6711 #ifdef ENABLE_WRONG_METHOD_CHECK
6712 #ifdef USE_JUMP_TABLES
6713 imt_method_jti = IMT_METHOD_JTI (i);
6714 code = load_element_with_regbase_cond (code, ARMREG_R1, ARMREG_R2, imt_method_jti, ARMCOND_AL);
6717 ARM_LDR_IMM (code, ARMREG_R1, ARMREG_PC, 0);
6719 ARM_CMP_REG_REG (code, ARMREG_R0, ARMREG_R1);
6721 ARM_B_COND (code, ARMCOND_EQ, 0);
6723 /* Define this if your system is so bad that gdb is failing. */
6724 #ifdef BROKEN_DEV_ENV
6725 ARM_MOV_REG_REG (code, ARMREG_R2, ARMREG_PC);
6727 arm_patch (code - 1, mini_dump_bad_imt);
6731 arm_patch (cond, code);
/* Match found: branch to explicit target code, or dispatch through the vtable slot. */
6735 if (item->has_target_code) {
6736 /* Load target address */
6737 #ifdef USE_JUMP_TABLES
6738 target_code_jti = TARGET_CODE_JTI (i);
6739 code = load_element_with_regbase_cond (code, ARMREG_R1, ARMREG_R2, target_code_jti, ARMCOND_AL);
6740 /* Restore registers */
6741 ARM_POP3 (code, ARMREG_R0, ARMREG_R1, ARMREG_R2);
6743 ARM_BX (code, ARMREG_R1);
6744 set_jumptable_element (jte, target_code_jti, item->value.target_code);
6746 target_code_ins = code;
6747 ARM_LDR_IMM (code, ARMREG_R1, ARMREG_PC, 0);
6748 /* Save it to the fourth slot */
6749 ARM_STR_IMM (code, ARMREG_R1, ARMREG_SP, 3 * sizeof (gpointer));
6750 /* Restore registers and branch */
6751 ARM_POP4 (code, ARMREG_R0, ARMREG_R1, ARMREG_IP, ARMREG_PC);
6753 code = arm_emit_value_and_patch_ldr (code, target_code_ins, (gsize)item->value.target_code);
6756 vtable_offset = DISTANCE (vtable, &vtable->vtable[item->value.vtable_slot]);
6757 if (!arm_is_imm12 (vtable_offset)) {
6759 * We need to branch to a computed address but we don't have
6760 * a free register to store it, since IP must contain the
6761 * vtable address. So we push the two values to the stack, and
6762 * load them both using LDM.
6764 /* Compute target address */
6765 #ifdef USE_JUMP_TABLES
6766 ARM_MOVW_REG_IMM (code, ARMREG_R1, vtable_offset & 0xffff);
6767 if (vtable_offset >> 16)
6768 ARM_MOVT_REG_IMM (code, ARMREG_R1, (vtable_offset >> 16) & 0xffff);
6769 /* IP had vtable base. */
6770 ARM_LDR_REG_REG (code, ARMREG_IP, ARMREG_IP, ARMREG_R1);
6771 /* Restore registers and branch */
6772 ARM_POP3 (code, ARMREG_R0, ARMREG_R1, ARMREG_R2);
6773 ARM_BX (code, ARMREG_IP);
6775 vtable_offset_ins = code;
6776 ARM_LDR_IMM (code, ARMREG_R1, ARMREG_PC, 0);
6777 ARM_LDR_REG_REG (code, ARMREG_R1, ARMREG_IP, ARMREG_R1);
6778 /* Save it to the fourth slot */
6779 ARM_STR_IMM (code, ARMREG_R1, ARMREG_SP, 3 * sizeof (gpointer));
6780 /* Restore registers and branch */
6781 ARM_POP4 (code, ARMREG_R0, ARMREG_R1, ARMREG_IP, ARMREG_PC);
6783 code = arm_emit_value_and_patch_ldr (code, vtable_offset_ins, vtable_offset);
6786 #ifdef USE_JUMP_TABLES
6787 ARM_LDR_IMM (code, ARMREG_IP, ARMREG_IP, vtable_offset);
6788 ARM_POP3 (code, ARMREG_R0, ARMREG_R1, ARMREG_R2);
6789 ARM_BX (code, ARMREG_IP);
6791 ARM_POP2 (code, ARMREG_R0, ARMREG_R1);
6793 ARM_ADD_REG_IMM8 (code, ARMREG_SP, ARMREG_SP, 2 * sizeof (gpointer));
6794 ARM_LDR_IMM (code, ARMREG_PC, ARMREG_IP, vtable_offset);
/* Fail case: the not-equal branch lands here and jumps to fail_tramp. */
6800 #ifdef USE_JUMP_TABLES
6801 set_jumptable_element (jte, GPOINTER_TO_UINT (item->jmp_code), code);
6802 target_code_jti = TARGET_CODE_JTI (i);
6803 /* Load target address */
6804 code = load_element_with_regbase_cond (code, ARMREG_R1, ARMREG_R2, target_code_jti, ARMCOND_AL);
6805 /* Restore registers */
6806 ARM_POP3 (code, ARMREG_R0, ARMREG_R1, ARMREG_R2);
6808 ARM_BX (code, ARMREG_R1);
6809 set_jumptable_element (jte, target_code_jti, fail_tramp);
6811 arm_patch (item->jmp_code, (guchar*)code);
6813 target_code_ins = code;
6814 /* Load target address */
6815 ARM_LDR_IMM (code, ARMREG_R1, ARMREG_PC, 0);
6816 /* Save it to the fourth slot */
6817 ARM_STR_IMM (code, ARMREG_R1, ARMREG_SP, 3 * sizeof (gpointer));
6818 /* Restore registers and branch */
6819 ARM_POP4 (code, ARMREG_R0, ARMREG_R1, ARMREG_IP, ARMREG_PC);
6821 code = arm_emit_value_and_patch_ldr (code, target_code_ins, (gsize)fail_tramp);
6823 item->jmp_code = NULL;
6826 #ifdef USE_JUMP_TABLES
6828 set_jumptable_element (jte, imt_method_jti, item->key);
6831 code = arm_emit_value_and_patch_ldr (code, imt_method, (guint32)item->key);
6833 /*must emit after unconditional branch*/
6834 if (vtable_target) {
6835 code = arm_emit_value_and_patch_ldr (code, vtable_target, (guint32)vtable);
6836 item->chunk_size += 4;
6837 vtable_target = NULL;
6840 /*We reserve the space for bsearch IMT values after the first entry with an absolute jump*/
6841 constant_pool_starts [i] = code;
6843 code += extra_space;
/* Binary-search (non-equals) entry: branch to the upper half when R0 >= key. */
6848 #ifdef USE_JUMP_TABLES
6849 code = load_element_with_regbase_cond (code, ARMREG_R1, ARMREG_R2, IMT_METHOD_JTI (i), ARMCOND_AL);
6850 ARM_CMP_REG_REG (code, ARMREG_R0, ARMREG_R1);
6851 code = load_element_with_regbase_cond (code, ARMREG_R1, ARMREG_R2, JUMP_CODE_JTI (i), ARMCOND_HS);
6852 ARM_BX_COND (code, ARMCOND_HS, ARMREG_R1);
6853 item->jmp_code = GUINT_TO_POINTER (JUMP_CODE_JTI (i));
6855 ARM_LDR_IMM (code, ARMREG_R1, ARMREG_PC, 0);
6856 ARM_CMP_REG_REG (code, ARMREG_R0, ARMREG_R1);
6858 item->jmp_code = (guint8*)code;
6859 ARM_B_COND (code, ARMCOND_HS, 0);
/* --- Fixup pass: resolve forward branches and fill constant pools / key slots. --- */
6865 for (i = 0; i < count; ++i) {
6866 MonoIMTCheckItem *item = imt_entries [i];
6867 if (item->jmp_code) {
6868 if (item->check_target_idx)
6869 #ifdef USE_JUMP_TABLES
6870 set_jumptable_element (jte, GPOINTER_TO_UINT (item->jmp_code), imt_entries [item->check_target_idx]->code_target);
6872 arm_patch (item->jmp_code, imt_entries [item->check_target_idx]->code_target);
6875 if (i > 0 && item->is_equals) {
6877 #ifdef USE_JUMP_TABLES
6878 for (j = i - 1; j >= 0 && !imt_entries [j]->is_equals; --j)
6879 set_jumptable_element (jte, IMT_METHOD_JTI (j), imt_entries [j]->key)
6881 arminstr_t *space_start = constant_pool_starts [i];
6882 for (j = i - 1; j >= 0 && !imt_entries [j]->is_equals; --j) {
6883 space_start = arm_emit_value_and_patch_ldr (space_start, (arminstr_t*)imt_entries [j]->code_target, (guint32)imt_entries [j]->key);
6891 char *buff = g_strdup_printf ("thunk_for_class_%s_%s_entries_%d", vtable->klass->name_space, vtable->klass->name, count);
6892 mono_disassemble_code (NULL, (guint8*)start, size, buff);
6897 #ifndef USE_JUMP_TABLES
6898 g_free (constant_pool_starts);
6901 mono_arch_flush_icache ((guint8*)start, size);
6902 mono_stats.imt_thunks_size += code - start;
6904 g_assert (DISTANCE (start, code) <= size);
/* Read integer register REG from the saved machine context CTX. */
6909 mono_arch_context_get_int_reg (MonoContext *ctx, int reg)
6911 return ctx->regs [reg];
/* Write VAL into integer register REG of the saved machine context CTX. */
6915 mono_arch_context_set_int_reg (MonoContext *ctx, int reg, mgreg_t val)
6917 ctx->regs [reg] = val;
6921 * mono_arch_get_trampolines:
6923 * Return a list of MonoTrampInfo structures describing arch specific trampolines
/* Delegate to the ARM exception-trampoline builder; AOT selects the AOT-safe variants. */
6927 mono_arch_get_trampolines (gboolean aot)
6929 return mono_arm_get_exception_trampolines (aot);
6932 #if defined(MONO_ARCH_SOFT_DEBUG_SUPPORTED)
6934 * mono_arch_set_breakpoint:
6936 * Set a breakpoint at the native code corresponding to JI at NATIVE_OFFSET.
6937 * The location should contain code emitted by OP_SEQ_POINT.
/*
 * mono_arch_set_breakpoint:
 *   Activate the breakpoint at IP inside JI. Three strategies:
 *   - soft breakpoints: patch in a BLX through LR at the seq-point site;
 *   - AOT code: flip the per-offset slot in the method's SeqPointInfo to the
 *     bp trigger page (no code patching);
 *   - JIT code: patch in a load from the bp trigger page, which faults when
 *     the page is protected.
 */
6940 mono_arch_set_breakpoint (MonoJitInfo *ji, guint8 *ip)
6943 guint32 native_offset = ip - (guint8*)ji->code_start;
6944 MonoDebugOptions *opt = mini_get_debug_options ();
6946 if (opt->soft_breakpoints) {
6947 g_assert (!ji->from_aot);
6949 ARM_BLX_REG (code, ARMREG_LR);
6950 mono_arch_flush_icache (code - 4, 4);
6951 } else if (ji->from_aot) {
6952 SeqPointInfo *info = mono_arch_get_seq_point_info (mono_domain_get (), ji->code_start);
/* bp_addrs is indexed per instruction word; the slot must not already be set. */
6954 g_assert (native_offset % 4 == 0);
6955 g_assert (info->bp_addrs [native_offset / 4] == 0);
6956 info->bp_addrs [native_offset / 4] = bp_trigger_page;
6958 int dreg = ARMREG_LR;
6960 /* Read from another trigger page */
6961 #ifdef USE_JUMP_TABLES
6962 gpointer *jte = mono_jumptable_add_entry ();
6963 code = mono_arm_load_jumptable_entry (code, jte, dreg);
6964 jte [0] = bp_trigger_page;
6966 ARM_LDR_IMM (code, dreg, ARMREG_PC, 0);
/* Trigger page address embedded inline in the code stream. */
6968 *(int*)code = (int)bp_trigger_page;
6971 ARM_LDR_IMM (code, dreg, dreg, 0);
6973 mono_arch_flush_icache (code - 16, 16);
6976 /* This is currently implemented by emitting an SWI instruction, which
6977 * qemu/linux seems to convert to a SIGILL.
6979 *(int*)code = (0xef << 24) | 8;
6981 mono_arch_flush_icache (code - 4, 4);
6987 * mono_arch_clear_breakpoint:
6989 * Clear the breakpoint at IP.
/*
 * mono_arch_clear_breakpoint:
 *   Undo mono_arch_set_breakpoint at IP: for AOT code clear the SeqPointInfo
 * slot, otherwise rewrite the patched instruction words (and flush the
 * icache so the change is visible to execution).
 */
6992 mono_arch_clear_breakpoint (MonoJitInfo *ji, guint8 *ip)
6994 MonoDebugOptions *opt = mini_get_debug_options ();
6998 if (opt->soft_breakpoints) {
6999 g_assert (!ji->from_aot);
7002 mono_arch_flush_icache (code - 4, 4);
7003 } else if (ji->from_aot) {
7004 guint32 native_offset = ip - (guint8*)ji->code_start;
7005 SeqPointInfo *info = mono_arch_get_seq_point_info (mono_domain_get (), ji->code_start);
/* The slot must currently hold the trigger page set by set_breakpoint. */
7007 g_assert (native_offset % 4 == 0);
7008 g_assert (info->bp_addrs [native_offset / 4] == bp_trigger_page);
7009 info->bp_addrs [native_offset / 4] = 0;
/* JIT case: overwrite the 4 patched instruction words (bodies elided here). */
7011 for (i = 0; i < 4; ++i)
7014 mono_arch_flush_icache (ip, code - ip);
7019 * mono_arch_start_single_stepping:
7021 * Start single stepping.
/* Begin single stepping: revoke all access to the ss trigger page so the
 * per-seq-point load from it faults. */
7024 mono_arch_start_single_stepping (void)
7026 if (ss_trigger_page)
7027 mono_mprotect (ss_trigger_page, mono_pagesize (), 0);
7033 * mono_arch_stop_single_stepping:
7035 * Stop single stepping.
/* Stop single stepping: make the ss trigger page readable again so the
 * per-seq-point load no longer faults. */
7038 mono_arch_stop_single_stepping (void)
7040 if (ss_trigger_page)
7041 mono_mprotect (ss_trigger_page, mono_pagesize (), MONO_MMAP_READ);
7047 #define DBG_SIGNAL SIGBUS
7049 #define DBG_SIGNAL SIGSEGV
7053 * mono_arch_is_single_step_event:
7055 * Return whenever the machine state in SIGCTX corresponds to a single
/*
 * mono_arch_is_single_step_event:
 *   Decide whether the fault described by INFO/SIGCTX was caused by a read of
 * the single-step trigger page. The faulting address is matched against a
 * 128-byte window because the reported address can be slightly off.
 */
7059 mono_arch_is_single_step_event (void *info, void *sigctx)
7061 siginfo_t *sinfo = info;
/* Trigger pages are not used at all (e.g. soft breakpoints) — not our event. */
7063 if (!ss_trigger_page)
7066 /* Sometimes the address is off by 4 */
7067 if (sinfo->si_addr >= ss_trigger_page && (guint8*)sinfo->si_addr <= (guint8*)ss_trigger_page + 128)
7074 * mono_arch_is_breakpoint_event:
7076 * Return whenever the machine state in SIGCTX corresponds to a breakpoint event.
/*
 * mono_arch_is_breakpoint_event:
 *   Decide whether the fault described by INFO/SIGCTX was caused by a read of
 * the breakpoint trigger page: the signal number must match DBG_SIGNAL and
 * the faulting address must fall in a 128-byte window of the page.
 */
7079 mono_arch_is_breakpoint_event (void *info, void *sigctx)
7081 siginfo_t *sinfo = info;
7083 if (!ss_trigger_page)
7086 if (sinfo->si_signo == DBG_SIGNAL) {
7087 /* Sometimes the address is off by 4 */
7088 if (sinfo->si_addr >= bp_trigger_page && (guint8*)sinfo->si_addr <= (guint8*)bp_trigger_page + 128)
7098 * mono_arch_skip_breakpoint:
7100 * See mini-amd64.c for docs.
/* Resume past a breakpoint by advancing the context pc over the 4-byte
 * faulting instruction. See mini-amd64.c for the contract. */
7103 mono_arch_skip_breakpoint (MonoContext *ctx, MonoJitInfo *ji)
7105 MONO_CONTEXT_SET_IP (ctx, (guint8*)MONO_CONTEXT_GET_IP (ctx) + 4);
7109 * mono_arch_skip_single_step:
7111 * See mini-amd64.c for docs.
/* Resume past a single-step trap by advancing the context pc over the 4-byte
 * faulting instruction. See mini-amd64.c for the contract. */
7114 mono_arch_skip_single_step (MonoContext *ctx)
7116 MONO_CONTEXT_SET_IP (ctx, (guint8*)MONO_CONTEXT_GET_IP (ctx) + 4);
7119 #endif /* MONO_ARCH_SOFT_DEBUG_SUPPORTED */
7122 * mono_arch_get_seq_point_info:
7124 * See mini-amd64.c for docs.
/*
 * mono_arch_get_seq_point_info:
 *   Return (lazily creating and caching in the domain's arch_seq_points hash)
 * the SeqPointInfo for the method starting at CODE. The structure is sized
 * with one bp_addrs slot per instruction word of the method and records the
 * current ss/bp trigger pages. See mini-amd64.c for the general contract.
 */
7127 mono_arch_get_seq_point_info (MonoDomain *domain, guint8 *code)
7132 // FIXME: Add a free function
/* Fast path: look up an already-created entry under the domain lock. */
7134 mono_domain_lock (domain);
7135 info = g_hash_table_lookup (domain_jit_info (domain)->arch_seq_points,
7137 mono_domain_unlock (domain);
7140 ji = mono_jit_info_table_find (domain, (char*)code);
/* The trailing flexible area holds one bp slot per code word. */
7143 info = g_malloc0 (sizeof (SeqPointInfo) + ji->code_size);
7145 info->ss_trigger_page = ss_trigger_page;
7146 info->bp_trigger_page = bp_trigger_page;
7148 mono_domain_lock (domain);
7149 g_hash_table_insert (domain_jit_info (domain)->arch_seq_points,
7151 mono_domain_unlock (domain);
/*
 * mono_arch_init_lmf_ext:
 *   Link EXT into the LMF chain after PREV_LMF. Bit 1 of previous_lmf is set
 * as a tag so LMF walkers can recognize this entry as a MonoLMFExt.
 */
7158 mono_arch_init_lmf_ext (MonoLMFExt *ext, gpointer prev_lmf)
7160 ext->lmf.previous_lmf = prev_lmf;
7161 /* Mark that this is a MonoLMFExt */
7162 ext->lmf.previous_lmf = (gpointer)(((gssize)ext->lmf.previous_lmf) | 2);
7163 ext->lmf.sp = (gssize)ext;
7167 * mono_arch_set_target:
7169 * Set the target architecture the JIT backend should generate code for, in the form
7170 * of a GNU target triplet. Only used in AOT mode.
/*
 * mono_arch_set_target:
 *   Set the target architecture the JIT backend should generate code for, in
 * the form of a GNU target triplet (AOT only). Feature flags are cumulative:
 * each substring match enables that architecture level and everything it
 * implies (e.g. "armv7" also enables v5/v6).
 */
7173 mono_arch_set_target (char *mtriple)
7175 /* The GNU target triple format is not very well documented */
7176 if (strstr (mtriple, "armv7")) {
7177 v5_supported = TRUE;
7178 v6_supported = TRUE;
7179 v7_supported = TRUE;
7181 if (strstr (mtriple, "armv6")) {
7182 v5_supported = TRUE;
7183 v6_supported = TRUE;
/* "armv7s" also matched the "armv7" test above, so only the extra flag is set here. */
7185 if (strstr (mtriple, "armv7s")) {
7186 v7s_supported = TRUE;
7188 if (strstr (mtriple, "thumbv7s")) {
7189 v5_supported = TRUE;
7190 v6_supported = TRUE;
7191 v7_supported = TRUE;
7192 v7s_supported = TRUE;
7193 thumb_supported = TRUE;
7194 thumb2_supported = TRUE;
7196 if (strstr (mtriple, "darwin") || strstr (mtriple, "ios")) {
7197 v5_supported = TRUE;
7198 v6_supported = TRUE;
7199 thumb_supported = TRUE;
7202 if (strstr (mtriple, "gnueabi"))
7203 eabi_supported = TRUE;
/* Report whether OPCODE can be emitted natively: the listed atomic ops
 * require ARMv7 (LDREX/STREX-based support). */
7207 mono_arch_opcode_supported (int opcode)
7210 case OP_ATOMIC_ADD_I4:
7211 case OP_ATOMIC_EXCHANGE_I4:
7212 case OP_ATOMIC_CAS_I4:
7213 return v7_supported;
7219 #if defined(ENABLE_GSHAREDVT)
7221 #include "../../../mono-extensions/mono/mini/mini-arm-gsharedvt.c"
7223 #endif /* !MONOTOUCH */