2 * mini-arm.c: ARM backend for the Mono code generator
5 * Paolo Molaro (lupus@ximian.com)
6 * Dietmar Maurer (dietmar@ximian.com)
8 * (C) 2003 Ximian, Inc.
9 * Copyright 2003-2011 Novell, Inc (http://www.novell.com)
10 * Copyright 2011 Xamarin, Inc (http://www.xamarin.com)
15 #include <mono/metadata/appdomain.h>
16 #include <mono/metadata/debug-helpers.h>
17 #include <mono/utils/mono-mmap.h>
18 #include <mono/utils/mono-hwcap-arm.h>
24 #include "debugger-agent.h"
26 #include "mono/arch/arm/arm-vfp-codegen.h"
28 /* Sanity check: This makes no sense */
29 #if defined(ARM_FPU_NONE) && (defined(ARM_FPU_VFP) || defined(ARM_FPU_VFP_HARD))
30 #error "ARM_FPU_NONE is defined while one of ARM_FPU_VFP/ARM_FPU_VFP_HARD is defined"
34 * IS_SOFT_FLOAT: Is full software floating point used?
35 * IS_HARD_FLOAT: Is full hardware floating point used?
36 * IS_VFP: Is hardware floating point with software ABI used?
38 * These are not necessarily constants, e.g. IS_SOFT_FLOAT and
39 * IS_VFP may delegate to mono_arch_is_soft_float ().
42 #if defined(ARM_FPU_VFP_HARD)
43 #define IS_SOFT_FLOAT (FALSE)
44 #define IS_HARD_FLOAT (TRUE)
46 #elif defined(ARM_FPU_NONE)
47 #define IS_SOFT_FLOAT (mono_arch_is_soft_float ())
48 #define IS_HARD_FLOAT (FALSE)
49 #define IS_VFP (!mono_arch_is_soft_float ())
51 #define IS_SOFT_FLOAT (FALSE)
52 #define IS_HARD_FLOAT (FALSE)
56 #if defined(__ARM_EABI__) && defined(__linux__) && !defined(PLATFORM_ANDROID) && !defined(__native_client__)
57 #define HAVE_AEABI_READ_TP 1
60 #ifdef __native_client_codegen__
61 const guint kNaClAlignment = kNaClAlignmentARM;
62 const guint kNaClAlignmentMask = kNaClAlignmentMaskARM;
63 gint8 nacl_align_byte = -1; /* 0xff */
66 mono_arch_nacl_pad (guint8 *code, int pad)
68 /* Not yet properly implemented. */
69 g_assert_not_reached ();
74 mono_arch_nacl_skip_nops (guint8 *code)
76 /* Not yet properly implemented. */
77 g_assert_not_reached ();
81 #endif /* __native_client_codegen__ */
83 #define ALIGN_TO(val,align) ((((guint64)val) + ((align) - 1)) & ~((align) - 1))
86 void sys_icache_invalidate (void *start, size_t len);
89 static gint lmf_tls_offset = -1;
90 static gint lmf_addr_tls_offset = -1;
92 /* This mutex protects architecture specific caches */
93 #define mono_mini_arch_lock() EnterCriticalSection (&mini_arch_mutex)
94 #define mono_mini_arch_unlock() LeaveCriticalSection (&mini_arch_mutex)
95 static CRITICAL_SECTION mini_arch_mutex;
97 static gboolean v5_supported = FALSE;
98 static gboolean v6_supported = FALSE;
99 static gboolean v7_supported = FALSE;
100 static gboolean v7s_supported = FALSE;
101 static gboolean thumb_supported = FALSE;
102 static gboolean thumb2_supported = FALSE;
104 * Whether to use the ARM EABI
106 static gboolean eabi_supported = FALSE;
109 * Whether to use the iPhone ABI extensions:
110 * http://developer.apple.com/library/ios/documentation/Xcode/Conceptual/iPhoneOSABIReference/index.html
111 * Basically, r7 is used as a frame pointer and it should point to the saved r7 + lr.
112 * This is required for debugging/profiling tools to work, but it has some overhead so it should
113 * only be turned on in debug builds.
115 static gboolean iphone_abi = FALSE;
118 * The FPU we are generating code for. This is NOT runtime configurable right now,
119 * since some things like MONO_ARCH_CALLEE_FREGS still depend on defines.
121 static MonoArmFPU arm_fpu;
123 #if defined(ARM_FPU_VFP_HARD)
125 * On armhf, d0-d7 are used for argument passing and d8-d15
126 * must be preserved across calls, which leaves us no room
127 * for scratch registers. So we use d14-d15 but back up their
128 * previous contents to a stack slot before using them - see
129 * mono_arm_emit_vfp_scratch_save/_restore ().
131 static int vfp_scratch1 = ARM_VFP_D14;
132 static int vfp_scratch2 = ARM_VFP_D15;
135 * On armel, d0-d7 do not need to be preserved, so we can
136 * freely make use of them as scratch registers.
138 static int vfp_scratch1 = ARM_VFP_D0;
139 static int vfp_scratch2 = ARM_VFP_D1;
144 static volatile int ss_trigger_var = 0;
146 static gpointer single_step_func_wrapper;
147 static gpointer breakpoint_func_wrapper;
150 * The code generated for sequence points reads from this location, which is
151 * made read-only when single stepping is enabled.
153 static gpointer ss_trigger_page;
155 /* Enabled breakpoints read from this trigger page */
156 static gpointer bp_trigger_page;
158 /* Structure used by the sequence points in AOTed code */
160 gpointer ss_trigger_page;
161 gpointer bp_trigger_page;
162 guint8* bp_addrs [MONO_ZERO_LEN_ARRAY];
167 * floating point support: on ARM it is a mess, there are at least 3
168 * different setups, each of which binary incompat with the other.
169 * 1) FPA: old and ugly, but unfortunately what current distros use
170 * the double binary format has the two words swapped. 8 double registers.
171 * Implemented usually by kernel emulation.
172 * 2) softfloat: the compiler emulates all the fp ops. Usually uses the
173 * ugly swapped double format (I guess a softfloat-vfp exists, too, though).
174 * 3) VFP: the new and actually sensible and useful FP support. Implemented
175 * in HW or kernel-emulated, requires new tools. I think this is what symbian uses.
177 * We do not care about FPA. We will support soft float and VFP.
179 int mono_exc_esp_offset = 0;
181 #define arm_is_imm12(v) ((v) > -4096 && (v) < 4096)
182 #define arm_is_imm8(v) ((v) > -256 && (v) < 256)
183 #define arm_is_fpimm8(v) ((v) >= -1020 && (v) <= 1020)
185 #define LDR_MASK ((0xf << ARMCOND_SHIFT) | (3 << 26) | (1 << 22) | (1 << 20) | (15 << 12))
186 #define LDR_PC_VAL ((ARMCOND_AL << ARMCOND_SHIFT) | (1 << 26) | (0 << 22) | (1 << 20) | (15 << 12))
187 #define IS_LDR_PC(val) (((val) & LDR_MASK) == LDR_PC_VAL)
189 #define ADD_LR_PC_4 ((ARMCOND_AL << ARMCOND_SHIFT) | (1 << 25) | (1 << 23) | (ARMREG_PC << 16) | (ARMREG_LR << 12) | 4)
190 #define MOV_LR_PC ((ARMCOND_AL << ARMCOND_SHIFT) | (1 << 24) | (0xa << 20) | (ARMREG_LR << 12) | ARMREG_PC)
191 //#define DEBUG_IMT 0
193 /* A variant of ARM_LDR_IMM which can handle large offsets */
194 #define ARM_LDR_IMM_GENERAL(code, dreg, basereg, offset, scratch_reg) do { \
195 if (arm_is_imm12 ((offset))) { \
196 ARM_LDR_IMM (code, (dreg), (basereg), (offset)); \
198 g_assert ((scratch_reg) != (basereg)); \
199 code = mono_arm_emit_load_imm (code, (scratch_reg), (offset)); \
200 ARM_LDR_REG_REG (code, (dreg), (basereg), (scratch_reg)); \
204 #define ARM_STR_IMM_GENERAL(code, dreg, basereg, offset, scratch_reg) do { \
205 if (arm_is_imm12 ((offset))) { \
206 ARM_STR_IMM (code, (dreg), (basereg), (offset)); \
208 g_assert ((scratch_reg) != (basereg)); \
209 code = mono_arm_emit_load_imm (code, (scratch_reg), (offset)); \
210 ARM_STR_REG_REG (code, (dreg), (basereg), (scratch_reg)); \
214 static void mono_arch_compute_omit_fp (MonoCompile *cfg);
/* Return a printable name for integer register REG; used for debug dumps. */
217 mono_arch_regname (int reg)
219 static const char * rnames[] = {
220 "arm_r0", "arm_r1", "arm_r2", "arm_r3", "arm_v1",
221 "arm_v2", "arm_v3", "arm_v4", "arm_v5", "arm_v6",
222 "arm_v7", "arm_fp", "arm_ip", "arm_sp", "arm_lr",
/* Only r0..r15 are valid ARM core registers. */
225 if (reg >= 0 && reg < 16)
/* Return a printable name for floating-point register REG (VFP single regs). */
231 mono_arch_fregname (int reg)
233 static const char * rnames[] = {
234 "arm_f0", "arm_f1", "arm_f2", "arm_f3", "arm_f4",
235 "arm_f5", "arm_f6", "arm_f7", "arm_f8", "arm_f9",
236 "arm_f10", "arm_f11", "arm_f12", "arm_f13", "arm_f14",
237 "arm_f15", "arm_f16", "arm_f17", "arm_f18", "arm_f19",
238 "arm_f20", "arm_f21", "arm_f22", "arm_f23", "arm_f24",
239 "arm_f25", "arm_f26", "arm_f27", "arm_f28", "arm_f29",
/* Bounds check matches the 32 VFP single-precision registers. */
242 if (reg >= 0 && reg < 32)
/*
 * Emit code for: dreg = sreg + imm, for an arbitrary 32-bit immediate.
 * Uses a single ADD when IMM fits an ARM rotated imm8; otherwise loads
 * IMM into DREG first, which is why DREG must differ from SREG then.
 * Returns the updated code pointer.
 */
250 emit_big_add (guint8 *code, int dreg, int sreg, int imm)
252 int imm8, rot_amount;
253 if ((imm8 = mono_arm_is_rotated_imm8 (imm, &rot_amount)) >= 0) {
254 ARM_ADD_REG_IMM (code, dreg, sreg, imm8, rot_amount);
/* Large-immediate path: DREG is clobbered as scratch before the add. */
257 g_assert (dreg != sreg);
258 code = mono_arm_emit_load_imm (code, dreg, imm);
259 ARM_ADD_REG_REG (code, dreg, dreg, sreg);
/*
 * Emit an inline word-by-word copy of SIZE bytes from sreg+soffset to
 * dreg+doffset. Large copies (> 4 pointers) use a run-time loop in
 * r0-r3; small copies are unrolled using lr as the transfer register.
 * NOTE(review): size appears to be assumed word-aligned (4-byte steps,
 * final g_assert (size == 0)) — confirm against callers.
 */
264 emit_memcpy (guint8 *code, int size, int dreg, int doffset, int sreg, int soffset)
266 /* we can use r0-r3, since this is called only for incoming args on the stack */
267 if (size > sizeof (gpointer) * 4) {
/* Loop path: r0 = src cursor, r1 = dst cursor, r2 = remaining bytes. */
269 code = emit_big_add (code, ARMREG_R0, sreg, soffset);
270 code = emit_big_add (code, ARMREG_R1, dreg, doffset);
271 start_loop = code = mono_arm_emit_load_imm (code, ARMREG_R2, size);
272 ARM_LDR_IMM (code, ARMREG_R3, ARMREG_R0, 0);
273 ARM_STR_IMM (code, ARMREG_R3, ARMREG_R1, 0);
274 ARM_ADD_REG_IMM8 (code, ARMREG_R0, ARMREG_R0, 4);
275 ARM_ADD_REG_IMM8 (code, ARMREG_R1, ARMREG_R1, 4);
276 ARM_SUBS_REG_IMM8 (code, ARMREG_R2, ARMREG_R2, 4);
/* Branch back while the counter is non-zero; patch the offset now. */
277 ARM_B_COND (code, ARMCOND_NE, 0);
278 arm_patch (code - 4, start_loop);
/* Unrolled path, only valid while every offset fits LDR/STR imm12. */
281 if (arm_is_imm12 (doffset) && arm_is_imm12 (doffset + size) &&
282 arm_is_imm12 (soffset) && arm_is_imm12 (soffset + size)) {
284 ARM_LDR_IMM (code, ARMREG_LR, sreg, soffset);
285 ARM_STR_IMM (code, ARMREG_LR, dreg, doffset);
/* Offsets too large for imm12: materialize base addresses first. */
291 code = emit_big_add (code, ARMREG_R0, sreg, soffset);
292 code = emit_big_add (code, ARMREG_R1, dreg, doffset);
293 doffset = soffset = 0;
295 ARM_LDR_IMM (code, ARMREG_LR, ARMREG_R0, soffset);
296 ARM_STR_IMM (code, ARMREG_LR, ARMREG_R1, doffset);
302 g_assert (size == 0);
/*
 * Emit an indirect call through REG. Uses BLX where available;
 * the fallback sets lr manually and jumps via a mov to pc.
 */
307 emit_call_reg (guint8 *code, int reg)
310 ARM_BLX_REG (code, reg);
312 #ifdef USE_JUMP_TABLES
313 g_assert_not_reached ();
/* Pre-BLX fallback: lr = pc (points past the jump), then pc = reg. */
315 ARM_MOV_REG_REG (code, ARMREG_LR, ARMREG_PC);
319 ARM_MOV_REG_REG (code, ARMREG_PC, reg);
/*
 * Emit a patchable call sequence. For dynamic methods the callee
 * address is embedded inline as a data word (initially NULL) that is
 * loaded pc-relative into ip and called indirectly; otherwise a
 * patchable bl / jump-table entry is used.
 */
325 emit_call_seq (MonoCompile *cfg, guint8 *code)
327 #ifdef USE_JUMP_TABLES
328 code = mono_arm_patchable_bl (code, ARMCOND_AL);
330 if (cfg->method->dynamic) {
331 ARM_LDR_IMM (code, ARMREG_IP, ARMREG_PC, 0);
/* Inline address slot, filled in when the call target is patched. */
333 *(gpointer*)code = NULL;
335 code = emit_call_reg (code, ARMREG_IP);
/*
 * Emit a patchable conditional branch. With jump tables the target is
 * loaded from a fresh jumptable entry into ip and reached via BX;
 * otherwise a plain B with a zero offset is emitted for later patching.
 */
344 mono_arm_patchable_b (guint8 *code, int cond)
346 #ifdef USE_JUMP_TABLES
349 jte = mono_jumptable_add_entry ();
350 code = mono_arm_load_jumptable_entry (code, jte, ARMREG_IP);
351 ARM_BX_COND (code, cond, ARMREG_IP);
353 ARM_B_COND (code, cond, 0);
/*
 * Emit a patchable conditional call (branch-and-link). Jump-table
 * variant loads the target into ip and uses BLX; the fallback emits
 * a BL with a zero offset that gets patched later.
 */
359 mono_arm_patchable_bl (guint8 *code, int cond)
361 #ifdef USE_JUMP_TABLES
364 jte = mono_jumptable_add_entry ();
365 code = mono_arm_load_jumptable_entry (code, jte, ARMREG_IP);
366 ARM_BLX_REG_COND (code, cond, ARMREG_IP);
368 ARM_BL_COND (code, cond, 0);
373 #ifdef USE_JUMP_TABLES
/* Load the address of jumptable entry JTE into REG via a movw/movt pair
 * (requires ARMv6T2+/v7 immediate-move encodings). */
375 mono_arm_load_jumptable_entry_addr (guint8 *code, gpointer *jte, ARMReg reg)
377 ARM_MOVW_REG_IMM (code, reg, GPOINTER_TO_UINT(jte) & 0xffff);
378 ARM_MOVT_REG_IMM (code, reg, (GPOINTER_TO_UINT(jte) >> 16) & 0xffff);
/* Load the *contents* of jumptable entry JTE into REG: first its
 * address, then a dereferencing LDR. */
383 mono_arm_load_jumptable_entry (guint8 *code, gpointer* jte, ARMReg reg)
385 code = mono_arm_load_jumptable_entry_addr (code, jte, reg);
386 ARM_LDR_IMM (code, reg, reg, 0);
/*
 * Move a call's return value from the ABI return location into
 * ins->dreg. Float results: R4 values are widened to double (CVTS);
 * on the softfp path doubles come back in r0/r1 and are transferred
 * into a VFP register via FMSR/FMDRR.
 */
392 emit_move_return_value (MonoCompile *cfg, MonoInst *ins, guint8 *code)
394 switch (ins->opcode) {
397 case OP_FCALL_MEMBASE:
399 if (((MonoCallInst*)ins)->signature->ret->type == MONO_TYPE_R4) {
/* Hard-float: result already in s0/d0; widen single to double. */
401 ARM_CVTS (code, ins->dreg, ARM_VFP_F0);
/* Softfp: single returned in r0; move to VFP then widen. */
403 ARM_FMSR (code, ins->dreg, ARMREG_R0);
404 ARM_CVTS (code, ins->dreg, ins->dreg);
408 ARM_CPYD (code, ins->dreg, ARM_VFP_D0);
/* Softfp double: r0/r1 pair into a VFP double register. */
410 ARM_FMDRR (code, ARMREG_R0, ARMREG_R1, ins->dreg);
423 * Emit code to push an LMF structure on the LMF stack.
424 * On arm, this is intermixed with the initialization of other fields of the structure.
/*
 * Emit code that links a MonoLMF frame (built on the stack at
 * lmf_offset from sp) into the thread's LMF list. The lmf_addr is
 * obtained as cheaply as possible: via __aeabi_read_tp + a TLS offset
 * when available, by inlining pthread_getspecific for managed-to-native
 * wrappers, or by calling mono_get_lmf_addr () as the general fallback.
 */
427 emit_save_lmf (MonoCompile *cfg, guint8 *code, gint32 lmf_offset)
429 gboolean get_lmf_fast = FALSE;
432 #ifdef HAVE_AEABI_READ_TP
433 gint32 lmf_addr_tls_offset = mono_get_lmf_addr_tls_offset ();
435 if (lmf_addr_tls_offset != -1) {
/* Fast path: TLS base from __aeabi_read_tp, then a fixed offset. */
438 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_INTERNAL_METHOD,
439 (gpointer)"__aeabi_read_tp");
440 code = emit_call_seq (cfg, code);
442 ARM_LDR_IMM (code, ARMREG_R0, ARMREG_R0, lmf_addr_tls_offset);
448 if (cfg->method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) {
451 /* Inline mono_get_lmf_addr () */
452 /* jit_tls = pthread_getspecific (mono_jit_tls_id); lmf_addr = &jit_tls->lmf; */
454 /* Load mono_jit_tls_id */
456 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_JIT_TLS_ID, NULL);
457 ARM_LDR_IMM (code, ARMREG_R0, ARMREG_PC, 0);
/* Inline data word holding mono_jit_tls_id, patched at load time. */
459 *(gpointer*)code = NULL;
461 ARM_LDR_REG_REG (code, ARMREG_R0, ARMREG_PC, ARMREG_R0);
462 /* call pthread_getspecific () */
463 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_INTERNAL_METHOD,
464 (gpointer)"pthread_getspecific");
465 code = emit_call_seq (cfg, code);
466 /* lmf_addr = &jit_tls->lmf */
467 lmf_offset = G_STRUCT_OFFSET (MonoJitTlsData, lmf);
468 g_assert (arm_is_imm8 (lmf_offset));
469 ARM_ADD_REG_IMM (code, ARMREG_R0, ARMREG_R0, lmf_offset, 0);
/* Slow path: plain call to mono_get_lmf_addr (). */
476 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_INTERNAL_METHOD,
477 (gpointer)"mono_get_lmf_addr");
478 code = emit_call_seq (cfg, code);
480 /* we build the MonoLMF structure on the stack - see mini-arm.h */
481 /* lmf_offset is the offset from the previous stack pointer,
482 * alloc_size is the total stack space allocated, so the offset
483 * of MonoLMF from the current stack ptr is alloc_size - lmf_offset.
484 * The pointer to the struct is put in r1 (new_lmf).
485 * ip is used as scratch
486 * The callee-saved registers are already in the MonoLMF structure
488 code = emit_big_add (code, ARMREG_R1, ARMREG_SP, lmf_offset);
489 /* r0 is the result from mono_get_lmf_addr () */
490 ARM_STR_IMM (code, ARMREG_R0, ARMREG_R1, G_STRUCT_OFFSET (MonoLMF, lmf_addr));
491 /* new_lmf->previous_lmf = *lmf_addr */
492 ARM_LDR_IMM (code, ARMREG_IP, ARMREG_R0, G_STRUCT_OFFSET (MonoLMF, previous_lmf));
493 ARM_STR_IMM (code, ARMREG_IP, ARMREG_R1, G_STRUCT_OFFSET (MonoLMF, previous_lmf));
494 /* *(lmf_addr) = r1 */
495 ARM_STR_IMM (code, ARMREG_R1, ARMREG_R0, G_STRUCT_OFFSET (MonoLMF, previous_lmf));
496 /* Skip method (only needed for trampoline LMF frames) */
497 ARM_STR_IMM (code, ARMREG_SP, ARMREG_R1, G_STRUCT_OFFSET (MonoLMF, sp));
498 ARM_STR_IMM (code, ARMREG_FP, ARMREG_R1, G_STRUCT_OFFSET (MonoLMF, fp));
499 /* save the current IP */
500 ARM_MOV_REG_REG (code, ARMREG_IP, ARMREG_PC);
501 ARM_STR_IMM (code, ARMREG_IP, ARMREG_R1, G_STRUCT_OFFSET (MonoLMF, ip));
/* Tell the precise GC that the LMF area holds no managed references. */
503 for (i = 0; i < sizeof (MonoLMF); i += sizeof (mgreg_t))
504 mini_gc_set_slot_type_from_fp (cfg, lmf_offset + i, SLOT_NOREF);
/*
 * Emit loads of single-precision float arguments from their stack
 * slots into the VFP argument registers recorded in inst->float_args.
 * Grows cfg->native_code when the emitted sequence would overflow the
 * current buffer; *offset tracks the running position in native_code.
 */
515 emit_float_args (MonoCompile *cfg, MonoCallInst *inst, guint8 *code, int *max_len, guint *offset)
519 for (list = inst->float_args; list; list = list->next) {
520 FloatArgData *fad = list->data;
521 MonoInst *var = get_vreg_to_inst (cfg, fad->vreg);
522 gboolean imm = arm_is_fpimm8 (var->inst_offset);
524 /* 4+1 insns for emit_big_add () and 1 for FLDS. */
/* Ensure the buffer can hold the worst-case sequence before emitting. */
530 if (*offset + *max_len > cfg->code_size) {
531 cfg->code_size += *max_len;
532 cfg->native_code = g_realloc (cfg->native_code, cfg->code_size);
534 code = cfg->native_code + *offset;
/* Offset doesn't fit FLDS imm8: compute the address in lr first. */
538 code = emit_big_add (code, ARMREG_LR, var->inst_basereg, var->inst_offset);
539 ARM_FLDS (code, fad->hreg, ARMREG_LR, 0);
541 ARM_FLDS (code, fad->hreg, var->inst_basereg, var->inst_offset);
543 *offset = code - cfg->native_code;
/*
 * Spill VFP scratch register REG (vfp_scratch1/2) to its dedicated
 * stack slot so its previous contents survive scratch use; needed on
 * armhf where d14/d15 are callee-saved. Paired with _restore ().
 */
550 mono_arm_emit_vfp_scratch_save (MonoCompile *cfg, guint8 *code, int reg)
554 g_assert (reg == vfp_scratch1 || reg == vfp_scratch2);
556 inst = (MonoInst *) cfg->arch.vfp_scratch_slots [reg == vfp_scratch1 ? 0 : 1];
/* Slot offset outside FSTD's imm8 range: form the address in lr. */
559 if (!arm_is_fpimm8 (inst->inst_offset)) {
560 code = emit_big_add (code, ARMREG_LR, inst->inst_basereg, inst->inst_offset);
561 ARM_FSTD (code, reg, ARMREG_LR, 0);
563 ARM_FSTD (code, reg, inst->inst_basereg, inst->inst_offset);
/*
 * Reload VFP scratch register REG from the stack slot written by
 * mono_arm_emit_vfp_scratch_save (). Mirrors the save's addressing.
 */
570 mono_arm_emit_vfp_scratch_restore (MonoCompile *cfg, guint8 *code, int reg)
574 g_assert (reg == vfp_scratch1 || reg == vfp_scratch2);
576 inst = (MonoInst *) cfg->arch.vfp_scratch_slots [reg == vfp_scratch1 ? 0 : 1];
/* Same imm8-range workaround as the save path. */
579 if (!arm_is_fpimm8 (inst->inst_offset)) {
580 code = emit_big_add (code, ARMREG_LR, inst->inst_basereg, inst->inst_offset);
581 ARM_FLDD (code, reg, ARMREG_LR, 0);
583 ARM_FLDD (code, reg, inst->inst_basereg, inst->inst_offset);
592 * Emit code to pop an LMF structure from the LMF stack.
/*
 * Emit code to unlink the current MonoLMF from the per-thread LMF
 * list: *lmf_addr = lmf->previous_lmf. Small offsets address the LMF
 * directly off the frame register; otherwise r2 holds its address.
 */
595 emit_restore_lmf (MonoCompile *cfg, guint8 *code, gint32 lmf_offset)
599 if (lmf_offset < 32) {
600 basereg = cfg->frame_reg;
605 code = emit_big_add (code, ARMREG_R2, cfg->frame_reg, lmf_offset);
608 /* ip = previous_lmf */
609 ARM_LDR_IMM (code, ARMREG_IP, basereg, offset + G_STRUCT_OFFSET (MonoLMF, previous_lmf));
611 ARM_LDR_IMM (code, ARMREG_LR, basereg, offset + G_STRUCT_OFFSET (MonoLMF, lmf_addr));
612 /* *(lmf_addr) = previous_lmf */
613 ARM_STR_IMM (code, ARMREG_IP, ARMREG_LR, G_STRUCT_OFFSET (MonoLMF, previous_lmf));
618 #endif /* #ifndef DISABLE_JIT */
621 * mono_arch_get_argument_info:
622 * @csig: a method signature
623 * @param_count: the number of parameters to consider
624 * @arg_info: an array to store the result infos
626 * Gathers information on parameters such as size, alignment and
627 * padding. arg_info should be large enough to hold param_count + 1 entries.
629 * Returns the size of the activation frame.
/* See the function comment above: fills arg_info[0..param_count] with
 * size/offset/padding per parameter and returns the frame size. */
632 mono_arch_get_argument_info (MonoGenericSharingContext *gsctx, MonoMethodSignature *csig, int param_count, MonoJitArgumentInfo *arg_info)
634 int k, frame_size = 0;
635 guint32 size, align, pad;
/* A struct return is passed via a hidden pointer, costing one slot. */
639 t = mini_type_get_underlying_type (gsctx, csig->ret);
640 if (MONO_TYPE_ISSTRUCT (t)) {
641 frame_size += sizeof (gpointer);
645 arg_info [0].offset = offset;
/* 'this' also occupies one pointer slot when present. */
648 frame_size += sizeof (gpointer);
652 arg_info [0].size = frame_size;
654 for (k = 0; k < param_count; k++) {
655 size = mini_type_stack_size_full (NULL, csig->params [k], &align, csig->pinvoke);
657 /* ignore alignment for now */
/* Round frame_size up to 'align' and record the padding inserted. */
660 frame_size += pad = (align - (frame_size & (align - 1))) & (align - 1);
661 arg_info [k].pad = pad;
663 arg_info [k + 1].pad = 0;
664 arg_info [k + 1].size = size;
666 arg_info [k + 1].offset = offset;
/* Final frame must honour the architecture frame alignment. */
670 align = MONO_ARCH_FRAME_ALIGNMENT;
671 frame_size += pad = (align - (frame_size & (align - 1))) & (align - 1);
672 arg_info [k].pad = pad;
677 #define MAX_ARCH_DELEGATE_PARAMS 3
/*
 * Generate the tiny trampoline used for delegate Invoke. The
 * has_target variant replaces 'this' (r0) with delegate->target and
 * tail-jumps to method_ptr; the target-less variant shifts the first
 * param_count register args down by one instead. *code_size receives
 * the emitted length.
 * NOTE(review): param_count is declared gboolean here but used as an
 * int loop bound below — likely a historical signature quirk; confirm.
 */
680 get_delegate_invoke_impl (gboolean has_target, gboolean param_count, guint32 *code_size)
682 guint8 *code, *start;
685 start = code = mono_global_codeman_reserve (12);
687 /* Replace the this argument with the target */
688 ARM_LDR_IMM (code, ARMREG_IP, ARMREG_R0, G_STRUCT_OFFSET (MonoDelegate, method_ptr));
689 ARM_LDR_IMM (code, ARMREG_R0, ARMREG_R0, G_STRUCT_OFFSET (MonoDelegate, target));
/* Tail call: jump straight to the delegate's method_ptr. */
690 ARM_MOV_REG_REG (code, ARMREG_PC, ARMREG_IP);
692 g_assert ((code - start) <= 12);
694 mono_arch_flush_icache (start, 12);
/* Target-less variant: 2 insns + one mov per shifted argument. */
698 size = 8 + param_count * 4;
699 start = code = mono_global_codeman_reserve (size);
701 ARM_LDR_IMM (code, ARMREG_IP, ARMREG_R0, G_STRUCT_OFFSET (MonoDelegate, method_ptr));
702 /* slide down the arguments */
703 for (i = 0; i < param_count; ++i) {
704 ARM_MOV_REG_REG (code, (ARMREG_R0 + i), (ARMREG_R0 + i + 1));
706 ARM_MOV_REG_REG (code, ARMREG_PC, ARMREG_IP);
708 g_assert ((code - start) <= size);
710 mono_arch_flush_icache (start, size);
714 *code_size = code - start;
720 * mono_arch_get_delegate_invoke_impls:
722 * Return a list of MonoAotTrampInfo structures for the delegate invoke impl
/* Build the full set of delegate-invoke trampolines (has_target plus
 * one per arity up to MAX_ARCH_DELEGATE_PARAMS) as MonoTrampInfo
 * entries for AOT compilation. */
726 mono_arch_get_delegate_invoke_impls (void)
734 code = get_delegate_invoke_impl (TRUE, 0, &code_len);
735 res = g_slist_prepend (res, mono_tramp_info_create ("delegate_invoke_impl_has_target", code, code_len, NULL, NULL));
737 for (i = 0; i <= MAX_ARCH_DELEGATE_PARAMS; ++i) {
738 code = get_delegate_invoke_impl (FALSE, i, &code_len);
739 tramp_name = g_strdup_printf ("delegate_invoke_impl_target_%d", i);
740 res = g_slist_prepend (res, mono_tramp_info_create (tramp_name, code, code_len, NULL, NULL));
/*
 * Return (and cache) the delegate-invoke trampoline matching SIG.
 * AOT mode fetches a precompiled trampoline by name; JIT mode
 * generates it on first use. Caches are guarded by the arch mutex.
 * Returns NULL for unsupported signatures (struct returns, too many
 * or non-regsize parameters).
 */
748 mono_arch_get_delegate_invoke_impl (MonoMethodSignature *sig, gboolean has_target)
750 guint8 *code, *start;
752 /* FIXME: Support more cases */
753 if (MONO_TYPE_ISSTRUCT (sig->ret))
/* has_target variant: a single shared trampoline, cached once. */
757 static guint8* cached = NULL;
758 mono_mini_arch_lock ();
760 mono_mini_arch_unlock ();
765 start = mono_aot_get_trampoline ("delegate_invoke_impl_has_target");
767 start = get_delegate_invoke_impl (TRUE, 0, NULL);
769 mono_mini_arch_unlock ();
/* Target-less variants: one cache slot per supported arity. */
772 static guint8* cache [MAX_ARCH_DELEGATE_PARAMS + 1] = {NULL};
775 if (sig->param_count > MAX_ARCH_DELEGATE_PARAMS)
/* All params must fit a core register for the arg-sliding scheme. */
777 for (i = 0; i < sig->param_count; ++i)
778 if (!mono_is_regsize_var (sig->params [i]))
781 mono_mini_arch_lock ();
782 code = cache [sig->param_count];
784 mono_mini_arch_unlock ();
789 char *name = g_strdup_printf ("delegate_invoke_impl_target_%d", sig->param_count);
790 start = mono_aot_get_trampoline (name);
793 start = get_delegate_invoke_impl (FALSE, sig->param_count, NULL);
795 cache [sig->param_count] = start;
796 mono_mini_arch_unlock ();
/* 'this' is always passed in r0 on ARM; recover it from the saved regs. */
804 mono_arch_get_this_arg_from_call (mgreg_t *regs, guint8 *code)
806 return (gpointer)regs [ARMREG_R0];
810 * Initialize the cpu to execute managed code.
813 mono_arch_cpu_init (void)
815 #if defined(__APPLE__)
818 i8_align = __alignof__ (gint64);
/*
 * Generate a small assembly thunk that captures the caller's full
 * register state into a MonoContext on the stack, calls FUNCTION
 * with a pointer to that context (r0), then restores every register
 * — including pc — from the (possibly modified) context. Used by the
 * soft-debugger for single-step/breakpoint entry points.
 */
823 create_function_wrapper (gpointer function)
825 guint8 *start, *code;
827 start = code = mono_global_codeman_reserve (96);
830 * Construct the MonoContext structure on the stack.
833 ARM_SUB_REG_IMM8 (code, ARMREG_SP, ARMREG_SP, sizeof (MonoContext));
835 /* save ip, lr and pc into their corresponding ctx.regs slots. */
836 ARM_STR_IMM (code, ARMREG_IP, ARMREG_SP, G_STRUCT_OFFSET (MonoContext, regs) + sizeof (mgreg_t) * ARMREG_IP);
837 ARM_STR_IMM (code, ARMREG_LR, ARMREG_SP, G_STRUCT_OFFSET (MonoContext, regs) + 4 * ARMREG_LR);
838 ARM_STR_IMM (code, ARMREG_LR, ARMREG_SP, G_STRUCT_OFFSET (MonoContext, regs) + 4 * ARMREG_PC);
840 /* save r0..r10 and fp */
841 ARM_ADD_REG_IMM8 (code, ARMREG_IP, ARMREG_SP, G_STRUCT_OFFSET (MonoContext, regs));
842 ARM_STM (code, ARMREG_IP, 0x0fff);
844 /* now we can update fp. */
845 ARM_MOV_REG_REG (code, ARMREG_FP, ARMREG_SP);
847 /* make ctx.esp hold the actual value of sp at the beginning of this method. */
848 ARM_ADD_REG_IMM8 (code, ARMREG_R0, ARMREG_FP, sizeof (MonoContext));
849 ARM_STR_IMM (code, ARMREG_R0, ARMREG_IP, 4 * ARMREG_SP);
850 ARM_STR_IMM (code, ARMREG_R0, ARMREG_FP, G_STRUCT_OFFSET (MonoContext, regs) + 4 * ARMREG_SP);
852 /* make ctx.eip hold the address of the call. */
853 ARM_SUB_REG_IMM8 (code, ARMREG_LR, ARMREG_LR, 4);
854 ARM_STR_IMM (code, ARMREG_LR, ARMREG_SP, G_STRUCT_OFFSET (MonoContext, pc));
856 /* r0 now points to the MonoContext */
857 ARM_MOV_REG_REG (code, ARMREG_R0, ARMREG_FP);
/* Load FUNCTION's address into ip: jump table or inline data word. */
860 #ifdef USE_JUMP_TABLES
862 gpointer *jte = mono_jumptable_add_entry ();
863 code = mono_arm_load_jumptable_entry (code, jte, ARMREG_IP);
867 ARM_LDR_IMM (code, ARMREG_IP, ARMREG_PC, 0);
869 *(gpointer*)code = function;
872 ARM_BLX_REG (code, ARMREG_IP);
874 /* we're back; save ctx.eip and ctx.esp into the corresponding regs slots. */
875 ARM_LDR_IMM (code, ARMREG_R0, ARMREG_FP, G_STRUCT_OFFSET (MonoContext, pc));
876 ARM_STR_IMM (code, ARMREG_R0, ARMREG_FP, G_STRUCT_OFFSET (MonoContext, regs) + 4 * ARMREG_LR);
877 ARM_STR_IMM (code, ARMREG_R0, ARMREG_FP, G_STRUCT_OFFSET (MonoContext, regs) + 4 * ARMREG_PC);
879 /* make ip point to the regs array, then restore everything, including pc. */
880 ARM_ADD_REG_IMM8 (code, ARMREG_IP, ARMREG_FP, G_STRUCT_OFFSET (MonoContext, regs));
881 ARM_LDM (code, ARMREG_IP, 0xffff);
883 mono_arch_flush_icache (start, code - start);
889 * Initialize architecture specific code.
/*
 * One-time architecture initialization: sets up the arch mutex,
 * soft-debugger wrappers and trigger pages, registers ARM-specific
 * JIT icalls, then determines FPU mode and CPU feature flags from
 * hwcap detection, with an optional MONO_CPU_ARCH env override.
 */
892 mono_arch_init (void)
894 const char *cpu_arch;
896 InitializeCriticalSection (&mini_arch_mutex);
897 #ifdef MONO_ARCH_SOFT_DEBUG_SUPPORTED
898 if (mini_get_debug_options ()->soft_breakpoints) {
899 single_step_func_wrapper = create_function_wrapper (debugger_agent_single_step_from_context);
900 breakpoint_func_wrapper = create_function_wrapper (debugger_agent_breakpoint_from_context);
/* Trigger pages: sequence points read these; bp page is unreadable so
 * an enabled breakpoint faults. */
905 ss_trigger_page = mono_valloc (NULL, mono_pagesize (), MONO_MMAP_READ|MONO_MMAP_32BIT);
906 bp_trigger_page = mono_valloc (NULL, mono_pagesize (), MONO_MMAP_READ|MONO_MMAP_32BIT);
907 mono_mprotect (bp_trigger_page, mono_pagesize (), 0);
910 mono_aot_register_jit_icall ("mono_arm_throw_exception", mono_arm_throw_exception);
911 mono_aot_register_jit_icall ("mono_arm_throw_exception_by_token", mono_arm_throw_exception_by_token);
912 mono_aot_register_jit_icall ("mono_arm_resume_unwind", mono_arm_resume_unwind);
913 #if defined(ENABLE_GSHAREDVT)
914 mono_aot_register_jit_icall ("mono_arm_start_gsharedvt_call", mono_arm_start_gsharedvt_call);
917 #if defined(__ARM_EABI__)
918 eabi_supported = TRUE;
/* Compile-time FPU selection, possibly downgraded at runtime below. */
921 #if defined(ARM_FPU_VFP_HARD)
922 arm_fpu = MONO_ARM_FPU_VFP_HARD;
924 arm_fpu = MONO_ARM_FPU_VFP;
926 #if defined(ARM_FPU_NONE) && !defined(__APPLE__)
927 /* If we're compiling with a soft float fallback and it
928 turns out that no VFP unit is available, we need to
929 switch to soft float. We don't do this for iOS, since
930 iOS devices always have a VFP unit. */
931 if (!mono_hwcap_arm_has_vfp)
932 arm_fpu = MONO_ARM_FPU_NONE;
936 v5_supported = mono_hwcap_arm_is_v5;
937 v6_supported = mono_hwcap_arm_is_v6;
938 v7_supported = mono_hwcap_arm_is_v7;
939 v7s_supported = mono_hwcap_arm_is_v7s;
941 #if defined(__APPLE__)
942 /* iOS is special-cased here because we don't yet
943 have a way to properly detect CPU features on it. */
944 thumb_supported = TRUE;
947 thumb_supported = mono_hwcap_arm_has_thumb;
948 thumb2_supported = mono_hwcap_arm_has_thumb2;
951 /* Format: armv(5|6|7[s])[-thumb[2]] */
952 cpu_arch = g_getenv ("MONO_CPU_ARCH");
954 /* Do this here so it overrides any detection. */
956 if (strncmp (cpu_arch, "armv", 4) == 0) {
957 v5_supported = cpu_arch [4] >= '5';
958 v6_supported = cpu_arch [4] >= '6';
959 v7_supported = cpu_arch [4] >= '7';
960 v7s_supported = strncmp (cpu_arch, "armv7s", 6) == 0;
963 thumb_supported = strstr (cpu_arch, "thumb") != NULL;
964 thumb2_supported = strstr (cpu_arch, "thumb2") != NULL;
969 * Cleanup architecture specific code.
972 mono_arch_cleanup (void)
977 * This function returns the optimizations supported on this cpu.
980 mono_arch_cpu_optimizations (guint32 *exclude_mask)
982 /* no arm-specific optimizations yet */
988 * This function test for all SIMD functions supported.
990 * Returns a bitmask corresponding to all supported versions.
994 mono_arch_cpu_enumerate_simd_versions (void)
996 /* SIMD is currently unimplemented */
1004 mono_arch_opcode_needs_emulation (MonoCompile *cfg, int opcode)
1006 if (v7s_supported) {
1020 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
/* TRUE when no FPU is used and all FP ops are software-emulated. */
1022 mono_arch_is_soft_float (void)
1024 return arm_fpu == MONO_ARM_FPU_NONE;
/* TRUE for the armhf ABI (FP args/returns in VFP registers). */
1029 mono_arm_is_hard_float (void)
1031 return arm_fpu == MONO_ARM_FPU_VFP_HARD;
/* TRUE if T fits in a single 32-bit core register (and so is eligible
 * for global register allocation). */
1035 is_regsize_var (MonoGenericSharingContext *gsctx, MonoType *t) {
1038 t = mini_type_get_underlying_type (gsctx, t);
1045 case MONO_TYPE_FNPTR:
1047 case MONO_TYPE_OBJECT:
1048 case MONO_TYPE_STRING:
1049 case MONO_TYPE_CLASS:
1050 case MONO_TYPE_SZARRAY:
1051 case MONO_TYPE_ARRAY:
/* Generic instantiations count only when they are reference types. */
1053 case MONO_TYPE_GENERICINST:
1054 if (!mono_type_generic_inst_is_valuetype (t))
1057 case MONO_TYPE_VALUETYPE:
/* Collect the variables eligible for global register allocation:
 * live-range-valid locals/args that are register-sized, not volatile
 * and not address-taken. Returns a sorted GList of MonoMethodVar. */
1064 mono_arch_get_allocatable_int_vars (MonoCompile *cfg)
1069 for (i = 0; i < cfg->num_varinfo; i++) {
1070 MonoInst *ins = cfg->varinfo [i];
1071 MonoMethodVar *vmv = MONO_VARINFO (cfg, i);
/* Skip unused or dead variables (empty live range). */
1074 if (vmv->range.first_use.abs_pos >= vmv->range.last_use.abs_pos)
1077 if (ins->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT) || (ins->opcode != OP_LOCAL && ins->opcode != OP_ARG))
1080 /* we can only allocate 32 bit values */
1081 if (is_regsize_var (cfg->generic_sharing_context, ins->inst_vtype)) {
1082 g_assert (MONO_VARINFO (cfg, i)->reg == -1);
1083 g_assert (i == vmv->idx);
1084 vars = mono_varlist_insert_sorted (cfg, vars, vmv, FALSE);
1091 #define USE_EXTRA_TEMPS 0
/* Return the list of callee-saved core registers the register
 * allocator may use globally for this method; membership depends on
 * frame-pointer omission, the iPhone ABI and rgctx usage. */
1094 mono_arch_get_global_int_regs (MonoCompile *cfg)
1098 mono_arch_compute_omit_fp (cfg);
1101 * FIXME: Interface calls might go through a static rgctx trampoline which
1102 * sets V5, but it doesn't save it, so we need to save it ourselves, and
1105 if (cfg->flags & MONO_CFG_HAS_CALLS)
1106 cfg->uses_rgctx_reg = TRUE;
/* fp is only allocatable when the frame pointer is omitted. */
1108 if (cfg->arch.omit_fp)
1109 regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_FP));
1110 regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_V1));
1111 regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_V2));
1112 regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_V3));
1114 /* V4=R7 is used as a frame pointer, but V7=R10 is preserved */
1115 regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_V7));
1117 regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_V4));
1118 if (!(cfg->compile_aot || cfg->uses_rgctx_reg || COMPILE_LLVM (cfg)))
1119 /* V5 is reserved for passing the vtable/rgctx/IMT method */
1120 regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_V5));
1121 /*regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_V6));*/
1122 /*regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_V7));*/
1128 * mono_arch_regalloc_cost:
1130 * Return the cost, in number of memory references, of the action of
1131 * allocating the variable VMV into a register during global register
1135 mono_arch_regalloc_cost (MonoCompile *cfg, MonoMethodVar *vmv)
1141 #endif /* #ifndef DISABLE_JIT */
1143 #ifndef __GNUC_PREREQ
1144 #define __GNUC_PREREQ(maj, min) (0)
/*
 * Flush the instruction cache for the freshly generated code at
 * [code, code+size) — mandatory on ARM after JITting. Dispatches on
 * platform: no-op for NaCl/cross-compile, sys_icache_invalidate on
 * Apple, GCC's __clear_cache, or a raw cacheflush syscall.
 */
1148 mono_arch_flush_icache (guint8 *code, gint size)
1150 #if defined(__native_client__)
1151 // For Native Client we don't have to flush i-cache here,
1152 // as it's being done by dyncode interface.
1155 #ifdef MONO_CROSS_COMPILE
1157 sys_icache_invalidate (code, size);
1158 #elif __GNUC_PREREQ(4, 1)
1159 __clear_cache (code, code + size);
1160 #elif defined(PLATFORM_ANDROID)
/* Android: __ARM_NR_cacheflush private syscall number. */
1161 const int syscall = 0xf0002;
1169 : "r" (code), "r" (code + size), "r" (syscall)
1170 : "r0", "r1", "r7", "r2"
/* Generic Linux fallback: swi-based sys_cacheflush. */
1173 __asm __volatile ("mov r0, %0\n"
1176 "swi 0x9f0002 @ sys_cacheflush"
1178 : "r" (code), "r" (code + size), "r" (0)
1179 : "r0", "r1", "r3" );
1181 #endif /* !__native_client__ */
1181 #endif /* !__native_client__ */
1192 RegTypeStructByAddr,
1193 /* gsharedvt argument passed by addr in greg */
1194 RegTypeGSharedVtInReg,
1195 /* gsharedvt argument passed by addr on stack */
1196 RegTypeGSharedVtOnStack,
1201 guint16 vtsize; /* in param area */
1205 guint8 size : 4; /* 1, 2, 4, 8, or regs used by RegTypeStructByVal */
1210 guint32 stack_usage;
1211 gboolean vtype_retaddr;
1212 /* The index of the vret arg in the argument list */
1222 /*#define __alignof__(a) sizeof(a)*/
1223 #define __alignof__(type) G_STRUCT_OFFSET(struct { char c; type x; }, x)
1226 #define PARAM_REGS 4
/*
 * Assign the next integer argument to a core register or a stack
 * slot, updating *gr / *stack_size and filling AINFO. 'simple' args
 * take one register; 64-bit args take an (aligned) register pair,
 * possibly split across r3 and the stack on 4-byte-aligned ABIs.
 */
1229 add_general (guint *gr, guint *stack_size, ArgInfo *ainfo, gboolean simple)
1232 if (*gr > ARMREG_R3) {
/* Registers exhausted: pass on the caller's stack. */
1234 ainfo->offset = *stack_size;
1235 ainfo->reg = ARMREG_SP; /* in the caller */
1236 ainfo->storage = RegTypeBase;
1239 ainfo->storage = RegTypeGeneral;
/* i8_align == 4 means a 64-bit arg may straddle r3 and the stack. */
1246 split = i8_align == 4;
1251 if (*gr == ARMREG_R3 && split) {
1252 /* first word in r3 and the second on the stack */
1253 ainfo->offset = *stack_size;
1254 ainfo->reg = ARMREG_SP; /* in the caller */
1255 ainfo->storage = RegTypeBaseGen;
1257 } else if (*gr >= ARMREG_R3) {
1258 if (eabi_supported) {
1259 /* darwin aligns longs to 4 byte only */
1260 if (i8_align == 8) {
1265 ainfo->offset = *stack_size;
1266 ainfo->reg = ARMREG_SP; /* in the caller */
1267 ainfo->storage = RegTypeBase;
/* EABI: 64-bit values start in an even-numbered register. */
1270 if (eabi_supported) {
1271 if (i8_align == 8 && ((*gr) & 1))
1274 ainfo->storage = RegTypeIRegPair;
/*
 * Assign the next floating-point argument per the armhf (AAPCS VFP)
 * rules: doubles take an even/odd single-reg pair, singles may
 * back-fill a previously skipped odd register tracked by *float_spare;
 * once s0-s15 are exhausted the value goes on the stack.
 */
1283 add_float (guint *fpr, guint *stack_size, ArgInfo *ainfo, gboolean is_double, gint *float_spare)
1286 * If we're calling a function like this:
1288 * void foo(float a, double b, float c)
1290 * We pass a in s0 and b in d1. That leaves us
1291 * with s1 being unused. The armhf ABI recognizes
1292 * this and requires register assignment to then
1293 * use that for the next single-precision arg,
1294 * i.e. c in this example. So float_spare either
1295 * tells us which reg to use for the next single-
1296 * precision arg, or it's -1, meaning use *fpr.
1298 * Note that even though most of the JIT speaks
1299 * double-precision, fpr represents single-
1300 * precision registers.
1302 * See parts 5.5 and 6.1.2 of the AAPCS for how
1306 if (*fpr < ARM_VFP_F16 || (!is_double && *float_spare >= 0)) {
1307 ainfo->storage = RegTypeFP;
1311 * If we're passing a double-precision value
1312 * and *fpr is odd (e.g. it's s1, s3, ...)
1313 * we need to use the next even register. So
1314 * we mark the current *fpr as a spare that
1315 * can be used for the next single-precision
1319 *float_spare = *fpr;
1324 * At this point, we have an even register
1325 * so we assign that and move along.
1329 } else if (*float_spare >= 0) {
1331 * We're passing a single-precision value
1332 * and it looks like a spare single-
1333 * precision register is available. Let's
1337 ainfo->reg = *float_spare;
1341 * If we hit this branch, we're passing a
1342 * single-precision value and we can simply
1343 * use the next available register.
1351 * We've exhausted available floating point
1352 * regs, so pass the rest on the stack.
/* Stack fallback mirrors integer RegTypeBase handling. */
1360 ainfo->offset = *stack_size;
1361 ainfo->reg = ARMREG_SP;
1362 ainfo->storage = RegTypeBase;
/*
 * get_call_info:
 *
 *   Compute the ARM calling-convention layout for SIG: where the return value
 * lives, where `this`/vret-address/each parameter goes (registers, register
 * pairs, VFP regs, stack), and the total stack usage.  The result is
 * allocated from MP when given, otherwise with g_malloc0 (caller frees).
 * NOTE(review): sampled excerpt -- some lines between the visible ones are
 * elided; comments describe only what is shown.
 */
1369 get_call_info (MonoGenericSharingContext *gsctx, MonoMemPool *mp, MonoMethodSignature *sig)
1371 guint i, gr, fpr, pstart;
1373 int n = sig->hasthis + sig->param_count;
1374 MonoType *simpletype;
1375 guint32 stack_size = 0;
1377 gboolean is_pinvoke = sig->pinvoke;
/* One trailing ArgInfo per argument (flexible allocation). */
1381 cinfo = mono_mempool_alloc0 (mp, sizeof (CallInfo) + (sizeof (ArgInfo) * n));
1383 cinfo = g_malloc0 (sizeof (CallInfo) + (sizeof (ArgInfo) * n));
/* --- Return value classification --- */
1390 t = mini_type_get_underlying_type (gsctx, sig->ret);
1391 if (MONO_TYPE_ISSTRUCT (t)) {
/* Small pinvoke structs are returned in a register; everything else via a
 * hidden return-address argument. */
1394 if (is_pinvoke && mono_class_native_size (mono_class_from_mono_type (t), &align) <= sizeof (gpointer)) {
1395 cinfo->ret.storage = RegTypeStructByVal;
1397 cinfo->vtype_retaddr = TRUE;
1399 } else if (!(t->type == MONO_TYPE_GENERICINST && !mono_type_generic_inst_is_valuetype (t)) && mini_is_gsharedvt_type_gsctx (gsctx, t)) {
1400 cinfo->vtype_retaddr = TRUE;
1406 * To simplify get_this_arg_reg () and LLVM integration, emit the vret arg after
1407 * the first argument, allowing 'this' to be always passed in the first arg reg.
1408 * Also do this if the first argument is a reference type, since virtual calls
1409 * are sometimes made using calli without sig->hasthis set, like in the delegate
1412 if (cinfo->vtype_retaddr && !is_pinvoke && (sig->hasthis || (sig->param_count > 0 && MONO_TYPE_IS_REFERENCE (mini_type_get_underlying_type (gsctx, sig->params [0]))))) {
1414 add_general (&gr, &stack_size, cinfo->args + 0, TRUE);
1416 add_general (&gr, &stack_size, &cinfo->args [sig->hasthis + 0], TRUE);
/* vret arg after the first argument (index 1). */
1420 add_general (&gr, &stack_size, &cinfo->ret, TRUE);
1421 cinfo->vret_arg_index = 1;
1425 add_general (&gr, &stack_size, cinfo->args + 0, TRUE);
1429 if (cinfo->vtype_retaddr)
1430 add_general (&gr, &stack_size, &cinfo->ret, TRUE);
/* --- Parameter classification --- */
1433 DEBUG(printf("params: %d\n", sig->param_count));
1434 for (i = pstart; i < sig->param_count; ++i) {
1435 ArgInfo *ainfo = &cinfo->args [n];
1437 if ((sig->call_convention == MONO_CALL_VARARG) && (i == sig->sentinelpos)) {
1438 /* Prevent implicit arguments and sig_cookie from
1439 being passed in registers */
1442 /* Emit the signature cookie just before the implicit arguments */
1443 add_general (&gr, &stack_size, &cinfo->sig_cookie, TRUE);
1445 DEBUG(printf("param %d: ", i));
/* By-ref parameters are always a single pointer word. */
1446 if (sig->params [i]->byref) {
1447 DEBUG(printf("byref\n"));
1448 add_general (&gr, &stack_size, ainfo, TRUE);
1452 simpletype = mini_type_get_underlying_type (gsctx, sig->params [i]);
1453 switch (simpletype->type) {
1454 case MONO_TYPE_BOOLEAN:
1457 cinfo->args [n].size = 1;
1458 add_general (&gr, &stack_size, ainfo, TRUE);
1461 case MONO_TYPE_CHAR:
1464 cinfo->args [n].size = 2;
1465 add_general (&gr, &stack_size, ainfo, TRUE);
1470 cinfo->args [n].size = 4;
1471 add_general (&gr, &stack_size, ainfo, TRUE);
1477 case MONO_TYPE_FNPTR:
1478 case MONO_TYPE_CLASS:
1479 case MONO_TYPE_OBJECT:
1480 case MONO_TYPE_STRING:
1481 case MONO_TYPE_SZARRAY:
1482 case MONO_TYPE_ARRAY:
1483 cinfo->args [n].size = sizeof (gpointer);
1484 add_general (&gr, &stack_size, ainfo, TRUE);
1487 case MONO_TYPE_GENERICINST:
/* Reference-type generic instances are plain pointers. */
1488 if (!mono_type_generic_inst_is_valuetype (simpletype)) {
1489 cinfo->args [n].size = sizeof (gpointer);
1490 add_general (&gr, &stack_size, ainfo, TRUE);
1494 if (mini_is_gsharedvt_type_gsctx (gsctx, simpletype)) {
1495 /* gsharedvt arguments are passed by ref */
1496 g_assert (mini_is_gsharedvt_type_gsctx (gsctx, simpletype));
1497 add_general (&gr, &stack_size, ainfo, TRUE);
1498 switch (ainfo->storage) {
1499 case RegTypeGeneral:
1500 ainfo->storage = RegTypeGSharedVtInReg;
1503 ainfo->storage = RegTypeGSharedVtOnStack;
1506 g_assert_not_reached ();
1512 case MONO_TYPE_TYPEDBYREF:
1513 case MONO_TYPE_VALUETYPE: {
1519 if (simpletype->type == MONO_TYPE_TYPEDBYREF) {
1520 size = sizeof (MonoTypedRef);
1521 align = sizeof (gpointer);
1523 MonoClass *klass = mono_class_from_mono_type (sig->params [i]);
1525 size = mono_class_native_size (klass, &align);
1527 size = mini_type_stack_size_full (gsctx, simpletype, &align, FALSE);
1529 DEBUG(printf ("load %d bytes struct\n", size));
/* Round struct size up to whole pointer words. */
1532 align_size += (sizeof (gpointer) - 1);
1533 align_size &= ~(sizeof (gpointer) - 1);
1534 nwords = (align_size + sizeof (gpointer) -1 ) / sizeof (gpointer);
1535 ainfo->storage = RegTypeStructByVal;
1536 ainfo->struct_size = size;
1537 /* FIXME: align stack_size if needed */
1538 if (eabi_supported) {
1539 if (align >= 8 && (gr & 1))
/* Split the struct across remaining registers and the stack. */
1542 if (gr > ARMREG_R3) {
1544 ainfo->vtsize = nwords;
1546 int rest = ARMREG_R3 - gr + 1;
1547 int n_in_regs = rest >= nwords? nwords: rest;
1549 ainfo->size = n_in_regs;
1550 ainfo->vtsize = nwords - n_in_regs;
1553 nwords -= n_in_regs;
1555 ainfo->offset = stack_size;
1556 /*g_print ("offset for arg %d at %d\n", n, stack_size);*/
1557 stack_size += nwords * sizeof (gpointer);
1564 add_general (&gr, &stack_size, ainfo, FALSE);
/* R4: VFP reg under hard-float, otherwise a single core reg. */
1571 add_float (&fpr, &stack_size, ainfo, FALSE, &float_spare);
1573 add_general (&gr, &stack_size, ainfo, TRUE);
/* R8: VFP reg under hard-float, otherwise a core register pair. */
1581 add_float (&fpr, &stack_size, ainfo, TRUE, &float_spare);
1583 add_general (&gr, &stack_size, ainfo, FALSE);
1588 case MONO_TYPE_MVAR:
1589 /* gsharedvt arguments are passed by ref */
1590 g_assert (mini_is_gsharedvt_type_gsctx (gsctx, simpletype));
1591 add_general (&gr, &stack_size, ainfo, TRUE);
1592 switch (ainfo->storage) {
1593 case RegTypeGeneral:
1594 ainfo->storage = RegTypeGSharedVtInReg;
1597 ainfo->storage = RegTypeGSharedVtOnStack;
1600 g_assert_not_reached ();
1605 g_error ("Can't trampoline 0x%x", sig->params [i]->type);
1609 /* Handle the case where there are no implicit arguments */
1610 if ((sig->call_convention == MONO_CALL_VARARG) && (i == sig->sentinelpos)) {
1611 /* Prevent implicit arguments and sig_cookie from
1612 being passed in registers */
1615 /* Emit the signature cookie just before the implicit arguments */
1616 add_general (&gr, &stack_size, &cinfo->sig_cookie, TRUE);
/* --- Return register selection --- */
1620 simpletype = mini_type_get_underlying_type (gsctx, sig->ret);
1621 switch (simpletype->type) {
1622 case MONO_TYPE_BOOLEAN:
1627 case MONO_TYPE_CHAR:
1633 case MONO_TYPE_FNPTR:
1634 case MONO_TYPE_CLASS:
1635 case MONO_TYPE_OBJECT:
1636 case MONO_TYPE_SZARRAY:
1637 case MONO_TYPE_ARRAY:
1638 case MONO_TYPE_STRING:
1639 cinfo->ret.storage = RegTypeGeneral;
1640 cinfo->ret.reg = ARMREG_R0;
/* 64-bit returns use the r0/r1 pair. */
1644 cinfo->ret.storage = RegTypeIRegPair;
1645 cinfo->ret.reg = ARMREG_R0;
1649 cinfo->ret.storage = RegTypeFP;
/* Hard-float returns in d0/s0, soft ABI returns FP values in core regs. */
1651 if (IS_HARD_FLOAT) {
1652 cinfo->ret.reg = ARM_VFP_F0;
1654 cinfo->ret.reg = ARMREG_R0;
1658 case MONO_TYPE_GENERICINST:
1659 if (!mono_type_generic_inst_is_valuetype (simpletype)) {
1660 cinfo->ret.storage = RegTypeGeneral;
1661 cinfo->ret.reg = ARMREG_R0;
1664 // FIXME: Only for variable types
1665 if (mini_is_gsharedvt_type_gsctx (gsctx, simpletype)) {
1666 cinfo->ret.storage = RegTypeStructByAddr;
1667 g_assert (cinfo->vtype_retaddr);
1671 case MONO_TYPE_VALUETYPE:
1672 case MONO_TYPE_TYPEDBYREF:
1673 if (cinfo->ret.storage != RegTypeStructByVal)
1674 cinfo->ret.storage = RegTypeStructByAddr;
1677 case MONO_TYPE_MVAR:
1678 g_assert (mini_is_gsharedvt_type_gsctx (gsctx, simpletype));
1679 cinfo->ret.storage = RegTypeStructByAddr;
1680 g_assert (cinfo->vtype_retaddr);
1682 case MONO_TYPE_VOID:
1685 g_error ("Can't handle as return value 0x%x", sig->ret->type);
1689 /* align stack size to 8 */
1690 DEBUG (printf ("  stack size: %d (%d)\n", (stack_size + 15) & ~15, stack_size));
1691 stack_size = (stack_size + 7) & ~7;
1693 cinfo->stack_usage = stack_size;
/*
 * mono_arch_tail_call_supported:
 *
 *   Decide whether a tail call from CALLER_SIG to CALLEE_SIG can be emitted
 * on ARM.  Rejects AOT (non-full-AOT), callees needing more stack than the
 * caller, struct returns via hidden address, and large stack usage.
 */
1699 mono_arch_tail_call_supported (MonoCompile *cfg, MonoMethodSignature *caller_sig, MonoMethodSignature *callee_sig)
1701 MonoType *callee_ret;
1705 if (cfg->compile_aot && !cfg->full_aot)
1706 /* OP_TAILCALL doesn't work with AOT */
/* NULL mempool => heap-allocated CallInfo (freed after the checks,
 * presumably -- the free is outside this excerpt). */
1709 c1 = get_call_info (NULL, NULL, caller_sig);
1710 c2 = get_call_info (NULL, NULL, callee_sig);
1713 * Tail calls with more callee stack usage than the caller cannot be supported, since
1714 * the extra stack space would be left on the stack after the tail call.
1716 res = c1->stack_usage >= c2->stack_usage;
1717 callee_ret = mini_replace_type (callee_sig->ret);
1718 if (callee_ret && MONO_TYPE_ISSTRUCT (callee_ret) && c2->ret.storage != RegTypeStructByVal)
1719 /* An address on the callee's stack is passed as the first argument */
/* Arbitrary cap: at most 16 pointer-words of callee stack arguments. */
1722 if (c2->stack_usage > 16 * 4)
/* Debug helper: gate frame-pointer omission on the mono debug counter so the
 * optimization can be bisected at runtime. */
1734 debug_omit_fp (void)
1737 return mono_debug_count ();
1744 * mono_arch_compute_omit_fp:
1746 * Determine whenever the frame pointer can be eliminated.
/* Starts optimistic (omit_fp = TRUE) and clears the flag for every feature
 * that requires a fixed frame pointer; the result is cached in cfg->arch. */
1749 mono_arch_compute_omit_fp (MonoCompile *cfg)
1751 MonoMethodSignature *sig;
1752 MonoMethodHeader *header;
/* Computed once per compilation. */
1756 if (cfg->arch.omit_fp_computed)
1759 header = cfg->header;
1761 sig = mono_method_signature (cfg->method);
1763 if (!cfg->arch.cinfo)
1764 cfg->arch.cinfo = get_call_info (cfg->generic_sharing_context, cfg->mempool, sig);
1765 cinfo = cfg->arch.cinfo;
1768 * FIXME: Remove some of the restrictions.
1770 cfg->arch.omit_fp = TRUE;
1771 cfg->arch.omit_fp_computed = TRUE;
1773 if (cfg->disable_omit_fp)
1774 cfg->arch.omit_fp = FALSE;
1775 if (!debug_omit_fp ())
1776 cfg->arch.omit_fp = FALSE;
1778 if (cfg->method->save_lmf)
1779 cfg->arch.omit_fp = FALSE;
/* alloca, exception clauses, a param area and varargs all need FP. */
1781 if (cfg->flags & MONO_CFG_HAS_ALLOCA)
1782 cfg->arch.omit_fp = FALSE;
1783 if (header->num_clauses)
1784 cfg->arch.omit_fp = FALSE;
1785 if (cfg->param_area)
1786 cfg->arch.omit_fp = FALSE;
1787 if (!sig->pinvoke && (sig->call_convention == MONO_CALL_VARARG))
1788 cfg->arch.omit_fp = FALSE;
1789 if ((mono_jit_trace_calls != NULL && mono_trace_eval (cfg->method)) ||
1790 (cfg->prof_options & MONO_PROFILE_ENTER_LEAVE))
1791 cfg->arch.omit_fp = FALSE;
1792 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
1793 ArgInfo *ainfo = &cinfo->args [i];
1795 if (ainfo->storage == RegTypeBase || ainfo->storage == RegTypeBaseGen || ainfo->storage == RegTypeStructByVal) {
1797 * The stack offset can only be determined when the frame
1800 cfg->arch.omit_fp = FALSE;
1805 for (i = cfg->locals_start; i < cfg->num_varinfo; i++) {
1806 MonoInst *ins = cfg->varinfo [i];
1809 locals_size += mono_type_size (ins->inst_vtype, &ialign);
1814 * Set var information according to the calling convention. arm version.
1815 * The locals var stuff should most likely be split in another method.
/* Lays out the stack frame: return value slot, sequence-point variables,
 * locals, then incoming arguments -- all at increasing positive offsets from
 * the frame register (SP when FP is omitted, FP otherwise).
 * NOTE(review): sampled excerpt; interior lines are elided. */
1818 mono_arch_allocate_vars (MonoCompile *cfg)
1820 MonoMethodSignature *sig;
1821 MonoMethodHeader *header;
1823 int i, offset, size, align, curinst;
1827 sig = mono_method_signature (cfg->method);
1829 if (!cfg->arch.cinfo)
1830 cfg->arch.cinfo = get_call_info (cfg->generic_sharing_context, cfg->mempool, sig);
1831 cinfo = cfg->arch.cinfo;
1833 mono_arch_compute_omit_fp (cfg);
1835 if (cfg->arch.omit_fp)
1836 cfg->frame_reg = ARMREG_SP;
1838 cfg->frame_reg = ARMREG_FP;
/* Frame grows upward from the frame register. */
1840 cfg->flags |= MONO_CFG_HAS_SPILLUP;
1842 /* allow room for the vararg method args: void* and long/double */
1843 if (mono_jit_trace_calls != NULL && mono_trace_eval (cfg->method))
1844 cfg->param_area = MAX (cfg->param_area, sizeof (gpointer)*8);
1846 header = cfg->header;
1848 /* See mono_arch_get_global_int_regs () */
1849 if (cfg->flags & MONO_CFG_HAS_CALLS)
1850 cfg->uses_rgctx_reg = TRUE;
1852 if (cfg->frame_reg != ARMREG_SP)
1853 cfg->used_int_regs |= 1 << cfg->frame_reg;
1855 if (cfg->compile_aot || cfg->uses_rgctx_reg || COMPILE_LLVM (cfg))
1856 /* V5 is reserved for passing the vtable/rgctx/IMT method */
1857 cfg->used_int_regs |= (1 << ARMREG_V5);
/* Scalar returns live in r0 as a register variable. */
1861 if (!MONO_TYPE_ISSTRUCT (sig->ret) && !cinfo->vtype_retaddr) {
1862 if (sig->ret->type != MONO_TYPE_VOID) {
1863 cfg->ret->opcode = OP_REGVAR;
1864 cfg->ret->inst_c0 = ARMREG_R0;
1867 /* local vars are at a positive offset from the stack pointer */
1869 * also note that if the function uses alloca, we use FP
1870 * to point at the local variables.
1872 offset = 0; /* linkage area */
1873 /* align the offset to 16 bytes: not sure this is needed here */
1875 //offset &= ~(8 - 1);
1877 /* add parameter area size for called functions */
1878 offset += cfg->param_area;
1881 if (cfg->flags & MONO_CFG_HAS_FPOUT)
1884 /* allow room to save the return value */
1885 if (mono_jit_trace_calls != NULL && mono_trace_eval (cfg->method))
1888 /* the MonoLMF structure is stored just below the stack pointer */
1889 if (cinfo->ret.storage == RegTypeStructByVal) {
1890 cfg->ret->opcode = OP_REGOFFSET;
1891 cfg->ret->inst_basereg = cfg->frame_reg;
/* Pointer-align, then carve a word for the by-value struct return. */
1892 offset += sizeof (gpointer) - 1;
1893 offset &= ~(sizeof (gpointer) - 1);
1894 cfg->ret->inst_offset = - offset;
1895 offset += sizeof(gpointer);
1896 } else if (cinfo->vtype_retaddr) {
1897 ins = cfg->vret_addr;
1898 offset += sizeof(gpointer) - 1;
1899 offset &= ~(sizeof(gpointer) - 1);
1900 ins->inst_offset = offset;
1901 ins->opcode = OP_REGOFFSET;
1902 ins->inst_basereg = cfg->frame_reg;
1903 if (G_UNLIKELY (cfg->verbose_level > 1)) {
1904 printf ("vret_addr =");
1905 mono_print_ins (cfg->vret_addr);
1907 offset += sizeof(gpointer);
1910 /* Allocate these first so they have a small offset, OP_SEQ_POINT depends on this */
1911 if (cfg->arch.seq_point_info_var) {
1914 ins = cfg->arch.seq_point_info_var;
1918 offset += align - 1;
1919 offset &= ~(align - 1);
1920 ins->opcode = OP_REGOFFSET;
1921 ins->inst_basereg = cfg->frame_reg;
1922 ins->inst_offset = offset;
1925 ins = cfg->arch.ss_trigger_page_var;
1928 offset += align - 1;
1929 offset &= ~(align - 1);
1930 ins->opcode = OP_REGOFFSET;
1931 ins->inst_basereg = cfg->frame_reg;
1932 ins->inst_offset = offset;
/* Soft-breakpoint sequence-point variables get the same treatment. */
1936 if (cfg->arch.seq_point_read_var) {
1939 ins = cfg->arch.seq_point_read_var;
1943 offset += align - 1;
1944 offset &= ~(align - 1);
1945 ins->opcode = OP_REGOFFSET;
1946 ins->inst_basereg = cfg->frame_reg;
1947 ins->inst_offset = offset;
1950 ins = cfg->arch.seq_point_ss_method_var;
1953 offset += align - 1;
1954 offset &= ~(align - 1);
1955 ins->opcode = OP_REGOFFSET;
1956 ins->inst_basereg = cfg->frame_reg;
1957 ins->inst_offset = offset;
1960 ins = cfg->arch.seq_point_bp_method_var;
1963 offset += align - 1;
1964 offset &= ~(align - 1);
1965 ins->opcode = OP_REGOFFSET;
1966 ins->inst_basereg = cfg->frame_reg;
1967 ins->inst_offset = offset;
1971 cfg->locals_min_stack_offset = offset;
/* --- Locals --- */
1973 curinst = cfg->locals_start;
1974 for (i = curinst; i < cfg->num_varinfo; ++i) {
1977 ins = cfg->varinfo [i];
1978 if ((ins->flags & MONO_INST_IS_DEAD) || ins->opcode == OP_REGVAR || ins->opcode == OP_REGOFFSET)
1981 t = ins->inst_vtype;
1982 if (cfg->gsharedvt && mini_is_gsharedvt_variable_type (cfg, t))
1985 /* inst->backend.is_pinvoke indicates native sized value types, this is used by the
1986 * pinvoke wrappers when they call functions returning structure */
1987 if (ins->backend.is_pinvoke && MONO_TYPE_ISSTRUCT (t) && t->type != MONO_TYPE_TYPEDBYREF) {
1988 size = mono_class_native_size (mono_class_from_mono_type (t), &ualign);
1992 size = mono_type_size (t, &align);
1994 /* FIXME: if a structure is misaligned, our memcpy doesn't work,
1995 * since it loads/stores misaligned words, which don't do the right thing.
1997 if (align < 4 && size >= 4)
/* GC map: padding introduced by alignment holds no references. */
1999 if (ALIGN_TO (offset, align) > ALIGN_TO (offset, 4))
2000 mini_gc_set_slot_type_from_fp (cfg, ALIGN_TO (offset, 4), SLOT_NOREF);
2001 offset += align - 1;
2002 offset &= ~(align - 1);
2003 ins->opcode = OP_REGOFFSET;
2004 ins->inst_offset = offset;
2005 ins->inst_basereg = cfg->frame_reg;
2007 //g_print ("allocating local %d to %d\n", i, inst->inst_offset);
2010 cfg->locals_max_stack_offset = offset;
/* --- Incoming arguments spilled to the frame --- */
2014 ins = cfg->args [curinst];
2015 if (ins->opcode != OP_REGVAR) {
2016 ins->opcode = OP_REGOFFSET;
2017 ins->inst_basereg = cfg->frame_reg;
2018 offset += sizeof (gpointer) - 1;
2019 offset &= ~(sizeof (gpointer) - 1);
2020 ins->inst_offset = offset;
2021 offset += sizeof (gpointer);
2026 if (sig->call_convention == MONO_CALL_VARARG) {
2030 /* Allocate a local slot to hold the sig cookie address */
2031 offset += align - 1;
2032 offset &= ~(align - 1);
2033 cfg->sig_cookie = offset;
2037 for (i = 0; i < sig->param_count; ++i) {
2038 ins = cfg->args [curinst];
2040 if (ins->opcode != OP_REGVAR) {
2041 ins->opcode = OP_REGOFFSET;
2042 ins->inst_basereg = cfg->frame_reg;
2043 size = mini_type_stack_size_full (cfg->generic_sharing_context, sig->params [i], &ualign, sig->pinvoke);
2045 /* FIXME: if a structure is misaligned, our memcpy doesn't work,
2046 * since it loads/stores misaligned words, which don't do the right thing.
2048 if (align < 4 && size >= 4)
2050 /* The code in the prolog () stores words when storing vtypes received in a register */
2051 if (MONO_TYPE_ISSTRUCT (sig->params [i]))
2053 if (ALIGN_TO (offset, align) > ALIGN_TO (offset, 4))
2054 mini_gc_set_slot_type_from_fp (cfg, ALIGN_TO (offset, 4), SLOT_NOREF);
2055 offset += align - 1;
2056 offset &= ~(align - 1);
2057 ins->inst_offset = offset;
2063 /* align the offset to 8 bytes */
2064 if (ALIGN_TO (offset, 8) > ALIGN_TO (offset, 4))
2065 mini_gc_set_slot_type_from_fp (cfg, ALIGN_TO (offset, 4), SLOT_NOREF);
2070 cfg->stack_offset = offset;
/*
 * mono_arch_create_vars:
 *
 *   Create the ARM-specific compile-time variables: VFP scratch slots for
 * hard-float, the vret address argument, and the sequence-point bookkeeping
 * variables used by the soft debugger.
 */
2074 mono_arch_create_vars (MonoCompile *cfg)
2076 MonoMethodSignature *sig;
2080 sig = mono_method_signature (cfg->method);
2082 if (!cfg->arch.cinfo)
2083 cfg->arch.cinfo = get_call_info (cfg->generic_sharing_context, cfg->mempool, sig);
2084 cinfo = cfg->arch.cinfo;
/* Two double-sized scratch slots for moving values between VFP and core
 * registers; VOLATILE keeps them from being register-allocated away. */
2086 if (IS_HARD_FLOAT) {
2087 for (i = 0; i < 2; i++) {
2088 MonoInst *inst = mono_compile_create_var (cfg, &mono_defaults.double_class->byval_arg, OP_LOCAL);
2089 inst->flags |= MONO_INST_VOLATILE;
2091 cfg->arch.vfp_scratch_slots [i] = (gpointer) inst;
2095 if (cinfo->ret.storage == RegTypeStructByVal)
2096 cfg->ret_var_is_local = TRUE;
/* Hidden return-address parameter for struct returns. */
2098 if (cinfo->vtype_retaddr) {
2099 cfg->vret_addr = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_ARG);
2100 if (G_UNLIKELY (cfg->verbose_level > 1)) {
2101 printf ("vret_addr = ");
2102 mono_print_ins (cfg->vret_addr);
2106 if (cfg->gen_seq_points) {
/* Soft breakpoints: per-method read/single-step/breakpoint variables. */
2107 if (cfg->soft_breakpoints) {
2108 MonoInst *ins = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
2109 ins->flags |= MONO_INST_VOLATILE;
2110 cfg->arch.seq_point_read_var = ins;
2112 ins = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
2113 ins->flags |= MONO_INST_VOLATILE;
2114 cfg->arch.seq_point_ss_method_var = ins;
2116 ins = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
2117 ins->flags |= MONO_INST_VOLATILE;
2118 cfg->arch.seq_point_bp_method_var = ins;
2120 g_assert (!cfg->compile_aot);
2121 } else if (cfg->compile_aot) {
2122 MonoInst *ins = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
2123 ins->flags |= MONO_INST_VOLATILE;
2124 cfg->arch.seq_point_info_var = ins;
2126 /* Allocate a separate variable for this to save 1 load per seq point */
2127 ins = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
2128 ins->flags |= MONO_INST_VOLATILE;
2129 cfg->arch.ss_trigger_page_var = ins;
/*
 * emit_sig_cookie:
 *
 *   For vararg calls, store the signature cookie (a truncated copy of the
 * call signature covering only the explicit trailing arguments) at its
 * assigned stack slot, just before the implicit arguments.
 */
2135 emit_sig_cookie (MonoCompile *cfg, MonoCallInst *call, CallInfo *cinfo)
2137 MonoMethodSignature *tmp_sig;
/* Tail calls never need a cookie. */
2140 if (call->tail_call)
2143 g_assert (cinfo->sig_cookie.storage == RegTypeBase);
2146 * mono_ArgIterator_Setup assumes the signature cookie is
2147 * passed first and all the arguments which were before it are
2148 * passed on the stack after the signature. So compensate by
2149 * passing a different signature.
2151 tmp_sig = mono_metadata_signature_dup (call->signature);
2152 tmp_sig->param_count -= call->signature->sentinelpos;
2153 tmp_sig->sentinelpos = 0;
2154 memcpy (tmp_sig->params, call->signature->params + call->signature->sentinelpos, tmp_sig->param_count * sizeof (MonoType*));
2156 sig_reg = mono_alloc_ireg (cfg);
2157 MONO_EMIT_NEW_SIGNATURECONST (cfg, sig_reg, tmp_sig);
2159 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, ARMREG_SP, cinfo->sig_cookie.offset, sig_reg);
/*
 * mono_arch_get_llvm_call_info:
 *
 *   Translate the ARM CallInfo for SIG into LLVM argument descriptors.
 * Conventions LLVM cannot express are reported by setting
 * cfg->disable_llvm with an explanatory message.
 */
2164 mono_arch_get_llvm_call_info (MonoCompile *cfg, MonoMethodSignature *sig)
2169 LLVMCallInfo *linfo;
2171 n = sig->param_count + sig->hasthis;
2173 cinfo = get_call_info (cfg->generic_sharing_context, cfg->mempool, sig);
2175 linfo = mono_mempool_alloc0 (cfg->mempool, sizeof (LLVMCallInfo) + (sizeof (LLVMArgInfo) * n));
2178 * LLVM always uses the native ABI while we use our own ABI, the
2179 * only difference is the handling of vtypes:
2180 * - we only pass/receive them in registers in some cases, and only
2181 * in 1 or 2 integer registers.
2183 if (cinfo->vtype_retaddr) {
2184 /* Vtype returned using a hidden argument */
2185 linfo->ret.storage = LLVMArgVtypeRetAddr;
2186 linfo->vret_arg_index = cinfo->vret_arg_index;
2187 } else if (cinfo->ret.storage != RegTypeGeneral && cinfo->ret.storage != RegTypeNone && cinfo->ret.storage != RegTypeFP && cinfo->ret.storage != RegTypeIRegPair) {
2188 cfg->exception_message = g_strdup ("unknown ret conv");
2189 cfg->disable_llvm = TRUE;
2193 for (i = 0; i < n; ++i) {
2194 ainfo = cinfo->args + i;
2196 linfo->args [i].storage = LLVMArgNone;
2198 switch (ainfo->storage) {
2199 case RegTypeGeneral:
2200 case RegTypeIRegPair:
2202 linfo->args [i].storage = LLVMArgInIReg;
2204 case RegTypeStructByVal:
2205 // FIXME: Passing entirely on the stack or split reg/stack
/* Only structs fully in 1-2 registers are supported. */
2206 if (ainfo->vtsize == 0 && ainfo->size <= 2) {
2207 linfo->args [i].storage = LLVMArgVtypeInReg;
2208 linfo->args [i].pair_storage [0] = LLVMArgInIReg;
2209 if (ainfo->size == 2)
2210 linfo->args [i].pair_storage [1] = LLVMArgInIReg;
2212 linfo->args [i].pair_storage [1] = LLVMArgNone;
2214 cfg->exception_message = g_strdup_printf ("vtype-by-val on stack");
2215 cfg->disable_llvm = TRUE;
2219 cfg->exception_message = g_strdup_printf ("ainfo->storage (%d)", ainfo->storage);
2220 cfg->disable_llvm = TRUE;
/*
 * mono_arch_emit_call:
 *
 *   Lower CALL into the IR that moves each argument to its ABI location
 * (core register, register pair, VFP register, or stack slot) as computed by
 * get_call_info (), plus the sig cookie and vret-address setup.
 * NOTE(review): sampled excerpt -- interior lines are elided.
 */
2230 mono_arch_emit_call (MonoCompile *cfg, MonoCallInst *call)
2233 MonoMethodSignature *sig;
2237 sig = call->signature;
2238 n = sig->param_count + sig->hasthis;
2240 cinfo = get_call_info (cfg->generic_sharing_context, NULL, sig);
2242 for (i = 0; i < n; ++i) {
2243 ArgInfo *ainfo = cinfo->args + i;
/* The implicit `this` argument is typed as a plain pointer. */
2246 if (i >= sig->hasthis)
2247 t = sig->params [i - sig->hasthis];
2249 t = &mono_defaults.int_class->byval_arg;
2250 t = mini_type_get_underlying_type (cfg->generic_sharing_context, t);
2252 if ((sig->call_convention == MONO_CALL_VARARG) && (i == sig->sentinelpos)) {
2253 /* Emit the signature cookie just before the implicit arguments */
2254 emit_sig_cookie (cfg, call, cinfo);
2257 in = call->args [i];
2259 switch (ainfo->storage) {
2260 case RegTypeGeneral:
2261 case RegTypeIRegPair:
/* 64-bit value in a register pair: move both halves (dreg+1/dreg+2 are
 * the low/high component vregs of a long). */
2262 if (!t->byref && ((t->type == MONO_TYPE_I8) || (t->type == MONO_TYPE_U8))) {
2263 MONO_INST_NEW (cfg, ins, OP_MOVE);
2264 ins->dreg = mono_alloc_ireg (cfg);
2265 ins->sreg1 = in->dreg + 1;
2266 MONO_ADD_INS (cfg->cbb, ins);
2267 mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, ainfo->reg, FALSE);
2269 MONO_INST_NEW (cfg, ins, OP_MOVE);
2270 ins->dreg = mono_alloc_ireg (cfg);
2271 ins->sreg1 = in->dreg + 2;
2272 MONO_ADD_INS (cfg->cbb, ins);
2273 mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, ainfo->reg + 1, FALSE);
2274 } else if (!t->byref && ((t->type == MONO_TYPE_R8) || (t->type == MONO_TYPE_R4))) {
2275 if (ainfo->size == 4) {
2276 if (IS_SOFT_FLOAT) {
2277 /* mono_emit_call_args () have already done the r8->r4 conversion */
2278 /* The converted value is in an int vreg */
2279 MONO_INST_NEW (cfg, ins, OP_MOVE);
2280 ins->dreg = mono_alloc_ireg (cfg);
2281 ins->sreg1 = in->dreg;
2282 MONO_ADD_INS (cfg->cbb, ins);
2283 mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, ainfo->reg, FALSE);
/* VFP path: bounce the float through the param area to reach a core reg. */
2287 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORER4_MEMBASE_REG, ARMREG_SP, (cfg->param_area - 8), in->dreg);
2288 creg = mono_alloc_ireg (cfg);
2289 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOAD_MEMBASE, creg, ARMREG_SP, (cfg->param_area - 8));
2290 mono_call_inst_add_outarg_reg (cfg, call, creg, ainfo->reg, FALSE);
2293 if (IS_SOFT_FLOAT) {
/* Soft-float double: split into two word moves via FGETLOW32/FGETHIGH32. */
2294 MONO_INST_NEW (cfg, ins, OP_FGETLOW32);
2295 ins->dreg = mono_alloc_ireg (cfg);
2296 ins->sreg1 = in->dreg;
2297 MONO_ADD_INS (cfg->cbb, ins);
2298 mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, ainfo->reg, FALSE);
2300 MONO_INST_NEW (cfg, ins, OP_FGETHIGH32);
2301 ins->dreg = mono_alloc_ireg (cfg);
2302 ins->sreg1 = in->dreg;
2303 MONO_ADD_INS (cfg->cbb, ins);
2304 mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, ainfo->reg + 1, FALSE);
2308 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORER8_MEMBASE_REG, ARMREG_SP, (cfg->param_area - 8), in->dreg);
2309 creg = mono_alloc_ireg (cfg);
2310 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOAD_MEMBASE, creg, ARMREG_SP, (cfg->param_area - 8));
2311 mono_call_inst_add_outarg_reg (cfg, call, creg, ainfo->reg, FALSE);
2312 creg = mono_alloc_ireg (cfg);
2313 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOAD_MEMBASE, creg, ARMREG_SP, (cfg->param_area - 8 + 4));
2314 mono_call_inst_add_outarg_reg (cfg, call, creg, ainfo->reg + 1, FALSE);
2317 cfg->flags |= MONO_CFG_HAS_FPOUT;
2319 MONO_INST_NEW (cfg, ins, OP_MOVE);
2320 ins->dreg = mono_alloc_ireg (cfg);
2321 ins->sreg1 = in->dreg;
2322 MONO_ADD_INS (cfg->cbb, ins);
2324 mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, ainfo->reg, FALSE);
2327 case RegTypeStructByAddr:
2330 /* FIXME: where si the data allocated? */
2331 arg->backend.reg3 = ainfo->reg;
2332 call->used_iregs |= 1 << ainfo->reg;
2333 g_assert_not_reached ();
2336 case RegTypeStructByVal:
2337 case RegTypeGSharedVtInReg:
2338 case RegTypeGSharedVtOnStack:
/* Deferred to mono_arch_emit_outarg_vt () via OP_OUTARG_VT; a private copy
 * of ainfo travels with the instruction. */
2339 MONO_INST_NEW (cfg, ins, OP_OUTARG_VT);
2340 ins->opcode = OP_OUTARG_VT;
2341 ins->sreg1 = in->dreg;
2342 ins->klass = in->klass;
2343 ins->inst_p0 = call;
2344 ins->inst_p1 = mono_mempool_alloc (cfg->mempool, sizeof (ArgInfo));
2345 memcpy (ins->inst_p1, ainfo, sizeof (ArgInfo));
2346 mono_call_inst_add_outarg_vt (cfg, call, ins);
2347 MONO_ADD_INS (cfg->cbb, ins);
/* Stack-passed argument: pick the store opcode by type. */
2350 if (!t->byref && ((t->type == MONO_TYPE_I8) || (t->type == MONO_TYPE_U8))) {
2351 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, ARMREG_SP, ainfo->offset, in->dreg);
2352 } else if (!t->byref && ((t->type == MONO_TYPE_R4) || (t->type == MONO_TYPE_R8))) {
2353 if (t->type == MONO_TYPE_R8) {
2354 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORER8_MEMBASE_REG, ARMREG_SP, ainfo->offset, in->dreg);
2357 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, ARMREG_SP, ainfo->offset, in->dreg);
2359 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORER4_MEMBASE_REG, ARMREG_SP, ainfo->offset, in->dreg);
2362 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, ARMREG_SP, ainfo->offset, in->dreg);
2365 case RegTypeBaseGen:
/* 64-bit split: one half on the stack, the other in r3; endianness decides
 * which component vreg goes where. */
2366 if (!t->byref && ((t->type == MONO_TYPE_I8) || (t->type == MONO_TYPE_U8))) {
2367 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, ARMREG_SP, ainfo->offset, (G_BYTE_ORDER == G_BIG_ENDIAN) ? in->dreg + 1 : in->dreg + 2);
2368 MONO_INST_NEW (cfg, ins, OP_MOVE);
2369 ins->dreg = mono_alloc_ireg (cfg);
2370 ins->sreg1 = G_BYTE_ORDER == G_BIG_ENDIAN ? in->dreg + 2 : in->dreg + 1;
2371 MONO_ADD_INS (cfg->cbb, ins);
2372 mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, ARMREG_R3, FALSE);
2373 } else if (!t->byref && (t->type == MONO_TYPE_R8)) {
2376 /* This should work for soft-float as well */
2378 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORER8_MEMBASE_REG, ARMREG_SP, (cfg->param_area - 8), in->dreg);
2379 creg = mono_alloc_ireg (cfg);
2380 mono_call_inst_add_outarg_reg (cfg, call, creg, ARMREG_R3, FALSE);
2381 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOAD_MEMBASE, creg, ARMREG_SP, (cfg->param_area - 8));
2382 creg = mono_alloc_ireg (cfg);
2383 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOAD_MEMBASE, creg, ARMREG_SP, (cfg->param_area - 4));
2384 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, ARMREG_SP, ainfo->offset, creg);
2385 cfg->flags |= MONO_CFG_HAS_FPOUT;
2387 g_assert_not_reached ();
/* Hard-float: FP argument passed in a VFP register. */
2391 int fdreg = mono_alloc_freg (cfg);
2393 if (ainfo->size == 8) {
2394 MONO_INST_NEW (cfg, ins, OP_FMOVE);
2395 ins->sreg1 = in->dreg;
2397 MONO_ADD_INS (cfg->cbb, ins);
2399 mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, ainfo->reg, TRUE);
2404 * Mono's register allocator doesn't speak single-precision registers that
2405 * overlap double-precision registers (i.e. armhf). So we have to work around
2406 * the register allocator and load the value from memory manually.
2408 * So we create a variable for the float argument and an instruction to store
2409 * the argument into the variable. We then store the list of these arguments
2410 * in cfg->float_args. This list is then used by emit_float_args later to
2411 * pass the arguments in the various call opcodes.
2413 * This is not very nice, and we should really try to fix the allocator.
2416 MonoInst *float_arg = mono_compile_create_var (cfg, &mono_defaults.single_class->byval_arg, OP_LOCAL);
2418 /* Make sure the instruction isn't seen as pointless and removed.
2420 float_arg->flags |= MONO_INST_VOLATILE;
2422 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, float_arg->dreg, in->dreg);
2424 /* We use the dreg to look up the instruction later. The hreg is used to
2425 * emit the instruction that loads the value into the FP reg.
2427 fad = mono_mempool_alloc0 (cfg->mempool, sizeof (FloatArgData));
2428 fad->vreg = float_arg->dreg;
2429 fad->hreg = ainfo->reg;
2431 call->float_args = g_slist_append_mempool (cfg->mempool, call->float_args, fad);
2434 call->used_iregs |= 1 << ainfo->reg;
2435 cfg->flags |= MONO_CFG_HAS_FPOUT;
2439 g_assert_not_reached ();
2443 /* Handle the case where there are no implicit arguments */
2444 if (!sig->pinvoke && (sig->call_convention == MONO_CALL_VARARG) && (n == sig->sentinelpos))
2445 emit_sig_cookie (cfg, call, cinfo);
2447 if (cinfo->ret.storage == RegTypeStructByVal) {
2448 /* The JIT will transform this into a normal call */
2449 call->vret_in_reg = TRUE;
2450 } else if (cinfo->vtype_retaddr) {
/* Pass the vret buffer address in its assigned register. */
2452 MONO_INST_NEW (cfg, vtarg, OP_MOVE);
2453 vtarg->sreg1 = call->vret_var->dreg;
2454 vtarg->dreg = mono_alloc_preg (cfg);
2455 MONO_ADD_INS (cfg->cbb, vtarg);
2457 mono_call_inst_add_outarg_reg (cfg, call, vtarg->dreg, cinfo->ret.reg, FALSE);
2460 call->stack_usage = cinfo->stack_usage;
/*
 * mono_arch_emit_outarg_vt:
 *
 *   Emit the actual stores/moves for a value-type outgoing argument
 * (OP_OUTARG_VT created by mono_arch_emit_call): the register-resident part
 * word by word, the overflow part via memcpy to the stack.
 */
2466 mono_arch_emit_outarg_vt (MonoCompile *cfg, MonoInst *ins, MonoInst *src)
2468 MonoCallInst *call = (MonoCallInst*)ins->inst_p0;
2469 ArgInfo *ainfo = ins->inst_p1;
2470 int ovf_size = ainfo->vtsize;
2471 int doffset = ainfo->offset;
2472 int struct_size = ainfo->struct_size;
2473 int i, soffset, dreg, tmpreg;
/* gsharedvt: the address itself is the argument. */
2475 if (ainfo->storage == RegTypeGSharedVtInReg) {
2477 mono_call_inst_add_outarg_reg (cfg, call, src->dreg, ainfo->reg, FALSE);
2480 if (ainfo->storage == RegTypeGSharedVtOnStack) {
2481 /* Pass by addr on stack */
2482 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, ARMREG_SP, ainfo->offset, src->dreg);
/* Register part: load each word, using byte/halfword loads (and byte
 * assembly for size 3) to avoid reading past the end of the struct. */
2487 for (i = 0; i < ainfo->size; ++i) {
2488 dreg = mono_alloc_ireg (cfg);
2489 switch (struct_size) {
2491 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, dreg, src->dreg, soffset);
2494 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, dreg, src->dreg, soffset);
2497 tmpreg = mono_alloc_ireg (cfg);
2498 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, dreg, src->dreg, soffset);
2499 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, tmpreg, src->dreg, soffset + 1);
2500 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, tmpreg, tmpreg, 8);
2501 MONO_EMIT_NEW_BIALU (cfg, OP_IOR, dreg, dreg, tmpreg);
2502 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, tmpreg, src->dreg, soffset + 2);
2503 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, tmpreg, tmpreg, 16);
2504 MONO_EMIT_NEW_BIALU (cfg, OP_IOR, dreg, dreg, tmpreg);
2507 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, dreg, src->dreg, soffset);
2510 mono_call_inst_add_outarg_reg (cfg, call, dreg, ainfo->reg + i, FALSE);
2511 soffset += sizeof (gpointer);
2512 struct_size -= sizeof (gpointer);
2514 //g_print ("vt size: %d at R%d + %d\n", doffset, vt->inst_basereg, vt->inst_offset);
/* Overflow part: bulk-copy the remainder onto the stack. */
2516 mini_emit_memcpy (cfg, ARMREG_SP, doffset, src->dreg, soffset, MIN (ovf_size * sizeof (gpointer), struct_size), struct_size < 4 ? 1 : 4);
/*
 * mono_arch_emit_setret:
 *
 *   Emit IR to move VAL into the return location(s) expected by the ARM
 * calling convention for METHOD's return type.  NOTE(review): extracted
 * fragment with embedded line numbers and missing lines; comments only.
 */
2520 mono_arch_emit_setret (MonoCompile *cfg, MonoMethod *method, MonoInst *val)
2522 MonoType *ret = mini_type_get_underlying_type (cfg->generic_sharing_context, mono_method_signature (method)->ret);
/* 64-bit integers are returned in a register pair (OP_SETLRET), except under LLVM. */
2525 if (ret->type == MONO_TYPE_I8 || ret->type == MONO_TYPE_U8) {
2528 if (COMPILE_LLVM (cfg)) {
2529 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, cfg->ret->dreg, val->dreg);
2531 MONO_INST_NEW (cfg, ins, OP_SETLRET);
2532 ins->sreg1 = val->dreg + 1;
2533 ins->sreg2 = val->dreg + 2;
2534 MONO_ADD_INS (cfg->cbb, ins);
/* FP returns depend on the configured ARM FPU model (switch not fully visible here). */
2539 case MONO_ARM_FPU_NONE:
2540 if (ret->type == MONO_TYPE_R8) {
2543 MONO_INST_NEW (cfg, ins, OP_SETFRET);
2544 ins->dreg = cfg->ret->dreg;
2545 ins->sreg1 = val->dreg;
2546 MONO_ADD_INS (cfg->cbb, ins);
2549 if (ret->type == MONO_TYPE_R4) {
2550 /* Already converted to an int in method_to_ir () */
2551 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, cfg->ret->dreg, val->dreg);
2555 case MONO_ARM_FPU_VFP:
2556 case MONO_ARM_FPU_VFP_HARD:
2557 if (ret->type == MONO_TYPE_R8 || ret->type == MONO_TYPE_R4) {
2560 MONO_INST_NEW (cfg, ins, OP_SETFRET);
2561 ins->dreg = cfg->ret->dreg;
2562 ins->sreg1 = val->dreg;
2563 MONO_ADD_INS (cfg->cbb, ins);
2568 g_assert_not_reached ();
/* Default: plain move into the return register. */
2572 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, cfg->ret->dreg, val->dreg);
2575 #endif /* #ifndef DISABLE_JIT */
/* mono_arch_is_inst_imm: body not visible in this fragment. */
2578 mono_arch_is_inst_imm (gint64 imm)
/* Number of stack argument slots supported by the dyn-call path, in addition
 * to PARAM_REGS register slots. */
2583 #define DYN_CALL_STACK_ARGS 6
/* NOTE(review): the next two lines are fields of the ArchDynCallInfo /
 * DynCallArgs structs whose declarations were dropped by the extraction. */
2586 MonoMethodSignature *sig;
2591 mgreg_t regs [PARAM_REGS + DYN_CALL_STACK_ARGS];
/*
 * dyn_call_supported:
 *
 *   Return whether the signature/call-info combination can be handled by the
 * dyn-call machinery (all args must fit in PARAM_REGS registers plus
 * DYN_CALL_STACK_ARGS stack slots).  NOTE(review): extracted fragment with
 * missing lines; comments only.
 */
2597 dyn_call_supported (CallInfo *cinfo, MonoMethodSignature *sig)
2601 if (sig->hasthis + sig->param_count > PARAM_REGS + DYN_CALL_STACK_ARGS)
2604 switch (cinfo->ret.storage) {
2606 case RegTypeGeneral:
2607 case RegTypeIRegPair:
2608 case RegTypeStructByAddr:
2619 for (i = 0; i < cinfo->nargs; ++i) {
2620 ArgInfo *ainfo = &cinfo->args [i];
2623 switch (ainfo->storage) {
2624 case RegTypeGeneral:
2626 case RegTypeIRegPair:
/* Stack arguments beyond the supported slot count can't be marshalled. */
2629 if (ainfo->offset >= (DYN_CALL_STACK_ARGS * sizeof (gpointer)))
2632 case RegTypeStructByVal:
2633 if (ainfo->size == 0)
2634 last_slot = PARAM_REGS + (ainfo->offset / 4) + ainfo->vtsize;
2636 last_slot = ainfo->reg + ainfo->size + ainfo->vtsize;
2637 if (last_slot >= PARAM_REGS + DYN_CALL_STACK_ARGS)
2645 // FIXME: Can't use cinfo only as it doesn't contain info about I8/float */
2646 for (i = 0; i < sig->param_count; ++i) {
2647 MonoType *t = sig->params [i];
/*
 * mono_arch_dyn_call_prepare:
 *
 *   Build an ArchDynCallInfo for SIG, or return NULL (not fully visible here)
 * when dyn_call_supported () rejects the signature.  Caller frees the result
 * with mono_arch_dyn_call_free ().
 */
2673 mono_arch_dyn_call_prepare (MonoMethodSignature *sig)
2675 ArchDynCallInfo *info;
2678 cinfo = get_call_info (NULL, NULL, sig);
2680 if (!dyn_call_supported (cinfo, sig)) {
2685 info = g_new0 (ArchDynCallInfo, 1);
2686 // FIXME: Preprocess the info to speed up start_dyn_call ()
2688 info->cinfo = cinfo;
2690 return (MonoDynCallInfo*)info;
2694 mono_arch_dyn_call_free (MonoDynCallInfo *info)
2696 ArchDynCallInfo *ainfo = (ArchDynCallInfo*)info;
2698 g_free (ainfo->cinfo);
/*
 * mono_arch_start_dyn_call:
 *
 *   Marshal ARGS into the DynCallArgs buffer BUF according to the CallInfo in
 * INFO, so the generic dyn-call trampoline can place them into registers and
 * stack slots.  NOTE(review): extracted fragment with embedded line numbers
 * and missing lines (case labels, breaks); comments only.
 */
2703 mono_arch_start_dyn_call (MonoDynCallInfo *info, gpointer **args, guint8 *ret, guint8 *buf, int buf_len)
2705 ArchDynCallInfo *dinfo = (ArchDynCallInfo*)info;
2706 DynCallArgs *p = (DynCallArgs*)buf;
2707 int arg_index, greg, i, j, pindex;
2708 MonoMethodSignature *sig = dinfo->sig;
2710 g_assert (buf_len >= sizeof (DynCallArgs));
/* 'this' (or the vret address, when it comes first) occupies the first register slot. */
2719 if (sig->hasthis || dinfo->cinfo->vret_arg_index == 1) {
2720 p->regs [greg ++] = (mgreg_t)*(args [arg_index ++]);
2725 if (dinfo->cinfo->vtype_retaddr)
2726 p->regs [greg ++] = (mgreg_t)ret;
2728 for (i = pindex; i < sig->param_count; i++) {
2729 MonoType *t = mono_type_get_underlying_type (sig->params [i]);
2730 gpointer *arg = args [arg_index ++];
2731 ArgInfo *ainfo = &dinfo->cinfo->args [i + sig->hasthis];
/* Map the ArgInfo's storage to a slot index in p->regs. */
2734 if (ainfo->storage == RegTypeGeneral || ainfo->storage == RegTypeIRegPair || ainfo->storage == RegTypeStructByVal)
2736 else if (ainfo->storage == RegTypeBase)
2737 slot = PARAM_REGS + (ainfo->offset / 4);
2739 g_assert_not_reached ();
2742 p->regs [slot] = (mgreg_t)*arg;
/* Reference types and pointers are stored as-is. */
2747 case MONO_TYPE_STRING:
2748 case MONO_TYPE_CLASS:
2749 case MONO_TYPE_ARRAY:
2750 case MONO_TYPE_SZARRAY:
2751 case MONO_TYPE_OBJECT:
2755 p->regs [slot] = (mgreg_t)*arg;
/* Small integers are widened through the appropriately-typed load. */
2757 case MONO_TYPE_BOOLEAN:
2759 p->regs [slot] = *(guint8*)arg;
2762 p->regs [slot] = *(gint8*)arg;
2765 p->regs [slot] = *(gint16*)arg;
2768 case MONO_TYPE_CHAR:
2769 p->regs [slot] = *(guint16*)arg;
2772 p->regs [slot] = *(gint32*)arg;
2775 p->regs [slot] = *(guint32*)arg;
/* 64-bit values occupy two consecutive slots. */
2779 p->regs [slot ++] = (mgreg_t)arg [0];
2780 p->regs [slot] = (mgreg_t)arg [1];
2783 p->regs [slot] = *(mgreg_t*)arg;
2786 p->regs [slot ++] = (mgreg_t)arg [0];
2787 p->regs [slot] = (mgreg_t)arg [1];
2789 case MONO_TYPE_GENERICINST:
2790 if (MONO_TYPE_IS_REFERENCE (t)) {
2791 p->regs [slot] = (mgreg_t)*arg;
2796 case MONO_TYPE_VALUETYPE:
2797 g_assert (ainfo->storage == RegTypeStructByVal);
2799 if (ainfo->size == 0)
2800 slot = PARAM_REGS + (ainfo->offset / 4);
2804 for (j = 0; j < ainfo->size + ainfo->vtsize; ++j)
2805 p->regs [slot ++] = ((mgreg_t*)arg) [j];
2808 g_assert_not_reached ();
/*
 * mono_arch_finish_dyn_call:
 *
 *   Store the return value left in the DynCallArgs buffer by the dyn-call
 * trampoline into the location given at start_dyn_call time, converting
 * according to the signature's return type.  NOTE(review): extracted
 * fragment with missing lines; comments only.
 */
2814 mono_arch_finish_dyn_call (MonoDynCallInfo *info, guint8 *buf)
2816 ArchDynCallInfo *ainfo = (ArchDynCallInfo*)info;
2817 MonoMethodSignature *sig = ((ArchDynCallInfo*)info)->sig;
2818 guint8 *ret = ((DynCallArgs*)buf)->ret;
2819 mgreg_t res = ((DynCallArgs*)buf)->res;
2820 mgreg_t res2 = ((DynCallArgs*)buf)->res2;
2822 switch (mono_type_get_underlying_type (sig->ret)->type) {
2823 case MONO_TYPE_VOID:
2824 *(gpointer*)ret = NULL;
2826 case MONO_TYPE_STRING:
2827 case MONO_TYPE_CLASS:
2828 case MONO_TYPE_ARRAY:
2829 case MONO_TYPE_SZARRAY:
2830 case MONO_TYPE_OBJECT:
2834 *(gpointer*)ret = (gpointer)res;
2840 case MONO_TYPE_BOOLEAN:
2841 *(guint8*)ret = res;
2844 *(gint16*)ret = res;
2847 case MONO_TYPE_CHAR:
2848 *(guint16*)ret = res;
2851 *(gint32*)ret = res;
2854 *(guint32*)ret = res;
2858 /* This handles endianness as well */
2859 ((gint32*)ret) [0] = res;
2860 ((gint32*)ret) [1] = res2;
2862 case MONO_TYPE_GENERICINST:
2863 if (MONO_TYPE_IS_REFERENCE (sig->ret)) {
2864 *(gpointer*)ret = (gpointer)res;
2869 case MONO_TYPE_VALUETYPE:
2870 g_assert (ainfo->cinfo->vtype_retaddr);
2875 *(float*)ret = *(float*)&res;
2877 case MONO_TYPE_R8: {
/* NOTE(review): '®s' below is a mojibake of '&regs' (an HTML '&reg;' entity
 * mangled during extraction) — restore '*(double*)&regs' when repairing. */
2884 *(double*)ret = *(double*)®s;
2888 g_assert_not_reached ();
2895 * Allow tracing to work with this interface (with an optional argument)
/* Emits a call to FUNC (cfg->method, NULL) at method entry for tracing.
 * NOTE(review): fragment — surrounding lines are missing from this listing. */
2899 mono_arch_instrument_prolog (MonoCompile *cfg, void *func, void *p, gboolean enable_arguments)
2903 code = mono_arm_emit_load_imm (code, ARMREG_R0, (guint32)cfg->method);
2904 ARM_MOV_REG_IMM8 (code, ARMREG_R1, 0); /* NULL ebp for now */
2905 code = mono_arm_emit_load_imm (code, ARMREG_R2, (guint32)func);
2906 code = emit_call_reg (code, ARMREG_R2);
/*
 * mono_arch_instrument_epilog_full:
 *
 *   Emit a call to the leave-tracing function FUNC, saving and restoring the
 * return value around the call according to the return type (save_mode).
 * NOTE(review): extracted fragment with missing lines; comments only.
 */
2919 mono_arch_instrument_epilog_full (MonoCompile *cfg, void *func, void *p, gboolean enable_arguments, gboolean preserve_argument_registers)
2922 int save_mode = SAVE_NONE;
2924 MonoMethod *method = cfg->method;
2925 int rtype = mini_type_get_underlying_type (cfg->generic_sharing_context, mono_method_signature (method)->ret)->type;
2926 int save_offset = cfg->param_area;
2930 offset = code - cfg->native_code;
2931 /* we need about 16 instructions */
2932 if (offset > (cfg->code_size - 16 * 4)) {
2933 cfg->code_size *= 2;
2934 cfg->native_code = g_realloc (cfg->native_code, cfg->code_size);
2935 code = cfg->native_code + offset;
2938 case MONO_TYPE_VOID:
2939 /* special case string .ctor icall */
/* NOTE(review): 'strcmp (...)' without '!' makes this true for every String
 * method EXCEPT .ctor — other ports use '!strcmp'; verify against upstream. */
2940 if (strcmp (".ctor", method->name) && method->klass == mono_defaults.string_class)
2941 save_mode = SAVE_ONE;
2943 save_mode = SAVE_NONE;
2947 save_mode = SAVE_TWO;
2951 save_mode = SAVE_FP;
2953 case MONO_TYPE_VALUETYPE:
2954 save_mode = SAVE_STRUCT;
2957 save_mode = SAVE_ONE;
/* Spill the return value to the frame before calling the trace function. */
2961 switch (save_mode) {
2963 ARM_STR_IMM (code, ARMREG_R0, cfg->frame_reg, save_offset);
2964 ARM_STR_IMM (code, ARMREG_R1, cfg->frame_reg, save_offset + 4);
2965 if (enable_arguments) {
2966 ARM_MOV_REG_REG (code, ARMREG_R2, ARMREG_R1);
2967 ARM_MOV_REG_REG (code, ARMREG_R1, ARMREG_R0);
2971 ARM_STR_IMM (code, ARMREG_R0, cfg->frame_reg, save_offset);
2972 if (enable_arguments) {
2973 ARM_MOV_REG_REG (code, ARMREG_R1, ARMREG_R0);
2977 /* FIXME: what reg? */
2978 if (enable_arguments) {
2979 /* FIXME: what reg? */
2983 if (enable_arguments) {
2984 /* FIXME: get the actual address */
2985 ARM_MOV_REG_REG (code, ARMREG_R1, ARMREG_R0);
2993 code = mono_arm_emit_load_imm (code, ARMREG_R0, (guint32)cfg->method);
2994 code = mono_arm_emit_load_imm (code, ARMREG_IP, (guint32)func);
2995 code = emit_call_reg (code, ARMREG_IP);
/* Reload the saved return value after the call. */
2997 switch (save_mode) {
2999 ARM_LDR_IMM (code, ARMREG_R0, cfg->frame_reg, save_offset);
3000 ARM_LDR_IMM (code, ARMREG_R1, cfg->frame_reg, save_offset + 4);
3003 ARM_LDR_IMM (code, ARMREG_R0, cfg->frame_reg, save_offset);
3017 * The immediate field for cond branches is big enough for all reasonable methods
3019 #define EMIT_COND_BRANCH_FLAGS(ins,condcode) \
3020 if (0 && ins->inst_true_bb->native_offset) { \
3021 ARM_B_COND (code, (condcode), (code - cfg->native_code + ins->inst_true_bb->native_offset) & 0xffffff); \
3023 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_BB, ins->inst_true_bb); \
3024 ARM_B_COND (code, (condcode), 0); \
3027 #define EMIT_COND_BRANCH(ins,cond) EMIT_COND_BRANCH_FLAGS(ins, branch_cc_table [(cond)])
/* NOTE(review): the fast path above is disabled ('0 &&'), so every cond branch
 * goes through a patch-info entry and is resolved later. */
3029 /* emit an exception if condition is fail
3031 * We assign the extra code used to throw the implicit exceptions
3032 * to cfg->bb_exit as far as the big branch handling is concerned
3034 #define EMIT_COND_SYSTEM_EXCEPTION_FLAGS(condcode,exc_name) \
3036 mono_add_patch_info (cfg, code - cfg->native_code, \
3037 MONO_PATCH_INFO_EXC, exc_name); \
3038 ARM_BL_COND (code, (condcode), 0); \
3041 #define EMIT_COND_SYSTEM_EXCEPTION(cond,exc_name) EMIT_COND_SYSTEM_EXCEPTION_FLAGS(branch_cc_table [(cond)], (exc_name))
/* mono_arch_peephole_pass_1: body not visible in this fragment — presumably
 * empty on ARM (peephole work happens in pass 2); verify against upstream. */
3044 mono_arch_peephole_pass_1 (MonoCompile *cfg, MonoBasicBlock *bb)
/*
 * mono_arch_peephole_pass_2:
 *
 *   Classic store/load forwarding and redundant-move elimination over a basic
 * block.  NOTE(review): extracted fragment with missing lines (case labels,
 * breaks, else branches); comments only.
 */
3049 mono_arch_peephole_pass_2 (MonoCompile *cfg, MonoBasicBlock *bb)
3051 MonoInst *ins, *n, *last_ins = NULL;
3053 MONO_BB_FOR_EACH_INS_SAFE (bb, n, ins) {
3054 switch (ins->opcode) {
3057 /* Already done by an arch-independent pass */
3059 case OP_LOAD_MEMBASE:
3060 case OP_LOADI4_MEMBASE:
/* Forward a just-stored register into the following load of the same slot. */
3062 * OP_STORE_MEMBASE_REG reg, offset(basereg)
3063 * OP_LOAD_MEMBASE offset(basereg), reg
3065 if (last_ins && (last_ins->opcode == OP_STOREI4_MEMBASE_REG
3066 || last_ins->opcode == OP_STORE_MEMBASE_REG) &&
3067 ins->inst_basereg == last_ins->inst_destbasereg &&
3068 ins->inst_offset == last_ins->inst_offset) {
3069 if (ins->dreg == last_ins->sreg1) {
3070 MONO_DELETE_INS (bb, ins);
3073 //static int c = 0; printf ("MATCHX %s %d\n", cfg->method->name,c++);
3074 ins->opcode = OP_MOVE;
3075 ins->sreg1 = last_ins->sreg1;
3079 * Note: reg1 must be different from the basereg in the second load
3080 * OP_LOAD_MEMBASE offset(basereg), reg1
3081 * OP_LOAD_MEMBASE offset(basereg), reg2
3083 * OP_LOAD_MEMBASE offset(basereg), reg1
3084 * OP_MOVE reg1, reg2
3086 } if (last_ins && (last_ins->opcode == OP_LOADI4_MEMBASE
3087 || last_ins->opcode == OP_LOAD_MEMBASE) &&
3088 ins->inst_basereg != last_ins->dreg &&
3089 ins->inst_basereg == last_ins->inst_basereg &&
3090 ins->inst_offset == last_ins->inst_offset) {
3092 if (ins->dreg == last_ins->dreg) {
3093 MONO_DELETE_INS (bb, ins);
3096 ins->opcode = OP_MOVE;
3097 ins->sreg1 = last_ins->dreg;
3100 //g_assert_not_reached ();
3104 * OP_STORE_MEMBASE_IMM imm, offset(basereg)
3105 * OP_LOAD_MEMBASE offset(basereg), reg
3107 * OP_STORE_MEMBASE_IMM imm, offset(basereg)
3108 * OP_ICONST reg, imm
3110 } else if (last_ins && (last_ins->opcode == OP_STOREI4_MEMBASE_IMM
3111 || last_ins->opcode == OP_STORE_MEMBASE_IMM) &&
3112 ins->inst_basereg == last_ins->inst_destbasereg &&
3113 ins->inst_offset == last_ins->inst_offset) {
3114 //static int c = 0; printf ("MATCHX %s %d\n", cfg->method->name,c++);
3115 ins->opcode = OP_ICONST;
3116 ins->inst_c0 = last_ins->inst_imm;
/* NOTE(review): this rule is deliberately disabled pending validation. */
3117 g_assert_not_reached (); // check this rule
3121 case OP_LOADU1_MEMBASE:
3122 case OP_LOADI1_MEMBASE:
3123 if (last_ins && (last_ins->opcode == OP_STOREI1_MEMBASE_REG) &&
3124 ins->inst_basereg == last_ins->inst_destbasereg &&
3125 ins->inst_offset == last_ins->inst_offset) {
3126 ins->opcode = (ins->opcode == OP_LOADI1_MEMBASE) ? OP_ICONV_TO_I1 : OP_ICONV_TO_U1;
3127 ins->sreg1 = last_ins->sreg1;
3130 case OP_LOADU2_MEMBASE:
3131 case OP_LOADI2_MEMBASE:
3132 if (last_ins && (last_ins->opcode == OP_STOREI2_MEMBASE_REG) &&
3133 ins->inst_basereg == last_ins->inst_destbasereg &&
3134 ins->inst_offset == last_ins->inst_offset) {
3135 ins->opcode = (ins->opcode == OP_LOADI2_MEMBASE) ? OP_ICONV_TO_I2 : OP_ICONV_TO_U2;
3136 ins->sreg1 = last_ins->sreg1;
3140 ins->opcode = OP_MOVE;
/* Drop self-moves and move/counter-move pairs. */
3144 if (ins->dreg == ins->sreg1) {
3145 MONO_DELETE_INS (bb, ins);
3149 * OP_MOVE sreg, dreg
3150 * OP_MOVE dreg, sreg
3152 if (last_ins && last_ins->opcode == OP_MOVE &&
3153 ins->sreg1 == last_ins->dreg &&
3154 ins->dreg == last_ins->sreg1) {
3155 MONO_DELETE_INS (bb, ins);
3163 bb->last_ins = last_ins;
3167 * the branch_cc_table should maintain the order of these
/* NOTE(review): the branch_cc_table entries were dropped by the extraction;
 * it maps CB_* condition indices to ARM condition codes, in order. */
3181 branch_cc_table [] = {
/* Allocate a new MonoInst and insert it before 'ins' in the current bb. */
3195 #define ADD_NEW_INS(cfg,dest,op) do { \
3196 MONO_INST_NEW ((cfg), (dest), (op)); \
3197 mono_bblock_insert_before_ins (bb, ins, (dest)); \
/*
 * map_to_reg_reg_op:
 *
 *   Map an immediate/membase opcode to its register-register (or memindex)
 * equivalent, used by the lowering pass when an immediate does not fit the
 * instruction encoding.  NOTE(review): extracted fragment; some case labels
 * and returns missing.
 */
3201 map_to_reg_reg_op (int op)
3210 case OP_COMPARE_IMM:
3212 case OP_ICOMPARE_IMM:
3226 case OP_LOAD_MEMBASE:
3227 return OP_LOAD_MEMINDEX;
3228 case OP_LOADI4_MEMBASE:
3229 return OP_LOADI4_MEMINDEX;
3230 case OP_LOADU4_MEMBASE:
3231 return OP_LOADU4_MEMINDEX;
3232 case OP_LOADU1_MEMBASE:
3233 return OP_LOADU1_MEMINDEX;
3234 case OP_LOADI2_MEMBASE:
3235 return OP_LOADI2_MEMINDEX;
3236 case OP_LOADU2_MEMBASE:
3237 return OP_LOADU2_MEMINDEX;
3238 case OP_LOADI1_MEMBASE:
3239 return OP_LOADI1_MEMINDEX;
3240 case OP_STOREI1_MEMBASE_REG:
3241 return OP_STOREI1_MEMINDEX;
3242 case OP_STOREI2_MEMBASE_REG:
3243 return OP_STOREI2_MEMINDEX;
3244 case OP_STOREI4_MEMBASE_REG:
3245 return OP_STOREI4_MEMINDEX;
3246 case OP_STORE_MEMBASE_REG:
3247 return OP_STORE_MEMINDEX;
3248 case OP_STORER4_MEMBASE_REG:
3249 return OP_STORER4_MEMINDEX;
3250 case OP_STORER8_MEMBASE_REG:
3251 return OP_STORER8_MEMINDEX;
/* _IMM stores lower to _REG stores (the immediate is loaded separately). */
3252 case OP_STORE_MEMBASE_IMM:
3253 return OP_STORE_MEMBASE_REG;
3254 case OP_STOREI1_MEMBASE_IMM:
3255 return OP_STOREI1_MEMBASE_REG;
3256 case OP_STOREI2_MEMBASE_IMM:
3257 return OP_STOREI2_MEMBASE_REG;
3258 case OP_STOREI4_MEMBASE_IMM:
3259 return OP_STOREI4_MEMBASE_REG;
3261 g_assert_not_reached ();
3265 * Remove from the instruction list the instructions that can't be
3266 * represented with very simple instructions with no register
/* Lowering pass: rewrite instructions whose immediates/offsets do not fit the
 * ARM encodings into reg-reg forms (loading the constant via OP_ICONST), and
 * decompose a few pseudo-ops.  NOTE(review): extracted fragment with embedded
 * line numbers and missing lines (case labels, breaks, braces); comments only. */
3270 mono_arch_lowering_pass (MonoCompile *cfg, MonoBasicBlock *bb)
3272 MonoInst *ins, *temp, *last_ins = NULL;
3273 int rot_amount, imm8, low_imm;
3275 MONO_BB_FOR_EACH_INS (bb, ins) {
3277 switch (ins->opcode) {
3281 case OP_COMPARE_IMM:
3282 case OP_ICOMPARE_IMM:
/* ALU immediates must be expressible as a rotated 8-bit value. */
3296 if ((imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount)) < 0) {
3297 ADD_NEW_INS (cfg, temp, OP_ICONST);
3298 temp->inst_c0 = ins->inst_imm;
3299 temp->dreg = mono_alloc_ireg (cfg);
3300 ins->sreg2 = temp->dreg;
3301 ins->opcode = mono_op_imm_to_op (ins->opcode);
3303 if (ins->opcode == OP_SBB || ins->opcode == OP_ISBB || ins->opcode == OP_SUBCC)
/* Strength-reduce multiplications by 1, 0 and powers of two. */
3309 if (ins->inst_imm == 1) {
3310 ins->opcode = OP_MOVE;
3313 if (ins->inst_imm == 0) {
3314 ins->opcode = OP_ICONST;
3318 imm8 = mono_is_power_of_two (ins->inst_imm);
3320 ins->opcode = OP_SHL_IMM;
3321 ins->inst_imm = imm8;
3324 ADD_NEW_INS (cfg, temp, OP_ICONST);
3325 temp->inst_c0 = ins->inst_imm;
3326 temp->dreg = mono_alloc_ireg (cfg);
3327 ins->sreg2 = temp->dreg;
3328 ins->opcode = OP_IMUL;
3334 if (ins->next && (ins->next->opcode == OP_COND_EXC_C || ins->next->opcode == OP_COND_EXC_IC))
3335 /* ARM sets the C flag to 1 if there was _no_ overflow */
3336 ins->next->opcode = OP_COND_EXC_NC;
3339 case OP_IDIV_UN_IMM:
3341 case OP_IREM_UN_IMM:
3342 ADD_NEW_INS (cfg, temp, OP_ICONST);
3343 temp->inst_c0 = ins->inst_imm;
3344 temp->dreg = mono_alloc_ireg (cfg);
3345 ins->sreg2 = temp->dreg;
3346 ins->opcode = mono_op_imm_to_op (ins->opcode);
3348 case OP_LOCALLOC_IMM:
3349 ADD_NEW_INS (cfg, temp, OP_ICONST);
3350 temp->inst_c0 = ins->inst_imm;
3351 temp->dreg = mono_alloc_ireg (cfg);
3352 ins->sreg1 = temp->dreg;
3353 ins->opcode = OP_LOCALLOC;
3355 case OP_LOAD_MEMBASE:
3356 case OP_LOADI4_MEMBASE:
3357 case OP_LOADU4_MEMBASE:
3358 case OP_LOADU1_MEMBASE:
3359 /* we can do two things: load the immed in a register
3360 * and use an indexed load, or see if the immed can be
3361 * represented as an ad_imm + a load with a smaller offset
3362 * that fits. We just do the first for now, optimize later.
3364 if (arm_is_imm12 (ins->inst_offset))
3366 ADD_NEW_INS (cfg, temp, OP_ICONST);
3367 temp->inst_c0 = ins->inst_offset;
3368 temp->dreg = mono_alloc_ireg (cfg);
3369 ins->sreg2 = temp->dreg;
3370 ins->opcode = map_to_reg_reg_op (ins->opcode);
/* Halfword/signed-byte loads have only an 8-bit offset field. */
3372 case OP_LOADI2_MEMBASE:
3373 case OP_LOADU2_MEMBASE:
3374 case OP_LOADI1_MEMBASE:
3375 if (arm_is_imm8 (ins->inst_offset))
3377 ADD_NEW_INS (cfg, temp, OP_ICONST);
3378 temp->inst_c0 = ins->inst_offset;
3379 temp->dreg = mono_alloc_ireg (cfg);
3380 ins->sreg2 = temp->dreg;
3381 ins->opcode = map_to_reg_reg_op (ins->opcode);
/* VFP loads: try base+rotated-imm adjustment first, else materialize the
 * full address in a temp register. */
3383 case OP_LOADR4_MEMBASE:
3384 case OP_LOADR8_MEMBASE:
3385 if (arm_is_fpimm8 (ins->inst_offset))
3387 low_imm = ins->inst_offset & 0x1ff;
3388 if ((imm8 = mono_arm_is_rotated_imm8 (ins->inst_offset & ~0x1ff, &rot_amount)) >= 0) {
3389 ADD_NEW_INS (cfg, temp, OP_ADD_IMM);
3390 temp->inst_imm = ins->inst_offset & ~0x1ff;
3391 temp->sreg1 = ins->inst_basereg;
3392 temp->dreg = mono_alloc_ireg (cfg);
3393 ins->inst_basereg = temp->dreg;
3394 ins->inst_offset = low_imm;
3398 ADD_NEW_INS (cfg, temp, OP_ICONST);
3399 temp->inst_c0 = ins->inst_offset;
3400 temp->dreg = mono_alloc_ireg (cfg);
3402 ADD_NEW_INS (cfg, add_ins, OP_IADD);
3403 add_ins->sreg1 = ins->inst_basereg;
3404 add_ins->sreg2 = temp->dreg;
3405 add_ins->dreg = mono_alloc_ireg (cfg);
3407 ins->inst_basereg = add_ins->dreg;
3408 ins->inst_offset = 0;
3411 case OP_STORE_MEMBASE_REG:
3412 case OP_STOREI4_MEMBASE_REG:
3413 case OP_STOREI1_MEMBASE_REG:
3414 if (arm_is_imm12 (ins->inst_offset))
3416 ADD_NEW_INS (cfg, temp, OP_ICONST);
3417 temp->inst_c0 = ins->inst_offset;
3418 temp->dreg = mono_alloc_ireg (cfg);
3419 ins->sreg2 = temp->dreg;
3420 ins->opcode = map_to_reg_reg_op (ins->opcode);
3422 case OP_STOREI2_MEMBASE_REG:
3423 if (arm_is_imm8 (ins->inst_offset))
3425 ADD_NEW_INS (cfg, temp, OP_ICONST);
3426 temp->inst_c0 = ins->inst_offset;
3427 temp->dreg = mono_alloc_ireg (cfg);
3428 ins->sreg2 = temp->dreg;
3429 ins->opcode = map_to_reg_reg_op (ins->opcode);
3431 case OP_STORER4_MEMBASE_REG:
3432 case OP_STORER8_MEMBASE_REG:
3433 if (arm_is_fpimm8 (ins->inst_offset))
3435 low_imm = ins->inst_offset & 0x1ff;
3436 if ((imm8 = mono_arm_is_rotated_imm8 (ins->inst_offset & ~ 0x1ff, &rot_amount)) >= 0 && arm_is_fpimm8 (low_imm)) {
3437 ADD_NEW_INS (cfg, temp, OP_ADD_IMM);
3438 temp->inst_imm = ins->inst_offset & ~0x1ff;
3439 temp->sreg1 = ins->inst_destbasereg;
3440 temp->dreg = mono_alloc_ireg (cfg);
3441 ins->inst_destbasereg = temp->dreg;
3442 ins->inst_offset = low_imm;
3446 ADD_NEW_INS (cfg, temp, OP_ICONST);
3447 temp->inst_c0 = ins->inst_offset;
3448 temp->dreg = mono_alloc_ireg (cfg);
3450 ADD_NEW_INS (cfg, add_ins, OP_IADD);
3451 add_ins->sreg1 = ins->inst_destbasereg;
3452 add_ins->sreg2 = temp->dreg;
3453 add_ins->dreg = mono_alloc_ireg (cfg);
3455 ins->inst_destbasereg = add_ins->dreg;
3456 ins->inst_offset = 0;
3459 case OP_STORE_MEMBASE_IMM:
3460 case OP_STOREI1_MEMBASE_IMM:
3461 case OP_STOREI2_MEMBASE_IMM:
3462 case OP_STOREI4_MEMBASE_IMM:
3463 ADD_NEW_INS (cfg, temp, OP_ICONST);
3464 temp->inst_c0 = ins->inst_imm;
3465 temp->dreg = mono_alloc_ireg (cfg);
3466 ins->sreg1 = temp->dreg;
3467 ins->opcode = map_to_reg_reg_op (ins->opcode);
3469 goto loop_start; /* make it handle the possibly big ins->inst_offset */
3471 gboolean swap = FALSE;
3475 /* Optimized away */
3480 /* Some fp compares require swapped operands */
3481 switch (ins->next->opcode) {
3483 ins->next->opcode = OP_FBLT;
3487 ins->next->opcode = OP_FBLT_UN;
3491 ins->next->opcode = OP_FBGE;
3495 ins->next->opcode = OP_FBGE_UN;
3503 ins->sreg1 = ins->sreg2;
3512 bb->last_ins = last_ins;
3513 bb->max_vreg = cfg->next_vreg;
/*
 * mono_arch_decompose_long_opts:
 *
 *   Decompose 64-bit IR ops into 32-bit pairs; on ARM, OP_LNEG becomes a
 * reverse-subtract with borrow across the two halves (RSBS/RSC).
 */
3517 mono_arch_decompose_long_opts (MonoCompile *cfg, MonoInst *long_ins)
3521 if (long_ins->opcode == OP_LNEG) {
3523 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ARM_RSBS_IMM, ins->dreg + 1, ins->sreg1 + 1, 0);
3524 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ARM_RSC_IMM, ins->dreg + 2, ins->sreg1 + 2, 0);
/*
 * emit_float_to_int:
 *
 *   Emit VFP code converting the double in SREG to an integer of SIZE bytes
 * in DREG (signed or unsigned), then truncate/sign-extend to the target
 * width with shifts.  NOTE(review): extracted fragment; comments only.
 */
3530 emit_float_to_int (MonoCompile *cfg, guchar *code, int dreg, int sreg, int size, gboolean is_signed)
3532 /* sreg is a float, dreg is an integer reg */
3534 code = mono_arm_emit_vfp_scratch_save (cfg, code, vfp_scratch1);
3536 ARM_TOSIZD (code, vfp_scratch1, sreg);
3538 ARM_TOUIZD (code, vfp_scratch1, sreg);
3539 ARM_FMRS (code, dreg, vfp_scratch1);
3540 code = mono_arm_emit_vfp_scratch_restore (cfg, code, vfp_scratch1);
/* Unsigned narrowing: mask (size 1) or shift-pair (size 2). */
3544 ARM_AND_REG_IMM8 (code, dreg, dreg, 0xff);
3545 else if (size == 2) {
3546 ARM_SHL_IMM (code, dreg, dreg, 16);
3547 ARM_SHR_IMM (code, dreg, dreg, 16);
/* Signed narrowing: arithmetic shift right to sign-extend. */
3551 ARM_SHL_IMM (code, dreg, dreg, 24);
3552 ARM_SAR_IMM (code, dreg, dreg, 24);
3553 } else if (size == 2) {
3554 ARM_SHL_IMM (code, dreg, dreg, 16);
3555 ARM_SAR_IMM (code, dreg, dreg, 16);
3561 #endif /* #ifndef DISABLE_JIT */
/* NOTE(review): field of the PatchData struct whose declaration was dropped
 * by the extraction. */
3565 const guchar *target;
/* True when DIFF fits the signed 26-bit (imm24 << 2) range of an ARM BL. */
3570 #define is_call_imm(diff) ((gint)(diff) >= -33554432 && (gint)(diff) <= 33554431)
/*
 * search_thunk_slot:
 *
 *   mono_code_manager_foreach callback: look for an existing thunk jumping to
 * pdata->target that is reachable from pdata->code, or claim a free 3-word
 * slot and emit a new thunk there, then patch the call site to branch to it.
 * NOTE(review): extracted fragment with missing lines; comments only.
 */
3573 search_thunk_slot (void *data, int csize, int bsize, void *user_data) {
3574 PatchData *pdata = (PatchData*)user_data;
3575 guchar *code = data;
3576 guint32 *thunks = data;
3577 guint32 *endthunks = (guint32*)(code + bsize);
3579 int difflow, diffhigh;
3581 /* always ensure a call from pdata->code can reach to the thunks without further thunks */
3582 difflow = (char*)pdata->code - (char*)thunks;
3583 diffhigh = (char*)pdata->code - (char*)endthunks;
3584 if (!((is_call_imm (thunks) && is_call_imm (endthunks)) || (is_call_imm (difflow) && is_call_imm (diffhigh))))
3588 * The thunk is composed of 3 words:
3589 * load constant from thunks [2] into ARM_IP
3592 * Note that the LR register is already setup
3594 //g_print ("thunk nentries: %d\n", ((char*)endthunks - (char*)thunks)/16);
3595 if ((pdata->found == 2) || (pdata->code >= code && pdata->code <= code + csize)) {
3596 while (thunks < endthunks) {
3597 //g_print ("looking for target: %p at %p (%08x-%08x)\n", pdata->target, thunks, thunks [0], thunks [1]);
/* Reuse a thunk that already targets the desired address. */
3598 if (thunks [2] == (guint32)pdata->target) {
3599 arm_patch (pdata->code, (guchar*)thunks);
3600 mono_arch_flush_icache (pdata->code, 4);
3603 } else if ((thunks [0] == 0) && (thunks [1] == 0) && (thunks [2] == 0)) {
3604 /* found a free slot instead: emit thunk */
3605 /* ARMREG_IP is fine to use since this can't be an IMT call
3608 code = (guchar*)thunks;
3609 ARM_LDR_IMM (code, ARMREG_IP, ARMREG_PC, 0);
3610 if (thumb_supported)
3611 ARM_BX (code, ARMREG_IP);
3613 ARM_MOV_REG_REG (code, ARMREG_PC, ARMREG_IP);
3614 thunks [2] = (guint32)pdata->target;
3615 mono_arch_flush_icache ((guchar*)thunks, 12);
3617 arm_patch (pdata->code, (guchar*)thunks);
3618 mono_arch_flush_icache (pdata->code, 4);
3622 /* skip 12 bytes, the size of the thunk */
3626 //g_print ("failed thunk lookup for %p from %p at %p (%d entries)\n", pdata->target, pdata->code, data, count);
/*
 * handle_thunk:
 *
 *   Find (or create) a branch thunk for CODE -> TARGET by walking, in order,
 * the dynamic-method code manager, the domain code managers, and finally all
 * dynamic-method code managers.  Asserts if no slot can be found.
 * NOTE(review): extracted fragment with missing lines; comments only.
 */
3632 handle_thunk (MonoDomain *domain, int absolute, guchar *code, const guchar *target, MonoCodeManager *dyn_code_mp)
3637 domain = mono_domain_get ();
3640 pdata.target = target;
3641 pdata.absolute = absolute;
3645 mono_code_manager_foreach (dyn_code_mp, search_thunk_slot, &pdata);
3648 if (pdata.found != 1) {
3649 mono_domain_lock (domain);
3650 mono_domain_code_foreach (domain, search_thunk_slot, &pdata);
3653 /* this uses the first available slot */
3655 mono_domain_code_foreach (domain, search_thunk_slot, &pdata);
3657 mono_domain_unlock (domain);
3660 if (pdata.found != 1) {
3662 GHashTableIter iter;
3663 MonoJitDynamicMethodInfo *ji;
3666 * This might be a dynamic method, search its code manager. We can only
3667 * use the dynamic method containing CODE, since the others might be freed later.
3671 mono_domain_lock (domain);
3672 hash = domain_jit_info (domain)->dynamic_code_hash;
3674 /* FIXME: Speed this up */
3675 g_hash_table_iter_init (&iter, hash);
3676 while (g_hash_table_iter_next (&iter, NULL, (gpointer*)&ji)) {
3677 mono_code_manager_foreach (ji->code_mp, search_thunk_slot, &pdata);
3678 if (pdata.found == 1)
3682 mono_domain_unlock (domain);
3684 if (pdata.found != 1)
3685 g_print ("thunk failed for %p from %p\n", target, code);
3686 g_assert (pdata.found == 1);
/*
 * arm_patch_general:
 *
 *   Patch the branch/call instruction (or constant-load sequence) at CODE to
 * transfer to TARGET.  Handles direct B/BL (with BL->BLX rewriting when the
 * target is Thumb), jump-table entries, and the ldr-ip based far-call and
 * thunk sequences.  NOTE(review): extracted fragment with missing lines;
 * comments only.
 */
3690 arm_patch_general (MonoDomain *domain, guchar *code, const guchar *target, MonoCodeManager *dyn_code_mp)
3692 guint32 *code32 = (void*)code;
3693 guint32 ins = *code32;
3694 guint32 prim = (ins >> 25) & 7;
3695 guint32 tval = GPOINTER_TO_UINT (target);
3697 //g_print ("patching 0x%08x (0x%08x) to point to 0x%08x\n", code, ins, target);
3698 if (prim == 5) { /* 101b */
3699 /* the diff starts 8 bytes from the branch opcode */
3700 gint diff = target - code - 8;
3702 gint tmask = 0xffffffff;
3703 if (tval & 1) { /* entering thumb mode */
3704 diff = target - 1 - code - 8;
3705 g_assert (thumb_supported);
3706 tbits = 0xf << 28; /* bl->blx bit pattern */
3707 g_assert ((ins & (1 << 24))); /* it must be a bl, not b instruction */
3708 /* this low bit of the displacement is moved to bit 24 in the instruction encoding */
3712 tmask = ~(1 << 24); /* clear the link bit */
3713 /*g_print ("blx to thumb: target: %p, code: %p, diff: %d, mask: %x\n", target, code, diff, tmask);*/
/* In-range branch: rewrite the 24-bit displacement directly; otherwise a
 * thunk is required (handle_thunk below). */
3718 if (diff <= 33554431) {
3720 ins = (ins & 0xff000000) | diff;
3722 *code32 = ins | tbits;
3726 /* diff between 0 and -33554432 */
3727 if (diff >= -33554432) {
3729 ins = (ins & 0xff000000) | (diff & ~0xff000000);
3731 *code32 = ins | tbits;
3736 handle_thunk (domain, TRUE, code, target, dyn_code_mp);
3740 #ifdef USE_JUMP_TABLES
3742 gpointer *jte = mono_jumptable_get_entry (code);
3744 jte [0] = (gpointer) target;
3748 * The alternative call sequences looks like this:
3750 * ldr ip, [pc] // loads the address constant
3751 * b 1f // jumps around the constant
3752 * address constant embedded in the code
3757 * There are two cases for patching:
3758 * a) at the end of method emission: in this case code points to the start
3759 * of the call sequence
3760 * b) during runtime patching of the call site: in this case code points
3761 * to the mov pc, ip instruction
3763 * We have to handle also the thunk jump code sequence:
3767 * address constant // execution never reaches here
3769 if ((ins & 0x0ffffff0) == 0x12fff10) {
3770 /* Branch and exchange: the address is constructed in a reg
3771 * We can patch BX when the code sequence is the following:
3772 * ldr ip, [pc, #0] ; 0x8
/* Re-emit the expected sequence into ccode[] and compare against memory to
 * decide which word holds the address constant. */
3779 guint8 *emit = (guint8*)ccode;
3780 ARM_LDR_IMM (emit, ARMREG_IP, ARMREG_PC, 0);
3782 ARM_MOV_REG_REG (emit, ARMREG_LR, ARMREG_PC);
3783 ARM_BX (emit, ARMREG_IP);
3785 /*patching from magic trampoline*/
3786 if (ins == ccode [3]) {
3787 g_assert (code32 [-4] == ccode [0]);
3788 g_assert (code32 [-3] == ccode [1]);
3789 g_assert (code32 [-1] == ccode [2]);
3790 code32 [-2] = (guint32)target;
3793 /*patching from JIT*/
3794 if (ins == ccode [0]) {
3795 g_assert (code32 [1] == ccode [1]);
3796 g_assert (code32 [3] == ccode [2]);
3797 g_assert (code32 [4] == ccode [3]);
3798 code32 [2] = (guint32)target;
3801 g_assert_not_reached ();
3802 } else if ((ins & 0x0ffffff0) == 0x12fff30) {
3810 guint8 *emit = (guint8*)ccode;
3811 ARM_LDR_IMM (emit, ARMREG_IP, ARMREG_PC, 0);
3813 ARM_BLX_REG (emit, ARMREG_IP);
3815 g_assert (code32 [-3] == ccode [0]);
3816 g_assert (code32 [-2] == ccode [1]);
3817 g_assert (code32 [0] == ccode [2]);
3819 code32 [-1] = (guint32)target;
3822 guint32 *tmp = ccode;
3823 guint8 *emit = (guint8*)tmp;
3824 ARM_LDR_IMM (emit, ARMREG_IP, ARMREG_PC, 0);
3825 ARM_MOV_REG_REG (emit, ARMREG_LR, ARMREG_PC);
3826 ARM_MOV_REG_REG (emit, ARMREG_PC, ARMREG_IP);
3827 ARM_BX (emit, ARMREG_IP);
3828 if (ins == ccode [2]) {
3829 g_assert_not_reached (); // should be -2 ...
3830 code32 [-1] = (guint32)target;
3833 if (ins == ccode [0]) {
3834 /* handles both thunk jump code and the far call sequence */
3835 code32 [2] = (guint32)target;
3838 g_assert_not_reached ();
3840 // g_print ("patched with 0x%08x\n", ins);
/* arm_patch: convenience wrapper around arm_patch_general () with no domain
 * and no dynamic-method code manager. */
3845 arm_patch (guchar *code, const guchar *target)
3847 arm_patch_general (NULL, code, target, NULL);
3851 * Return the >= 0 uimm8 value if val can be represented with a byte + rotation
3852 * (with the rotation amount in *rot_amount. rot_amount is already adjusted
3853 * to be used with the emit macros.
3854 * Return -1 otherwise.
3857 mono_arm_is_rotated_imm8 (guint32 val, gint *rot_amount)
3860 for (i = 0; i < 31; i+= 2) {
3861 res = (val << (32 - i)) | (val >> i);
3864 *rot_amount = i? 32 - i: 0;
3871 * Emits in code a sequence of instructions that load the value 'val'
3872 * into the dreg register. Uses at most 4 instructions.
/* Strategy, in order of preference: single MOV of a rotated imm8; MVN of the
 * complement; MOVW/MOVT (on ARMv7, guarded by preprocessor lines not visible
 * here); otherwise build the value byte by byte with MOV + up to three ADDs. */
3875 mono_arm_emit_load_imm (guint8 *code, int dreg, guint32 val)
3877 int imm8, rot_amount;
3879 ARM_LDR_IMM (code, dreg, ARMREG_PC, 0);
3880 /* skip the constant pool */
3886 if ((imm8 = mono_arm_is_rotated_imm8 (val, &rot_amount)) >= 0) {
3887 ARM_MOV_REG_IMM (code, dreg, imm8, rot_amount);
3888 } else if ((imm8 = mono_arm_is_rotated_imm8 (~val, &rot_amount)) >= 0) {
3889 ARM_MVN_REG_IMM (code, dreg, imm8, rot_amount);
3892 ARM_MOVW_REG_IMM (code, dreg, val & 0xffff);
3894 ARM_MOVT_REG_IMM (code, dreg, (val >> 16) & 0xffff);
3898 ARM_MOV_REG_IMM8 (code, dreg, (val & 0xFF));
3900 ARM_ADD_REG_IMM (code, dreg, dreg, (val & 0xFF00) >> 8, 24);
3902 if (val & 0xFF0000) {
3903 ARM_ADD_REG_IMM (code, dreg, dreg, (val & 0xFF0000) >> 16, 16);
3905 if (val & 0xFF000000) {
3906 ARM_ADD_REG_IMM (code, dreg, dreg, (val & 0xFF000000) >> 24, 8);
3908 } else if (val & 0xFF00) {
3909 ARM_MOV_REG_IMM (code, dreg, (val & 0xFF00) >> 8, 24);
3910 if (val & 0xFF0000) {
3911 ARM_ADD_REG_IMM (code, dreg, dreg, (val & 0xFF0000) >> 16, 16);
3913 if (val & 0xFF000000) {
3914 ARM_ADD_REG_IMM (code, dreg, dreg, (val & 0xFF000000) >> 24, 8);
3916 } else if (val & 0xFF0000) {
3917 ARM_MOV_REG_IMM (code, dreg, (val & 0xFF0000) >> 16, 16);
3918 if (val & 0xFF000000) {
3919 ARM_ADD_REG_IMM (code, dreg, dreg, (val & 0xFF000000) >> 24, 8);
3922 //g_assert_not_reached ();
/* mono_arm_thumb_supported: report whether the CPU supports Thumb interworking
 * (the file-scope flag presumably set from hwcap probing — not visible here). */
3928 mono_arm_thumb_supported (void)
3930 return thumb_supported;
3936 * emit_load_volatile_arguments:
3938 * Load volatile arguments from the stack to the original input registers.
3939 * Required before a tail call.
/* NOTE(review): extracted fragment with embedded line numbers and missing
 * lines; comments only. */
3942 emit_load_volatile_arguments (MonoCompile *cfg, guint8 *code)
3944 MonoMethod *method = cfg->method;
3945 MonoMethodSignature *sig;
3950 /* FIXME: Generate intermediate code instead */
3952 sig = mono_method_signature (method);
3954 /* This is the opposite of the code in emit_prolog */
3958 cinfo = get_call_info (cfg->generic_sharing_context, NULL, sig);
/* Reload the vtype return-address register if the convention uses one. */
3960 if (cinfo->vtype_retaddr) {
3961 ArgInfo *ainfo = &cinfo->ret;
3962 inst = cfg->vret_addr;
3963 g_assert (arm_is_imm12 (inst->inst_offset));
3964 ARM_LDR_IMM (code, ainfo->reg, inst->inst_basereg, inst->inst_offset);
3966 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
3967 ArgInfo *ainfo = cinfo->args + i;
3968 inst = cfg->args [pos];
3970 if (cfg->verbose_level > 2)
3971 g_print ("Loading argument %d (type: %d)\n", i, ainfo->storage);
3972 if (inst->opcode == OP_REGVAR) {
3973 if (ainfo->storage == RegTypeGeneral)
3974 ARM_MOV_REG_REG (code, inst->dreg, ainfo->reg);
3975 else if (ainfo->storage == RegTypeFP) {
3976 g_assert_not_reached ();
3977 } else if (ainfo->storage == RegTypeBase) {
3981 if (arm_is_imm12 (prev_sp_offset + ainfo->offset)) {
3982 ARM_LDR_IMM (code, inst->dreg, ARMREG_SP, (prev_sp_offset + ainfo->offset));
3984 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
3985 ARM_LDR_REG_REG (code, inst->dreg, ARMREG_SP, ARMREG_IP);
3989 g_assert_not_reached ();
3991 if (ainfo->storage == RegTypeGeneral || ainfo->storage == RegTypeIRegPair) {
3992 switch (ainfo->size) {
/* 8-byte arguments reload both registers of the pair. */
3999 g_assert (arm_is_imm12 (inst->inst_offset));
4000 ARM_LDR_IMM (code, ainfo->reg, inst->inst_basereg, inst->inst_offset);
4001 g_assert (arm_is_imm12 (inst->inst_offset + 4));
4002 ARM_LDR_IMM (code, ainfo->reg + 1, inst->inst_basereg, inst->inst_offset + 4);
4005 if (arm_is_imm12 (inst->inst_offset)) {
4006 ARM_LDR_IMM (code, ainfo->reg, inst->inst_basereg, inst->inst_offset);
4008 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
4009 ARM_LDR_REG_REG (code, ainfo->reg, inst->inst_basereg, ARMREG_IP);
4013 } else if (ainfo->storage == RegTypeBaseGen) {
4016 } else if (ainfo->storage == RegTypeBase) {
4018 } else if (ainfo->storage == RegTypeFP) {
4019 g_assert_not_reached ();
4020 } else if (ainfo->storage == RegTypeStructByVal) {
4021 int doffset = inst->inst_offset;
4025 if (mono_class_from_mono_type (inst->inst_vtype))
4026 size = mono_class_native_size (mono_class_from_mono_type (inst->inst_vtype), NULL);
4027 for (cur_reg = 0; cur_reg < ainfo->size; ++cur_reg) {
4028 if (arm_is_imm12 (doffset)) {
4029 ARM_LDR_IMM (code, ainfo->reg + cur_reg, inst->inst_basereg, doffset);
4031 code = mono_arm_emit_load_imm (code, ARMREG_IP, doffset);
4032 ARM_LDR_REG_REG (code, ainfo->reg + cur_reg, inst->inst_basereg, ARMREG_IP);
4034 soffset += sizeof (gpointer);
4035 doffset += sizeof (gpointer);
4040 } else if (ainfo->storage == RegTypeStructByAddr) {
4055 mono_arch_output_basic_block (MonoCompile *cfg, MonoBasicBlock *bb)
4060 guint8 *code = cfg->native_code + cfg->code_len;
4061 MonoInst *last_ins = NULL;
4062 guint last_offset = 0;
4064 int imm8, rot_amount;
4066 /* we don't align basic blocks of loops on arm */
4068 if (cfg->verbose_level > 2)
4069 g_print ("Basic block %d starting at offset 0x%x\n", bb->block_num, bb->native_offset);
4071 cpos = bb->max_offset;
4073 if (cfg->prof_options & MONO_PROFILE_COVERAGE) {
4074 //MonoCoverageInfo *cov = mono_get_coverage_info (cfg->method);
4075 //g_assert (!mono_compile_aot);
4078 // cov->data [bb->dfn].iloffset = bb->cil_code - cfg->cil_code;
4079 /* this is not thread save, but good enough */
4080 /* fixme: howto handle overflows? */
4081 //x86_inc_mem (code, &cov->data [bb->dfn].count);
4084 if (mono_break_at_bb_method && mono_method_desc_full_match (mono_break_at_bb_method, cfg->method) && bb->block_num == mono_break_at_bb_bb_num) {
4085 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_INTERNAL_METHOD,
4086 (gpointer)"mono_break");
4087 code = emit_call_seq (cfg, code);
4090 MONO_BB_FOR_EACH_INS (bb, ins) {
4091 offset = code - cfg->native_code;
4093 max_len = ((guint8 *)ins_get_spec (ins->opcode))[MONO_INST_LEN];
4095 if (offset > (cfg->code_size - max_len - 16)) {
4096 cfg->code_size *= 2;
4097 cfg->native_code = g_realloc (cfg->native_code, cfg->code_size);
4098 code = cfg->native_code + offset;
4100 // if (ins->cil_code)
4101 // g_print ("cil code\n");
4102 mono_debug_record_line_number (cfg, ins, offset);
4104 switch (ins->opcode) {
4105 case OP_MEMORY_BARRIER:
4107 ARM_MOV_REG_IMM8 (code, ARMREG_R0, 0);
4108 ARM_MCR (code, 15, 0, ARMREG_R0, 7, 10, 5);
4112 #ifdef HAVE_AEABI_READ_TP
4113 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_INTERNAL_METHOD,
4114 (gpointer)"__aeabi_read_tp");
4115 code = emit_call_seq (cfg, code);
4117 ARM_LDR_IMM (code, ins->dreg, ARMREG_R0, ins->inst_offset);
4119 g_assert_not_reached ();
4123 ppc_mullw (code, ppc_r4, ins->sreg1, ins->sreg2);
4124 ppc_mulhw (code, ppc_r3, ins->sreg1, ins->sreg2);
4127 ppc_mullw (code, ppc_r4, ins->sreg1, ins->sreg2);
4128 ppc_mulhwu (code, ppc_r3, ins->sreg1, ins->sreg2);
4130 case OP_STOREI1_MEMBASE_IMM:
4131 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_imm & 0xFF);
4132 g_assert (arm_is_imm12 (ins->inst_offset));
4133 ARM_STRB_IMM (code, ARMREG_LR, ins->inst_destbasereg, ins->inst_offset);
4135 case OP_STOREI2_MEMBASE_IMM:
4136 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_imm & 0xFFFF);
4137 g_assert (arm_is_imm8 (ins->inst_offset));
4138 ARM_STRH_IMM (code, ARMREG_LR, ins->inst_destbasereg, ins->inst_offset);
4140 case OP_STORE_MEMBASE_IMM:
4141 case OP_STOREI4_MEMBASE_IMM:
4142 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_imm);
4143 g_assert (arm_is_imm12 (ins->inst_offset));
4144 ARM_STR_IMM (code, ARMREG_LR, ins->inst_destbasereg, ins->inst_offset);
4146 case OP_STOREI1_MEMBASE_REG:
4147 g_assert (arm_is_imm12 (ins->inst_offset));
4148 ARM_STRB_IMM (code, ins->sreg1, ins->inst_destbasereg, ins->inst_offset);
4150 case OP_STOREI2_MEMBASE_REG:
4151 g_assert (arm_is_imm8 (ins->inst_offset));
4152 ARM_STRH_IMM (code, ins->sreg1, ins->inst_destbasereg, ins->inst_offset);
4154 case OP_STORE_MEMBASE_REG:
4155 case OP_STOREI4_MEMBASE_REG:
4156 /* this case is special, since it happens for spill code after lowering has been called */
4157 if (arm_is_imm12 (ins->inst_offset)) {
4158 ARM_STR_IMM (code, ins->sreg1, ins->inst_destbasereg, ins->inst_offset);
4160 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_offset);
4161 ARM_STR_REG_REG (code, ins->sreg1, ins->inst_destbasereg, ARMREG_LR);
4164 case OP_STOREI1_MEMINDEX:
4165 ARM_STRB_REG_REG (code, ins->sreg1, ins->inst_destbasereg, ins->sreg2);
4167 case OP_STOREI2_MEMINDEX:
4168 ARM_STRH_REG_REG (code, ins->sreg1, ins->inst_destbasereg, ins->sreg2);
4170 case OP_STORE_MEMINDEX:
4171 case OP_STOREI4_MEMINDEX:
4172 ARM_STR_REG_REG (code, ins->sreg1, ins->inst_destbasereg, ins->sreg2);
4175 g_assert_not_reached ();
4177 case OP_LOAD_MEMINDEX:
4178 case OP_LOADI4_MEMINDEX:
4179 case OP_LOADU4_MEMINDEX:
4180 ARM_LDR_REG_REG (code, ins->dreg, ins->inst_basereg, ins->sreg2);
4182 case OP_LOADI1_MEMINDEX:
4183 ARM_LDRSB_REG_REG (code, ins->dreg, ins->inst_basereg, ins->sreg2);
4185 case OP_LOADU1_MEMINDEX:
4186 ARM_LDRB_REG_REG (code, ins->dreg, ins->inst_basereg, ins->sreg2);
4188 case OP_LOADI2_MEMINDEX:
4189 ARM_LDRSH_REG_REG (code, ins->dreg, ins->inst_basereg, ins->sreg2);
4191 case OP_LOADU2_MEMINDEX:
4192 ARM_LDRH_REG_REG (code, ins->dreg, ins->inst_basereg, ins->sreg2);
4194 case OP_LOAD_MEMBASE:
4195 case OP_LOADI4_MEMBASE:
4196 case OP_LOADU4_MEMBASE:
4197 /* this case is special, since it happens for spill code after lowering has been called */
4198 if (arm_is_imm12 (ins->inst_offset)) {
4199 ARM_LDR_IMM (code, ins->dreg, ins->inst_basereg, ins->inst_offset);
4201 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_offset);
4202 ARM_LDR_REG_REG (code, ins->dreg, ins->inst_basereg, ARMREG_LR);
4205 case OP_LOADI1_MEMBASE:
4206 g_assert (arm_is_imm8 (ins->inst_offset));
4207 ARM_LDRSB_IMM (code, ins->dreg, ins->inst_basereg, ins->inst_offset);
4209 case OP_LOADU1_MEMBASE:
4210 g_assert (arm_is_imm12 (ins->inst_offset));
4211 ARM_LDRB_IMM (code, ins->dreg, ins->inst_basereg, ins->inst_offset);
4213 case OP_LOADU2_MEMBASE:
4214 g_assert (arm_is_imm8 (ins->inst_offset));
4215 ARM_LDRH_IMM (code, ins->dreg, ins->inst_basereg, ins->inst_offset);
4217 case OP_LOADI2_MEMBASE:
4218 g_assert (arm_is_imm8 (ins->inst_offset));
4219 ARM_LDRSH_IMM (code, ins->dreg, ins->inst_basereg, ins->inst_offset);
4221 case OP_ICONV_TO_I1:
4222 ARM_SHL_IMM (code, ins->dreg, ins->sreg1, 24);
4223 ARM_SAR_IMM (code, ins->dreg, ins->dreg, 24);
4225 case OP_ICONV_TO_I2:
4226 ARM_SHL_IMM (code, ins->dreg, ins->sreg1, 16);
4227 ARM_SAR_IMM (code, ins->dreg, ins->dreg, 16);
4229 case OP_ICONV_TO_U1:
4230 ARM_AND_REG_IMM8 (code, ins->dreg, ins->sreg1, 0xff);
4232 case OP_ICONV_TO_U2:
4233 ARM_SHL_IMM (code, ins->dreg, ins->sreg1, 16);
4234 ARM_SHR_IMM (code, ins->dreg, ins->dreg, 16);
4238 ARM_CMP_REG_REG (code, ins->sreg1, ins->sreg2);
4240 case OP_COMPARE_IMM:
4241 case OP_ICOMPARE_IMM:
4242 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
4243 g_assert (imm8 >= 0);
4244 ARM_CMP_REG_IMM (code, ins->sreg1, imm8, rot_amount);
4248 * gdb does not like encountering the hw breakpoint ins in the debugged code.
4249 * So instead of emitting a trap, we emit a call a C function and place a
4252 //*(int*)code = 0xef9f0001;
4255 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_INTERNAL_METHOD,
4256 (gpointer)"mono_break");
4257 code = emit_call_seq (cfg, code);
4259 case OP_RELAXED_NOP:
4264 case OP_DUMMY_STORE:
4265 case OP_DUMMY_ICONST:
4266 case OP_DUMMY_R8CONST:
4267 case OP_NOT_REACHED:
4270 case OP_SEQ_POINT: {
4272 MonoInst *info_var = cfg->arch.seq_point_info_var;
4273 MonoInst *ss_trigger_page_var = cfg->arch.ss_trigger_page_var;
4274 MonoInst *ss_read_var = cfg->arch.seq_point_read_var;
4275 MonoInst *ss_method_var = cfg->arch.seq_point_ss_method_var;
4276 MonoInst *bp_method_var = cfg->arch.seq_point_bp_method_var;
4278 int dreg = ARMREG_LR;
4280 if (cfg->soft_breakpoints) {
4281 g_assert (!cfg->compile_aot);
4285 * For AOT, we use one got slot per method, which will point to a
4286 * SeqPointInfo structure, containing all the information required
4287 * by the code below.
4289 if (cfg->compile_aot) {
4290 g_assert (info_var);
4291 g_assert (info_var->opcode == OP_REGOFFSET);
4292 g_assert (arm_is_imm12 (info_var->inst_offset));
4295 if (!cfg->soft_breakpoints) {
4297 * Read from the single stepping trigger page. This will cause a
4298 * SIGSEGV when single stepping is enabled.
4299 * We do this _before_ the breakpoint, so single stepping after
4300 * a breakpoint is hit will step to the next IL offset.
4302 g_assert (((guint64)(gsize)ss_trigger_page >> 32) == 0);
4305 if (ins->flags & MONO_INST_SINGLE_STEP_LOC) {
4306 if (cfg->soft_breakpoints) {
4307 /* Load the address of the sequence point trigger variable. */
4310 g_assert (var->opcode == OP_REGOFFSET);
4311 g_assert (arm_is_imm12 (var->inst_offset));
4312 ARM_LDR_IMM (code, dreg, var->inst_basereg, var->inst_offset);
4314 /* Read the value and check whether it is non-zero. */
4315 ARM_LDR_IMM (code, dreg, dreg, 0);
4316 ARM_CMP_REG_IMM (code, dreg, 0, 0);
4318 /* Load the address of the sequence point method. */
4319 var = ss_method_var;
4321 g_assert (var->opcode == OP_REGOFFSET);
4322 g_assert (arm_is_imm12 (var->inst_offset));
4323 ARM_LDR_IMM (code, dreg, var->inst_basereg, var->inst_offset);
4325 /* Call it conditionally. */
4326 ARM_BLX_REG_COND (code, ARMCOND_NE, dreg);
4328 if (cfg->compile_aot) {
4329 /* Load the trigger page addr from the variable initialized in the prolog */
4330 var = ss_trigger_page_var;
4332 g_assert (var->opcode == OP_REGOFFSET);
4333 g_assert (arm_is_imm12 (var->inst_offset));
4334 ARM_LDR_IMM (code, dreg, var->inst_basereg, var->inst_offset);
4336 #ifdef USE_JUMP_TABLES
4337 gpointer *jte = mono_jumptable_add_entry ();
4338 code = mono_arm_load_jumptable_entry (code, jte, dreg);
4339 jte [0] = ss_trigger_page;
4341 ARM_LDR_IMM (code, dreg, ARMREG_PC, 0);
4343 *(int*)code = (int)ss_trigger_page;
4347 ARM_LDR_IMM (code, dreg, dreg, 0);
4351 mono_add_seq_point (cfg, bb, ins, code - cfg->native_code);
4353 if (cfg->soft_breakpoints) {
4354 /* Load the address of the breakpoint method into ip. */
4355 var = bp_method_var;
4357 g_assert (var->opcode == OP_REGOFFSET);
4358 g_assert (arm_is_imm12 (var->inst_offset));
4359 ARM_LDR_IMM (code, dreg, var->inst_basereg, var->inst_offset);
4362 * A placeholder for a possible breakpoint inserted by
4363 * mono_arch_set_breakpoint ().
4366 } else if (cfg->compile_aot) {
4367 guint32 offset = code - cfg->native_code;
4370 ARM_LDR_IMM (code, dreg, info_var->inst_basereg, info_var->inst_offset);
4371 /* Add the offset */
4372 val = ((offset / 4) * sizeof (guint8*)) + G_STRUCT_OFFSET (SeqPointInfo, bp_addrs);
4373 /* Load the info->bp_addrs [offset], which is either 0 or the address of a trigger page */
4374 if (arm_is_imm12 ((int)val)) {
4375 ARM_LDR_IMM (code, dreg, dreg, val);
4377 ARM_ADD_REG_IMM (code, dreg, dreg, (val & 0xFF), 0);
4379 ARM_ADD_REG_IMM (code, dreg, dreg, (val & 0xFF00) >> 8, 24);
4381 ARM_ADD_REG_IMM (code, dreg, dreg, (val & 0xFF0000) >> 16, 16);
4382 g_assert (!(val & 0xFF000000));
4384 ARM_LDR_IMM (code, dreg, dreg, 0);
4386 /* What is faster, a branch or a load ? */
4387 ARM_CMP_REG_IMM (code, dreg, 0, 0);
4388 /* The breakpoint instruction */
4389 ARM_LDR_IMM_COND (code, dreg, dreg, 0, ARMCOND_NE);
4392 * A placeholder for a possible breakpoint inserted by
4393 * mono_arch_set_breakpoint ().
4395 for (i = 0; i < 4; ++i)
4402 ARM_ADDS_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4405 ARM_ADD_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4409 ARM_ADCS_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4412 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
4413 g_assert (imm8 >= 0);
4414 ARM_ADDS_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
4418 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
4419 g_assert (imm8 >= 0);
4420 ARM_ADD_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
4424 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
4425 g_assert (imm8 >= 0);
4426 ARM_ADCS_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
4429 ARM_ADD_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4430 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, "OverflowException");
4432 case OP_IADD_OVF_UN:
4433 ARM_ADD_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4434 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, "OverflowException");
4437 ARM_SUB_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4438 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, "OverflowException");
4440 case OP_ISUB_OVF_UN:
4441 ARM_SUB_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4442 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_TRUE, PPC_BR_EQ, "OverflowException");
4444 case OP_ADD_OVF_CARRY:
4445 ARM_ADCS_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4446 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, "OverflowException");
4448 case OP_ADD_OVF_UN_CARRY:
4449 ARM_ADCS_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4450 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, "OverflowException");
4452 case OP_SUB_OVF_CARRY:
4453 ARM_SBCS_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4454 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, "OverflowException");
4456 case OP_SUB_OVF_UN_CARRY:
4457 ARM_SBCS_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4458 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_TRUE, PPC_BR_EQ, "OverflowException");
4462 ARM_SUBS_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4465 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
4466 g_assert (imm8 >= 0);
4467 ARM_SUBS_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
4470 ARM_SUB_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4474 ARM_SBCS_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4478 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
4479 g_assert (imm8 >= 0);
4480 ARM_SUB_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
4484 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
4485 g_assert (imm8 >= 0);
4486 ARM_SBCS_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
4488 case OP_ARM_RSBS_IMM:
4489 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
4490 g_assert (imm8 >= 0);
4491 ARM_RSBS_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
4493 case OP_ARM_RSC_IMM:
4494 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
4495 g_assert (imm8 >= 0);
4496 ARM_RSC_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
4499 ARM_AND_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4503 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
4504 g_assert (imm8 >= 0);
4505 ARM_AND_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
4508 g_assert (v7s_supported);
4509 ARM_SDIV (code, ins->dreg, ins->sreg1, ins->sreg2);
4512 g_assert (v7s_supported);
4513 ARM_UDIV (code, ins->dreg, ins->sreg1, ins->sreg2);
4516 g_assert (v7s_supported);
4517 ARM_SDIV (code, ARMREG_LR, ins->sreg1, ins->sreg2);
4518 ARM_MLS (code, ins->dreg, ARMREG_LR, ins->sreg2, ins->sreg1);
4521 g_assert (v7s_supported);
4522 ARM_UDIV (code, ARMREG_LR, ins->sreg1, ins->sreg2);
4523 ARM_MLS (code, ins->dreg, ARMREG_LR, ins->sreg2, ins->sreg1);
4527 g_assert_not_reached ();
4529 ARM_ORR_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4533 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
4534 g_assert (imm8 >= 0);
4535 ARM_ORR_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
4538 ARM_EOR_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4542 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
4543 g_assert (imm8 >= 0);
4544 ARM_EOR_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
4547 ARM_SHL_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4552 ARM_SHL_IMM (code, ins->dreg, ins->sreg1, (ins->inst_imm & 0x1f));
4553 else if (ins->dreg != ins->sreg1)
4554 ARM_MOV_REG_REG (code, ins->dreg, ins->sreg1);
4557 ARM_SAR_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4562 ARM_SAR_IMM (code, ins->dreg, ins->sreg1, (ins->inst_imm & 0x1f));
4563 else if (ins->dreg != ins->sreg1)
4564 ARM_MOV_REG_REG (code, ins->dreg, ins->sreg1);
4567 case OP_ISHR_UN_IMM:
4569 ARM_SHR_IMM (code, ins->dreg, ins->sreg1, (ins->inst_imm & 0x1f));
4570 else if (ins->dreg != ins->sreg1)
4571 ARM_MOV_REG_REG (code, ins->dreg, ins->sreg1);
4574 ARM_SHR_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4577 ARM_MVN_REG_REG (code, ins->dreg, ins->sreg1);
4580 ARM_RSB_REG_IMM8 (code, ins->dreg, ins->sreg1, 0);
4583 if (ins->dreg == ins->sreg2)
4584 ARM_MUL_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4586 ARM_MUL_REG_REG (code, ins->dreg, ins->sreg2, ins->sreg1);
4589 g_assert_not_reached ();
4592 /* FIXME: handle ovf/ sreg2 != dreg */
4593 ARM_MUL_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4594 /* FIXME: MUL doesn't set the C/O flags on ARM */
4596 case OP_IMUL_OVF_UN:
4597 /* FIXME: handle ovf/ sreg2 != dreg */
4598 ARM_MUL_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4599 /* FIXME: MUL doesn't set the C/O flags on ARM */
4602 code = mono_arm_emit_load_imm (code, ins->dreg, ins->inst_c0);
4605 /* Load the GOT offset */
4606 mono_add_patch_info (cfg, offset, (MonoJumpInfoType)ins->inst_i1, ins->inst_p0);
4607 ARM_LDR_IMM (code, ins->dreg, ARMREG_PC, 0);
4609 *(gpointer*)code = NULL;
4611 /* Load the value from the GOT */
4612 ARM_LDR_REG_REG (code, ins->dreg, ARMREG_PC, ins->dreg);
4614 case OP_OBJC_GET_SELECTOR:
4615 mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_OBJC_SELECTOR_REF, ins->inst_p0);
4616 ARM_LDR_IMM (code, ins->dreg, ARMREG_PC, 0);
4618 *(gpointer*)code = NULL;
4620 ARM_LDR_REG_REG (code, ins->dreg, ARMREG_PC, ins->dreg);
4622 case OP_ICONV_TO_I4:
4623 case OP_ICONV_TO_U4:
4625 if (ins->dreg != ins->sreg1)
4626 ARM_MOV_REG_REG (code, ins->dreg, ins->sreg1);
4629 int saved = ins->sreg2;
4630 if (ins->sreg2 == ARM_LSW_REG) {
4631 ARM_MOV_REG_REG (code, ARMREG_LR, ins->sreg2);
4634 if (ins->sreg1 != ARM_LSW_REG)
4635 ARM_MOV_REG_REG (code, ARM_LSW_REG, ins->sreg1);
4636 if (saved != ARM_MSW_REG)
4637 ARM_MOV_REG_REG (code, ARM_MSW_REG, saved);
4642 ARM_CPYD (code, ins->dreg, ins->sreg1);
4644 case OP_FCONV_TO_R4:
4646 ARM_CVTD (code, ins->dreg, ins->sreg1);
4647 ARM_CVTS (code, ins->dreg, ins->dreg);
4652 * Keep in sync with mono_arch_emit_epilog
4654 g_assert (!cfg->method->save_lmf);
4656 code = emit_load_volatile_arguments (cfg, code);
4658 code = emit_big_add (code, ARMREG_SP, cfg->frame_reg, cfg->stack_usage);
4660 if (cfg->used_int_regs)
4661 ARM_POP (code, cfg->used_int_regs);
4662 ARM_POP (code, (1 << ARMREG_R7) | (1 << ARMREG_LR));
4664 ARM_POP (code, cfg->used_int_regs | (1 << ARMREG_LR));
4666 mono_add_patch_info (cfg, (guint8*) code - cfg->native_code, MONO_PATCH_INFO_METHOD_JUMP, ins->inst_p0);
4667 if (cfg->compile_aot) {
4668 ARM_LDR_IMM (code, ARMREG_IP, ARMREG_PC, 0);
4670 *(gpointer*)code = NULL;
4672 ARM_LDR_REG_REG (code, ARMREG_PC, ARMREG_PC, ARMREG_IP);
4674 code = mono_arm_patchable_b (code, ARMCOND_AL);
4678 MonoCallInst *call = (MonoCallInst*)ins;
4681 * The stack looks like the following:
4682 * <caller argument area>
4685 * <callee argument area>
4686 * Need to copy the arguments from the callee argument area to
4687 * the caller argument area, and pop the frame.
4689 if (call->stack_usage) {
4690 int i, prev_sp_offset = 0;
4692 /* Compute size of saved registers restored below */
4694 prev_sp_offset = 2 * 4;
4696 prev_sp_offset = 1 * 4;
4697 for (i = 0; i < 16; ++i) {
4698 if (cfg->used_int_regs & (1 << i))
4699 prev_sp_offset += 4;
4702 code = emit_big_add (code, ARMREG_IP, cfg->frame_reg, cfg->stack_usage + prev_sp_offset);
4704 /* Copy arguments on the stack to our argument area */
4705 for (i = 0; i < call->stack_usage; i += sizeof (mgreg_t)) {
4706 ARM_LDR_IMM (code, ARMREG_LR, ARMREG_SP, i);
4707 ARM_STR_IMM (code, ARMREG_LR, ARMREG_IP, i);
4712 * Keep in sync with mono_arch_emit_epilog
4714 g_assert (!cfg->method->save_lmf);
4716 code = emit_big_add (code, ARMREG_SP, cfg->frame_reg, cfg->stack_usage);
4718 if (cfg->used_int_regs)
4719 ARM_POP (code, cfg->used_int_regs);
4720 ARM_POP (code, (1 << ARMREG_R7) | (1 << ARMREG_LR));
4722 ARM_POP (code, cfg->used_int_regs | (1 << ARMREG_LR));
4725 mono_add_patch_info (cfg, (guint8*) code - cfg->native_code, MONO_PATCH_INFO_METHOD_JUMP, call->method);
4726 if (cfg->compile_aot) {
4727 ARM_LDR_IMM (code, ARMREG_IP, ARMREG_PC, 0);
4729 *(gpointer*)code = NULL;
4731 ARM_LDR_REG_REG (code, ARMREG_PC, ARMREG_PC, ARMREG_IP);
4733 code = mono_arm_patchable_b (code, ARMCOND_AL);
4738 /* ensure ins->sreg1 is not NULL */
4739 ARM_LDRB_IMM (code, ARMREG_LR, ins->sreg1, 0);
4742 g_assert (cfg->sig_cookie < 128);
4743 ARM_LDR_IMM (code, ARMREG_IP, cfg->frame_reg, cfg->sig_cookie);
4744 ARM_STR_IMM (code, ARMREG_IP, ins->sreg1, 0);
4753 call = (MonoCallInst*)ins;
4756 code = emit_float_args (cfg, call, code, &max_len, &offset);
4758 if (ins->flags & MONO_INST_HAS_METHOD)
4759 mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_METHOD, call->method);
4761 mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_ABS, call->fptr);
4762 code = emit_call_seq (cfg, code);
4763 ins->flags |= MONO_INST_GC_CALLSITE;
4764 ins->backend.pc_offset = code - cfg->native_code;
4765 code = emit_move_return_value (cfg, ins, code);
4771 case OP_VOIDCALL_REG:
4774 code = emit_float_args (cfg, (MonoCallInst *)ins, code, &max_len, &offset);
4776 code = emit_call_reg (code, ins->sreg1);
4777 ins->flags |= MONO_INST_GC_CALLSITE;
4778 ins->backend.pc_offset = code - cfg->native_code;
4779 code = emit_move_return_value (cfg, ins, code);
4781 case OP_FCALL_MEMBASE:
4782 case OP_LCALL_MEMBASE:
4783 case OP_VCALL_MEMBASE:
4784 case OP_VCALL2_MEMBASE:
4785 case OP_VOIDCALL_MEMBASE:
4786 case OP_CALL_MEMBASE: {
4787 gboolean imt_arg = FALSE;
4789 g_assert (ins->sreg1 != ARMREG_LR);
4790 call = (MonoCallInst*)ins;
4793 code = emit_float_args (cfg, call, code, &max_len, &offset);
4795 if (call->dynamic_imt_arg || call->method->klass->flags & TYPE_ATTRIBUTE_INTERFACE)
4797 if (!arm_is_imm12 (ins->inst_offset))
4798 code = mono_arm_emit_load_imm (code, ARMREG_IP, ins->inst_offset);
4799 #ifdef USE_JUMP_TABLES
4805 ARM_ADD_REG_IMM8 (code, ARMREG_LR, ARMREG_PC, LR_BIAS);
4807 ARM_MOV_REG_REG (code, ARMREG_LR, ARMREG_PC);
4809 if (!arm_is_imm12 (ins->inst_offset))
4810 ARM_LDR_REG_REG (code, ARMREG_PC, ins->sreg1, ARMREG_IP);
4812 ARM_LDR_IMM (code, ARMREG_PC, ins->sreg1, ins->inst_offset);
4815 * We can't embed the method in the code stream in PIC code, or
4817 * Instead, we put it in V5 in code emitted by
4818 * mono_arch_emit_imt_argument (), and embed NULL here to
4819 * signal the IMT thunk that the value is in V5.
4821 #ifdef USE_JUMP_TABLES
4822 /* In case of jumptables we always use value in V5. */
4825 if (call->dynamic_imt_arg)
4826 *((gpointer*)code) = NULL;
4828 *((gpointer*)code) = (gpointer)call->method;
4832 ins->flags |= MONO_INST_GC_CALLSITE;
4833 ins->backend.pc_offset = code - cfg->native_code;
4834 code = emit_move_return_value (cfg, ins, code);
4838 /* keep alignment */
4839 int alloca_waste = cfg->param_area;
4842 /* round the size to 8 bytes */
4843 ARM_ADD_REG_IMM8 (code, ins->dreg, ins->sreg1, 7);
4844 ARM_BIC_REG_IMM8 (code, ins->dreg, ins->dreg, 7);
4846 ARM_ADD_REG_IMM8 (code, ins->dreg, ins->dreg, alloca_waste);
4847 ARM_SUB_REG_REG (code, ARMREG_SP, ARMREG_SP, ins->dreg);
4848 /* memzero the area: dreg holds the size, sp is the pointer */
4849 if (ins->flags & MONO_INST_INIT) {
4850 guint8 *start_loop, *branch_to_cond;
4851 ARM_MOV_REG_IMM8 (code, ARMREG_LR, 0);
4852 branch_to_cond = code;
4855 ARM_STR_REG_REG (code, ARMREG_LR, ARMREG_SP, ins->dreg);
4856 arm_patch (branch_to_cond, code);
4857 /* decrement by 4 and set flags */
4858 ARM_SUBS_REG_IMM8 (code, ins->dreg, ins->dreg, sizeof (mgreg_t));
4859 ARM_B_COND (code, ARMCOND_GE, 0);
4860 arm_patch (code - 4, start_loop);
4862 ARM_ADD_REG_IMM8 (code, ins->dreg, ARMREG_SP, alloca_waste);
4867 MonoInst *var = cfg->dyn_call_var;
4869 g_assert (var->opcode == OP_REGOFFSET);
4870 g_assert (arm_is_imm12 (var->inst_offset));
4872 /* lr = args buffer filled by mono_arch_get_dyn_call_args () */
4873 ARM_MOV_REG_REG( code, ARMREG_LR, ins->sreg1);
4875 ARM_MOV_REG_REG( code, ARMREG_IP, ins->sreg2);
4877 /* Save args buffer */
4878 ARM_STR_IMM (code, ARMREG_LR, var->inst_basereg, var->inst_offset);
4880 /* Set stack slots using R0 as scratch reg */
4881 /* MONO_ARCH_DYN_CALL_PARAM_AREA gives the size of stack space available */
4882 for (i = 0; i < DYN_CALL_STACK_ARGS; ++i) {
4883 ARM_LDR_IMM (code, ARMREG_R0, ARMREG_LR, (PARAM_REGS + i) * sizeof (mgreg_t));
4884 ARM_STR_IMM (code, ARMREG_R0, ARMREG_SP, i * sizeof (mgreg_t));
4887 /* Set argument registers */
4888 for (i = 0; i < PARAM_REGS; ++i)
4889 ARM_LDR_IMM (code, i, ARMREG_LR, i * sizeof (mgreg_t));
4892 ARM_MOV_REG_REG (code, ARMREG_LR, ARMREG_PC);
4893 ARM_MOV_REG_REG (code, ARMREG_PC, ARMREG_IP);
4896 ARM_LDR_IMM (code, ARMREG_IP, var->inst_basereg, var->inst_offset);
4897 ARM_STR_IMM (code, ARMREG_R0, ARMREG_IP, G_STRUCT_OFFSET (DynCallArgs, res));
4898 ARM_STR_IMM (code, ARMREG_R1, ARMREG_IP, G_STRUCT_OFFSET (DynCallArgs, res2));
4902 if (ins->sreg1 != ARMREG_R0)
4903 ARM_MOV_REG_REG (code, ARMREG_R0, ins->sreg1);
4904 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_INTERNAL_METHOD,
4905 (gpointer)"mono_arch_throw_exception");
4906 code = emit_call_seq (cfg, code);
4910 if (ins->sreg1 != ARMREG_R0)
4911 ARM_MOV_REG_REG (code, ARMREG_R0, ins->sreg1);
4912 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_INTERNAL_METHOD,
4913 (gpointer)"mono_arch_rethrow_exception");
4914 code = emit_call_seq (cfg, code);
4917 case OP_START_HANDLER: {
4918 MonoInst *spvar = mono_find_spvar_for_region (cfg, bb->region);
4921 /* Reserve a param area, see filter-stack.exe */
4922 if (cfg->param_area) {
4923 if ((i = mono_arm_is_rotated_imm8 (cfg->param_area, &rot_amount)) >= 0) {
4924 ARM_SUB_REG_IMM (code, ARMREG_SP, ARMREG_SP, i, rot_amount);
4926 code = mono_arm_emit_load_imm (code, ARMREG_IP, cfg->param_area);
4927 ARM_SUB_REG_REG (code, ARMREG_SP, ARMREG_SP, ARMREG_IP);
4931 if (arm_is_imm12 (spvar->inst_offset)) {
4932 ARM_STR_IMM (code, ARMREG_LR, spvar->inst_basereg, spvar->inst_offset);
4934 code = mono_arm_emit_load_imm (code, ARMREG_IP, spvar->inst_offset);
4935 ARM_STR_REG_REG (code, ARMREG_LR, spvar->inst_basereg, ARMREG_IP);
4939 case OP_ENDFILTER: {
4940 MonoInst *spvar = mono_find_spvar_for_region (cfg, bb->region);
4943 /* Free the param area */
4944 if (cfg->param_area) {
4945 if ((i = mono_arm_is_rotated_imm8 (cfg->param_area, &rot_amount)) >= 0) {
4946 ARM_ADD_REG_IMM (code, ARMREG_SP, ARMREG_SP, i, rot_amount);
4948 code = mono_arm_emit_load_imm (code, ARMREG_IP, cfg->param_area);
4949 ARM_ADD_REG_REG (code, ARMREG_SP, ARMREG_SP, ARMREG_IP);
4953 if (ins->sreg1 != ARMREG_R0)
4954 ARM_MOV_REG_REG (code, ARMREG_R0, ins->sreg1);
4955 if (arm_is_imm12 (spvar->inst_offset)) {
4956 ARM_LDR_IMM (code, ARMREG_IP, spvar->inst_basereg, spvar->inst_offset);
4958 g_assert (ARMREG_IP != spvar->inst_basereg);
4959 code = mono_arm_emit_load_imm (code, ARMREG_IP, spvar->inst_offset);
4960 ARM_LDR_REG_REG (code, ARMREG_IP, spvar->inst_basereg, ARMREG_IP);
4962 ARM_MOV_REG_REG (code, ARMREG_PC, ARMREG_IP);
4965 case OP_ENDFINALLY: {
4966 MonoInst *spvar = mono_find_spvar_for_region (cfg, bb->region);
4969 /* Free the param area */
4970 if (cfg->param_area) {
4971 if ((i = mono_arm_is_rotated_imm8 (cfg->param_area, &rot_amount)) >= 0) {
4972 ARM_ADD_REG_IMM (code, ARMREG_SP, ARMREG_SP, i, rot_amount);
4974 code = mono_arm_emit_load_imm (code, ARMREG_IP, cfg->param_area);
4975 ARM_ADD_REG_REG (code, ARMREG_SP, ARMREG_SP, ARMREG_IP);
4979 if (arm_is_imm12 (spvar->inst_offset)) {
4980 ARM_LDR_IMM (code, ARMREG_IP, spvar->inst_basereg, spvar->inst_offset);
4982 g_assert (ARMREG_IP != spvar->inst_basereg);
4983 code = mono_arm_emit_load_imm (code, ARMREG_IP, spvar->inst_offset);
4984 ARM_LDR_REG_REG (code, ARMREG_IP, spvar->inst_basereg, ARMREG_IP);
4986 ARM_MOV_REG_REG (code, ARMREG_PC, ARMREG_IP);
4989 case OP_CALL_HANDLER:
4990 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_BB, ins->inst_target_bb);
4991 code = mono_arm_patchable_bl (code, ARMCOND_AL);
4992 mono_cfg_add_try_hole (cfg, ins->inst_eh_block, code, bb);
4995 ins->inst_c0 = code - cfg->native_code;
4998 /*if (ins->inst_target_bb->native_offset) {
5000 //x86_jump_code (code, cfg->native_code + ins->inst_target_bb->native_offset);
5002 mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_BB, ins->inst_target_bb);
5003 code = mono_arm_patchable_b (code, ARMCOND_AL);
5007 ARM_MOV_REG_REG (code, ARMREG_PC, ins->sreg1);
5011 * In the normal case we have:
5012 * ldr pc, [pc, ins->sreg1 << 2]
5015 * ldr lr, [pc, ins->sreg1 << 2]
5017 * After follows the data.
5018 * FIXME: add aot support.
5020 mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_SWITCH, ins->inst_p0);
5021 #ifdef USE_JUMP_TABLES
5023 gpointer *jte = mono_jumptable_add_entries (GPOINTER_TO_INT (ins->klass));
5024 code = mono_arm_load_jumptable_entry_addr (code, jte, ARMREG_IP);
5025 ARM_LDR_REG_REG_SHIFT (code, ARMREG_PC, ARMREG_IP, ins->sreg1, ARMSHIFT_LSL, 2);
5029 max_len += 4 * GPOINTER_TO_INT (ins->klass);
5030 if (offset + max_len > (cfg->code_size - 16)) {
5031 cfg->code_size += max_len;
5032 cfg->code_size *= 2;
5033 cfg->native_code = g_realloc (cfg->native_code, cfg->code_size);
5034 code = cfg->native_code + offset;
5036 ARM_LDR_REG_REG_SHIFT (code, ARMREG_PC, ARMREG_PC, ins->sreg1, ARMSHIFT_LSL, 2);
5038 code += 4 * GPOINTER_TO_INT (ins->klass);
5043 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 0, ARMCOND_NE);
5044 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_EQ);
5048 ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
5049 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_LT);
5053 ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
5054 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_LO);
5058 ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
5059 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_GT);
5063 ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
5064 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_HI);
5066 case OP_COND_EXC_EQ:
5067 case OP_COND_EXC_NE_UN:
5068 case OP_COND_EXC_LT:
5069 case OP_COND_EXC_LT_UN:
5070 case OP_COND_EXC_GT:
5071 case OP_COND_EXC_GT_UN:
5072 case OP_COND_EXC_GE:
5073 case OP_COND_EXC_GE_UN:
5074 case OP_COND_EXC_LE:
5075 case OP_COND_EXC_LE_UN:
5076 EMIT_COND_SYSTEM_EXCEPTION (ins->opcode - OP_COND_EXC_EQ, ins->inst_p1);
5078 case OP_COND_EXC_IEQ:
5079 case OP_COND_EXC_INE_UN:
5080 case OP_COND_EXC_ILT:
5081 case OP_COND_EXC_ILT_UN:
5082 case OP_COND_EXC_IGT:
5083 case OP_COND_EXC_IGT_UN:
5084 case OP_COND_EXC_IGE:
5085 case OP_COND_EXC_IGE_UN:
5086 case OP_COND_EXC_ILE:
5087 case OP_COND_EXC_ILE_UN:
5088 EMIT_COND_SYSTEM_EXCEPTION (ins->opcode - OP_COND_EXC_IEQ, ins->inst_p1);
5091 case OP_COND_EXC_IC:
5092 EMIT_COND_SYSTEM_EXCEPTION_FLAGS (ARMCOND_CS, ins->inst_p1);
5094 case OP_COND_EXC_OV:
5095 case OP_COND_EXC_IOV:
5096 EMIT_COND_SYSTEM_EXCEPTION_FLAGS (ARMCOND_VS, ins->inst_p1);
5098 case OP_COND_EXC_NC:
5099 case OP_COND_EXC_INC:
5100 EMIT_COND_SYSTEM_EXCEPTION_FLAGS (ARMCOND_CC, ins->inst_p1);
5102 case OP_COND_EXC_NO:
5103 case OP_COND_EXC_INO:
5104 EMIT_COND_SYSTEM_EXCEPTION_FLAGS (ARMCOND_VC, ins->inst_p1);
5116 EMIT_COND_BRANCH (ins, ins->opcode - OP_IBEQ);
5119 /* floating point opcodes */
5121 if (cfg->compile_aot) {
5122 ARM_FLDD (code, ins->dreg, ARMREG_PC, 0);
5124 *(guint32*)code = ((guint32*)(ins->inst_p0))[0];
5126 *(guint32*)code = ((guint32*)(ins->inst_p0))[1];
5129 /* FIXME: we can optimize the imm load by dealing with part of
5130 * the displacement in LDFD (aligning to 512).
5132 code = mono_arm_emit_load_imm (code, ARMREG_LR, (guint32)ins->inst_p0);
5133 ARM_FLDD (code, ins->dreg, ARMREG_LR, 0);
5137 if (cfg->compile_aot) {
5138 ARM_FLDS (code, ins->dreg, ARMREG_PC, 0);
5140 *(guint32*)code = ((guint32*)(ins->inst_p0))[0];
5142 ARM_CVTS (code, ins->dreg, ins->dreg);
5144 code = mono_arm_emit_load_imm (code, ARMREG_LR, (guint32)ins->inst_p0);
5145 ARM_FLDS (code, ins->dreg, ARMREG_LR, 0);
5146 ARM_CVTS (code, ins->dreg, ins->dreg);
5149 case OP_STORER8_MEMBASE_REG:
5150 /* This is generated by the local regalloc pass which runs after the lowering pass */
5151 if (!arm_is_fpimm8 (ins->inst_offset)) {
5152 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_offset);
5153 ARM_ADD_REG_REG (code, ARMREG_LR, ARMREG_LR, ins->inst_destbasereg);
5154 ARM_FSTD (code, ins->sreg1, ARMREG_LR, 0);
5156 ARM_FSTD (code, ins->sreg1, ins->inst_destbasereg, ins->inst_offset);
5159 case OP_LOADR8_MEMBASE:
5160 /* This is generated by the local regalloc pass which runs after the lowering pass */
5161 if (!arm_is_fpimm8 (ins->inst_offset)) {
5162 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_offset);
5163 ARM_ADD_REG_REG (code, ARMREG_LR, ARMREG_LR, ins->inst_basereg);
5164 ARM_FLDD (code, ins->dreg, ARMREG_LR, 0);
5166 ARM_FLDD (code, ins->dreg, ins->inst_basereg, ins->inst_offset);
5169 case OP_STORER4_MEMBASE_REG:
5170 g_assert (arm_is_fpimm8 (ins->inst_offset));
5171 code = mono_arm_emit_vfp_scratch_save (cfg, code, vfp_scratch1);
5172 ARM_CVTD (code, vfp_scratch1, ins->sreg1);
5173 ARM_FSTS (code, vfp_scratch1, ins->inst_destbasereg, ins->inst_offset);
5174 code = mono_arm_emit_vfp_scratch_restore (cfg, code, vfp_scratch1);
5176 case OP_LOADR4_MEMBASE:
5177 g_assert (arm_is_fpimm8 (ins->inst_offset));
5178 code = mono_arm_emit_vfp_scratch_save (cfg, code, vfp_scratch1);
5179 ARM_FLDS (code, vfp_scratch1, ins->inst_basereg, ins->inst_offset);
5180 ARM_CVTS (code, ins->dreg, vfp_scratch1);
5181 code = mono_arm_emit_vfp_scratch_restore (cfg, code, vfp_scratch1);
5183 case OP_ICONV_TO_R_UN: {
5184 g_assert_not_reached ();
5187 case OP_ICONV_TO_R4:
5188 code = mono_arm_emit_vfp_scratch_save (cfg, code, vfp_scratch1);
5189 ARM_FMSR (code, vfp_scratch1, ins->sreg1);
5190 ARM_FSITOS (code, vfp_scratch1, vfp_scratch1);
5191 ARM_CVTS (code, ins->dreg, vfp_scratch1);
5192 code = mono_arm_emit_vfp_scratch_restore (cfg, code, vfp_scratch1);
5194 case OP_ICONV_TO_R8:
5195 code = mono_arm_emit_vfp_scratch_save (cfg, code, vfp_scratch1);
5196 ARM_FMSR (code, vfp_scratch1, ins->sreg1);
5197 ARM_FSITOD (code, ins->dreg, vfp_scratch1);
5198 code = mono_arm_emit_vfp_scratch_restore (cfg, code, vfp_scratch1);
5202 if (mono_method_signature (cfg->method)->ret->type == MONO_TYPE_R4) {
5203 ARM_CVTD (code, ARM_VFP_F0, ins->sreg1);
5205 if (!IS_HARD_FLOAT) {
5206 ARM_FMRS (code, ARMREG_R0, ARM_VFP_F0);
5209 if (IS_HARD_FLOAT) {
5210 ARM_CPYD (code, ARM_VFP_D0, ins->sreg1);
5212 ARM_FMRRD (code, ARMREG_R0, ARMREG_R1, ins->sreg1);
5216 case OP_FCONV_TO_I1:
5217 code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 1, TRUE);
5219 case OP_FCONV_TO_U1:
5220 code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 1, FALSE);
5222 case OP_FCONV_TO_I2:
5223 code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 2, TRUE);
5225 case OP_FCONV_TO_U2:
5226 code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 2, FALSE);
5228 case OP_FCONV_TO_I4:
5230 code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 4, TRUE);
5232 case OP_FCONV_TO_U4:
5234 code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 4, FALSE);
5236 case OP_FCONV_TO_I8:
5237 case OP_FCONV_TO_U8:
5238 g_assert_not_reached ();
5239 /* Implemented as helper calls */
5241 case OP_LCONV_TO_R_UN:
5242 g_assert_not_reached ();
5243 /* Implemented as helper calls */
5245 case OP_LCONV_TO_OVF_I4_2: {
5246 guint8 *high_bit_not_set, *valid_negative, *invalid_negative, *valid_positive;
5248 * Valid ints: 0xffffffff:8000000 to 00000000:0x7f000000
5251 ARM_CMP_REG_IMM8 (code, ins->sreg1, 0);
5252 high_bit_not_set = code;
5253 ARM_B_COND (code, ARMCOND_GE, 0); /*branch if bit 31 of the lower part is not set*/
5255 ARM_CMN_REG_IMM8 (code, ins->sreg2, 1); /*This have the same effect as CMP reg, 0xFFFFFFFF */
5256 valid_negative = code;
5257 ARM_B_COND (code, ARMCOND_EQ, 0); /*branch if upper part == 0xFFFFFFFF (lower part has bit 31 set) */
5258 invalid_negative = code;
5259 ARM_B_COND (code, ARMCOND_AL, 0);
5261 arm_patch (high_bit_not_set, code);
5263 ARM_CMP_REG_IMM8 (code, ins->sreg2, 0);
5264 valid_positive = code;
5265 ARM_B_COND (code, ARMCOND_EQ, 0); /*branch if upper part == 0 (lower part has bit 31 clear)*/
5267 arm_patch (invalid_negative, code);
5268 EMIT_COND_SYSTEM_EXCEPTION_FLAGS (ARMCOND_AL, "OverflowException");
5270 arm_patch (valid_negative, code);
5271 arm_patch (valid_positive, code);
5273 if (ins->dreg != ins->sreg1)
5274 ARM_MOV_REG_REG (code, ins->dreg, ins->sreg1);
5278 ARM_VFP_ADDD (code, ins->dreg, ins->sreg1, ins->sreg2);
5281 ARM_VFP_SUBD (code, ins->dreg, ins->sreg1, ins->sreg2);
5284 ARM_VFP_MULD (code, ins->dreg, ins->sreg1, ins->sreg2);
5287 ARM_VFP_DIVD (code, ins->dreg, ins->sreg1, ins->sreg2);
5290 ARM_NEGD (code, ins->dreg, ins->sreg1);
5294 g_assert_not_reached ();
5298 ARM_CMPD (code, ins->sreg1, ins->sreg2);
5304 ARM_CMPD (code, ins->sreg1, ins->sreg2);
5307 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 0, ARMCOND_NE);
5308 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_EQ);
5312 ARM_CMPD (code, ins->sreg1, ins->sreg2);
5315 ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
5316 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_MI);
5320 ARM_CMPD (code, ins->sreg1, ins->sreg2);
5323 ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
5324 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_MI);
5325 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_VS);
5329 ARM_CMPD (code, ins->sreg2, ins->sreg1);
5332 ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
5333 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_MI);
5337 ARM_CMPD (code, ins->sreg2, ins->sreg1);
5340 ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
5341 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_MI);
5342 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_VS);
5344 /* ARM FPA flags table:
5345 * N Less than ARMCOND_MI
5346 * Z Equal ARMCOND_EQ
5347 * C Greater Than or Equal ARMCOND_CS
5348 * V Unordered ARMCOND_VS
5351 EMIT_COND_BRANCH (ins, OP_IBEQ - OP_IBEQ);
5354 EMIT_COND_BRANCH (ins, OP_IBNE_UN - OP_IBEQ);
5357 EMIT_COND_BRANCH_FLAGS (ins, ARMCOND_MI); /* N set */
5360 EMIT_COND_BRANCH_FLAGS (ins, ARMCOND_VS); /* V set */
5361 EMIT_COND_BRANCH_FLAGS (ins, ARMCOND_MI); /* N set */
5367 g_assert_not_reached ();
5371 EMIT_COND_BRANCH_FLAGS (ins, ARMCOND_GE);
5373 /* FPA requires EQ even thou the docs suggests that just CS is enough */
5374 EMIT_COND_BRANCH_FLAGS (ins, ARMCOND_EQ);
5375 EMIT_COND_BRANCH_FLAGS (ins, ARMCOND_CS);
5379 EMIT_COND_BRANCH_FLAGS (ins, ARMCOND_VS); /* V set */
5380 EMIT_COND_BRANCH_FLAGS (ins, ARMCOND_GE);
5385 code = mono_arm_emit_vfp_scratch_save (cfg, code, vfp_scratch1);
5386 code = mono_arm_emit_vfp_scratch_save (cfg, code, vfp_scratch2);
5388 #ifdef USE_JUMP_TABLES
5390 gpointer *jte = mono_jumptable_add_entries (2);
5391 jte [0] = GUINT_TO_POINTER (0xffffffff);
5392 jte [1] = GUINT_TO_POINTER (0x7fefffff);
5393 code = mono_arm_load_jumptable_entry_addr (code, jte, ARMREG_IP);
5394 ARM_FLDD (code, vfp_scratch1, ARMREG_IP, 0);
5397 ARM_ABSD (code, vfp_scratch2, ins->sreg1);
5398 ARM_FLDD (code, vfp_scratch1, ARMREG_PC, 0);
5400 *(guint32*)code = 0xffffffff;
5402 *(guint32*)code = 0x7fefffff;
5405 ARM_CMPD (code, vfp_scratch2, vfp_scratch1);
5407 EMIT_COND_SYSTEM_EXCEPTION_FLAGS (ARMCOND_GT, "ArithmeticException");
5408 ARM_CMPD (code, ins->sreg1, ins->sreg1);
5410 EMIT_COND_SYSTEM_EXCEPTION_FLAGS (ARMCOND_VS, "ArithmeticException");
5411 ARM_CPYD (code, ins->dreg, ins->sreg1);
5413 code = mono_arm_emit_vfp_scratch_restore (cfg, code, vfp_scratch1);
5414 code = mono_arm_emit_vfp_scratch_restore (cfg, code, vfp_scratch2);
5419 case OP_GC_LIVENESS_DEF:
5420 case OP_GC_LIVENESS_USE:
5421 case OP_GC_PARAM_SLOT_LIVENESS_DEF:
5422 ins->backend.pc_offset = code - cfg->native_code;
5424 case OP_GC_SPILL_SLOT_LIVENESS_DEF:
5425 ins->backend.pc_offset = code - cfg->native_code;
5426 bb->spill_slot_defs = g_slist_prepend_mempool (cfg->mempool, bb->spill_slot_defs, ins);
5430 g_warning ("unknown opcode %s in %s()\n", mono_inst_name (ins->opcode), __FUNCTION__);
5431 g_assert_not_reached ();
5434 if ((cfg->opt & MONO_OPT_BRANCH) && ((code - cfg->native_code - offset) > max_len)) {
5435 g_warning ("wrong maximal instruction length of instruction %s (expected %d, got %d)",
5436 mono_inst_name (ins->opcode), max_len, code - cfg->native_code - offset);
5437 g_assert_not_reached ();
5443 last_offset = offset;
5446 cfg->code_len = code - cfg->native_code;
5449 #endif /* DISABLE_JIT */
5451 #ifdef HAVE_AEABI_READ_TP
5452 void __aeabi_read_tp (void);
/*
 * mono_arch_register_lowlevel_calls:
 *
 * Register the ARM-specific low-level helpers (the exception throwers and,
 * when HAVE_AEABI_READ_TP is defined, the EABI thread-pointer accessor) as
 * JIT icalls, so generated code can reference them through the patch
 * machinery by name.
 */
5456 mono_arch_register_lowlevel_calls (void)
5458 /* The signature doesn't matter */
5459 mono_register_jit_icall (mono_arm_throw_exception, "mono_arm_throw_exception", mono_create_icall_signature ("void"), TRUE);
5460 mono_register_jit_icall (mono_arm_throw_exception_by_token, "mono_arm_throw_exception_by_token", mono_create_icall_signature ("void"), TRUE);
5462 #ifndef MONO_CROSS_COMPILE
5463 #ifdef HAVE_AEABI_READ_TP
/* Only resolvable when building natively on an EABI Linux target (see HAVE_AEABI_READ_TP). */
5464 mono_register_jit_icall (__aeabi_read_tp, "__aeabi_read_tp", mono_create_icall_signature ("void"), TRUE);
/*
 * patch_lis_ori:
 *
 * Patch a 32 bit immediate VAL into the two 16 bit immediate halves of a
 * lis/ori-style instruction pair at IP (high half into halfword [1], low
 * half into halfword [3]).
 * NOTE(review): this looks like leftover PPC-style patching; in this file
 * every caller sits behind a g_assert_not_reached () in
 * mono_arch_patch_code () — confirm it is dead on ARM.
 * (Comments cannot go inside the backslash-continued macro body below.)
 */
5469 #define patch_lis_ori(ip,val) do {\
5470 guint16 *__lis_ori = (guint16*)(ip); \
5471 __lis_ori [1] = (((guint32)(val)) >> 16) & 0xffff; \
5472 __lis_ori [3] = ((guint32)(val)) & 0xffff; \
/*
 * mono_arch_patch_code:
 *
 * Walk the MonoJumpInfo list JI for METHOD and back-patch the generated
 * native CODE: resolve each patch target via mono_resolve_patch_target ()
 * and apply it with arm_patch_general ().  MONO_PATCH_INFO_SWITCH gets
 * special handling (the jump table is filled in directly); several patch
 * kinds are asserted unreachable on this backend.
 */
5476 mono_arch_patch_code (MonoMethod *method, MonoDomain *domain, guint8 *code, MonoJumpInfo *ji, MonoCodeManager *dyn_code_mp, gboolean run_cctors)
5478 MonoJumpInfo *patch_info;
/* run_cctors == FALSE means we are compiling AOT; switch tables are handled differently then. */
5479 gboolean compile_aot = !run_cctors;
5481 for (patch_info = ji; patch_info; patch_info = patch_info->next) {
5482 unsigned char *ip = patch_info->ip.i + code;
5483 const unsigned char *target;
5485 if (patch_info->type == MONO_PATCH_INFO_SWITCH && !compile_aot) {
5486 #ifdef USE_JUMP_TABLES
5487 gpointer *jt = mono_jumptable_get_entry (ip);
/* Without jump tables the table is inlined in the code stream, 8 bytes (2 insns) after ip. */
5489 gpointer *jt = (gpointer*)(ip + 8);
5492 /* jt is the inlined jump table, 2 instructions after ip
5493 * In the normal case we store the absolute addresses,
5494 * otherwise the displacements.
5496 for (i = 0; i < patch_info->data.table->table_size; i++)
5497 jt [i] = code + (int)patch_info->data.table->table [i];
5502 switch (patch_info->type) {
5503 case MONO_PATCH_INFO_BB:
5504 case MONO_PATCH_INFO_LABEL:
5507 /* No need to patch these */
5512 target = mono_resolve_patch_target (method, domain, code, patch_info, run_cctors);
5514 switch (patch_info->type) {
/* The cases below that start with g_assert_not_reached () are not expected
 * on this backend; the code after the assert appears to be dead/legacy. */
5515 case MONO_PATCH_INFO_IP:
5516 g_assert_not_reached ();
5517 patch_lis_ori (ip, ip);
5519 case MONO_PATCH_INFO_METHOD_REL:
5520 g_assert_not_reached ();
5521 *((gpointer *)(ip)) = code + patch_info->data.offset;
5523 case MONO_PATCH_INFO_METHODCONST:
5524 case MONO_PATCH_INFO_CLASS:
5525 case MONO_PATCH_INFO_IMAGE:
5526 case MONO_PATCH_INFO_FIELD:
5527 case MONO_PATCH_INFO_VTABLE:
5528 case MONO_PATCH_INFO_IID:
5529 case MONO_PATCH_INFO_SFLDA:
5530 case MONO_PATCH_INFO_LDSTR:
5531 case MONO_PATCH_INFO_TYPE_FROM_HANDLE:
5532 case MONO_PATCH_INFO_LDTOKEN:
5533 g_assert_not_reached ();
5534 /* from OP_AOTCONST : lis + ori */
5535 patch_lis_ori (ip, target);
5537 case MONO_PATCH_INFO_R4:
5538 case MONO_PATCH_INFO_R8:
5539 g_assert_not_reached ();
5540 *((gconstpointer *)(ip + 2)) = patch_info->data.target;
5542 case MONO_PATCH_INFO_EXC_NAME:
5543 g_assert_not_reached ();
5544 *((gconstpointer *)(ip + 1)) = patch_info->data.name;
5546 case MONO_PATCH_INFO_NONE:
5547 case MONO_PATCH_INFO_BB_OVF:
5548 case MONO_PATCH_INFO_EXC_OVF:
5549 /* everything is dealt with at epilog output time */
/* Default path: let the generic ARM patcher rewrite the branch/load at ip. */
5554 arm_patch_general (domain, ip, target, dyn_code_mp);
5561 * Stack frame layout:
5563 * ------------------- fp
5564 * MonoLMF structure or saved registers
5565 * -------------------
5567 * -------------------
5569 * -------------------
5570 * optional 8 bytes for tracing
5571 * -------------------
5572 * param area size is cfg->param_area
5573 * ------------------- sp
/*
 * mono_arch_emit_prolog:
 *
 * Emit the method prolog for CFG: save callee-saved registers (or the full
 * MonoLMF when method->save_lmf is set), allocate the aligned stack frame,
 * emit DWARF unwind ops for each step, spill register arguments to their
 * stack slots, and initialize rgctx / sequence-point variables.  See the
 * "Stack frame layout" comment above for the resulting frame shape.
 * Returns the updated code pointer (return statement not visible here).
 */
5576 mono_arch_emit_prolog (MonoCompile *cfg)
5578 MonoMethod *method = cfg->method;
5580 MonoMethodSignature *sig;
5582 int alloc_size, orig_alloc_size, pos, max_offset, i, rot_amount;
5587 int prev_sp_offset, reg_offset;
5589 if (mono_jit_trace_calls != NULL && mono_trace_eval (method))
/* Initial code buffer size is a heuristic scaled by parameter count. */
5592 sig = mono_method_signature (method);
5593 cfg->code_size = 256 + sig->param_count * 64;
5594 code = cfg->native_code = g_malloc (cfg->code_size);
/* CFA starts at sp with offset 0 before anything is pushed. */
5596 mono_emit_unwind_op_def_cfa (cfg, code, ARMREG_SP, 0);
5598 alloc_size = cfg->stack_offset;
5604 * The iphone uses R7 as the frame pointer, and it points at the saved
5609 * We can't use r7 as a frame pointer since it points into the middle of
5610 * the frame, so we keep using our own frame pointer.
5611 * FIXME: Optimize this.
/* iphone ABI: push r7+lr and make r7 point at the pair, for native unwinders. */
5613 ARM_PUSH (code, (1 << ARMREG_R7) | (1 << ARMREG_LR));
5614 ARM_MOV_REG_REG (code, ARMREG_R7, ARMREG_SP);
5615 prev_sp_offset += 8; /* r7 and lr */
5616 mono_emit_unwind_op_def_cfa_offset (cfg, code, prev_sp_offset);
5617 mono_emit_unwind_op_offset (cfg, code, ARMREG_R7, (- prev_sp_offset) + 0);
/* --- Save callee-saved registers (non-LMF path) --- */
5620 if (!method->save_lmf) {
5622 /* No need to push LR again */
5623 if (cfg->used_int_regs)
5624 ARM_PUSH (code, cfg->used_int_regs)
5626 ARM_PUSH (code, cfg->used_int_regs | (1 << ARMREG_LR));
5627 prev_sp_offset += 4;
5629 for (i = 0; i < 16; ++i) {
5630 if (cfg->used_int_regs & (1 << i))
5631 prev_sp_offset += 4;
5633 mono_emit_unwind_op_def_cfa_offset (cfg, code, prev_sp_offset);
/* Record the save slot of each pushed register for the unwinder, and mark
 * those slots as not containing GC references. */
5635 for (i = 0; i < 16; ++i) {
5636 if ((cfg->used_int_regs & (1 << i))) {
5637 mono_emit_unwind_op_offset (cfg, code, i, (- prev_sp_offset) + reg_offset);
5638 mini_gc_set_slot_type_from_cfa (cfg, (- prev_sp_offset) + reg_offset, SLOT_NOREF);
5643 mono_emit_unwind_op_offset (cfg, code, ARMREG_LR, -4);
5644 mini_gc_set_slot_type_from_cfa (cfg, -4, SLOT_NOREF);
5646 mono_emit_unwind_op_offset (cfg, code, ARMREG_LR, -4);
5647 mini_gc_set_slot_type_from_cfa (cfg, -4, SLOT_NOREF);
/* --- LMF path: push r4-r12,lr (mask 0x5ff0 plus lr bit) into the MonoLMF area --- */
5650 ARM_MOV_REG_REG (code, ARMREG_IP, ARMREG_SP);
5651 ARM_PUSH (code, 0x5ff0);
5652 prev_sp_offset += 4 * 10; /* all but r0-r3, sp and pc */
5653 mono_emit_unwind_op_def_cfa_offset (cfg, code, prev_sp_offset);
5655 for (i = 0; i < 16; ++i) {
5656 if ((i > ARMREG_R3) && (i != ARMREG_SP) && (i != ARMREG_PC)) {
5657 /* The original r7 is saved at the start */
5658 if (!(iphone_abi && i == ARMREG_R7))
5659 mono_emit_unwind_op_offset (cfg, code, i, (- prev_sp_offset) + reg_offset);
5663 g_assert (reg_offset == 4 * 10);
5664 pos += sizeof (MonoLMF) - (4 * 10);
/* --- Allocate the local/spill area, rounded up to the frame alignment --- */
5668 orig_alloc_size = alloc_size;
5669 // align to MONO_ARCH_FRAME_ALIGNMENT bytes
5670 if (alloc_size & (MONO_ARCH_FRAME_ALIGNMENT - 1)) {
5671 alloc_size += MONO_ARCH_FRAME_ALIGNMENT - 1;
5672 alloc_size &= ~(MONO_ARCH_FRAME_ALIGNMENT - 1);
5675 /* the stack used in the pushed regs */
5676 if (prev_sp_offset & 4)
5678 cfg->stack_usage = alloc_size;
/* Use a single SUB with a rotated imm8 when the size encodes, else load it into ip. */
5680 if ((i = mono_arm_is_rotated_imm8 (alloc_size, &rot_amount)) >= 0) {
5681 ARM_SUB_REG_IMM (code, ARMREG_SP, ARMREG_SP, i, rot_amount);
5683 code = mono_arm_emit_load_imm (code, ARMREG_IP, alloc_size);
5684 ARM_SUB_REG_REG (code, ARMREG_SP, ARMREG_SP, ARMREG_IP);
5686 mono_emit_unwind_op_def_cfa_offset (cfg, code, prev_sp_offset + alloc_size);
5688 if (cfg->frame_reg != ARMREG_SP) {
5689 ARM_MOV_REG_REG (code, cfg->frame_reg, ARMREG_SP);
5690 mono_emit_unwind_op_def_cfa_reg (cfg, code, cfg->frame_reg);
5692 //g_print ("prev_sp_offset: %d, alloc_size:%d\n", prev_sp_offset, alloc_size);
5693 prev_sp_offset += alloc_size;
/* The alignment padding never holds GC references. */
5695 for (i = 0; i < alloc_size - orig_alloc_size; i += 4)
5696 mini_gc_set_slot_type_from_cfa (cfg, (- prev_sp_offset) + orig_alloc_size + i, SLOT_NOREF);
5698 /* compute max_offset in order to use short forward jumps
5699 * we could skip do it on arm because the immediate displacement
5700 * for jumps is large enough, it may be useful later for constant pools
5703 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
5704 MonoInst *ins = bb->code;
5705 bb->max_offset = max_offset;
5707 if (cfg->prof_options & MONO_PROFILE_COVERAGE)
5710 MONO_BB_FOR_EACH_INS (bb, ins)
5711 max_offset += ((guint8 *)ins_get_spec (ins->opcode))[MONO_INST_LEN];
5714 /* store runtime generic context */
5715 if (cfg->rgctx_var) {
5716 MonoInst *ins = cfg->rgctx_var;
5718 g_assert (ins->opcode == OP_REGOFFSET);
5720 if (arm_is_imm12 (ins->inst_offset)) {
5721 ARM_STR_IMM (code, MONO_ARCH_RGCTX_REG, ins->inst_basereg, ins->inst_offset);
5723 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_offset);
5724 ARM_STR_REG_REG (code, MONO_ARCH_RGCTX_REG, ins->inst_basereg, ARMREG_LR);
5728 /* load arguments allocated to register from the stack */
5731 cinfo = get_call_info (cfg->generic_sharing_context, NULL, sig);
/* Spill the valuetype return-address register to its stack slot. */
5733 if (cinfo->vtype_retaddr) {
5734 ArgInfo *ainfo = &cinfo->ret;
5735 inst = cfg->vret_addr;
5736 g_assert (arm_is_imm12 (inst->inst_offset));
5737 ARM_STR_IMM (code, ainfo->reg, inst->inst_basereg, inst->inst_offset);
5740 if (sig->call_convention == MONO_CALL_VARARG) {
5741 ArgInfo *cookie = &cinfo->sig_cookie;
5743 /* Save the sig cookie address */
5744 g_assert (cookie->storage == RegTypeBase);
5746 g_assert (arm_is_imm12 (prev_sp_offset + cookie->offset));
5747 g_assert (arm_is_imm12 (cfg->sig_cookie));
5748 ARM_ADD_REG_IMM8 (code, ARMREG_IP, cfg->frame_reg, prev_sp_offset + cookie->offset);
5749 ARM_STR_IMM (code, ARMREG_IP, cfg->frame_reg, cfg->sig_cookie);
/* --- Move each incoming argument to its home (register or stack slot).
 * Pattern throughout: use the imm8/imm12 addressing form when the offset
 * encodes, otherwise materialize the offset in ip/lr first. --- */
5752 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
5753 ArgInfo *ainfo = cinfo->args + i;
5754 inst = cfg->args [pos];
5756 if (cfg->verbose_level > 2)
5757 g_print ("Saving argument %d (type: %d)\n", i, ainfo->storage);
5758 if (inst->opcode == OP_REGVAR) {
5759 if (ainfo->storage == RegTypeGeneral)
5760 ARM_MOV_REG_REG (code, inst->dreg, ainfo->reg);
5761 else if (ainfo->storage == RegTypeFP) {
5762 g_assert_not_reached ();
5763 } else if (ainfo->storage == RegTypeBase) {
5764 if (arm_is_imm12 (prev_sp_offset + ainfo->offset)) {
5765 ARM_LDR_IMM (code, inst->dreg, ARMREG_SP, (prev_sp_offset + ainfo->offset));
5767 code = mono_arm_emit_load_imm (code, ARMREG_IP, prev_sp_offset + ainfo->offset);
5768 ARM_LDR_REG_REG (code, inst->dreg, ARMREG_SP, ARMREG_IP);
5771 g_assert_not_reached ();
5773 if (cfg->verbose_level > 2)
5774 g_print ("Argument %d assigned to register %s\n", pos, mono_arch_regname (inst->dreg));
5776 /* the argument should be put on the stack: FIXME handle size != word */
5777 if (ainfo->storage == RegTypeGeneral || ainfo->storage == RegTypeIRegPair || ainfo->storage == RegTypeGSharedVtInReg) {
5778 switch (ainfo->size) {
5780 if (arm_is_imm12 (inst->inst_offset))
5781 ARM_STRB_IMM (code, ainfo->reg, inst->inst_basereg, inst->inst_offset);
5783 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
5784 ARM_STRB_REG_REG (code, ainfo->reg, inst->inst_basereg, ARMREG_IP);
5788 if (arm_is_imm8 (inst->inst_offset)) {
5789 ARM_STRH_IMM (code, ainfo->reg, inst->inst_basereg, inst->inst_offset);
5791 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
5792 ARM_STRH_REG_REG (code, ainfo->reg, inst->inst_basereg, ARMREG_IP);
/* 8-byte case: store the register pair (reg, reg+1) as two words. */
5796 if (arm_is_imm12 (inst->inst_offset)) {
5797 ARM_STR_IMM (code, ainfo->reg, inst->inst_basereg, inst->inst_offset);
5799 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
5800 ARM_STR_REG_REG (code, ainfo->reg, inst->inst_basereg, ARMREG_IP);
5802 if (arm_is_imm12 (inst->inst_offset + 4)) {
5803 ARM_STR_IMM (code, ainfo->reg + 1, inst->inst_basereg, inst->inst_offset + 4);
5805 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset + 4);
5806 ARM_STR_REG_REG (code, ainfo->reg + 1, inst->inst_basereg, ARMREG_IP);
5810 if (arm_is_imm12 (inst->inst_offset)) {
5811 ARM_STR_IMM (code, ainfo->reg, inst->inst_basereg, inst->inst_offset);
5813 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
5814 ARM_STR_REG_REG (code, ainfo->reg, inst->inst_basereg, ARMREG_IP);
/* BaseGen: low word arrives on the caller stack, high word in r3. */
5818 } else if (ainfo->storage == RegTypeBaseGen) {
5819 if (arm_is_imm12 (prev_sp_offset + ainfo->offset)) {
5820 ARM_LDR_IMM (code, ARMREG_LR, ARMREG_SP, (prev_sp_offset + ainfo->offset));
5822 code = mono_arm_emit_load_imm (code, ARMREG_IP, prev_sp_offset + ainfo->offset);
5823 ARM_LDR_REG_REG (code, ARMREG_LR, ARMREG_SP, ARMREG_IP);
5825 if (arm_is_imm12 (inst->inst_offset + 4)) {
5826 ARM_STR_IMM (code, ARMREG_LR, inst->inst_basereg, inst->inst_offset + 4);
5827 ARM_STR_IMM (code, ARMREG_R3, inst->inst_basereg, inst->inst_offset);
5829 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset + 4);
5830 ARM_STR_REG_REG (code, ARMREG_LR, inst->inst_basereg, ARMREG_IP);
5831 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
5832 ARM_STR_REG_REG (code, ARMREG_R3, inst->inst_basereg, ARMREG_IP);
/* Base/GSharedVtOnStack: copy from the caller's frame to the local slot via lr. */
5834 } else if (ainfo->storage == RegTypeBase || ainfo->storage == RegTypeGSharedVtOnStack) {
5835 if (arm_is_imm12 (prev_sp_offset + ainfo->offset)) {
5836 ARM_LDR_IMM (code, ARMREG_LR, ARMREG_SP, (prev_sp_offset + ainfo->offset));
5838 code = mono_arm_emit_load_imm (code, ARMREG_IP, prev_sp_offset + ainfo->offset);
5839 ARM_LDR_REG_REG (code, ARMREG_LR, ARMREG_SP, ARMREG_IP);
5842 switch (ainfo->size) {
5844 if (arm_is_imm8 (inst->inst_offset)) {
5845 ARM_STRB_IMM (code, ARMREG_LR, inst->inst_basereg, inst->inst_offset);
5847 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
5848 ARM_STRB_REG_REG (code, ARMREG_LR, inst->inst_basereg, ARMREG_IP);
5852 if (arm_is_imm8 (inst->inst_offset)) {
5853 ARM_STRH_IMM (code, ARMREG_LR, inst->inst_basereg, inst->inst_offset);
5855 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
5856 ARM_STRH_REG_REG (code, ARMREG_LR, inst->inst_basereg, ARMREG_IP);
/* 8-byte case: copy both words through lr, one at a time. */
5860 if (arm_is_imm12 (inst->inst_offset)) {
5861 ARM_STR_IMM (code, ARMREG_LR, inst->inst_basereg, inst->inst_offset);
5863 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
5864 ARM_STR_REG_REG (code, ARMREG_LR, inst->inst_basereg, ARMREG_IP);
5866 if (arm_is_imm12 (prev_sp_offset + ainfo->offset + 4)) {
5867 ARM_LDR_IMM (code, ARMREG_LR, ARMREG_SP, (prev_sp_offset + ainfo->offset + 4));
5869 code = mono_arm_emit_load_imm (code, ARMREG_IP, prev_sp_offset + ainfo->offset + 4);
5870 ARM_LDR_REG_REG (code, ARMREG_LR, ARMREG_SP, ARMREG_IP);
5872 if (arm_is_imm12 (inst->inst_offset + 4)) {
5873 ARM_STR_IMM (code, ARMREG_LR, inst->inst_basereg, inst->inst_offset + 4);
5875 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset + 4);
5876 ARM_STR_REG_REG (code, ARMREG_LR, inst->inst_basereg, ARMREG_IP);
5880 if (arm_is_imm12 (inst->inst_offset)) {
5881 ARM_STR_IMM (code, ARMREG_LR, inst->inst_basereg, inst->inst_offset);
5883 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
5884 ARM_STR_REG_REG (code, ARMREG_LR, inst->inst_basereg, ARMREG_IP);
/* FP argument register (hard-float ABI): compute the address in ip, then store. */
5888 } else if (ainfo->storage == RegTypeFP) {
5889 int imm8, rot_amount;
5891 if ((imm8 = mono_arm_is_rotated_imm8 (inst->inst_offset, &rot_amount)) == -1) {
5892 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
5893 ARM_ADD_REG_REG (code, ARMREG_IP, ARMREG_IP, inst->inst_basereg);
5895 ARM_ADD_REG_IMM (code, ARMREG_IP, inst->inst_basereg, imm8, rot_amount);
5897 if (ainfo->size == 8)
5898 ARM_FSTD (code, ainfo->reg, ARMREG_IP, 0);
5900 ARM_FSTS (code, ainfo->reg, ARMREG_IP, 0);
/* Struct passed partly in registers: store the register part word by word,
 * then memcpy the remainder from the caller's stack area. */
5901 } else if (ainfo->storage == RegTypeStructByVal) {
5902 int doffset = inst->inst_offset;
5906 size = mini_type_stack_size_full (cfg->generic_sharing_context, inst->inst_vtype, NULL, sig->pinvoke);
5907 for (cur_reg = 0; cur_reg < ainfo->size; ++cur_reg) {
5908 if (arm_is_imm12 (doffset)) {
5909 ARM_STR_IMM (code, ainfo->reg + cur_reg, inst->inst_basereg, doffset);
5911 code = mono_arm_emit_load_imm (code, ARMREG_IP, doffset);
5912 ARM_STR_REG_REG (code, ainfo->reg + cur_reg, inst->inst_basereg, ARMREG_IP);
5914 soffset += sizeof (gpointer);
5915 doffset += sizeof (gpointer);
5917 if (ainfo->vtsize) {
5918 /* FIXME: handle overrun! with struct sizes not multiple of 4 */
5919 //g_print ("emit_memcpy (prev_sp_ofs: %d, ainfo->offset: %d, soffset: %d)\n", prev_sp_offset, ainfo->offset, soffset);
5920 code = emit_memcpy (code, ainfo->vtsize * sizeof (gpointer), inst->inst_basereg, doffset, ARMREG_SP, prev_sp_offset + ainfo->offset);
5922 } else if (ainfo->storage == RegTypeStructByAddr) {
5923 g_assert_not_reached ();
5924 /* FIXME: handle overrun! with struct sizes not multiple of 4 */
5925 code = emit_memcpy (code, ainfo->vtsize * sizeof (gpointer), inst->inst_basereg, inst->inst_offset, ainfo->reg, 0);
5927 g_assert_not_reached ();
/* --- Save the LMF after arguments are in place --- */
5932 if (method->save_lmf)
5933 code = emit_save_lmf (cfg, code, alloc_size - lmf_offset);
5936 code = mono_arch_instrument_prolog (cfg, mono_trace_enter_method, code, TRUE);
/* --- Sequence point support: load SeqPointInfo from a GOT slot into its variable --- */
5938 if (cfg->arch.seq_point_info_var) {
5939 MonoInst *ins = cfg->arch.seq_point_info_var;
5941 /* Initialize the variable from a GOT slot */
5942 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_SEQ_POINT_INFO, cfg->method);
5943 #ifdef USE_JUMP_TABLES
5945 gpointer *jte = mono_jumptable_add_entry ();
5946 code = mono_arm_load_jumptable_entry (code, jte, ARMREG_IP);
5947 ARM_LDR_IMM (code, ARMREG_R0, ARMREG_IP, 0);
5949 /** XXX: is it correct? */
/* Non-jumptable path: inline NULL slot after the load, patched later. */
5951 ARM_LDR_IMM (code, ARMREG_R0, ARMREG_PC, 0);
5953 *(gpointer*)code = NULL;
5956 ARM_LDR_REG_REG (code, ARMREG_R0, ARMREG_PC, ARMREG_R0);
5958 g_assert (ins->opcode == OP_REGOFFSET);
5960 if (arm_is_imm12 (ins->inst_offset)) {
5961 ARM_STR_IMM (code, ARMREG_R0, ins->inst_basereg, ins->inst_offset);
5963 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_offset);
5964 ARM_STR_REG_REG (code, ARMREG_R0, ins->inst_basereg, ARMREG_LR);
5968 /* Initialize ss_trigger_page_var */
5969 if (!cfg->soft_breakpoints) {
5970 MonoInst *info_var = cfg->arch.seq_point_info_var;
5971 MonoInst *ss_trigger_page_var = cfg->arch.ss_trigger_page_var;
5972 int dreg = ARMREG_LR;
5975 g_assert (info_var->opcode == OP_REGOFFSET);
5976 g_assert (arm_is_imm12 (info_var->inst_offset));
5978 ARM_LDR_IMM (code, dreg, info_var->inst_basereg, info_var->inst_offset);
5979 /* Load the trigger page addr */
5980 ARM_LDR_IMM (code, dreg, dreg, G_STRUCT_OFFSET (SeqPointInfo, ss_trigger_page));
5981 ARM_STR_IMM (code, dreg, ss_trigger_page_var->inst_basereg, ss_trigger_page_var->inst_offset);
/* --- Soft breakpoints: cache &ss_trigger_var and the step/breakpoint
 * wrapper addresses into their per-method variables --- */
5985 if (cfg->arch.seq_point_read_var) {
5986 MonoInst *read_ins = cfg->arch.seq_point_read_var;
5987 MonoInst *ss_method_ins = cfg->arch.seq_point_ss_method_var;
5988 MonoInst *bp_method_ins = cfg->arch.seq_point_bp_method_var;
5989 #ifdef USE_JUMP_TABLES
5992 g_assert (read_ins->opcode == OP_REGOFFSET);
5993 g_assert (arm_is_imm12 (read_ins->inst_offset));
5994 g_assert (ss_method_ins->opcode == OP_REGOFFSET);
5995 g_assert (arm_is_imm12 (ss_method_ins->inst_offset));
5996 g_assert (bp_method_ins->opcode == OP_REGOFFSET);
5997 g_assert (arm_is_imm12 (bp_method_ins->inst_offset));
5999 #ifdef USE_JUMP_TABLES
6000 jte = mono_jumptable_add_entries (3);
6001 jte [0] = (gpointer)&ss_trigger_var;
6002 jte [1] = single_step_func_wrapper;
6003 jte [2] = breakpoint_func_wrapper;
6004 code = mono_arm_load_jumptable_entry_addr (code, jte, ARMREG_LR);
/* Non-jumptable path: the three pointers are inlined right after a pc-relative mov. */
6006 ARM_MOV_REG_REG (code, ARMREG_LR, ARMREG_PC);
6008 *(volatile int **)code = &ss_trigger_var;
6010 *(gpointer*)code = single_step_func_wrapper;
6012 *(gpointer*)code = breakpoint_func_wrapper;
6016 ARM_LDR_IMM (code, ARMREG_IP, ARMREG_LR, 0);
6017 ARM_STR_IMM (code, ARMREG_IP, read_ins->inst_basereg, read_ins->inst_offset);
6018 ARM_LDR_IMM (code, ARMREG_IP, ARMREG_LR, 4);
6019 ARM_STR_IMM (code, ARMREG_IP, ss_method_ins->inst_basereg, ss_method_ins->inst_offset);
6020 ARM_LDR_IMM (code, ARMREG_IP, ARMREG_LR, 8);
6021 ARM_STR_IMM (code, ARMREG_IP, bp_method_ins->inst_basereg, bp_method_ins->inst_offset);
6024 cfg->code_len = code - cfg->native_code;
6025 g_assert (cfg->code_len < cfg->code_size);
/*
 * mono_arch_emit_epilog:
 *
 * Emit the method epilog: optionally grow the code buffer, run the
 * leave-method tracer, load a by-value struct return into r0, restore the
 * callee-saved registers (from the MonoLMF if one was saved), unwind the
 * stack frame and return by popping the saved lr into pc.
 */
6032 mono_arch_emit_epilog (MonoCompile *cfg)
6034 MonoMethod *method = cfg->method;
6035 int pos, i, rot_amount;
/* Worst-case epilog size estimate, enlarged below for LMF/tracing/profiling. */
6036 int max_epilog_size = 16 + 20*4;
6040 if (cfg->method->save_lmf)
6041 max_epilog_size += 128;
6043 if (mono_jit_trace_calls != NULL)
6044 max_epilog_size += 50;
6046 if (cfg->prof_options & MONO_PROFILE_ENTER_LEAVE)
6047 max_epilog_size += 50;
/* Double the buffer until the epilog is guaranteed to fit (16 bytes slack). */
6049 while (cfg->code_len + max_epilog_size > (cfg->code_size - 16)) {
6050 cfg->code_size *= 2;
6051 cfg->native_code = g_realloc (cfg->native_code, cfg->code_size);
6052 cfg->stat_code_reallocs++;
6056 * Keep in sync with OP_JMP
6058 code = cfg->native_code + cfg->code_len;
6060 if (mono_jit_trace_calls != NULL && mono_trace_eval (method)) {
6061 code = mono_arch_instrument_epilog (cfg, mono_trace_leave_method, code, TRUE);
6065 /* Load returned vtypes into registers if needed */
6066 cinfo = cfg->arch.cinfo;
6067 if (cinfo->ret.storage == RegTypeStructByVal) {
6068 MonoInst *ins = cfg->ret;
6070 if (arm_is_imm12 (ins->inst_offset)) {
6071 ARM_LDR_IMM (code, ARMREG_R0, ins->inst_basereg, ins->inst_offset);
6073 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_offset);
6074 ARM_LDR_REG_REG (code, ARMREG_R0, ins->inst_basereg, ARMREG_LR);
/* --- LMF path: restore registers out of the MonoLMF saved in the prolog --- */
6078 if (method->save_lmf) {
6079 int lmf_offset, reg, sp_adj, regmask;
6080 /* all but r0-r3, sp and pc */
6081 pos += sizeof (MonoLMF) - (MONO_ARM_NUM_SAVED_REGS * sizeof (mgreg_t));
6084 code = emit_restore_lmf (cfg, code, cfg->stack_usage - lmf_offset);
6086 /* This points to r4 inside MonoLMF->iregs */
6087 sp_adj = (sizeof (MonoLMF) - MONO_ARM_NUM_SAVED_REGS * sizeof (mgreg_t));
6089 regmask = 0x9ff0; /* restore lr to pc */
6090 /* Skip caller saved registers not used by the method */
6091 while (!(cfg->used_int_regs & (1 << reg)) && reg < ARMREG_FP) {
6092 regmask &= ~(1 << reg);
6097 /* Restored later */
6098 regmask &= ~(1 << ARMREG_PC);
6099 /* point sp at the registers to restore: 10 is 14 -4, because we skip r0-r3 */
6100 code = emit_big_add (code, ARMREG_SP, cfg->frame_reg, cfg->stack_usage - lmf_offset + sp_adj);
6102 ARM_POP (code, regmask);
6104 /* Restore saved r7, restore LR to PC */
6105 /* Skip lr from the lmf */
6106 ARM_ADD_REG_IMM (code, ARMREG_SP, ARMREG_SP, sizeof (gpointer), 0);
6107 ARM_POP (code, (1 << ARMREG_R7) | (1 << ARMREG_PC));
/* --- Non-LMF path: deallocate the frame, then pop the saved registers. --- */
6110 if ((i = mono_arm_is_rotated_imm8 (cfg->stack_usage, &rot_amount)) >= 0) {
6111 ARM_ADD_REG_IMM (code, ARMREG_SP, cfg->frame_reg, i, rot_amount);
6113 code = mono_arm_emit_load_imm (code, ARMREG_IP, cfg->stack_usage);
6114 ARM_ADD_REG_REG (code, ARMREG_SP, cfg->frame_reg, ARMREG_IP);
6118 /* Restore saved gregs */
6119 if (cfg->used_int_regs)
6120 ARM_POP (code, cfg->used_int_regs);
6121 /* Restore saved r7, restore LR to PC */
6122 ARM_POP (code, (1 << ARMREG_R7) | (1 << ARMREG_PC));
/* Popping the lr save slot into pc performs the return. */
6124 ARM_POP (code, cfg->used_int_regs | (1 << ARMREG_PC));
6128 cfg->code_len = code - cfg->native_code;
6130 g_assert (cfg->code_len < cfg->code_size);
/*
 * mono_arch_emit_exceptions:
 *
 *   Emit the out-of-line code sequences that raise the exceptions recorded
 * in cfg->patch_info (MONO_PATCH_INFO_EXC entries), after growing the
 * native code buffer enough that the extra code is guaranteed to fit.
 */
6135 mono_arch_emit_exceptions (MonoCompile *cfg)
6137 MonoJumpInfo *patch_info;
/* One slot per intrinsic exception type: address of its shared throw
 * sequence, and whether space for it was already accounted for. */
6140 guint8* exc_throw_pos [MONO_EXC_INTRINS_NUM];
6141 guint8 exc_throw_found [MONO_EXC_INTRINS_NUM];
6142 int max_epilog_size = 50;
6144 for (i = 0; i < MONO_EXC_INTRINS_NUM; i++) {
6145 exc_throw_pos [i] = NULL;
6146 exc_throw_found [i] = 0;
6149 /* count the number of exception infos */
6152 * make sure we have enough space for exceptions
6154 for (patch_info = cfg->patch_info; patch_info; patch_info = patch_info->next) {
6155 if (patch_info->type == MONO_PATCH_INFO_EXC) {
6156 i = mini_exception_id_by_name (patch_info->data.target);
/* Reserve space only once per distinct exception type: all throw
 * sites for the same type branch to a single shared sequence. */
6157 if (!exc_throw_found [i]) {
6158 max_epilog_size += 32;
6159 exc_throw_found [i] = TRUE;
/* Double the code buffer until the worst-case extra code fits. */
6164 while (cfg->code_len + max_epilog_size > (cfg->code_size - 16)) {
6165 cfg->code_size *= 2;
6166 cfg->native_code = g_realloc (cfg->native_code, cfg->code_size);
6167 cfg->stat_code_reallocs++;
6170 code = cfg->native_code + cfg->code_len;
6172 /* add code to raise exceptions */
6173 for (patch_info = cfg->patch_info; patch_info; patch_info = patch_info->next) {
6174 switch (patch_info->type) {
6175 case MONO_PATCH_INFO_EXC: {
6176 MonoClass *exc_class;
6177 unsigned char *ip = patch_info->ip.i + cfg->native_code;
6179 i = mini_exception_id_by_name (patch_info->data.target);
/* Already emitted a throw sequence for this type: just patch the
 * branch at the throw site to point at it and drop the patch. */
6180 if (exc_throw_pos [i]) {
6181 arm_patch (ip, exc_throw_pos [i]);
6182 patch_info->type = MONO_PATCH_INFO_NONE;
6185 exc_throw_pos [i] = code;
6187 arm_patch (ip, code);
6189 exc_class = mono_class_from_name (mono_defaults.corlib, "System", patch_info->data.name);
6190 g_assert (exc_class);
/* R1 = the throw site's return address (LR), used as the throw IP. */
6192 ARM_MOV_REG_REG (code, ARMREG_R1, ARMREG_LR);
6193 #ifdef USE_JUMP_TABLES
/* Jumptable path: slot 0 holds the throw helper (patched via the
 * INTERNAL_METHOD patch below), slot 1 holds the type token. */
6195 gpointer *jte = mono_jumptable_add_entries (2);
6196 patch_info->type = MONO_PATCH_INFO_INTERNAL_METHOD;
6197 patch_info->data.name = "mono_arch_throw_corlib_exception";
6198 patch_info->ip.i = code - cfg->native_code;
6199 code = mono_arm_load_jumptable_entry_addr (code, jte, ARMREG_R0);
6200 ARM_LDR_IMM (code, ARMREG_IP, ARMREG_R0, 0);
6201 ARM_LDR_IMM (code, ARMREG_R0, ARMREG_R0, 4);
6202 ARM_BLX_REG (code, ARMREG_IP);
6203 jte [1] = GUINT_TO_POINTER (exc_class->type_token);
/* Non-jumptable path: the type token word is embedded in the code
 * stream and picked up by this PC-relative load. */
6206 ARM_LDR_IMM (code, ARMREG_R0, ARMREG_PC, 0);
6207 patch_info->type = MONO_PATCH_INFO_INTERNAL_METHOD;
6208 patch_info->data.name = "mono_arch_throw_corlib_exception";
6209 patch_info->ip.i = code - cfg->native_code;
6211 *(guint32*)(gpointer)code = exc_class->type_token;
6222 cfg->code_len = code - cfg->native_code;
6224 g_assert (cfg->code_len < cfg->code_size);
6228 #endif /* #ifndef DISABLE_JIT */
/* One-time backend init: cache the TLS offsets of the current LMF and of
 * the LMF-address slot so emitted code can access them directly. */
6231 mono_arch_finish_init (void)
6233 lmf_tls_offset = mono_get_lmf_tls_offset ();
6234 lmf_addr_tls_offset = mono_get_lmf_addr_tls_offset ();
/* Per-arch hook to release JIT TLS data (body not visible in this view). */
6238 mono_arch_free_jit_tls_data (MonoJitTlsData *tls)
/* Per-arch intrinsic expansion hook for well-known methods; presumably
 * returns NULL when no intrinsic applies -- body not visible here. */
6243 mono_arch_emit_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
/* Debug helper: print a MonoInst tree (body not visible in this view). */
6250 mono_arch_print_tree (MonoInst *tree, int arity)
/* Returns the offset of the patchable location inside CODE (body not
 * visible in this view). */
6260 mono_arch_get_patch_offset (guint8 *code)
/* ARM has no register windows; presumably a no-op -- body not visible. */
6267 mono_arch_flush_register_windows (void)
6271 #ifdef MONO_ARCH_HAVE_IMT
/*
 * mono_arch_emit_imt_argument:
 *
 *   Arrange for the IMT/method argument of CALL to be passed in ARMREG_V5.
 * Under AOT, LLVM, generic sharing or jumptables the argument is always
 * passed in the register; otherwise (not visible here) it may be embedded
 * in the code stream after the call instead.
 */
6276 mono_arch_emit_imt_argument (MonoCompile *cfg, MonoCallInst *call, MonoInst *imt_arg)
6278 int method_reg = mono_alloc_ireg (cfg);
6279 #ifdef USE_JUMP_TABLES
6280 int use_jumptables = TRUE;
6282 int use_jumptables = FALSE;
6285 if (cfg->compile_aot) {
6288 call->dynamic_imt_arg = TRUE;
/* Either forward the explicit IMT argument, or load the method
 * itself through an AOT constant patch. */
6291 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, method_reg, imt_arg->dreg);
6293 MONO_INST_NEW (cfg, ins, OP_AOTCONST);
6294 ins->dreg = method_reg;
6295 ins->inst_p0 = call->method;
6296 ins->inst_c1 = MONO_PATCH_INFO_METHODCONST;
6297 MONO_ADD_INS (cfg->cbb, ins);
/* V5 is the dedicated IMT/method register on ARM. */
6299 mono_call_inst_add_outarg_reg (cfg, call, method_reg, ARMREG_V5, FALSE);
6300 } else if (cfg->generic_context || imt_arg || mono_use_llvm || use_jumptables) {
6301 /* Always pass in a register for simplicity */
6302 call->dynamic_imt_arg = TRUE;
6304 cfg->uses_rgctx_reg = TRUE;
6307 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, method_reg, imt_arg->dreg);
/* No explicit IMT arg: materialize the method pointer directly. */
6311 MONO_INST_NEW (cfg, ins, OP_PCONST);
6312 ins->inst_p0 = call->method;
6313 ins->dreg = method_reg;
6314 MONO_ADD_INS (cfg->cbb, ins);
6317 mono_call_inst_add_outarg_reg (cfg, call, method_reg, ARMREG_V5, FALSE);
/*
 * mono_arch_find_imt_method:
 *
 *   Recover the IMT method at an interface call site: either from register
 * V5 (jumptables / AOT / LLVM / gsharedvt paths) or from the word embedded
 * in the code stream right after the PC-relative LDR at the call site.
 */
6324 mono_arch_find_imt_method (mgreg_t *regs, guint8 *code)
6326 #ifdef USE_JUMP_TABLES
6327 return (MonoMethod*)regs [ARMREG_V5];
6330 guint32 *code_ptr = (guint32*)code;
/* The word following the LDR is the embedded method pointer. */
6332 method = GUINT_TO_POINTER (code_ptr [1]);
6336 return (MonoMethod*)regs [ARMREG_V5];
6338 /* The IMT value is stored in the code stream right after the LDC instruction. */
6339 /* This is no longer true for the gsharedvt_in trampoline */
6341 if (!IS_LDR_PC (code_ptr [0])) {
/* NOTE(review): the message says "LDC" but the check is IS_LDR_PC;
 * it presumably means "LDR" -- verify before changing the string. */
6342 g_warning ("invalid code stream, instruction before IMT value is not a LDC in %s() (code %p value 0: 0x%x -1: 0x%x -2: 0x%x)", __FUNCTION__, code, code_ptr [2], code_ptr [1], code_ptr [0]);
6343 g_assert (IS_LDR_PC (code_ptr [0]));
6347 /* This is AOTed code, or the gsharedvt trampoline, the IMT method is in V5 */
6348 return (MonoMethod*)regs [ARMREG_V5];
6350 return (MonoMethod*) method;
/* The static-call vtable is passed in the dedicated RGCTX register. */
6355 mono_arch_find_static_call_vtable (mgreg_t *regs, guint8 *code)
6357 return (MonoVTable*) regs [MONO_ARCH_RGCTX_REG];
6360 /* #define ENABLE_WRONG_METHOD_CHECK 1 */
/* Per-fragment size estimates (bytes; 4 bytes per ARM instruction) used by
 * mono_arch_build_imt_thunk () below to pre-compute each entry's chunk_size
 * before any code is emitted. */
6361 #define BASE_SIZE (6 * 4)
6362 #define BSEARCH_ENTRY_SIZE (4 * 4)
6363 #define CMP_SIZE (3 * 4)
6364 #define BRANCH_SIZE (1 * 4)
6365 #define CALL_SIZE (2 * 4)
6366 #define WMC_SIZE (8 * 4)
/* Signed byte distance from A to B, truncated to 32 bits. */
6367 #define DISTANCE(A, B) (((gint32)(B)) - ((gint32)(A)))
6369 #ifdef USE_JUMP_TABLES
/* Store VALUE into jump-table slot INDEX, asserting the slot was unset
 * (each slot must be filled exactly once). */
6371 set_jumptable_element (gpointer *base, guint32 index, gpointer value)
6373 g_assert (base [index] == NULL);
6374 base [index] = value;
/*
 * Emit a conditional load of jump-table entry JTI (table base in BASE) into
 * DREG. Offsets that fit an ARM imm12 use a single LDR; larger offsets are
 * materialized in DREG with MOVW/MOVT first (assumes a core with movw/movt
 * support -- TODO confirm callers only take this path on such cores).
 */
6377 load_element_with_regbase_cond (arminstr_t *code, ARMReg dreg, ARMReg base, guint32 jti, int cond)
/* Entries are 4 bytes, so the byte offset is jti * 4. */
6379 if (arm_is_imm12 (jti * 4)) {
6380 ARM_LDR_IMM_COND (code, dreg, base, jti * 4, cond);
6382 ARM_MOVW_REG_IMM_COND (code, dreg, (jti * 4) & 0xffff, cond);
6383 if ((jti * 4) >> 16)
6384 ARM_MOVT_REG_IMM_COND (code, dreg, ((jti * 4) >> 16) & 0xffff, cond);
6385 ARM_LDR_REG_REG_SHIFT_COND (code, dreg, base, dreg, ARMSHIFT_LSL, 0, cond);
/*
 * Embed VALUE in the instruction stream at CODE and patch the earlier
 * PC-relative LDR at TARGET so its 12-bit offset points at the new word.
 * NOTE(review): delta is unsigned, so the `delta >= 0` half of the assert
 * is vacuous; a negative distance wraps and is still caught by `<= 0xFFF`.
 */
6391 arm_emit_value_and_patch_ldr (arminstr_t *code, arminstr_t *target, guint32 value)
6393 guint32 delta = DISTANCE (target, code);
6395 g_assert (delta >= 0 && delta <= 0xFFF);
/* OR the byte offset into the LDR's immediate field. */
6396 *target = *target | delta;
6402 #ifdef ENABLE_WRONG_METHOD_CHECK
/* Diagnostic helper for ENABLE_WRONG_METHOD_CHECK builds: report an IMT
 * key mismatch detected by the generated thunk code. */
6404 mini_dump_bad_imt (int input_imt, int compared_imt, int pc)
6406 g_print ("BAD IMT comparing %x with expected %x at ip %x", input_imt, compared_imt, pc);
/*
 * mono_arch_build_imt_thunk:
 *
 *   Build the native IMT thunk for VTABLE: a decision tree over the sorted
 * IMT_ENTRIES that compares the incoming IMT method (R0/V5) against each
 * key and jumps to the matching vtable slot or target code, falling back to
 * FAIL_TRAMP (if given) on a miss. Two code shapes exist: a jumptable-based
 * one (USE_JUMP_TABLES) and one that embeds constants in the code stream.
 * Pass 1 sizes every entry (chunk_size); pass 2 emits; pass 3 back-patches
 * the inter-entry branches and flushes constant pools.
 */
6412 mono_arch_build_imt_thunk (MonoVTable *vtable, MonoDomain *domain, MonoIMTCheckItem **imt_entries, int count,
6413 gpointer fail_tramp)
6416 arminstr_t *code, *start;
6417 #ifdef USE_JUMP_TABLES
6420 gboolean large_offsets = FALSE;
6421 guint32 **constant_pool_starts;
6422 arminstr_t *vtable_target = NULL;
6423 int extra_space = 0;
6425 #ifdef ENABLE_WRONG_METHOD_CHECK
/* --- Pass 1: compute chunk_size for every entry so the total code size
 * can be reserved up front. --- */
6430 #ifdef USE_JUMP_TABLES
/* Jumptable shape: fixed worst-case 16 instructions per entry. */
6431 for (i = 0; i < count; ++i) {
6432 MonoIMTCheckItem *item = imt_entries [i];
6433 item->chunk_size += 4 * 16;
6434 if (!item->is_equals)
6435 imt_entries [item->check_target_idx]->compare_done = TRUE;
6436 size += item->chunk_size;
/* Non-jumptable shape: per-entry constant pools, sized by case. */
6439 constant_pool_starts = g_new0 (guint32*, count);
6441 for (i = 0; i < count; ++i) {
6442 MonoIMTCheckItem *item = imt_entries [i];
6443 if (item->is_equals) {
6444 gboolean fail_case = !item->check_target_idx && fail_tramp;
/* Entries whose vtable slot is out of LDR imm12 range (or that
 * jump to explicit target code) need the long sequence. */
6446 if (item->has_target_code || !arm_is_imm12 (DISTANCE (vtable, &vtable->vtable[item->value.vtable_slot]))) {
6447 item->chunk_size += 32;
6448 large_offsets = TRUE;
6451 if (item->check_target_idx || fail_case) {
6452 if (!item->compare_done || fail_case)
6453 item->chunk_size += CMP_SIZE;
6454 item->chunk_size += BRANCH_SIZE;
6456 #ifdef ENABLE_WRONG_METHOD_CHECK
6457 item->chunk_size += WMC_SIZE;
6461 item->chunk_size += 16;
6462 large_offsets = TRUE;
6464 item->chunk_size += CALL_SIZE;
/* Binary-search node: compare + conditional branch only. */
6466 item->chunk_size += BSEARCH_ENTRY_SIZE;
6467 imt_entries [item->check_target_idx]->compare_done = TRUE;
6469 size += item->chunk_size;
6473 size += 4 * count; /* The ARM_ADD_REG_IMM to pop the stack */
/* Reserve the code; generic-virtual thunks come from their own pool. */
6477 code = mono_method_alloc_generic_virtual_thunk (domain, size);
6479 code = mono_domain_code_reserve (domain, size);
6483 g_print ("Building IMT thunk for class %s %s entries %d code size %d code at %p end %p vtable %p fail_tramp %p\n", vtable->klass->name_space, vtable->klass->name, count, size, start, ((guint8*)start) + size, vtable, fail_tramp);
6484 for (i = 0; i < count; ++i) {
6485 MonoIMTCheckItem *item = imt_entries [i];
6486 g_print ("method %d (%p) %s vtable slot %p is_equals %d chunk size %d\n", i, item->key, ((MonoMethod*)item->key)->name, &vtable->vtable [item->value.vtable_slot], item->is_equals, item->chunk_size);
/* --- Prologue: save scratch registers and load the vtable / IMT method
 * into known registers for the comparisons below. --- */
6490 #ifdef USE_JUMP_TABLES
6491 ARM_PUSH3 (code, ARMREG_R0, ARMREG_R1, ARMREG_R2);
6492 /* If jumptables we always pass the IMT method in R5 */
6493 ARM_MOV_REG_REG (code, ARMREG_R0, ARMREG_V5);
/* Jump-table layout: slot 0 = vtable, then 3 slots per entry. */
6494 #define VTABLE_JTI 0
6495 #define IMT_METHOD_OFFSET 0
6496 #define TARGET_CODE_OFFSET 1
6497 #define JUMP_CODE_OFFSET 2
6498 #define RECORDS_PER_ENTRY 3
6499 #define IMT_METHOD_JTI(idx) (1 + idx * RECORDS_PER_ENTRY + IMT_METHOD_OFFSET)
6500 #define TARGET_CODE_JTI(idx) (1 + idx * RECORDS_PER_ENTRY + TARGET_CODE_OFFSET)
6501 #define JUMP_CODE_JTI(idx) (1 + idx * RECORDS_PER_ENTRY + JUMP_CODE_OFFSET)
6503 jte = mono_jumptable_add_entries (RECORDS_PER_ENTRY * count + 1 /* vtable */);
6504 code = (arminstr_t *) mono_arm_load_jumptable_entry_addr ((guint8 *) code, jte, ARMREG_R2);
6505 ARM_LDR_IMM (code, ARMREG_IP, ARMREG_R2, VTABLE_JTI);
6506 set_jumptable_element (jte, VTABLE_JTI, vtable);
/* Large-offset variant keeps an extra stack slot (IP/PC) for the
 * computed branch target; see the LDM-based epilogues below. */
6509 ARM_PUSH4 (code, ARMREG_R0, ARMREG_R1, ARMREG_IP, ARMREG_PC);
6511 ARM_PUSH2 (code, ARMREG_R0, ARMREG_R1);
/* R0 = IMT method embedded just before the call site (at LR - 4). */
6512 ARM_LDR_IMM (code, ARMREG_R0, ARMREG_LR, -4);
6513 vtable_target = code;
6514 ARM_LDR_IMM (code, ARMREG_IP, ARMREG_PC, 0);
6516 if (mono_use_llvm) {
6517 /* LLVM always passes the IMT method in R5 */
6518 ARM_MOV_REG_REG (code, ARMREG_R0, ARMREG_V5);
6520 /* R0 == 0 means we are called from AOT code. In this case, V5 contains the IMT method */
6521 ARM_CMP_REG_IMM8 (code, ARMREG_R0, 0);
6522 ARM_MOV_REG_REG_COND (code, ARMREG_R0, ARMREG_V5, ARMCOND_EQ);
/* --- Pass 2: emit the comparison/dispatch code for every entry. --- */
6526 for (i = 0; i < count; ++i) {
6527 MonoIMTCheckItem *item = imt_entries [i];
6528 #ifdef USE_JUMP_TABLES
6529 guint32 imt_method_jti = 0, target_code_jti = 0;
6531 arminstr_t *imt_method = NULL, *vtable_offset_ins = NULL, *target_code_ins = NULL;
6533 gint32 vtable_offset;
6535 item->code_target = (guint8*)code;
6537 if (item->is_equals) {
6538 gboolean fail_case = !item->check_target_idx && fail_tramp;
6540 if (item->check_target_idx || fail_case) {
/* Compare incoming method (R0) against this entry's key and
 * branch away on mismatch. */
6541 if (!item->compare_done || fail_case) {
6542 #ifdef USE_JUMP_TABLES
6543 imt_method_jti = IMT_METHOD_JTI (i);
6544 code = load_element_with_regbase_cond (code, ARMREG_R1, ARMREG_R2, imt_method_jti, ARMCOND_AL);
6547 ARM_LDR_IMM (code, ARMREG_R1, ARMREG_PC, 0);
6549 ARM_CMP_REG_REG (code, ARMREG_R0, ARMREG_R1);
6551 #ifdef USE_JUMP_TABLES
6552 code = load_element_with_regbase_cond (code, ARMREG_R1, ARMREG_R2, JUMP_CODE_JTI (i), ARMCOND_NE);
6553 ARM_BX_COND (code, ARMCOND_NE, ARMREG_R1);
6554 item->jmp_code = GUINT_TO_POINTER (JUMP_CODE_JTI (i));
6556 item->jmp_code = (guint8*)code;
6557 ARM_B_COND (code, ARMCOND_NE, 0);
6560 /*Enable the commented code to assert on wrong method*/
6561 #ifdef ENABLE_WRONG_METHOD_CHECK
6562 #ifdef USE_JUMP_TABLES
6563 imt_method_jti = IMT_METHOD_JTI (i);
6564 code = load_element_with_regbase_cond (code, ARMREG_R1, ARMREG_R2, imt_method_jti, ARMCOND_AL);
6567 ARM_LDR_IMM (code, ARMREG_R1, ARMREG_PC, 0);
6569 ARM_CMP_REG_REG (code, ARMREG_R0, ARMREG_R1);
6571 ARM_B_COND (code, ARMCOND_EQ, 0);
6573 /* Define this if your system is so bad that gdb is failing. */
6574 #ifdef BROKEN_DEV_ENV
6575 ARM_MOV_REG_REG (code, ARMREG_R2, ARMREG_PC);
6577 arm_patch (code - 1, mini_dump_bad_imt);
6581 arm_patch (cond, code);
6585 if (item->has_target_code) {
6586 /* Load target address */
6587 #ifdef USE_JUMP_TABLES
6588 target_code_jti = TARGET_CODE_JTI (i);
6589 code = load_element_with_regbase_cond (code, ARMREG_R1, ARMREG_R2, target_code_jti, ARMCOND_AL);
6590 /* Restore registers */
6591 ARM_POP3 (code, ARMREG_R0, ARMREG_R1, ARMREG_R2);
6593 ARM_BX (code, ARMREG_R1);
6594 set_jumptable_element (jte, target_code_jti, item->value.target_code);
6596 target_code_ins = code;
6597 ARM_LDR_IMM (code, ARMREG_R1, ARMREG_PC, 0);
6598 /* Save it to the fourth slot */
6599 ARM_STR_IMM (code, ARMREG_R1, ARMREG_SP, 3 * sizeof (gpointer));
6600 /* Restore registers and branch */
6601 ARM_POP4 (code, ARMREG_R0, ARMREG_R1, ARMREG_IP, ARMREG_PC);
6603 code = arm_emit_value_and_patch_ldr (code, target_code_ins, (gsize)item->value.target_code);
/* Dispatch through the vtable slot instead of explicit code. */
6606 vtable_offset = DISTANCE (vtable, &vtable->vtable[item->value.vtable_slot]);
6607 if (!arm_is_imm12 (vtable_offset)) {
6609 * We need to branch to a computed address but we don't have
6610 * a free register to store it, since IP must contain the
6611 * vtable address. So we push the two values to the stack, and
6612 * load them both using LDM.
6614 /* Compute target address */
6615 #ifdef USE_JUMP_TABLES
6616 ARM_MOVW_REG_IMM (code, ARMREG_R1, vtable_offset & 0xffff);
6617 if (vtable_offset >> 16)
6618 ARM_MOVT_REG_IMM (code, ARMREG_R1, (vtable_offset >> 16) & 0xffff);
6619 /* IP had vtable base. */
6620 ARM_LDR_REG_REG (code, ARMREG_IP, ARMREG_IP, ARMREG_R1);
6621 /* Restore registers and branch */
6622 ARM_POP3 (code, ARMREG_R0, ARMREG_R1, ARMREG_R2);
6623 ARM_BX (code, ARMREG_IP);
6625 vtable_offset_ins = code;
6626 ARM_LDR_IMM (code, ARMREG_R1, ARMREG_PC, 0);
6627 ARM_LDR_REG_REG (code, ARMREG_R1, ARMREG_IP, ARMREG_R1);
6628 /* Save it to the fourth slot */
6629 ARM_STR_IMM (code, ARMREG_R1, ARMREG_SP, 3 * sizeof (gpointer));
6630 /* Restore registers and branch */
6631 ARM_POP4 (code, ARMREG_R0, ARMREG_R1, ARMREG_IP, ARMREG_PC);
6633 code = arm_emit_value_and_patch_ldr (code, vtable_offset_ins, vtable_offset);
/* Small offset: load the callee PC straight from the slot. */
6636 #ifdef USE_JUMP_TABLES
6637 ARM_LDR_IMM (code, ARMREG_IP, ARMREG_IP, vtable_offset);
6638 ARM_POP3 (code, ARMREG_R0, ARMREG_R1, ARMREG_R2);
6639 ARM_BX (code, ARMREG_IP);
6641 ARM_POP2 (code, ARMREG_R0, ARMREG_R1);
6643 ARM_ADD_REG_IMM8 (code, ARMREG_SP, ARMREG_SP, 2 * sizeof (gpointer));
6644 ARM_LDR_IMM (code, ARMREG_PC, ARMREG_IP, vtable_offset);
/* Fail case: a key mismatch lands here and tail-calls FAIL_TRAMP. */
6650 #ifdef USE_JUMP_TABLES
6651 set_jumptable_element (jte, GPOINTER_TO_UINT (item->jmp_code), code);
6652 target_code_jti = TARGET_CODE_JTI (i);
6653 /* Load target address */
6654 code = load_element_with_regbase_cond (code, ARMREG_R1, ARMREG_R2, target_code_jti, ARMCOND_AL);
6655 /* Restore registers */
6656 ARM_POP3 (code, ARMREG_R0, ARMREG_R1, ARMREG_R2);
6658 ARM_BX (code, ARMREG_R1);
6659 set_jumptable_element (jte, target_code_jti, fail_tramp);
6661 arm_patch (item->jmp_code, (guchar*)code);
6663 target_code_ins = code;
6664 /* Load target address */
6665 ARM_LDR_IMM (code, ARMREG_R1, ARMREG_PC, 0);
6666 /* Save it to the fourth slot */
6667 ARM_STR_IMM (code, ARMREG_R1, ARMREG_SP, 3 * sizeof (gpointer));
6668 /* Restore registers and branch */
6669 ARM_POP4 (code, ARMREG_R0, ARMREG_R1, ARMREG_IP, ARMREG_PC);
6671 code = arm_emit_value_and_patch_ldr (code, target_code_ins, (gsize)fail_tramp);
6673 item->jmp_code = NULL;
/* Flush this entry's key (and vtable word) into the inline
 * constant pool now that the unconditional branch is emitted. */
6676 #ifdef USE_JUMP_TABLES
6678 set_jumptable_element (jte, imt_method_jti, item->key);
6681 code = arm_emit_value_and_patch_ldr (code, imt_method, (guint32)item->key);
6683 /*must emit after unconditional branch*/
6684 if (vtable_target) {
6685 code = arm_emit_value_and_patch_ldr (code, vtable_target, (guint32)vtable);
6686 item->chunk_size += 4;
6687 vtable_target = NULL;
6690 /*We reserve the space for bsearch IMT values after the first entry with an absolute jump*/
6691 constant_pool_starts [i] = code;
6693 code += extra_space;
/* Binary-search node: branch to the right subtree when R0 >= key. */
6698 #ifdef USE_JUMP_TABLES
6699 code = load_element_with_regbase_cond (code, ARMREG_R1, ARMREG_R2, IMT_METHOD_JTI (i), ARMCOND_AL);
6700 ARM_CMP_REG_REG (code, ARMREG_R0, ARMREG_R1);
6701 code = load_element_with_regbase_cond (code, ARMREG_R1, ARMREG_R2, JUMP_CODE_JTI (i), ARMCOND_HS);
6702 ARM_BX_COND (code, ARMCOND_HS, ARMREG_R1);
6703 item->jmp_code = GUINT_TO_POINTER (JUMP_CODE_JTI (i));
6705 ARM_LDR_IMM (code, ARMREG_R1, ARMREG_PC, 0);
6706 ARM_CMP_REG_REG (code, ARMREG_R0, ARMREG_R1);
6708 item->jmp_code = (guint8*)code;
6709 ARM_B_COND (code, ARMCOND_HS, 0);
/* --- Pass 3: patch inter-entry branches and emit the bsearch keys. --- */
6715 for (i = 0; i < count; ++i) {
6716 MonoIMTCheckItem *item = imt_entries [i];
6717 if (item->jmp_code) {
6718 if (item->check_target_idx)
6719 #ifdef USE_JUMP_TABLES
6720 set_jumptable_element (jte, GPOINTER_TO_UINT (item->jmp_code), imt_entries [item->check_target_idx]->code_target);
6722 arm_patch (item->jmp_code, imt_entries [item->check_target_idx]->code_target);
6725 if (i > 0 && item->is_equals) {
6727 #ifdef USE_JUMP_TABLES
6728 for (j = i - 1; j >= 0 && !imt_entries [j]->is_equals; --j)
6729 set_jumptable_element (jte, IMT_METHOD_JTI (j), imt_entries [j]->key)
6731 arminstr_t *space_start = constant_pool_starts [i];
6732 for (j = i - 1; j >= 0 && !imt_entries [j]->is_equals; --j) {
6733 space_start = arm_emit_value_and_patch_ldr (space_start, (arminstr_t*)imt_entries [j]->code_target, (guint32)imt_entries [j]->key);
6741 char *buff = g_strdup_printf ("thunk_for_class_%s_%s_entries_%d", vtable->klass->name_space, vtable->klass->name, count);
6742 mono_disassemble_code (NULL, (guint8*)start, size, buff);
6747 #ifndef USE_JUMP_TABLES
6748 g_free (constant_pool_starts);
/* Make the freshly written instructions visible to the I-side. */
6751 mono_arch_flush_icache ((guint8*)start, size);
6752 mono_stats.imt_thunks_size += code - start;
6754 g_assert (DISTANCE (start, code) <= size);
/* Read general-purpose register REG from a saved MonoContext. */
6761 mono_arch_context_get_int_reg (MonoContext *ctx, int reg)
6763 return ctx->regs [reg];
/* Write VAL into general-purpose register REG of a saved MonoContext. */
6767 mono_arch_context_set_int_reg (MonoContext *ctx, int reg, mgreg_t val)
6769 ctx->regs [reg] = val;
6773 * mono_arch_get_trampolines:
6775 * Return a list of MonoTrampInfo structures describing arch specific trampolines
/* Delegates to the ARM exception-trampoline builder; AOT selects the
 * AOT-compatible variants. */
6779 mono_arch_get_trampolines (gboolean aot)
6781 return mono_arm_get_exception_trampolines (aot);
6785 #ifdef MONO_ARCH_SOFT_DEBUG_SUPPORTED
6787 * mono_arch_set_breakpoint:
6789 * Set a breakpoint at the native code corresponding to JI at NATIVE_OFFSET.
6790 * The location should contain code emitted by OP_SEQ_POINT.
6793 mono_arch_set_breakpoint (MonoJitInfo *ji, guint8 *ip)
6796 guint32 native_offset = ip - (guint8*)ji->code_start;
6797 MonoDebugOptions *opt = mini_get_debug_options ();
/* Three strategies, in order: soft breakpoints (patch in a call through
 * LR), AOT (flip the per-offset entry in the SeqPointInfo table so the
 * prologue check reads the trigger page), and JITted code (overwrite the
 * seq point with a load from the bp trigger page, which faults). */
6799 if (opt->soft_breakpoints) {
6800 g_assert (!ji->from_aot);
6802 ARM_BLX_REG (code, ARMREG_LR);
6803 mono_arch_flush_icache (code - 4, 4);
6804 } else if (ji->from_aot) {
6805 SeqPointInfo *info = mono_arch_get_seq_point_info (mono_domain_get (), ji->code_start);
6807 g_assert (native_offset % 4 == 0);
6808 g_assert (info->bp_addrs [native_offset / 4] == 0);
6809 info->bp_addrs [native_offset / 4] = bp_trigger_page;
6811 int dreg = ARMREG_LR;
6813 /* Read from another trigger page */
6814 #ifdef USE_JUMP_TABLES
6815 gpointer *jte = mono_jumptable_add_entry ();
6816 code = mono_arm_load_jumptable_entry (code, jte, dreg);
6817 jte [0] = bp_trigger_page;
/* Non-jumptable: embed the trigger page address in the stream. */
6819 ARM_LDR_IMM (code, dreg, ARMREG_PC, 0);
6821 *(int*)code = (int)bp_trigger_page;
/* The actual faulting access: load from the (protected) page. */
6824 ARM_LDR_IMM (code, dreg, dreg, 0);
6826 mono_arch_flush_icache (code - 16, 16);
6829 /* This is currently implemented by emitting an SWI instruction, which
6830 * qemu/linux seems to convert to a SIGILL.
6832 *(int*)code = (0xef << 24) | 8;
6834 mono_arch_flush_icache (code - 4, 4);
6840 * mono_arch_clear_breakpoint:
6842 * Clear the breakpoint at IP.
6845 mono_arch_clear_breakpoint (MonoJitInfo *ji, guint8 *ip)
6847 MonoDebugOptions *opt = mini_get_debug_options ();
/* Mirror of mono_arch_set_breakpoint: undo whichever of the three
 * breakpoint mechanisms was used at this location. */
6851 if (opt->soft_breakpoints) {
6852 g_assert (!ji->from_aot);
6855 mono_arch_flush_icache (code - 4, 4);
6856 } else if (ji->from_aot) {
6857 guint32 native_offset = ip - (guint8*)ji->code_start;
6858 SeqPointInfo *info = mono_arch_get_seq_point_info (mono_domain_get (), ji->code_start);
6860 g_assert (native_offset % 4 == 0);
6861 g_assert (info->bp_addrs [native_offset / 4] == bp_trigger_page);
6862 info->bp_addrs [native_offset / 4] = 0;
/* JIT case: rewrite the 4 instructions of the breakpoint sequence
 * (presumably with NOPs -- the loop body is not visible here). */
6864 for (i = 0; i < 4; ++i)
6867 mono_arch_flush_icache (ip, code - ip);
6872 * mono_arch_start_single_stepping:
6874 * Start single stepping.
6877 mono_arch_start_single_stepping (void)
/* Revoke all access to the SS trigger page: the per-seq-point read of it
 * then faults, delivering the single-step signal. */
6879 if (ss_trigger_page)
6880 mono_mprotect (ss_trigger_page, mono_pagesize (), 0);
6886 * mono_arch_stop_single_stepping:
6888 * Stop single stepping.
6891 mono_arch_stop_single_stepping (void)
/* Re-enable reads so the trigger-page loads no longer fault. */
6893 if (ss_trigger_page)
6894 mono_mprotect (ss_trigger_page, mono_pagesize (), MONO_MMAP_READ);
/* Signal delivered for trigger-page faults; platform-dependent (the
 * surrounding #if is not visible in this view) -- SIGBUS vs SIGSEGV. */
6900 #define DBG_SIGNAL SIGBUS
6902 #define DBG_SIGNAL SIGSEGV
6906 * mono_arch_is_single_step_event:
6908 * Return whenever the machine state in SIGCTX corresponds to a single
/* Classify a fault as a single-step event by checking whether the faulting
 * address lies within (a small window past) the SS trigger page. */
6912 mono_arch_is_single_step_event (void *info, void *sigctx)
6914 siginfo_t *sinfo = info;
6916 if (!ss_trigger_page)
6919 /* Sometimes the address is off by 4 */
6920 if (sinfo->si_addr >= ss_trigger_page && (guint8*)sinfo->si_addr <= (guint8*)ss_trigger_page + 128)
6927 * mono_arch_is_breakpoint_event:
6929 * Return whenever the machine state in SIGCTX corresponds to a breakpoint event.
/* Same idea as the single-step check, but against the BP trigger page and
 * additionally requiring the expected signal number. */
6932 mono_arch_is_breakpoint_event (void *info, void *sigctx)
6934 siginfo_t *sinfo = info;
6936 if (!ss_trigger_page)
6939 if (sinfo->si_signo == DBG_SIGNAL) {
6940 /* Sometimes the address is off by 4 */
6941 if (sinfo->si_addr >= bp_trigger_page && (guint8*)sinfo->si_addr <= (guint8*)bp_trigger_page + 128)
6951 * mono_arch_skip_breakpoint:
6953 * See mini-amd64.c for docs.
6956 mono_arch_skip_breakpoint (MonoContext *ctx, MonoJitInfo *ji)
/* Advance the saved PC past the 4-byte faulting instruction. */
6958 MONO_CONTEXT_SET_IP (ctx, (guint8*)MONO_CONTEXT_GET_IP (ctx) + 4);
6962 * mono_arch_skip_single_step:
6964 * See mini-amd64.c for docs.
6967 mono_arch_skip_single_step (MonoContext *ctx)
/* Advance the saved PC past the 4-byte faulting instruction. */
6969 MONO_CONTEXT_SET_IP (ctx, (guint8*)MONO_CONTEXT_GET_IP (ctx) + 4);
6972 #endif /* MONO_ARCH_SOFT_DEBUG_SUPPORTED */
6975 * mono_arch_get_seq_point_info:
6977 * See mini-amd64.c for docs.
6980 mono_arch_get_seq_point_info (MonoDomain *domain, guint8 *code)
6985 // FIXME: Add a free function
/* Look up (under the domain lock) the per-method SeqPointInfo keyed by
 * code address; allocate and cache it on first request. */
6987 mono_domain_lock (domain);
6988 info = g_hash_table_lookup (domain_jit_info (domain)->arch_seq_points,
6990 mono_domain_unlock (domain);
6993 ji = mono_jit_info_table_find (domain, (char*)code);
/* bp_addrs[] has one slot per 4-byte instruction of the method. */
6996 info = g_malloc0 (sizeof (SeqPointInfo) + ji->code_size);
6998 info->ss_trigger_page = ss_trigger_page;
6999 info->bp_trigger_page = bp_trigger_page;
7001 mono_domain_lock (domain);
7002 g_hash_table_insert (domain_jit_info (domain)->arch_seq_points,
7004 mono_domain_unlock (domain);
/* Initialize an extended LMF frame: link it to PREV_LMF, tagging the link
 * pointer with bit 1 so unwinders recognize it as a MonoLMFExt. */
7011 mono_arch_init_lmf_ext (MonoLMFExt *ext, gpointer prev_lmf)
7013 ext->lmf.previous_lmf = prev_lmf;
7014 /* Mark that this is a MonoLMFExt */
7015 ext->lmf.previous_lmf = (gpointer)(((gssize)ext->lmf.previous_lmf) | 2);
7016 ext->lmf.sp = (gssize)ext;
7020 * mono_arch_set_target:
7022 * Set the target architecture the JIT backend should generate code for, in the form
7023 * of a GNU target triplet. Only used in AOT mode.
7026 mono_arch_set_target (char *mtriple)
7028 /* The GNU target triple format is not very well documented */
/* Note: these are substring checks, so "armv7" also matches "armv7s"
 * triples -- the v7s branch below only adds v7s_supported on top. */
7029 if (strstr (mtriple, "armv7")) {
7030 v5_supported = TRUE;
7031 v6_supported = TRUE;
7032 v7_supported = TRUE;
7034 if (strstr (mtriple, "armv6")) {
7035 v5_supported = TRUE;
7036 v6_supported = TRUE;
7038 if (strstr (mtriple, "armv7s")) {
7039 v7s_supported = TRUE;
7041 if (strstr (mtriple, "thumbv7s")) {
7042 v5_supported = TRUE;
7043 v6_supported = TRUE;
7044 v7_supported = TRUE;
7045 v7s_supported = TRUE;
7046 thumb_supported = TRUE;
7047 thumb2_supported = TRUE;
/* Apple targets: assume at least ARMv6 with Thumb. */
7049 if (strstr (mtriple, "darwin") || strstr (mtriple, "ios")) {
7050 v5_supported = TRUE;
7051 v6_supported = TRUE;
7052 thumb_supported = TRUE;
7055 if (strstr (mtriple, "gnueabi"))
7056 eabi_supported = TRUE;
7059 #if defined(ENABLE_GSHAREDVT)
7061 #include "../../../mono-extensions/mono/mini/mini-arm-gsharedvt.c"
7063 #endif /* !MONOTOUCH */