2 * mini-arm.c: ARM backend for the Mono code generator
5 * Paolo Molaro (lupus@ximian.com)
6 * Dietmar Maurer (dietmar@ximian.com)
8 * (C) 2003 Ximian, Inc.
9 * Copyright 2003-2011 Novell, Inc (http://www.novell.com)
10 * Copyright 2011 Xamarin, Inc (http://www.xamarin.com)
15 #include <mono/metadata/appdomain.h>
16 #include <mono/metadata/debug-helpers.h>
17 #include <mono/utils/mono-mmap.h>
18 #include <mono/utils/mono-hwcap-arm.h>
24 #include "debugger-agent.h"
26 #include "mono/arch/arm/arm-vfp-codegen.h"
28 /* Sanity check: This makes no sense */
29 #if defined(ARM_FPU_NONE) && (defined(ARM_FPU_VFP) || defined(ARM_FPU_VFP_HARD))
30 #error "ARM_FPU_NONE is defined while one of ARM_FPU_VFP/ARM_FPU_VFP_HARD is defined"
34 * IS_SOFT_FLOAT: Is full software floating point used?
35 * IS_HARD_FLOAT: Is full hardware floating point used?
36 * IS_VFP: Is hardware floating point with software ABI used?
38 * These are not necessarily constants, e.g. IS_SOFT_FLOAT and
39 * IS_VFP may delegate to mono_arch_is_soft_float ().
42 #if defined(ARM_FPU_VFP_HARD)
43 #define IS_SOFT_FLOAT (FALSE)
44 #define IS_HARD_FLOAT (TRUE)
46 #elif defined(ARM_FPU_NONE)
47 #define IS_SOFT_FLOAT (mono_arch_is_soft_float ())
48 #define IS_HARD_FLOAT (FALSE)
49 #define IS_VFP (!mono_arch_is_soft_float ())
51 #define IS_SOFT_FLOAT (FALSE)
52 #define IS_HARD_FLOAT (FALSE)
56 #if defined(__ARM_EABI__) && defined(__linux__) && !defined(PLATFORM_ANDROID) && !defined(__native_client__)
57 #define HAVE_AEABI_READ_TP 1
60 #ifdef __native_client_codegen__
61 const guint kNaClAlignment = kNaClAlignmentARM;
62 const guint kNaClAlignmentMask = kNaClAlignmentMaskARM;
63 gint8 nacl_align_byte = -1; /* 0xff */
66 mono_arch_nacl_pad (guint8 *code, int pad)
68 /* Not yet properly implemented. */
69 g_assert_not_reached ();
74 mono_arch_nacl_skip_nops (guint8 *code)
76 /* Not yet properly implemented. */
77 g_assert_not_reached ();
81 #endif /* __native_client_codegen__ */
83 #define ALIGN_TO(val,align) ((((guint64)val) + ((align) - 1)) & ~((align) - 1))
86 void sys_icache_invalidate (void *start, size_t len);
89 /* This mutex protects architecture specific caches */
90 #define mono_mini_arch_lock() EnterCriticalSection (&mini_arch_mutex)
91 #define mono_mini_arch_unlock() LeaveCriticalSection (&mini_arch_mutex)
92 static CRITICAL_SECTION mini_arch_mutex;
94 static gboolean v5_supported = FALSE;
95 static gboolean v6_supported = FALSE;
96 static gboolean v7_supported = FALSE;
97 static gboolean v7s_supported = FALSE;
98 static gboolean thumb_supported = FALSE;
99 static gboolean thumb2_supported = FALSE;
101 * Whether to use the ARM EABI
103 static gboolean eabi_supported = FALSE;
106 * Whether to use the iPhone ABI extensions:
107 * http://developer.apple.com/library/ios/documentation/Xcode/Conceptual/iPhoneOSABIReference/index.html
108 * Basically, r7 is used as a frame pointer and it should point to the saved r7 + lr.
109 * This is required for debugging/profiling tools to work, but it has some overhead so it should
110 * only be turned on in debug builds.
112 static gboolean iphone_abi = FALSE;
115 * The FPU we are generating code for. This is NOT runtime configurable right now,
116 * since some things like MONO_ARCH_CALLEE_FREGS still depend on defines.
118 static MonoArmFPU arm_fpu;
120 #if defined(ARM_FPU_VFP_HARD)
122 * On armhf, d0-d7 are used for argument passing and d8-d15
123 * must be preserved across calls, which leaves us no room
124 * for scratch registers. So we use d14-d15 but back up their
125 * previous contents to a stack slot before using them - see
126 * mono_arm_emit_vfp_scratch_save/_restore ().
128 static int vfp_scratch1 = ARM_VFP_D14;
129 static int vfp_scratch2 = ARM_VFP_D15;
132 * On armel, d0-d7 do not need to be preserved, so we can
133 * freely make use of them as scratch registers.
135 static int vfp_scratch1 = ARM_VFP_D0;
136 static int vfp_scratch2 = ARM_VFP_D1;
141 static volatile int ss_trigger_var = 0;
143 static gpointer single_step_func_wrapper;
144 static gpointer breakpoint_func_wrapper;
147 * The code generated for sequence points reads from this location, which is
148 * made read-only when single stepping is enabled.
150 static gpointer ss_trigger_page;
152 /* Enabled breakpoints read from this trigger page */
153 static gpointer bp_trigger_page;
155 /* Structure used by the sequence points in AOTed code */
157 gpointer ss_trigger_page;
158 gpointer bp_trigger_page;
159 guint8* bp_addrs [MONO_ZERO_LEN_ARRAY];
164 * floating point support: on ARM it is a mess, there are at least 3
165 * different setups, each of which is binary-incompatible with the others.
166 * 1) FPA: old and ugly, but unfortunately what current distros use
167 * the double binary format has the two words swapped. 8 double registers.
168 * Implemented usually by kernel emulation.
169 * 2) softfloat: the compiler emulates all the fp ops. Usually uses the
170 * ugly swapped double format (I guess a softfloat-vfp exists, too, though).
171 * 3) VFP: the new and actually sensible and useful FP support. Implemented
172 * in HW or kernel-emulated, requires new tools. I think this is what symbian uses.
174 * We do not care about FPA. We will support soft float and VFP.
176 int mono_exc_esp_offset = 0;
178 #define arm_is_imm12(v) ((v) > -4096 && (v) < 4096)
179 #define arm_is_imm8(v) ((v) > -256 && (v) < 256)
180 #define arm_is_fpimm8(v) ((v) >= -1020 && (v) <= 1020)
182 #define LDR_MASK ((0xf << ARMCOND_SHIFT) | (3 << 26) | (1 << 22) | (1 << 20) | (15 << 12))
183 #define LDR_PC_VAL ((ARMCOND_AL << ARMCOND_SHIFT) | (1 << 26) | (0 << 22) | (1 << 20) | (15 << 12))
184 #define IS_LDR_PC(val) (((val) & LDR_MASK) == LDR_PC_VAL)
186 #define ADD_LR_PC_4 ((ARMCOND_AL << ARMCOND_SHIFT) | (1 << 25) | (1 << 23) | (ARMREG_PC << 16) | (ARMREG_LR << 12) | 4)
187 #define MOV_LR_PC ((ARMCOND_AL << ARMCOND_SHIFT) | (1 << 24) | (0xa << 20) | (ARMREG_LR << 12) | ARMREG_PC)
188 //#define DEBUG_IMT 0
190 /* A variant of ARM_LDR_IMM which can handle large offsets */
191 #define ARM_LDR_IMM_GENERAL(code, dreg, basereg, offset, scratch_reg) do { \
192 if (arm_is_imm12 ((offset))) { \
193 ARM_LDR_IMM (code, (dreg), (basereg), (offset)); \
195 g_assert ((scratch_reg) != (basereg)); \
196 code = mono_arm_emit_load_imm (code, (scratch_reg), (offset)); \
197 ARM_LDR_REG_REG (code, (dreg), (basereg), (scratch_reg)); \
201 #define ARM_STR_IMM_GENERAL(code, dreg, basereg, offset, scratch_reg) do { \
202 if (arm_is_imm12 ((offset))) { \
203 ARM_STR_IMM (code, (dreg), (basereg), (offset)); \
205 g_assert ((scratch_reg) != (basereg)); \
206 code = mono_arm_emit_load_imm (code, (scratch_reg), (offset)); \
207 ARM_STR_REG_REG (code, (dreg), (basereg), (scratch_reg)); \
211 static void mono_arch_compute_omit_fp (MonoCompile *cfg);
214 mono_arch_regname (int reg)
216 static const char * rnames[] = {
217 "arm_r0", "arm_r1", "arm_r2", "arm_r3", "arm_v1",
218 "arm_v2", "arm_v3", "arm_v4", "arm_v5", "arm_v6",
219 "arm_v7", "arm_fp", "arm_ip", "arm_sp", "arm_lr",
222 if (reg >= 0 && reg < 16)
228 mono_arch_fregname (int reg)
230 static const char * rnames[] = {
231 "arm_f0", "arm_f1", "arm_f2", "arm_f3", "arm_f4",
232 "arm_f5", "arm_f6", "arm_f7", "arm_f8", "arm_f9",
233 "arm_f10", "arm_f11", "arm_f12", "arm_f13", "arm_f14",
234 "arm_f15", "arm_f16", "arm_f17", "arm_f18", "arm_f19",
235 "arm_f20", "arm_f21", "arm_f22", "arm_f23", "arm_f24",
236 "arm_f25", "arm_f26", "arm_f27", "arm_f28", "arm_f29",
239 if (reg >= 0 && reg < 32)
247 emit_big_add (guint8 *code, int dreg, int sreg, int imm)
249 int imm8, rot_amount;
250 if ((imm8 = mono_arm_is_rotated_imm8 (imm, &rot_amount)) >= 0) {
251 ARM_ADD_REG_IMM (code, dreg, sreg, imm8, rot_amount);
254 g_assert (dreg != sreg);
255 code = mono_arm_emit_load_imm (code, dreg, imm);
256 ARM_ADD_REG_REG (code, dreg, dreg, sreg);
261 emit_memcpy (guint8 *code, int size, int dreg, int doffset, int sreg, int soffset)
263 /* we can use r0-r3, since this is called only for incoming args on the stack */
264 if (size > sizeof (gpointer) * 4) {
266 code = emit_big_add (code, ARMREG_R0, sreg, soffset);
267 code = emit_big_add (code, ARMREG_R1, dreg, doffset);
268 start_loop = code = mono_arm_emit_load_imm (code, ARMREG_R2, size);
269 ARM_LDR_IMM (code, ARMREG_R3, ARMREG_R0, 0);
270 ARM_STR_IMM (code, ARMREG_R3, ARMREG_R1, 0);
271 ARM_ADD_REG_IMM8 (code, ARMREG_R0, ARMREG_R0, 4);
272 ARM_ADD_REG_IMM8 (code, ARMREG_R1, ARMREG_R1, 4);
273 ARM_SUBS_REG_IMM8 (code, ARMREG_R2, ARMREG_R2, 4);
274 ARM_B_COND (code, ARMCOND_NE, 0);
275 arm_patch (code - 4, start_loop);
278 if (arm_is_imm12 (doffset) && arm_is_imm12 (doffset + size) &&
279 arm_is_imm12 (soffset) && arm_is_imm12 (soffset + size)) {
281 ARM_LDR_IMM (code, ARMREG_LR, sreg, soffset);
282 ARM_STR_IMM (code, ARMREG_LR, dreg, doffset);
288 code = emit_big_add (code, ARMREG_R0, sreg, soffset);
289 code = emit_big_add (code, ARMREG_R1, dreg, doffset);
290 doffset = soffset = 0;
292 ARM_LDR_IMM (code, ARMREG_LR, ARMREG_R0, soffset);
293 ARM_STR_IMM (code, ARMREG_LR, ARMREG_R1, doffset);
299 g_assert (size == 0);
304 emit_call_reg (guint8 *code, int reg)
307 ARM_BLX_REG (code, reg);
309 #ifdef USE_JUMP_TABLES
310 g_assert_not_reached ();
312 ARM_MOV_REG_REG (code, ARMREG_LR, ARMREG_PC);
316 ARM_MOV_REG_REG (code, ARMREG_PC, reg);
322 emit_call_seq (MonoCompile *cfg, guint8 *code)
324 #ifdef USE_JUMP_TABLES
325 code = mono_arm_patchable_bl (code, ARMCOND_AL);
327 if (cfg->method->dynamic) {
328 ARM_LDR_IMM (code, ARMREG_IP, ARMREG_PC, 0);
330 *(gpointer*)code = NULL;
332 code = emit_call_reg (code, ARMREG_IP);
341 mono_arm_patchable_b (guint8 *code, int cond)
343 #ifdef USE_JUMP_TABLES
346 jte = mono_jumptable_add_entry ();
347 code = mono_arm_load_jumptable_entry (code, jte, ARMREG_IP);
348 ARM_BX_COND (code, cond, ARMREG_IP);
350 ARM_B_COND (code, cond, 0);
356 mono_arm_patchable_bl (guint8 *code, int cond)
358 #ifdef USE_JUMP_TABLES
361 jte = mono_jumptable_add_entry ();
362 code = mono_arm_load_jumptable_entry (code, jte, ARMREG_IP);
363 ARM_BLX_REG_COND (code, cond, ARMREG_IP);
365 ARM_BL_COND (code, cond, 0);
370 #ifdef USE_JUMP_TABLES
372 mono_arm_load_jumptable_entry_addr (guint8 *code, gpointer *jte, ARMReg reg)
374 ARM_MOVW_REG_IMM (code, reg, GPOINTER_TO_UINT(jte) & 0xffff);
375 ARM_MOVT_REG_IMM (code, reg, (GPOINTER_TO_UINT(jte) >> 16) & 0xffff);
380 mono_arm_load_jumptable_entry (guint8 *code, gpointer* jte, ARMReg reg)
382 code = mono_arm_load_jumptable_entry_addr (code, jte, reg);
383 ARM_LDR_IMM (code, reg, reg, 0);
389 emit_move_return_value (MonoCompile *cfg, MonoInst *ins, guint8 *code)
391 switch (ins->opcode) {
394 case OP_FCALL_MEMBASE:
396 if (((MonoCallInst*)ins)->signature->ret->type == MONO_TYPE_R4) {
398 ARM_CVTS (code, ins->dreg, ARM_VFP_F0);
400 ARM_FMSR (code, ins->dreg, ARMREG_R0);
401 ARM_CVTS (code, ins->dreg, ins->dreg);
405 ARM_CPYD (code, ins->dreg, ARM_VFP_D0);
407 ARM_FMDRR (code, ARMREG_R0, ARMREG_R1, ins->dreg);
420 * Emit code to push an LMF structure on the LMF stack.
421 * On arm, this is intermixed with the initialization of other fields of the structure.
424 emit_save_lmf (MonoCompile *cfg, guint8 *code, gint32 lmf_offset)
426 gboolean get_lmf_fast = FALSE;
429 #ifdef HAVE_AEABI_READ_TP
430 gint32 lmf_addr_tls_offset = mono_get_lmf_addr_tls_offset ();
432 if (lmf_addr_tls_offset != -1) {
435 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_INTERNAL_METHOD,
436 (gpointer)"__aeabi_read_tp");
437 code = emit_call_seq (cfg, code);
439 ARM_LDR_IMM (code, ARMREG_R0, ARMREG_R0, lmf_addr_tls_offset);
445 if (cfg->method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) {
448 /* Inline mono_get_lmf_addr () */
449 /* jit_tls = pthread_getspecific (mono_jit_tls_id); lmf_addr = &jit_tls->lmf; */
451 /* Load mono_jit_tls_id */
453 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_JIT_TLS_ID, NULL);
454 ARM_LDR_IMM (code, ARMREG_R0, ARMREG_PC, 0);
456 *(gpointer*)code = NULL;
458 ARM_LDR_REG_REG (code, ARMREG_R0, ARMREG_PC, ARMREG_R0);
459 /* call pthread_getspecific () */
460 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_INTERNAL_METHOD,
461 (gpointer)"pthread_getspecific");
462 code = emit_call_seq (cfg, code);
463 /* lmf_addr = &jit_tls->lmf */
464 lmf_offset = G_STRUCT_OFFSET (MonoJitTlsData, lmf);
465 g_assert (arm_is_imm8 (lmf_offset));
466 ARM_ADD_REG_IMM (code, ARMREG_R0, ARMREG_R0, lmf_offset, 0);
473 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_INTERNAL_METHOD,
474 (gpointer)"mono_get_lmf_addr");
475 code = emit_call_seq (cfg, code);
477 /* we build the MonoLMF structure on the stack - see mini-arm.h */
478 /* lmf_offset is the offset from the previous stack pointer,
479 * alloc_size is the total stack space allocated, so the offset
480 * of MonoLMF from the current stack ptr is alloc_size - lmf_offset.
481 * The pointer to the struct is put in r1 (new_lmf).
482 * ip is used as scratch
483 * The callee-saved registers are already in the MonoLMF structure
485 code = emit_big_add (code, ARMREG_R1, ARMREG_SP, lmf_offset);
486 /* r0 is the result from mono_get_lmf_addr () */
487 ARM_STR_IMM (code, ARMREG_R0, ARMREG_R1, G_STRUCT_OFFSET (MonoLMF, lmf_addr));
488 /* new_lmf->previous_lmf = *lmf_addr */
489 ARM_LDR_IMM (code, ARMREG_IP, ARMREG_R0, G_STRUCT_OFFSET (MonoLMF, previous_lmf));
490 ARM_STR_IMM (code, ARMREG_IP, ARMREG_R1, G_STRUCT_OFFSET (MonoLMF, previous_lmf));
491 /* *(lmf_addr) = r1 */
492 ARM_STR_IMM (code, ARMREG_R1, ARMREG_R0, G_STRUCT_OFFSET (MonoLMF, previous_lmf));
493 /* Skip method (only needed for trampoline LMF frames) */
494 ARM_STR_IMM (code, ARMREG_SP, ARMREG_R1, G_STRUCT_OFFSET (MonoLMF, sp));
495 ARM_STR_IMM (code, ARMREG_FP, ARMREG_R1, G_STRUCT_OFFSET (MonoLMF, fp));
496 /* save the current IP */
497 ARM_MOV_REG_REG (code, ARMREG_IP, ARMREG_PC);
498 ARM_STR_IMM (code, ARMREG_IP, ARMREG_R1, G_STRUCT_OFFSET (MonoLMF, ip));
500 for (i = 0; i < sizeof (MonoLMF); i += sizeof (mgreg_t))
501 mini_gc_set_slot_type_from_fp (cfg, lmf_offset + i, SLOT_NOREF);
512 emit_float_args (MonoCompile *cfg, MonoCallInst *inst, guint8 *code, int *max_len, guint *offset)
516 for (list = inst->float_args; list; list = list->next) {
517 FloatArgData *fad = list->data;
518 MonoInst *var = get_vreg_to_inst (cfg, fad->vreg);
519 gboolean imm = arm_is_fpimm8 (var->inst_offset);
521 /* 4+1 insns for emit_big_add () and 1 for FLDS. */
527 if (*offset + *max_len > cfg->code_size) {
528 cfg->code_size += *max_len;
529 cfg->native_code = g_realloc (cfg->native_code, cfg->code_size);
531 code = cfg->native_code + *offset;
535 code = emit_big_add (code, ARMREG_LR, var->inst_basereg, var->inst_offset);
536 ARM_FLDS (code, fad->hreg, ARMREG_LR, 0);
538 ARM_FLDS (code, fad->hreg, var->inst_basereg, var->inst_offset);
540 *offset = code - cfg->native_code;
547 mono_arm_emit_vfp_scratch_save (MonoCompile *cfg, guint8 *code, int reg)
551 g_assert (reg == vfp_scratch1 || reg == vfp_scratch2);
553 inst = (MonoInst *) cfg->arch.vfp_scratch_slots [reg == vfp_scratch1 ? 0 : 1];
556 if (!arm_is_fpimm8 (inst->inst_offset)) {
557 code = emit_big_add (code, ARMREG_LR, inst->inst_basereg, inst->inst_offset);
558 ARM_FSTD (code, reg, ARMREG_LR, 0);
560 ARM_FSTD (code, reg, inst->inst_basereg, inst->inst_offset);
567 mono_arm_emit_vfp_scratch_restore (MonoCompile *cfg, guint8 *code, int reg)
571 g_assert (reg == vfp_scratch1 || reg == vfp_scratch2);
573 inst = (MonoInst *) cfg->arch.vfp_scratch_slots [reg == vfp_scratch1 ? 0 : 1];
576 if (!arm_is_fpimm8 (inst->inst_offset)) {
577 code = emit_big_add (code, ARMREG_LR, inst->inst_basereg, inst->inst_offset);
578 ARM_FLDD (code, reg, ARMREG_LR, 0);
580 ARM_FLDD (code, reg, inst->inst_basereg, inst->inst_offset);
589 * Emit code to pop an LMF structure from the LMF stack.
592 emit_restore_lmf (MonoCompile *cfg, guint8 *code, gint32 lmf_offset)
596 if (lmf_offset < 32) {
597 basereg = cfg->frame_reg;
602 code = emit_big_add (code, ARMREG_R2, cfg->frame_reg, lmf_offset);
605 /* ip = previous_lmf */
606 ARM_LDR_IMM (code, ARMREG_IP, basereg, offset + G_STRUCT_OFFSET (MonoLMF, previous_lmf));
608 ARM_LDR_IMM (code, ARMREG_LR, basereg, offset + G_STRUCT_OFFSET (MonoLMF, lmf_addr));
609 /* *(lmf_addr) = previous_lmf */
610 ARM_STR_IMM (code, ARMREG_IP, ARMREG_LR, G_STRUCT_OFFSET (MonoLMF, previous_lmf));
615 #endif /* #ifndef DISABLE_JIT */
618 * mono_arch_get_argument_info:
619 * @csig: a method signature
620 * @param_count: the number of parameters to consider
621 * @arg_info: an array to store the result infos
623 * Gathers information on parameters such as size, alignment and
624 * padding. arg_info should be large enough to hold param_count + 1 entries.
626 * Returns the size of the activation frame.
629 mono_arch_get_argument_info (MonoGenericSharingContext *gsctx, MonoMethodSignature *csig, int param_count, MonoJitArgumentInfo *arg_info)
631 int k, frame_size = 0;
632 guint32 size, align, pad;
636 t = mini_type_get_underlying_type (gsctx, csig->ret);
637 if (MONO_TYPE_ISSTRUCT (t)) {
638 frame_size += sizeof (gpointer);
642 arg_info [0].offset = offset;
645 frame_size += sizeof (gpointer);
649 arg_info [0].size = frame_size;
651 for (k = 0; k < param_count; k++) {
652 size = mini_type_stack_size_full (NULL, csig->params [k], &align, csig->pinvoke);
654 /* ignore alignment for now */
657 frame_size += pad = (align - (frame_size & (align - 1))) & (align - 1);
658 arg_info [k].pad = pad;
660 arg_info [k + 1].pad = 0;
661 arg_info [k + 1].size = size;
663 arg_info [k + 1].offset = offset;
667 align = MONO_ARCH_FRAME_ALIGNMENT;
668 frame_size += pad = (align - (frame_size & (align - 1))) & (align - 1);
669 arg_info [k].pad = pad;
674 #define MAX_ARCH_DELEGATE_PARAMS 3
677 get_delegate_invoke_impl (gboolean has_target, gboolean param_count, guint32 *code_size)
679 guint8 *code, *start;
682 start = code = mono_global_codeman_reserve (12);
684 /* Replace the this argument with the target */
685 ARM_LDR_IMM (code, ARMREG_IP, ARMREG_R0, G_STRUCT_OFFSET (MonoDelegate, method_ptr));
686 ARM_LDR_IMM (code, ARMREG_R0, ARMREG_R0, G_STRUCT_OFFSET (MonoDelegate, target));
687 ARM_MOV_REG_REG (code, ARMREG_PC, ARMREG_IP);
689 g_assert ((code - start) <= 12);
691 mono_arch_flush_icache (start, 12);
695 size = 8 + param_count * 4;
696 start = code = mono_global_codeman_reserve (size);
698 ARM_LDR_IMM (code, ARMREG_IP, ARMREG_R0, G_STRUCT_OFFSET (MonoDelegate, method_ptr));
699 /* slide down the arguments */
700 for (i = 0; i < param_count; ++i) {
701 ARM_MOV_REG_REG (code, (ARMREG_R0 + i), (ARMREG_R0 + i + 1));
703 ARM_MOV_REG_REG (code, ARMREG_PC, ARMREG_IP);
705 g_assert ((code - start) <= size);
707 mono_arch_flush_icache (start, size);
711 *code_size = code - start;
717 * mono_arch_get_delegate_invoke_impls:
719 * Return a list of MonoAotTrampInfo structures for the delegate invoke impl
723 mono_arch_get_delegate_invoke_impls (void)
731 code = get_delegate_invoke_impl (TRUE, 0, &code_len);
732 res = g_slist_prepend (res, mono_tramp_info_create ("delegate_invoke_impl_has_target", code, code_len, NULL, NULL));
734 for (i = 0; i <= MAX_ARCH_DELEGATE_PARAMS; ++i) {
735 code = get_delegate_invoke_impl (FALSE, i, &code_len);
736 tramp_name = g_strdup_printf ("delegate_invoke_impl_target_%d", i);
737 res = g_slist_prepend (res, mono_tramp_info_create (tramp_name, code, code_len, NULL, NULL));
745 mono_arch_get_delegate_invoke_impl (MonoMethodSignature *sig, gboolean has_target)
747 guint8 *code, *start;
750 /* FIXME: Support more cases */
751 sig_ret = mini_type_get_underlying_type (NULL, sig->ret);
752 if (MONO_TYPE_ISSTRUCT (sig_ret))
756 static guint8* cached = NULL;
757 mono_mini_arch_lock ();
759 mono_mini_arch_unlock ();
764 start = mono_aot_get_trampoline ("delegate_invoke_impl_has_target");
766 start = get_delegate_invoke_impl (TRUE, 0, NULL);
768 mono_mini_arch_unlock ();
771 static guint8* cache [MAX_ARCH_DELEGATE_PARAMS + 1] = {NULL};
774 if (sig->param_count > MAX_ARCH_DELEGATE_PARAMS)
776 for (i = 0; i < sig->param_count; ++i)
777 if (!mono_is_regsize_var (sig->params [i]))
780 mono_mini_arch_lock ();
781 code = cache [sig->param_count];
783 mono_mini_arch_unlock ();
788 char *name = g_strdup_printf ("delegate_invoke_impl_target_%d", sig->param_count);
789 start = mono_aot_get_trampoline (name);
792 start = get_delegate_invoke_impl (FALSE, sig->param_count, NULL);
794 cache [sig->param_count] = start;
795 mono_mini_arch_unlock ();
803 mono_arch_get_this_arg_from_call (mgreg_t *regs, guint8 *code)
805 return (gpointer)regs [ARMREG_R0];
809 * Initialize the cpu to execute managed code.
812 mono_arch_cpu_init (void)
814 #if defined(__APPLE__)
817 i8_align = __alignof__ (gint64);
822 create_function_wrapper (gpointer function)
824 guint8 *start, *code;
826 start = code = mono_global_codeman_reserve (96);
829 * Construct the MonoContext structure on the stack.
832 ARM_SUB_REG_IMM8 (code, ARMREG_SP, ARMREG_SP, sizeof (MonoContext));
834 /* save ip, lr and pc into their correspodings ctx.regs slots. */
835 ARM_STR_IMM (code, ARMREG_IP, ARMREG_SP, G_STRUCT_OFFSET (MonoContext, regs) + sizeof (mgreg_t) * ARMREG_IP);
836 ARM_STR_IMM (code, ARMREG_LR, ARMREG_SP, G_STRUCT_OFFSET (MonoContext, regs) + 4 * ARMREG_LR);
837 ARM_STR_IMM (code, ARMREG_LR, ARMREG_SP, G_STRUCT_OFFSET (MonoContext, regs) + 4 * ARMREG_PC);
839 /* save r0..r10 and fp */
840 ARM_ADD_REG_IMM8 (code, ARMREG_IP, ARMREG_SP, G_STRUCT_OFFSET (MonoContext, regs));
841 ARM_STM (code, ARMREG_IP, 0x0fff);
843 /* now we can update fp. */
844 ARM_MOV_REG_REG (code, ARMREG_FP, ARMREG_SP);
846 /* make ctx.esp hold the actual value of sp at the beginning of this method. */
847 ARM_ADD_REG_IMM8 (code, ARMREG_R0, ARMREG_FP, sizeof (MonoContext));
848 ARM_STR_IMM (code, ARMREG_R0, ARMREG_IP, 4 * ARMREG_SP);
849 ARM_STR_IMM (code, ARMREG_R0, ARMREG_FP, G_STRUCT_OFFSET (MonoContext, regs) + 4 * ARMREG_SP);
851 /* make ctx.eip hold the address of the call. */
852 ARM_SUB_REG_IMM8 (code, ARMREG_LR, ARMREG_LR, 4);
853 ARM_STR_IMM (code, ARMREG_LR, ARMREG_SP, G_STRUCT_OFFSET (MonoContext, pc));
855 /* r0 now points to the MonoContext */
856 ARM_MOV_REG_REG (code, ARMREG_R0, ARMREG_FP);
859 #ifdef USE_JUMP_TABLES
861 gpointer *jte = mono_jumptable_add_entry ();
862 code = mono_arm_load_jumptable_entry (code, jte, ARMREG_IP);
866 ARM_LDR_IMM (code, ARMREG_IP, ARMREG_PC, 0);
868 *(gpointer*)code = function;
871 ARM_BLX_REG (code, ARMREG_IP);
873 /* we're back; save ctx.eip and ctx.esp into the corresponding regs slots. */
874 ARM_LDR_IMM (code, ARMREG_R0, ARMREG_FP, G_STRUCT_OFFSET (MonoContext, pc));
875 ARM_STR_IMM (code, ARMREG_R0, ARMREG_FP, G_STRUCT_OFFSET (MonoContext, regs) + 4 * ARMREG_LR);
876 ARM_STR_IMM (code, ARMREG_R0, ARMREG_FP, G_STRUCT_OFFSET (MonoContext, regs) + 4 * ARMREG_PC);
878 /* make ip point to the regs array, then restore everything, including pc. */
879 ARM_ADD_REG_IMM8 (code, ARMREG_IP, ARMREG_FP, G_STRUCT_OFFSET (MonoContext, regs));
880 ARM_LDM (code, ARMREG_IP, 0xffff);
882 mono_arch_flush_icache (start, code - start);
888 * Initialize architecture specific code.
891 mono_arch_init (void)
893 const char *cpu_arch;
895 InitializeCriticalSection (&mini_arch_mutex);
896 #ifdef MONO_ARCH_SOFT_DEBUG_SUPPORTED
897 if (mini_get_debug_options ()->soft_breakpoints) {
898 single_step_func_wrapper = create_function_wrapper (debugger_agent_single_step_from_context);
899 breakpoint_func_wrapper = create_function_wrapper (debugger_agent_breakpoint_from_context);
904 ss_trigger_page = mono_valloc (NULL, mono_pagesize (), MONO_MMAP_READ|MONO_MMAP_32BIT);
905 bp_trigger_page = mono_valloc (NULL, mono_pagesize (), MONO_MMAP_READ|MONO_MMAP_32BIT);
906 mono_mprotect (bp_trigger_page, mono_pagesize (), 0);
909 mono_aot_register_jit_icall ("mono_arm_throw_exception", mono_arm_throw_exception);
910 mono_aot_register_jit_icall ("mono_arm_throw_exception_by_token", mono_arm_throw_exception_by_token);
911 mono_aot_register_jit_icall ("mono_arm_resume_unwind", mono_arm_resume_unwind);
912 #if defined(ENABLE_GSHAREDVT)
913 mono_aot_register_jit_icall ("mono_arm_start_gsharedvt_call", mono_arm_start_gsharedvt_call);
916 #if defined(__ARM_EABI__)
917 eabi_supported = TRUE;
920 #if defined(ARM_FPU_VFP_HARD)
921 arm_fpu = MONO_ARM_FPU_VFP_HARD;
923 arm_fpu = MONO_ARM_FPU_VFP;
925 #if defined(ARM_FPU_NONE) && !defined(__APPLE__)
926 /* If we're compiling with a soft float fallback and it
927 turns out that no VFP unit is available, we need to
928 switch to soft float. We don't do this for iOS, since
929 iOS devices always have a VFP unit. */
930 if (!mono_hwcap_arm_has_vfp)
931 arm_fpu = MONO_ARM_FPU_NONE;
935 v5_supported = mono_hwcap_arm_is_v5;
936 v6_supported = mono_hwcap_arm_is_v6;
937 v7_supported = mono_hwcap_arm_is_v7;
938 v7s_supported = mono_hwcap_arm_is_v7s;
940 #if defined(__APPLE__)
941 /* iOS is special-cased here because we don't yet
942 have a way to properly detect CPU features on it. */
943 thumb_supported = TRUE;
946 thumb_supported = mono_hwcap_arm_has_thumb;
947 thumb2_supported = mono_hwcap_arm_has_thumb2;
950 /* Format: armv(5|6|7[s])[-thumb[2]] */
951 cpu_arch = g_getenv ("MONO_CPU_ARCH");
953 /* Do this here so it overrides any detection. */
955 if (strncmp (cpu_arch, "armv", 4) == 0) {
956 v5_supported = cpu_arch [4] >= '5';
957 v6_supported = cpu_arch [4] >= '6';
958 v7_supported = cpu_arch [4] >= '7';
959 v7s_supported = strncmp (cpu_arch, "armv7s", 6) == 0;
962 thumb_supported = strstr (cpu_arch, "thumb") != NULL;
963 thumb2_supported = strstr (cpu_arch, "thumb2") != NULL;
968 * Cleanup architecture specific code.
971 mono_arch_cleanup (void)
976 * This function returns the optimizations supported on this cpu.
979 mono_arch_cpu_optimizations (guint32 *exclude_mask)
981 /* no arm-specific optimizations yet */
987 * This function test for all SIMD functions supported.
989 * Returns a bitmask corresponding to all supported versions.
993 mono_arch_cpu_enumerate_simd_versions (void)
995 /* SIMD is currently unimplemented */
1003 mono_arch_opcode_needs_emulation (MonoCompile *cfg, int opcode)
1005 if (v7s_supported) {
1019 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
1021 mono_arch_is_soft_float (void)
1023 return arm_fpu == MONO_ARM_FPU_NONE;
1028 mono_arm_is_hard_float (void)
1030 return arm_fpu == MONO_ARM_FPU_VFP_HARD;
1034 is_regsize_var (MonoGenericSharingContext *gsctx, MonoType *t) {
1037 t = mini_type_get_underlying_type (gsctx, t);
1044 case MONO_TYPE_FNPTR:
1046 case MONO_TYPE_OBJECT:
1047 case MONO_TYPE_STRING:
1048 case MONO_TYPE_CLASS:
1049 case MONO_TYPE_SZARRAY:
1050 case MONO_TYPE_ARRAY:
1052 case MONO_TYPE_GENERICINST:
1053 if (!mono_type_generic_inst_is_valuetype (t))
1056 case MONO_TYPE_VALUETYPE:
1063 mono_arch_get_allocatable_int_vars (MonoCompile *cfg)
1068 for (i = 0; i < cfg->num_varinfo; i++) {
1069 MonoInst *ins = cfg->varinfo [i];
1070 MonoMethodVar *vmv = MONO_VARINFO (cfg, i);
1073 if (vmv->range.first_use.abs_pos >= vmv->range.last_use.abs_pos)
1076 if (ins->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT) || (ins->opcode != OP_LOCAL && ins->opcode != OP_ARG))
1079 /* we can only allocate 32 bit values */
1080 if (is_regsize_var (cfg->generic_sharing_context, ins->inst_vtype)) {
1081 g_assert (MONO_VARINFO (cfg, i)->reg == -1);
1082 g_assert (i == vmv->idx);
1083 vars = mono_varlist_insert_sorted (cfg, vars, vmv, FALSE);
1090 #define USE_EXTRA_TEMPS 0
1093 mono_arch_get_global_int_regs (MonoCompile *cfg)
1097 mono_arch_compute_omit_fp (cfg);
1100 * FIXME: Interface calls might go through a static rgctx trampoline which
1101 * sets V5, but it doesn't save it, so we need to save it ourselves, and
1104 if (cfg->flags & MONO_CFG_HAS_CALLS)
1105 cfg->uses_rgctx_reg = TRUE;
1107 if (cfg->arch.omit_fp)
1108 regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_FP));
1109 regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_V1));
1110 regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_V2));
1111 regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_V3));
1113 /* V4=R7 is used as a frame pointer, but V7=R10 is preserved */
1114 regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_V7));
1116 regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_V4));
1117 if (!(cfg->compile_aot || cfg->uses_rgctx_reg || COMPILE_LLVM (cfg)))
1118 /* V5 is reserved for passing the vtable/rgctx/IMT method */
1119 regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_V5));
1120 /*regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_V6));*/
1121 /*regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_V7));*/
1127 * mono_arch_regalloc_cost:
1129 * Return the cost, in number of memory references, of the action of
1130 * allocating the variable VMV into a register during global register
1134 mono_arch_regalloc_cost (MonoCompile *cfg, MonoMethodVar *vmv)
1140 #endif /* #ifndef DISABLE_JIT */
1142 #ifndef __GNUC_PREREQ
1143 #define __GNUC_PREREQ(maj, min) (0)
1147 mono_arch_flush_icache (guint8 *code, gint size)
1149 #if defined(__native_client__)
1150 // For Native Client we don't have to flush i-cache here,
1151 // as it's being done by dyncode interface.
1154 #ifdef MONO_CROSS_COMPILE
1156 sys_icache_invalidate (code, size);
1157 #elif __GNUC_PREREQ(4, 1)
1158 __clear_cache (code, code + size);
1159 #elif defined(PLATFORM_ANDROID)
1160 const int syscall = 0xf0002;
1168 : "r" (code), "r" (code + size), "r" (syscall)
1169 : "r0", "r1", "r7", "r2"
1172 __asm __volatile ("mov r0, %0\n"
1175 "swi 0x9f0002 @ sys_cacheflush"
1177 : "r" (code), "r" (code + size), "r" (0)
1178 : "r0", "r1", "r3" );
1180 #endif /* !__native_client__ */
1191 RegTypeStructByAddr,
1192 /* gsharedvt argument passed by addr in greg */
1193 RegTypeGSharedVtInReg,
1194 /* gsharedvt argument passed by addr on stack */
1195 RegTypeGSharedVtOnStack,
1200 guint16 vtsize; /* in param area */
1204 guint8 size : 4; /* 1, 2, 4, 8, or regs used by RegTypeStructByVal */
1209 guint32 stack_usage;
1210 gboolean vtype_retaddr;
1211 /* The index of the vret arg in the argument list */
1221 /*#define __alignof__(a) sizeof(a)*/
1222 #define __alignof__(type) G_STRUCT_OFFSET(struct { char c; type x; }, x)
1225 #define PARAM_REGS 4
1228 add_general (guint *gr, guint *stack_size, ArgInfo *ainfo, gboolean simple)
1231 if (*gr > ARMREG_R3) {
1233 ainfo->offset = *stack_size;
1234 ainfo->reg = ARMREG_SP; /* in the caller */
1235 ainfo->storage = RegTypeBase;
1238 ainfo->storage = RegTypeGeneral;
1245 split = i8_align == 4;
1250 if (*gr == ARMREG_R3 && split) {
1251 /* first word in r3 and the second on the stack */
1252 ainfo->offset = *stack_size;
1253 ainfo->reg = ARMREG_SP; /* in the caller */
1254 ainfo->storage = RegTypeBaseGen;
1256 } else if (*gr >= ARMREG_R3) {
1257 if (eabi_supported) {
1258 /* darwin aligns longs to 4 byte only */
1259 if (i8_align == 8) {
1264 ainfo->offset = *stack_size;
1265 ainfo->reg = ARMREG_SP; /* in the caller */
1266 ainfo->storage = RegTypeBase;
1269 if (eabi_supported) {
1270 if (i8_align == 8 && ((*gr) & 1))
1273 ainfo->storage = RegTypeIRegPair;
/*
 * add_float:
 * Assign a hard-float (VFP) argument to a single-precision register slot,
 * to a previously skipped "spare" slot, or to the stack, following the
 * AAPCS VFP back-filling rules.  *fpr counts single-precision registers;
 * *float_spare holds a skipped odd register index or -1.
 * NOTE(review): some lines of the original body are not visible in this
 * excerpt (register increments, closing braces).
 */
add_float (guint *fpr, guint *stack_size, ArgInfo *ainfo, gboolean is_double, gint *float_spare)
	/*
	 * If we're calling a function like this:
	 *
	 * void foo(float a, double b, float c)
	 *
	 * We pass a in s0 and b in d1. That leaves us
	 * with s1 being unused. The armhf ABI recognizes
	 * this and requires register assignment to then
	 * use that for the next single-precision arg,
	 * i.e. c in this example. So float_spare either
	 * tells us which reg to use for the next single-
	 * precision arg, or it's -1, meaning use *fpr.
	 *
	 * Note that even though most of the JIT speaks
	 * double-precision, fpr represents single-
	 * precision registers.
	 *
	 * See parts 5.5 and 6.1.2 of the AAPCS for how
	 */
	if (*fpr < ARM_VFP_F16 || (!is_double && *float_spare >= 0)) {
		ainfo->storage = RegTypeFP;
		/*
		 * If we're passing a double-precision value
		 * and *fpr is odd (e.g. it's s1, s3, ...)
		 * we need to use the next even register. So
		 * we mark the current *fpr as a spare that
		 * can be used for the next single-precision
		 */
		*float_spare = *fpr;
		/*
		 * At this point, we have an even register
		 * so we assign that and move along.
		 */
	} else if (*float_spare >= 0) {
		/*
		 * We're passing a single-precision value
		 * and it looks like a spare single-
		 * precision register is available. Let's
		 */
		ainfo->reg = *float_spare;
		/*
		 * If we hit this branch, we're passing a
		 * single-precision value and we can simply
		 * use the next available register.
		 */
	/*
	 * We've exhausted available floating point
	 * regs, so pass the rest on the stack.
	 */
	ainfo->offset = *stack_size;
	ainfo->reg = ARMREG_SP;
	ainfo->storage = RegTypeBase;
/*
 * get_call_info:
 * Build a CallInfo describing where each argument and the return value of
 * 'sig' live under the ARM calling convention: core registers (r0-r3),
 * VFP registers (hard-float), register pairs, by-value struct splits, or
 * caller stack slots.  Allocates from 'mp' when given, otherwise from the
 * heap (caller frees).
 * NOTE(review): this excerpt omits a number of original lines
 * (declarations, braces, case labels, cursor updates); comments describe
 * only the visible code.
 */
get_call_info (MonoGenericSharingContext *gsctx, MonoMemPool *mp, MonoMethodSignature *sig)
	guint i, gr, fpr, pstart;
	int n = sig->hasthis + sig->param_count;
	MonoType *simpletype;
	guint32 stack_size = 0;
	gboolean is_pinvoke = sig->pinvoke;

	/* One allocation covers the CallInfo plus its trailing ArgInfo array. */
	cinfo = mono_mempool_alloc0 (mp, sizeof (CallInfo) + (sizeof (ArgInfo) * n));
	cinfo = g_malloc0 (sizeof (CallInfo) + (sizeof (ArgInfo) * n));

	/* Classify the return value first: small pinvoke structs come back in
	 * registers (RegTypeStructByVal); otherwise struct-like returns need a
	 * hidden return-address argument (vtype_retaddr). */
	t = mini_type_get_underlying_type (gsctx, sig->ret);
	if (MONO_TYPE_ISSTRUCT (t)) {
		if (is_pinvoke && mono_class_native_size (mono_class_from_mono_type (t), &align) <= sizeof (gpointer)) {
			cinfo->ret.storage = RegTypeStructByVal;
		cinfo->vtype_retaddr = TRUE;
	} else if (!(t->type == MONO_TYPE_GENERICINST && !mono_type_generic_inst_is_valuetype (t)) && mini_is_gsharedvt_type_gsctx (gsctx, t)) {
		cinfo->vtype_retaddr = TRUE;

	/*
	 * To simplify get_this_arg_reg () and LLVM integration, emit the vret arg after
	 * the first argument, allowing 'this' to be always passed in the first arg reg.
	 * Also do this if the first argument is a reference type, since virtual calls
	 * are sometimes made using calli without sig->hasthis set, like in the delegate
	 */
	if (cinfo->vtype_retaddr && !is_pinvoke && (sig->hasthis || (sig->param_count > 0 && MONO_TYPE_IS_REFERENCE (mini_type_get_underlying_type (gsctx, sig->params [0]))))) {
		add_general (&gr, &stack_size, cinfo->args + 0, TRUE);
		add_general (&gr, &stack_size, &cinfo->args [sig->hasthis + 0], TRUE);
		/* The hidden vret argument goes second (vret_arg_index == 1). */
		add_general (&gr, &stack_size, &cinfo->ret, TRUE);
		cinfo->vret_arg_index = 1;
		add_general (&gr, &stack_size, cinfo->args + 0, TRUE);
		if (cinfo->vtype_retaddr)
			add_general (&gr, &stack_size, &cinfo->ret, TRUE);

	DEBUG(printf("params: %d\n", sig->param_count));
	for (i = pstart; i < sig->param_count; ++i) {
		ArgInfo *ainfo = &cinfo->args [n];

		if ((sig->call_convention == MONO_CALL_VARARG) && (i == sig->sentinelpos)) {
			/* Prevent implicit arguments and sig_cookie from
			   being passed in registers */
			/* Emit the signature cookie just before the implicit arguments */
			add_general (&gr, &stack_size, &cinfo->sig_cookie, TRUE);
		DEBUG(printf("param %d: ", i));
		if (sig->params [i]->byref) {
			DEBUG(printf("byref\n"));
			/* byref arguments are plain pointers. */
			add_general (&gr, &stack_size, ainfo, TRUE);
		simpletype = mini_type_get_underlying_type (gsctx, sig->params [i]);
		switch (simpletype->type) {
		case MONO_TYPE_BOOLEAN:
			cinfo->args [n].size = 1;
			add_general (&gr, &stack_size, ainfo, TRUE);
		case MONO_TYPE_CHAR:
			cinfo->args [n].size = 2;
			add_general (&gr, &stack_size, ainfo, TRUE);
			cinfo->args [n].size = 4;
			add_general (&gr, &stack_size, ainfo, TRUE);
		case MONO_TYPE_FNPTR:
		case MONO_TYPE_CLASS:
		case MONO_TYPE_OBJECT:
		case MONO_TYPE_STRING:
		case MONO_TYPE_SZARRAY:
		case MONO_TYPE_ARRAY:
			/* Reference / pointer-sized arguments. */
			cinfo->args [n].size = sizeof (gpointer);
			add_general (&gr, &stack_size, ainfo, TRUE);
		case MONO_TYPE_GENERICINST:
			if (!mono_type_generic_inst_is_valuetype (simpletype)) {
				cinfo->args [n].size = sizeof (gpointer);
				add_general (&gr, &stack_size, ainfo, TRUE);
			if (mini_is_gsharedvt_type_gsctx (gsctx, simpletype)) {
				/* gsharedvt arguments are passed by ref */
				g_assert (mini_is_gsharedvt_type_gsctx (gsctx, simpletype));
				add_general (&gr, &stack_size, ainfo, TRUE);
				/* Retag the generic storage with the gsharedvt variant. */
				switch (ainfo->storage) {
				case RegTypeGeneral:
					ainfo->storage = RegTypeGSharedVtInReg;
					ainfo->storage = RegTypeGSharedVtOnStack;
					g_assert_not_reached ();
		case MONO_TYPE_TYPEDBYREF:
		case MONO_TYPE_VALUETYPE: {
			if (simpletype->type == MONO_TYPE_TYPEDBYREF) {
				size = sizeof (MonoTypedRef);
				align = sizeof (gpointer);
				MonoClass *klass = mono_class_from_mono_type (sig->params [i]);
				size = mono_class_native_size (klass, &align);
				size = mini_type_stack_size_full (gsctx, simpletype, &align, FALSE);
			DEBUG(printf ("load %d bytes struct\n", size));
			/* Round the struct size up to whole pointer-sized words. */
			align_size += (sizeof (gpointer) - 1);
			align_size &= ~(sizeof (gpointer) - 1);
			nwords = (align_size + sizeof (gpointer) -1 ) / sizeof (gpointer);
			ainfo->storage = RegTypeStructByVal;
			ainfo->struct_size = size;
			/* FIXME: align stack_size if needed */
			if (eabi_supported) {
				/* EABI: 8-byte-aligned structs start in an even register. */
				if (align >= 8 && (gr & 1))
			if (gr > ARMREG_R3) {
				/* Entirely on the stack. */
				ainfo->vtsize = nwords;
				/* Split: leading words in registers, remainder on the stack. */
				int rest = ARMREG_R3 - gr + 1;
				int n_in_regs = rest >= nwords? nwords: rest;

				ainfo->size = n_in_regs;
				ainfo->vtsize = nwords - n_in_regs;
				nwords -= n_in_regs;
			if (sig->call_convention == MONO_CALL_VARARG)
				/* This matches the alignment in mono_ArgIterator_IntGetNextArg () */
				stack_size = ALIGN_TO (stack_size, align);
			ainfo->offset = stack_size;
			/*g_print ("offset for arg %d at %d\n", n, stack_size);*/
			stack_size += nwords * sizeof (gpointer);
			add_general (&gr, &stack_size, ainfo, FALSE);
			add_float (&fpr, &stack_size, ainfo, FALSE, &float_spare);
			add_general (&gr, &stack_size, ainfo, TRUE);
			add_float (&fpr, &stack_size, ainfo, TRUE, &float_spare);
			add_general (&gr, &stack_size, ainfo, FALSE);
		case MONO_TYPE_MVAR:
			/* gsharedvt arguments are passed by ref */
			g_assert (mini_is_gsharedvt_type_gsctx (gsctx, simpletype));
			add_general (&gr, &stack_size, ainfo, TRUE);
			switch (ainfo->storage) {
			case RegTypeGeneral:
				ainfo->storage = RegTypeGSharedVtInReg;
				ainfo->storage = RegTypeGSharedVtOnStack;
				g_assert_not_reached ();
			g_error ("Can't trampoline 0x%x", sig->params [i]->type);

	/* Handle the case where there are no implicit arguments */
	if ((sig->call_convention == MONO_CALL_VARARG) && (i == sig->sentinelpos)) {
		/* Prevent implicit arguments and sig_cookie from
		   being passed in registers */
		/* Emit the signature cookie just before the implicit arguments */
		add_general (&gr, &stack_size, &cinfo->sig_cookie, TRUE);

	/* Record where the return value is produced. */
	simpletype = mini_type_get_underlying_type (gsctx, sig->ret);
	switch (simpletype->type) {
	case MONO_TYPE_BOOLEAN:
	case MONO_TYPE_CHAR:
	case MONO_TYPE_FNPTR:
	case MONO_TYPE_CLASS:
	case MONO_TYPE_OBJECT:
	case MONO_TYPE_SZARRAY:
	case MONO_TYPE_ARRAY:
	case MONO_TYPE_STRING:
		cinfo->ret.storage = RegTypeGeneral;
		cinfo->ret.reg = ARMREG_R0;
		/* 64-bit results come back in the r0/r1 pair. */
		cinfo->ret.storage = RegTypeIRegPair;
		cinfo->ret.reg = ARMREG_R0;
		cinfo->ret.storage = RegTypeFP;
		/* Hard-float returns FP values in d0/s0; the soft ABI uses r0(/r1). */
		if (IS_HARD_FLOAT) {
			cinfo->ret.reg = ARM_VFP_F0;
			cinfo->ret.reg = ARMREG_R0;
	case MONO_TYPE_GENERICINST:
		if (!mono_type_generic_inst_is_valuetype (simpletype)) {
			cinfo->ret.storage = RegTypeGeneral;
			cinfo->ret.reg = ARMREG_R0;
		// FIXME: Only for variable types
		if (mini_is_gsharedvt_type_gsctx (gsctx, simpletype)) {
			cinfo->ret.storage = RegTypeStructByAddr;
			g_assert (cinfo->vtype_retaddr);
	case MONO_TYPE_VALUETYPE:
	case MONO_TYPE_TYPEDBYREF:
		if (cinfo->ret.storage != RegTypeStructByVal)
			cinfo->ret.storage = RegTypeStructByAddr;
	case MONO_TYPE_MVAR:
		g_assert (mini_is_gsharedvt_type_gsctx (gsctx, simpletype));
		cinfo->ret.storage = RegTypeStructByAddr;
		g_assert (cinfo->vtype_retaddr);
	case MONO_TYPE_VOID:
		g_error ("Can't handle as return value 0x%x", sig->ret->type);

	/* align stack size to 8 */
	/* NOTE(review): the DEBUG print rounds to 16 while the code below rounds
	 * to 8 — presumably the print is stale; confirm against history. */
	DEBUG (printf (" stack size: %d (%d)\n", (stack_size + 15) & ~15, stack_size));
	stack_size = (stack_size + 7) & ~7;

	cinfo->stack_usage = stack_size;
/*
 * mono_arch_tail_call_supported:
 * Decide whether a tail call from 'caller_sig' to 'callee_sig' can be
 * emitted: AOT is excluded, the callee must not use more stack than the
 * caller, vtype returns via hidden address are rejected, and the callee's
 * stack usage is capped at 16 words.
 * NOTE(review): the closing lines of this function (result handling /
 * free of c1 and c2 / return) are not visible in this excerpt.
 */
mono_arch_tail_call_supported (MonoCompile *cfg, MonoMethodSignature *caller_sig, MonoMethodSignature *callee_sig)
	MonoType *callee_ret;

	if (cfg->compile_aot && !cfg->full_aot)
		/* OP_TAILCALL doesn't work with AOT */
	c1 = get_call_info (NULL, NULL, caller_sig);
	c2 = get_call_info (NULL, NULL, callee_sig);

	/*
	 * Tail calls with more callee stack usage than the caller cannot be supported, since
	 * the extra stack space would be left on the stack after the tail call.
	 */
	res = c1->stack_usage >= c2->stack_usage;
	callee_ret = mini_replace_type (callee_sig->ret);
	if (callee_ret && MONO_TYPE_ISSTRUCT (callee_ret) && c2->ret.storage != RegTypeStructByVal)
		/* An address on the callee's stack is passed as the first argument */
	if (c2->stack_usage > 16 * 4)
/* Debug helper: gates frame-pointer omission behind the global mono debug
 * counter so FP omission can be bisected from the command line. */
debug_omit_fp (void)
	return mono_debug_count ();
/*
 * mono_arch_compute_omit_fp:
 *
 *   Determine whenever the frame pointer can be eliminated.
 * Sets cfg->arch.omit_fp (and omit_fp_computed) once per compile; any
 * feature that needs a fixed frame base forces the FP to stay.
 * NOTE(review): some original lines (early return, loop bodies, braces)
 * are missing from this excerpt.
 */
mono_arch_compute_omit_fp (MonoCompile *cfg)
	MonoMethodSignature *sig;
	MonoMethodHeader *header;

	/* Compute only once per method. */
	if (cfg->arch.omit_fp_computed)
	header = cfg->header;

	sig = mono_method_signature (cfg->method);

	if (!cfg->arch.cinfo)
		cfg->arch.cinfo = get_call_info (cfg->generic_sharing_context, cfg->mempool, sig);
	cinfo = cfg->arch.cinfo;

	/*
	 * FIXME: Remove some of the restrictions.
	 */
	cfg->arch.omit_fp = TRUE;
	cfg->arch.omit_fp_computed = TRUE;

	/* Each of the following features requires keeping the frame pointer. */
	if (cfg->disable_omit_fp)
		cfg->arch.omit_fp = FALSE;
	if (!debug_omit_fp ())
		cfg->arch.omit_fp = FALSE;
	if (cfg->method->save_lmf)
		cfg->arch.omit_fp = FALSE;
	if (cfg->flags & MONO_CFG_HAS_ALLOCA)
		cfg->arch.omit_fp = FALSE;
	if (header->num_clauses)
		cfg->arch.omit_fp = FALSE;
	if (cfg->param_area)
		cfg->arch.omit_fp = FALSE;
	if (!sig->pinvoke && (sig->call_convention == MONO_CALL_VARARG))
		cfg->arch.omit_fp = FALSE;
	if ((mono_jit_trace_calls != NULL && mono_trace_eval (cfg->method)) ||
		(cfg->prof_options & MONO_PROFILE_ENTER_LEAVE))
		cfg->arch.omit_fp = FALSE;
	for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
		ArgInfo *ainfo = &cinfo->args [i];

		if (ainfo->storage == RegTypeBase || ainfo->storage == RegTypeBaseGen || ainfo->storage == RegTypeStructByVal) {
			/*
			 * The stack offset can only be determined when the frame
			 */
			cfg->arch.omit_fp = FALSE;
	/* Sum local sizes; presumably feeds a size-based heuristic — the use of
	 * locals_size is not visible in this excerpt. */
	for (i = cfg->locals_start; i < cfg->num_varinfo; i++) {
		MonoInst *ins = cfg->varinfo [i];

		locals_size += mono_type_size (ins->inst_vtype, &ialign);
/*
 * Set var information according to the calling convention. arm version.
 * The locals var stuff should most likely be split in another method.
 *
 * Assigns a frame register and positive stack offsets (SPILLUP layout) to
 * the return value, special JIT variables (seq points, atomic temp),
 * locals, and incoming arguments.
 * NOTE(review): many original lines (declarations, else branches, braces,
 * align computations) are missing from this excerpt.
 */
mono_arch_allocate_vars (MonoCompile *cfg)
	MonoMethodSignature *sig;
	MonoMethodHeader *header;
	int i, offset, size, align, curinst;

	sig = mono_method_signature (cfg->method);

	if (!cfg->arch.cinfo)
		cfg->arch.cinfo = get_call_info (cfg->generic_sharing_context, cfg->mempool, sig);
	cinfo = cfg->arch.cinfo;
	sig_ret = mini_replace_type (sig->ret);

	mono_arch_compute_omit_fp (cfg);

	/* With FP omitted, locals are addressed off SP; otherwise off FP. */
	if (cfg->arch.omit_fp)
		cfg->frame_reg = ARMREG_SP;
	cfg->frame_reg = ARMREG_FP;

	/* Local vars grow upward from the frame register on this target. */
	cfg->flags |= MONO_CFG_HAS_SPILLUP;

	/* allow room for the vararg method args: void* and long/double */
	if (mono_jit_trace_calls != NULL && mono_trace_eval (cfg->method))
		cfg->param_area = MAX (cfg->param_area, sizeof (gpointer)*8);

	header = cfg->header;

	/* See mono_arch_get_global_int_regs () */
	if (cfg->flags & MONO_CFG_HAS_CALLS)
		cfg->uses_rgctx_reg = TRUE;

	if (cfg->frame_reg != ARMREG_SP)
		cfg->used_int_regs |= 1 << cfg->frame_reg;

	if (cfg->compile_aot || cfg->uses_rgctx_reg || COMPILE_LLVM (cfg))
		/* V5 is reserved for passing the vtable/rgctx/IMT method */
		cfg->used_int_regs |= (1 << ARMREG_V5);

	/* Scalar (non-struct) returns live directly in r0. */
	if (!MONO_TYPE_ISSTRUCT (sig_ret) && !cinfo->vtype_retaddr) {
		if (sig_ret->type != MONO_TYPE_VOID) {
			cfg->ret->opcode = OP_REGVAR;
			cfg->ret->inst_c0 = ARMREG_R0;
	/* local vars are at a positive offset from the stack pointer */
	/*
	 * also note that if the function uses alloca, we use FP
	 * to point at the local variables.
	 */
	offset = 0; /* linkage area */
	/* align the offset to 16 bytes: not sure this is needed here */
	//offset &= ~(8 - 1);

	/* add parameter area size for called functions */
	offset += cfg->param_area;
	if (cfg->flags & MONO_CFG_HAS_FPOUT)
	/* allow room to save the return value */
	if (mono_jit_trace_calls != NULL && mono_trace_eval (cfg->method))
	/* the MonoLMF structure is stored just below the stack pointer */
	if (cinfo->ret.storage == RegTypeStructByVal) {
		cfg->ret->opcode = OP_REGOFFSET;
		cfg->ret->inst_basereg = cfg->frame_reg;
		/* Round offset up to pointer alignment, slot sits below the frame. */
		offset += sizeof (gpointer) - 1;
		offset &= ~(sizeof (gpointer) - 1);
		cfg->ret->inst_offset = - offset;
		offset += sizeof(gpointer);
	} else if (cinfo->vtype_retaddr) {
		/* Hidden return-address argument gets its own aligned slot. */
		ins = cfg->vret_addr;
		offset += sizeof(gpointer) - 1;
		offset &= ~(sizeof(gpointer) - 1);
		ins->inst_offset = offset;
		ins->opcode = OP_REGOFFSET;
		ins->inst_basereg = cfg->frame_reg;
		if (G_UNLIKELY (cfg->verbose_level > 1)) {
			printf ("vret_addr =");
			mono_print_ins (cfg->vret_addr);
		offset += sizeof(gpointer);

	/* Allocate these first so they have a small offset, OP_SEQ_POINT depends on this */
	if (cfg->arch.seq_point_info_var) {
		ins = cfg->arch.seq_point_info_var;
		offset += align - 1;
		offset &= ~(align - 1);
		ins->opcode = OP_REGOFFSET;
		ins->inst_basereg = cfg->frame_reg;
		ins->inst_offset = offset;
		ins = cfg->arch.ss_trigger_page_var;
		offset += align - 1;
		offset &= ~(align - 1);
		ins->opcode = OP_REGOFFSET;
		ins->inst_basereg = cfg->frame_reg;
		ins->inst_offset = offset;
	/* Soft-breakpoint seq-point variables get frame slots the same way. */
	if (cfg->arch.seq_point_read_var) {
		ins = cfg->arch.seq_point_read_var;
		offset += align - 1;
		offset &= ~(align - 1);
		ins->opcode = OP_REGOFFSET;
		ins->inst_basereg = cfg->frame_reg;
		ins->inst_offset = offset;
		ins = cfg->arch.seq_point_ss_method_var;
		offset += align - 1;
		offset &= ~(align - 1);
		ins->opcode = OP_REGOFFSET;
		ins->inst_basereg = cfg->frame_reg;
		ins->inst_offset = offset;
		ins = cfg->arch.seq_point_bp_method_var;
		offset += align - 1;
		offset &= ~(align - 1);
		ins->opcode = OP_REGOFFSET;
		ins->inst_basereg = cfg->frame_reg;
		ins->inst_offset = offset;

	if (cfg->has_atomic_exchange_i4 || cfg->has_atomic_cas_i4 || cfg->has_atomic_add_new_i4) {
		/* Allocate a temporary used by the atomic ops */
		/* Allocate a local slot to hold the sig cookie address */
		offset += align - 1;
		offset &= ~(align - 1);
		cfg->arch.atomic_tmp_offset = offset;
	cfg->arch.atomic_tmp_offset = -1;

	cfg->locals_min_stack_offset = offset;

	curinst = cfg->locals_start;
	for (i = curinst; i < cfg->num_varinfo; ++i) {
		ins = cfg->varinfo [i];
		/* Skip dead vars and those already placed. */
		if ((ins->flags & MONO_INST_IS_DEAD) || ins->opcode == OP_REGVAR || ins->opcode == OP_REGOFFSET)
		t = ins->inst_vtype;
		if (cfg->gsharedvt && mini_is_gsharedvt_variable_type (cfg, t))
		/* inst->backend.is_pinvoke indicates native sized value types, this is used by the
		 * pinvoke wrappers when they call functions returning structure */
		if (ins->backend.is_pinvoke && MONO_TYPE_ISSTRUCT (t) && t->type != MONO_TYPE_TYPEDBYREF) {
			size = mono_class_native_size (mono_class_from_mono_type (t), &ualign);
			size = mono_type_size (t, &align);
		/* FIXME: if a structure is misaligned, our memcpy doesn't work,
		 * since it loads/stores misaligned words, which don't do the right thing.
		 */
		if (align < 4 && size >= 4)
		/* Mark padding introduced by alignment as holding no GC references. */
		if (ALIGN_TO (offset, align) > ALIGN_TO (offset, 4))
			mini_gc_set_slot_type_from_fp (cfg, ALIGN_TO (offset, 4), SLOT_NOREF);
		offset += align - 1;
		offset &= ~(align - 1);
		ins->opcode = OP_REGOFFSET;
		ins->inst_offset = offset;
		ins->inst_basereg = cfg->frame_reg;
		//g_print ("allocating local %d to %d\n", i, inst->inst_offset);

	cfg->locals_max_stack_offset = offset;

	/* Place 'this' (and register-spilled args) at an aligned frame slot. */
	ins = cfg->args [curinst];
	if (ins->opcode != OP_REGVAR) {
		ins->opcode = OP_REGOFFSET;
		ins->inst_basereg = cfg->frame_reg;
		offset += sizeof (gpointer) - 1;
		offset &= ~(sizeof (gpointer) - 1);
		ins->inst_offset = offset;
		offset += sizeof (gpointer);

	if (sig->call_convention == MONO_CALL_VARARG) {
		/* Allocate a local slot to hold the sig cookie address */
		offset += align - 1;
		offset &= ~(align - 1);
		cfg->sig_cookie = offset;

	for (i = 0; i < sig->param_count; ++i) {
		ins = cfg->args [curinst];

		if (ins->opcode != OP_REGVAR) {
			ins->opcode = OP_REGOFFSET;
			ins->inst_basereg = cfg->frame_reg;
			size = mini_type_stack_size_full (cfg->generic_sharing_context, sig->params [i], &ualign, sig->pinvoke);
			/* FIXME: if a structure is misaligned, our memcpy doesn't work,
			 * since it loads/stores misaligned words, which don't do the right thing.
			 */
			if (align < 4 && size >= 4)
			/* The code in the prolog () stores words when storing vtypes received in a register */
			if (MONO_TYPE_ISSTRUCT (sig->params [i]))
			if (ALIGN_TO (offset, align) > ALIGN_TO (offset, 4))
				mini_gc_set_slot_type_from_fp (cfg, ALIGN_TO (offset, 4), SLOT_NOREF);
			offset += align - 1;
			offset &= ~(align - 1);
			ins->inst_offset = offset;

	/* align the offset to 8 bytes */
	if (ALIGN_TO (offset, 8) > ALIGN_TO (offset, 4))
		mini_gc_set_slot_type_from_fp (cfg, ALIGN_TO (offset, 4), SLOT_NOREF);

	cfg->stack_offset = offset;
/*
 * mono_arch_create_vars:
 * Create the ARM-specific compile-time variables: VFP scratch slots under
 * hard-float, the hidden vret address argument, and the sequence-point
 * variables used by the soft-debugger (soft breakpoints or AOT trigger
 * page).  All are marked MONO_INST_VOLATILE so they keep a memory slot.
 * NOTE(review): some original lines (declarations, braces) are missing
 * from this excerpt.
 */
mono_arch_create_vars (MonoCompile *cfg)
	MonoMethodSignature *sig;

	sig = mono_method_signature (cfg->method);

	if (!cfg->arch.cinfo)
		cfg->arch.cinfo = get_call_info (cfg->generic_sharing_context, cfg->mempool, sig);
	cinfo = cfg->arch.cinfo;

	if (IS_HARD_FLOAT) {
		/* Two double-sized scratch slots for moving values through VFP regs. */
		for (i = 0; i < 2; i++) {
			MonoInst *inst = mono_compile_create_var (cfg, &mono_defaults.double_class->byval_arg, OP_LOCAL);
			inst->flags |= MONO_INST_VOLATILE;

			cfg->arch.vfp_scratch_slots [i] = (gpointer) inst;

	if (cinfo->ret.storage == RegTypeStructByVal)
		cfg->ret_var_is_local = TRUE;

	if (cinfo->vtype_retaddr) {
		cfg->vret_addr = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_ARG);
		if (G_UNLIKELY (cfg->verbose_level > 1)) {
			printf ("vret_addr = ");
			mono_print_ins (cfg->vret_addr);

	if (cfg->gen_seq_points) {
		if (cfg->soft_breakpoints) {
			MonoInst *ins = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
			ins->flags |= MONO_INST_VOLATILE;
			cfg->arch.seq_point_read_var = ins;

			ins = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
			ins->flags |= MONO_INST_VOLATILE;
			cfg->arch.seq_point_ss_method_var = ins;

			ins = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
			ins->flags |= MONO_INST_VOLATILE;
			cfg->arch.seq_point_bp_method_var = ins;

			/* Soft breakpoints are incompatible with AOT. */
			g_assert (!cfg->compile_aot);
		} else if (cfg->compile_aot) {
			MonoInst *ins = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
			ins->flags |= MONO_INST_VOLATILE;
			cfg->arch.seq_point_info_var = ins;

			/* Allocate a separate variable for this to save 1 load per seq point */
			ins = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
			ins->flags |= MONO_INST_VOLATILE;
			cfg->arch.ss_trigger_page_var = ins;
/*
 * emit_sig_cookie:
 * For vararg calls, store a trimmed copy of the call signature (starting at
 * the sentinel position) onto the stack at the sig-cookie slot, so the
 * callee's ArgIterator can walk the variadic arguments.  No-op for tail
 * calls.  The cookie is always passed on the stack (RegTypeBase).
 */
emit_sig_cookie (MonoCompile *cfg, MonoCallInst *call, CallInfo *cinfo)
	MonoMethodSignature *tmp_sig;

	if (call->tail_call)
	g_assert (cinfo->sig_cookie.storage == RegTypeBase);

	/*
	 * mono_ArgIterator_Setup assumes the signature cookie is
	 * passed first and all the arguments which were before it are
	 * passed on the stack after the signature. So compensate by
	 * passing a different signature.
	 */
	tmp_sig = mono_metadata_signature_dup (call->signature);
	tmp_sig->param_count -= call->signature->sentinelpos;
	tmp_sig->sentinelpos = 0;
	memcpy (tmp_sig->params, call->signature->params + call->signature->sentinelpos, tmp_sig->param_count * sizeof (MonoType*));

	sig_reg = mono_alloc_ireg (cfg);
	MONO_EMIT_NEW_SIGNATURECONST (cfg, sig_reg, tmp_sig);

	MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, ARMREG_SP, cinfo->sig_cookie.offset, sig_reg);
/*
 * mono_arch_get_llvm_call_info:
 * Translate this backend's CallInfo into the LLVMCallInfo the LLVM backend
 * understands.  Argument/return conventions that LLVM cannot express here
 * (FP returns aside from the listed storages, vtypes on the stack, etc.)
 * disable LLVM compilation for the method via cfg->disable_llvm.
 * NOTE(review): some original lines (declarations, returns, braces) are
 * missing from this excerpt.
 */
mono_arch_get_llvm_call_info (MonoCompile *cfg, MonoMethodSignature *sig)
	LLVMCallInfo *linfo;

	n = sig->param_count + sig->hasthis;

	cinfo = get_call_info (cfg->generic_sharing_context, cfg->mempool, sig);

	linfo = mono_mempool_alloc0 (cfg->mempool, sizeof (LLVMCallInfo) + (sizeof (LLVMArgInfo) * n));

	/*
	 * LLVM always uses the native ABI while we use our own ABI, the
	 * only difference is the handling of vtypes:
	 * - we only pass/receive them in registers in some cases, and only
	 *   in 1 or 2 integer registers.
	 */
	if (cinfo->vtype_retaddr) {
		/* Vtype returned using a hidden argument */
		linfo->ret.storage = LLVMArgVtypeRetAddr;
		linfo->vret_arg_index = cinfo->vret_arg_index;
	} else if (cinfo->ret.storage != RegTypeGeneral && cinfo->ret.storage != RegTypeNone && cinfo->ret.storage != RegTypeFP && cinfo->ret.storage != RegTypeIRegPair) {
		cfg->exception_message = g_strdup ("unknown ret conv");
		cfg->disable_llvm = TRUE;

	for (i = 0; i < n; ++i) {
		ainfo = cinfo->args + i;

		linfo->args [i].storage = LLVMArgNone;

		switch (ainfo->storage) {
		case RegTypeGeneral:
		case RegTypeIRegPair:
			linfo->args [i].storage = LLVMArgInIReg;
		case RegTypeStructByVal:
			// FIXME: Passing entirely on the stack or split reg/stack
			if (ainfo->vtsize == 0 && ainfo->size <= 2) {
				/* Small struct entirely in 1-2 integer registers. */
				linfo->args [i].storage = LLVMArgVtypeInReg;
				linfo->args [i].pair_storage [0] = LLVMArgInIReg;
				if (ainfo->size == 2)
					linfo->args [i].pair_storage [1] = LLVMArgInIReg;
					linfo->args [i].pair_storage [1] = LLVMArgNone;
				cfg->exception_message = g_strdup_printf ("vtype-by-val on stack");
				cfg->disable_llvm = TRUE;
			cfg->exception_message = g_strdup_printf ("ainfo->storage (%d)", ainfo->storage);
			cfg->disable_llvm = TRUE;
/*
 * mono_arch_emit_call:
 * Lower a call instruction: for every argument, emit the IR that moves it
 * into its assigned location per the CallInfo — integer registers /
 * register pairs, VFP registers (hard-float), by-value struct splits, or
 * stack slots — plus the vararg signature cookie and the hidden vtype
 * return address.
 * NOTE(review): this excerpt is missing a number of original lines
 * (declarations, case labels, breaks, braces); comments describe only
 * the visible code.
 */
mono_arch_emit_call (MonoCompile *cfg, MonoCallInst *call)
	MonoMethodSignature *sig;

	sig = call->signature;
	n = sig->param_count + sig->hasthis;

	cinfo = get_call_info (cfg->generic_sharing_context, NULL, sig);

	for (i = 0; i < n; ++i) {
		ArgInfo *ainfo = cinfo->args + i;

		/* Implicit 'this' maps to an int-sized type. */
		if (i >= sig->hasthis)
			t = sig->params [i - sig->hasthis];
			t = &mono_defaults.int_class->byval_arg;
		t = mini_type_get_underlying_type (cfg->generic_sharing_context, t);

		if ((sig->call_convention == MONO_CALL_VARARG) && (i == sig->sentinelpos)) {
			/* Emit the signature cookie just before the implicit arguments */
			emit_sig_cookie (cfg, call, cinfo);

		in = call->args [i];

		switch (ainfo->storage) {
		case RegTypeGeneral:
		case RegTypeIRegPair:
			if (!t->byref && ((t->type == MONO_TYPE_I8) || (t->type == MONO_TYPE_U8))) {
				/* 64-bit value: move low word (dreg+1) and high word (dreg+2)
				 * into consecutive registers. */
				MONO_INST_NEW (cfg, ins, OP_MOVE);
				ins->dreg = mono_alloc_ireg (cfg);
				ins->sreg1 = in->dreg + 1;
				MONO_ADD_INS (cfg->cbb, ins);
				mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, ainfo->reg, FALSE);

				MONO_INST_NEW (cfg, ins, OP_MOVE);
				ins->dreg = mono_alloc_ireg (cfg);
				ins->sreg1 = in->dreg + 2;
				MONO_ADD_INS (cfg->cbb, ins);
				mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, ainfo->reg + 1, FALSE);
			} else if (!t->byref && ((t->type == MONO_TYPE_R8) || (t->type == MONO_TYPE_R4))) {
				if (ainfo->size == 4) {
					if (IS_SOFT_FLOAT) {
						/* mono_emit_call_args () have already done the r8->r4 conversion */
						/* The converted value is in an int vreg */
						MONO_INST_NEW (cfg, ins, OP_MOVE);
						ins->dreg = mono_alloc_ireg (cfg);
						ins->sreg1 = in->dreg;
						MONO_ADD_INS (cfg->cbb, ins);
						mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, ainfo->reg, FALSE);
						/* VFP ABI: bounce the R4 through the param area to reach
						 * an integer register. */
						MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORER4_MEMBASE_REG, ARMREG_SP, (cfg->param_area - 8), in->dreg);
						creg = mono_alloc_ireg (cfg);
						MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOAD_MEMBASE, creg, ARMREG_SP, (cfg->param_area - 8));
						mono_call_inst_add_outarg_reg (cfg, call, creg, ainfo->reg, FALSE);
					if (IS_SOFT_FLOAT) {
						/* Soft-float R8: split into low/high 32-bit halves. */
						MONO_INST_NEW (cfg, ins, OP_FGETLOW32);
						ins->dreg = mono_alloc_ireg (cfg);
						ins->sreg1 = in->dreg;
						MONO_ADD_INS (cfg->cbb, ins);
						mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, ainfo->reg, FALSE);

						MONO_INST_NEW (cfg, ins, OP_FGETHIGH32);
						ins->dreg = mono_alloc_ireg (cfg);
						ins->sreg1 = in->dreg;
						MONO_ADD_INS (cfg->cbb, ins);
						mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, ainfo->reg + 1, FALSE);
						/* VFP ABI: bounce the R8 through the param area into an
						 * integer register pair. */
						MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORER8_MEMBASE_REG, ARMREG_SP, (cfg->param_area - 8), in->dreg);
						creg = mono_alloc_ireg (cfg);
						MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOAD_MEMBASE, creg, ARMREG_SP, (cfg->param_area - 8));
						mono_call_inst_add_outarg_reg (cfg, call, creg, ainfo->reg, FALSE);
						creg = mono_alloc_ireg (cfg);
						MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOAD_MEMBASE, creg, ARMREG_SP, (cfg->param_area - 8 + 4));
						mono_call_inst_add_outarg_reg (cfg, call, creg, ainfo->reg + 1, FALSE);
				cfg->flags |= MONO_CFG_HAS_FPOUT;
				/* Plain single-word argument: move into its register. */
				MONO_INST_NEW (cfg, ins, OP_MOVE);
				ins->dreg = mono_alloc_ireg (cfg);
				ins->sreg1 = in->dreg;
				MONO_ADD_INS (cfg->cbb, ins);

				mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, ainfo->reg, FALSE);
		case RegTypeStructByAddr:
			/* FIXME: where si the data allocated? */
			arg->backend.reg3 = ainfo->reg;
			call->used_iregs |= 1 << ainfo->reg;
			g_assert_not_reached ();
		case RegTypeStructByVal:
		case RegTypeGSharedVtInReg:
		case RegTypeGSharedVtOnStack:
			/* Delegate vtype marshalling to mono_arch_emit_outarg_vt via
			 * OP_OUTARG_VT, carrying a private copy of the ArgInfo. */
			MONO_INST_NEW (cfg, ins, OP_OUTARG_VT);
			ins->opcode = OP_OUTARG_VT;
			ins->sreg1 = in->dreg;
			ins->klass = in->klass;
			ins->inst_p0 = call;
			ins->inst_p1 = mono_mempool_alloc (cfg->mempool, sizeof (ArgInfo));
			memcpy (ins->inst_p1, ainfo, sizeof (ArgInfo));
			mono_call_inst_add_outarg_vt (cfg, call, ins);
			MONO_ADD_INS (cfg->cbb, ins);
			/* Stack-passed argument: store at its offset off SP. */
			if (!t->byref && ((t->type == MONO_TYPE_I8) || (t->type == MONO_TYPE_U8))) {
				MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, ARMREG_SP, ainfo->offset, in->dreg);
			} else if (!t->byref && ((t->type == MONO_TYPE_R4) || (t->type == MONO_TYPE_R8))) {
				if (t->type == MONO_TYPE_R8) {
					MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORER8_MEMBASE_REG, ARMREG_SP, ainfo->offset, in->dreg);
					MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, ARMREG_SP, ainfo->offset, in->dreg);
					MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORER4_MEMBASE_REG, ARMREG_SP, ainfo->offset, in->dreg);
				MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, ARMREG_SP, ainfo->offset, in->dreg);
		case RegTypeBaseGen:
			/* Value split between r3 and the stack. */
			if (!t->byref && ((t->type == MONO_TYPE_I8) || (t->type == MONO_TYPE_U8))) {
				MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, ARMREG_SP, ainfo->offset, (G_BYTE_ORDER == G_BIG_ENDIAN) ? in->dreg + 1 : in->dreg + 2);
				MONO_INST_NEW (cfg, ins, OP_MOVE);
				ins->dreg = mono_alloc_ireg (cfg);
				ins->sreg1 = G_BYTE_ORDER == G_BIG_ENDIAN ? in->dreg + 2 : in->dreg + 1;
				MONO_ADD_INS (cfg->cbb, ins);
				mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, ARMREG_R3, FALSE);
			} else if (!t->byref && (t->type == MONO_TYPE_R8)) {
				/* This should work for soft-float as well */
				MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORER8_MEMBASE_REG, ARMREG_SP, (cfg->param_area - 8), in->dreg);
				creg = mono_alloc_ireg (cfg);
				mono_call_inst_add_outarg_reg (cfg, call, creg, ARMREG_R3, FALSE);
				MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOAD_MEMBASE, creg, ARMREG_SP, (cfg->param_area - 8));
				creg = mono_alloc_ireg (cfg);
				MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOAD_MEMBASE, creg, ARMREG_SP, (cfg->param_area - 4));
				MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, ARMREG_SP, ainfo->offset, creg);
				cfg->flags |= MONO_CFG_HAS_FPOUT;
				g_assert_not_reached ();
			/* Hard-float FP argument in a VFP register. */
			int fdreg = mono_alloc_freg (cfg);

			if (ainfo->size == 8) {
				MONO_INST_NEW (cfg, ins, OP_FMOVE);
				ins->sreg1 = in->dreg;
				MONO_ADD_INS (cfg->cbb, ins);

				mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, ainfo->reg, TRUE);
				/*
				 * Mono's register allocator doesn't speak single-precision registers that
				 * overlap double-precision registers (i.e. armhf). So we have to work around
				 * the register allocator and load the value from memory manually.
				 *
				 * So we create a variable for the float argument and an instruction to store
				 * the argument into the variable. We then store the list of these arguments
				 * in cfg->float_args. This list is then used by emit_float_args later to
				 * pass the arguments in the various call opcodes.
				 *
				 * This is not very nice, and we should really try to fix the allocator.
				 */
				MonoInst *float_arg = mono_compile_create_var (cfg, &mono_defaults.single_class->byval_arg, OP_LOCAL);

				/* Make sure the instruction isn't seen as pointless and removed.
				 */
				float_arg->flags |= MONO_INST_VOLATILE;

				MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, float_arg->dreg, in->dreg);

				/* We use the dreg to look up the instruction later. The hreg is used to
				 * emit the instruction that loads the value into the FP reg.
				 */
				fad = mono_mempool_alloc0 (cfg->mempool, sizeof (FloatArgData));
				fad->vreg = float_arg->dreg;
				fad->hreg = ainfo->reg;

				call->float_args = g_slist_append_mempool (cfg->mempool, call->float_args, fad);
			call->used_iregs |= 1 << ainfo->reg;
			cfg->flags |= MONO_CFG_HAS_FPOUT;
			g_assert_not_reached ();

	/* Handle the case where there are no implicit arguments */
	if (!sig->pinvoke && (sig->call_convention == MONO_CALL_VARARG) && (n == sig->sentinelpos))
		emit_sig_cookie (cfg, call, cinfo);

	if (cinfo->ret.storage == RegTypeStructByVal) {
		/* The JIT will transform this into a normal call */
		call->vret_in_reg = TRUE;
	} else if (cinfo->vtype_retaddr) {
		/* Pass the address of the return buffer as the hidden vret arg. */
		MONO_INST_NEW (cfg, vtarg, OP_MOVE);
		vtarg->sreg1 = call->vret_var->dreg;
		vtarg->dreg = mono_alloc_preg (cfg);
		MONO_ADD_INS (cfg->cbb, vtarg);

		mono_call_inst_add_outarg_reg (cfg, call, vtarg->dreg, cinfo->ret.reg, FALSE);

	call->stack_usage = cinfo->stack_usage;
/*
 * mono_arch_emit_outarg_vt:
 * Lower an OP_OUTARG_VT: pass a gsharedvt argument by address (in a
 * register or on the stack), or copy a by-value struct — the leading
 * 'ainfo->size' words into registers (with sub-word structs assembled
 * byte-by-byte to avoid misaligned loads) and the 'ovf_size' remainder
 * onto the stack via memcpy.
 * NOTE(review): some original lines (case labels, breaks, braces) are
 * missing from this excerpt.
 */
mono_arch_emit_outarg_vt (MonoCompile *cfg, MonoInst *ins, MonoInst *src)
	MonoCallInst *call = (MonoCallInst*)ins->inst_p0;
	ArgInfo *ainfo = ins->inst_p1;
	int ovf_size = ainfo->vtsize;
	int doffset = ainfo->offset;
	int struct_size = ainfo->struct_size;
	int i, soffset, dreg, tmpreg;

	if (ainfo->storage == RegTypeGSharedVtInReg) {
		/* Pass the gsharedvt value's address in a register. */
		mono_call_inst_add_outarg_reg (cfg, call, src->dreg, ainfo->reg, FALSE);
	if (ainfo->storage == RegTypeGSharedVtOnStack) {
		/* Pass by addr on stack */
		MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, ARMREG_SP, ainfo->offset, src->dreg);

	/* Copy the register-resident leading words of the struct. */
	for (i = 0; i < ainfo->size; ++i) {
		dreg = mono_alloc_ireg (cfg);
		switch (struct_size) {
			MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, dreg, src->dreg, soffset);
			MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, dreg, src->dreg, soffset);
			/* 3-byte struct: assemble the word from individual bytes. */
			tmpreg = mono_alloc_ireg (cfg);
			MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, dreg, src->dreg, soffset);
			MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, tmpreg, src->dreg, soffset + 1);
			MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, tmpreg, tmpreg, 8);
			MONO_EMIT_NEW_BIALU (cfg, OP_IOR, dreg, dreg, tmpreg);
			MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, tmpreg, src->dreg, soffset + 2);
			MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, tmpreg, tmpreg, 16);
			MONO_EMIT_NEW_BIALU (cfg, OP_IOR, dreg, dreg, tmpreg);
			MONO_EMIT_NEW_LOAD_MEMBASE (cfg, dreg, src->dreg, soffset);
		mono_call_inst_add_outarg_reg (cfg, call, dreg, ainfo->reg + i, FALSE);
		soffset += sizeof (gpointer);
		struct_size -= sizeof (gpointer);
	//g_print ("vt size: %d at R%d + %d\n", doffset, vt->inst_basereg, vt->inst_offset);
	/* Spill whatever did not fit in registers onto the stack. */
	mini_emit_memcpy (cfg, ARMREG_SP, doffset, src->dreg, soffset, MIN (ovf_size * sizeof (gpointer), struct_size), struct_size < 4 ? 1 : 4);
/*
 * mono_arch_emit_setret:
 *   Emit IR that moves VAL into the method's return location (cfg->ret)
 *   before returning.  I8/U8 uses a register pair (OP_SETLRET), floating
 *   point uses OP_SETFRET, everything else a plain OP_MOVE.
 *   NOTE(review): partial dump -- the switch header and several branches
 *   are missing lines; annotations cover only what is visible.
 */
2538 mono_arch_emit_setret (MonoCompile *cfg, MonoMethod *method, MonoInst *val)
2540 MonoType *ret = mini_type_get_underlying_type (cfg->generic_sharing_context, mono_method_signature (method)->ret);
/* 64-bit integer return: low/high halves live in vreg+1/vreg+2. */
2543 if (ret->type == MONO_TYPE_I8 || ret->type == MONO_TYPE_U8) {
2546 if (COMPILE_LLVM (cfg)) {
2547 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, cfg->ret->dreg, val->dreg);
2549 MONO_INST_NEW (cfg, ins, OP_SETLRET);
2550 ins->sreg1 = val->dreg + 1;
2551 ins->sreg2 = val->dreg + 2;
2552 MONO_ADD_INS (cfg->cbb, ins);
/* Soft-float: values are already in integer vregs. */
2557 case MONO_ARM_FPU_NONE:
2558 if (ret->type == MONO_TYPE_R8) {
2561 MONO_INST_NEW (cfg, ins, OP_SETFRET);
2562 ins->dreg = cfg->ret->dreg;
2563 ins->sreg1 = val->dreg;
2564 MONO_ADD_INS (cfg->cbb, ins);
2567 if (ret->type == MONO_TYPE_R4) {
2568 /* Already converted to an int in method_to_ir () */
2569 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, cfg->ret->dreg, val->dreg);
/* VFP (soft or hard ABI): both R4 and R8 go through OP_SETFRET. */
2573 case MONO_ARM_FPU_VFP:
2574 case MONO_ARM_FPU_VFP_HARD:
2575 if (ret->type == MONO_TYPE_R8 || ret->type == MONO_TYPE_R4) {
2578 MONO_INST_NEW (cfg, ins, OP_SETFRET);
2579 ins->dreg = cfg->ret->dreg;
2580 ins->sreg1 = val->dreg;
2581 MONO_ADD_INS (cfg->cbb, ins);
2586 g_assert_not_reached ();
/* Default: plain move into the return vreg. */
2590 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, cfg->ret->dreg, val->dreg);
2593 #endif /* #ifndef DISABLE_JIT */
2596 mono_arch_is_inst_imm (gint64 imm)
2601 #define DYN_CALL_STACK_ARGS 6
2604 MonoMethodSignature *sig;
2609 mgreg_t regs [PARAM_REGS + DYN_CALL_STACK_ARGS];
/*
 * dyn_call_supported:
 *   Return whether the dynamic-call machinery can handle SIG with the
 *   computed CINFO: all arguments must fit into PARAM_REGS registers plus
 *   DYN_CALL_STACK_ARGS stack slots, and the return storage must be one of
 *   the supported kinds.
 *   NOTE(review): partial dump -- the `return FALSE/TRUE` lines and some
 *   cases are missing from this view.
 */
2615 dyn_call_supported (CallInfo *cinfo, MonoMethodSignature *sig)
/* Quick reject: too many arguments to fit in regs + supported stack slots. */
2619 if (sig->hasthis + sig->param_count > PARAM_REGS + DYN_CALL_STACK_ARGS)
2622 switch (cinfo->ret.storage) {
2624 case RegTypeGeneral:
2625 case RegTypeIRegPair:
2626 case RegTypeStructByAddr:
/* Check each argument's storage kind and stack placement. */
2637 for (i = 0; i < cinfo->nargs; ++i) {
2638 ArgInfo *ainfo = &cinfo->args [i];
2641 switch (ainfo->storage) {
2642 case RegTypeGeneral:
2644 case RegTypeIRegPair:
/* Stack args beyond the supported slot window are rejected. */
2647 if (ainfo->offset >= (DYN_CALL_STACK_ARGS * sizeof (gpointer)))
2650 case RegTypeStructByVal:
2651 if (ainfo->size == 0)
2652 last_slot = PARAM_REGS + (ainfo->offset / 4) + ainfo->vtsize;
2654 last_slot = ainfo->reg + ainfo->size + ainfo->vtsize;
2655 if (last_slot >= PARAM_REGS + DYN_CALL_STACK_ARGS)
2663 // FIXME: Can't use cinfo only as it doesn't contain info about I8/float */
2664 for (i = 0; i < sig->param_count; ++i) {
2665 MonoType *t = sig->params [i];
/*
 * mono_arch_dyn_call_prepare:
 *   Allocate and return the arch-specific dynamic-call descriptor for SIG,
 *   or (presumably, in the missing branch) NULL when dyn_call_supported()
 *   rejects the signature.  Caller frees via mono_arch_dyn_call_free ().
 */
2691 mono_arch_dyn_call_prepare (MonoMethodSignature *sig)
2693 ArchDynCallInfo *info;
2696 cinfo = get_call_info (NULL, NULL, sig);
2698 if (!dyn_call_supported (cinfo, sig)) {
2703 info = g_new0 (ArchDynCallInfo, 1);
2704 // FIXME: Preprocess the info to speed up start_dyn_call ()
/* Ownership of cinfo transfers to the returned descriptor. */
2706 info->cinfo = cinfo;
2708 return (MonoDynCallInfo*)info;
/*
 * mono_arch_dyn_call_free:
 *   Free a descriptor created by mono_arch_dyn_call_prepare (), including
 *   the owned CallInfo.  (The g_free of the descriptor itself is on a line
 *   missing from this dump -- verify against the full source.)
 */
2712 mono_arch_dyn_call_free (MonoDynCallInfo *info)
2714 ArchDynCallInfo *ainfo = (ArchDynCallInfo*)info;
2716 g_free (ainfo->cinfo);
/*
 * mono_arch_start_dyn_call:
 *   Marshal ARGS into the DynCallArgs structure in BUF so the dyn-call
 *   trampoline can load them into registers/stack slots.  `this` and the
 *   vtype return address are placed first, then each parameter is copied
 *   into its slot according to the precomputed ArgInfo.
 *   NOTE(review): partial dump -- initializations of greg/arg_index/pindex
 *   and several case labels are on missing lines.
 */
2721 mono_arch_start_dyn_call (MonoDynCallInfo *info, gpointer **args, guint8 *ret, guint8 *buf, int buf_len)
2723 ArchDynCallInfo *dinfo = (ArchDynCallInfo*)info;
2724 DynCallArgs *p = (DynCallArgs*)buf;
2725 int arg_index, greg, i, j, pindex;
2726 MonoMethodSignature *sig = dinfo->sig;
2728 g_assert (buf_len >= sizeof (DynCallArgs));
/* `this` (or the vret address when it comes first) occupies the first reg. */
2737 if (sig->hasthis || dinfo->cinfo->vret_arg_index == 1) {
2738 p->regs [greg ++] = (mgreg_t)*(args [arg_index ++]);
2743 if (dinfo->cinfo->vtype_retaddr)
2744 p->regs [greg ++] = (mgreg_t)ret;
2746 for (i = pindex; i < sig->param_count; i++) {
2747 MonoType *t = mono_type_get_underlying_type (sig->params [i]);
2748 gpointer *arg = args [arg_index ++];
2749 ArgInfo *ainfo = &dinfo->cinfo->args [i + sig->hasthis];
/* Compute the flat slot index; stack args live after the PARAM_REGS regs. */
2752 if (ainfo->storage == RegTypeGeneral || ainfo->storage == RegTypeIRegPair || ainfo->storage == RegTypeStructByVal)
2754 else if (ainfo->storage == RegTypeBase)
2755 slot = PARAM_REGS + (ainfo->offset / 4);
2757 g_assert_not_reached ();
2760 p->regs [slot] = (mgreg_t)*arg;
/* Reference types: store the pointer value itself. */
2765 case MONO_TYPE_STRING:
2766 case MONO_TYPE_CLASS:
2767 case MONO_TYPE_ARRAY:
2768 case MONO_TYPE_SZARRAY:
2769 case MONO_TYPE_OBJECT:
2773 p->regs [slot] = (mgreg_t)*arg;
/* Small integers are widened to a full machine word per slot. */
2775 case MONO_TYPE_BOOLEAN:
2777 p->regs [slot] = *(guint8*)arg;
2780 p->regs [slot] = *(gint8*)arg;
2783 p->regs [slot] = *(gint16*)arg;
2786 case MONO_TYPE_CHAR:
2787 p->regs [slot] = *(guint16*)arg;
2790 p->regs [slot] = *(gint32*)arg;
2793 p->regs [slot] = *(guint32*)arg;
/* 64-bit values take two consecutive slots. */
2797 p->regs [slot ++] = (mgreg_t)arg [0];
2798 p->regs [slot] = (mgreg_t)arg [1];
2801 p->regs [slot] = *(mgreg_t*)arg;
2804 p->regs [slot ++] = (mgreg_t)arg [0];
2805 p->regs [slot] = (mgreg_t)arg [1];
2807 case MONO_TYPE_GENERICINST:
2808 if (MONO_TYPE_IS_REFERENCE (t)) {
2809 p->regs [slot] = (mgreg_t)*arg;
/* Valuetypes: copy word-by-word into (possibly several) slots. */
2814 case MONO_TYPE_VALUETYPE:
2815 g_assert (ainfo->storage == RegTypeStructByVal);
2817 if (ainfo->size == 0)
2818 slot = PARAM_REGS + (ainfo->offset / 4);
2822 for (j = 0; j < ainfo->size + ainfo->vtsize; ++j)
2823 p->regs [slot ++] = ((mgreg_t*)arg) [j];
2826 g_assert_not_reached ();
/*
 * mono_arch_finish_dyn_call:
 *   After the dyn-call trampoline returns, copy the raw result registers
 *   (res/res2 saved into BUF) into the caller-provided return buffer,
 *   narrowing/widening according to the signature's return type.
 *   NOTE(review): partial dump -- several case labels and break statements
 *   are on missing lines.
 */
2832 mono_arch_finish_dyn_call (MonoDynCallInfo *info, guint8 *buf)
2834 ArchDynCallInfo *ainfo = (ArchDynCallInfo*)info;
2835 MonoMethodSignature *sig = ((ArchDynCallInfo*)info)->sig;
2837 guint8 *ret = ((DynCallArgs*)buf)->ret;
2838 mgreg_t res = ((DynCallArgs*)buf)->res;
2839 mgreg_t res2 = ((DynCallArgs*)buf)->res2;
2841 ptype = mini_type_get_underlying_type (NULL, sig->ret);
2842 switch (ptype->type) {
2843 case MONO_TYPE_VOID:
2844 *(gpointer*)ret = NULL;
2846 case MONO_TYPE_STRING:
2847 case MONO_TYPE_CLASS:
2848 case MONO_TYPE_ARRAY:
2849 case MONO_TYPE_SZARRAY:
2850 case MONO_TYPE_OBJECT:
2854 *(gpointer*)ret = (gpointer)res;
2860 case MONO_TYPE_BOOLEAN:
2861 *(guint8*)ret = res;
2864 *(gint16*)ret = res;
2867 case MONO_TYPE_CHAR:
2868 *(guint16*)ret = res;
2871 *(gint32*)ret = res;
2874 *(guint32*)ret = res;
/* 64-bit result: low word in res, high word in res2. */
2878 /* This handles endianness as well */
2879 ((gint32*)ret) [0] = res;
2880 ((gint32*)ret) [1] = res2;
2882 case MONO_TYPE_GENERICINST:
2883 if (MONO_TYPE_IS_REFERENCE (ptype)) {
2884 *(gpointer*)ret = (gpointer)res;
/* Valuetype result was written through the vret address during the call. */
2889 case MONO_TYPE_VALUETYPE:
2890 g_assert (ainfo->cinfo->vtype_retaddr);
/* Float results: reinterpret the saved register bits, not a conversion. */
2895 *(float*)ret = *(float*)&res;
2897 case MONO_TYPE_R8: {
2904 *(double*)ret = *(double*)&regs;
2908 g_assert_not_reached ();
/*
 * mono_arch_instrument_prolog:
 *   Emit a call to FUNC(method, NULL) at method entry for tracing.
 *   R0 = method, R1 = 0 (frame pointer placeholder), FUNC called via R2.
 */
2915 * Allow tracing to work with this interface (with an optional argument)
2919 mono_arch_instrument_prolog (MonoCompile *cfg, void *func, void *p, gboolean enable_arguments)
2923 code = mono_arm_emit_load_imm (code, ARMREG_R0, (guint32)cfg->method);
2924 ARM_MOV_REG_IMM8 (code, ARMREG_R1, 0); /* NULL ebp for now */
2925 code = mono_arm_emit_load_imm (code, ARMREG_R2, (guint32)func);
2926 code = emit_call_reg (code, ARMREG_R2);
/*
 * mono_arch_instrument_epilog_full:
 *   Emit a call to FUNC at method exit for tracing.  The return value is
 *   first spilled to the param area (save_mode picks how many words / which
 *   register file), FUNC is called, then the return value is restored.
 *   NOTE(review): partial dump -- the rtype switch header, several case
 *   labels and break statements are on missing lines.
 */
2940 mono_arch_instrument_epilog_full (MonoCompile *cfg, void *func, void *p, gboolean enable_arguments, gboolean preserve_argument_registers)
2943 int save_mode = SAVE_NONE;
2945 MonoMethod *method = cfg->method;
2946 MonoType *ret_type = mini_type_get_underlying_type (cfg->generic_sharing_context, mono_method_signature (method)->ret);
2947 int rtype = ret_type->type;
2948 int save_offset = cfg->param_area;
/* Grow the native code buffer if fewer than ~16 instructions of room left. */
2952 offset = code - cfg->native_code;
2953 /* we need about 16 instructions */
2954 if (offset > (cfg->code_size - 16 * 4)) {
2955 cfg->code_size *= 2;
2956 cfg->native_code = g_realloc (cfg->native_code, cfg->code_size);
2957 code = cfg->native_code + offset;
/* Pick save_mode from the return type. */
2960 case MONO_TYPE_VOID:
2961 /* special case string .ctor icall */
2962 if (strcmp (".ctor", method->name) && method->klass == mono_defaults.string_class)
2963 save_mode = SAVE_ONE;
2965 save_mode = SAVE_NONE;
2969 save_mode = SAVE_TWO;
2973 save_mode = SAVE_ONE_FP;
2975 save_mode = SAVE_ONE;
2979 save_mode = SAVE_TWO_FP;
2981 save_mode = SAVE_TWO;
2983 case MONO_TYPE_GENERICINST:
2984 if (!mono_type_generic_inst_is_valuetype (ret_type)) {
2985 save_mode = SAVE_ONE;
2989 case MONO_TYPE_VALUETYPE:
2990 save_mode = SAVE_STRUCT;
2993 save_mode = SAVE_ONE;
/* Spill the return value; optionally shuffle it into arg registers so the
 * instrumentation callback can see it. */
2997 switch (save_mode) {
2999 ARM_STR_IMM (code, ARMREG_R0, cfg->frame_reg, save_offset);
3000 ARM_STR_IMM (code, ARMREG_R1, cfg->frame_reg, save_offset + 4);
3001 if (enable_arguments) {
3002 ARM_MOV_REG_REG (code, ARMREG_R2, ARMREG_R1);
3003 ARM_MOV_REG_REG (code, ARMREG_R1, ARMREG_R0);
3007 ARM_STR_IMM (code, ARMREG_R0, cfg->frame_reg, save_offset);
3008 if (enable_arguments) {
3009 ARM_MOV_REG_REG (code, ARMREG_R1, ARMREG_R0);
3013 ARM_FSTS (code, ARM_VFP_F0, cfg->frame_reg, save_offset);
3014 if (enable_arguments) {
3015 ARM_FMRS (code, ARMREG_R1, ARM_VFP_F0);
3019 ARM_FSTD (code, ARM_VFP_D0, cfg->frame_reg, save_offset);
3020 if (enable_arguments) {
3021 ARM_FMDRR (code, ARMREG_R1, ARMREG_R2, ARM_VFP_D0);
3025 if (enable_arguments) {
3026 /* FIXME: get the actual address */
3027 ARM_MOV_REG_REG (code, ARMREG_R1, ARMREG_R0);
/* Call FUNC(method, ...) via IP. */
3035 code = mono_arm_emit_load_imm (code, ARMREG_R0, (guint32)cfg->method);
3036 code = mono_arm_emit_load_imm (code, ARMREG_IP, (guint32)func);
3037 code = emit_call_reg (code, ARMREG_IP);
/* Restore the spilled return value. */
3039 switch (save_mode) {
3041 ARM_LDR_IMM (code, ARMREG_R0, cfg->frame_reg, save_offset);
3042 ARM_LDR_IMM (code, ARMREG_R1, cfg->frame_reg, save_offset + 4);
3045 ARM_LDR_IMM (code, ARMREG_R0, cfg->frame_reg, save_offset);
3048 ARM_FLDS (code, ARM_VFP_F0, cfg->frame_reg, save_offset);
3051 ARM_FLDD (code, ARM_VFP_D0, cfg->frame_reg, save_offset);
3062 * The immediate field for cond branches is big enough for all reasonable methods
3064 #define EMIT_COND_BRANCH_FLAGS(ins,condcode) \
3065 if (0 && ins->inst_true_bb->native_offset) { \
3066 ARM_B_COND (code, (condcode), (code - cfg->native_code + ins->inst_true_bb->native_offset) & 0xffffff); \
3068 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_BB, ins->inst_true_bb); \
3069 ARM_B_COND (code, (condcode), 0); \
3072 #define EMIT_COND_BRANCH(ins,cond) EMIT_COND_BRANCH_FLAGS(ins, branch_cc_table [(cond)])
3074 /* emit an exception if condition is fail
3076 * We assign the extra code used to throw the implicit exceptions
3077 * to cfg->bb_exit as far as the big branch handling is concerned
3079 #define EMIT_COND_SYSTEM_EXCEPTION_FLAGS(condcode,exc_name) \
3081 mono_add_patch_info (cfg, code - cfg->native_code, \
3082 MONO_PATCH_INFO_EXC, exc_name); \
3083 ARM_BL_COND (code, (condcode), 0); \
3086 #define EMIT_COND_SYSTEM_EXCEPTION(cond,exc_name) EMIT_COND_SYSTEM_EXCEPTION_FLAGS(branch_cc_table [(cond)], (exc_name))
/* mono_arch_peephole_pass_1: first peephole pass; body not visible in this
 * dump (empty on this backend -- TODO confirm against the full source). */
3089 mono_arch_peephole_pass_1 (MonoCompile *cfg, MonoBasicBlock *bb)
/*
 * mono_arch_peephole_pass_2:
 *   Local peephole optimizations over a basic block: fold redundant
 *   load-after-store / load-after-load pairs into moves or deletions, and
 *   drop no-op / cancelling moves.  Uses last_ins as a one-instruction
 *   lookback window.
 *   NOTE(review): partial dump -- some case labels, breaks and the
 *   last_ins update are on missing lines.
 */
3094 mono_arch_peephole_pass_2 (MonoCompile *cfg, MonoBasicBlock *bb)
3096 MonoInst *ins, *n, *last_ins = NULL;
3098 MONO_BB_FOR_EACH_INS_SAFE (bb, n, ins) {
3099 switch (ins->opcode) {
3102 /* Already done by an arch-independent pass */
3104 case OP_LOAD_MEMBASE:
3105 case OP_LOADI4_MEMBASE:
/* store reg -> [base+off]; load [base+off] -> reg  ==> drop or move */
3107 * OP_STORE_MEMBASE_REG reg, offset(basereg)
3108 * OP_LOAD_MEMBASE offset(basereg), reg
3110 if (last_ins && (last_ins->opcode == OP_STOREI4_MEMBASE_REG
3111 || last_ins->opcode == OP_STORE_MEMBASE_REG) &&
3112 ins->inst_basereg == last_ins->inst_destbasereg &&
3113 ins->inst_offset == last_ins->inst_offset) {
3114 if (ins->dreg == last_ins->sreg1) {
3115 MONO_DELETE_INS (bb, ins);
3118 //static int c = 0; printf ("MATCHX %s %d\n", cfg->method->name,c++);
3119 ins->opcode = OP_MOVE;
3120 ins->sreg1 = last_ins->sreg1;
3124 * Note: reg1 must be different from the basereg in the second load
3125 * OP_LOAD_MEMBASE offset(basereg), reg1
3126 * OP_LOAD_MEMBASE offset(basereg), reg2
3128 * OP_LOAD_MEMBASE offset(basereg), reg1
3129 * OP_MOVE reg1, reg2
/* NOTE(review): `} if (...)` below is not an else-if; looks intentional but
 * worth confirming against upstream. */
3131 } if (last_ins && (last_ins->opcode == OP_LOADI4_MEMBASE
3132 || last_ins->opcode == OP_LOAD_MEMBASE) &&
3133 ins->inst_basereg != last_ins->dreg &&
3134 ins->inst_basereg == last_ins->inst_basereg &&
3135 ins->inst_offset == last_ins->inst_offset) {
3137 if (ins->dreg == last_ins->dreg) {
3138 MONO_DELETE_INS (bb, ins);
3141 ins->opcode = OP_MOVE;
3142 ins->sreg1 = last_ins->dreg;
3145 //g_assert_not_reached ();
3149 * OP_STORE_MEMBASE_IMM imm, offset(basereg)
3150 * OP_LOAD_MEMBASE offset(basereg), reg
3152 * OP_STORE_MEMBASE_IMM imm, offset(basereg)
3153 * OP_ICONST reg, imm
3155 } else if (last_ins && (last_ins->opcode == OP_STOREI4_MEMBASE_IMM
3156 || last_ins->opcode == OP_STORE_MEMBASE_IMM) &&
3157 ins->inst_basereg == last_ins->inst_destbasereg &&
3158 ins->inst_offset == last_ins->inst_offset) {
3159 //static int c = 0; printf ("MATCHX %s %d\n", cfg->method->name,c++);
3160 ins->opcode = OP_ICONST;
3161 ins->inst_c0 = last_ins->inst_imm;
3162 g_assert_not_reached (); // check this rule
/* Narrow load after narrow store: turn load into a sign/zero extend. */
3166 case OP_LOADU1_MEMBASE:
3167 case OP_LOADI1_MEMBASE:
3168 if (last_ins && (last_ins->opcode == OP_STOREI1_MEMBASE_REG) &&
3169 ins->inst_basereg == last_ins->inst_destbasereg &&
3170 ins->inst_offset == last_ins->inst_offset) {
3171 ins->opcode = (ins->opcode == OP_LOADI1_MEMBASE) ? OP_ICONV_TO_I1 : OP_ICONV_TO_U1;
3172 ins->sreg1 = last_ins->sreg1;
3175 case OP_LOADU2_MEMBASE:
3176 case OP_LOADI2_MEMBASE:
3177 if (last_ins && (last_ins->opcode == OP_STOREI2_MEMBASE_REG) &&
3178 ins->inst_basereg == last_ins->inst_destbasereg &&
3179 ins->inst_offset == last_ins->inst_offset) {
3180 ins->opcode = (ins->opcode == OP_LOADI2_MEMBASE) ? OP_ICONV_TO_I2 : OP_ICONV_TO_U2;
3181 ins->sreg1 = last_ins->sreg1;
3185 ins->opcode = OP_MOVE;
/* Self-move: delete. */
3189 if (ins->dreg == ins->sreg1) {
3190 MONO_DELETE_INS (bb, ins);
3194 * OP_MOVE sreg, dreg
3195 * OP_MOVE dreg, sreg
3197 if (last_ins && last_ins->opcode == OP_MOVE &&
3198 ins->sreg1 == last_ins->dreg &&
3199 ins->dreg == last_ins->sreg1) {
3200 MONO_DELETE_INS (bb, ins);
3208 bb->last_ins = last_ins;
3212 * the branch_cc_table should maintain the order of these
3226 branch_cc_table [] = {
3240 #define ADD_NEW_INS(cfg,dest,op) do { \
3241 MONO_INST_NEW ((cfg), (dest), (op)); \
3242 mono_bblock_insert_before_ins (bb, ins, (dest)); \
/*
 * map_to_reg_reg_op:
 *   Map a membase/immediate opcode to its reg-reg (memindex or
 *   register-operand) equivalent, used by the lowering pass when an
 *   offset/immediate does not fit in the instruction encoding.
 *   NOTE(review): partial dump -- several case labels are on missing lines.
 */
3246 map_to_reg_reg_op (int op)
3255 case OP_COMPARE_IMM:
3257 case OP_ICOMPARE_IMM:
3271 case OP_LOAD_MEMBASE:
3272 return OP_LOAD_MEMINDEX;
3273 case OP_LOADI4_MEMBASE:
3274 return OP_LOADI4_MEMINDEX;
3275 case OP_LOADU4_MEMBASE:
3276 return OP_LOADU4_MEMINDEX;
3277 case OP_LOADU1_MEMBASE:
3278 return OP_LOADU1_MEMINDEX;
3279 case OP_LOADI2_MEMBASE:
3280 return OP_LOADI2_MEMINDEX;
3281 case OP_LOADU2_MEMBASE:
3282 return OP_LOADU2_MEMINDEX;
3283 case OP_LOADI1_MEMBASE:
3284 return OP_LOADI1_MEMINDEX;
3285 case OP_STOREI1_MEMBASE_REG:
3286 return OP_STOREI1_MEMINDEX;
3287 case OP_STOREI2_MEMBASE_REG:
3288 return OP_STOREI2_MEMINDEX;
3289 case OP_STOREI4_MEMBASE_REG:
3290 return OP_STOREI4_MEMINDEX;
3291 case OP_STORE_MEMBASE_REG:
3292 return OP_STORE_MEMINDEX;
3293 case OP_STORER4_MEMBASE_REG:
3294 return OP_STORER4_MEMINDEX;
3295 case OP_STORER8_MEMBASE_REG:
3296 return OP_STORER8_MEMINDEX;
/* store-imm lowers to store-reg (the imm is materialized separately). */
3297 case OP_STORE_MEMBASE_IMM:
3298 return OP_STORE_MEMBASE_REG;
3299 case OP_STOREI1_MEMBASE_IMM:
3300 return OP_STOREI1_MEMBASE_REG;
3301 case OP_STOREI2_MEMBASE_IMM:
3302 return OP_STOREI2_MEMBASE_REG;
3303 case OP_STOREI4_MEMBASE_IMM:
3304 return OP_STOREI4_MEMBASE_REG;
3306 g_assert_not_reached ();
/*
 * mono_arch_lowering_pass:
 *   Rewrite instructions whose immediates/offsets do not fit the ARM
 *   encodings: materialize constants into vregs (OP_ICONST), switch to
 *   reg-reg opcodes via map_to_reg_reg_op (), and split large FP offsets
 *   into a base-adjusting add plus a small offset.
 *   NOTE(review): partial dump -- case labels, breaks and the loop_start
 *   label are on missing lines; annotations cover visible statements only.
 */
3310 * Remove from the instruction list the instructions that can't be
3311 * represented with very simple instructions with no register
3315 mono_arch_lowering_pass (MonoCompile *cfg, MonoBasicBlock *bb)
3317 MonoInst *ins, *temp, *last_ins = NULL;
3318 int rot_amount, imm8, low_imm;
3320 MONO_BB_FOR_EACH_INS (bb, ins) {
3322 switch (ins->opcode) {
3326 case OP_COMPARE_IMM:
3327 case OP_ICOMPARE_IMM:
/* ALU immediates must be an 8-bit value rotated by an even amount;
 * otherwise load the constant into a vreg and use the reg-reg form. */
3341 if ((imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount)) < 0) {
3342 ADD_NEW_INS (cfg, temp, OP_ICONST);
3343 temp->inst_c0 = ins->inst_imm;
3344 temp->dreg = mono_alloc_ireg (cfg);
3345 ins->sreg2 = temp->dreg;
3346 ins->opcode = mono_op_imm_to_op (ins->opcode);
3348 if (ins->opcode == OP_SBB || ins->opcode == OP_ISBB || ins->opcode == OP_SUBCC)
/* Multiply-by-immediate: strength-reduce trivial cases. */
3354 if (ins->inst_imm == 1) {
3355 ins->opcode = OP_MOVE;
3358 if (ins->inst_imm == 0) {
3359 ins->opcode = OP_ICONST;
3363 imm8 = mono_is_power_of_two (ins->inst_imm);
3365 ins->opcode = OP_SHL_IMM;
3366 ins->inst_imm = imm8;
3369 ADD_NEW_INS (cfg, temp, OP_ICONST);
3370 temp->inst_c0 = ins->inst_imm;
3371 temp->dreg = mono_alloc_ireg (cfg);
3372 ins->sreg2 = temp->dreg;
3373 ins->opcode = OP_IMUL;
3379 if (ins->next && (ins->next->opcode == OP_COND_EXC_C || ins->next->opcode == OP_COND_EXC_IC))
3380 /* ARM sets the C flag to 1 if there was _no_ overflow */
3381 ins->next->opcode = OP_COND_EXC_NC;
/* div/rem with immediate: always materialize the divisor. */
3384 case OP_IDIV_UN_IMM:
3386 case OP_IREM_UN_IMM:
3387 ADD_NEW_INS (cfg, temp, OP_ICONST);
3388 temp->inst_c0 = ins->inst_imm;
3389 temp->dreg = mono_alloc_ireg (cfg);
3390 ins->sreg2 = temp->dreg;
3391 ins->opcode = mono_op_imm_to_op (ins->opcode);
3393 case OP_LOCALLOC_IMM:
3394 ADD_NEW_INS (cfg, temp, OP_ICONST);
3395 temp->inst_c0 = ins->inst_imm;
3396 temp->dreg = mono_alloc_ireg (cfg);
3397 ins->sreg1 = temp->dreg;
3398 ins->opcode = OP_LOCALLOC;
/* Word/byte loads: 12-bit unsigned offset encoding. */
3400 case OP_LOAD_MEMBASE:
3401 case OP_LOADI4_MEMBASE:
3402 case OP_LOADU4_MEMBASE:
3403 case OP_LOADU1_MEMBASE:
3404 /* we can do two things: load the immed in a register
3405 * and use an indexed load, or see if the immed can be
3406 * represented as an ad_imm + a load with a smaller offset
3407 * that fits. We just do the first for now, optimize later.
3409 if (arm_is_imm12 (ins->inst_offset))
3411 ADD_NEW_INS (cfg, temp, OP_ICONST);
3412 temp->inst_c0 = ins->inst_offset;
3413 temp->dreg = mono_alloc_ireg (cfg);
3414 ins->sreg2 = temp->dreg;
3415 ins->opcode = map_to_reg_reg_op (ins->opcode);
/* Halfword/signed-byte loads: only an 8-bit offset encoding. */
3417 case OP_LOADI2_MEMBASE:
3418 case OP_LOADU2_MEMBASE:
3419 case OP_LOADI1_MEMBASE:
3420 if (arm_is_imm8 (ins->inst_offset))
3422 ADD_NEW_INS (cfg, temp, OP_ICONST);
3423 temp->inst_c0 = ins->inst_offset;
3424 temp->dreg = mono_alloc_ireg (cfg);
3425 ins->sreg2 = temp->dreg;
3426 ins->opcode = map_to_reg_reg_op (ins->opcode);
/* VFP loads: try base+small-offset split first, else full add. */
3428 case OP_LOADR4_MEMBASE:
3429 case OP_LOADR8_MEMBASE:
3430 if (arm_is_fpimm8 (ins->inst_offset))
3432 low_imm = ins->inst_offset & 0x1ff;
3433 if ((imm8 = mono_arm_is_rotated_imm8 (ins->inst_offset & ~0x1ff, &rot_amount)) >= 0) {
3434 ADD_NEW_INS (cfg, temp, OP_ADD_IMM);
3435 temp->inst_imm = ins->inst_offset & ~0x1ff;
3436 temp->sreg1 = ins->inst_basereg;
3437 temp->dreg = mono_alloc_ireg (cfg);
3438 ins->inst_basereg = temp->dreg;
3439 ins->inst_offset = low_imm;
3443 ADD_NEW_INS (cfg, temp, OP_ICONST);
3444 temp->inst_c0 = ins->inst_offset;
3445 temp->dreg = mono_alloc_ireg (cfg);
3447 ADD_NEW_INS (cfg, add_ins, OP_IADD);
3448 add_ins->sreg1 = ins->inst_basereg;
3449 add_ins->sreg2 = temp->dreg;
3450 add_ins->dreg = mono_alloc_ireg (cfg);
3452 ins->inst_basereg = add_ins->dreg;
3453 ins->inst_offset = 0;
3456 case OP_STORE_MEMBASE_REG:
3457 case OP_STOREI4_MEMBASE_REG:
3458 case OP_STOREI1_MEMBASE_REG:
3459 if (arm_is_imm12 (ins->inst_offset))
3461 ADD_NEW_INS (cfg, temp, OP_ICONST);
3462 temp->inst_c0 = ins->inst_offset;
3463 temp->dreg = mono_alloc_ireg (cfg);
3464 ins->sreg2 = temp->dreg;
3465 ins->opcode = map_to_reg_reg_op (ins->opcode);
3467 case OP_STOREI2_MEMBASE_REG:
3468 if (arm_is_imm8 (ins->inst_offset))
3470 ADD_NEW_INS (cfg, temp, OP_ICONST);
3471 temp->inst_c0 = ins->inst_offset;
3472 temp->dreg = mono_alloc_ireg (cfg);
3473 ins->sreg2 = temp->dreg;
3474 ins->opcode = map_to_reg_reg_op (ins->opcode);
3476 case OP_STORER4_MEMBASE_REG:
3477 case OP_STORER8_MEMBASE_REG:
3478 if (arm_is_fpimm8 (ins->inst_offset))
3480 low_imm = ins->inst_offset & 0x1ff;
3481 if ((imm8 = mono_arm_is_rotated_imm8 (ins->inst_offset & ~ 0x1ff, &rot_amount)) >= 0 && arm_is_fpimm8 (low_imm)) {
3482 ADD_NEW_INS (cfg, temp, OP_ADD_IMM);
3483 temp->inst_imm = ins->inst_offset & ~0x1ff;
3484 temp->sreg1 = ins->inst_destbasereg;
3485 temp->dreg = mono_alloc_ireg (cfg);
3486 ins->inst_destbasereg = temp->dreg;
3487 ins->inst_offset = low_imm;
3491 ADD_NEW_INS (cfg, temp, OP_ICONST);
3492 temp->inst_c0 = ins->inst_offset;
3493 temp->dreg = mono_alloc_ireg (cfg);
3495 ADD_NEW_INS (cfg, add_ins, OP_IADD);
3496 add_ins->sreg1 = ins->inst_destbasereg;
3497 add_ins->sreg2 = temp->dreg;
3498 add_ins->dreg = mono_alloc_ireg (cfg);
3500 ins->inst_destbasereg = add_ins->dreg;
3501 ins->inst_offset = 0;
/* store-imm: materialize the value, then re-process as store-reg (the
 * goto re-runs the switch so a large offset is also handled). */
3504 case OP_STORE_MEMBASE_IMM:
3505 case OP_STOREI1_MEMBASE_IMM:
3506 case OP_STOREI2_MEMBASE_IMM:
3507 case OP_STOREI4_MEMBASE_IMM:
3508 ADD_NEW_INS (cfg, temp, OP_ICONST);
3509 temp->inst_c0 = ins->inst_imm;
3510 temp->dreg = mono_alloc_ireg (cfg);
3511 ins->sreg1 = temp->dreg;
3512 ins->opcode = map_to_reg_reg_op (ins->opcode);
3514 goto loop_start; /* make it handle the possibly big ins->inst_offset */
3516 gboolean swap = FALSE;
3520 /* Optimized away */
3525 /* Some fp compares require swapped operands */
3526 switch (ins->next->opcode) {
3528 ins->next->opcode = OP_FBLT;
3532 ins->next->opcode = OP_FBLT_UN;
3536 ins->next->opcode = OP_FBGE;
3540 ins->next->opcode = OP_FBGE_UN;
3548 ins->sreg1 = ins->sreg2;
3557 bb->last_ins = last_ins;
3558 bb->max_vreg = cfg->next_vreg;
/*
 * mono_arch_decompose_long_opts:
 *   Decompose 64-bit IR ops into 32-bit pairs.  Visible here: OP_LNEG
 *   becomes RSBS (negate low word, setting carry) + RSC (negate high word
 *   with carry).
 */
3562 mono_arch_decompose_long_opts (MonoCompile *cfg, MonoInst *long_ins)
3566 if (long_ins->opcode == OP_LNEG) {
3568 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ARM_RSBS_IMM, ins->dreg + 1, ins->sreg1 + 1, 0);
3569 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ARM_RSC_IMM, ins->dreg + 2, ins->sreg1 + 2, 0);
/*
 * emit_float_to_int:
 *   Emit VFP code converting the double in SREG to an integer in DREG
 *   (TOSIZD for signed, TOUIZD for unsigned, via a scratch VFP reg), then
 *   truncate/sign-extend the result to SIZE bytes with shift/mask ops.
 */
3575 emit_float_to_int (MonoCompile *cfg, guchar *code, int dreg, int sreg, int size, gboolean is_signed)
3577 /* sreg is a float, dreg is an integer reg */
3579 code = mono_arm_emit_vfp_scratch_save (cfg, code, vfp_scratch1);
3581 ARM_TOSIZD (code, vfp_scratch1, sreg);
3583 ARM_TOUIZD (code, vfp_scratch1, sreg);
3584 ARM_FMRS (code, dreg, vfp_scratch1);
3585 code = mono_arm_emit_vfp_scratch_restore (cfg, code, vfp_scratch1);
/* Unsigned narrowing: mask (byte) or shift-pair (halfword). */
3589 ARM_AND_REG_IMM8 (code, dreg, dreg, 0xff);
3590 else if (size == 2) {
3591 ARM_SHL_IMM (code, dreg, dreg, 16);
3592 ARM_SHR_IMM (code, dreg, dreg, 16);
/* Signed narrowing: shift left then arithmetic shift right. */
3596 ARM_SHL_IMM (code, dreg, dreg, 24);
3597 ARM_SAR_IMM (code, dreg, dreg, 24);
3598 } else if (size == 2) {
3599 ARM_SHL_IMM (code, dreg, dreg, 16);
3600 ARM_SAR_IMM (code, dreg, dreg, 16);
3606 #endif /* #ifndef DISABLE_JIT */
3610 const guchar *target;
3615 #define is_call_imm(diff) ((gint)(diff) >= -33554432 && (gint)(diff) <= 33554431)
/*
 * search_thunk_slot:
 *   mono_code_manager_foreach callback: within a code chunk's thunk area,
 *   find an existing 3-word thunk for pdata->target (or a free slot and
 *   emit one), then patch pdata->code to branch to it.  A thunk is
 *   { ldr ip, [pc]; bx/mov pc, ip; target }.  Sets pdata->found on success.
 */
3618 search_thunk_slot (void *data, int csize, int bsize, void *user_data) {
3619 PatchData *pdata = (PatchData*)user_data;
3620 guchar *code = data;
3621 guint32 *thunks = data;
3622 guint32 *endthunks = (guint32*)(code + bsize);
3624 int difflow, diffhigh;
3626 /* always ensure a call from pdata->code can reach to the thunks without further thunks */
3627 difflow = (char*)pdata->code - (char*)thunks;
3628 diffhigh = (char*)pdata->code - (char*)endthunks;
3629 if (!((is_call_imm (thunks) && is_call_imm (endthunks)) || (is_call_imm (difflow) && is_call_imm (diffhigh))))
3633 * The thunk is composed of 3 words:
3634 * load constant from thunks [2] into ARM_IP
3637 * Note that the LR register is already setup
3639 //g_print ("thunk nentries: %d\n", ((char*)endthunks - (char*)thunks)/16);
3640 if ((pdata->found == 2) || (pdata->code >= code && pdata->code <= code + csize)) {
3641 while (thunks < endthunks) {
3642 //g_print ("looking for target: %p at %p (%08x-%08x)\n", pdata->target, thunks, thunks [0], thunks [1]);
/* Reuse an existing thunk whose target word already matches. */
3643 if (thunks [2] == (guint32)pdata->target) {
3644 arm_patch (pdata->code, (guchar*)thunks);
3645 mono_arch_flush_icache (pdata->code, 4);
3648 } else if ((thunks [0] == 0) && (thunks [1] == 0) && (thunks [2] == 0)) {
3649 /* found a free slot instead: emit thunk */
3650 /* ARMREG_IP is fine to use since this can't be an IMT call
3653 code = (guchar*)thunks;
3654 ARM_LDR_IMM (code, ARMREG_IP, ARMREG_PC, 0);
3655 if (thumb_supported)
3656 ARM_BX (code, ARMREG_IP);
3658 ARM_MOV_REG_REG (code, ARMREG_PC, ARMREG_IP);
3659 thunks [2] = (guint32)pdata->target;
3660 mono_arch_flush_icache ((guchar*)thunks, 12);
3662 arm_patch (pdata->code, (guchar*)thunks);
3663 mono_arch_flush_icache (pdata->code, 4);
3667 /* skip 12 bytes, the size of the thunk */
3671 //g_print ("failed thunk lookup for %p from %p at %p (%d entries)\n", pdata->target, pdata->code, data, count);
/*
 * handle_thunk:
 *   Patch CODE to reach TARGET through a branch thunk when the direct
 *   branch displacement is out of range.  Searches, in order: the dynamic
 *   code manager (if given), the domain's code chunks, then each dynamic
 *   method's private code manager.  Asserts if no slot is found.
 */
3677 handle_thunk (MonoDomain *domain, int absolute, guchar *code, const guchar *target, MonoCodeManager *dyn_code_mp)
3682 domain = mono_domain_get ();
3685 pdata.target = target;
3686 pdata.absolute = absolute;
3690 mono_code_manager_foreach (dyn_code_mp, search_thunk_slot, &pdata);
3693 if (pdata.found != 1) {
3694 mono_domain_lock (domain);
3695 mono_domain_code_foreach (domain, search_thunk_slot, &pdata);
/* Second pass (found == 2 mode -- TODO confirm): take the first free slot
 * anywhere rather than only near the patch site. */
3698 /* this uses the first available slot */
3700 mono_domain_code_foreach (domain, search_thunk_slot, &pdata);
3702 mono_domain_unlock (domain);
3705 if (pdata.found != 1) {
3707 GHashTableIter iter;
3708 MonoJitDynamicMethodInfo *ji;
3711 * This might be a dynamic method, search its code manager. We can only
3712 * use the dynamic method containing CODE, since the others might be freed later.
3716 mono_domain_lock (domain);
3717 hash = domain_jit_info (domain)->dynamic_code_hash;
3719 /* FIXME: Speed this up */
3720 g_hash_table_iter_init (&iter, hash);
3721 while (g_hash_table_iter_next (&iter, NULL, (gpointer*)&ji)) {
3722 mono_code_manager_foreach (ji->code_mp, search_thunk_slot, &pdata);
3723 if (pdata.found == 1)
3727 mono_domain_unlock (domain);
3729 if (pdata.found != 1)
3730 g_print ("thunk failed for %p from %p\n", target, code);
3731 g_assert (pdata.found == 1);
/*
 * arm_patch_general:
 *   Patch the branch/call instruction (or constant-load sequence) at CODE
 *   so it transfers to TARGET.  Handles: direct B/BL (rewriting the 24-bit
 *   displacement, converting BL->BLX when entering Thumb), jump tables,
 *   and the various ldr-ip/bx-ip constant-load call sequences (patching the
 *   embedded address word).  Falls back to handle_thunk () when the
 *   displacement is out of range.
 *   NOTE(review): partial dump -- some branches and closing braces are on
 *   missing lines.
 */
3737 arm_patch_general (MonoDomain *domain, guchar *code, const guchar *target, MonoCodeManager *dyn_code_mp)
3739 guint32 *code32 = (void*)code;
3740 guint32 ins = *code32;
3741 guint32 prim = (ins >> 25) & 7;
3742 guint32 tval = GPOINTER_TO_UINT (target);
3744 //g_print ("patching 0x%08x (0x%08x) to point to 0x%08x\n", code, ins, target);
3745 if (prim == 5) { /* 101b */
3746 /* the diff starts 8 bytes from the branch opcode */
3747 gint diff = target - code - 8;
3749 gint tmask = 0xffffffff;
3750 if (tval & 1) { /* entering thumb mode */
3751 diff = target - 1 - code - 8;
3752 g_assert (thumb_supported);
3753 tbits = 0xf << 28; /* bl->blx bit pattern */
3754 g_assert ((ins & (1 << 24))); /* it must be a bl, not b instruction */
3755 /* this low bit of the displacement is moved to bit 24 in the instruction encoding */
3759 tmask = ~(1 << 24); /* clear the link bit */
3760 /*g_print ("blx to thumb: target: %p, code: %p, diff: %d, mask: %x\n", target, code, diff, tmask);*/
/* Forward branch: displacement must fit in the signed 24-bit field. */
3765 if (diff <= 33554431) {
3767 ins = (ins & 0xff000000) | diff;
3769 *code32 = ins | tbits;
3773 /* diff between 0 and -33554432 */
3774 if (diff >= -33554432) {
3776 ins = (ins & 0xff000000) | (diff & ~0xff000000);
3778 *code32 = ins | tbits;
/* Out of direct range: go through a thunk. */
3783 handle_thunk (domain, TRUE, code, target, dyn_code_mp);
3787 #ifdef USE_JUMP_TABLES
3789 gpointer *jte = mono_jumptable_get_entry (code);
3791 jte [0] = (gpointer) target;
3795 * The alternative call sequences looks like this:
3797 * ldr ip, [pc] // loads the address constant
3798 * b 1f // jumps around the constant
3799 * address constant embedded in the code
3804 * There are two cases for patching:
3805 * a) at the end of method emission: in this case code points to the start
3806 * of the call sequence
3807 * b) during runtime patching of the call site: in this case code points
3808 * to the mov pc, ip instruction
3810 * We have to handle also the thunk jump code sequence:
3814 * address constant // execution never reaches here
3816 if ((ins & 0x0ffffff0) == 0x12fff10) {
3817 /* Branch and exchange: the address is constructed in a reg
3818 * We can patch BX when the code sequence is the following:
3819 * ldr ip, [pc, #0] ; 0x8
/* Re-emit the expected sequence into a scratch buffer and compare against
 * the words around CODE to decide which patch case applies. */
3826 guint8 *emit = (guint8*)ccode;
3827 ARM_LDR_IMM (emit, ARMREG_IP, ARMREG_PC, 0);
3829 ARM_MOV_REG_REG (emit, ARMREG_LR, ARMREG_PC);
3830 ARM_BX (emit, ARMREG_IP);
3832 /*patching from magic trampoline*/
3833 if (ins == ccode [3]) {
3834 g_assert (code32 [-4] == ccode [0]);
3835 g_assert (code32 [-3] == ccode [1]);
3836 g_assert (code32 [-1] == ccode [2]);
3837 code32 [-2] = (guint32)target;
3840 /*patching from JIT*/
3841 if (ins == ccode [0]) {
3842 g_assert (code32 [1] == ccode [1]);
3843 g_assert (code32 [3] == ccode [2]);
3844 g_assert (code32 [4] == ccode [3]);
3845 code32 [2] = (guint32)target;
3848 g_assert_not_reached ();
3849 } else if ((ins & 0x0ffffff0) == 0x12fff30) {
/* BLX register form of the sequence. */
3857 guint8 *emit = (guint8*)ccode;
3858 ARM_LDR_IMM (emit, ARMREG_IP, ARMREG_PC, 0);
3860 ARM_BLX_REG (emit, ARMREG_IP);
3862 g_assert (code32 [-3] == ccode [0]);
3863 g_assert (code32 [-2] == ccode [1]);
3864 g_assert (code32 [0] == ccode [2]);
3866 code32 [-1] = (guint32)target;
3869 guint32 *tmp = ccode;
3870 guint8 *emit = (guint8*)tmp;
3871 ARM_LDR_IMM (emit, ARMREG_IP, ARMREG_PC, 0);
3872 ARM_MOV_REG_REG (emit, ARMREG_LR, ARMREG_PC);
3873 ARM_MOV_REG_REG (emit, ARMREG_PC, ARMREG_IP);
3874 ARM_BX (emit, ARMREG_IP);
3875 if (ins == ccode [2]) {
3876 g_assert_not_reached (); // should be -2 ...
3877 code32 [-1] = (guint32)target;
3880 if (ins == ccode [0]) {
3881 /* handles both thunk jump code and the far call sequence */
3882 code32 [2] = (guint32)target;
3885 g_assert_not_reached ();
3887 // g_print ("patched with 0x%08x\n", ins);
/*
 * arm_patch:
 *   Convenience wrapper: patch CODE to reach TARGET with no domain and no
 *   dynamic code manager (see arm_patch_general).
 */
3890 arm_patch (guchar *code, const guchar *target)
3892 arm_patch_general (NULL, code, target, NULL);
/*
 * mono_arm_is_rotated_imm8:
 *   Test whether VAL is encodable as an ARM data-processing immediate
 *   (an 8-bit value rotated right by an even amount).
 */
3896 * Return the >= 0 uimm8 value if val can be represented with a byte + rotation
3897 * (with the rotation amount in *rot_amount. rot_amount is already adjusted
3898 * to be used with the emit macros.
3899 * Return -1 otherwise.
3902 mono_arm_is_rotated_imm8 (guint32 val, gint *rot_amount)
/* Try every even rotation; `res` is val rotated left by i bits. */
3905 for (i = 0; i < 31; i+= 2) {
3906 res = (val << (32 - i)) | (val >> i);
3909 *rot_amount = i? 32 - i: 0;
/*
 * mono_arm_emit_load_imm:
 *   Load the 32-bit constant VAL into DREG using the shortest available
 *   sequence: a single MOV/MVN with rotated immediate, MOVW/MOVT where
 *   supported, or a MOV of the low byte plus up to three ADDs for the
 *   remaining bytes.  Returns the advanced code pointer.
 *   NOTE(review): partial dump -- the v7/jump-table preprocessor guards
 *   around the MOVW/MOVT path are on missing lines.
 */
3916 * Emits in code a sequence of instructions that load the value 'val'
3917 * into the dreg register. Uses at most 4 instructions.
3920 mono_arm_emit_load_imm (guint8 *code, int dreg, guint32 val)
3922 int imm8, rot_amount;
/* (Jump-table / constant-pool variant -- presumably under an #ifdef.) */
3924 ARM_LDR_IMM (code, dreg, ARMREG_PC, 0);
3925 /* skip the constant pool */
/* One-instruction encodings: rotated imm8, or MVN of its complement. */
3931 if ((imm8 = mono_arm_is_rotated_imm8 (val, &rot_amount)) >= 0) {
3932 ARM_MOV_REG_IMM (code, dreg, imm8, rot_amount);
3933 } else if ((imm8 = mono_arm_is_rotated_imm8 (~val, &rot_amount)) >= 0) {
3934 ARM_MVN_REG_IMM (code, dreg, imm8, rot_amount);
/* MOVW/MOVT pair (ARMv6T2+ -- guard not visible in this dump). */
3937 ARM_MOVW_REG_IMM (code, dreg, val & 0xffff);
3939 ARM_MOVT_REG_IMM (code, dreg, (val >> 16) & 0xffff);
/* Fallback: build the value byte by byte with MOV + rotated-imm ADDs. */
3943 ARM_MOV_REG_IMM8 (code, dreg, (val & 0xFF));
3945 ARM_ADD_REG_IMM (code, dreg, dreg, (val & 0xFF00) >> 8, 24);
3947 if (val & 0xFF0000) {
3948 ARM_ADD_REG_IMM (code, dreg, dreg, (val & 0xFF0000) >> 16, 16);
3950 if (val & 0xFF000000) {
3951 ARM_ADD_REG_IMM (code, dreg, dreg, (val & 0xFF000000) >> 24, 8);
3953 } else if (val & 0xFF00) {
3954 ARM_MOV_REG_IMM (code, dreg, (val & 0xFF00) >> 8, 24);
3955 if (val & 0xFF0000) {
3956 ARM_ADD_REG_IMM (code, dreg, dreg, (val & 0xFF0000) >> 16, 16);
3958 if (val & 0xFF000000) {
3959 ARM_ADD_REG_IMM (code, dreg, dreg, (val & 0xFF000000) >> 24, 8);
3961 } else if (val & 0xFF0000) {
3962 ARM_MOV_REG_IMM (code, dreg, (val & 0xFF0000) >> 16, 16);
3963 if (val & 0xFF000000) {
3964 ARM_ADD_REG_IMM (code, dreg, dreg, (val & 0xFF000000) >> 24, 8);
3967 //g_assert_not_reached ();
/* mono_arm_thumb_supported: report whether the CPU supports Thumb
 * interworking (value of the file-scope thumb_supported flag). */
3973 mono_arm_thumb_supported (void)
3975 return thumb_supported;
3981 * emit_load_volatile_arguments:
3983 * Load volatile arguments from the stack to the original input registers.
3984 * Required before a tail call.
3987 emit_load_volatile_arguments (MonoCompile *cfg, guint8 *code)
3989 MonoMethod *method = cfg->method;
3990 MonoMethodSignature *sig;
3995 /* FIXME: Generate intermediate code instead */
3997 sig = mono_method_signature (method);
3999 /* This is the opposite of the code in emit_prolog */
4003 cinfo = get_call_info (cfg->generic_sharing_context, NULL, sig);
/* Reload the hidden valuetype-return-address argument into its register. */
4005 if (cinfo->vtype_retaddr) {
4006 ArgInfo *ainfo = &cinfo->ret;
4007 inst = cfg->vret_addr;
4008 g_assert (arm_is_imm12 (inst->inst_offset));
4009 ARM_LDR_IMM (code, ainfo->reg, inst->inst_basereg, inst->inst_offset);
/* Walk every formal argument (plus the implicit 'this') and move it
 * back to where the calling convention expects it on method entry. */
4011 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
4012 ArgInfo *ainfo = cinfo->args + i;
4013 inst = cfg->args [pos];
4015 if (cfg->verbose_level > 2)
4016 g_print ("Loading argument %d (type: %d)\n", i, ainfo->storage);
/* Argument was register-allocated for the method body: a simple move
 * (or a stack reload for stack-passed arguments) restores it. */
4017 if (inst->opcode == OP_REGVAR) {
4018 if (ainfo->storage == RegTypeGeneral)
4019 ARM_MOV_REG_REG (code, inst->dreg, ainfo->reg);
4020 else if (ainfo->storage == RegTypeFP) {
4021 g_assert_not_reached ();
4022 } else if (ainfo->storage == RegTypeBase) {
4026 if (arm_is_imm12 (prev_sp_offset + ainfo->offset)) {
4027 ARM_LDR_IMM (code, inst->dreg, ARMREG_SP, (prev_sp_offset + ainfo->offset));
/* NOTE(review): the imm12 check above tests prev_sp_offset +
 * ainfo->offset, but this fallback loads inst->inst_offset into IP —
 * verify the two branches address the same slot. */
4029 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
4030 ARM_LDR_REG_REG (code, inst->dreg, ARMREG_SP, ARMREG_IP);
4034 g_assert_not_reached ();
/* Argument was spilled to the frame: load it back into the register
 * (or register pair) the convention assigns. */
4036 if (ainfo->storage == RegTypeGeneral || ainfo->storage == RegTypeIRegPair) {
4037 switch (ainfo->size) {
/* 8-byte argument: reload both words of the register pair. */
4044 g_assert (arm_is_imm12 (inst->inst_offset));
4045 ARM_LDR_IMM (code, ainfo->reg, inst->inst_basereg, inst->inst_offset);
4046 g_assert (arm_is_imm12 (inst->inst_offset + 4));
4047 ARM_LDR_IMM (code, ainfo->reg + 1, inst->inst_basereg, inst->inst_offset + 4);
/* Word-sized argument; go through IP when the offset exceeds imm12. */
4050 if (arm_is_imm12 (inst->inst_offset)) {
4051 ARM_LDR_IMM (code, ainfo->reg, inst->inst_basereg, inst->inst_offset);
4053 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
4054 ARM_LDR_REG_REG (code, ainfo->reg, inst->inst_basereg, ARMREG_IP);
4058 } else if (ainfo->storage == RegTypeBaseGen) {
4061 } else if (ainfo->storage == RegTypeBase) {
4063 } else if (ainfo->storage == RegTypeFP) {
4064 g_assert_not_reached ();
/* Struct passed by value in registers: reload it word by word. */
4065 } else if (ainfo->storage == RegTypeStructByVal) {
4066 int doffset = inst->inst_offset;
4070 if (mono_class_from_mono_type (inst->inst_vtype))
4071 size = mono_class_native_size (mono_class_from_mono_type (inst->inst_vtype), NULL);
4072 for (cur_reg = 0; cur_reg < ainfo->size; ++cur_reg) {
4073 if (arm_is_imm12 (doffset)) {
4074 ARM_LDR_IMM (code, ainfo->reg + cur_reg, inst->inst_basereg, doffset);
4076 code = mono_arm_emit_load_imm (code, ARMREG_IP, doffset);
4077 ARM_LDR_REG_REG (code, ainfo->reg + cur_reg, inst->inst_basereg, ARMREG_IP);
4079 soffset += sizeof (gpointer);
4080 doffset += sizeof (gpointer);
4085 } else if (ainfo->storage == RegTypeStructByAddr) {
4100 mono_arch_output_basic_block (MonoCompile *cfg, MonoBasicBlock *bb)
4105 guint8 *code = cfg->native_code + cfg->code_len;
4106 MonoInst *last_ins = NULL;
4107 guint last_offset = 0;
4109 int imm8, rot_amount;
4111 /* we don't align basic blocks of loops on arm */
4113 if (cfg->verbose_level > 2)
4114 g_print ("Basic block %d starting at offset 0x%x\n", bb->block_num, bb->native_offset);
4116 cpos = bb->max_offset;
4118 if (cfg->prof_options & MONO_PROFILE_COVERAGE) {
4119 //MonoCoverageInfo *cov = mono_get_coverage_info (cfg->method);
4120 //g_assert (!mono_compile_aot);
4123 // cov->data [bb->dfn].iloffset = bb->cil_code - cfg->cil_code;
4124 /* this is not thread save, but good enough */
4125 /* fixme: howto handle overflows? */
4126 //x86_inc_mem (code, &cov->data [bb->dfn].count);
4129 if (mono_break_at_bb_method && mono_method_desc_full_match (mono_break_at_bb_method, cfg->method) && bb->block_num == mono_break_at_bb_bb_num) {
4130 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_INTERNAL_METHOD,
4131 (gpointer)"mono_break");
4132 code = emit_call_seq (cfg, code);
4135 MONO_BB_FOR_EACH_INS (bb, ins) {
4136 offset = code - cfg->native_code;
4138 max_len = ((guint8 *)ins_get_spec (ins->opcode))[MONO_INST_LEN];
4140 if (offset > (cfg->code_size - max_len - 16)) {
4141 cfg->code_size *= 2;
4142 cfg->native_code = g_realloc (cfg->native_code, cfg->code_size);
4143 code = cfg->native_code + offset;
4145 // if (ins->cil_code)
4146 // g_print ("cil code\n");
4147 mono_debug_record_line_number (cfg, ins, offset);
4149 switch (ins->opcode) {
4150 case OP_MEMORY_BARRIER:
4152 ARM_MOV_REG_IMM8 (code, ARMREG_R0, 0);
4153 ARM_MCR (code, 15, 0, ARMREG_R0, 7, 10, 5);
4157 #ifdef HAVE_AEABI_READ_TP
4158 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_INTERNAL_METHOD,
4159 (gpointer)"__aeabi_read_tp");
4160 code = emit_call_seq (cfg, code);
4162 ARM_LDR_IMM (code, ins->dreg, ARMREG_R0, ins->inst_offset);
4164 g_assert_not_reached ();
4167 case OP_ATOMIC_EXCHANGE_I4:
4168 case OP_ATOMIC_CAS_I4:
4169 case OP_ATOMIC_ADD_NEW_I4: {
4173 g_assert (v7_supported);
4176 if (ins->sreg1 != ARMREG_IP && ins->sreg2 != ARMREG_IP && ins->sreg3 != ARMREG_IP)
4178 else if (ins->sreg1 != ARMREG_R0 && ins->sreg2 != ARMREG_R0 && ins->sreg3 != ARMREG_R0)
4180 else if (ins->sreg1 != ARMREG_R1 && ins->sreg2 != ARMREG_R1 && ins->sreg3 != ARMREG_R1)
4184 g_assert (cfg->arch.atomic_tmp_offset != -1);
4185 ARM_STR_IMM (code, tmpreg, cfg->frame_reg, cfg->arch.atomic_tmp_offset);
4187 switch (ins->opcode) {
4188 case OP_ATOMIC_EXCHANGE_I4:
4190 ARM_DMB (code, ARM_DMB_SY);
4191 ARM_LDREX_REG (code, ARMREG_LR, ins->sreg1);
4192 ARM_STREX_REG (code, tmpreg, ins->sreg2, ins->sreg1);
4193 ARM_CMP_REG_IMM (code, tmpreg, 0, 0);
4195 ARM_B_COND (code, ARMCOND_NE, 0);
4196 arm_patch (buf [1], buf [0]);
4198 case OP_ATOMIC_CAS_I4:
4199 ARM_DMB (code, ARM_DMB_SY);
4201 ARM_LDREX_REG (code, ARMREG_LR, ins->sreg1);
4202 ARM_CMP_REG_REG (code, ARMREG_LR, ins->sreg3);
4204 ARM_B_COND (code, ARMCOND_NE, 0);
4205 ARM_STREX_REG (code, tmpreg, ins->sreg2, ins->sreg1);
4206 ARM_CMP_REG_IMM (code, tmpreg, 0, 0);
4208 ARM_B_COND (code, ARMCOND_NE, 0);
4209 arm_patch (buf [2], buf [0]);
4210 arm_patch (buf [1], code);
4212 case OP_ATOMIC_ADD_NEW_I4:
4214 ARM_DMB (code, ARM_DMB_SY);
4215 ARM_LDREX_REG (code, ARMREG_LR, ins->sreg1);
4216 ARM_ADD_REG_REG (code, ARMREG_LR, ARMREG_LR, ins->sreg2);
4217 ARM_STREX_REG (code, tmpreg, ARMREG_LR, ins->sreg1);
4218 ARM_CMP_REG_IMM (code, tmpreg, 0, 0);
4220 ARM_B_COND (code, ARMCOND_NE, 0);
4221 arm_patch (buf [1], buf [0]);
4224 g_assert_not_reached ();
4227 ARM_DMB (code, ARM_DMB_SY);
4228 if (tmpreg != ins->dreg)
4229 ARM_LDR_IMM (code, tmpreg, cfg->frame_reg, cfg->arch.atomic_tmp_offset);
4230 ARM_MOV_REG_REG (code, ins->dreg, ARMREG_LR);
4235 ppc_mullw (code, ppc_r4, ins->sreg1, ins->sreg2);
4236 ppc_mulhw (code, ppc_r3, ins->sreg1, ins->sreg2);
4239 ppc_mullw (code, ppc_r4, ins->sreg1, ins->sreg2);
4240 ppc_mulhwu (code, ppc_r3, ins->sreg1, ins->sreg2);
4242 case OP_STOREI1_MEMBASE_IMM:
4243 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_imm & 0xFF);
4244 g_assert (arm_is_imm12 (ins->inst_offset));
4245 ARM_STRB_IMM (code, ARMREG_LR, ins->inst_destbasereg, ins->inst_offset);
4247 case OP_STOREI2_MEMBASE_IMM:
4248 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_imm & 0xFFFF);
4249 g_assert (arm_is_imm8 (ins->inst_offset));
4250 ARM_STRH_IMM (code, ARMREG_LR, ins->inst_destbasereg, ins->inst_offset);
4252 case OP_STORE_MEMBASE_IMM:
4253 case OP_STOREI4_MEMBASE_IMM:
4254 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_imm);
4255 g_assert (arm_is_imm12 (ins->inst_offset));
4256 ARM_STR_IMM (code, ARMREG_LR, ins->inst_destbasereg, ins->inst_offset);
4258 case OP_STOREI1_MEMBASE_REG:
4259 g_assert (arm_is_imm12 (ins->inst_offset));
4260 ARM_STRB_IMM (code, ins->sreg1, ins->inst_destbasereg, ins->inst_offset);
4262 case OP_STOREI2_MEMBASE_REG:
4263 g_assert (arm_is_imm8 (ins->inst_offset));
4264 ARM_STRH_IMM (code, ins->sreg1, ins->inst_destbasereg, ins->inst_offset);
4266 case OP_STORE_MEMBASE_REG:
4267 case OP_STOREI4_MEMBASE_REG:
4268 /* this case is special, since it happens for spill code after lowering has been called */
4269 if (arm_is_imm12 (ins->inst_offset)) {
4270 ARM_STR_IMM (code, ins->sreg1, ins->inst_destbasereg, ins->inst_offset);
4272 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_offset);
4273 ARM_STR_REG_REG (code, ins->sreg1, ins->inst_destbasereg, ARMREG_LR);
4276 case OP_STOREI1_MEMINDEX:
4277 ARM_STRB_REG_REG (code, ins->sreg1, ins->inst_destbasereg, ins->sreg2);
4279 case OP_STOREI2_MEMINDEX:
4280 ARM_STRH_REG_REG (code, ins->sreg1, ins->inst_destbasereg, ins->sreg2);
4282 case OP_STORE_MEMINDEX:
4283 case OP_STOREI4_MEMINDEX:
4284 ARM_STR_REG_REG (code, ins->sreg1, ins->inst_destbasereg, ins->sreg2);
4287 g_assert_not_reached ();
4289 case OP_LOAD_MEMINDEX:
4290 case OP_LOADI4_MEMINDEX:
4291 case OP_LOADU4_MEMINDEX:
4292 ARM_LDR_REG_REG (code, ins->dreg, ins->inst_basereg, ins->sreg2);
4294 case OP_LOADI1_MEMINDEX:
4295 ARM_LDRSB_REG_REG (code, ins->dreg, ins->inst_basereg, ins->sreg2);
4297 case OP_LOADU1_MEMINDEX:
4298 ARM_LDRB_REG_REG (code, ins->dreg, ins->inst_basereg, ins->sreg2);
4300 case OP_LOADI2_MEMINDEX:
4301 ARM_LDRSH_REG_REG (code, ins->dreg, ins->inst_basereg, ins->sreg2);
4303 case OP_LOADU2_MEMINDEX:
4304 ARM_LDRH_REG_REG (code, ins->dreg, ins->inst_basereg, ins->sreg2);
4306 case OP_LOAD_MEMBASE:
4307 case OP_LOADI4_MEMBASE:
4308 case OP_LOADU4_MEMBASE:
4309 /* this case is special, since it happens for spill code after lowering has been called */
4310 if (arm_is_imm12 (ins->inst_offset)) {
4311 ARM_LDR_IMM (code, ins->dreg, ins->inst_basereg, ins->inst_offset);
4313 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_offset);
4314 ARM_LDR_REG_REG (code, ins->dreg, ins->inst_basereg, ARMREG_LR);
4317 case OP_LOADI1_MEMBASE:
4318 g_assert (arm_is_imm8 (ins->inst_offset));
4319 ARM_LDRSB_IMM (code, ins->dreg, ins->inst_basereg, ins->inst_offset);
4321 case OP_LOADU1_MEMBASE:
4322 g_assert (arm_is_imm12 (ins->inst_offset));
4323 ARM_LDRB_IMM (code, ins->dreg, ins->inst_basereg, ins->inst_offset);
4325 case OP_LOADU2_MEMBASE:
4326 g_assert (arm_is_imm8 (ins->inst_offset));
4327 ARM_LDRH_IMM (code, ins->dreg, ins->inst_basereg, ins->inst_offset);
4329 case OP_LOADI2_MEMBASE:
4330 g_assert (arm_is_imm8 (ins->inst_offset));
4331 ARM_LDRSH_IMM (code, ins->dreg, ins->inst_basereg, ins->inst_offset);
4333 case OP_ICONV_TO_I1:
4334 ARM_SHL_IMM (code, ins->dreg, ins->sreg1, 24);
4335 ARM_SAR_IMM (code, ins->dreg, ins->dreg, 24);
4337 case OP_ICONV_TO_I2:
4338 ARM_SHL_IMM (code, ins->dreg, ins->sreg1, 16);
4339 ARM_SAR_IMM (code, ins->dreg, ins->dreg, 16);
4341 case OP_ICONV_TO_U1:
4342 ARM_AND_REG_IMM8 (code, ins->dreg, ins->sreg1, 0xff);
4344 case OP_ICONV_TO_U2:
4345 ARM_SHL_IMM (code, ins->dreg, ins->sreg1, 16);
4346 ARM_SHR_IMM (code, ins->dreg, ins->dreg, 16);
4350 ARM_CMP_REG_REG (code, ins->sreg1, ins->sreg2);
4352 case OP_COMPARE_IMM:
4353 case OP_ICOMPARE_IMM:
4354 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
4355 g_assert (imm8 >= 0);
4356 ARM_CMP_REG_IMM (code, ins->sreg1, imm8, rot_amount);
4360 * gdb does not like encountering the hw breakpoint ins in the debugged code.
4361 * So instead of emitting a trap, we emit a call a C function and place a
4364 //*(int*)code = 0xef9f0001;
4367 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_INTERNAL_METHOD,
4368 (gpointer)"mono_break");
4369 code = emit_call_seq (cfg, code);
4371 case OP_RELAXED_NOP:
4376 case OP_DUMMY_STORE:
4377 case OP_DUMMY_ICONST:
4378 case OP_DUMMY_R8CONST:
4379 case OP_NOT_REACHED:
4382 case OP_SEQ_POINT: {
4384 MonoInst *info_var = cfg->arch.seq_point_info_var;
4385 MonoInst *ss_trigger_page_var = cfg->arch.ss_trigger_page_var;
4386 MonoInst *ss_read_var = cfg->arch.seq_point_read_var;
4387 MonoInst *ss_method_var = cfg->arch.seq_point_ss_method_var;
4388 MonoInst *bp_method_var = cfg->arch.seq_point_bp_method_var;
4390 int dreg = ARMREG_LR;
4392 if (cfg->soft_breakpoints) {
4393 g_assert (!cfg->compile_aot);
4397 * For AOT, we use one got slot per method, which will point to a
4398 * SeqPointInfo structure, containing all the information required
4399 * by the code below.
4401 if (cfg->compile_aot) {
4402 g_assert (info_var);
4403 g_assert (info_var->opcode == OP_REGOFFSET);
4404 g_assert (arm_is_imm12 (info_var->inst_offset));
4407 if (!cfg->soft_breakpoints) {
4409 * Read from the single stepping trigger page. This will cause a
4410 * SIGSEGV when single stepping is enabled.
4411 * We do this _before_ the breakpoint, so single stepping after
4412 * a breakpoint is hit will step to the next IL offset.
4414 g_assert (((guint64)(gsize)ss_trigger_page >> 32) == 0);
4417 if (ins->flags & MONO_INST_SINGLE_STEP_LOC) {
4418 if (cfg->soft_breakpoints) {
4419 /* Load the address of the sequence point trigger variable. */
4422 g_assert (var->opcode == OP_REGOFFSET);
4423 g_assert (arm_is_imm12 (var->inst_offset));
4424 ARM_LDR_IMM (code, dreg, var->inst_basereg, var->inst_offset);
4426 /* Read the value and check whether it is non-zero. */
4427 ARM_LDR_IMM (code, dreg, dreg, 0);
4428 ARM_CMP_REG_IMM (code, dreg, 0, 0);
4430 /* Load the address of the sequence point method. */
4431 var = ss_method_var;
4433 g_assert (var->opcode == OP_REGOFFSET);
4434 g_assert (arm_is_imm12 (var->inst_offset));
4435 ARM_LDR_IMM (code, dreg, var->inst_basereg, var->inst_offset);
4437 /* Call it conditionally. */
4438 ARM_BLX_REG_COND (code, ARMCOND_NE, dreg);
4440 if (cfg->compile_aot) {
4441 /* Load the trigger page addr from the variable initialized in the prolog */
4442 var = ss_trigger_page_var;
4444 g_assert (var->opcode == OP_REGOFFSET);
4445 g_assert (arm_is_imm12 (var->inst_offset));
4446 ARM_LDR_IMM (code, dreg, var->inst_basereg, var->inst_offset);
4448 #ifdef USE_JUMP_TABLES
4449 gpointer *jte = mono_jumptable_add_entry ();
4450 code = mono_arm_load_jumptable_entry (code, jte, dreg);
4451 jte [0] = ss_trigger_page;
4453 ARM_LDR_IMM (code, dreg, ARMREG_PC, 0);
4455 *(int*)code = (int)ss_trigger_page;
4459 ARM_LDR_IMM (code, dreg, dreg, 0);
4463 mono_add_seq_point (cfg, bb, ins, code - cfg->native_code);
4465 if (cfg->soft_breakpoints) {
4466 /* Load the address of the breakpoint method into ip. */
4467 var = bp_method_var;
4469 g_assert (var->opcode == OP_REGOFFSET);
4470 g_assert (arm_is_imm12 (var->inst_offset));
4471 ARM_LDR_IMM (code, dreg, var->inst_basereg, var->inst_offset);
4474 * A placeholder for a possible breakpoint inserted by
4475 * mono_arch_set_breakpoint ().
4478 } else if (cfg->compile_aot) {
4479 guint32 offset = code - cfg->native_code;
4482 ARM_LDR_IMM (code, dreg, info_var->inst_basereg, info_var->inst_offset);
4483 /* Add the offset */
4484 val = ((offset / 4) * sizeof (guint8*)) + G_STRUCT_OFFSET (SeqPointInfo, bp_addrs);
4485 /* Load the info->bp_addrs [offset], which is either 0 or the address of a trigger page */
4486 if (arm_is_imm12 ((int)val)) {
4487 ARM_LDR_IMM (code, dreg, dreg, val);
4489 ARM_ADD_REG_IMM (code, dreg, dreg, (val & 0xFF), 0);
4491 ARM_ADD_REG_IMM (code, dreg, dreg, (val & 0xFF00) >> 8, 24);
4493 ARM_ADD_REG_IMM (code, dreg, dreg, (val & 0xFF0000) >> 16, 16);
4494 g_assert (!(val & 0xFF000000));
4496 ARM_LDR_IMM (code, dreg, dreg, 0);
4498 /* What is faster, a branch or a load ? */
4499 ARM_CMP_REG_IMM (code, dreg, 0, 0);
4500 /* The breakpoint instruction */
4501 ARM_LDR_IMM_COND (code, dreg, dreg, 0, ARMCOND_NE);
4504 * A placeholder for a possible breakpoint inserted by
4505 * mono_arch_set_breakpoint ().
4507 for (i = 0; i < 4; ++i)
4514 ARM_ADDS_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4517 ARM_ADD_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4521 ARM_ADCS_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4524 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
4525 g_assert (imm8 >= 0);
4526 ARM_ADDS_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
4530 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
4531 g_assert (imm8 >= 0);
4532 ARM_ADD_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
4536 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
4537 g_assert (imm8 >= 0);
4538 ARM_ADCS_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
4541 ARM_ADD_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4542 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, "OverflowException");
4544 case OP_IADD_OVF_UN:
4545 ARM_ADD_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4546 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, "OverflowException");
4549 ARM_SUB_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4550 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, "OverflowException");
4552 case OP_ISUB_OVF_UN:
4553 ARM_SUB_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4554 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_TRUE, PPC_BR_EQ, "OverflowException");
4556 case OP_ADD_OVF_CARRY:
4557 ARM_ADCS_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4558 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, "OverflowException");
4560 case OP_ADD_OVF_UN_CARRY:
4561 ARM_ADCS_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4562 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, "OverflowException");
4564 case OP_SUB_OVF_CARRY:
4565 ARM_SBCS_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4566 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, "OverflowException");
4568 case OP_SUB_OVF_UN_CARRY:
4569 ARM_SBCS_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4570 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_TRUE, PPC_BR_EQ, "OverflowException");
4574 ARM_SUBS_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4577 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
4578 g_assert (imm8 >= 0);
4579 ARM_SUBS_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
4582 ARM_SUB_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4586 ARM_SBCS_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4590 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
4591 g_assert (imm8 >= 0);
4592 ARM_SUB_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
4596 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
4597 g_assert (imm8 >= 0);
4598 ARM_SBCS_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
4600 case OP_ARM_RSBS_IMM:
4601 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
4602 g_assert (imm8 >= 0);
4603 ARM_RSBS_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
4605 case OP_ARM_RSC_IMM:
4606 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
4607 g_assert (imm8 >= 0);
4608 ARM_RSC_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
4611 ARM_AND_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4615 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
4616 g_assert (imm8 >= 0);
4617 ARM_AND_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
4620 g_assert (v7s_supported);
4621 ARM_SDIV (code, ins->dreg, ins->sreg1, ins->sreg2);
4624 g_assert (v7s_supported);
4625 ARM_UDIV (code, ins->dreg, ins->sreg1, ins->sreg2);
4628 g_assert (v7s_supported);
4629 ARM_SDIV (code, ARMREG_LR, ins->sreg1, ins->sreg2);
4630 ARM_MLS (code, ins->dreg, ARMREG_LR, ins->sreg2, ins->sreg1);
4633 g_assert (v7s_supported);
4634 ARM_UDIV (code, ARMREG_LR, ins->sreg1, ins->sreg2);
4635 ARM_MLS (code, ins->dreg, ARMREG_LR, ins->sreg2, ins->sreg1);
4639 g_assert_not_reached ();
4641 ARM_ORR_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4645 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
4646 g_assert (imm8 >= 0);
4647 ARM_ORR_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
4650 ARM_EOR_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4654 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
4655 g_assert (imm8 >= 0);
4656 ARM_EOR_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
4659 ARM_SHL_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4664 ARM_SHL_IMM (code, ins->dreg, ins->sreg1, (ins->inst_imm & 0x1f));
4665 else if (ins->dreg != ins->sreg1)
4666 ARM_MOV_REG_REG (code, ins->dreg, ins->sreg1);
4669 ARM_SAR_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4674 ARM_SAR_IMM (code, ins->dreg, ins->sreg1, (ins->inst_imm & 0x1f));
4675 else if (ins->dreg != ins->sreg1)
4676 ARM_MOV_REG_REG (code, ins->dreg, ins->sreg1);
4679 case OP_ISHR_UN_IMM:
4681 ARM_SHR_IMM (code, ins->dreg, ins->sreg1, (ins->inst_imm & 0x1f));
4682 else if (ins->dreg != ins->sreg1)
4683 ARM_MOV_REG_REG (code, ins->dreg, ins->sreg1);
4686 ARM_SHR_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4689 ARM_MVN_REG_REG (code, ins->dreg, ins->sreg1);
4692 ARM_RSB_REG_IMM8 (code, ins->dreg, ins->sreg1, 0);
4695 if (ins->dreg == ins->sreg2)
4696 ARM_MUL_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4698 ARM_MUL_REG_REG (code, ins->dreg, ins->sreg2, ins->sreg1);
4701 g_assert_not_reached ();
4704 /* FIXME: handle ovf/ sreg2 != dreg */
4705 ARM_MUL_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4706 /* FIXME: MUL doesn't set the C/O flags on ARM */
4708 case OP_IMUL_OVF_UN:
4709 /* FIXME: handle ovf/ sreg2 != dreg */
4710 ARM_MUL_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4711 /* FIXME: MUL doesn't set the C/O flags on ARM */
4714 code = mono_arm_emit_load_imm (code, ins->dreg, ins->inst_c0);
4717 /* Load the GOT offset */
4718 mono_add_patch_info (cfg, offset, (MonoJumpInfoType)ins->inst_i1, ins->inst_p0);
4719 ARM_LDR_IMM (code, ins->dreg, ARMREG_PC, 0);
4721 *(gpointer*)code = NULL;
4723 /* Load the value from the GOT */
4724 ARM_LDR_REG_REG (code, ins->dreg, ARMREG_PC, ins->dreg);
4726 case OP_OBJC_GET_SELECTOR:
4727 mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_OBJC_SELECTOR_REF, ins->inst_p0);
4728 ARM_LDR_IMM (code, ins->dreg, ARMREG_PC, 0);
4730 *(gpointer*)code = NULL;
4732 ARM_LDR_REG_REG (code, ins->dreg, ARMREG_PC, ins->dreg);
4734 case OP_ICONV_TO_I4:
4735 case OP_ICONV_TO_U4:
4737 if (ins->dreg != ins->sreg1)
4738 ARM_MOV_REG_REG (code, ins->dreg, ins->sreg1);
4741 int saved = ins->sreg2;
4742 if (ins->sreg2 == ARM_LSW_REG) {
4743 ARM_MOV_REG_REG (code, ARMREG_LR, ins->sreg2);
4746 if (ins->sreg1 != ARM_LSW_REG)
4747 ARM_MOV_REG_REG (code, ARM_LSW_REG, ins->sreg1);
4748 if (saved != ARM_MSW_REG)
4749 ARM_MOV_REG_REG (code, ARM_MSW_REG, saved);
4754 ARM_CPYD (code, ins->dreg, ins->sreg1);
4756 case OP_FCONV_TO_R4:
4758 ARM_CVTD (code, ins->dreg, ins->sreg1);
4759 ARM_CVTS (code, ins->dreg, ins->dreg);
4764 * Keep in sync with mono_arch_emit_epilog
4766 g_assert (!cfg->method->save_lmf);
4768 code = emit_load_volatile_arguments (cfg, code);
4770 code = emit_big_add (code, ARMREG_SP, cfg->frame_reg, cfg->stack_usage);
4772 if (cfg->used_int_regs)
4773 ARM_POP (code, cfg->used_int_regs);
4774 ARM_POP (code, (1 << ARMREG_R7) | (1 << ARMREG_LR));
4776 ARM_POP (code, cfg->used_int_regs | (1 << ARMREG_LR));
4778 mono_add_patch_info (cfg, (guint8*) code - cfg->native_code, MONO_PATCH_INFO_METHOD_JUMP, ins->inst_p0);
4779 if (cfg->compile_aot) {
4780 ARM_LDR_IMM (code, ARMREG_IP, ARMREG_PC, 0);
4782 *(gpointer*)code = NULL;
4784 ARM_LDR_REG_REG (code, ARMREG_PC, ARMREG_PC, ARMREG_IP);
4786 code = mono_arm_patchable_b (code, ARMCOND_AL);
4790 MonoCallInst *call = (MonoCallInst*)ins;
4793 * The stack looks like the following:
4794 * <caller argument area>
4797 * <callee argument area>
4798 * Need to copy the arguments from the callee argument area to
4799 * the caller argument area, and pop the frame.
4801 if (call->stack_usage) {
4802 int i, prev_sp_offset = 0;
4804 /* Compute size of saved registers restored below */
4806 prev_sp_offset = 2 * 4;
4808 prev_sp_offset = 1 * 4;
4809 for (i = 0; i < 16; ++i) {
4810 if (cfg->used_int_regs & (1 << i))
4811 prev_sp_offset += 4;
4814 code = emit_big_add (code, ARMREG_IP, cfg->frame_reg, cfg->stack_usage + prev_sp_offset);
4816 /* Copy arguments on the stack to our argument area */
4817 for (i = 0; i < call->stack_usage; i += sizeof (mgreg_t)) {
4818 ARM_LDR_IMM (code, ARMREG_LR, ARMREG_SP, i);
4819 ARM_STR_IMM (code, ARMREG_LR, ARMREG_IP, i);
4824 * Keep in sync with mono_arch_emit_epilog
4826 g_assert (!cfg->method->save_lmf);
4828 code = emit_big_add (code, ARMREG_SP, cfg->frame_reg, cfg->stack_usage);
4830 if (cfg->used_int_regs)
4831 ARM_POP (code, cfg->used_int_regs);
4832 ARM_POP (code, (1 << ARMREG_R7) | (1 << ARMREG_LR));
4834 ARM_POP (code, cfg->used_int_regs | (1 << ARMREG_LR));
4837 mono_add_patch_info (cfg, (guint8*) code - cfg->native_code, MONO_PATCH_INFO_METHOD_JUMP, call->method);
4838 if (cfg->compile_aot) {
4839 ARM_LDR_IMM (code, ARMREG_IP, ARMREG_PC, 0);
4841 *(gpointer*)code = NULL;
4843 ARM_LDR_REG_REG (code, ARMREG_PC, ARMREG_PC, ARMREG_IP);
4845 code = mono_arm_patchable_b (code, ARMCOND_AL);
4850 /* ensure ins->sreg1 is not NULL */
4851 ARM_LDRB_IMM (code, ARMREG_LR, ins->sreg1, 0);
4854 g_assert (cfg->sig_cookie < 128);
4855 ARM_LDR_IMM (code, ARMREG_IP, cfg->frame_reg, cfg->sig_cookie);
4856 ARM_STR_IMM (code, ARMREG_IP, ins->sreg1, 0);
4865 call = (MonoCallInst*)ins;
4868 code = emit_float_args (cfg, call, code, &max_len, &offset);
4870 if (ins->flags & MONO_INST_HAS_METHOD)
4871 mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_METHOD, call->method);
4873 mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_ABS, call->fptr);
4874 code = emit_call_seq (cfg, code);
4875 ins->flags |= MONO_INST_GC_CALLSITE;
4876 ins->backend.pc_offset = code - cfg->native_code;
4877 code = emit_move_return_value (cfg, ins, code);
4883 case OP_VOIDCALL_REG:
4886 code = emit_float_args (cfg, (MonoCallInst *)ins, code, &max_len, &offset);
4888 code = emit_call_reg (code, ins->sreg1);
4889 ins->flags |= MONO_INST_GC_CALLSITE;
4890 ins->backend.pc_offset = code - cfg->native_code;
4891 code = emit_move_return_value (cfg, ins, code);
4893 case OP_FCALL_MEMBASE:
4894 case OP_LCALL_MEMBASE:
4895 case OP_VCALL_MEMBASE:
4896 case OP_VCALL2_MEMBASE:
4897 case OP_VOIDCALL_MEMBASE:
4898 case OP_CALL_MEMBASE: {
4899 gboolean imt_arg = FALSE;
4901 g_assert (ins->sreg1 != ARMREG_LR);
4902 call = (MonoCallInst*)ins;
4905 code = emit_float_args (cfg, call, code, &max_len, &offset);
4907 if (call->dynamic_imt_arg || call->method->klass->flags & TYPE_ATTRIBUTE_INTERFACE)
4909 if (!arm_is_imm12 (ins->inst_offset))
4910 code = mono_arm_emit_load_imm (code, ARMREG_IP, ins->inst_offset);
4911 #ifdef USE_JUMP_TABLES
4917 ARM_ADD_REG_IMM8 (code, ARMREG_LR, ARMREG_PC, LR_BIAS);
4919 ARM_MOV_REG_REG (code, ARMREG_LR, ARMREG_PC);
4921 if (!arm_is_imm12 (ins->inst_offset))
4922 ARM_LDR_REG_REG (code, ARMREG_PC, ins->sreg1, ARMREG_IP);
4924 ARM_LDR_IMM (code, ARMREG_PC, ins->sreg1, ins->inst_offset);
4927 * We can't embed the method in the code stream in PIC code, or
4929 * Instead, we put it in V5 in code emitted by
4930 * mono_arch_emit_imt_argument (), and embed NULL here to
4931 * signal the IMT thunk that the value is in V5.
4933 #ifdef USE_JUMP_TABLES
4934 /* In case of jumptables we always use value in V5. */
4937 if (call->dynamic_imt_arg)
4938 *((gpointer*)code) = NULL;
4940 *((gpointer*)code) = (gpointer)call->method;
4944 ins->flags |= MONO_INST_GC_CALLSITE;
4945 ins->backend.pc_offset = code - cfg->native_code;
4946 code = emit_move_return_value (cfg, ins, code);
4950 /* keep alignment */
4951 int alloca_waste = cfg->param_area;
4954 /* round the size to 8 bytes */
4955 ARM_ADD_REG_IMM8 (code, ins->dreg, ins->sreg1, 7);
4956 ARM_BIC_REG_IMM8 (code, ins->dreg, ins->dreg, 7);
4958 ARM_ADD_REG_IMM8 (code, ins->dreg, ins->dreg, alloca_waste);
4959 ARM_SUB_REG_REG (code, ARMREG_SP, ARMREG_SP, ins->dreg);
4960 /* memzero the area: dreg holds the size, sp is the pointer */
4961 if (ins->flags & MONO_INST_INIT) {
4962 guint8 *start_loop, *branch_to_cond;
4963 ARM_MOV_REG_IMM8 (code, ARMREG_LR, 0);
4964 branch_to_cond = code;
4967 ARM_STR_REG_REG (code, ARMREG_LR, ARMREG_SP, ins->dreg);
4968 arm_patch (branch_to_cond, code);
4969 /* decrement by 4 and set flags */
4970 ARM_SUBS_REG_IMM8 (code, ins->dreg, ins->dreg, sizeof (mgreg_t));
4971 ARM_B_COND (code, ARMCOND_GE, 0);
4972 arm_patch (code - 4, start_loop);
4974 ARM_ADD_REG_IMM8 (code, ins->dreg, ARMREG_SP, alloca_waste);
4979 MonoInst *var = cfg->dyn_call_var;
4981 g_assert (var->opcode == OP_REGOFFSET);
4982 g_assert (arm_is_imm12 (var->inst_offset));
4984 /* lr = args buffer filled by mono_arch_get_dyn_call_args () */
4985 ARM_MOV_REG_REG( code, ARMREG_LR, ins->sreg1);
4987 ARM_MOV_REG_REG( code, ARMREG_IP, ins->sreg2);
4989 /* Save args buffer */
4990 ARM_STR_IMM (code, ARMREG_LR, var->inst_basereg, var->inst_offset);
4992 /* Set stack slots using R0 as scratch reg */
4993 /* MONO_ARCH_DYN_CALL_PARAM_AREA gives the size of stack space available */
4994 for (i = 0; i < DYN_CALL_STACK_ARGS; ++i) {
4995 ARM_LDR_IMM (code, ARMREG_R0, ARMREG_LR, (PARAM_REGS + i) * sizeof (mgreg_t));
4996 ARM_STR_IMM (code, ARMREG_R0, ARMREG_SP, i * sizeof (mgreg_t));
4999 /* Set argument registers */
5000 for (i = 0; i < PARAM_REGS; ++i)
5001 ARM_LDR_IMM (code, i, ARMREG_LR, i * sizeof (mgreg_t));
5004 ARM_MOV_REG_REG (code, ARMREG_LR, ARMREG_PC);
5005 ARM_MOV_REG_REG (code, ARMREG_PC, ARMREG_IP);
5008 ARM_LDR_IMM (code, ARMREG_IP, var->inst_basereg, var->inst_offset);
5009 ARM_STR_IMM (code, ARMREG_R0, ARMREG_IP, G_STRUCT_OFFSET (DynCallArgs, res));
5010 ARM_STR_IMM (code, ARMREG_R1, ARMREG_IP, G_STRUCT_OFFSET (DynCallArgs, res2));
5014 if (ins->sreg1 != ARMREG_R0)
5015 ARM_MOV_REG_REG (code, ARMREG_R0, ins->sreg1);
5016 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_INTERNAL_METHOD,
5017 (gpointer)"mono_arch_throw_exception");
5018 code = emit_call_seq (cfg, code);
5022 if (ins->sreg1 != ARMREG_R0)
5023 ARM_MOV_REG_REG (code, ARMREG_R0, ins->sreg1);
5024 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_INTERNAL_METHOD,
5025 (gpointer)"mono_arch_rethrow_exception");
5026 code = emit_call_seq (cfg, code);
5029 case OP_START_HANDLER: {
5030 MonoInst *spvar = mono_find_spvar_for_region (cfg, bb->region);
5033 /* Reserve a param area, see filter-stack.exe */
5034 if (cfg->param_area) {
5035 if ((i = mono_arm_is_rotated_imm8 (cfg->param_area, &rot_amount)) >= 0) {
5036 ARM_SUB_REG_IMM (code, ARMREG_SP, ARMREG_SP, i, rot_amount);
5038 code = mono_arm_emit_load_imm (code, ARMREG_IP, cfg->param_area);
5039 ARM_SUB_REG_REG (code, ARMREG_SP, ARMREG_SP, ARMREG_IP);
5043 if (arm_is_imm12 (spvar->inst_offset)) {
5044 ARM_STR_IMM (code, ARMREG_LR, spvar->inst_basereg, spvar->inst_offset);
5046 code = mono_arm_emit_load_imm (code, ARMREG_IP, spvar->inst_offset);
5047 ARM_STR_REG_REG (code, ARMREG_LR, spvar->inst_basereg, ARMREG_IP);
5051 case OP_ENDFILTER: {
5052 MonoInst *spvar = mono_find_spvar_for_region (cfg, bb->region);
5055 /* Free the param area */
5056 if (cfg->param_area) {
5057 if ((i = mono_arm_is_rotated_imm8 (cfg->param_area, &rot_amount)) >= 0) {
5058 ARM_ADD_REG_IMM (code, ARMREG_SP, ARMREG_SP, i, rot_amount);
5060 code = mono_arm_emit_load_imm (code, ARMREG_IP, cfg->param_area);
5061 ARM_ADD_REG_REG (code, ARMREG_SP, ARMREG_SP, ARMREG_IP);
5065 if (ins->sreg1 != ARMREG_R0)
5066 ARM_MOV_REG_REG (code, ARMREG_R0, ins->sreg1);
5067 if (arm_is_imm12 (spvar->inst_offset)) {
5068 ARM_LDR_IMM (code, ARMREG_IP, spvar->inst_basereg, spvar->inst_offset);
5070 g_assert (ARMREG_IP != spvar->inst_basereg);
5071 code = mono_arm_emit_load_imm (code, ARMREG_IP, spvar->inst_offset);
5072 ARM_LDR_REG_REG (code, ARMREG_IP, spvar->inst_basereg, ARMREG_IP);
5074 ARM_MOV_REG_REG (code, ARMREG_PC, ARMREG_IP);
5077 case OP_ENDFINALLY: {
5078 MonoInst *spvar = mono_find_spvar_for_region (cfg, bb->region);
5081 /* Free the param area */
5082 if (cfg->param_area) {
5083 if ((i = mono_arm_is_rotated_imm8 (cfg->param_area, &rot_amount)) >= 0) {
5084 ARM_ADD_REG_IMM (code, ARMREG_SP, ARMREG_SP, i, rot_amount);
5086 code = mono_arm_emit_load_imm (code, ARMREG_IP, cfg->param_area);
5087 ARM_ADD_REG_REG (code, ARMREG_SP, ARMREG_SP, ARMREG_IP);
5091 if (arm_is_imm12 (spvar->inst_offset)) {
5092 ARM_LDR_IMM (code, ARMREG_IP, spvar->inst_basereg, spvar->inst_offset);
5094 g_assert (ARMREG_IP != spvar->inst_basereg);
5095 code = mono_arm_emit_load_imm (code, ARMREG_IP, spvar->inst_offset);
5096 ARM_LDR_REG_REG (code, ARMREG_IP, spvar->inst_basereg, ARMREG_IP);
5098 ARM_MOV_REG_REG (code, ARMREG_PC, ARMREG_IP);
5101 case OP_CALL_HANDLER:
5102 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_BB, ins->inst_target_bb);
5103 code = mono_arm_patchable_bl (code, ARMCOND_AL);
5104 mono_cfg_add_try_hole (cfg, ins->inst_eh_block, code, bb);
5107 ins->inst_c0 = code - cfg->native_code;
5110 /*if (ins->inst_target_bb->native_offset) {
5112 //x86_jump_code (code, cfg->native_code + ins->inst_target_bb->native_offset);
5114 mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_BB, ins->inst_target_bb);
5115 code = mono_arm_patchable_b (code, ARMCOND_AL);
5119 ARM_MOV_REG_REG (code, ARMREG_PC, ins->sreg1);
5123 * In the normal case we have:
5124 * ldr pc, [pc, ins->sreg1 << 2]
5127 * ldr lr, [pc, ins->sreg1 << 2]
5129 * After follows the data.
5130 * FIXME: add aot support.
5132 mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_SWITCH, ins->inst_p0);
5133 #ifdef USE_JUMP_TABLES
5135 gpointer *jte = mono_jumptable_add_entries (GPOINTER_TO_INT (ins->klass));
5136 code = mono_arm_load_jumptable_entry_addr (code, jte, ARMREG_IP);
5137 ARM_LDR_REG_REG_SHIFT (code, ARMREG_PC, ARMREG_IP, ins->sreg1, ARMSHIFT_LSL, 2);
5141 max_len += 4 * GPOINTER_TO_INT (ins->klass);
5142 if (offset + max_len > (cfg->code_size - 16)) {
5143 cfg->code_size += max_len;
5144 cfg->code_size *= 2;
5145 cfg->native_code = g_realloc (cfg->native_code, cfg->code_size);
5146 code = cfg->native_code + offset;
5148 ARM_LDR_REG_REG_SHIFT (code, ARMREG_PC, ARMREG_PC, ins->sreg1, ARMSHIFT_LSL, 2);
5150 code += 4 * GPOINTER_TO_INT (ins->klass);
5155 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 0, ARMCOND_NE);
5156 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_EQ);
5160 ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
5161 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_LT);
5165 ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
5166 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_LO);
5170 ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
5171 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_GT);
5175 ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
5176 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_HI);
5179 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_NE);
5180 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 0, ARMCOND_EQ);
5183 ARM_MOV_REG_IMM8 (code, ins->dreg, 1);
5184 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 0, ARMCOND_LT);
5187 ARM_MOV_REG_IMM8 (code, ins->dreg, 1);
5188 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 0, ARMCOND_GT);
5192 ARM_MOV_REG_IMM8 (code, ins->dreg, 1);
5193 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 0, ARMCOND_LO);
5195 case OP_COND_EXC_EQ:
5196 case OP_COND_EXC_NE_UN:
5197 case OP_COND_EXC_LT:
5198 case OP_COND_EXC_LT_UN:
5199 case OP_COND_EXC_GT:
5200 case OP_COND_EXC_GT_UN:
5201 case OP_COND_EXC_GE:
5202 case OP_COND_EXC_GE_UN:
5203 case OP_COND_EXC_LE:
5204 case OP_COND_EXC_LE_UN:
5205 EMIT_COND_SYSTEM_EXCEPTION (ins->opcode - OP_COND_EXC_EQ, ins->inst_p1);
5207 case OP_COND_EXC_IEQ:
5208 case OP_COND_EXC_INE_UN:
5209 case OP_COND_EXC_ILT:
5210 case OP_COND_EXC_ILT_UN:
5211 case OP_COND_EXC_IGT:
5212 case OP_COND_EXC_IGT_UN:
5213 case OP_COND_EXC_IGE:
5214 case OP_COND_EXC_IGE_UN:
5215 case OP_COND_EXC_ILE:
5216 case OP_COND_EXC_ILE_UN:
5217 EMIT_COND_SYSTEM_EXCEPTION (ins->opcode - OP_COND_EXC_IEQ, ins->inst_p1);
5220 case OP_COND_EXC_IC:
5221 EMIT_COND_SYSTEM_EXCEPTION_FLAGS (ARMCOND_CS, ins->inst_p1);
5223 case OP_COND_EXC_OV:
5224 case OP_COND_EXC_IOV:
5225 EMIT_COND_SYSTEM_EXCEPTION_FLAGS (ARMCOND_VS, ins->inst_p1);
5227 case OP_COND_EXC_NC:
5228 case OP_COND_EXC_INC:
5229 EMIT_COND_SYSTEM_EXCEPTION_FLAGS (ARMCOND_CC, ins->inst_p1);
5231 case OP_COND_EXC_NO:
5232 case OP_COND_EXC_INO:
5233 EMIT_COND_SYSTEM_EXCEPTION_FLAGS (ARMCOND_VC, ins->inst_p1);
5245 EMIT_COND_BRANCH (ins, ins->opcode - OP_IBEQ);
5248 /* floating point opcodes */
5250 if (cfg->compile_aot) {
5251 ARM_FLDD (code, ins->dreg, ARMREG_PC, 0);
5253 *(guint32*)code = ((guint32*)(ins->inst_p0))[0];
5255 *(guint32*)code = ((guint32*)(ins->inst_p0))[1];
5258 /* FIXME: we can optimize the imm load by dealing with part of
5259 * the displacement in LDFD (aligning to 512).
5261 code = mono_arm_emit_load_imm (code, ARMREG_LR, (guint32)ins->inst_p0);
5262 ARM_FLDD (code, ins->dreg, ARMREG_LR, 0);
5266 if (cfg->compile_aot) {
5267 ARM_FLDS (code, ins->dreg, ARMREG_PC, 0);
5269 *(guint32*)code = ((guint32*)(ins->inst_p0))[0];
5271 ARM_CVTS (code, ins->dreg, ins->dreg);
5273 code = mono_arm_emit_load_imm (code, ARMREG_LR, (guint32)ins->inst_p0);
5274 ARM_FLDS (code, ins->dreg, ARMREG_LR, 0);
5275 ARM_CVTS (code, ins->dreg, ins->dreg);
5278 case OP_STORER8_MEMBASE_REG:
5279 /* This is generated by the local regalloc pass which runs after the lowering pass */
5280 if (!arm_is_fpimm8 (ins->inst_offset)) {
5281 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_offset);
5282 ARM_ADD_REG_REG (code, ARMREG_LR, ARMREG_LR, ins->inst_destbasereg);
5283 ARM_FSTD (code, ins->sreg1, ARMREG_LR, 0);
5285 ARM_FSTD (code, ins->sreg1, ins->inst_destbasereg, ins->inst_offset);
5288 case OP_LOADR8_MEMBASE:
5289 /* This is generated by the local regalloc pass which runs after the lowering pass */
5290 if (!arm_is_fpimm8 (ins->inst_offset)) {
5291 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_offset);
5292 ARM_ADD_REG_REG (code, ARMREG_LR, ARMREG_LR, ins->inst_basereg);
5293 ARM_FLDD (code, ins->dreg, ARMREG_LR, 0);
5295 ARM_FLDD (code, ins->dreg, ins->inst_basereg, ins->inst_offset);
5298 case OP_STORER4_MEMBASE_REG:
5299 g_assert (arm_is_fpimm8 (ins->inst_offset));
5300 code = mono_arm_emit_vfp_scratch_save (cfg, code, vfp_scratch1);
5301 ARM_CVTD (code, vfp_scratch1, ins->sreg1);
5302 ARM_FSTS (code, vfp_scratch1, ins->inst_destbasereg, ins->inst_offset);
5303 code = mono_arm_emit_vfp_scratch_restore (cfg, code, vfp_scratch1);
5305 case OP_LOADR4_MEMBASE:
5306 g_assert (arm_is_fpimm8 (ins->inst_offset));
5307 code = mono_arm_emit_vfp_scratch_save (cfg, code, vfp_scratch1);
5308 ARM_FLDS (code, vfp_scratch1, ins->inst_basereg, ins->inst_offset);
5309 ARM_CVTS (code, ins->dreg, vfp_scratch1);
5310 code = mono_arm_emit_vfp_scratch_restore (cfg, code, vfp_scratch1);
5312 case OP_ICONV_TO_R_UN: {
5313 g_assert_not_reached ();
5316 case OP_ICONV_TO_R4:
5317 code = mono_arm_emit_vfp_scratch_save (cfg, code, vfp_scratch1);
5318 ARM_FMSR (code, vfp_scratch1, ins->sreg1);
5319 ARM_FSITOS (code, vfp_scratch1, vfp_scratch1);
5320 ARM_CVTS (code, ins->dreg, vfp_scratch1);
5321 code = mono_arm_emit_vfp_scratch_restore (cfg, code, vfp_scratch1);
5323 case OP_ICONV_TO_R8:
5324 code = mono_arm_emit_vfp_scratch_save (cfg, code, vfp_scratch1);
5325 ARM_FMSR (code, vfp_scratch1, ins->sreg1);
5326 ARM_FSITOD (code, ins->dreg, vfp_scratch1);
5327 code = mono_arm_emit_vfp_scratch_restore (cfg, code, vfp_scratch1);
5331 if (mono_method_signature (cfg->method)->ret->type == MONO_TYPE_R4) {
5332 ARM_CVTD (code, ARM_VFP_F0, ins->sreg1);
5334 if (!IS_HARD_FLOAT) {
5335 ARM_FMRS (code, ARMREG_R0, ARM_VFP_F0);
5338 if (IS_HARD_FLOAT) {
5339 ARM_CPYD (code, ARM_VFP_D0, ins->sreg1);
5341 ARM_FMRRD (code, ARMREG_R0, ARMREG_R1, ins->sreg1);
5345 case OP_FCONV_TO_I1:
5346 code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 1, TRUE);
5348 case OP_FCONV_TO_U1:
5349 code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 1, FALSE);
5351 case OP_FCONV_TO_I2:
5352 code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 2, TRUE);
5354 case OP_FCONV_TO_U2:
5355 code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 2, FALSE);
5357 case OP_FCONV_TO_I4:
5359 code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 4, TRUE);
5361 case OP_FCONV_TO_U4:
5363 code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 4, FALSE);
5365 case OP_FCONV_TO_I8:
5366 case OP_FCONV_TO_U8:
5367 g_assert_not_reached ();
5368 /* Implemented as helper calls */
5370 case OP_LCONV_TO_R_UN:
5371 g_assert_not_reached ();
5372 /* Implemented as helper calls */
5374 case OP_LCONV_TO_OVF_I4_2: {
5375 guint8 *high_bit_not_set, *valid_negative, *invalid_negative, *valid_positive;
5377 * Valid ints: 0xffffffff:8000000 to 00000000:0x7f000000
5380 ARM_CMP_REG_IMM8 (code, ins->sreg1, 0);
5381 high_bit_not_set = code;
5382 ARM_B_COND (code, ARMCOND_GE, 0); /*branch if bit 31 of the lower part is not set*/
5384 ARM_CMN_REG_IMM8 (code, ins->sreg2, 1); /*This have the same effect as CMP reg, 0xFFFFFFFF */
5385 valid_negative = code;
5386 ARM_B_COND (code, ARMCOND_EQ, 0); /*branch if upper part == 0xFFFFFFFF (lower part has bit 31 set) */
5387 invalid_negative = code;
5388 ARM_B_COND (code, ARMCOND_AL, 0);
5390 arm_patch (high_bit_not_set, code);
5392 ARM_CMP_REG_IMM8 (code, ins->sreg2, 0);
5393 valid_positive = code;
5394 ARM_B_COND (code, ARMCOND_EQ, 0); /*branch if upper part == 0 (lower part has bit 31 clear)*/
5396 arm_patch (invalid_negative, code);
5397 EMIT_COND_SYSTEM_EXCEPTION_FLAGS (ARMCOND_AL, "OverflowException");
5399 arm_patch (valid_negative, code);
5400 arm_patch (valid_positive, code);
5402 if (ins->dreg != ins->sreg1)
5403 ARM_MOV_REG_REG (code, ins->dreg, ins->sreg1);
5407 ARM_VFP_ADDD (code, ins->dreg, ins->sreg1, ins->sreg2);
5410 ARM_VFP_SUBD (code, ins->dreg, ins->sreg1, ins->sreg2);
5413 ARM_VFP_MULD (code, ins->dreg, ins->sreg1, ins->sreg2);
5416 ARM_VFP_DIVD (code, ins->dreg, ins->sreg1, ins->sreg2);
5419 ARM_NEGD (code, ins->dreg, ins->sreg1);
5423 g_assert_not_reached ();
5427 ARM_CMPD (code, ins->sreg1, ins->sreg2);
5433 ARM_CMPD (code, ins->sreg1, ins->sreg2);
5436 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 0, ARMCOND_NE);
5437 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_EQ);
5441 ARM_CMPD (code, ins->sreg1, ins->sreg2);
5444 ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
5445 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_MI);
5449 ARM_CMPD (code, ins->sreg1, ins->sreg2);
5452 ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
5453 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_MI);
5454 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_VS);
5458 ARM_CMPD (code, ins->sreg2, ins->sreg1);
5461 ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
5462 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_MI);
5466 ARM_CMPD (code, ins->sreg2, ins->sreg1);
5469 ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
5470 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_MI);
5471 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_VS);
5475 ARM_CMPD (code, ins->sreg1, ins->sreg2);
5478 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_NE);
5479 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 0, ARMCOND_EQ);
5483 ARM_CMPD (code, ins->sreg1, ins->sreg2);
5486 ARM_MOV_REG_IMM8 (code, ins->dreg, 1);
5487 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 0, ARMCOND_MI);
5491 ARM_CMPD (code, ins->sreg2, ins->sreg1);
5494 ARM_MOV_REG_IMM8 (code, ins->dreg, 1);
5495 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 0, ARMCOND_MI);
5498 /* ARM FPA flags table:
5499 * N Less than ARMCOND_MI
5500 * Z Equal ARMCOND_EQ
5501 * C Greater Than or Equal ARMCOND_CS
5502 * V Unordered ARMCOND_VS
5505 EMIT_COND_BRANCH (ins, OP_IBEQ - OP_IBEQ);
5508 EMIT_COND_BRANCH (ins, OP_IBNE_UN - OP_IBEQ);
5511 EMIT_COND_BRANCH_FLAGS (ins, ARMCOND_MI); /* N set */
5514 EMIT_COND_BRANCH_FLAGS (ins, ARMCOND_VS); /* V set */
5515 EMIT_COND_BRANCH_FLAGS (ins, ARMCOND_MI); /* N set */
5521 g_assert_not_reached ();
5525 EMIT_COND_BRANCH_FLAGS (ins, ARMCOND_GE);
5527 /* FPA requires EQ even thou the docs suggests that just CS is enough */
5528 EMIT_COND_BRANCH_FLAGS (ins, ARMCOND_EQ);
5529 EMIT_COND_BRANCH_FLAGS (ins, ARMCOND_CS);
5533 EMIT_COND_BRANCH_FLAGS (ins, ARMCOND_VS); /* V set */
5534 EMIT_COND_BRANCH_FLAGS (ins, ARMCOND_GE);
5539 code = mono_arm_emit_vfp_scratch_save (cfg, code, vfp_scratch1);
5540 code = mono_arm_emit_vfp_scratch_save (cfg, code, vfp_scratch2);
5542 #ifdef USE_JUMP_TABLES
5544 gpointer *jte = mono_jumptable_add_entries (2);
5545 jte [0] = GUINT_TO_POINTER (0xffffffff);
5546 jte [1] = GUINT_TO_POINTER (0x7fefffff);
5547 code = mono_arm_load_jumptable_entry_addr (code, jte, ARMREG_IP);
5548 ARM_FLDD (code, vfp_scratch1, ARMREG_IP, 0);
5551 ARM_ABSD (code, vfp_scratch2, ins->sreg1);
5552 ARM_FLDD (code, vfp_scratch1, ARMREG_PC, 0);
5554 *(guint32*)code = 0xffffffff;
5556 *(guint32*)code = 0x7fefffff;
5559 ARM_CMPD (code, vfp_scratch2, vfp_scratch1);
5561 EMIT_COND_SYSTEM_EXCEPTION_FLAGS (ARMCOND_GT, "ArithmeticException");
5562 ARM_CMPD (code, ins->sreg1, ins->sreg1);
5564 EMIT_COND_SYSTEM_EXCEPTION_FLAGS (ARMCOND_VS, "ArithmeticException");
5565 ARM_CPYD (code, ins->dreg, ins->sreg1);
5567 code = mono_arm_emit_vfp_scratch_restore (cfg, code, vfp_scratch1);
5568 code = mono_arm_emit_vfp_scratch_restore (cfg, code, vfp_scratch2);
5573 case OP_GC_LIVENESS_DEF:
5574 case OP_GC_LIVENESS_USE:
5575 case OP_GC_PARAM_SLOT_LIVENESS_DEF:
5576 ins->backend.pc_offset = code - cfg->native_code;
5578 case OP_GC_SPILL_SLOT_LIVENESS_DEF:
5579 ins->backend.pc_offset = code - cfg->native_code;
5580 bb->spill_slot_defs = g_slist_prepend_mempool (cfg->mempool, bb->spill_slot_defs, ins);
5584 g_warning ("unknown opcode %s in %s()\n", mono_inst_name (ins->opcode), __FUNCTION__);
5585 g_assert_not_reached ();
5588 if ((cfg->opt & MONO_OPT_BRANCH) && ((code - cfg->native_code - offset) > max_len)) {
5589 g_warning ("wrong maximal instruction length of instruction %s (expected %d, got %d)",
5590 mono_inst_name (ins->opcode), max_len, code - cfg->native_code - offset);
5591 g_assert_not_reached ();
5597 last_offset = offset;
5600 cfg->code_len = code - cfg->native_code;
5603 #endif /* DISABLE_JIT */
5605 #ifdef HAVE_AEABI_READ_TP
5606 void __aeabi_read_tp (void);
/*
 * mono_arch_register_lowlevel_calls:
 *
 *   Register the ARM-specific low-level exception-throwing helpers as JIT
 * icalls so the runtime can resolve patches that target them.  The icall
 * signatures are placeholders (see the comment below); only the symbol
 * name/address pair matters for these registrations.
 */
5610 mono_arch_register_lowlevel_calls (void)
5612 /* The signature doesn't matter */
5613 mono_register_jit_icall (mono_arm_throw_exception, "mono_arm_throw_exception", mono_create_icall_signature ("void"), TRUE);
5614 mono_register_jit_icall (mono_arm_throw_exception_by_token, "mono_arm_throw_exception_by_token", mono_create_icall_signature ("void"), TRUE);
/* __aeabi_read_tp (EABI thread-pointer read) is only declared when
 * HAVE_AEABI_READ_TP is set (linux EABI, non-Android, non-NaCl — see the
 * top of the file) and cannot be referenced when cross-compiling, hence
 * the double guard. */
5616 #ifndef MONO_CROSS_COMPILE
5617 #ifdef HAVE_AEABI_READ_TP
5618 mono_register_jit_icall (__aeabi_read_tp, "__aeabi_read_tp", mono_create_icall_signature ("void"), TRUE);
/*
 * patch_lis_ori:
 *
 *   Patch a lis/ori 16-bit-immediate instruction pair at IP so that it
 * materializes the 32-bit value VAL (high halfword into the second guint16
 * slot, low halfword into the fourth).
 *
 * NOTE(review): lis/ori are PowerPC instructions, not ARM; every visible
 * caller in mono_arch_patch_code () is preceded by g_assert_not_reached (),
 * so this macro looks like dead code carried over from the PPC backend —
 * confirm before relying on (or removing) it.
 */
5623 #define patch_lis_ori(ip,val) do {\
5624 guint16 *__lis_ori = (guint16*)(ip); \
5625 __lis_ori [1] = (((guint32)(val)) >> 16) & 0xffff; \
5626 __lis_ori [3] = ((guint32)(val)) & 0xffff; \
/*
 * mono_arch_patch_code:
 *
 *   Walk the jump-info list JI and apply each patch to the freshly emitted
 * native CODE of METHOD.  Patch targets are resolved through
 * mono_resolve_patch_target (); the actual instruction rewriting is done by
 * arm_patch_general () at the end of the loop.  SWITCH patches are handled
 * up front by filling in the inlined (or external, with USE_JUMP_TABLES)
 * jump table.  DYN_CODE_MP is forwarded to arm_patch_general () for
 * dynamic-method code.
 */
5630 mono_arch_patch_code (MonoMethod *method, MonoDomain *domain, guint8 *code, MonoJumpInfo *ji, MonoCodeManager *dyn_code_mp, gboolean run_cctors)
5632 MonoJumpInfo *patch_info;
/* run_cctors is FALSE exactly when this code is being compiled AOT. */
5633 gboolean compile_aot = !run_cctors;
5635 for (patch_info = ji; patch_info; patch_info = patch_info->next) {
5636 unsigned char *ip = patch_info->ip.i + code;
5637 const unsigned char *target;
/* Switch tables: store the absolute target addresses directly into the
 * jump table instead of patching an instruction. */
5639 if (patch_info->type == MONO_PATCH_INFO_SWITCH && !compile_aot) {
5640 #ifdef USE_JUMP_TABLES
5641 gpointer *jt = mono_jumptable_get_entry (ip);
5643 gpointer *jt = (gpointer*)(ip + 8);
5646 /* jt is the inlined jump table, 2 instructions after ip
5647 * In the normal case we store the absolute addresses,
5648 * otherwise the displacements.
5650 for (i = 0; i < patch_info->data.table->table_size; i++)
5651 jt [i] = code + (int)patch_info->data.table->table [i];
5656 switch (patch_info->type) {
5657 case MONO_PATCH_INFO_BB:
5658 case MONO_PATCH_INFO_LABEL:
5661 /* No need to patch these */
5666 target = mono_resolve_patch_target (method, domain, code, patch_info, run_cctors);
5668 switch (patch_info->type) {
/* The cases below that start with g_assert_not_reached () are dead
 * PPC-backend leftovers (note the patch_lis_ori uses); they are kept
 * only so the switch stays exhaustive. */
5669 case MONO_PATCH_INFO_IP:
5670 g_assert_not_reached ();
5671 patch_lis_ori (ip, ip);
5673 case MONO_PATCH_INFO_METHOD_REL:
5674 g_assert_not_reached ();
5675 *((gpointer *)(ip)) = code + patch_info->data.offset;
5677 case MONO_PATCH_INFO_METHODCONST:
5678 case MONO_PATCH_INFO_CLASS:
5679 case MONO_PATCH_INFO_IMAGE:
5680 case MONO_PATCH_INFO_FIELD:
5681 case MONO_PATCH_INFO_VTABLE:
5682 case MONO_PATCH_INFO_IID:
5683 case MONO_PATCH_INFO_SFLDA:
5684 case MONO_PATCH_INFO_LDSTR:
5685 case MONO_PATCH_INFO_TYPE_FROM_HANDLE:
5686 case MONO_PATCH_INFO_LDTOKEN:
5687 g_assert_not_reached ();
5688 /* from OP_AOTCONST : lis + ori */
5689 patch_lis_ori (ip, target);
5691 case MONO_PATCH_INFO_R4:
5692 case MONO_PATCH_INFO_R8:
5693 g_assert_not_reached ();
5694 *((gconstpointer *)(ip + 2)) = patch_info->data.target;
5696 case MONO_PATCH_INFO_EXC_NAME:
5697 g_assert_not_reached ();
5698 *((gconstpointer *)(ip + 1)) = patch_info->data.name;
5700 case MONO_PATCH_INFO_NONE:
5701 case MONO_PATCH_INFO_BB_OVF:
5702 case MONO_PATCH_INFO_EXC_OVF:
5703 /* everything is dealt with at epilog output time */
/* Common path: rewrite the branch/load at ip to point at target. */
5708 arm_patch_general (domain, ip, target, dyn_code_mp);
5715 * Stack frame layout:
5717 * ------------------- fp
5718 * MonoLMF structure or saved registers
5719 * -------------------
5721 * -------------------
5723 * -------------------
5724 * optional 8 bytes for tracing
5725 * -------------------
5726 * param area size is cfg->param_area
5727 * ------------------- sp
/*
 * mono_arch_emit_prolog:
 *
 *   Emit the method prolog for CFG: save callee-saved registers (or a full
 * MonoLMF when method->save_lmf is set), allocate the aligned stack frame,
 * establish the frame register, record DWARF unwind info and GC slot types
 * along the way, spill the rgctx/vret/sig-cookie values, move incoming
 * arguments to their home locations, and initialize the sequence-point
 * variables used by the debugger.  See the "Stack frame layout" comment
 * above for the resulting frame shape.
 */
5730 mono_arch_emit_prolog (MonoCompile *cfg)
5732 MonoMethod *method = cfg->method;
5734 MonoMethodSignature *sig;
5736 int alloc_size, orig_alloc_size, pos, max_offset, i, rot_amount;
5741 int prev_sp_offset, reg_offset;
5743 if (mono_jit_trace_calls != NULL && mono_trace_eval (method))
/* Initial code buffer; grows via g_realloc if needed later. */
5746 sig = mono_method_signature (method);
5747 cfg->code_size = 256 + sig->param_count * 64;
5748 code = cfg->native_code = g_malloc (cfg->code_size);
/* On entry the CFA is sp+0. */
5750 mono_emit_unwind_op_def_cfa (cfg, code, ARMREG_SP, 0);
5752 alloc_size = cfg->stack_offset;
5758 * The iphone uses R7 as the frame pointer, and it points at the saved
5763 * We can't use r7 as a frame pointer since it points into the middle of
5764 * the frame, so we keep using our own frame pointer.
5765 * FIXME: Optimize this.
/* iphone ABI: push {r7, lr} and set r7 so native unwinders work. */
5767 ARM_PUSH (code, (1 << ARMREG_R7) | (1 << ARMREG_LR));
5768 ARM_MOV_REG_REG (code, ARMREG_R7, ARMREG_SP);
5769 prev_sp_offset += 8; /* r7 and lr */
5770 mono_emit_unwind_op_def_cfa_offset (cfg, code, prev_sp_offset);
5771 mono_emit_unwind_op_offset (cfg, code, ARMREG_R7, (- prev_sp_offset) + 0);
/* Non-LMF path: push the used callee-saved registers (plus lr unless
 * already pushed) and emit unwind/GC info for each saved slot. */
5774 if (!method->save_lmf) {
5776 /* No need to push LR again */
5777 if (cfg->used_int_regs)
5778 ARM_PUSH (code, cfg->used_int_regs)
5780 ARM_PUSH (code, cfg->used_int_regs | (1 << ARMREG_LR));
5781 prev_sp_offset += 4;
5783 for (i = 0; i < 16; ++i) {
5784 if (cfg->used_int_regs & (1 << i))
5785 prev_sp_offset += 4;
5787 mono_emit_unwind_op_def_cfa_offset (cfg, code, prev_sp_offset);
5789 for (i = 0; i < 16; ++i) {
5790 if ((cfg->used_int_regs & (1 << i))) {
5791 mono_emit_unwind_op_offset (cfg, code, i, (- prev_sp_offset) + reg_offset);
5792 mini_gc_set_slot_type_from_cfa (cfg, (- prev_sp_offset) + reg_offset, SLOT_NOREF);
5797 mono_emit_unwind_op_offset (cfg, code, ARMREG_LR, -4);
5798 mini_gc_set_slot_type_from_cfa (cfg, -4, SLOT_NOREF);
5800 mono_emit_unwind_op_offset (cfg, code, ARMREG_LR, -4);
5801 mini_gc_set_slot_type_from_cfa (cfg, -4, SLOT_NOREF);
/* LMF path: 0x5ff0 == mask {r4-r12, lr}, i.e. everything but
 * r0-r3, sp and pc, matching the register area of MonoLMF. */
5804 ARM_MOV_REG_REG (code, ARMREG_IP, ARMREG_SP);
5805 ARM_PUSH (code, 0x5ff0);
5806 prev_sp_offset += 4 * 10; /* all but r0-r3, sp and pc */
5807 mono_emit_unwind_op_def_cfa_offset (cfg, code, prev_sp_offset);
5809 for (i = 0; i < 16; ++i) {
5810 if ((i > ARMREG_R3) && (i != ARMREG_SP) && (i != ARMREG_PC)) {
5811 /* The original r7 is saved at the start */
5812 if (!(iphone_abi && i == ARMREG_R7))
5813 mono_emit_unwind_op_offset (cfg, code, i, (- prev_sp_offset) + reg_offset);
5817 g_assert (reg_offset == 4 * 10);
5818 pos += sizeof (MonoLMF) - (4 * 10);
5822 orig_alloc_size = alloc_size;
5823 // align to MONO_ARCH_FRAME_ALIGNMENT bytes
5824 if (alloc_size & (MONO_ARCH_FRAME_ALIGNMENT - 1)) {
5825 alloc_size += MONO_ARCH_FRAME_ALIGNMENT - 1;
5826 alloc_size &= ~(MONO_ARCH_FRAME_ALIGNMENT - 1);
5829 /* the stack used in the pushed regs */
5830 if (prev_sp_offset & 4)
5832 cfg->stack_usage = alloc_size;
/* Subtract the frame size from sp, using an immediate when it fits
 * in a rotated 8-bit encoding, otherwise via the ip scratch reg. */
5834 if ((i = mono_arm_is_rotated_imm8 (alloc_size, &rot_amount)) >= 0) {
5835 ARM_SUB_REG_IMM (code, ARMREG_SP, ARMREG_SP, i, rot_amount);
5837 code = mono_arm_emit_load_imm (code, ARMREG_IP, alloc_size);
5838 ARM_SUB_REG_REG (code, ARMREG_SP, ARMREG_SP, ARMREG_IP);
5840 mono_emit_unwind_op_def_cfa_offset (cfg, code, prev_sp_offset + alloc_size);
5842 if (cfg->frame_reg != ARMREG_SP) {
5843 ARM_MOV_REG_REG (code, cfg->frame_reg, ARMREG_SP);
5844 mono_emit_unwind_op_def_cfa_reg (cfg, code, cfg->frame_reg);
5846 //g_print ("prev_sp_offset: %d, alloc_size:%d\n", prev_sp_offset, alloc_size);
5847 prev_sp_offset += alloc_size;
/* The alignment padding never holds references. */
5849 for (i = 0; i < alloc_size - orig_alloc_size; i += 4)
5850 mini_gc_set_slot_type_from_cfa (cfg, (- prev_sp_offset) + orig_alloc_size + i, SLOT_NOREF);
5852 /* compute max_offset in order to use short forward jumps
5853 * we could skip do it on arm because the immediate displacement
5854 * for jumps is large enough, it may be useful later for constant pools
5857 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
5858 MonoInst *ins = bb->code;
5859 bb->max_offset = max_offset;
5861 if (cfg->prof_options & MONO_PROFILE_COVERAGE)
5864 MONO_BB_FOR_EACH_INS (bb, ins)
5865 max_offset += ((guint8 *)ins_get_spec (ins->opcode))[MONO_INST_LEN];
5868 /* store runtime generic context */
5869 if (cfg->rgctx_var) {
5870 MonoInst *ins = cfg->rgctx_var;
5872 g_assert (ins->opcode == OP_REGOFFSET);
5874 if (arm_is_imm12 (ins->inst_offset)) {
5875 ARM_STR_IMM (code, MONO_ARCH_RGCTX_REG, ins->inst_basereg, ins->inst_offset);
5877 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_offset);
5878 ARM_STR_REG_REG (code, MONO_ARCH_RGCTX_REG, ins->inst_basereg, ARMREG_LR);
5882 /* load arguments allocated to register from the stack */
5885 cinfo = get_call_info (cfg->generic_sharing_context, NULL, sig);
/* Spill the valuetype return-address register into vret_addr. */
5887 if (cinfo->vtype_retaddr) {
5888 ArgInfo *ainfo = &cinfo->ret;
5889 inst = cfg->vret_addr;
5890 g_assert (arm_is_imm12 (inst->inst_offset));
5891 ARM_STR_IMM (code, ainfo->reg, inst->inst_basereg, inst->inst_offset);
5894 if (sig->call_convention == MONO_CALL_VARARG) {
5895 ArgInfo *cookie = &cinfo->sig_cookie;
5897 /* Save the sig cookie address */
5898 g_assert (cookie->storage == RegTypeBase);
5900 g_assert (arm_is_imm12 (prev_sp_offset + cookie->offset));
5901 g_assert (arm_is_imm12 (cfg->sig_cookie));
5902 ARM_ADD_REG_IMM8 (code, ARMREG_IP, cfg->frame_reg, prev_sp_offset + cookie->offset);
5903 ARM_STR_IMM (code, ARMREG_IP, cfg->frame_reg, cfg->sig_cookie);
/* Move each incoming argument (this + params) from its ABI location
 * (register, register pair, caller stack, VFP reg, struct-by-val
 * registers) to the home assigned by the register allocator. */
5906 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
5907 ArgInfo *ainfo = cinfo->args + i;
5908 inst = cfg->args [pos];
5910 if (cfg->verbose_level > 2)
5911 g_print ("Saving argument %d (type: %d)\n", i, ainfo->storage);
5912 if (inst->opcode == OP_REGVAR) {
5913 if (ainfo->storage == RegTypeGeneral)
5914 ARM_MOV_REG_REG (code, inst->dreg, ainfo->reg);
5915 else if (ainfo->storage == RegTypeFP) {
5916 g_assert_not_reached ();
5917 } else if (ainfo->storage == RegTypeBase) {
5918 if (arm_is_imm12 (prev_sp_offset + ainfo->offset)) {
5919 ARM_LDR_IMM (code, inst->dreg, ARMREG_SP, (prev_sp_offset + ainfo->offset));
5921 code = mono_arm_emit_load_imm (code, ARMREG_IP, prev_sp_offset + ainfo->offset);
5922 ARM_LDR_REG_REG (code, inst->dreg, ARMREG_SP, ARMREG_IP);
5925 g_assert_not_reached ();
5927 if (cfg->verbose_level > 2)
5928 g_print ("Argument %d assigned to register %s\n", pos, mono_arch_regname (inst->dreg));
5930 /* the argument should be put on the stack: FIXME handle size != word */
5931 if (ainfo->storage == RegTypeGeneral || ainfo->storage == RegTypeIRegPair || ainfo->storage == RegTypeGSharedVtInReg) {
5932 switch (ainfo->size) {
5934 if (arm_is_imm12 (inst->inst_offset))
5935 ARM_STRB_IMM (code, ainfo->reg, inst->inst_basereg, inst->inst_offset);
5937 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
5938 ARM_STRB_REG_REG (code, ainfo->reg, inst->inst_basereg, ARMREG_IP);
5942 if (arm_is_imm8 (inst->inst_offset)) {
5943 ARM_STRH_IMM (code, ainfo->reg, inst->inst_basereg, inst->inst_offset);
5945 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
5946 ARM_STRH_REG_REG (code, ainfo->reg, inst->inst_basereg, ARMREG_IP);
/* 8-byte case: store the low word then reg+1 at offset+4. */
5950 if (arm_is_imm12 (inst->inst_offset)) {
5951 ARM_STR_IMM (code, ainfo->reg, inst->inst_basereg, inst->inst_offset);
5953 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
5954 ARM_STR_REG_REG (code, ainfo->reg, inst->inst_basereg, ARMREG_IP);
5956 if (arm_is_imm12 (inst->inst_offset + 4)) {
5957 ARM_STR_IMM (code, ainfo->reg + 1, inst->inst_basereg, inst->inst_offset + 4);
5959 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset + 4);
5960 ARM_STR_REG_REG (code, ainfo->reg + 1, inst->inst_basereg, ARMREG_IP);
5964 if (arm_is_imm12 (inst->inst_offset)) {
5965 ARM_STR_IMM (code, ainfo->reg, inst->inst_basereg, inst->inst_offset);
5967 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
5968 ARM_STR_REG_REG (code, ainfo->reg, inst->inst_basereg, ARMREG_IP);
/* RegTypeBaseGen: 64-bit value split between r3 (low word) and the
 * caller's stack (high word). */
5972 } else if (ainfo->storage == RegTypeBaseGen) {
5973 if (arm_is_imm12 (prev_sp_offset + ainfo->offset)) {
5974 ARM_LDR_IMM (code, ARMREG_LR, ARMREG_SP, (prev_sp_offset + ainfo->offset));
5976 code = mono_arm_emit_load_imm (code, ARMREG_IP, prev_sp_offset + ainfo->offset);
5977 ARM_LDR_REG_REG (code, ARMREG_LR, ARMREG_SP, ARMREG_IP);
5979 if (arm_is_imm12 (inst->inst_offset + 4)) {
5980 ARM_STR_IMM (code, ARMREG_LR, inst->inst_basereg, inst->inst_offset + 4);
5981 ARM_STR_IMM (code, ARMREG_R3, inst->inst_basereg, inst->inst_offset);
5983 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset + 4);
5984 ARM_STR_REG_REG (code, ARMREG_LR, inst->inst_basereg, ARMREG_IP);
5985 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
5986 ARM_STR_REG_REG (code, ARMREG_R3, inst->inst_basereg, ARMREG_IP);
/* Argument fully on the caller's stack: load through lr, then store
 * to the home slot with a size-appropriate store. */
5988 } else if (ainfo->storage == RegTypeBase || ainfo->storage == RegTypeGSharedVtOnStack) {
5989 if (arm_is_imm12 (prev_sp_offset + ainfo->offset)) {
5990 ARM_LDR_IMM (code, ARMREG_LR, ARMREG_SP, (prev_sp_offset + ainfo->offset));
5992 code = mono_arm_emit_load_imm (code, ARMREG_IP, prev_sp_offset + ainfo->offset);
5993 ARM_LDR_REG_REG (code, ARMREG_LR, ARMREG_SP, ARMREG_IP);
5996 switch (ainfo->size) {
5998 if (arm_is_imm8 (inst->inst_offset)) {
5999 ARM_STRB_IMM (code, ARMREG_LR, inst->inst_basereg, inst->inst_offset);
6001 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
6002 ARM_STRB_REG_REG (code, ARMREG_LR, inst->inst_basereg, ARMREG_IP);
6006 if (arm_is_imm8 (inst->inst_offset)) {
6007 ARM_STRH_IMM (code, ARMREG_LR, inst->inst_basereg, inst->inst_offset);
6009 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
6010 ARM_STRH_REG_REG (code, ARMREG_LR, inst->inst_basereg, ARMREG_IP);
6014 if (arm_is_imm12 (inst->inst_offset)) {
6015 ARM_STR_IMM (code, ARMREG_LR, inst->inst_basereg, inst->inst_offset);
6017 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
6018 ARM_STR_REG_REG (code, ARMREG_LR, inst->inst_basereg, ARMREG_IP);
/* 8-byte on-stack argument: repeat for the second word. */
6020 if (arm_is_imm12 (prev_sp_offset + ainfo->offset + 4)) {
6021 ARM_LDR_IMM (code, ARMREG_LR, ARMREG_SP, (prev_sp_offset + ainfo->offset + 4));
6023 code = mono_arm_emit_load_imm (code, ARMREG_IP, prev_sp_offset + ainfo->offset + 4);
6024 ARM_LDR_REG_REG (code, ARMREG_LR, ARMREG_SP, ARMREG_IP);
6026 if (arm_is_imm12 (inst->inst_offset + 4)) {
6027 ARM_STR_IMM (code, ARMREG_LR, inst->inst_basereg, inst->inst_offset + 4);
6029 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset + 4);
6030 ARM_STR_REG_REG (code, ARMREG_LR, inst->inst_basereg, ARMREG_IP);
6034 if (arm_is_imm12 (inst->inst_offset)) {
6035 ARM_STR_IMM (code, ARMREG_LR, inst->inst_basereg, inst->inst_offset);
6037 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
6038 ARM_STR_REG_REG (code, ARMREG_LR, inst->inst_basereg, ARMREG_IP);
/* VFP register argument: compute the slot address in ip and store
 * single or double precision depending on ainfo->size. */
6042 } else if (ainfo->storage == RegTypeFP) {
6043 int imm8, rot_amount;
6045 if ((imm8 = mono_arm_is_rotated_imm8 (inst->inst_offset, &rot_amount)) == -1) {
6046 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
6047 ARM_ADD_REG_REG (code, ARMREG_IP, ARMREG_IP, inst->inst_basereg);
6049 ARM_ADD_REG_IMM (code, ARMREG_IP, inst->inst_basereg, imm8, rot_amount);
6051 if (ainfo->size == 8)
6052 ARM_FSTD (code, ainfo->reg, ARMREG_IP, 0);
6054 ARM_FSTS (code, ainfo->reg, ARMREG_IP, 0);
/* Struct passed partially in registers (ainfo->size regs) with the
 * remainder (ainfo->vtsize words) on the caller's stack. */
6055 } else if (ainfo->storage == RegTypeStructByVal) {
6056 int doffset = inst->inst_offset;
6060 size = mini_type_stack_size_full (cfg->generic_sharing_context, inst->inst_vtype, NULL, sig->pinvoke);
6061 for (cur_reg = 0; cur_reg < ainfo->size; ++cur_reg) {
6062 if (arm_is_imm12 (doffset)) {
6063 ARM_STR_IMM (code, ainfo->reg + cur_reg, inst->inst_basereg, doffset);
6065 code = mono_arm_emit_load_imm (code, ARMREG_IP, doffset);
6066 ARM_STR_REG_REG (code, ainfo->reg + cur_reg, inst->inst_basereg, ARMREG_IP);
6068 soffset += sizeof (gpointer);
6069 doffset += sizeof (gpointer);
6071 if (ainfo->vtsize) {
6072 /* FIXME: handle overrun! with struct sizes not multiple of 4 */
6073 //g_print ("emit_memcpy (prev_sp_ofs: %d, ainfo->offset: %d, soffset: %d)\n", prev_sp_offset, ainfo->offset, soffset);
6074 code = emit_memcpy (code, ainfo->vtsize * sizeof (gpointer), inst->inst_basereg, doffset, ARMREG_SP, prev_sp_offset + ainfo->offset);
6076 } else if (ainfo->storage == RegTypeStructByAddr) {
6077 g_assert_not_reached ();
6078 /* FIXME: handle overrun! with struct sizes not multiple of 4 */
6079 code = emit_memcpy (code, ainfo->vtsize * sizeof (gpointer), inst->inst_basereg, inst->inst_offset, ainfo->reg, 0);
6081 g_assert_not_reached ();
6086 if (method->save_lmf)
6087 code = emit_save_lmf (cfg, code, alloc_size - lmf_offset);
6090 code = mono_arch_instrument_prolog (cfg, mono_trace_enter_method, code, TRUE);
/* Sequence points: materialize the SeqPointInfo pointer (patched in
 * via MONO_PATCH_INFO_SEQ_POINT_INFO) into its stack variable. */
6092 if (cfg->arch.seq_point_info_var) {
6093 MonoInst *ins = cfg->arch.seq_point_info_var;
6095 /* Initialize the variable from a GOT slot */
6096 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_SEQ_POINT_INFO, cfg->method);
6097 #ifdef USE_JUMP_TABLES
6099 gpointer *jte = mono_jumptable_add_entry ();
6100 code = mono_arm_load_jumptable_entry (code, jte, ARMREG_IP);
6101 ARM_LDR_IMM (code, ARMREG_R0, ARMREG_IP, 0);
6103 /** XXX: is it correct? */
6105 ARM_LDR_IMM (code, ARMREG_R0, ARMREG_PC, 0);
6107 *(gpointer*)code = NULL;
6110 ARM_LDR_REG_REG (code, ARMREG_R0, ARMREG_PC, ARMREG_R0);
6112 g_assert (ins->opcode == OP_REGOFFSET);
6114 if (arm_is_imm12 (ins->inst_offset)) {
6115 ARM_STR_IMM (code, ARMREG_R0, ins->inst_basereg, ins->inst_offset);
6117 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_offset);
6118 ARM_STR_REG_REG (code, ARMREG_R0, ins->inst_basereg, ARMREG_LR);
6122 /* Initialize ss_trigger_page_var */
6123 if (!cfg->soft_breakpoints) {
6124 MonoInst *info_var = cfg->arch.seq_point_info_var;
6125 MonoInst *ss_trigger_page_var = cfg->arch.ss_trigger_page_var;
6126 int dreg = ARMREG_LR;
6129 g_assert (info_var->opcode == OP_REGOFFSET);
6130 g_assert (arm_is_imm12 (info_var->inst_offset));
6132 ARM_LDR_IMM (code, dreg, info_var->inst_basereg, info_var->inst_offset);
6133 /* Load the trigger page addr */
6134 ARM_LDR_IMM (code, dreg, dreg, G_STRUCT_OFFSET (SeqPointInfo, ss_trigger_page));
6135 ARM_STR_IMM (code, dreg, ss_trigger_page_var->inst_basereg, ss_trigger_page_var->inst_offset);
/* Soft-breakpoint support: cache &ss_trigger_var and the single-step/
 * breakpoint wrapper addresses (inlined after the code, or via jump
 * tables) into their per-frame variables. */
6139 if (cfg->arch.seq_point_read_var) {
6140 MonoInst *read_ins = cfg->arch.seq_point_read_var;
6141 MonoInst *ss_method_ins = cfg->arch.seq_point_ss_method_var;
6142 MonoInst *bp_method_ins = cfg->arch.seq_point_bp_method_var;
6143 #ifdef USE_JUMP_TABLES
6146 g_assert (read_ins->opcode == OP_REGOFFSET);
6147 g_assert (arm_is_imm12 (read_ins->inst_offset));
6148 g_assert (ss_method_ins->opcode == OP_REGOFFSET);
6149 g_assert (arm_is_imm12 (ss_method_ins->inst_offset));
6150 g_assert (bp_method_ins->opcode == OP_REGOFFSET);
6151 g_assert (arm_is_imm12 (bp_method_ins->inst_offset));
6153 #ifdef USE_JUMP_TABLES
6154 jte = mono_jumptable_add_entries (3);
6155 jte [0] = (gpointer)&ss_trigger_var;
6156 jte [1] = single_step_func_wrapper;
6157 jte [2] = breakpoint_func_wrapper;
6158 code = mono_arm_load_jumptable_entry_addr (code, jte, ARMREG_LR);
6160 ARM_MOV_REG_REG (code, ARMREG_LR, ARMREG_PC);
6162 *(volatile int **)code = &ss_trigger_var;
6164 *(gpointer*)code = single_step_func_wrapper;
6166 *(gpointer*)code = breakpoint_func_wrapper;
6170 ARM_LDR_IMM (code, ARMREG_IP, ARMREG_LR, 0);
6171 ARM_STR_IMM (code, ARMREG_IP, read_ins->inst_basereg, read_ins->inst_offset);
6172 ARM_LDR_IMM (code, ARMREG_IP, ARMREG_LR, 4);
6173 ARM_STR_IMM (code, ARMREG_IP, ss_method_ins->inst_basereg, ss_method_ins->inst_offset);
6174 ARM_LDR_IMM (code, ARMREG_IP, ARMREG_LR, 8);
6175 ARM_STR_IMM (code, ARMREG_IP, bp_method_ins->inst_basereg, bp_method_ins->inst_offset);
6178 cfg->code_len = code - cfg->native_code;
6179 g_assert (cfg->code_len < cfg->code_size);
/*
 * mono_arch_emit_epilog:
 *
 *   Emit the method epilogue: grow the code buffer if needed, optionally emit
 * leave-method tracing, reload by-value struct return registers, then restore
 * the saved registers and return by popping the saved LR into PC. The
 * LMF-saving path restores registers from the MonoLMF frame area instead of
 * the plain callee-saved spill area.
 */
6186 mono_arch_emit_epilog (MonoCompile *cfg)
6188 MonoMethod *method = cfg->method;
6189 int pos, i, rot_amount;
6190 int max_epilog_size = 16 + 20*4;
/* Worst-case size estimate; grown below for LMF/tracing/profiling variants. */
6194 if (cfg->method->save_lmf)
6195 max_epilog_size += 128;
6197 if (mono_jit_trace_calls != NULL)
6198 max_epilog_size += 50;
6200 if (cfg->prof_options & MONO_PROFILE_ENTER_LEAVE)
6201 max_epilog_size += 50;
/* Double the buffer until the epilogue is guaranteed to fit (16-byte slack). */
6203 while (cfg->code_len + max_epilog_size > (cfg->code_size - 16)) {
6204 cfg->code_size *= 2;
6205 cfg->native_code = g_realloc (cfg->native_code, cfg->code_size);
6206 cfg->stat_code_reallocs++;
6210 * Keep in sync with OP_JMP
6212 code = cfg->native_code + cfg->code_len;
6214 if (mono_jit_trace_calls != NULL && mono_trace_eval (method)) {
6215 code = mono_arch_instrument_epilog (cfg, mono_trace_leave_method, code, TRUE);
6219 /* Load returned vtypes into registers if needed */
6220 cinfo = cfg->arch.cinfo;
6221 if (cinfo->ret.storage == RegTypeStructByVal) {
6222 MonoInst *ins = cfg->ret;
/* Use the short-form LDR when the stack offset fits in an imm12 encoding. */
6224 if (arm_is_imm12 (ins->inst_offset)) {
6225 ARM_LDR_IMM (code, ARMREG_R0, ins->inst_basereg, ins->inst_offset);
6227 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_offset);
6228 ARM_LDR_REG_REG (code, ARMREG_R0, ins->inst_basereg, ARMREG_LR);
6232 if (method->save_lmf) {
6233 int lmf_offset, reg, sp_adj, regmask;
6234 /* all but r0-r3, sp and pc */
6235 pos += sizeof (MonoLMF) - (MONO_ARM_NUM_SAVED_REGS * sizeof (mgreg_t));
6238 code = emit_restore_lmf (cfg, code, cfg->stack_usage - lmf_offset);
6240 /* This points to r4 inside MonoLMF->iregs */
6241 sp_adj = (sizeof (MonoLMF) - MONO_ARM_NUM_SAVED_REGS * sizeof (mgreg_t));
6243 regmask = 0x9ff0; /* restore lr to pc */
6244 /* Skip caller saved registers not used by the method */
6245 while (!(cfg->used_int_regs & (1 << reg)) && reg < ARMREG_FP) {
6246 regmask &= ~(1 << reg);
6251 /* Restored later */
6252 regmask &= ~(1 << ARMREG_PC);
6253 /* Point sp at the registers to restore: 10 == 14 - 4, because we skip r0-r3. */
6254 code = emit_big_add (code, ARMREG_SP, cfg->frame_reg, cfg->stack_usage - lmf_offset + sp_adj);
6256 ARM_POP (code, regmask);
6258 /* Restore saved r7, restore LR to PC */
6259 /* Skip lr from the lmf */
6260 ARM_ADD_REG_IMM (code, ARMREG_SP, ARMREG_SP, sizeof (gpointer), 0);
6261 ARM_POP (code, (1 << ARMREG_R7) | (1 << ARMREG_PC));
/* Non-LMF path: unwind the frame, preferring a single rotated-imm8 ADD. */
6264 if ((i = mono_arm_is_rotated_imm8 (cfg->stack_usage, &rot_amount)) >= 0) {
6265 ARM_ADD_REG_IMM (code, ARMREG_SP, cfg->frame_reg, i, rot_amount);
6267 code = mono_arm_emit_load_imm (code, ARMREG_IP, cfg->stack_usage);
6268 ARM_ADD_REG_REG (code, ARMREG_SP, cfg->frame_reg, ARMREG_IP);
6272 /* Restore saved gregs */
6273 if (cfg->used_int_regs)
6274 ARM_POP (code, cfg->used_int_regs);
6275 /* Restore saved r7, restore LR to PC */
6276 ARM_POP (code, (1 << ARMREG_R7) | (1 << ARMREG_PC));
/* Otherwise one POP restores the used registers and returns via PC. */
6278 ARM_POP (code, cfg->used_int_regs | (1 << ARMREG_PC));
6282 cfg->code_len = code - cfg->native_code;
6284 g_assert (cfg->code_len < cfg->code_size);
/*
 * mono_arch_emit_exceptions:
 *
 *   Emit out-of-line exception throw stubs at the end of the method body.
 * MONO_PATCH_INFO_EXC branches in the method are patched to land on a shared
 * per-exception-class stub which loads the corlib type token into r0, the
 * throw site address into r1 and calls mono_arch_throw_corlib_exception.
 * Stubs are deduplicated: the first occurrence of each exception class emits
 * the stub, later occurrences just patch their branch to it.
 */
6289 mono_arch_emit_exceptions (MonoCompile *cfg)
6291 MonoJumpInfo *patch_info;
6294 guint8* exc_throw_pos [MONO_EXC_INTRINS_NUM];
6295 guint8 exc_throw_found [MONO_EXC_INTRINS_NUM];
6296 int max_epilog_size = 50;
6298 for (i = 0; i < MONO_EXC_INTRINS_NUM; i++) {
6299 exc_throw_pos [i] = NULL;
6300 exc_throw_found [i] = 0;
6303 /* count the number of exception infos */
6306 * make sure we have enough space for exceptions
/* Reserve 32 bytes per distinct exception class that needs a stub. */
6308 for (patch_info = cfg->patch_info; patch_info; patch_info = patch_info->next) {
6309 if (patch_info->type == MONO_PATCH_INFO_EXC) {
6310 i = mini_exception_id_by_name (patch_info->data.target);
6311 if (!exc_throw_found [i]) {
6312 max_epilog_size += 32;
6313 exc_throw_found [i] = TRUE;
6318 while (cfg->code_len + max_epilog_size > (cfg->code_size - 16)) {
6319 cfg->code_size *= 2;
6320 cfg->native_code = g_realloc (cfg->native_code, cfg->code_size);
6321 cfg->stat_code_reallocs++;
6324 code = cfg->native_code + cfg->code_len;
6326 /* add code to raise exceptions */
6327 for (patch_info = cfg->patch_info; patch_info; patch_info = patch_info->next) {
6328 switch (patch_info->type) {
6329 case MONO_PATCH_INFO_EXC: {
6330 MonoClass *exc_class;
6331 unsigned char *ip = patch_info->ip.i + cfg->native_code;
6333 i = mini_exception_id_by_name (patch_info->data.target);
/* Stub already emitted for this class: just retarget the branch. */
6334 if (exc_throw_pos [i]) {
6335 arm_patch (ip, exc_throw_pos [i]);
6336 patch_info->type = MONO_PATCH_INFO_NONE;
6339 exc_throw_pos [i] = code;
6341 arm_patch (ip, code);
6343 exc_class = mono_class_from_name (mono_defaults.corlib, "System", patch_info->data.name);
6344 g_assert (exc_class);
/* r1 = return address of the faulting call site (the throw IP). */
6346 ARM_MOV_REG_REG (code, ARMREG_R1, ARMREG_LR);
6347 #ifdef USE_JUMP_TABLES
/* Jump-table variant: slot 0 holds the throw helper, slot 1 the type token. */
6349 gpointer *jte = mono_jumptable_add_entries (2);
6350 patch_info->type = MONO_PATCH_INFO_INTERNAL_METHOD;
6351 patch_info->data.name = "mono_arch_throw_corlib_exception";
6352 patch_info->ip.i = code - cfg->native_code;
6353 code = mono_arm_load_jumptable_entry_addr (code, jte, ARMREG_R0);
6354 ARM_LDR_IMM (code, ARMREG_IP, ARMREG_R0, 0);
6355 ARM_LDR_IMM (code, ARMREG_R0, ARMREG_R0, 4);
6356 ARM_BLX_REG (code, ARMREG_IP);
6357 jte [1] = GUINT_TO_POINTER (exc_class->type_token);
/* Literal-pool variant: token is embedded in the code stream after the LDR. */
6360 ARM_LDR_IMM (code, ARMREG_R0, ARMREG_PC, 0);
6361 patch_info->type = MONO_PATCH_INFO_INTERNAL_METHOD;
6362 patch_info->data.name = "mono_arch_throw_corlib_exception";
6363 patch_info->ip.i = code - cfg->native_code;
6365 *(guint32*)(gpointer)code = exc_class->type_token;
6376 cfg->code_len = code - cfg->native_code;
6378 g_assert (cfg->code_len < cfg->code_size);
6382 #endif /* #ifndef DISABLE_JIT */
/* Trivial/empty backend hooks required by the arch-independent JIT interface. */
/* One-time backend initialization after the runtime is set up. */
6385 mono_arch_finish_init (void)
/* Free per-thread JIT TLS data; nothing ARM-specific to release here. */
6390 mono_arch_free_jit_tls_data (MonoJitTlsData *tls)
/* Arch-specific intrinsic lowering hook; no ARM intrinsics handled here. */
6395 mono_arch_emit_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
/* Debug pretty-printer hook for instruction trees. */
6402 mono_arch_print_tree (MonoInst *tree, int arity)
/* Offset of the patchable location inside a call sequence at CODE. */
6412 mono_arch_get_patch_offset (guint8 *code)
/* No register windows on ARM (SPARC-ism); intentionally a no-op. */
6419 mono_arch_flush_register_windows (void)
6423 #ifdef MONO_ARCH_HAVE_IMT
/*
 * mono_arch_emit_imt_argument:
 *
 *   Arrange for the IMT/method argument of an interface or generic call to be
 * passed in the dedicated register (V5). With AOT, jump tables, LLVM or a
 * generic context the argument is always materialized into a register
 * (dynamic_imt_arg); otherwise the method pointer is loaded as a constant.
 */
6428 mono_arch_emit_imt_argument (MonoCompile *cfg, MonoCallInst *call, MonoInst *imt_arg)
6430 int method_reg = mono_alloc_ireg (cfg);
6431 #ifdef USE_JUMP_TABLES
6432 int use_jumptables = TRUE;
6434 int use_jumptables = FALSE;
6437 if (cfg->compile_aot) {
6440 call->dynamic_imt_arg = TRUE;
6443 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, method_reg, imt_arg->dreg);
/* AOT: the method address is resolved at load time via an AOT constant. */
6445 MONO_INST_NEW (cfg, ins, OP_AOTCONST);
6446 ins->dreg = method_reg;
6447 ins->inst_p0 = call->method;
6448 ins->inst_c1 = MONO_PATCH_INFO_METHODCONST;
6449 MONO_ADD_INS (cfg->cbb, ins);
6451 mono_call_inst_add_outarg_reg (cfg, call, method_reg, ARMREG_V5, FALSE);
6452 } else if (cfg->generic_context || imt_arg || mono_use_llvm || use_jumptables) {
6453 /* Always pass in a register for simplicity */
6454 call->dynamic_imt_arg = TRUE;
6456 cfg->uses_rgctx_reg = TRUE;
6459 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, method_reg, imt_arg->dreg);
/* No IMT arg available: pass the target method itself as a constant. */
6463 MONO_INST_NEW (cfg, ins, OP_PCONST);
6464 ins->inst_p0 = call->method;
6465 ins->dreg = method_reg;
6466 MONO_ADD_INS (cfg->cbb, ins);
6469 mono_call_inst_add_outarg_reg (cfg, call, method_reg, ARMREG_V5, FALSE);
6473 #endif /* DISABLE_JIT */
/*
 * mono_arch_find_imt_method:
 *
 *   Recover the IMT method for an interface call site. With jump tables the
 * method is always in register V5. Otherwise it is read from the code stream
 * right after the call-site's LDR PC instruction; a NULL there means AOT code
 * (or the gsharedvt trampoline), in which case V5 holds the method.
 */
6476 mono_arch_find_imt_method (mgreg_t *regs, guint8 *code)
6478 #ifdef USE_JUMP_TABLES
6479 return (MonoMethod*)regs [ARMREG_V5];
6482 guint32 *code_ptr = (guint32*)code;
/* The IMT value occupies the word after the LDR at code_ptr [0]. */
6484 method = GUINT_TO_POINTER (code_ptr [1]);
6488 return (MonoMethod*)regs [ARMREG_V5];
6490 /* The IMT value is stored in the code stream right after the LDC instruction. */
6491 /* This is no longer true for the gsharedvt_in trampoline */
6493 if (!IS_LDR_PC (code_ptr [0])) {
6494 g_warning ("invalid code stream, instruction before IMT value is not a LDC in %s() (code %p value 0: 0x%x -1: 0x%x -2: 0x%x)", __FUNCTION__, code, code_ptr [2], code_ptr [1], code_ptr [0]);
6495 g_assert (IS_LDR_PC (code_ptr [0]));
6499 /* This is AOTed code, or the gsharedvt trampoline, the IMT method is in V5 */
6500 return (MonoMethod*)regs [ARMREG_V5];
6502 return (MonoMethod*) method;
/* Recover the vtable of a static rgctx call from the dedicated rgctx register. */
6507 mono_arch_find_static_call_vtable (mgreg_t *regs, guint8 *code)
6509 return (MonoVTable*) regs [MONO_ARCH_RGCTX_REG];
6512 /* #define ENABLE_WRONG_METHOD_CHECK 1 */
/* Per-fragment size budgets (in bytes, each 4-byte ARM instructions) used to
 * pre-compute the IMT thunk allocation in mono_arch_build_imt_thunk. */
6513 #define BASE_SIZE (6 * 4)
6514 #define BSEARCH_ENTRY_SIZE (4 * 4)
6515 #define CMP_SIZE (3 * 4)
6516 #define BRANCH_SIZE (1 * 4)
6517 #define CALL_SIZE (2 * 4)
6518 #define WMC_SIZE (8 * 4)
/* Signed byte distance between two pointers (truncated to 32 bits). */
6519 #define DISTANCE(A, B) (((gint32)(B)) - ((gint32)(A)))
6521 #ifdef USE_JUMP_TABLES
/* Write VALUE into jump-table slot INDEX; asserts each slot is filled once. */
6523 set_jumptable_element (gpointer *base, guint32 index, gpointer value)
6525 g_assert (base [index] == NULL);
6526 base [index] = value;
/* Emit a conditional load of jump-table entry JTI (base-relative, scaled by 4)
 * into DREG. Falls back to MOVW/MOVT for offsets that don't fit in imm12. */
6529 load_element_with_regbase_cond (arminstr_t *code, ARMReg dreg, ARMReg base, guint32 jti, int cond)
6531 if (arm_is_imm12 (jti * 4)) {
6532 ARM_LDR_IMM_COND (code, dreg, base, jti * 4, cond);
6534 ARM_MOVW_REG_IMM_COND (code, dreg, (jti * 4) & 0xffff, cond);
6535 if ((jti * 4) >> 16)
6536 ARM_MOVT_REG_IMM_COND (code, dreg, ((jti * 4) >> 16) & 0xffff, cond);
6537 ARM_LDR_REG_REG_SHIFT_COND (code, dreg, base, dreg, ARMSHIFT_LSL, 0, cond);
/* Append VALUE to the literal pool at CODE and patch the earlier PC-relative
 * LDR at TARGET with the (imm12) distance to it. */
6543 arm_emit_value_and_patch_ldr (arminstr_t *code, arminstr_t *target, guint32 value)
6545 guint32 delta = DISTANCE (target, code);
/* The offset must fit the LDR literal imm12 field. */
6547 g_assert (delta >= 0 && delta <= 0xFFF);
6548 *target = *target | delta;
6554 #ifdef ENABLE_WRONG_METHOD_CHECK
/* Debug helper called from generated code when the IMT comparison fails. */
6556 mini_dump_bad_imt (int input_imt, int compared_imt, int pc)
6558 g_print ("BAD IMT comparing %x with expected %x at ip %x", input_imt, compared_imt, pc);
/*
 * mono_arch_build_imt_thunk:
 *
 *   Build the native IMT dispatch thunk for VTABLE. IMT_ENTRIES describe a
 * decision tree: "equals" entries compare the incoming IMT method (r0 / V5)
 * against a key and jump to the vtable slot or explicit target code on match;
 * non-equals entries are binary-search compare-and-branch nodes. FAIL_TRAMP,
 * when set, is where unmatched lookups (generic virtual thunks) branch.
 *   Two code layouts exist: a jump-table layout (USE_JUMP_TABLES) where keys
 * and targets live in an external table indexed by the *_JTI macros, and a
 * literal-pool layout where they are embedded in the code stream and patched
 * via arm_emit_value_and_patch_ldr. The first pass only sizes each chunk; the
 * second pass emits code; the final pass back-patches branches and pools.
 */
6564 mono_arch_build_imt_thunk (MonoVTable *vtable, MonoDomain *domain, MonoIMTCheckItem **imt_entries, int count,
6565 gpointer fail_tramp)
6568 arminstr_t *code, *start;
6569 #ifdef USE_JUMP_TABLES
6572 gboolean large_offsets = FALSE;
6573 guint32 **constant_pool_starts;
6574 arminstr_t *vtable_target = NULL;
6575 int extra_space = 0;
6577 #ifdef ENABLE_WRONG_METHOD_CHECK
/* ---- Pass 1: compute per-entry chunk sizes and the total allocation. ---- */
6582 #ifdef USE_JUMP_TABLES
6583 for (i = 0; i < count; ++i) {
6584 MonoIMTCheckItem *item = imt_entries [i];
6585 item->chunk_size += 4 * 16;
6586 if (!item->is_equals)
6587 imt_entries [item->check_target_idx]->compare_done = TRUE;
6588 size += item->chunk_size;
6591 constant_pool_starts = g_new0 (guint32*, count);
6593 for (i = 0; i < count; ++i) {
6594 MonoIMTCheckItem *item = imt_entries [i];
6595 if (item->is_equals) {
6596 gboolean fail_case = !item->check_target_idx && fail_tramp;
6598 if (item->has_target_code || !arm_is_imm12 (DISTANCE (vtable, &vtable->vtable[item->value.vtable_slot]))) {
6599 item->chunk_size += 32;
6600 large_offsets = TRUE;
6603 if (item->check_target_idx || fail_case) {
6604 if (!item->compare_done || fail_case)
6605 item->chunk_size += CMP_SIZE;
6606 item->chunk_size += BRANCH_SIZE;
6608 #ifdef ENABLE_WRONG_METHOD_CHECK
6609 item->chunk_size += WMC_SIZE;
6613 item->chunk_size += 16;
6614 large_offsets = TRUE;
6616 item->chunk_size += CALL_SIZE;
6618 item->chunk_size += BSEARCH_ENTRY_SIZE;
6619 imt_entries [item->check_target_idx]->compare_done = TRUE;
6621 size += item->chunk_size;
6625 size += 4 * count; /* The ARM_ADD_REG_IMM to pop the stack */
/* Generic virtual thunks come from a dedicated allocator so they can be freed. */
6629 code = mono_method_alloc_generic_virtual_thunk (domain, size);
6631 code = mono_domain_code_reserve (domain, size);
6635 g_print ("Building IMT thunk for class %s %s entries %d code size %d code at %p end %p vtable %p fail_tramp %p\n", vtable->klass->name_space, vtable->klass->name, count, size, start, ((guint8*)start) + size, vtable, fail_tramp);
6636 for (i = 0; i < count; ++i) {
6637 MonoIMTCheckItem *item = imt_entries [i];
6638 g_print ("method %d (%p) %s vtable slot %p is_equals %d chunk size %d\n", i, item->key, ((MonoMethod*)item->key)->name, &vtable->vtable [item->value.vtable_slot], item->is_equals, item->chunk_size);
/* ---- Thunk prologue: save scratch regs and normalize the IMT arg into r0. ---- */
6642 #ifdef USE_JUMP_TABLES
6643 ARM_PUSH3 (code, ARMREG_R0, ARMREG_R1, ARMREG_R2);
6644 /* If jumptables we always pass the IMT method in R5 */
6645 ARM_MOV_REG_REG (code, ARMREG_R0, ARMREG_V5);
/* Jump-table slot layout: slot 0 = vtable, then 3 slots per IMT entry. */
6646 #define VTABLE_JTI 0
6647 #define IMT_METHOD_OFFSET 0
6648 #define TARGET_CODE_OFFSET 1
6649 #define JUMP_CODE_OFFSET 2
6650 #define RECORDS_PER_ENTRY 3
6651 #define IMT_METHOD_JTI(idx) (1 + idx * RECORDS_PER_ENTRY + IMT_METHOD_OFFSET)
6652 #define TARGET_CODE_JTI(idx) (1 + idx * RECORDS_PER_ENTRY + TARGET_CODE_OFFSET)
6653 #define JUMP_CODE_JTI(idx) (1 + idx * RECORDS_PER_ENTRY + JUMP_CODE_OFFSET)
6655 jte = mono_jumptable_add_entries (RECORDS_PER_ENTRY * count + 1 /* vtable */);
6656 code = (arminstr_t *) mono_arm_load_jumptable_entry_addr ((guint8 *) code, jte, ARMREG_R2);
6657 ARM_LDR_IMM (code, ARMREG_IP, ARMREG_R2, VTABLE_JTI);
6658 set_jumptable_element (jte, VTABLE_JTI, vtable);
/* Literal-pool layout: extra PC slot on the stack is used as the branch target. */
6661 ARM_PUSH4 (code, ARMREG_R0, ARMREG_R1, ARMREG_IP, ARMREG_PC);
6663 ARM_PUSH2 (code, ARMREG_R0, ARMREG_R1);
6664 ARM_LDR_IMM (code, ARMREG_R0, ARMREG_LR, -4);
6665 vtable_target = code;
6666 ARM_LDR_IMM (code, ARMREG_IP, ARMREG_PC, 0);
6668 if (mono_use_llvm) {
6669 /* LLVM always passes the IMT method in R5 */
6670 ARM_MOV_REG_REG (code, ARMREG_R0, ARMREG_V5);
6672 /* R0 == 0 means we are called from AOT code. In this case, V5 contains the IMT method */
6673 ARM_CMP_REG_IMM8 (code, ARMREG_R0, 0);
6674 ARM_MOV_REG_REG_COND (code, ARMREG_R0, ARMREG_V5, ARMCOND_EQ);
/* ---- Pass 2: emit the decision tree, one chunk per IMT entry. ---- */
6678 for (i = 0; i < count; ++i) {
6679 MonoIMTCheckItem *item = imt_entries [i];
6680 #ifdef USE_JUMP_TABLES
6681 guint32 imt_method_jti = 0, target_code_jti = 0;
6683 arminstr_t *imt_method = NULL, *vtable_offset_ins = NULL, *target_code_ins = NULL;
6685 gint32 vtable_offset;
6687 item->code_target = (guint8*)code;
6689 if (item->is_equals) {
6690 gboolean fail_case = !item->check_target_idx && fail_tramp;
6692 if (item->check_target_idx || fail_case) {
6693 if (!item->compare_done || fail_case) {
6694 #ifdef USE_JUMP_TABLES
6695 imt_method_jti = IMT_METHOD_JTI (i);
6696 code = load_element_with_regbase_cond (code, ARMREG_R1, ARMREG_R2, imt_method_jti, ARMCOND_AL);
6699 ARM_LDR_IMM (code, ARMREG_R1, ARMREG_PC, 0);
6701 ARM_CMP_REG_REG (code, ARMREG_R0, ARMREG_R1);
/* On mismatch branch to the next check; target is back-patched later. */
6703 #ifdef USE_JUMP_TABLES
6704 code = load_element_with_regbase_cond (code, ARMREG_R1, ARMREG_R2, JUMP_CODE_JTI (i), ARMCOND_NE);
6705 ARM_BX_COND (code, ARMCOND_NE, ARMREG_R1);
6706 item->jmp_code = GUINT_TO_POINTER (JUMP_CODE_JTI (i));
6708 item->jmp_code = (guint8*)code;
6709 ARM_B_COND (code, ARMCOND_NE, 0);
6712 /*Enable the commented code to assert on wrong method*/
6713 #ifdef ENABLE_WRONG_METHOD_CHECK
6714 #ifdef USE_JUMP_TABLES
6715 imt_method_jti = IMT_METHOD_JTI (i);
6716 code = load_element_with_regbase_cond (code, ARMREG_R1, ARMREG_R2, imt_method_jti, ARMCOND_AL);
6719 ARM_LDR_IMM (code, ARMREG_R1, ARMREG_PC, 0);
6721 ARM_CMP_REG_REG (code, ARMREG_R0, ARMREG_R1);
6723 ARM_B_COND (code, ARMCOND_EQ, 0);
6725 /* Define this if your system is so bad that gdb is failing. */
6726 #ifdef BROKEN_DEV_ENV
6727 ARM_MOV_REG_REG (code, ARMREG_R2, ARMREG_PC);
6729 arm_patch (code - 1, mini_dump_bad_imt);
6733 arm_patch (cond, code);
6737 if (item->has_target_code) {
6738 /* Load target address */
6739 #ifdef USE_JUMP_TABLES
6740 target_code_jti = TARGET_CODE_JTI (i);
6741 code = load_element_with_regbase_cond (code, ARMREG_R1, ARMREG_R2, target_code_jti, ARMCOND_AL);
6742 /* Restore registers */
6743 ARM_POP3 (code, ARMREG_R0, ARMREG_R1, ARMREG_R2);
6745 ARM_BX (code, ARMREG_R1);
6746 set_jumptable_element (jte, target_code_jti, item->value.target_code);
6748 target_code_ins = code;
6749 ARM_LDR_IMM (code, ARMREG_R1, ARMREG_PC, 0);
6750 /* Save it to the fourth slot */
6751 ARM_STR_IMM (code, ARMREG_R1, ARMREG_SP, 3 * sizeof (gpointer));
6752 /* Restore registers and branch */
6753 ARM_POP4 (code, ARMREG_R0, ARMREG_R1, ARMREG_IP, ARMREG_PC);
6755 code = arm_emit_value_and_patch_ldr (code, target_code_ins, (gsize)item->value.target_code);
/* No explicit target code: dispatch through the vtable slot. */
6758 vtable_offset = DISTANCE (vtable, &vtable->vtable[item->value.vtable_slot]);
6759 if (!arm_is_imm12 (vtable_offset)) {
6761 * We need to branch to a computed address but we don't have
6762 * a free register to store it, since IP must contain the
6763 * vtable address. So we push the two values to the stack, and
6764 * load them both using LDM.
6766 /* Compute target address */
6767 #ifdef USE_JUMP_TABLES
6768 ARM_MOVW_REG_IMM (code, ARMREG_R1, vtable_offset & 0xffff);
6769 if (vtable_offset >> 16)
6770 ARM_MOVT_REG_IMM (code, ARMREG_R1, (vtable_offset >> 16) & 0xffff);
6771 /* IP had vtable base. */
6772 ARM_LDR_REG_REG (code, ARMREG_IP, ARMREG_IP, ARMREG_R1);
6773 /* Restore registers and branch */
6774 ARM_POP3 (code, ARMREG_R0, ARMREG_R1, ARMREG_R2);
6775 ARM_BX (code, ARMREG_IP);
6777 vtable_offset_ins = code;
6778 ARM_LDR_IMM (code, ARMREG_R1, ARMREG_PC, 0);
6779 ARM_LDR_REG_REG (code, ARMREG_R1, ARMREG_IP, ARMREG_R1);
6780 /* Save it to the fourth slot */
6781 ARM_STR_IMM (code, ARMREG_R1, ARMREG_SP, 3 * sizeof (gpointer));
6782 /* Restore registers and branch */
6783 ARM_POP4 (code, ARMREG_R0, ARMREG_R1, ARMREG_IP, ARMREG_PC);
6785 code = arm_emit_value_and_patch_ldr (code, vtable_offset_ins, vtable_offset);
/* Small offset: a single LDR from the vtable reaches the slot directly. */
6788 #ifdef USE_JUMP_TABLES
6789 ARM_LDR_IMM (code, ARMREG_IP, ARMREG_IP, vtable_offset);
6790 ARM_POP3 (code, ARMREG_R0, ARMREG_R1, ARMREG_R2);
6791 ARM_BX (code, ARMREG_IP);
6793 ARM_POP2 (code, ARMREG_R0, ARMREG_R1);
6795 ARM_ADD_REG_IMM8 (code, ARMREG_SP, ARMREG_SP, 2 * sizeof (gpointer));
6796 ARM_LDR_IMM (code, ARMREG_PC, ARMREG_IP, vtable_offset);
/* fail_case: wire the not-equal branch to FAIL_TRAMP. */
6802 #ifdef USE_JUMP_TABLES
6803 set_jumptable_element (jte, GPOINTER_TO_UINT (item->jmp_code), code);
6804 target_code_jti = TARGET_CODE_JTI (i);
6805 /* Load target address */
6806 code = load_element_with_regbase_cond (code, ARMREG_R1, ARMREG_R2, target_code_jti, ARMCOND_AL);
6807 /* Restore registers */
6808 ARM_POP3 (code, ARMREG_R0, ARMREG_R1, ARMREG_R2);
6810 ARM_BX (code, ARMREG_R1);
6811 set_jumptable_element (jte, target_code_jti, fail_tramp);
6813 arm_patch (item->jmp_code, (guchar*)code);
6815 target_code_ins = code;
6816 /* Load target address */
6817 ARM_LDR_IMM (code, ARMREG_R1, ARMREG_PC, 0);
6818 /* Save it to the fourth slot */
6819 ARM_STR_IMM (code, ARMREG_R1, ARMREG_SP, 3 * sizeof (gpointer));
6820 /* Restore registers and branch */
6821 ARM_POP4 (code, ARMREG_R0, ARMREG_R1, ARMREG_IP, ARMREG_PC);
6823 code = arm_emit_value_and_patch_ldr (code, target_code_ins, (gsize)fail_tramp);
6825 item->jmp_code = NULL;
6828 #ifdef USE_JUMP_TABLES
6830 set_jumptable_element (jte, imt_method_jti, item->key);
6833 code = arm_emit_value_and_patch_ldr (code, imt_method, (guint32)item->key);
6835 /*must emit after unconditional branch*/
6836 if (vtable_target) {
6837 code = arm_emit_value_and_patch_ldr (code, vtable_target, (guint32)vtable);
6838 item->chunk_size += 4;
6839 vtable_target = NULL;
6842 /*We reserve the space for bsearch IMT values after the first entry with an absolute jump*/
6843 constant_pool_starts [i] = code;
6845 code += extra_space;
/* Non-equals entry: binary-search node, branch if key >= pivot (unsigned). */
6850 #ifdef USE_JUMP_TABLES
6851 code = load_element_with_regbase_cond (code, ARMREG_R1, ARMREG_R2, IMT_METHOD_JTI (i), ARMCOND_AL);
6852 ARM_CMP_REG_REG (code, ARMREG_R0, ARMREG_R1);
6853 code = load_element_with_regbase_cond (code, ARMREG_R1, ARMREG_R2, JUMP_CODE_JTI (i), ARMCOND_HS);
6854 ARM_BX_COND (code, ARMCOND_HS, ARMREG_R1);
6855 item->jmp_code = GUINT_TO_POINTER (JUMP_CODE_JTI (i));
6857 ARM_LDR_IMM (code, ARMREG_R1, ARMREG_PC, 0);
6858 ARM_CMP_REG_REG (code, ARMREG_R0, ARMREG_R1);
6860 item->jmp_code = (guint8*)code;
6861 ARM_B_COND (code, ARMCOND_HS, 0);
/* ---- Pass 3: back-patch pending branches and fill constant pools. ---- */
6867 for (i = 0; i < count; ++i) {
6868 MonoIMTCheckItem *item = imt_entries [i];
6869 if (item->jmp_code) {
6870 if (item->check_target_idx)
6871 #ifdef USE_JUMP_TABLES
6872 set_jumptable_element (jte, GPOINTER_TO_UINT (item->jmp_code), imt_entries [item->check_target_idx]->code_target);
6874 arm_patch (item->jmp_code, imt_entries [item->check_target_idx]->code_target);
6877 if (i > 0 && item->is_equals) {
6879 #ifdef USE_JUMP_TABLES
6880 for (j = i - 1; j >= 0 && !imt_entries [j]->is_equals; --j)
6881 set_jumptable_element (jte, IMT_METHOD_JTI (j), imt_entries [j]->key)
6883 arminstr_t *space_start = constant_pool_starts [i];
6884 for (j = i - 1; j >= 0 && !imt_entries [j]->is_equals; --j) {
6885 space_start = arm_emit_value_and_patch_ldr (space_start, (arminstr_t*)imt_entries [j]->code_target, (guint32)imt_entries [j]->key);
6893 char *buff = g_strdup_printf ("thunk_for_class_%s_%s_entries_%d", vtable->klass->name_space, vtable->klass->name, count);
6894 mono_disassemble_code (NULL, (guint8*)start, size, buff);
6899 #ifndef USE_JUMP_TABLES
6900 g_free (constant_pool_starts);
6903 mono_arch_flush_icache ((guint8*)start, size);
6904 mono_stats.imt_thunks_size += code - start;
6906 g_assert (DISTANCE (start, code) <= size);
/* Read integer register REG from the saved machine context CTX. */
6913 mono_arch_context_get_int_reg (MonoContext *ctx, int reg)
6915 return ctx->regs [reg];
/* Write VAL into integer register REG of the saved machine context CTX. */
6919 mono_arch_context_set_int_reg (MonoContext *ctx, int reg, mgreg_t val)
6921 ctx->regs [reg] = val;
6925 * mono_arch_get_trampolines:
6927 * Return a list of MonoTrampInfo structures describing arch specific trampolines
/* Delegates to the ARM exception-trampoline builder; AOT selects the variant. */
6931 mono_arch_get_trampolines (gboolean aot)
6933 return mono_arm_get_exception_trampolines (aot);
6937 #ifdef MONO_ARCH_SOFT_DEBUG_SUPPORTED
6939 * mono_arch_set_breakpoint:
6941 * Set a breakpoint at the native code corresponding to JI at NATIVE_OFFSET.
6942 * The location should contain code emitted by OP_SEQ_POINT.
/* Three strategies: soft breakpoints patch in a BLX through LR; AOT code
 * records the trigger page in the per-method SeqPointInfo table; JITted code
 * gets an inline load from the breakpoint trigger page patched in. */
6945 mono_arch_set_breakpoint (MonoJitInfo *ji, guint8 *ip)
6948 guint32 native_offset = ip - (guint8*)ji->code_start;
6949 MonoDebugOptions *opt = mini_get_debug_options ();
6951 if (opt->soft_breakpoints) {
6952 g_assert (!ji->from_aot);
6954 ARM_BLX_REG (code, ARMREG_LR);
6955 mono_arch_flush_icache (code - 4, 4);
6956 } else if (ji->from_aot) {
6957 SeqPointInfo *info = mono_arch_get_seq_point_info (mono_domain_get (), ji->code_start);
/* Seq points are 4-byte aligned; slot must not already hold a breakpoint. */
6959 g_assert (native_offset % 4 == 0);
6960 g_assert (info->bp_addrs [native_offset / 4] == 0);
6961 info->bp_addrs [native_offset / 4] = bp_trigger_page;
6963 int dreg = ARMREG_LR;
6965 /* Read from another trigger page */
6966 #ifdef USE_JUMP_TABLES
6967 gpointer *jte = mono_jumptable_add_entry ();
6968 code = mono_arm_load_jumptable_entry (code, jte, dreg);
6969 jte [0] = bp_trigger_page;
6971 ARM_LDR_IMM (code, dreg, ARMREG_PC, 0);
6973 *(int*)code = (int)bp_trigger_page;
/* Dereferencing the protected trigger page raises the debug signal. */
6976 ARM_LDR_IMM (code, dreg, dreg, 0);
6978 mono_arch_flush_icache (code - 16, 16);
6981 /* This is currently implemented by emitting an SWI instruction, which
6982 * qemu/linux seems to convert to a SIGILL.
6984 *(int*)code = (0xef << 24) | 8;
6986 mono_arch_flush_icache (code - 4, 4);
6992 * mono_arch_clear_breakpoint:
6994 * Clear the breakpoint at IP.
/* Reverses mono_arch_set_breakpoint: soft breakpoints restore the original
 * word; AOT code clears the SeqPointInfo slot; JITted code overwrites the
 * four patched instructions (presumably with NOPs — elided here). */
6997 mono_arch_clear_breakpoint (MonoJitInfo *ji, guint8 *ip)
6999 MonoDebugOptions *opt = mini_get_debug_options ();
7003 if (opt->soft_breakpoints) {
7004 g_assert (!ji->from_aot);
7007 mono_arch_flush_icache (code - 4, 4);
7008 } else if (ji->from_aot) {
7009 guint32 native_offset = ip - (guint8*)ji->code_start;
7010 SeqPointInfo *info = mono_arch_get_seq_point_info (mono_domain_get (), ji->code_start);
7012 g_assert (native_offset % 4 == 0);
7013 g_assert (info->bp_addrs [native_offset / 4] == bp_trigger_page);
7014 info->bp_addrs [native_offset / 4] = 0;
7016 for (i = 0; i < 4; ++i)
7019 mono_arch_flush_icache (ip, code - ip);
7024 * mono_arch_start_single_stepping:
7026 * Start single stepping.
/* Revoke all access to the SS trigger page so seq-point reads fault. */
7029 mono_arch_start_single_stepping (void)
7031 if (ss_trigger_page)
7032 mono_mprotect (ss_trigger_page, mono_pagesize (), 0);
7038 * mono_arch_stop_single_stepping:
7040 * Stop single stepping.
/* Make the SS trigger page readable again so seq-point reads succeed. */
7043 mono_arch_stop_single_stepping (void)
7045 if (ss_trigger_page)
7046 mono_mprotect (ss_trigger_page, mono_pagesize (), MONO_MMAP_READ);
/* Signal raised by a trigger-page access fault; platform-dependent
 * (SIGBUS vs SIGSEGV — selected by the elided #if around these lines). */
7052 #define DBG_SIGNAL SIGBUS
7054 #define DBG_SIGNAL SIGSEGV
7058 * mono_arch_is_single_step_event:
7060 * Return whenever the machine state in SIGCTX corresponds to a single
/* True when the faulting address lies within the SS trigger page. */
7064 mono_arch_is_single_step_event (void *info, void *sigctx)
7066 siginfo_t *sinfo = info;
7068 if (!ss_trigger_page)
7071 /* Sometimes the address is off by 4 */
7072 if (sinfo->si_addr >= ss_trigger_page && (guint8*)sinfo->si_addr <= (guint8*)ss_trigger_page + 128)
7079 * mono_arch_is_breakpoint_event:
7081 * Return whenever the machine state in SIGCTX corresponds to a breakpoint event.
/* True when the debug signal's faulting address lies in the BP trigger page. */
7084 mono_arch_is_breakpoint_event (void *info, void *sigctx)
7086 siginfo_t *sinfo = info;
7088 if (!ss_trigger_page)
7091 if (sinfo->si_signo == DBG_SIGNAL) {
7092 /* Sometimes the address is off by 4 */
7093 if (sinfo->si_addr >= bp_trigger_page && (guint8*)sinfo->si_addr <= (guint8*)bp_trigger_page + 128)
7103 * mono_arch_skip_breakpoint:
7105 * See mini-amd64.c for docs.
/* Resume past the 4-byte breakpoint instruction by bumping the saved PC. */
7108 mono_arch_skip_breakpoint (MonoContext *ctx, MonoJitInfo *ji)
7110 MONO_CONTEXT_SET_IP (ctx, (guint8*)MONO_CONTEXT_GET_IP (ctx) + 4);
7114 * mono_arch_skip_single_step:
7116 * See mini-amd64.c for docs.
/* Resume past the 4-byte single-step trigger load by bumping the saved PC. */
7119 mono_arch_skip_single_step (MonoContext *ctx)
7121 MONO_CONTEXT_SET_IP (ctx, (guint8*)MONO_CONTEXT_GET_IP (ctx) + 4);
7124 #endif /* MONO_ARCH_SOFT_DEBUG_SUPPORTED */
7127 * mono_arch_get_seq_point_info:
7129 * See mini-amd64.c for docs.
/* Look up — or lazily create and cache — the per-method SeqPointInfo used by
 * AOT breakpoint support. The bp_addrs table has one slot per 4-byte
 * instruction; entries are protected by the domain lock. */
7132 mono_arch_get_seq_point_info (MonoDomain *domain, guint8 *code)
7137 // FIXME: Add a free function
7139 mono_domain_lock (domain);
7140 info = g_hash_table_lookup (domain_jit_info (domain)->arch_seq_points,
7142 mono_domain_unlock (domain);
7145 ji = mono_jit_info_table_find (domain, (char*)code);
/* One gpointer slot per instruction word follows the SeqPointInfo header. */
7148 info = g_malloc0 (sizeof (SeqPointInfo) + ji->code_size);
7150 info->ss_trigger_page = ss_trigger_page;
7151 info->bp_trigger_page = bp_trigger_page;
7153 mono_domain_lock (domain);
7154 g_hash_table_insert (domain_jit_info (domain)->arch_seq_points,
7156 mono_domain_unlock (domain);
/* Initialize an extended LMF frame and link it after PREV_LMF. */
7163 mono_arch_init_lmf_ext (MonoLMFExt *ext, gpointer prev_lmf)
7165 ext->lmf.previous_lmf = prev_lmf;
7166 /* Mark that this is a MonoLMFExt */
7167 ext->lmf.previous_lmf = (gpointer)(((gssize)ext->lmf.previous_lmf) | 2);
7168 ext->lmf.sp = (gssize)ext;
7172 * mono_arch_set_target:
7174 * Set the target architecture the JIT backend should generate code for, in the form
7175 * of a GNU target triplet. Only used in AOT mode.
/* Substring checks are cumulative and order-insensitive: e.g. "armv7s"
 * matches both the "armv7" and "armv7s" tests, setting v7 and v7s. */
7178 mono_arch_set_target (char *mtriple)
7180 /* The GNU target triple format is not very well documented */
7181 if (strstr (mtriple, "armv7")) {
7182 v5_supported = TRUE;
7183 v6_supported = TRUE;
7184 v7_supported = TRUE;
7186 if (strstr (mtriple, "armv6")) {
7187 v5_supported = TRUE;
7188 v6_supported = TRUE;
7190 if (strstr (mtriple, "armv7s")) {
7191 v7s_supported = TRUE;
7193 if (strstr (mtriple, "thumbv7s")) {
7194 v5_supported = TRUE;
7195 v6_supported = TRUE;
7196 v7_supported = TRUE;
7197 v7s_supported = TRUE;
7198 thumb_supported = TRUE;
7199 thumb2_supported = TRUE;
/* Apple targets are at least ARMv6 with Thumb interworking. */
7201 if (strstr (mtriple, "darwin") || strstr (mtriple, "ios")) {
7202 v5_supported = TRUE;
7203 v6_supported = TRUE;
7204 thumb_supported = TRUE;
7207 if (strstr (mtriple, "gnueabi"))
7208 eabi_supported = TRUE;
/* Report whether OPCODE has a native implementation on the current CPU.
 * The atomic ops rely on LDREX/STREX-based sequences available from ARMv7. */
7212 mono_arch_opcode_supported (int opcode)
7215 case OP_ATOMIC_EXCHANGE_I4:
7216 case OP_ATOMIC_CAS_I4:
7217 case OP_ATOMIC_ADD_NEW_I4:
7218 return v7_supported;
7224 #if defined(ENABLE_GSHAREDVT)
7226 #include "../../../mono-extensions/mono/mini/mini-arm-gsharedvt.c"
7228 #endif /* !MONOTOUCH */