2 * mini-arm.c: ARM backend for the Mono code generator
5 * Paolo Molaro (lupus@ximian.com)
6 * Dietmar Maurer (dietmar@ximian.com)
8 * (C) 2003 Ximian, Inc.
9 * Copyright 2003-2011 Novell, Inc (http://www.novell.com)
10 * Copyright 2011 Xamarin, Inc (http://www.xamarin.com)
15 #include <mono/metadata/appdomain.h>
16 #include <mono/metadata/debug-helpers.h>
17 #include <mono/utils/mono-mmap.h>
18 #include <mono/utils/mono-hwcap-arm.h>
24 #include "debugger-agent.h"
26 #include "mono/arch/arm/arm-vfp-codegen.h"
28 /* Sanity check: This makes no sense */
29 #if defined(ARM_FPU_NONE) && (defined(ARM_FPU_VFP) || defined(ARM_FPU_VFP_HARD))
30 #error "ARM_FPU_NONE is defined while one of ARM_FPU_VFP/ARM_FPU_VFP_HARD is defined"
34 * IS_SOFT_FLOAT: Is full software floating point used?
35 * IS_HARD_FLOAT: Is full hardware floating point used?
36 * IS_VFP: Is hardware floating point with software ABI used?
38 * These are not necessarily constants, e.g. IS_SOFT_FLOAT and
39 * IS_VFP may delegate to mono_arch_is_soft_float ().
42 #if defined(ARM_FPU_VFP_HARD)
43 #define IS_SOFT_FLOAT (FALSE)
44 #define IS_HARD_FLOAT (TRUE)
46 #elif defined(ARM_FPU_NONE)
47 #define IS_SOFT_FLOAT (mono_arch_is_soft_float ())
48 #define IS_HARD_FLOAT (FALSE)
49 #define IS_VFP (!mono_arch_is_soft_float ())
51 #define IS_SOFT_FLOAT (FALSE)
52 #define IS_HARD_FLOAT (FALSE)
56 #if defined(__ARM_EABI__) && defined(__linux__) && !defined(PLATFORM_ANDROID) && !defined(__native_client__)
57 #define HAVE_AEABI_READ_TP 1
60 #ifdef __native_client_codegen__
61 const guint kNaClAlignment = kNaClAlignmentARM;
62 const guint kNaClAlignmentMask = kNaClAlignmentMaskARM;
63 gint8 nacl_align_byte = -1; /* 0xff */
66 mono_arch_nacl_pad (guint8 *code, int pad)
68 /* Not yet properly implemented. */
69 g_assert_not_reached ();
74 mono_arch_nacl_skip_nops (guint8 *code)
76 /* Not yet properly implemented. */
77 g_assert_not_reached ();
81 #endif /* __native_client_codegen__ */
83 #define ALIGN_TO(val,align) ((((guint64)val) + ((align) - 1)) & ~((align) - 1))
86 void sys_icache_invalidate (void *start, size_t len);
89 /* This mutex protects architecture specific caches */
90 #define mono_mini_arch_lock() EnterCriticalSection (&mini_arch_mutex)
91 #define mono_mini_arch_unlock() LeaveCriticalSection (&mini_arch_mutex)
92 static CRITICAL_SECTION mini_arch_mutex;
94 static gboolean v5_supported = FALSE;
95 static gboolean v6_supported = FALSE;
96 static gboolean v7_supported = FALSE;
97 static gboolean v7s_supported = FALSE;
98 static gboolean thumb_supported = FALSE;
99 static gboolean thumb2_supported = FALSE;
 * Whether to use the ARM EABI
103 static gboolean eabi_supported = FALSE;
 * Whether to use the iPhone ABI extensions:
107 * http://developer.apple.com/library/ios/documentation/Xcode/Conceptual/iPhoneOSABIReference/index.html
108 * Basically, r7 is used as a frame pointer and it should point to the saved r7 + lr.
109 * This is required for debugging/profiling tools to work, but it has some overhead so it should
110 * only be turned on in debug builds.
112 static gboolean iphone_abi = FALSE;
115 * The FPU we are generating code for. This is NOT runtime configurable right now,
116 * since some things like MONO_ARCH_CALLEE_FREGS still depend on defines.
118 static MonoArmFPU arm_fpu;
120 #if defined(ARM_FPU_VFP_HARD)
122 * On armhf, d0-d7 are used for argument passing and d8-d15
123 * must be preserved across calls, which leaves us no room
124 * for scratch registers. So we use d14-d15 but back up their
125 * previous contents to a stack slot before using them - see
126 * mono_arm_emit_vfp_scratch_save/_restore ().
128 static int vfp_scratch1 = ARM_VFP_D14;
129 static int vfp_scratch2 = ARM_VFP_D15;
132 * On armel, d0-d7 do not need to be preserved, so we can
133 * freely make use of them as scratch registers.
135 static int vfp_scratch1 = ARM_VFP_D0;
136 static int vfp_scratch2 = ARM_VFP_D1;
141 static volatile int ss_trigger_var = 0;
143 static gpointer single_step_func_wrapper;
144 static gpointer breakpoint_func_wrapper;
147 * The code generated for sequence points reads from this location, which is
148 * made read-only when single stepping is enabled.
150 static gpointer ss_trigger_page;
152 /* Enabled breakpoints read from this trigger page */
153 static gpointer bp_trigger_page;
155 /* Structure used by the sequence points in AOTed code */
157 gpointer ss_trigger_page;
158 gpointer bp_trigger_page;
159 guint8* bp_addrs [MONO_ZERO_LEN_ARRAY];
164 * floating point support: on ARM it is a mess, there are at least 3
 * different setups, each of which is binary-incompatible with the others.
166 * 1) FPA: old and ugly, but unfortunately what current distros use
167 * the double binary format has the two words swapped. 8 double registers.
168 * Implemented usually by kernel emulation.
169 * 2) softfloat: the compiler emulates all the fp ops. Usually uses the
170 * ugly swapped double format (I guess a softfloat-vfp exists, too, though).
171 * 3) VFP: the new and actually sensible and useful FP support. Implemented
172 * in HW or kernel-emulated, requires new tools. I think this is what symbian uses.
174 * We do not care about FPA. We will support soft float and VFP.
176 int mono_exc_esp_offset = 0;
178 #define arm_is_imm12(v) ((v) > -4096 && (v) < 4096)
179 #define arm_is_imm8(v) ((v) > -256 && (v) < 256)
180 #define arm_is_fpimm8(v) ((v) >= -1020 && (v) <= 1020)
182 #define LDR_MASK ((0xf << ARMCOND_SHIFT) | (3 << 26) | (1 << 22) | (1 << 20) | (15 << 12))
183 #define LDR_PC_VAL ((ARMCOND_AL << ARMCOND_SHIFT) | (1 << 26) | (0 << 22) | (1 << 20) | (15 << 12))
184 #define IS_LDR_PC(val) (((val) & LDR_MASK) == LDR_PC_VAL)
186 #define ADD_LR_PC_4 ((ARMCOND_AL << ARMCOND_SHIFT) | (1 << 25) | (1 << 23) | (ARMREG_PC << 16) | (ARMREG_LR << 12) | 4)
187 #define MOV_LR_PC ((ARMCOND_AL << ARMCOND_SHIFT) | (1 << 24) | (0xa << 20) | (ARMREG_LR << 12) | ARMREG_PC)
188 //#define DEBUG_IMT 0
190 /* A variant of ARM_LDR_IMM which can handle large offsets */
191 #define ARM_LDR_IMM_GENERAL(code, dreg, basereg, offset, scratch_reg) do { \
192 if (arm_is_imm12 ((offset))) { \
193 ARM_LDR_IMM (code, (dreg), (basereg), (offset)); \
195 g_assert ((scratch_reg) != (basereg)); \
196 code = mono_arm_emit_load_imm (code, (scratch_reg), (offset)); \
197 ARM_LDR_REG_REG (code, (dreg), (basereg), (scratch_reg)); \
201 #define ARM_STR_IMM_GENERAL(code, dreg, basereg, offset, scratch_reg) do { \
202 if (arm_is_imm12 ((offset))) { \
203 ARM_STR_IMM (code, (dreg), (basereg), (offset)); \
205 g_assert ((scratch_reg) != (basereg)); \
206 code = mono_arm_emit_load_imm (code, (scratch_reg), (offset)); \
207 ARM_STR_REG_REG (code, (dreg), (basereg), (scratch_reg)); \
211 static void mono_arch_compute_omit_fp (MonoCompile *cfg);
214 mono_arch_regname (int reg)
216 static const char * rnames[] = {
217 "arm_r0", "arm_r1", "arm_r2", "arm_r3", "arm_v1",
218 "arm_v2", "arm_v3", "arm_v4", "arm_v5", "arm_v6",
219 "arm_v7", "arm_fp", "arm_ip", "arm_sp", "arm_lr",
222 if (reg >= 0 && reg < 16)
228 mono_arch_fregname (int reg)
230 static const char * rnames[] = {
231 "arm_f0", "arm_f1", "arm_f2", "arm_f3", "arm_f4",
232 "arm_f5", "arm_f6", "arm_f7", "arm_f8", "arm_f9",
233 "arm_f10", "arm_f11", "arm_f12", "arm_f13", "arm_f14",
234 "arm_f15", "arm_f16", "arm_f17", "arm_f18", "arm_f19",
235 "arm_f20", "arm_f21", "arm_f22", "arm_f23", "arm_f24",
236 "arm_f25", "arm_f26", "arm_f27", "arm_f28", "arm_f29",
239 if (reg >= 0 && reg < 32)
247 emit_big_add (guint8 *code, int dreg, int sreg, int imm)
249 int imm8, rot_amount;
250 if ((imm8 = mono_arm_is_rotated_imm8 (imm, &rot_amount)) >= 0) {
251 ARM_ADD_REG_IMM (code, dreg, sreg, imm8, rot_amount);
254 g_assert (dreg != sreg);
255 code = mono_arm_emit_load_imm (code, dreg, imm);
256 ARM_ADD_REG_REG (code, dreg, dreg, sreg);
261 emit_memcpy (guint8 *code, int size, int dreg, int doffset, int sreg, int soffset)
263 /* we can use r0-r3, since this is called only for incoming args on the stack */
264 if (size > sizeof (gpointer) * 4) {
266 code = emit_big_add (code, ARMREG_R0, sreg, soffset);
267 code = emit_big_add (code, ARMREG_R1, dreg, doffset);
268 start_loop = code = mono_arm_emit_load_imm (code, ARMREG_R2, size);
269 ARM_LDR_IMM (code, ARMREG_R3, ARMREG_R0, 0);
270 ARM_STR_IMM (code, ARMREG_R3, ARMREG_R1, 0);
271 ARM_ADD_REG_IMM8 (code, ARMREG_R0, ARMREG_R0, 4);
272 ARM_ADD_REG_IMM8 (code, ARMREG_R1, ARMREG_R1, 4);
273 ARM_SUBS_REG_IMM8 (code, ARMREG_R2, ARMREG_R2, 4);
274 ARM_B_COND (code, ARMCOND_NE, 0);
275 arm_patch (code - 4, start_loop);
278 if (arm_is_imm12 (doffset) && arm_is_imm12 (doffset + size) &&
279 arm_is_imm12 (soffset) && arm_is_imm12 (soffset + size)) {
281 ARM_LDR_IMM (code, ARMREG_LR, sreg, soffset);
282 ARM_STR_IMM (code, ARMREG_LR, dreg, doffset);
288 code = emit_big_add (code, ARMREG_R0, sreg, soffset);
289 code = emit_big_add (code, ARMREG_R1, dreg, doffset);
290 doffset = soffset = 0;
292 ARM_LDR_IMM (code, ARMREG_LR, ARMREG_R0, soffset);
293 ARM_STR_IMM (code, ARMREG_LR, ARMREG_R1, doffset);
299 g_assert (size == 0);
304 emit_call_reg (guint8 *code, int reg)
307 ARM_BLX_REG (code, reg);
309 #ifdef USE_JUMP_TABLES
310 g_assert_not_reached ();
312 ARM_MOV_REG_REG (code, ARMREG_LR, ARMREG_PC);
316 ARM_MOV_REG_REG (code, ARMREG_PC, reg);
322 emit_call_seq (MonoCompile *cfg, guint8 *code)
324 #ifdef USE_JUMP_TABLES
325 code = mono_arm_patchable_bl (code, ARMCOND_AL);
327 if (cfg->method->dynamic) {
328 ARM_LDR_IMM (code, ARMREG_IP, ARMREG_PC, 0);
330 *(gpointer*)code = NULL;
332 code = emit_call_reg (code, ARMREG_IP);
341 mono_arm_patchable_b (guint8 *code, int cond)
343 #ifdef USE_JUMP_TABLES
346 jte = mono_jumptable_add_entry ();
347 code = mono_arm_load_jumptable_entry (code, jte, ARMREG_IP);
348 ARM_BX_COND (code, cond, ARMREG_IP);
350 ARM_B_COND (code, cond, 0);
356 mono_arm_patchable_bl (guint8 *code, int cond)
358 #ifdef USE_JUMP_TABLES
361 jte = mono_jumptable_add_entry ();
362 code = mono_arm_load_jumptable_entry (code, jte, ARMREG_IP);
363 ARM_BLX_REG_COND (code, cond, ARMREG_IP);
365 ARM_BL_COND (code, cond, 0);
370 #ifdef USE_JUMP_TABLES
372 mono_arm_load_jumptable_entry_addr (guint8 *code, gpointer *jte, ARMReg reg)
374 ARM_MOVW_REG_IMM (code, reg, GPOINTER_TO_UINT(jte) & 0xffff);
375 ARM_MOVT_REG_IMM (code, reg, (GPOINTER_TO_UINT(jte) >> 16) & 0xffff);
380 mono_arm_load_jumptable_entry (guint8 *code, gpointer* jte, ARMReg reg)
382 code = mono_arm_load_jumptable_entry_addr (code, jte, reg);
383 ARM_LDR_IMM (code, reg, reg, 0);
389 emit_move_return_value (MonoCompile *cfg, MonoInst *ins, guint8 *code)
391 switch (ins->opcode) {
394 case OP_FCALL_MEMBASE:
396 if (((MonoCallInst*)ins)->signature->ret->type == MONO_TYPE_R4) {
398 ARM_CVTS (code, ins->dreg, ARM_VFP_F0);
400 ARM_FMSR (code, ins->dreg, ARMREG_R0);
401 ARM_CVTS (code, ins->dreg, ins->dreg);
405 ARM_CPYD (code, ins->dreg, ARM_VFP_D0);
407 ARM_FMDRR (code, ARMREG_R0, ARMREG_R1, ins->dreg);
420 * Emit code to push an LMF structure on the LMF stack.
421 * On arm, this is intermixed with the initialization of other fields of the structure.
424 emit_save_lmf (MonoCompile *cfg, guint8 *code, gint32 lmf_offset)
426 gboolean get_lmf_fast = FALSE;
429 #ifdef HAVE_AEABI_READ_TP
430 gint32 lmf_addr_tls_offset = mono_get_lmf_addr_tls_offset ();
432 if (lmf_addr_tls_offset != -1) {
435 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_INTERNAL_METHOD,
436 (gpointer)"__aeabi_read_tp");
437 code = emit_call_seq (cfg, code);
439 ARM_LDR_IMM (code, ARMREG_R0, ARMREG_R0, lmf_addr_tls_offset);
445 if (cfg->method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) {
448 /* Inline mono_get_lmf_addr () */
449 /* jit_tls = pthread_getspecific (mono_jit_tls_id); lmf_addr = &jit_tls->lmf; */
451 /* Load mono_jit_tls_id */
453 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_JIT_TLS_ID, NULL);
454 ARM_LDR_IMM (code, ARMREG_R0, ARMREG_PC, 0);
456 *(gpointer*)code = NULL;
458 ARM_LDR_REG_REG (code, ARMREG_R0, ARMREG_PC, ARMREG_R0);
459 /* call pthread_getspecific () */
460 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_INTERNAL_METHOD,
461 (gpointer)"pthread_getspecific");
462 code = emit_call_seq (cfg, code);
463 /* lmf_addr = &jit_tls->lmf */
464 lmf_offset = G_STRUCT_OFFSET (MonoJitTlsData, lmf);
465 g_assert (arm_is_imm8 (lmf_offset));
466 ARM_ADD_REG_IMM (code, ARMREG_R0, ARMREG_R0, lmf_offset, 0);
473 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_INTERNAL_METHOD,
474 (gpointer)"mono_get_lmf_addr");
475 code = emit_call_seq (cfg, code);
477 /* we build the MonoLMF structure on the stack - see mini-arm.h */
478 /* lmf_offset is the offset from the previous stack pointer,
479 * alloc_size is the total stack space allocated, so the offset
480 * of MonoLMF from the current stack ptr is alloc_size - lmf_offset.
481 * The pointer to the struct is put in r1 (new_lmf).
482 * ip is used as scratch
483 * The callee-saved registers are already in the MonoLMF structure
485 code = emit_big_add (code, ARMREG_R1, ARMREG_SP, lmf_offset);
486 /* r0 is the result from mono_get_lmf_addr () */
487 ARM_STR_IMM (code, ARMREG_R0, ARMREG_R1, G_STRUCT_OFFSET (MonoLMF, lmf_addr));
488 /* new_lmf->previous_lmf = *lmf_addr */
489 ARM_LDR_IMM (code, ARMREG_IP, ARMREG_R0, G_STRUCT_OFFSET (MonoLMF, previous_lmf));
490 ARM_STR_IMM (code, ARMREG_IP, ARMREG_R1, G_STRUCT_OFFSET (MonoLMF, previous_lmf));
491 /* *(lmf_addr) = r1 */
492 ARM_STR_IMM (code, ARMREG_R1, ARMREG_R0, G_STRUCT_OFFSET (MonoLMF, previous_lmf));
493 /* Skip method (only needed for trampoline LMF frames) */
494 ARM_STR_IMM (code, ARMREG_SP, ARMREG_R1, G_STRUCT_OFFSET (MonoLMF, sp));
495 ARM_STR_IMM (code, ARMREG_FP, ARMREG_R1, G_STRUCT_OFFSET (MonoLMF, fp));
496 /* save the current IP */
497 ARM_MOV_REG_REG (code, ARMREG_IP, ARMREG_PC);
498 ARM_STR_IMM (code, ARMREG_IP, ARMREG_R1, G_STRUCT_OFFSET (MonoLMF, ip));
500 for (i = 0; i < sizeof (MonoLMF); i += sizeof (mgreg_t))
501 mini_gc_set_slot_type_from_fp (cfg, lmf_offset + i, SLOT_NOREF);
512 emit_float_args (MonoCompile *cfg, MonoCallInst *inst, guint8 *code, int *max_len, guint *offset)
516 for (list = inst->float_args; list; list = list->next) {
517 FloatArgData *fad = list->data;
518 MonoInst *var = get_vreg_to_inst (cfg, fad->vreg);
519 gboolean imm = arm_is_fpimm8 (var->inst_offset);
521 /* 4+1 insns for emit_big_add () and 1 for FLDS. */
527 if (*offset + *max_len > cfg->code_size) {
528 cfg->code_size += *max_len;
529 cfg->native_code = g_realloc (cfg->native_code, cfg->code_size);
531 code = cfg->native_code + *offset;
535 code = emit_big_add (code, ARMREG_LR, var->inst_basereg, var->inst_offset);
536 ARM_FLDS (code, fad->hreg, ARMREG_LR, 0);
538 ARM_FLDS (code, fad->hreg, var->inst_basereg, var->inst_offset);
540 *offset = code - cfg->native_code;
547 mono_arm_emit_vfp_scratch_save (MonoCompile *cfg, guint8 *code, int reg)
551 g_assert (reg == vfp_scratch1 || reg == vfp_scratch2);
553 inst = (MonoInst *) cfg->arch.vfp_scratch_slots [reg == vfp_scratch1 ? 0 : 1];
556 if (!arm_is_fpimm8 (inst->inst_offset)) {
557 code = emit_big_add (code, ARMREG_LR, inst->inst_basereg, inst->inst_offset);
558 ARM_FSTD (code, reg, ARMREG_LR, 0);
560 ARM_FSTD (code, reg, inst->inst_basereg, inst->inst_offset);
567 mono_arm_emit_vfp_scratch_restore (MonoCompile *cfg, guint8 *code, int reg)
571 g_assert (reg == vfp_scratch1 || reg == vfp_scratch2);
573 inst = (MonoInst *) cfg->arch.vfp_scratch_slots [reg == vfp_scratch1 ? 0 : 1];
576 if (!arm_is_fpimm8 (inst->inst_offset)) {
577 code = emit_big_add (code, ARMREG_LR, inst->inst_basereg, inst->inst_offset);
578 ARM_FLDD (code, reg, ARMREG_LR, 0);
580 ARM_FLDD (code, reg, inst->inst_basereg, inst->inst_offset);
589 * Emit code to pop an LMF structure from the LMF stack.
592 emit_restore_lmf (MonoCompile *cfg, guint8 *code, gint32 lmf_offset)
596 if (lmf_offset < 32) {
597 basereg = cfg->frame_reg;
602 code = emit_big_add (code, ARMREG_R2, cfg->frame_reg, lmf_offset);
605 /* ip = previous_lmf */
606 ARM_LDR_IMM (code, ARMREG_IP, basereg, offset + G_STRUCT_OFFSET (MonoLMF, previous_lmf));
608 ARM_LDR_IMM (code, ARMREG_LR, basereg, offset + G_STRUCT_OFFSET (MonoLMF, lmf_addr));
609 /* *(lmf_addr) = previous_lmf */
610 ARM_STR_IMM (code, ARMREG_IP, ARMREG_LR, G_STRUCT_OFFSET (MonoLMF, previous_lmf));
615 #endif /* #ifndef DISABLE_JIT */
618 * mono_arch_get_argument_info:
619 * @csig: a method signature
620 * @param_count: the number of parameters to consider
621 * @arg_info: an array to store the result infos
623 * Gathers information on parameters such as size, alignment and
624 * padding. arg_info should be large enought to hold param_count + 1 entries.
626 * Returns the size of the activation frame.
629 mono_arch_get_argument_info (MonoGenericSharingContext *gsctx, MonoMethodSignature *csig, int param_count, MonoJitArgumentInfo *arg_info)
631 int k, frame_size = 0;
632 guint32 size, align, pad;
636 t = mini_type_get_underlying_type (gsctx, csig->ret);
637 if (MONO_TYPE_ISSTRUCT (t)) {
638 frame_size += sizeof (gpointer);
642 arg_info [0].offset = offset;
645 frame_size += sizeof (gpointer);
649 arg_info [0].size = frame_size;
651 for (k = 0; k < param_count; k++) {
652 size = mini_type_stack_size_full (NULL, csig->params [k], &align, csig->pinvoke);
654 /* ignore alignment for now */
657 frame_size += pad = (align - (frame_size & (align - 1))) & (align - 1);
658 arg_info [k].pad = pad;
660 arg_info [k + 1].pad = 0;
661 arg_info [k + 1].size = size;
663 arg_info [k + 1].offset = offset;
667 align = MONO_ARCH_FRAME_ALIGNMENT;
668 frame_size += pad = (align - (frame_size & (align - 1))) & (align - 1);
669 arg_info [k].pad = pad;
674 #define MAX_ARCH_DELEGATE_PARAMS 3
677 get_delegate_invoke_impl (gboolean has_target, gboolean param_count, guint32 *code_size)
679 guint8 *code, *start;
682 start = code = mono_global_codeman_reserve (12);
684 /* Replace the this argument with the target */
685 ARM_LDR_IMM (code, ARMREG_IP, ARMREG_R0, G_STRUCT_OFFSET (MonoDelegate, method_ptr));
686 ARM_LDR_IMM (code, ARMREG_R0, ARMREG_R0, G_STRUCT_OFFSET (MonoDelegate, target));
687 ARM_MOV_REG_REG (code, ARMREG_PC, ARMREG_IP);
689 g_assert ((code - start) <= 12);
691 mono_arch_flush_icache (start, 12);
695 size = 8 + param_count * 4;
696 start = code = mono_global_codeman_reserve (size);
698 ARM_LDR_IMM (code, ARMREG_IP, ARMREG_R0, G_STRUCT_OFFSET (MonoDelegate, method_ptr));
699 /* slide down the arguments */
700 for (i = 0; i < param_count; ++i) {
701 ARM_MOV_REG_REG (code, (ARMREG_R0 + i), (ARMREG_R0 + i + 1));
703 ARM_MOV_REG_REG (code, ARMREG_PC, ARMREG_IP);
705 g_assert ((code - start) <= size);
707 mono_arch_flush_icache (start, size);
711 *code_size = code - start;
717 * mono_arch_get_delegate_invoke_impls:
719 * Return a list of MonoAotTrampInfo structures for the delegate invoke impl
723 mono_arch_get_delegate_invoke_impls (void)
731 code = get_delegate_invoke_impl (TRUE, 0, &code_len);
732 res = g_slist_prepend (res, mono_tramp_info_create ("delegate_invoke_impl_has_target", code, code_len, NULL, NULL));
734 for (i = 0; i <= MAX_ARCH_DELEGATE_PARAMS; ++i) {
735 code = get_delegate_invoke_impl (FALSE, i, &code_len);
736 tramp_name = g_strdup_printf ("delegate_invoke_impl_target_%d", i);
737 res = g_slist_prepend (res, mono_tramp_info_create (tramp_name, code, code_len, NULL, NULL));
745 mono_arch_get_delegate_invoke_impl (MonoMethodSignature *sig, gboolean has_target)
747 guint8 *code, *start;
750 /* FIXME: Support more cases */
751 sig_ret = mini_type_get_underlying_type (NULL, sig->ret);
752 if (MONO_TYPE_ISSTRUCT (sig_ret))
756 static guint8* cached = NULL;
757 mono_mini_arch_lock ();
759 mono_mini_arch_unlock ();
764 start = mono_aot_get_trampoline ("delegate_invoke_impl_has_target");
766 start = get_delegate_invoke_impl (TRUE, 0, NULL);
768 mono_mini_arch_unlock ();
771 static guint8* cache [MAX_ARCH_DELEGATE_PARAMS + 1] = {NULL};
774 if (sig->param_count > MAX_ARCH_DELEGATE_PARAMS)
776 for (i = 0; i < sig->param_count; ++i)
777 if (!mono_is_regsize_var (sig->params [i]))
780 mono_mini_arch_lock ();
781 code = cache [sig->param_count];
783 mono_mini_arch_unlock ();
788 char *name = g_strdup_printf ("delegate_invoke_impl_target_%d", sig->param_count);
789 start = mono_aot_get_trampoline (name);
792 start = get_delegate_invoke_impl (FALSE, sig->param_count, NULL);
794 cache [sig->param_count] = start;
795 mono_mini_arch_unlock ();
803 mono_arch_get_this_arg_from_call (mgreg_t *regs, guint8 *code)
805 return (gpointer)regs [ARMREG_R0];
809 * Initialize the cpu to execute managed code.
812 mono_arch_cpu_init (void)
817 i8_align = __alignof__ (gint64);
820 #ifdef MONO_CROSS_COMPILE
821 /* Need to set the alignment of i8 since it can different on the target */
822 #ifdef TARGET_ANDROID
824 mono_type_set_alignment (MONO_TYPE_I8, 8);
830 create_function_wrapper (gpointer function)
832 guint8 *start, *code;
834 start = code = mono_global_codeman_reserve (96);
837 * Construct the MonoContext structure on the stack.
840 ARM_SUB_REG_IMM8 (code, ARMREG_SP, ARMREG_SP, sizeof (MonoContext));
842 /* save ip, lr and pc into their correspodings ctx.regs slots. */
843 ARM_STR_IMM (code, ARMREG_IP, ARMREG_SP, G_STRUCT_OFFSET (MonoContext, regs) + sizeof (mgreg_t) * ARMREG_IP);
844 ARM_STR_IMM (code, ARMREG_LR, ARMREG_SP, G_STRUCT_OFFSET (MonoContext, regs) + 4 * ARMREG_LR);
845 ARM_STR_IMM (code, ARMREG_LR, ARMREG_SP, G_STRUCT_OFFSET (MonoContext, regs) + 4 * ARMREG_PC);
847 /* save r0..r10 and fp */
848 ARM_ADD_REG_IMM8 (code, ARMREG_IP, ARMREG_SP, G_STRUCT_OFFSET (MonoContext, regs));
849 ARM_STM (code, ARMREG_IP, 0x0fff);
851 /* now we can update fp. */
852 ARM_MOV_REG_REG (code, ARMREG_FP, ARMREG_SP);
854 /* make ctx.esp hold the actual value of sp at the beginning of this method. */
855 ARM_ADD_REG_IMM8 (code, ARMREG_R0, ARMREG_FP, sizeof (MonoContext));
856 ARM_STR_IMM (code, ARMREG_R0, ARMREG_IP, 4 * ARMREG_SP);
857 ARM_STR_IMM (code, ARMREG_R0, ARMREG_FP, G_STRUCT_OFFSET (MonoContext, regs) + 4 * ARMREG_SP);
859 /* make ctx.eip hold the address of the call. */
860 ARM_SUB_REG_IMM8 (code, ARMREG_LR, ARMREG_LR, 4);
861 ARM_STR_IMM (code, ARMREG_LR, ARMREG_SP, G_STRUCT_OFFSET (MonoContext, pc));
863 /* r0 now points to the MonoContext */
864 ARM_MOV_REG_REG (code, ARMREG_R0, ARMREG_FP);
867 #ifdef USE_JUMP_TABLES
869 gpointer *jte = mono_jumptable_add_entry ();
870 code = mono_arm_load_jumptable_entry (code, jte, ARMREG_IP);
874 ARM_LDR_IMM (code, ARMREG_IP, ARMREG_PC, 0);
876 *(gpointer*)code = function;
879 ARM_BLX_REG (code, ARMREG_IP);
881 /* we're back; save ctx.eip and ctx.esp into the corresponding regs slots. */
882 ARM_LDR_IMM (code, ARMREG_R0, ARMREG_FP, G_STRUCT_OFFSET (MonoContext, pc));
883 ARM_STR_IMM (code, ARMREG_R0, ARMREG_FP, G_STRUCT_OFFSET (MonoContext, regs) + 4 * ARMREG_LR);
884 ARM_STR_IMM (code, ARMREG_R0, ARMREG_FP, G_STRUCT_OFFSET (MonoContext, regs) + 4 * ARMREG_PC);
886 /* make ip point to the regs array, then restore everything, including pc. */
887 ARM_ADD_REG_IMM8 (code, ARMREG_IP, ARMREG_FP, G_STRUCT_OFFSET (MonoContext, regs));
888 ARM_LDM (code, ARMREG_IP, 0xffff);
890 mono_arch_flush_icache (start, code - start);
896 * Initialize architecture specific code.
899 mono_arch_init (void)
901 const char *cpu_arch;
903 InitializeCriticalSection (&mini_arch_mutex);
904 #ifdef MONO_ARCH_SOFT_DEBUG_SUPPORTED
905 if (mini_get_debug_options ()->soft_breakpoints) {
906 single_step_func_wrapper = create_function_wrapper (debugger_agent_single_step_from_context);
907 breakpoint_func_wrapper = create_function_wrapper (debugger_agent_breakpoint_from_context);
912 ss_trigger_page = mono_valloc (NULL, mono_pagesize (), MONO_MMAP_READ|MONO_MMAP_32BIT);
913 bp_trigger_page = mono_valloc (NULL, mono_pagesize (), MONO_MMAP_READ|MONO_MMAP_32BIT);
914 mono_mprotect (bp_trigger_page, mono_pagesize (), 0);
917 mono_aot_register_jit_icall ("mono_arm_throw_exception", mono_arm_throw_exception);
918 mono_aot_register_jit_icall ("mono_arm_throw_exception_by_token", mono_arm_throw_exception_by_token);
919 mono_aot_register_jit_icall ("mono_arm_resume_unwind", mono_arm_resume_unwind);
920 #if defined(ENABLE_GSHAREDVT)
921 mono_aot_register_jit_icall ("mono_arm_start_gsharedvt_call", mono_arm_start_gsharedvt_call);
924 #if defined(__ARM_EABI__)
925 eabi_supported = TRUE;
928 #if defined(ARM_FPU_VFP_HARD)
929 arm_fpu = MONO_ARM_FPU_VFP_HARD;
931 arm_fpu = MONO_ARM_FPU_VFP;
933 #if defined(ARM_FPU_NONE) && !defined(__APPLE__)
934 /* If we're compiling with a soft float fallback and it
935 turns out that no VFP unit is available, we need to
936 switch to soft float. We don't do this for iOS, since
937 iOS devices always have a VFP unit. */
938 if (!mono_hwcap_arm_has_vfp)
939 arm_fpu = MONO_ARM_FPU_NONE;
943 v5_supported = mono_hwcap_arm_is_v5;
944 v6_supported = mono_hwcap_arm_is_v6;
945 v7_supported = mono_hwcap_arm_is_v7;
946 v7s_supported = mono_hwcap_arm_is_v7s;
948 #if defined(__APPLE__)
949 /* iOS is special-cased here because we don't yet
950 have a way to properly detect CPU features on it. */
951 thumb_supported = TRUE;
954 thumb_supported = mono_hwcap_arm_has_thumb;
955 thumb2_supported = mono_hwcap_arm_has_thumb2;
958 /* Format: armv(5|6|7[s])[-thumb[2]] */
959 cpu_arch = g_getenv ("MONO_CPU_ARCH");
961 /* Do this here so it overrides any detection. */
963 if (strncmp (cpu_arch, "armv", 4) == 0) {
964 v5_supported = cpu_arch [4] >= '5';
965 v6_supported = cpu_arch [4] >= '6';
966 v7_supported = cpu_arch [4] >= '7';
967 v7s_supported = strncmp (cpu_arch, "armv7s", 6) == 0;
970 thumb_supported = strstr (cpu_arch, "thumb") != NULL;
971 thumb2_supported = strstr (cpu_arch, "thumb2") != NULL;
976 * Cleanup architecture specific code.
979 mono_arch_cleanup (void)
984 * This function returns the optimizations supported on this cpu.
987 mono_arch_cpu_optimizations (guint32 *exclude_mask)
989 /* no arm-specific optimizations yet */
995 * This function test for all SIMD functions supported.
997 * Returns a bitmask corresponding to all supported versions.
1001 mono_arch_cpu_enumerate_simd_versions (void)
1003 /* SIMD is currently unimplemented */
1011 mono_arch_opcode_needs_emulation (MonoCompile *cfg, int opcode)
1013 if (v7s_supported) {
1027 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
1029 mono_arch_is_soft_float (void)
1031 return arm_fpu == MONO_ARM_FPU_NONE;
1036 mono_arm_is_hard_float (void)
1038 return arm_fpu == MONO_ARM_FPU_VFP_HARD;
1042 is_regsize_var (MonoGenericSharingContext *gsctx, MonoType *t) {
1045 t = mini_type_get_underlying_type (gsctx, t);
1052 case MONO_TYPE_FNPTR:
1054 case MONO_TYPE_OBJECT:
1055 case MONO_TYPE_STRING:
1056 case MONO_TYPE_CLASS:
1057 case MONO_TYPE_SZARRAY:
1058 case MONO_TYPE_ARRAY:
1060 case MONO_TYPE_GENERICINST:
1061 if (!mono_type_generic_inst_is_valuetype (t))
1064 case MONO_TYPE_VALUETYPE:
1071 mono_arch_get_allocatable_int_vars (MonoCompile *cfg)
1076 for (i = 0; i < cfg->num_varinfo; i++) {
1077 MonoInst *ins = cfg->varinfo [i];
1078 MonoMethodVar *vmv = MONO_VARINFO (cfg, i);
1081 if (vmv->range.first_use.abs_pos >= vmv->range.last_use.abs_pos)
1084 if (ins->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT) || (ins->opcode != OP_LOCAL && ins->opcode != OP_ARG))
1087 /* we can only allocate 32 bit values */
1088 if (is_regsize_var (cfg->generic_sharing_context, ins->inst_vtype)) {
1089 g_assert (MONO_VARINFO (cfg, i)->reg == -1);
1090 g_assert (i == vmv->idx);
1091 vars = mono_varlist_insert_sorted (cfg, vars, vmv, FALSE);
1098 #define USE_EXTRA_TEMPS 0
1101 mono_arch_get_global_int_regs (MonoCompile *cfg)
1105 mono_arch_compute_omit_fp (cfg);
1108 * FIXME: Interface calls might go through a static rgctx trampoline which
1109 * sets V5, but it doesn't save it, so we need to save it ourselves, and
1112 if (cfg->flags & MONO_CFG_HAS_CALLS)
1113 cfg->uses_rgctx_reg = TRUE;
1115 if (cfg->arch.omit_fp)
1116 regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_FP));
1117 regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_V1));
1118 regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_V2));
1119 regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_V3));
1121 /* V4=R7 is used as a frame pointer, but V7=R10 is preserved */
1122 regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_V7));
1124 regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_V4));
1125 if (!(cfg->compile_aot || cfg->uses_rgctx_reg || COMPILE_LLVM (cfg)))
1126 /* V5 is reserved for passing the vtable/rgctx/IMT method */
1127 regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_V5));
1128 /*regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_V6));*/
1129 /*regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_V7));*/
1135 * mono_arch_regalloc_cost:
1137 * Return the cost, in number of memory references, of the action of
1138 * allocating the variable VMV into a register during global register
1142 mono_arch_regalloc_cost (MonoCompile *cfg, MonoMethodVar *vmv)
1148 #endif /* #ifndef DISABLE_JIT */
1150 #ifndef __GNUC_PREREQ
1151 #define __GNUC_PREREQ(maj, min) (0)
1155 mono_arch_flush_icache (guint8 *code, gint size)
1157 #if defined(__native_client__)
1158 // For Native Client we don't have to flush i-cache here,
1159 // as it's being done by dyncode interface.
1162 #ifdef MONO_CROSS_COMPILE
1164 sys_icache_invalidate (code, size);
1165 #elif __GNUC_PREREQ(4, 1)
1166 __clear_cache (code, code + size);
1167 #elif defined(PLATFORM_ANDROID)
1168 const int syscall = 0xf0002;
1176 : "r" (code), "r" (code + size), "r" (syscall)
1177 : "r0", "r1", "r7", "r2"
1180 __asm __volatile ("mov r0, %0\n"
1183 "swi 0x9f0002 @ sys_cacheflush"
1185 : "r" (code), "r" (code + size), "r" (0)
1186 : "r0", "r1", "r3" );
1188 #endif /* !__native_client__ */
1199 RegTypeStructByAddr,
1200 /* gsharedvt argument passed by addr in greg */
1201 RegTypeGSharedVtInReg,
1202 /* gsharedvt argument passed by addr on stack */
1203 RegTypeGSharedVtOnStack,
1208 guint16 vtsize; /* in param area */
1212 guint8 size : 4; /* 1, 2, 4, 8, or regs used by RegTypeStructByVal */
1217 guint32 stack_usage;
1218 gboolean vtype_retaddr;
1219 /* The index of the vret arg in the argument list */
1229 /*#define __alignof__(a) sizeof(a)*/
1230 #define __alignof__(type) G_STRUCT_OFFSET(struct { char c; type x; }, x)
1233 #define PARAM_REGS 4
1236 add_general (guint *gr, guint *stack_size, ArgInfo *ainfo, gboolean simple)
1239 if (*gr > ARMREG_R3) {
1241 ainfo->offset = *stack_size;
1242 ainfo->reg = ARMREG_SP; /* in the caller */
1243 ainfo->storage = RegTypeBase;
1246 ainfo->storage = RegTypeGeneral;
1253 split = i8_align == 4;
1258 if (*gr == ARMREG_R3 && split) {
1259 /* first word in r3 and the second on the stack */
1260 ainfo->offset = *stack_size;
1261 ainfo->reg = ARMREG_SP; /* in the caller */
1262 ainfo->storage = RegTypeBaseGen;
1264 } else if (*gr >= ARMREG_R3) {
1265 if (eabi_supported) {
1266 /* darwin aligns longs to 4 byte only */
1267 if (i8_align == 8) {
1272 ainfo->offset = *stack_size;
1273 ainfo->reg = ARMREG_SP; /* in the caller */
1274 ainfo->storage = RegTypeBase;
1277 if (eabi_supported) {
1278 if (i8_align == 8 && ((*gr) & 1))
1281 ainfo->storage = RegTypeIRegPair;
/*
 * add_float:
 * Assign one floating-point argument per the armhf (VFP) calling
 * convention: single-precision values may back-fill a spare s-register
 * left over from aligning a preceding double, doubles require an even
 * s-register pair, and overflow goes to the stack. *fpr counts in
 * single-precision register units; *float_spare holds a back-fill
 * candidate or -1.
 */
1290 add_float (guint *fpr, guint *stack_size, ArgInfo *ainfo, gboolean is_double, gint *float_spare)
1293 * If we're calling a function like this:
1295 * void foo(float a, double b, float c)
1297 * We pass a in s0 and b in d1. That leaves us
1298 * with s1 being unused. The armhf ABI recognizes
1299 * this and requires register assignment to then
1300 * use that for the next single-precision arg,
1301 * i.e. c in this example. So float_spare either
1302 * tells us which reg to use for the next single-
1303 * precision arg, or it's -1, meaning use *fpr.
1305 * Note that even though most of the JIT speaks
1306 * double-precision, fpr represents single-
1307 * precision registers.
1309 * See parts 5.5 and 6.1.2 of the AAPCS for how
1313 if (*fpr < ARM_VFP_F16 || (!is_double && *float_spare >= 0)) {
1314 ainfo->storage = RegTypeFP;
1318 * If we're passing a double-precision value
1319 * and *fpr is odd (e.g. it's s1, s3, ...)
1320 * we need to use the next even register. So
1321 * we mark the current *fpr as a spare that
1322 * can be used for the next single-precision
1326 *float_spare = *fpr;
1331 * At this point, we have an even register
1332 * so we assign that and move along.
1336 } else if (*float_spare >= 0) {
1338 * We're passing a single-precision value
1339 * and it looks like a spare single-
1340 * precision register is available. Let's
1344 ainfo->reg = *float_spare;
1348 * If we hit this branch, we're passing a
1349 * single-precision value and we can simply
1350 * use the next available register.
1358 * We've exhausted available floating point
1359 * regs, so pass the rest on the stack.
1367 ainfo->offset = *stack_size;
1368 ainfo->reg = ARMREG_SP;
1369 ainfo->storage = RegTypeBase;
/*
 * get_call_info:
 * Build a CallInfo describing how each argument and the return value of
 * SIG are passed under the ARM calling convention (core regs, VFP regs,
 * reg pairs, struct-by-val splits, or caller stack), and the total stack
 * usage of the call. Allocates the CallInfo from MP when given, otherwise
 * from the heap via g_malloc0 -- in the latter case the caller owns the
 * memory. GSCTX may be NULL (see mono_arch_tail_call_supported).
 */
1376 get_call_info (MonoGenericSharingContext *gsctx, MonoMemPool *mp, MonoMethodSignature *sig)
1378 guint i, gr, fpr, pstart;
1380 int n = sig->hasthis + sig->param_count;
1381 MonoType *simpletype;
1382 guint32 stack_size = 0;
1384 gboolean is_pinvoke = sig->pinvoke;
1388 cinfo = mono_mempool_alloc0 (mp, sizeof (CallInfo) + (sizeof (ArgInfo) * n));
1390 cinfo = g_malloc0 (sizeof (CallInfo) + (sizeof (ArgInfo) * n));
/* Decide how the return value travels: small pinvoke structs by value, others via a hidden return-address argument. */
1397 t = mini_type_get_underlying_type (gsctx, sig->ret);
1398 if (MONO_TYPE_ISSTRUCT (t)) {
1401 if (is_pinvoke && mono_class_native_size (mono_class_from_mono_type (t), &align) <= sizeof (gpointer)) {
1402 cinfo->ret.storage = RegTypeStructByVal;
1404 cinfo->vtype_retaddr = TRUE;
1406 } else if (!(t->type == MONO_TYPE_GENERICINST && !mono_type_generic_inst_is_valuetype (t)) && mini_is_gsharedvt_type_gsctx (gsctx, t)) {
1407 cinfo->vtype_retaddr = TRUE;
1413 * To simplify get_this_arg_reg () and LLVM integration, emit the vret arg after
1414 * the first argument, allowing 'this' to be always passed in the first arg reg.
1415 * Also do this if the first argument is a reference type, since virtual calls
1416 * are sometimes made using calli without sig->hasthis set, like in the delegate
1419 if (cinfo->vtype_retaddr && !is_pinvoke && (sig->hasthis || (sig->param_count > 0 && MONO_TYPE_IS_REFERENCE (mini_type_get_underlying_type (gsctx, sig->params [0]))))) {
1421 add_general (&gr, &stack_size, cinfo->args + 0, TRUE);
1423 add_general (&gr, &stack_size, &cinfo->args [sig->hasthis + 0], TRUE);
1427 add_general (&gr, &stack_size, &cinfo->ret, TRUE);
1428 cinfo->vret_arg_index = 1;
1432 add_general (&gr, &stack_size, cinfo->args + 0, TRUE);
1436 if (cinfo->vtype_retaddr)
1437 add_general (&gr, &stack_size, &cinfo->ret, TRUE);
/* Main loop: classify each formal parameter by its underlying type. */
1440 DEBUG(printf("params: %d\n", sig->param_count));
1441 for (i = pstart; i < sig->param_count; ++i) {
1442 ArgInfo *ainfo = &cinfo->args [n];
1444 if ((sig->call_convention == MONO_CALL_VARARG) && (i == sig->sentinelpos)) {
1445 /* Prevent implicit arguments and sig_cookie from
1446 being passed in registers */
1449 /* Emit the signature cookie just before the implicit arguments */
1450 add_general (&gr, &stack_size, &cinfo->sig_cookie, TRUE);
1452 DEBUG(printf("param %d: ", i));
1453 if (sig->params [i]->byref) {
1454 DEBUG(printf("byref\n"));
1455 add_general (&gr, &stack_size, ainfo, TRUE);
1459 simpletype = mini_type_get_underlying_type (gsctx, sig->params [i]);
1460 switch (simpletype->type) {
1461 case MONO_TYPE_BOOLEAN:
1464 cinfo->args [n].size = 1;
1465 add_general (&gr, &stack_size, ainfo, TRUE);
1468 case MONO_TYPE_CHAR:
1471 cinfo->args [n].size = 2;
1472 add_general (&gr, &stack_size, ainfo, TRUE);
1477 cinfo->args [n].size = 4;
1478 add_general (&gr, &stack_size, ainfo, TRUE);
1484 case MONO_TYPE_FNPTR:
1485 case MONO_TYPE_CLASS:
1486 case MONO_TYPE_OBJECT:
1487 case MONO_TYPE_STRING:
1488 case MONO_TYPE_SZARRAY:
1489 case MONO_TYPE_ARRAY:
1490 cinfo->args [n].size = sizeof (gpointer);
1491 add_general (&gr, &stack_size, ainfo, TRUE);
1494 case MONO_TYPE_GENERICINST:
1495 if (!mono_type_generic_inst_is_valuetype (simpletype)) {
1496 cinfo->args [n].size = sizeof (gpointer);
1497 add_general (&gr, &stack_size, ainfo, TRUE);
1501 if (mini_is_gsharedvt_type_gsctx (gsctx, simpletype)) {
1502 /* gsharedvt arguments are passed by ref */
1503 g_assert (mini_is_gsharedvt_type_gsctx (gsctx, simpletype));
1504 add_general (&gr, &stack_size, ainfo, TRUE);
1505 switch (ainfo->storage) {
1506 case RegTypeGeneral:
1507 ainfo->storage = RegTypeGSharedVtInReg;
1510 ainfo->storage = RegTypeGSharedVtOnStack;
1513 g_assert_not_reached ();
1519 case MONO_TYPE_TYPEDBYREF:
1520 case MONO_TYPE_VALUETYPE: {
1526 if (simpletype->type == MONO_TYPE_TYPEDBYREF) {
1527 size = sizeof (MonoTypedRef);
1528 align = sizeof (gpointer);
1530 MonoClass *klass = mono_class_from_mono_type (sig->params [i]);
1532 size = mono_class_native_size (klass, &align);
1534 size = mini_type_stack_size_full (gsctx, simpletype, &align, FALSE);
1536 DEBUG(printf ("load %d bytes struct\n", size));
/* Round the struct up to whole pointer-sized words; the first words go in registers, the rest (vtsize) on the stack. */
1539 align_size += (sizeof (gpointer) - 1);
1540 align_size &= ~(sizeof (gpointer) - 1);
1541 nwords = (align_size + sizeof (gpointer) -1 ) / sizeof (gpointer);
1542 ainfo->storage = RegTypeStructByVal;
1543 ainfo->struct_size = size;
1544 /* FIXME: align stack_size if needed */
1545 if (eabi_supported) {
1546 if (align >= 8 && (gr & 1))
1549 if (gr > ARMREG_R3) {
1551 ainfo->vtsize = nwords;
1553 int rest = ARMREG_R3 - gr + 1;
1554 int n_in_regs = rest >= nwords? nwords: rest;
1556 ainfo->size = n_in_regs;
1557 ainfo->vtsize = nwords - n_in_regs;
1560 nwords -= n_in_regs;
1562 if (sig->call_convention == MONO_CALL_VARARG)
1563 /* This matches the alignment in mono_ArgIterator_IntGetNextArg () */
1564 stack_size = ALIGN_TO (stack_size, align);
1565 ainfo->offset = stack_size;
1566 /*g_print ("offset for arg %d at %d\n", n, stack_size);*/
1567 stack_size += nwords * sizeof (gpointer);
1574 add_general (&gr, &stack_size, ainfo, FALSE);
1581 add_float (&fpr, &stack_size, ainfo, FALSE, &float_spare);
1583 add_general (&gr, &stack_size, ainfo, TRUE);
1591 add_float (&fpr, &stack_size, ainfo, TRUE, &float_spare);
1593 add_general (&gr, &stack_size, ainfo, FALSE);
1598 case MONO_TYPE_MVAR:
1599 /* gsharedvt arguments are passed by ref */
1600 g_assert (mini_is_gsharedvt_type_gsctx (gsctx, simpletype));
1601 add_general (&gr, &stack_size, ainfo, TRUE);
1602 switch (ainfo->storage) {
1603 case RegTypeGeneral:
1604 ainfo->storage = RegTypeGSharedVtInReg;
1607 ainfo->storage = RegTypeGSharedVtOnStack;
1610 g_assert_not_reached ();
1615 g_error ("Can't trampoline 0x%x", sig->params [i]->type);
1619 /* Handle the case where there are no implicit arguments */
1620 if ((sig->call_convention == MONO_CALL_VARARG) && (i == sig->sentinelpos)) {
1621 /* Prevent implicit arguments and sig_cookie from
1622 being passed in registers */
1625 /* Emit the signature cookie just before the implicit arguments */
1626 add_general (&gr, &stack_size, &cinfo->sig_cookie, TRUE);
/* Classify the return value. */
1630 simpletype = mini_type_get_underlying_type (gsctx, sig->ret);
1631 switch (simpletype->type) {
1632 case MONO_TYPE_BOOLEAN:
1637 case MONO_TYPE_CHAR:
1643 case MONO_TYPE_FNPTR:
1644 case MONO_TYPE_CLASS:
1645 case MONO_TYPE_OBJECT:
1646 case MONO_TYPE_SZARRAY:
1647 case MONO_TYPE_ARRAY:
1648 case MONO_TYPE_STRING:
1649 cinfo->ret.storage = RegTypeGeneral;
1650 cinfo->ret.reg = ARMREG_R0;
1654 cinfo->ret.storage = RegTypeIRegPair;
1655 cinfo->ret.reg = ARMREG_R0;
/* FP return: d0/s0 under the hard-float ABI, r0(:r1) under the soft-float ABI. */
1659 cinfo->ret.storage = RegTypeFP;
1661 if (IS_HARD_FLOAT) {
1662 cinfo->ret.reg = ARM_VFP_F0;
1664 cinfo->ret.reg = ARMREG_R0;
1668 case MONO_TYPE_GENERICINST:
1669 if (!mono_type_generic_inst_is_valuetype (simpletype)) {
1670 cinfo->ret.storage = RegTypeGeneral;
1671 cinfo->ret.reg = ARMREG_R0;
1674 // FIXME: Only for variable types
1675 if (mini_is_gsharedvt_type_gsctx (gsctx, simpletype)) {
1676 cinfo->ret.storage = RegTypeStructByAddr;
1677 g_assert (cinfo->vtype_retaddr);
1681 case MONO_TYPE_VALUETYPE:
1682 case MONO_TYPE_TYPEDBYREF:
1683 if (cinfo->ret.storage != RegTypeStructByVal)
1684 cinfo->ret.storage = RegTypeStructByAddr;
1687 case MONO_TYPE_MVAR:
1688 g_assert (mini_is_gsharedvt_type_gsctx (gsctx, simpletype));
1689 cinfo->ret.storage = RegTypeStructByAddr;
1690 g_assert (cinfo->vtype_retaddr);
1692 case MONO_TYPE_VOID:
1695 g_error ("Can't handle as return value 0x%x", sig->ret->type);
1699 /* align stack size to 8 */
/* NOTE(review): the DEBUG print rounds with &~15 while the code rounds with &~7 -- the printed value can differ from the stored one; confirm which is intended. */
1700 DEBUG (printf (" stack size: %d (%d)\n", (stack_size + 15) & ~15, stack_size));
1701 stack_size = (stack_size + 7) & ~7;
1703 cinfo->stack_usage = stack_size;
/*
 * mono_arch_tail_call_supported:
 * Decide whether a tail call from CALLER_SIG to CALLEE_SIG can be emitted
 * on ARM. Rejects AOT (non-full-AOT) compilation, callees that use more
 * stack than the caller, struct returns via a hidden address argument,
 * and callees with more than 16 words of stack arguments.
 * NOTE(review): c1/c2 come from get_call_info with mp == NULL, i.e.
 * g_malloc0 -- a matching g_free is not visible in this view; confirm
 * they are freed before returning or this leaks per call.
 */
1709 mono_arch_tail_call_supported (MonoCompile *cfg, MonoMethodSignature *caller_sig, MonoMethodSignature *callee_sig)
1711 MonoType *callee_ret;
1715 if (cfg->compile_aot && !cfg->full_aot)
1716 /* OP_TAILCALL doesn't work with AOT */
1719 c1 = get_call_info (NULL, NULL, caller_sig);
1720 c2 = get_call_info (NULL, NULL, callee_sig);
1723 * Tail calls with more callee stack usage than the caller cannot be supported, since
1724 * the extra stack space would be left on the stack after the tail call.
1726 res = c1->stack_usage >= c2->stack_usage;
1727 callee_ret = mini_replace_type (callee_sig->ret);
1728 if (callee_ret && MONO_TYPE_ISSTRUCT (callee_ret) && c2->ret.storage != RegTypeStructByVal)
1729 /* An address on the callee's stack is passed as the first argument */
1732 if (c2->stack_usage > 16 * 4)
/*
 * debug_omit_fp:
 * Debug hook gating frame-pointer omission via the global debug counter,
 * so the optimization can be bisected at runtime.
 */
1744 debug_omit_fp (void)
1747 return mono_debug_count ();
1754 * mono_arch_compute_omit_fp:
1756 * Determine whether the frame pointer can be eliminated.
1759 mono_arch_compute_omit_fp (MonoCompile *cfg)
1761 MonoMethodSignature *sig;
1762 MonoMethodHeader *header;
/* Result is cached in cfg->arch; compute only once per method. */
1766 if (cfg->arch.omit_fp_computed)
1769 header = cfg->header;
1771 sig = mono_method_signature (cfg->method);
1773 if (!cfg->arch.cinfo)
1774 cfg->arch.cinfo = get_call_info (cfg->generic_sharing_context, cfg->mempool, sig);
1775 cinfo = cfg->arch.cinfo;
1778 * FIXME: Remove some of the restrictions.
1780 cfg->arch.omit_fp = TRUE;
1781 cfg->arch.omit_fp_computed = TRUE;
/* Any of the following features forces a frame pointer. */
1783 if (cfg->disable_omit_fp)
1784 cfg->arch.omit_fp = FALSE;
1785 if (!debug_omit_fp ())
1786 cfg->arch.omit_fp = FALSE;
1788 if (cfg->method->save_lmf)
1789 cfg->arch.omit_fp = FALSE;
1791 if (cfg->flags & MONO_CFG_HAS_ALLOCA)
1792 cfg->arch.omit_fp = FALSE;
1793 if (header->num_clauses)
1794 cfg->arch.omit_fp = FALSE;
1795 if (cfg->param_area)
1796 cfg->arch.omit_fp = FALSE;
1797 if (!sig->pinvoke && (sig->call_convention == MONO_CALL_VARARG))
1798 cfg->arch.omit_fp = FALSE;
1799 if ((mono_jit_trace_calls != NULL && mono_trace_eval (cfg->method)) ||
1800 (cfg->prof_options & MONO_PROFILE_ENTER_LEAVE))
1801 cfg->arch.omit_fp = FALSE;
/* Stack-passed arguments have frame-relative offsets, so they also need FP. */
1802 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
1803 ArgInfo *ainfo = &cinfo->args [i];
1805 if (ainfo->storage == RegTypeBase || ainfo->storage == RegTypeBaseGen || ainfo->storage == RegTypeStructByVal) {
1807 * The stack offset can only be determined when the frame
1810 cfg->arch.omit_fp = FALSE;
1815 for (i = cfg->locals_start; i < cfg->num_varinfo; i++) {
1816 MonoInst *ins = cfg->varinfo [i];
1819 locals_size += mono_type_size (ins->inst_vtype, &ialign);
1824 * Set var information according to the calling convention. arm version.
1825 * The locals var stuff should most likely be split in another method.
/*
 * mono_arch_allocate_vars:
 * Assign stack offsets (or registers) to the return value, special
 * architecture variables (seq points, atomic temp), locals, and incoming
 * arguments. Offsets grow upward from SP (MONO_CFG_HAS_SPILLUP); the
 * frame register is SP when the frame pointer is omitted, FP otherwise.
 */
1828 mono_arch_allocate_vars (MonoCompile *cfg)
1830 MonoMethodSignature *sig;
1831 MonoMethodHeader *header;
1834 int i, offset, size, align, curinst;
1838 sig = mono_method_signature (cfg->method);
1840 if (!cfg->arch.cinfo)
1841 cfg->arch.cinfo = get_call_info (cfg->generic_sharing_context, cfg->mempool, sig);
1842 cinfo = cfg->arch.cinfo;
1843 sig_ret = mini_replace_type (sig->ret);
1845 mono_arch_compute_omit_fp (cfg);
1847 if (cfg->arch.omit_fp)
1848 cfg->frame_reg = ARMREG_SP;
1850 cfg->frame_reg = ARMREG_FP;
1852 cfg->flags |= MONO_CFG_HAS_SPILLUP;
1854 /* allow room for the vararg method args: void* and long/double */
1855 if (mono_jit_trace_calls != NULL && mono_trace_eval (cfg->method))
1856 cfg->param_area = MAX (cfg->param_area, sizeof (gpointer)*8);
1858 header = cfg->header;
1860 /* See mono_arch_get_global_int_regs () */
1861 if (cfg->flags & MONO_CFG_HAS_CALLS)
1862 cfg->uses_rgctx_reg = TRUE;
1864 if (cfg->frame_reg != ARMREG_SP)
1865 cfg->used_int_regs |= 1 << cfg->frame_reg;
1867 if (cfg->compile_aot || cfg->uses_rgctx_reg || COMPILE_LLVM (cfg))
1868 /* V5 is reserved for passing the vtable/rgctx/IMT method */
1869 cfg->used_int_regs |= (1 << ARMREG_V5);
/* Scalar returns live in r0. */
1873 if (!MONO_TYPE_ISSTRUCT (sig_ret) && !cinfo->vtype_retaddr) {
1874 if (sig_ret->type != MONO_TYPE_VOID) {
1875 cfg->ret->opcode = OP_REGVAR;
1876 cfg->ret->inst_c0 = ARMREG_R0;
1879 /* local vars are at a positive offset from the stack pointer */
1881 * also note that if the function uses alloca, we use FP
1882 * to point at the local variables.
1884 offset = 0; /* linkage area */
1885 /* align the offset to 16 bytes: not sure this is needed here */
1887 //offset &= ~(8 - 1);
1889 /* add parameter area size for called functions */
1890 offset += cfg->param_area;
1893 if (cfg->flags & MONO_CFG_HAS_FPOUT)
1896 /* allow room to save the return value */
1897 if (mono_jit_trace_calls != NULL && mono_trace_eval (cfg->method))
1900 /* the MonoLMF structure is stored just below the stack pointer */
1901 if (cinfo->ret.storage == RegTypeStructByVal) {
1902 cfg->ret->opcode = OP_REGOFFSET;
1903 cfg->ret->inst_basereg = cfg->frame_reg;
1904 offset += sizeof (gpointer) - 1;
1905 offset &= ~(sizeof (gpointer) - 1);
/* NOTE(review): the offset is negated here, unlike every other slot -- presumably below the frame base; confirm against the prolog. */
1906 cfg->ret->inst_offset = - offset;
1907 offset += sizeof(gpointer);
1908 } else if (cinfo->vtype_retaddr) {
1909 ins = cfg->vret_addr;
1910 offset += sizeof(gpointer) - 1;
1911 offset &= ~(sizeof(gpointer) - 1);
1912 ins->inst_offset = offset;
1913 ins->opcode = OP_REGOFFSET;
1914 ins->inst_basereg = cfg->frame_reg;
1915 if (G_UNLIKELY (cfg->verbose_level > 1)) {
1916 printf ("vret_addr =");
1917 mono_print_ins (cfg->vret_addr);
1919 offset += sizeof(gpointer);
1922 /* Allocate these first so they have a small offset, OP_SEQ_POINT depends on this */
1923 if (cfg->arch.seq_point_info_var) {
1926 ins = cfg->arch.seq_point_info_var;
1930 offset += align - 1;
1931 offset &= ~(align - 1);
1932 ins->opcode = OP_REGOFFSET;
1933 ins->inst_basereg = cfg->frame_reg;
1934 ins->inst_offset = offset;
1937 ins = cfg->arch.ss_trigger_page_var;
1940 offset += align - 1;
1941 offset &= ~(align - 1);
1942 ins->opcode = OP_REGOFFSET;
1943 ins->inst_basereg = cfg->frame_reg;
1944 ins->inst_offset = offset;
/* Soft-breakpoint sequence point variables (see mono_arch_create_vars). */
1948 if (cfg->arch.seq_point_read_var) {
1951 ins = cfg->arch.seq_point_read_var;
1955 offset += align - 1;
1956 offset &= ~(align - 1);
1957 ins->opcode = OP_REGOFFSET;
1958 ins->inst_basereg = cfg->frame_reg;
1959 ins->inst_offset = offset;
1962 ins = cfg->arch.seq_point_ss_method_var;
1965 offset += align - 1;
1966 offset &= ~(align - 1);
1967 ins->opcode = OP_REGOFFSET;
1968 ins->inst_basereg = cfg->frame_reg;
1969 ins->inst_offset = offset;
1972 ins = cfg->arch.seq_point_bp_method_var;
1975 offset += align - 1;
1976 offset &= ~(align - 1);
1977 ins->opcode = OP_REGOFFSET;
1978 ins->inst_basereg = cfg->frame_reg;
1979 ins->inst_offset = offset;
1983 if (cfg->has_atomic_exchange_i4 || cfg->has_atomic_cas_i4 || cfg->has_atomic_add_new_i4) {
1984 /* Allocate a temporary used by the atomic ops */
1988 /* Allocate a local slot to hold the sig cookie address */
1989 offset += align - 1;
1990 offset &= ~(align - 1);
1991 cfg->arch.atomic_tmp_offset = offset;
1994 cfg->arch.atomic_tmp_offset = -1;
1997 cfg->locals_min_stack_offset = offset;
/* Lay out non-register locals. */
1999 curinst = cfg->locals_start;
2000 for (i = curinst; i < cfg->num_varinfo; ++i) {
2003 ins = cfg->varinfo [i];
2004 if ((ins->flags & MONO_INST_IS_DEAD) || ins->opcode == OP_REGVAR || ins->opcode == OP_REGOFFSET)
2007 t = ins->inst_vtype;
2008 if (cfg->gsharedvt && mini_is_gsharedvt_variable_type (cfg, t))
2011 /* inst->backend.is_pinvoke indicates native sized value types, this is used by the
2012 * pinvoke wrappers when they call functions returning structure */
2013 if (ins->backend.is_pinvoke && MONO_TYPE_ISSTRUCT (t) && t->type != MONO_TYPE_TYPEDBYREF) {
2014 size = mono_class_native_size (mono_class_from_mono_type (t), &ualign);
2018 size = mono_type_size (t, &align);
2020 /* FIXME: if a structure is misaligned, our memcpy doesn't work,
2021 * since it loads/stores misaligned words, which don't do the right thing.
2023 if (align < 4 && size >= 4)
/* Mark padding introduced by alignment as not containing GC references. */
2025 if (ALIGN_TO (offset, align) > ALIGN_TO (offset, 4))
2026 mini_gc_set_slot_type_from_fp (cfg, ALIGN_TO (offset, 4), SLOT_NOREF);
2027 offset += align - 1;
2028 offset &= ~(align - 1);
2029 ins->opcode = OP_REGOFFSET;
2030 ins->inst_offset = offset;
2031 ins->inst_basereg = cfg->frame_reg;
2033 //g_print ("allocating local %d to %d\n", i, inst->inst_offset);
2036 cfg->locals_max_stack_offset = offset;
/* Spill slot for the 'this' argument (when not kept in a register). */
2040 ins = cfg->args [curinst];
2041 if (ins->opcode != OP_REGVAR) {
2042 ins->opcode = OP_REGOFFSET;
2043 ins->inst_basereg = cfg->frame_reg;
2044 offset += sizeof (gpointer) - 1;
2045 offset &= ~(sizeof (gpointer) - 1);
2046 ins->inst_offset = offset;
2047 offset += sizeof (gpointer);
2052 if (sig->call_convention == MONO_CALL_VARARG) {
2056 /* Allocate a local slot to hold the sig cookie address */
2057 offset += align - 1;
2058 offset &= ~(align - 1);
2059 cfg->sig_cookie = offset;
/* Spill slots for the remaining incoming arguments. */
2063 for (i = 0; i < sig->param_count; ++i) {
2064 ins = cfg->args [curinst];
2066 if (ins->opcode != OP_REGVAR) {
2067 ins->opcode = OP_REGOFFSET;
2068 ins->inst_basereg = cfg->frame_reg;
2069 size = mini_type_stack_size_full (cfg->generic_sharing_context, sig->params [i], &ualign, sig->pinvoke);
2071 /* FIXME: if a structure is misaligned, our memcpy doesn't work,
2072 * since it loads/stores misaligned words, which don't do the right thing.
2074 if (align < 4 && size >= 4)
2076 /* The code in the prolog () stores words when storing vtypes received in a register */
2077 if (MONO_TYPE_ISSTRUCT (sig->params [i]))
2079 if (ALIGN_TO (offset, align) > ALIGN_TO (offset, 4))
2080 mini_gc_set_slot_type_from_fp (cfg, ALIGN_TO (offset, 4), SLOT_NOREF);
2081 offset += align - 1;
2082 offset &= ~(align - 1);
2083 ins->inst_offset = offset;
2089 /* align the offset to 8 bytes */
2090 if (ALIGN_TO (offset, 8) > ALIGN_TO (offset, 4))
2091 mini_gc_set_slot_type_from_fp (cfg, ALIGN_TO (offset, 4), SLOT_NOREF);
2096 cfg->stack_offset = offset;
/*
 * mono_arch_create_vars:
 * Create architecture-specific compile-time variables: VFP scratch slots
 * (hard-float only), the hidden vtype return-address argument, and the
 * sequence-point bookkeeping variables used by the soft debugger
 * (soft-breakpoint variant vs. AOT trigger-page variant).
 */
2100 mono_arch_create_vars (MonoCompile *cfg)
2102 MonoMethodSignature *sig;
2106 sig = mono_method_signature (cfg->method);
2108 if (!cfg->arch.cinfo)
2109 cfg->arch.cinfo = get_call_info (cfg->generic_sharing_context, cfg->mempool, sig);
2110 cinfo = cfg->arch.cinfo;
/* Two double-sized scratch stack slots for moving values between core and VFP registers. */
2112 if (IS_HARD_FLOAT) {
2113 for (i = 0; i < 2; i++) {
2114 MonoInst *inst = mono_compile_create_var (cfg, &mono_defaults.double_class->byval_arg, OP_LOCAL);
2115 inst->flags |= MONO_INST_VOLATILE;
2117 cfg->arch.vfp_scratch_slots [i] = (gpointer) inst;
2121 if (cinfo->ret.storage == RegTypeStructByVal)
2122 cfg->ret_var_is_local = TRUE;
2124 if (cinfo->vtype_retaddr) {
2125 cfg->vret_addr = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_ARG);
2126 if (G_UNLIKELY (cfg->verbose_level > 1)) {
2127 printf ("vret_addr = ");
2128 mono_print_ins (cfg->vret_addr);
2132 if (cfg->gen_seq_points) {
2133 if (cfg->soft_breakpoints) {
2134 MonoInst *ins = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
2135 ins->flags |= MONO_INST_VOLATILE;
2136 cfg->arch.seq_point_read_var = ins;
2138 ins = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
2139 ins->flags |= MONO_INST_VOLATILE;
2140 cfg->arch.seq_point_ss_method_var = ins;
2142 ins = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
2143 ins->flags |= MONO_INST_VOLATILE;
2144 cfg->arch.seq_point_bp_method_var = ins;
/* Soft breakpoints are JIT-only; they are incompatible with AOT. */
2146 g_assert (!cfg->compile_aot);
2147 } else if (cfg->compile_aot) {
2148 MonoInst *ins = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
2149 ins->flags |= MONO_INST_VOLATILE;
2150 cfg->arch.seq_point_info_var = ins;
2152 /* Allocate a separate variable for this to save 1 load per seq point */
2153 ins = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
2154 ins->flags |= MONO_INST_VOLATILE;
2155 cfg->arch.ss_trigger_page_var = ins;
/*
 * emit_sig_cookie:
 * For vararg calls, store a constant describing the trailing (implicit)
 * part of the signature at the cookie's stack slot, so the callee's
 * ArgIterator can walk the variable arguments. No-op for tail calls.
 * The duplicated signature is trimmed to start at the sentinel position.
 */
2161 emit_sig_cookie (MonoCompile *cfg, MonoCallInst *call, CallInfo *cinfo)
2163 MonoMethodSignature *tmp_sig;
2166 if (call->tail_call)
2169 g_assert (cinfo->sig_cookie.storage == RegTypeBase);
2172 * mono_ArgIterator_Setup assumes the signature cookie is
2173 * passed first and all the arguments which were before it are
2174 * passed on the stack after the signature. So compensate by
2175 * passing a different signature.
2177 tmp_sig = mono_metadata_signature_dup (call->signature);
2178 tmp_sig->param_count -= call->signature->sentinelpos;
2179 tmp_sig->sentinelpos = 0;
2180 memcpy (tmp_sig->params, call->signature->params + call->signature->sentinelpos, tmp_sig->param_count * sizeof (MonoType*));
2182 sig_reg = mono_alloc_ireg (cfg);
2183 MONO_EMIT_NEW_SIGNATURECONST (cfg, sig_reg, tmp_sig);
2185 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, ARMREG_SP, cinfo->sig_cookie.offset, sig_reg);
/*
 * mono_arch_get_llvm_call_info:
 * Translate the ARM CallInfo for SIG into the LLVMCallInfo the LLVM
 * backend understands. Argument/return conventions LLVM cannot express
 * are reported by setting cfg->disable_llvm with an explanatory message
 * (falling back to the non-LLVM JIT) rather than failing hard.
 */
2190 mono_arch_get_llvm_call_info (MonoCompile *cfg, MonoMethodSignature *sig)
2195 LLVMCallInfo *linfo;
2197 n = sig->param_count + sig->hasthis;
2199 cinfo = get_call_info (cfg->generic_sharing_context, cfg->mempool, sig);
2201 linfo = mono_mempool_alloc0 (cfg->mempool, sizeof (LLVMCallInfo) + (sizeof (LLVMArgInfo) * n));
2204 * LLVM always uses the native ABI while we use our own ABI, the
2205 * only difference is the handling of vtypes:
2206 * - we only pass/receive them in registers in some cases, and only
2207 * in 1 or 2 integer registers.
2209 if (cinfo->vtype_retaddr) {
2210 /* Vtype returned using a hidden argument */
2211 linfo->ret.storage = LLVMArgVtypeRetAddr;
2212 linfo->vret_arg_index = cinfo->vret_arg_index;
2213 } else if (cinfo->ret.storage != RegTypeGeneral && cinfo->ret.storage != RegTypeNone && cinfo->ret.storage != RegTypeFP && cinfo->ret.storage != RegTypeIRegPair) {
2214 cfg->exception_message = g_strdup ("unknown ret conv");
2215 cfg->disable_llvm = TRUE;
2219 for (i = 0; i < n; ++i) {
2220 ainfo = cinfo->args + i;
2222 linfo->args [i].storage = LLVMArgNone;
2224 switch (ainfo->storage) {
2225 case RegTypeGeneral:
2226 case RegTypeIRegPair:
2228 linfo->args [i].storage = LLVMArgInIReg;
2230 case RegTypeStructByVal:
2231 // FIXME: Passing entirely on the stack or split reg/stack
/* Only structs passed entirely in 1-2 integer registers are supported. */
2232 if (ainfo->vtsize == 0 && ainfo->size <= 2) {
2233 linfo->args [i].storage = LLVMArgVtypeInReg;
2234 linfo->args [i].pair_storage [0] = LLVMArgInIReg;
2235 if (ainfo->size == 2)
2236 linfo->args [i].pair_storage [1] = LLVMArgInIReg;
2238 linfo->args [i].pair_storage [1] = LLVMArgNone;
2240 cfg->exception_message = g_strdup_printf ("vtype-by-val on stack");
2241 cfg->disable_llvm = TRUE;
2245 cfg->exception_message = g_strdup_printf ("ainfo->storage (%d)", ainfo->storage);
2246 cfg->disable_llvm = TRUE;
/*
 * mono_arch_emit_call:
 * Emit the IR that moves each outgoing argument of CALL into the
 * location computed by get_call_info: core registers, register pairs,
 * VFP registers (hard-float), stack slots, or the OP_OUTARG_VT path for
 * value types. Also emits the vararg signature cookie and wires up the
 * hidden vtype return address. NOTE(review): cinfo comes from
 * get_call_info with mp == NULL (heap-allocated); its release is not
 * visible in this view -- confirm it is freed before returning.
 */
2256 mono_arch_emit_call (MonoCompile *cfg, MonoCallInst *call)
2259 MonoMethodSignature *sig;
2263 sig = call->signature;
2264 n = sig->param_count + sig->hasthis;
2266 cinfo = get_call_info (cfg->generic_sharing_context, NULL, sig);
2268 for (i = 0; i < n; ++i) {
2269 ArgInfo *ainfo = cinfo->args + i;
/* Implicit 'this' (i < sig->hasthis) is typed as a native int. */
2272 if (i >= sig->hasthis)
2273 t = sig->params [i - sig->hasthis];
2275 t = &mono_defaults.int_class->byval_arg;
2276 t = mini_type_get_underlying_type (cfg->generic_sharing_context, t);
2278 if ((sig->call_convention == MONO_CALL_VARARG) && (i == sig->sentinelpos)) {
2279 /* Emit the signature cookie just before the implicit arguments */
2280 emit_sig_cookie (cfg, call, cinfo);
2283 in = call->args [i];
2285 switch (ainfo->storage) {
2286 case RegTypeGeneral:
2287 case RegTypeIRegPair:
/* 64-bit integers: move the two component vregs (dreg+1 = low, dreg+2 = high) into consecutive core registers. */
2288 if (!t->byref && ((t->type == MONO_TYPE_I8) || (t->type == MONO_TYPE_U8))) {
2289 MONO_INST_NEW (cfg, ins, OP_MOVE);
2290 ins->dreg = mono_alloc_ireg (cfg);
2291 ins->sreg1 = in->dreg + 1;
2292 MONO_ADD_INS (cfg->cbb, ins);
2293 mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, ainfo->reg, FALSE);
2295 MONO_INST_NEW (cfg, ins, OP_MOVE);
2296 ins->dreg = mono_alloc_ireg (cfg);
2297 ins->sreg1 = in->dreg + 2;
2298 MONO_ADD_INS (cfg->cbb, ins);
2299 mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, ainfo->reg + 1, FALSE);
2300 } else if (!t->byref && ((t->type == MONO_TYPE_R8) || (t->type == MONO_TYPE_R4))) {
2301 if (ainfo->size == 4) {
2302 if (IS_SOFT_FLOAT) {
2303 /* mono_emit_call_args () have already done the r8->r4 conversion */
2304 /* The converted value is in an int vreg */
2305 MONO_INST_NEW (cfg, ins, OP_MOVE);
2306 ins->dreg = mono_alloc_ireg (cfg);
2307 ins->sreg1 = in->dreg;
2308 MONO_ADD_INS (cfg->cbb, ins);
2309 mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, ainfo->reg, FALSE);
/* VFP case: bounce the float through the parameter area to get it into a core register. */
2313 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORER4_MEMBASE_REG, ARMREG_SP, (cfg->param_area - 8), in->dreg);
2314 creg = mono_alloc_ireg (cfg);
2315 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOAD_MEMBASE, creg, ARMREG_SP, (cfg->param_area - 8));
2316 mono_call_inst_add_outarg_reg (cfg, call, creg, ainfo->reg, FALSE);
2319 if (IS_SOFT_FLOAT) {
2320 MONO_INST_NEW (cfg, ins, OP_FGETLOW32);
2321 ins->dreg = mono_alloc_ireg (cfg);
2322 ins->sreg1 = in->dreg;
2323 MONO_ADD_INS (cfg->cbb, ins);
2324 mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, ainfo->reg, FALSE);
2326 MONO_INST_NEW (cfg, ins, OP_FGETHIGH32);
2327 ins->dreg = mono_alloc_ireg (cfg);
2328 ins->sreg1 = in->dreg;
2329 MONO_ADD_INS (cfg->cbb, ins);
2330 mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, ainfo->reg + 1, FALSE);
/* Double via the parameter area into a core register pair. */
2334 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORER8_MEMBASE_REG, ARMREG_SP, (cfg->param_area - 8), in->dreg);
2335 creg = mono_alloc_ireg (cfg);
2336 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOAD_MEMBASE, creg, ARMREG_SP, (cfg->param_area - 8));
2337 mono_call_inst_add_outarg_reg (cfg, call, creg, ainfo->reg, FALSE);
2338 creg = mono_alloc_ireg (cfg);
2339 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOAD_MEMBASE, creg, ARMREG_SP, (cfg->param_area - 8 + 4));
2340 mono_call_inst_add_outarg_reg (cfg, call, creg, ainfo->reg + 1, FALSE);
2343 cfg->flags |= MONO_CFG_HAS_FPOUT;
2345 MONO_INST_NEW (cfg, ins, OP_MOVE);
2346 ins->dreg = mono_alloc_ireg (cfg);
2347 ins->sreg1 = in->dreg;
2348 MONO_ADD_INS (cfg->cbb, ins);
2350 mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, ainfo->reg, FALSE);
2353 case RegTypeStructByAddr:
2356 /* FIXME: where is the data allocated? */
2357 arg->backend.reg3 = ainfo->reg;
2358 call->used_iregs |= 1 << ainfo->reg;
/* This path is believed dead: get_call_info never hands RegTypeStructByAddr to an argument here. */
2359 g_assert_not_reached ();
2362 case RegTypeStructByVal:
2363 case RegTypeGSharedVtInReg:
2364 case RegTypeGSharedVtOnStack:
/* Defer to mono_arch_emit_outarg_vt via OP_OUTARG_VT, carrying a private copy of the ArgInfo. */
2365 MONO_INST_NEW (cfg, ins, OP_OUTARG_VT);
2366 ins->opcode = OP_OUTARG_VT;
2367 ins->sreg1 = in->dreg;
2368 ins->klass = in->klass;
2369 ins->inst_p0 = call;
2370 ins->inst_p1 = mono_mempool_alloc (cfg->mempool, sizeof (ArgInfo));
2371 memcpy (ins->inst_p1, ainfo, sizeof (ArgInfo));
2372 mono_call_inst_add_outarg_vt (cfg, call, ins);
2373 MONO_ADD_INS (cfg->cbb, ins);
/* Stack-passed argument: store at its offset in the outgoing area. */
2376 if (!t->byref && ((t->type == MONO_TYPE_I8) || (t->type == MONO_TYPE_U8))) {
2377 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, ARMREG_SP, ainfo->offset, in->dreg);
2378 } else if (!t->byref && ((t->type == MONO_TYPE_R4) || (t->type == MONO_TYPE_R8))) {
2379 if (t->type == MONO_TYPE_R8) {
2380 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORER8_MEMBASE_REG, ARMREG_SP, ainfo->offset, in->dreg);
2383 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, ARMREG_SP, ainfo->offset, in->dreg);
2385 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORER4_MEMBASE_REG, ARMREG_SP, ainfo->offset, in->dreg);
2388 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, ARMREG_SP, ainfo->offset, in->dreg);
2391 case RegTypeBaseGen:
/* 64-bit value split between r3 (low word, byte-order dependent) and the stack. */
2392 if (!t->byref && ((t->type == MONO_TYPE_I8) || (t->type == MONO_TYPE_U8))) {
2393 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, ARMREG_SP, ainfo->offset, (G_BYTE_ORDER == G_BIG_ENDIAN) ? in->dreg + 1 : in->dreg + 2);
2394 MONO_INST_NEW (cfg, ins, OP_MOVE);
2395 ins->dreg = mono_alloc_ireg (cfg);
2396 ins->sreg1 = G_BYTE_ORDER == G_BIG_ENDIAN ? in->dreg + 2 : in->dreg + 1;
2397 MONO_ADD_INS (cfg->cbb, ins);
2398 mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, ARMREG_R3, FALSE);
2399 } else if (!t->byref && (t->type == MONO_TYPE_R8)) {
2402 /* This should work for soft-float as well */
2404 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORER8_MEMBASE_REG, ARMREG_SP, (cfg->param_area - 8), in->dreg);
2405 creg = mono_alloc_ireg (cfg);
2406 mono_call_inst_add_outarg_reg (cfg, call, creg, ARMREG_R3, FALSE);
2407 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOAD_MEMBASE, creg, ARMREG_SP, (cfg->param_area - 8));
2408 creg = mono_alloc_ireg (cfg);
2409 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOAD_MEMBASE, creg, ARMREG_SP, (cfg->param_area - 4));
2410 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, ARMREG_SP, ainfo->offset, creg);
2411 cfg->flags |= MONO_CFG_HAS_FPOUT;
2413 g_assert_not_reached ();
/* Hard-float VFP register argument. */
2417 int fdreg = mono_alloc_freg (cfg);
2419 if (ainfo->size == 8) {
2420 MONO_INST_NEW (cfg, ins, OP_FMOVE);
2421 ins->sreg1 = in->dreg;
2423 MONO_ADD_INS (cfg->cbb, ins);
2425 mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, ainfo->reg, TRUE);
2430 * Mono's register allocator doesn't speak single-precision registers that
2431 * overlap double-precision registers (i.e. armhf). So we have to work around
2432 * the register allocator and load the value from memory manually.
2434 * So we create a variable for the float argument and an instruction to store
2435 * the argument into the variable. We then store the list of these arguments
2436 * in cfg->float_args. This list is then used by emit_float_args later to
2437 * pass the arguments in the various call opcodes.
2439 * This is not very nice, and we should really try to fix the allocator.
2442 MonoInst *float_arg = mono_compile_create_var (cfg, &mono_defaults.single_class->byval_arg, OP_LOCAL);
2444 /* Make sure the instruction isn't seen as pointless and removed.
2446 float_arg->flags |= MONO_INST_VOLATILE;
2448 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, float_arg->dreg, in->dreg);
2450 /* We use the dreg to look up the instruction later. The hreg is used to
2451 * emit the instruction that loads the value into the FP reg.
2453 fad = mono_mempool_alloc0 (cfg->mempool, sizeof (FloatArgData));
2454 fad->vreg = float_arg->dreg;
2455 fad->hreg = ainfo->reg;
2457 call->float_args = g_slist_append_mempool (cfg->mempool, call->float_args, fad);
2460 call->used_iregs |= 1 << ainfo->reg;
2461 cfg->flags |= MONO_CFG_HAS_FPOUT;
2465 g_assert_not_reached ();
2469 /* Handle the case where there are no implicit arguments */
2470 if (!sig->pinvoke && (sig->call_convention == MONO_CALL_VARARG) && (n == sig->sentinelpos))
2471 emit_sig_cookie (cfg, call, cinfo);
2473 if (cinfo->ret.storage == RegTypeStructByVal) {
2474 /* The JIT will transform this into a normal call */
2475 call->vret_in_reg = TRUE;
2476 } else if (cinfo->vtype_retaddr) {
/* Pass the address of the return buffer in the hidden vret argument register. */
2478 MONO_INST_NEW (cfg, vtarg, OP_MOVE);
2479 vtarg->sreg1 = call->vret_var->dreg;
2480 vtarg->dreg = mono_alloc_preg (cfg);
2481 MONO_ADD_INS (cfg->cbb, vtarg);
2483 mono_call_inst_add_outarg_reg (cfg, call, vtarg->dreg, cinfo->ret.reg, FALSE);
2486 call->stack_usage = cinfo->stack_usage;
/*
 * mono_arch_emit_outarg_vt:
 * Lower an OP_OUTARG_VT: pass a value type either by reference (gsharedvt,
 * in a register or a stack slot) or by value -- the first ainfo->size
 * words loaded into consecutive core registers (with sub-word structs
 * assembled byte-by-byte to avoid over-reading), and the overflow
 * (ainfo->vtsize words) copied to the outgoing stack area.
 */
2492 mono_arch_emit_outarg_vt (MonoCompile *cfg, MonoInst *ins, MonoInst *src)
2494 MonoCallInst *call = (MonoCallInst*)ins->inst_p0;
2495 ArgInfo *ainfo = ins->inst_p1;
2496 int ovf_size = ainfo->vtsize;
2497 int doffset = ainfo->offset;
2498 int struct_size = ainfo->struct_size;
2499 int i, soffset, dreg, tmpreg;
2501 if (ainfo->storage == RegTypeGSharedVtInReg) {
2503 mono_call_inst_add_outarg_reg (cfg, call, src->dreg, ainfo->reg, FALSE);
2506 if (ainfo->storage == RegTypeGSharedVtOnStack) {
2507 /* Pass by addr on stack */
2508 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, ARMREG_SP, ainfo->offset, src->dreg);
/* By-value: load each register-passed word, switching on the bytes remaining so we never read past the struct. */
2513 for (i = 0; i < ainfo->size; ++i) {
2514 dreg = mono_alloc_ireg (cfg);
2515 switch (struct_size) {
2517 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, dreg, src->dreg, soffset);
2520 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, dreg, src->dreg, soffset);
/* 3 bytes left: compose the word from three byte loads. */
2523 tmpreg = mono_alloc_ireg (cfg);
2524 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, dreg, src->dreg, soffset);
2525 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, tmpreg, src->dreg, soffset + 1);
2526 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, tmpreg, tmpreg, 8);
2527 MONO_EMIT_NEW_BIALU (cfg, OP_IOR, dreg, dreg, tmpreg);
2528 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, tmpreg, src->dreg, soffset + 2);
2529 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, tmpreg, tmpreg, 16);
2530 MONO_EMIT_NEW_BIALU (cfg, OP_IOR, dreg, dreg, tmpreg);
2533 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, dreg, src->dreg, soffset);
2536 mono_call_inst_add_outarg_reg (cfg, call, dreg, ainfo->reg + i, FALSE);
2537 soffset += sizeof (gpointer);
2538 struct_size -= sizeof (gpointer);
2540 //g_print ("vt size: %d at R%d + %d\n", doffset, vt->inst_basereg, vt->inst_offset);
/* Copy the overflow portion to the stack; byte-align the copy for small/misaligned structs. */
2542 mini_emit_memcpy (cfg, ARMREG_SP, doffset, src->dreg, soffset, MIN (ovf_size * sizeof (gpointer), struct_size), struct_size < 4 ? 1 : 4);
/*
 * mono_arch_emit_setret:
 *
 *   Emit IR that moves VAL into the method's return location: I8/U8 use a
 * register pair (OP_SETLRET), floating point depends on the FPU mode
 * (OP_SETFRET for VFP, plain OP_MOVE for soft-float R4), everything else is
 * a single OP_MOVE into cfg->ret.
 * NOTE(review): extract is non-contiguous; enclosing switch/braces are partly
 * missing between the visible statements.
 */
2546 mono_arch_emit_setret (MonoCompile *cfg, MonoMethod *method, MonoInst *val)
2548 MonoType *ret = mini_type_get_underlying_type (cfg->generic_sharing_context, mono_method_signature (method)->ret);
/* 64-bit results: LLVM keeps them as one vreg, the JIT splits into a reg pair. */
2551 if (ret->type == MONO_TYPE_I8 || ret->type == MONO_TYPE_U8) {
2554 if (COMPILE_LLVM (cfg)) {
2555 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, cfg->ret->dreg, val->dreg);
2557 MONO_INST_NEW (cfg, ins, OP_SETLRET);
/* vreg+1 / vreg+2 are the low/high halves of the long vreg pair. */
2558 ins->sreg1 = val->dreg + 1;
2559 ins->sreg2 = val->dreg + 2;
2560 MONO_ADD_INS (cfg->cbb, ins);
2565 case MONO_ARM_FPU_NONE:
2566 if (ret->type == MONO_TYPE_R8) {
2569 MONO_INST_NEW (cfg, ins, OP_SETFRET);
2570 ins->dreg = cfg->ret->dreg;
2571 ins->sreg1 = val->dreg;
2572 MONO_ADD_INS (cfg->cbb, ins);
2575 if (ret->type == MONO_TYPE_R4) {
2576 /* Already converted to an int in method_to_ir () */
2577 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, cfg->ret->dreg, val->dreg);
2581 case MONO_ARM_FPU_VFP:
2582 case MONO_ARM_FPU_VFP_HARD:
2583 if (ret->type == MONO_TYPE_R8 || ret->type == MONO_TYPE_R4) {
2586 MONO_INST_NEW (cfg, ins, OP_SETFRET);
2587 ins->dreg = cfg->ret->dreg;
2588 ins->sreg1 = val->dreg;
2589 MONO_ADD_INS (cfg->cbb, ins);
2594 g_assert_not_reached ();
/* Default: plain integer/pointer return. */
2598 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, cfg->ret->dreg, val->dreg);
2601 #endif /* #ifndef DISABLE_JIT */
2604 mono_arch_is_inst_imm (gint64 imm)
2609 #define DYN_CALL_STACK_ARGS 6
2612 MonoMethodSignature *sig;
2617 mgreg_t regs [PARAM_REGS + DYN_CALL_STACK_ARGS];
/*
 * dyn_call_supported:
 *
 *   Return whether the signature SIG (with call info CINFO) can be invoked
 * through the dyn-call path, i.e. every argument fits in PARAM_REGS plus
 * DYN_CALL_STACK_ARGS stack slots and the return storage is supported.
 * NOTE(review): extract is non-contiguous; some case labels/returns missing.
 */
2623 dyn_call_supported (CallInfo *cinfo, MonoMethodSignature *sig)
/* Too many arguments to fit in the fixed regs[] buffer. */
2627 if (sig->hasthis + sig->param_count > PARAM_REGS + DYN_CALL_STACK_ARGS)
2630 switch (cinfo->ret.storage) {
2632 case RegTypeGeneral:
2633 case RegTypeIRegPair:
2634 case RegTypeStructByAddr:
/* Reject any argument whose location cannot be represented in regs[]. */
2645 for (i = 0; i < cinfo->nargs; ++i) {
2646 ArgInfo *ainfo = &cinfo->args [i];
2649 switch (ainfo->storage) {
2650 case RegTypeGeneral:
2652 case RegTypeIRegPair:
/* Stack argument beyond the supported DYN_CALL_STACK_ARGS slots. */
2655 if (ainfo->offset >= (DYN_CALL_STACK_ARGS * sizeof (gpointer)))
2658 case RegTypeStructByVal:
2659 if (ainfo->size == 0)
2660 last_slot = PARAM_REGS + (ainfo->offset / 4) + ainfo->vtsize;
2662 last_slot = ainfo->reg + ainfo->size + ainfo->vtsize;
2663 if (last_slot >= PARAM_REGS + DYN_CALL_STACK_ARGS)
2671 // FIXME: Can't use cinfo only as it doesn't contain info about I8/float */
2672 for (i = 0; i < sig->param_count; ++i) {
2673 MonoType *t = sig->params [i];
/*
 * mono_arch_dyn_call_prepare:
 *
 *   Build the arch-specific dyn-call descriptor for SIG, or NULL when the
 * signature is not supported by the dyn-call path.  The returned info owns
 * the CallInfo and must be released with mono_arch_dyn_call_free ().
 */
2699 mono_arch_dyn_call_prepare (MonoMethodSignature *sig)
2701 ArchDynCallInfo *info;
2704 cinfo = get_call_info (NULL, NULL, sig);
2706 if (!dyn_call_supported (cinfo, sig)) {
2711 info = g_new0 (ArchDynCallInfo, 1);
2712 // FIXME: Preprocess the info to speed up start_dyn_call ()
2714 info->cinfo = cinfo;
2716 return (MonoDynCallInfo*)info;
/*
 * mono_arch_dyn_call_free:
 *
 *   Release a descriptor produced by mono_arch_dyn_call_prepare ().
 * Frees the embedded CallInfo (and, per the g_new0 in prepare, presumably
 * the ArchDynCallInfo itself on a line missing from this extract).
 */
2720 mono_arch_dyn_call_free (MonoDynCallInfo *info)
2722 ArchDynCallInfo *ainfo = (ArchDynCallInfo*)info;
2724 g_free (ainfo->cinfo);
/*
 * mono_arch_start_dyn_call:
 *
 *   Marshal ARGS into the DynCallArgs buffer BUF according to the prepared
 * INFO: the this-pointer and/or vtype return address go first, then each
 * parameter is widened/copied into its register or stack slot in p->regs.
 * NOTE(review): extract is non-contiguous; several case labels and the
 * greg/pindex initialization are missing between the visible statements.
 */
2729 mono_arch_start_dyn_call (MonoDynCallInfo *info, gpointer **args, guint8 *ret, guint8 *buf, int buf_len)
2731 ArchDynCallInfo *dinfo = (ArchDynCallInfo*)info;
2732 DynCallArgs *p = (DynCallArgs*)buf;
2733 int arg_index, greg, i, j, pindex;
2734 MonoMethodSignature *sig = dinfo->sig;
2736 g_assert (buf_len >= sizeof (DynCallArgs));
/* this-pointer (or the vtype return address when it comes first). */
2745 if (sig->hasthis || dinfo->cinfo->vret_arg_index == 1) {
2746 p->regs [greg ++] = (mgreg_t)*(args [arg_index ++]);
2751 if (dinfo->cinfo->vtype_retaddr)
2752 p->regs [greg ++] = (mgreg_t)ret;
2754 for (i = pindex; i < sig->param_count; i++) {
2755 MonoType *t = mono_type_get_underlying_type (sig->params [i]);
2756 gpointer *arg = args [arg_index ++];
2757 ArgInfo *ainfo = &dinfo->cinfo->args [i + sig->hasthis];
/* Map the ArgInfo storage to an index into p->regs (regs then stack slots). */
2760 if (ainfo->storage == RegTypeGeneral || ainfo->storage == RegTypeIRegPair || ainfo->storage == RegTypeStructByVal)
2762 else if (ainfo->storage == RegTypeBase)
2763 slot = PARAM_REGS + (ainfo->offset / 4);
2765 g_assert_not_reached ();
2768 p->regs [slot] = (mgreg_t)*arg;
/* Reference types: store the pointer value directly. */
2773 case MONO_TYPE_STRING:
2774 case MONO_TYPE_CLASS:
2775 case MONO_TYPE_ARRAY:
2776 case MONO_TYPE_SZARRAY:
2777 case MONO_TYPE_OBJECT:
2781 p->regs [slot] = (mgreg_t)*arg;
/* Small integers are widened to a full machine word. */
2783 case MONO_TYPE_BOOLEAN:
2785 p->regs [slot] = *(guint8*)arg;
2788 p->regs [slot] = *(gint8*)arg;
2791 p->regs [slot] = *(gint16*)arg;
2794 case MONO_TYPE_CHAR:
2795 p->regs [slot] = *(guint16*)arg;
2798 p->regs [slot] = *(gint32*)arg;
2801 p->regs [slot] = *(guint32*)arg;
/* 64-bit values occupy two consecutive slots. */
2805 p->regs [slot ++] = (mgreg_t)arg [0];
2806 p->regs [slot] = (mgreg_t)arg [1];
2809 p->regs [slot] = *(mgreg_t*)arg;
2812 p->regs [slot ++] = (mgreg_t)arg [0];
2813 p->regs [slot] = (mgreg_t)arg [1];
2815 case MONO_TYPE_GENERICINST:
2816 if (MONO_TYPE_IS_REFERENCE (t)) {
2817 p->regs [slot] = (mgreg_t)*arg;
/* By-value structs: copy every word into consecutive slots. */
2822 case MONO_TYPE_VALUETYPE:
2823 g_assert (ainfo->storage == RegTypeStructByVal);
2825 if (ainfo->size == 0)
2826 slot = PARAM_REGS + (ainfo->offset / 4);
2830 for (j = 0; j < ainfo->size + ainfo->vtsize; ++j)
2831 p->regs [slot ++] = ((mgreg_t*)arg) [j];
2834 g_assert_not_reached ();
/*
 * mono_arch_finish_dyn_call:
 *
 *   Copy the raw dyn-call result (res/res2, filled in by the invoke stub)
 * out of BUF into the typed return buffer, narrowing or pairing words as
 * the signature's return type requires.
 * NOTE(review): extract is non-contiguous; some case labels and the ptype
 * declaration are missing between the visible statements.
 */
2840 mono_arch_finish_dyn_call (MonoDynCallInfo *info, guint8 *buf)
2842 ArchDynCallInfo *ainfo = (ArchDynCallInfo*)info;
2843 MonoMethodSignature *sig = ((ArchDynCallInfo*)info)->sig;
2845 guint8 *ret = ((DynCallArgs*)buf)->ret;
2846 mgreg_t res = ((DynCallArgs*)buf)->res;
2847 mgreg_t res2 = ((DynCallArgs*)buf)->res2;
2849 ptype = mini_type_get_underlying_type (NULL, sig->ret);
2850 switch (ptype->type) {
2851 case MONO_TYPE_VOID:
2852 *(gpointer*)ret = NULL;
2854 case MONO_TYPE_STRING:
2855 case MONO_TYPE_CLASS:
2856 case MONO_TYPE_ARRAY:
2857 case MONO_TYPE_SZARRAY:
2858 case MONO_TYPE_OBJECT:
2862 *(gpointer*)ret = (gpointer)res;
2868 case MONO_TYPE_BOOLEAN:
2869 *(guint8*)ret = res;
2872 *(gint16*)ret = res;
2875 case MONO_TYPE_CHAR:
2876 *(guint16*)ret = res;
2879 *(gint32*)ret = res;
2882 *(guint32*)ret = res;
2886 /* This handles endianness as well */
2887 ((gint32*)ret) [0] = res;
2888 ((gint32*)ret) [1] = res2;
2890 case MONO_TYPE_GENERICINST:
2891 if (MONO_TYPE_IS_REFERENCE (ptype)) {
2892 *(gpointer*)ret = (gpointer)res;
2897 case MONO_TYPE_VALUETYPE:
/* Vtype results were written directly through the retaddr passed earlier. */
2898 g_assert (ainfo->cinfo->vtype_retaddr);
2903 *(float*)ret = *(float*)&res;
2905 case MONO_TYPE_R8: {
/* NOTE(review): "®s" below looks like mis-encoded "&regs" (a local
 * mgreg_t regs[2] declared on lines missing from this extract) --
 * confirm against the upstream mini-arm.c before building. */
2912 *(double*)ret = *(double*)®s;
2916 g_assert_not_reached ();
2923 * Allow tracing to work with this interface (with an optional argument)
/*
 * mono_arch_instrument_prolog:
 *
 *   Emit a call to the tracing function FUNC at method entry:
 * r0 = method, r1 = 0 (frame pointer placeholder), then call via r2.
 */
2927 mono_arch_instrument_prolog (MonoCompile *cfg, void *func, void *p, gboolean enable_arguments)
2931 code = mono_arm_emit_load_imm (code, ARMREG_R0, (guint32)cfg->method);
2932 ARM_MOV_REG_IMM8 (code, ARMREG_R1, 0); /* NULL ebp for now */
2933 code = mono_arm_emit_load_imm (code, ARMREG_R2, (guint32)func);
2934 code = emit_call_reg (code, ARMREG_R2);
/*
 * mono_arch_instrument_epilog_full:
 *
 *   Emit a call to the tracing function FUNC at method exit.  The return
 * value (in r0/r1 or d0/s0 depending on type and FPU mode) is saved to the
 * param area, optionally copied into argument registers for FUNC, and
 * restored after the call.
 * NOTE(review): extract is non-contiguous; the rtype switch labels and some
 * case bodies are missing between the visible statements.
 */
2948 mono_arch_instrument_epilog_full (MonoCompile *cfg, void *func, void *p, gboolean enable_arguments, gboolean preserve_argument_registers)
2951 int save_mode = SAVE_NONE;
2953 MonoMethod *method = cfg->method;
2954 MonoType *ret_type = mini_type_get_underlying_type (cfg->generic_sharing_context, mono_method_signature (method)->ret);
2955 int rtype = ret_type->type;
2956 int save_offset = cfg->param_area;
/* Grow the code buffer if fewer than ~16 instructions of room remain. */
2960 offset = code - cfg->native_code;
2961 /* we need about 16 instructions */
2962 if (offset > (cfg->code_size - 16 * 4)) {
2963 cfg->code_size *= 2;
2964 cfg->native_code = g_realloc (cfg->native_code, cfg->code_size);
2965 code = cfg->native_code + offset;
/* Pick how much of the return value must be preserved across the call. */
2968 case MONO_TYPE_VOID:
2969 /* special case string .ctor icall */
2970 if (strcmp (".ctor", method->name) && method->klass == mono_defaults.string_class)
2971 save_mode = SAVE_ONE;
2973 save_mode = SAVE_NONE;
2977 save_mode = SAVE_TWO;
2981 save_mode = SAVE_ONE_FP;
2983 save_mode = SAVE_ONE;
2987 save_mode = SAVE_TWO_FP;
2989 save_mode = SAVE_TWO;
2991 case MONO_TYPE_GENERICINST:
2992 if (!mono_type_generic_inst_is_valuetype (ret_type)) {
2993 save_mode = SAVE_ONE;
2997 case MONO_TYPE_VALUETYPE:
2998 save_mode = SAVE_STRUCT;
3001 save_mode = SAVE_ONE;
/* Save the return value, shifting it into r1/r2 for FUNC if requested. */
3005 switch (save_mode) {
3007 ARM_STR_IMM (code, ARMREG_R0, cfg->frame_reg, save_offset);
3008 ARM_STR_IMM (code, ARMREG_R1, cfg->frame_reg, save_offset + 4);
3009 if (enable_arguments) {
3010 ARM_MOV_REG_REG (code, ARMREG_R2, ARMREG_R1);
3011 ARM_MOV_REG_REG (code, ARMREG_R1, ARMREG_R0);
3015 ARM_STR_IMM (code, ARMREG_R0, cfg->frame_reg, save_offset);
3016 if (enable_arguments) {
3017 ARM_MOV_REG_REG (code, ARMREG_R1, ARMREG_R0);
3021 ARM_FSTS (code, ARM_VFP_F0, cfg->frame_reg, save_offset);
3022 if (enable_arguments) {
3023 ARM_FMRS (code, ARMREG_R1, ARM_VFP_F0);
3027 ARM_FSTD (code, ARM_VFP_D0, cfg->frame_reg, save_offset);
3028 if (enable_arguments) {
3029 ARM_FMDRR (code, ARMREG_R1, ARMREG_R2, ARM_VFP_D0);
3033 if (enable_arguments) {
3034 /* FIXME: get the actual address */
3035 ARM_MOV_REG_REG (code, ARMREG_R1, ARMREG_R0);
/* Call FUNC (method in r0) through IP. */
3043 code = mono_arm_emit_load_imm (code, ARMREG_R0, (guint32)cfg->method);
3044 code = mono_arm_emit_load_imm (code, ARMREG_IP, (guint32)func);
3045 code = emit_call_reg (code, ARMREG_IP);
/* Restore the saved return value. */
3047 switch (save_mode) {
3049 ARM_LDR_IMM (code, ARMREG_R0, cfg->frame_reg, save_offset);
3050 ARM_LDR_IMM (code, ARMREG_R1, cfg->frame_reg, save_offset + 4);
3053 ARM_LDR_IMM (code, ARMREG_R0, cfg->frame_reg, save_offset);
3056 ARM_FLDS (code, ARM_VFP_F0, cfg->frame_reg, save_offset);
3059 ARM_FLDD (code, ARM_VFP_D0, cfg->frame_reg, save_offset);
3070 * The immediate field for cond branches is big enough for all reasonable methods
/*
 * Conditional-branch emission helpers.  The backward-branch fast path is
 * disabled ("0 &&"), so every conditional branch is emitted with a zero
 * displacement and a MONO_PATCH_INFO_BB patch to be resolved later.
 * (No comments are inserted inside the macros: they are backslash-continued
 * and this extract is missing some continuation lines.)
 */
3072 #define EMIT_COND_BRANCH_FLAGS(ins,condcode) \
3073 if (0 && ins->inst_true_bb->native_offset) { \
3074 ARM_B_COND (code, (condcode), (code - cfg->native_code + ins->inst_true_bb->native_offset) & 0xffffff); \
3076 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_BB, ins->inst_true_bb); \
3077 ARM_B_COND (code, (condcode), 0); \
3080 #define EMIT_COND_BRANCH(ins,cond) EMIT_COND_BRANCH_FLAGS(ins, branch_cc_table [(cond)])
3082 /* emit an exception if condition is fail
3084 * We assign the extra code used to throw the implicit exceptions
3085 * to cfg->bb_exit as far as the big branch handling is concerned
3087 #define EMIT_COND_SYSTEM_EXCEPTION_FLAGS(condcode,exc_name) \
3089 mono_add_patch_info (cfg, code - cfg->native_code, \
3090 MONO_PATCH_INFO_EXC, exc_name); \
3091 ARM_BL_COND (code, (condcode), 0); \
3094 #define EMIT_COND_SYSTEM_EXCEPTION(cond,exc_name) EMIT_COND_SYSTEM_EXCEPTION_FLAGS(branch_cc_table [(cond)], (exc_name))
3097 mono_arch_peephole_pass_1 (MonoCompile *cfg, MonoBasicBlock *bb)
/*
 * mono_arch_peephole_pass_2:
 *
 *   Local peephole optimizations over BB: fold redundant load-after-store,
 * duplicate loads, store-imm-then-load, sub-word load-after-store (turned
 * into sign/zero extensions), and self/back-to-back moves.
 * NOTE(review): extract is non-contiguous; some case labels, breaks and the
 * last_ins update are missing between the visible statements.
 */
3102 mono_arch_peephole_pass_2 (MonoCompile *cfg, MonoBasicBlock *bb)
3104 MonoInst *ins, *n, *last_ins = NULL;
3106 MONO_BB_FOR_EACH_INS_SAFE (bb, n, ins) {
3107 switch (ins->opcode) {
3110 /* Already done by an arch-independent pass */
3112 case OP_LOAD_MEMBASE:
3113 case OP_LOADI4_MEMBASE:
/* Load of the slot just stored -> delete, or replace with a reg move. */
3115 * OP_STORE_MEMBASE_REG reg, offset(basereg)
3116 * OP_LOAD_MEMBASE offset(basereg), reg
3118 if (last_ins && (last_ins->opcode == OP_STOREI4_MEMBASE_REG
3119 || last_ins->opcode == OP_STORE_MEMBASE_REG) &&
3120 ins->inst_basereg == last_ins->inst_destbasereg &&
3121 ins->inst_offset == last_ins->inst_offset) {
3122 if (ins->dreg == last_ins->sreg1) {
3123 MONO_DELETE_INS (bb, ins);
3126 //static int c = 0; printf ("MATCHX %s %d\n", cfg->method->name,c++);
3127 ins->opcode = OP_MOVE;
3128 ins->sreg1 = last_ins->sreg1;
3132 * Note: reg1 must be different from the basereg in the second load
3133 * OP_LOAD_MEMBASE offset(basereg), reg1
3134 * OP_LOAD_MEMBASE offset(basereg), reg2
3136 * OP_LOAD_MEMBASE offset(basereg), reg1
3137 * OP_MOVE reg1, reg2
3139 } if (last_ins && (last_ins->opcode == OP_LOADI4_MEMBASE
3140 || last_ins->opcode == OP_LOAD_MEMBASE) &&
3141 ins->inst_basereg != last_ins->dreg &&
3142 ins->inst_basereg == last_ins->inst_basereg &&
3143 ins->inst_offset == last_ins->inst_offset) {
3145 if (ins->dreg == last_ins->dreg) {
3146 MONO_DELETE_INS (bb, ins);
3149 ins->opcode = OP_MOVE;
3150 ins->sreg1 = last_ins->dreg;
3153 //g_assert_not_reached ();
3157 * OP_STORE_MEMBASE_IMM imm, offset(basereg)
3158 * OP_LOAD_MEMBASE offset(basereg), reg
3160 * OP_STORE_MEMBASE_IMM imm, offset(basereg)
3161 * OP_ICONST reg, imm
3163 } else if (last_ins && (last_ins->opcode == OP_STOREI4_MEMBASE_IMM
3164 || last_ins->opcode == OP_STORE_MEMBASE_IMM) &&
3165 ins->inst_basereg == last_ins->inst_destbasereg &&
3166 ins->inst_offset == last_ins->inst_offset) {
3167 //static int c = 0; printf ("MATCHX %s %d\n", cfg->method->name,c++);
3168 ins->opcode = OP_ICONST;
3169 ins->inst_c0 = last_ins->inst_imm;
/* This rule has apparently never fired; assert so it gets reviewed if it does. */
3170 g_assert_not_reached (); // check this rule
/* Byte load after byte store of the same slot -> sign/zero extend the stored reg. */
3174 case OP_LOADU1_MEMBASE:
3175 case OP_LOADI1_MEMBASE:
3176 if (last_ins && (last_ins->opcode == OP_STOREI1_MEMBASE_REG) &&
3177 ins->inst_basereg == last_ins->inst_destbasereg &&
3178 ins->inst_offset == last_ins->inst_offset) {
3179 ins->opcode = (ins->opcode == OP_LOADI1_MEMBASE) ? OP_ICONV_TO_I1 : OP_ICONV_TO_U1;
3180 ins->sreg1 = last_ins->sreg1;
/* Same for 16-bit loads after 16-bit stores. */
3183 case OP_LOADU2_MEMBASE:
3184 case OP_LOADI2_MEMBASE:
3185 if (last_ins && (last_ins->opcode == OP_STOREI2_MEMBASE_REG) &&
3186 ins->inst_basereg == last_ins->inst_destbasereg &&
3187 ins->inst_offset == last_ins->inst_offset) {
3188 ins->opcode = (ins->opcode == OP_LOADI2_MEMBASE) ? OP_ICONV_TO_I2 : OP_ICONV_TO_U2;
3189 ins->sreg1 = last_ins->sreg1;
3193 ins->opcode = OP_MOVE;
/* Moves: drop self-moves and a move that undoes the previous move. */
3197 if (ins->dreg == ins->sreg1) {
3198 MONO_DELETE_INS (bb, ins);
3202 * OP_MOVE sreg, dreg
3203 * OP_MOVE dreg, sreg
3205 if (last_ins && last_ins->opcode == OP_MOVE &&
3206 ins->sreg1 == last_ins->dreg &&
3207 ins->dreg == last_ins->sreg1) {
3208 MONO_DELETE_INS (bb, ins);
3216 bb->last_ins = last_ins;
3220 * the branch_cc_table should maintain the order of these
3234 branch_cc_table [] = {
3248 #define ADD_NEW_INS(cfg,dest,op) do { \
3249 MONO_INST_NEW ((cfg), (dest), (op)); \
3250 mono_bblock_insert_before_ins (bb, ins, (dest)); \
/*
 * map_to_reg_reg_op:
 *
 *   Map an immediate/membase opcode to its register-register equivalent:
 * *_MEMBASE -> *_MEMINDEX (offset moved to a register) and
 * *_MEMBASE_IMM -> *_MEMBASE_REG (immediate moved to a register).
 * Used by the lowering pass when an offset/immediate does not fit the
 * instruction encoding.  Asserts on unhandled opcodes.
 * NOTE(review): extract is non-contiguous; some case/return pairs missing.
 */
3254 map_to_reg_reg_op (int op)
3263 case OP_COMPARE_IMM:
3265 case OP_ICOMPARE_IMM:
3279 case OP_LOAD_MEMBASE:
3280 return OP_LOAD_MEMINDEX;
3281 case OP_LOADI4_MEMBASE:
3282 return OP_LOADI4_MEMINDEX;
3283 case OP_LOADU4_MEMBASE:
3284 return OP_LOADU4_MEMINDEX;
3285 case OP_LOADU1_MEMBASE:
3286 return OP_LOADU1_MEMINDEX;
3287 case OP_LOADI2_MEMBASE:
3288 return OP_LOADI2_MEMINDEX;
3289 case OP_LOADU2_MEMBASE:
3290 return OP_LOADU2_MEMINDEX;
3291 case OP_LOADI1_MEMBASE:
3292 return OP_LOADI1_MEMINDEX;
3293 case OP_STOREI1_MEMBASE_REG:
3294 return OP_STOREI1_MEMINDEX;
3295 case OP_STOREI2_MEMBASE_REG:
3296 return OP_STOREI2_MEMINDEX;
3297 case OP_STOREI4_MEMBASE_REG:
3298 return OP_STOREI4_MEMINDEX;
3299 case OP_STORE_MEMBASE_REG:
3300 return OP_STORE_MEMINDEX;
3301 case OP_STORER4_MEMBASE_REG:
3302 return OP_STORER4_MEMINDEX;
3303 case OP_STORER8_MEMBASE_REG:
3304 return OP_STORER8_MEMINDEX;
3305 case OP_STORE_MEMBASE_IMM:
3306 return OP_STORE_MEMBASE_REG;
3307 case OP_STOREI1_MEMBASE_IMM:
3308 return OP_STOREI1_MEMBASE_REG;
3309 case OP_STOREI2_MEMBASE_IMM:
3310 return OP_STOREI2_MEMBASE_REG;
3311 case OP_STOREI4_MEMBASE_IMM:
3312 return OP_STOREI4_MEMBASE_REG;
3314 g_assert_not_reached ();
3318 * Remove from the instruction list the instructions that can't be
3319 * represented with very simple instructions with no register
/*
 * mono_arch_lowering_pass:
 *
 *   Lower IR in BB to forms directly encodable on ARM: immediates that do
 * not fit the rotated-imm8 / imm12 / imm8 / fpimm8 encodings are hoisted
 * into OP_ICONST temporaries (switching the instruction to its reg-reg
 * form via map_to_reg_reg_op), multiplies by constants are strength-
 * reduced, and fp-compare branch polarity is fixed by swapping operands.
 * NOTE(review): extract is non-contiguous; many case labels, breaks and the
 * loop_start label are missing between the visible statements.
 */
3323 mono_arch_lowering_pass (MonoCompile *cfg, MonoBasicBlock *bb)
3325 MonoInst *ins, *temp, *last_ins = NULL;
3326 int rot_amount, imm8, low_imm;
3328 MONO_BB_FOR_EACH_INS (bb, ins) {
3330 switch (ins->opcode) {
3334 case OP_COMPARE_IMM:
3335 case OP_ICOMPARE_IMM:
/* Immediate does not fit ARM's rotated 8-bit form: hoist it into a reg. */
3349 if ((imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount)) < 0) {
3350 ADD_NEW_INS (cfg, temp, OP_ICONST);
3351 temp->inst_c0 = ins->inst_imm;
3352 temp->dreg = mono_alloc_ireg (cfg);
3353 ins->sreg2 = temp->dreg;
3354 ins->opcode = mono_op_imm_to_op (ins->opcode);
3356 if (ins->opcode == OP_SBB || ins->opcode == OP_ISBB || ins->opcode == OP_SUBCC)
/* Multiply by constant: strength-reduce 1 -> move, 0 -> const, 2^n -> shift. */
3362 if (ins->inst_imm == 1) {
3363 ins->opcode = OP_MOVE;
3366 if (ins->inst_imm == 0) {
3367 ins->opcode = OP_ICONST;
3371 imm8 = mono_is_power_of_two (ins->inst_imm);
3373 ins->opcode = OP_SHL_IMM;
3374 ins->inst_imm = imm8;
/* Otherwise materialize the constant and use a real multiply. */
3377 ADD_NEW_INS (cfg, temp, OP_ICONST);
3378 temp->inst_c0 = ins->inst_imm;
3379 temp->dreg = mono_alloc_ireg (cfg);
3380 ins->sreg2 = temp->dreg;
3381 ins->opcode = OP_IMUL;
3387 if (ins->next && (ins->next->opcode == OP_COND_EXC_C || ins->next->opcode == OP_COND_EXC_IC))
3388 /* ARM sets the C flag to 1 if there was _no_ overflow */
3389 ins->next->opcode = OP_COND_EXC_NC;
/* div/rem by immediate: no imm form on ARM, always go through a register. */
3392 case OP_IDIV_UN_IMM:
3394 case OP_IREM_UN_IMM:
3395 ADD_NEW_INS (cfg, temp, OP_ICONST);
3396 temp->inst_c0 = ins->inst_imm;
3397 temp->dreg = mono_alloc_ireg (cfg);
3398 ins->sreg2 = temp->dreg;
3399 ins->opcode = mono_op_imm_to_op (ins->opcode);
3401 case OP_LOCALLOC_IMM:
3402 ADD_NEW_INS (cfg, temp, OP_ICONST);
3403 temp->inst_c0 = ins->inst_imm;
3404 temp->dreg = mono_alloc_ireg (cfg);
3405 ins->sreg1 = temp->dreg;
3406 ins->opcode = OP_LOCALLOC;
3408 case OP_LOAD_MEMBASE:
3409 case OP_LOADI4_MEMBASE:
3410 case OP_LOADU4_MEMBASE:
3411 case OP_LOADU1_MEMBASE:
3412 /* we can do two things: load the immed in a register
3413 * and use an indexed load, or see if the immed can be
3414 * represented as an ad_imm + a load with a smaller offset
3415 * that fits. We just do the first for now, optimize later.
3417 if (arm_is_imm12 (ins->inst_offset))
3419 ADD_NEW_INS (cfg, temp, OP_ICONST);
3420 temp->inst_c0 = ins->inst_offset;
3421 temp->dreg = mono_alloc_ireg (cfg);
3422 ins->sreg2 = temp->dreg;
3423 ins->opcode = map_to_reg_reg_op (ins->opcode);
/* Halfword/signed-byte loads only have an 8-bit offset encoding. */
3425 case OP_LOADI2_MEMBASE:
3426 case OP_LOADU2_MEMBASE:
3427 case OP_LOADI1_MEMBASE:
3428 if (arm_is_imm8 (ins->inst_offset))
3430 ADD_NEW_INS (cfg, temp, OP_ICONST);
3431 temp->inst_c0 = ins->inst_offset;
3432 temp->dreg = mono_alloc_ireg (cfg);
3433 ins->sreg2 = temp->dreg;
3434 ins->opcode = map_to_reg_reg_op (ins->opcode);
/* VFP loads: try base+rounded-offset first, else compute the address. */
3436 case OP_LOADR4_MEMBASE:
3437 case OP_LOADR8_MEMBASE:
3438 if (arm_is_fpimm8 (ins->inst_offset))
3440 low_imm = ins->inst_offset & 0x1ff;
3441 if ((imm8 = mono_arm_is_rotated_imm8 (ins->inst_offset & ~0x1ff, &rot_amount)) >= 0) {
3442 ADD_NEW_INS (cfg, temp, OP_ADD_IMM);
3443 temp->inst_imm = ins->inst_offset & ~0x1ff;
3444 temp->sreg1 = ins->inst_basereg;
3445 temp->dreg = mono_alloc_ireg (cfg);
3446 ins->inst_basereg = temp->dreg;
3447 ins->inst_offset = low_imm;
/* Fallback: full base + offset addition into a fresh register. */
3451 ADD_NEW_INS (cfg, temp, OP_ICONST);
3452 temp->inst_c0 = ins->inst_offset;
3453 temp->dreg = mono_alloc_ireg (cfg);
3455 ADD_NEW_INS (cfg, add_ins, OP_IADD);
3456 add_ins->sreg1 = ins->inst_basereg;
3457 add_ins->sreg2 = temp->dreg;
3458 add_ins->dreg = mono_alloc_ireg (cfg);
3460 ins->inst_basereg = add_ins->dreg;
3461 ins->inst_offset = 0;
3464 case OP_STORE_MEMBASE_REG:
3465 case OP_STOREI4_MEMBASE_REG:
3466 case OP_STOREI1_MEMBASE_REG:
3467 if (arm_is_imm12 (ins->inst_offset))
3469 ADD_NEW_INS (cfg, temp, OP_ICONST);
3470 temp->inst_c0 = ins->inst_offset;
3471 temp->dreg = mono_alloc_ireg (cfg);
3472 ins->sreg2 = temp->dreg;
3473 ins->opcode = map_to_reg_reg_op (ins->opcode);
3475 case OP_STOREI2_MEMBASE_REG:
3476 if (arm_is_imm8 (ins->inst_offset))
3478 ADD_NEW_INS (cfg, temp, OP_ICONST);
3479 temp->inst_c0 = ins->inst_offset;
3480 temp->dreg = mono_alloc_ireg (cfg);
3481 ins->sreg2 = temp->dreg;
3482 ins->opcode = map_to_reg_reg_op (ins->opcode);
/* VFP stores: same base+rounded-offset / computed-address strategy as loads. */
3484 case OP_STORER4_MEMBASE_REG:
3485 case OP_STORER8_MEMBASE_REG:
3486 if (arm_is_fpimm8 (ins->inst_offset))
3488 low_imm = ins->inst_offset & 0x1ff;
3489 if ((imm8 = mono_arm_is_rotated_imm8 (ins->inst_offset & ~ 0x1ff, &rot_amount)) >= 0 && arm_is_fpimm8 (low_imm)) {
3490 ADD_NEW_INS (cfg, temp, OP_ADD_IMM);
3491 temp->inst_imm = ins->inst_offset & ~0x1ff;
3492 temp->sreg1 = ins->inst_destbasereg;
3493 temp->dreg = mono_alloc_ireg (cfg);
3494 ins->inst_destbasereg = temp->dreg;
3495 ins->inst_offset = low_imm;
3499 ADD_NEW_INS (cfg, temp, OP_ICONST);
3500 temp->inst_c0 = ins->inst_offset;
3501 temp->dreg = mono_alloc_ireg (cfg);
3503 ADD_NEW_INS (cfg, add_ins, OP_IADD);
3504 add_ins->sreg1 = ins->inst_destbasereg;
3505 add_ins->sreg2 = temp->dreg;
3506 add_ins->dreg = mono_alloc_ireg (cfg);
3508 ins->inst_destbasereg = add_ins->dreg;
3509 ins->inst_offset = 0;
/* Stores of immediates: materialize the value, then re-lower the offset. */
3512 case OP_STORE_MEMBASE_IMM:
3513 case OP_STOREI1_MEMBASE_IMM:
3514 case OP_STOREI2_MEMBASE_IMM:
3515 case OP_STOREI4_MEMBASE_IMM:
3516 ADD_NEW_INS (cfg, temp, OP_ICONST);
3517 temp->inst_c0 = ins->inst_imm;
3518 temp->dreg = mono_alloc_ireg (cfg);
3519 ins->sreg1 = temp->dreg;
3520 ins->opcode = map_to_reg_reg_op (ins->opcode);
3522 goto loop_start; /* make it handle the possibly big ins->inst_offset */
3524 gboolean swap = FALSE;
3528 /* Optimized away */
3533 /* Some fp compares require swapped operands */
3534 switch (ins->next->opcode) {
3536 ins->next->opcode = OP_FBLT;
3540 ins->next->opcode = OP_FBLT_UN;
3544 ins->next->opcode = OP_FBGE;
3548 ins->next->opcode = OP_FBGE_UN;
3556 ins->sreg1 = ins->sreg2;
3565 bb->last_ins = last_ins;
3566 bb->max_vreg = cfg->next_vreg;
/*
 * mono_arch_decompose_long_opts:
 *
 *   Decompose 64-bit IR ops into 32-bit pairs.  OP_LNEG becomes a
 * reverse-subtract of the low word (RSBS, setting carry) followed by a
 * reverse-subtract-with-carry of the high word; vreg+1/vreg+2 are the
 * low/high halves of the long vreg.
 */
3570 mono_arch_decompose_long_opts (MonoCompile *cfg, MonoInst *long_ins)
3574 if (long_ins->opcode == OP_LNEG) {
3576 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ARM_RSBS_IMM, ins->dreg + 1, ins->sreg1 + 1, 0);
3577 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ARM_RSC_IMM, ins->dreg + 2, ins->sreg1 + 2, 0);
/*
 * emit_float_to_int:
 *
 *   Emit VFP code converting the double in SREG to an integer in DREG:
 * TOSIZD/TOUIZD into a scratch VFP register (signed/unsigned truncation),
 * FMRS to move it to the core register, then mask or shift to narrow to
 * SIZE bytes (zero-extend when !is_signed, sign-extend otherwise).
 */
3583 emit_float_to_int (MonoCompile *cfg, guchar *code, int dreg, int sreg, int size, gboolean is_signed)
3585 /* sreg is a float, dreg is an integer reg */
3587 code = mono_arm_emit_vfp_scratch_save (cfg, code, vfp_scratch1);
3589 ARM_TOSIZD (code, vfp_scratch1, sreg);
3591 ARM_TOUIZD (code, vfp_scratch1, sreg);
3592 ARM_FMRS (code, dreg, vfp_scratch1);
3593 code = mono_arm_emit_vfp_scratch_restore (cfg, code, vfp_scratch1);
/* Unsigned narrowing: mask (1 byte) or shift left/right logical (2 bytes). */
3597 ARM_AND_REG_IMM8 (code, dreg, dreg, 0xff);
3598 else if (size == 2) {
3599 ARM_SHL_IMM (code, dreg, dreg, 16);
3600 ARM_SHR_IMM (code, dreg, dreg, 16);
/* Signed narrowing: shift left then arithmetic shift right. */
3604 ARM_SHL_IMM (code, dreg, dreg, 24);
3605 ARM_SAR_IMM (code, dreg, dreg, 24);
3606 } else if (size == 2) {
3607 ARM_SHL_IMM (code, dreg, dreg, 16);
3608 ARM_SAR_IMM (code, dreg, dreg, 16);
3614 #endif /* #ifndef DISABLE_JIT */
3618 const guchar *target;
3623 #define is_call_imm(diff) ((gint)(diff) >= -33554432 && (gint)(diff) <= 33554431)
/*
 * search_thunk_slot:
 *
 *   mono_code_manager_foreach/mono_domain_code_foreach callback: scan the
 * thunk area of a code chunk for either an existing 12-byte thunk that
 * already jumps to pdata->target (reuse it) or an all-zero free slot (emit
 * a new thunk there), then patch pdata->code to branch to the thunk.
 * Returns via pdata->found: 1 = patched.
 */
3626 search_thunk_slot (void *data, int csize, int bsize, void *user_data) {
3627 PatchData *pdata = (PatchData*)user_data;
3628 guchar *code = data;
3629 guint32 *thunks = data;
3630 guint32 *endthunks = (guint32*)(code + bsize);
3632 int difflow, diffhigh;
3634 /* always ensure a call from pdata->code can reach to the thunks without further thunks */
3635 difflow = (char*)pdata->code - (char*)thunks;
3636 diffhigh = (char*)pdata->code - (char*)endthunks;
3637 if (!((is_call_imm (thunks) && is_call_imm (endthunks)) || (is_call_imm (difflow) && is_call_imm (diffhigh))))
3641 * The thunk is composed of 3 words:
3642 * load constant from thunks [2] into ARM_IP
3645 * Note that the LR register is already setup
3647 //g_print ("thunk nentries: %d\n", ((char*)endthunks - (char*)thunks)/16);
3648 if ((pdata->found == 2) || (pdata->code >= code && pdata->code <= code + csize)) {
3649 while (thunks < endthunks) {
3650 //g_print ("looking for target: %p at %p (%08x-%08x)\n", pdata->target, thunks, thunks [0], thunks [1]);
/* Word 2 holds the target address: an existing thunk for this target. */
3651 if (thunks [2] == (guint32)pdata->target) {
3652 arm_patch (pdata->code, (guchar*)thunks);
3653 mono_arch_flush_icache (pdata->code, 4);
3656 } else if ((thunks [0] == 0) && (thunks [1] == 0) && (thunks [2] == 0)) {
3657 /* found a free slot instead: emit thunk */
3658 /* ARMREG_IP is fine to use since this can't be an IMT call
3661 code = (guchar*)thunks;
3662 ARM_LDR_IMM (code, ARMREG_IP, ARMREG_PC, 0);
3663 if (thumb_supported)
3664 ARM_BX (code, ARMREG_IP);
3666 ARM_MOV_REG_REG (code, ARMREG_PC, ARMREG_IP);
3667 thunks [2] = (guint32)pdata->target;
3668 mono_arch_flush_icache ((guchar*)thunks, 12);
3670 arm_patch (pdata->code, (guchar*)thunks);
3671 mono_arch_flush_icache (pdata->code, 4);
3675 /* skip 12 bytes, the size of the thunk */
3679 //g_print ("failed thunk lookup for %p from %p at %p (%d entries)\n", pdata->target, pdata->code, data, count);
/*
 * handle_thunk:
 *
 *   Patch CODE to reach TARGET through a thunk when a direct branch cannot
 * span the distance.  Searches, in order: the dynamic-method code manager
 * (if given), the domain's code chunks (second pass reuses the first free
 * slot), and finally every dynamic method's code manager.  Asserts if no
 * thunk slot could be found or created.
 */
3685 handle_thunk (MonoDomain *domain, int absolute, guchar *code, const guchar *target, MonoCodeManager *dyn_code_mp)
3690 domain = mono_domain_get ();
3693 pdata.target = target;
3694 pdata.absolute = absolute;
3698 mono_code_manager_foreach (dyn_code_mp, search_thunk_slot, &pdata);
3701 if (pdata.found != 1) {
3702 mono_domain_lock (domain);
3703 mono_domain_code_foreach (domain, search_thunk_slot, &pdata);
3706 /* this uses the first available slot */
3708 mono_domain_code_foreach (domain, search_thunk_slot, &pdata);
3710 mono_domain_unlock (domain);
3713 if (pdata.found != 1) {
3715 GHashTableIter iter;
3716 MonoJitDynamicMethodInfo *ji;
3719 * This might be a dynamic method, search its code manager. We can only
3720 * use the dynamic method containing CODE, since the others might be freed later.
3724 mono_domain_lock (domain);
3725 hash = domain_jit_info (domain)->dynamic_code_hash;
3727 /* FIXME: Speed this up */
3728 g_hash_table_iter_init (&iter, hash);
3729 while (g_hash_table_iter_next (&iter, NULL, (gpointer*)&ji)) {
3730 mono_code_manager_foreach (ji->code_mp, search_thunk_slot, &pdata);
3731 if (pdata.found == 1)
3735 mono_domain_unlock (domain);
3737 if (pdata.found != 1)
3738 g_print ("thunk failed for %p from %p\n", target, code);
3739 g_assert (pdata.found == 1);
/*
 * arm_patch_general:
 *
 *   Patch the instruction (sequence) at CODE to transfer control to TARGET.
 * Handles: direct B/BL (rewriting the 24-bit displacement, switching BL to
 * BLX for Thumb targets, falling back to handle_thunk () when out of
 * range), jump-table entries (USE_JUMP_TABLES), and the ldr/mov/bx-style
 * constant-pool call sequences, whose embedded address word is overwritten.
 * NOTE(review): extract is non-contiguous; some declarations (e.g. ccode)
 * and statements are missing between the visible lines.
 */
3743 arm_patch_general (MonoDomain *domain, guchar *code, const guchar *target, MonoCodeManager *dyn_code_mp)
3745 guint32 *code32 = (void*)code;
3746 guint32 ins = *code32;
/* Bits 27:25 of the A32 encoding select the instruction class; 101b = B/BL. */
3747 guint32 prim = (ins >> 25) & 7;
3748 guint32 tval = GPOINTER_TO_UINT (target);
3750 //g_print ("patching 0x%08x (0x%08x) to point to 0x%08x\n", code, ins, target);
3751 if (prim == 5) { /* 101b */
3752 /* the diff starts 8 bytes from the branch opcode */
3753 gint diff = target - code - 8;
3755 gint tmask = 0xffffffff;
3756 if (tval & 1) { /* entering thumb mode */
3757 diff = target - 1 - code - 8;
3758 g_assert (thumb_supported);
3759 tbits = 0xf << 28; /* bl->blx bit pattern */
3760 g_assert ((ins & (1 << 24))); /* it must be a bl, not b instruction */
3761 /* this low bit of the displacement is moved to bit 24 in the instruction encoding */
3765 tmask = ~(1 << 24); /* clear the link bit */
3766 /*g_print ("blx to thumb: target: %p, code: %p, diff: %d, mask: %x\n", target, code, diff, tmask);*/
/* Forward branch within the +/-32MB B/BL range. */
3771 if (diff <= 33554431) {
3773 ins = (ins & 0xff000000) | diff;
3775 *code32 = ins | tbits;
3779 /* diff between 0 and -33554432 */
3780 if (diff >= -33554432) {
3782 ins = (ins & 0xff000000) | (diff & ~0xff000000);
3784 *code32 = ins | tbits;
/* Target unreachable by a direct branch: route through a thunk. */
3789 handle_thunk (domain, TRUE, code, target, dyn_code_mp);
3793 #ifdef USE_JUMP_TABLES
3795 gpointer *jte = mono_jumptable_get_entry (code);
3797 jte [0] = (gpointer) target;
3801 * The alternative call sequences looks like this:
3803 * ldr ip, [pc] // loads the address constant
3804 * b 1f // jumps around the constant
3805 * address constant embedded in the code
3810 * There are two cases for patching:
3811 * a) at the end of method emission: in this case code points to the start
3812 * of the call sequence
3813 * b) during runtime patching of the call site: in this case code points
3814 * to the mov pc, ip instruction
3816 * We have to handle also the thunk jump code sequence:
3820 * address constant // execution never reaches here
/* BX rm: 0x12fff1x encoding. */
3822 if ((ins & 0x0ffffff0) == 0x12fff10) {
3823 /* Branch and exchange: the address is constructed in a reg
3824 * We can patch BX when the code sequence is the following:
3825 * ldr ip, [pc, #0] ; 0x8
/* Re-emit the expected sequence into ccode[] to compare against memory. */
3832 guint8 *emit = (guint8*)ccode;
3833 ARM_LDR_IMM (emit, ARMREG_IP, ARMREG_PC, 0);
3835 ARM_MOV_REG_REG (emit, ARMREG_LR, ARMREG_PC);
3836 ARM_BX (emit, ARMREG_IP);
3838 /*patching from magic trampoline*/
3839 if (ins == ccode [3]) {
3840 g_assert (code32 [-4] == ccode [0]);
3841 g_assert (code32 [-3] == ccode [1]);
3842 g_assert (code32 [-1] == ccode [2]);
3843 code32 [-2] = (guint32)target;
3846 /*patching from JIT*/
3847 if (ins == ccode [0]) {
3848 g_assert (code32 [1] == ccode [1]);
3849 g_assert (code32 [3] == ccode [2]);
3850 g_assert (code32 [4] == ccode [3]);
3851 code32 [2] = (guint32)target;
3854 g_assert_not_reached ();
/* BLX rm: 0x12fff3x encoding. */
3855 } else if ((ins & 0x0ffffff0) == 0x12fff30) {
3863 guint8 *emit = (guint8*)ccode;
3864 ARM_LDR_IMM (emit, ARMREG_IP, ARMREG_PC, 0);
3866 ARM_BLX_REG (emit, ARMREG_IP);
3868 g_assert (code32 [-3] == ccode [0]);
3869 g_assert (code32 [-2] == ccode [1]);
3870 g_assert (code32 [0] == ccode [2]);
3872 code32 [-1] = (guint32)target;
/* ldr/mov-lr/mov-pc (or bx) constant-pool call sequence. */
3875 guint32 *tmp = ccode;
3876 guint8 *emit = (guint8*)tmp;
3877 ARM_LDR_IMM (emit, ARMREG_IP, ARMREG_PC, 0);
3878 ARM_MOV_REG_REG (emit, ARMREG_LR, ARMREG_PC);
3879 ARM_MOV_REG_REG (emit, ARMREG_PC, ARMREG_IP);
3880 ARM_BX (emit, ARMREG_IP);
3881 if (ins == ccode [2]) {
3882 g_assert_not_reached (); // should be -2 ...
3883 code32 [-1] = (guint32)target;
3886 if (ins == ccode [0]) {
3887 /* handles both thunk jump code and the far call sequence */
3888 code32 [2] = (guint32)target;
3891 g_assert_not_reached ();
3893 // g_print ("patched with 0x%08x\n", ins);
/*
 * arm_patch:
 *
 *   Convenience wrapper: patch CODE to branch to TARGET with no domain or
 * dynamic-method code manager context (see arm_patch_general ()).
 */
3898 arm_patch (guchar *code, const guchar *target)
3900 arm_patch_general (NULL, code, target, NULL);
3904 * Return the >= 0 uimm8 value if val can be represented with a byte + rotation
3905 * (with the rotation amount in *rot_amount. rot_amount is already adjusted
3906 * to be used with the emit macros.
3907 * Return -1 otherwise.
3910 mono_arm_is_rotated_imm8 (guint32 val, gint *rot_amount)
/* Try every even rotation (ARM operand2 rotations are multiples of 2). */
3913 for (i = 0; i < 31; i+= 2) {
3914 res = (val << (32 - i)) | (val >> i);
3917 *rot_amount = i? 32 - i: 0;
3924 * Emits in code a sequence of instructions that load the value 'val'
3925 * into the dreg register. Uses at most 4 instructions.
3928 mono_arm_emit_load_imm (guint8 *code, int dreg, guint32 val)
3930 int imm8, rot_amount;
/* (Conditional path missing from this extract) PC-relative constant-pool load. */
3932 ARM_LDR_IMM (code, dreg, ARMREG_PC, 0);
3933 /* skip the constant pool */
/* Single-instruction forms: rotated imm8 as-is or via bitwise complement (MVN). */
3939 if ((imm8 = mono_arm_is_rotated_imm8 (val, &rot_amount)) >= 0) {
3940 ARM_MOV_REG_IMM (code, dreg, imm8, rot_amount);
3941 } else if ((imm8 = mono_arm_is_rotated_imm8 (~val, &rot_amount)) >= 0) {
3942 ARM_MVN_REG_IMM (code, dreg, imm8, rot_amount);
/* ARMv6T2+/v7: MOVW/MOVT pair builds any 32-bit constant in two insns. */
3945 ARM_MOVW_REG_IMM (code, dreg, val & 0xffff);
3947 ARM_MOVT_REG_IMM (code, dreg, (val >> 16) & 0xffff);
/* Legacy fallback: compose the value byte-by-byte with MOV + up to 3 ADDs,
 * starting from the lowest non-zero byte lane. */
3951 ARM_MOV_REG_IMM8 (code, dreg, (val & 0xFF));
3953 ARM_ADD_REG_IMM (code, dreg, dreg, (val & 0xFF00) >> 8, 24);
3955 if (val & 0xFF0000) {
3956 ARM_ADD_REG_IMM (code, dreg, dreg, (val & 0xFF0000) >> 16, 16);
3958 if (val & 0xFF000000) {
3959 ARM_ADD_REG_IMM (code, dreg, dreg, (val & 0xFF000000) >> 24, 8);
3961 } else if (val & 0xFF00) {
3962 ARM_MOV_REG_IMM (code, dreg, (val & 0xFF00) >> 8, 24);
3963 if (val & 0xFF0000) {
3964 ARM_ADD_REG_IMM (code, dreg, dreg, (val & 0xFF0000) >> 16, 16);
3966 if (val & 0xFF000000) {
3967 ARM_ADD_REG_IMM (code, dreg, dreg, (val & 0xFF000000) >> 24, 8);
3969 } else if (val & 0xFF0000) {
3970 ARM_MOV_REG_IMM (code, dreg, (val & 0xFF0000) >> 16, 16);
3971 if (val & 0xFF000000) {
3972 ARM_ADD_REG_IMM (code, dreg, dreg, (val & 0xFF000000) >> 24, 8);
3975 //g_assert_not_reached ();
/*
 * mono_arm_thumb_supported:
 *   Returns the cached thumb_supported flag (a file-level variable set
 *   elsewhere in this file) indicating whether Thumb is available.
 */
3981 mono_arm_thumb_supported (void)
3983 return thumb_supported;
3989 * emit_load_volatile_arguments:
3991 * Load volatile arguments from the stack to the original input registers.
3992 * Required before a tail call.
3995 emit_load_volatile_arguments (MonoCompile *cfg, guint8 *code)
3997 MonoMethod *method = cfg->method;
3998 MonoMethodSignature *sig;
4003 /* FIXME: Generate intermediate code instead */
4005 sig = mono_method_signature (method);
4007 /* This is the opposite of the code in emit_prolog */
/* Recompute the calling-convention layout for this signature. */
4011 cinfo = get_call_info (cfg->generic_sharing_context, NULL, sig);
/* Reload the hidden valuetype-return address argument into its
 * designated register from its stack home. */
4013 if (cinfo->vtype_retaddr) {
4014 ArgInfo *ainfo = &cinfo->ret;
4015 inst = cfg->vret_addr;
4016 g_assert (arm_is_imm12 (inst->inst_offset));
4017 ARM_LDR_IMM (code, ainfo->reg, inst->inst_basereg, inst->inst_offset);
/* Walk every formal argument (including the implicit 'this') and move
 * it back to the location the calling convention originally used. */
4019 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
4020 ArgInfo *ainfo = cinfo->args + i;
4021 inst = cfg->args [pos];
4023 if (cfg->verbose_level > 2)
4024 g_print ("Loading argument %d (type: %d)\n", i, ainfo->storage);
/* Argument currently lives in a register (OP_REGVAR). */
4025 if (inst->opcode == OP_REGVAR) {
4026 if (ainfo->storage == RegTypeGeneral)
4027 ARM_MOV_REG_REG (code, inst->dreg, ainfo->reg);
/* FP arguments in registers are not expected here. */
4028 else if (ainfo->storage == RegTypeFP) {
4029 g_assert_not_reached ();
/* Argument originally passed on the caller's stack: reload it
 * relative to SP. */
4030 } else if (ainfo->storage == RegTypeBase) {
4034 if (arm_is_imm12 (prev_sp_offset + ainfo->offset)) {
4035 ARM_LDR_IMM (code, inst->dreg, ARMREG_SP, (prev_sp_offset + ainfo->offset));
/* Offset too large for an imm12: materialize it in IP first. */
4037 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
4038 ARM_LDR_REG_REG (code, inst->dreg, ARMREG_SP, ARMREG_IP);
4042 g_assert_not_reached ();
/* Argument spilled to a stack slot; reload into the register(s) it
 * originally arrived in. */
4044 if (ainfo->storage == RegTypeGeneral || ainfo->storage == RegTypeIRegPair) {
4045 switch (ainfo->size) {
/* NOTE(review): this arm reloads a register pair with two word
 * loads (reg, reg + 1) — the switch labels themselves are not
 * visible here; confirm this is the size == 8 case. */
4052 g_assert (arm_is_imm12 (inst->inst_offset));
4053 ARM_LDR_IMM (code, ainfo->reg, inst->inst_basereg, inst->inst_offset);
4054 g_assert (arm_is_imm12 (inst->inst_offset + 4));
4055 ARM_LDR_IMM (code, ainfo->reg + 1, inst->inst_basereg, inst->inst_offset + 4);
/* Single-word reload, with the usual imm12 / IP-indexed fallback. */
4058 if (arm_is_imm12 (inst->inst_offset)) {
4059 ARM_LDR_IMM (code, ainfo->reg, inst->inst_basereg, inst->inst_offset);
4061 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
4062 ARM_LDR_REG_REG (code, ainfo->reg, inst->inst_basereg, ARMREG_IP);
4066 } else if (ainfo->storage == RegTypeBaseGen) {
4069 } else if (ainfo->storage == RegTypeBase) {
4071 } else if (ainfo->storage == RegTypeFP) {
4072 g_assert_not_reached ();
/* Struct passed by value in registers: reload it word by word into
 * consecutive registers starting at ainfo->reg. */
4073 } else if (ainfo->storage == RegTypeStructByVal) {
4074 int doffset = inst->inst_offset;
4078 if (mono_class_from_mono_type (inst->inst_vtype))
4079 size = mono_class_native_size (mono_class_from_mono_type (inst->inst_vtype), NULL);
4080 for (cur_reg = 0; cur_reg < ainfo->size; ++cur_reg) {
4081 if (arm_is_imm12 (doffset)) {
4082 ARM_LDR_IMM (code, ainfo->reg + cur_reg, inst->inst_basereg, doffset);
4084 code = mono_arm_emit_load_imm (code, ARMREG_IP, doffset);
4085 ARM_LDR_REG_REG (code, ainfo->reg + cur_reg, inst->inst_basereg, ARMREG_IP);
4087 soffset += sizeof (gpointer);
4088 doffset += sizeof (gpointer);
4093 } else if (ainfo->storage == RegTypeStructByAddr) {
4108 mono_arch_output_basic_block (MonoCompile *cfg, MonoBasicBlock *bb)
4113 guint8 *code = cfg->native_code + cfg->code_len;
4114 MonoInst *last_ins = NULL;
4115 guint last_offset = 0;
4117 int imm8, rot_amount;
4119 /* we don't align basic blocks of loops on arm */
4121 if (cfg->verbose_level > 2)
4122 g_print ("Basic block %d starting at offset 0x%x\n", bb->block_num, bb->native_offset);
4124 cpos = bb->max_offset;
4126 if (cfg->prof_options & MONO_PROFILE_COVERAGE) {
4127 //MonoCoverageInfo *cov = mono_get_coverage_info (cfg->method);
4128 //g_assert (!mono_compile_aot);
4131 // cov->data [bb->dfn].iloffset = bb->cil_code - cfg->cil_code;
4132 /* this is not thread save, but good enough */
4133 /* fixme: howto handle overflows? */
4134 //x86_inc_mem (code, &cov->data [bb->dfn].count);
4137 if (mono_break_at_bb_method && mono_method_desc_full_match (mono_break_at_bb_method, cfg->method) && bb->block_num == mono_break_at_bb_bb_num) {
4138 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_INTERNAL_METHOD,
4139 (gpointer)"mono_break");
4140 code = emit_call_seq (cfg, code);
4143 MONO_BB_FOR_EACH_INS (bb, ins) {
4144 offset = code - cfg->native_code;
4146 max_len = ((guint8 *)ins_get_spec (ins->opcode))[MONO_INST_LEN];
4148 if (offset > (cfg->code_size - max_len - 16)) {
4149 cfg->code_size *= 2;
4150 cfg->native_code = g_realloc (cfg->native_code, cfg->code_size);
4151 code = cfg->native_code + offset;
4153 // if (ins->cil_code)
4154 // g_print ("cil code\n");
4155 mono_debug_record_line_number (cfg, ins, offset);
4157 switch (ins->opcode) {
4158 case OP_MEMORY_BARRIER:
4160 ARM_MOV_REG_IMM8 (code, ARMREG_R0, 0);
4161 ARM_MCR (code, 15, 0, ARMREG_R0, 7, 10, 5);
4165 #ifdef HAVE_AEABI_READ_TP
4166 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_INTERNAL_METHOD,
4167 (gpointer)"__aeabi_read_tp");
4168 code = emit_call_seq (cfg, code);
4170 ARM_LDR_IMM (code, ins->dreg, ARMREG_R0, ins->inst_offset);
4172 g_assert_not_reached ();
4175 case OP_ATOMIC_EXCHANGE_I4:
4176 case OP_ATOMIC_CAS_I4:
4177 case OP_ATOMIC_ADD_NEW_I4: {
4181 g_assert (v7_supported);
4184 if (ins->sreg1 != ARMREG_IP && ins->sreg2 != ARMREG_IP && ins->sreg3 != ARMREG_IP)
4186 else if (ins->sreg1 != ARMREG_R0 && ins->sreg2 != ARMREG_R0 && ins->sreg3 != ARMREG_R0)
4188 else if (ins->sreg1 != ARMREG_R1 && ins->sreg2 != ARMREG_R1 && ins->sreg3 != ARMREG_R1)
4192 g_assert (cfg->arch.atomic_tmp_offset != -1);
4193 ARM_STR_IMM (code, tmpreg, cfg->frame_reg, cfg->arch.atomic_tmp_offset);
4195 switch (ins->opcode) {
4196 case OP_ATOMIC_EXCHANGE_I4:
4198 ARM_DMB (code, ARM_DMB_SY);
4199 ARM_LDREX_REG (code, ARMREG_LR, ins->sreg1);
4200 ARM_STREX_REG (code, tmpreg, ins->sreg2, ins->sreg1);
4201 ARM_CMP_REG_IMM (code, tmpreg, 0, 0);
4203 ARM_B_COND (code, ARMCOND_NE, 0);
4204 arm_patch (buf [1], buf [0]);
4206 case OP_ATOMIC_CAS_I4:
4207 ARM_DMB (code, ARM_DMB_SY);
4209 ARM_LDREX_REG (code, ARMREG_LR, ins->sreg1);
4210 ARM_CMP_REG_REG (code, ARMREG_LR, ins->sreg3);
4212 ARM_B_COND (code, ARMCOND_NE, 0);
4213 ARM_STREX_REG (code, tmpreg, ins->sreg2, ins->sreg1);
4214 ARM_CMP_REG_IMM (code, tmpreg, 0, 0);
4216 ARM_B_COND (code, ARMCOND_NE, 0);
4217 arm_patch (buf [2], buf [0]);
4218 arm_patch (buf [1], code);
4220 case OP_ATOMIC_ADD_NEW_I4:
4222 ARM_DMB (code, ARM_DMB_SY);
4223 ARM_LDREX_REG (code, ARMREG_LR, ins->sreg1);
4224 ARM_ADD_REG_REG (code, ARMREG_LR, ARMREG_LR, ins->sreg2);
4225 ARM_STREX_REG (code, tmpreg, ARMREG_LR, ins->sreg1);
4226 ARM_CMP_REG_IMM (code, tmpreg, 0, 0);
4228 ARM_B_COND (code, ARMCOND_NE, 0);
4229 arm_patch (buf [1], buf [0]);
4232 g_assert_not_reached ();
4235 ARM_DMB (code, ARM_DMB_SY);
4236 if (tmpreg != ins->dreg)
4237 ARM_LDR_IMM (code, tmpreg, cfg->frame_reg, cfg->arch.atomic_tmp_offset);
4238 ARM_MOV_REG_REG (code, ins->dreg, ARMREG_LR);
4243 ppc_mullw (code, ppc_r4, ins->sreg1, ins->sreg2);
4244 ppc_mulhw (code, ppc_r3, ins->sreg1, ins->sreg2);
4247 ppc_mullw (code, ppc_r4, ins->sreg1, ins->sreg2);
4248 ppc_mulhwu (code, ppc_r3, ins->sreg1, ins->sreg2);
4250 case OP_STOREI1_MEMBASE_IMM:
4251 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_imm & 0xFF);
4252 g_assert (arm_is_imm12 (ins->inst_offset));
4253 ARM_STRB_IMM (code, ARMREG_LR, ins->inst_destbasereg, ins->inst_offset);
4255 case OP_STOREI2_MEMBASE_IMM:
4256 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_imm & 0xFFFF);
4257 g_assert (arm_is_imm8 (ins->inst_offset));
4258 ARM_STRH_IMM (code, ARMREG_LR, ins->inst_destbasereg, ins->inst_offset);
4260 case OP_STORE_MEMBASE_IMM:
4261 case OP_STOREI4_MEMBASE_IMM:
4262 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_imm);
4263 g_assert (arm_is_imm12 (ins->inst_offset));
4264 ARM_STR_IMM (code, ARMREG_LR, ins->inst_destbasereg, ins->inst_offset);
4266 case OP_STOREI1_MEMBASE_REG:
4267 g_assert (arm_is_imm12 (ins->inst_offset));
4268 ARM_STRB_IMM (code, ins->sreg1, ins->inst_destbasereg, ins->inst_offset);
4270 case OP_STOREI2_MEMBASE_REG:
4271 g_assert (arm_is_imm8 (ins->inst_offset));
4272 ARM_STRH_IMM (code, ins->sreg1, ins->inst_destbasereg, ins->inst_offset);
4274 case OP_STORE_MEMBASE_REG:
4275 case OP_STOREI4_MEMBASE_REG:
4276 /* this case is special, since it happens for spill code after lowering has been called */
4277 if (arm_is_imm12 (ins->inst_offset)) {
4278 ARM_STR_IMM (code, ins->sreg1, ins->inst_destbasereg, ins->inst_offset);
4280 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_offset);
4281 ARM_STR_REG_REG (code, ins->sreg1, ins->inst_destbasereg, ARMREG_LR);
4284 case OP_STOREI1_MEMINDEX:
4285 ARM_STRB_REG_REG (code, ins->sreg1, ins->inst_destbasereg, ins->sreg2);
4287 case OP_STOREI2_MEMINDEX:
4288 ARM_STRH_REG_REG (code, ins->sreg1, ins->inst_destbasereg, ins->sreg2);
4290 case OP_STORE_MEMINDEX:
4291 case OP_STOREI4_MEMINDEX:
4292 ARM_STR_REG_REG (code, ins->sreg1, ins->inst_destbasereg, ins->sreg2);
4295 g_assert_not_reached ();
4297 case OP_LOAD_MEMINDEX:
4298 case OP_LOADI4_MEMINDEX:
4299 case OP_LOADU4_MEMINDEX:
4300 ARM_LDR_REG_REG (code, ins->dreg, ins->inst_basereg, ins->sreg2);
4302 case OP_LOADI1_MEMINDEX:
4303 ARM_LDRSB_REG_REG (code, ins->dreg, ins->inst_basereg, ins->sreg2);
4305 case OP_LOADU1_MEMINDEX:
4306 ARM_LDRB_REG_REG (code, ins->dreg, ins->inst_basereg, ins->sreg2);
4308 case OP_LOADI2_MEMINDEX:
4309 ARM_LDRSH_REG_REG (code, ins->dreg, ins->inst_basereg, ins->sreg2);
4311 case OP_LOADU2_MEMINDEX:
4312 ARM_LDRH_REG_REG (code, ins->dreg, ins->inst_basereg, ins->sreg2);
4314 case OP_LOAD_MEMBASE:
4315 case OP_LOADI4_MEMBASE:
4316 case OP_LOADU4_MEMBASE:
4317 /* this case is special, since it happens for spill code after lowering has been called */
4318 if (arm_is_imm12 (ins->inst_offset)) {
4319 ARM_LDR_IMM (code, ins->dreg, ins->inst_basereg, ins->inst_offset);
4321 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_offset);
4322 ARM_LDR_REG_REG (code, ins->dreg, ins->inst_basereg, ARMREG_LR);
4325 case OP_LOADI1_MEMBASE:
4326 g_assert (arm_is_imm8 (ins->inst_offset));
4327 ARM_LDRSB_IMM (code, ins->dreg, ins->inst_basereg, ins->inst_offset);
4329 case OP_LOADU1_MEMBASE:
4330 g_assert (arm_is_imm12 (ins->inst_offset));
4331 ARM_LDRB_IMM (code, ins->dreg, ins->inst_basereg, ins->inst_offset);
4333 case OP_LOADU2_MEMBASE:
4334 g_assert (arm_is_imm8 (ins->inst_offset));
4335 ARM_LDRH_IMM (code, ins->dreg, ins->inst_basereg, ins->inst_offset);
4337 case OP_LOADI2_MEMBASE:
4338 g_assert (arm_is_imm8 (ins->inst_offset));
4339 ARM_LDRSH_IMM (code, ins->dreg, ins->inst_basereg, ins->inst_offset);
4341 case OP_ICONV_TO_I1:
4342 ARM_SHL_IMM (code, ins->dreg, ins->sreg1, 24);
4343 ARM_SAR_IMM (code, ins->dreg, ins->dreg, 24);
4345 case OP_ICONV_TO_I2:
4346 ARM_SHL_IMM (code, ins->dreg, ins->sreg1, 16);
4347 ARM_SAR_IMM (code, ins->dreg, ins->dreg, 16);
4349 case OP_ICONV_TO_U1:
4350 ARM_AND_REG_IMM8 (code, ins->dreg, ins->sreg1, 0xff);
4352 case OP_ICONV_TO_U2:
4353 ARM_SHL_IMM (code, ins->dreg, ins->sreg1, 16);
4354 ARM_SHR_IMM (code, ins->dreg, ins->dreg, 16);
4358 ARM_CMP_REG_REG (code, ins->sreg1, ins->sreg2);
4360 case OP_COMPARE_IMM:
4361 case OP_ICOMPARE_IMM:
4362 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
4363 g_assert (imm8 >= 0);
4364 ARM_CMP_REG_IMM (code, ins->sreg1, imm8, rot_amount);
4368 * gdb does not like encountering the hw breakpoint ins in the debugged code.
4369 * So instead of emitting a trap, we emit a call a C function and place a
4372 //*(int*)code = 0xef9f0001;
4375 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_INTERNAL_METHOD,
4376 (gpointer)"mono_break");
4377 code = emit_call_seq (cfg, code);
4379 case OP_RELAXED_NOP:
4384 case OP_DUMMY_STORE:
4385 case OP_DUMMY_ICONST:
4386 case OP_DUMMY_R8CONST:
4387 case OP_NOT_REACHED:
4390 case OP_SEQ_POINT: {
4392 MonoInst *info_var = cfg->arch.seq_point_info_var;
4393 MonoInst *ss_trigger_page_var = cfg->arch.ss_trigger_page_var;
4394 MonoInst *ss_read_var = cfg->arch.seq_point_read_var;
4395 MonoInst *ss_method_var = cfg->arch.seq_point_ss_method_var;
4396 MonoInst *bp_method_var = cfg->arch.seq_point_bp_method_var;
4398 int dreg = ARMREG_LR;
4400 if (cfg->soft_breakpoints) {
4401 g_assert (!cfg->compile_aot);
4405 * For AOT, we use one got slot per method, which will point to a
4406 * SeqPointInfo structure, containing all the information required
4407 * by the code below.
4409 if (cfg->compile_aot) {
4410 g_assert (info_var);
4411 g_assert (info_var->opcode == OP_REGOFFSET);
4412 g_assert (arm_is_imm12 (info_var->inst_offset));
4415 if (!cfg->soft_breakpoints) {
4417 * Read from the single stepping trigger page. This will cause a
4418 * SIGSEGV when single stepping is enabled.
4419 * We do this _before_ the breakpoint, so single stepping after
4420 * a breakpoint is hit will step to the next IL offset.
4422 g_assert (((guint64)(gsize)ss_trigger_page >> 32) == 0);
4425 if (ins->flags & MONO_INST_SINGLE_STEP_LOC) {
4426 if (cfg->soft_breakpoints) {
4427 /* Load the address of the sequence point trigger variable. */
4430 g_assert (var->opcode == OP_REGOFFSET);
4431 g_assert (arm_is_imm12 (var->inst_offset));
4432 ARM_LDR_IMM (code, dreg, var->inst_basereg, var->inst_offset);
4434 /* Read the value and check whether it is non-zero. */
4435 ARM_LDR_IMM (code, dreg, dreg, 0);
4436 ARM_CMP_REG_IMM (code, dreg, 0, 0);
4438 /* Load the address of the sequence point method. */
4439 var = ss_method_var;
4441 g_assert (var->opcode == OP_REGOFFSET);
4442 g_assert (arm_is_imm12 (var->inst_offset));
4443 ARM_LDR_IMM (code, dreg, var->inst_basereg, var->inst_offset);
4445 /* Call it conditionally. */
4446 ARM_BLX_REG_COND (code, ARMCOND_NE, dreg);
4448 if (cfg->compile_aot) {
4449 /* Load the trigger page addr from the variable initialized in the prolog */
4450 var = ss_trigger_page_var;
4452 g_assert (var->opcode == OP_REGOFFSET);
4453 g_assert (arm_is_imm12 (var->inst_offset));
4454 ARM_LDR_IMM (code, dreg, var->inst_basereg, var->inst_offset);
4456 #ifdef USE_JUMP_TABLES
4457 gpointer *jte = mono_jumptable_add_entry ();
4458 code = mono_arm_load_jumptable_entry (code, jte, dreg);
4459 jte [0] = ss_trigger_page;
4461 ARM_LDR_IMM (code, dreg, ARMREG_PC, 0);
4463 *(int*)code = (int)ss_trigger_page;
4467 ARM_LDR_IMM (code, dreg, dreg, 0);
4471 mono_add_seq_point (cfg, bb, ins, code - cfg->native_code);
4473 if (cfg->soft_breakpoints) {
4474 /* Load the address of the breakpoint method into ip. */
4475 var = bp_method_var;
4477 g_assert (var->opcode == OP_REGOFFSET);
4478 g_assert (arm_is_imm12 (var->inst_offset));
4479 ARM_LDR_IMM (code, dreg, var->inst_basereg, var->inst_offset);
4482 * A placeholder for a possible breakpoint inserted by
4483 * mono_arch_set_breakpoint ().
4486 } else if (cfg->compile_aot) {
4487 guint32 offset = code - cfg->native_code;
4490 ARM_LDR_IMM (code, dreg, info_var->inst_basereg, info_var->inst_offset);
4491 /* Add the offset */
4492 val = ((offset / 4) * sizeof (guint8*)) + G_STRUCT_OFFSET (SeqPointInfo, bp_addrs);
4493 /* Load the info->bp_addrs [offset], which is either 0 or the address of a trigger page */
4494 if (arm_is_imm12 ((int)val)) {
4495 ARM_LDR_IMM (code, dreg, dreg, val);
4497 ARM_ADD_REG_IMM (code, dreg, dreg, (val & 0xFF), 0);
4499 ARM_ADD_REG_IMM (code, dreg, dreg, (val & 0xFF00) >> 8, 24);
4501 ARM_ADD_REG_IMM (code, dreg, dreg, (val & 0xFF0000) >> 16, 16);
4502 g_assert (!(val & 0xFF000000));
4504 ARM_LDR_IMM (code, dreg, dreg, 0);
4506 /* What is faster, a branch or a load ? */
4507 ARM_CMP_REG_IMM (code, dreg, 0, 0);
4508 /* The breakpoint instruction */
4509 ARM_LDR_IMM_COND (code, dreg, dreg, 0, ARMCOND_NE);
4512 * A placeholder for a possible breakpoint inserted by
4513 * mono_arch_set_breakpoint ().
4515 for (i = 0; i < 4; ++i)
4522 ARM_ADDS_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4525 ARM_ADD_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4529 ARM_ADCS_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4532 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
4533 g_assert (imm8 >= 0);
4534 ARM_ADDS_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
4538 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
4539 g_assert (imm8 >= 0);
4540 ARM_ADD_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
4544 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
4545 g_assert (imm8 >= 0);
4546 ARM_ADCS_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
4549 ARM_ADD_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4550 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, "OverflowException");
4552 case OP_IADD_OVF_UN:
4553 ARM_ADD_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4554 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, "OverflowException");
4557 ARM_SUB_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4558 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, "OverflowException");
4560 case OP_ISUB_OVF_UN:
4561 ARM_SUB_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4562 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_TRUE, PPC_BR_EQ, "OverflowException");
4564 case OP_ADD_OVF_CARRY:
4565 ARM_ADCS_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4566 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, "OverflowException");
4568 case OP_ADD_OVF_UN_CARRY:
4569 ARM_ADCS_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4570 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, "OverflowException");
4572 case OP_SUB_OVF_CARRY:
4573 ARM_SBCS_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4574 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, "OverflowException");
4576 case OP_SUB_OVF_UN_CARRY:
4577 ARM_SBCS_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4578 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_TRUE, PPC_BR_EQ, "OverflowException");
4582 ARM_SUBS_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4585 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
4586 g_assert (imm8 >= 0);
4587 ARM_SUBS_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
4590 ARM_SUB_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4594 ARM_SBCS_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4598 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
4599 g_assert (imm8 >= 0);
4600 ARM_SUB_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
4604 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
4605 g_assert (imm8 >= 0);
4606 ARM_SBCS_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
4608 case OP_ARM_RSBS_IMM:
4609 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
4610 g_assert (imm8 >= 0);
4611 ARM_RSBS_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
4613 case OP_ARM_RSC_IMM:
4614 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
4615 g_assert (imm8 >= 0);
4616 ARM_RSC_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
4619 ARM_AND_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4623 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
4624 g_assert (imm8 >= 0);
4625 ARM_AND_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
4628 g_assert (v7s_supported);
4629 ARM_SDIV (code, ins->dreg, ins->sreg1, ins->sreg2);
4632 g_assert (v7s_supported);
4633 ARM_UDIV (code, ins->dreg, ins->sreg1, ins->sreg2);
4636 g_assert (v7s_supported);
4637 ARM_SDIV (code, ARMREG_LR, ins->sreg1, ins->sreg2);
4638 ARM_MLS (code, ins->dreg, ARMREG_LR, ins->sreg2, ins->sreg1);
4641 g_assert (v7s_supported);
4642 ARM_UDIV (code, ARMREG_LR, ins->sreg1, ins->sreg2);
4643 ARM_MLS (code, ins->dreg, ARMREG_LR, ins->sreg2, ins->sreg1);
4647 g_assert_not_reached ();
4649 ARM_ORR_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4653 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
4654 g_assert (imm8 >= 0);
4655 ARM_ORR_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
4658 ARM_EOR_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4662 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
4663 g_assert (imm8 >= 0);
4664 ARM_EOR_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
4667 ARM_SHL_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4672 ARM_SHL_IMM (code, ins->dreg, ins->sreg1, (ins->inst_imm & 0x1f));
4673 else if (ins->dreg != ins->sreg1)
4674 ARM_MOV_REG_REG (code, ins->dreg, ins->sreg1);
4677 ARM_SAR_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4682 ARM_SAR_IMM (code, ins->dreg, ins->sreg1, (ins->inst_imm & 0x1f));
4683 else if (ins->dreg != ins->sreg1)
4684 ARM_MOV_REG_REG (code, ins->dreg, ins->sreg1);
4687 case OP_ISHR_UN_IMM:
4689 ARM_SHR_IMM (code, ins->dreg, ins->sreg1, (ins->inst_imm & 0x1f));
4690 else if (ins->dreg != ins->sreg1)
4691 ARM_MOV_REG_REG (code, ins->dreg, ins->sreg1);
4694 ARM_SHR_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4697 ARM_MVN_REG_REG (code, ins->dreg, ins->sreg1);
4700 ARM_RSB_REG_IMM8 (code, ins->dreg, ins->sreg1, 0);
4703 if (ins->dreg == ins->sreg2)
4704 ARM_MUL_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4706 ARM_MUL_REG_REG (code, ins->dreg, ins->sreg2, ins->sreg1);
4709 g_assert_not_reached ();
4712 /* FIXME: handle ovf/ sreg2 != dreg */
4713 ARM_MUL_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4714 /* FIXME: MUL doesn't set the C/O flags on ARM */
4716 case OP_IMUL_OVF_UN:
4717 /* FIXME: handle ovf/ sreg2 != dreg */
4718 ARM_MUL_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4719 /* FIXME: MUL doesn't set the C/O flags on ARM */
4722 code = mono_arm_emit_load_imm (code, ins->dreg, ins->inst_c0);
4725 /* Load the GOT offset */
4726 mono_add_patch_info (cfg, offset, (MonoJumpInfoType)ins->inst_i1, ins->inst_p0);
4727 ARM_LDR_IMM (code, ins->dreg, ARMREG_PC, 0);
4729 *(gpointer*)code = NULL;
4731 /* Load the value from the GOT */
4732 ARM_LDR_REG_REG (code, ins->dreg, ARMREG_PC, ins->dreg);
4734 case OP_OBJC_GET_SELECTOR:
4735 mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_OBJC_SELECTOR_REF, ins->inst_p0);
4736 ARM_LDR_IMM (code, ins->dreg, ARMREG_PC, 0);
4738 *(gpointer*)code = NULL;
4740 ARM_LDR_REG_REG (code, ins->dreg, ARMREG_PC, ins->dreg);
4742 case OP_ICONV_TO_I4:
4743 case OP_ICONV_TO_U4:
4745 if (ins->dreg != ins->sreg1)
4746 ARM_MOV_REG_REG (code, ins->dreg, ins->sreg1);
4749 int saved = ins->sreg2;
4750 if (ins->sreg2 == ARM_LSW_REG) {
4751 ARM_MOV_REG_REG (code, ARMREG_LR, ins->sreg2);
4754 if (ins->sreg1 != ARM_LSW_REG)
4755 ARM_MOV_REG_REG (code, ARM_LSW_REG, ins->sreg1);
4756 if (saved != ARM_MSW_REG)
4757 ARM_MOV_REG_REG (code, ARM_MSW_REG, saved);
4762 ARM_CPYD (code, ins->dreg, ins->sreg1);
4764 case OP_FCONV_TO_R4:
4766 ARM_CVTD (code, ins->dreg, ins->sreg1);
4767 ARM_CVTS (code, ins->dreg, ins->dreg);
4772 * Keep in sync with mono_arch_emit_epilog
4774 g_assert (!cfg->method->save_lmf);
4776 code = emit_load_volatile_arguments (cfg, code);
4778 code = emit_big_add (code, ARMREG_SP, cfg->frame_reg, cfg->stack_usage);
4780 if (cfg->used_int_regs)
4781 ARM_POP (code, cfg->used_int_regs);
4782 ARM_POP (code, (1 << ARMREG_R7) | (1 << ARMREG_LR));
4784 ARM_POP (code, cfg->used_int_regs | (1 << ARMREG_LR));
4786 mono_add_patch_info (cfg, (guint8*) code - cfg->native_code, MONO_PATCH_INFO_METHOD_JUMP, ins->inst_p0);
4787 if (cfg->compile_aot) {
4788 ARM_LDR_IMM (code, ARMREG_IP, ARMREG_PC, 0);
4790 *(gpointer*)code = NULL;
4792 ARM_LDR_REG_REG (code, ARMREG_PC, ARMREG_PC, ARMREG_IP);
4794 code = mono_arm_patchable_b (code, ARMCOND_AL);
4798 MonoCallInst *call = (MonoCallInst*)ins;
4801 * The stack looks like the following:
4802 * <caller argument area>
4805 * <callee argument area>
4806 * Need to copy the arguments from the callee argument area to
4807 * the caller argument area, and pop the frame.
4809 if (call->stack_usage) {
4810 int i, prev_sp_offset = 0;
4812 /* Compute size of saved registers restored below */
4814 prev_sp_offset = 2 * 4;
4816 prev_sp_offset = 1 * 4;
4817 for (i = 0; i < 16; ++i) {
4818 if (cfg->used_int_regs & (1 << i))
4819 prev_sp_offset += 4;
4822 code = emit_big_add (code, ARMREG_IP, cfg->frame_reg, cfg->stack_usage + prev_sp_offset);
4824 /* Copy arguments on the stack to our argument area */
4825 for (i = 0; i < call->stack_usage; i += sizeof (mgreg_t)) {
4826 ARM_LDR_IMM (code, ARMREG_LR, ARMREG_SP, i);
4827 ARM_STR_IMM (code, ARMREG_LR, ARMREG_IP, i);
4832 * Keep in sync with mono_arch_emit_epilog
4834 g_assert (!cfg->method->save_lmf);
4836 code = emit_big_add (code, ARMREG_SP, cfg->frame_reg, cfg->stack_usage);
4838 if (cfg->used_int_regs)
4839 ARM_POP (code, cfg->used_int_regs);
4840 ARM_POP (code, (1 << ARMREG_R7) | (1 << ARMREG_LR));
4842 ARM_POP (code, cfg->used_int_regs | (1 << ARMREG_LR));
4845 mono_add_patch_info (cfg, (guint8*) code - cfg->native_code, MONO_PATCH_INFO_METHOD_JUMP, call->method);
4846 if (cfg->compile_aot) {
4847 ARM_LDR_IMM (code, ARMREG_IP, ARMREG_PC, 0);
4849 *(gpointer*)code = NULL;
4851 ARM_LDR_REG_REG (code, ARMREG_PC, ARMREG_PC, ARMREG_IP);
4853 code = mono_arm_patchable_b (code, ARMCOND_AL);
4858 /* ensure ins->sreg1 is not NULL */
4859 ARM_LDRB_IMM (code, ARMREG_LR, ins->sreg1, 0);
4862 g_assert (cfg->sig_cookie < 128);
4863 ARM_LDR_IMM (code, ARMREG_IP, cfg->frame_reg, cfg->sig_cookie);
4864 ARM_STR_IMM (code, ARMREG_IP, ins->sreg1, 0);
4873 call = (MonoCallInst*)ins;
4876 code = emit_float_args (cfg, call, code, &max_len, &offset);
4878 if (ins->flags & MONO_INST_HAS_METHOD)
4879 mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_METHOD, call->method);
4881 mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_ABS, call->fptr);
4882 code = emit_call_seq (cfg, code);
4883 ins->flags |= MONO_INST_GC_CALLSITE;
4884 ins->backend.pc_offset = code - cfg->native_code;
4885 code = emit_move_return_value (cfg, ins, code);
4891 case OP_VOIDCALL_REG:
4894 code = emit_float_args (cfg, (MonoCallInst *)ins, code, &max_len, &offset);
4896 code = emit_call_reg (code, ins->sreg1);
4897 ins->flags |= MONO_INST_GC_CALLSITE;
4898 ins->backend.pc_offset = code - cfg->native_code;
4899 code = emit_move_return_value (cfg, ins, code);
4901 case OP_FCALL_MEMBASE:
4902 case OP_LCALL_MEMBASE:
4903 case OP_VCALL_MEMBASE:
4904 case OP_VCALL2_MEMBASE:
4905 case OP_VOIDCALL_MEMBASE:
4906 case OP_CALL_MEMBASE: {
4907 gboolean imt_arg = FALSE;
4909 g_assert (ins->sreg1 != ARMREG_LR);
4910 call = (MonoCallInst*)ins;
4913 code = emit_float_args (cfg, call, code, &max_len, &offset);
4915 if (call->dynamic_imt_arg || call->method->klass->flags & TYPE_ATTRIBUTE_INTERFACE)
4917 if (!arm_is_imm12 (ins->inst_offset))
4918 code = mono_arm_emit_load_imm (code, ARMREG_IP, ins->inst_offset);
4919 #ifdef USE_JUMP_TABLES
4925 ARM_ADD_REG_IMM8 (code, ARMREG_LR, ARMREG_PC, LR_BIAS);
4927 ARM_MOV_REG_REG (code, ARMREG_LR, ARMREG_PC);
4929 if (!arm_is_imm12 (ins->inst_offset))
4930 ARM_LDR_REG_REG (code, ARMREG_PC, ins->sreg1, ARMREG_IP);
4932 ARM_LDR_IMM (code, ARMREG_PC, ins->sreg1, ins->inst_offset);
4935 * We can't embed the method in the code stream in PIC code, or
4937 * Instead, we put it in V5 in code emitted by
4938 * mono_arch_emit_imt_argument (), and embed NULL here to
4939 * signal the IMT thunk that the value is in V5.
4941 #ifdef USE_JUMP_TABLES
4942 /* In case of jumptables we always use value in V5. */
4945 if (call->dynamic_imt_arg)
4946 *((gpointer*)code) = NULL;
4948 *((gpointer*)code) = (gpointer)call->method;
4952 ins->flags |= MONO_INST_GC_CALLSITE;
4953 ins->backend.pc_offset = code - cfg->native_code;
4954 code = emit_move_return_value (cfg, ins, code);
4958 /* keep alignment */
4959 int alloca_waste = cfg->param_area;
4962 /* round the size to 8 bytes */
4963 ARM_ADD_REG_IMM8 (code, ins->dreg, ins->sreg1, 7);
4964 ARM_BIC_REG_IMM8 (code, ins->dreg, ins->dreg, 7);
4966 ARM_ADD_REG_IMM8 (code, ins->dreg, ins->dreg, alloca_waste);
4967 ARM_SUB_REG_REG (code, ARMREG_SP, ARMREG_SP, ins->dreg);
4968 /* memzero the area: dreg holds the size, sp is the pointer */
4969 if (ins->flags & MONO_INST_INIT) {
4970 guint8 *start_loop, *branch_to_cond;
4971 ARM_MOV_REG_IMM8 (code, ARMREG_LR, 0);
4972 branch_to_cond = code;
4975 ARM_STR_REG_REG (code, ARMREG_LR, ARMREG_SP, ins->dreg);
4976 arm_patch (branch_to_cond, code);
4977 /* decrement by 4 and set flags */
4978 ARM_SUBS_REG_IMM8 (code, ins->dreg, ins->dreg, sizeof (mgreg_t));
4979 ARM_B_COND (code, ARMCOND_GE, 0);
4980 arm_patch (code - 4, start_loop);
4982 ARM_ADD_REG_IMM8 (code, ins->dreg, ARMREG_SP, alloca_waste);
4987 MonoInst *var = cfg->dyn_call_var;
4989 g_assert (var->opcode == OP_REGOFFSET);
4990 g_assert (arm_is_imm12 (var->inst_offset));
4992 /* lr = args buffer filled by mono_arch_get_dyn_call_args () */
4993 ARM_MOV_REG_REG( code, ARMREG_LR, ins->sreg1);
4995 ARM_MOV_REG_REG( code, ARMREG_IP, ins->sreg2);
4997 /* Save args buffer */
4998 ARM_STR_IMM (code, ARMREG_LR, var->inst_basereg, var->inst_offset);
5000 /* Set stack slots using R0 as scratch reg */
5001 /* MONO_ARCH_DYN_CALL_PARAM_AREA gives the size of stack space available */
5002 for (i = 0; i < DYN_CALL_STACK_ARGS; ++i) {
5003 ARM_LDR_IMM (code, ARMREG_R0, ARMREG_LR, (PARAM_REGS + i) * sizeof (mgreg_t));
5004 ARM_STR_IMM (code, ARMREG_R0, ARMREG_SP, i * sizeof (mgreg_t));
5007 /* Set argument registers */
5008 for (i = 0; i < PARAM_REGS; ++i)
5009 ARM_LDR_IMM (code, i, ARMREG_LR, i * sizeof (mgreg_t));
5012 ARM_MOV_REG_REG (code, ARMREG_LR, ARMREG_PC);
5013 ARM_MOV_REG_REG (code, ARMREG_PC, ARMREG_IP);
5016 ARM_LDR_IMM (code, ARMREG_IP, var->inst_basereg, var->inst_offset);
5017 ARM_STR_IMM (code, ARMREG_R0, ARMREG_IP, G_STRUCT_OFFSET (DynCallArgs, res));
5018 ARM_STR_IMM (code, ARMREG_R1, ARMREG_IP, G_STRUCT_OFFSET (DynCallArgs, res2));
5022 if (ins->sreg1 != ARMREG_R0)
5023 ARM_MOV_REG_REG (code, ARMREG_R0, ins->sreg1);
5024 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_INTERNAL_METHOD,
5025 (gpointer)"mono_arch_throw_exception");
5026 code = emit_call_seq (cfg, code);
5030 if (ins->sreg1 != ARMREG_R0)
5031 ARM_MOV_REG_REG (code, ARMREG_R0, ins->sreg1);
5032 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_INTERNAL_METHOD,
5033 (gpointer)"mono_arch_rethrow_exception");
5034 code = emit_call_seq (cfg, code);
5037 case OP_START_HANDLER: {
5038 MonoInst *spvar = mono_find_spvar_for_region (cfg, bb->region);
5041 /* Reserve a param area, see filter-stack.exe */
5042 if (cfg->param_area) {
5043 if ((i = mono_arm_is_rotated_imm8 (cfg->param_area, &rot_amount)) >= 0) {
5044 ARM_SUB_REG_IMM (code, ARMREG_SP, ARMREG_SP, i, rot_amount);
5046 code = mono_arm_emit_load_imm (code, ARMREG_IP, cfg->param_area);
5047 ARM_SUB_REG_REG (code, ARMREG_SP, ARMREG_SP, ARMREG_IP);
5051 if (arm_is_imm12 (spvar->inst_offset)) {
5052 ARM_STR_IMM (code, ARMREG_LR, spvar->inst_basereg, spvar->inst_offset);
5054 code = mono_arm_emit_load_imm (code, ARMREG_IP, spvar->inst_offset);
5055 ARM_STR_REG_REG (code, ARMREG_LR, spvar->inst_basereg, ARMREG_IP);
5059 case OP_ENDFILTER: {
5060 MonoInst *spvar = mono_find_spvar_for_region (cfg, bb->region);
5063 /* Free the param area */
5064 if (cfg->param_area) {
5065 if ((i = mono_arm_is_rotated_imm8 (cfg->param_area, &rot_amount)) >= 0) {
5066 ARM_ADD_REG_IMM (code, ARMREG_SP, ARMREG_SP, i, rot_amount);
5068 code = mono_arm_emit_load_imm (code, ARMREG_IP, cfg->param_area);
5069 ARM_ADD_REG_REG (code, ARMREG_SP, ARMREG_SP, ARMREG_IP);
5073 if (ins->sreg1 != ARMREG_R0)
5074 ARM_MOV_REG_REG (code, ARMREG_R0, ins->sreg1);
5075 if (arm_is_imm12 (spvar->inst_offset)) {
5076 ARM_LDR_IMM (code, ARMREG_IP, spvar->inst_basereg, spvar->inst_offset);
5078 g_assert (ARMREG_IP != spvar->inst_basereg);
5079 code = mono_arm_emit_load_imm (code, ARMREG_IP, spvar->inst_offset);
5080 ARM_LDR_REG_REG (code, ARMREG_IP, spvar->inst_basereg, ARMREG_IP);
5082 ARM_MOV_REG_REG (code, ARMREG_PC, ARMREG_IP);
5085 case OP_ENDFINALLY: {
5086 MonoInst *spvar = mono_find_spvar_for_region (cfg, bb->region);
5089 /* Free the param area */
5090 if (cfg->param_area) {
5091 if ((i = mono_arm_is_rotated_imm8 (cfg->param_area, &rot_amount)) >= 0) {
5092 ARM_ADD_REG_IMM (code, ARMREG_SP, ARMREG_SP, i, rot_amount);
5094 code = mono_arm_emit_load_imm (code, ARMREG_IP, cfg->param_area);
5095 ARM_ADD_REG_REG (code, ARMREG_SP, ARMREG_SP, ARMREG_IP);
5099 if (arm_is_imm12 (spvar->inst_offset)) {
5100 ARM_LDR_IMM (code, ARMREG_IP, spvar->inst_basereg, spvar->inst_offset);
5102 g_assert (ARMREG_IP != spvar->inst_basereg);
5103 code = mono_arm_emit_load_imm (code, ARMREG_IP, spvar->inst_offset);
5104 ARM_LDR_REG_REG (code, ARMREG_IP, spvar->inst_basereg, ARMREG_IP);
5106 ARM_MOV_REG_REG (code, ARMREG_PC, ARMREG_IP);
5109 case OP_CALL_HANDLER:
5110 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_BB, ins->inst_target_bb);
5111 code = mono_arm_patchable_bl (code, ARMCOND_AL);
5112 mono_cfg_add_try_hole (cfg, ins->inst_eh_block, code, bb);
5115 ins->inst_c0 = code - cfg->native_code;
5118 /*if (ins->inst_target_bb->native_offset) {
5120 //x86_jump_code (code, cfg->native_code + ins->inst_target_bb->native_offset);
5122 mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_BB, ins->inst_target_bb);
5123 code = mono_arm_patchable_b (code, ARMCOND_AL);
5127 ARM_MOV_REG_REG (code, ARMREG_PC, ins->sreg1);
5131 * In the normal case we have:
5132 * ldr pc, [pc, ins->sreg1 << 2]
5135 * ldr lr, [pc, ins->sreg1 << 2]
5137 * After follows the data.
5138 * FIXME: add aot support.
5140 mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_SWITCH, ins->inst_p0);
5141 #ifdef USE_JUMP_TABLES
5143 gpointer *jte = mono_jumptable_add_entries (GPOINTER_TO_INT (ins->klass));
5144 code = mono_arm_load_jumptable_entry_addr (code, jte, ARMREG_IP);
5145 ARM_LDR_REG_REG_SHIFT (code, ARMREG_PC, ARMREG_IP, ins->sreg1, ARMSHIFT_LSL, 2);
5149 max_len += 4 * GPOINTER_TO_INT (ins->klass);
5150 if (offset + max_len > (cfg->code_size - 16)) {
5151 cfg->code_size += max_len;
5152 cfg->code_size *= 2;
5153 cfg->native_code = g_realloc (cfg->native_code, cfg->code_size);
5154 code = cfg->native_code + offset;
5156 ARM_LDR_REG_REG_SHIFT (code, ARMREG_PC, ARMREG_PC, ins->sreg1, ARMSHIFT_LSL, 2);
5158 code += 4 * GPOINTER_TO_INT (ins->klass);
5163 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 0, ARMCOND_NE);
5164 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_EQ);
5168 ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
5169 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_LT);
5173 ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
5174 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_LO);
5178 ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
5179 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_GT);
5183 ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
5184 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_HI);
5187 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_NE);
5188 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 0, ARMCOND_EQ);
5191 ARM_MOV_REG_IMM8 (code, ins->dreg, 1);
5192 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 0, ARMCOND_LT);
5195 ARM_MOV_REG_IMM8 (code, ins->dreg, 1);
5196 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 0, ARMCOND_GT);
5200 ARM_MOV_REG_IMM8 (code, ins->dreg, 1);
5201 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 0, ARMCOND_LO);
5203 case OP_COND_EXC_EQ:
5204 case OP_COND_EXC_NE_UN:
5205 case OP_COND_EXC_LT:
5206 case OP_COND_EXC_LT_UN:
5207 case OP_COND_EXC_GT:
5208 case OP_COND_EXC_GT_UN:
5209 case OP_COND_EXC_GE:
5210 case OP_COND_EXC_GE_UN:
5211 case OP_COND_EXC_LE:
5212 case OP_COND_EXC_LE_UN:
5213 EMIT_COND_SYSTEM_EXCEPTION (ins->opcode - OP_COND_EXC_EQ, ins->inst_p1);
5215 case OP_COND_EXC_IEQ:
5216 case OP_COND_EXC_INE_UN:
5217 case OP_COND_EXC_ILT:
5218 case OP_COND_EXC_ILT_UN:
5219 case OP_COND_EXC_IGT:
5220 case OP_COND_EXC_IGT_UN:
5221 case OP_COND_EXC_IGE:
5222 case OP_COND_EXC_IGE_UN:
5223 case OP_COND_EXC_ILE:
5224 case OP_COND_EXC_ILE_UN:
5225 EMIT_COND_SYSTEM_EXCEPTION (ins->opcode - OP_COND_EXC_IEQ, ins->inst_p1);
5228 case OP_COND_EXC_IC:
5229 EMIT_COND_SYSTEM_EXCEPTION_FLAGS (ARMCOND_CS, ins->inst_p1);
5231 case OP_COND_EXC_OV:
5232 case OP_COND_EXC_IOV:
5233 EMIT_COND_SYSTEM_EXCEPTION_FLAGS (ARMCOND_VS, ins->inst_p1);
5235 case OP_COND_EXC_NC:
5236 case OP_COND_EXC_INC:
5237 EMIT_COND_SYSTEM_EXCEPTION_FLAGS (ARMCOND_CC, ins->inst_p1);
5239 case OP_COND_EXC_NO:
5240 case OP_COND_EXC_INO:
5241 EMIT_COND_SYSTEM_EXCEPTION_FLAGS (ARMCOND_VC, ins->inst_p1);
5253 EMIT_COND_BRANCH (ins, ins->opcode - OP_IBEQ);
5256 /* floating point opcodes */
5258 if (cfg->compile_aot) {
5259 ARM_FLDD (code, ins->dreg, ARMREG_PC, 0);
5261 *(guint32*)code = ((guint32*)(ins->inst_p0))[0];
5263 *(guint32*)code = ((guint32*)(ins->inst_p0))[1];
5266 /* FIXME: we can optimize the imm load by dealing with part of
5267 * the displacement in LDFD (aligning to 512).
5269 code = mono_arm_emit_load_imm (code, ARMREG_LR, (guint32)ins->inst_p0);
5270 ARM_FLDD (code, ins->dreg, ARMREG_LR, 0);
5274 if (cfg->compile_aot) {
5275 ARM_FLDS (code, ins->dreg, ARMREG_PC, 0);
5277 *(guint32*)code = ((guint32*)(ins->inst_p0))[0];
5279 ARM_CVTS (code, ins->dreg, ins->dreg);
5281 code = mono_arm_emit_load_imm (code, ARMREG_LR, (guint32)ins->inst_p0);
5282 ARM_FLDS (code, ins->dreg, ARMREG_LR, 0);
5283 ARM_CVTS (code, ins->dreg, ins->dreg);
5286 case OP_STORER8_MEMBASE_REG:
5287 /* This is generated by the local regalloc pass which runs after the lowering pass */
5288 if (!arm_is_fpimm8 (ins->inst_offset)) {
5289 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_offset);
5290 ARM_ADD_REG_REG (code, ARMREG_LR, ARMREG_LR, ins->inst_destbasereg);
5291 ARM_FSTD (code, ins->sreg1, ARMREG_LR, 0);
5293 ARM_FSTD (code, ins->sreg1, ins->inst_destbasereg, ins->inst_offset);
5296 case OP_LOADR8_MEMBASE:
5297 /* This is generated by the local regalloc pass which runs after the lowering pass */
5298 if (!arm_is_fpimm8 (ins->inst_offset)) {
5299 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_offset);
5300 ARM_ADD_REG_REG (code, ARMREG_LR, ARMREG_LR, ins->inst_basereg);
5301 ARM_FLDD (code, ins->dreg, ARMREG_LR, 0);
5303 ARM_FLDD (code, ins->dreg, ins->inst_basereg, ins->inst_offset);
5306 case OP_STORER4_MEMBASE_REG:
5307 g_assert (arm_is_fpimm8 (ins->inst_offset));
5308 code = mono_arm_emit_vfp_scratch_save (cfg, code, vfp_scratch1);
5309 ARM_CVTD (code, vfp_scratch1, ins->sreg1);
5310 ARM_FSTS (code, vfp_scratch1, ins->inst_destbasereg, ins->inst_offset);
5311 code = mono_arm_emit_vfp_scratch_restore (cfg, code, vfp_scratch1);
5313 case OP_LOADR4_MEMBASE:
5314 g_assert (arm_is_fpimm8 (ins->inst_offset));
5315 code = mono_arm_emit_vfp_scratch_save (cfg, code, vfp_scratch1);
5316 ARM_FLDS (code, vfp_scratch1, ins->inst_basereg, ins->inst_offset);
5317 ARM_CVTS (code, ins->dreg, vfp_scratch1);
5318 code = mono_arm_emit_vfp_scratch_restore (cfg, code, vfp_scratch1);
5320 case OP_ICONV_TO_R_UN: {
5321 g_assert_not_reached ();
5324 case OP_ICONV_TO_R4:
5325 code = mono_arm_emit_vfp_scratch_save (cfg, code, vfp_scratch1);
5326 ARM_FMSR (code, vfp_scratch1, ins->sreg1);
5327 ARM_FSITOS (code, vfp_scratch1, vfp_scratch1);
5328 ARM_CVTS (code, ins->dreg, vfp_scratch1);
5329 code = mono_arm_emit_vfp_scratch_restore (cfg, code, vfp_scratch1);
5331 case OP_ICONV_TO_R8:
5332 code = mono_arm_emit_vfp_scratch_save (cfg, code, vfp_scratch1);
5333 ARM_FMSR (code, vfp_scratch1, ins->sreg1);
5334 ARM_FSITOD (code, ins->dreg, vfp_scratch1);
5335 code = mono_arm_emit_vfp_scratch_restore (cfg, code, vfp_scratch1);
5339 if (mono_method_signature (cfg->method)->ret->type == MONO_TYPE_R4) {
5340 ARM_CVTD (code, ARM_VFP_F0, ins->sreg1);
5342 if (!IS_HARD_FLOAT) {
5343 ARM_FMRS (code, ARMREG_R0, ARM_VFP_F0);
5346 if (IS_HARD_FLOAT) {
5347 ARM_CPYD (code, ARM_VFP_D0, ins->sreg1);
5349 ARM_FMRRD (code, ARMREG_R0, ARMREG_R1, ins->sreg1);
5353 case OP_FCONV_TO_I1:
5354 code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 1, TRUE);
5356 case OP_FCONV_TO_U1:
5357 code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 1, FALSE);
5359 case OP_FCONV_TO_I2:
5360 code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 2, TRUE);
5362 case OP_FCONV_TO_U2:
5363 code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 2, FALSE);
5365 case OP_FCONV_TO_I4:
5367 code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 4, TRUE);
5369 case OP_FCONV_TO_U4:
5371 code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 4, FALSE);
5373 case OP_FCONV_TO_I8:
5374 case OP_FCONV_TO_U8:
5375 g_assert_not_reached ();
5376 /* Implemented as helper calls */
5378 case OP_LCONV_TO_R_UN:
5379 g_assert_not_reached ();
5380 /* Implemented as helper calls */
5382 case OP_LCONV_TO_OVF_I4_2: {
5383 guint8 *high_bit_not_set, *valid_negative, *invalid_negative, *valid_positive;
5385 * Valid ints: 0xffffffff:80000000 to 00000000:0x7fffffff
5388 ARM_CMP_REG_IMM8 (code, ins->sreg1, 0);
5389 high_bit_not_set = code;
5390 ARM_B_COND (code, ARMCOND_GE, 0); /*branch if bit 31 of the lower part is not set*/
5392 ARM_CMN_REG_IMM8 (code, ins->sreg2, 1); /*This have the same effect as CMP reg, 0xFFFFFFFF */
5393 valid_negative = code;
5394 ARM_B_COND (code, ARMCOND_EQ, 0); /*branch if upper part == 0xFFFFFFFF (lower part has bit 31 set) */
5395 invalid_negative = code;
5396 ARM_B_COND (code, ARMCOND_AL, 0);
5398 arm_patch (high_bit_not_set, code);
5400 ARM_CMP_REG_IMM8 (code, ins->sreg2, 0);
5401 valid_positive = code;
5402 ARM_B_COND (code, ARMCOND_EQ, 0); /*branch if upper part == 0 (lower part has bit 31 clear)*/
5404 arm_patch (invalid_negative, code);
5405 EMIT_COND_SYSTEM_EXCEPTION_FLAGS (ARMCOND_AL, "OverflowException");
5407 arm_patch (valid_negative, code);
5408 arm_patch (valid_positive, code);
5410 if (ins->dreg != ins->sreg1)
5411 ARM_MOV_REG_REG (code, ins->dreg, ins->sreg1);
5415 ARM_VFP_ADDD (code, ins->dreg, ins->sreg1, ins->sreg2);
5418 ARM_VFP_SUBD (code, ins->dreg, ins->sreg1, ins->sreg2);
5421 ARM_VFP_MULD (code, ins->dreg, ins->sreg1, ins->sreg2);
5424 ARM_VFP_DIVD (code, ins->dreg, ins->sreg1, ins->sreg2);
5427 ARM_NEGD (code, ins->dreg, ins->sreg1);
5431 g_assert_not_reached ();
5435 ARM_CMPD (code, ins->sreg1, ins->sreg2);
5441 ARM_CMPD (code, ins->sreg1, ins->sreg2);
5444 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 0, ARMCOND_NE);
5445 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_EQ);
5449 ARM_CMPD (code, ins->sreg1, ins->sreg2);
5452 ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
5453 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_MI);
5457 ARM_CMPD (code, ins->sreg1, ins->sreg2);
5460 ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
5461 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_MI);
5462 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_VS);
5466 ARM_CMPD (code, ins->sreg2, ins->sreg1);
5469 ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
5470 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_MI);
5474 ARM_CMPD (code, ins->sreg2, ins->sreg1);
5477 ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
5478 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_MI);
5479 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_VS);
5483 ARM_CMPD (code, ins->sreg1, ins->sreg2);
5486 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_NE);
5487 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 0, ARMCOND_EQ);
5491 ARM_CMPD (code, ins->sreg1, ins->sreg2);
5494 ARM_MOV_REG_IMM8 (code, ins->dreg, 1);
5495 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 0, ARMCOND_MI);
5499 ARM_CMPD (code, ins->sreg2, ins->sreg1);
5502 ARM_MOV_REG_IMM8 (code, ins->dreg, 1);
5503 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 0, ARMCOND_MI);
5506 /* ARM FPA flags table:
5507 * N Less than ARMCOND_MI
5508 * Z Equal ARMCOND_EQ
5509 * C Greater Than or Equal ARMCOND_CS
5510 * V Unordered ARMCOND_VS
5513 EMIT_COND_BRANCH (ins, OP_IBEQ - OP_IBEQ);
5516 EMIT_COND_BRANCH (ins, OP_IBNE_UN - OP_IBEQ);
5519 EMIT_COND_BRANCH_FLAGS (ins, ARMCOND_MI); /* N set */
5522 EMIT_COND_BRANCH_FLAGS (ins, ARMCOND_VS); /* V set */
5523 EMIT_COND_BRANCH_FLAGS (ins, ARMCOND_MI); /* N set */
5529 g_assert_not_reached ();
5533 EMIT_COND_BRANCH_FLAGS (ins, ARMCOND_GE);
5535 /* FPA requires EQ even thou the docs suggests that just CS is enough */
5536 EMIT_COND_BRANCH_FLAGS (ins, ARMCOND_EQ);
5537 EMIT_COND_BRANCH_FLAGS (ins, ARMCOND_CS);
5541 EMIT_COND_BRANCH_FLAGS (ins, ARMCOND_VS); /* V set */
5542 EMIT_COND_BRANCH_FLAGS (ins, ARMCOND_GE);
5547 code = mono_arm_emit_vfp_scratch_save (cfg, code, vfp_scratch1);
5548 code = mono_arm_emit_vfp_scratch_save (cfg, code, vfp_scratch2);
5550 #ifdef USE_JUMP_TABLES
5552 gpointer *jte = mono_jumptable_add_entries (2);
5553 jte [0] = GUINT_TO_POINTER (0xffffffff);
5554 jte [1] = GUINT_TO_POINTER (0x7fefffff);
5555 code = mono_arm_load_jumptable_entry_addr (code, jte, ARMREG_IP);
5556 ARM_FLDD (code, vfp_scratch1, ARMREG_IP, 0);
5559 ARM_ABSD (code, vfp_scratch2, ins->sreg1);
5560 ARM_FLDD (code, vfp_scratch1, ARMREG_PC, 0);
5562 *(guint32*)code = 0xffffffff;
5564 *(guint32*)code = 0x7fefffff;
5567 ARM_CMPD (code, vfp_scratch2, vfp_scratch1);
5569 EMIT_COND_SYSTEM_EXCEPTION_FLAGS (ARMCOND_GT, "ArithmeticException");
5570 ARM_CMPD (code, ins->sreg1, ins->sreg1);
5572 EMIT_COND_SYSTEM_EXCEPTION_FLAGS (ARMCOND_VS, "ArithmeticException");
5573 ARM_CPYD (code, ins->dreg, ins->sreg1);
5575 code = mono_arm_emit_vfp_scratch_restore (cfg, code, vfp_scratch1);
5576 code = mono_arm_emit_vfp_scratch_restore (cfg, code, vfp_scratch2);
5581 case OP_GC_LIVENESS_DEF:
5582 case OP_GC_LIVENESS_USE:
5583 case OP_GC_PARAM_SLOT_LIVENESS_DEF:
5584 ins->backend.pc_offset = code - cfg->native_code;
5586 case OP_GC_SPILL_SLOT_LIVENESS_DEF:
5587 ins->backend.pc_offset = code - cfg->native_code;
5588 bb->spill_slot_defs = g_slist_prepend_mempool (cfg->mempool, bb->spill_slot_defs, ins);
5592 g_warning ("unknown opcode %s in %s()\n", mono_inst_name (ins->opcode), __FUNCTION__);
5593 g_assert_not_reached ();
5596 if ((cfg->opt & MONO_OPT_BRANCH) && ((code - cfg->native_code - offset) > max_len)) {
5597 g_warning ("wrong maximal instruction length of instruction %s (expected %d, got %d)",
5598 mono_inst_name (ins->opcode), max_len, code - cfg->native_code - offset);
5599 g_assert_not_reached ();
5605 last_offset = offset;
5608 cfg->code_len = code - cfg->native_code;
5611 #endif /* DISABLE_JIT */
/*
 * NOTE(review): this listing is an elided excerpt — the embedded original line
 * numbers skip (5615-5617, 5619, 5623, 5627-5630), so the function's return-type
 * line, opening/closing braces and the matching #endif lines are not visible here.
 */
5613 #ifdef HAVE_AEABI_READ_TP
/* EABI TLS-pointer helper; only declared when HAVE_AEABI_READ_TP is set
 * (Linux EABI, non-Android, non-NaCl — see the guard at the top of the file). */
5614 void __aeabi_read_tp (void);
/* Register the ARM low-level exception-throw helpers as JIT icalls so the
 * code generator can emit calls to them by name. */
5618 mono_arch_register_lowlevel_calls (void)
5620 /* The signature doesn't matter */
5621 mono_register_jit_icall (mono_arm_throw_exception, "mono_arm_throw_exception", mono_create_icall_signature ("void"), TRUE);
5622 mono_register_jit_icall (mono_arm_throw_exception_by_token, "mono_arm_throw_exception_by_token", mono_create_icall_signature ("void"), TRUE);
/* __aeabi_read_tp can only be registered when actually linking against a
 * target libc that provides it, hence the cross-compile guard. */
5624 #ifndef MONO_CROSS_COMPILE
5625 #ifdef HAVE_AEABI_READ_TP
5626 mono_register_jit_icall (__aeabi_read_tp, "__aeabi_read_tp", mono_create_icall_signature ("void"), TRUE);
/*
 * patch_lis_ori: patch the 16-bit immediate halves of a lis+ori instruction
 * pair at `ip` with the 32-bit value `val` (high half into the second
 * halfword, low half into the fourth).
 *
 * NOTE(review): `lis`/`ori` are PowerPC mnemonics, not ARM — this macro is
 * only reached from g_assert_not_reached() branches in mono_arch_patch_code
 * below, so it appears to be dead code inherited from the PPC backend;
 * confirm before relying on it. The closing `} while (0)` (original line
 * 5635) is elided from this excerpt.
 */
5631 #define patch_lis_ori(ip,val) do {\
5632 guint16 *__lis_ori = (guint16*)(ip); \
5633 __lis_ori [1] = (((guint32)(val)) >> 16) & 0xffff; \
5634 __lis_ori [3] = ((guint32)(val)) & 0xffff; \
/*
 * mono_arch_patch_code:
 * Walk the MonoJumpInfo list `ji` for a freshly emitted method and resolve
 * each patch site in `code` to its final target.
 *
 * NOTE(review): elided excerpt — original line numbers skip (5639, 5642,
 * 5646, 5650, 5652-5653, 5657, 5660-5663, the break;s between cases, and the
 * closing braces at 5717-5719), so #else/#endif pairs and fall-through
 * structure are not fully visible here.
 */
5638 mono_arch_patch_code (MonoMethod *method, MonoDomain *domain, guint8 *code, MonoJumpInfo *ji, MonoCodeManager *dyn_code_mp, gboolean run_cctors)
5640 MonoJumpInfo *patch_info;
/* run_cctors is false exactly when compiling AOT (cctors must not run then). */
5641 gboolean compile_aot = !run_cctors;
5643 for (patch_info = ji; patch_info; patch_info = patch_info->next) {
/* ip.i is an offset from the start of the method's native code. */
5644 unsigned char *ip = patch_info->ip.i + code;
5645 const unsigned char *target;
/* Switch tables are handled specially in JIT mode: the table entries are
 * rewritten in place rather than going through arm_patch_general. */
5647 if (patch_info->type == MONO_PATCH_INFO_SWITCH && !compile_aot) {
5648 #ifdef USE_JUMP_TABLES
5649 gpointer *jt = mono_jumptable_get_entry (ip);
/* Without jump tables the table is inlined in the code stream after ip
 * (the "+ 8" skips the two instructions emitted for OP_SWITCH). */
5651 gpointer *jt = (gpointer*)(ip + 8);
5654 /* jt is the inlined jump table, 2 instructions after ip
5655 * In the normal case we store the absolute addresses,
5656 * otherwise the displacements.
5658 for (i = 0; i < patch_info->data.table->table_size; i++)
5659 jt [i] = code + (int)patch_info->data.table->table [i];
5664 switch (patch_info->type) {
5665 case MONO_PATCH_INFO_BB:
5666 case MONO_PATCH_INFO_LABEL:
5669 /* No need to patch these */
5674 target = mono_resolve_patch_target (method, domain, code, patch_info, run_cctors);
5676 switch (patch_info->type) {
/* NOTE(review): every patch_lis_ori/direct-store branch below is preceded by
 * g_assert_not_reached() — these look like dead PowerPC-backend leftovers
 * (lis/ori are PPC instructions); the live path is arm_patch_general at the
 * bottom. Confirm before removing. */
5677 case MONO_PATCH_INFO_IP:
5678 g_assert_not_reached ();
5679 patch_lis_ori (ip, ip);
5681 case MONO_PATCH_INFO_METHOD_REL:
5682 g_assert_not_reached ();
5683 *((gpointer *)(ip)) = code + patch_info->data.offset;
5685 case MONO_PATCH_INFO_METHODCONST:
5686 case MONO_PATCH_INFO_CLASS:
5687 case MONO_PATCH_INFO_IMAGE:
5688 case MONO_PATCH_INFO_FIELD:
5689 case MONO_PATCH_INFO_VTABLE:
5690 case MONO_PATCH_INFO_IID:
5691 case MONO_PATCH_INFO_SFLDA:
5692 case MONO_PATCH_INFO_LDSTR:
5693 case MONO_PATCH_INFO_TYPE_FROM_HANDLE:
5694 case MONO_PATCH_INFO_LDTOKEN:
5695 g_assert_not_reached ();
5696 /* from OP_AOTCONST : lis + ori */
5697 patch_lis_ori (ip, target);
5699 case MONO_PATCH_INFO_R4:
5700 case MONO_PATCH_INFO_R8:
5701 g_assert_not_reached ();
5702 *((gconstpointer *)(ip + 2)) = patch_info->data.target;
5704 case MONO_PATCH_INFO_EXC_NAME:
5705 g_assert_not_reached ();
5706 *((gconstpointer *)(ip + 1)) = patch_info->data.name;
5708 case MONO_PATCH_INFO_NONE:
5709 case MONO_PATCH_INFO_BB_OVF:
5710 case MONO_PATCH_INFO_EXC_OVF:
5711 /* everything is dealt with at epilog output time */
/* Default/live path: patch the branch or load at ip to reach `target`. */
5716 arm_patch_general (domain, ip, target, dyn_code_mp);
5723 * Stack frame layout:
5725 * ------------------- fp
5726 * MonoLMF structure or saved registers
5727 * -------------------
5729 * -------------------
5731 * -------------------
5732 * optional 8 bytes for tracing
5733 * -------------------
5734 * param area size is cfg->param_area
5735 * ------------------- sp
/*
 * mono_arch_emit_prolog:
 * Emit the method prologue: allocate the native code buffer, push callee-saved
 * registers (or the LMF), allocate and align the stack frame, emit unwind/GC
 * info, store the rgctx, move incoming arguments to their home locations, and
 * initialize sequence-point bookkeeping variables.
 *
 * NOTE(review): elided excerpt — original line numbers skip throughout
 * (e.g. 5739, 5741, 5745-5748, 5752-5753, 5757, the else/closing-brace lines
 * 5780-5781, 5787, 5794, 5801-5804, 5835-5836, 5839, 5941/5944/5947-5949
 * case labels, 6088, 6090-6093, 6188-6192), so else-branches, `case` labels,
 * braces and the iphone_abi guard structure are not fully visible here.
 * Comments below only describe what the visible lines establish.
 */
5738 mono_arch_emit_prolog (MonoCompile *cfg)
5740 MonoMethod *method = cfg->method;
5742 MonoMethodSignature *sig;
5744 int alloc_size, orig_alloc_size, pos, max_offset, i, rot_amount;
5749 int prev_sp_offset, reg_offset;
5751 if (mono_jit_trace_calls != NULL && mono_trace_eval (method))
/* Initial buffer size heuristic: grows with the argument count; reallocated
 * later if exceeded (see the realloc in emit_epilog). */
5754 sig = mono_method_signature (method);
5755 cfg->code_size = 256 + sig->param_count * 64;
5756 code = cfg->native_code = g_malloc (cfg->code_size);
/* CFA starts at sp with offset 0; updated after each push below. */
5758 mono_emit_unwind_op_def_cfa (cfg, code, ARMREG_SP, 0);
5760 alloc_size = cfg->stack_offset;
/* iphone ABI: push r7/lr and point r7 at the pair so native unwinders work,
 * but keep using our own frame pointer (presumably guarded by iphone_abi —
 * the guard line is elided from this excerpt). */
5766 * The iphone uses R7 as the frame pointer, and it points at the saved
5771 * We can't use r7 as a frame pointer since it points into the middle of
5772 * the frame, so we keep using our own frame pointer.
5773 * FIXME: Optimize this.
5775 ARM_PUSH (code, (1 << ARMREG_R7) | (1 << ARMREG_LR));
5776 ARM_MOV_REG_REG (code, ARMREG_R7, ARMREG_SP);
5777 prev_sp_offset += 8; /* r7 and lr */
5778 mono_emit_unwind_op_def_cfa_offset (cfg, code, prev_sp_offset);
5779 mono_emit_unwind_op_offset (cfg, code, ARMREG_R7, (- prev_sp_offset) + 0);
/* Non-LMF path: push only the used callee-saved registers (plus lr unless
 * it was already pushed above), then record their CFA offsets as NOREF. */
5782 if (!method->save_lmf) {
5784 /* No need to push LR again */
5785 if (cfg->used_int_regs)
5786 ARM_PUSH (code, cfg->used_int_regs);
5788 ARM_PUSH (code, cfg->used_int_regs | (1 << ARMREG_LR));
5789 prev_sp_offset += 4;
5791 for (i = 0; i < 16; ++i) {
5792 if (cfg->used_int_regs & (1 << i))
5793 prev_sp_offset += 4;
5795 mono_emit_unwind_op_def_cfa_offset (cfg, code, prev_sp_offset);
5797 for (i = 0; i < 16; ++i) {
5798 if ((cfg->used_int_regs & (1 << i))) {
5799 mono_emit_unwind_op_offset (cfg, code, i, (- prev_sp_offset) + reg_offset);
5800 mini_gc_set_slot_type_from_cfa (cfg, (- prev_sp_offset) + reg_offset, SLOT_NOREF);
5805 mono_emit_unwind_op_offset (cfg, code, ARMREG_LR, -4);
5806 mini_gc_set_slot_type_from_cfa (cfg, -4, SLOT_NOREF);
5808 mono_emit_unwind_op_offset (cfg, code, ARMREG_LR, -4);
5809 mini_gc_set_slot_type_from_cfa (cfg, -4, SLOT_NOREF);
/* LMF path: 0x5ff0 = r4-r12 plus lr — everything but r0-r3, sp and pc. */
5812 ARM_MOV_REG_REG (code, ARMREG_IP, ARMREG_SP);
5813 ARM_PUSH (code, 0x5ff0);
5814 prev_sp_offset += 4 * 10; /* all but r0-r3, sp and pc */
5815 mono_emit_unwind_op_def_cfa_offset (cfg, code, prev_sp_offset);
5817 for (i = 0; i < 16; ++i) {
5818 if ((i > ARMREG_R3) && (i != ARMREG_SP) && (i != ARMREG_PC)) {
5819 /* The original r7 is saved at the start */
5820 if (!(iphone_abi && i == ARMREG_R7))
5821 mono_emit_unwind_op_offset (cfg, code, i, (- prev_sp_offset) + reg_offset);
5825 g_assert (reg_offset == 4 * 10);
5826 pos += sizeof (MonoLMF) - (4 * 10);
/* Round the local-variable area up to the frame alignment; the padding slots
 * are marked NOREF for the GC further below. */
5830 orig_alloc_size = alloc_size;
5831 // align to MONO_ARCH_FRAME_ALIGNMENT bytes
5832 if (alloc_size & (MONO_ARCH_FRAME_ALIGNMENT - 1)) {
5833 alloc_size += MONO_ARCH_FRAME_ALIGNMENT - 1;
5834 alloc_size &= ~(MONO_ARCH_FRAME_ALIGNMENT - 1);
5837 /* the stack used in the pushed regs */
5838 if (prev_sp_offset & 4)
5840 cfg->stack_usage = alloc_size;
/* sp -= alloc_size; prefer a rotated-imm8 SUB, else load into ip first. */
5842 if ((i = mono_arm_is_rotated_imm8 (alloc_size, &rot_amount)) >= 0) {
5843 ARM_SUB_REG_IMM (code, ARMREG_SP, ARMREG_SP, i, rot_amount);
5845 code = mono_arm_emit_load_imm (code, ARMREG_IP, alloc_size);
5846 ARM_SUB_REG_REG (code, ARMREG_SP, ARMREG_SP, ARMREG_IP);
5848 mono_emit_unwind_op_def_cfa_offset (cfg, code, prev_sp_offset + alloc_size);
5850 if (cfg->frame_reg != ARMREG_SP) {
5851 ARM_MOV_REG_REG (code, cfg->frame_reg, ARMREG_SP);
5852 mono_emit_unwind_op_def_cfa_reg (cfg, code, cfg->frame_reg);
5854 //g_print ("prev_sp_offset: %d, alloc_size:%d\n", prev_sp_offset, alloc_size);
5855 prev_sp_offset += alloc_size;
5857 for (i = 0; i < alloc_size - orig_alloc_size; i += 4)
5858 mini_gc_set_slot_type_from_cfa (cfg, (- prev_sp_offset) + orig_alloc_size + i, SLOT_NOREF);
5860 /* compute max_offset in order to use short forward jumps
5861 * we could skip do it on arm because the immediate displacement
5862 * for jumps is large enough, it may be useful later for constant pools
5865 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
5866 MonoInst *ins = bb->code;
5867 bb->max_offset = max_offset;
5869 if (cfg->prof_options & MONO_PROFILE_COVERAGE)
5872 MONO_BB_FOR_EACH_INS (bb, ins)
5873 max_offset += ((guint8 *)ins_get_spec (ins->opcode))[MONO_INST_LEN];
5876 /* store runtime generic context */
5877 if (cfg->rgctx_var) {
5878 MonoInst *ins = cfg->rgctx_var;
5880 g_assert (ins->opcode == OP_REGOFFSET);
5882 if (arm_is_imm12 (ins->inst_offset)) {
5883 ARM_STR_IMM (code, MONO_ARCH_RGCTX_REG, ins->inst_basereg, ins->inst_offset);
5885 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_offset);
5886 ARM_STR_REG_REG (code, MONO_ARCH_RGCTX_REG, ins->inst_basereg, ARMREG_LR);
5890 /* load arguments allocated to register from the stack */
5893 cinfo = get_call_info (cfg->generic_sharing_context, NULL, sig);
/* Spill the valuetype-return address register to its stack slot. */
5895 if (cinfo->vtype_retaddr) {
5896 ArgInfo *ainfo = &cinfo->ret;
5897 inst = cfg->vret_addr;
5898 g_assert (arm_is_imm12 (inst->inst_offset));
5899 ARM_STR_IMM (code, ainfo->reg, inst->inst_basereg, inst->inst_offset);
5902 if (sig->call_convention == MONO_CALL_VARARG) {
5903 ArgInfo *cookie = &cinfo->sig_cookie;
5905 /* Save the sig cookie address */
5906 g_assert (cookie->storage == RegTypeBase);
5908 g_assert (arm_is_imm12 (prev_sp_offset + cookie->offset));
5909 g_assert (arm_is_imm12 (cfg->sig_cookie));
5910 ARM_ADD_REG_IMM8 (code, ARMREG_IP, cfg->frame_reg, prev_sp_offset + cookie->offset);
5911 ARM_STR_IMM (code, ARMREG_IP, cfg->frame_reg, cfg->sig_cookie);
/* Home every incoming argument: either copy into its allocated register
 * (OP_REGVAR) or store it to its stack slot, per ainfo->storage and size. */
5914 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
5915 ArgInfo *ainfo = cinfo->args + i;
5916 inst = cfg->args [pos];
5918 if (cfg->verbose_level > 2)
5919 g_print ("Saving argument %d (type: %d)\n", i, ainfo->storage);
5920 if (inst->opcode == OP_REGVAR) {
5921 if (ainfo->storage == RegTypeGeneral)
5922 ARM_MOV_REG_REG (code, inst->dreg, ainfo->reg);
5923 else if (ainfo->storage == RegTypeFP) {
5924 g_assert_not_reached ();
5925 } else if (ainfo->storage == RegTypeBase) {
5926 if (arm_is_imm12 (prev_sp_offset + ainfo->offset)) {
5927 ARM_LDR_IMM (code, inst->dreg, ARMREG_SP, (prev_sp_offset + ainfo->offset));
5929 code = mono_arm_emit_load_imm (code, ARMREG_IP, prev_sp_offset + ainfo->offset);
5930 ARM_LDR_REG_REG (code, inst->dreg, ARMREG_SP, ARMREG_IP);
5933 g_assert_not_reached ();
5935 if (cfg->verbose_level > 2)
5936 g_print ("Argument %d assigned to register %s\n", pos, mono_arch_regname (inst->dreg));
5938 /* the argument should be put on the stack: FIXME handle size != word */
/* Register-passed argument stored to its frame slot; the size switch's
 * case labels (1/2/8/default) are elided from this excerpt. */
5939 if (ainfo->storage == RegTypeGeneral || ainfo->storage == RegTypeIRegPair || ainfo->storage == RegTypeGSharedVtInReg) {
5940 switch (ainfo->size) {
5942 if (arm_is_imm12 (inst->inst_offset))
5943 ARM_STRB_IMM (code, ainfo->reg, inst->inst_basereg, inst->inst_offset);
5945 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
5946 ARM_STRB_REG_REG (code, ainfo->reg, inst->inst_basereg, ARMREG_IP);
5950 if (arm_is_imm8 (inst->inst_offset)) {
5951 ARM_STRH_IMM (code, ainfo->reg, inst->inst_basereg, inst->inst_offset);
5953 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
5954 ARM_STRH_REG_REG (code, ainfo->reg, inst->inst_basereg, ARMREG_IP);
5958 if (arm_is_imm12 (inst->inst_offset)) {
5959 ARM_STR_IMM (code, ainfo->reg, inst->inst_basereg, inst->inst_offset);
5961 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
5962 ARM_STR_REG_REG (code, ainfo->reg, inst->inst_basereg, ARMREG_IP);
5964 if (arm_is_imm12 (inst->inst_offset + 4)) {
5965 ARM_STR_IMM (code, ainfo->reg + 1, inst->inst_basereg, inst->inst_offset + 4);
5967 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset + 4);
5968 ARM_STR_REG_REG (code, ainfo->reg + 1, inst->inst_basereg, ARMREG_IP);
5972 if (arm_is_imm12 (inst->inst_offset)) {
5973 ARM_STR_IMM (code, ainfo->reg, inst->inst_basereg, inst->inst_offset);
5975 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
5976 ARM_STR_REG_REG (code, ainfo->reg, inst->inst_basereg, ARMREG_IP);
/* RegTypeBaseGen: 64-bit value split between r3 and a caller stack slot. */
5980 } else if (ainfo->storage == RegTypeBaseGen) {
5981 if (arm_is_imm12 (prev_sp_offset + ainfo->offset)) {
5982 ARM_LDR_IMM (code, ARMREG_LR, ARMREG_SP, (prev_sp_offset + ainfo->offset));
5984 code = mono_arm_emit_load_imm (code, ARMREG_IP, prev_sp_offset + ainfo->offset);
5985 ARM_LDR_REG_REG (code, ARMREG_LR, ARMREG_SP, ARMREG_IP);
5987 if (arm_is_imm12 (inst->inst_offset + 4)) {
5988 ARM_STR_IMM (code, ARMREG_LR, inst->inst_basereg, inst->inst_offset + 4);
5989 ARM_STR_IMM (code, ARMREG_R3, inst->inst_basereg, inst->inst_offset);
5991 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset + 4);
5992 ARM_STR_REG_REG (code, ARMREG_LR, inst->inst_basereg, ARMREG_IP);
5993 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
5994 ARM_STR_REG_REG (code, ARMREG_R3, inst->inst_basereg, ARMREG_IP);
/* RegTypeBase: argument arrives on the caller's stack; copy via lr. */
5996 } else if (ainfo->storage == RegTypeBase || ainfo->storage == RegTypeGSharedVtOnStack) {
5997 if (arm_is_imm12 (prev_sp_offset + ainfo->offset)) {
5998 ARM_LDR_IMM (code, ARMREG_LR, ARMREG_SP, (prev_sp_offset + ainfo->offset));
6000 code = mono_arm_emit_load_imm (code, ARMREG_IP, prev_sp_offset + ainfo->offset);
6001 ARM_LDR_REG_REG (code, ARMREG_LR, ARMREG_SP, ARMREG_IP);
6004 switch (ainfo->size) {
6006 if (arm_is_imm8 (inst->inst_offset)) {
6007 ARM_STRB_IMM (code, ARMREG_LR, inst->inst_basereg, inst->inst_offset);
6009 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
6010 ARM_STRB_REG_REG (code, ARMREG_LR, inst->inst_basereg, ARMREG_IP);
6014 if (arm_is_imm8 (inst->inst_offset)) {
6015 ARM_STRH_IMM (code, ARMREG_LR, inst->inst_basereg, inst->inst_offset);
6017 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
6018 ARM_STRH_REG_REG (code, ARMREG_LR, inst->inst_basereg, ARMREG_IP);
6022 if (arm_is_imm12 (inst->inst_offset)) {
6023 ARM_STR_IMM (code, ARMREG_LR, inst->inst_basereg, inst->inst_offset);
6025 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
6026 ARM_STR_REG_REG (code, ARMREG_LR, inst->inst_basereg, ARMREG_IP);
6028 if (arm_is_imm12 (prev_sp_offset + ainfo->offset + 4)) {
6029 ARM_LDR_IMM (code, ARMREG_LR, ARMREG_SP, (prev_sp_offset + ainfo->offset + 4));
6031 code = mono_arm_emit_load_imm (code, ARMREG_IP, prev_sp_offset + ainfo->offset + 4);
6032 ARM_LDR_REG_REG (code, ARMREG_LR, ARMREG_SP, ARMREG_IP);
6034 if (arm_is_imm12 (inst->inst_offset + 4)) {
6035 ARM_STR_IMM (code, ARMREG_LR, inst->inst_basereg, inst->inst_offset + 4);
6037 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset + 4);
6038 ARM_STR_REG_REG (code, ARMREG_LR, inst->inst_basereg, ARMREG_IP);
6042 if (arm_is_imm12 (inst->inst_offset)) {
6043 ARM_STR_IMM (code, ARMREG_LR, inst->inst_basereg, inst->inst_offset);
6045 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
6046 ARM_STR_REG_REG (code, ARMREG_LR, inst->inst_basereg, ARMREG_IP);
/* RegTypeFP: hard-float argument stored via FSTD/FSTS at base + offset. */
6050 } else if (ainfo->storage == RegTypeFP) {
6051 int imm8, rot_amount;
6053 if ((imm8 = mono_arm_is_rotated_imm8 (inst->inst_offset, &rot_amount)) == -1) {
6054 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
6055 ARM_ADD_REG_REG (code, ARMREG_IP, ARMREG_IP, inst->inst_basereg);
6057 ARM_ADD_REG_IMM (code, ARMREG_IP, inst->inst_basereg, imm8, rot_amount);
6059 if (ainfo->size == 8)
6060 ARM_FSTD (code, ainfo->reg, ARMREG_IP, 0);
6062 ARM_FSTS (code, ainfo->reg, ARMREG_IP, 0);
/* RegTypeStructByVal: first ainfo->size words came in registers, the rest
 * (vtsize words) are copied from the caller's stack area. */
6063 } else if (ainfo->storage == RegTypeStructByVal) {
6064 int doffset = inst->inst_offset;
6068 size = mini_type_stack_size_full (cfg->generic_sharing_context, inst->inst_vtype, NULL, sig->pinvoke);
6069 for (cur_reg = 0; cur_reg < ainfo->size; ++cur_reg) {
6070 if (arm_is_imm12 (doffset)) {
6071 ARM_STR_IMM (code, ainfo->reg + cur_reg, inst->inst_basereg, doffset);
6073 code = mono_arm_emit_load_imm (code, ARMREG_IP, doffset);
6074 ARM_STR_REG_REG (code, ainfo->reg + cur_reg, inst->inst_basereg, ARMREG_IP);
6076 soffset += sizeof (gpointer);
6077 doffset += sizeof (gpointer);
6079 if (ainfo->vtsize) {
6080 /* FIXME: handle overrun! with struct sizes not multiple of 4 */
6081 //g_print ("emit_memcpy (prev_sp_ofs: %d, ainfo->offset: %d, soffset: %d)\n", prev_sp_offset, ainfo->offset, soffset);
6082 code = emit_memcpy (code, ainfo->vtsize * sizeof (gpointer), inst->inst_basereg, doffset, ARMREG_SP, prev_sp_offset + ainfo->offset);
6084 } else if (ainfo->storage == RegTypeStructByAddr) {
6085 g_assert_not_reached ();
6086 /* FIXME: handle overrun! with struct sizes not multiple of 4 */
6087 code = emit_memcpy (code, ainfo->vtsize * sizeof (gpointer), inst->inst_basereg, inst->inst_offset, ainfo->reg, 0);
6089 g_assert_not_reached ();
6094 if (method->save_lmf)
6095 code = emit_save_lmf (cfg, code, alloc_size - lmf_offset);
6098 code = mono_arch_instrument_prolog (cfg, mono_trace_enter_method, code, TRUE);
/* Sequence-point support: load the SeqPointInfo pointer (via GOT slot or an
 * inline constant pool word) and store it into its frame variable. */
6100 if (cfg->arch.seq_point_info_var) {
6101 MonoInst *ins = cfg->arch.seq_point_info_var;
6103 /* Initialize the variable from a GOT slot */
6104 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_SEQ_POINT_INFO, cfg->method);
6105 #ifdef USE_JUMP_TABLES
6107 gpointer *jte = mono_jumptable_add_entry ();
6108 code = mono_arm_load_jumptable_entry (code, jte, ARMREG_IP);
6109 ARM_LDR_IMM (code, ARMREG_R0, ARMREG_IP, 0);
6111 /** XXX: is it correct? */
6113 ARM_LDR_IMM (code, ARMREG_R0, ARMREG_PC, 0);
6115 *(gpointer*)code = NULL;
6118 ARM_LDR_REG_REG (code, ARMREG_R0, ARMREG_PC, ARMREG_R0);
6120 g_assert (ins->opcode == OP_REGOFFSET);
6122 if (arm_is_imm12 (ins->inst_offset)) {
6123 ARM_STR_IMM (code, ARMREG_R0, ins->inst_basereg, ins->inst_offset);
6125 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_offset);
6126 ARM_STR_REG_REG (code, ARMREG_R0, ins->inst_basereg, ARMREG_LR);
6130 /* Initialize ss_trigger_page_var */
6131 if (!cfg->soft_breakpoints) {
6132 MonoInst *info_var = cfg->arch.seq_point_info_var;
6133 MonoInst *ss_trigger_page_var = cfg->arch.ss_trigger_page_var;
6134 int dreg = ARMREG_LR;
6137 g_assert (info_var->opcode == OP_REGOFFSET);
6138 g_assert (arm_is_imm12 (info_var->inst_offset));
6140 ARM_LDR_IMM (code, dreg, info_var->inst_basereg, info_var->inst_offset);
6141 /* Load the trigger page addr */
6142 ARM_LDR_IMM (code, dreg, dreg, G_STRUCT_OFFSET (SeqPointInfo, ss_trigger_page));
6143 ARM_STR_IMM (code, dreg, ss_trigger_page_var->inst_basereg, ss_trigger_page_var->inst_offset);
/* Soft-breakpoint support: cache &ss_trigger_var and the single-step /
 * breakpoint wrapper addresses into their frame variables. */
6147 if (cfg->arch.seq_point_read_var) {
6148 MonoInst *read_ins = cfg->arch.seq_point_read_var;
6149 MonoInst *ss_method_ins = cfg->arch.seq_point_ss_method_var;
6150 MonoInst *bp_method_ins = cfg->arch.seq_point_bp_method_var;
6151 #ifdef USE_JUMP_TABLES
6154 g_assert (read_ins->opcode == OP_REGOFFSET);
6155 g_assert (arm_is_imm12 (read_ins->inst_offset));
6156 g_assert (ss_method_ins->opcode == OP_REGOFFSET);
6157 g_assert (arm_is_imm12 (ss_method_ins->inst_offset));
6158 g_assert (bp_method_ins->opcode == OP_REGOFFSET);
6159 g_assert (arm_is_imm12 (bp_method_ins->inst_offset));
6161 #ifdef USE_JUMP_TABLES
6162 jte = mono_jumptable_add_entries (3);
6163 jte [0] = (gpointer)&ss_trigger_var;
6164 jte [1] = single_step_func_wrapper;
6165 jte [2] = breakpoint_func_wrapper;
6166 code = mono_arm_load_jumptable_entry_addr (code, jte, ARMREG_LR);
/* Non-jumptable path: the three pointers are emitted inline after a
 * pc-relative mov; lr ends up pointing at them. */
6168 ARM_MOV_REG_REG (code, ARMREG_LR, ARMREG_PC);
6170 *(volatile int **)code = &ss_trigger_var;
6172 *(gpointer*)code = single_step_func_wrapper;
6174 *(gpointer*)code = breakpoint_func_wrapper;
6178 ARM_LDR_IMM (code, ARMREG_IP, ARMREG_LR, 0);
6179 ARM_STR_IMM (code, ARMREG_IP, read_ins->inst_basereg, read_ins->inst_offset);
6180 ARM_LDR_IMM (code, ARMREG_IP, ARMREG_LR, 4);
6181 ARM_STR_IMM (code, ARMREG_IP, ss_method_ins->inst_basereg, ss_method_ins->inst_offset);
6182 ARM_LDR_IMM (code, ARMREG_IP, ARMREG_LR, 8);
6183 ARM_STR_IMM (code, ARMREG_IP, bp_method_ins->inst_basereg, bp_method_ins->inst_offset);
6186 cfg->code_len = code - cfg->native_code;
6187 g_assert (cfg->code_len < cfg->code_size);
/*
 * mono_arch_emit_epilog:
 * Emit the method epilogue: an optional leave-method trace call, moving a
 * struct-by-value return into r0, then either an LMF-based restore or a
 * plain register/stack restore, finishing by popping the saved LR into PC.
 * NOTE(review): this excerpt is missing physical lines (braces, some
 * statements, #if/#else/#endif) dropped by extraction; code left untouched.
 */
6194 mono_arch_emit_epilog (MonoCompile *cfg)
6196 MonoMethod *method = cfg->method;
6197 int pos, i, rot_amount;
/* Conservative upper bound on epilogue size; enlarged below per feature. */
6198 int max_epilog_size = 16 + 20*4;
6202 if (cfg->method->save_lmf)
6203 max_epilog_size += 128;
6205 if (mono_jit_trace_calls != NULL)
6206 max_epilog_size += 50;
6208 if (cfg->prof_options & MONO_PROFILE_ENTER_LEAVE)
6209 max_epilog_size += 50;
/* Grow the native code buffer until the epilogue is guaranteed to fit. */
6211 while (cfg->code_len + max_epilog_size > (cfg->code_size - 16)) {
6212 cfg->code_size *= 2;
6213 cfg->native_code = g_realloc (cfg->native_code, cfg->code_size);
6214 cfg->stat_code_reallocs++;
6218 * Keep in sync with OP_JMP
6220 code = cfg->native_code + cfg->code_len;
6222 if (mono_jit_trace_calls != NULL && mono_trace_eval (method)) {
6223 code = mono_arch_instrument_epilog (cfg, mono_trace_leave_method, code, TRUE);
6227 /* Load returned vtypes into registers if needed */
6228 cinfo = cfg->arch.cinfo;
6229 if (cinfo->ret.storage == RegTypeStructByVal) {
6230 MonoInst *ins = cfg->ret;
/* Use the 12-bit immediate form when the frame offset fits, else load the
 * offset into LR and use register-offset addressing. */
6232 if (arm_is_imm12 (ins->inst_offset)) {
6233 ARM_LDR_IMM (code, ARMREG_R0, ins->inst_basereg, ins->inst_offset);
6235 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_offset);
6236 ARM_LDR_REG_REG (code, ARMREG_R0, ins->inst_basereg, ARMREG_LR);
/* LMF path: registers were saved inside the MonoLMF on the frame. */
6240 if (method->save_lmf) {
6241 int lmf_offset, reg, sp_adj, regmask;
6242 /* all but r0-r3, sp and pc */
6243 pos += sizeof (MonoLMF) - (MONO_ARM_NUM_SAVED_REGS * sizeof (mgreg_t));
6246 code = emit_restore_lmf (cfg, code, cfg->stack_usage - lmf_offset);
6248 /* This points to r4 inside MonoLMF->iregs */
6249 sp_adj = (sizeof (MonoLMF) - MONO_ARM_NUM_SAVED_REGS * sizeof (mgreg_t));
/* 0x9ff0 = {r4-r11, ip, pc}: pop callee-saved regs and return in one go. */
6251 regmask = 0x9ff0; /* restore lr to pc */
6252 /* Skip caller saved registers not used by the method */
6253 while (!(cfg->used_int_regs & (1 << reg)) && reg < ARMREG_FP) {
6254 regmask &= ~(1 << reg);
6259 /* Restored later */
6260 regmask &= ~(1 << ARMREG_PC);
6261 /* point sp at the registers to restore: 10 is 14 -4, because we skip r0-r3 */
6262 code = emit_big_add (code, ARMREG_SP, cfg->frame_reg, cfg->stack_usage - lmf_offset + sp_adj);
6264 ARM_POP (code, regmask);
/* NOTE(review): the branch below looks like a Darwin/iOS frame variant
 * (r7 frame pointer); the surrounding #ifdef lines are elided — confirm. */
6266 /* Restore saved r7, restore LR to PC */
6267 /* Skip lr from the lmf */
6268 ARM_ADD_REG_IMM (code, ARMREG_SP, ARMREG_SP, sizeof (gpointer), 0);
6269 ARM_POP (code, (1 << ARMREG_R7) | (1 << ARMREG_PC));
/* Non-LMF path: rewind SP to the register save area, then pop. */
6272 if ((i = mono_arm_is_rotated_imm8 (cfg->stack_usage, &rot_amount)) >= 0) {
6273 ARM_ADD_REG_IMM (code, ARMREG_SP, cfg->frame_reg, i, rot_amount);
6275 code = mono_arm_emit_load_imm (code, ARMREG_IP, cfg->stack_usage);
6276 ARM_ADD_REG_REG (code, ARMREG_SP, cfg->frame_reg, ARMREG_IP);
6280 /* Restore saved gregs */
6281 if (cfg->used_int_regs)
6282 ARM_POP (code, cfg->used_int_regs);
6283 /* Restore saved r7, restore LR to PC */
6284 ARM_POP (code, (1 << ARMREG_R7) | (1 << ARMREG_PC));
/* Combined pop of used registers plus PC performs the return. */
6286 ARM_POP (code, cfg->used_int_regs | (1 << ARMREG_PC));
6290 cfg->code_len = code - cfg->native_code;
6292 g_assert (cfg->code_len < cfg->code_size);
/*
 * mono_arch_emit_exceptions:
 * Emit the out-of-line exception-throwing stubs for every MONO_PATCH_INFO_EXC
 * patch recorded during code generation. Identical exception types share one
 * stub; each stub loads the corlib exception type token into r0, the throw
 * site address into r1 (via LR), and calls mono_arch_throw_corlib_exception.
 */
6297 mono_arch_emit_exceptions (MonoCompile *cfg)
6299 MonoJumpInfo *patch_info;
6302 guint8* exc_throw_pos [MONO_EXC_INTRINS_NUM];
6303 guint8 exc_throw_found [MONO_EXC_INTRINS_NUM];
6304 int max_epilog_size = 50;
6306 for (i = 0; i < MONO_EXC_INTRINS_NUM; i++) {
6307 exc_throw_pos [i] = NULL;
6308 exc_throw_found [i] = 0;
6311 /* count the number of exception infos */
6314 * make sure we have enough space for exceptions
/* First pass: 32 bytes per distinct exception type actually thrown. */
6316 for (patch_info = cfg->patch_info; patch_info; patch_info = patch_info->next) {
6317 if (patch_info->type == MONO_PATCH_INFO_EXC) {
6318 i = mini_exception_id_by_name (patch_info->data.target);
6319 if (!exc_throw_found [i]) {
6320 max_epilog_size += 32;
6321 exc_throw_found [i] = TRUE;
/* Grow the buffer until all stubs are guaranteed to fit. */
6326 while (cfg->code_len + max_epilog_size > (cfg->code_size - 16)) {
6327 cfg->code_size *= 2;
6328 cfg->native_code = g_realloc (cfg->native_code, cfg->code_size);
6329 cfg->stat_code_reallocs++;
6332 code = cfg->native_code + cfg->code_len;
6334 /* add code to raise exceptions */
6335 for (patch_info = cfg->patch_info; patch_info; patch_info = patch_info->next) {
6336 switch (patch_info->type) {
6337 case MONO_PATCH_INFO_EXC: {
6338 MonoClass *exc_class;
6339 unsigned char *ip = patch_info->ip.i + cfg->native_code;
6341 i = mini_exception_id_by_name (patch_info->data.target);
/* Reuse an already-emitted stub for the same exception type. */
6342 if (exc_throw_pos [i]) {
6343 arm_patch (ip, exc_throw_pos [i]);
6344 patch_info->type = MONO_PATCH_INFO_NONE;
6347 exc_throw_pos [i] = code;
/* Redirect the in-method branch to the stub we are about to emit. */
6349 arm_patch (ip, code);
6351 exc_class = mono_class_from_name (mono_defaults.corlib, "System", patch_info->data.name);
6352 g_assert (exc_class);
/* r1 = throw-site return address (LR at the point of the branch). */
6354 ARM_MOV_REG_REG (code, ARMREG_R1, ARMREG_LR);
6355 #ifdef USE_JUMP_TABLES
/* Jumptable variant: slot 0 = throw helper address, slot 1 = type token. */
6357 gpointer *jte = mono_jumptable_add_entries (2);
6358 patch_info->type = MONO_PATCH_INFO_INTERNAL_METHOD;
6359 patch_info->data.name = "mono_arch_throw_corlib_exception";
6360 patch_info->ip.i = code - cfg->native_code;
6361 code = mono_arm_load_jumptable_entry_addr (code, jte, ARMREG_R0);
6362 ARM_LDR_IMM (code, ARMREG_IP, ARMREG_R0, 0);
6363 ARM_LDR_IMM (code, ARMREG_R0, ARMREG_R0, 4);
6364 ARM_BLX_REG (code, ARMREG_IP);
6365 jte [1] = GUINT_TO_POINTER (exc_class->type_token);
/* Constant-pool variant: token embedded in the code stream after the call. */
6368 ARM_LDR_IMM (code, ARMREG_R0, ARMREG_PC, 0);
6369 patch_info->type = MONO_PATCH_INFO_INTERNAL_METHOD;
6370 patch_info->data.name = "mono_arch_throw_corlib_exception";
6371 patch_info->ip.i = code - cfg->native_code;
6373 *(guint32*)(gpointer)code = exc_class->type_token;
6384 cfg->code_len = code - cfg->native_code;
6386 g_assert (cfg->code_len < cfg->code_size);
6390 #endif /* #ifndef DISABLE_JIT */
/* Late arch-specific initialization hook; body elided in this excerpt
 * (presumably empty on ARM — confirm against full source). */
6393 mono_arch_finish_init (void)
/* Free per-thread JIT TLS data; body elided in this excerpt. */
6398 mono_arch_free_jit_tls_data (MonoJitTlsData *tls)
/* Arch-specific intrinsic lowering for method calls; body elided here
 * (presumably returns NULL, i.e. no intrinsics — confirm). */
6403 mono_arch_emit_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
/* Debug pretty-printer for an IR tree; body elided in this excerpt. */
6410 mono_arch_print_tree (MonoInst *tree, int arity)
/* Offset of the patchable constant inside a code sequence; body elided. */
6420 mono_arch_get_patch_offset (guint8 *code)
/* No register windows on ARM; this is a no-op (body elided). */
6427 mono_arch_flush_register_windows (void)
6431 #ifdef MONO_ARCH_HAVE_IMT
/*
 * mono_arch_emit_imt_argument:
 * Arrange for the IMT/method argument of an interface call to be passed in
 * register V5. Under AOT, jumptables, LLVM or generic sharing the argument is
 * always materialized in a register (dynamic_imt_arg); otherwise the constant
 * is emitted inline.
 */
6436 mono_arch_emit_imt_argument (MonoCompile *cfg, MonoCallInst *call, MonoInst *imt_arg)
6438 int method_reg = mono_alloc_ireg (cfg);
6439 #ifdef USE_JUMP_TABLES
6440 int use_jumptables = TRUE;
6442 int use_jumptables = FALSE;
6445 if (cfg->compile_aot) {
6448 call->dynamic_imt_arg = TRUE;
/* Either forward the caller-supplied IMT value, or load the method
 * constant through an AOT patch. */
6451 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, method_reg, imt_arg->dreg);
6453 MONO_INST_NEW (cfg, ins, OP_AOTCONST);
6454 ins->dreg = method_reg;
6455 ins->inst_p0 = call->method;
6456 ins->inst_c1 = MONO_PATCH_INFO_METHODCONST;
6457 MONO_ADD_INS (cfg->cbb, ins);
6459 mono_call_inst_add_outarg_reg (cfg, call, method_reg, ARMREG_V5, FALSE);
6460 } else if (cfg->generic_context || imt_arg || mono_use_llvm || use_jumptables) {
6461 /* Always pass in a register for simplicity */
6462 call->dynamic_imt_arg = TRUE;
6464 cfg->uses_rgctx_reg = TRUE;
6467 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, method_reg, imt_arg->dreg);
/* No IMT value supplied: pass the method pointer itself as a constant. */
6471 MONO_INST_NEW (cfg, ins, OP_PCONST);
6472 ins->inst_p0 = call->method;
6473 ins->dreg = method_reg;
6474 MONO_ADD_INS (cfg->cbb, ins);
6477 mono_call_inst_add_outarg_reg (cfg, call, method_reg, ARMREG_V5, FALSE);
6481 #endif /* DISABLE_JIT */
/*
 * mono_arch_find_imt_method:
 * Recover the IMT method for an interface call site. With jumptables the
 * value is always in V5; otherwise it is read from the code stream right
 * after the LDR that branched here, with V5 as the AOT/gsharedvt fallback.
 */
6484 mono_arch_find_imt_method (mgreg_t *regs, guint8 *code)
6486 #ifdef USE_JUMP_TABLES
6487 return (MonoMethod*)regs [ARMREG_V5];
6490 guint32 *code_ptr = (guint32*)code;
/* The word following the branch instruction holds the IMT method pointer. */
6492 method = GUINT_TO_POINTER (code_ptr [1]);
6496 return (MonoMethod*)regs [ARMREG_V5];
6498 /* The IMT value is stored in the code stream right after the LDC instruction. */
6499 /* This is no longer true for the gsharedvt_in trampoline */
6501 if (!IS_LDR_PC (code_ptr [0])) {
6502 g_warning ("invalid code stream, instruction before IMT value is not a LDC in %s() (code %p value 0: 0x%x -1: 0x%x -2: 0x%x)", __FUNCTION__, code, code_ptr [2], code_ptr [1], code_ptr [0]);
6503 g_assert (IS_LDR_PC (code_ptr [0]));
6507 /* This is AOTed code, or the gsharedvt trampoline, the IMT method is in V5 */
6508 return (MonoMethod*)regs [ARMREG_V5];
6510 return (MonoMethod*) method;
/* Return the vtable for a static rgctx call: it is passed in the
 * architecture's RGCTX register. */
6515 mono_arch_find_static_call_vtable (mgreg_t *regs, guint8 *code)
6517 return (MonoVTable*) regs [MONO_ARCH_RGCTX_REG];
6520 /* #define ENABLE_WRONG_METHOD_CHECK 1 */
6521 #define BASE_SIZE (6 * 4)
6522 #define BSEARCH_ENTRY_SIZE (4 * 4)
6523 #define CMP_SIZE (3 * 4)
6524 #define BRANCH_SIZE (1 * 4)
6525 #define CALL_SIZE (2 * 4)
6526 #define WMC_SIZE (8 * 4)
6527 #define DISTANCE(A, B) (((gint32)(B)) - ((gint32)(A)))
6529 #ifdef USE_JUMP_TABLES
/* Store VALUE into jumptable slot INDEX, asserting the slot was unused
 * (each slot must be filled exactly once). */
6531 set_jumptable_element (gpointer *base, guint32 index, gpointer value)
6533 g_assert (base [index] == NULL);
6534 base [index] = value;
/*
 * load_element_with_regbase_cond:
 * Emit a conditional load of jumptable entry JTI (byte offset jti*4 from
 * BASE) into DREG. Uses the 12-bit immediate form when the offset fits,
 * otherwise materializes the offset with MOVW/MOVT and uses a
 * register-offset load.
 */
6537 load_element_with_regbase_cond (arminstr_t *code, ARMReg dreg, ARMReg base, guint32 jti, int cond)
6539 if (arm_is_imm12 (jti * 4)) {
6540 ARM_LDR_IMM_COND (code, dreg, base, jti * 4, cond);
6542 ARM_MOVW_REG_IMM_COND (code, dreg, (jti * 4) & 0xffff, cond);
/* Only emit MOVT when the offset has bits above 16. */
6543 if ((jti * 4) >> 16)
6544 ARM_MOVT_REG_IMM_COND (code, dreg, ((jti * 4) >> 16) & 0xffff, cond);
6545 ARM_LDR_REG_REG_SHIFT_COND (code, dreg, base, dreg, ARMSHIFT_LSL, 0, cond);
/*
 * arm_emit_value_and_patch_ldr:
 * Emit VALUE into the constant pool at CODE and patch the LDR at TARGET so
 * its 12-bit immediate offset points at it (PC-relative distance).
 */
6551 arm_emit_value_and_patch_ldr (arminstr_t *code, arminstr_t *target, guint32 value)
6553 guint32 delta = DISTANCE (target, code);
/* NOTE(review): delta is guint32, so "delta >= 0" is vacuously true; a
 * negative distance would wrap and only be caught by the <= 0xFFF check.
 * Consider making delta signed (gint32) so the assert is meaningful. */
6555 g_assert (delta >= 0 && delta <= 0xFFF);
/* OR the displacement into the LDR's offset field. */
6556 *target = *target | delta;
6562 #ifdef ENABLE_WRONG_METHOD_CHECK
/* Debug helper for ENABLE_WRONG_METHOD_CHECK: report an IMT mismatch
 * detected inside a generated thunk. */
6564 mini_dump_bad_imt (int input_imt, int compared_imt, int pc)
6566 g_print ("BAD IMT comparing %x with expected %x at ip %x", input_imt, compared_imt, pc);
/*
 * mono_arch_build_imt_thunk:
 * Build the native IMT thunk for VTABLE: a binary-search dispatch over
 * IMT_ENTRIES that compares the incoming IMT method (r0/V5) and jumps to
 * the matching vtable slot, explicit target code, or FAIL_TRAMP.
 * Two code shapes exist: a jumptable-based one (USE_JUMP_TABLES) and a
 * constant-pool one that embeds values in the code stream and patches LDRs.
 * NOTE(review): many physical lines (braces, #else/#endif, some statements)
 * are elided in this excerpt; the code text is left untouched.
 */
6572 mono_arch_build_imt_thunk (MonoVTable *vtable, MonoDomain *domain, MonoIMTCheckItem **imt_entries, int count,
6573 gpointer fail_tramp)
6576 arminstr_t *code, *start;
6577 #ifdef USE_JUMP_TABLES
6580 gboolean large_offsets = FALSE;
6581 guint32 **constant_pool_starts;
6582 arminstr_t *vtable_target = NULL;
6583 int extra_space = 0;
6585 #ifdef ENABLE_WRONG_METHOD_CHECK
/* --- Pass 1: compute per-entry chunk sizes and total thunk size. --- */
6590 #ifdef USE_JUMP_TABLES
6591 for (i = 0; i < count; ++i) {
6592 MonoIMTCheckItem *item = imt_entries [i];
6593 item->chunk_size += 4 * 16;
6594 if (!item->is_equals)
6595 imt_entries [item->check_target_idx]->compare_done = TRUE;
6596 size += item->chunk_size;
6599 constant_pool_starts = g_new0 (guint32*, count);
6601 for (i = 0; i < count; ++i) {
6602 MonoIMTCheckItem *item = imt_entries [i];
6603 if (item->is_equals) {
6604 gboolean fail_case = !item->check_target_idx && fail_tramp;
/* Vtable slots out of LDR's 12-bit range force the larger code shape. */
6606 if (item->has_target_code || !arm_is_imm12 (DISTANCE (vtable, &vtable->vtable[item->value.vtable_slot]))) {
6607 item->chunk_size += 32;
6608 large_offsets = TRUE;
6611 if (item->check_target_idx || fail_case) {
6612 if (!item->compare_done || fail_case)
6613 item->chunk_size += CMP_SIZE;
6614 item->chunk_size += BRANCH_SIZE;
6616 #ifdef ENABLE_WRONG_METHOD_CHECK
6617 item->chunk_size += WMC_SIZE;
6621 item->chunk_size += 16;
6622 large_offsets = TRUE;
6624 item->chunk_size += CALL_SIZE;
6626 item->chunk_size += BSEARCH_ENTRY_SIZE;
6627 imt_entries [item->check_target_idx]->compare_done = TRUE;
6629 size += item->chunk_size;
6633 size += 4 * count; /* The ARM_ADD_REG_IMM to pop the stack */
/* fail_tramp implies a generic virtual thunk, allocated differently. */
6637 code = mono_method_alloc_generic_virtual_thunk (domain, size);
6639 code = mono_domain_code_reserve (domain, size);
6643 g_print ("Building IMT thunk for class %s %s entries %d code size %d code at %p end %p vtable %p fail_tramp %p\n", vtable->klass->name_space, vtable->klass->name, count, size, start, ((guint8*)start) + size, vtable, fail_tramp);
6644 for (i = 0; i < count; ++i) {
6645 MonoIMTCheckItem *item = imt_entries [i];
6646 g_print ("method %d (%p) %s vtable slot %p is_equals %d chunk size %d\n", i, item->key, ((MonoMethod*)item->key)->name, &vtable->vtable [item->value.vtable_slot], item->is_equals, item->chunk_size);
/* --- Thunk prologue: save scratch registers, load vtable pointer. --- */
6650 #ifdef USE_JUMP_TABLES
6651 ARM_PUSH3 (code, ARMREG_R0, ARMREG_R1, ARMREG_R2);
6652 /* If jumptables we always pass the IMT method in R5 */
6653 ARM_MOV_REG_REG (code, ARMREG_R0, ARMREG_V5);
/* Jumptable layout: slot 0 = vtable, then 3 slots per IMT entry. */
6654 #define VTABLE_JTI 0
6655 #define IMT_METHOD_OFFSET 0
6656 #define TARGET_CODE_OFFSET 1
6657 #define JUMP_CODE_OFFSET 2
6658 #define RECORDS_PER_ENTRY 3
6659 #define IMT_METHOD_JTI(idx) (1 + idx * RECORDS_PER_ENTRY + IMT_METHOD_OFFSET)
6660 #define TARGET_CODE_JTI(idx) (1 + idx * RECORDS_PER_ENTRY + TARGET_CODE_OFFSET)
6661 #define JUMP_CODE_JTI(idx) (1 + idx * RECORDS_PER_ENTRY + JUMP_CODE_OFFSET)
6663 jte = mono_jumptable_add_entries (RECORDS_PER_ENTRY * count + 1 /* vtable */);
6664 code = (arminstr_t *) mono_arm_load_jumptable_entry_addr ((guint8 *) code, jte, ARMREG_R2);
6665 ARM_LDR_IMM (code, ARMREG_IP, ARMREG_R2, VTABLE_JTI);
6666 set_jumptable_element (jte, VTABLE_JTI, vtable);
/* Non-jumptable prologue: large_offsets needs an extra stack slot for the
 * computed branch target (popped into PC later). */
6669 ARM_PUSH4 (code, ARMREG_R0, ARMREG_R1, ARMREG_IP, ARMREG_PC);
6671 ARM_PUSH2 (code, ARMREG_R0, ARMREG_R1);
6672 ARM_LDR_IMM (code, ARMREG_R0, ARMREG_LR, -4);
6673 vtable_target = code;
6674 ARM_LDR_IMM (code, ARMREG_IP, ARMREG_PC, 0);
6676 if (mono_use_llvm) {
6677 /* LLVM always passes the IMT method in R5 */
6678 ARM_MOV_REG_REG (code, ARMREG_R0, ARMREG_V5);
6680 /* R0 == 0 means we are called from AOT code. In this case, V5 contains the IMT method */
6681 ARM_CMP_REG_IMM8 (code, ARMREG_R0, 0);
6682 ARM_MOV_REG_REG_COND (code, ARMREG_R0, ARMREG_V5, ARMCOND_EQ);
/* --- Pass 2: emit the compare/branch/dispatch code for each entry. --- */
6686 for (i = 0; i < count; ++i) {
6687 MonoIMTCheckItem *item = imt_entries [i];
6688 #ifdef USE_JUMP_TABLES
6689 guint32 imt_method_jti = 0, target_code_jti = 0;
6691 arminstr_t *imt_method = NULL, *vtable_offset_ins = NULL, *target_code_ins = NULL;
6693 gint32 vtable_offset;
6695 item->code_target = (guint8*)code;
6697 if (item->is_equals) {
6698 gboolean fail_case = !item->check_target_idx && fail_tramp;
/* Equality check: compare incoming IMT method against this entry's key,
 * branch away on mismatch. */
6700 if (item->check_target_idx || fail_case) {
6701 if (!item->compare_done || fail_case) {
6702 #ifdef USE_JUMP_TABLES
6703 imt_method_jti = IMT_METHOD_JTI (i);
6704 code = load_element_with_regbase_cond (code, ARMREG_R1, ARMREG_R2, imt_method_jti, ARMCOND_AL);
6707 ARM_LDR_IMM (code, ARMREG_R1, ARMREG_PC, 0);
6709 ARM_CMP_REG_REG (code, ARMREG_R0, ARMREG_R1);
6711 #ifdef USE_JUMP_TABLES
6712 code = load_element_with_regbase_cond (code, ARMREG_R1, ARMREG_R2, JUMP_CODE_JTI (i), ARMCOND_NE);
6713 ARM_BX_COND (code, ARMCOND_NE, ARMREG_R1);
/* jmp_code stores the jumptable index (patched later), not an address. */
6714 item->jmp_code = GUINT_TO_POINTER (JUMP_CODE_JTI (i));
6716 item->jmp_code = (guint8*)code;
6717 ARM_B_COND (code, ARMCOND_NE, 0);
6720 /*Enable the commented code to assert on wrong method*/
6721 #ifdef ENABLE_WRONG_METHOD_CHECK
6722 #ifdef USE_JUMP_TABLES
6723 imt_method_jti = IMT_METHOD_JTI (i);
6724 code = load_element_with_regbase_cond (code, ARMREG_R1, ARMREG_R2, imt_method_jti, ARMCOND_AL);
6727 ARM_LDR_IMM (code, ARMREG_R1, ARMREG_PC, 0);
6729 ARM_CMP_REG_REG (code, ARMREG_R0, ARMREG_R1);
6731 ARM_B_COND (code, ARMCOND_EQ, 0);
6733 /* Define this if your system is so bad that gdb is failing. */
6734 #ifdef BROKEN_DEV_ENV
6735 ARM_MOV_REG_REG (code, ARMREG_R2, ARMREG_PC);
6737 arm_patch (code - 1, mini_dump_bad_imt);
6741 arm_patch (cond, code);
/* Dispatch: jump to explicit target code or through the vtable slot. */
6745 if (item->has_target_code) {
6746 /* Load target address */
6747 #ifdef USE_JUMP_TABLES
6748 target_code_jti = TARGET_CODE_JTI (i);
6749 code = load_element_with_regbase_cond (code, ARMREG_R1, ARMREG_R2, target_code_jti, ARMCOND_AL);
6750 /* Restore registers */
6751 ARM_POP3 (code, ARMREG_R0, ARMREG_R1, ARMREG_R2);
6753 ARM_BX (code, ARMREG_R1);
6754 set_jumptable_element (jte, target_code_jti, item->value.target_code);
6756 target_code_ins = code;
6757 ARM_LDR_IMM (code, ARMREG_R1, ARMREG_PC, 0);
6758 /* Save it to the fourth slot */
6759 ARM_STR_IMM (code, ARMREG_R1, ARMREG_SP, 3 * sizeof (gpointer));
6760 /* Restore registers and branch */
6761 ARM_POP4 (code, ARMREG_R0, ARMREG_R1, ARMREG_IP, ARMREG_PC);
6763 code = arm_emit_value_and_patch_ldr (code, target_code_ins, (gsize)item->value.target_code);
6766 vtable_offset = DISTANCE (vtable, &vtable->vtable[item->value.vtable_slot]);
6767 if (!arm_is_imm12 (vtable_offset)) {
6769 * We need to branch to a computed address but we don't have
6770 * a free register to store it, since IP must contain the
6771 * vtable address. So we push the two values to the stack, and
6772 * load them both using LDM.
6774 /* Compute target address */
6775 #ifdef USE_JUMP_TABLES
6776 ARM_MOVW_REG_IMM (code, ARMREG_R1, vtable_offset & 0xffff);
6777 if (vtable_offset >> 16)
6778 ARM_MOVT_REG_IMM (code, ARMREG_R1, (vtable_offset >> 16) & 0xffff);
6779 /* IP had vtable base. */
6780 ARM_LDR_REG_REG (code, ARMREG_IP, ARMREG_IP, ARMREG_R1);
6781 /* Restore registers and branch */
6782 ARM_POP3 (code, ARMREG_R0, ARMREG_R1, ARMREG_R2);
6783 ARM_BX (code, ARMREG_IP);
6785 vtable_offset_ins = code;
6786 ARM_LDR_IMM (code, ARMREG_R1, ARMREG_PC, 0);
6787 ARM_LDR_REG_REG (code, ARMREG_R1, ARMREG_IP, ARMREG_R1);
6788 /* Save it to the fourth slot */
6789 ARM_STR_IMM (code, ARMREG_R1, ARMREG_SP, 3 * sizeof (gpointer));
6790 /* Restore registers and branch */
6791 ARM_POP4 (code, ARMREG_R0, ARMREG_R1, ARMREG_IP, ARMREG_PC);
6793 code = arm_emit_value_and_patch_ldr (code, vtable_offset_ins, vtable_offset);
/* Small-offset fast path: load the slot directly into PC/IP. */
6796 #ifdef USE_JUMP_TABLES
6797 ARM_LDR_IMM (code, ARMREG_IP, ARMREG_IP, vtable_offset);
6798 ARM_POP3 (code, ARMREG_R0, ARMREG_R1, ARMREG_R2);
6799 ARM_BX (code, ARMREG_IP);
6801 ARM_POP2 (code, ARMREG_R0, ARMREG_R1);
6803 ARM_ADD_REG_IMM8 (code, ARMREG_SP, ARMREG_SP, 2 * sizeof (gpointer));
6804 ARM_LDR_IMM (code, ARMREG_PC, ARMREG_IP, vtable_offset);
/* fail_case: the mismatch branch lands here and jumps to fail_tramp. */
6810 #ifdef USE_JUMP_TABLES
6811 set_jumptable_element (jte, GPOINTER_TO_UINT (item->jmp_code), code);
6812 target_code_jti = TARGET_CODE_JTI (i);
6813 /* Load target address */
6814 code = load_element_with_regbase_cond (code, ARMREG_R1, ARMREG_R2, target_code_jti, ARMCOND_AL);
6815 /* Restore registers */
6816 ARM_POP3 (code, ARMREG_R0, ARMREG_R1, ARMREG_R2);
6818 ARM_BX (code, ARMREG_R1);
6819 set_jumptable_element (jte, target_code_jti, fail_tramp);
6821 arm_patch (item->jmp_code, (guchar*)code);
6823 target_code_ins = code;
6824 /* Load target address */
6825 ARM_LDR_IMM (code, ARMREG_R1, ARMREG_PC, 0);
6826 /* Save it to the fourth slot */
6827 ARM_STR_IMM (code, ARMREG_R1, ARMREG_SP, 3 * sizeof (gpointer));
6828 /* Restore registers and branch */
6829 ARM_POP4 (code, ARMREG_R0, ARMREG_R1, ARMREG_IP, ARMREG_PC);
6831 code = arm_emit_value_and_patch_ldr (code, target_code_ins, (gsize)fail_tramp);
6833 item->jmp_code = NULL;
/* Emit this entry's constant-pool values (or fill jumptable slots). */
6836 #ifdef USE_JUMP_TABLES
6838 set_jumptable_element (jte, imt_method_jti, item->key);
6841 code = arm_emit_value_and_patch_ldr (code, imt_method, (guint32)item->key);
6843 /*must emit after unconditional branch*/
6844 if (vtable_target) {
6845 code = arm_emit_value_and_patch_ldr (code, vtable_target, (guint32)vtable);
6846 item->chunk_size += 4;
6847 vtable_target = NULL;
6850 /*We reserve the space for bsearch IMT values after the first entry with an absolute jump*/
6851 constant_pool_starts [i] = code;
6853 code += extra_space;
/* Binary-search node (is_equals == FALSE): branch when R0 >= key. */
6858 #ifdef USE_JUMP_TABLES
6859 code = load_element_with_regbase_cond (code, ARMREG_R1, ARMREG_R2, IMT_METHOD_JTI (i), ARMCOND_AL);
6860 ARM_CMP_REG_REG (code, ARMREG_R0, ARMREG_R1);
6861 code = load_element_with_regbase_cond (code, ARMREG_R1, ARMREG_R2, JUMP_CODE_JTI (i), ARMCOND_HS);
6862 ARM_BX_COND (code, ARMCOND_HS, ARMREG_R1);
6863 item->jmp_code = GUINT_TO_POINTER (JUMP_CODE_JTI (i));
6865 ARM_LDR_IMM (code, ARMREG_R1, ARMREG_PC, 0);
6866 ARM_CMP_REG_REG (code, ARMREG_R0, ARMREG_R1);
6868 item->jmp_code = (guint8*)code;
6869 ARM_B_COND (code, ARMCOND_HS, 0);
/* --- Pass 3: resolve forward branches and bsearch key constants. --- */
6875 for (i = 0; i < count; ++i) {
6876 MonoIMTCheckItem *item = imt_entries [i];
6877 if (item->jmp_code) {
6878 if (item->check_target_idx)
6879 #ifdef USE_JUMP_TABLES
6880 set_jumptable_element (jte, GPOINTER_TO_UINT (item->jmp_code), imt_entries [item->check_target_idx]->code_target);
6882 arm_patch (item->jmp_code, imt_entries [item->check_target_idx]->code_target);
6885 if (i > 0 && item->is_equals) {
6887 #ifdef USE_JUMP_TABLES
6888 for (j = i - 1; j >= 0 && !imt_entries [j]->is_equals; --j)
6889 set_jumptable_element (jte, IMT_METHOD_JTI (j), imt_entries [j]->key);
6891 arminstr_t *space_start = constant_pool_starts [i];
6892 for (j = i - 1; j >= 0 && !imt_entries [j]->is_equals; --j) {
6893 space_start = arm_emit_value_and_patch_ldr (space_start, (arminstr_t*)imt_entries [j]->code_target, (guint32)imt_entries [j]->key);
6901 char *buff = g_strdup_printf ("thunk_for_class_%s_%s_entries_%d", vtable->klass->name_space, vtable->klass->name, count);
6902 mono_disassemble_code (NULL, (guint8*)start, size, buff);
6907 #ifndef USE_JUMP_TABLES
6908 g_free (constant_pool_starts);
/* Make the generated code visible to the instruction stream. */
6911 mono_arch_flush_icache ((guint8*)start, size);
6912 mono_stats.imt_thunks_size += code - start;
6914 g_assert (DISTANCE (start, code) <= size);
/* Read integer register REG from a saved MonoContext. */
6921 mono_arch_context_get_int_reg (MonoContext *ctx, int reg)
6923 return ctx->regs [reg];
/* Write VAL into integer register REG of a saved MonoContext. */
6927 mono_arch_context_set_int_reg (MonoContext *ctx, int reg, mgreg_t val)
6929 ctx->regs [reg] = val;
6933 * mono_arch_get_trampolines:
6935 * Return a list of MonoTrampInfo structures describing arch specific trampolines
/* Return the arch-specific trampolines; on ARM these are just the
 * exception trampolines. */
6939 mono_arch_get_trampolines (gboolean aot)
6941 return mono_arm_get_exception_trampolines (aot);
6945 #ifdef MONO_ARCH_SOFT_DEBUG_SUPPORTED
6947 * mono_arch_set_breakpoint:
6949 * Set a breakpoint at the native code corresponding to JI at NATIVE_OFFSET.
6950 * The location should contain code emitted by OP_SEQ_POINT.
/*
 * mono_arch_set_breakpoint:
 * Activate the breakpoint site at IP. Three mechanisms, chosen by config:
 * soft breakpoints patch in a BLX through LR; AOT code flips the per-offset
 * entry in SeqPointInfo->bp_addrs to the trigger page; otherwise the seq
 * point's nops are rewritten into a load from the bp trigger page so the
 * access faults when the page is protected.
 * NOTE(review): some lines (#else/#endif, brace lines) are elided in this
 * excerpt; code left untouched.
 */
6953 mono_arch_set_breakpoint (MonoJitInfo *ji, guint8 *ip)
6956 guint32 native_offset = ip - (guint8*)ji->code_start;
6957 MonoDebugOptions *opt = mini_get_debug_options ();
6959 if (opt->soft_breakpoints) {
6960 g_assert (!ji->from_aot);
/* Call through LR (set up by the seq point code) to hit the debugger. */
6962 ARM_BLX_REG (code, ARMREG_LR);
6963 mono_arch_flush_icache (code - 4, 4);
6964 } else if (ji->from_aot) {
6965 SeqPointInfo *info = mono_arch_get_seq_point_info (mono_domain_get (), ji->code_start);
6967 g_assert (native_offset % 4 == 0);
6968 g_assert (info->bp_addrs [native_offset / 4] == 0);
6969 info->bp_addrs [native_offset / 4] = bp_trigger_page;
6971 int dreg = ARMREG_LR;
6973 /* Read from another trigger page */
6974 #ifdef USE_JUMP_TABLES
6975 gpointer *jte = mono_jumptable_add_entry ();
6976 code = mono_arm_load_jumptable_entry (code, jte, dreg);
6977 jte [0] = bp_trigger_page;
/* Constant-pool variant: the page address is embedded after the load. */
6979 ARM_LDR_IMM (code, dreg, ARMREG_PC, 0);
6981 *(int*)code = (int)bp_trigger_page;
/* Dereference the trigger page; faults (SIGSEGV) when bp is armed. */
6984 ARM_LDR_IMM (code, dreg, dreg, 0);
6986 mono_arch_flush_icache (code - 16, 16);
6989 /* This is currently implemented by emitting an SWI instruction, which
6990 * qemu/linux seems to convert to a SIGILL.
6992 *(int*)code = (0xef << 24) | 8;
6994 mono_arch_flush_icache (code - 4, 4);
7000 * mono_arch_clear_breakpoint:
7002 * Clear the breakpoint at IP.
/*
 * mono_arch_clear_breakpoint:
 * Undo mono_arch_set_breakpoint at IP: restore the original instruction
 * (soft), clear the bp_addrs slot (AOT), or rewrite the 4-instruction
 * trigger sequence back to nops (the loop body is elided in this excerpt —
 * presumably ARM_NOP emission; confirm against full source).
 */
7005 mono_arch_clear_breakpoint (MonoJitInfo *ji, guint8 *ip)
7007 MonoDebugOptions *opt = mini_get_debug_options ();
7011 if (opt->soft_breakpoints) {
7012 g_assert (!ji->from_aot);
7015 mono_arch_flush_icache (code - 4, 4);
7016 } else if (ji->from_aot) {
7017 guint32 native_offset = ip - (guint8*)ji->code_start;
7018 SeqPointInfo *info = mono_arch_get_seq_point_info (mono_domain_get (), ji->code_start);
7020 g_assert (native_offset % 4 == 0);
7021 g_assert (info->bp_addrs [native_offset / 4] == bp_trigger_page);
7022 info->bp_addrs [native_offset / 4] = 0;
7024 for (i = 0; i < 4; ++i)
7027 mono_arch_flush_icache (ip, code - ip);
7032 * mono_arch_start_single_stepping:
7034 * Start single stepping.
/* Arm single stepping by revoking all access to the SS trigger page, so
 * the per-seq-point load from it faults. */
7037 mono_arch_start_single_stepping (void)
7039 if (ss_trigger_page)
7040 mono_mprotect (ss_trigger_page, mono_pagesize (), 0);
7046 * mono_arch_stop_single_stepping:
7048 * Stop single stepping.
/* Disarm single stepping by making the SS trigger page readable again. */
7051 mono_arch_stop_single_stepping (void)
7053 if (ss_trigger_page)
7054 mono_mprotect (ss_trigger_page, mono_pagesize (), MONO_MMAP_READ);
7060 #define DBG_SIGNAL SIGBUS
7062 #define DBG_SIGNAL SIGSEGV
7066 * mono_arch_is_single_step_event:
7068 * Return whenever the machine state in SIGCTX corresponds to a single
/*
 * mono_arch_is_single_step_event:
 * Decide whether the fault described by INFO hit the single-step trigger
 * page. The faulting address is allowed to land up to 128 bytes into the
 * page because hardware sometimes reports it off by a small amount.
 */
7072 mono_arch_is_single_step_event (void *info, void *sigctx)
7074 siginfo_t *sinfo = info;
/* No trigger page means soft breakpoints are in use; never an SS event. */
7076 if (!ss_trigger_page)
7079 /* Sometimes the address is off by 4 */
7080 if (sinfo->si_addr >= ss_trigger_page && (guint8*)sinfo->si_addr <= (guint8*)ss_trigger_page + 128)
7087 * mono_arch_is_breakpoint_event:
7089 * Return whenever the machine state in SIGCTX corresponds to a breakpoint event.
/*
 * mono_arch_is_breakpoint_event:
 * Decide whether the signal described by INFO is a breakpoint hit: the
 * right signal number (DBG_SIGNAL) with a faulting address inside the
 * breakpoint trigger page (with the same 128-byte slack as above).
 */
7092 mono_arch_is_breakpoint_event (void *info, void *sigctx)
7094 siginfo_t *sinfo = info;
7096 if (!ss_trigger_page)
7099 if (sinfo->si_signo == DBG_SIGNAL) {
7100 /* Sometimes the address is off by 4 */
7101 if (sinfo->si_addr >= bp_trigger_page && (guint8*)sinfo->si_addr <= (guint8*)bp_trigger_page + 128)
7111 * mono_arch_skip_breakpoint:
7113 * See mini-amd64.c for docs.
/* Resume after a breakpoint fault: step the saved PC over the 4-byte
 * trigger load instruction. */
7116 mono_arch_skip_breakpoint (MonoContext *ctx, MonoJitInfo *ji)
7118 MONO_CONTEXT_SET_IP (ctx, (guint8*)MONO_CONTEXT_GET_IP (ctx) + 4);
7122 * mono_arch_skip_single_step:
7124 * See mini-amd64.c for docs.
/* Resume after a single-step fault: step the saved PC over the 4-byte
 * trigger load instruction. */
7127 mono_arch_skip_single_step (MonoContext *ctx)
7129 MONO_CONTEXT_SET_IP (ctx, (guint8*)MONO_CONTEXT_GET_IP (ctx) + 4);
7132 #endif /* MONO_ARCH_SOFT_DEBUG_SUPPORTED */
7135 * mono_arch_get_seq_point_info:
7137 * See mini-amd64.c for docs.
/*
 * mono_arch_get_seq_point_info:
 * Return (creating and caching on first use) the SeqPointInfo for the
 * method starting at CODE, keyed by code address in the per-domain
 * arch_seq_points hash. The structure carries the trigger pages plus one
 * bp_addrs slot per 4-byte instruction (allocation sized by ji->code_size).
 */
7140 mono_arch_get_seq_point_info (MonoDomain *domain, guint8 *code)
7145 // FIXME: Add a free function
7147 mono_domain_lock (domain);
7148 info = g_hash_table_lookup (domain_jit_info (domain)->arch_seq_points,
7150 mono_domain_unlock (domain);
/* Cache miss: build a new entry from the JIT info for this code range. */
7153 ji = mono_jit_info_table_find (domain, (char*)code);
7156 info = g_malloc0 (sizeof (SeqPointInfo) + ji->code_size);
7158 info->ss_trigger_page = ss_trigger_page;
7159 info->bp_trigger_page = bp_trigger_page;
7161 mono_domain_lock (domain);
7162 g_hash_table_insert (domain_jit_info (domain)->arch_seq_points,
7164 mono_domain_unlock (domain);
/* Initialize an extended LMF: link to the previous LMF and tag the link
 * pointer with bit 1 so stack walkers know this is a MonoLMFExt. */
7171 mono_arch_init_lmf_ext (MonoLMFExt *ext, gpointer prev_lmf)
7173 ext->lmf.previous_lmf = prev_lmf;
7174 /* Mark that this is a MonoLMFExt */
7175 ext->lmf.previous_lmf = (gpointer)(((gssize)ext->lmf.previous_lmf) | 2);
7176 ext->lmf.sp = (gssize)ext;
7180 * mono_arch_set_target:
7182 * Set the target architecture the JIT backend should generate code for, in the form
7183 * of a GNU target triplet. Only used in AOT mode.
/*
 * mono_arch_set_target:
 * Set the target architecture the JIT backend should generate code for,
 * from a GNU target triplet (AOT only). Matching is by substring, so
 * e.g. "armv7s" also satisfies the "armv7" test; feature flags are
 * cumulative across the checks.
 */
7186 mono_arch_set_target (char *mtriple)
7188 /* The GNU target triple format is not very well documented */
7189 if (strstr (mtriple, "armv7")) {
7190 v5_supported = TRUE;
7191 v6_supported = TRUE;
7192 v7_supported = TRUE;
7194 if (strstr (mtriple, "armv6")) {
7195 v5_supported = TRUE;
7196 v6_supported = TRUE;
/* "armv7s" already matched the "armv7" check above, so only the extra
 * v7s flag is set here. */
7198 if (strstr (mtriple, "armv7s")) {
7199 v7s_supported = TRUE;
7201 if (strstr (mtriple, "thumbv7s")) {
7202 v5_supported = TRUE;
7203 v6_supported = TRUE;
7204 v7_supported = TRUE;
7205 v7s_supported = TRUE;
7206 thumb_supported = TRUE;
7207 thumb2_supported = TRUE;
/* Apple targets are at least ARMv6 with Thumb support. */
7209 if (strstr (mtriple, "darwin") || strstr (mtriple, "ios")) {
7210 v5_supported = TRUE;
7211 v6_supported = TRUE;
7212 thumb_supported = TRUE;
7215 if (strstr (mtriple, "gnueabi"))
7216 eabi_supported = TRUE;
/* Report whether an atomic opcode can be JITted: the atomic ops require
 * ARMv7 (LDREX/STREX-based sequences). The default case is elided in
 * this excerpt — presumably returns FALSE/asserts; confirm. */
7220 mono_arch_opcode_supported (int opcode)
7223 case OP_ATOMIC_EXCHANGE_I4:
7224 case OP_ATOMIC_CAS_I4:
7225 case OP_ATOMIC_ADD_NEW_I4:
7226 return v7_supported;
7232 #if defined(ENABLE_GSHAREDVT)
7234 #include "../../../mono-extensions/mono/mini/mini-arm-gsharedvt.c"
7236 #endif /* !MONOTOUCH */