2 * mini-arm.c: ARM backend for the Mono code generator
5 * Paolo Molaro (lupus@ximian.com)
6 * Dietmar Maurer (dietmar@ximian.com)
8 * (C) 2003 Ximian, Inc.
9 * Copyright 2003-2011 Novell, Inc (http://www.novell.com)
10 * Copyright 2011 Xamarin, Inc (http://www.xamarin.com)
15 #include <mono/metadata/appdomain.h>
16 #include <mono/metadata/debug-helpers.h>
17 #include <mono/utils/mono-mmap.h>
18 #include <mono/utils/mono-hwcap-arm.h>
24 #include "debugger-agent.h"
26 #include "mono/arch/arm/arm-vfp-codegen.h"
28 /* Sanity check: This makes no sense */
29 #if defined(ARM_FPU_NONE) && (defined(ARM_FPU_VFP) || defined(ARM_FPU_VFP_HARD))
30 #error "ARM_FPU_NONE is defined while one of ARM_FPU_VFP/ARM_FPU_VFP_HARD is defined"
34 * IS_SOFT_FLOAT: Is full software floating point used?
35 * IS_HARD_FLOAT: Is full hardware floating point used?
36 * IS_VFP: Is hardware floating point with software ABI used?
38 * These are not necessarily constants, e.g. IS_SOFT_FLOAT and
39 * IS_VFP may delegate to mono_arch_is_soft_float ().
42 #if defined(ARM_FPU_VFP_HARD)
43 #define IS_SOFT_FLOAT (FALSE)
44 #define IS_HARD_FLOAT (TRUE)
46 #elif defined(ARM_FPU_NONE)
47 #define IS_SOFT_FLOAT (mono_arch_is_soft_float ())
48 #define IS_HARD_FLOAT (FALSE)
49 #define IS_VFP (!mono_arch_is_soft_float ())
51 #define IS_SOFT_FLOAT (FALSE)
52 #define IS_HARD_FLOAT (FALSE)
56 #if defined(__ARM_EABI__) && defined(__linux__) && !defined(PLATFORM_ANDROID) && !defined(__native_client__)
57 #define HAVE_AEABI_READ_TP 1
60 #ifdef __native_client_codegen__
61 const guint kNaClAlignment = kNaClAlignmentARM;
62 const guint kNaClAlignmentMask = kNaClAlignmentMaskARM;
63 gint8 nacl_align_byte = -1; /* 0xff */
66 mono_arch_nacl_pad (guint8 *code, int pad)
68 /* Not yet properly implemented. */
69 g_assert_not_reached ();
74 mono_arch_nacl_skip_nops (guint8 *code)
76 /* Not yet properly implemented. */
77 g_assert_not_reached ();
81 #endif /* __native_client_codegen__ */
83 #define ALIGN_TO(val,align) ((((guint64)val) + ((align) - 1)) & ~((align) - 1))
86 void sys_icache_invalidate (void *start, size_t len);
89 /* This mutex protects architecture specific caches */
90 #define mono_mini_arch_lock() EnterCriticalSection (&mini_arch_mutex)
91 #define mono_mini_arch_unlock() LeaveCriticalSection (&mini_arch_mutex)
92 static CRITICAL_SECTION mini_arch_mutex;
94 static gboolean v5_supported = FALSE;
95 static gboolean v6_supported = FALSE;
96 static gboolean v7_supported = FALSE;
97 static gboolean v7s_supported = FALSE;
98 static gboolean thumb_supported = FALSE;
99 static gboolean thumb2_supported = FALSE;
101 * Whether to use the ARM EABI
103 static gboolean eabi_supported = FALSE;
106 * Whether to use the iPhone ABI extensions:
107 * http://developer.apple.com/library/ios/documentation/Xcode/Conceptual/iPhoneOSABIReference/index.html
108 * Basically, r7 is used as a frame pointer and it should point to the saved r7 + lr.
109 * This is required for debugging/profiling tools to work, but it has some overhead so it should
110 * only be turned on in debug builds.
112 static gboolean iphone_abi = FALSE;
115 * The FPU we are generating code for. This is NOT runtime configurable right now,
116 * since some things like MONO_ARCH_CALLEE_FREGS still depend on defines.
118 static MonoArmFPU arm_fpu;
120 #if defined(ARM_FPU_VFP_HARD)
122 * On armhf, d0-d7 are used for argument passing and d8-d15
123 * must be preserved across calls, which leaves us no room
124 * for scratch registers. So we use d14-d15 but back up their
125 * previous contents to a stack slot before using them - see
126 * mono_arm_emit_vfp_scratch_save/_restore ().
128 static int vfp_scratch1 = ARM_VFP_D14;
129 static int vfp_scratch2 = ARM_VFP_D15;
132 * On armel, d0-d7 do not need to be preserved, so we can
133 * freely make use of them as scratch registers.
135 static int vfp_scratch1 = ARM_VFP_D0;
136 static int vfp_scratch2 = ARM_VFP_D1;
141 static volatile int ss_trigger_var = 0;
143 static gpointer single_step_func_wrapper;
144 static gpointer breakpoint_func_wrapper;
147 * The code generated for sequence points reads from this location, which is
148 * made read-only when single stepping is enabled.
150 static gpointer ss_trigger_page;
152 /* Enabled breakpoints read from this trigger page */
153 static gpointer bp_trigger_page;
155 /* Structure used by the sequence points in AOTed code */
157 gpointer ss_trigger_page;
158 gpointer bp_trigger_page;
159 guint8* bp_addrs [MONO_ZERO_LEN_ARRAY];
164 * floating point support: on ARM it is a mess, there are at least 3
165 * different setups, each of which is binary-incompatible with the others.
166 * 1) FPA: old and ugly, but unfortunately what current distros use
167 * the double binary format has the two words swapped. 8 double registers.
168 * Implemented usually by kernel emulation.
169 * 2) softfloat: the compiler emulates all the fp ops. Usually uses the
170 * ugly swapped double format (I guess a softfloat-vfp exists, too, though).
171 * 3) VFP: the new and actually sensible and useful FP support. Implemented
172 * in HW or kernel-emulated, requires new tools. I think this is what symbian uses.
174 * We do not care about FPA. We will support soft float and VFP.
176 int mono_exc_esp_offset = 0;
178 #define arm_is_imm12(v) ((v) > -4096 && (v) < 4096)
179 #define arm_is_imm8(v) ((v) > -256 && (v) < 256)
180 #define arm_is_fpimm8(v) ((v) >= -1020 && (v) <= 1020)
182 #define LDR_MASK ((0xf << ARMCOND_SHIFT) | (3 << 26) | (1 << 22) | (1 << 20) | (15 << 12))
183 #define LDR_PC_VAL ((ARMCOND_AL << ARMCOND_SHIFT) | (1 << 26) | (0 << 22) | (1 << 20) | (15 << 12))
184 #define IS_LDR_PC(val) (((val) & LDR_MASK) == LDR_PC_VAL)
186 #define ADD_LR_PC_4 ((ARMCOND_AL << ARMCOND_SHIFT) | (1 << 25) | (1 << 23) | (ARMREG_PC << 16) | (ARMREG_LR << 12) | 4)
187 #define MOV_LR_PC ((ARMCOND_AL << ARMCOND_SHIFT) | (1 << 24) | (0xa << 20) | (ARMREG_LR << 12) | ARMREG_PC)
188 //#define DEBUG_IMT 0
190 /* A variant of ARM_LDR_IMM which can handle large offsets */
191 #define ARM_LDR_IMM_GENERAL(code, dreg, basereg, offset, scratch_reg) do { \
192 if (arm_is_imm12 ((offset))) { \
193 ARM_LDR_IMM (code, (dreg), (basereg), (offset)); \
195 g_assert ((scratch_reg) != (basereg)); \
196 code = mono_arm_emit_load_imm (code, (scratch_reg), (offset)); \
197 ARM_LDR_REG_REG (code, (dreg), (basereg), (scratch_reg)); \
201 #define ARM_STR_IMM_GENERAL(code, dreg, basereg, offset, scratch_reg) do { \
202 if (arm_is_imm12 ((offset))) { \
203 ARM_STR_IMM (code, (dreg), (basereg), (offset)); \
205 g_assert ((scratch_reg) != (basereg)); \
206 code = mono_arm_emit_load_imm (code, (scratch_reg), (offset)); \
207 ARM_STR_REG_REG (code, (dreg), (basereg), (scratch_reg)); \
211 static void mono_arch_compute_omit_fp (MonoCompile *cfg);
214 mono_arch_regname (int reg)
216 static const char * rnames[] = {
217 "arm_r0", "arm_r1", "arm_r2", "arm_r3", "arm_v1",
218 "arm_v2", "arm_v3", "arm_v4", "arm_v5", "arm_v6",
219 "arm_v7", "arm_fp", "arm_ip", "arm_sp", "arm_lr",
222 if (reg >= 0 && reg < 16)
228 mono_arch_fregname (int reg)
230 static const char * rnames[] = {
231 "arm_f0", "arm_f1", "arm_f2", "arm_f3", "arm_f4",
232 "arm_f5", "arm_f6", "arm_f7", "arm_f8", "arm_f9",
233 "arm_f10", "arm_f11", "arm_f12", "arm_f13", "arm_f14",
234 "arm_f15", "arm_f16", "arm_f17", "arm_f18", "arm_f19",
235 "arm_f20", "arm_f21", "arm_f22", "arm_f23", "arm_f24",
236 "arm_f25", "arm_f26", "arm_f27", "arm_f28", "arm_f29",
239 if (reg >= 0 && reg < 32)
247 emit_big_add (guint8 *code, int dreg, int sreg, int imm)
249 int imm8, rot_amount;
250 if ((imm8 = mono_arm_is_rotated_imm8 (imm, &rot_amount)) >= 0) {
251 ARM_ADD_REG_IMM (code, dreg, sreg, imm8, rot_amount);
254 g_assert (dreg != sreg);
255 code = mono_arm_emit_load_imm (code, dreg, imm);
256 ARM_ADD_REG_REG (code, dreg, dreg, sreg);
261 emit_memcpy (guint8 *code, int size, int dreg, int doffset, int sreg, int soffset)
263 /* we can use r0-r3, since this is called only for incoming args on the stack */
264 if (size > sizeof (gpointer) * 4) {
266 code = emit_big_add (code, ARMREG_R0, sreg, soffset);
267 code = emit_big_add (code, ARMREG_R1, dreg, doffset);
268 start_loop = code = mono_arm_emit_load_imm (code, ARMREG_R2, size);
269 ARM_LDR_IMM (code, ARMREG_R3, ARMREG_R0, 0);
270 ARM_STR_IMM (code, ARMREG_R3, ARMREG_R1, 0);
271 ARM_ADD_REG_IMM8 (code, ARMREG_R0, ARMREG_R0, 4);
272 ARM_ADD_REG_IMM8 (code, ARMREG_R1, ARMREG_R1, 4);
273 ARM_SUBS_REG_IMM8 (code, ARMREG_R2, ARMREG_R2, 4);
274 ARM_B_COND (code, ARMCOND_NE, 0);
275 arm_patch (code - 4, start_loop);
278 if (arm_is_imm12 (doffset) && arm_is_imm12 (doffset + size) &&
279 arm_is_imm12 (soffset) && arm_is_imm12 (soffset + size)) {
281 ARM_LDR_IMM (code, ARMREG_LR, sreg, soffset);
282 ARM_STR_IMM (code, ARMREG_LR, dreg, doffset);
288 code = emit_big_add (code, ARMREG_R0, sreg, soffset);
289 code = emit_big_add (code, ARMREG_R1, dreg, doffset);
290 doffset = soffset = 0;
292 ARM_LDR_IMM (code, ARMREG_LR, ARMREG_R0, soffset);
293 ARM_STR_IMM (code, ARMREG_LR, ARMREG_R1, doffset);
299 g_assert (size == 0);
304 emit_call_reg (guint8 *code, int reg)
307 ARM_BLX_REG (code, reg);
309 #ifdef USE_JUMP_TABLES
310 g_assert_not_reached ();
312 ARM_MOV_REG_REG (code, ARMREG_LR, ARMREG_PC);
316 ARM_MOV_REG_REG (code, ARMREG_PC, reg);
322 emit_call_seq (MonoCompile *cfg, guint8 *code)
324 #ifdef USE_JUMP_TABLES
325 code = mono_arm_patchable_bl (code, ARMCOND_AL);
327 if (cfg->method->dynamic) {
328 ARM_LDR_IMM (code, ARMREG_IP, ARMREG_PC, 0);
330 *(gpointer*)code = NULL;
332 code = emit_call_reg (code, ARMREG_IP);
341 mono_arm_patchable_b (guint8 *code, int cond)
343 #ifdef USE_JUMP_TABLES
346 jte = mono_jumptable_add_entry ();
347 code = mono_arm_load_jumptable_entry (code, jte, ARMREG_IP);
348 ARM_BX_COND (code, cond, ARMREG_IP);
350 ARM_B_COND (code, cond, 0);
356 mono_arm_patchable_bl (guint8 *code, int cond)
358 #ifdef USE_JUMP_TABLES
361 jte = mono_jumptable_add_entry ();
362 code = mono_arm_load_jumptable_entry (code, jte, ARMREG_IP);
363 ARM_BLX_REG_COND (code, cond, ARMREG_IP);
365 ARM_BL_COND (code, cond, 0);
370 #ifdef USE_JUMP_TABLES
372 mono_arm_load_jumptable_entry_addr (guint8 *code, gpointer *jte, ARMReg reg)
374 ARM_MOVW_REG_IMM (code, reg, GPOINTER_TO_UINT(jte) & 0xffff);
375 ARM_MOVT_REG_IMM (code, reg, (GPOINTER_TO_UINT(jte) >> 16) & 0xffff);
380 mono_arm_load_jumptable_entry (guint8 *code, gpointer* jte, ARMReg reg)
382 code = mono_arm_load_jumptable_entry_addr (code, jte, reg);
383 ARM_LDR_IMM (code, reg, reg, 0);
389 emit_move_return_value (MonoCompile *cfg, MonoInst *ins, guint8 *code)
391 switch (ins->opcode) {
394 case OP_FCALL_MEMBASE:
396 if (((MonoCallInst*)ins)->signature->ret->type == MONO_TYPE_R4) {
398 ARM_CVTS (code, ins->dreg, ARM_VFP_F0);
400 ARM_FMSR (code, ins->dreg, ARMREG_R0);
401 ARM_CVTS (code, ins->dreg, ins->dreg);
405 ARM_CPYD (code, ins->dreg, ARM_VFP_D0);
407 ARM_FMDRR (code, ARMREG_R0, ARMREG_R1, ins->dreg);
420 * Emit code to push an LMF structure on the LMF stack.
421 * On arm, this is intermixed with the initialization of other fields of the structure.
424 emit_save_lmf (MonoCompile *cfg, guint8 *code, gint32 lmf_offset)
426 gboolean get_lmf_fast = FALSE;
429 #ifdef HAVE_AEABI_READ_TP
430 gint32 lmf_addr_tls_offset = mono_get_lmf_addr_tls_offset ();
432 if (lmf_addr_tls_offset != -1) {
435 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_INTERNAL_METHOD,
436 (gpointer)"__aeabi_read_tp");
437 code = emit_call_seq (cfg, code);
439 ARM_LDR_IMM (code, ARMREG_R0, ARMREG_R0, lmf_addr_tls_offset);
445 if (cfg->method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) {
448 /* Inline mono_get_lmf_addr () */
449 /* jit_tls = pthread_getspecific (mono_jit_tls_id); lmf_addr = &jit_tls->lmf; */
451 /* Load mono_jit_tls_id */
453 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_JIT_TLS_ID, NULL);
454 ARM_LDR_IMM (code, ARMREG_R0, ARMREG_PC, 0);
456 *(gpointer*)code = NULL;
458 ARM_LDR_REG_REG (code, ARMREG_R0, ARMREG_PC, ARMREG_R0);
459 /* call pthread_getspecific () */
460 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_INTERNAL_METHOD,
461 (gpointer)"pthread_getspecific");
462 code = emit_call_seq (cfg, code);
463 /* lmf_addr = &jit_tls->lmf */
464 lmf_offset = G_STRUCT_OFFSET (MonoJitTlsData, lmf);
465 g_assert (arm_is_imm8 (lmf_offset));
466 ARM_ADD_REG_IMM (code, ARMREG_R0, ARMREG_R0, lmf_offset, 0);
473 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_INTERNAL_METHOD,
474 (gpointer)"mono_get_lmf_addr");
475 code = emit_call_seq (cfg, code);
477 /* we build the MonoLMF structure on the stack - see mini-arm.h */
478 /* lmf_offset is the offset from the previous stack pointer,
479 * alloc_size is the total stack space allocated, so the offset
480 * of MonoLMF from the current stack ptr is alloc_size - lmf_offset.
481 * The pointer to the struct is put in r1 (new_lmf).
482 * ip is used as scratch
483 * The callee-saved registers are already in the MonoLMF structure
485 code = emit_big_add (code, ARMREG_R1, ARMREG_SP, lmf_offset);
486 /* r0 is the result from mono_get_lmf_addr () */
487 ARM_STR_IMM (code, ARMREG_R0, ARMREG_R1, G_STRUCT_OFFSET (MonoLMF, lmf_addr));
488 /* new_lmf->previous_lmf = *lmf_addr */
489 ARM_LDR_IMM (code, ARMREG_IP, ARMREG_R0, G_STRUCT_OFFSET (MonoLMF, previous_lmf));
490 ARM_STR_IMM (code, ARMREG_IP, ARMREG_R1, G_STRUCT_OFFSET (MonoLMF, previous_lmf));
491 /* *(lmf_addr) = r1 */
492 ARM_STR_IMM (code, ARMREG_R1, ARMREG_R0, G_STRUCT_OFFSET (MonoLMF, previous_lmf));
493 /* Skip method (only needed for trampoline LMF frames) */
494 ARM_STR_IMM (code, ARMREG_SP, ARMREG_R1, G_STRUCT_OFFSET (MonoLMF, sp));
495 ARM_STR_IMM (code, ARMREG_FP, ARMREG_R1, G_STRUCT_OFFSET (MonoLMF, fp));
496 /* save the current IP */
497 ARM_MOV_REG_REG (code, ARMREG_IP, ARMREG_PC);
498 ARM_STR_IMM (code, ARMREG_IP, ARMREG_R1, G_STRUCT_OFFSET (MonoLMF, ip));
500 for (i = 0; i < sizeof (MonoLMF); i += sizeof (mgreg_t))
501 mini_gc_set_slot_type_from_fp (cfg, lmf_offset + i, SLOT_NOREF);
512 emit_float_args (MonoCompile *cfg, MonoCallInst *inst, guint8 *code, int *max_len, guint *offset)
516 for (list = inst->float_args; list; list = list->next) {
517 FloatArgData *fad = list->data;
518 MonoInst *var = get_vreg_to_inst (cfg, fad->vreg);
519 gboolean imm = arm_is_fpimm8 (var->inst_offset);
521 /* 4+1 insns for emit_big_add () and 1 for FLDS. */
527 if (*offset + *max_len > cfg->code_size) {
528 cfg->code_size += *max_len;
529 cfg->native_code = g_realloc (cfg->native_code, cfg->code_size);
531 code = cfg->native_code + *offset;
535 code = emit_big_add (code, ARMREG_LR, var->inst_basereg, var->inst_offset);
536 ARM_FLDS (code, fad->hreg, ARMREG_LR, 0);
538 ARM_FLDS (code, fad->hreg, var->inst_basereg, var->inst_offset);
540 *offset = code - cfg->native_code;
547 mono_arm_emit_vfp_scratch_save (MonoCompile *cfg, guint8 *code, int reg)
551 g_assert (reg == vfp_scratch1 || reg == vfp_scratch2);
553 inst = (MonoInst *) cfg->arch.vfp_scratch_slots [reg == vfp_scratch1 ? 0 : 1];
556 if (!arm_is_fpimm8 (inst->inst_offset)) {
557 code = emit_big_add (code, ARMREG_LR, inst->inst_basereg, inst->inst_offset);
558 ARM_FSTD (code, reg, ARMREG_LR, 0);
560 ARM_FSTD (code, reg, inst->inst_basereg, inst->inst_offset);
567 mono_arm_emit_vfp_scratch_restore (MonoCompile *cfg, guint8 *code, int reg)
571 g_assert (reg == vfp_scratch1 || reg == vfp_scratch2);
573 inst = (MonoInst *) cfg->arch.vfp_scratch_slots [reg == vfp_scratch1 ? 0 : 1];
576 if (!arm_is_fpimm8 (inst->inst_offset)) {
577 code = emit_big_add (code, ARMREG_LR, inst->inst_basereg, inst->inst_offset);
578 ARM_FLDD (code, reg, ARMREG_LR, 0);
580 ARM_FLDD (code, reg, inst->inst_basereg, inst->inst_offset);
589 * Emit code to pop an LMF structure from the LMF stack.
592 emit_restore_lmf (MonoCompile *cfg, guint8 *code, gint32 lmf_offset)
596 if (lmf_offset < 32) {
597 basereg = cfg->frame_reg;
602 code = emit_big_add (code, ARMREG_R2, cfg->frame_reg, lmf_offset);
605 /* ip = previous_lmf */
606 ARM_LDR_IMM (code, ARMREG_IP, basereg, offset + G_STRUCT_OFFSET (MonoLMF, previous_lmf));
608 ARM_LDR_IMM (code, ARMREG_LR, basereg, offset + G_STRUCT_OFFSET (MonoLMF, lmf_addr));
609 /* *(lmf_addr) = previous_lmf */
610 ARM_STR_IMM (code, ARMREG_IP, ARMREG_LR, G_STRUCT_OFFSET (MonoLMF, previous_lmf));
615 #endif /* #ifndef DISABLE_JIT */
618 * mono_arch_get_argument_info:
619 * @csig: a method signature
620 * @param_count: the number of parameters to consider
621 * @arg_info: an array to store the result infos
623 * Gathers information on parameters such as size, alignment and
624 * padding. arg_info should be large enough to hold param_count + 1 entries.
626 * Returns the size of the activation frame.
629 mono_arch_get_argument_info (MonoGenericSharingContext *gsctx, MonoMethodSignature *csig, int param_count, MonoJitArgumentInfo *arg_info)
631 int k, frame_size = 0;
632 guint32 size, align, pad;
636 t = mini_type_get_underlying_type (gsctx, csig->ret);
637 if (MONO_TYPE_ISSTRUCT (t)) {
638 frame_size += sizeof (gpointer);
642 arg_info [0].offset = offset;
645 frame_size += sizeof (gpointer);
649 arg_info [0].size = frame_size;
651 for (k = 0; k < param_count; k++) {
652 size = mini_type_stack_size_full (NULL, csig->params [k], &align, csig->pinvoke);
654 /* ignore alignment for now */
657 frame_size += pad = (align - (frame_size & (align - 1))) & (align - 1);
658 arg_info [k].pad = pad;
660 arg_info [k + 1].pad = 0;
661 arg_info [k + 1].size = size;
663 arg_info [k + 1].offset = offset;
667 align = MONO_ARCH_FRAME_ALIGNMENT;
668 frame_size += pad = (align - (frame_size & (align - 1))) & (align - 1);
669 arg_info [k].pad = pad;
674 #define MAX_ARCH_DELEGATE_PARAMS 3
677 get_delegate_invoke_impl (gboolean has_target, gboolean param_count, guint32 *code_size)
679 guint8 *code, *start;
682 start = code = mono_global_codeman_reserve (12);
684 /* Replace the this argument with the target */
685 ARM_LDR_IMM (code, ARMREG_IP, ARMREG_R0, G_STRUCT_OFFSET (MonoDelegate, method_ptr));
686 ARM_LDR_IMM (code, ARMREG_R0, ARMREG_R0, G_STRUCT_OFFSET (MonoDelegate, target));
687 ARM_MOV_REG_REG (code, ARMREG_PC, ARMREG_IP);
689 g_assert ((code - start) <= 12);
691 mono_arch_flush_icache (start, 12);
695 size = 8 + param_count * 4;
696 start = code = mono_global_codeman_reserve (size);
698 ARM_LDR_IMM (code, ARMREG_IP, ARMREG_R0, G_STRUCT_OFFSET (MonoDelegate, method_ptr));
699 /* slide down the arguments */
700 for (i = 0; i < param_count; ++i) {
701 ARM_MOV_REG_REG (code, (ARMREG_R0 + i), (ARMREG_R0 + i + 1));
703 ARM_MOV_REG_REG (code, ARMREG_PC, ARMREG_IP);
705 g_assert ((code - start) <= size);
707 mono_arch_flush_icache (start, size);
711 *code_size = code - start;
717 * mono_arch_get_delegate_invoke_impls:
719 * Return a list of MonoAotTrampInfo structures for the delegate invoke impl
723 mono_arch_get_delegate_invoke_impls (void)
731 code = get_delegate_invoke_impl (TRUE, 0, &code_len);
732 res = g_slist_prepend (res, mono_tramp_info_create ("delegate_invoke_impl_has_target", code, code_len, NULL, NULL));
734 for (i = 0; i <= MAX_ARCH_DELEGATE_PARAMS; ++i) {
735 code = get_delegate_invoke_impl (FALSE, i, &code_len);
736 tramp_name = g_strdup_printf ("delegate_invoke_impl_target_%d", i);
737 res = g_slist_prepend (res, mono_tramp_info_create (tramp_name, code, code_len, NULL, NULL));
745 mono_arch_get_delegate_invoke_impl (MonoMethodSignature *sig, gboolean has_target)
747 guint8 *code, *start;
749 /* FIXME: Support more cases */
750 if (MONO_TYPE_ISSTRUCT (sig->ret))
754 static guint8* cached = NULL;
755 mono_mini_arch_lock ();
757 mono_mini_arch_unlock ();
762 start = mono_aot_get_trampoline ("delegate_invoke_impl_has_target");
764 start = get_delegate_invoke_impl (TRUE, 0, NULL);
766 mono_mini_arch_unlock ();
769 static guint8* cache [MAX_ARCH_DELEGATE_PARAMS + 1] = {NULL};
772 if (sig->param_count > MAX_ARCH_DELEGATE_PARAMS)
774 for (i = 0; i < sig->param_count; ++i)
775 if (!mono_is_regsize_var (sig->params [i]))
778 mono_mini_arch_lock ();
779 code = cache [sig->param_count];
781 mono_mini_arch_unlock ();
786 char *name = g_strdup_printf ("delegate_invoke_impl_target_%d", sig->param_count);
787 start = mono_aot_get_trampoline (name);
790 start = get_delegate_invoke_impl (FALSE, sig->param_count, NULL);
792 cache [sig->param_count] = start;
793 mono_mini_arch_unlock ();
801 mono_arch_get_this_arg_from_call (mgreg_t *regs, guint8 *code)
803 return (gpointer)regs [ARMREG_R0];
807 * Initialize the cpu to execute managed code.
810 mono_arch_cpu_init (void)
812 #if defined(__APPLE__)
815 i8_align = __alignof__ (gint64);
820 create_function_wrapper (gpointer function)
822 guint8 *start, *code;
824 start = code = mono_global_codeman_reserve (96);
827 * Construct the MonoContext structure on the stack.
830 ARM_SUB_REG_IMM8 (code, ARMREG_SP, ARMREG_SP, sizeof (MonoContext));
832 /* save ip, lr and pc into their correspodings ctx.regs slots. */
833 ARM_STR_IMM (code, ARMREG_IP, ARMREG_SP, G_STRUCT_OFFSET (MonoContext, regs) + sizeof (mgreg_t) * ARMREG_IP);
834 ARM_STR_IMM (code, ARMREG_LR, ARMREG_SP, G_STRUCT_OFFSET (MonoContext, regs) + 4 * ARMREG_LR);
835 ARM_STR_IMM (code, ARMREG_LR, ARMREG_SP, G_STRUCT_OFFSET (MonoContext, regs) + 4 * ARMREG_PC);
837 /* save r0..r10 and fp */
838 ARM_ADD_REG_IMM8 (code, ARMREG_IP, ARMREG_SP, G_STRUCT_OFFSET (MonoContext, regs));
839 ARM_STM (code, ARMREG_IP, 0x0fff);
841 /* now we can update fp. */
842 ARM_MOV_REG_REG (code, ARMREG_FP, ARMREG_SP);
844 /* make ctx.esp hold the actual value of sp at the beginning of this method. */
845 ARM_ADD_REG_IMM8 (code, ARMREG_R0, ARMREG_FP, sizeof (MonoContext));
846 ARM_STR_IMM (code, ARMREG_R0, ARMREG_IP, 4 * ARMREG_SP);
847 ARM_STR_IMM (code, ARMREG_R0, ARMREG_FP, G_STRUCT_OFFSET (MonoContext, regs) + 4 * ARMREG_SP);
849 /* make ctx.eip hold the address of the call. */
850 ARM_SUB_REG_IMM8 (code, ARMREG_LR, ARMREG_LR, 4);
851 ARM_STR_IMM (code, ARMREG_LR, ARMREG_SP, G_STRUCT_OFFSET (MonoContext, pc));
853 /* r0 now points to the MonoContext */
854 ARM_MOV_REG_REG (code, ARMREG_R0, ARMREG_FP);
857 #ifdef USE_JUMP_TABLES
859 gpointer *jte = mono_jumptable_add_entry ();
860 code = mono_arm_load_jumptable_entry (code, jte, ARMREG_IP);
864 ARM_LDR_IMM (code, ARMREG_IP, ARMREG_PC, 0);
866 *(gpointer*)code = function;
869 ARM_BLX_REG (code, ARMREG_IP);
871 /* we're back; save ctx.eip and ctx.esp into the corresponding regs slots. */
872 ARM_LDR_IMM (code, ARMREG_R0, ARMREG_FP, G_STRUCT_OFFSET (MonoContext, pc));
873 ARM_STR_IMM (code, ARMREG_R0, ARMREG_FP, G_STRUCT_OFFSET (MonoContext, regs) + 4 * ARMREG_LR);
874 ARM_STR_IMM (code, ARMREG_R0, ARMREG_FP, G_STRUCT_OFFSET (MonoContext, regs) + 4 * ARMREG_PC);
876 /* make ip point to the regs array, then restore everything, including pc. */
877 ARM_ADD_REG_IMM8 (code, ARMREG_IP, ARMREG_FP, G_STRUCT_OFFSET (MonoContext, regs));
878 ARM_LDM (code, ARMREG_IP, 0xffff);
880 mono_arch_flush_icache (start, code - start);
886 * Initialize architecture specific code.
889 mono_arch_init (void)
891 const char *cpu_arch;
893 InitializeCriticalSection (&mini_arch_mutex);
894 #ifdef MONO_ARCH_SOFT_DEBUG_SUPPORTED
895 if (mini_get_debug_options ()->soft_breakpoints) {
896 single_step_func_wrapper = create_function_wrapper (debugger_agent_single_step_from_context);
897 breakpoint_func_wrapper = create_function_wrapper (debugger_agent_breakpoint_from_context);
902 ss_trigger_page = mono_valloc (NULL, mono_pagesize (), MONO_MMAP_READ|MONO_MMAP_32BIT);
903 bp_trigger_page = mono_valloc (NULL, mono_pagesize (), MONO_MMAP_READ|MONO_MMAP_32BIT);
904 mono_mprotect (bp_trigger_page, mono_pagesize (), 0);
907 mono_aot_register_jit_icall ("mono_arm_throw_exception", mono_arm_throw_exception);
908 mono_aot_register_jit_icall ("mono_arm_throw_exception_by_token", mono_arm_throw_exception_by_token);
909 mono_aot_register_jit_icall ("mono_arm_resume_unwind", mono_arm_resume_unwind);
910 #if defined(ENABLE_GSHAREDVT)
911 mono_aot_register_jit_icall ("mono_arm_start_gsharedvt_call", mono_arm_start_gsharedvt_call);
914 #if defined(__ARM_EABI__)
915 eabi_supported = TRUE;
918 #if defined(ARM_FPU_VFP_HARD)
919 arm_fpu = MONO_ARM_FPU_VFP_HARD;
921 arm_fpu = MONO_ARM_FPU_VFP;
923 #if defined(ARM_FPU_NONE) && !defined(__APPLE__)
924 /* If we're compiling with a soft float fallback and it
925 turns out that no VFP unit is available, we need to
926 switch to soft float. We don't do this for iOS, since
927 iOS devices always have a VFP unit. */
928 if (!mono_hwcap_arm_has_vfp)
929 arm_fpu = MONO_ARM_FPU_NONE;
933 v5_supported = mono_hwcap_arm_is_v5;
934 v6_supported = mono_hwcap_arm_is_v6;
935 v7_supported = mono_hwcap_arm_is_v7;
936 v7s_supported = mono_hwcap_arm_is_v7s;
938 #if defined(__APPLE__)
939 /* iOS is special-cased here because we don't yet
940 have a way to properly detect CPU features on it. */
941 thumb_supported = TRUE;
944 thumb_supported = mono_hwcap_arm_has_thumb;
945 thumb2_supported = mono_hwcap_arm_has_thumb2;
948 /* Format: armv(5|6|7[s])[-thumb[2]] */
949 cpu_arch = g_getenv ("MONO_CPU_ARCH");
951 /* Do this here so it overrides any detection. */
953 if (strncmp (cpu_arch, "armv", 4) == 0) {
954 v5_supported = cpu_arch [4] >= '5';
955 v6_supported = cpu_arch [4] >= '6';
956 v7_supported = cpu_arch [4] >= '7';
957 v7s_supported = strncmp (cpu_arch, "armv7s", 6) == 0;
960 thumb_supported = strstr (cpu_arch, "thumb") != NULL;
961 thumb2_supported = strstr (cpu_arch, "thumb2") != NULL;
966 * Cleanup architecture specific code.
969 mono_arch_cleanup (void)
974 * This function returns the optimizations supported on this cpu.
977 mono_arch_cpu_optimizations (guint32 *exclude_mask)
979 /* no arm-specific optimizations yet */
985 * This function test for all SIMD functions supported.
987 * Returns a bitmask corresponding to all supported versions.
991 mono_arch_cpu_enumerate_simd_versions (void)
993 /* SIMD is currently unimplemented */
1001 mono_arch_opcode_needs_emulation (MonoCompile *cfg, int opcode)
1003 if (v7s_supported) {
1017 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
1019 mono_arch_is_soft_float (void)
1021 return arm_fpu == MONO_ARM_FPU_NONE;
1026 mono_arm_is_hard_float (void)
1028 return arm_fpu == MONO_ARM_FPU_VFP_HARD;
1032 is_regsize_var (MonoGenericSharingContext *gsctx, MonoType *t) {
1035 t = mini_type_get_underlying_type (gsctx, t);
1042 case MONO_TYPE_FNPTR:
1044 case MONO_TYPE_OBJECT:
1045 case MONO_TYPE_STRING:
1046 case MONO_TYPE_CLASS:
1047 case MONO_TYPE_SZARRAY:
1048 case MONO_TYPE_ARRAY:
1050 case MONO_TYPE_GENERICINST:
1051 if (!mono_type_generic_inst_is_valuetype (t))
1054 case MONO_TYPE_VALUETYPE:
1061 mono_arch_get_allocatable_int_vars (MonoCompile *cfg)
1066 for (i = 0; i < cfg->num_varinfo; i++) {
1067 MonoInst *ins = cfg->varinfo [i];
1068 MonoMethodVar *vmv = MONO_VARINFO (cfg, i);
1071 if (vmv->range.first_use.abs_pos >= vmv->range.last_use.abs_pos)
1074 if (ins->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT) || (ins->opcode != OP_LOCAL && ins->opcode != OP_ARG))
1077 /* we can only allocate 32 bit values */
1078 if (is_regsize_var (cfg->generic_sharing_context, ins->inst_vtype)) {
1079 g_assert (MONO_VARINFO (cfg, i)->reg == -1);
1080 g_assert (i == vmv->idx);
1081 vars = mono_varlist_insert_sorted (cfg, vars, vmv, FALSE);
1088 #define USE_EXTRA_TEMPS 0
1091 mono_arch_get_global_int_regs (MonoCompile *cfg)
1095 mono_arch_compute_omit_fp (cfg);
1098 * FIXME: Interface calls might go through a static rgctx trampoline which
1099 * sets V5, but it doesn't save it, so we need to save it ourselves, and
1102 if (cfg->flags & MONO_CFG_HAS_CALLS)
1103 cfg->uses_rgctx_reg = TRUE;
1105 if (cfg->arch.omit_fp)
1106 regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_FP));
1107 regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_V1));
1108 regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_V2));
1109 regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_V3));
1111 /* V4=R7 is used as a frame pointer, but V7=R10 is preserved */
1112 regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_V7));
1114 regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_V4));
1115 if (!(cfg->compile_aot || cfg->uses_rgctx_reg || COMPILE_LLVM (cfg)))
1116 /* V5 is reserved for passing the vtable/rgctx/IMT method */
1117 regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_V5));
1118 /*regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_V6));*/
1119 /*regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_V7));*/
1125 * mono_arch_regalloc_cost:
1127 * Return the cost, in number of memory references, of the action of
1128 * allocating the variable VMV into a register during global register
1132 mono_arch_regalloc_cost (MonoCompile *cfg, MonoMethodVar *vmv)
1138 #endif /* #ifndef DISABLE_JIT */
1140 #ifndef __GNUC_PREREQ
1141 #define __GNUC_PREREQ(maj, min) (0)
1145 mono_arch_flush_icache (guint8 *code, gint size)
1147 #if defined(__native_client__)
1148 // For Native Client we don't have to flush i-cache here,
1149 // as it's being done by dyncode interface.
1152 #ifdef MONO_CROSS_COMPILE
1154 sys_icache_invalidate (code, size);
1155 #elif __GNUC_PREREQ(4, 1)
1156 __clear_cache (code, code + size);
1157 #elif defined(PLATFORM_ANDROID)
1158 const int syscall = 0xf0002;
1166 : "r" (code), "r" (code + size), "r" (syscall)
1167 : "r0", "r1", "r7", "r2"
1170 __asm __volatile ("mov r0, %0\n"
1173 "swi 0x9f0002 @ sys_cacheflush"
1175 : "r" (code), "r" (code + size), "r" (0)
1176 : "r0", "r1", "r3" );
1178 #endif /* !__native_client__ */
1189 RegTypeStructByAddr,
1190 /* gsharedvt argument passed by addr in greg */
1191 RegTypeGSharedVtInReg,
1192 /* gsharedvt argument passed by addr on stack */
1193 RegTypeGSharedVtOnStack,
1198 guint16 vtsize; /* in param area */
1202 guint8 size : 4; /* 1, 2, 4, 8, or regs used by RegTypeStructByVal */
1207 guint32 stack_usage;
1208 gboolean vtype_retaddr;
1209 /* The index of the vret arg in the argument list */
1219 /*#define __alignof__(a) sizeof(a)*/
1220 #define __alignof__(type) G_STRUCT_OFFSET(struct { char c; type x; }, x)
1223 #define PARAM_REGS 4
1226 add_general (guint *gr, guint *stack_size, ArgInfo *ainfo, gboolean simple)
1229 if (*gr > ARMREG_R3) {
1231 ainfo->offset = *stack_size;
1232 ainfo->reg = ARMREG_SP; /* in the caller */
1233 ainfo->storage = RegTypeBase;
1236 ainfo->storage = RegTypeGeneral;
1243 split = i8_align == 4;
1248 if (*gr == ARMREG_R3 && split) {
1249 /* first word in r3 and the second on the stack */
1250 ainfo->offset = *stack_size;
1251 ainfo->reg = ARMREG_SP; /* in the caller */
1252 ainfo->storage = RegTypeBaseGen;
1254 } else if (*gr >= ARMREG_R3) {
1255 if (eabi_supported) {
1256 /* darwin aligns longs to 4 byte only */
1257 if (i8_align == 8) {
1262 ainfo->offset = *stack_size;
1263 ainfo->reg = ARMREG_SP; /* in the caller */
1264 ainfo->storage = RegTypeBase;
1267 if (eabi_supported) {
1268 if (i8_align == 8 && ((*gr) & 1))
1271 ainfo->storage = RegTypeIRegPair;
/*
 * add_float:
 * Assign the next floating point argument to a VFP register (hard-float ABI)
 * or to a caller stack slot once the s0-s15 bank is exhausted.  *float_spare
 * implements the AAPCS back-filling rule for single-precision values (see the
 * comment below).  NOTE(review): SOURCE is elided; interior lines are missing.
 */
1280 add_float (guint *fpr, guint *stack_size, ArgInfo *ainfo, gboolean is_double, gint *float_spare)
1283 * If we're calling a function like this:
1285 * void foo(float a, double b, float c)
1287 * We pass a in s0 and b in d1. That leaves us
1288 * with s1 being unused. The armhf ABI recognizes
1289 * this and requires register assignment to then
1290 * use that for the next single-precision arg,
1291 * i.e. c in this example. So float_spare either
1292 * tells us which reg to use for the next single-
1293 * precision arg, or it's -1, meaning use *fpr.
1295 * Note that even though most of the JIT speaks
1296 * double-precision, fpr represents single-
1297 * precision registers.
1299 * See parts 5.5 and 6.1.2 of the AAPCS for how
1303 if (*fpr < ARM_VFP_F16 || (!is_double && *float_spare >= 0)) {
1304 ainfo->storage = RegTypeFP;
1308 * If we're passing a double-precision value
1309 * and *fpr is odd (e.g. it's s1, s3, ...)
1310 * we need to use the next even register. So
1311 * we mark the current *fpr as a spare that
1312 * can be used for the next single-precision
1316 *float_spare = *fpr;
1321 * At this point, we have an even register
1322 * so we assign that and move along.
1326 } else if (*float_spare >= 0) {
1328 * We're passing a single-precision value
1329 * and it looks like a spare single-
1330 * precision register is available. Let's
1334 ainfo->reg = *float_spare;
1338 * If we hit this branch, we're passing a
1339 * single-precision value and we can simply
1340 * use the next available register.
1348 * We've exhausted available floating point
1349 * regs, so pass the rest on the stack.
1357 ainfo->offset = *stack_size;
1358 ainfo->reg = ARMREG_SP;
1359 ainfo->storage = RegTypeBase;
/*
 * get_call_info:
 * Build a CallInfo describing where every argument and the return value of
 * SIG live under the ARM calling convention: core register, VFP register,
 * register pair, caller stack slot, or (for vtypes) by-value register block
 * or hidden return-address argument.  Allocates from MP when given, otherwise
 * from the GC-tracked heap.  NOTE(review): SOURCE is elided; interior lines
 * (including some case labels and closing braces) are missing from this view.
 */
1366 get_call_info (MonoGenericSharingContext *gsctx, MonoMemPool *mp, MonoMethodSignature *sig)
1368 guint i, gr, fpr, pstart;
1370 int n = sig->hasthis + sig->param_count;
1371 MonoType *simpletype;
1372 guint32 stack_size = 0;
1374 gboolean is_pinvoke = sig->pinvoke;
1378 cinfo = mono_mempool_alloc0 (mp, sizeof (CallInfo) + (sizeof (ArgInfo) * n));
1380 cinfo = g_malloc0 (sizeof (CallInfo) + (sizeof (ArgInfo) * n));
/* First decide how the return value comes back: small pinvoke structs come
 * back by value in registers, everything else struct-like via a hidden
 * return-address argument (vtype_retaddr). */
1387 t = mini_type_get_underlying_type (gsctx, sig->ret);
1388 if (MONO_TYPE_ISSTRUCT (t)) {
1391 if (is_pinvoke && mono_class_native_size (mono_class_from_mono_type (t), &align) <= sizeof (gpointer)) {
1392 cinfo->ret.storage = RegTypeStructByVal;
1394 cinfo->vtype_retaddr = TRUE;
1396 } else if (!(t->type == MONO_TYPE_GENERICINST && !mono_type_generic_inst_is_valuetype (t)) && mini_is_gsharedvt_type_gsctx (gsctx, t)) {
1397 cinfo->vtype_retaddr = TRUE;
1403 * To simplify get_this_arg_reg () and LLVM integration, emit the vret arg after
1404 * the first argument, allowing 'this' to be always passed in the first arg reg.
1405 * Also do this if the first argument is a reference type, since virtual calls
1406 * are sometimes made using calli without sig->hasthis set, like in the delegate
1409 if (cinfo->vtype_retaddr && !is_pinvoke && (sig->hasthis || (sig->param_count > 0 && MONO_TYPE_IS_REFERENCE (mini_type_get_underlying_type (gsctx, sig->params [0]))))) {
1411 add_general (&gr, &stack_size, cinfo->args + 0, TRUE);
1413 add_general (&gr, &stack_size, &cinfo->args [sig->hasthis + 0], TRUE);
1417 add_general (&gr, &stack_size, &cinfo->ret, TRUE);
1418 cinfo->vret_arg_index = 1;
1422 add_general (&gr, &stack_size, cinfo->args + 0, TRUE);
1426 if (cinfo->vtype_retaddr)
1427 add_general (&gr, &stack_size, &cinfo->ret, TRUE);
/* Walk the declared parameters and classify each one. */
1430 DEBUG(printf("params: %d\n", sig->param_count));
1431 for (i = pstart; i < sig->param_count; ++i) {
1432 ArgInfo *ainfo = &cinfo->args [n];
1434 if ((sig->call_convention == MONO_CALL_VARARG) && (i == sig->sentinelpos)) {
1435 /* Prevent implicit arguments and sig_cookie from
1436 being passed in registers */
1439 /* Emit the signature cookie just before the implicit arguments */
1440 add_general (&gr, &stack_size, &cinfo->sig_cookie, TRUE);
1442 DEBUG(printf("param %d: ", i));
1443 if (sig->params [i]->byref) {
1444 DEBUG(printf("byref\n"));
1445 add_general (&gr, &stack_size, ainfo, TRUE);
1449 simpletype = mini_type_get_underlying_type (gsctx, sig->params [i]);
1450 switch (simpletype->type) {
1451 case MONO_TYPE_BOOLEAN:
1454 cinfo->args [n].size = 1;
1455 add_general (&gr, &stack_size, ainfo, TRUE);
1458 case MONO_TYPE_CHAR:
1461 cinfo->args [n].size = 2;
1462 add_general (&gr, &stack_size, ainfo, TRUE);
1467 cinfo->args [n].size = 4;
1468 add_general (&gr, &stack_size, ainfo, TRUE);
1474 case MONO_TYPE_FNPTR:
1475 case MONO_TYPE_CLASS:
1476 case MONO_TYPE_OBJECT:
1477 case MONO_TYPE_STRING:
1478 case MONO_TYPE_SZARRAY:
1479 case MONO_TYPE_ARRAY:
1480 cinfo->args [n].size = sizeof (gpointer);
1481 add_general (&gr, &stack_size, ainfo, TRUE);
1484 case MONO_TYPE_GENERICINST:
1485 if (!mono_type_generic_inst_is_valuetype (simpletype)) {
1486 cinfo->args [n].size = sizeof (gpointer);
1487 add_general (&gr, &stack_size, ainfo, TRUE);
1491 if (mini_is_gsharedvt_type_gsctx (gsctx, simpletype)) {
1492 /* gsharedvt arguments are passed by ref */
1493 g_assert (mini_is_gsharedvt_type_gsctx (gsctx, simpletype));
1494 add_general (&gr, &stack_size, ainfo, TRUE);
1495 switch (ainfo->storage) {
1496 case RegTypeGeneral:
1497 ainfo->storage = RegTypeGSharedVtInReg;
1500 ainfo->storage = RegTypeGSharedVtOnStack;
1503 g_assert_not_reached ();
1509 case MONO_TYPE_TYPEDBYREF:
1510 case MONO_TYPE_VALUETYPE: {
/* Value types: compute native or managed size, round to whole words, and
 * pass the first words in r0-r3 with the remainder on the stack. */
1516 if (simpletype->type == MONO_TYPE_TYPEDBYREF) {
1517 size = sizeof (MonoTypedRef);
1518 align = sizeof (gpointer);
1520 MonoClass *klass = mono_class_from_mono_type (sig->params [i]);
1522 size = mono_class_native_size (klass, &align);
1524 size = mini_type_stack_size_full (gsctx, simpletype, &align, FALSE);
1526 DEBUG(printf ("load %d bytes struct\n", size));
1529 align_size += (sizeof (gpointer) - 1);
1530 align_size &= ~(sizeof (gpointer) - 1);
1531 nwords = (align_size + sizeof (gpointer) -1 ) / sizeof (gpointer);
1532 ainfo->storage = RegTypeStructByVal;
1533 ainfo->struct_size = size;
1534 /* FIXME: align stack_size if needed */
1535 if (eabi_supported) {
1536 if (align >= 8 && (gr & 1))
1539 if (gr > ARMREG_R3) {
1541 ainfo->vtsize = nwords;
1543 int rest = ARMREG_R3 - gr + 1;
1544 int n_in_regs = rest >= nwords? nwords: rest;
1546 ainfo->size = n_in_regs;
1547 ainfo->vtsize = nwords - n_in_regs;
1550 nwords -= n_in_regs;
1552 if (sig->call_convention == MONO_CALL_VARARG)
1553 /* This matches the alignment in mono_ArgIterator_IntGetNextArg () */
1554 stack_size = ALIGN_TO (stack_size, align);
1555 ainfo->offset = stack_size;
1556 /*g_print ("offset for arg %d at %d\n", n, stack_size);*/
1557 stack_size += nwords * sizeof (gpointer);
1564 add_general (&gr, &stack_size, ainfo, FALSE);
/* Floating point args: VFP registers under the hard-float ABI, otherwise
 * core registers / stack via add_general. */
1571 add_float (&fpr, &stack_size, ainfo, FALSE, &float_spare);
1573 add_general (&gr, &stack_size, ainfo, TRUE);
1581 add_float (&fpr, &stack_size, ainfo, TRUE, &float_spare);
1583 add_general (&gr, &stack_size, ainfo, FALSE);
1588 case MONO_TYPE_MVAR:
1589 /* gsharedvt arguments are passed by ref */
1590 g_assert (mini_is_gsharedvt_type_gsctx (gsctx, simpletype));
1591 add_general (&gr, &stack_size, ainfo, TRUE);
1592 switch (ainfo->storage) {
1593 case RegTypeGeneral:
1594 ainfo->storage = RegTypeGSharedVtInReg;
1597 ainfo->storage = RegTypeGSharedVtOnStack;
1600 g_assert_not_reached ();
1605 g_error ("Can't trampoline 0x%x", sig->params [i]->type);
1609 /* Handle the case where there are no implicit arguments */
1610 if ((sig->call_convention == MONO_CALL_VARARG) && (i == sig->sentinelpos)) {
1611 /* Prevent implicit arguments and sig_cookie from
1612 being passed in registers */
1615 /* Emit the signature cookie just before the implicit arguments */
1616 add_general (&gr, &stack_size, &cinfo->sig_cookie, TRUE);
/* Finally classify the return value's register/storage. */
1620 simpletype = mini_type_get_underlying_type (gsctx, sig->ret);
1621 switch (simpletype->type) {
1622 case MONO_TYPE_BOOLEAN:
1627 case MONO_TYPE_CHAR:
1633 case MONO_TYPE_FNPTR:
1634 case MONO_TYPE_CLASS:
1635 case MONO_TYPE_OBJECT:
1636 case MONO_TYPE_SZARRAY:
1637 case MONO_TYPE_ARRAY:
1638 case MONO_TYPE_STRING:
1639 cinfo->ret.storage = RegTypeGeneral;
1640 cinfo->ret.reg = ARMREG_R0;
1644 cinfo->ret.storage = RegTypeIRegPair;
1645 cinfo->ret.reg = ARMREG_R0;
1649 cinfo->ret.storage = RegTypeFP;
1651 if (IS_HARD_FLOAT) {
1652 cinfo->ret.reg = ARM_VFP_F0;
1654 cinfo->ret.reg = ARMREG_R0;
1658 case MONO_TYPE_GENERICINST:
1659 if (!mono_type_generic_inst_is_valuetype (simpletype)) {
1660 cinfo->ret.storage = RegTypeGeneral;
1661 cinfo->ret.reg = ARMREG_R0;
1664 // FIXME: Only for variable types
1665 if (mini_is_gsharedvt_type_gsctx (gsctx, simpletype)) {
1666 cinfo->ret.storage = RegTypeStructByAddr;
1667 g_assert (cinfo->vtype_retaddr);
1671 case MONO_TYPE_VALUETYPE:
1672 case MONO_TYPE_TYPEDBYREF:
1673 if (cinfo->ret.storage != RegTypeStructByVal)
1674 cinfo->ret.storage = RegTypeStructByAddr;
1677 case MONO_TYPE_MVAR:
1678 g_assert (mini_is_gsharedvt_type_gsctx (gsctx, simpletype));
1679 cinfo->ret.storage = RegTypeStructByAddr;
1680 g_assert (cinfo->vtype_retaddr);
1682 case MONO_TYPE_VOID:
1685 g_error ("Can't handle as return value 0x%x", sig->ret->type);
1689 /* align stack size to 8 */
1690 DEBUG (printf (" stack size: %d (%d)\n", (stack_size + 15) & ~15, stack_size));
1691 stack_size = (stack_size + 7) & ~7;
1693 cinfo->stack_usage = stack_size;
/*
 * mono_arch_tail_call_supported:
 * Decide whether a tail call from CALLER_SIG to CALLEE_SIG can be emitted on
 * ARM.  Compares the CallInfo of both signatures: the callee must not use
 * more stack than the caller, must not return a vtype through a hidden
 * address on the callee's stack, and must fit within a bounded out-arg area.
 * NOTE(review): SOURCE is elided; the freeing of c1/c2 and the return paths
 * are not visible here — confirm against the full file.
 */
1699 mono_arch_tail_call_supported (MonoCompile *cfg, MonoMethodSignature *caller_sig, MonoMethodSignature *callee_sig)
1701 MonoType *callee_ret;
1705 if (cfg->compile_aot && !cfg->full_aot)
1706 /* OP_TAILCALL doesn't work with AOT */
1709 c1 = get_call_info (NULL, NULL, caller_sig);
1710 c2 = get_call_info (NULL, NULL, callee_sig);
1713 * Tail calls with more callee stack usage than the caller cannot be supported, since
1714 * the extra stack space would be left on the stack after the tail call.
1716 res = c1->stack_usage >= c2->stack_usage;
1717 callee_ret = mini_replace_type (callee_sig->ret);
1718 if (callee_ret && MONO_TYPE_ISSTRUCT (callee_ret) && c2->ret.storage != RegTypeStructByVal)
1719 /* An address on the callee's stack is passed as the first argument */
/* 16 words (4 regs' worth * 4) — cap on callee out-arg stack usage. */
1722 if (c2->stack_usage > 16 * 4)
/*
 * debug_omit_fp:
 * Debug helper gating frame-pointer omission via the mono debug counter,
 * allowing the optimization to be bisected at runtime.
 */
1734 debug_omit_fp (void)
1737 return mono_debug_count ();
1744 * mono_arch_compute_omit_fp:
1746 * Determine whenever the frame pointer can be eliminated.
/* Result is cached in cfg->arch.omit_fp / omit_fp_computed; starts optimistic
 * (TRUE) and is knocked down by each disqualifying condition below.
 * NOTE(review): SOURCE is elided; interior lines are missing from this view. */
1749 mono_arch_compute_omit_fp (MonoCompile *cfg)
1751 MonoMethodSignature *sig;
1752 MonoMethodHeader *header;
1756 if (cfg->arch.omit_fp_computed)
1759 header = cfg->header;
1761 sig = mono_method_signature (cfg->method);
1763 if (!cfg->arch.cinfo)
1764 cfg->arch.cinfo = get_call_info (cfg->generic_sharing_context, cfg->mempool, sig);
1765 cinfo = cfg->arch.cinfo;
1768 * FIXME: Remove some of the restrictions.
1770 cfg->arch.omit_fp = TRUE;
1771 cfg->arch.omit_fp_computed = TRUE;
1773 if (cfg->disable_omit_fp)
1774 cfg->arch.omit_fp = FALSE;
1775 if (!debug_omit_fp ())
1776 cfg->arch.omit_fp = FALSE;
1778 if (cfg->method->save_lmf)
1779 cfg->arch.omit_fp = FALSE;
1781 if (cfg->flags & MONO_CFG_HAS_ALLOCA)
1782 cfg->arch.omit_fp = FALSE;
1783 if (header->num_clauses)
1784 cfg->arch.omit_fp = FALSE;
1785 if (cfg->param_area)
1786 cfg->arch.omit_fp = FALSE;
1787 if (!sig->pinvoke && (sig->call_convention == MONO_CALL_VARARG))
1788 cfg->arch.omit_fp = FALSE;
1789 if ((mono_jit_trace_calls != NULL && mono_trace_eval (cfg->method)) ||
1790 (cfg->prof_options & MONO_PROFILE_ENTER_LEAVE))
1791 cfg->arch.omit_fp = FALSE;
/* Stack-passed arguments need a stable FP to address them. */
1792 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
1793 ArgInfo *ainfo = &cinfo->args [i];
1795 if (ainfo->storage == RegTypeBase || ainfo->storage == RegTypeBaseGen || ainfo->storage == RegTypeStructByVal) {
1797 * The stack offset can only be determined when the frame
1800 cfg->arch.omit_fp = FALSE;
/* Sum the local variable sizes; presumably used to bound frame size — the
 * consuming check is elided from this view, confirm in the full file. */
1805 for (i = cfg->locals_start; i < cfg->num_varinfo; i++) {
1806 MonoInst *ins = cfg->varinfo [i];
1809 locals_size += mono_type_size (ins->inst_vtype, &ialign);
1814 * Set var information according to the calling convention. arm version.
1815 * The locals var stuff should most likely be split in another method.
/* Assigns a frame slot (OP_REGOFFSET relative to cfg->frame_reg) or register
 * (OP_REGVAR) to the return value, special JIT variables, locals, and
 * incoming arguments, accumulating 'offset' upward from the stack pointer.
 * NOTE(review): SOURCE is elided; interior lines are missing from this view. */
1818 mono_arch_allocate_vars (MonoCompile *cfg)
1820 MonoMethodSignature *sig;
1821 MonoMethodHeader *header;
1823 int i, offset, size, align, curinst;
1827 sig = mono_method_signature (cfg->method);
1829 if (!cfg->arch.cinfo)
1830 cfg->arch.cinfo = get_call_info (cfg->generic_sharing_context, cfg->mempool, sig);
1831 cinfo = cfg->arch.cinfo;
1833 mono_arch_compute_omit_fp (cfg);
1835 if (cfg->arch.omit_fp)
1836 cfg->frame_reg = ARMREG_SP;
1838 cfg->frame_reg = ARMREG_FP;
1840 cfg->flags |= MONO_CFG_HAS_SPILLUP;
1842 /* allow room for the vararg method args: void* and long/double */
1843 if (mono_jit_trace_calls != NULL && mono_trace_eval (cfg->method))
1844 cfg->param_area = MAX (cfg->param_area, sizeof (gpointer)*8);
1846 header = cfg->header;
1848 /* See mono_arch_get_global_int_regs () */
1849 if (cfg->flags & MONO_CFG_HAS_CALLS)
1850 cfg->uses_rgctx_reg = TRUE;
1852 if (cfg->frame_reg != ARMREG_SP)
1853 cfg->used_int_regs |= 1 << cfg->frame_reg;
1855 if (cfg->compile_aot || cfg->uses_rgctx_reg || COMPILE_LLVM (cfg))
1856 /* V5 is reserved for passing the vtable/rgctx/IMT method */
1857 cfg->used_int_regs |= (1 << ARMREG_V5);
/* Scalar returns live in r0; struct returns are handled further below. */
1861 if (!MONO_TYPE_ISSTRUCT (sig->ret) && !cinfo->vtype_retaddr) {
1862 if (sig->ret->type != MONO_TYPE_VOID) {
1863 cfg->ret->opcode = OP_REGVAR;
1864 cfg->ret->inst_c0 = ARMREG_R0;
1867 /* local vars are at a positive offset from the stack pointer */
1869 * also note that if the function uses alloca, we use FP
1870 * to point at the local variables.
1872 offset = 0; /* linkage area */
1873 /* align the offset to 16 bytes: not sure this is needed here */
1875 //offset &= ~(8 - 1);
1877 /* add parameter area size for called functions */
1878 offset += cfg->param_area;
1881 if (cfg->flags & MONO_CFG_HAS_FPOUT)
1884 /* allow room to save the return value */
1885 if (mono_jit_trace_calls != NULL && mono_trace_eval (cfg->method))
1888 /* the MonoLMF structure is stored just below the stack pointer */
1889 if (cinfo->ret.storage == RegTypeStructByVal) {
1890 cfg->ret->opcode = OP_REGOFFSET;
1891 cfg->ret->inst_basereg = cfg->frame_reg;
1892 offset += sizeof (gpointer) - 1;
1893 offset &= ~(sizeof (gpointer) - 1);
1894 cfg->ret->inst_offset = - offset;
1895 offset += sizeof(gpointer);
1896 } else if (cinfo->vtype_retaddr) {
1897 ins = cfg->vret_addr;
1898 offset += sizeof(gpointer) - 1;
1899 offset &= ~(sizeof(gpointer) - 1);
1900 ins->inst_offset = offset;
1901 ins->opcode = OP_REGOFFSET;
1902 ins->inst_basereg = cfg->frame_reg;
1903 if (G_UNLIKELY (cfg->verbose_level > 1)) {
1904 printf ("vret_addr =");
1905 mono_print_ins (cfg->vret_addr);
1907 offset += sizeof(gpointer);
1910 /* Allocate these first so they have a small offset, OP_SEQ_POINT depends on this */
1911 if (cfg->arch.seq_point_info_var) {
1914 ins = cfg->arch.seq_point_info_var;
1918 offset += align - 1;
1919 offset &= ~(align - 1);
1920 ins->opcode = OP_REGOFFSET;
1921 ins->inst_basereg = cfg->frame_reg;
1922 ins->inst_offset = offset;
1925 ins = cfg->arch.ss_trigger_page_var;
1928 offset += align - 1;
1929 offset &= ~(align - 1);
1930 ins->opcode = OP_REGOFFSET;
1931 ins->inst_basereg = cfg->frame_reg;
1932 ins->inst_offset = offset;
/* Soft-breakpoint sequence point variables (see mono_arch_create_vars). */
1936 if (cfg->arch.seq_point_read_var) {
1939 ins = cfg->arch.seq_point_read_var;
1943 offset += align - 1;
1944 offset &= ~(align - 1);
1945 ins->opcode = OP_REGOFFSET;
1946 ins->inst_basereg = cfg->frame_reg;
1947 ins->inst_offset = offset;
1950 ins = cfg->arch.seq_point_ss_method_var;
1953 offset += align - 1;
1954 offset &= ~(align - 1);
1955 ins->opcode = OP_REGOFFSET;
1956 ins->inst_basereg = cfg->frame_reg;
1957 ins->inst_offset = offset;
1960 ins = cfg->arch.seq_point_bp_method_var;
1963 offset += align - 1;
1964 offset &= ~(align - 1);
1965 ins->opcode = OP_REGOFFSET;
1966 ins->inst_basereg = cfg->frame_reg;
1967 ins->inst_offset = offset;
1971 if (cfg->has_atomic_exchange_i4 || cfg->has_atomic_cas_i4 || cfg->has_atomic_add_new_i4) {
1972 /* Allocate a temporary used by the atomic ops */
1976 /* Allocate a local slot to hold the sig cookie address */
1977 offset += align - 1;
1978 offset &= ~(align - 1);
1979 cfg->arch.atomic_tmp_offset = offset;
1982 cfg->arch.atomic_tmp_offset = -1;
1985 cfg->locals_min_stack_offset = offset;
/* Lay out the method's local variables. */
1987 curinst = cfg->locals_start;
1988 for (i = curinst; i < cfg->num_varinfo; ++i) {
1991 ins = cfg->varinfo [i];
1992 if ((ins->flags & MONO_INST_IS_DEAD) || ins->opcode == OP_REGVAR || ins->opcode == OP_REGOFFSET)
1995 t = ins->inst_vtype;
1996 if (cfg->gsharedvt && mini_is_gsharedvt_variable_type (cfg, t))
1999 /* inst->backend.is_pinvoke indicates native sized value types, this is used by the
2000 * pinvoke wrappers when they call functions returning structure */
2001 if (ins->backend.is_pinvoke && MONO_TYPE_ISSTRUCT (t) && t->type != MONO_TYPE_TYPEDBYREF) {
2002 size = mono_class_native_size (mono_class_from_mono_type (t), &ualign);
2006 size = mono_type_size (t, &align);
2008 /* FIXME: if a structure is misaligned, our memcpy doesn't work,
2009 * since it loads/stores misaligned words, which don't do the right thing.
2011 if (align < 4 && size >= 4)
2013 if (ALIGN_TO (offset, align) > ALIGN_TO (offset, 4))
2014 mini_gc_set_slot_type_from_fp (cfg, ALIGN_TO (offset, 4), SLOT_NOREF);
2015 offset += align - 1;
2016 offset &= ~(align - 1);
2017 ins->opcode = OP_REGOFFSET;
2018 ins->inst_offset = offset;
2019 ins->inst_basereg = cfg->frame_reg;
2021 //g_print ("allocating local %d to %d\n", i, inst->inst_offset);
2024 cfg->locals_max_stack_offset = offset;
/* Lay out the incoming arguments (this, sig cookie, declared params). */
2028 ins = cfg->args [curinst];
2029 if (ins->opcode != OP_REGVAR) {
2030 ins->opcode = OP_REGOFFSET;
2031 ins->inst_basereg = cfg->frame_reg;
2032 offset += sizeof (gpointer) - 1;
2033 offset &= ~(sizeof (gpointer) - 1);
2034 ins->inst_offset = offset;
2035 offset += sizeof (gpointer);
2040 if (sig->call_convention == MONO_CALL_VARARG) {
2044 /* Allocate a local slot to hold the sig cookie address */
2045 offset += align - 1;
2046 offset &= ~(align - 1);
2047 cfg->sig_cookie = offset;
2051 for (i = 0; i < sig->param_count; ++i) {
2052 ins = cfg->args [curinst];
2054 if (ins->opcode != OP_REGVAR) {
2055 ins->opcode = OP_REGOFFSET;
2056 ins->inst_basereg = cfg->frame_reg;
2057 size = mini_type_stack_size_full (cfg->generic_sharing_context, sig->params [i], &ualign, sig->pinvoke);
2059 /* FIXME: if a structure is misaligned, our memcpy doesn't work,
2060 * since it loads/stores misaligned words, which don't do the right thing.
2062 if (align < 4 && size >= 4)
2064 /* The code in the prolog () stores words when storing vtypes received in a register */
2065 if (MONO_TYPE_ISSTRUCT (sig->params [i]))
2067 if (ALIGN_TO (offset, align) > ALIGN_TO (offset, 4))
2068 mini_gc_set_slot_type_from_fp (cfg, ALIGN_TO (offset, 4), SLOT_NOREF);
2069 offset += align - 1;
2070 offset &= ~(align - 1);
2071 ins->inst_offset = offset;
2077 /* align the offset to 8 bytes */
2078 if (ALIGN_TO (offset, 8) > ALIGN_TO (offset, 4))
2079 mini_gc_set_slot_type_from_fp (cfg, ALIGN_TO (offset, 4), SLOT_NOREF);
2084 cfg->stack_offset = offset;
/*
 * mono_arch_create_vars:
 * Create the ARM-specific JIT variables before register allocation: two VFP
 * scratch slots under the hard-float ABI, the hidden vret address argument
 * when the return is a vtype, and the sequence-point bookkeeping variables
 * used by the debugger (soft breakpoints, AOT info, single-step trigger page).
 * All are marked MONO_INST_VOLATILE so they are not optimized away.
 * NOTE(review): SOURCE is elided; interior lines are missing from this view.
 */
2088 mono_arch_create_vars (MonoCompile *cfg)
2090 MonoMethodSignature *sig;
2094 sig = mono_method_signature (cfg->method);
2096 if (!cfg->arch.cinfo)
2097 cfg->arch.cinfo = get_call_info (cfg->generic_sharing_context, cfg->mempool, sig);
2098 cinfo = cfg->arch.cinfo;
2100 if (IS_HARD_FLOAT) {
2101 for (i = 0; i < 2; i++) {
2102 MonoInst *inst = mono_compile_create_var (cfg, &mono_defaults.double_class->byval_arg, OP_LOCAL);
2103 inst->flags |= MONO_INST_VOLATILE;
2105 cfg->arch.vfp_scratch_slots [i] = (gpointer) inst;
2109 if (cinfo->ret.storage == RegTypeStructByVal)
2110 cfg->ret_var_is_local = TRUE;
2112 if (cinfo->vtype_retaddr) {
2113 cfg->vret_addr = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_ARG);
2114 if (G_UNLIKELY (cfg->verbose_level > 1)) {
2115 printf ("vret_addr = ");
2116 mono_print_ins (cfg->vret_addr);
2120 if (cfg->gen_seq_points) {
2121 if (cfg->soft_breakpoints) {
2122 MonoInst *ins = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
2123 ins->flags |= MONO_INST_VOLATILE;
2124 cfg->arch.seq_point_read_var = ins;
2126 ins = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
2127 ins->flags |= MONO_INST_VOLATILE;
2128 cfg->arch.seq_point_ss_method_var = ins;
2130 ins = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
2131 ins->flags |= MONO_INST_VOLATILE;
2132 cfg->arch.seq_point_bp_method_var = ins;
2134 g_assert (!cfg->compile_aot);
2135 } else if (cfg->compile_aot) {
2136 MonoInst *ins = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
2137 ins->flags |= MONO_INST_VOLATILE;
2138 cfg->arch.seq_point_info_var = ins;
2140 /* Allocate a separate variable for this to save 1 load per seq point */
2141 ins = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
2142 ins->flags |= MONO_INST_VOLATILE;
2143 cfg->arch.ss_trigger_page_var = ins;
/*
 * emit_sig_cookie:
 * Emit the vararg signature cookie: a truncated copy of the call signature
 * (only the implicit arguments) stored to its stack slot so the callee's
 * ArgIterator can recover the vararg types.  Skipped for tail calls.
 * NOTE(review): SOURCE is elided; interior lines are missing from this view.
 */
2149 emit_sig_cookie (MonoCompile *cfg, MonoCallInst *call, CallInfo *cinfo)
2151 MonoMethodSignature *tmp_sig;
2154 if (call->tail_call)
2157 g_assert (cinfo->sig_cookie.storage == RegTypeBase);
2160 * mono_ArgIterator_Setup assumes the signature cookie is
2161 * passed first and all the arguments which were before it are
2162 * passed on the stack after the signature. So compensate by
2163 * passing a different signature.
2165 tmp_sig = mono_metadata_signature_dup (call->signature);
2166 tmp_sig->param_count -= call->signature->sentinelpos;
2167 tmp_sig->sentinelpos = 0;
2168 memcpy (tmp_sig->params, call->signature->params + call->signature->sentinelpos, tmp_sig->param_count * sizeof (MonoType*));
2170 sig_reg = mono_alloc_ireg (cfg);
2171 MONO_EMIT_NEW_SIGNATURECONST (cfg, sig_reg, tmp_sig);
2173 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, ARMREG_SP, cinfo->sig_cookie.offset, sig_reg);
/*
 * mono_arch_get_llvm_call_info:
 * Translate the ARM CallInfo for SIG into an LLVMCallInfo for the LLVM
 * backend.  Conventions LLVM cannot express (vtype-by-val on the stack,
 * unusual return storages) disable LLVM compilation for the method via
 * cfg->disable_llvm with a diagnostic in cfg->exception_message.
 * NOTE(review): SOURCE is elided; interior lines are missing from this view.
 */
2178 mono_arch_get_llvm_call_info (MonoCompile *cfg, MonoMethodSignature *sig)
2183 LLVMCallInfo *linfo;
2185 n = sig->param_count + sig->hasthis;
2187 cinfo = get_call_info (cfg->generic_sharing_context, cfg->mempool, sig);
2189 linfo = mono_mempool_alloc0 (cfg->mempool, sizeof (LLVMCallInfo) + (sizeof (LLVMArgInfo) * n));
2192 * LLVM always uses the native ABI while we use our own ABI, the
2193 * only difference is the handling of vtypes:
2194 * - we only pass/receive them in registers in some cases, and only
2195 * in 1 or 2 integer registers.
2197 if (cinfo->vtype_retaddr) {
2198 /* Vtype returned using a hidden argument */
2199 linfo->ret.storage = LLVMArgVtypeRetAddr;
2200 linfo->vret_arg_index = cinfo->vret_arg_index;
2201 } else if (cinfo->ret.storage != RegTypeGeneral && cinfo->ret.storage != RegTypeNone && cinfo->ret.storage != RegTypeFP && cinfo->ret.storage != RegTypeIRegPair) {
2202 cfg->exception_message = g_strdup ("unknown ret conv");
2203 cfg->disable_llvm = TRUE;
2207 for (i = 0; i < n; ++i) {
2208 ainfo = cinfo->args + i;
2210 linfo->args [i].storage = LLVMArgNone;
2212 switch (ainfo->storage) {
2213 case RegTypeGeneral:
2214 case RegTypeIRegPair:
2216 linfo->args [i].storage = LLVMArgInIReg;
2218 case RegTypeStructByVal:
2219 // FIXME: Passing entirely on the stack or split reg/stack
2220 if (ainfo->vtsize == 0 && ainfo->size <= 2) {
2221 linfo->args [i].storage = LLVMArgVtypeInReg;
2222 linfo->args [i].pair_storage [0] = LLVMArgInIReg;
2223 if (ainfo->size == 2)
2224 linfo->args [i].pair_storage [1] = LLVMArgInIReg;
2226 linfo->args [i].pair_storage [1] = LLVMArgNone;
2228 cfg->exception_message = g_strdup_printf ("vtype-by-val on stack");
2229 cfg->disable_llvm = TRUE;
2233 cfg->exception_message = g_strdup_printf ("ainfo->storage (%d)", ainfo->storage);
2234 cfg->disable_llvm = TRUE;
/*
 * mono_arch_emit_call:
 * Lower CALL's arguments into IR that places each one according to the
 * CallInfo classification: moves into core/VFP registers, stores into the
 * outgoing stack area at sp+offset, OP_OUTARG_VT for by-value structs, plus
 * the vararg signature cookie and the hidden vtype return address.
 * NOTE(review): SOURCE is elided; interior lines (some else/case branches and
 * braces) are missing from this view.
 */
2244 mono_arch_emit_call (MonoCompile *cfg, MonoCallInst *call)
2247 MonoMethodSignature *sig;
2251 sig = call->signature;
2252 n = sig->param_count + sig->hasthis;
2254 cinfo = get_call_info (cfg->generic_sharing_context, NULL, sig);
2256 for (i = 0; i < n; ++i) {
2257 ArgInfo *ainfo = cinfo->args + i;
/* Implicit 'this' has no entry in sig->params; treat it as an int-sized arg. */
2260 if (i >= sig->hasthis)
2261 t = sig->params [i - sig->hasthis];
2263 t = &mono_defaults.int_class->byval_arg;
2264 t = mini_type_get_underlying_type (cfg->generic_sharing_context, t);
2266 if ((sig->call_convention == MONO_CALL_VARARG) && (i == sig->sentinelpos)) {
2267 /* Emit the signature cookie just before the implicit arguments */
2268 emit_sig_cookie (cfg, call, cinfo);
2271 in = call->args [i];
2273 switch (ainfo->storage) {
2274 case RegTypeGeneral:
2275 case RegTypeIRegPair:
/* 64-bit values occupy two consecutive vregs (dreg+1 / dreg+2) and two
 * consecutive core registers. */
2276 if (!t->byref && ((t->type == MONO_TYPE_I8) || (t->type == MONO_TYPE_U8))) {
2277 MONO_INST_NEW (cfg, ins, OP_MOVE);
2278 ins->dreg = mono_alloc_ireg (cfg);
2279 ins->sreg1 = in->dreg + 1;
2280 MONO_ADD_INS (cfg->cbb, ins);
2281 mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, ainfo->reg, FALSE);
2283 MONO_INST_NEW (cfg, ins, OP_MOVE);
2284 ins->dreg = mono_alloc_ireg (cfg);
2285 ins->sreg1 = in->dreg + 2;
2286 MONO_ADD_INS (cfg->cbb, ins);
2287 mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, ainfo->reg + 1, FALSE);
2288 } else if (!t->byref && ((t->type == MONO_TYPE_R8) || (t->type == MONO_TYPE_R4))) {
2289 if (ainfo->size == 4) {
2290 if (IS_SOFT_FLOAT) {
2291 /* mono_emit_call_args () have already done the r8->r4 conversion */
2292 /* The converted value is in an int vreg */
2293 MONO_INST_NEW (cfg, ins, OP_MOVE);
2294 ins->dreg = mono_alloc_ireg (cfg);
2295 ins->sreg1 = in->dreg;
2296 MONO_ADD_INS (cfg->cbb, ins);
2297 mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, ainfo->reg, FALSE);
/* VFP case: bounce the float through the param area to move it into a
 * core register (soft-float ABI passes floats in r0-r3). */
2301 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORER4_MEMBASE_REG, ARMREG_SP, (cfg->param_area - 8), in->dreg);
2302 creg = mono_alloc_ireg (cfg);
2303 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOAD_MEMBASE, creg, ARMREG_SP, (cfg->param_area - 8));
2304 mono_call_inst_add_outarg_reg (cfg, call, creg, ainfo->reg, FALSE);
2307 if (IS_SOFT_FLOAT) {
2308 MONO_INST_NEW (cfg, ins, OP_FGETLOW32);
2309 ins->dreg = mono_alloc_ireg (cfg);
2310 ins->sreg1 = in->dreg;
2311 MONO_ADD_INS (cfg->cbb, ins);
2312 mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, ainfo->reg, FALSE);
2314 MONO_INST_NEW (cfg, ins, OP_FGETHIGH32);
2315 ins->dreg = mono_alloc_ireg (cfg);
2316 ins->sreg1 = in->dreg;
2317 MONO_ADD_INS (cfg->cbb, ins);
2318 mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, ainfo->reg + 1, FALSE);
/* Double via the param area: store as r8, reload both words into cregs. */
2322 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORER8_MEMBASE_REG, ARMREG_SP, (cfg->param_area - 8), in->dreg);
2323 creg = mono_alloc_ireg (cfg);
2324 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOAD_MEMBASE, creg, ARMREG_SP, (cfg->param_area - 8));
2325 mono_call_inst_add_outarg_reg (cfg, call, creg, ainfo->reg, FALSE);
2326 creg = mono_alloc_ireg (cfg);
2327 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOAD_MEMBASE, creg, ARMREG_SP, (cfg->param_area - 8 + 4));
2328 mono_call_inst_add_outarg_reg (cfg, call, creg, ainfo->reg + 1, FALSE);
2331 cfg->flags |= MONO_CFG_HAS_FPOUT;
2333 MONO_INST_NEW (cfg, ins, OP_MOVE);
2334 ins->dreg = mono_alloc_ireg (cfg);
2335 ins->sreg1 = in->dreg;
2336 MONO_ADD_INS (cfg->cbb, ins);
2338 mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, ainfo->reg, FALSE);
2341 case RegTypeStructByAddr:
2344 /* FIXME: where si the data allocated? */
2345 arg->backend.reg3 = ainfo->reg;
2346 call->used_iregs |= 1 << ainfo->reg;
2347 g_assert_not_reached ();
2350 case RegTypeStructByVal:
2351 case RegTypeGSharedVtInReg:
2352 case RegTypeGSharedVtOnStack:
/* Defer to mono_arch_emit_outarg_vt via an OP_OUTARG_VT carrying a copy
 * of the ArgInfo. */
2353 MONO_INST_NEW (cfg, ins, OP_OUTARG_VT);
2354 ins->opcode = OP_OUTARG_VT;
2355 ins->sreg1 = in->dreg;
2356 ins->klass = in->klass;
2357 ins->inst_p0 = call;
2358 ins->inst_p1 = mono_mempool_alloc (cfg->mempool, sizeof (ArgInfo));
2359 memcpy (ins->inst_p1, ainfo, sizeof (ArgInfo));
2360 mono_call_inst_add_outarg_vt (cfg, call, ins);
2361 MONO_ADD_INS (cfg->cbb, ins);
2364 if (!t->byref && ((t->type == MONO_TYPE_I8) || (t->type == MONO_TYPE_U8))) {
2365 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, ARMREG_SP, ainfo->offset, in->dreg);
2366 } else if (!t->byref && ((t->type == MONO_TYPE_R4) || (t->type == MONO_TYPE_R8))) {
2367 if (t->type == MONO_TYPE_R8) {
2368 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORER8_MEMBASE_REG, ARMREG_SP, ainfo->offset, in->dreg);
2371 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, ARMREG_SP, ainfo->offset, in->dreg);
2373 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORER4_MEMBASE_REG, ARMREG_SP, ainfo->offset, in->dreg);
2376 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, ARMREG_SP, ainfo->offset, in->dreg);
2379 case RegTypeBaseGen:
/* First word of a 64-bit value in r3, second word on the stack; which
 * vreg goes where depends on endianness. */
2380 if (!t->byref && ((t->type == MONO_TYPE_I8) || (t->type == MONO_TYPE_U8))) {
2381 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, ARMREG_SP, ainfo->offset, (G_BYTE_ORDER == G_BIG_ENDIAN) ? in->dreg + 1 : in->dreg + 2);
2382 MONO_INST_NEW (cfg, ins, OP_MOVE);
2383 ins->dreg = mono_alloc_ireg (cfg);
2384 ins->sreg1 = G_BYTE_ORDER == G_BIG_ENDIAN ? in->dreg + 2 : in->dreg + 1;
2385 MONO_ADD_INS (cfg->cbb, ins);
2386 mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, ARMREG_R3, FALSE);
2387 } else if (!t->byref && (t->type == MONO_TYPE_R8)) {
2390 /* This should work for soft-float as well */
2392 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORER8_MEMBASE_REG, ARMREG_SP, (cfg->param_area - 8), in->dreg);
2393 creg = mono_alloc_ireg (cfg);
2394 mono_call_inst_add_outarg_reg (cfg, call, creg, ARMREG_R3, FALSE);
2395 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOAD_MEMBASE, creg, ARMREG_SP, (cfg->param_area - 8));
2396 creg = mono_alloc_ireg (cfg);
2397 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOAD_MEMBASE, creg, ARMREG_SP, (cfg->param_area - 4));
2398 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, ARMREG_SP, ainfo->offset, creg);
2399 cfg->flags |= MONO_CFG_HAS_FPOUT;
2401 g_assert_not_reached ();
2405 int fdreg = mono_alloc_freg (cfg);
2407 if (ainfo->size == 8) {
2408 MONO_INST_NEW (cfg, ins, OP_FMOVE);
2409 ins->sreg1 = in->dreg;
2411 MONO_ADD_INS (cfg->cbb, ins);
2413 mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, ainfo->reg, TRUE);
2418 * Mono's register allocator doesn't speak single-precision registers that
2419 * overlap double-precision registers (i.e. armhf). So we have to work around
2420 * the register allocator and load the value from memory manually.
2422 * So we create a variable for the float argument and an instruction to store
2423 * the argument into the variable. We then store the list of these arguments
2424 * in cfg->float_args. This list is then used by emit_float_args later to
2425 * pass the arguments in the various call opcodes.
2427 * This is not very nice, and we should really try to fix the allocator.
2430 MonoInst *float_arg = mono_compile_create_var (cfg, &mono_defaults.single_class->byval_arg, OP_LOCAL);
2432 /* Make sure the instruction isn't seen as pointless and removed.
2434 float_arg->flags |= MONO_INST_VOLATILE;
2436 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, float_arg->dreg, in->dreg);
2438 /* We use the dreg to look up the instruction later. The hreg is used to
2439 * emit the instruction that loads the value into the FP reg.
2441 fad = mono_mempool_alloc0 (cfg->mempool, sizeof (FloatArgData));
2442 fad->vreg = float_arg->dreg;
2443 fad->hreg = ainfo->reg;
2445 call->float_args = g_slist_append_mempool (cfg->mempool, call->float_args, fad);
2448 call->used_iregs |= 1 << ainfo->reg;
2449 cfg->flags |= MONO_CFG_HAS_FPOUT;
2453 g_assert_not_reached ();
2457 /* Handle the case where there are no implicit arguments */
2458 if (!sig->pinvoke && (sig->call_convention == MONO_CALL_VARARG) && (n == sig->sentinelpos))
2459 emit_sig_cookie (cfg, call, cinfo);
2461 if (cinfo->ret.storage == RegTypeStructByVal) {
2462 /* The JIT will transform this into a normal call */
2463 call->vret_in_reg = TRUE;
2464 } else if (cinfo->vtype_retaddr) {
/* Pass the hidden vtype return address in its classified register. */
2466 MONO_INST_NEW (cfg, vtarg, OP_MOVE);
2467 vtarg->sreg1 = call->vret_var->dreg;
2468 vtarg->dreg = mono_alloc_preg (cfg);
2469 MONO_ADD_INS (cfg->cbb, vtarg);
2471 mono_call_inst_add_outarg_reg (cfg, call, vtarg->dreg, cinfo->ret.reg, FALSE);
2474 call->stack_usage = cinfo->stack_usage;
/*
 * mono_arch_emit_outarg_vt:
 * Lower an OP_OUTARG_VT: pass the value type SRC according to the ArgInfo
 * attached to INS.  GSharedVt args are passed by address (register or stack);
 * otherwise the first ainfo->size words are loaded into consecutive core
 * registers (byte-assembling sub-word structs to avoid over-reading) and any
 * overflow words are memcpy'd into the outgoing stack area.
 * NOTE(review): SOURCE is elided; interior lines are missing from this view.
 */
2480 mono_arch_emit_outarg_vt (MonoCompile *cfg, MonoInst *ins, MonoInst *src)
2482 MonoCallInst *call = (MonoCallInst*)ins->inst_p0;
2483 ArgInfo *ainfo = ins->inst_p1;
2484 int ovf_size = ainfo->vtsize;
2485 int doffset = ainfo->offset;
2486 int struct_size = ainfo->struct_size;
2487 int i, soffset, dreg, tmpreg;
2489 if (ainfo->storage == RegTypeGSharedVtInReg) {
2491 mono_call_inst_add_outarg_reg (cfg, call, src->dreg, ainfo->reg, FALSE);
2494 if (ainfo->storage == RegTypeGSharedVtOnStack) {
2495 /* Pass by addr on stack */
2496 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, ARMREG_SP, ainfo->offset, src->dreg);
2501 for (i = 0; i < ainfo->size; ++i) {
2502 dreg = mono_alloc_ireg (cfg);
/* Load one word of the struct; sub-word remainders are assembled from
 * individual byte/halfword loads so we never read past the struct. */
2503 switch (struct_size) {
2505 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, dreg, src->dreg, soffset);
2508 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, dreg, src->dreg, soffset);
2511 tmpreg = mono_alloc_ireg (cfg);
2512 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, dreg, src->dreg, soffset);
2513 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, tmpreg, src->dreg, soffset + 1);
2514 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, tmpreg, tmpreg, 8);
2515 MONO_EMIT_NEW_BIALU (cfg, OP_IOR, dreg, dreg, tmpreg);
2516 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, tmpreg, src->dreg, soffset + 2);
2517 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, tmpreg, tmpreg, 16);
2518 MONO_EMIT_NEW_BIALU (cfg, OP_IOR, dreg, dreg, tmpreg);
2521 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, dreg, src->dreg, soffset);
2524 mono_call_inst_add_outarg_reg (cfg, call, dreg, ainfo->reg + i, FALSE);
2525 soffset += sizeof (gpointer);
2526 struct_size -= sizeof (gpointer);
2528 //g_print ("vt size: %d at R%d + %d\n", doffset, vt->inst_basereg, vt->inst_offset);
2530 mini_emit_memcpy (cfg, ARMREG_SP, doffset, src->dreg, soffset, MIN (ovf_size * sizeof (gpointer), struct_size), struct_size < 4 ? 1 : 4);
// mono_arch_emit_setret:
//   Emit IR that moves VAL into the return-value location of METHOD.
// NOTE(review): gappy listing — several case labels / braces are missing;
// comments describe only the visible code.
2534 mono_arch_emit_setret (MonoCompile *cfg, MonoMethod *method, MonoInst *val)
2536 MonoType *ret = mini_type_get_underlying_type (cfg->generic_sharing_context, mono_method_signature (method)->ret);
// 64-bit integers are returned in a register pair: OP_SETLRET takes the
// two halves of the long vreg pair (dreg+1 / dreg+2 convention).
2539 if (ret->type == MONO_TYPE_I8 || ret->type == MONO_TYPE_U8) {
2542 if (COMPILE_LLVM (cfg)) {
2543 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, cfg->ret->dreg, val->dreg);
2545 MONO_INST_NEW (cfg, ins, OP_SETLRET);
2546 ins->sreg1 = val->dreg + 1;
2547 ins->sreg2 = val->dreg + 2;
2548 MONO_ADD_INS (cfg->cbb, ins);
// Floating point returns depend on the FPU configuration (switch header
// is on a missing line; presumably switches on the arm fpu kind).
2553 case MONO_ARM_FPU_NONE:
2554 if (ret->type == MONO_TYPE_R8) {
2557 MONO_INST_NEW (cfg, ins, OP_SETFRET);
2558 ins->dreg = cfg->ret->dreg;
2559 ins->sreg1 = val->dreg;
2560 MONO_ADD_INS (cfg->cbb, ins);
2563 if (ret->type == MONO_TYPE_R4) {
2564 /* Already converted to an int in method_to_ir () */
2565 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, cfg->ret->dreg, val->dreg);
2569 case MONO_ARM_FPU_VFP:
2570 case MONO_ARM_FPU_VFP_HARD:
2571 if (ret->type == MONO_TYPE_R8 || ret->type == MONO_TYPE_R4) {
2574 MONO_INST_NEW (cfg, ins, OP_SETFRET);
2575 ins->dreg = cfg->ret->dreg;
2576 ins->sreg1 = val->dreg;
2577 MONO_ADD_INS (cfg->cbb, ins);
2582 g_assert_not_reached ();
// Default: plain register-sized return value.
2586 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, cfg->ret->dreg, val->dreg);
2589 #endif /* #ifndef DISABLE_JIT */
2592 mono_arch_is_inst_imm (gint64 imm)
2597 #define DYN_CALL_STACK_ARGS 6
2600 MonoMethodSignature *sig;
2605 mgreg_t regs [PARAM_REGS + DYN_CALL_STACK_ARGS];
// dyn_call_supported:
//   Return whether SIG can be invoked through the dynamic-call path,
//   i.e. whether all arguments/return value fit the register set plus
//   DYN_CALL_STACK_ARGS stack slots. Tail of the function (the per-type
//   checks and returns) is on missing lines of this listing.
2611 dyn_call_supported (CallInfo *cinfo, MonoMethodSignature *sig)
2615 if (sig->hasthis + sig->param_count > PARAM_REGS + DYN_CALL_STACK_ARGS)
2618 switch (cinfo->ret.storage) {
2620 case RegTypeGeneral:
2621 case RegTypeIRegPair:
2622 case RegTypeStructByAddr:
// Reject arguments whose storage cannot be reproduced by start_dyn_call.
2633 for (i = 0; i < cinfo->nargs; ++i) {
2634 ArgInfo *ainfo = &cinfo->args [i];
2637 switch (ainfo->storage) {
2638 case RegTypeGeneral:
2640 case RegTypeIRegPair:
2643 if (ainfo->offset >= (DYN_CALL_STACK_ARGS * sizeof (gpointer)))
2646 case RegTypeStructByVal:
2647 if (ainfo->size == 0)
2648 last_slot = PARAM_REGS + (ainfo->offset / 4) + ainfo->vtsize;
2650 last_slot = ainfo->reg + ainfo->size + ainfo->vtsize;
2651 if (last_slot >= PARAM_REGS + DYN_CALL_STACK_ARGS)
2659 // FIXME: Can't use cinfo only as it doesn't contain info about I8/float */
2660 for (i = 0; i < sig->param_count; ++i) {
2661 MonoType *t = sig->params [i];
// mono_arch_dyn_call_prepare:
//   Build the arch-specific info needed to invoke SIG dynamically.
//   Returns NULL-equivalent on unsupported signatures (failure branch body
//   is on a missing line). Caller frees via mono_arch_dyn_call_free ().
2687 mono_arch_dyn_call_prepare (MonoMethodSignature *sig)
2689 ArchDynCallInfo *info;
2692 cinfo = get_call_info (NULL, NULL, sig);
2694 if (!dyn_call_supported (cinfo, sig)) {
2699 info = g_new0 (ArchDynCallInfo, 1);
2700 // FIXME: Preprocess the info to speed up start_dyn_call ()
2702 info->cinfo = cinfo;
2704 return (MonoDynCallInfo*)info;
// mono_arch_dyn_call_free:
//   Release the info created by mono_arch_dyn_call_prepare ().
//   Visible code frees the embedded CallInfo; presumably the ArchDynCallInfo
//   itself is freed on a missing line — confirm against upstream.
2708 mono_arch_dyn_call_free (MonoDynCallInfo *info)
2710 ArchDynCallInfo *ainfo = (ArchDynCallInfo*)info;
2712 g_free (ainfo->cinfo);
// mono_arch_start_dyn_call:
//   Marshal ARGS into the DynCallArgs structure in BUF, filling p->regs with
//   the values the dyn-call trampoline will load into r0-r3 and the
//   DYN_CALL_STACK_ARGS stack slots. RET receives the return buffer address
//   for vtype-returning signatures. NOTE(review): gappy listing; case labels
//   for several MONO_TYPE_* branches fall on missing lines.
2717 mono_arch_start_dyn_call (MonoDynCallInfo *info, gpointer **args, guint8 *ret, guint8 *buf, int buf_len)
2719 ArchDynCallInfo *dinfo = (ArchDynCallInfo*)info;
2720 DynCallArgs *p = (DynCallArgs*)buf;
2721 int arg_index, greg, i, j, pindex;
2722 MonoMethodSignature *sig = dinfo->sig;
2724 g_assert (buf_len >= sizeof (DynCallArgs));
// The this pointer (or the vtype return address, when it is passed first)
// occupies the first general register.
2733 if (sig->hasthis || dinfo->cinfo->vret_arg_index == 1) {
2734 p->regs [greg ++] = (mgreg_t)*(args [arg_index ++]);
2739 if (dinfo->cinfo->vtype_retaddr)
2740 p->regs [greg ++] = (mgreg_t)ret;
2742 for (i = pindex; i < sig->param_count; i++) {
2743 MonoType *t = mono_type_get_underlying_type (sig->params [i]);
2744 gpointer *arg = args [arg_index ++];
2745 ArgInfo *ainfo = &dinfo->cinfo->args [i + sig->hasthis];
// Compute the p->regs slot: register args map directly, stack args map
// past PARAM_REGS using their ABI stack offset.
2748 if (ainfo->storage == RegTypeGeneral || ainfo->storage == RegTypeIRegPair || ainfo->storage == RegTypeStructByVal)
2750 else if (ainfo->storage == RegTypeBase)
2751 slot = PARAM_REGS + (ainfo->offset / 4);
2753 g_assert_not_reached ();
2756 p->regs [slot] = (mgreg_t)*arg;
2761 case MONO_TYPE_STRING:
2762 case MONO_TYPE_CLASS:
2763 case MONO_TYPE_ARRAY:
2764 case MONO_TYPE_SZARRAY:
2765 case MONO_TYPE_OBJECT:
2769 p->regs [slot] = (mgreg_t)*arg;
2771 case MONO_TYPE_BOOLEAN:
2773 p->regs [slot] = *(guint8*)arg;
2776 p->regs [slot] = *(gint8*)arg;
2779 p->regs [slot] = *(gint16*)arg;
2782 case MONO_TYPE_CHAR:
2783 p->regs [slot] = *(guint16*)arg;
2786 p->regs [slot] = *(gint32*)arg;
2789 p->regs [slot] = *(guint32*)arg;
// 64-bit values occupy two consecutive slots.
2793 p->regs [slot ++] = (mgreg_t)arg [0];
2794 p->regs [slot] = (mgreg_t)arg [1];
2797 p->regs [slot] = *(mgreg_t*)arg;
2800 p->regs [slot ++] = (mgreg_t)arg [0];
2801 p->regs [slot] = (mgreg_t)arg [1];
2803 case MONO_TYPE_GENERICINST:
2804 if (MONO_TYPE_IS_REFERENCE (t)) {
2805 p->regs [slot] = (mgreg_t)*arg;
2810 case MONO_TYPE_VALUETYPE:
2811 g_assert (ainfo->storage == RegTypeStructByVal);
2813 if (ainfo->size == 0)
2814 slot = PARAM_REGS + (ainfo->offset / 4);
// Copy the struct word-by-word into consecutive slots.
2818 for (j = 0; j < ainfo->size + ainfo->vtsize; ++j)
2819 p->regs [slot ++] = ((mgreg_t*)arg) [j];
2822 g_assert_not_reached ();
// mono_arch_finish_dyn_call:
//   Copy the raw return value(s) left in BUF by the dyn-call trampoline
//   (res/res2 = r0/r1) into the typed return buffer recorded by
//   mono_arch_start_dyn_call (). NOTE(review): gappy listing; some case
//   labels fall on missing lines.
2828 mono_arch_finish_dyn_call (MonoDynCallInfo *info, guint8 *buf)
2830 ArchDynCallInfo *ainfo = (ArchDynCallInfo*)info;
2831 MonoMethodSignature *sig = ((ArchDynCallInfo*)info)->sig;
2832 guint8 *ret = ((DynCallArgs*)buf)->ret;
2833 mgreg_t res = ((DynCallArgs*)buf)->res;
2834 mgreg_t res2 = ((DynCallArgs*)buf)->res2;
2836 switch (mono_type_get_underlying_type (sig->ret)->type) {
2837 case MONO_TYPE_VOID:
2838 *(gpointer*)ret = NULL;
2840 case MONO_TYPE_STRING:
2841 case MONO_TYPE_CLASS:
2842 case MONO_TYPE_ARRAY:
2843 case MONO_TYPE_SZARRAY:
2844 case MONO_TYPE_OBJECT:
2848 *(gpointer*)ret = (gpointer)res;
2854 case MONO_TYPE_BOOLEAN:
2855 *(guint8*)ret = res;
2858 *(gint16*)ret = res;
2861 case MONO_TYPE_CHAR:
2862 *(guint16*)ret = res;
2865 *(gint32*)ret = res;
2868 *(guint32*)ret = res;
// 64-bit result is reassembled from the two result registers.
2872 /* This handles endianness as well */
2873 ((gint32*)ret) [0] = res;
2874 ((gint32*)ret) [1] = res2;
2876 case MONO_TYPE_GENERICINST:
2877 if (MONO_TYPE_IS_REFERENCE (sig->ret)) {
2878 *(gpointer*)ret = (gpointer)res;
2883 case MONO_TYPE_VALUETYPE:
// Vtype results were written directly through the retaddr register.
2884 g_assert (ainfo->cinfo->vtype_retaddr);
2889 *(float*)ret = *(float*)&res;
2891 case MONO_TYPE_R8: {
2898 *(double*)ret = *(double*)&regs;
2902 g_assert_not_reached ();
2909 * Allow tracing to work with this interface (with an optional argument)
// mono_arch_instrument_prolog:
//   Emit a call to FUNC (the enter-trace hook) at method entry, passing the
//   MonoMethod in r0. Tail of the function is on missing lines.
2913 mono_arch_instrument_prolog (MonoCompile *cfg, void *func, void *p, gboolean enable_arguments)
2917 code = mono_arm_emit_load_imm (code, ARMREG_R0, (guint32)cfg->method);
2918 ARM_MOV_REG_IMM8 (code, ARMREG_R1, 0); /* NULL ebp for now */
2919 code = mono_arm_emit_load_imm (code, ARMREG_R2, (guint32)func);
2920 code = emit_call_reg (code, ARMREG_R2);
// mono_arch_instrument_epilog_full:
//   Emit a call to FUNC (the leave-trace hook) at method exit, saving the
//   return value around the call and restoring it afterwards. The save
//   strategy (SAVE_NONE/ONE/TWO/FP/STRUCT) is chosen from the return type.
//   NOTE(review): gappy listing; several case labels are on missing lines.
2933 mono_arch_instrument_epilog_full (MonoCompile *cfg, void *func, void *p, gboolean enable_arguments, gboolean preserve_argument_registers)
2936 int save_mode = SAVE_NONE;
2938 MonoMethod *method = cfg->method;
2939 int rtype = mini_type_get_underlying_type (cfg->generic_sharing_context, mono_method_signature (method)->ret)->type;
2940 int save_offset = cfg->param_area;
// Grow the native code buffer if the epilog instrumentation may not fit.
2944 offset = code - cfg->native_code;
2945 /* we need about 16 instructions */
2946 if (offset > (cfg->code_size - 16 * 4)) {
2947 cfg->code_size *= 2;
2948 cfg->native_code = g_realloc (cfg->native_code, cfg->code_size);
2949 code = cfg->native_code + offset;
2952 case MONO_TYPE_VOID:
2953 /* special case string .ctor icall */
2954 if (strcmp (".ctor", method->name) && method->klass == mono_defaults.string_class)
2955 save_mode = SAVE_ONE;
2957 save_mode = SAVE_NONE;
2961 save_mode = SAVE_TWO;
2965 save_mode = SAVE_FP;
2967 case MONO_TYPE_VALUETYPE:
2968 save_mode = SAVE_STRUCT;
2971 save_mode = SAVE_ONE;
// Spill the return registers to the frame before calling the hook.
2975 switch (save_mode) {
2977 ARM_STR_IMM (code, ARMREG_R0, cfg->frame_reg, save_offset);
2978 ARM_STR_IMM (code, ARMREG_R1, cfg->frame_reg, save_offset + 4);
2979 if (enable_arguments) {
2980 ARM_MOV_REG_REG (code, ARMREG_R2, ARMREG_R1);
2981 ARM_MOV_REG_REG (code, ARMREG_R1, ARMREG_R0);
2985 ARM_STR_IMM (code, ARMREG_R0, cfg->frame_reg, save_offset);
2986 if (enable_arguments) {
2987 ARM_MOV_REG_REG (code, ARMREG_R1, ARMREG_R0);
2991 /* FIXME: what reg? */
2992 if (enable_arguments) {
2993 /* FIXME: what reg? */
2997 if (enable_arguments) {
2998 /* FIXME: get the actual address */
2999 ARM_MOV_REG_REG (code, ARMREG_R1, ARMREG_R0);
// Call the hook with the MonoMethod in r0 ...
3007 code = mono_arm_emit_load_imm (code, ARMREG_R0, (guint32)cfg->method);
3008 code = mono_arm_emit_load_imm (code, ARMREG_IP, (guint32)func);
3009 code = emit_call_reg (code, ARMREG_IP);
// ... then reload the saved return registers.
3011 switch (save_mode) {
3013 ARM_LDR_IMM (code, ARMREG_R0, cfg->frame_reg, save_offset);
3014 ARM_LDR_IMM (code, ARMREG_R1, cfg->frame_reg, save_offset + 4);
3017 ARM_LDR_IMM (code, ARMREG_R0, cfg->frame_reg, save_offset);
3031 * The immediate field for cond branches is big enough for all reasonable methods
// Emit a conditional branch to ins->inst_true_bb; the target is resolved
// later through a MONO_PATCH_INFO_BB patch (the direct-offset path is
// disabled via the `0 &&`).
3033 #define EMIT_COND_BRANCH_FLAGS(ins,condcode) \
3034 if (0 && ins->inst_true_bb->native_offset) { \
3035 ARM_B_COND (code, (condcode), (code - cfg->native_code + ins->inst_true_bb->native_offset) & 0xffffff); \
3037 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_BB, ins->inst_true_bb); \
3038 ARM_B_COND (code, (condcode), 0); \
3041 #define EMIT_COND_BRANCH(ins,cond) EMIT_COND_BRANCH_FLAGS(ins, branch_cc_table [(cond)])
3043 /* emit an exception if condition is fail
3045 * We assign the extra code used to throw the implicit exceptions
3046 * to cfg->bb_exit as far as the big branch handling is concerned
// Conditionally branch-and-link to a throw stub, resolved via a
// MONO_PATCH_INFO_EXC patch on exc_name.
3048 #define EMIT_COND_SYSTEM_EXCEPTION_FLAGS(condcode,exc_name) \
3050 mono_add_patch_info (cfg, code - cfg->native_code, \
3051 MONO_PATCH_INFO_EXC, exc_name); \
3052 ARM_BL_COND (code, (condcode), 0); \
3055 #define EMIT_COND_SYSTEM_EXCEPTION(cond,exc_name) EMIT_COND_SYSTEM_EXCEPTION_FLAGS(branch_cc_table [(cond)], (exc_name))
3058 mono_arch_peephole_pass_1 (MonoCompile *cfg, MonoBasicBlock *bb)
// mono_arch_peephole_pass_2:
//   Local peephole optimizations over BB: eliminate redundant load-after-store,
//   load-after-load, and move-after-move pairs. NOTE(review): gappy listing;
//   some case labels and break statements are on missing lines.
3063 mono_arch_peephole_pass_2 (MonoCompile *cfg, MonoBasicBlock *bb)
3065 MonoInst *ins, *n, *last_ins = NULL;
3067 MONO_BB_FOR_EACH_INS_SAFE (bb, n, ins) {
3068 switch (ins->opcode) {
3071 /* Already done by an arch-independent pass */
3073 case OP_LOAD_MEMBASE:
3074 case OP_LOADI4_MEMBASE:
// Store followed by load of the same [basereg+offset]: the loaded value
// is the just-stored register, so the load becomes a move (or vanishes).
3076 * OP_STORE_MEMBASE_REG reg, offset(basereg)
3077 * OP_LOAD_MEMBASE offset(basereg), reg
3079 if (last_ins && (last_ins->opcode == OP_STOREI4_MEMBASE_REG
3080 || last_ins->opcode == OP_STORE_MEMBASE_REG) &&
3081 ins->inst_basereg == last_ins->inst_destbasereg &&
3082 ins->inst_offset == last_ins->inst_offset) {
3083 if (ins->dreg == last_ins->sreg1) {
3084 MONO_DELETE_INS (bb, ins);
3087 //static int c = 0; printf ("MATCHX %s %d\n", cfg->method->name,c++);
3088 ins->opcode = OP_MOVE;
3089 ins->sreg1 = last_ins->sreg1;
3093 * Note: reg1 must be different from the basereg in the second load
3094 * OP_LOAD_MEMBASE offset(basereg), reg1
3095 * OP_LOAD_MEMBASE offset(basereg), reg2
3097 * OP_LOAD_MEMBASE offset(basereg), reg1
3098 * OP_MOVE reg1, reg2
3100 } if (last_ins && (last_ins->opcode == OP_LOADI4_MEMBASE
3101 || last_ins->opcode == OP_LOAD_MEMBASE) &&
3102 ins->inst_basereg != last_ins->dreg &&
3103 ins->inst_basereg == last_ins->inst_basereg &&
3104 ins->inst_offset == last_ins->inst_offset) {
3106 if (ins->dreg == last_ins->dreg) {
3107 MONO_DELETE_INS (bb, ins);
3110 ins->opcode = OP_MOVE;
3111 ins->sreg1 = last_ins->dreg;
3114 //g_assert_not_reached ();
3118 * OP_STORE_MEMBASE_IMM imm, offset(basereg)
3119 * OP_LOAD_MEMBASE offset(basereg), reg
3121 * OP_STORE_MEMBASE_IMM imm, offset(basereg)
3122 * OP_ICONST reg, imm
3124 } else if (last_ins && (last_ins->opcode == OP_STOREI4_MEMBASE_IMM
3125 || last_ins->opcode == OP_STORE_MEMBASE_IMM) &&
3126 ins->inst_basereg == last_ins->inst_destbasereg &&
3127 ins->inst_offset == last_ins->inst_offset) {
3128 //static int c = 0; printf ("MATCHX %s %d\n", cfg->method->name,c++);
3129 ins->opcode = OP_ICONST;
3130 ins->inst_c0 = last_ins->inst_imm;
3131 g_assert_not_reached (); // check this rule
// Narrow load after a matching narrow store: replace with sign/zero
// extension of the stored register.
3135 case OP_LOADU1_MEMBASE:
3136 case OP_LOADI1_MEMBASE:
3137 if (last_ins && (last_ins->opcode == OP_STOREI1_MEMBASE_REG) &&
3138 ins->inst_basereg == last_ins->inst_destbasereg &&
3139 ins->inst_offset == last_ins->inst_offset) {
3140 ins->opcode = (ins->opcode == OP_LOADI1_MEMBASE) ? OP_ICONV_TO_I1 : OP_ICONV_TO_U1;
3141 ins->sreg1 = last_ins->sreg1;
3144 case OP_LOADU2_MEMBASE:
3145 case OP_LOADI2_MEMBASE:
3146 if (last_ins && (last_ins->opcode == OP_STOREI2_MEMBASE_REG) &&
3147 ins->inst_basereg == last_ins->inst_destbasereg &&
3148 ins->inst_offset == last_ins->inst_offset) {
3149 ins->opcode = (ins->opcode == OP_LOADI2_MEMBASE) ? OP_ICONV_TO_I2 : OP_ICONV_TO_U2;
3150 ins->sreg1 = last_ins->sreg1;
3154 ins->opcode = OP_MOVE;
// Drop self-moves and move/move-back pairs.
3158 if (ins->dreg == ins->sreg1) {
3159 MONO_DELETE_INS (bb, ins);
3163 * OP_MOVE sreg, dreg
3164 * OP_MOVE dreg, sreg
3166 if (last_ins && last_ins->opcode == OP_MOVE &&
3167 ins->sreg1 == last_ins->dreg &&
3168 ins->dreg == last_ins->sreg1) {
3169 MONO_DELETE_INS (bb, ins);
3177 bb->last_ins = last_ins;
3181 * the branch_cc_table should maintain the order of these
3195 branch_cc_table [] = {
3209 #define ADD_NEW_INS(cfg,dest,op) do { \
3210 MONO_INST_NEW ((cfg), (dest), (op)); \
3211 mono_bblock_insert_before_ins (bb, ins, (dest)); \
// map_to_reg_reg_op:
//   Map an opcode that takes an immediate/membase operand to the equivalent
//   register-register (MEMINDEX) or register-store form, used by the lowering
//   pass when the immediate does not fit the instruction encoding.
//   NOTE(review): gappy listing; several case labels near the top are missing.
3215 map_to_reg_reg_op (int op)
3224 case OP_COMPARE_IMM:
3226 case OP_ICOMPARE_IMM:
3240 case OP_LOAD_MEMBASE:
3241 return OP_LOAD_MEMINDEX;
3242 case OP_LOADI4_MEMBASE:
3243 return OP_LOADI4_MEMINDEX;
3244 case OP_LOADU4_MEMBASE:
3245 return OP_LOADU4_MEMINDEX;
3246 case OP_LOADU1_MEMBASE:
3247 return OP_LOADU1_MEMINDEX;
3248 case OP_LOADI2_MEMBASE:
3249 return OP_LOADI2_MEMINDEX;
3250 case OP_LOADU2_MEMBASE:
3251 return OP_LOADU2_MEMINDEX;
3252 case OP_LOADI1_MEMBASE:
3253 return OP_LOADI1_MEMINDEX;
3254 case OP_STOREI1_MEMBASE_REG:
3255 return OP_STOREI1_MEMINDEX;
3256 case OP_STOREI2_MEMBASE_REG:
3257 return OP_STOREI2_MEMINDEX;
3258 case OP_STOREI4_MEMBASE_REG:
3259 return OP_STOREI4_MEMINDEX;
3260 case OP_STORE_MEMBASE_REG:
3261 return OP_STORE_MEMINDEX;
3262 case OP_STORER4_MEMBASE_REG:
3263 return OP_STORER4_MEMINDEX;
3264 case OP_STORER8_MEMBASE_REG:
3265 return OP_STORER8_MEMINDEX;
// Immediate stores are first rewritten to register stores; the lowering
// pass materializes the immediate into a register separately.
3266 case OP_STORE_MEMBASE_IMM:
3267 return OP_STORE_MEMBASE_REG;
3268 case OP_STOREI1_MEMBASE_IMM:
3269 return OP_STOREI1_MEMBASE_REG;
3270 case OP_STOREI2_MEMBASE_IMM:
3271 return OP_STOREI2_MEMBASE_REG;
3272 case OP_STOREI4_MEMBASE_IMM:
3273 return OP_STOREI4_MEMBASE_REG;
3275 g_assert_not_reached ();
3279 * Remove from the instruction list the instructions that can't be
3280 * represented with very simple instructions with no register
// mono_arch_lowering_pass:
//   Rewrite instructions whose immediates/offsets do not fit ARM encodings:
//   materialize the constant into a fresh vreg (OP_ICONST / OP_ADD_IMM) and
//   switch the instruction to its register-register form. Also fixes up
//   overflow-check and fp-compare quirks. NOTE(review): gappy listing; many
//   case labels, breaks and the loop_start label are on missing lines.
3284 mono_arch_lowering_pass (MonoCompile *cfg, MonoBasicBlock *bb)
3286 MonoInst *ins, *temp, *last_ins = NULL;
3287 int rot_amount, imm8, low_imm;
3289 MONO_BB_FOR_EACH_INS (bb, ins) {
3291 switch (ins->opcode) {
3295 case OP_COMPARE_IMM:
3296 case OP_ICOMPARE_IMM:
// ALU immediates must be encodable as a rotated 8-bit value; otherwise
// load the constant into a register and use the reg-reg opcode.
3310 if ((imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount)) < 0) {
3311 ADD_NEW_INS (cfg, temp, OP_ICONST);
3312 temp->inst_c0 = ins->inst_imm;
3313 temp->dreg = mono_alloc_ireg (cfg);
3314 ins->sreg2 = temp->dreg;
3315 ins->opcode = mono_op_imm_to_op (ins->opcode);
3317 if (ins->opcode == OP_SBB || ins->opcode == OP_ISBB || ins->opcode == OP_SUBCC)
// Strength-reduce multiply-by-immediate: 1 -> move, 0 -> const 0,
// power of two -> shift, otherwise a real multiply.
3323 if (ins->inst_imm == 1) {
3324 ins->opcode = OP_MOVE;
3327 if (ins->inst_imm == 0) {
3328 ins->opcode = OP_ICONST;
3332 imm8 = mono_is_power_of_two (ins->inst_imm);
3334 ins->opcode = OP_SHL_IMM;
3335 ins->inst_imm = imm8;
3338 ADD_NEW_INS (cfg, temp, OP_ICONST);
3339 temp->inst_c0 = ins->inst_imm;
3340 temp->dreg = mono_alloc_ireg (cfg);
3341 ins->sreg2 = temp->dreg;
3342 ins->opcode = OP_IMUL;
3348 if (ins->next && (ins->next->opcode == OP_COND_EXC_C || ins->next->opcode == OP_COND_EXC_IC))
3349 /* ARM sets the C flag to 1 if there was _no_ overflow */
3350 ins->next->opcode = OP_COND_EXC_NC;
3353 case OP_IDIV_UN_IMM:
3355 case OP_IREM_UN_IMM:
3356 ADD_NEW_INS (cfg, temp, OP_ICONST);
3357 temp->inst_c0 = ins->inst_imm;
3358 temp->dreg = mono_alloc_ireg (cfg);
3359 ins->sreg2 = temp->dreg;
3360 ins->opcode = mono_op_imm_to_op (ins->opcode);
3362 case OP_LOCALLOC_IMM:
3363 ADD_NEW_INS (cfg, temp, OP_ICONST);
3364 temp->inst_c0 = ins->inst_imm;
3365 temp->dreg = mono_alloc_ireg (cfg);
3366 ins->sreg1 = temp->dreg;
3367 ins->opcode = OP_LOCALLOC;
3369 case OP_LOAD_MEMBASE:
3370 case OP_LOADI4_MEMBASE:
3371 case OP_LOADU4_MEMBASE:
3372 case OP_LOADU1_MEMBASE:
3373 /* we can do two things: load the immed in a register
3374 * and use an indexed load, or see if the immed can be
3375 * represented as an ad_imm + a load with a smaller offset
3376 * that fits. We just do the first for now, optimize later.
3378 if (arm_is_imm12 (ins->inst_offset))
3380 ADD_NEW_INS (cfg, temp, OP_ICONST);
3381 temp->inst_c0 = ins->inst_offset;
3382 temp->dreg = mono_alloc_ireg (cfg);
3383 ins->sreg2 = temp->dreg;
3384 ins->opcode = map_to_reg_reg_op (ins->opcode);
// Halfword/signed-byte loads have a smaller (8-bit) offset field.
3386 case OP_LOADI2_MEMBASE:
3387 case OP_LOADU2_MEMBASE:
3388 case OP_LOADI1_MEMBASE:
3389 if (arm_is_imm8 (ins->inst_offset))
3391 ADD_NEW_INS (cfg, temp, OP_ICONST);
3392 temp->inst_c0 = ins->inst_offset;
3393 temp->dreg = mono_alloc_ireg (cfg);
3394 ins->sreg2 = temp->dreg;
3395 ins->opcode = map_to_reg_reg_op (ins->opcode);
// FP loads: try base + rotated-imm add with a small residual offset,
// otherwise compute the full address into a register.
3397 case OP_LOADR4_MEMBASE:
3398 case OP_LOADR8_MEMBASE:
3399 if (arm_is_fpimm8 (ins->inst_offset))
3401 low_imm = ins->inst_offset & 0x1ff;
3402 if ((imm8 = mono_arm_is_rotated_imm8 (ins->inst_offset & ~0x1ff, &rot_amount)) >= 0) {
3403 ADD_NEW_INS (cfg, temp, OP_ADD_IMM);
3404 temp->inst_imm = ins->inst_offset & ~0x1ff;
3405 temp->sreg1 = ins->inst_basereg;
3406 temp->dreg = mono_alloc_ireg (cfg);
3407 ins->inst_basereg = temp->dreg;
3408 ins->inst_offset = low_imm;
3412 ADD_NEW_INS (cfg, temp, OP_ICONST);
3413 temp->inst_c0 = ins->inst_offset;
3414 temp->dreg = mono_alloc_ireg (cfg);
3416 ADD_NEW_INS (cfg, add_ins, OP_IADD);
3417 add_ins->sreg1 = ins->inst_basereg;
3418 add_ins->sreg2 = temp->dreg;
3419 add_ins->dreg = mono_alloc_ireg (cfg);
3421 ins->inst_basereg = add_ins->dreg;
3422 ins->inst_offset = 0;
3425 case OP_STORE_MEMBASE_REG:
3426 case OP_STOREI4_MEMBASE_REG:
3427 case OP_STOREI1_MEMBASE_REG:
3428 if (arm_is_imm12 (ins->inst_offset))
3430 ADD_NEW_INS (cfg, temp, OP_ICONST);
3431 temp->inst_c0 = ins->inst_offset;
3432 temp->dreg = mono_alloc_ireg (cfg);
3433 ins->sreg2 = temp->dreg;
3434 ins->opcode = map_to_reg_reg_op (ins->opcode);
3436 case OP_STOREI2_MEMBASE_REG:
3437 if (arm_is_imm8 (ins->inst_offset))
3439 ADD_NEW_INS (cfg, temp, OP_ICONST);
3440 temp->inst_c0 = ins->inst_offset;
3441 temp->dreg = mono_alloc_ireg (cfg);
3442 ins->sreg2 = temp->dreg;
3443 ins->opcode = map_to_reg_reg_op (ins->opcode);
3445 case OP_STORER4_MEMBASE_REG:
3446 case OP_STORER8_MEMBASE_REG:
3447 if (arm_is_fpimm8 (ins->inst_offset))
3449 low_imm = ins->inst_offset & 0x1ff;
3450 if ((imm8 = mono_arm_is_rotated_imm8 (ins->inst_offset & ~ 0x1ff, &rot_amount)) >= 0 && arm_is_fpimm8 (low_imm)) {
3451 ADD_NEW_INS (cfg, temp, OP_ADD_IMM);
3452 temp->inst_imm = ins->inst_offset & ~0x1ff;
3453 temp->sreg1 = ins->inst_destbasereg;
3454 temp->dreg = mono_alloc_ireg (cfg);
3455 ins->inst_destbasereg = temp->dreg;
3456 ins->inst_offset = low_imm;
3460 ADD_NEW_INS (cfg, temp, OP_ICONST);
3461 temp->inst_c0 = ins->inst_offset;
3462 temp->dreg = mono_alloc_ireg (cfg);
3464 ADD_NEW_INS (cfg, add_ins, OP_IADD);
3465 add_ins->sreg1 = ins->inst_destbasereg;
3466 add_ins->sreg2 = temp->dreg;
3467 add_ins->dreg = mono_alloc_ireg (cfg);
3469 ins->inst_destbasereg = add_ins->dreg;
3470 ins->inst_offset = 0;
3473 case OP_STORE_MEMBASE_IMM:
3474 case OP_STOREI1_MEMBASE_IMM:
3475 case OP_STOREI2_MEMBASE_IMM:
3476 case OP_STOREI4_MEMBASE_IMM:
3477 ADD_NEW_INS (cfg, temp, OP_ICONST);
3478 temp->inst_c0 = ins->inst_imm;
3479 temp->dreg = mono_alloc_ireg (cfg);
3480 ins->sreg1 = temp->dreg;
3481 ins->opcode = map_to_reg_reg_op (ins->opcode);
3483 goto loop_start; /* make it handle the possibly big ins->inst_offset */
// Some VFP compares only exist in one operand order; rewrite the following
// branch and swap the compare's operands.
3485 gboolean swap = FALSE;
3489 /* Optimized away */
3494 /* Some fp compares require swapped operands */
3495 switch (ins->next->opcode) {
3497 ins->next->opcode = OP_FBLT;
3501 ins->next->opcode = OP_FBLT_UN;
3505 ins->next->opcode = OP_FBGE;
3509 ins->next->opcode = OP_FBGE_UN;
3517 ins->sreg1 = ins->sreg2;
3526 bb->last_ins = last_ins;
3527 bb->max_vreg = cfg->next_vreg;
// mono_arch_decompose_long_opts:
//   Decompose 64-bit opcodes into 32-bit pairs. Visible: OP_LNEG becomes
//   RSBS/RSC of the two halves (negate low word setting carry, then
//   reverse-subtract-with-carry the high word). Tail is on missing lines.
3531 mono_arch_decompose_long_opts (MonoCompile *cfg, MonoInst *long_ins)
3535 if (long_ins->opcode == OP_LNEG) {
3537 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ARM_RSBS_IMM, ins->dreg + 1, ins->sreg1 + 1, 0);
3538 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ARM_RSC_IMM, ins->dreg + 2, ins->sreg1 + 2, 0);
// emit_float_to_int:
//   Emit VFP code converting the double in SREG to an integer of SIZE bytes
//   (signed or unsigned) in DREG, using a scratch VFP register, then
//   truncate/sign-extend the result to the requested width.
3544 emit_float_to_int (MonoCompile *cfg, guchar *code, int dreg, int sreg, int size, gboolean is_signed)
3546 /* sreg is a float, dreg is an integer reg */
3548 code = mono_arm_emit_vfp_scratch_save (cfg, code, vfp_scratch1);
3550 ARM_TOSIZD (code, vfp_scratch1, sreg);
3552 ARM_TOUIZD (code, vfp_scratch1, sreg);
3553 ARM_FMRS (code, dreg, vfp_scratch1);
3554 code = mono_arm_emit_vfp_scratch_restore (cfg, code, vfp_scratch1);
// Narrow the 32-bit result: mask/zero-extend for unsigned ...
3558 ARM_AND_REG_IMM8 (code, dreg, dreg, 0xff);
3559 else if (size == 2) {
3560 ARM_SHL_IMM (code, dreg, dreg, 16);
3561 ARM_SHR_IMM (code, dreg, dreg, 16);
// ... shift-left then arithmetic-shift-right to sign-extend for signed.
3565 ARM_SHL_IMM (code, dreg, dreg, 24);
3566 ARM_SAR_IMM (code, dreg, dreg, 24);
3567 } else if (size == 2) {
3568 ARM_SHL_IMM (code, dreg, dreg, 16);
3569 ARM_SAR_IMM (code, dreg, dreg, 16);
3575 #endif /* #ifndef DISABLE_JIT */
3579 const guchar *target;
3584 #define is_call_imm(diff) ((gint)(diff) >= -33554432 && (gint)(diff) <= 33554431)
// search_thunk_slot:
//   mono_code_manager_foreach/mono_domain_code_foreach callback: look for an
//   existing 3-word branch thunk targeting pdata->target, or claim a free
//   slot and emit one, then patch pdata->code to branch to it. Sets
//   pdata->found on success.
3587 search_thunk_slot (void *data, int csize, int bsize, void *user_data) {
3588 PatchData *pdata = (PatchData*)user_data;
3589 guchar *code = data;
3590 guint32 *thunks = data;
3591 guint32 *endthunks = (guint32*)(code + bsize);
3593 int difflow, diffhigh;
3595 /* always ensure a call from pdata->code can reach to the thunks without further thunks */
3596 difflow = (char*)pdata->code - (char*)thunks;
3597 diffhigh = (char*)pdata->code - (char*)endthunks;
3598 if (!((is_call_imm (thunks) && is_call_imm (endthunks)) || (is_call_imm (difflow) && is_call_imm (diffhigh))))
3602 * The thunk is composed of 3 words:
3603 * load constant from thunks [2] into ARM_IP
3606 * Note that the LR register is already setup
3608 //g_print ("thunk nentries: %d\n", ((char*)endthunks - (char*)thunks)/16);
3609 if ((pdata->found == 2) || (pdata->code >= code && pdata->code <= code + csize)) {
3610 while (thunks < endthunks) {
3611 //g_print ("looking for target: %p at %p (%08x-%08x)\n", pdata->target, thunks, thunks [0], thunks [1]);
// Word 2 of a thunk holds the target address; reuse a matching thunk.
3612 if (thunks [2] == (guint32)pdata->target) {
3613 arm_patch (pdata->code, (guchar*)thunks);
3614 mono_arch_flush_icache (pdata->code, 4);
3617 } else if ((thunks [0] == 0) && (thunks [1] == 0) && (thunks [2] == 0)) {
3618 /* found a free slot instead: emit thunk */
3619 /* ARMREG_IP is fine to use since this can't be an IMT call
3622 code = (guchar*)thunks;
3623 ARM_LDR_IMM (code, ARMREG_IP, ARMREG_PC, 0);
3624 if (thumb_supported)
3625 ARM_BX (code, ARMREG_IP);
3627 ARM_MOV_REG_REG (code, ARMREG_PC, ARMREG_IP);
3628 thunks [2] = (guint32)pdata->target;
3629 mono_arch_flush_icache ((guchar*)thunks, 12);
3631 arm_patch (pdata->code, (guchar*)thunks);
3632 mono_arch_flush_icache (pdata->code, 4);
3636 /* skip 12 bytes, the size of the thunk */
3640 //g_print ("failed thunk lookup for %p from %p at %p (%d entries)\n", pdata->target, pdata->code, data, count);
// handle_thunk:
//   Find or create a branch thunk so CODE can reach TARGET when the branch
//   displacement does not fit. Searches, in order: the dynamic-method code
//   manager (if given), the domain's code manager, then every dynamic
//   method's code manager. Asserts if no thunk slot can be found.
3646 handle_thunk (MonoDomain *domain, int absolute, guchar *code, const guchar *target, MonoCodeManager *dyn_code_mp)
3651 domain = mono_domain_get ();
3654 pdata.target = target;
3655 pdata.absolute = absolute;
3659 mono_code_manager_foreach (dyn_code_mp, search_thunk_slot, &pdata);
3662 if (pdata.found != 1) {
3663 mono_domain_lock (domain);
3664 mono_domain_code_foreach (domain, search_thunk_slot, &pdata);
// Second pass (pdata.found == 2 mode) takes the first available slot.
3667 /* this uses the first available slot */
3669 mono_domain_code_foreach (domain, search_thunk_slot, &pdata);
3671 mono_domain_unlock (domain);
3674 if (pdata.found != 1) {
3676 GHashTableIter iter;
3677 MonoJitDynamicMethodInfo *ji;
3680 * This might be a dynamic method, search its code manager. We can only
3681 * use the dynamic method containing CODE, since the others might be freed later.
3685 mono_domain_lock (domain);
3686 hash = domain_jit_info (domain)->dynamic_code_hash;
3688 /* FIXME: Speed this up */
3689 g_hash_table_iter_init (&iter, hash);
3690 while (g_hash_table_iter_next (&iter, NULL, (gpointer*)&ji)) {
3691 mono_code_manager_foreach (ji->code_mp, search_thunk_slot, &pdata);
3692 if (pdata.found == 1)
3696 mono_domain_unlock (domain);
3698 if (pdata.found != 1)
3699 g_print ("thunk failed for %p from %p\n", target, code);
3700 g_assert (pdata.found == 1);
// arm_patch_general:
//   Patch the instruction(s) at CODE to transfer control to TARGET.
//   Handles: direct B/BL (rewrite the 24-bit signed displacement, promoting
//   BL to BLX for Thumb targets, or fall back to a thunk when out of range),
//   jump-table entries (USE_JUMP_TABLES), and the constant-pool-based
//   BX/BLX call sequences (patch the embedded address word).
//   NOTE(review): gappy listing; several lines of each sequence are missing.
3704 arm_patch_general (MonoDomain *domain, guchar *code, const guchar *target, MonoCodeManager *dyn_code_mp)
3706 guint32 *code32 = (void*)code;
3707 guint32 ins = *code32;
3708 guint32 prim = (ins >> 25) & 7;
3709 guint32 tval = GPOINTER_TO_UINT (target);
3711 //g_print ("patching 0x%08x (0x%08x) to point to 0x%08x\n", code, ins, target);
3712 if (prim == 5) { /* 101b */
3713 /* the diff starts 8 bytes from the branch opcode */
3714 gint diff = target - code - 8;
3716 gint tmask = 0xffffffff;
3717 if (tval & 1) { /* entering thumb mode */
3718 diff = target - 1 - code - 8;
3719 g_assert (thumb_supported);
3720 tbits = 0xf << 28; /* bl->blx bit pattern */
3721 g_assert ((ins & (1 << 24))); /* it must be a bl, not b instruction */
3722 /* this low bit of the displacement is moved to bit 24 in the instruction encoding */
3726 tmask = ~(1 << 24); /* clear the link bit */
3727 /*g_print ("blx to thumb: target: %p, code: %p, diff: %d, mask: %x\n", target, code, diff, tmask);*/
// In-range forward branch: rewrite the 24-bit displacement field.
3732 if (diff <= 33554431) {
3734 ins = (ins & 0xff000000) | diff;
3736 *code32 = ins | tbits;
3740 /* diff between 0 and -33554432 */
3741 if (diff >= -33554432) {
3743 ins = (ins & 0xff000000) | (diff & ~0xff000000);
3745 *code32 = ins | tbits;
// Out of range: route the branch through a thunk.
3750 handle_thunk (domain, TRUE, code, target, dyn_code_mp);
3754 #ifdef USE_JUMP_TABLES
3756 gpointer *jte = mono_jumptable_get_entry (code);
3758 jte [0] = (gpointer) target;
3762 * The alternative call sequences looks like this:
3764 * ldr ip, [pc] // loads the address constant
3765 * b 1f // jumps around the constant
3766 * address constant embedded in the code
3771 * There are two cases for patching:
3772 * a) at the end of method emission: in this case code points to the start
3773 * of the call sequence
3774 * b) during runtime patching of the call site: in this case code points
3775 * to the mov pc, ip instruction
3777 * We have to handle also the thunk jump code sequence:
3781 * address constant // execution never reaches here
3783 if ((ins & 0x0ffffff0) == 0x12fff10) {
3784 /* Branch and exchange: the address is constructed in a reg
3785 * We can patch BX when the code sequence is the following:
3786 * ldr ip, [pc, #0] ; 0x8
// Re-emit the expected sequence into a scratch buffer and compare it with
// the code at the patch site to know which word holds the address.
3793 guint8 *emit = (guint8*)ccode;
3794 ARM_LDR_IMM (emit, ARMREG_IP, ARMREG_PC, 0);
3796 ARM_MOV_REG_REG (emit, ARMREG_LR, ARMREG_PC);
3797 ARM_BX (emit, ARMREG_IP);
3799 /*patching from magic trampoline*/
3800 if (ins == ccode [3]) {
3801 g_assert (code32 [-4] == ccode [0]);
3802 g_assert (code32 [-3] == ccode [1]);
3803 g_assert (code32 [-1] == ccode [2]);
3804 code32 [-2] = (guint32)target;
3807 /*patching from JIT*/
3808 if (ins == ccode [0]) {
3809 g_assert (code32 [1] == ccode [1]);
3810 g_assert (code32 [3] == ccode [2]);
3811 g_assert (code32 [4] == ccode [3]);
3812 code32 [2] = (guint32)target;
3815 g_assert_not_reached ();
3816 } else if ((ins & 0x0ffffff0) == 0x12fff30) {
3824 guint8 *emit = (guint8*)ccode;
3825 ARM_LDR_IMM (emit, ARMREG_IP, ARMREG_PC, 0);
3827 ARM_BLX_REG (emit, ARMREG_IP);
3829 g_assert (code32 [-3] == ccode [0]);
3830 g_assert (code32 [-2] == ccode [1]);
3831 g_assert (code32 [0] == ccode [2]);
3833 code32 [-1] = (guint32)target;
3836 guint32 *tmp = ccode;
3837 guint8 *emit = (guint8*)tmp;
3838 ARM_LDR_IMM (emit, ARMREG_IP, ARMREG_PC, 0);
3839 ARM_MOV_REG_REG (emit, ARMREG_LR, ARMREG_PC);
3840 ARM_MOV_REG_REG (emit, ARMREG_PC, ARMREG_IP);
3841 ARM_BX (emit, ARMREG_IP);
3842 if (ins == ccode [2]) {
3843 g_assert_not_reached (); // should be -2 ...
3844 code32 [-1] = (guint32)target;
3847 if (ins == ccode [0]) {
3848 /* handles both thunk jump code and the far call sequence */
3849 code32 [2] = (guint32)target;
3852 g_assert_not_reached ();
3854 // g_print ("patched with 0x%08x\n", ins);
// arm_patch: convenience wrapper — patch without a domain/dyn code manager.
3859 arm_patch (guchar *code, const guchar *target)
3861 arm_patch_general (NULL, code, target, NULL);
3865 * Return the >= 0 uimm8 value if val can be represented with a byte + rotation
3866 * (with the rotation amount in *rot_amount. rot_amount is already adjusted
3867 * to be used with the emit macros.
3868 * Return -1 otherwise.
// Tries every even rotation (ARM modified-immediate encoding rotates in
// steps of 2 bits); the fits-in-a-byte check is on a missing line.
3871 mono_arm_is_rotated_imm8 (guint32 val, gint *rot_amount)
3874 for (i = 0; i < 31; i+= 2) {
3875 res = (val << (32 - i)) | (val >> i);
3878 *rot_amount = i? 32 - i: 0;
3885 * Emits in code a sequence of instructions that load the value 'val'
3886 * into the dreg register. Uses at most 4 instructions.
// Strategies, in order of preference: PC-relative constant-pool load
// (jump-tables path), single MOV/MVN with a rotated immediate, MOVW/MOVT
// pair (v7 path — gated on a missing #if line), and finally a MOV of the
// low byte plus up to three ADDs, one per remaining non-zero byte.
3889 mono_arm_emit_load_imm (guint8 *code, int dreg, guint32 val)
3891 int imm8, rot_amount;
3893 ARM_LDR_IMM (code, dreg, ARMREG_PC, 0);
3894 /* skip the constant pool */
3900 if ((imm8 = mono_arm_is_rotated_imm8 (val, &rot_amount)) >= 0) {
3901 ARM_MOV_REG_IMM (code, dreg, imm8, rot_amount);
3902 } else if ((imm8 = mono_arm_is_rotated_imm8 (~val, &rot_amount)) >= 0) {
3903 ARM_MVN_REG_IMM (code, dreg, imm8, rot_amount);
3906 ARM_MOVW_REG_IMM (code, dreg, val & 0xffff);
3908 ARM_MOVT_REG_IMM (code, dreg, (val >> 16) & 0xffff);
3912 ARM_MOV_REG_IMM8 (code, dreg, (val & 0xFF));
3914 ARM_ADD_REG_IMM (code, dreg, dreg, (val & 0xFF00) >> 8, 24);
3916 if (val & 0xFF0000) {
3917 ARM_ADD_REG_IMM (code, dreg, dreg, (val & 0xFF0000) >> 16, 16);
3919 if (val & 0xFF000000) {
3920 ARM_ADD_REG_IMM (code, dreg, dreg, (val & 0xFF000000) >> 24, 8);
3922 } else if (val & 0xFF00) {
3923 ARM_MOV_REG_IMM (code, dreg, (val & 0xFF00) >> 8, 24);
3924 if (val & 0xFF0000) {
3925 ARM_ADD_REG_IMM (code, dreg, dreg, (val & 0xFF0000) >> 16, 16);
3927 if (val & 0xFF000000) {
3928 ARM_ADD_REG_IMM (code, dreg, dreg, (val & 0xFF000000) >> 24, 8);
3930 } else if (val & 0xFF0000) {
3931 ARM_MOV_REG_IMM (code, dreg, (val & 0xFF0000) >> 16, 16);
3932 if (val & 0xFF000000) {
3933 ARM_ADD_REG_IMM (code, dreg, dreg, (val & 0xFF000000) >> 24, 8);
3936 //g_assert_not_reached ();
// mono_arm_thumb_supported: report whether the CPU supports Thumb interworking.
3942 mono_arm_thumb_supported (void)
3944 return thumb_supported;
3950 * emit_load_volatile_arguments:
3952 * Load volatile arguments from the stack to the original input registers.
3953 * Required before a tail call.
/* NOTE(review): elided extract — interior lines (variable declarations for
 * `cinfo`, `inst`, `i`, `pos`, `prev_sp_offset`, several else-branches and
 * the function's closing lines) are missing between the numbered statements;
 * comments below describe only the visible code. */
3956 emit_load_volatile_arguments (MonoCompile *cfg, guint8 *code)
3958 MonoMethod *method = cfg->method;
3959 MonoMethodSignature *sig;
3964 /* FIXME: Generate intermediate code instead */
3966 sig = mono_method_signature (method);
3968 /* This is the opposite of the code in emit_prolog */
3972 cinfo = get_call_info (cfg->generic_sharing_context, NULL, sig);
/* Valuetype return: reload the hidden return-address argument into the
 * register the callee expects (the prolog spilled it to the frame). */
3974 if (cinfo->vtype_retaddr) {
3975 ArgInfo *ainfo = &cinfo->ret;
3976 inst = cfg->vret_addr;
3977 g_assert (arm_is_imm12 (inst->inst_offset));
3978 ARM_LDR_IMM (code, ainfo->reg, inst->inst_basereg, inst->inst_offset);
/* Walk every formal argument (plus `this` if present) and move it back to
 * where the calling convention originally placed it. */
3980 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
3981 ArgInfo *ainfo = cinfo->args + i;
3982 inst = cfg->args [pos];
3984 if (cfg->verbose_level > 2)
3985 g_print ("Loading argument %d (type: %d)\n", i, ainfo->storage);
/* Argument currently lives in a register allocated by the regalloc. */
3986 if (inst->opcode == OP_REGVAR) {
3987 if (ainfo->storage == RegTypeGeneral)
3988 ARM_MOV_REG_REG (code, inst->dreg, ainfo->reg);
/* FP-register arguments are not expected here. */
3989 else if (ainfo->storage == RegTypeFP) {
3990 g_assert_not_reached ();
/* Caller passed it on the stack: reload relative to SP, going through IP
 * when the offset does not fit an ARM 12-bit load displacement. */
3991 } else if (ainfo->storage == RegTypeBase) {
3995 if (arm_is_imm12 (prev_sp_offset + ainfo->offset)) {
3996 ARM_LDR_IMM (code, inst->dreg, ARMREG_SP, (prev_sp_offset + ainfo->offset));
3998 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
3999 ARM_LDR_REG_REG (code, inst->dreg, ARMREG_SP, ARMREG_IP);
4003 g_assert_not_reached ();
/* Argument was spilled to the frame: reload into its original register(s). */
4005 if (ainfo->storage == RegTypeGeneral || ainfo->storage == RegTypeIRegPair) {
4006 switch (ainfo->size) {
/* 8-byte case (elided case label): reload the register pair word by word. */
4013 g_assert (arm_is_imm12 (inst->inst_offset));
4014 ARM_LDR_IMM (code, ainfo->reg, inst->inst_basereg, inst->inst_offset);
4015 g_assert (arm_is_imm12 (inst->inst_offset + 4));
4016 ARM_LDR_IMM (code, ainfo->reg + 1, inst->inst_basereg, inst->inst_offset + 4);
/* Default single-word reload, with the IP-indirection fallback for large
 * frame offsets. */
4019 if (arm_is_imm12 (inst->inst_offset)) {
4020 ARM_LDR_IMM (code, ainfo->reg, inst->inst_basereg, inst->inst_offset);
4022 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
4023 ARM_LDR_REG_REG (code, ainfo->reg, inst->inst_basereg, ARMREG_IP);
/* Bodies of the following storage classes are elided in this extract. */
4027 } else if (ainfo->storage == RegTypeBaseGen) {
4030 } else if (ainfo->storage == RegTypeBase) {
4032 } else if (ainfo->storage == RegTypeFP) {
4033 g_assert_not_reached ();
/* Struct passed by value in registers: reload each word back into the
 * consecutive registers starting at ainfo->reg. */
4034 } else if (ainfo->storage == RegTypeStructByVal) {
4035 int doffset = inst->inst_offset;
4039 if (mono_class_from_mono_type (inst->inst_vtype))
4040 size = mono_class_native_size (mono_class_from_mono_type (inst->inst_vtype), NULL);
4041 for (cur_reg = 0; cur_reg < ainfo->size; ++cur_reg) {
4042 if (arm_is_imm12 (doffset)) {
4043 ARM_LDR_IMM (code, ainfo->reg + cur_reg, inst->inst_basereg, doffset);
4045 code = mono_arm_emit_load_imm (code, ARMREG_IP, doffset);
4046 ARM_LDR_REG_REG (code, ainfo->reg + cur_reg, inst->inst_basereg, ARMREG_IP);
4048 soffset += sizeof (gpointer);
4049 doffset += sizeof (gpointer);
/* Struct passed by reference — handling elided past this point. */
4054 } else if (ainfo->storage == RegTypeStructByAddr) {
4069 mono_arch_output_basic_block (MonoCompile *cfg, MonoBasicBlock *bb)
4074 guint8 *code = cfg->native_code + cfg->code_len;
4075 MonoInst *last_ins = NULL;
4076 guint last_offset = 0;
4078 int imm8, rot_amount;
4080 /* we don't align basic blocks of loops on arm */
4082 if (cfg->verbose_level > 2)
4083 g_print ("Basic block %d starting at offset 0x%x\n", bb->block_num, bb->native_offset);
4085 cpos = bb->max_offset;
4087 if (cfg->prof_options & MONO_PROFILE_COVERAGE) {
4088 //MonoCoverageInfo *cov = mono_get_coverage_info (cfg->method);
4089 //g_assert (!mono_compile_aot);
4092 // cov->data [bb->dfn].iloffset = bb->cil_code - cfg->cil_code;
4093 /* this is not thread save, but good enough */
4094 /* fixme: howto handle overflows? */
4095 //x86_inc_mem (code, &cov->data [bb->dfn].count);
4098 if (mono_break_at_bb_method && mono_method_desc_full_match (mono_break_at_bb_method, cfg->method) && bb->block_num == mono_break_at_bb_bb_num) {
4099 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_INTERNAL_METHOD,
4100 (gpointer)"mono_break");
4101 code = emit_call_seq (cfg, code);
4104 MONO_BB_FOR_EACH_INS (bb, ins) {
4105 offset = code - cfg->native_code;
4107 max_len = ((guint8 *)ins_get_spec (ins->opcode))[MONO_INST_LEN];
4109 if (offset > (cfg->code_size - max_len - 16)) {
4110 cfg->code_size *= 2;
4111 cfg->native_code = g_realloc (cfg->native_code, cfg->code_size);
4112 code = cfg->native_code + offset;
4114 // if (ins->cil_code)
4115 // g_print ("cil code\n");
4116 mono_debug_record_line_number (cfg, ins, offset);
4118 switch (ins->opcode) {
4119 case OP_MEMORY_BARRIER:
4121 ARM_MOV_REG_IMM8 (code, ARMREG_R0, 0);
4122 ARM_MCR (code, 15, 0, ARMREG_R0, 7, 10, 5);
4126 #ifdef HAVE_AEABI_READ_TP
4127 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_INTERNAL_METHOD,
4128 (gpointer)"__aeabi_read_tp");
4129 code = emit_call_seq (cfg, code);
4131 ARM_LDR_IMM (code, ins->dreg, ARMREG_R0, ins->inst_offset);
4133 g_assert_not_reached ();
4136 case OP_ATOMIC_EXCHANGE_I4:
4137 case OP_ATOMIC_CAS_I4:
4138 case OP_ATOMIC_ADD_NEW_I4: {
4142 g_assert (v7_supported);
4145 if (ins->sreg1 != ARMREG_IP && ins->sreg2 != ARMREG_IP && ins->sreg3 != ARMREG_IP)
4147 else if (ins->sreg1 != ARMREG_R0 && ins->sreg2 != ARMREG_R0 && ins->sreg3 != ARMREG_R0)
4149 else if (ins->sreg1 != ARMREG_R1 && ins->sreg2 != ARMREG_R1 && ins->sreg3 != ARMREG_R1)
4153 g_assert (cfg->arch.atomic_tmp_offset != -1);
4154 ARM_STR_IMM (code, tmpreg, cfg->frame_reg, cfg->arch.atomic_tmp_offset);
4156 switch (ins->opcode) {
4157 case OP_ATOMIC_EXCHANGE_I4:
4159 ARM_DMB (code, ARM_DMB_SY);
4160 ARM_LDREX_REG (code, ARMREG_LR, ins->sreg1);
4161 ARM_STREX_REG (code, tmpreg, ins->sreg2, ins->sreg1);
4162 ARM_CMP_REG_IMM (code, tmpreg, 0, 0);
4164 ARM_B_COND (code, ARMCOND_NE, 0);
4165 arm_patch (buf [1], buf [0]);
4167 case OP_ATOMIC_CAS_I4:
4168 ARM_DMB (code, ARM_DMB_SY);
4170 ARM_LDREX_REG (code, ARMREG_LR, ins->sreg1);
4171 ARM_CMP_REG_REG (code, ARMREG_LR, ins->sreg3);
4173 ARM_B_COND (code, ARMCOND_NE, 0);
4174 ARM_STREX_REG (code, tmpreg, ins->sreg2, ins->sreg1);
4175 ARM_CMP_REG_IMM (code, tmpreg, 0, 0);
4177 ARM_B_COND (code, ARMCOND_NE, 0);
4178 arm_patch (buf [2], buf [1]);
4179 arm_patch (buf [1], code);
4181 case OP_ATOMIC_ADD_NEW_I4:
4183 ARM_DMB (code, ARM_DMB_SY);
4184 ARM_LDREX_REG (code, ARMREG_LR, ins->sreg1);
4185 ARM_ADD_REG_REG (code, ARMREG_LR, ARMREG_LR, ins->sreg2);
4186 ARM_STREX_REG (code, tmpreg, ARMREG_LR, ins->sreg1);
4187 ARM_CMP_REG_IMM (code, tmpreg, 0, 0);
4189 ARM_B_COND (code, ARMCOND_NE, 0);
4190 arm_patch (buf [1], buf [0]);
4193 g_assert_not_reached ();
4196 ARM_DMB (code, ARM_DMB_SY);
4197 if (tmpreg != ins->dreg)
4198 ARM_LDR_IMM (code, tmpreg, cfg->frame_reg, cfg->arch.atomic_tmp_offset);
4199 ARM_MOV_REG_REG (code, ins->dreg, ARMREG_LR);
4204 ppc_mullw (code, ppc_r4, ins->sreg1, ins->sreg2);
4205 ppc_mulhw (code, ppc_r3, ins->sreg1, ins->sreg2);
4208 ppc_mullw (code, ppc_r4, ins->sreg1, ins->sreg2);
4209 ppc_mulhwu (code, ppc_r3, ins->sreg1, ins->sreg2);
4211 case OP_STOREI1_MEMBASE_IMM:
4212 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_imm & 0xFF);
4213 g_assert (arm_is_imm12 (ins->inst_offset));
4214 ARM_STRB_IMM (code, ARMREG_LR, ins->inst_destbasereg, ins->inst_offset);
4216 case OP_STOREI2_MEMBASE_IMM:
4217 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_imm & 0xFFFF);
4218 g_assert (arm_is_imm8 (ins->inst_offset));
4219 ARM_STRH_IMM (code, ARMREG_LR, ins->inst_destbasereg, ins->inst_offset);
4221 case OP_STORE_MEMBASE_IMM:
4222 case OP_STOREI4_MEMBASE_IMM:
4223 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_imm);
4224 g_assert (arm_is_imm12 (ins->inst_offset));
4225 ARM_STR_IMM (code, ARMREG_LR, ins->inst_destbasereg, ins->inst_offset);
4227 case OP_STOREI1_MEMBASE_REG:
4228 g_assert (arm_is_imm12 (ins->inst_offset));
4229 ARM_STRB_IMM (code, ins->sreg1, ins->inst_destbasereg, ins->inst_offset);
4231 case OP_STOREI2_MEMBASE_REG:
4232 g_assert (arm_is_imm8 (ins->inst_offset));
4233 ARM_STRH_IMM (code, ins->sreg1, ins->inst_destbasereg, ins->inst_offset);
4235 case OP_STORE_MEMBASE_REG:
4236 case OP_STOREI4_MEMBASE_REG:
4237 /* this case is special, since it happens for spill code after lowering has been called */
4238 if (arm_is_imm12 (ins->inst_offset)) {
4239 ARM_STR_IMM (code, ins->sreg1, ins->inst_destbasereg, ins->inst_offset);
4241 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_offset);
4242 ARM_STR_REG_REG (code, ins->sreg1, ins->inst_destbasereg, ARMREG_LR);
4245 case OP_STOREI1_MEMINDEX:
4246 ARM_STRB_REG_REG (code, ins->sreg1, ins->inst_destbasereg, ins->sreg2);
4248 case OP_STOREI2_MEMINDEX:
4249 ARM_STRH_REG_REG (code, ins->sreg1, ins->inst_destbasereg, ins->sreg2);
4251 case OP_STORE_MEMINDEX:
4252 case OP_STOREI4_MEMINDEX:
4253 ARM_STR_REG_REG (code, ins->sreg1, ins->inst_destbasereg, ins->sreg2);
4256 g_assert_not_reached ();
4258 case OP_LOAD_MEMINDEX:
4259 case OP_LOADI4_MEMINDEX:
4260 case OP_LOADU4_MEMINDEX:
4261 ARM_LDR_REG_REG (code, ins->dreg, ins->inst_basereg, ins->sreg2);
4263 case OP_LOADI1_MEMINDEX:
4264 ARM_LDRSB_REG_REG (code, ins->dreg, ins->inst_basereg, ins->sreg2);
4266 case OP_LOADU1_MEMINDEX:
4267 ARM_LDRB_REG_REG (code, ins->dreg, ins->inst_basereg, ins->sreg2);
4269 case OP_LOADI2_MEMINDEX:
4270 ARM_LDRSH_REG_REG (code, ins->dreg, ins->inst_basereg, ins->sreg2);
4272 case OP_LOADU2_MEMINDEX:
4273 ARM_LDRH_REG_REG (code, ins->dreg, ins->inst_basereg, ins->sreg2);
4275 case OP_LOAD_MEMBASE:
4276 case OP_LOADI4_MEMBASE:
4277 case OP_LOADU4_MEMBASE:
4278 /* this case is special, since it happens for spill code after lowering has been called */
4279 if (arm_is_imm12 (ins->inst_offset)) {
4280 ARM_LDR_IMM (code, ins->dreg, ins->inst_basereg, ins->inst_offset);
4282 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_offset);
4283 ARM_LDR_REG_REG (code, ins->dreg, ins->inst_basereg, ARMREG_LR);
4286 case OP_LOADI1_MEMBASE:
4287 g_assert (arm_is_imm8 (ins->inst_offset));
4288 ARM_LDRSB_IMM (code, ins->dreg, ins->inst_basereg, ins->inst_offset);
4290 case OP_LOADU1_MEMBASE:
4291 g_assert (arm_is_imm12 (ins->inst_offset));
4292 ARM_LDRB_IMM (code, ins->dreg, ins->inst_basereg, ins->inst_offset);
4294 case OP_LOADU2_MEMBASE:
4295 g_assert (arm_is_imm8 (ins->inst_offset));
4296 ARM_LDRH_IMM (code, ins->dreg, ins->inst_basereg, ins->inst_offset);
4298 case OP_LOADI2_MEMBASE:
4299 g_assert (arm_is_imm8 (ins->inst_offset));
4300 ARM_LDRSH_IMM (code, ins->dreg, ins->inst_basereg, ins->inst_offset);
4302 case OP_ICONV_TO_I1:
4303 ARM_SHL_IMM (code, ins->dreg, ins->sreg1, 24);
4304 ARM_SAR_IMM (code, ins->dreg, ins->dreg, 24);
4306 case OP_ICONV_TO_I2:
4307 ARM_SHL_IMM (code, ins->dreg, ins->sreg1, 16);
4308 ARM_SAR_IMM (code, ins->dreg, ins->dreg, 16);
4310 case OP_ICONV_TO_U1:
4311 ARM_AND_REG_IMM8 (code, ins->dreg, ins->sreg1, 0xff);
4313 case OP_ICONV_TO_U2:
4314 ARM_SHL_IMM (code, ins->dreg, ins->sreg1, 16);
4315 ARM_SHR_IMM (code, ins->dreg, ins->dreg, 16);
4319 ARM_CMP_REG_REG (code, ins->sreg1, ins->sreg2);
4321 case OP_COMPARE_IMM:
4322 case OP_ICOMPARE_IMM:
4323 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
4324 g_assert (imm8 >= 0);
4325 ARM_CMP_REG_IMM (code, ins->sreg1, imm8, rot_amount);
4329 * gdb does not like encountering the hw breakpoint ins in the debugged code.
4330 * So instead of emitting a trap, we emit a call a C function and place a
4333 //*(int*)code = 0xef9f0001;
4336 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_INTERNAL_METHOD,
4337 (gpointer)"mono_break");
4338 code = emit_call_seq (cfg, code);
4340 case OP_RELAXED_NOP:
4345 case OP_DUMMY_STORE:
4346 case OP_DUMMY_ICONST:
4347 case OP_DUMMY_R8CONST:
4348 case OP_NOT_REACHED:
4351 case OP_SEQ_POINT: {
4353 MonoInst *info_var = cfg->arch.seq_point_info_var;
4354 MonoInst *ss_trigger_page_var = cfg->arch.ss_trigger_page_var;
4355 MonoInst *ss_read_var = cfg->arch.seq_point_read_var;
4356 MonoInst *ss_method_var = cfg->arch.seq_point_ss_method_var;
4357 MonoInst *bp_method_var = cfg->arch.seq_point_bp_method_var;
4359 int dreg = ARMREG_LR;
4361 if (cfg->soft_breakpoints) {
4362 g_assert (!cfg->compile_aot);
4366 * For AOT, we use one got slot per method, which will point to a
4367 * SeqPointInfo structure, containing all the information required
4368 * by the code below.
4370 if (cfg->compile_aot) {
4371 g_assert (info_var);
4372 g_assert (info_var->opcode == OP_REGOFFSET);
4373 g_assert (arm_is_imm12 (info_var->inst_offset));
4376 if (!cfg->soft_breakpoints) {
4378 * Read from the single stepping trigger page. This will cause a
4379 * SIGSEGV when single stepping is enabled.
4380 * We do this _before_ the breakpoint, so single stepping after
4381 * a breakpoint is hit will step to the next IL offset.
4383 g_assert (((guint64)(gsize)ss_trigger_page >> 32) == 0);
4386 if (ins->flags & MONO_INST_SINGLE_STEP_LOC) {
4387 if (cfg->soft_breakpoints) {
4388 /* Load the address of the sequence point trigger variable. */
4391 g_assert (var->opcode == OP_REGOFFSET);
4392 g_assert (arm_is_imm12 (var->inst_offset));
4393 ARM_LDR_IMM (code, dreg, var->inst_basereg, var->inst_offset);
4395 /* Read the value and check whether it is non-zero. */
4396 ARM_LDR_IMM (code, dreg, dreg, 0);
4397 ARM_CMP_REG_IMM (code, dreg, 0, 0);
4399 /* Load the address of the sequence point method. */
4400 var = ss_method_var;
4402 g_assert (var->opcode == OP_REGOFFSET);
4403 g_assert (arm_is_imm12 (var->inst_offset));
4404 ARM_LDR_IMM (code, dreg, var->inst_basereg, var->inst_offset);
4406 /* Call it conditionally. */
4407 ARM_BLX_REG_COND (code, ARMCOND_NE, dreg);
4409 if (cfg->compile_aot) {
4410 /* Load the trigger page addr from the variable initialized in the prolog */
4411 var = ss_trigger_page_var;
4413 g_assert (var->opcode == OP_REGOFFSET);
4414 g_assert (arm_is_imm12 (var->inst_offset));
4415 ARM_LDR_IMM (code, dreg, var->inst_basereg, var->inst_offset);
4417 #ifdef USE_JUMP_TABLES
4418 gpointer *jte = mono_jumptable_add_entry ();
4419 code = mono_arm_load_jumptable_entry (code, jte, dreg);
4420 jte [0] = ss_trigger_page;
4422 ARM_LDR_IMM (code, dreg, ARMREG_PC, 0);
4424 *(int*)code = (int)ss_trigger_page;
4428 ARM_LDR_IMM (code, dreg, dreg, 0);
4432 mono_add_seq_point (cfg, bb, ins, code - cfg->native_code);
4434 if (cfg->soft_breakpoints) {
4435 /* Load the address of the breakpoint method into ip. */
4436 var = bp_method_var;
4438 g_assert (var->opcode == OP_REGOFFSET);
4439 g_assert (arm_is_imm12 (var->inst_offset));
4440 ARM_LDR_IMM (code, dreg, var->inst_basereg, var->inst_offset);
4443 * A placeholder for a possible breakpoint inserted by
4444 * mono_arch_set_breakpoint ().
4447 } else if (cfg->compile_aot) {
4448 guint32 offset = code - cfg->native_code;
4451 ARM_LDR_IMM (code, dreg, info_var->inst_basereg, info_var->inst_offset);
4452 /* Add the offset */
4453 val = ((offset / 4) * sizeof (guint8*)) + G_STRUCT_OFFSET (SeqPointInfo, bp_addrs);
4454 /* Load the info->bp_addrs [offset], which is either 0 or the address of a trigger page */
4455 if (arm_is_imm12 ((int)val)) {
4456 ARM_LDR_IMM (code, dreg, dreg, val);
4458 ARM_ADD_REG_IMM (code, dreg, dreg, (val & 0xFF), 0);
4460 ARM_ADD_REG_IMM (code, dreg, dreg, (val & 0xFF00) >> 8, 24);
4462 ARM_ADD_REG_IMM (code, dreg, dreg, (val & 0xFF0000) >> 16, 16);
4463 g_assert (!(val & 0xFF000000));
4465 ARM_LDR_IMM (code, dreg, dreg, 0);
4467 /* What is faster, a branch or a load ? */
4468 ARM_CMP_REG_IMM (code, dreg, 0, 0);
4469 /* The breakpoint instruction */
4470 ARM_LDR_IMM_COND (code, dreg, dreg, 0, ARMCOND_NE);
4473 * A placeholder for a possible breakpoint inserted by
4474 * mono_arch_set_breakpoint ().
4476 for (i = 0; i < 4; ++i)
4483 ARM_ADDS_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4486 ARM_ADD_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4490 ARM_ADCS_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4493 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
4494 g_assert (imm8 >= 0);
4495 ARM_ADDS_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
4499 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
4500 g_assert (imm8 >= 0);
4501 ARM_ADD_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
4505 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
4506 g_assert (imm8 >= 0);
4507 ARM_ADCS_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
4510 ARM_ADD_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4511 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, "OverflowException");
4513 case OP_IADD_OVF_UN:
4514 ARM_ADD_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4515 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, "OverflowException");
4518 ARM_SUB_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4519 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, "OverflowException");
4521 case OP_ISUB_OVF_UN:
4522 ARM_SUB_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4523 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_TRUE, PPC_BR_EQ, "OverflowException");
4525 case OP_ADD_OVF_CARRY:
4526 ARM_ADCS_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4527 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, "OverflowException");
4529 case OP_ADD_OVF_UN_CARRY:
4530 ARM_ADCS_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4531 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, "OverflowException");
4533 case OP_SUB_OVF_CARRY:
4534 ARM_SBCS_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4535 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, "OverflowException");
4537 case OP_SUB_OVF_UN_CARRY:
4538 ARM_SBCS_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4539 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_TRUE, PPC_BR_EQ, "OverflowException");
4543 ARM_SUBS_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4546 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
4547 g_assert (imm8 >= 0);
4548 ARM_SUBS_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
4551 ARM_SUB_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4555 ARM_SBCS_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4559 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
4560 g_assert (imm8 >= 0);
4561 ARM_SUB_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
4565 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
4566 g_assert (imm8 >= 0);
4567 ARM_SBCS_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
4569 case OP_ARM_RSBS_IMM:
4570 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
4571 g_assert (imm8 >= 0);
4572 ARM_RSBS_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
4574 case OP_ARM_RSC_IMM:
4575 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
4576 g_assert (imm8 >= 0);
4577 ARM_RSC_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
4580 ARM_AND_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4584 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
4585 g_assert (imm8 >= 0);
4586 ARM_AND_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
4589 g_assert (v7s_supported);
4590 ARM_SDIV (code, ins->dreg, ins->sreg1, ins->sreg2);
4593 g_assert (v7s_supported);
4594 ARM_UDIV (code, ins->dreg, ins->sreg1, ins->sreg2);
4597 g_assert (v7s_supported);
4598 ARM_SDIV (code, ARMREG_LR, ins->sreg1, ins->sreg2);
4599 ARM_MLS (code, ins->dreg, ARMREG_LR, ins->sreg2, ins->sreg1);
4602 g_assert (v7s_supported);
4603 ARM_UDIV (code, ARMREG_LR, ins->sreg1, ins->sreg2);
4604 ARM_MLS (code, ins->dreg, ARMREG_LR, ins->sreg2, ins->sreg1);
4608 g_assert_not_reached ();
4610 ARM_ORR_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4614 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
4615 g_assert (imm8 >= 0);
4616 ARM_ORR_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
4619 ARM_EOR_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4623 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
4624 g_assert (imm8 >= 0);
4625 ARM_EOR_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
4628 ARM_SHL_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4633 ARM_SHL_IMM (code, ins->dreg, ins->sreg1, (ins->inst_imm & 0x1f));
4634 else if (ins->dreg != ins->sreg1)
4635 ARM_MOV_REG_REG (code, ins->dreg, ins->sreg1);
4638 ARM_SAR_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4643 ARM_SAR_IMM (code, ins->dreg, ins->sreg1, (ins->inst_imm & 0x1f));
4644 else if (ins->dreg != ins->sreg1)
4645 ARM_MOV_REG_REG (code, ins->dreg, ins->sreg1);
4648 case OP_ISHR_UN_IMM:
4650 ARM_SHR_IMM (code, ins->dreg, ins->sreg1, (ins->inst_imm & 0x1f));
4651 else if (ins->dreg != ins->sreg1)
4652 ARM_MOV_REG_REG (code, ins->dreg, ins->sreg1);
4655 ARM_SHR_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4658 ARM_MVN_REG_REG (code, ins->dreg, ins->sreg1);
4661 ARM_RSB_REG_IMM8 (code, ins->dreg, ins->sreg1, 0);
4664 if (ins->dreg == ins->sreg2)
4665 ARM_MUL_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4667 ARM_MUL_REG_REG (code, ins->dreg, ins->sreg2, ins->sreg1);
4670 g_assert_not_reached ();
4673 /* FIXME: handle ovf/ sreg2 != dreg */
4674 ARM_MUL_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4675 /* FIXME: MUL doesn't set the C/O flags on ARM */
4677 case OP_IMUL_OVF_UN:
4678 /* FIXME: handle ovf/ sreg2 != dreg */
4679 ARM_MUL_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4680 /* FIXME: MUL doesn't set the C/O flags on ARM */
4683 code = mono_arm_emit_load_imm (code, ins->dreg, ins->inst_c0);
4686 /* Load the GOT offset */
4687 mono_add_patch_info (cfg, offset, (MonoJumpInfoType)ins->inst_i1, ins->inst_p0);
4688 ARM_LDR_IMM (code, ins->dreg, ARMREG_PC, 0);
4690 *(gpointer*)code = NULL;
4692 /* Load the value from the GOT */
4693 ARM_LDR_REG_REG (code, ins->dreg, ARMREG_PC, ins->dreg);
4695 case OP_OBJC_GET_SELECTOR:
4696 mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_OBJC_SELECTOR_REF, ins->inst_p0);
4697 ARM_LDR_IMM (code, ins->dreg, ARMREG_PC, 0);
4699 *(gpointer*)code = NULL;
4701 ARM_LDR_REG_REG (code, ins->dreg, ARMREG_PC, ins->dreg);
4703 case OP_ICONV_TO_I4:
4704 case OP_ICONV_TO_U4:
4706 if (ins->dreg != ins->sreg1)
4707 ARM_MOV_REG_REG (code, ins->dreg, ins->sreg1);
4710 int saved = ins->sreg2;
4711 if (ins->sreg2 == ARM_LSW_REG) {
4712 ARM_MOV_REG_REG (code, ARMREG_LR, ins->sreg2);
4715 if (ins->sreg1 != ARM_LSW_REG)
4716 ARM_MOV_REG_REG (code, ARM_LSW_REG, ins->sreg1);
4717 if (saved != ARM_MSW_REG)
4718 ARM_MOV_REG_REG (code, ARM_MSW_REG, saved);
4723 ARM_CPYD (code, ins->dreg, ins->sreg1);
4725 case OP_FCONV_TO_R4:
4727 ARM_CVTD (code, ins->dreg, ins->sreg1);
4728 ARM_CVTS (code, ins->dreg, ins->dreg);
4733 * Keep in sync with mono_arch_emit_epilog
4735 g_assert (!cfg->method->save_lmf);
4737 code = emit_load_volatile_arguments (cfg, code);
4739 code = emit_big_add (code, ARMREG_SP, cfg->frame_reg, cfg->stack_usage);
4741 if (cfg->used_int_regs)
4742 ARM_POP (code, cfg->used_int_regs);
4743 ARM_POP (code, (1 << ARMREG_R7) | (1 << ARMREG_LR));
4745 ARM_POP (code, cfg->used_int_regs | (1 << ARMREG_LR));
4747 mono_add_patch_info (cfg, (guint8*) code - cfg->native_code, MONO_PATCH_INFO_METHOD_JUMP, ins->inst_p0);
4748 if (cfg->compile_aot) {
4749 ARM_LDR_IMM (code, ARMREG_IP, ARMREG_PC, 0);
4751 *(gpointer*)code = NULL;
4753 ARM_LDR_REG_REG (code, ARMREG_PC, ARMREG_PC, ARMREG_IP);
4755 code = mono_arm_patchable_b (code, ARMCOND_AL);
4759 MonoCallInst *call = (MonoCallInst*)ins;
4762 * The stack looks like the following:
4763 * <caller argument area>
4766 * <callee argument area>
4767 * Need to copy the arguments from the callee argument area to
4768 * the caller argument area, and pop the frame.
4770 if (call->stack_usage) {
4771 int i, prev_sp_offset = 0;
4773 /* Compute size of saved registers restored below */
4775 prev_sp_offset = 2 * 4;
4777 prev_sp_offset = 1 * 4;
4778 for (i = 0; i < 16; ++i) {
4779 if (cfg->used_int_regs & (1 << i))
4780 prev_sp_offset += 4;
4783 code = emit_big_add (code, ARMREG_IP, cfg->frame_reg, cfg->stack_usage + prev_sp_offset);
4785 /* Copy arguments on the stack to our argument area */
4786 for (i = 0; i < call->stack_usage; i += sizeof (mgreg_t)) {
4787 ARM_LDR_IMM (code, ARMREG_LR, ARMREG_SP, i);
4788 ARM_STR_IMM (code, ARMREG_LR, ARMREG_IP, i);
4793 * Keep in sync with mono_arch_emit_epilog
4795 g_assert (!cfg->method->save_lmf);
4797 code = emit_big_add (code, ARMREG_SP, cfg->frame_reg, cfg->stack_usage);
4799 if (cfg->used_int_regs)
4800 ARM_POP (code, cfg->used_int_regs);
4801 ARM_POP (code, (1 << ARMREG_R7) | (1 << ARMREG_LR));
4803 ARM_POP (code, cfg->used_int_regs | (1 << ARMREG_LR));
4806 mono_add_patch_info (cfg, (guint8*) code - cfg->native_code, MONO_PATCH_INFO_METHOD_JUMP, call->method);
4807 if (cfg->compile_aot) {
4808 ARM_LDR_IMM (code, ARMREG_IP, ARMREG_PC, 0);
4810 *(gpointer*)code = NULL;
4812 ARM_LDR_REG_REG (code, ARMREG_PC, ARMREG_PC, ARMREG_IP);
4814 code = mono_arm_patchable_b (code, ARMCOND_AL);
4819 /* ensure ins->sreg1 is not NULL */
4820 ARM_LDRB_IMM (code, ARMREG_LR, ins->sreg1, 0);
4823 g_assert (cfg->sig_cookie < 128);
4824 ARM_LDR_IMM (code, ARMREG_IP, cfg->frame_reg, cfg->sig_cookie);
4825 ARM_STR_IMM (code, ARMREG_IP, ins->sreg1, 0);
4834 call = (MonoCallInst*)ins;
4837 code = emit_float_args (cfg, call, code, &max_len, &offset);
4839 if (ins->flags & MONO_INST_HAS_METHOD)
4840 mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_METHOD, call->method);
4842 mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_ABS, call->fptr);
4843 code = emit_call_seq (cfg, code);
4844 ins->flags |= MONO_INST_GC_CALLSITE;
4845 ins->backend.pc_offset = code - cfg->native_code;
4846 code = emit_move_return_value (cfg, ins, code);
4852 case OP_VOIDCALL_REG:
4855 code = emit_float_args (cfg, (MonoCallInst *)ins, code, &max_len, &offset);
4857 code = emit_call_reg (code, ins->sreg1);
4858 ins->flags |= MONO_INST_GC_CALLSITE;
4859 ins->backend.pc_offset = code - cfg->native_code;
4860 code = emit_move_return_value (cfg, ins, code);
4862 case OP_FCALL_MEMBASE:
4863 case OP_LCALL_MEMBASE:
4864 case OP_VCALL_MEMBASE:
4865 case OP_VCALL2_MEMBASE:
4866 case OP_VOIDCALL_MEMBASE:
4867 case OP_CALL_MEMBASE: {
4868 gboolean imt_arg = FALSE;
4870 g_assert (ins->sreg1 != ARMREG_LR);
4871 call = (MonoCallInst*)ins;
4874 code = emit_float_args (cfg, call, code, &max_len, &offset);
4876 if (call->dynamic_imt_arg || call->method->klass->flags & TYPE_ATTRIBUTE_INTERFACE)
4878 if (!arm_is_imm12 (ins->inst_offset))
4879 code = mono_arm_emit_load_imm (code, ARMREG_IP, ins->inst_offset);
4880 #ifdef USE_JUMP_TABLES
4886 ARM_ADD_REG_IMM8 (code, ARMREG_LR, ARMREG_PC, LR_BIAS);
4888 ARM_MOV_REG_REG (code, ARMREG_LR, ARMREG_PC);
4890 if (!arm_is_imm12 (ins->inst_offset))
4891 ARM_LDR_REG_REG (code, ARMREG_PC, ins->sreg1, ARMREG_IP);
4893 ARM_LDR_IMM (code, ARMREG_PC, ins->sreg1, ins->inst_offset);
4896 * We can't embed the method in the code stream in PIC code, or
4898 * Instead, we put it in V5 in code emitted by
4899 * mono_arch_emit_imt_argument (), and embed NULL here to
4900 * signal the IMT thunk that the value is in V5.
4902 #ifdef USE_JUMP_TABLES
4903 /* In case of jumptables we always use value in V5. */
4906 if (call->dynamic_imt_arg)
4907 *((gpointer*)code) = NULL;
4909 *((gpointer*)code) = (gpointer)call->method;
4913 ins->flags |= MONO_INST_GC_CALLSITE;
4914 ins->backend.pc_offset = code - cfg->native_code;
4915 code = emit_move_return_value (cfg, ins, code);
4919 /* keep alignment */
4920 int alloca_waste = cfg->param_area;
4923 /* round the size to 8 bytes */
4924 ARM_ADD_REG_IMM8 (code, ins->dreg, ins->sreg1, 7);
4925 ARM_BIC_REG_IMM8 (code, ins->dreg, ins->dreg, 7);
4927 ARM_ADD_REG_IMM8 (code, ins->dreg, ins->dreg, alloca_waste);
4928 ARM_SUB_REG_REG (code, ARMREG_SP, ARMREG_SP, ins->dreg);
4929 /* memzero the area: dreg holds the size, sp is the pointer */
4930 if (ins->flags & MONO_INST_INIT) {
4931 guint8 *start_loop, *branch_to_cond;
4932 ARM_MOV_REG_IMM8 (code, ARMREG_LR, 0);
4933 branch_to_cond = code;
4936 ARM_STR_REG_REG (code, ARMREG_LR, ARMREG_SP, ins->dreg);
4937 arm_patch (branch_to_cond, code);
4938 /* decrement by 4 and set flags */
4939 ARM_SUBS_REG_IMM8 (code, ins->dreg, ins->dreg, sizeof (mgreg_t));
4940 ARM_B_COND (code, ARMCOND_GE, 0);
4941 arm_patch (code - 4, start_loop);
4943 ARM_ADD_REG_IMM8 (code, ins->dreg, ARMREG_SP, alloca_waste);
4948 MonoInst *var = cfg->dyn_call_var;
4950 g_assert (var->opcode == OP_REGOFFSET);
4951 g_assert (arm_is_imm12 (var->inst_offset));
4953 /* lr = args buffer filled by mono_arch_get_dyn_call_args () */
4954 ARM_MOV_REG_REG( code, ARMREG_LR, ins->sreg1);
4956 ARM_MOV_REG_REG( code, ARMREG_IP, ins->sreg2);
4958 /* Save args buffer */
4959 ARM_STR_IMM (code, ARMREG_LR, var->inst_basereg, var->inst_offset);
4961 /* Set stack slots using R0 as scratch reg */
4962 /* MONO_ARCH_DYN_CALL_PARAM_AREA gives the size of stack space available */
4963 for (i = 0; i < DYN_CALL_STACK_ARGS; ++i) {
4964 ARM_LDR_IMM (code, ARMREG_R0, ARMREG_LR, (PARAM_REGS + i) * sizeof (mgreg_t));
4965 ARM_STR_IMM (code, ARMREG_R0, ARMREG_SP, i * sizeof (mgreg_t));
4968 /* Set argument registers */
4969 for (i = 0; i < PARAM_REGS; ++i)
4970 ARM_LDR_IMM (code, i, ARMREG_LR, i * sizeof (mgreg_t));
4973 ARM_MOV_REG_REG (code, ARMREG_LR, ARMREG_PC);
4974 ARM_MOV_REG_REG (code, ARMREG_PC, ARMREG_IP);
4977 ARM_LDR_IMM (code, ARMREG_IP, var->inst_basereg, var->inst_offset);
4978 ARM_STR_IMM (code, ARMREG_R0, ARMREG_IP, G_STRUCT_OFFSET (DynCallArgs, res));
4979 ARM_STR_IMM (code, ARMREG_R1, ARMREG_IP, G_STRUCT_OFFSET (DynCallArgs, res2));
4983 if (ins->sreg1 != ARMREG_R0)
4984 ARM_MOV_REG_REG (code, ARMREG_R0, ins->sreg1);
4985 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_INTERNAL_METHOD,
4986 (gpointer)"mono_arch_throw_exception");
4987 code = emit_call_seq (cfg, code);
4991 if (ins->sreg1 != ARMREG_R0)
4992 ARM_MOV_REG_REG (code, ARMREG_R0, ins->sreg1);
4993 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_INTERNAL_METHOD,
4994 (gpointer)"mono_arch_rethrow_exception");
4995 code = emit_call_seq (cfg, code);
4998 case OP_START_HANDLER: {
4999 MonoInst *spvar = mono_find_spvar_for_region (cfg, bb->region);
5002 /* Reserve a param area, see filter-stack.exe */
5003 if (cfg->param_area) {
5004 if ((i = mono_arm_is_rotated_imm8 (cfg->param_area, &rot_amount)) >= 0) {
5005 ARM_SUB_REG_IMM (code, ARMREG_SP, ARMREG_SP, i, rot_amount);
5007 code = mono_arm_emit_load_imm (code, ARMREG_IP, cfg->param_area);
5008 ARM_SUB_REG_REG (code, ARMREG_SP, ARMREG_SP, ARMREG_IP);
5012 if (arm_is_imm12 (spvar->inst_offset)) {
5013 ARM_STR_IMM (code, ARMREG_LR, spvar->inst_basereg, spvar->inst_offset);
5015 code = mono_arm_emit_load_imm (code, ARMREG_IP, spvar->inst_offset);
5016 ARM_STR_REG_REG (code, ARMREG_LR, spvar->inst_basereg, ARMREG_IP);
5020 case OP_ENDFILTER: {
5021 MonoInst *spvar = mono_find_spvar_for_region (cfg, bb->region);
5024 /* Free the param area */
5025 if (cfg->param_area) {
5026 if ((i = mono_arm_is_rotated_imm8 (cfg->param_area, &rot_amount)) >= 0) {
5027 ARM_ADD_REG_IMM (code, ARMREG_SP, ARMREG_SP, i, rot_amount);
5029 code = mono_arm_emit_load_imm (code, ARMREG_IP, cfg->param_area);
5030 ARM_ADD_REG_REG (code, ARMREG_SP, ARMREG_SP, ARMREG_IP);
5034 if (ins->sreg1 != ARMREG_R0)
5035 ARM_MOV_REG_REG (code, ARMREG_R0, ins->sreg1);
5036 if (arm_is_imm12 (spvar->inst_offset)) {
5037 ARM_LDR_IMM (code, ARMREG_IP, spvar->inst_basereg, spvar->inst_offset);
5039 g_assert (ARMREG_IP != spvar->inst_basereg);
5040 code = mono_arm_emit_load_imm (code, ARMREG_IP, spvar->inst_offset);
5041 ARM_LDR_REG_REG (code, ARMREG_IP, spvar->inst_basereg, ARMREG_IP);
5043 ARM_MOV_REG_REG (code, ARMREG_PC, ARMREG_IP);
5046 case OP_ENDFINALLY: {
5047 MonoInst *spvar = mono_find_spvar_for_region (cfg, bb->region);
5050 /* Free the param area */
5051 if (cfg->param_area) {
5052 if ((i = mono_arm_is_rotated_imm8 (cfg->param_area, &rot_amount)) >= 0) {
5053 ARM_ADD_REG_IMM (code, ARMREG_SP, ARMREG_SP, i, rot_amount);
5055 code = mono_arm_emit_load_imm (code, ARMREG_IP, cfg->param_area);
5056 ARM_ADD_REG_REG (code, ARMREG_SP, ARMREG_SP, ARMREG_IP);
5060 if (arm_is_imm12 (spvar->inst_offset)) {
5061 ARM_LDR_IMM (code, ARMREG_IP, spvar->inst_basereg, spvar->inst_offset);
5063 g_assert (ARMREG_IP != spvar->inst_basereg);
5064 code = mono_arm_emit_load_imm (code, ARMREG_IP, spvar->inst_offset);
5065 ARM_LDR_REG_REG (code, ARMREG_IP, spvar->inst_basereg, ARMREG_IP);
5067 ARM_MOV_REG_REG (code, ARMREG_PC, ARMREG_IP);
5070 case OP_CALL_HANDLER:
5071 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_BB, ins->inst_target_bb);
5072 code = mono_arm_patchable_bl (code, ARMCOND_AL);
5073 mono_cfg_add_try_hole (cfg, ins->inst_eh_block, code, bb);
5076 ins->inst_c0 = code - cfg->native_code;
5079 /*if (ins->inst_target_bb->native_offset) {
5081 //x86_jump_code (code, cfg->native_code + ins->inst_target_bb->native_offset);
5083 mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_BB, ins->inst_target_bb);
5084 code = mono_arm_patchable_b (code, ARMCOND_AL);
5088 ARM_MOV_REG_REG (code, ARMREG_PC, ins->sreg1);
5092 * In the normal case we have:
5093 * ldr pc, [pc, ins->sreg1 << 2]
5096 * ldr lr, [pc, ins->sreg1 << 2]
5098 * After follows the data.
5099 * FIXME: add aot support.
5101 mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_SWITCH, ins->inst_p0);
5102 #ifdef USE_JUMP_TABLES
5104 gpointer *jte = mono_jumptable_add_entries (GPOINTER_TO_INT (ins->klass));
5105 code = mono_arm_load_jumptable_entry_addr (code, jte, ARMREG_IP);
5106 ARM_LDR_REG_REG_SHIFT (code, ARMREG_PC, ARMREG_IP, ins->sreg1, ARMSHIFT_LSL, 2);
5110 max_len += 4 * GPOINTER_TO_INT (ins->klass);
5111 if (offset + max_len > (cfg->code_size - 16)) {
5112 cfg->code_size += max_len;
5113 cfg->code_size *= 2;
5114 cfg->native_code = g_realloc (cfg->native_code, cfg->code_size);
5115 code = cfg->native_code + offset;
5117 ARM_LDR_REG_REG_SHIFT (code, ARMREG_PC, ARMREG_PC, ins->sreg1, ARMSHIFT_LSL, 2);
5119 code += 4 * GPOINTER_TO_INT (ins->klass);
5124 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 0, ARMCOND_NE);
5125 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_EQ);
5129 ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
5130 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_LT);
5134 ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
5135 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_LO);
5139 ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
5140 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_GT);
5144 ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
5145 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_HI);
5147 case OP_COND_EXC_EQ:
5148 case OP_COND_EXC_NE_UN:
5149 case OP_COND_EXC_LT:
5150 case OP_COND_EXC_LT_UN:
5151 case OP_COND_EXC_GT:
5152 case OP_COND_EXC_GT_UN:
5153 case OP_COND_EXC_GE:
5154 case OP_COND_EXC_GE_UN:
5155 case OP_COND_EXC_LE:
5156 case OP_COND_EXC_LE_UN:
5157 EMIT_COND_SYSTEM_EXCEPTION (ins->opcode - OP_COND_EXC_EQ, ins->inst_p1);
5159 case OP_COND_EXC_IEQ:
5160 case OP_COND_EXC_INE_UN:
5161 case OP_COND_EXC_ILT:
5162 case OP_COND_EXC_ILT_UN:
5163 case OP_COND_EXC_IGT:
5164 case OP_COND_EXC_IGT_UN:
5165 case OP_COND_EXC_IGE:
5166 case OP_COND_EXC_IGE_UN:
5167 case OP_COND_EXC_ILE:
5168 case OP_COND_EXC_ILE_UN:
5169 EMIT_COND_SYSTEM_EXCEPTION (ins->opcode - OP_COND_EXC_IEQ, ins->inst_p1);
5172 case OP_COND_EXC_IC:
5173 EMIT_COND_SYSTEM_EXCEPTION_FLAGS (ARMCOND_CS, ins->inst_p1);
5175 case OP_COND_EXC_OV:
5176 case OP_COND_EXC_IOV:
5177 EMIT_COND_SYSTEM_EXCEPTION_FLAGS (ARMCOND_VS, ins->inst_p1);
5179 case OP_COND_EXC_NC:
5180 case OP_COND_EXC_INC:
5181 EMIT_COND_SYSTEM_EXCEPTION_FLAGS (ARMCOND_CC, ins->inst_p1);
5183 case OP_COND_EXC_NO:
5184 case OP_COND_EXC_INO:
5185 EMIT_COND_SYSTEM_EXCEPTION_FLAGS (ARMCOND_VC, ins->inst_p1);
5197 EMIT_COND_BRANCH (ins, ins->opcode - OP_IBEQ);
5200 /* floating point opcodes */
5202 if (cfg->compile_aot) {
5203 ARM_FLDD (code, ins->dreg, ARMREG_PC, 0);
5205 *(guint32*)code = ((guint32*)(ins->inst_p0))[0];
5207 *(guint32*)code = ((guint32*)(ins->inst_p0))[1];
5210 /* FIXME: we can optimize the imm load by dealing with part of
5211 * the displacement in LDFD (aligning to 512).
5213 code = mono_arm_emit_load_imm (code, ARMREG_LR, (guint32)ins->inst_p0);
5214 ARM_FLDD (code, ins->dreg, ARMREG_LR, 0);
5218 if (cfg->compile_aot) {
5219 ARM_FLDS (code, ins->dreg, ARMREG_PC, 0);
5221 *(guint32*)code = ((guint32*)(ins->inst_p0))[0];
5223 ARM_CVTS (code, ins->dreg, ins->dreg);
5225 code = mono_arm_emit_load_imm (code, ARMREG_LR, (guint32)ins->inst_p0);
5226 ARM_FLDS (code, ins->dreg, ARMREG_LR, 0);
5227 ARM_CVTS (code, ins->dreg, ins->dreg);
5230 case OP_STORER8_MEMBASE_REG:
5231 /* This is generated by the local regalloc pass which runs after the lowering pass */
5232 if (!arm_is_fpimm8 (ins->inst_offset)) {
5233 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_offset);
5234 ARM_ADD_REG_REG (code, ARMREG_LR, ARMREG_LR, ins->inst_destbasereg);
5235 ARM_FSTD (code, ins->sreg1, ARMREG_LR, 0);
5237 ARM_FSTD (code, ins->sreg1, ins->inst_destbasereg, ins->inst_offset);
5240 case OP_LOADR8_MEMBASE:
5241 /* This is generated by the local regalloc pass which runs after the lowering pass */
5242 if (!arm_is_fpimm8 (ins->inst_offset)) {
5243 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_offset);
5244 ARM_ADD_REG_REG (code, ARMREG_LR, ARMREG_LR, ins->inst_basereg);
5245 ARM_FLDD (code, ins->dreg, ARMREG_LR, 0);
5247 ARM_FLDD (code, ins->dreg, ins->inst_basereg, ins->inst_offset);
5250 case OP_STORER4_MEMBASE_REG:
5251 g_assert (arm_is_fpimm8 (ins->inst_offset));
5252 code = mono_arm_emit_vfp_scratch_save (cfg, code, vfp_scratch1);
5253 ARM_CVTD (code, vfp_scratch1, ins->sreg1);
5254 ARM_FSTS (code, vfp_scratch1, ins->inst_destbasereg, ins->inst_offset);
5255 code = mono_arm_emit_vfp_scratch_restore (cfg, code, vfp_scratch1);
5257 case OP_LOADR4_MEMBASE:
5258 g_assert (arm_is_fpimm8 (ins->inst_offset));
5259 code = mono_arm_emit_vfp_scratch_save (cfg, code, vfp_scratch1);
5260 ARM_FLDS (code, vfp_scratch1, ins->inst_basereg, ins->inst_offset);
5261 ARM_CVTS (code, ins->dreg, vfp_scratch1);
5262 code = mono_arm_emit_vfp_scratch_restore (cfg, code, vfp_scratch1);
5264 case OP_ICONV_TO_R_UN: {
5265 g_assert_not_reached ();
5268 case OP_ICONV_TO_R4:
5269 code = mono_arm_emit_vfp_scratch_save (cfg, code, vfp_scratch1);
5270 ARM_FMSR (code, vfp_scratch1, ins->sreg1);
5271 ARM_FSITOS (code, vfp_scratch1, vfp_scratch1);
5272 ARM_CVTS (code, ins->dreg, vfp_scratch1);
5273 code = mono_arm_emit_vfp_scratch_restore (cfg, code, vfp_scratch1);
5275 case OP_ICONV_TO_R8:
5276 code = mono_arm_emit_vfp_scratch_save (cfg, code, vfp_scratch1);
5277 ARM_FMSR (code, vfp_scratch1, ins->sreg1);
5278 ARM_FSITOD (code, ins->dreg, vfp_scratch1);
5279 code = mono_arm_emit_vfp_scratch_restore (cfg, code, vfp_scratch1);
5283 if (mono_method_signature (cfg->method)->ret->type == MONO_TYPE_R4) {
5284 ARM_CVTD (code, ARM_VFP_F0, ins->sreg1);
5286 if (!IS_HARD_FLOAT) {
5287 ARM_FMRS (code, ARMREG_R0, ARM_VFP_F0);
5290 if (IS_HARD_FLOAT) {
5291 ARM_CPYD (code, ARM_VFP_D0, ins->sreg1);
5293 ARM_FMRRD (code, ARMREG_R0, ARMREG_R1, ins->sreg1);
5297 case OP_FCONV_TO_I1:
5298 code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 1, TRUE);
5300 case OP_FCONV_TO_U1:
5301 code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 1, FALSE);
5303 case OP_FCONV_TO_I2:
5304 code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 2, TRUE);
5306 case OP_FCONV_TO_U2:
5307 code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 2, FALSE);
5309 case OP_FCONV_TO_I4:
5311 code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 4, TRUE);
5313 case OP_FCONV_TO_U4:
5315 code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 4, FALSE);
5317 case OP_FCONV_TO_I8:
5318 case OP_FCONV_TO_U8:
5319 g_assert_not_reached ();
5320 /* Implemented as helper calls */
5322 case OP_LCONV_TO_R_UN:
5323 g_assert_not_reached ();
5324 /* Implemented as helper calls */
5326 case OP_LCONV_TO_OVF_I4_2: {
5327 guint8 *high_bit_not_set, *valid_negative, *invalid_negative, *valid_positive;
5329 * Valid ints: 0xffffffff:80000000 to 00000000:0x7fffffff (i.e. the sign-extended range of a 32-bit int)
5332 ARM_CMP_REG_IMM8 (code, ins->sreg1, 0);
5333 high_bit_not_set = code;
5334 ARM_B_COND (code, ARMCOND_GE, 0); /*branch if bit 31 of the lower part is not set*/
5336 ARM_CMN_REG_IMM8 (code, ins->sreg2, 1); /*This have the same effect as CMP reg, 0xFFFFFFFF */
5337 valid_negative = code;
5338 ARM_B_COND (code, ARMCOND_EQ, 0); /*branch if upper part == 0xFFFFFFFF (lower part has bit 31 set) */
5339 invalid_negative = code;
5340 ARM_B_COND (code, ARMCOND_AL, 0);
5342 arm_patch (high_bit_not_set, code);
5344 ARM_CMP_REG_IMM8 (code, ins->sreg2, 0);
5345 valid_positive = code;
5346 ARM_B_COND (code, ARMCOND_EQ, 0); /*branch if upper part == 0 (lower part has bit 31 clear)*/
5348 arm_patch (invalid_negative, code);
5349 EMIT_COND_SYSTEM_EXCEPTION_FLAGS (ARMCOND_AL, "OverflowException");
5351 arm_patch (valid_negative, code);
5352 arm_patch (valid_positive, code);
5354 if (ins->dreg != ins->sreg1)
5355 ARM_MOV_REG_REG (code, ins->dreg, ins->sreg1);
5359 ARM_VFP_ADDD (code, ins->dreg, ins->sreg1, ins->sreg2);
5362 ARM_VFP_SUBD (code, ins->dreg, ins->sreg1, ins->sreg2);
5365 ARM_VFP_MULD (code, ins->dreg, ins->sreg1, ins->sreg2);
5368 ARM_VFP_DIVD (code, ins->dreg, ins->sreg1, ins->sreg2);
5371 ARM_NEGD (code, ins->dreg, ins->sreg1);
5375 g_assert_not_reached ();
5379 ARM_CMPD (code, ins->sreg1, ins->sreg2);
5385 ARM_CMPD (code, ins->sreg1, ins->sreg2);
5388 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 0, ARMCOND_NE);
5389 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_EQ);
5393 ARM_CMPD (code, ins->sreg1, ins->sreg2);
5396 ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
5397 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_MI);
5401 ARM_CMPD (code, ins->sreg1, ins->sreg2);
5404 ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
5405 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_MI);
5406 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_VS);
5410 ARM_CMPD (code, ins->sreg2, ins->sreg1);
5413 ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
5414 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_MI);
5418 ARM_CMPD (code, ins->sreg2, ins->sreg1);
5421 ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
5422 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_MI);
5423 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_VS);
5425 /* ARM FPA flags table:
5426 * N Less than ARMCOND_MI
5427 * Z Equal ARMCOND_EQ
5428 * C Greater Than or Equal ARMCOND_CS
5429 * V Unordered ARMCOND_VS
5432 EMIT_COND_BRANCH (ins, OP_IBEQ - OP_IBEQ);
5435 EMIT_COND_BRANCH (ins, OP_IBNE_UN - OP_IBEQ);
5438 EMIT_COND_BRANCH_FLAGS (ins, ARMCOND_MI); /* N set */
5441 EMIT_COND_BRANCH_FLAGS (ins, ARMCOND_VS); /* V set */
5442 EMIT_COND_BRANCH_FLAGS (ins, ARMCOND_MI); /* N set */
5448 g_assert_not_reached ();
5452 EMIT_COND_BRANCH_FLAGS (ins, ARMCOND_GE);
5454 /* FPA requires EQ even though the docs suggest that just CS is enough */
5455 EMIT_COND_BRANCH_FLAGS (ins, ARMCOND_EQ);
5456 EMIT_COND_BRANCH_FLAGS (ins, ARMCOND_CS);
5460 EMIT_COND_BRANCH_FLAGS (ins, ARMCOND_VS); /* V set */
5461 EMIT_COND_BRANCH_FLAGS (ins, ARMCOND_GE);
5466 code = mono_arm_emit_vfp_scratch_save (cfg, code, vfp_scratch1);
5467 code = mono_arm_emit_vfp_scratch_save (cfg, code, vfp_scratch2);
5469 #ifdef USE_JUMP_TABLES
5471 gpointer *jte = mono_jumptable_add_entries (2);
5472 jte [0] = GUINT_TO_POINTER (0xffffffff);
5473 jte [1] = GUINT_TO_POINTER (0x7fefffff);
5474 code = mono_arm_load_jumptable_entry_addr (code, jte, ARMREG_IP);
5475 ARM_FLDD (code, vfp_scratch1, ARMREG_IP, 0);
5478 ARM_ABSD (code, vfp_scratch2, ins->sreg1);
5479 ARM_FLDD (code, vfp_scratch1, ARMREG_PC, 0);
5481 *(guint32*)code = 0xffffffff;
5483 *(guint32*)code = 0x7fefffff;
5486 ARM_CMPD (code, vfp_scratch2, vfp_scratch1);
5488 EMIT_COND_SYSTEM_EXCEPTION_FLAGS (ARMCOND_GT, "ArithmeticException");
5489 ARM_CMPD (code, ins->sreg1, ins->sreg1);
5491 EMIT_COND_SYSTEM_EXCEPTION_FLAGS (ARMCOND_VS, "ArithmeticException");
5492 ARM_CPYD (code, ins->dreg, ins->sreg1);
5494 code = mono_arm_emit_vfp_scratch_restore (cfg, code, vfp_scratch1);
5495 code = mono_arm_emit_vfp_scratch_restore (cfg, code, vfp_scratch2);
5500 case OP_GC_LIVENESS_DEF:
5501 case OP_GC_LIVENESS_USE:
5502 case OP_GC_PARAM_SLOT_LIVENESS_DEF:
5503 ins->backend.pc_offset = code - cfg->native_code;
5505 case OP_GC_SPILL_SLOT_LIVENESS_DEF:
5506 ins->backend.pc_offset = code - cfg->native_code;
5507 bb->spill_slot_defs = g_slist_prepend_mempool (cfg->mempool, bb->spill_slot_defs, ins);
5511 g_warning ("unknown opcode %s in %s()\n", mono_inst_name (ins->opcode), __FUNCTION__);
5512 g_assert_not_reached ();
5515 if ((cfg->opt & MONO_OPT_BRANCH) && ((code - cfg->native_code - offset) > max_len)) {
5516 g_warning ("wrong maximal instruction length of instruction %s (expected %d, got %d)",
5517 mono_inst_name (ins->opcode), max_len, code - cfg->native_code - offset);
5518 g_assert_not_reached ();
5524 last_offset = offset;
5527 cfg->code_len = code - cfg->native_code;
5530 #endif /* DISABLE_JIT */
5532 #ifdef HAVE_AEABI_READ_TP
5533 void __aeabi_read_tp (void);
/*
 * mono_arch_register_lowlevel_calls:
 *
 *   Register the ARM-specific low-level helpers (exception throwers and,
 * on EABI Linux targets, __aeabi_read_tp) in the JIT icall table so that
 * generated code can call them directly.  The registered signature is a
 * dummy "void" one — as the comment below notes, it is not consulted for
 * these raw icalls.
 */
5537 mono_arch_register_lowlevel_calls (void)
5539 	/* The signature doesn't matter */
5540 	mono_register_jit_icall (mono_arm_throw_exception, "mono_arm_throw_exception", mono_create_icall_signature ("void"), TRUE);
5541 	mono_register_jit_icall (mono_arm_throw_exception_by_token, "mono_arm_throw_exception_by_token", mono_create_icall_signature ("void"), TRUE);
5543 #ifndef MONO_CROSS_COMPILE
5544 #ifdef HAVE_AEABI_READ_TP
	/* __aeabi_read_tp is only available when not cross-compiling on EABI targets (see HAVE_AEABI_READ_TP in the header) */
5545 	mono_register_jit_icall (__aeabi_read_tp, "__aeabi_read_tp", mono_create_icall_signature ("void"), TRUE);
/*
 * patch_lis_ori: patch a two-instruction lis/ori-style immediate load at
 * `ip` with the 32-bit value `val` (high 16 bits into halfword slot 1,
 * low 16 bits into slot 3).
 * NOTE(review): this looks like PPC-style patching carried over to this
 * backend; every use below sits behind g_assert_not_reached (), so it
 * appears to be dead on ARM — confirm before relying on it.
 */
5550 #define patch_lis_ori(ip,val) do {\
5551 		guint16 *__lis_ori = (guint16*)(ip);	\
5552 		__lis_ori [1] = (((guint32)(val)) >> 16) & 0xffff;	\
5553 		__lis_ori [3] = ((guint32)(val)) & 0xffff;	\
/*
 * mono_arch_patch_code:
 *
 *   Walk the MonoJumpInfo list `ji` and apply each patch to the native
 * code buffer `code` of `method`.  `run_cctors` doubles as the JIT/AOT
 * discriminator (compile_aot = !run_cctors).  Most patch kinds resolve a
 * target via mono_resolve_patch_target () and then defer to
 * arm_patch_general () at the bottom; a few kinds are handled specially
 * or skipped entirely.
 */
5557 mono_arch_patch_code (MonoMethod *method, MonoDomain *domain, guint8 *code, MonoJumpInfo *ji, MonoCodeManager *dyn_code_mp, gboolean run_cctors)
5559 	MonoJumpInfo *patch_info;
5560 	gboolean compile_aot = !run_cctors;
5562 	for (patch_info = ji; patch_info; patch_info = patch_info->next) {
		/* ip = absolute address of the instruction to patch */
5563 		unsigned char *ip = patch_info->ip.i + code;
5564 		const unsigned char *target;
		/* Switch tables are filled in directly (JIT only); AOT handles them elsewhere */
5566 		if (patch_info->type == MONO_PATCH_INFO_SWITCH && !compile_aot) {
5567 #ifdef USE_JUMP_TABLES
5568 			gpointer *jt = mono_jumptable_get_entry (ip);
5570 			gpointer *jt = (gpointer*)(ip + 8);
5573 			/* jt is the inlined jump table, 2 instructions after ip
5574 			 * In the normal case we store the absolute addresses,
5575 			 * otherwise the displacements.
5577 			for (i = 0; i < patch_info->data.table->table_size; i++)
5578 				jt [i] = code + (int)patch_info->data.table->table [i];
5583 		switch (patch_info->type) {
5584 		case MONO_PATCH_INFO_BB:
5585 		case MONO_PATCH_INFO_LABEL:
5588 			/* No need to patch these */
5593 		target = mono_resolve_patch_target (method, domain, code, patch_info, run_cctors);
5595 		switch (patch_info->type) {
		/*
		 * NOTE(review): the cases below that start with g_assert_not_reached ()
		 * followed by patch_lis_ori ()/raw pointer stores look like leftover
		 * PPC patching code; on ARM they are unreachable by construction.
		 */
5596 		case MONO_PATCH_INFO_IP:
5597 			g_assert_not_reached ();
5598 			patch_lis_ori (ip, ip);
5600 		case MONO_PATCH_INFO_METHOD_REL:
5601 			g_assert_not_reached ();
5602 			*((gpointer *)(ip)) = code + patch_info->data.offset;
5604 		case MONO_PATCH_INFO_METHODCONST:
5605 		case MONO_PATCH_INFO_CLASS:
5606 		case MONO_PATCH_INFO_IMAGE:
5607 		case MONO_PATCH_INFO_FIELD:
5608 		case MONO_PATCH_INFO_VTABLE:
5609 		case MONO_PATCH_INFO_IID:
5610 		case MONO_PATCH_INFO_SFLDA:
5611 		case MONO_PATCH_INFO_LDSTR:
5612 		case MONO_PATCH_INFO_TYPE_FROM_HANDLE:
5613 		case MONO_PATCH_INFO_LDTOKEN:
5614 			g_assert_not_reached ();
5615 			/* from OP_AOTCONST : lis + ori */
5616 			patch_lis_ori (ip, target);
5618 		case MONO_PATCH_INFO_R4:
5619 		case MONO_PATCH_INFO_R8:
5620 			g_assert_not_reached ();
5621 			*((gconstpointer *)(ip + 2)) = patch_info->data.target;
5623 		case MONO_PATCH_INFO_EXC_NAME:
5624 			g_assert_not_reached ();
5625 			*((gconstpointer *)(ip + 1)) = patch_info->data.name;
5627 		case MONO_PATCH_INFO_NONE:
5628 		case MONO_PATCH_INFO_BB_OVF:
5629 		case MONO_PATCH_INFO_EXC_OVF:
5630 			/* everything is dealt with at epilog output time */
		/* Default path: generic ARM branch/call patching */
5635 		arm_patch_general (domain, ip, target, dyn_code_mp);
5642 * Stack frame layout:
5644 * ------------------- fp
5645 * MonoLMF structure or saved registers
5646 * -------------------
5648 * -------------------
5650 * -------------------
5651 * optional 8 bytes for tracing
5652 * -------------------
5653 * param area size is cfg->param_area
5654 * ------------------- sp
/*
 * mono_arch_emit_prolog:
 *
 *   Emit the method prolog for `cfg`: save callee-saved registers (or the
 * full MonoLMF when method->save_lmf is set), establish the frame pointer,
 * allocate and align the stack frame, emit the corresponding unwind/GC-map
 * info, and spill register-passed arguments to their home stack slots.
 * The frame layout this function builds is documented in the comment block
 * just above it in the file.
 */
5657 mono_arch_emit_prolog (MonoCompile *cfg)
5659 	MonoMethod *method = cfg->method;
5661 	MonoMethodSignature *sig;
5663 	int alloc_size, orig_alloc_size, pos, max_offset, i, rot_amount;
5668 	int prev_sp_offset, reg_offset;
5670 	if (mono_jit_trace_calls != NULL && mono_trace_eval (method))
	/* Initial code-buffer size heuristic; grown on demand elsewhere */
5673 	sig = mono_method_signature (method);
5674 	cfg->code_size = 256 + sig->param_count * 64;
5675 	code = cfg->native_code = g_malloc (cfg->code_size);
	/* CFA starts at the incoming SP */
5677 	mono_emit_unwind_op_def_cfa (cfg, code, ARMREG_SP, 0);
5679 	alloc_size = cfg->stack_offset;
5685 	 * The iphone uses R7 as the frame pointer, and it points at the saved
5690 	 * We can't use r7 as a frame pointer since it points into the middle of
5691 	 * the frame, so we keep using our own frame pointer.
5692 	 * FIXME: Optimize this.
	/* iphone ABI: push r7/lr and keep r7 pointing at the saved pair for native unwinders */
5694 	ARM_PUSH (code, (1 << ARMREG_R7) | (1 << ARMREG_LR));
5695 	ARM_MOV_REG_REG (code, ARMREG_R7, ARMREG_SP);
5696 	prev_sp_offset += 8; /* r7 and lr */
5697 	mono_emit_unwind_op_def_cfa_offset (cfg, code, prev_sp_offset);
5698 	mono_emit_unwind_op_offset (cfg, code, ARMREG_R7, (- prev_sp_offset) + 0);
	/* Non-LMF path: push only the callee-saved regs this method actually uses (+ lr) */
5701 	if (!method->save_lmf) {
5703 		/* No need to push LR again */
5704 		if (cfg->used_int_regs)
5705 			ARM_PUSH (code, cfg->used_int_regs);
5707 		ARM_PUSH (code, cfg->used_int_regs | (1 << ARMREG_LR));
5708 		prev_sp_offset += 4;
5710 	for (i = 0; i < 16; ++i) {
5711 		if (cfg->used_int_regs & (1 << i))
5712 			prev_sp_offset += 4;
5714 	mono_emit_unwind_op_def_cfa_offset (cfg, code, prev_sp_offset);
	/* Record unwind offsets and mark the saved-reg slots as not containing GC refs */
5716 	for (i = 0; i < 16; ++i) {
5717 		if ((cfg->used_int_regs & (1 << i))) {
5718 			mono_emit_unwind_op_offset (cfg, code, i, (- prev_sp_offset) + reg_offset);
5719 			mini_gc_set_slot_type_from_cfa (cfg, (- prev_sp_offset) + reg_offset, SLOT_NOREF);
5724 		mono_emit_unwind_op_offset (cfg, code, ARMREG_LR, -4);
5725 		mini_gc_set_slot_type_from_cfa (cfg, -4, SLOT_NOREF);
5727 		mono_emit_unwind_op_offset (cfg, code, ARMREG_LR, -4);
5728 		mini_gc_set_slot_type_from_cfa (cfg, -4, SLOT_NOREF);
	/* LMF path: push r4-r12,lr (mask 0x5ff0) so the LMF iregs area is populated */
5731 		ARM_MOV_REG_REG (code, ARMREG_IP, ARMREG_SP);
5732 		ARM_PUSH (code, 0x5ff0);
5733 		prev_sp_offset += 4 * 10; /* all but r0-r3, sp and pc */
5734 		mono_emit_unwind_op_def_cfa_offset (cfg, code, prev_sp_offset);
5736 		for (i = 0; i < 16; ++i) {
5737 			if ((i > ARMREG_R3) && (i != ARMREG_SP) && (i != ARMREG_PC)) {
5738 				/* The original r7 is saved at the start */
5739 				if (!(iphone_abi && i == ARMREG_R7))
5740 					mono_emit_unwind_op_offset (cfg, code, i, (- prev_sp_offset) + reg_offset);
5744 		g_assert (reg_offset == 4 * 10);
5745 		pos += sizeof (MonoLMF) - (4 * 10);
5749 	orig_alloc_size = alloc_size;
5750 	// align to MONO_ARCH_FRAME_ALIGNMENT bytes
5751 	if (alloc_size & (MONO_ARCH_FRAME_ALIGNMENT - 1)) {
5752 		alloc_size += MONO_ARCH_FRAME_ALIGNMENT - 1;
5753 		alloc_size &= ~(MONO_ARCH_FRAME_ALIGNMENT - 1);
5756 	/* the stack used in the pushed regs */
5757 	if (prev_sp_offset & 4)
5759 	cfg->stack_usage = alloc_size;
	/* Subtract the frame size from SP; use an encodable rotated imm8 when possible */
5761 		if ((i = mono_arm_is_rotated_imm8 (alloc_size, &rot_amount)) >= 0) {
5762 			ARM_SUB_REG_IMM (code, ARMREG_SP, ARMREG_SP, i, rot_amount);
5764 			code = mono_arm_emit_load_imm (code, ARMREG_IP, alloc_size);
5765 			ARM_SUB_REG_REG (code, ARMREG_SP, ARMREG_SP, ARMREG_IP);
5767 		mono_emit_unwind_op_def_cfa_offset (cfg, code, prev_sp_offset + alloc_size);
5769 	if (cfg->frame_reg != ARMREG_SP) {
5770 		ARM_MOV_REG_REG (code, cfg->frame_reg, ARMREG_SP);
5771 		mono_emit_unwind_op_def_cfa_reg (cfg, code, cfg->frame_reg);
5773 	//g_print ("prev_sp_offset: %d, alloc_size:%d\n", prev_sp_offset, alloc_size);
5774 	prev_sp_offset += alloc_size;
	/* Alignment padding holds no references */
5776 	for (i = 0; i < alloc_size - orig_alloc_size; i += 4)
5777 		mini_gc_set_slot_type_from_cfa (cfg, (- prev_sp_offset) + orig_alloc_size + i, SLOT_NOREF);
5779 	/* compute max_offset in order to use short forward jumps
5780 	 * we could skip do it on arm because the immediate displacement
5781 	 * for jumps is large enough, it may be useful later for constant pools
5784 	for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
5785 		MonoInst *ins = bb->code;
5786 		bb->max_offset = max_offset;
5788 		if (cfg->prof_options & MONO_PROFILE_COVERAGE)
5791 		MONO_BB_FOR_EACH_INS (bb, ins)
5792 			max_offset += ((guint8 *)ins_get_spec (ins->opcode))[MONO_INST_LEN];
5795 	/* store runtime generic context */
5796 	if (cfg->rgctx_var) {
5797 		MonoInst *ins = cfg->rgctx_var;
5799 		g_assert (ins->opcode == OP_REGOFFSET);
5801 		if (arm_is_imm12 (ins->inst_offset)) {
5802 			ARM_STR_IMM (code, MONO_ARCH_RGCTX_REG, ins->inst_basereg, ins->inst_offset);
5804 			code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_offset);
5805 			ARM_STR_REG_REG (code, MONO_ARCH_RGCTX_REG, ins->inst_basereg, ARMREG_LR);
5809 	/* load arguments allocated to register from the stack */
5812 	cinfo = get_call_info (cfg->generic_sharing_context, NULL, sig);
	/* Spill the valuetype-return address argument to its stack slot */
5814 	if (cinfo->vtype_retaddr) {
5815 		ArgInfo *ainfo = &cinfo->ret;
5816 		inst = cfg->vret_addr;
5817 		g_assert (arm_is_imm12 (inst->inst_offset));
5818 		ARM_STR_IMM (code, ainfo->reg, inst->inst_basereg, inst->inst_offset);
5821 	if (sig->call_convention == MONO_CALL_VARARG) {
5822 		ArgInfo *cookie = &cinfo->sig_cookie;
5824 		/* Save the sig cookie address */
5825 		g_assert (cookie->storage == RegTypeBase);
5827 		g_assert (arm_is_imm12 (prev_sp_offset + cookie->offset));
5828 		g_assert (arm_is_imm12 (cfg->sig_cookie));
5829 		ARM_ADD_REG_IMM8 (code, ARMREG_IP, cfg->frame_reg, prev_sp_offset + cookie->offset);
5830 		ARM_STR_IMM (code, ARMREG_IP, cfg->frame_reg, cfg->sig_cookie);
	/*
	 * Move each incoming argument (this + declared params) from its ABI
	 * location (register or caller stack) to its home location (register
	 * variable or local stack slot), per the ArgInfo storage class.
	 */
5833 	for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
5834 		ArgInfo *ainfo = cinfo->args + i;
5835 		inst = cfg->args [pos];
5837 		if (cfg->verbose_level > 2)
5838 			g_print ("Saving argument %d (type: %d)\n", i, ainfo->storage);
5839 		if (inst->opcode == OP_REGVAR) {
5840 			if (ainfo->storage == RegTypeGeneral)
5841 				ARM_MOV_REG_REG (code, inst->dreg, ainfo->reg);
5842 			else if (ainfo->storage == RegTypeFP) {
5843 				g_assert_not_reached ();
5844 			} else if (ainfo->storage == RegTypeBase) {
5845 				if (arm_is_imm12 (prev_sp_offset + ainfo->offset)) {
5846 					ARM_LDR_IMM (code, inst->dreg, ARMREG_SP, (prev_sp_offset + ainfo->offset));
5848 					code = mono_arm_emit_load_imm (code, ARMREG_IP, prev_sp_offset + ainfo->offset);
5849 					ARM_LDR_REG_REG (code, inst->dreg, ARMREG_SP, ARMREG_IP);
5852 				g_assert_not_reached ();
5854 			if (cfg->verbose_level > 2)
5855 				g_print ("Argument %d assigned to register %s\n", pos, mono_arch_regname (inst->dreg));
5857 			/* the argument should be put on the stack: FIXME handle size != word */
			/* Register-passed argument: store to its stack slot, sized store per ainfo->size */
5858 			if (ainfo->storage == RegTypeGeneral || ainfo->storage == RegTypeIRegPair || ainfo->storage == RegTypeGSharedVtInReg) {
5859 				switch (ainfo->size) {
5861 					if (arm_is_imm12 (inst->inst_offset))
5862 						ARM_STRB_IMM (code, ainfo->reg, inst->inst_basereg, inst->inst_offset);
5864 						code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
5865 						ARM_STRB_REG_REG (code, ainfo->reg, inst->inst_basereg, ARMREG_IP);
5869 					if (arm_is_imm8 (inst->inst_offset)) {
5870 						ARM_STRH_IMM (code, ainfo->reg, inst->inst_basereg, inst->inst_offset);
5872 						code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
5873 						ARM_STRH_REG_REG (code, ainfo->reg, inst->inst_basereg, ARMREG_IP);
					/* 8-byte case: store the register pair to offset and offset+4 */
5877 					if (arm_is_imm12 (inst->inst_offset)) {
5878 						ARM_STR_IMM (code, ainfo->reg, inst->inst_basereg, inst->inst_offset);
5880 						code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
5881 						ARM_STR_REG_REG (code, ainfo->reg, inst->inst_basereg, ARMREG_IP);
5883 					if (arm_is_imm12 (inst->inst_offset + 4)) {
5884 						ARM_STR_IMM (code, ainfo->reg + 1, inst->inst_basereg, inst->inst_offset + 4);
5886 						code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset + 4);
5887 						ARM_STR_REG_REG (code, ainfo->reg + 1, inst->inst_basereg, ARMREG_IP);
5891 					if (arm_is_imm12 (inst->inst_offset)) {
5892 						ARM_STR_IMM (code, ainfo->reg, inst->inst_basereg, inst->inst_offset);
5894 						code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
5895 						ARM_STR_REG_REG (code, ainfo->reg, inst->inst_basereg, ARMREG_IP);
			/* Split argument: low word on the caller stack, high word in r3 */
5899 			} else if (ainfo->storage == RegTypeBaseGen) {
5900 				if (arm_is_imm12 (prev_sp_offset + ainfo->offset)) {
5901 					ARM_LDR_IMM (code, ARMREG_LR, ARMREG_SP, (prev_sp_offset + ainfo->offset));
5903 					code = mono_arm_emit_load_imm (code, ARMREG_IP, prev_sp_offset + ainfo->offset);
5904 					ARM_LDR_REG_REG (code, ARMREG_LR, ARMREG_SP, ARMREG_IP);
5906 				if (arm_is_imm12 (inst->inst_offset + 4)) {
5907 					ARM_STR_IMM (code, ARMREG_LR, inst->inst_basereg, inst->inst_offset + 4);
5908 					ARM_STR_IMM (code, ARMREG_R3, inst->inst_basereg, inst->inst_offset);
5910 					code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset + 4);
5911 					ARM_STR_REG_REG (code, ARMREG_LR, inst->inst_basereg, ARMREG_IP);
5912 					code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
5913 					ARM_STR_REG_REG (code, ARMREG_R3, inst->inst_basereg, ARMREG_IP);
			/* Stack-passed argument: copy from the caller's frame via lr */
5915 			} else if (ainfo->storage == RegTypeBase || ainfo->storage == RegTypeGSharedVtOnStack) {
5916 				if (arm_is_imm12 (prev_sp_offset + ainfo->offset)) {
5917 					ARM_LDR_IMM (code, ARMREG_LR, ARMREG_SP, (prev_sp_offset + ainfo->offset));
5919 					code = mono_arm_emit_load_imm (code, ARMREG_IP, prev_sp_offset + ainfo->offset);
5920 					ARM_LDR_REG_REG (code, ARMREG_LR, ARMREG_SP, ARMREG_IP);
5923 				switch (ainfo->size) {
5925 					if (arm_is_imm8 (inst->inst_offset)) {
5926 						ARM_STRB_IMM (code, ARMREG_LR, inst->inst_basereg, inst->inst_offset);
5928 						code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
5929 						ARM_STRB_REG_REG (code, ARMREG_LR, inst->inst_basereg, ARMREG_IP);
5933 					if (arm_is_imm8 (inst->inst_offset)) {
5934 						ARM_STRH_IMM (code, ARMREG_LR, inst->inst_basereg, inst->inst_offset);
5936 						code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
5937 						ARM_STRH_REG_REG (code, ARMREG_LR, inst->inst_basereg, ARMREG_IP);
					/* 8-byte case: copy both words, one at a time */
5941 					if (arm_is_imm12 (inst->inst_offset)) {
5942 						ARM_STR_IMM (code, ARMREG_LR, inst->inst_basereg, inst->inst_offset);
5944 						code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
5945 						ARM_STR_REG_REG (code, ARMREG_LR, inst->inst_basereg, ARMREG_IP);
5947 					if (arm_is_imm12 (prev_sp_offset + ainfo->offset + 4)) {
5948 						ARM_LDR_IMM (code, ARMREG_LR, ARMREG_SP, (prev_sp_offset + ainfo->offset + 4));
5950 						code = mono_arm_emit_load_imm (code, ARMREG_IP, prev_sp_offset + ainfo->offset + 4);
5951 						ARM_LDR_REG_REG (code, ARMREG_LR, ARMREG_SP, ARMREG_IP);
5953 					if (arm_is_imm12 (inst->inst_offset + 4)) {
5954 						ARM_STR_IMM (code, ARMREG_LR, inst->inst_basereg, inst->inst_offset + 4);
5956 						code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset + 4);
5957 						ARM_STR_REG_REG (code, ARMREG_LR, inst->inst_basereg, ARMREG_IP);
5961 					if (arm_is_imm12 (inst->inst_offset)) {
5962 						ARM_STR_IMM (code, ARMREG_LR, inst->inst_basereg, inst->inst_offset);
5964 						code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
5965 						ARM_STR_REG_REG (code, ARMREG_LR, inst->inst_basereg, ARMREG_IP);
			/* VFP-register argument: compute slot address into ip, then FSTD/FSTS */
5969 			} else if (ainfo->storage == RegTypeFP) {
5970 				int imm8, rot_amount;
5972 				if ((imm8 = mono_arm_is_rotated_imm8 (inst->inst_offset, &rot_amount)) == -1) {
5973 					code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
5974 					ARM_ADD_REG_REG (code, ARMREG_IP, ARMREG_IP, inst->inst_basereg);
5976 					ARM_ADD_REG_IMM (code, ARMREG_IP, inst->inst_basereg, imm8, rot_amount);
5978 				if (ainfo->size == 8)
5979 					ARM_FSTD (code, ainfo->reg, ARMREG_IP, 0);
5981 					ARM_FSTS (code, ainfo->reg, ARMREG_IP, 0);
			/* Struct passed (partially) in registers: store regs, then memcpy any stack tail */
5982 			} else if (ainfo->storage == RegTypeStructByVal) {
5983 				int doffset = inst->inst_offset;
5987 				size = mini_type_stack_size_full (cfg->generic_sharing_context, inst->inst_vtype, NULL, sig->pinvoke);
5988 				for (cur_reg = 0; cur_reg < ainfo->size; ++cur_reg) {
5989 					if (arm_is_imm12 (doffset)) {
5990 						ARM_STR_IMM (code, ainfo->reg + cur_reg, inst->inst_basereg, doffset);
5992 						code = mono_arm_emit_load_imm (code, ARMREG_IP, doffset);
5993 						ARM_STR_REG_REG (code, ainfo->reg + cur_reg, inst->inst_basereg, ARMREG_IP);
5995 					soffset += sizeof (gpointer);
5996 					doffset += sizeof (gpointer);
5998 				if (ainfo->vtsize) {
5999 					/* FIXME: handle overrun! with struct sizes not multiple of 4 */
6000 					//g_print ("emit_memcpy (prev_sp_ofs: %d, ainfo->offset: %d, soffset: %d)\n", prev_sp_offset, ainfo->offset, soffset);
6001 					code = emit_memcpy (code, ainfo->vtsize * sizeof (gpointer), inst->inst_basereg, doffset, ARMREG_SP, prev_sp_offset + ainfo->offset);
6003 			} else if (ainfo->storage == RegTypeStructByAddr) {
6004 				g_assert_not_reached ();
6005 				/* FIXME: handle overrun! with struct sizes not multiple of 4 */
6006 				code = emit_memcpy (code, ainfo->vtsize * sizeof (gpointer), inst->inst_basereg, inst->inst_offset, ainfo->reg, 0);
6008 				g_assert_not_reached ();
6013 	if (method->save_lmf)
6014 		code = emit_save_lmf (cfg, code, alloc_size - lmf_offset);
6017 		code = mono_arch_instrument_prolog (cfg, mono_trace_enter_method, code, TRUE);
	/* Sequence-point support: materialize the SeqPointInfo pointer into its variable */
6019 	if (cfg->arch.seq_point_info_var) {
6020 		MonoInst *ins = cfg->arch.seq_point_info_var;
6022 		/* Initialize the variable from a GOT slot */
6023 		mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_SEQ_POINT_INFO, cfg->method);
6024 #ifdef USE_JUMP_TABLES
6026 			gpointer *jte = mono_jumptable_add_entry ();
6027 			code = mono_arm_load_jumptable_entry (code, jte, ARMREG_IP);
6028 			ARM_LDR_IMM (code, ARMREG_R0, ARMREG_IP, 0);
6030 		/** XXX: is it correct? */
6032 		ARM_LDR_IMM (code, ARMREG_R0, ARMREG_PC, 0);
6034 		*(gpointer*)code = NULL;
6037 		ARM_LDR_REG_REG (code, ARMREG_R0, ARMREG_PC, ARMREG_R0);
6039 		g_assert (ins->opcode == OP_REGOFFSET);
6041 		if (arm_is_imm12 (ins->inst_offset)) {
6042 			ARM_STR_IMM (code, ARMREG_R0, ins->inst_basereg, ins->inst_offset);
6044 			code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_offset);
6045 			ARM_STR_REG_REG (code, ARMREG_R0, ins->inst_basereg, ARMREG_LR);
6049 		/* Initialize ss_trigger_page_var */
6050 		if (!cfg->soft_breakpoints) {
6051 			MonoInst *info_var = cfg->arch.seq_point_info_var;
6052 			MonoInst *ss_trigger_page_var = cfg->arch.ss_trigger_page_var;
6053 			int dreg = ARMREG_LR;
6056 			g_assert (info_var->opcode == OP_REGOFFSET);
6057 			g_assert (arm_is_imm12 (info_var->inst_offset));
6059 			ARM_LDR_IMM (code, dreg, info_var->inst_basereg, info_var->inst_offset);
6060 			/* Load the trigger page addr */
6061 			ARM_LDR_IMM (code, dreg, dreg, G_STRUCT_OFFSET (SeqPointInfo, ss_trigger_page));
6062 			ARM_STR_IMM (code, dreg, ss_trigger_page_var->inst_basereg, ss_trigger_page_var->inst_offset);
	/* Soft-breakpoint support: seed the ss/bp trigger variables from a 3-entry table */
6066 	if (cfg->arch.seq_point_read_var) {
6067 		MonoInst *read_ins = cfg->arch.seq_point_read_var;
6068 		MonoInst *ss_method_ins = cfg->arch.seq_point_ss_method_var;
6069 		MonoInst *bp_method_ins = cfg->arch.seq_point_bp_method_var;
6070 #ifdef USE_JUMP_TABLES
6073 		g_assert (read_ins->opcode == OP_REGOFFSET);
6074 		g_assert (arm_is_imm12 (read_ins->inst_offset));
6075 		g_assert (ss_method_ins->opcode == OP_REGOFFSET);
6076 		g_assert (arm_is_imm12 (ss_method_ins->inst_offset));
6077 		g_assert (bp_method_ins->opcode == OP_REGOFFSET);
6078 		g_assert (arm_is_imm12 (bp_method_ins->inst_offset));
6080 #ifdef USE_JUMP_TABLES
6081 		jte = mono_jumptable_add_entries (3);
6082 		jte [0] = (gpointer)&ss_trigger_var;
6083 		jte [1] = single_step_func_wrapper;
6084 		jte [2] = breakpoint_func_wrapper;
6085 		code = mono_arm_load_jumptable_entry_addr (code, jte, ARMREG_LR);
		/* Non-jumptable path: inline the three pointers right after a pc-relative mov */
6087 		ARM_MOV_REG_REG (code, ARMREG_LR, ARMREG_PC);
6089 		*(volatile int **)code = &ss_trigger_var;
6091 		*(gpointer*)code = single_step_func_wrapper;
6093 		*(gpointer*)code = breakpoint_func_wrapper;
		/* Copy the three table entries into their frame variables */
6097 		ARM_LDR_IMM (code, ARMREG_IP, ARMREG_LR, 0);
6098 		ARM_STR_IMM (code, ARMREG_IP, read_ins->inst_basereg, read_ins->inst_offset);
6099 		ARM_LDR_IMM (code, ARMREG_IP, ARMREG_LR, 4);
6100 		ARM_STR_IMM (code, ARMREG_IP, ss_method_ins->inst_basereg, ss_method_ins->inst_offset);
6101 		ARM_LDR_IMM (code, ARMREG_IP, ARMREG_LR, 8);
6102 		ARM_STR_IMM (code, ARMREG_IP, bp_method_ins->inst_basereg, bp_method_ins->inst_offset);
6105 	cfg->code_len = code - cfg->native_code;
6106 	g_assert (cfg->code_len < cfg->code_size);
/*
 * mono_arch_emit_epilog:
 *
 *   Emit the method epilogue: optionally instrument the leave, reload a
 * struct-by-value return into r0, tear down the LMF (if the method saved
 * one) or the plain frame, and return by popping the saved LR into PC.
 * Grows cfg->native_code first so the epilogue is guaranteed to fit.
 */
6113 mono_arch_emit_epilog (MonoCompile *cfg)
6115 MonoMethod *method = cfg->method;
6116 int pos, i, rot_amount;
/* Worst-case epilogue size estimate; refined by the branches below. */
6117 int max_epilog_size = 16 + 20*4;
6121 if (cfg->method->save_lmf)
6122 max_epilog_size += 128;
6124 if (mono_jit_trace_calls != NULL)
6125 max_epilog_size += 50;
6127 if (cfg->prof_options & MONO_PROFILE_ENTER_LEAVE)
6128 max_epilog_size += 50;
/* Double the buffer until the estimated epilogue fits (16-byte slack). */
6130 while (cfg->code_len + max_epilog_size > (cfg->code_size - 16)) {
6131 cfg->code_size *= 2;
6132 cfg->native_code = g_realloc (cfg->native_code, cfg->code_size);
6133 cfg->stat_code_reallocs++;
6137 * Keep in sync with OP_JMP
6139 code = cfg->native_code + cfg->code_len;
6141 if (mono_jit_trace_calls != NULL && mono_trace_eval (method)) {
6142 code = mono_arch_instrument_epilog (cfg, mono_trace_leave_method, code, TRUE);
6146 /* Load returned vtypes into registers if needed */
6147 cinfo = cfg->arch.cinfo;
6148 if (cinfo->ret.storage == RegTypeStructByVal) {
6149 MonoInst *ins = cfg->ret;
/* Use the short-form LDR when the frame offset fits in 12 bits,
 * otherwise materialize the offset in LR and use a register-offset load. */
6151 if (arm_is_imm12 (ins->inst_offset)) {
6152 ARM_LDR_IMM (code, ARMREG_R0, ins->inst_basereg, ins->inst_offset);
6154 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_offset);
6155 ARM_LDR_REG_REG (code, ARMREG_R0, ins->inst_basereg, ARMREG_LR);
6159 if (method->save_lmf) {
6160 int lmf_offset, reg, sp_adj, regmask;
6161 /* all but r0-r3, sp and pc */
6162 pos += sizeof (MonoLMF) - (MONO_ARM_NUM_SAVED_REGS * sizeof (mgreg_t));
6165 code = emit_restore_lmf (cfg, code, cfg->stack_usage - lmf_offset);
6167 /* This points to r4 inside MonoLMF->iregs */
6168 sp_adj = (sizeof (MonoLMF) - MONO_ARM_NUM_SAVED_REGS * sizeof (mgreg_t));
6170 regmask = 0x9ff0; /* restore lr to pc */
6171 /* Skip caller saved registers not used by the method */
6172 while (!(cfg->used_int_regs & (1 << reg)) && reg < ARMREG_FP) {
6173 regmask &= ~(1 << reg);
6178 /* Restored later */
6179 regmask &= ~(1 << ARMREG_PC);
6180 /* point sp at the registers to restore: 10 is 14 -4, because we skip r0-r3 */
6181 code = emit_big_add (code, ARMREG_SP, cfg->frame_reg, cfg->stack_usage - lmf_offset + sp_adj);
6183 ARM_POP (code, regmask);
6185 /* Restore saved r7, restore LR to PC */
6186 /* Skip lr from the lmf */
6187 ARM_ADD_REG_IMM (code, ARMREG_SP, ARMREG_SP, sizeof (gpointer), 0);
6188 ARM_POP (code, (1 << ARMREG_R7) | (1 << ARMREG_PC));
/* Non-LMF path: rewind SP to the saved-register area of the frame.
 * Prefer an immediate ADD when stack_usage encodes as a rotated imm8. */
6191 if ((i = mono_arm_is_rotated_imm8 (cfg->stack_usage, &rot_amount)) >= 0) {
6192 ARM_ADD_REG_IMM (code, ARMREG_SP, cfg->frame_reg, i, rot_amount);
6194 code = mono_arm_emit_load_imm (code, ARMREG_IP, cfg->stack_usage);
6195 ARM_ADD_REG_REG (code, ARMREG_SP, cfg->frame_reg, ARMREG_IP);
6199 /* Restore saved gregs */
6200 if (cfg->used_int_regs)
6201 ARM_POP (code, cfg->used_int_regs);
6202 /* Restore saved r7, restore LR to PC */
6203 ARM_POP (code, (1 << ARMREG_R7) | (1 << ARMREG_PC));
/* presumably an iOS/-mapkey frame variant: single POP restoring used regs + PC
 * -- alternative branch, enclosing condition not visible here */
6205 ARM_POP (code, cfg->used_int_regs | (1 << ARMREG_PC));
6209 cfg->code_len = code - cfg->native_code;
6211 g_assert (cfg->code_len < cfg->code_size);
/*
 * mono_arch_emit_exceptions:
 *
 *   Emit the out-of-line exception-throwing stubs referenced by
 * MONO_PATCH_INFO_EXC patches: one shared stub per distinct exception
 * class, each loading the class token into r0, the faulting LR into r1,
 * and calling mono_arch_throw_corlib_exception.
 */
6216 mono_arch_emit_exceptions (MonoCompile *cfg)
6218 MonoJumpInfo *patch_info;
6221 guint8* exc_throw_pos [MONO_EXC_INTRINS_NUM];
6222 guint8 exc_throw_found [MONO_EXC_INTRINS_NUM];
6223 int max_epilog_size = 50;
/* No stub emitted yet for any of the intrinsic exception ids. */
6225 for (i = 0; i < MONO_EXC_INTRINS_NUM; i++) {
6226 exc_throw_pos [i] = NULL;
6227 exc_throw_found [i] = 0;
6230 /* count the number of exception infos */
6233 * make sure we have enough space for exceptions
/* First pass: budget 32 bytes for each distinct exception class thrown. */
6235 for (patch_info = cfg->patch_info; patch_info; patch_info = patch_info->next) {
6236 if (patch_info->type == MONO_PATCH_INFO_EXC) {
6237 i = mini_exception_id_by_name (patch_info->data.target);
6238 if (!exc_throw_found [i]) {
6239 max_epilog_size += 32;
6240 exc_throw_found [i] = TRUE;
/* Grow the code buffer until the stubs fit (16-byte slack). */
6245 while (cfg->code_len + max_epilog_size > (cfg->code_size - 16)) {
6246 cfg->code_size *= 2;
6247 cfg->native_code = g_realloc (cfg->native_code, cfg->code_size);
6248 cfg->stat_code_reallocs++;
6251 code = cfg->native_code + cfg->code_len;
6253 /* add code to raise exceptions */
6254 for (patch_info = cfg->patch_info; patch_info; patch_info = patch_info->next) {
6255 switch (patch_info->type) {
6256 case MONO_PATCH_INFO_EXC: {
6257 MonoClass *exc_class;
6258 unsigned char *ip = patch_info->ip.i + cfg->native_code;
6260 i = mini_exception_id_by_name (patch_info->data.target);
/* Reuse an already-emitted stub for this exception id: just patch the
 * branch at the throw site and drop the patch. */
6261 if (exc_throw_pos [i]) {
6262 arm_patch (ip, exc_throw_pos [i]);
6263 patch_info->type = MONO_PATCH_INFO_NONE;
6266 exc_throw_pos [i] = code;
6268 arm_patch (ip, code);
6270 exc_class = mono_class_from_name (mono_defaults.corlib, "System", patch_info->data.name);
6271 g_assert (exc_class);
/* r1 = address of the faulting instruction (LR at the throw site). */
6273 ARM_MOV_REG_REG (code, ARMREG_R1, ARMREG_LR);
6274 #ifdef USE_JUMP_TABLES
/* Jumptable flavour: entry 0 = throw helper (patched via the rewritten
 * patch_info), entry 1 = the exception class token loaded into r0. */
6276 gpointer *jte = mono_jumptable_add_entries (2);
6277 patch_info->type = MONO_PATCH_INFO_INTERNAL_METHOD;
6278 patch_info->data.name = "mono_arch_throw_corlib_exception";
6279 patch_info->ip.i = code - cfg->native_code;
6280 code = mono_arm_load_jumptable_entry_addr (code, jte, ARMREG_R0);
6281 ARM_LDR_IMM (code, ARMREG_IP, ARMREG_R0, 0);
6282 ARM_LDR_IMM (code, ARMREG_R0, ARMREG_R0, 4);
6283 ARM_BLX_REG (code, ARMREG_IP);
6284 jte [1] = GUINT_TO_POINTER (exc_class->type_token);
/* Literal-pool flavour: PC-relative load of the token, then an
 * internal-method patch for the actual call. */
6287 ARM_LDR_IMM (code, ARMREG_R0, ARMREG_PC, 0);
6288 patch_info->type = MONO_PATCH_INFO_INTERNAL_METHOD;
6289 patch_info->data.name = "mono_arch_throw_corlib_exception";
6290 patch_info->ip.i = code - cfg->native_code;
6292 *(guint32*)(gpointer)code = exc_class->type_token;
6303 cfg->code_len = code - cfg->native_code;
6305 g_assert (cfg->code_len < cfg->code_size);
6309 #endif /* #ifndef DISABLE_JIT */
6312 mono_arch_finish_init (void)
6317 mono_arch_free_jit_tls_data (MonoJitTlsData *tls)
6322 mono_arch_emit_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
6329 mono_arch_print_tree (MonoInst *tree, int arity)
6339 mono_arch_get_patch_offset (guint8 *code)
6346 mono_arch_flush_register_windows (void)
6350 #ifdef MONO_ARCH_HAVE_IMT
/*
 * mono_arch_emit_imt_argument:
 *
 *   Arrange for the IMT/method argument of an interface call to be passed
 * in V5. Under AOT the method is loaded via an AOTCONST patch; otherwise
 * (generic context, explicit imt_arg, LLVM, or jumptables) it is passed
 * as a register argument for simplicity.
 */
6355 mono_arch_emit_imt_argument (MonoCompile *cfg, MonoCallInst *call, MonoInst *imt_arg)
6357 int method_reg = mono_alloc_ireg (cfg);
6358 #ifdef USE_JUMP_TABLES
6359 int use_jumptables = TRUE;
6361 int use_jumptables = FALSE;
6364 if (cfg->compile_aot) {
6367 call->dynamic_imt_arg = TRUE;
/* An explicit IMT argument was supplied: just move it into method_reg. */
6370 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, method_reg, imt_arg->dreg);
/* Otherwise load the method as an AOT constant (patched at load time). */
6372 MONO_INST_NEW (cfg, ins, OP_AOTCONST);
6373 ins->dreg = method_reg;
6374 ins->inst_p0 = call->method;
6375 ins->inst_c1 = MONO_PATCH_INFO_METHODCONST;
6376 MONO_ADD_INS (cfg->cbb, ins);
6378 mono_call_inst_add_outarg_reg (cfg, call, method_reg, ARMREG_V5, FALSE);
6379 } else if (cfg->generic_context || imt_arg || mono_use_llvm || use_jumptables) {
6380 /* Always pass in a register for simplicity */
6381 call->dynamic_imt_arg = TRUE;
6383 cfg->uses_rgctx_reg = TRUE;
6386 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, method_reg, imt_arg->dreg);
/* No imt_arg: pass the method pointer itself as an immediate constant. */
6390 MONO_INST_NEW (cfg, ins, OP_PCONST);
6391 ins->inst_p0 = call->method;
6392 ins->dreg = method_reg;
6393 MONO_ADD_INS (cfg->cbb, ins);
6396 mono_call_inst_add_outarg_reg (cfg, call, method_reg, ARMREG_V5, FALSE);
/*
 * mono_arch_find_imt_method:
 *
 *   Recover the IMT method for an interface call: from register V5 when
 * jumptables/AOT/LLVM conventions are in use, otherwise from the literal
 * embedded in the code stream right after the `LDR pc` at the call site.
 */
6403 mono_arch_find_imt_method (mgreg_t *regs, guint8 *code)
6405 #ifdef USE_JUMP_TABLES
/* Jumptable builds always pass the IMT method in V5. */
6406 return (MonoMethod*)regs [ARMREG_V5];
6409 guint32 *code_ptr = (guint32*)code;
6411 method = GUINT_TO_POINTER (code_ptr [1]);
/* NOTE(review): this early return looks conditional on an elided check
 * (LLVM path, presumably) -- confirm against the full source. */
6415 return (MonoMethod*)regs [ARMREG_V5];
6417 /* The IMT value is stored in the code stream right after the LDC instruction. */
6418 /* This is no longer true for the gsharedvt_in trampoline */
6420 if (!IS_LDR_PC (code_ptr [0])) {
6421 g_warning ("invalid code stream, instruction before IMT value is not a LDC in %s() (code %p value 0: 0x%x -1: 0x%x -2: 0x%x)", __FUNCTION__, code, code_ptr [2], code_ptr [1], code_ptr [0]);
6422 g_assert (IS_LDR_PC (code_ptr [0]));
6426 /* This is AOTed code, or the gsharedvt trampoline, the IMT method is in V5 */
6427 return (MonoMethod*)regs [ARMREG_V5];
6429 return (MonoMethod*) method;
/*
 * mono_arch_find_static_call_vtable:
 *
 *   The vtable of a static rgctx call is passed in the RGCTX register.
 */
6434 mono_arch_find_static_call_vtable (mgreg_t *regs, guint8 *code)
6436 return (MonoVTable*) regs [MONO_ARCH_RGCTX_REG];
6439 /* #define ENABLE_WRONG_METHOD_CHECK 1 */
6440 #define BASE_SIZE (6 * 4)
6441 #define BSEARCH_ENTRY_SIZE (4 * 4)
6442 #define CMP_SIZE (3 * 4)
6443 #define BRANCH_SIZE (1 * 4)
6444 #define CALL_SIZE (2 * 4)
6445 #define WMC_SIZE (8 * 4)
6446 #define DISTANCE(A, B) (((gint32)(B)) - ((gint32)(A)))
6448 #ifdef USE_JUMP_TABLES
/*
 * set_jumptable_element:
 *
 *   Store VALUE into slot INDEX of jumptable BASE; each slot must only
 * be written once (asserts the slot is still NULL).
 */
6450 set_jumptable_element (gpointer *base, guint32 index, gpointer value)
6452 g_assert (base [index] == NULL);
6453 base [index] = value;
/*
 * load_element_with_regbase_cond:
 *
 *   Emit a conditional load of jumptable entry JTI (base register BASE)
 * into DREG. Uses a single LDR when the byte offset fits in 12 bits,
 * otherwise materializes the offset with MOVW/MOVT and does a
 * register-offset LDR.
 */
6456 load_element_with_regbase_cond (arminstr_t *code, ARMReg dreg, ARMReg base, guint32 jti, int cond)
6458 if (arm_is_imm12 (jti * 4)) {
6459 ARM_LDR_IMM_COND (code, dreg, base, jti * 4, cond);
6461 ARM_MOVW_REG_IMM_COND (code, dreg, (jti * 4) & 0xffff, cond);
/* MOVT only needed when the offset has bits above the low 16. */
6462 if ((jti * 4) >> 16)
6463 ARM_MOVT_REG_IMM_COND (code, dreg, ((jti * 4) >> 16) & 0xffff, cond);
6464 ARM_LDR_REG_REG_SHIFT_COND (code, dreg, base, dreg, ARMSHIFT_LSL, 0, cond);
/*
 * arm_emit_value_and_patch_ldr:
 *
 *   Emit VALUE into the code stream at CODE and back-patch the earlier
 * PC-relative LDR at TARGET so its 12-bit offset points at it. The
 * literal must lie within 0xFFF bytes after the LDR.
 */
6470 arm_emit_value_and_patch_ldr (arminstr_t *code, arminstr_t *target, guint32 value)
6472 guint32 delta = DISTANCE (target, code);
/* NOTE(review): delta is unsigned, so `delta >= 0` is always true; the
 * assert only effectively checks the upper bound. */
6474 g_assert (delta >= 0 && delta <= 0xFFF);
6475 *target = *target | delta;
6481 #ifdef ENABLE_WRONG_METHOD_CHECK
/* Debug helper invoked from WMC-instrumented IMT thunks when the
 * looked-up IMT slot does not match the expected method. */
6483 mini_dump_bad_imt (int input_imt, int compared_imt, int pc)
6485 g_print ("BAD IMT comparing %x with expected %x at ip %x", input_imt, compared_imt, pc);
/*
 * mono_arch_build_imt_thunk:
 *
 *   Build the native IMT thunk for VTABLE: a binary-search dispatch over
 * COUNT sorted imt_entries that compares the incoming IMT method (r0)
 * against each key and branches to the matching vtable slot, target code,
 * or FAIL_TRAMP. Two-pass: first compute per-entry chunk sizes, then emit.
 * The USE_JUMP_TABLES flavour keeps keys/targets in a jumptable addressed
 * via r2; the literal-pool flavour embeds them in the code stream and
 * back-patches PC-relative LDRs.
 */
6491 mono_arch_build_imt_thunk (MonoVTable *vtable, MonoDomain *domain, MonoIMTCheckItem **imt_entries, int count,
6492 gpointer fail_tramp)
6495 arminstr_t *code, *start;
6496 #ifdef USE_JUMP_TABLES
6499 gboolean large_offsets = FALSE;
6500 guint32 **constant_pool_starts;
6501 arminstr_t *vtable_target = NULL;
6502 int extra_space = 0;
6504 #ifdef ENABLE_WRONG_METHOD_CHECK
/* ---- pass 1: size each entry so the code buffer can be reserved ---- */
6509 #ifdef USE_JUMP_TABLES
6510 for (i = 0; i < count; ++i) {
6511 MonoIMTCheckItem *item = imt_entries [i];
/* Fixed worst-case chunk per entry in the jumptable flavour. */
6512 item->chunk_size += 4 * 16;
6513 if (!item->is_equals)
6514 imt_entries [item->check_target_idx]->compare_done = TRUE;
6515 size += item->chunk_size;
6518 constant_pool_starts = g_new0 (guint32*, count);
6520 for (i = 0; i < count; ++i) {
6521 MonoIMTCheckItem *item = imt_entries [i];
6522 if (item->is_equals) {
6523 gboolean fail_case = !item->check_target_idx && fail_tramp;
/* Entries whose vtable slot is out of LDR imm12 range (or that jump to
 * explicit target code) need the long push/LDM sequence. */
6525 if (item->has_target_code || !arm_is_imm12 (DISTANCE (vtable, &vtable->vtable[item->value.vtable_slot]))) {
6526 item->chunk_size += 32;
6527 large_offsets = TRUE;
6530 if (item->check_target_idx || fail_case) {
6531 if (!item->compare_done || fail_case)
6532 item->chunk_size += CMP_SIZE;
6533 item->chunk_size += BRANCH_SIZE;
6535 #ifdef ENABLE_WRONG_METHOD_CHECK
6536 item->chunk_size += WMC_SIZE;
6540 item->chunk_size += 16;
6541 large_offsets = TRUE;
6543 item->chunk_size += CALL_SIZE;
/* Non-equals entries are binary-search comparison nodes. */
6545 item->chunk_size += BSEARCH_ENTRY_SIZE;
6546 imt_entries [item->check_target_idx]->compare_done = TRUE;
6548 size += item->chunk_size;
6552 size += 4 * count; /* The ARM_ADD_REG_IMM to pop the stack */
/* fail_tramp implies a generic-virtual thunk, allocated from its own pool. */
6556 code = mono_method_alloc_generic_virtual_thunk (domain, size);
6558 code = mono_domain_code_reserve (domain, size);
6562 g_print ("Building IMT thunk for class %s %s entries %d code size %d code at %p end %p vtable %p fail_tramp %p\n", vtable->klass->name_space, vtable->klass->name, count, size, start, ((guint8*)start) + size, vtable, fail_tramp);
6563 for (i = 0; i < count; ++i) {
6564 MonoIMTCheckItem *item = imt_entries [i];
6565 g_print ("method %d (%p) %s vtable slot %p is_equals %d chunk size %d\n", i, item->key, ((MonoMethod*)item->key)->name, &vtable->vtable [item->value.vtable_slot], item->is_equals, item->chunk_size);
/* ---- pass 2: emit the thunk prologue ---- */
6569 #ifdef USE_JUMP_TABLES
6570 ARM_PUSH3 (code, ARMREG_R0, ARMREG_R1, ARMREG_R2);
6571 /* If jumptables we always pass the IMT method in R5 */
6572 ARM_MOV_REG_REG (code, ARMREG_R0, ARMREG_V5);
/* Jumptable layout: slot 0 = vtable, then 3 slots per entry. */
6573 #define VTABLE_JTI 0
6574 #define IMT_METHOD_OFFSET 0
6575 #define TARGET_CODE_OFFSET 1
6576 #define JUMP_CODE_OFFSET 2
6577 #define RECORDS_PER_ENTRY 3
6578 #define IMT_METHOD_JTI(idx) (1 + idx * RECORDS_PER_ENTRY + IMT_METHOD_OFFSET)
6579 #define TARGET_CODE_JTI(idx) (1 + idx * RECORDS_PER_ENTRY + TARGET_CODE_OFFSET)
6580 #define JUMP_CODE_JTI(idx) (1 + idx * RECORDS_PER_ENTRY + JUMP_CODE_OFFSET)
6582 jte = mono_jumptable_add_entries (RECORDS_PER_ENTRY * count + 1 /* vtable */);
6583 code = (arminstr_t *) mono_arm_load_jumptable_entry_addr ((guint8 *) code, jte, ARMREG_R2);
6584 ARM_LDR_IMM (code, ARMREG_IP, ARMREG_R2, VTABLE_JTI);
6585 set_jumptable_element (jte, VTABLE_JTI, vtable);
/* Literal-pool flavour: reserve a 4th stack slot (PC) for the LDM-based
 * indirect branch used by large-offset entries. */
6588 ARM_PUSH4 (code, ARMREG_R0, ARMREG_R1, ARMREG_IP, ARMREG_PC);
6590 ARM_PUSH2 (code, ARMREG_R0, ARMREG_R1);
6591 ARM_LDR_IMM (code, ARMREG_R0, ARMREG_LR, -4);
6592 vtable_target = code;
6593 ARM_LDR_IMM (code, ARMREG_IP, ARMREG_PC, 0);
6595 if (mono_use_llvm) {
6596 /* LLVM always passes the IMT method in R5 */
6597 ARM_MOV_REG_REG (code, ARMREG_R0, ARMREG_V5);
6599 /* R0 == 0 means we are called from AOT code. In this case, V5 contains the IMT method */
6600 ARM_CMP_REG_IMM8 (code, ARMREG_R0, 0);
6601 ARM_MOV_REG_REG_COND (code, ARMREG_R0, ARMREG_V5, ARMCOND_EQ);
/* ---- emit one chunk per IMT entry ---- */
6605 for (i = 0; i < count; ++i) {
6606 MonoIMTCheckItem *item = imt_entries [i];
6607 #ifdef USE_JUMP_TABLES
6608 guint32 imt_method_jti = 0, target_code_jti = 0;
6610 arminstr_t *imt_method = NULL, *vtable_offset_ins = NULL, *target_code_ins = NULL;
6612 gint32 vtable_offset;
6614 item->code_target = (guint8*)code;
6616 if (item->is_equals) {
6617 gboolean fail_case = !item->check_target_idx && fail_tramp;
6619 if (item->check_target_idx || fail_case) {
/* Compare the incoming method against this entry's key; on mismatch
 * branch to the next node (patched in the fixup pass below). */
6620 if (!item->compare_done || fail_case) {
6621 #ifdef USE_JUMP_TABLES
6622 imt_method_jti = IMT_METHOD_JTI (i);
6623 code = load_element_with_regbase_cond (code, ARMREG_R1, ARMREG_R2, imt_method_jti, ARMCOND_AL);
6626 ARM_LDR_IMM (code, ARMREG_R1, ARMREG_PC, 0);
6628 ARM_CMP_REG_REG (code, ARMREG_R0, ARMREG_R1);
6630 #ifdef USE_JUMP_TABLES
6631 code = load_element_with_regbase_cond (code, ARMREG_R1, ARMREG_R2, JUMP_CODE_JTI (i), ARMCOND_NE);
6632 ARM_BX_COND (code, ARMCOND_NE, ARMREG_R1);
6633 item->jmp_code = GUINT_TO_POINTER (JUMP_CODE_JTI (i));
6635 item->jmp_code = (guint8*)code;
6636 ARM_B_COND (code, ARMCOND_NE, 0);
6639 /*Enable the commented code to assert on wrong method*/
6640 #ifdef ENABLE_WRONG_METHOD_CHECK
6641 #ifdef USE_JUMP_TABLES
6642 imt_method_jti = IMT_METHOD_JTI (i);
6643 code = load_element_with_regbase_cond (code, ARMREG_R1, ARMREG_R2, imt_method_jti, ARMCOND_AL);
6646 ARM_LDR_IMM (code, ARMREG_R1, ARMREG_PC, 0);
6648 ARM_CMP_REG_REG (code, ARMREG_R0, ARMREG_R1);
6650 ARM_B_COND (code, ARMCOND_EQ, 0);
6652 /* Define this if your system is so bad that gdb is failing. */
6653 #ifdef BROKEN_DEV_ENV
6654 ARM_MOV_REG_REG (code, ARMREG_R2, ARMREG_PC);
6656 arm_patch (code - 1, mini_dump_bad_imt);
6660 arm_patch (cond, code);
/* Key matched: dispatch to explicit target code or a vtable slot. */
6664 if (item->has_target_code) {
6665 /* Load target address */
6666 #ifdef USE_JUMP_TABLES
6667 target_code_jti = TARGET_CODE_JTI (i);
6668 code = load_element_with_regbase_cond (code, ARMREG_R1, ARMREG_R2, target_code_jti, ARMCOND_AL);
6669 /* Restore registers */
6670 ARM_POP3 (code, ARMREG_R0, ARMREG_R1, ARMREG_R2);
6672 ARM_BX (code, ARMREG_R1);
6673 set_jumptable_element (jte, target_code_jti, item->value.target_code);
6675 target_code_ins = code;
6676 ARM_LDR_IMM (code, ARMREG_R1, ARMREG_PC, 0);
6677 /* Save it to the fourth slot */
6678 ARM_STR_IMM (code, ARMREG_R1, ARMREG_SP, 3 * sizeof (gpointer));
6679 /* Restore registers and branch */
6680 ARM_POP4 (code, ARMREG_R0, ARMREG_R1, ARMREG_IP, ARMREG_PC);
6682 code = arm_emit_value_and_patch_ldr (code, target_code_ins, (gsize)item->value.target_code);
6685 vtable_offset = DISTANCE (vtable, &vtable->vtable[item->value.vtable_slot]);
6686 if (!arm_is_imm12 (vtable_offset)) {
6688 * We need to branch to a computed address but we don't have
6689 * a free register to store it, since IP must contain the
6690 * vtable address. So we push the two values to the stack, and
6691 * load them both using LDM.
6693 /* Compute target address */
6694 #ifdef USE_JUMP_TABLES
6695 ARM_MOVW_REG_IMM (code, ARMREG_R1, vtable_offset & 0xffff);
6696 if (vtable_offset >> 16)
6697 ARM_MOVT_REG_IMM (code, ARMREG_R1, (vtable_offset >> 16) & 0xffff);
6698 /* IP had vtable base. */
6699 ARM_LDR_REG_REG (code, ARMREG_IP, ARMREG_IP, ARMREG_R1);
6700 /* Restore registers and branch */
6701 ARM_POP3 (code, ARMREG_R0, ARMREG_R1, ARMREG_R2);
6702 ARM_BX (code, ARMREG_IP);
6704 vtable_offset_ins = code;
6705 ARM_LDR_IMM (code, ARMREG_R1, ARMREG_PC, 0);
6706 ARM_LDR_REG_REG (code, ARMREG_R1, ARMREG_IP, ARMREG_R1);
6707 /* Save it to the fourth slot */
6708 ARM_STR_IMM (code, ARMREG_R1, ARMREG_SP, 3 * sizeof (gpointer));
6709 /* Restore registers and branch */
6710 ARM_POP4 (code, ARMREG_R0, ARMREG_R1, ARMREG_IP, ARMREG_PC);
6712 code = arm_emit_value_and_patch_ldr (code, vtable_offset_ins, vtable_offset);
/* Short path: the slot offset fits in an LDR immediate. */
6715 #ifdef USE_JUMP_TABLES
6716 ARM_LDR_IMM (code, ARMREG_IP, ARMREG_IP, vtable_offset);
6717 ARM_POP3 (code, ARMREG_R0, ARMREG_R1, ARMREG_R2);
6718 ARM_BX (code, ARMREG_IP);
6720 ARM_POP2 (code, ARMREG_R0, ARMREG_R1);
6722 ARM_ADD_REG_IMM8 (code, ARMREG_SP, ARMREG_SP, 2 * sizeof (gpointer));
6723 ARM_LDR_IMM (code, ARMREG_PC, ARMREG_IP, vtable_offset);
/* fail_case tail: route the not-equal branch to FAIL_TRAMP. */
6729 #ifdef USE_JUMP_TABLES
6730 set_jumptable_element (jte, GPOINTER_TO_UINT (item->jmp_code), code);
6731 target_code_jti = TARGET_CODE_JTI (i);
6732 /* Load target address */
6733 code = load_element_with_regbase_cond (code, ARMREG_R1, ARMREG_R2, target_code_jti, ARMCOND_AL);
6734 /* Restore registers */
6735 ARM_POP3 (code, ARMREG_R0, ARMREG_R1, ARMREG_R2);
6737 ARM_BX (code, ARMREG_R1);
6738 set_jumptable_element (jte, target_code_jti, fail_tramp);
6740 arm_patch (item->jmp_code, (guchar*)code);
6742 target_code_ins = code;
6743 /* Load target address */
6744 ARM_LDR_IMM (code, ARMREG_R1, ARMREG_PC, 0);
6745 /* Save it to the fourth slot */
6746 ARM_STR_IMM (code, ARMREG_R1, ARMREG_SP, 3 * sizeof (gpointer));
6747 /* Restore registers and branch */
6748 ARM_POP4 (code, ARMREG_R0, ARMREG_R1, ARMREG_IP, ARMREG_PC);
6750 code = arm_emit_value_and_patch_ldr (code, target_code_ins, (gsize)fail_tramp);
6752 item->jmp_code = NULL;
6755 #ifdef USE_JUMP_TABLES
6757 set_jumptable_element (jte, imt_method_jti, item->key);
6760 code = arm_emit_value_and_patch_ldr (code, imt_method, (guint32)item->key);
6762 /*must emit after unconditional branch*/
6763 if (vtable_target) {
6764 code = arm_emit_value_and_patch_ldr (code, vtable_target, (guint32)vtable);
6765 item->chunk_size += 4;
6766 vtable_target = NULL;
6769 /*We reserve the space for bsearch IMT values after the first entry with an absolute jump*/
6770 constant_pool_starts [i] = code;
6772 code += extra_space;
/* Binary-search node: compare and branch "higher-or-same" to the
 * subtree root at check_target_idx (patched in the fixup pass). */
6777 #ifdef USE_JUMP_TABLES
6778 code = load_element_with_regbase_cond (code, ARMREG_R1, ARMREG_R2, IMT_METHOD_JTI (i), ARMCOND_AL);
6779 ARM_CMP_REG_REG (code, ARMREG_R0, ARMREG_R1);
6780 code = load_element_with_regbase_cond (code, ARMREG_R1, ARMREG_R2, JUMP_CODE_JTI (i), ARMCOND_HS);
6781 ARM_BX_COND (code, ARMCOND_HS, ARMREG_R1);
6782 item->jmp_code = GUINT_TO_POINTER (JUMP_CODE_JTI (i));
6784 ARM_LDR_IMM (code, ARMREG_R1, ARMREG_PC, 0);
6785 ARM_CMP_REG_REG (code, ARMREG_R0, ARMREG_R1);
6787 item->jmp_code = (guint8*)code;
6788 ARM_B_COND (code, ARMCOND_HS, 0);
/* ---- fixup pass: patch pending branches and emit deferred keys ---- */
6794 for (i = 0; i < count; ++i) {
6795 MonoIMTCheckItem *item = imt_entries [i];
6796 if (item->jmp_code) {
6797 if (item->check_target_idx)
6798 #ifdef USE_JUMP_TABLES
6799 set_jumptable_element (jte, GPOINTER_TO_UINT (item->jmp_code), imt_entries [item->check_target_idx]->code_target);
6801 arm_patch (item->jmp_code, imt_entries [item->check_target_idx]->code_target);
6804 if (i > 0 && item->is_equals) {
6806 #ifdef USE_JUMP_TABLES
6807 for (j = i - 1; j >= 0 && !imt_entries [j]->is_equals; --j)
6808 set_jumptable_element (jte, IMT_METHOD_JTI (j), imt_entries [j]->key)
6810 arminstr_t *space_start = constant_pool_starts [i];
6811 for (j = i - 1; j >= 0 && !imt_entries [j]->is_equals; --j) {
6812 space_start = arm_emit_value_and_patch_ldr (space_start, (arminstr_t*)imt_entries [j]->code_target, (guint32)imt_entries [j]->key);
6820 char *buff = g_strdup_printf ("thunk_for_class_%s_%s_entries_%d", vtable->klass->name_space, vtable->klass->name, count);
6821 mono_disassemble_code (NULL, (guint8*)start, size, buff);
6826 #ifndef USE_JUMP_TABLES
6827 g_free (constant_pool_starts);
6830 mono_arch_flush_icache ((guint8*)start, size);
6831 mono_stats.imt_thunks_size += code - start;
6833 g_assert (DISTANCE (start, code) <= size);
/* Return the value of integer register REG from the saved context CTX. */
6840 mono_arch_context_get_int_reg (MonoContext *ctx, int reg)
6842 return ctx->regs [reg];
/* Set integer register REG in the saved context CTX to VAL. */
6846 mono_arch_context_set_int_reg (MonoContext *ctx, int reg, mgreg_t val)
6848 ctx->regs [reg] = val;
6852 * mono_arch_get_trampolines:
6854 * Return a list of MonoTrampInfo structures describing arch specific trampolines
/* Delegates to the exception-trampoline provider; AOT selects the
 * AOT-compatible variants. */
6858 mono_arch_get_trampolines (gboolean aot)
6860 return mono_arm_get_exception_trampolines (aot);
6864 #ifdef MONO_ARCH_SOFT_DEBUG_SUPPORTED
6866 * mono_arch_set_breakpoint:
6868 * Set a breakpoint at the native code corresponding to JI at NATIVE_OFFSET.
6869 * The location should contain code emitted by OP_SEQ_POINT.
6872 mono_arch_set_breakpoint (MonoJitInfo *ji, guint8 *ip)
6875 guint32 native_offset = ip - (guint8*)ji->code_start;
6876 MonoDebugOptions *opt = mini_get_debug_options ();
/* Soft breakpoints: overwrite the seq-point placeholder with a call
 * through LR (JITted code only). */
6878 if (opt->soft_breakpoints) {
6879 g_assert (!ji->from_aot);
6881 ARM_BLX_REG (code, ARMREG_LR);
6882 mono_arch_flush_icache (code - 4, 4);
/* AOT code: no code patching; record the trigger page in the per-method
 * seq-point table consulted at runtime. */
6883 } else if (ji->from_aot) {
6884 SeqPointInfo *info = mono_arch_get_seq_point_info (mono_domain_get (), ji->code_start);
6886 g_assert (native_offset % 4 == 0);
6887 g_assert (info->bp_addrs [native_offset / 4] == 0);
6888 info->bp_addrs [native_offset / 4] = bp_trigger_page;
/* Hardware-style breakpoint: patch in a load from the (protected)
 * breakpoint trigger page so execution faults at this seq point. */
6890 int dreg = ARMREG_LR;
6892 /* Read from another trigger page */
6893 #ifdef USE_JUMP_TABLES
6894 gpointer *jte = mono_jumptable_add_entry ();
6895 code = mono_arm_load_jumptable_entry (code, jte, dreg);
6896 jte [0] = bp_trigger_page;
6898 ARM_LDR_IMM (code, dreg, ARMREG_PC, 0);
6900 *(int*)code = (int)bp_trigger_page;
6903 ARM_LDR_IMM (code, dreg, dreg, 0);
6905 mono_arch_flush_icache (code - 16, 16);
6908 /* This is currently implemented by emitting an SWI instruction, which
6909 * qemu/linux seems to convert to a SIGILL.
6911 *(int*)code = (0xef << 24) | 8;
6913 mono_arch_flush_icache (code - 4, 4);
6919 * mono_arch_clear_breakpoint:
6921 * Clear the breakpoint at IP.
6924 mono_arch_clear_breakpoint (MonoJitInfo *ji, guint8 *ip)
6926 MonoDebugOptions *opt = mini_get_debug_options ();
/* Soft breakpoints: restore the single placeholder instruction
 * (replacement emitted on an elided line) and flush. */
6930 if (opt->soft_breakpoints) {
6931 g_assert (!ji->from_aot);
6934 mono_arch_flush_icache (code - 4, 4);
/* AOT: clear the per-method seq-point table entry set by
 * mono_arch_set_breakpoint (). */
6935 } else if (ji->from_aot) {
6936 guint32 native_offset = ip - (guint8*)ji->code_start;
6937 SeqPointInfo *info = mono_arch_get_seq_point_info (mono_domain_get (), ji->code_start);
6939 g_assert (native_offset % 4 == 0);
6940 g_assert (info->bp_addrs [native_offset / 4] == bp_trigger_page);
6941 info->bp_addrs [native_offset / 4] = 0;
/* JITted code: overwrite the 4-instruction breakpoint sequence
 * (replacement emitted on elided lines) and flush the range. */
6943 for (i = 0; i < 4; ++i)
6946 mono_arch_flush_icache (ip, code - ip);
6951 * mono_arch_start_single_stepping:
6953 * Start single stepping.
6956 mono_arch_start_single_stepping (void)
/* Revoke all access to the single-step trigger page so every seq-point
 * load from it faults, turning each seq point into a step event. */
6958 if (ss_trigger_page)
6959 mono_mprotect (ss_trigger_page, mono_pagesize (), 0);
6965 * mono_arch_stop_single_stepping:
6967 * Stop single stepping.
6970 mono_arch_stop_single_stepping (void)
/* Make the single-step trigger page readable again so seq-point loads
 * no longer fault. */
6972 if (ss_trigger_page)
6973 mono_mprotect (ss_trigger_page, mono_pagesize (), MONO_MMAP_READ);
6979 #define DBG_SIGNAL SIGBUS
6981 #define DBG_SIGNAL SIGSEGV
6985 * mono_arch_is_single_step_event:
6987 * Return whenever the machine state in SIGCTX corresponds to a single
6991 mono_arch_is_single_step_event (void *info, void *sigctx)
6993 siginfo_t *sinfo = info;
/* Trigger pages disabled (soft breakpoints); cannot be a step event. */
6995 if (!ss_trigger_page)
6998 /* Sometimes the address is off by 4 */
/* Step event iff the faulting address falls in the first 128 bytes of
 * the single-step trigger page. */
6999 if (sinfo->si_addr >= ss_trigger_page && (guint8*)sinfo->si_addr <= (guint8*)ss_trigger_page + 128)
7006 * mono_arch_is_breakpoint_event:
7008 * Return whenever the machine state in SIGCTX corresponds to a breakpoint event.
7011 mono_arch_is_breakpoint_event (void *info, void *sigctx)
7013 siginfo_t *sinfo = info;
/* Trigger pages disabled (soft breakpoints); cannot be a bp event. */
7015 if (!ss_trigger_page)
/* Breakpoint event iff the right signal fired and the faulting address
 * lies in the first 128 bytes of the breakpoint trigger page. */
7018 if (sinfo->si_signo == DBG_SIGNAL) {
7019 /* Sometimes the address is off by 4 */
7020 if (sinfo->si_addr >= bp_trigger_page && (guint8*)sinfo->si_addr <= (guint8*)bp_trigger_page + 128)
7030 * mono_arch_skip_breakpoint:
7032 * See mini-amd64.c for docs.
7035 mono_arch_skip_breakpoint (MonoContext *ctx, MonoJitInfo *ji)
/* Advance past the single 4-byte faulting load that implements the bp. */
7037 MONO_CONTEXT_SET_IP (ctx, (guint8*)MONO_CONTEXT_GET_IP (ctx) + 4);
7041 * mono_arch_skip_single_step:
7043 * See mini-amd64.c for docs.
7046 mono_arch_skip_single_step (MonoContext *ctx)
/* Advance past the single 4-byte faulting load that implements the step. */
7048 MONO_CONTEXT_SET_IP (ctx, (guint8*)MONO_CONTEXT_GET_IP (ctx) + 4);
7051 #endif /* MONO_ARCH_SOFT_DEBUG_SUPPORTED */
7054 * mono_arch_get_seq_point_info:
7056 * See mini-amd64.c for docs.
7059 mono_arch_get_seq_point_info (MonoDomain *domain, guint8 *code)
7064 // FIXME: Add a free function
/* Fast path: per-domain cache of SeqPointInfo keyed by code address. */
7066 mono_domain_lock (domain);
7067 info = g_hash_table_lookup (domain_jit_info (domain)->arch_seq_points,
7069 mono_domain_unlock (domain);
/* Slow path: build a SeqPointInfo sized for the method's code, record
 * the trigger pages, and cache it. */
7072 ji = mono_jit_info_table_find (domain, (char*)code);
7075 info = g_malloc0 (sizeof (SeqPointInfo) + ji->code_size);
7077 info->ss_trigger_page = ss_trigger_page;
7078 info->bp_trigger_page = bp_trigger_page;
7080 mono_domain_lock (domain);
7081 g_hash_table_insert (domain_jit_info (domain)->arch_seq_points,
7083 mono_domain_unlock (domain);
/* Initialize an extended LMF: link it to PREV_LMF, tag the link with
 * bit 1 so unwinders recognize a MonoLMFExt, and record its own address. */
7090 mono_arch_init_lmf_ext (MonoLMFExt *ext, gpointer prev_lmf)
7092 ext->lmf.previous_lmf = prev_lmf;
7093 /* Mark that this is a MonoLMFExt */
7094 ext->lmf.previous_lmf = (gpointer)(((gssize)ext->lmf.previous_lmf) | 2);
7095 ext->lmf.sp = (gssize)ext;
7099 * mono_arch_set_target:
7101 * Set the target architecture the JIT backend should generate code for, in the form
7102 * of a GNU target triplet. Only used in AOT mode.
7105 mono_arch_set_target (char *mtriple)
7107 /* The GNU target triple format is not very well documented */
/* Note: substring matches are cumulative and not mutually exclusive --
 * e.g. "armv7s" also matches the "armv7" test below and so inherits
 * the v5/v6/v7 flags before the dedicated v7s test adds its own. */
7108 if (strstr (mtriple, "armv7")) {
7109 v5_supported = TRUE;
7110 v6_supported = TRUE;
7111 v7_supported = TRUE;
7113 if (strstr (mtriple, "armv6")) {
7114 v5_supported = TRUE;
7115 v6_supported = TRUE;
7117 if (strstr (mtriple, "armv7s")) {
7118 v7s_supported = TRUE;
7120 if (strstr (mtriple, "thumbv7s")) {
7121 v5_supported = TRUE;
7122 v6_supported = TRUE;
7123 v7_supported = TRUE;
7124 v7s_supported = TRUE;
7125 thumb_supported = TRUE;
7126 thumb2_supported = TRUE;
/* Apple targets get at least v5/v6 + Thumb regardless of the CPU part. */
7128 if (strstr (mtriple, "darwin") || strstr (mtriple, "ios")) {
7129 v5_supported = TRUE;
7130 v6_supported = TRUE;
7131 thumb_supported = TRUE;
7134 if (strstr (mtriple, "gnueabi"))
7135 eabi_supported = TRUE;
/* Atomic IR opcodes require ARMv7 (LDREX/STREX-based sequences);
 * everything else falls through to the elided default case. */
7139 mono_arch_opcode_supported (int opcode)
7142 case OP_ATOMIC_EXCHANGE_I4:
7143 case OP_ATOMIC_CAS_I4:
7144 case OP_ATOMIC_ADD_NEW_I4:
7145 return v7_supported;
7151 #if defined(ENABLE_GSHAREDVT)
7153 #include "../../../mono-extensions/mono/mini/mini-arm-gsharedvt.c"
7155 #endif /* !MONOTOUCH */