2 * mini-arm.c: ARM backend for the Mono code generator
5 * Paolo Molaro (lupus@ximian.com)
6 * Dietmar Maurer (dietmar@ximian.com)
8 * (C) 2003 Ximian, Inc.
9 * Copyright 2003-2011 Novell, Inc (http://www.novell.com)
10 * Copyright 2011 Xamarin, Inc (http://www.xamarin.com)
15 #include <mono/metadata/appdomain.h>
16 #include <mono/metadata/debug-helpers.h>
17 #include <mono/utils/mono-mmap.h>
18 #include <mono/utils/mono-hwcap-arm.h>
24 #include "debugger-agent.h"
26 #include "mono/arch/arm/arm-vfp-codegen.h"
28 /* Sanity check: This makes no sense */
29 #if defined(ARM_FPU_NONE) && (defined(ARM_FPU_VFP) || defined(ARM_FPU_VFP_HARD))
30 #error "ARM_FPU_NONE is defined while one of ARM_FPU_VFP/ARM_FPU_VFP_HARD is defined"
34 * IS_SOFT_FLOAT: Is full software floating point used?
35 * IS_HARD_FLOAT: Is full hardware floating point used?
36 * IS_VFP: Is hardware floating point with software ABI used?
38 * These are not necessarily constants, e.g. IS_SOFT_FLOAT and
39 * IS_VFP may delegate to mono_arch_is_soft_float ().
42 #if defined(ARM_FPU_VFP_HARD)
43 #define IS_SOFT_FLOAT (FALSE)
44 #define IS_HARD_FLOAT (TRUE)
46 #elif defined(ARM_FPU_NONE)
47 #define IS_SOFT_FLOAT (mono_arch_is_soft_float ())
48 #define IS_HARD_FLOAT (FALSE)
49 #define IS_VFP (!mono_arch_is_soft_float ())
51 #define IS_SOFT_FLOAT (FALSE)
52 #define IS_HARD_FLOAT (FALSE)
56 #if defined(__ARM_EABI__) && defined(__linux__) && !defined(PLATFORM_ANDROID) && !defined(__native_client__)
57 #define HAVE_AEABI_READ_TP 1
60 #ifdef __native_client_codegen__
61 const guint kNaClAlignment = kNaClAlignmentARM;
62 const guint kNaClAlignmentMask = kNaClAlignmentMaskARM;
63 gint8 nacl_align_byte = -1; /* 0xff */
66 mono_arch_nacl_pad (guint8 *code, int pad)
68 /* Not yet properly implemented. */
69 g_assert_not_reached ();
74 mono_arch_nacl_skip_nops (guint8 *code)
76 /* Not yet properly implemented. */
77 g_assert_not_reached ();
81 #endif /* __native_client_codegen__ */
83 #define ALIGN_TO(val,align) ((((guint64)val) + ((align) - 1)) & ~((align) - 1))
86 void sys_icache_invalidate (void *start, size_t len);
89 /* This mutex protects architecture specific caches */
90 #define mono_mini_arch_lock() EnterCriticalSection (&mini_arch_mutex)
91 #define mono_mini_arch_unlock() LeaveCriticalSection (&mini_arch_mutex)
92 static CRITICAL_SECTION mini_arch_mutex;
94 static gboolean v5_supported = FALSE;
95 static gboolean v6_supported = FALSE;
96 static gboolean v7_supported = FALSE;
97 static gboolean v7s_supported = FALSE;
98 static gboolean thumb_supported = FALSE;
99 static gboolean thumb2_supported = FALSE;
101 * Whenever to use the ARM EABI
103 static gboolean eabi_supported = FALSE;
106 * Whenever to use the iphone ABI extensions:
107 * http://developer.apple.com/library/ios/documentation/Xcode/Conceptual/iPhoneOSABIReference/index.html
108 * Basically, r7 is used as a frame pointer and it should point to the saved r7 + lr.
109 * This is required for debugging/profiling tools to work, but it has some overhead so it should
110 * only be turned on in debug builds.
112 static gboolean iphone_abi = FALSE;
115 * The FPU we are generating code for. This is NOT runtime configurable right now,
116 * since some things like MONO_ARCH_CALLEE_FREGS still depend on defines.
118 static MonoArmFPU arm_fpu;
120 #if defined(ARM_FPU_VFP_HARD)
122 * On armhf, d0-d7 are used for argument passing and d8-d15
123 * must be preserved across calls, which leaves us no room
124 * for scratch registers. So we use d14-d15 but back up their
125 * previous contents to a stack slot before using them - see
126 * mono_arm_emit_vfp_scratch_save/_restore ().
128 static int vfp_scratch1 = ARM_VFP_D14;
129 static int vfp_scratch2 = ARM_VFP_D15;
132 * On armel, d0-d7 do not need to be preserved, so we can
133 * freely make use of them as scratch registers.
135 static int vfp_scratch1 = ARM_VFP_D0;
136 static int vfp_scratch2 = ARM_VFP_D1;
141 static volatile int ss_trigger_var = 0;
143 static gpointer single_step_func_wrapper;
144 static gpointer breakpoint_func_wrapper;
147 * The code generated for sequence points reads from this location, which is
148 * made read-only when single stepping is enabled.
150 static gpointer ss_trigger_page;
152 /* Enabled breakpoints read from this trigger page */
153 static gpointer bp_trigger_page;
155 /* Structure used by the sequence points in AOTed code */
157 gpointer ss_trigger_page;
158 gpointer bp_trigger_page;
159 guint8* bp_addrs [MONO_ZERO_LEN_ARRAY];
164 * floating point support: on ARM it is a mess, there are at least 3
165 * different setups, each of which binary incompat with the other.
166 * 1) FPA: old and ugly, but unfortunately what current distros use
167 * the double binary format has the two words swapped. 8 double registers.
168 * Implemented usually by kernel emulation.
169 * 2) softfloat: the compiler emulates all the fp ops. Usually uses the
170 * ugly swapped double format (I guess a softfloat-vfp exists, too, though).
171 * 3) VFP: the new and actually sensible and useful FP support. Implemented
172 * in HW or kernel-emulated, requires new tools. I think this is what symbian uses.
174 * We do not care about FPA. We will support soft float and VFP.
176 int mono_exc_esp_offset = 0;
178 #define arm_is_imm12(v) ((v) > -4096 && (v) < 4096)
179 #define arm_is_imm8(v) ((v) > -256 && (v) < 256)
180 #define arm_is_fpimm8(v) ((v) >= -1020 && (v) <= 1020)
182 #define LDR_MASK ((0xf << ARMCOND_SHIFT) | (3 << 26) | (1 << 22) | (1 << 20) | (15 << 12))
183 #define LDR_PC_VAL ((ARMCOND_AL << ARMCOND_SHIFT) | (1 << 26) | (0 << 22) | (1 << 20) | (15 << 12))
184 #define IS_LDR_PC(val) (((val) & LDR_MASK) == LDR_PC_VAL)
186 #define ADD_LR_PC_4 ((ARMCOND_AL << ARMCOND_SHIFT) | (1 << 25) | (1 << 23) | (ARMREG_PC << 16) | (ARMREG_LR << 12) | 4)
187 #define MOV_LR_PC ((ARMCOND_AL << ARMCOND_SHIFT) | (1 << 24) | (0xa << 20) | (ARMREG_LR << 12) | ARMREG_PC)
188 //#define DEBUG_IMT 0
190 /* A variant of ARM_LDR_IMM which can handle large offsets */
191 #define ARM_LDR_IMM_GENERAL(code, dreg, basereg, offset, scratch_reg) do { \
192 if (arm_is_imm12 ((offset))) { \
193 ARM_LDR_IMM (code, (dreg), (basereg), (offset)); \
195 g_assert ((scratch_reg) != (basereg)); \
196 code = mono_arm_emit_load_imm (code, (scratch_reg), (offset)); \
197 ARM_LDR_REG_REG (code, (dreg), (basereg), (scratch_reg)); \
201 #define ARM_STR_IMM_GENERAL(code, dreg, basereg, offset, scratch_reg) do { \
202 if (arm_is_imm12 ((offset))) { \
203 ARM_STR_IMM (code, (dreg), (basereg), (offset)); \
205 g_assert ((scratch_reg) != (basereg)); \
206 code = mono_arm_emit_load_imm (code, (scratch_reg), (offset)); \
207 ARM_STR_REG_REG (code, (dreg), (basereg), (scratch_reg)); \
211 static void mono_arch_compute_omit_fp (MonoCompile *cfg);
/*
 * mono_arch_regname:
 * Map an integer register number (0..15) to a printable name via a
 * static lookup table; the range check below guards the table access.
 * NOTE(review): interior lines (braces/return paths) are elided in this
 * excerpt — confirm the out-of-range fallback against the full file.
 */
214 mono_arch_regname (int reg)
216 static const char * rnames[] = {
217 "arm_r0", "arm_r1", "arm_r2", "arm_r3", "arm_v1",
218 "arm_v2", "arm_v3", "arm_v4", "arm_v5", "arm_v6",
219 "arm_v7", "arm_fp", "arm_ip", "arm_sp", "arm_lr",
/* Only the 16 core registers have names in the table. */
222 if (reg >= 0 && reg < 16)
/*
 * mono_arch_fregname:
 * Map a floating point register number (0..31) to a printable name.
 * Same table-lookup pattern as mono_arch_regname above.
 */
228 mono_arch_fregname (int reg)
230 static const char * rnames[] = {
231 "arm_f0", "arm_f1", "arm_f2", "arm_f3", "arm_f4",
232 "arm_f5", "arm_f6", "arm_f7", "arm_f8", "arm_f9",
233 "arm_f10", "arm_f11", "arm_f12", "arm_f13", "arm_f14",
234 "arm_f15", "arm_f16", "arm_f17", "arm_f18", "arm_f19",
235 "arm_f20", "arm_f21", "arm_f22", "arm_f23", "arm_f24",
236 "arm_f25", "arm_f26", "arm_f27", "arm_f28", "arm_f29",
/* 32 single-precision VFP registers. */
239 if (reg >= 0 && reg < 32)
/*
 * emit_big_add:
 * Emit code for dreg = sreg + imm, for an IMM of arbitrary size.
 * Fast path: if IMM fits an ARM rotated 8-bit immediate, a single ADD
 * suffices. Otherwise IMM is materialized into DREG first and then added
 * to SREG — which is why DREG must differ from SREG (asserted below).
 */
247 emit_big_add (guint8 *code, int dreg, int sreg, int imm)
249 int imm8, rot_amount;
250 if ((imm8 = mono_arm_is_rotated_imm8 (imm, &rot_amount)) >= 0) {
251 ARM_ADD_REG_IMM (code, dreg, sreg, imm8, rot_amount);
/* Slow path: dreg doubles as the scratch register for the immediate. */
254 g_assert (dreg != sreg);
255 code = mono_arm_emit_load_imm (code, dreg, imm);
256 ARM_ADD_REG_REG (code, dreg, dreg, sreg);
/*
 * emit_memcpy:
 * Emit a word-by-word copy of SIZE bytes from sreg+soffset to
 * dreg+doffset. Large copies (> 4 pointers) use a runtime loop over
 * r0-r3; smaller copies are fully unrolled using LR as scratch.
 * NOTE(review): the copy is word-granular (4-byte steps); presumably
 * SIZE is word-aligned — the final g_assert (size == 0) enforces this.
 */
261 emit_memcpy (guint8 *code, int size, int dreg, int doffset, int sreg, int soffset)
263 /* we can use r0-r3, since this is called only for incoming args on the stack */
264 if (size > sizeof (gpointer) * 4) {
/* r0 = src cursor, r1 = dst cursor, r2 = remaining byte count. */
266 code = emit_big_add (code, ARMREG_R0, sreg, soffset);
267 code = emit_big_add (code, ARMREG_R1, dreg, doffset);
268 start_loop = code = mono_arm_emit_load_imm (code, ARMREG_R2, size);
269 ARM_LDR_IMM (code, ARMREG_R3, ARMREG_R0, 0);
270 ARM_STR_IMM (code, ARMREG_R3, ARMREG_R1, 0);
271 ARM_ADD_REG_IMM8 (code, ARMREG_R0, ARMREG_R0, 4);
272 ARM_ADD_REG_IMM8 (code, ARMREG_R1, ARMREG_R1, 4);
273 ARM_SUBS_REG_IMM8 (code, ARMREG_R2, ARMREG_R2, 4);
/* Branch back to start_loop while r2 != 0; offset patched just below. */
274 ARM_B_COND (code, ARMCOND_NE, 0);
275 arm_patch (code - 4, start_loop);
/* Unrolled path: usable only if every offset fits an imm12. */
278 if (arm_is_imm12 (doffset) && arm_is_imm12 (doffset + size) &&
279 arm_is_imm12 (soffset) && arm_is_imm12 (soffset + size)) {
281 ARM_LDR_IMM (code, ARMREG_LR, sreg, soffset);
282 ARM_STR_IMM (code, ARMREG_LR, dreg, doffset);
/* Offsets too big for imm12: compute base addresses into r0/r1 first. */
288 code = emit_big_add (code, ARMREG_R0, sreg, soffset);
289 code = emit_big_add (code, ARMREG_R1, dreg, doffset);
290 doffset = soffset = 0;
292 ARM_LDR_IMM (code, ARMREG_LR, ARMREG_R0, soffset);
293 ARM_STR_IMM (code, ARMREG_LR, ARMREG_R1, doffset);
299 g_assert (size == 0);
/*
 * emit_call_reg:
 * Emit an indirect call through REG. Uses BLX when available
 * (condition elided in this excerpt — presumably a v5+ check); the
 * fallback manually sets LR to the return address and jumps by moving
 * REG into PC.
 */
304 emit_call_reg (guint8 *code, int reg)
307 ARM_BLX_REG (code, reg);
309 #ifdef USE_JUMP_TABLES
310 g_assert_not_reached ();
/* Pre-BLX fallback: LR = PC (which reads as current insn + 8). */
312 ARM_MOV_REG_REG (code, ARMREG_LR, ARMREG_PC);
316 ARM_MOV_REG_REG (code, ARMREG_PC, reg);
/*
 * emit_call_seq:
 * Emit a patchable call sequence. With jump tables, a patchable BL is
 * enough. For dynamic methods the call target is embedded as an inline
 * constant pool slot (the NULL word below, patched later) loaded into
 * IP and called indirectly, since dynamic code cannot use normal
 * patch-info relocation of a direct branch.
 */
322 emit_call_seq (MonoCompile *cfg, guint8 *code)
324 #ifdef USE_JUMP_TABLES
325 code = mono_arm_patchable_bl (code, ARMCOND_AL);
327 if (cfg->method->dynamic) {
/* Load the word that follows this instruction pair into IP. */
328 ARM_LDR_IMM (code, ARMREG_IP, ARMREG_PC, 0);
/* Placeholder for the call target; filled in when the target is known. */
330 *(gpointer*)code = NULL;
332 code = emit_call_reg (code, ARMREG_IP);
/*
 * mono_arm_patchable_b:
 * Emit a conditional branch whose target can be patched later.
 * Jump-table builds go through a fresh jumptable entry loaded into IP
 * and a BX; otherwise a plain B with a zero displacement is emitted and
 * patched afterwards via arm_patch ().
 */
341 mono_arm_patchable_b (guint8 *code, int cond)
343 #ifdef USE_JUMP_TABLES
346 jte = mono_jumptable_add_entry ();
347 code = mono_arm_load_jumptable_entry (code, jte, ARMREG_IP);
348 ARM_BX_COND (code, cond, ARMREG_IP);
350 ARM_B_COND (code, cond, 0);
/*
 * mono_arm_patchable_bl:
 * Same as mono_arm_patchable_b, but for calls: the jump-table variant
 * uses BLX (sets LR), the plain variant emits a BL with a zero
 * displacement to be patched later.
 */
356 mono_arm_patchable_bl (guint8 *code, int cond)
358 #ifdef USE_JUMP_TABLES
361 jte = mono_jumptable_add_entry ();
362 code = mono_arm_load_jumptable_entry (code, jte, ARMREG_IP);
363 ARM_BLX_REG_COND (code, cond, ARMREG_IP);
365 ARM_BL_COND (code, cond, 0);
370 #ifdef USE_JUMP_TABLES
/*
 * mono_arm_load_jumptable_entry_addr:
 * Materialize the 32-bit address of jumptable entry JTE into REG using
 * a MOVW/MOVT pair (low halfword, then high halfword) — requires a CPU
 * with movw/movt (ARMv7); this path is only built with USE_JUMP_TABLES.
 */
372 mono_arm_load_jumptable_entry_addr (guint8 *code, gpointer *jte, ARMReg reg)
374 ARM_MOVW_REG_IMM (code, reg, GPOINTER_TO_UINT(jte) & 0xffff);
375 ARM_MOVT_REG_IMM (code, reg, (GPOINTER_TO_UINT(jte) >> 16) & 0xffff);
/*
 * mono_arm_load_jumptable_entry:
 * Load the VALUE stored in jumptable entry JTE into REG: first the
 * entry's address (helper above), then a dereference of that address.
 */
380 mono_arm_load_jumptable_entry (guint8 *code, gpointer* jte, ARMReg reg)
382 code = mono_arm_load_jumptable_entry_addr (code, jte, reg);
383 ARM_LDR_IMM (code, reg, reg, 0);
/*
 * emit_move_return_value:
 * After a call instruction, move the ABI return value into the
 * instruction's dreg. For R4 returns the value is converted to the
 * JIT's double-precision representation (CVTS); softfp double returns
 * arrive in r0/r1 and are assembled with FMDRR, hardfp in d0 (CPYD).
 * NOTE(review): the switch cases are partially elided in this excerpt.
 */
389 emit_move_return_value (MonoCompile *cfg, MonoInst *ins, guint8 *code)
391 switch (ins->opcode) {
394 case OP_FCALL_MEMBASE:
396 if (((MonoCallInst*)ins)->signature->ret->type == MONO_TYPE_R4) {
/* hardfp: single result already in s0/F0, widen to double. */
398 ARM_CVTS (code, ins->dreg, ARM_VFP_F0);
/* softfp: single result in r0, move to VFP then widen. */
400 ARM_FMSR (code, ins->dreg, ARMREG_R0);
401 ARM_CVTS (code, ins->dreg, ins->dreg);
/* double return, hardfp: d0 -> dreg. */
405 ARM_CPYD (code, ins->dreg, ARM_VFP_D0);
/* double return, softfp: r0:r1 -> dreg. */
407 ARM_FMDRR (code, ARMREG_R0, ARMREG_R1, ins->dreg);
420 * Emit code to push an LMF structure on the LMF stack.
421 * On arm, this is intermixed with the initialization of other fields of the structure.
/*
 * emit_save_lmf:
 * Links a stack-allocated MonoLMF (at sp + lmf_offset) into the
 * thread's LMF list. The lmf_addr is obtained, fastest first:
 * __aeabi_read_tp + a TLS offset, an inlined pthread_getspecific for
 * managed-to-native wrappers, or a call to mono_get_lmf_addr ().
 */
424 emit_save_lmf (MonoCompile *cfg, guint8 *code, gint32 lmf_offset)
426 gboolean get_lmf_fast = FALSE;
429 #ifdef HAVE_AEABI_READ_TP
430 gint32 lmf_addr_tls_offset = mono_get_lmf_addr_tls_offset ();
432 if (lmf_addr_tls_offset != -1) {
/* r0 = TLS base after the __aeabi_read_tp call. */
435 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_INTERNAL_METHOD,
436 (gpointer)"__aeabi_read_tp");
437 code = emit_call_seq (cfg, code);
439 ARM_LDR_IMM (code, ARMREG_R0, ARMREG_R0, lmf_addr_tls_offset);
445 if (cfg->method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) {
448 /* Inline mono_get_lmf_addr () */
449 /* jit_tls = pthread_getspecific (mono_jit_tls_id); lmf_addr = &jit_tls->lmf; */
451 /* Load mono_jit_tls_id */
453 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_JIT_TLS_ID, NULL);
454 ARM_LDR_IMM (code, ARMREG_R0, ARMREG_PC, 0);
/* Inline constant-pool slot for the TLS id, patched at link time. */
456 *(gpointer*)code = NULL;
458 ARM_LDR_REG_REG (code, ARMREG_R0, ARMREG_PC, ARMREG_R0);
459 /* call pthread_getspecific () */
460 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_INTERNAL_METHOD,
461 (gpointer)"pthread_getspecific");
462 code = emit_call_seq (cfg, code);
463 /* lmf_addr = &jit_tls->lmf */
/* NOTE: lmf_offset is reused here as the field offset inside MonoJitTlsData. */
464 lmf_offset = G_STRUCT_OFFSET (MonoJitTlsData, lmf);
465 g_assert (arm_is_imm8 (lmf_offset));
466 ARM_ADD_REG_IMM (code, ARMREG_R0, ARMREG_R0, lmf_offset, 0);
/* Slow path: plain call; r0 = lmf_addr on return. */
473 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_INTERNAL_METHOD,
474 (gpointer)"mono_get_lmf_addr");
475 code = emit_call_seq (cfg, code);
477 /* we build the MonoLMF structure on the stack - see mini-arm.h */
478 /* lmf_offset is the offset from the previous stack pointer,
479 * alloc_size is the total stack space allocated, so the offset
480 * of MonoLMF from the current stack ptr is alloc_size - lmf_offset.
481 * The pointer to the struct is put in r1 (new_lmf).
482 * ip is used as scratch
483 * The callee-saved registers are already in the MonoLMF structure
485 code = emit_big_add (code, ARMREG_R1, ARMREG_SP, lmf_offset);
486 /* r0 is the result from mono_get_lmf_addr () */
487 ARM_STR_IMM (code, ARMREG_R0, ARMREG_R1, G_STRUCT_OFFSET (MonoLMF, lmf_addr));
488 /* new_lmf->previous_lmf = *lmf_addr */
489 ARM_LDR_IMM (code, ARMREG_IP, ARMREG_R0, G_STRUCT_OFFSET (MonoLMF, previous_lmf));
490 ARM_STR_IMM (code, ARMREG_IP, ARMREG_R1, G_STRUCT_OFFSET (MonoLMF, previous_lmf));
491 /* *(lmf_addr) = r1 */
/* NOTE(review): this store uses the previous_lmf field offset relative to
 * lmf_addr (r0); verify the offset is intended here rather than 0. */
492 ARM_STR_IMM (code, ARMREG_R1, ARMREG_R0, G_STRUCT_OFFSET (MonoLMF, previous_lmf));
493 /* Skip method (only needed for trampoline LMF frames) */
494 ARM_STR_IMM (code, ARMREG_SP, ARMREG_R1, G_STRUCT_OFFSET (MonoLMF, sp));
495 ARM_STR_IMM (code, ARMREG_FP, ARMREG_R1, G_STRUCT_OFFSET (MonoLMF, fp));
496 /* save the current IP */
497 ARM_MOV_REG_REG (code, ARMREG_IP, ARMREG_PC);
498 ARM_STR_IMM (code, ARMREG_IP, ARMREG_R1, G_STRUCT_OFFSET (MonoLMF, ip));
/* Tell the precise GC that no LMF slot holds a managed reference. */
500 for (i = 0; i < sizeof (MonoLMF); i += sizeof (mgreg_t))
501 mini_gc_set_slot_type_from_fp (cfg, lmf_offset + i, SLOT_NOREF);
/*
 * emit_float_args:
 * For each recorded float argument of the call, load it from its stack
 * slot into the hardware register the ABI expects (fad->hreg). Grows
 * cfg->native_code if the emitted sequence might not fit, keeping
 * *offset in sync with the current emission position.
 */
512 emit_float_args (MonoCompile *cfg, MonoCallInst *inst, guint8 *code, int *max_len, guint *offset)
516 for (list = inst->float_args; list; list = list->next) {
517 FloatArgData *fad = list->data;
518 MonoInst *var = get_vreg_to_inst (cfg, fad->vreg);
/* Can the slot offset be encoded directly in the FLDS instruction? */
519 gboolean imm = arm_is_fpimm8 (var->inst_offset);
521 /* 4+1 insns for emit_big_add () and 1 for FLDS. */
/* Reallocate the code buffer if we could overflow it. */
527 if (*offset + *max_len > cfg->code_size) {
528 cfg->code_size += *max_len;
529 cfg->native_code = g_realloc (cfg->native_code, cfg->code_size);
531 code = cfg->native_code + *offset;
/* Offset too large: compute address into LR, then load from LR+0. */
535 code = emit_big_add (code, ARMREG_LR, var->inst_basereg, var->inst_offset);
536 ARM_FLDS (code, fad->hreg, ARMREG_LR, 0);
538 ARM_FLDS (code, fad->hreg, var->inst_basereg, var->inst_offset);
540 *offset = code - cfg->native_code;
/*
 * mono_arm_emit_vfp_scratch_save:
 * Spill VFP scratch register REG (must be vfp_scratch1/2) to its
 * reserved stack slot so its previous contents survive — needed on
 * armhf where d14/d15 are callee-saved (see comment near vfp_scratch1).
 */
547 mono_arm_emit_vfp_scratch_save (MonoCompile *cfg, guint8 *code, int reg)
551 g_assert (reg == vfp_scratch1 || reg == vfp_scratch2);
/* Slot 0 belongs to scratch1, slot 1 to scratch2. */
553 inst = (MonoInst *) cfg->arch.vfp_scratch_slots [reg == vfp_scratch1 ? 0 : 1];
556 if (!arm_is_fpimm8 (inst->inst_offset)) {
/* Offset doesn't fit the FSTD immediate: go through LR. */
557 code = emit_big_add (code, ARMREG_LR, inst->inst_basereg, inst->inst_offset);
558 ARM_FSTD (code, reg, ARMREG_LR, 0);
560 ARM_FSTD (code, reg, inst->inst_basereg, inst->inst_offset);
/*
 * mono_arm_emit_vfp_scratch_restore:
 * Reload VFP scratch register REG from the stack slot written by
 * mono_arm_emit_vfp_scratch_save (); exact mirror of that function.
 */
567 mono_arm_emit_vfp_scratch_restore (MonoCompile *cfg, guint8 *code, int reg)
571 g_assert (reg == vfp_scratch1 || reg == vfp_scratch2);
573 inst = (MonoInst *) cfg->arch.vfp_scratch_slots [reg == vfp_scratch1 ? 0 : 1];
576 if (!arm_is_fpimm8 (inst->inst_offset)) {
577 code = emit_big_add (code, ARMREG_LR, inst->inst_basereg, inst->inst_offset);
578 ARM_FLDD (code, reg, ARMREG_LR, 0);
580 ARM_FLDD (code, reg, inst->inst_basereg, inst->inst_offset);
589 * Emit code to pop an LMF structure from the LMF stack.
/*
 * emit_restore_lmf:
 * Unlinks the current frame's MonoLMF: *lmf_addr = lmf->previous_lmf.
 * Small offsets address the LMF directly off the frame register; large
 * ones first compute its address into r2.
 */
592 emit_restore_lmf (MonoCompile *cfg, guint8 *code, gint32 lmf_offset)
596 if (lmf_offset < 32) {
597 basereg = cfg->frame_reg;
/* Large offset: r2 = frame_reg + lmf_offset, then offset becomes 0. */
602 code = emit_big_add (code, ARMREG_R2, cfg->frame_reg, lmf_offset);
605 /* ip = previous_lmf */
606 ARM_LDR_IMM (code, ARMREG_IP, basereg, offset + G_STRUCT_OFFSET (MonoLMF, previous_lmf));
608 ARM_LDR_IMM (code, ARMREG_LR, basereg, offset + G_STRUCT_OFFSET (MonoLMF, lmf_addr));
609 /* *(lmf_addr) = previous_lmf */
610 ARM_STR_IMM (code, ARMREG_IP, ARMREG_LR, G_STRUCT_OFFSET (MonoLMF, previous_lmf));
615 #endif /* #ifndef DISABLE_JIT */
618 * mono_arch_get_argument_info:
619 * @csig: a method signature
620 * @param_count: the number of parameters to consider
621 * @arg_info: an array to store the result infos
623 * Gathers information on parameters such as size, alignment and
624 * padding. arg_info should be large enought to hold param_count + 1 entries.
626 * Returns the size of the activation frame.
629 mono_arch_get_argument_info (MonoGenericSharingContext *gsctx, MonoMethodSignature *csig, int param_count, MonoJitArgumentInfo *arg_info)
631 int k, frame_size = 0;
632 guint32 size, align, pad;
/* Struct returns take a hidden return-buffer pointer slot. */
636 t = mini_type_get_underlying_type (gsctx, csig->ret);
637 if (MONO_TYPE_ISSTRUCT (t)) {
638 frame_size += sizeof (gpointer);
642 arg_info [0].offset = offset;
/* 'this' pointer slot (condition elided in this excerpt). */
645 frame_size += sizeof (gpointer);
649 arg_info [0].size = frame_size;
651 for (k = 0; k < param_count; k++) {
652 size = mini_type_stack_size_full (NULL, csig->params [k], &align, csig->pinvoke);
654 /* ignore alignment for now */
/* Round frame_size up to 'align' and record the padding introduced. */
657 frame_size += pad = (align - (frame_size & (align - 1))) & (align - 1);
658 arg_info [k].pad = pad;
660 arg_info [k + 1].pad = 0;
661 arg_info [k + 1].size = size;
663 arg_info [k + 1].offset = offset;
/* Final alignment of the whole activation frame. */
667 align = MONO_ARCH_FRAME_ALIGNMENT;
668 frame_size += pad = (align - (frame_size & (align - 1))) & (align - 1);
669 arg_info [k].pad = pad;
674 #define MAX_ARCH_DELEGATE_PARAMS 3
/*
 * get_delegate_invoke_impl:
 * Generate a small trampoline for delegate invocation. has_target:
 * replace 'this' (r0) with delegate->target and jump to method_ptr.
 * Otherwise: shift the PARAM_COUNT register args down by one (dropping
 * the delegate) and jump to method_ptr. Returns the code start;
 * *code_size receives the emitted length when requested.
 */
677 get_delegate_invoke_impl (gboolean has_target, gboolean param_count, guint32 *code_size)
679 guint8 *code, *start;
682 start = code = mono_global_codeman_reserve (12);
684 /* Replace the this argument with the target */
685 ARM_LDR_IMM (code, ARMREG_IP, ARMREG_R0, G_STRUCT_OFFSET (MonoDelegate, method_ptr));
686 ARM_LDR_IMM (code, ARMREG_R0, ARMREG_R0, G_STRUCT_OFFSET (MonoDelegate, target));
/* Tail-jump to the method through IP. */
687 ARM_MOV_REG_REG (code, ARMREG_PC, ARMREG_IP);
689 g_assert ((code - start) <= 12);
691 mono_arch_flush_icache (start, 12);
/* No-target variant: 2 insns + one MOV per parameter. */
695 size = 8 + param_count * 4;
696 start = code = mono_global_codeman_reserve (size);
698 ARM_LDR_IMM (code, ARMREG_IP, ARMREG_R0, G_STRUCT_OFFSET (MonoDelegate, method_ptr));
699 /* slide down the arguments */
700 for (i = 0; i < param_count; ++i) {
701 ARM_MOV_REG_REG (code, (ARMREG_R0 + i), (ARMREG_R0 + i + 1));
703 ARM_MOV_REG_REG (code, ARMREG_PC, ARMREG_IP);
705 g_assert ((code - start) <= size);
707 mono_arch_flush_icache (start, size);
711 *code_size = code - start;
717 * mono_arch_get_delegate_invoke_impls:
719 * Return a list of MonoAotTrampInfo structures for the delegate invoke impl
/*
 * Builds the has-target variant plus one no-target variant per
 * supported parameter count (0..MAX_ARCH_DELEGATE_PARAMS) for AOT.
 */
723 mono_arch_get_delegate_invoke_impls (void)
731 code = get_delegate_invoke_impl (TRUE, 0, &code_len);
732 res = g_slist_prepend (res, mono_tramp_info_create ("delegate_invoke_impl_has_target", code, code_len, NULL, NULL));
734 for (i = 0; i <= MAX_ARCH_DELEGATE_PARAMS; ++i) {
735 code = get_delegate_invoke_impl (FALSE, i, &code_len);
736 tramp_name = g_strdup_printf ("delegate_invoke_impl_target_%d", i);
737 res = g_slist_prepend (res, mono_tramp_info_create (tramp_name, code, code_len, NULL, NULL));
/*
 * mono_arch_get_delegate_invoke_impl:
 * Return (building and caching on first use) the delegate invoke
 * trampoline matching SIG. AOT builds fetch a precompiled trampoline;
 * JIT builds call get_delegate_invoke_impl (). Caches are protected by
 * the arch lock; struct returns and >MAX_ARCH_DELEGATE_PARAMS or
 * non-regsize parameters are unsupported (returns NULL, presumably —
 * the early-return lines are elided in this excerpt).
 */
745 mono_arch_get_delegate_invoke_impl (MonoMethodSignature *sig, gboolean has_target)
747 guint8 *code, *start;
749 /* FIXME: Support more cases */
750 if (MONO_TYPE_ISSTRUCT (sig->ret))
/* has_target path: single cached trampoline. */
754 static guint8* cached = NULL;
755 mono_mini_arch_lock ();
757 mono_mini_arch_unlock ();
762 start = mono_aot_get_trampoline ("delegate_invoke_impl_has_target");
764 start = get_delegate_invoke_impl (TRUE, 0, NULL);
766 mono_mini_arch_unlock ();
/* no-target path: one cache slot per parameter count. */
769 static guint8* cache [MAX_ARCH_DELEGATE_PARAMS + 1] = {NULL};
772 if (sig->param_count > MAX_ARCH_DELEGATE_PARAMS)
774 for (i = 0; i < sig->param_count; ++i)
775 if (!mono_is_regsize_var (sig->params [i]))
778 mono_mini_arch_lock ();
779 code = cache [sig->param_count];
781 mono_mini_arch_unlock ();
786 char *name = g_strdup_printf ("delegate_invoke_impl_target_%d", sig->param_count);
787 start = mono_aot_get_trampoline (name);
790 start = get_delegate_invoke_impl (FALSE, sig->param_count, NULL);
792 cache [sig->param_count] = start;
793 mono_mini_arch_unlock ();
/*
 * mono_arch_get_this_arg_from_call:
 * 'this' is always passed in r0 on ARM, so just read it from the saved
 * register array.
 */
801 mono_arch_get_this_arg_from_call (mgreg_t *regs, guint8 *code)
803 return (gpointer)regs [ARMREG_R0];
807 * Initialize the cpu to execute managed code.
810 mono_arch_cpu_init (void)
812 #if defined(__APPLE__)
/* Darwin aligns 64-bit ints differently; record the ABI alignment. */
815 i8_align = __alignof__ (gint64);
/*
 * create_function_wrapper:
 * Build a native thunk that captures the full register state into a
 * MonoContext on the stack, calls FUNCTION (ctx) with it, then restores
 * every register — including PC — from the (possibly modified) context.
 * Used for the soft-debugger single-step/breakpoint entry points.
 */
820 create_function_wrapper (gpointer function)
822 guint8 *start, *code;
824 start = code = mono_global_codeman_reserve (96);
827 * Construct the MonoContext structure on the stack.
830 ARM_SUB_REG_IMM8 (code, ARMREG_SP, ARMREG_SP, sizeof (MonoContext));
832 /* save ip, lr and pc into their correspodings ctx.regs slots. */
833 ARM_STR_IMM (code, ARMREG_IP, ARMREG_SP, G_STRUCT_OFFSET (MonoContext, regs) + sizeof (mgreg_t) * ARMREG_IP);
834 ARM_STR_IMM (code, ARMREG_LR, ARMREG_SP, G_STRUCT_OFFSET (MonoContext, regs) + 4 * ARMREG_LR);
/* LR (the caller's resume address) also seeds the saved PC slot. */
835 ARM_STR_IMM (code, ARMREG_LR, ARMREG_SP, G_STRUCT_OFFSET (MonoContext, regs) + 4 * ARMREG_PC);
837 /* save r0..r10 and fp */
838 ARM_ADD_REG_IMM8 (code, ARMREG_IP, ARMREG_SP, G_STRUCT_OFFSET (MonoContext, regs));
/* STM mask 0x0fff = registers r0..r11. */
839 ARM_STM (code, ARMREG_IP, 0x0fff);
841 /* now we can update fp. */
842 ARM_MOV_REG_REG (code, ARMREG_FP, ARMREG_SP);
844 /* make ctx.esp hold the actual value of sp at the beginning of this method. */
845 ARM_ADD_REG_IMM8 (code, ARMREG_R0, ARMREG_FP, sizeof (MonoContext));
846 ARM_STR_IMM (code, ARMREG_R0, ARMREG_IP, 4 * ARMREG_SP);
847 ARM_STR_IMM (code, ARMREG_R0, ARMREG_FP, G_STRUCT_OFFSET (MonoContext, regs) + 4 * ARMREG_SP);
849 /* make ctx.eip hold the address of the call. */
850 ARM_SUB_REG_IMM8 (code, ARMREG_LR, ARMREG_LR, 4);
851 ARM_STR_IMM (code, ARMREG_LR, ARMREG_SP, G_STRUCT_OFFSET (MonoContext, pc));
853 /* r0 now points to the MonoContext */
854 ARM_MOV_REG_REG (code, ARMREG_R0, ARMREG_FP);
/* Load FUNCTION's address: via jump table, or an inline literal word. */
857 #ifdef USE_JUMP_TABLES
859 gpointer *jte = mono_jumptable_add_entry ();
860 code = mono_arm_load_jumptable_entry (code, jte, ARMREG_IP);
864 ARM_LDR_IMM (code, ARMREG_IP, ARMREG_PC, 0);
866 *(gpointer*)code = function;
869 ARM_BLX_REG (code, ARMREG_IP);
871 /* we're back; save ctx.eip and ctx.esp into the corresponding regs slots. */
872 ARM_LDR_IMM (code, ARMREG_R0, ARMREG_FP, G_STRUCT_OFFSET (MonoContext, pc));
873 ARM_STR_IMM (code, ARMREG_R0, ARMREG_FP, G_STRUCT_OFFSET (MonoContext, regs) + 4 * ARMREG_LR);
874 ARM_STR_IMM (code, ARMREG_R0, ARMREG_FP, G_STRUCT_OFFSET (MonoContext, regs) + 4 * ARMREG_PC);
876 /* make ip point to the regs array, then restore everything, including pc. */
877 ARM_ADD_REG_IMM8 (code, ARMREG_IP, ARMREG_FP, G_STRUCT_OFFSET (MonoContext, regs));
/* LDM mask 0xffff restores r0..pc; loading pc resumes execution. */
878 ARM_LDM (code, ARMREG_IP, 0xffff);
880 mono_arch_flush_icache (start, code - start);
886 * Initialize architecture specific code.
/*
 * mono_arch_init:
 * One-time backend setup: arch mutex, soft-debugger wrappers and
 * trigger pages, JIT icall registration, and CPU/FPU feature detection
 * (hwcap-based, overridable via the MONO_CPU_ARCH environment variable).
 */
889 mono_arch_init (void)
891 const char *cpu_arch;
893 InitializeCriticalSection (&mini_arch_mutex);
894 #ifdef MONO_ARCH_SOFT_DEBUG_SUPPORTED
895 if (mini_get_debug_options ()->soft_breakpoints) {
896 single_step_func_wrapper = create_function_wrapper (debugger_agent_single_step_from_context);
897 breakpoint_func_wrapper = create_function_wrapper (debugger_agent_breakpoint_from_context);
/* Trigger pages: reads fault when protection is flipped, which is how
 * breakpoints/single-stepping are signalled. bp page starts faulted. */
902 ss_trigger_page = mono_valloc (NULL, mono_pagesize (), MONO_MMAP_READ|MONO_MMAP_32BIT);
903 bp_trigger_page = mono_valloc (NULL, mono_pagesize (), MONO_MMAP_READ|MONO_MMAP_32BIT);
904 mono_mprotect (bp_trigger_page, mono_pagesize (), 0);
907 mono_aot_register_jit_icall ("mono_arm_throw_exception", mono_arm_throw_exception);
908 mono_aot_register_jit_icall ("mono_arm_throw_exception_by_token", mono_arm_throw_exception_by_token);
909 mono_aot_register_jit_icall ("mono_arm_resume_unwind", mono_arm_resume_unwind);
910 #if defined(ENABLE_GSHAREDVT)
911 mono_aot_register_jit_icall ("mono_arm_start_gsharedvt_call", mono_arm_start_gsharedvt_call);
914 #if defined(__ARM_EABI__)
915 eabi_supported = TRUE;
/* Select the FPU mode fixed at compile time (see MonoArmFPU above). */
918 #if defined(ARM_FPU_VFP_HARD)
919 arm_fpu = MONO_ARM_FPU_VFP_HARD;
921 arm_fpu = MONO_ARM_FPU_VFP;
923 #if defined(ARM_FPU_NONE) && !defined(__APPLE__)
924 /* If we're compiling with a soft float fallback and it
925 turns out that no VFP unit is available, we need to
926 switch to soft float. We don't do this for iOS, since
927 iOS devices always have a VFP unit. */
928 if (!mono_hwcap_arm_has_vfp)
929 arm_fpu = MONO_ARM_FPU_NONE;
933 v5_supported = mono_hwcap_arm_is_v5;
934 v6_supported = mono_hwcap_arm_is_v6;
935 v7_supported = mono_hwcap_arm_is_v7;
936 v7s_supported = mono_hwcap_arm_is_v7s;
938 #if defined(__APPLE__)
939 /* iOS is special-cased here because we don't yet
940 have a way to properly detect CPU features on it. */
941 thumb_supported = TRUE;
944 thumb_supported = mono_hwcap_arm_has_thumb;
945 thumb2_supported = mono_hwcap_arm_has_thumb2;
948 /* Format: armv(5|6|7[s])[-thumb[2]] */
949 cpu_arch = g_getenv ("MONO_CPU_ARCH");
951 /* Do this here so it overrides any detection. */
953 if (strncmp (cpu_arch, "armv", 4) == 0) {
/* Single character compare: '6' >= '5', '7' >= '6', etc. */
954 v5_supported = cpu_arch [4] >= '5';
955 v6_supported = cpu_arch [4] >= '6';
956 v7_supported = cpu_arch [4] >= '7';
957 v7s_supported = strncmp (cpu_arch, "armv7s", 6) == 0;
960 thumb_supported = strstr (cpu_arch, "thumb") != NULL;
961 thumb2_supported = strstr (cpu_arch, "thumb2") != NULL;
966 * Cleanup architecture specific code.
/* Body elided in this excerpt; presumably tears down mini_arch_mutex. */
969 mono_arch_cleanup (void)
974 * This function returns the optimizations supported on this cpu.
977 mono_arch_cpu_optimizations (guint32 *exclude_mask)
979 /* no arm-specific optimizations yet */
985 * This function test for all SIMD functions supported.
987 * Returns a bitmask corresponding to all supported versions.
991 mono_arch_cpu_enumerate_simd_versions (void)
993 /* SIMD is currently unimplemented */
/*
 * mono_arch_opcode_needs_emulation:
 * Report whether OPCODE must be emulated in software. v7s adds hardware
 * integer division (the opcode list is elided in this excerpt).
 */
1001 mono_arch_opcode_needs_emulation (MonoCompile *cfg, int opcode)
1003 if (v7s_supported) {
1017 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
/* Runtime check backing the IS_SOFT_FLOAT/IS_VFP macros above. */
1019 mono_arch_is_soft_float (void)
1021 return arm_fpu == MONO_ARM_FPU_NONE;
1026 mono_arm_is_hard_float (void)
1028 return arm_fpu == MONO_ARM_FPU_VFP_HARD;
/*
 * is_regsize_var:
 * TRUE if T fits in a single 32-bit integer register (pointers,
 * references, small ints, non-valuetype generic insts); used to decide
 * which locals are eligible for global register allocation.
 */
1032 is_regsize_var (MonoGenericSharingContext *gsctx, MonoType *t) {
1035 t = mini_type_get_underlying_type (gsctx, t);
1042 case MONO_TYPE_FNPTR:
1044 case MONO_TYPE_OBJECT:
1045 case MONO_TYPE_STRING:
1046 case MONO_TYPE_CLASS:
1047 case MONO_TYPE_SZARRAY:
1048 case MONO_TYPE_ARRAY:
1050 case MONO_TYPE_GENERICINST:
/* Reference-type instantiations are pointer-sized; valuetypes are not. */
1051 if (!mono_type_generic_inst_is_valuetype (t))
1054 case MONO_TYPE_VALUETYPE:
/*
 * mono_arch_get_allocatable_int_vars:
 * Collect the locals/args that may live in integer registers:
 * non-volatile, non-indirect, register-sized, and with a non-empty
 * live range. Returns them sorted for the global register allocator.
 */
1061 mono_arch_get_allocatable_int_vars (MonoCompile *cfg)
1066 for (i = 0; i < cfg->num_varinfo; i++) {
1067 MonoInst *ins = cfg->varinfo [i];
1068 MonoMethodVar *vmv = MONO_VARINFO (cfg, i);
/* Skip variables that are never live. */
1071 if (vmv->range.first_use.abs_pos >= vmv->range.last_use.abs_pos)
1074 if (ins->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT) || (ins->opcode != OP_LOCAL && ins->opcode != OP_ARG))
1077 /* we can only allocate 32 bit values */
1078 if (is_regsize_var (cfg->generic_sharing_context, ins->inst_vtype)) {
1079 g_assert (MONO_VARINFO (cfg, i)->reg == -1);
1080 g_assert (i == vmv->idx);
1081 vars = mono_varlist_insert_sorted (cfg, vars, vmv, FALSE);
1088 #define USE_EXTRA_TEMPS 0
/*
 * mono_arch_get_global_int_regs:
 * Return the list of callee-saved integer registers the register
 * allocator may assign globally: V1-V3 always, FP when the frame
 * pointer is omitted, V4 or V7 depending on the iPhone ABI, and V5
 * unless it is reserved for rgctx/IMT passing.
 */
1091 mono_arch_get_global_int_regs (MonoCompile *cfg)
1095 mono_arch_compute_omit_fp (cfg);
1098 * FIXME: Interface calls might go through a static rgctx trampoline which
1099 * sets V5, but it doesn't save it, so we need to save it ourselves, and
1102 if (cfg->flags & MONO_CFG_HAS_CALLS)
1103 cfg->uses_rgctx_reg = TRUE;
1105 if (cfg->arch.omit_fp)
1106 regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_FP));
1107 regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_V1));
1108 regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_V2));
1109 regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_V3));
1111 /* V4=R7 is used as a frame pointer, but V7=R10 is preserved */
1112 regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_V7));
1114 regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_V4));
1115 if (!(cfg->compile_aot || cfg->uses_rgctx_reg || COMPILE_LLVM (cfg)))
1116 /* V5 is reserved for passing the vtable/rgctx/IMT method */
1117 regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_V5));
1118 /*regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_V6));*/
1119 /*regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_V7));*/
1125 * mono_arch_regalloc_cost:
1127 * Return the cost, in number of memory references, of the action of
1128 * allocating the variable VMV into a register during global register
/* Body elided in this excerpt. */
1132 mono_arch_regalloc_cost (MonoCompile *cfg, MonoMethodVar *vmv)
1138 #endif /* #ifndef DISABLE_JIT */
1140 #ifndef __GNUC_PREREQ
1141 #define __GNUC_PREREQ(maj, min) (0)
/*
 * mono_arch_flush_icache:
 * Make SIZE bytes of freshly written code at CODE visible to the
 * instruction fetcher. Platform-specific: Apple's
 * sys_icache_invalidate, GCC's __clear_cache, the Android/Linux
 * cacheflush syscall via swi, or nothing (NaCl / cross-compile).
 */
1145 mono_arch_flush_icache (guint8 *code, gint size)
1147 #if defined(__native_client__)
1148 // For Native Client we don't have to flush i-cache here,
1149 // as it's being done by dyncode interface.
1152 #ifdef MONO_CROSS_COMPILE
1154 sys_icache_invalidate (code, size);
1155 #elif __GNUC_PREREQ(4, 1)
1156 __clear_cache (code, code + size);
1157 #elif defined(PLATFORM_ANDROID)
/* __ARM_NR_cacheflush private syscall number. */
1158 const int syscall = 0xf0002;
1166 : "r" (code), "r" (code + size), "r" (syscall)
1167 : "r0", "r1", "r7", "r2"
/* Generic Linux fallback: the sys_cacheflush swi. */
1170 __asm __volatile ("mov r0, %0\n"
1173 "swi 0x9f0002 @ sys_cacheflush"
1175 : "r" (code), "r" (code + size), "r" (0)
1176 : "r0", "r1", "r3" );
1178 #endif /* !__native_client__ */
/* Argument-classification enum values and ArgInfo/CallInfo fields
 * describing how each parameter is passed (register, stack, struct by
 * value/address, gsharedvt). NOTE(review): the enclosing enum/struct
 * headers are elided in this excerpt. */
1189 RegTypeStructByAddr,
1190 /* gsharedvt argument passed by addr in greg */
1191 RegTypeGSharedVtInReg,
1192 /* gsharedvt argument passed by addr on stack */
1193 RegTypeGSharedVtOnStack,
1198 guint16 vtsize; /* in param area */
1202 guint8 size : 4; /* 1, 2, 4, 8, or regs used by RegTypeStructByVal */
1207 guint32 stack_usage;
1208 gboolean vtype_retaddr;
1209 /* The index of the vret arg in the argument list */
1219 /*#define __alignof__(a) sizeof(a)*/
1220 #define __alignof__(type) G_STRUCT_OFFSET(struct { char c; type x; }, x)
1223 #define PARAM_REGS 4
/*
 * add_general:
 * Assign the next integer argument (32-bit when SIMPLE, otherwise a
 * 64-bit pair) to registers r0-r3 or to the stack, advancing *gr and
 * *stack_size. Implements the EABI rules for i8 alignment and the
 * r3+stack split of a 64-bit value.
 */
1226 add_general (guint *gr, guint *stack_size, ArgInfo *ainfo, gboolean simple)
1229 if (*gr > ARMREG_R3) {
/* Out of registers: the value lives in the caller's outgoing area. */
1231 ainfo->offset = *stack_size;
1232 ainfo->reg = ARMREG_SP; /* in the caller */
1233 ainfo->storage = RegTypeBase;
1236 ainfo->storage = RegTypeGeneral;
/* 64-bit values may be split across r3/stack only when i8 alignment is 4. */
1243 split = i8_align == 4;
1248 if (*gr == ARMREG_R3 && split) {
1249 /* first word in r3 and the second on the stack */
1250 ainfo->offset = *stack_size;
1251 ainfo->reg = ARMREG_SP; /* in the caller */
1252 ainfo->storage = RegTypeBaseGen;
1254 } else if (*gr >= ARMREG_R3) {
1255 if (eabi_supported) {
1256 /* darwin aligns longs to 4 byte only */
1257 if (i8_align == 8) {
1262 ainfo->offset = *stack_size;
1263 ainfo->reg = ARMREG_SP; /* in the caller */
1264 ainfo->storage = RegTypeBase;
1267 if (eabi_supported) {
/* EABI: 64-bit values in registers start at an even register. */
1268 if (i8_align == 8 && ((*gr) & 1))
1271 ainfo->storage = RegTypeIRegPair;
/*
 * add_float:
 * Assign the next floating point argument per the AAPCS VFP (hard
 * float) rules: singles may back-fill a spare register left by double
 * alignment, doubles must start at an even single-precision register,
 * and once s0-s15 are exhausted arguments go to the stack.
 */
1280 add_float (guint *fpr, guint *stack_size, ArgInfo *ainfo, gboolean is_double, gint *float_spare)
1283 * If we're calling a function like this:
1285 * void foo(float a, double b, float c)
1287 * We pass a in s0 and b in d1. That leaves us
1288 * with s1 being unused. The armhf ABI recognizes
1289 * this and requires register assignment to then
1290 * use that for the next single-precision arg,
1291 * i.e. c in this example. So float_spare either
1292 * tells us which reg to use for the next single-
1293 * precision arg, or it's -1, meaning use *fpr.
1295 * Note that even though most of the JIT speaks
1296 * double-precision, fpr represents single-
1297 * precision registers.
1299 * See parts 5.5 and 6.1.2 of the AAPCS for how
1303 if (*fpr < ARM_VFP_F16 || (!is_double && *float_spare >= 0)) {
1304 ainfo->storage = RegTypeFP;
1308 * If we're passing a double-precision value
1309 * and *fpr is odd (e.g. it's s1, s3, ...)
1310 * we need to use the next even register. So
1311 * we mark the current *fpr as a spare that
1312 * can be used for the next single-precision
1316 *float_spare = *fpr;
1321 * At this point, we have an even register
1322 * so we assign that and move along.
1326 } else if (*float_spare >= 0) {
1328 * We're passing a single-precision value
1329 * and it looks like a spare single-
1330 * precision register is available. Let's
1334 ainfo->reg = *float_spare;
1338 * If we hit this branch, we're passing a
1339 * single-precision value and we can simply
1340 * use the next available register.
1348 * We've exhausted available floating point
1349 * regs, so pass the rest on the stack.
/* Stack fallback: record caller-frame offset, mirroring add_general. */
1357 ainfo->offset = *stack_size;
1358 ainfo->reg = ARMREG_SP;
1359 ainfo->storage = RegTypeBase;
/*
 * get_call_info:
 * Build a CallInfo describing where every argument and the return value of
 * SIG live (core registers, VFP registers, or caller stack slots) under the
 * calling conventions this backend supports (soft-float, VFP, armhf).
 * Allocated from MP when non-NULL, otherwise g_malloc0'd.
 * NOTE(review): for the mp==NULL path the caller presumably g_free()s the
 * result (see mono_arch_tail_call_supported); freeing code is outside this
 * view — confirm.
 * NOTE(review): lossy extract — interior lines elided; comments only.
 */
1366 get_call_info (MonoGenericSharingContext *gsctx, MonoMemPool *mp, MonoMethodSignature *sig)
1368 guint i, gr, fpr, pstart;
1370 int n = sig->hasthis + sig->param_count;
1371 MonoType *simpletype;
1372 guint32 stack_size = 0;
1374 gboolean is_pinvoke = sig->pinvoke;
1378 cinfo = mono_mempool_alloc0 (mp, sizeof (CallInfo) + (sizeof (ArgInfo) * n));
1380 cinfo = g_malloc0 (sizeof (CallInfo) + (sizeof (ArgInfo) * n));
/* Decide up front whether the return value needs a hidden return-address
 * argument (vtype_retaddr) or can be returned by value in registers. */
1387 t = mini_type_get_underlying_type (gsctx, sig->ret);
1388 if (MONO_TYPE_ISSTRUCT (t)) {
1391 if (is_pinvoke && mono_class_native_size (mono_class_from_mono_type (t), &align) <= sizeof (gpointer)) {
1392 cinfo->ret.storage = RegTypeStructByVal;
1394 cinfo->vtype_retaddr = TRUE;
1396 } else if (!(t->type == MONO_TYPE_GENERICINST && !mono_type_generic_inst_is_valuetype (t)) && mini_is_gsharedvt_type_gsctx (gsctx, t)) {
1397 cinfo->vtype_retaddr = TRUE;
1403 * To simplify get_this_arg_reg () and LLVM integration, emit the vret arg after
1404 * the first argument, allowing 'this' to be always passed in the first arg reg.
1405 * Also do this if the first argument is a reference type, since virtual calls
1406 * are sometimes made using calli without sig->hasthis set, like in the delegate
1409 if (cinfo->vtype_retaddr && !is_pinvoke && (sig->hasthis || (sig->param_count > 0 && MONO_TYPE_IS_REFERENCE (mini_type_get_underlying_type (gsctx, sig->params [0]))))) {
1411 add_general (&gr, &stack_size, cinfo->args + 0, TRUE);
1413 add_general (&gr, &stack_size, &cinfo->args [sig->hasthis + 0], TRUE);
1417 add_general (&gr, &stack_size, &cinfo->ret, TRUE);
1418 cinfo->vret_arg_index = 1;
1422 add_general (&gr, &stack_size, cinfo->args + 0, TRUE);
1426 if (cinfo->vtype_retaddr)
1427 add_general (&gr, &stack_size, &cinfo->ret, TRUE);
/* Assign a location to each remaining formal parameter. */
1430 DEBUG(printf("params: %d\n", sig->param_count));
1431 for (i = pstart; i < sig->param_count; ++i) {
1432 ArgInfo *ainfo = &cinfo->args [n];
1434 if ((sig->call_convention == MONO_CALL_VARARG) && (i == sig->sentinelpos)) {
1435 /* Prevent implicit arguments and sig_cookie from
1436 being passed in registers */
1439 /* Emit the signature cookie just before the implicit arguments */
1440 add_general (&gr, &stack_size, &cinfo->sig_cookie, TRUE);
1442 DEBUG(printf("param %d: ", i));
1443 if (sig->params [i]->byref) {
1444 DEBUG(printf("byref\n"));
1445 add_general (&gr, &stack_size, ainfo, TRUE);
1449 simpletype = mini_type_get_underlying_type (gsctx, sig->params [i]);
1450 switch (simpletype->type) {
1451 case MONO_TYPE_BOOLEAN:
1454 cinfo->args [n].size = 1;
1455 add_general (&gr, &stack_size, ainfo, TRUE);
1458 case MONO_TYPE_CHAR:
1461 cinfo->args [n].size = 2;
1462 add_general (&gr, &stack_size, ainfo, TRUE);
1467 cinfo->args [n].size = 4;
1468 add_general (&gr, &stack_size, ainfo, TRUE);
1474 case MONO_TYPE_FNPTR:
1475 case MONO_TYPE_CLASS:
1476 case MONO_TYPE_OBJECT:
1477 case MONO_TYPE_STRING:
1478 case MONO_TYPE_SZARRAY:
1479 case MONO_TYPE_ARRAY:
1480 cinfo->args [n].size = sizeof (gpointer);
1481 add_general (&gr, &stack_size, ainfo, TRUE);
1484 case MONO_TYPE_GENERICINST:
1485 if (!mono_type_generic_inst_is_valuetype (simpletype)) {
1486 cinfo->args [n].size = sizeof (gpointer);
1487 add_general (&gr, &stack_size, ainfo, TRUE);
1491 if (mini_is_gsharedvt_type_gsctx (gsctx, simpletype)) {
1492 /* gsharedvt arguments are passed by ref */
1493 g_assert (mini_is_gsharedvt_type_gsctx (gsctx, simpletype));
1494 add_general (&gr, &stack_size, ainfo, TRUE);
1495 switch (ainfo->storage) {
1496 case RegTypeGeneral:
1497 ainfo->storage = RegTypeGSharedVtInReg;
1500 ainfo->storage = RegTypeGSharedVtOnStack;
1503 g_assert_not_reached ();
/* Value types / typedbyref: passed by value, possibly split between
 * registers and the stack (RegTypeStructByVal). */
1509 case MONO_TYPE_TYPEDBYREF:
1510 case MONO_TYPE_VALUETYPE: {
1516 if (simpletype->type == MONO_TYPE_TYPEDBYREF) {
1517 size = sizeof (MonoTypedRef);
1518 align = sizeof (gpointer);
1520 MonoClass *klass = mono_class_from_mono_type (sig->params [i]);
1522 size = mono_class_native_size (klass, &align);
1524 size = mini_type_stack_size_full (gsctx, simpletype, &align, FALSE);
1526 DEBUG(printf ("load %d bytes struct\n", size));
1529 align_size += (sizeof (gpointer) - 1);
1530 align_size &= ~(sizeof (gpointer) - 1);
1531 nwords = (align_size + sizeof (gpointer) -1 ) / sizeof (gpointer);
1532 ainfo->storage = RegTypeStructByVal;
1533 ainfo->struct_size = size;
1534 /* FIXME: align stack_size if needed */
1535 if (eabi_supported) {
1536 if (align >= 8 && (gr & 1))
1539 if (gr > ARMREG_R3) {
1541 ainfo->vtsize = nwords;
1543 int rest = ARMREG_R3 - gr + 1;
1544 int n_in_regs = rest >= nwords? nwords: rest;
1546 ainfo->size = n_in_regs;
1547 ainfo->vtsize = nwords - n_in_regs;
1550 nwords -= n_in_regs;
1552 if (sig->call_convention == MONO_CALL_VARARG)
1553 /* This matches the alignment in mono_ArgIterator_IntGetNextArg () */
1554 stack_size = ALIGN_TO (stack_size, align);
1555 ainfo->offset = stack_size;
1556 /*g_print ("offset for arg %d at %d\n", n, stack_size);*/
1557 stack_size += nwords * sizeof (gpointer);
1564 add_general (&gr, &stack_size, ainfo, FALSE);
1571 add_float (&fpr, &stack_size, ainfo, FALSE, &float_spare);
1573 add_general (&gr, &stack_size, ainfo, TRUE);
1581 add_float (&fpr, &stack_size, ainfo, TRUE, &float_spare);
1583 add_general (&gr, &stack_size, ainfo, FALSE);
1588 case MONO_TYPE_MVAR:
1589 /* gsharedvt arguments are passed by ref */
1590 g_assert (mini_is_gsharedvt_type_gsctx (gsctx, simpletype));
1591 add_general (&gr, &stack_size, ainfo, TRUE);
1592 switch (ainfo->storage) {
1593 case RegTypeGeneral:
1594 ainfo->storage = RegTypeGSharedVtInReg;
1597 ainfo->storage = RegTypeGSharedVtOnStack;
1600 g_assert_not_reached ();
1605 g_error ("Can't trampoline 0x%x", sig->params [i]->type);
1609 /* Handle the case where there are no implicit arguments */
1610 if ((sig->call_convention == MONO_CALL_VARARG) && (i == sig->sentinelpos)) {
1611 /* Prevent implicit arguments and sig_cookie from
1612 being passed in registers */
1615 /* Emit the signature cookie just before the implicit arguments */
1616 add_general (&gr, &stack_size, &cinfo->sig_cookie, TRUE);
/* Classify the return type into cinfo->ret. */
1620 simpletype = mini_type_get_underlying_type (gsctx, sig->ret);
1621 switch (simpletype->type) {
1622 case MONO_TYPE_BOOLEAN:
1627 case MONO_TYPE_CHAR:
1633 case MONO_TYPE_FNPTR:
1634 case MONO_TYPE_CLASS:
1635 case MONO_TYPE_OBJECT:
1636 case MONO_TYPE_SZARRAY:
1637 case MONO_TYPE_ARRAY:
1638 case MONO_TYPE_STRING:
1639 cinfo->ret.storage = RegTypeGeneral;
1640 cinfo->ret.reg = ARMREG_R0;
1644 cinfo->ret.storage = RegTypeIRegPair;
1645 cinfo->ret.reg = ARMREG_R0;
1649 cinfo->ret.storage = RegTypeFP;
/* armhf returns FP values in d0/s0; soft ABIs use r0(/r1). */
1651 if (IS_HARD_FLOAT) {
1652 cinfo->ret.reg = ARM_VFP_F0;
1654 cinfo->ret.reg = ARMREG_R0;
1658 case MONO_TYPE_GENERICINST:
1659 if (!mono_type_generic_inst_is_valuetype (simpletype)) {
1660 cinfo->ret.storage = RegTypeGeneral;
1661 cinfo->ret.reg = ARMREG_R0;
1664 // FIXME: Only for variable types
1665 if (mini_is_gsharedvt_type_gsctx (gsctx, simpletype)) {
1666 cinfo->ret.storage = RegTypeStructByAddr;
1667 g_assert (cinfo->vtype_retaddr);
1671 case MONO_TYPE_VALUETYPE:
1672 case MONO_TYPE_TYPEDBYREF:
1673 if (cinfo->ret.storage != RegTypeStructByVal)
1674 cinfo->ret.storage = RegTypeStructByAddr;
1677 case MONO_TYPE_MVAR:
1678 g_assert (mini_is_gsharedvt_type_gsctx (gsctx, simpletype));
1679 cinfo->ret.storage = RegTypeStructByAddr;
1680 g_assert (cinfo->vtype_retaddr);
1682 case MONO_TYPE_VOID:
1685 g_error ("Can't handle as return value 0x%x", sig->ret->type);
1689 /* align stack size to 8 */
1690 DEBUG (printf (" stack size: %d (%d)\n", (stack_size + 15) & ~15, stack_size));
1691 stack_size = (stack_size + 7) & ~7;
1693 cinfo->stack_usage = stack_size;
/*
 * mono_arch_tail_call_supported:
 * Decide whether a tail call from CALLER_SIG to CALLEE_SIG is emittable on
 * this backend. Visible constraints: no (non-full) AOT; the callee must not
 * use more argument stack than the caller; a struct returned via a hidden
 * address (not RegTypeStructByVal) presumably disqualifies it; and callee
 * stack usage is bounded by 16 words (16 * 4) — the rejection branches are
 * elided from this view, confirm.
 * NOTE(review): the CallInfos come from get_call_info (NULL, NULL, ...)
 * which g_malloc0's them; the matching g_free is outside this view.
 */
1699 mono_arch_tail_call_supported (MonoCompile *cfg, MonoMethodSignature *caller_sig, MonoMethodSignature *callee_sig)
1701 MonoType *callee_ret;
1705 if (cfg->compile_aot && !cfg->full_aot)
1706 /* OP_TAILCALL doesn't work with AOT */
1709 c1 = get_call_info (NULL, NULL, caller_sig);
1710 c2 = get_call_info (NULL, NULL, callee_sig);
1713 * Tail calls with more callee stack usage than the caller cannot be supported, since
1714 * the extra stack space would be left on the stack after the tail call.
1716 res = c1->stack_usage >= c2->stack_usage;
1717 callee_ret = mini_replace_type (callee_sig->ret);
1718 if (callee_ret && MONO_TYPE_ISSTRUCT (callee_ret) && c2->ret.storage != RegTypeStructByVal)
1719 /* An address on the callee's stack is passed as the first argument */
1722 if (c2->stack_usage > 16 * 4)
/* debug_omit_fp: debug gate for frame-pointer omission — delegates to
 * mono_debug_count () so the optimization can be selectively disabled
 * when hunting miscompilations. */
1734 debug_omit_fp (void)
1737 return mono_debug_count ();
/*
 * NOTE(review): lossy extract — interior lines elided; comments only.
 * Computes cfg->arch.omit_fp once (guarded by omit_fp_computed), starting
 * from TRUE and clearing it for every condition that forces a frame
 * pointer: explicit disable, debug gate, LMF saving, alloca, exception
 * clauses, a param area, managed varargs, tracing/profiling, and any
 * argument whose location is relative to the incoming stack.
 */
1744 * mono_arch_compute_omit_fp:
1746 * Determine whenever the frame pointer can be eliminated.
1749 mono_arch_compute_omit_fp (MonoCompile *cfg)
1751 MonoMethodSignature *sig;
1752 MonoMethodHeader *header;
1756 if (cfg->arch.omit_fp_computed)
1759 header = cfg->header;
1761 sig = mono_method_signature (cfg->method);
/* Cache the CallInfo on the cfg so later phases reuse it. */
1763 if (!cfg->arch.cinfo)
1764 cfg->arch.cinfo = get_call_info (cfg->generic_sharing_context, cfg->mempool, sig);
1765 cinfo = cfg->arch.cinfo;
1768 * FIXME: Remove some of the restrictions.
1770 cfg->arch.omit_fp = TRUE;
1771 cfg->arch.omit_fp_computed = TRUE;
1773 if (cfg->disable_omit_fp)
1774 cfg->arch.omit_fp = FALSE;
1775 if (!debug_omit_fp ())
1776 cfg->arch.omit_fp = FALSE;
1778 if (cfg->method->save_lmf)
1779 cfg->arch.omit_fp = FALSE;
1781 if (cfg->flags & MONO_CFG_HAS_ALLOCA)
1782 cfg->arch.omit_fp = FALSE;
1783 if (header->num_clauses)
1784 cfg->arch.omit_fp = FALSE;
1785 if (cfg->param_area)
1786 cfg->arch.omit_fp = FALSE;
1787 if (!sig->pinvoke && (sig->call_convention == MONO_CALL_VARARG))
1788 cfg->arch.omit_fp = FALSE;
1789 if ((mono_jit_trace_calls != NULL && mono_trace_eval (cfg->method)) ||
1790 (cfg->prof_options & MONO_PROFILE_ENTER_LEAVE))
1791 cfg->arch.omit_fp = FALSE;
/* Any argument located in the caller's frame pins the frame pointer. */
1792 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
1793 ArgInfo *ainfo = &cinfo->args [i];
1795 if (ainfo->storage == RegTypeBase || ainfo->storage == RegTypeBaseGen || ainfo->storage == RegTypeStructByVal) {
1797 * The stack offset can only be determined when the frame
1800 cfg->arch.omit_fp = FALSE;
/* Tallies local sizes — the use of locals_size is elided from this view. */
1805 for (i = cfg->locals_start; i < cfg->num_varinfo; i++) {
1806 MonoInst *ins = cfg->varinfo [i];
1809 locals_size += mono_type_size (ins->inst_vtype, &ialign);
/*
 * NOTE(review): lossy extract — interior lines elided; comments only.
 * mono_arch_allocate_vars assigns a frame location (register or
 * frame_reg-relative offset) to the return value, seq-point/debugger
 * variables, locals, and incoming arguments, growing `offset` upward
 * (MONO_CFG_HAS_SPILLUP) and recording the total in cfg->stack_offset.
 */
1814 * Set var information according to the calling convention. arm version.
1815 * The locals var stuff should most likely be split in another method.
1818 mono_arch_allocate_vars (MonoCompile *cfg)
1820 MonoMethodSignature *sig;
1821 MonoMethodHeader *header;
1823 int i, offset, size, align, curinst;
1827 sig = mono_method_signature (cfg->method);
1829 if (!cfg->arch.cinfo)
1830 cfg->arch.cinfo = get_call_info (cfg->generic_sharing_context, cfg->mempool, sig);
1831 cinfo = cfg->arch.cinfo;
/* Frame base: SP when the frame pointer is omitted, FP otherwise. */
1833 mono_arch_compute_omit_fp (cfg);
1835 if (cfg->arch.omit_fp)
1836 cfg->frame_reg = ARMREG_SP;
1838 cfg->frame_reg = ARMREG_FP;
1840 cfg->flags |= MONO_CFG_HAS_SPILLUP;
1842 /* allow room for the vararg method args: void* and long/double */
1843 if (mono_jit_trace_calls != NULL && mono_trace_eval (cfg->method))
1844 cfg->param_area = MAX (cfg->param_area, sizeof (gpointer)*8);
1846 header = cfg->header;
1848 /* See mono_arch_get_global_int_regs () */
1849 if (cfg->flags & MONO_CFG_HAS_CALLS)
1850 cfg->uses_rgctx_reg = TRUE;
1852 if (cfg->frame_reg != ARMREG_SP)
1853 cfg->used_int_regs |= 1 << cfg->frame_reg;
1855 if (cfg->compile_aot || cfg->uses_rgctx_reg || COMPILE_LLVM (cfg))
1856 /* V5 is reserved for passing the vtable/rgctx/IMT method */
1857 cfg->used_int_regs |= (1 << ARMREG_V5);
/* Scalar returns live in r0; struct returns are handled below. */
1861 if (!MONO_TYPE_ISSTRUCT (sig->ret) && !cinfo->vtype_retaddr) {
1862 if (sig->ret->type != MONO_TYPE_VOID) {
1863 cfg->ret->opcode = OP_REGVAR;
1864 cfg->ret->inst_c0 = ARMREG_R0;
1867 /* local vars are at a positive offset from the stack pointer */
1869 * also note that if the function uses alloca, we use FP
1870 * to point at the local variables.
1872 offset = 0; /* linkage area */
1873 /* align the offset to 16 bytes: not sure this is needed here */
1875 //offset &= ~(8 - 1);
1877 /* add parameter area size for called functions */
1878 offset += cfg->param_area;
1881 if (cfg->flags & MONO_CFG_HAS_FPOUT)
1884 /* allow room to save the return value */
1885 if (mono_jit_trace_calls != NULL && mono_trace_eval (cfg->method))
1888 /* the MonoLMF structure is stored just below the stack pointer */
1889 if (cinfo->ret.storage == RegTypeStructByVal) {
1890 cfg->ret->opcode = OP_REGOFFSET;
1891 cfg->ret->inst_basereg = cfg->frame_reg;
1892 offset += sizeof (gpointer) - 1;
1893 offset &= ~(sizeof (gpointer) - 1);
1894 cfg->ret->inst_offset = - offset;
1895 offset += sizeof(gpointer);
1896 } else if (cinfo->vtype_retaddr) {
1897 ins = cfg->vret_addr;
1898 offset += sizeof(gpointer) - 1;
1899 offset &= ~(sizeof(gpointer) - 1);
1900 ins->inst_offset = offset;
1901 ins->opcode = OP_REGOFFSET;
1902 ins->inst_basereg = cfg->frame_reg;
1903 if (G_UNLIKELY (cfg->verbose_level > 1)) {
1904 printf ("vret_addr =");
1905 mono_print_ins (cfg->vret_addr);
1907 offset += sizeof(gpointer);
1910 /* Allocate these first so they have a small offset, OP_SEQ_POINT depends on this */
1911 if (cfg->arch.seq_point_info_var) {
1914 ins = cfg->arch.seq_point_info_var;
1918 offset += align - 1;
1919 offset &= ~(align - 1);
1920 ins->opcode = OP_REGOFFSET;
1921 ins->inst_basereg = cfg->frame_reg;
1922 ins->inst_offset = offset;
1925 ins = cfg->arch.ss_trigger_page_var;
1928 offset += align - 1;
1929 offset &= ~(align - 1);
1930 ins->opcode = OP_REGOFFSET;
1931 ins->inst_basereg = cfg->frame_reg;
1932 ins->inst_offset = offset;
/* Soft-breakpoint variables (see mono_arch_create_vars). */
1936 if (cfg->arch.seq_point_read_var) {
1939 ins = cfg->arch.seq_point_read_var;
1943 offset += align - 1;
1944 offset &= ~(align - 1);
1945 ins->opcode = OP_REGOFFSET;
1946 ins->inst_basereg = cfg->frame_reg;
1947 ins->inst_offset = offset;
1950 ins = cfg->arch.seq_point_ss_method_var;
1953 offset += align - 1;
1954 offset &= ~(align - 1);
1955 ins->opcode = OP_REGOFFSET;
1956 ins->inst_basereg = cfg->frame_reg;
1957 ins->inst_offset = offset;
1960 ins = cfg->arch.seq_point_bp_method_var;
1963 offset += align - 1;
1964 offset &= ~(align - 1);
1965 ins->opcode = OP_REGOFFSET;
1966 ins->inst_basereg = cfg->frame_reg;
1967 ins->inst_offset = offset;
1971 if (cfg->has_atomic_exchange_i4 || cfg->has_atomic_cas_i4 || cfg->has_atomic_add_new_i4) {
1972 /* Allocate a temporary used by the atomic ops */
1976 /* Allocate a local slot to hold the sig cookie address */
1977 offset += align - 1;
1978 offset &= ~(align - 1);
1979 cfg->arch.atomic_tmp_offset = offset;
1982 cfg->arch.atomic_tmp_offset = -1;
1985 cfg->locals_min_stack_offset = offset;
/* Lay out the method's locals. */
1987 curinst = cfg->locals_start;
1988 for (i = curinst; i < cfg->num_varinfo; ++i) {
1991 ins = cfg->varinfo [i];
1992 if ((ins->flags & MONO_INST_IS_DEAD) || ins->opcode == OP_REGVAR || ins->opcode == OP_REGOFFSET)
1995 t = ins->inst_vtype;
1996 if (cfg->gsharedvt && mini_is_gsharedvt_variable_type (cfg, t))
1999 /* inst->backend.is_pinvoke indicates native sized value types, this is used by the
2000 * pinvoke wrappers when they call functions returning structure */
2001 if (ins->backend.is_pinvoke && MONO_TYPE_ISSTRUCT (t) && t->type != MONO_TYPE_TYPEDBYREF) {
2002 size = mono_class_native_size (mono_class_from_mono_type (t), &ualign);
2006 size = mono_type_size (t, &align);
2008 /* FIXME: if a structure is misaligned, our memcpy doesn't work,
2009 * since it loads/stores misaligned words, which don't do the right thing.
2011 if (align < 4 && size >= 4)
2013 if (ALIGN_TO (offset, align) > ALIGN_TO (offset, 4))
2014 mini_gc_set_slot_type_from_fp (cfg, ALIGN_TO (offset, 4), SLOT_NOREF);
2015 offset += align - 1;
2016 offset &= ~(align - 1);
2017 ins->opcode = OP_REGOFFSET;
2018 ins->inst_offset = offset;
2019 ins->inst_basereg = cfg->frame_reg;
2021 //g_print ("allocating local %d to %d\n", i, inst->inst_offset);
2024 cfg->locals_max_stack_offset = offset;
/* Lay out the incoming arguments ('this' first, then formals). */
2028 ins = cfg->args [curinst];
2029 if (ins->opcode != OP_REGVAR) {
2030 ins->opcode = OP_REGOFFSET;
2031 ins->inst_basereg = cfg->frame_reg;
2032 offset += sizeof (gpointer) - 1;
2033 offset &= ~(sizeof (gpointer) - 1);
2034 ins->inst_offset = offset;
2035 offset += sizeof (gpointer);
2040 if (sig->call_convention == MONO_CALL_VARARG) {
2044 /* Allocate a local slot to hold the sig cookie address */
2045 offset += align - 1;
2046 offset &= ~(align - 1);
2047 cfg->sig_cookie = offset;
2051 for (i = 0; i < sig->param_count; ++i) {
2052 ins = cfg->args [curinst];
2054 if (ins->opcode != OP_REGVAR) {
2055 ins->opcode = OP_REGOFFSET;
2056 ins->inst_basereg = cfg->frame_reg;
2057 size = mini_type_stack_size_full (cfg->generic_sharing_context, sig->params [i], &ualign, sig->pinvoke);
2059 /* FIXME: if a structure is misaligned, our memcpy doesn't work,
2060 * since it loads/stores misaligned words, which don't do the right thing.
2062 if (align < 4 && size >= 4)
2064 /* The code in the prolog () stores words when storing vtypes received in a register */
2065 if (MONO_TYPE_ISSTRUCT (sig->params [i]))
2067 if (ALIGN_TO (offset, align) > ALIGN_TO (offset, 4))
2068 mini_gc_set_slot_type_from_fp (cfg, ALIGN_TO (offset, 4), SLOT_NOREF);
2069 offset += align - 1;
2070 offset &= ~(align - 1);
2071 ins->inst_offset = offset;
2077 /* align the offset to 8 bytes */
2078 if (ALIGN_TO (offset, 8) > ALIGN_TO (offset, 4))
2079 mini_gc_set_slot_type_from_fp (cfg, ALIGN_TO (offset, 4), SLOT_NOREF);
2084 cfg->stack_offset = offset;
/*
 * mono_arch_create_vars:
 * Create the backend-specific compile-time variables: two VFP scratch
 * slots for armhf, the hidden vret address argument when the return value
 * is written through a pointer, and the sequence-point variables used by
 * the (soft-)debugger. All are flagged MONO_INST_VOLATILE so the register
 * allocator keeps them in memory.
 * NOTE(review): lossy extract — interior lines elided; comments only.
 */
2088 mono_arch_create_vars (MonoCompile *cfg)
2090 MonoMethodSignature *sig;
2094 sig = mono_method_signature (cfg->method);
2096 if (!cfg->arch.cinfo)
2097 cfg->arch.cinfo = get_call_info (cfg->generic_sharing_context, cfg->mempool, sig);
2098 cinfo = cfg->arch.cinfo;
/* armhf: reserve two double-sized scratch slots for FP shuffling. */
2100 if (IS_HARD_FLOAT) {
2101 for (i = 0; i < 2; i++) {
2102 MonoInst *inst = mono_compile_create_var (cfg, &mono_defaults.double_class->byval_arg, OP_LOCAL);
2103 inst->flags |= MONO_INST_VOLATILE;
2105 cfg->arch.vfp_scratch_slots [i] = (gpointer) inst;
2109 if (cinfo->ret.storage == RegTypeStructByVal)
2110 cfg->ret_var_is_local = TRUE;
2112 if (cinfo->vtype_retaddr) {
2113 cfg->vret_addr = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_ARG);
2114 if (G_UNLIKELY (cfg->verbose_level > 1)) {
2115 printf ("vret_addr = ");
2116 mono_print_ins (cfg->vret_addr);
/* Sequence points: soft breakpoints use three pointer-sized locals;
 * AOT uses an info var plus a trigger-page var. */
2120 if (cfg->gen_seq_points) {
2121 if (cfg->soft_breakpoints) {
2122 MonoInst *ins = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
2123 ins->flags |= MONO_INST_VOLATILE;
2124 cfg->arch.seq_point_read_var = ins;
2126 ins = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
2127 ins->flags |= MONO_INST_VOLATILE;
2128 cfg->arch.seq_point_ss_method_var = ins;
2130 ins = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
2131 ins->flags |= MONO_INST_VOLATILE;
2132 cfg->arch.seq_point_bp_method_var = ins;
2134 g_assert (!cfg->compile_aot);
2135 } else if (cfg->compile_aot) {
2136 MonoInst *ins = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
2137 ins->flags |= MONO_INST_VOLATILE;
2138 cfg->arch.seq_point_info_var = ins;
2140 /* Allocate a separate variable for this to save 1 load per seq point */
2141 ins = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
2142 ins->flags |= MONO_INST_VOLATILE;
2143 cfg->arch.ss_trigger_page_var = ins;
/*
 * emit_sig_cookie:
 * For managed varargs, store a signature "cookie" (a trimmed copy of the
 * call signature starting at the sentinel) into its stack slot so
 * mono_ArgIterator can walk the variable arguments. No-op for tail calls.
 * NOTE(review): tmp_sig is duplicated from the mempool-free
 * mono_metadata_signature_dup; its ownership/freeing is outside this view.
 */
2149 emit_sig_cookie (MonoCompile *cfg, MonoCallInst *call, CallInfo *cinfo)
2151 MonoMethodSignature *tmp_sig;
2154 if (call->tail_call)
2157 g_assert (cinfo->sig_cookie.storage == RegTypeBase);
2160 * mono_ArgIterator_Setup assumes the signature cookie is
2161 * passed first and all the arguments which were before it are
2162 * passed on the stack after the signature. So compensate by
2163 * passing a different signature.
2165 tmp_sig = mono_metadata_signature_dup (call->signature);
2166 tmp_sig->param_count -= call->signature->sentinelpos;
2167 tmp_sig->sentinelpos = 0;
2168 memcpy (tmp_sig->params, call->signature->params + call->signature->sentinelpos, tmp_sig->param_count * sizeof (MonoType*));
2170 sig_reg = mono_alloc_ireg (cfg);
2171 MONO_EMIT_NEW_SIGNATURECONST (cfg, sig_reg, tmp_sig);
2173 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, ARMREG_SP, cinfo->sig_cookie.offset, sig_reg);
/*
 * mono_arch_get_llvm_call_info:
 * Translate this backend's CallInfo into the LLVMCallInfo the LLVM
 * backend understands. Conventions LLVM cannot express here (vtype-by-val
 * on the stack, unknown storage kinds) disable LLVM for the method via
 * cfg->disable_llvm with a diagnostic message.
 * NOTE(review): lossy extract — interior lines elided; comments only.
 */
2178 mono_arch_get_llvm_call_info (MonoCompile *cfg, MonoMethodSignature *sig)
2183 LLVMCallInfo *linfo;
2185 n = sig->param_count + sig->hasthis;
2187 cinfo = get_call_info (cfg->generic_sharing_context, cfg->mempool, sig);
2189 linfo = mono_mempool_alloc0 (cfg->mempool, sizeof (LLVMCallInfo) + (sizeof (LLVMArgInfo) * n));
2192 * LLVM always uses the native ABI while we use our own ABI, the
2193 * only difference is the handling of vtypes:
2194 * - we only pass/receive them in registers in some cases, and only
2195 * in 1 or 2 integer registers.
2197 if (cinfo->vtype_retaddr) {
2198 /* Vtype returned using a hidden argument */
2199 linfo->ret.storage = LLVMArgVtypeRetAddr;
2200 linfo->vret_arg_index = cinfo->vret_arg_index;
2201 } else if (cinfo->ret.storage != RegTypeGeneral && cinfo->ret.storage != RegTypeNone && cinfo->ret.storage != RegTypeFP && cinfo->ret.storage != RegTypeIRegPair) {
2202 cfg->exception_message = g_strdup ("unknown ret conv");
2203 cfg->disable_llvm = TRUE;
/* Map each argument's storage onto an LLVMArg* kind. */
2207 for (i = 0; i < n; ++i) {
2208 ainfo = cinfo->args + i;
2210 linfo->args [i].storage = LLVMArgNone;
2212 switch (ainfo->storage) {
2213 case RegTypeGeneral:
2214 case RegTypeIRegPair:
2216 linfo->args [i].storage = LLVMArgInIReg;
2218 case RegTypeStructByVal:
2219 // FIXME: Passing entirely on the stack or split reg/stack
2220 if (ainfo->vtsize == 0 && ainfo->size <= 2) {
2221 linfo->args [i].storage = LLVMArgVtypeInReg;
2222 linfo->args [i].pair_storage [0] = LLVMArgInIReg;
2223 if (ainfo->size == 2)
2224 linfo->args [i].pair_storage [1] = LLVMArgInIReg;
2226 linfo->args [i].pair_storage [1] = LLVMArgNone;
2228 cfg->exception_message = g_strdup_printf ("vtype-by-val on stack");
2229 cfg->disable_llvm = TRUE;
2233 cfg->exception_message = g_strdup_printf ("ainfo->storage (%d)", ainfo->storage);
2234 cfg->disable_llvm = TRUE;
/*
 * mono_arch_emit_call:
 * Lower a call's arguments into the IR that moves each value to the
 * location chosen by get_call_info: register moves for core-register
 * args (including i8/u8 pairs and soft-float values), memory stores for
 * stack args, OP_OUTARG_VT for by-value structs, and a deferred
 * float-arg list for armhf (see the long comment below). Also wires up
 * the hidden vret argument and the vararg signature cookie.
 * NOTE(review): lossy extract — interior lines elided; comments only.
 */
2244 mono_arch_emit_call (MonoCompile *cfg, MonoCallInst *call)
2247 MonoMethodSignature *sig;
2251 sig = call->signature;
2252 n = sig->param_count + sig->hasthis;
2254 cinfo = get_call_info (cfg->generic_sharing_context, NULL, sig);
2256 for (i = 0; i < n; ++i) {
2257 ArgInfo *ainfo = cinfo->args + i;
/* 'this' has no entry in sig->params; treat it as a native int. */
2260 if (i >= sig->hasthis)
2261 t = sig->params [i - sig->hasthis];
2263 t = &mono_defaults.int_class->byval_arg;
2264 t = mini_type_get_underlying_type (cfg->generic_sharing_context, t);
2266 if ((sig->call_convention == MONO_CALL_VARARG) && (i == sig->sentinelpos)) {
2267 /* Emit the signature cookie just before the implicit arguments */
2268 emit_sig_cookie (cfg, call, cinfo);
2271 in = call->args [i];
2273 switch (ainfo->storage) {
2274 case RegTypeGeneral:
2275 case RegTypeIRegPair:
/* 64-bit integer: move each 32-bit half (vregs dreg+1/dreg+2) into
 * consecutive core registers. */
2276 if (!t->byref && ((t->type == MONO_TYPE_I8) || (t->type == MONO_TYPE_U8))) {
2277 MONO_INST_NEW (cfg, ins, OP_MOVE);
2278 ins->dreg = mono_alloc_ireg (cfg);
2279 ins->sreg1 = in->dreg + 1;
2280 MONO_ADD_INS (cfg->cbb, ins);
2281 mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, ainfo->reg, FALSE);
2283 MONO_INST_NEW (cfg, ins, OP_MOVE);
2284 ins->dreg = mono_alloc_ireg (cfg);
2285 ins->sreg1 = in->dreg + 2;
2286 MONO_ADD_INS (cfg->cbb, ins);
2287 mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, ainfo->reg + 1, FALSE);
2288 } else if (!t->byref && ((t->type == MONO_TYPE_R8) || (t->type == MONO_TYPE_R4))) {
2289 if (ainfo->size == 4) {
2290 if (IS_SOFT_FLOAT) {
2291 /* mono_emit_call_args () have already done the r8->r4 conversion */
2292 /* The converted value is in an int vreg */
2293 MONO_INST_NEW (cfg, ins, OP_MOVE);
2294 ins->dreg = mono_alloc_ireg (cfg);
2295 ins->sreg1 = in->dreg;
2296 MONO_ADD_INS (cfg->cbb, ins);
2297 mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, ainfo->reg, FALSE);
/* VFP-with-soft-ABI: bounce the float through the param area to get
 * it from an FP vreg into a core register. */
2301 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORER4_MEMBASE_REG, ARMREG_SP, (cfg->param_area - 8), in->dreg);
2302 creg = mono_alloc_ireg (cfg);
2303 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOAD_MEMBASE, creg, ARMREG_SP, (cfg->param_area - 8));
2304 mono_call_inst_add_outarg_reg (cfg, call, creg, ainfo->reg, FALSE);
2307 if (IS_SOFT_FLOAT) {
2308 MONO_INST_NEW (cfg, ins, OP_FGETLOW32);
2309 ins->dreg = mono_alloc_ireg (cfg);
2310 ins->sreg1 = in->dreg;
2311 MONO_ADD_INS (cfg->cbb, ins);
2312 mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, ainfo->reg, FALSE);
2314 MONO_INST_NEW (cfg, ins, OP_FGETHIGH32);
2315 ins->dreg = mono_alloc_ireg (cfg);
2316 ins->sreg1 = in->dreg;
2317 MONO_ADD_INS (cfg->cbb, ins);
2318 mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, ainfo->reg + 1, FALSE);
/* Double via the param-area bounce: two core registers. */
2322 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORER8_MEMBASE_REG, ARMREG_SP, (cfg->param_area - 8), in->dreg);
2323 creg = mono_alloc_ireg (cfg);
2324 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOAD_MEMBASE, creg, ARMREG_SP, (cfg->param_area - 8));
2325 mono_call_inst_add_outarg_reg (cfg, call, creg, ainfo->reg, FALSE);
2326 creg = mono_alloc_ireg (cfg);
2327 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOAD_MEMBASE, creg, ARMREG_SP, (cfg->param_area - 8 + 4));
2328 mono_call_inst_add_outarg_reg (cfg, call, creg, ainfo->reg + 1, FALSE);
2331 cfg->flags |= MONO_CFG_HAS_FPOUT;
/* Plain single-word value in a core register. */
2333 MONO_INST_NEW (cfg, ins, OP_MOVE);
2334 ins->dreg = mono_alloc_ireg (cfg);
2335 ins->sreg1 = in->dreg;
2336 MONO_ADD_INS (cfg->cbb, ins);
2338 mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, ainfo->reg, FALSE);
2341 case RegTypeStructByAddr:
2344 /* FIXME: where si the data allocated? */
2345 arg->backend.reg3 = ainfo->reg;
2346 call->used_iregs |= 1 << ainfo->reg;
2347 g_assert_not_reached ();
2350 case RegTypeStructByVal:
2351 case RegTypeGSharedVtInReg:
2352 case RegTypeGSharedVtOnStack:
/* Defer struct marshalling to mono_arch_emit_outarg_vt, passing it a
 * private copy of the ArgInfo. */
2353 MONO_INST_NEW (cfg, ins, OP_OUTARG_VT);
2354 ins->opcode = OP_OUTARG_VT;
2355 ins->sreg1 = in->dreg;
2356 ins->klass = in->klass;
2357 ins->inst_p0 = call;
2358 ins->inst_p1 = mono_mempool_alloc (cfg->mempool, sizeof (ArgInfo));
2359 memcpy (ins->inst_p1, ainfo, sizeof (ArgInfo));
2360 mono_call_inst_add_outarg_vt (cfg, call, ins);
2361 MONO_ADD_INS (cfg->cbb, ins);
/* Stack-resident argument (RegTypeBase): plain store. */
2364 if (!t->byref && ((t->type == MONO_TYPE_I8) || (t->type == MONO_TYPE_U8))) {
2365 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, ARMREG_SP, ainfo->offset, in->dreg);
2366 } else if (!t->byref && ((t->type == MONO_TYPE_R4) || (t->type == MONO_TYPE_R8))) {
2367 if (t->type == MONO_TYPE_R8) {
2368 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORER8_MEMBASE_REG, ARMREG_SP, ainfo->offset, in->dreg);
2371 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, ARMREG_SP, ainfo->offset, in->dreg);
2373 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORER4_MEMBASE_REG, ARMREG_SP, ainfo->offset, in->dreg);
2376 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, ARMREG_SP, ainfo->offset, in->dreg);
2379 case RegTypeBaseGen:
/* 64-bit value split between r3 and the first stack slot; half order
 * depends on endianness. */
2380 if (!t->byref && ((t->type == MONO_TYPE_I8) || (t->type == MONO_TYPE_U8))) {
2381 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, ARMREG_SP, ainfo->offset, (G_BYTE_ORDER == G_BIG_ENDIAN) ? in->dreg + 1 : in->dreg + 2);
2382 MONO_INST_NEW (cfg, ins, OP_MOVE);
2383 ins->dreg = mono_alloc_ireg (cfg);
2384 ins->sreg1 = G_BYTE_ORDER == G_BIG_ENDIAN ? in->dreg + 2 : in->dreg + 1;
2385 MONO_ADD_INS (cfg->cbb, ins);
2386 mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, ARMREG_R3, FALSE);
2387 } else if (!t->byref && (t->type == MONO_TYPE_R8)) {
2390 /* This should work for soft-float as well */
2392 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORER8_MEMBASE_REG, ARMREG_SP, (cfg->param_area - 8), in->dreg);
2393 creg = mono_alloc_ireg (cfg);
2394 mono_call_inst_add_outarg_reg (cfg, call, creg, ARMREG_R3, FALSE);
2395 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOAD_MEMBASE, creg, ARMREG_SP, (cfg->param_area - 8));
2396 creg = mono_alloc_ireg (cfg);
2397 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOAD_MEMBASE, creg, ARMREG_SP, (cfg->param_area - 4));
2398 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, ARMREG_SP, ainfo->offset, creg);
2399 cfg->flags |= MONO_CFG_HAS_FPOUT;
2401 g_assert_not_reached ();
/* RegTypeFP (armhf): double goes straight to a VFP register... */
2405 int fdreg = mono_alloc_freg (cfg);
2407 if (ainfo->size == 8) {
2408 MONO_INST_NEW (cfg, ins, OP_FMOVE);
2409 ins->sreg1 = in->dreg;
2411 MONO_ADD_INS (cfg->cbb, ins);
2413 mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, ainfo->reg, TRUE);
2418 * Mono's register allocator doesn't speak single-precision registers that
2419 * overlap double-precision registers (i.e. armhf). So we have to work around
2420 * the register allocator and load the value from memory manually.
2422 * So we create a variable for the float argument and an instruction to store
2423 * the argument into the variable. We then store the list of these arguments
2424 * in cfg->float_args. This list is then used by emit_float_args later to
2425 * pass the arguments in the various call opcodes.
2427 * This is not very nice, and we should really try to fix the allocator.
2430 MonoInst *float_arg = mono_compile_create_var (cfg, &mono_defaults.single_class->byval_arg, OP_LOCAL);
2432 /* Make sure the instruction isn't seen as pointless and removed.
2434 float_arg->flags |= MONO_INST_VOLATILE;
2436 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, float_arg->dreg, in->dreg);
2438 /* We use the dreg to look up the instruction later. The hreg is used to
2439 * emit the instruction that loads the value into the FP reg.
2441 fad = mono_mempool_alloc0 (cfg->mempool, sizeof (FloatArgData));
2442 fad->vreg = float_arg->dreg;
2443 fad->hreg = ainfo->reg;
2445 call->float_args = g_slist_append_mempool (cfg->mempool, call->float_args, fad);
2448 call->used_iregs |= 1 << ainfo->reg;
2449 cfg->flags |= MONO_CFG_HAS_FPOUT;
2453 g_assert_not_reached ();
2457 /* Handle the case where there are no implicit arguments */
2458 if (!sig->pinvoke && (sig->call_convention == MONO_CALL_VARARG) && (n == sig->sentinelpos))
2459 emit_sig_cookie (cfg, call, cinfo);
2461 if (cinfo->ret.storage == RegTypeStructByVal) {
2462 /* The JIT will transform this into a normal call */
2463 call->vret_in_reg = TRUE;
2464 } else if (cinfo->vtype_retaddr) {
/* Pass the vret buffer address in the register reserved for it. */
2466 MONO_INST_NEW (cfg, vtarg, OP_MOVE);
2467 vtarg->sreg1 = call->vret_var->dreg;
2468 vtarg->dreg = mono_alloc_preg (cfg);
2469 MONO_ADD_INS (cfg->cbb, vtarg);
2471 mono_call_inst_add_outarg_reg (cfg, call, vtarg->dreg, cinfo->ret.reg, FALSE);
2474 call->stack_usage = cinfo->stack_usage;
/*
 * mono_arch_emit_outarg_vt:
 * Emit the IR that passes one by-value struct argument (OP_OUTARG_VT,
 * see mono_arch_emit_call): gsharedvt args pass the address in a register
 * or on the stack; otherwise the first ainfo->size words are loaded into
 * core registers (with sub-word structs assembled byte-by-byte to avoid
 * over-reading) and any overflow is memcpy'd to the stack area.
 * NOTE(review): lossy extract — interior lines elided; comments only.
 */
2480 mono_arch_emit_outarg_vt (MonoCompile *cfg, MonoInst *ins, MonoInst *src)
2482 MonoCallInst *call = (MonoCallInst*)ins->inst_p0;
2483 ArgInfo *ainfo = ins->inst_p1;
2484 int ovf_size = ainfo->vtsize;
2485 int doffset = ainfo->offset;
2486 int struct_size = ainfo->struct_size;
2487 int i, soffset, dreg, tmpreg;
2489 if (ainfo->storage == RegTypeGSharedVtInReg) {
2491 mono_call_inst_add_outarg_reg (cfg, call, src->dreg, ainfo->reg, FALSE);
2494 if (ainfo->storage == RegTypeGSharedVtOnStack) {
2495 /* Pass by addr on stack */
2496 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, ARMREG_SP, ainfo->offset, src->dreg);
/* Register portion: one word per iteration; narrow loads for the last
 * partial word so we never read past the struct. */
2501 for (i = 0; i < ainfo->size; ++i) {
2502 dreg = mono_alloc_ireg (cfg);
2503 switch (struct_size) {
2505 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, dreg, src->dreg, soffset);
2508 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, dreg, src->dreg, soffset);
/* 3 bytes: assemble from three byte loads and shifts. */
2511 tmpreg = mono_alloc_ireg (cfg);
2512 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, dreg, src->dreg, soffset);
2513 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, tmpreg, src->dreg, soffset + 1);
2514 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, tmpreg, tmpreg, 8);
2515 MONO_EMIT_NEW_BIALU (cfg, OP_IOR, dreg, dreg, tmpreg);
2516 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, tmpreg, src->dreg, soffset + 2);
2517 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, tmpreg, tmpreg, 16);
2518 MONO_EMIT_NEW_BIALU (cfg, OP_IOR, dreg, dreg, tmpreg);
2521 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, dreg, src->dreg, soffset);
2524 mono_call_inst_add_outarg_reg (cfg, call, dreg, ainfo->reg + i, FALSE);
2525 soffset += sizeof (gpointer);
2526 struct_size -= sizeof (gpointer);
2528 //g_print ("vt size: %d at R%d + %d\n", doffset, vt->inst_basereg, vt->inst_offset);
2530 mini_emit_memcpy (cfg, ARMREG_SP, doffset, src->dreg, soffset, MIN (ovf_size * sizeof (gpointer), struct_size), struct_size < 4 ? 1 : 4);
/*
 * mono_arch_emit_setret:
 *
 *   Emit IR moving VAL into the return location dictated by METHOD's
 * signature (integer registers, or VFP/soft-float handling for R4/R8).
 */
2534 mono_arch_emit_setret (MonoCompile *cfg, MonoMethod *method, MonoInst *val)
2536 MonoType *ret = mini_type_get_underlying_type (cfg->generic_sharing_context, mono_method_signature (method)->ret);
/* 64 bit integers are returned in a register pair via OP_SETLRET */
2539 if (ret->type == MONO_TYPE_I8 || ret->type == MONO_TYPE_U8) {
2542 if (COMPILE_LLVM (cfg)) {
2543 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, cfg->ret->dreg, val->dreg);
2545 MONO_INST_NEW (cfg, ins, OP_SETLRET);
/* long vregs: low word read from dreg + 1, high word from dreg + 2 */
2546 ins->sreg1 = val->dreg + 1;
2547 ins->sreg2 = val->dreg + 2;
2548 MONO_ADD_INS (cfg->cbb, ins);
/* Soft float: R8 return values travel through OP_SETFRET */
2553 case MONO_ARM_FPU_NONE:
2554 if (ret->type == MONO_TYPE_R8) {
2557 MONO_INST_NEW (cfg, ins, OP_SETFRET);
2558 ins->dreg = cfg->ret->dreg;
2559 ins->sreg1 = val->dreg;
2560 MONO_ADD_INS (cfg->cbb, ins);
2563 if (ret->type == MONO_TYPE_R4) {
2564 /* Already converted to an int in method_to_ir () */
2565 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, cfg->ret->dreg, val->dreg);
/* VFP: both R4 and R8 are returned through OP_SETFRET */
2569 case MONO_ARM_FPU_VFP:
2570 case MONO_ARM_FPU_VFP_HARD:
2571 if (ret->type == MONO_TYPE_R8 || ret->type == MONO_TYPE_R4) {
2574 MONO_INST_NEW (cfg, ins, OP_SETFRET);
2575 ins->dreg = cfg->ret->dreg;
2576 ins->sreg1 = val->dreg;
2577 MONO_ADD_INS (cfg->cbb, ins);
2582 g_assert_not_reached ();
/* Default: plain move into the integer return register */
2586 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, cfg->ret->dreg, val->dreg);
2589 #endif /* #ifndef DISABLE_JIT */
2592 mono_arch_is_inst_imm (gint64 imm)
2597 #define DYN_CALL_STACK_ARGS 6
2600 MonoMethodSignature *sig;
2605 mgreg_t regs [PARAM_REGS + DYN_CALL_STACK_ARGS];
/*
 * dyn_call_supported:
 *
 *   Return whether the dyn-call machinery can handle a call with
 * signature SIG and the computed CINFO: the argument count must fit in
 * PARAM_REGS plus the fixed stack area, and only a restricted set of
 * return/argument storage kinds is accepted.
 */
2611 dyn_call_supported (CallInfo *cinfo, MonoMethodSignature *sig)
/* Too many arguments to fit in registers + the fixed stack area */
2615 if (sig->hasthis + sig->param_count > PARAM_REGS + DYN_CALL_STACK_ARGS)
2618 switch (cinfo->ret.storage) {
2620 case RegTypeGeneral:
2621 case RegTypeIRegPair:
2622 case RegTypeStructByAddr:
/* Check every argument's storage kind and stack offset */
2633 for (i = 0; i < cinfo->nargs; ++i) {
2634 ArgInfo *ainfo = &cinfo->args [i];
2637 switch (ainfo->storage) {
2638 case RegTypeGeneral:
2640 case RegTypeIRegPair:
/* Stack arguments must fall inside the fixed DynCallArgs stack area */
2643 if (ainfo->offset >= (DYN_CALL_STACK_ARGS * sizeof (gpointer)))
2646 case RegTypeStructByVal:
2647 if (ainfo->size == 0)
2648 last_slot = PARAM_REGS + (ainfo->offset / 4) + ainfo->vtsize;
2650 last_slot = ainfo->reg + ainfo->size + ainfo->vtsize;
2651 if (last_slot >= PARAM_REGS + DYN_CALL_STACK_ARGS)
2659 // FIXME: Can't use cinfo only as it doesn't contain info about I8/float */
2660 for (i = 0; i < sig->param_count; ++i) {
2661 MonoType *t = sig->params [i];
/*
 * mono_arch_dyn_call_prepare:
 *
 *   Compute call info for SIG and return an ArchDynCallInfo usable by
 * mono_arch_start_dyn_call (), or NULL when the signature is not
 * supported. The result is owned by the caller and must be released with
 * mono_arch_dyn_call_free ().
 */
2687 mono_arch_dyn_call_prepare (MonoMethodSignature *sig)
2689 ArchDynCallInfo *info;
2692 cinfo = get_call_info (NULL, NULL, sig);
2694 if (!dyn_call_supported (cinfo, sig)) {
2699 info = g_new0 (ArchDynCallInfo, 1);
2700 // FIXME: Preprocess the info to speed up start_dyn_call ()
2702 info->cinfo = cinfo;
2704 return (MonoDynCallInfo*)info;
/*
 * mono_arch_dyn_call_free:
 *
 *   Release INFO, previously created by mono_arch_dyn_call_prepare ().
 * NOTE(review): only ainfo->cinfo is freed in the visible lines; the
 * ArchDynCallInfo itself is presumably freed on an elided line — verify.
 */
2708 mono_arch_dyn_call_free (MonoDynCallInfo *info)
2710 ArchDynCallInfo *ainfo = (ArchDynCallInfo*)info;
2712 g_free (ainfo->cinfo);
/*
 * mono_arch_start_dyn_call:
 *
 *   Marshal the argument array ARGS into the DynCallArgs structure in BUF
 * (simulated register file + fixed stack area) according to INFO, so the
 * generic dyn-call trampoline can perform the call. RET is the buffer the
 * return value will be stored into.
 */
2717 mono_arch_start_dyn_call (MonoDynCallInfo *info, gpointer **args, guint8 *ret, guint8 *buf, int buf_len)
2719 ArchDynCallInfo *dinfo = (ArchDynCallInfo*)info;
2720 DynCallArgs *p = (DynCallArgs*)buf;
2721 int arg_index, greg, i, j, pindex;
2722 MonoMethodSignature *sig = dinfo->sig;
2724 g_assert (buf_len >= sizeof (DynCallArgs));
/* 'this' (or the vtype return address when it comes first) fills the first greg */
2733 if (sig->hasthis || dinfo->cinfo->vret_arg_index == 1) {
2734 p->regs [greg ++] = (mgreg_t)*(args [arg_index ++]);
/* Hidden vtype-return-address argument */
2739 if (dinfo->cinfo->vtype_retaddr)
2740 p->regs [greg ++] = (mgreg_t)ret;
2742 for (i = pindex; i < sig->param_count; i++) {
2743 MonoType *t = mono_type_get_underlying_type (sig->params [i]);
2744 gpointer *arg = args [arg_index ++];
2745 ArgInfo *ainfo = &dinfo->cinfo->args [i + sig->hasthis];
/* Compute the slot inside p->regs: registers first, then the stack area */
2748 if (ainfo->storage == RegTypeGeneral || ainfo->storage == RegTypeIRegPair || ainfo->storage == RegTypeStructByVal)
2750 else if (ainfo->storage == RegTypeBase)
2751 slot = PARAM_REGS + (ainfo->offset / 4);
2753 g_assert_not_reached ();
2756 p->regs [slot] = (mgreg_t)*arg;
/* Reference types are stored as-is */
2761 case MONO_TYPE_STRING:
2762 case MONO_TYPE_CLASS:
2763 case MONO_TYPE_ARRAY:
2764 case MONO_TYPE_SZARRAY:
2765 case MONO_TYPE_OBJECT:
2769 p->regs [slot] = (mgreg_t)*arg;
/* Small integers are widened to a full machine word */
2771 case MONO_TYPE_BOOLEAN:
2773 p->regs [slot] = *(guint8*)arg;
2776 p->regs [slot] = *(gint8*)arg;
2779 p->regs [slot] = *(gint16*)arg;
2782 case MONO_TYPE_CHAR:
2783 p->regs [slot] = *(guint16*)arg;
2786 p->regs [slot] = *(gint32*)arg;
2789 p->regs [slot] = *(guint32*)arg;
/* 64 bit values occupy two consecutive slots */
2793 p->regs [slot ++] = (mgreg_t)arg [0];
2794 p->regs [slot] = (mgreg_t)arg [1];
2797 p->regs [slot] = *(mgreg_t*)arg;
2800 p->regs [slot ++] = (mgreg_t)arg [0];
2801 p->regs [slot] = (mgreg_t)arg [1];
2803 case MONO_TYPE_GENERICINST:
2804 if (MONO_TYPE_IS_REFERENCE (t)) {
2805 p->regs [slot] = (mgreg_t)*arg;
2810 case MONO_TYPE_VALUETYPE:
2811 g_assert (ainfo->storage == RegTypeStructByVal);
2813 if (ainfo->size == 0)
2814 slot = PARAM_REGS + (ainfo->offset / 4);
/* Copy the vtype word by word into its register/stack slots */
2818 for (j = 0; j < ainfo->size + ainfo->vtsize; ++j)
2819 p->regs [slot ++] = ((mgreg_t*)arg) [j];
2822 g_assert_not_reached ();
/*
 * mono_arch_finish_dyn_call:
 *
 *   Store the result of a dyn call — left by the trampoline in BUF as
 * res/res2 — into the return buffer, converting it according to the
 * signature's return type.
 */
2828 mono_arch_finish_dyn_call (MonoDynCallInfo *info, guint8 *buf)
2830 ArchDynCallInfo *ainfo = (ArchDynCallInfo*)info;
2831 MonoMethodSignature *sig = ((ArchDynCallInfo*)info)->sig;
2832 guint8 *ret = ((DynCallArgs*)buf)->ret;
2833 mgreg_t res = ((DynCallArgs*)buf)->res;
2834 mgreg_t res2 = ((DynCallArgs*)buf)->res2;
2836 switch (mono_type_get_underlying_type (sig->ret)->type) {
2837 case MONO_TYPE_VOID:
2838 *(gpointer*)ret = NULL;
2840 case MONO_TYPE_STRING:
2841 case MONO_TYPE_CLASS:
2842 case MONO_TYPE_ARRAY:
2843 case MONO_TYPE_SZARRAY:
2844 case MONO_TYPE_OBJECT:
2848 *(gpointer*)ret = (gpointer)res;
/* Narrow integer results are truncated from the full word in res */
2854 case MONO_TYPE_BOOLEAN:
2855 *(guint8*)ret = res;
2858 *(gint16*)ret = res;
2861 case MONO_TYPE_CHAR:
2862 *(guint16*)ret = res;
2865 *(gint32*)ret = res;
2868 *(guint32*)ret = res;
2872 /* This handles endianness as well */
2873 ((gint32*)ret) [0] = res;
2874 ((gint32*)ret) [1] = res2;
2876 case MONO_TYPE_GENERICINST:
2877 if (MONO_TYPE_IS_REFERENCE (sig->ret)) {
2878 *(gpointer*)ret = (gpointer)res;
2883 case MONO_TYPE_VALUETYPE:
/* Vtype results were already written through the hidden return address */
2884 g_assert (ainfo->cinfo->vtype_retaddr);
2889 *(float*)ret = *(float*)&res;
2891 case MONO_TYPE_R8: {
/* NOTE(review): '®s' below looks like a mis-encoded '&regs' — verify
 * against the upstream source before relying on this line. */
2898 *(double*)ret = *(double*)®s;
2902 g_assert_not_reached ();
2909 * Allow tracing to work with this interface (with an optional argument)
/*
 * mono_arch_instrument_prolog:
 *
 *   Emit a call to FUNC at method entry for tracing, passing the method
 * in R0 and a (currently NULL) frame pointer in R1.
 */
2913 mono_arch_instrument_prolog (MonoCompile *cfg, void *func, void *p, gboolean enable_arguments)
2917 code = mono_arm_emit_load_imm (code, ARMREG_R0, (guint32)cfg->method);
2918 ARM_MOV_REG_IMM8 (code, ARMREG_R1, 0); /* NULL ebp for now */
2919 code = mono_arm_emit_load_imm (code, ARMREG_R2, (guint32)func);
2920 code = emit_call_reg (code, ARMREG_R2);
/*
 * mono_arch_instrument_epilog_full:
 *
 *   Emit a call to FUNC at method exit for tracing. The return value is
 * spilled to the stack around the call — save_mode, chosen from the
 * method's return type, selects how — and reloaded afterwards.
 */
2934 mono_arch_instrument_epilog_full (MonoCompile *cfg, void *func, void *p, gboolean enable_arguments, gboolean preserve_argument_registers)
2937 int save_mode = SAVE_NONE;
2939 MonoMethod *method = cfg->method;
2940 MonoType *ret_type = mini_type_get_underlying_type (cfg->generic_sharing_context, mono_method_signature (method)->ret);
2941 int rtype = ret_type->type;
2942 int save_offset = cfg->param_area;
/* Grow the native code buffer if needed before emitting */
2946 offset = code - cfg->native_code;
2947 /* we need about 16 instructions */
2948 if (offset > (cfg->code_size - 16 * 4)) {
2949 cfg->code_size *= 2;
2950 cfg->native_code = g_realloc (cfg->native_code, cfg->code_size);
2951 code = cfg->native_code + offset;
/* Pick save_mode from the return type */
2954 case MONO_TYPE_VOID:
2955 /* special case string .ctor icall */
/* NOTE(review): strcmp () is non-zero when the name is NOT ".ctor", so
 * this matches every string method except .ctor; other backends use
 * !strcmp here — confirm whether the condition is inverted. */
2956 if (strcmp (".ctor", method->name) && method->klass == mono_defaults.string_class)
2957 save_mode = SAVE_ONE;
2959 save_mode = SAVE_NONE;
2963 save_mode = SAVE_TWO;
2967 save_mode = SAVE_ONE_FP;
2969 save_mode = SAVE_ONE;
2973 save_mode = SAVE_TWO_FP;
2975 save_mode = SAVE_TWO;
2977 case MONO_TYPE_GENERICINST:
2978 if (!mono_type_generic_inst_is_valuetype (ret_type)) {
2979 save_mode = SAVE_ONE;
2983 case MONO_TYPE_VALUETYPE:
2984 save_mode = SAVE_STRUCT;
2987 save_mode = SAVE_ONE;
/* Spill the return value before the tracing call */
2991 switch (save_mode) {
2993 ARM_STR_IMM (code, ARMREG_R0, cfg->frame_reg, save_offset);
2994 ARM_STR_IMM (code, ARMREG_R1, cfg->frame_reg, save_offset + 4);
2995 if (enable_arguments) {
2996 ARM_MOV_REG_REG (code, ARMREG_R2, ARMREG_R1);
2997 ARM_MOV_REG_REG (code, ARMREG_R1, ARMREG_R0);
3001 ARM_STR_IMM (code, ARMREG_R0, cfg->frame_reg, save_offset);
3002 if (enable_arguments) {
3003 ARM_MOV_REG_REG (code, ARMREG_R1, ARMREG_R0);
3007 ARM_FSTS (code, ARM_VFP_F0, cfg->frame_reg, save_offset);
3008 if (enable_arguments) {
3009 ARM_FMRS (code, ARMREG_R1, ARM_VFP_F0);
3013 ARM_FSTD (code, ARM_VFP_D0, cfg->frame_reg, save_offset);
3014 if (enable_arguments) {
3015 ARM_FMDRR (code, ARMREG_R1, ARMREG_R2, ARM_VFP_D0);
3019 if (enable_arguments) {
3020 /* FIXME: get the actual address */
3021 ARM_MOV_REG_REG (code, ARMREG_R1, ARMREG_R0);
/* Call FUNC (method, ...) through IP */
3029 code = mono_arm_emit_load_imm (code, ARMREG_R0, (guint32)cfg->method);
3030 code = mono_arm_emit_load_imm (code, ARMREG_IP, (guint32)func);
3031 code = emit_call_reg (code, ARMREG_IP);
/* Reload the saved return value */
3033 switch (save_mode) {
3035 ARM_LDR_IMM (code, ARMREG_R0, cfg->frame_reg, save_offset);
3036 ARM_LDR_IMM (code, ARMREG_R1, cfg->frame_reg, save_offset + 4);
3039 ARM_LDR_IMM (code, ARMREG_R0, cfg->frame_reg, save_offset);
3042 ARM_FLDS (code, ARM_VFP_F0, cfg->frame_reg, save_offset);
3045 ARM_FLDD (code, ARM_VFP_D0, cfg->frame_reg, save_offset);
3056 * The immediate field for cond branches is big enough for all reasonable methods
/* Emit a conditional branch to ins->inst_true_bb; the target is resolved
 * later through a MONO_PATCH_INFO_BB patch (the direct-offset path is
 * disabled with '0 &&'). */
3058 #define EMIT_COND_BRANCH_FLAGS(ins,condcode) \
3059 if (0 && ins->inst_true_bb->native_offset) { \
3060 ARM_B_COND (code, (condcode), (code - cfg->native_code + ins->inst_true_bb->native_offset) & 0xffffff); \
3062 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_BB, ins->inst_true_bb); \
3063 ARM_B_COND (code, (condcode), 0); \
3066 #define EMIT_COND_BRANCH(ins,cond) EMIT_COND_BRANCH_FLAGS(ins, branch_cc_table [(cond)])
3068 /* emit an exception if condition is fail
3070 * We assign the extra code used to throw the implicit exceptions
3071 * to cfg->bb_exit as far as the big branch handling is concerned
3073 #define EMIT_COND_SYSTEM_EXCEPTION_FLAGS(condcode,exc_name) \
3075 mono_add_patch_info (cfg, code - cfg->native_code, \
3076 MONO_PATCH_INFO_EXC, exc_name); \
3077 ARM_BL_COND (code, (condcode), 0); \
3080 #define EMIT_COND_SYSTEM_EXCEPTION(cond,exc_name) EMIT_COND_SYSTEM_EXCEPTION_FLAGS(branch_cc_table [(cond)], (exc_name))
3083 mono_arch_peephole_pass_1 (MonoCompile *cfg, MonoBasicBlock *bb)
/*
 * mono_arch_peephole_pass_2:
 *
 *   Local peephole optimizations over BB: fold load-after-store and
 * load-after-load pairs into moves/extends, fold store-imm/load into
 * ICONST, and remove no-op or back-to-back moves.
 */
3088 mono_arch_peephole_pass_2 (MonoCompile *cfg, MonoBasicBlock *bb)
3090 MonoInst *ins, *n, *last_ins = NULL;
3092 MONO_BB_FOR_EACH_INS_SAFE (bb, n, ins) {
3093 switch (ins->opcode) {
3096 /* Already done by an arch-independent pass */
3098 case OP_LOAD_MEMBASE:
3099 case OP_LOADI4_MEMBASE:
3101 * OP_STORE_MEMBASE_REG reg, offset(basereg)
3102 * OP_LOAD_MEMBASE offset(basereg), reg
3104 if (last_ins && (last_ins->opcode == OP_STOREI4_MEMBASE_REG
3105 || last_ins->opcode == OP_STORE_MEMBASE_REG) &&
3106 ins->inst_basereg == last_ins->inst_destbasereg &&
3107 ins->inst_offset == last_ins->inst_offset) {
/* Loading the value just stored: drop the load or turn it into a move */
3108 if (ins->dreg == last_ins->sreg1) {
3109 MONO_DELETE_INS (bb, ins);
3112 //static int c = 0; printf ("MATCHX %s %d\n", cfg->method->name,c++);
3113 ins->opcode = OP_MOVE;
3114 ins->sreg1 = last_ins->sreg1;
3118 * Note: reg1 must be different from the basereg in the second load
3119 * OP_LOAD_MEMBASE offset(basereg), reg1
3120 * OP_LOAD_MEMBASE offset(basereg), reg2
3122 * OP_LOAD_MEMBASE offset(basereg), reg1
3123 * OP_MOVE reg1, reg2
3125 } if (last_ins && (last_ins->opcode == OP_LOADI4_MEMBASE
3126 || last_ins->opcode == OP_LOAD_MEMBASE) &&
3127 ins->inst_basereg != last_ins->dreg &&
3128 ins->inst_basereg == last_ins->inst_basereg &&
3129 ins->inst_offset == last_ins->inst_offset) {
3131 if (ins->dreg == last_ins->dreg) {
3132 MONO_DELETE_INS (bb, ins);
3135 ins->opcode = OP_MOVE;
3136 ins->sreg1 = last_ins->dreg;
3139 //g_assert_not_reached ();
3143 * OP_STORE_MEMBASE_IMM imm, offset(basereg)
3144 * OP_LOAD_MEMBASE offset(basereg), reg
3146 * OP_STORE_MEMBASE_IMM imm, offset(basereg)
3147 * OP_ICONST reg, imm
3149 } else if (last_ins && (last_ins->opcode == OP_STOREI4_MEMBASE_IMM
3150 || last_ins->opcode == OP_STORE_MEMBASE_IMM) &&
3151 ins->inst_basereg == last_ins->inst_destbasereg &&
3152 ins->inst_offset == last_ins->inst_offset) {
3153 //static int c = 0; printf ("MATCHX %s %d\n", cfg->method->name,c++);
3154 ins->opcode = OP_ICONST;
3155 ins->inst_c0 = last_ins->inst_imm;
3156 g_assert_not_reached (); // check this rule
/* A narrow load after a narrow store at the same address becomes a
 * sign/zero extension of the stored register */
3160 case OP_LOADU1_MEMBASE:
3161 case OP_LOADI1_MEMBASE:
3162 if (last_ins && (last_ins->opcode == OP_STOREI1_MEMBASE_REG) &&
3163 ins->inst_basereg == last_ins->inst_destbasereg &&
3164 ins->inst_offset == last_ins->inst_offset) {
3165 ins->opcode = (ins->opcode == OP_LOADI1_MEMBASE) ? OP_ICONV_TO_I1 : OP_ICONV_TO_U1;
3166 ins->sreg1 = last_ins->sreg1;
3169 case OP_LOADU2_MEMBASE:
3170 case OP_LOADI2_MEMBASE:
3171 if (last_ins && (last_ins->opcode == OP_STOREI2_MEMBASE_REG) &&
3172 ins->inst_basereg == last_ins->inst_destbasereg &&
3173 ins->inst_offset == last_ins->inst_offset) {
3174 ins->opcode = (ins->opcode == OP_LOADI2_MEMBASE) ? OP_ICONV_TO_I2 : OP_ICONV_TO_U2;
3175 ins->sreg1 = last_ins->sreg1;
3179 ins->opcode = OP_MOVE;
/* Remove self-moves and move/move-back pairs */
3183 if (ins->dreg == ins->sreg1) {
3184 MONO_DELETE_INS (bb, ins);
3188 * OP_MOVE sreg, dreg
3189 * OP_MOVE dreg, sreg
3191 if (last_ins && last_ins->opcode == OP_MOVE &&
3192 ins->sreg1 == last_ins->dreg &&
3193 ins->dreg == last_ins->sreg1) {
3194 MONO_DELETE_INS (bb, ins);
3202 bb->last_ins = last_ins;
3206 * the branch_cc_table should maintain the order of these
3220 branch_cc_table [] = {
/* Allocate a new instruction DEST with opcode OP and insert it before the
 * current INS in bb (helper for the lowering pass below). */
3234 #define ADD_NEW_INS(cfg,dest,op) do { \
3235 MONO_INST_NEW ((cfg), (dest), (op)); \
3236 mono_bblock_insert_before_ins (bb, ins, (dest)); \
/*
 * map_to_reg_reg_op:
 *
 *   Map an opcode taking an immediate or membase operand to its
 * register-register (memindex / register-store) equivalent, used by the
 * lowering pass when an immediate or offset does not fit the ARM
 * instruction encoding.
 */
3240 map_to_reg_reg_op (int op)
3249 case OP_COMPARE_IMM:
3251 case OP_ICOMPARE_IMM:
/* Membase loads become memindex loads */
3265 case OP_LOAD_MEMBASE:
3266 return OP_LOAD_MEMINDEX;
3267 case OP_LOADI4_MEMBASE:
3268 return OP_LOADI4_MEMINDEX;
3269 case OP_LOADU4_MEMBASE:
3270 return OP_LOADU4_MEMINDEX;
3271 case OP_LOADU1_MEMBASE:
3272 return OP_LOADU1_MEMINDEX;
3273 case OP_LOADI2_MEMBASE:
3274 return OP_LOADI2_MEMINDEX;
3275 case OP_LOADU2_MEMBASE:
3276 return OP_LOADU2_MEMINDEX;
3277 case OP_LOADI1_MEMBASE:
3278 return OP_LOADI1_MEMINDEX;
/* Register membase stores become memindex stores */
3279 case OP_STOREI1_MEMBASE_REG:
3280 return OP_STOREI1_MEMINDEX;
3281 case OP_STOREI2_MEMBASE_REG:
3282 return OP_STOREI2_MEMINDEX;
3283 case OP_STOREI4_MEMBASE_REG:
3284 return OP_STOREI4_MEMINDEX;
3285 case OP_STORE_MEMBASE_REG:
3286 return OP_STORE_MEMINDEX;
3287 case OP_STORER4_MEMBASE_REG:
3288 return OP_STORER4_MEMINDEX;
3289 case OP_STORER8_MEMBASE_REG:
3290 return OP_STORER8_MEMINDEX;
/* Immediate stores first become register stores (the immediate is
 * materialized into a vreg by the caller) */
3291 case OP_STORE_MEMBASE_IMM:
3292 return OP_STORE_MEMBASE_REG;
3293 case OP_STOREI1_MEMBASE_IMM:
3294 return OP_STOREI1_MEMBASE_REG;
3295 case OP_STOREI2_MEMBASE_IMM:
3296 return OP_STOREI2_MEMBASE_REG;
3297 case OP_STOREI4_MEMBASE_IMM:
3298 return OP_STOREI4_MEMBASE_REG;
3300 g_assert_not_reached ();
3304 * Remove from the instruction list the instructions that can't be
3305 * represented with very simple instructions with no register
/*
 * mono_arch_lowering_pass:
 *
 *   Rewrite instructions in BB that cannot be encoded directly on ARM:
 * immediates that are not valid rotated imm8 values are materialized into
 * a vreg with OP_ICONST, and membase offsets that do not fit the
 * imm12/imm8/fpimm8 encodings are converted to indexed or reg+reg forms
 * (via map_to_reg_reg_op () or an explicit address computation).
 */
3309 mono_arch_lowering_pass (MonoCompile *cfg, MonoBasicBlock *bb)
3311 MonoInst *ins, *temp, *last_ins = NULL;
3312 int rot_amount, imm8, low_imm;
3314 MONO_BB_FOR_EACH_INS (bb, ins) {
3316 switch (ins->opcode) {
3320 case OP_COMPARE_IMM:
3321 case OP_ICOMPARE_IMM:
/* Immediate doesn't fit a rotated imm8: load it into a vreg and switch
 * to the register-register form of the opcode */
3335 if ((imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount)) < 0) {
3336 ADD_NEW_INS (cfg, temp, OP_ICONST);
3337 temp->inst_c0 = ins->inst_imm;
3338 temp->dreg = mono_alloc_ireg (cfg);
3339 ins->sreg2 = temp->dreg;
3340 ins->opcode = mono_op_imm_to_op (ins->opcode);
3342 if (ins->opcode == OP_SBB || ins->opcode == OP_ISBB || ins->opcode == OP_SUBCC)
/* Multiply by immediate: strength-reduce x*1 -> move, x*0 -> 0,
 * power-of-two -> shift, otherwise materialize and use OP_IMUL */
3348 if (ins->inst_imm == 1) {
3349 ins->opcode = OP_MOVE;
3352 if (ins->inst_imm == 0) {
3353 ins->opcode = OP_ICONST;
3357 imm8 = mono_is_power_of_two (ins->inst_imm);
3359 ins->opcode = OP_SHL_IMM;
3360 ins->inst_imm = imm8;
3363 ADD_NEW_INS (cfg, temp, OP_ICONST);
3364 temp->inst_c0 = ins->inst_imm;
3365 temp->dreg = mono_alloc_ireg (cfg);
3366 ins->sreg2 = temp->dreg;
3367 ins->opcode = OP_IMUL;
3373 if (ins->next && (ins->next->opcode == OP_COND_EXC_C || ins->next->opcode == OP_COND_EXC_IC))
3374 /* ARM sets the C flag to 1 if there was _no_ overflow */
3375 ins->next->opcode = OP_COND_EXC_NC;
3378 case OP_IDIV_UN_IMM:
3380 case OP_IREM_UN_IMM:
3381 ADD_NEW_INS (cfg, temp, OP_ICONST);
3382 temp->inst_c0 = ins->inst_imm;
3383 temp->dreg = mono_alloc_ireg (cfg);
3384 ins->sreg2 = temp->dreg;
3385 ins->opcode = mono_op_imm_to_op (ins->opcode);
3387 case OP_LOCALLOC_IMM:
3388 ADD_NEW_INS (cfg, temp, OP_ICONST);
3389 temp->inst_c0 = ins->inst_imm;
3390 temp->dreg = mono_alloc_ireg (cfg);
3391 ins->sreg1 = temp->dreg;
3392 ins->opcode = OP_LOCALLOC;
/* Word/byte loads: imm12 offset encoding */
3394 case OP_LOAD_MEMBASE:
3395 case OP_LOADI4_MEMBASE:
3396 case OP_LOADU4_MEMBASE:
3397 case OP_LOADU1_MEMBASE:
3398 /* we can do two things: load the immed in a register
3399 * and use an indexed load, or see if the immed can be
3400 * represented as an ad_imm + a load with a smaller offset
3401 * that fits. We just do the first for now, optimize later.
3403 if (arm_is_imm12 (ins->inst_offset))
3405 ADD_NEW_INS (cfg, temp, OP_ICONST);
3406 temp->inst_c0 = ins->inst_offset;
3407 temp->dreg = mono_alloc_ireg (cfg);
3408 ins->sreg2 = temp->dreg;
3409 ins->opcode = map_to_reg_reg_op (ins->opcode);
/* Halfword / signed byte loads: only imm8 offsets encode */
3411 case OP_LOADI2_MEMBASE:
3412 case OP_LOADU2_MEMBASE:
3413 case OP_LOADI1_MEMBASE:
3414 if (arm_is_imm8 (ins->inst_offset))
3416 ADD_NEW_INS (cfg, temp, OP_ICONST);
3417 temp->inst_c0 = ins->inst_offset;
3418 temp->dreg = mono_alloc_ireg (cfg);
3419 ins->sreg2 = temp->dreg;
3420 ins->opcode = map_to_reg_reg_op (ins->opcode);
/* VFP loads: fpimm8 offsets; otherwise add the high part of the offset
 * to the base (or compute the full address) and keep a small offset */
3422 case OP_LOADR4_MEMBASE:
3423 case OP_LOADR8_MEMBASE:
3424 if (arm_is_fpimm8 (ins->inst_offset))
3426 low_imm = ins->inst_offset & 0x1ff;
3427 if ((imm8 = mono_arm_is_rotated_imm8 (ins->inst_offset & ~0x1ff, &rot_amount)) >= 0) {
3428 ADD_NEW_INS (cfg, temp, OP_ADD_IMM);
3429 temp->inst_imm = ins->inst_offset & ~0x1ff;
3430 temp->sreg1 = ins->inst_basereg;
3431 temp->dreg = mono_alloc_ireg (cfg);
3432 ins->inst_basereg = temp->dreg;
3433 ins->inst_offset = low_imm;
3437 ADD_NEW_INS (cfg, temp, OP_ICONST);
3438 temp->inst_c0 = ins->inst_offset;
3439 temp->dreg = mono_alloc_ireg (cfg);
3441 ADD_NEW_INS (cfg, add_ins, OP_IADD);
3442 add_ins->sreg1 = ins->inst_basereg;
3443 add_ins->sreg2 = temp->dreg;
3444 add_ins->dreg = mono_alloc_ireg (cfg);
3446 ins->inst_basereg = add_ins->dreg;
3447 ins->inst_offset = 0;
/* Word/byte stores: imm12 offset encoding */
3450 case OP_STORE_MEMBASE_REG:
3451 case OP_STOREI4_MEMBASE_REG:
3452 case OP_STOREI1_MEMBASE_REG:
3453 if (arm_is_imm12 (ins->inst_offset))
3455 ADD_NEW_INS (cfg, temp, OP_ICONST);
3456 temp->inst_c0 = ins->inst_offset;
3457 temp->dreg = mono_alloc_ireg (cfg);
3458 ins->sreg2 = temp->dreg;
3459 ins->opcode = map_to_reg_reg_op (ins->opcode);
3461 case OP_STOREI2_MEMBASE_REG:
3462 if (arm_is_imm8 (ins->inst_offset))
3464 ADD_NEW_INS (cfg, temp, OP_ICONST);
3465 temp->inst_c0 = ins->inst_offset;
3466 temp->dreg = mono_alloc_ireg (cfg);
3467 ins->sreg2 = temp->dreg;
3468 ins->opcode = map_to_reg_reg_op (ins->opcode);
/* VFP stores: same offset handling as the VFP loads above */
3470 case OP_STORER4_MEMBASE_REG:
3471 case OP_STORER8_MEMBASE_REG:
3472 if (arm_is_fpimm8 (ins->inst_offset))
3474 low_imm = ins->inst_offset & 0x1ff;
3475 if ((imm8 = mono_arm_is_rotated_imm8 (ins->inst_offset & ~ 0x1ff, &rot_amount)) >= 0 && arm_is_fpimm8 (low_imm)) {
3476 ADD_NEW_INS (cfg, temp, OP_ADD_IMM);
3477 temp->inst_imm = ins->inst_offset & ~0x1ff;
3478 temp->sreg1 = ins->inst_destbasereg;
3479 temp->dreg = mono_alloc_ireg (cfg);
3480 ins->inst_destbasereg = temp->dreg;
3481 ins->inst_offset = low_imm;
3485 ADD_NEW_INS (cfg, temp, OP_ICONST);
3486 temp->inst_c0 = ins->inst_offset;
3487 temp->dreg = mono_alloc_ireg (cfg);
3489 ADD_NEW_INS (cfg, add_ins, OP_IADD);
3490 add_ins->sreg1 = ins->inst_destbasereg;
3491 add_ins->sreg2 = temp->dreg;
3492 add_ins->dreg = mono_alloc_ireg (cfg);
3494 ins->inst_destbasereg = add_ins->dreg;
3495 ins->inst_offset = 0;
/* Immediate stores: materialize the value, then re-process the
 * instruction so a large offset is also handled */
3498 case OP_STORE_MEMBASE_IMM:
3499 case OP_STOREI1_MEMBASE_IMM:
3500 case OP_STOREI2_MEMBASE_IMM:
3501 case OP_STOREI4_MEMBASE_IMM:
3502 ADD_NEW_INS (cfg, temp, OP_ICONST);
3503 temp->inst_c0 = ins->inst_imm;
3504 temp->dreg = mono_alloc_ireg (cfg);
3505 ins->sreg1 = temp->dreg;
3506 ins->opcode = map_to_reg_reg_op (ins->opcode);
3508 goto loop_start; /* make it handle the possibly big ins->inst_offset */
3510 gboolean swap = FALSE;
3514 /* Optimized away */
3519 /* Some fp compares require swapped operands */
3520 switch (ins->next->opcode) {
3522 ins->next->opcode = OP_FBLT;
3526 ins->next->opcode = OP_FBLT_UN;
3530 ins->next->opcode = OP_FBGE;
3534 ins->next->opcode = OP_FBGE_UN;
3542 ins->sreg1 = ins->sreg2;
3551 bb->last_ins = last_ins;
3552 bb->max_vreg = cfg->next_vreg;
/*
 * mono_arch_decompose_long_opts:
 *
 *   Decompose 64 bit opcodes into 32 bit IR. OP_LNEG becomes an
 * RSBS/RSC immediate pair operating on the low (+1) and high (+2)
 * word vregs.
 */
3556 mono_arch_decompose_long_opts (MonoCompile *cfg, MonoInst *long_ins)
3560 if (long_ins->opcode == OP_LNEG) {
3562 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ARM_RSBS_IMM, ins->dreg + 1, ins->sreg1 + 1, 0);
3563 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ARM_RSC_IMM, ins->dreg + 2, ins->sreg1 + 2, 0);
/*
 * emit_float_to_int:
 *
 *   Emit VFP code converting the floating point value in SREG to an
 * integer of the given SIZE/signedness in DREG, using a scratch VFP
 * register, then truncate or sign-extend the result to SIZE bytes.
 */
3569 emit_float_to_int (MonoCompile *cfg, guchar *code, int dreg, int sreg, int size, gboolean is_signed)
3571 /* sreg is a float, dreg is an integer reg */
3573 code = mono_arm_emit_vfp_scratch_save (cfg, code, vfp_scratch1);
/* Signed vs unsigned double-to-int conversion, then move to the core reg */
3575 ARM_TOSIZD (code, vfp_scratch1, sreg);
3577 ARM_TOUIZD (code, vfp_scratch1, sreg);
3578 ARM_FMRS (code, dreg, vfp_scratch1);
3579 code = mono_arm_emit_vfp_scratch_restore (cfg, code, vfp_scratch1);
/* Unsigned results: mask/shift down to 1 or 2 bytes */
3583 ARM_AND_REG_IMM8 (code, dreg, dreg, 0xff);
3584 else if (size == 2) {
3585 ARM_SHL_IMM (code, dreg, dreg, 16);
3586 ARM_SHR_IMM (code, dreg, dreg, 16);
/* Signed results: sign-extend via shift left + arithmetic shift right */
3590 ARM_SHL_IMM (code, dreg, dreg, 24);
3591 ARM_SAR_IMM (code, dreg, dreg, 24);
3592 } else if (size == 2) {
3593 ARM_SHL_IMM (code, dreg, dreg, 16);
3594 ARM_SAR_IMM (code, dreg, dreg, 16);
3600 #endif /* #ifndef DISABLE_JIT */
3604 const guchar *target;
3609 #define is_call_imm(diff) ((gint)(diff) >= -33554432 && (gint)(diff) <= 33554431)
/*
 * search_thunk_slot:
 *
 *   Code-manager iteration callback: scan a code chunk for an existing
 * 12 byte thunk jumping to pdata->target, or a free slot in which to emit
 * one, and patch the call site at pdata->code to branch to the thunk.
 */
3612 search_thunk_slot (void *data, int csize, int bsize, void *user_data) {
3613 PatchData *pdata = (PatchData*)user_data;
3614 guchar *code = data;
3615 guint32 *thunks = data;
3616 guint32 *endthunks = (guint32*)(code + bsize);
3618 int difflow, diffhigh;
3620 /* always ensure a call from pdata->code can reach to the thunks without further thunks */
3621 difflow = (char*)pdata->code - (char*)thunks;
3622 diffhigh = (char*)pdata->code - (char*)endthunks;
3623 if (!((is_call_imm (thunks) && is_call_imm (endthunks)) || (is_call_imm (difflow) && is_call_imm (diffhigh))))
3627 * The thunk is composed of 3 words:
3628 * load constant from thunks [2] into ARM_IP
3631 * Note that the LR register is already setup
3633 //g_print ("thunk nentries: %d\n", ((char*)endthunks - (char*)thunks)/16);
3634 if ((pdata->found == 2) || (pdata->code >= code && pdata->code <= code + csize)) {
3635 while (thunks < endthunks) {
3636 //g_print ("looking for target: %p at %p (%08x-%08x)\n", pdata->target, thunks, thunks [0], thunks [1]);
/* Reuse an existing thunk whose address word already equals target */
3637 if (thunks [2] == (guint32)pdata->target) {
3638 arm_patch (pdata->code, (guchar*)thunks);
3639 mono_arch_flush_icache (pdata->code, 4);
3642 } else if ((thunks [0] == 0) && (thunks [1] == 0) && (thunks [2] == 0)) {
3643 /* found a free slot instead: emit thunk */
3644 /* ARMREG_IP is fine to use since this can't be an IMT call
3647 code = (guchar*)thunks;
3648 ARM_LDR_IMM (code, ARMREG_IP, ARMREG_PC, 0);
3649 if (thumb_supported)
3650 ARM_BX (code, ARMREG_IP);
3652 ARM_MOV_REG_REG (code, ARMREG_PC, ARMREG_IP);
3653 thunks [2] = (guint32)pdata->target;
3654 mono_arch_flush_icache ((guchar*)thunks, 12);
/* Point the call site at the freshly written thunk */
3656 arm_patch (pdata->code, (guchar*)thunks);
3657 mono_arch_flush_icache (pdata->code, 4);
3661 /* skip 12 bytes, the size of the thunk */
3665 //g_print ("failed thunk lookup for %p from %p at %p (%d entries)\n", pdata->target, pdata->code, data, count);
/*
 * handle_thunk:
 *
 *   Find or create a branch thunk reaching TARGET for the call site at
 * CODE, searching (in order) the dynamic-method code manager, the domain
 * code, and the code managers of other dynamic methods. Asserts if no
 * thunk slot can be found.
 */
3671 handle_thunk (MonoDomain *domain, int absolute, guchar *code, const guchar *target, MonoCodeManager *dyn_code_mp)
3676 domain = mono_domain_get ();
3679 pdata.target = target;
3680 pdata.absolute = absolute;
/* First try the code manager of the dynamic method itself, if any */
3684 mono_code_manager_foreach (dyn_code_mp, search_thunk_slot, &pdata);
/* Then search the domain code under the domain lock */
3687 if (pdata.found != 1) {
3688 mono_domain_lock (domain);
3689 mono_domain_code_foreach (domain, search_thunk_slot, &pdata);
3692 /* this uses the first available slot */
3694 mono_domain_code_foreach (domain, search_thunk_slot, &pdata);
3696 mono_domain_unlock (domain);
3699 if (pdata.found != 1) {
3701 GHashTableIter iter;
3702 MonoJitDynamicMethodInfo *ji;
3705 * This might be a dynamic method, search its code manager. We can only
3706 * use the dynamic method containing CODE, since the others might be freed later.
3710 mono_domain_lock (domain);
3711 hash = domain_jit_info (domain)->dynamic_code_hash;
3713 /* FIXME: Speed this up */
3714 g_hash_table_iter_init (&iter, hash);
3715 while (g_hash_table_iter_next (&iter, NULL, (gpointer*)&ji)) {
3716 mono_code_manager_foreach (ji->code_mp, search_thunk_slot, &pdata);
3717 if (pdata.found == 1)
3721 mono_domain_unlock (domain);
3723 if (pdata.found != 1)
3724 g_print ("thunk failed for %p from %p\n", target, code);
3725 g_assert (pdata.found == 1);
/*
 * arm_patch_general:
 *
 *   Patch the ARM call/branch at CODE so control transfers to TARGET:
 * direct B/BL(X) instructions get their 24 bit displacement rewritten in
 * place when it fits (falling back to a thunk via handle_thunk ()), and
 * the recognized load-constant call sequences have their embedded address
 * word updated instead.
 */
3729 arm_patch_general (MonoDomain *domain, guchar *code, const guchar *target, MonoCodeManager *dyn_code_mp)
3731 guint32 *code32 = (void*)code;
3732 guint32 ins = *code32;
3733 guint32 prim = (ins >> 25) & 7;
3734 guint32 tval = GPOINTER_TO_UINT (target);
3736 //g_print ("patching 0x%08x (0x%08x) to point to 0x%08x\n", code, ins, target);
/* Case 1: a direct B/BL instruction (primary opcode 101b) */
3737 if (prim == 5) { /* 101b */
3738 /* the diff starts 8 bytes from the branch opcode */
3739 gint diff = target - code - 8;
3741 gint tmask = 0xffffffff;
3742 if (tval & 1) { /* entering thumb mode */
3743 diff = target - 1 - code - 8;
3744 g_assert (thumb_supported);
3745 tbits = 0xf << 28; /* bl->blx bit pattern */
3746 g_assert ((ins & (1 << 24))); /* it must be a bl, not b instruction */
3747 /* this low bit of the displacement is moved to bit 24 in the instruction encoding */
3751 tmask = ~(1 << 24); /* clear the link bit */
3752 /*g_print ("blx to thumb: target: %p, code: %p, diff: %d, mask: %x\n", target, code, diff, tmask);*/
/* Forward displacement fits the branch encoding: rewrite in place */
3757 if (diff <= 33554431) {
3759 ins = (ins & 0xff000000) | diff;
3761 *code32 = ins | tbits;
3765 /* diff between 0 and -33554432 */
3766 if (diff >= -33554432) {
3768 ins = (ins & 0xff000000) | (diff & ~0xff000000);
3770 *code32 = ins | tbits;
/* Out of direct branch range: go through a thunk */
3775 handle_thunk (domain, TRUE, code, target, dyn_code_mp);
/* Case 2: indirect call sequences with an embedded address constant */
3779 #ifdef USE_JUMP_TABLES
3781 gpointer *jte = mono_jumptable_get_entry (code);
3783 jte [0] = (gpointer) target;
3787 * The alternative call sequences looks like this:
3789 * ldr ip, [pc] // loads the address constant
3790 * b 1f // jumps around the constant
3791 * address constant embedded in the code
3796 * There are two cases for patching:
3797 * a) at the end of method emission: in this case code points to the start
3798 * of the call sequence
3799 * b) during runtime patching of the call site: in this case code points
3800 * to the mov pc, ip instruction
3802 * We have to handle also the thunk jump code sequence:
3806 * address constant // execution never reaches here
3808 if ((ins & 0x0ffffff0) == 0x12fff10) {
3809 /* Branch and exchange: the address is constructed in a reg
3810 * We can patch BX when the code sequence is the following:
3811 * ldr ip, [pc, #0] ; 0x8
3818 guint8 *emit = (guint8*)ccode;
3819 ARM_LDR_IMM (emit, ARMREG_IP, ARMREG_PC, 0);
3821 ARM_MOV_REG_REG (emit, ARMREG_LR, ARMREG_PC);
3822 ARM_BX (emit, ARMREG_IP);
3824 /*patching from magic trampoline*/
3825 if (ins == ccode [3]) {
3826 g_assert (code32 [-4] == ccode [0]);
3827 g_assert (code32 [-3] == ccode [1]);
3828 g_assert (code32 [-1] == ccode [2]);
3829 code32 [-2] = (guint32)target;
3832 /*patching from JIT*/
3833 if (ins == ccode [0]) {
3834 g_assert (code32 [1] == ccode [1]);
3835 g_assert (code32 [3] == ccode [2]);
3836 g_assert (code32 [4] == ccode [3]);
3837 code32 [2] = (guint32)target;
3840 g_assert_not_reached ();
/* BLX-register form of the call sequence */
3841 } else if ((ins & 0x0ffffff0) == 0x12fff30) {
3849 guint8 *emit = (guint8*)ccode;
3850 ARM_LDR_IMM (emit, ARMREG_IP, ARMREG_PC, 0);
3852 ARM_BLX_REG (emit, ARMREG_IP);
3854 g_assert (code32 [-3] == ccode [0]);
3855 g_assert (code32 [-2] == ccode [1]);
3856 g_assert (code32 [0] == ccode [2]);
3858 code32 [-1] = (guint32)target;
/* Fallback: ldr/mov/bx constant-load sequence (thunk jump or far call) */
3861 guint32 *tmp = ccode;
3862 guint8 *emit = (guint8*)tmp;
3863 ARM_LDR_IMM (emit, ARMREG_IP, ARMREG_PC, 0);
3864 ARM_MOV_REG_REG (emit, ARMREG_LR, ARMREG_PC);
3865 ARM_MOV_REG_REG (emit, ARMREG_PC, ARMREG_IP);
3866 ARM_BX (emit, ARMREG_IP);
3867 if (ins == ccode [2]) {
3868 g_assert_not_reached (); // should be -2 ...
3869 code32 [-1] = (guint32)target;
3872 if (ins == ccode [0]) {
3873 /* handles both thunk jump code and the far call sequence */
3874 code32 [2] = (guint32)target;
3877 g_assert_not_reached ();
3879 // g_print ("patched with 0x%08x\n", ins);
/* Patch the instruction(s) at CODE to transfer control to TARGET;
 * convenience wrapper passing no domain and no dynamic code manager. */
3884 arm_patch (guchar *code, const guchar *target)
3886 arm_patch_general (NULL, code, target, NULL);
3890 * Return the >= 0 uimm8 value if val can be represented with a byte + rotation
3891 * (with the rotation amount in *rot_amount. rot_amount is already adjusted
3892 * to be used with the emit macros.
3893 * Return -1 otherwise.
/*
 * mono_arm_is_rotated_imm8:
 *
 *   If VAL can be represented as an ARM rotated 8 bit immediate, return
 * the byte value and store the (emit-macro adjusted) rotation in
 * *rot_amount; return -1 otherwise.
 */
3896 mono_arm_is_rotated_imm8 (guint32 val, gint *rot_amount)
/* Try every even rotation of VAL.
 * NOTE(review): when i == 0 this computes val << 32, which is undefined
 * behavior in C — verify whether the compilers in use make this benign. */
3899 for (i = 0; i < 31; i+= 2) {
3900 res = (val << (32 - i)) | (val >> i);
3903 *rot_amount = i? 32 - i: 0;
3910 * Emits in code a sequence of instructions that load the value 'val'
3911 * into the dreg register. Uses at most 4 instructions.
/*
 * mono_arm_emit_load_imm:
 *
 *   Emit in CODE a sequence loading the 32 bit constant VAL into DREG:
 * a single MOV/MVN with a rotated imm8 when possible, a MOVW/MOVT pair
 * where available, otherwise a MOV plus up to three rotated ADDs building
 * the value one byte at a time. Returns the updated code pointer.
 */
3914 mono_arm_emit_load_imm (guint8 *code, int dreg, guint32 val)
3916 int imm8, rot_amount;
/* Load from a PC-relative constant pool slot */
3918 ARM_LDR_IMM (code, dreg, ARMREG_PC, 0);
3919 /* skip the constant pool */
/* VAL itself fits a rotated imm8: single MOV */
3925 if ((imm8 = mono_arm_is_rotated_imm8 (val, &rot_amount)) >= 0) {
3926 ARM_MOV_REG_IMM (code, dreg, imm8, rot_amount);
3927 } else if ((imm8 = mono_arm_is_rotated_imm8 (~val, &rot_amount)) >= 0) {
/* The complement fits: single MVN */
3928 ARM_MVN_REG_IMM (code, dreg, imm8, rot_amount);
/* MOVW low half / MOVT high half */
3931 ARM_MOVW_REG_IMM (code, dreg, val & 0xffff);
3933 ARM_MOVT_REG_IMM (code, dreg, (val >> 16) & 0xffff);
/* Fallback: build the constant byte by byte with MOV + rotated ADDs,
 * starting from the lowest non-zero byte */
3937 ARM_MOV_REG_IMM8 (code, dreg, (val & 0xFF));
3939 ARM_ADD_REG_IMM (code, dreg, dreg, (val & 0xFF00) >> 8, 24);
3941 if (val & 0xFF0000) {
3942 ARM_ADD_REG_IMM (code, dreg, dreg, (val & 0xFF0000) >> 16, 16);
3944 if (val & 0xFF000000) {
3945 ARM_ADD_REG_IMM (code, dreg, dreg, (val & 0xFF000000) >> 24, 8);
3947 } else if (val & 0xFF00) {
3948 ARM_MOV_REG_IMM (code, dreg, (val & 0xFF00) >> 8, 24);
3949 if (val & 0xFF0000) {
3950 ARM_ADD_REG_IMM (code, dreg, dreg, (val & 0xFF0000) >> 16, 16);
3952 if (val & 0xFF000000) {
3953 ARM_ADD_REG_IMM (code, dreg, dreg, (val & 0xFF000000) >> 24, 8);
3955 } else if (val & 0xFF0000) {
3956 ARM_MOV_REG_IMM (code, dreg, (val & 0xFF0000) >> 16, 16);
3957 if (val & 0xFF000000) {
3958 ARM_ADD_REG_IMM (code, dreg, dreg, (val & 0xFF000000) >> 24, 8);
3961 //g_assert_not_reached ();
/* Return the cached thumb_supported flag (whether Thumb interworking
 * via BX may be emitted). */
3967 mono_arm_thumb_supported (void)
3969 return thumb_supported;
/*
 * emit_load_volatile_arguments:
 *
 * Reload the method's incoming arguments from their stack homes back into
 * the registers the calling convention originally delivered them in,
 * undoing the spills done by the prolog ("the opposite of the code in
 * emit_prolog"); required so a tail call sees the arguments in their ABI
 * locations again.  Dispatches on each argument's ArgInfo storage class
 * (RegTypeGeneral/IRegPair, RegTypeBase, RegTypeStructByVal, ...), as
 * computed by get_call_info ().
 * NOTE(review): many lines of this function are missing from this chunk —
 * several storage-class branches are visibly truncated; consult the full
 * file before relying on branch-level details.
 */
3975 * emit_load_volatile_arguments:
3977 * Load volatile arguments from the stack to the original input registers.
3978 * Required before a tail call.
3981 emit_load_volatile_arguments (MonoCompile *cfg, guint8 *code)
3983 MonoMethod *method = cfg->method;
3984 MonoMethodSignature *sig;
3989 /* FIXME: Generate intermediate code instead */
3991 sig = mono_method_signature (method);
3993 /* This is the opposite of the code in emit_prolog */
3997 cinfo = get_call_info (cfg->generic_sharing_context, NULL, sig);
/* Hidden return-buffer address for valuetype returns, if present. */
3999 if (cinfo->vtype_retaddr) {
4000 ArgInfo *ainfo = &cinfo->ret;
4001 inst = cfg->vret_addr;
4002 g_assert (arm_is_imm12 (inst->inst_offset));
4003 ARM_LDR_IMM (code, ainfo->reg, inst->inst_basereg, inst->inst_offset);
/* One iteration per formal argument, plus 'this' when present. */
4005 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
4006 ArgInfo *ainfo = cinfo->args + i;
4007 inst = cfg->args [pos];
4009 if (cfg->verbose_level > 2)
4010 g_print ("Loading argument %d (type: %d)\n", i, ainfo->storage);
/* Argument was register-allocated: move it back to its ABI register. */
4011 if (inst->opcode == OP_REGVAR) {
4012 if (ainfo->storage == RegTypeGeneral)
4013 ARM_MOV_REG_REG (code, inst->dreg, ainfo->reg);
4014 else if (ainfo->storage == RegTypeFP) {
4015 g_assert_not_reached ();
4016 } else if (ainfo->storage == RegTypeBase) {
/* Caller-stack argument: reload relative to SP; use an IP-indexed load
 * when the offset does not fit an ARM imm12. */
4020 if (arm_is_imm12 (prev_sp_offset + ainfo->offset)) {
4021 ARM_LDR_IMM (code, inst->dreg, ARMREG_SP, (prev_sp_offset + ainfo->offset));
4023 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
4024 ARM_LDR_REG_REG (code, inst->dreg, ARMREG_SP, ARMREG_IP);
4028 g_assert_not_reached ();
/* Argument lives in a stack slot: reload into its register(s). */
4030 if (ainfo->storage == RegTypeGeneral || ainfo->storage == RegTypeIRegPair) {
4031 switch (ainfo->size) {
/* 8-byte case: the value occupies a register pair, two imm12 loads. */
4038 g_assert (arm_is_imm12 (inst->inst_offset));
4039 ARM_LDR_IMM (code, ainfo->reg, inst->inst_basereg, inst->inst_offset);
4040 g_assert (arm_is_imm12 (inst->inst_offset + 4));
4041 ARM_LDR_IMM (code, ainfo->reg + 1, inst->inst_basereg, inst->inst_offset + 4);
/* Default word-sized case, with the usual imm12 / IP-indexed split. */
4044 if (arm_is_imm12 (inst->inst_offset)) {
4045 ARM_LDR_IMM (code, ainfo->reg, inst->inst_basereg, inst->inst_offset);
4047 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
4048 ARM_LDR_REG_REG (code, ainfo->reg, inst->inst_basereg, ARMREG_IP);
/* NOTE(review): the RegTypeBaseGen and RegTypeBase branch bodies are
 * missing from this chunk. */
4052 } else if (ainfo->storage == RegTypeBaseGen) {
4055 } else if (ainfo->storage == RegTypeBase) {
4057 } else if (ainfo->storage == RegTypeFP) {
4058 g_assert_not_reached ();
/* Small struct passed by value in consecutive registers: reload each
 * pointer-sized word; doffset walks the struct's stack copy. */
4059 } else if (ainfo->storage == RegTypeStructByVal) {
4060 int doffset = inst->inst_offset;
4064 if (mono_class_from_mono_type (inst->inst_vtype))
4065 size = mono_class_native_size (mono_class_from_mono_type (inst->inst_vtype), NULL);
4066 for (cur_reg = 0; cur_reg < ainfo->size; ++cur_reg) {
4067 if (arm_is_imm12 (doffset)) {
4068 ARM_LDR_IMM (code, ainfo->reg + cur_reg, inst->inst_basereg, doffset);
4070 code = mono_arm_emit_load_imm (code, ARMREG_IP, doffset);
4071 ARM_LDR_REG_REG (code, ainfo->reg + cur_reg, inst->inst_basereg, ARMREG_IP);
4073 soffset += sizeof (gpointer);
4074 doffset += sizeof (gpointer);
/* NOTE(review): RegTypeStructByAddr body is missing from this chunk. */
4079 } else if (ainfo->storage == RegTypeStructByAddr) {
4094 mono_arch_output_basic_block (MonoCompile *cfg, MonoBasicBlock *bb)
4099 guint8 *code = cfg->native_code + cfg->code_len;
4100 MonoInst *last_ins = NULL;
4101 guint last_offset = 0;
4103 int imm8, rot_amount;
4105 /* we don't align basic blocks of loops on arm */
4107 if (cfg->verbose_level > 2)
4108 g_print ("Basic block %d starting at offset 0x%x\n", bb->block_num, bb->native_offset);
4110 cpos = bb->max_offset;
4112 if (cfg->prof_options & MONO_PROFILE_COVERAGE) {
4113 //MonoCoverageInfo *cov = mono_get_coverage_info (cfg->method);
4114 //g_assert (!mono_compile_aot);
4117 // cov->data [bb->dfn].iloffset = bb->cil_code - cfg->cil_code;
4118 /* this is not thread save, but good enough */
4119 /* fixme: howto handle overflows? */
4120 //x86_inc_mem (code, &cov->data [bb->dfn].count);
4123 if (mono_break_at_bb_method && mono_method_desc_full_match (mono_break_at_bb_method, cfg->method) && bb->block_num == mono_break_at_bb_bb_num) {
4124 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_INTERNAL_METHOD,
4125 (gpointer)"mono_break");
4126 code = emit_call_seq (cfg, code);
4129 MONO_BB_FOR_EACH_INS (bb, ins) {
4130 offset = code - cfg->native_code;
4132 max_len = ((guint8 *)ins_get_spec (ins->opcode))[MONO_INST_LEN];
4134 if (offset > (cfg->code_size - max_len - 16)) {
4135 cfg->code_size *= 2;
4136 cfg->native_code = g_realloc (cfg->native_code, cfg->code_size);
4137 code = cfg->native_code + offset;
4139 // if (ins->cil_code)
4140 // g_print ("cil code\n");
4141 mono_debug_record_line_number (cfg, ins, offset);
4143 switch (ins->opcode) {
4144 case OP_MEMORY_BARRIER:
4146 ARM_MOV_REG_IMM8 (code, ARMREG_R0, 0);
4147 ARM_MCR (code, 15, 0, ARMREG_R0, 7, 10, 5);
4151 #ifdef HAVE_AEABI_READ_TP
4152 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_INTERNAL_METHOD,
4153 (gpointer)"__aeabi_read_tp");
4154 code = emit_call_seq (cfg, code);
4156 ARM_LDR_IMM (code, ins->dreg, ARMREG_R0, ins->inst_offset);
4158 g_assert_not_reached ();
4161 case OP_ATOMIC_EXCHANGE_I4:
4162 case OP_ATOMIC_CAS_I4:
4163 case OP_ATOMIC_ADD_NEW_I4: {
4167 g_assert (v7_supported);
4170 if (ins->sreg1 != ARMREG_IP && ins->sreg2 != ARMREG_IP && ins->sreg3 != ARMREG_IP)
4172 else if (ins->sreg1 != ARMREG_R0 && ins->sreg2 != ARMREG_R0 && ins->sreg3 != ARMREG_R0)
4174 else if (ins->sreg1 != ARMREG_R1 && ins->sreg2 != ARMREG_R1 && ins->sreg3 != ARMREG_R1)
4178 g_assert (cfg->arch.atomic_tmp_offset != -1);
4179 ARM_STR_IMM (code, tmpreg, cfg->frame_reg, cfg->arch.atomic_tmp_offset);
4181 switch (ins->opcode) {
4182 case OP_ATOMIC_EXCHANGE_I4:
4184 ARM_DMB (code, ARM_DMB_SY);
4185 ARM_LDREX_REG (code, ARMREG_LR, ins->sreg1);
4186 ARM_STREX_REG (code, tmpreg, ins->sreg2, ins->sreg1);
4187 ARM_CMP_REG_IMM (code, tmpreg, 0, 0);
4189 ARM_B_COND (code, ARMCOND_NE, 0);
4190 arm_patch (buf [1], buf [0]);
4192 case OP_ATOMIC_CAS_I4:
4193 ARM_DMB (code, ARM_DMB_SY);
4195 ARM_LDREX_REG (code, ARMREG_LR, ins->sreg1);
4196 ARM_CMP_REG_REG (code, ARMREG_LR, ins->sreg3);
4198 ARM_B_COND (code, ARMCOND_NE, 0);
4199 ARM_STREX_REG (code, tmpreg, ins->sreg2, ins->sreg1);
4200 ARM_CMP_REG_IMM (code, tmpreg, 0, 0);
4202 ARM_B_COND (code, ARMCOND_NE, 0);
4203 arm_patch (buf [2], buf [1]);
4204 arm_patch (buf [1], code);
4206 case OP_ATOMIC_ADD_NEW_I4:
4208 ARM_DMB (code, ARM_DMB_SY);
4209 ARM_LDREX_REG (code, ARMREG_LR, ins->sreg1);
4210 ARM_ADD_REG_REG (code, ARMREG_LR, ARMREG_LR, ins->sreg2);
4211 ARM_STREX_REG (code, tmpreg, ARMREG_LR, ins->sreg1);
4212 ARM_CMP_REG_IMM (code, tmpreg, 0, 0);
4214 ARM_B_COND (code, ARMCOND_NE, 0);
4215 arm_patch (buf [1], buf [0]);
4218 g_assert_not_reached ();
4221 ARM_DMB (code, ARM_DMB_SY);
4222 if (tmpreg != ins->dreg)
4223 ARM_LDR_IMM (code, tmpreg, cfg->frame_reg, cfg->arch.atomic_tmp_offset);
4224 ARM_MOV_REG_REG (code, ins->dreg, ARMREG_LR);
4229 ppc_mullw (code, ppc_r4, ins->sreg1, ins->sreg2);
4230 ppc_mulhw (code, ppc_r3, ins->sreg1, ins->sreg2);
4233 ppc_mullw (code, ppc_r4, ins->sreg1, ins->sreg2);
4234 ppc_mulhwu (code, ppc_r3, ins->sreg1, ins->sreg2);
4236 case OP_STOREI1_MEMBASE_IMM:
4237 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_imm & 0xFF);
4238 g_assert (arm_is_imm12 (ins->inst_offset));
4239 ARM_STRB_IMM (code, ARMREG_LR, ins->inst_destbasereg, ins->inst_offset);
4241 case OP_STOREI2_MEMBASE_IMM:
4242 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_imm & 0xFFFF);
4243 g_assert (arm_is_imm8 (ins->inst_offset));
4244 ARM_STRH_IMM (code, ARMREG_LR, ins->inst_destbasereg, ins->inst_offset);
4246 case OP_STORE_MEMBASE_IMM:
4247 case OP_STOREI4_MEMBASE_IMM:
4248 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_imm);
4249 g_assert (arm_is_imm12 (ins->inst_offset));
4250 ARM_STR_IMM (code, ARMREG_LR, ins->inst_destbasereg, ins->inst_offset);
4252 case OP_STOREI1_MEMBASE_REG:
4253 g_assert (arm_is_imm12 (ins->inst_offset));
4254 ARM_STRB_IMM (code, ins->sreg1, ins->inst_destbasereg, ins->inst_offset);
4256 case OP_STOREI2_MEMBASE_REG:
4257 g_assert (arm_is_imm8 (ins->inst_offset));
4258 ARM_STRH_IMM (code, ins->sreg1, ins->inst_destbasereg, ins->inst_offset);
4260 case OP_STORE_MEMBASE_REG:
4261 case OP_STOREI4_MEMBASE_REG:
4262 /* this case is special, since it happens for spill code after lowering has been called */
4263 if (arm_is_imm12 (ins->inst_offset)) {
4264 ARM_STR_IMM (code, ins->sreg1, ins->inst_destbasereg, ins->inst_offset);
4266 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_offset);
4267 ARM_STR_REG_REG (code, ins->sreg1, ins->inst_destbasereg, ARMREG_LR);
4270 case OP_STOREI1_MEMINDEX:
4271 ARM_STRB_REG_REG (code, ins->sreg1, ins->inst_destbasereg, ins->sreg2);
4273 case OP_STOREI2_MEMINDEX:
4274 ARM_STRH_REG_REG (code, ins->sreg1, ins->inst_destbasereg, ins->sreg2);
4276 case OP_STORE_MEMINDEX:
4277 case OP_STOREI4_MEMINDEX:
4278 ARM_STR_REG_REG (code, ins->sreg1, ins->inst_destbasereg, ins->sreg2);
4281 g_assert_not_reached ();
4283 case OP_LOAD_MEMINDEX:
4284 case OP_LOADI4_MEMINDEX:
4285 case OP_LOADU4_MEMINDEX:
4286 ARM_LDR_REG_REG (code, ins->dreg, ins->inst_basereg, ins->sreg2);
4288 case OP_LOADI1_MEMINDEX:
4289 ARM_LDRSB_REG_REG (code, ins->dreg, ins->inst_basereg, ins->sreg2);
4291 case OP_LOADU1_MEMINDEX:
4292 ARM_LDRB_REG_REG (code, ins->dreg, ins->inst_basereg, ins->sreg2);
4294 case OP_LOADI2_MEMINDEX:
4295 ARM_LDRSH_REG_REG (code, ins->dreg, ins->inst_basereg, ins->sreg2);
4297 case OP_LOADU2_MEMINDEX:
4298 ARM_LDRH_REG_REG (code, ins->dreg, ins->inst_basereg, ins->sreg2);
4300 case OP_LOAD_MEMBASE:
4301 case OP_LOADI4_MEMBASE:
4302 case OP_LOADU4_MEMBASE:
4303 /* this case is special, since it happens for spill code after lowering has been called */
4304 if (arm_is_imm12 (ins->inst_offset)) {
4305 ARM_LDR_IMM (code, ins->dreg, ins->inst_basereg, ins->inst_offset);
4307 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_offset);
4308 ARM_LDR_REG_REG (code, ins->dreg, ins->inst_basereg, ARMREG_LR);
4311 case OP_LOADI1_MEMBASE:
4312 g_assert (arm_is_imm8 (ins->inst_offset));
4313 ARM_LDRSB_IMM (code, ins->dreg, ins->inst_basereg, ins->inst_offset);
4315 case OP_LOADU1_MEMBASE:
4316 g_assert (arm_is_imm12 (ins->inst_offset));
4317 ARM_LDRB_IMM (code, ins->dreg, ins->inst_basereg, ins->inst_offset);
4319 case OP_LOADU2_MEMBASE:
4320 g_assert (arm_is_imm8 (ins->inst_offset));
4321 ARM_LDRH_IMM (code, ins->dreg, ins->inst_basereg, ins->inst_offset);
4323 case OP_LOADI2_MEMBASE:
4324 g_assert (arm_is_imm8 (ins->inst_offset));
4325 ARM_LDRSH_IMM (code, ins->dreg, ins->inst_basereg, ins->inst_offset);
4327 case OP_ICONV_TO_I1:
4328 ARM_SHL_IMM (code, ins->dreg, ins->sreg1, 24);
4329 ARM_SAR_IMM (code, ins->dreg, ins->dreg, 24);
4331 case OP_ICONV_TO_I2:
4332 ARM_SHL_IMM (code, ins->dreg, ins->sreg1, 16);
4333 ARM_SAR_IMM (code, ins->dreg, ins->dreg, 16);
4335 case OP_ICONV_TO_U1:
4336 ARM_AND_REG_IMM8 (code, ins->dreg, ins->sreg1, 0xff);
4338 case OP_ICONV_TO_U2:
4339 ARM_SHL_IMM (code, ins->dreg, ins->sreg1, 16);
4340 ARM_SHR_IMM (code, ins->dreg, ins->dreg, 16);
4344 ARM_CMP_REG_REG (code, ins->sreg1, ins->sreg2);
4346 case OP_COMPARE_IMM:
4347 case OP_ICOMPARE_IMM:
4348 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
4349 g_assert (imm8 >= 0);
4350 ARM_CMP_REG_IMM (code, ins->sreg1, imm8, rot_amount);
4354 * gdb does not like encountering the hw breakpoint ins in the debugged code.
4355 * So instead of emitting a trap, we emit a call a C function and place a
4358 //*(int*)code = 0xef9f0001;
4361 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_INTERNAL_METHOD,
4362 (gpointer)"mono_break");
4363 code = emit_call_seq (cfg, code);
4365 case OP_RELAXED_NOP:
4370 case OP_DUMMY_STORE:
4371 case OP_DUMMY_ICONST:
4372 case OP_DUMMY_R8CONST:
4373 case OP_NOT_REACHED:
4376 case OP_SEQ_POINT: {
4378 MonoInst *info_var = cfg->arch.seq_point_info_var;
4379 MonoInst *ss_trigger_page_var = cfg->arch.ss_trigger_page_var;
4380 MonoInst *ss_read_var = cfg->arch.seq_point_read_var;
4381 MonoInst *ss_method_var = cfg->arch.seq_point_ss_method_var;
4382 MonoInst *bp_method_var = cfg->arch.seq_point_bp_method_var;
4384 int dreg = ARMREG_LR;
4386 if (cfg->soft_breakpoints) {
4387 g_assert (!cfg->compile_aot);
4391 * For AOT, we use one got slot per method, which will point to a
4392 * SeqPointInfo structure, containing all the information required
4393 * by the code below.
4395 if (cfg->compile_aot) {
4396 g_assert (info_var);
4397 g_assert (info_var->opcode == OP_REGOFFSET);
4398 g_assert (arm_is_imm12 (info_var->inst_offset));
4401 if (!cfg->soft_breakpoints) {
4403 * Read from the single stepping trigger page. This will cause a
4404 * SIGSEGV when single stepping is enabled.
4405 * We do this _before_ the breakpoint, so single stepping after
4406 * a breakpoint is hit will step to the next IL offset.
4408 g_assert (((guint64)(gsize)ss_trigger_page >> 32) == 0);
4411 if (ins->flags & MONO_INST_SINGLE_STEP_LOC) {
4412 if (cfg->soft_breakpoints) {
4413 /* Load the address of the sequence point trigger variable. */
4416 g_assert (var->opcode == OP_REGOFFSET);
4417 g_assert (arm_is_imm12 (var->inst_offset));
4418 ARM_LDR_IMM (code, dreg, var->inst_basereg, var->inst_offset);
4420 /* Read the value and check whether it is non-zero. */
4421 ARM_LDR_IMM (code, dreg, dreg, 0);
4422 ARM_CMP_REG_IMM (code, dreg, 0, 0);
4424 /* Load the address of the sequence point method. */
4425 var = ss_method_var;
4427 g_assert (var->opcode == OP_REGOFFSET);
4428 g_assert (arm_is_imm12 (var->inst_offset));
4429 ARM_LDR_IMM (code, dreg, var->inst_basereg, var->inst_offset);
4431 /* Call it conditionally. */
4432 ARM_BLX_REG_COND (code, ARMCOND_NE, dreg);
4434 if (cfg->compile_aot) {
4435 /* Load the trigger page addr from the variable initialized in the prolog */
4436 var = ss_trigger_page_var;
4438 g_assert (var->opcode == OP_REGOFFSET);
4439 g_assert (arm_is_imm12 (var->inst_offset));
4440 ARM_LDR_IMM (code, dreg, var->inst_basereg, var->inst_offset);
4442 #ifdef USE_JUMP_TABLES
4443 gpointer *jte = mono_jumptable_add_entry ();
4444 code = mono_arm_load_jumptable_entry (code, jte, dreg);
4445 jte [0] = ss_trigger_page;
4447 ARM_LDR_IMM (code, dreg, ARMREG_PC, 0);
4449 *(int*)code = (int)ss_trigger_page;
4453 ARM_LDR_IMM (code, dreg, dreg, 0);
4457 mono_add_seq_point (cfg, bb, ins, code - cfg->native_code);
4459 if (cfg->soft_breakpoints) {
4460 /* Load the address of the breakpoint method into ip. */
4461 var = bp_method_var;
4463 g_assert (var->opcode == OP_REGOFFSET);
4464 g_assert (arm_is_imm12 (var->inst_offset));
4465 ARM_LDR_IMM (code, dreg, var->inst_basereg, var->inst_offset);
4468 * A placeholder for a possible breakpoint inserted by
4469 * mono_arch_set_breakpoint ().
4472 } else if (cfg->compile_aot) {
4473 guint32 offset = code - cfg->native_code;
4476 ARM_LDR_IMM (code, dreg, info_var->inst_basereg, info_var->inst_offset);
4477 /* Add the offset */
4478 val = ((offset / 4) * sizeof (guint8*)) + G_STRUCT_OFFSET (SeqPointInfo, bp_addrs);
4479 /* Load the info->bp_addrs [offset], which is either 0 or the address of a trigger page */
4480 if (arm_is_imm12 ((int)val)) {
4481 ARM_LDR_IMM (code, dreg, dreg, val);
4483 ARM_ADD_REG_IMM (code, dreg, dreg, (val & 0xFF), 0);
4485 ARM_ADD_REG_IMM (code, dreg, dreg, (val & 0xFF00) >> 8, 24);
4487 ARM_ADD_REG_IMM (code, dreg, dreg, (val & 0xFF0000) >> 16, 16);
4488 g_assert (!(val & 0xFF000000));
4490 ARM_LDR_IMM (code, dreg, dreg, 0);
4492 /* What is faster, a branch or a load ? */
4493 ARM_CMP_REG_IMM (code, dreg, 0, 0);
4494 /* The breakpoint instruction */
4495 ARM_LDR_IMM_COND (code, dreg, dreg, 0, ARMCOND_NE);
4498 * A placeholder for a possible breakpoint inserted by
4499 * mono_arch_set_breakpoint ().
4501 for (i = 0; i < 4; ++i)
4508 ARM_ADDS_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4511 ARM_ADD_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4515 ARM_ADCS_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4518 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
4519 g_assert (imm8 >= 0);
4520 ARM_ADDS_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
4524 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
4525 g_assert (imm8 >= 0);
4526 ARM_ADD_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
4530 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
4531 g_assert (imm8 >= 0);
4532 ARM_ADCS_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
4535 ARM_ADD_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4536 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, "OverflowException");
4538 case OP_IADD_OVF_UN:
4539 ARM_ADD_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4540 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, "OverflowException");
4543 ARM_SUB_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4544 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, "OverflowException");
4546 case OP_ISUB_OVF_UN:
4547 ARM_SUB_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4548 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_TRUE, PPC_BR_EQ, "OverflowException");
4550 case OP_ADD_OVF_CARRY:
4551 ARM_ADCS_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4552 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, "OverflowException");
4554 case OP_ADD_OVF_UN_CARRY:
4555 ARM_ADCS_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4556 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, "OverflowException");
4558 case OP_SUB_OVF_CARRY:
4559 ARM_SBCS_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4560 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, "OverflowException");
4562 case OP_SUB_OVF_UN_CARRY:
4563 ARM_SBCS_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4564 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_TRUE, PPC_BR_EQ, "OverflowException");
4568 ARM_SUBS_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4571 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
4572 g_assert (imm8 >= 0);
4573 ARM_SUBS_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
4576 ARM_SUB_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4580 ARM_SBCS_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4584 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
4585 g_assert (imm8 >= 0);
4586 ARM_SUB_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
4590 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
4591 g_assert (imm8 >= 0);
4592 ARM_SBCS_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
4594 case OP_ARM_RSBS_IMM:
4595 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
4596 g_assert (imm8 >= 0);
4597 ARM_RSBS_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
4599 case OP_ARM_RSC_IMM:
4600 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
4601 g_assert (imm8 >= 0);
4602 ARM_RSC_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
4605 ARM_AND_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4609 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
4610 g_assert (imm8 >= 0);
4611 ARM_AND_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
4614 g_assert (v7s_supported);
4615 ARM_SDIV (code, ins->dreg, ins->sreg1, ins->sreg2);
4618 g_assert (v7s_supported);
4619 ARM_UDIV (code, ins->dreg, ins->sreg1, ins->sreg2);
4622 g_assert (v7s_supported);
4623 ARM_SDIV (code, ARMREG_LR, ins->sreg1, ins->sreg2);
4624 ARM_MLS (code, ins->dreg, ARMREG_LR, ins->sreg2, ins->sreg1);
4627 g_assert (v7s_supported);
4628 ARM_UDIV (code, ARMREG_LR, ins->sreg1, ins->sreg2);
4629 ARM_MLS (code, ins->dreg, ARMREG_LR, ins->sreg2, ins->sreg1);
4633 g_assert_not_reached ();
4635 ARM_ORR_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4639 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
4640 g_assert (imm8 >= 0);
4641 ARM_ORR_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
4644 ARM_EOR_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4648 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
4649 g_assert (imm8 >= 0);
4650 ARM_EOR_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
4653 ARM_SHL_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4658 ARM_SHL_IMM (code, ins->dreg, ins->sreg1, (ins->inst_imm & 0x1f));
4659 else if (ins->dreg != ins->sreg1)
4660 ARM_MOV_REG_REG (code, ins->dreg, ins->sreg1);
4663 ARM_SAR_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4668 ARM_SAR_IMM (code, ins->dreg, ins->sreg1, (ins->inst_imm & 0x1f));
4669 else if (ins->dreg != ins->sreg1)
4670 ARM_MOV_REG_REG (code, ins->dreg, ins->sreg1);
4673 case OP_ISHR_UN_IMM:
4675 ARM_SHR_IMM (code, ins->dreg, ins->sreg1, (ins->inst_imm & 0x1f));
4676 else if (ins->dreg != ins->sreg1)
4677 ARM_MOV_REG_REG (code, ins->dreg, ins->sreg1);
4680 ARM_SHR_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4683 ARM_MVN_REG_REG (code, ins->dreg, ins->sreg1);
4686 ARM_RSB_REG_IMM8 (code, ins->dreg, ins->sreg1, 0);
4689 if (ins->dreg == ins->sreg2)
4690 ARM_MUL_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4692 ARM_MUL_REG_REG (code, ins->dreg, ins->sreg2, ins->sreg1);
4695 g_assert_not_reached ();
4698 /* FIXME: handle ovf/ sreg2 != dreg */
4699 ARM_MUL_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4700 /* FIXME: MUL doesn't set the C/O flags on ARM */
4702 case OP_IMUL_OVF_UN:
4703 /* FIXME: handle ovf/ sreg2 != dreg */
4704 ARM_MUL_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4705 /* FIXME: MUL doesn't set the C/O flags on ARM */
4708 code = mono_arm_emit_load_imm (code, ins->dreg, ins->inst_c0);
4711 /* Load the GOT offset */
4712 mono_add_patch_info (cfg, offset, (MonoJumpInfoType)ins->inst_i1, ins->inst_p0);
4713 ARM_LDR_IMM (code, ins->dreg, ARMREG_PC, 0);
4715 *(gpointer*)code = NULL;
4717 /* Load the value from the GOT */
4718 ARM_LDR_REG_REG (code, ins->dreg, ARMREG_PC, ins->dreg);
4720 case OP_OBJC_GET_SELECTOR:
4721 mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_OBJC_SELECTOR_REF, ins->inst_p0);
4722 ARM_LDR_IMM (code, ins->dreg, ARMREG_PC, 0);
4724 *(gpointer*)code = NULL;
4726 ARM_LDR_REG_REG (code, ins->dreg, ARMREG_PC, ins->dreg);
4728 case OP_ICONV_TO_I4:
4729 case OP_ICONV_TO_U4:
4731 if (ins->dreg != ins->sreg1)
4732 ARM_MOV_REG_REG (code, ins->dreg, ins->sreg1);
4735 int saved = ins->sreg2;
4736 if (ins->sreg2 == ARM_LSW_REG) {
4737 ARM_MOV_REG_REG (code, ARMREG_LR, ins->sreg2);
4740 if (ins->sreg1 != ARM_LSW_REG)
4741 ARM_MOV_REG_REG (code, ARM_LSW_REG, ins->sreg1);
4742 if (saved != ARM_MSW_REG)
4743 ARM_MOV_REG_REG (code, ARM_MSW_REG, saved);
4748 ARM_CPYD (code, ins->dreg, ins->sreg1);
4750 case OP_FCONV_TO_R4:
4752 ARM_CVTD (code, ins->dreg, ins->sreg1);
4753 ARM_CVTS (code, ins->dreg, ins->dreg);
4758 * Keep in sync with mono_arch_emit_epilog
4760 g_assert (!cfg->method->save_lmf);
4762 code = emit_load_volatile_arguments (cfg, code);
4764 code = emit_big_add (code, ARMREG_SP, cfg->frame_reg, cfg->stack_usage);
4766 if (cfg->used_int_regs)
4767 ARM_POP (code, cfg->used_int_regs);
4768 ARM_POP (code, (1 << ARMREG_R7) | (1 << ARMREG_LR));
4770 ARM_POP (code, cfg->used_int_regs | (1 << ARMREG_LR));
4772 mono_add_patch_info (cfg, (guint8*) code - cfg->native_code, MONO_PATCH_INFO_METHOD_JUMP, ins->inst_p0);
4773 if (cfg->compile_aot) {
4774 ARM_LDR_IMM (code, ARMREG_IP, ARMREG_PC, 0);
4776 *(gpointer*)code = NULL;
4778 ARM_LDR_REG_REG (code, ARMREG_PC, ARMREG_PC, ARMREG_IP);
4780 code = mono_arm_patchable_b (code, ARMCOND_AL);
4784 MonoCallInst *call = (MonoCallInst*)ins;
4787 * The stack looks like the following:
4788 * <caller argument area>
4791 * <callee argument area>
4792 * Need to copy the arguments from the callee argument area to
4793 * the caller argument area, and pop the frame.
4795 if (call->stack_usage) {
4796 int i, prev_sp_offset = 0;
4798 /* Compute size of saved registers restored below */
4800 prev_sp_offset = 2 * 4;
4802 prev_sp_offset = 1 * 4;
4803 for (i = 0; i < 16; ++i) {
4804 if (cfg->used_int_regs & (1 << i))
4805 prev_sp_offset += 4;
4808 code = emit_big_add (code, ARMREG_IP, cfg->frame_reg, cfg->stack_usage + prev_sp_offset);
4810 /* Copy arguments on the stack to our argument area */
4811 for (i = 0; i < call->stack_usage; i += sizeof (mgreg_t)) {
4812 ARM_LDR_IMM (code, ARMREG_LR, ARMREG_SP, i);
4813 ARM_STR_IMM (code, ARMREG_LR, ARMREG_IP, i);
4818 * Keep in sync with mono_arch_emit_epilog
4820 g_assert (!cfg->method->save_lmf);
4822 code = emit_big_add (code, ARMREG_SP, cfg->frame_reg, cfg->stack_usage);
4824 if (cfg->used_int_regs)
4825 ARM_POP (code, cfg->used_int_regs);
4826 ARM_POP (code, (1 << ARMREG_R7) | (1 << ARMREG_LR));
4828 ARM_POP (code, cfg->used_int_regs | (1 << ARMREG_LR));
4831 mono_add_patch_info (cfg, (guint8*) code - cfg->native_code, MONO_PATCH_INFO_METHOD_JUMP, call->method);
4832 if (cfg->compile_aot) {
4833 ARM_LDR_IMM (code, ARMREG_IP, ARMREG_PC, 0);
4835 *(gpointer*)code = NULL;
4837 ARM_LDR_REG_REG (code, ARMREG_PC, ARMREG_PC, ARMREG_IP);
4839 code = mono_arm_patchable_b (code, ARMCOND_AL);
4844 /* ensure ins->sreg1 is not NULL */
4845 ARM_LDRB_IMM (code, ARMREG_LR, ins->sreg1, 0);
4848 g_assert (cfg->sig_cookie < 128);
4849 ARM_LDR_IMM (code, ARMREG_IP, cfg->frame_reg, cfg->sig_cookie);
4850 ARM_STR_IMM (code, ARMREG_IP, ins->sreg1, 0);
4859 call = (MonoCallInst*)ins;
4862 code = emit_float_args (cfg, call, code, &max_len, &offset);
4864 if (ins->flags & MONO_INST_HAS_METHOD)
4865 mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_METHOD, call->method);
4867 mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_ABS, call->fptr);
4868 code = emit_call_seq (cfg, code);
4869 ins->flags |= MONO_INST_GC_CALLSITE;
4870 ins->backend.pc_offset = code - cfg->native_code;
4871 code = emit_move_return_value (cfg, ins, code);
4877 case OP_VOIDCALL_REG:
4880 code = emit_float_args (cfg, (MonoCallInst *)ins, code, &max_len, &offset);
4882 code = emit_call_reg (code, ins->sreg1);
4883 ins->flags |= MONO_INST_GC_CALLSITE;
4884 ins->backend.pc_offset = code - cfg->native_code;
4885 code = emit_move_return_value (cfg, ins, code);
4887 case OP_FCALL_MEMBASE:
4888 case OP_LCALL_MEMBASE:
4889 case OP_VCALL_MEMBASE:
4890 case OP_VCALL2_MEMBASE:
4891 case OP_VOIDCALL_MEMBASE:
4892 case OP_CALL_MEMBASE: {
4893 gboolean imt_arg = FALSE;
4895 g_assert (ins->sreg1 != ARMREG_LR);
4896 call = (MonoCallInst*)ins;
4899 code = emit_float_args (cfg, call, code, &max_len, &offset);
4901 if (call->dynamic_imt_arg || call->method->klass->flags & TYPE_ATTRIBUTE_INTERFACE)
4903 if (!arm_is_imm12 (ins->inst_offset))
4904 code = mono_arm_emit_load_imm (code, ARMREG_IP, ins->inst_offset);
4905 #ifdef USE_JUMP_TABLES
4911 ARM_ADD_REG_IMM8 (code, ARMREG_LR, ARMREG_PC, LR_BIAS);
4913 ARM_MOV_REG_REG (code, ARMREG_LR, ARMREG_PC);
4915 if (!arm_is_imm12 (ins->inst_offset))
4916 ARM_LDR_REG_REG (code, ARMREG_PC, ins->sreg1, ARMREG_IP);
4918 ARM_LDR_IMM (code, ARMREG_PC, ins->sreg1, ins->inst_offset);
4921 * We can't embed the method in the code stream in PIC code, or
4923 * Instead, we put it in V5 in code emitted by
4924 * mono_arch_emit_imt_argument (), and embed NULL here to
4925 * signal the IMT thunk that the value is in V5.
4927 #ifdef USE_JUMP_TABLES
4928 /* In case of jumptables we always use value in V5. */
4931 if (call->dynamic_imt_arg)
4932 *((gpointer*)code) = NULL;
4934 *((gpointer*)code) = (gpointer)call->method;
4938 ins->flags |= MONO_INST_GC_CALLSITE;
4939 ins->backend.pc_offset = code - cfg->native_code;
4940 code = emit_move_return_value (cfg, ins, code);
4944 /* keep alignment */
4945 int alloca_waste = cfg->param_area;
4948 /* round the size to 8 bytes */
4949 ARM_ADD_REG_IMM8 (code, ins->dreg, ins->sreg1, 7);
4950 ARM_BIC_REG_IMM8 (code, ins->dreg, ins->dreg, 7);
4952 ARM_ADD_REG_IMM8 (code, ins->dreg, ins->dreg, alloca_waste);
4953 ARM_SUB_REG_REG (code, ARMREG_SP, ARMREG_SP, ins->dreg);
4954 /* memzero the area: dreg holds the size, sp is the pointer */
4955 if (ins->flags & MONO_INST_INIT) {
4956 guint8 *start_loop, *branch_to_cond;
4957 ARM_MOV_REG_IMM8 (code, ARMREG_LR, 0);
4958 branch_to_cond = code;
4961 ARM_STR_REG_REG (code, ARMREG_LR, ARMREG_SP, ins->dreg);
4962 arm_patch (branch_to_cond, code);
4963 /* decrement by 4 and set flags */
4964 ARM_SUBS_REG_IMM8 (code, ins->dreg, ins->dreg, sizeof (mgreg_t));
4965 ARM_B_COND (code, ARMCOND_GE, 0);
4966 arm_patch (code - 4, start_loop);
4968 ARM_ADD_REG_IMM8 (code, ins->dreg, ARMREG_SP, alloca_waste);
4973 MonoInst *var = cfg->dyn_call_var;
4975 g_assert (var->opcode == OP_REGOFFSET);
4976 g_assert (arm_is_imm12 (var->inst_offset));
4978 /* lr = args buffer filled by mono_arch_get_dyn_call_args () */
4979 ARM_MOV_REG_REG( code, ARMREG_LR, ins->sreg1);
4981 ARM_MOV_REG_REG( code, ARMREG_IP, ins->sreg2);
4983 /* Save args buffer */
4984 ARM_STR_IMM (code, ARMREG_LR, var->inst_basereg, var->inst_offset);
4986 /* Set stack slots using R0 as scratch reg */
4987 /* MONO_ARCH_DYN_CALL_PARAM_AREA gives the size of stack space available */
4988 for (i = 0; i < DYN_CALL_STACK_ARGS; ++i) {
4989 ARM_LDR_IMM (code, ARMREG_R0, ARMREG_LR, (PARAM_REGS + i) * sizeof (mgreg_t));
4990 ARM_STR_IMM (code, ARMREG_R0, ARMREG_SP, i * sizeof (mgreg_t));
4993 /* Set argument registers */
4994 for (i = 0; i < PARAM_REGS; ++i)
4995 ARM_LDR_IMM (code, i, ARMREG_LR, i * sizeof (mgreg_t));
4998 ARM_MOV_REG_REG (code, ARMREG_LR, ARMREG_PC);
4999 ARM_MOV_REG_REG (code, ARMREG_PC, ARMREG_IP);
5002 ARM_LDR_IMM (code, ARMREG_IP, var->inst_basereg, var->inst_offset);
5003 ARM_STR_IMM (code, ARMREG_R0, ARMREG_IP, G_STRUCT_OFFSET (DynCallArgs, res));
5004 ARM_STR_IMM (code, ARMREG_R1, ARMREG_IP, G_STRUCT_OFFSET (DynCallArgs, res2));
5008 if (ins->sreg1 != ARMREG_R0)
5009 ARM_MOV_REG_REG (code, ARMREG_R0, ins->sreg1);
5010 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_INTERNAL_METHOD,
5011 (gpointer)"mono_arch_throw_exception");
5012 code = emit_call_seq (cfg, code);
5016 if (ins->sreg1 != ARMREG_R0)
5017 ARM_MOV_REG_REG (code, ARMREG_R0, ins->sreg1);
5018 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_INTERNAL_METHOD,
5019 (gpointer)"mono_arch_rethrow_exception");
5020 code = emit_call_seq (cfg, code);
5023 case OP_START_HANDLER: {
5024 MonoInst *spvar = mono_find_spvar_for_region (cfg, bb->region);
5027 /* Reserve a param area, see filter-stack.exe */
5028 if (cfg->param_area) {
5029 if ((i = mono_arm_is_rotated_imm8 (cfg->param_area, &rot_amount)) >= 0) {
5030 ARM_SUB_REG_IMM (code, ARMREG_SP, ARMREG_SP, i, rot_amount);
5032 code = mono_arm_emit_load_imm (code, ARMREG_IP, cfg->param_area);
5033 ARM_SUB_REG_REG (code, ARMREG_SP, ARMREG_SP, ARMREG_IP);
5037 if (arm_is_imm12 (spvar->inst_offset)) {
5038 ARM_STR_IMM (code, ARMREG_LR, spvar->inst_basereg, spvar->inst_offset);
5040 code = mono_arm_emit_load_imm (code, ARMREG_IP, spvar->inst_offset);
5041 ARM_STR_REG_REG (code, ARMREG_LR, spvar->inst_basereg, ARMREG_IP);
5045 case OP_ENDFILTER: {
5046 MonoInst *spvar = mono_find_spvar_for_region (cfg, bb->region);
5049 /* Free the param area */
5050 if (cfg->param_area) {
5051 if ((i = mono_arm_is_rotated_imm8 (cfg->param_area, &rot_amount)) >= 0) {
5052 ARM_ADD_REG_IMM (code, ARMREG_SP, ARMREG_SP, i, rot_amount);
5054 code = mono_arm_emit_load_imm (code, ARMREG_IP, cfg->param_area);
5055 ARM_ADD_REG_REG (code, ARMREG_SP, ARMREG_SP, ARMREG_IP);
5059 if (ins->sreg1 != ARMREG_R0)
5060 ARM_MOV_REG_REG (code, ARMREG_R0, ins->sreg1);
5061 if (arm_is_imm12 (spvar->inst_offset)) {
5062 ARM_LDR_IMM (code, ARMREG_IP, spvar->inst_basereg, spvar->inst_offset);
5064 g_assert (ARMREG_IP != spvar->inst_basereg);
5065 code = mono_arm_emit_load_imm (code, ARMREG_IP, spvar->inst_offset);
5066 ARM_LDR_REG_REG (code, ARMREG_IP, spvar->inst_basereg, ARMREG_IP);
5068 ARM_MOV_REG_REG (code, ARMREG_PC, ARMREG_IP);
5071 case OP_ENDFINALLY: {
5072 MonoInst *spvar = mono_find_spvar_for_region (cfg, bb->region);
5075 /* Free the param area */
5076 if (cfg->param_area) {
5077 if ((i = mono_arm_is_rotated_imm8 (cfg->param_area, &rot_amount)) >= 0) {
5078 ARM_ADD_REG_IMM (code, ARMREG_SP, ARMREG_SP, i, rot_amount);
5080 code = mono_arm_emit_load_imm (code, ARMREG_IP, cfg->param_area);
5081 ARM_ADD_REG_REG (code, ARMREG_SP, ARMREG_SP, ARMREG_IP);
5085 if (arm_is_imm12 (spvar->inst_offset)) {
5086 ARM_LDR_IMM (code, ARMREG_IP, spvar->inst_basereg, spvar->inst_offset);
5088 g_assert (ARMREG_IP != spvar->inst_basereg);
5089 code = mono_arm_emit_load_imm (code, ARMREG_IP, spvar->inst_offset);
5090 ARM_LDR_REG_REG (code, ARMREG_IP, spvar->inst_basereg, ARMREG_IP);
5092 ARM_MOV_REG_REG (code, ARMREG_PC, ARMREG_IP);
5095 case OP_CALL_HANDLER:
5096 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_BB, ins->inst_target_bb);
5097 code = mono_arm_patchable_bl (code, ARMCOND_AL);
5098 mono_cfg_add_try_hole (cfg, ins->inst_eh_block, code, bb);
5101 ins->inst_c0 = code - cfg->native_code;
5104 /*if (ins->inst_target_bb->native_offset) {
5106 //x86_jump_code (code, cfg->native_code + ins->inst_target_bb->native_offset);
5108 mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_BB, ins->inst_target_bb);
5109 code = mono_arm_patchable_b (code, ARMCOND_AL);
5113 ARM_MOV_REG_REG (code, ARMREG_PC, ins->sreg1);
5117 * In the normal case we have:
5118 * ldr pc, [pc, ins->sreg1 << 2]
5121 * ldr lr, [pc, ins->sreg1 << 2]
5123 * The jump table data follows immediately after.
5124 * FIXME: add aot support.
5126 mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_SWITCH, ins->inst_p0);
5127 #ifdef USE_JUMP_TABLES
5129 gpointer *jte = mono_jumptable_add_entries (GPOINTER_TO_INT (ins->klass));
5130 code = mono_arm_load_jumptable_entry_addr (code, jte, ARMREG_IP);
5131 ARM_LDR_REG_REG_SHIFT (code, ARMREG_PC, ARMREG_IP, ins->sreg1, ARMSHIFT_LSL, 2);
5135 max_len += 4 * GPOINTER_TO_INT (ins->klass);
5136 if (offset + max_len > (cfg->code_size - 16)) {
5137 cfg->code_size += max_len;
5138 cfg->code_size *= 2;
5139 cfg->native_code = g_realloc (cfg->native_code, cfg->code_size);
5140 code = cfg->native_code + offset;
5142 ARM_LDR_REG_REG_SHIFT (code, ARMREG_PC, ARMREG_PC, ins->sreg1, ARMSHIFT_LSL, 2);
5144 code += 4 * GPOINTER_TO_INT (ins->klass);
5149 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 0, ARMCOND_NE);
5150 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_EQ);
5154 ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
5155 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_LT);
5159 ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
5160 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_LO);
5164 ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
5165 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_GT);
5169 ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
5170 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_HI);
5172 case OP_COND_EXC_EQ:
5173 case OP_COND_EXC_NE_UN:
5174 case OP_COND_EXC_LT:
5175 case OP_COND_EXC_LT_UN:
5176 case OP_COND_EXC_GT:
5177 case OP_COND_EXC_GT_UN:
5178 case OP_COND_EXC_GE:
5179 case OP_COND_EXC_GE_UN:
5180 case OP_COND_EXC_LE:
5181 case OP_COND_EXC_LE_UN:
5182 EMIT_COND_SYSTEM_EXCEPTION (ins->opcode - OP_COND_EXC_EQ, ins->inst_p1);
5184 case OP_COND_EXC_IEQ:
5185 case OP_COND_EXC_INE_UN:
5186 case OP_COND_EXC_ILT:
5187 case OP_COND_EXC_ILT_UN:
5188 case OP_COND_EXC_IGT:
5189 case OP_COND_EXC_IGT_UN:
5190 case OP_COND_EXC_IGE:
5191 case OP_COND_EXC_IGE_UN:
5192 case OP_COND_EXC_ILE:
5193 case OP_COND_EXC_ILE_UN:
5194 EMIT_COND_SYSTEM_EXCEPTION (ins->opcode - OP_COND_EXC_IEQ, ins->inst_p1);
5197 case OP_COND_EXC_IC:
5198 EMIT_COND_SYSTEM_EXCEPTION_FLAGS (ARMCOND_CS, ins->inst_p1);
5200 case OP_COND_EXC_OV:
5201 case OP_COND_EXC_IOV:
5202 EMIT_COND_SYSTEM_EXCEPTION_FLAGS (ARMCOND_VS, ins->inst_p1);
5204 case OP_COND_EXC_NC:
5205 case OP_COND_EXC_INC:
5206 EMIT_COND_SYSTEM_EXCEPTION_FLAGS (ARMCOND_CC, ins->inst_p1);
5208 case OP_COND_EXC_NO:
5209 case OP_COND_EXC_INO:
5210 EMIT_COND_SYSTEM_EXCEPTION_FLAGS (ARMCOND_VC, ins->inst_p1);
5222 EMIT_COND_BRANCH (ins, ins->opcode - OP_IBEQ);
5225 /* floating point opcodes */
5227 if (cfg->compile_aot) {
5228 ARM_FLDD (code, ins->dreg, ARMREG_PC, 0);
5230 *(guint32*)code = ((guint32*)(ins->inst_p0))[0];
5232 *(guint32*)code = ((guint32*)(ins->inst_p0))[1];
5235 /* FIXME: we can optimize the imm load by dealing with part of
5236 * the displacement in LDFD (aligning to 512).
5238 code = mono_arm_emit_load_imm (code, ARMREG_LR, (guint32)ins->inst_p0);
5239 ARM_FLDD (code, ins->dreg, ARMREG_LR, 0);
5243 if (cfg->compile_aot) {
5244 ARM_FLDS (code, ins->dreg, ARMREG_PC, 0);
5246 *(guint32*)code = ((guint32*)(ins->inst_p0))[0];
5248 ARM_CVTS (code, ins->dreg, ins->dreg);
5250 code = mono_arm_emit_load_imm (code, ARMREG_LR, (guint32)ins->inst_p0);
5251 ARM_FLDS (code, ins->dreg, ARMREG_LR, 0);
5252 ARM_CVTS (code, ins->dreg, ins->dreg);
5255 case OP_STORER8_MEMBASE_REG:
5256 /* This is generated by the local regalloc pass which runs after the lowering pass */
5257 if (!arm_is_fpimm8 (ins->inst_offset)) {
5258 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_offset);
5259 ARM_ADD_REG_REG (code, ARMREG_LR, ARMREG_LR, ins->inst_destbasereg);
5260 ARM_FSTD (code, ins->sreg1, ARMREG_LR, 0);
5262 ARM_FSTD (code, ins->sreg1, ins->inst_destbasereg, ins->inst_offset);
5265 case OP_LOADR8_MEMBASE:
5266 /* This is generated by the local regalloc pass which runs after the lowering pass */
5267 if (!arm_is_fpimm8 (ins->inst_offset)) {
5268 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_offset);
5269 ARM_ADD_REG_REG (code, ARMREG_LR, ARMREG_LR, ins->inst_basereg);
5270 ARM_FLDD (code, ins->dreg, ARMREG_LR, 0);
5272 ARM_FLDD (code, ins->dreg, ins->inst_basereg, ins->inst_offset);
5275 case OP_STORER4_MEMBASE_REG:
5276 g_assert (arm_is_fpimm8 (ins->inst_offset));
5277 code = mono_arm_emit_vfp_scratch_save (cfg, code, vfp_scratch1);
5278 ARM_CVTD (code, vfp_scratch1, ins->sreg1);
5279 ARM_FSTS (code, vfp_scratch1, ins->inst_destbasereg, ins->inst_offset);
5280 code = mono_arm_emit_vfp_scratch_restore (cfg, code, vfp_scratch1);
5282 case OP_LOADR4_MEMBASE:
5283 g_assert (arm_is_fpimm8 (ins->inst_offset));
5284 code = mono_arm_emit_vfp_scratch_save (cfg, code, vfp_scratch1);
5285 ARM_FLDS (code, vfp_scratch1, ins->inst_basereg, ins->inst_offset);
5286 ARM_CVTS (code, ins->dreg, vfp_scratch1);
5287 code = mono_arm_emit_vfp_scratch_restore (cfg, code, vfp_scratch1);
5289 case OP_ICONV_TO_R_UN: {
5290 g_assert_not_reached ();
5293 case OP_ICONV_TO_R4:
5294 code = mono_arm_emit_vfp_scratch_save (cfg, code, vfp_scratch1);
5295 ARM_FMSR (code, vfp_scratch1, ins->sreg1);
5296 ARM_FSITOS (code, vfp_scratch1, vfp_scratch1);
5297 ARM_CVTS (code, ins->dreg, vfp_scratch1);
5298 code = mono_arm_emit_vfp_scratch_restore (cfg, code, vfp_scratch1);
5300 case OP_ICONV_TO_R8:
5301 code = mono_arm_emit_vfp_scratch_save (cfg, code, vfp_scratch1);
5302 ARM_FMSR (code, vfp_scratch1, ins->sreg1);
5303 ARM_FSITOD (code, ins->dreg, vfp_scratch1);
5304 code = mono_arm_emit_vfp_scratch_restore (cfg, code, vfp_scratch1);
5308 if (mono_method_signature (cfg->method)->ret->type == MONO_TYPE_R4) {
5309 ARM_CVTD (code, ARM_VFP_F0, ins->sreg1);
5311 if (!IS_HARD_FLOAT) {
5312 ARM_FMRS (code, ARMREG_R0, ARM_VFP_F0);
5315 if (IS_HARD_FLOAT) {
5316 ARM_CPYD (code, ARM_VFP_D0, ins->sreg1);
5318 ARM_FMRRD (code, ARMREG_R0, ARMREG_R1, ins->sreg1);
5322 case OP_FCONV_TO_I1:
5323 code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 1, TRUE);
5325 case OP_FCONV_TO_U1:
5326 code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 1, FALSE);
5328 case OP_FCONV_TO_I2:
5329 code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 2, TRUE);
5331 case OP_FCONV_TO_U2:
5332 code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 2, FALSE);
5334 case OP_FCONV_TO_I4:
5336 code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 4, TRUE);
5338 case OP_FCONV_TO_U4:
5340 code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 4, FALSE);
5342 case OP_FCONV_TO_I8:
5343 case OP_FCONV_TO_U8:
5344 g_assert_not_reached ();
5345 /* Implemented as helper calls */
5347 case OP_LCONV_TO_R_UN:
5348 g_assert_not_reached ();
5349 /* Implemented as helper calls */
5351 case OP_LCONV_TO_OVF_I4_2: {
5352 guint8 *high_bit_not_set, *valid_negative, *invalid_negative, *valid_positive;
5354 * Valid ints: 0xffffffff:0x80000000 to 0x00000000:0x7fffffff
5357 ARM_CMP_REG_IMM8 (code, ins->sreg1, 0);
5358 high_bit_not_set = code;
5359 ARM_B_COND (code, ARMCOND_GE, 0); /*branch if bit 31 of the lower part is not set*/
5361 ARM_CMN_REG_IMM8 (code, ins->sreg2, 1); /*This has the same effect as CMP reg, 0xFFFFFFFF */
5362 valid_negative = code;
5363 ARM_B_COND (code, ARMCOND_EQ, 0); /*branch if upper part == 0xFFFFFFFF (lower part has bit 31 set) */
5364 invalid_negative = code;
5365 ARM_B_COND (code, ARMCOND_AL, 0);
5367 arm_patch (high_bit_not_set, code);
5369 ARM_CMP_REG_IMM8 (code, ins->sreg2, 0);
5370 valid_positive = code;
5371 ARM_B_COND (code, ARMCOND_EQ, 0); /*branch if upper part == 0 (lower part has bit 31 clear)*/
5373 arm_patch (invalid_negative, code);
5374 EMIT_COND_SYSTEM_EXCEPTION_FLAGS (ARMCOND_AL, "OverflowException");
5376 arm_patch (valid_negative, code);
5377 arm_patch (valid_positive, code);
5379 if (ins->dreg != ins->sreg1)
5380 ARM_MOV_REG_REG (code, ins->dreg, ins->sreg1);
5384 ARM_VFP_ADDD (code, ins->dreg, ins->sreg1, ins->sreg2);
5387 ARM_VFP_SUBD (code, ins->dreg, ins->sreg1, ins->sreg2);
5390 ARM_VFP_MULD (code, ins->dreg, ins->sreg1, ins->sreg2);
5393 ARM_VFP_DIVD (code, ins->dreg, ins->sreg1, ins->sreg2);
5396 ARM_NEGD (code, ins->dreg, ins->sreg1);
5400 g_assert_not_reached ();
5404 ARM_CMPD (code, ins->sreg1, ins->sreg2);
5410 ARM_CMPD (code, ins->sreg1, ins->sreg2);
5413 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 0, ARMCOND_NE);
5414 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_EQ);
5418 ARM_CMPD (code, ins->sreg1, ins->sreg2);
5421 ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
5422 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_MI);
5426 ARM_CMPD (code, ins->sreg1, ins->sreg2);
5429 ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
5430 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_MI);
5431 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_VS);
5435 ARM_CMPD (code, ins->sreg2, ins->sreg1);
5438 ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
5439 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_MI);
5443 ARM_CMPD (code, ins->sreg2, ins->sreg1);
5446 ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
5447 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_MI);
5448 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_VS);
5450 /* ARM FPA flags table:
5451 * N Less than ARMCOND_MI
5452 * Z Equal ARMCOND_EQ
5453 * C Greater Than or Equal ARMCOND_CS
5454 * V Unordered ARMCOND_VS
5457 EMIT_COND_BRANCH (ins, OP_IBEQ - OP_IBEQ);
5460 EMIT_COND_BRANCH (ins, OP_IBNE_UN - OP_IBEQ);
5463 EMIT_COND_BRANCH_FLAGS (ins, ARMCOND_MI); /* N set */
5466 EMIT_COND_BRANCH_FLAGS (ins, ARMCOND_VS); /* V set */
5467 EMIT_COND_BRANCH_FLAGS (ins, ARMCOND_MI); /* N set */
5473 g_assert_not_reached ();
5477 EMIT_COND_BRANCH_FLAGS (ins, ARMCOND_GE);
5479 /* FPA requires EQ even though the docs suggest that just CS is enough */
5480 EMIT_COND_BRANCH_FLAGS (ins, ARMCOND_EQ);
5481 EMIT_COND_BRANCH_FLAGS (ins, ARMCOND_CS);
5485 EMIT_COND_BRANCH_FLAGS (ins, ARMCOND_VS); /* V set */
5486 EMIT_COND_BRANCH_FLAGS (ins, ARMCOND_GE);
5491 code = mono_arm_emit_vfp_scratch_save (cfg, code, vfp_scratch1);
5492 code = mono_arm_emit_vfp_scratch_save (cfg, code, vfp_scratch2);
5494 #ifdef USE_JUMP_TABLES
5496 gpointer *jte = mono_jumptable_add_entries (2);
5497 jte [0] = GUINT_TO_POINTER (0xffffffff);
5498 jte [1] = GUINT_TO_POINTER (0x7fefffff);
5499 code = mono_arm_load_jumptable_entry_addr (code, jte, ARMREG_IP);
5500 ARM_FLDD (code, vfp_scratch1, ARMREG_IP, 0);
5503 ARM_ABSD (code, vfp_scratch2, ins->sreg1);
5504 ARM_FLDD (code, vfp_scratch1, ARMREG_PC, 0);
5506 *(guint32*)code = 0xffffffff;
5508 *(guint32*)code = 0x7fefffff;
5511 ARM_CMPD (code, vfp_scratch2, vfp_scratch1);
5513 EMIT_COND_SYSTEM_EXCEPTION_FLAGS (ARMCOND_GT, "ArithmeticException");
5514 ARM_CMPD (code, ins->sreg1, ins->sreg1);
5516 EMIT_COND_SYSTEM_EXCEPTION_FLAGS (ARMCOND_VS, "ArithmeticException");
5517 ARM_CPYD (code, ins->dreg, ins->sreg1);
5519 code = mono_arm_emit_vfp_scratch_restore (cfg, code, vfp_scratch1);
5520 code = mono_arm_emit_vfp_scratch_restore (cfg, code, vfp_scratch2);
5525 case OP_GC_LIVENESS_DEF:
5526 case OP_GC_LIVENESS_USE:
5527 case OP_GC_PARAM_SLOT_LIVENESS_DEF:
5528 ins->backend.pc_offset = code - cfg->native_code;
5530 case OP_GC_SPILL_SLOT_LIVENESS_DEF:
5531 ins->backend.pc_offset = code - cfg->native_code;
5532 bb->spill_slot_defs = g_slist_prepend_mempool (cfg->mempool, bb->spill_slot_defs, ins);
5536 g_warning ("unknown opcode %s in %s()\n", mono_inst_name (ins->opcode), __FUNCTION__);
5537 g_assert_not_reached ();
5540 if ((cfg->opt & MONO_OPT_BRANCH) && ((code - cfg->native_code - offset) > max_len)) {
5541 g_warning ("wrong maximal instruction length of instruction %s (expected %d, got %d)",
5542 mono_inst_name (ins->opcode), max_len, code - cfg->native_code - offset);
5543 g_assert_not_reached ();
5549 last_offset = offset;
5552 cfg->code_len = code - cfg->native_code;
5555 #endif /* DISABLE_JIT */
5557 #ifdef HAVE_AEABI_READ_TP
5558 void __aeabi_read_tp (void);
5562 mono_arch_register_lowlevel_calls (void)
5564 /* The signature doesn't matter */
5565 mono_register_jit_icall (mono_arm_throw_exception, "mono_arm_throw_exception", mono_create_icall_signature ("void"), TRUE);
5566 mono_register_jit_icall (mono_arm_throw_exception_by_token, "mono_arm_throw_exception_by_token", mono_create_icall_signature ("void"), TRUE);
5568 #ifndef MONO_CROSS_COMPILE
5569 #ifdef HAVE_AEABI_READ_TP
5570 mono_register_jit_icall (__aeabi_read_tp, "__aeabi_read_tp", mono_create_icall_signature ("void"), TRUE);
5575 #define patch_lis_ori(ip,val) do {\
5576 guint16 *__lis_ori = (guint16*)(ip); \
5577 __lis_ori [1] = (((guint32)(val)) >> 16) & 0xffff; \
5578 __lis_ori [3] = ((guint32)(val)) & 0xffff; \
5582 mono_arch_patch_code (MonoMethod *method, MonoDomain *domain, guint8 *code, MonoJumpInfo *ji, MonoCodeManager *dyn_code_mp, gboolean run_cctors)
5584 MonoJumpInfo *patch_info;
5585 gboolean compile_aot = !run_cctors;
5587 for (patch_info = ji; patch_info; patch_info = patch_info->next) {
5588 unsigned char *ip = patch_info->ip.i + code;
5589 const unsigned char *target;
5591 if (patch_info->type == MONO_PATCH_INFO_SWITCH && !compile_aot) {
5592 #ifdef USE_JUMP_TABLES
5593 gpointer *jt = mono_jumptable_get_entry (ip);
5595 gpointer *jt = (gpointer*)(ip + 8);
5598 /* jt is the inlined jump table, 2 instructions after ip
5599 * In the normal case we store the absolute addresses,
5600 * otherwise the displacements.
5602 for (i = 0; i < patch_info->data.table->table_size; i++)
5603 jt [i] = code + (int)patch_info->data.table->table [i];
5608 switch (patch_info->type) {
5609 case MONO_PATCH_INFO_BB:
5610 case MONO_PATCH_INFO_LABEL:
5613 /* No need to patch these */
5618 target = mono_resolve_patch_target (method, domain, code, patch_info, run_cctors);
5620 switch (patch_info->type) {
5621 case MONO_PATCH_INFO_IP:
5622 g_assert_not_reached ();
5623 patch_lis_ori (ip, ip);
5625 case MONO_PATCH_INFO_METHOD_REL:
5626 g_assert_not_reached ();
5627 *((gpointer *)(ip)) = code + patch_info->data.offset;
5629 case MONO_PATCH_INFO_METHODCONST:
5630 case MONO_PATCH_INFO_CLASS:
5631 case MONO_PATCH_INFO_IMAGE:
5632 case MONO_PATCH_INFO_FIELD:
5633 case MONO_PATCH_INFO_VTABLE:
5634 case MONO_PATCH_INFO_IID:
5635 case MONO_PATCH_INFO_SFLDA:
5636 case MONO_PATCH_INFO_LDSTR:
5637 case MONO_PATCH_INFO_TYPE_FROM_HANDLE:
5638 case MONO_PATCH_INFO_LDTOKEN:
5639 g_assert_not_reached ();
5640 /* from OP_AOTCONST : lis + ori */
5641 patch_lis_ori (ip, target);
5643 case MONO_PATCH_INFO_R4:
5644 case MONO_PATCH_INFO_R8:
5645 g_assert_not_reached ();
5646 *((gconstpointer *)(ip + 2)) = patch_info->data.target;
5648 case MONO_PATCH_INFO_EXC_NAME:
5649 g_assert_not_reached ();
5650 *((gconstpointer *)(ip + 1)) = patch_info->data.name;
5652 case MONO_PATCH_INFO_NONE:
5653 case MONO_PATCH_INFO_BB_OVF:
5654 case MONO_PATCH_INFO_EXC_OVF:
5655 /* everything is dealt with at epilog output time */
5660 arm_patch_general (domain, ip, target, dyn_code_mp);
5667 * Stack frame layout:
5669 * ------------------- fp
5670 * MonoLMF structure or saved registers
5671 * -------------------
5673 * -------------------
5675 * -------------------
5676 * optional 8 bytes for tracing
5677 * -------------------
5678 * param area size is cfg->param_area
5679 * ------------------- sp
5682 mono_arch_emit_prolog (MonoCompile *cfg)
5684 MonoMethod *method = cfg->method;
5686 MonoMethodSignature *sig;
5688 int alloc_size, orig_alloc_size, pos, max_offset, i, rot_amount;
5693 int prev_sp_offset, reg_offset;
5695 if (mono_jit_trace_calls != NULL && mono_trace_eval (method))
5698 sig = mono_method_signature (method);
5699 cfg->code_size = 256 + sig->param_count * 64;
5700 code = cfg->native_code = g_malloc (cfg->code_size);
5702 mono_emit_unwind_op_def_cfa (cfg, code, ARMREG_SP, 0);
5704 alloc_size = cfg->stack_offset;
5710 * The iphone uses R7 as the frame pointer, and it points at the saved
5715 * We can't use r7 as a frame pointer since it points into the middle of
5716 * the frame, so we keep using our own frame pointer.
5717 * FIXME: Optimize this.
5719 ARM_PUSH (code, (1 << ARMREG_R7) | (1 << ARMREG_LR));
5720 ARM_MOV_REG_REG (code, ARMREG_R7, ARMREG_SP);
5721 prev_sp_offset += 8; /* r7 and lr */
5722 mono_emit_unwind_op_def_cfa_offset (cfg, code, prev_sp_offset);
5723 mono_emit_unwind_op_offset (cfg, code, ARMREG_R7, (- prev_sp_offset) + 0);
5726 if (!method->save_lmf) {
5728 /* No need to push LR again */
5729 if (cfg->used_int_regs)
5730 ARM_PUSH (code, cfg->used_int_regs);
5732 ARM_PUSH (code, cfg->used_int_regs | (1 << ARMREG_LR));
5733 prev_sp_offset += 4;
5735 for (i = 0; i < 16; ++i) {
5736 if (cfg->used_int_regs & (1 << i))
5737 prev_sp_offset += 4;
5739 mono_emit_unwind_op_def_cfa_offset (cfg, code, prev_sp_offset);
5741 for (i = 0; i < 16; ++i) {
5742 if ((cfg->used_int_regs & (1 << i))) {
5743 mono_emit_unwind_op_offset (cfg, code, i, (- prev_sp_offset) + reg_offset);
5744 mini_gc_set_slot_type_from_cfa (cfg, (- prev_sp_offset) + reg_offset, SLOT_NOREF);
5749 mono_emit_unwind_op_offset (cfg, code, ARMREG_LR, -4);
5750 mini_gc_set_slot_type_from_cfa (cfg, -4, SLOT_NOREF);
5752 mono_emit_unwind_op_offset (cfg, code, ARMREG_LR, -4);
5753 mini_gc_set_slot_type_from_cfa (cfg, -4, SLOT_NOREF);
5756 ARM_MOV_REG_REG (code, ARMREG_IP, ARMREG_SP);
5757 ARM_PUSH (code, 0x5ff0);
5758 prev_sp_offset += 4 * 10; /* all but r0-r3, sp and pc */
5759 mono_emit_unwind_op_def_cfa_offset (cfg, code, prev_sp_offset);
5761 for (i = 0; i < 16; ++i) {
5762 if ((i > ARMREG_R3) && (i != ARMREG_SP) && (i != ARMREG_PC)) {
5763 /* The original r7 is saved at the start */
5764 if (!(iphone_abi && i == ARMREG_R7))
5765 mono_emit_unwind_op_offset (cfg, code, i, (- prev_sp_offset) + reg_offset);
5769 g_assert (reg_offset == 4 * 10);
5770 pos += sizeof (MonoLMF) - (4 * 10);
5774 orig_alloc_size = alloc_size;
5775 // align to MONO_ARCH_FRAME_ALIGNMENT bytes
5776 if (alloc_size & (MONO_ARCH_FRAME_ALIGNMENT - 1)) {
5777 alloc_size += MONO_ARCH_FRAME_ALIGNMENT - 1;
5778 alloc_size &= ~(MONO_ARCH_FRAME_ALIGNMENT - 1);
5781 /* the stack used in the pushed regs */
5782 if (prev_sp_offset & 4)
5784 cfg->stack_usage = alloc_size;
5786 if ((i = mono_arm_is_rotated_imm8 (alloc_size, &rot_amount)) >= 0) {
5787 ARM_SUB_REG_IMM (code, ARMREG_SP, ARMREG_SP, i, rot_amount);
5789 code = mono_arm_emit_load_imm (code, ARMREG_IP, alloc_size);
5790 ARM_SUB_REG_REG (code, ARMREG_SP, ARMREG_SP, ARMREG_IP);
5792 mono_emit_unwind_op_def_cfa_offset (cfg, code, prev_sp_offset + alloc_size);
5794 if (cfg->frame_reg != ARMREG_SP) {
5795 ARM_MOV_REG_REG (code, cfg->frame_reg, ARMREG_SP);
5796 mono_emit_unwind_op_def_cfa_reg (cfg, code, cfg->frame_reg);
5798 //g_print ("prev_sp_offset: %d, alloc_size:%d\n", prev_sp_offset, alloc_size);
5799 prev_sp_offset += alloc_size;
5801 for (i = 0; i < alloc_size - orig_alloc_size; i += 4)
5802 mini_gc_set_slot_type_from_cfa (cfg, (- prev_sp_offset) + orig_alloc_size + i, SLOT_NOREF);
5804 /* compute max_offset in order to use short forward jumps
5805 * we could skip doing it on arm because the immediate displacement
5806 * for jumps is large enough; it may be useful later for constant pools
5809 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
5810 MonoInst *ins = bb->code;
5811 bb->max_offset = max_offset;
5813 if (cfg->prof_options & MONO_PROFILE_COVERAGE)
5816 MONO_BB_FOR_EACH_INS (bb, ins)
5817 max_offset += ((guint8 *)ins_get_spec (ins->opcode))[MONO_INST_LEN];
5820 /* store runtime generic context */
5821 if (cfg->rgctx_var) {
5822 MonoInst *ins = cfg->rgctx_var;
5824 g_assert (ins->opcode == OP_REGOFFSET);
5826 if (arm_is_imm12 (ins->inst_offset)) {
5827 ARM_STR_IMM (code, MONO_ARCH_RGCTX_REG, ins->inst_basereg, ins->inst_offset);
5829 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_offset);
5830 ARM_STR_REG_REG (code, MONO_ARCH_RGCTX_REG, ins->inst_basereg, ARMREG_LR);
5834 /* load arguments allocated to register from the stack */
5837 cinfo = get_call_info (cfg->generic_sharing_context, NULL, sig);
5839 if (cinfo->vtype_retaddr) {
5840 ArgInfo *ainfo = &cinfo->ret;
5841 inst = cfg->vret_addr;
5842 g_assert (arm_is_imm12 (inst->inst_offset));
5843 ARM_STR_IMM (code, ainfo->reg, inst->inst_basereg, inst->inst_offset);
5846 if (sig->call_convention == MONO_CALL_VARARG) {
5847 ArgInfo *cookie = &cinfo->sig_cookie;
5849 /* Save the sig cookie address */
5850 g_assert (cookie->storage == RegTypeBase);
5852 g_assert (arm_is_imm12 (prev_sp_offset + cookie->offset));
5853 g_assert (arm_is_imm12 (cfg->sig_cookie));
5854 ARM_ADD_REG_IMM8 (code, ARMREG_IP, cfg->frame_reg, prev_sp_offset + cookie->offset);
5855 ARM_STR_IMM (code, ARMREG_IP, cfg->frame_reg, cfg->sig_cookie);
5858 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
5859 ArgInfo *ainfo = cinfo->args + i;
5860 inst = cfg->args [pos];
5862 if (cfg->verbose_level > 2)
5863 g_print ("Saving argument %d (type: %d)\n", i, ainfo->storage);
5864 if (inst->opcode == OP_REGVAR) {
5865 if (ainfo->storage == RegTypeGeneral)
5866 ARM_MOV_REG_REG (code, inst->dreg, ainfo->reg);
5867 else if (ainfo->storage == RegTypeFP) {
5868 g_assert_not_reached ();
5869 } else if (ainfo->storage == RegTypeBase) {
5870 if (arm_is_imm12 (prev_sp_offset + ainfo->offset)) {
5871 ARM_LDR_IMM (code, inst->dreg, ARMREG_SP, (prev_sp_offset + ainfo->offset));
5873 code = mono_arm_emit_load_imm (code, ARMREG_IP, prev_sp_offset + ainfo->offset);
5874 ARM_LDR_REG_REG (code, inst->dreg, ARMREG_SP, ARMREG_IP);
5877 g_assert_not_reached ();
5879 if (cfg->verbose_level > 2)
5880 g_print ("Argument %d assigned to register %s\n", pos, mono_arch_regname (inst->dreg));
5882 /* the argument should be put on the stack: FIXME handle size != word */
5883 if (ainfo->storage == RegTypeGeneral || ainfo->storage == RegTypeIRegPair || ainfo->storage == RegTypeGSharedVtInReg) {
5884 switch (ainfo->size) {
5886 if (arm_is_imm12 (inst->inst_offset))
5887 ARM_STRB_IMM (code, ainfo->reg, inst->inst_basereg, inst->inst_offset);
5889 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
5890 ARM_STRB_REG_REG (code, ainfo->reg, inst->inst_basereg, ARMREG_IP);
5894 if (arm_is_imm8 (inst->inst_offset)) {
5895 ARM_STRH_IMM (code, ainfo->reg, inst->inst_basereg, inst->inst_offset);
5897 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
5898 ARM_STRH_REG_REG (code, ainfo->reg, inst->inst_basereg, ARMREG_IP);
5902 if (arm_is_imm12 (inst->inst_offset)) {
5903 ARM_STR_IMM (code, ainfo->reg, inst->inst_basereg, inst->inst_offset);
5905 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
5906 ARM_STR_REG_REG (code, ainfo->reg, inst->inst_basereg, ARMREG_IP);
5908 if (arm_is_imm12 (inst->inst_offset + 4)) {
5909 ARM_STR_IMM (code, ainfo->reg + 1, inst->inst_basereg, inst->inst_offset + 4);
5911 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset + 4);
5912 ARM_STR_REG_REG (code, ainfo->reg + 1, inst->inst_basereg, ARMREG_IP);
5916 if (arm_is_imm12 (inst->inst_offset)) {
5917 ARM_STR_IMM (code, ainfo->reg, inst->inst_basereg, inst->inst_offset);
5919 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
5920 ARM_STR_REG_REG (code, ainfo->reg, inst->inst_basereg, ARMREG_IP);
5924 } else if (ainfo->storage == RegTypeBaseGen) {
5925 if (arm_is_imm12 (prev_sp_offset + ainfo->offset)) {
5926 ARM_LDR_IMM (code, ARMREG_LR, ARMREG_SP, (prev_sp_offset + ainfo->offset));
5928 code = mono_arm_emit_load_imm (code, ARMREG_IP, prev_sp_offset + ainfo->offset);
5929 ARM_LDR_REG_REG (code, ARMREG_LR, ARMREG_SP, ARMREG_IP);
5931 if (arm_is_imm12 (inst->inst_offset + 4)) {
5932 ARM_STR_IMM (code, ARMREG_LR, inst->inst_basereg, inst->inst_offset + 4);
5933 ARM_STR_IMM (code, ARMREG_R3, inst->inst_basereg, inst->inst_offset);
5935 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset + 4);
5936 ARM_STR_REG_REG (code, ARMREG_LR, inst->inst_basereg, ARMREG_IP);
5937 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
5938 ARM_STR_REG_REG (code, ARMREG_R3, inst->inst_basereg, ARMREG_IP);
5940 } else if (ainfo->storage == RegTypeBase || ainfo->storage == RegTypeGSharedVtOnStack) {
5941 if (arm_is_imm12 (prev_sp_offset + ainfo->offset)) {
5942 ARM_LDR_IMM (code, ARMREG_LR, ARMREG_SP, (prev_sp_offset + ainfo->offset));
5944 code = mono_arm_emit_load_imm (code, ARMREG_IP, prev_sp_offset + ainfo->offset);
5945 ARM_LDR_REG_REG (code, ARMREG_LR, ARMREG_SP, ARMREG_IP);
5948 switch (ainfo->size) {
5950 if (arm_is_imm8 (inst->inst_offset)) {
5951 ARM_STRB_IMM (code, ARMREG_LR, inst->inst_basereg, inst->inst_offset);
5953 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
5954 ARM_STRB_REG_REG (code, ARMREG_LR, inst->inst_basereg, ARMREG_IP);
5958 if (arm_is_imm8 (inst->inst_offset)) {
5959 ARM_STRH_IMM (code, ARMREG_LR, inst->inst_basereg, inst->inst_offset);
5961 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
5962 ARM_STRH_REG_REG (code, ARMREG_LR, inst->inst_basereg, ARMREG_IP);
5966 if (arm_is_imm12 (inst->inst_offset)) {
5967 ARM_STR_IMM (code, ARMREG_LR, inst->inst_basereg, inst->inst_offset);
5969 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
5970 ARM_STR_REG_REG (code, ARMREG_LR, inst->inst_basereg, ARMREG_IP);
5972 if (arm_is_imm12 (prev_sp_offset + ainfo->offset + 4)) {
5973 ARM_LDR_IMM (code, ARMREG_LR, ARMREG_SP, (prev_sp_offset + ainfo->offset + 4));
5975 code = mono_arm_emit_load_imm (code, ARMREG_IP, prev_sp_offset + ainfo->offset + 4);
5976 ARM_LDR_REG_REG (code, ARMREG_LR, ARMREG_SP, ARMREG_IP);
5978 if (arm_is_imm12 (inst->inst_offset + 4)) {
5979 ARM_STR_IMM (code, ARMREG_LR, inst->inst_basereg, inst->inst_offset + 4);
5981 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset + 4);
5982 ARM_STR_REG_REG (code, ARMREG_LR, inst->inst_basereg, ARMREG_IP);
5986 if (arm_is_imm12 (inst->inst_offset)) {
5987 ARM_STR_IMM (code, ARMREG_LR, inst->inst_basereg, inst->inst_offset);
5989 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
5990 ARM_STR_REG_REG (code, ARMREG_LR, inst->inst_basereg, ARMREG_IP);
5994 } else if (ainfo->storage == RegTypeFP) {
5995 int imm8, rot_amount;
5997 if ((imm8 = mono_arm_is_rotated_imm8 (inst->inst_offset, &rot_amount)) == -1) {
5998 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
5999 ARM_ADD_REG_REG (code, ARMREG_IP, ARMREG_IP, inst->inst_basereg);
6001 ARM_ADD_REG_IMM (code, ARMREG_IP, inst->inst_basereg, imm8, rot_amount);
6003 if (ainfo->size == 8)
6004 ARM_FSTD (code, ainfo->reg, ARMREG_IP, 0);
6006 ARM_FSTS (code, ainfo->reg, ARMREG_IP, 0);
6007 } else if (ainfo->storage == RegTypeStructByVal) {
6008 int doffset = inst->inst_offset;
6012 size = mini_type_stack_size_full (cfg->generic_sharing_context, inst->inst_vtype, NULL, sig->pinvoke);
6013 for (cur_reg = 0; cur_reg < ainfo->size; ++cur_reg) {
6014 if (arm_is_imm12 (doffset)) {
6015 ARM_STR_IMM (code, ainfo->reg + cur_reg, inst->inst_basereg, doffset);
6017 code = mono_arm_emit_load_imm (code, ARMREG_IP, doffset);
6018 ARM_STR_REG_REG (code, ainfo->reg + cur_reg, inst->inst_basereg, ARMREG_IP);
6020 soffset += sizeof (gpointer);
6021 doffset += sizeof (gpointer);
6023 if (ainfo->vtsize) {
6024 /* FIXME: handle overrun! with struct sizes not multiple of 4 */
6025 //g_print ("emit_memcpy (prev_sp_ofs: %d, ainfo->offset: %d, soffset: %d)\n", prev_sp_offset, ainfo->offset, soffset);
6026 code = emit_memcpy (code, ainfo->vtsize * sizeof (gpointer), inst->inst_basereg, doffset, ARMREG_SP, prev_sp_offset + ainfo->offset);
6028 } else if (ainfo->storage == RegTypeStructByAddr) {
6029 g_assert_not_reached ();
6030 /* FIXME: handle overrun! with struct sizes not multiple of 4 */
6031 code = emit_memcpy (code, ainfo->vtsize * sizeof (gpointer), inst->inst_basereg, inst->inst_offset, ainfo->reg, 0);
6033 g_assert_not_reached ();
6038 if (method->save_lmf)
6039 code = emit_save_lmf (cfg, code, alloc_size - lmf_offset);
6042 code = mono_arch_instrument_prolog (cfg, mono_trace_enter_method, code, TRUE);
6044 if (cfg->arch.seq_point_info_var) {
6045 MonoInst *ins = cfg->arch.seq_point_info_var;
6047 /* Initialize the variable from a GOT slot */
6048 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_SEQ_POINT_INFO, cfg->method);
6049 #ifdef USE_JUMP_TABLES
6051 gpointer *jte = mono_jumptable_add_entry ();
6052 code = mono_arm_load_jumptable_entry (code, jte, ARMREG_IP);
6053 ARM_LDR_IMM (code, ARMREG_R0, ARMREG_IP, 0);
6055 /** XXX: is it correct? */
6057 ARM_LDR_IMM (code, ARMREG_R0, ARMREG_PC, 0);
6059 *(gpointer*)code = NULL;
6062 ARM_LDR_REG_REG (code, ARMREG_R0, ARMREG_PC, ARMREG_R0);
6064 g_assert (ins->opcode == OP_REGOFFSET);
6066 if (arm_is_imm12 (ins->inst_offset)) {
6067 ARM_STR_IMM (code, ARMREG_R0, ins->inst_basereg, ins->inst_offset);
6069 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_offset);
6070 ARM_STR_REG_REG (code, ARMREG_R0, ins->inst_basereg, ARMREG_LR);
6074 /* Initialize ss_trigger_page_var */
6075 if (!cfg->soft_breakpoints) {
6076 MonoInst *info_var = cfg->arch.seq_point_info_var;
6077 MonoInst *ss_trigger_page_var = cfg->arch.ss_trigger_page_var;
6078 int dreg = ARMREG_LR;
6081 g_assert (info_var->opcode == OP_REGOFFSET);
6082 g_assert (arm_is_imm12 (info_var->inst_offset));
6084 ARM_LDR_IMM (code, dreg, info_var->inst_basereg, info_var->inst_offset);
6085 /* Load the trigger page addr */
6086 ARM_LDR_IMM (code, dreg, dreg, G_STRUCT_OFFSET (SeqPointInfo, ss_trigger_page));
6087 ARM_STR_IMM (code, dreg, ss_trigger_page_var->inst_basereg, ss_trigger_page_var->inst_offset);
6091 if (cfg->arch.seq_point_read_var) {
6092 MonoInst *read_ins = cfg->arch.seq_point_read_var;
6093 MonoInst *ss_method_ins = cfg->arch.seq_point_ss_method_var;
6094 MonoInst *bp_method_ins = cfg->arch.seq_point_bp_method_var;
6095 #ifdef USE_JUMP_TABLES
6098 g_assert (read_ins->opcode == OP_REGOFFSET);
6099 g_assert (arm_is_imm12 (read_ins->inst_offset));
6100 g_assert (ss_method_ins->opcode == OP_REGOFFSET);
6101 g_assert (arm_is_imm12 (ss_method_ins->inst_offset));
6102 g_assert (bp_method_ins->opcode == OP_REGOFFSET);
6103 g_assert (arm_is_imm12 (bp_method_ins->inst_offset));
6105 #ifdef USE_JUMP_TABLES
6106 jte = mono_jumptable_add_entries (3);
6107 jte [0] = (gpointer)&ss_trigger_var;
6108 jte [1] = single_step_func_wrapper;
6109 jte [2] = breakpoint_func_wrapper;
6110 code = mono_arm_load_jumptable_entry_addr (code, jte, ARMREG_LR);
6112 ARM_MOV_REG_REG (code, ARMREG_LR, ARMREG_PC);
6114 *(volatile int **)code = &ss_trigger_var;
6116 *(gpointer*)code = single_step_func_wrapper;
6118 *(gpointer*)code = breakpoint_func_wrapper;
6122 ARM_LDR_IMM (code, ARMREG_IP, ARMREG_LR, 0);
6123 ARM_STR_IMM (code, ARMREG_IP, read_ins->inst_basereg, read_ins->inst_offset);
6124 ARM_LDR_IMM (code, ARMREG_IP, ARMREG_LR, 4);
6125 ARM_STR_IMM (code, ARMREG_IP, ss_method_ins->inst_basereg, ss_method_ins->inst_offset);
6126 ARM_LDR_IMM (code, ARMREG_IP, ARMREG_LR, 8);
6127 ARM_STR_IMM (code, ARMREG_IP, bp_method_ins->inst_basereg, bp_method_ins->inst_offset);
6130 cfg->code_len = code - cfg->native_code;
6131 g_assert (cfg->code_len < cfg->code_size);
/*
 * mono_arch_emit_epilog:
 *   Emit the method epilogue: grow the native code buffer if needed, restore
 *   callee-saved registers (either from the LMF or directly from the stack),
 *   unwind the frame and return by popping LR into PC.
 *   NOTE(review): this view is elided — several original lines (braces,
 *   declarations such as 'code' and 'cinfo', else-arms) are not visible here.
 */
6138 mono_arch_emit_epilog (MonoCompile *cfg)
6140 MonoMethod *method = cfg->method;
6141 int pos, i, rot_amount;
/* Conservative upper bound on epilogue size; widened below per feature. */
6142 int max_epilog_size = 16 + 20*4;
6146 if (cfg->method->save_lmf)
6147 max_epilog_size += 128;
6149 if (mono_jit_trace_calls != NULL)
6150 max_epilog_size += 50;
6152 if (cfg->prof_options & MONO_PROFILE_ENTER_LEAVE)
6153 max_epilog_size += 50;
/* Double the buffer until the epilogue is guaranteed to fit (16-byte slack). */
6155 while (cfg->code_len + max_epilog_size > (cfg->code_size - 16)) {
6156 cfg->code_size *= 2;
6157 cfg->native_code = g_realloc (cfg->native_code, cfg->code_size);
6158 cfg->stat_code_reallocs++;
6162 * Keep in sync with OP_JMP
6164 code = cfg->native_code + cfg->code_len;
6166 if (mono_jit_trace_calls != NULL && mono_trace_eval (method)) {
6167 code = mono_arch_instrument_epilog (cfg, mono_trace_leave_method, code, TRUE);
6171 /* Load returned vtypes into registers if needed */
6172 cinfo = cfg->arch.cinfo;
6173 if (cinfo->ret.storage == RegTypeStructByVal) {
6174 MonoInst *ins = cfg->ret;
/* Small offsets fit the LDR imm12 field; otherwise go through LR. */
6176 if (arm_is_imm12 (ins->inst_offset)) {
6177 ARM_LDR_IMM (code, ARMREG_R0, ins->inst_basereg, ins->inst_offset);
6179 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_offset);
6180 ARM_LDR_REG_REG (code, ARMREG_R0, ins->inst_basereg, ARMREG_LR);
/* LMF path: restore saved registers from the MonoLMF stored in the frame. */
6184 if (method->save_lmf) {
6185 int lmf_offset, reg, sp_adj, regmask;
6186 /* all but r0-r3, sp and pc */
6187 pos += sizeof (MonoLMF) - (MONO_ARM_NUM_SAVED_REGS * sizeof (mgreg_t));
6190 code = emit_restore_lmf (cfg, code, cfg->stack_usage - lmf_offset);
6192 /* This points to r4 inside MonoLMF->iregs */
6193 sp_adj = (sizeof (MonoLMF) - MONO_ARM_NUM_SAVED_REGS * sizeof (mgreg_t));
6195 regmask = 0x9ff0; /* restore lr to pc */
6196 /* Skip caller saved registers not used by the method */
6197 while (!(cfg->used_int_regs & (1 << reg)) && reg < ARMREG_FP) {
6198 regmask &= ~(1 << reg);
6203 /* Restored later */
6204 regmask &= ~(1 << ARMREG_PC);
6205 /* point sp at the registers to restore: 10 is 14 -4, because we skip r0-r3 */
6206 code = emit_big_add (code, ARMREG_SP, cfg->frame_reg, cfg->stack_usage - lmf_offset + sp_adj);
6208 ARM_POP (code, regmask);
6210 /* Restore saved r7, restore LR to PC */
6211 /* Skip lr from the lmf */
6212 ARM_ADD_REG_IMM (code, ARMREG_SP, ARMREG_SP, sizeof (gpointer), 0);
6213 ARM_POP (code, (1 << ARMREG_R7) | (1 << ARMREG_PC));
/* Non-LMF path: pop SP back past the frame, then restore saved registers. */
6216 if ((i = mono_arm_is_rotated_imm8 (cfg->stack_usage, &rot_amount)) >= 0) {
6217 ARM_ADD_REG_IMM (code, ARMREG_SP, cfg->frame_reg, i, rot_amount);
6219 code = mono_arm_emit_load_imm (code, ARMREG_IP, cfg->stack_usage);
6220 ARM_ADD_REG_REG (code, ARMREG_SP, cfg->frame_reg, ARMREG_IP);
6224 /* Restore saved gregs */
6225 if (cfg->used_int_regs)
6226 ARM_POP (code, cfg->used_int_regs)
6227 /* Restore saved r7, restore LR to PC */
6228 ARM_POP (code, (1 << ARMREG_R7) | (1 << ARMREG_PC));
6230 ARM_POP (code, cfg->used_int_regs | (1 << ARMREG_PC));
6234 cfg->code_len = code - cfg->native_code;
6236 g_assert (cfg->code_len < cfg->code_size);
/*
 * mono_arch_emit_exceptions:
 *   Emit out-of-line exception throw stubs for MONO_PATCH_INFO_EXC patches.
 *   Identical exception types share one stub: the first occurrence emits the
 *   stub, later occurrences are patched to branch to it.
 *   NOTE(review): elided view — closing braces and some else-arms are missing.
 */
6241 mono_arch_emit_exceptions (MonoCompile *cfg)
6243 MonoJumpInfo *patch_info;
6246 guint8* exc_throw_pos [MONO_EXC_INTRINS_NUM];
6247 guint8 exc_throw_found [MONO_EXC_INTRINS_NUM];
6248 int max_epilog_size = 50;
6250 for (i = 0; i < MONO_EXC_INTRINS_NUM; i++) {
6251 exc_throw_pos [i] = NULL;
6252 exc_throw_found [i] = 0;
6255 /* count the number of exception infos */
6258 * make sure we have enough space for exceptions
/* First pass: size estimation — 32 bytes per distinct exception type. */
6260 for (patch_info = cfg->patch_info; patch_info; patch_info = patch_info->next) {
6261 if (patch_info->type == MONO_PATCH_INFO_EXC) {
6262 i = mini_exception_id_by_name (patch_info->data.target);
6263 if (!exc_throw_found [i]) {
6264 max_epilog_size += 32;
6265 exc_throw_found [i] = TRUE;
6270 while (cfg->code_len + max_epilog_size > (cfg->code_size - 16)) {
6271 cfg->code_size *= 2;
6272 cfg->native_code = g_realloc (cfg->native_code, cfg->code_size);
6273 cfg->stat_code_reallocs++;
6276 code = cfg->native_code + cfg->code_len;
6278 /* add code to raise exceptions */
6279 for (patch_info = cfg->patch_info; patch_info; patch_info = patch_info->next) {
6280 switch (patch_info->type) {
6281 case MONO_PATCH_INFO_EXC: {
6282 MonoClass *exc_class;
6283 unsigned char *ip = patch_info->ip.i + cfg->native_code;
6285 i = mini_exception_id_by_name (patch_info->data.target);
/* Reuse an already-emitted stub for the same exception type. */
6286 if (exc_throw_pos [i]) {
6287 arm_patch (ip, exc_throw_pos [i]);
6288 patch_info->type = MONO_PATCH_INFO_NONE;
6291 exc_throw_pos [i] = code;
6293 arm_patch (ip, code);
6295 exc_class = mono_class_from_name (mono_defaults.corlib, "System", patch_info->data.name);
6296 g_assert (exc_class);
/* R1 = return address of the faulting site (needed by the throw helper). */
6298 ARM_MOV_REG_REG (code, ARMREG_R1, ARMREG_LR);
6299 #ifdef USE_JUMP_TABLES
6301 gpointer *jte = mono_jumptable_add_entries (2);
/* Repurpose the patch to call mono_arch_throw_corlib_exception. */
6302 patch_info->type = MONO_PATCH_INFO_INTERNAL_METHOD;
6303 patch_info->data.name = "mono_arch_throw_corlib_exception";
6304 patch_info->ip.i = code - cfg->native_code;
6305 code = mono_arm_load_jumptable_entry_addr (code, jte, ARMREG_R0);
6306 ARM_LDR_IMM (code, ARMREG_IP, ARMREG_R0, 0);
6307 ARM_LDR_IMM (code, ARMREG_R0, ARMREG_R0, 4);
6308 ARM_BLX_REG (code, ARMREG_IP);
6309 jte [1] = GUINT_TO_POINTER (exc_class->type_token);
/* Non-jumptable path: token is embedded in the code stream after the LDR. */
6312 ARM_LDR_IMM (code, ARMREG_R0, ARMREG_PC, 0);
6313 patch_info->type = MONO_PATCH_INFO_INTERNAL_METHOD;
6314 patch_info->data.name = "mono_arch_throw_corlib_exception";
6315 patch_info->ip.i = code - cfg->native_code;
6317 *(guint32*)(gpointer)code = exc_class->type_token;
6328 cfg->code_len = code - cfg->native_code;
6330 g_assert (cfg->code_len < cfg->code_size);
6334 #endif /* #ifndef DISABLE_JIT */
6337 mono_arch_finish_init (void)
6342 mono_arch_free_jit_tls_data (MonoJitTlsData *tls)
6347 mono_arch_emit_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
6354 mono_arch_print_tree (MonoInst *tree, int arity)
6364 mono_arch_get_patch_offset (guint8 *code)
6371 mono_arch_flush_register_windows (void)
6375 #ifdef MONO_ARCH_HAVE_IMT
/*
 * mono_arch_emit_imt_argument:
 *   Arrange for the IMT/method argument to be passed to an interface call.
 *   On ARM it is passed in V5 (r5) whenever AOT, LLVM, generics or jump
 *   tables require a register; otherwise it is embedded in the code stream
 *   (handled elsewhere). NOTE(review): elided view — some branches/braces
 *   are not visible.
 */
6380 mono_arch_emit_imt_argument (MonoCompile *cfg, MonoCallInst *call, MonoInst *imt_arg)
6382 int method_reg = mono_alloc_ireg (cfg);
6383 #ifdef USE_JUMP_TABLES
6384 int use_jumptables = TRUE;
6386 int use_jumptables = FALSE;
6389 if (cfg->compile_aot) {
6392 call->dynamic_imt_arg = TRUE;
6395 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, method_reg, imt_arg->dreg);
/* AOT: materialize the method as an AOT constant in method_reg. */
6397 MONO_INST_NEW (cfg, ins, OP_AOTCONST);
6398 ins->dreg = method_reg;
6399 ins->inst_p0 = call->method;
6400 ins->inst_c1 = MONO_PATCH_INFO_METHODCONST;
6401 MONO_ADD_INS (cfg->cbb, ins);
6403 mono_call_inst_add_outarg_reg (cfg, call, method_reg, ARMREG_V5, FALSE);
6404 } else if (cfg->generic_context || imt_arg || mono_use_llvm || use_jumptables) {
6405 /* Always pass in a register for simplicity */
6406 call->dynamic_imt_arg = TRUE;
6408 cfg->uses_rgctx_reg = TRUE;
6411 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, method_reg, imt_arg->dreg);
/* No explicit imt_arg: pass the target method pointer itself. */
6415 MONO_INST_NEW (cfg, ins, OP_PCONST);
6416 ins->inst_p0 = call->method;
6417 ins->dreg = method_reg;
6418 MONO_ADD_INS (cfg->cbb, ins);
6421 mono_call_inst_add_outarg_reg (cfg, call, method_reg, ARMREG_V5, FALSE);
/*
 * mono_arch_find_imt_method:
 *   Recover the IMT method for an interface call: either from register V5
 *   (jump tables, LLVM, AOT, gsharedvt) or from the literal embedded in the
 *   code stream right after the LDR that loaded it.
 *   NOTE(review): elided view — some conditions/braces around these branches
 *   are not visible.
 */
6428 mono_arch_find_imt_method (mgreg_t *regs, guint8 *code)
6430 #ifdef USE_JUMP_TABLES
6431 return (MonoMethod*)regs [ARMREG_V5];
6434 guint32 *code_ptr = (guint32*)code;
6436 method = GUINT_TO_POINTER (code_ptr [1]);
6440 return (MonoMethod*)regs [ARMREG_V5];
6442 /* The IMT value is stored in the code stream right after the LDC instruction. */
6443 /* This is no longer true for the gsharedvt_in trampoline */
6445 if (!IS_LDR_PC (code_ptr [0])) {
6446 g_warning ("invalid code stream, instruction before IMT value is not a LDC in %s() (code %p value 0: 0x%x -1: 0x%x -2: 0x%x)", __FUNCTION__, code, code_ptr [2], code_ptr [1], code_ptr [0]);
6447 g_assert (IS_LDR_PC (code_ptr [0]));
6451 /* This is AOTed code, or the gsharedvt trampoline, the IMT method is in V5 */
6452 return (MonoMethod*)regs [ARMREG_V5];
6454 return (MonoMethod*) method;
/*
 * mono_arch_find_static_call_vtable:
 *   The vtable for a static rgctx call is passed in the RGCTX register;
 *   just read it back from the saved register state.
 */
6459 mono_arch_find_static_call_vtable (mgreg_t *regs, guint8 *code)
6461 return (MonoVTable*) regs [MONO_ARCH_RGCTX_REG];
6464 /* #define ENABLE_WRONG_METHOD_CHECK 1 */
6465 #define BASE_SIZE (6 * 4)
6466 #define BSEARCH_ENTRY_SIZE (4 * 4)
6467 #define CMP_SIZE (3 * 4)
6468 #define BRANCH_SIZE (1 * 4)
6469 #define CALL_SIZE (2 * 4)
6470 #define WMC_SIZE (8 * 4)
6471 #define DISTANCE(A, B) (((gint32)(B)) - ((gint32)(A)))
6473 #ifdef USE_JUMP_TABLES
/*
 * Helpers for the IMT thunk builder.
 * NOTE(review): elided view — function bodies are missing braces/returns here.
 */
/* set_jumptable_element: store VALUE in slot INDEX, asserting it was empty. */
6475 set_jumptable_element (gpointer *base, guint32 index, gpointer value)
6477 g_assert (base [index] == NULL);
6478 base [index] = value;
/*
 * load_element_with_regbase_cond: emit a conditional load of jump-table slot
 * JTI (byte offset jti*4 from BASE) into DREG. Falls back to MOVW/MOVT plus a
 * register-offset LDR when the offset does not fit an imm12.
 */
6481 load_element_with_regbase_cond (arminstr_t *code, ARMReg dreg, ARMReg base, guint32 jti, int cond)
6483 if (arm_is_imm12 (jti * 4)) {
6484 ARM_LDR_IMM_COND (code, dreg, base, jti * 4, cond);
6486 ARM_MOVW_REG_IMM_COND (code, dreg, (jti * 4) & 0xffff, cond);
6487 if ((jti * 4) >> 16)
6488 ARM_MOVT_REG_IMM_COND (code, dreg, ((jti * 4) >> 16) & 0xffff, cond);
6489 ARM_LDR_REG_REG_SHIFT_COND (code, dreg, base, dreg, ARMSHIFT_LSL, 0, cond);
/*
 * arm_emit_value_and_patch_ldr: append VALUE as a literal at CODE and patch
 * the imm12 displacement of the pc-relative LDR at TARGET to reach it.
 * The literal must be within 0xFFF bytes of the LDR (asserted).
 */
6495 arm_emit_value_and_patch_ldr (arminstr_t *code, arminstr_t *target, guint32 value)
6497 guint32 delta = DISTANCE (target, code);
6499 g_assert (delta >= 0 && delta <= 0xFFF);
6500 *target = *target | delta;
6506 #ifdef ENABLE_WRONG_METHOD_CHECK
/* mini_dump_bad_imt: debug trap target used by the wrong-method check. */
6508 mini_dump_bad_imt (int input_imt, int compared_imt, int pc)
6510 g_print ("BAD IMT comparing %x with expected %x at ip %x", input_imt, compared_imt, pc);
/*
 * mono_arch_build_imt_thunk:
 *   Build the native IMT dispatch thunk for VTABLE: a (possibly binary-
 *   search) decision tree over COUNT imt_entries that compares the incoming
 *   IMT method (R0/V5) against each key and jumps to the matching vtable
 *   slot, target code, or FAIL_TRAMP. Two code shapes exist: a jump-table
 *   based one (USE_JUMP_TABLES) and a literal-pool based one.
 *   NOTE(review): elided view — else-arms, braces and several declarations
 *   (size, i, j, jte, cond, imt_method assignments) are not visible here.
 */
6516 mono_arch_build_imt_thunk (MonoVTable *vtable, MonoDomain *domain, MonoIMTCheckItem **imt_entries, int count,
6517 gpointer fail_tramp)
6520 arminstr_t *code, *start;
6521 #ifdef USE_JUMP_TABLES
6524 gboolean large_offsets = FALSE;
6525 guint32 **constant_pool_starts;
6526 arminstr_t *vtable_target = NULL;
6527 int extra_space = 0;
6529 #ifdef ENABLE_WRONG_METHOD_CHECK
/* --- Pass 1: compute per-item chunk sizes and the total thunk size. --- */
6534 #ifdef USE_JUMP_TABLES
6535 for (i = 0; i < count; ++i) {
6536 MonoIMTCheckItem *item = imt_entries [i];
6537 item->chunk_size += 4 * 16;
6538 if (!item->is_equals)
6539 imt_entries [item->check_target_idx]->compare_done = TRUE;
6540 size += item->chunk_size;
6543 constant_pool_starts = g_new0 (guint32*, count);
6545 for (i = 0; i < count; ++i) {
6546 MonoIMTCheckItem *item = imt_entries [i];
6547 if (item->is_equals) {
6548 gboolean fail_case = !item->check_target_idx && fail_tramp;
6550 if (item->has_target_code || !arm_is_imm12 (DISTANCE (vtable, &vtable->vtable[item->value.vtable_slot]))) {
6551 item->chunk_size += 32;
6552 large_offsets = TRUE;
6555 if (item->check_target_idx || fail_case) {
6556 if (!item->compare_done || fail_case)
6557 item->chunk_size += CMP_SIZE;
6558 item->chunk_size += BRANCH_SIZE;
6560 #ifdef ENABLE_WRONG_METHOD_CHECK
6561 item->chunk_size += WMC_SIZE;
6565 item->chunk_size += 16;
6566 large_offsets = TRUE;
6568 item->chunk_size += CALL_SIZE;
6570 item->chunk_size += BSEARCH_ENTRY_SIZE;
6571 imt_entries [item->check_target_idx]->compare_done = TRUE;
6573 size += item->chunk_size;
6577 size += 4 * count; /* The ARM_ADD_REG_IMM to pop the stack */
/* Allocate thunk memory; generic-virtual thunks use a dedicated allocator. */
6581 code = mono_method_alloc_generic_virtual_thunk (domain, size);
6583 code = mono_domain_code_reserve (domain, size);
6587 g_print ("Building IMT thunk for class %s %s entries %d code size %d code at %p end %p vtable %p fail_tramp %p\n", vtable->klass->name_space, vtable->klass->name, count, size, start, ((guint8*)start) + size, vtable, fail_tramp);
6588 for (i = 0; i < count; ++i) {
6589 MonoIMTCheckItem *item = imt_entries [i];
6590 g_print ("method %d (%p) %s vtable slot %p is_equals %d chunk size %d\n", i, item->key, ((MonoMethod*)item->key)->name, &vtable->vtable [item->value.vtable_slot], item->is_equals, item->chunk_size);
/* --- Thunk prologue: save scratch regs, load IMT method and vtable. --- */
6594 #ifdef USE_JUMP_TABLES
6595 ARM_PUSH3 (code, ARMREG_R0, ARMREG_R1, ARMREG_R2);
6596 /* If jumptables we always pass the IMT method in R5 */
6597 ARM_MOV_REG_REG (code, ARMREG_R0, ARMREG_V5);
/* Jump-table layout: slot 0 = vtable, then 3 slots per IMT entry. */
6598 #define VTABLE_JTI 0
6599 #define IMT_METHOD_OFFSET 0
6600 #define TARGET_CODE_OFFSET 1
6601 #define JUMP_CODE_OFFSET 2
6602 #define RECORDS_PER_ENTRY 3
6603 #define IMT_METHOD_JTI(idx) (1 + idx * RECORDS_PER_ENTRY + IMT_METHOD_OFFSET)
6604 #define TARGET_CODE_JTI(idx) (1 + idx * RECORDS_PER_ENTRY + TARGET_CODE_OFFSET)
6605 #define JUMP_CODE_JTI(idx) (1 + idx * RECORDS_PER_ENTRY + JUMP_CODE_OFFSET)
6607 jte = mono_jumptable_add_entries (RECORDS_PER_ENTRY * count + 1 /* vtable */);
6608 code = (arminstr_t *) mono_arm_load_jumptable_entry_addr ((guint8 *) code, jte, ARMREG_R2);
6609 ARM_LDR_IMM (code, ARMREG_IP, ARMREG_R2, VTABLE_JTI);
6610 set_jumptable_element (jte, VTABLE_JTI, vtable);
6613 ARM_PUSH4 (code, ARMREG_R0, ARMREG_R1, ARMREG_IP, ARMREG_PC);
6615 ARM_PUSH2 (code, ARMREG_R0, ARMREG_R1);
6616 ARM_LDR_IMM (code, ARMREG_R0, ARMREG_LR, -4);
6617 vtable_target = code;
6618 ARM_LDR_IMM (code, ARMREG_IP, ARMREG_PC, 0);
6620 if (mono_use_llvm) {
6621 /* LLVM always passes the IMT method in R5 */
6622 ARM_MOV_REG_REG (code, ARMREG_R0, ARMREG_V5);
6624 /* R0 == 0 means we are called from AOT code. In this case, V5 contains the IMT method */
6625 ARM_CMP_REG_IMM8 (code, ARMREG_R0, 0);
6626 ARM_MOV_REG_REG_COND (code, ARMREG_R0, ARMREG_V5, ARMCOND_EQ);
/* --- Pass 2: emit the comparison/dispatch code for each entry. --- */
6630 for (i = 0; i < count; ++i) {
6631 MonoIMTCheckItem *item = imt_entries [i];
6632 #ifdef USE_JUMP_TABLES
6633 guint32 imt_method_jti = 0, target_code_jti = 0;
6635 arminstr_t *imt_method = NULL, *vtable_offset_ins = NULL, *target_code_ins = NULL;
6637 gint32 vtable_offset;
6639 item->code_target = (guint8*)code;
6641 if (item->is_equals) {
6642 gboolean fail_case = !item->check_target_idx && fail_tramp;
6644 if (item->check_target_idx || fail_case) {
6645 if (!item->compare_done || fail_case) {
6646 #ifdef USE_JUMP_TABLES
6647 imt_method_jti = IMT_METHOD_JTI (i);
6648 code = load_element_with_regbase_cond (code, ARMREG_R1, ARMREG_R2, imt_method_jti, ARMCOND_AL);
6651 ARM_LDR_IMM (code, ARMREG_R1, ARMREG_PC, 0);
6653 ARM_CMP_REG_REG (code, ARMREG_R0, ARMREG_R1);
/* On mismatch, branch to the next check (patched later via jmp_code). */
6655 #ifdef USE_JUMP_TABLES
6656 code = load_element_with_regbase_cond (code, ARMREG_R1, ARMREG_R2, JUMP_CODE_JTI (i), ARMCOND_NE);
6657 ARM_BX_COND (code, ARMCOND_NE, ARMREG_R1);
6658 item->jmp_code = GUINT_TO_POINTER (JUMP_CODE_JTI (i));
6660 item->jmp_code = (guint8*)code;
6661 ARM_B_COND (code, ARMCOND_NE, 0);
6664 /*Enable the commented code to assert on wrong method*/
6665 #ifdef ENABLE_WRONG_METHOD_CHECK
6666 #ifdef USE_JUMP_TABLES
6667 imt_method_jti = IMT_METHOD_JTI (i);
6668 code = load_element_with_regbase_cond (code, ARMREG_R1, ARMREG_R2, imt_method_jti, ARMCOND_AL);
6671 ARM_LDR_IMM (code, ARMREG_R1, ARMREG_PC, 0);
6673 ARM_CMP_REG_REG (code, ARMREG_R0, ARMREG_R1);
6675 ARM_B_COND (code, ARMCOND_EQ, 0);
6677 /* Define this if your system is so bad that gdb is failing. */
6678 #ifdef BROKEN_DEV_ENV
6679 ARM_MOV_REG_REG (code, ARMREG_R2, ARMREG_PC);
6681 arm_patch (code - 1, mini_dump_bad_imt);
6685 arm_patch (cond, code);
/* Match found: jump to explicit target code... */
6689 if (item->has_target_code) {
6690 /* Load target address */
6691 #ifdef USE_JUMP_TABLES
6692 target_code_jti = TARGET_CODE_JTI (i);
6693 code = load_element_with_regbase_cond (code, ARMREG_R1, ARMREG_R2, target_code_jti, ARMCOND_AL);
6694 /* Restore registers */
6695 ARM_POP3 (code, ARMREG_R0, ARMREG_R1, ARMREG_R2);
6697 ARM_BX (code, ARMREG_R1);
6698 set_jumptable_element (jte, target_code_jti, item->value.target_code);
6700 target_code_ins = code;
6701 ARM_LDR_IMM (code, ARMREG_R1, ARMREG_PC, 0);
6702 /* Save it to the fourth slot */
6703 ARM_STR_IMM (code, ARMREG_R1, ARMREG_SP, 3 * sizeof (gpointer));
6704 /* Restore registers and branch */
6705 ARM_POP4 (code, ARMREG_R0, ARMREG_R1, ARMREG_IP, ARMREG_PC);
6707 code = arm_emit_value_and_patch_ldr (code, target_code_ins, (gsize)item->value.target_code);
/* ...or jump through the vtable slot. */
6710 vtable_offset = DISTANCE (vtable, &vtable->vtable[item->value.vtable_slot]);
6711 if (!arm_is_imm12 (vtable_offset)) {
6713 * We need to branch to a computed address but we don't have
6714 * a free register to store it, since IP must contain the
6715 * vtable address. So we push the two values to the stack, and
6716 * load them both using LDM.
6718 /* Compute target address */
6719 #ifdef USE_JUMP_TABLES
6720 ARM_MOVW_REG_IMM (code, ARMREG_R1, vtable_offset & 0xffff);
6721 if (vtable_offset >> 16)
6722 ARM_MOVT_REG_IMM (code, ARMREG_R1, (vtable_offset >> 16) & 0xffff);
6723 /* IP had vtable base. */
6724 ARM_LDR_REG_REG (code, ARMREG_IP, ARMREG_IP, ARMREG_R1);
6725 /* Restore registers and branch */
6726 ARM_POP3 (code, ARMREG_R0, ARMREG_R1, ARMREG_R2);
6727 ARM_BX (code, ARMREG_IP);
6729 vtable_offset_ins = code;
6730 ARM_LDR_IMM (code, ARMREG_R1, ARMREG_PC, 0);
6731 ARM_LDR_REG_REG (code, ARMREG_R1, ARMREG_IP, ARMREG_R1);
6732 /* Save it to the fourth slot */
6733 ARM_STR_IMM (code, ARMREG_R1, ARMREG_SP, 3 * sizeof (gpointer));
6734 /* Restore registers and branch */
6735 ARM_POP4 (code, ARMREG_R0, ARMREG_R1, ARMREG_IP, ARMREG_PC);
6737 code = arm_emit_value_and_patch_ldr (code, vtable_offset_ins, vtable_offset);
6740 #ifdef USE_JUMP_TABLES
6741 ARM_LDR_IMM (code, ARMREG_IP, ARMREG_IP, vtable_offset);
6742 ARM_POP3 (code, ARMREG_R0, ARMREG_R1, ARMREG_R2);
6743 ARM_BX (code, ARMREG_IP);
6745 ARM_POP2 (code, ARMREG_R0, ARMREG_R1);
6747 ARM_ADD_REG_IMM8 (code, ARMREG_SP, ARMREG_SP, 2 * sizeof (gpointer));
6748 ARM_LDR_IMM (code, ARMREG_PC, ARMREG_IP, vtable_offset);
/* Fail case: route the mismatch branch to fail_tramp. */
6754 #ifdef USE_JUMP_TABLES
6755 set_jumptable_element (jte, GPOINTER_TO_UINT (item->jmp_code), code);
6756 target_code_jti = TARGET_CODE_JTI (i);
6757 /* Load target address */
6758 code = load_element_with_regbase_cond (code, ARMREG_R1, ARMREG_R2, target_code_jti, ARMCOND_AL);
6759 /* Restore registers */
6760 ARM_POP3 (code, ARMREG_R0, ARMREG_R1, ARMREG_R2);
6762 ARM_BX (code, ARMREG_R1);
6763 set_jumptable_element (jte, target_code_jti, fail_tramp);
6765 arm_patch (item->jmp_code, (guchar*)code);
6767 target_code_ins = code;
6768 /* Load target address */
6769 ARM_LDR_IMM (code, ARMREG_R1, ARMREG_PC, 0);
6770 /* Save it to the fourth slot */
6771 ARM_STR_IMM (code, ARMREG_R1, ARMREG_SP, 3 * sizeof (gpointer));
6772 /* Restore registers and branch */
6773 ARM_POP4 (code, ARMREG_R0, ARMREG_R1, ARMREG_IP, ARMREG_PC);
6775 code = arm_emit_value_and_patch_ldr (code, target_code_ins, (gsize)fail_tramp);
6777 item->jmp_code = NULL;
/* Record this entry's IMT key (jump-table slot or inline literal). */
6780 #ifdef USE_JUMP_TABLES
6782 set_jumptable_element (jte, imt_method_jti, item->key);
6785 code = arm_emit_value_and_patch_ldr (code, imt_method, (guint32)item->key);
6787 /*must emit after unconditional branch*/
6788 if (vtable_target) {
6789 code = arm_emit_value_and_patch_ldr (code, vtable_target, (guint32)vtable);
6790 item->chunk_size += 4;
6791 vtable_target = NULL;
6794 /*We reserve the space for bsearch IMT values after the first entry with an absolute jump*/
6795 constant_pool_starts [i] = code;
6797 code += extra_space;
/* Non-equals entry: binary-search comparison (branch if key >= pivot). */
6802 #ifdef USE_JUMP_TABLES
6803 code = load_element_with_regbase_cond (code, ARMREG_R1, ARMREG_R2, IMT_METHOD_JTI (i), ARMCOND_AL);
6804 ARM_CMP_REG_REG (code, ARMREG_R0, ARMREG_R1);
6805 code = load_element_with_regbase_cond (code, ARMREG_R1, ARMREG_R2, JUMP_CODE_JTI (i), ARMCOND_HS);
6806 ARM_BX_COND (code, ARMCOND_HS, ARMREG_R1);
6807 item->jmp_code = GUINT_TO_POINTER (JUMP_CODE_JTI (i));
6809 ARM_LDR_IMM (code, ARMREG_R1, ARMREG_PC, 0);
6810 ARM_CMP_REG_REG (code, ARMREG_R0, ARMREG_R1);
6812 item->jmp_code = (guint8*)code;
6813 ARM_B_COND (code, ARMCOND_HS, 0);
/* --- Pass 3: patch forward branches and fill literal pools/keys. --- */
6819 for (i = 0; i < count; ++i) {
6820 MonoIMTCheckItem *item = imt_entries [i];
6821 if (item->jmp_code) {
6822 if (item->check_target_idx)
6823 #ifdef USE_JUMP_TABLES
6824 set_jumptable_element (jte, GPOINTER_TO_UINT (item->jmp_code), imt_entries [item->check_target_idx]->code_target);
6826 arm_patch (item->jmp_code, imt_entries [item->check_target_idx]->code_target);
6829 if (i > 0 && item->is_equals) {
6831 #ifdef USE_JUMP_TABLES
6832 for (j = i - 1; j >= 0 && !imt_entries [j]->is_equals; --j)
6833 set_jumptable_element (jte, IMT_METHOD_JTI (j), imt_entries [j]->key);
6835 arminstr_t *space_start = constant_pool_starts [i];
6836 for (j = i - 1; j >= 0 && !imt_entries [j]->is_equals; --j) {
6837 space_start = arm_emit_value_and_patch_ldr (space_start, (arminstr_t*)imt_entries [j]->code_target, (guint32)imt_entries [j]->key);
6845 char *buff = g_strdup_printf ("thunk_for_class_%s_%s_entries_%d", vtable->klass->name_space, vtable->klass->name, count);
6846 mono_disassemble_code (NULL, (guint8*)start, size, buff);
6851 #ifndef USE_JUMP_TABLES
6852 g_free (constant_pool_starts);
6855 mono_arch_flush_icache ((guint8*)start, size);
6856 mono_stats.imt_thunks_size += code - start;
6858 g_assert (DISTANCE (start, code) <= size);
/* mono_arch_context_get_int_reg: read integer register REG from CTX. */
6865 mono_arch_context_get_int_reg (MonoContext *ctx, int reg)
6867 return ctx->regs [reg];
/* mono_arch_context_set_int_reg: store VAL into integer register REG of CTX. */
6871 mono_arch_context_set_int_reg (MonoContext *ctx, int reg, mgreg_t val)
6873 ctx->regs [reg] = val;
6877 * mono_arch_get_trampolines:
6879 * Return a list of MonoTrampInfo structures describing arch specific trampolines
/* mono_arch_get_trampolines: delegate to the ARM exception trampoline list. */
6883 mono_arch_get_trampolines (gboolean aot)
6885 return mono_arm_get_exception_trampolines (aot);
6889 #ifdef MONO_ARCH_SOFT_DEBUG_SUPPORTED
6891 * mono_arch_set_breakpoint:
6893 * Set a breakpoint at the native code corresponding to JI at NATIVE_OFFSET.
6894 * The location should contain code emitted by OP_SEQ_POINT.
/*
 * mono_arch_set_breakpoint:
 *   Install a breakpoint at IP inside JI. Three strategies:
 *   soft breakpoints (patch in a BLX LR), AOT code (record the bp trigger
 *   page in the method's SeqPointInfo), or JIT code (patch a load from the
 *   bp trigger page so executing it faults).
 *   NOTE(review): elided view — 'code' initialization and some braces are
 *   not visible.
 */
6897 mono_arch_set_breakpoint (MonoJitInfo *ji, guint8 *ip)
6900 guint32 native_offset = ip - (guint8*)ji->code_start;
6901 MonoDebugOptions *opt = mini_get_debug_options ();
6903 if (opt->soft_breakpoints) {
6904 g_assert (!ji->from_aot);
6906 ARM_BLX_REG (code, ARMREG_LR);
6907 mono_arch_flush_icache (code - 4, 4);
6908 } else if (ji->from_aot) {
6909 SeqPointInfo *info = mono_arch_get_seq_point_info (mono_domain_get (), ji->code_start);
/* Sequence points are 4-byte aligned; slot must not already hold a bp. */
6911 g_assert (native_offset % 4 == 0);
6912 g_assert (info->bp_addrs [native_offset / 4] == 0);
6913 info->bp_addrs [native_offset / 4] = bp_trigger_page;
6915 int dreg = ARMREG_LR;
6917 /* Read from another trigger page */
6918 #ifdef USE_JUMP_TABLES
6919 gpointer *jte = mono_jumptable_add_entry ();
6920 code = mono_arm_load_jumptable_entry (code, jte, dreg);
6921 jte [0] = bp_trigger_page;
6923 ARM_LDR_IMM (code, dreg, ARMREG_PC, 0);
6925 *(int*)code = (int)bp_trigger_page;
/* Dereference the (protected) trigger page to raise the signal. */
6928 ARM_LDR_IMM (code, dreg, dreg, 0);
6930 mono_arch_flush_icache (code - 16, 16);
6933 /* This is currently implemented by emitting an SWI instruction, which
6934 * qemu/linux seems to convert to a SIGILL.
6936 *(int*)code = (0xef << 24) | 8;
6938 mono_arch_flush_icache (code - 4, 4);
6944 * mono_arch_clear_breakpoint:
6946 * Clear the breakpoint at IP.
/*
 * mono_arch_clear_breakpoint:
 *   Undo mono_arch_set_breakpoint at IP: restore the original instruction(s)
 *   for soft/JIT breakpoints, or clear the SeqPointInfo slot for AOT code.
 *   NOTE(review): elided view — 'code', 'i' and the NOP-rewrite loop body are
 *   not visible here.
 */
6949 mono_arch_clear_breakpoint (MonoJitInfo *ji, guint8 *ip)
6951 MonoDebugOptions *opt = mini_get_debug_options ();
6955 if (opt->soft_breakpoints) {
6956 g_assert (!ji->from_aot);
6959 mono_arch_flush_icache (code - 4, 4);
6960 } else if (ji->from_aot) {
6961 guint32 native_offset = ip - (guint8*)ji->code_start;
6962 SeqPointInfo *info = mono_arch_get_seq_point_info (mono_domain_get (), ji->code_start);
6964 g_assert (native_offset % 4 == 0);
6965 g_assert (info->bp_addrs [native_offset / 4] == bp_trigger_page);
6966 info->bp_addrs [native_offset / 4] = 0;
/* JIT case: overwrite the 4 patched instructions (loop body elided). */
6968 for (i = 0; i < 4; ++i)
6971 mono_arch_flush_icache (ip, code - ip);
6976 * mono_arch_start_single_stepping:
6978 * Start single stepping.
/*
 * mono_arch_start_single_stepping:
 *   Revoke all access to the single-step trigger page so that the read
 *   emitted at each sequence point faults, signalling a step event.
 */
6981 mono_arch_start_single_stepping (void)
6983 if (ss_trigger_page)
6984 mono_mprotect (ss_trigger_page, mono_pagesize (), 0);
6990 * mono_arch_stop_single_stepping:
6992 * Stop single stepping.
/*
 * mono_arch_stop_single_stepping:
 *   Re-enable reads of the single-step trigger page so sequence-point loads
 *   no longer fault.
 */
6995 mono_arch_stop_single_stepping (void)
6997 if (ss_trigger_page)
6998 mono_mprotect (ss_trigger_page, mono_pagesize (), MONO_MMAP_READ);
7004 #define DBG_SIGNAL SIGBUS
7006 #define DBG_SIGNAL SIGSEGV
7010 * mono_arch_is_single_step_event:
7012 * Return whenever the machine state in SIGCTX corresponds to a single
/*
 * mono_arch_is_single_step_event:
 *   Return whether the fault described by INFO hit the single-step trigger
 *   page. NOTE(review): elided view — the return statements are not visible.
 */
7016 mono_arch_is_single_step_event (void *info, void *sigctx)
7018 siginfo_t *sinfo = info;
7020 if (!ss_trigger_page)
7023 /* Sometimes the address is off by 4 */
7024 if (sinfo->si_addr >= ss_trigger_page && (guint8*)sinfo->si_addr <= (guint8*)ss_trigger_page + 128)
7031 * mono_arch_is_breakpoint_event:
7033 * Return whenever the machine state in SIGCTX corresponds to a breakpoint event.
/*
 * mono_arch_is_breakpoint_event:
 *   Return whether the signal described by INFO is a fault on the breakpoint
 *   trigger page (signo must match DBG_SIGNAL).
 *   NOTE(review): elided view — the return statements are not visible.
 */
7036 mono_arch_is_breakpoint_event (void *info, void *sigctx)
7038 siginfo_t *sinfo = info;
7040 if (!ss_trigger_page)
7043 if (sinfo->si_signo == DBG_SIGNAL) {
7044 /* Sometimes the address is off by 4 */
7045 if (sinfo->si_addr >= bp_trigger_page && (guint8*)sinfo->si_addr <= (guint8*)bp_trigger_page + 128)
7055 * mono_arch_skip_breakpoint:
7057 * See mini-amd64.c for docs.
/* mono_arch_skip_breakpoint: advance PC past the 4-byte faulting load. */
7060 mono_arch_skip_breakpoint (MonoContext *ctx, MonoJitInfo *ji)
7062 MONO_CONTEXT_SET_IP (ctx, (guint8*)MONO_CONTEXT_GET_IP (ctx) + 4);
7066 * mono_arch_skip_single_step:
7068 * See mini-amd64.c for docs.
/* mono_arch_skip_single_step: advance PC past the 4-byte trigger load. */
7071 mono_arch_skip_single_step (MonoContext *ctx)
7073 MONO_CONTEXT_SET_IP (ctx, (guint8*)MONO_CONTEXT_GET_IP (ctx) + 4);
7076 #endif /* MONO_ARCH_SOFT_DEBUG_SUPPORTED */
7079 * mono_arch_get_seq_point_info:
7081 * See mini-amd64.c for docs.
/*
 * mono_arch_get_seq_point_info:
 *   Return (creating and caching on first use) the SeqPointInfo for the
 *   method whose code starts at CODE, keyed in the per-domain hash table.
 *   NOTE(review): elided view — the cache-hit early return, hash keys and
 *   final return are not visible.
 */
7084 mono_arch_get_seq_point_info (MonoDomain *domain, guint8 *code)
7089 // FIXME: Add a free function
7091 mono_domain_lock (domain);
7092 info = g_hash_table_lookup (domain_jit_info (domain)->arch_seq_points,
7094 mono_domain_unlock (domain);
7097 ji = mono_jit_info_table_find (domain, (char*)code);
/* One bp_addrs slot per 4-byte instruction of the method (flexible array). */
7100 info = g_malloc0 (sizeof (SeqPointInfo) + ji->code_size);
7102 info->ss_trigger_page = ss_trigger_page;
7103 info->bp_trigger_page = bp_trigger_page;
7105 mono_domain_lock (domain);
7106 g_hash_table_insert (domain_jit_info (domain)->arch_seq_points,
7108 mono_domain_unlock (domain);
/*
 * mono_arch_init_lmf_ext:
 *   Initialize an extended LMF, chaining it to PREV_LMF. Bit 1 of
 *   previous_lmf tags the entry as a MonoLMFExt for the unwinder.
 */
7115 mono_arch_init_lmf_ext (MonoLMFExt *ext, gpointer prev_lmf)
7117 ext->lmf.previous_lmf = prev_lmf;
7118 /* Mark that this is a MonoLMFExt */
7119 ext->lmf.previous_lmf = (gpointer)(((gssize)ext->lmf.previous_lmf) | 2);
7120 ext->lmf.sp = (gssize)ext;
7124 * mono_arch_set_target:
7126 * Set the target architecture the JIT backend should generate code for, in the form
7127 * of a GNU target triplet. Only used in AOT mode.
/*
 * mono_arch_set_target:
 *   Parse a GNU target triple (AOT mode) and set the architecture feature
 *   flags (v5/v6/v7/v7s, thumb, eabi) via substring matching. Checks are
 *   cumulative, e.g. "armv7s" also matches the "armv7" test above it.
 */
7130 mono_arch_set_target (char *mtriple)
7132 /* The GNU target triple format is not very well documented */
7133 if (strstr (mtriple, "armv7")) {
7134 v5_supported = TRUE;
7135 v6_supported = TRUE;
7136 v7_supported = TRUE;
7138 if (strstr (mtriple, "armv6")) {
7139 v5_supported = TRUE;
7140 v6_supported = TRUE;
7142 if (strstr (mtriple, "armv7s")) {
7143 v7s_supported = TRUE;
7145 if (strstr (mtriple, "thumbv7s")) {
7146 v5_supported = TRUE;
7147 v6_supported = TRUE;
7148 v7_supported = TRUE;
7149 v7s_supported = TRUE;
7150 thumb_supported = TRUE;
7151 thumb2_supported = TRUE;
/* Darwin/iOS targets always have at least ARMv6 + Thumb. */
7153 if (strstr (mtriple, "darwin") || strstr (mtriple, "ios")) {
7154 v5_supported = TRUE;
7155 v6_supported = TRUE;
7156 thumb_supported = TRUE;
7159 if (strstr (mtriple, "gnueabi"))
7160 eabi_supported = TRUE;
/*
 * mono_arch_opcode_supported:
 *   Report whether OPCODE can be emitted natively. The listed atomic ops
 *   need ARMv7 (LDREX/STREX-based implementations).
 *   NOTE(review): elided view — the default case is not visible.
 */
7164 mono_arch_opcode_supported (int opcode)
7167 case OP_ATOMIC_EXCHANGE_I4:
7168 case OP_ATOMIC_CAS_I4:
7169 case OP_ATOMIC_ADD_NEW_I4:
7170 return v7_supported;
7176 #if defined(ENABLE_GSHAREDVT)
7178 #include "../../../mono-extensions/mono/mini/mini-arm-gsharedvt.c"
7180 #endif /* !MONOTOUCH */