2 * mini-arm.c: ARM backend for the Mono code generator
5 * Paolo Molaro (lupus@ximian.com)
6 * Dietmar Maurer (dietmar@ximian.com)
8 * (C) 2003 Ximian, Inc.
9 * Copyright 2003-2011 Novell, Inc (http://www.novell.com)
10 * Copyright 2011 Xamarin, Inc (http://www.xamarin.com)
15 #include <mono/metadata/abi-details.h>
16 #include <mono/metadata/appdomain.h>
17 #include <mono/metadata/debug-helpers.h>
18 #include <mono/utils/mono-mmap.h>
19 #include <mono/utils/mono-hwcap-arm.h>
25 #include "debugger-agent.h"
27 #include "mono/arch/arm/arm-vfp-codegen.h"
29 /* Sanity check: This makes no sense */
30 #if defined(ARM_FPU_NONE) && (defined(ARM_FPU_VFP) || defined(ARM_FPU_VFP_HARD))
31 #error "ARM_FPU_NONE is defined while one of ARM_FPU_VFP/ARM_FPU_VFP_HARD is defined"
/*
 * Floating-point mode predicates used throughout this backend:
 */
35 * IS_SOFT_FLOAT: Is full software floating point used?
36 * IS_HARD_FLOAT: Is full hardware floating point used?
37 * IS_VFP: Is hardware floating point with software ABI used?
39 * These are not necessarily constants, e.g. IS_SOFT_FLOAT and
40 * IS_VFP may delegate to mono_arch_is_soft_float ().
/* armhf build: hardware FP with the hard-float calling convention. */
43 #if defined(ARM_FPU_VFP_HARD)
44 #define IS_SOFT_FLOAT (FALSE)
45 #define IS_HARD_FLOAT (TRUE)
/* Soft-float fallback build: decided at runtime via mono_arch_is_soft_float (). */
47 #elif defined(ARM_FPU_NONE)
48 #define IS_SOFT_FLOAT (mono_arch_is_soft_float ())
49 #define IS_HARD_FLOAT (FALSE)
50 #define IS_VFP (!mono_arch_is_soft_float ())
/* Plain VFP build: hardware FP with the soft-float ABI. */
52 #define IS_SOFT_FLOAT (FALSE)
53 #define IS_HARD_FLOAT (FALSE)
/* __aeabi_read_tp is only usable on EABI Linux (not Android or NaCl). */
57 #if defined(__ARM_EABI__) && defined(__linux__) && !defined(PLATFORM_ANDROID) && !defined(__native_client__)
58 #define HAVE_AEABI_READ_TP 1
61 #ifdef __native_client_codegen__
/* Native Client bundle-alignment parameters for ARM. */
62 const guint kNaClAlignment = kNaClAlignmentARM;
63 const guint kNaClAlignmentMask = kNaClAlignmentMaskARM;
64 gint8 nacl_align_byte = -1; /* 0xff */
/* Pad CODE with PAD bytes of NaCl-safe filler; currently unimplemented. */
67 mono_arch_nacl_pad (guint8 *code, int pad)
69 /* Not yet properly implemented. */
70 g_assert_not_reached ();
/* Skip over NaCl alignment nops in CODE; currently unimplemented. */
75 mono_arch_nacl_skip_nops (guint8 *code)
77 /* Not yet properly implemented. */
78 g_assert_not_reached ();
82 #endif /* __native_client_codegen__ */
/* Round VAL up to the next multiple of ALIGN (ALIGN must be a power of two). */
84 #define ALIGN_TO(val,align) ((((guint64)val) + ((align) - 1)) & ~((align) - 1))
/* Darwin-only icache flush primitive; declared here since there is no public header. */
87 void sys_icache_invalidate (void *start, size_t len);
90 /* This mutex protects architecture specific caches */
91 #define mono_mini_arch_lock() EnterCriticalSection (&mini_arch_mutex)
92 #define mono_mini_arch_unlock() LeaveCriticalSection (&mini_arch_mutex)
93 static CRITICAL_SECTION mini_arch_mutex;
/* CPU feature flags, filled in by mono_arch_init () from mono-hwcap data
 * (and possibly overridden by the MONO_CPU_ARCH environment variable). */
95 static gboolean v5_supported = FALSE;
96 static gboolean v6_supported = FALSE;
97 static gboolean v7_supported = FALSE;
98 static gboolean v7s_supported = FALSE;
99 static gboolean thumb_supported = FALSE;
100 static gboolean thumb2_supported = FALSE;
102 * Whether to use the ARM EABI
104 static gboolean eabi_supported = FALSE;
107 * Whether to use the iphone ABI extensions:
108 * http://developer.apple.com/library/ios/documentation/Xcode/Conceptual/iPhoneOSABIReference/index.html
109 * Basically, r7 is used as a frame pointer and it should point to the saved r7 + lr.
110 * This is required for debugging/profiling tools to work, but it has some overhead so it should
111 * only be turned on in debug builds.
113 static gboolean iphone_abi = FALSE;
116 * The FPU we are generating code for. This is NOT runtime configurable right now,
117 * since some things like MONO_ARCH_CALLEE_FREGS still depend on defines.
119 static MonoArmFPU arm_fpu;
121 #if defined(ARM_FPU_VFP_HARD)
123 * On armhf, d0-d7 are used for argument passing and d8-d15
124 * must be preserved across calls, which leaves us no room
125 * for scratch registers. So we use d14-d15 but back up their
126 * previous contents to a stack slot before using them - see
127 * mono_arm_emit_vfp_scratch_save/_restore ().
129 static int vfp_scratch1 = ARM_VFP_D14;
130 static int vfp_scratch2 = ARM_VFP_D15;
133 * On armel, d0-d7 do not need to be preserved, so we can
134 * freely make use of them as scratch registers.
136 static int vfp_scratch1 = ARM_VFP_D0;
137 static int vfp_scratch2 = ARM_VFP_D1;
/* Soft-debugger state: the single-step trigger variable and the wrappers
 * created by create_function_wrapper () in mono_arch_init (). */
142 static volatile int ss_trigger_var = 0;
144 static gpointer single_step_func_wrapper;
145 static gpointer breakpoint_func_wrapper;
148 * The code generated for sequence points reads from this location, which is
149 * made read-only when single stepping is enabled.
151 static gpointer ss_trigger_page;
153 /* Enabled breakpoints read from this trigger page */
154 static gpointer bp_trigger_page;
158 * floating point support: on ARM it is a mess, there are at least 3
159 * different setups, each of which binary incompatible with the others.
160 * 1) FPA: old and ugly, but unfortunately what current distros use
161 * the double binary format has the two words swapped. 8 double registers.
162 * Implemented usually by kernel emulation.
163 * 2) softfloat: the compiler emulates all the fp ops. Usually uses the
164 * ugly swapped double format (I guess a softfloat-vfp exists, too, though).
165 * 3) VFP: the new and actually sensible and useful FP support. Implemented
166 * in HW or kernel-emulated, requires new tools. I think this is what symbian uses.
168 * We do not care about FPA. We will support soft float and VFP.
170 int mono_exc_esp_offset = 0;
/* Immediate-range predicates for ARM load/store encodings:
 * imm12 for LDR/STR, imm8 for LDRH/STRH-style, fpimm8 for VFP FLDD/FSTD
 * (offset must fit in 8 bits after scaling by 4). */
172 #define arm_is_imm12(v) ((v) > -4096 && (v) < 4096)
173 #define arm_is_imm8(v) ((v) > -256 && (v) < 256)
174 #define arm_is_fpimm8(v) ((v) >= -1020 && (v) <= 1020)
/* Bit patterns to recognize an "ldr pc, [...]" instruction. */
176 #define LDR_MASK ((0xf << ARMCOND_SHIFT) | (3 << 26) | (1 << 22) | (1 << 20) | (15 << 12))
177 #define LDR_PC_VAL ((ARMCOND_AL << ARMCOND_SHIFT) | (1 << 26) | (0 << 22) | (1 << 20) | (15 << 12))
178 #define IS_LDR_PC(val) (((val) & LDR_MASK) == LDR_PC_VAL)
/* Encodings of "add lr, pc, #4" and "mov lr, pc", used in call sequences. */
180 #define ADD_LR_PC_4 ((ARMCOND_AL << ARMCOND_SHIFT) | (1 << 25) | (1 << 23) | (ARMREG_PC << 16) | (ARMREG_LR << 12) | 4)
181 #define MOV_LR_PC ((ARMCOND_AL << ARMCOND_SHIFT) | (1 << 24) | (0xa << 20) | (ARMREG_LR << 12) | ARMREG_PC)
182 //#define DEBUG_IMT 0
184 /* A variant of ARM_LDR_IMM which can handle large offsets */
185 #define ARM_LDR_IMM_GENERAL(code, dreg, basereg, offset, scratch_reg) do { \
186 if (arm_is_imm12 ((offset))) { \
187 ARM_LDR_IMM (code, (dreg), (basereg), (offset)); \
/* Offset too large for imm12: materialize it in SCRATCH_REG first. */ \
189 g_assert ((scratch_reg) != (basereg)); \
190 code = mono_arm_emit_load_imm (code, (scratch_reg), (offset)); \
191 ARM_LDR_REG_REG (code, (dreg), (basereg), (scratch_reg)); \
/* Store counterpart of ARM_LDR_IMM_GENERAL, same large-offset strategy. */
195 #define ARM_STR_IMM_GENERAL(code, dreg, basereg, offset, scratch_reg) do { \
196 if (arm_is_imm12 ((offset))) { \
197 ARM_STR_IMM (code, (dreg), (basereg), (offset)); \
199 g_assert ((scratch_reg) != (basereg)); \
200 code = mono_arm_emit_load_imm (code, (scratch_reg), (offset)); \
201 ARM_STR_REG_REG (code, (dreg), (basereg), (scratch_reg)); \
/* Forward declaration; defined later in this file. */
205 static void mono_arch_compute_omit_fp (MonoCompile *cfg);
/*
 * mono_arch_regname:
 * Return a printable name for integer register REG (0-15 index into
 * the static table below).
 */
208 mono_arch_regname (int reg)
210 static const char * rnames[] = {
211 "arm_r0", "arm_r1", "arm_r2", "arm_r3", "arm_v1",
212 "arm_v2", "arm_v3", "arm_v4", "arm_v5", "arm_v6",
213 "arm_v7", "arm_fp", "arm_ip", "arm_sp", "arm_lr",
216 if (reg >= 0 && reg < 16)
/*
 * mono_arch_fregname:
 * Return a printable name for floating-point register REG (0-31 index
 * into the static table below).
 */
222 mono_arch_fregname (int reg)
224 static const char * rnames[] = {
225 "arm_f0", "arm_f1", "arm_f2", "arm_f3", "arm_f4",
226 "arm_f5", "arm_f6", "arm_f7", "arm_f8", "arm_f9",
227 "arm_f10", "arm_f11", "arm_f12", "arm_f13", "arm_f14",
228 "arm_f15", "arm_f16", "arm_f17", "arm_f18", "arm_f19",
229 "arm_f20", "arm_f21", "arm_f22", "arm_f23", "arm_f24",
230 "arm_f25", "arm_f26", "arm_f27", "arm_f28", "arm_f29",
233 if (reg >= 0 && reg < 32)
/*
 * emit_big_add:
 * Emit "DREG = SREG + IMM" handling immediates that do not fit the
 * rotated-imm8 ADD encoding: in that case IMM is first loaded into DREG
 * (hence DREG must differ from SREG) and a register-register ADD is used.
 */
241 emit_big_add (guint8 *code, int dreg, int sreg, int imm)
243 int imm8, rot_amount;
244 if ((imm8 = mono_arm_is_rotated_imm8 (imm, &rot_amount)) >= 0) {
245 ARM_ADD_REG_IMM (code, dreg, sreg, imm8, rot_amount);
248 g_assert (dreg != sreg);
249 code = mono_arm_emit_load_imm (code, dreg, imm);
250 ARM_ADD_REG_REG (code, dreg, dreg, sreg);
/*
 * emit_memcpy:
 * Emit code copying SIZE bytes from SREG+SOFFSET to DREG+DOFFSET.
 * Large copies (> 4 pointers) use an r0-r3 based word-copy loop;
 * smaller copies are unrolled word loads/stores through lr.
 */
255 emit_memcpy (guint8 *code, int size, int dreg, int doffset, int sreg, int soffset)
257 /* we can use r0-r3, since this is called only for incoming args on the stack */
258 if (size > sizeof (gpointer) * 4) {
260 code = emit_big_add (code, ARMREG_R0, sreg, soffset);
261 code = emit_big_add (code, ARMREG_R1, dreg, doffset);
/* r2 = remaining byte count, decremented by 4 each iteration. */
262 start_loop = code = mono_arm_emit_load_imm (code, ARMREG_R2, size);
263 ARM_LDR_IMM (code, ARMREG_R3, ARMREG_R0, 0);
264 ARM_STR_IMM (code, ARMREG_R3, ARMREG_R1, 0);
265 ARM_ADD_REG_IMM8 (code, ARMREG_R0, ARMREG_R0, 4);
266 ARM_ADD_REG_IMM8 (code, ARMREG_R1, ARMREG_R1, 4);
267 ARM_SUBS_REG_IMM8 (code, ARMREG_R2, ARMREG_R2, 4);
/* Branch back to start_loop while r2 != 0; patched right below. */
268 ARM_B_COND (code, ARMCOND_NE, 0);
269 arm_patch (code - 4, start_loop);
/* Small copy: unrolled if every offset fits the imm12 encoding. */
272 if (arm_is_imm12 (doffset) && arm_is_imm12 (doffset + size) &&
273 arm_is_imm12 (soffset) && arm_is_imm12 (soffset + size)) {
275 ARM_LDR_IMM (code, ARMREG_LR, sreg, soffset);
276 ARM_STR_IMM (code, ARMREG_LR, dreg, doffset);
/* Offsets too large: compute base addresses into r0/r1 first. */
282 code = emit_big_add (code, ARMREG_R0, sreg, soffset);
283 code = emit_big_add (code, ARMREG_R1, dreg, doffset);
284 doffset = soffset = 0;
286 ARM_LDR_IMM (code, ARMREG_LR, ARMREG_R0, soffset);
287 ARM_STR_IMM (code, ARMREG_LR, ARMREG_R1, doffset);
/* All paths copy in word multiples; nothing may remain. */
293 g_assert (size == 0);
/*
 * emit_call_reg:
 * Emit an indirect call through REG. Uses BLX where available,
 * otherwise falls back to the mov lr,pc / mov pc,reg sequence.
 */
298 emit_call_reg (guint8 *code, int reg)
301 ARM_BLX_REG (code, reg);
303 #ifdef USE_JUMP_TABLES
304 g_assert_not_reached ();
/* No BLX: emulate it by loading the return address into lr manually. */
306 ARM_MOV_REG_REG (code, ARMREG_LR, ARMREG_PC);
310 ARM_MOV_REG_REG (code, ARMREG_PC, reg);
/*
 * emit_call_seq:
 * Emit a patchable call sequence. With jump tables, a patchable bl is
 * used; for dynamic methods the target is embedded as an inline literal
 * loaded into ip (patched later), then called via emit_call_reg ().
 */
316 emit_call_seq (MonoCompile *cfg, guint8 *code)
318 #ifdef USE_JUMP_TABLES
319 code = mono_arm_patchable_bl (code, ARMCOND_AL);
321 if (cfg->method->dynamic) {
322 ARM_LDR_IMM (code, ARMREG_IP, ARMREG_PC, 0);
/* Placeholder literal; filled in when the call site is patched. */
324 *(gpointer*)code = NULL;
326 code = emit_call_reg (code, ARMREG_IP);
/*
 * mono_arm_patchable_b:
 * Emit a conditional branch that can later be patched to any target:
 * either an indirect bx through a fresh jumptable entry, or a plain
 * b instruction whose displacement is patched in place.
 */
335 mono_arm_patchable_b (guint8 *code, int cond)
337 #ifdef USE_JUMP_TABLES
340 jte = mono_jumptable_add_entry ();
341 code = mono_arm_load_jumptable_entry (code, jte, ARMREG_IP);
342 ARM_BX_COND (code, cond, ARMREG_IP);
344 ARM_B_COND (code, cond, 0);
/*
 * mono_arm_patchable_bl:
 * Like mono_arm_patchable_b () but for calls: emits a patchable
 * branch-with-link (blx via jumptable entry, or a plain bl).
 */
350 mono_arm_patchable_bl (guint8 *code, int cond)
352 #ifdef USE_JUMP_TABLES
355 jte = mono_jumptable_add_entry ();
356 code = mono_arm_load_jumptable_entry (code, jte, ARMREG_IP);
357 ARM_BLX_REG_COND (code, cond, ARMREG_IP);
359 ARM_BL_COND (code, cond, 0);
364 #ifdef USE_JUMP_TABLES
/*
 * mono_arm_load_jumptable_entry_addr:
 * Materialize the address of jumptable entry JTE into REG using a
 * movw/movt pair (requires ARMv7-class movw/movt support).
 */
366 mono_arm_load_jumptable_entry_addr (guint8 *code, gpointer *jte, ARMReg reg)
368 ARM_MOVW_REG_IMM (code, reg, GPOINTER_TO_UINT(jte) & 0xffff);
369 ARM_MOVT_REG_IMM (code, reg, (GPOINTER_TO_UINT(jte) >> 16) & 0xffff);
/*
 * mono_arm_load_jumptable_entry:
 * Load the *contents* of jumptable entry JTE into REG
 * (address first, then a dereferencing ldr).
 */
374 mono_arm_load_jumptable_entry (guint8 *code, gpointer* jte, ARMReg reg)
376 code = mono_arm_load_jumptable_entry_addr (code, jte, reg);
377 ARM_LDR_IMM (code, reg, reg, 0);
/*
 * emit_move_return_value:
 * After a call instruction INS, move the return value from the
 * ABI-defined location (r0/r1, s0/d0) into ins->dreg, converting
 * single- to double-precision for R4 returns as needed.
 */
383 emit_move_return_value (MonoCompile *cfg, MonoInst *ins, guint8 *code)
385 switch (ins->opcode) {
388 case OP_FCALL_MEMBASE:
390 if (((MonoCallInst*)ins)->signature->ret->type == MONO_TYPE_R4) {
/* hard-float: widen the float returned in s0 into dreg. */
392 ARM_CVTS (code, ins->dreg, ARM_VFP_F0);
/* soft ABI: float comes back in r0; move to VFP and widen. */
394 ARM_FMSR (code, ins->dreg, ARMREG_R0);
395 ARM_CVTS (code, ins->dreg, ins->dreg);
/* hard-float double return in d0. */
399 ARM_CPYD (code, ins->dreg, ARM_VFP_D0);
/* soft ABI double return in r0:r1. */
401 ARM_FMDRR (code, ARMREG_R0, ARMREG_R1, ins->dreg);
414 * Emit code to push an LMF structure on the LMF stack.
415 * On arm, this is intermixed with the initialization of other fields of the structure.
418 emit_save_lmf (MonoCompile *cfg, guint8 *code, gint32 lmf_offset)
420 gboolean get_lmf_fast = FALSE;
/* Fast path 1: read the LMF address straight out of TLS via __aeabi_read_tp. */
423 #ifdef HAVE_AEABI_READ_TP
424 gint32 lmf_addr_tls_offset = mono_get_lmf_addr_tls_offset ();
426 if (lmf_addr_tls_offset != -1) {
429 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_INTERNAL_METHOD,
430 (gpointer)"__aeabi_read_tp");
431 code = emit_call_seq (cfg, code);
433 ARM_LDR_IMM (code, ARMREG_R0, ARMREG_R0, lmf_addr_tls_offset);
/* Fast path 2 (managed-to-native wrappers): inline pthread_getspecific (). */
439 if (cfg->method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) {
442 /* Inline mono_get_lmf_addr () */
443 /* jit_tls = pthread_getspecific (mono_jit_tls_id); lmf_addr = &jit_tls->lmf; */
445 /* Load mono_jit_tls_id */
447 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_JIT_TLS_ID, NULL);
448 ARM_LDR_IMM (code, ARMREG_R0, ARMREG_PC, 0);
/* Inline literal slot for the TLS key; patched via the info added above. */
450 *(gpointer*)code = NULL;
452 ARM_LDR_REG_REG (code, ARMREG_R0, ARMREG_PC, ARMREG_R0);
453 /* call pthread_getspecific () */
454 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_INTERNAL_METHOD,
455 (gpointer)"pthread_getspecific");
456 code = emit_call_seq (cfg, code);
457 /* lmf_addr = &jit_tls->lmf */
458 lmf_offset = MONO_STRUCT_OFFSET (MonoJitTlsData, lmf);
459 g_assert (arm_is_imm8 (lmf_offset));
460 ARM_ADD_REG_IMM (code, ARMREG_R0, ARMREG_R0, lmf_offset, 0);
/* Slow path: call mono_get_lmf_addr () through the patch machinery. */
467 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_INTERNAL_METHOD,
468 (gpointer)"mono_get_lmf_addr");
469 code = emit_call_seq (cfg, code);
471 /* we build the MonoLMF structure on the stack - see mini-arm.h */
472 /* lmf_offset is the offset from the previous stack pointer,
473 * alloc_size is the total stack space allocated, so the offset
474 * of MonoLMF from the current stack ptr is alloc_size - lmf_offset.
475 * The pointer to the struct is put in r1 (new_lmf).
476 * ip is used as scratch
477 * The callee-saved registers are already in the MonoLMF structure
479 code = emit_big_add (code, ARMREG_R1, ARMREG_SP, lmf_offset);
480 /* r0 is the result from mono_get_lmf_addr () */
481 ARM_STR_IMM (code, ARMREG_R0, ARMREG_R1, MONO_STRUCT_OFFSET (MonoLMF, lmf_addr));
482 /* new_lmf->previous_lmf = *lmf_addr */
483 ARM_LDR_IMM (code, ARMREG_IP, ARMREG_R0, MONO_STRUCT_OFFSET (MonoLMF, previous_lmf));
484 ARM_STR_IMM (code, ARMREG_IP, ARMREG_R1, MONO_STRUCT_OFFSET (MonoLMF, previous_lmf));
485 /* *(lmf_addr) = r1 */
486 ARM_STR_IMM (code, ARMREG_R1, ARMREG_R0, MONO_STRUCT_OFFSET (MonoLMF, previous_lmf));
487 /* Skip method (only needed for trampoline LMF frames) */
488 ARM_STR_IMM (code, ARMREG_SP, ARMREG_R1, MONO_STRUCT_OFFSET (MonoLMF, sp));
489 ARM_STR_IMM (code, ARMREG_FP, ARMREG_R1, MONO_STRUCT_OFFSET (MonoLMF, fp));
490 /* save the current IP */
491 ARM_MOV_REG_REG (code, ARMREG_IP, ARMREG_PC);
492 ARM_STR_IMM (code, ARMREG_IP, ARMREG_R1, MONO_STRUCT_OFFSET (MonoLMF, ip));
/* Tell the GC map that every slot of the LMF holds no managed references. */
494 for (i = 0; i < sizeof (MonoLMF); i += sizeof (mgreg_t))
495 mini_gc_set_slot_type_from_fp (cfg, lmf_offset + i, SLOT_NOREF);
/*
 * emit_float_args:
 * For each deferred float argument of call INST, load the value from
 * its stack slot into the assigned hardware VFP register. Grows
 * cfg->native_code if the emitted sequence would overflow it;
 * *offset/*max_len track the current position/estimated length.
 */
506 emit_float_args (MonoCompile *cfg, MonoCallInst *inst, guint8 *code, int *max_len, guint *offset)
510 for (list = inst->float_args; list; list = list->next) {
511 FloatArgData *fad = list->data;
512 MonoInst *var = get_vreg_to_inst (cfg, fad->vreg);
513 gboolean imm = arm_is_fpimm8 (var->inst_offset);
515 /* 4+1 insns for emit_big_add () and 1 for FLDS. */
/* Ensure the code buffer can hold the worst-case sequence. */
521 if (*offset + *max_len > cfg->code_size) {
522 cfg->code_size += *max_len;
523 cfg->native_code = g_realloc (cfg->native_code, cfg->code_size);
525 code = cfg->native_code + *offset;
/* Offset does not fit the FLDS imm8 encoding: go through lr. */
529 code = emit_big_add (code, ARMREG_LR, var->inst_basereg, var->inst_offset);
530 ARM_FLDS (code, fad->hreg, ARMREG_LR, 0);
532 ARM_FLDS (code, fad->hreg, var->inst_basereg, var->inst_offset);
534 *offset = code - cfg->native_code;
/*
 * mono_arm_emit_vfp_scratch_save:
 * Spill VFP scratch register REG (vfp_scratch1 or vfp_scratch2) to its
 * dedicated stack slot so it can be clobbered and later restored with
 * mono_arm_emit_vfp_scratch_restore (). Needed on armhf, where the
 * scratch regs d14/d15 are callee-saved.
 */
541 mono_arm_emit_vfp_scratch_save (MonoCompile *cfg, guint8 *code, int reg)
545 g_assert (reg == vfp_scratch1 || reg == vfp_scratch2);
547 inst = (MonoInst *) cfg->arch.vfp_scratch_slots [reg == vfp_scratch1 ? 0 : 1];
550 if (!arm_is_fpimm8 (inst->inst_offset)) {
551 code = emit_big_add (code, ARMREG_LR, inst->inst_basereg, inst->inst_offset);
552 ARM_FSTD (code, reg, ARMREG_LR, 0);
554 ARM_FSTD (code, reg, inst->inst_basereg, inst->inst_offset);
/*
 * mono_arm_emit_vfp_scratch_restore:
 * Reload VFP scratch register REG from the stack slot filled by
 * mono_arm_emit_vfp_scratch_save ().
 */
561 mono_arm_emit_vfp_scratch_restore (MonoCompile *cfg, guint8 *code, int reg)
565 g_assert (reg == vfp_scratch1 || reg == vfp_scratch2);
567 inst = (MonoInst *) cfg->arch.vfp_scratch_slots [reg == vfp_scratch1 ? 0 : 1];
570 if (!arm_is_fpimm8 (inst->inst_offset)) {
571 code = emit_big_add (code, ARMREG_LR, inst->inst_basereg, inst->inst_offset);
572 ARM_FLDD (code, reg, ARMREG_LR, 0);
574 ARM_FLDD (code, reg, inst->inst_basereg, inst->inst_offset);
583 * Emit code to pop an LMF structure from the LMF stack.
586 emit_restore_lmf (MonoCompile *cfg, guint8 *code, gint32 lmf_offset)
/* Small offsets can address the LMF directly off the frame register;
 * otherwise compute its address into r2 first. */
590 if (lmf_offset < 32) {
591 basereg = cfg->frame_reg;
596 code = emit_big_add (code, ARMREG_R2, cfg->frame_reg, lmf_offset);
599 /* ip = previous_lmf */
600 ARM_LDR_IMM (code, ARMREG_IP, basereg, offset + MONO_STRUCT_OFFSET (MonoLMF, previous_lmf));
/* lr = lmf_addr */
602 ARM_LDR_IMM (code, ARMREG_LR, basereg, offset + MONO_STRUCT_OFFSET (MonoLMF, lmf_addr));
603 /* *(lmf_addr) = previous_lmf */
604 ARM_STR_IMM (code, ARMREG_IP, ARMREG_LR, MONO_STRUCT_OFFSET (MonoLMF, previous_lmf));
609 #endif /* #ifndef DISABLE_JIT */
612 * mono_arch_get_argument_info:
613 * @csig: a method signature
614 * @param_count: the number of parameters to consider
615 * @arg_info: an array to store the result infos
617 * Gathers information on parameters such as size, alignment and
618 * padding. arg_info should be large enough to hold param_count + 1 entries.
620 * Returns the size of the activation frame.
623 mono_arch_get_argument_info (MonoGenericSharingContext *gsctx, MonoMethodSignature *csig, int param_count, MonoJitArgumentInfo *arg_info)
625 int k, frame_size = 0;
626 guint32 size, align, pad;
/* A struct return is passed via a hidden pointer, taking one slot. */
630 t = mini_type_get_underlying_type (gsctx, csig->ret);
631 if (MONO_TYPE_ISSTRUCT (t)) {
632 frame_size += sizeof (gpointer);
636 arg_info [0].offset = offset;
/* 'this' occupies one pointer slot before the declared parameters. */
639 frame_size += sizeof (gpointer);
643 arg_info [0].size = frame_size;
645 for (k = 0; k < param_count; k++) {
646 size = mini_type_stack_size_full (NULL, csig->params [k], &align, csig->pinvoke);
648 /* ignore alignment for now */
/* Insert padding so the argument starts on its natural alignment. */
651 frame_size += pad = (align - (frame_size & (align - 1))) & (align - 1);
652 arg_info [k].pad = pad;
654 arg_info [k + 1].pad = 0;
655 arg_info [k + 1].size = size;
657 arg_info [k + 1].offset = offset;
/* Pad the whole frame up to MONO_ARCH_FRAME_ALIGNMENT. */
661 align = MONO_ARCH_FRAME_ALIGNMENT;
662 frame_size += pad = (align - (frame_size & (align - 1))) & (align - 1);
663 arg_info [k].pad = pad;
668 #define MAX_ARCH_DELEGATE_PARAMS 3
/*
 * get_delegate_invoke_impl:
 * Generate the small trampoline used for delegate Invoke.
 * has_target: load delegate->target into r0 (replacing 'this') and jump
 * to delegate->method_ptr; otherwise slide r1..rN down into r0..rN-1 to
 * drop the delegate argument before jumping. On return *code_size holds
 * the emitted length. NOTE(review): param_count is declared gboolean but
 * used as a count below - presumably a historical typo; confirm upstream.
 */
671 get_delegate_invoke_impl (gboolean has_target, gboolean param_count, guint32 *code_size)
673 guint8 *code, *start;
676 start = code = mono_global_codeman_reserve (12);
678 /* Replace the this argument with the target */
679 ARM_LDR_IMM (code, ARMREG_IP, ARMREG_R0, MONO_STRUCT_OFFSET (MonoDelegate, method_ptr));
680 ARM_LDR_IMM (code, ARMREG_R0, ARMREG_R0, MONO_STRUCT_OFFSET (MonoDelegate, target));
681 ARM_MOV_REG_REG (code, ARMREG_PC, ARMREG_IP);
683 g_assert ((code - start) <= 12);
685 mono_arch_flush_icache (start, 12);
/* No-target case: 2 insns + one reg-to-reg move per parameter. */
689 size = 8 + param_count * 4;
690 start = code = mono_global_codeman_reserve (size);
692 ARM_LDR_IMM (code, ARMREG_IP, ARMREG_R0, MONO_STRUCT_OFFSET (MonoDelegate, method_ptr));
693 /* slide down the arguments */
694 for (i = 0; i < param_count; ++i) {
695 ARM_MOV_REG_REG (code, (ARMREG_R0 + i), (ARMREG_R0 + i + 1));
697 ARM_MOV_REG_REG (code, ARMREG_PC, ARMREG_IP);
699 g_assert ((code - start) <= size);
701 mono_arch_flush_icache (start, size);
705 *code_size = code - start;
711 * mono_arch_get_delegate_invoke_impls:
713 * Return a list of MonoAotTrampInfo structures for the delegate invoke impl
717 mono_arch_get_delegate_invoke_impls (void)
/* One has-target impl plus one impl per supported parameter count. */
725 code = get_delegate_invoke_impl (TRUE, 0, &code_len);
726 res = g_slist_prepend (res, mono_tramp_info_create ("delegate_invoke_impl_has_target", code, code_len, NULL, NULL));
728 for (i = 0; i <= MAX_ARCH_DELEGATE_PARAMS; ++i) {
729 code = get_delegate_invoke_impl (FALSE, i, &code_len);
730 tramp_name = g_strdup_printf ("delegate_invoke_impl_target_%d", i);
731 res = g_slist_prepend (res, mono_tramp_info_create (tramp_name, code, code_len, NULL, NULL));
/*
 * mono_arch_get_delegate_invoke_impl:
 * Return (and cache) a delegate invoke trampoline matching SIG.
 * Struct returns and more than MAX_ARCH_DELEGATE_PARAMS regsize
 * parameters are not supported. AOT builds fetch the trampoline by
 * name; otherwise it is generated via get_delegate_invoke_impl ().
 */
739 mono_arch_get_delegate_invoke_impl (MonoMethodSignature *sig, gboolean has_target)
741 guint8 *code, *start;
744 /* FIXME: Support more cases */
745 sig_ret = mini_type_get_underlying_type (NULL, sig->ret);
746 if (MONO_TYPE_ISSTRUCT (sig_ret))
/* has_target variant: a single shared, lazily-created trampoline. */
750 static guint8* cached = NULL;
751 mono_mini_arch_lock ();
753 mono_mini_arch_unlock ();
758 start = mono_aot_get_trampoline ("delegate_invoke_impl_has_target");
760 start = get_delegate_invoke_impl (TRUE, 0, NULL);
762 mono_mini_arch_unlock ();
/* No-target variant: one cached trampoline per parameter count. */
765 static guint8* cache [MAX_ARCH_DELEGATE_PARAMS + 1] = {NULL};
768 if (sig->param_count > MAX_ARCH_DELEGATE_PARAMS)
770 for (i = 0; i < sig->param_count; ++i)
771 if (!mono_is_regsize_var (sig->params [i]))
774 mono_mini_arch_lock ();
775 code = cache [sig->param_count];
777 mono_mini_arch_unlock ();
782 char *name = g_strdup_printf ("delegate_invoke_impl_target_%d", sig->param_count);
783 start = mono_aot_get_trampoline (name);
786 start = get_delegate_invoke_impl (FALSE, sig->param_count, NULL);
788 cache [sig->param_count] = start;
789 mono_mini_arch_unlock ();
/* Extract the 'this' argument from a call's saved register state (r0). */
797 mono_arch_get_this_arg_from_call (mgreg_t *regs, guint8 *code)
799 return (gpointer)regs [ARMREG_R0];
803 * Initialize the cpu to execute managed code.
806 mono_arch_cpu_init (void)
/* Record the target's gint64 alignment; it may differ from the host's
 * when cross-compiling, so propagate it into the type system. */
808 i8_align = MONO_ABI_ALIGNOF (gint64);
809 #ifdef MONO_CROSS_COMPILE
810 /* Need to set the alignment of i8 since it can different on the target */
811 #ifdef TARGET_ANDROID
813 mono_type_set_alignment (MONO_TYPE_I8, i8_align);
/*
 * create_function_wrapper:
 * Build a native thunk that captures the caller's full register state
 * into a MonoContext on the stack, calls FUNCTION (MonoContext *ctx),
 * then restores all registers (including pc) from the possibly-modified
 * context. Used for the soft-debugger single-step/breakpoint hooks.
 */
819 create_function_wrapper (gpointer function)
821 guint8 *start, *code;
823 start = code = mono_global_codeman_reserve (96);
826 * Construct the MonoContext structure on the stack.
829 ARM_SUB_REG_IMM8 (code, ARMREG_SP, ARMREG_SP, sizeof (MonoContext));
831 /* save ip, lr and pc into their corresponding ctx.regs slots. */
832 ARM_STR_IMM (code, ARMREG_IP, ARMREG_SP, MONO_STRUCT_OFFSET (MonoContext, regs) + sizeof (mgreg_t) * ARMREG_IP);
833 ARM_STR_IMM (code, ARMREG_LR, ARMREG_SP, MONO_STRUCT_OFFSET (MonoContext, regs) + 4 * ARMREG_LR);
834 ARM_STR_IMM (code, ARMREG_LR, ARMREG_SP, MONO_STRUCT_OFFSET (MonoContext, regs) + 4 * ARMREG_PC);
836 /* save r0..r10 and fp */
837 ARM_ADD_REG_IMM8 (code, ARMREG_IP, ARMREG_SP, MONO_STRUCT_OFFSET (MonoContext, regs));
838 ARM_STM (code, ARMREG_IP, 0x0fff);
840 /* now we can update fp. */
841 ARM_MOV_REG_REG (code, ARMREG_FP, ARMREG_SP);
843 /* make ctx.esp hold the actual value of sp at the beginning of this method. */
844 ARM_ADD_REG_IMM8 (code, ARMREG_R0, ARMREG_FP, sizeof (MonoContext));
845 ARM_STR_IMM (code, ARMREG_R0, ARMREG_IP, 4 * ARMREG_SP);
846 ARM_STR_IMM (code, ARMREG_R0, ARMREG_FP, MONO_STRUCT_OFFSET (MonoContext, regs) + 4 * ARMREG_SP);
848 /* make ctx.eip hold the address of the call. */
849 ARM_SUB_REG_IMM8 (code, ARMREG_LR, ARMREG_LR, 4);
850 ARM_STR_IMM (code, ARMREG_LR, ARMREG_SP, MONO_STRUCT_OFFSET (MonoContext, pc));
852 /* r0 now points to the MonoContext */
853 ARM_MOV_REG_REG (code, ARMREG_R0, ARMREG_FP);
/* Call FUNCTION: via jumptable entry, or an inline literal loaded into ip. */
856 #ifdef USE_JUMP_TABLES
858 gpointer *jte = mono_jumptable_add_entry ();
859 code = mono_arm_load_jumptable_entry (code, jte, ARMREG_IP);
863 ARM_LDR_IMM (code, ARMREG_IP, ARMREG_PC, 0);
865 *(gpointer*)code = function;
868 ARM_BLX_REG (code, ARMREG_IP);
870 /* we're back; save ctx.eip and ctx.esp into the corresponding regs slots. */
871 ARM_LDR_IMM (code, ARMREG_R0, ARMREG_FP, MONO_STRUCT_OFFSET (MonoContext, pc));
872 ARM_STR_IMM (code, ARMREG_R0, ARMREG_FP, MONO_STRUCT_OFFSET (MonoContext, regs) + 4 * ARMREG_LR);
873 ARM_STR_IMM (code, ARMREG_R0, ARMREG_FP, MONO_STRUCT_OFFSET (MonoContext, regs) + 4 * ARMREG_PC);
875 /* make ip point to the regs array, then restore everything, including pc. */
876 ARM_ADD_REG_IMM8 (code, ARMREG_IP, ARMREG_FP, MONO_STRUCT_OFFSET (MonoContext, regs));
877 ARM_LDM (code, ARMREG_IP, 0xffff);
879 mono_arch_flush_icache (start, code - start);
885 * Initialize architecture specific code.
888 mono_arch_init (void)
890 const char *cpu_arch;
892 InitializeCriticalSection (&mini_arch_mutex);
/* Soft-debugger setup: either function wrappers (soft breakpoints)
 * or mprotect-based trigger pages. */
893 #ifdef MONO_ARCH_SOFT_DEBUG_SUPPORTED
894 if (mini_get_debug_options ()->soft_breakpoints) {
895 single_step_func_wrapper = create_function_wrapper (debugger_agent_single_step_from_context);
896 breakpoint_func_wrapper = create_function_wrapper (debugger_agent_breakpoint_from_context);
901 ss_trigger_page = mono_valloc (NULL, mono_pagesize (), MONO_MMAP_READ|MONO_MMAP_32BIT);
902 bp_trigger_page = mono_valloc (NULL, mono_pagesize (), MONO_MMAP_READ|MONO_MMAP_32BIT);
/* Breakpoint page starts fully protected; faulting reads trap into the agent. */
903 mono_mprotect (bp_trigger_page, mono_pagesize (), 0);
/* Register the ARM-specific exception-handling icalls for AOT. */
906 mono_aot_register_jit_icall ("mono_arm_throw_exception", mono_arm_throw_exception);
907 mono_aot_register_jit_icall ("mono_arm_throw_exception_by_token", mono_arm_throw_exception_by_token);
908 mono_aot_register_jit_icall ("mono_arm_resume_unwind", mono_arm_resume_unwind);
909 #if defined(ENABLE_GSHAREDVT)
910 mono_aot_register_jit_icall ("mono_arm_start_gsharedvt_call", mono_arm_start_gsharedvt_call);
913 #if defined(__ARM_EABI__)
914 eabi_supported = TRUE;
/* Select the FPU model from compile-time defines... */
917 #if defined(ARM_FPU_VFP_HARD)
918 arm_fpu = MONO_ARM_FPU_VFP_HARD;
920 arm_fpu = MONO_ARM_FPU_VFP;
922 #if defined(ARM_FPU_NONE) && !defined(__APPLE__)
923 /* If we're compiling with a soft float fallback and it
924 turns out that no VFP unit is available, we need to
925 switch to soft float. We don't do this for iOS, since
926 iOS devices always have a VFP unit. */
927 if (!mono_hwcap_arm_has_vfp)
928 arm_fpu = MONO_ARM_FPU_NONE;
/* ...then fill the CPU feature flags from hwcap detection. */
932 v5_supported = mono_hwcap_arm_is_v5;
933 v6_supported = mono_hwcap_arm_is_v6;
934 v7_supported = mono_hwcap_arm_is_v7;
935 v7s_supported = mono_hwcap_arm_is_v7s;
937 #if defined(__APPLE__)
938 /* iOS is special-cased here because we don't yet
939 have a way to properly detect CPU features on it. */
940 thumb_supported = TRUE;
943 thumb_supported = mono_hwcap_arm_has_thumb;
944 thumb2_supported = mono_hwcap_arm_has_thumb2;
947 /* Format: armv(5|6|7[s])[-thumb[2]] */
948 cpu_arch = g_getenv ("MONO_CPU_ARCH");
950 /* Do this here so it overrides any detection. */
952 if (strncmp (cpu_arch, "armv", 4) == 0) {
953 v5_supported = cpu_arch [4] >= '5';
954 v6_supported = cpu_arch [4] >= '6';
955 v7_supported = cpu_arch [4] >= '7';
956 v7s_supported = strncmp (cpu_arch, "armv7s", 6) == 0;
959 thumb_supported = strstr (cpu_arch, "thumb") != NULL;
960 thumb2_supported = strstr (cpu_arch, "thumb2") != NULL;
965 * Cleanup architecture specific code.
968 mono_arch_cleanup (void)
973 * This function returns the optimizations supported on this cpu.
976 mono_arch_cpu_optimizations (guint32 *exclude_mask)
978 /* no arm-specific optimizations yet */
984 * This function test for all SIMD functions supported.
986 * Returns a bitmask corresponding to all supported versions.
990 mono_arch_cpu_enumerate_simd_versions (void)
992 /* SIMD is currently unimplemented */
/* Decide whether OPCODE must be emulated in software on this CPU;
 * v7s adds instructions (e.g. hardware divide) that avoid emulation. */
1000 mono_arch_opcode_needs_emulation (MonoCompile *cfg, int opcode)
1002 if (v7s_supported) {
1016 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
/* TRUE when no FPU was detected and the soft-float fallback is active. */
1018 mono_arch_is_soft_float (void)
1020 return arm_fpu == MONO_ARM_FPU_NONE;
/* TRUE when compiled for the hard-float (armhf) calling convention. */
1025 mono_arm_is_hard_float (void)
1027 return arm_fpu == MONO_ARM_FPU_VFP_HARD;
/*
 * is_regsize_var:
 * Return whether T fits in a single 32-bit integer register
 * (pointers, object references, and non-valuetype generic instances).
 */
1031 is_regsize_var (MonoGenericSharingContext *gsctx, MonoType *t) {
1034 t = mini_type_get_underlying_type (gsctx, t);
1041 case MONO_TYPE_FNPTR:
1043 case MONO_TYPE_OBJECT:
1044 case MONO_TYPE_STRING:
1045 case MONO_TYPE_CLASS:
1046 case MONO_TYPE_SZARRAY:
1047 case MONO_TYPE_ARRAY:
1049 case MONO_TYPE_GENERICINST:
1050 if (!mono_type_generic_inst_is_valuetype (t))
1053 case MONO_TYPE_VALUETYPE:
/*
 * mono_arch_get_allocatable_int_vars:
 * Return the list of variables eligible for global register allocation:
 * live, non-volatile, non-indirect locals/args of register size.
 */
1060 mono_arch_get_allocatable_int_vars (MonoCompile *cfg)
1065 for (i = 0; i < cfg->num_varinfo; i++) {
1066 MonoInst *ins = cfg->varinfo [i];
1067 MonoMethodVar *vmv = MONO_VARINFO (cfg, i);
/* Skip unused (empty live range) variables. */
1070 if (vmv->range.first_use.abs_pos >= vmv->range.last_use.abs_pos)
1073 if (ins->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT) || (ins->opcode != OP_LOCAL && ins->opcode != OP_ARG))
1076 /* we can only allocate 32 bit values */
1077 if (is_regsize_var (cfg->generic_sharing_context, ins->inst_vtype)) {
1078 g_assert (MONO_VARINFO (cfg, i)->reg == -1);
1079 g_assert (i == vmv->idx);
1080 vars = mono_varlist_insert_sorted (cfg, vars, vmv, FALSE);
1087 #define USE_EXTRA_TEMPS 0
/*
 * mono_arch_get_global_int_regs:
 * Return the callee-saved registers (v1-v5, possibly fp/v7) available
 * for global register allocation, respecting frame-pointer omission,
 * the iphone ABI's reserved r7, and the rgctx register v5.
 */
1090 mono_arch_get_global_int_regs (MonoCompile *cfg)
1094 mono_arch_compute_omit_fp (cfg);
1097 * FIXME: Interface calls might go through a static rgctx trampoline which
1098 * sets V5, but it doesn't save it, so we need to save it ourselves, and
1101 if (cfg->flags & MONO_CFG_HAS_CALLS)
1102 cfg->uses_rgctx_reg = TRUE;
/* fp is only free when the frame pointer is omitted. */
1104 if (cfg->arch.omit_fp)
1105 regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_FP));
1106 regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_V1));
1107 regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_V2));
1108 regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_V3));
1110 /* V4=R7 is used as a frame pointer, but V7=R10 is preserved */
1111 regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_V7));
1113 regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_V4));
1114 if (!(cfg->compile_aot || cfg->uses_rgctx_reg || COMPILE_LLVM (cfg)))
1115 /* V5 is reserved for passing the vtable/rgctx/IMT method */
1116 regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_V5));
1117 /*regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_V6));*/
1118 /*regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_V7));*/
1124 * mono_arch_regalloc_cost:
1126 * Return the cost, in number of memory references, of the action of
1127 * allocating the variable VMV into a register during global register
1131 mono_arch_regalloc_cost (MonoCompile *cfg, MonoMethodVar *vmv)
1137 #endif /* #ifndef DISABLE_JIT */
/* Fallback when glibc's version-check macro is unavailable. */
1139 #ifndef __GNUC_PREREQ
1140 #define __GNUC_PREREQ(maj, min) (0)
/*
 * mono_arch_flush_icache:
 * Flush the instruction cache for the [code, code+size) range so newly
 * generated code becomes executable. Platform-specific: no-op for NaCl
 * and cross compiles, sys_icache_invalidate on Darwin, __clear_cache on
 * GCC >= 4.1, and raw cacheflush syscalls otherwise.
 */
1144 mono_arch_flush_icache (guint8 *code, gint size)
1146 #if defined(__native_client__)
1147 // For Native Client we don't have to flush i-cache here,
1148 // as it's being done by dyncode interface.
1151 #ifdef MONO_CROSS_COMPILE
1153 sys_icache_invalidate (code, size);
1154 #elif __GNUC_PREREQ(4, 1)
1155 __clear_cache (code, code + size);
1156 #elif defined(PLATFORM_ANDROID)
1157 const int syscall = 0xf0002;
1165 : "r" (code), "r" (code + size), "r" (syscall)
1166 : "r0", "r1", "r7", "r2"
/* Generic Linux: the legacy sys_cacheflush swi. */
1169 __asm __volatile ("mov r0, %0\n"
1172 "swi 0x9f0002 @ sys_cacheflush"
1174 : "r" (code), "r" (code + size), "r" (0)
1175 : "r0", "r1", "r3" );
1177 #endif /* !__native_client__ */
/* Argument-passing classifications and per-call metadata used by
 * get_call_info () / add_general () / add_float () below. */
1188 RegTypeStructByAddr,
1189 /* gsharedvt argument passed by addr in greg */
1190 RegTypeGSharedVtInReg,
1191 /* gsharedvt argument passed by addr on stack */
1192 RegTypeGSharedVtOnStack,
1197 guint16 vtsize; /* in param area */
1201 guint8 size : 4; /* 1, 2, 4, 8, or regs used by RegTypeStructByVal */
1206 guint32 stack_usage;
1207 gboolean vtype_retaddr;
1208 /* The index of the vret arg in the argument list */
/*
 * add_general:
 * Assign the next integer argument to a register or stack slot per the
 * AAPCS / legacy ABI. SIMPLE arguments take one register (r0-r3) or a
 * stack slot; 64-bit arguments take a register pair (even-aligned on
 * EABI), may be split across r3 and the stack, or go fully on the stack.
 */
1218 add_general (guint *gr, guint *stack_size, ArgInfo *ainfo, gboolean simple)
1221 if (*gr > ARMREG_R3) {
1223 ainfo->offset = *stack_size;
1224 ainfo->reg = ARMREG_SP; /* in the caller */
1225 ainfo->storage = RegTypeBase;
1228 ainfo->storage = RegTypeGeneral;
/* 64-bit argument: splitting allowed only with 4-byte i8 alignment. */
1235 split = i8_align == 4;
1240 if (*gr == ARMREG_R3 && split) {
1241 /* first word in r3 and the second on the stack */
1242 ainfo->offset = *stack_size;
1243 ainfo->reg = ARMREG_SP; /* in the caller */
1244 ainfo->storage = RegTypeBaseGen;
1246 } else if (*gr >= ARMREG_R3) {
1247 if (eabi_supported) {
1248 /* darwin aligns longs to 4 byte only */
1249 if (i8_align == 8) {
1254 ainfo->offset = *stack_size;
1255 ainfo->reg = ARMREG_SP; /* in the caller */
1256 ainfo->storage = RegTypeBase;
/* Register pair: EABI requires the first register to be even. */
1259 if (eabi_supported) {
1260 if (i8_align == 8 && ((*gr) & 1))
1263 ainfo->storage = RegTypeIRegPair;
/*
 * add_float:
 * Assign the next floating-point argument to a VFP register (hard-float
 * AAPCS) or the stack, implementing the back-filling rule via
 * *float_spare (see the explanation below).
 */
1272 add_float (guint *fpr, guint *stack_size, ArgInfo *ainfo, gboolean is_double, gint *float_spare)
1275 * If we're calling a function like this:
1277 * void foo(float a, double b, float c)
1279 * We pass a in s0 and b in d1. That leaves us
1280 * with s1 being unused. The armhf ABI recognizes
1281 * this and requires register assignment to then
1282 * use that for the next single-precision arg,
1283 * i.e. c in this example. So float_spare either
1284 * tells us which reg to use for the next single-
1285 * precision arg, or it's -1, meaning use *fpr.
1287 * Note that even though most of the JIT speaks
1288 * double-precision, fpr represents single-
1289 * precision registers.
1291 * See parts 5.5 and 6.1.2 of the AAPCS for how
1295 if (*fpr < ARM_VFP_F16 || (!is_double && *float_spare >= 0)) {
1296 ainfo->storage = RegTypeFP;
1300 * If we're passing a double-precision value
1301 * and *fpr is odd (e.g. it's s1, s3, ...)
1302 * we need to use the next even register. So
1303 * we mark the current *fpr as a spare that
1304 * can be used for the next single-precision
1308 *float_spare = *fpr;
1313 * At this point, we have an even register
1314 * so we assign that and move along.
1318 } else if (*float_spare >= 0) {
1320 * We're passing a single-precision value
1321 * and it looks like a spare single-
1322 * precision register is available. Let's
1326 ainfo->reg = *float_spare;
1330 * If we hit this branch, we're passing a
1331 * single-precision value and we can simply
1332 * use the next available register.
1340 * We've exhausted available floating point
1341 * regs, so pass the rest on the stack.
1349 ainfo->offset = *stack_size;
1350 ainfo->reg = ARMREG_SP;
1351 ainfo->storage = RegTypeBase;
1358 get_call_info (MonoGenericSharingContext *gsctx, MonoMemPool *mp, MonoMethodSignature *sig)
/*
 * Compute the full calling-convention layout (CallInfo/ArgInfo) for 'sig':
 * where each argument and the return value live (core regs, VFP regs,
 * stack, struct-by-val/by-addr), plus the total stack usage. Allocated
 * from 'mp' when given, otherwise from the GLib heap.
 * NOTE(review): this extract is missing intermediate lines; comments
 * describe only what is visible here.
 */
1360 guint i, gr, fpr, pstart;
1362 int n = sig->hasthis + sig->param_count;
1363 MonoType *simpletype;
1364 guint32 stack_size = 0;
1366 gboolean is_pinvoke = sig->pinvoke;
1370 cinfo = mono_mempool_alloc0 (mp, sizeof (CallInfo) + (sizeof (ArgInfo) * n));
1372 cinfo = g_malloc0 (sizeof (CallInfo) + (sizeof (ArgInfo) * n));
/* Decide how the return value is passed before laying out the arguments,
 * since a vtype return address may consume an argument register. */
1379 t = mini_type_get_underlying_type (gsctx, sig->ret);
1380 if (MONO_TYPE_ISSTRUCT (t)) {
1383 if (is_pinvoke && mono_class_native_size (mono_class_from_mono_type (t), &align) <= sizeof (gpointer)) {
1384 cinfo->ret.storage = RegTypeStructByVal;
1386 cinfo->vtype_retaddr = TRUE;
1388 } else if (!(t->type == MONO_TYPE_GENERICINST && !mono_type_generic_inst_is_valuetype (t)) && mini_is_gsharedvt_type_gsctx (gsctx, t)) {
1389 cinfo->vtype_retaddr = TRUE;
1395 * To simplify get_this_arg_reg () and LLVM integration, emit the vret arg after
1396 * the first argument, allowing 'this' to be always passed in the first arg reg.
1397 * Also do this if the first argument is a reference type, since virtual calls
1398 * are sometimes made using calli without sig->hasthis set, like in the delegate
1401 if (cinfo->vtype_retaddr && !is_pinvoke && (sig->hasthis || (sig->param_count > 0 && MONO_TYPE_IS_REFERENCE (mini_type_get_underlying_type (gsctx, sig->params [0]))))) {
1403 add_general (&gr, &stack_size, cinfo->args + 0, TRUE);
1405 add_general (&gr, &stack_size, &cinfo->args [sig->hasthis + 0], TRUE);
1409 add_general (&gr, &stack_size, &cinfo->ret, TRUE);
1410 cinfo->vret_arg_index = 1;
1414 add_general (&gr, &stack_size, cinfo->args + 0, TRUE);
1418 if (cinfo->vtype_retaddr)
1419 add_general (&gr, &stack_size, &cinfo->ret, TRUE);
/* Lay out each declared parameter by its underlying type. */
1422 DEBUG(printf("params: %d\n", sig->param_count));
1423 for (i = pstart; i < sig->param_count; ++i) {
1424 ArgInfo *ainfo = &cinfo->args [n];
1426 if ((sig->call_convention == MONO_CALL_VARARG) && (i == sig->sentinelpos)) {
1427 /* Prevent implicit arguments and sig_cookie from
1428 being passed in registers */
1431 /* Emit the signature cookie just before the implicit arguments */
1432 add_general (&gr, &stack_size, &cinfo->sig_cookie, TRUE);
1434 DEBUG(printf("param %d: ", i));
1435 if (sig->params [i]->byref) {
1436 DEBUG(printf("byref\n"));
1437 add_general (&gr, &stack_size, ainfo, TRUE);
1441 simpletype = mini_type_get_underlying_type (gsctx, sig->params [i]);
1442 switch (simpletype->type) {
1443 case MONO_TYPE_BOOLEAN:
1446 cinfo->args [n].size = 1;
1447 add_general (&gr, &stack_size, ainfo, TRUE);
1450 case MONO_TYPE_CHAR:
1453 cinfo->args [n].size = 2;
1454 add_general (&gr, &stack_size, ainfo, TRUE);
1459 cinfo->args [n].size = 4;
1460 add_general (&gr, &stack_size, ainfo, TRUE);
1466 case MONO_TYPE_FNPTR:
1467 case MONO_TYPE_CLASS:
1468 case MONO_TYPE_OBJECT:
1469 case MONO_TYPE_STRING:
1470 case MONO_TYPE_SZARRAY:
1471 case MONO_TYPE_ARRAY:
1472 cinfo->args [n].size = sizeof (gpointer);
1473 add_general (&gr, &stack_size, ainfo, TRUE);
1476 case MONO_TYPE_GENERICINST:
1477 if (!mono_type_generic_inst_is_valuetype (simpletype)) {
1478 cinfo->args [n].size = sizeof (gpointer);
1479 add_general (&gr, &stack_size, ainfo, TRUE);
1483 if (mini_is_gsharedvt_type_gsctx (gsctx, simpletype)) {
1484 /* gsharedvt arguments are passed by ref */
1485 g_assert (mini_is_gsharedvt_type_gsctx (gsctx, simpletype));
1486 add_general (&gr, &stack_size, ainfo, TRUE);
1487 switch (ainfo->storage) {
1488 case RegTypeGeneral:
1489 ainfo->storage = RegTypeGSharedVtInReg;
1492 ainfo->storage = RegTypeGSharedVtOnStack;
1495 g_assert_not_reached ();
/* Value types: passed by value, split between registers and stack. */
1501 case MONO_TYPE_TYPEDBYREF:
1502 case MONO_TYPE_VALUETYPE: {
1508 if (simpletype->type == MONO_TYPE_TYPEDBYREF) {
1509 size = sizeof (MonoTypedRef);
1510 align = sizeof (gpointer);
1512 MonoClass *klass = mono_class_from_mono_type (sig->params [i]);
1514 size = mono_class_native_size (klass, &align);
1516 size = mini_type_stack_size_full (gsctx, simpletype, &align, FALSE);
1518 DEBUG(printf ("load %d bytes struct\n", size));
/* Round the struct size up to a whole number of machine words. */
1521 align_size += (sizeof (gpointer) - 1);
1522 align_size &= ~(sizeof (gpointer) - 1);
1523 nwords = (align_size + sizeof (gpointer) -1 ) / sizeof (gpointer);
1524 ainfo->storage = RegTypeStructByVal;
1525 ainfo->struct_size = size;
1526 /* FIXME: align stack_size if needed */
1527 if (eabi_supported) {
1528 if (align >= 8 && (gr & 1))
1531 if (gr > ARMREG_R3) {
1533 ainfo->vtsize = nwords;
1535 int rest = ARMREG_R3 - gr + 1;
1536 int n_in_regs = rest >= nwords? nwords: rest;
1538 ainfo->size = n_in_regs;
1539 ainfo->vtsize = nwords - n_in_regs;
1542 nwords -= n_in_regs;
1544 if (sig->call_convention == MONO_CALL_VARARG)
1545 /* This matches the alignment in mono_ArgIterator_IntGetNextArg () */
1546 stack_size = ALIGN_TO (stack_size, align);
1547 ainfo->offset = stack_size;
1548 /*g_print ("offset for arg %d at %d\n", n, stack_size);*/
1549 stack_size += nwords * sizeof (gpointer);
1556 add_general (&gr, &stack_size, ainfo, FALSE);
/* Floating point: VFP regs on hard-float, core regs/stack otherwise. */
1563 add_float (&fpr, &stack_size, ainfo, FALSE, &float_spare);
1565 add_general (&gr, &stack_size, ainfo, TRUE);
1573 add_float (&fpr, &stack_size, ainfo, TRUE, &float_spare);
1575 add_general (&gr, &stack_size, ainfo, FALSE);
1580 case MONO_TYPE_MVAR:
1581 /* gsharedvt arguments are passed by ref */
1582 g_assert (mini_is_gsharedvt_type_gsctx (gsctx, simpletype));
1583 add_general (&gr, &stack_size, ainfo, TRUE);
1584 switch (ainfo->storage) {
1585 case RegTypeGeneral:
1586 ainfo->storage = RegTypeGSharedVtInReg;
1589 ainfo->storage = RegTypeGSharedVtOnStack;
1592 g_assert_not_reached ();
1597 g_error ("Can't trampoline 0x%x", sig->params [i]->type);
1601 /* Handle the case where there are no implicit arguments */
1602 if ((sig->call_convention == MONO_CALL_VARARG) && (i == sig->sentinelpos)) {
1603 /* Prevent implicit arguments and sig_cookie from
1604 being passed in registers */
1607 /* Emit the signature cookie just before the implicit arguments */
1608 add_general (&gr, &stack_size, &cinfo->sig_cookie, TRUE);
/* Record where the return value is delivered. */
1612 simpletype = mini_type_get_underlying_type (gsctx, sig->ret);
1613 switch (simpletype->type) {
1614 case MONO_TYPE_BOOLEAN:
1619 case MONO_TYPE_CHAR:
1625 case MONO_TYPE_FNPTR:
1626 case MONO_TYPE_CLASS:
1627 case MONO_TYPE_OBJECT:
1628 case MONO_TYPE_SZARRAY:
1629 case MONO_TYPE_ARRAY:
1630 case MONO_TYPE_STRING:
1631 cinfo->ret.storage = RegTypeGeneral;
1632 cinfo->ret.reg = ARMREG_R0;
1636 cinfo->ret.storage = RegTypeIRegPair;
1637 cinfo->ret.reg = ARMREG_R0;
1641 cinfo->ret.storage = RegTypeFP;
1643 if (IS_HARD_FLOAT) {
1644 cinfo->ret.reg = ARM_VFP_F0;
1646 cinfo->ret.reg = ARMREG_R0;
1650 case MONO_TYPE_GENERICINST:
1651 if (!mono_type_generic_inst_is_valuetype (simpletype)) {
1652 cinfo->ret.storage = RegTypeGeneral;
1653 cinfo->ret.reg = ARMREG_R0;
1656 // FIXME: Only for variable types
1657 if (mini_is_gsharedvt_type_gsctx (gsctx, simpletype)) {
1658 cinfo->ret.storage = RegTypeStructByAddr;
1659 g_assert (cinfo->vtype_retaddr);
1663 case MONO_TYPE_VALUETYPE:
1664 case MONO_TYPE_TYPEDBYREF:
1665 if (cinfo->ret.storage != RegTypeStructByVal)
1666 cinfo->ret.storage = RegTypeStructByAddr;
1669 case MONO_TYPE_MVAR:
1670 g_assert (mini_is_gsharedvt_type_gsctx (gsctx, simpletype));
1671 cinfo->ret.storage = RegTypeStructByAddr;
1672 g_assert (cinfo->vtype_retaddr);
1674 case MONO_TYPE_VOID:
1677 g_error ("Can't handle as return value 0x%x", sig->ret->type);
1681 /* align stack size to 8 */
1682 DEBUG (printf (" stack size: %d (%d)\n", (stack_size + 15) & ~15, stack_size));
1683 stack_size = (stack_size + 7) & ~7;
1685 cinfo->stack_usage = stack_size;
1691 mono_arch_tail_call_supported (MonoCompile *cfg, MonoMethodSignature *caller_sig, MonoMethodSignature *callee_sig)
/*
 * Decide whether a tail call from 'caller_sig' to 'callee_sig' can be
 * emitted on this backend, by comparing the stack layouts produced by
 * get_call_info (). NOTE(review): extract has gaps; only the visible
 * checks are documented.
 */
1693 MonoType *callee_ret;
1697 if (cfg->compile_aot && !cfg->full_aot)
1698 /* OP_TAILCALL doesn't work with AOT */
1701 c1 = get_call_info (NULL, NULL, caller_sig);
1702 c2 = get_call_info (NULL, NULL, callee_sig);
1705 * Tail calls with more callee stack usage than the caller cannot be supported, since
1706 * the extra stack space would be left on the stack after the tail call.
1708 res = c1->stack_usage >= c2->stack_usage;
1709 callee_ret = mini_replace_type (callee_sig->ret);
1710 if (callee_ret && MONO_TYPE_ISSTRUCT (callee_ret) && c2->ret.storage != RegTypeStructByVal)
1711 /* An address on the callee's stack is passed as the first argument */
/* Large callee stack usage (> 16 words) is also rejected here. */
1714 if (c2->stack_usage > 16 * 4)
/* Debug helper: gate frame-pointer omission with the mono debug counter,
 * so omit-fp can be bisected with MONO_DEBUG count options. */
1726 debug_omit_fp (void)
1729 return mono_debug_count ();
1736 * mono_arch_compute_omit_fp:
1738 * Determine whenever the frame pointer can be eliminated.
1741 mono_arch_compute_omit_fp (MonoCompile *cfg)
/*
 * Computes cfg->arch.omit_fp once per method (cached via omit_fp_computed).
 * Starts optimistic (TRUE) and then disables omission for any feature that
 * requires a fixed frame pointer. NOTE(review): extract has gaps and the
 * function continues past the visible lines.
 */
1743 MonoMethodSignature *sig;
1744 MonoMethodHeader *header;
1748 if (cfg->arch.omit_fp_computed)
1751 header = cfg->header;
1753 sig = mono_method_signature (cfg->method);
/* Lazily compute and cache the calling convention info. */
1755 if (!cfg->arch.cinfo)
1756 cfg->arch.cinfo = get_call_info (cfg->generic_sharing_context, cfg->mempool, sig);
1757 cinfo = cfg->arch.cinfo;
1760 * FIXME: Remove some of the restrictions.
1762 cfg->arch.omit_fp = TRUE;
1763 cfg->arch.omit_fp_computed = TRUE;
/* Each condition below forces keeping the frame pointer. */
1765 if (cfg->disable_omit_fp)
1766 cfg->arch.omit_fp = FALSE;
1767 if (!debug_omit_fp ())
1768 cfg->arch.omit_fp = FALSE;
1770 if (cfg->method->save_lmf)
1771 cfg->arch.omit_fp = FALSE;
1773 if (cfg->flags & MONO_CFG_HAS_ALLOCA)
1774 cfg->arch.omit_fp = FALSE;
1775 if (header->num_clauses)
1776 cfg->arch.omit_fp = FALSE;
1777 if (cfg->param_area)
1778 cfg->arch.omit_fp = FALSE;
1779 if (!sig->pinvoke && (sig->call_convention == MONO_CALL_VARARG))
1780 cfg->arch.omit_fp = FALSE;
1781 if ((mono_jit_trace_calls != NULL && mono_trace_eval (cfg->method)) ||
1782 (cfg->prof_options & MONO_PROFILE_ENTER_LEAVE))
1783 cfg->arch.omit_fp = FALSE;
1784 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
1785 ArgInfo *ainfo = &cinfo->args [i];
1787 if (ainfo->storage == RegTypeBase || ainfo->storage == RegTypeBaseGen || ainfo->storage == RegTypeStructByVal) {
1789 * The stack offset can only be determined when the frame
1792 cfg->arch.omit_fp = FALSE;
/* Sum the sizes of the locals (continues past this extract). */
1797 for (i = cfg->locals_start; i < cfg->num_varinfo; i++) {
1798 MonoInst *ins = cfg->varinfo [i];
1801 locals_size += mono_type_size (ins->inst_vtype, &ialign);
1806 * Set var information according to the calling convention. arm version.
1807 * The locals var stuff should most likely be split in another method.
1810 mono_arch_allocate_vars (MonoCompile *cfg)
/*
 * Assign stack offsets / registers to the return value, locals, special
 * sequence-point variables and incoming arguments. Offsets are positive
 * and grow upward from the chosen frame register (MONO_CFG_HAS_SPILLUP).
 * NOTE(review): this extract is missing intermediate lines; comments
 * describe only the visible code.
 */
1812 MonoMethodSignature *sig;
1813 MonoMethodHeader *header;
1816 int i, offset, size, align, curinst;
1820 sig = mono_method_signature (cfg->method);
1822 if (!cfg->arch.cinfo)
1823 cfg->arch.cinfo = get_call_info (cfg->generic_sharing_context, cfg->mempool, sig);
1824 cinfo = cfg->arch.cinfo;
1825 sig_ret = mini_replace_type (sig->ret);
1827 mono_arch_compute_omit_fp (cfg);
/* Frame is addressed off SP when the fp could be omitted, else off FP. */
1829 if (cfg->arch.omit_fp)
1830 cfg->frame_reg = ARMREG_SP;
1832 cfg->frame_reg = ARMREG_FP;
1834 cfg->flags |= MONO_CFG_HAS_SPILLUP;
1836 /* allow room for the vararg method args: void* and long/double */
1837 if (mono_jit_trace_calls != NULL && mono_trace_eval (cfg->method))
1838 cfg->param_area = MAX (cfg->param_area, sizeof (gpointer)*8);
1840 header = cfg->header;
1842 /* See mono_arch_get_global_int_regs () */
1843 if (cfg->flags & MONO_CFG_HAS_CALLS)
1844 cfg->uses_rgctx_reg = TRUE;
1846 if (cfg->frame_reg != ARMREG_SP)
1847 cfg->used_int_regs |= 1 << cfg->frame_reg;
1849 if (cfg->compile_aot || cfg->uses_rgctx_reg || COMPILE_LLVM (cfg))
1850 /* V5 is reserved for passing the vtable/rgctx/IMT method */
1851 cfg->used_int_regs |= (1 << ARMREG_V5);
/* Scalar returns live in r0; vtype returns are handled further down. */
1855 if (!MONO_TYPE_ISSTRUCT (sig_ret) && !cinfo->vtype_retaddr) {
1856 if (sig_ret->type != MONO_TYPE_VOID) {
1857 cfg->ret->opcode = OP_REGVAR;
1858 cfg->ret->inst_c0 = ARMREG_R0;
1861 /* local vars are at a positive offset from the stack pointer */
1863 * also note that if the function uses alloca, we use FP
1864 * to point at the local variables.
1866 offset = 0; /* linkage area */
1867 /* align the offset to 16 bytes: not sure this is needed here */
1869 //offset &= ~(8 - 1);
1871 /* add parameter area size for called functions */
1872 offset += cfg->param_area;
1875 if (cfg->flags & MONO_CFG_HAS_FPOUT)
1878 /* allow room to save the return value */
1879 if (mono_jit_trace_calls != NULL && mono_trace_eval (cfg->method))
1882 /* the MonoLMF structure is stored just below the stack pointer */
1883 if (cinfo->ret.storage == RegTypeStructByVal) {
1884 cfg->ret->opcode = OP_REGOFFSET;
1885 cfg->ret->inst_basereg = cfg->frame_reg;
1886 offset += sizeof (gpointer) - 1;
1887 offset &= ~(sizeof (gpointer) - 1);
1888 cfg->ret->inst_offset = - offset;
1889 offset += sizeof(gpointer);
1890 } else if (cinfo->vtype_retaddr) {
1891 ins = cfg->vret_addr;
1892 offset += sizeof(gpointer) - 1;
1893 offset &= ~(sizeof(gpointer) - 1);
1894 ins->inst_offset = offset;
1895 ins->opcode = OP_REGOFFSET;
1896 ins->inst_basereg = cfg->frame_reg;
1897 if (G_UNLIKELY (cfg->verbose_level > 1)) {
1898 printf ("vret_addr =");
1899 mono_print_ins (cfg->vret_addr);
1901 offset += sizeof(gpointer);
1904 /* Allocate these first so they have a small offset, OP_SEQ_POINT depends on this */
1905 if (cfg->arch.seq_point_info_var) {
1908 ins = cfg->arch.seq_point_info_var;
1912 offset += align - 1;
1913 offset &= ~(align - 1);
1914 ins->opcode = OP_REGOFFSET;
1915 ins->inst_basereg = cfg->frame_reg;
1916 ins->inst_offset = offset;
1919 ins = cfg->arch.ss_trigger_page_var;
1922 offset += align - 1;
1923 offset &= ~(align - 1);
1924 ins->opcode = OP_REGOFFSET;
1925 ins->inst_basereg = cfg->frame_reg;
1926 ins->inst_offset = offset;
/* Soft-breakpoint sequence point variables (soft debugger support). */
1930 if (cfg->arch.seq_point_read_var) {
1933 ins = cfg->arch.seq_point_read_var;
1937 offset += align - 1;
1938 offset &= ~(align - 1);
1939 ins->opcode = OP_REGOFFSET;
1940 ins->inst_basereg = cfg->frame_reg;
1941 ins->inst_offset = offset;
1944 ins = cfg->arch.seq_point_ss_method_var;
1947 offset += align - 1;
1948 offset &= ~(align - 1);
1949 ins->opcode = OP_REGOFFSET;
1950 ins->inst_basereg = cfg->frame_reg;
1951 ins->inst_offset = offset;
1954 ins = cfg->arch.seq_point_bp_method_var;
1957 offset += align - 1;
1958 offset &= ~(align - 1);
1959 ins->opcode = OP_REGOFFSET;
1960 ins->inst_basereg = cfg->frame_reg;
1961 ins->inst_offset = offset;
1965 if (cfg->has_atomic_exchange_i4 || cfg->has_atomic_cas_i4 || cfg->has_atomic_add_i4) {
1966 /* Allocate a temporary used by the atomic ops */
1970 /* Allocate a local slot to hold the sig cookie address */
1971 offset += align - 1;
1972 offset &= ~(align - 1);
1973 cfg->arch.atomic_tmp_offset = offset;
1976 cfg->arch.atomic_tmp_offset = -1;
1979 cfg->locals_min_stack_offset = offset;
/* Assign offsets to the method's local variables. */
1981 curinst = cfg->locals_start;
1982 for (i = curinst; i < cfg->num_varinfo; ++i) {
1985 ins = cfg->varinfo [i];
1986 if ((ins->flags & MONO_INST_IS_DEAD) || ins->opcode == OP_REGVAR || ins->opcode == OP_REGOFFSET)
1989 t = ins->inst_vtype;
1990 if (cfg->gsharedvt && mini_is_gsharedvt_variable_type (cfg, t))
1993 /* inst->backend.is_pinvoke indicates native sized value types, this is used by the
1994 * pinvoke wrappers when they call functions returning structure */
1995 if (ins->backend.is_pinvoke && MONO_TYPE_ISSTRUCT (t) && t->type != MONO_TYPE_TYPEDBYREF) {
1996 size = mono_class_native_size (mono_class_from_mono_type (t), &ualign);
2000 size = mono_type_size (t, &align);
2002 /* FIXME: if a structure is misaligned, our memcpy doesn't work,
2003 * since it loads/stores misaligned words, which don't do the right thing.
2005 if (align < 4 && size >= 4)
2007 if (ALIGN_TO (offset, align) > ALIGN_TO (offset, 4))
2008 mini_gc_set_slot_type_from_fp (cfg, ALIGN_TO (offset, 4), SLOT_NOREF);
2009 offset += align - 1;
2010 offset &= ~(align - 1);
2011 ins->opcode = OP_REGOFFSET;
2012 ins->inst_offset = offset;
2013 ins->inst_basereg = cfg->frame_reg;
2015 //g_print ("allocating local %d to %d\n", i, inst->inst_offset);
2018 cfg->locals_max_stack_offset = offset;
/* Assign a frame slot to the implicit 'this' argument if present. */
2022 ins = cfg->args [curinst];
2023 if (ins->opcode != OP_REGVAR) {
2024 ins->opcode = OP_REGOFFSET;
2025 ins->inst_basereg = cfg->frame_reg;
2026 offset += sizeof (gpointer) - 1;
2027 offset &= ~(sizeof (gpointer) - 1);
2028 ins->inst_offset = offset;
2029 offset += sizeof (gpointer);
2034 if (sig->call_convention == MONO_CALL_VARARG) {
2038 /* Allocate a local slot to hold the sig cookie address */
2039 offset += align - 1;
2040 offset &= ~(align - 1);
2041 cfg->sig_cookie = offset;
/* Assign frame slots to the declared arguments. */
2045 for (i = 0; i < sig->param_count; ++i) {
2046 ins = cfg->args [curinst];
2048 if (ins->opcode != OP_REGVAR) {
2049 ins->opcode = OP_REGOFFSET;
2050 ins->inst_basereg = cfg->frame_reg;
2051 size = mini_type_stack_size_full (cfg->generic_sharing_context, sig->params [i], &ualign, sig->pinvoke);
2053 /* FIXME: if a structure is misaligned, our memcpy doesn't work,
2054 * since it loads/stores misaligned words, which don't do the right thing.
2056 if (align < 4 && size >= 4)
2058 /* The code in the prolog () stores words when storing vtypes received in a register */
2059 if (MONO_TYPE_ISSTRUCT (sig->params [i]))
2061 if (ALIGN_TO (offset, align) > ALIGN_TO (offset, 4))
2062 mini_gc_set_slot_type_from_fp (cfg, ALIGN_TO (offset, 4), SLOT_NOREF);
2063 offset += align - 1;
2064 offset &= ~(align - 1);
2065 ins->inst_offset = offset;
2071 /* align the offset to 8 bytes */
2072 if (ALIGN_TO (offset, 8) > ALIGN_TO (offset, 4))
2073 mini_gc_set_slot_type_from_fp (cfg, ALIGN_TO (offset, 4), SLOT_NOREF);
2078 cfg->stack_offset = offset;
2082 mono_arch_create_vars (MonoCompile *cfg)
/*
 * Create the backend-specific compile-time variables: VFP scratch slots
 * (hard-float only), the vtype return address variable, and the sequence
 * point variables used for debugger support. NOTE(review): extract has
 * gaps; comments cover only the visible code.
 */
2084 MonoMethodSignature *sig;
2088 sig = mono_method_signature (cfg->method);
2090 if (!cfg->arch.cinfo)
2091 cfg->arch.cinfo = get_call_info (cfg->generic_sharing_context, cfg->mempool, sig);
2092 cinfo = cfg->arch.cinfo;
2094 if (IS_HARD_FLOAT) {
/* Two double-sized scratch slots used when shuffling VFP values. */
2095 for (i = 0; i < 2; i++) {
2096 MonoInst *inst = mono_compile_create_var (cfg, &mono_defaults.double_class->byval_arg, OP_LOCAL);
2097 inst->flags |= MONO_INST_VOLATILE;
2099 cfg->arch.vfp_scratch_slots [i] = (gpointer) inst;
2103 if (cinfo->ret.storage == RegTypeStructByVal)
2104 cfg->ret_var_is_local = TRUE;
2106 if (cinfo->vtype_retaddr) {
2107 cfg->vret_addr = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_ARG);
2108 if (G_UNLIKELY (cfg->verbose_level > 1)) {
2109 printf ("vret_addr = ");
2110 mono_print_ins (cfg->vret_addr);
2114 if (cfg->gen_seq_points) {
2115 if (cfg->soft_breakpoints) {
/* Soft breakpoints: variables read/written by the soft debugger. */
2116 MonoInst *ins = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
2117 ins->flags |= MONO_INST_VOLATILE;
2118 cfg->arch.seq_point_read_var = ins;
2120 ins = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
2121 ins->flags |= MONO_INST_VOLATILE;
2122 cfg->arch.seq_point_ss_method_var = ins;
2124 ins = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
2125 ins->flags |= MONO_INST_VOLATILE;
2126 cfg->arch.seq_point_bp_method_var = ins;
2128 g_assert (!cfg->compile_aot);
2129 } else if (cfg->compile_aot) {
2130 MonoInst *ins = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
2131 ins->flags |= MONO_INST_VOLATILE;
2132 cfg->arch.seq_point_info_var = ins;
2134 /* Allocate a separate variable for this to save 1 load per seq point */
2135 ins = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
2136 ins->flags |= MONO_INST_VOLATILE;
2137 cfg->arch.ss_trigger_page_var = ins;
2143 emit_sig_cookie (MonoCompile *cfg, MonoCallInst *call, CallInfo *cinfo)
/*
 * Emit the vararg signature cookie: build a trimmed copy of the call
 * signature (implicit args only) and store a pointer to it at the stack
 * offset computed by get_call_info (). No-op for tail calls.
 */
2145 MonoMethodSignature *tmp_sig;
2148 if (call->tail_call)
2151 g_assert (cinfo->sig_cookie.storage == RegTypeBase);
2154 * mono_ArgIterator_Setup assumes the signature cookie is
2155 * passed first and all the arguments which were before it are
2156 * passed on the stack after the signature. So compensate by
2157 * passing a different signature.
2159 tmp_sig = mono_metadata_signature_dup (call->signature);
2160 tmp_sig->param_count -= call->signature->sentinelpos;
2161 tmp_sig->sentinelpos = 0;
2162 memcpy (tmp_sig->params, call->signature->params + call->signature->sentinelpos, tmp_sig->param_count * sizeof (MonoType*));
2164 sig_reg = mono_alloc_ireg (cfg);
2165 MONO_EMIT_NEW_SIGNATURECONST (cfg, sig_reg, tmp_sig);
2167 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, ARMREG_SP, cinfo->sig_cookie.offset, sig_reg);
2172 mono_arch_get_llvm_call_info (MonoCompile *cfg, MonoMethodSignature *sig)
/*
 * Translate this backend's CallInfo into the LLVMCallInfo format used by
 * the LLVM backend; sets cfg->disable_llvm with a message for any storage
 * kind LLVM cannot handle. NOTE(review): extract has gaps; comments cover
 * only the visible code.
 */
2177 LLVMCallInfo *linfo;
2179 n = sig->param_count + sig->hasthis;
2181 cinfo = get_call_info (cfg->generic_sharing_context, cfg->mempool, sig);
2183 linfo = mono_mempool_alloc0 (cfg->mempool, sizeof (LLVMCallInfo) + (sizeof (LLVMArgInfo) * n));
2186 * LLVM always uses the native ABI while we use our own ABI, the
2187 * only difference is the handling of vtypes:
2188 * - we only pass/receive them in registers in some cases, and only
2189 * in 1 or 2 integer registers.
2191 if (cinfo->vtype_retaddr) {
2192 /* Vtype returned using a hidden argument */
2193 linfo->ret.storage = LLVMArgVtypeRetAddr;
2194 linfo->vret_arg_index = cinfo->vret_arg_index;
2195 } else if (cinfo->ret.storage != RegTypeGeneral && cinfo->ret.storage != RegTypeNone && cinfo->ret.storage != RegTypeFP && cinfo->ret.storage != RegTypeIRegPair) {
2196 cfg->exception_message = g_strdup ("unknown ret conv");
2197 cfg->disable_llvm = TRUE;
2201 for (i = 0; i < n; ++i) {
2202 ainfo = cinfo->args + i;
2204 linfo->args [i].storage = LLVMArgNone;
2206 switch (ainfo->storage) {
2207 case RegTypeGeneral:
2208 case RegTypeIRegPair:
2210 linfo->args [i].storage = LLVMArgInIReg;
2212 case RegTypeStructByVal:
2213 // FIXME: Passing entirely on the stack or split reg/stack
2214 if (ainfo->vtsize == 0 && ainfo->size <= 2) {
2215 linfo->args [i].storage = LLVMArgVtypeInReg;
2216 linfo->args [i].pair_storage [0] = LLVMArgInIReg;
2217 if (ainfo->size == 2)
2218 linfo->args [i].pair_storage [1] = LLVMArgInIReg;
2220 linfo->args [i].pair_storage [1] = LLVMArgNone;
2222 cfg->exception_message = g_strdup_printf ("vtype-by-val on stack");
2223 cfg->disable_llvm = TRUE;
2227 cfg->exception_message = g_strdup_printf ("ainfo->storage (%d)", ainfo->storage);
2228 cfg->disable_llvm = TRUE;
2238 mono_arch_emit_call (MonoCompile *cfg, MonoCallInst *call)
/*
 * Emit the IR that moves each call argument into its ABI location
 * (core reg, reg pair, VFP reg, or outgoing stack slot) as computed by
 * get_call_info (), plus the vararg sig cookie and the vtype return
 * address. NOTE(review): this extract is missing intermediate lines;
 * comments describe only the visible code.
 */
2241 MonoMethodSignature *sig;
2245 sig = call->signature;
2246 n = sig->param_count + sig->hasthis;
2248 cinfo = get_call_info (cfg->generic_sharing_context, NULL, sig);
2250 for (i = 0; i < n; ++i) {
2251 ArgInfo *ainfo = cinfo->args + i;
/* 'this' has no entry in sig->params; treat it as a native int. */
2254 if (i >= sig->hasthis)
2255 t = sig->params [i - sig->hasthis];
2257 t = &mono_defaults.int_class->byval_arg;
2258 t = mini_type_get_underlying_type (cfg->generic_sharing_context, t);
2260 if ((sig->call_convention == MONO_CALL_VARARG) && (i == sig->sentinelpos)) {
2261 /* Emit the signature cookie just before the implicit arguments */
2262 emit_sig_cookie (cfg, call, cinfo);
2265 in = call->args [i];
2267 switch (ainfo->storage) {
2268 case RegTypeGeneral:
2269 case RegTypeIRegPair:
/* 64-bit values occupy two consecutive core registers. */
2270 if (!t->byref && ((t->type == MONO_TYPE_I8) || (t->type == MONO_TYPE_U8))) {
2271 MONO_INST_NEW (cfg, ins, OP_MOVE);
2272 ins->dreg = mono_alloc_ireg (cfg);
2273 ins->sreg1 = in->dreg + 1;
2274 MONO_ADD_INS (cfg->cbb, ins);
2275 mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, ainfo->reg, FALSE);
2277 MONO_INST_NEW (cfg, ins, OP_MOVE);
2278 ins->dreg = mono_alloc_ireg (cfg);
2279 ins->sreg1 = in->dreg + 2;
2280 MONO_ADD_INS (cfg->cbb, ins);
2281 mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, ainfo->reg + 1, FALSE);
2282 } else if (!t->byref && ((t->type == MONO_TYPE_R8) || (t->type == MONO_TYPE_R4))) {
2283 if (ainfo->size == 4) {
2284 if (IS_SOFT_FLOAT) {
2285 /* mono_emit_call_args () have already done the r8->r4 conversion */
2286 /* The converted value is in an int vreg */
2287 MONO_INST_NEW (cfg, ins, OP_MOVE);
2288 ins->dreg = mono_alloc_ireg (cfg);
2289 ins->sreg1 = in->dreg;
2290 MONO_ADD_INS (cfg->cbb, ins);
2291 mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, ainfo->reg, FALSE);
/* VFP: bounce the float through the param area to reach a core reg. */
2295 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORER4_MEMBASE_REG, ARMREG_SP, (cfg->param_area - 8), in->dreg);
2296 creg = mono_alloc_ireg (cfg);
2297 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOAD_MEMBASE, creg, ARMREG_SP, (cfg->param_area - 8));
2298 mono_call_inst_add_outarg_reg (cfg, call, creg, ainfo->reg, FALSE);
2301 if (IS_SOFT_FLOAT) {
2302 MONO_INST_NEW (cfg, ins, OP_FGETLOW32);
2303 ins->dreg = mono_alloc_ireg (cfg);
2304 ins->sreg1 = in->dreg;
2305 MONO_ADD_INS (cfg->cbb, ins);
2306 mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, ainfo->reg, FALSE);
2308 MONO_INST_NEW (cfg, ins, OP_FGETHIGH32);
2309 ins->dreg = mono_alloc_ireg (cfg);
2310 ins->sreg1 = in->dreg;
2311 MONO_ADD_INS (cfg->cbb, ins);
2312 mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, ainfo->reg + 1, FALSE);
/* Double via memory: store as r8, reload as two words into a reg pair. */
2316 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORER8_MEMBASE_REG, ARMREG_SP, (cfg->param_area - 8), in->dreg);
2317 creg = mono_alloc_ireg (cfg);
2318 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOAD_MEMBASE, creg, ARMREG_SP, (cfg->param_area - 8));
2319 mono_call_inst_add_outarg_reg (cfg, call, creg, ainfo->reg, FALSE);
2320 creg = mono_alloc_ireg (cfg);
2321 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOAD_MEMBASE, creg, ARMREG_SP, (cfg->param_area - 8 + 4));
2322 mono_call_inst_add_outarg_reg (cfg, call, creg, ainfo->reg + 1, FALSE);
2325 cfg->flags |= MONO_CFG_HAS_FPOUT;
2327 MONO_INST_NEW (cfg, ins, OP_MOVE);
2328 ins->dreg = mono_alloc_ireg (cfg);
2329 ins->sreg1 = in->dreg;
2330 MONO_ADD_INS (cfg->cbb, ins);
2332 mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, ainfo->reg, FALSE);
2335 case RegTypeStructByAddr:
2338 /* FIXME: where is the data allocated? */
2339 arg->backend.reg3 = ainfo->reg;
2340 call->used_iregs |= 1 << ainfo->reg;
2341 g_assert_not_reached ();
2344 case RegTypeStructByVal:
2345 case RegTypeGSharedVtInReg:
2346 case RegTypeGSharedVtOnStack:
/* Vtype arguments: defer to mono_arch_emit_outarg_vt () via OP_OUTARG_VT. */
2347 MONO_INST_NEW (cfg, ins, OP_OUTARG_VT);
2348 ins->opcode = OP_OUTARG_VT;
2349 ins->sreg1 = in->dreg;
2350 ins->klass = in->klass;
2351 ins->inst_p0 = call;
2352 ins->inst_p1 = mono_mempool_alloc (cfg->mempool, sizeof (ArgInfo));
2353 memcpy (ins->inst_p1, ainfo, sizeof (ArgInfo));
2354 mono_call_inst_add_outarg_vt (cfg, call, ins);
2355 MONO_ADD_INS (cfg->cbb, ins);
/* Stack-passed arguments: plain stores relative to SP. */
2358 if (!t->byref && ((t->type == MONO_TYPE_I8) || (t->type == MONO_TYPE_U8))) {
2359 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, ARMREG_SP, ainfo->offset, in->dreg);
2360 } else if (!t->byref && ((t->type == MONO_TYPE_R4) || (t->type == MONO_TYPE_R8))) {
2361 if (t->type == MONO_TYPE_R8) {
2362 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORER8_MEMBASE_REG, ARMREG_SP, ainfo->offset, in->dreg);
2365 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, ARMREG_SP, ainfo->offset, in->dreg);
2367 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORER4_MEMBASE_REG, ARMREG_SP, ainfo->offset, in->dreg);
2370 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, ARMREG_SP, ainfo->offset, in->dreg);
2373 case RegTypeBaseGen:
/* 64-bit value split: one word in r3, the other on the stack. */
2374 if (!t->byref && ((t->type == MONO_TYPE_I8) || (t->type == MONO_TYPE_U8))) {
2375 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, ARMREG_SP, ainfo->offset, (G_BYTE_ORDER == G_BIG_ENDIAN) ? in->dreg + 1 : in->dreg + 2);
2376 MONO_INST_NEW (cfg, ins, OP_MOVE);
2377 ins->dreg = mono_alloc_ireg (cfg);
2378 ins->sreg1 = G_BYTE_ORDER == G_BIG_ENDIAN ? in->dreg + 2 : in->dreg + 1;
2379 MONO_ADD_INS (cfg->cbb, ins);
2380 mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, ARMREG_R3, FALSE);
2381 } else if (!t->byref && (t->type == MONO_TYPE_R8)) {
2384 /* This should work for soft-float as well */
2386 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORER8_MEMBASE_REG, ARMREG_SP, (cfg->param_area - 8), in->dreg);
2387 creg = mono_alloc_ireg (cfg);
2388 mono_call_inst_add_outarg_reg (cfg, call, creg, ARMREG_R3, FALSE);
2389 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOAD_MEMBASE, creg, ARMREG_SP, (cfg->param_area - 8));
2390 creg = mono_alloc_ireg (cfg);
2391 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOAD_MEMBASE, creg, ARMREG_SP, (cfg->param_area - 4));
2392 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, ARMREG_SP, ainfo->offset, creg);
2393 cfg->flags |= MONO_CFG_HAS_FPOUT;
2395 g_assert_not_reached ();
2399 int fdreg = mono_alloc_freg (cfg);
2401 if (ainfo->size == 8) {
2402 MONO_INST_NEW (cfg, ins, OP_FMOVE);
2403 ins->sreg1 = in->dreg;
2405 MONO_ADD_INS (cfg->cbb, ins);
2407 mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, ainfo->reg, TRUE);
2412 * Mono's register allocator doesn't speak single-precision registers that
2413 * overlap double-precision registers (i.e. armhf). So we have to work around
2414 * the register allocator and load the value from memory manually.
2416 * So we create a variable for the float argument and an instruction to store
2417 * the argument into the variable. We then store the list of these arguments
2418 * in cfg->float_args. This list is then used by emit_float_args later to
2419 * pass the arguments in the various call opcodes.
2421 * This is not very nice, and we should really try to fix the allocator.
2424 MonoInst *float_arg = mono_compile_create_var (cfg, &mono_defaults.single_class->byval_arg, OP_LOCAL);
2426 /* Make sure the instruction isn't seen as pointless and removed.
2428 float_arg->flags |= MONO_INST_VOLATILE;
2430 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, float_arg->dreg, in->dreg);
2432 /* We use the dreg to look up the instruction later. The hreg is used to
2433 * emit the instruction that loads the value into the FP reg.
2435 fad = mono_mempool_alloc0 (cfg->mempool, sizeof (FloatArgData));
2436 fad->vreg = float_arg->dreg;
2437 fad->hreg = ainfo->reg;
2439 call->float_args = g_slist_append_mempool (cfg->mempool, call->float_args, fad);
2442 call->used_iregs |= 1 << ainfo->reg;
2443 cfg->flags |= MONO_CFG_HAS_FPOUT;
2447 g_assert_not_reached ();
2451 /* Handle the case where there are no implicit arguments */
2452 if (!sig->pinvoke && (sig->call_convention == MONO_CALL_VARARG) && (n == sig->sentinelpos))
2453 emit_sig_cookie (cfg, call, cinfo);
2455 if (cinfo->ret.storage == RegTypeStructByVal) {
2456 /* The JIT will transform this into a normal call */
2457 call->vret_in_reg = TRUE;
2458 } else if (cinfo->vtype_retaddr) {
/* Pass the vtype return buffer address in the designated register. */
2460 MONO_INST_NEW (cfg, vtarg, OP_MOVE);
2461 vtarg->sreg1 = call->vret_var->dreg;
2462 vtarg->dreg = mono_alloc_preg (cfg);
2463 MONO_ADD_INS (cfg->cbb, vtarg);
2465 mono_call_inst_add_outarg_reg (cfg, call, vtarg->dreg, cinfo->ret.reg, FALSE);
2468 call->stack_usage = cinfo->stack_usage;
2474 mono_arch_emit_outarg_vt (MonoCompile *cfg, MonoInst *ins, MonoInst *src)
/*
 * Expand an OP_OUTARG_VT: copy a value type argument into its ABI
 * location — the register portion word by word, the overflow portion
 * with a memcpy to the outgoing stack area. gsharedvt arguments are
 * passed by address instead. NOTE(review): extract has gaps; comments
 * cover only the visible code.
 */
2476 MonoCallInst *call = (MonoCallInst*)ins->inst_p0;
2477 ArgInfo *ainfo = ins->inst_p1;
2478 int ovf_size = ainfo->vtsize;
2479 int doffset = ainfo->offset;
2480 int struct_size = ainfo->struct_size;
2481 int i, soffset, dreg, tmpreg;
2483 if (ainfo->storage == RegTypeGSharedVtInReg) {
2485 mono_call_inst_add_outarg_reg (cfg, call, src->dreg, ainfo->reg, FALSE);
2488 if (ainfo->storage == RegTypeGSharedVtOnStack) {
2489 /* Pass by addr on stack */
2490 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, ARMREG_SP, ainfo->offset, src->dreg);
/* Copy the in-register words; partial trailing words are assembled
 * byte by byte to avoid reading past the end of the struct. */
2495 for (i = 0; i < ainfo->size; ++i) {
2496 dreg = mono_alloc_ireg (cfg);
2497 switch (struct_size) {
2499 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, dreg, src->dreg, soffset);
2502 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, dreg, src->dreg, soffset);
2505 tmpreg = mono_alloc_ireg (cfg);
2506 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, dreg, src->dreg, soffset);
2507 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, tmpreg, src->dreg, soffset + 1);
2508 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, tmpreg, tmpreg, 8);
2509 MONO_EMIT_NEW_BIALU (cfg, OP_IOR, dreg, dreg, tmpreg);
2510 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, tmpreg, src->dreg, soffset + 2);
2511 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, tmpreg, tmpreg, 16);
2512 MONO_EMIT_NEW_BIALU (cfg, OP_IOR, dreg, dreg, tmpreg);
2515 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, dreg, src->dreg, soffset);
2518 mono_call_inst_add_outarg_reg (cfg, call, dreg, ainfo->reg + i, FALSE);
2519 soffset += sizeof (gpointer);
2520 struct_size -= sizeof (gpointer);
2522 //g_print ("vt size: %d at R%d + %d\n", doffset, vt->inst_basereg, vt->inst_offset);
2524 mini_emit_memcpy (cfg, ARMREG_SP, doffset, src->dreg, soffset, MIN (ovf_size * sizeof (gpointer), struct_size), struct_size < 4 ? 1 : 4);
/*
 * mono_arch_emit_setret:
 *   Emit IR that moves VAL into the ABI return location for METHOD.
 * 64-bit integers use OP_SETLRET with the two 32-bit halves (vreg + 1 /
 * vreg + 2 — presumably the standard mono long regpair layout; confirm);
 * floats depend on the configured FPU mode.  Everything else falls through
 * to a plain OP_MOVE into cfg->ret->dreg.
 * NOTE(review): excerpt is elided — the switch header on the FPU mode and
 * several break/else lines are missing from this view.
 */
2528 mono_arch_emit_setret (MonoCompile *cfg, MonoMethod *method, MonoInst *val)
2530 MonoType *ret = mini_type_get_underlying_type (cfg->generic_sharing_context, mono_method_signature (method)->ret);
2533 if (ret->type == MONO_TYPE_I8 || ret->type == MONO_TYPE_U8) {
2536 if (COMPILE_LLVM (cfg)) {
2537 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, cfg->ret->dreg, val->dreg);
2539 MONO_INST_NEW (cfg, ins, OP_SETLRET);
2540 ins->sreg1 = val->dreg + 1;
2541 ins->sreg2 = val->dreg + 2;
2542 MONO_ADD_INS (cfg->cbb, ins);
/* Soft-float: R8 still needs OP_SETFRET; R4 was already int-converted. */
2547 case MONO_ARM_FPU_NONE:
2548 if (ret->type == MONO_TYPE_R8) {
2551 MONO_INST_NEW (cfg, ins, OP_SETFRET);
2552 ins->dreg = cfg->ret->dreg;
2553 ins->sreg1 = val->dreg;
2554 MONO_ADD_INS (cfg->cbb, ins);
2557 if (ret->type == MONO_TYPE_R4) {
2558 /* Already converted to an int in method_to_ir () */
2559 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, cfg->ret->dreg, val->dreg);
2563 case MONO_ARM_FPU_VFP:
2564 case MONO_ARM_FPU_VFP_HARD:
2565 if (ret->type == MONO_TYPE_R8 || ret->type == MONO_TYPE_R4) {
2568 MONO_INST_NEW (cfg, ins, OP_SETFRET);
2569 ins->dreg = cfg->ret->dreg;
2570 ins->sreg1 = val->dreg;
2571 MONO_ADD_INS (cfg->cbb, ins);
2576 g_assert_not_reached ();
2580 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, cfg->ret->dreg, val->dreg);
2583 #endif /* #ifndef DISABLE_JIT */
2586 mono_arch_is_inst_imm (gint64 imm)
2592 MonoMethodSignature *sig;
/*
 * dyn_call_supported:
 *   Decide whether SIG can be invoked through the dynamic-call path
 * (mono_arch_start_dyn_call / mono_arch_finish_dyn_call).  Rejects
 * signatures with too many arguments for PARAM_REGS + DYN_CALL_STACK_ARGS,
 * unsupported return storage, and stack arguments that fall outside the
 * reserved dyn-call stack area.
 * NOTE(review): excerpt is elided — the return statements, several case
 * labels and the end of the function are missing from this view.
 */
2597 dyn_call_supported (CallInfo *cinfo, MonoMethodSignature *sig)
2601 if (sig->hasthis + sig->param_count > PARAM_REGS + DYN_CALL_STACK_ARGS)
2604 switch (cinfo->ret.storage) {
2606 case RegTypeGeneral:
2607 case RegTypeIRegPair:
2608 case RegTypeStructByAddr:
2619 for (i = 0; i < cinfo->nargs; ++i) {
2620 ArgInfo *ainfo = &cinfo->args [i];
2623 switch (ainfo->storage) {
2624 case RegTypeGeneral:
2626 case RegTypeIRegPair:
/* Stack argument must fit in the fixed dyn-call stack-arg area. */
2629 if (ainfo->offset >= (DYN_CALL_STACK_ARGS * sizeof (gpointer)))
2632 case RegTypeStructByVal:
2633 if (ainfo->size == 0)
2634 last_slot = PARAM_REGS + (ainfo->offset / 4) + ainfo->vtsize;
2636 last_slot = ainfo->reg + ainfo->size + ainfo->vtsize;
2637 if (last_slot >= PARAM_REGS + DYN_CALL_STACK_ARGS)
2645 // FIXME: Can't use cinfo only as it doesn't contain info about I8/float */
2646 for (i = 0; i < sig->param_count; ++i) {
2647 MonoType *t = sig->params [i];
/*
 * mono_arch_dyn_call_prepare:
 *   Build the per-signature ArchDynCallInfo used by start/finish_dyn_call.
 * Returns NULL (in the elided branch after the dyn_call_supported check,
 * presumably — confirm) when the signature is not supported.
 * Ownership: the returned info (and the CallInfo it holds) is released by
 * mono_arch_dyn_call_free ().
 */
2673 mono_arch_dyn_call_prepare (MonoMethodSignature *sig)
2675 ArchDynCallInfo *info;
2678 cinfo = get_call_info (NULL, NULL, sig);
2680 if (!dyn_call_supported (cinfo, sig)) {
2685 info = g_new0 (ArchDynCallInfo, 1);
2686 // FIXME: Preprocess the info to speed up start_dyn_call ()
2688 info->cinfo = cinfo;
2690 return (MonoDynCallInfo*)info;
/*
 * mono_arch_dyn_call_free:
 *   Release an ArchDynCallInfo produced by mono_arch_dyn_call_prepare ().
 * NOTE(review): only the g_free of the contained cinfo is visible here;
 * the g_free of the info struct itself appears to be in an elided line —
 * confirm against the full source.
 */
2694 mono_arch_dyn_call_free (MonoDynCallInfo *info)
2696 ArchDynCallInfo *ainfo = (ArchDynCallInfo*)info;
2698 g_free (ainfo->cinfo);
/*
 * mono_arch_start_dyn_call:
 *   Marshal ARGS into the DynCallArgs buffer BUF according to the
 * precomputed CallInfo: 'this' and/or the vtype return address go into the
 * first registers, then each parameter is widened/copied into its register
 * or dyn-call stack slot (slots >= PARAM_REGS model the stack area).
 * NOTE(review): excerpt is elided — greg/arg_index/pindex initialization,
 * the type-switch header, break statements and several case labels are
 * missing from this view.
 */
2703 mono_arch_start_dyn_call (MonoDynCallInfo *info, gpointer **args, guint8 *ret, guint8 *buf, int buf_len)
2705 ArchDynCallInfo *dinfo = (ArchDynCallInfo*)info;
2706 DynCallArgs *p = (DynCallArgs*)buf;
2707 int arg_index, greg, i, j, pindex;
2708 MonoMethodSignature *sig = dinfo->sig;
2710 g_assert (buf_len >= sizeof (DynCallArgs));
2719 if (sig->hasthis || dinfo->cinfo->vret_arg_index == 1) {
2720 p->regs [greg ++] = (mgreg_t)*(args [arg_index ++]);
/* A by-address vtype return gets the result buffer as a hidden argument. */
2725 if (dinfo->cinfo->vtype_retaddr)
2726 p->regs [greg ++] = (mgreg_t)ret;
2728 for (i = pindex; i < sig->param_count; i++) {
2729 MonoType *t = mono_type_get_underlying_type (sig->params [i]);
2730 gpointer *arg = args [arg_index ++];
2731 ArgInfo *ainfo = &dinfo->cinfo->args [i + sig->hasthis];
2734 if (ainfo->storage == RegTypeGeneral || ainfo->storage == RegTypeIRegPair || ainfo->storage == RegTypeStructByVal)
2736 else if (ainfo->storage == RegTypeBase)
2737 slot = PARAM_REGS + (ainfo->offset / 4);
2739 g_assert_not_reached ();
2742 p->regs [slot] = (mgreg_t)*arg;
2747 case MONO_TYPE_STRING:
2748 case MONO_TYPE_CLASS:
2749 case MONO_TYPE_ARRAY:
2750 case MONO_TYPE_SZARRAY:
2751 case MONO_TYPE_OBJECT:
2755 p->regs [slot] = (mgreg_t)*arg;
2757 case MONO_TYPE_BOOLEAN:
2759 p->regs [slot] = *(guint8*)arg;
2762 p->regs [slot] = *(gint8*)arg;
2765 p->regs [slot] = *(gint16*)arg;
2768 case MONO_TYPE_CHAR:
2769 p->regs [slot] = *(guint16*)arg;
2772 p->regs [slot] = *(gint32*)arg;
2775 p->regs [slot] = *(guint32*)arg;
/* 64-bit values occupy two consecutive slots. */
2779 p->regs [slot ++] = (mgreg_t)arg [0];
2780 p->regs [slot] = (mgreg_t)arg [1];
2783 p->regs [slot] = *(mgreg_t*)arg;
2786 p->regs [slot ++] = (mgreg_t)arg [0];
2787 p->regs [slot] = (mgreg_t)arg [1];
2789 case MONO_TYPE_GENERICINST:
2790 if (MONO_TYPE_IS_REFERENCE (t)) {
2791 p->regs [slot] = (mgreg_t)*arg;
2796 case MONO_TYPE_VALUETYPE:
2797 g_assert (ainfo->storage == RegTypeStructByVal);
2799 if (ainfo->size == 0)
2800 slot = PARAM_REGS + (ainfo->offset / 4);
2804 for (j = 0; j < ainfo->size + ainfo->vtsize; ++j)
2805 p->regs [slot ++] = ((mgreg_t*)arg) [j];
2808 g_assert_not_reached ();
/*
 * mono_arch_finish_dyn_call:
 *   Copy the raw call result (res/res2 from the DynCallArgs buffer) into the
 * caller-provided return buffer, widening/reinterpreting per the signature's
 * return type.  I8/U8 stores both 32-bit halves; R8 is reassembled from the
 * two result words via an mgreg_t[2] (declared in an elided line — confirm).
 * NOTE(review): excerpt is elided — break statements, several case labels
 * and intermediate lines are missing from this view.
 */
2814 mono_arch_finish_dyn_call (MonoDynCallInfo *info, guint8 *buf)
2816 ArchDynCallInfo *ainfo = (ArchDynCallInfo*)info;
2817 MonoMethodSignature *sig = ((ArchDynCallInfo*)info)->sig;
2819 guint8 *ret = ((DynCallArgs*)buf)->ret;
2820 mgreg_t res = ((DynCallArgs*)buf)->res;
2821 mgreg_t res2 = ((DynCallArgs*)buf)->res2;
2823 ptype = mini_type_get_underlying_type (NULL, sig->ret);
2824 switch (ptype->type) {
2825 case MONO_TYPE_VOID:
2826 *(gpointer*)ret = NULL;
2828 case MONO_TYPE_STRING:
2829 case MONO_TYPE_CLASS:
2830 case MONO_TYPE_ARRAY:
2831 case MONO_TYPE_SZARRAY:
2832 case MONO_TYPE_OBJECT:
2836 *(gpointer*)ret = (gpointer)res;
2842 case MONO_TYPE_BOOLEAN:
2843 *(guint8*)ret = res;
2846 *(gint16*)ret = res;
2849 case MONO_TYPE_CHAR:
2850 *(guint16*)ret = res;
2853 *(gint32*)ret = res;
2856 *(guint32*)ret = res;
2860 /* This handles endianness as well */
2861 ((gint32*)ret) [0] = res;
2862 ((gint32*)ret) [1] = res2;
2864 case MONO_TYPE_GENERICINST:
2865 if (MONO_TYPE_IS_REFERENCE (ptype)) {
2866 *(gpointer*)ret = (gpointer)res;
2871 case MONO_TYPE_VALUETYPE:
2872 g_assert (ainfo->cinfo->vtype_retaddr);
2877 *(float*)ret = *(float*)&res;
2879 case MONO_TYPE_R8: {
/* FIX: was "*(double*)®s" — the "&reg" in "&regs" had been decoded as the
 * HTML entity for the (R) sign, corrupting the address-of expression. */
2886 *(double*)ret = *(double*)&regs;
2890 g_assert_not_reached ();
2897 * Allow tracing to work with this interface (with an optional argument)
2901 mono_arch_instrument_prolog (MonoCompile *cfg, void *func, void *p, gboolean enable_arguments)
2905 code = mono_arm_emit_load_imm (code, ARMREG_R0, (guint32)cfg->method);
2906 ARM_MOV_REG_IMM8 (code, ARMREG_R1, 0); /* NULL ebp for now */
2907 code = mono_arm_emit_load_imm (code, ARMREG_R2, (guint32)func);
2908 code = emit_call_reg (code, ARMREG_R2);
/*
 * mono_arch_instrument_epilog_full:
 *   Emit the tracing epilog: pick a save mode from the return type, spill
 * the return value (R0/R1 or VFP F0/D0) to the param area, set up the
 * tracer arguments, call FUNC through IP, then reload the saved return
 * value.
 * NOTE(review): excerpt is elided — the rtype switch header, most case
 * labels (I8, R4, R8, ...), break statements and SAVE_* labels of the two
 * save_mode switches are missing from this view.
 */
2922 mono_arch_instrument_epilog_full (MonoCompile *cfg, void *func, void *p, gboolean enable_arguments, gboolean preserve_argument_registers)
2925 int save_mode = SAVE_NONE;
2927 MonoMethod *method = cfg->method;
2928 MonoType *ret_type = mini_type_get_underlying_type (cfg->generic_sharing_context, mono_method_signature (method)->ret);
2929 int rtype = ret_type->type;
2930 int save_offset = cfg->param_area;
2934 offset = code - cfg->native_code;
2935 /* we need about 16 instructions */
2936 if (offset > (cfg->code_size - 16 * 4)) {
2937 cfg->code_size *= 2;
2938 cfg->native_code = g_realloc (cfg->native_code, cfg->code_size);
2939 code = cfg->native_code + offset;
2942 case MONO_TYPE_VOID:
2943 /* special case string .ctor icall */
/* NOTE(review): "strcmp (...) &&" is truthy when the name is NOT ".ctor",
 * which contradicts the comment above — expected "!strcmp" / "== 0".
 * Verify against upstream before changing. */
2944 if (strcmp (".ctor", method->name) && method->klass == mono_defaults.string_class)
2945 save_mode = SAVE_ONE;
2947 save_mode = SAVE_NONE;
2951 save_mode = SAVE_TWO;
2955 save_mode = SAVE_ONE_FP;
2957 save_mode = SAVE_ONE;
2961 save_mode = SAVE_TWO_FP;
2963 save_mode = SAVE_TWO;
2965 case MONO_TYPE_GENERICINST:
2966 if (!mono_type_generic_inst_is_valuetype (ret_type)) {
2967 save_mode = SAVE_ONE;
2971 case MONO_TYPE_VALUETYPE:
2972 save_mode = SAVE_STRUCT;
2975 save_mode = SAVE_ONE;
/* Spill the live return value before clobbering R0-R2 for the tracer call. */
2979 switch (save_mode) {
2981 ARM_STR_IMM (code, ARMREG_R0, cfg->frame_reg, save_offset);
2982 ARM_STR_IMM (code, ARMREG_R1, cfg->frame_reg, save_offset + 4);
2983 if (enable_arguments) {
2984 ARM_MOV_REG_REG (code, ARMREG_R2, ARMREG_R1);
2985 ARM_MOV_REG_REG (code, ARMREG_R1, ARMREG_R0);
2989 ARM_STR_IMM (code, ARMREG_R0, cfg->frame_reg, save_offset);
2990 if (enable_arguments) {
2991 ARM_MOV_REG_REG (code, ARMREG_R1, ARMREG_R0);
2995 ARM_FSTS (code, ARM_VFP_F0, cfg->frame_reg, save_offset);
2996 if (enable_arguments) {
2997 ARM_FMRS (code, ARMREG_R1, ARM_VFP_F0);
3001 ARM_FSTD (code, ARM_VFP_D0, cfg->frame_reg, save_offset);
3002 if (enable_arguments) {
3003 ARM_FMDRR (code, ARMREG_R1, ARMREG_R2, ARM_VFP_D0);
3007 if (enable_arguments) {
3008 /* FIXME: get the actual address */
3009 ARM_MOV_REG_REG (code, ARMREG_R1, ARMREG_R0);
/* Call func (cfg->method, ...) through IP. */
3017 code = mono_arm_emit_load_imm (code, ARMREG_R0, (guint32)cfg->method);
3018 code = mono_arm_emit_load_imm (code, ARMREG_IP, (guint32)func);
3019 code = emit_call_reg (code, ARMREG_IP);
/* Restore the saved return value after the tracer returns. */
3021 switch (save_mode) {
3023 ARM_LDR_IMM (code, ARMREG_R0, cfg->frame_reg, save_offset);
3024 ARM_LDR_IMM (code, ARMREG_R1, cfg->frame_reg, save_offset + 4);
3027 ARM_LDR_IMM (code, ARMREG_R0, cfg->frame_reg, save_offset);
3030 ARM_FLDS (code, ARM_VFP_F0, cfg->frame_reg, save_offset);
3033 ARM_FLDD (code, ARM_VFP_D0, cfg->frame_reg, save_offset);
3044 * The immediate field for cond branches is big enough for all reasonable methods
3046 #define EMIT_COND_BRANCH_FLAGS(ins,condcode) \
3047 if (0 && ins->inst_true_bb->native_offset) { \
3048 ARM_B_COND (code, (condcode), (code - cfg->native_code + ins->inst_true_bb->native_offset) & 0xffffff); \
3050 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_BB, ins->inst_true_bb); \
3051 ARM_B_COND (code, (condcode), 0); \
3054 #define EMIT_COND_BRANCH(ins,cond) EMIT_COND_BRANCH_FLAGS(ins, branch_cc_table [(cond)])
3056 /* emit an exception if condition is fail
3058 * We assign the extra code used to throw the implicit exceptions
3059 * to cfg->bb_exit as far as the big branch handling is concerned
3061 #define EMIT_COND_SYSTEM_EXCEPTION_FLAGS(condcode,exc_name) \
3063 mono_add_patch_info (cfg, code - cfg->native_code, \
3064 MONO_PATCH_INFO_EXC, exc_name); \
3065 ARM_BL_COND (code, (condcode), 0); \
3068 #define EMIT_COND_SYSTEM_EXCEPTION(cond,exc_name) EMIT_COND_SYSTEM_EXCEPTION_FLAGS(branch_cc_table [(cond)], (exc_name))
3071 mono_arch_peephole_pass_1 (MonoCompile *cfg, MonoBasicBlock *bb)
/*
 * mono_arch_peephole_pass_2:
 *   Local store/load forwarding over adjacent instructions: a load from the
 * same base+offset just stored becomes a move (or is deleted when the regs
 * match); a reload after an identical load becomes a move; narrow loads
 * after narrow stores become sign/zero-extension of the stored reg;
 * self-moves and move/inverse-move pairs are deleted.
 * NOTE(review): excerpt is elided — case labels, break statements and the
 * comment openers of several block comments are missing from this view.
 */
3076 mono_arch_peephole_pass_2 (MonoCompile *cfg, MonoBasicBlock *bb)
3078 MonoInst *ins, *n, *last_ins = NULL;
3080 MONO_BB_FOR_EACH_INS_SAFE (bb, n, ins) {
3081 switch (ins->opcode) {
3084 /* Already done by an arch-independent pass */
3086 case OP_LOAD_MEMBASE:
3087 case OP_LOADI4_MEMBASE:
3089 * OP_STORE_MEMBASE_REG reg, offset(basereg)
3090 * OP_LOAD_MEMBASE offset(basereg), reg
3092 if (last_ins && (last_ins->opcode == OP_STOREI4_MEMBASE_REG
3093 || last_ins->opcode == OP_STORE_MEMBASE_REG) &&
3094 ins->inst_basereg == last_ins->inst_destbasereg &&
3095 ins->inst_offset == last_ins->inst_offset) {
3096 if (ins->dreg == last_ins->sreg1) {
3097 MONO_DELETE_INS (bb, ins);
3100 //static int c = 0; printf ("MATCHX %s %d\n", cfg->method->name,c++);
3101 ins->opcode = OP_MOVE;
3102 ins->sreg1 = last_ins->sreg1;
3106 * Note: reg1 must be different from the basereg in the second load
3107 * OP_LOAD_MEMBASE offset(basereg), reg1
3108 * OP_LOAD_MEMBASE offset(basereg), reg2
3110 * OP_LOAD_MEMBASE offset(basereg), reg1
3111 * OP_MOVE reg1, reg2
3113 } if (last_ins && (last_ins->opcode == OP_LOADI4_MEMBASE
3114 || last_ins->opcode == OP_LOAD_MEMBASE) &&
3115 ins->inst_basereg != last_ins->dreg &&
3116 ins->inst_basereg == last_ins->inst_basereg &&
3117 ins->inst_offset == last_ins->inst_offset) {
3119 if (ins->dreg == last_ins->dreg) {
3120 MONO_DELETE_INS (bb, ins);
3123 ins->opcode = OP_MOVE;
3124 ins->sreg1 = last_ins->dreg;
3127 //g_assert_not_reached ();
3131 * OP_STORE_MEMBASE_IMM imm, offset(basereg)
3132 * OP_LOAD_MEMBASE offset(basereg), reg
3134 * OP_STORE_MEMBASE_IMM imm, offset(basereg)
3135 * OP_ICONST reg, imm
3137 } else if (last_ins && (last_ins->opcode == OP_STOREI4_MEMBASE_IMM
3138 || last_ins->opcode == OP_STORE_MEMBASE_IMM) &&
3139 ins->inst_basereg == last_ins->inst_destbasereg &&
3140 ins->inst_offset == last_ins->inst_offset) {
3141 //static int c = 0; printf ("MATCHX %s %d\n", cfg->method->name,c++);
3142 ins->opcode = OP_ICONST;
3143 ins->inst_c0 = last_ins->inst_imm;
/* This rule was never validated; it aborts if ever taken. */
3144 g_assert_not_reached (); // check this rule
3148 case OP_LOADU1_MEMBASE:
3149 case OP_LOADI1_MEMBASE:
3150 if (last_ins && (last_ins->opcode == OP_STOREI1_MEMBASE_REG) &&
3151 ins->inst_basereg == last_ins->inst_destbasereg &&
3152 ins->inst_offset == last_ins->inst_offset) {
3153 ins->opcode = (ins->opcode == OP_LOADI1_MEMBASE) ? OP_ICONV_TO_I1 : OP_ICONV_TO_U1;
3154 ins->sreg1 = last_ins->sreg1;
3157 case OP_LOADU2_MEMBASE:
3158 case OP_LOADI2_MEMBASE:
3159 if (last_ins && (last_ins->opcode == OP_STOREI2_MEMBASE_REG) &&
3160 ins->inst_basereg == last_ins->inst_destbasereg &&
3161 ins->inst_offset == last_ins->inst_offset) {
3162 ins->opcode = (ins->opcode == OP_LOADI2_MEMBASE) ? OP_ICONV_TO_I2 : OP_ICONV_TO_U2;
3163 ins->sreg1 = last_ins->sreg1;
3167 ins->opcode = OP_MOVE;
3171 if (ins->dreg == ins->sreg1) {
3172 MONO_DELETE_INS (bb, ins);
3176 * OP_MOVE sreg, dreg
3177 * OP_MOVE dreg, sreg
3179 if (last_ins && last_ins->opcode == OP_MOVE &&
3180 ins->sreg1 == last_ins->dreg &&
3181 ins->dreg == last_ins->sreg1) {
3182 MONO_DELETE_INS (bb, ins);
3190 bb->last_ins = last_ins;
3194 * the branch_cc_table should maintain the order of these
3208 branch_cc_table [] = {
3222 #define ADD_NEW_INS(cfg,dest,op) do { \
3223 MONO_INST_NEW ((cfg), (dest), (op)); \
3224 mono_bblock_insert_before_ins (bb, ins, (dest)); \
/*
 * map_to_reg_reg_op:
 *   Map a membase (reg + immediate offset) opcode to its register-indexed
 * MEMINDEX form, and immediate-store opcodes to their register-store form;
 * used by the lowering pass when an offset or immediate does not fit the
 * ARM encoding and has been materialized into a register.
 * NOTE(review): excerpt is elided — the switch header, many early case
 * labels and their returns are missing from this view.
 */
3228 map_to_reg_reg_op (int op)
3237 case OP_COMPARE_IMM:
3239 case OP_ICOMPARE_IMM:
3253 case OP_LOAD_MEMBASE:
3254 return OP_LOAD_MEMINDEX;
3255 case OP_LOADI4_MEMBASE:
3256 return OP_LOADI4_MEMINDEX;
3257 case OP_LOADU4_MEMBASE:
3258 return OP_LOADU4_MEMINDEX;
3259 case OP_LOADU1_MEMBASE:
3260 return OP_LOADU1_MEMINDEX;
3261 case OP_LOADI2_MEMBASE:
3262 return OP_LOADI2_MEMINDEX;
3263 case OP_LOADU2_MEMBASE:
3264 return OP_LOADU2_MEMINDEX;
3265 case OP_LOADI1_MEMBASE:
3266 return OP_LOADI1_MEMINDEX;
3267 case OP_STOREI1_MEMBASE_REG:
3268 return OP_STOREI1_MEMINDEX;
3269 case OP_STOREI2_MEMBASE_REG:
3270 return OP_STOREI2_MEMINDEX;
3271 case OP_STOREI4_MEMBASE_REG:
3272 return OP_STOREI4_MEMINDEX;
3273 case OP_STORE_MEMBASE_REG:
3274 return OP_STORE_MEMINDEX;
3275 case OP_STORER4_MEMBASE_REG:
3276 return OP_STORER4_MEMINDEX;
3277 case OP_STORER8_MEMBASE_REG:
3278 return OP_STORER8_MEMINDEX;
/* _IMM stores become _REG stores (not MEMINDEX): the immediate value is
 * loaded into a register first, then the result is re-lowered. */
3279 case OP_STORE_MEMBASE_IMM:
3280 return OP_STORE_MEMBASE_REG;
3281 case OP_STOREI1_MEMBASE_IMM:
3282 return OP_STOREI1_MEMBASE_REG;
3283 case OP_STOREI2_MEMBASE_IMM:
3284 return OP_STOREI2_MEMBASE_REG;
3285 case OP_STOREI4_MEMBASE_IMM:
3286 return OP_STOREI4_MEMBASE_REG;
3288 g_assert_not_reached ();
/*
 * mono_arch_lowering_pass:
 *   Lower IR so each instruction maps to a simple ARM encoding: immediates
 * that are not valid rotated imm8 values (or offsets not fitting imm12 /
 * imm8 / VFP fpimm8) are materialized into vregs via OP_ICONST / OP_ADD_IMM,
 * and opcodes are rewritten through map_to_reg_reg_op ().  Also strength-
 * reduces MUL_IMM (1 -> move, 0 -> const 0, power of two -> shift) and
 * swaps operands of some fp compares.
 * NOTE(review): excerpt is heavily elided — the loop_start label, many case
 * labels, break statements and closing braces are missing from this view,
 * so no inline comments are interleaved to avoid landing inside elided
 * multi-line comments.
 */
3292 * Remove from the instruction list the instructions that can't be
3293 * represented with very simple instructions with no register
3297 mono_arch_lowering_pass (MonoCompile *cfg, MonoBasicBlock *bb)
3299 MonoInst *ins, *temp, *last_ins = NULL;
3300 int rot_amount, imm8, low_imm;
3302 MONO_BB_FOR_EACH_INS (bb, ins) {
3304 switch (ins->opcode) {
3308 case OP_COMPARE_IMM:
3309 case OP_ICOMPARE_IMM:
3323 if ((imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount)) < 0) {
3324 ADD_NEW_INS (cfg, temp, OP_ICONST);
3325 temp->inst_c0 = ins->inst_imm;
3326 temp->dreg = mono_alloc_ireg (cfg);
3327 ins->sreg2 = temp->dreg;
3328 ins->opcode = mono_op_imm_to_op (ins->opcode);
3330 if (ins->opcode == OP_SBB || ins->opcode == OP_ISBB || ins->opcode == OP_SUBCC)
3336 if (ins->inst_imm == 1) {
3337 ins->opcode = OP_MOVE;
3340 if (ins->inst_imm == 0) {
3341 ins->opcode = OP_ICONST;
3345 imm8 = mono_is_power_of_two (ins->inst_imm);
3347 ins->opcode = OP_SHL_IMM;
3348 ins->inst_imm = imm8;
3351 ADD_NEW_INS (cfg, temp, OP_ICONST);
3352 temp->inst_c0 = ins->inst_imm;
3353 temp->dreg = mono_alloc_ireg (cfg);
3354 ins->sreg2 = temp->dreg;
3355 ins->opcode = OP_IMUL;
3361 if (ins->next && (ins->next->opcode == OP_COND_EXC_C || ins->next->opcode == OP_COND_EXC_IC))
3362 /* ARM sets the C flag to 1 if there was _no_ overflow */
3363 ins->next->opcode = OP_COND_EXC_NC;
3366 case OP_IDIV_UN_IMM:
3368 case OP_IREM_UN_IMM:
3369 ADD_NEW_INS (cfg, temp, OP_ICONST);
3370 temp->inst_c0 = ins->inst_imm;
3371 temp->dreg = mono_alloc_ireg (cfg);
3372 ins->sreg2 = temp->dreg;
3373 ins->opcode = mono_op_imm_to_op (ins->opcode);
3375 case OP_LOCALLOC_IMM:
3376 ADD_NEW_INS (cfg, temp, OP_ICONST);
3377 temp->inst_c0 = ins->inst_imm;
3378 temp->dreg = mono_alloc_ireg (cfg);
3379 ins->sreg1 = temp->dreg;
3380 ins->opcode = OP_LOCALLOC;
3382 case OP_LOAD_MEMBASE:
3383 case OP_LOADI4_MEMBASE:
3384 case OP_LOADU4_MEMBASE:
3385 case OP_LOADU1_MEMBASE:
3386 /* we can do two things: load the immed in a register
3387 * and use an indexed load, or see if the immed can be
3388 * represented as an ad_imm + a load with a smaller offset
3389 * that fits. We just do the first for now, optimize later.
3391 if (arm_is_imm12 (ins->inst_offset))
3393 ADD_NEW_INS (cfg, temp, OP_ICONST);
3394 temp->inst_c0 = ins->inst_offset;
3395 temp->dreg = mono_alloc_ireg (cfg);
3396 ins->sreg2 = temp->dreg;
3397 ins->opcode = map_to_reg_reg_op (ins->opcode);
3399 case OP_LOADI2_MEMBASE:
3400 case OP_LOADU2_MEMBASE:
3401 case OP_LOADI1_MEMBASE:
3402 if (arm_is_imm8 (ins->inst_offset))
3404 ADD_NEW_INS (cfg, temp, OP_ICONST);
3405 temp->inst_c0 = ins->inst_offset;
3406 temp->dreg = mono_alloc_ireg (cfg);
3407 ins->sreg2 = temp->dreg;
3408 ins->opcode = map_to_reg_reg_op (ins->opcode);
3410 case OP_LOADR4_MEMBASE:
3411 case OP_LOADR8_MEMBASE:
3412 if (arm_is_fpimm8 (ins->inst_offset))
3414 low_imm = ins->inst_offset & 0x1ff;
3415 if ((imm8 = mono_arm_is_rotated_imm8 (ins->inst_offset & ~0x1ff, &rot_amount)) >= 0) {
3416 ADD_NEW_INS (cfg, temp, OP_ADD_IMM);
3417 temp->inst_imm = ins->inst_offset & ~0x1ff;
3418 temp->sreg1 = ins->inst_basereg;
3419 temp->dreg = mono_alloc_ireg (cfg);
3420 ins->inst_basereg = temp->dreg;
3421 ins->inst_offset = low_imm;
3425 ADD_NEW_INS (cfg, temp, OP_ICONST);
3426 temp->inst_c0 = ins->inst_offset;
3427 temp->dreg = mono_alloc_ireg (cfg);
3429 ADD_NEW_INS (cfg, add_ins, OP_IADD);
3430 add_ins->sreg1 = ins->inst_basereg;
3431 add_ins->sreg2 = temp->dreg;
3432 add_ins->dreg = mono_alloc_ireg (cfg);
3434 ins->inst_basereg = add_ins->dreg;
3435 ins->inst_offset = 0;
3438 case OP_STORE_MEMBASE_REG:
3439 case OP_STOREI4_MEMBASE_REG:
3440 case OP_STOREI1_MEMBASE_REG:
3441 if (arm_is_imm12 (ins->inst_offset))
3443 ADD_NEW_INS (cfg, temp, OP_ICONST);
3444 temp->inst_c0 = ins->inst_offset;
3445 temp->dreg = mono_alloc_ireg (cfg);
3446 ins->sreg2 = temp->dreg;
3447 ins->opcode = map_to_reg_reg_op (ins->opcode);
3449 case OP_STOREI2_MEMBASE_REG:
3450 if (arm_is_imm8 (ins->inst_offset))
3452 ADD_NEW_INS (cfg, temp, OP_ICONST);
3453 temp->inst_c0 = ins->inst_offset;
3454 temp->dreg = mono_alloc_ireg (cfg);
3455 ins->sreg2 = temp->dreg;
3456 ins->opcode = map_to_reg_reg_op (ins->opcode);
3458 case OP_STORER4_MEMBASE_REG:
3459 case OP_STORER8_MEMBASE_REG:
3460 if (arm_is_fpimm8 (ins->inst_offset))
3462 low_imm = ins->inst_offset & 0x1ff;
3463 if ((imm8 = mono_arm_is_rotated_imm8 (ins->inst_offset & ~ 0x1ff, &rot_amount)) >= 0 && arm_is_fpimm8 (low_imm)) {
3464 ADD_NEW_INS (cfg, temp, OP_ADD_IMM);
3465 temp->inst_imm = ins->inst_offset & ~0x1ff;
3466 temp->sreg1 = ins->inst_destbasereg;
3467 temp->dreg = mono_alloc_ireg (cfg);
3468 ins->inst_destbasereg = temp->dreg;
3469 ins->inst_offset = low_imm;
3473 ADD_NEW_INS (cfg, temp, OP_ICONST);
3474 temp->inst_c0 = ins->inst_offset;
3475 temp->dreg = mono_alloc_ireg (cfg);
3477 ADD_NEW_INS (cfg, add_ins, OP_IADD);
3478 add_ins->sreg1 = ins->inst_destbasereg;
3479 add_ins->sreg2 = temp->dreg;
3480 add_ins->dreg = mono_alloc_ireg (cfg);
3482 ins->inst_destbasereg = add_ins->dreg;
3483 ins->inst_offset = 0;
3486 case OP_STORE_MEMBASE_IMM:
3487 case OP_STOREI1_MEMBASE_IMM:
3488 case OP_STOREI2_MEMBASE_IMM:
3489 case OP_STOREI4_MEMBASE_IMM:
3490 ADD_NEW_INS (cfg, temp, OP_ICONST);
3491 temp->inst_c0 = ins->inst_imm;
3492 temp->dreg = mono_alloc_ireg (cfg);
3493 ins->sreg1 = temp->dreg;
3494 ins->opcode = map_to_reg_reg_op (ins->opcode);
3496 goto loop_start; /* make it handle the possibly big ins->inst_offset */
3498 gboolean swap = FALSE;
3502 /* Optimized away */
3507 /* Some fp compares require swapped operands */
3508 switch (ins->next->opcode) {
3510 ins->next->opcode = OP_FBLT;
3514 ins->next->opcode = OP_FBLT_UN;
3518 ins->next->opcode = OP_FBGE;
3522 ins->next->opcode = OP_FBGE_UN;
3530 ins->sreg1 = ins->sreg2;
3539 bb->last_ins = last_ins;
3540 bb->max_vreg = cfg->next_vreg;
/*
 * mono_arch_decompose_long_opts:
 *   Decompose 64-bit ops into 32-bit pairs.  OP_LNEG becomes
 * RSBS low, 0 (reverse-subtract, setting carry) followed by RSC high, 0
 * (reverse-subtract with carry) — two's-complement negation across the
 * regpair halves (vreg + 1 / vreg + 2).
 * NOTE(review): 'ins' is presumably assigned from long_ins in an elided
 * line — confirm against the full source.
 */
3544 mono_arch_decompose_long_opts (MonoCompile *cfg, MonoInst *long_ins)
3548 if (long_ins->opcode == OP_LNEG) {
3550 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ARM_RSBS_IMM, ins->dreg + 1, ins->sreg1 + 1, 0);
3551 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ARM_RSC_IMM, ins->dreg + 2, ins->sreg1 + 2, 0);
/*
 * emit_float_to_int:
 *   Emit VFP code converting the double in SREG to an integer in DREG:
 * TOSIZD (signed) or TOUIZD (unsigned) into a scratch VFP reg, move to the
 * core reg with FMRS, then truncate to SIZE bytes — mask/logical shifts for
 * unsigned, shift-left + arithmetic-shift-right for signed narrowing.
 * NOTE(review): the is_signed if/else structure and the return are in
 * elided lines.
 */
3557 emit_float_to_int (MonoCompile *cfg, guchar *code, int dreg, int sreg, int size, gboolean is_signed)
3559 /* sreg is a float, dreg is an integer reg */
3561 code = mono_arm_emit_vfp_scratch_save (cfg, code, vfp_scratch1);
3563 ARM_TOSIZD (code, vfp_scratch1, sreg);
3565 ARM_TOUIZD (code, vfp_scratch1, sreg);
3566 ARM_FMRS (code, dreg, vfp_scratch1);
3567 code = mono_arm_emit_vfp_scratch_restore (cfg, code, vfp_scratch1);
3571 ARM_AND_REG_IMM8 (code, dreg, dreg, 0xff);
3572 else if (size == 2) {
3573 ARM_SHL_IMM (code, dreg, dreg, 16);
3574 ARM_SHR_IMM (code, dreg, dreg, 16);
3578 ARM_SHL_IMM (code, dreg, dreg, 24);
3579 ARM_SAR_IMM (code, dreg, dreg, 24);
3580 } else if (size == 2) {
3581 ARM_SHL_IMM (code, dreg, dreg, 16);
3582 ARM_SAR_IMM (code, dreg, dreg, 16);
3588 #endif /* #ifndef DISABLE_JIT */
3592 const guchar *target;
3597 #define is_call_imm(diff) ((gint)(diff) >= -33554432 && (gint)(diff) <= 33554431)
/*
 * search_thunk_slot:
 *   mono_code_manager_foreach / mono_domain_code_foreach callback.  Scans a
 * code chunk's thunk area for either an existing 12-byte thunk whose target
 * word (thunks [2]) matches pdata->target, or an all-zero free slot into
 * which a new thunk (ldr ip, [pc] + bx/mov pc, ip + target word) is
 * emitted; in both cases the original call site is patched to branch to
 * the thunk and the icache is flushed.
 * NOTE(review): excerpt is elided — pdata->found assignments, return
 * statements and the thunk-stepping increment are missing from this view.
 */
3600 search_thunk_slot (void *data, int csize, int bsize, void *user_data) {
3601 PatchData *pdata = (PatchData*)user_data;
3602 guchar *code = data;
3603 guint32 *thunks = data;
3604 guint32 *endthunks = (guint32*)(code + bsize);
3606 int difflow, diffhigh;
3608 /* always ensure a call from pdata->code can reach to the thunks without further thunks */
3609 difflow = (char*)pdata->code - (char*)thunks;
3610 diffhigh = (char*)pdata->code - (char*)endthunks;
3611 if (!((is_call_imm (thunks) && is_call_imm (endthunks)) || (is_call_imm (difflow) && is_call_imm (diffhigh))))
3615 * The thunk is composed of 3 words:
3616 * load constant from thunks [2] into ARM_IP
3619 * Note that the LR register is already setup
3621 //g_print ("thunk nentries: %d\n", ((char*)endthunks - (char*)thunks)/16);
3622 if ((pdata->found == 2) || (pdata->code >= code && pdata->code <= code + csize)) {
3623 while (thunks < endthunks) {
3624 //g_print ("looking for target: %p at %p (%08x-%08x)\n", pdata->target, thunks, thunks [0], thunks [1]);
3625 if (thunks [2] == (guint32)pdata->target) {
3626 arm_patch (pdata->code, (guchar*)thunks);
3627 mono_arch_flush_icache (pdata->code, 4);
3630 } else if ((thunks [0] == 0) && (thunks [1] == 0) && (thunks [2] == 0)) {
3631 /* found a free slot instead: emit thunk */
3632 /* ARMREG_IP is fine to use since this can't be an IMT call
3635 code = (guchar*)thunks;
3636 ARM_LDR_IMM (code, ARMREG_IP, ARMREG_PC, 0);
3637 if (thumb_supported)
3638 ARM_BX (code, ARMREG_IP);
3640 ARM_MOV_REG_REG (code, ARMREG_PC, ARMREG_IP);
3641 thunks [2] = (guint32)pdata->target;
3642 mono_arch_flush_icache ((guchar*)thunks, 12);
3644 arm_patch (pdata->code, (guchar*)thunks);
3645 mono_arch_flush_icache (pdata->code, 4);
3649 /* skip 12 bytes, the size of the thunk */
3653 //g_print ("failed thunk lookup for %p from %p at %p (%d entries)\n", pdata->target, pdata->code, data, count);
/*
 * handle_thunk:
 *   Patch CODE to reach TARGET via a branch thunk.  Searches, in order:
 * the dynamic-method code manager (if given), the domain's code (twice —
 * the second pass presumably with pdata.found == 2 to take the first free
 * slot, per the comment; the assignment line is elided), and finally every
 * dynamic method's code manager.  Aborts if no slot was found.
 */
3659 handle_thunk (MonoDomain *domain, int absolute, guchar *code, const guchar *target, MonoCodeManager *dyn_code_mp)
3664 domain = mono_domain_get ();
3667 pdata.target = target;
3668 pdata.absolute = absolute;
3672 mono_code_manager_foreach (dyn_code_mp, search_thunk_slot, &pdata);
3675 if (pdata.found != 1) {
3676 mono_domain_lock (domain);
3677 mono_domain_code_foreach (domain, search_thunk_slot, &pdata);
3680 /* this uses the first available slot */
3682 mono_domain_code_foreach (domain, search_thunk_slot, &pdata);
3684 mono_domain_unlock (domain);
3687 if (pdata.found != 1) {
3689 GHashTableIter iter;
3690 MonoJitDynamicMethodInfo *ji;
3693 * This might be a dynamic method, search its code manager. We can only
3694 * use the dynamic method containing CODE, since the others might be freed later.
3698 mono_domain_lock (domain);
3699 hash = domain_jit_info (domain)->dynamic_code_hash;
3701 /* FIXME: Speed this up */
3702 g_hash_table_iter_init (&iter, hash);
3703 while (g_hash_table_iter_next (&iter, NULL, (gpointer*)&ji)) {
3704 mono_code_manager_foreach (ji->code_mp, search_thunk_slot, &pdata);
3705 if (pdata.found == 1)
3709 mono_domain_unlock (domain);
3711 if (pdata.found != 1)
3712 g_print ("thunk failed for %p from %p\n", target, code);
3713 g_assert (pdata.found == 1);
/*
 * arm_patch_general:
 *   Rewrite the branch/call at CODE to transfer to TARGET.  For B/BL
 * (opcode class 101b) the signed 24-bit displacement is rewritten in
 * place, switching BL to BLX when TARGET's low bit marks Thumb; if the
 * +/-32MB range is exceeded, a thunk is allocated via handle_thunk ().
 * Otherwise the constant-pool word of the recognized
 * ldr ip, [pc] / mov-or-bx sequences is overwritten with TARGET.
 * NOTE(review): excerpt is elided — the ccode[] declarations, several
 * returns/breaks and intermediate encoding lines are missing from this
 * view.
 */
3717 arm_patch_general (MonoDomain *domain, guchar *code, const guchar *target, MonoCodeManager *dyn_code_mp)
3719 guint32 *code32 = (void*)code;
3720 guint32 ins = *code32;
3721 guint32 prim = (ins >> 25) & 7;
3722 guint32 tval = GPOINTER_TO_UINT (target);
3724 //g_print ("patching 0x%08x (0x%08x) to point to 0x%08x\n", code, ins, target);
3725 if (prim == 5) { /* 101b */
3726 /* the diff starts 8 bytes from the branch opcode */
3727 gint diff = target - code - 8;
3729 gint tmask = 0xffffffff;
3730 if (tval & 1) { /* entering thumb mode */
3731 diff = target - 1 - code - 8;
3732 g_assert (thumb_supported);
3733 tbits = 0xf << 28; /* bl->blx bit pattern */
3734 g_assert ((ins & (1 << 24))); /* it must be a bl, not b instruction */
3735 /* this low bit of the displacement is moved to bit 24 in the instruction encoding */
3739 tmask = ~(1 << 24); /* clear the link bit */
3740 /*g_print ("blx to thumb: target: %p, code: %p, diff: %d, mask: %x\n", target, code, diff, tmask);*/
/* Forward branch: displacement must fit in 24 signed bits (+32MB). */
3745 if (diff <= 33554431) {
3747 ins = (ins & 0xff000000) | diff;
3749 *code32 = ins | tbits;
3753 /* diff between 0 and -33554432 */
3754 if (diff >= -33554432) {
3756 ins = (ins & 0xff000000) | (diff & ~0xff000000);
3758 *code32 = ins | tbits;
/* Out of direct-branch range: route the call through a thunk. */
3763 handle_thunk (domain, TRUE, code, target, dyn_code_mp);
3767 #ifdef USE_JUMP_TABLES
3769 gpointer *jte = mono_jumptable_get_entry (code);
3771 jte [0] = (gpointer) target;
3775 * The alternative call sequences looks like this:
3777 * ldr ip, [pc] // loads the address constant
3778 * b 1f // jumps around the constant
3779 * address constant embedded in the code
3784 * There are two cases for patching:
3785 * a) at the end of method emission: in this case code points to the start
3786 * of the call sequence
3787 * b) during runtime patching of the call site: in this case code points
3788 * to the mov pc, ip instruction
3790 * We have to handle also the thunk jump code sequence:
3794 * address constant // execution never reaches here
3796 if ((ins & 0x0ffffff0) == 0x12fff10) {
3797 /* Branch and exchange: the address is constructed in a reg
3798 * We can patch BX when the code sequence is the following:
3799 * ldr ip, [pc, #0] ; 0x8
3806 guint8 *emit = (guint8*)ccode;
3807 ARM_LDR_IMM (emit, ARMREG_IP, ARMREG_PC, 0);
3809 ARM_MOV_REG_REG (emit, ARMREG_LR, ARMREG_PC);
3810 ARM_BX (emit, ARMREG_IP);
3812 /*patching from magic trampoline*/
3813 if (ins == ccode [3]) {
3814 g_assert (code32 [-4] == ccode [0]);
3815 g_assert (code32 [-3] == ccode [1]);
3816 g_assert (code32 [-1] == ccode [2]);
3817 code32 [-2] = (guint32)target;
3820 /*patching from JIT*/
3821 if (ins == ccode [0]) {
3822 g_assert (code32 [1] == ccode [1]);
3823 g_assert (code32 [3] == ccode [2]);
3824 g_assert (code32 [4] == ccode [3]);
3825 code32 [2] = (guint32)target;
3828 g_assert_not_reached ();
3829 } else if ((ins & 0x0ffffff0) == 0x12fff30) {
3837 guint8 *emit = (guint8*)ccode;
3838 ARM_LDR_IMM (emit, ARMREG_IP, ARMREG_PC, 0);
3840 ARM_BLX_REG (emit, ARMREG_IP);
3842 g_assert (code32 [-3] == ccode [0]);
3843 g_assert (code32 [-2] == ccode [1]);
3844 g_assert (code32 [0] == ccode [2]);
3846 code32 [-1] = (guint32)target;
3849 guint32 *tmp = ccode;
3850 guint8 *emit = (guint8*)tmp;
3851 ARM_LDR_IMM (emit, ARMREG_IP, ARMREG_PC, 0);
3852 ARM_MOV_REG_REG (emit, ARMREG_LR, ARMREG_PC);
3853 ARM_MOV_REG_REG (emit, ARMREG_PC, ARMREG_IP);
3854 ARM_BX (emit, ARMREG_IP);
3855 if (ins == ccode [2]) {
3856 g_assert_not_reached (); // should be -2 ...
3857 code32 [-1] = (guint32)target;
3860 if (ins == ccode [0]) {
3861 /* handles both thunk jump code and the far call sequence */
3862 code32 [2] = (guint32)target;
3865 g_assert_not_reached ();
3867 // g_print ("patched with 0x%08x\n", ins);
/*
 * arm_patch:
 *   Convenience wrapper: patch CODE to branch to TARGET with no explicit
 * domain or dynamic-method code manager (both NULL).
 */
3872 arm_patch (guchar *code, const guchar *target)
3874 arm_patch_general (NULL, code, target, NULL);
/*
 * mono_arm_is_rotated_imm8:
 *   Test whether VAL is encodable as an ARM data-processing modified
 * immediate (an 8-bit value rotated right by an even amount).
 * NOTE(review): at i == 0, "val << (32 - i)" shifts a 32-bit value by 32 —
 * undefined behavior in C, even if it happens to work on common targets;
 * worth guarding the i == 0 iteration. Confirm against upstream.
 */
3878 * Return the >= 0 uimm8 value if val can be represented with a byte + rotation
3879 * (with the rotation amount in *rot_amount. rot_amount is already adjusted
3880 * to be used with the emit macros.
3881 * Return -1 otherwise.
3884 mono_arm_is_rotated_imm8 (guint32 val, gint *rot_amount)
3887 for (i = 0; i < 31; i+= 2) {
3888 res = (val << (32 - i)) | (val >> i);
3891 *rot_amount = i? 32 - i: 0;
/*
 * mono_arm_emit_load_imm:
 *   Load the 32-bit constant VAL into DREG, trying progressively cheaper
 * encodings: a PC-relative constant-pool load (first branch, presumably
 * under an elided #ifdef — confirm), a single MOV/MVN with a rotated imm8,
 * MOVW/MOVT (v7), and finally a MOV of the low byte followed by up to three
 * ADDs of the remaining bytes at rotations 24/16/8.
 * Returns the advanced code pointer (elided return at the end).
 */
3898 * Emits in code a sequence of instructions that load the value 'val'
3899 * into the dreg register. Uses at most 4 instructions.
3902 mono_arm_emit_load_imm (guint8 *code, int dreg, guint32 val)
3904 int imm8, rot_amount;
3906 ARM_LDR_IMM (code, dreg, ARMREG_PC, 0);
3907 /* skip the constant pool */
3913 if ((imm8 = mono_arm_is_rotated_imm8 (val, &rot_amount)) >= 0) {
3914 ARM_MOV_REG_IMM (code, dreg, imm8, rot_amount);
3915 } else if ((imm8 = mono_arm_is_rotated_imm8 (~val, &rot_amount)) >= 0) {
3916 ARM_MVN_REG_IMM (code, dreg, imm8, rot_amount);
3919 ARM_MOVW_REG_IMM (code, dreg, val & 0xffff);
3921 ARM_MOVT_REG_IMM (code, dreg, (val >> 16) & 0xffff);
/* Byte-by-byte build-up: MOV the lowest non-zero byte, ADD the rest. */
3925 ARM_MOV_REG_IMM8 (code, dreg, (val & 0xFF));
3927 ARM_ADD_REG_IMM (code, dreg, dreg, (val & 0xFF00) >> 8, 24);
3929 if (val & 0xFF0000) {
3930 ARM_ADD_REG_IMM (code, dreg, dreg, (val & 0xFF0000) >> 16, 16);
3932 if (val & 0xFF000000) {
3933 ARM_ADD_REG_IMM (code, dreg, dreg, (val & 0xFF000000) >> 24, 8);
3935 } else if (val & 0xFF00) {
3936 ARM_MOV_REG_IMM (code, dreg, (val & 0xFF00) >> 8, 24);
3937 if (val & 0xFF0000) {
3938 ARM_ADD_REG_IMM (code, dreg, dreg, (val & 0xFF0000) >> 16, 16);
3940 if (val & 0xFF000000) {
3941 ARM_ADD_REG_IMM (code, dreg, dreg, (val & 0xFF000000) >> 24, 8);
3943 } else if (val & 0xFF0000) {
3944 ARM_MOV_REG_IMM (code, dreg, (val & 0xFF0000) >> 16, 16);
3945 if (val & 0xFF000000) {
3946 ARM_ADD_REG_IMM (code, dreg, dreg, (val & 0xFF000000) >> 24, 8);
3949 //g_assert_not_reached ();
/*
 * mono_arm_thumb_supported:
 *   Accessor for the file-scope thumb_supported flag (set elsewhere,
 * presumably from hwcap detection — not visible in this excerpt).
 */
3955 mono_arm_thumb_supported (void)
3957 return thumb_supported;
 * emit_load_volatile_arguments:
 *
 * Load volatile arguments from the stack to the original input registers.
 * Required before a tail call.
emit_load_volatile_arguments (MonoCompile *cfg, guint8 *code)
	MonoMethod *method = cfg->method;
	MonoMethodSignature *sig;
	/* FIXME: Generate intermediate code instead */
	sig = mono_method_signature (method);
	/* This is the opposite of the code in emit_prolog */
	cinfo = get_call_info (cfg->generic_sharing_context, NULL, sig);
	/* A vtype return passed by hidden pointer: reload that hidden
	 * return-address argument back into its original register. */
	if (cinfo->vtype_retaddr) {
		ArgInfo *ainfo = &cinfo->ret;
		inst = cfg->vret_addr;
		g_assert (arm_is_imm12 (inst->inst_offset));
		ARM_LDR_IMM (code, ainfo->reg, inst->inst_basereg, inst->inst_offset);
	/* Walk every formal argument (plus 'this') and move it back to where
	 * the calling convention expects it on method entry. */
	for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
		ArgInfo *ainfo = cinfo->args + i;
		inst = cfg->args [pos];
		if (cfg->verbose_level > 2)
			g_print ("Loading argument %d (type: %d)\n", i, ainfo->storage);
		/* Argument currently lives in a register allocated by the JIT. */
		if (inst->opcode == OP_REGVAR) {
			if (ainfo->storage == RegTypeGeneral)
				ARM_MOV_REG_REG (code, inst->dreg, ainfo->reg);
			else if (ainfo->storage == RegTypeFP) {
				g_assert_not_reached ();
			} else if (ainfo->storage == RegTypeBase) {
				/* Incoming value was on the caller's stack frame. */
				if (arm_is_imm12 (prev_sp_offset + ainfo->offset)) {
					ARM_LDR_IMM (code, inst->dreg, ARMREG_SP, (prev_sp_offset + ainfo->offset));
					/* Offset too large for a 12 bit immediate: go through IP. */
					code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
					ARM_LDR_REG_REG (code, inst->dreg, ARMREG_SP, ARMREG_IP);
				g_assert_not_reached ();
			/* Argument was spilled to the local frame: reload it into the
			 * register(s) the convention assigned to it. */
			if (ainfo->storage == RegTypeGeneral || ainfo->storage == RegTypeIRegPair) {
				switch (ainfo->size) {
					/* 8 byte value: reload both words of the register pair. */
					g_assert (arm_is_imm12 (inst->inst_offset));
					ARM_LDR_IMM (code, ainfo->reg, inst->inst_basereg, inst->inst_offset);
					g_assert (arm_is_imm12 (inst->inst_offset + 4));
					ARM_LDR_IMM (code, ainfo->reg + 1, inst->inst_basereg, inst->inst_offset + 4);
					if (arm_is_imm12 (inst->inst_offset)) {
						ARM_LDR_IMM (code, ainfo->reg, inst->inst_basereg, inst->inst_offset);
						code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
						ARM_LDR_REG_REG (code, ainfo->reg, inst->inst_basereg, ARMREG_IP);
			} else if (ainfo->storage == RegTypeBaseGen) {
			} else if (ainfo->storage == RegTypeBase) {
			} else if (ainfo->storage == RegTypeFP) {
				g_assert_not_reached ();
			} else if (ainfo->storage == RegTypeStructByVal) {
				/* Struct passed (partially) in registers: reload it one
				 * machine word at a time into consecutive registers. */
				int doffset = inst->inst_offset;
				if (mono_class_from_mono_type (inst->inst_vtype))
					size = mono_class_native_size (mono_class_from_mono_type (inst->inst_vtype), NULL);
				for (cur_reg = 0; cur_reg < ainfo->size; ++cur_reg) {
					if (arm_is_imm12 (doffset)) {
						ARM_LDR_IMM (code, ainfo->reg + cur_reg, inst->inst_basereg, doffset);
						code = mono_arm_emit_load_imm (code, ARMREG_IP, doffset);
						ARM_LDR_REG_REG (code, ainfo->reg + cur_reg, inst->inst_basereg, ARMREG_IP);
					soffset += sizeof (gpointer);
					doffset += sizeof (gpointer);
			} else if (ainfo->storage == RegTypeStructByAddr) {
4082 mono_arch_output_basic_block (MonoCompile *cfg, MonoBasicBlock *bb)
4087 guint8 *code = cfg->native_code + cfg->code_len;
4088 MonoInst *last_ins = NULL;
4089 guint last_offset = 0;
4091 int imm8, rot_amount;
4093 /* we don't align basic blocks of loops on arm */
4095 if (cfg->verbose_level > 2)
4096 g_print ("Basic block %d starting at offset 0x%x\n", bb->block_num, bb->native_offset);
4098 cpos = bb->max_offset;
4100 if (cfg->prof_options & MONO_PROFILE_COVERAGE) {
4101 //MonoCoverageInfo *cov = mono_get_coverage_info (cfg->method);
4102 //g_assert (!mono_compile_aot);
4105 // cov->data [bb->dfn].iloffset = bb->cil_code - cfg->cil_code;
4106 /* this is not thread save, but good enough */
4107 /* fixme: howto handle overflows? */
4108 //x86_inc_mem (code, &cov->data [bb->dfn].count);
4111 if (mono_break_at_bb_method && mono_method_desc_full_match (mono_break_at_bb_method, cfg->method) && bb->block_num == mono_break_at_bb_bb_num) {
4112 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_INTERNAL_METHOD,
4113 (gpointer)"mono_break");
4114 code = emit_call_seq (cfg, code);
4117 MONO_BB_FOR_EACH_INS (bb, ins) {
4118 offset = code - cfg->native_code;
4120 max_len = ((guint8 *)ins_get_spec (ins->opcode))[MONO_INST_LEN];
4122 if (offset > (cfg->code_size - max_len - 16)) {
4123 cfg->code_size *= 2;
4124 cfg->native_code = g_realloc (cfg->native_code, cfg->code_size);
4125 code = cfg->native_code + offset;
4127 // if (ins->cil_code)
4128 // g_print ("cil code\n");
4129 mono_debug_record_line_number (cfg, ins, offset);
4131 switch (ins->opcode) {
4132 case OP_MEMORY_BARRIER:
4134 ARM_MOV_REG_IMM8 (code, ARMREG_R0, 0);
4135 ARM_MCR (code, 15, 0, ARMREG_R0, 7, 10, 5);
4139 #ifdef HAVE_AEABI_READ_TP
4140 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_INTERNAL_METHOD,
4141 (gpointer)"__aeabi_read_tp");
4142 code = emit_call_seq (cfg, code);
4144 ARM_LDR_IMM (code, ins->dreg, ARMREG_R0, ins->inst_offset);
4146 g_assert_not_reached ();
4149 case OP_ATOMIC_EXCHANGE_I4:
4150 case OP_ATOMIC_CAS_I4:
4151 case OP_ATOMIC_ADD_I4: {
4155 g_assert (v7_supported);
4158 if (ins->sreg1 != ARMREG_IP && ins->sreg2 != ARMREG_IP && ins->sreg3 != ARMREG_IP)
4160 else if (ins->sreg1 != ARMREG_R0 && ins->sreg2 != ARMREG_R0 && ins->sreg3 != ARMREG_R0)
4162 else if (ins->sreg1 != ARMREG_R1 && ins->sreg2 != ARMREG_R1 && ins->sreg3 != ARMREG_R1)
4166 g_assert (cfg->arch.atomic_tmp_offset != -1);
4167 ARM_STR_IMM (code, tmpreg, cfg->frame_reg, cfg->arch.atomic_tmp_offset);
4169 switch (ins->opcode) {
4170 case OP_ATOMIC_EXCHANGE_I4:
4172 ARM_DMB (code, ARM_DMB_SY);
4173 ARM_LDREX_REG (code, ARMREG_LR, ins->sreg1);
4174 ARM_STREX_REG (code, tmpreg, ins->sreg2, ins->sreg1);
4175 ARM_CMP_REG_IMM (code, tmpreg, 0, 0);
4177 ARM_B_COND (code, ARMCOND_NE, 0);
4178 arm_patch (buf [1], buf [0]);
4180 case OP_ATOMIC_CAS_I4:
4181 ARM_DMB (code, ARM_DMB_SY);
4183 ARM_LDREX_REG (code, ARMREG_LR, ins->sreg1);
4184 ARM_CMP_REG_REG (code, ARMREG_LR, ins->sreg3);
4186 ARM_B_COND (code, ARMCOND_NE, 0);
4187 ARM_STREX_REG (code, tmpreg, ins->sreg2, ins->sreg1);
4188 ARM_CMP_REG_IMM (code, tmpreg, 0, 0);
4190 ARM_B_COND (code, ARMCOND_NE, 0);
4191 arm_patch (buf [2], buf [0]);
4192 arm_patch (buf [1], code);
4194 case OP_ATOMIC_ADD_I4:
4196 ARM_DMB (code, ARM_DMB_SY);
4197 ARM_LDREX_REG (code, ARMREG_LR, ins->sreg1);
4198 ARM_ADD_REG_REG (code, ARMREG_LR, ARMREG_LR, ins->sreg2);
4199 ARM_STREX_REG (code, tmpreg, ARMREG_LR, ins->sreg1);
4200 ARM_CMP_REG_IMM (code, tmpreg, 0, 0);
4202 ARM_B_COND (code, ARMCOND_NE, 0);
4203 arm_patch (buf [1], buf [0]);
4206 g_assert_not_reached ();
4209 ARM_DMB (code, ARM_DMB_SY);
4210 if (tmpreg != ins->dreg)
4211 ARM_LDR_IMM (code, tmpreg, cfg->frame_reg, cfg->arch.atomic_tmp_offset);
4212 ARM_MOV_REG_REG (code, ins->dreg, ARMREG_LR);
4217 ppc_mullw (code, ppc_r4, ins->sreg1, ins->sreg2);
4218 ppc_mulhw (code, ppc_r3, ins->sreg1, ins->sreg2);
4221 ppc_mullw (code, ppc_r4, ins->sreg1, ins->sreg2);
4222 ppc_mulhwu (code, ppc_r3, ins->sreg1, ins->sreg2);
4224 case OP_STOREI1_MEMBASE_IMM:
4225 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_imm & 0xFF);
4226 g_assert (arm_is_imm12 (ins->inst_offset));
4227 ARM_STRB_IMM (code, ARMREG_LR, ins->inst_destbasereg, ins->inst_offset);
4229 case OP_STOREI2_MEMBASE_IMM:
4230 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_imm & 0xFFFF);
4231 g_assert (arm_is_imm8 (ins->inst_offset));
4232 ARM_STRH_IMM (code, ARMREG_LR, ins->inst_destbasereg, ins->inst_offset);
4234 case OP_STORE_MEMBASE_IMM:
4235 case OP_STOREI4_MEMBASE_IMM:
4236 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_imm);
4237 g_assert (arm_is_imm12 (ins->inst_offset));
4238 ARM_STR_IMM (code, ARMREG_LR, ins->inst_destbasereg, ins->inst_offset);
4240 case OP_STOREI1_MEMBASE_REG:
4241 g_assert (arm_is_imm12 (ins->inst_offset));
4242 ARM_STRB_IMM (code, ins->sreg1, ins->inst_destbasereg, ins->inst_offset);
4244 case OP_STOREI2_MEMBASE_REG:
4245 g_assert (arm_is_imm8 (ins->inst_offset));
4246 ARM_STRH_IMM (code, ins->sreg1, ins->inst_destbasereg, ins->inst_offset);
4248 case OP_STORE_MEMBASE_REG:
4249 case OP_STOREI4_MEMBASE_REG:
4250 /* this case is special, since it happens for spill code after lowering has been called */
4251 if (arm_is_imm12 (ins->inst_offset)) {
4252 ARM_STR_IMM (code, ins->sreg1, ins->inst_destbasereg, ins->inst_offset);
4254 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_offset);
4255 ARM_STR_REG_REG (code, ins->sreg1, ins->inst_destbasereg, ARMREG_LR);
4258 case OP_STOREI1_MEMINDEX:
4259 ARM_STRB_REG_REG (code, ins->sreg1, ins->inst_destbasereg, ins->sreg2);
4261 case OP_STOREI2_MEMINDEX:
4262 ARM_STRH_REG_REG (code, ins->sreg1, ins->inst_destbasereg, ins->sreg2);
4264 case OP_STORE_MEMINDEX:
4265 case OP_STOREI4_MEMINDEX:
4266 ARM_STR_REG_REG (code, ins->sreg1, ins->inst_destbasereg, ins->sreg2);
4269 g_assert_not_reached ();
4271 case OP_LOAD_MEMINDEX:
4272 case OP_LOADI4_MEMINDEX:
4273 case OP_LOADU4_MEMINDEX:
4274 ARM_LDR_REG_REG (code, ins->dreg, ins->inst_basereg, ins->sreg2);
4276 case OP_LOADI1_MEMINDEX:
4277 ARM_LDRSB_REG_REG (code, ins->dreg, ins->inst_basereg, ins->sreg2);
4279 case OP_LOADU1_MEMINDEX:
4280 ARM_LDRB_REG_REG (code, ins->dreg, ins->inst_basereg, ins->sreg2);
4282 case OP_LOADI2_MEMINDEX:
4283 ARM_LDRSH_REG_REG (code, ins->dreg, ins->inst_basereg, ins->sreg2);
4285 case OP_LOADU2_MEMINDEX:
4286 ARM_LDRH_REG_REG (code, ins->dreg, ins->inst_basereg, ins->sreg2);
4288 case OP_LOAD_MEMBASE:
4289 case OP_LOADI4_MEMBASE:
4290 case OP_LOADU4_MEMBASE:
4291 /* this case is special, since it happens for spill code after lowering has been called */
4292 if (arm_is_imm12 (ins->inst_offset)) {
4293 ARM_LDR_IMM (code, ins->dreg, ins->inst_basereg, ins->inst_offset);
4295 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_offset);
4296 ARM_LDR_REG_REG (code, ins->dreg, ins->inst_basereg, ARMREG_LR);
4299 case OP_LOADI1_MEMBASE:
4300 g_assert (arm_is_imm8 (ins->inst_offset));
4301 ARM_LDRSB_IMM (code, ins->dreg, ins->inst_basereg, ins->inst_offset);
4303 case OP_LOADU1_MEMBASE:
4304 g_assert (arm_is_imm12 (ins->inst_offset));
4305 ARM_LDRB_IMM (code, ins->dreg, ins->inst_basereg, ins->inst_offset);
4307 case OP_LOADU2_MEMBASE:
4308 g_assert (arm_is_imm8 (ins->inst_offset));
4309 ARM_LDRH_IMM (code, ins->dreg, ins->inst_basereg, ins->inst_offset);
4311 case OP_LOADI2_MEMBASE:
4312 g_assert (arm_is_imm8 (ins->inst_offset));
4313 ARM_LDRSH_IMM (code, ins->dreg, ins->inst_basereg, ins->inst_offset);
4315 case OP_ICONV_TO_I1:
4316 ARM_SHL_IMM (code, ins->dreg, ins->sreg1, 24);
4317 ARM_SAR_IMM (code, ins->dreg, ins->dreg, 24);
4319 case OP_ICONV_TO_I2:
4320 ARM_SHL_IMM (code, ins->dreg, ins->sreg1, 16);
4321 ARM_SAR_IMM (code, ins->dreg, ins->dreg, 16);
4323 case OP_ICONV_TO_U1:
4324 ARM_AND_REG_IMM8 (code, ins->dreg, ins->sreg1, 0xff);
4326 case OP_ICONV_TO_U2:
4327 ARM_SHL_IMM (code, ins->dreg, ins->sreg1, 16);
4328 ARM_SHR_IMM (code, ins->dreg, ins->dreg, 16);
4332 ARM_CMP_REG_REG (code, ins->sreg1, ins->sreg2);
4334 case OP_COMPARE_IMM:
4335 case OP_ICOMPARE_IMM:
4336 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
4337 g_assert (imm8 >= 0);
4338 ARM_CMP_REG_IMM (code, ins->sreg1, imm8, rot_amount);
4342 * gdb does not like encountering the hw breakpoint ins in the debugged code.
4343 * So instead of emitting a trap, we emit a call a C function and place a
4346 //*(int*)code = 0xef9f0001;
4349 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_INTERNAL_METHOD,
4350 (gpointer)"mono_break");
4351 code = emit_call_seq (cfg, code);
4353 case OP_RELAXED_NOP:
4358 case OP_DUMMY_STORE:
4359 case OP_DUMMY_ICONST:
4360 case OP_DUMMY_R8CONST:
4361 case OP_NOT_REACHED:
4364 case OP_SEQ_POINT: {
4366 MonoInst *info_var = cfg->arch.seq_point_info_var;
4367 MonoInst *ss_trigger_page_var = cfg->arch.ss_trigger_page_var;
4368 MonoInst *ss_read_var = cfg->arch.seq_point_read_var;
4369 MonoInst *ss_method_var = cfg->arch.seq_point_ss_method_var;
4370 MonoInst *bp_method_var = cfg->arch.seq_point_bp_method_var;
4372 int dreg = ARMREG_LR;
4374 if (cfg->soft_breakpoints) {
4375 g_assert (!cfg->compile_aot);
4379 * For AOT, we use one got slot per method, which will point to a
4380 * SeqPointInfo structure, containing all the information required
4381 * by the code below.
4383 if (cfg->compile_aot) {
4384 g_assert (info_var);
4385 g_assert (info_var->opcode == OP_REGOFFSET);
4386 g_assert (arm_is_imm12 (info_var->inst_offset));
4389 if (!cfg->soft_breakpoints) {
4391 * Read from the single stepping trigger page. This will cause a
4392 * SIGSEGV when single stepping is enabled.
4393 * We do this _before_ the breakpoint, so single stepping after
4394 * a breakpoint is hit will step to the next IL offset.
4396 g_assert (((guint64)(gsize)ss_trigger_page >> 32) == 0);
4399 if (ins->flags & MONO_INST_SINGLE_STEP_LOC) {
4400 if (cfg->soft_breakpoints) {
4401 /* Load the address of the sequence point trigger variable. */
4404 g_assert (var->opcode == OP_REGOFFSET);
4405 g_assert (arm_is_imm12 (var->inst_offset));
4406 ARM_LDR_IMM (code, dreg, var->inst_basereg, var->inst_offset);
4408 /* Read the value and check whether it is non-zero. */
4409 ARM_LDR_IMM (code, dreg, dreg, 0);
4410 ARM_CMP_REG_IMM (code, dreg, 0, 0);
4412 /* Load the address of the sequence point method. */
4413 var = ss_method_var;
4415 g_assert (var->opcode == OP_REGOFFSET);
4416 g_assert (arm_is_imm12 (var->inst_offset));
4417 ARM_LDR_IMM (code, dreg, var->inst_basereg, var->inst_offset);
4419 /* Call it conditionally. */
4420 ARM_BLX_REG_COND (code, ARMCOND_NE, dreg);
4422 if (cfg->compile_aot) {
4423 /* Load the trigger page addr from the variable initialized in the prolog */
4424 var = ss_trigger_page_var;
4426 g_assert (var->opcode == OP_REGOFFSET);
4427 g_assert (arm_is_imm12 (var->inst_offset));
4428 ARM_LDR_IMM (code, dreg, var->inst_basereg, var->inst_offset);
4430 #ifdef USE_JUMP_TABLES
4431 gpointer *jte = mono_jumptable_add_entry ();
4432 code = mono_arm_load_jumptable_entry (code, jte, dreg);
4433 jte [0] = ss_trigger_page;
4435 ARM_LDR_IMM (code, dreg, ARMREG_PC, 0);
4437 *(int*)code = (int)ss_trigger_page;
4441 ARM_LDR_IMM (code, dreg, dreg, 0);
4445 mono_add_seq_point (cfg, bb, ins, code - cfg->native_code);
4447 if (cfg->soft_breakpoints) {
4448 /* Load the address of the breakpoint method into ip. */
4449 var = bp_method_var;
4451 g_assert (var->opcode == OP_REGOFFSET);
4452 g_assert (arm_is_imm12 (var->inst_offset));
4453 ARM_LDR_IMM (code, dreg, var->inst_basereg, var->inst_offset);
4456 * A placeholder for a possible breakpoint inserted by
4457 * mono_arch_set_breakpoint ().
4460 } else if (cfg->compile_aot) {
4461 guint32 offset = code - cfg->native_code;
4464 ARM_LDR_IMM (code, dreg, info_var->inst_basereg, info_var->inst_offset);
4465 /* Add the offset */
4466 val = ((offset / 4) * sizeof (guint8*)) + MONO_STRUCT_OFFSET (SeqPointInfo, bp_addrs);
4467 /* Load the info->bp_addrs [offset], which is either 0 or the address of a trigger page */
4468 if (arm_is_imm12 ((int)val)) {
4469 ARM_LDR_IMM (code, dreg, dreg, val);
4471 ARM_ADD_REG_IMM (code, dreg, dreg, (val & 0xFF), 0);
4473 ARM_ADD_REG_IMM (code, dreg, dreg, (val & 0xFF00) >> 8, 24);
4475 ARM_ADD_REG_IMM (code, dreg, dreg, (val & 0xFF0000) >> 16, 16);
4476 g_assert (!(val & 0xFF000000));
4478 ARM_LDR_IMM (code, dreg, dreg, 0);
4480 /* What is faster, a branch or a load ? */
4481 ARM_CMP_REG_IMM (code, dreg, 0, 0);
4482 /* The breakpoint instruction */
4483 ARM_LDR_IMM_COND (code, dreg, dreg, 0, ARMCOND_NE);
4486 * A placeholder for a possible breakpoint inserted by
4487 * mono_arch_set_breakpoint ().
4489 for (i = 0; i < 4; ++i)
4496 ARM_ADDS_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4499 ARM_ADD_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4503 ARM_ADCS_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4506 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
4507 g_assert (imm8 >= 0);
4508 ARM_ADDS_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
4512 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
4513 g_assert (imm8 >= 0);
4514 ARM_ADD_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
4518 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
4519 g_assert (imm8 >= 0);
4520 ARM_ADCS_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
4523 ARM_ADD_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4524 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, "OverflowException");
4526 case OP_IADD_OVF_UN:
4527 ARM_ADD_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4528 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, "OverflowException");
4531 ARM_SUB_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4532 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, "OverflowException");
4534 case OP_ISUB_OVF_UN:
4535 ARM_SUB_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4536 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_TRUE, PPC_BR_EQ, "OverflowException");
4538 case OP_ADD_OVF_CARRY:
4539 ARM_ADCS_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4540 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, "OverflowException");
4542 case OP_ADD_OVF_UN_CARRY:
4543 ARM_ADCS_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4544 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, "OverflowException");
4546 case OP_SUB_OVF_CARRY:
4547 ARM_SBCS_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4548 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, "OverflowException");
4550 case OP_SUB_OVF_UN_CARRY:
4551 ARM_SBCS_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4552 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_TRUE, PPC_BR_EQ, "OverflowException");
4556 ARM_SUBS_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4559 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
4560 g_assert (imm8 >= 0);
4561 ARM_SUBS_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
4564 ARM_SUB_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4568 ARM_SBCS_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4572 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
4573 g_assert (imm8 >= 0);
4574 ARM_SUB_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
4578 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
4579 g_assert (imm8 >= 0);
4580 ARM_SBCS_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
4582 case OP_ARM_RSBS_IMM:
4583 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
4584 g_assert (imm8 >= 0);
4585 ARM_RSBS_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
4587 case OP_ARM_RSC_IMM:
4588 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
4589 g_assert (imm8 >= 0);
4590 ARM_RSC_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
4593 ARM_AND_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4597 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
4598 g_assert (imm8 >= 0);
4599 ARM_AND_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
4602 g_assert (v7s_supported);
4603 ARM_SDIV (code, ins->dreg, ins->sreg1, ins->sreg2);
4606 g_assert (v7s_supported);
4607 ARM_UDIV (code, ins->dreg, ins->sreg1, ins->sreg2);
4610 g_assert (v7s_supported);
4611 ARM_SDIV (code, ARMREG_LR, ins->sreg1, ins->sreg2);
4612 ARM_MLS (code, ins->dreg, ARMREG_LR, ins->sreg2, ins->sreg1);
4615 g_assert (v7s_supported);
4616 ARM_UDIV (code, ARMREG_LR, ins->sreg1, ins->sreg2);
4617 ARM_MLS (code, ins->dreg, ARMREG_LR, ins->sreg2, ins->sreg1);
4621 g_assert_not_reached ();
4623 ARM_ORR_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4627 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
4628 g_assert (imm8 >= 0);
4629 ARM_ORR_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
4632 ARM_EOR_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4636 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
4637 g_assert (imm8 >= 0);
4638 ARM_EOR_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
4641 ARM_SHL_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4646 ARM_SHL_IMM (code, ins->dreg, ins->sreg1, (ins->inst_imm & 0x1f));
4647 else if (ins->dreg != ins->sreg1)
4648 ARM_MOV_REG_REG (code, ins->dreg, ins->sreg1);
4651 ARM_SAR_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4656 ARM_SAR_IMM (code, ins->dreg, ins->sreg1, (ins->inst_imm & 0x1f));
4657 else if (ins->dreg != ins->sreg1)
4658 ARM_MOV_REG_REG (code, ins->dreg, ins->sreg1);
4661 case OP_ISHR_UN_IMM:
4663 ARM_SHR_IMM (code, ins->dreg, ins->sreg1, (ins->inst_imm & 0x1f));
4664 else if (ins->dreg != ins->sreg1)
4665 ARM_MOV_REG_REG (code, ins->dreg, ins->sreg1);
4668 ARM_SHR_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4671 ARM_MVN_REG_REG (code, ins->dreg, ins->sreg1);
4674 ARM_RSB_REG_IMM8 (code, ins->dreg, ins->sreg1, 0);
4677 if (ins->dreg == ins->sreg2)
4678 ARM_MUL_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4680 ARM_MUL_REG_REG (code, ins->dreg, ins->sreg2, ins->sreg1);
4683 g_assert_not_reached ();
4686 /* FIXME: handle ovf/ sreg2 != dreg */
4687 ARM_MUL_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4688 /* FIXME: MUL doesn't set the C/O flags on ARM */
4690 case OP_IMUL_OVF_UN:
4691 /* FIXME: handle ovf/ sreg2 != dreg */
4692 ARM_MUL_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4693 /* FIXME: MUL doesn't set the C/O flags on ARM */
4696 code = mono_arm_emit_load_imm (code, ins->dreg, ins->inst_c0);
4699 /* Load the GOT offset */
4700 mono_add_patch_info (cfg, offset, (MonoJumpInfoType)ins->inst_i1, ins->inst_p0);
4701 ARM_LDR_IMM (code, ins->dreg, ARMREG_PC, 0);
4703 *(gpointer*)code = NULL;
4705 /* Load the value from the GOT */
4706 ARM_LDR_REG_REG (code, ins->dreg, ARMREG_PC, ins->dreg);
4708 case OP_OBJC_GET_SELECTOR:
4709 mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_OBJC_SELECTOR_REF, ins->inst_p0);
4710 ARM_LDR_IMM (code, ins->dreg, ARMREG_PC, 0);
4712 *(gpointer*)code = NULL;
4714 ARM_LDR_REG_REG (code, ins->dreg, ARMREG_PC, ins->dreg);
4716 case OP_ICONV_TO_I4:
4717 case OP_ICONV_TO_U4:
4719 if (ins->dreg != ins->sreg1)
4720 ARM_MOV_REG_REG (code, ins->dreg, ins->sreg1);
4723 int saved = ins->sreg2;
4724 if (ins->sreg2 == ARM_LSW_REG) {
4725 ARM_MOV_REG_REG (code, ARMREG_LR, ins->sreg2);
4728 if (ins->sreg1 != ARM_LSW_REG)
4729 ARM_MOV_REG_REG (code, ARM_LSW_REG, ins->sreg1);
4730 if (saved != ARM_MSW_REG)
4731 ARM_MOV_REG_REG (code, ARM_MSW_REG, saved);
4736 ARM_CPYD (code, ins->dreg, ins->sreg1);
4738 case OP_FCONV_TO_R4:
4740 ARM_CVTD (code, ins->dreg, ins->sreg1);
4741 ARM_CVTS (code, ins->dreg, ins->dreg);
4746 * Keep in sync with mono_arch_emit_epilog
4748 g_assert (!cfg->method->save_lmf);
4750 code = emit_load_volatile_arguments (cfg, code);
4752 code = emit_big_add (code, ARMREG_SP, cfg->frame_reg, cfg->stack_usage);
4754 if (cfg->used_int_regs)
4755 ARM_POP (code, cfg->used_int_regs);
4756 ARM_POP (code, (1 << ARMREG_R7) | (1 << ARMREG_LR));
4758 ARM_POP (code, cfg->used_int_regs | (1 << ARMREG_LR));
4760 mono_add_patch_info (cfg, (guint8*) code - cfg->native_code, MONO_PATCH_INFO_METHOD_JUMP, ins->inst_p0);
4761 if (cfg->compile_aot) {
4762 ARM_LDR_IMM (code, ARMREG_IP, ARMREG_PC, 0);
4764 *(gpointer*)code = NULL;
4766 ARM_LDR_REG_REG (code, ARMREG_PC, ARMREG_PC, ARMREG_IP);
4768 code = mono_arm_patchable_b (code, ARMCOND_AL);
4772 MonoCallInst *call = (MonoCallInst*)ins;
4775 * The stack looks like the following:
4776 * <caller argument area>
4779 * <callee argument area>
4780 * Need to copy the arguments from the callee argument area to
4781 * the caller argument area, and pop the frame.
4783 if (call->stack_usage) {
4784 int i, prev_sp_offset = 0;
4786 /* Compute size of saved registers restored below */
4788 prev_sp_offset = 2 * 4;
4790 prev_sp_offset = 1 * 4;
4791 for (i = 0; i < 16; ++i) {
4792 if (cfg->used_int_regs & (1 << i))
4793 prev_sp_offset += 4;
4796 code = emit_big_add (code, ARMREG_IP, cfg->frame_reg, cfg->stack_usage + prev_sp_offset);
4798 /* Copy arguments on the stack to our argument area */
4799 for (i = 0; i < call->stack_usage; i += sizeof (mgreg_t)) {
4800 ARM_LDR_IMM (code, ARMREG_LR, ARMREG_SP, i);
4801 ARM_STR_IMM (code, ARMREG_LR, ARMREG_IP, i);
4806 * Keep in sync with mono_arch_emit_epilog
4808 g_assert (!cfg->method->save_lmf);
4810 code = emit_big_add (code, ARMREG_SP, cfg->frame_reg, cfg->stack_usage);
4812 if (cfg->used_int_regs)
4813 ARM_POP (code, cfg->used_int_regs);
4814 ARM_POP (code, (1 << ARMREG_R7) | (1 << ARMREG_LR));
4816 ARM_POP (code, cfg->used_int_regs | (1 << ARMREG_LR));
4819 mono_add_patch_info (cfg, (guint8*) code - cfg->native_code, MONO_PATCH_INFO_METHOD_JUMP, call->method);
4820 if (cfg->compile_aot) {
4821 ARM_LDR_IMM (code, ARMREG_IP, ARMREG_PC, 0);
4823 *(gpointer*)code = NULL;
4825 ARM_LDR_REG_REG (code, ARMREG_PC, ARMREG_PC, ARMREG_IP);
4827 code = mono_arm_patchable_b (code, ARMCOND_AL);
4832 /* ensure ins->sreg1 is not NULL */
4833 ARM_LDRB_IMM (code, ARMREG_LR, ins->sreg1, 0);
4836 g_assert (cfg->sig_cookie < 128);
4837 ARM_LDR_IMM (code, ARMREG_IP, cfg->frame_reg, cfg->sig_cookie);
4838 ARM_STR_IMM (code, ARMREG_IP, ins->sreg1, 0);
4847 call = (MonoCallInst*)ins;
4850 code = emit_float_args (cfg, call, code, &max_len, &offset);
4852 if (ins->flags & MONO_INST_HAS_METHOD)
4853 mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_METHOD, call->method);
4855 mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_ABS, call->fptr);
4856 code = emit_call_seq (cfg, code);
4857 ins->flags |= MONO_INST_GC_CALLSITE;
4858 ins->backend.pc_offset = code - cfg->native_code;
4859 code = emit_move_return_value (cfg, ins, code);
4865 case OP_VOIDCALL_REG:
4868 code = emit_float_args (cfg, (MonoCallInst *)ins, code, &max_len, &offset);
4870 code = emit_call_reg (code, ins->sreg1);
4871 ins->flags |= MONO_INST_GC_CALLSITE;
4872 ins->backend.pc_offset = code - cfg->native_code;
4873 code = emit_move_return_value (cfg, ins, code);
4875 case OP_FCALL_MEMBASE:
4876 case OP_LCALL_MEMBASE:
4877 case OP_VCALL_MEMBASE:
4878 case OP_VCALL2_MEMBASE:
4879 case OP_VOIDCALL_MEMBASE:
4880 case OP_CALL_MEMBASE: {
4881 gboolean imt_arg = FALSE;
4883 g_assert (ins->sreg1 != ARMREG_LR);
4884 call = (MonoCallInst*)ins;
4887 code = emit_float_args (cfg, call, code, &max_len, &offset);
4889 if (call->dynamic_imt_arg || call->method->klass->flags & TYPE_ATTRIBUTE_INTERFACE)
4891 if (!arm_is_imm12 (ins->inst_offset))
4892 code = mono_arm_emit_load_imm (code, ARMREG_IP, ins->inst_offset);
4893 #ifdef USE_JUMP_TABLES
4899 ARM_ADD_REG_IMM8 (code, ARMREG_LR, ARMREG_PC, LR_BIAS);
4901 ARM_MOV_REG_REG (code, ARMREG_LR, ARMREG_PC);
4903 if (!arm_is_imm12 (ins->inst_offset))
4904 ARM_LDR_REG_REG (code, ARMREG_PC, ins->sreg1, ARMREG_IP);
4906 ARM_LDR_IMM (code, ARMREG_PC, ins->sreg1, ins->inst_offset);
4909 * We can't embed the method in the code stream in PIC code, or
4911 * Instead, we put it in V5 in code emitted by
4912 * mono_arch_emit_imt_argument (), and embed NULL here to
4913 * signal the IMT thunk that the value is in V5.
4915 #ifdef USE_JUMP_TABLES
4916 /* In case of jumptables we always use value in V5. */
4919 if (call->dynamic_imt_arg)
4920 *((gpointer*)code) = NULL;
4922 *((gpointer*)code) = (gpointer)call->method;
4926 ins->flags |= MONO_INST_GC_CALLSITE;
4927 ins->backend.pc_offset = code - cfg->native_code;
4928 code = emit_move_return_value (cfg, ins, code);
4932 /* keep alignment */
4933 int alloca_waste = cfg->param_area;
4936 /* round the size to 8 bytes */
4937 ARM_ADD_REG_IMM8 (code, ins->dreg, ins->sreg1, 7);
4938 ARM_BIC_REG_IMM8 (code, ins->dreg, ins->dreg, 7);
4940 ARM_ADD_REG_IMM8 (code, ins->dreg, ins->dreg, alloca_waste);
4941 ARM_SUB_REG_REG (code, ARMREG_SP, ARMREG_SP, ins->dreg);
4942 /* memzero the area: dreg holds the size, sp is the pointer */
4943 if (ins->flags & MONO_INST_INIT) {
4944 guint8 *start_loop, *branch_to_cond;
4945 ARM_MOV_REG_IMM8 (code, ARMREG_LR, 0);
4946 branch_to_cond = code;
4949 ARM_STR_REG_REG (code, ARMREG_LR, ARMREG_SP, ins->dreg);
4950 arm_patch (branch_to_cond, code);
4951 /* decrement by 4 and set flags */
4952 ARM_SUBS_REG_IMM8 (code, ins->dreg, ins->dreg, sizeof (mgreg_t));
4953 ARM_B_COND (code, ARMCOND_GE, 0);
4954 arm_patch (code - 4, start_loop);
4956 ARM_ADD_REG_IMM8 (code, ins->dreg, ARMREG_SP, alloca_waste);
4961 MonoInst *var = cfg->dyn_call_var;
4963 g_assert (var->opcode == OP_REGOFFSET);
4964 g_assert (arm_is_imm12 (var->inst_offset));
4966 /* lr = args buffer filled by mono_arch_get_dyn_call_args () */
4967 ARM_MOV_REG_REG( code, ARMREG_LR, ins->sreg1);
4969 ARM_MOV_REG_REG( code, ARMREG_IP, ins->sreg2);
4971 /* Save args buffer */
4972 ARM_STR_IMM (code, ARMREG_LR, var->inst_basereg, var->inst_offset);
4974 /* Set stack slots using R0 as scratch reg */
4975 /* MONO_ARCH_DYN_CALL_PARAM_AREA gives the size of stack space available */
4976 for (i = 0; i < DYN_CALL_STACK_ARGS; ++i) {
4977 ARM_LDR_IMM (code, ARMREG_R0, ARMREG_LR, (PARAM_REGS + i) * sizeof (mgreg_t));
4978 ARM_STR_IMM (code, ARMREG_R0, ARMREG_SP, i * sizeof (mgreg_t));
4981 /* Set argument registers */
4982 for (i = 0; i < PARAM_REGS; ++i)
4983 ARM_LDR_IMM (code, i, ARMREG_LR, i * sizeof (mgreg_t));
4986 ARM_MOV_REG_REG (code, ARMREG_LR, ARMREG_PC);
4987 ARM_MOV_REG_REG (code, ARMREG_PC, ARMREG_IP);
4990 ARM_LDR_IMM (code, ARMREG_IP, var->inst_basereg, var->inst_offset);
4991 ARM_STR_IMM (code, ARMREG_R0, ARMREG_IP, MONO_STRUCT_OFFSET (DynCallArgs, res));
4992 ARM_STR_IMM (code, ARMREG_R1, ARMREG_IP, MONO_STRUCT_OFFSET (DynCallArgs, res2));
4996 if (ins->sreg1 != ARMREG_R0)
4997 ARM_MOV_REG_REG (code, ARMREG_R0, ins->sreg1);
4998 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_INTERNAL_METHOD,
4999 (gpointer)"mono_arch_throw_exception");
5000 code = emit_call_seq (cfg, code);
5004 if (ins->sreg1 != ARMREG_R0)
5005 ARM_MOV_REG_REG (code, ARMREG_R0, ins->sreg1);
5006 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_INTERNAL_METHOD,
5007 (gpointer)"mono_arch_rethrow_exception");
5008 code = emit_call_seq (cfg, code);
5011 case OP_START_HANDLER: {
5012 MonoInst *spvar = mono_find_spvar_for_region (cfg, bb->region);
5015 /* Reserve a param area, see filter-stack.exe */
5016 if (cfg->param_area) {
5017 if ((i = mono_arm_is_rotated_imm8 (cfg->param_area, &rot_amount)) >= 0) {
5018 ARM_SUB_REG_IMM (code, ARMREG_SP, ARMREG_SP, i, rot_amount);
5020 code = mono_arm_emit_load_imm (code, ARMREG_IP, cfg->param_area);
5021 ARM_SUB_REG_REG (code, ARMREG_SP, ARMREG_SP, ARMREG_IP);
5025 if (arm_is_imm12 (spvar->inst_offset)) {
5026 ARM_STR_IMM (code, ARMREG_LR, spvar->inst_basereg, spvar->inst_offset);
5028 code = mono_arm_emit_load_imm (code, ARMREG_IP, spvar->inst_offset);
5029 ARM_STR_REG_REG (code, ARMREG_LR, spvar->inst_basereg, ARMREG_IP);
5033 case OP_ENDFILTER: {
5034 MonoInst *spvar = mono_find_spvar_for_region (cfg, bb->region);
5037 /* Free the param area */
5038 if (cfg->param_area) {
5039 if ((i = mono_arm_is_rotated_imm8 (cfg->param_area, &rot_amount)) >= 0) {
5040 ARM_ADD_REG_IMM (code, ARMREG_SP, ARMREG_SP, i, rot_amount);
5042 code = mono_arm_emit_load_imm (code, ARMREG_IP, cfg->param_area);
5043 ARM_ADD_REG_REG (code, ARMREG_SP, ARMREG_SP, ARMREG_IP);
5047 if (ins->sreg1 != ARMREG_R0)
5048 ARM_MOV_REG_REG (code, ARMREG_R0, ins->sreg1);
5049 if (arm_is_imm12 (spvar->inst_offset)) {
5050 ARM_LDR_IMM (code, ARMREG_IP, spvar->inst_basereg, spvar->inst_offset);
5052 g_assert (ARMREG_IP != spvar->inst_basereg);
5053 code = mono_arm_emit_load_imm (code, ARMREG_IP, spvar->inst_offset);
5054 ARM_LDR_REG_REG (code, ARMREG_IP, spvar->inst_basereg, ARMREG_IP);
5056 ARM_MOV_REG_REG (code, ARMREG_PC, ARMREG_IP);
5059 case OP_ENDFINALLY: {
5060 MonoInst *spvar = mono_find_spvar_for_region (cfg, bb->region);
5063 /* Free the param area */
5064 if (cfg->param_area) {
5065 if ((i = mono_arm_is_rotated_imm8 (cfg->param_area, &rot_amount)) >= 0) {
5066 ARM_ADD_REG_IMM (code, ARMREG_SP, ARMREG_SP, i, rot_amount);
5068 code = mono_arm_emit_load_imm (code, ARMREG_IP, cfg->param_area);
5069 ARM_ADD_REG_REG (code, ARMREG_SP, ARMREG_SP, ARMREG_IP);
5073 if (arm_is_imm12 (spvar->inst_offset)) {
5074 ARM_LDR_IMM (code, ARMREG_IP, spvar->inst_basereg, spvar->inst_offset);
5076 g_assert (ARMREG_IP != spvar->inst_basereg);
5077 code = mono_arm_emit_load_imm (code, ARMREG_IP, spvar->inst_offset);
5078 ARM_LDR_REG_REG (code, ARMREG_IP, spvar->inst_basereg, ARMREG_IP);
5080 ARM_MOV_REG_REG (code, ARMREG_PC, ARMREG_IP);
5083 case OP_CALL_HANDLER:
5084 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_BB, ins->inst_target_bb);
5085 code = mono_arm_patchable_bl (code, ARMCOND_AL);
5086 mono_cfg_add_try_hole (cfg, ins->inst_eh_block, code, bb);
5089 ins->inst_c0 = code - cfg->native_code;
5092 /*if (ins->inst_target_bb->native_offset) {
5094 //x86_jump_code (code, cfg->native_code + ins->inst_target_bb->native_offset);
5096 mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_BB, ins->inst_target_bb);
5097 code = mono_arm_patchable_b (code, ARMCOND_AL);
5101 ARM_MOV_REG_REG (code, ARMREG_PC, ins->sreg1);
5105 * In the normal case we have:
5106 * ldr pc, [pc, ins->sreg1 << 2]
5109 * ldr lr, [pc, ins->sreg1 << 2]
5111 * After follows the data.
5112 * FIXME: add aot support.
5114 mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_SWITCH, ins->inst_p0);
5115 #ifdef USE_JUMP_TABLES
5117 gpointer *jte = mono_jumptable_add_entries (GPOINTER_TO_INT (ins->klass));
5118 code = mono_arm_load_jumptable_entry_addr (code, jte, ARMREG_IP);
5119 ARM_LDR_REG_REG_SHIFT (code, ARMREG_PC, ARMREG_IP, ins->sreg1, ARMSHIFT_LSL, 2);
5123 max_len += 4 * GPOINTER_TO_INT (ins->klass);
5124 if (offset + max_len > (cfg->code_size - 16)) {
5125 cfg->code_size += max_len;
5126 cfg->code_size *= 2;
5127 cfg->native_code = g_realloc (cfg->native_code, cfg->code_size);
5128 code = cfg->native_code + offset;
5130 ARM_LDR_REG_REG_SHIFT (code, ARMREG_PC, ARMREG_PC, ins->sreg1, ARMSHIFT_LSL, 2);
5132 code += 4 * GPOINTER_TO_INT (ins->klass);
5137 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 0, ARMCOND_NE);
5138 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_EQ);
5142 ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
5143 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_LT);
5147 ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
5148 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_LO);
5152 ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
5153 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_GT);
5157 ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
5158 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_HI);
5161 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_NE);
5162 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 0, ARMCOND_EQ);
5165 ARM_MOV_REG_IMM8 (code, ins->dreg, 1);
5166 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 0, ARMCOND_LT);
5169 ARM_MOV_REG_IMM8 (code, ins->dreg, 1);
5170 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 0, ARMCOND_GT);
5174 ARM_MOV_REG_IMM8 (code, ins->dreg, 1);
5175 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 0, ARMCOND_LO);
5177 case OP_COND_EXC_EQ:
5178 case OP_COND_EXC_NE_UN:
5179 case OP_COND_EXC_LT:
5180 case OP_COND_EXC_LT_UN:
5181 case OP_COND_EXC_GT:
5182 case OP_COND_EXC_GT_UN:
5183 case OP_COND_EXC_GE:
5184 case OP_COND_EXC_GE_UN:
5185 case OP_COND_EXC_LE:
5186 case OP_COND_EXC_LE_UN:
5187 EMIT_COND_SYSTEM_EXCEPTION (ins->opcode - OP_COND_EXC_EQ, ins->inst_p1);
5189 case OP_COND_EXC_IEQ:
5190 case OP_COND_EXC_INE_UN:
5191 case OP_COND_EXC_ILT:
5192 case OP_COND_EXC_ILT_UN:
5193 case OP_COND_EXC_IGT:
5194 case OP_COND_EXC_IGT_UN:
5195 case OP_COND_EXC_IGE:
5196 case OP_COND_EXC_IGE_UN:
5197 case OP_COND_EXC_ILE:
5198 case OP_COND_EXC_ILE_UN:
5199 EMIT_COND_SYSTEM_EXCEPTION (ins->opcode - OP_COND_EXC_IEQ, ins->inst_p1);
5202 case OP_COND_EXC_IC:
5203 EMIT_COND_SYSTEM_EXCEPTION_FLAGS (ARMCOND_CS, ins->inst_p1);
5205 case OP_COND_EXC_OV:
5206 case OP_COND_EXC_IOV:
5207 EMIT_COND_SYSTEM_EXCEPTION_FLAGS (ARMCOND_VS, ins->inst_p1);
5209 case OP_COND_EXC_NC:
5210 case OP_COND_EXC_INC:
5211 EMIT_COND_SYSTEM_EXCEPTION_FLAGS (ARMCOND_CC, ins->inst_p1);
5213 case OP_COND_EXC_NO:
5214 case OP_COND_EXC_INO:
5215 EMIT_COND_SYSTEM_EXCEPTION_FLAGS (ARMCOND_VC, ins->inst_p1);
5227 EMIT_COND_BRANCH (ins, ins->opcode - OP_IBEQ);
5230 /* floating point opcodes */
5232 if (cfg->compile_aot) {
5233 ARM_FLDD (code, ins->dreg, ARMREG_PC, 0);
5235 *(guint32*)code = ((guint32*)(ins->inst_p0))[0];
5237 *(guint32*)code = ((guint32*)(ins->inst_p0))[1];
5240 /* FIXME: we can optimize the imm load by dealing with part of
5241 * the displacement in LDFD (aligning to 512).
5243 code = mono_arm_emit_load_imm (code, ARMREG_LR, (guint32)ins->inst_p0);
5244 ARM_FLDD (code, ins->dreg, ARMREG_LR, 0);
5248 if (cfg->compile_aot) {
5249 ARM_FLDS (code, ins->dreg, ARMREG_PC, 0);
5251 *(guint32*)code = ((guint32*)(ins->inst_p0))[0];
5253 ARM_CVTS (code, ins->dreg, ins->dreg);
5255 code = mono_arm_emit_load_imm (code, ARMREG_LR, (guint32)ins->inst_p0);
5256 ARM_FLDS (code, ins->dreg, ARMREG_LR, 0);
5257 ARM_CVTS (code, ins->dreg, ins->dreg);
5260 case OP_STORER8_MEMBASE_REG:
5261 /* This is generated by the local regalloc pass which runs after the lowering pass */
5262 if (!arm_is_fpimm8 (ins->inst_offset)) {
5263 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_offset);
5264 ARM_ADD_REG_REG (code, ARMREG_LR, ARMREG_LR, ins->inst_destbasereg);
5265 ARM_FSTD (code, ins->sreg1, ARMREG_LR, 0);
5267 ARM_FSTD (code, ins->sreg1, ins->inst_destbasereg, ins->inst_offset);
5270 case OP_LOADR8_MEMBASE:
5271 /* This is generated by the local regalloc pass which runs after the lowering pass */
5272 if (!arm_is_fpimm8 (ins->inst_offset)) {
5273 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_offset);
5274 ARM_ADD_REG_REG (code, ARMREG_LR, ARMREG_LR, ins->inst_basereg);
5275 ARM_FLDD (code, ins->dreg, ARMREG_LR, 0);
5277 ARM_FLDD (code, ins->dreg, ins->inst_basereg, ins->inst_offset);
5280 case OP_STORER4_MEMBASE_REG:
5281 g_assert (arm_is_fpimm8 (ins->inst_offset));
5282 code = mono_arm_emit_vfp_scratch_save (cfg, code, vfp_scratch1);
5283 ARM_CVTD (code, vfp_scratch1, ins->sreg1);
5284 ARM_FSTS (code, vfp_scratch1, ins->inst_destbasereg, ins->inst_offset);
5285 code = mono_arm_emit_vfp_scratch_restore (cfg, code, vfp_scratch1);
5287 case OP_LOADR4_MEMBASE:
5288 g_assert (arm_is_fpimm8 (ins->inst_offset));
5289 code = mono_arm_emit_vfp_scratch_save (cfg, code, vfp_scratch1);
5290 ARM_FLDS (code, vfp_scratch1, ins->inst_basereg, ins->inst_offset);
5291 ARM_CVTS (code, ins->dreg, vfp_scratch1);
5292 code = mono_arm_emit_vfp_scratch_restore (cfg, code, vfp_scratch1);
5294 case OP_ICONV_TO_R_UN: {
5295 g_assert_not_reached ();
5298 case OP_ICONV_TO_R4:
5299 code = mono_arm_emit_vfp_scratch_save (cfg, code, vfp_scratch1);
5300 ARM_FMSR (code, vfp_scratch1, ins->sreg1);
5301 ARM_FSITOS (code, vfp_scratch1, vfp_scratch1);
5302 ARM_CVTS (code, ins->dreg, vfp_scratch1);
5303 code = mono_arm_emit_vfp_scratch_restore (cfg, code, vfp_scratch1);
5305 case OP_ICONV_TO_R8:
5306 code = mono_arm_emit_vfp_scratch_save (cfg, code, vfp_scratch1);
5307 ARM_FMSR (code, vfp_scratch1, ins->sreg1);
5308 ARM_FSITOD (code, ins->dreg, vfp_scratch1);
5309 code = mono_arm_emit_vfp_scratch_restore (cfg, code, vfp_scratch1);
5313 if (mono_method_signature (cfg->method)->ret->type == MONO_TYPE_R4) {
5314 ARM_CVTD (code, ARM_VFP_F0, ins->sreg1);
5316 if (!IS_HARD_FLOAT) {
5317 ARM_FMRS (code, ARMREG_R0, ARM_VFP_F0);
5320 if (IS_HARD_FLOAT) {
5321 ARM_CPYD (code, ARM_VFP_D0, ins->sreg1);
5323 ARM_FMRRD (code, ARMREG_R0, ARMREG_R1, ins->sreg1);
5327 case OP_FCONV_TO_I1:
5328 code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 1, TRUE);
5330 case OP_FCONV_TO_U1:
5331 code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 1, FALSE);
5333 case OP_FCONV_TO_I2:
5334 code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 2, TRUE);
5336 case OP_FCONV_TO_U2:
5337 code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 2, FALSE);
5339 case OP_FCONV_TO_I4:
5341 code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 4, TRUE);
5343 case OP_FCONV_TO_U4:
5345 code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 4, FALSE);
5347 case OP_FCONV_TO_I8:
5348 case OP_FCONV_TO_U8:
5349 g_assert_not_reached ();
5350 /* Implemented as helper calls */
5352 case OP_LCONV_TO_R_UN:
5353 g_assert_not_reached ();
5354 /* Implemented as helper calls */
5356 case OP_LCONV_TO_OVF_I4_2: {
5357 guint8 *high_bit_not_set, *valid_negative, *invalid_negative, *valid_positive;
5359 * Valid ints: 0xffffffff:8000000 to 00000000:0x7f000000
5362 ARM_CMP_REG_IMM8 (code, ins->sreg1, 0);
5363 high_bit_not_set = code;
5364 ARM_B_COND (code, ARMCOND_GE, 0); /*branch if bit 31 of the lower part is not set*/
5366 ARM_CMN_REG_IMM8 (code, ins->sreg2, 1); /*This have the same effect as CMP reg, 0xFFFFFFFF */
5367 valid_negative = code;
5368 ARM_B_COND (code, ARMCOND_EQ, 0); /*branch if upper part == 0xFFFFFFFF (lower part has bit 31 set) */
5369 invalid_negative = code;
5370 ARM_B_COND (code, ARMCOND_AL, 0);
5372 arm_patch (high_bit_not_set, code);
5374 ARM_CMP_REG_IMM8 (code, ins->sreg2, 0);
5375 valid_positive = code;
5376 ARM_B_COND (code, ARMCOND_EQ, 0); /*branch if upper part == 0 (lower part has bit 31 clear)*/
5378 arm_patch (invalid_negative, code);
5379 EMIT_COND_SYSTEM_EXCEPTION_FLAGS (ARMCOND_AL, "OverflowException");
5381 arm_patch (valid_negative, code);
5382 arm_patch (valid_positive, code);
5384 if (ins->dreg != ins->sreg1)
5385 ARM_MOV_REG_REG (code, ins->dreg, ins->sreg1);
5389 ARM_VFP_ADDD (code, ins->dreg, ins->sreg1, ins->sreg2);
5392 ARM_VFP_SUBD (code, ins->dreg, ins->sreg1, ins->sreg2);
5395 ARM_VFP_MULD (code, ins->dreg, ins->sreg1, ins->sreg2);
5398 ARM_VFP_DIVD (code, ins->dreg, ins->sreg1, ins->sreg2);
5401 ARM_NEGD (code, ins->dreg, ins->sreg1);
5405 g_assert_not_reached ();
5409 ARM_CMPD (code, ins->sreg1, ins->sreg2);
5415 ARM_CMPD (code, ins->sreg1, ins->sreg2);
5418 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 0, ARMCOND_NE);
5419 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_EQ);
5423 ARM_CMPD (code, ins->sreg1, ins->sreg2);
5426 ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
5427 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_MI);
5431 ARM_CMPD (code, ins->sreg1, ins->sreg2);
5434 ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
5435 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_MI);
5436 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_VS);
5440 ARM_CMPD (code, ins->sreg2, ins->sreg1);
5443 ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
5444 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_MI);
5448 ARM_CMPD (code, ins->sreg2, ins->sreg1);
5451 ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
5452 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_MI);
5453 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_VS);
5457 ARM_CMPD (code, ins->sreg1, ins->sreg2);
5460 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_NE);
5461 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 0, ARMCOND_EQ);
5465 ARM_CMPD (code, ins->sreg1, ins->sreg2);
5468 ARM_MOV_REG_IMM8 (code, ins->dreg, 1);
5469 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 0, ARMCOND_MI);
5473 ARM_CMPD (code, ins->sreg2, ins->sreg1);
5476 ARM_MOV_REG_IMM8 (code, ins->dreg, 1);
5477 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 0, ARMCOND_MI);
5480 /* ARM FPA flags table:
5481 * N Less than ARMCOND_MI
5482 * Z Equal ARMCOND_EQ
5483 * C Greater Than or Equal ARMCOND_CS
5484 * V Unordered ARMCOND_VS
5487 EMIT_COND_BRANCH (ins, OP_IBEQ - OP_IBEQ);
5490 EMIT_COND_BRANCH (ins, OP_IBNE_UN - OP_IBEQ);
5493 EMIT_COND_BRANCH_FLAGS (ins, ARMCOND_MI); /* N set */
5496 EMIT_COND_BRANCH_FLAGS (ins, ARMCOND_VS); /* V set */
5497 EMIT_COND_BRANCH_FLAGS (ins, ARMCOND_MI); /* N set */
5503 g_assert_not_reached ();
5507 EMIT_COND_BRANCH_FLAGS (ins, ARMCOND_GE);
5509 /* FPA requires EQ even though the docs suggest that just CS is enough */
5510 EMIT_COND_BRANCH_FLAGS (ins, ARMCOND_EQ);
5511 EMIT_COND_BRANCH_FLAGS (ins, ARMCOND_CS);
5515 EMIT_COND_BRANCH_FLAGS (ins, ARMCOND_VS); /* V set */
5516 EMIT_COND_BRANCH_FLAGS (ins, ARMCOND_GE);
5521 code = mono_arm_emit_vfp_scratch_save (cfg, code, vfp_scratch1);
5522 code = mono_arm_emit_vfp_scratch_save (cfg, code, vfp_scratch2);
5524 #ifdef USE_JUMP_TABLES
5526 gpointer *jte = mono_jumptable_add_entries (2);
5527 jte [0] = GUINT_TO_POINTER (0xffffffff);
5528 jte [1] = GUINT_TO_POINTER (0x7fefffff);
5529 code = mono_arm_load_jumptable_entry_addr (code, jte, ARMREG_IP);
5530 ARM_FLDD (code, vfp_scratch1, ARMREG_IP, 0);
5533 ARM_ABSD (code, vfp_scratch2, ins->sreg1);
5534 ARM_FLDD (code, vfp_scratch1, ARMREG_PC, 0);
5536 *(guint32*)code = 0xffffffff;
5538 *(guint32*)code = 0x7fefffff;
5541 ARM_CMPD (code, vfp_scratch2, vfp_scratch1);
5543 EMIT_COND_SYSTEM_EXCEPTION_FLAGS (ARMCOND_GT, "ArithmeticException");
5544 ARM_CMPD (code, ins->sreg1, ins->sreg1);
5546 EMIT_COND_SYSTEM_EXCEPTION_FLAGS (ARMCOND_VS, "ArithmeticException");
5547 ARM_CPYD (code, ins->dreg, ins->sreg1);
5549 code = mono_arm_emit_vfp_scratch_restore (cfg, code, vfp_scratch1);
5550 code = mono_arm_emit_vfp_scratch_restore (cfg, code, vfp_scratch2);
5555 case OP_GC_LIVENESS_DEF:
5556 case OP_GC_LIVENESS_USE:
5557 case OP_GC_PARAM_SLOT_LIVENESS_DEF:
5558 ins->backend.pc_offset = code - cfg->native_code;
5560 case OP_GC_SPILL_SLOT_LIVENESS_DEF:
5561 ins->backend.pc_offset = code - cfg->native_code;
5562 bb->spill_slot_defs = g_slist_prepend_mempool (cfg->mempool, bb->spill_slot_defs, ins);
5566 g_warning ("unknown opcode %s in %s()\n", mono_inst_name (ins->opcode), __FUNCTION__);
5567 g_assert_not_reached ();
5570 if ((cfg->opt & MONO_OPT_BRANCH) && ((code - cfg->native_code - offset) > max_len)) {
5571 g_warning ("wrong maximal instruction length of instruction %s (expected %d, got %d)",
5572 mono_inst_name (ins->opcode), max_len, code - cfg->native_code - offset);
5573 g_assert_not_reached ();
5579 last_offset = offset;
5582 cfg->code_len = code - cfg->native_code;
5585 #endif /* DISABLE_JIT */
/*
 * Declaration of the ARM EABI helper that reads the thread pointer.
 * Only declared when HAVE_AEABI_READ_TP is set (see the #define near the
 * top of the file: EABI Linux, non-Android, non-NaCl).
 * NOTE(review): lines in this region appear to have been elided by the
 * extraction; comments below annotate only the visible statements.
 */
5587 #ifdef HAVE_AEABI_READ_TP
5588 void __aeabi_read_tp (void);
/*
 * Register the ARM-specific low-level helpers as JIT icalls so generated
 * code can reference them by name.  The signatures passed here are dummies
 * (see the comment below) — only the symbol/name binding matters.
 */
5592 mono_arch_register_lowlevel_calls (void)
5594 /* The signature doesn't matter */
5595 mono_register_jit_icall (mono_arm_throw_exception, "mono_arm_throw_exception", mono_create_icall_signature ("void"), TRUE);
5596 mono_register_jit_icall (mono_arm_throw_exception_by_token, "mono_arm_throw_exception_by_token", mono_create_icall_signature ("void"), TRUE);
5598 #ifndef MONO_CROSS_COMPILE
5599 #ifdef HAVE_AEABI_READ_TP
/* Presumably skipped when cross-compiling because the symbol must resolve in the host toolchain — TODO confirm. */
5600 mono_register_jit_icall (__aeabi_read_tp, "__aeabi_read_tp", mono_create_icall_signature ("void"), TRUE);
/*
 * patch_lis_ori:
 *   Writes the high and low 16-bit halves of `val` into halfwords 1 and 3
 *   at `ip` — the immediate fields of a lis/ori instruction pair (see the
 *   "from OP_AOTCONST : lis + ori" comment in mono_arch_patch_code).
 *   NOTE(review): every use site visible below is preceded by
 *   g_assert_not_reached (), so this looks like dead code inherited from
 *   the PPC backend — confirm before relying on it.
 *   (Comment placed before the #define only: the macro body uses
 *   backslash continuations and must not be interrupted.)
 */
5605 #define patch_lis_ori(ip,val) do {\
5606 guint16 *__lis_ori = (guint16*)(ip); \
5607 __lis_ori [1] = (((guint32)(val)) >> 16) & 0xffff; \
5608 __lis_ori [3] = ((guint32)(val)) & 0xffff; \
/*
 * mono_arch_patch_code:
 *   Walk the jump-info list `ji` for `method` and rewrite the generated
 *   code at each recorded ip with the resolved target address.
 *   `run_cctors` doubles as the JIT/AOT discriminator (compile_aot below).
 *   NOTE(review): lines in this region appear to have been elided by the
 *   extraction; comments annotate only the visible statements.
 */
5612 mono_arch_patch_code (MonoMethod *method, MonoDomain *domain, guint8 *code, MonoJumpInfo *ji, MonoCodeManager *dyn_code_mp, gboolean run_cctors)
5614 MonoJumpInfo *patch_info;
5615 gboolean compile_aot = !run_cctors;
/* Process every pending patch recorded while emitting the method. */
5617 for (patch_info = ji; patch_info; patch_info = patch_info->next) {
5618 unsigned char *ip = patch_info->ip.i + code;
5619 const unsigned char *target;
/*
 * Switch tables are filled in directly here (JIT only): each slot gets
 * the absolute address of its target basic block.
 */
5621 if (patch_info->type == MONO_PATCH_INFO_SWITCH && !compile_aot) {
5622 #ifdef USE_JUMP_TABLES
5623 gpointer *jt = mono_jumptable_get_entry (ip);
5625 gpointer *jt = (gpointer*)(ip + 8);
5628 /* jt is the inlined jump table, 2 instructions after ip
5629 * In the normal case we store the absolute addresses,
5630 * otherwise the displacements.
5632 for (i = 0; i < patch_info->data.table->table_size; i++)
5633 jt [i] = code + (int)patch_info->data.table->table [i];
/* First switch: filter out patch kinds that need no target resolution. */
5638 switch (patch_info->type) {
5639 case MONO_PATCH_INFO_BB:
5640 case MONO_PATCH_INFO_LABEL:
5643 /* No need to patch these */
5648 target = mono_resolve_patch_target (method, domain, code, patch_info, run_cctors);
/*
 * Second switch: per-kind patching.  The cases guarded by
 * g_assert_not_reached () below look like dead code inherited from the
 * PPC backend (lis/ori patching) — NOTE(review): confirm before removal.
 */
5650 switch (patch_info->type) {
5651 case MONO_PATCH_INFO_IP:
5652 g_assert_not_reached ();
5653 patch_lis_ori (ip, ip);
5655 case MONO_PATCH_INFO_METHOD_REL:
5656 g_assert_not_reached ();
5657 *((gpointer *)(ip)) = code + patch_info->data.offset;
5659 case MONO_PATCH_INFO_METHODCONST:
5660 case MONO_PATCH_INFO_CLASS:
5661 case MONO_PATCH_INFO_IMAGE:
5662 case MONO_PATCH_INFO_FIELD:
5663 case MONO_PATCH_INFO_VTABLE:
5664 case MONO_PATCH_INFO_IID:
5665 case MONO_PATCH_INFO_SFLDA:
5666 case MONO_PATCH_INFO_LDSTR:
5667 case MONO_PATCH_INFO_TYPE_FROM_HANDLE:
5668 case MONO_PATCH_INFO_LDTOKEN:
5669 g_assert_not_reached ();
5670 /* from OP_AOTCONST : lis + ori */
5671 patch_lis_ori (ip, target);
5673 case MONO_PATCH_INFO_R4:
5674 case MONO_PATCH_INFO_R8:
5675 g_assert_not_reached ();
5676 *((gconstpointer *)(ip + 2)) = patch_info->data.target;
5678 case MONO_PATCH_INFO_EXC_NAME:
5679 g_assert_not_reached ();
5680 *((gconstpointer *)(ip + 1)) = patch_info->data.name;
5682 case MONO_PATCH_INFO_NONE:
5683 case MONO_PATCH_INFO_BB_OVF:
5684 case MONO_PATCH_INFO_EXC_OVF:
5685 /* everything is dealt with at epilog output time */
/* Default path: generic ARM branch/load patching. */
5690 arm_patch_general (domain, ip, target, dyn_code_mp);
5697 * Stack frame layout:
5699 * ------------------- fp
5700 * MonoLMF structure or saved registers
5701 * -------------------
5703 * -------------------
5705 * -------------------
5706 * optional 8 bytes for tracing
5707 * -------------------
5708 * param area size is cfg->param_area
5709 * ------------------- sp
/*
 * mono_arch_emit_prolog:
 *   Emit the method prolog: save callee-saved registers (or the full
 *   MonoLMF when method->save_lmf), allocate and align the stack frame,
 *   emit unwind/GC-map info, store the rgctx and vtype-return address,
 *   move incoming arguments from their ABI locations (registers or the
 *   caller's frame) into their home slots, and initialize the
 *   sequence-point / single-stepping variables used by the debugger agent.
 *   Returns the updated code pointer (stored into cfg->native_code).
 *   NOTE(review): many lines in this region appear to have been elided by
 *   the extraction; comments annotate only the visible statements.
 */
5712 mono_arch_emit_prolog (MonoCompile *cfg)
5714 MonoMethod *method = cfg->method;
5716 MonoMethodSignature *sig;
5718 int alloc_size, orig_alloc_size, pos, max_offset, i, rot_amount;
5723 int prev_sp_offset, reg_offset;
5725 if (mono_jit_trace_calls != NULL && mono_trace_eval (method))
/* Initial code-buffer size guess; grown later via g_realloc when exceeded. */
5728 sig = mono_method_signature (method);
5729 cfg->code_size = 256 + sig->param_count * 64;
5730 code = cfg->native_code = g_malloc (cfg->code_size);
/* CFA starts at the incoming sp before anything is pushed. */
5732 mono_emit_unwind_op_def_cfa (cfg, code, ARMREG_SP, 0);
5734 alloc_size = cfg->stack_offset;
5740 * The iphone uses R7 as the frame pointer, and it points at the saved
5745 * We can't use r7 as a frame pointer since it points into the middle of
5746 * the frame, so we keep using our own frame pointer.
5747 * FIXME: Optimize this.
5749 ARM_PUSH (code, (1 << ARMREG_R7) | (1 << ARMREG_LR));
5750 ARM_MOV_REG_REG (code, ARMREG_R7, ARMREG_SP);
5751 prev_sp_offset += 8; /* r7 and lr */
5752 mono_emit_unwind_op_def_cfa_offset (cfg, code, prev_sp_offset);
5753 mono_emit_unwind_op_offset (cfg, code, ARMREG_R7, (- prev_sp_offset) + 0);
/*
 * Non-LMF path: push only the callee-saved registers this method uses
 * (plus lr when not already pushed), and record unwind offsets plus
 * NOREF GC-map entries for each saved slot.
 */
5756 if (!method->save_lmf) {
5758 /* No need to push LR again */
5759 if (cfg->used_int_regs)
5760 ARM_PUSH (code, cfg->used_int_regs);
5762 ARM_PUSH (code, cfg->used_int_regs | (1 << ARMREG_LR));
5763 prev_sp_offset += 4;
5765 for (i = 0; i < 16; ++i) {
5766 if (cfg->used_int_regs & (1 << i))
5767 prev_sp_offset += 4;
5769 mono_emit_unwind_op_def_cfa_offset (cfg, code, prev_sp_offset);
5771 for (i = 0; i < 16; ++i) {
5772 if ((cfg->used_int_regs & (1 << i))) {
5773 mono_emit_unwind_op_offset (cfg, code, i, (- prev_sp_offset) + reg_offset);
5774 mini_gc_set_slot_type_from_cfa (cfg, (- prev_sp_offset) + reg_offset, SLOT_NOREF);
5779 mono_emit_unwind_op_offset (cfg, code, ARMREG_LR, -4);
5780 mini_gc_set_slot_type_from_cfa (cfg, -4, SLOT_NOREF);
5782 mono_emit_unwind_op_offset (cfg, code, ARMREG_LR, -4);
5783 mini_gc_set_slot_type_from_cfa (cfg, -4, SLOT_NOREF);
/* LMF path: push mask 0x5ff0 — all registers except r0-r3, sp and pc (see comment below). */
5786 ARM_MOV_REG_REG (code, ARMREG_IP, ARMREG_SP);
5787 ARM_PUSH (code, 0x5ff0);
5788 prev_sp_offset += 4 * 10; /* all but r0-r3, sp and pc */
5789 mono_emit_unwind_op_def_cfa_offset (cfg, code, prev_sp_offset);
5791 for (i = 0; i < 16; ++i) {
5792 if ((i > ARMREG_R3) && (i != ARMREG_SP) && (i != ARMREG_PC)) {
5793 /* The original r7 is saved at the start */
5794 if (!(iphone_abi && i == ARMREG_R7))
5795 mono_emit_unwind_op_offset (cfg, code, i, (- prev_sp_offset) + reg_offset);
5799 g_assert (reg_offset == 4 * 10);
5800 pos += sizeof (MonoLMF) - (4 * 10);
/* Round the local-area size up to MONO_ARCH_FRAME_ALIGNMENT. */
5804 orig_alloc_size = alloc_size;
5805 // align to MONO_ARCH_FRAME_ALIGNMENT bytes
5806 if (alloc_size & (MONO_ARCH_FRAME_ALIGNMENT - 1)) {
5807 alloc_size += MONO_ARCH_FRAME_ALIGNMENT - 1;
5808 alloc_size &= ~(MONO_ARCH_FRAME_ALIGNMENT - 1);
5811 /* the stack used in the pushed regs */
5812 if (prev_sp_offset & 4)
5814 cfg->stack_usage = alloc_size;
/* sub sp, sp, #alloc_size — as a rotated imm8 if encodable, else via IP. */
5816 if ((i = mono_arm_is_rotated_imm8 (alloc_size, &rot_amount)) >= 0) {
5817 ARM_SUB_REG_IMM (code, ARMREG_SP, ARMREG_SP, i, rot_amount);
5819 code = mono_arm_emit_load_imm (code, ARMREG_IP, alloc_size);
5820 ARM_SUB_REG_REG (code, ARMREG_SP, ARMREG_SP, ARMREG_IP);
5822 mono_emit_unwind_op_def_cfa_offset (cfg, code, prev_sp_offset + alloc_size);
/* Switch the CFA to the dedicated frame register, if one is in use. */
5824 if (cfg->frame_reg != ARMREG_SP) {
5825 ARM_MOV_REG_REG (code, cfg->frame_reg, ARMREG_SP);
5826 mono_emit_unwind_op_def_cfa_reg (cfg, code, cfg->frame_reg);
5828 //g_print ("prev_sp_offset: %d, alloc_size:%d\n", prev_sp_offset, alloc_size);
5829 prev_sp_offset += alloc_size;
/* Alignment padding contains no references — mark it NOREF for the GC maps. */
5831 for (i = 0; i < alloc_size - orig_alloc_size; i += 4)
5832 mini_gc_set_slot_type_from_cfa (cfg, (- prev_sp_offset) + orig_alloc_size + i, SLOT_NOREF);
5834 /* compute max_offset in order to use short forward jumps
5835 * we could skip do it on arm because the immediate displacement
5836 * for jumps is large enough, it may be useful later for constant pools
5839 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
5840 MonoInst *ins = bb->code;
5841 bb->max_offset = max_offset;
5843 if (cfg->prof_options & MONO_PROFILE_COVERAGE)
5846 MONO_BB_FOR_EACH_INS (bb, ins)
5847 max_offset += ((guint8 *)ins_get_spec (ins->opcode))[MONO_INST_LEN];
5850 /* store runtime generic context */
5851 if (cfg->rgctx_var) {
5852 MonoInst *ins = cfg->rgctx_var;
5854 g_assert (ins->opcode == OP_REGOFFSET);
5856 if (arm_is_imm12 (ins->inst_offset)) {
5857 ARM_STR_IMM (code, MONO_ARCH_RGCTX_REG, ins->inst_basereg, ins->inst_offset);
5859 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_offset);
5860 ARM_STR_REG_REG (code, MONO_ARCH_RGCTX_REG, ins->inst_basereg, ARMREG_LR);
5864 /* load arguments allocated to register from the stack */
5867 cinfo = get_call_info (cfg->generic_sharing_context, NULL, sig);
/* Spill the hidden valuetype-return address argument to its home slot. */
5869 if (cinfo->vtype_retaddr) {
5870 ArgInfo *ainfo = &cinfo->ret;
5871 inst = cfg->vret_addr;
5872 g_assert (arm_is_imm12 (inst->inst_offset));
5873 ARM_STR_IMM (code, ainfo->reg, inst->inst_basereg, inst->inst_offset);
5876 if (sig->call_convention == MONO_CALL_VARARG) {
5877 ArgInfo *cookie = &cinfo->sig_cookie;
5879 /* Save the sig cookie address */
5880 g_assert (cookie->storage == RegTypeBase);
5882 g_assert (arm_is_imm12 (prev_sp_offset + cookie->offset));
5883 g_assert (arm_is_imm12 (cfg->sig_cookie));
5884 ARM_ADD_REG_IMM8 (code, ARMREG_IP, cfg->frame_reg, prev_sp_offset + cookie->offset);
5885 ARM_STR_IMM (code, ARMREG_IP, cfg->frame_reg, cfg->sig_cookie);
/*
 * Move each incoming argument (including `this`) from its ABI location
 * into the slot/register assigned by the register allocator.  The
 * arm_is_imm12/arm_is_imm8 checks below pick immediate-offset forms when
 * the displacement is encodable, falling back to loading it into IP.
 */
5888 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
5889 ArgInfo *ainfo = cinfo->args + i;
5890 inst = cfg->args [pos];
5892 if (cfg->verbose_level > 2)
5893 g_print ("Saving argument %d (type: %d)\n", i, ainfo->storage);
5894 if (inst->opcode == OP_REGVAR) {
5895 if (ainfo->storage == RegTypeGeneral)
5896 ARM_MOV_REG_REG (code, inst->dreg, ainfo->reg);
5897 else if (ainfo->storage == RegTypeFP) {
5898 g_assert_not_reached ();
5899 } else if (ainfo->storage == RegTypeBase) {
5900 if (arm_is_imm12 (prev_sp_offset + ainfo->offset)) {
5901 ARM_LDR_IMM (code, inst->dreg, ARMREG_SP, (prev_sp_offset + ainfo->offset));
5903 code = mono_arm_emit_load_imm (code, ARMREG_IP, prev_sp_offset + ainfo->offset);
5904 ARM_LDR_REG_REG (code, inst->dreg, ARMREG_SP, ARMREG_IP);
5907 g_assert_not_reached ();
5909 if (cfg->verbose_level > 2)
5910 g_print ("Argument %d assigned to register %s\n", pos, mono_arch_regname (inst->dreg));
5912 /* the argument should be put on the stack: FIXME handle size != word */
5913 if (ainfo->storage == RegTypeGeneral || ainfo->storage == RegTypeIRegPair || ainfo->storage == RegTypeGSharedVtInReg) {
5914 switch (ainfo->size) {
5916 if (arm_is_imm12 (inst->inst_offset))
5917 ARM_STRB_IMM (code, ainfo->reg, inst->inst_basereg, inst->inst_offset);
5919 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
5920 ARM_STRB_REG_REG (code, ainfo->reg, inst->inst_basereg, ARMREG_IP);
5924 if (arm_is_imm8 (inst->inst_offset)) {
5925 ARM_STRH_IMM (code, ainfo->reg, inst->inst_basereg, inst->inst_offset);
5927 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
5928 ARM_STRH_REG_REG (code, ainfo->reg, inst->inst_basereg, ARMREG_IP);
5932 if (arm_is_imm12 (inst->inst_offset)) {
5933 ARM_STR_IMM (code, ainfo->reg, inst->inst_basereg, inst->inst_offset);
5935 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
5936 ARM_STR_REG_REG (code, ainfo->reg, inst->inst_basereg, ARMREG_IP);
5938 if (arm_is_imm12 (inst->inst_offset + 4)) {
5939 ARM_STR_IMM (code, ainfo->reg + 1, inst->inst_basereg, inst->inst_offset + 4);
5941 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset + 4);
5942 ARM_STR_REG_REG (code, ainfo->reg + 1, inst->inst_basereg, ARMREG_IP);
5946 if (arm_is_imm12 (inst->inst_offset)) {
5947 ARM_STR_IMM (code, ainfo->reg, inst->inst_basereg, inst->inst_offset);
5949 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
5950 ARM_STR_REG_REG (code, ainfo->reg, inst->inst_basereg, ARMREG_IP);
/* RegTypeBaseGen: argument split between r3 and the caller's stack. */
5954 } else if (ainfo->storage == RegTypeBaseGen) {
5955 if (arm_is_imm12 (prev_sp_offset + ainfo->offset)) {
5956 ARM_LDR_IMM (code, ARMREG_LR, ARMREG_SP, (prev_sp_offset + ainfo->offset));
5958 code = mono_arm_emit_load_imm (code, ARMREG_IP, prev_sp_offset + ainfo->offset);
5959 ARM_LDR_REG_REG (code, ARMREG_LR, ARMREG_SP, ARMREG_IP);
5961 if (arm_is_imm12 (inst->inst_offset + 4)) {
5962 ARM_STR_IMM (code, ARMREG_LR, inst->inst_basereg, inst->inst_offset + 4);
5963 ARM_STR_IMM (code, ARMREG_R3, inst->inst_basereg, inst->inst_offset);
5965 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset + 4);
5966 ARM_STR_REG_REG (code, ARMREG_LR, inst->inst_basereg, ARMREG_IP);
5967 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
5968 ARM_STR_REG_REG (code, ARMREG_R3, inst->inst_basereg, ARMREG_IP);
/* RegTypeBase: argument lives entirely in the caller's frame; copy via LR. */
5970 } else if (ainfo->storage == RegTypeBase || ainfo->storage == RegTypeGSharedVtOnStack) {
5971 if (arm_is_imm12 (prev_sp_offset + ainfo->offset)) {
5972 ARM_LDR_IMM (code, ARMREG_LR, ARMREG_SP, (prev_sp_offset + ainfo->offset));
5974 code = mono_arm_emit_load_imm (code, ARMREG_IP, prev_sp_offset + ainfo->offset);
5975 ARM_LDR_REG_REG (code, ARMREG_LR, ARMREG_SP, ARMREG_IP);
5978 switch (ainfo->size) {
5980 if (arm_is_imm8 (inst->inst_offset)) {
5981 ARM_STRB_IMM (code, ARMREG_LR, inst->inst_basereg, inst->inst_offset);
5983 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
5984 ARM_STRB_REG_REG (code, ARMREG_LR, inst->inst_basereg, ARMREG_IP);
5988 if (arm_is_imm8 (inst->inst_offset)) {
5989 ARM_STRH_IMM (code, ARMREG_LR, inst->inst_basereg, inst->inst_offset);
5991 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
5992 ARM_STRH_REG_REG (code, ARMREG_LR, inst->inst_basereg, ARMREG_IP);
5996 if (arm_is_imm12 (inst->inst_offset)) {
5997 ARM_STR_IMM (code, ARMREG_LR, inst->inst_basereg, inst->inst_offset);
5999 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
6000 ARM_STR_REG_REG (code, ARMREG_LR, inst->inst_basereg, ARMREG_IP);
6002 if (arm_is_imm12 (prev_sp_offset + ainfo->offset + 4)) {
6003 ARM_LDR_IMM (code, ARMREG_LR, ARMREG_SP, (prev_sp_offset + ainfo->offset + 4));
6005 code = mono_arm_emit_load_imm (code, ARMREG_IP, prev_sp_offset + ainfo->offset + 4);
6006 ARM_LDR_REG_REG (code, ARMREG_LR, ARMREG_SP, ARMREG_IP);
6008 if (arm_is_imm12 (inst->inst_offset + 4)) {
6009 ARM_STR_IMM (code, ARMREG_LR, inst->inst_basereg, inst->inst_offset + 4);
6011 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset + 4);
6012 ARM_STR_REG_REG (code, ARMREG_LR, inst->inst_basereg, ARMREG_IP);
6016 if (arm_is_imm12 (inst->inst_offset)) {
6017 ARM_STR_IMM (code, ARMREG_LR, inst->inst_basereg, inst->inst_offset);
6019 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
6020 ARM_STR_REG_REG (code, ARMREG_LR, inst->inst_basereg, ARMREG_IP);
/* RegTypeFP: store a hardware-FP argument register (FSTD/FSTS) to its slot. */
6024 } else if (ainfo->storage == RegTypeFP) {
6025 int imm8, rot_amount;
6027 if ((imm8 = mono_arm_is_rotated_imm8 (inst->inst_offset, &rot_amount)) == -1) {
6028 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
6029 ARM_ADD_REG_REG (code, ARMREG_IP, ARMREG_IP, inst->inst_basereg);
6031 ARM_ADD_REG_IMM (code, ARMREG_IP, inst->inst_basereg, imm8, rot_amount);
6033 if (ainfo->size == 8)
6034 ARM_FSTD (code, ainfo->reg, ARMREG_IP, 0);
6036 ARM_FSTS (code, ainfo->reg, ARMREG_IP, 0);
/* RegTypeStructByVal: register-passed part stored word-by-word, remainder memcpy'd from the caller's frame. */
6037 } else if (ainfo->storage == RegTypeStructByVal) {
6038 int doffset = inst->inst_offset;
6042 size = mini_type_stack_size_full (cfg->generic_sharing_context, inst->inst_vtype, NULL, sig->pinvoke);
6043 for (cur_reg = 0; cur_reg < ainfo->size; ++cur_reg) {
6044 if (arm_is_imm12 (doffset)) {
6045 ARM_STR_IMM (code, ainfo->reg + cur_reg, inst->inst_basereg, doffset);
6047 code = mono_arm_emit_load_imm (code, ARMREG_IP, doffset);
6048 ARM_STR_REG_REG (code, ainfo->reg + cur_reg, inst->inst_basereg, ARMREG_IP);
6050 soffset += sizeof (gpointer);
6051 doffset += sizeof (gpointer);
6053 if (ainfo->vtsize) {
6054 /* FIXME: handle overrun! with struct sizes not multiple of 4 */
6055 //g_print ("emit_memcpy (prev_sp_ofs: %d, ainfo->offset: %d, soffset: %d)\n", prev_sp_offset, ainfo->offset, soffset);
6056 code = emit_memcpy (code, ainfo->vtsize * sizeof (gpointer), inst->inst_basereg, doffset, ARMREG_SP, prev_sp_offset + ainfo->offset);
6058 } else if (ainfo->storage == RegTypeStructByAddr) {
6059 g_assert_not_reached ();
6060 /* FIXME: handle overrun! with struct sizes not multiple of 4 */
6061 code = emit_memcpy (code, ainfo->vtsize * sizeof (gpointer), inst->inst_basereg, inst->inst_offset, ainfo->reg, 0);
6063 g_assert_not_reached ();
/* Fill in the rest of the MonoLMF (the registers were pushed above). */
6068 if (method->save_lmf)
6069 code = emit_save_lmf (cfg, code, alloc_size - lmf_offset);
6072 code = mono_arch_instrument_prolog (cfg, mono_trace_enter_method, code, TRUE);
/* Debugger support: materialize the SeqPointInfo pointer into its variable. */
6074 if (cfg->arch.seq_point_info_var) {
6075 MonoInst *ins = cfg->arch.seq_point_info_var;
6077 /* Initialize the variable from a GOT slot */
6078 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_SEQ_POINT_INFO, cfg->method);
6079 #ifdef USE_JUMP_TABLES
6081 gpointer *jte = mono_jumptable_add_entry ();
6082 code = mono_arm_load_jumptable_entry (code, jte, ARMREG_IP);
6083 ARM_LDR_IMM (code, ARMREG_R0, ARMREG_IP, 0);
6085 /** XXX: is it correct? */
6087 ARM_LDR_IMM (code, ARMREG_R0, ARMREG_PC, 0);
6089 *(gpointer*)code = NULL;
6092 ARM_LDR_REG_REG (code, ARMREG_R0, ARMREG_PC, ARMREG_R0);
6094 g_assert (ins->opcode == OP_REGOFFSET);
6096 if (arm_is_imm12 (ins->inst_offset)) {
6097 ARM_STR_IMM (code, ARMREG_R0, ins->inst_basereg, ins->inst_offset);
6099 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_offset);
6100 ARM_STR_REG_REG (code, ARMREG_R0, ins->inst_basereg, ARMREG_LR);
6104 /* Initialize ss_trigger_page_var */
6105 if (!cfg->soft_breakpoints) {
6106 MonoInst *info_var = cfg->arch.seq_point_info_var;
6107 MonoInst *ss_trigger_page_var = cfg->arch.ss_trigger_page_var;
6108 int dreg = ARMREG_LR;
6111 g_assert (info_var->opcode == OP_REGOFFSET);
6112 g_assert (arm_is_imm12 (info_var->inst_offset));
6114 ARM_LDR_IMM (code, dreg, info_var->inst_basereg, info_var->inst_offset);
6115 /* Load the trigger page addr */
6116 ARM_LDR_IMM (code, dreg, dreg, MONO_STRUCT_OFFSET (SeqPointInfo, ss_trigger_page));
6117 ARM_STR_IMM (code, dreg, ss_trigger_page_var->inst_basereg, ss_trigger_page_var->inst_offset);
/*
 * Soft-breakpoint support: load the addresses of the single-step trigger
 * variable and of the step/breakpoint wrapper functions into their
 * per-method variables (via jump table or inlined literals).
 */
6121 if (cfg->arch.seq_point_read_var) {
6122 MonoInst *read_ins = cfg->arch.seq_point_read_var;
6123 MonoInst *ss_method_ins = cfg->arch.seq_point_ss_method_var;
6124 MonoInst *bp_method_ins = cfg->arch.seq_point_bp_method_var;
6125 #ifdef USE_JUMP_TABLES
6128 g_assert (read_ins->opcode == OP_REGOFFSET);
6129 g_assert (arm_is_imm12 (read_ins->inst_offset));
6130 g_assert (ss_method_ins->opcode == OP_REGOFFSET);
6131 g_assert (arm_is_imm12 (ss_method_ins->inst_offset));
6132 g_assert (bp_method_ins->opcode == OP_REGOFFSET);
6133 g_assert (arm_is_imm12 (bp_method_ins->inst_offset));
6135 #ifdef USE_JUMP_TABLES
6136 jte = mono_jumptable_add_entries (3);
6137 jte [0] = (gpointer)&ss_trigger_var;
6138 jte [1] = single_step_func_wrapper;
6139 jte [2] = breakpoint_func_wrapper;
6140 code = mono_arm_load_jumptable_entry_addr (code, jte, ARMREG_LR);
6142 ARM_MOV_REG_REG (code, ARMREG_LR, ARMREG_PC);
6144 *(volatile int **)code = &ss_trigger_var;
6146 *(gpointer*)code = single_step_func_wrapper;
6148 *(gpointer*)code = breakpoint_func_wrapper;
6152 ARM_LDR_IMM (code, ARMREG_IP, ARMREG_LR, 0);
6153 ARM_STR_IMM (code, ARMREG_IP, read_ins->inst_basereg, read_ins->inst_offset);
6154 ARM_LDR_IMM (code, ARMREG_IP, ARMREG_LR, 4);
6155 ARM_STR_IMM (code, ARMREG_IP, ss_method_ins->inst_basereg, ss_method_ins->inst_offset);
6156 ARM_LDR_IMM (code, ARMREG_IP, ARMREG_LR, 8);
6157 ARM_STR_IMM (code, ARMREG_IP, bp_method_ins->inst_basereg, bp_method_ins->inst_offset);
/* Record the final prolog length and make sure we stayed inside the buffer. */
6160 cfg->code_len = code - cfg->native_code;
6161 g_assert (cfg->code_len < cfg->code_size);
/*
 * mono_arch_emit_epilog:
 *
 *   Emit the method epilog: optionally grow the code buffer, emit the
 * leave-method trace call, move a struct-by-value return into r0, then
 * either restore the LMF (when method->save_lmf) or pop the saved
 * registers, loading the saved LR into PC to return.
 * NOTE(review): this chunk is sampled — some structural lines (braces,
 * else branches, `pos`/`lmf_offset` setup) are not visible here.
 */
mono_arch_emit_epilog (MonoCompile *cfg)
	MonoMethod *method = cfg->method;
	int pos, i, rot_amount;
	/* Conservative upper bound on the bytes emitted below */
	int max_epilog_size = 16 + 20*4;
	if (cfg->method->save_lmf)
		max_epilog_size += 128;
	if (mono_jit_trace_calls != NULL)
		max_epilog_size += 50;
	if (cfg->prof_options & MONO_PROFILE_ENTER_LEAVE)
		max_epilog_size += 50;
	/* Double the buffer until the epilog is guaranteed to fit */
	while (cfg->code_len + max_epilog_size > (cfg->code_size - 16)) {
		cfg->code_size *= 2;
		cfg->native_code = g_realloc (cfg->native_code, cfg->code_size);
		cfg->stat_code_reallocs++;
	/* Keep in sync with OP_JMP */
	code = cfg->native_code + cfg->code_len;
	if (mono_jit_trace_calls != NULL && mono_trace_eval (method)) {
		code = mono_arch_instrument_epilog (cfg, mono_trace_leave_method, code, TRUE);
	/* Load returned vtypes into registers if needed */
	cinfo = cfg->arch.cinfo;
	if (cinfo->ret.storage == RegTypeStructByVal) {
		MonoInst *ins = cfg->ret;
		if (arm_is_imm12 (ins->inst_offset)) {
			ARM_LDR_IMM (code, ARMREG_R0, ins->inst_basereg, ins->inst_offset);
			/* Offset too large for an LDR immediate: materialize it in LR first */
			code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_offset);
			ARM_LDR_REG_REG (code, ARMREG_R0, ins->inst_basereg, ARMREG_LR);
	if (method->save_lmf) {
		int lmf_offset, reg, sp_adj, regmask;
		/* all but r0-r3, sp and pc */
		/* NOTE(review): `pos` appears to accumulate from earlier (sampled-out) code */
		pos += sizeof (MonoLMF) - (MONO_ARM_NUM_SAVED_REGS * sizeof (mgreg_t));
		code = emit_restore_lmf (cfg, code, cfg->stack_usage - lmf_offset);
		/* This points to r4 inside MonoLMF->iregs */
		sp_adj = (sizeof (MonoLMF) - MONO_ARM_NUM_SAVED_REGS * sizeof (mgreg_t));
		regmask = 0x9ff0; /* restore lr to pc */
		/* Skip caller saved registers not used by the method */
		while (!(cfg->used_int_regs & (1 << reg)) && reg < ARMREG_FP) {
			regmask &= ~(1 << reg);
		/* Restored later */
		regmask &= ~(1 << ARMREG_PC);
		/* point sp at the registers to restore: 10 is 14 -4, because we skip r0-r3 */
		code = emit_big_add (code, ARMREG_SP, cfg->frame_reg, cfg->stack_usage - lmf_offset + sp_adj);
		ARM_POP (code, regmask);
		/* Restore saved r7, restore LR to PC */
		/* Skip lr from the lmf */
		ARM_ADD_REG_IMM (code, ARMREG_SP, ARMREG_SP, sizeof (gpointer), 0);
		ARM_POP (code, (1 << ARMREG_R7) | (1 << ARMREG_PC));
		/* No LMF: rewind SP to the saved-register area and pop */
		if ((i = mono_arm_is_rotated_imm8 (cfg->stack_usage, &rot_amount)) >= 0) {
			ARM_ADD_REG_IMM (code, ARMREG_SP, cfg->frame_reg, i, rot_amount);
			/* stack_usage does not fit a rotated imm8: load it into IP */
			code = mono_arm_emit_load_imm (code, ARMREG_IP, cfg->stack_usage);
			ARM_ADD_REG_REG (code, ARMREG_SP, cfg->frame_reg, ARMREG_IP);
		/* Restore saved gregs */
		if (cfg->used_int_regs)
			ARM_POP (code, cfg->used_int_regs);
		/* Restore saved r7, restore LR to PC */
		ARM_POP (code, (1 << ARMREG_R7) | (1 << ARMREG_PC));
		/* Single pop restores used regs and returns via PC in one instruction */
		ARM_POP (code, cfg->used_int_regs | (1 << ARMREG_PC));
	cfg->code_len = code - cfg->native_code;
	g_assert (cfg->code_len < cfg->code_size);
/*
 * mono_arch_emit_exceptions:
 *
 *   Emit the out-of-line exception-throwing stubs at the end of the method.
 * Identical corlib exceptions share one stub; each stub loads the exception
 * type token and calls mono_arch_throw_corlib_exception.
 * NOTE(review): this chunk is sampled — some braces/`#else` lines are not
 * visible here.
 */
mono_arch_emit_exceptions (MonoCompile *cfg)
	MonoJumpInfo *patch_info;
	/* One deduplication slot per intrinsic corlib exception kind */
	guint8* exc_throw_pos [MONO_EXC_INTRINS_NUM];
	guint8 exc_throw_found [MONO_EXC_INTRINS_NUM];
	int max_epilog_size = 50;
	for (i = 0; i < MONO_EXC_INTRINS_NUM; i++) {
		exc_throw_pos [i] = NULL;
		exc_throw_found [i] = 0;
	/*
	 * Count the number of exception infos and
	 * make sure we have enough space for exceptions.
	 */
	for (patch_info = cfg->patch_info; patch_info; patch_info = patch_info->next) {
		if (patch_info->type == MONO_PATCH_INFO_EXC) {
			i = mini_exception_id_by_name (patch_info->data.target);
			if (!exc_throw_found [i]) {
				/* Each distinct exception kind costs one stub */
				max_epilog_size += 32;
				exc_throw_found [i] = TRUE;
	while (cfg->code_len + max_epilog_size > (cfg->code_size - 16)) {
		cfg->code_size *= 2;
		cfg->native_code = g_realloc (cfg->native_code, cfg->code_size);
		cfg->stat_code_reallocs++;
	code = cfg->native_code + cfg->code_len;
	/* add code to raise exceptions */
	for (patch_info = cfg->patch_info; patch_info; patch_info = patch_info->next) {
		switch (patch_info->type) {
		case MONO_PATCH_INFO_EXC: {
			MonoClass *exc_class;
			unsigned char *ip = patch_info->ip.i + cfg->native_code;
			i = mini_exception_id_by_name (patch_info->data.target);
			if (exc_throw_pos [i]) {
				/* Stub for this exception already emitted: branch to it */
				arm_patch (ip, exc_throw_pos [i]);
				patch_info->type = MONO_PATCH_INFO_NONE;
			exc_throw_pos [i] = code;
			arm_patch (ip, code);
			exc_class = mono_class_from_name (mono_defaults.corlib, "System", patch_info->data.name);
			g_assert (exc_class);
			/* R1 = faulting IP (the caller's LR) */
			ARM_MOV_REG_REG (code, ARMREG_R1, ARMREG_LR);
#ifdef USE_JUMP_TABLES
			/* jte [0] = throw helper addr, jte [1] = exception type token */
			gpointer *jte = mono_jumptable_add_entries (2);
			patch_info->type = MONO_PATCH_INFO_INTERNAL_METHOD;
			patch_info->data.name = "mono_arch_throw_corlib_exception";
			patch_info->ip.i = code - cfg->native_code;
			code = mono_arm_load_jumptable_entry_addr (code, jte, ARMREG_R0);
			ARM_LDR_IMM (code, ARMREG_IP, ARMREG_R0, 0);
			ARM_LDR_IMM (code, ARMREG_R0, ARMREG_R0, 4);
			ARM_BLX_REG (code, ARMREG_IP);
			jte [1] = GUINT_TO_POINTER (exc_class->type_token);
			/* Non-jumptable path: token is a literal word in the code stream */
			ARM_LDR_IMM (code, ARMREG_R0, ARMREG_PC, 0);
			patch_info->type = MONO_PATCH_INFO_INTERNAL_METHOD;
			patch_info->data.name = "mono_arch_throw_corlib_exception";
			patch_info->ip.i = code - cfg->native_code;
			*(guint32*)(gpointer)code = exc_class->type_token;
	cfg->code_len = code - cfg->native_code;
	g_assert (cfg->code_len < cfg->code_size);
6364 #endif /* #ifndef DISABLE_JIT */
6367 mono_arch_finish_init (void)
6372 mono_arch_free_jit_tls_data (MonoJitTlsData *tls)
6377 mono_arch_emit_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
6384 mono_arch_print_tree (MonoInst *tree, int arity)
6394 mono_arch_get_patch_offset (guint8 *code)
6401 mono_arch_flush_register_windows (void)
6405 #ifdef MONO_ARCH_HAVE_IMT
/*
 * mono_arch_emit_imt_argument:
 *
 *   Arrange for the IMT method (or the static method constant) to be
 * passed to CALL in V5. Under AOT, jumptables, LLVM or generic sharing
 * the argument is always passed in the register.
 * NOTE(review): sampled chunk — some braces/`#else` lines are missing.
 */
mono_arch_emit_imt_argument (MonoCompile *cfg, MonoCallInst *call, MonoInst *imt_arg)
	int method_reg = mono_alloc_ireg (cfg);
#ifdef USE_JUMP_TABLES
	int use_jumptables = TRUE;
	int use_jumptables = FALSE;
	if (cfg->compile_aot) {
		call->dynamic_imt_arg = TRUE;
		/* An explicit IMT arg exists: just copy it */
		MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, method_reg, imt_arg->dreg);
		/* Otherwise load the method as an AOT constant */
		MONO_INST_NEW (cfg, ins, OP_AOTCONST);
		ins->dreg = method_reg;
		ins->inst_p0 = call->method;
		ins->inst_c1 = MONO_PATCH_INFO_METHODCONST;
		MONO_ADD_INS (cfg->cbb, ins);
		mono_call_inst_add_outarg_reg (cfg, call, method_reg, ARMREG_V5, FALSE);
	} else if (cfg->generic_context || imt_arg || mono_use_llvm || use_jumptables) {
		/* Always pass in a register for simplicity */
		call->dynamic_imt_arg = TRUE;
		cfg->uses_rgctx_reg = TRUE;
		MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, method_reg, imt_arg->dreg);
		/* No IMT arg: pass the method itself as a pointer constant */
		MONO_INST_NEW (cfg, ins, OP_PCONST);
		ins->inst_p0 = call->method;
		ins->dreg = method_reg;
		MONO_ADD_INS (cfg->cbb, ins);
		mono_call_inst_add_outarg_reg (cfg, call, method_reg, ARMREG_V5, FALSE);
6455 #endif /* DISABLE_JIT */
/*
 * mono_arch_find_imt_method:
 *
 *   Recover the IMT method for an interface call, either from register V5
 * (jumptables / AOT / LLVM / gsharedvt) or from the literal word embedded
 * in the code stream right after the pc-relative LDR at the call site.
 * NOTE(review): sampled chunk — several conditional branches and `#else`
 * lines are missing from this view.
 */
mono_arch_find_imt_method (mgreg_t *regs, guint8 *code)
#ifdef USE_JUMP_TABLES
	/* With jumptables the IMT method is always passed in V5 */
	return (MonoMethod*)regs [ARMREG_V5];
	guint32 *code_ptr = (guint32*)code;
	/* The method pointer is the word following the LDR instruction */
	method = GUINT_TO_POINTER (code_ptr [1]);
	return (MonoMethod*)regs [ARMREG_V5];
	/* The IMT value is stored in the code stream right after the LDC instruction. */
	/* This is no longer true for the gsharedvt_in trampoline */
	if (!IS_LDR_PC (code_ptr [0])) {
		g_warning ("invalid code stream, instruction before IMT value is not a LDC in %s() (code %p value 0: 0x%x -1: 0x%x -2: 0x%x)", __FUNCTION__, code, code_ptr [2], code_ptr [1], code_ptr [0]);
		g_assert (IS_LDR_PC (code_ptr [0]));
	/* This is AOTed code, or the gsharedvt trampoline, the IMT method is in V5 */
	return (MonoMethod*)regs [ARMREG_V5];
	return (MonoMethod*) method;
/*
 * mono_arch_find_static_call_vtable:
 *
 *   The static-rgctx vtable is passed in the dedicated RGCTX register.
 */
mono_arch_find_static_call_vtable (mgreg_t *regs, guint8 *code)
	return (MonoVTable*) regs [MONO_ARCH_RGCTX_REG];
6494 /* #define ENABLE_WRONG_METHOD_CHECK 1 */
6495 #define BASE_SIZE (6 * 4)
6496 #define BSEARCH_ENTRY_SIZE (4 * 4)
6497 #define CMP_SIZE (3 * 4)
6498 #define BRANCH_SIZE (1 * 4)
6499 #define CALL_SIZE (2 * 4)
6500 #define WMC_SIZE (8 * 4)
6501 #define DISTANCE(A, B) (((gint32)(B)) - ((gint32)(A)))
6503 #ifdef USE_JUMP_TABLES
/*
 * set_jumptable_element:
 *
 *   Store VALUE at BASE [INDEX], asserting the slot was not already filled
 * (each jumptable slot must be written exactly once).
 */
set_jumptable_element (gpointer *base, guint32 index, gpointer value)
	g_assert (base [index] == NULL);
	base [index] = value;
/*
 * load_element_with_regbase_cond:
 *
 *   Conditionally load jumptable entry JTI (byte offset jti*4 from BASE)
 * into DREG. Uses a single LDR when the offset fits in 12 bits; otherwise
 * materializes the offset with MOVW/MOVT and does a register-offset LDR.
 * NOTE(review): the `else` brace and `return code;` are sampled out of
 * this view.
 */
load_element_with_regbase_cond (arminstr_t *code, ARMReg dreg, ARMReg base, guint32 jti, int cond)
	if (arm_is_imm12 (jti * 4)) {
		ARM_LDR_IMM_COND (code, dreg, base, jti * 4, cond);
		ARM_MOVW_REG_IMM_COND (code, dreg, (jti * 4) & 0xffff, cond);
		/* Only emit MOVT when the offset has high bits set */
		if ((jti * 4) >> 16)
			ARM_MOVT_REG_IMM_COND (code, dreg, ((jti * 4) >> 16) & 0xffff, cond);
		ARM_LDR_REG_REG_SHIFT_COND (code, dreg, base, dreg, ARMSHIFT_LSL, 0, cond);
6525 arm_emit_value_and_patch_ldr (arminstr_t *code, arminstr_t *target, guint32 value)
6527 guint32 delta = DISTANCE (target, code);
6529 g_assert (delta >= 0 && delta <= 0xFFF);
6530 *target = *target | delta;
6536 #ifdef ENABLE_WRONG_METHOD_CHECK
/* Debug helper called from generated code when the WMC check fails. */
mini_dump_bad_imt (int input_imt, int compared_imt, int pc)
	g_print ("BAD IMT comparing %x with expected %x at ip %x", input_imt, compared_imt, pc);
/*
 * mono_arch_build_imt_thunk:
 *
 *   Build a native IMT (interface method table) thunk: a code sequence that
 * compares the incoming IMT method against each IMT_ENTRIES item and
 * branches to the matching vtable slot or target code, using a
 * binary-search layout (the !is_equals entries) for larger tables. When
 * FAIL_TRAMP is set, a non-matching lookup jumps there instead.
 *   Two code shapes exist: the USE_JUMP_TABLES variant keeps all constants
 * in an external jumptable addressed via R2, while the classic variant
 * embeds constants as literal words in the code stream and back-patches
 * pc-relative LDRs with arm_emit_value_and_patch_ldr ().
 * NOTE(review): this chunk is sampled — many `#else`/`#endif` lines,
 * braces and a few statements (e.g. the `size`/`i` declarations) are not
 * visible, so the nesting shown below is best-effort.
 */
mono_arch_build_imt_thunk (MonoVTable *vtable, MonoDomain *domain, MonoIMTCheckItem **imt_entries, int count,
	gpointer fail_tramp)
	arminstr_t *code, *start;
#ifdef USE_JUMP_TABLES
	gboolean large_offsets = FALSE;
	guint32 **constant_pool_starts;
	arminstr_t *vtable_target = NULL;
	int extra_space = 0;
#ifdef ENABLE_WRONG_METHOD_CHECK
#ifdef USE_JUMP_TABLES
	/* Jumptable variant: every entry gets a fixed-size chunk (16 insns) */
	for (i = 0; i < count; ++i) {
		MonoIMTCheckItem *item = imt_entries [i];
		item->chunk_size += 4 * 16;
		if (!item->is_equals)
			imt_entries [item->check_target_idx]->compare_done = TRUE;
		size += item->chunk_size;
	constant_pool_starts = g_new0 (guint32*, count);
	/* First pass: size each entry's chunk so the total allocation is known */
	for (i = 0; i < count; ++i) {
		MonoIMTCheckItem *item = imt_entries [i];
		if (item->is_equals) {
			gboolean fail_case = !item->check_target_idx && fail_tramp;
			if (item->has_target_code || !arm_is_imm12 (DISTANCE (vtable, &vtable->vtable[item->value.vtable_slot]))) {
				/* Needs the stack-based LDM dispatch sequence */
				item->chunk_size += 32;
				large_offsets = TRUE;
			if (item->check_target_idx || fail_case) {
				if (!item->compare_done || fail_case)
					item->chunk_size += CMP_SIZE;
				item->chunk_size += BRANCH_SIZE;
#ifdef ENABLE_WRONG_METHOD_CHECK
			item->chunk_size += WMC_SIZE;
			item->chunk_size += 16;
			large_offsets = TRUE;
			item->chunk_size += CALL_SIZE;
			/* !is_equals: a binary-search compare-and-branch entry */
			item->chunk_size += BSEARCH_ENTRY_SIZE;
			imt_entries [item->check_target_idx]->compare_done = TRUE;
		size += item->chunk_size;
	size += 4 * count; /* The ARM_ADD_REG_IMM to pop the stack */
	/* fail_tramp thunks live in the generic-virtual thunk area */
	code = mono_method_alloc_generic_virtual_thunk (domain, size);
	code = mono_domain_code_reserve (domain, size);
	g_print ("Building IMT thunk for class %s %s entries %d code size %d code at %p end %p vtable %p fail_tramp %p\n", vtable->klass->name_space, vtable->klass->name, count, size, start, ((guint8*)start) + size, vtable, fail_tramp);
	for (i = 0; i < count; ++i) {
		MonoIMTCheckItem *item = imt_entries [i];
		g_print ("method %d (%p) %s vtable slot %p is_equals %d chunk size %d\n", i, item->key, ((MonoMethod*)item->key)->name, &vtable->vtable [item->value.vtable_slot], item->is_equals, item->chunk_size);
#ifdef USE_JUMP_TABLES
	ARM_PUSH3 (code, ARMREG_R0, ARMREG_R1, ARMREG_R2);
	/* If jumptables we always pass the IMT method in R5 */
	ARM_MOV_REG_REG (code, ARMREG_R0, ARMREG_V5);
/* Jumptable layout: slot 0 = vtable, then 3 slots per IMT entry */
#define VTABLE_JTI 0
#define IMT_METHOD_OFFSET 0
#define TARGET_CODE_OFFSET 1
#define JUMP_CODE_OFFSET 2
#define RECORDS_PER_ENTRY 3
#define IMT_METHOD_JTI(idx) (1 + idx * RECORDS_PER_ENTRY + IMT_METHOD_OFFSET)
#define TARGET_CODE_JTI(idx) (1 + idx * RECORDS_PER_ENTRY + TARGET_CODE_OFFSET)
#define JUMP_CODE_JTI(idx) (1 + idx * RECORDS_PER_ENTRY + JUMP_CODE_OFFSET)
	jte = mono_jumptable_add_entries (RECORDS_PER_ENTRY * count + 1 /* vtable */);
	code = (arminstr_t *) mono_arm_load_jumptable_entry_addr ((guint8 *) code, jte, ARMREG_R2);
	ARM_LDR_IMM (code, ARMREG_IP, ARMREG_R2, VTABLE_JTI);
	set_jumptable_element (jte, VTABLE_JTI, vtable);
	/* Classic variant: save scratch regs on the stack for the LDM dispatch */
	ARM_PUSH4 (code, ARMREG_R0, ARMREG_R1, ARMREG_IP, ARMREG_PC);
	ARM_PUSH2 (code, ARMREG_R0, ARMREG_R1);
	/* Load the IMT method word embedded before the call site */
	ARM_LDR_IMM (code, ARMREG_R0, ARMREG_LR, -4);
	vtable_target = code;
	ARM_LDR_IMM (code, ARMREG_IP, ARMREG_PC, 0);
	if (mono_use_llvm) {
		/* LLVM always passes the IMT method in R5 */
		ARM_MOV_REG_REG (code, ARMREG_R0, ARMREG_V5);
		/* R0 == 0 means we are called from AOT code. In this case, V5 contains the IMT method */
		ARM_CMP_REG_IMM8 (code, ARMREG_R0, 0);
		ARM_MOV_REG_REG_COND (code, ARMREG_R0, ARMREG_V5, ARMCOND_EQ);
	/* Second pass: emit the compare/dispatch code for every entry */
	for (i = 0; i < count; ++i) {
		MonoIMTCheckItem *item = imt_entries [i];
#ifdef USE_JUMP_TABLES
		guint32 imt_method_jti = 0, target_code_jti = 0;
		arminstr_t *imt_method = NULL, *vtable_offset_ins = NULL, *target_code_ins = NULL;
		gint32 vtable_offset;
		item->code_target = (guint8*)code;
		if (item->is_equals) {
			gboolean fail_case = !item->check_target_idx && fail_tramp;
			if (item->check_target_idx || fail_case) {
				if (!item->compare_done || fail_case) {
#ifdef USE_JUMP_TABLES
					imt_method_jti = IMT_METHOD_JTI (i);
					code = load_element_with_regbase_cond (code, ARMREG_R1, ARMREG_R2, imt_method_jti, ARMCOND_AL);
					ARM_LDR_IMM (code, ARMREG_R1, ARMREG_PC, 0);
					ARM_CMP_REG_REG (code, ARMREG_R0, ARMREG_R1);
#ifdef USE_JUMP_TABLES
				/* On mismatch, branch to the address recorded in the jump slot */
				code = load_element_with_regbase_cond (code, ARMREG_R1, ARMREG_R2, JUMP_CODE_JTI (i), ARMCOND_NE);
				ARM_BX_COND (code, ARMCOND_NE, ARMREG_R1);
				item->jmp_code = GUINT_TO_POINTER (JUMP_CODE_JTI (i));
				/* Branch target filled in later by arm_patch () */
				item->jmp_code = (guint8*)code;
				ARM_B_COND (code, ARMCOND_NE, 0);
			/*Enable the commented code to assert on wrong method*/
#ifdef ENABLE_WRONG_METHOD_CHECK
#ifdef USE_JUMP_TABLES
			imt_method_jti = IMT_METHOD_JTI (i);
			code = load_element_with_regbase_cond (code, ARMREG_R1, ARMREG_R2, imt_method_jti, ARMCOND_AL);
			ARM_LDR_IMM (code, ARMREG_R1, ARMREG_PC, 0);
			ARM_CMP_REG_REG (code, ARMREG_R0, ARMREG_R1);
			ARM_B_COND (code, ARMCOND_EQ, 0);
			/* Define this if your system is so bad that gdb is failing. */
#ifdef BROKEN_DEV_ENV
			ARM_MOV_REG_REG (code, ARMREG_R2, ARMREG_PC);
			arm_patch (code - 1, mini_dump_bad_imt);
			arm_patch (cond, code);
			if (item->has_target_code) {
				/* Load target address */
#ifdef USE_JUMP_TABLES
				target_code_jti = TARGET_CODE_JTI (i);
				code = load_element_with_regbase_cond (code, ARMREG_R1, ARMREG_R2, target_code_jti, ARMCOND_AL);
				/* Restore registers */
				ARM_POP3 (code, ARMREG_R0, ARMREG_R1, ARMREG_R2);
				ARM_BX (code, ARMREG_R1);
				set_jumptable_element (jte, target_code_jti, item->value.target_code);
				target_code_ins = code;
				ARM_LDR_IMM (code, ARMREG_R1, ARMREG_PC, 0);
				/* Save it to the fourth slot */
				ARM_STR_IMM (code, ARMREG_R1, ARMREG_SP, 3 * sizeof (gpointer));
				/* Restore registers and branch */
				ARM_POP4 (code, ARMREG_R0, ARMREG_R1, ARMREG_IP, ARMREG_PC);
				code = arm_emit_value_and_patch_ldr (code, target_code_ins, (gsize)item->value.target_code);
			/* No target code: dispatch through the vtable slot */
			vtable_offset = DISTANCE (vtable, &vtable->vtable[item->value.vtable_slot]);
			if (!arm_is_imm12 (vtable_offset)) {
				/*
				 * We need to branch to a computed address but we don't have
				 * a free register to store it, since IP must contain the
				 * vtable address. So we push the two values to the stack, and
				 * load them both using LDM.
				 */
				/* Compute target address */
#ifdef USE_JUMP_TABLES
				ARM_MOVW_REG_IMM (code, ARMREG_R1, vtable_offset & 0xffff);
				if (vtable_offset >> 16)
					ARM_MOVT_REG_IMM (code, ARMREG_R1, (vtable_offset >> 16) & 0xffff);
				/* IP had vtable base. */
				ARM_LDR_REG_REG (code, ARMREG_IP, ARMREG_IP, ARMREG_R1);
				/* Restore registers and branch */
				ARM_POP3 (code, ARMREG_R0, ARMREG_R1, ARMREG_R2);
				ARM_BX (code, ARMREG_IP);
				vtable_offset_ins = code;
				ARM_LDR_IMM (code, ARMREG_R1, ARMREG_PC, 0);
				ARM_LDR_REG_REG (code, ARMREG_R1, ARMREG_IP, ARMREG_R1);
				/* Save it to the fourth slot */
				ARM_STR_IMM (code, ARMREG_R1, ARMREG_SP, 3 * sizeof (gpointer));
				/* Restore registers and branch */
				ARM_POP4 (code, ARMREG_R0, ARMREG_R1, ARMREG_IP, ARMREG_PC);
				code = arm_emit_value_and_patch_ldr (code, vtable_offset_ins, vtable_offset);
				/* Small vtable offset: a single LDR reaches the slot */
#ifdef USE_JUMP_TABLES
				ARM_LDR_IMM (code, ARMREG_IP, ARMREG_IP, vtable_offset);
				ARM_POP3 (code, ARMREG_R0, ARMREG_R1, ARMREG_R2);
				ARM_BX (code, ARMREG_IP);
				ARM_POP2 (code, ARMREG_R0, ARMREG_R1);
				/* Discard the two extra stack slots pushed by the prologue */
				ARM_ADD_REG_IMM8 (code, ARMREG_SP, ARMREG_SP, 2 * sizeof (gpointer));
				ARM_LDR_IMM (code, ARMREG_PC, ARMREG_IP, vtable_offset);
			/* fail_case: route the mismatch branch to the fail trampoline */
#ifdef USE_JUMP_TABLES
			set_jumptable_element (jte, GPOINTER_TO_UINT (item->jmp_code), code);
			target_code_jti = TARGET_CODE_JTI (i);
			/* Load target address */
			code = load_element_with_regbase_cond (code, ARMREG_R1, ARMREG_R2, target_code_jti, ARMCOND_AL);
			/* Restore registers */
			ARM_POP3 (code, ARMREG_R0, ARMREG_R1, ARMREG_R2);
			ARM_BX (code, ARMREG_R1);
			set_jumptable_element (jte, target_code_jti, fail_tramp);
			arm_patch (item->jmp_code, (guchar*)code);
			target_code_ins = code;
			/* Load target address */
			ARM_LDR_IMM (code, ARMREG_R1, ARMREG_PC, 0);
			/* Save it to the fourth slot */
			ARM_STR_IMM (code, ARMREG_R1, ARMREG_SP, 3 * sizeof (gpointer));
			/* Restore registers and branch */
			ARM_POP4 (code, ARMREG_R0, ARMREG_R1, ARMREG_IP, ARMREG_PC);
			code = arm_emit_value_and_patch_ldr (code, target_code_ins, (gsize)fail_tramp);
			item->jmp_code = NULL;
#ifdef USE_JUMP_TABLES
			set_jumptable_element (jte, imt_method_jti, item->key);
			code = arm_emit_value_and_patch_ldr (code, imt_method, (guint32)item->key);
			/*must emit after unconditional branch*/
			if (vtable_target) {
				code = arm_emit_value_and_patch_ldr (code, vtable_target, (guint32)vtable);
				item->chunk_size += 4;
				vtable_target = NULL;
			/*We reserve the space for bsearch IMT values after the first entry with an absolute jump*/
			constant_pool_starts [i] = code;
			code += extra_space;
			/* !is_equals: binary-search node — branch when R0 >= this key */
#ifdef USE_JUMP_TABLES
			code = load_element_with_regbase_cond (code, ARMREG_R1, ARMREG_R2, IMT_METHOD_JTI (i), ARMCOND_AL);
			ARM_CMP_REG_REG (code, ARMREG_R0, ARMREG_R1);
			code = load_element_with_regbase_cond (code, ARMREG_R1, ARMREG_R2, JUMP_CODE_JTI (i), ARMCOND_HS);
			ARM_BX_COND (code, ARMCOND_HS, ARMREG_R1);
			item->jmp_code = GUINT_TO_POINTER (JUMP_CODE_JTI (i));
			ARM_LDR_IMM (code, ARMREG_R1, ARMREG_PC, 0);
			ARM_CMP_REG_REG (code, ARMREG_R0, ARMREG_R1);
			item->jmp_code = (guint8*)code;
			ARM_B_COND (code, ARMCOND_HS, 0);
	/* Third pass: resolve forward branches and fill in the constant pools */
	for (i = 0; i < count; ++i) {
		MonoIMTCheckItem *item = imt_entries [i];
		if (item->jmp_code) {
			if (item->check_target_idx)
#ifdef USE_JUMP_TABLES
				set_jumptable_element (jte, GPOINTER_TO_UINT (item->jmp_code), imt_entries [item->check_target_idx]->code_target);
				arm_patch (item->jmp_code, imt_entries [item->check_target_idx]->code_target);
		if (i > 0 && item->is_equals) {
#ifdef USE_JUMP_TABLES
			for (j = i - 1; j >= 0 && !imt_entries [j]->is_equals; --j)
				set_jumptable_element (jte, IMT_METHOD_JTI (j), imt_entries [j]->key);
			arminstr_t *space_start = constant_pool_starts [i];
			for (j = i - 1; j >= 0 && !imt_entries [j]->is_equals; --j) {
				space_start = arm_emit_value_and_patch_ldr (space_start, (arminstr_t*)imt_entries [j]->code_target, (guint32)imt_entries [j]->key);
	char *buff = g_strdup_printf ("thunk_for_class_%s_%s_entries_%d", vtable->klass->name_space, vtable->klass->name, count);
	mono_disassemble_code (NULL, (guint8*)start, size, buff);
#ifndef USE_JUMP_TABLES
	g_free (constant_pool_starts);
	mono_arch_flush_icache ((guint8*)start, size);
	mono_stats.imt_thunks_size += code - start;
	g_assert (DISTANCE (start, code) <= size);
/* Read integer register REG from the saved machine context. */
mono_arch_context_get_int_reg (MonoContext *ctx, int reg)
	return ctx->regs [reg];
/* Write VAL into integer register REG of the saved machine context. */
mono_arch_context_set_int_reg (MonoContext *ctx, int reg, mgreg_t val)
	ctx->regs [reg] = val;
/*
 * mono_arch_get_trampolines:
 *
 *   Return a list of MonoTrampInfo structures describing arch specific trampolines;
 * on ARM these are the exception trampolines.
 */
mono_arch_get_trampolines (gboolean aot)
	return mono_arm_get_exception_trampolines (aot);
6919 #ifdef MONO_ARCH_SOFT_DEBUG_SUPPORTED
6921 * mono_arch_set_breakpoint:
6923 * Set a breakpoint at the native code corresponding to JI at NATIVE_OFFSET.
6924 * The location should contain code emitted by OP_SEQ_POINT.
/*
 * mono_arch_set_breakpoint:
 *
 *   Set a breakpoint at the native code corresponding to JI at NATIVE_OFFSET.
 * The location should contain code emitted by OP_SEQ_POINT. Three modes:
 * soft breakpoints patch in a BLX LR, AOT code flips a per-offset slot in
 * SeqPointInfo, and JIT code patches a load from the breakpoint trigger page.
 * NOTE(review): sampled chunk — some braces/`#else` lines are missing.
 */
mono_arch_set_breakpoint (MonoJitInfo *ji, guint8 *ip)
	guint32 native_offset = ip - (guint8*)ji->code_start;
	MonoDebugOptions *opt = mini_get_debug_options ();
	if (opt->soft_breakpoints) {
		g_assert (!ji->from_aot);
		/* Call through LR: the bp helper address was stored there by OP_SEQ_POINT */
		ARM_BLX_REG (code, ARMREG_LR);
		mono_arch_flush_icache (code - 4, 4);
	} else if (ji->from_aot) {
		SeqPointInfo *info = mono_arch_get_seq_point_info (mono_domain_get (), ji->code_start);
		g_assert (native_offset % 4 == 0);
		g_assert (info->bp_addrs [native_offset / 4] == 0);
		/* Arm the slot: AOT code reads through it and faults on the trigger page */
		info->bp_addrs [native_offset / 4] = bp_trigger_page;
		int dreg = ARMREG_LR;
		/* Read from another trigger page */
#ifdef USE_JUMP_TABLES
		gpointer *jte = mono_jumptable_add_entry ();
		code = mono_arm_load_jumptable_entry (code, jte, dreg);
		jte [0] = bp_trigger_page;
		/* Trigger page address embedded as a literal word after the LDR */
		ARM_LDR_IMM (code, dreg, ARMREG_PC, 0);
		*(int*)code = (int)bp_trigger_page;
		/* Faulting load: SIGSEGV on the protected trigger page */
		ARM_LDR_IMM (code, dreg, dreg, 0);
		mono_arch_flush_icache (code - 16, 16);
		/* This is currently implemented by emitting an SWI instruction, which
		 * qemu/linux seems to convert to a SIGILL.
		 */
		*(int*)code = (0xef << 24) | 8;
		mono_arch_flush_icache (code - 4, 4);
6974 * mono_arch_clear_breakpoint:
6976 * Clear the breakpoint at IP.
/*
 * mono_arch_clear_breakpoint:
 *
 *   Clear the breakpoint at IP: undo whichever patch mono_arch_set_breakpoint
 * applied (soft call, AOT slot, or the 4-instruction trigger-page sequence).
 * NOTE(review): sampled chunk — the instruction-rewrite statements inside
 * the loop are not visible here.
 */
mono_arch_clear_breakpoint (MonoJitInfo *ji, guint8 *ip)
	MonoDebugOptions *opt = mini_get_debug_options ();
	if (opt->soft_breakpoints) {
		g_assert (!ji->from_aot);
		mono_arch_flush_icache (code - 4, 4);
	} else if (ji->from_aot) {
		guint32 native_offset = ip - (guint8*)ji->code_start;
		SeqPointInfo *info = mono_arch_get_seq_point_info (mono_domain_get (), ji->code_start);
		g_assert (native_offset % 4 == 0);
		g_assert (info->bp_addrs [native_offset / 4] == bp_trigger_page);
		/* Disarm the slot */
		info->bp_addrs [native_offset / 4] = 0;
		/* Rewrite the 4-instruction breakpoint sequence (body sampled out) */
		for (i = 0; i < 4; ++i)
		mono_arch_flush_icache (ip, code - ip);
7006 * mono_arch_start_single_stepping:
7008 * Start single stepping.
7011 mono_arch_start_single_stepping (void)
7013 if (ss_trigger_page)
7014 mono_mprotect (ss_trigger_page, mono_pagesize (), 0);
7020 * mono_arch_stop_single_stepping:
7022 * Stop single stepping.
7025 mono_arch_stop_single_stepping (void)
7027 if (ss_trigger_page)
7028 mono_mprotect (ss_trigger_page, mono_pagesize (), MONO_MMAP_READ);
7034 #define DBG_SIGNAL SIGBUS
7036 #define DBG_SIGNAL SIGSEGV
7040 * mono_arch_is_single_step_event:
7042 * Return whenever the machine state in SIGCTX corresponds to a single
/*
 * mono_arch_is_single_step_event:
 *
 *   Return whenever the machine state in SIGCTX corresponds to a single
 * step event, i.e. the faulting address falls on (or just past) the
 * single-step trigger page.
 * NOTE(review): sampled chunk — the return statements are not visible here.
 */
mono_arch_is_single_step_event (void *info, void *sigctx)
	siginfo_t *sinfo = info;
	if (!ss_trigger_page)
	/* Sometimes the address is off by 4 */
	if (sinfo->si_addr >= ss_trigger_page && (guint8*)sinfo->si_addr <= (guint8*)ss_trigger_page + 128)
7061 * mono_arch_is_breakpoint_event:
7063 * Return whenever the machine state in SIGCTX corresponds to a breakpoint event.
/*
 * mono_arch_is_breakpoint_event:
 *
 *   Return whenever the machine state in SIGCTX corresponds to a breakpoint
 * event: the debug signal fired and the faulting address falls on (or just
 * past) the breakpoint trigger page.
 * NOTE(review): sampled chunk — the return statements are not visible here.
 */
mono_arch_is_breakpoint_event (void *info, void *sigctx)
	siginfo_t *sinfo = info;
	if (!ss_trigger_page)
	if (sinfo->si_signo == DBG_SIGNAL) {
		/* Sometimes the address is off by 4 */
		if (sinfo->si_addr >= bp_trigger_page && (guint8*)sinfo->si_addr <= (guint8*)bp_trigger_page + 128)
7085 * mono_arch_skip_breakpoint:
7087 * See mini-amd64.c for docs.
7090 mono_arch_skip_breakpoint (MonoContext *ctx, MonoJitInfo *ji)
7092 MONO_CONTEXT_SET_IP (ctx, (guint8*)MONO_CONTEXT_GET_IP (ctx) + 4);
7096 * mono_arch_skip_single_step:
7098 * See mini-amd64.c for docs.
7101 mono_arch_skip_single_step (MonoContext *ctx)
7103 MONO_CONTEXT_SET_IP (ctx, (guint8*)MONO_CONTEXT_GET_IP (ctx) + 4);
7106 #endif /* MONO_ARCH_SOFT_DEBUG_SUPPORTED */
7109 * mono_arch_get_seq_point_info:
7111 * See mini-amd64.c for docs.
/*
 * mono_arch_get_seq_point_info:
 *
 *   Return the per-method SeqPointInfo for CODE, allocating and caching it
 * in the domain's arch_seq_points hash table on first use. The structure
 * carries the trigger-page addresses and one bp_addrs slot per instruction.
 * See mini-amd64.c for docs.
 * NOTE(review): sampled chunk — the cache-miss check and return statement
 * are not visible here.
 */
mono_arch_get_seq_point_info (MonoDomain *domain, guint8 *code)
	// FIXME: Add a free function
	mono_domain_lock (domain);
	info = g_hash_table_lookup (domain_jit_info (domain)->arch_seq_points,
	mono_domain_unlock (domain);
	ji = mono_jit_info_table_find (domain, (char*)code);
	/* One bp_addrs slot per code byte/4 (one per instruction) */
	info = g_malloc0 (sizeof (SeqPointInfo) + ji->code_size);
	info->ss_trigger_page = ss_trigger_page;
	info->bp_trigger_page = bp_trigger_page;
	mono_domain_lock (domain);
	g_hash_table_insert (domain_jit_info (domain)->arch_seq_points,
	mono_domain_unlock (domain);
7145 mono_arch_init_lmf_ext (MonoLMFExt *ext, gpointer prev_lmf)
7147 ext->lmf.previous_lmf = prev_lmf;
7148 /* Mark that this is a MonoLMFExt */
7149 ext->lmf.previous_lmf = (gpointer)(((gssize)ext->lmf.previous_lmf) | 2);
7150 ext->lmf.sp = (gssize)ext;
7154 * mono_arch_set_target:
7156 * Set the target architecture the JIT backend should generate code for, in the form
7157 * of a GNU target triplet. Only used in AOT mode.
7160 mono_arch_set_target (char *mtriple)
7162 /* The GNU target triple format is not very well documented */
7163 if (strstr (mtriple, "armv7")) {
7164 v5_supported = TRUE;
7165 v6_supported = TRUE;
7166 v7_supported = TRUE;
7168 if (strstr (mtriple, "armv6")) {
7169 v5_supported = TRUE;
7170 v6_supported = TRUE;
7172 if (strstr (mtriple, "armv7s")) {
7173 v7s_supported = TRUE;
7175 if (strstr (mtriple, "thumbv7s")) {
7176 v5_supported = TRUE;
7177 v6_supported = TRUE;
7178 v7_supported = TRUE;
7179 v7s_supported = TRUE;
7180 thumb_supported = TRUE;
7181 thumb2_supported = TRUE;
7183 if (strstr (mtriple, "darwin") || strstr (mtriple, "ios")) {
7184 v5_supported = TRUE;
7185 v6_supported = TRUE;
7186 thumb_supported = TRUE;
7189 if (strstr (mtriple, "gnueabi"))
7190 eabi_supported = TRUE;
/*
 * mono_arch_opcode_supported:
 *
 *   Report whether OPCODE can be emitted natively. The 32-bit atomic
 * opcodes require ARMv7 (LDREX/STREX).
 * NOTE(review): sampled chunk — the switch header and default branch are
 * not visible here.
 */
mono_arch_opcode_supported (int opcode)
	case OP_ATOMIC_ADD_I4:
	case OP_ATOMIC_EXCHANGE_I4:
	case OP_ATOMIC_CAS_I4:
		return v7_supported;
7206 #if defined(ENABLE_GSHAREDVT)
7208 #include "../../../mono-extensions/mono/mini/mini-arm-gsharedvt.c"
7210 #endif /* !MONOTOUCH */