2 * mini-arm.c: ARM backend for the Mono code generator
5 * Paolo Molaro (lupus@ximian.com)
6 * Dietmar Maurer (dietmar@ximian.com)
8 * (C) 2003 Ximian, Inc.
9 * Copyright 2003-2011 Novell, Inc (http://www.novell.com)
10 * Copyright 2011 Xamarin, Inc (http://www.xamarin.com)
15 #include <mono/metadata/appdomain.h>
16 #include <mono/metadata/debug-helpers.h>
17 #include <mono/utils/mono-mmap.h>
18 #include <mono/utils/mono-hwcap-arm.h>
24 #include "debugger-agent.h"
26 #include "mono/arch/arm/arm-vfp-codegen.h"
28 /* Sanity check: This makes no sense */
29 #if defined(ARM_FPU_NONE) && (defined(ARM_FPU_VFP) || defined(ARM_FPU_VFP_HARD))
30 #error "ARM_FPU_NONE is defined while one of ARM_FPU_VFP/ARM_FPU_VFP_HARD is defined"
34 * IS_SOFT_FLOAT: Is full software floating point used?
35 * IS_HARD_FLOAT: Is full hardware floating point used?
36 * IS_VFP: Is hardware floating point with software ABI used?
38 * These are not necessarily constants, e.g. IS_SOFT_FLOAT and
39 * IS_VFP may delegate to mono_arch_is_soft_float ().
42 #if defined(ARM_FPU_VFP_HARD)
43 #define IS_SOFT_FLOAT (FALSE)
44 #define IS_HARD_FLOAT (TRUE)
46 #elif defined(ARM_FPU_NONE)
47 #define IS_SOFT_FLOAT (mono_arch_is_soft_float ())
48 #define IS_HARD_FLOAT (FALSE)
49 #define IS_VFP (!mono_arch_is_soft_float ())
51 #define IS_SOFT_FLOAT (FALSE)
52 #define IS_HARD_FLOAT (FALSE)
56 #if defined(__ARM_EABI__) && defined(__linux__) && !defined(PLATFORM_ANDROID) && !defined(__native_client__)
57 #define HAVE_AEABI_READ_TP 1
60 #ifdef __native_client_codegen__
61 const guint kNaClAlignment = kNaClAlignmentARM;
62 const guint kNaClAlignmentMask = kNaClAlignmentMaskARM;
63 gint8 nacl_align_byte = -1; /* 0xff */
66 mono_arch_nacl_pad (guint8 *code, int pad)
68 /* Not yet properly implemented. */
69 g_assert_not_reached ();
74 mono_arch_nacl_skip_nops (guint8 *code)
76 /* Not yet properly implemented. */
77 g_assert_not_reached ();
81 #endif /* __native_client_codegen__ */
83 #define ALIGN_TO(val,align) ((((guint64)val) + ((align) - 1)) & ~((align) - 1))
86 void sys_icache_invalidate (void *start, size_t len);
89 static gint lmf_tls_offset = -1;
90 static gint lmf_addr_tls_offset = -1;
92 /* This mutex protects architecture specific caches */
93 #define mono_mini_arch_lock() EnterCriticalSection (&mini_arch_mutex)
94 #define mono_mini_arch_unlock() LeaveCriticalSection (&mini_arch_mutex)
95 static CRITICAL_SECTION mini_arch_mutex;
97 static gboolean v5_supported = FALSE;
98 static gboolean v6_supported = FALSE;
99 static gboolean v7_supported = FALSE;
100 static gboolean v7s_supported = FALSE;
101 static gboolean thumb_supported = FALSE;
102 static gboolean thumb2_supported = FALSE;
104 * Whether to use the ARM EABI
106 static gboolean eabi_supported = FALSE;
109 * Whether to use the iphone ABI extensions:
110 * http://developer.apple.com/library/ios/documentation/Xcode/Conceptual/iPhoneOSABIReference/index.html
111 * Basically, r7 is used as a frame pointer and it should point to the saved r7 + lr.
112 * This is required for debugging/profiling tools to work, but it has some overhead so it should
113 * only be turned on in debug builds.
115 static gboolean iphone_abi = FALSE;
118 * The FPU we are generating code for. This is NOT runtime configurable right now,
119 * since some things like MONO_ARCH_CALLEE_FREGS still depend on defines.
121 static MonoArmFPU arm_fpu;
123 static int vfp_scratch1 = ARM_VFP_F28;
124 static int vfp_scratch2 = ARM_VFP_F30;
128 static volatile int ss_trigger_var = 0;
130 static gpointer single_step_func_wrapper;
131 static gpointer breakpoint_func_wrapper;
134 * The code generated for sequence points reads from this location, which is
135 * made read-only when single stepping is enabled.
137 static gpointer ss_trigger_page;
139 /* Enabled breakpoints read from this trigger page */
140 static gpointer bp_trigger_page;
142 /* Structure used by the sequence points in AOTed code */
144 gpointer ss_trigger_page;
145 gpointer bp_trigger_page;
146 guint8* bp_addrs [MONO_ZERO_LEN_ARRAY];
151 * floating point support: on ARM it is a mess, there are at least 3
152 * different setups, each of which is binary-incompatible with the others.
153 * 1) FPA: old and ugly, but unfortunately what current distros use
154 * the double binary format has the two words swapped. 8 double registers.
155 * Implemented usually by kernel emulation.
156 * 2) softfloat: the compiler emulates all the fp ops. Usually uses the
157 * ugly swapped double format (I guess a softfloat-vfp exists, too, though).
158 * 3) VFP: the new and actually sensible and useful FP support. Implemented
159 * in HW or kernel-emulated, requires new tools. I think this is what symbian uses.
161 * We do not care about FPA. We will support soft float and VFP.
163 int mono_exc_esp_offset = 0;
165 #define arm_is_imm12(v) ((v) > -4096 && (v) < 4096)
166 #define arm_is_imm8(v) ((v) > -256 && (v) < 256)
167 #define arm_is_fpimm8(v) ((v) >= -1020 && (v) <= 1020)
169 #define LDR_MASK ((0xf << ARMCOND_SHIFT) | (3 << 26) | (1 << 22) | (1 << 20) | (15 << 12))
170 #define LDR_PC_VAL ((ARMCOND_AL << ARMCOND_SHIFT) | (1 << 26) | (0 << 22) | (1 << 20) | (15 << 12))
171 #define IS_LDR_PC(val) (((val) & LDR_MASK) == LDR_PC_VAL)
173 #define ADD_LR_PC_4 ((ARMCOND_AL << ARMCOND_SHIFT) | (1 << 25) | (1 << 23) | (ARMREG_PC << 16) | (ARMREG_LR << 12) | 4)
174 #define MOV_LR_PC ((ARMCOND_AL << ARMCOND_SHIFT) | (1 << 24) | (0xa << 20) | (ARMREG_LR << 12) | ARMREG_PC)
175 //#define DEBUG_IMT 0
177 /* A variant of ARM_LDR_IMM which can handle large offsets */
178 #define ARM_LDR_IMM_GENERAL(code, dreg, basereg, offset, scratch_reg) do { \
179 if (arm_is_imm12 ((offset))) { \
180 ARM_LDR_IMM (code, (dreg), (basereg), (offset)); \
182 g_assert ((scratch_reg) != (basereg)); \
183 code = mono_arm_emit_load_imm (code, (scratch_reg), (offset)); \
184 ARM_LDR_REG_REG (code, (dreg), (basereg), (scratch_reg)); \
188 #define ARM_STR_IMM_GENERAL(code, dreg, basereg, offset, scratch_reg) do { \
189 if (arm_is_imm12 ((offset))) { \
190 ARM_STR_IMM (code, (dreg), (basereg), (offset)); \
192 g_assert ((scratch_reg) != (basereg)); \
193 code = mono_arm_emit_load_imm (code, (scratch_reg), (offset)); \
194 ARM_STR_REG_REG (code, (dreg), (basereg), (scratch_reg)); \
198 static void mono_arch_compute_omit_fp (MonoCompile *cfg);
/*
 * mono_arch_regname:
 *
 *   Return a human-readable name for the ARM integer register REG
 * (0..15: r0-r3, v1-v7, fp, ip, sp, lr, pc). Used by debug dumps.
 * Returns "unknown" for out-of-range values.
 * NOTE(review): restored from an extraction-mangled block (fused line
 * numbers, dropped lines) — verify against upstream mini-arm.c.
 */
const char*
mono_arch_regname (int reg)
{
	static const char * rnames[] = {
		"arm_r0", "arm_r1", "arm_r2", "arm_r3", "arm_v1",
		"arm_v2", "arm_v3", "arm_v4", "arm_v5", "arm_v6",
		"arm_v7", "arm_fp", "arm_ip", "arm_sp", "arm_lr",
		"arm_pc"
	};
	if (reg >= 0 && reg < 16)
		return rnames [reg];
	return "unknown";
}
/*
 * mono_arch_fregname:
 *
 *   Return a human-readable name for the VFP single-precision register
 * REG (0..31). Returns "unknown" for out-of-range values.
 * NOTE(review): restored from an extraction-mangled block (fused line
 * numbers, dropped lines) — verify against upstream mini-arm.c.
 */
const char*
mono_arch_fregname (int reg)
{
	static const char * rnames[] = {
		"arm_f0", "arm_f1", "arm_f2", "arm_f3", "arm_f4",
		"arm_f5", "arm_f6", "arm_f7", "arm_f8", "arm_f9",
		"arm_f10", "arm_f11", "arm_f12", "arm_f13", "arm_f14",
		"arm_f15", "arm_f16", "arm_f17", "arm_f18", "arm_f19",
		"arm_f20", "arm_f21", "arm_f22", "arm_f23", "arm_f24",
		"arm_f25", "arm_f26", "arm_f27", "arm_f28", "arm_f29",
		"arm_f30", "arm_f31"
	};
	if (reg >= 0 && reg < 32)
		return rnames [reg];
	return "unknown";
}
234 emit_big_add (guint8 *code, int dreg, int sreg, int imm)
236 int imm8, rot_amount;
237 if ((imm8 = mono_arm_is_rotated_imm8 (imm, &rot_amount)) >= 0) {
238 ARM_ADD_REG_IMM (code, dreg, sreg, imm8, rot_amount);
241 g_assert (dreg != sreg);
242 code = mono_arm_emit_load_imm (code, dreg, imm);
243 ARM_ADD_REG_REG (code, dreg, dreg, sreg);
/*
 * emit_memcpy:
 *   Emit code copying SIZE bytes from sreg+soffset to dreg+doffset.
 *   Uses r0-r3 as scratch (only called for incoming stack args) and a
 *   4-byte-at-a-time loop for large copies, LR-based load/store pairs
 *   otherwise.
 *   NOTE(review): this block was extracted with lines dropped (fused
 *   original line numbers remain); kept byte-identical.
 */
248 emit_memcpy (guint8 *code, int size, int dreg, int doffset, int sreg, int soffset)
250 /* we can use r0-r3, since this is called only for incoming args on the stack */
251 if (size > sizeof (gpointer) * 4) {
/* large copy: materialize src/dst/count in r0/r1/r2 and loop */
253 code = emit_big_add (code, ARMREG_R0, sreg, soffset);
254 code = emit_big_add (code, ARMREG_R1, dreg, doffset);
255 start_loop = code = mono_arm_emit_load_imm (code, ARMREG_R2, size);
256 ARM_LDR_IMM (code, ARMREG_R3, ARMREG_R0, 0);
257 ARM_STR_IMM (code, ARMREG_R3, ARMREG_R1, 0);
258 ARM_ADD_REG_IMM8 (code, ARMREG_R0, ARMREG_R0, 4);
259 ARM_ADD_REG_IMM8 (code, ARMREG_R1, ARMREG_R1, 4);
260 ARM_SUBS_REG_IMM8 (code, ARMREG_R2, ARMREG_R2, 4);
/* branch back to loop head while count != 0 */
261 ARM_B_COND (code, ARMCOND_NE, 0);
262 arm_patch (code - 4, start_loop);
265 if (arm_is_imm12 (doffset) && arm_is_imm12 (doffset + size) &&
266 arm_is_imm12 (soffset) && arm_is_imm12 (soffset + size)) {
/* offsets encode directly in LDR/STR imm12 */
268 ARM_LDR_IMM (code, ARMREG_LR, sreg, soffset);
269 ARM_STR_IMM (code, ARMREG_LR, dreg, doffset);
/* offsets too large: rebase src/dst into r0/r1 first */
275 code = emit_big_add (code, ARMREG_R0, sreg, soffset);
276 code = emit_big_add (code, ARMREG_R1, dreg, doffset);
277 doffset = soffset = 0;
279 ARM_LDR_IMM (code, ARMREG_LR, ARMREG_R0, soffset);
280 ARM_STR_IMM (code, ARMREG_LR, ARMREG_R1, doffset);
286 g_assert (size == 0);
291 emit_call_reg (guint8 *code, int reg)
294 ARM_BLX_REG (code, reg);
296 #ifdef USE_JUMP_TABLES
297 g_assert_not_reached ();
299 ARM_MOV_REG_REG (code, ARMREG_LR, ARMREG_PC);
303 ARM_MOV_REG_REG (code, ARMREG_PC, reg);
309 emit_call_seq (MonoCompile *cfg, guint8 *code)
311 #ifdef USE_JUMP_TABLES
312 code = mono_arm_patchable_bl (code, ARMCOND_AL);
314 if (cfg->method->dynamic) {
315 ARM_LDR_IMM (code, ARMREG_IP, ARMREG_PC, 0);
317 *(gpointer*)code = NULL;
319 code = emit_call_reg (code, ARMREG_IP);
328 mono_arm_patchable_b (guint8 *code, int cond)
330 #ifdef USE_JUMP_TABLES
333 jte = mono_jumptable_add_entry ();
334 code = mono_arm_load_jumptable_entry (code, jte, ARMREG_IP);
335 ARM_BX_COND (code, cond, ARMREG_IP);
337 ARM_B_COND (code, cond, 0);
343 mono_arm_patchable_bl (guint8 *code, int cond)
345 #ifdef USE_JUMP_TABLES
348 jte = mono_jumptable_add_entry ();
349 code = mono_arm_load_jumptable_entry (code, jte, ARMREG_IP);
350 ARM_BLX_REG_COND (code, cond, ARMREG_IP);
352 ARM_BL_COND (code, cond, 0);
357 #ifdef USE_JUMP_TABLES
359 mono_arm_load_jumptable_entry_addr (guint8 *code, gpointer *jte, ARMReg reg)
361 ARM_MOVW_REG_IMM (code, reg, GPOINTER_TO_UINT(jte) & 0xffff);
362 ARM_MOVT_REG_IMM (code, reg, (GPOINTER_TO_UINT(jte) >> 16) & 0xffff);
367 mono_arm_load_jumptable_entry (guint8 *code, gpointer* jte, ARMReg reg)
369 code = mono_arm_load_jumptable_entry_addr (code, jte, reg);
370 ARM_LDR_IMM (code, reg, reg, 0);
/*
 * emit_move_return_value:
 *   Emit code moving a call's return value from the ABI return
 *   location into ins->dreg, for float-returning call opcodes.
 *   NOTE(review): this block was extracted with lines dropped (fused
 *   original line numbers remain); kept byte-identical. The visible
 *   cases handle R4 (convert single to the JIT's double width) and,
 *   presumably, R8 returns via either d0 (hard-float) or r0/r1
 *   (soft-float ABI) — confirm against upstream.
 */
376 emit_move_return_value (MonoCompile *cfg, MonoInst *ins, guint8 *code)
378 switch (ins->opcode) {
381 case OP_FCALL_MEMBASE:
383 if (((MonoCallInst*)ins)->signature->ret->type == MONO_TYPE_R4) {
385 ARM_CVTS (code, ins->dreg, ARM_VFP_F0);
387 ARM_FMSR (code, ins->dreg, ARMREG_R0);
388 ARM_CVTS (code, ins->dreg, ins->dreg);
392 ARM_CPYD (code, ins->dreg, ARM_VFP_D0);
394 ARM_FMDRR (code, ARMREG_R0, ARMREG_R1, ins->dreg);
407 * Emit code to push an LMF structure on the LMF stack.
408 * On arm, this is intermixed with the initialization of other fields of the structure.
/*
 * emit_save_lmf:
 *   Emit code to push an LMF (Last Managed Frame) structure, built at
 *   sp+lmf_offset, onto the thread's LMF stack. Obtains the lmf_addr
 *   via the fastest available path: __aeabi_read_tp TLS access,
 *   inlined pthread_getspecific for managed-to-native wrappers, or a
 *   plain call to mono_get_lmf_addr ().
 *   NOTE(review): this block was extracted with lines dropped (fused
 *   original line numbers remain); kept byte-identical.
 */
411 emit_save_lmf (MonoCompile *cfg, guint8 *code, gint32 lmf_offset)
413 gboolean get_lmf_fast = FALSE;
416 #ifdef HAVE_AEABI_READ_TP
417 gint32 lmf_addr_tls_offset = mono_get_lmf_addr_tls_offset ();
419 if (lmf_addr_tls_offset != -1) {
/* fast path: read lmf_addr straight out of TLS via __aeabi_read_tp */
422 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_INTERNAL_METHOD,
423 (gpointer)"__aeabi_read_tp");
424 code = emit_call_seq (cfg, code);
426 ARM_LDR_IMM (code, ARMREG_R0, ARMREG_R0, lmf_addr_tls_offset);
432 if (cfg->method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) {
435 /* Inline mono_get_lmf_addr () */
436 /* jit_tls = pthread_getspecific (mono_jit_tls_id); lmf_addr = &jit_tls->lmf; */
438 /* Load mono_jit_tls_id */
440 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_JIT_TLS_ID, NULL);
441 ARM_LDR_IMM (code, ARMREG_R0, ARMREG_PC, 0);
443 *(gpointer*)code = NULL;
445 ARM_LDR_REG_REG (code, ARMREG_R0, ARMREG_PC, ARMREG_R0);
446 /* call pthread_getspecific () */
447 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_INTERNAL_METHOD,
448 (gpointer)"pthread_getspecific");
449 code = emit_call_seq (cfg, code);
450 /* lmf_addr = &jit_tls->lmf */
451 lmf_offset = G_STRUCT_OFFSET (MonoJitTlsData, lmf);
452 g_assert (arm_is_imm8 (lmf_offset));
453 ARM_ADD_REG_IMM (code, ARMREG_R0, ARMREG_R0, lmf_offset, 0);
/* slow path: call the runtime helper */
460 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_INTERNAL_METHOD,
461 (gpointer)"mono_get_lmf_addr");
462 code = emit_call_seq (cfg, code);
464 /* we build the MonoLMF structure on the stack - see mini-arm.h */
465 /* lmf_offset is the offset from the previous stack pointer,
466 * alloc_size is the total stack space allocated, so the offset
467 * of MonoLMF from the current stack ptr is alloc_size - lmf_offset.
468 * The pointer to the struct is put in r1 (new_lmf).
469 * ip is used as scratch
470 * The callee-saved registers are already in the MonoLMF structure
472 code = emit_big_add (code, ARMREG_R1, ARMREG_SP, lmf_offset);
473 /* r0 is the result from mono_get_lmf_addr () */
474 ARM_STR_IMM (code, ARMREG_R0, ARMREG_R1, G_STRUCT_OFFSET (MonoLMF, lmf_addr));
475 /* new_lmf->previous_lmf = *lmf_addr */
476 ARM_LDR_IMM (code, ARMREG_IP, ARMREG_R0, G_STRUCT_OFFSET (MonoLMF, previous_lmf));
477 ARM_STR_IMM (code, ARMREG_IP, ARMREG_R1, G_STRUCT_OFFSET (MonoLMF, previous_lmf));
478 /* *(lmf_addr) = r1 */
479 ARM_STR_IMM (code, ARMREG_R1, ARMREG_R0, G_STRUCT_OFFSET (MonoLMF, previous_lmf));
480 /* Skip method (only needed for trampoline LMF frames) */
481 ARM_STR_IMM (code, ARMREG_SP, ARMREG_R1, G_STRUCT_OFFSET (MonoLMF, sp));
482 ARM_STR_IMM (code, ARMREG_FP, ARMREG_R1, G_STRUCT_OFFSET (MonoLMF, fp));
483 /* save the current IP */
484 ARM_MOV_REG_REG (code, ARMREG_IP, ARMREG_PC);
485 ARM_STR_IMM (code, ARMREG_IP, ARMREG_R1, G_STRUCT_OFFSET (MonoLMF, ip));
/* the whole LMF area holds no GC references */
487 for (i = 0; i < sizeof (MonoLMF); i += sizeof (mgreg_t))
488 mini_gc_set_slot_type_from_fp (cfg, lmf_offset + i, SLOT_NOREF);
/*
 * emit_float_args:
 *   For each float argument recorded on the call, emit an FLDS loading
 *   it from its stack slot into the hard-float argument register
 *   (fad->hreg), growing the native code buffer if needed.
 *   NOTE(review): this block was extracted with lines dropped (fused
 *   original line numbers remain); kept byte-identical.
 */
499 emit_float_args (MonoCompile *cfg, MonoCallInst *inst, guint8 *code, int *max_len, guint *offset)
503 for (list = inst->float_args; list; list = list->next) {
504 FloatArgData *fad = list->data;
505 MonoInst *var = get_vreg_to_inst (cfg, fad->vreg);
/* make sure the buffer has room for max_len more bytes */
509 if (*offset + *max_len > cfg->code_size) {
510 cfg->code_size += *max_len;
511 cfg->native_code = g_realloc (cfg->native_code, cfg->code_size);
513 code = cfg->native_code + *offset;
516 ARM_FLDS (code, fad->hreg, var->inst_basereg, var->inst_offset);
518 *offset = code - cfg->native_code;
527 * Emit code to pop an LMF structure from the LMF stack.
/*
 * emit_restore_lmf:
 *   Emit code to pop the current LMF from the LMF stack:
 *   *lmf_addr = lmf->previous_lmf. Uses the frame register directly
 *   when the offset is small, otherwise rebases into r2.
 *   NOTE(review): this block was extracted with lines dropped (fused
 *   original line numbers remain); kept byte-identical.
 */
530 emit_restore_lmf (MonoCompile *cfg, guint8 *code, gint32 lmf_offset)
534 if (lmf_offset < 32) {
535 basereg = cfg->frame_reg;
540 code = emit_big_add (code, ARMREG_R2, cfg->frame_reg, lmf_offset);
543 /* ip = previous_lmf */
544 ARM_LDR_IMM (code, ARMREG_IP, basereg, offset + G_STRUCT_OFFSET (MonoLMF, previous_lmf));
546 ARM_LDR_IMM (code, ARMREG_LR, basereg, offset + G_STRUCT_OFFSET (MonoLMF, lmf_addr));
547 /* *(lmf_addr) = previous_lmf */
548 ARM_STR_IMM (code, ARMREG_IP, ARMREG_LR, G_STRUCT_OFFSET (MonoLMF, previous_lmf));
553 #endif /* #ifndef DISABLE_JIT */
556 * mono_arch_get_argument_info:
557 * @csig: a method signature
558 * @param_count: the number of parameters to consider
559 * @arg_info: an array to store the result infos
561 * Gathers information on parameters such as size, alignment and
562 * padding. arg_info should be large enough to hold param_count + 1 entries.
564 * Returns the size of the activation frame.
/*
 * mono_arch_get_argument_info:
 *   Compute per-parameter size/alignment/padding/offset and return the
 *   activation frame size, aligned to MONO_ARCH_FRAME_ALIGNMENT.
 *   NOTE(review): this block was extracted with lines dropped (fused
 *   original line numbers remain); kept byte-identical.
 */
567 mono_arch_get_argument_info (MonoGenericSharingContext *gsctx, MonoMethodSignature *csig, int param_count, MonoJitArgumentInfo *arg_info)
569 int k, frame_size = 0;
570 guint32 size, align, pad;
574 t = mini_type_get_underlying_type (gsctx, csig->ret);
575 if (MONO_TYPE_ISSTRUCT (t)) {
/* struct returns take a hidden return-address slot */
576 frame_size += sizeof (gpointer);
580 arg_info [0].offset = offset;
/* 'this' pointer, if present */
583 frame_size += sizeof (gpointer);
587 arg_info [0].size = frame_size;
589 for (k = 0; k < param_count; k++) {
590 size = mini_type_stack_size_full (NULL, csig->params [k], &align, csig->pinvoke);
592 /* ignore alignment for now */
/* pad frame to the parameter's alignment */
595 frame_size += pad = (align - (frame_size & (align - 1))) & (align - 1);
596 arg_info [k].pad = pad;
598 arg_info [k + 1].pad = 0;
599 arg_info [k + 1].size = size;
601 arg_info [k + 1].offset = offset;
/* final padding up to the frame alignment */
605 align = MONO_ARCH_FRAME_ALIGNMENT;
606 frame_size += pad = (align - (frame_size & (align - 1))) & (align - 1);
607 arg_info [k].pad = pad;
612 #define MAX_ARCH_DELEGATE_PARAMS 3
/*
 * get_delegate_invoke_impl:
 *   Generate the small trampoline used for delegate Invoke. With a
 *   target, 'this' (r0) is replaced by delegate->target before jumping
 *   to method_ptr; without one, the arguments are slid down one
 *   register. Optionally reports the generated size via code_size.
 *   NOTE(review): this block was extracted with lines dropped (fused
 *   original line numbers remain); kept byte-identical.
 */
615 get_delegate_invoke_impl (gboolean has_target, gboolean param_count, guint32 *code_size)
617 guint8 *code, *start;
620 start = code = mono_global_codeman_reserve (12);
622 /* Replace the this argument with the target */
623 ARM_LDR_IMM (code, ARMREG_IP, ARMREG_R0, G_STRUCT_OFFSET (MonoDelegate, method_ptr));
624 ARM_LDR_IMM (code, ARMREG_R0, ARMREG_R0, G_STRUCT_OFFSET (MonoDelegate, target));
625 ARM_MOV_REG_REG (code, ARMREG_PC, ARMREG_IP);
627 g_assert ((code - start) <= 12);
629 mono_arch_flush_icache (start, 12);
/* no-target variant: 8 bytes plus one mov per parameter */
633 size = 8 + param_count * 4;
634 start = code = mono_global_codeman_reserve (size);
636 ARM_LDR_IMM (code, ARMREG_IP, ARMREG_R0, G_STRUCT_OFFSET (MonoDelegate, method_ptr));
637 /* slide down the arguments */
638 for (i = 0; i < param_count; ++i) {
639 ARM_MOV_REG_REG (code, (ARMREG_R0 + i), (ARMREG_R0 + i + 1));
641 ARM_MOV_REG_REG (code, ARMREG_PC, ARMREG_IP);
643 g_assert ((code - start) <= size);
645 mono_arch_flush_icache (start, size);
649 *code_size = code - start;
655 * mono_arch_get_delegate_invoke_impls:
657 * Return a list of MonoAotTrampInfo structures for the delegate invoke impl
/*
 * mono_arch_get_delegate_invoke_impls:
 *   Build the list of MonoTrampInfo entries for every delegate invoke
 *   variant (has-target plus 0..MAX_ARCH_DELEGATE_PARAMS targetless),
 *   for AOT registration.
 *   NOTE(review): this block was extracted with lines dropped (fused
 *   original line numbers remain); kept byte-identical.
 */
661 mono_arch_get_delegate_invoke_impls (void)
669 code = get_delegate_invoke_impl (TRUE, 0, &code_len);
670 res = g_slist_prepend (res, mono_tramp_info_create ("delegate_invoke_impl_has_target", code, code_len, NULL, NULL));
672 for (i = 0; i <= MAX_ARCH_DELEGATE_PARAMS; ++i) {
673 code = get_delegate_invoke_impl (FALSE, i, &code_len);
674 tramp_name = g_strdup_printf ("delegate_invoke_impl_target_%d", i);
675 res = g_slist_prepend (res, mono_tramp_info_create (tramp_name, code, code_len, NULL, NULL));
/*
 * mono_arch_get_delegate_invoke_impl:
 *   Return (and cache, under the arch lock) the delegate invoke
 *   trampoline matching SIG; from the AOT image when compiling AOT,
 *   otherwise JIT-generated. Returns NULL for unsupported signatures
 *   (struct returns, too many or non-regsize parameters).
 *   NOTE(review): this block was extracted with lines dropped (fused
 *   original line numbers remain); kept byte-identical.
 */
683 mono_arch_get_delegate_invoke_impl (MonoMethodSignature *sig, gboolean has_target)
685 guint8 *code, *start;
687 /* FIXME: Support more cases */
688 if (MONO_TYPE_ISSTRUCT (sig->ret))
692 static guint8* cached = NULL;
693 mono_mini_arch_lock ();
695 mono_mini_arch_unlock ();
700 start = mono_aot_get_trampoline ("delegate_invoke_impl_has_target");
702 start = get_delegate_invoke_impl (TRUE, 0, NULL);
704 mono_mini_arch_unlock ();
/* targetless variant: one cache slot per parameter count */
707 static guint8* cache [MAX_ARCH_DELEGATE_PARAMS + 1] = {NULL};
710 if (sig->param_count > MAX_ARCH_DELEGATE_PARAMS)
712 for (i = 0; i < sig->param_count; ++i)
713 if (!mono_is_regsize_var (sig->params [i]))
716 mono_mini_arch_lock ();
717 code = cache [sig->param_count];
719 mono_mini_arch_unlock ();
724 char *name = g_strdup_printf ("delegate_invoke_impl_target_%d", sig->param_count);
725 start = mono_aot_get_trampoline (name);
728 start = get_delegate_invoke_impl (FALSE, sig->param_count, NULL);
730 cache [sig->param_count] = start;
731 mono_mini_arch_unlock ();
739 mono_arch_get_this_arg_from_call (mgreg_t *regs, guint8 *code)
741 return (gpointer)regs [ARMREG_R0];
745 * Initialize the cpu to execute managed code.
748 mono_arch_cpu_init (void)
750 #if defined(__APPLE__)
753 i8_align = __alignof__ (gint64);
/*
 * create_function_wrapper:
 *   Generate a small stub that captures the full register state into a
 *   MonoContext built on the stack, calls FUNCTION(ctx), then restores
 *   every register (including pc) from the possibly-modified context.
 *   Used for the soft-debugger single-step/breakpoint entry points.
 *   NOTE(review): this block was extracted with lines dropped (fused
 *   original line numbers remain); kept byte-identical.
 */
758 create_function_wrapper (gpointer function)
760 guint8 *start, *code;
762 start = code = mono_global_codeman_reserve (96);
765 * Construct the MonoContext structure on the stack.
768 ARM_SUB_REG_IMM8 (code, ARMREG_SP, ARMREG_SP, sizeof (MonoContext));
770 /* save ip, lr and pc into their corresponding ctx.regs slots. */
771 ARM_STR_IMM (code, ARMREG_IP, ARMREG_SP, G_STRUCT_OFFSET (MonoContext, regs) + sizeof (mgreg_t) * ARMREG_IP);
772 ARM_STR_IMM (code, ARMREG_LR, ARMREG_SP, G_STRUCT_OFFSET (MonoContext, regs) + 4 * ARMREG_LR);
773 ARM_STR_IMM (code, ARMREG_LR, ARMREG_SP, G_STRUCT_OFFSET (MonoContext, regs) + 4 * ARMREG_PC);
775 /* save r0..r10 and fp */
776 ARM_ADD_REG_IMM8 (code, ARMREG_IP, ARMREG_SP, G_STRUCT_OFFSET (MonoContext, regs));
777 ARM_STM (code, ARMREG_IP, 0x0fff);
779 /* now we can update fp. */
780 ARM_MOV_REG_REG (code, ARMREG_FP, ARMREG_SP);
782 /* make ctx.esp hold the actual value of sp at the beginning of this method. */
783 ARM_ADD_REG_IMM8 (code, ARMREG_R0, ARMREG_FP, sizeof (MonoContext));
784 ARM_STR_IMM (code, ARMREG_R0, ARMREG_IP, 4 * ARMREG_SP);
785 ARM_STR_IMM (code, ARMREG_R0, ARMREG_FP, G_STRUCT_OFFSET (MonoContext, regs) + 4 * ARMREG_SP);
787 /* make ctx.eip hold the address of the call. */
788 ARM_SUB_REG_IMM8 (code, ARMREG_LR, ARMREG_LR, 4);
789 ARM_STR_IMM (code, ARMREG_LR, ARMREG_SP, G_STRUCT_OFFSET (MonoContext, pc));
791 /* r0 now points to the MonoContext */
792 ARM_MOV_REG_REG (code, ARMREG_R0, ARMREG_FP);
795 #ifdef USE_JUMP_TABLES
797 gpointer *jte = mono_jumptable_add_entry ();
798 code = mono_arm_load_jumptable_entry (code, jte, ARMREG_IP);
/* non-jumptable path: embed the function pointer inline */
802 ARM_LDR_IMM (code, ARMREG_IP, ARMREG_PC, 0);
804 *(gpointer*)code = function;
807 ARM_BLX_REG (code, ARMREG_IP);
809 /* we're back; save ctx.eip and ctx.esp into the corresponding regs slots. */
810 ARM_LDR_IMM (code, ARMREG_R0, ARMREG_FP, G_STRUCT_OFFSET (MonoContext, pc));
811 ARM_STR_IMM (code, ARMREG_R0, ARMREG_FP, G_STRUCT_OFFSET (MonoContext, regs) + 4 * ARMREG_LR);
812 ARM_STR_IMM (code, ARMREG_R0, ARMREG_FP, G_STRUCT_OFFSET (MonoContext, regs) + 4 * ARMREG_PC);
814 /* make ip point to the regs array, then restore everything, including pc. */
815 ARM_ADD_REG_IMM8 (code, ARMREG_IP, ARMREG_FP, G_STRUCT_OFFSET (MonoContext, regs));
816 ARM_LDM (code, ARMREG_IP, 0xffff);
818 mono_arch_flush_icache (start, code - start);
824 * Initialize architecture specific code.
/*
 * mono_arch_init:
 *   One-time architecture-specific initialization: lock, debugger
 *   wrappers and trigger pages, AOT icall registration, FPU mode
 *   selection, hwcap-based CPU feature detection, and the
 *   MONO_CPU_ARCH environment override.
 *   NOTE(review): this block was extracted with lines dropped (fused
 *   original line numbers remain); kept byte-identical.
 */
827 mono_arch_init (void)
829 const char *cpu_arch;
831 InitializeCriticalSection (&mini_arch_mutex);
832 #ifdef MONO_ARCH_SOFT_DEBUG_SUPPORTED
833 if (mini_get_debug_options ()->soft_breakpoints) {
834 single_step_func_wrapper = create_function_wrapper (debugger_agent_single_step_from_context);
835 breakpoint_func_wrapper = create_function_wrapper (debugger_agent_breakpoint_from_context);
/* hardware-breakpoint style: read-only trigger pages that fault on access */
840 ss_trigger_page = mono_valloc (NULL, mono_pagesize (), MONO_MMAP_READ|MONO_MMAP_32BIT);
841 bp_trigger_page = mono_valloc (NULL, mono_pagesize (), MONO_MMAP_READ|MONO_MMAP_32BIT);
842 mono_mprotect (bp_trigger_page, mono_pagesize (), 0);
845 mono_aot_register_jit_icall ("mono_arm_throw_exception", mono_arm_throw_exception);
846 mono_aot_register_jit_icall ("mono_arm_throw_exception_by_token", mono_arm_throw_exception_by_token);
847 mono_aot_register_jit_icall ("mono_arm_resume_unwind", mono_arm_resume_unwind);
848 #if defined(MONOTOUCH) || defined(MONO_EXTENSIONS)
849 mono_aot_register_jit_icall ("mono_arm_start_gsharedvt_call", mono_arm_start_gsharedvt_call);
852 #if defined(__ARM_EABI__)
853 eabi_supported = TRUE;
/* pick the FPU mode compiled in, falling back to soft float if needed */
856 #if defined(ARM_FPU_VFP_HARD)
857 arm_fpu = MONO_ARM_FPU_VFP_HARD;
859 arm_fpu = MONO_ARM_FPU_VFP;
861 #if defined(ARM_FPU_NONE) && !defined(__APPLE__)
862 /* If we're compiling with a soft float fallback and it
863 turns out that no VFP unit is available, we need to
864 switch to soft float. We don't do this for iOS, since
865 iOS devices always have a VFP unit. */
866 if (!mono_hwcap_arm_has_vfp)
867 arm_fpu = MONO_ARM_FPU_NONE;
871 v5_supported = mono_hwcap_arm_is_v5;
872 v6_supported = mono_hwcap_arm_is_v6;
873 v7_supported = mono_hwcap_arm_is_v7;
874 v7s_supported = mono_hwcap_arm_is_v7s;
876 #if defined(__APPLE__)
877 /* iOS is special-cased here because we don't yet
878 have a way to properly detect CPU features on it. */
879 thumb_supported = TRUE;
882 thumb_supported = mono_hwcap_arm_has_thumb;
883 thumb2_supported = mono_hwcap_arm_has_thumb2;
886 /* Format: armv(5|6|7[s])[-thumb[2]] */
887 cpu_arch = g_getenv ("MONO_CPU_ARCH");
889 /* Do this here so it overrides any detection. */
891 if (strncmp (cpu_arch, "armv", 4) == 0) {
892 v5_supported = cpu_arch [4] >= '5';
893 v6_supported = cpu_arch [4] >= '6';
894 v7_supported = cpu_arch [4] >= '7';
895 v7s_supported = strncmp (cpu_arch, "armv7s", 6) == 0;
898 thumb_supported = strstr (cpu_arch, "thumb") != NULL;
899 thumb2_supported = strstr (cpu_arch, "thumb2") != NULL;
904 * Cleanup architecture specific code.
907 mono_arch_cleanup (void)
912 * This function returns the optimizations supported on this cpu.
915 mono_arch_cpu_optimizations (guint32 *exclude_mask)
917 /* no arm-specific optimizations yet */
923 * This function test for all SIMD functions supported.
925 * Returns a bitmask corresponding to all supported versions.
929 mono_arch_cpu_enumerate_simd_versions (void)
931 /* SIMD is currently unimplemented */
939 mono_arch_opcode_needs_emulation (MonoCompile *cfg, int opcode)
955 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
957 mono_arch_is_soft_float (void)
959 return arm_fpu == MONO_ARM_FPU_NONE;
964 mono_arm_is_hard_float (void)
966 return arm_fpu == MONO_ARM_FPU_VFP_HARD;
/*
 * is_regsize_var:
 *   Return whether T is a 32-bit register-sized type (ints, pointers,
 *   object references, non-valuetype generic instances) suitable for
 *   global register allocation.
 *   NOTE(review): this block was extracted with lines dropped (fused
 *   original line numbers remain); kept byte-identical.
 */
970 is_regsize_var (MonoGenericSharingContext *gsctx, MonoType *t) {
973 t = mini_type_get_underlying_type (gsctx, t);
980 case MONO_TYPE_FNPTR:
982 case MONO_TYPE_OBJECT:
983 case MONO_TYPE_STRING:
984 case MONO_TYPE_CLASS:
985 case MONO_TYPE_SZARRAY:
986 case MONO_TYPE_ARRAY:
988 case MONO_TYPE_GENERICINST:
989 if (!mono_type_generic_inst_is_valuetype (t))
992 case MONO_TYPE_VALUETYPE:
/*
 * mono_arch_get_allocatable_int_vars:
 *   Return the list of local variables eligible for global register
 *   allocation: live, non-volatile, non-indirect locals/args of
 *   register size, sorted for the allocator.
 *   NOTE(review): this block was extracted with lines dropped (fused
 *   original line numbers remain); kept byte-identical.
 */
999 mono_arch_get_allocatable_int_vars (MonoCompile *cfg)
1004 for (i = 0; i < cfg->num_varinfo; i++) {
1005 MonoInst *ins = cfg->varinfo [i];
1006 MonoMethodVar *vmv = MONO_VARINFO (cfg, i);
/* unused or dead vars holding no liveness range */
1009 if (vmv->range.first_use.abs_pos >= vmv->range.last_use.abs_pos)
1012 if (ins->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT) || (ins->opcode != OP_LOCAL && ins->opcode != OP_ARG))
1015 /* we can only allocate 32 bit values */
1016 if (is_regsize_var (cfg->generic_sharing_context, ins->inst_vtype)) {
1017 g_assert (MONO_VARINFO (cfg, i)->reg == -1);
1018 g_assert (i == vmv->idx);
1019 vars = mono_varlist_insert_sorted (cfg, vars, vmv, FALSE);
1026 #define USE_EXTRA_TEMPS 0
/*
 * mono_arch_get_global_int_regs:
 *   Return the list of callee-saved integer registers available to the
 *   global register allocator, excluding fp when it is the frame
 *   pointer and v5 when it is reserved for rgctx/IMT passing.
 *   NOTE(review): this block was extracted with lines dropped (fused
 *   original line numbers remain); kept byte-identical.
 */
1029 mono_arch_get_global_int_regs (MonoCompile *cfg)
1033 mono_arch_compute_omit_fp (cfg);
1036 * FIXME: Interface calls might go through a static rgctx trampoline which
1037 * sets V5, but it doesn't save it, so we need to save it ourselves, and
1040 if (cfg->flags & MONO_CFG_HAS_CALLS)
1041 cfg->uses_rgctx_reg = TRUE;
1043 if (cfg->arch.omit_fp)
1044 regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_FP));
1045 regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_V1));
1046 regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_V2));
1047 regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_V3));
1049 /* V4=R7 is used as a frame pointer, but V7=R10 is preserved */
1050 regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_V7));
1052 regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_V4));
1053 if (!(cfg->compile_aot || cfg->uses_rgctx_reg || COMPILE_LLVM (cfg)))
1054 /* V5 is reserved for passing the vtable/rgctx/IMT method */
1055 regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_V5));
1056 /*regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_V6));*/
1057 /*regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_V7));*/
1063 * mono_arch_regalloc_cost:
1065 * Return the cost, in number of memory references, of the action of
1066 * allocating the variable VMV into a register during global register
1070 mono_arch_regalloc_cost (MonoCompile *cfg, MonoMethodVar *vmv)
1076 #endif /* #ifndef DISABLE_JIT */
1078 #ifndef __GNUC_PREREQ
1079 #define __GNUC_PREREQ(maj, min) (0)
/*
 * mono_arch_flush_icache:
 *   Flush the instruction cache for the range [code, code+size) using
 *   whichever platform mechanism is available: nothing for NaCl or
 *   cross compiles, sys_icache_invalidate on Apple, GCC's
 *   __clear_cache, or the Linux/Android cacheflush syscall.
 *   NOTE(review): this block was extracted with lines dropped (fused
 *   original line numbers remain); kept byte-identical.
 */
1083 mono_arch_flush_icache (guint8 *code, gint size)
1085 #if defined(__native_client__)
1086 // For Native Client we don't have to flush i-cache here,
1087 // as it's being done by dyncode interface.
1090 #ifdef MONO_CROSS_COMPILE
1092 sys_icache_invalidate (code, size);
1093 #elif __GNUC_PREREQ(4, 1)
1094 __clear_cache (code, code + size);
1095 #elif defined(PLATFORM_ANDROID)
1096 const int syscall = 0xf0002;
1104 : "r" (code), "r" (code + size), "r" (syscall)
1105 : "r0", "r1", "r7", "r2"
/* generic Linux: legacy sys_cacheflush software interrupt */
1108 __asm __volatile ("mov r0, %0\n"
1111 "swi 0x9f0002 @ sys_cacheflush"
1113 : "r" (code), "r" (code + size), "r" (0)
1114 : "r0", "r1", "r3" );
1116 #endif /* !__native_client__ */
1116 #endif /* !__native_client__ */
1127 RegTypeStructByAddr,
1128 /* gsharedvt argument passed by addr in greg */
1129 RegTypeGSharedVtInReg,
1130 /* gsharedvt argument passed by addr on stack */
1131 RegTypeGSharedVtOnStack,
1136 guint16 vtsize; /* in param area */
1140 guint8 size : 4; /* 1, 2, 4, 8, or regs used by RegTypeStructByVal */
1145 guint32 stack_usage;
1146 gboolean vtype_retaddr;
1147 /* The index of the vret arg in the argument list */
1157 /*#define __alignof__(a) sizeof(a)*/
1158 #define __alignof__(type) G_STRUCT_OFFSET(struct { char c; type x; }, x)
1161 #define PARAM_REGS 4
/*
 * add_general:
 *   Assign the next integer argument to a core register (r0-r3), a
 *   register pair, a split r3/stack pair, or the stack, honoring the
 *   platform's 64-bit alignment rules (EABI aligns i8 to even regs).
 *   SIMPLE selects the single-word path.
 *   NOTE(review): this block was extracted with lines dropped (fused
 *   original line numbers remain); kept byte-identical.
 */
1164 add_general (guint *gr, guint *stack_size, ArgInfo *ainfo, gboolean simple)
1167 if (*gr > ARMREG_R3) {
/* registers exhausted: pass on the caller's stack */
1169 ainfo->offset = *stack_size;
1170 ainfo->reg = ARMREG_SP; /* in the caller */
1171 ainfo->storage = RegTypeBase;
1174 ainfo->storage = RegTypeGeneral;
/* 64-bit path: may split across r3 and the stack when i8 is 4-aligned */
1181 split = i8_align == 4;
1186 if (*gr == ARMREG_R3 && split) {
1187 /* first word in r3 and the second on the stack */
1188 ainfo->offset = *stack_size;
1189 ainfo->reg = ARMREG_SP; /* in the caller */
1190 ainfo->storage = RegTypeBaseGen;
1192 } else if (*gr >= ARMREG_R3) {
1193 if (eabi_supported) {
1194 /* darwin aligns longs to 4 byte only */
1195 if (i8_align == 8) {
1200 ainfo->offset = *stack_size;
1201 ainfo->reg = ARMREG_SP; /* in the caller */
1202 ainfo->storage = RegTypeBase;
1205 if (eabi_supported) {
/* EABI: 64-bit values start in an even register */
1206 if (i8_align == 8 && ((*gr) & 1))
1209 ainfo->storage = RegTypeIRegPair;
/*
 * add_float:
 *   Assign the next floating point argument under the AAPCS-VFP
 *   (hard-float) rules: singles may back-fill a spare register left by
 *   double alignment, doubles use even single-register pairs, and
 *   overflow goes to the stack.
 *   NOTE(review): this block was extracted with lines dropped (fused
 *   original line numbers remain); kept byte-identical.
 */
1218 add_float (guint *fpr, guint *stack_size, ArgInfo *ainfo, gboolean is_double, gint *float_spare)
1221 * If we're calling a function like this:
1223 * void foo(float a, double b, float c)
1225 * We pass a in s0 and b in d1. That leaves us
1226 * with s1 being unused. The armhf ABI recognizes
1227 * this and requires register assignment to then
1228 * use that for the next single-precision arg,
1229 * i.e. c in this example. So float_spare either
1230 * tells us which reg to use for the next single-
1231 * precision arg, or it's -1, meaning use *fpr.
1233 * Note that even though most of the JIT speaks
1234 * double-precision, fpr represents single-
1235 * precision registers.
1237 * See parts 5.5 and 6.1.2 of the AAPCS for how
1241 if (*fpr < ARM_VFP_F16 || (!is_double && *float_spare >= 0)) {
1242 ainfo->storage = RegTypeFP;
1246 * If we're passing a double-precision value
1247 * and *fpr is odd (e.g. it's s1, s3, ...)
1248 * we need to use the next even register. So
1249 * we mark the current *fpr as a spare that
1250 * can be used for the next single-precision
1254 *float_spare = *fpr;
1259 * At this point, we have an even register
1260 * so we assign that and move along.
1264 } else if (*float_spare >= 0) {
1266 * We're passing a single-precision value
1267 * and it looks like a spare single-
1268 * precision register is available. Let's
1272 ainfo->reg = *float_spare;
1276 * If we hit this branch, we're passing a
1277 * single-precision value and we can simply
1278 * use the next available register.
1286 * We've exhausted available floating point
1287 * regs, so pass the rest on the stack.
1295 ainfo->offset = *stack_size;
1296 ainfo->reg = ARMREG_SP;
1297 ainfo->storage = RegTypeBase;
/*
 * get_call_info:
 *
 *   Compute a CallInfo describing where each argument of SIG and its return
 * value are passed under the ARM calling convention: core registers
 * (R0-R3), VFP registers (hard-float only), or the stack.  The CallInfo is
 * allocated from MP when one is supplied, otherwise from the malloc heap
 * (caller must free in that case).
 * NOTE(review): this excerpt elides some lines of the original body, so the
 * comments below only describe the logic that is visible here.
 */
1304 get_call_info (MonoGenericSharingContext *gsctx, MonoMemPool *mp, MonoMethodSignature *sig)
1306 guint i, gr, fpr, pstart;
1308 int n = sig->hasthis + sig->param_count;
1309 MonoType *simpletype;
1310 guint32 stack_size = 0;
1312 gboolean is_pinvoke = sig->pinvoke;
/* One ArgInfo per formal argument (plus 'this') is tacked onto the CallInfo. */
1316 cinfo = mono_mempool_alloc0 (mp, sizeof (CallInfo) + (sizeof (ArgInfo) * n));
1318 cinfo = g_malloc0 (sizeof (CallInfo) + (sizeof (ArgInfo) * n));
/* Decide how the return value is passed before laying out the arguments. */
1325 t = mini_type_get_underlying_type (gsctx, sig->ret);
1326 if (MONO_TYPE_ISSTRUCT (t)) {
/* Small pinvoke structs (<= pointer size) are returned by value in a register. */
1329 if (is_pinvoke && mono_class_native_size (mono_class_from_mono_type (t), &align) <= sizeof (gpointer)) {
1330 cinfo->ret.storage = RegTypeStructByVal;
1332 cinfo->vtype_retaddr = TRUE;
1334 } else if (!(t->type == MONO_TYPE_GENERICINST && !mono_type_generic_inst_is_valuetype (t)) && mini_is_gsharedvt_type_gsctx (gsctx, t)) {
/* gsharedvt value-type returns also go through a hidden return address. */
1335 cinfo->vtype_retaddr = TRUE;
1341 * To simplify get_this_arg_reg () and LLVM integration, emit the vret arg after
1342 * the first argument, allowing 'this' to be always passed in the first arg reg.
1343 * Also do this if the first argument is a reference type, since virtual calls
1344 * are sometimes made using calli without sig->hasthis set, like in the delegate
1347 if (cinfo->vtype_retaddr && !is_pinvoke && (sig->hasthis || (sig->param_count > 0 && MONO_TYPE_IS_REFERENCE (mini_type_get_underlying_type (gsctx, sig->params [0]))))) {
1349 add_general (&gr, &stack_size, cinfo->args + 0, TRUE);
1351 add_general (&gr, &stack_size, &cinfo->args [sig->hasthis + 0], TRUE);
/* Hidden vret pointer takes the second argument slot in this layout. */
1355 add_general (&gr, &stack_size, &cinfo->ret, TRUE);
1356 cinfo->vret_arg_index = 1;
1360 add_general (&gr, &stack_size, cinfo->args + 0, TRUE);
1364 if (cinfo->vtype_retaddr)
1365 add_general (&gr, &stack_size, &cinfo->ret, TRUE);
1368 DEBUG(printf("params: %d\n", sig->param_count));
/* Lay out each formal parameter in declaration order. */
1369 for (i = pstart; i < sig->param_count; ++i) {
1370 ArgInfo *ainfo = &cinfo->args [n];
1372 if ((sig->call_convention == MONO_CALL_VARARG) && (i == sig->sentinelpos)) {
1373 /* Prevent implicit arguments and sig_cookie from
1374 being passed in registers */
1377 /* Emit the signature cookie just before the implicit arguments */
1378 add_general (&gr, &stack_size, &cinfo->sig_cookie, TRUE);
1380 DEBUG(printf("param %d: ", i));
/* byref parameters are always a single pointer. */
1381 if (sig->params [i]->byref) {
1382 DEBUG(printf("byref\n"));
1383 add_general (&gr, &stack_size, ainfo, TRUE);
1387 simpletype = mini_type_get_underlying_type (gsctx, sig->params [i]);
1388 switch (simpletype->type) {
1389 case MONO_TYPE_BOOLEAN:
1392 cinfo->args [n].size = 1;
1393 add_general (&gr, &stack_size, ainfo, TRUE);
1396 case MONO_TYPE_CHAR:
1399 cinfo->args [n].size = 2;
1400 add_general (&gr, &stack_size, ainfo, TRUE);
1405 cinfo->args [n].size = 4;
1406 add_general (&gr, &stack_size, ainfo, TRUE);
1412 case MONO_TYPE_FNPTR:
1413 case MONO_TYPE_CLASS:
1414 case MONO_TYPE_OBJECT:
1415 case MONO_TYPE_STRING:
1416 case MONO_TYPE_SZARRAY:
1417 case MONO_TYPE_ARRAY:
1418 cinfo->args [n].size = sizeof (gpointer);
1419 add_general (&gr, &stack_size, ainfo, TRUE);
1422 case MONO_TYPE_GENERICINST:
/* Reference generic instances are just pointers... */
1423 if (!mono_type_generic_inst_is_valuetype (simpletype)) {
1424 cinfo->args [n].size = sizeof (gpointer);
1425 add_general (&gr, &stack_size, ainfo, TRUE);
1429 if (mini_is_gsharedvt_type_gsctx (gsctx, simpletype)) {
1430 /* gsharedvt arguments are passed by ref */
1431 g_assert (mini_is_gsharedvt_type_gsctx (gsctx, simpletype));
1432 add_general (&gr, &stack_size, ainfo, TRUE);
/* Re-tag the generic storage class as its gsharedvt counterpart. */
1433 switch (ainfo->storage) {
1434 case RegTypeGeneral:
1435 ainfo->storage = RegTypeGSharedVtInReg;
1438 ainfo->storage = RegTypeGSharedVtOnStack;
1441 g_assert_not_reached ();
1447 case MONO_TYPE_TYPEDBYREF:
1448 case MONO_TYPE_VALUETYPE: {
1454 if (simpletype->type == MONO_TYPE_TYPEDBYREF) {
1455 size = sizeof (MonoTypedRef);
1456 align = sizeof (gpointer);
1458 MonoClass *klass = mono_class_from_mono_type (sig->params [i]);
1460 size = mono_class_native_size (klass, &align);
1462 size = mini_type_stack_size_full (gsctx, simpletype, &align, FALSE);
1464 DEBUG(printf ("load %d bytes struct\n", size));
/* Round the struct size up to whole pointer-sized words. */
1467 align_size += (sizeof (gpointer) - 1);
1468 align_size &= ~(sizeof (gpointer) - 1);
1469 nwords = (align_size + sizeof (gpointer) -1 ) / sizeof (gpointer);
1470 ainfo->storage = RegTypeStructByVal;
1471 ainfo->struct_size = size;
1472 /* FIXME: align stack_size if needed */
1473 if (eabi_supported) {
/* EABI: 8-byte-aligned structs must start in an even register pair. */
1474 if (align >= 8 && (gr & 1))
1477 if (gr > ARMREG_R3) {
1479 ainfo->vtsize = nwords;
1481 int rest = ARMREG_R3 - gr + 1;
/* Split the struct: first n_in_regs words in R0-R3, the rest on the stack. */
1482 int n_in_regs = rest >= nwords? nwords: rest;
1484 ainfo->size = n_in_regs;
1485 ainfo->vtsize = nwords - n_in_regs;
1488 nwords -= n_in_regs;
1490 ainfo->offset = stack_size;
1491 /*g_print ("offset for arg %d at %d\n", n, stack_size);*/
1492 stack_size += nwords * sizeof (gpointer);
1499 add_general (&gr, &stack_size, ainfo, FALSE);
/* Floating point args go to VFP regs (hard-float) or core regs otherwise. */
1506 add_float (&fpr, &stack_size, ainfo, FALSE, &float_spare);
1508 add_general (&gr, &stack_size, ainfo, TRUE);
1516 add_float (&fpr, &stack_size, ainfo, TRUE, &float_spare);
1518 add_general (&gr, &stack_size, ainfo, FALSE);
1523 case MONO_TYPE_MVAR:
1524 /* gsharedvt arguments are passed by ref */
1525 g_assert (mini_is_gsharedvt_type_gsctx (gsctx, simpletype));
1526 add_general (&gr, &stack_size, ainfo, TRUE);
1527 switch (ainfo->storage) {
1528 case RegTypeGeneral:
1529 ainfo->storage = RegTypeGSharedVtInReg;
1532 ainfo->storage = RegTypeGSharedVtOnStack;
1535 g_assert_not_reached ();
1540 g_error ("Can't trampoline 0x%x", sig->params [i]->type);
1544 /* Handle the case where there are no implicit arguments */
1545 if ((sig->call_convention == MONO_CALL_VARARG) && (i == sig->sentinelpos)) {
1546 /* Prevent implicit arguments and sig_cookie from
1547 being passed in registers */
1550 /* Emit the signature cookie just before the implicit arguments */
1551 add_general (&gr, &stack_size, &cinfo->sig_cookie, TRUE);
/* Finally classify the return value storage. */
1555 simpletype = mini_type_get_underlying_type (gsctx, sig->ret);
1556 switch (simpletype->type) {
1557 case MONO_TYPE_BOOLEAN:
1562 case MONO_TYPE_CHAR:
1568 case MONO_TYPE_FNPTR:
1569 case MONO_TYPE_CLASS:
1570 case MONO_TYPE_OBJECT:
1571 case MONO_TYPE_SZARRAY:
1572 case MONO_TYPE_ARRAY:
1573 case MONO_TYPE_STRING:
1574 cinfo->ret.storage = RegTypeGeneral;
1575 cinfo->ret.reg = ARMREG_R0;
/* 64-bit integers come back in the R0/R1 pair. */
1579 cinfo->ret.storage = RegTypeIRegPair;
1580 cinfo->ret.reg = ARMREG_R0;
1584 cinfo->ret.storage = RegTypeFP;
/* Hard-float returns FP values in D0/S0; soft ABIs use core registers. */
1586 if (IS_HARD_FLOAT) {
1587 cinfo->ret.reg = ARM_VFP_F0;
1589 cinfo->ret.reg = ARMREG_R0;
1593 case MONO_TYPE_GENERICINST:
1594 if (!mono_type_generic_inst_is_valuetype (simpletype)) {
1595 cinfo->ret.storage = RegTypeGeneral;
1596 cinfo->ret.reg = ARMREG_R0;
1599 // FIXME: Only for variable types
1600 if (mini_is_gsharedvt_type_gsctx (gsctx, simpletype)) {
1601 cinfo->ret.storage = RegTypeStructByAddr;
1602 g_assert (cinfo->vtype_retaddr);
1606 case MONO_TYPE_VALUETYPE:
1607 case MONO_TYPE_TYPEDBYREF:
/* Preserve a RegTypeStructByVal decision made earlier for small pinvoke structs. */
1608 if (cinfo->ret.storage != RegTypeStructByVal)
1609 cinfo->ret.storage = RegTypeStructByAddr;
1612 case MONO_TYPE_MVAR:
1613 g_assert (mini_is_gsharedvt_type_gsctx (gsctx, simpletype));
1614 cinfo->ret.storage = RegTypeStructByAddr;
1615 g_assert (cinfo->vtype_retaddr);
1617 case MONO_TYPE_VOID:
1620 g_error ("Can't handle as return value 0x%x", sig->ret->type);
1624 /* align stack size to 8 */
1625 DEBUG (printf (" stack size: %d (%d)\n", (stack_size + 15) & ~15, stack_size));
1626 stack_size = (stack_size + 7) & ~7;
1628 cinfo->stack_usage = stack_size;
/*
 * debug_omit_fp:
 *
 *   Debugging helper: defers to mono_debug_count () so frame-pointer
 * omission can be bisected at runtime via the debug-count mechanism.
 */
1635 debug_omit_fp (void)
1638 return mono_debug_count ();
1645 * mono_arch_compute_omit_fp:
1647 * Determine whenever the frame pointer can be eliminated.
1650 mono_arch_compute_omit_fp (MonoCompile *cfg)
1652 MonoMethodSignature *sig;
1653 MonoMethodHeader *header;
/* Computed at most once per compile; subsequent calls return early. */
1657 if (cfg->arch.omit_fp_computed)
1660 header = cfg->header;
1662 sig = mono_method_signature (cfg->method);
/* Cache the call info on the compile so later phases reuse it. */
1664 if (!cfg->arch.cinfo)
1665 cfg->arch.cinfo = get_call_info (cfg->generic_sharing_context, cfg->mempool, sig);
1666 cinfo = cfg->arch.cinfo;
1669 * FIXME: Remove some of the restrictions.
/* Start optimistic, then veto FP omission for each blocking condition. */
1671 cfg->arch.omit_fp = TRUE;
1672 cfg->arch.omit_fp_computed = TRUE;
1674 if (cfg->disable_omit_fp)
1675 cfg->arch.omit_fp = FALSE;
1676 if (!debug_omit_fp ())
1677 cfg->arch.omit_fp = FALSE;
/* Saving an LMF, alloca, exception clauses, a param area or varargs all
 * require a stable frame pointer. */
1679 if (cfg->method->save_lmf)
1680 cfg->arch.omit_fp = FALSE;
1682 if (cfg->flags & MONO_CFG_HAS_ALLOCA)
1683 cfg->arch.omit_fp = FALSE;
1684 if (header->num_clauses)
1685 cfg->arch.omit_fp = FALSE;
1686 if (cfg->param_area)
1687 cfg->arch.omit_fp = FALSE;
1688 if (!sig->pinvoke && (sig->call_convention == MONO_CALL_VARARG))
1689 cfg->arch.omit_fp = FALSE;
/* Tracing/profiling enter-leave hooks also need the frame pointer. */
1690 if ((mono_jit_trace_calls != NULL && mono_trace_eval (cfg->method)) ||
1691 (cfg->prof_options & MONO_PROFILE_ENTER_LEAVE))
1692 cfg->arch.omit_fp = FALSE;
1693 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
1694 ArgInfo *ainfo = &cinfo->args [i];
1696 if (ainfo->storage == RegTypeBase || ainfo->storage == RegTypeBaseGen || ainfo->storage == RegTypeStructByVal) {
1698 * The stack offset can only be determined when the frame
1701 cfg->arch.omit_fp = FALSE;
/* Accumulate local variable sizes; presumably used by a threshold check
 * not visible in this excerpt — TODO confirm against the full source. */
1706 for (i = cfg->locals_start; i < cfg->num_varinfo; i++) {
1707 MonoInst *ins = cfg->varinfo [i];
1710 locals_size += mono_type_size (ins->inst_vtype, &ialign);
1715 * Set var information according to the calling convention. arm version.
1716 * The locals var stuff should most likely be split in another method.
1719 mono_arch_allocate_vars (MonoCompile *cfg)
1721 MonoMethodSignature *sig;
1722 MonoMethodHeader *header;
1724 int i, offset, size, align, curinst;
1728 sig = mono_method_signature (cfg->method);
1730 if (!cfg->arch.cinfo)
1731 cfg->arch.cinfo = get_call_info (cfg->generic_sharing_context, cfg->mempool, sig);
1732 cinfo = cfg->arch.cinfo;
1734 mono_arch_compute_omit_fp (cfg);
/* Pick the frame register: SP when the frame pointer is omitted, FP otherwise. */
1736 if (cfg->arch.omit_fp)
1737 cfg->frame_reg = ARMREG_SP;
1739 cfg->frame_reg = ARMREG_FP;
1741 cfg->flags |= MONO_CFG_HAS_SPILLUP;
1743 /* allow room for the vararg method args: void* and long/double */
1744 if (mono_jit_trace_calls != NULL && mono_trace_eval (cfg->method))
1745 cfg->param_area = MAX (cfg->param_area, sizeof (gpointer)*8);
1747 header = cfg->header;
1749 /* See mono_arch_get_global_int_regs () */
1750 if (cfg->flags & MONO_CFG_HAS_CALLS)
1751 cfg->uses_rgctx_reg = TRUE;
1753 if (cfg->frame_reg != ARMREG_SP)
1754 cfg->used_int_regs |= 1 << cfg->frame_reg;
1756 if (cfg->compile_aot || cfg->uses_rgctx_reg || COMPILE_LLVM (cfg))
1757 /* V5 is reserved for passing the vtable/rgctx/IMT method */
1758 cfg->used_int_regs |= (1 << ARMREG_V5);
/* Scalar returns live directly in R0 rather than a stack slot. */
1762 if (!MONO_TYPE_ISSTRUCT (sig->ret) && !cinfo->vtype_retaddr) {
1763 if (sig->ret->type != MONO_TYPE_VOID) {
1764 cfg->ret->opcode = OP_REGVAR;
1765 cfg->ret->inst_c0 = ARMREG_R0;
1768 /* local vars are at a positive offset from the stack pointer */
1770 * also note that if the function uses alloca, we use FP
1771 * to point at the local variables.
1773 offset = 0; /* linkage area */
1774 /* align the offset to 16 bytes: not sure this is needed here */
1776 //offset &= ~(8 - 1);
1778 /* add parameter area size for called functions */
1779 offset += cfg->param_area;
1782 if (cfg->flags & MONO_CFG_HAS_FPOUT)
1785 /* allow room to save the return value */
1786 if (mono_jit_trace_calls != NULL && mono_trace_eval (cfg->method))
1789 /* the MonoLMF structure is stored just below the stack pointer */
/* Struct-by-value returns get a pointer-aligned slot at a negative offset. */
1790 if (cinfo->ret.storage == RegTypeStructByVal) {
1791 cfg->ret->opcode = OP_REGOFFSET;
1792 cfg->ret->inst_basereg = cfg->frame_reg;
1793 offset += sizeof (gpointer) - 1;
1794 offset &= ~(sizeof (gpointer) - 1);
1795 cfg->ret->inst_offset = - offset;
1796 offset += sizeof(gpointer);
1797 } else if (cinfo->vtype_retaddr) {
/* Hidden vret address argument gets its own frame slot. */
1798 ins = cfg->vret_addr;
1799 offset += sizeof(gpointer) - 1;
1800 offset &= ~(sizeof(gpointer) - 1);
1801 ins->inst_offset = offset;
1802 ins->opcode = OP_REGOFFSET;
1803 ins->inst_basereg = cfg->frame_reg;
1804 if (G_UNLIKELY (cfg->verbose_level > 1)) {
1805 printf ("vret_addr =");
1806 mono_print_ins (cfg->vret_addr);
1808 offset += sizeof(gpointer);
1811 /* Allocate these first so they have a small offset, OP_SEQ_POINT depends on this */
1812 if (cfg->arch.seq_point_info_var) {
1815 ins = cfg->arch.seq_point_info_var;
1819 offset += align - 1;
1820 offset &= ~(align - 1);
1821 ins->opcode = OP_REGOFFSET;
1822 ins->inst_basereg = cfg->frame_reg;
1823 ins->inst_offset = offset;
1826 ins = cfg->arch.ss_trigger_page_var;
1829 offset += align - 1;
1830 offset &= ~(align - 1);
1831 ins->opcode = OP_REGOFFSET;
1832 ins->inst_basereg = cfg->frame_reg;
1833 ins->inst_offset = offset;
/* Soft-breakpoint sequence point variables: read flag, single-step method,
 * breakpoint method — each gets an aligned frame slot. */
1837 if (cfg->arch.seq_point_read_var) {
1840 ins = cfg->arch.seq_point_read_var;
1844 offset += align - 1;
1845 offset &= ~(align - 1);
1846 ins->opcode = OP_REGOFFSET;
1847 ins->inst_basereg = cfg->frame_reg;
1848 ins->inst_offset = offset;
1851 ins = cfg->arch.seq_point_ss_method_var;
1854 offset += align - 1;
1855 offset &= ~(align - 1);
1856 ins->opcode = OP_REGOFFSET;
1857 ins->inst_basereg = cfg->frame_reg;
1858 ins->inst_offset = offset;
1861 ins = cfg->arch.seq_point_bp_method_var;
1864 offset += align - 1;
1865 offset &= ~(align - 1);
1866 ins->opcode = OP_REGOFFSET;
1867 ins->inst_basereg = cfg->frame_reg;
1868 ins->inst_offset = offset;
1872 cfg->locals_min_stack_offset = offset;
/* Assign frame slots to every live local that is not already in a register. */
1874 curinst = cfg->locals_start;
1875 for (i = curinst; i < cfg->num_varinfo; ++i) {
1878 ins = cfg->varinfo [i];
1879 if ((ins->flags & MONO_INST_IS_DEAD) || ins->opcode == OP_REGVAR || ins->opcode == OP_REGOFFSET)
1882 t = ins->inst_vtype;
1883 if (cfg->gsharedvt && mini_is_gsharedvt_variable_type (cfg, t))
1886 /* inst->backend.is_pinvoke indicates native sized value types, this is used by the
1887 * pinvoke wrappers when they call functions returning structure */
1888 if (ins->backend.is_pinvoke && MONO_TYPE_ISSTRUCT (t) && t->type != MONO_TYPE_TYPEDBYREF) {
1889 size = mono_class_native_size (mono_class_from_mono_type (t), &ualign);
1893 size = mono_type_size (t, &align);
1895 /* FIXME: if a structure is misaligned, our memcpy doesn't work,
1896 * since it loads/stores misaligned words, which don't do the right thing.
1898 if (align < 4 && size >= 4)
/* Mark any alignment padding as holding no GC references. */
1900 if (ALIGN_TO (offset, align) > ALIGN_TO (offset, 4))
1901 mini_gc_set_slot_type_from_fp (cfg, ALIGN_TO (offset, 4), SLOT_NOREF);
1902 offset += align - 1;
1903 offset &= ~(align - 1);
1904 ins->opcode = OP_REGOFFSET;
1905 ins->inst_offset = offset;
1906 ins->inst_basereg = cfg->frame_reg;
1908 //g_print ("allocating local %d to %d\n", i, inst->inst_offset);
1911 cfg->locals_max_stack_offset = offset;
/* 'this' (when present) gets a frame slot unless already a register var. */
1915 ins = cfg->args [curinst];
1916 if (ins->opcode != OP_REGVAR) {
1917 ins->opcode = OP_REGOFFSET;
1918 ins->inst_basereg = cfg->frame_reg;
1919 offset += sizeof (gpointer) - 1;
1920 offset &= ~(sizeof (gpointer) - 1);
1921 ins->inst_offset = offset;
1922 offset += sizeof (gpointer);
1927 if (sig->call_convention == MONO_CALL_VARARG) {
1931 /* Allocate a local slot to hold the sig cookie address */
1932 offset += align - 1;
1933 offset &= ~(align - 1);
1934 cfg->sig_cookie = offset;
/* Frame slots for the remaining incoming arguments. */
1938 for (i = 0; i < sig->param_count; ++i) {
1939 ins = cfg->args [curinst];
1941 if (ins->opcode != OP_REGVAR) {
1942 ins->opcode = OP_REGOFFSET;
1943 ins->inst_basereg = cfg->frame_reg;
1944 size = mini_type_stack_size_full (cfg->generic_sharing_context, sig->params [i], &ualign, sig->pinvoke);
1946 /* FIXME: if a structure is misaligned, our memcpy doesn't work,
1947 * since it loads/stores misaligned words, which don't do the right thing.
1949 if (align < 4 && size >= 4)
1951 /* The code in the prolog () stores words when storing vtypes received in a register */
1952 if (MONO_TYPE_ISSTRUCT (sig->params [i]))
1954 if (ALIGN_TO (offset, align) > ALIGN_TO (offset, 4))
1955 mini_gc_set_slot_type_from_fp (cfg, ALIGN_TO (offset, 4), SLOT_NOREF);
1956 offset += align - 1;
1957 offset &= ~(align - 1);
1958 ins->inst_offset = offset;
1964 /* align the offset to 8 bytes */
1965 if (ALIGN_TO (offset, 8) > ALIGN_TO (offset, 4))
1966 mini_gc_set_slot_type_from_fp (cfg, ALIGN_TO (offset, 4), SLOT_NOREF);
1971 cfg->stack_offset = offset;
/*
 * mono_arch_create_vars:
 *
 *   Create the architecture-specific variables needed by this method:
 * the hidden vret address argument for value-type returns, and the
 * variables backing sequence points (soft breakpoints or AOT).
 */
1975 mono_arch_create_vars (MonoCompile *cfg)
1977 MonoMethodSignature *sig;
1980 sig = mono_method_signature (cfg->method);
1982 if (!cfg->arch.cinfo)
1983 cfg->arch.cinfo = get_call_info (cfg->generic_sharing_context, cfg->mempool, sig);
1984 cinfo = cfg->arch.cinfo;
/* Small struct returned in registers: keep the ret var as a local. */
1986 if (cinfo->ret.storage == RegTypeStructByVal)
1987 cfg->ret_var_is_local = TRUE;
1989 if (cinfo->vtype_retaddr) {
/* Hidden argument carrying the address where the vtype result is stored. */
1990 cfg->vret_addr = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_ARG);
1991 if (G_UNLIKELY (cfg->verbose_level > 1)) {
1992 printf ("vret_addr = ");
1993 mono_print_ins (cfg->vret_addr);
1997 if (cfg->gen_seq_points) {
1998 if (cfg->soft_breakpoints) {
/* Soft breakpoints: three volatile locals so the debugger can patch state. */
1999 MonoInst *ins = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
2000 ins->flags |= MONO_INST_VOLATILE;
2001 cfg->arch.seq_point_read_var = ins;
2003 ins = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
2004 ins->flags |= MONO_INST_VOLATILE;
2005 cfg->arch.seq_point_ss_method_var = ins;
2007 ins = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
2008 ins->flags |= MONO_INST_VOLATILE;
2009 cfg->arch.seq_point_bp_method_var = ins;
/* Soft breakpoints and AOT compilation are mutually exclusive here. */
2011 g_assert (!cfg->compile_aot);
2012 } else if (cfg->compile_aot) {
2013 MonoInst *ins = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
2014 ins->flags |= MONO_INST_VOLATILE;
2015 cfg->arch.seq_point_info_var = ins;
2017 /* Allocate a separate variable for this to save 1 load per seq point */
2018 ins = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
2019 ins->flags |= MONO_INST_VOLATILE;
2020 cfg->arch.ss_trigger_page_var = ins;
/*
 * emit_sig_cookie:
 *
 *   Emit IR that stores the vararg signature cookie at its stack slot.
 * The cookie is a truncated copy of the call signature covering only the
 * arguments after the sentinel, so mono_ArgIterator_Setup can walk them.
 * Skipped entirely for tail calls.
 */
2026 emit_sig_cookie (MonoCompile *cfg, MonoCallInst *call, CallInfo *cinfo)
2028 MonoMethodSignature *tmp_sig;
2031 if (call->tail_call)
/* The cookie is always passed on the stack on ARM. */
2034 g_assert (cinfo->sig_cookie.storage == RegTypeBase);
2037 * mono_ArgIterator_Setup assumes the signature cookie is
2038 * passed first and all the arguments which were before it are
2039 * passed on the stack after the signature. So compensate by
2040 * passing a different signature.
2042 tmp_sig = mono_metadata_signature_dup (call->signature);
2043 tmp_sig->param_count -= call->signature->sentinelpos;
2044 tmp_sig->sentinelpos = 0;
2045 memcpy (tmp_sig->params, call->signature->params + call->signature->sentinelpos, tmp_sig->param_count * sizeof (MonoType*));
2047 sig_reg = mono_alloc_ireg (cfg);
2048 MONO_EMIT_NEW_SIGNATURECONST (cfg, sig_reg, tmp_sig);
2050 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, ARMREG_SP, cinfo->sig_cookie.offset, sig_reg);
/*
 * mono_arch_get_llvm_call_info:
 *
 *   Translate the ARM CallInfo for SIG into the LLVMCallInfo format used by
 * the LLVM backend.  Argument/return conventions LLVM cannot express are
 * reported by setting cfg->disable_llvm with an explanatory message, which
 * makes the method fall back to the mono JIT.
 */
2055 mono_arch_get_llvm_call_info (MonoCompile *cfg, MonoMethodSignature *sig)
2060 LLVMCallInfo *linfo;
2062 n = sig->param_count + sig->hasthis;
2064 cinfo = get_call_info (cfg->generic_sharing_context, cfg->mempool, sig);
2066 linfo = mono_mempool_alloc0 (cfg->mempool, sizeof (LLVMCallInfo) + (sizeof (LLVMArgInfo) * n));
2069 * LLVM always uses the native ABI while we use our own ABI, the
2070 * only difference is the handling of vtypes:
2071 * - we only pass/receive them in registers in some cases, and only
2072 * in 1 or 2 integer registers.
2074 if (cinfo->vtype_retaddr) {
2075 /* Vtype returned using a hidden argument */
2076 linfo->ret.storage = LLVMArgVtypeRetAddr;
2077 linfo->vret_arg_index = cinfo->vret_arg_index;
2078 } else if (cinfo->ret.storage != RegTypeGeneral && cinfo->ret.storage != RegTypeNone && cinfo->ret.storage != RegTypeFP && cinfo->ret.storage != RegTypeIRegPair) {
/* Any other return convention is unsupported by the LLVM path. */
2079 cfg->exception_message = g_strdup ("unknown ret conv");
2080 cfg->disable_llvm = TRUE;
2084 for (i = 0; i < n; ++i) {
2085 ainfo = cinfo->args + i;
2087 linfo->args [i].storage = LLVMArgNone;
2089 switch (ainfo->storage) {
2090 case RegTypeGeneral:
2091 case RegTypeIRegPair:
2093 linfo->args [i].storage = LLVMArgInIReg;
2095 case RegTypeStructByVal:
2096 // FIXME: Passing entirely on the stack or split reg/stack
/* Only vtypes fully contained in at most two registers are supported. */
2097 if (ainfo->vtsize == 0 && ainfo->size <= 2) {
2098 linfo->args [i].storage = LLVMArgVtypeInReg;
2099 linfo->args [i].pair_storage [0] = LLVMArgInIReg;
2100 if (ainfo->size == 2)
2101 linfo->args [i].pair_storage [1] = LLVMArgInIReg;
2103 linfo->args [i].pair_storage [1] = LLVMArgNone;
2105 cfg->exception_message = g_strdup_printf ("vtype-by-val on stack");
2106 cfg->disable_llvm = TRUE;
2110 cfg->exception_message = g_strdup_printf ("ainfo->storage (%d)", ainfo->storage);
2111 cfg->disable_llvm = TRUE;
/*
 * mono_arch_emit_call:
 *
 *   Emit IR that marshals the arguments of CALL into their ABI locations
 * as computed by get_call_info (): moves into R0-R3/VFP registers,
 * stores to the outgoing stack area, and the vararg signature cookie.
 * NOTE(review): this excerpt elides some lines of the original body, so the
 * comments below only describe the logic that is visible here.
 */
2121 mono_arch_emit_call (MonoCompile *cfg, MonoCallInst *call)
2124 MonoMethodSignature *sig;
2128 sig = call->signature;
2129 n = sig->param_count + sig->hasthis;
/* No mempool passed — this CallInfo is heap-allocated. */
2131 cinfo = get_call_info (cfg->generic_sharing_context, NULL, sig);
2133 for (i = 0; i < n; ++i) {
2134 ArgInfo *ainfo = cinfo->args + i;
/* Implicit 'this' is treated as a native int argument. */
2137 if (i >= sig->hasthis)
2138 t = sig->params [i - sig->hasthis];
2140 t = &mono_defaults.int_class->byval_arg;
2141 t = mini_type_get_underlying_type (cfg->generic_sharing_context, t);
2143 if ((sig->call_convention == MONO_CALL_VARARG) && (i == sig->sentinelpos)) {
2144 /* Emit the signature cookie just before the implicit arguments */
2145 emit_sig_cookie (cfg, call, cinfo);
2148 in = call->args [i];
2150 switch (ainfo->storage) {
2151 case RegTypeGeneral:
2152 case RegTypeIRegPair:
/* 64-bit ints occupy a register pair: low word in reg, high in reg+1. */
2153 if (!t->byref && ((t->type == MONO_TYPE_I8) || (t->type == MONO_TYPE_U8))) {
2154 MONO_INST_NEW (cfg, ins, OP_MOVE);
2155 ins->dreg = mono_alloc_ireg (cfg);
2156 ins->sreg1 = in->dreg + 1;
2157 MONO_ADD_INS (cfg->cbb, ins);
2158 mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, ainfo->reg, FALSE);
2160 MONO_INST_NEW (cfg, ins, OP_MOVE);
2161 ins->dreg = mono_alloc_ireg (cfg);
2162 ins->sreg1 = in->dreg + 2;
2163 MONO_ADD_INS (cfg->cbb, ins);
2164 mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, ainfo->reg + 1, FALSE);
2165 } else if (!t->byref && ((t->type == MONO_TYPE_R8) || (t->type == MONO_TYPE_R4))) {
2166 if (ainfo->size == 4) {
2167 if (IS_SOFT_FLOAT) {
2168 /* mono_emit_call_args () have already done the r8->r4 conversion */
2169 /* The converted value is in an int vreg */
2170 MONO_INST_NEW (cfg, ins, OP_MOVE);
2171 ins->dreg = mono_alloc_ireg (cfg);
2172 ins->sreg1 = in->dreg;
2173 MONO_ADD_INS (cfg->cbb, ins);
2174 mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, ainfo->reg, FALSE);
/* VFP-with-soft-ABI: bounce the float through the param area to
 * reinterpret its bits in a core register. */
2178 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORER4_MEMBASE_REG, ARMREG_SP, (cfg->param_area - 8), in->dreg);
2179 creg = mono_alloc_ireg (cfg);
2180 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOAD_MEMBASE, creg, ARMREG_SP, (cfg->param_area - 8));
2181 mono_call_inst_add_outarg_reg (cfg, call, creg, ainfo->reg, FALSE);
2184 if (IS_SOFT_FLOAT) {
/* Soft float: split the double into its two 32-bit halves. */
2185 MONO_INST_NEW (cfg, ins, OP_FGETLOW32);
2186 ins->dreg = mono_alloc_ireg (cfg);
2187 ins->sreg1 = in->dreg;
2188 MONO_ADD_INS (cfg->cbb, ins);
2189 mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, ainfo->reg, FALSE);
2191 MONO_INST_NEW (cfg, ins, OP_FGETHIGH32);
2192 ins->dreg = mono_alloc_ireg (cfg);
2193 ins->sreg1 = in->dreg;
2194 MONO_ADD_INS (cfg->cbb, ins);
2195 mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, ainfo->reg + 1, FALSE);
/* VFP: spill the double and reload both words into core registers. */
2199 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORER8_MEMBASE_REG, ARMREG_SP, (cfg->param_area - 8), in->dreg);
2200 creg = mono_alloc_ireg (cfg);
2201 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOAD_MEMBASE, creg, ARMREG_SP, (cfg->param_area - 8));
2202 mono_call_inst_add_outarg_reg (cfg, call, creg, ainfo->reg, FALSE);
2203 creg = mono_alloc_ireg (cfg);
2204 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOAD_MEMBASE, creg, ARMREG_SP, (cfg->param_area - 8 + 4));
2205 mono_call_inst_add_outarg_reg (cfg, call, creg, ainfo->reg + 1, FALSE);
2208 cfg->flags |= MONO_CFG_HAS_FPOUT;
/* Plain pointer-sized argument in a single core register. */
2210 MONO_INST_NEW (cfg, ins, OP_MOVE);
2211 ins->dreg = mono_alloc_ireg (cfg);
2212 ins->sreg1 = in->dreg;
2213 MONO_ADD_INS (cfg->cbb, ins);
2215 mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, ainfo->reg, FALSE);
2218 case RegTypeStructByAddr:
2221 /* FIXME: where si the data allocated? */
2222 arg->backend.reg3 = ainfo->reg;
2223 call->used_iregs |= 1 << ainfo->reg;
/* This path is believed unused — hard fail if ever taken. */
2224 g_assert_not_reached ();
2227 case RegTypeStructByVal:
2228 case RegTypeGSharedVtInReg:
2229 case RegTypeGSharedVtOnStack:
/* Delegate vtype marshalling to mono_arch_emit_outarg_vt () via OP_OUTARG_VT. */
2230 MONO_INST_NEW (cfg, ins, OP_OUTARG_VT);
2231 ins->opcode = OP_OUTARG_VT;
2232 ins->sreg1 = in->dreg;
2233 ins->klass = in->klass;
2234 ins->inst_p0 = call;
2235 ins->inst_p1 = mono_mempool_alloc (cfg->mempool, sizeof (ArgInfo));
2236 memcpy (ins->inst_p1, ainfo, sizeof (ArgInfo));
2237 mono_call_inst_add_outarg_vt (cfg, call, ins);
2238 MONO_ADD_INS (cfg->cbb, ins);
/* RegTypeBase: argument lives entirely in the outgoing stack area. */
2241 if (!t->byref && ((t->type == MONO_TYPE_I8) || (t->type == MONO_TYPE_U8))) {
2242 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, ARMREG_SP, ainfo->offset, in->dreg);
2243 } else if (!t->byref && ((t->type == MONO_TYPE_R4) || (t->type == MONO_TYPE_R8))) {
2244 if (t->type == MONO_TYPE_R8) {
2245 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORER8_MEMBASE_REG, ARMREG_SP, ainfo->offset, in->dreg);
2248 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, ARMREG_SP, ainfo->offset, in->dreg);
2250 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORER4_MEMBASE_REG, ARMREG_SP, ainfo->offset, in->dreg);
2253 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, ARMREG_SP, ainfo->offset, in->dreg);
2256 case RegTypeBaseGen:
/* Argument split between the last core register (R3) and the stack. */
2257 if (!t->byref && ((t->type == MONO_TYPE_I8) || (t->type == MONO_TYPE_U8))) {
2258 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, ARMREG_SP, ainfo->offset, (G_BYTE_ORDER == G_BIG_ENDIAN) ? in->dreg + 1 : in->dreg + 2);
2259 MONO_INST_NEW (cfg, ins, OP_MOVE);
2260 ins->dreg = mono_alloc_ireg (cfg);
2261 ins->sreg1 = G_BYTE_ORDER == G_BIG_ENDIAN ? in->dreg + 2 : in->dreg + 1;
2262 MONO_ADD_INS (cfg->cbb, ins);
2263 mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, ARMREG_R3, FALSE);
2264 } else if (!t->byref && (t->type == MONO_TYPE_R8)) {
2267 /* This should work for soft-float as well */
2269 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORER8_MEMBASE_REG, ARMREG_SP, (cfg->param_area - 8), in->dreg);
2270 creg = mono_alloc_ireg (cfg);
2271 mono_call_inst_add_outarg_reg (cfg, call, creg, ARMREG_R3, FALSE);
2272 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOAD_MEMBASE, creg, ARMREG_SP, (cfg->param_area - 8));
2273 creg = mono_alloc_ireg (cfg);
2274 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOAD_MEMBASE, creg, ARMREG_SP, (cfg->param_area - 4));
2275 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, ARMREG_SP, ainfo->offset, creg);
2276 cfg->flags |= MONO_CFG_HAS_FPOUT;
2278 g_assert_not_reached ();
/* RegTypeFP: hard-float VFP register argument. */
2282 int fdreg = mono_alloc_freg (cfg);
2284 if (ainfo->size == 8) {
2285 MONO_INST_NEW (cfg, ins, OP_FMOVE);
2286 ins->sreg1 = in->dreg;
2288 MONO_ADD_INS (cfg->cbb, ins);
2290 mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, ainfo->reg, TRUE);
2295 * Mono's register allocator doesn't speak single-precision registers that
2296 * overlap double-precision registers (i.e. armhf). So we have to work around
2297 * the register allocator and load the value from memory manually.
2299 * So we create a variable for the float argument and an instruction to store
2300 * the argument into the variable. We then store the list of these arguments
2301 * in cfg->float_args. This list is then used by emit_float_args later to
2302 * pass the arguments in the various call opcodes.
2304 * This is not very nice, and we should really try to fix the allocator.
2307 MonoInst *float_arg = mono_compile_create_var (cfg, &mono_defaults.single_class->byval_arg, OP_LOCAL);
2309 /* Make sure the instruction isn't seen as pointless and removed.
2311 float_arg->flags |= MONO_INST_VOLATILE;
2313 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, float_arg->dreg, in->dreg);
2315 /* We use the dreg to look up the instruction later. The hreg is used to
2316 * emit the instruction that loads the value into the FP reg.
2318 fad = mono_mempool_alloc0 (cfg->mempool, sizeof (FloatArgData));
2319 fad->vreg = float_arg->dreg;
2320 fad->hreg = ainfo->reg;
2322 call->float_args = g_slist_append_mempool (cfg->mempool, call->float_args, fad);
2325 call->used_iregs |= 1 << ainfo->reg;
2326 cfg->flags |= MONO_CFG_HAS_FPOUT;
2330 g_assert_not_reached ();
2334 /* Handle the case where there are no implicit arguments */
2335 if (!sig->pinvoke && (sig->call_convention == MONO_CALL_VARARG) && (n == sig->sentinelpos))
2336 emit_sig_cookie (cfg, call, cinfo);
2338 if (cinfo->ret.storage == RegTypeStructByVal) {
2339 /* The JIT will transform this into a normal call */
2340 call->vret_in_reg = TRUE;
2341 } else if (cinfo->vtype_retaddr) {
/* Load the vret buffer address into its argument register. */
2343 MONO_INST_NEW (cfg, vtarg, OP_MOVE);
2344 vtarg->sreg1 = call->vret_var->dreg;
2345 vtarg->dreg = mono_alloc_preg (cfg);
2346 MONO_ADD_INS (cfg->cbb, vtarg);
2348 mono_call_inst_add_outarg_reg (cfg, call, vtarg->dreg, cinfo->ret.reg, FALSE);
2351 call->stack_usage = cinfo->stack_usage;
/*
 * mono_arch_emit_outarg_vt:
 *
 *   Lower an OP_OUTARG_VT: marshal the value type SRC into its outgoing
 * location — by address for gsharedvt arguments, otherwise word-by-word
 * into registers with any overflow memcpy'd to the stack.
 */
2357 mono_arch_emit_outarg_vt (MonoCompile *cfg, MonoInst *ins, MonoInst *src)
2359 MonoCallInst *call = (MonoCallInst*)ins->inst_p0;
2360 ArgInfo *ainfo = ins->inst_p1;
2361 int ovf_size = ainfo->vtsize;
2362 int doffset = ainfo->offset;
2363 int struct_size = ainfo->struct_size;
2364 int i, soffset, dreg, tmpreg;
2366 if (ainfo->storage == RegTypeGSharedVtInReg) {
/* Pass the address of the vtype in a register. */
2368 mono_call_inst_add_outarg_reg (cfg, call, src->dreg, ainfo->reg, FALSE);
2371 if (ainfo->storage == RegTypeGSharedVtOnStack) {
2372 /* Pass by addr on stack */
2373 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, ARMREG_SP, ainfo->offset, src->dreg);
/* Copy the in-register part of the struct one word at a time. */
2378 for (i = 0; i < ainfo->size; ++i) {
2379 dreg = mono_alloc_ireg (cfg);
/* Sub-word tails must not read past the end of the struct, so load the
 * remaining 1/2/3 bytes individually instead of a full word. */
2380 switch (struct_size) {
2382 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, dreg, src->dreg, soffset);
2385 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, dreg, src->dreg, soffset);
2388 tmpreg = mono_alloc_ireg (cfg);
/* Assemble 3 bytes little-endian: b0 | b1<<8 | b2<<16. */
2389 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, dreg, src->dreg, soffset);
2390 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, tmpreg, src->dreg, soffset + 1);
2391 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, tmpreg, tmpreg, 8);
2392 MONO_EMIT_NEW_BIALU (cfg, OP_IOR, dreg, dreg, tmpreg);
2393 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, tmpreg, src->dreg, soffset + 2);
2394 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, tmpreg, tmpreg, 16);
2395 MONO_EMIT_NEW_BIALU (cfg, OP_IOR, dreg, dreg, tmpreg);
2398 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, dreg, src->dreg, soffset);
2401 mono_call_inst_add_outarg_reg (cfg, call, dreg, ainfo->reg + i, FALSE);
2402 soffset += sizeof (gpointer);
2403 struct_size -= sizeof (gpointer);
2405 //g_print ("vt size: %d at R%d + %d\n", doffset, vt->inst_basereg, vt->inst_offset);
/* Whatever did not fit in registers is block-copied to the stack area. */
2407 mini_emit_memcpy (cfg, ARMREG_SP, doffset, src->dreg, soffset, MIN (ovf_size * sizeof (gpointer), struct_size), struct_size < 4 ? 1 : 4);
/*
 * mono_arch_emit_setret:
 *
 *   Emit IR that moves VAL into the return-value location of METHOD:
 * OP_SETLRET for 64-bit pairs, OP_SETFRET for floating point (per the
 * configured FPU model), plain OP_MOVE otherwise.
 */
2411 mono_arch_emit_setret (MonoCompile *cfg, MonoMethod *method, MonoInst *val)
2413 MonoType *ret = mini_type_get_underlying_type (cfg->generic_sharing_context, mono_method_signature (method)->ret);
2416 if (ret->type == MONO_TYPE_I8 || ret->type == MONO_TYPE_U8) {
/* LLVM models the long return itself; the JIT needs an explicit pair set. */
2419 if (COMPILE_LLVM (cfg)) {
2420 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, cfg->ret->dreg, val->dreg);
2422 MONO_INST_NEW (cfg, ins, OP_SETLRET);
2423 ins->sreg1 = val->dreg + 1;
2424 ins->sreg2 = val->dreg + 2;
2425 MONO_ADD_INS (cfg->cbb, ins);
/* Dispatch on the runtime-selected FPU model. */
2430 case MONO_ARM_FPU_NONE:
2431 if (ret->type == MONO_TYPE_R8) {
2434 MONO_INST_NEW (cfg, ins, OP_SETFRET);
2435 ins->dreg = cfg->ret->dreg;
2436 ins->sreg1 = val->dreg;
2437 MONO_ADD_INS (cfg->cbb, ins);
2440 if (ret->type == MONO_TYPE_R4) {
2441 /* Already converted to an int in method_to_ir () */
2442 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, cfg->ret->dreg, val->dreg);
2446 case MONO_ARM_FPU_VFP:
2447 case MONO_ARM_FPU_VFP_HARD:
2448 if (ret->type == MONO_TYPE_R8 || ret->type == MONO_TYPE_R4) {
2451 MONO_INST_NEW (cfg, ins, OP_SETFRET);
2452 ins->dreg = cfg->ret->dreg;
2453 ins->sreg1 = val->dreg;
2454 MONO_ADD_INS (cfg->cbb, ins);
2459 g_assert_not_reached ();
/* Default: a single pointer-sized move into the return register. */
2463 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, cfg->ret->dreg, val->dreg);
2466 #endif /* #ifndef DISABLE_JIT */
/*
 * mono_arch_is_inst_imm:
 *
 *   Whether IMM can be encoded as an immediate operand.
 * NOTE(review): the body is not visible in this excerpt — confirm the
 * returned policy against the full source before relying on it.
 */
2469 mono_arch_is_inst_imm (gint64 imm)
/* Maximum number of stack-resident argument words supported by the
 * dyn-call path, in addition to the R0-R3 parameter registers. */
2474 #define DYN_CALL_STACK_ARGS 6
/* Per-invocation argument buffer for mono_arch_start_dyn_call ():
 * the signature plus a flat array covering both register and stack args. */
2477 MonoMethodSignature *sig;
2482 mgreg_t regs [PARAM_REGS + DYN_CALL_STACK_ARGS];
/*
 * dyn_call_supported:
 *
 *   Return whether SIG can be invoked through the dynamic-call machinery:
 * every argument must fit in the DynCallArgs regs array and use a storage
 * class the start/finish helpers understand.
 */
2488 dyn_call_supported (CallInfo *cinfo, MonoMethodSignature *sig)
/* Too many arguments to fit the fixed-size regs buffer. */
2492 if (sig->hasthis + sig->param_count > PARAM_REGS + DYN_CALL_STACK_ARGS)
2495 switch (cinfo->ret.storage) {
2497 case RegTypeGeneral:
2498 case RegTypeIRegPair:
2499 case RegTypeStructByAddr:
2510 for (i = 0; i < cinfo->nargs; ++i) {
2511 switch (cinfo->args [i].storage) {
2512 case RegTypeGeneral:
2514 case RegTypeIRegPair:
2517 if (cinfo->args [i].offset >= (DYN_CALL_STACK_ARGS * sizeof (gpointer)))
2520 case RegTypeStructByVal:
/* Split structs must fit entirely within the regs buffer too. */
2521 if (cinfo->args [i].reg + cinfo->args [i].vtsize >= PARAM_REGS + DYN_CALL_STACK_ARGS)
2529 // FIXME: Can't use cinfo only as it doesn't contain info about I8/float */
2530 for (i = 0; i < sig->param_count; ++i) {
2531 MonoType *t = sig->params [i];
/*
 * mono_arch_dyn_call_prepare:
 *
 *   Build the ArchDynCallInfo for SIG, or return NULL when the signature
 * is not supported by the dyn-call path.  The result owns a heap-allocated
 * CallInfo and must be released with mono_arch_dyn_call_free ().
 */
2557 mono_arch_dyn_call_prepare (MonoMethodSignature *sig)
2559 ArchDynCallInfo *info;
2562 cinfo = get_call_info (NULL, NULL, sig);
2564 if (!dyn_call_supported (cinfo, sig)) {
2569 info = g_new0 (ArchDynCallInfo, 1);
2570 // FIXME: Preprocess the info to speed up start_dyn_call ()
2572 info->cinfo = cinfo;
2574 return (MonoDynCallInfo*)info;
/* Release a descriptor created by mono_arch_dyn_call_prepare ().
 * Frees the embedded CallInfo (and, presumably, the descriptor itself on an
 * elided line — confirm against the full source). */
2578 mono_arch_dyn_call_free (MonoDynCallInfo *info)
2580 	ArchDynCallInfo *ainfo = (ArchDynCallInfo*)info;
2582 	g_free (ainfo->cinfo);
/* Marshal ARGS into the DynCallArgs register/stack image in BUF so the
 * dyn-call trampoline can perform the call described by INFO.
 * 'slot' selects the target entry in p->regs: register args use their
 * register index, stack args use PARAM_REGS + offset/4.
 * NOTE(review): many case labels/breaks are elided in this extract. */
2587 mono_arch_start_dyn_call (MonoDynCallInfo *info, gpointer **args, guint8 *ret, guint8 *buf, int buf_len)
2589 	ArchDynCallInfo *dinfo = (ArchDynCallInfo*)info;
2590 	DynCallArgs *p = (DynCallArgs*)buf;
2591 	int arg_index, greg, i, j, pindex;
2592 	MonoMethodSignature *sig = dinfo->sig;
2594 	g_assert (buf_len >= sizeof (DynCallArgs));
/* 'this' (or an early vtype-return address) occupies the first register slot. */
2603 	if (sig->hasthis || dinfo->cinfo->vret_arg_index == 1) {
2604 		p->regs [greg ++] = (mgreg_t)*(args [arg_index ++]);
2609 	if (dinfo->cinfo->vtype_retaddr)
2610 		p->regs [greg ++] = (mgreg_t)ret;
2612 	for (i = pindex; i < sig->param_count; i++) {
2613 		MonoType *t = mono_type_get_underlying_type (sig->params [i]);
2614 		gpointer *arg = args [arg_index ++];
2615 		ArgInfo *ainfo = &dinfo->cinfo->args [i + sig->hasthis];
2618 		if (ainfo->storage == RegTypeGeneral || ainfo->storage == RegTypeIRegPair || ainfo->storage == RegTypeStructByVal)
2620 		else if (ainfo->storage == RegTypeBase)
2621 			slot = PARAM_REGS + (ainfo->offset / 4);
2623 			g_assert_not_reached ();
2626 			p->regs [slot] = (mgreg_t)*arg;
2631 		case MONO_TYPE_STRING:
2632 		case MONO_TYPE_CLASS:
2633 		case MONO_TYPE_ARRAY:
2634 		case MONO_TYPE_SZARRAY:
2635 		case MONO_TYPE_OBJECT:
2639 			p->regs [slot] = (mgreg_t)*arg;
2641 		case MONO_TYPE_BOOLEAN:
2643 			p->regs [slot] = *(guint8*)arg;
2646 			p->regs [slot] = *(gint8*)arg;
2649 			p->regs [slot] = *(gint16*)arg;
2652 		case MONO_TYPE_CHAR:
2653 			p->regs [slot] = *(guint16*)arg;
2656 			p->regs [slot] = *(gint32*)arg;
2659 			p->regs [slot] = *(guint32*)arg;
/* 64-bit values occupy two consecutive slots. */
2663 			p->regs [slot ++] = (mgreg_t)arg [0];
2664 			p->regs [slot] = (mgreg_t)arg [1];
2667 			p->regs [slot] = *(mgreg_t*)arg;
2670 			p->regs [slot ++] = (mgreg_t)arg [0];
2671 			p->regs [slot] = (mgreg_t)arg [1];
2673 		case MONO_TYPE_GENERICINST:
2674 			if (MONO_TYPE_IS_REFERENCE (t)) {
2675 				p->regs [slot] = (mgreg_t)*arg;
2680 		case MONO_TYPE_VALUETYPE:
2681 			g_assert (ainfo->storage == RegTypeStructByVal);
2683 			if (ainfo->size == 0)
2684 				slot = PARAM_REGS + (ainfo->offset / 4);
/* Copy the struct word-by-word across its register/stack slots. */
2688 			for (j = 0; j < ainfo->size + ainfo->vtsize; ++j)
2689 				p->regs [slot ++] = ((mgreg_t*)arg) [j];
2692 			g_assert_not_reached ();
/* Copy the raw return value(s) captured by the dyn-call trampoline
 * (res/res2 in BUF) back into the caller-provided 'ret' buffer, widening or
 * narrowing according to the signature's return type.
 * NOTE(review): break statements and some case labels are elided here. */
2698 mono_arch_finish_dyn_call (MonoDynCallInfo *info, guint8 *buf)
2700 	ArchDynCallInfo *ainfo = (ArchDynCallInfo*)info;
2701 	MonoMethodSignature *sig = ((ArchDynCallInfo*)info)->sig;
2702 	guint8 *ret = ((DynCallArgs*)buf)->ret;
2703 	mgreg_t res = ((DynCallArgs*)buf)->res;
2704 	mgreg_t res2 = ((DynCallArgs*)buf)->res2;
2706 	switch (mono_type_get_underlying_type (sig->ret)->type) {
2707 	case MONO_TYPE_VOID:
2708 		*(gpointer*)ret = NULL;
2710 	case MONO_TYPE_STRING:
2711 	case MONO_TYPE_CLASS:
2712 	case MONO_TYPE_ARRAY:
2713 	case MONO_TYPE_SZARRAY:
2714 	case MONO_TYPE_OBJECT:
2718 		*(gpointer*)ret = (gpointer)res;
2724 	case MONO_TYPE_BOOLEAN:
2725 		*(guint8*)ret = res;
2728 		*(gint16*)ret = res;
2731 	case MONO_TYPE_CHAR:
2732 		*(guint16*)ret = res;
2735 		*(gint32*)ret = res;
2738 		*(guint32*)ret = res;
2742 		/* This handles endianness as well */
2743 		((gint32*)ret) [0] = res;
2744 		((gint32*)ret) [1] = res2;
2746 	case MONO_TYPE_GENERICINST:
2747 		if (MONO_TYPE_IS_REFERENCE (sig->ret)) {
2748 			*(gpointer*)ret = (gpointer)res;
2753 	case MONO_TYPE_VALUETYPE:
/* Vtype returns were written through the ret-address register; nothing to copy. */
2754 		g_assert (ainfo->cinfo->vtype_retaddr);
2759 		*(float*)ret = *(float*)&res;
2761 	case MONO_TYPE_R8: {
2768 		*(double*)ret = *(double*)&regs;
2772 		g_assert_not_reached ();
2779  * Allow tracing to work with this interface (with an optional argument)
/* Emit the enter-method trace call: r0 = method, r1 = NULL (frame pointer
 * placeholder), then indirect-call FUNC through r2. */
2783 mono_arch_instrument_prolog (MonoCompile *cfg, void *func, void *p, gboolean enable_arguments)
2787 	code = mono_arm_emit_load_imm (code, ARMREG_R0, (guint32)cfg->method);
2788 	ARM_MOV_REG_IMM8 (code, ARMREG_R1, 0); /* NULL ebp for now */
2789 	code = mono_arm_emit_load_imm (code, ARMREG_R2, (guint32)func);
2790 	code = emit_call_reg (code, ARMREG_R2);
/* Emit the leave-method trace call. The method's return value (r0/r1 or the
 * FP return) is saved to the frame before calling FUNC and restored after,
 * so tracing does not clobber it. save_mode picks how much to spill based on
 * the return type. */
2803 mono_arch_instrument_epilog_full (MonoCompile *cfg, void *func, void *p, gboolean enable_arguments, gboolean preserve_argument_registers)
2806 	int save_mode = SAVE_NONE;
2808 	MonoMethod *method = cfg->method;
2809 	int rtype = mini_type_get_underlying_type (cfg->generic_sharing_context, mono_method_signature (method)->ret)->type;
2810 	int save_offset = cfg->param_area;
2814 	offset = code - cfg->native_code;
2815 	/* we need about 16 instructions */
2816 	if (offset > (cfg->code_size - 16 * 4)) {
2817 		cfg->code_size *= 2;
2818 		cfg->native_code = g_realloc (cfg->native_code, cfg->code_size);
2819 		code = cfg->native_code + offset;
2822 	case MONO_TYPE_VOID:
2823 		/* special case string .ctor icall */
2824 		if (strcmp (".ctor", method->name) && method->klass == mono_defaults.string_class)
2825 			save_mode = SAVE_ONE;
2827 			save_mode = SAVE_NONE;
2831 		save_mode = SAVE_TWO;
2835 		save_mode = SAVE_FP;
2837 	case MONO_TYPE_VALUETYPE:
2838 		save_mode = SAVE_STRUCT;
2841 		save_mode = SAVE_ONE;
2845 	switch (save_mode) {
/* SAVE_TWO: 64-bit return in r0:r1. */
2847 		ARM_STR_IMM (code, ARMREG_R0, cfg->frame_reg, save_offset);
2848 		ARM_STR_IMM (code, ARMREG_R1, cfg->frame_reg, save_offset + 4);
2849 		if (enable_arguments) {
2850 			ARM_MOV_REG_REG (code, ARMREG_R2, ARMREG_R1);
2851 			ARM_MOV_REG_REG (code, ARMREG_R1, ARMREG_R0);
2855 		ARM_STR_IMM (code, ARMREG_R0, cfg->frame_reg, save_offset);
2856 		if (enable_arguments) {
2857 			ARM_MOV_REG_REG (code, ARMREG_R1, ARMREG_R0);
2861 		/* FIXME: what reg? */
2862 		if (enable_arguments) {
2863 			/* FIXME: what reg? */
2867 		if (enable_arguments) {
2868 			/* FIXME: get the actual address */
2869 			ARM_MOV_REG_REG (code, ARMREG_R1, ARMREG_R0);
2877 	code = mono_arm_emit_load_imm (code, ARMREG_R0, (guint32)cfg->method);
2878 	code = mono_arm_emit_load_imm (code, ARMREG_IP, (guint32)func);
2879 	code = emit_call_reg (code, ARMREG_IP);
/* Restore the spilled return value(s) after the trace call. */
2881 	switch (save_mode) {
2883 		ARM_LDR_IMM (code, ARMREG_R0, cfg->frame_reg, save_offset);
2884 		ARM_LDR_IMM (code, ARMREG_R1, cfg->frame_reg, save_offset + 4);
2887 		ARM_LDR_IMM (code, ARMREG_R0, cfg->frame_reg, save_offset);
2901  * The immediate field for cond branches is big enough for all reasonable methods
/* Emit a conditional branch to ins->inst_true_bb; the target is normally
 * unknown yet, so a 0-displacement B<cond> is emitted and a BB patch is
 * recorded (the direct-encoding path is disabled with '0 &&'). */
2903 #define EMIT_COND_BRANCH_FLAGS(ins,condcode) \
2904 if (0 && ins->inst_true_bb->native_offset) { \
2905 	ARM_B_COND (code, (condcode), (code - cfg->native_code + ins->inst_true_bb->native_offset) & 0xffffff); \
2907 	mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_BB, ins->inst_true_bb); \
2908 	ARM_B_COND (code, (condcode), 0); \
2911 #define EMIT_COND_BRANCH(ins,cond) EMIT_COND_BRANCH_FLAGS(ins, branch_cc_table [(cond)])
2913 /* emit an exception if condition is fail
2915  * We assign the extra code used to throw the implicit exceptions
2916  * to cfg->bb_exit as far as the big branch handling is concerned
2918 #define EMIT_COND_SYSTEM_EXCEPTION_FLAGS(condcode,exc_name) \
2920 		mono_add_patch_info (cfg, code - cfg->native_code, \
2921 				    MONO_PATCH_INFO_EXC, exc_name); \
2922 		ARM_BL_COND (code, (condcode), 0); \
2925 #define EMIT_COND_SYSTEM_EXCEPTION(cond,exc_name) EMIT_COND_SYSTEM_EXCEPTION_FLAGS(branch_cc_table [(cond)], (exc_name))
/* First peephole pass over BB — body not visible in this extract
 * (presumably empty on ARM; confirm against the full source). */
2928 mono_arch_peephole_pass_1 (MonoCompile *cfg, MonoBasicBlock *bb)
/* Second peephole pass: fold redundant load/store pairs and move chains
 * within a basic block (store-then-load -> move, duplicate load -> move,
 * store-imm-then-load -> iconst, move-to-self removal, move-swap removal). */
2933 mono_arch_peephole_pass_2 (MonoCompile *cfg, MonoBasicBlock *bb)
2935 	MonoInst *ins, *n, *last_ins = NULL;
2937 	MONO_BB_FOR_EACH_INS_SAFE (bb, n, ins) {
2938 		switch (ins->opcode) {
2941 			/* Already done by an arch-independent pass */
2943 		case OP_LOAD_MEMBASE:
2944 		case OP_LOADI4_MEMBASE:
2946 		 * OP_STORE_MEMBASE_REG reg, offset(basereg)
2947 		 * OP_LOAD_MEMBASE offset(basereg), reg
2949 			if (last_ins && (last_ins->opcode == OP_STOREI4_MEMBASE_REG
2950 					 || last_ins->opcode == OP_STORE_MEMBASE_REG) &&
2951 			    ins->inst_basereg == last_ins->inst_destbasereg &&
2952 			    ins->inst_offset == last_ins->inst_offset) {
/* Loading back the register just stored: the load is a no-op. */
2953 				if (ins->dreg == last_ins->sreg1) {
2954 					MONO_DELETE_INS (bb, ins);
2957 					//static int c = 0; printf ("MATCHX %s %d\n", cfg->method->name,c++);
2958 					ins->opcode = OP_MOVE;
2959 					ins->sreg1 = last_ins->sreg1;
2963 			 * Note: reg1 must be different from the basereg in the second load
2964 			 * OP_LOAD_MEMBASE offset(basereg), reg1
2965 			 * OP_LOAD_MEMBASE offset(basereg), reg2
2967 			 * OP_LOAD_MEMBASE offset(basereg), reg1
2968 			 * OP_MOVE reg1, reg2
2970 			} if (last_ins && (last_ins->opcode == OP_LOADI4_MEMBASE
2971 					   || last_ins->opcode == OP_LOAD_MEMBASE) &&
2972 			      ins->inst_basereg != last_ins->dreg &&
2973 			      ins->inst_basereg == last_ins->inst_basereg &&
2974 			      ins->inst_offset == last_ins->inst_offset) {
2976 				if (ins->dreg == last_ins->dreg) {
2977 					MONO_DELETE_INS (bb, ins);
2980 					ins->opcode = OP_MOVE;
2981 					ins->sreg1 = last_ins->dreg;
2984 					//g_assert_not_reached ();
2988 			 * OP_STORE_MEMBASE_IMM imm, offset(basereg)
2989 			 * OP_LOAD_MEMBASE offset(basereg), reg
2991 			 * OP_STORE_MEMBASE_IMM imm, offset(basereg)
2992 			 * OP_ICONST reg, imm
2994 			} else if (last_ins && (last_ins->opcode == OP_STOREI4_MEMBASE_IMM
2995 						|| last_ins->opcode == OP_STORE_MEMBASE_IMM) &&
2996 				   ins->inst_basereg == last_ins->inst_destbasereg &&
2997 				   ins->inst_offset == last_ins->inst_offset) {
2998 				//static int c = 0; printf ("MATCHX %s %d\n", cfg->method->name,c++);
2999 				ins->opcode = OP_ICONST;
3000 				ins->inst_c0 = last_ins->inst_imm;
/* NOTE(review): this rule is flagged unverified by the original author. */
3001 				g_assert_not_reached (); // check this rule
3005 		case OP_LOADU1_MEMBASE:
3006 		case OP_LOADI1_MEMBASE:
3007 			if (last_ins && (last_ins->opcode == OP_STOREI1_MEMBASE_REG) &&
3008 			    ins->inst_basereg == last_ins->inst_destbasereg &&
3009 			    ins->inst_offset == last_ins->inst_offset) {
3010 				ins->opcode = (ins->opcode == OP_LOADI1_MEMBASE) ? OP_ICONV_TO_I1 : OP_ICONV_TO_U1;
3011 				ins->sreg1 = last_ins->sreg1;
3014 		case OP_LOADU2_MEMBASE:
3015 		case OP_LOADI2_MEMBASE:
3016 			if (last_ins && (last_ins->opcode == OP_STOREI2_MEMBASE_REG) &&
3017 			    ins->inst_basereg == last_ins->inst_destbasereg &&
3018 			    ins->inst_offset == last_ins->inst_offset) {
3019 				ins->opcode = (ins->opcode == OP_LOADI2_MEMBASE) ? OP_ICONV_TO_I2 : OP_ICONV_TO_U2;
3020 				ins->sreg1 = last_ins->sreg1;
3024 			ins->opcode = OP_MOVE;
3028 			if (ins->dreg == ins->sreg1) {
3029 				MONO_DELETE_INS (bb, ins);
3033 			 * OP_MOVE sreg, dreg
3034 			 * OP_MOVE dreg, sreg
3036 			if (last_ins && last_ins->opcode == OP_MOVE &&
3037 			    ins->sreg1 == last_ins->dreg &&
3038 			    ins->dreg == last_ins->sreg1) {
3039 				MONO_DELETE_INS (bb, ins);
3047 	bb->last_ins = last_ins;
3051  * the branch_cc_table should maintain the order of these
/* Condition-code table indexed by the generic branch condition. */
3065 branch_cc_table [] = {
/* Allocate a new MonoInst and insert it before 'ins' in the current BB. */
3079 #define ADD_NEW_INS(cfg,dest,op) do {       \
3080 		MONO_INST_NEW ((cfg), (dest), (op)); \
3081 		mono_bblock_insert_before_ins (bb, ins, (dest)); \
/* Map a membase/immediate opcode to its register-register (memindex or
 * reg-store) equivalent, used by the lowering pass when an offset or
 * immediate does not fit the ARM instruction encoding. */
3085 map_to_reg_reg_op (int op)
3094 	case OP_COMPARE_IMM:
3096 	case OP_ICOMPARE_IMM:
3110 	case OP_LOAD_MEMBASE:
3111 		return OP_LOAD_MEMINDEX;
3112 	case OP_LOADI4_MEMBASE:
3113 		return OP_LOADI4_MEMINDEX;
3114 	case OP_LOADU4_MEMBASE:
3115 		return OP_LOADU4_MEMINDEX;
3116 	case OP_LOADU1_MEMBASE:
3117 		return OP_LOADU1_MEMINDEX;
3118 	case OP_LOADI2_MEMBASE:
3119 		return OP_LOADI2_MEMINDEX;
3120 	case OP_LOADU2_MEMBASE:
3121 		return OP_LOADU2_MEMINDEX;
3122 	case OP_LOADI1_MEMBASE:
3123 		return OP_LOADI1_MEMINDEX;
3124 	case OP_STOREI1_MEMBASE_REG:
3125 		return OP_STOREI1_MEMINDEX;
3126 	case OP_STOREI2_MEMBASE_REG:
3127 		return OP_STOREI2_MEMINDEX;
3128 	case OP_STOREI4_MEMBASE_REG:
3129 		return OP_STOREI4_MEMINDEX;
3130 	case OP_STORE_MEMBASE_REG:
3131 		return OP_STORE_MEMINDEX;
3132 	case OP_STORER4_MEMBASE_REG:
3133 		return OP_STORER4_MEMINDEX;
3134 	case OP_STORER8_MEMBASE_REG:
3135 		return OP_STORER8_MEMINDEX;
/* Immediate stores become register stores (the imm is loaded separately). */
3136 	case OP_STORE_MEMBASE_IMM:
3137 		return OP_STORE_MEMBASE_REG;
3138 	case OP_STOREI1_MEMBASE_IMM:
3139 		return OP_STOREI1_MEMBASE_REG;
3140 	case OP_STOREI2_MEMBASE_IMM:
3141 		return OP_STOREI2_MEMBASE_REG;
3142 	case OP_STOREI4_MEMBASE_IMM:
3143 		return OP_STOREI4_MEMBASE_REG;
3145 	g_assert_not_reached ();
3149  * Remove from the instruction list the instructions that can't be
3150  * represented with very simple instructions with no register
/* Lowering pass: rewrite instructions whose immediates/offsets don't fit
 * ARM's encodings (rotated imm8, imm12, imm8, fp imm8) into sequences using
 * an extra register loaded via OP_ICONST/OP_ADD_IMM, and normalize a few
 * opcodes (mul-by-power-of-two, swapped fp compares, etc.). */
3154 mono_arch_lowering_pass (MonoCompile *cfg, MonoBasicBlock *bb)
3156 	MonoInst *ins, *temp, *last_ins = NULL;
3157 	int rot_amount, imm8, low_imm;
3159 	MONO_BB_FOR_EACH_INS (bb, ins) {
3161 		switch (ins->opcode) {
3165 		case OP_COMPARE_IMM:
3166 		case OP_ICOMPARE_IMM:
/* Immediate not encodable as rotated imm8: materialize it in a register. */
3180 			if ((imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount)) < 0) {
3181 				ADD_NEW_INS (cfg, temp, OP_ICONST);
3182 				temp->inst_c0 = ins->inst_imm;
3183 				temp->dreg = mono_alloc_ireg (cfg);
3184 				ins->sreg2 = temp->dreg;
3185 				ins->opcode = mono_op_imm_to_op (ins->opcode);
3187 			if (ins->opcode == OP_SBB || ins->opcode == OP_ISBB || ins->opcode == OP_SUBCC)
/* Multiplication strength reduction: x*1 -> move, x*0 -> const, x*2^n -> shift. */
3193 			if (ins->inst_imm == 1) {
3194 				ins->opcode = OP_MOVE;
3197 			if (ins->inst_imm == 0) {
3198 				ins->opcode = OP_ICONST;
3202 			imm8 = mono_is_power_of_two (ins->inst_imm);
3204 				ins->opcode = OP_SHL_IMM;
3205 				ins->inst_imm = imm8;
3208 			ADD_NEW_INS (cfg, temp, OP_ICONST);
3209 			temp->inst_c0 = ins->inst_imm;
3210 			temp->dreg = mono_alloc_ireg (cfg);
3211 			ins->sreg2 = temp->dreg;
3212 			ins->opcode = OP_IMUL;
3218 			if (ins->next && (ins->next->opcode == OP_COND_EXC_C || ins->next->opcode == OP_COND_EXC_IC))
3219 				/* ARM sets the C flag to 1 if there was _no_ overflow */
3220 				ins->next->opcode = OP_COND_EXC_NC;
3223 		case OP_IDIV_UN_IMM:
3225 		case OP_IREM_UN_IMM:
3226 			ADD_NEW_INS (cfg, temp, OP_ICONST);
3227 			temp->inst_c0 = ins->inst_imm;
3228 			temp->dreg = mono_alloc_ireg (cfg);
3229 			ins->sreg2 = temp->dreg;
3230 			ins->opcode = mono_op_imm_to_op (ins->opcode);
3232 		case OP_LOCALLOC_IMM:
3233 			ADD_NEW_INS (cfg, temp, OP_ICONST);
3234 			temp->inst_c0 = ins->inst_imm;
3235 			temp->dreg = mono_alloc_ireg (cfg);
3236 			ins->sreg1 = temp->dreg;
3237 			ins->opcode = OP_LOCALLOC;
3239 		case OP_LOAD_MEMBASE:
3240 		case OP_LOADI4_MEMBASE:
3241 		case OP_LOADU4_MEMBASE:
3242 		case OP_LOADU1_MEMBASE:
3243 			/* we can do two things: load the immed in a register
3244 			 * and use an indexed load, or see if the immed can be
3245 			 * represented as an ad_imm + a load with a smaller offset
3246 			 * that fits. We just do the first for now, optimize later.
3248 			if (arm_is_imm12 (ins->inst_offset))
3250 			ADD_NEW_INS (cfg, temp, OP_ICONST);
3251 			temp->inst_c0 = ins->inst_offset;
3252 			temp->dreg = mono_alloc_ireg (cfg);
3253 			ins->sreg2 = temp->dreg;
3254 			ins->opcode = map_to_reg_reg_op (ins->opcode);
3256 		case OP_LOADI2_MEMBASE:
3257 		case OP_LOADU2_MEMBASE:
3258 		case OP_LOADI1_MEMBASE:
3259 			if (arm_is_imm8 (ins->inst_offset))
3261 			ADD_NEW_INS (cfg, temp, OP_ICONST);
3262 			temp->inst_c0 = ins->inst_offset;
3263 			temp->dreg = mono_alloc_ireg (cfg);
3264 			ins->sreg2 = temp->dreg;
3265 			ins->opcode = map_to_reg_reg_op (ins->opcode);
3267 		case OP_LOADR4_MEMBASE:
3268 		case OP_LOADR8_MEMBASE:
3269 			if (arm_is_fpimm8 (ins->inst_offset))
/* Split the offset into a base adjustment plus a small fp-encodable part. */
3271 			low_imm = ins->inst_offset & 0x1ff;
3272 			if ((imm8 = mono_arm_is_rotated_imm8 (ins->inst_offset & ~0x1ff, &rot_amount)) >= 0) {
3273 				ADD_NEW_INS (cfg, temp, OP_ADD_IMM);
3274 				temp->inst_imm = ins->inst_offset & ~0x1ff;
3275 				temp->sreg1 = ins->inst_basereg;
3276 				temp->dreg = mono_alloc_ireg (cfg);
3277 				ins->inst_basereg = temp->dreg;
3278 				ins->inst_offset = low_imm;
3282 			ADD_NEW_INS (cfg, temp, OP_ICONST);
3283 			temp->inst_c0 = ins->inst_offset;
3284 			temp->dreg = mono_alloc_ireg (cfg);
3286 			ADD_NEW_INS (cfg, add_ins, OP_IADD);
3287 			add_ins->sreg1 = ins->inst_basereg;
3288 			add_ins->sreg2 = temp->dreg;
3289 			add_ins->dreg = mono_alloc_ireg (cfg);
3291 			ins->inst_basereg = add_ins->dreg;
3292 			ins->inst_offset = 0;
3295 		case OP_STORE_MEMBASE_REG:
3296 		case OP_STOREI4_MEMBASE_REG:
3297 		case OP_STOREI1_MEMBASE_REG:
3298 			if (arm_is_imm12 (ins->inst_offset))
3300 			ADD_NEW_INS (cfg, temp, OP_ICONST);
3301 			temp->inst_c0 = ins->inst_offset;
3302 			temp->dreg = mono_alloc_ireg (cfg);
3303 			ins->sreg2 = temp->dreg;
3304 			ins->opcode = map_to_reg_reg_op (ins->opcode);
3306 		case OP_STOREI2_MEMBASE_REG:
3307 			if (arm_is_imm8 (ins->inst_offset))
3309 			ADD_NEW_INS (cfg, temp, OP_ICONST);
3310 			temp->inst_c0 = ins->inst_offset;
3311 			temp->dreg = mono_alloc_ireg (cfg);
3312 			ins->sreg2 = temp->dreg;
3313 			ins->opcode = map_to_reg_reg_op (ins->opcode);
3315 		case OP_STORER4_MEMBASE_REG:
3316 		case OP_STORER8_MEMBASE_REG:
3317 			if (arm_is_fpimm8 (ins->inst_offset))
3319 			low_imm = ins->inst_offset & 0x1ff;
3320 			if ((imm8 = mono_arm_is_rotated_imm8 (ins->inst_offset & ~ 0x1ff, &rot_amount)) >= 0 && arm_is_fpimm8 (low_imm)) {
3321 				ADD_NEW_INS (cfg, temp, OP_ADD_IMM);
3322 				temp->inst_imm = ins->inst_offset & ~0x1ff;
3323 				temp->sreg1 = ins->inst_destbasereg;
3324 				temp->dreg = mono_alloc_ireg (cfg);
3325 				ins->inst_destbasereg = temp->dreg;
3326 				ins->inst_offset = low_imm;
3330 			ADD_NEW_INS (cfg, temp, OP_ICONST);
3331 			temp->inst_c0 = ins->inst_offset;
3332 			temp->dreg = mono_alloc_ireg (cfg);
3334 			ADD_NEW_INS (cfg, add_ins, OP_IADD);
3335 			add_ins->sreg1 = ins->inst_destbasereg;
3336 			add_ins->sreg2 = temp->dreg;
3337 			add_ins->dreg = mono_alloc_ireg (cfg);
3339 			ins->inst_destbasereg = add_ins->dreg;
3340 			ins->inst_offset = 0;
3343 		case OP_STORE_MEMBASE_IMM:
3344 		case OP_STOREI1_MEMBASE_IMM:
3345 		case OP_STOREI2_MEMBASE_IMM:
3346 		case OP_STOREI4_MEMBASE_IMM:
/* First load the immediate into a register, then re-lower as a reg store. */
3347 			ADD_NEW_INS (cfg, temp, OP_ICONST);
3348 			temp->inst_c0 = ins->inst_imm;
3349 			temp->dreg = mono_alloc_ireg (cfg);
3350 			ins->sreg1 = temp->dreg;
3351 			ins->opcode = map_to_reg_reg_op (ins->opcode);
3353 			goto loop_start; /* make it handle the possibly big ins->inst_offset */
3355 			gboolean swap = FALSE;
3359 			/* Optimized away */
3364 			/* Some fp compares require swapped operands */
3365 			switch (ins->next->opcode) {
3367 				ins->next->opcode = OP_FBLT;
3371 				ins->next->opcode = OP_FBLT_UN;
3375 				ins->next->opcode = OP_FBGE;
3379 				ins->next->opcode = OP_FBGE_UN;
3387 				ins->sreg1 = ins->sreg2;
3396 	bb->last_ins = last_ins;
3397 	bb->max_vreg = cfg->next_vreg;
/* Decompose 64-bit ops into 32-bit pairs: OP_LNEG becomes RSBS/RSC on the
 * low/high halves (vreg+1 = low word, vreg+2 = high word). */
3401 mono_arch_decompose_long_opts (MonoCompile *cfg, MonoInst *long_ins)
3405 	if (long_ins->opcode == OP_LNEG) {
3407 		MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ARM_RSBS_IMM, ins->dreg + 1, ins->sreg1 + 1, 0);
3408 		MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ARM_RSC_IMM, ins->dreg + 2, ins->sreg1 + 2, 0);
/* Emit a VFP float->integer conversion into DREG, then truncate/sign-extend
 * the result to SIZE bytes with shift pairs (no UXTB/SXTH assumed). */
3414 emit_float_to_int (MonoCompile *cfg, guchar *code, int dreg, int sreg, int size, gboolean is_signed)
3416 	/* sreg is a float, dreg is an integer reg */
3419 		ARM_TOSIZD (code, vfp_scratch1, sreg);
3421 		ARM_TOUIZD (code, vfp_scratch1, sreg);
3422 	ARM_FMRS (code, dreg, vfp_scratch1);
/* Unsigned narrowing: mask (1 byte) or shift-pair (2 bytes). */
3426 			ARM_AND_REG_IMM8 (code, dreg, dreg, 0xff);
3427 		else if (size == 2) {
3428 			ARM_SHL_IMM (code, dreg, dreg, 16);
3429 			ARM_SHR_IMM (code, dreg, dreg, 16);
/* Signed narrowing: shift left then arithmetic shift right. */
3433 			ARM_SHL_IMM (code, dreg, dreg, 24);
3434 			ARM_SAR_IMM (code, dreg, dreg, 24);
3435 		} else if (size == 2) {
3436 			ARM_SHL_IMM (code, dreg, dreg, 16);
3437 			ARM_SAR_IMM (code, dreg, dreg, 16);
3443 #endif /* #ifndef DISABLE_JIT */
/* PatchData field: branch target being patched (see search_thunk_slot). */
3447 	const guchar *target;
/* True when DIFF fits in the signed 26-bit range of an ARM B/BL displacement. */
3452 #define is_call_imm(diff) ((gint)(diff) >= -33554432 && (gint)(diff) <= 33554431)
/* mono_code_manager_foreach callback: look through a code chunk's thunk area
 * for an existing 12-byte thunk to pdata->target (reuse it) or a free slot
 * (emit a new thunk), then patch pdata->code to branch to the thunk.
 * Each thunk is: load [pc] into IP; jump via IP; literal word with target. */
3455 search_thunk_slot (void *data, int csize, int bsize, void *user_data) {
3456 	PatchData *pdata = (PatchData*)user_data;
3457 	guchar *code = data;
3458 	guint32 *thunks = data;
3459 	guint32 *endthunks = (guint32*)(code + bsize);
3461 	int difflow, diffhigh;
3463 	/* always ensure a call from pdata->code can reach to the thunks without further thunks */
3464 	difflow = (char*)pdata->code - (char*)thunks;
3465 	diffhigh = (char*)pdata->code - (char*)endthunks;
3466 	if (!((is_call_imm (thunks) && is_call_imm (endthunks)) || (is_call_imm (difflow) && is_call_imm (diffhigh))))
3470 	 * The thunk is composed of 3 words:
3471 	 * load constant from thunks [2] into ARM_IP
3474 	 * Note that the LR register is already setup
3476 	//g_print ("thunk nentries: %d\n", ((char*)endthunks - (char*)thunks)/16);
3477 	if ((pdata->found == 2) || (pdata->code >= code && pdata->code <= code + csize)) {
3478 		while (thunks < endthunks) {
3479 			//g_print ("looking for target: %p at %p (%08x-%08x)\n", pdata->target, thunks, thunks [0], thunks [1]);
/* Matching thunk already exists: just point the call site at it. */
3480 			if (thunks [2] == (guint32)pdata->target) {
3481 				arm_patch (pdata->code, (guchar*)thunks);
3482 				mono_arch_flush_icache (pdata->code, 4);
3485 			} else if ((thunks [0] == 0) && (thunks [1] == 0) && (thunks [2] == 0)) {
3486 				/* found a free slot instead: emit thunk */
3487 				/* ARMREG_IP is fine to use since this can't be an IMT call
3490 				code = (guchar*)thunks;
3491 				ARM_LDR_IMM (code, ARMREG_IP, ARMREG_PC, 0);
3492 				if (thumb_supported)
3493 					ARM_BX (code, ARMREG_IP);
3495 					ARM_MOV_REG_REG (code, ARMREG_PC, ARMREG_IP);
3496 				thunks [2] = (guint32)pdata->target;
3497 				mono_arch_flush_icache ((guchar*)thunks, 12);
3499 				arm_patch (pdata->code, (guchar*)thunks);
3500 				mono_arch_flush_icache (pdata->code, 4);
3504 			/* skip 12 bytes, the size of the thunk */
3508 		//g_print ("failed thunk lookup for %p from %p at %p (%d entries)\n", pdata->target, pdata->code, data, count);
/* Redirect CODE to TARGET through a thunk when the branch displacement does
 * not fit. Searches, in order: the supplied dynamic-method code manager, the
 * domain's code chunks (two passes — the second allows taking any free slot),
 * and finally every dynamic method's code manager. Asserts on failure. */
3514 handle_thunk (MonoDomain *domain, int absolute, guchar *code, const guchar *target, MonoCodeManager *dyn_code_mp)
3519 		domain = mono_domain_get ();
3522 	pdata.target = target;
3523 	pdata.absolute = absolute;
3527 		mono_code_manager_foreach (dyn_code_mp, search_thunk_slot, &pdata);
3530 	if (pdata.found != 1) {
3531 		mono_domain_lock (domain);
3532 		mono_domain_code_foreach (domain, search_thunk_slot, &pdata);
3535 			/* this uses the first available slot */
3537 			mono_domain_code_foreach (domain, search_thunk_slot, &pdata);
3539 		mono_domain_unlock (domain);
3542 	if (pdata.found != 1) {
3544 		GHashTableIter iter;
3545 		MonoJitDynamicMethodInfo *ji;
3548 		 * This might be a dynamic method, search its code manager. We can only
3549 		 * use the dynamic method containing CODE, since the others might be freed later.
3553 		mono_domain_lock (domain);
3554 		hash = domain_jit_info (domain)->dynamic_code_hash;
3556 			/* FIXME: Speed this up */
3557 			g_hash_table_iter_init (&iter, hash);
3558 			while (g_hash_table_iter_next (&iter, NULL, (gpointer*)&ji)) {
3559 				mono_code_manager_foreach (ji->code_mp, search_thunk_slot, &pdata);
3560 				if (pdata.found == 1)
3564 		mono_domain_unlock (domain);
3566 	if (pdata.found != 1)
3567 		g_print ("thunk failed for %p from %p\n", target, code);
3568 	g_assert (pdata.found == 1);
/* Patch the branch/call at CODE to reach TARGET. Handles: direct B/BL
 * (rewriting the 24-bit displacement, including BL->BLX for Thumb targets,
 * falling back to handle_thunk when out of range), jump tables, and the
 * load-constant indirect call sequences (BX/BLX/mov-pc variants) by writing
 * TARGET into the embedded literal word. */
3572 arm_patch_general (MonoDomain *domain, guchar *code, const guchar *target, MonoCodeManager *dyn_code_mp)
3574 	guint32 *code32 = (void*)code;
3575 	guint32 ins = *code32;
3576 	guint32 prim = (ins >> 25) & 7;
3577 	guint32 tval = GPOINTER_TO_UINT (target);
3579 	//g_print ("patching 0x%08x (0x%08x) to point to 0x%08x\n", code, ins, target);
3580 	if (prim == 5) { /* 101b */
3581 		/* the diff starts 8 bytes from the branch opcode */
3582 		gint diff = target - code - 8;
3584 		gint tmask = 0xffffffff;
3585 		if (tval & 1) { /* entering thumb mode */
3586 			diff = target - 1 - code - 8;
3587 			g_assert (thumb_supported);
3588 			tbits = 0xf << 28; /* bl->blx bit pattern */
3589 			g_assert ((ins & (1 << 24))); /* it must be a bl, not b instruction */
3590 			/* this low bit of the displacement is moved to bit 24 in the instruction encoding */
3594 			tmask = ~(1 << 24); /* clear the link bit */
3595 			/*g_print ("blx to thumb: target: %p, code: %p, diff: %d, mask: %x\n", target, code, diff, tmask);*/
3600 			if (diff <= 33554431) {
3602 				ins = (ins & 0xff000000) | diff;
3604 				*code32 = ins | tbits;
3608 			/* diff between 0 and -33554432 */
3609 			if (diff >= -33554432) {
3611 				ins = (ins & 0xff000000) | (diff & ~0xff000000);
3613 				*code32 = ins | tbits;
/* Displacement out of B/BL range: go through a thunk instead. */
3618 		handle_thunk (domain, TRUE, code, target, dyn_code_mp);
3622 #ifdef USE_JUMP_TABLES
3624 		gpointer *jte = mono_jumptable_get_entry (code);
3626 		jte [0] = (gpointer) target;
3630 	 * The alternative call sequences looks like this:
3632 	 * 	ldr ip, [pc] // loads the address constant
3633 	 * 	b 1f         // jumps around the constant
3634 	 * 	address constant embedded in the code
3639 	 * There are two cases for patching:
3640 	 * a) at the end of method emission: in this case code points to the start
3641 	 *    of the call sequence
3642 	 * b) during runtime patching of the call site: in this case code points
3643 	 *    to the mov pc, ip instruction
3645 	 * We have to handle also the thunk jump code sequence:
3649 	 * 	address constant // execution never reaches here
3651 	if ((ins & 0x0ffffff0) == 0x12fff10) {
3652 		/* Branch and exchange: the address is constructed in a reg
3653 		 * We can patch BX when the code sequence is the following:
3654 		 *  ldr     ip, [pc, #0]    ; 0x8
/* Re-emit the expected sequence into a scratch buffer and compare against
 * the code being patched to locate the literal word. */
3661 		guint8 *emit = (guint8*)ccode;
3662 		ARM_LDR_IMM (emit, ARMREG_IP, ARMREG_PC, 0);
3664 		ARM_MOV_REG_REG (emit, ARMREG_LR, ARMREG_PC);
3665 		ARM_BX (emit, ARMREG_IP);
3667 		/*patching from magic trampoline*/
3668 		if (ins == ccode [3]) {
3669 			g_assert (code32 [-4] == ccode [0]);
3670 			g_assert (code32 [-3] == ccode [1]);
3671 			g_assert (code32 [-1] == ccode [2]);
3672 			code32 [-2] = (guint32)target;
3675 		/*patching from JIT*/
3676 		if (ins == ccode [0]) {
3677 			g_assert (code32 [1] == ccode [1]);
3678 			g_assert (code32 [3] == ccode [2]);
3679 			g_assert (code32 [4] == ccode [3]);
3680 			code32 [2] = (guint32)target;
3683 		g_assert_not_reached ();
3684 	} else if ((ins & 0x0ffffff0) == 0x12fff30) {
3692 		guint8 *emit = (guint8*)ccode;
3693 		ARM_LDR_IMM (emit, ARMREG_IP, ARMREG_PC, 0);
3695 		ARM_BLX_REG (emit, ARMREG_IP);
3697 		g_assert (code32 [-3] == ccode [0]);
3698 		g_assert (code32 [-2] == ccode [1]);
3699 		g_assert (code32 [0] == ccode [2]);
3701 		code32 [-1] = (guint32)target;
3704 		guint32 *tmp = ccode;
3705 		guint8 *emit = (guint8*)tmp;
3706 		ARM_LDR_IMM (emit, ARMREG_IP, ARMREG_PC, 0);
3707 		ARM_MOV_REG_REG (emit, ARMREG_LR, ARMREG_PC);
3708 		ARM_MOV_REG_REG (emit, ARMREG_PC, ARMREG_IP);
3709 		ARM_BX (emit, ARMREG_IP);
3710 		if (ins == ccode [2]) {
3711 			g_assert_not_reached (); // should be -2 ...
3712 			code32 [-1] = (guint32)target;
3715 		if (ins == ccode [0]) {
3716 			/* handles both thunk jump code and the far call sequence */
3717 			code32 [2] = (guint32)target;
3720 		g_assert_not_reached ();
3722 //	g_print ("patched with 0x%08x\n", ins);
/* Convenience wrapper: patch without domain/dynamic-code-manager context. */
3727 arm_patch (guchar *code, const guchar *target)
3729 	arm_patch_general (NULL, code, target, NULL);
3733  * Return the >= 0 uimm8 value if val can be represented with a byte + rotation
3734  * (with the rotation amount in *rot_amount. rot_amount is already adjusted
3735  * to be used with the emit macros.
3736  * Return -1 otherwise.
3739 mono_arm_is_rotated_imm8 (guint32 val, gint *rot_amount)
/* Try every even rotation (ARM immediates rotate right by an even amount). */
3742 	for (i = 0; i < 31; i+= 2) {
3743 		res = (val << (32 - i)) | (val >> i);
3746 			*rot_amount = i? 32 - i: 0;
3753  * Emits in code a sequence of instructions that load the value 'val'
3754  * into the dreg register. Uses at most 4 instructions.
3757 mono_arm_emit_load_imm (guint8 *code, int dreg, guint32 val)
3759 	int imm8, rot_amount;
/* PC-relative literal-pool load path (elided condition above). */
3761 	ARM_LDR_IMM (code, dreg, ARMREG_PC, 0);
3762 	/* skip the constant pool */
/* Fast paths: the value (or its complement) fits a rotated imm8. */
3768 	if ((imm8 = mono_arm_is_rotated_imm8 (val, &rot_amount)) >= 0) {
3769 		ARM_MOV_REG_IMM (code, dreg, imm8, rot_amount);
3770 	} else if ((imm8 = mono_arm_is_rotated_imm8 (~val, &rot_amount)) >= 0) {
3771 		ARM_MVN_REG_IMM (code, dreg, imm8, rot_amount);
/* ARMv7 MOVW/MOVT pair (guarded by an elided v7 check — confirm in full source). */
3774 		ARM_MOVW_REG_IMM (code, dreg, val & 0xffff);
3776 			ARM_MOVT_REG_IMM (code, dreg, (val >> 16) & 0xffff);
/* Pre-v7 fallback: build the value one byte at a time with MOV + up to 3 ADDs. */
3780 			ARM_MOV_REG_IMM8 (code, dreg, (val & 0xFF));
3782 				ARM_ADD_REG_IMM (code, dreg, dreg, (val & 0xFF00) >> 8, 24);
3784 			if (val & 0xFF0000) {
3785 				ARM_ADD_REG_IMM (code, dreg, dreg, (val & 0xFF0000) >> 16, 16);
3787 			if (val & 0xFF000000) {
3788 				ARM_ADD_REG_IMM (code, dreg, dreg, (val & 0xFF000000) >> 24, 8);
3790 		} else if (val & 0xFF00) {
3791 			ARM_MOV_REG_IMM (code, dreg, (val & 0xFF00) >> 8, 24);
3792 			if (val & 0xFF0000) {
3793 				ARM_ADD_REG_IMM (code, dreg, dreg, (val & 0xFF0000) >> 16, 16);
3795 			if (val & 0xFF000000) {
3796 				ARM_ADD_REG_IMM (code, dreg, dreg, (val & 0xFF000000) >> 24, 8);
3798 		} else if (val & 0xFF0000) {
3799 			ARM_MOV_REG_IMM (code, dreg, (val & 0xFF0000) >> 16, 16);
3800 			if (val & 0xFF000000) {
3801 				ARM_ADD_REG_IMM (code, dreg, dreg, (val & 0xFF000000) >> 24, 8);
3804 		//g_assert_not_reached ();
/* Whether the target CPU supports Thumb interworking (cached hwcap flag). */
3810 mono_arm_thumb_supported (void)
3812 	return thumb_supported;
3818  * emit_load_volatile_arguments:
3820  * Load volatile arguments from the stack to the original input registers.
3821  * Required before a tail call.
3824 emit_load_volatile_arguments (MonoCompile *cfg, guint8 *code)
3826 	MonoMethod *method = cfg->method;
3827 	MonoMethodSignature *sig;
3832 	/* FIXME: Generate intermediate code instead */
3834 	sig = mono_method_signature (method);
3836 	/* This is the opposite of the code in emit_prolog */
3840 	cinfo = get_call_info (cfg->generic_sharing_context, NULL, sig);
/* Reload the vtype-return address register first, if the ABI uses one. */
3842 	if (cinfo->vtype_retaddr) {
3843 		ArgInfo *ainfo = &cinfo->ret;
3844 		inst = cfg->vret_addr;
3845 		g_assert (arm_is_imm12 (inst->inst_offset));
3846 		ARM_LDR_IMM (code, ainfo->reg, inst->inst_basereg, inst->inst_offset);
3848 	for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
3849 		ArgInfo *ainfo = cinfo->args + i;
3850 		inst = cfg->args [pos];
3852 		if (cfg->verbose_level > 2)
3853 			g_print ("Loading argument %d (type: %d)\n", i, ainfo->storage);
3854 		if (inst->opcode == OP_REGVAR) {
3855 			if (ainfo->storage == RegTypeGeneral)
3856 				ARM_MOV_REG_REG (code, inst->dreg, ainfo->reg);
3857 			else if (ainfo->storage == RegTypeFP) {
3858 				g_assert_not_reached ();
3859 			} else if (ainfo->storage == RegTypeBase) {
3863 				if (arm_is_imm12 (prev_sp_offset + ainfo->offset)) {
3864 					ARM_LDR_IMM (code, inst->dreg, ARMREG_SP, (prev_sp_offset + ainfo->offset));
3866 					code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
3867 					ARM_LDR_REG_REG (code, inst->dreg, ARMREG_SP, ARMREG_IP);
3871 				g_assert_not_reached ();
3873 			if (ainfo->storage == RegTypeGeneral || ainfo->storage == RegTypeIRegPair) {
3874 				switch (ainfo->size) {
/* 8-byte args: reload both halves into the register pair. */
3881 					g_assert (arm_is_imm12 (inst->inst_offset));
3882 					ARM_LDR_IMM (code, ainfo->reg, inst->inst_basereg, inst->inst_offset);
3883 					g_assert (arm_is_imm12 (inst->inst_offset + 4));
3884 					ARM_LDR_IMM (code, ainfo->reg + 1, inst->inst_basereg, inst->inst_offset + 4);
3887 					if (arm_is_imm12 (inst->inst_offset)) {
3888 						ARM_LDR_IMM (code, ainfo->reg, inst->inst_basereg, inst->inst_offset);
3890 						code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
3891 						ARM_LDR_REG_REG (code, ainfo->reg, inst->inst_basereg, ARMREG_IP);
3895 			} else if (ainfo->storage == RegTypeBaseGen) {
3898 			} else if (ainfo->storage == RegTypeBase) {
3900 			} else if (ainfo->storage == RegTypeFP) {
3901 				g_assert_not_reached ();
3902 			} else if (ainfo->storage == RegTypeStructByVal) {
3903 				int doffset = inst->inst_offset;
3907 				if (mono_class_from_mono_type (inst->inst_vtype))
3908 					size = mono_class_native_size (mono_class_from_mono_type (inst->inst_vtype), NULL);
/* Reload each register-resident word of the by-value struct. */
3909 				for (cur_reg = 0; cur_reg < ainfo->size; ++cur_reg) {
3910 					if (arm_is_imm12 (doffset)) {
3911 						ARM_LDR_IMM (code, ainfo->reg + cur_reg, inst->inst_basereg, doffset);
3913 						code = mono_arm_emit_load_imm (code, ARMREG_IP, doffset);
3914 						ARM_LDR_REG_REG (code, ainfo->reg + cur_reg, inst->inst_basereg, ARMREG_IP);
3916 					soffset += sizeof (gpointer);
3917 					doffset += sizeof (gpointer);
3922 			} else if (ainfo->storage == RegTypeStructByAddr) {
3937 mono_arch_output_basic_block (MonoCompile *cfg, MonoBasicBlock *bb)
3942 guint8 *code = cfg->native_code + cfg->code_len;
3943 MonoInst *last_ins = NULL;
3944 guint last_offset = 0;
3946 int imm8, rot_amount;
3948 /* we don't align basic blocks of loops on arm */
3950 if (cfg->verbose_level > 2)
3951 g_print ("Basic block %d starting at offset 0x%x\n", bb->block_num, bb->native_offset);
3953 cpos = bb->max_offset;
3955 if (cfg->prof_options & MONO_PROFILE_COVERAGE) {
3956 //MonoCoverageInfo *cov = mono_get_coverage_info (cfg->method);
3957 //g_assert (!mono_compile_aot);
3960 // cov->data [bb->dfn].iloffset = bb->cil_code - cfg->cil_code;
3961 /* this is not thread save, but good enough */
3962 /* fixme: howto handle overflows? */
3963 //x86_inc_mem (code, &cov->data [bb->dfn].count);
3966 if (mono_break_at_bb_method && mono_method_desc_full_match (mono_break_at_bb_method, cfg->method) && bb->block_num == mono_break_at_bb_bb_num) {
3967 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_INTERNAL_METHOD,
3968 (gpointer)"mono_break");
3969 code = emit_call_seq (cfg, code);
3972 MONO_BB_FOR_EACH_INS (bb, ins) {
3973 offset = code - cfg->native_code;
3975 max_len = ((guint8 *)ins_get_spec (ins->opcode))[MONO_INST_LEN];
3977 if (offset > (cfg->code_size - max_len - 16)) {
3978 cfg->code_size *= 2;
3979 cfg->native_code = g_realloc (cfg->native_code, cfg->code_size);
3980 code = cfg->native_code + offset;
3982 // if (ins->cil_code)
3983 // g_print ("cil code\n");
3984 mono_debug_record_line_number (cfg, ins, offset);
3986 switch (ins->opcode) {
3987 case OP_MEMORY_BARRIER:
3989 ARM_MOV_REG_IMM8 (code, ARMREG_R0, 0);
3990 ARM_MCR (code, 15, 0, ARMREG_R0, 7, 10, 5);
3994 #ifdef HAVE_AEABI_READ_TP
3995 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_INTERNAL_METHOD,
3996 (gpointer)"__aeabi_read_tp");
3997 code = emit_call_seq (cfg, code);
3999 ARM_LDR_IMM (code, ins->dreg, ARMREG_R0, ins->inst_offset);
4001 g_assert_not_reached ();
4005 ppc_mullw (code, ppc_r4, ins->sreg1, ins->sreg2);
4006 ppc_mulhw (code, ppc_r3, ins->sreg1, ins->sreg2);
4009 ppc_mullw (code, ppc_r4, ins->sreg1, ins->sreg2);
4010 ppc_mulhwu (code, ppc_r3, ins->sreg1, ins->sreg2);
4012 case OP_STOREI1_MEMBASE_IMM:
4013 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_imm & 0xFF);
4014 g_assert (arm_is_imm12 (ins->inst_offset));
4015 ARM_STRB_IMM (code, ARMREG_LR, ins->inst_destbasereg, ins->inst_offset);
4017 case OP_STOREI2_MEMBASE_IMM:
4018 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_imm & 0xFFFF);
4019 g_assert (arm_is_imm8 (ins->inst_offset));
4020 ARM_STRH_IMM (code, ARMREG_LR, ins->inst_destbasereg, ins->inst_offset);
4022 case OP_STORE_MEMBASE_IMM:
4023 case OP_STOREI4_MEMBASE_IMM:
4024 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_imm);
4025 g_assert (arm_is_imm12 (ins->inst_offset));
4026 ARM_STR_IMM (code, ARMREG_LR, ins->inst_destbasereg, ins->inst_offset);
4028 case OP_STOREI1_MEMBASE_REG:
4029 g_assert (arm_is_imm12 (ins->inst_offset));
4030 ARM_STRB_IMM (code, ins->sreg1, ins->inst_destbasereg, ins->inst_offset);
4032 case OP_STOREI2_MEMBASE_REG:
4033 g_assert (arm_is_imm8 (ins->inst_offset));
4034 ARM_STRH_IMM (code, ins->sreg1, ins->inst_destbasereg, ins->inst_offset);
4036 case OP_STORE_MEMBASE_REG:
4037 case OP_STOREI4_MEMBASE_REG:
4038 /* this case is special, since it happens for spill code after lowering has been called */
4039 if (arm_is_imm12 (ins->inst_offset)) {
4040 ARM_STR_IMM (code, ins->sreg1, ins->inst_destbasereg, ins->inst_offset);
4042 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_offset);
4043 ARM_STR_REG_REG (code, ins->sreg1, ins->inst_destbasereg, ARMREG_LR);
4046 case OP_STOREI1_MEMINDEX:
4047 ARM_STRB_REG_REG (code, ins->sreg1, ins->inst_destbasereg, ins->sreg2);
4049 case OP_STOREI2_MEMINDEX:
4050 ARM_STRH_REG_REG (code, ins->sreg1, ins->inst_destbasereg, ins->sreg2);
4052 case OP_STORE_MEMINDEX:
4053 case OP_STOREI4_MEMINDEX:
4054 ARM_STR_REG_REG (code, ins->sreg1, ins->inst_destbasereg, ins->sreg2);
4057 g_assert_not_reached ();
4059 case OP_LOAD_MEMINDEX:
4060 case OP_LOADI4_MEMINDEX:
4061 case OP_LOADU4_MEMINDEX:
4062 ARM_LDR_REG_REG (code, ins->dreg, ins->inst_basereg, ins->sreg2);
4064 case OP_LOADI1_MEMINDEX:
4065 ARM_LDRSB_REG_REG (code, ins->dreg, ins->inst_basereg, ins->sreg2);
4067 case OP_LOADU1_MEMINDEX:
4068 ARM_LDRB_REG_REG (code, ins->dreg, ins->inst_basereg, ins->sreg2);
4070 case OP_LOADI2_MEMINDEX:
4071 ARM_LDRSH_REG_REG (code, ins->dreg, ins->inst_basereg, ins->sreg2);
4073 case OP_LOADU2_MEMINDEX:
4074 ARM_LDRH_REG_REG (code, ins->dreg, ins->inst_basereg, ins->sreg2);
4076 case OP_LOAD_MEMBASE:
4077 case OP_LOADI4_MEMBASE:
4078 case OP_LOADU4_MEMBASE:
4079 /* this case is special, since it happens for spill code after lowering has been called */
4080 if (arm_is_imm12 (ins->inst_offset)) {
4081 ARM_LDR_IMM (code, ins->dreg, ins->inst_basereg, ins->inst_offset);
4083 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_offset);
4084 ARM_LDR_REG_REG (code, ins->dreg, ins->inst_basereg, ARMREG_LR);
4087 case OP_LOADI1_MEMBASE:
4088 g_assert (arm_is_imm8 (ins->inst_offset));
4089 ARM_LDRSB_IMM (code, ins->dreg, ins->inst_basereg, ins->inst_offset);
4091 case OP_LOADU1_MEMBASE:
4092 g_assert (arm_is_imm12 (ins->inst_offset));
4093 ARM_LDRB_IMM (code, ins->dreg, ins->inst_basereg, ins->inst_offset);
4095 case OP_LOADU2_MEMBASE:
4096 g_assert (arm_is_imm8 (ins->inst_offset));
4097 ARM_LDRH_IMM (code, ins->dreg, ins->inst_basereg, ins->inst_offset);
4099 case OP_LOADI2_MEMBASE:
4100 g_assert (arm_is_imm8 (ins->inst_offset));
4101 ARM_LDRSH_IMM (code, ins->dreg, ins->inst_basereg, ins->inst_offset);
4103 case OP_ICONV_TO_I1:
4104 ARM_SHL_IMM (code, ins->dreg, ins->sreg1, 24);
4105 ARM_SAR_IMM (code, ins->dreg, ins->dreg, 24);
4107 case OP_ICONV_TO_I2:
4108 ARM_SHL_IMM (code, ins->dreg, ins->sreg1, 16);
4109 ARM_SAR_IMM (code, ins->dreg, ins->dreg, 16);
4111 case OP_ICONV_TO_U1:
4112 ARM_AND_REG_IMM8 (code, ins->dreg, ins->sreg1, 0xff);
4114 case OP_ICONV_TO_U2:
4115 ARM_SHL_IMM (code, ins->dreg, ins->sreg1, 16);
4116 ARM_SHR_IMM (code, ins->dreg, ins->dreg, 16);
4120 ARM_CMP_REG_REG (code, ins->sreg1, ins->sreg2);
4122 case OP_COMPARE_IMM:
4123 case OP_ICOMPARE_IMM:
4124 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
4125 g_assert (imm8 >= 0);
4126 ARM_CMP_REG_IMM (code, ins->sreg1, imm8, rot_amount);
4130 * gdb does not like encountering the hw breakpoint ins in the debugged code.
4131 * So instead of emitting a trap, we emit a call to a C function and place a
4134 //*(int*)code = 0xef9f0001;
4137 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_INTERNAL_METHOD,
4138 (gpointer)"mono_break");
4139 code = emit_call_seq (cfg, code);
4141 case OP_RELAXED_NOP:
4146 case OP_DUMMY_STORE:
4147 case OP_NOT_REACHED:
4150 case OP_SEQ_POINT: {
4152 MonoInst *info_var = cfg->arch.seq_point_info_var;
4153 MonoInst *ss_trigger_page_var = cfg->arch.ss_trigger_page_var;
4154 MonoInst *ss_read_var = cfg->arch.seq_point_read_var;
4155 MonoInst *ss_method_var = cfg->arch.seq_point_ss_method_var;
4156 MonoInst *bp_method_var = cfg->arch.seq_point_bp_method_var;
4158 int dreg = ARMREG_LR;
4160 if (cfg->soft_breakpoints) {
4161 g_assert (!cfg->compile_aot);
4165 * For AOT, we use one got slot per method, which will point to a
4166 * SeqPointInfo structure, containing all the information required
4167 * by the code below.
4169 if (cfg->compile_aot) {
4170 g_assert (info_var);
4171 g_assert (info_var->opcode == OP_REGOFFSET);
4172 g_assert (arm_is_imm12 (info_var->inst_offset));
4175 if (!cfg->soft_breakpoints) {
4177 * Read from the single stepping trigger page. This will cause a
4178 * SIGSEGV when single stepping is enabled.
4179 * We do this _before_ the breakpoint, so single stepping after
4180 * a breakpoint is hit will step to the next IL offset.
4182 g_assert (((guint64)(gsize)ss_trigger_page >> 32) == 0);
4185 if (ins->flags & MONO_INST_SINGLE_STEP_LOC) {
4186 if (cfg->soft_breakpoints) {
4187 /* Load the address of the sequence point trigger variable. */
4190 g_assert (var->opcode == OP_REGOFFSET);
4191 g_assert (arm_is_imm12 (var->inst_offset));
4192 ARM_LDR_IMM (code, dreg, var->inst_basereg, var->inst_offset);
4194 /* Read the value and check whether it is non-zero. */
4195 ARM_LDR_IMM (code, dreg, dreg, 0);
4196 ARM_CMP_REG_IMM (code, dreg, 0, 0);
4198 /* Load the address of the sequence point method. */
4199 var = ss_method_var;
4201 g_assert (var->opcode == OP_REGOFFSET);
4202 g_assert (arm_is_imm12 (var->inst_offset));
4203 ARM_LDR_IMM (code, dreg, var->inst_basereg, var->inst_offset);
4205 /* Call it conditionally. */
4206 ARM_BLX_REG_COND (code, ARMCOND_NE, dreg);
4208 if (cfg->compile_aot) {
4209 /* Load the trigger page addr from the variable initialized in the prolog */
4210 var = ss_trigger_page_var;
4212 g_assert (var->opcode == OP_REGOFFSET);
4213 g_assert (arm_is_imm12 (var->inst_offset));
4214 ARM_LDR_IMM (code, dreg, var->inst_basereg, var->inst_offset);
4216 #ifdef USE_JUMP_TABLES
4217 gpointer *jte = mono_jumptable_add_entry ();
4218 code = mono_arm_load_jumptable_entry (code, jte, dreg);
4219 jte [0] = ss_trigger_page;
4221 ARM_LDR_IMM (code, dreg, ARMREG_PC, 0);
4223 *(int*)code = (int)ss_trigger_page;
4227 ARM_LDR_IMM (code, dreg, dreg, 0);
4231 mono_add_seq_point (cfg, bb, ins, code - cfg->native_code);
4233 if (cfg->soft_breakpoints) {
4234 /* Load the address of the breakpoint method into ip. */
4235 var = bp_method_var;
4237 g_assert (var->opcode == OP_REGOFFSET);
4238 g_assert (arm_is_imm12 (var->inst_offset));
4239 ARM_LDR_IMM (code, dreg, var->inst_basereg, var->inst_offset);
4242 * A placeholder for a possible breakpoint inserted by
4243 * mono_arch_set_breakpoint ().
4246 } else if (cfg->compile_aot) {
4247 guint32 offset = code - cfg->native_code;
4250 ARM_LDR_IMM (code, dreg, info_var->inst_basereg, info_var->inst_offset);
4251 /* Add the offset */
4252 val = ((offset / 4) * sizeof (guint8*)) + G_STRUCT_OFFSET (SeqPointInfo, bp_addrs);
4253 /* Load the info->bp_addrs [offset], which is either 0 or the address of a trigger page */
4254 if (arm_is_imm12 ((int)val)) {
4255 ARM_LDR_IMM (code, dreg, dreg, val);
4257 ARM_ADD_REG_IMM (code, dreg, dreg, (val & 0xFF), 0);
4259 ARM_ADD_REG_IMM (code, dreg, dreg, (val & 0xFF00) >> 8, 24);
4261 ARM_ADD_REG_IMM (code, dreg, dreg, (val & 0xFF0000) >> 16, 16);
4262 g_assert (!(val & 0xFF000000));
4264 ARM_LDR_IMM (code, dreg, dreg, 0);
4266 /* What is faster, a branch or a load ? */
4267 ARM_CMP_REG_IMM (code, dreg, 0, 0);
4268 /* The breakpoint instruction */
4269 ARM_LDR_IMM_COND (code, dreg, dreg, 0, ARMCOND_NE);
4272 * A placeholder for a possible breakpoint inserted by
4273 * mono_arch_set_breakpoint ().
4275 for (i = 0; i < 4; ++i)
4282 ARM_ADDS_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4285 ARM_ADD_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4289 ARM_ADCS_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4292 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
4293 g_assert (imm8 >= 0);
4294 ARM_ADDS_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
4298 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
4299 g_assert (imm8 >= 0);
4300 ARM_ADD_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
4304 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
4305 g_assert (imm8 >= 0);
4306 ARM_ADCS_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
4309 ARM_ADD_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4310 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, "OverflowException");
4312 case OP_IADD_OVF_UN:
4313 ARM_ADD_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4314 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, "OverflowException");
4317 ARM_SUB_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4318 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, "OverflowException");
4320 case OP_ISUB_OVF_UN:
4321 ARM_SUB_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4322 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_TRUE, PPC_BR_EQ, "OverflowException");
4324 case OP_ADD_OVF_CARRY:
4325 ARM_ADCS_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4326 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, "OverflowException");
4328 case OP_ADD_OVF_UN_CARRY:
4329 ARM_ADCS_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4330 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, "OverflowException");
4332 case OP_SUB_OVF_CARRY:
4333 ARM_SBCS_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4334 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, "OverflowException");
4336 case OP_SUB_OVF_UN_CARRY:
4337 ARM_SBCS_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4338 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_TRUE, PPC_BR_EQ, "OverflowException");
4342 ARM_SUBS_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4345 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
4346 g_assert (imm8 >= 0);
4347 ARM_SUBS_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
4350 ARM_SUB_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4354 ARM_SBCS_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4358 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
4359 g_assert (imm8 >= 0);
4360 ARM_SUB_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
4364 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
4365 g_assert (imm8 >= 0);
4366 ARM_SBCS_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
4368 case OP_ARM_RSBS_IMM:
4369 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
4370 g_assert (imm8 >= 0);
4371 ARM_RSBS_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
4373 case OP_ARM_RSC_IMM:
4374 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
4375 g_assert (imm8 >= 0);
4376 ARM_RSC_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
4379 ARM_AND_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4383 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
4384 g_assert (imm8 >= 0);
4385 ARM_AND_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
4388 g_assert (v7s_supported);
4389 ARM_SDIV (code, ins->dreg, ins->sreg1, ins->sreg2);
4392 g_assert (v7s_supported);
4393 ARM_UDIV (code, ins->dreg, ins->sreg1, ins->sreg2);
4396 g_assert (v7s_supported);
4397 ARM_SDIV (code, ARMREG_LR, ins->sreg1, ins->sreg2);
4398 ARM_MLS (code, ins->dreg, ARMREG_LR, ins->sreg2, ins->sreg1);
4401 g_assert (v7s_supported);
4402 ARM_UDIV (code, ARMREG_LR, ins->sreg1, ins->sreg2);
4403 ARM_MLS (code, ins->dreg, ARMREG_LR, ins->sreg2, ins->sreg1);
4407 g_assert_not_reached ();
4409 ARM_ORR_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4413 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
4414 g_assert (imm8 >= 0);
4415 ARM_ORR_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
4418 ARM_EOR_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4422 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
4423 g_assert (imm8 >= 0);
4424 ARM_EOR_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
4427 ARM_SHL_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4432 ARM_SHL_IMM (code, ins->dreg, ins->sreg1, (ins->inst_imm & 0x1f));
4433 else if (ins->dreg != ins->sreg1)
4434 ARM_MOV_REG_REG (code, ins->dreg, ins->sreg1);
4437 ARM_SAR_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4442 ARM_SAR_IMM (code, ins->dreg, ins->sreg1, (ins->inst_imm & 0x1f));
4443 else if (ins->dreg != ins->sreg1)
4444 ARM_MOV_REG_REG (code, ins->dreg, ins->sreg1);
4447 case OP_ISHR_UN_IMM:
4449 ARM_SHR_IMM (code, ins->dreg, ins->sreg1, (ins->inst_imm & 0x1f));
4450 else if (ins->dreg != ins->sreg1)
4451 ARM_MOV_REG_REG (code, ins->dreg, ins->sreg1);
4454 ARM_SHR_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4457 ARM_MVN_REG_REG (code, ins->dreg, ins->sreg1);
4460 ARM_RSB_REG_IMM8 (code, ins->dreg, ins->sreg1, 0);
4463 if (ins->dreg == ins->sreg2)
4464 ARM_MUL_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4466 ARM_MUL_REG_REG (code, ins->dreg, ins->sreg2, ins->sreg1);
4469 g_assert_not_reached ();
4472 /* FIXME: handle ovf/ sreg2 != dreg */
4473 ARM_MUL_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4474 /* FIXME: MUL doesn't set the C/O flags on ARM */
4476 case OP_IMUL_OVF_UN:
4477 /* FIXME: handle ovf/ sreg2 != dreg */
4478 ARM_MUL_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4479 /* FIXME: MUL doesn't set the C/O flags on ARM */
4482 code = mono_arm_emit_load_imm (code, ins->dreg, ins->inst_c0);
4485 /* Load the GOT offset */
4486 mono_add_patch_info (cfg, offset, (MonoJumpInfoType)ins->inst_i1, ins->inst_p0);
4487 ARM_LDR_IMM (code, ins->dreg, ARMREG_PC, 0);
4489 *(gpointer*)code = NULL;
4491 /* Load the value from the GOT */
4492 ARM_LDR_REG_REG (code, ins->dreg, ARMREG_PC, ins->dreg);
4494 case OP_OBJC_GET_SELECTOR:
4495 mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_OBJC_SELECTOR_REF, ins->inst_p0);
4496 ARM_LDR_IMM (code, ins->dreg, ARMREG_PC, 0);
4498 *(gpointer*)code = NULL;
4500 ARM_LDR_REG_REG (code, ins->dreg, ARMREG_PC, ins->dreg);
4502 case OP_ICONV_TO_I4:
4503 case OP_ICONV_TO_U4:
4505 if (ins->dreg != ins->sreg1)
4506 ARM_MOV_REG_REG (code, ins->dreg, ins->sreg1);
4509 int saved = ins->sreg2;
4510 if (ins->sreg2 == ARM_LSW_REG) {
4511 ARM_MOV_REG_REG (code, ARMREG_LR, ins->sreg2);
4514 if (ins->sreg1 != ARM_LSW_REG)
4515 ARM_MOV_REG_REG (code, ARM_LSW_REG, ins->sreg1);
4516 if (saved != ARM_MSW_REG)
4517 ARM_MOV_REG_REG (code, ARM_MSW_REG, saved);
4522 ARM_CPYD (code, ins->dreg, ins->sreg1);
4524 case OP_FCONV_TO_R4:
4526 ARM_CVTD (code, ins->dreg, ins->sreg1);
4527 ARM_CVTS (code, ins->dreg, ins->dreg);
4532 * Keep in sync with mono_arch_emit_epilog
4534 g_assert (!cfg->method->save_lmf);
4536 code = emit_load_volatile_arguments (cfg, code);
4538 code = emit_big_add (code, ARMREG_SP, cfg->frame_reg, cfg->stack_usage);
4540 if (cfg->used_int_regs)
4541 ARM_POP (code, cfg->used_int_regs);
4542 ARM_POP (code, (1 << ARMREG_R7) | (1 << ARMREG_LR));
4544 ARM_POP (code, cfg->used_int_regs | (1 << ARMREG_LR));
4546 mono_add_patch_info (cfg, (guint8*) code - cfg->native_code, MONO_PATCH_INFO_METHOD_JUMP, ins->inst_p0);
4547 if (cfg->compile_aot) {
4548 ARM_LDR_IMM (code, ARMREG_IP, ARMREG_PC, 0);
4550 *(gpointer*)code = NULL;
4552 ARM_LDR_REG_REG (code, ARMREG_PC, ARMREG_PC, ARMREG_IP);
4554 code = mono_arm_patchable_b (code, ARMCOND_AL);
4558 /* ensure ins->sreg1 is not NULL */
4559 ARM_LDRB_IMM (code, ARMREG_LR, ins->sreg1, 0);
4562 g_assert (cfg->sig_cookie < 128);
4563 ARM_LDR_IMM (code, ARMREG_IP, cfg->frame_reg, cfg->sig_cookie);
4564 ARM_STR_IMM (code, ARMREG_IP, ins->sreg1, 0);
4573 call = (MonoCallInst*)ins;
4576 code = emit_float_args (cfg, call, code, &max_len, &offset);
4578 if (ins->flags & MONO_INST_HAS_METHOD)
4579 mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_METHOD, call->method);
4581 mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_ABS, call->fptr);
4582 code = emit_call_seq (cfg, code);
4583 ins->flags |= MONO_INST_GC_CALLSITE;
4584 ins->backend.pc_offset = code - cfg->native_code;
4585 code = emit_move_return_value (cfg, ins, code);
4591 case OP_VOIDCALL_REG:
4594 code = emit_float_args (cfg, (MonoCallInst *)ins, code, &max_len, &offset);
4596 code = emit_call_reg (code, ins->sreg1);
4597 ins->flags |= MONO_INST_GC_CALLSITE;
4598 ins->backend.pc_offset = code - cfg->native_code;
4599 code = emit_move_return_value (cfg, ins, code);
4601 case OP_FCALL_MEMBASE:
4602 case OP_LCALL_MEMBASE:
4603 case OP_VCALL_MEMBASE:
4604 case OP_VCALL2_MEMBASE:
4605 case OP_VOIDCALL_MEMBASE:
4606 case OP_CALL_MEMBASE: {
4607 gboolean imt_arg = FALSE;
4609 g_assert (ins->sreg1 != ARMREG_LR);
4610 call = (MonoCallInst*)ins;
4613 code = emit_float_args (cfg, call, code, &max_len, &offset);
4615 if (call->dynamic_imt_arg || call->method->klass->flags & TYPE_ATTRIBUTE_INTERFACE)
4617 if (!arm_is_imm12 (ins->inst_offset))
4618 code = mono_arm_emit_load_imm (code, ARMREG_IP, ins->inst_offset);
4619 #ifdef USE_JUMP_TABLES
4625 ARM_ADD_REG_IMM8 (code, ARMREG_LR, ARMREG_PC, LR_BIAS);
4627 ARM_MOV_REG_REG (code, ARMREG_LR, ARMREG_PC);
4629 if (!arm_is_imm12 (ins->inst_offset))
4630 ARM_LDR_REG_REG (code, ARMREG_PC, ins->sreg1, ARMREG_IP);
4632 ARM_LDR_IMM (code, ARMREG_PC, ins->sreg1, ins->inst_offset);
4635 * We can't embed the method in the code stream in PIC code, or
4637 * Instead, we put it in V5 in code emitted by
4638 * mono_arch_emit_imt_argument (), and embed NULL here to
4639 * signal the IMT thunk that the value is in V5.
4641 #ifdef USE_JUMP_TABLES
4642 /* In case of jumptables we always use value in V5. */
4645 if (call->dynamic_imt_arg)
4646 *((gpointer*)code) = NULL;
4648 *((gpointer*)code) = (gpointer)call->method;
4652 ins->flags |= MONO_INST_GC_CALLSITE;
4653 ins->backend.pc_offset = code - cfg->native_code;
4654 code = emit_move_return_value (cfg, ins, code);
4658 /* keep alignment */
4659 int alloca_waste = cfg->param_area;
4662 /* round the size to 8 bytes */
4663 ARM_ADD_REG_IMM8 (code, ins->dreg, ins->sreg1, 7);
4664 ARM_BIC_REG_IMM8 (code, ins->dreg, ins->dreg, 7);
4666 ARM_ADD_REG_IMM8 (code, ins->dreg, ins->dreg, alloca_waste);
4667 ARM_SUB_REG_REG (code, ARMREG_SP, ARMREG_SP, ins->dreg);
4668 /* memzero the area: dreg holds the size, sp is the pointer */
4669 if (ins->flags & MONO_INST_INIT) {
4670 guint8 *start_loop, *branch_to_cond;
4671 ARM_MOV_REG_IMM8 (code, ARMREG_LR, 0);
4672 branch_to_cond = code;
4675 ARM_STR_REG_REG (code, ARMREG_LR, ARMREG_SP, ins->dreg);
4676 arm_patch (branch_to_cond, code);
4677 /* decrement by 4 and set flags */
4678 ARM_SUBS_REG_IMM8 (code, ins->dreg, ins->dreg, sizeof (mgreg_t));
4679 ARM_B_COND (code, ARMCOND_GE, 0);
4680 arm_patch (code - 4, start_loop);
4682 ARM_ADD_REG_IMM8 (code, ins->dreg, ARMREG_SP, alloca_waste);
4687 MonoInst *var = cfg->dyn_call_var;
4689 g_assert (var->opcode == OP_REGOFFSET);
4690 g_assert (arm_is_imm12 (var->inst_offset));
4692 /* lr = args buffer filled by mono_arch_get_dyn_call_args () */
4693 ARM_MOV_REG_REG( code, ARMREG_LR, ins->sreg1);
4695 ARM_MOV_REG_REG( code, ARMREG_IP, ins->sreg2);
4697 /* Save args buffer */
4698 ARM_STR_IMM (code, ARMREG_LR, var->inst_basereg, var->inst_offset);
4700 /* Set stack slots using R0 as scratch reg */
4701 /* MONO_ARCH_DYN_CALL_PARAM_AREA gives the size of stack space available */
4702 for (i = 0; i < DYN_CALL_STACK_ARGS; ++i) {
4703 ARM_LDR_IMM (code, ARMREG_R0, ARMREG_LR, (PARAM_REGS + i) * sizeof (mgreg_t));
4704 ARM_STR_IMM (code, ARMREG_R0, ARMREG_SP, i * sizeof (mgreg_t));
4707 /* Set argument registers */
4708 for (i = 0; i < PARAM_REGS; ++i)
4709 ARM_LDR_IMM (code, i, ARMREG_LR, i * sizeof (mgreg_t));
4712 ARM_MOV_REG_REG (code, ARMREG_LR, ARMREG_PC);
4713 ARM_MOV_REG_REG (code, ARMREG_PC, ARMREG_IP);
4716 ARM_LDR_IMM (code, ARMREG_IP, var->inst_basereg, var->inst_offset);
4717 ARM_STR_IMM (code, ARMREG_R0, ARMREG_IP, G_STRUCT_OFFSET (DynCallArgs, res));
4718 ARM_STR_IMM (code, ARMREG_R1, ARMREG_IP, G_STRUCT_OFFSET (DynCallArgs, res2));
4722 if (ins->sreg1 != ARMREG_R0)
4723 ARM_MOV_REG_REG (code, ARMREG_R0, ins->sreg1);
4724 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_INTERNAL_METHOD,
4725 (gpointer)"mono_arch_throw_exception");
4726 code = emit_call_seq (cfg, code);
4730 if (ins->sreg1 != ARMREG_R0)
4731 ARM_MOV_REG_REG (code, ARMREG_R0, ins->sreg1);
4732 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_INTERNAL_METHOD,
4733 (gpointer)"mono_arch_rethrow_exception");
4734 code = emit_call_seq (cfg, code);
4737 case OP_START_HANDLER: {
4738 MonoInst *spvar = mono_find_spvar_for_region (cfg, bb->region);
4741 /* Reserve a param area, see filter-stack.exe */
4742 if (cfg->param_area) {
4743 if ((i = mono_arm_is_rotated_imm8 (cfg->param_area, &rot_amount)) >= 0) {
4744 ARM_SUB_REG_IMM (code, ARMREG_SP, ARMREG_SP, i, rot_amount);
4746 code = mono_arm_emit_load_imm (code, ARMREG_IP, cfg->param_area);
4747 ARM_SUB_REG_REG (code, ARMREG_SP, ARMREG_SP, ARMREG_IP);
4751 if (arm_is_imm12 (spvar->inst_offset)) {
4752 ARM_STR_IMM (code, ARMREG_LR, spvar->inst_basereg, spvar->inst_offset);
4754 code = mono_arm_emit_load_imm (code, ARMREG_IP, spvar->inst_offset);
4755 ARM_STR_REG_REG (code, ARMREG_LR, spvar->inst_basereg, ARMREG_IP);
4759 case OP_ENDFILTER: {
4760 MonoInst *spvar = mono_find_spvar_for_region (cfg, bb->region);
4763 /* Free the param area */
4764 if (cfg->param_area) {
4765 if ((i = mono_arm_is_rotated_imm8 (cfg->param_area, &rot_amount)) >= 0) {
4766 ARM_ADD_REG_IMM (code, ARMREG_SP, ARMREG_SP, i, rot_amount);
4768 code = mono_arm_emit_load_imm (code, ARMREG_IP, cfg->param_area);
4769 ARM_ADD_REG_REG (code, ARMREG_SP, ARMREG_SP, ARMREG_IP);
4773 if (ins->sreg1 != ARMREG_R0)
4774 ARM_MOV_REG_REG (code, ARMREG_R0, ins->sreg1);
4775 if (arm_is_imm12 (spvar->inst_offset)) {
4776 ARM_LDR_IMM (code, ARMREG_IP, spvar->inst_basereg, spvar->inst_offset);
4778 g_assert (ARMREG_IP != spvar->inst_basereg);
4779 code = mono_arm_emit_load_imm (code, ARMREG_IP, spvar->inst_offset);
4780 ARM_LDR_REG_REG (code, ARMREG_IP, spvar->inst_basereg, ARMREG_IP);
4782 ARM_MOV_REG_REG (code, ARMREG_PC, ARMREG_IP);
4785 case OP_ENDFINALLY: {
4786 MonoInst *spvar = mono_find_spvar_for_region (cfg, bb->region);
4789 /* Free the param area */
4790 if (cfg->param_area) {
4791 if ((i = mono_arm_is_rotated_imm8 (cfg->param_area, &rot_amount)) >= 0) {
4792 ARM_ADD_REG_IMM (code, ARMREG_SP, ARMREG_SP, i, rot_amount);
4794 code = mono_arm_emit_load_imm (code, ARMREG_IP, cfg->param_area);
4795 ARM_ADD_REG_REG (code, ARMREG_SP, ARMREG_SP, ARMREG_IP);
4799 if (arm_is_imm12 (spvar->inst_offset)) {
4800 ARM_LDR_IMM (code, ARMREG_IP, spvar->inst_basereg, spvar->inst_offset);
4802 g_assert (ARMREG_IP != spvar->inst_basereg);
4803 code = mono_arm_emit_load_imm (code, ARMREG_IP, spvar->inst_offset);
4804 ARM_LDR_REG_REG (code, ARMREG_IP, spvar->inst_basereg, ARMREG_IP);
4806 ARM_MOV_REG_REG (code, ARMREG_PC, ARMREG_IP);
4809 case OP_CALL_HANDLER:
4810 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_BB, ins->inst_target_bb);
4811 code = mono_arm_patchable_bl (code, ARMCOND_AL);
4812 mono_cfg_add_try_hole (cfg, ins->inst_eh_block, code, bb);
4815 ins->inst_c0 = code - cfg->native_code;
4818 /*if (ins->inst_target_bb->native_offset) {
4820 //x86_jump_code (code, cfg->native_code + ins->inst_target_bb->native_offset);
4822 mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_BB, ins->inst_target_bb);
4823 code = mono_arm_patchable_b (code, ARMCOND_AL);
4827 ARM_MOV_REG_REG (code, ARMREG_PC, ins->sreg1);
4831 * In the normal case we have:
4832 * ldr pc, [pc, ins->sreg1 << 2]
4835 * ldr lr, [pc, ins->sreg1 << 2]
4837 * After follows the data.
4838 * FIXME: add aot support.
4840 mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_SWITCH, ins->inst_p0);
4841 #ifdef USE_JUMP_TABLES
4843 gpointer *jte = mono_jumptable_add_entries (GPOINTER_TO_INT (ins->klass));
4844 code = mono_arm_load_jumptable_entry_addr (code, jte, ARMREG_IP);
4845 ARM_LDR_REG_REG_SHIFT (code, ARMREG_PC, ARMREG_IP, ins->sreg1, ARMSHIFT_LSL, 2);
4849 max_len += 4 * GPOINTER_TO_INT (ins->klass);
4850 if (offset + max_len > (cfg->code_size - 16)) {
4851 cfg->code_size += max_len;
4852 cfg->code_size *= 2;
4853 cfg->native_code = g_realloc (cfg->native_code, cfg->code_size);
4854 code = cfg->native_code + offset;
4856 ARM_LDR_REG_REG_SHIFT (code, ARMREG_PC, ARMREG_PC, ins->sreg1, ARMSHIFT_LSL, 2);
4858 code += 4 * GPOINTER_TO_INT (ins->klass);
4863 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 0, ARMCOND_NE);
4864 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_EQ);
4868 ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
4869 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_LT);
4873 ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
4874 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_LO);
4878 ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
4879 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_GT);
4883 ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
4884 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_HI);
4886 case OP_COND_EXC_EQ:
4887 case OP_COND_EXC_NE_UN:
4888 case OP_COND_EXC_LT:
4889 case OP_COND_EXC_LT_UN:
4890 case OP_COND_EXC_GT:
4891 case OP_COND_EXC_GT_UN:
4892 case OP_COND_EXC_GE:
4893 case OP_COND_EXC_GE_UN:
4894 case OP_COND_EXC_LE:
4895 case OP_COND_EXC_LE_UN:
4896 EMIT_COND_SYSTEM_EXCEPTION (ins->opcode - OP_COND_EXC_EQ, ins->inst_p1);
4898 case OP_COND_EXC_IEQ:
4899 case OP_COND_EXC_INE_UN:
4900 case OP_COND_EXC_ILT:
4901 case OP_COND_EXC_ILT_UN:
4902 case OP_COND_EXC_IGT:
4903 case OP_COND_EXC_IGT_UN:
4904 case OP_COND_EXC_IGE:
4905 case OP_COND_EXC_IGE_UN:
4906 case OP_COND_EXC_ILE:
4907 case OP_COND_EXC_ILE_UN:
4908 EMIT_COND_SYSTEM_EXCEPTION (ins->opcode - OP_COND_EXC_IEQ, ins->inst_p1);
4911 case OP_COND_EXC_IC:
4912 EMIT_COND_SYSTEM_EXCEPTION_FLAGS (ARMCOND_CS, ins->inst_p1);
4914 case OP_COND_EXC_OV:
4915 case OP_COND_EXC_IOV:
4916 EMIT_COND_SYSTEM_EXCEPTION_FLAGS (ARMCOND_VS, ins->inst_p1);
4918 case OP_COND_EXC_NC:
4919 case OP_COND_EXC_INC:
4920 EMIT_COND_SYSTEM_EXCEPTION_FLAGS (ARMCOND_CC, ins->inst_p1);
4922 case OP_COND_EXC_NO:
4923 case OP_COND_EXC_INO:
4924 EMIT_COND_SYSTEM_EXCEPTION_FLAGS (ARMCOND_VC, ins->inst_p1);
4936 EMIT_COND_BRANCH (ins, ins->opcode - OP_IBEQ);
4939 /* floating point opcodes */
4941 if (cfg->compile_aot) {
4942 ARM_FLDD (code, ins->dreg, ARMREG_PC, 0);
4944 *(guint32*)code = ((guint32*)(ins->inst_p0))[0];
4946 *(guint32*)code = ((guint32*)(ins->inst_p0))[1];
4949 /* FIXME: we can optimize the imm load by dealing with part of
4950 * the displacement in LDFD (aligning to 512).
4952 code = mono_arm_emit_load_imm (code, ARMREG_LR, (guint32)ins->inst_p0);
4953 ARM_FLDD (code, ins->dreg, ARMREG_LR, 0);
4957 if (cfg->compile_aot) {
4958 ARM_FLDS (code, ins->dreg, ARMREG_PC, 0);
4960 *(guint32*)code = ((guint32*)(ins->inst_p0))[0];
4962 ARM_CVTS (code, ins->dreg, ins->dreg);
4964 code = mono_arm_emit_load_imm (code, ARMREG_LR, (guint32)ins->inst_p0);
4965 ARM_FLDS (code, ins->dreg, ARMREG_LR, 0);
4966 ARM_CVTS (code, ins->dreg, ins->dreg);
4969 case OP_STORER8_MEMBASE_REG:
4970 /* This is generated by the local regalloc pass which runs after the lowering pass */
4971 if (!arm_is_fpimm8 (ins->inst_offset)) {
4972 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_offset);
4973 ARM_ADD_REG_REG (code, ARMREG_LR, ARMREG_LR, ins->inst_destbasereg);
4974 ARM_FSTD (code, ins->sreg1, ARMREG_LR, 0);
4976 ARM_FSTD (code, ins->sreg1, ins->inst_destbasereg, ins->inst_offset);
4979 case OP_LOADR8_MEMBASE:
4980 /* This is generated by the local regalloc pass which runs after the lowering pass */
4981 if (!arm_is_fpimm8 (ins->inst_offset)) {
4982 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_offset);
4983 ARM_ADD_REG_REG (code, ARMREG_LR, ARMREG_LR, ins->inst_basereg);
4984 ARM_FLDD (code, ins->dreg, ARMREG_LR, 0);
4986 ARM_FLDD (code, ins->dreg, ins->inst_basereg, ins->inst_offset);
4989 case OP_STORER4_MEMBASE_REG:
4990 g_assert (arm_is_fpimm8 (ins->inst_offset));
4991 ARM_CVTD (code, vfp_scratch1, ins->sreg1);
4992 ARM_FSTS (code, vfp_scratch1, ins->inst_destbasereg, ins->inst_offset);
4994 case OP_LOADR4_MEMBASE:
4995 g_assert (arm_is_fpimm8 (ins->inst_offset));
4996 ARM_FLDS (code, vfp_scratch1, ins->inst_basereg, ins->inst_offset);
4997 ARM_CVTS (code, ins->dreg, vfp_scratch1);
4999 case OP_ICONV_TO_R_UN: {
5000 g_assert_not_reached ();
5003 case OP_ICONV_TO_R4:
5004 ARM_FMSR (code, vfp_scratch1, ins->sreg1);
5005 ARM_FSITOS (code, vfp_scratch1, vfp_scratch1);
5006 ARM_CVTS (code, ins->dreg, vfp_scratch1);
5008 case OP_ICONV_TO_R8:
5009 ARM_FMSR (code, vfp_scratch1, ins->sreg1);
5010 ARM_FSITOD (code, ins->dreg, vfp_scratch1);
5014 if (mono_method_signature (cfg->method)->ret->type == MONO_TYPE_R4) {
5015 ARM_CVTD (code, ARM_VFP_F0, ins->sreg1);
5017 if (!IS_HARD_FLOAT) {
5018 ARM_FMRS (code, ARMREG_R0, ARM_VFP_F0);
5021 if (IS_HARD_FLOAT) {
5022 ARM_CPYD (code, ARM_VFP_D0, ins->sreg1);
5024 ARM_FMRRD (code, ARMREG_R0, ARMREG_R1, ins->sreg1);
5028 case OP_FCONV_TO_I1:
5029 code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 1, TRUE);
5031 case OP_FCONV_TO_U1:
5032 code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 1, FALSE);
5034 case OP_FCONV_TO_I2:
5035 code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 2, TRUE);
5037 case OP_FCONV_TO_U2:
5038 code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 2, FALSE);
5040 case OP_FCONV_TO_I4:
5042 code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 4, TRUE);
5044 case OP_FCONV_TO_U4:
5046 code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 4, FALSE);
5048 case OP_FCONV_TO_I8:
5049 case OP_FCONV_TO_U8:
5050 g_assert_not_reached ();
5051 /* Implemented as helper calls */
5053 case OP_LCONV_TO_R_UN:
5054 g_assert_not_reached ();
5055 /* Implemented as helper calls */
5057 case OP_LCONV_TO_OVF_I4_2: {
5058 guint8 *high_bit_not_set, *valid_negative, *invalid_negative, *valid_positive;
5060 * Valid ints: 0xFFFFFFFF:0x80000000 to 0x00000000:0x7FFFFFFF (i.e. INT32_MIN..INT32_MAX)
5063 ARM_CMP_REG_IMM8 (code, ins->sreg1, 0);
5064 high_bit_not_set = code;
5065 ARM_B_COND (code, ARMCOND_GE, 0); /*branch if bit 31 of the lower part is not set*/
5067 ARM_CMN_REG_IMM8 (code, ins->sreg2, 1); /*This have the same effect as CMP reg, 0xFFFFFFFF */
5068 valid_negative = code;
5069 ARM_B_COND (code, ARMCOND_EQ, 0); /*branch if upper part == 0xFFFFFFFF (lower part has bit 31 set) */
5070 invalid_negative = code;
5071 ARM_B_COND (code, ARMCOND_AL, 0);
5073 arm_patch (high_bit_not_set, code);
5075 ARM_CMP_REG_IMM8 (code, ins->sreg2, 0);
5076 valid_positive = code;
5077 ARM_B_COND (code, ARMCOND_EQ, 0); /*branch if upper part == 0 (lower part has bit 31 clear)*/
5079 arm_patch (invalid_negative, code);
5080 EMIT_COND_SYSTEM_EXCEPTION_FLAGS (ARMCOND_AL, "OverflowException");
5082 arm_patch (valid_negative, code);
5083 arm_patch (valid_positive, code);
5085 if (ins->dreg != ins->sreg1)
5086 ARM_MOV_REG_REG (code, ins->dreg, ins->sreg1);
5090 ARM_VFP_ADDD (code, ins->dreg, ins->sreg1, ins->sreg2);
5093 ARM_VFP_SUBD (code, ins->dreg, ins->sreg1, ins->sreg2);
5096 ARM_VFP_MULD (code, ins->dreg, ins->sreg1, ins->sreg2);
5099 ARM_VFP_DIVD (code, ins->dreg, ins->sreg1, ins->sreg2);
5102 ARM_NEGD (code, ins->dreg, ins->sreg1);
5106 g_assert_not_reached ();
5110 ARM_CMPD (code, ins->sreg1, ins->sreg2);
5116 ARM_CMPD (code, ins->sreg1, ins->sreg2);
5119 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 0, ARMCOND_NE);
5120 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_EQ);
5124 ARM_CMPD (code, ins->sreg1, ins->sreg2);
5127 ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
5128 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_MI);
5132 ARM_CMPD (code, ins->sreg1, ins->sreg2);
5135 ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
5136 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_MI);
5137 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_VS);
5141 ARM_CMPD (code, ins->sreg2, ins->sreg1);
5144 ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
5145 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_MI);
5149 ARM_CMPD (code, ins->sreg2, ins->sreg1);
5152 ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
5153 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_MI);
5154 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_VS);
5156 /* ARM FPA flags table:
5157 * N Less than ARMCOND_MI
5158 * Z Equal ARMCOND_EQ
5159 * C Greater Than or Equal ARMCOND_CS
5160 * V Unordered ARMCOND_VS
5163 EMIT_COND_BRANCH (ins, OP_IBEQ - OP_IBEQ);
5166 EMIT_COND_BRANCH (ins, OP_IBNE_UN - OP_IBEQ);
5169 EMIT_COND_BRANCH_FLAGS (ins, ARMCOND_MI); /* N set */
5172 EMIT_COND_BRANCH_FLAGS (ins, ARMCOND_VS); /* V set */
5173 EMIT_COND_BRANCH_FLAGS (ins, ARMCOND_MI); /* N set */
5179 g_assert_not_reached ();
5183 EMIT_COND_BRANCH_FLAGS (ins, ARMCOND_GE);
5185 /* FPA requires EQ even though the docs suggest that just CS is enough */
5186 EMIT_COND_BRANCH_FLAGS (ins, ARMCOND_EQ);
5187 EMIT_COND_BRANCH_FLAGS (ins, ARMCOND_CS);
5191 EMIT_COND_BRANCH_FLAGS (ins, ARMCOND_VS); /* V set */
5192 EMIT_COND_BRANCH_FLAGS (ins, ARMCOND_GE);
5197 #ifdef USE_JUMP_TABLES
5199 gpointer *jte = mono_jumptable_add_entries (2);
5200 jte [0] = GUINT_TO_POINTER (0xffffffff);
5201 jte [1] = GUINT_TO_POINTER (0x7fefffff);
5202 code = mono_arm_load_jumptable_entry_addr (code, jte, ARMREG_IP);
5203 ARM_FLDD (code, vfp_scratch1, ARMREG_IP, 0);
5206 ARM_ABSD (code, vfp_scratch2, ins->sreg1);
5207 ARM_FLDD (code, vfp_scratch1, ARMREG_PC, 0);
5209 *(guint32*)code = 0xffffffff;
5211 *(guint32*)code = 0x7fefffff;
5214 ARM_CMPD (code, vfp_scratch2, vfp_scratch1);
5216 EMIT_COND_SYSTEM_EXCEPTION_FLAGS (ARMCOND_GT, "ArithmeticException");
5217 ARM_CMPD (code, ins->sreg1, ins->sreg1);
5219 EMIT_COND_SYSTEM_EXCEPTION_FLAGS (ARMCOND_VS, "ArithmeticException");
5220 ARM_CPYD (code, ins->dreg, ins->sreg1);
5225 case OP_GC_LIVENESS_DEF:
5226 case OP_GC_LIVENESS_USE:
5227 case OP_GC_PARAM_SLOT_LIVENESS_DEF:
5228 ins->backend.pc_offset = code - cfg->native_code;
5230 case OP_GC_SPILL_SLOT_LIVENESS_DEF:
5231 ins->backend.pc_offset = code - cfg->native_code;
5232 bb->spill_slot_defs = g_slist_prepend_mempool (cfg->mempool, bb->spill_slot_defs, ins);
5236 g_warning ("unknown opcode %s in %s()\n", mono_inst_name (ins->opcode), __FUNCTION__);
5237 g_assert_not_reached ();
5240 if ((cfg->opt & MONO_OPT_BRANCH) && ((code - cfg->native_code - offset) > max_len)) {
5241 g_warning ("wrong maximal instruction length of instruction %s (expected %d, got %d)",
5242 mono_inst_name (ins->opcode), max_len, code - cfg->native_code - offset);
5243 g_assert_not_reached ();
5249 last_offset = offset;
5252 cfg->code_len = code - cfg->native_code;
5255 #endif /* DISABLE_JIT */
5257 #ifdef HAVE_AEABI_READ_TP
5258 void __aeabi_read_tp (void);
5262 mono_arch_register_lowlevel_calls (void)
5264 /* The signature doesn't matter */
5265 mono_register_jit_icall (mono_arm_throw_exception, "mono_arm_throw_exception", mono_create_icall_signature ("void"), TRUE);
5266 mono_register_jit_icall (mono_arm_throw_exception_by_token, "mono_arm_throw_exception_by_token", mono_create_icall_signature ("void"), TRUE);
5268 #ifndef MONO_CROSS_COMPILE
5269 #ifdef HAVE_AEABI_READ_TP
5270 mono_register_jit_icall (__aeabi_read_tp, "__aeabi_read_tp", mono_create_icall_signature ("void"), TRUE);
5275 #define patch_lis_ori(ip,val) do {\
5276 guint16 *__lis_ori = (guint16*)(ip); \
5277 __lis_ori [1] = (((guint32)(val)) >> 16) & 0xffff; \
5278 __lis_ori [3] = ((guint32)(val)) & 0xffff; \
5282 mono_arch_patch_code (MonoMethod *method, MonoDomain *domain, guint8 *code, MonoJumpInfo *ji, MonoCodeManager *dyn_code_mp, gboolean run_cctors)
5284 MonoJumpInfo *patch_info;
5285 gboolean compile_aot = !run_cctors;
5287 for (patch_info = ji; patch_info; patch_info = patch_info->next) {
5288 unsigned char *ip = patch_info->ip.i + code;
5289 const unsigned char *target;
5291 if (patch_info->type == MONO_PATCH_INFO_SWITCH && !compile_aot) {
5292 #ifdef USE_JUMP_TABLES
5293 gpointer *jt = mono_jumptable_get_entry (ip);
5295 gpointer *jt = (gpointer*)(ip + 8);
5298 /* jt is the inlined jump table, 2 instructions after ip
5299 * In the normal case we store the absolute addresses,
5300 * otherwise the displacements.
5302 for (i = 0; i < patch_info->data.table->table_size; i++)
5303 jt [i] = code + (int)patch_info->data.table->table [i];
5308 switch (patch_info->type) {
5309 case MONO_PATCH_INFO_BB:
5310 case MONO_PATCH_INFO_LABEL:
5313 /* No need to patch these */
5318 target = mono_resolve_patch_target (method, domain, code, patch_info, run_cctors);
5320 switch (patch_info->type) {
5321 case MONO_PATCH_INFO_IP:
5322 g_assert_not_reached ();
5323 patch_lis_ori (ip, ip);
5325 case MONO_PATCH_INFO_METHOD_REL:
5326 g_assert_not_reached ();
5327 *((gpointer *)(ip)) = code + patch_info->data.offset;
5329 case MONO_PATCH_INFO_METHODCONST:
5330 case MONO_PATCH_INFO_CLASS:
5331 case MONO_PATCH_INFO_IMAGE:
5332 case MONO_PATCH_INFO_FIELD:
5333 case MONO_PATCH_INFO_VTABLE:
5334 case MONO_PATCH_INFO_IID:
5335 case MONO_PATCH_INFO_SFLDA:
5336 case MONO_PATCH_INFO_LDSTR:
5337 case MONO_PATCH_INFO_TYPE_FROM_HANDLE:
5338 case MONO_PATCH_INFO_LDTOKEN:
5339 g_assert_not_reached ();
5340 /* from OP_AOTCONST : lis + ori */
5341 patch_lis_ori (ip, target);
5343 case MONO_PATCH_INFO_R4:
5344 case MONO_PATCH_INFO_R8:
5345 g_assert_not_reached ();
5346 *((gconstpointer *)(ip + 2)) = patch_info->data.target;
5348 case MONO_PATCH_INFO_EXC_NAME:
5349 g_assert_not_reached ();
5350 *((gconstpointer *)(ip + 1)) = patch_info->data.name;
5352 case MONO_PATCH_INFO_NONE:
5353 case MONO_PATCH_INFO_BB_OVF:
5354 case MONO_PATCH_INFO_EXC_OVF:
5355 /* everything is dealt with at epilog output time */
5360 arm_patch_general (domain, ip, target, dyn_code_mp);
5367 * Stack frame layout:
5369 * ------------------- fp
5370 * MonoLMF structure or saved registers
5371 * -------------------
5373 * -------------------
5375 * -------------------
5376 * optional 8 bytes for tracing
5377 * -------------------
5378 * param area size is cfg->param_area
5379 * ------------------- sp
5382 mono_arch_emit_prolog (MonoCompile *cfg)
5384 MonoMethod *method = cfg->method;
5386 MonoMethodSignature *sig;
5388 int alloc_size, orig_alloc_size, pos, max_offset, i, rot_amount;
5393 int prev_sp_offset, reg_offset;
5395 if (mono_jit_trace_calls != NULL && mono_trace_eval (method))
5398 sig = mono_method_signature (method);
5399 cfg->code_size = 256 + sig->param_count * 64;
5400 code = cfg->native_code = g_malloc (cfg->code_size);
5402 mono_emit_unwind_op_def_cfa (cfg, code, ARMREG_SP, 0);
5404 alloc_size = cfg->stack_offset;
5410 * The iphone uses R7 as the frame pointer, and it points at the saved
5415 * We can't use r7 as a frame pointer since it points into the middle of
5416 * the frame, so we keep using our own frame pointer.
5417 * FIXME: Optimize this.
5419 ARM_PUSH (code, (1 << ARMREG_R7) | (1 << ARMREG_LR));
5420 ARM_MOV_REG_REG (code, ARMREG_R7, ARMREG_SP);
5421 prev_sp_offset += 8; /* r7 and lr */
5422 mono_emit_unwind_op_def_cfa_offset (cfg, code, prev_sp_offset);
5423 mono_emit_unwind_op_offset (cfg, code, ARMREG_R7, (- prev_sp_offset) + 0);
5426 if (!method->save_lmf) {
5428 /* No need to push LR again */
5429 if (cfg->used_int_regs)
5430 ARM_PUSH (code, cfg->used_int_regs);
5432 ARM_PUSH (code, cfg->used_int_regs | (1 << ARMREG_LR));
5433 prev_sp_offset += 4;
5435 for (i = 0; i < 16; ++i) {
5436 if (cfg->used_int_regs & (1 << i))
5437 prev_sp_offset += 4;
5439 mono_emit_unwind_op_def_cfa_offset (cfg, code, prev_sp_offset);
5441 for (i = 0; i < 16; ++i) {
5442 if ((cfg->used_int_regs & (1 << i))) {
5443 mono_emit_unwind_op_offset (cfg, code, i, (- prev_sp_offset) + reg_offset);
5444 mini_gc_set_slot_type_from_cfa (cfg, (- prev_sp_offset) + reg_offset, SLOT_NOREF);
5449 mono_emit_unwind_op_offset (cfg, code, ARMREG_LR, -4);
5450 mini_gc_set_slot_type_from_cfa (cfg, -4, SLOT_NOREF);
5452 mono_emit_unwind_op_offset (cfg, code, ARMREG_LR, -4);
5453 mini_gc_set_slot_type_from_cfa (cfg, -4, SLOT_NOREF);
5456 ARM_MOV_REG_REG (code, ARMREG_IP, ARMREG_SP);
5457 ARM_PUSH (code, 0x5ff0);
5458 prev_sp_offset += 4 * 10; /* all but r0-r3, sp and pc */
5459 mono_emit_unwind_op_def_cfa_offset (cfg, code, prev_sp_offset);
5461 for (i = 0; i < 16; ++i) {
5462 if ((i > ARMREG_R3) && (i != ARMREG_SP) && (i != ARMREG_PC)) {
5463 /* The original r7 is saved at the start */
5464 if (!(iphone_abi && i == ARMREG_R7))
5465 mono_emit_unwind_op_offset (cfg, code, i, (- prev_sp_offset) + reg_offset);
5469 g_assert (reg_offset == 4 * 10);
5470 pos += sizeof (MonoLMF) - (4 * 10);
5474 orig_alloc_size = alloc_size;
5475 // align to MONO_ARCH_FRAME_ALIGNMENT bytes
5476 if (alloc_size & (MONO_ARCH_FRAME_ALIGNMENT - 1)) {
5477 alloc_size += MONO_ARCH_FRAME_ALIGNMENT - 1;
5478 alloc_size &= ~(MONO_ARCH_FRAME_ALIGNMENT - 1);
5481 /* the stack used in the pushed regs */
5482 if (prev_sp_offset & 4)
5484 cfg->stack_usage = alloc_size;
5486 if ((i = mono_arm_is_rotated_imm8 (alloc_size, &rot_amount)) >= 0) {
5487 ARM_SUB_REG_IMM (code, ARMREG_SP, ARMREG_SP, i, rot_amount);
5489 code = mono_arm_emit_load_imm (code, ARMREG_IP, alloc_size);
5490 ARM_SUB_REG_REG (code, ARMREG_SP, ARMREG_SP, ARMREG_IP);
5492 mono_emit_unwind_op_def_cfa_offset (cfg, code, prev_sp_offset + alloc_size);
5494 if (cfg->frame_reg != ARMREG_SP) {
5495 ARM_MOV_REG_REG (code, cfg->frame_reg, ARMREG_SP);
5496 mono_emit_unwind_op_def_cfa_reg (cfg, code, cfg->frame_reg);
5498 //g_print ("prev_sp_offset: %d, alloc_size:%d\n", prev_sp_offset, alloc_size);
5499 prev_sp_offset += alloc_size;
5501 for (i = 0; i < alloc_size - orig_alloc_size; i += 4)
5502 mini_gc_set_slot_type_from_cfa (cfg, (- prev_sp_offset) + orig_alloc_size + i, SLOT_NOREF);
5504 /* compute max_offset in order to use short forward jumps
5505 * we could skip do it on arm because the immediate displacement
5506 * for jumps is large enough, it may be useful later for constant pools
5509 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
5510 MonoInst *ins = bb->code;
5511 bb->max_offset = max_offset;
5513 if (cfg->prof_options & MONO_PROFILE_COVERAGE)
5516 MONO_BB_FOR_EACH_INS (bb, ins)
5517 max_offset += ((guint8 *)ins_get_spec (ins->opcode))[MONO_INST_LEN];
5520 /* store runtime generic context */
5521 if (cfg->rgctx_var) {
5522 MonoInst *ins = cfg->rgctx_var;
5524 g_assert (ins->opcode == OP_REGOFFSET);
5526 if (arm_is_imm12 (ins->inst_offset)) {
5527 ARM_STR_IMM (code, MONO_ARCH_RGCTX_REG, ins->inst_basereg, ins->inst_offset);
5529 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_offset);
5530 ARM_STR_REG_REG (code, MONO_ARCH_RGCTX_REG, ins->inst_basereg, ARMREG_LR);
5534 /* load arguments allocated to register from the stack */
5537 cinfo = get_call_info (cfg->generic_sharing_context, NULL, sig);
5539 if (cinfo->vtype_retaddr) {
5540 ArgInfo *ainfo = &cinfo->ret;
5541 inst = cfg->vret_addr;
5542 g_assert (arm_is_imm12 (inst->inst_offset));
5543 ARM_STR_IMM (code, ainfo->reg, inst->inst_basereg, inst->inst_offset);
5546 if (sig->call_convention == MONO_CALL_VARARG) {
5547 ArgInfo *cookie = &cinfo->sig_cookie;
5549 /* Save the sig cookie address */
5550 g_assert (cookie->storage == RegTypeBase);
5552 g_assert (arm_is_imm12 (prev_sp_offset + cookie->offset));
5553 g_assert (arm_is_imm12 (cfg->sig_cookie));
5554 ARM_ADD_REG_IMM8 (code, ARMREG_IP, cfg->frame_reg, prev_sp_offset + cookie->offset);
5555 ARM_STR_IMM (code, ARMREG_IP, cfg->frame_reg, cfg->sig_cookie);
5558 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
5559 ArgInfo *ainfo = cinfo->args + i;
5560 inst = cfg->args [pos];
5562 if (cfg->verbose_level > 2)
5563 g_print ("Saving argument %d (type: %d)\n", i, ainfo->storage);
5564 if (inst->opcode == OP_REGVAR) {
5565 if (ainfo->storage == RegTypeGeneral)
5566 ARM_MOV_REG_REG (code, inst->dreg, ainfo->reg);
5567 else if (ainfo->storage == RegTypeFP) {
5568 g_assert_not_reached ();
5569 } else if (ainfo->storage == RegTypeBase) {
5570 if (arm_is_imm12 (prev_sp_offset + ainfo->offset)) {
5571 ARM_LDR_IMM (code, inst->dreg, ARMREG_SP, (prev_sp_offset + ainfo->offset));
5573 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
5574 ARM_LDR_REG_REG (code, inst->dreg, ARMREG_SP, ARMREG_IP);
5577 g_assert_not_reached ();
5579 if (cfg->verbose_level > 2)
5580 g_print ("Argument %d assigned to register %s\n", pos, mono_arch_regname (inst->dreg));
5582 /* the argument should be put on the stack: FIXME handle size != word */
5583 if (ainfo->storage == RegTypeGeneral || ainfo->storage == RegTypeIRegPair || ainfo->storage == RegTypeGSharedVtInReg) {
5584 switch (ainfo->size) {
5586 if (arm_is_imm12 (inst->inst_offset))
5587 ARM_STRB_IMM (code, ainfo->reg, inst->inst_basereg, inst->inst_offset);
5589 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
5590 ARM_STRB_REG_REG (code, ainfo->reg, inst->inst_basereg, ARMREG_IP);
5594 if (arm_is_imm8 (inst->inst_offset)) {
5595 ARM_STRH_IMM (code, ainfo->reg, inst->inst_basereg, inst->inst_offset);
5597 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
5598 ARM_STRH_REG_REG (code, ainfo->reg, inst->inst_basereg, ARMREG_IP);
5602 if (arm_is_imm12 (inst->inst_offset)) {
5603 ARM_STR_IMM (code, ainfo->reg, inst->inst_basereg, inst->inst_offset);
5605 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
5606 ARM_STR_REG_REG (code, ainfo->reg, inst->inst_basereg, ARMREG_IP);
5608 if (arm_is_imm12 (inst->inst_offset + 4)) {
5609 ARM_STR_IMM (code, ainfo->reg + 1, inst->inst_basereg, inst->inst_offset + 4);
5611 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset + 4);
5612 ARM_STR_REG_REG (code, ainfo->reg + 1, inst->inst_basereg, ARMREG_IP);
5616 if (arm_is_imm12 (inst->inst_offset)) {
5617 ARM_STR_IMM (code, ainfo->reg, inst->inst_basereg, inst->inst_offset);
5619 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
5620 ARM_STR_REG_REG (code, ainfo->reg, inst->inst_basereg, ARMREG_IP);
5624 } else if (ainfo->storage == RegTypeBaseGen) {
5625 g_assert (arm_is_imm12 (prev_sp_offset + ainfo->offset));
5626 g_assert (arm_is_imm12 (inst->inst_offset));
5627 ARM_LDR_IMM (code, ARMREG_LR, ARMREG_SP, (prev_sp_offset + ainfo->offset));
5628 ARM_STR_IMM (code, ARMREG_LR, inst->inst_basereg, inst->inst_offset + 4);
5629 ARM_STR_IMM (code, ARMREG_R3, inst->inst_basereg, inst->inst_offset);
5630 } else if (ainfo->storage == RegTypeBase || ainfo->storage == RegTypeGSharedVtOnStack) {
5631 if (arm_is_imm12 (prev_sp_offset + ainfo->offset)) {
5632 ARM_LDR_IMM (code, ARMREG_LR, ARMREG_SP, (prev_sp_offset + ainfo->offset));
5634 code = mono_arm_emit_load_imm (code, ARMREG_IP, prev_sp_offset + ainfo->offset);
5635 ARM_LDR_REG_REG (code, ARMREG_LR, ARMREG_SP, ARMREG_IP);
5638 switch (ainfo->size) {
5640 if (arm_is_imm8 (inst->inst_offset)) {
5641 ARM_STRB_IMM (code, ARMREG_LR, inst->inst_basereg, inst->inst_offset);
5643 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
5644 ARM_STRB_REG_REG (code, ARMREG_LR, inst->inst_basereg, ARMREG_IP);
5648 if (arm_is_imm8 (inst->inst_offset)) {
5649 ARM_STRH_IMM (code, ARMREG_LR, inst->inst_basereg, inst->inst_offset);
5651 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
5652 ARM_STRH_REG_REG (code, ARMREG_LR, inst->inst_basereg, ARMREG_IP);
5656 if (arm_is_imm12 (inst->inst_offset)) {
5657 ARM_STR_IMM (code, ARMREG_LR, inst->inst_basereg, inst->inst_offset);
5659 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
5660 ARM_STR_REG_REG (code, ARMREG_LR, inst->inst_basereg, ARMREG_IP);
5662 if (arm_is_imm12 (prev_sp_offset + ainfo->offset + 4)) {
5663 ARM_LDR_IMM (code, ARMREG_LR, ARMREG_SP, (prev_sp_offset + ainfo->offset + 4));
5665 code = mono_arm_emit_load_imm (code, ARMREG_IP, prev_sp_offset + ainfo->offset + 4);
5666 ARM_LDR_REG_REG (code, ARMREG_LR, ARMREG_SP, ARMREG_IP);
5668 if (arm_is_imm12 (inst->inst_offset + 4)) {
5669 ARM_STR_IMM (code, ARMREG_LR, inst->inst_basereg, inst->inst_offset + 4);
5671 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset + 4);
5672 ARM_STR_REG_REG (code, ARMREG_LR, inst->inst_basereg, ARMREG_IP);
5676 if (arm_is_imm12 (inst->inst_offset)) {
5677 ARM_STR_IMM (code, ARMREG_LR, inst->inst_basereg, inst->inst_offset);
5679 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
5680 ARM_STR_REG_REG (code, ARMREG_LR, inst->inst_basereg, ARMREG_IP);
5684 } else if (ainfo->storage == RegTypeFP) {
5685 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
5686 ARM_ADD_REG_REG (code, ARMREG_IP, ARMREG_IP, inst->inst_basereg);
5688 if (ainfo->size == 8)
5689 ARM_FSTD (code, ainfo->reg, ARMREG_IP, 0);
5691 ARM_FSTS (code, ainfo->reg, ARMREG_IP, 0);
5692 } else if (ainfo->storage == RegTypeStructByVal) {
5693 int doffset = inst->inst_offset;
5697 size = mini_type_stack_size_full (cfg->generic_sharing_context, inst->inst_vtype, NULL, sig->pinvoke);
5698 for (cur_reg = 0; cur_reg < ainfo->size; ++cur_reg) {
5699 if (arm_is_imm12 (doffset)) {
5700 ARM_STR_IMM (code, ainfo->reg + cur_reg, inst->inst_basereg, doffset);
5702 code = mono_arm_emit_load_imm (code, ARMREG_IP, doffset);
5703 ARM_STR_REG_REG (code, ainfo->reg + cur_reg, inst->inst_basereg, ARMREG_IP);
5705 soffset += sizeof (gpointer);
5706 doffset += sizeof (gpointer);
5708 if (ainfo->vtsize) {
5709 /* FIXME: handle overrun! with struct sizes not multiple of 4 */
5710 //g_print ("emit_memcpy (prev_sp_ofs: %d, ainfo->offset: %d, soffset: %d)\n", prev_sp_offset, ainfo->offset, soffset);
5711 code = emit_memcpy (code, ainfo->vtsize * sizeof (gpointer), inst->inst_basereg, doffset, ARMREG_SP, prev_sp_offset + ainfo->offset);
5713 } else if (ainfo->storage == RegTypeStructByAddr) {
5714 g_assert_not_reached ();
5715 /* FIXME: handle overrun! with struct sizes not multiple of 4 */
5716 code = emit_memcpy (code, ainfo->vtsize * sizeof (gpointer), inst->inst_basereg, inst->inst_offset, ainfo->reg, 0);
5718 g_assert_not_reached ();
5723 if (method->save_lmf)
5724 code = emit_save_lmf (cfg, code, alloc_size - lmf_offset);
5727 code = mono_arch_instrument_prolog (cfg, mono_trace_enter_method, code, TRUE);
5729 if (cfg->arch.seq_point_info_var) {
5730 MonoInst *ins = cfg->arch.seq_point_info_var;
5732 /* Initialize the variable from a GOT slot */
5733 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_SEQ_POINT_INFO, cfg->method);
5734 #ifdef USE_JUMP_TABLES
5736 gpointer *jte = mono_jumptable_add_entry ();
5737 code = mono_arm_load_jumptable_entry (code, jte, ARMREG_IP);
5738 ARM_LDR_IMM (code, ARMREG_R0, ARMREG_IP, 0);
5740 /** XXX: is it correct? */
5742 ARM_LDR_IMM (code, ARMREG_R0, ARMREG_PC, 0);
5744 *(gpointer*)code = NULL;
5747 ARM_LDR_REG_REG (code, ARMREG_R0, ARMREG_PC, ARMREG_R0);
5749 g_assert (ins->opcode == OP_REGOFFSET);
5751 if (arm_is_imm12 (ins->inst_offset)) {
5752 ARM_STR_IMM (code, ARMREG_R0, ins->inst_basereg, ins->inst_offset);
5754 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_offset);
5755 ARM_STR_REG_REG (code, ARMREG_R0, ins->inst_basereg, ARMREG_LR);
5759 /* Initialize ss_trigger_page_var */
5760 if (!cfg->soft_breakpoints) {
5761 MonoInst *info_var = cfg->arch.seq_point_info_var;
5762 MonoInst *ss_trigger_page_var = cfg->arch.ss_trigger_page_var;
5763 int dreg = ARMREG_LR;
5766 g_assert (info_var->opcode == OP_REGOFFSET);
5767 g_assert (arm_is_imm12 (info_var->inst_offset));
5769 ARM_LDR_IMM (code, dreg, info_var->inst_basereg, info_var->inst_offset);
5770 /* Load the trigger page addr */
5771 ARM_LDR_IMM (code, dreg, dreg, G_STRUCT_OFFSET (SeqPointInfo, ss_trigger_page));
5772 ARM_STR_IMM (code, dreg, ss_trigger_page_var->inst_basereg, ss_trigger_page_var->inst_offset);
5776 if (cfg->arch.seq_point_read_var) {
5777 MonoInst *read_ins = cfg->arch.seq_point_read_var;
5778 MonoInst *ss_method_ins = cfg->arch.seq_point_ss_method_var;
5779 MonoInst *bp_method_ins = cfg->arch.seq_point_bp_method_var;
5780 #ifdef USE_JUMP_TABLES
5783 g_assert (read_ins->opcode == OP_REGOFFSET);
5784 g_assert (arm_is_imm12 (read_ins->inst_offset));
5785 g_assert (ss_method_ins->opcode == OP_REGOFFSET);
5786 g_assert (arm_is_imm12 (ss_method_ins->inst_offset));
5787 g_assert (bp_method_ins->opcode == OP_REGOFFSET);
5788 g_assert (arm_is_imm12 (bp_method_ins->inst_offset));
5790 #ifdef USE_JUMP_TABLES
5791 jte = mono_jumptable_add_entries (3);
5792 jte [0] = (gpointer)&ss_trigger_var;
5793 jte [1] = single_step_func_wrapper;
5794 jte [2] = breakpoint_func_wrapper;
5795 code = mono_arm_load_jumptable_entry_addr (code, jte, ARMREG_LR);
5797 ARM_MOV_REG_REG (code, ARMREG_LR, ARMREG_PC);
5799 *(volatile int **)code = &ss_trigger_var;
5801 *(gpointer*)code = single_step_func_wrapper;
5803 *(gpointer*)code = breakpoint_func_wrapper;
5807 ARM_LDR_IMM (code, ARMREG_IP, ARMREG_LR, 0);
5808 ARM_STR_IMM (code, ARMREG_IP, read_ins->inst_basereg, read_ins->inst_offset);
5809 ARM_LDR_IMM (code, ARMREG_IP, ARMREG_LR, 4);
5810 ARM_STR_IMM (code, ARMREG_IP, ss_method_ins->inst_basereg, ss_method_ins->inst_offset);
5811 ARM_LDR_IMM (code, ARMREG_IP, ARMREG_LR, 8);
5812 ARM_STR_IMM (code, ARMREG_IP, bp_method_ins->inst_basereg, bp_method_ins->inst_offset);
5815 cfg->code_len = code - cfg->native_code;
5816 g_assert (cfg->code_len < cfg->code_size);
5823 mono_arch_emit_epilog (MonoCompile *cfg)
5825 MonoMethod *method = cfg->method;
5826 int pos, i, rot_amount;
5827 int max_epilog_size = 16 + 20*4;
5831 if (cfg->method->save_lmf)
5832 max_epilog_size += 128;
5834 if (mono_jit_trace_calls != NULL)
5835 max_epilog_size += 50;
5837 if (cfg->prof_options & MONO_PROFILE_ENTER_LEAVE)
5838 max_epilog_size += 50;
5840 while (cfg->code_len + max_epilog_size > (cfg->code_size - 16)) {
5841 cfg->code_size *= 2;
5842 cfg->native_code = g_realloc (cfg->native_code, cfg->code_size);
5843 cfg->stat_code_reallocs++;
5847 * Keep in sync with OP_JMP
5849 code = cfg->native_code + cfg->code_len;
5851 if (mono_jit_trace_calls != NULL && mono_trace_eval (method)) {
5852 code = mono_arch_instrument_epilog (cfg, mono_trace_leave_method, code, TRUE);
5856 /* Load returned vtypes into registers if needed */
5857 cinfo = cfg->arch.cinfo;
5858 if (cinfo->ret.storage == RegTypeStructByVal) {
5859 MonoInst *ins = cfg->ret;
5861 if (arm_is_imm12 (ins->inst_offset)) {
5862 ARM_LDR_IMM (code, ARMREG_R0, ins->inst_basereg, ins->inst_offset);
5864 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_offset);
5865 ARM_LDR_REG_REG (code, ARMREG_R0, ins->inst_basereg, ARMREG_LR);
5869 if (method->save_lmf) {
5870 int lmf_offset, reg, sp_adj, regmask;
5871 /* all but r0-r3, sp and pc */
5872 pos += sizeof (MonoLMF) - (MONO_ARM_NUM_SAVED_REGS * sizeof (mgreg_t));
5875 code = emit_restore_lmf (cfg, code, cfg->stack_usage - lmf_offset);
5877 /* This points to r4 inside MonoLMF->iregs */
5878 sp_adj = (sizeof (MonoLMF) - MONO_ARM_NUM_SAVED_REGS * sizeof (mgreg_t));
5880 regmask = 0x9ff0; /* restore lr to pc */
5881 /* Skip caller saved registers not used by the method */
5882 while (!(cfg->used_int_regs & (1 << reg)) && reg < ARMREG_FP) {
5883 regmask &= ~(1 << reg);
5888 /* Restored later */
5889 regmask &= ~(1 << ARMREG_PC);
5890 /* point sp at the registers to restore: 10 is 14 -4, because we skip r0-r3 */
5891 code = emit_big_add (code, ARMREG_SP, cfg->frame_reg, cfg->stack_usage - lmf_offset + sp_adj);
5893 ARM_POP (code, regmask);
5895 /* Restore saved r7, restore LR to PC */
5896 /* Skip lr from the lmf */
5897 ARM_ADD_REG_IMM (code, ARMREG_SP, ARMREG_SP, sizeof (gpointer), 0);
5898 ARM_POP (code, (1 << ARMREG_R7) | (1 << ARMREG_PC));
5901 if ((i = mono_arm_is_rotated_imm8 (cfg->stack_usage, &rot_amount)) >= 0) {
5902 ARM_ADD_REG_IMM (code, ARMREG_SP, cfg->frame_reg, i, rot_amount);
5904 code = mono_arm_emit_load_imm (code, ARMREG_IP, cfg->stack_usage);
5905 ARM_ADD_REG_REG (code, ARMREG_SP, cfg->frame_reg, ARMREG_IP);
5909 /* Restore saved gregs */
5910 if (cfg->used_int_regs)
5911 ARM_POP (code, cfg->used_int_regs);
5912 /* Restore saved r7, restore LR to PC */
5913 ARM_POP (code, (1 << ARMREG_R7) | (1 << ARMREG_PC));
5915 ARM_POP (code, cfg->used_int_regs | (1 << ARMREG_PC));
5919 cfg->code_len = code - cfg->native_code;
5921 g_assert (cfg->code_len < cfg->code_size);
5926 mono_arch_emit_exceptions (MonoCompile *cfg)
5928 MonoJumpInfo *patch_info;
5931 guint8* exc_throw_pos [MONO_EXC_INTRINS_NUM];
5932 guint8 exc_throw_found [MONO_EXC_INTRINS_NUM];
5933 int max_epilog_size = 50;
5935 for (i = 0; i < MONO_EXC_INTRINS_NUM; i++) {
5936 exc_throw_pos [i] = NULL;
5937 exc_throw_found [i] = 0;
5940 /* count the number of exception infos */
5943 * make sure we have enough space for exceptions
5945 for (patch_info = cfg->patch_info; patch_info; patch_info = patch_info->next) {
5946 if (patch_info->type == MONO_PATCH_INFO_EXC) {
5947 i = mini_exception_id_by_name (patch_info->data.target);
5948 if (!exc_throw_found [i]) {
5949 max_epilog_size += 32;
5950 exc_throw_found [i] = TRUE;
5955 while (cfg->code_len + max_epilog_size > (cfg->code_size - 16)) {
5956 cfg->code_size *= 2;
5957 cfg->native_code = g_realloc (cfg->native_code, cfg->code_size);
5958 cfg->stat_code_reallocs++;
5961 code = cfg->native_code + cfg->code_len;
5963 /* add code to raise exceptions */
5964 for (patch_info = cfg->patch_info; patch_info; patch_info = patch_info->next) {
5965 switch (patch_info->type) {
5966 case MONO_PATCH_INFO_EXC: {
5967 MonoClass *exc_class;
5968 unsigned char *ip = patch_info->ip.i + cfg->native_code;
5970 i = mini_exception_id_by_name (patch_info->data.target);
5971 if (exc_throw_pos [i]) {
5972 arm_patch (ip, exc_throw_pos [i]);
5973 patch_info->type = MONO_PATCH_INFO_NONE;
5976 exc_throw_pos [i] = code;
5978 arm_patch (ip, code);
5980 exc_class = mono_class_from_name (mono_defaults.corlib, "System", patch_info->data.name);
5981 g_assert (exc_class);
5983 ARM_MOV_REG_REG (code, ARMREG_R1, ARMREG_LR);
5984 #ifdef USE_JUMP_TABLES
5986 gpointer *jte = mono_jumptable_add_entries (2);
5987 patch_info->type = MONO_PATCH_INFO_INTERNAL_METHOD;
5988 patch_info->data.name = "mono_arch_throw_corlib_exception";
5989 patch_info->ip.i = code - cfg->native_code;
5990 code = mono_arm_load_jumptable_entry_addr (code, jte, ARMREG_R0);
5991 ARM_LDR_IMM (code, ARMREG_IP, ARMREG_R0, 0);
5992 ARM_LDR_IMM (code, ARMREG_R0, ARMREG_R0, 4);
5993 ARM_BLX_REG (code, ARMREG_IP);
5994 jte [1] = GUINT_TO_POINTER (exc_class->type_token);
5997 ARM_LDR_IMM (code, ARMREG_R0, ARMREG_PC, 0);
5998 patch_info->type = MONO_PATCH_INFO_INTERNAL_METHOD;
5999 patch_info->data.name = "mono_arch_throw_corlib_exception";
6000 patch_info->ip.i = code - cfg->native_code;
6002 *(guint32*)(gpointer)code = exc_class->type_token;
6013 cfg->code_len = code - cfg->native_code;
6015 g_assert (cfg->code_len < cfg->code_size);
6019 #endif /* #ifndef DISABLE_JIT */
6022 mono_arch_finish_init (void)
6024 lmf_tls_offset = mono_get_lmf_tls_offset ();
6025 lmf_addr_tls_offset = mono_get_lmf_addr_tls_offset ();
6029 mono_arch_free_jit_tls_data (MonoJitTlsData *tls)
6034 mono_arch_emit_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
6041 mono_arch_print_tree (MonoInst *tree, int arity)
6051 mono_arch_get_patch_offset (guint8 *code)
6058 mono_arch_flush_register_windows (void)
6062 #ifdef MONO_ARCH_HAVE_IMT
6067 mono_arch_emit_imt_argument (MonoCompile *cfg, MonoCallInst *call, MonoInst *imt_arg)
6069 int method_reg = mono_alloc_ireg (cfg);
6070 #ifdef USE_JUMP_TABLES
6071 int use_jumptables = TRUE;
6073 int use_jumptables = FALSE;
6076 if (cfg->compile_aot) {
6079 call->dynamic_imt_arg = TRUE;
6082 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, method_reg, imt_arg->dreg);
6084 MONO_INST_NEW (cfg, ins, OP_AOTCONST);
6085 ins->dreg = method_reg;
6086 ins->inst_p0 = call->method;
6087 ins->inst_c1 = MONO_PATCH_INFO_METHODCONST;
6088 MONO_ADD_INS (cfg->cbb, ins);
6090 mono_call_inst_add_outarg_reg (cfg, call, method_reg, ARMREG_V5, FALSE);
6091 } else if (cfg->generic_context || imt_arg || mono_use_llvm || use_jumptables) {
6092 /* Always pass in a register for simplicity */
6093 call->dynamic_imt_arg = TRUE;
6095 cfg->uses_rgctx_reg = TRUE;
6098 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, method_reg, imt_arg->dreg);
6102 MONO_INST_NEW (cfg, ins, OP_PCONST);
6103 ins->inst_p0 = call->method;
6104 ins->dreg = method_reg;
6105 MONO_ADD_INS (cfg->cbb, ins);
6108 mono_call_inst_add_outarg_reg (cfg, call, method_reg, ARMREG_V5, FALSE);
6112 #endif /* DISABLE_JIT */
6115 mono_arch_find_imt_method (mgreg_t *regs, guint8 *code)
6117 #ifdef USE_JUMP_TABLES
6118 return (MonoMethod*)regs [ARMREG_V5];
6121 guint32 *code_ptr = (guint32*)code;
6123 method = GUINT_TO_POINTER (code_ptr [1]);
6127 return (MonoMethod*)regs [ARMREG_V5];
6129 /* The IMT value is stored in the code stream right after the LDC instruction. */
6130 /* This is no longer true for the gsharedvt_in trampoline */
6132 if (!IS_LDR_PC (code_ptr [0])) {
6133 g_warning ("invalid code stream, instruction before IMT value is not a LDC in %s() (code %p value 0: 0x%x -1: 0x%x -2: 0x%x)", __FUNCTION__, code, code_ptr [2], code_ptr [1], code_ptr [0]);
6134 g_assert (IS_LDR_PC (code_ptr [0]));
6138 /* This is AOTed code, or the gsharedvt trampoline, the IMT method is in V5 */
6139 return (MonoMethod*)regs [ARMREG_V5];
6141 return (MonoMethod*) method;
6146 mono_arch_find_static_call_vtable (mgreg_t *regs, guint8 *code)
6148 return (MonoVTable*) regs [MONO_ARCH_RGCTX_REG];
6151 /* #define ENABLE_WRONG_METHOD_CHECK 1 */
6152 #define BASE_SIZE (6 * 4)
6153 #define BSEARCH_ENTRY_SIZE (4 * 4)
6154 #define CMP_SIZE (3 * 4)
6155 #define BRANCH_SIZE (1 * 4)
6156 #define CALL_SIZE (2 * 4)
6157 #define WMC_SIZE (8 * 4)
6158 #define DISTANCE(A, B) (((gint32)(B)) - ((gint32)(A)))
6160 #ifdef USE_JUMP_TABLES
6162 set_jumptable_element (gpointer *base, guint32 index, gpointer value)
6164 g_assert (base [index] == NULL);
6165 base [index] = value;
/*
 * load_element_with_regbase_cond:
 *
 *   Emit code at CODE which conditionally (COND) loads jump-table entry JTI
 * (byte offset jti * 4 from BASE) into DREG. Uses a single immediate-offset
 * LDR when the offset fits in an ARM imm12; otherwise materializes the
 * offset in DREG with MOVW/MOVT and uses a register-offset load.
 */
6168 load_element_with_regbase_cond (arminstr_t *code, ARMReg dreg, ARMReg base, guint32 jti, int cond)
6170 if (arm_is_imm12 (jti * 4)) {
6171 ARM_LDR_IMM_COND (code, dreg, base, jti * 4, cond);
6173 ARM_MOVW_REG_IMM_COND (code, dreg, (jti * 4) & 0xffff, cond);
/* MOVT only needed when the offset does not fit in 16 bits. */
6174 if ((jti * 4) >> 16)
6175 ARM_MOVT_REG_IMM_COND (code, dreg, ((jti * 4) >> 16) & 0xffff, cond);
6176 ARM_LDR_REG_REG_SHIFT_COND (code, dreg, base, dreg, ARMSHIFT_LSL, 0, cond);
/*
 * arm_emit_value_and_patch_ldr:
 *
 *   Emit VALUE as literal data at CODE and patch the earlier
 * PC-relative LDR at TARGET so its 12-bit offset field points at it.
 * The literal must lie within 0xFFF bytes of the LDR.
 * NOTE(review): DELTA is unsigned, so the `delta >= 0` half of the
 * assert is vacuous; the real constraint is the upper bound.
 */
6182 arm_emit_value_and_patch_ldr (arminstr_t *code, arminstr_t *target, guint32 value)
6184 guint32 delta = DISTANCE (target, code);
6186 g_assert (delta >= 0 && delta <= 0xFFF);
/* OR the byte offset into the LDR's immediate field. */
6187 *target = *target | delta;
6193 #ifdef ENABLE_WRONG_METHOD_CHECK
/*
 * mini_dump_bad_imt:
 *
 *   Debug helper called from generated IMT code when the wrong-method check
 * fails: prints the mismatching IMT keys and the faulting pc.
 */
6195 mini_dump_bad_imt (int input_imt, int compared_imt, int pc)
6197 g_print ("BAD IMT comparing %x with expected %x at ip %x", input_imt, compared_imt, pc);
/*
 * mono_arch_build_imt_thunk:
 *
 *   Build the native IMT/virtual-dispatch thunk for VTABLE in DOMAIN.
 * IMT_ENTRIES is a COUNT-element decision list: "is_equals" entries compare
 * the incoming IMT method against a key and jump to the target code or
 * vtable slot on a match; non-equals entries implement bsearch-style
 * range checks. FAIL_TRAMP, when set, is the fallback for a failed match
 * (used by generic virtual thunks).
 *
 * The function runs in three phases:
 *   1. size estimation (per-item chunk_size accumulation),
 *   2. code emission (prologue + one chunk per entry),
 *   3. back-patching of forward branches and constant-pool fill.
 * Two emission strategies exist: USE_JUMP_TABLES keeps all constants in an
 * external jump table addressed via R2, the default path embeds constants
 * in the code stream and patches PC-relative LDRs.
 */
6203 mono_arch_build_imt_thunk (MonoVTable *vtable, MonoDomain *domain, MonoIMTCheckItem **imt_entries, int count,
6204 gpointer fail_tramp)
6207 arminstr_t *code, *start;
6208 #ifdef USE_JUMP_TABLES
6211 gboolean large_offsets = FALSE;
6212 guint32 **constant_pool_starts;
6213 arminstr_t *vtable_target = NULL;
6214 int extra_space = 0;
6216 #ifdef ENABLE_WRONG_METHOD_CHECK
/* Phase 1: estimate the size of each entry's code chunk. */
6221 #ifdef USE_JUMP_TABLES
/* Jump-table variant uses a fixed worst-case estimate per entry. */
6222 for (i = 0; i < count; ++i) {
6223 MonoIMTCheckItem *item = imt_entries [i];
6224 item->chunk_size += 4 * 16;
6225 if (!item->is_equals)
6226 imt_entries [item->check_target_idx]->compare_done = TRUE;
6227 size += item->chunk_size;
6230 constant_pool_starts = g_new0 (guint32*, count);
6232 for (i = 0; i < count; ++i) {
6233 MonoIMTCheckItem *item = imt_entries [i];
6234 if (item->is_equals) {
6235 gboolean fail_case = !item->check_target_idx && fail_tramp;
/* Targets which don't fit an imm12 offset need the long (stack-based) sequence. */
6237 if (item->has_target_code || !arm_is_imm12 (DISTANCE (vtable, &vtable->vtable[item->value.vtable_slot]))) {
6238 item->chunk_size += 32;
6239 large_offsets = TRUE;
6242 if (item->check_target_idx || fail_case) {
6243 if (!item->compare_done || fail_case)
6244 item->chunk_size += CMP_SIZE;
6245 item->chunk_size += BRANCH_SIZE;
6247 #ifdef ENABLE_WRONG_METHOD_CHECK
6248 item->chunk_size += WMC_SIZE;
6252 item->chunk_size += 16;
6253 large_offsets = TRUE;
6255 item->chunk_size += CALL_SIZE;
6257 item->chunk_size += BSEARCH_ENTRY_SIZE;
6258 imt_entries [item->check_target_idx]->compare_done = TRUE;
6260 size += item->chunk_size;
6264 size += 4 * count; /* The ARM_ADD_REG_IMM to pop the stack */
/* Phase 2: allocate the code buffer and emit the thunk prologue. */
6268 code = mono_method_alloc_generic_virtual_thunk (domain, size);
6270 code = mono_domain_code_reserve (domain, size);
6274 g_print ("Building IMT thunk for class %s %s entries %d code size %d code at %p end %p vtable %p fail_tramp %p\n", vtable->klass->name_space, vtable->klass->name, count, size, start, ((guint8*)start) + size, vtable, fail_tramp);
6275 for (i = 0; i < count; ++i) {
6276 MonoIMTCheckItem *item = imt_entries [i];
6277 g_print ("method %d (%p) %s vtable slot %p is_equals %d chunk size %d\n", i, item->key, ((MonoMethod*)item->key)->name, &vtable->vtable [item->value.vtable_slot], item->is_equals, item->chunk_size);
6281 #ifdef USE_JUMP_TABLES
6282 ARM_PUSH3 (code, ARMREG_R0, ARMREG_R1, ARMREG_R2);
6283 /* If jumptables we always pass the IMT method in R5 */
6284 ARM_MOV_REG_REG (code, ARMREG_R0, ARMREG_V5);
/* Jump-table layout: slot 0 is the vtable, then 3 slots per IMT entry. */
6285 #define VTABLE_JTI 0
6286 #define IMT_METHOD_OFFSET 0
6287 #define TARGET_CODE_OFFSET 1
6288 #define JUMP_CODE_OFFSET 2
6289 #define RECORDS_PER_ENTRY 3
6290 #define IMT_METHOD_JTI(idx) (1 + idx * RECORDS_PER_ENTRY + IMT_METHOD_OFFSET)
6291 #define TARGET_CODE_JTI(idx) (1 + idx * RECORDS_PER_ENTRY + TARGET_CODE_OFFSET)
6292 #define JUMP_CODE_JTI(idx) (1 + idx * RECORDS_PER_ENTRY + JUMP_CODE_OFFSET)
6294 jte = mono_jumptable_add_entries (RECORDS_PER_ENTRY * count + 1 /* vtable */);
/* R2 holds the jump-table base, IP the vtable, for the rest of the thunk. */
6295 code = (arminstr_t *) mono_arm_load_jumptable_entry_addr ((guint8 *) code, jte, ARMREG_R2);
6296 ARM_LDR_IMM (code, ARMREG_IP, ARMREG_R2, VTABLE_JTI);
6297 set_jumptable_element (jte, VTABLE_JTI, vtable);
6300 ARM_PUSH4 (code, ARMREG_R0, ARMREG_R1, ARMREG_IP, ARMREG_PC);
6302 ARM_PUSH2 (code, ARMREG_R0, ARMREG_R1);
6303 ARM_LDR_IMM (code, ARMREG_R0, ARMREG_LR, -4);
/* Remember the LDR whose literal (the vtable address) is patched later. */
6304 vtable_target = code;
6305 ARM_LDR_IMM (code, ARMREG_IP, ARMREG_PC, 0);
6307 if (mono_use_llvm) {
6308 /* LLVM always passes the IMT method in R5 */
6309 ARM_MOV_REG_REG (code, ARMREG_R0, ARMREG_V5);
6311 /* R0 == 0 means we are called from AOT code. In this case, V5 contains the IMT method */
6312 ARM_CMP_REG_IMM8 (code, ARMREG_R0, 0);
6313 ARM_MOV_REG_REG_COND (code, ARMREG_R0, ARMREG_V5, ARMCOND_EQ);
/* Emit one code chunk per IMT entry. */
6317 for (i = 0; i < count; ++i) {
6318 MonoIMTCheckItem *item = imt_entries [i];
6319 #ifdef USE_JUMP_TABLES
6320 guint32 imt_method_jti = 0, target_code_jti = 0;
6322 arminstr_t *imt_method = NULL, *vtable_offset_ins = NULL, *target_code_ins = NULL;
6324 gint32 vtable_offset;
6326 item->code_target = (guint8*)code;
6328 if (item->is_equals) {
6329 gboolean fail_case = !item->check_target_idx && fail_tramp;
6331 if (item->check_target_idx || fail_case) {
/* Compare the incoming IMT method (R0) against this entry's key. */
6332 if (!item->compare_done || fail_case) {
6333 #ifdef USE_JUMP_TABLES
6334 imt_method_jti = IMT_METHOD_JTI (i);
6335 code = load_element_with_regbase_cond (code, ARMREG_R1, ARMREG_R2, imt_method_jti, ARMCOND_AL);
6338 ARM_LDR_IMM (code, ARMREG_R1, ARMREG_PC, 0);
6340 ARM_CMP_REG_REG (code, ARMREG_R0, ARMREG_R1);
/* On mismatch, branch to the next decision node (patched in phase 3). */
6342 #ifdef USE_JUMP_TABLES
6343 code = load_element_with_regbase_cond (code, ARMREG_R1, ARMREG_R2, JUMP_CODE_JTI (i), ARMCOND_NE);
6344 ARM_BX_COND (code, ARMCOND_NE, ARMREG_R1);
6345 item->jmp_code = GUINT_TO_POINTER (JUMP_CODE_JTI (i));
6347 item->jmp_code = (guint8*)code;
6348 ARM_B_COND (code, ARMCOND_NE, 0);
6351 /*Enable the commented code to assert on wrong method*/
6352 #ifdef ENABLE_WRONG_METHOD_CHECK
6353 #ifdef USE_JUMP_TABLES
6354 imt_method_jti = IMT_METHOD_JTI (i);
6355 code = load_element_with_regbase_cond (code, ARMREG_R1, ARMREG_R2, imt_method_jti, ARMCOND_AL);
6358 ARM_LDR_IMM (code, ARMREG_R1, ARMREG_PC, 0);
6360 ARM_CMP_REG_REG (code, ARMREG_R0, ARMREG_R1);
6362 ARM_B_COND (code, ARMCOND_EQ, 0);
6364 /* Define this if your system is so bad that gdb is failing. */
6365 #ifdef BROKEN_DEV_ENV
6366 ARM_MOV_REG_REG (code, ARMREG_R2, ARMREG_PC);
6368 arm_patch (code - 1, mini_dump_bad_imt);
6372 arm_patch (cond, code);
6376 if (item->has_target_code) {
6377 /* Load target address */
6378 #ifdef USE_JUMP_TABLES
6379 target_code_jti = TARGET_CODE_JTI (i);
6380 code = load_element_with_regbase_cond (code, ARMREG_R1, ARMREG_R2, target_code_jti, ARMCOND_AL);
6381 /* Restore registers */
6382 ARM_POP3 (code, ARMREG_R0, ARMREG_R1, ARMREG_R2);
6384 ARM_BX (code, ARMREG_R1);
6385 set_jumptable_element (jte, target_code_jti, item->value.target_code);
6387 target_code_ins = code;
6388 ARM_LDR_IMM (code, ARMREG_R1, ARMREG_PC, 0);
6389 /* Save it to the fourth slot */
6390 ARM_STR_IMM (code, ARMREG_R1, ARMREG_SP, 3 * sizeof (gpointer));
6391 /* Restore registers and branch */
6392 ARM_POP4 (code, ARMREG_R0, ARMREG_R1, ARMREG_IP, ARMREG_PC);
6394 code = arm_emit_value_and_patch_ldr (code, target_code_ins, (gsize)item->value.target_code);
/* No explicit target code: dispatch through the vtable slot instead. */
6397 vtable_offset = DISTANCE (vtable, &vtable->vtable[item->value.vtable_slot]);
6398 if (!arm_is_imm12 (vtable_offset)) {
6400 * We need to branch to a computed address but we don't have
6401 * a free register to store it, since IP must contain the
6402 * vtable address. So we push the two values to the stack, and
6403 * load them both using LDM.
6405 /* Compute target address */
6406 #ifdef USE_JUMP_TABLES
6407 ARM_MOVW_REG_IMM (code, ARMREG_R1, vtable_offset & 0xffff);
6408 if (vtable_offset >> 16)
6409 ARM_MOVT_REG_IMM (code, ARMREG_R1, (vtable_offset >> 16) & 0xffff);
6410 /* IP had vtable base. */
6411 ARM_LDR_REG_REG (code, ARMREG_IP, ARMREG_IP, ARMREG_R1);
6412 /* Restore registers and branch */
6413 ARM_POP3 (code, ARMREG_R0, ARMREG_R1, ARMREG_R2);
6414 ARM_BX (code, ARMREG_IP);
6416 vtable_offset_ins = code;
6417 ARM_LDR_IMM (code, ARMREG_R1, ARMREG_PC, 0);
6418 ARM_LDR_REG_REG (code, ARMREG_R1, ARMREG_IP, ARMREG_R1);
6419 /* Save it to the fourth slot */
6420 ARM_STR_IMM (code, ARMREG_R1, ARMREG_SP, 3 * sizeof (gpointer));
6421 /* Restore registers and branch */
6422 ARM_POP4 (code, ARMREG_R0, ARMREG_R1, ARMREG_IP, ARMREG_PC);
6424 code = arm_emit_value_and_patch_ldr (code, vtable_offset_ins, vtable_offset);
/* Short path: the slot offset fits an imm12, load PC straight from it. */
6427 #ifdef USE_JUMP_TABLES
6428 ARM_LDR_IMM (code, ARMREG_IP, ARMREG_IP, vtable_offset);
6429 ARM_POP3 (code, ARMREG_R0, ARMREG_R1, ARMREG_R2);
6430 ARM_BX (code, ARMREG_IP);
6432 ARM_POP2 (code, ARMREG_R0, ARMREG_R1);
6434 ARM_ADD_REG_IMM8 (code, ARMREG_SP, ARMREG_SP, 2 * sizeof (gpointer));
6435 ARM_LDR_IMM (code, ARMREG_PC, ARMREG_IP, vtable_offset);
/* fail_case: patch the miss branch to land here and jump to FAIL_TRAMP. */
6441 #ifdef USE_JUMP_TABLES
6442 set_jumptable_element (jte, GPOINTER_TO_UINT (item->jmp_code), code);
6443 target_code_jti = TARGET_CODE_JTI (i);
6444 /* Load target address */
6445 code = load_element_with_regbase_cond (code, ARMREG_R1, ARMREG_R2, target_code_jti, ARMCOND_AL);
6446 /* Restore registers */
6447 ARM_POP3 (code, ARMREG_R0, ARMREG_R1, ARMREG_R2);
6449 ARM_BX (code, ARMREG_R1);
6450 set_jumptable_element (jte, target_code_jti, fail_tramp);
6452 arm_patch (item->jmp_code, (guchar*)code);
6454 target_code_ins = code;
6455 /* Load target address */
6456 ARM_LDR_IMM (code, ARMREG_R1, ARMREG_PC, 0);
6457 /* Save it to the fourth slot */
6458 ARM_STR_IMM (code, ARMREG_R1, ARMREG_SP, 3 * sizeof (gpointer));
6459 /* Restore registers and branch */
6460 ARM_POP4 (code, ARMREG_R0, ARMREG_R1, ARMREG_IP, ARMREG_PC);
6462 code = arm_emit_value_and_patch_ldr (code, target_code_ins, (gsize)fail_tramp);
6464 item->jmp_code = NULL;
/* Record this entry's IMT key (jump table slot, or patched literal). */
6467 #ifdef USE_JUMP_TABLES
6469 set_jumptable_element (jte, imt_method_jti, item->key);
6472 code = arm_emit_value_and_patch_ldr (code, imt_method, (guint32)item->key);
6474 /*must emit after unconditional branch*/
6475 if (vtable_target) {
6476 code = arm_emit_value_and_patch_ldr (code, vtable_target, (guint32)vtable);
6477 item->chunk_size += 4;
6478 vtable_target = NULL;
6481 /*We reserve the space for bsearch IMT values after the first entry with an absolute jump*/
6482 constant_pool_starts [i] = code;
6484 code += extra_space;
/* Non-equals entry: bsearch range check, branch on unsigned >= (HS). */
6489 #ifdef USE_JUMP_TABLES
6490 code = load_element_with_regbase_cond (code, ARMREG_R1, ARMREG_R2, IMT_METHOD_JTI (i), ARMCOND_AL);
6491 ARM_CMP_REG_REG (code, ARMREG_R0, ARMREG_R1);
6492 code = load_element_with_regbase_cond (code, ARMREG_R1, ARMREG_R2, JUMP_CODE_JTI (i), ARMCOND_HS);
6493 ARM_BX_COND (code, ARMCOND_HS, ARMREG_R1);
6494 item->jmp_code = GUINT_TO_POINTER (JUMP_CODE_JTI (i));
6496 ARM_LDR_IMM (code, ARMREG_R1, ARMREG_PC, 0);
6497 ARM_CMP_REG_REG (code, ARMREG_R0, ARMREG_R1);
6499 item->jmp_code = (guint8*)code;
6500 ARM_B_COND (code, ARMCOND_HS, 0);
/* Phase 3: patch forward branches now that all targets are known. */
6506 for (i = 0; i < count; ++i) {
6507 MonoIMTCheckItem *item = imt_entries [i];
6508 if (item->jmp_code) {
6509 if (item->check_target_idx)
6510 #ifdef USE_JUMP_TABLES
6511 set_jumptable_element (jte, GPOINTER_TO_UINT (item->jmp_code), imt_entries [item->check_target_idx]->code_target);
6513 arm_patch (item->jmp_code, imt_entries [item->check_target_idx]->code_target);
/* Fill the constant pool / jump table with the preceding bsearch keys. */
6516 if (i > 0 && item->is_equals) {
6518 #ifdef USE_JUMP_TABLES
6519 for (j = i - 1; j >= 0 && !imt_entries [j]->is_equals; --j)
6520 set_jumptable_element (jte, IMT_METHOD_JTI (j), imt_entries [j]->key);
6522 arminstr_t *space_start = constant_pool_starts [i];
6523 for (j = i - 1; j >= 0 && !imt_entries [j]->is_equals; --j) {
6524 space_start = arm_emit_value_and_patch_ldr (space_start, (arminstr_t*)imt_entries [j]->code_target, (guint32)imt_entries [j]->key);
6532 char *buff = g_strdup_printf ("thunk_for_class_%s_%s_entries_%d", vtable->klass->name_space, vtable->klass->name, count);
6533 mono_disassemble_code (NULL, (guint8*)start, size, buff);
6538 #ifndef USE_JUMP_TABLES
6539 g_free (constant_pool_starts);
/* Make the freshly written code visible to the instruction stream. */
6542 mono_arch_flush_icache ((guint8*)start, size);
6543 mono_stats.imt_thunks_size += code - start;
/* Emission must never overrun the phase-1 size estimate. */
6545 g_assert (DISTANCE (start, code) <= size);
/*
 * mono_arch_context_get_int_reg:
 *
 *   Return the value of integer register REG from the saved context CTX.
 */
6552 mono_arch_context_get_int_reg (MonoContext *ctx, int reg)
6554 return ctx->regs [reg];
/*
 * mono_arch_context_set_int_reg:
 *
 *   Set integer register REG in the saved context CTX to VAL.
 */
6558 mono_arch_context_set_int_reg (MonoContext *ctx, int reg, mgreg_t val)
6560 ctx->regs [reg] = val;
6564 * mono_arch_get_trampolines:
6566 * Return a list of MonoTrampInfo structures describing arch specific trampolines
/* Delegates to the ARM exception-trampoline builder; AOT selects the AOT-safe variants. */
6570 mono_arch_get_trampolines (gboolean aot)
6572 return mono_arm_get_exception_trampolines (aot);
6576 #ifdef MONO_ARCH_SOFT_DEBUG_SUPPORTED
6578 * mono_arch_set_breakpoint:
6580 * Set a breakpoint at the native code corresponding to JI at NATIVE_OFFSET.
6581 * The location should contain code emitted by OP_SEQ_POINT.
6584 mono_arch_set_breakpoint (MonoJitInfo *ji, guint8 *ip)
6587 guint32 native_offset = ip - (guint8*)ji->code_start;
6588 MonoDebugOptions *opt = mini_get_debug_options ();
/* Soft breakpoints: patch in a call through LR at the seq point. */
6590 if (opt->soft_breakpoints) {
6591 g_assert (!ji->from_aot);
6593 ARM_BLX_REG (code, ARMREG_LR);
6594 mono_arch_flush_icache (code - 4, 4);
/* AOT code cannot be patched; record the trigger page in the per-method table. */
6595 } else if (ji->from_aot) {
6596 SeqPointInfo *info = mono_arch_get_seq_point_info (mono_domain_get (), ji->code_start);
6598 g_assert (native_offset % 4 == 0);
6599 g_assert (info->bp_addrs [native_offset / 4] == 0);
6600 info->bp_addrs [native_offset / 4] = bp_trigger_page;
6602 int dreg = ARMREG_LR;
6604 /* Read from another trigger page */
6605 #ifdef USE_JUMP_TABLES
6606 gpointer *jte = mono_jumptable_add_entry ();
6607 code = mono_arm_load_jumptable_entry (code, jte, dreg);
6608 jte [0] = bp_trigger_page;
6610 ARM_LDR_IMM (code, dreg, ARMREG_PC, 0);
6612 *(int*)code = (int)bp_trigger_page;
/* Loading from the (protected) trigger page raises the breakpoint signal. */
6615 ARM_LDR_IMM (code, dreg, dreg, 0);
6617 mono_arch_flush_icache (code - 16, 16);
6620 /* This is currently implemented by emitting an SWI instruction, which
6621 * qemu/linux seems to convert to a SIGILL.
6623 *(int*)code = (0xef << 24) | 8;
6625 mono_arch_flush_icache (code - 4, 4);
6631 * mono_arch_clear_breakpoint:
6633 * Clear the breakpoint at IP.
6636 mono_arch_clear_breakpoint (MonoJitInfo *ji, guint8 *ip)
6638 MonoDebugOptions *opt = mini_get_debug_options ();
/* Soft breakpoints: restore the original instruction at IP. */
6642 if (opt->soft_breakpoints) {
6643 g_assert (!ji->from_aot);
6646 mono_arch_flush_icache (code - 4, 4);
/* AOT: undo the trigger-page entry installed by mono_arch_set_breakpoint. */
6647 } else if (ji->from_aot) {
6648 guint32 native_offset = ip - (guint8*)ji->code_start;
6649 SeqPointInfo *info = mono_arch_get_seq_point_info (mono_domain_get (), ji->code_start);
6651 g_assert (native_offset % 4 == 0);
6652 g_assert (info->bp_addrs [native_offset / 4] == bp_trigger_page);
6653 info->bp_addrs [native_offset / 4] = 0;
/* JITted code: rewrite the 4-word breakpoint sequence. */
6655 for (i = 0; i < 4; ++i)
6658 mono_arch_flush_icache (ip, code - ip);
6663 * mono_arch_start_single_stepping:
6665 * Start single stepping.
/* Revoke all access to the single-step trigger page so seq-point loads fault. */
6668 mono_arch_start_single_stepping (void)
6670 if (ss_trigger_page)
6671 mono_mprotect (ss_trigger_page, mono_pagesize (), 0);
6677 * mono_arch_stop_single_stepping:
6679 * Stop single stepping.
/* Make the single-step trigger page readable again so seq-point loads succeed. */
6682 mono_arch_stop_single_stepping (void)
6684 if (ss_trigger_page)
6685 mono_mprotect (ss_trigger_page, mono_pagesize (), MONO_MMAP_READ);
/* Signal raised by a trigger-page access; platform dependent. */
6691 #define DBG_SIGNAL SIGBUS
6693 #define DBG_SIGNAL SIGSEGV
6697 * mono_arch_is_single_step_event:
6699 * Return whenever the machine state in SIGCTX corresponds to a single
6703 mono_arch_is_single_step_event (void *info, void *sigctx)
6705 siginfo_t *sinfo = info;
/* No trigger page means single stepping is not in use at all. */
6707 if (!ss_trigger_page)
6710 /* Sometimes the address is off by 4 */
6711 if (sinfo->si_addr >= ss_trigger_page && (guint8*)sinfo->si_addr <= (guint8*)ss_trigger_page + 128)
6718 * mono_arch_is_breakpoint_event:
6720 * Return whenever the machine state in SIGCTX corresponds to a breakpoint event.
6723 mono_arch_is_breakpoint_event (void *info, void *sigctx)
6725 siginfo_t *sinfo = info;
/* No trigger pages allocated: cannot be a trigger-page breakpoint. */
6727 if (!ss_trigger_page)
/* Only the expected signal with a fault address inside the bp page counts. */
6730 if (sinfo->si_signo == DBG_SIGNAL) {
6731 /* Sometimes the address is off by 4 */
6732 if (sinfo->si_addr >= bp_trigger_page && (guint8*)sinfo->si_addr <= (guint8*)bp_trigger_page + 128)
6742 * mono_arch_skip_breakpoint:
6744 * See mini-amd64.c for docs.
/* Advance past the 4-byte faulting instruction so execution resumes after it. */
6747 mono_arch_skip_breakpoint (MonoContext *ctx, MonoJitInfo *ji)
6749 MONO_CONTEXT_SET_IP (ctx, (guint8*)MONO_CONTEXT_GET_IP (ctx) + 4);
6753 * mono_arch_skip_single_step:
6755 * See mini-amd64.c for docs.
/* Advance past the 4-byte faulting instruction so execution resumes after it. */
6758 mono_arch_skip_single_step (MonoContext *ctx)
6760 MONO_CONTEXT_SET_IP (ctx, (guint8*)MONO_CONTEXT_GET_IP (ctx) + 4);
6763 #endif /* MONO_ARCH_SOFT_DEBUG_SUPPORTED */
6766 * mono_arch_get_seq_point_info:
6768 * See mini-amd64.c for docs.
6771 mono_arch_get_seq_point_info (MonoDomain *domain, guint8 *code)
6776 // FIXME: Add a free function
/* First check the per-domain cache under the domain lock. */
6778 mono_domain_lock (domain);
6779 info = g_hash_table_lookup (domain_jit_info (domain)->arch_seq_points,
6781 mono_domain_unlock (domain);
/* Cache miss: build a SeqPointInfo sized for the method's code. */
6784 ji = mono_jit_info_table_find (domain, (char*)code);
6787 info = g_malloc0 (sizeof (SeqPointInfo) + ji->code_size);
6789 info->ss_trigger_page = ss_trigger_page;
6790 info->bp_trigger_page = bp_trigger_page;
6792 mono_domain_lock (domain);
6793 g_hash_table_insert (domain_jit_info (domain)->arch_seq_points,
6795 mono_domain_unlock (domain);
/*
 * mono_arch_init_lmf_ext:
 *
 *   Initialize the extended LMF frame EXT, chaining it to PREV_LMF.
 */
6802 mono_arch_init_lmf_ext (MonoLMFExt *ext, gpointer prev_lmf)
6804 ext->lmf.previous_lmf = prev_lmf;
6805 /* Mark that this is a MonoLMFExt */
6806 ext->lmf.previous_lmf = (gpointer)(((gssize)ext->lmf.previous_lmf) | 2);
6807 ext->lmf.sp = (gssize)ext;
6811 * mono_arch_set_target:
6813 * Set the target architecture the JIT backend should generate code for, in the form
6814 * of a GNU target triplet. Only used in AOT mode.
6817 mono_arch_set_target (char *mtriple)
6819 /* The GNU target triple format is not very well documented */
/* NOTE: substring matching, so "armv7s" also matches the "armv7" test
 * below; the checks are cumulative, each enabling its feature flags. */
6820 if (strstr (mtriple, "armv7")) {
6821 v5_supported = TRUE;
6822 v6_supported = TRUE;
6823 v7_supported = TRUE;
6825 if (strstr (mtriple, "armv6")) {
6826 v5_supported = TRUE;
6827 v6_supported = TRUE;
6829 if (strstr (mtriple, "armv7s")) {
6830 v7s_supported = TRUE;
6832 if (strstr (mtriple, "thumbv7s")) {
6833 v5_supported = TRUE;
6834 v6_supported = TRUE;
6835 v7_supported = TRUE;
6836 v7s_supported = TRUE;
6837 thumb_supported = TRUE;
6838 thumb2_supported = TRUE;
/* Apple targets always have at least ARMv6 + Thumb. */
6840 if (strstr (mtriple, "darwin") || strstr (mtriple, "ios")) {
6841 v5_supported = TRUE;
6842 v6_supported = TRUE;
6843 thumb_supported = TRUE;
6846 if (strstr (mtriple, "gnueabi"))
6847 eabi_supported = TRUE;
6850 #if defined(MONOTOUCH) || defined(MONO_EXTENSIONS)
6852 #include "../../../mono-extensions/mono/mini/mini-arm-gsharedvt.c"
6854 #endif /* !MONOTOUCH */