2 * mini-arm.c: ARM backend for the Mono code generator
5 * Paolo Molaro (lupus@ximian.com)
6 * Dietmar Maurer (dietmar@ximian.com)
8 * (C) 2003 Ximian, Inc.
9 * Copyright 2003-2011 Novell, Inc (http://www.novell.com)
10 * Copyright 2011 Xamarin, Inc (http://www.xamarin.com)
15 #include <mono/metadata/appdomain.h>
16 #include <mono/metadata/debug-helpers.h>
17 #include <mono/utils/mono-mmap.h>
18 #include <mono/utils/mono-hwcap-arm.h>
24 #include "debugger-agent.h"
26 #include "mono/arch/arm/arm-vfp-codegen.h"
/* Compile-time FPU configuration checks, soft-float selection and NaCl stubs.
 * NOTE(review): interior lines are elided in this excerpt; code left untouched. */
28 /* Sanity check: This makes no sense */
29 #if defined(ARM_FPU_NONE) && (defined(ARM_FPU_VFP) || defined(ARM_FPU_VFP_HARD))
30 #error "ARM_FPU_NONE is defined while one of ARM_FPU_VFP/ARM_FPU_VFP_HARD is defined"
/* With the soft-float fallback the decision is made at runtime; otherwise fixed. */
33 #if defined(MONO_ARCH_SOFT_FLOAT_FALLBACK)
34 #define IS_SOFT_FLOAT (mono_arch_is_soft_float ())
35 #define IS_VFP (!mono_arch_is_soft_float ())
37 #define IS_SOFT_FLOAT (FALSE)
/* __aeabi_read_tp is only usable on Linux EABI targets (not Android/NaCl). */
41 #if defined(__ARM_EABI__) && defined(__linux__) && !defined(PLATFORM_ANDROID) && !defined(__native_client__)
42 #define HAVE_AEABI_READ_TP 1
/* Native Client codegen: alignment constants plus unimplemented stubs that abort. */
45 #ifdef __native_client_codegen__
46 const guint kNaClAlignment = kNaClAlignmentARM;
47 const guint kNaClAlignmentMask = kNaClAlignmentMaskARM;
48 gint8 nacl_align_byte = -1; /* 0xff */
/* Pad CODE with PAD bytes of NaCl-safe filler; aborts (unimplemented on ARM). */
51 mono_arch_nacl_pad (guint8 *code, int pad)
53 /* Not yet properly implemented. */
54 g_assert_not_reached ();
/* Skip NaCl nop padding starting at CODE; aborts (unimplemented on ARM). */
59 mono_arch_nacl_skip_nops (guint8 *code)
61 /* Not yet properly implemented. */
62 g_assert_not_reached ();
66 #endif /* __native_client_codegen__ */
/* Round VAL up to the next multiple of ALIGN (ALIGN must be a power of two). */
68 #define ALIGN_TO(val,align) ((((guint64)val) + ((align) - 1)) & ~((align) - 1))
/* Apple's cache-flush primitive, declared here to avoid a platform header. */
71 void sys_icache_invalidate (void *start, size_t len);
/* Cached TLS offsets for the LMF / LMF-address slots; -1 means "not available". */
74 static gint lmf_tls_offset = -1;
75 static gint lmf_addr_tls_offset = -1;
77 /* This mutex protects architecture specific caches */
78 #define mono_mini_arch_lock() EnterCriticalSection (&mini_arch_mutex)
79 #define mono_mini_arch_unlock() LeaveCriticalSection (&mini_arch_mutex)
80 static CRITICAL_SECTION mini_arch_mutex;
/* CPU capability flags, filled in by mono_arch_init () from hwcap detection
 * and optionally overridden by the MONO_CPU_ARCH environment variable. */
82 static gboolean v5_supported = FALSE;
83 static gboolean v6_supported = FALSE;
84 static gboolean v7_supported = FALSE;
85 static gboolean v7s_supported = FALSE;
86 static gboolean thumb_supported = FALSE;
87 static gboolean thumb2_supported = FALSE;
89 * Whether to use the ARM EABI
91 static gboolean eabi_supported = FALSE;
94 * Whether to use the iphone ABI extensions:
95 * http://developer.apple.com/library/ios/documentation/Xcode/Conceptual/iPhoneOSABIReference/index.html
96 * Basically, r7 is used as a frame pointer and it should point to the saved r7 + lr.
97 * This is required for debugging/profiling tools to work, but it has some overhead so it should
98 * only be turned on in debug builds.
100 static gboolean iphone_abi = FALSE;
103 * The FPU we are generating code for. This is NOT runtime configurable right now,
104 * since some things like MONO_ARCH_CALLEE_FREGS still depend on defines.
106 static MonoArmFPU arm_fpu;
/* Soft-debugger state: trigger variable/pages read by generated sequence points. */
110 static volatile int ss_trigger_var = 0;
/* Wrappers around the debugger-agent callbacks, built by create_function_wrapper (). */
112 static gpointer single_step_func_wrapper;
113 static gpointer breakpoint_func_wrapper;
116 * The code generated for sequence points reads from this location, which is
117 * made read-only when single stepping is enabled.
119 static gpointer ss_trigger_page;
121 /* Enabled breakpoints read from this trigger page */
122 static gpointer bp_trigger_page;
124 /* Structure used by the sequence points in AOTed code */
/* NOTE(review): struct header elided in this excerpt; these are its fields. */
126 gpointer ss_trigger_page;
127 gpointer bp_trigger_page;
128 guint8* bp_addrs [MONO_ZERO_LEN_ARRAY];
133 * floating point support: on ARM it is a mess, there are at least 3
134 * different setups, each of which binary-incompatible with the other.
135 * 1) FPA: old and ugly, but unfortunately what current distros use
136 * the double binary format has the two words swapped. 8 double registers.
137 * Implemented usually by kernel emulation.
138 * 2) softfloat: the compiler emulates all the fp ops. Usually uses the
139 * ugly swapped double format (I guess a softfloat-vfp exists, too, though).
140 * 3) VFP: the new and actually sensible and useful FP support. Implemented
141 * in HW or kernel-emulated, requires new tools. I think this is what symbian uses.
143 * We do not care about FPA. We will support soft float and VFP.
145 int mono_exc_esp_offset = 0;
/* Ranges of immediates encodable in ARM ldr/str (imm12), ldrh/strh (imm8)
 * and VFP load/store (imm8 scaled) offset fields. */
147 #define arm_is_imm12(v) ((v) > -4096 && (v) < 4096)
148 #define arm_is_imm8(v) ((v) > -256 && (v) < 256)
149 #define arm_is_fpimm8(v) ((v) >= -1020 && (v) <= 1020)
/* Mask/value pair used to recognize an unconditional "ldr pc, [...]" instruction. */
151 #define LDR_MASK ((0xf << ARMCOND_SHIFT) | (3 << 26) | (1 << 22) | (1 << 20) | (15 << 12))
152 #define LDR_PC_VAL ((ARMCOND_AL << ARMCOND_SHIFT) | (1 << 26) | (0 << 22) | (1 << 20) | (15 << 12))
153 #define IS_LDR_PC(val) (((val) & LDR_MASK) == LDR_PC_VAL)
/* Pre-encoded "add lr, pc, #4" and "mov lr, pc" instruction words. */
155 #define ADD_LR_PC_4 ((ARMCOND_AL << ARMCOND_SHIFT) | (1 << 25) | (1 << 23) | (ARMREG_PC << 16) | (ARMREG_LR << 12) | 4)
156 #define MOV_LR_PC ((ARMCOND_AL << ARMCOND_SHIFT) | (1 << 24) | (0xa << 20) | (ARMREG_LR << 12) | ARMREG_PC)
157 //#define DEBUG_IMT 0
159 /* A variant of ARM_LDR_IMM which can handle large offsets */
/* If the offset fits in an imm12 field, emit a single ldr; otherwise load the
 * offset into SCRATCH_REG first (which must differ from BASEREG). */
160 #define ARM_LDR_IMM_GENERAL(code, dreg, basereg, offset, scratch_reg) do { \
161 if (arm_is_imm12 ((offset))) { \
162 ARM_LDR_IMM (code, (dreg), (basereg), (offset)); \
164 g_assert ((scratch_reg) != (basereg)); \
165 code = mono_arm_emit_load_imm (code, (scratch_reg), (offset)); \
166 ARM_LDR_REG_REG (code, (dreg), (basereg), (scratch_reg)); \
/* Store counterpart of ARM_LDR_IMM_GENERAL: same strategy with str. */
170 #define ARM_STR_IMM_GENERAL(code, dreg, basereg, offset, scratch_reg) do { \
171 if (arm_is_imm12 ((offset))) { \
172 ARM_STR_IMM (code, (dreg), (basereg), (offset)); \
174 g_assert ((scratch_reg) != (basereg)); \
175 code = mono_arm_emit_load_imm (code, (scratch_reg), (offset)); \
176 ARM_STR_REG_REG (code, (dreg), (basereg), (scratch_reg)); \
180 static void mono_arch_compute_omit_fp (MonoCompile *cfg);
/* Return a human-readable name for integer register REG (0..15), for debug output. */
183 mono_arch_regname (int reg)
185 static const char * rnames[] = {
186 "arm_r0", "arm_r1", "arm_r2", "arm_r3", "arm_v1",
187 "arm_v2", "arm_v3", "arm_v4", "arm_v5", "arm_v6",
188 "arm_v7", "arm_fp", "arm_ip", "arm_sp", "arm_lr",
/* Out-of-range values fall through to the (elided) fallback return. */
191 if (reg >= 0 && reg < 16)
/* Return a human-readable name for FP register REG (0..31), for debug output. */
197 mono_arch_fregname (int reg)
199 static const char * rnames[] = {
200 "arm_f0", "arm_f1", "arm_f2", "arm_f3", "arm_f4",
201 "arm_f5", "arm_f6", "arm_f7", "arm_f8", "arm_f9",
202 "arm_f10", "arm_f11", "arm_f12", "arm_f13", "arm_f14",
203 "arm_f15", "arm_f16", "arm_f17", "arm_f18", "arm_f19",
204 "arm_f20", "arm_f21", "arm_f22", "arm_f23", "arm_f24",
205 "arm_f25", "arm_f26", "arm_f27", "arm_f28", "arm_f29",
/* Out-of-range values fall through to the (elided) fallback return. */
208 if (reg >= 0 && reg < 32)
/* Emit DREG = SREG + IMM. Uses a single add when IMM is encodable as a rotated
 * imm8; otherwise materializes IMM into DREG first (so DREG must differ from SREG). */
216 emit_big_add (guint8 *code, int dreg, int sreg, int imm)
218 int imm8, rot_amount;
219 if ((imm8 = mono_arm_is_rotated_imm8 (imm, &rot_amount)) >= 0) {
220 ARM_ADD_REG_IMM (code, dreg, sreg, imm8, rot_amount);
223 g_assert (dreg != sreg);
224 code = mono_arm_emit_load_imm (code, dreg, imm);
225 ARM_ADD_REG_REG (code, dreg, dreg, sreg);
/* Emit a word-by-word copy of SIZE bytes from SREG+SOFFSET to DREG+DOFFSET.
 * Large copies (> 4 pointers) use a runtime loop in r0-r3; smaller copies are
 * fully unrolled using lr as scratch. NOTE(review): the copy advances in
 * 4-byte words — presumably SIZE is word-aligned here; confirm with callers. */
230 emit_memcpy (guint8 *code, int size, int dreg, int doffset, int sreg, int soffset)
232 /* we can use r0-r3, since this is called only for incoming args on the stack */
233 if (size > sizeof (gpointer) * 4) {
235 code = emit_big_add (code, ARMREG_R0, sreg, soffset);
236 code = emit_big_add (code, ARMREG_R1, dreg, doffset);
237 start_loop = code = mono_arm_emit_load_imm (code, ARMREG_R2, size);
238 ARM_LDR_IMM (code, ARMREG_R3, ARMREG_R0, 0);
239 ARM_STR_IMM (code, ARMREG_R3, ARMREG_R1, 0);
240 ARM_ADD_REG_IMM8 (code, ARMREG_R0, ARMREG_R0, 4);
241 ARM_ADD_REG_IMM8 (code, ARMREG_R1, ARMREG_R1, 4);
242 ARM_SUBS_REG_IMM8 (code, ARMREG_R2, ARMREG_R2, 4);
/* Branch back to the top of the loop while the remaining count is non-zero. */
243 ARM_B_COND (code, ARMCOND_NE, 0);
244 arm_patch (code - 4, start_loop);
/* Unrolled path: only valid while every offset touched fits in an imm12 field. */
247 if (arm_is_imm12 (doffset) && arm_is_imm12 (doffset + size) &&
248 arm_is_imm12 (soffset) && arm_is_imm12 (soffset + size)) {
250 ARM_LDR_IMM (code, ARMREG_LR, sreg, soffset);
251 ARM_STR_IMM (code, ARMREG_LR, dreg, doffset);
/* Offsets too large for imm12: rebase into r0/r1 and copy from offset 0. */
257 code = emit_big_add (code, ARMREG_R0, sreg, soffset);
258 code = emit_big_add (code, ARMREG_R1, dreg, doffset);
259 doffset = soffset = 0;
261 ARM_LDR_IMM (code, ARMREG_LR, ARMREG_R0, soffset);
262 ARM_STR_IMM (code, ARMREG_LR, ARMREG_R1, doffset);
268 g_assert (size == 0);
/* Emit an indirect call through REG. Uses blx where available; the fallback
 * sets lr manually and moves REG into pc. Jump-table builds never take this path. */
273 emit_call_reg (guint8 *code, int reg)
276 ARM_BLX_REG (code, reg);
278 #ifdef USE_JUMP_TABLES
279 g_assert_not_reached ();
/* Pre-blx fallback: lr = pc (which reads as current insn + 8), then jump. */
281 ARM_MOV_REG_REG (code, ARMREG_LR, ARMREG_PC);
285 ARM_MOV_REG_REG (code, ARMREG_PC, reg);
/* Emit a patchable call sequence; the target is filled in later via patch info.
 * Dynamic methods embed the target pointer in the code stream after the load. */
291 emit_call_seq (MonoCompile *cfg, guint8 *code)
293 #ifdef USE_JUMP_TABLES
294 code = mono_arm_patchable_bl (code, ARMCOND_AL);
296 if (cfg->method->dynamic) {
/* Load the inline target word (pc-relative) into ip, then call through ip. */
297 ARM_LDR_IMM (code, ARMREG_IP, ARMREG_PC, 0);
299 *(gpointer*)code = NULL;
301 code = emit_call_reg (code, ARMREG_IP);
/* Emit a patchable conditional branch. With jump tables the target address lives
 * in a table entry loaded into ip; otherwise a plain b with a zero displacement
 * is emitted and patched later. */
310 mono_arm_patchable_b (guint8 *code, int cond)
312 #ifdef USE_JUMP_TABLES
315 jte = mono_jumptable_add_entry ();
316 code = mono_arm_load_jumptable_entry (code, jte, ARMREG_IP);
317 ARM_BX_COND (code, cond, ARMREG_IP);
319 ARM_B_COND (code, cond, 0);
/* Same as mono_arm_patchable_b but emits a call (bl/blx) so lr is set. */
325 mono_arm_patchable_bl (guint8 *code, int cond)
327 #ifdef USE_JUMP_TABLES
330 jte = mono_jumptable_add_entry ();
331 code = mono_arm_load_jumptable_entry (code, jte, ARMREG_IP);
332 ARM_BLX_REG_COND (code, cond, ARMREG_IP);
334 ARM_BL_COND (code, cond, 0);
339 #ifdef USE_JUMP_TABLES
/* Materialize the address of jump-table entry JTE into REG using movw/movt
 * (requires ARMv6T2+/v7 which jump-table builds assume). */
341 mono_arm_load_jumptable_entry_addr (guint8 *code, gpointer *jte, ARMReg reg)
343 ARM_MOVW_REG_IMM (code, reg, GPOINTER_TO_UINT(jte) & 0xffff);
344 ARM_MOVT_REG_IMM (code, reg, (GPOINTER_TO_UINT(jte) >> 16) & 0xffff);
/* Load the value stored in jump-table entry JTE into REG (address, then deref). */
349 mono_arm_load_jumptable_entry (guint8 *code, gpointer* jte, ARMReg reg)
351 code = mono_arm_load_jumptable_entry_addr (code, jte, reg);
352 ARM_LDR_IMM (code, reg, reg, 0);
/* Move the native call return value (in r0/r1 per the soft-float ABI) into the
 * destination VFP register of INS, converting to single precision for R4. */
359 emit_move_return_value (MonoCompile *cfg, MonoInst *ins, guint8 *code)
361 switch (ins->opcode) {
364 case OP_FCALL_MEMBASE:
366 if (((MonoCallInst*)ins)->signature->ret->type == MONO_TYPE_R4) {
/* r0 holds the raw float bits: move to the FP reg, then convert to double. */
367 ARM_FMSR (code, ins->dreg, ARMREG_R0);
368 ARM_CVTS (code, ins->dreg, ins->dreg);
/* Double result: combine the r0/r1 pair into the destination double register. */
370 ARM_FMDRR (code, ARMREG_R0, ARMREG_R1, ins->dreg);
382 * Emit code to push an LMF structure on the LMF stack.
383 * On arm, this is intermixed with the initialization of other fields of the structure.
/* Tries progressively faster ways to obtain the LMF address: __aeabi_read_tp
 * with a known TLS offset, an inlined pthread_getspecific for managed-to-native
 * wrappers, and finally a call to mono_get_lmf_addr (). */
386 emit_save_lmf (MonoCompile *cfg, guint8 *code, gint32 lmf_offset)
388 gboolean get_lmf_fast = FALSE;
391 #ifdef HAVE_AEABI_READ_TP
392 gint32 lmf_addr_tls_offset = mono_get_lmf_addr_tls_offset ();
394 if (lmf_addr_tls_offset != -1) {
/* Fast path: read the thread pointer via __aeabi_read_tp, then index by offset. */
397 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_INTERNAL_METHOD,
398 (gpointer)"__aeabi_read_tp");
399 code = emit_call_seq (cfg, code);
401 ARM_LDR_IMM (code, ARMREG_R0, ARMREG_R0, lmf_addr_tls_offset);
407 if (cfg->method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) {
410 /* Inline mono_get_lmf_addr () */
411 /* jit_tls = pthread_getspecific (mono_jit_tls_id); lmf_addr = &jit_tls->lmf; */
413 /* Load mono_jit_tls_id */
415 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_JIT_TLS_ID, NULL);
416 ARM_LDR_IMM (code, ARMREG_R0, ARMREG_PC, 0);
418 *(gpointer*)code = NULL;
420 ARM_LDR_REG_REG (code, ARMREG_R0, ARMREG_PC, ARMREG_R0);
421 /* call pthread_getspecific () */
422 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_INTERNAL_METHOD,
423 (gpointer)"pthread_getspecific");
424 code = emit_call_seq (cfg, code);
425 /* lmf_addr = &jit_tls->lmf */
426 lmf_offset = G_STRUCT_OFFSET (MonoJitTlsData, lmf);
427 g_assert (arm_is_imm8 (lmf_offset));
428 ARM_ADD_REG_IMM (code, ARMREG_R0, ARMREG_R0, lmf_offset, 0);
/* Slow path: plain call to the runtime helper; result lands in r0. */
435 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_INTERNAL_METHOD,
436 (gpointer)"mono_get_lmf_addr");
437 code = emit_call_seq (cfg, code);
439 /* we build the MonoLMF structure on the stack - see mini-arm.h */
440 /* lmf_offset is the offset from the previous stack pointer,
441 * alloc_size is the total stack space allocated, so the offset
442 * of MonoLMF from the current stack ptr is alloc_size - lmf_offset.
443 * The pointer to the struct is put in r1 (new_lmf).
444 * ip is used as scratch
445 * The callee-saved registers are already in the MonoLMF structure
447 code = emit_big_add (code, ARMREG_R1, ARMREG_SP, lmf_offset);
448 /* r0 is the result from mono_get_lmf_addr () */
449 ARM_STR_IMM (code, ARMREG_R0, ARMREG_R1, G_STRUCT_OFFSET (MonoLMF, lmf_addr));
450 /* new_lmf->previous_lmf = *lmf_addr */
451 ARM_LDR_IMM (code, ARMREG_IP, ARMREG_R0, G_STRUCT_OFFSET (MonoLMF, previous_lmf));
452 ARM_STR_IMM (code, ARMREG_IP, ARMREG_R1, G_STRUCT_OFFSET (MonoLMF, previous_lmf));
453 /* *(lmf_addr) = r1 */
454 ARM_STR_IMM (code, ARMREG_R1, ARMREG_R0, G_STRUCT_OFFSET (MonoLMF, previous_lmf));
455 /* Skip method (only needed for trampoline LMF frames) */
456 ARM_STR_IMM (code, ARMREG_SP, ARMREG_R1, G_STRUCT_OFFSET (MonoLMF, sp));
457 ARM_STR_IMM (code, ARMREG_FP, ARMREG_R1, G_STRUCT_OFFSET (MonoLMF, fp));
458 /* save the current IP */
459 ARM_MOV_REG_REG (code, ARMREG_IP, ARMREG_PC);
460 ARM_STR_IMM (code, ARMREG_IP, ARMREG_R1, G_STRUCT_OFFSET (MonoLMF, ip));
/* Mark every LMF slot as not containing GC references for precise stack scans. */
462 for (i = 0; i < sizeof (MonoLMF); i += sizeof (mgreg_t))
463 mini_gc_set_slot_type_from_fp (cfg, lmf_offset + i, SLOT_NOREF);
471 * Emit code to pop an LMF structure from the LMF stack.
/* Unlinks the current LMF: *lmf_addr = lmf->previous_lmf. When the LMF is close
 * to the frame register it is addressed directly; otherwise r2 is rebased first. */
474 emit_restore_lmf (MonoCompile *cfg, guint8 *code, gint32 lmf_offset)
478 if (lmf_offset < 32) {
479 basereg = cfg->frame_reg;
484 code = emit_big_add (code, ARMREG_R2, cfg->frame_reg, lmf_offset);
487 /* ip = previous_lmf */
488 ARM_LDR_IMM (code, ARMREG_IP, basereg, offset + G_STRUCT_OFFSET (MonoLMF, previous_lmf));
490 ARM_LDR_IMM (code, ARMREG_LR, basereg, offset + G_STRUCT_OFFSET (MonoLMF, lmf_addr));
491 /* *(lmf_addr) = previous_lmf */
492 ARM_STR_IMM (code, ARMREG_IP, ARMREG_LR, G_STRUCT_OFFSET (MonoLMF, previous_lmf));
497 #endif /* #ifndef DISABLE_JIT */
500 * mono_arch_get_argument_info:
501 * @csig: a method signature
502 * @param_count: the number of parameters to consider
503 * @arg_info: an array to store the result infos
505 * Gathers information on parameters such as size, alignment and
506 * padding. arg_info should be large enough to hold param_count + 1 entries.
508 * Returns the size of the activation frame.
511 mono_arch_get_argument_info (MonoGenericSharingContext *gsctx, MonoMethodSignature *csig, int param_count, MonoJitArgumentInfo *arg_info)
513 int k, frame_size = 0;
514 guint32 size, align, pad;
/* A struct return is passed as a hidden pointer, consuming one frame slot. */
518 t = mini_type_get_underlying_type (gsctx, csig->ret);
519 if (MONO_TYPE_ISSTRUCT (t)) {
520 frame_size += sizeof (gpointer);
524 arg_info [0].offset = offset;
/* 'this' (when present) also occupies one pointer-sized slot. */
527 frame_size += sizeof (gpointer);
531 arg_info [0].size = frame_size;
533 for (k = 0; k < param_count; k++) {
534 size = mini_type_stack_size_full (NULL, csig->params [k], &align, csig->pinvoke);
536 /* ignore alignment for now */
/* Pad the running frame size up to this parameter's alignment. */
539 frame_size += pad = (align - (frame_size & (align - 1))) & (align - 1);
540 arg_info [k].pad = pad;
542 arg_info [k + 1].pad = 0;
543 arg_info [k + 1].size = size;
545 arg_info [k + 1].offset = offset;
/* Finally round the whole frame to MONO_ARCH_FRAME_ALIGNMENT. */
549 align = MONO_ARCH_FRAME_ALIGNMENT;
550 frame_size += pad = (align - (frame_size & (align - 1))) & (align - 1);
551 arg_info [k].pad = pad;
556 #define MAX_ARCH_DELEGATE_PARAMS 3
/* Generate a small thunk implementing delegate Invoke. With HAS_TARGET the
 * 'this' arg is replaced by delegate->target; otherwise the register args are
 * shifted down by one before jumping to delegate->method_ptr. */
559 get_delegate_invoke_impl (gboolean has_target, gboolean param_count, guint32 *code_size)
561 guint8 *code, *start;
564 start = code = mono_global_codeman_reserve (12);
566 /* Replace the this argument with the target */
567 ARM_LDR_IMM (code, ARMREG_IP, ARMREG_R0, G_STRUCT_OFFSET (MonoDelegate, method_ptr));
568 ARM_LDR_IMM (code, ARMREG_R0, ARMREG_R0, G_STRUCT_OFFSET (MonoDelegate, target));
569 ARM_MOV_REG_REG (code, ARMREG_PC, ARMREG_IP);
571 g_assert ((code - start) <= 12);
573 mono_arch_flush_icache (start, 12);
/* No-target case: 8 bytes of fixed code plus one mov per parameter. */
577 size = 8 + param_count * 4;
578 start = code = mono_global_codeman_reserve (size);
580 ARM_LDR_IMM (code, ARMREG_IP, ARMREG_R0, G_STRUCT_OFFSET (MonoDelegate, method_ptr));
581 /* slide down the arguments */
582 for (i = 0; i < param_count; ++i) {
583 ARM_MOV_REG_REG (code, (ARMREG_R0 + i), (ARMREG_R0 + i + 1));
585 ARM_MOV_REG_REG (code, ARMREG_PC, ARMREG_IP);
587 g_assert ((code - start) <= size);
589 mono_arch_flush_icache (start, size);
/* NOTE(review): code_size appears optional — only written when non-NULL upstream. */
593 *code_size = code - start;
599 * mono_arch_get_delegate_invoke_impls:
601 * Return a list of MonoAotTrampInfo structures for the delegate invoke impl
/* Builds the has-target thunk plus one no-target thunk per arity (0..MAX). */
605 mono_arch_get_delegate_invoke_impls (void)
613 code = get_delegate_invoke_impl (TRUE, 0, &code_len);
614 res = g_slist_prepend (res, mono_tramp_info_create ("delegate_invoke_impl_has_target", code, code_len, NULL, NULL));
616 for (i = 0; i <= MAX_ARCH_DELEGATE_PARAMS; ++i) {
617 code = get_delegate_invoke_impl (FALSE, i, &code_len);
618 tramp_name = g_strdup_printf ("delegate_invoke_impl_target_%d", i);
619 res = g_slist_prepend (res, mono_tramp_info_create (tramp_name, code, code_len, NULL, NULL));
/* Return (and cache) the delegate-invoke thunk matching SIG/HAS_TARGET, using
 * AOT trampolines when available. Caches are guarded by the arch mutex. */
627 mono_arch_get_delegate_invoke_impl (MonoMethodSignature *sig, gboolean has_target)
629 guint8 *code, *start;
631 /* FIXME: Support more cases */
632 if (MONO_TYPE_ISSTRUCT (sig->ret))
/* Single cached thunk for the has-target flavour. */
636 static guint8* cached = NULL;
637 mono_mini_arch_lock ();
639 mono_mini_arch_unlock ();
644 start = mono_aot_get_trampoline ("delegate_invoke_impl_has_target");
646 start = get_delegate_invoke_impl (TRUE, 0, NULL);
648 mono_mini_arch_unlock ();
/* No-target flavour: one cached thunk per parameter count, regsize args only. */
651 static guint8* cache [MAX_ARCH_DELEGATE_PARAMS + 1] = {NULL};
654 if (sig->param_count > MAX_ARCH_DELEGATE_PARAMS)
656 for (i = 0; i < sig->param_count; ++i)
657 if (!mono_is_regsize_var (sig->params [i]))
660 mono_mini_arch_lock ();
661 code = cache [sig->param_count];
663 mono_mini_arch_unlock ();
668 char *name = g_strdup_printf ("delegate_invoke_impl_target_%d", sig->param_count);
669 start = mono_aot_get_trampoline (name);
672 start = get_delegate_invoke_impl (FALSE, sig->param_count, NULL);
674 cache [sig->param_count] = start;
675 mono_mini_arch_unlock ();
/* 'this' is always passed in r0 on ARM, so just read it from the saved regs. */
683 mono_arch_get_this_arg_from_call (mgreg_t *regs, guint8 *code)
685 return (gpointer)regs [ARMREG_R0];
689 * Initialize the cpu to execute managed code.
692 mono_arch_cpu_init (void)
694 #if defined(__APPLE__)
/* Darwin aligns 64-bit ints differently; record the ABI's actual alignment. */
697 i8_align = __alignof__ (gint64);
/* Build a native thunk that captures the caller's full register state into a
 * MonoContext on the stack, calls FUNCTION (MonoContext *ctx), then restores
 * every register (including pc) from the possibly-modified context. Used for
 * the soft-debugger single-step/breakpoint entry points. */
702 create_function_wrapper (gpointer function)
704 guint8 *start, *code;
706 start = code = mono_global_codeman_reserve (96);
709 * Construct the MonoContext structure on the stack.
712 ARM_SUB_REG_IMM8 (code, ARMREG_SP, ARMREG_SP, sizeof (MonoContext));
714 /* save ip, lr and pc into their corresponding ctx.regs slots. */
715 ARM_STR_IMM (code, ARMREG_IP, ARMREG_SP, G_STRUCT_OFFSET (MonoContext, regs) + sizeof (mgreg_t) * ARMREG_IP);
716 ARM_STR_IMM (code, ARMREG_LR, ARMREG_SP, G_STRUCT_OFFSET (MonoContext, regs) + 4 * ARMREG_LR);
717 ARM_STR_IMM (code, ARMREG_LR, ARMREG_SP, G_STRUCT_OFFSET (MonoContext, regs) + 4 * ARMREG_PC);
719 /* save r0..r10 and fp */
720 ARM_ADD_REG_IMM8 (code, ARMREG_IP, ARMREG_SP, G_STRUCT_OFFSET (MonoContext, regs));
721 ARM_STM (code, ARMREG_IP, 0x0fff);
723 /* now we can update fp. */
724 ARM_MOV_REG_REG (code, ARMREG_FP, ARMREG_SP);
726 /* make ctx.esp hold the actual value of sp at the beginning of this method. */
727 ARM_ADD_REG_IMM8 (code, ARMREG_R0, ARMREG_FP, sizeof (MonoContext));
728 ARM_STR_IMM (code, ARMREG_R0, ARMREG_IP, 4 * ARMREG_SP);
729 ARM_STR_IMM (code, ARMREG_R0, ARMREG_FP, G_STRUCT_OFFSET (MonoContext, regs) + 4 * ARMREG_SP);
731 /* make ctx.eip hold the address of the call. */
732 ARM_SUB_REG_IMM8 (code, ARMREG_LR, ARMREG_LR, 4);
733 ARM_STR_IMM (code, ARMREG_LR, ARMREG_SP, G_STRUCT_OFFSET (MonoContext, pc));
735 /* r0 now points to the MonoContext */
736 ARM_MOV_REG_REG (code, ARMREG_R0, ARMREG_FP);
/* Load FUNCTION's address: via jump table, or inline word after the load. */
739 #ifdef USE_JUMP_TABLES
741 gpointer *jte = mono_jumptable_add_entry ();
742 code = mono_arm_load_jumptable_entry (code, jte, ARMREG_IP);
746 ARM_LDR_IMM (code, ARMREG_IP, ARMREG_PC, 0);
748 *(gpointer*)code = function;
751 ARM_BLX_REG (code, ARMREG_IP);
753 /* we're back; save ctx.eip and ctx.esp into the corresponding regs slots. */
754 ARM_LDR_IMM (code, ARMREG_R0, ARMREG_FP, G_STRUCT_OFFSET (MonoContext, pc));
755 ARM_STR_IMM (code, ARMREG_R0, ARMREG_FP, G_STRUCT_OFFSET (MonoContext, regs) + 4 * ARMREG_LR);
756 ARM_STR_IMM (code, ARMREG_R0, ARMREG_FP, G_STRUCT_OFFSET (MonoContext, regs) + 4 * ARMREG_PC);
758 /* make ip point to the regs array, then restore everything, including pc. */
759 ARM_ADD_REG_IMM8 (code, ARMREG_IP, ARMREG_FP, G_STRUCT_OFFSET (MonoContext, regs));
760 ARM_LDM (code, ARMREG_IP, 0xffff);
762 mono_arch_flush_icache (start, code - start);
768 * Initialize architecture specific code.
/* One-time setup: debugger wrappers/trigger pages, JIT icall registration,
 * FPU selection and CPU feature detection (hwcap + MONO_CPU_ARCH override). */
771 mono_arch_init (void)
773 const char *cpu_arch;
775 InitializeCriticalSection (&mini_arch_mutex);
776 #ifdef MONO_ARCH_SOFT_DEBUG_SUPPORTED
777 if (mini_get_debug_options ()->soft_breakpoints) {
778 single_step_func_wrapper = create_function_wrapper (debugger_agent_single_step_from_context);
779 breakpoint_func_wrapper = create_function_wrapper (debugger_agent_breakpoint_from_context);
/* Hardware-breakpoint mode: readable trigger pages; bp page faults when read. */
784 ss_trigger_page = mono_valloc (NULL, mono_pagesize (), MONO_MMAP_READ|MONO_MMAP_32BIT);
785 bp_trigger_page = mono_valloc (NULL, mono_pagesize (), MONO_MMAP_READ|MONO_MMAP_32BIT);
786 mono_mprotect (bp_trigger_page, mono_pagesize (), 0);
789 mono_aot_register_jit_icall ("mono_arm_throw_exception", mono_arm_throw_exception);
790 mono_aot_register_jit_icall ("mono_arm_throw_exception_by_token", mono_arm_throw_exception_by_token);
791 mono_aot_register_jit_icall ("mono_arm_resume_unwind", mono_arm_resume_unwind);
792 #if defined(MONOTOUCH) || defined(MONO_EXTENSIONS)
793 mono_aot_register_jit_icall ("mono_arm_start_gsharedvt_call", mono_arm_start_gsharedvt_call);
796 #if defined(__ARM_EABI__)
797 eabi_supported = TRUE;
/* Compile-time FPU choice, possibly downgraded to soft float at runtime. */
800 #if defined(ARM_FPU_VFP_HARD)
801 arm_fpu = MONO_ARM_FPU_VFP_HARD;
803 arm_fpu = MONO_ARM_FPU_VFP;
805 #if defined(ARM_FPU_NONE) && !defined(__APPLE__)
806 /* If we're compiling with a soft float fallback and it
807 turns out that no VFP unit is available, we need to
808 switch to soft float. We don't do this for iOS, since
809 iOS devices always have a VFP unit. */
810 if (!mono_hwcap_arm_has_vfp)
811 arm_fpu = MONO_ARM_FPU_NONE;
815 v5_supported = mono_hwcap_arm_is_v5;
816 v6_supported = mono_hwcap_arm_is_v6;
817 v7_supported = mono_hwcap_arm_is_v7;
818 v7s_supported = mono_hwcap_arm_is_v7s;
820 #if defined(__APPLE__)
821 /* iOS is special-cased here because we don't yet
822 have a way to properly detect CPU features on it. */
823 thumb_supported = TRUE;
826 thumb_supported = mono_hwcap_arm_has_thumb;
827 thumb2_supported = mono_hwcap_arm_has_thumb2;
830 /* Format: armv(5|6|7[s])[-thumb[2]] */
831 cpu_arch = g_getenv ("MONO_CPU_ARCH");
833 /* Do this here so it overrides any detection. */
835 if (strncmp (cpu_arch, "armv", 4) == 0) {
836 v5_supported = cpu_arch [4] >= '5';
837 v6_supported = cpu_arch [4] >= '6';
838 v7_supported = cpu_arch [4] >= '7';
839 v7s_supported = strncmp (cpu_arch, "armv7s", 6) == 0;
842 thumb_supported = strstr (cpu_arch, "thumb") != NULL;
843 thumb2_supported = strstr (cpu_arch, "thumb2") != NULL;
848 * Cleanup architecture specific code.
851 mono_arch_cleanup (void)
856 * This function returns the optimizations supported on this cpu.
859 mono_arch_cpu_optimizations (guint32 *exclude_mask)
861 /* no arm-specific optimizations yet */
867 * This function test for all SIMD functions supported.
869 * Returns a bitmask corresponding to all supported versions.
873 mono_arch_cpu_enumerate_simd_versions (void)
875 /* SIMD is currently unimplemented */
/* Decide whether OPCODE must be emulated in software on this CPU/FPU config. */
883 mono_arch_opcode_needs_emulation (MonoCompile *cfg, int opcode)
899 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
/* True when the runtime selected the soft-float fallback (no usable VFP). */
901 mono_arch_is_soft_float (void)
903 return arm_fpu == MONO_ARM_FPU_NONE;
/* Return whether type T fits in a single 32-bit integer register (pointers,
 * object references, non-valuetype generic instances). */
908 is_regsize_var (MonoGenericSharingContext *gsctx, MonoType *t) {
911 t = mini_type_get_underlying_type (gsctx, t);
918 case MONO_TYPE_FNPTR:
920 case MONO_TYPE_OBJECT:
921 case MONO_TYPE_STRING:
922 case MONO_TYPE_CLASS:
923 case MONO_TYPE_SZARRAY:
924 case MONO_TYPE_ARRAY:
926 case MONO_TYPE_GENERICINST:
927 if (!mono_type_generic_inst_is_valuetype (t))
930 case MONO_TYPE_VALUETYPE:
/* Collect the method's variables eligible for global register allocation:
 * live, non-volatile, non-indirect locals/args of register size. */
937 mono_arch_get_allocatable_int_vars (MonoCompile *cfg)
942 for (i = 0; i < cfg->num_varinfo; i++) {
943 MonoInst *ins = cfg->varinfo [i];
944 MonoMethodVar *vmv = MONO_VARINFO (cfg, i);
/* Skip variables whose live range is empty or inverted — never used. */
947 if (vmv->range.first_use.abs_pos >= vmv->range.last_use.abs_pos)
950 if (ins->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT) || (ins->opcode != OP_LOCAL && ins->opcode != OP_ARG))
953 /* we can only allocate 32 bit values */
954 if (is_regsize_var (cfg->generic_sharing_context, ins->inst_vtype)) {
955 g_assert (MONO_VARINFO (cfg, i)->reg == -1);
956 g_assert (i == vmv->idx);
957 vars = mono_varlist_insert_sorted (cfg, vars, vmv, FALSE);
964 #define USE_EXTRA_TEMPS 0
/* Return the list of callee-saved registers the allocator may use globally.
 * Availability depends on frame-pointer omission, the iphone ABI (r7 reserved)
 * and whether V5 is needed for the rgctx/IMT argument. */
967 mono_arch_get_global_int_regs (MonoCompile *cfg)
971 mono_arch_compute_omit_fp (cfg);
974 * FIXME: Interface calls might go through a static rgctx trampoline which
975 * sets V5, but it doesn't save it, so we need to save it ourselves, and
978 if (cfg->flags & MONO_CFG_HAS_CALLS)
979 cfg->uses_rgctx_reg = TRUE;
981 if (cfg->arch.omit_fp)
982 regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_FP));
983 regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_V1));
984 regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_V2));
985 regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_V3));
987 /* V4=R7 is used as a frame pointer, but V7=R10 is preserved */
988 regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_V7));
990 regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_V4));
991 if (!(cfg->compile_aot || cfg->uses_rgctx_reg || COMPILE_LLVM (cfg)))
992 /* V5 is reserved for passing the vtable/rgctx/IMT method */
993 regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_V5));
994 /*regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_V6));*/
995 /*regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_V7));*/
1001 * mono_arch_regalloc_cost:
1003 * Return the cost, in number of memory references, of the action of
1004 * allocating the variable VMV into a register during global register
1008 mono_arch_regalloc_cost (MonoCompile *cfg, MonoMethodVar *vmv)
1014 #endif /* #ifndef DISABLE_JIT */
1016 #ifndef __GNUC_PREREQ
1017 #define __GNUC_PREREQ(maj, min) (0)
/* Flush the instruction cache for [code, code+size) so freshly generated code
 * is visible to execution. Strategy is chosen per platform: no-op on NaCl and
 * cross builds, sys_icache_invalidate on Apple, __clear_cache on GCC >= 4.1,
 * and the cacheflush syscall otherwise. */
1021 mono_arch_flush_icache (guint8 *code, gint size)
1023 #if defined(__native_client__)
1024 // For Native Client we don't have to flush i-cache here,
1025 // as it's being done by dyncode interface.
1028 #ifdef MONO_CROSS_COMPILE
1030 sys_icache_invalidate (code, size);
1031 #elif __GNUC_PREREQ(4, 1)
1032 __clear_cache (code, code + size);
1033 #elif defined(PLATFORM_ANDROID)
1034 const int syscall = 0xf0002;
1042 : "r" (code), "r" (code + size), "r" (syscall)
1043 : "r0", "r1", "r7", "r2"
/* Generic Linux fallback: raw sys_cacheflush via swi. */
1046 __asm __volatile ("mov r0, %0\n"
1049 "swi 0x9f0002 @ sys_cacheflush"
1051 : "r" (code), "r" (code + size), "r" (0)
1052 : "r0", "r1", "r3" );
1054 #endif /* !__native_client__ */
/* Argument-passing classifications and helpers (ArgInfo/CallInfo fragments). */
1065 RegTypeStructByAddr,
1066 /* gsharedvt argument passed by addr in greg */
1067 RegTypeGSharedVtInReg,
1068 /* gsharedvt argument passed by addr on stack */
1069 RegTypeGSharedVtOnStack,
1074 guint16 vtsize; /* in param area */
1078 guint8 size : 4; /* 1, 2, 4, 8, or regs used by RegTypeStructByVal */
1083 guint32 stack_usage;
1084 gboolean vtype_retaddr;
1085 /* The index of the vret arg in the argument list */
1095 /*#define __alignof__(a) sizeof(a)*/
1096 #define __alignof__(type) G_STRUCT_OFFSET(struct { char c; type x; }, x)
/* r0-r3 are used for argument passing per the AAPCS. */
1099 #define PARAM_REGS 4
/* Assign one argument to a register or stack slot, advancing GR/STACK_SIZE.
 * SIMPLE arguments take one slot; 64-bit values may need an even register
 * pair (EABI) or be split between r3 and the stack. */
1102 add_general (guint *gr, guint *stack_size, ArgInfo *ainfo, gboolean simple)
1105 if (*gr > ARMREG_R3) {
1107 ainfo->offset = *stack_size;
1108 ainfo->reg = ARMREG_SP; /* in the caller */
1109 ainfo->storage = RegTypeBase;
1112 ainfo->storage = RegTypeGeneral;
/* 64-bit path: split only when i8 alignment is 4 (Darwin). */
1119 split = i8_align == 4;
1124 if (*gr == ARMREG_R3 && split) {
1125 /* first word in r3 and the second on the stack */
1126 ainfo->offset = *stack_size;
1127 ainfo->reg = ARMREG_SP; /* in the caller */
1128 ainfo->storage = RegTypeBaseGen;
1130 } else if (*gr >= ARMREG_R3) {
1131 if (eabi_supported) {
1132 /* darwin aligns longs to 4 byte only */
1133 if (i8_align == 8) {
1138 ainfo->offset = *stack_size;
1139 ainfo->reg = ARMREG_SP; /* in the caller */
1140 ainfo->storage = RegTypeBase;
1143 if (eabi_supported) {
1144 if (i8_align == 8 && ((*gr) & 1))
1147 ainfo->storage = RegTypeIRegPair;
/* Compute the calling convention for SIG: where each argument and the return
 * value live (registers, stack, by-value structs, gsharedvt by-ref, ...).
 * Allocates the CallInfo from MP when given, otherwise from the heap.
 * NOTE(review): this function continues beyond the end of this excerpt. */
1156 get_call_info (MonoGenericSharingContext *gsctx, MonoMemPool *mp, MonoMethodSignature *sig)
1158 guint i, gr, pstart;
1159 int n = sig->hasthis + sig->param_count;
1160 MonoType *simpletype;
1161 guint32 stack_size = 0;
1163 gboolean is_pinvoke = sig->pinvoke;
1167 cinfo = mono_mempool_alloc0 (mp, sizeof (CallInfo) + (sizeof (ArgInfo) * n));
1169 cinfo = g_malloc0 (sizeof (CallInfo) + (sizeof (ArgInfo) * n));
/* Classify the return type first: small pinvoke structs come back by value,
 * everything else struct-like needs a hidden return-address argument. */
1174 t = mini_type_get_underlying_type (gsctx, sig->ret);
1175 if (MONO_TYPE_ISSTRUCT (t)) {
1178 if (is_pinvoke && mono_class_native_size (mono_class_from_mono_type (t), &align) <= sizeof (gpointer)) {
1179 cinfo->ret.storage = RegTypeStructByVal;
1181 cinfo->vtype_retaddr = TRUE;
1183 } else if (!(t->type == MONO_TYPE_GENERICINST && !mono_type_generic_inst_is_valuetype (t)) && mini_is_gsharedvt_type_gsctx (gsctx, t)) {
1184 cinfo->vtype_retaddr = TRUE;
1190 * To simplify get_this_arg_reg () and LLVM integration, emit the vret arg after
1191 * the first argument, allowing 'this' to be always passed in the first arg reg.
1192 * Also do this if the first argument is a reference type, since virtual calls
1193 * are sometimes made using calli without sig->hasthis set, like in the delegate
1196 if (cinfo->vtype_retaddr && !is_pinvoke && (sig->hasthis || (sig->param_count > 0 && MONO_TYPE_IS_REFERENCE (mini_type_get_underlying_type (gsctx, sig->params [0]))))) {
1198 add_general (&gr, &stack_size, cinfo->args + 0, TRUE);
1200 add_general (&gr, &stack_size, &cinfo->args [sig->hasthis + 0], TRUE);
1204 add_general (&gr, &stack_size, &cinfo->ret, TRUE);
1205 cinfo->vret_arg_index = 1;
1209 add_general (&gr, &stack_size, cinfo->args + 0, TRUE);
1213 if (cinfo->vtype_retaddr)
1214 add_general (&gr, &stack_size, &cinfo->ret, TRUE);
/* Classify each formal parameter. */
1217 DEBUG(printf("params: %d\n", sig->param_count));
1218 for (i = pstart; i < sig->param_count; ++i) {
1219 ArgInfo *ainfo = &cinfo->args [n];
1221 if ((sig->call_convention == MONO_CALL_VARARG) && (i == sig->sentinelpos)) {
1222 /* Prevent implicit arguments and sig_cookie from
1223 being passed in registers */
1225 /* Emit the signature cookie just before the implicit arguments */
1226 add_general (&gr, &stack_size, &cinfo->sig_cookie, TRUE);
1228 DEBUG(printf("param %d: ", i));
1229 if (sig->params [i]->byref) {
1230 DEBUG(printf("byref\n"));
1231 add_general (&gr, &stack_size, ainfo, TRUE);
1235 simpletype = mini_type_get_underlying_type (gsctx, sig->params [i]);
1236 switch (simpletype->type) {
1237 case MONO_TYPE_BOOLEAN:
1240 cinfo->args [n].size = 1;
1241 add_general (&gr, &stack_size, ainfo, TRUE);
1244 case MONO_TYPE_CHAR:
1247 cinfo->args [n].size = 2;
1248 add_general (&gr, &stack_size, ainfo, TRUE);
1253 cinfo->args [n].size = 4;
1254 add_general (&gr, &stack_size, ainfo, TRUE);
1260 case MONO_TYPE_FNPTR:
1261 case MONO_TYPE_CLASS:
1262 case MONO_TYPE_OBJECT:
1263 case MONO_TYPE_STRING:
1264 case MONO_TYPE_SZARRAY:
1265 case MONO_TYPE_ARRAY:
1267 cinfo->args [n].size = sizeof (gpointer);
1268 add_general (&gr, &stack_size, ainfo, TRUE);
1271 case MONO_TYPE_GENERICINST:
1272 if (!mono_type_generic_inst_is_valuetype (simpletype)) {
1273 cinfo->args [n].size = sizeof (gpointer);
1274 add_general (&gr, &stack_size, ainfo, TRUE);
1278 if (mini_is_gsharedvt_type_gsctx (gsctx, simpletype)) {
1279 /* gsharedvt arguments are passed by ref */
1280 g_assert (mini_is_gsharedvt_type_gsctx (gsctx, simpletype));
1281 add_general (&gr, &stack_size, ainfo, TRUE);
1282 switch (ainfo->storage) {
1283 case RegTypeGeneral:
1284 ainfo->storage = RegTypeGSharedVtInReg;
1287 ainfo->storage = RegTypeGSharedVtOnStack;
1290 g_assert_not_reached ();
1296 case MONO_TYPE_TYPEDBYREF:
1297 case MONO_TYPE_VALUETYPE: {
1303 if (simpletype->type == MONO_TYPE_TYPEDBYREF) {
1304 size = sizeof (MonoTypedRef);
1305 align = sizeof (gpointer);
1307 MonoClass *klass = mono_class_from_mono_type (sig->params [i]);
1309 size = mono_class_native_size (klass, &align);
1311 size = mini_type_stack_size_full (gsctx, simpletype, &align, FALSE);
1313 DEBUG(printf ("load %d bytes struct\n", size));
/* Round the struct up to whole pointer-sized words. */
1316 align_size += (sizeof (gpointer) - 1);
1317 align_size &= ~(sizeof (gpointer) - 1);
1318 nwords = (align_size + sizeof (gpointer) -1 ) / sizeof (gpointer);
1319 ainfo->storage = RegTypeStructByVal;
1320 ainfo->struct_size = size;
1321 /* FIXME: align stack_size if needed */
1322 if (eabi_supported) {
1323 if (align >= 8 && (gr & 1))
/* Split the struct between remaining argument registers and the stack. */
1326 if (gr > ARMREG_R3) {
1328 ainfo->vtsize = nwords;
1330 int rest = ARMREG_R3 - gr + 1;
1331 int n_in_regs = rest >= nwords? nwords: rest;
1333 ainfo->size = n_in_regs;
1334 ainfo->vtsize = nwords - n_in_regs;
1337 nwords -= n_in_regs;
1339 ainfo->offset = stack_size;
1340 /*g_print ("offset for arg %d at %d\n", n, stack_size);*/
1341 stack_size += nwords * sizeof (gpointer);
1349 add_general (&gr, &stack_size, ainfo, FALSE);
1353 case MONO_TYPE_MVAR:
1354 /* gsharedvt arguments are passed by ref */
1355 g_assert (mini_is_gsharedvt_type_gsctx (gsctx, simpletype));
1356 add_general (&gr, &stack_size, ainfo, TRUE);
1357 switch (ainfo->storage) {
1358 case RegTypeGeneral:
1359 ainfo->storage = RegTypeGSharedVtInReg;
1362 ainfo->storage = RegTypeGSharedVtOnStack;
1365 g_assert_not_reached ();
1370 g_error ("Can't trampoline 0x%x", sig->params [i]->type);
1374 /* Handle the case where there are no implicit arguments */
1375 if ((sig->call_convention == MONO_CALL_VARARG) && (i == sig->sentinelpos)) {
1376 /* Prevent implicit arguments and sig_cookie from
1377 being passed in registers */
1379 /* Emit the signature cookie just before the implicit arguments */
1380 add_general (&gr, &stack_size, &cinfo->sig_cookie, TRUE);
/* Finally record where the return value lives. */
1384 simpletype = mini_type_get_underlying_type (gsctx, sig->ret);
1385 switch (simpletype->type) {
1386 case MONO_TYPE_BOOLEAN:
1391 case MONO_TYPE_CHAR:
1397 case MONO_TYPE_FNPTR:
1398 case MONO_TYPE_CLASS:
1399 case MONO_TYPE_OBJECT:
1400 case MONO_TYPE_SZARRAY:
1401 case MONO_TYPE_ARRAY:
1402 case MONO_TYPE_STRING:
1403 cinfo->ret.storage = RegTypeGeneral;
1404 cinfo->ret.reg = ARMREG_R0;
1408 cinfo->ret.storage = RegTypeIRegPair;
1409 cinfo->ret.reg = ARMREG_R0;
1413 cinfo->ret.storage = RegTypeFP;
1414 cinfo->ret.reg = ARMREG_R0;
1415 /* FIXME: cinfo->ret.reg = ???;
1416 cinfo->ret.storage = RegTypeFP;*/
1418 case MONO_TYPE_GENERICINST:
1419 if (!mono_type_generic_inst_is_valuetype (simpletype)) {
1420 cinfo->ret.storage = RegTypeGeneral;
1421 cinfo->ret.reg = ARMREG_R0;
1424 // FIXME: Only for variable types
1425 if (mini_is_gsharedvt_type_gsctx (gsctx, simpletype)) {
1426 cinfo->ret.storage = RegTypeStructByAddr;
1427 g_assert (cinfo->vtype_retaddr);
1431 case MONO_TYPE_VALUETYPE:
1432 case MONO_TYPE_TYPEDBYREF:
1433 if (cinfo->ret.storage != RegTypeStructByVal)
1434 cinfo->ret.storage = RegTypeStructByAddr;
1437 case MONO_TYPE_MVAR:
1438 g_assert (mini_is_gsharedvt_type_gsctx (gsctx, simpletype));
1439 cinfo->ret.storage = RegTypeStructByAddr;
1440 g_assert (cinfo->vtype_retaddr);
1442 case MONO_TYPE_VOID:
1445 g_error ("Can't handle as return value 0x%x", sig->ret->type);
1449 /* align stack size to 8 */
1450 DEBUG (printf (" stack size: %d (%d)\n", (stack_size + 15) & ~15, stack_size));
1451 stack_size = (stack_size + 7) & ~7;
1453 cinfo->stack_usage = stack_size;
/*
 * debug_omit_fp:
 * Debug gate for frame-pointer omission: defers to the runtime debug
 * counter so omit-fp related miscompiles can be bisected per method.
 */
1460 debug_omit_fp (void)
1463 return mono_debug_count ();
1470 * mono_arch_compute_omit_fp:
1472 * Determine whenever the frame pointer can be eliminated.
/*
 * Result is cached in cfg->arch.omit_fp / cfg->arch.omit_fp_computed.
 * Starts optimistic (omit_fp = TRUE) and then disables omission for every
 * feature that needs a stable frame pointer.
 */
1475 mono_arch_compute_omit_fp (MonoCompile *cfg)
1477 MonoMethodSignature *sig;
1478 MonoMethodHeader *header;
/* Already computed for this method: nothing to do (early-out; body elided here). */
1482 if (cfg->arch.omit_fp_computed)
1485 header = cfg->header;
1487 sig = mono_method_signature (cfg->method);
/* Lazily build and cache the calling-convention info for this signature. */
1489 if (!cfg->arch.cinfo)
1490 cfg->arch.cinfo = get_call_info (cfg->generic_sharing_context, cfg->mempool, sig);
1491 cinfo = cfg->arch.cinfo;
1494 * FIXME: Remove some of the restrictions.
1496 cfg->arch.omit_fp = TRUE;
1497 cfg->arch.omit_fp_computed = TRUE;
/* Each of the following conditions forces a frame pointer. */
1499 if (cfg->disable_omit_fp)
1500 cfg->arch.omit_fp = FALSE;
1501 if (!debug_omit_fp ())
1502 cfg->arch.omit_fp = FALSE;
/* The LMF save/restore sequence needs a frame pointer. */
1504 if (cfg->method->save_lmf)
1505 cfg->arch.omit_fp = FALSE;
/* alloca moves SP at runtime, so locals must be addressed off FP. */
1507 if (cfg->flags & MONO_CFG_HAS_ALLOCA)
1508 cfg->arch.omit_fp = FALSE;
/* Exception clauses require an unwindable frame. */
1509 if (header->num_clauses)
1510 cfg->arch.omit_fp = FALSE;
1511 if (cfg->param_area)
1512 cfg->arch.omit_fp = FALSE;
1513 if (!sig->pinvoke && (sig->call_convention == MONO_CALL_VARARG))
1514 cfg->arch.omit_fp = FALSE;
/* Tracing/profiling instrumentation also needs the frame pointer. */
1515 if ((mono_jit_trace_calls != NULL && mono_trace_eval (cfg->method)) ||
1516 (cfg->prof_options & MONO_PROFILE_ENTER_LEAVE))
1517 cfg->arch.omit_fp = FALSE;
/* Stack-resident incoming arguments: their offsets depend on the final
 * frame layout, which is only fixed when FP is kept. */
1518 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
1519 ArgInfo *ainfo = &cinfo->args [i];
1521 if (ainfo->storage == RegTypeBase || ainfo->storage == RegTypeBaseGen || ainfo->storage == RegTypeStructByVal) {
1523 * The stack offset can only be determined when the frame
1526 cfg->arch.omit_fp = FALSE;
/* Accumulate the total size of locals (used by elided code below;
 * NOTE(review): surrounding lines are not visible in this chunk). */
1531 for (i = cfg->locals_start; i < cfg->num_varinfo; i++) {
1532 MonoInst *ins = cfg->varinfo [i];
1535 locals_size += mono_type_size (ins->inst_vtype, &ialign);
1540 * Set var information according to the calling convention. arm version.
1541 * The locals var stuff should most likely be split in another method.
/*
 * Assigns a stack slot (OP_REGOFFSET) or register (OP_REGVAR) to the return
 * value, special per-arch variables, locals and incoming arguments, growing
 * `offset` upwards from SP/FP (MONO_CFG_HAS_SPILLUP).  Final frame size is
 * stored in cfg->stack_offset.
 */
1544 mono_arch_allocate_vars (MonoCompile *cfg)
1546 MonoMethodSignature *sig;
1547 MonoMethodHeader *header;
1549 int i, offset, size, align, curinst;
1553 sig = mono_method_signature (cfg->method);
/* Lazily build and cache calling-convention info. */
1555 if (!cfg->arch.cinfo)
1556 cfg->arch.cinfo = get_call_info (cfg->generic_sharing_context, cfg->mempool, sig);
1557 cinfo = cfg->arch.cinfo;
1559 mono_arch_compute_omit_fp (cfg);
/* Frame register: SP when FP is omitted, otherwise FP. */
1561 if (cfg->arch.omit_fp)
1562 cfg->frame_reg = ARMREG_SP;
1564 cfg->frame_reg = ARMREG_FP;
/* Spill slots grow upwards from the frame register on this backend. */
1566 cfg->flags |= MONO_CFG_HAS_SPILLUP;
1568 /* allow room for the vararg method args: void* and long/double */
1569 if (mono_jit_trace_calls != NULL && mono_trace_eval (cfg->method))
1570 cfg->param_area = MAX (cfg->param_area, sizeof (gpointer)*8);
1572 header = cfg->header;
1574 /* See mono_arch_get_global_int_regs () */
1575 if (cfg->flags & MONO_CFG_HAS_CALLS)
1576 cfg->uses_rgctx_reg = TRUE;
/* Keep the frame register out of the allocatable set. */
1578 if (cfg->frame_reg != ARMREG_SP)
1579 cfg->used_int_regs |= 1 << cfg->frame_reg;
1581 if (cfg->compile_aot || cfg->uses_rgctx_reg || COMPILE_LLVM (cfg))
1582 /* V5 is reserved for passing the vtable/rgctx/IMT method */
1583 cfg->used_int_regs |= (1 << ARMREG_V5);
/* Scalar (non-struct) returns live in R0. */
1587 if (!MONO_TYPE_ISSTRUCT (sig->ret) && !cinfo->vtype_retaddr) {
1588 if (sig->ret->type != MONO_TYPE_VOID) {
1589 cfg->ret->opcode = OP_REGVAR;
1590 cfg->ret->inst_c0 = ARMREG_R0;
1593 /* local vars are at a positive offset from the stack pointer */
1595 * also note that if the function uses alloca, we use FP
1596 * to point at the local variables.
1598 offset = 0; /* linkage area */
1599 /* align the offset to 16 bytes: not sure this is needed here */
1601 //offset &= ~(8 - 1);
1603 /* add parameter area size for called functions */
1604 offset += cfg->param_area;
1607 if (cfg->flags & MONO_CFG_HAS_FPOUT)
1610 /* allow room to save the return value */
1611 if (mono_jit_trace_calls != NULL && mono_trace_eval (cfg->method))
1614 /* the MonoLMF structure is stored just below the stack pointer */
/* Struct returned in registers: give cfg->ret a (negative-offset) slot. */
1615 if (cinfo->ret.storage == RegTypeStructByVal) {
1616 cfg->ret->opcode = OP_REGOFFSET;
1617 cfg->ret->inst_basereg = cfg->frame_reg;
1618 offset += sizeof (gpointer) - 1;
1619 offset &= ~(sizeof (gpointer) - 1);
1620 cfg->ret->inst_offset = - offset;
1621 offset += sizeof(gpointer);
/* Struct returned via hidden address argument: allocate vret_addr slot. */
1622 } else if (cinfo->vtype_retaddr) {
1623 ins = cfg->vret_addr;
1624 offset += sizeof(gpointer) - 1;
1625 offset &= ~(sizeof(gpointer) - 1);
1626 ins->inst_offset = offset;
1627 ins->opcode = OP_REGOFFSET;
1628 ins->inst_basereg = cfg->frame_reg;
1629 if (G_UNLIKELY (cfg->verbose_level > 1)) {
1630 printf ("vret_addr =");
1631 mono_print_ins (cfg->vret_addr);
1633 offset += sizeof(gpointer);
1636 /* Allocate these first so they have a small offset, OP_SEQ_POINT depends on this */
1637 if (cfg->arch.seq_point_info_var) {
1640 ins = cfg->arch.seq_point_info_var;
1644 offset += align - 1;
1645 offset &= ~(align - 1);
1646 ins->opcode = OP_REGOFFSET;
1647 ins->inst_basereg = cfg->frame_reg;
1648 ins->inst_offset = offset;
1651 ins = cfg->arch.ss_trigger_page_var;
1654 offset += align - 1;
1655 offset &= ~(align - 1);
1656 ins->opcode = OP_REGOFFSET;
1657 ins->inst_basereg = cfg->frame_reg;
1658 ins->inst_offset = offset;
/* Soft-breakpoint sequence-point variables (see mono_arch_create_vars). */
1662 if (cfg->arch.seq_point_read_var) {
1665 ins = cfg->arch.seq_point_read_var;
1669 offset += align - 1;
1670 offset &= ~(align - 1);
1671 ins->opcode = OP_REGOFFSET;
1672 ins->inst_basereg = cfg->frame_reg;
1673 ins->inst_offset = offset;
1676 ins = cfg->arch.seq_point_ss_method_var;
1679 offset += align - 1;
1680 offset &= ~(align - 1);
1681 ins->opcode = OP_REGOFFSET;
1682 ins->inst_basereg = cfg->frame_reg;
1683 ins->inst_offset = offset;
1686 ins = cfg->arch.seq_point_bp_method_var;
1689 offset += align - 1;
1690 offset &= ~(align - 1);
1691 ins->opcode = OP_REGOFFSET;
1692 ins->inst_basereg = cfg->frame_reg;
1693 ins->inst_offset = offset;
1697 cfg->locals_min_stack_offset = offset;
/* Assign stack slots to the method's locals. */
1699 curinst = cfg->locals_start;
1700 for (i = curinst; i < cfg->num_varinfo; ++i) {
1703 ins = cfg->varinfo [i];
1704 if ((ins->flags & MONO_INST_IS_DEAD) || ins->opcode == OP_REGVAR || ins->opcode == OP_REGOFFSET)
1707 t = ins->inst_vtype;
1708 if (cfg->gsharedvt && mini_is_gsharedvt_variable_type (cfg, t))
1711 /* inst->backend.is_pinvoke indicates native sized value types, this is used by the
1712 * pinvoke wrappers when they call functions returning structure */
1713 if (ins->backend.is_pinvoke && MONO_TYPE_ISSTRUCT (t) && t->type != MONO_TYPE_TYPEDBYREF) {
1714 size = mono_class_native_size (mono_class_from_mono_type (t), &ualign);
1718 size = mono_type_size (t, &align);
1720 /* FIXME: if a structure is misaligned, our memcpy doesn't work,
1721 * since it loads/stores misaligned words, which don't do the right thing.
1723 if (align < 4 && size >= 4)
/* Alignment padding is a GC-noref hole: mark it for the precise stack scan. */
1725 if (ALIGN_TO (offset, align) > ALIGN_TO (offset, 4))
1726 mini_gc_set_slot_type_from_fp (cfg, ALIGN_TO (offset, 4), SLOT_NOREF);
1727 offset += align - 1;
1728 offset &= ~(align - 1);
1729 ins->opcode = OP_REGOFFSET;
1730 ins->inst_offset = offset;
1731 ins->inst_basereg = cfg->frame_reg;
1733 //g_print ("allocating local %d to %d\n", i, inst->inst_offset);
1736 cfg->locals_max_stack_offset = offset;
/* Spill slot for the implicit `this` argument, if not kept in a register
 * (NOTE(review): the enclosing condition/loop lines are elided in this chunk). */
1740 ins = cfg->args [curinst];
1741 if (ins->opcode != OP_REGVAR) {
1742 ins->opcode = OP_REGOFFSET;
1743 ins->inst_basereg = cfg->frame_reg;
1744 offset += sizeof (gpointer) - 1;
1745 offset &= ~(sizeof (gpointer) - 1);
1746 ins->inst_offset = offset;
1747 offset += sizeof (gpointer);
1752 if (sig->call_convention == MONO_CALL_VARARG) {
1756 /* Allocate a local slot to hold the sig cookie address */
1757 offset += align - 1;
1758 offset &= ~(align - 1);
1759 cfg->sig_cookie = offset;
/* Assign spill slots to the remaining incoming arguments. */
1763 for (i = 0; i < sig->param_count; ++i) {
1764 ins = cfg->args [curinst];
1766 if (ins->opcode != OP_REGVAR) {
1767 ins->opcode = OP_REGOFFSET;
1768 ins->inst_basereg = cfg->frame_reg;
1769 size = mini_type_stack_size_full (cfg->generic_sharing_context, sig->params [i], &ualign, sig->pinvoke);
1771 /* FIXME: if a structure is misaligned, our memcpy doesn't work,
1772 * since it loads/stores misaligned words, which don't do the right thing.
1774 if (align < 4 && size >= 4)
1776 /* The code in the prolog () stores words when storing vtypes received in a register */
1777 if (MONO_TYPE_ISSTRUCT (sig->params [i]))
1779 if (ALIGN_TO (offset, align) > ALIGN_TO (offset, 4))
1780 mini_gc_set_slot_type_from_fp (cfg, ALIGN_TO (offset, 4), SLOT_NOREF);
1781 offset += align - 1;
1782 offset &= ~(align - 1);
1783 ins->inst_offset = offset;
1789 /* align the offset to 8 bytes */
1790 if (ALIGN_TO (offset, 8) > ALIGN_TO (offset, 4))
1791 mini_gc_set_slot_type_from_fp (cfg, ALIGN_TO (offset, 4), SLOT_NOREF);
/* Total frame size consumed by this method's slots. */
1796 cfg->stack_offset = offset;
/*
 * mono_arch_create_vars:
 * Create arch-specific MonoInst variables before allocation: the hidden
 * vtype-return-address argument, and (when sequence points are enabled)
 * the soft-breakpoint / single-step support locals.  All of the latter are
 * marked MONO_INST_VOLATILE so they always live on the stack.
 */
1800 mono_arch_create_vars (MonoCompile *cfg)
1802 MonoMethodSignature *sig;
1805 sig = mono_method_signature (cfg->method);
/* Lazily build and cache calling-convention info. */
1807 if (!cfg->arch.cinfo)
1808 cfg->arch.cinfo = get_call_info (cfg->generic_sharing_context, cfg->mempool, sig);
1809 cinfo = cfg->arch.cinfo;
1811 if (cinfo->ret.storage == RegTypeStructByVal)
1812 cfg->ret_var_is_local = TRUE;
/* Struct returned via hidden pointer: model it as an extra argument. */
1814 if (cinfo->vtype_retaddr) {
1815 cfg->vret_addr = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_ARG);
1816 if (G_UNLIKELY (cfg->verbose_level > 1)) {
1817 printf ("vret_addr = ");
1818 mono_print_ins (cfg->vret_addr);
1822 if (cfg->gen_seq_points) {
/* Soft breakpoints: three locals read at every sequence point. */
1823 if (cfg->soft_breakpoints) {
1824 MonoInst *ins = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1825 ins->flags |= MONO_INST_VOLATILE;
1826 cfg->arch.seq_point_read_var = ins;
1828 ins = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1829 ins->flags |= MONO_INST_VOLATILE;
1830 cfg->arch.seq_point_ss_method_var = ins;
1832 ins = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1833 ins->flags |= MONO_INST_VOLATILE;
1834 cfg->arch.seq_point_bp_method_var = ins;
/* Soft breakpoints and AOT are mutually exclusive here. */
1836 g_assert (!cfg->compile_aot);
1837 } else if (cfg->compile_aot) {
1838 MonoInst *ins = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1839 ins->flags |= MONO_INST_VOLATILE;
1840 cfg->arch.seq_point_info_var = ins;
1842 /* Allocate a separate variable for this to save 1 load per seq point */
1843 ins = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1844 ins->flags |= MONO_INST_VOLATILE;
1845 cfg->arch.ss_trigger_page_var = ins;
/*
 * emit_sig_cookie:
 * Emit the vararg signature cookie store for CALL: duplicates the call
 * signature trimmed to the post-sentinel arguments and stores a pointer to
 * it at the cookie's stack offset.  No-op for tail calls.
 */
1851 emit_sig_cookie (MonoCompile *cfg, MonoCallInst *call, CallInfo *cinfo)
1853 MonoMethodSignature *tmp_sig;
/* Tail calls don't push a cookie (early return; body elided in this view). */
1856 if (call->tail_call)
/* The cookie is always passed on the stack on ARM. */
1859 g_assert (cinfo->sig_cookie.storage == RegTypeBase)...
1862 * mono_ArgIterator_Setup assumes the signature cookie is
1863 * passed first and all the arguments which were before it are
1864 * passed on the stack after the signature. So compensate by
1865 * passing a different signature.
1867 tmp_sig = mono_metadata_signature_dup (call->signature);
1868 tmp_sig->param_count -= call->signature->sentinelpos;
1869 tmp_sig->sentinelpos = 0;
1870 memcpy (tmp_sig->params, call->signature->params + call->signature->sentinelpos, tmp_sig->param_count * sizeof (MonoType*));
/* Materialize the signature constant and store it at the cookie slot. */
1872 sig_reg = mono_alloc_ireg (cfg);
1873 MONO_EMIT_NEW_SIGNATURECONST (cfg, sig_reg, tmp_sig);
1875 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, ARMREG_SP, cinfo->sig_cookie.offset, sig_reg);
/*
 * mono_arch_get_llvm_call_info:
 * Translate this backend's CallInfo into LLVMCallInfo for the LLVM code
 * path.  Conventions LLVM can't express are reported by setting
 * cfg->disable_llvm with an explanatory message (falling back to the JIT).
 */
1880 mono_arch_get_llvm_call_info (MonoCompile *cfg, MonoMethodSignature *sig)
1885 LLVMCallInfo *linfo;
1887 n = sig->param_count + sig->hasthis;
1889 cinfo = get_call_info (cfg->generic_sharing_context, cfg->mempool, sig);
/* One LLVMArgInfo per argument, allocated from the compile mempool. */
1891 linfo = mono_mempool_alloc0 (cfg->mempool, sizeof (LLVMCallInfo) + (sizeof (LLVMArgInfo) * n));
1894 * LLVM always uses the native ABI while we use our own ABI, the
1895 * only difference is the handling of vtypes:
1896 * - we only pass/receive them in registers in some cases, and only
1897 * in 1 or 2 integer registers.
1899 if (cinfo->vtype_retaddr) {
1900 /* Vtype returned using a hidden argument */
1901 linfo->ret.storage = LLVMArgVtypeRetAddr;
1902 linfo->vret_arg_index = cinfo->vret_arg_index;
/* Any other non-scalar return convention is unsupported under LLVM. */
1903 } else if (cinfo->ret.storage != RegTypeGeneral && cinfo->ret.storage != RegTypeNone && cinfo->ret.storage != RegTypeFP && cinfo->ret.storage != RegTypeIRegPair) {
1904 cfg->exception_message = g_strdup ("unknown ret conv");
1905 cfg->disable_llvm = TRUE;
/* Map each argument's storage class to its LLVM counterpart. */
1909 for (i = 0; i < n; ++i) {
1910 ainfo = cinfo->args + i;
1912 linfo->args [i].storage = LLVMArgNone;
1914 switch (ainfo->storage) {
1915 case RegTypeGeneral:
1916 case RegTypeIRegPair:
1918 linfo->args [i].storage = LLVMArgInIReg;
1920 case RegTypeStructByVal:
1921 // FIXME: Passing entirely on the stack or split reg/stack
/* Small structs passed fully in 1-2 integer registers are expressible. */
1922 if (ainfo->vtsize == 0 && ainfo->size <= 2) {
1923 linfo->args [i].storage = LLVMArgVtypeInReg;
1924 linfo->args [i].pair_storage [0] = LLVMArgInIReg;
1925 if (ainfo->size == 2)
1926 linfo->args [i].pair_storage [1] = LLVMArgInIReg;
1928 linfo->args [i].pair_storage [1] = LLVMArgNone;
1930 cfg->exception_message = g_strdup_printf ("vtype-by-val on stack");
1931 cfg->disable_llvm = TRUE;
1935 cfg->exception_message = g_strdup_printf ("ainfo->storage (%d)", ainfo->storage);
1936 cfg->disable_llvm = TRUE;
/*
 * mono_arch_emit_call:
 * Lower a call's outgoing arguments to IR according to the ARM calling
 * convention computed by get_call_info (): moves into argument registers,
 * stores into the outgoing stack area, signature cookies for varargs, and
 * the hidden return-value address for by-address vtype returns.
 */
1946 mono_arch_emit_call (MonoCompile *cfg, MonoCallInst *call)
1949 MonoMethodSignature *sig;
1953 sig = call->signature;
1954 n = sig->param_count + sig->hasthis;
1956 cinfo = get_call_info (cfg->generic_sharing_context, NULL, sig);
1958 for (i = 0; i < n; ++i) {
1959 ArgInfo *ainfo = cinfo->args + i;
/* `this` is treated as an int-sized argument. */
1962 if (i >= sig->hasthis)
1963 t = sig->params [i - sig->hasthis];
1965 t = &mono_defaults.int_class->byval_arg;
1966 t = mini_type_get_underlying_type (cfg->generic_sharing_context, t);
1968 if ((sig->call_convention == MONO_CALL_VARARG) && (i == sig->sentinelpos)) {
1969 /* Emit the signature cookie just before the implicit arguments */
1970 emit_sig_cookie (cfg, call, cinfo);
1973 in = call->args [i];
1975 switch (ainfo->storage) {
1976 case RegTypeGeneral:
1977 case RegTypeIRegPair:
/* 64-bit integers occupy a register pair: move each 32-bit half. */
1978 if (!t->byref && ((t->type == MONO_TYPE_I8) || (t->type == MONO_TYPE_U8))) {
1979 MONO_INST_NEW (cfg, ins, OP_MOVE);
1980 ins->dreg = mono_alloc_ireg (cfg);
1981 ins->sreg1 = in->dreg + 1;
1982 MONO_ADD_INS (cfg->cbb, ins);
1983 mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, ainfo->reg, FALSE);
1985 MONO_INST_NEW (cfg, ins, OP_MOVE);
1986 ins->dreg = mono_alloc_ireg (cfg);
1987 ins->sreg1 = in->dreg + 2;
1988 MONO_ADD_INS (cfg->cbb, ins);
1989 mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, ainfo->reg + 1, FALSE);
/* Floats passed in integer registers (softfp-style marshalling). */
1990 } else if (!t->byref && ((t->type == MONO_TYPE_R8) || (t->type == MONO_TYPE_R4))) {
1991 if (ainfo->size == 4) {
1992 if (IS_SOFT_FLOAT) {
1993 /* mono_emit_call_args () have already done the r8->r4 conversion */
1994 /* The converted value is in an int vreg */
1995 MONO_INST_NEW (cfg, ins, OP_MOVE);
1996 ins->dreg = mono_alloc_ireg (cfg);
1997 ins->sreg1 = in->dreg;
1998 MONO_ADD_INS (cfg->cbb, ins);
1999 mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, ainfo->reg, FALSE);
/* VFP: bounce the float through the param area to get it into an ireg. */
2003 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORER4_MEMBASE_REG, ARMREG_SP, (cfg->param_area - 8), in->dreg);
2004 creg = mono_alloc_ireg (cfg);
2005 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOAD_MEMBASE, creg, ARMREG_SP, (cfg->param_area - 8));
2006 mono_call_inst_add_outarg_reg (cfg, call, creg, ainfo->reg, FALSE);
/* 8-byte double: split across two consecutive integer registers. */
2009 if (IS_SOFT_FLOAT) {
2010 MONO_INST_NEW (cfg, ins, OP_FGETLOW32);
2011 ins->dreg = mono_alloc_ireg (cfg);
2012 ins->sreg1 = in->dreg;
2013 MONO_ADD_INS (cfg->cbb, ins);
2014 mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, ainfo->reg, FALSE);
2016 MONO_INST_NEW (cfg, ins, OP_FGETHIGH32);
2017 ins->dreg = mono_alloc_ireg (cfg);
2018 ins->sreg1 = in->dreg;
2019 MONO_ADD_INS (cfg->cbb, ins);
2020 mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, ainfo->reg + 1, FALSE);
/* VFP: spill the double and reload both 32-bit halves into iregs. */
2024 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORER8_MEMBASE_REG, ARMREG_SP, (cfg->param_area - 8), in->dreg);
2025 creg = mono_alloc_ireg (cfg);
2026 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOAD_MEMBASE, creg, ARMREG_SP, (cfg->param_area - 8));
2027 mono_call_inst_add_outarg_reg (cfg, call, creg, ainfo->reg, FALSE);
2028 creg = mono_alloc_ireg (cfg);
2029 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOAD_MEMBASE, creg, ARMREG_SP, (cfg->param_area - 8 + 4));
2030 mono_call_inst_add_outarg_reg (cfg, call, creg, ainfo->reg + 1, FALSE);
2033 cfg->flags |= MONO_CFG_HAS_FPOUT;
/* Plain pointer-sized argument in a single register. */
2035 MONO_INST_NEW (cfg, ins, OP_MOVE);
2036 ins->dreg = mono_alloc_ireg (cfg);
2037 ins->sreg1 = in->dreg;
2038 MONO_ADD_INS (cfg->cbb, ins);
2040 mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, ainfo->reg, FALSE);
2043 case RegTypeStructByAddr:
2046 /* FIXME: where si the data allocated? */
2047 arg->backend.reg3 = ainfo->reg;
2048 call->used_iregs |= 1 << ainfo->reg;
/* Unreachable on this backend: by-addr vtype args are not produced here. */
2049 g_assert_not_reached ();
2052 case RegTypeStructByVal:
2053 case RegTypeGSharedVtInReg:
2054 case RegTypeGSharedVtOnStack:
/* Vtype arguments are lowered later by mono_arch_emit_outarg_vt (). */
2055 MONO_INST_NEW (cfg, ins, OP_OUTARG_VT);
2056 ins->opcode = OP_OUTARG_VT;
2057 ins->sreg1 = in->dreg;
2058 ins->klass = in->klass;
2059 ins->inst_p0 = call;
2060 ins->inst_p1 = mono_mempool_alloc (cfg->mempool, sizeof (ArgInfo));
2061 memcpy (ins->inst_p1, ainfo, sizeof (ArgInfo));
2062 mono_call_inst_add_outarg_vt (cfg, call, ins);
2063 MONO_ADD_INS (cfg->cbb, ins);
/* RegTypeBase: argument passed entirely on the outgoing stack. */
2066 if (!t->byref && ((t->type == MONO_TYPE_I8) || (t->type == MONO_TYPE_U8))) {
2067 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, ARMREG_SP, ainfo->offset, in->dreg);
2068 } else if (!t->byref && ((t->type == MONO_TYPE_R4) || (t->type == MONO_TYPE_R8))) {
2069 if (t->type == MONO_TYPE_R8) {
2070 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORER8_MEMBASE_REG, ARMREG_SP, ainfo->offset, in->dreg);
2073 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, ARMREG_SP, ainfo->offset, in->dreg);
2075 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORER4_MEMBASE_REG, ARMREG_SP, ainfo->offset, in->dreg);
2078 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, ARMREG_SP, ainfo->offset, in->dreg);
2081 case RegTypeBaseGen:
/* Argument split between the last register (R3) and the stack. */
2082 if (!t->byref && ((t->type == MONO_TYPE_I8) || (t->type == MONO_TYPE_U8))) {
2083 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, ARMREG_SP, ainfo->offset, (G_BYTE_ORDER == G_BIG_ENDIAN) ? in->dreg + 1 : in->dreg + 2);
2084 MONO_INST_NEW (cfg, ins, OP_MOVE);
2085 ins->dreg = mono_alloc_ireg (cfg);
2086 ins->sreg1 = G_BYTE_ORDER == G_BIG_ENDIAN ? in->dreg + 2 : in->dreg + 1;
2087 MONO_ADD_INS (cfg->cbb, ins);
2088 mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, ARMREG_R3, FALSE);
2089 } else if (!t->byref && (t->type == MONO_TYPE_R8)) {
2092 /* This should work for soft-float as well */
2094 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORER8_MEMBASE_REG, ARMREG_SP, (cfg->param_area - 8), in->dreg);
2095 creg = mono_alloc_ireg (cfg);
2096 mono_call_inst_add_outarg_reg (cfg, call, creg, ARMREG_R3, FALSE);
2097 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOAD_MEMBASE, creg, ARMREG_SP, (cfg->param_area - 8));
2098 creg = mono_alloc_ireg (cfg);
2099 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOAD_MEMBASE, creg, ARMREG_SP, (cfg->param_area - 4));
2100 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, ARMREG_SP, ainfo->offset, creg);
2101 cfg->flags |= MONO_CFG_HAS_FPOUT;
2103 g_assert_not_reached ();
/* RegTypeFP path (NOTE(review): surrounding case label elided in this view). */
2110 arg->backend.reg3 = ainfo->reg;
2111 /* FP args are passed in int regs */
2112 call->used_iregs |= 1 << ainfo->reg;
2113 if (ainfo->size == 8) {
2114 arg->opcode = OP_OUTARG_R8;
2115 call->used_iregs |= 1 << (ainfo->reg + 1);
2117 arg->opcode = OP_OUTARG_R4;
2120 cfg->flags |= MONO_CFG_HAS_FPOUT;
2124 g_assert_not_reached ();
2128 /* Handle the case where there are no implicit arguments */
2129 if (!sig->pinvoke && (sig->call_convention == MONO_CALL_VARARG) && (n == sig->sentinelpos))
2130 emit_sig_cookie (cfg, call, cinfo);
2132 if (cinfo->ret.storage == RegTypeStructByVal) {
2133 /* The JIT will transform this into a normal call */
2134 call->vret_in_reg = TRUE;
2135 } else if (cinfo->vtype_retaddr) {
/* Pass the hidden return-buffer address in the designated register. */
2137 MONO_INST_NEW (cfg, vtarg, OP_MOVE);
2138 vtarg->sreg1 = call->vret_var->dreg;
2139 vtarg->dreg = mono_alloc_preg (cfg);
2140 MONO_ADD_INS (cfg->cbb, vtarg);
2142 mono_call_inst_add_outarg_reg (cfg, call, vtarg->dreg, cinfo->ret.reg, FALSE);
2145 call->stack_usage = cinfo->stack_usage;
/*
 * mono_arch_emit_outarg_vt:
 * Lower an OP_OUTARG_VT created by mono_arch_emit_call (): copy the first
 * `ainfo->size` words of the struct into argument registers (with sub-word
 * packing for 1/2/3-byte structs) and memcpy the overflow onto the stack.
 * GSharedVt arguments are passed by address instead.
 */
2151 mono_arch_emit_outarg_vt (MonoCompile *cfg, MonoInst *ins, MonoInst *src)
2153 MonoCallInst *call = (MonoCallInst*)ins->inst_p0;
2154 ArgInfo *ainfo = ins->inst_p1;
2155 int ovf_size = ainfo->vtsize;
2156 int doffset = ainfo->offset;
2157 int struct_size = ainfo->struct_size;
2158 int i, soffset, dreg, tmpreg;
/* GSharedVt in a register: just pass the address. */
2160 if (ainfo->storage == RegTypeGSharedVtInReg) {
2162 mono_call_inst_add_outarg_reg (cfg, call, src->dreg, ainfo->reg, FALSE);
2165 if (ainfo->storage == RegTypeGSharedVtOnStack) {
2166 /* Pass by addr on stack */
2167 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, ARMREG_SP, ainfo->offset, src->dreg);
/* Copy the register-resident portion word by word. */
2172 for (i = 0; i < ainfo->size; ++i) {
2173 dreg = mono_alloc_ireg (cfg);
2174 switch (struct_size) {
2176 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, dreg, src->dreg, soffset);
2179 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, dreg, src->dreg, soffset);
/* 3-byte remainder: assemble from three byte loads to avoid an
 * out-of-bounds 4-byte read past the end of the struct. */
2182 tmpreg = mono_alloc_ireg (cfg);
2183 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, dreg, src->dreg, soffset);
2184 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, tmpreg, src->dreg, soffset + 1);
2185 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, tmpreg, tmpreg, 8);
2186 MONO_EMIT_NEW_BIALU (cfg, OP_IOR, dreg, dreg, tmpreg);
2187 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, tmpreg, src->dreg, soffset + 2);
2188 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, tmpreg, tmpreg, 16);
2189 MONO_EMIT_NEW_BIALU (cfg, OP_IOR, dreg, dreg, tmpreg);
2192 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, dreg, src->dreg, soffset);
2195 mono_call_inst_add_outarg_reg (cfg, call, dreg, ainfo->reg + i, FALSE);
2196 soffset += sizeof (gpointer);
2197 struct_size -= sizeof (gpointer);
2199 //g_print ("vt size: %d at R%d + %d\n", doffset, vt->inst_basereg, vt->inst_offset);
/* Remaining bytes go to the outgoing stack area. */
2201 mini_emit_memcpy (cfg, ARMREG_SP, doffset, src->dreg, soffset, MIN (ovf_size * sizeof (gpointer), struct_size), struct_size < 4 ? 1 : 4);
/*
 * mono_arch_emit_setret:
 * Emit the IR that moves `val` into the method's return location:
 * OP_SETLRET for 64-bit pairs, OP_SETFRET for floats (per FPU mode),
 * and a plain OP_MOVE for everything else.
 */
2205 mono_arch_emit_setret (MonoCompile *cfg, MonoMethod *method, MonoInst *val)
2207 MonoType *ret = mini_type_get_underlying_type (cfg->generic_sharing_context, mono_method_signature (method)->ret);
/* 64-bit integers: both 32-bit halves must end up in the return pair. */
2210 if (ret->type == MONO_TYPE_I8 || ret->type == MONO_TYPE_U8) {
2213 if (COMPILE_LLVM (cfg)) {
2214 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, cfg->ret->dreg, val->dreg);
2216 MONO_INST_NEW (cfg, ins, OP_SETLRET);
2217 ins->sreg1 = val->dreg + 1;
2218 ins->sreg2 = val->dreg + 2;
2219 MONO_ADD_INS (cfg->cbb, ins);
/* Float returns depend on the configured FPU mode (switch elided here). */
2224 case MONO_ARM_FPU_NONE:
2225 if (ret->type == MONO_TYPE_R8) {
2228 MONO_INST_NEW (cfg, ins, OP_SETFRET);
2229 ins->dreg = cfg->ret->dreg;
2230 ins->sreg1 = val->dreg;
2231 MONO_ADD_INS (cfg->cbb, ins);
2234 if (ret->type == MONO_TYPE_R4) {
2235 /* Already converted to an int in method_to_ir () */
2236 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, cfg->ret->dreg, val->dreg);
2240 case MONO_ARM_FPU_VFP:
2241 if (ret->type == MONO_TYPE_R8 || ret->type == MONO_TYPE_R4) {
2244 MONO_INST_NEW (cfg, ins, OP_SETFRET);
2245 ins->dreg = cfg->ret->dreg;
2246 ins->sreg1 = val->dreg;
2247 MONO_ADD_INS (cfg->cbb, ins);
2252 g_assert_not_reached ();
/* Default: scalar return via a simple move into the return vreg. */
2256 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, cfg->ret->dreg, val->dreg);
2259 #endif /* #ifndef DISABLE_JIT */
/* Whether `imm` can be encoded as an inline immediate (body elided in this view). */
2262 mono_arch_is_inst_imm (gint64 imm)
/* Max number of stack-passed argument words supported by the dyn-call path. */
2267 #define DYN_CALL_STACK_ARGS 6
/* Fields below belong to the dyn-call support structs (ArchDynCallInfo /
 * DynCallArgs — struct headers elided in this view). */
2270 MonoMethodSignature *sig;
/* Argument registers followed by up to DYN_CALL_STACK_ARGS stack words. */
2275 mgreg_t regs [PARAM_REGS + DYN_CALL_STACK_ARGS];
/*
 * dyn_call_supported:
 * Whether the dynamic-call path can handle this signature: the total
 * argument count must fit in PARAM_REGS + DYN_CALL_STACK_ARGS words and
 * every argument/return storage class must be one the dispatcher supports.
 */
2281 dyn_call_supported (CallInfo *cinfo, MonoMethodSignature *sig)
2285 if (sig->hasthis + sig->param_count > PARAM_REGS + DYN_CALL_STACK_ARGS)
/* Supported return conventions (other cases elided in this view). */
2288 switch (cinfo->ret.storage) {
2290 case RegTypeGeneral:
2291 case RegTypeIRegPair:
2292 case RegTypeStructByAddr:
/* Per-argument storage checks. */
2303 for (i = 0; i < cinfo->nargs; ++i) {
2304 switch (cinfo->args [i].storage) {
2305 case RegTypeGeneral:
2307 case RegTypeIRegPair:
/* Stack arguments must fit in the fixed regs[] overflow area. */
2310 if (cinfo->args [i].offset >= (DYN_CALL_STACK_ARGS * sizeof (gpointer)))
2313 case RegTypeStructByVal:
2314 if (cinfo->args [i].reg + cinfo->args [i].vtsize >= PARAM_REGS + DYN_CALL_STACK_ARGS)
2322 // FIXME: Can't use cinfo only as it doesn't contain info about I8/float */
2323 for (i = 0; i < sig->param_count; ++i) {
2324 MonoType *t = sig->params [i];
/*
 * mono_arch_dyn_call_prepare:
 * Build the reusable per-signature info for dynamic calls, or NULL-path
 * out (elided here) when dyn_call_supported () rejects the signature.
 * Caller frees the result with mono_arch_dyn_call_free ().
 */
2350 mono_arch_dyn_call_prepare (MonoMethodSignature *sig)
2352 ArchDynCallInfo *info;
/* CallInfo is heap-allocated (no mempool): freed in mono_arch_dyn_call_free (). */
2355 cinfo = get_call_info (NULL, NULL, sig);
2357 if (!dyn_call_supported (cinfo, sig)) {
2362 info = g_new0 (ArchDynCallInfo, 1);
2363 // FIXME: Preprocess the info to speed up start_dyn_call ()
2365 info->cinfo = cinfo;
2367 return (MonoDynCallInfo*)info;
/* Free the info returned by mono_arch_dyn_call_prepare () (and its CallInfo). */
2371 mono_arch_dyn_call_free (MonoDynCallInfo *info)
2373 ArchDynCallInfo *ainfo = (ArchDynCallInfo*)info;
2375 g_free (ainfo->cinfo);
/*
 * mono_arch_start_dyn_call:
 * Marshal the boxed argument array `args` into the DynCallArgs buffer
 * `buf`: argument registers first, then the fixed stack-overflow words
 * (slot = PARAM_REGS + offset/4).  `ret` is remembered for the hidden
 * vtype-return pointer when needed.
 */
2380 mono_arch_start_dyn_call (MonoDynCallInfo *info, gpointer **args, guint8 *ret, guint8 *buf, int buf_len)
2382 ArchDynCallInfo *dinfo = (ArchDynCallInfo*)info;
2383 DynCallArgs *p = (DynCallArgs*)buf;
2384 int arg_index, greg, i, j, pindex;
2385 MonoMethodSignature *sig = dinfo->sig;
2387 g_assert (buf_len >= sizeof (DynCallArgs));
/* `this` (or a leading vret pointer) goes in the first register. */
2396 if (sig->hasthis || dinfo->cinfo->vret_arg_index == 1) {
2397 p->regs [greg ++] = (mgreg_t)*(args [arg_index ++]);
/* Hidden return-buffer address for by-address vtype returns. */
2402 if (dinfo->cinfo->vtype_retaddr)
2403 p->regs [greg ++] = (mgreg_t)ret;
2405 for (i = pindex; i < sig->param_count; i++) {
2406 MonoType *t = mono_type_get_underlying_type (sig->params [i]);
2407 gpointer *arg = args [arg_index ++];
2408 ArgInfo *ainfo = &dinfo->cinfo->args [i + sig->hasthis];
/* Compute the regs[] slot: registers directly, stack args after PARAM_REGS. */
2411 if (ainfo->storage == RegTypeGeneral || ainfo->storage == RegTypeIRegPair || ainfo->storage == RegTypeStructByVal)
2413 else if (ainfo->storage == RegTypeBase)
2414 slot = PARAM_REGS + (ainfo->offset / 4);
2416 g_assert_not_reached ();
/* byref arguments: store the pointer itself (enclosing test elided). */
2419 p->regs [slot] = (mgreg_t)*arg;
/* Unbox by type; sub-word values are widened into a full register word. */
2424 case MONO_TYPE_STRING:
2425 case MONO_TYPE_CLASS:
2426 case MONO_TYPE_ARRAY:
2427 case MONO_TYPE_SZARRAY:
2428 case MONO_TYPE_OBJECT:
2432 p->regs [slot] = (mgreg_t)*arg;
2434 case MONO_TYPE_BOOLEAN:
2436 p->regs [slot] = *(guint8*)arg;
2439 p->regs [slot] = *(gint8*)arg;
2442 p->regs [slot] = *(gint16*)arg;
2445 case MONO_TYPE_CHAR:
2446 p->regs [slot] = *(guint16*)arg;
2449 p->regs [slot] = *(gint32*)arg;
2452 p->regs [slot] = *(guint32*)arg;
/* 64-bit values occupy two consecutive slots. */
2456 p->regs [slot ++] = (mgreg_t)arg [0];
2457 p->regs [slot] = (mgreg_t)arg [1];
2460 p->regs [slot] = *(mgreg_t*)arg;
2463 p->regs [slot ++] = (mgreg_t)arg [0];
2464 p->regs [slot] = (mgreg_t)arg [1];
2466 case MONO_TYPE_GENERICINST:
2467 if (MONO_TYPE_IS_REFERENCE (t)) {
2468 p->regs [slot] = (mgreg_t)*arg;
2473 case MONO_TYPE_VALUETYPE:
2474 g_assert (ainfo->storage == RegTypeStructByVal);
/* size == 0: struct passed entirely on the stack. */
2476 if (ainfo->size == 0)
2477 slot = PARAM_REGS + (ainfo->offset / 4);
/* Copy the struct word by word into consecutive slots. */
2481 for (j = 0; j < ainfo->size + ainfo->vtsize; ++j)
2482 p->regs [slot ++] = ((mgreg_t*)arg) [j];
2485 g_assert_not_reached ();
/*
 * mono_arch_finish_dyn_call:
 * Copy the raw return value captured in the DynCallArgs buffer (res/res2 =
 * R0/R1) into the caller-provided `ret` buffer, widened/narrowed per the
 * signature's return type.
 */
2491 mono_arch_finish_dyn_call (MonoDynCallInfo *info, guint8 *buf)
2493 ArchDynCallInfo *ainfo = (ArchDynCallInfo*)info;
2494 MonoMethodSignature *sig = ((ArchDynCallInfo*)info)->sig;
2495 guint8 *ret = ((DynCallArgs*)buf)->ret;
2496 mgreg_t res = ((DynCallArgs*)buf)->res;
2497 mgreg_t res2 = ((DynCallArgs*)buf)->res2;
2499 switch (mono_type_get_underlying_type (sig->ret)->type) {
2500 case MONO_TYPE_VOID:
2501 *(gpointer*)ret = NULL;
2503 case MONO_TYPE_STRING:
2504 case MONO_TYPE_CLASS:
2505 case MONO_TYPE_ARRAY:
2506 case MONO_TYPE_SZARRAY:
2507 case MONO_TYPE_OBJECT:
2511 *(gpointer*)ret = (gpointer)res;
2517 case MONO_TYPE_BOOLEAN:
2518 *(guint8*)ret = res;
2521 *(gint16*)ret = res;
2524 case MONO_TYPE_CHAR:
2525 *(guint16*)ret = res;
2528 *(gint32*)ret = res;
2531 *(guint32*)ret = res;
2535 /* This handles endianness as well */
2536 ((gint32*)ret) [0] = res;
2537 ((gint32*)ret) [1] = res2;
2539 case MONO_TYPE_GENERICINST:
2540 if (MONO_TYPE_IS_REFERENCE (sig->ret)) {
2541 *(gpointer*)ret = (gpointer)res;
2546 case MONO_TYPE_VALUETYPE:
/* Vtype was written directly through the hidden return address. */
2547 g_assert (ainfo->cinfo->vtype_retaddr);
/* Float return: reinterpret the raw register bits (R4 case). */
2552 *(float*)ret = *(float*)&res;
2554 case MONO_TYPE_R8: {
2561 *(double*)ret = *(double*)&regs;
2565 g_assert_not_reached ();
2572 * Allow tracing to work with this interface (with an optional argument)
/*
 * Emits the enter-trace call: R0 = method, R1 = NULL (frame placeholder),
 * then indirect call to `func` through R2.
 */
2576 mono_arch_instrument_prolog (MonoCompile *cfg, void *func, void *p, gboolean enable_arguments)
2580 code = mono_arm_emit_load_imm (code, ARMREG_R0, (guint32)cfg->method);
2581 ARM_MOV_REG_IMM8 (code, ARMREG_R1, 0); /* NULL ebp for now */
2582 code = mono_arm_emit_load_imm (code, ARMREG_R2, (guint32)func);
2583 code = emit_call_reg (code, ARMREG_R2);
/*
 * mono_arch_instrument_epilog_full:
 * Emit the leave-trace call: save the live return value (R0, R0/R1 pair,
 * FP or struct depending on the return type) into the param area, call
 * `func` with the method in R0, then restore the saved return value.
 */
2596 mono_arch_instrument_epilog_full (MonoCompile *cfg, void *func, void *p, gboolean enable_arguments, gboolean preserve_argument_registers)
2599 int save_mode = SAVE_NONE;
2601 MonoMethod *method = cfg->method;
2602 int rtype = mini_type_get_underlying_type (cfg->generic_sharing_context, mono_method_signature (method)->ret)->type;
2603 int save_offset = cfg->param_area;
/* Make sure the buffer can hold the instrumentation sequence. */
2607 offset = code - cfg->native_code;
2608 /* we need about 16 instructions */
2609 if (offset > (cfg->code_size - 16 * 4)) {
2610 cfg->code_size *= 2;
2611 cfg->native_code = g_realloc (cfg->native_code, cfg->code_size);
2612 code = cfg->native_code + offset;
/* Pick a save mode from the return type (switch header elided here). */
2615 case MONO_TYPE_VOID:
2616 /* special case string .ctor icall */
2617 if (strcmp (".ctor", method->name) && method->klass == mono_defaults.string_class)
2618 save_mode = SAVE_ONE;
2620 save_mode = SAVE_NONE;
2624 save_mode = SAVE_TWO;
2628 save_mode = SAVE_FP;
2630 case MONO_TYPE_VALUETYPE:
2631 save_mode = SAVE_STRUCT;
2634 save_mode = SAVE_ONE;
/* Save the return value and set up trace-function arguments. */
2638 switch (save_mode) {
2640 ARM_STR_IMM (code, ARMREG_R0, cfg->frame_reg, save_offset);
2641 ARM_STR_IMM (code, ARMREG_R1, cfg->frame_reg, save_offset + 4);
2642 if (enable_arguments) {
2643 ARM_MOV_REG_REG (code, ARMREG_R2, ARMREG_R1);
2644 ARM_MOV_REG_REG (code, ARMREG_R1, ARMREG_R0);
2648 ARM_STR_IMM (code, ARMREG_R0, cfg->frame_reg, save_offset);
2649 if (enable_arguments) {
2650 ARM_MOV_REG_REG (code, ARMREG_R1, ARMREG_R0);
2654 /* FIXME: what reg? */
2655 if (enable_arguments) {
2656 /* FIXME: what reg? */
2660 if (enable_arguments) {
2661 /* FIXME: get the actual address */
2662 ARM_MOV_REG_REG (code, ARMREG_R1, ARMREG_R0);
/* Call the tracing function with the method in R0. */
2670 code = mono_arm_emit_load_imm (code, ARMREG_R0, (guint32)cfg->method);
2671 code = mono_arm_emit_load_imm (code, ARMREG_IP, (guint32)func);
2672 code = emit_call_reg (code, ARMREG_IP);
/* Restore the saved return value. */
2674 switch (save_mode) {
2676 ARM_LDR_IMM (code, ARMREG_R0, cfg->frame_reg, save_offset);
2677 ARM_LDR_IMM (code, ARMREG_R1, cfg->frame_reg, save_offset + 4);
2680 ARM_LDR_IMM (code, ARMREG_R0, cfg->frame_reg, save_offset);
2694 * The immediate field for cond branches is big enough for all reasonable methods
/*
 * Branch/exception emission macros.
 * EMIT_COND_BRANCH_FLAGS: emit a conditional branch to ins->inst_true_bb.
 * The direct-offset fast path is disabled (`if (0 && ...)`); instead a
 * MONO_PATCH_INFO_BB patch is recorded and a placeholder B<cond> 0 emitted,
 * to be fixed up by arm_patch later.
 * EMIT_COND_SYSTEM_EXCEPTION_FLAGS: emit a conditional BL to an exception
 * throw stub, recorded as a MONO_PATCH_INFO_EXC patch.
 * (No comments are inserted between the lines below: they are
 * backslash-continued macro bodies.)
 */
2696 #define EMIT_COND_BRANCH_FLAGS(ins,condcode) \
2697 if (0 && ins->inst_true_bb->native_offset) { \
2698 ARM_B_COND (code, (condcode), (code - cfg->native_code + ins->inst_true_bb->native_offset) & 0xffffff); \
2700 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_BB, ins->inst_true_bb); \
2701 ARM_B_COND (code, (condcode), 0); \
2704 #define EMIT_COND_BRANCH(ins,cond) EMIT_COND_BRANCH_FLAGS(ins, branch_cc_table [(cond)])
2706 /* emit an exception if condition is fail
2708 * We assign the extra code used to throw the implicit exceptions
2709 * to cfg->bb_exit as far as the big branch handling is concerned
2711 #define EMIT_COND_SYSTEM_EXCEPTION_FLAGS(condcode,exc_name) \
2713 mono_add_patch_info (cfg, code - cfg->native_code, \
2714 MONO_PATCH_INFO_EXC, exc_name); \
2715 ARM_BL_COND (code, (condcode), 0); \
2718 #define EMIT_COND_SYSTEM_EXCEPTION(cond,exc_name) EMIT_COND_SYSTEM_EXCEPTION_FLAGS(branch_cc_table [(cond)], (exc_name))
2721 mono_arch_peephole_pass_1 (MonoCompile *cfg, MonoBasicBlock *bb)
/*
 * mono_arch_peephole_pass_2:
 * Classic store/load forwarding peephole over one basic block: a load that
 * immediately follows a store (or load) of the same [basereg + offset] is
 * deleted or rewritten into a cheaper OP_MOVE / OP_ICONST / conversion op.
 */
2726 mono_arch_peephole_pass_2 (MonoCompile *cfg, MonoBasicBlock *bb)
2728 MonoInst *ins, *n, *last_ins = NULL;
2730 MONO_BB_FOR_EACH_INS_SAFE (bb, n, ins) {
2731 switch (ins->opcode) {
2734 /* Already done by an arch-independent pass */
2736 case OP_LOAD_MEMBASE:
2737 case OP_LOADI4_MEMBASE:
/* store followed by load of the same slot -> reuse the stored register */
2739 * OP_STORE_MEMBASE_REG reg, offset(basereg)
2740 * OP_LOAD_MEMBASE offset(basereg), reg
2742 if (last_ins && (last_ins->opcode == OP_STOREI4_MEMBASE_REG
2743 || last_ins->opcode == OP_STORE_MEMBASE_REG) &&
2744 ins->inst_basereg == last_ins->inst_destbasereg &&
2745 ins->inst_offset == last_ins->inst_offset) {
2746 if (ins->dreg == last_ins->sreg1) {
/* load target already holds the value: drop the load entirely */
2747 MONO_DELETE_INS (bb, ins);
2750 //static int c = 0; printf ("MATCHX %s %d\n", cfg->method->name,c++);
2751 ins->opcode = OP_MOVE;
2752 ins->sreg1 = last_ins->sreg1;
/* two loads from the same slot -> second becomes a register move */
2756 * Note: reg1 must be different from the basereg in the second load
2757 * OP_LOAD_MEMBASE offset(basereg), reg1
2758 * OP_LOAD_MEMBASE offset(basereg), reg2
2760 * OP_LOAD_MEMBASE offset(basereg), reg1
2761 * OP_MOVE reg1, reg2
2763 } if (last_ins && (last_ins->opcode == OP_LOADI4_MEMBASE
2764 || last_ins->opcode == OP_LOAD_MEMBASE) &&
2765 ins->inst_basereg != last_ins->dreg &&
2766 ins->inst_basereg == last_ins->inst_basereg &&
2767 ins->inst_offset == last_ins->inst_offset) {
2769 if (ins->dreg == last_ins->dreg) {
2770 MONO_DELETE_INS (bb, ins);
2773 ins->opcode = OP_MOVE;
2774 ins->sreg1 = last_ins->dreg;
2777 //g_assert_not_reached ();
/* immediate store followed by load -> materialize the constant directly */
2781 * OP_STORE_MEMBASE_IMM imm, offset(basereg)
2782 * OP_LOAD_MEMBASE offset(basereg), reg
2784 * OP_STORE_MEMBASE_IMM imm, offset(basereg)
2785 * OP_ICONST reg, imm
2787 } else if (last_ins && (last_ins->opcode == OP_STOREI4_MEMBASE_IMM
2788 || last_ins->opcode == OP_STORE_MEMBASE_IMM) &&
2789 ins->inst_basereg == last_ins->inst_destbasereg &&
2790 ins->inst_offset == last_ins->inst_offset) {
2791 //static int c = 0; printf ("MATCHX %s %d\n", cfg->method->name,c++);
2792 ins->opcode = OP_ICONST;
2793 ins->inst_c0 = last_ins->inst_imm;
/* deliberately asserts: this rule has never been exercised/validated */
2794 g_assert_not_reached (); // check this rule
/* narrow store + narrow load of same slot -> sign/zero-extend the register */
2798 case OP_LOADU1_MEMBASE:
2799 case OP_LOADI1_MEMBASE:
2800 if (last_ins && (last_ins->opcode == OP_STOREI1_MEMBASE_REG) &&
2801 ins->inst_basereg == last_ins->inst_destbasereg &&
2802 ins->inst_offset == last_ins->inst_offset) {
2803 ins->opcode = (ins->opcode == OP_LOADI1_MEMBASE) ? OP_ICONV_TO_I1 : OP_ICONV_TO_U1;
2804 ins->sreg1 = last_ins->sreg1;
2807 case OP_LOADU2_MEMBASE:
2808 case OP_LOADI2_MEMBASE:
2809 if (last_ins && (last_ins->opcode == OP_STOREI2_MEMBASE_REG) &&
2810 ins->inst_basereg == last_ins->inst_destbasereg &&
2811 ins->inst_offset == last_ins->inst_offset) {
2812 ins->opcode = (ins->opcode == OP_LOADI2_MEMBASE) ? OP_ICONV_TO_I2 : OP_ICONV_TO_U2;
2813 ins->sreg1 = last_ins->sreg1;
2817 ins->opcode = OP_MOVE;
/* OP_MOVE with identical source and destination is dead */
2821 if (ins->dreg == ins->sreg1) {
2822 MONO_DELETE_INS (bb, ins);
/* mutually-cancelling move pair: delete the second */
2826 * OP_MOVE sreg, dreg
2827 * OP_MOVE dreg, sreg
2829 if (last_ins && last_ins->opcode == OP_MOVE &&
2830 ins->sreg1 == last_ins->dreg &&
2831 ins->dreg == last_ins->sreg1) {
2832 MONO_DELETE_INS (bb, ins);
2840 bb->last_ins = last_ins;
2844 * the branch_cc_table should maintain the order of these
2858 branch_cc_table [] = {
/* ADD_NEW_INS: allocate a new MonoInst with opcode `op` into `dest` and
 * insert it immediately before the current `ins` in basic block `bb`
 * (used by the lowering pass to materialize constants/addresses). */
2872 #define ADD_NEW_INS(cfg,dest,op) do { \
2873 MONO_INST_NEW ((cfg), (dest), (op)); \
2874 mono_bblock_insert_before_ins (bb, ins, (dest)); \
/*
 * map_to_reg_reg_op:
 * Translate a membase (register + immediate offset) or immediate-store
 * opcode into its register-register equivalent: loads/stores become
 * MEMINDEX forms, and *_MEMBASE_IMM stores become *_MEMBASE_REG (the
 * immediate having been loaded into a register by the lowering pass).
 * Asserts on any opcode it does not know.
 */
2878 map_to_reg_reg_op (int op)
2887 case OP_COMPARE_IMM:
2889 case OP_ICOMPARE_IMM:
2903 case OP_LOAD_MEMBASE:
2904 return OP_LOAD_MEMINDEX;
2905 case OP_LOADI4_MEMBASE:
2906 return OP_LOADI4_MEMINDEX;
2907 case OP_LOADU4_MEMBASE:
2908 return OP_LOADU4_MEMINDEX;
2909 case OP_LOADU1_MEMBASE:
2910 return OP_LOADU1_MEMINDEX;
2911 case OP_LOADI2_MEMBASE:
2912 return OP_LOADI2_MEMINDEX;
2913 case OP_LOADU2_MEMBASE:
2914 return OP_LOADU2_MEMINDEX;
2915 case OP_LOADI1_MEMBASE:
2916 return OP_LOADI1_MEMINDEX;
2917 case OP_STOREI1_MEMBASE_REG:
2918 return OP_STOREI1_MEMINDEX;
2919 case OP_STOREI2_MEMBASE_REG:
2920 return OP_STOREI2_MEMINDEX;
2921 case OP_STOREI4_MEMBASE_REG:
2922 return OP_STOREI4_MEMINDEX;
2923 case OP_STORE_MEMBASE_REG:
2924 return OP_STORE_MEMINDEX;
2925 case OP_STORER4_MEMBASE_REG:
2926 return OP_STORER4_MEMINDEX;
2927 case OP_STORER8_MEMBASE_REG:
2928 return OP_STORER8_MEMINDEX;
/* immediate stores: the lowering pass put the immediate in a register */
2929 case OP_STORE_MEMBASE_IMM:
2930 return OP_STORE_MEMBASE_REG;
2931 case OP_STOREI1_MEMBASE_IMM:
2932 return OP_STOREI1_MEMBASE_REG;
2933 case OP_STOREI2_MEMBASE_IMM:
2934 return OP_STOREI2_MEMBASE_REG;
2935 case OP_STOREI4_MEMBASE_IMM:
2936 return OP_STOREI4_MEMBASE_REG;
2938 g_assert_not_reached ();
2942 * Remove from the instruction list the instructions that can't be
2943 * represented with very simple instructions with no register
/*
 * mono_arch_lowering_pass:
 * Rewrite instructions whose immediates/offsets cannot be encoded in ARM
 * instruction fields: materialize the constant into a fresh vreg (via
 * ADD_NEW_INS/OP_ICONST) and switch the instruction to its reg-reg form
 * (map_to_reg_reg_op), or split large FP offsets into base+imm8.
 * Also strength-reduces MUL_IMM and fixes up overflow-check conditions.
 */
2947 mono_arch_lowering_pass (MonoCompile *cfg, MonoBasicBlock *bb)
2949 MonoInst *ins, *temp, *last_ins = NULL;
2950 int rot_amount, imm8, low_imm;
2952 MONO_BB_FOR_EACH_INS (bb, ins) {
2954 switch (ins->opcode) {
2958 case OP_COMPARE_IMM:
2959 case OP_ICOMPARE_IMM:
/* immediate not encodable as rotated imm8 -> load it into a register */
2973 if ((imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount)) < 0) {
2974 ADD_NEW_INS (cfg, temp, OP_ICONST);
2975 temp->inst_c0 = ins->inst_imm;
2976 temp->dreg = mono_alloc_ireg (cfg);
2977 ins->sreg2 = temp->dreg;
2978 ins->opcode = mono_op_imm_to_op (ins->opcode);
2980 if (ins->opcode == OP_SBB || ins->opcode == OP_ISBB || ins->opcode == OP_SUBCC)
/* MUL_IMM strength reduction: x*1 -> move, x*0 -> 0, power of two -> shift */
2986 if (ins->inst_imm == 1) {
2987 ins->opcode = OP_MOVE;
2990 if (ins->inst_imm == 0) {
2991 ins->opcode = OP_ICONST;
2995 imm8 = mono_is_power_of_two (ins->inst_imm);
2997 ins->opcode = OP_SHL_IMM;
2998 ins->inst_imm = imm8;
3001 ADD_NEW_INS (cfg, temp, OP_ICONST);
3002 temp->inst_c0 = ins->inst_imm;
3003 temp->dreg = mono_alloc_ireg (cfg);
3004 ins->sreg2 = temp->dreg;
3005 ins->opcode = OP_IMUL;
3011 if (ins->next && (ins->next->opcode == OP_COND_EXC_C || ins->next->opcode == OP_COND_EXC_IC))
3012 /* ARM sets the C flag to 1 if there was _no_ overflow */
3013 ins->next->opcode = OP_COND_EXC_NC;
/* div/rem with immediate divisor: always go through a register */
3016 case OP_IDIV_UN_IMM:
3018 case OP_IREM_UN_IMM:
3019 ADD_NEW_INS (cfg, temp, OP_ICONST);
3020 temp->inst_c0 = ins->inst_imm;
3021 temp->dreg = mono_alloc_ireg (cfg);
3022 ins->sreg2 = temp->dreg;
3023 ins->opcode = mono_op_imm_to_op (ins->opcode);
3025 case OP_LOCALLOC_IMM:
3026 ADD_NEW_INS (cfg, temp, OP_ICONST);
3027 temp->inst_c0 = ins->inst_imm;
3028 temp->dreg = mono_alloc_ireg (cfg);
3029 ins->sreg1 = temp->dreg;
3030 ins->opcode = OP_LOCALLOC;
/* word/byte loads: 12-bit offset field */
3032 case OP_LOAD_MEMBASE:
3033 case OP_LOADI4_MEMBASE:
3034 case OP_LOADU4_MEMBASE:
3035 case OP_LOADU1_MEMBASE:
3036 /* we can do two things: load the immed in a register
3037 * and use an indexed load, or see if the immed can be
3038 * represented as an ad_imm + a load with a smaller offset
3039 * that fits. We just do the first for now, optimize later.
3041 if (arm_is_imm12 (ins->inst_offset))
3043 ADD_NEW_INS (cfg, temp, OP_ICONST);
3044 temp->inst_c0 = ins->inst_offset;
3045 temp->dreg = mono_alloc_ireg (cfg);
3046 ins->sreg2 = temp->dreg;
3047 ins->opcode = map_to_reg_reg_op (ins->opcode);
/* halfword / signed byte loads: only an 8-bit offset field */
3049 case OP_LOADI2_MEMBASE:
3050 case OP_LOADU2_MEMBASE:
3051 case OP_LOADI1_MEMBASE:
3052 if (arm_is_imm8 (ins->inst_offset))
3054 ADD_NEW_INS (cfg, temp, OP_ICONST);
3055 temp->inst_c0 = ins->inst_offset;
3056 temp->dreg = mono_alloc_ireg (cfg);
3057 ins->sreg2 = temp->dreg;
3058 ins->opcode = map_to_reg_reg_op (ins->opcode);
/* FP loads: try base+low9 split first, else compute the address in a reg */
3060 case OP_LOADR4_MEMBASE:
3061 case OP_LOADR8_MEMBASE:
3062 if (arm_is_fpimm8 (ins->inst_offset))
3064 low_imm = ins->inst_offset & 0x1ff;
3065 if ((imm8 = mono_arm_is_rotated_imm8 (ins->inst_offset & ~0x1ff, &rot_amount)) >= 0) {
3066 ADD_NEW_INS (cfg, temp, OP_ADD_IMM);
3067 temp->inst_imm = ins->inst_offset & ~0x1ff;
3068 temp->sreg1 = ins->inst_basereg;
3069 temp->dreg = mono_alloc_ireg (cfg);
3070 ins->inst_basereg = temp->dreg;
3071 ins->inst_offset = low_imm;
3075 ADD_NEW_INS (cfg, temp, OP_ICONST);
3076 temp->inst_c0 = ins->inst_offset;
3077 temp->dreg = mono_alloc_ireg (cfg);
3079 ADD_NEW_INS (cfg, add_ins, OP_IADD);
3080 add_ins->sreg1 = ins->inst_basereg;
3081 add_ins->sreg2 = temp->dreg;
3082 add_ins->dreg = mono_alloc_ireg (cfg);
3084 ins->inst_basereg = add_ins->dreg;
3085 ins->inst_offset = 0;
/* word/byte stores: 12-bit offset field */
3088 case OP_STORE_MEMBASE_REG:
3089 case OP_STOREI4_MEMBASE_REG:
3090 case OP_STOREI1_MEMBASE_REG:
3091 if (arm_is_imm12 (ins->inst_offset))
3093 ADD_NEW_INS (cfg, temp, OP_ICONST);
3094 temp->inst_c0 = ins->inst_offset;
3095 temp->dreg = mono_alloc_ireg (cfg);
3096 ins->sreg2 = temp->dreg;
3097 ins->opcode = map_to_reg_reg_op (ins->opcode);
3099 case OP_STOREI2_MEMBASE_REG:
3100 if (arm_is_imm8 (ins->inst_offset))
3102 ADD_NEW_INS (cfg, temp, OP_ICONST);
3103 temp->inst_c0 = ins->inst_offset;
3104 temp->dreg = mono_alloc_ireg (cfg);
3105 ins->sreg2 = temp->dreg;
3106 ins->opcode = map_to_reg_reg_op (ins->opcode);
/* FP stores: same base+low9 split as FP loads */
3108 case OP_STORER4_MEMBASE_REG:
3109 case OP_STORER8_MEMBASE_REG:
3110 if (arm_is_fpimm8 (ins->inst_offset))
3112 low_imm = ins->inst_offset & 0x1ff;
3113 if ((imm8 = mono_arm_is_rotated_imm8 (ins->inst_offset & ~ 0x1ff, &rot_amount)) >= 0 && arm_is_fpimm8 (low_imm)) {
3114 ADD_NEW_INS (cfg, temp, OP_ADD_IMM);
3115 temp->inst_imm = ins->inst_offset & ~0x1ff;
3116 temp->sreg1 = ins->inst_destbasereg;
3117 temp->dreg = mono_alloc_ireg (cfg);
3118 ins->inst_destbasereg = temp->dreg;
3119 ins->inst_offset = low_imm;
3123 ADD_NEW_INS (cfg, temp, OP_ICONST);
3124 temp->inst_c0 = ins->inst_offset;
3125 temp->dreg = mono_alloc_ireg (cfg);
3127 ADD_NEW_INS (cfg, add_ins, OP_IADD);
3128 add_ins->sreg1 = ins->inst_destbasereg;
3129 add_ins->sreg2 = temp->dreg;
3130 add_ins->dreg = mono_alloc_ireg (cfg);
3132 ins->inst_destbasereg = add_ins->dreg;
3133 ins->inst_offset = 0;
/* immediate stores: load the value, then re-lower the resulting reg store */
3136 case OP_STORE_MEMBASE_IMM:
3137 case OP_STOREI1_MEMBASE_IMM:
3138 case OP_STOREI2_MEMBASE_IMM:
3139 case OP_STOREI4_MEMBASE_IMM:
3140 ADD_NEW_INS (cfg, temp, OP_ICONST);
3141 temp->inst_c0 = ins->inst_imm;
3142 temp->dreg = mono_alloc_ireg (cfg);
3143 ins->sreg1 = temp->dreg;
3144 ins->opcode = map_to_reg_reg_op (ins->opcode);
3146 goto loop_start; /* make it handle the possibly big ins->inst_offset */
3148 gboolean swap = FALSE;
3152 /* Optimized away */
3157 /* Some fp compares require swapped operands */
3158 switch (ins->next->opcode) {
3160 ins->next->opcode = OP_FBLT;
3164 ins->next->opcode = OP_FBLT_UN;
3168 ins->next->opcode = OP_FBGE;
3172 ins->next->opcode = OP_FBGE_UN;
3180 ins->sreg1 = ins->sreg2;
3189 bb->last_ins = last_ins;
3190 bb->max_vreg = cfg->next_vreg;
/*
 * mono_arch_decompose_long_opts:
 * Decompose 64-bit ops into 32-bit pairs. OP_LNEG becomes
 * RSBS low, 0 (reverse-subtract, setting carry) followed by
 * RSC high, 0 (reverse-subtract with carry) on the vreg halves.
 */
3194 mono_arch_decompose_long_opts (MonoCompile *cfg, MonoInst *long_ins)
3198 if (long_ins->opcode == OP_LNEG) {
3200 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ARM_RSBS_IMM, ins->dreg + 1, ins->sreg1 + 1, 0);
3201 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ARM_RSC_IMM, ins->dreg + 2, ins->sreg1 + 2, 0);
/*
 * emit_float_to_int:
 * Emit VFP code converting the float in `sreg` to an integer in `dreg`,
 * truncating toward zero (TOSIZD/TOUIZD through scratch F0), then
 * sign- or zero-extend to the requested `size` (1 or 2 bytes) with shifts.
 */
3207 emit_float_to_int (MonoCompile *cfg, guchar *code, int dreg, int sreg, int size, gboolean is_signed)
3209 /* sreg is a float, dreg is an integer reg */
3212 ARM_TOSIZD (code, ARM_VFP_F0, sreg);
3214 ARM_TOUIZD (code, ARM_VFP_F0, sreg);
3215 ARM_FMRS (code, dreg, ARM_VFP_F0);
/* unsigned narrowing: mask / shift pair zero-extends */
3219 ARM_AND_REG_IMM8 (code, dreg, dreg, 0xff);
3220 else if (size == 2) {
3221 ARM_SHL_IMM (code, dreg, dreg, 16);
3222 ARM_SHR_IMM (code, dreg, dreg, 16);
/* signed narrowing: shift left then arithmetic shift right sign-extends */
3226 ARM_SHL_IMM (code, dreg, dreg, 24);
3227 ARM_SAR_IMM (code, dreg, dreg, 24);
3228 } else if (size == 2) {
3229 ARM_SHL_IMM (code, dreg, dreg, 16);
3230 ARM_SAR_IMM (code, dreg, dreg, 16);
3236 #endif /* #ifndef DISABLE_JIT */
3240 const guchar *target;
3245 #define is_call_imm(diff) ((gint)(diff) >= -33554432 && (gint)(diff) <= 33554431)
/*
 * search_thunk_slot:
 * mono_code_manager_foreach callback: scan a code chunk's thunk area for
 * either an existing 12-byte thunk jumping to pdata->target (reuse it) or
 * an all-zero free slot (emit a new thunk there), then patch the original
 * call site to branch to the thunk. Skips chunks that a BL from
 * pdata->code could not reach.
 */
3248 search_thunk_slot (void *data, int csize, int bsize, void *user_data) {
3249 PatchData *pdata = (PatchData*)user_data;
3250 guchar *code = data;
3251 guint32 *thunks = data;
3252 guint32 *endthunks = (guint32*)(code + bsize);
3254 int difflow, diffhigh;
3256 /* always ensure a call from pdata->code can reach to the thunks without further thunks */
3257 difflow = (char*)pdata->code - (char*)thunks;
3258 diffhigh = (char*)pdata->code - (char*)endthunks;
3259 if (!((is_call_imm (thunks) && is_call_imm (endthunks)) || (is_call_imm (difflow) && is_call_imm (diffhigh))))
3263 * The thunk is composed of 3 words:
3264 * load constant from thunks [2] into ARM_IP
3267 * Note that the LR register is already setup
3269 //g_print ("thunk nentries: %d\n", ((char*)endthunks - (char*)thunks)/16);
3270 if ((pdata->found == 2) || (pdata->code >= code && pdata->code <= code + csize)) {
3271 while (thunks < endthunks) {
3272 //g_print ("looking for target: %p at %p (%08x-%08x)\n", pdata->target, thunks, thunks [0], thunks [1]);
3273 if (thunks [2] == (guint32)pdata->target) {
/* existing thunk for this target: just point the call site at it */
3274 arm_patch (pdata->code, (guchar*)thunks);
3275 mono_arch_flush_icache (pdata->code, 4);
3278 } else if ((thunks [0] == 0) && (thunks [1] == 0) && (thunks [2] == 0)) {
3279 /* found a free slot instead: emit thunk */
3280 /* ARMREG_IP is fine to use since this can't be an IMT call
3283 code = (guchar*)thunks;
3284 ARM_LDR_IMM (code, ARMREG_IP, ARMREG_PC, 0);
3285 if (thumb_supported)
3286 ARM_BX (code, ARMREG_IP);
3288 ARM_MOV_REG_REG (code, ARMREG_PC, ARMREG_IP);
3289 thunks [2] = (guint32)pdata->target;
3290 mono_arch_flush_icache ((guchar*)thunks, 12);
3292 arm_patch (pdata->code, (guchar*)thunks);
3293 mono_arch_flush_icache (pdata->code, 4);
3297 /* skip 12 bytes, the size of the thunk */
3301 //g_print ("failed thunk lookup for %p from %p at %p (%d entries)\n", pdata->target, pdata->code, data, count);
/*
 * handle_thunk:
 * Find (or create) a branch thunk for a call from `code` to the
 * out-of-range `target`. Searches, in order: the dynamic-method code
 * manager (if given), the domain's code chunks, and finally each dynamic
 * method's own code manager. Aborts if no slot can be found.
 */
3307 handle_thunk (MonoDomain *domain, int absolute, guchar *code, const guchar *target, MonoCodeManager *dyn_code_mp)
3312 domain = mono_domain_get ();
3315 pdata.target = target;
3316 pdata.absolute = absolute;
3320 mono_code_manager_foreach (dyn_code_mp, search_thunk_slot, &pdata);
3323 if (pdata.found != 1) {
3324 mono_domain_lock (domain);
3325 mono_domain_code_foreach (domain, search_thunk_slot, &pdata);
3328 /* this uses the first available slot */
/* second pass (pdata.found == 2 mode) takes any reachable free slot */
3330 mono_domain_code_foreach (domain, search_thunk_slot, &pdata);
3332 mono_domain_unlock (domain);
3335 if (pdata.found != 1) {
3337 GHashTableIter iter;
3338 MonoJitDynamicMethodInfo *ji;
3341 * This might be a dynamic method, search its code manager. We can only
3342 * use the dynamic method containing CODE, since the others might be freed later.
3346 mono_domain_lock (domain);
3347 hash = domain_jit_info (domain)->dynamic_code_hash;
3349 /* FIXME: Speed this up */
3350 g_hash_table_iter_init (&iter, hash);
3351 while (g_hash_table_iter_next (&iter, NULL, (gpointer*)&ji)) {
3352 mono_code_manager_foreach (ji->code_mp, search_thunk_slot, &pdata);
3353 if (pdata.found == 1)
3357 mono_domain_unlock (domain);
3359 if (pdata.found != 1)
3360 g_print ("thunk failed for %p from %p\n", target, code);
3361 g_assert (pdata.found == 1);
/*
 * arm_patch_general:
 * Patch the instruction (or call sequence) at `code` to transfer to
 * `target`. Handles: direct B/BL (rewrite the 24-bit displacement, falling
 * back to a thunk via handle_thunk when out of range, and converting BL to
 * BLX when the target is Thumb), jump tables, and the several ldr-ip-based
 * indirect call/jump sequences (patching the embedded address constant).
 */
3365 arm_patch_general (MonoDomain *domain, guchar *code, const guchar *target, MonoCodeManager *dyn_code_mp)
3367 guint32 *code32 = (void*)code;
3368 guint32 ins = *code32;
3369 guint32 prim = (ins >> 25) & 7;
3370 guint32 tval = GPOINTER_TO_UINT (target);
3372 //g_print ("patching 0x%08x (0x%08x) to point to 0x%08x\n", code, ins, target);
3373 if (prim == 5) { /* 101b */
/* direct B/BL: displacement is PC-relative with the usual +8 pipeline bias */
3374 /* the diff starts 8 bytes from the branch opcode */
3375 gint diff = target - code - 8;
3377 gint tmask = 0xffffffff;
3378 if (tval & 1) { /* entering thumb mode */
3379 diff = target - 1 - code - 8;
3380 g_assert (thumb_supported);
3381 tbits = 0xf << 28; /* bl->blx bit pattern */
3382 g_assert ((ins & (1 << 24))); /* it must be a bl, not b instruction */
3383 /* this low bit of the displacement is moved to bit 24 in the instruction encoding */
3387 tmask = ~(1 << 24); /* clear the link bit */
3388 /*g_print ("blx to thumb: target: %p, code: %p, diff: %d, mask: %x\n", target, code, diff, tmask);*/
3393 if (diff <= 33554431) {
3395 ins = (ins & 0xff000000) | diff;
3397 *code32 = ins | tbits;
3401 /* diff between 0 and -33554432 */
3402 if (diff >= -33554432) {
3404 ins = (ins & 0xff000000) | (diff & ~0xff000000);
3406 *code32 = ins | tbits;
/* target unreachable by a 24-bit branch: route through a thunk */
3411 handle_thunk (domain, TRUE, code, target, dyn_code_mp);
3415 #ifdef USE_JUMP_TABLES
3417 gpointer *jte = mono_jumptable_get_entry (code);
3419 jte [0] = (gpointer) target;
3423 * The alternative call sequences looks like this:
3425 * ldr ip, [pc] // loads the address constant
3426 * b 1f // jumps around the constant
3427 * address constant embedded in the code
3432 * There are two cases for patching:
3433 * a) at the end of method emission: in this case code points to the start
3434 * of the call sequence
3435 * b) during runtime patching of the call site: in this case code points
3436 * to the mov pc, ip instruction
3438 * We have to handle also the thunk jump code sequence:
3442 * address constant // execution never reaches here
3444 if ((ins & 0x0ffffff0) == 0x12fff10) {
3445 /* Branch and exchange: the address is constructed in a reg
3446 * We can patch BX when the code sequence is the following:
3447 * ldr ip, [pc, #0] ; 0x8
/* rebuild the expected sequence in ccode[] and match it against memory */
3454 guint8 *emit = (guint8*)ccode;
3455 ARM_LDR_IMM (emit, ARMREG_IP, ARMREG_PC, 0);
3457 ARM_MOV_REG_REG (emit, ARMREG_LR, ARMREG_PC);
3458 ARM_BX (emit, ARMREG_IP);
3460 /*patching from magic trampoline*/
3461 if (ins == ccode [3]) {
3462 g_assert (code32 [-4] == ccode [0]);
3463 g_assert (code32 [-3] == ccode [1]);
3464 g_assert (code32 [-1] == ccode [2]);
3465 code32 [-2] = (guint32)target;
3468 /*patching from JIT*/
3469 if (ins == ccode [0]) {
3470 g_assert (code32 [1] == ccode [1]);
3471 g_assert (code32 [3] == ccode [2]);
3472 g_assert (code32 [4] == ccode [3]);
3473 code32 [2] = (guint32)target;
3476 g_assert_not_reached ();
3477 } else if ((ins & 0x0ffffff0) == 0x12fff30) {
/* BLX register form: same idea with a shorter ldr/blx sequence */
3485 guint8 *emit = (guint8*)ccode;
3486 ARM_LDR_IMM (emit, ARMREG_IP, ARMREG_PC, 0);
3488 ARM_BLX_REG (emit, ARMREG_IP);
3490 g_assert (code32 [-3] == ccode [0]);
3491 g_assert (code32 [-2] == ccode [1]);
3492 g_assert (code32 [0] == ccode [2]);
3494 code32 [-1] = (guint32)target;
3497 guint32 *tmp = ccode;
3498 guint8 *emit = (guint8*)tmp;
3499 ARM_LDR_IMM (emit, ARMREG_IP, ARMREG_PC, 0);
3500 ARM_MOV_REG_REG (emit, ARMREG_LR, ARMREG_PC);
3501 ARM_MOV_REG_REG (emit, ARMREG_PC, ARMREG_IP);
3502 ARM_BX (emit, ARMREG_IP);
3503 if (ins == ccode [2]) {
3504 g_assert_not_reached (); // should be -2 ...
3505 code32 [-1] = (guint32)target;
3508 if (ins == ccode [0]) {
3509 /* handles both thunk jump code and the far call sequence */
3510 code32 [2] = (guint32)target;
3513 g_assert_not_reached ();
3515 // g_print ("patched with 0x%08x\n", ins);
/* arm_patch: convenience wrapper around arm_patch_general with no domain
 * and no dynamic-method code manager. */
3520 arm_patch (guchar *code, const guchar *target)
3522 arm_patch_general (NULL, code, target, NULL);
3526 * Return the >= 0 uimm8 value if val can be represented with a byte + rotation
3527 * (with the rotation amount in *rot_amount. rot_amount is already adjusted
3528 * to be used with the emit macros.
3529 * Return -1 otherwise.
/* Try every even rotation (ARM data-processing immediates rotate right by
 * an even amount); on a hit, report the rotation in the form the emit
 * macros expect and return the 8-bit payload. See header comment above. */
3532 mono_arm_is_rotated_imm8 (guint32 val, gint *rot_amount)
3535 for (i = 0; i < 31; i+= 2) {
3536 res = (val << (32 - i)) | (val >> i);
3539 *rot_amount = i? 32 - i: 0;
3546 * Emits in code a sequence of instructions that load the value 'val'
3547 * into the dreg register. Uses at most 4 instructions.
/*
 * mono_arm_emit_load_imm:
 * Emit code loading the 32-bit constant `val` into `dreg` and return the
 * advanced code pointer. Strategies, in order: rotated-imm8 MOV, rotated
 * MVN of the complement, MOVW/MOVT pair (where available), otherwise a
 * MOV of the low byte followed by up to three ADDs with rotated byte
 * immediates; a PC-relative LDR path also exists (see first line).
 */
3550 mono_arm_emit_load_imm (guint8 *code, int dreg, guint32 val)
3552 int imm8, rot_amount;
3554 ARM_LDR_IMM (code, dreg, ARMREG_PC, 0);
3555 /* skip the constant pool */
3561 if ((imm8 = mono_arm_is_rotated_imm8 (val, &rot_amount)) >= 0) {
3562 ARM_MOV_REG_IMM (code, dreg, imm8, rot_amount);
3563 } else if ((imm8 = mono_arm_is_rotated_imm8 (~val, &rot_amount)) >= 0) {
/* complement fits: MVN gives us val in one instruction */
3564 ARM_MVN_REG_IMM (code, dreg, imm8, rot_amount);
3567 ARM_MOVW_REG_IMM (code, dreg, val & 0xffff);
3569 ARM_MOVT_REG_IMM (code, dreg, (val >> 16) & 0xffff);
/* byte-by-byte construction: MOV the lowest non-zero byte, ADD the rest */
3573 ARM_MOV_REG_IMM8 (code, dreg, (val & 0xFF));
3575 ARM_ADD_REG_IMM (code, dreg, dreg, (val & 0xFF00) >> 8, 24);
3577 if (val & 0xFF0000) {
3578 ARM_ADD_REG_IMM (code, dreg, dreg, (val & 0xFF0000) >> 16, 16);
3580 if (val & 0xFF000000) {
3581 ARM_ADD_REG_IMM (code, dreg, dreg, (val & 0xFF000000) >> 24, 8);
3583 } else if (val & 0xFF00) {
3584 ARM_MOV_REG_IMM (code, dreg, (val & 0xFF00) >> 8, 24);
3585 if (val & 0xFF0000) {
3586 ARM_ADD_REG_IMM (code, dreg, dreg, (val & 0xFF0000) >> 16, 16);
3588 if (val & 0xFF000000) {
3589 ARM_ADD_REG_IMM (code, dreg, dreg, (val & 0xFF000000) >> 24, 8);
3591 } else if (val & 0xFF0000) {
3592 ARM_MOV_REG_IMM (code, dreg, (val & 0xFF0000) >> 16, 16);
3593 if (val & 0xFF000000) {
3594 ARM_ADD_REG_IMM (code, dreg, dreg, (val & 0xFF000000) >> 24, 8);
3597 //g_assert_not_reached ();
/* mono_arm_thumb_supported: report whether the CPU supports Thumb
 * interworking (file-scope flag set during arch init). */
3603 mono_arm_thumb_supported (void)
3605 return thumb_supported;
3611 * emit_load_volatile_arguments:
3613 * Load volatile arguments from the stack to the original input registers.
3614 * Required before a tail call.
/*
 * emit_load_volatile_arguments:
 * Before a tail call, reload the method's incoming arguments from their
 * stack homes back into the original argument registers (reversing what
 * the prolog spilled), driven by the same CallInfo layout the prolog used.
 */
3617 emit_load_volatile_arguments (MonoCompile *cfg, guint8 *code)
3619 MonoMethod *method = cfg->method;
3620 MonoMethodSignature *sig;
3625 /* FIXME: Generate intermediate code instead */
3627 sig = mono_method_signature (method);
3629 /* This is the opposite of the code in emit_prolog */
3633 cinfo = get_call_info (cfg->generic_sharing_context, NULL, sig);
3635 if (cinfo->vtype_retaddr) {
/* reload the hidden valuetype-return address into its register */
3636 ArgInfo *ainfo = &cinfo->ret;
3637 inst = cfg->vret_addr;
3638 g_assert (arm_is_imm12 (inst->inst_offset));
3639 ARM_LDR_IMM (code, ainfo->reg, inst->inst_basereg, inst->inst_offset);
3641 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
3642 ArgInfo *ainfo = cinfo->args + i;
3643 inst = cfg->args [pos];
3645 if (cfg->verbose_level > 2)
3646 g_print ("Loading argument %d (type: %d)\n", i, ainfo->storage);
3647 if (inst->opcode == OP_REGVAR) {
/* argument lives in a register variable: move/load into the ABI reg */
3648 if (ainfo->storage == RegTypeGeneral)
3649 ARM_MOV_REG_REG (code, inst->dreg, ainfo->reg);
3650 else if (ainfo->storage == RegTypeFP) {
3651 g_assert_not_reached ();
3652 } else if (ainfo->storage == RegTypeBase) {
3656 if (arm_is_imm12 (prev_sp_offset + ainfo->offset)) {
3657 ARM_LDR_IMM (code, inst->dreg, ARMREG_SP, (prev_sp_offset + ainfo->offset));
3659 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
3660 ARM_LDR_REG_REG (code, inst->dreg, ARMREG_SP, ARMREG_IP);
3664 g_assert_not_reached ();
3666 if (ainfo->storage == RegTypeGeneral || ainfo->storage == RegTypeIRegPair) {
3667 switch (ainfo->size) {
/* 8-byte argument: reload both halves into the register pair */
3674 g_assert (arm_is_imm12 (inst->inst_offset));
3675 ARM_LDR_IMM (code, ainfo->reg, inst->inst_basereg, inst->inst_offset);
3676 g_assert (arm_is_imm12 (inst->inst_offset + 4));
3677 ARM_LDR_IMM (code, ainfo->reg + 1, inst->inst_basereg, inst->inst_offset + 4);
3680 if (arm_is_imm12 (inst->inst_offset)) {
3681 ARM_LDR_IMM (code, ainfo->reg, inst->inst_basereg, inst->inst_offset);
3683 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
3684 ARM_LDR_REG_REG (code, ainfo->reg, inst->inst_basereg, ARMREG_IP);
3688 } else if (ainfo->storage == RegTypeBaseGen) {
3691 } else if (ainfo->storage == RegTypeBase) {
3693 } else if (ainfo->storage == RegTypeFP) {
3694 g_assert_not_reached ();
3695 } else if (ainfo->storage == RegTypeStructByVal) {
/* struct passed in registers: reload each word-sized piece */
3696 int doffset = inst->inst_offset;
3700 if (mono_class_from_mono_type (inst->inst_vtype))
3701 size = mono_class_native_size (mono_class_from_mono_type (inst->inst_vtype), NULL);
3702 for (cur_reg = 0; cur_reg < ainfo->size; ++cur_reg) {
3703 if (arm_is_imm12 (doffset)) {
3704 ARM_LDR_IMM (code, ainfo->reg + cur_reg, inst->inst_basereg, doffset);
3706 code = mono_arm_emit_load_imm (code, ARMREG_IP, doffset);
3707 ARM_LDR_REG_REG (code, ainfo->reg + cur_reg, inst->inst_basereg, ARMREG_IP);
3709 soffset += sizeof (gpointer);
3710 doffset += sizeof (gpointer);
3715 } else if (ainfo->storage == RegTypeStructByAddr) {
3730 mono_arch_output_basic_block (MonoCompile *cfg, MonoBasicBlock *bb)
3735 guint8 *code = cfg->native_code + cfg->code_len;
3736 MonoInst *last_ins = NULL;
3737 guint last_offset = 0;
3739 int imm8, rot_amount;
3741 /* we don't align basic blocks of loops on arm */
3743 if (cfg->verbose_level > 2)
3744 g_print ("Basic block %d starting at offset 0x%x\n", bb->block_num, bb->native_offset);
3746 cpos = bb->max_offset;
3748 if (cfg->prof_options & MONO_PROFILE_COVERAGE) {
3749 //MonoCoverageInfo *cov = mono_get_coverage_info (cfg->method);
3750 //g_assert (!mono_compile_aot);
3753 // cov->data [bb->dfn].iloffset = bb->cil_code - cfg->cil_code;
3754 /* this is not thread save, but good enough */
3755 /* fixme: howto handle overflows? */
3756 //x86_inc_mem (code, &cov->data [bb->dfn].count);
3759 if (mono_break_at_bb_method && mono_method_desc_full_match (mono_break_at_bb_method, cfg->method) && bb->block_num == mono_break_at_bb_bb_num) {
3760 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_INTERNAL_METHOD,
3761 (gpointer)"mono_break");
3762 code = emit_call_seq (cfg, code);
3765 MONO_BB_FOR_EACH_INS (bb, ins) {
3766 offset = code - cfg->native_code;
3768 max_len = ((guint8 *)ins_get_spec (ins->opcode))[MONO_INST_LEN];
3770 if (offset > (cfg->code_size - max_len - 16)) {
3771 cfg->code_size *= 2;
3772 cfg->native_code = g_realloc (cfg->native_code, cfg->code_size);
3773 code = cfg->native_code + offset;
3775 // if (ins->cil_code)
3776 // g_print ("cil code\n");
3777 mono_debug_record_line_number (cfg, ins, offset);
3779 switch (ins->opcode) {
3780 case OP_MEMORY_BARRIER:
3782 ARM_MOV_REG_IMM8 (code, ARMREG_R0, 0);
3783 ARM_MCR (code, 15, 0, ARMREG_R0, 7, 10, 5);
3787 #ifdef HAVE_AEABI_READ_TP
3788 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_INTERNAL_METHOD,
3789 (gpointer)"__aeabi_read_tp");
3790 code = emit_call_seq (cfg, code);
3792 ARM_LDR_IMM (code, ins->dreg, ARMREG_R0, ins->inst_offset);
3794 g_assert_not_reached ();
3798 ppc_mullw (code, ppc_r4, ins->sreg1, ins->sreg2);
3799 ppc_mulhw (code, ppc_r3, ins->sreg1, ins->sreg2);
3802 ppc_mullw (code, ppc_r4, ins->sreg1, ins->sreg2);
3803 ppc_mulhwu (code, ppc_r3, ins->sreg1, ins->sreg2);
3805 case OP_STOREI1_MEMBASE_IMM:
3806 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_imm & 0xFF);
3807 g_assert (arm_is_imm12 (ins->inst_offset));
3808 ARM_STRB_IMM (code, ARMREG_LR, ins->inst_destbasereg, ins->inst_offset);
3810 case OP_STOREI2_MEMBASE_IMM:
3811 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_imm & 0xFFFF);
3812 g_assert (arm_is_imm8 (ins->inst_offset));
3813 ARM_STRH_IMM (code, ARMREG_LR, ins->inst_destbasereg, ins->inst_offset);
3815 case OP_STORE_MEMBASE_IMM:
3816 case OP_STOREI4_MEMBASE_IMM:
3817 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_imm);
3818 g_assert (arm_is_imm12 (ins->inst_offset));
3819 ARM_STR_IMM (code, ARMREG_LR, ins->inst_destbasereg, ins->inst_offset);
3821 case OP_STOREI1_MEMBASE_REG:
3822 g_assert (arm_is_imm12 (ins->inst_offset));
3823 ARM_STRB_IMM (code, ins->sreg1, ins->inst_destbasereg, ins->inst_offset);
3825 case OP_STOREI2_MEMBASE_REG:
3826 g_assert (arm_is_imm8 (ins->inst_offset));
3827 ARM_STRH_IMM (code, ins->sreg1, ins->inst_destbasereg, ins->inst_offset);
3829 case OP_STORE_MEMBASE_REG:
3830 case OP_STOREI4_MEMBASE_REG:
3831 /* this case is special, since it happens for spill code after lowering has been called */
3832 if (arm_is_imm12 (ins->inst_offset)) {
3833 ARM_STR_IMM (code, ins->sreg1, ins->inst_destbasereg, ins->inst_offset);
3835 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_offset);
3836 ARM_STR_REG_REG (code, ins->sreg1, ins->inst_destbasereg, ARMREG_LR);
3839 case OP_STOREI1_MEMINDEX:
3840 ARM_STRB_REG_REG (code, ins->sreg1, ins->inst_destbasereg, ins->sreg2);
3842 case OP_STOREI2_MEMINDEX:
3843 ARM_STRH_REG_REG (code, ins->sreg1, ins->inst_destbasereg, ins->sreg2);
3845 case OP_STORE_MEMINDEX:
3846 case OP_STOREI4_MEMINDEX:
3847 ARM_STR_REG_REG (code, ins->sreg1, ins->inst_destbasereg, ins->sreg2);
3850 g_assert_not_reached ();
3852 case OP_LOAD_MEMINDEX:
3853 case OP_LOADI4_MEMINDEX:
3854 case OP_LOADU4_MEMINDEX:
3855 ARM_LDR_REG_REG (code, ins->dreg, ins->inst_basereg, ins->sreg2);
3857 case OP_LOADI1_MEMINDEX:
3858 ARM_LDRSB_REG_REG (code, ins->dreg, ins->inst_basereg, ins->sreg2);
3860 case OP_LOADU1_MEMINDEX:
3861 ARM_LDRB_REG_REG (code, ins->dreg, ins->inst_basereg, ins->sreg2);
3863 case OP_LOADI2_MEMINDEX:
3864 ARM_LDRSH_REG_REG (code, ins->dreg, ins->inst_basereg, ins->sreg2);
3866 case OP_LOADU2_MEMINDEX:
3867 ARM_LDRH_REG_REG (code, ins->dreg, ins->inst_basereg, ins->sreg2);
3869 case OP_LOAD_MEMBASE:
3870 case OP_LOADI4_MEMBASE:
3871 case OP_LOADU4_MEMBASE:
3872 /* this case is special, since it happens for spill code after lowering has been called */
3873 if (arm_is_imm12 (ins->inst_offset)) {
3874 ARM_LDR_IMM (code, ins->dreg, ins->inst_basereg, ins->inst_offset);
3876 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_offset);
3877 ARM_LDR_REG_REG (code, ins->dreg, ins->inst_basereg, ARMREG_LR);
3880 case OP_LOADI1_MEMBASE:
3881 g_assert (arm_is_imm8 (ins->inst_offset));
3882 ARM_LDRSB_IMM (code, ins->dreg, ins->inst_basereg, ins->inst_offset);
3884 case OP_LOADU1_MEMBASE:
3885 g_assert (arm_is_imm12 (ins->inst_offset));
3886 ARM_LDRB_IMM (code, ins->dreg, ins->inst_basereg, ins->inst_offset);
3888 case OP_LOADU2_MEMBASE:
3889 g_assert (arm_is_imm8 (ins->inst_offset));
3890 ARM_LDRH_IMM (code, ins->dreg, ins->inst_basereg, ins->inst_offset);
3892 case OP_LOADI2_MEMBASE:
3893 g_assert (arm_is_imm8 (ins->inst_offset));
3894 ARM_LDRSH_IMM (code, ins->dreg, ins->inst_basereg, ins->inst_offset);
3896 case OP_ICONV_TO_I1:
3897 ARM_SHL_IMM (code, ins->dreg, ins->sreg1, 24);
3898 ARM_SAR_IMM (code, ins->dreg, ins->dreg, 24);
3900 case OP_ICONV_TO_I2:
3901 ARM_SHL_IMM (code, ins->dreg, ins->sreg1, 16);
3902 ARM_SAR_IMM (code, ins->dreg, ins->dreg, 16);
3904 case OP_ICONV_TO_U1:
3905 ARM_AND_REG_IMM8 (code, ins->dreg, ins->sreg1, 0xff);
3907 case OP_ICONV_TO_U2:
3908 ARM_SHL_IMM (code, ins->dreg, ins->sreg1, 16);
3909 ARM_SHR_IMM (code, ins->dreg, ins->dreg, 16);
3913 ARM_CMP_REG_REG (code, ins->sreg1, ins->sreg2);
3915 case OP_COMPARE_IMM:
3916 case OP_ICOMPARE_IMM:
3917 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
3918 g_assert (imm8 >= 0);
3919 ARM_CMP_REG_IMM (code, ins->sreg1, imm8, rot_amount);
3923 * gdb does not like encountering the hw breakpoint ins in the debugged code.
3924 * So instead of emitting a trap, we emit a call a C function and place a
3927 //*(int*)code = 0xef9f0001;
3930 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_INTERNAL_METHOD,
3931 (gpointer)"mono_break");
3932 code = emit_call_seq (cfg, code);
3934 case OP_RELAXED_NOP:
3939 case OP_DUMMY_STORE:
3940 case OP_NOT_REACHED:
3943 case OP_SEQ_POINT: {
3945 MonoInst *info_var = cfg->arch.seq_point_info_var;
3946 MonoInst *ss_trigger_page_var = cfg->arch.ss_trigger_page_var;
3947 MonoInst *ss_read_var = cfg->arch.seq_point_read_var;
3948 MonoInst *ss_method_var = cfg->arch.seq_point_ss_method_var;
3949 MonoInst *bp_method_var = cfg->arch.seq_point_bp_method_var;
3951 int dreg = ARMREG_LR;
3953 if (cfg->soft_breakpoints) {
3954 g_assert (!cfg->compile_aot);
3958 * For AOT, we use one got slot per method, which will point to a
3959 * SeqPointInfo structure, containing all the information required
3960 * by the code below.
3962 if (cfg->compile_aot) {
3963 g_assert (info_var);
3964 g_assert (info_var->opcode == OP_REGOFFSET);
3965 g_assert (arm_is_imm12 (info_var->inst_offset));
3968 if (!cfg->soft_breakpoints) {
3970 * Read from the single stepping trigger page. This will cause a
3971 * SIGSEGV when single stepping is enabled.
3972 * We do this _before_ the breakpoint, so single stepping after
3973 * a breakpoint is hit will step to the next IL offset.
3975 g_assert (((guint64)(gsize)ss_trigger_page >> 32) == 0);
3978 if (ins->flags & MONO_INST_SINGLE_STEP_LOC) {
3979 if (cfg->soft_breakpoints) {
3980 /* Load the address of the sequence point trigger variable. */
3983 g_assert (var->opcode == OP_REGOFFSET);
3984 g_assert (arm_is_imm12 (var->inst_offset));
3985 ARM_LDR_IMM (code, dreg, var->inst_basereg, var->inst_offset);
3987 /* Read the value and check whether it is non-zero. */
3988 ARM_LDR_IMM (code, dreg, dreg, 0);
3989 ARM_CMP_REG_IMM (code, dreg, 0, 0);
3991 /* Load the address of the sequence point method. */
3992 var = ss_method_var;
3994 g_assert (var->opcode == OP_REGOFFSET);
3995 g_assert (arm_is_imm12 (var->inst_offset));
3996 ARM_LDR_IMM (code, dreg, var->inst_basereg, var->inst_offset);
3998 /* Call it conditionally. */
3999 ARM_BLX_REG_COND (code, ARMCOND_NE, dreg);
4001 if (cfg->compile_aot) {
4002 /* Load the trigger page addr from the variable initialized in the prolog */
4003 var = ss_trigger_page_var;
4005 g_assert (var->opcode == OP_REGOFFSET);
4006 g_assert (arm_is_imm12 (var->inst_offset));
4007 ARM_LDR_IMM (code, dreg, var->inst_basereg, var->inst_offset);
4009 #ifdef USE_JUMP_TABLES
4010 gpointer *jte = mono_jumptable_add_entry ();
4011 code = mono_arm_load_jumptable_entry (code, jte, dreg);
4012 jte [0] = ss_trigger_page;
4014 ARM_LDR_IMM (code, dreg, ARMREG_PC, 0);
4016 *(int*)code = (int)ss_trigger_page;
4020 ARM_LDR_IMM (code, dreg, dreg, 0);
4024 mono_add_seq_point (cfg, bb, ins, code - cfg->native_code);
4026 if (cfg->soft_breakpoints) {
4027 /* Load the address of the breakpoint method into ip. */
4028 var = bp_method_var;
4030 g_assert (var->opcode == OP_REGOFFSET);
4031 g_assert (arm_is_imm12 (var->inst_offset));
4032 ARM_LDR_IMM (code, dreg, var->inst_basereg, var->inst_offset);
4035 * A placeholder for a possible breakpoint inserted by
4036 * mono_arch_set_breakpoint ().
4039 } else if (cfg->compile_aot) {
4040 guint32 offset = code - cfg->native_code;
4043 ARM_LDR_IMM (code, dreg, info_var->inst_basereg, info_var->inst_offset);
4044 /* Add the offset */
4045 val = ((offset / 4) * sizeof (guint8*)) + G_STRUCT_OFFSET (SeqPointInfo, bp_addrs);
4046 /* Load the info->bp_addrs [offset], which is either 0 or the address of a trigger page */
4047 if (arm_is_imm12 ((int)val)) {
4048 ARM_LDR_IMM (code, dreg, dreg, val);
4050 ARM_ADD_REG_IMM (code, dreg, dreg, (val & 0xFF), 0);
4052 ARM_ADD_REG_IMM (code, dreg, dreg, (val & 0xFF00) >> 8, 24);
4054 ARM_ADD_REG_IMM (code, dreg, dreg, (val & 0xFF0000) >> 16, 16);
4055 g_assert (!(val & 0xFF000000));
4057 ARM_LDR_IMM (code, dreg, dreg, 0);
4059 /* What is faster, a branch or a load ? */
4060 ARM_CMP_REG_IMM (code, dreg, 0, 0);
4061 /* The breakpoint instruction */
4062 ARM_LDR_IMM_COND (code, dreg, dreg, 0, ARMCOND_NE);
4065 * A placeholder for a possible breakpoint inserted by
4066 * mono_arch_set_breakpoint ().
4068 for (i = 0; i < 4; ++i)
4075 ARM_ADDS_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4078 ARM_ADD_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4082 ARM_ADCS_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4085 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
4086 g_assert (imm8 >= 0);
4087 ARM_ADDS_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
4091 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
4092 g_assert (imm8 >= 0);
4093 ARM_ADD_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
4097 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
4098 g_assert (imm8 >= 0);
4099 ARM_ADCS_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
4102 ARM_ADD_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4103 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, "OverflowException");
4105 case OP_IADD_OVF_UN:
4106 ARM_ADD_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4107 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, "OverflowException");
4110 ARM_SUB_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4111 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, "OverflowException");
4113 case OP_ISUB_OVF_UN:
4114 ARM_SUB_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4115 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_TRUE, PPC_BR_EQ, "OverflowException");
4117 case OP_ADD_OVF_CARRY:
4118 ARM_ADCS_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4119 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, "OverflowException");
4121 case OP_ADD_OVF_UN_CARRY:
4122 ARM_ADCS_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4123 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, "OverflowException");
4125 case OP_SUB_OVF_CARRY:
4126 ARM_SBCS_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4127 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, "OverflowException");
4129 case OP_SUB_OVF_UN_CARRY:
4130 ARM_SBCS_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4131 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_TRUE, PPC_BR_EQ, "OverflowException");
4135 ARM_SUBS_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4138 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
4139 g_assert (imm8 >= 0);
4140 ARM_SUBS_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
4143 ARM_SUB_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4147 ARM_SBCS_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4151 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
4152 g_assert (imm8 >= 0);
4153 ARM_SUB_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
4157 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
4158 g_assert (imm8 >= 0);
4159 ARM_SBCS_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
4161 case OP_ARM_RSBS_IMM:
4162 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
4163 g_assert (imm8 >= 0);
4164 ARM_RSBS_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
4166 case OP_ARM_RSC_IMM:
4167 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
4168 g_assert (imm8 >= 0);
4169 ARM_RSC_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
4172 ARM_AND_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4176 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
4177 g_assert (imm8 >= 0);
4178 ARM_AND_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
4181 g_assert (v7s_supported);
4182 ARM_SDIV (code, ins->dreg, ins->sreg1, ins->sreg2);
4185 g_assert (v7s_supported);
4186 ARM_UDIV (code, ins->dreg, ins->sreg1, ins->sreg2);
4189 g_assert (v7s_supported);
4190 ARM_SDIV (code, ARMREG_LR, ins->sreg1, ins->sreg2);
4191 ARM_MLS (code, ins->dreg, ARMREG_LR, ins->sreg2, ins->sreg1);
4194 g_assert (v7s_supported);
4195 ARM_UDIV (code, ARMREG_LR, ins->sreg1, ins->sreg2);
4196 ARM_MLS (code, ins->dreg, ARMREG_LR, ins->sreg2, ins->sreg1);
4200 g_assert_not_reached ();
4202 ARM_ORR_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4206 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
4207 g_assert (imm8 >= 0);
4208 ARM_ORR_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
4211 ARM_EOR_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4215 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
4216 g_assert (imm8 >= 0);
4217 ARM_EOR_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
4220 ARM_SHL_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4225 ARM_SHL_IMM (code, ins->dreg, ins->sreg1, (ins->inst_imm & 0x1f));
4226 else if (ins->dreg != ins->sreg1)
4227 ARM_MOV_REG_REG (code, ins->dreg, ins->sreg1);
4230 ARM_SAR_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4235 ARM_SAR_IMM (code, ins->dreg, ins->sreg1, (ins->inst_imm & 0x1f));
4236 else if (ins->dreg != ins->sreg1)
4237 ARM_MOV_REG_REG (code, ins->dreg, ins->sreg1);
4240 case OP_ISHR_UN_IMM:
4242 ARM_SHR_IMM (code, ins->dreg, ins->sreg1, (ins->inst_imm & 0x1f));
4243 else if (ins->dreg != ins->sreg1)
4244 ARM_MOV_REG_REG (code, ins->dreg, ins->sreg1);
4247 ARM_SHR_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4250 ARM_MVN_REG_REG (code, ins->dreg, ins->sreg1);
4253 ARM_RSB_REG_IMM8 (code, ins->dreg, ins->sreg1, 0);
4256 if (ins->dreg == ins->sreg2)
4257 ARM_MUL_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4259 ARM_MUL_REG_REG (code, ins->dreg, ins->sreg2, ins->sreg1);
4262 g_assert_not_reached ();
4265 /* FIXME: handle ovf/ sreg2 != dreg */
4266 ARM_MUL_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4267 /* FIXME: MUL doesn't set the C/O flags on ARM */
4269 case OP_IMUL_OVF_UN:
4270 /* FIXME: handle ovf/ sreg2 != dreg */
4271 ARM_MUL_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4272 /* FIXME: MUL doesn't set the C/O flags on ARM */
4275 code = mono_arm_emit_load_imm (code, ins->dreg, ins->inst_c0);
4278 /* Load the GOT offset */
4279 mono_add_patch_info (cfg, offset, (MonoJumpInfoType)ins->inst_i1, ins->inst_p0);
4280 ARM_LDR_IMM (code, ins->dreg, ARMREG_PC, 0);
4282 *(gpointer*)code = NULL;
4284 /* Load the value from the GOT */
4285 ARM_LDR_REG_REG (code, ins->dreg, ARMREG_PC, ins->dreg);
4287 case OP_OBJC_GET_SELECTOR:
4288 mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_OBJC_SELECTOR_REF, ins->inst_p0);
4289 ARM_LDR_IMM (code, ins->dreg, ARMREG_PC, 0);
4291 *(gpointer*)code = NULL;
4293 ARM_LDR_REG_REG (code, ins->dreg, ARMREG_PC, ins->dreg);
4295 case OP_ICONV_TO_I4:
4296 case OP_ICONV_TO_U4:
4298 if (ins->dreg != ins->sreg1)
4299 ARM_MOV_REG_REG (code, ins->dreg, ins->sreg1);
4302 int saved = ins->sreg2;
4303 if (ins->sreg2 == ARM_LSW_REG) {
4304 ARM_MOV_REG_REG (code, ARMREG_LR, ins->sreg2);
4307 if (ins->sreg1 != ARM_LSW_REG)
4308 ARM_MOV_REG_REG (code, ARM_LSW_REG, ins->sreg1);
4309 if (saved != ARM_MSW_REG)
4310 ARM_MOV_REG_REG (code, ARM_MSW_REG, saved);
4315 ARM_CPYD (code, ins->dreg, ins->sreg1);
4317 case OP_FCONV_TO_R4:
4319 ARM_CVTD (code, ins->dreg, ins->sreg1);
4320 ARM_CVTS (code, ins->dreg, ins->dreg);
4325 * Keep in sync with mono_arch_emit_epilog
4327 g_assert (!cfg->method->save_lmf);
4329 code = emit_load_volatile_arguments (cfg, code);
4331 code = emit_big_add (code, ARMREG_SP, cfg->frame_reg, cfg->stack_usage);
4333 if (cfg->used_int_regs)
4334 ARM_POP (code, cfg->used_int_regs);
4335 ARM_POP (code, (1 << ARMREG_R7) | (1 << ARMREG_LR));
4337 ARM_POP (code, cfg->used_int_regs | (1 << ARMREG_LR));
4339 mono_add_patch_info (cfg, (guint8*) code - cfg->native_code, MONO_PATCH_INFO_METHOD_JUMP, ins->inst_p0);
4340 if (cfg->compile_aot) {
4341 ARM_LDR_IMM (code, ARMREG_IP, ARMREG_PC, 0);
4343 *(gpointer*)code = NULL;
4345 ARM_LDR_REG_REG (code, ARMREG_PC, ARMREG_PC, ARMREG_IP);
4347 code = mono_arm_patchable_b (code, ARMCOND_AL);
4351 /* ensure ins->sreg1 is not NULL */
4352 ARM_LDRB_IMM (code, ARMREG_LR, ins->sreg1, 0);
4355 g_assert (cfg->sig_cookie < 128);
4356 ARM_LDR_IMM (code, ARMREG_IP, cfg->frame_reg, cfg->sig_cookie);
4357 ARM_STR_IMM (code, ARMREG_IP, ins->sreg1, 0);
4366 call = (MonoCallInst*)ins;
4367 if (ins->flags & MONO_INST_HAS_METHOD)
4368 mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_METHOD, call->method);
4370 mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_ABS, call->fptr);
4371 code = emit_call_seq (cfg, code);
4372 ins->flags |= MONO_INST_GC_CALLSITE;
4373 ins->backend.pc_offset = code - cfg->native_code;
4374 code = emit_move_return_value (cfg, ins, code);
4380 case OP_VOIDCALL_REG:
4382 code = emit_call_reg (code, ins->sreg1);
4383 ins->flags |= MONO_INST_GC_CALLSITE;
4384 ins->backend.pc_offset = code - cfg->native_code;
4385 code = emit_move_return_value (cfg, ins, code);
4387 case OP_FCALL_MEMBASE:
4388 case OP_LCALL_MEMBASE:
4389 case OP_VCALL_MEMBASE:
4390 case OP_VCALL2_MEMBASE:
4391 case OP_VOIDCALL_MEMBASE:
4392 case OP_CALL_MEMBASE: {
4393 gboolean imt_arg = FALSE;
4395 g_assert (ins->sreg1 != ARMREG_LR);
4396 call = (MonoCallInst*)ins;
4397 if (call->dynamic_imt_arg || call->method->klass->flags & TYPE_ATTRIBUTE_INTERFACE)
4399 if (!arm_is_imm12 (ins->inst_offset))
4400 code = mono_arm_emit_load_imm (code, ARMREG_IP, ins->inst_offset);
4401 #ifdef USE_JUMP_TABLES
4407 ARM_ADD_REG_IMM8 (code, ARMREG_LR, ARMREG_PC, LR_BIAS);
4409 ARM_MOV_REG_REG (code, ARMREG_LR, ARMREG_PC);
4411 if (!arm_is_imm12 (ins->inst_offset))
4412 ARM_LDR_REG_REG (code, ARMREG_PC, ins->sreg1, ARMREG_IP);
4414 ARM_LDR_IMM (code, ARMREG_PC, ins->sreg1, ins->inst_offset);
4417 * We can't embed the method in the code stream in PIC code, or
4419 * Instead, we put it in V5 in code emitted by
4420 * mono_arch_emit_imt_argument (), and embed NULL here to
4421 * signal the IMT thunk that the value is in V5.
4423 #ifdef USE_JUMP_TABLES
4424 /* In case of jumptables we always use value in V5. */
4427 if (call->dynamic_imt_arg)
4428 *((gpointer*)code) = NULL;
4430 *((gpointer*)code) = (gpointer)call->method;
4434 ins->flags |= MONO_INST_GC_CALLSITE;
4435 ins->backend.pc_offset = code - cfg->native_code;
4436 code = emit_move_return_value (cfg, ins, code);
4440 /* keep alignment */
4441 int alloca_waste = cfg->param_area;
4444 /* round the size to 8 bytes */
4445 ARM_ADD_REG_IMM8 (code, ins->dreg, ins->sreg1, 7);
4446 ARM_BIC_REG_IMM8 (code, ins->dreg, ins->dreg, 7);
4448 ARM_ADD_REG_IMM8 (code, ins->dreg, ins->dreg, alloca_waste);
4449 ARM_SUB_REG_REG (code, ARMREG_SP, ARMREG_SP, ins->dreg);
4450 /* memzero the area: dreg holds the size, sp is the pointer */
4451 if (ins->flags & MONO_INST_INIT) {
4452 guint8 *start_loop, *branch_to_cond;
4453 ARM_MOV_REG_IMM8 (code, ARMREG_LR, 0);
4454 branch_to_cond = code;
4457 ARM_STR_REG_REG (code, ARMREG_LR, ARMREG_SP, ins->dreg);
4458 arm_patch (branch_to_cond, code);
4459 /* decrement by 4 and set flags */
4460 ARM_SUBS_REG_IMM8 (code, ins->dreg, ins->dreg, sizeof (mgreg_t));
4461 ARM_B_COND (code, ARMCOND_GE, 0);
4462 arm_patch (code - 4, start_loop);
4464 ARM_ADD_REG_IMM8 (code, ins->dreg, ARMREG_SP, alloca_waste);
4469 MonoInst *var = cfg->dyn_call_var;
4471 g_assert (var->opcode == OP_REGOFFSET);
4472 g_assert (arm_is_imm12 (var->inst_offset));
4474 /* lr = args buffer filled by mono_arch_get_dyn_call_args () */
4475 ARM_MOV_REG_REG( code, ARMREG_LR, ins->sreg1);
4477 ARM_MOV_REG_REG( code, ARMREG_IP, ins->sreg2);
4479 /* Save args buffer */
4480 ARM_STR_IMM (code, ARMREG_LR, var->inst_basereg, var->inst_offset);
4482 /* Set stack slots using R0 as scratch reg */
4483 /* MONO_ARCH_DYN_CALL_PARAM_AREA gives the size of stack space available */
4484 for (i = 0; i < DYN_CALL_STACK_ARGS; ++i) {
4485 ARM_LDR_IMM (code, ARMREG_R0, ARMREG_LR, (PARAM_REGS + i) * sizeof (mgreg_t));
4486 ARM_STR_IMM (code, ARMREG_R0, ARMREG_SP, i * sizeof (mgreg_t));
4489 /* Set argument registers */
4490 for (i = 0; i < PARAM_REGS; ++i)
4491 ARM_LDR_IMM (code, i, ARMREG_LR, i * sizeof (mgreg_t));
4494 ARM_MOV_REG_REG (code, ARMREG_LR, ARMREG_PC);
4495 ARM_MOV_REG_REG (code, ARMREG_PC, ARMREG_IP);
4498 ARM_LDR_IMM (code, ARMREG_IP, var->inst_basereg, var->inst_offset);
4499 ARM_STR_IMM (code, ARMREG_R0, ARMREG_IP, G_STRUCT_OFFSET (DynCallArgs, res));
4500 ARM_STR_IMM (code, ARMREG_R1, ARMREG_IP, G_STRUCT_OFFSET (DynCallArgs, res2));
4504 if (ins->sreg1 != ARMREG_R0)
4505 ARM_MOV_REG_REG (code, ARMREG_R0, ins->sreg1);
4506 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_INTERNAL_METHOD,
4507 (gpointer)"mono_arch_throw_exception");
4508 code = emit_call_seq (cfg, code);
4512 if (ins->sreg1 != ARMREG_R0)
4513 ARM_MOV_REG_REG (code, ARMREG_R0, ins->sreg1);
4514 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_INTERNAL_METHOD,
4515 (gpointer)"mono_arch_rethrow_exception");
4516 code = emit_call_seq (cfg, code);
4519 case OP_START_HANDLER: {
4520 MonoInst *spvar = mono_find_spvar_for_region (cfg, bb->region);
4523 /* Reserve a param area, see filter-stack.exe */
4524 if (cfg->param_area) {
4525 if ((i = mono_arm_is_rotated_imm8 (cfg->param_area, &rot_amount)) >= 0) {
4526 ARM_SUB_REG_IMM (code, ARMREG_SP, ARMREG_SP, i, rot_amount);
4528 code = mono_arm_emit_load_imm (code, ARMREG_IP, cfg->param_area);
4529 ARM_SUB_REG_REG (code, ARMREG_SP, ARMREG_SP, ARMREG_IP);
4533 if (arm_is_imm12 (spvar->inst_offset)) {
4534 ARM_STR_IMM (code, ARMREG_LR, spvar->inst_basereg, spvar->inst_offset);
4536 code = mono_arm_emit_load_imm (code, ARMREG_IP, spvar->inst_offset);
4537 ARM_STR_REG_REG (code, ARMREG_LR, spvar->inst_basereg, ARMREG_IP);
4541 case OP_ENDFILTER: {
4542 MonoInst *spvar = mono_find_spvar_for_region (cfg, bb->region);
4545 /* Free the param area */
4546 if (cfg->param_area) {
4547 if ((i = mono_arm_is_rotated_imm8 (cfg->param_area, &rot_amount)) >= 0) {
4548 ARM_ADD_REG_IMM (code, ARMREG_SP, ARMREG_SP, i, rot_amount);
4550 code = mono_arm_emit_load_imm (code, ARMREG_IP, cfg->param_area);
4551 ARM_ADD_REG_REG (code, ARMREG_SP, ARMREG_SP, ARMREG_IP);
4555 if (ins->sreg1 != ARMREG_R0)
4556 ARM_MOV_REG_REG (code, ARMREG_R0, ins->sreg1);
4557 if (arm_is_imm12 (spvar->inst_offset)) {
4558 ARM_LDR_IMM (code, ARMREG_IP, spvar->inst_basereg, spvar->inst_offset);
4560 g_assert (ARMREG_IP != spvar->inst_basereg);
4561 code = mono_arm_emit_load_imm (code, ARMREG_IP, spvar->inst_offset);
4562 ARM_LDR_REG_REG (code, ARMREG_IP, spvar->inst_basereg, ARMREG_IP);
4564 ARM_MOV_REG_REG (code, ARMREG_PC, ARMREG_IP);
4567 case OP_ENDFINALLY: {
4568 MonoInst *spvar = mono_find_spvar_for_region (cfg, bb->region);
4571 /* Free the param area */
4572 if (cfg->param_area) {
4573 if ((i = mono_arm_is_rotated_imm8 (cfg->param_area, &rot_amount)) >= 0) {
4574 ARM_ADD_REG_IMM (code, ARMREG_SP, ARMREG_SP, i, rot_amount);
4576 code = mono_arm_emit_load_imm (code, ARMREG_IP, cfg->param_area);
4577 ARM_ADD_REG_REG (code, ARMREG_SP, ARMREG_SP, ARMREG_IP);
4581 if (arm_is_imm12 (spvar->inst_offset)) {
4582 ARM_LDR_IMM (code, ARMREG_IP, spvar->inst_basereg, spvar->inst_offset);
4584 g_assert (ARMREG_IP != spvar->inst_basereg);
4585 code = mono_arm_emit_load_imm (code, ARMREG_IP, spvar->inst_offset);
4586 ARM_LDR_REG_REG (code, ARMREG_IP, spvar->inst_basereg, ARMREG_IP);
4588 ARM_MOV_REG_REG (code, ARMREG_PC, ARMREG_IP);
4591 case OP_CALL_HANDLER:
4592 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_BB, ins->inst_target_bb);
4593 code = mono_arm_patchable_bl (code, ARMCOND_AL);
4594 mono_cfg_add_try_hole (cfg, ins->inst_eh_block, code, bb);
4597 ins->inst_c0 = code - cfg->native_code;
4600 /*if (ins->inst_target_bb->native_offset) {
4602 //x86_jump_code (code, cfg->native_code + ins->inst_target_bb->native_offset);
4604 mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_BB, ins->inst_target_bb);
4605 code = mono_arm_patchable_b (code, ARMCOND_AL);
4609 ARM_MOV_REG_REG (code, ARMREG_PC, ins->sreg1);
4613 * In the normal case we have:
4614 * ldr pc, [pc, ins->sreg1 << 2]
4617 * ldr lr, [pc, ins->sreg1 << 2]
4619 * After follows the data.
4620 * FIXME: add aot support.
4622 mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_SWITCH, ins->inst_p0);
4623 #ifdef USE_JUMP_TABLES
4625 gpointer *jte = mono_jumptable_add_entries (GPOINTER_TO_INT (ins->klass));
4626 code = mono_arm_load_jumptable_entry_addr (code, jte, ARMREG_IP);
4627 ARM_LDR_REG_REG_SHIFT (code, ARMREG_PC, ARMREG_IP, ins->sreg1, ARMSHIFT_LSL, 2);
4631 max_len += 4 * GPOINTER_TO_INT (ins->klass);
4632 if (offset + max_len > (cfg->code_size - 16)) {
4633 cfg->code_size += max_len;
4634 cfg->code_size *= 2;
4635 cfg->native_code = g_realloc (cfg->native_code, cfg->code_size);
4636 code = cfg->native_code + offset;
4638 ARM_LDR_REG_REG_SHIFT (code, ARMREG_PC, ARMREG_PC, ins->sreg1, ARMSHIFT_LSL, 2);
4640 code += 4 * GPOINTER_TO_INT (ins->klass);
4645 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 0, ARMCOND_NE);
4646 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_EQ);
4650 ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
4651 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_LT);
4655 ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
4656 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_LO);
4660 ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
4661 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_GT);
4665 ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
4666 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_HI);
4668 case OP_COND_EXC_EQ:
4669 case OP_COND_EXC_NE_UN:
4670 case OP_COND_EXC_LT:
4671 case OP_COND_EXC_LT_UN:
4672 case OP_COND_EXC_GT:
4673 case OP_COND_EXC_GT_UN:
4674 case OP_COND_EXC_GE:
4675 case OP_COND_EXC_GE_UN:
4676 case OP_COND_EXC_LE:
4677 case OP_COND_EXC_LE_UN:
4678 EMIT_COND_SYSTEM_EXCEPTION (ins->opcode - OP_COND_EXC_EQ, ins->inst_p1);
4680 case OP_COND_EXC_IEQ:
4681 case OP_COND_EXC_INE_UN:
4682 case OP_COND_EXC_ILT:
4683 case OP_COND_EXC_ILT_UN:
4684 case OP_COND_EXC_IGT:
4685 case OP_COND_EXC_IGT_UN:
4686 case OP_COND_EXC_IGE:
4687 case OP_COND_EXC_IGE_UN:
4688 case OP_COND_EXC_ILE:
4689 case OP_COND_EXC_ILE_UN:
4690 EMIT_COND_SYSTEM_EXCEPTION (ins->opcode - OP_COND_EXC_IEQ, ins->inst_p1);
4693 case OP_COND_EXC_IC:
4694 EMIT_COND_SYSTEM_EXCEPTION_FLAGS (ARMCOND_CS, ins->inst_p1);
4696 case OP_COND_EXC_OV:
4697 case OP_COND_EXC_IOV:
4698 EMIT_COND_SYSTEM_EXCEPTION_FLAGS (ARMCOND_VS, ins->inst_p1);
4700 case OP_COND_EXC_NC:
4701 case OP_COND_EXC_INC:
4702 EMIT_COND_SYSTEM_EXCEPTION_FLAGS (ARMCOND_CC, ins->inst_p1);
4704 case OP_COND_EXC_NO:
4705 case OP_COND_EXC_INO:
4706 EMIT_COND_SYSTEM_EXCEPTION_FLAGS (ARMCOND_VC, ins->inst_p1);
4718 EMIT_COND_BRANCH (ins, ins->opcode - OP_IBEQ);
4721 /* floating point opcodes */
4723 if (cfg->compile_aot) {
4724 ARM_FLDD (code, ins->dreg, ARMREG_PC, 0);
4726 *(guint32*)code = ((guint32*)(ins->inst_p0))[0];
4728 *(guint32*)code = ((guint32*)(ins->inst_p0))[1];
4731 /* FIXME: we can optimize the imm load by dealing with part of
4732 * the displacement in LDFD (aligning to 512).
4734 code = mono_arm_emit_load_imm (code, ARMREG_LR, (guint32)ins->inst_p0);
4735 ARM_FLDD (code, ins->dreg, ARMREG_LR, 0);
4739 if (cfg->compile_aot) {
4740 ARM_FLDS (code, ins->dreg, ARMREG_PC, 0);
4742 *(guint32*)code = ((guint32*)(ins->inst_p0))[0];
4744 ARM_CVTS (code, ins->dreg, ins->dreg);
4746 code = mono_arm_emit_load_imm (code, ARMREG_LR, (guint32)ins->inst_p0);
4747 ARM_FLDS (code, ins->dreg, ARMREG_LR, 0);
4748 ARM_CVTS (code, ins->dreg, ins->dreg);
4751 case OP_STORER8_MEMBASE_REG:
4752 /* This is generated by the local regalloc pass which runs after the lowering pass */
4753 if (!arm_is_fpimm8 (ins->inst_offset)) {
4754 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_offset);
4755 ARM_ADD_REG_REG (code, ARMREG_LR, ARMREG_LR, ins->inst_destbasereg);
4756 ARM_FSTD (code, ins->sreg1, ARMREG_LR, 0);
4758 ARM_FSTD (code, ins->sreg1, ins->inst_destbasereg, ins->inst_offset);
4761 case OP_LOADR8_MEMBASE:
4762 /* This is generated by the local regalloc pass which runs after the lowering pass */
4763 if (!arm_is_fpimm8 (ins->inst_offset)) {
4764 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_offset);
4765 ARM_ADD_REG_REG (code, ARMREG_LR, ARMREG_LR, ins->inst_basereg);
4766 ARM_FLDD (code, ins->dreg, ARMREG_LR, 0);
4768 ARM_FLDD (code, ins->dreg, ins->inst_basereg, ins->inst_offset);
4771 case OP_STORER4_MEMBASE_REG:
4772 g_assert (arm_is_fpimm8 (ins->inst_offset));
4773 ARM_CVTD (code, ARM_VFP_F0, ins->sreg1);
4774 ARM_FSTS (code, ARM_VFP_F0, ins->inst_destbasereg, ins->inst_offset);
4776 case OP_LOADR4_MEMBASE:
4777 g_assert (arm_is_fpimm8 (ins->inst_offset));
4778 ARM_FLDS (code, ARM_VFP_F0, ins->inst_basereg, ins->inst_offset);
4779 ARM_CVTS (code, ins->dreg, ARM_VFP_F0);
4781 case OP_ICONV_TO_R_UN: {
4782 g_assert_not_reached ();
4785 case OP_ICONV_TO_R4:
4786 ARM_FMSR (code, ARM_VFP_F0, ins->sreg1);
4787 ARM_FSITOS (code, ARM_VFP_F0, ARM_VFP_F0);
4788 ARM_CVTS (code, ins->dreg, ARM_VFP_F0);
4790 case OP_ICONV_TO_R8:
4791 ARM_FMSR (code, ARM_VFP_F0, ins->sreg1);
4792 ARM_FSITOD (code, ins->dreg, ARM_VFP_F0);
4796 if (mono_method_signature (cfg->method)->ret->type == MONO_TYPE_R4) {
4797 ARM_CVTD (code, ARM_VFP_F0, ins->sreg1);
4798 ARM_FMRS (code, ARMREG_R0, ARM_VFP_F0);
4800 ARM_FMRRD (code, ARMREG_R0, ARMREG_R1, ins->sreg1);
4803 case OP_FCONV_TO_I1:
4804 code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 1, TRUE);
4806 case OP_FCONV_TO_U1:
4807 code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 1, FALSE);
4809 case OP_FCONV_TO_I2:
4810 code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 2, TRUE);
4812 case OP_FCONV_TO_U2:
4813 code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 2, FALSE);
4815 case OP_FCONV_TO_I4:
4817 code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 4, TRUE);
4819 case OP_FCONV_TO_U4:
4821 code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 4, FALSE);
4823 case OP_FCONV_TO_I8:
4824 case OP_FCONV_TO_U8:
4825 g_assert_not_reached ();
4826 /* Implemented as helper calls */
4828 case OP_LCONV_TO_R_UN:
4829 g_assert_not_reached ();
4830 /* Implemented as helper calls */
4832 case OP_LCONV_TO_OVF_I4_2: {
4833 guint8 *high_bit_not_set, *valid_negative, *invalid_negative, *valid_positive;
4835 * Valid ints: 0xffffffff:8000000 to 00000000:0x7f000000
4838 ARM_CMP_REG_IMM8 (code, ins->sreg1, 0);
4839 high_bit_not_set = code;
4840 ARM_B_COND (code, ARMCOND_GE, 0); /*branch if bit 31 of the lower part is not set*/
4842 ARM_CMN_REG_IMM8 (code, ins->sreg2, 1); /*This have the same effect as CMP reg, 0xFFFFFFFF */
4843 valid_negative = code;
4844 ARM_B_COND (code, ARMCOND_EQ, 0); /*branch if upper part == 0xFFFFFFFF (lower part has bit 31 set) */
4845 invalid_negative = code;
4846 ARM_B_COND (code, ARMCOND_AL, 0);
4848 arm_patch (high_bit_not_set, code);
4850 ARM_CMP_REG_IMM8 (code, ins->sreg2, 0);
4851 valid_positive = code;
4852 ARM_B_COND (code, ARMCOND_EQ, 0); /*branch if upper part == 0 (lower part has bit 31 clear)*/
4854 arm_patch (invalid_negative, code);
4855 EMIT_COND_SYSTEM_EXCEPTION_FLAGS (ARMCOND_AL, "OverflowException");
4857 arm_patch (valid_negative, code);
4858 arm_patch (valid_positive, code);
4860 if (ins->dreg != ins->sreg1)
4861 ARM_MOV_REG_REG (code, ins->dreg, ins->sreg1);
4865 ARM_VFP_ADDD (code, ins->dreg, ins->sreg1, ins->sreg2);
4868 ARM_VFP_SUBD (code, ins->dreg, ins->sreg1, ins->sreg2);
4871 ARM_VFP_MULD (code, ins->dreg, ins->sreg1, ins->sreg2);
4874 ARM_VFP_DIVD (code, ins->dreg, ins->sreg1, ins->sreg2);
4877 ARM_NEGD (code, ins->dreg, ins->sreg1);
4881 g_assert_not_reached ();
4885 ARM_CMPD (code, ins->sreg1, ins->sreg2);
4891 ARM_CMPD (code, ins->sreg1, ins->sreg2);
4894 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 0, ARMCOND_NE);
4895 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_EQ);
4899 ARM_CMPD (code, ins->sreg1, ins->sreg2);
4902 ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
4903 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_MI);
4907 ARM_CMPD (code, ins->sreg1, ins->sreg2);
4910 ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
4911 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_MI);
4912 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_VS);
4916 ARM_CMPD (code, ins->sreg2, ins->sreg1);
4919 ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
4920 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_MI);
4924 ARM_CMPD (code, ins->sreg2, ins->sreg1);
4927 ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
4928 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_MI);
4929 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_VS);
4931 /* ARM FPA flags table:
4932 * N Less than ARMCOND_MI
4933 * Z Equal ARMCOND_EQ
4934 * C Greater Than or Equal ARMCOND_CS
4935 * V Unordered ARMCOND_VS
4938 EMIT_COND_BRANCH (ins, OP_IBEQ - OP_IBEQ);
4941 EMIT_COND_BRANCH (ins, OP_IBNE_UN - OP_IBEQ);
4944 EMIT_COND_BRANCH_FLAGS (ins, ARMCOND_MI); /* N set */
4947 EMIT_COND_BRANCH_FLAGS (ins, ARMCOND_VS); /* V set */
4948 EMIT_COND_BRANCH_FLAGS (ins, ARMCOND_MI); /* N set */
4954 g_assert_not_reached ();
4958 EMIT_COND_BRANCH_FLAGS (ins, ARMCOND_GE);
4960 /* FPA requires EQ even thou the docs suggests that just CS is enough */
4961 EMIT_COND_BRANCH_FLAGS (ins, ARMCOND_EQ);
4962 EMIT_COND_BRANCH_FLAGS (ins, ARMCOND_CS);
4966 EMIT_COND_BRANCH_FLAGS (ins, ARMCOND_VS); /* V set */
4967 EMIT_COND_BRANCH_FLAGS (ins, ARMCOND_GE);
4972 #ifdef USE_JUMP_TABLES
4974 gpointer *jte = mono_jumptable_add_entries (2);
4975 jte [0] = GUINT_TO_POINTER (0xffffffff);
4976 jte [1] = GUINT_TO_POINTER (0x7fefffff);
4977 code = mono_arm_load_jumptable_entry_addr (code, jte, ARMREG_IP);
4978 ARM_FLDD (code, ARM_VFP_D0, ARMREG_IP, 0);
4981 ARM_ABSD (code, ARM_VFP_D1, ins->sreg1);
4982 ARM_FLDD (code, ARM_VFP_D0, ARMREG_PC, 0);
4984 *(guint32*)code = 0xffffffff;
4986 *(guint32*)code = 0x7fefffff;
4989 ARM_CMPD (code, ARM_VFP_D1, ARM_VFP_D0);
4991 EMIT_COND_SYSTEM_EXCEPTION_FLAGS (ARMCOND_GT, "ArithmeticException");
4992 ARM_CMPD (code, ins->sreg1, ins->sreg1);
4994 EMIT_COND_SYSTEM_EXCEPTION_FLAGS (ARMCOND_VS, "ArithmeticException");
4995 ARM_CPYD (code, ins->dreg, ins->sreg1);
5000 case OP_GC_LIVENESS_DEF:
5001 case OP_GC_LIVENESS_USE:
5002 case OP_GC_PARAM_SLOT_LIVENESS_DEF:
5003 ins->backend.pc_offset = code - cfg->native_code;
5005 case OP_GC_SPILL_SLOT_LIVENESS_DEF:
5006 ins->backend.pc_offset = code - cfg->native_code;
5007 bb->spill_slot_defs = g_slist_prepend_mempool (cfg->mempool, bb->spill_slot_defs, ins);
5011 g_warning ("unknown opcode %s in %s()\n", mono_inst_name (ins->opcode), __FUNCTION__);
5012 g_assert_not_reached ();
5015 if ((cfg->opt & MONO_OPT_BRANCH) && ((code - cfg->native_code - offset) > max_len)) {
5016 g_warning ("wrong maximal instruction length of instruction %s (expected %d, got %d)",
5017 mono_inst_name (ins->opcode), max_len, code - cfg->native_code - offset);
5018 g_assert_not_reached ();
5024 last_offset = offset;
5027 cfg->code_len = code - cfg->native_code;
5030 #endif /* DISABLE_JIT */
5032 #ifdef HAVE_AEABI_READ_TP
5033 void __aeabi_read_tp (void);
/*
 * mono_arch_register_lowlevel_calls:
 *
 *   Register the ARM-specific low-level helpers as JIT icalls so generated
 * code can resolve direct calls to them: the two exception-throw entry
 * points and, on native EABI Linux builds (HAVE_AEABI_READ_TP), the libgcc
 * __aeabi_read_tp TLS helper.  The "void" signatures are placeholders —
 * these entry points are only ever reached from hand-emitted assembly, so
 * the managed signature is never consulted.
 */
5037 mono_arch_register_lowlevel_calls (void)
5039 	/* The signature doesn't matter */
5040 	mono_register_jit_icall (mono_arm_throw_exception, "mono_arm_throw_exception", mono_create_icall_signature ("void"), TRUE);
5041 	mono_register_jit_icall (mono_arm_throw_exception_by_token, "mono_arm_throw_exception_by_token", mono_create_icall_signature ("void"), TRUE);
5043 #ifndef MONO_CROSS_COMPILE
5044 #ifdef HAVE_AEABI_READ_TP
	/* Not registered when cross-compiling: the symbol belongs to the target libgcc. */
5045 	mono_register_jit_icall (__aeabi_read_tp, "__aeabi_read_tp", mono_create_icall_signature ("void"), TRUE);
/*
 * patch_lis_ori:
 *   Patch the 16-bit immediate halves of a two-instruction lis/ori-style
 *   address-load sequence with VAL (high half into halfword 1, low half
 *   into halfword 3).
 *   NOTE(review): this is PowerPC-style patching; in this ARM backend it is
 *   only reached from mono_arch_patch_code () cases that are guarded by
 *   g_assert_not_reached (), i.e. it is effectively dead code kept for
 *   structural parity with other backends.
 */
5050 #define patch_lis_ori(ip,val) do {\
5051 		guint16 *__lis_ori = (guint16*)(ip);	\
5052 		__lis_ori [1] = (((guint32)(val)) >> 16) & 0xffff;	\
5053 		__lis_ori [3] = ((guint32)(val)) & 0xffff;	\
/*
 * mono_arch_patch_code:
 *
 *   Apply the relocations collected during code generation (JI list) to the
 * finished native code of METHOD.  For each MonoJumpInfo the patch target is
 * resolved via mono_resolve_patch_target () and then written into the code
 * stream with arm_patch_general ().  Switch tables get special handling:
 * their entries are filled in directly (absolute addresses in the normal
 * JIT case).  Several case labels below begin with g_assert_not_reached ()
 * — those patch kinds are handled elsewhere on ARM (e.g. at AOT-const
 * emission time) and should never reach this generic path.
 */
5057 mono_arch_patch_code (MonoMethod *method, MonoDomain *domain, guint8 *code, MonoJumpInfo *ji, MonoCodeManager *dyn_code_mp, gboolean run_cctors)
5059 	MonoJumpInfo *patch_info;
	/* run_cctors is FALSE exactly when we are AOT-compiling. */
5060 	gboolean compile_aot = !run_cctors;
5062 	for (patch_info = ji; patch_info; patch_info = patch_info->next) {
5063 		unsigned char *ip = patch_info->ip.i + code;
5064 		const unsigned char *target;
5066 		if (patch_info->type == MONO_PATCH_INFO_SWITCH && !compile_aot) {
5067 #ifdef USE_JUMP_TABLES
5068 			gpointer *jt = mono_jumptable_get_entry (ip);
5070 			gpointer *jt = (gpointer*)(ip + 8);
5073 			/* jt is the inlined jump table, 2 instructions after ip
5074 			 * In the normal case we store the absolute addresses,
5075 			 * otherwise the displacements.
			/* Fill every slot with the absolute address of its target basic block. */
5077 			for (i = 0; i < patch_info->data.table->table_size; i++)
5078 				jt [i] = code + (int)patch_info->data.table->table [i];
5083 		switch (patch_info->type) {
5084 		case MONO_PATCH_INFO_BB:
5085 		case MONO_PATCH_INFO_LABEL:
5088 			/* No need to patch these */
5093 		target = mono_resolve_patch_target (method, domain, code, patch_info, run_cctors);
5095 		switch (patch_info->type) {
5096 		case MONO_PATCH_INFO_IP:
			/* Not expected on ARM — handled at emission time. */
5097 			g_assert_not_reached ();
5098 			patch_lis_ori (ip, ip);
5100 		case MONO_PATCH_INFO_METHOD_REL:
5101 			g_assert_not_reached ();
5102 			*((gpointer *)(ip)) = code + patch_info->data.offset;
5104 		case MONO_PATCH_INFO_METHODCONST:
5105 		case MONO_PATCH_INFO_CLASS:
5106 		case MONO_PATCH_INFO_IMAGE:
5107 		case MONO_PATCH_INFO_FIELD:
5108 		case MONO_PATCH_INFO_VTABLE:
5109 		case MONO_PATCH_INFO_IID:
5110 		case MONO_PATCH_INFO_SFLDA:
5111 		case MONO_PATCH_INFO_LDSTR:
5112 		case MONO_PATCH_INFO_TYPE_FROM_HANDLE:
5113 		case MONO_PATCH_INFO_LDTOKEN:
			/* Constant loads are emitted inline on ARM; this PPC-style path is dead. */
5114 			g_assert_not_reached ();
5115 			/* from OP_AOTCONST : lis + ori */
5116 			patch_lis_ori (ip, target);
5118 		case MONO_PATCH_INFO_R4:
5119 		case MONO_PATCH_INFO_R8:
5120 			g_assert_not_reached ();
5121 			*((gconstpointer *)(ip + 2)) = patch_info->data.target;
5123 		case MONO_PATCH_INFO_EXC_NAME:
5124 			g_assert_not_reached ();
5125 			*((gconstpointer *)(ip + 1)) = patch_info->data.name;
5127 		case MONO_PATCH_INFO_NONE:
5128 		case MONO_PATCH_INFO_BB_OVF:
5129 		case MONO_PATCH_INFO_EXC_OVF:
5130 			/* everything is dealt with at epilog output time */
		/* Default: patch the branch/load at IP to point at the resolved target. */
5135 		arm_patch_general (domain, ip, target, dyn_code_mp);
5142 * Stack frame layout:
5144 * ------------------- fp
5145 * MonoLMF structure or saved registers
5146 * -------------------
5148 * -------------------
5150 * -------------------
5151 * optional 8 bytes for tracing
5152 * -------------------
5153 * param area size is cfg->param_area
5154 * ------------------- sp
/*
 * mono_arch_emit_prolog:
 *
 *   Emit the method prolog: save callee-saved registers (or a full MonoLMF
 * when method->save_lmf is set), allocate and align the stack frame, set up
 * the frame register, spill incoming arguments from their arrival locations
 * (registers / caller stack) into their home slots, and initialize the
 * sequence-point / single-stepping support variables.  DWARF unwind ops and
 * GC slot-type info are recorded alongside every stack adjustment.
 * Returns the updated native-code pointer via cfg->native_code/code_len.
 */
5157 mono_arch_emit_prolog (MonoCompile *cfg)
5159 	MonoMethod *method = cfg->method;
5161 	MonoMethodSignature *sig;
5163 	int alloc_size, orig_alloc_size, pos, max_offset, i, rot_amount;
5168 	int prev_sp_offset, reg_offset;
5170 	if (mono_jit_trace_calls != NULL && mono_trace_eval (method))
	/* Initial estimate of the buffer size; grown later if the epilog/exceptions need more. */
5173 	sig = mono_method_signature (method);
5174 	cfg->code_size = 256 + sig->param_count * 64;
5175 	code = cfg->native_code = g_malloc (cfg->code_size);
	/* On entry the CFA is at SP+0. */
5177 	mono_emit_unwind_op_def_cfa (cfg, code, ARMREG_SP, 0);
5179 	alloc_size = cfg->stack_offset;
5185 	 * The iphone uses R7 as the frame pointer, and it points at the saved
5190 	 * We can't use r7 as a frame pointer since it points into the middle of
5191 	 * the frame, so we keep using our own frame pointer.
5192 	 * FIXME: Optimize this.
	/* iphone_abi path: push the r7/lr pair the iOS ABI expects for backtraces. */
5194 	ARM_PUSH (code, (1 << ARMREG_R7) | (1 << ARMREG_LR));
5195 	ARM_MOV_REG_REG (code, ARMREG_R7, ARMREG_SP);
5196 	prev_sp_offset += 8; /* r7 and lr */
5197 	mono_emit_unwind_op_def_cfa_offset (cfg, code, prev_sp_offset);
5198 	mono_emit_unwind_op_offset (cfg, code, ARMREG_R7, (- prev_sp_offset) + 0);
5201 	if (!method->save_lmf) {
	/* Plain frame: push used callee-saved regs (plus LR unless already pushed above). */
5203 		/* No need to push LR again */
5204 		if (cfg->used_int_regs)
5205 			ARM_PUSH (code, cfg->used_int_regs);
5207 		ARM_PUSH (code, cfg->used_int_regs | (1 << ARMREG_LR));
5208 		prev_sp_offset += 4;
5210 	for (i = 0; i < 16; ++i) {
5211 		if (cfg->used_int_regs & (1 << i))
5212 			prev_sp_offset += 4;
5214 	mono_emit_unwind_op_def_cfa_offset (cfg, code, prev_sp_offset);
	/* Record the CFA-relative save slot of each pushed register; none hold GC refs. */
5216 	for (i = 0; i < 16; ++i) {
5217 		if ((cfg->used_int_regs & (1 << i))) {
5218 			mono_emit_unwind_op_offset (cfg, code, i, (- prev_sp_offset) + reg_offset);
5219 			mini_gc_set_slot_type_from_cfa (cfg, (- prev_sp_offset) + reg_offset, SLOT_NOREF);
5224 	mono_emit_unwind_op_offset (cfg, code, ARMREG_LR, -4);
5225 	mini_gc_set_slot_type_from_cfa (cfg, -4, SLOT_NOREF);
5227 	mono_emit_unwind_op_offset (cfg, code, ARMREG_LR, -4);
5228 	mini_gc_set_slot_type_from_cfa (cfg, -4, SLOT_NOREF);
	/* save_lmf path: push r4-r12 + lr (0x5ff0) so the LMF captures the full register state. */
5231 	ARM_MOV_REG_REG (code, ARMREG_IP, ARMREG_SP);
5232 	ARM_PUSH (code, 0x5ff0);
5233 	prev_sp_offset += 4 * 10; /* all but r0-r3, sp and pc */
5234 	mono_emit_unwind_op_def_cfa_offset (cfg, code, prev_sp_offset);
5236 	for (i = 0; i < 16; ++i) {
5237 		if ((i > ARMREG_R3) && (i != ARMREG_SP) && (i != ARMREG_PC)) {
5238 			/* The original r7 is saved at the start */
5239 			if (!(iphone_abi && i == ARMREG_R7))
5240 				mono_emit_unwind_op_offset (cfg, code, i, (- prev_sp_offset) + reg_offset);
5244 	g_assert (reg_offset == 4 * 10);
5245 	pos += sizeof (MonoLMF) - (4 * 10);
5249 	orig_alloc_size = alloc_size;
5250 	// align to MONO_ARCH_FRAME_ALIGNMENT bytes
5251 	if (alloc_size & (MONO_ARCH_FRAME_ALIGNMENT - 1)) {
5252 		alloc_size += MONO_ARCH_FRAME_ALIGNMENT - 1;
5253 		alloc_size &= ~(MONO_ARCH_FRAME_ALIGNMENT - 1);
5256 	/* the stack used in the pushed regs */
5257 	if (prev_sp_offset & 4)
5259 	cfg->stack_usage = alloc_size;
	/* Subtract the frame size from SP — one SUB if it fits a rotated imm8, else via IP. */
5261 	if ((i = mono_arm_is_rotated_imm8 (alloc_size, &rot_amount)) >= 0) {
5262 		ARM_SUB_REG_IMM (code, ARMREG_SP, ARMREG_SP, i, rot_amount);
5264 		code = mono_arm_emit_load_imm (code, ARMREG_IP, alloc_size);
5265 		ARM_SUB_REG_REG (code, ARMREG_SP, ARMREG_SP, ARMREG_IP);
5267 	mono_emit_unwind_op_def_cfa_offset (cfg, code, prev_sp_offset + alloc_size);
5269 	if (cfg->frame_reg != ARMREG_SP) {
5270 		ARM_MOV_REG_REG (code, cfg->frame_reg, ARMREG_SP);
5271 		mono_emit_unwind_op_def_cfa_reg (cfg, code, cfg->frame_reg);
5273 	//g_print ("prev_sp_offset: %d, alloc_size:%d\n", prev_sp_offset, alloc_size);
5274 	prev_sp_offset += alloc_size;
	/* Alignment padding never holds references. */
5276 	for (i = 0; i < alloc_size - orig_alloc_size; i += 4)
5277 		mini_gc_set_slot_type_from_cfa (cfg, (- prev_sp_offset) + orig_alloc_size + i, SLOT_NOREF);
5279 	/* compute max_offset in order to use short forward jumps
5280 	 * we could skip do it on arm because the immediate displacement
5281 	 * for jumps is large enough, it may be useful later for constant pools
5284 	for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
5285 		MonoInst *ins = bb->code;
5286 		bb->max_offset = max_offset;
5288 		if (cfg->prof_options & MONO_PROFILE_COVERAGE)
5291 		MONO_BB_FOR_EACH_INS (bb, ins)
5292 			max_offset += ((guint8 *)ins_get_spec (ins->opcode))[MONO_INST_LEN];
5295 	/* store runtime generic context */
5296 	if (cfg->rgctx_var) {
5297 		MonoInst *ins = cfg->rgctx_var;
5299 		g_assert (ins->opcode == OP_REGOFFSET);
5301 		if (arm_is_imm12 (ins->inst_offset)) {
5302 			ARM_STR_IMM (code, MONO_ARCH_RGCTX_REG, ins->inst_basereg, ins->inst_offset);
5304 			code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_offset);
5305 			ARM_STR_REG_REG (code, MONO_ARCH_RGCTX_REG, ins->inst_basereg, ARMREG_LR);
5309 	/* load arguments allocated to register from the stack */
5312 	cinfo = get_call_info (cfg->generic_sharing_context, NULL, sig);
	/* Spill the hidden vtype-return address argument into its home slot. */
5314 	if (cinfo->vtype_retaddr) {
5315 		ArgInfo *ainfo = &cinfo->ret;
5316 		inst = cfg->vret_addr;
5317 		g_assert (arm_is_imm12 (inst->inst_offset));
5318 		ARM_STR_IMM (code, ainfo->reg, inst->inst_basereg, inst->inst_offset);
5321 	if (sig->call_convention == MONO_CALL_VARARG) {
5322 		ArgInfo *cookie = &cinfo->sig_cookie;
5324 		/* Save the sig cookie address */
5325 		g_assert (cookie->storage == RegTypeBase);
5327 		g_assert (arm_is_imm12 (prev_sp_offset + cookie->offset));
5328 		g_assert (arm_is_imm12 (cfg->sig_cookie));
5329 		ARM_ADD_REG_IMM8 (code, ARMREG_IP, cfg->frame_reg, prev_sp_offset + cookie->offset);
5330 		ARM_STR_IMM (code, ARMREG_IP, cfg->frame_reg, cfg->sig_cookie);
	/* Move each incoming argument from its arrival location to its home (register or frame slot). */
5333 	for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
5334 		ArgInfo *ainfo = cinfo->args + i;
5335 		inst = cfg->args [pos];
5337 		if (cfg->verbose_level > 2)
5338 			g_print ("Saving argument %d (type: %d)\n", i, ainfo->storage);
5339 		if (inst->opcode == OP_REGVAR) {
		/* Argument lives in a register for the whole method. */
5340 			if (ainfo->storage == RegTypeGeneral)
5341 				ARM_MOV_REG_REG (code, inst->dreg, ainfo->reg);
5342 			else if (ainfo->storage == RegTypeFP) {
5343 				g_assert_not_reached ();
5344 			} else if (ainfo->storage == RegTypeBase) {
5345 				if (arm_is_imm12 (prev_sp_offset + ainfo->offset)) {
5346 					ARM_LDR_IMM (code, inst->dreg, ARMREG_SP, (prev_sp_offset + ainfo->offset));
				/* NOTE(review): loads inst->inst_offset here rather than
				 * prev_sp_offset + ainfo->offset as the imm12 branch does —
				 * presumably intentional for this storage class, but verify. */
5348 					code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
5349 					ARM_LDR_REG_REG (code, inst->dreg, ARMREG_SP, ARMREG_IP);
5352 				g_assert_not_reached ();
5354 			if (cfg->verbose_level > 2)
5355 				g_print ("Argument %d assigned to register %s\n", pos, mono_arch_regname (inst->dreg));
5357 			/* the argument should be put on the stack: FIXME handle size != word */
5358 			if (ainfo->storage == RegTypeGeneral || ainfo->storage == RegTypeIRegPair || ainfo->storage == RegTypeGSharedVtInReg) {
			/* Arrived in register(s): store to the frame slot, sized by ainfo->size. */
5359 				switch (ainfo->size) {
5361 					if (arm_is_imm12 (inst->inst_offset))
5362 						ARM_STRB_IMM (code, ainfo->reg, inst->inst_basereg, inst->inst_offset);
5364 						code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
5365 						ARM_STRB_REG_REG (code, ainfo->reg, inst->inst_basereg, ARMREG_IP);
5369 					if (arm_is_imm8 (inst->inst_offset)) {
5370 						ARM_STRH_IMM (code, ainfo->reg, inst->inst_basereg, inst->inst_offset);
5372 						code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
5373 						ARM_STRH_REG_REG (code, ainfo->reg, inst->inst_basereg, ARMREG_IP);
				/* 8-byte case: store both halves of the register pair. */
5377 					if (arm_is_imm12 (inst->inst_offset)) {
5378 						ARM_STR_IMM (code, ainfo->reg, inst->inst_basereg, inst->inst_offset);
5380 						code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
5381 						ARM_STR_REG_REG (code, ainfo->reg, inst->inst_basereg, ARMREG_IP);
5383 					if (arm_is_imm12 (inst->inst_offset + 4)) {
5384 						ARM_STR_IMM (code, ainfo->reg + 1, inst->inst_basereg, inst->inst_offset + 4);
5386 						code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset + 4);
5387 						ARM_STR_REG_REG (code, ainfo->reg + 1, inst->inst_basereg, ARMREG_IP);
5391 					if (arm_is_imm12 (inst->inst_offset)) {
5392 						ARM_STR_IMM (code, ainfo->reg, inst->inst_basereg, inst->inst_offset);
5394 						code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
5395 						ARM_STR_REG_REG (code, ainfo->reg, inst->inst_basereg, ARMREG_IP);
5399 			} else if (ainfo->storage == RegTypeBaseGen) {
			/* Split argument: low word in r3, high word on the caller's stack. */
5400 				g_assert (arm_is_imm12 (prev_sp_offset + ainfo->offset));
5401 				g_assert (arm_is_imm12 (inst->inst_offset));
5402 				ARM_LDR_IMM (code, ARMREG_LR, ARMREG_SP, (prev_sp_offset + ainfo->offset));
5403 				ARM_STR_IMM (code, ARMREG_LR, inst->inst_basereg, inst->inst_offset + 4);
5404 				ARM_STR_IMM (code, ARMREG_R3, inst->inst_basereg, inst->inst_offset);
5405 			} else if (ainfo->storage == RegTypeBase || ainfo->storage == RegTypeGSharedVtOnStack) {
			/* Arrived on the caller's stack: load via LR, then store to the home slot. */
5406 				if (arm_is_imm12 (prev_sp_offset + ainfo->offset)) {
5407 					ARM_LDR_IMM (code, ARMREG_LR, ARMREG_SP, (prev_sp_offset + ainfo->offset));
5409 					code = mono_arm_emit_load_imm (code, ARMREG_IP, prev_sp_offset + ainfo->offset);
5410 					ARM_LDR_REG_REG (code, ARMREG_LR, ARMREG_SP, ARMREG_IP);
5413 				switch (ainfo->size) {
5415 					if (arm_is_imm8 (inst->inst_offset)) {
5416 						ARM_STRB_IMM (code, ARMREG_LR, inst->inst_basereg, inst->inst_offset);
5418 						code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
5419 						ARM_STRB_REG_REG (code, ARMREG_LR, inst->inst_basereg, ARMREG_IP);
5423 					if (arm_is_imm8 (inst->inst_offset)) {
5424 						ARM_STRH_IMM (code, ARMREG_LR, inst->inst_basereg, inst->inst_offset);
5426 						code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
5427 						ARM_STRH_REG_REG (code, ARMREG_LR, inst->inst_basereg, ARMREG_IP);
				/* 8-byte case: copy both words, one at a time through LR. */
5431 					if (arm_is_imm12 (inst->inst_offset)) {
5432 						ARM_STR_IMM (code, ARMREG_LR, inst->inst_basereg, inst->inst_offset);
5434 						code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
5435 						ARM_STR_REG_REG (code, ARMREG_LR, inst->inst_basereg, ARMREG_IP);
5437 					if (arm_is_imm12 (prev_sp_offset + ainfo->offset + 4)) {
5438 						ARM_LDR_IMM (code, ARMREG_LR, ARMREG_SP, (prev_sp_offset + ainfo->offset + 4));
5440 						code = mono_arm_emit_load_imm (code, ARMREG_IP, prev_sp_offset + ainfo->offset + 4);
5441 						ARM_LDR_REG_REG (code, ARMREG_LR, ARMREG_SP, ARMREG_IP);
5443 					if (arm_is_imm12 (inst->inst_offset + 4)) {
5444 						ARM_STR_IMM (code, ARMREG_LR, inst->inst_basereg, inst->inst_offset + 4);
5446 						code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset + 4);
5447 						ARM_STR_REG_REG (code, ARMREG_LR, inst->inst_basereg, ARMREG_IP);
5451 					if (arm_is_imm12 (inst->inst_offset)) {
5452 						ARM_STR_IMM (code, ARMREG_LR, inst->inst_basereg, inst->inst_offset);
5454 						code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
5455 						ARM_STR_REG_REG (code, ARMREG_LR, inst->inst_basereg, ARMREG_IP);
5459 			} else if (ainfo->storage == RegTypeFP) {
5460 				g_assert_not_reached ();
5461 			} else if (ainfo->storage == RegTypeStructByVal) {
			/* Struct passed partly/fully in registers: store register words, memcpy the rest. */
5462 				int doffset = inst->inst_offset;
5466 				size = mini_type_stack_size_full (cfg->generic_sharing_context, inst->inst_vtype, NULL, sig->pinvoke);
5467 				for (cur_reg = 0; cur_reg < ainfo->size; ++cur_reg) {
5468 					if (arm_is_imm12 (doffset)) {
5469 						ARM_STR_IMM (code, ainfo->reg + cur_reg, inst->inst_basereg, doffset);
5471 						code = mono_arm_emit_load_imm (code, ARMREG_IP, doffset);
5472 						ARM_STR_REG_REG (code, ainfo->reg + cur_reg, inst->inst_basereg, ARMREG_IP);
5474 					soffset += sizeof (gpointer);
5475 					doffset += sizeof (gpointer);
5477 				if (ainfo->vtsize) {
5478 					/* FIXME: handle overrun! with struct sizes not multiple of 4 */
5479 					//g_print ("emit_memcpy (prev_sp_ofs: %d, ainfo->offset: %d, soffset: %d)\n", prev_sp_offset, ainfo->offset, soffset);
5480 					code = emit_memcpy (code, ainfo->vtsize * sizeof (gpointer), inst->inst_basereg, doffset, ARMREG_SP, prev_sp_offset + ainfo->offset);
5482 			} else if (ainfo->storage == RegTypeStructByAddr) {
5483 				g_assert_not_reached ();
5484 				/* FIXME: handle overrun! with struct sizes not multiple of 4 */
5485 				code = emit_memcpy (code, ainfo->vtsize * sizeof (gpointer), inst->inst_basereg, inst->inst_offset, ainfo->reg, 0);
5487 				g_assert_not_reached ();
5492 	if (method->save_lmf)
5493 		code = emit_save_lmf (cfg, code, alloc_size - lmf_offset);
5496 		code = mono_arch_instrument_prolog (cfg, mono_trace_enter_method, code, TRUE);
	/* Seq-point support: materialize the SeqPointInfo pointer into its variable. */
5498 	if (cfg->arch.seq_point_info_var) {
5499 		MonoInst *ins = cfg->arch.seq_point_info_var;
5501 		/* Initialize the variable from a GOT slot */
5502 		mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_SEQ_POINT_INFO, cfg->method);
5503 #ifdef USE_JUMP_TABLES
5505 		gpointer *jte = mono_jumptable_add_entry ();
5506 		code = mono_arm_load_jumptable_entry (code, jte, ARMREG_IP);
5507 		ARM_LDR_IMM (code, ARMREG_R0, ARMREG_IP, 0);
5509 		/** XXX: is it correct? */
		/* Non-jumptable path: PC-relative load over an inline NULL slot that gets patched. */
5511 		ARM_LDR_IMM (code, ARMREG_R0, ARMREG_PC, 0);
5513 		*(gpointer*)code = NULL;
5516 		ARM_LDR_REG_REG (code, ARMREG_R0, ARMREG_PC, ARMREG_R0);
5518 		g_assert (ins->opcode == OP_REGOFFSET);
5520 		if (arm_is_imm12 (ins->inst_offset)) {
5521 			ARM_STR_IMM (code, ARMREG_R0, ins->inst_basereg, ins->inst_offset);
5523 			code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_offset);
5524 			ARM_STR_REG_REG (code, ARMREG_R0, ins->inst_basereg, ARMREG_LR);
5528 		/* Initialize ss_trigger_page_var */
5529 		if (!cfg->soft_breakpoints) {
5530 			MonoInst *info_var = cfg->arch.seq_point_info_var;
5531 			MonoInst *ss_trigger_page_var = cfg->arch.ss_trigger_page_var;
5532 			int dreg = ARMREG_LR;
5535 			g_assert (info_var->opcode == OP_REGOFFSET);
5536 			g_assert (arm_is_imm12 (info_var->inst_offset));
5538 			ARM_LDR_IMM (code, dreg, info_var->inst_basereg, info_var->inst_offset);
5539 			/* Load the trigger page addr */
5540 			ARM_LDR_IMM (code, dreg, dreg, G_STRUCT_OFFSET (SeqPointInfo, ss_trigger_page));
5541 			ARM_STR_IMM (code, dreg, ss_trigger_page_var->inst_basereg, ss_trigger_page_var->inst_offset);
	/* Soft-breakpoint support: cache &ss_trigger_var and the two wrapper addresses. */
5545 	if (cfg->arch.seq_point_read_var) {
5546 		MonoInst *read_ins = cfg->arch.seq_point_read_var;
5547 		MonoInst *ss_method_ins = cfg->arch.seq_point_ss_method_var;
5548 		MonoInst *bp_method_ins = cfg->arch.seq_point_bp_method_var;
5549 #ifdef USE_JUMP_TABLES
5552 		g_assert (read_ins->opcode == OP_REGOFFSET);
5553 		g_assert (arm_is_imm12 (read_ins->inst_offset));
5554 		g_assert (ss_method_ins->opcode == OP_REGOFFSET);
5555 		g_assert (arm_is_imm12 (ss_method_ins->inst_offset));
5556 		g_assert (bp_method_ins->opcode == OP_REGOFFSET);
5557 		g_assert (arm_is_imm12 (bp_method_ins->inst_offset));
5559 #ifdef USE_JUMP_TABLES
5560 		jte = mono_jumptable_add_entries (3);
5561 		jte [0] = (gpointer)&ss_trigger_var;
5562 		jte [1] = single_step_func_wrapper;
5563 		jte [2] = breakpoint_func_wrapper;
5564 		code = mono_arm_load_jumptable_entry_addr (code, jte, ARMREG_LR);
		/* Non-jumptable path: the three pointers are emitted inline after a PC-copy. */
5566 		ARM_MOV_REG_REG (code, ARMREG_LR, ARMREG_PC);
5568 		*(volatile int **)code = &ss_trigger_var;
5570 		*(gpointer*)code = single_step_func_wrapper;
5572 		*(gpointer*)code = breakpoint_func_wrapper;
5576 		ARM_LDR_IMM (code, ARMREG_IP, ARMREG_LR, 0);
5577 		ARM_STR_IMM (code, ARMREG_IP, read_ins->inst_basereg, read_ins->inst_offset);
5578 		ARM_LDR_IMM (code, ARMREG_IP, ARMREG_LR, 4);
5579 		ARM_STR_IMM (code, ARMREG_IP, ss_method_ins->inst_basereg, ss_method_ins->inst_offset);
5580 		ARM_LDR_IMM (code, ARMREG_IP, ARMREG_LR, 8);
5581 		ARM_STR_IMM (code, ARMREG_IP, bp_method_ins->inst_basereg, bp_method_ins->inst_offset);
5584 	cfg->code_len = code - cfg->native_code;
5585 	g_assert (cfg->code_len < cfg->code_size);
/*
 * mono_arch_emit_epilog:
 *
 *   Emit the method epilog: optionally run the leave-method tracer, load a
 * by-value vtype return into r0, restore the saved registers (either from
 * the MonoLMF or from the plain pushed-register area), unwind the stack and
 * return by popping the saved LR into PC.  The code buffer is grown first
 * so the epilog always fits.
 */
5592 mono_arch_emit_epilog (MonoCompile *cfg)
5594 	MonoMethod *method = cfg->method;
5595 	int pos, i, rot_amount;
5596 	int max_epilog_size = 16 + 20*4;
5600 	if (cfg->method->save_lmf)
5601 		max_epilog_size += 128;
5603 	if (mono_jit_trace_calls != NULL)
5604 		max_epilog_size += 50;
5606 	if (cfg->prof_options & MONO_PROFILE_ENTER_LEAVE)
5607 		max_epilog_size += 50;
	/* Double the buffer until the worst-case epilog fits (16-byte safety margin). */
5609 	while (cfg->code_len + max_epilog_size > (cfg->code_size - 16)) {
5610 		cfg->code_size *= 2;
5611 		cfg->native_code = g_realloc (cfg->native_code, cfg->code_size);
5612 		cfg->stat_code_reallocs++;
5616 	 * Keep in sync with OP_JMP
5618 	code = cfg->native_code + cfg->code_len;
5620 	if (mono_jit_trace_calls != NULL && mono_trace_eval (method)) {
5621 		code = mono_arch_instrument_epilog (cfg, mono_trace_leave_method, code, TRUE);
5625 	/* Load returned vtypes into registers if needed */
5626 	cinfo = cfg->arch.cinfo;
5627 	if (cinfo->ret.storage == RegTypeStructByVal) {
5628 		MonoInst *ins = cfg->ret;
5630 		if (arm_is_imm12 (ins->inst_offset)) {
5631 			ARM_LDR_IMM (code, ARMREG_R0, ins->inst_basereg, ins->inst_offset);
5633 			code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_offset);
5634 			ARM_LDR_REG_REG (code, ARMREG_R0, ins->inst_basereg, ARMREG_LR);
5638 	if (method->save_lmf) {
	/* LMF frame: unlink the LMF, then restore r4-r12/lr straight out of MonoLMF->iregs. */
5639 		int lmf_offset, reg, sp_adj, regmask;
5640 		/* all but r0-r3, sp and pc */
5641 		pos += sizeof (MonoLMF) - (MONO_ARM_NUM_SAVED_REGS * sizeof (mgreg_t));
5644 		code = emit_restore_lmf (cfg, code, cfg->stack_usage - lmf_offset);
5646 		/* This points to r4 inside MonoLMF->iregs */
5647 		sp_adj = (sizeof (MonoLMF) - MONO_ARM_NUM_SAVED_REGS * sizeof (mgreg_t));
5649 		regmask = 0x9ff0; /* restore lr to pc */
5650 		/* Skip caller saved registers not used by the method */
5651 		while (!(cfg->used_int_regs & (1 << reg)) && reg < ARMREG_FP) {
5652 			regmask &= ~(1 << reg);
		/* iphone_abi: pc is restored separately below via the r7/lr pair. */
5657 		/* Restored later */
5658 		regmask &= ~(1 << ARMREG_PC);
5659 		/* point sp at the registers to restore: 10 is 14 -4, because we skip r0-r3 */
5660 		code = emit_big_add (code, ARMREG_SP, cfg->frame_reg, cfg->stack_usage - lmf_offset + sp_adj);
5662 		ARM_POP (code, regmask);
5664 		/* Restore saved r7, restore LR to PC */
5665 		/* Skip lr from the lmf */
5666 		ARM_ADD_REG_IMM (code, ARMREG_SP, ARMREG_SP, sizeof (gpointer), 0);
5667 		ARM_POP (code, (1 << ARMREG_R7) | (1 << ARMREG_PC));
	/* Plain frame: deallocate the frame, then pop the saved registers (+PC to return). */
5670 		if ((i = mono_arm_is_rotated_imm8 (cfg->stack_usage, &rot_amount)) >= 0) {
5671 			ARM_ADD_REG_IMM (code, ARMREG_SP, cfg->frame_reg, i, rot_amount);
5673 			code = mono_arm_emit_load_imm (code, ARMREG_IP, cfg->stack_usage);
5674 			ARM_ADD_REG_REG (code, ARMREG_SP, cfg->frame_reg, ARMREG_IP);
5678 		/* Restore saved gregs */
5679 		if (cfg->used_int_regs)
5680 			ARM_POP (code, cfg->used_int_regs);
5681 		/* Restore saved r7, restore LR to PC */
5682 		ARM_POP (code, (1 << ARMREG_R7) | (1 << ARMREG_PC));
5684 		ARM_POP (code, cfg->used_int_regs | (1 << ARMREG_PC));
5688 	cfg->code_len = code - cfg->native_code;
5690 	g_assert (cfg->code_len < cfg->code_size);
/*
 * mono_arch_emit_exceptions:
 *
 *   Emit the out-of-line exception-throw stubs referenced by the method body.
 * Each distinct exception class gets one stub (conditional branches emitted
 * earlier by EMIT_COND_SYSTEM_EXCEPTION are patched to point at it, and
 * later throws of the same class reuse it).  A stub loads the exception
 * type token and the faulting address, then calls
 * mono_arch_throw_corlib_exception.
 */
5695 mono_arch_emit_exceptions (MonoCompile *cfg)
5697 	MonoJumpInfo *patch_info;
5700 	guint8* exc_throw_pos [MONO_EXC_INTRINS_NUM];
5701 	guint8 exc_throw_found [MONO_EXC_INTRINS_NUM];
5702 	int max_epilog_size = 50;
5704 	for (i = 0; i < MONO_EXC_INTRINS_NUM; i++) {
5705 		exc_throw_pos [i] = NULL;
5706 		exc_throw_found [i] = 0;
5709 	/* count the number of exception infos */
5712 	 * make sure we have enough space for exceptions
	/* 32 bytes per distinct exception class (stubs are shared between throw sites). */
5714 	for (patch_info = cfg->patch_info; patch_info; patch_info = patch_info->next) {
5715 		if (patch_info->type == MONO_PATCH_INFO_EXC) {
5716 			i = mini_exception_id_by_name (patch_info->data.target);
5717 			if (!exc_throw_found [i]) {
5718 				max_epilog_size += 32;
5719 				exc_throw_found [i] = TRUE;
5724 	while (cfg->code_len + max_epilog_size > (cfg->code_size - 16)) {
5725 		cfg->code_size *= 2;
5726 		cfg->native_code = g_realloc (cfg->native_code, cfg->code_size);
5727 		cfg->stat_code_reallocs++;
5730 	code = cfg->native_code + cfg->code_len;
5732 	/* add code to raise exceptions */
5733 	for (patch_info = cfg->patch_info; patch_info; patch_info = patch_info->next) {
5734 		switch (patch_info->type) {
5735 		case MONO_PATCH_INFO_EXC: {
5736 			MonoClass *exc_class;
5737 			unsigned char *ip = patch_info->ip.i + cfg->native_code;
5739 			i = mini_exception_id_by_name (patch_info->data.target);
5740 			if (exc_throw_pos [i]) {
			/* Stub already emitted for this class: just retarget the branch. */
5741 				arm_patch (ip, exc_throw_pos [i]);
5742 				patch_info->type = MONO_PATCH_INFO_NONE;
5745 			exc_throw_pos [i] = code;
5747 			arm_patch (ip, code);
5749 			exc_class = mono_class_from_name (mono_defaults.corlib, "System", patch_info->data.name);
5750 			g_assert (exc_class);
		/* r1 = faulting return address; r0 = type token (loaded below). */
5752 			ARM_MOV_REG_REG (code, ARMREG_R1, ARMREG_LR);
5753 #ifdef USE_JUMP_TABLES
5755 			gpointer *jte = mono_jumptable_add_entries (2);
			/* The patch info is repurposed: it now resolves jte [0] to the throw helper. */
5756 			patch_info->type = MONO_PATCH_INFO_INTERNAL_METHOD;
5757 			patch_info->data.name = "mono_arch_throw_corlib_exception";
5758 			patch_info->ip.i = code - cfg->native_code;
5759 			code = mono_arm_load_jumptable_entry_addr (code, jte, ARMREG_R0);
5760 			ARM_LDR_IMM (code, ARMREG_IP, ARMREG_R0, 0);
5761 			ARM_LDR_IMM (code, ARMREG_R0, ARMREG_R0, 4);
5762 			ARM_BLX_REG (code, ARMREG_IP);
5763 			jte [1] = GUINT_TO_POINTER (exc_class->type_token);
			/* Non-jumptable path: token is inlined in the code stream after a PC-relative load. */
5766 			ARM_LDR_IMM (code, ARMREG_R0, ARMREG_PC, 0);
5767 			patch_info->type = MONO_PATCH_INFO_INTERNAL_METHOD;
5768 			patch_info->data.name = "mono_arch_throw_corlib_exception";
5769 			patch_info->ip.i = code - cfg->native_code;
5771 			*(guint32*)(gpointer)code = exc_class->type_token;
5782 	cfg->code_len = code - cfg->native_code;
5784 	g_assert (cfg->code_len < cfg->code_size);
5788 #endif /* #ifndef DISABLE_JIT */
/*
 * mono_arch_finish_init:
 *
 *   Cache the TLS offsets of the LMF and LMF-address slots in file-scope
 * globals once at runtime init, so prolog/epilog emission does not need to
 * look them up repeatedly.
 */
5791 mono_arch_finish_init (void)
5793 	lmf_tls_offset = mono_get_lmf_tls_offset ();
5794 	lmf_addr_tls_offset = mono_get_lmf_addr_tls_offset ();
5798 mono_arch_free_jit_tls_data (MonoJitTlsData *tls)
5803 mono_arch_emit_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
5810 mono_arch_print_tree (MonoInst *tree, int arity)
5820 mono_arch_get_patch_offset (guint8 *code)
5827 mono_arch_flush_register_windows (void)
5831 #ifdef MONO_ARCH_HAVE_IMT
/*
 * mono_arch_emit_imt_argument:
 *
 *   Arrange for the IMT/method identifier to be passed to CALL in register
 * V5 (r5).  AOT compilation loads the method via an AOT constant; otherwise,
 * when generics/LLVM/jumptables require it, the method pointer (or the
 * explicit IMT_ARG) is materialized into a vreg and bound to V5 as a call
 * out-argument.  In either case the callee-side thunk reads it back with
 * mono_arch_find_imt_method ().
 */
5836 mono_arch_emit_imt_argument (MonoCompile *cfg, MonoCallInst *call, MonoInst *imt_arg)
5838 	int method_reg = mono_alloc_ireg (cfg);
5839 #ifdef USE_JUMP_TABLES
5840 	int use_jumptables = TRUE;
5842 	int use_jumptables = FALSE;
5845 	if (cfg->compile_aot) {
5848 		call->dynamic_imt_arg = TRUE;
5851 		MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, method_reg, imt_arg->dreg);
		/* No explicit imt_arg: load the method itself as an AOT constant. */
5853 		MONO_INST_NEW (cfg, ins, OP_AOTCONST);
5854 		ins->dreg = method_reg;
5855 		ins->inst_p0 = call->method;
5856 		ins->inst_c1 = MONO_PATCH_INFO_METHODCONST;
5857 		MONO_ADD_INS (cfg->cbb, ins);
5859 		mono_call_inst_add_outarg_reg (cfg, call, method_reg, ARMREG_V5, FALSE);
5860 	} else if (cfg->generic_context || imt_arg || mono_use_llvm || use_jumptables) {
5861 		/* Always pass in a register for simplicity */
5862 		call->dynamic_imt_arg = TRUE;
5864 		cfg->uses_rgctx_reg = TRUE;
5867 		MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, method_reg, imt_arg->dreg);
		/* No explicit imt_arg: pass the method pointer itself as a constant. */
5871 		MONO_INST_NEW (cfg, ins, OP_PCONST);
5872 		ins->inst_p0 = call->method;
5873 		ins->dreg = method_reg;
5874 		MONO_ADD_INS (cfg->cbb, ins);
5877 		mono_call_inst_add_outarg_reg (cfg, call, method_reg, ARMREG_V5, FALSE);
5881 #endif /* DISABLE_JIT */
/*
 * mono_arch_find_imt_method:
 *
 *   Recover the IMT method identifier at an IMT-thunk entry.  With jump
 * tables (or when the inline constant is absent) it is simply read from
 * register V5 (r5), matching what mono_arch_emit_imt_argument () stored
 * there.  Otherwise it is the 32-bit constant emitted in the code stream
 * immediately after the LDR-from-PC instruction at CODE.
 */
5884 mono_arch_find_imt_method (mgreg_t *regs, guint8 *code)
5886 #ifdef USE_JUMP_TABLES
5887 	return (MonoMethod*)regs [ARMREG_V5];
5890 	guint32 *code_ptr = (guint32*)code;
5892 	method = GUINT_TO_POINTER (code_ptr [1]);
5896 		return (MonoMethod*)regs [ARMREG_V5];
5898 	/* The IMT value is stored in the code stream right after the LDC instruction. */
5899 	/* This is no longer true for the gsharedvt_in trampoline */
5901 	if (!IS_LDR_PC (code_ptr [0])) {
5902 		g_warning ("invalid code stream, instruction before IMT value is not a LDC in %s() (code %p value 0: 0x%x -1: 0x%x -2: 0x%x)", __FUNCTION__, code, code_ptr [2], code_ptr [1], code_ptr [0]);
5903 		g_assert (IS_LDR_PC (code_ptr [0]));
5907 		/* This is AOTed code, or the gsharedvt trampoline, the IMT method is in V5 */
5908 		return (MonoMethod*)regs [ARMREG_V5];
5910 	return (MonoMethod*) method;
/*
 * mono_arch_find_static_call_vtable:
 *   The static-rgctx trampoline passes the vtable in the RGCTX register;
 * read it back from the saved register state.
 */
5915 mono_arch_find_static_call_vtable (mgreg_t *regs, guint8 *code)
5917 	return (MonoVTable*) regs [MONO_ARCH_RGCTX_REG];
5920 #define ENABLE_WRONG_METHOD_CHECK 0
5921 #define BASE_SIZE (6 * 4)
5922 #define BSEARCH_ENTRY_SIZE (4 * 4)
5923 #define CMP_SIZE (3 * 4)
5924 #define BRANCH_SIZE (1 * 4)
5925 #define CALL_SIZE (2 * 4)
5926 #define WMC_SIZE (8 * 4)
5927 #define DISTANCE(A, B) (((gint32)(B)) - ((gint32)(A)))
5929 #ifdef USE_JUMP_TABLES
/*
 * set_jumptable_element:
 *   Store VALUE into slot INDEX of jump table BASE, asserting the slot has
 * not already been filled (each IMT-thunk slot is written exactly once).
 */
5931 set_jumptable_element (gpointer *base, guint32 index, gpointer value)
5933 	g_assert (base [index] == NULL);
5934 	base [index] = value;
/*
 * load_element_with_regbase_cond:
 *   Emit a conditional load of jump-table entry JTI (byte offset jti*4 from
 * BASE) into DREG, under condition COND.  Small offsets use a single
 * LDR-immediate; larger ones build the offset with MOVW/MOVT and use a
 * register-offset LDR.  Returns the advanced code pointer.
 */
5937 load_element_with_regbase_cond (arminstr_t *code, ARMReg dreg, ARMReg base, guint32 jti, int cond)
5939 	if (arm_is_imm12 (jti * 4)) {
5940 		ARM_LDR_IMM_COND (code, dreg, base, jti * 4, cond);
5942 		ARM_MOVW_REG_IMM_COND (code, dreg, (jti * 4) & 0xffff, cond);
		/* Only emit MOVT when the offset does not fit in 16 bits. */
5943 		if ((jti * 4) >> 16)
5944 			ARM_MOVT_REG_IMM_COND (code, dreg, ((jti * 4) >> 16) & 0xffff, cond);
5945 		ARM_LDR_REG_REG_SHIFT_COND (code, dreg, base, dreg, ARMSHIFT_LSL, 0, cond);
/*
 * arm_emit_value_and_patch_ldr:
 *   Emit VALUE into the code stream at CODE and patch the earlier
 * LDR-from-PC instruction at TARGET so its 12-bit immediate offset reaches
 * the emitted word.
 *   NOTE(review): `delta` is a guint32, so the `delta >= 0` half of the
 * assert is always true; only the <= 0xFFF bound is effective (and a
 * negative distance would wrap to a huge value and still trip it).
 */
5951 arm_emit_value_and_patch_ldr (arminstr_t *code, arminstr_t *target, guint32 value)
5953 	guint32 delta = DISTANCE (target, code);
5955 	g_assert (delta >= 0 && delta <= 0xFFF);
5956 	*target = *target | delta;
5962 #ifdef ENABLE_WRONG_METHOD_CHECK
/*
 * mini_dump_bad_imt:
 *   Debug helper for ENABLE_WRONG_METHOD_CHECK builds: print the IMT value
 * received vs. the one the thunk compared against, and the faulting ip.
 */
5964 mini_dump_bad_imt (int input_imt, int compared_imt, int pc)
5966 	g_print ("BAD IMT comparing %x with expected %x at ip %x", input_imt, compared_imt, pc);
5972 mono_arch_build_imt_thunk (MonoVTable *vtable, MonoDomain *domain, MonoIMTCheckItem **imt_entries, int count,
5973 gpointer fail_tramp)
5976 arminstr_t *code, *start;
5977 #ifdef USE_JUMP_TABLES
5980 gboolean large_offsets = FALSE;
5981 guint32 **constant_pool_starts;
5982 arminstr_t *vtable_target = NULL;
5983 int extra_space = 0;
5985 #ifdef ENABLE_WRONG_METHOD_CHECK
5990 #ifdef USE_JUMP_TABLES
5991 for (i = 0; i < count; ++i) {
5992 MonoIMTCheckItem *item = imt_entries [i];
5993 item->chunk_size += 4 * 16;
5994 if (!item->is_equals)
5995 imt_entries [item->check_target_idx]->compare_done = TRUE;
5996 size += item->chunk_size;
5999 constant_pool_starts = g_new0 (guint32*, count);
6001 for (i = 0; i < count; ++i) {
6002 MonoIMTCheckItem *item = imt_entries [i];
6003 if (item->is_equals) {
6004 gboolean fail_case = !item->check_target_idx && fail_tramp;
6006 if (item->has_target_code || !arm_is_imm12 (DISTANCE (vtable, &vtable->vtable[item->value.vtable_slot]))) {
6007 item->chunk_size += 32;
6008 large_offsets = TRUE;
6011 if (item->check_target_idx || fail_case) {
6012 if (!item->compare_done || fail_case)
6013 item->chunk_size += CMP_SIZE;
6014 item->chunk_size += BRANCH_SIZE;
6016 #if ENABLE_WRONG_METHOD_CHECK
6017 item->chunk_size += WMC_SIZE;
6021 item->chunk_size += 16;
6022 large_offsets = TRUE;
6024 item->chunk_size += CALL_SIZE;
6026 item->chunk_size += BSEARCH_ENTRY_SIZE;
6027 imt_entries [item->check_target_idx]->compare_done = TRUE;
6029 size += item->chunk_size;
6033 size += 4 * count; /* The ARM_ADD_REG_IMM to pop the stack */
6037 code = mono_method_alloc_generic_virtual_thunk (domain, size);
6039 code = mono_domain_code_reserve (domain, size);
6043 g_print ("Building IMT thunk for class %s %s entries %d code size %d code at %p end %p vtable %p fail_tramp %p\n", vtable->klass->name_space, vtable->klass->name, count, size, start, ((guint8*)start) + size, vtable, fail_tramp);
6044 for (i = 0; i < count; ++i) {
6045 MonoIMTCheckItem *item = imt_entries [i];
6046 g_print ("method %d (%p) %s vtable slot %p is_equals %d chunk size %d\n", i, item->key, ((MonoMethod*)item->key)->name, &vtable->vtable [item->value.vtable_slot], item->is_equals, item->chunk_size);
6050 #ifdef USE_JUMP_TABLES
6051 ARM_PUSH3 (code, ARMREG_R0, ARMREG_R1, ARMREG_R2);
6052 /* If jumptables we always pass the IMT method in R5 */
6053 ARM_MOV_REG_REG (code, ARMREG_R0, ARMREG_V5);
6054 #define VTABLE_JTI 0
6055 #define IMT_METHOD_OFFSET 0
6056 #define TARGET_CODE_OFFSET 1
6057 #define JUMP_CODE_OFFSET 2
6058 #define RECORDS_PER_ENTRY 3
6059 #define IMT_METHOD_JTI(idx) (1 + idx * RECORDS_PER_ENTRY + IMT_METHOD_OFFSET)
6060 #define TARGET_CODE_JTI(idx) (1 + idx * RECORDS_PER_ENTRY + TARGET_CODE_OFFSET)
6061 #define JUMP_CODE_JTI(idx) (1 + idx * RECORDS_PER_ENTRY + JUMP_CODE_OFFSET)
6063 jte = mono_jumptable_add_entries (RECORDS_PER_ENTRY * count + 1 /* vtable */);
6064 code = (arminstr_t *) mono_arm_load_jumptable_entry_addr ((guint8 *) code, jte, ARMREG_R2);
6065 ARM_LDR_IMM (code, ARMREG_IP, ARMREG_R2, VTABLE_JTI);
6066 set_jumptable_element (jte, VTABLE_JTI, vtable);
6069 ARM_PUSH4 (code, ARMREG_R0, ARMREG_R1, ARMREG_IP, ARMREG_PC);
6071 ARM_PUSH2 (code, ARMREG_R0, ARMREG_R1);
6072 ARM_LDR_IMM (code, ARMREG_R0, ARMREG_LR, -4);
6073 vtable_target = code;
6074 ARM_LDR_IMM (code, ARMREG_IP, ARMREG_PC, 0);
6076 if (mono_use_llvm) {
6077 /* LLVM always passes the IMT method in R5 */
6078 ARM_MOV_REG_REG (code, ARMREG_R0, ARMREG_V5);
6080 /* R0 == 0 means we are called from AOT code. In this case, V5 contains the IMT method */
6081 ARM_CMP_REG_IMM8 (code, ARMREG_R0, 0);
6082 ARM_MOV_REG_REG_COND (code, ARMREG_R0, ARMREG_V5, ARMCOND_EQ);
6086 for (i = 0; i < count; ++i) {
6087 MonoIMTCheckItem *item = imt_entries [i];
6088 #ifdef USE_JUMP_TABLES
6089 guint32 imt_method_jti = 0, target_code_jti = 0;
6091 arminstr_t *imt_method = NULL, *vtable_offset_ins = NULL, *target_code_ins = NULL;
6093 gint32 vtable_offset;
6095 item->code_target = (guint8*)code;
6097 if (item->is_equals) {
6098 gboolean fail_case = !item->check_target_idx && fail_tramp;
6100 if (item->check_target_idx || fail_case) {
6101 if (!item->compare_done || fail_case) {
6102 #ifdef USE_JUMP_TABLES
6103 imt_method_jti = IMT_METHOD_JTI (i);
6104 code = load_element_with_regbase_cond (code, ARMREG_R1, ARMREG_R2, imt_method_jti, ARMCOND_AL);
6107 ARM_LDR_IMM (code, ARMREG_R1, ARMREG_PC, 0);
6109 ARM_CMP_REG_REG (code, ARMREG_R0, ARMREG_R1);
6111 #ifdef USE_JUMP_TABLES
6112 code = load_element_with_regbase_cond (code, ARMREG_R1, ARMREG_R2, JUMP_CODE_JTI (i), ARMCOND_NE);
6113 ARM_BX_COND (code, ARMCOND_NE, ARMREG_R1);
6114 item->jmp_code = GUINT_TO_POINTER (JUMP_CODE_JTI (i));
6116 item->jmp_code = (guint8*)code;
6117 ARM_B_COND (code, ARMCOND_NE, 0);
6120 /*Enable the commented code to assert on wrong method*/
6121 #if ENABLE_WRONG_METHOD_CHECK
6122 #ifdef USE_JUMP_TABLES
6123 imt_method_jti = IMT_METHOD_JTI (i);
6124 code = load_element_with_regbase_cond (code, ARMREG_R1, ARMREG_R2, imt_method_jti, ARMCOND_AL);
6127 ARM_LDR_IMM (code, ARMREG_R1, ARMREG_PC, 0);
6129 ARM_CMP_REG_REG (code, ARMREG_R0, ARMREG_R1);
6131 ARM_B_COND (code, ARMCOND_EQ, 0);
6133 /* Define this if your system is so bad that gdb is failing. */
6134 #ifdef BROKEN_DEV_ENV
6135 ARM_MOV_REG_REG (code, ARMREG_R2, ARMREG_PC);
6137 arm_patch (code - 1, mini_dump_bad_imt);
6141 arm_patch (cond, code);
6145 if (item->has_target_code) {
6146 /* Load target address */
6147 #ifdef USE_JUMP_TABLES
6148 target_code_jti = TARGET_CODE_JTI (i);
6149 code = load_element_with_regbase_cond (code, ARMREG_R1, ARMREG_R2, target_code_jti, ARMCOND_AL);
6150 /* Restore registers */
6151 ARM_POP3 (code, ARMREG_R0, ARMREG_R1, ARMREG_R2);
6153 ARM_BX (code, ARMREG_R1);
6154 set_jumptable_element (jte, target_code_jti, item->value.target_code);
6156 target_code_ins = code;
6157 ARM_LDR_IMM (code, ARMREG_R1, ARMREG_PC, 0);
6158 /* Save it to the fourth slot */
6159 ARM_STR_IMM (code, ARMREG_R1, ARMREG_SP, 3 * sizeof (gpointer));
6160 /* Restore registers and branch */
6161 ARM_POP4 (code, ARMREG_R0, ARMREG_R1, ARMREG_IP, ARMREG_PC);
6163 code = arm_emit_value_and_patch_ldr (code, target_code_ins, (gsize)item->value.target_code);
6166 vtable_offset = DISTANCE (vtable, &vtable->vtable[item->value.vtable_slot]);
6167 if (!arm_is_imm12 (vtable_offset)) {
6169 * We need to branch to a computed address but we don't have
6170 * a free register to store it, since IP must contain the
6171 * vtable address. So we push the two values to the stack, and
6172 * load them both using LDM.
6174 /* Compute target address */
6175 #ifdef USE_JUMP_TABLES
6176 ARM_MOVW_REG_IMM (code, ARMREG_R1, vtable_offset & 0xffff);
6177 if (vtable_offset >> 16)
6178 ARM_MOVT_REG_IMM (code, ARMREG_R1, (vtable_offset >> 16) & 0xffff);
6179 /* IP had vtable base. */
6180 ARM_LDR_REG_REG (code, ARMREG_IP, ARMREG_IP, ARMREG_R1);
6181 /* Restore registers and branch */
6182 ARM_POP3 (code, ARMREG_R0, ARMREG_R1, ARMREG_R2);
6183 ARM_BX (code, ARMREG_IP);
6185 vtable_offset_ins = code;
6186 ARM_LDR_IMM (code, ARMREG_R1, ARMREG_PC, 0);
6187 ARM_LDR_REG_REG (code, ARMREG_R1, ARMREG_IP, ARMREG_R1);
6188 /* Save it to the fourth slot */
6189 ARM_STR_IMM (code, ARMREG_R1, ARMREG_SP, 3 * sizeof (gpointer));
6190 /* Restore registers and branch */
6191 ARM_POP4 (code, ARMREG_R0, ARMREG_R1, ARMREG_IP, ARMREG_PC);
6193 code = arm_emit_value_and_patch_ldr (code, vtable_offset_ins, vtable_offset);
6196 #ifdef USE_JUMP_TABLES
6197 ARM_LDR_IMM (code, ARMREG_IP, ARMREG_IP, vtable_offset);
6198 ARM_POP3 (code, ARMREG_R0, ARMREG_R1, ARMREG_R2);
6199 ARM_BX (code, ARMREG_IP);
6201 ARM_POP2 (code, ARMREG_R0, ARMREG_R1);
6203 ARM_ADD_REG_IMM8 (code, ARMREG_SP, ARMREG_SP, 2 * sizeof (gpointer));
6204 ARM_LDR_IMM (code, ARMREG_PC, ARMREG_IP, vtable_offset);
6210 #ifdef USE_JUMP_TABLES
6211 set_jumptable_element (jte, GPOINTER_TO_UINT (item->jmp_code), code);
6212 target_code_jti = TARGET_CODE_JTI (i);
6213 /* Load target address */
6214 code = load_element_with_regbase_cond (code, ARMREG_R1, ARMREG_R2, target_code_jti, ARMCOND_AL);
6215 /* Restore registers */
6216 ARM_POP3 (code, ARMREG_R0, ARMREG_R1, ARMREG_R2);
6218 ARM_BX (code, ARMREG_R1);
6219 set_jumptable_element (jte, target_code_jti, fail_tramp);
6221 arm_patch (item->jmp_code, (guchar*)code);
6223 target_code_ins = code;
6224 /* Load target address */
6225 ARM_LDR_IMM (code, ARMREG_R1, ARMREG_PC, 0);
6226 /* Save it to the fourth slot */
6227 ARM_STR_IMM (code, ARMREG_R1, ARMREG_SP, 3 * sizeof (gpointer));
6228 /* Restore registers and branch */
6229 ARM_POP4 (code, ARMREG_R0, ARMREG_R1, ARMREG_IP, ARMREG_PC);
6231 code = arm_emit_value_and_patch_ldr (code, target_code_ins, (gsize)fail_tramp);
6233 item->jmp_code = NULL;
6236 #ifdef USE_JUMP_TABLES
6238 set_jumptable_element (jte, imt_method_jti, item->key);
6241 code = arm_emit_value_and_patch_ldr (code, imt_method, (guint32)item->key);
6243 /*must emit after unconditional branch*/
6244 if (vtable_target) {
6245 code = arm_emit_value_and_patch_ldr (code, vtable_target, (guint32)vtable);
6246 item->chunk_size += 4;
6247 vtable_target = NULL;
6250 /*We reserve the space for bsearch IMT values after the first entry with an absolute jump*/
6251 constant_pool_starts [i] = code;
6253 code += extra_space;
6258 #ifdef USE_JUMP_TABLES
6259 code = load_element_with_regbase_cond (code, ARMREG_R1, ARMREG_R2, IMT_METHOD_JTI (i), ARMCOND_AL);
6260 ARM_CMP_REG_REG (code, ARMREG_R0, ARMREG_R1);
6261 code = load_element_with_regbase_cond (code, ARMREG_R1, ARMREG_R2, JUMP_CODE_JTI (i), ARMCOND_HS);
6262 ARM_BX_COND (code, ARMCOND_HS, ARMREG_R1);
6263 item->jmp_code = GUINT_TO_POINTER (JUMP_CODE_JTI (i));
6265 ARM_LDR_IMM (code, ARMREG_R1, ARMREG_PC, 0);
6266 ARM_CMP_REG_REG (code, ARMREG_R0, ARMREG_R1);
6268 item->jmp_code = (guint8*)code;
6269 ARM_B_COND (code, ARMCOND_HS, 0);
6275 for (i = 0; i < count; ++i) {
6276 MonoIMTCheckItem *item = imt_entries [i];
6277 if (item->jmp_code) {
6278 if (item->check_target_idx)
6279 #ifdef USE_JUMP_TABLES
6280 set_jumptable_element (jte, GPOINTER_TO_UINT (item->jmp_code), imt_entries [item->check_target_idx]->code_target);
6282 arm_patch (item->jmp_code, imt_entries [item->check_target_idx]->code_target);
6285 if (i > 0 && item->is_equals) {
6287 #ifdef USE_JUMP_TABLES
6288 for (j = i - 1; j >= 0 && !imt_entries [j]->is_equals; --j)
6289 set_jumptable_element (jte, IMT_METHOD_JTI (j), imt_entries [j]->key);
6291 arminstr_t *space_start = constant_pool_starts [i];
6292 for (j = i - 1; j >= 0 && !imt_entries [j]->is_equals; --j) {
6293 space_start = arm_emit_value_and_patch_ldr (space_start, (arminstr_t*)imt_entries [j]->code_target, (guint32)imt_entries [j]->key);
6301 char *buff = g_strdup_printf ("thunk_for_class_%s_%s_entries_%d", vtable->klass->name_space, vtable->klass->name, count);
6302 mono_disassemble_code (NULL, (guint8*)start, size, buff);
6307 #ifndef USE_JUMP_TABLES
6308 g_free (constant_pool_starts);
6311 mono_arch_flush_icache ((guint8*)start, size);
6312 mono_stats.imt_thunks_size += code - start;
6314 g_assert (DISTANCE (start, code) <= size);
/*
 * mono_arch_context_get_int_reg:
 *
 *   Return the saved value of integer register REG from the machine
 * context CTX. REG is used directly as an index into ctx->regs; no
 * bounds checking is done here — callers pass a valid ARM register number.
 */
6321 mono_arch_context_get_int_reg (MonoContext *ctx, int reg)
6323 return ctx->regs [reg];
/*
 * mono_arch_context_set_int_reg:
 *
 *   Store VAL into the slot for integer register REG in the machine
 * context CTX. Setter counterpart of mono_arch_context_get_int_reg;
 * REG is not bounds-checked.
 */
6327 mono_arch_context_set_int_reg (MonoContext *ctx, int reg, mgreg_t val)
6329 ctx->regs [reg] = val;
6333 * mono_arch_get_trampolines:
6335 * Return a list of MonoTrampInfo structures describing arch specific trampolines
/* On ARM the only arch-specific trampolines are the exception
 * trampolines; AOT selects the ahead-of-time variants. */
6339 mono_arch_get_trampolines (gboolean aot)
6341 return mono_arm_get_exception_trampolines (aot);
6345 #ifdef MONO_ARCH_SOFT_DEBUG_SUPPORTED
6347 * mono_arch_set_breakpoint:
6349 * Set a breakpoint at the native code corresponding to JI at NATIVE_OFFSET.
6350 * The location should contain code emitted by OP_SEQ_POINT.
/* Three strategies, picked by configuration:
 *   1. soft breakpoints: patch in a call through LR to the breakpoint
 *      handler (JIT code only — asserted below).
 *   2. AOT code: the code itself is immutable, so record the trigger page
 *      in the per-method SeqPointInfo side table instead of patching.
 *   3. default: patch in a load from the breakpoint trigger page; the
 *      page is unmapped, so executing the load faults and signals the
 *      debugger agent.
 * NOTE(review): the declaration/initialization of `code` (presumably
 * `code = (guint8*)ip`) is on a line not visible here — confirm against
 * the full source. */
6353 mono_arch_set_breakpoint (MonoJitInfo *ji, guint8 *ip)
6356 guint32 native_offset = ip - (guint8*)ji->code_start;
6357 MonoDebugOptions *opt = mini_get_debug_options ();
6359 if (opt->soft_breakpoints) {
6360 g_assert (!ji->from_aot);
/* Call the breakpoint handler; its address is expected in LR at this point. */
6362 ARM_BLX_REG (code, ARMREG_LR);
6363 mono_arch_flush_icache (code - 4, 4);
6364 } else if (ji->from_aot) {
6365 SeqPointInfo *info = mono_arch_get_seq_point_info (mono_domain_get (), ji->code_start);
/* bp_addrs is indexed per 4-byte instruction slot; the slot must be free. */
6367 g_assert (native_offset % 4 == 0);
6368 g_assert (info->bp_addrs [native_offset / 4] == 0);
6369 info->bp_addrs [native_offset / 4] = bp_trigger_page;
/* LR is clobbered as the scratch register for the trigger-page load;
 * the seq-point sequence reserves space for this patch. */
6371 int dreg = ARMREG_LR;
6373 /* Read from another trigger page */
6374 #ifdef USE_JUMP_TABLES
/* Load the trigger page address out of a jumptable entry. */
6375 gpointer *jte = mono_jumptable_add_entry ();
6376 code = mono_arm_load_jumptable_entry (code, jte, dreg);
6377 jte [0] = bp_trigger_page;
/* (non-jumptable path) Inline the trigger page address as a literal
 * right after the PC-relative load. */
6379 ARM_LDR_IMM (code, dreg, ARMREG_PC, 0);
6381 *(int*)code = (int)bp_trigger_page;
/* Dereference the trigger page — this is the faulting access. */
6384 ARM_LDR_IMM (code, dreg, dreg, 0);
6386 mono_arch_flush_icache (code - 16, 16);
6389 /* This is currently implemented by emitting an SWI instruction, which
6390 * qemu/linux seems to convert to a SIGILL.
/* NOTE(review): this SWI-based fallback branch appears dead/disabled in
 * the visible extraction — the enclosing #if lines are not shown. */
6392 *(int*)code = (0xef << 24) | 8;
6394 mono_arch_flush_icache (code - 4, 4);
6400 * mono_arch_clear_breakpoint:
6402 * Clear the breakpoint at IP.
/* Inverse of mono_arch_set_breakpoint, branch for branch:
 *   soft breakpoints: overwrite the patched call (replacement
 *     instruction is on a line not visible here — presumably a NOP).
 *   AOT: clear the SeqPointInfo side-table slot; code is untouched.
 *   default: overwrite the 4-instruction trigger-page sequence
 *     (loop body not visible — presumably emits NOPs). */
6405 mono_arch_clear_breakpoint (MonoJitInfo *ji, guint8 *ip)
6407 MonoDebugOptions *opt = mini_get_debug_options ();
6411 if (opt->soft_breakpoints) {
6412 g_assert (!ji->from_aot);
6415 mono_arch_flush_icache (code - 4, 4);
6416 } else if (ji->from_aot) {
6417 guint32 native_offset = ip - (guint8*)ji->code_start;
6418 SeqPointInfo *info = mono_arch_get_seq_point_info (mono_domain_get (), ji->code_start);
/* The slot must currently hold the trigger page set by set_breakpoint. */
6420 g_assert (native_offset % 4 == 0);
6421 g_assert (info->bp_addrs [native_offset / 4] == bp_trigger_page);
6422 info->bp_addrs [native_offset / 4] = 0;
/* 4 instruction slots = the 16 bytes patched by set_breakpoint. */
6424 for (i = 0; i < 4; ++i)
6427 mono_arch_flush_icache (ip, code - ip);
6432 * mono_arch_start_single_stepping:
6434 * Start single stepping.
/* Revoke all access to the single-step trigger page so that every
 * OP_SEQ_POINT read of it faults, turning each sequence point into a
 * single-step event. */
6437 mono_arch_start_single_stepping (void)
6439 if (ss_trigger_page)
6440 mono_mprotect (ss_trigger_page, mono_pagesize (), 0);
6446 * mono_arch_stop_single_stepping:
6448 * Stop single stepping.
/* Restore read access to the trigger page so sequence-point loads
 * succeed silently again. */
6451 mono_arch_stop_single_stepping (void)
6453 if (ss_trigger_page)
6454 mono_mprotect (ss_trigger_page, mono_pagesize (), MONO_MMAP_READ);
6460 #define DBG_SIGNAL SIGBUS
6462 #define DBG_SIGNAL SIGSEGV
6466 * mono_arch_is_single_step_event:
6468 * Return whenever the machine state in SIGCTX corresponds to a single
/* Classify a signal as a single-step event by checking whether the
 * faulting address falls on (or near) the single-step trigger page.
 * Returns early when the trigger page mechanism is not in use.
 * NOTE(review): the return statements are on lines not visible in this
 * extraction. */
6472 mono_arch_is_single_step_event (void *info, void *sigctx)
6474 siginfo_t *sinfo = info;
6476 if (!ss_trigger_page)
6479 /* Sometimes the address is off by 4 */
/* Hence the 128-byte slack window past the page start. */
6480 if (sinfo->si_addr >= ss_trigger_page && (guint8*)sinfo->si_addr <= (guint8*)ss_trigger_page + 128)
6487 * mono_arch_is_breakpoint_event:
6489 * Return whenever the machine state in SIGCTX corresponds to a breakpoint event.
/* Same scheme as mono_arch_is_single_step_event, but matches against the
 * breakpoint trigger page and additionally requires the expected signal
 * number (DBG_SIGNAL: SIGBUS or SIGSEGV depending on platform). */
6492 mono_arch_is_breakpoint_event (void *info, void *sigctx)
6494 siginfo_t *sinfo = info;
6496 if (!ss_trigger_page)
6499 if (sinfo->si_signo == DBG_SIGNAL) {
6500 /* Sometimes the address is off by 4 */
6501 if (sinfo->si_addr >= bp_trigger_page && (guint8*)sinfo->si_addr <= (guint8*)bp_trigger_page + 128)
6511 * mono_arch_skip_breakpoint:
6513 * See mini-amd64.c for docs.
/* Advance the saved PC past the faulting instruction (one 4-byte ARM
 * instruction) so execution resumes after the breakpoint load. */
6516 mono_arch_skip_breakpoint (MonoContext *ctx, MonoJitInfo *ji)
6518 MONO_CONTEXT_SET_IP (ctx, (guint8*)MONO_CONTEXT_GET_IP (ctx) + 4);
6522 * mono_arch_skip_single_step:
6524 * See mini-amd64.c for docs.
/* Skip the faulting single-step trigger load, exactly as
 * mono_arch_skip_breakpoint does for breakpoints. */
6527 mono_arch_skip_single_step (MonoContext *ctx)
6529 MONO_CONTEXT_SET_IP (ctx, (guint8*)MONO_CONTEXT_GET_IP (ctx) + 4);
6532 #endif /* MONO_ARCH_SOFT_DEBUG_SUPPORTED */
6535 * mono_arch_get_seq_point_info:
6537 * See mini-amd64.c for docs.
/* Return the SeqPointInfo for the method starting at CODE, creating and
 * caching it on first use. The per-domain arch_seq_points hash table
 * (guarded by the domain lock) maps code address -> SeqPointInfo. The
 * info is sized with one bp_addrs slot per 4-byte instruction
 * (ji->code_size trailing bytes) and records the two trigger pages. */
6540 mono_arch_get_seq_point_info (MonoDomain *domain, guint8 *code)
6545 // FIXME: Add a free function
6547 mono_domain_lock (domain);
6548 info = g_hash_table_lookup (domain_jit_info (domain)->arch_seq_points,
6550 mono_domain_unlock (domain);
/* Cache miss: look up the JIT info to learn the method's code size. */
6553 ji = mono_jit_info_table_find (domain, (char*)code);
6556 info = g_malloc0 (sizeof (SeqPointInfo) + ji->code_size);
6558 info->ss_trigger_page = ss_trigger_page;
6559 info->bp_trigger_page = bp_trigger_page;
6561 mono_domain_lock (domain);
6562 g_hash_table_insert (domain_jit_info (domain)->arch_seq_points,
6564 mono_domain_unlock (domain);
/*
 * mono_arch_init_lmf_ext:
 *
 *   Initialize the extended LMF (Last Managed Frame) EXT, linking it to
 * PREV_LMF. Bit 1 of previous_lmf is used as a tag so the unwinder can
 * distinguish a MonoLMFExt from a plain MonoLMF when walking the chain.
 */
6571 mono_arch_init_lmf_ext (MonoLMFExt *ext, gpointer prev_lmf)
6573 ext->lmf.previous_lmf = prev_lmf;
6574 /* Mark that this is a MonoLMFExt */
6575 ext->lmf.previous_lmf = (gpointer)(((gssize)ext->lmf.previous_lmf) | 2);
6576 ext->lmf.sp = (gssize)ext;
6580 * mono_arch_set_target:
6582 * Set the target architecture the JIT backend should generate code for, in the form
6583 * of a GNU target triplet. Only used in AOT mode.
/* Feature flags are derived by substring matching, so the checks are
 * cumulative rather than exclusive: e.g. an "armv7s" triple also matches
 * the "armv7" test above it, which is what sets v5/v6/v7 before the
 * dedicated "armv7s" test adds v7s. Only ever turns flags on; flags
 * already detected at runtime are never cleared. */
6586 mono_arch_set_target (char *mtriple)
6588 /* The GNU target triple format is not very well documented */
6589 if (strstr (mtriple, "armv7")) {
6590 v5_supported = TRUE;
6591 v6_supported = TRUE;
6592 v7_supported = TRUE;
6594 if (strstr (mtriple, "armv6")) {
6595 v5_supported = TRUE;
6596 v6_supported = TRUE;
/* "armv7s" relies on the "armv7" match above for the base v5/v6/v7 flags. */
6598 if (strstr (mtriple, "armv7s")) {
6599 v7s_supported = TRUE;
/* "thumbv7s" does NOT contain "armv7", so it must set the base flags itself. */
6601 if (strstr (mtriple, "thumbv7s")) {
6602 v5_supported = TRUE;
6603 v6_supported = TRUE;
6604 v7_supported = TRUE;
6605 v7s_supported = TRUE;
6606 thumb_supported = TRUE;
6607 thumb2_supported = TRUE;
/* All Darwin/iOS targets are at least ARMv6 with Thumb support. */
6609 if (strstr (mtriple, "darwin") || strstr (mtriple, "ios")) {
6610 v5_supported = TRUE;
6611 v6_supported = TRUE;
6612 thumb_supported = TRUE;
6615 if (strstr (mtriple, "gnueabi"))
6616 eabi_supported = TRUE;
6619 #if defined(MONOTOUCH) || defined(MONO_EXTENSIONS)
6621 #include "../../../mono-extensions/mono/mini/mini-arm-gsharedvt.c"
6623 #endif /* !MONOTOUCH */