2 * mini-arm.c: ARM backend for the Mono code generator
5 * Paolo Molaro (lupus@ximian.com)
6 * Dietmar Maurer (dietmar@ximian.com)
8 * (C) 2003 Ximian, Inc.
9 * Copyright 2003-2011 Novell, Inc (http://www.novell.com)
10 * Copyright 2011 Xamarin, Inc (http://www.xamarin.com)
15 #include <mono/metadata/appdomain.h>
16 #include <mono/metadata/debug-helpers.h>
17 #include <mono/utils/mono-mmap.h>
23 #include "debugger-agent.h"
25 #include "mono/arch/arm/arm-vfp-codegen.h"
27 /* Sanity check: This makes no sense */
28 #if defined(ARM_FPU_NONE) && (defined(ARM_FPU_VFP) || defined(ARM_FPU_VFP_HARD))
29 #error "ARM_FPU_NONE is defined while one of ARM_FPU_VFP/ARM_FPU_VFP_HARD is defined"
/* __aeabi_read_tp is a linux-eabi helper that returns the thread pointer; it is
 * not available on Android or Native Client builds. */
32 #if defined(__ARM_EABI__) && defined(__linux__) && !defined(PLATFORM_ANDROID) && !defined(__native_client__)
33 #define HAVE_AEABI_READ_TP 1
36 #ifdef ARM_FPU_VFP_HARD
46 #ifdef MONO_ARCH_SOFT_FLOAT
47 #define IS_SOFT_FLOAT 1
49 #define IS_SOFT_FLOAT 0
/* Native Client sandboxing constants: bundle alignment for generated code. */
52 #ifdef __native_client_codegen__
53 const guint kNaClAlignment = kNaClAlignmentARM;
54 const guint kNaClAlignmentMask = kNaClAlignmentMaskARM;
55 gint8 nacl_align_byte = -1; /* 0xff */
/* Native Client codegen stubs: padding and nop-skipping for the NaCl bundle
 * format. Both abort at runtime because NaCl/ARM support is unimplemented. */
58 mono_arch_nacl_pad (guint8 *code, int pad)
60 /* Not yet properly implemented. */
61 g_assert_not_reached ();
66 mono_arch_nacl_skip_nops (guint8 *code)
68 /* Not yet properly implemented. */
69 g_assert_not_reached ();
73 #endif /* __native_client_codegen__ */
75 #define ALIGN_TO(val,align) ((((guint64)val) + ((align) - 1)) & ~((align) - 1))
/* Darwin-provided icache flush routine, used by mono_arch_flush_icache. */
78 void sys_icache_invalidate (void *start, size_t len);
/* TLS offsets of the LMF and the LMF address; -1 means unknown/unavailable. */
81 static gint lmf_tls_offset = -1;
82 static gint lmf_addr_tls_offset = -1;
84 /* This mutex protects architecture specific caches */
85 #define mono_mini_arch_lock() EnterCriticalSection (&mini_arch_mutex)
86 #define mono_mini_arch_unlock() LeaveCriticalSection (&mini_arch_mutex)
87 static CRITICAL_SECTION mini_arch_mutex;
/* CPU feature flags, filled in from MONO_CPU_ARCH or /proc/cpuinfo
 * (see mono_arch_cpu_optimizations). */
89 static int v5_supported = 0;
90 static int v6_supported = 0;
91 static int v7_supported = 0;
92 static int v7s_supported = 0;
93 static int thumb_supported = 0;
94 static int thumb2_supported = 0;
96 * Whether to use the ARM EABI
98 static int eabi_supported = 0;
101 * Whether we are on arm/darwin aka the iphone.
103 static int darwin = 0;
105 * Whether to use the iphone ABI extensions:
106 * http://developer.apple.com/library/ios/documentation/Xcode/Conceptual/iPhoneOSABIReference/index.html
107 * Basically, r7 is used as a frame pointer and it should point to the saved r7 + lr.
108 * This is required for debugging/profiling tools to work, but it has some overhead so it should
109 * only be turned on in debug builds.
111 static int iphone_abi = 0;
114 * The FPU we are generating code for. This is NOT runtime configurable right now,
115 * since some things like MONO_ARCH_CALLEE_FREGS still depend on defines.
117 static MonoArmFPU arm_fpu;
/* Written by the debugger to request single stepping; read by generated code. */
121 static volatile int ss_trigger_var = 0;
/* Wrappers built by create_function_wrapper () for soft-breakpoint support. */
123 static gpointer single_step_func_wrapper;
124 static gpointer breakpoint_func_wrapper;
127 * The code generated for sequence points reads from this location, which is
128 * made read-only when single stepping is enabled.
130 static gpointer ss_trigger_page;
132 /* Enabled breakpoints read from this trigger page */
133 static gpointer bp_trigger_page;
135 /* Structure used by the sequence points in AOTed code */
137 gpointer ss_trigger_page;
138 gpointer bp_trigger_page;
139 guint8* bp_addrs [MONO_ZERO_LEN_ARRAY];
144 * floating point support: on ARM it is a mess, there are at least 3
145 * different setups, each of which binary incompat with the other.
146 * 1) FPA: old and ugly, but unfortunately what current distros use
147 * the double binary format has the two words swapped. 8 double registers.
148 * Implemented usually by kernel emulation.
149 * 2) softfloat: the compiler emulates all the fp ops. Usually uses the
150 * ugly swapped double format (I guess a softfloat-vfp exists, too, though).
151 * 3) VFP: the new and actually sensible and useful FP support. Implemented
152 * in HW or kernel-emulated, requires new tools. I think this is what symbian uses.
154 * We do not care about FPA. We will support soft float and VFP.
156 int mono_exc_esp_offset = 0;
/* Range checks for ARM immediate offset encodings: 12-bit for word LDR/STR,
 * 8-bit for halfword/byte forms, and the scaled 8-bit VFP offset. */
158 #define arm_is_imm12(v) ((v) > -4096 && (v) < 4096)
159 #define arm_is_imm8(v) ((v) > -256 && (v) < 256)
160 #define arm_is_fpimm8(v) ((v) >= -1020 && (v) <= 1020)
/* Masks/patterns to recognize a "ldr pc, [...]" instruction in emitted code. */
162 #define LDR_MASK ((0xf << ARMCOND_SHIFT) | (3 << 26) | (1 << 22) | (1 << 20) | (15 << 12))
163 #define LDR_PC_VAL ((ARMCOND_AL << ARMCOND_SHIFT) | (1 << 26) | (0 << 22) | (1 << 20) | (15 << 12))
164 #define IS_LDR_PC(val) (((val) & LDR_MASK) == LDR_PC_VAL)
/* Pre-encoded "add lr, pc, #4" and "mov lr, pc" instruction words. */
166 #define ADD_LR_PC_4 ((ARMCOND_AL << ARMCOND_SHIFT) | (1 << 25) | (1 << 23) | (ARMREG_PC << 16) | (ARMREG_LR << 12) | 4)
167 #define MOV_LR_PC ((ARMCOND_AL << ARMCOND_SHIFT) | (1 << 24) | (0xa << 20) | (ARMREG_LR << 12) | ARMREG_PC)
170 /* A variant of ARM_LDR_IMM which can handle large offsets */
171 #define ARM_LDR_IMM_GENERAL(code, dreg, basereg, offset, scratch_reg) do { \
172 if (arm_is_imm12 ((offset))) { \
173 ARM_LDR_IMM (code, (dreg), (basereg), (offset)); \
/* Offset does not fit the 12-bit immediate: materialize it in scratch_reg
 * first, so scratch_reg must differ from basereg. */ \
175 g_assert ((scratch_reg) != (basereg)); \
176 code = mono_arm_emit_load_imm (code, (scratch_reg), (offset)); \
177 ARM_LDR_REG_REG (code, (dreg), (basereg), (scratch_reg)); \
/* Store counterpart of ARM_LDR_IMM_GENERAL: same large-offset strategy. */
181 #define ARM_STR_IMM_GENERAL(code, dreg, basereg, offset, scratch_reg) do { \
182 if (arm_is_imm12 ((offset))) { \
183 ARM_STR_IMM (code, (dreg), (basereg), (offset)); \
185 g_assert ((scratch_reg) != (basereg)); \
186 code = mono_arm_emit_load_imm (code, (scratch_reg), (offset)); \
187 ARM_STR_REG_REG (code, (dreg), (basereg), (scratch_reg)); \
191 static void mono_arch_compute_omit_fp (MonoCompile *cfg);
/* Return a human-readable name for integer register number REG (r0..pc). */
194 mono_arch_regname (int reg)
196 static const char * rnames[] = {
197 "arm_r0", "arm_r1", "arm_r2", "arm_r3", "arm_v1",
198 "arm_v2", "arm_v3", "arm_v4", "arm_v5", "arm_v6",
199 "arm_v7", "arm_fp", "arm_ip", "arm_sp", "arm_lr",
202 if (reg >= 0 && reg < 16)
/* Return a human-readable name for floating point register number REG. */
208 mono_arch_fregname (int reg)
210 static const char * rnames[] = {
211 "arm_f0", "arm_f1", "arm_f2", "arm_f3", "arm_f4",
212 "arm_f5", "arm_f6", "arm_f7", "arm_f8", "arm_f9",
213 "arm_f10", "arm_f11", "arm_f12", "arm_f13", "arm_f14",
214 "arm_f15", "arm_f16", "arm_f17", "arm_f18", "arm_f19",
215 "arm_f20", "arm_f21", "arm_f22", "arm_f23", "arm_f24",
216 "arm_f25", "arm_f26", "arm_f27", "arm_f28", "arm_f29",
219 if (reg >= 0 && reg < 32)
/* Emit "dreg = sreg + imm" handling immediates too large for a rotated imm8:
 * in that case imm is loaded into dreg first, so dreg must differ from sreg. */
227 emit_big_add (guint8 *code, int dreg, int sreg, int imm)
229 int imm8, rot_amount;
230 if ((imm8 = mono_arm_is_rotated_imm8 (imm, &rot_amount)) >= 0) {
231 ARM_ADD_REG_IMM (code, dreg, sreg, imm8, rot_amount);
234 g_assert (dreg != sreg);
235 code = mono_arm_emit_load_imm (code, dreg, imm);
236 ARM_ADD_REG_REG (code, dreg, dreg, sreg);
/* Emit an inline word-by-word memcpy of SIZE bytes from sreg+soffset to
 * dreg+doffset. Large copies use a countdown loop; small copies are unrolled.
 * Clobbers r0-r3 and lr (safe here: only used for incoming stack args). */
241 emit_memcpy (guint8 *code, int size, int dreg, int doffset, int sreg, int soffset)
243 /* we can use r0-r3, since this is called only for incoming args on the stack */
244 if (size > sizeof (gpointer) * 4) {
246 code = emit_big_add (code, ARMREG_R0, sreg, soffset);
247 code = emit_big_add (code, ARMREG_R1, dreg, doffset);
248 start_loop = code = mono_arm_emit_load_imm (code, ARMREG_R2, size);
249 ARM_LDR_IMM (code, ARMREG_R3, ARMREG_R0, 0);
250 ARM_STR_IMM (code, ARMREG_R3, ARMREG_R1, 0);
251 ARM_ADD_REG_IMM8 (code, ARMREG_R0, ARMREG_R0, 4);
252 ARM_ADD_REG_IMM8 (code, ARMREG_R1, ARMREG_R1, 4);
/* Decrement the remaining byte count and loop back while non-zero. */
253 ARM_SUBS_REG_IMM8 (code, ARMREG_R2, ARMREG_R2, 4);
254 ARM_B_COND (code, ARMCOND_NE, 0);
255 arm_patch (code - 4, start_loop);
/* Unrolled path: usable only while every offset fits the imm12 encoding. */
258 if (arm_is_imm12 (doffset) && arm_is_imm12 (doffset + size) &&
259 arm_is_imm12 (soffset) && arm_is_imm12 (soffset + size)) {
261 ARM_LDR_IMM (code, ARMREG_LR, sreg, soffset);
262 ARM_STR_IMM (code, ARMREG_LR, dreg, doffset);
/* Offsets too large: rebase src/dst into r0/r1 and copy from offset 0. */
268 code = emit_big_add (code, ARMREG_R0, sreg, soffset);
269 code = emit_big_add (code, ARMREG_R1, dreg, doffset);
270 doffset = soffset = 0;
272 ARM_LDR_IMM (code, ARMREG_LR, ARMREG_R0, soffset);
273 ARM_STR_IMM (code, ARMREG_LR, ARMREG_R1, doffset);
/* All bytes must have been consumed by the word copies above. */
279 g_assert (size == 0);
/* Emit an indirect call through REG. Uses BLX where available; otherwise
 * emulates it by loading lr with the return address and jumping via pc. */
284 emit_call_reg (guint8 *code, int reg)
287 ARM_BLX_REG (code, reg);
289 #ifdef USE_JUMP_TABLES
290 g_assert_not_reached ();
292 ARM_MOV_REG_REG (code, ARMREG_LR, ARMREG_PC);
296 ARM_MOV_REG_REG (code, ARMREG_PC, reg);
/* Emit a patchable call sequence. For dynamic methods the target is embedded
 * as an inline literal loaded into ip, since such code cannot use patch slots. */
302 emit_call_seq (MonoCompile *cfg, guint8 *code)
304 #ifdef USE_JUMP_TABLES
305 code = mono_arm_patchable_bl (code, ARMCOND_AL);
307 if (cfg->method->dynamic) {
308 ARM_LDR_IMM (code, ARMREG_IP, ARMREG_PC, 0);
/* Placeholder literal; patched later with the real call target. */
310 *(gpointer*)code = NULL;
312 code = emit_call_reg (code, ARMREG_IP);
/* Emit a patchable conditional branch. With jump tables, the target is loaded
 * from a jumptable entry into ip and branched to; otherwise a plain B is
 * emitted with a zero displacement to be fixed up by arm_patch (). */
321 mono_arm_patchable_b (guint8 *code, int cond)
323 #ifdef USE_JUMP_TABLES
326 jte = mono_jumptable_add_entry ();
327 code = mono_arm_load_jumptable_entry (code, jte, ARMREG_IP);
328 ARM_BX_COND (code, cond, ARMREG_IP);
330 ARM_B_COND (code, cond, 0);
/* Same as mono_arm_patchable_b but emits a (linking) call instead of a jump. */
336 mono_arm_patchable_bl (guint8 *code, int cond)
338 #ifdef USE_JUMP_TABLES
341 jte = mono_jumptable_add_entry ();
342 code = mono_arm_load_jumptable_entry (code, jte, ARMREG_IP);
343 ARM_BLX_REG_COND (code, cond, ARMREG_IP);
345 ARM_BL_COND (code, cond, 0);
350 #ifdef USE_JUMP_TABLES
/* Load the ADDRESS of jumptable entry JTE into REG using a movw/movt pair
 * (requires ARMv7-class movw/movt support). */
352 mono_arm_load_jumptable_entry_addr (guint8 *code, gpointer *jte, ARMReg reg)
354 ARM_MOVW_REG_IMM (code, reg, GPOINTER_TO_UINT(jte) & 0xffff);
355 ARM_MOVT_REG_IMM (code, reg, (GPOINTER_TO_UINT(jte) >> 16) & 0xffff);
/* Load the VALUE stored in jumptable entry JTE into REG. */
360 mono_arm_load_jumptable_entry (guint8 *code, gpointer* jte, ARMReg reg)
362 code = mono_arm_load_jumptable_entry_addr (code, jte, reg);
363 ARM_LDR_IMM (code, reg, reg, 0);
/* Move a call's return value from the ABI return registers (r0/r1) into the
 * destination vreg of INS; FP results are transferred into a VFP register. */
370 emit_move_return_value (MonoCompile *cfg, MonoInst *ins, guint8 *code)
372 switch (ins->opcode) {
375 case OP_FCALL_MEMBASE:
377 if (((MonoCallInst*)ins)->signature->ret->type == MONO_TYPE_R4) {
/* float result: move r0 into the VFP reg, then widen to double. */
378 ARM_FMSR (code, ins->dreg, ARMREG_R0);
379 ARM_CVTS (code, ins->dreg, ins->dreg);
/* double result: combine the r0/r1 pair into the VFP double register. */
381 ARM_FMDRR (code, ARMREG_R0, ARMREG_R1, ins->dreg);
393 * Emit code to push an LMF structure on the LMF stack.
394 * On arm, this is intermixed with the initialization of other fields of the structure.
397 emit_save_lmf (MonoCompile *cfg, guint8 *code, gint32 lmf_offset)
399 gboolean get_lmf_fast = FALSE;
/* Fast path 1: read the LMF address straight out of TLS via __aeabi_read_tp,
 * avoiding a call into mono_get_lmf_addr (). */
402 #ifdef HAVE_AEABI_READ_TP
403 gint32 lmf_addr_tls_offset = mono_get_lmf_addr_tls_offset ();
405 if (lmf_addr_tls_offset != -1) {
408 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_INTERNAL_METHOD,
409 (gpointer)"__aeabi_read_tp");
410 code = emit_call_seq (cfg, code);
412 ARM_LDR_IMM (code, ARMREG_R0, ARMREG_R0, lmf_addr_tls_offset);
/* Fast path 2 (managed-to-native wrappers): inline the pthread_getspecific
 * lookup of the jit TLS data instead of calling mono_get_lmf_addr (). */
418 if (cfg->method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) {
421 /* Inline mono_get_lmf_addr () */
422 /* jit_tls = pthread_getspecific (mono_jit_tls_id); lmf_addr = &jit_tls->lmf; */
424 /* Load mono_jit_tls_id */
426 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_JIT_TLS_ID, NULL);
427 ARM_LDR_IMM (code, ARMREG_R0, ARMREG_PC, 0);
429 *(gpointer*)code = NULL;
431 ARM_LDR_REG_REG (code, ARMREG_R0, ARMREG_PC, ARMREG_R0);
432 /* call pthread_getspecific () */
433 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_INTERNAL_METHOD,
434 (gpointer)"pthread_getspecific");
435 code = emit_call_seq (cfg, code);
436 /* lmf_addr = &jit_tls->lmf */
437 lmf_offset = G_STRUCT_OFFSET (MonoJitTlsData, lmf);
438 g_assert (arm_is_imm8 (lmf_offset));
439 ARM_ADD_REG_IMM (code, ARMREG_R0, ARMREG_R0, lmf_offset, 0);
/* Slow path: call mono_get_lmf_addr () for the LMF address. */
446 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_INTERNAL_METHOD,
447 (gpointer)"mono_get_lmf_addr");
448 code = emit_call_seq (cfg, code);
450 /* we build the MonoLMF structure on the stack - see mini-arm.h */
451 /* lmf_offset is the offset from the previous stack pointer,
452 * alloc_size is the total stack space allocated, so the offset
453 * of MonoLMF from the current stack ptr is alloc_size - lmf_offset.
454 * The pointer to the struct is put in r1 (new_lmf).
455 * ip is used as scratch
456 * The callee-saved registers are already in the MonoLMF structure
458 code = emit_big_add (code, ARMREG_R1, ARMREG_SP, lmf_offset);
459 /* r0 is the result from mono_get_lmf_addr () */
460 ARM_STR_IMM (code, ARMREG_R0, ARMREG_R1, G_STRUCT_OFFSET (MonoLMF, lmf_addr));
461 /* new_lmf->previous_lmf = *lmf_addr */
462 ARM_LDR_IMM (code, ARMREG_IP, ARMREG_R0, G_STRUCT_OFFSET (MonoLMF, previous_lmf))
463 ARM_STR_IMM (code, ARMREG_IP, ARMREG_R1, G_STRUCT_OFFSET (MonoLMF, previous_lmf));
464 /* *(lmf_addr) = r1 */
465 ARM_STR_IMM (code, ARMREG_R1, ARMREG_R0, G_STRUCT_OFFSET (MonoLMF, previous_lmf));
466 /* Skip method (only needed for trampoline LMF frames) */
467 ARM_STR_IMM (code, ARMREG_SP, ARMREG_R1, G_STRUCT_OFFSET (MonoLMF, sp));
468 ARM_STR_IMM (code, ARMREG_FP, ARMREG_R1, G_STRUCT_OFFSET (MonoLMF, fp));
469 /* save the current IP */
470 ARM_MOV_REG_REG (code, ARMREG_IP, ARMREG_PC);
471 ARM_STR_IMM (code, ARMREG_IP, ARMREG_R1, G_STRUCT_OFFSET (MonoLMF, ip));
/* Tell the precise GC that the LMF slots never hold managed references. */
473 for (i = 0; i < sizeof (MonoLMF); i += sizeof (mgreg_t))
474 mini_gc_set_slot_type_from_fp (cfg, lmf_offset + i, SLOT_NOREF);
482 * Emit code to pop an LMF structure from the LMF stack.
485 emit_restore_lmf (MonoCompile *cfg, guint8 *code, gint32 lmf_offset)
/* Small offsets can be addressed directly off the frame register; otherwise
 * compute the LMF address into r2 and use that as the base. */
489 if (lmf_offset < 32) {
490 basereg = cfg->frame_reg;
495 code = emit_big_add (code, ARMREG_R2, cfg->frame_reg, lmf_offset);
498 /* ip = previous_lmf */
499 ARM_LDR_IMM (code, ARMREG_IP, basereg, offset + G_STRUCT_OFFSET (MonoLMF, previous_lmf));
/* lr = lmf_addr */
501 ARM_LDR_IMM (code, ARMREG_LR, basereg, offset + G_STRUCT_OFFSET (MonoLMF, lmf_addr));
502 /* *(lmf_addr) = previous_lmf */
503 ARM_STR_IMM (code, ARMREG_IP, ARMREG_LR, G_STRUCT_OFFSET (MonoLMF, previous_lmf));
508 #endif /* #ifndef DISABLE_JIT */
511 * mono_arch_get_argument_info:
512 * @csig: a method signature
513 * @param_count: the number of parameters to consider
514 * @arg_info: an array to store the result infos
516 * Gathers information on parameters such as size, alignment and
517 * padding. arg_info should be large enough to hold param_count + 1 entries.
519 * Returns the size of the activation frame.
522 mono_arch_get_argument_info (MonoGenericSharingContext *gsctx, MonoMethodSignature *csig, int param_count, MonoJitArgumentInfo *arg_info)
524 int k, frame_size = 0;
525 guint32 size, align, pad;
/* A struct return adds a hidden pointer slot to the frame. */
529 t = mini_type_get_underlying_type (gsctx, csig->ret);
530 if (MONO_TYPE_ISSTRUCT (t)) {
531 frame_size += sizeof (gpointer);
535 arg_info [0].offset = offset;
/* 'this' occupies one pointer-sized slot before the declared parameters. */
538 frame_size += sizeof (gpointer);
542 arg_info [0].size = frame_size;
544 for (k = 0; k < param_count; k++) {
545 size = mini_type_stack_size_full (NULL, csig->params [k], &align, csig->pinvoke);
547 /* ignore alignment for now */
/* Insert padding so the argument starts at its natural alignment. */
550 frame_size += pad = (align - (frame_size & (align - 1))) & (align - 1);
551 arg_info [k].pad = pad;
553 arg_info [k + 1].pad = 0;
554 arg_info [k + 1].size = size;
556 arg_info [k + 1].offset = offset;
/* Round the whole frame up to the platform frame alignment. */
560 align = MONO_ARCH_FRAME_ALIGNMENT;
561 frame_size += pad = (align - (frame_size & (align - 1))) & (align - 1);
562 arg_info [k].pad = pad;
567 #define MAX_ARCH_DELEGATE_PARAMS 3
/* Generate the small thunk used as a delegate Invoke implementation.
 * has_target: load delegate->target into r0 (the 'this' slot), then jump to
 * delegate->method_ptr. Otherwise: shift the first param_count args down one
 * register so the delegate argument disappears, then jump to method_ptr.
 * If code_size is non-NULL it receives the emitted length in bytes. */
570 get_delegate_invoke_impl (gboolean has_target, gboolean param_count, guint32 *code_size)
572 guint8 *code, *start;
575 start = code = mono_global_codeman_reserve (12);
577 /* Replace the this argument with the target */
578 ARM_LDR_IMM (code, ARMREG_IP, ARMREG_R0, G_STRUCT_OFFSET (MonoDelegate, method_ptr));
579 ARM_LDR_IMM (code, ARMREG_R0, ARMREG_R0, G_STRUCT_OFFSET (MonoDelegate, target));
580 ARM_MOV_REG_REG (code, ARMREG_PC, ARMREG_IP);
582 g_assert ((code - start) <= 12);
584 mono_arch_flush_icache (start, 12);
/* Non-target case: 2 insns plus one register move per parameter. */
588 size = 8 + param_count * 4;
589 start = code = mono_global_codeman_reserve (size);
591 ARM_LDR_IMM (code, ARMREG_IP, ARMREG_R0, G_STRUCT_OFFSET (MonoDelegate, method_ptr));
592 /* slide down the arguments */
593 for (i = 0; i < param_count; ++i) {
594 ARM_MOV_REG_REG (code, (ARMREG_R0 + i), (ARMREG_R0 + i + 1));
596 ARM_MOV_REG_REG (code, ARMREG_PC, ARMREG_IP);
598 g_assert ((code - start) <= size);
600 mono_arch_flush_icache (start, size);
604 *code_size = code - start;
610 * mono_arch_get_delegate_invoke_impls:
612 * Return a list of MonoAotTrampInfo structures for the delegate invoke impl
616 mono_arch_get_delegate_invoke_impls (void)
/* One has-target variant plus one no-target variant per supported arity. */
623 code = get_delegate_invoke_impl (TRUE, 0, &code_len);
624 res = g_slist_prepend (res, mono_tramp_info_create (g_strdup ("delegate_invoke_impl_has_target"), code, code_len, NULL, NULL));
626 for (i = 0; i <= MAX_ARCH_DELEGATE_PARAMS; ++i) {
627 code = get_delegate_invoke_impl (FALSE, i, &code_len);
628 res = g_slist_prepend (res, mono_tramp_info_create (g_strdup_printf ("delegate_invoke_impl_target_%d", i), code, code_len, NULL, NULL));
/* Return (and cache) the delegate invoke thunk matching SIG. In AOT mode the
 * thunk is fetched by name from the AOT image; otherwise it is JITted once by
 * get_delegate_invoke_impl (). Returns NULL for unsupported signatures. */
635 mono_arch_get_delegate_invoke_impl (MonoMethodSignature *sig, gboolean has_target)
637 guint8 *code, *start;
639 /* FIXME: Support more cases */
640 if (MONO_TYPE_ISSTRUCT (sig->ret))
/* has_target: a single cached thunk serves all signatures. */
644 static guint8* cached = NULL;
645 mono_mini_arch_lock ();
647 mono_mini_arch_unlock ();
652 start = mono_aot_get_trampoline ("delegate_invoke_impl_has_target");
654 start = get_delegate_invoke_impl (TRUE, 0, NULL);
656 mono_mini_arch_unlock ();
/* No target: one cached thunk per parameter count, regsize params only. */
659 static guint8* cache [MAX_ARCH_DELEGATE_PARAMS + 1] = {NULL};
662 if (sig->param_count > MAX_ARCH_DELEGATE_PARAMS)
664 for (i = 0; i < sig->param_count; ++i)
665 if (!mono_is_regsize_var (sig->params [i]))
668 mono_mini_arch_lock ();
669 code = cache [sig->param_count];
671 mono_mini_arch_unlock ();
676 char *name = g_strdup_printf ("delegate_invoke_impl_target_%d", sig->param_count);
677 start = mono_aot_get_trampoline (name);
680 start = get_delegate_invoke_impl (FALSE, sig->param_count, NULL);
682 cache [sig->param_count] = start;
683 mono_mini_arch_unlock ();
/* 'this' is always passed in r0 on ARM, so read it from the saved regs. */
691 mono_arch_get_this_arg_from_call (mgreg_t *regs, guint8 *code)
693 return (gpointer)regs [ARMREG_R0];
697 * Initialize the cpu to execute managed code.
700 mono_arch_cpu_init (void)
702 #if defined(__ARM_EABI__)
703 eabi_supported = TRUE;
705 #if defined(__APPLE__) && defined(MONO_CROSS_COMPILE)
/* Record the host's natural alignment for 64-bit ints (ABI-dependent). */
708 i8_align = __alignof__ (gint64);
/* Build a native thunk that captures the caller's full register state into a
 * MonoContext on the stack, calls FUNCTION (ctx) — used for the soft debugger's
 * single-step/breakpoint handlers — then restores every register, including pc,
 * from the (possibly modified) context. */
713 create_function_wrapper (gpointer function)
715 guint8 *start, *code;
717 start = code = mono_global_codeman_reserve (96);
720 * Construct the MonoContext structure on the stack.
723 ARM_SUB_REG_IMM8 (code, ARMREG_SP, ARMREG_SP, sizeof (MonoContext));
725 /* save ip, lr and pc into their corresponding ctx.regs slots. */
726 ARM_STR_IMM (code, ARMREG_IP, ARMREG_SP, G_STRUCT_OFFSET (MonoContext, regs) + sizeof (mgreg_t) * ARMREG_IP);
727 ARM_STR_IMM (code, ARMREG_LR, ARMREG_SP, G_STRUCT_OFFSET (MonoContext, regs) + 4 * ARMREG_LR);
728 ARM_STR_IMM (code, ARMREG_LR, ARMREG_SP, G_STRUCT_OFFSET (MonoContext, regs) + 4 * ARMREG_PC);
730 /* save r0..r10 and fp */
731 ARM_ADD_REG_IMM8 (code, ARMREG_IP, ARMREG_SP, G_STRUCT_OFFSET (MonoContext, regs));
732 ARM_STM (code, ARMREG_IP, 0x0fff);
734 /* now we can update fp. */
735 ARM_MOV_REG_REG (code, ARMREG_FP, ARMREG_SP);
737 /* make ctx.esp hold the actual value of sp at the beginning of this method. */
738 ARM_ADD_REG_IMM8 (code, ARMREG_R0, ARMREG_FP, sizeof (MonoContext));
739 ARM_STR_IMM (code, ARMREG_R0, ARMREG_IP, 4 * ARMREG_SP);
740 ARM_STR_IMM (code, ARMREG_R0, ARMREG_FP, G_STRUCT_OFFSET (MonoContext, regs) + 4 * ARMREG_SP);
742 /* make ctx.eip hold the address of the call. */
743 ARM_SUB_REG_IMM8 (code, ARMREG_LR, ARMREG_LR, 4);
744 ARM_STR_IMM (code, ARMREG_LR, ARMREG_SP, G_STRUCT_OFFSET (MonoContext, pc));
746 /* r0 now points to the MonoContext */
747 ARM_MOV_REG_REG (code, ARMREG_R0, ARMREG_FP);
/* Load the address of FUNCTION, either via a jumptable entry or an inline
 * literal, and call it. */
750 #ifdef USE_JUMP_TABLES
752 gpointer *jte = mono_jumptable_add_entry ();
753 code = mono_arm_load_jumptable_entry (code, jte, ARMREG_IP);
757 ARM_LDR_IMM (code, ARMREG_IP, ARMREG_PC, 0);
759 *(gpointer*)code = function;
762 ARM_BLX_REG (code, ARMREG_IP);
764 /* we're back; save ctx.eip and ctx.esp into the corresponding regs slots. */
765 ARM_LDR_IMM (code, ARMREG_R0, ARMREG_FP, G_STRUCT_OFFSET (MonoContext, pc));
766 ARM_STR_IMM (code, ARMREG_R0, ARMREG_FP, G_STRUCT_OFFSET (MonoContext, regs) + 4 * ARMREG_LR);
767 ARM_STR_IMM (code, ARMREG_R0, ARMREG_FP, G_STRUCT_OFFSET (MonoContext, regs) + 4 * ARMREG_PC);
769 /* make ip point to the regs array, then restore everything, including pc. */
770 ARM_ADD_REG_IMM8 (code, ARMREG_IP, ARMREG_FP, G_STRUCT_OFFSET (MonoContext, regs));
771 ARM_LDM (code, ARMREG_IP, 0xffff);
773 mono_arch_flush_icache (start, code - start);
779 * Initialize architecture specific code.
782 mono_arch_init (void)
784 InitializeCriticalSection (&mini_arch_mutex);
/* Soft debugger: build the single-step/breakpoint wrappers and the trigger
 * pages that generated sequence-point code reads from. */
785 #ifdef MONO_ARCH_SOFT_DEBUG_SUPPORTED
786 if (mini_get_debug_options ()->soft_breakpoints) {
787 single_step_func_wrapper = create_function_wrapper (debugger_agent_single_step_from_context);
788 breakpoint_func_wrapper = create_function_wrapper (debugger_agent_breakpoint_from_context);
793 ss_trigger_page = mono_valloc (NULL, mono_pagesize (), MONO_MMAP_READ|MONO_MMAP_32BIT);
794 bp_trigger_page = mono_valloc (NULL, mono_pagesize (), MONO_MMAP_READ|MONO_MMAP_32BIT);
/* bp page starts fully protected; reads fault and signal a breakpoint. */
795 mono_mprotect (bp_trigger_page, mono_pagesize (), 0);
798 mono_aot_register_jit_icall ("mono_arm_throw_exception", mono_arm_throw_exception);
799 mono_aot_register_jit_icall ("mono_arm_throw_exception_by_token", mono_arm_throw_exception_by_token);
800 mono_aot_register_jit_icall ("mono_arm_resume_unwind", mono_arm_resume_unwind);
801 #if defined(MONOTOUCH) || defined(MONO_EXTENSIONS)
802 mono_aot_register_jit_icall ("mono_arm_start_gsharedvt_call", mono_arm_start_gsharedvt_call);
/* Select the FPU model at build time (see the arm_fpu comment above). */
805 #if defined(ARM_FPU_VFP_HARD)
806 arm_fpu = MONO_ARM_FPU_VFP_HARD;
807 #elif defined(ARM_FPU_VFP)
808 arm_fpu = MONO_ARM_FPU_VFP;
810 arm_fpu = MONO_ARM_FPU_NONE;
815 * Cleanup architecture specific code.
818 mono_arch_cleanup (void)
823 * This function returns the optimizations supported on this cpu.
826 mono_arch_cpu_optimizations (guint32 *exclude_mask)
/* MONO_CPU_ARCH (e.g. "armv7 thumb") overrides autodetection. */
829 const char *cpu_arch = getenv ("MONO_CPU_ARCH");
830 if (cpu_arch != NULL) {
831 thumb_supported = strstr (cpu_arch, "thumb") != NULL;
832 if (strncmp (cpu_arch, "armv", 4) == 0) {
833 v5_supported = cpu_arch [4] >= '5';
834 v6_supported = cpu_arch [4] >= '6';
835 v7_supported = cpu_arch [4] >= '7';
839 thumb_supported = TRUE;
/* Autodetect by parsing the "Processor" and "Features" lines of
 * /proc/cpuinfo. */
846 FILE *file = fopen ("/proc/cpuinfo", "r");
848 while ((line = fgets (buf, 512, file))) {
849 if (strncmp (line, "Processor", 9) == 0) {
850 char *ver = strstr (line, "(v");
851 if (ver && (ver [2] == '5' || ver [2] == '6' || ver [2] == '7'))
853 if (ver && (ver [2] == '6' || ver [2] == '7'))
855 if (ver && (ver [2] == '7'))
859 if (strncmp (line, "Features", 8) == 0) {
860 char *th = strstr (line, "thumb");
862 thumb_supported = TRUE;
870 /*printf ("features: v5: %d, thumb: %d\n", v5_supported, thumb_supported);*/
875 /* no arm-specific optimizations yet */
875 /* no arm-specific optimizations yet */
881 * This function test for all SIMD functions supported.
883 * Returns a bitmask corresponding to all supported versions.
887 mono_arch_cpu_enumerate_simd_versions (void)
889 /* SIMD is currently unimplemented */
897 mono_arch_opcode_needs_emulation (MonoCompile *cfg, int opcode)
914 is_regsize_var (MonoGenericSharingContext *gsctx, MonoType *t) {
917 t = mini_type_get_underlying_type (gsctx, t);
924 case MONO_TYPE_FNPTR:
926 case MONO_TYPE_OBJECT:
927 case MONO_TYPE_STRING:
928 case MONO_TYPE_CLASS:
929 case MONO_TYPE_SZARRAY:
930 case MONO_TYPE_ARRAY:
932 case MONO_TYPE_GENERICINST:
933 if (!mono_type_generic_inst_is_valuetype (t))
936 case MONO_TYPE_VALUETYPE:
/* Collect the variables of CFG that are eligible for allocation to integer
 * registers, sorted by liveness (mono_varlist_insert_sorted). */
943 mono_arch_get_allocatable_int_vars (MonoCompile *cfg)
948 for (i = 0; i < cfg->num_varinfo; i++) {
949 MonoInst *ins = cfg->varinfo [i];
950 MonoMethodVar *vmv = MONO_VARINFO (cfg, i);
/* Skip unused variables (empty live range). */
953 if (vmv->range.first_use.abs_pos >= vmv->range.last_use.abs_pos)
/* Skip volatile/indirect vars and anything that is not a local or arg. */
956 if (ins->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT) || (ins->opcode != OP_LOCAL && ins->opcode != OP_ARG))
959 /* we can only allocate 32 bit values */
960 if (is_regsize_var (cfg->generic_sharing_context, ins->inst_vtype)) {
961 g_assert (MONO_VARINFO (cfg, i)->reg == -1);
962 g_assert (i == vmv->idx);
963 vars = mono_varlist_insert_sorted (cfg, vars, vmv, FALSE);
970 #define USE_EXTRA_TEMPS 0
/* Return the list of callee-saved integer registers (v1..v7, conditionally fp
 * and v5) available to the global register allocator for CFG. */
973 mono_arch_get_global_int_regs (MonoCompile *cfg)
977 mono_arch_compute_omit_fp (cfg);
980 * FIXME: Interface calls might go through a static rgctx trampoline which
981 * sets V5, but it doesn't save it, so we need to save it ourselves, and
984 if (cfg->flags & MONO_CFG_HAS_CALLS)
985 cfg->uses_rgctx_reg = TRUE;
/* fp is only allocatable when the frame pointer is omitted. */
987 if (cfg->arch.omit_fp)
988 regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_FP));
989 regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_V1));
990 regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_V2));
991 regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_V3));
993 /* V4=R7 is used as a frame pointer, but V7=R10 is preserved */
994 regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_V7));
996 regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_V4));
997 if (!(cfg->compile_aot || cfg->uses_rgctx_reg || COMPILE_LLVM (cfg)))
998 /* V5 is reserved for passing the vtable/rgctx/IMT method */
999 regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_V5));
1000 /*regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_V6));*/
1001 /*regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_V7));*/
1007 * mono_arch_regalloc_cost:
1009 * Return the cost, in number of memory references, of the action of
1010 * allocating the variable VMV into a register during global register
1014 mono_arch_regalloc_cost (MonoCompile *cfg, MonoMethodVar *vmv)
1020 #endif /* #ifndef DISABLE_JIT */
1022 #ifndef __GNUC_PREREQ
1023 #define __GNUC_PREREQ(maj, min) (0)
/* Flush the instruction cache for SIZE bytes at CODE so newly emitted machine
 * code becomes visible for execution. Dispatches per platform: Darwin
 * sys_icache_invalidate, GCC >= 4.1 __clear_cache, Android cacheflush syscall,
 * or a raw swi on other Linux/EABI systems. */
1027 mono_arch_flush_icache (guint8 *code, gint size)
1029 #if defined(__native_client__)
1030 // For Native Client we don't have to flush i-cache here,
1031 // as it's being done by dyncode interface.
1034 #ifdef MONO_CROSS_COMPILE
1036 sys_icache_invalidate (code, size);
1037 #elif __GNUC_PREREQ(4, 1)
1038 __clear_cache (code, code + size);
1039 #elif defined(PLATFORM_ANDROID)
1040 const int syscall = 0xf0002;
1048 : "r" (code), "r" (code + size), "r" (syscall)
1049 : "r0", "r1", "r7", "r2"
1052 __asm __volatile ("mov r0, %0\n"
1055 "swi 0x9f0002 @ sys_cacheflush"
1057 : "r" (code), "r" (code + size), "r" (0)
1058 : "r0", "r1", "r3" );
1060 #endif /* !__native_client__ */
/* Fields of the ArgInfo/CallInfo structures describing how one argument (and
 * the whole call) is passed under the ARM calling convention. */
1076 guint16 vtsize; /* in param area */
1080 guint8 size : 4; /* 1, 2, 4, 8, or regs used by RegTypeStructByVal */
1085 guint32 stack_usage;
1086 gboolean vtype_retaddr;
1087 /* The index of the vret arg in the argument list */
1097 /*#define __alignof__(a) sizeof(a)*/
/* Portable alignof fallback via offsetof trick, for non-GCC compilers. */
1098 #define __alignof__(type) G_STRUCT_OFFSET(struct { char c; type x; }, x)
/* r0-r3 are the integer argument registers. */
1101 #define PARAM_REGS 4
/* Assign the next argument either to an integer register (advancing *gr) or to
 * the stack (advancing *stack_size), filling AINFO. simple=TRUE means one
 * machine word; FALSE means a 64-bit value which may need register-pair
 * alignment or be split between r3 and the stack (RegTypeBaseGen). */
1104 add_general (guint *gr, guint *stack_size, ArgInfo *ainfo, gboolean simple)
1107 if (*gr > ARMREG_R3) {
1109 ainfo->offset = *stack_size;
1110 ainfo->reg = ARMREG_SP; /* in the caller */
1111 ainfo->storage = RegTypeBase;
1114 ainfo->storage = RegTypeGeneral;
/* 64-bit values may be split across a register and the stack when i8 is
 * only 4-byte aligned by the ABI. */
1121 split = i8_align == 4;
1126 if (*gr == ARMREG_R3 && split) {
1127 /* first word in r3 and the second on the stack */
1128 ainfo->offset = *stack_size;
1129 ainfo->reg = ARMREG_SP; /* in the caller */
1130 ainfo->storage = RegTypeBaseGen;
1132 } else if (*gr >= ARMREG_R3) {
1133 if (eabi_supported) {
1134 /* darwin aligns longs to 4 byte only */
1135 if (i8_align == 8) {
1140 ainfo->offset = *stack_size;
1141 ainfo->reg = ARMREG_SP; /* in the caller */
1142 ainfo->storage = RegTypeBase;
/* EABI: 64-bit values in registers must start on an even register. */
1145 if (eabi_supported) {
1146 if (i8_align == 8 && ((*gr) & 1))
1149 ainfo->storage = RegTypeIRegPair;
/* Compute a CallInfo describing how every argument and the return value of SIG
 * are passed under the ARM calling convention: which go in r0-r3, which on the
 * stack, how valuetypes are split (RegTypeStructByVal) and where the hidden
 * vtype-return address goes. Allocated from MP when given, else g_malloc0. */
1158 get_call_info (MonoGenericSharingContext *gsctx, MonoMemPool *mp, MonoMethodSignature *sig)
1160 guint i, gr, pstart;
1161 int n = sig->hasthis + sig->param_count;
1162 MonoType *simpletype;
1163 guint32 stack_size = 0;
1165 gboolean is_pinvoke = sig->pinvoke;
1169 cinfo = mono_mempool_alloc0 (mp, sizeof (CallInfo) + (sizeof (ArgInfo) * n));
1171 cinfo = g_malloc0 (sizeof (CallInfo) + (sizeof (ArgInfo) * n));
1176 /* FIXME: handle returning a struct */
1177 t = mini_type_get_underlying_type (gsctx, sig->ret);
1178 if (MONO_TYPE_ISSTRUCT (t)) {
/* Small pinvoke structs are returned in registers; everything else needs
 * a hidden return-address argument (vtype_retaddr). */
1181 if (is_pinvoke && mono_class_native_size (mono_class_from_mono_type (t), &align) <= sizeof (gpointer)) {
1182 cinfo->ret.storage = RegTypeStructByVal;
1184 cinfo->vtype_retaddr = TRUE;
1191 * To simplify get_this_arg_reg () and LLVM integration, emit the vret arg after
1192 * the first argument, allowing 'this' to be always passed in the first arg reg.
1193 * Also do this if the first argument is a reference type, since virtual calls
1194 * are sometimes made using calli without sig->hasthis set, like in the delegate
1197 if (cinfo->vtype_retaddr && !is_pinvoke && (sig->hasthis || (sig->param_count > 0 && MONO_TYPE_IS_REFERENCE (mini_type_get_underlying_type (gsctx, sig->params [0]))))) {
1199 add_general (&gr, &stack_size, cinfo->args + 0, TRUE);
1201 add_general (&gr, &stack_size, &cinfo->args [sig->hasthis + 0], TRUE);
1205 add_general (&gr, &stack_size, &cinfo->ret, TRUE);
1206 cinfo->vret_arg_index = 1;
1210 add_general (&gr, &stack_size, cinfo->args + 0, TRUE);
1214 if (cinfo->vtype_retaddr)
1215 add_general (&gr, &stack_size, &cinfo->ret, TRUE);
1218 DEBUG(printf("params: %d\n", sig->param_count));
1219 for (i = pstart; i < sig->param_count; ++i) {
1220 if ((sig->call_convention == MONO_CALL_VARARG) && (i == sig->sentinelpos)) {
1221 /* Prevent implicit arguments and sig_cookie from
1222 being passed in registers */
1224 /* Emit the signature cookie just before the implicit arguments */
1225 add_general (&gr, &stack_size, &cinfo->sig_cookie, TRUE);
1227 DEBUG(printf("param %d: ", i));
1228 if (sig->params [i]->byref) {
1229 DEBUG(printf("byref\n"));
1230 add_general (&gr, &stack_size, cinfo->args + n, TRUE);
1234 simpletype = mini_type_get_underlying_type (gsctx, sig->params [i]);
1235 switch (simpletype->type) {
1236 case MONO_TYPE_BOOLEAN:
1239 cinfo->args [n].size = 1;
1240 add_general (&gr, &stack_size, cinfo->args + n, TRUE);
1243 case MONO_TYPE_CHAR:
1246 cinfo->args [n].size = 2;
1247 add_general (&gr, &stack_size, cinfo->args + n, TRUE);
1252 cinfo->args [n].size = 4;
1253 add_general (&gr, &stack_size, cinfo->args + n, TRUE);
1259 case MONO_TYPE_FNPTR:
1260 case MONO_TYPE_CLASS:
1261 case MONO_TYPE_OBJECT:
1262 case MONO_TYPE_STRING:
1263 case MONO_TYPE_SZARRAY:
1264 case MONO_TYPE_ARRAY:
1266 cinfo->args [n].size = sizeof (gpointer);
1267 add_general (&gr, &stack_size, cinfo->args + n, TRUE);
1270 case MONO_TYPE_GENERICINST:
1271 if (!mono_type_generic_inst_is_valuetype (simpletype)) {
1272 cinfo->args [n].size = sizeof (gpointer);
1273 add_general (&gr, &stack_size, cinfo->args + n, TRUE);
1278 case MONO_TYPE_TYPEDBYREF:
1279 case MONO_TYPE_VALUETYPE: {
/* Valuetype: compute native or managed size/alignment, then split the
 * word count between remaining registers and the stack. */
1285 if (simpletype->type == MONO_TYPE_TYPEDBYREF) {
1286 size = sizeof (MonoTypedRef);
1287 align = sizeof (gpointer);
1289 MonoClass *klass = mono_class_from_mono_type (sig->params [i]);
1291 size = mono_class_native_size (klass, &align);
1293 size = mini_type_stack_size_full (gsctx, simpletype, &align, FALSE);
1295 DEBUG(printf ("load %d bytes struct\n", size));
1298 align_size += (sizeof (gpointer) - 1);
1299 align_size &= ~(sizeof (gpointer) - 1);
1300 nwords = (align_size + sizeof (gpointer) -1 ) / sizeof (gpointer);
1301 cinfo->args [n].storage = RegTypeStructByVal;
1302 cinfo->args [n].struct_size = size;
1303 /* FIXME: align stack_size if needed */
1304 if (eabi_supported) {
1305 if (align >= 8 && (gr & 1))
1308 if (gr > ARMREG_R3) {
1309 cinfo->args [n].size = 0;
1310 cinfo->args [n].vtsize = nwords;
1312 int rest = ARMREG_R3 - gr + 1;
1313 int n_in_regs = rest >= nwords? nwords: rest;
1315 cinfo->args [n].size = n_in_regs;
1316 cinfo->args [n].vtsize = nwords - n_in_regs;
1317 cinfo->args [n].reg = gr;
1319 nwords -= n_in_regs;
1321 cinfo->args [n].offset = stack_size;
1322 /*g_print ("offset for arg %d at %d\n", n, stack_size);*/
1323 stack_size += nwords * sizeof (gpointer);
1330 cinfo->args [n].size = 8;
1331 add_general (&gr, &stack_size, cinfo->args + n, FALSE);
1335 g_error ("Can't trampoline 0x%x", sig->params [i]->type);
1339 /* Handle the case where there are no implicit arguments */
1340 if ((sig->call_convention == MONO_CALL_VARARG) && (i == sig->sentinelpos)) {
1341 /* Prevent implicit arguments and sig_cookie from
1342 being passed in registers */
1344 /* Emit the signature cookie just before the implicit arguments */
1345 add_general (&gr, &stack_size, &cinfo->sig_cookie, TRUE);
/* Classify the return value. */
1349 simpletype = mini_type_get_underlying_type (gsctx, sig->ret);
1350 switch (simpletype->type) {
1351 case MONO_TYPE_BOOLEAN:
1356 case MONO_TYPE_CHAR:
1362 case MONO_TYPE_FNPTR:
1363 case MONO_TYPE_CLASS:
1364 case MONO_TYPE_OBJECT:
1365 case MONO_TYPE_SZARRAY:
1366 case MONO_TYPE_ARRAY:
1367 case MONO_TYPE_STRING:
1368 cinfo->ret.storage = RegTypeGeneral;
1369 cinfo->ret.reg = ARMREG_R0;
1373 cinfo->ret.storage = RegTypeIRegPair;
1374 cinfo->ret.reg = ARMREG_R0;
1378 cinfo->ret.storage = RegTypeFP;
1379 cinfo->ret.reg = ARMREG_R0;
1380 /* FIXME: cinfo->ret.reg = ???;
1381 cinfo->ret.storage = RegTypeFP;*/
1383 case MONO_TYPE_GENERICINST:
1384 if (!mono_type_generic_inst_is_valuetype (simpletype)) {
1385 cinfo->ret.storage = RegTypeGeneral;
1386 cinfo->ret.reg = ARMREG_R0;
1390 case MONO_TYPE_VALUETYPE:
1391 case MONO_TYPE_TYPEDBYREF:
1392 if (cinfo->ret.storage != RegTypeStructByVal)
1393 cinfo->ret.storage = RegTypeStructByAddr;
1395 case MONO_TYPE_VOID:
1398 g_error ("Can't handle as return value 0x%x", sig->ret->type)
1402 /* align stack size to 8 */
1403 DEBUG (printf (" stack size: %d (%d)\n", (stack_size + 15) & ~15, stack_size));
1404 stack_size = (stack_size + 7) & ~7;
1406 cinfo->stack_usage = stack_size;
/* Debugging helper: gates omit-fp on the mono_debug_count () budget so
 * omit-fp related failures can be bisected at runtime. */
1413 debug_omit_fp (void)
1416 return mono_debug_count ();
1423 * mono_arch_compute_omit_fp:
1425 * Determine whenever the frame pointer can be eliminated.
1428 mono_arch_compute_omit_fp (MonoCompile *cfg)
1430 MonoMethodSignature *sig;
1431 MonoMethodHeader *header;
/* Computed at most once per method; the result is cached in cfg->arch. */
1435 if (cfg->arch.omit_fp_computed)
1438 header = cfg->header;
1440 sig = mono_method_signature (cfg->method);
/* Call info is computed lazily and cached on the cfg. */
1442 if (!cfg->arch.cinfo)
1443 cfg->arch.cinfo = get_call_info (cfg->generic_sharing_context, cfg->mempool, sig);
1444 cinfo = cfg->arch.cinfo;
1447 * FIXME: Remove some of the restrictions.
1449 cfg->arch.omit_fp = TRUE;
1450 cfg->arch.omit_fp_computed = TRUE;
/* Start optimistic, then force a frame pointer if any condition below holds. */
1452 if (cfg->disable_omit_fp)
1453 cfg->arch.omit_fp = FALSE;
1454 if (!debug_omit_fp ())
1455 cfg->arch.omit_fp = FALSE;
1457 if (cfg->method->save_lmf)
1458 cfg->arch.omit_fp = FALSE;
1460 if (cfg->flags & MONO_CFG_HAS_ALLOCA)
1461 cfg->arch.omit_fp = FALSE;
1462 if (header->num_clauses)
1463 cfg->arch.omit_fp = FALSE;
1464 if (cfg->param_area)
1465 cfg->arch.omit_fp = FALSE;
1466 if (!sig->pinvoke && (sig->call_convention == MONO_CALL_VARARG))
1467 cfg->arch.omit_fp = FALSE;
1468 if ((mono_jit_trace_calls != NULL && mono_trace_eval (cfg->method)) ||
1469 (cfg->prof_options & MONO_PROFILE_ENTER_LEAVE))
1470 cfg->arch.omit_fp = FALSE;
/* Stack-passed arguments need a stable frame base to compute their offsets. */
1471 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
1472 ArgInfo *ainfo = &cinfo->args [i];
1474 if (ainfo->storage == RegTypeBase || ainfo->storage == RegTypeBaseGen || ainfo->storage == RegTypeStructByVal) {
1476 * The stack offset can only be determined when the frame
1479 cfg->arch.omit_fp = FALSE;
/* Accumulate the total size of the live locals. */
1484 for (i = cfg->locals_start; i < cfg->num_varinfo; i++) {
1485 MonoInst *ins = cfg->varinfo [i];
1488 locals_size += mono_type_size (ins->inst_vtype, &ialign);
1493 * Set var information according to the calling convention. arm version.
1494 * The locals var stuff should most likely be split in another method.
/* Assign stack offsets (or registers) to the return value, seq-point
 * bookkeeping variables, locals and incoming arguments.  On ARM the stack
 * grows down but locals are addressed at positive offsets from the frame
 * register (SP when omit-fp succeeded, FP otherwise). */
1497 mono_arch_allocate_vars (MonoCompile *cfg)
1499 MonoMethodSignature *sig;
1500 MonoMethodHeader *header;
1502 int i, offset, size, align, curinst;
1506 sig = mono_method_signature (cfg->method);
1508 if (!cfg->arch.cinfo)
1509 cfg->arch.cinfo = get_call_info (cfg->generic_sharing_context, cfg->mempool, sig);
1510 cinfo = cfg->arch.cinfo;
1512 mono_arch_compute_omit_fp (cfg);
1514 if (cfg->arch.omit_fp)
1515 cfg->frame_reg = ARMREG_SP;
1517 cfg->frame_reg = ARMREG_FP;
/* Spill slots grow upwards from the frame base on this backend. */
1519 cfg->flags |= MONO_CFG_HAS_SPILLUP;
1521 /* allow room for the vararg method args: void* and long/double */
1522 if (mono_jit_trace_calls != NULL && mono_trace_eval (cfg->method))
1523 cfg->param_area = MAX (cfg->param_area, sizeof (gpointer)*8);
1525 header = cfg->header;
1527 /* See mono_arch_get_global_int_regs () */
1528 if (cfg->flags & MONO_CFG_HAS_CALLS)
1529 cfg->uses_rgctx_reg = TRUE;
1531 if (cfg->frame_reg != ARMREG_SP)
1532 cfg->used_int_regs |= 1 << cfg->frame_reg;
1534 if (cfg->compile_aot || cfg->uses_rgctx_reg || COMPILE_LLVM (cfg))
1535 /* V5 is reserved for passing the vtable/rgctx/IMT method */
1536 cfg->used_int_regs |= (1 << ARMREG_V5);
/* Scalar returns live in R0; value types are handled further down. */
1540 if (!MONO_TYPE_ISSTRUCT (sig->ret) && !cinfo->vtype_retaddr) {
1541 if (sig->ret->type != MONO_TYPE_VOID) {
1542 cfg->ret->opcode = OP_REGVAR;
1543 cfg->ret->inst_c0 = ARMREG_R0;
1546 /* local vars are at a positive offset from the stack pointer */
1548 * also note that if the function uses alloca, we use FP
1549 * to point at the local variables.
1551 offset = 0; /* linkage area */
1552 /* align the offset to 16 bytes: not sure this is needed here */
1554 //offset &= ~(8 - 1);
1556 /* add parameter area size for called functions */
1557 offset += cfg->param_area;
1560 if (cfg->flags & MONO_CFG_HAS_FPOUT)
1563 /* allow room to save the return value */
1564 if (mono_jit_trace_calls != NULL && mono_trace_eval (cfg->method))
1567 /* the MonoLMF structure is stored just below the stack pointer */
1568 if (cinfo->ret.storage == RegTypeStructByVal) {
1569 cfg->ret->opcode = OP_REGOFFSET;
1570 cfg->ret->inst_basereg = cfg->frame_reg;
1571 offset += sizeof (gpointer) - 1;
1572 offset &= ~(sizeof (gpointer) - 1);
1573 cfg->ret->inst_offset = - offset;
1574 offset += sizeof(gpointer);
1575 } else if (cinfo->vtype_retaddr) {
1576 ins = cfg->vret_addr;
1577 offset += sizeof(gpointer) - 1;
1578 offset &= ~(sizeof(gpointer) - 1);
1579 ins->inst_offset = offset;
1580 ins->opcode = OP_REGOFFSET;
1581 ins->inst_basereg = cfg->frame_reg;
1582 if (G_UNLIKELY (cfg->verbose_level > 1)) {
1583 printf ("vret_addr =");
1584 mono_print_ins (cfg->vret_addr);
1586 offset += sizeof(gpointer);
1589 /* Allocate these first so they have a small offset, OP_SEQ_POINT depends on this */
1590 if (cfg->arch.seq_point_info_var) {
1593 ins = cfg->arch.seq_point_info_var;
1597 offset += align - 1;
1598 offset &= ~(align - 1);
1599 ins->opcode = OP_REGOFFSET;
1600 ins->inst_basereg = cfg->frame_reg;
1601 ins->inst_offset = offset;
1604 ins = cfg->arch.ss_trigger_page_var;
1607 offset += align - 1;
1608 offset &= ~(align - 1);
1609 ins->opcode = OP_REGOFFSET;
1610 ins->inst_basereg = cfg->frame_reg;
1611 ins->inst_offset = offset;
1615 if (cfg->arch.seq_point_read_var) {
1618 ins = cfg->arch.seq_point_read_var;
1622 offset += align - 1;
1623 offset &= ~(align - 1);
1624 ins->opcode = OP_REGOFFSET;
1625 ins->inst_basereg = cfg->frame_reg;
1626 ins->inst_offset = offset;
1629 ins = cfg->arch.seq_point_ss_method_var;
1632 offset += align - 1;
1633 offset &= ~(align - 1);
1634 ins->opcode = OP_REGOFFSET;
1635 ins->inst_basereg = cfg->frame_reg;
1636 ins->inst_offset = offset;
1639 ins = cfg->arch.seq_point_bp_method_var;
1642 offset += align - 1;
1643 offset &= ~(align - 1);
1644 ins->opcode = OP_REGOFFSET;
1645 ins->inst_basereg = cfg->frame_reg;
1646 ins->inst_offset = offset;
1650 cfg->locals_min_stack_offset = offset;
/* Lay out the method's locals. */
1652 curinst = cfg->locals_start;
1653 for (i = curinst; i < cfg->num_varinfo; ++i) {
1656 ins = cfg->varinfo [i];
1657 if ((ins->flags & MONO_INST_IS_DEAD) || ins->opcode == OP_REGVAR || ins->opcode == OP_REGOFFSET)
1660 t = ins->inst_vtype;
1661 if (cfg->gsharedvt && mini_is_gsharedvt_variable_type (cfg, t))
1662 t = mini_get_gsharedvt_alloc_type_for_type (cfg, t);
1664 /* inst->backend.is_pinvoke indicates native sized value types, this is used by the
1665 * pinvoke wrappers when they call functions returning structure */
1666 if (ins->backend.is_pinvoke && MONO_TYPE_ISSTRUCT (t) && t->type != MONO_TYPE_TYPEDBYREF) {
1667 size = mono_class_native_size (mono_class_from_mono_type (t), &ualign);
1671 size = mono_type_size (t, &align);
1673 /* FIXME: if a structure is misaligned, our memcpy doesn't work,
1674 * since it loads/stores misaligned words, which don't do the right thing.
1676 if (align < 4 && size >= 4)
1678 if (ALIGN_TO (offset, align) > ALIGN_TO (offset, 4))
1679 mini_gc_set_slot_type_from_fp (cfg, ALIGN_TO (offset, 4), SLOT_NOREF);
1680 offset += align - 1;
1681 offset &= ~(align - 1);
1682 ins->opcode = OP_REGOFFSET;
1683 ins->inst_offset = offset;
1684 ins->inst_basereg = cfg->frame_reg;
1686 //g_print ("allocating local %d to %d\n", i, inst->inst_offset);
1689 cfg->locals_max_stack_offset = offset;
/* Lay out the incoming arguments that did not stay in registers. */
1693 ins = cfg->args [curinst];
1694 if (ins->opcode != OP_REGVAR) {
1695 ins->opcode = OP_REGOFFSET;
1696 ins->inst_basereg = cfg->frame_reg;
1697 offset += sizeof (gpointer) - 1;
1698 offset &= ~(sizeof (gpointer) - 1);
1699 ins->inst_offset = offset;
1700 offset += sizeof (gpointer);
1705 if (sig->call_convention == MONO_CALL_VARARG) {
1709 /* Allocate a local slot to hold the sig cookie address */
1710 offset += align - 1;
1711 offset &= ~(align - 1);
1712 cfg->sig_cookie = offset;
1716 for (i = 0; i < sig->param_count; ++i) {
1717 ins = cfg->args [curinst];
1719 if (ins->opcode != OP_REGVAR) {
1720 ins->opcode = OP_REGOFFSET;
1721 ins->inst_basereg = cfg->frame_reg;
1722 size = mini_type_stack_size_full (cfg->generic_sharing_context, sig->params [i], &ualign, sig->pinvoke);
1724 /* FIXME: if a structure is misaligned, our memcpy doesn't work,
1725 * since it loads/stores misaligned words, which don't do the right thing.
1727 if (align < 4 && size >= 4)
1729 /* The code in the prolog () stores words when storing vtypes received in a register */
1730 if (MONO_TYPE_ISSTRUCT (sig->params [i]))
1732 if (ALIGN_TO (offset, align) > ALIGN_TO (offset, 4))
1733 mini_gc_set_slot_type_from_fp (cfg, ALIGN_TO (offset, 4), SLOT_NOREF);
1734 offset += align - 1;
1735 offset &= ~(align - 1);
1736 ins->inst_offset = offset;
1742 /* align the offset to 8 bytes */
1743 if (ALIGN_TO (offset, 8) > ALIGN_TO (offset, 4))
1744 mini_gc_set_slot_type_from_fp (cfg, ALIGN_TO (offset, 4), SLOT_NOREF);
1749 cfg->stack_offset = offset;
/* Create arch-specific compile-time variables: the hidden vtype return
 * address argument and the sequence-point bookkeeping locals used by the
 * soft debugger. */
1753 mono_arch_create_vars (MonoCompile *cfg)
1755 MonoMethodSignature *sig;
1758 sig = mono_method_signature (cfg->method);
1760 if (!cfg->arch.cinfo)
1761 cfg->arch.cinfo = get_call_info (cfg->generic_sharing_context, cfg->mempool, sig);
1762 cinfo = cfg->arch.cinfo;
1764 if (cinfo->ret.storage == RegTypeStructByVal)
1765 cfg->ret_var_is_local = TRUE;
/* Vtype returned via a hidden pointer argument. */
1767 if (cinfo->vtype_retaddr) {
1768 cfg->vret_addr = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_ARG);
1769 if (G_UNLIKELY (cfg->verbose_level > 1)) {
1770 printf ("vret_addr = ");
1771 mono_print_ins (cfg->vret_addr);
/* All seq-point variables are VOLATILE so they always live on the stack
 * where the debugger machinery can find them. */
1775 if (cfg->gen_seq_points) {
1776 if (cfg->soft_breakpoints) {
1777 MonoInst *ins = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1778 ins->flags |= MONO_INST_VOLATILE;
1779 cfg->arch.seq_point_read_var = ins;
1781 ins = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1782 ins->flags |= MONO_INST_VOLATILE;
1783 cfg->arch.seq_point_ss_method_var = ins;
1785 ins = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1786 ins->flags |= MONO_INST_VOLATILE;
1787 cfg->arch.seq_point_bp_method_var = ins;
/* Soft breakpoints are incompatible with AOT compilation. */
1789 g_assert (!cfg->compile_aot);
1790 } else if (cfg->compile_aot) {
1791 MonoInst *ins = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1792 ins->flags |= MONO_INST_VOLATILE;
1793 cfg->arch.seq_point_info_var = ins;
1795 /* Allocate a separate variable for this to save 1 load per seq point */
1796 ins = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1797 ins->flags |= MONO_INST_VOLATILE;
1798 cfg->arch.ss_trigger_page_var = ins;
/* Emit the vararg signature cookie: a truncated copy of the call signature
 * (implicit arguments stripped) stored at its stack slot.  No-op for tail
 * calls. */
1804 emit_sig_cookie (MonoCompile *cfg, MonoCallInst *call, CallInfo *cinfo)
1806 MonoMethodSignature *tmp_sig;
1809 if (call->tail_call)
/* The cookie is always passed on the stack on ARM. */
1812 g_assert (cinfo->sig_cookie.storage == RegTypeBase);
1815 * mono_ArgIterator_Setup assumes the signature cookie is
1816 * passed first and all the arguments which were before it are
1817 * passed on the stack after the signature. So compensate by
1818 * passing a different signature.
1820 tmp_sig = mono_metadata_signature_dup (call->signature);
1821 tmp_sig->param_count -= call->signature->sentinelpos;
1822 tmp_sig->sentinelpos = 0;
1823 memcpy (tmp_sig->params, call->signature->params + call->signature->sentinelpos, tmp_sig->param_count * sizeof (MonoType*));
1825 sig_reg = mono_alloc_ireg (cfg);
1826 MONO_EMIT_NEW_SIGNATURECONST (cfg, sig_reg, tmp_sig);
1828 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, ARMREG_SP, cinfo->sig_cookie.offset, sig_reg);
/* Translate our CallInfo into the LLVMCallInfo the LLVM backend expects.
 * Sets cfg->disable_llvm (with an exception message) for conventions the
 * LLVM path cannot express. */
1833 mono_arch_get_llvm_call_info (MonoCompile *cfg, MonoMethodSignature *sig)
1838 LLVMCallInfo *linfo;
1840 n = sig->param_count + sig->hasthis;
1842 cinfo = get_call_info (cfg->generic_sharing_context, cfg->mempool, sig);
1844 linfo = mono_mempool_alloc0 (cfg->mempool, sizeof (LLVMCallInfo) + (sizeof (LLVMArgInfo) * n));
1847 * LLVM always uses the native ABI while we use our own ABI, the
1848 * only difference is the handling of vtypes:
1849 * - we only pass/receive them in registers in some cases, and only
1850 * in 1 or 2 integer registers.
1852 if (cinfo->vtype_retaddr) {
1853 /* Vtype returned using a hidden argument */
1854 linfo->ret.storage = LLVMArgVtypeRetAddr;
1855 linfo->vret_arg_index = cinfo->vret_arg_index;
1856 } else if (cinfo->ret.storage != RegTypeGeneral && cinfo->ret.storage != RegTypeNone && cinfo->ret.storage != RegTypeFP && cinfo->ret.storage != RegTypeIRegPair) {
1857 cfg->exception_message = g_strdup ("unknown ret conv");
1858 cfg->disable_llvm = TRUE;
1862 for (i = 0; i < n; ++i) {
1863 ainfo = cinfo->args + i;
1865 linfo->args [i].storage = LLVMArgNone;
1867 switch (ainfo->storage) {
1868 case RegTypeGeneral:
1869 case RegTypeIRegPair:
1871 linfo->args [i].storage = LLVMArgInIReg;
1873 case RegTypeStructByVal:
1874 // FIXME: Passing entirely on the stack or split reg/stack
1875 if (ainfo->vtsize == 0 && ainfo->size <= 2) {
1876 linfo->args [i].storage = LLVMArgVtypeInReg;
1877 linfo->args [i].pair_storage [0] = LLVMArgInIReg;
1878 if (ainfo->size == 2)
1879 linfo->args [i].pair_storage [1] = LLVMArgInIReg;
1881 linfo->args [i].pair_storage [1] = LLVMArgNone;
1883 cfg->exception_message = g_strdup_printf ("vtype-by-val on stack");
1884 cfg->disable_llvm = TRUE;
1888 cfg->exception_message = g_strdup_printf ("ainfo->storage (%d)", ainfo->storage);
1889 cfg->disable_llvm = TRUE;
/* Lower the outgoing arguments of CALL into register moves and stack stores
 * according to the ArgInfo computed by get_call_info ().  Also emits the
 * vararg sig cookie and wires up the vtype return address. */
1899 mono_arch_emit_call (MonoCompile *cfg, MonoCallInst *call)
1902 MonoMethodSignature *sig;
1906 sig = call->signature;
1907 n = sig->param_count + sig->hasthis;
1909 cinfo = get_call_info (cfg->generic_sharing_context, NULL, sig);
1911 for (i = 0; i < n; ++i) {
1912 ArgInfo *ainfo = cinfo->args + i;
/* The implicit 'this' argument is treated as a native int. */
1915 if (i >= sig->hasthis)
1916 t = sig->params [i - sig->hasthis];
1918 t = &mono_defaults.int_class->byval_arg;
1919 t = mini_type_get_underlying_type (cfg->generic_sharing_context, t);
1921 if ((sig->call_convention == MONO_CALL_VARARG) && (i == sig->sentinelpos)) {
1922 /* Emit the signature cookie just before the implicit arguments */
1923 emit_sig_cookie (cfg, call, cinfo);
1926 in = call->args [i];
1928 switch (ainfo->storage) {
1929 case RegTypeGeneral:
1930 case RegTypeIRegPair:
/* I8/U8 occupy a register pair; dreg+1/dreg+2 are the vreg halves. */
1931 if (!t->byref && ((t->type == MONO_TYPE_I8) || (t->type == MONO_TYPE_U8))) {
1932 MONO_INST_NEW (cfg, ins, OP_MOVE);
1933 ins->dreg = mono_alloc_ireg (cfg);
1934 ins->sreg1 = in->dreg + 1;
1935 MONO_ADD_INS (cfg->cbb, ins);
1936 mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, ainfo->reg, FALSE);
1938 MONO_INST_NEW (cfg, ins, OP_MOVE);
1939 ins->dreg = mono_alloc_ireg (cfg);
1940 ins->sreg1 = in->dreg + 2;
1941 MONO_ADD_INS (cfg->cbb, ins);
1942 mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, ainfo->reg + 1, FALSE);
1943 } else if (!t->byref && ((t->type == MONO_TYPE_R8) || (t->type == MONO_TYPE_R4))) {
1944 if (ainfo->size == 4) {
1945 if (IS_SOFT_FLOAT) {
1946 /* mono_emit_call_args () have already done the r8->r4 conversion */
1947 /* The converted value is in an int vreg */
1948 MONO_INST_NEW (cfg, ins, OP_MOVE);
1949 ins->dreg = mono_alloc_ireg (cfg);
1950 ins->sreg1 = in->dreg;
1951 MONO_ADD_INS (cfg->cbb, ins);
1952 mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, ainfo->reg, FALSE);
/* Hard float: bounce through the param area to move an FP value into an int reg. */
1956 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORER4_MEMBASE_REG, ARMREG_SP, (cfg->param_area - 8), in->dreg);
1957 creg = mono_alloc_ireg (cfg);
1958 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOAD_MEMBASE, creg, ARMREG_SP, (cfg->param_area - 8));
1959 mono_call_inst_add_outarg_reg (cfg, call, creg, ainfo->reg, FALSE);
1962 if (IS_SOFT_FLOAT) {
1963 MONO_INST_NEW (cfg, ins, OP_FGETLOW32);
1964 ins->dreg = mono_alloc_ireg (cfg);
1965 ins->sreg1 = in->dreg;
1966 MONO_ADD_INS (cfg->cbb, ins);
1967 mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, ainfo->reg, FALSE);
1969 MONO_INST_NEW (cfg, ins, OP_FGETHIGH32);
1970 ins->dreg = mono_alloc_ireg (cfg);
1971 ins->sreg1 = in->dreg;
1972 MONO_ADD_INS (cfg->cbb, ins);
1973 mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, ainfo->reg + 1, FALSE);
/* Hard float R8: spill and reload both 32-bit halves into a register pair. */
1977 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORER8_MEMBASE_REG, ARMREG_SP, (cfg->param_area - 8), in->dreg);
1978 creg = mono_alloc_ireg (cfg);
1979 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOAD_MEMBASE, creg, ARMREG_SP, (cfg->param_area - 8));
1980 mono_call_inst_add_outarg_reg (cfg, call, creg, ainfo->reg, FALSE);
1981 creg = mono_alloc_ireg (cfg);
1982 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOAD_MEMBASE, creg, ARMREG_SP, (cfg->param_area - 8 + 4));
1983 mono_call_inst_add_outarg_reg (cfg, call, creg, ainfo->reg + 1, FALSE);
1986 cfg->flags |= MONO_CFG_HAS_FPOUT;
1988 MONO_INST_NEW (cfg, ins, OP_MOVE);
1989 ins->dreg = mono_alloc_ireg (cfg);
1990 ins->sreg1 = in->dreg;
1991 MONO_ADD_INS (cfg->cbb, ins);
1993 mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, ainfo->reg, FALSE);
1996 case RegTypeStructByAddr:
1999 /* FIXME: where si the data allocated? */
2000 arg->backend.reg3 = ainfo->reg;
2001 call->used_iregs |= 1 << ainfo->reg;
2002 g_assert_not_reached ();
2005 case RegTypeStructByVal:
/* Deferred to mono_arch_emit_outarg_vt () via OP_OUTARG_VT. */
2006 MONO_INST_NEW (cfg, ins, OP_OUTARG_VT);
2007 ins->opcode = OP_OUTARG_VT;
2008 ins->sreg1 = in->dreg;
2009 ins->klass = in->klass;
2010 ins->inst_p0 = call;
2011 ins->inst_p1 = mono_mempool_alloc (cfg->mempool, sizeof (ArgInfo));
2012 memcpy (ins->inst_p1, ainfo, sizeof (ArgInfo));
2013 mono_call_inst_add_outarg_vt (cfg, call, ins);
2014 MONO_ADD_INS (cfg->cbb, ins);
/* Stack-passed argument: store at its assigned offset. */
2017 if (!t->byref && ((t->type == MONO_TYPE_I8) || (t->type == MONO_TYPE_U8))) {
2018 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, ARMREG_SP, ainfo->offset, in->dreg);
2019 } else if (!t->byref && ((t->type == MONO_TYPE_R4) || (t->type == MONO_TYPE_R8))) {
2020 if (t->type == MONO_TYPE_R8) {
2021 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORER8_MEMBASE_REG, ARMREG_SP, ainfo->offset, in->dreg);
2024 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, ARMREG_SP, ainfo->offset, in->dreg);
2026 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORER4_MEMBASE_REG, ARMREG_SP, ainfo->offset, in->dreg);
2029 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, ARMREG_SP, ainfo->offset, in->dreg);
2032 case RegTypeBaseGen:
/* Value split between R3 and the stack; halves chosen by endianness. */
2033 if (!t->byref && ((t->type == MONO_TYPE_I8) || (t->type == MONO_TYPE_U8))) {
2034 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, ARMREG_SP, ainfo->offset, (G_BYTE_ORDER == G_BIG_ENDIAN) ? in->dreg + 1 : in->dreg + 2);
2035 MONO_INST_NEW (cfg, ins, OP_MOVE);
2036 ins->dreg = mono_alloc_ireg (cfg);
2037 ins->sreg1 = G_BYTE_ORDER == G_BIG_ENDIAN ? in->dreg + 2 : in->dreg + 1;
2038 MONO_ADD_INS (cfg->cbb, ins);
2039 mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, ARMREG_R3, FALSE);
2040 } else if (!t->byref && (t->type == MONO_TYPE_R8)) {
2043 /* This should work for soft-float as well */
2045 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORER8_MEMBASE_REG, ARMREG_SP, (cfg->param_area - 8), in->dreg);
2046 creg = mono_alloc_ireg (cfg);
2047 mono_call_inst_add_outarg_reg (cfg, call, creg, ARMREG_R3, FALSE);
2048 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOAD_MEMBASE, creg, ARMREG_SP, (cfg->param_area - 8));
2049 creg = mono_alloc_ireg (cfg);
2050 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOAD_MEMBASE, creg, ARMREG_SP, (cfg->param_area - 4));
2051 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, ARMREG_SP, ainfo->offset, creg);
2052 cfg->flags |= MONO_CFG_HAS_FPOUT;
2054 g_assert_not_reached ();
2061 arg->backend.reg3 = ainfo->reg;
2062 /* FP args are passed in int regs */
2063 call->used_iregs |= 1 << ainfo->reg;
2064 if (ainfo->size == 8) {
2065 arg->opcode = OP_OUTARG_R8;
2066 call->used_iregs |= 1 << (ainfo->reg + 1);
2068 arg->opcode = OP_OUTARG_R4;
2071 cfg->flags |= MONO_CFG_HAS_FPOUT;
2075 g_assert_not_reached ();
2079 /* Handle the case where there are no implicit arguments */
2080 if (!sig->pinvoke && (sig->call_convention == MONO_CALL_VARARG) && (n == sig->sentinelpos))
2081 emit_sig_cookie (cfg, call, cinfo);
2083 if (cinfo->ret.storage == RegTypeStructByVal) {
2084 /* The JIT will transform this into a normal call */
2085 call->vret_in_reg = TRUE;
2086 } else if (cinfo->vtype_retaddr) {
/* Pass the vtype return buffer address in the designated register. */
2088 MONO_INST_NEW (cfg, vtarg, OP_MOVE);
2089 vtarg->sreg1 = call->vret_var->dreg;
2090 vtarg->dreg = mono_alloc_preg (cfg);
2091 MONO_ADD_INS (cfg->cbb, vtarg);
2093 mono_call_inst_add_outarg_reg (cfg, call, vtarg->dreg, cinfo->ret.reg, FALSE);
2096 call->stack_usage = cinfo->stack_usage;
/* Emit a by-value struct argument: the first ainfo->size words go into
 * registers (with sub-word structs packed byte-by-byte), any overflow is
 * copied to the stack with mini_emit_memcpy (). */
2102 mono_arch_emit_outarg_vt (MonoCompile *cfg, MonoInst *ins, MonoInst *src)
2104 MonoCallInst *call = (MonoCallInst*)ins->inst_p0;
2105 ArgInfo *ainfo = ins->inst_p1;
2106 int ovf_size = ainfo->vtsize;
2107 int doffset = ainfo->offset;
2108 int struct_size = ainfo->struct_size;
2109 int i, soffset, dreg, tmpreg;
2112 for (i = 0; i < ainfo->size; ++i) {
2113 dreg = mono_alloc_ireg (cfg);
/* Remaining struct bytes decide how the next word is loaded. */
2114 switch (struct_size) {
2116 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, dreg, src->dreg, soffset);
2119 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, dreg, src->dreg, soffset);
/* 3 bytes: assemble from three byte loads shifted into place. */
2122 tmpreg = mono_alloc_ireg (cfg);
2123 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, dreg, src->dreg, soffset);
2124 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, tmpreg, src->dreg, soffset + 1);
2125 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, tmpreg, tmpreg, 8);
2126 MONO_EMIT_NEW_BIALU (cfg, OP_IOR, dreg, dreg, tmpreg);
2127 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, tmpreg, src->dreg, soffset + 2);
2128 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, tmpreg, tmpreg, 16);
2129 MONO_EMIT_NEW_BIALU (cfg, OP_IOR, dreg, dreg, tmpreg);
2132 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, dreg, src->dreg, soffset);
2135 mono_call_inst_add_outarg_reg (cfg, call, dreg, ainfo->reg + i, FALSE);
2136 soffset += sizeof (gpointer);
2137 struct_size -= sizeof (gpointer);
2139 //g_print ("vt size: %d at R%d + %d\n", doffset, vt->inst_basereg, vt->inst_offset);
2141 mini_emit_memcpy (cfg, ARMREG_SP, doffset, src->dreg, soffset, MIN (ovf_size * sizeof (gpointer), struct_size), struct_size < 4 ? 1 : 4);
/* Emit the move of VAL into the return register(s), dispatching on the
 * return type and the configured FPU model. */
2145 mono_arch_emit_setret (MonoCompile *cfg, MonoMethod *method, MonoInst *val)
2147 MonoType *ret = mini_type_get_underlying_type (cfg->generic_sharing_context, mono_method_signature (method)->ret);
/* 64-bit return: both halves via OP_SETLRET (LLVM handles pairs itself). */
2150 if (ret->type == MONO_TYPE_I8 || ret->type == MONO_TYPE_U8) {
2153 if (COMPILE_LLVM (cfg)) {
2154 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, cfg->ret->dreg, val->dreg);
2156 MONO_INST_NEW (cfg, ins, OP_SETLRET);
2157 ins->sreg1 = val->dreg + 1;
2158 ins->sreg2 = val->dreg + 2;
2159 MONO_ADD_INS (cfg->cbb, ins);
2164 case MONO_ARM_FPU_NONE:
2165 if (ret->type == MONO_TYPE_R8) {
2168 MONO_INST_NEW (cfg, ins, OP_SETFRET);
2169 ins->dreg = cfg->ret->dreg;
2170 ins->sreg1 = val->dreg;
2171 MONO_ADD_INS (cfg->cbb, ins);
2174 if (ret->type == MONO_TYPE_R4) {
2175 /* Already converted to an int in method_to_ir () */
2176 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, cfg->ret->dreg, val->dreg);
2180 case MONO_ARM_FPU_VFP:
2181 if (ret->type == MONO_TYPE_R8 || ret->type == MONO_TYPE_R4) {
2184 MONO_INST_NEW (cfg, ins, OP_SETFRET);
2185 ins->dreg = cfg->ret->dreg;
2186 ins->sreg1 = val->dreg;
2187 MONO_ADD_INS (cfg->cbb, ins);
2192 g_assert_not_reached ();
/* Default: plain integer/pointer return in R0. */
2196 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, cfg->ret->dreg, val->dreg);
2199 #endif /* #ifndef DISABLE_JIT */
2202 mono_arch_is_inst_imm (gint64 imm)
2207 #define DYN_CALL_STACK_ARGS 6
2210 MonoMethodSignature *sig;
2215 mgreg_t regs [PARAM_REGS + DYN_CALL_STACK_ARGS];
/* Whether SIG can be invoked through the dyn-call path: all arguments must
 * fit in PARAM_REGS + DYN_CALL_STACK_ARGS slots and use supported storage
 * classes. */
2221 dyn_call_supported (CallInfo *cinfo, MonoMethodSignature *sig)
2225 if (sig->hasthis + sig->param_count > PARAM_REGS + DYN_CALL_STACK_ARGS)
2228 switch (cinfo->ret.storage) {
2230 case RegTypeGeneral:
2231 case RegTypeIRegPair:
2232 case RegTypeStructByAddr:
2243 for (i = 0; i < cinfo->nargs; ++i) {
2244 switch (cinfo->args [i].storage) {
2245 case RegTypeGeneral:
2247 case RegTypeIRegPair:
2250 if (cinfo->args [i].offset >= (DYN_CALL_STACK_ARGS * sizeof (gpointer)))
2253 case RegTypeStructByVal:
2254 if (cinfo->args [i].reg + cinfo->args [i].vtsize >= PARAM_REGS + DYN_CALL_STACK_ARGS)
2262 // FIXME: Can't use cinfo only as it doesn't contain info about I8/float */
2263 for (i = 0; i < sig->param_count; ++i) {
2264 MonoType *t = sig->params [i];
/* Build the per-signature ArchDynCallInfo used by start/finish_dyn_call;
 * returns NULL when the signature is not supported.  Caller frees with
 * mono_arch_dyn_call_free (). */
2290 mono_arch_dyn_call_prepare (MonoMethodSignature *sig)
2292 ArchDynCallInfo *info;
2295 cinfo = get_call_info (NULL, NULL, sig);
2297 if (!dyn_call_supported (cinfo, sig)) {
2302 info = g_new0 (ArchDynCallInfo, 1);
2303 // FIXME: Preprocess the info to speed up start_dyn_call ()
2305 info->cinfo = cinfo;
2307 return (MonoDynCallInfo*)info;
/* Release an ArchDynCallInfo created by mono_arch_dyn_call_prepare (). */
2311 mono_arch_dyn_call_free (MonoDynCallInfo *info)
2313 ArchDynCallInfo *ainfo = (ArchDynCallInfo*)info;
2315 g_free (ainfo->cinfo);
/* Marshal ARGS into the DynCallArgs buffer BUF according to INFO: each
 * argument lands in its register slot (or pseudo-slot for the first
 * DYN_CALL_STACK_ARGS stack words), widened/narrowed per its MonoType. */
2320 mono_arch_start_dyn_call (MonoDynCallInfo *info, gpointer **args, guint8 *ret, guint8 *buf, int buf_len)
2322 ArchDynCallInfo *dinfo = (ArchDynCallInfo*)info;
2323 DynCallArgs *p = (DynCallArgs*)buf;
2324 int arg_index, greg, i, j, pindex;
2325 MonoMethodSignature *sig = dinfo->sig;
2327 g_assert (buf_len >= sizeof (DynCallArgs));
/* 'this' (or the vret arg when it comes first) goes in the first register. */
2336 if (sig->hasthis || dinfo->cinfo->vret_arg_index == 1) {
2337 p->regs [greg ++] = (mgreg_t)*(args [arg_index ++]);
2342 if (dinfo->cinfo->vtype_retaddr)
2343 p->regs [greg ++] = (mgreg_t)ret;
2345 for (i = pindex; i < sig->param_count; i++) {
2346 MonoType *t = mono_type_get_underlying_type (sig->params [i]);
2347 gpointer *arg = args [arg_index ++];
2348 ArgInfo *ainfo = &dinfo->cinfo->args [i + sig->hasthis];
/* Map the ArgInfo storage onto a p->regs slot index. */
2351 if (ainfo->storage == RegTypeGeneral || ainfo->storage == RegTypeIRegPair || ainfo->storage == RegTypeStructByVal)
2353 else if (ainfo->storage == RegTypeBase)
2354 slot = PARAM_REGS + (ainfo->offset / 4);
2356 g_assert_not_reached ();
2359 p->regs [slot] = (mgreg_t)*arg;
2364 case MONO_TYPE_STRING:
2365 case MONO_TYPE_CLASS:
2366 case MONO_TYPE_ARRAY:
2367 case MONO_TYPE_SZARRAY:
2368 case MONO_TYPE_OBJECT:
2372 p->regs [slot] = (mgreg_t)*arg;
2374 case MONO_TYPE_BOOLEAN:
2376 p->regs [slot] = *(guint8*)arg;
2379 p->regs [slot] = *(gint8*)arg;
2382 p->regs [slot] = *(gint16*)arg;
2385 case MONO_TYPE_CHAR:
2386 p->regs [slot] = *(guint16*)arg;
2389 p->regs [slot] = *(gint32*)arg;
2392 p->regs [slot] = *(guint32*)arg;
/* 64-bit values consume two consecutive slots. */
2396 p->regs [slot ++] = (mgreg_t)arg [0];
2397 p->regs [slot] = (mgreg_t)arg [1];
2400 p->regs [slot] = *(mgreg_t*)arg;
2403 p->regs [slot ++] = (mgreg_t)arg [0];
2404 p->regs [slot] = (mgreg_t)arg [1];
2406 case MONO_TYPE_GENERICINST:
2407 if (MONO_TYPE_IS_REFERENCE (t)) {
2408 p->regs [slot] = (mgreg_t)*arg;
2413 case MONO_TYPE_VALUETYPE:
2414 g_assert (ainfo->storage == RegTypeStructByVal);
/* size == 0 means the struct was passed entirely on the stack. */
2416 if (ainfo->size == 0)
2417 slot = PARAM_REGS + (ainfo->offset / 4);
2421 for (j = 0; j < ainfo->size + ainfo->vtsize; ++j)
2422 p->regs [slot ++] = ((mgreg_t*)arg) [j];
2425 g_assert_not_reached ();
/* Copy the raw register results captured in BUF (res/res2) back into the
 * caller's RET buffer, narrowing per the signature's return type. */
2431 mono_arch_finish_dyn_call (MonoDynCallInfo *info, guint8 *buf)
2433 ArchDynCallInfo *ainfo = (ArchDynCallInfo*)info;
2434 MonoMethodSignature *sig = ((ArchDynCallInfo*)info)->sig;
2435 guint8 *ret = ((DynCallArgs*)buf)->ret;
2436 mgreg_t res = ((DynCallArgs*)buf)->res;
2437 mgreg_t res2 = ((DynCallArgs*)buf)->res2;
2439 switch (mono_type_get_underlying_type (sig->ret)->type) {
2440 case MONO_TYPE_VOID:
2441 *(gpointer*)ret = NULL;
2443 case MONO_TYPE_STRING:
2444 case MONO_TYPE_CLASS:
2445 case MONO_TYPE_ARRAY:
2446 case MONO_TYPE_SZARRAY:
2447 case MONO_TYPE_OBJECT:
2451 *(gpointer*)ret = (gpointer)res;
2457 case MONO_TYPE_BOOLEAN:
2458 *(guint8*)ret = res;
2461 *(gint16*)ret = res;
2464 case MONO_TYPE_CHAR:
2465 *(guint16*)ret = res;
2468 *(gint32*)ret = res;
2471 *(guint32*)ret = res;
2475 /* This handles endianness as well */
2476 ((gint32*)ret) [0] = res;
2477 ((gint32*)ret) [1] = res2;
2479 case MONO_TYPE_GENERICINST:
2480 if (MONO_TYPE_IS_REFERENCE (sig->ret)) {
2481 *(gpointer*)ret = (gpointer)res;
2486 case MONO_TYPE_VALUETYPE:
/* Vtype results were written directly through the hidden ret pointer. */
2487 g_assert (ainfo->cinfo->vtype_retaddr);
2492 *(float*)ret = *(float*)&res;
2494 case MONO_TYPE_R8: {
2501 *(double*)ret = *(double*)&regs;
2505 g_assert_not_reached ();
2512 * Allow tracing to work with this interface (with an optional argument)
/* Emit the tracing prolog: call FUNC (method, frame) with the method in R0,
 * a placeholder NULL frame pointer in R1, and the callback address in R2. */
2516 mono_arch_instrument_prolog (MonoCompile *cfg, void *func, void *p, gboolean enable_arguments)
2520 code = mono_arm_emit_load_imm (code, ARMREG_R0, (guint32)cfg->method);
2521 ARM_MOV_REG_IMM8 (code, ARMREG_R1, 0); /* NULL ebp for now */
2522 code = mono_arm_emit_load_imm (code, ARMREG_R2, (guint32)func);
2523 code = emit_call_reg (code, ARMREG_R2);
2536 mono_arch_instrument_epilog_full (MonoCompile *cfg, void *func, void *p, gboolean enable_arguments, gboolean preserve_argument_registers)
2539 int save_mode = SAVE_NONE;
2541 MonoMethod *method = cfg->method;
2542 int rtype = mini_type_get_underlying_type (cfg->generic_sharing_context, mono_method_signature (method)->ret)->type;
2543 int save_offset = cfg->param_area;
2547 offset = code - cfg->native_code;
2548 /* we need about 16 instructions */
2549 if (offset > (cfg->code_size - 16 * 4)) {
2550 cfg->code_size *= 2;
2551 cfg->native_code = g_realloc (cfg->native_code, cfg->code_size);
2552 code = cfg->native_code + offset;
2555 case MONO_TYPE_VOID:
2556 /* special case string .ctor icall */
2557 if (strcmp (".ctor", method->name) && method->klass == mono_defaults.string_class)
2558 save_mode = SAVE_ONE;
2560 save_mode = SAVE_NONE;
2564 save_mode = SAVE_TWO;
2568 save_mode = SAVE_FP;
2570 case MONO_TYPE_VALUETYPE:
2571 save_mode = SAVE_STRUCT;
2574 save_mode = SAVE_ONE;
2578 switch (save_mode) {
2580 ARM_STR_IMM (code, ARMREG_R0, cfg->frame_reg, save_offset);
2581 ARM_STR_IMM (code, ARMREG_R1, cfg->frame_reg, save_offset + 4);
2582 if (enable_arguments) {
2583 ARM_MOV_REG_REG (code, ARMREG_R2, ARMREG_R1);
2584 ARM_MOV_REG_REG (code, ARMREG_R1, ARMREG_R0);
2588 ARM_STR_IMM (code, ARMREG_R0, cfg->frame_reg, save_offset);
2589 if (enable_arguments) {
2590 ARM_MOV_REG_REG (code, ARMREG_R1, ARMREG_R0);
2594 /* FIXME: what reg? */
2595 if (enable_arguments) {
2596 /* FIXME: what reg? */
2600 if (enable_arguments) {
2601 /* FIXME: get the actual address */
2602 ARM_MOV_REG_REG (code, ARMREG_R1, ARMREG_R0);
2610 code = mono_arm_emit_load_imm (code, ARMREG_R0, (guint32)cfg->method);
2611 code = mono_arm_emit_load_imm (code, ARMREG_IP, (guint32)func);
2612 code = emit_call_reg (code, ARMREG_IP);
2614 switch (save_mode) {
2616 ARM_LDR_IMM (code, ARMREG_R0, cfg->frame_reg, save_offset);
2617 ARM_LDR_IMM (code, ARMREG_R1, cfg->frame_reg, save_offset + 4);
2620 ARM_LDR_IMM (code, ARMREG_R0, cfg->frame_reg, save_offset);
/*
 * Branch/exception emission macros.  EMIT_COND_BRANCH* record a
 * MONO_PATCH_INFO_BB patch entry and emit a conditional branch placeholder
 * to be fixed up later; EMIT_COND_SYSTEM_EXCEPTION* record a
 * MONO_PATCH_INFO_EXC patch entry and emit a conditional BL placeholder
 * for the implicit-exception throw path.  NOTE(review): some continuation
 * lines of these macros are missing from this extract.
 */
2634 * The immediate field for cond branches is big enough for all reasonable methods
2636 #define EMIT_COND_BRANCH_FLAGS(ins,condcode) \
2637 if (0 && ins->inst_true_bb->native_offset) { \
2638 ARM_B_COND (code, (condcode), (code - cfg->native_code + ins->inst_true_bb->native_offset) & 0xffffff); \
2640 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_BB, ins->inst_true_bb); \
2641 ARM_B_COND (code, (condcode), 0); \
2644 #define EMIT_COND_BRANCH(ins,cond) EMIT_COND_BRANCH_FLAGS(ins, branch_cc_table [(cond)])
2646 /* emit an exception if condition is fail
2648 * We assign the extra code used to throw the implicit exceptions
2649 * to cfg->bb_exit as far as the big branch handling is concerned
2651 #define EMIT_COND_SYSTEM_EXCEPTION_FLAGS(condcode,exc_name) \
2653 mono_add_patch_info (cfg, code - cfg->native_code, \
2654 MONO_PATCH_INFO_EXC, exc_name); \
2655 ARM_BL_COND (code, (condcode), 0); \
2658 #define EMIT_COND_SYSTEM_EXCEPTION(cond,exc_name) EMIT_COND_SYSTEM_EXCEPTION_FLAGS(branch_cc_table [(cond)], (exc_name))
2661 mono_arch_peephole_pass_1 (MonoCompile *cfg, MonoBasicBlock *bb)
2666 mono_arch_peephole_pass_2 (MonoCompile *cfg, MonoBasicBlock *bb)
2668 MonoInst *ins, *n, *last_ins = NULL;
2670 MONO_BB_FOR_EACH_INS_SAFE (bb, n, ins) {
2671 switch (ins->opcode) {
2674 /* Already done by an arch-independent pass */
/* Peephole: fold a load that immediately follows a store to the same
 * [basereg + offset] slot — reuse the stored source register instead of
 * reloading from memory, or drop the load entirely when it targets the
 * very register that was just stored. */
2676 case OP_LOAD_MEMBASE:
2677 case OP_LOADI4_MEMBASE:
2679 * OP_STORE_MEMBASE_REG reg, offset(basereg)
2680 * OP_LOAD_MEMBASE offset(basereg), reg
2682 if (last_ins && (last_ins->opcode == OP_STOREI4_MEMBASE_REG
2683 || last_ins->opcode == OP_STORE_MEMBASE_REG) &&
2684 ins->inst_basereg == last_ins->inst_destbasereg &&
2685 ins->inst_offset == last_ins->inst_offset) {
2686 if (ins->dreg == last_ins->sreg1) {
2687 MONO_DELETE_INS (bb, ins);
2690 //static int c = 0; printf ("MATCHX %s %d\n", cfg->method->name,c++);
2691 ins->opcode = OP_MOVE;
2692 ins->sreg1 = last_ins->sreg1;
2696 * Note: reg1 must be different from the basereg in the second load
2697 * OP_LOAD_MEMBASE offset(basereg), reg1
2698 * OP_LOAD_MEMBASE offset(basereg), reg2
2700 * OP_LOAD_MEMBASE offset(basereg), reg1
2701 * OP_MOVE reg1, reg2
/* Fold a reload of a slot that was loaded by the previous instruction
 * into another register (the first load must not have clobbered the
 * base register, hence the inst_basereg != last_ins->dreg check). */
2703 } if (last_ins && (last_ins->opcode == OP_LOADI4_MEMBASE
2704 || last_ins->opcode == OP_LOAD_MEMBASE) &&
2705 ins->inst_basereg != last_ins->dreg &&
2706 ins->inst_basereg == last_ins->inst_basereg &&
2707 ins->inst_offset == last_ins->inst_offset) {
2709 if (ins->dreg == last_ins->dreg) {
2710 MONO_DELETE_INS (bb, ins);
2713 ins->opcode = OP_MOVE;
2714 ins->sreg1 = last_ins->dreg;
2717 //g_assert_not_reached ();
2721 * OP_STORE_MEMBASE_IMM imm, offset(basereg)
2722 * OP_LOAD_MEMBASE offset(basereg), reg
2724 * OP_STORE_MEMBASE_IMM imm, offset(basereg)
2725 * OP_ICONST reg, imm
/* A load right after a store-immediate to the same slot becomes an
 * OP_ICONST of the same immediate; the assert below shows this rule is
 * still considered unverified. */
2727 } else if (last_ins && (last_ins->opcode == OP_STOREI4_MEMBASE_IMM
2728 || last_ins->opcode == OP_STORE_MEMBASE_IMM) &&
2729 ins->inst_basereg == last_ins->inst_destbasereg &&
2730 ins->inst_offset == last_ins->inst_offset) {
2731 //static int c = 0; printf ("MATCHX %s %d\n", cfg->method->name,c++);
2732 ins->opcode = OP_ICONST;
2733 ins->inst_c0 = last_ins->inst_imm;
2734 g_assert_not_reached (); // check this rule
/* Narrow (1- and 2-byte) loads after a matching narrow store become
 * sign/zero extensions of the stored register. */
2738 case OP_LOADU1_MEMBASE:
2739 case OP_LOADI1_MEMBASE:
2740 if (last_ins && (last_ins->opcode == OP_STOREI1_MEMBASE_REG) &&
2741 ins->inst_basereg == last_ins->inst_destbasereg &&
2742 ins->inst_offset == last_ins->inst_offset) {
2743 ins->opcode = (ins->opcode == OP_LOADI1_MEMBASE) ? OP_ICONV_TO_I1 : OP_ICONV_TO_U1;
2744 ins->sreg1 = last_ins->sreg1;
2747 case OP_LOADU2_MEMBASE:
2748 case OP_LOADI2_MEMBASE:
2749 if (last_ins && (last_ins->opcode == OP_STOREI2_MEMBASE_REG) &&
2750 ins->inst_basereg == last_ins->inst_destbasereg &&
2751 ins->inst_offset == last_ins->inst_offset) {
2752 ins->opcode = (ins->opcode == OP_LOADI2_MEMBASE) ? OP_ICONV_TO_I2 : OP_ICONV_TO_U2;
2753 ins->sreg1 = last_ins->sreg1;
2757 ins->opcode = OP_MOVE;
/* Remove self-moves, and a move that exactly undoes the previous move. */
2761 if (ins->dreg == ins->sreg1) {
2762 MONO_DELETE_INS (bb, ins);
2766 * OP_MOVE sreg, dreg
2767 * OP_MOVE dreg, sreg
2769 if (last_ins && last_ins->opcode == OP_MOVE &&
2770 ins->sreg1 == last_ins->dreg &&
2771 ins->dreg == last_ins->sreg1) {
2772 MONO_DELETE_INS (bb, ins);
2780 bb->last_ins = last_ins;
/*
 * Table mapping Mono's condition-code indices to ARM condition codes,
 * indexed by EMIT_COND_BRANCH()/EMIT_COND_SYSTEM_EXCEPTION() above.
 * NOTE(review): the initializer entries are missing from this extract;
 * per the note below, their order is significant.
 */
2784 * the branch_cc_table should maintain the order of these
2798 branch_cc_table [] = {
/*
 * ADD_NEW_INS(cfg, dest, op): allocate a new MonoInst with opcode 'op'
 * into 'dest' and insert it immediately before the instruction 'ins'
 * currently being processed in basic block 'bb' (both 'bb' and 'ins' are
 * taken from the enclosing scope of the expansion site).
 */
2812 #define ADD_NEW_INS(cfg,dest,op) do { \
2813 MONO_INST_NEW ((cfg), (dest), (op)); \
2814 mono_bblock_insert_before_ins (bb, ins, (dest)); \
2818 map_to_reg_reg_op (int op)
/*
 * Translate an opcode that embeds an immediate (offset or value) into the
 * equivalent register-register form: *_MEMBASE loads/stores become
 * *_MEMINDEX, and *_MEMBASE_IMM stores become *_MEMBASE_REG — the caller
 * (mono_arch_lowering_pass) materializes the immediate into a register
 * first.  Asserts on opcodes with no mapping.
 */
2827 case OP_COMPARE_IMM:
2829 case OP_ICOMPARE_IMM:
2843 case OP_LOAD_MEMBASE:
2844 return OP_LOAD_MEMINDEX;
2845 case OP_LOADI4_MEMBASE:
2846 return OP_LOADI4_MEMINDEX;
2847 case OP_LOADU4_MEMBASE:
2848 return OP_LOADU4_MEMINDEX;
2849 case OP_LOADU1_MEMBASE:
2850 return OP_LOADU1_MEMINDEX;
2851 case OP_LOADI2_MEMBASE:
2852 return OP_LOADI2_MEMINDEX;
2853 case OP_LOADU2_MEMBASE:
2854 return OP_LOADU2_MEMINDEX;
2855 case OP_LOADI1_MEMBASE:
2856 return OP_LOADI1_MEMINDEX;
2857 case OP_STOREI1_MEMBASE_REG:
2858 return OP_STOREI1_MEMINDEX;
2859 case OP_STOREI2_MEMBASE_REG:
2860 return OP_STOREI2_MEMINDEX;
2861 case OP_STOREI4_MEMBASE_REG:
2862 return OP_STOREI4_MEMINDEX;
2863 case OP_STORE_MEMBASE_REG:
2864 return OP_STORE_MEMINDEX;
2865 case OP_STORER4_MEMBASE_REG:
2866 return OP_STORER4_MEMINDEX;
2867 case OP_STORER8_MEMBASE_REG:
2868 return OP_STORER8_MEMINDEX;
/* Store-immediate forms keep a MEMBASE addressing mode but switch to a
 * register source for the stored value. */
2869 case OP_STORE_MEMBASE_IMM:
2870 return OP_STORE_MEMBASE_REG;
2871 case OP_STOREI1_MEMBASE_IMM:
2872 return OP_STOREI1_MEMBASE_REG;
2873 case OP_STOREI2_MEMBASE_IMM:
2874 return OP_STOREI2_MEMBASE_REG;
2875 case OP_STOREI4_MEMBASE_IMM:
2876 return OP_STOREI4_MEMBASE_REG;
2878 g_assert_not_reached ();
/*
 * mono_arch_lowering_pass:
 * Rewrite instructions whose immediates/offsets cannot be encoded directly
 * in an ARM instruction: immediates that are not a rotated imm8 (and
 * offsets outside the imm12/imm8/fpimm8 ranges) are materialized into a
 * fresh vreg with ADD_NEW_INS + OP_ICONST, and the instruction is switched
 * to its register form via map_to_reg_reg_op()/mono_op_imm_to_op().
 * NOTE(review): this extract is missing lines; labels and braces below
 * are incomplete.
 */
2882 * Remove from the instruction list the instructions that can't be
2883 * represented with very simple instructions with no register
2887 mono_arch_lowering_pass (MonoCompile *cfg, MonoBasicBlock *bb)
2889 MonoInst *ins, *temp, *last_ins = NULL;
2890 int rot_amount, imm8, low_imm;
2892 MONO_BB_FOR_EACH_INS (bb, ins) {
2894 switch (ins->opcode) {
/* ALU/compare immediates must be a rotated 8-bit value; otherwise load
 * the constant into a register and use the reg-reg opcode. */
2898 case OP_COMPARE_IMM:
2899 case OP_ICOMPARE_IMM:
2913 if ((imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount)) < 0) {
2914 ADD_NEW_INS (cfg, temp, OP_ICONST);
2915 temp->inst_c0 = ins->inst_imm;
2916 temp->dreg = mono_alloc_ireg (cfg);
2917 ins->sreg2 = temp->dreg;
2918 ins->opcode = mono_op_imm_to_op (ins->opcode);
2920 if (ins->opcode == OP_SBB || ins->opcode == OP_ISBB || ins->opcode == OP_SUBCC)
/* OP_MUL_IMM strength reduction: *1 -> move, *0 -> constant zero,
 * power of two -> shift; otherwise materialize the constant and use
 * OP_IMUL. */
2926 if (ins->inst_imm == 1) {
2927 ins->opcode = OP_MOVE;
2930 if (ins->inst_imm == 0) {
2931 ins->opcode = OP_ICONST;
2935 imm8 = mono_is_power_of_two (ins->inst_imm);
2937 ins->opcode = OP_SHL_IMM;
2938 ins->inst_imm = imm8;
2941 ADD_NEW_INS (cfg, temp, OP_ICONST);
2942 temp->inst_c0 = ins->inst_imm;
2943 temp->dreg = mono_alloc_ireg (cfg);
2944 ins->sreg2 = temp->dreg;
2945 ins->opcode = OP_IMUL;
2951 if (ins->next && (ins->next->opcode == OP_COND_EXC_C || ins->next->opcode == OP_COND_EXC_IC))
2952 /* ARM sets the C flag to 1 if there was _no_ overflow */
2953 ins->next->opcode = OP_COND_EXC_NC;
/* Division/remainder and localloc immediates always go through a vreg. */
2956 case OP_IDIV_UN_IMM:
2958 case OP_IREM_UN_IMM:
2959 ADD_NEW_INS (cfg, temp, OP_ICONST);
2960 temp->inst_c0 = ins->inst_imm;
2961 temp->dreg = mono_alloc_ireg (cfg);
2962 ins->sreg2 = temp->dreg;
2963 ins->opcode = mono_op_imm_to_op (ins->opcode);
2965 case OP_LOCALLOC_IMM:
2966 ADD_NEW_INS (cfg, temp, OP_ICONST);
2967 temp->inst_c0 = ins->inst_imm;
2968 temp->dreg = mono_alloc_ireg (cfg);
2969 ins->sreg1 = temp->dreg;
2970 ins->opcode = OP_LOCALLOC;
/* Loads/stores: 12-bit offsets (word/byte) and 8-bit offsets
 * (halfword/signed byte) fit directly; larger offsets get loaded into
 * a vreg and the opcode switches to a MEMINDEX form. */
2972 case OP_LOAD_MEMBASE:
2973 case OP_LOADI4_MEMBASE:
2974 case OP_LOADU4_MEMBASE:
2975 case OP_LOADU1_MEMBASE:
2976 /* we can do two things: load the immed in a register
2977 * and use an indexed load, or see if the immed can be
2978 * represented as an ad_imm + a load with a smaller offset
2979 * that fits. We just do the first for now, optimize later.
2981 if (arm_is_imm12 (ins->inst_offset))
2983 ADD_NEW_INS (cfg, temp, OP_ICONST);
2984 temp->inst_c0 = ins->inst_offset;
2985 temp->dreg = mono_alloc_ireg (cfg);
2986 ins->sreg2 = temp->dreg;
2987 ins->opcode = map_to_reg_reg_op (ins->opcode);
2989 case OP_LOADI2_MEMBASE:
2990 case OP_LOADU2_MEMBASE:
2991 case OP_LOADI1_MEMBASE:
2992 if (arm_is_imm8 (ins->inst_offset))
2994 ADD_NEW_INS (cfg, temp, OP_ICONST);
2995 temp->inst_c0 = ins->inst_offset;
2996 temp->dreg = mono_alloc_ireg (cfg);
2997 ins->sreg2 = temp->dreg;
2998 ins->opcode = map_to_reg_reg_op (ins->opcode);
3000 case OP_LOADR4_MEMBASE:
3001 case OP_LOADR8_MEMBASE:
3002 if (arm_is_fpimm8 (ins->inst_offset))
3004 low_imm = ins->inst_offset & 0x1ff;
3005 if ((imm8 = mono_arm_is_rotated_imm8 (ins->inst_offset & ~0x1ff, &rot_amount)) >= 0) {
3006 ADD_NEW_INS (cfg, temp, OP_ADD_IMM);
3007 temp->inst_imm = ins->inst_offset & ~0x1ff;
3008 temp->sreg1 = ins->inst_basereg;
3009 temp->dreg = mono_alloc_ireg (cfg);
3010 ins->inst_basereg = temp->dreg;
3011 ins->inst_offset = low_imm;
3015 ADD_NEW_INS (cfg, temp, OP_ICONST);
3016 temp->inst_c0 = ins->inst_offset;
3017 temp->dreg = mono_alloc_ireg (cfg);
3019 ADD_NEW_INS (cfg, add_ins, OP_IADD);
3020 add_ins->sreg1 = ins->inst_basereg;
3021 add_ins->sreg2 = temp->dreg;
3022 add_ins->dreg = mono_alloc_ireg (cfg);
3024 ins->inst_basereg = add_ins->dreg;
3025 ins->inst_offset = 0;
3028 case OP_STORE_MEMBASE_REG:
3029 case OP_STOREI4_MEMBASE_REG:
3030 case OP_STOREI1_MEMBASE_REG:
3031 if (arm_is_imm12 (ins->inst_offset))
3033 ADD_NEW_INS (cfg, temp, OP_ICONST);
3034 temp->inst_c0 = ins->inst_offset;
3035 temp->dreg = mono_alloc_ireg (cfg);
3036 ins->sreg2 = temp->dreg;
3037 ins->opcode = map_to_reg_reg_op (ins->opcode);
3039 case OP_STOREI2_MEMBASE_REG:
3040 if (arm_is_imm8 (ins->inst_offset))
3042 ADD_NEW_INS (cfg, temp, OP_ICONST);
3043 temp->inst_c0 = ins->inst_offset;
3044 temp->dreg = mono_alloc_ireg (cfg);
3045 ins->sreg2 = temp->dreg;
3046 ins->opcode = map_to_reg_reg_op (ins->opcode);
3048 case OP_STORER4_MEMBASE_REG:
3049 case OP_STORER8_MEMBASE_REG:
3050 if (arm_is_fpimm8 (ins->inst_offset))
3052 low_imm = ins->inst_offset & 0x1ff;
3053 if ((imm8 = mono_arm_is_rotated_imm8 (ins->inst_offset & ~ 0x1ff, &rot_amount)) >= 0 && arm_is_fpimm8 (low_imm)) {
3054 ADD_NEW_INS (cfg, temp, OP_ADD_IMM);
3055 temp->inst_imm = ins->inst_offset & ~0x1ff;
3056 temp->sreg1 = ins->inst_destbasereg;
3057 temp->dreg = mono_alloc_ireg (cfg);
3058 ins->inst_destbasereg = temp->dreg;
3059 ins->inst_offset = low_imm;
3063 ADD_NEW_INS (cfg, temp, OP_ICONST);
3064 temp->inst_c0 = ins->inst_offset;
3065 temp->dreg = mono_alloc_ireg (cfg);
3067 ADD_NEW_INS (cfg, add_ins, OP_IADD);
3068 add_ins->sreg1 = ins->inst_destbasereg;
3069 add_ins->sreg2 = temp->dreg;
3070 add_ins->dreg = mono_alloc_ireg (cfg);
3072 ins->inst_destbasereg = add_ins->dreg;
3073 ins->inst_offset = 0;
3076 case OP_STORE_MEMBASE_IMM:
3077 case OP_STOREI1_MEMBASE_IMM:
3078 case OP_STOREI2_MEMBASE_IMM:
3079 case OP_STOREI4_MEMBASE_IMM:
3080 ADD_NEW_INS (cfg, temp, OP_ICONST);
3081 temp->inst_c0 = ins->inst_imm;
3082 temp->dreg = mono_alloc_ireg (cfg);
3083 ins->sreg1 = temp->dreg;
3084 ins->opcode = map_to_reg_reg_op (ins->opcode);
3086 goto loop_start; /* make it handle the possibly big ins->inst_offset */
/* FP compares: some orderings require swapping the operands and
 * flipping the following branch opcode (handled below). */
3088 gboolean swap = FALSE;
3092 /* Optimized away */
3097 /* Some fp compares require swapped operands */
3098 switch (ins->next->opcode) {
3100 ins->next->opcode = OP_FBLT;
3104 ins->next->opcode = OP_FBLT_UN;
3108 ins->next->opcode = OP_FBGE;
3112 ins->next->opcode = OP_FBGE_UN;
3120 ins->sreg1 = ins->sreg2;
3129 bb->last_ins = last_ins;
3130 bb->max_vreg = cfg->next_vreg;
/*
 * mono_arch_decompose_long_opts:
 * Decompose 64-bit opcodes into operations on 32-bit register pairs; here
 * OP_LNEG is expanded to RSBS (negate the low word, setting carry) followed
 * by RSC (reverse-subtract-with-carry the high word).  NOTE(review): 'ins'
 * is presumably aliased to 'long_ins' on a line missing from this extract —
 * confirm against the full source.
 */
3134 mono_arch_decompose_long_opts (MonoCompile *cfg, MonoInst *long_ins)
3138 if (long_ins->opcode == OP_LNEG) {
3140 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ARM_RSBS_IMM, ins->dreg + 1, ins->sreg1 + 1, 0);
3141 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ARM_RSC_IMM, ins->dreg + 2, ins->sreg1 + 2, 0);
/*
 * emit_float_to_int:
 * Convert the VFP value in 'sreg' to an integer in core register 'dreg':
 * signed (TOSIZD) or unsigned (TOUIZD) conversion into scratch F0, move to
 * the core register with FMRS, then truncate/extend to 'size' bytes.
 */
3147 emit_float_to_int (MonoCompile *cfg, guchar *code, int dreg, int sreg, int size, gboolean is_signed)
3149 /* sreg is a float, dreg is an integer reg */
3152 ARM_TOSIZD (code, ARM_VFP_F0, sreg);
3154 ARM_TOUIZD (code, ARM_VFP_F0, sreg);
3155 ARM_FMRS (code, dreg, ARM_VFP_F0);
/* Unsigned narrowing: mask to 8 bits, or shift pair to clear the top 16. */
3159 ARM_AND_REG_IMM8 (code, dreg, dreg, 0xff);
3160 else if (size == 2) {
3161 ARM_SHL_IMM (code, dreg, dreg, 16);
3162 ARM_SHR_IMM (code, dreg, dreg, 16);
/* Signed narrowing: shift left then arithmetic shift right to sign-extend. */
3166 ARM_SHL_IMM (code, dreg, dreg, 24);
3167 ARM_SAR_IMM (code, dreg, dreg, 24);
3168 } else if (size == 2) {
3169 ARM_SHL_IMM (code, dreg, dreg, 16);
3170 ARM_SAR_IMM (code, dreg, dreg, 16);
3176 #endif /* #ifndef DISABLE_JIT */
3180 const guchar *target;
/* True if 'diff' fits the signed +-32MB displacement reachable by an ARM B/BL. */
3185 #define is_call_imm(diff) ((gint)(diff) >= -33554432 && (gint)(diff) <= 33554431)
/*
 * search_thunk_slot:
 * Code-manager foreach callback: scan one code chunk's thunk area for
 * either an existing 12-byte thunk already targeting pdata->target, or a
 * free (all-zero) slot in which a new thunk is emitted.  On success the
 * call site at pdata->code is patched to branch to the thunk, pdata->found
 * is set, and a non-zero return stops the iteration.
 */
3188 search_thunk_slot (void *data, int csize, int bsize, void *user_data) {
3189 PatchData *pdata = (PatchData*)user_data;
3190 guchar *code = data;
3191 guint32 *thunks = data;
3192 guint32 *endthunks = (guint32*)(code + bsize);
3194 int difflow, diffhigh;
3196 /* always ensure a call from pdata->code can reach to the thunks without further thunks */
3197 difflow = (char*)pdata->code - (char*)thunks;
3198 diffhigh = (char*)pdata->code - (char*)endthunks;
3199 if (!((is_call_imm (thunks) && is_call_imm (endthunks)) || (is_call_imm (difflow) && is_call_imm (diffhigh))))
3203 * The thunk is composed of 3 words:
3204 * load constant from thunks [2] into ARM_IP
3207 * Note that the LR register is already setup
3209 //g_print ("thunk nentries: %d\n", ((char*)endthunks - (char*)thunks)/16);
/* Only search this chunk if it contains the call site, or when running
 * the second "use any free slot" pass signalled by pdata->found == 2. */
3210 if ((pdata->found == 2) || (pdata->code >= code && pdata->code <= code + csize)) {
3211 while (thunks < endthunks) {
3212 //g_print ("looking for target: %p at %p (%08x-%08x)\n", pdata->target, thunks, thunks [0], thunks [1]);
3213 if (thunks [2] == (guint32)pdata->target) {
3214 arm_patch (pdata->code, (guchar*)thunks);
3215 mono_arch_flush_icache (pdata->code, 4);
3218 } else if ((thunks [0] == 0) && (thunks [1] == 0) && (thunks [2] == 0)) {
3219 /* found a free slot instead: emit thunk */
3220 /* ARMREG_IP is fine to use since this can't be an IMT call
3223 code = (guchar*)thunks;
3224 ARM_LDR_IMM (code, ARMREG_IP, ARMREG_PC, 0);
3225 if (thumb_supported)
3226 ARM_BX (code, ARMREG_IP);
3228 ARM_MOV_REG_REG (code, ARMREG_PC, ARMREG_IP);
3229 thunks [2] = (guint32)pdata->target;
3230 mono_arch_flush_icache ((guchar*)thunks, 12);
3232 arm_patch (pdata->code, (guchar*)thunks);
3233 mono_arch_flush_icache (pdata->code, 4);
3237 /* skip 12 bytes, the size of the thunk */
3241 //g_print ("failed thunk lookup for %p from %p at %p (%d entries)\n", pdata->target, pdata->code, data, count);
/*
 * handle_thunk:
 * Find (or create) a branch thunk for 'target' and patch the call at
 * 'code' to use it.  Searches the dynamic-method code manager first (when
 * given), then the domain's code chunks; a second pass (pdata.found == 2)
 * takes the first available slot.  As a last resort it walks every dynamic
 * method's code manager.  Aborts via g_assert if no thunk can be placed.
 */
3247 handle_thunk (MonoDomain *domain, int absolute, guchar *code, const guchar *target, MonoCodeManager *dyn_code_mp)
3252 domain = mono_domain_get ();
3255 pdata.target = target;
3256 pdata.absolute = absolute;
3260 mono_code_manager_foreach (dyn_code_mp, search_thunk_slot, &pdata);
/* Not found in the dynamic code manager: search the domain code under
 * the domain lock, retrying in "any free slot" mode if necessary. */
3263 if (pdata.found != 1) {
3264 mono_domain_lock (domain);
3265 mono_domain_code_foreach (domain, search_thunk_slot, &pdata);
3268 /* this uses the first available slot */
3270 mono_domain_code_foreach (domain, search_thunk_slot, &pdata);
3272 mono_domain_unlock (domain);
3275 if (pdata.found != 1) {
3277 GHashTableIter iter;
3278 MonoJitDynamicMethodInfo *ji;
3281 * This might be a dynamic method, search its code manager. We can only
3282 * use the dynamic method containing CODE, since the others might be freed later.
3286 mono_domain_lock (domain);
3287 hash = domain_jit_info (domain)->dynamic_code_hash;
3289 /* FIXME: Speed this up */
3290 g_hash_table_iter_init (&iter, hash);
3291 while (g_hash_table_iter_next (&iter, NULL, (gpointer*)&ji)) {
3292 mono_code_manager_foreach (ji->code_mp, search_thunk_slot, &pdata);
3293 if (pdata.found == 1)
3297 mono_domain_unlock (domain);
3299 if (pdata.found != 1)
3300 g_print ("thunk failed for %p from %p\n", target, code);
3301 g_assert (pdata.found == 1);
/*
 * arm_patch_general:
 * Patch the instruction(s) at 'code' to transfer control to 'target'.
 * Handles: direct B/BL (rewrite the 24-bit displacement in place,
 * switching BL to BLX when the target address has the Thumb bit set),
 * out-of-range branches (redirected through a thunk via handle_thunk),
 * and the constant-pool based indirect call/jump sequences, where the
 * address word embedded in the code stream is overwritten.
 */
3305 arm_patch_general (MonoDomain *domain, guchar *code, const guchar *target, MonoCodeManager *dyn_code_mp)
3307 guint32 *code32 = (void*)code;
3308 guint32 ins = *code32;
3309 guint32 prim = (ins >> 25) & 7;
3310 guint32 tval = GPOINTER_TO_UINT (target);
3312 //g_print ("patching 0x%08x (0x%08x) to point to 0x%08x\n", code, ins, target);
3313 if (prim == 5) { /* 101b */
3314 /* the diff starts 8 bytes from the branch opcode */
3315 gint diff = target - code - 8;
3317 gint tmask = 0xffffffff;
3318 if (tval & 1) { /* entering thumb mode */
3319 diff = target - 1 - code - 8;
3320 g_assert (thumb_supported);
3321 tbits = 0xf << 28; /* bl->blx bit pattern */
3322 g_assert ((ins & (1 << 24))); /* it must be a bl, not b instruction */
3323 /* this low bit of the displacement is moved to bit 24 in the instruction encoding */
3327 tmask = ~(1 << 24); /* clear the link bit */
3328 /*g_print ("blx to thumb: target: %p, code: %p, diff: %d, mask: %x\n", target, code, diff, tmask);*/
/* Displacement fits the signed 24-bit B/BL range (+-32MB): rewrite the
 * branch in place; otherwise fall through to handle_thunk() below. */
3333 if (diff <= 33554431) {
3335 ins = (ins & 0xff000000) | diff;
3337 *code32 = ins | tbits;
3341 /* diff between 0 and -33554432 */
3342 if (diff >= -33554432) {
3344 ins = (ins & 0xff000000) | (diff & ~0xff000000);
3346 *code32 = ins | tbits;
3351 handle_thunk (domain, TRUE, code, target, dyn_code_mp);
3355 #ifdef USE_JUMP_TABLES
3357 gpointer *jte = mono_jumptable_get_entry (code);
3359 jte [0] = (gpointer) target;
3363 * The alternative call sequences looks like this:
3365 * ldr ip, [pc] // loads the address constant
3366 * b 1f // jumps around the constant
3367 * address constant embedded in the code
3372 * There are two cases for patching:
3373 * a) at the end of method emission: in this case code points to the start
3374 * of the call sequence
3375 * b) during runtime patching of the call site: in this case code points
3376 * to the mov pc, ip instruction
3378 * We have to handle also the thunk jump code sequence:
3382 * address constant // execution never reaches here
3384 if ((ins & 0x0ffffff0) == 0x12fff10) {
3385 /* Branch and exchange: the address is constructed in a reg
3386 * We can patch BX when the code sequence is the following:
3387 * ldr ip, [pc, #0] ; 0x8
3394 guint8 *emit = (guint8*)ccode;
3395 ARM_LDR_IMM (emit, ARMREG_IP, ARMREG_PC, 0);
3397 ARM_MOV_REG_REG (emit, ARMREG_LR, ARMREG_PC);
3398 ARM_BX (emit, ARMREG_IP);
3400 /*patching from magic trampoline*/
3401 if (ins == ccode [3]) {
3402 g_assert (code32 [-4] == ccode [0]);
3403 g_assert (code32 [-3] == ccode [1]);
3404 g_assert (code32 [-1] == ccode [2]);
3405 code32 [-2] = (guint32)target;
3408 /*patching from JIT*/
3409 if (ins == ccode [0]) {
3410 g_assert (code32 [1] == ccode [1]);
3411 g_assert (code32 [3] == ccode [2]);
3412 g_assert (code32 [4] == ccode [3]);
3413 code32 [2] = (guint32)target;
3416 g_assert_not_reached ();
3417 } else if ((ins & 0x0ffffff0) == 0x12fff30) {
/* 'ldr ip, [pc] ... blx ip' sequence: rebuild the expected opcodes,
 * verify them against the code stream, and overwrite the embedded
 * address word. */
3425 guint8 *emit = (guint8*)ccode;
3426 ARM_LDR_IMM (emit, ARMREG_IP, ARMREG_PC, 0);
3428 ARM_BLX_REG (emit, ARMREG_IP);
3430 g_assert (code32 [-3] == ccode [0]);
3431 g_assert (code32 [-2] == ccode [1]);
3432 g_assert (code32 [0] == ccode [2]);
3434 code32 [-1] = (guint32)target;
/* Fallback: 'ldr ip / mov lr,pc / mov pc,ip (or bx ip)' sequence, used
 * by both the thunk jump code and the far call sequence. */
3437 guint32 *tmp = ccode;
3438 guint8 *emit = (guint8*)tmp;
3439 ARM_LDR_IMM (emit, ARMREG_IP, ARMREG_PC, 0);
3440 ARM_MOV_REG_REG (emit, ARMREG_LR, ARMREG_PC);
3441 ARM_MOV_REG_REG (emit, ARMREG_PC, ARMREG_IP);
3442 ARM_BX (emit, ARMREG_IP);
3443 if (ins == ccode [2]) {
3444 g_assert_not_reached (); // should be -2 ...
3445 code32 [-1] = (guint32)target;
3448 if (ins == ccode [0]) {
3449 /* handles both thunk jump code and the far call sequence */
3450 code32 [2] = (guint32)target;
3453 g_assert_not_reached ();
3455 // g_print ("patched with 0x%08x\n", ins);
/*
 * arm_patch:
 * Convenience wrapper around arm_patch_general() with no domain and no
 * dynamic-method code manager.
 */
3460 arm_patch (guchar *code, const guchar *target)
3462 arm_patch_general (NULL, code, target, NULL);
3466 * Return the >= 0 uimm8 value if val can be represented with a byte + rotation
3467 * (with the rotation amount in *rot_amount. rot_amount is already adjusted
3468 * to be used with the emit macros.
3469 * Return -1 otherwise.
3472 mono_arm_is_rotated_imm8 (guint32 val, gint *rot_amount)
/* Try every even rotation, as required by the ARM data-processing
 * immediate encoding (8-bit value rotated right by an even amount). */
3475 for (i = 0; i < 31; i+= 2) {
/* NOTE(review): when i == 0 this evaluates 'val << 32', which is
 * undefined behavior for a 32-bit type in C; it happens to work with
 * the compilers in use, but '(i ? (val << (32 - i)) : 0) | (val >> i)'
 * would be strictly conforming — confirm before changing. */
3476 res = (val << (32 - i)) | (val >> i);
3479 *rot_amount = i? 32 - i: 0;
/*
 * mono_arm_emit_load_imm:
 * Emit the shortest available sequence loading the 32-bit constant 'val'
 * into 'dreg': a single MOV/MVN with a rotated imm8 when representable,
 * a MOVW (+ MOVT for a non-zero high half) pair when available, and
 * otherwise a MOV of one byte followed by up to three ADDs, one per
 * non-zero byte of the value.  Returns the updated code pointer.
 */
3486 * Emits in code a sequence of instructions that load the value 'val'
3487 * into the dreg register. Uses at most 4 instructions.
3490 mono_arm_emit_load_imm (guint8 *code, int dreg, guint32 val)
3492 int imm8, rot_amount;
/* (Path not fully visible in this extract) PC-relative load from an
 * inline constant pool. */
3494 ARM_LDR_IMM (code, dreg, ARMREG_PC, 0);
3495 /* skip the constant pool */
3501 if ((imm8 = mono_arm_is_rotated_imm8 (val, &rot_amount)) >= 0) {
3502 ARM_MOV_REG_IMM (code, dreg, imm8, rot_amount);
3503 } else if ((imm8 = mono_arm_is_rotated_imm8 (~val, &rot_amount)) >= 0) {
3504 ARM_MVN_REG_IMM (code, dreg, imm8, rot_amount);
/* MOVW the low halfword, MOVT the high halfword when non-zero. */
3507 ARM_MOVW_REG_IMM (code, dreg, val & 0xffff);
3509 ARM_MOVT_REG_IMM (code, dreg, (val >> 16) & 0xffff);
/* Fallback: build the value byte by byte with rotated-immediate ADDs,
 * starting the MOV from the lowest non-zero byte. */
3513 ARM_MOV_REG_IMM8 (code, dreg, (val & 0xFF));
3515 ARM_ADD_REG_IMM (code, dreg, dreg, (val & 0xFF00) >> 8, 24);
3517 if (val & 0xFF0000) {
3518 ARM_ADD_REG_IMM (code, dreg, dreg, (val & 0xFF0000) >> 16, 16);
3520 if (val & 0xFF000000) {
3521 ARM_ADD_REG_IMM (code, dreg, dreg, (val & 0xFF000000) >> 24, 8);
3523 } else if (val & 0xFF00) {
3524 ARM_MOV_REG_IMM (code, dreg, (val & 0xFF00) >> 8, 24);
3525 if (val & 0xFF0000) {
3526 ARM_ADD_REG_IMM (code, dreg, dreg, (val & 0xFF0000) >> 16, 16);
3528 if (val & 0xFF000000) {
3529 ARM_ADD_REG_IMM (code, dreg, dreg, (val & 0xFF000000) >> 24, 8);
3531 } else if (val & 0xFF0000) {
3532 ARM_MOV_REG_IMM (code, dreg, (val & 0xFF0000) >> 16, 16);
3533 if (val & 0xFF000000) {
3534 ARM_ADD_REG_IMM (code, dreg, dreg, (val & 0xFF000000) >> 24, 8);
3537 //g_assert_not_reached ();
/* mono_arm_thumb_supported: accessor for the cached Thumb-support flag
 * detected at runtime init. */
3543 mono_arm_thumb_supported (void)
3545 return thumb_supported;
/*
 * emit_load_volatile_arguments:
 * Reload the method's incoming arguments from their stack/home locations
 * back into the original argument registers, as required before emitting
 * a tail call (the callee expects its arguments where the caller placed
 * them).  Mirrors the argument-spilling code in emit_prolog.
 */
3551 * emit_load_volatile_arguments:
3553 * Load volatile arguments from the stack to the original input registers.
3554 * Required before a tail call.
3557 emit_load_volatile_arguments (MonoCompile *cfg, guint8 *code)
3559 MonoMethod *method = cfg->method;
3560 MonoMethodSignature *sig;
3565 /* FIXME: Generate intermediate code instead */
3567 sig = mono_method_signature (method);
3569 /* This is the opposite of the code in emit_prolog */
3573 cinfo = get_call_info (cfg->generic_sharing_context, NULL, sig);
/* Reload the hidden valuetype-return address into its ABI register. */
3575 if (cinfo->vtype_retaddr) {
3576 ArgInfo *ainfo = &cinfo->ret;
3577 inst = cfg->vret_addr;
3578 g_assert (arm_is_imm12 (inst->inst_offset));
3579 ARM_LDR_IMM (code, ainfo->reg, inst->inst_basereg, inst->inst_offset);
3581 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
3582 ArgInfo *ainfo = cinfo->args + i;
3583 inst = cfg->args [pos];
3585 if (cfg->verbose_level > 2)
3586 g_print ("Loading argument %d (type: %d)\n", i, ainfo->storage);
/* Argument held in a register variable: move it back to its ABI register. */
3587 if (inst->opcode == OP_REGVAR) {
3588 if (ainfo->storage == RegTypeGeneral)
3589 ARM_MOV_REG_REG (code, inst->dreg, ainfo->reg);
3590 else if (ainfo->storage == RegTypeFP) {
3591 g_assert_not_reached ();
3592 } else if (ainfo->storage == RegTypeBase) {
3596 if (arm_is_imm12 (prev_sp_offset + ainfo->offset)) {
3597 ARM_LDR_IMM (code, inst->dreg, ARMREG_SP, (prev_sp_offset + ainfo->offset));
/* NOTE(review): this fallback loads 'inst->inst_offset', while the
 * imm12 path above uses 'prev_sp_offset + ainfo->offset' — looks
 * inconsistent; confirm against emit_prolog before relying on it. */
3599 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
3600 ARM_LDR_REG_REG (code, inst->dreg, ARMREG_SP, ARMREG_IP);
3604 g_assert_not_reached ();
/* Argument spilled to the frame: reload into its register(s); IRegPair
 * reloads both words of a 64-bit argument. */
3606 if (ainfo->storage == RegTypeGeneral || ainfo->storage == RegTypeIRegPair) {
3607 switch (ainfo->size) {
3614 g_assert (arm_is_imm12 (inst->inst_offset));
3615 ARM_LDR_IMM (code, ainfo->reg, inst->inst_basereg, inst->inst_offset);
3616 g_assert (arm_is_imm12 (inst->inst_offset + 4));
3617 ARM_LDR_IMM (code, ainfo->reg + 1, inst->inst_basereg, inst->inst_offset + 4);
3620 if (arm_is_imm12 (inst->inst_offset)) {
3621 ARM_LDR_IMM (code, ainfo->reg, inst->inst_basereg, inst->inst_offset);
3623 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
3624 ARM_LDR_REG_REG (code, ainfo->reg, inst->inst_basereg, ARMREG_IP);
3628 } else if (ainfo->storage == RegTypeBaseGen) {
3631 } else if (ainfo->storage == RegTypeBase) {
3633 } else if (ainfo->storage == RegTypeFP) {
3634 g_assert_not_reached ();
/* Struct passed by value in registers: reload each word into the
 * consecutive registers assigned to it. */
3635 } else if (ainfo->storage == RegTypeStructByVal) {
3636 int doffset = inst->inst_offset;
3640 if (mono_class_from_mono_type (inst->inst_vtype))
3641 size = mono_class_native_size (mono_class_from_mono_type (inst->inst_vtype), NULL);
3642 for (cur_reg = 0; cur_reg < ainfo->size; ++cur_reg) {
3643 if (arm_is_imm12 (doffset)) {
3644 ARM_LDR_IMM (code, ainfo->reg + cur_reg, inst->inst_basereg, doffset);
3646 code = mono_arm_emit_load_imm (code, ARMREG_IP, doffset);
3647 ARM_LDR_REG_REG (code, ainfo->reg + cur_reg, inst->inst_basereg, ARMREG_IP);
3649 soffset += sizeof (gpointer);
3650 doffset += sizeof (gpointer);
3655 } else if (ainfo->storage == RegTypeStructByAddr) {
3670 mono_arch_output_basic_block (MonoCompile *cfg, MonoBasicBlock *bb)
3675 guint8 *code = cfg->native_code + cfg->code_len;
3676 MonoInst *last_ins = NULL;
3677 guint last_offset = 0;
3679 int imm8, rot_amount;
3681 /* we don't align basic blocks of loops on arm */
3683 if (cfg->verbose_level > 2)
3684 g_print ("Basic block %d starting at offset 0x%x\n", bb->block_num, bb->native_offset);
3686 cpos = bb->max_offset;
3688 if (cfg->prof_options & MONO_PROFILE_COVERAGE) {
3689 //MonoCoverageInfo *cov = mono_get_coverage_info (cfg->method);
3690 //g_assert (!mono_compile_aot);
3693 // cov->data [bb->dfn].iloffset = bb->cil_code - cfg->cil_code;
3694 /* this is not thread save, but good enough */
3695 /* fixme: howto handle overflows? */
3696 //x86_inc_mem (code, &cov->data [bb->dfn].count);
3699 if (mono_break_at_bb_method && mono_method_desc_full_match (mono_break_at_bb_method, cfg->method) && bb->block_num == mono_break_at_bb_bb_num) {
3700 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_INTERNAL_METHOD,
3701 (gpointer)"mono_break");
3702 code = emit_call_seq (cfg, code);
3705 MONO_BB_FOR_EACH_INS (bb, ins) {
3706 offset = code - cfg->native_code;
3708 max_len = ((guint8 *)ins_get_spec (ins->opcode))[MONO_INST_LEN];
3710 if (offset > (cfg->code_size - max_len - 16)) {
3711 cfg->code_size *= 2;
3712 cfg->native_code = g_realloc (cfg->native_code, cfg->code_size);
3713 code = cfg->native_code + offset;
3715 // if (ins->cil_code)
3716 // g_print ("cil code\n");
3717 mono_debug_record_line_number (cfg, ins, offset);
3719 switch (ins->opcode) {
3720 case OP_MEMORY_BARRIER:
3722 ARM_MOV_REG_IMM8 (code, ARMREG_R0, 0);
3723 ARM_MCR (code, 15, 0, ARMREG_R0, 7, 10, 5);
3727 #ifdef HAVE_AEABI_READ_TP
3728 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_INTERNAL_METHOD,
3729 (gpointer)"__aeabi_read_tp");
3730 code = emit_call_seq (cfg, code);
3732 ARM_LDR_IMM (code, ins->dreg, ARMREG_R0, ins->inst_offset);
3734 g_assert_not_reached ();
3738 ppc_mullw (code, ppc_r4, ins->sreg1, ins->sreg2);
3739 ppc_mulhw (code, ppc_r3, ins->sreg1, ins->sreg2);
3742 ppc_mullw (code, ppc_r4, ins->sreg1, ins->sreg2);
3743 ppc_mulhwu (code, ppc_r3, ins->sreg1, ins->sreg2);
3745 case OP_STOREI1_MEMBASE_IMM:
3746 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_imm & 0xFF);
3747 g_assert (arm_is_imm12 (ins->inst_offset));
3748 ARM_STRB_IMM (code, ARMREG_LR, ins->inst_destbasereg, ins->inst_offset);
3750 case OP_STOREI2_MEMBASE_IMM:
3751 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_imm & 0xFFFF);
3752 g_assert (arm_is_imm8 (ins->inst_offset));
3753 ARM_STRH_IMM (code, ARMREG_LR, ins->inst_destbasereg, ins->inst_offset);
3755 case OP_STORE_MEMBASE_IMM:
3756 case OP_STOREI4_MEMBASE_IMM:
3757 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_imm);
3758 g_assert (arm_is_imm12 (ins->inst_offset));
3759 ARM_STR_IMM (code, ARMREG_LR, ins->inst_destbasereg, ins->inst_offset);
3761 case OP_STOREI1_MEMBASE_REG:
3762 g_assert (arm_is_imm12 (ins->inst_offset));
3763 ARM_STRB_IMM (code, ins->sreg1, ins->inst_destbasereg, ins->inst_offset);
3765 case OP_STOREI2_MEMBASE_REG:
3766 g_assert (arm_is_imm8 (ins->inst_offset));
3767 ARM_STRH_IMM (code, ins->sreg1, ins->inst_destbasereg, ins->inst_offset);
3769 case OP_STORE_MEMBASE_REG:
3770 case OP_STOREI4_MEMBASE_REG:
3771 /* this case is special, since it happens for spill code after lowering has been called */
3772 if (arm_is_imm12 (ins->inst_offset)) {
3773 ARM_STR_IMM (code, ins->sreg1, ins->inst_destbasereg, ins->inst_offset);
3775 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_offset);
3776 ARM_STR_REG_REG (code, ins->sreg1, ins->inst_destbasereg, ARMREG_LR);
3779 case OP_STOREI1_MEMINDEX:
3780 ARM_STRB_REG_REG (code, ins->sreg1, ins->inst_destbasereg, ins->sreg2);
3782 case OP_STOREI2_MEMINDEX:
3783 ARM_STRH_REG_REG (code, ins->sreg1, ins->inst_destbasereg, ins->sreg2);
3785 case OP_STORE_MEMINDEX:
3786 case OP_STOREI4_MEMINDEX:
3787 ARM_STR_REG_REG (code, ins->sreg1, ins->inst_destbasereg, ins->sreg2);
3790 g_assert_not_reached ();
3792 case OP_LOAD_MEMINDEX:
3793 case OP_LOADI4_MEMINDEX:
3794 case OP_LOADU4_MEMINDEX:
3795 ARM_LDR_REG_REG (code, ins->dreg, ins->inst_basereg, ins->sreg2);
3797 case OP_LOADI1_MEMINDEX:
3798 ARM_LDRSB_REG_REG (code, ins->dreg, ins->inst_basereg, ins->sreg2);
3800 case OP_LOADU1_MEMINDEX:
3801 ARM_LDRB_REG_REG (code, ins->dreg, ins->inst_basereg, ins->sreg2);
3803 case OP_LOADI2_MEMINDEX:
3804 ARM_LDRSH_REG_REG (code, ins->dreg, ins->inst_basereg, ins->sreg2);
3806 case OP_LOADU2_MEMINDEX:
3807 ARM_LDRH_REG_REG (code, ins->dreg, ins->inst_basereg, ins->sreg2);
3809 case OP_LOAD_MEMBASE:
3810 case OP_LOADI4_MEMBASE:
3811 case OP_LOADU4_MEMBASE:
3812 /* this case is special, since it happens for spill code after lowering has been called */
3813 if (arm_is_imm12 (ins->inst_offset)) {
3814 ARM_LDR_IMM (code, ins->dreg, ins->inst_basereg, ins->inst_offset);
3816 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_offset);
3817 ARM_LDR_REG_REG (code, ins->dreg, ins->inst_basereg, ARMREG_LR);
3820 case OP_LOADI1_MEMBASE:
3821 g_assert (arm_is_imm8 (ins->inst_offset));
3822 ARM_LDRSB_IMM (code, ins->dreg, ins->inst_basereg, ins->inst_offset);
3824 case OP_LOADU1_MEMBASE:
3825 g_assert (arm_is_imm12 (ins->inst_offset));
3826 ARM_LDRB_IMM (code, ins->dreg, ins->inst_basereg, ins->inst_offset);
3828 case OP_LOADU2_MEMBASE:
3829 g_assert (arm_is_imm8 (ins->inst_offset));
3830 ARM_LDRH_IMM (code, ins->dreg, ins->inst_basereg, ins->inst_offset);
3832 case OP_LOADI2_MEMBASE:
3833 g_assert (arm_is_imm8 (ins->inst_offset));
3834 ARM_LDRSH_IMM (code, ins->dreg, ins->inst_basereg, ins->inst_offset);
3836 case OP_ICONV_TO_I1:
3837 ARM_SHL_IMM (code, ins->dreg, ins->sreg1, 24);
3838 ARM_SAR_IMM (code, ins->dreg, ins->dreg, 24);
3840 case OP_ICONV_TO_I2:
3841 ARM_SHL_IMM (code, ins->dreg, ins->sreg1, 16);
3842 ARM_SAR_IMM (code, ins->dreg, ins->dreg, 16);
3844 case OP_ICONV_TO_U1:
3845 ARM_AND_REG_IMM8 (code, ins->dreg, ins->sreg1, 0xff);
3847 case OP_ICONV_TO_U2:
3848 ARM_SHL_IMM (code, ins->dreg, ins->sreg1, 16);
3849 ARM_SHR_IMM (code, ins->dreg, ins->dreg, 16);
3853 ARM_CMP_REG_REG (code, ins->sreg1, ins->sreg2);
3855 case OP_COMPARE_IMM:
3856 case OP_ICOMPARE_IMM:
3857 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
3858 g_assert (imm8 >= 0);
3859 ARM_CMP_REG_IMM (code, ins->sreg1, imm8, rot_amount);
3863 * gdb does not like encountering the hw breakpoint ins in the debugged code.
3864 * So instead of emitting a trap, we emit a call a C function and place a
3867 //*(int*)code = 0xef9f0001;
3870 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_INTERNAL_METHOD,
3871 (gpointer)"mono_break");
3872 code = emit_call_seq (cfg, code);
3874 case OP_RELAXED_NOP:
3879 case OP_DUMMY_STORE:
3880 case OP_NOT_REACHED:
3883 case OP_SEQ_POINT: {
3885 MonoInst *info_var = cfg->arch.seq_point_info_var;
3886 MonoInst *ss_trigger_page_var = cfg->arch.ss_trigger_page_var;
3887 MonoInst *ss_read_var = cfg->arch.seq_point_read_var;
3888 MonoInst *ss_method_var = cfg->arch.seq_point_ss_method_var;
3889 MonoInst *bp_method_var = cfg->arch.seq_point_bp_method_var;
3891 int dreg = ARMREG_LR;
3893 if (cfg->soft_breakpoints) {
3894 g_assert (!cfg->compile_aot);
3898 * For AOT, we use one got slot per method, which will point to a
3899 * SeqPointInfo structure, containing all the information required
3900 * by the code below.
3902 if (cfg->compile_aot) {
3903 g_assert (info_var);
3904 g_assert (info_var->opcode == OP_REGOFFSET);
3905 g_assert (arm_is_imm12 (info_var->inst_offset));
3908 if (!cfg->soft_breakpoints) {
3910 * Read from the single stepping trigger page. This will cause a
3911 * SIGSEGV when single stepping is enabled.
3912 * We do this _before_ the breakpoint, so single stepping after
3913 * a breakpoint is hit will step to the next IL offset.
3915 g_assert (((guint64)(gsize)ss_trigger_page >> 32) == 0);
3918 if (ins->flags & MONO_INST_SINGLE_STEP_LOC) {
3919 if (cfg->soft_breakpoints) {
3920 /* Load the address of the sequence point trigger variable. */
3923 g_assert (var->opcode == OP_REGOFFSET);
3924 g_assert (arm_is_imm12 (var->inst_offset));
3925 ARM_LDR_IMM (code, dreg, var->inst_basereg, var->inst_offset);
3927 /* Read the value and check whether it is non-zero. */
3928 ARM_LDR_IMM (code, dreg, dreg, 0);
3929 ARM_CMP_REG_IMM (code, dreg, 0, 0);
3931 /* Load the address of the sequence point method. */
3932 var = ss_method_var;
3934 g_assert (var->opcode == OP_REGOFFSET);
3935 g_assert (arm_is_imm12 (var->inst_offset));
3936 ARM_LDR_IMM (code, dreg, var->inst_basereg, var->inst_offset);
3938 /* Call it conditionally. */
3939 ARM_BLX_REG_COND (code, ARMCOND_NE, dreg);
3941 if (cfg->compile_aot) {
3942 /* Load the trigger page addr from the variable initialized in the prolog */
3943 var = ss_trigger_page_var;
3945 g_assert (var->opcode == OP_REGOFFSET);
3946 g_assert (arm_is_imm12 (var->inst_offset));
3947 ARM_LDR_IMM (code, dreg, var->inst_basereg, var->inst_offset);
3949 #ifdef USE_JUMP_TABLES
3950 gpointer *jte = mono_jumptable_add_entry ();
3951 code = mono_arm_load_jumptable_entry (code, jte, dreg);
3952 jte [0] = ss_trigger_page;
3954 ARM_LDR_IMM (code, dreg, ARMREG_PC, 0);
3956 *(int*)code = (int)ss_trigger_page;
3960 ARM_LDR_IMM (code, dreg, dreg, 0);
3964 mono_add_seq_point (cfg, bb, ins, code - cfg->native_code);
3966 if (cfg->soft_breakpoints) {
3967 /* Load the address of the breakpoint method into ip. */
3968 var = bp_method_var;
3970 g_assert (var->opcode == OP_REGOFFSET);
3971 g_assert (arm_is_imm12 (var->inst_offset));
3972 ARM_LDR_IMM (code, dreg, var->inst_basereg, var->inst_offset);
3975 * A placeholder for a possible breakpoint inserted by
3976 * mono_arch_set_breakpoint ().
3979 } else if (cfg->compile_aot) {
3980 guint32 offset = code - cfg->native_code;
3983 ARM_LDR_IMM (code, dreg, info_var->inst_basereg, info_var->inst_offset);
3984 /* Add the offset */
3985 val = ((offset / 4) * sizeof (guint8*)) + G_STRUCT_OFFSET (SeqPointInfo, bp_addrs);
3986 /* Load the info->bp_addrs [offset], which is either 0 or the address of a trigger page */
3987 if (arm_is_imm12 ((int)val)) {
3988 ARM_LDR_IMM (code, dreg, dreg, val);
3990 ARM_ADD_REG_IMM (code, dreg, dreg, (val & 0xFF), 0);
3992 ARM_ADD_REG_IMM (code, dreg, dreg, (val & 0xFF00) >> 8, 24);
3994 ARM_ADD_REG_IMM (code, dreg, dreg, (val & 0xFF0000) >> 16, 16);
3995 g_assert (!(val & 0xFF000000));
3997 ARM_LDR_IMM (code, dreg, dreg, 0);
3999 /* What is faster, a branch or a load ? */
4000 ARM_CMP_REG_IMM (code, dreg, 0, 0);
4001 /* The breakpoint instruction */
4002 ARM_LDR_IMM_COND (code, dreg, dreg, 0, ARMCOND_NE);
4005 * A placeholder for a possible breakpoint inserted by
4006 * mono_arch_set_breakpoint ().
4008 for (i = 0; i < 4; ++i)
4015 ARM_ADDS_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4018 ARM_ADD_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4022 ARM_ADCS_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4025 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
4026 g_assert (imm8 >= 0);
4027 ARM_ADDS_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
4031 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
4032 g_assert (imm8 >= 0);
4033 ARM_ADD_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
4037 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
4038 g_assert (imm8 >= 0);
4039 ARM_ADCS_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
4042 ARM_ADD_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4043 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, "OverflowException");
4045 case OP_IADD_OVF_UN:
4046 ARM_ADD_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4047 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, "OverflowException");
4050 ARM_SUB_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4051 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, "OverflowException");
4053 case OP_ISUB_OVF_UN:
4054 ARM_SUB_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4055 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_TRUE, PPC_BR_EQ, "OverflowException");
4057 case OP_ADD_OVF_CARRY:
4058 ARM_ADCS_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4059 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, "OverflowException");
4061 case OP_ADD_OVF_UN_CARRY:
4062 ARM_ADCS_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4063 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, "OverflowException");
4065 case OP_SUB_OVF_CARRY:
4066 ARM_SBCS_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4067 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, "OverflowException");
4069 case OP_SUB_OVF_UN_CARRY:
4070 ARM_SBCS_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4071 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_TRUE, PPC_BR_EQ, "OverflowException");
4075 ARM_SUBS_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4078 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
4079 g_assert (imm8 >= 0);
4080 ARM_SUBS_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
4083 ARM_SUB_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4087 ARM_SBCS_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4091 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
4092 g_assert (imm8 >= 0);
4093 ARM_SUB_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
4097 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
4098 g_assert (imm8 >= 0);
4099 ARM_SBCS_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
4101 case OP_ARM_RSBS_IMM:
4102 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
4103 g_assert (imm8 >= 0);
4104 ARM_RSBS_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
4106 case OP_ARM_RSC_IMM:
4107 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
4108 g_assert (imm8 >= 0);
4109 ARM_RSC_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
4112 ARM_AND_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4116 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
4117 g_assert (imm8 >= 0);
4118 ARM_AND_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
4121 g_assert (v7s_supported);
4122 ARM_SDIV (code, ins->dreg, ins->sreg1, ins->sreg2);
4125 g_assert (v7s_supported);
4126 ARM_UDIV (code, ins->dreg, ins->sreg1, ins->sreg2);
4129 g_assert (v7s_supported);
4130 ARM_SDIV (code, ARMREG_IP, ins->sreg1, ins->sreg2);
4131 ARM_MLS (code, ins->dreg, ARMREG_IP, ins->sreg2, ins->sreg1);
4134 g_assert (v7s_supported);
4135 ARM_UDIV (code, ARMREG_IP, ins->sreg1, ins->sreg2);
4136 ARM_MLS (code, ins->dreg, ARMREG_IP, ins->sreg2, ins->sreg1);
4140 g_assert_not_reached ();
4142 ARM_ORR_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4146 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
4147 g_assert (imm8 >= 0);
4148 ARM_ORR_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
4151 ARM_EOR_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4155 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
4156 g_assert (imm8 >= 0);
4157 ARM_EOR_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
4160 ARM_SHL_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4165 ARM_SHL_IMM (code, ins->dreg, ins->sreg1, (ins->inst_imm & 0x1f));
4166 else if (ins->dreg != ins->sreg1)
4167 ARM_MOV_REG_REG (code, ins->dreg, ins->sreg1);
4170 ARM_SAR_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4175 ARM_SAR_IMM (code, ins->dreg, ins->sreg1, (ins->inst_imm & 0x1f));
4176 else if (ins->dreg != ins->sreg1)
4177 ARM_MOV_REG_REG (code, ins->dreg, ins->sreg1);
4180 case OP_ISHR_UN_IMM:
4182 ARM_SHR_IMM (code, ins->dreg, ins->sreg1, (ins->inst_imm & 0x1f));
4183 else if (ins->dreg != ins->sreg1)
4184 ARM_MOV_REG_REG (code, ins->dreg, ins->sreg1);
4187 ARM_SHR_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4190 ARM_MVN_REG_REG (code, ins->dreg, ins->sreg1);
4193 ARM_RSB_REG_IMM8 (code, ins->dreg, ins->sreg1, 0);
4196 if (ins->dreg == ins->sreg2)
4197 ARM_MUL_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4199 ARM_MUL_REG_REG (code, ins->dreg, ins->sreg2, ins->sreg1);
4202 g_assert_not_reached ();
4205 /* FIXME: handle ovf/ sreg2 != dreg */
4206 ARM_MUL_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4207 /* FIXME: MUL doesn't set the C/O flags on ARM */
4209 case OP_IMUL_OVF_UN:
4210 /* FIXME: handle ovf/ sreg2 != dreg */
4211 ARM_MUL_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4212 /* FIXME: MUL doesn't set the C/O flags on ARM */
4215 code = mono_arm_emit_load_imm (code, ins->dreg, ins->inst_c0);
4218 /* Load the GOT offset */
4219 mono_add_patch_info (cfg, offset, (MonoJumpInfoType)ins->inst_i1, ins->inst_p0);
4220 ARM_LDR_IMM (code, ins->dreg, ARMREG_PC, 0);
4222 *(gpointer*)code = NULL;
4224 /* Load the value from the GOT */
4225 ARM_LDR_REG_REG (code, ins->dreg, ARMREG_PC, ins->dreg);
4227 case OP_ICONV_TO_I4:
4228 case OP_ICONV_TO_U4:
4230 if (ins->dreg != ins->sreg1)
4231 ARM_MOV_REG_REG (code, ins->dreg, ins->sreg1);
4234 int saved = ins->sreg2;
4235 if (ins->sreg2 == ARM_LSW_REG) {
4236 ARM_MOV_REG_REG (code, ARMREG_LR, ins->sreg2);
4239 if (ins->sreg1 != ARM_LSW_REG)
4240 ARM_MOV_REG_REG (code, ARM_LSW_REG, ins->sreg1);
4241 if (saved != ARM_MSW_REG)
4242 ARM_MOV_REG_REG (code, ARM_MSW_REG, saved);
4247 ARM_CPYD (code, ins->dreg, ins->sreg1);
4249 case OP_FCONV_TO_R4:
4251 ARM_CVTD (code, ins->dreg, ins->sreg1);
4252 ARM_CVTS (code, ins->dreg, ins->dreg);
4257 * Keep in sync with mono_arch_emit_epilog
4259 g_assert (!cfg->method->save_lmf);
4261 code = emit_load_volatile_arguments (cfg, code);
4263 code = emit_big_add (code, ARMREG_SP, cfg->frame_reg, cfg->stack_usage);
4265 if (cfg->used_int_regs)
4266 ARM_POP (code, cfg->used_int_regs);
4267 ARM_POP (code, (1 << ARMREG_R7) | (1 << ARMREG_LR));
4269 ARM_POP (code, cfg->used_int_regs | (1 << ARMREG_LR));
4271 mono_add_patch_info (cfg, (guint8*) code - cfg->native_code, MONO_PATCH_INFO_METHOD_JUMP, ins->inst_p0);
4272 if (cfg->compile_aot) {
4273 ARM_LDR_IMM (code, ARMREG_IP, ARMREG_PC, 0);
4275 *(gpointer*)code = NULL;
4277 ARM_LDR_REG_REG (code, ARMREG_PC, ARMREG_PC, ARMREG_IP);
4279 code = mono_arm_patchable_b (code, ARMCOND_AL);
4283 /* ensure ins->sreg1 is not NULL */
4284 ARM_LDRB_IMM (code, ARMREG_LR, ins->sreg1, 0);
4287 g_assert (cfg->sig_cookie < 128);
4288 ARM_LDR_IMM (code, ARMREG_IP, cfg->frame_reg, cfg->sig_cookie);
4289 ARM_STR_IMM (code, ARMREG_IP, ins->sreg1, 0);
4298 call = (MonoCallInst*)ins;
4299 if (ins->flags & MONO_INST_HAS_METHOD)
4300 mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_METHOD, call->method);
4302 mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_ABS, call->fptr);
4303 code = emit_call_seq (cfg, code);
4304 ins->flags |= MONO_INST_GC_CALLSITE;
4305 ins->backend.pc_offset = code - cfg->native_code;
4306 code = emit_move_return_value (cfg, ins, code);
4312 case OP_VOIDCALL_REG:
4314 code = emit_call_reg (code, ins->sreg1);
4315 ins->flags |= MONO_INST_GC_CALLSITE;
4316 ins->backend.pc_offset = code - cfg->native_code;
4317 code = emit_move_return_value (cfg, ins, code);
4319 case OP_FCALL_MEMBASE:
4320 case OP_LCALL_MEMBASE:
4321 case OP_VCALL_MEMBASE:
4322 case OP_VCALL2_MEMBASE:
4323 case OP_VOIDCALL_MEMBASE:
4324 case OP_CALL_MEMBASE: {
4325 gboolean imt_arg = FALSE;
4327 g_assert (ins->sreg1 != ARMREG_LR);
4328 call = (MonoCallInst*)ins;
4329 if (call->dynamic_imt_arg || call->method->klass->flags & TYPE_ATTRIBUTE_INTERFACE)
4331 if (!arm_is_imm12 (ins->inst_offset))
4332 code = mono_arm_emit_load_imm (code, ARMREG_IP, ins->inst_offset);
4333 #ifdef USE_JUMP_TABLES
4339 ARM_ADD_REG_IMM8 (code, ARMREG_LR, ARMREG_PC, LR_BIAS);
4341 ARM_MOV_REG_REG (code, ARMREG_LR, ARMREG_PC);
4343 if (!arm_is_imm12 (ins->inst_offset))
4344 ARM_LDR_REG_REG (code, ARMREG_PC, ins->sreg1, ARMREG_IP);
4346 ARM_LDR_IMM (code, ARMREG_PC, ins->sreg1, ins->inst_offset);
4349 * We can't embed the method in the code stream in PIC code, or
4351 * Instead, we put it in V5 in code emitted by
4352 * mono_arch_emit_imt_argument (), and embed NULL here to
4353 * signal the IMT thunk that the value is in V5.
4355 #ifdef USE_JUMP_TABLES
4356 /* In case of jumptables we always use value in V5. */
4359 if (call->dynamic_imt_arg)
4360 *((gpointer*)code) = NULL;
4362 *((gpointer*)code) = (gpointer)call->method;
4366 ins->flags |= MONO_INST_GC_CALLSITE;
4367 ins->backend.pc_offset = code - cfg->native_code;
4368 code = emit_move_return_value (cfg, ins, code);
4372 /* keep alignment */
4373 int alloca_waste = cfg->param_area;
4376 /* round the size to 8 bytes */
4377 ARM_ADD_REG_IMM8 (code, ins->dreg, ins->sreg1, 7);
4378 ARM_BIC_REG_IMM8 (code, ins->dreg, ins->dreg, 7);
4380 ARM_ADD_REG_IMM8 (code, ins->dreg, ins->dreg, alloca_waste);
4381 ARM_SUB_REG_REG (code, ARMREG_SP, ARMREG_SP, ins->dreg);
4382 /* memzero the area: dreg holds the size, sp is the pointer */
4383 if (ins->flags & MONO_INST_INIT) {
4384 guint8 *start_loop, *branch_to_cond;
4385 ARM_MOV_REG_IMM8 (code, ARMREG_LR, 0);
4386 branch_to_cond = code;
4389 ARM_STR_REG_REG (code, ARMREG_LR, ARMREG_SP, ins->dreg);
4390 arm_patch (branch_to_cond, code);
4391 /* decrement by 4 and set flags */
4392 ARM_SUBS_REG_IMM8 (code, ins->dreg, ins->dreg, sizeof (mgreg_t));
4393 ARM_B_COND (code, ARMCOND_GE, 0);
4394 arm_patch (code - 4, start_loop);
4396 ARM_ADD_REG_IMM8 (code, ins->dreg, ARMREG_SP, alloca_waste);
4401 MonoInst *var = cfg->dyn_call_var;
4403 g_assert (var->opcode == OP_REGOFFSET);
4404 g_assert (arm_is_imm12 (var->inst_offset));
4406 /* lr = args buffer filled by mono_arch_get_dyn_call_args () */
4407 ARM_MOV_REG_REG( code, ARMREG_LR, ins->sreg1);
4409 ARM_MOV_REG_REG( code, ARMREG_IP, ins->sreg2);
4411 /* Save args buffer */
4412 ARM_STR_IMM (code, ARMREG_LR, var->inst_basereg, var->inst_offset);
4414 /* Set stack slots using R0 as scratch reg */
4415 /* MONO_ARCH_DYN_CALL_PARAM_AREA gives the size of stack space available */
4416 for (i = 0; i < DYN_CALL_STACK_ARGS; ++i) {
4417 ARM_LDR_IMM (code, ARMREG_R0, ARMREG_LR, (PARAM_REGS + i) * sizeof (mgreg_t));
4418 ARM_STR_IMM (code, ARMREG_R0, ARMREG_SP, i * sizeof (mgreg_t));
4421 /* Set argument registers */
4422 for (i = 0; i < PARAM_REGS; ++i)
4423 ARM_LDR_IMM (code, i, ARMREG_LR, i * sizeof (mgreg_t));
4426 ARM_MOV_REG_REG (code, ARMREG_LR, ARMREG_PC);
4427 ARM_MOV_REG_REG (code, ARMREG_PC, ARMREG_IP);
4430 ARM_LDR_IMM (code, ARMREG_IP, var->inst_basereg, var->inst_offset);
4431 ARM_STR_IMM (code, ARMREG_R0, ARMREG_IP, G_STRUCT_OFFSET (DynCallArgs, res));
4432 ARM_STR_IMM (code, ARMREG_R1, ARMREG_IP, G_STRUCT_OFFSET (DynCallArgs, res2));
4436 if (ins->sreg1 != ARMREG_R0)
4437 ARM_MOV_REG_REG (code, ARMREG_R0, ins->sreg1);
4438 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_INTERNAL_METHOD,
4439 (gpointer)"mono_arch_throw_exception");
4440 code = emit_call_seq (cfg, code);
4444 if (ins->sreg1 != ARMREG_R0)
4445 ARM_MOV_REG_REG (code, ARMREG_R0, ins->sreg1);
4446 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_INTERNAL_METHOD,
4447 (gpointer)"mono_arch_rethrow_exception");
4448 code = emit_call_seq (cfg, code);
4451 case OP_START_HANDLER: {
4452 MonoInst *spvar = mono_find_spvar_for_region (cfg, bb->region);
4455 /* Reserve a param area, see filter-stack.exe */
4456 if (cfg->param_area) {
4457 if ((i = mono_arm_is_rotated_imm8 (cfg->param_area, &rot_amount)) >= 0) {
4458 ARM_SUB_REG_IMM (code, ARMREG_SP, ARMREG_SP, i, rot_amount);
4460 code = mono_arm_emit_load_imm (code, ARMREG_IP, cfg->param_area);
4461 ARM_SUB_REG_REG (code, ARMREG_SP, ARMREG_SP, ARMREG_IP);
4465 if (arm_is_imm12 (spvar->inst_offset)) {
4466 ARM_STR_IMM (code, ARMREG_LR, spvar->inst_basereg, spvar->inst_offset);
4468 code = mono_arm_emit_load_imm (code, ARMREG_IP, spvar->inst_offset);
4469 ARM_STR_REG_REG (code, ARMREG_LR, spvar->inst_basereg, ARMREG_IP);
4473 case OP_ENDFILTER: {
4474 MonoInst *spvar = mono_find_spvar_for_region (cfg, bb->region);
4477 /* Free the param area */
4478 if (cfg->param_area) {
4479 if ((i = mono_arm_is_rotated_imm8 (cfg->param_area, &rot_amount)) >= 0) {
4480 ARM_ADD_REG_IMM (code, ARMREG_SP, ARMREG_SP, i, rot_amount);
4482 code = mono_arm_emit_load_imm (code, ARMREG_IP, cfg->param_area);
4483 ARM_ADD_REG_REG (code, ARMREG_SP, ARMREG_SP, ARMREG_IP);
4487 if (ins->sreg1 != ARMREG_R0)
4488 ARM_MOV_REG_REG (code, ARMREG_R0, ins->sreg1);
4489 if (arm_is_imm12 (spvar->inst_offset)) {
4490 ARM_LDR_IMM (code, ARMREG_IP, spvar->inst_basereg, spvar->inst_offset);
4492 g_assert (ARMREG_IP != spvar->inst_basereg);
4493 code = mono_arm_emit_load_imm (code, ARMREG_IP, spvar->inst_offset);
4494 ARM_LDR_REG_REG (code, ARMREG_IP, spvar->inst_basereg, ARMREG_IP);
4496 ARM_MOV_REG_REG (code, ARMREG_PC, ARMREG_IP);
4499 case OP_ENDFINALLY: {
4500 MonoInst *spvar = mono_find_spvar_for_region (cfg, bb->region);
4503 /* Free the param area */
4504 if (cfg->param_area) {
4505 if ((i = mono_arm_is_rotated_imm8 (cfg->param_area, &rot_amount)) >= 0) {
4506 ARM_ADD_REG_IMM (code, ARMREG_SP, ARMREG_SP, i, rot_amount);
4508 code = mono_arm_emit_load_imm (code, ARMREG_IP, cfg->param_area);
4509 ARM_ADD_REG_REG (code, ARMREG_SP, ARMREG_SP, ARMREG_IP);
4513 if (arm_is_imm12 (spvar->inst_offset)) {
4514 ARM_LDR_IMM (code, ARMREG_IP, spvar->inst_basereg, spvar->inst_offset);
4516 g_assert (ARMREG_IP != spvar->inst_basereg);
4517 code = mono_arm_emit_load_imm (code, ARMREG_IP, spvar->inst_offset);
4518 ARM_LDR_REG_REG (code, ARMREG_IP, spvar->inst_basereg, ARMREG_IP);
4520 ARM_MOV_REG_REG (code, ARMREG_PC, ARMREG_IP);
4523 case OP_CALL_HANDLER:
4524 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_BB, ins->inst_target_bb);
4525 code = mono_arm_patchable_bl (code, ARMCOND_AL);
4526 mono_cfg_add_try_hole (cfg, ins->inst_eh_block, code, bb);
4529 ins->inst_c0 = code - cfg->native_code;
4532 /*if (ins->inst_target_bb->native_offset) {
4534 //x86_jump_code (code, cfg->native_code + ins->inst_target_bb->native_offset);
4536 mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_BB, ins->inst_target_bb);
4537 code = mono_arm_patchable_b (code, ARMCOND_AL);
4541 ARM_MOV_REG_REG (code, ARMREG_PC, ins->sreg1);
4545 * In the normal case we have:
4546 * ldr pc, [pc, ins->sreg1 << 2]
4549 * ldr lr, [pc, ins->sreg1 << 2]
4551 * After follows the data.
4552 * FIXME: add aot support.
4554 mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_SWITCH, ins->inst_p0);
4555 #ifdef USE_JUMP_TABLES
4557 gpointer *jte = mono_jumptable_add_entries (GPOINTER_TO_INT (ins->klass));
4558 code = mono_arm_load_jumptable_entry_addr (code, jte, ARMREG_IP);
4559 ARM_LDR_REG_REG_SHIFT (code, ARMREG_PC, ARMREG_IP, ins->sreg1, ARMSHIFT_LSL, 2);
4563 max_len += 4 * GPOINTER_TO_INT (ins->klass);
4564 if (offset + max_len > (cfg->code_size - 16)) {
4565 cfg->code_size += max_len;
4566 cfg->code_size *= 2;
4567 cfg->native_code = g_realloc (cfg->native_code, cfg->code_size);
4568 code = cfg->native_code + offset;
4570 ARM_LDR_REG_REG_SHIFT (code, ARMREG_PC, ARMREG_PC, ins->sreg1, ARMSHIFT_LSL, 2);
4572 code += 4 * GPOINTER_TO_INT (ins->klass);
4577 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 0, ARMCOND_NE);
4578 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_EQ);
4582 ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
4583 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_LT);
4587 ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
4588 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_LO);
4592 ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
4593 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_GT);
4597 ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
4598 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_HI);
4600 case OP_COND_EXC_EQ:
4601 case OP_COND_EXC_NE_UN:
4602 case OP_COND_EXC_LT:
4603 case OP_COND_EXC_LT_UN:
4604 case OP_COND_EXC_GT:
4605 case OP_COND_EXC_GT_UN:
4606 case OP_COND_EXC_GE:
4607 case OP_COND_EXC_GE_UN:
4608 case OP_COND_EXC_LE:
4609 case OP_COND_EXC_LE_UN:
4610 EMIT_COND_SYSTEM_EXCEPTION (ins->opcode - OP_COND_EXC_EQ, ins->inst_p1);
4612 case OP_COND_EXC_IEQ:
4613 case OP_COND_EXC_INE_UN:
4614 case OP_COND_EXC_ILT:
4615 case OP_COND_EXC_ILT_UN:
4616 case OP_COND_EXC_IGT:
4617 case OP_COND_EXC_IGT_UN:
4618 case OP_COND_EXC_IGE:
4619 case OP_COND_EXC_IGE_UN:
4620 case OP_COND_EXC_ILE:
4621 case OP_COND_EXC_ILE_UN:
4622 EMIT_COND_SYSTEM_EXCEPTION (ins->opcode - OP_COND_EXC_IEQ, ins->inst_p1);
4625 case OP_COND_EXC_IC:
4626 EMIT_COND_SYSTEM_EXCEPTION_FLAGS (ARMCOND_CS, ins->inst_p1);
4628 case OP_COND_EXC_OV:
4629 case OP_COND_EXC_IOV:
4630 EMIT_COND_SYSTEM_EXCEPTION_FLAGS (ARMCOND_VS, ins->inst_p1);
4632 case OP_COND_EXC_NC:
4633 case OP_COND_EXC_INC:
4634 EMIT_COND_SYSTEM_EXCEPTION_FLAGS (ARMCOND_CC, ins->inst_p1);
4636 case OP_COND_EXC_NO:
4637 case OP_COND_EXC_INO:
4638 EMIT_COND_SYSTEM_EXCEPTION_FLAGS (ARMCOND_VC, ins->inst_p1);
4650 EMIT_COND_BRANCH (ins, ins->opcode - OP_IBEQ);
4653 /* floating point opcodes */
4654 #if defined(ARM_FPU_VFP)
4657 if (cfg->compile_aot) {
4658 ARM_FLDD (code, ins->dreg, ARMREG_PC, 0);
4660 *(guint32*)code = ((guint32*)(ins->inst_p0))[0];
4662 *(guint32*)code = ((guint32*)(ins->inst_p0))[1];
4665 /* FIXME: we can optimize the imm load by dealing with part of
4666 * the displacement in LDFD (aligning to 512).
4668 code = mono_arm_emit_load_imm (code, ARMREG_LR, (guint32)ins->inst_p0);
4669 ARM_FLDD (code, ins->dreg, ARMREG_LR, 0);
4673 if (cfg->compile_aot) {
4674 ARM_FLDS (code, ins->dreg, ARMREG_PC, 0);
4676 *(guint32*)code = ((guint32*)(ins->inst_p0))[0];
4678 ARM_CVTS (code, ins->dreg, ins->dreg);
4680 code = mono_arm_emit_load_imm (code, ARMREG_LR, (guint32)ins->inst_p0);
4681 ARM_FLDS (code, ins->dreg, ARMREG_LR, 0);
4682 ARM_CVTS (code, ins->dreg, ins->dreg);
4685 case OP_STORER8_MEMBASE_REG:
4686 /* This is generated by the local regalloc pass which runs after the lowering pass */
4687 if (!arm_is_fpimm8 (ins->inst_offset)) {
4688 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_offset);
4689 ARM_ADD_REG_REG (code, ARMREG_LR, ARMREG_LR, ins->inst_destbasereg);
4690 ARM_FSTD (code, ins->sreg1, ARMREG_LR, 0);
4692 ARM_FSTD (code, ins->sreg1, ins->inst_destbasereg, ins->inst_offset);
4695 case OP_LOADR8_MEMBASE:
4696 /* This is generated by the local regalloc pass which runs after the lowering pass */
4697 if (!arm_is_fpimm8 (ins->inst_offset)) {
4698 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_offset);
4699 ARM_ADD_REG_REG (code, ARMREG_LR, ARMREG_LR, ins->inst_basereg);
4700 ARM_FLDD (code, ins->dreg, ARMREG_LR, 0);
4702 ARM_FLDD (code, ins->dreg, ins->inst_basereg, ins->inst_offset);
4705 case OP_STORER4_MEMBASE_REG:
4706 g_assert (arm_is_fpimm8 (ins->inst_offset));
4707 ARM_CVTD (code, ARM_VFP_F0, ins->sreg1);
4708 ARM_FSTS (code, ARM_VFP_F0, ins->inst_destbasereg, ins->inst_offset);
4710 case OP_LOADR4_MEMBASE:
4711 g_assert (arm_is_fpimm8 (ins->inst_offset));
4712 ARM_FLDS (code, ARM_VFP_F0, ins->inst_basereg, ins->inst_offset);
4713 ARM_CVTS (code, ins->dreg, ARM_VFP_F0);
4715 case OP_ICONV_TO_R_UN: {
4716 g_assert_not_reached ();
4719 case OP_ICONV_TO_R4:
4720 ARM_FMSR (code, ARM_VFP_F0, ins->sreg1);
4721 ARM_FSITOS (code, ARM_VFP_F0, ARM_VFP_F0);
4722 ARM_CVTS (code, ins->dreg, ARM_VFP_F0);
4724 case OP_ICONV_TO_R8:
4725 ARM_FMSR (code, ARM_VFP_F0, ins->sreg1);
4726 ARM_FSITOD (code, ins->dreg, ARM_VFP_F0);
4730 if (mono_method_signature (cfg->method)->ret->type == MONO_TYPE_R4) {
4731 ARM_CVTD (code, ARM_VFP_F0, ins->sreg1);
4732 ARM_FMRS (code, ARMREG_R0, ARM_VFP_F0);
4734 ARM_FMRRD (code, ARMREG_R0, ARMREG_R1, ins->sreg1);
4740 case OP_FCONV_TO_I1:
4741 code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 1, TRUE);
4743 case OP_FCONV_TO_U1:
4744 code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 1, FALSE);
4746 case OP_FCONV_TO_I2:
4747 code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 2, TRUE);
4749 case OP_FCONV_TO_U2:
4750 code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 2, FALSE);
4752 case OP_FCONV_TO_I4:
4754 code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 4, TRUE);
4756 case OP_FCONV_TO_U4:
4758 code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 4, FALSE);
4760 case OP_FCONV_TO_I8:
4761 case OP_FCONV_TO_U8:
4762 g_assert_not_reached ();
4763 /* Implemented as helper calls */
4765 case OP_LCONV_TO_R_UN:
4766 g_assert_not_reached ();
4767 /* Implemented as helper calls */
4769 case OP_LCONV_TO_OVF_I4_2: {
4770 guint8 *high_bit_not_set, *valid_negative, *invalid_negative, *valid_positive;
4772 * Valid ints: 0xffffffff:80000000 to 0x00000000:7fffffff
4775 ARM_CMP_REG_IMM8 (code, ins->sreg1, 0);
4776 high_bit_not_set = code;
4777 ARM_B_COND (code, ARMCOND_GE, 0); /*branch if bit 31 of the lower part is not set*/
4779 ARM_CMN_REG_IMM8 (code, ins->sreg2, 1); /* This has the same effect as CMP reg, 0xFFFFFFFF */
4780 valid_negative = code;
4781 ARM_B_COND (code, ARMCOND_EQ, 0); /*branch if upper part == 0xFFFFFFFF (lower part has bit 31 set) */
4782 invalid_negative = code;
4783 ARM_B_COND (code, ARMCOND_AL, 0);
4785 arm_patch (high_bit_not_set, code);
4787 ARM_CMP_REG_IMM8 (code, ins->sreg2, 0);
4788 valid_positive = code;
4789 ARM_B_COND (code, ARMCOND_EQ, 0); /*branch if upper part == 0 (lower part has bit 31 clear)*/
4791 arm_patch (invalid_negative, code);
4792 EMIT_COND_SYSTEM_EXCEPTION_FLAGS (ARMCOND_AL, "OverflowException");
4794 arm_patch (valid_negative, code);
4795 arm_patch (valid_positive, code);
4797 if (ins->dreg != ins->sreg1)
4798 ARM_MOV_REG_REG (code, ins->dreg, ins->sreg1);
4801 #if defined(ARM_FPU_VFP)
4803 ARM_VFP_ADDD (code, ins->dreg, ins->sreg1, ins->sreg2);
4806 ARM_VFP_SUBD (code, ins->dreg, ins->sreg1, ins->sreg2);
4809 ARM_VFP_MULD (code, ins->dreg, ins->sreg1, ins->sreg2);
4812 ARM_VFP_DIVD (code, ins->dreg, ins->sreg1, ins->sreg2);
4815 ARM_NEGD (code, ins->dreg, ins->sreg1);
4820 g_assert_not_reached ();
4824 ARM_CMPD (code, ins->sreg1, ins->sreg2);
4830 ARM_CMPD (code, ins->sreg1, ins->sreg2);
4833 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 0, ARMCOND_NE);
4834 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_EQ);
4838 ARM_CMPD (code, ins->sreg1, ins->sreg2);
4841 ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
4842 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_MI);
4846 ARM_CMPD (code, ins->sreg1, ins->sreg2);
4849 ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
4850 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_MI);
4851 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_VS);
4855 ARM_CMPD (code, ins->sreg2, ins->sreg1);
4858 ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
4859 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_MI);
4863 ARM_CMPD (code, ins->sreg2, ins->sreg1);
4866 ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
4867 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_MI);
4868 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_VS);
4870 /* ARM FPA flags table:
4871 * N Less than ARMCOND_MI
4872 * Z Equal ARMCOND_EQ
4873 * C Greater Than or Equal ARMCOND_CS
4874 * V Unordered ARMCOND_VS
4877 EMIT_COND_BRANCH (ins, OP_IBEQ - OP_IBEQ);
4880 EMIT_COND_BRANCH (ins, OP_IBNE_UN - OP_IBEQ);
4883 EMIT_COND_BRANCH_FLAGS (ins, ARMCOND_MI); /* N set */
4886 EMIT_COND_BRANCH_FLAGS (ins, ARMCOND_VS); /* V set */
4887 EMIT_COND_BRANCH_FLAGS (ins, ARMCOND_MI); /* N set */
4893 g_assert_not_reached ();
4897 EMIT_COND_BRANCH_FLAGS (ins, ARMCOND_GE);
4899 /* FPA requires EQ even though the docs suggest that just CS is enough */
4900 EMIT_COND_BRANCH_FLAGS (ins, ARMCOND_EQ);
4901 EMIT_COND_BRANCH_FLAGS (ins, ARMCOND_CS);
4905 EMIT_COND_BRANCH_FLAGS (ins, ARMCOND_VS); /* V set */
4906 EMIT_COND_BRANCH_FLAGS (ins, ARMCOND_GE);
4911 #ifdef USE_JUMP_TABLES
4913 gpointer *jte = mono_jumptable_add_entries (2);
4914 jte [0] = GUINT_TO_POINTER (0xffffffff);
4915 jte [1] = GUINT_TO_POINTER (0x7fefffff);
4916 code = mono_arm_load_jumptable_entry_addr (code, jte, ARMREG_IP);
4917 ARM_FLDD (code, ARM_VFP_D0, ARMREG_IP, 0);
4920 ARM_ABSD (code, ARM_VFP_D1, ins->sreg1);
4921 ARM_FLDD (code, ARM_VFP_D0, ARMREG_PC, 0);
4923 *(guint32*)code = 0xffffffff;
4925 *(guint32*)code = 0x7fefffff;
4928 ARM_CMPD (code, ARM_VFP_D1, ARM_VFP_D0);
4930 EMIT_COND_SYSTEM_EXCEPTION_FLAGS (ARMCOND_GT, "ArithmeticException");
4931 ARM_CMPD (code, ins->sreg1, ins->sreg1);
4933 EMIT_COND_SYSTEM_EXCEPTION_FLAGS (ARMCOND_VS, "ArithmeticException");
4934 ARM_CPYD (code, ins->dreg, ins->sreg1);
4939 case OP_GC_LIVENESS_DEF:
4940 case OP_GC_LIVENESS_USE:
4941 case OP_GC_PARAM_SLOT_LIVENESS_DEF:
4942 ins->backend.pc_offset = code - cfg->native_code;
4944 case OP_GC_SPILL_SLOT_LIVENESS_DEF:
4945 ins->backend.pc_offset = code - cfg->native_code;
4946 bb->spill_slot_defs = g_slist_prepend_mempool (cfg->mempool, bb->spill_slot_defs, ins);
4950 g_warning ("unknown opcode %s in %s()\n", mono_inst_name (ins->opcode), __FUNCTION__);
4951 g_assert_not_reached ();
4954 if ((cfg->opt & MONO_OPT_BRANCH) && ((code - cfg->native_code - offset) > max_len)) {
4955 g_warning ("wrong maximal instruction length of instruction %s (expected %d, got %d)",
4956 mono_inst_name (ins->opcode), max_len, code - cfg->native_code - offset);
4957 g_assert_not_reached ();
4963 last_offset = offset;
4966 cfg->code_len = code - cfg->native_code;
4969 #endif /* DISABLE_JIT */
4971 #ifdef HAVE_AEABI_READ_TP
4972 void __aeabi_read_tp (void);
4976 mono_arch_register_lowlevel_calls (void)
4978 /* The signature doesn't matter */
4979 mono_register_jit_icall (mono_arm_throw_exception, "mono_arm_throw_exception", mono_create_icall_signature ("void"), TRUE);
4980 mono_register_jit_icall (mono_arm_throw_exception_by_token, "mono_arm_throw_exception_by_token", mono_create_icall_signature ("void"), TRUE);
4982 #ifndef MONO_CROSS_COMPILE
4983 #ifdef HAVE_AEABI_READ_TP
4984 mono_register_jit_icall (__aeabi_read_tp, "__aeabi_read_tp", mono_create_icall_signature ("void"), TRUE);
4989 #define patch_lis_ori(ip,val) do {\
4990 guint16 *__lis_ori = (guint16*)(ip); \
4991 __lis_ori [1] = (((guint32)(val)) >> 16) & 0xffff; \
4992 __lis_ori [3] = ((guint32)(val)) & 0xffff; \
/*
 * mono_arch_patch_code:
 * Walks the MonoJumpInfo list JI and patches the freshly JITted native CODE
 * of METHOD in place.  SWITCH patches fill the inlined jump table directly;
 * every other live patch type is resolved through mono_resolve_patch_target ()
 * and applied by arm_patch_general () at the bottom of the loop.  The case
 * arms guarded by g_assert_not_reached () are dead code inherited from the
 * PPC backend (note the "lis + ori" wording) and must never trigger on ARM.
 */
4996 mono_arch_patch_code (MonoMethod *method, MonoDomain *domain, guint8 *code, MonoJumpInfo *ji, MonoCodeManager *dyn_code_mp, gboolean run_cctors)
4998 MonoJumpInfo *patch_info;
/* AOT compilation is signalled by not running class constructors */
4999 gboolean compile_aot = !run_cctors;
5001 for (patch_info = ji; patch_info; patch_info = patch_info->next) {
5002 unsigned char *ip = patch_info->ip.i + code;
5003 const unsigned char *target;
/* Switch tables are filled in-line; no target resolution needed */
5005 if (patch_info->type == MONO_PATCH_INFO_SWITCH && !compile_aot) {
5006 #ifdef USE_JUMP_TABLES
5007 gpointer *jt = mono_jumptable_get_entry (ip);
5009 gpointer *jt = (gpointer*)(ip + 8);
5012 /* jt is the inlined jump table, 2 instructions after ip
5013 * In the normal case we store the absolute addresses,
5014 * otherwise the displacements.
5016 for (i = 0; i < patch_info->data.table->table_size; i++)
5017 jt [i] = code + (int)patch_info->data.table->table [i];
5020 target = mono_resolve_patch_target (method, domain, code, patch_info, run_cctors);
5023 switch (patch_info->type) {
5024 case MONO_PATCH_INFO_BB:
5025 case MONO_PATCH_INFO_LABEL:
5028 /* No need to patch these */
/* Everything below up to the default case is unreachable on ARM
 * (PPC leftovers kept for reference). */
5033 switch (patch_info->type) {
5034 case MONO_PATCH_INFO_IP:
5035 g_assert_not_reached ();
5036 patch_lis_ori (ip, ip);
5038 case MONO_PATCH_INFO_METHOD_REL:
5039 g_assert_not_reached ();
5040 *((gpointer *)(ip)) = code + patch_info->data.offset;
5042 case MONO_PATCH_INFO_METHODCONST:
5043 case MONO_PATCH_INFO_CLASS:
5044 case MONO_PATCH_INFO_IMAGE:
5045 case MONO_PATCH_INFO_FIELD:
5046 case MONO_PATCH_INFO_VTABLE:
5047 case MONO_PATCH_INFO_IID:
5048 case MONO_PATCH_INFO_SFLDA:
5049 case MONO_PATCH_INFO_LDSTR:
5050 case MONO_PATCH_INFO_TYPE_FROM_HANDLE:
5051 case MONO_PATCH_INFO_LDTOKEN:
5052 g_assert_not_reached ();
5053 /* from OP_AOTCONST : lis + ori */
5054 patch_lis_ori (ip, target);
5056 case MONO_PATCH_INFO_R4:
5057 case MONO_PATCH_INFO_R8:
5058 g_assert_not_reached ();
5059 *((gconstpointer *)(ip + 2)) = patch_info->data.target;
5061 case MONO_PATCH_INFO_EXC_NAME:
5062 g_assert_not_reached ();
5063 *((gconstpointer *)(ip + 1)) = patch_info->data.name;
5065 case MONO_PATCH_INFO_NONE:
5066 case MONO_PATCH_INFO_BB_OVF:
5067 case MONO_PATCH_INFO_EXC_OVF:
5068 /* everything is dealt with at epilog output time */
/* Default: generic instruction-level patching (branches, constants) */
5073 arm_patch_general (domain, ip, target, dyn_code_mp);
5080 * Stack frame layout:
5082 * ------------------- fp
5083 * MonoLMF structure or saved registers
5084 * -------------------
5086 * -------------------
5088 * -------------------
5089 * optional 8 bytes for tracing
5090 * -------------------
5091 * param area size is cfg->param_area
5092 * ------------------- sp
/*
 * mono_arch_emit_prolog:
 * Emits the native prologue for CFG->method:
 *  - saves callee-saved registers (or a full MonoLMF when method->save_lmf),
 *    recording DWARF-style unwind ops and GC stack-map info for each slot;
 *  - allocates and aligns the stack frame (MONO_ARCH_FRAME_ALIGNMENT);
 *  - stores the rgctx, vtype return address and vararg sig cookie if present;
 *  - moves every incoming argument from its ABI location (register or caller
 *    stack) to the location chosen by the register allocator;
 *  - emits sequence-point / single-step initialization for the soft debugger.
 * The initial code buffer is sized heuristically from the parameter count.
 */
5095 mono_arch_emit_prolog (MonoCompile *cfg)
5097 MonoMethod *method = cfg->method;
5099 MonoMethodSignature *sig;
5101 int alloc_size, orig_alloc_size, pos, max_offset, i, rot_amount;
5106 int prev_sp_offset, reg_offset;
5108 if (mono_jit_trace_calls != NULL && mono_trace_eval (method))
5111 sig = mono_method_signature (method);
/* Heuristic initial buffer size; grown later by epilog/exception emitters */
5112 cfg->code_size = 256 + sig->param_count * 64;
5113 code = cfg->native_code = g_malloc (cfg->code_size);
/* CFA starts at the incoming SP */
5115 mono_emit_unwind_op_def_cfa (cfg, code, ARMREG_SP, 0);
5117 alloc_size = cfg->stack_offset;
/* Two frame shapes: plain callee-saved pushes, or a full MonoLMF */
5121 if (!method->save_lmf) {
5124 * The iphone uses R7 as the frame pointer, and it points at the saved
5129 * We can't use r7 as a frame pointer since it points into the middle of
5130 * the frame, so we keep using our own frame pointer.
5131 * FIXME: Optimize this.
5134 ARM_PUSH (code, (1 << ARMREG_R7) | (1 << ARMREG_LR));
5135 ARM_MOV_REG_REG (code, ARMREG_R7, ARMREG_SP);
5136 prev_sp_offset += 8; /* r7 and lr */
5137 mono_emit_unwind_op_def_cfa_offset (cfg, code, prev_sp_offset);
5138 mono_emit_unwind_op_offset (cfg, code, ARMREG_R7, (- prev_sp_offset) + 0);
5140 /* No need to push LR again */
5141 if (cfg->used_int_regs)
5142 ARM_PUSH (code, cfg->used_int_regs);
5144 ARM_PUSH (code, cfg->used_int_regs | (1 << ARMREG_LR));
5145 prev_sp_offset += 4;
/* Account for each pushed callee-saved register */
5147 for (i = 0; i < 16; ++i) {
5148 if (cfg->used_int_regs & (1 << i))
5149 prev_sp_offset += 4;
5151 mono_emit_unwind_op_def_cfa_offset (cfg, code, prev_sp_offset);
/* Record unwind + GC info for every saved register slot */
5153 for (i = 0; i < 16; ++i) {
5154 if ((cfg->used_int_regs & (1 << i))) {
5155 mono_emit_unwind_op_offset (cfg, code, i, (- prev_sp_offset) + reg_offset);
5156 mini_gc_set_slot_type_from_cfa (cfg, (- prev_sp_offset) + reg_offset, SLOT_NOREF);
5161 mono_emit_unwind_op_offset (cfg, code, ARMREG_LR, -4);
5162 mini_gc_set_slot_type_from_cfa (cfg, -4, SLOT_NOREF);
5164 mono_emit_unwind_op_offset (cfg, code, ARMREG_LR, -4);
5165 mini_gc_set_slot_type_from_cfa (cfg, -4, SLOT_NOREF);
/* save_lmf path: push r4-r12 and lr (mask 0x5ff0), IP holds the old SP */
5168 ARM_MOV_REG_REG (code, ARMREG_IP, ARMREG_SP);
5169 ARM_PUSH (code, 0x5ff0);
5170 prev_sp_offset += 4 * 10; /* all but r0-r3, sp and pc */
5171 mono_emit_unwind_op_def_cfa_offset (cfg, code, prev_sp_offset);
5173 for (i = 0; i < 16; ++i) {
5174 if ((i > ARMREG_R3) && (i != ARMREG_SP) && (i != ARMREG_PC)) {
5175 mono_emit_unwind_op_offset (cfg, code, i, (- prev_sp_offset) + reg_offset);
5179 pos += sizeof (MonoLMF) - prev_sp_offset;
5183 orig_alloc_size = alloc_size;
5184 // align to MONO_ARCH_FRAME_ALIGNMENT bytes
5185 if (alloc_size & (MONO_ARCH_FRAME_ALIGNMENT - 1)) {
5186 alloc_size += MONO_ARCH_FRAME_ALIGNMENT - 1;
5187 alloc_size &= ~(MONO_ARCH_FRAME_ALIGNMENT - 1);
5190 /* the stack used in the pushed regs */
5191 if (prev_sp_offset & 4)
5193 cfg->stack_usage = alloc_size;
/* Subtract the frame; use a rotated imm8 when it encodes, else load via IP */
5195 if ((i = mono_arm_is_rotated_imm8 (alloc_size, &rot_amount)) >= 0) {
5196 ARM_SUB_REG_IMM (code, ARMREG_SP, ARMREG_SP, i, rot_amount);
5198 code = mono_arm_emit_load_imm (code, ARMREG_IP, alloc_size);
5199 ARM_SUB_REG_REG (code, ARMREG_SP, ARMREG_SP, ARMREG_IP);
5201 mono_emit_unwind_op_def_cfa_offset (cfg, code, prev_sp_offset + alloc_size);
5203 if (cfg->frame_reg != ARMREG_SP) {
5204 ARM_MOV_REG_REG (code, cfg->frame_reg, ARMREG_SP);
5205 mono_emit_unwind_op_def_cfa_reg (cfg, code, cfg->frame_reg);
5207 //g_print ("prev_sp_offset: %d, alloc_size:%d\n", prev_sp_offset, alloc_size);
5208 prev_sp_offset += alloc_size;
/* Mark the alignment padding as holding no GC references */
5210 for (i = 0; i < alloc_size - orig_alloc_size; i += 4)
5211 mini_gc_set_slot_type_from_cfa (cfg, (- prev_sp_offset) + orig_alloc_size + i, SLOT_NOREF);
5213 /* compute max_offset in order to use short forward jumps
5214 * we could skip do it on arm because the immediate displacement
5215 * for jumps is large enough, it may be useful later for constant pools
5218 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
5219 MonoInst *ins = bb->code;
5220 bb->max_offset = max_offset;
5222 if (cfg->prof_options & MONO_PROFILE_COVERAGE)
5225 MONO_BB_FOR_EACH_INS (bb, ins)
5226 max_offset += ((guint8 *)ins_get_spec (ins->opcode))[MONO_INST_LEN];
5229 /* store runtime generic context */
5230 if (cfg->rgctx_var) {
5231 MonoInst *ins = cfg->rgctx_var;
5233 g_assert (ins->opcode == OP_REGOFFSET);
5235 if (arm_is_imm12 (ins->inst_offset)) {
5236 ARM_STR_IMM (code, MONO_ARCH_RGCTX_REG, ins->inst_basereg, ins->inst_offset);
5238 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_offset);
5239 ARM_STR_REG_REG (code, MONO_ARCH_RGCTX_REG, ins->inst_basereg, ARMREG_LR);
5243 /* load arguments allocated to register from the stack */
5246 cinfo = get_call_info (cfg->generic_sharing_context, NULL, sig);
/* Spill the hidden vtype-return address argument to its stack slot */
5248 if (cinfo->vtype_retaddr) {
5249 ArgInfo *ainfo = &cinfo->ret;
5250 inst = cfg->vret_addr;
5251 g_assert (arm_is_imm12 (inst->inst_offset));
5252 ARM_STR_IMM (code, ainfo->reg, inst->inst_basereg, inst->inst_offset);
5255 if (sig->call_convention == MONO_CALL_VARARG) {
5256 ArgInfo *cookie = &cinfo->sig_cookie;
5258 /* Save the sig cookie address */
5259 g_assert (cookie->storage == RegTypeBase);
5261 g_assert (arm_is_imm12 (prev_sp_offset + cookie->offset));
5262 g_assert (arm_is_imm12 (cfg->sig_cookie));
5263 ARM_ADD_REG_IMM8 (code, ARMREG_IP, cfg->frame_reg, prev_sp_offset + cookie->offset);
5264 ARM_STR_IMM (code, ARMREG_IP, cfg->frame_reg, cfg->sig_cookie);
/* Move each incoming argument to the location the allocator assigned it */
5267 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
5268 ArgInfo *ainfo = cinfo->args + i;
5269 inst = cfg->args [pos];
5271 if (cfg->verbose_level > 2)
5272 g_print ("Saving argument %d (type: %d)\n", i, ainfo->storage);
5273 if (inst->opcode == OP_REGVAR) {
5274 if (ainfo->storage == RegTypeGeneral)
5275 ARM_MOV_REG_REG (code, inst->dreg, ainfo->reg);
5276 else if (ainfo->storage == RegTypeFP) {
5277 g_assert_not_reached ();
5278 } else if (ainfo->storage == RegTypeBase) {
5279 if (arm_is_imm12 (prev_sp_offset + ainfo->offset)) {
5280 ARM_LDR_IMM (code, inst->dreg, ARMREG_SP, (prev_sp_offset + ainfo->offset));
/* NOTE(review): the imm12 check above tests prev_sp_offset + ainfo->offset,
 * but this fallback loads inst->inst_offset instead — looks inconsistent
 * with the RegTypeBase handling further down; verify against upstream. */
5282 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
5283 ARM_LDR_REG_REG (code, inst->dreg, ARMREG_SP, ARMREG_IP);
5286 g_assert_not_reached ();
5288 if (cfg->verbose_level > 2)
5289 g_print ("Argument %d assigned to register %s\n", pos, mono_arch_regname (inst->dreg));
5291 /* the argument should be put on the stack: FIXME handle size != word */
5292 if (ainfo->storage == RegTypeGeneral || ainfo->storage == RegTypeIRegPair) {
/* Store a register argument to its stack slot, sized 1/2/8/other(word) */
5293 switch (ainfo->size) {
5295 if (arm_is_imm12 (inst->inst_offset))
5296 ARM_STRB_IMM (code, ainfo->reg, inst->inst_basereg, inst->inst_offset);
5298 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
5299 ARM_STRB_REG_REG (code, ainfo->reg, inst->inst_basereg, ARMREG_IP);
5303 if (arm_is_imm8 (inst->inst_offset)) {
5304 ARM_STRH_IMM (code, ainfo->reg, inst->inst_basereg, inst->inst_offset);
5306 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
5307 ARM_STRH_REG_REG (code, ainfo->reg, inst->inst_basereg, ARMREG_IP);
5311 if (arm_is_imm12 (inst->inst_offset)) {
5312 ARM_STR_IMM (code, ainfo->reg, inst->inst_basereg, inst->inst_offset);
5314 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
5315 ARM_STR_REG_REG (code, ainfo->reg, inst->inst_basereg, ARMREG_IP);
5317 if (arm_is_imm12 (inst->inst_offset + 4)) {
5318 ARM_STR_IMM (code, ainfo->reg + 1, inst->inst_basereg, inst->inst_offset + 4);
5320 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset + 4);
5321 ARM_STR_REG_REG (code, ainfo->reg + 1, inst->inst_basereg, ARMREG_IP);
5325 if (arm_is_imm12 (inst->inst_offset)) {
5326 ARM_STR_IMM (code, ainfo->reg, inst->inst_basereg, inst->inst_offset);
5328 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
5329 ARM_STR_REG_REG (code, ainfo->reg, inst->inst_basereg, ARMREG_IP);
/* RegTypeBaseGen: 64-bit arg split between r3 and the caller stack */
5333 } else if (ainfo->storage == RegTypeBaseGen) {
5334 g_assert (arm_is_imm12 (prev_sp_offset + ainfo->offset));
5335 g_assert (arm_is_imm12 (inst->inst_offset));
5336 ARM_LDR_IMM (code, ARMREG_LR, ARMREG_SP, (prev_sp_offset + ainfo->offset));
5337 ARM_STR_IMM (code, ARMREG_LR, inst->inst_basereg, inst->inst_offset + 4);
5338 ARM_STR_IMM (code, ARMREG_R3, inst->inst_basereg, inst->inst_offset);
/* RegTypeBase: copy from the caller's stack through LR */
5339 } else if (ainfo->storage == RegTypeBase) {
5340 if (arm_is_imm12 (prev_sp_offset + ainfo->offset)) {
5341 ARM_LDR_IMM (code, ARMREG_LR, ARMREG_SP, (prev_sp_offset + ainfo->offset));
5343 code = mono_arm_emit_load_imm (code, ARMREG_IP, prev_sp_offset + ainfo->offset);
5344 ARM_LDR_REG_REG (code, ARMREG_LR, ARMREG_SP, ARMREG_IP);
5347 switch (ainfo->size) {
5349 if (arm_is_imm8 (inst->inst_offset)) {
5350 ARM_STRB_IMM (code, ARMREG_LR, inst->inst_basereg, inst->inst_offset);
5352 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
5353 ARM_STRB_REG_REG (code, ARMREG_LR, inst->inst_basereg, ARMREG_IP);
5357 if (arm_is_imm8 (inst->inst_offset)) {
5358 ARM_STRH_IMM (code, ARMREG_LR, inst->inst_basereg, inst->inst_offset);
5360 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
5361 ARM_STRH_REG_REG (code, ARMREG_LR, inst->inst_basereg, ARMREG_IP);
5365 if (arm_is_imm12 (inst->inst_offset)) {
5366 ARM_STR_IMM (code, ARMREG_LR, inst->inst_basereg, inst->inst_offset);
5368 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
5369 ARM_STR_REG_REG (code, ARMREG_LR, inst->inst_basereg, ARMREG_IP);
5371 if (arm_is_imm12 (prev_sp_offset + ainfo->offset + 4)) {
5372 ARM_LDR_IMM (code, ARMREG_LR, ARMREG_SP, (prev_sp_offset + ainfo->offset + 4));
5374 code = mono_arm_emit_load_imm (code, ARMREG_IP, prev_sp_offset + ainfo->offset + 4);
5375 ARM_LDR_REG_REG (code, ARMREG_LR, ARMREG_SP, ARMREG_IP);
5377 if (arm_is_imm12 (inst->inst_offset + 4)) {
5378 ARM_STR_IMM (code, ARMREG_LR, inst->inst_basereg, inst->inst_offset + 4);
5380 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset + 4);
5381 ARM_STR_REG_REG (code, ARMREG_LR, inst->inst_basereg, ARMREG_IP);
5385 if (arm_is_imm12 (inst->inst_offset)) {
5386 ARM_STR_IMM (code, ARMREG_LR, inst->inst_basereg, inst->inst_offset);
5388 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
5389 ARM_STR_REG_REG (code, ARMREG_LR, inst->inst_basereg, ARMREG_IP);
5393 } else if (ainfo->storage == RegTypeFP) {
5394 g_assert_not_reached ();
/* RegTypeStructByVal: registers first, then memcpy the stack-resident rest */
5395 } else if (ainfo->storage == RegTypeStructByVal) {
5396 int doffset = inst->inst_offset;
5400 size = mini_type_stack_size_full (cfg->generic_sharing_context, inst->inst_vtype, NULL, sig->pinvoke);
5401 for (cur_reg = 0; cur_reg < ainfo->size; ++cur_reg) {
5402 if (arm_is_imm12 (doffset)) {
5403 ARM_STR_IMM (code, ainfo->reg + cur_reg, inst->inst_basereg, doffset);
5405 code = mono_arm_emit_load_imm (code, ARMREG_IP, doffset);
5406 ARM_STR_REG_REG (code, ainfo->reg + cur_reg, inst->inst_basereg, ARMREG_IP);
5408 soffset += sizeof (gpointer);
5409 doffset += sizeof (gpointer);
5411 if (ainfo->vtsize) {
5412 /* FIXME: handle overrun! with struct sizes not multiple of 4 */
5413 //g_print ("emit_memcpy (prev_sp_ofs: %d, ainfo->offset: %d, soffset: %d)\n", prev_sp_offset, ainfo->offset, soffset);
5414 code = emit_memcpy (code, ainfo->vtsize * sizeof (gpointer), inst->inst_basereg, doffset, ARMREG_SP, prev_sp_offset + ainfo->offset);
5416 } else if (ainfo->storage == RegTypeStructByAddr) {
5417 g_assert_not_reached ();
5418 /* FIXME: handle overrun! with struct sizes not multiple of 4 */
5419 code = emit_memcpy (code, ainfo->vtsize * sizeof (gpointer), inst->inst_basereg, inst->inst_offset, ainfo->reg, 0);
5421 g_assert_not_reached ();
/* Fill in the MonoLMF now that arguments are in place */
5426 if (method->save_lmf)
5427 code = emit_save_lmf (cfg, code, alloc_size - lmf_offset);
5430 code = mono_arch_instrument_prolog (cfg, mono_trace_enter_method, code, TRUE);
/* Soft-debugger support: materialize the SeqPointInfo pointer */
5432 if (cfg->arch.seq_point_info_var) {
5433 MonoInst *ins = cfg->arch.seq_point_info_var;
5435 /* Initialize the variable from a GOT slot */
5436 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_SEQ_POINT_INFO, cfg->method);
5437 #ifdef USE_JUMP_TABLES
5439 gpointer *jte = mono_jumptable_add_entry ();
5440 code = mono_arm_load_jumptable_entry (code, jte, ARMREG_IP);
5441 ARM_LDR_IMM (code, ARMREG_R0, ARMREG_IP, 0);
5443 /** XXX: is it correct? */
5445 ARM_LDR_IMM (code, ARMREG_R0, ARMREG_PC, 0);
5447 *(gpointer*)code = NULL;
5450 ARM_LDR_REG_REG (code, ARMREG_R0, ARMREG_PC, ARMREG_R0);
5452 g_assert (ins->opcode == OP_REGOFFSET);
5454 if (arm_is_imm12 (ins->inst_offset)) {
5455 ARM_STR_IMM (code, ARMREG_R0, ins->inst_basereg, ins->inst_offset);
5457 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_offset);
5458 ARM_STR_REG_REG (code, ARMREG_R0, ins->inst_basereg, ARMREG_LR);
5462 /* Initialize ss_trigger_page_var */
5463 if (!cfg->soft_breakpoints) {
5464 MonoInst *info_var = cfg->arch.seq_point_info_var;
5465 MonoInst *ss_trigger_page_var = cfg->arch.ss_trigger_page_var;
5466 int dreg = ARMREG_LR;
5469 g_assert (info_var->opcode == OP_REGOFFSET);
5470 g_assert (arm_is_imm12 (info_var->inst_offset));
5472 ARM_LDR_IMM (code, dreg, info_var->inst_basereg, info_var->inst_offset);
5473 /* Load the trigger page addr */
5474 ARM_LDR_IMM (code, dreg, dreg, G_STRUCT_OFFSET (SeqPointInfo, ss_trigger_page));
5475 ARM_STR_IMM (code, dreg, ss_trigger_page_var->inst_basereg, ss_trigger_page_var->inst_offset);
/* Soft breakpoints: cache the trigger flag and the ss/bp helper pointers */
5479 if (cfg->arch.seq_point_read_var) {
5480 MonoInst *read_ins = cfg->arch.seq_point_read_var;
5481 MonoInst *ss_method_ins = cfg->arch.seq_point_ss_method_var;
5482 MonoInst *bp_method_ins = cfg->arch.seq_point_bp_method_var;
5483 #ifdef USE_JUMP_TABLES
5486 g_assert (read_ins->opcode == OP_REGOFFSET);
5487 g_assert (arm_is_imm12 (read_ins->inst_offset));
5488 g_assert (ss_method_ins->opcode == OP_REGOFFSET);
5489 g_assert (arm_is_imm12 (ss_method_ins->inst_offset));
5490 g_assert (bp_method_ins->opcode == OP_REGOFFSET);
5491 g_assert (arm_is_imm12 (bp_method_ins->inst_offset));
5493 #ifdef USE_JUMP_TABLES
5494 jte = mono_jumptable_add_entries (3);
5495 jte [0] = (gpointer)&ss_trigger_var;
5496 jte [1] = single_step_func_wrapper;
5497 jte [2] = breakpoint_func_wrapper;
5498 code = mono_arm_load_jumptable_entry_addr (code, jte, ARMREG_LR);
5500 ARM_MOV_REG_REG (code, ARMREG_LR, ARMREG_PC);
5502 *(volatile int **)code = &ss_trigger_var;
5504 *(gpointer*)code = single_step_func_wrapper;
5506 *(gpointer*)code = breakpoint_func_wrapper;
5510 ARM_LDR_IMM (code, ARMREG_IP, ARMREG_LR, 0);
5511 ARM_STR_IMM (code, ARMREG_IP, read_ins->inst_basereg, read_ins->inst_offset);
5512 ARM_LDR_IMM (code, ARMREG_IP, ARMREG_LR, 4);
5513 ARM_STR_IMM (code, ARMREG_IP, ss_method_ins->inst_basereg, ss_method_ins->inst_offset);
5514 ARM_LDR_IMM (code, ARMREG_IP, ARMREG_LR, 8);
5515 ARM_STR_IMM (code, ARMREG_IP, bp_method_ins->inst_basereg, bp_method_ins->inst_offset);
5518 cfg->code_len = code - cfg->native_code;
5519 g_assert (cfg->code_len < cfg->code_size);
/*
 * mono_arch_emit_epilog:
 * Emits the method epilogue: optionally calls the leave-method trace hook,
 * reloads a small struct return value into r0 when it was kept in a stack
 * slot, then tears the frame down.  save_lmf methods restore registers from
 * the MonoLMF area; otherwise the callee-saved registers are popped, with
 * the saved LR popped directly into PC to return.  The code buffer is grown
 * first if the worst-case epilog would not fit.
 */
5526 mono_arch_emit_epilog (MonoCompile *cfg)
5528 MonoMethod *method = cfg->method;
5529 int pos, i, rot_amount;
/* Conservative worst-case size estimate, enlarged by the features below */
5530 int max_epilog_size = 16 + 20*4;
5534 if (cfg->method->save_lmf)
5535 max_epilog_size += 128;
5537 if (mono_jit_trace_calls != NULL)
5538 max_epilog_size += 50;
5540 if (cfg->prof_options & MONO_PROFILE_ENTER_LEAVE)
5541 max_epilog_size += 50;
5543 while (cfg->code_len + max_epilog_size > (cfg->code_size - 16)) {
5544 cfg->code_size *= 2;
5545 cfg->native_code = g_realloc (cfg->native_code, cfg->code_size);
5546 cfg->stat_code_reallocs++;
5550 * Keep in sync with OP_JMP
5552 code = cfg->native_code + cfg->code_len;
5554 if (mono_jit_trace_calls != NULL && mono_trace_eval (method)) {
5555 code = mono_arch_instrument_epilog (cfg, mono_trace_leave_method, code, TRUE);
5559 /* Load returned vtypes into registers if needed */
5560 cinfo = cfg->arch.cinfo;
5561 if (cinfo->ret.storage == RegTypeStructByVal) {
5562 MonoInst *ins = cfg->ret;
5564 if (arm_is_imm12 (ins->inst_offset)) {
5565 ARM_LDR_IMM (code, ARMREG_R0, ins->inst_basereg, ins->inst_offset);
5567 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_offset);
5568 ARM_LDR_REG_REG (code, ARMREG_R0, ins->inst_basereg, ARMREG_LR);
5572 if (method->save_lmf) {
5573 int lmf_offset, reg, sp_adj, regmask;
5574 /* all but r0-r3, sp and pc */
5575 pos += sizeof (MonoLMF) - (MONO_ARM_NUM_SAVED_REGS * sizeof (mgreg_t));
5578 code = emit_restore_lmf (cfg, code, cfg->stack_usage - lmf_offset);
5580 /* This points to r4 inside MonoLMF->iregs */
5581 sp_adj = (sizeof (MonoLMF) - MONO_ARM_NUM_SAVED_REGS * sizeof (mgreg_t));
5583 regmask = 0x9ff0; /* restore lr to pc */
5584 /* Skip caller saved registers not used by the method */
5585 while (!(cfg->used_int_regs & (1 << reg)) && reg < ARMREG_FP) {
5586 regmask &= ~(1 << reg);
5590 /* point sp at the registers to restore: 10 is 14 -4, because we skip r0-r3 */
5591 code = emit_big_add (code, ARMREG_SP, cfg->frame_reg, cfg->stack_usage - lmf_offset + sp_adj);
/* Pop saved regs from the LMF; bit 15 in regmask returns via PC */
5593 ARM_POP (code, regmask);
/* Non-LMF path: rewind SP to the saved-register area */
5595 if ((i = mono_arm_is_rotated_imm8 (cfg->stack_usage, &rot_amount)) >= 0) {
5596 ARM_ADD_REG_IMM (code, ARMREG_SP, cfg->frame_reg, i, rot_amount);
5598 code = mono_arm_emit_load_imm (code, ARMREG_IP, cfg->stack_usage);
5599 ARM_ADD_REG_REG (code, ARMREG_SP, cfg->frame_reg, ARMREG_IP);
5603 /* Restore saved gregs */
5604 if (cfg->used_int_regs)
5605 ARM_POP (code, cfg->used_int_regs);
5606 /* Restore saved r7, restore LR to PC */
5607 ARM_POP (code, (1 << ARMREG_R7) | (1 << ARMREG_PC));
5609 ARM_POP (code, cfg->used_int_regs | (1 << ARMREG_PC));
5613 cfg->code_len = code - cfg->native_code;
5615 g_assert (cfg->code_len < cfg->code_size);
5619 /* remove once throw_exception_by_name is eliminated */
/*
 * exception_id_by_name:
 * Maps a corlib exception type name to its MONO_EXC_* intrinsic index.
 * Aborts via g_error () for an unknown name, so callers can rely on always
 * receiving a valid index.
 */
5621 exception_id_by_name (const char *name)
5623 if (strcmp (name, "IndexOutOfRangeException") == 0)
5624 return MONO_EXC_INDEX_OUT_OF_RANGE;
5625 if (strcmp (name, "OverflowException") == 0)
5626 return MONO_EXC_OVERFLOW;
5627 if (strcmp (name, "ArithmeticException") == 0)
5628 return MONO_EXC_ARITHMETIC;
5629 if (strcmp (name, "DivideByZeroException") == 0)
5630 return MONO_EXC_DIVIDE_BY_ZERO;
5631 if (strcmp (name, "InvalidCastException") == 0)
5632 return MONO_EXC_INVALID_CAST;
5633 if (strcmp (name, "NullReferenceException") == 0)
5634 return MONO_EXC_NULL_REF;
5635 if (strcmp (name, "ArrayTypeMismatchException") == 0)
5636 return MONO_EXC_ARRAY_TYPE_MISMATCH;
5637 if (strcmp (name, "ArgumentException") == 0)
5638 return MONO_EXC_ARGUMENT;
5639 g_error ("Unknown intrinsic exception %s\n", name);
/*
 * mono_arch_emit_exceptions:
 * Emits the out-of-line exception-throwing thunks referenced by
 * MONO_PATCH_INFO_EXC patches.  Only one thunk is emitted per distinct
 * intrinsic exception; later branches to the same exception are simply
 * re-patched to the existing thunk.  Each thunk loads the exception's type
 * token into r0, the throw site's return address into r1, and calls
 * mono_arch_throw_corlib_exception.
 */
5644 mono_arch_emit_exceptions (MonoCompile *cfg)
5646 MonoJumpInfo *patch_info;
5649 guint8* exc_throw_pos [MONO_EXC_INTRINS_NUM];
5650 guint8 exc_throw_found [MONO_EXC_INTRINS_NUM];
5651 int max_epilog_size = 50;
5653 for (i = 0; i < MONO_EXC_INTRINS_NUM; i++) {
5654 exc_throw_pos [i] = NULL;
5655 exc_throw_found [i] = 0;
5658 /* count the number of exception infos */
5661 * make sure we have enough space for exceptions
5663 for (patch_info = cfg->patch_info; patch_info; patch_info = patch_info->next) {
5664 if (patch_info->type == MONO_PATCH_INFO_EXC) {
5665 i = exception_id_by_name (patch_info->data.target);
5666 if (!exc_throw_found [i]) {
/* 32 bytes per distinct thunk on top of the base estimate */
5667 max_epilog_size += 32;
5668 exc_throw_found [i] = TRUE;
5673 while (cfg->code_len + max_epilog_size > (cfg->code_size - 16)) {
5674 cfg->code_size *= 2;
5675 cfg->native_code = g_realloc (cfg->native_code, cfg->code_size);
5676 cfg->stat_code_reallocs++;
5679 code = cfg->native_code + cfg->code_len;
5681 /* add code to raise exceptions */
5682 for (patch_info = cfg->patch_info; patch_info; patch_info = patch_info->next) {
5683 switch (patch_info->type) {
5684 case MONO_PATCH_INFO_EXC: {
5685 MonoClass *exc_class;
5686 unsigned char *ip = patch_info->ip.i + cfg->native_code;
5688 i = exception_id_by_name (patch_info->data.target);
/* Thunk already emitted for this exception: just redirect the branch */
5689 if (exc_throw_pos [i]) {
5690 arm_patch (ip, exc_throw_pos [i]);
5691 patch_info->type = MONO_PATCH_INFO_NONE;
5694 exc_throw_pos [i] = code;
5696 arm_patch (ip, code);
5698 exc_class = mono_class_from_name (mono_defaults.corlib, "System", patch_info->data.name);
5699 g_assert (exc_class);
/* r1 = return address of the throwing site */
5701 ARM_MOV_REG_REG (code, ARMREG_R1, ARMREG_LR);
5702 #ifdef USE_JUMP_TABLES
5704 gpointer *jte = mono_jumptable_add_entries (2);
/* Reuse the patch entry to resolve the throw helper's address */
5705 patch_info->type = MONO_PATCH_INFO_INTERNAL_METHOD;
5706 patch_info->data.name = "mono_arch_throw_corlib_exception";
5707 patch_info->ip.i = code - cfg->native_code;
5708 code = mono_arm_load_jumptable_entry_addr (code, jte, ARMREG_R0);
5709 ARM_LDR_IMM (code, ARMREG_IP, ARMREG_R0, 0);
5710 ARM_LDR_IMM (code, ARMREG_R0, ARMREG_R0, 4);
5711 ARM_BLX_REG (code, ARMREG_IP);
5712 jte [1] = GUINT_TO_POINTER (exc_class->type_token);
/* Non-jumptable path: token is embedded in the code stream after the call */
5715 ARM_LDR_IMM (code, ARMREG_R0, ARMREG_PC, 0);
5716 patch_info->type = MONO_PATCH_INFO_INTERNAL_METHOD;
5717 patch_info->data.name = "mono_arch_throw_corlib_exception";
5718 patch_info->ip.i = code - cfg->native_code;
5720 *(guint32*)(gpointer)code = exc_class->type_token;
5731 cfg->code_len = code - cfg->native_code;
5733 g_assert (cfg->code_len < cfg->code_size);
/*
 * mono_arch_finish_init:
 * Caches the TLS offsets of the LMF and LMF-address slots once the runtime
 * is initialized; consumed by the prolog/epilog LMF save/restore code.
 */
5740 mono_arch_finish_init (void)
5742 lmf_tls_offset = mono_get_lmf_tls_offset ();
5743 lmf_addr_tls_offset = mono_get_lmf_addr_tls_offset ();
5747 mono_arch_free_jit_tls_data (MonoJitTlsData *tls)
5752 mono_arch_emit_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
5759 mono_arch_print_tree (MonoInst *tree, int arity)
5765 mono_arch_get_domain_intrinsic (MonoCompile* cfg)
5767 return mono_get_domain_intrinsic (cfg);
5771 mono_arch_get_patch_offset (guint8 *code)
5778 mono_arch_flush_register_windows (void)
5782 #ifdef MONO_ARCH_HAVE_IMT
/*
 * mono_arch_emit_imt_argument:
 * Arranges for the hidden IMT/method argument of CALL to be passed in
 * ARMREG_V5.  Under AOT, or whenever generic sharing / LLVM / jump tables
 * require it, a register is always used and call->dynamic_imt_arg is set;
 * the value is either IMT_ARG's vreg or a method constant
 * (OP_AOTCONST for AOT, OP_PCONST otherwise) loaded into a fresh vreg.
 */
5787 mono_arch_emit_imt_argument (MonoCompile *cfg, MonoCallInst *call, MonoInst *imt_arg)
5789 int method_reg = mono_alloc_ireg (cfg);
5790 #ifdef USE_JUMP_TABLES
5791 int use_jumptables = TRUE;
5793 int use_jumptables = FALSE;
5796 if (cfg->compile_aot) {
5799 call->dynamic_imt_arg = TRUE;
5802 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, method_reg, imt_arg->dreg);
5804 MONO_INST_NEW (cfg, ins, OP_AOTCONST);
5805 ins->dreg = method_reg;
5806 ins->inst_p0 = call->method;
5807 ins->inst_c1 = MONO_PATCH_INFO_METHODCONST;
5808 MONO_ADD_INS (cfg->cbb, ins);
5810 mono_call_inst_add_outarg_reg (cfg, call, method_reg, ARMREG_V5, FALSE);
5811 } else if (cfg->generic_context || imt_arg || mono_use_llvm || use_jumptables) {
5812 /* Always pass in a register for simplicity */
5813 call->dynamic_imt_arg = TRUE;
5815 cfg->uses_rgctx_reg = TRUE;
5818 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, method_reg, imt_arg->dreg);
/* JITted case: the method pointer is a plain pointer constant */
5822 MONO_INST_NEW (cfg, ins, OP_PCONST);
5823 ins->inst_p0 = call->method;
5824 ins->dreg = method_reg;
5825 MONO_ADD_INS (cfg->cbb, ins);
5828 mono_call_inst_add_outarg_reg (cfg, call, method_reg, ARMREG_V5, FALSE);
5832 #endif /* DISABLE_JIT */
/*
 * mono_arch_find_imt_method:
 * Recovers the IMT method for an interface call: with jump tables it is
 * always in V5; otherwise it is normally the word embedded in the code
 * stream right after the LDR-PC instruction at CODE, falling back to V5 for
 * AOTed code and the gsharedvt trampoline.
 */
5835 mono_arch_find_imt_method (mgreg_t *regs, guint8 *code)
5837 #ifdef USE_JUMP_TABLES
5838 return (MonoMethod*)regs [ARMREG_V5];
5841 guint32 *code_ptr = (guint32*)code;
5843 method = GUINT_TO_POINTER (code_ptr [1]);
5847 return (MonoMethod*)regs [ARMREG_V5];
5849 /* The IMT value is stored in the code stream right after the LDC instruction. */
5850 /* This is no longer true for the gsharedvt_in trampoline */
5852 if (!IS_LDR_PC (code_ptr [0])) {
5853 g_warning ("invalid code stream, instruction before IMT value is not a LDC in %s() (code %p value 0: 0x%x -1: 0x%x -2: 0x%x)", __FUNCTION__, code, code_ptr [2], code_ptr [1], code_ptr [0]);
5854 g_assert (IS_LDR_PC (code_ptr [0]));
5858 /* This is AOTed code, or the gsharedvt trampoline, the IMT method is in V5 */
5859 return (MonoMethod*)regs [ARMREG_V5];
5861 return (MonoMethod*) method;
/*
 * mono_arch_find_static_call_vtable:
 * For a static rgctx call, the vtable is passed in the rgctx register;
 * simply read it back out of the saved register state.
 */
5866 mono_arch_find_static_call_vtable (mgreg_t *regs, guint8 *code)
5868 return (MonoVTable*) regs [MONO_ARCH_RGCTX_REG];
5871 #define ENABLE_WRONG_METHOD_CHECK 0
5872 #define BASE_SIZE (6 * 4)
5873 #define BSEARCH_ENTRY_SIZE (4 * 4)
5874 #define CMP_SIZE (3 * 4)
5875 #define BRANCH_SIZE (1 * 4)
5876 #define CALL_SIZE (2 * 4)
5877 #define WMC_SIZE (5 * 4)
5878 #define DISTANCE(A, B) (((gint32)(B)) - ((gint32)(A)))
5880 #ifdef USE_JUMP_TABLES
/*
 * set_jumptable_element:
 * Stores VALUE into slot INDEX of the jump table BASE, asserting the slot
 * was not already filled (each slot is written exactly once).
 */
5882 set_jumptable_element (gpointer *base, guint32 index, gpointer value)
5884 g_assert (base [index] == NULL);
5885 base [index] = value;
/*
 * load_element_with_regbase_cond:
 * Emits a conditional load of jump-table entry JTI (scaled to a byte offset
 * of jti * 4) relative to BASE into DREG, predicated on COND.  Uses a single
 * LDR when the offset fits in imm12, otherwise materializes the offset with
 * MOVW/MOVT (ARMv7 movw/movt forms) and does a register-offset load.
 */
5888 load_element_with_regbase_cond (arminstr_t *code, ARMReg dreg, ARMReg base, guint32 jti, int cond)
5890 if (arm_is_imm12 (jti * 4)) {
5891 ARM_LDR_IMM_COND (code, dreg, base, jti * 4, cond);
5893 ARM_MOVW_REG_IMM_COND (code, dreg, (jti * 4) & 0xffff, cond);
5894 if ((jti * 4) >> 16)
5895 ARM_MOVT_REG_IMM_COND (code, dreg, ((jti * 4) >> 16) & 0xffff, cond);
5896 ARM_LDR_REG_REG_SHIFT_COND (code, dreg, base, dreg, ARMSHIFT_LSL, 0, cond);
/*
 * arm_emit_value_and_patch_ldr:
 * Back-patches the pc-relative LDR at TARGET so its imm12 displacement
 * reaches the current emission point, where the literal is placed.
 * NOTE(review): delta is guint32, so the `delta >= 0` half of the assert is
 * tautologically true; only the <= 0xFFF bound actually checks anything —
 * a wrap-around from DISTANCE () on a backwards target would still pass the
 * first half.  Verify against upstream whether delta should be signed.
 */
5902 arm_emit_value_and_patch_ldr (arminstr_t *code, arminstr_t *target, guint32 value)
5904 guint32 delta = DISTANCE (target, code);
5906 g_assert (delta >= 0 && delta <= 0xFFF);
5907 *target = *target | delta;
5914 mono_arch_build_imt_thunk (MonoVTable *vtable, MonoDomain *domain, MonoIMTCheckItem **imt_entries, int count,
5915 gpointer fail_tramp)
5918 arminstr_t *code, *start;
5919 #ifdef USE_JUMP_TABLES
5922 gboolean large_offsets = FALSE;
5923 guint32 **constant_pool_starts;
5924 arminstr_t *vtable_target = NULL;
5925 int extra_space = 0;
5929 #ifdef USE_JUMP_TABLES
5930 for (i = 0; i < count; ++i) {
5931 MonoIMTCheckItem *item = imt_entries [i];
5932 item->chunk_size += 4 * 16;
5933 if (!item->is_equals)
5934 imt_entries [item->check_target_idx]->compare_done = TRUE;
5935 size += item->chunk_size;
5938 constant_pool_starts = g_new0 (guint32*, count);
5940 for (i = 0; i < count; ++i) {
5941 MonoIMTCheckItem *item = imt_entries [i];
5942 if (item->is_equals) {
5943 gboolean fail_case = !item->check_target_idx && fail_tramp;
5945 if (item->has_target_code || !arm_is_imm12 (DISTANCE (vtable, &vtable->vtable[item->value.vtable_slot]))) {
5946 item->chunk_size += 32;
5947 large_offsets = TRUE;
5950 if (item->check_target_idx || fail_case) {
5951 if (!item->compare_done || fail_case)
5952 item->chunk_size += CMP_SIZE;
5953 item->chunk_size += BRANCH_SIZE;
5955 #if ENABLE_WRONG_METHOD_CHECK
5956 item->chunk_size += WMC_SIZE;
5960 item->chunk_size += 16;
5961 large_offsets = TRUE;
5963 item->chunk_size += CALL_SIZE;
5965 item->chunk_size += BSEARCH_ENTRY_SIZE;
5966 imt_entries [item->check_target_idx]->compare_done = TRUE;
5968 size += item->chunk_size;
5972 size += 4 * count; /* The ARM_ADD_REG_IMM to pop the stack */
5976 code = mono_method_alloc_generic_virtual_thunk (domain, size);
5978 code = mono_domain_code_reserve (domain, size);
5982 printf ("building IMT thunk for class %s %s entries %d code size %d code at %p end %p vtable %p\n", vtable->klass->name_space, vtable->klass->name, count, size, start, ((guint8*)start) + size, vtable);
5983 for (i = 0; i < count; ++i) {
5984 MonoIMTCheckItem *item = imt_entries [i];
5985 printf ("method %d (%p) %s vtable slot %p is_equals %d chunk size %d\n", i, item->key, item->key->name, &vtable->vtable [item->value.vtable_slot], item->is_equals, item->chunk_size);
5989 #ifdef USE_JUMP_TABLES
5990 ARM_PUSH3 (code, ARMREG_R0, ARMREG_R1, ARMREG_R2);
5991 /* If jumptables we always pass the IMT method in R5 */
5992 ARM_MOV_REG_REG (code, ARMREG_R0, ARMREG_V5);
5993 #define VTABLE_JTI 0
5994 #define IMT_METHOD_OFFSET 0
5995 #define TARGET_CODE_OFFSET 1
5996 #define JUMP_CODE_OFFSET 2
5997 #define RECORDS_PER_ENTRY 3
5998 #define IMT_METHOD_JTI(idx) (1 + idx * RECORDS_PER_ENTRY + IMT_METHOD_OFFSET)
5999 #define TARGET_CODE_JTI(idx) (1 + idx * RECORDS_PER_ENTRY + TARGET_CODE_OFFSET)
6000 #define JUMP_CODE_JTI(idx) (1 + idx * RECORDS_PER_ENTRY + JUMP_CODE_OFFSET)
6002 jte = mono_jumptable_add_entries (RECORDS_PER_ENTRY * count + 1 /* vtable */);
6003 code = (arminstr_t *) mono_arm_load_jumptable_entry_addr ((guint8 *) code, jte, ARMREG_R2);
6004 ARM_LDR_IMM (code, ARMREG_IP, ARMREG_R2, VTABLE_JTI);
6005 set_jumptable_element (jte, VTABLE_JTI, vtable);
6008 ARM_PUSH4 (code, ARMREG_R0, ARMREG_R1, ARMREG_IP, ARMREG_PC);
6010 ARM_PUSH2 (code, ARMREG_R0, ARMREG_R1);
6011 ARM_LDR_IMM (code, ARMREG_R0, ARMREG_LR, -4);
6012 vtable_target = code;
6013 ARM_LDR_IMM (code, ARMREG_IP, ARMREG_PC, 0);
6015 if (mono_use_llvm) {
6016 /* LLVM always passes the IMT method in R5 */
6017 ARM_MOV_REG_REG (code, ARMREG_R0, ARMREG_V5);
6019 /* R0 == 0 means we are called from AOT code. In this case, V5 contains the IMT method */
6020 ARM_CMP_REG_IMM8 (code, ARMREG_R0, 0);
6021 ARM_MOV_REG_REG_COND (code, ARMREG_R0, ARMREG_V5, ARMCOND_EQ);
6025 for (i = 0; i < count; ++i) {
6026 MonoIMTCheckItem *item = imt_entries [i];
6027 #ifdef USE_JUMP_TABLES
6028 guint32 imt_method_jti = 0, target_code_jti = 0;
6030 arminstr_t *imt_method = NULL, *vtable_offset_ins = NULL, *target_code_ins = NULL;
6032 gint32 vtable_offset;
6034 item->code_target = (guint8*)code;
6036 if (item->is_equals) {
6037 gboolean fail_case = !item->check_target_idx && fail_tramp;
6039 if (item->check_target_idx || fail_case) {
6040 if (!item->compare_done || fail_case) {
6041 #ifdef USE_JUMP_TABLES
6042 imt_method_jti = IMT_METHOD_JTI (i);
6043 code = load_element_with_regbase_cond (code, ARMREG_R1, ARMREG_R2, imt_method_jti, ARMCOND_AL);
6046 ARM_LDR_IMM (code, ARMREG_R1, ARMREG_PC, 0);
6048 ARM_CMP_REG_REG (code, ARMREG_R0, ARMREG_R1);
6050 #ifdef USE_JUMP_TABLES
6051 code = load_element_with_regbase_cond (code, ARMREG_R1, ARMREG_R2, JUMP_CODE_JTI (i), ARMCOND_NE);
6052 ARM_BX_COND (code, ARMCOND_NE, ARMREG_R1);
6053 item->jmp_code = GUINT_TO_POINTER (JUMP_CODE_JTI (i));
6055 item->jmp_code = (guint8*)code;
6056 ARM_B_COND (code, ARMCOND_NE, 0);
6059 /*Enable the commented code to assert on wrong method*/
6060 #if ENABLE_WRONG_METHOD_CHECK
6061 #ifdef USE_JUMP_TABLES
6062 imt_method_jti = IMT_METHOD_JTI (i);
6063 code = load_element_with_regbase_cond (code, ARMREG_R1, ARMREG_R2, imt_method_jti, ARMCOND_AL);
6066 ARM_LDR_IMM (code, ARMREG_R1, ARMREG_PC, 0);
6068 ARM_CMP_REG_REG (code, ARMREG_R0, ARMREG_R1);
6069 ARM_B_COND (code, ARMCOND_NE, 1);
6075 if (item->has_target_code) {
6076 /* Load target address */
6077 #ifdef USE_JUMP_TABLES
6078 target_code_jti = TARGET_CODE_JTI (i);
6079 code = load_element_with_regbase_cond (code, ARMREG_R1, ARMREG_R2, target_code_jti, ARMCOND_AL);
6080 /* Restore registers */
6081 ARM_POP3 (code, ARMREG_R0, ARMREG_R1, ARMREG_R2);
6083 ARM_BX (code, ARMREG_R1);
6084 set_jumptable_element (jte, target_code_jti, item->value.target_code);
6086 target_code_ins = code;
6087 ARM_LDR_IMM (code, ARMREG_R1, ARMREG_PC, 0);
6088 /* Save it to the fourth slot */
6089 ARM_STR_IMM (code, ARMREG_R1, ARMREG_SP, 3 * sizeof (gpointer));
6090 /* Restore registers and branch */
6091 ARM_POP4 (code, ARMREG_R0, ARMREG_R1, ARMREG_IP, ARMREG_PC);
6093 code = arm_emit_value_and_patch_ldr (code, target_code_ins, (gsize)item->value.target_code);
6096 vtable_offset = DISTANCE (vtable, &vtable->vtable[item->value.vtable_slot]);
6097 if (!arm_is_imm12 (vtable_offset)) {
6099 * We need to branch to a computed address but we don't have
6100 * a free register to store it, since IP must contain the
6101 * vtable address. So we push the two values to the stack, and
6102 * load them both using LDM.
6104 /* Compute target address */
6105 #ifdef USE_JUMP_TABLES
6106 ARM_MOVW_REG_IMM (code, ARMREG_R1, vtable_offset & 0xffff);
6107 if (vtable_offset >> 16)
6108 ARM_MOVT_REG_IMM (code, ARMREG_R1, (vtable_offset >> 16) & 0xffff);
6109 /* IP had vtable base. */
6110 ARM_LDR_REG_REG (code, ARMREG_IP, ARMREG_IP, ARMREG_R1);
6111 /* Restore registers and branch */
6112 ARM_POP3 (code, ARMREG_R0, ARMREG_R1, ARMREG_R2);
6113 ARM_BX (code, ARMREG_IP);
6115 vtable_offset_ins = code;
6116 ARM_LDR_IMM (code, ARMREG_R1, ARMREG_PC, 0);
6117 ARM_LDR_REG_REG (code, ARMREG_R1, ARMREG_IP, ARMREG_R1);
6118 /* Save it to the fourth slot */
6119 ARM_STR_IMM (code, ARMREG_R1, ARMREG_SP, 3 * sizeof (gpointer));
6120 /* Restore registers and branch */
6121 ARM_POP4 (code, ARMREG_R0, ARMREG_R1, ARMREG_IP, ARMREG_PC);
6123 code = arm_emit_value_and_patch_ldr (code, vtable_offset_ins, vtable_offset);
6126 #ifdef USE_JUMP_TABLES
6127 ARM_LDR_IMM (code, ARMREG_IP, ARMREG_IP, vtable_offset);
6128 ARM_POP3 (code, ARMREG_R0, ARMREG_R1, ARMREG_R2);
6129 ARM_BX (code, ARMREG_IP);
6131 ARM_POP2 (code, ARMREG_R0, ARMREG_R1);
6133 ARM_ADD_REG_IMM8 (code, ARMREG_SP, ARMREG_SP, 2 * sizeof (gpointer));
6134 ARM_LDR_IMM (code, ARMREG_PC, ARMREG_IP, vtable_offset);
6140 #ifdef USE_JUMP_TABLES
6141 set_jumptable_element (jte, GPOINTER_TO_UINT (item->jmp_code), code);
6142 target_code_jti = TARGET_CODE_JTI (i);
6143 /* Load target address */
6144 code = load_element_with_regbase_cond (code, ARMREG_R1, ARMREG_R2, target_code_jti, ARMCOND_AL);
6145 /* Restore registers */
6146 ARM_POP3 (code, ARMREG_R0, ARMREG_R1, ARMREG_R2);
6148 ARM_BX (code, ARMREG_R1);
6149 set_jumptable_element (jte, target_code_jti, fail_tramp);
6151 arm_patch (item->jmp_code, (guchar*)code);
6153 target_code_ins = code;
6154 /* Load target address */
6155 ARM_LDR_IMM (code, ARMREG_R1, ARMREG_PC, 0);
6156 /* Save it to the fourth slot */
6157 ARM_STR_IMM (code, ARMREG_R1, ARMREG_SP, 3 * sizeof (gpointer));
6158 /* Restore registers and branch */
6159 ARM_POP4 (code, ARMREG_R0, ARMREG_R1, ARMREG_IP, ARMREG_PC);
6161 code = arm_emit_value_and_patch_ldr (code, target_code_ins, (gsize)fail_tramp);
6163 item->jmp_code = NULL;
6166 #ifdef USE_JUMP_TABLES
6168 set_jumptable_element (jte, imt_method_jti, item->key);
6171 code = arm_emit_value_and_patch_ldr (code, imt_method, (guint32)item->key);
6173 /*must emit after unconditional branch*/
6174 if (vtable_target) {
6175 code = arm_emit_value_and_patch_ldr (code, vtable_target, (guint32)vtable);
6176 item->chunk_size += 4;
6177 vtable_target = NULL;
6180 /*We reserve the space for bsearch IMT values after the first entry with an absolute jump*/
6181 constant_pool_starts [i] = code;
6183 code += extra_space;
6188 #ifdef USE_JUMP_TABLES
6189 code = load_element_with_regbase_cond (code, ARMREG_R1, ARMREG_R2, IMT_METHOD_JTI (i), ARMCOND_AL);
6190 ARM_CMP_REG_REG (code, ARMREG_R0, ARMREG_R1);
6191 code = load_element_with_regbase_cond (code, ARMREG_R1, ARMREG_R2, JUMP_CODE_JTI (i), ARMCOND_GE);
6192 ARM_BX_COND (code, ARMCOND_GE, ARMREG_R1);
6193 item->jmp_code = GUINT_TO_POINTER (JUMP_CODE_JTI (i));
6195 ARM_LDR_IMM (code, ARMREG_R1, ARMREG_PC, 0);
6196 ARM_CMP_REG_REG (code, ARMREG_R0, ARMREG_R1);
6198 item->jmp_code = (guint8*)code;
6199 ARM_B_COND (code, ARMCOND_GE, 0);
6205 for (i = 0; i < count; ++i) {
6206 MonoIMTCheckItem *item = imt_entries [i];
6207 if (item->jmp_code) {
6208 if (item->check_target_idx)
6209 #ifdef USE_JUMP_TABLES
6210 set_jumptable_element (jte, GPOINTER_TO_UINT (item->jmp_code), imt_entries [item->check_target_idx]->code_target);
6212 arm_patch (item->jmp_code, imt_entries [item->check_target_idx]->code_target);
6215 if (i > 0 && item->is_equals) {
6217 #ifdef USE_JUMP_TABLES
6218 for (j = i - 1; j >= 0 && !imt_entries [j]->is_equals; --j)
6219 set_jumptable_element (jte, IMT_METHOD_JTI (j), imt_entries [j]->key);
6221 arminstr_t *space_start = constant_pool_starts [i];
6222 for (j = i - 1; j >= 0 && !imt_entries [j]->is_equals; --j) {
6223 space_start = arm_emit_value_and_patch_ldr (space_start, (arminstr_t*)imt_entries [j]->code_target, (guint32)imt_entries [j]->key);
6231 char *buff = g_strdup_printf ("thunk_for_class_%s_%s_entries_%d", vtable->klass->name_space, vtable->klass->name, count);
6232 mono_disassemble_code (NULL, (guint8*)start, size, buff);
6237 #ifndef USE_JUMP_TABLES
6238 g_free (constant_pool_starts);
6241 mono_arch_flush_icache ((guint8*)start, size);
6242 mono_stats.imt_thunks_size += code - start;
6244 g_assert (DISTANCE (start, code) <= size);
6251 mono_arch_context_get_int_reg (MonoContext *ctx, int reg)
6253 return ctx->regs [reg];
6257 mono_arch_context_set_int_reg (MonoContext *ctx, int reg, mgreg_t val)
6259 ctx->regs [reg] = val;
6263 * mono_arch_get_trampolines:
6265 * Return a list of MonoTrampInfo structures describing arch specific trampolines
6269 mono_arch_get_trampolines (gboolean aot)
6271 return mono_arm_get_exception_trampolines (aot);
6275 #ifdef MONO_ARCH_SOFT_DEBUG_SUPPORTED
6277 * mono_arch_set_breakpoint:
6279 * Set a breakpoint at the native code corresponding to JI at NATIVE_OFFSET.
6280 * The location should contain code emitted by OP_SEQ_POINT.
6283 mono_arch_set_breakpoint (MonoJitInfo *ji, guint8 *ip)
6286 guint32 native_offset = ip - (guint8*)ji->code_start;
6287 MonoDebugOptions *opt = mini_get_debug_options ();
/* Soft breakpoints: patch the seq point site into a call through LR and
 * flush the icache for the 4 byte (one instruction) patch. */
6289 if (opt->soft_breakpoints) {
6290 g_assert (!ji->from_aot);
6292 ARM_BLX_REG (code, ARMREG_LR);
6293 mono_arch_flush_icache (code - 4, 4);
/* AOT code cannot be patched; instead record the breakpoint trigger page in
 * the per-method SeqPointInfo table, indexed by instruction word. */
6294 } else if (ji->from_aot) {
6295 SeqPointInfo *info = mono_arch_get_seq_point_info (mono_domain_get (), ji->code_start);
6297 g_assert (native_offset % 4 == 0);
6298 g_assert (info->bp_addrs [native_offset / 4] == 0);
6299 info->bp_addrs [native_offset / 4] = bp_trigger_page;
/* JITted code: emit a load from bp_trigger_page into LR; presumably the
 * resulting fault signals the breakpoint — see
 * mono_arch_is_breakpoint_event () below. */
6301 int dreg = ARMREG_LR;
6303 /* Read from another trigger page */
6304 #ifdef USE_JUMP_TABLES
6305 gpointer *jte = mono_jumptable_add_entry ();
6306 code = mono_arm_load_jumptable_entry (code, jte, dreg);
6307 jte [0] = bp_trigger_page;
/* NOTE(review): non-jumptable path (the #else of USE_JUMP_TABLES is elided
 * in this listing): load the trigger page address from a PC-relative
 * constant pool slot. */
6309 ARM_LDR_IMM (code, dreg, ARMREG_PC, 0);
6311 *(int*)code = (int)bp_trigger_page;
/* Dereference the trigger page address to force the fault. */
6314 ARM_LDR_IMM (code, dreg, dreg, 0);
6316 mono_arch_flush_icache (code - 16, 16);
/* Disabled alternative implementation kept for reference: */
6319 /* This is currently implemented by emitting an SWI instruction, which
6320 * qemu/linux seems to convert to a SIGILL.
6322 *(int*)code = (0xef << 24) | 8;
6324 mono_arch_flush_icache (code - 4, 4);
6330 * mono_arch_clear_breakpoint:
6332 * Clear the breakpoint at IP.
6335 mono_arch_clear_breakpoint (MonoJitInfo *ji, guint8 *ip)
6337 MonoDebugOptions *opt = mini_get_debug_options ();
/* Soft breakpoints: the 4 byte patch installed by mono_arch_set_breakpoint ()
 * is replaced (the replacement instruction is elided in this listing —
 * presumably a nop; verify against the full source) and the icache flushed. */
6341 if (opt->soft_breakpoints) {
6342 g_assert (!ji->from_aot);
6345 mono_arch_flush_icache (code - 4, 4);
/* AOT case: clear the per-offset trigger page entry; it must currently hold
 * bp_trigger_page, i.e. the breakpoint must actually be set. */
6346 } else if (ji->from_aot) {
6347 guint32 native_offset = ip - (guint8*)ji->code_start;
6348 SeqPointInfo *info = mono_arch_get_seq_point_info (mono_domain_get (), ji->code_start);
6350 g_assert (native_offset % 4 == 0);
6351 g_assert (info->bp_addrs [native_offset / 4] == bp_trigger_page);
6352 info->bp_addrs [native_offset / 4] = 0;
/* JITted code: rewrite the 4 instruction breakpoint sequence (loop body
 * elided in this listing) and flush the whole patched range. */
6354 for (i = 0; i < 4; ++i)
6357 mono_arch_flush_icache (ip, code - ip);
6362 * mono_arch_start_single_stepping:
6364 * Start single stepping.
6367 mono_arch_start_single_stepping (void)
6369 if (ss_trigger_page)
6370 mono_mprotect (ss_trigger_page, mono_pagesize (), 0);
6376 * mono_arch_stop_single_stepping:
6378 * Stop single stepping.
6381 mono_arch_stop_single_stepping (void)
6383 if (ss_trigger_page)
6384 mono_mprotect (ss_trigger_page, mono_pagesize (), MONO_MMAP_READ);
6390 #define DBG_SIGNAL SIGBUS
6392 #define DBG_SIGNAL SIGSEGV
6396 * mono_arch_is_single_step_event:
6398 * Return whenever the machine state in SIGCTX corresponds to a single
/* (the rest of this doc comment is elided in this listing) */
6402 mono_arch_is_single_step_event (void *info, void *sigctx)
6404 siginfo_t *sinfo = info;
/* If the trigger page was never allocated, single stepping is not in use,
 * so this cannot be a single step event. */
6406 if (!ss_trigger_page)
/* A fault whose address lies in (or within 128 bytes past) the SS trigger
 * page is a single step event. */
6409 /* Sometimes the address is off by 4 */
6410 if (sinfo->si_addr >= ss_trigger_page && (guint8*)sinfo->si_addr <= (guint8*)ss_trigger_page + 128)
6417 * mono_arch_is_breakpoint_event:
6419 * Return whenever the machine state in SIGCTX corresponds to a breakpoint event.
6422 mono_arch_is_breakpoint_event (void *info, void *sigctx)
6424 siginfo_t *sinfo = info;
/* Breakpoints share the trigger-page infrastructure with single stepping;
 * if the pages were never set up this cannot be a breakpoint event. */
6426 if (!ss_trigger_page)
/* Only the expected debug signal (DBG_SIGNAL: SIGSEGV, or SIGBUS on some
 * platforms) with a fault address inside the BP trigger page qualifies. */
6429 if (sinfo->si_signo == DBG_SIGNAL) {
6430 /* Sometimes the address is off by 4 */
6431 if (sinfo->si_addr >= bp_trigger_page && (guint8*)sinfo->si_addr <= (guint8*)bp_trigger_page + 128)
6441 * mono_arch_skip_breakpoint:
6443 * See mini-amd64.c for docs.
6446 mono_arch_skip_breakpoint (MonoContext *ctx, MonoJitInfo *ji)
6448 MONO_CONTEXT_SET_IP (ctx, (guint8*)MONO_CONTEXT_GET_IP (ctx) + 4);
6452 * mono_arch_skip_single_step:
6454 * See mini-amd64.c for docs.
6457 mono_arch_skip_single_step (MonoContext *ctx)
6459 MONO_CONTEXT_SET_IP (ctx, (guint8*)MONO_CONTEXT_GET_IP (ctx) + 4);
6462 #endif /* MONO_ARCH_SOFT_DEBUG_SUPPORTED */
6465 * mono_arch_get_seq_point_info:
6467 * See mini-amd64.c for docs.
/* Returns the per-method SeqPointInfo for the method starting at CODE in
 * DOMAIN, creating and caching one on first request. */
6470 mono_arch_get_seq_point_info (MonoDomain *domain, guint8 *code)
6475 // FIXME: Add a free function
/* Fast path: look up an existing entry under the domain lock. */
6477 mono_domain_lock (domain);
6478 info = g_hash_table_lookup (domain_jit_info (domain)->arch_seq_points,
6480 mono_domain_unlock (domain);
/* Slow path: find the JIT info so the SeqPointInfo can be sized to cover
 * one slot per code byte range (sizeof (SeqPointInfo) + code_size). */
6483 ji = mono_jit_info_table_find (domain, (char*)code);
6486 info = g_malloc0 (sizeof (SeqPointInfo) + ji->code_size);
6488 info->ss_trigger_page = ss_trigger_page;
6489 info->bp_trigger_page = bp_trigger_page;
/* Publish the new entry under the domain lock.
 * NOTE(review): the lock is dropped between lookup and insert, so two
 * callers could race and build duplicate entries — confirm against the
 * elided lines / callers whether this is tolerated. */
6491 mono_domain_lock (domain);
6492 g_hash_table_insert (domain_jit_info (domain)->arch_seq_points,
6494 mono_domain_unlock (domain);
6501 * mono_arch_set_target:
6503 * Set the target architecture the JIT backend should generate code for, in the form
6504 * of a GNU target triplet. Only used in AOT mode.
6507 mono_arch_set_target (char *mtriple)
6509 /* The GNU target triple format is not very well documented */
/* Each check is a plain substring match; the checks are cumulative, so
 * e.g. "armv7s" also matches the "armv7" test above it. */
6510 if (strstr (mtriple, "armv7")) {
/* armv7 implies v6 support as well. */
6511 v6_supported = TRUE;
6512 v7_supported = TRUE;
6514 if (strstr (mtriple, "armv6")) {
6515 v6_supported = TRUE;
6517 if (strstr (mtriple, "armv7s")) {
6518 v7s_supported = TRUE;
6520 if (strstr (mtriple, "thumbv7s")) {
6521 v7s_supported = TRUE;
6522 thumb2_supported = TRUE;
/* Darwin/iOS targets always have at least armv5 with Thumb. */
6524 if (strstr (mtriple, "darwin") || strstr (mtriple, "ios")) {
6525 v5_supported = TRUE;
6526 thumb_supported = TRUE;
/* gnueabi triples select the ARM EABI calling convention. */
6530 if (strstr (mtriple, "gnueabi"))
6531 eabi_supported = TRUE;
6534 #if defined(MONOTOUCH) || defined(MONO_EXTENSIONS)
6536 #include "../../../mono-extensions/mono/mini/mini-arm-gsharedvt.c"
6538 #endif /* !MONOTOUCH */