2 * mini-arm.c: ARM backend for the Mono code generator
5 * Paolo Molaro (lupus@ximian.com)
6 * Dietmar Maurer (dietmar@ximian.com)
8 * (C) 2003 Ximian, Inc.
9 * Copyright 2003-2011 Novell, Inc (http://www.novell.com)
10 * Copyright 2011 Xamarin, Inc (http://www.xamarin.com)
15 #include <mono/metadata/abi-details.h>
16 #include <mono/metadata/appdomain.h>
17 #include <mono/metadata/profiler-private.h>
18 #include <mono/metadata/debug-helpers.h>
19 #include <mono/utils/mono-mmap.h>
20 #include <mono/utils/mono-hwcap-arm.h>
26 #include "debugger-agent.h"
28 #include "mono/arch/arm/arm-vfp-codegen.h"
30 /* Sanity check: This makes no sense */
31 #if defined(ARM_FPU_NONE) && (defined(ARM_FPU_VFP) || defined(ARM_FPU_VFP_HARD))
32 #error "ARM_FPU_NONE is defined while one of ARM_FPU_VFP/ARM_FPU_VFP_HARD is defined"
36 * IS_SOFT_FLOAT: Is full software floating point used?
37 * IS_HARD_FLOAT: Is full hardware floating point used?
38 * IS_VFP: Is hardware floating point with software ABI used?
40 * These are not necessarily constants, e.g. IS_SOFT_FLOAT and
41 * IS_VFP may delegate to mono_arch_is_soft_float ().
44 #if defined(ARM_FPU_VFP_HARD)
45 #define IS_SOFT_FLOAT (FALSE)
46 #define IS_HARD_FLOAT (TRUE)
48 #elif defined(ARM_FPU_NONE)
49 #define IS_SOFT_FLOAT (mono_arch_is_soft_float ())
50 #define IS_HARD_FLOAT (FALSE)
51 #define IS_VFP (!mono_arch_is_soft_float ())
53 #define IS_SOFT_FLOAT (FALSE)
54 #define IS_HARD_FLOAT (FALSE)
58 #if defined(__ARM_EABI__) && defined(__linux__) && !defined(PLATFORM_ANDROID) && !defined(__native_client__)
59 #define HAVE_AEABI_READ_TP 1
62 #ifdef __native_client_codegen__
63 const guint kNaClAlignment = kNaClAlignmentARM;
64 const guint kNaClAlignmentMask = kNaClAlignmentMaskARM;
65 gint8 nacl_align_byte = -1; /* 0xff */
68 mono_arch_nacl_pad (guint8 *code, int pad)
70 /* Not yet properly implemented. */
71 g_assert_not_reached ();
76 mono_arch_nacl_skip_nops (guint8 *code)
78 /* Not yet properly implemented. */
79 g_assert_not_reached ();
83 #endif /* __native_client_codegen__ */
/* Round (val) up to the next multiple of (align); align must be a power of two.
 * (val) is parenthesized before the cast so expression arguments bind correctly. */
#define ALIGN_TO(val,align) ((((guint64)(val)) + ((align) - 1)) & ~((align) - 1))
88 void sys_icache_invalidate (void *start, size_t len);
91 /* This mutex protects architecture specific caches */
92 #define mono_mini_arch_lock() mono_mutex_lock (&mini_arch_mutex)
93 #define mono_mini_arch_unlock() mono_mutex_unlock (&mini_arch_mutex)
94 static mono_mutex_t mini_arch_mutex;
96 static gboolean v5_supported = FALSE;
97 static gboolean v6_supported = FALSE;
98 static gboolean v7_supported = FALSE;
99 static gboolean v7s_supported = FALSE;
100 static gboolean thumb_supported = FALSE;
101 static gboolean thumb2_supported = FALSE;
103 * Whenever to use the ARM EABI
105 static gboolean eabi_supported = FALSE;
108 * Whenever to use the iphone ABI extensions:
109 * http://developer.apple.com/library/ios/documentation/Xcode/Conceptual/iPhoneOSABIReference/index.html
110 * Basically, r7 is used as a frame pointer and it should point to the saved r7 + lr.
111 * This is required for debugging/profiling tools to work, but it has some overhead so it should
112 * only be turned on in debug builds.
114 static gboolean iphone_abi = FALSE;
117 * The FPU we are generating code for. This is NOT runtime configurable right now,
118 * since some things like MONO_ARCH_CALLEE_FREGS still depend on defines.
120 static MonoArmFPU arm_fpu;
122 #if defined(ARM_FPU_VFP_HARD)
124 * On armhf, d0-d7 are used for argument passing and d8-d15
125 * must be preserved across calls, which leaves us no room
126 * for scratch registers. So we use d14-d15 but back up their
127 * previous contents to a stack slot before using them - see
128 * mono_arm_emit_vfp_scratch_save/_restore ().
130 static int vfp_scratch1 = ARM_VFP_D14;
131 static int vfp_scratch2 = ARM_VFP_D15;
134 * On armel, d0-d7 do not need to be preserved, so we can
135 * freely make use of them as scratch registers.
137 static int vfp_scratch1 = ARM_VFP_D0;
138 static int vfp_scratch2 = ARM_VFP_D1;
143 static volatile int ss_trigger_var = 0;
145 static gpointer single_step_func_wrapper;
146 static gpointer breakpoint_func_wrapper;
149 * The code generated for sequence points reads from this location, which is
150 * made read-only when single stepping is enabled.
152 static gpointer ss_trigger_page;
154 /* Enabled breakpoints read from this trigger page */
155 static gpointer bp_trigger_page;
159 * floating point support: on ARM it is a mess, there are at least 3
160 * different setups, each of which binary incompat with the other.
161 * 1) FPA: old and ugly, but unfortunately what current distros use
162 * the double binary format has the two words swapped. 8 double registers.
163 * Implemented usually by kernel emulation.
164 * 2) softfloat: the compiler emulates all the fp ops. Usually uses the
165 * ugly swapped double format (I guess a softfloat-vfp exists, too, though).
166 * 3) VFP: the new and actually sensible and useful FP support. Implemented
167 * in HW or kernel-emulated, requires new tools. I think this is what symbian uses.
169 * We do not care about FPA. We will support soft float and VFP.
171 int mono_exc_esp_offset = 0;
173 #define arm_is_imm12(v) ((v) > -4096 && (v) < 4096)
174 #define arm_is_imm8(v) ((v) > -256 && (v) < 256)
175 #define arm_is_fpimm8(v) ((v) >= -1020 && (v) <= 1020)
177 #define LDR_MASK ((0xf << ARMCOND_SHIFT) | (3 << 26) | (1 << 22) | (1 << 20) | (15 << 12))
178 #define LDR_PC_VAL ((ARMCOND_AL << ARMCOND_SHIFT) | (1 << 26) | (0 << 22) | (1 << 20) | (15 << 12))
179 #define IS_LDR_PC(val) (((val) & LDR_MASK) == LDR_PC_VAL)
181 //#define DEBUG_IMT 0
184 static void mono_arch_compute_omit_fp (MonoCompile *cfg);
188 mono_arch_regname (int reg)
190 static const char * rnames[] = {
191 "arm_r0", "arm_r1", "arm_r2", "arm_r3", "arm_v1",
192 "arm_v2", "arm_v3", "arm_v4", "arm_v5", "arm_v6",
193 "arm_v7", "arm_fp", "arm_ip", "arm_sp", "arm_lr",
196 if (reg >= 0 && reg < 16)
202 mono_arch_fregname (int reg)
204 static const char * rnames[] = {
205 "arm_f0", "arm_f1", "arm_f2", "arm_f3", "arm_f4",
206 "arm_f5", "arm_f6", "arm_f7", "arm_f8", "arm_f9",
207 "arm_f10", "arm_f11", "arm_f12", "arm_f13", "arm_f14",
208 "arm_f15", "arm_f16", "arm_f17", "arm_f18", "arm_f19",
209 "arm_f20", "arm_f21", "arm_f22", "arm_f23", "arm_f24",
210 "arm_f25", "arm_f26", "arm_f27", "arm_f28", "arm_f29",
213 if (reg >= 0 && reg < 32)
221 emit_big_add (guint8 *code, int dreg, int sreg, int imm)
223 int imm8, rot_amount;
224 if ((imm8 = mono_arm_is_rotated_imm8 (imm, &rot_amount)) >= 0) {
225 ARM_ADD_REG_IMM (code, dreg, sreg, imm8, rot_amount);
229 code = mono_arm_emit_load_imm (code, ARMREG_IP, imm);
230 ARM_ADD_REG_REG (code, dreg, sreg, ARMREG_IP);
232 code = mono_arm_emit_load_imm (code, dreg, imm);
233 ARM_ADD_REG_REG (code, dreg, dreg, sreg);
238 /* If dreg == sreg, this clobbers IP */
240 emit_sub_imm (guint8 *code, int dreg, int sreg, int imm)
242 int imm8, rot_amount;
243 if ((imm8 = mono_arm_is_rotated_imm8 (imm, &rot_amount)) >= 0) {
244 ARM_SUB_REG_IMM (code, dreg, sreg, imm8, rot_amount);
248 code = mono_arm_emit_load_imm (code, ARMREG_IP, imm);
249 ARM_SUB_REG_REG (code, dreg, sreg, ARMREG_IP);
251 code = mono_arm_emit_load_imm (code, dreg, imm);
252 ARM_SUB_REG_REG (code, dreg, dreg, sreg);
/*
 * emit_memcpy:
 *   Emit code copying SIZE bytes from sreg+soffset to dreg+doffset.
 *   Large copies (> 4 words) use a word-wise countdown loop through r0-r3;
 *   small copies are unrolled word loads/stores via LR.
 */
258 emit_memcpy (guint8 *code, int size, int dreg, int doffset, int sreg, int soffset)
260 /* we can use r0-r3, since this is called only for incoming args on the stack */
261 if (size > sizeof (gpointer) * 4) {
/* Large copy: r0 = src cursor, r1 = dst cursor, r2 = remaining byte count. */
263 code = emit_big_add (code, ARMREG_R0, sreg, soffset);
264 code = emit_big_add (code, ARMREG_R1, dreg, doffset);
265 start_loop = code = mono_arm_emit_load_imm (code, ARMREG_R2, size);
266 ARM_LDR_IMM (code, ARMREG_R3, ARMREG_R0, 0);
267 ARM_STR_IMM (code, ARMREG_R3, ARMREG_R1, 0);
268 ARM_ADD_REG_IMM8 (code, ARMREG_R0, ARMREG_R0, 4);
269 ARM_ADD_REG_IMM8 (code, ARMREG_R1, ARMREG_R1, 4);
/* SUBS sets the flags; loop back while the count has not reached zero. */
270 ARM_SUBS_REG_IMM8 (code, ARMREG_R2, ARMREG_R2, 4);
271 ARM_B_COND (code, ARMCOND_NE, 0);
272 arm_patch (code - 4, start_loop);
/* Small copy with offsets that fit LDR/STR 12-bit immediates: unrolled. */
275 if (arm_is_imm12 (doffset) && arm_is_imm12 (doffset + size) &&
276 arm_is_imm12 (soffset) && arm_is_imm12 (soffset + size)) {
278 ARM_LDR_IMM (code, ARMREG_LR, sreg, soffset);
279 ARM_STR_IMM (code, ARMREG_LR, dreg, doffset);
/* Offsets too big for imm12: rebase both cursors into r0/r1 first. */
285 code = emit_big_add (code, ARMREG_R0, sreg, soffset);
286 code = emit_big_add (code, ARMREG_R1, dreg, doffset);
287 doffset = soffset = 0;
289 ARM_LDR_IMM (code, ARMREG_LR, ARMREG_R0, soffset);
290 ARM_STR_IMM (code, ARMREG_LR, ARMREG_R1, doffset);
/* Only whole words are handled: size must have been a multiple of 4. */
296 g_assert (size == 0);
/*
 * emit_call_reg:
 *   Emit an indirect call through REG.  Uses BLX where the emitter supports
 *   it; the fallback loads LR from PC and branches via MOV to PC.
 */
301 emit_call_reg (guint8 *code, int reg)
304 ARM_BLX_REG (code, reg);
306 #ifdef USE_JUMP_TABLES
307 g_assert_not_reached ();
/* Fallback: PC reads as current insn + 8, so this leaves LR = return addr. */
309 ARM_MOV_REG_REG (code, ARMREG_LR, ARMREG_PC);
313 ARM_MOV_REG_REG (code, ARMREG_PC, reg);
/*
 * emit_call_seq:
 *   Emit a patchable call sequence.  With jump tables a patchable BL is used;
 *   otherwise dynamic methods embed a NULL slot (patched later) and call
 *   through IP.
 */
319 emit_call_seq (MonoCompile *cfg, guint8 *code)
321 #ifdef USE_JUMP_TABLES
322 code = mono_arm_patchable_bl (code, ARMCOND_AL);
324 if (cfg->method->dynamic) {
/* Load the (to-be-patched) target embedded right after this instruction. */
325 ARM_LDR_IMM (code, ARMREG_IP, ARMREG_PC, 0);
327 *(gpointer*)code = NULL;
329 code = emit_call_reg (code, ARMREG_IP);
/*
 * mono_arm_patchable_b:
 *   Emit a conditional branch that can later be patched to any target:
 *   either an indirect BX through a jump-table entry, or a plain B with a
 *   zero displacement to be fixed up by arm_patch ().
 */
338 mono_arm_patchable_b (guint8 *code, int cond)
340 #ifdef USE_JUMP_TABLES
343 jte = mono_jumptable_add_entry ();
344 code = mono_arm_load_jumptable_entry (code, jte, ARMREG_IP);
345 ARM_BX_COND (code, cond, ARMREG_IP);
347 ARM_B_COND (code, cond, 0);
/*
 * mono_arm_patchable_bl:
 *   Like mono_arm_patchable_b () but for calls: BLX through a jump-table
 *   entry, or a plain BL with a zero displacement patched later.
 */
353 mono_arm_patchable_bl (guint8 *code, int cond)
355 #ifdef USE_JUMP_TABLES
358 jte = mono_jumptable_add_entry ();
359 code = mono_arm_load_jumptable_entry (code, jte, ARMREG_IP);
360 ARM_BLX_REG_COND (code, cond, ARMREG_IP);
362 ARM_BL_COND (code, cond, 0);
367 #ifdef USE_JUMP_TABLES
369 mono_arm_load_jumptable_entry_addr (guint8 *code, gpointer *jte, ARMReg reg)
371 ARM_MOVW_REG_IMM (code, reg, GPOINTER_TO_UINT(jte) & 0xffff);
372 ARM_MOVT_REG_IMM (code, reg, (GPOINTER_TO_UINT(jte) >> 16) & 0xffff);
377 mono_arm_load_jumptable_entry (guint8 *code, gpointer* jte, ARMReg reg)
379 code = mono_arm_load_jumptable_entry_addr (code, jte, reg);
380 ARM_LDR_IMM (code, reg, reg, 0);
/*
 * emit_move_return_value:
 *   After a call, move the native return value into ins->dreg.  For float
 *   returns this bridges the soft-float ABI (value in r0/r0:r1) or the VFP
 *   ABI (value in s0/d0) into the VFP register the JIT expects.
 */
386 emit_move_return_value (MonoCompile *cfg, MonoInst *ins, guint8 *code)
388 switch (ins->opcode) {
391 case OP_FCALL_MEMBASE:
393 MonoType *sig_ret = mini_type_get_underlying_type (NULL, ((MonoCallInst*)ins)->signature->ret);
394 if (sig_ret->type == MONO_TYPE_R4) {
/* R4 return in s0 (hard-float): widen single to double into dreg. */
396 ARM_CVTS (code, ins->dreg, ARM_VFP_F0);
/* R4 return in r0 (softfp): move the bits to VFP, then widen. */
398 ARM_FMSR (code, ins->dreg, ARMREG_R0);
399 ARM_CVTS (code, ins->dreg, ins->dreg);
/* R8 return in d0 (hard-float): plain double copy. */
403 ARM_CPYD (code, ins->dreg, ARM_VFP_D0);
/* R8 return in r0:r1 (softfp): assemble the double from the reg pair. */
405 ARM_FMDRR (code, ARMREG_R0, ARMREG_R1, ins->dreg);
418 * Emit code to push an LMF structure on the LMF stack.
419 * On arm, this is intermixed with the initialization of other fields of the structure.
/*
 * emit_save_lmf:
 *   Obtain the thread's lmf_addr (via __aeabi_read_tp, an inlined
 *   pthread_getspecific, or mono_get_lmf_addr), then link the MonoLMF being
 *   built at sp+lmf_offset into the LMF list.  r0/r1/ip are used as scratch.
 */
422 emit_save_lmf (MonoCompile *cfg, guint8 *code, gint32 lmf_offset)
424 gboolean get_lmf_fast = FALSE;
427 #ifdef HAVE_AEABI_READ_TP
428 gint32 lmf_addr_tls_offset = mono_get_lmf_addr_tls_offset ();
430 if (lmf_addr_tls_offset != -1) {
/* Fast path: read the TLS base and load lmf_addr at its known offset. */
433 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_INTERNAL_METHOD,
434 (gpointer)"__aeabi_read_tp");
435 code = emit_call_seq (cfg, code);
437 ARM_LDR_IMM (code, ARMREG_R0, ARMREG_R0, lmf_addr_tls_offset);
443 if (cfg->method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) {
446 /* Inline mono_get_lmf_addr () */
447 /* jit_tls = pthread_getspecific (mono_jit_tls_id); lmf_addr = &jit_tls->lmf; */
449 /* Load mono_jit_tls_id */
451 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_JIT_TLS_ID, NULL);
452 ARM_LDR_IMM (code, ARMREG_R0, ARMREG_PC, 0);
/* NULL slot patched at runtime with the actual TLS key. */
454 *(gpointer*)code = NULL;
456 ARM_LDR_REG_REG (code, ARMREG_R0, ARMREG_PC, ARMREG_R0);
457 /* call pthread_getspecific () */
458 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_INTERNAL_METHOD,
459 (gpointer)"pthread_getspecific");
460 code = emit_call_seq (cfg, code);
461 /* lmf_addr = &jit_tls->lmf */
462 lmf_offset = MONO_STRUCT_OFFSET (MonoJitTlsData, lmf);
463 g_assert (arm_is_imm8 (lmf_offset));
464 ARM_ADD_REG_IMM (code, ARMREG_R0, ARMREG_R0, lmf_offset, 0);
/* Slow path: plain call to mono_get_lmf_addr (). */
471 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_INTERNAL_METHOD,
472 (gpointer)"mono_get_lmf_addr");
473 code = emit_call_seq (cfg, code);
475 /* we build the MonoLMF structure on the stack - see mini-arm.h */
476 /* lmf_offset is the offset from the previous stack pointer,
477 * alloc_size is the total stack space allocated, so the offset
478 * of MonoLMF from the current stack ptr is alloc_size - lmf_offset.
479 * The pointer to the struct is put in r1 (new_lmf).
480 * ip is used as scratch
481 * The callee-saved registers are already in the MonoLMF structure
483 code = emit_big_add (code, ARMREG_R1, ARMREG_SP, lmf_offset);
484 /* r0 is the result from mono_get_lmf_addr () */
485 ARM_STR_IMM (code, ARMREG_R0, ARMREG_R1, MONO_STRUCT_OFFSET (MonoLMF, lmf_addr));
486 /* new_lmf->previous_lmf = *lmf_addr */
487 ARM_LDR_IMM (code, ARMREG_IP, ARMREG_R0, MONO_STRUCT_OFFSET (MonoLMF, previous_lmf));
488 ARM_STR_IMM (code, ARMREG_IP, ARMREG_R1, MONO_STRUCT_OFFSET (MonoLMF, previous_lmf));
489 /* *(lmf_addr) = r1 */
490 ARM_STR_IMM (code, ARMREG_R1, ARMREG_R0, MONO_STRUCT_OFFSET (MonoLMF, previous_lmf));
491 /* Skip method (only needed for trampoline LMF frames) */
492 ARM_STR_IMM (code, ARMREG_SP, ARMREG_R1, MONO_STRUCT_OFFSET (MonoLMF, sp))
493 ARM_STR_IMM (code, ARMREG_FP, ARMREG_R1, MONO_STRUCT_OFFSET (MonoLMF, fp));
494 /* save the current IP */
495 ARM_MOV_REG_REG (code, ARMREG_IP, ARMREG_PC);
496 ARM_STR_IMM (code, ARMREG_IP, ARMREG_R1, MONO_STRUCT_OFFSET (MonoLMF, ip));
/* Tell the precise GC none of the LMF slots hold managed references. */
498 for (i = 0; i < sizeof (MonoLMF); i += sizeof (mgreg_t))
499 mini_gc_set_slot_type_from_fp (cfg, lmf_offset + i, SLOT_NOREF);
/*
 * emit_float_args:
 *   Load each single-precision float argument recorded in inst->float_args
 *   from its stack slot into its assigned VFP hreg, growing the native code
 *   buffer if needed.  *offset tracks the current position in native_code.
 */
510 emit_float_args (MonoCompile *cfg, MonoCallInst *inst, guint8 *code, int *max_len, guint *offset)
514 for (list = inst->float_args; list; list = list->next) {
515 FloatArgData *fad = list->data;
516 MonoInst *var = get_vreg_to_inst (cfg, fad->vreg);
517 gboolean imm = arm_is_fpimm8 (var->inst_offset);
519 /* 4+1 insns for emit_big_add () and 1 for FLDS. */
/* Make room in the code buffer before emitting. */
525 if (*offset + *max_len > cfg->code_size) {
526 cfg->code_size += *max_len;
527 cfg->native_code = g_realloc (cfg->native_code, cfg->code_size);
529 code = cfg->native_code + *offset;
/* Offset out of FLDS imm8 range: compute the address into LR first. */
533 code = emit_big_add (code, ARMREG_LR, var->inst_basereg, var->inst_offset);
534 ARM_FLDS (code, fad->hreg, ARMREG_LR, 0);
536 ARM_FLDS (code, fad->hreg, var->inst_basereg, var->inst_offset);
538 *offset = code - cfg->native_code;
545 mono_arm_emit_vfp_scratch_save (MonoCompile *cfg, guint8 *code, int reg)
549 g_assert (reg == vfp_scratch1 || reg == vfp_scratch2);
551 inst = (MonoInst *) cfg->arch.vfp_scratch_slots [reg == vfp_scratch1 ? 0 : 1];
554 if (!arm_is_fpimm8 (inst->inst_offset)) {
555 code = emit_big_add (code, ARMREG_LR, inst->inst_basereg, inst->inst_offset);
556 ARM_FSTD (code, reg, ARMREG_LR, 0);
558 ARM_FSTD (code, reg, inst->inst_basereg, inst->inst_offset);
565 mono_arm_emit_vfp_scratch_restore (MonoCompile *cfg, guint8 *code, int reg)
569 g_assert (reg == vfp_scratch1 || reg == vfp_scratch2);
571 inst = (MonoInst *) cfg->arch.vfp_scratch_slots [reg == vfp_scratch1 ? 0 : 1];
574 if (!arm_is_fpimm8 (inst->inst_offset)) {
575 code = emit_big_add (code, ARMREG_LR, inst->inst_basereg, inst->inst_offset);
576 ARM_FLDD (code, reg, ARMREG_LR, 0);
578 ARM_FLDD (code, reg, inst->inst_basereg, inst->inst_offset);
587 * Emit code to pop an LMF structure from the LMF stack.
/*
 * emit_restore_lmf:
 *   Unlink the current MonoLMF: *(lmf_addr) = previous_lmf.  Small offsets
 *   address the LMF directly off the frame register; large ones rebase into
 *   r2 first.  ip and lr are used as scratch.
 */
590 emit_restore_lmf (MonoCompile *cfg, guint8 *code, gint32 lmf_offset)
594 if (lmf_offset < 32) {
595 basereg = cfg->frame_reg;
600 code = emit_big_add (code, ARMREG_R2, cfg->frame_reg, lmf_offset);
603 /* ip = previous_lmf */
604 ARM_LDR_IMM (code, ARMREG_IP, basereg, offset + MONO_STRUCT_OFFSET (MonoLMF, previous_lmf));
/* lr = lmf_addr */
606 ARM_LDR_IMM (code, ARMREG_LR, basereg, offset + MONO_STRUCT_OFFSET (MonoLMF, lmf_addr));
607 /* *(lmf_addr) = previous_lmf */
608 ARM_STR_IMM (code, ARMREG_IP, ARMREG_LR, MONO_STRUCT_OFFSET (MonoLMF, previous_lmf));
613 #endif /* #ifndef DISABLE_JIT */
616 * mono_arch_get_argument_info:
617 * @csig: a method signature
618 * @param_count: the number of parameters to consider
619 * @arg_info: an array to store the result infos
621 * Gathers information on parameters such as size, alignment and
622 * padding. arg_info should be large enought to hold param_count + 1 entries.
624 * Returns the size of the activation frame.
627 mono_arch_get_argument_info (MonoGenericSharingContext *gsctx, MonoMethodSignature *csig, int param_count, MonoJitArgumentInfo *arg_info)
629 int k, frame_size = 0;
630 guint32 size, align, pad;
/* Struct returns occupy a hidden pointer slot at the front of the frame. */
634 t = mini_type_get_underlying_type (gsctx, csig->ret);
635 if (MONO_TYPE_ISSTRUCT (t)) {
636 frame_size += sizeof (gpointer);
640 arg_info [0].offset = offset;
/* 'this' takes one pointer slot as well. */
643 frame_size += sizeof (gpointer);
647 arg_info [0].size = frame_size;
649 for (k = 0; k < param_count; k++) {
650 size = mini_type_stack_size_full (NULL, csig->params [k], &align, csig->pinvoke);
652 /* ignore alignment for now */
/* Pad the running frame size up to this parameter's alignment. */
655 frame_size += pad = (align - (frame_size & (align - 1))) & (align - 1);
656 arg_info [k].pad = pad;
658 arg_info [k + 1].pad = 0;
659 arg_info [k + 1].size = size;
661 arg_info [k + 1].offset = offset;
/* Finally round the whole frame to the ABI frame alignment. */
665 align = MONO_ARCH_FRAME_ALIGNMENT;
666 frame_size += pad = (align - (frame_size & (align - 1))) & (align - 1);
667 arg_info [k].pad = pad;
672 #define MAX_ARCH_DELEGATE_PARAMS 3
/*
 * get_delegate_invoke_impl:
 *   Generate the delegate invoke thunk.  With has_target the 'this' arg is
 *   replaced by delegate->target and control jumps to method_ptr; otherwise
 *   the first param_count args slide down one register before the jump.
 *   NOTE(review): param_count is declared gboolean but used as a count
 *   (loop bound, size arithmetic) — should probably be int; behavior is
 *   unchanged since gboolean is an int typedef.
 */
675 get_delegate_invoke_impl (gboolean has_target, gboolean param_count, guint32 *code_size)
677 guint8 *code, *start;
680 start = code = mono_global_codeman_reserve (12);
682 /* Replace the this argument with the target */
683 ARM_LDR_IMM (code, ARMREG_IP, ARMREG_R0, MONO_STRUCT_OFFSET (MonoDelegate, method_ptr));
684 ARM_LDR_IMM (code, ARMREG_R0, ARMREG_R0, MONO_STRUCT_OFFSET (MonoDelegate, target));
685 ARM_MOV_REG_REG (code, ARMREG_PC, ARMREG_IP);
687 g_assert ((code - start) <= 12);
689 mono_arch_flush_icache (start, 12);
/* No target: 2 fixed insns + one MOV per parameter. */
693 size = 8 + param_count * 4;
694 start = code = mono_global_codeman_reserve (size);
696 ARM_LDR_IMM (code, ARMREG_IP, ARMREG_R0, MONO_STRUCT_OFFSET (MonoDelegate, method_ptr));
697 /* slide down the arguments */
698 for (i = 0; i < param_count; ++i) {
699 ARM_MOV_REG_REG (code, (ARMREG_R0 + i), (ARMREG_R0 + i + 1));
701 ARM_MOV_REG_REG (code, ARMREG_PC, ARMREG_IP);
703 g_assert ((code - start) <= size);
705 mono_arch_flush_icache (start, size);
708 mono_profiler_code_buffer_new (start, code - start, MONO_PROFILER_CODE_BUFFER_DELEGATE_INVOKE, NULL);
710 *code_size = code - start;
716 * mono_arch_get_delegate_invoke_impls:
718 * Return a list of MonoAotTrampInfo structures for the delegate invoke impl
/* Builds the has-target thunk plus one no-target thunk per supported arity
 * (0 .. MAX_ARCH_DELEGATE_PARAMS), for AOT compilation. */
722 mono_arch_get_delegate_invoke_impls (void)
730 code = get_delegate_invoke_impl (TRUE, 0, &code_len);
731 res = g_slist_prepend (res, mono_tramp_info_create ("delegate_invoke_impl_has_target", code, code_len, NULL, NULL));
733 for (i = 0; i <= MAX_ARCH_DELEGATE_PARAMS; ++i) {
734 code = get_delegate_invoke_impl (FALSE, i, &code_len);
735 tramp_name = g_strdup_printf ("delegate_invoke_impl_target_%d", i);
736 res = g_slist_prepend (res, mono_tramp_info_create (tramp_name, code, code_len, NULL, NULL));
/*
 * mono_arch_get_delegate_invoke_impl:
 *   Return (and cache, under the arch lock) the delegate invoke thunk for
 *   SIG.  In AOT mode the precompiled trampoline is looked up by name;
 *   otherwise it is generated via get_delegate_invoke_impl ().  Returns
 *   NULL for unsupported signatures (struct returns, too many or
 *   non-regsize params).
 */
744 mono_arch_get_delegate_invoke_impl (MonoMethodSignature *sig, gboolean has_target)
746 guint8 *code, *start;
749 /* FIXME: Support more cases */
750 sig_ret = mini_type_get_underlying_type (NULL, sig->ret);
751 if (MONO_TYPE_ISSTRUCT (sig_ret))
/* has_target case: a single cached thunk covers all signatures. */
755 static guint8* cached = NULL;
756 mono_mini_arch_lock ();
758 mono_mini_arch_unlock ();
763 start = mono_aot_get_trampoline ("delegate_invoke_impl_has_target");
765 start = get_delegate_invoke_impl (TRUE, 0, NULL);
767 mono_mini_arch_unlock ();
/* No-target case: one cached thunk per parameter count. */
770 static guint8* cache [MAX_ARCH_DELEGATE_PARAMS + 1] = {NULL};
773 if (sig->param_count > MAX_ARCH_DELEGATE_PARAMS)
775 for (i = 0; i < sig->param_count; ++i)
776 if (!mono_is_regsize_var (sig->params [i]))
779 mono_mini_arch_lock ();
780 code = cache [sig->param_count];
782 mono_mini_arch_unlock ();
787 char *name = g_strdup_printf ("delegate_invoke_impl_target_%d", sig->param_count);
788 start = mono_aot_get_trampoline (name);
791 start = get_delegate_invoke_impl (FALSE, sig->param_count, NULL);
793 cache [sig->param_count] = start;
794 mono_mini_arch_unlock ();
802 mono_arch_get_delegate_virtual_invoke_impl (MonoMethodSignature *sig, MonoMethod *method, int offset, gboolean load_imt_reg)
808 mono_arch_get_this_arg_from_call (mgreg_t *regs, guint8 *code)
810 return (gpointer)regs [ARMREG_R0];
814 * Initialize the cpu to execute managed code.
/* Records the i8 alignment of the build host; when cross compiling, the
 * target alignment can differ, so it is overridden per target platform. */
817 mono_arch_cpu_init (void)
819 i8_align = MONO_ABI_ALIGNOF (gint64);
820 #ifdef MONO_CROSS_COMPILE
821 /* Need to set the alignment of i8 since it can different on the target */
822 #ifdef TARGET_ANDROID
824 mono_type_set_alignment (MONO_TYPE_I8, i8_align);
/*
 * create_function_wrapper:
 *   Generate a thunk that builds a full MonoContext on the stack, calls
 *   FUNCTION (MonoContext *ctx), then restores every register (including pc)
 *   from the possibly-modified context.  Used for the soft-debugger
 *   single-step/breakpoint entry points.
 */
830 create_function_wrapper (gpointer function)
832 guint8 *start, *code;
834 start = code = mono_global_codeman_reserve (96);
837 * Construct the MonoContext structure on the stack.
840 ARM_SUB_REG_IMM8 (code, ARMREG_SP, ARMREG_SP, sizeof (MonoContext));
842 /* save ip, lr and pc into their correspodings ctx.regs slots. */
/* lr (the return address of this thunk) doubles as the saved pc value. */
843 ARM_STR_IMM (code, ARMREG_IP, ARMREG_SP, MONO_STRUCT_OFFSET (MonoContext, regs) + sizeof (mgreg_t) * ARMREG_IP);
844 ARM_STR_IMM (code, ARMREG_LR, ARMREG_SP, MONO_STRUCT_OFFSET (MonoContext, regs) + 4 * ARMREG_LR);
845 ARM_STR_IMM (code, ARMREG_LR, ARMREG_SP, MONO_STRUCT_OFFSET (MonoContext, regs) + 4 * ARMREG_PC);
847 /* save r0..r10 and fp */
848 ARM_ADD_REG_IMM8 (code, ARMREG_IP, ARMREG_SP, MONO_STRUCT_OFFSET (MonoContext, regs))
849 ARM_STM (code, ARMREG_IP, 0x0fff);
851 /* now we can update fp. */
852 ARM_MOV_REG_REG (code, ARMREG_FP, ARMREG_SP);
854 /* make ctx.esp hold the actual value of sp at the beginning of this method. */
855 ARM_ADD_REG_IMM8 (code, ARMREG_R0, ARMREG_FP, sizeof (MonoContext));
856 ARM_STR_IMM (code, ARMREG_R0, ARMREG_IP, 4 * ARMREG_SP);
857 ARM_STR_IMM (code, ARMREG_R0, ARMREG_FP, MONO_STRUCT_OFFSET (MonoContext, regs) + 4 * ARMREG_SP);
859 /* make ctx.eip hold the address of the call. */
860 ARM_SUB_REG_IMM8 (code, ARMREG_LR, ARMREG_LR, 4);
861 ARM_STR_IMM (code, ARMREG_LR, ARMREG_SP, MONO_STRUCT_OFFSET (MonoContext, pc));
863 /* r0 now points to the MonoContext */
864 ARM_MOV_REG_REG (code, ARMREG_R0, ARMREG_FP);
/* Load the target function address, via jump table or inline literal. */
867 #ifdef USE_JUMP_TABLES
869 gpointer *jte = mono_jumptable_add_entry ();
870 code = mono_arm_load_jumptable_entry (code, jte, ARMREG_IP);
874 ARM_LDR_IMM (code, ARMREG_IP, ARMREG_PC, 0);
876 *(gpointer*)code = function;
879 ARM_BLX_REG (code, ARMREG_IP);
881 /* we're back; save ctx.eip and ctx.esp into the corresponding regs slots. */
882 ARM_LDR_IMM (code, ARMREG_R0, ARMREG_FP, MONO_STRUCT_OFFSET (MonoContext, pc));
883 ARM_STR_IMM (code, ARMREG_R0, ARMREG_FP, MONO_STRUCT_OFFSET (MonoContext, regs) + 4 * ARMREG_LR);
884 ARM_STR_IMM (code, ARMREG_R0, ARMREG_FP, MONO_STRUCT_OFFSET (MonoContext, regs) + 4 * ARMREG_PC);
886 /* make ip point to the regs array, then restore everything, including pc. */
887 ARM_ADD_REG_IMM8 (code, ARMREG_IP, ARMREG_FP, MONO_STRUCT_OFFSET (MonoContext, regs));
888 ARM_LDM (code, ARMREG_IP, 0xffff);
890 mono_arch_flush_icache (start, code - start);
891 mono_profiler_code_buffer_new (start, code - start, MONO_PROFILER_CODE_BUFFER_HELPER, NULL);
897 * Initialize architecture specific code.
/* One-time backend init: arch lock, soft-debugger wrappers and trigger
 * pages, AOT icall registration, FPU mode selection, and CPU feature
 * detection (overridable via the MONO_CPU_ARCH env var). */
900 mono_arch_init (void)
902 const char *cpu_arch;
904 mono_mutex_init_recursive (&mini_arch_mutex);
905 #ifdef MONO_ARCH_SOFT_DEBUG_SUPPORTED
906 if (mini_get_debug_options ()->soft_breakpoints) {
907 single_step_func_wrapper = create_function_wrapper (debugger_agent_single_step_from_context);
908 breakpoint_func_wrapper = create_function_wrapper (debugger_agent_breakpoint_from_context);
/* Hardware-breakpoint style: read-only trigger pages that fault on access. */
913 ss_trigger_page = mono_valloc (NULL, mono_pagesize (), MONO_MMAP_READ|MONO_MMAP_32BIT);
914 bp_trigger_page = mono_valloc (NULL, mono_pagesize (), MONO_MMAP_READ|MONO_MMAP_32BIT);
915 mono_mprotect (bp_trigger_page, mono_pagesize (), 0);
918 mono_aot_register_jit_icall ("mono_arm_throw_exception", mono_arm_throw_exception);
919 mono_aot_register_jit_icall ("mono_arm_throw_exception_by_token", mono_arm_throw_exception_by_token);
920 mono_aot_register_jit_icall ("mono_arm_resume_unwind", mono_arm_resume_unwind);
921 #if defined(ENABLE_GSHAREDVT)
922 mono_aot_register_jit_icall ("mono_arm_start_gsharedvt_call", mono_arm_start_gsharedvt_call);
925 #if defined(__ARM_EABI__)
926 eabi_supported = TRUE;
929 #if defined(ARM_FPU_VFP_HARD)
930 arm_fpu = MONO_ARM_FPU_VFP_HARD;
932 arm_fpu = MONO_ARM_FPU_VFP;
934 #if defined(ARM_FPU_NONE) && !defined(__APPLE__)
935 /* If we're compiling with a soft float fallback and it
936 turns out that no VFP unit is available, we need to
937 switch to soft float. We don't do this for iOS, since
938 iOS devices always have a VFP unit. */
939 if (!mono_hwcap_arm_has_vfp)
940 arm_fpu = MONO_ARM_FPU_NONE;
944 v5_supported = mono_hwcap_arm_is_v5;
945 v6_supported = mono_hwcap_arm_is_v6;
946 v7_supported = mono_hwcap_arm_is_v7;
947 v7s_supported = mono_hwcap_arm_is_v7s;
949 #if defined(__APPLE__)
950 /* iOS is special-cased here because we don't yet
951 have a way to properly detect CPU features on it. */
952 thumb_supported = TRUE;
955 thumb_supported = mono_hwcap_arm_has_thumb;
956 thumb2_supported = mono_hwcap_arm_has_thumb2;
959 /* Format: armv(5|6|7[s])[-thumb[2]] */
960 cpu_arch = g_getenv ("MONO_CPU_ARCH");
962 /* Do this here so it overrides any detection. */
964 if (strncmp (cpu_arch, "armv", 4) == 0) {
965 v5_supported = cpu_arch [4] >= '5';
966 v6_supported = cpu_arch [4] >= '6';
967 v7_supported = cpu_arch [4] >= '7';
968 v7s_supported = strncmp (cpu_arch, "armv7s", 6) == 0;
971 thumb_supported = strstr (cpu_arch, "thumb") != NULL;
972 thumb2_supported = strstr (cpu_arch, "thumb2") != NULL;
977 * Cleanup architecture specific code.
980 mono_arch_cleanup (void)
985 * This function returns the optimizations supported on this cpu.
988 mono_arch_cpu_optimizations (guint32 *exclude_mask)
990 /* no arm-specific optimizations yet */
996 * This function test for all SIMD functions supported.
998 * Returns a bitmask corresponding to all supported versions.
1002 mono_arch_cpu_enumerate_simd_versions (void)
1004 /* SIMD is currently unimplemented */
1012 mono_arch_opcode_needs_emulation (MonoCompile *cfg, int opcode)
1014 if (v7s_supported) {
1028 #ifdef MONO_ARCH_SOFT_FLOAT_FALLBACK
1030 mono_arch_is_soft_float (void)
1032 return arm_fpu == MONO_ARM_FPU_NONE;
1037 mono_arm_is_hard_float (void)
1039 return arm_fpu == MONO_ARM_FPU_VFP_HARD;
/* is_regsize_var:
 *   TRUE when T fits in a single 32-bit integer register (ints, pointers,
 *   references, non-valuetype generic insts), so it is eligible for global
 *   register allocation. */
1043 is_regsize_var (MonoGenericSharingContext *gsctx, MonoType *t) {
1046 t = mini_type_get_underlying_type (gsctx, t);
1053 case MONO_TYPE_FNPTR:
1055 case MONO_TYPE_OBJECT:
1056 case MONO_TYPE_STRING:
1057 case MONO_TYPE_CLASS:
1058 case MONO_TYPE_SZARRAY:
1059 case MONO_TYPE_ARRAY:
1061 case MONO_TYPE_GENERICINST:
/* Generic insts are regsize only when they are reference types. */
1062 if (!mono_type_generic_inst_is_valuetype (t))
1065 case MONO_TYPE_VALUETYPE:
/* Collect the variables eligible for global (integer) register allocation:
 * live, non-volatile, non-indirect locals/args with a regsize type, sorted
 * for the allocator. */
1072 mono_arch_get_allocatable_int_vars (MonoCompile *cfg)
1077 for (i = 0; i < cfg->num_varinfo; i++) {
1078 MonoInst *ins = cfg->varinfo [i];
1079 MonoMethodVar *vmv = MONO_VARINFO (cfg, i);
/* Skip dead variables (empty or inverted live range). */
1082 if (vmv->range.first_use.abs_pos >= vmv->range.last_use.abs_pos)
1085 if (ins->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT) || (ins->opcode != OP_LOCAL && ins->opcode != OP_ARG))
1088 /* we can only allocate 32 bit values */
1089 if (is_regsize_var (cfg->generic_sharing_context, ins->inst_vtype)) {
1090 g_assert (MONO_VARINFO (cfg, i)->reg == -1);
1091 g_assert (i == vmv->idx);
1092 vars = mono_varlist_insert_sorted (cfg, vars, vmv, FALSE);
/* Return the list of callee-saved integer registers (V1..V7, conditionally
 * FP and V5) that the register allocator may assign to variables. */
1100 mono_arch_get_global_int_regs (MonoCompile *cfg)
1104 mono_arch_compute_omit_fp (cfg);
1107 * FIXME: Interface calls might go through a static rgctx trampoline which
1108 * sets V5, but it doesn't save it, so we need to save it ourselves, and
/* Conservatively treat any method with calls as using the rgctx reg. */
1111 if (cfg->flags & MONO_CFG_HAS_CALLS)
1112 cfg->uses_rgctx_reg = TRUE;
/* FP is only allocatable when the frame pointer is omitted. */
1114 if (cfg->arch.omit_fp)
1115 regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_FP));
1116 regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_V1));
1117 regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_V2));
1118 regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_V3));
1120 /* V4=R7 is used as a frame pointer, but V7=R10 is preserved */
1121 regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_V7));
1123 regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_V4));
1124 if (!(cfg->compile_aot || cfg->uses_rgctx_reg || COMPILE_LLVM (cfg)))
1125 /* V5 is reserved for passing the vtable/rgctx/IMT method */
1126 regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_V5));
1127 /*regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_V6));*/
1128 /*regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_V7));*/
1134 * mono_arch_regalloc_cost:
1136 * Return the cost, in number of memory references, of the action of
1137 * allocating the variable VMV into a register during global register
1141 mono_arch_regalloc_cost (MonoCompile *cfg, MonoMethodVar *vmv)
1147 #endif /* #ifndef DISABLE_JIT */
1149 #ifndef __GNUC_PREREQ
1150 #define __GNUC_PREREQ(maj, min) (0)
/*
 * mono_arch_flush_icache:
 *   Flush the instruction cache for [code, code+size) after code generation.
 *   Picks the platform mechanism: nothing (NaCl/cross), sys_icache_invalidate
 *   (Darwin), GCC builtins, the Android cacheflush syscall, or the Linux
 *   sys_cacheflush swi.
 */
1154 mono_arch_flush_icache (guint8 *code, gint size)
1156 #if defined(__native_client__)
1157 // For Native Client we don't have to flush i-cache here,
1158 // as it's being done by dyncode interface.
1161 #ifdef MONO_CROSS_COMPILE
1163 sys_icache_invalidate (code, size);
1164 #elif __GNUC_PREREQ(4, 3)
1165 __builtin___clear_cache (code, code + size);
1166 #elif __GNUC_PREREQ(4, 1)
1167 __clear_cache (code, code + size);
1168 #elif defined(PLATFORM_ANDROID)
1169 const int syscall = 0xf0002;
1177 : "r" (code), "r" (code + size), "r" (syscall)
1178 : "r0", "r1", "r7", "r2"
/* Generic ARM Linux: invoke sys_cacheflush via swi. */
1181 __asm __volatile ("mov r0, %0\n"
1184 "swi 0x9f0002 @ sys_cacheflush"
1186 : "r" (code), "r" (code + size), "r" (0)
1187 : "r0", "r1", "r3" );
1189 #endif /* !__native_client__ */
1200 RegTypeStructByAddr,
1201 /* gsharedvt argument passed by addr in greg */
1202 RegTypeGSharedVtInReg,
1203 /* gsharedvt argument passed by addr on stack */
1204 RegTypeGSharedVtOnStack,
1209 guint16 vtsize; /* in param area */
1213 guint8 size : 4; /* 1, 2, 4, 8, or regs used by RegTypeStructByVal */
1218 guint32 stack_usage;
1219 gboolean vtype_retaddr;
1220 /* The index of the vret arg in the argument list */
/*
 * add_general:
 *   Assign an integer argument to core registers and/or the stack per the
 *   AAPCS.  'simple' = one word in r0-r3 or on the stack; otherwise a
 *   64-bit value needing a register pair (with EABI even-reg alignment),
 *   possibly split across r3 and the stack.
 */
1230 add_general (guint *gr, guint *stack_size, ArgInfo *ainfo, gboolean simple)
1233 if (*gr > ARMREG_R3) {
1235 ainfo->offset = *stack_size;
1236 ainfo->reg = ARMREG_SP; /* in the caller */
1237 ainfo->storage = RegTypeBase;
1240 ainfo->storage = RegTypeGeneral;
/* 64-bit values may be split across r3/stack when i8 aligns to 4. */
1247 split = i8_align == 4;
1252 if (*gr == ARMREG_R3 && split) {
1253 /* first word in r3 and the second on the stack */
1254 ainfo->offset = *stack_size;
1255 ainfo->reg = ARMREG_SP; /* in the caller */
1256 ainfo->storage = RegTypeBaseGen;
1258 } else if (*gr >= ARMREG_R3) {
1259 if (eabi_supported) {
1260 /* darwin aligns longs to 4 byte only */
1261 if (i8_align == 8) {
1266 ainfo->offset = *stack_size;
1267 ainfo->reg = ARMREG_SP; /* in the caller */
1268 ainfo->storage = RegTypeBase;
/* EABI: 64-bit register pairs must start on an even register. */
1271 if (eabi_supported) {
1272 if (i8_align == 8 && ((*gr) & 1))
1275 ainfo->storage = RegTypeIRegPair;
/*
 * add_float:
 *   Assign a VFP (hard-float) argument per the AAPCS-VFP rules: doubles use
 *   even s-register pairs, singles may back-fill a previously skipped
 *   register (tracked in *float_spare), and overflow goes to the stack.
 */
1284 add_float (guint *fpr, guint *stack_size, ArgInfo *ainfo, gboolean is_double, gint *float_spare)
1287 * If we're calling a function like this:
1289 * void foo(float a, double b, float c)
1291 * We pass a in s0 and b in d1. That leaves us
1292 * with s1 being unused. The armhf ABI recognizes
1293 * this and requires register assignment to then
1294 * use that for the next single-precision arg,
1295 * i.e. c in this example. So float_spare either
1296 * tells us which reg to use for the next single-
1297 * precision arg, or it's -1, meaning use *fpr.
1299 * Note that even though most of the JIT speaks
1300 * double-precision, fpr represents single-
1301 * precision registers.
1303 * See parts 5.5 and 6.1.2 of the AAPCS for how
1307 if (*fpr < ARM_VFP_F16 || (!is_double && *float_spare >= 0)) {
1308 ainfo->storage = RegTypeFP;
1312 * If we're passing a double-precision value
1313 * and *fpr is odd (e.g. it's s1, s3, ...)
1314 * we need to use the next even register. So
1315 * we mark the current *fpr as a spare that
1316 * can be used for the next single-precision
1320 *float_spare = *fpr;
1325 * At this point, we have an even register
1326 * so we assign that and move along.
1330 } else if (*float_spare >= 0) {
1332 * We're passing a single-precision value
1333 * and it looks like a spare single-
1334 * precision register is available. Let's
1338 ainfo->reg = *float_spare;
1342 * If we hit this branch, we're passing a
1343 * single-precision value and we can simply
1344 * use the next available register.
1352 * We've exhausted available floating point
1353 * regs, so pass the rest on the stack.
/* Stack overflow path: the value lives in the caller's out-args area. */
1361 ainfo->offset = *stack_size;
1362 ainfo->reg = ARMREG_SP;
1363 ainfo->storage = RegTypeBase;
/*
 * get_call_info:
 *   Compute the ARM calling-convention layout (CallInfo) for SIG:
 *   where the return value lives, where each argument goes (core reg,
 *   VFP reg, reg pair, stack, by-val struct split, gsharedvt by-ref),
 *   and the total caller stack usage.  Allocated from MP when given,
 *   otherwise with g_malloc0 (caller then owns/frees the result).
 *   NOTE(review): intermediate lines are missing from this chunk; the
 *   comments describe only the visible code.
 */
1370 get_call_info (MonoGenericSharingContext *gsctx, MonoMemPool *mp, MonoMethodSignature *sig)
1372 guint i, gr, fpr, pstart;
1374 int n = sig->hasthis + sig->param_count;
1375 MonoType *simpletype;
1376 guint32 stack_size = 0;
1378 gboolean is_pinvoke = sig->pinvoke;
1382 cinfo = mono_mempool_alloc0 (mp, sizeof (CallInfo) + (sizeof (ArgInfo) * n));
1384 cinfo = g_malloc0 (sizeof (CallInfo) + (sizeof (ArgInfo) * n));
/* Decide up front whether the return value needs a hidden return-address arg (vtype_retaddr). */
1391 t = mini_type_get_underlying_type (gsctx, sig->ret);
1392 if (MONO_TYPE_ISSTRUCT (t)) {
/* Small pinvoke structs (<= pointer size) are returned by value in registers. */
1395 if (is_pinvoke && mono_class_native_size (mono_class_from_mono_type (t), &align) <= sizeof (gpointer)) {
1396 cinfo->ret.storage = RegTypeStructByVal;
1398 cinfo->vtype_retaddr = TRUE;
1400 } else if (!(t->type == MONO_TYPE_GENERICINST && !mono_type_generic_inst_is_valuetype (t)) && mini_is_gsharedvt_type_gsctx (gsctx, t)) {
1401 cinfo->vtype_retaddr = TRUE;
1407 * To simplify get_this_arg_reg () and LLVM integration, emit the vret arg after
1408 * the first argument, allowing 'this' to be always passed in the first arg reg.
1409 * Also do this if the first argument is a reference type, since virtual calls
1410 * are sometimes made using calli without sig->hasthis set, like in the delegate
1413 if (cinfo->vtype_retaddr && !is_pinvoke && (sig->hasthis || (sig->param_count > 0 && MONO_TYPE_IS_REFERENCE (mini_type_get_underlying_type (gsctx, sig->params [0]))))) {
/* 'this' (or the first ref-type arg) goes first, then the hidden vret arg. */
1415 add_general (&gr, &stack_size, cinfo->args + 0, TRUE);
1417 add_general (&gr, &stack_size, &cinfo->args [sig->hasthis + 0], TRUE);
1421 add_general (&gr, &stack_size, &cinfo->ret, TRUE);
1422 cinfo->vret_arg_index = 1;
1426 add_general (&gr, &stack_size, cinfo->args + 0, TRUE);
1430 if (cinfo->vtype_retaddr)
1431 add_general (&gr, &stack_size, &cinfo->ret, TRUE);
1434 DEBUG(printf("params: %d\n", sig->param_count));
1435 for (i = pstart; i < sig->param_count; ++i) {
/* n is the running arg slot index here (advanced in lines not visible in this chunk). */
1436 ArgInfo *ainfo = &cinfo->args [n];
1438 if ((sig->call_convention == MONO_CALL_VARARG) && (i == sig->sentinelpos)) {
1439 /* Prevent implicit arguments and sig_cookie from
1440 being passed in registers */
1443 /* Emit the signature cookie just before the implicit arguments */
1444 add_general (&gr, &stack_size, &cinfo->sig_cookie, TRUE);
1446 DEBUG(printf("param %d: ", i));
/* By-ref arguments are always a single pointer-sized word. */
1447 if (sig->params [i]->byref) {
1448 DEBUG(printf("byref\n"));
1449 add_general (&gr, &stack_size, ainfo, TRUE);
1453 simpletype = mini_type_get_underlying_type (gsctx, sig->params [i]);
1454 switch (simpletype->type) {
1455 case MONO_TYPE_BOOLEAN:
1458 cinfo->args [n].size = 1;
1459 add_general (&gr, &stack_size, ainfo, TRUE);
1462 case MONO_TYPE_CHAR:
1465 cinfo->args [n].size = 2;
1466 add_general (&gr, &stack_size, ainfo, TRUE);
1471 cinfo->args [n].size = 4;
1472 add_general (&gr, &stack_size, ainfo, TRUE);
1478 case MONO_TYPE_FNPTR:
1479 case MONO_TYPE_CLASS:
1480 case MONO_TYPE_OBJECT:
1481 case MONO_TYPE_STRING:
1482 case MONO_TYPE_SZARRAY:
1483 case MONO_TYPE_ARRAY:
1484 cinfo->args [n].size = sizeof (gpointer);
1485 add_general (&gr, &stack_size, ainfo, TRUE);
1488 case MONO_TYPE_GENERICINST:
1489 if (!mono_type_generic_inst_is_valuetype (simpletype)) {
1490 cinfo->args [n].size = sizeof (gpointer);
1491 add_general (&gr, &stack_size, ainfo, TRUE);
1495 if (mini_is_gsharedvt_type_gsctx (gsctx, simpletype)) {
1496 /* gsharedvt arguments are passed by ref */
1497 g_assert (mini_is_gsharedvt_type_gsctx (gsctx, simpletype));
1498 add_general (&gr, &stack_size, ainfo, TRUE);
/* Re-tag the plain pointer slot as a gsharedvt indirection. */
1499 switch (ainfo->storage) {
1500 case RegTypeGeneral:
1501 ainfo->storage = RegTypeGSharedVtInReg;
1504 ainfo->storage = RegTypeGSharedVtOnStack;
1507 g_assert_not_reached ();
1513 case MONO_TYPE_TYPEDBYREF:
1514 case MONO_TYPE_VALUETYPE: {
1520 if (simpletype->type == MONO_TYPE_TYPEDBYREF) {
1521 size = sizeof (MonoTypedRef);
1522 align = sizeof (gpointer);
1524 MonoClass *klass = mono_class_from_mono_type (sig->params [i]);
1526 size = mono_class_native_size (klass, &align);
1528 size = mini_type_stack_size_full (gsctx, simpletype, &align, FALSE);
1530 DEBUG(printf ("load %d bytes struct\n", size));
/* Round the struct size up to whole pointer-sized words. */
1533 align_size += (sizeof (gpointer) - 1);
1534 align_size &= ~(sizeof (gpointer) - 1);
1535 nwords = (align_size + sizeof (gpointer) -1 ) / sizeof (gpointer);
1536 ainfo->storage = RegTypeStructByVal;
1537 ainfo->struct_size = size;
1538 /* FIXME: align stack_size if needed */
1539 if (eabi_supported) {
/* EABI: 8-byte aligned structs start in an even register. */
1540 if (align >= 8 && (gr & 1))
1543 if (gr > ARMREG_R3) {
1545 ainfo->vtsize = nwords;
/* Split the struct: first words in the remaining regs, the rest on the stack. */
1547 int rest = ARMREG_R3 - gr + 1;
1548 int n_in_regs = rest >= nwords? nwords: rest;
1550 ainfo->size = n_in_regs;
1551 ainfo->vtsize = nwords - n_in_regs;
1554 nwords -= n_in_regs;
1556 if (sig->call_convention == MONO_CALL_VARARG)
1557 /* This matches the alignment in mono_ArgIterator_IntGetNextArg () */
1558 stack_size = ALIGN_TO (stack_size, align);
1559 ainfo->offset = stack_size;
1560 /*g_print ("offset for arg %d at %d\n", n, stack_size);*/
1561 stack_size += nwords * sizeof (gpointer);
1568 add_general (&gr, &stack_size, ainfo, FALSE);
/* Floating point args: VFP regs on hard-float, core regs otherwise. */
1575 add_float (&fpr, &stack_size, ainfo, FALSE, &float_spare);
1577 add_general (&gr, &stack_size, ainfo, TRUE);
1585 add_float (&fpr, &stack_size, ainfo, TRUE, &float_spare);
1587 add_general (&gr, &stack_size, ainfo, FALSE);
1592 case MONO_TYPE_MVAR:
1593 /* gsharedvt arguments are passed by ref */
1594 g_assert (mini_is_gsharedvt_type_gsctx (gsctx, simpletype));
1595 add_general (&gr, &stack_size, ainfo, TRUE);
1596 switch (ainfo->storage) {
1597 case RegTypeGeneral:
1598 ainfo->storage = RegTypeGSharedVtInReg;
1601 ainfo->storage = RegTypeGSharedVtOnStack;
1604 g_assert_not_reached ();
1609 g_error ("Can't trampoline 0x%x", sig->params [i]->type);
1613 /* Handle the case where there are no implicit arguments */
1614 if ((sig->call_convention == MONO_CALL_VARARG) && (i == sig->sentinelpos)) {
1615 /* Prevent implicit arguments and sig_cookie from
1616 being passed in registers */
1619 /* Emit the signature cookie just before the implicit arguments */
1620 add_general (&gr, &stack_size, &cinfo->sig_cookie, TRUE);
/* Now classify the return value itself. */
1624 simpletype = mini_type_get_underlying_type (gsctx, sig->ret);
1625 switch (simpletype->type) {
1626 case MONO_TYPE_BOOLEAN:
1631 case MONO_TYPE_CHAR:
1637 case MONO_TYPE_FNPTR:
1638 case MONO_TYPE_CLASS:
1639 case MONO_TYPE_OBJECT:
1640 case MONO_TYPE_SZARRAY:
1641 case MONO_TYPE_ARRAY:
1642 case MONO_TYPE_STRING:
1643 cinfo->ret.storage = RegTypeGeneral;
1644 cinfo->ret.reg = ARMREG_R0;
/* 64-bit returns come back in the r0/r1 pair. */
1648 cinfo->ret.storage = RegTypeIRegPair;
1649 cinfo->ret.reg = ARMREG_R0;
1653 cinfo->ret.storage = RegTypeFP;
1655 if (IS_HARD_FLOAT) {
1656 cinfo->ret.reg = ARM_VFP_F0;
1658 cinfo->ret.reg = ARMREG_R0;
1662 case MONO_TYPE_GENERICINST:
1663 if (!mono_type_generic_inst_is_valuetype (simpletype)) {
1664 cinfo->ret.storage = RegTypeGeneral;
1665 cinfo->ret.reg = ARMREG_R0;
1668 // FIXME: Only for variable types
1669 if (mini_is_gsharedvt_type_gsctx (gsctx, simpletype)) {
1670 cinfo->ret.storage = RegTypeStructByAddr;
1671 g_assert (cinfo->vtype_retaddr);
1675 case MONO_TYPE_VALUETYPE:
1676 case MONO_TYPE_TYPEDBYREF:
1677 if (cinfo->ret.storage != RegTypeStructByVal)
1678 cinfo->ret.storage = RegTypeStructByAddr;
1681 case MONO_TYPE_MVAR:
1682 g_assert (mini_is_gsharedvt_type_gsctx (gsctx, simpletype));
1683 cinfo->ret.storage = RegTypeStructByAddr;
1684 g_assert (cinfo->vtype_retaddr);
1686 case MONO_TYPE_VOID:
1689 g_error ("Can't handle as return value 0x%x", sig->ret->type);
1693 /* align stack size to 8 */
1694 DEBUG (printf (" stack size: %d (%d)\n", (stack_size + 15) & ~15, stack_size));
1695 stack_size = (stack_size + 7) & ~7;
1697 cinfo->stack_usage = stack_size;
/*
 * mono_arch_tail_call_supported:
 *   Return whether a tail call from CALLER_SIG to CALLEE_SIG can be
 *   emitted on ARM, based on comparative stack usage and return kind.
 *   NOTE(review): c1/c2 are g_malloc0'ed (mp == NULL) — presumably
 *   freed in lines not visible in this chunk; verify against the
 *   full source.
 */
1703 mono_arch_tail_call_supported (MonoCompile *cfg, MonoMethodSignature *caller_sig, MonoMethodSignature *callee_sig)
1705 MonoType *callee_ret;
1709 if (cfg->compile_aot && !cfg->full_aot)
1710 /* OP_TAILCALL doesn't work with AOT */
1713 c1 = get_call_info (NULL, NULL, caller_sig);
1714 c2 = get_call_info (NULL, NULL, callee_sig);
1717 * Tail calls with more callee stack usage than the caller cannot be supported, since
1718 * the extra stack space would be left on the stack after the tail call.
1720 res = c1->stack_usage >= c2->stack_usage;
1721 callee_ret = mini_replace_type (callee_sig->ret);
1722 if (callee_ret && MONO_TYPE_ISSTRUCT (callee_ret) && c2->ret.storage != RegTypeStructByVal)
1723 /* An address on the callee's stack is passed as the first argument */
/* Reject callees needing more than 16 words (4 arg regs' worth * 4) of stack. */
1726 if (c2->stack_usage > 16 * 4)
/*
 * debug_omit_fp:
 *   Debug hook gating frame-pointer omission via mono_debug_count (),
 *   so FP elimination can be bisected when debugging the JIT.
 */
1738 debug_omit_fp (void)
1741 return mono_debug_count ();
1748 * mono_arch_compute_omit_fp:
/* (typo fixed: "whenever" -> "whether") */
1750 * Determine whether the frame pointer can be eliminated.
1753 mono_arch_compute_omit_fp (MonoCompile *cfg)
1755 MonoMethodSignature *sig;
1756 MonoMethodHeader *header;
/* Computed at most once per compile; cached in cfg->arch. */
1760 if (cfg->arch.omit_fp_computed)
1763 header = cfg->header;
1765 sig = mono_method_signature (cfg->method);
1767 if (!cfg->arch.cinfo)
1768 cfg->arch.cinfo = get_call_info (cfg->generic_sharing_context, cfg->mempool, sig);
1769 cinfo = cfg->arch.cinfo;
1772 * FIXME: Remove some of the restrictions.
/* Start optimistic, then veto FP omission for each disqualifying feature. */
1774 cfg->arch.omit_fp = TRUE;
1775 cfg->arch.omit_fp_computed = TRUE;
1777 if (cfg->disable_omit_fp)
1778 cfg->arch.omit_fp = FALSE;
1779 if (!debug_omit_fp ())
1780 cfg->arch.omit_fp = FALSE;
1782 if (cfg->method->save_lmf)
1783 cfg->arch.omit_fp = FALSE;
1785 if (cfg->flags & MONO_CFG_HAS_ALLOCA)
1786 cfg->arch.omit_fp = FALSE;
1787 if (header->num_clauses)
1788 cfg->arch.omit_fp = FALSE;
1789 if (cfg->param_area)
1790 cfg->arch.omit_fp = FALSE;
1791 if (!sig->pinvoke && (sig->call_convention == MONO_CALL_VARARG))
1792 cfg->arch.omit_fp = FALSE;
1793 if ((mono_jit_trace_calls != NULL && mono_trace_eval (cfg->method)) ||
1794 (cfg->prof_options & MONO_PROFILE_ENTER_LEAVE))
1795 cfg->arch.omit_fp = FALSE;
1796 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
1797 ArgInfo *ainfo = &cinfo->args [i];
1799 if (ainfo->storage == RegTypeBase || ainfo->storage == RegTypeBaseGen || ainfo->storage == RegTypeStructByVal) {
1801 * The stack offset can only be determined when the frame
1804 cfg->arch.omit_fp = FALSE;
/* Tally local variable sizes; use not visible in this chunk. */
1809 for (i = cfg->locals_start; i < cfg->num_varinfo; i++) {
1810 MonoInst *ins = cfg->varinfo [i];
1813 locals_size += mono_type_size (ins->inst_vtype, &ialign);
1818 * Set var information according to the calling convention. arm version.
1819 * The locals var stuff should most likely be split in another method.
1822 mono_arch_allocate_vars (MonoCompile *cfg)
1824 MonoMethodSignature *sig;
1825 MonoMethodHeader *header;
1828 int i, offset, size, align, curinst;
1832 sig = mono_method_signature (cfg->method);
1834 if (!cfg->arch.cinfo)
1835 cfg->arch.cinfo = get_call_info (cfg->generic_sharing_context, cfg->mempool, sig);
1836 cinfo = cfg->arch.cinfo;
1837 sig_ret = mini_replace_type (sig->ret);
1839 mono_arch_compute_omit_fp (cfg);
/* Frame base: SP when FP was omitted, FP otherwise. */
1841 if (cfg->arch.omit_fp)
1842 cfg->frame_reg = ARMREG_SP;
1844 cfg->frame_reg = ARMREG_FP;
1846 cfg->flags |= MONO_CFG_HAS_SPILLUP;
1848 /* allow room for the vararg method args: void* and long/double */
1849 if (mono_jit_trace_calls != NULL && mono_trace_eval (cfg->method))
1850 cfg->param_area = MAX (cfg->param_area, sizeof (gpointer)*8);
1852 header = cfg->header;
1854 /* See mono_arch_get_global_int_regs () */
1855 if (cfg->flags & MONO_CFG_HAS_CALLS)
1856 cfg->uses_rgctx_reg = TRUE;
1858 if (cfg->frame_reg != ARMREG_SP)
1859 cfg->used_int_regs |= 1 << cfg->frame_reg;
1861 if (cfg->compile_aot || cfg->uses_rgctx_reg || COMPILE_LLVM (cfg))
1862 /* V5 is reserved for passing the vtable/rgctx/IMT method */
1863 cfg->used_int_regs |= (1 << ARMREG_V5);
/* Scalar returns live directly in r0. */
1867 if (!MONO_TYPE_ISSTRUCT (sig_ret) && !cinfo->vtype_retaddr) {
1868 if (sig_ret->type != MONO_TYPE_VOID) {
1869 cfg->ret->opcode = OP_REGVAR;
1870 cfg->ret->inst_c0 = ARMREG_R0;
1873 /* local vars are at a positive offset from the stack pointer */
1875 * also note that if the function uses alloca, we use FP
1876 * to point at the local variables.
1878 offset = 0; /* linkage area */
1879 /* align the offset to 16 bytes: not sure this is needed here */
1881 //offset &= ~(8 - 1);
1883 /* add parameter area size for called functions */
1884 offset += cfg->param_area;
1887 if (cfg->flags & MONO_CFG_HAS_FPOUT)
1890 /* allow room to save the return value */
1891 if (mono_jit_trace_calls != NULL && mono_trace_eval (cfg->method))
1894 /* the MonoLMF structure is stored just below the stack pointer */
/* By-val struct return: reserve an aligned pointer-sized slot for it. */
1895 if (cinfo->ret.storage == RegTypeStructByVal) {
1896 cfg->ret->opcode = OP_REGOFFSET;
1897 cfg->ret->inst_basereg = cfg->frame_reg;
1898 offset += sizeof (gpointer) - 1;
1899 offset &= ~(sizeof (gpointer) - 1);
1900 cfg->ret->inst_offset = - offset;
1901 offset += sizeof(gpointer);
1902 } else if (cinfo->vtype_retaddr) {
1903 ins = cfg->vret_addr;
1904 offset += sizeof(gpointer) - 1;
1905 offset &= ~(sizeof(gpointer) - 1);
1906 ins->inst_offset = offset;
1907 ins->opcode = OP_REGOFFSET;
1908 ins->inst_basereg = cfg->frame_reg;
1909 if (G_UNLIKELY (cfg->verbose_level > 1)) {
1910 printf ("vret_addr =");
1911 mono_print_ins (cfg->vret_addr);
1913 offset += sizeof(gpointer);
1916 /* Allocate these first so they have a small offset, OP_SEQ_POINT depends on this */
1917 if (cfg->arch.seq_point_info_var) {
1920 ins = cfg->arch.seq_point_info_var;
1924 offset += align - 1;
1925 offset &= ~(align - 1);
1926 ins->opcode = OP_REGOFFSET;
1927 ins->inst_basereg = cfg->frame_reg;
1928 ins->inst_offset = offset;
1931 ins = cfg->arch.ss_trigger_page_var;
1934 offset += align - 1;
1935 offset &= ~(align - 1);
1936 ins->opcode = OP_REGOFFSET;
1937 ins->inst_basereg = cfg->frame_reg;
1938 ins->inst_offset = offset;
/* Soft-breakpoint sequence point variables (see mono_arch_create_vars). */
1942 if (cfg->arch.seq_point_read_var) {
1945 ins = cfg->arch.seq_point_read_var;
1949 offset += align - 1;
1950 offset &= ~(align - 1);
1951 ins->opcode = OP_REGOFFSET;
1952 ins->inst_basereg = cfg->frame_reg;
1953 ins->inst_offset = offset;
1956 ins = cfg->arch.seq_point_ss_method_var;
1959 offset += align - 1;
1960 offset &= ~(align - 1);
1961 ins->opcode = OP_REGOFFSET;
1962 ins->inst_basereg = cfg->frame_reg;
1963 ins->inst_offset = offset;
1966 ins = cfg->arch.seq_point_bp_method_var;
1969 offset += align - 1;
1970 offset &= ~(align - 1);
1971 ins->opcode = OP_REGOFFSET;
1972 ins->inst_basereg = cfg->frame_reg;
1973 ins->inst_offset = offset;
1977 if (cfg->has_atomic_exchange_i4 || cfg->has_atomic_cas_i4 || cfg->has_atomic_add_i4) {
1978 /* Allocate a temporary used by the atomic ops */
1982 /* Allocate a local slot to hold the sig cookie address */
1983 offset += align - 1;
1984 offset &= ~(align - 1);
1985 cfg->arch.atomic_tmp_offset = offset;
1988 cfg->arch.atomic_tmp_offset = -1;
1991 cfg->locals_min_stack_offset = offset;
/* Lay out local variables. */
1993 curinst = cfg->locals_start;
1994 for (i = curinst; i < cfg->num_varinfo; ++i) {
1997 ins = cfg->varinfo [i];
1998 if ((ins->flags & MONO_INST_IS_DEAD) || ins->opcode == OP_REGVAR || ins->opcode == OP_REGOFFSET)
2001 t = ins->inst_vtype;
2002 if (cfg->gsharedvt && mini_is_gsharedvt_variable_type (cfg, t))
2005 /* inst->backend.is_pinvoke indicates native sized value types, this is used by the
2006 * pinvoke wrappers when they call functions returning structure */
2007 if (ins->backend.is_pinvoke && MONO_TYPE_ISSTRUCT (t) && t->type != MONO_TYPE_TYPEDBYREF) {
2008 size = mono_class_native_size (mono_class_from_mono_type (t), &ualign);
2012 size = mono_type_size (t, &align);
2014 /* FIXME: if a structure is misaligned, our memcpy doesn't work,
2015 * since it loads/stores misaligned words, which don't do the right thing.
2017 if (align < 4 && size >= 4)
/* Padding introduced by over-alignment must be marked non-GC-tracked. */
2019 if (ALIGN_TO (offset, align) > ALIGN_TO (offset, 4))
2020 mini_gc_set_slot_type_from_fp (cfg, ALIGN_TO (offset, 4), SLOT_NOREF);
2021 offset += align - 1;
2022 offset &= ~(align - 1);
2023 ins->opcode = OP_REGOFFSET;
2024 ins->inst_offset = offset;
2025 ins->inst_basereg = cfg->frame_reg;
2027 //g_print ("allocating local %d to %d\n", i, inst->inst_offset);
2030 cfg->locals_max_stack_offset = offset;
/* Lay out incoming arguments ('this' first, then formals). */
2034 ins = cfg->args [curinst];
2035 if (ins->opcode != OP_REGVAR) {
2036 ins->opcode = OP_REGOFFSET;
2037 ins->inst_basereg = cfg->frame_reg;
2038 offset += sizeof (gpointer) - 1;
2039 offset &= ~(sizeof (gpointer) - 1);
2040 ins->inst_offset = offset;
2041 offset += sizeof (gpointer);
2046 if (sig->call_convention == MONO_CALL_VARARG) {
2050 /* Allocate a local slot to hold the sig cookie address */
2051 offset += align - 1;
2052 offset &= ~(align - 1);
2053 cfg->sig_cookie = offset;
2057 for (i = 0; i < sig->param_count; ++i) {
2058 ins = cfg->args [curinst];
2060 if (ins->opcode != OP_REGVAR) {
2061 ins->opcode = OP_REGOFFSET;
2062 ins->inst_basereg = cfg->frame_reg;
2063 size = mini_type_stack_size_full (cfg->generic_sharing_context, sig->params [i], &ualign, sig->pinvoke);
2065 /* FIXME: if a structure is misaligned, our memcpy doesn't work,
2066 * since it loads/stores misaligned words, which don't do the right thing.
2068 if (align < 4 && size >= 4)
2070 /* The code in the prolog () stores words when storing vtypes received in a register */
2071 if (MONO_TYPE_ISSTRUCT (sig->params [i]))
2073 if (ALIGN_TO (offset, align) > ALIGN_TO (offset, 4))
2074 mini_gc_set_slot_type_from_fp (cfg, ALIGN_TO (offset, 4), SLOT_NOREF);
2075 offset += align - 1;
2076 offset &= ~(align - 1);
2077 ins->inst_offset = offset;
2083 /* align the offset to 8 bytes */
2084 if (ALIGN_TO (offset, 8) > ALIGN_TO (offset, 4))
2085 mini_gc_set_slot_type_from_fp (cfg, ALIGN_TO (offset, 4), SLOT_NOREF);
2090 cfg->stack_offset = offset;
/*
 * mono_arch_create_vars:
 *   Create the ARM-specific compile-time variables: VFP scratch slots
 *   (hard-float), the hidden vret address arg, and the sequence-point
 *   bookkeeping variables used by the soft debugger.
 */
2094 mono_arch_create_vars (MonoCompile *cfg)
2096 MonoMethodSignature *sig;
2100 sig = mono_method_signature (cfg->method);
2102 if (!cfg->arch.cinfo)
2103 cfg->arch.cinfo = get_call_info (cfg->generic_sharing_context, cfg->mempool, sig);
2104 cinfo = cfg->arch.cinfo;
/* Two double-sized scratch slots for moving values between VFP and core regs. */
2106 if (IS_HARD_FLOAT) {
2107 for (i = 0; i < 2; i++) {
2108 MonoInst *inst = mono_compile_create_var (cfg, &mono_defaults.double_class->byval_arg, OP_LOCAL);
2109 inst->flags |= MONO_INST_VOLATILE;
2111 cfg->arch.vfp_scratch_slots [i] = (gpointer) inst;
2115 if (cinfo->ret.storage == RegTypeStructByVal)
2116 cfg->ret_var_is_local = TRUE;
2118 if (cinfo->vtype_retaddr) {
2119 cfg->vret_addr = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_ARG);
2120 if (G_UNLIKELY (cfg->verbose_level > 1)) {
2121 printf ("vret_addr = ");
2122 mono_print_ins (cfg->vret_addr);
2126 if (cfg->gen_seq_points) {
/* Soft breakpoints: three volatile locals the seq-point code reads/writes. */
2127 if (cfg->soft_breakpoints) {
2128 MonoInst *ins = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
2129 ins->flags |= MONO_INST_VOLATILE;
2130 cfg->arch.seq_point_read_var = ins;
2132 ins = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
2133 ins->flags |= MONO_INST_VOLATILE;
2134 cfg->arch.seq_point_ss_method_var = ins;
2136 ins = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
2137 ins->flags |= MONO_INST_VOLATILE;
2138 cfg->arch.seq_point_bp_method_var = ins;
2140 g_assert (!cfg->compile_aot);
2141 } else if (cfg->compile_aot) {
2142 MonoInst *ins = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
2143 ins->flags |= MONO_INST_VOLATILE;
2144 cfg->arch.seq_point_info_var = ins;
2146 /* Allocate a separate variable for this to save 1 load per seq point */
2147 ins = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
2148 ins->flags |= MONO_INST_VOLATILE;
2149 cfg->arch.ss_trigger_page_var = ins;
/*
 * emit_sig_cookie:
 *   Emit IR that stores the vararg signature cookie (a trimmed copy of
 *   the call signature, starting at the sentinel) into its stack slot,
 *   so mono_ArgIterator_Setup can find the trailing arguments.
 */
2155 emit_sig_cookie (MonoCompile *cfg, MonoCallInst *call, CallInfo *cinfo)
2157 MonoMethodSignature *tmp_sig;
/* Tail calls never need the cookie (no new vararg frame is built). */
2160 if (call->tail_call)
2163 g_assert (cinfo->sig_cookie.storage == RegTypeBase);
2166 * mono_ArgIterator_Setup assumes the signature cookie is
2167 * passed first and all the arguments which were before it are
2168 * passed on the stack after the signature. So compensate by
2169 * passing a different signature.
2171 tmp_sig = mono_metadata_signature_dup (call->signature);
2172 tmp_sig->param_count -= call->signature->sentinelpos;
2173 tmp_sig->sentinelpos = 0;
2174 memcpy (tmp_sig->params, call->signature->params + call->signature->sentinelpos, tmp_sig->param_count * sizeof (MonoType*));
2176 sig_reg = mono_alloc_ireg (cfg);
2177 MONO_EMIT_NEW_SIGNATURECONST (cfg, sig_reg, tmp_sig);
2179 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, ARMREG_SP, cinfo->sig_cookie.offset, sig_reg);
/*
 * mono_arch_get_llvm_call_info:
 *   Translate the ARM CallInfo for SIG into an LLVMCallInfo the LLVM
 *   backend understands; set cfg->disable_llvm (with a message) for
 *   conventions LLVM cannot express.
 */
2184 mono_arch_get_llvm_call_info (MonoCompile *cfg, MonoMethodSignature *sig)
2189 LLVMCallInfo *linfo;
2191 n = sig->param_count + sig->hasthis;
2193 cinfo = get_call_info (cfg->generic_sharing_context, cfg->mempool, sig);
2195 linfo = mono_mempool_alloc0 (cfg->mempool, sizeof (LLVMCallInfo) + (sizeof (LLVMArgInfo) * n));
2198 * LLVM always uses the native ABI while we use our own ABI, the
2199 * only difference is the handling of vtypes:
2200 * - we only pass/receive them in registers in some cases, and only
2201 * in 1 or 2 integer registers.
2203 if (cinfo->vtype_retaddr) {
2204 /* Vtype returned using a hidden argument */
2205 linfo->ret.storage = LLVMArgVtypeRetAddr;
2206 linfo->vret_arg_index = cinfo->vret_arg_index;
2207 } else if (cinfo->ret.storage != RegTypeGeneral && cinfo->ret.storage != RegTypeNone && cinfo->ret.storage != RegTypeFP && cinfo->ret.storage != RegTypeIRegPair) {
2208 cfg->exception_message = g_strdup ("unknown ret conv");
2209 cfg->disable_llvm = TRUE;
2213 for (i = 0; i < n; ++i) {
2214 ainfo = cinfo->args + i;
2216 linfo->args [i].storage = LLVMArgNone;
2218 switch (ainfo->storage) {
2219 case RegTypeGeneral:
2220 case RegTypeIRegPair:
2222 linfo->args [i].storage = LLVMArgInIReg;
2224 case RegTypeStructByVal:
2225 // FIXME: Passing entirely on the stack or split reg/stack
/* Only fully-in-register structs of 1-2 words are representable. */
2226 if (ainfo->vtsize == 0 && ainfo->size <= 2) {
2227 linfo->args [i].storage = LLVMArgVtypeInReg;
2228 linfo->args [i].pair_storage [0] = LLVMArgInIReg;
2229 if (ainfo->size == 2)
2230 linfo->args [i].pair_storage [1] = LLVMArgInIReg;
2232 linfo->args [i].pair_storage [1] = LLVMArgNone;
2234 cfg->exception_message = g_strdup_printf ("vtype-by-val on stack");
2235 cfg->disable_llvm = TRUE;
2239 cfg->exception_message = g_strdup_printf ("ainfo->storage (%d)", ainfo->storage);
2240 cfg->disable_llvm = TRUE;
/*
 * mono_arch_emit_call:
 *   Lower CALL's arguments into IR according to the ARM calling
 *   convention computed by get_call_info (): moves into core/VFP
 *   registers, stores into the outgoing stack area, struct splitting,
 *   the vararg signature cookie, and the hidden vtype return address.
 *   NOTE(review): intermediate lines are missing from this chunk; the
 *   comments describe only the visible code.
 */
2250 mono_arch_emit_call (MonoCompile *cfg, MonoCallInst *call)
2253 MonoMethodSignature *sig;
2257 sig = call->signature;
2258 n = sig->param_count + sig->hasthis;
2260 cinfo = get_call_info (cfg->generic_sharing_context, NULL, sig);
2262 for (i = 0; i < n; ++i) {
2263 ArgInfo *ainfo = cinfo->args + i;
/* The implicit 'this' argument is typed as a native int. */
2266 if (i >= sig->hasthis)
2267 t = sig->params [i - sig->hasthis];
2269 t = &mono_defaults.int_class->byval_arg;
2270 t = mini_type_get_underlying_type (cfg->generic_sharing_context, t);
2272 if ((sig->call_convention == MONO_CALL_VARARG) && (i == sig->sentinelpos)) {
2273 /* Emit the signature cookie just before the implicit arguments */
2274 emit_sig_cookie (cfg, call, cinfo);
2277 in = call->args [i];
2279 switch (ainfo->storage) {
2280 case RegTypeGeneral:
2281 case RegTypeIRegPair:
/* 64-bit value in a register pair: move low word (dreg+1) then high word (dreg+2). */
2282 if (!t->byref && ((t->type == MONO_TYPE_I8) || (t->type == MONO_TYPE_U8))) {
2283 MONO_INST_NEW (cfg, ins, OP_MOVE);
2284 ins->dreg = mono_alloc_ireg (cfg);
2285 ins->sreg1 = in->dreg + 1;
2286 MONO_ADD_INS (cfg->cbb, ins);
2287 mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, ainfo->reg, FALSE);
2289 MONO_INST_NEW (cfg, ins, OP_MOVE);
2290 ins->dreg = mono_alloc_ireg (cfg);
2291 ins->sreg1 = in->dreg + 2;
2292 MONO_ADD_INS (cfg->cbb, ins);
2293 mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, ainfo->reg + 1, FALSE);
2294 } else if (!t->byref && ((t->type == MONO_TYPE_R8) || (t->type == MONO_TYPE_R4))) {
2295 if (ainfo->size == 4) {
2296 if (IS_SOFT_FLOAT) {
2297 /* mono_emit_call_args () have already done the r8->r4 conversion */
2298 /* The converted value is in an int vreg */
2299 MONO_INST_NEW (cfg, ins, OP_MOVE);
2300 ins->dreg = mono_alloc_ireg (cfg);
2301 ins->sreg1 = in->dreg;
2302 MONO_ADD_INS (cfg->cbb, ins);
2303 mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, ainfo->reg, FALSE);
/* VFP case: bounce the float through the param area to reach a core reg. */
2307 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORER4_MEMBASE_REG, ARMREG_SP, (cfg->param_area - 8), in->dreg);
2308 creg = mono_alloc_ireg (cfg);
2309 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOAD_MEMBASE, creg, ARMREG_SP, (cfg->param_area - 8));
2310 mono_call_inst_add_outarg_reg (cfg, call, creg, ainfo->reg, FALSE);
2313 if (IS_SOFT_FLOAT) {
2314 MONO_INST_NEW (cfg, ins, OP_FGETLOW32);
2315 ins->dreg = mono_alloc_ireg (cfg);
2316 ins->sreg1 = in->dreg;
2317 MONO_ADD_INS (cfg->cbb, ins);
2318 mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, ainfo->reg, FALSE);
2320 MONO_INST_NEW (cfg, ins, OP_FGETHIGH32);
2321 ins->dreg = mono_alloc_ireg (cfg);
2322 ins->sreg1 = in->dreg;
2323 MONO_ADD_INS (cfg->cbb, ins);
2324 mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, ainfo->reg + 1, FALSE);
/* Double via memory: store R8, reload both 32-bit halves into the reg pair. */
2328 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORER8_MEMBASE_REG, ARMREG_SP, (cfg->param_area - 8), in->dreg);
2329 creg = mono_alloc_ireg (cfg);
2330 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOAD_MEMBASE, creg, ARMREG_SP, (cfg->param_area - 8));
2331 mono_call_inst_add_outarg_reg (cfg, call, creg, ainfo->reg, FALSE);
2332 creg = mono_alloc_ireg (cfg);
2333 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOAD_MEMBASE, creg, ARMREG_SP, (cfg->param_area - 8 + 4));
2334 mono_call_inst_add_outarg_reg (cfg, call, creg, ainfo->reg + 1, FALSE);
2337 cfg->flags |= MONO_CFG_HAS_FPOUT;
2339 MONO_INST_NEW (cfg, ins, OP_MOVE);
2340 ins->dreg = mono_alloc_ireg (cfg);
2341 ins->sreg1 = in->dreg;
2342 MONO_ADD_INS (cfg->cbb, ins);
2344 mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, ainfo->reg, FALSE);
2347 case RegTypeStructByAddr:
2350 /* FIXME: where is the data allocated? */
2351 arg->backend.reg3 = ainfo->reg;
2352 call->used_iregs |= 1 << ainfo->reg;
2353 g_assert_not_reached ();
/* Vtypes (and gsharedvt) go through OP_OUTARG_VT, finished in mono_arch_emit_outarg_vt (). */
2356 case RegTypeStructByVal:
2357 case RegTypeGSharedVtInReg:
2358 case RegTypeGSharedVtOnStack:
2359 MONO_INST_NEW (cfg, ins, OP_OUTARG_VT);
2360 ins->opcode = OP_OUTARG_VT;
2361 ins->sreg1 = in->dreg;
2362 ins->klass = in->klass;
2363 ins->inst_p0 = call;
2364 ins->inst_p1 = mono_mempool_alloc (cfg->mempool, sizeof (ArgInfo));
2365 memcpy (ins->inst_p1, ainfo, sizeof (ArgInfo));
2366 mono_call_inst_add_outarg_vt (cfg, call, ins);
2367 MONO_ADD_INS (cfg->cbb, ins);
/* Stack-passed argument: plain store at its caller-frame offset. */
2370 if (!t->byref && ((t->type == MONO_TYPE_I8) || (t->type == MONO_TYPE_U8))) {
2371 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, ARMREG_SP, ainfo->offset, in->dreg);
2372 } else if (!t->byref && ((t->type == MONO_TYPE_R4) || (t->type == MONO_TYPE_R8))) {
2373 if (t->type == MONO_TYPE_R8) {
2374 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORER8_MEMBASE_REG, ARMREG_SP, ainfo->offset, in->dreg);
2377 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, ARMREG_SP, ainfo->offset, in->dreg);
2379 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORER4_MEMBASE_REG, ARMREG_SP, ainfo->offset, in->dreg);
2382 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, ARMREG_SP, ainfo->offset, in->dreg);
/* Split 64-bit arg: one word in r3, the other on the stack (endian-dependent halves). */
2385 case RegTypeBaseGen:
2386 if (!t->byref && ((t->type == MONO_TYPE_I8) || (t->type == MONO_TYPE_U8))) {
2387 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, ARMREG_SP, ainfo->offset, (G_BYTE_ORDER == G_BIG_ENDIAN) ? in->dreg + 1 : in->dreg + 2);
2388 MONO_INST_NEW (cfg, ins, OP_MOVE);
2389 ins->dreg = mono_alloc_ireg (cfg);
2390 ins->sreg1 = G_BYTE_ORDER == G_BIG_ENDIAN ? in->dreg + 2 : in->dreg + 1;
2391 MONO_ADD_INS (cfg->cbb, ins);
2392 mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, ARMREG_R3, FALSE);
2393 } else if (!t->byref && (t->type == MONO_TYPE_R8)) {
2396 /* This should work for soft-float as well */
2398 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORER8_MEMBASE_REG, ARMREG_SP, (cfg->param_area - 8), in->dreg);
2399 creg = mono_alloc_ireg (cfg);
2400 mono_call_inst_add_outarg_reg (cfg, call, creg, ARMREG_R3, FALSE);
2401 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOAD_MEMBASE, creg, ARMREG_SP, (cfg->param_area - 8));
2402 creg = mono_alloc_ireg (cfg);
2403 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOAD_MEMBASE, creg, ARMREG_SP, (cfg->param_area - 4));
2404 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, ARMREG_SP, ainfo->offset, creg);
2405 cfg->flags |= MONO_CFG_HAS_FPOUT;
2407 g_assert_not_reached ();
/* Hard-float VFP register argument. */
2411 int fdreg = mono_alloc_freg (cfg);
2413 if (ainfo->size == 8) {
2414 MONO_INST_NEW (cfg, ins, OP_FMOVE);
2415 ins->sreg1 = in->dreg;
2417 MONO_ADD_INS (cfg->cbb, ins);
2419 mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, ainfo->reg, TRUE);
2424 * Mono's register allocator doesn't speak single-precision registers that
2425 * overlap double-precision registers (i.e. armhf). So we have to work around
2426 * the register allocator and load the value from memory manually.
2428 * So we create a variable for the float argument and an instruction to store
2429 * the argument into the variable. We then store the list of these arguments
2430 * in cfg->float_args. This list is then used by emit_float_args later to
2431 * pass the arguments in the various call opcodes.
2433 * This is not very nice, and we should really try to fix the allocator.
2436 MonoInst *float_arg = mono_compile_create_var (cfg, &mono_defaults.single_class->byval_arg, OP_LOCAL);
2438 /* Make sure the instruction isn't seen as pointless and removed.
2440 float_arg->flags |= MONO_INST_VOLATILE;
2442 MONO_EMIT_NEW_UNALU (cfg, OP_FMOVE, float_arg->dreg, in->dreg);
2444 /* We use the dreg to look up the instruction later. The hreg is used to
2445 * emit the instruction that loads the value into the FP reg.
2447 fad = mono_mempool_alloc0 (cfg->mempool, sizeof (FloatArgData));
2448 fad->vreg = float_arg->dreg;
2449 fad->hreg = ainfo->reg;
2451 call->float_args = g_slist_append_mempool (cfg->mempool, call->float_args, fad);
2454 call->used_iregs |= 1 << ainfo->reg;
2455 cfg->flags |= MONO_CFG_HAS_FPOUT;
2459 g_assert_not_reached ();
2463 /* Handle the case where there are no implicit arguments */
2464 if (!sig->pinvoke && (sig->call_convention == MONO_CALL_VARARG) && (n == sig->sentinelpos))
2465 emit_sig_cookie (cfg, call, cinfo);
2467 if (cinfo->ret.storage == RegTypeStructByVal) {
2468 /* The JIT will transform this into a normal call */
2469 call->vret_in_reg = TRUE;
2470 } else if (cinfo->vtype_retaddr) {
/* Pass the vtype return buffer address in its designated register. */
2472 MONO_INST_NEW (cfg, vtarg, OP_MOVE);
2473 vtarg->sreg1 = call->vret_var->dreg;
2474 vtarg->dreg = mono_alloc_preg (cfg);
2475 MONO_ADD_INS (cfg->cbb, vtarg);
2477 mono_call_inst_add_outarg_reg (cfg, call, vtarg->dreg, cinfo->ret.reg, FALSE);
2480 call->stack_usage = cinfo->stack_usage;
/*
 * mono_arch_emit_outarg_vt:
 *   Finish an OP_OUTARG_VT: load the value type pointed to by SRC into
 *   the argument registers word by word (with sub-word structs packed
 *   from bytes), then memcpy any overflow words to the outgoing stack
 *   area.  gsharedvt arguments are passed as a plain address instead.
 */
2486 mono_arch_emit_outarg_vt (MonoCompile *cfg, MonoInst *ins, MonoInst *src)
2488 MonoCallInst *call = (MonoCallInst*)ins->inst_p0;
2489 ArgInfo *ainfo = ins->inst_p1;
2490 int ovf_size = ainfo->vtsize;
2491 int doffset = ainfo->offset;
2492 int struct_size = ainfo->struct_size;
2493 int i, soffset, dreg, tmpreg;
2495 if (ainfo->storage == RegTypeGSharedVtInReg) {
2497 mono_call_inst_add_outarg_reg (cfg, call, src->dreg, ainfo->reg, FALSE);
2500 if (ainfo->storage == RegTypeGSharedVtOnStack) {
2501 /* Pass by addr on stack */
2502 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, ARMREG_SP, ainfo->offset, src->dreg);
/* Copy ainfo->size words of the struct into consecutive argument registers. */
2507 for (i = 0; i < ainfo->size; ++i) {
2508 dreg = mono_alloc_ireg (cfg);
/* Sub-word remainders must not over-read: assemble them from 1-2 byte loads. */
2509 switch (struct_size) {
2511 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, dreg, src->dreg, soffset);
2514 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU2_MEMBASE, dreg, src->dreg, soffset);
/* 3 bytes: compose from three byte loads shifted into place. */
2517 tmpreg = mono_alloc_ireg (cfg);
2518 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, dreg, src->dreg, soffset);
2519 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, tmpreg, src->dreg, soffset + 1);
2520 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, tmpreg, tmpreg, 8);
2521 MONO_EMIT_NEW_BIALU (cfg, OP_IOR, dreg, dreg, tmpreg);
2522 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADU1_MEMBASE, tmpreg, src->dreg, soffset + 2);
2523 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SHL_IMM, tmpreg, tmpreg, 16);
2524 MONO_EMIT_NEW_BIALU (cfg, OP_IOR, dreg, dreg, tmpreg);
2527 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, dreg, src->dreg, soffset);
2530 mono_call_inst_add_outarg_reg (cfg, call, dreg, ainfo->reg + i, FALSE);
2531 soffset += sizeof (gpointer);
2532 struct_size -= sizeof (gpointer);
2534 //g_print ("vt size: %d at R%d + %d\n", doffset, vt->inst_basereg, vt->inst_offset);
/* Remaining ovf_size words did not fit in registers: copy them to the stack area. */
2536 mini_emit_memcpy (cfg, ARMREG_SP, doffset, src->dreg, soffset, MIN (ovf_size * sizeof (gpointer), struct_size), struct_size < 4 ? 1 : 4);
/*
 * mono_arch_emit_setret:
 *   Emit IR that moves VAL into the method's return location.  I8/U8 uses
 *   OP_SETLRET with the vreg pair; floating point returns depend on the
 *   FPU mode (soft-float returns R4 as an int, VFP uses OP_SETFRET);
 *   everything else is a plain OP_MOVE to cfg->ret->dreg.
 *   NOTE(review): lossy extraction — the enclosing switch statement,
 *   braces and some case labels are missing from this view.
 */
2540 mono_arch_emit_setret (MonoCompile *cfg, MonoMethod *method, MonoInst *val)
2542 MonoType *ret = mini_type_get_underlying_type (cfg->generic_sharing_context, mono_method_signature (method)->ret);
2545 if (ret->type == MONO_TYPE_I8 || ret->type == MONO_TYPE_U8) {
2548 if (COMPILE_LLVM (cfg)) {
2549 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, cfg->ret->dreg, val->dreg);
/* non-LLVM: a 64-bit value lives in the vreg pair dreg+1 / dreg+2 */
2551 MONO_INST_NEW (cfg, ins, OP_SETLRET);
2552 ins->sreg1 = val->dreg + 1;
2553 ins->sreg2 = val->dreg + 2;
2554 MONO_ADD_INS (cfg->cbb, ins);
/* FPU-mode dispatch (switch header not visible in this extraction) */
2559 case MONO_ARM_FPU_NONE:
2560 if (ret->type == MONO_TYPE_R8) {
2563 MONO_INST_NEW (cfg, ins, OP_SETFRET);
2564 ins->dreg = cfg->ret->dreg;
2565 ins->sreg1 = val->dreg;
2566 MONO_ADD_INS (cfg->cbb, ins);
2569 if (ret->type == MONO_TYPE_R4) {
2570 /* Already converted to an int in method_to_ir () */
2571 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, cfg->ret->dreg, val->dreg);
2575 case MONO_ARM_FPU_VFP:
2576 case MONO_ARM_FPU_VFP_HARD:
2577 if (ret->type == MONO_TYPE_R8 || ret->type == MONO_TYPE_R4) {
2580 MONO_INST_NEW (cfg, ins, OP_SETFRET);
2581 ins->dreg = cfg->ret->dreg;
2582 ins->sreg1 = val->dreg;
2583 MONO_ADD_INS (cfg->cbb, ins);
2588 g_assert_not_reached ();
/* fallthrough / default: integer-sized return value */
2592 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, cfg->ret->dreg, val->dreg);
2595 #endif /* #ifndef DISABLE_JIT */
2598 mono_arch_is_inst_imm (gint64 imm)
2604 MonoMethodSignature *sig;
2607 MonoType **param_types;
/*
 * dyn_call_supported:
 *   Decide whether the dyn-call (reflection-style invoke) machinery can
 *   handle SIG with the computed CINFO: argument count must fit in the
 *   PARAM_REGS + DYN_CALL_STACK_ARGS slots, and each arg/return storage
 *   kind must be one this path knows how to marshal.
 *   NOTE(review): the FALSE/TRUE returns and several case labels are
 *   missing from this lossy extraction.
 */
2611 dyn_call_supported (CallInfo *cinfo, MonoMethodSignature *sig)
2615 if (sig->hasthis + sig->param_count > PARAM_REGS + DYN_CALL_STACK_ARGS)
2618 switch (cinfo->ret.storage) {
2620 case RegTypeGeneral:
2621 case RegTypeIRegPair:
2622 case RegTypeStructByAddr:
2633 for (i = 0; i < cinfo->nargs; ++i) {
2634 ArgInfo *ainfo = &cinfo->args [i];
2637 switch (ainfo->storage) {
2638 case RegTypeGeneral:
2640 case RegTypeIRegPair:
/* stack-passed args must land inside the reserved dyn-call stack area */
2643 if (ainfo->offset >= (DYN_CALL_STACK_ARGS * sizeof (gpointer)))
2646 case RegTypeStructByVal:
2647 if (ainfo->size == 0)
2648 last_slot = PARAM_REGS + (ainfo->offset / 4) + ainfo->vtsize;
2650 last_slot = ainfo->reg + ainfo->size + ainfo->vtsize;
2651 if (last_slot >= PARAM_REGS + DYN_CALL_STACK_ARGS)
2659 // FIXME: Can't use cinfo only as it doesn't contain info about I8/float */
2660 for (i = 0; i < sig->param_count; ++i) {
2661 MonoType *t = sig->params [i];
2666 t = mini_replace_type (t);
/*
 * mono_arch_dyn_call_prepare:
 *   Build the arch-specific ArchDynCallInfo for SIG: compute the call
 *   info, bail out (returning NULL — return statements not visible in
 *   this extraction) when dyn_call_supported () rejects it, otherwise
 *   cache the CallInfo, the underlying return type and per-parameter
 *   underlying types.  Caller frees via mono_arch_dyn_call_free ().
 */
2689 mono_arch_dyn_call_prepare (MonoMethodSignature *sig)
2691 ArchDynCallInfo *info;
2695 cinfo = get_call_info (NULL, NULL, sig);
2697 if (!dyn_call_supported (cinfo, sig)) {
2702 info = g_new0 (ArchDynCallInfo, 1);
2703 // FIXME: Preprocess the info to speed up start_dyn_call ()
2705 info->cinfo = cinfo;
2706 info->rtype = mini_replace_type (sig->ret);
2707 info->param_types = g_new0 (MonoType*, sig->param_count);
2708 for (i = 0; i < sig->param_count; ++i)
2709 info->param_types [i] = mini_replace_type (sig->params [i]);
2711 return (MonoDynCallInfo*)info;
/*
 * mono_arch_dyn_call_free:
 *   Release an ArchDynCallInfo built by mono_arch_dyn_call_prepare ().
 *   Only the cinfo free is visible here; presumably param_types and the
 *   info struct itself are freed on the extracted-away lines — confirm
 *   against the full source.
 */
2715 mono_arch_dyn_call_free (MonoDynCallInfo *info)
2717 ArchDynCallInfo *ainfo = (ArchDynCallInfo*)info;
2719 g_free (ainfo->cinfo);
/*
 * mono_arch_start_dyn_call:
 *   Marshal ARGS into the DynCallArgs buffer BUF according to INFO so the
 *   dyn-call trampoline can load them into r0-r3 and the reserved stack
 *   slots.  Handles this/vret pointers first, then each parameter by its
 *   MonoType, widening small ints, splitting I8/U8 and R8 into two slots,
 *   and copying by-val structs word by word.
 *   NOTE(review): lossy extraction — case labels, breaks and the slot
 *   initialization for some paths are missing from this view.
 */
2724 mono_arch_start_dyn_call (MonoDynCallInfo *info, gpointer **args, guint8 *ret, guint8 *buf, int buf_len)
2726 ArchDynCallInfo *dinfo = (ArchDynCallInfo*)info;
2727 DynCallArgs *p = (DynCallArgs*)buf;
2728 int arg_index, greg, i, j, pindex;
2729 MonoMethodSignature *sig = dinfo->sig;
2731 g_assert (buf_len >= sizeof (DynCallArgs));
/* implicit 'this' (or the vtype return address when it comes first) */
2740 if (sig->hasthis || dinfo->cinfo->vret_arg_index == 1) {
2741 p->regs [greg ++] = (mgreg_t)*(args [arg_index ++]);
2746 if (dinfo->cinfo->vtype_retaddr)
2747 p->regs [greg ++] = (mgreg_t)ret;
2749 for (i = pindex; i < sig->param_count; i++) {
2750 MonoType *t = dinfo->param_types [i];
2751 gpointer *arg = args [arg_index ++];
2752 ArgInfo *ainfo = &dinfo->cinfo->args [i + sig->hasthis];
/* compute the slot index in p->regs: register args first, then the
 * stack area mapped after PARAM_REGS */
2755 if (ainfo->storage == RegTypeGeneral || ainfo->storage == RegTypeIRegPair || ainfo->storage == RegTypeStructByVal)
2757 else if (ainfo->storage == RegTypeBase)
2758 slot = PARAM_REGS + (ainfo->offset / 4);
2760 g_assert_not_reached ();
2763 p->regs [slot] = (mgreg_t)*arg;
/* type-directed marshalling (switch header not visible here) */
2768 case MONO_TYPE_STRING:
2769 case MONO_TYPE_CLASS:
2770 case MONO_TYPE_ARRAY:
2771 case MONO_TYPE_SZARRAY:
2772 case MONO_TYPE_OBJECT:
2776 p->regs [slot] = (mgreg_t)*arg;
2778 case MONO_TYPE_BOOLEAN:
2780 p->regs [slot] = *(guint8*)arg;
2783 p->regs [slot] = *(gint8*)arg;
2786 p->regs [slot] = *(gint16*)arg;
2789 case MONO_TYPE_CHAR:
2790 p->regs [slot] = *(guint16*)arg;
2793 p->regs [slot] = *(gint32*)arg;
2796 p->regs [slot] = *(guint32*)arg;
/* 64-bit values occupy two consecutive slots */
2800 p->regs [slot ++] = (mgreg_t)arg [0];
2801 p->regs [slot] = (mgreg_t)arg [1];
2804 p->regs [slot] = *(mgreg_t*)arg;
2807 p->regs [slot ++] = (mgreg_t)arg [0];
2808 p->regs [slot] = (mgreg_t)arg [1];
2810 case MONO_TYPE_GENERICINST:
2811 if (MONO_TYPE_IS_REFERENCE (t)) {
2812 p->regs [slot] = (mgreg_t)*arg;
2817 case MONO_TYPE_VALUETYPE:
2818 g_assert (ainfo->storage == RegTypeStructByVal);
/* size == 0 means the struct was fully passed on the stack */
2820 if (ainfo->size == 0)
2821 slot = PARAM_REGS + (ainfo->offset / 4);
2825 for (j = 0; j < ainfo->size + ainfo->vtsize; ++j)
2826 p->regs [slot ++] = ((mgreg_t*)arg) [j];
2829 g_assert_not_reached ();
/*
 * mono_arch_finish_dyn_call:
 *   Copy the raw return values (res/res2 captured from r0/r1 into the
 *   DynCallArgs buffer) back into the caller-provided RET area, with the
 *   store width chosen by the cached return MonoType.
 *   NOTE(review): lossy extraction — breaks and several case labels are
 *   missing from this view.
 */
2835 mono_arch_finish_dyn_call (MonoDynCallInfo *info, guint8 *buf)
2837 ArchDynCallInfo *ainfo = (ArchDynCallInfo*)info;
2838 MonoType *ptype = ainfo->rtype;
2839 guint8 *ret = ((DynCallArgs*)buf)->ret;
2840 mgreg_t res = ((DynCallArgs*)buf)->res;
2841 mgreg_t res2 = ((DynCallArgs*)buf)->res2;
2843 switch (ptype->type) {
2844 case MONO_TYPE_VOID:
2845 *(gpointer*)ret = NULL;
2847 case MONO_TYPE_STRING:
2848 case MONO_TYPE_CLASS:
2849 case MONO_TYPE_ARRAY:
2850 case MONO_TYPE_SZARRAY:
2851 case MONO_TYPE_OBJECT:
2855 *(gpointer*)ret = (gpointer)res;
2861 case MONO_TYPE_BOOLEAN:
2862 *(guint8*)ret = res;
2865 *(gint16*)ret = res;
2868 case MONO_TYPE_CHAR:
2869 *(guint16*)ret = res;
2872 *(gint32*)ret = res;
2875 *(guint32*)ret = res;
/* I8/U8: write both halves as 32-bit words */
2879 /* This handles endianness as well */
2880 ((gint32*)ret) [0] = res;
2881 ((gint32*)ret) [1] = res2;
2883 case MONO_TYPE_GENERICINST:
2884 if (MONO_TYPE_IS_REFERENCE (ptype)) {
2885 *(gpointer*)ret = (gpointer)res;
2890 case MONO_TYPE_VALUETYPE:
/* vtype results were written through the retaddr during the call */
2891 g_assert (ainfo->cinfo->vtype_retaddr);
2896 *(float*)ret = *(float*)&res;
2898 case MONO_TYPE_R8: {
2905 *(double*)ret = *(double*)&regs;
2909 g_assert_not_reached ();
2916 * Allow tracing to work with this interface (with an optional argument)
/*
 * mono_arch_instrument_prolog:
 *   Emit a call to the tracing function FUNC at method entry, passing the
 *   MonoMethod in r0, a (currently NULL) frame pointer in r1, and the
 *   callee address loaded into r2.
 */
2920 mono_arch_instrument_prolog (MonoCompile *cfg, void *func, void *p, gboolean enable_arguments)
2924 code = mono_arm_emit_load_imm (code, ARMREG_R0, (guint32)cfg->method);
2925 ARM_MOV_REG_IMM8 (code, ARMREG_R1, 0); /* NULL ebp for now */
2926 code = mono_arm_emit_load_imm (code, ARMREG_R2, (guint32)func);
2927 code = emit_call_reg (code, ARMREG_R2);
/*
 * mono_arch_instrument_epilog_full:
 *   Emit a call to the trace-leave function FUNC before method exit.
 *   Chooses a save mode from the return type (one reg, reg pair, single
 *   or double VFP reg, struct, or nothing), spills the live return value
 *   into the param area, calls FUNC with the method in r0, then reloads
 *   the saved return value.
 *   NOTE(review): lossy extraction — switch headers, case labels and
 *   breaks are missing from this view.
 */
2941 mono_arch_instrument_epilog_full (MonoCompile *cfg, void *func, void *p, gboolean enable_arguments, gboolean preserve_argument_registers)
2944 int save_mode = SAVE_NONE;
2946 MonoMethod *method = cfg->method;
2947 MonoType *ret_type = mini_type_get_underlying_type (cfg->generic_sharing_context, mono_method_signature (method)->ret);
2948 int rtype = ret_type->type;
2949 int save_offset = cfg->param_area;
/* grow the code buffer if the epilog sequence might not fit */
2953 offset = code - cfg->native_code;
2954 /* we need about 16 instructions */
2955 if (offset > (cfg->code_size - 16 * 4)) {
2956 cfg->code_size *= 2;
2957 cfg->native_code = g_realloc (cfg->native_code, cfg->code_size);
2958 code = cfg->native_code + offset;
/* pick the save mode from the return type (switch header extracted away) */
2961 case MONO_TYPE_VOID:
2962 /* special case string .ctor icall */
2963 if (strcmp (".ctor", method->name) && method->klass == mono_defaults.string_class)
2964 save_mode = SAVE_ONE;
2966 save_mode = SAVE_NONE;
2970 save_mode = SAVE_TWO;
2974 save_mode = SAVE_ONE_FP;
2976 save_mode = SAVE_ONE;
2980 save_mode = SAVE_TWO_FP;
2982 save_mode = SAVE_TWO;
2984 case MONO_TYPE_GENERICINST:
2985 if (!mono_type_generic_inst_is_valuetype (ret_type)) {
2986 save_mode = SAVE_ONE;
2990 case MONO_TYPE_VALUETYPE:
2991 save_mode = SAVE_STRUCT;
2994 save_mode = SAVE_ONE;
/* spill the return value so the trace call cannot clobber it */
2998 switch (save_mode) {
3000 ARM_STR_IMM (code, ARMREG_R0, cfg->frame_reg, save_offset);
3001 ARM_STR_IMM (code, ARMREG_R1, cfg->frame_reg, save_offset + 4);
3002 if (enable_arguments) {
3003 ARM_MOV_REG_REG (code, ARMREG_R2, ARMREG_R1);
3004 ARM_MOV_REG_REG (code, ARMREG_R1, ARMREG_R0);
3008 ARM_STR_IMM (code, ARMREG_R0, cfg->frame_reg, save_offset);
3009 if (enable_arguments) {
3010 ARM_MOV_REG_REG (code, ARMREG_R1, ARMREG_R0);
3014 ARM_FSTS (code, ARM_VFP_F0, cfg->frame_reg, save_offset);
3015 if (enable_arguments) {
3016 ARM_FMRS (code, ARMREG_R1, ARM_VFP_F0);
3020 ARM_FSTD (code, ARM_VFP_D0, cfg->frame_reg, save_offset);
3021 if (enable_arguments) {
3022 ARM_FMDRR (code, ARMREG_R1, ARMREG_R2, ARM_VFP_D0);
3026 if (enable_arguments) {
3027 /* FIXME: get the actual address */
3028 ARM_MOV_REG_REG (code, ARMREG_R1, ARMREG_R0);
/* call the trace function: method in r0, target address via ip */
3036 code = mono_arm_emit_load_imm (code, ARMREG_R0, (guint32)cfg->method);
3037 code = mono_arm_emit_load_imm (code, ARMREG_IP, (guint32)func);
3038 code = emit_call_reg (code, ARMREG_IP);
/* restore the spilled return value */
3040 switch (save_mode) {
3042 ARM_LDR_IMM (code, ARMREG_R0, cfg->frame_reg, save_offset);
3043 ARM_LDR_IMM (code, ARMREG_R1, cfg->frame_reg, save_offset + 4);
3046 ARM_LDR_IMM (code, ARMREG_R0, cfg->frame_reg, save_offset);
3049 ARM_FLDS (code, ARM_VFP_F0, cfg->frame_reg, save_offset);
3052 ARM_FLDD (code, ARM_VFP_D0, cfg->frame_reg, save_offset);
3063 * The immediate field for cond branches is big enough for all reasonable methods
/*
 * EMIT_COND_BRANCH_FLAGS / EMIT_COND_BRANCH:
 *   Emit a conditional branch to ins->inst_true_bb.  The direct-offset
 *   path is disabled ("0 &&"), so a patch-info entry is always recorded
 *   and a placeholder B<cond> 0 is emitted for later patching.
 */
3065 #define EMIT_COND_BRANCH_FLAGS(ins,condcode) \
3066 if (0 && ins->inst_true_bb->native_offset) { \
3067 ARM_B_COND (code, (condcode), (code - cfg->native_code + ins->inst_true_bb->native_offset) & 0xffffff); \
3069 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_BB, ins->inst_true_bb); \
3070 ARM_B_COND (code, (condcode), 0); \
3073 #define EMIT_COND_BRANCH(ins,cond) EMIT_COND_BRANCH_FLAGS(ins, branch_cc_table [(cond)])
3075 /* emit an exception if condition is fail
3077 * We assign the extra code used to throw the implicit exceptions
3078 * to cfg->bb_exit as far as the big branch handling is concerned
/*
 * EMIT_COND_SYSTEM_EXCEPTION_FLAGS / EMIT_COND_SYSTEM_EXCEPTION:
 *   Record an MONO_PATCH_INFO_EXC patch and emit a conditional BL
 *   placeholder that will be patched to the exception-throwing code.
 */
3080 #define EMIT_COND_SYSTEM_EXCEPTION_FLAGS(condcode,exc_name) \
3082 mono_add_patch_info (cfg, code - cfg->native_code, \
3083 MONO_PATCH_INFO_EXC, exc_name); \
3084 ARM_BL_COND (code, (condcode), 0); \
3087 #define EMIT_COND_SYSTEM_EXCEPTION(cond,exc_name) EMIT_COND_SYSTEM_EXCEPTION_FLAGS(branch_cc_table [(cond)], (exc_name))
3090 mono_arch_peephole_pass_1 (MonoCompile *cfg, MonoBasicBlock *bb)
/*
 * mono_arch_peephole_pass_2:
 *   Second peephole pass over BB: folds redundant load-after-store and
 *   load-after-load pairs into moves or constants, narrows loads that
 *   follow a matching narrow store into conversions, and deletes
 *   self-moves and move/move-back pairs.
 *   NOTE(review): lossy extraction — braces, breaks and some opcode
 *   labels are missing from this view.
 */
3095 mono_arch_peephole_pass_2 (MonoCompile *cfg, MonoBasicBlock *bb)
3097 MonoInst *ins, *n, *last_ins = NULL;
3099 MONO_BB_FOR_EACH_INS_SAFE (bb, n, ins) {
3100 switch (ins->opcode) {
3103 /* Already done by an arch-independent pass */
3105 case OP_LOAD_MEMBASE:
3106 case OP_LOADI4_MEMBASE:
/* store/load pair through the same [basereg+offset] — forward the value */
3108 * OP_STORE_MEMBASE_REG reg, offset(basereg)
3109 * OP_LOAD_MEMBASE offset(basereg), reg
3111 if (last_ins && (last_ins->opcode == OP_STOREI4_MEMBASE_REG
3112 || last_ins->opcode == OP_STORE_MEMBASE_REG) &&
3113 ins->inst_basereg == last_ins->inst_destbasereg &&
3114 ins->inst_offset == last_ins->inst_offset) {
3115 if (ins->dreg == last_ins->sreg1) {
3116 MONO_DELETE_INS (bb, ins);
3119 //static int c = 0; printf ("MATCHX %s %d\n", cfg->method->name,c++);
3120 ins->opcode = OP_MOVE;
3121 ins->sreg1 = last_ins->sreg1;
3125 * Note: reg1 must be different from the basereg in the second load
3126 * OP_LOAD_MEMBASE offset(basereg), reg1
3127 * OP_LOAD_MEMBASE offset(basereg), reg2
3129 * OP_LOAD_MEMBASE offset(basereg), reg1
3130 * OP_MOVE reg1, reg2
3132 } if (last_ins && (last_ins->opcode == OP_LOADI4_MEMBASE
3133 || last_ins->opcode == OP_LOAD_MEMBASE) &&
3134 ins->inst_basereg != last_ins->dreg &&
3135 ins->inst_basereg == last_ins->inst_basereg &&
3136 ins->inst_offset == last_ins->inst_offset) {
3138 if (ins->dreg == last_ins->dreg) {
3139 MONO_DELETE_INS (bb, ins);
3142 ins->opcode = OP_MOVE;
3143 ins->sreg1 = last_ins->dreg;
3146 //g_assert_not_reached ();
3150 * OP_STORE_MEMBASE_IMM imm, offset(basereg)
3151 * OP_LOAD_MEMBASE offset(basereg), reg
3153 * OP_STORE_MEMBASE_IMM imm, offset(basereg)
3154 * OP_ICONST reg, imm
3156 } else if (last_ins && (last_ins->opcode == OP_STOREI4_MEMBASE_IMM
3157 || last_ins->opcode == OP_STORE_MEMBASE_IMM) &&
3158 ins->inst_basereg == last_ins->inst_destbasereg &&
3159 ins->inst_offset == last_ins->inst_offset) {
3160 //static int c = 0; printf ("MATCHX %s %d\n", cfg->method->name,c++);
3161 ins->opcode = OP_ICONST;
3162 ins->inst_c0 = last_ins->inst_imm;
/* deliberately asserting: this rule has never been exercised/verified */
3163 g_assert_not_reached (); // check this rule
3167 case OP_LOADU1_MEMBASE:
3168 case OP_LOADI1_MEMBASE:
3169 if (last_ins && (last_ins->opcode == OP_STOREI1_MEMBASE_REG) &&
3170 ins->inst_basereg == last_ins->inst_destbasereg &&
3171 ins->inst_offset == last_ins->inst_offset) {
3172 ins->opcode = (ins->opcode == OP_LOADI1_MEMBASE) ? OP_ICONV_TO_I1 : OP_ICONV_TO_U1;
3173 ins->sreg1 = last_ins->sreg1;
3176 case OP_LOADU2_MEMBASE:
3177 case OP_LOADI2_MEMBASE:
3178 if (last_ins && (last_ins->opcode == OP_STOREI2_MEMBASE_REG) &&
3179 ins->inst_basereg == last_ins->inst_destbasereg &&
3180 ins->inst_offset == last_ins->inst_offset) {
3181 ins->opcode = (ins->opcode == OP_LOADI2_MEMBASE) ? OP_ICONV_TO_I2 : OP_ICONV_TO_U2;
3182 ins->sreg1 = last_ins->sreg1;
3186 ins->opcode = OP_MOVE;
/* OP_MOVE cleanups: drop self-moves and a move immediately undone */
3190 if (ins->dreg == ins->sreg1) {
3191 MONO_DELETE_INS (bb, ins);
3195 * OP_MOVE sreg, dreg
3196 * OP_MOVE dreg, sreg
3198 if (last_ins && last_ins->opcode == OP_MOVE &&
3199 ins->sreg1 == last_ins->dreg &&
3200 ins->dreg == last_ins->sreg1) {
3201 MONO_DELETE_INS (bb, ins);
3209 bb->last_ins = last_ins;
3213 * the branch_cc_table should maintain the order of these
/* branch_cc_table: maps mini condition codes to ARM condition codes for
 * EMIT_COND_BRANCH; the entries themselves are missing from this
 * extraction — see the full source. */
3227 branch_cc_table [] = {
/* ADD_NEW_INS: allocate a new MonoInst with opcode OP into DEST and insert
 * it before the current 'ins' in the basic block being lowered. */
3241 #define ADD_NEW_INS(cfg,dest,op) do { \
3242 MONO_INST_NEW ((cfg), (dest), (op)); \
3243 mono_bblock_insert_before_ins (bb, ins, (dest)); \
/*
 * map_to_reg_reg_op:
 *   Map an immediate/membase opcode to its register-register (or
 *   memindex) equivalent, used by the lowering pass once the immediate
 *   has been materialized into a register.  *_MEMBASE_IMM stores map to
 *   their *_MEMBASE_REG forms.  Asserts on unknown opcodes.
 *   NOTE(review): the switch header and several case labels are missing
 *   from this lossy extraction.
 */
3247 map_to_reg_reg_op (int op)
3256 case OP_COMPARE_IMM:
3258 case OP_ICOMPARE_IMM:
3272 case OP_LOAD_MEMBASE:
3273 return OP_LOAD_MEMINDEX;
3274 case OP_LOADI4_MEMBASE:
3275 return OP_LOADI4_MEMINDEX;
3276 case OP_LOADU4_MEMBASE:
3277 return OP_LOADU4_MEMINDEX;
3278 case OP_LOADU1_MEMBASE:
3279 return OP_LOADU1_MEMINDEX;
3280 case OP_LOADI2_MEMBASE:
3281 return OP_LOADI2_MEMINDEX;
3282 case OP_LOADU2_MEMBASE:
3283 return OP_LOADU2_MEMINDEX;
3284 case OP_LOADI1_MEMBASE:
3285 return OP_LOADI1_MEMINDEX;
3286 case OP_STOREI1_MEMBASE_REG:
3287 return OP_STOREI1_MEMINDEX;
3288 case OP_STOREI2_MEMBASE_REG:
3289 return OP_STOREI2_MEMINDEX;
3290 case OP_STOREI4_MEMBASE_REG:
3291 return OP_STOREI4_MEMINDEX;
3292 case OP_STORE_MEMBASE_REG:
3293 return OP_STORE_MEMINDEX;
3294 case OP_STORER4_MEMBASE_REG:
3295 return OP_STORER4_MEMINDEX;
3296 case OP_STORER8_MEMBASE_REG:
3297 return OP_STORER8_MEMINDEX;
3298 case OP_STORE_MEMBASE_IMM:
3299 return OP_STORE_MEMBASE_REG;
3300 case OP_STOREI1_MEMBASE_IMM:
3301 return OP_STOREI1_MEMBASE_REG;
3302 case OP_STOREI2_MEMBASE_IMM:
3303 return OP_STOREI2_MEMBASE_REG;
3304 case OP_STOREI4_MEMBASE_IMM:
3305 return OP_STOREI4_MEMBASE_REG;
3307 g_assert_not_reached ();
3311 * Remove from the instruction list the instructions that can't be
3312 * represented with very simple instructions with no register
/*
 * mono_arch_lowering_pass:
 *   Lower IR in BB to forms ARM can encode: immediates that don't fit a
 *   rotated imm8 (or the imm12/imm8/fpimm8 offset ranges of loads and
 *   stores) are materialized into registers via OP_ICONST / OP_ADD_IMM,
 *   and the instruction is remapped with map_to_reg_reg_op ().  Also
 *   strength-reduces MUL_IMM, rewrites overflow checks for ARM's carry
 *   semantics, and swaps operands of some fp compares.
 *   NOTE(review): lossy extraction — braces, breaks and several case
 *   labels are missing from this view.
 */
3316 mono_arch_lowering_pass (MonoCompile *cfg, MonoBasicBlock *bb)
3318 MonoInst *ins, *temp, *last_ins = NULL;
3319 int rot_amount, imm8, low_imm;
3321 MONO_BB_FOR_EACH_INS (bb, ins) {
3323 switch (ins->opcode) {
3327 case OP_COMPARE_IMM:
3328 case OP_ICOMPARE_IMM:
/* immediate doesn't fit a rotated imm8: load it into a reg and switch
 * to the reg-reg form of the opcode */
3342 if ((imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount)) < 0) {
3343 ADD_NEW_INS (cfg, temp, OP_ICONST);
3344 temp->inst_c0 = ins->inst_imm;
3345 temp->dreg = mono_alloc_ireg (cfg);
3346 ins->sreg2 = temp->dreg;
3347 ins->opcode = mono_op_imm_to_op (ins->opcode);
3349 if (ins->opcode == OP_SBB || ins->opcode == OP_ISBB || ins->opcode == OP_SUBCC)
/* MUL_IMM strength reduction: *1 → move, *0 → const 0, power of two → shift */
3355 if (ins->inst_imm == 1) {
3356 ins->opcode = OP_MOVE;
3359 if (ins->inst_imm == 0) {
3360 ins->opcode = OP_ICONST;
3364 imm8 = mono_is_power_of_two (ins->inst_imm);
3366 ins->opcode = OP_SHL_IMM;
3367 ins->inst_imm = imm8;
3370 ADD_NEW_INS (cfg, temp, OP_ICONST);
3371 temp->inst_c0 = ins->inst_imm;
3372 temp->dreg = mono_alloc_ireg (cfg);
3373 ins->sreg2 = temp->dreg;
3374 ins->opcode = OP_IMUL;
3380 if (ins->next && (ins->next->opcode == OP_COND_EXC_C || ins->next->opcode == OP_COND_EXC_IC))
3381 /* ARM sets the C flag to 1 if there was _no_ overflow */
3382 ins->next->opcode = OP_COND_EXC_NC;
3385 case OP_IDIV_UN_IMM:
3387 case OP_IREM_UN_IMM:
3388 ADD_NEW_INS (cfg, temp, OP_ICONST);
3389 temp->inst_c0 = ins->inst_imm;
3390 temp->dreg = mono_alloc_ireg (cfg);
3391 ins->sreg2 = temp->dreg;
3392 ins->opcode = mono_op_imm_to_op (ins->opcode);
3394 case OP_LOCALLOC_IMM:
3395 ADD_NEW_INS (cfg, temp, OP_ICONST);
3396 temp->inst_c0 = ins->inst_imm;
3397 temp->dreg = mono_alloc_ireg (cfg);
3398 ins->sreg1 = temp->dreg;
3399 ins->opcode = OP_LOCALLOC;
3401 case OP_LOAD_MEMBASE:
3402 case OP_LOADI4_MEMBASE:
3403 case OP_LOADU4_MEMBASE:
3404 case OP_LOADU1_MEMBASE:
3405 /* we can do two things: load the immed in a register
3406 * and use an indexed load, or see if the immed can be
3407 * represented as an ad_imm + a load with a smaller offset
3408 * that fits. We just do the first for now, optimize later.
3410 if (arm_is_imm12 (ins->inst_offset))
3412 ADD_NEW_INS (cfg, temp, OP_ICONST);
3413 temp->inst_c0 = ins->inst_offset;
3414 temp->dreg = mono_alloc_ireg (cfg);
3415 ins->sreg2 = temp->dreg;
3416 ins->opcode = map_to_reg_reg_op (ins->opcode);
3418 case OP_LOADI2_MEMBASE:
3419 case OP_LOADU2_MEMBASE:
3420 case OP_LOADI1_MEMBASE:
/* halfword/signed-byte loads only have an 8-bit offset field */
3421 if (arm_is_imm8 (ins->inst_offset))
3423 ADD_NEW_INS (cfg, temp, OP_ICONST);
3424 temp->inst_c0 = ins->inst_offset;
3425 temp->dreg = mono_alloc_ireg (cfg);
3426 ins->sreg2 = temp->dreg;
3427 ins->opcode = map_to_reg_reg_op (ins->opcode);
3429 case OP_LOADR4_MEMBASE:
3430 case OP_LOADR8_MEMBASE:
/* VFP loads: try base + rotated-imm add with a small residual offset,
 * otherwise compute the full address with an explicit IADD */
3431 if (arm_is_fpimm8 (ins->inst_offset))
3433 low_imm = ins->inst_offset & 0x1ff;
3434 if ((imm8 = mono_arm_is_rotated_imm8 (ins->inst_offset & ~0x1ff, &rot_amount)) >= 0) {
3435 ADD_NEW_INS (cfg, temp, OP_ADD_IMM);
3436 temp->inst_imm = ins->inst_offset & ~0x1ff;
3437 temp->sreg1 = ins->inst_basereg;
3438 temp->dreg = mono_alloc_ireg (cfg);
3439 ins->inst_basereg = temp->dreg;
3440 ins->inst_offset = low_imm;
3444 ADD_NEW_INS (cfg, temp, OP_ICONST);
3445 temp->inst_c0 = ins->inst_offset;
3446 temp->dreg = mono_alloc_ireg (cfg);
3448 ADD_NEW_INS (cfg, add_ins, OP_IADD);
3449 add_ins->sreg1 = ins->inst_basereg;
3450 add_ins->sreg2 = temp->dreg;
3451 add_ins->dreg = mono_alloc_ireg (cfg);
3453 ins->inst_basereg = add_ins->dreg;
3454 ins->inst_offset = 0;
3457 case OP_STORE_MEMBASE_REG:
3458 case OP_STOREI4_MEMBASE_REG:
3459 case OP_STOREI1_MEMBASE_REG:
3460 if (arm_is_imm12 (ins->inst_offset))
3462 ADD_NEW_INS (cfg, temp, OP_ICONST);
3463 temp->inst_c0 = ins->inst_offset;
3464 temp->dreg = mono_alloc_ireg (cfg);
3465 ins->sreg2 = temp->dreg;
3466 ins->opcode = map_to_reg_reg_op (ins->opcode);
3468 case OP_STOREI2_MEMBASE_REG:
3469 if (arm_is_imm8 (ins->inst_offset))
3471 ADD_NEW_INS (cfg, temp, OP_ICONST);
3472 temp->inst_c0 = ins->inst_offset;
3473 temp->dreg = mono_alloc_ireg (cfg);
3474 ins->sreg2 = temp->dreg;
3475 ins->opcode = map_to_reg_reg_op (ins->opcode);
3477 case OP_STORER4_MEMBASE_REG:
3478 case OP_STORER8_MEMBASE_REG:
3479 if (arm_is_fpimm8 (ins->inst_offset))
3481 low_imm = ins->inst_offset & 0x1ff;
3482 if ((imm8 = mono_arm_is_rotated_imm8 (ins->inst_offset & ~ 0x1ff, &rot_amount)) >= 0 && arm_is_fpimm8 (low_imm)) {
3483 ADD_NEW_INS (cfg, temp, OP_ADD_IMM);
3484 temp->inst_imm = ins->inst_offset & ~0x1ff;
3485 temp->sreg1 = ins->inst_destbasereg;
3486 temp->dreg = mono_alloc_ireg (cfg);
3487 ins->inst_destbasereg = temp->dreg;
3488 ins->inst_offset = low_imm;
3492 ADD_NEW_INS (cfg, temp, OP_ICONST);
3493 temp->inst_c0 = ins->inst_offset;
3494 temp->dreg = mono_alloc_ireg (cfg);
3496 ADD_NEW_INS (cfg, add_ins, OP_IADD);
3497 add_ins->sreg1 = ins->inst_destbasereg;
3498 add_ins->sreg2 = temp->dreg;
3499 add_ins->dreg = mono_alloc_ireg (cfg);
3501 ins->inst_destbasereg = add_ins->dreg;
3502 ins->inst_offset = 0;
3505 case OP_STORE_MEMBASE_IMM:
3506 case OP_STOREI1_MEMBASE_IMM:
3507 case OP_STOREI2_MEMBASE_IMM:
3508 case OP_STOREI4_MEMBASE_IMM:
/* first turn the immediate store into a register store, then loop to
 * let the *_MEMBASE_REG case fix up a possibly-big offset */
3509 ADD_NEW_INS (cfg, temp, OP_ICONST);
3510 temp->inst_c0 = ins->inst_imm;
3511 temp->dreg = mono_alloc_ireg (cfg);
3512 ins->sreg1 = temp->dreg;
3513 ins->opcode = map_to_reg_reg_op (ins->opcode);
3515 goto loop_start; /* make it handle the possibly big ins->inst_offset */
3517 gboolean swap = FALSE;
3521 /* Optimized away */
3526 /* Some fp compares require swapped operands */
3527 switch (ins->next->opcode) {
3529 ins->next->opcode = OP_FBLT;
3533 ins->next->opcode = OP_FBLT_UN;
3537 ins->next->opcode = OP_FBGE;
3541 ins->next->opcode = OP_FBGE_UN;
3549 ins->sreg1 = ins->sreg2;
3558 bb->last_ins = last_ins;
3559 bb->max_vreg = cfg->next_vreg;
/*
 * mono_arch_decompose_long_opts:
 *   Decompose 64-bit IR ops into 32-bit pairs.  Visible here: OP_LNEG
 *   becomes RSBS/RSC with 0 on the low/high halves (the vreg pair at
 *   dreg+1 / dreg+2), i.e. a two-word negate using the carry chain.
 */
3563 mono_arch_decompose_long_opts (MonoCompile *cfg, MonoInst *long_ins)
3567 if (long_ins->opcode == OP_LNEG) {
3569 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ARM_RSBS_IMM, ins->dreg + 1, ins->sreg1 + 1, 0);
3570 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ARM_RSC_IMM, ins->dreg + 2, ins->sreg1 + 2, 0);
/*
 * emit_float_to_int:
 *   Emit VFP code converting the float in SREG to an integer in DREG:
 *   TOSIZD/TOUIZD into a scratch VFP reg (signed/unsigned select not
 *   fully visible here), move to the core reg with FMRS, then mask or
 *   sign-extend with shifts for sizes 1 and 2.
 */
3576 emit_float_to_int (MonoCompile *cfg, guchar *code, int dreg, int sreg, int size, gboolean is_signed)
3578 /* sreg is a float, dreg is an integer reg */
3580 code = mono_arm_emit_vfp_scratch_save (cfg, code, vfp_scratch1);
3582 ARM_TOSIZD (code, vfp_scratch1, sreg);
3584 ARM_TOUIZD (code, vfp_scratch1, sreg);
3585 ARM_FMRS (code, dreg, vfp_scratch1);
3586 code = mono_arm_emit_vfp_scratch_restore (cfg, code, vfp_scratch1);
/* unsigned narrow results: mask to 8 bits, or shift-pair to 16 bits */
3590 ARM_AND_REG_IMM8 (code, dreg, dreg, 0xff);
3591 else if (size == 2) {
3592 ARM_SHL_IMM (code, dreg, dreg, 16);
3593 ARM_SHR_IMM (code, dreg, dreg, 16);
/* signed narrow results: shift left then arithmetic shift right */
3597 ARM_SHL_IMM (code, dreg, dreg, 24);
3598 ARM_SAR_IMM (code, dreg, dreg, 24);
3599 } else if (size == 2) {
3600 ARM_SHL_IMM (code, dreg, dreg, 16);
3601 ARM_SAR_IMM (code, dreg, dreg, 16);
3607 #endif /* #ifndef DISABLE_JIT */
3611 const guchar *target;
3616 #define is_call_imm(diff) ((gint)(diff) >= -33554432 && (gint)(diff) <= 33554431)
/*
 * search_thunk_slot:
 *   mono_code_manager_foreach / mono_domain_code_foreach callback that
 *   looks through a code chunk's thunk area for either an existing thunk
 *   targeting pdata->target (reuse it) or a free 3-word slot (emit a new
 *   ldr ip, [pc] / bx|mov pc / target-constant thunk), then patches
 *   pdata->code to branch to it.  Skips chunks the caller can't reach
 *   with a 24-bit branch.
 *   NOTE(review): lossy extraction — returns, loop increments and braces
 *   are missing from this view.
 */
3619 search_thunk_slot (void *data, int csize, int bsize, void *user_data) {
3620 PatchData *pdata = (PatchData*)user_data;
3621 guchar *code = data;
3622 guint32 *thunks = data;
3623 guint32 *endthunks = (guint32*)(code + bsize);
3625 int difflow, diffhigh;
3627 /* always ensure a call from pdata->code can reach to the thunks without further thunks */
3628 difflow = (char*)pdata->code - (char*)thunks;
3629 diffhigh = (char*)pdata->code - (char*)endthunks;
3630 if (!((is_call_imm (thunks) && is_call_imm (endthunks)) || (is_call_imm (difflow) && is_call_imm (diffhigh))))
3634 * The thunk is composed of 3 words:
3635 * load constant from thunks [2] into ARM_IP
3638 * Note that the LR register is already setup
3640 //g_print ("thunk nentries: %d\n", ((char*)endthunks - (char*)thunks)/16);
3641 if ((pdata->found == 2) || (pdata->code >= code && pdata->code <= code + csize)) {
3642 while (thunks < endthunks) {
3643 //g_print ("looking for target: %p at %p (%08x-%08x)\n", pdata->target, thunks, thunks [0], thunks [1]);
/* word 2 holds the target constant: match → reuse this thunk */
3644 if (thunks [2] == (guint32)pdata->target) {
3645 arm_patch (pdata->code, (guchar*)thunks);
3646 mono_arch_flush_icache (pdata->code, 4);
3649 } else if ((thunks [0] == 0) && (thunks [1] == 0) && (thunks [2] == 0)) {
3650 /* found a free slot instead: emit thunk */
3651 /* ARMREG_IP is fine to use since this can't be an IMT call
3654 code = (guchar*)thunks;
3655 ARM_LDR_IMM (code, ARMREG_IP, ARMREG_PC, 0);
3656 if (thumb_supported)
3657 ARM_BX (code, ARMREG_IP);
3659 ARM_MOV_REG_REG (code, ARMREG_PC, ARMREG_IP);
3660 thunks [2] = (guint32)pdata->target;
3661 mono_arch_flush_icache ((guchar*)thunks, 12);
3663 arm_patch (pdata->code, (guchar*)thunks);
3664 mono_arch_flush_icache (pdata->code, 4);
3668 /* skip 12 bytes, the size of the thunk */
3672 //g_print ("failed thunk lookup for %p from %p at %p (%d entries)\n", pdata->target, pdata->code, data, count);
/*
 * handle_thunk:
 *   Find or create a branch thunk for CODE → TARGET when the displacement
 *   doesn't fit a direct branch.  Searches, in order: the dynamic-method
 *   code manager (if given), the domain's code chunks, then — with
 *   pdata.found forced to the "use first free slot" mode — the domain
 *   again, and finally every dynamic method's code manager.  Aborts via
 *   g_assert if no slot is found.
 */
3678 handle_thunk (MonoDomain *domain, int absolute, guchar *code, const guchar *target, MonoCodeManager *dyn_code_mp)
3683 domain = mono_domain_get ();
3686 pdata.target = target;
3687 pdata.absolute = absolute;
3691 mono_code_manager_foreach (dyn_code_mp, search_thunk_slot, &pdata);
3694 if (pdata.found != 1) {
3695 mono_domain_lock (domain);
3696 mono_domain_code_foreach (domain, search_thunk_slot, &pdata);
3699 /* this uses the first available slot */
3701 mono_domain_code_foreach (domain, search_thunk_slot, &pdata);
3703 mono_domain_unlock (domain);
3706 if (pdata.found != 1) {
3708 GHashTableIter iter;
3709 MonoJitDynamicMethodInfo *ji;
3712 * This might be a dynamic method, search its code manager. We can only
3713 * use the dynamic method containing CODE, since the others might be freed later.
3717 mono_domain_lock (domain);
3718 hash = domain_jit_info (domain)->dynamic_code_hash;
3720 /* FIXME: Speed this up */
3721 g_hash_table_iter_init (&iter, hash);
3722 while (g_hash_table_iter_next (&iter, NULL, (gpointer*)&ji)) {
3723 mono_code_manager_foreach (ji->code_mp, search_thunk_slot, &pdata);
3724 if (pdata.found == 1)
3728 mono_domain_unlock (domain);
3730 if (pdata.found != 1)
3731 g_print ("thunk failed for %p from %p\n", target, code);
3732 g_assert (pdata.found == 1);
/*
 * arm_patch_general:
 *   Patch the instruction (sequence) at CODE to transfer to TARGET.
 *   Handles: direct B/BL (rewriting the 24-bit displacement, converting
 *   BL→BLX when the target's low bit marks a Thumb entry, falling back to
 *   handle_thunk () when out of range); jump tables (USE_JUMP_TABLES);
 *   and the ldr ip,[pc] / mov|bx / embedded-constant call sequences,
 *   where the address constant word is rewritten in place.
 *   NOTE(review): lossy extraction — braces, returns and a few statements
 *   are missing from this view.
 */
3736 arm_patch_general (MonoDomain *domain, guchar *code, const guchar *target, MonoCodeManager *dyn_code_mp)
3738 guint32 *code32 = (void*)code;
3739 guint32 ins = *code32;
3740 guint32 prim = (ins >> 25) & 7;
3741 guint32 tval = GPOINTER_TO_UINT (target);
3743 //g_print ("patching 0x%08x (0x%08x) to point to 0x%08x\n", code, ins, target);
3744 if (prim == 5) { /* 101b */
3745 /* the diff starts 8 bytes from the branch opcode */
3746 gint diff = target - code - 8;
3748 gint tmask = 0xffffffff;
3749 if (tval & 1) { /* entering thumb mode */
3750 diff = target - 1 - code - 8;
3751 g_assert (thumb_supported);
3752 tbits = 0xf << 28; /* bl->blx bit pattern */
3753 g_assert ((ins & (1 << 24))); /* it must be a bl, not b instruction */
3754 /* this low bit of the displacement is moved to bit 24 in the instruction encoding */
3758 tmask = ~(1 << 24); /* clear the link bit */
3759 /*g_print ("blx to thumb: target: %p, code: %p, diff: %d, mask: %x\n", target, code, diff, tmask);*/
/* forward branch within the +32MB range */
3764 if (diff <= 33554431) {
3766 ins = (ins & 0xff000000) | diff;
3768 *code32 = ins | tbits;
3772 /* diff between 0 and -33554432 */
3773 if (diff >= -33554432) {
3775 ins = (ins & 0xff000000) | (diff & ~0xff000000);
3777 *code32 = ins | tbits;
/* out of direct-branch range: route through a thunk */
3782 handle_thunk (domain, TRUE, code, target, dyn_code_mp);
3786 #ifdef USE_JUMP_TABLES
3788 gpointer *jte = mono_jumptable_get_entry (code);
3790 jte [0] = (gpointer) target;
3794 * The alternative call sequences looks like this:
3796 * ldr ip, [pc] // loads the address constant
3797 * b 1f // jumps around the constant
3798 * address constant embedded in the code
3803 * There are two cases for patching:
3804 * a) at the end of method emission: in this case code points to the start
3805 * of the call sequence
3806 * b) during runtime patching of the call site: in this case code points
3807 * to the mov pc, ip instruction
3809 * We have to handle also the thunk jump code sequence:
3813 * address constant // execution never reaches here
3815 if ((ins & 0x0ffffff0) == 0x12fff10) {
3816 /* Branch and exchange: the address is constructed in a reg
3817 * We can patch BX when the code sequence is the following:
3818 * ldr ip, [pc, #0] ; 0x8
/* re-emit the expected sequence into a scratch buffer and compare it
 * against the code at the patch site to locate the constant word */
3825 guint8 *emit = (guint8*)ccode;
3826 ARM_LDR_IMM (emit, ARMREG_IP, ARMREG_PC, 0);
3828 ARM_MOV_REG_REG (emit, ARMREG_LR, ARMREG_PC);
3829 ARM_BX (emit, ARMREG_IP);
3831 /*patching from magic trampoline*/
3832 if (ins == ccode [3]) {
3833 g_assert (code32 [-4] == ccode [0]);
3834 g_assert (code32 [-3] == ccode [1]);
3835 g_assert (code32 [-1] == ccode [2]);
3836 code32 [-2] = (guint32)target;
3839 /*patching from JIT*/
3840 if (ins == ccode [0]) {
3841 g_assert (code32 [1] == ccode [1]);
3842 g_assert (code32 [3] == ccode [2]);
3843 g_assert (code32 [4] == ccode [3]);
3844 code32 [2] = (guint32)target;
3847 g_assert_not_reached ();
3848 } else if ((ins & 0x0ffffff0) == 0x12fff30) {
/* BLX reg sequence */
3856 guint8 *emit = (guint8*)ccode;
3857 ARM_LDR_IMM (emit, ARMREG_IP, ARMREG_PC, 0);
3859 ARM_BLX_REG (emit, ARMREG_IP);
3861 g_assert (code32 [-3] == ccode [0]);
3862 g_assert (code32 [-2] == ccode [1]);
3863 g_assert (code32 [0] == ccode [2]);
3865 code32 [-1] = (guint32)target;
3868 guint32 *tmp = ccode;
3869 guint8 *emit = (guint8*)tmp;
3870 ARM_LDR_IMM (emit, ARMREG_IP, ARMREG_PC, 0);
3871 ARM_MOV_REG_REG (emit, ARMREG_LR, ARMREG_PC);
3872 ARM_MOV_REG_REG (emit, ARMREG_PC, ARMREG_IP);
3873 ARM_BX (emit, ARMREG_IP);
3874 if (ins == ccode [2]) {
3875 g_assert_not_reached (); // should be -2 ...
3876 code32 [-1] = (guint32)target;
3879 if (ins == ccode [0]) {
3880 /* handles both thunk jump code and the far call sequence */
3881 code32 [2] = (guint32)target;
3884 g_assert_not_reached ();
3886 // g_print ("patched with 0x%08x\n", ins);
/* arm_patch: convenience wrapper over arm_patch_general () with no domain
 * and no dynamic-method code manager. */
3891 arm_patch (guchar *code, const guchar *target)
3893 arm_patch_general (NULL, code, target, NULL);
3897 * Return the >= 0 uimm8 value if val can be represented with a byte + rotation
3898 * (with the rotation amount in *rot_amount. rot_amount is already adjusted
3899 * to be used with the emit macros.
3900 * Return -1 otherwise.
/* Tries every even rotation (ARM immediates rotate in steps of 2 bits);
 * the test and return lines are missing from this extraction. */
3903 mono_arm_is_rotated_imm8 (guint32 val, gint *rot_amount)
3906 for (i = 0; i < 31; i+= 2) {
3907 res = (val << (32 - i)) | (val >> i);
3910 *rot_amount = i? 32 - i: 0;
3917 * Emits in code a sequence of instructions that load the value 'val'
3918 * into the dreg register. Uses at most 4 instructions.
/*
 * Strategy, in order: PC-relative constant-pool load (conditional path,
 * guard not visible here); single MOV/MVN with a rotated imm8; MOVW/MOVT
 * (v7 path, guard not visible); otherwise build the value byte by byte
 * with MOV + up to three ADDs, skipping zero bytes.
 */
3921 mono_arm_emit_load_imm (guint8 *code, int dreg, guint32 val)
3923 int imm8, rot_amount;
3925 ARM_LDR_IMM (code, dreg, ARMREG_PC, 0);
3926 /* skip the constant pool */
3932 if ((imm8 = mono_arm_is_rotated_imm8 (val, &rot_amount)) >= 0) {
3933 ARM_MOV_REG_IMM (code, dreg, imm8, rot_amount);
3934 } else if ((imm8 = mono_arm_is_rotated_imm8 (~val, &rot_amount)) >= 0) {
3935 ARM_MVN_REG_IMM (code, dreg, imm8, rot_amount);
3938 ARM_MOVW_REG_IMM (code, dreg, val & 0xffff);
3940 ARM_MOVT_REG_IMM (code, dreg, (val >> 16) & 0xffff);
/* byte-by-byte fallback: seed with the lowest non-zero byte, then ADD
 * each remaining byte rotated into position */
3944 ARM_MOV_REG_IMM8 (code, dreg, (val & 0xFF));
3946 ARM_ADD_REG_IMM (code, dreg, dreg, (val & 0xFF00) >> 8, 24);
3948 if (val & 0xFF0000) {
3949 ARM_ADD_REG_IMM (code, dreg, dreg, (val & 0xFF0000) >> 16, 16);
3951 if (val & 0xFF000000) {
3952 ARM_ADD_REG_IMM (code, dreg, dreg, (val & 0xFF000000) >> 24, 8);
3954 } else if (val & 0xFF00) {
3955 ARM_MOV_REG_IMM (code, dreg, (val & 0xFF00) >> 8, 24);
3956 if (val & 0xFF0000) {
3957 ARM_ADD_REG_IMM (code, dreg, dreg, (val & 0xFF0000) >> 16, 16);
3959 if (val & 0xFF000000) {
3960 ARM_ADD_REG_IMM (code, dreg, dreg, (val & 0xFF000000) >> 24, 8);
3962 } else if (val & 0xFF0000) {
3963 ARM_MOV_REG_IMM (code, dreg, (val & 0xFF0000) >> 16, 16);
3964 if (val & 0xFF000000) {
3965 ARM_ADD_REG_IMM (code, dreg, dreg, (val & 0xFF000000) >> 24, 8);
3968 //g_assert_not_reached ();
/*
 * mono_arm_thumb_supported:
 *
 * Accessor for the file-scope thumb_supported flag (set elsewhere,
 * presumably during CPU feature detection — not visible in this view).
 */
3974 mono_arm_thumb_supported (void)
3976 return thumb_supported;
3982 * emit_load_volatile_arguments:
3984 * Load volatile arguments from the stack to the original input registers.
3985 * Required before a tail call.
/*
 * Re-materializes the incoming argument registers from their stack homes so
 * a tail call can be made with the ABI register state restored. Mirrors (in
 * reverse) the spilling done in the prolog, driven by the same per-argument
 * ArgInfo layout from get_call_info ().
 */
3988 emit_load_volatile_arguments (MonoCompile *cfg, guint8 *code)
3990 MonoMethod *method = cfg->method;
3991 MonoMethodSignature *sig;
3996 /* FIXME: Generate intermediate code instead */
3998 sig = mono_method_signature (method);
4000 /* This is the opposite of the code in emit_prolog */
4004 cinfo = get_call_info (cfg->generic_sharing_context, NULL, sig);
/* Valuetype returned via hidden address argument: reload that address. */
4006 if (cinfo->vtype_retaddr) {
4007 ArgInfo *ainfo = &cinfo->ret;
4008 inst = cfg->vret_addr;
4009 g_assert (arm_is_imm12 (inst->inst_offset));
4010 ARM_LDR_IMM (code, ainfo->reg, inst->inst_basereg, inst->inst_offset);
/* One iteration per formal argument, plus the implicit 'this'. */
4012 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
4013 ArgInfo *ainfo = cinfo->args + i;
4014 inst = cfg->args [pos];
4016 if (cfg->verbose_level > 2)
4017 g_print ("Loading argument %d (type: %d)\n", i, ainfo->storage);
/* Argument lives in a register variable: move/load it back directly. */
4018 if (inst->opcode == OP_REGVAR) {
4019 if (ainfo->storage == RegTypeGeneral)
4020 ARM_MOV_REG_REG (code, inst->dreg, ainfo->reg);
4021 else if (ainfo->storage == RegTypeFP) {
4022 g_assert_not_reached ();
4023 } else if (ainfo->storage == RegTypeBase) {
4027 if (arm_is_imm12 (prev_sp_offset + ainfo->offset)) {
4028 ARM_LDR_IMM (code, inst->dreg, ARMREG_SP, (prev_sp_offset + ainfo->offset));
/*
 * NOTE(review): this fallback loads inst->inst_offset into IP, while the
 * imm12 fast path above uses prev_sp_offset + ainfo->offset — the two
 * branches address different offsets for the same logical load. Looks
 * inconsistent; verify against the full source / fix upstream.
 */
4030 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
4031 ARM_LDR_REG_REG (code, inst->dreg, ARMREG_SP, ARMREG_IP);
4035 g_assert_not_reached ();
/* Argument spilled to the frame: reload into its ABI register(s). */
4037 if (ainfo->storage == RegTypeGeneral || ainfo->storage == RegTypeIRegPair) {
4038 switch (ainfo->size) {
/* 8-byte case: reload the register pair (reg, reg + 1) from two words. */
4045 g_assert (arm_is_imm12 (inst->inst_offset));
4046 ARM_LDR_IMM (code, ainfo->reg, inst->inst_basereg, inst->inst_offset);
4047 g_assert (arm_is_imm12 (inst->inst_offset + 4));
4048 ARM_LDR_IMM (code, ainfo->reg + 1, inst->inst_basereg, inst->inst_offset + 4);
/* Default word-sized case, with a scratch-IP path for large offsets. */
4051 if (arm_is_imm12 (inst->inst_offset)) {
4052 ARM_LDR_IMM (code, ainfo->reg, inst->inst_basereg, inst->inst_offset);
4054 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
4055 ARM_LDR_REG_REG (code, ainfo->reg, inst->inst_basereg, ARMREG_IP);
/* Remaining storage classes; bodies mostly not visible in this view. */
4059 } else if (ainfo->storage == RegTypeBaseGen) {
4062 } else if (ainfo->storage == RegTypeBase) {
4064 } else if (ainfo->storage == RegTypeFP) {
4065 g_assert_not_reached ();
/* Struct passed by value in registers: reload each word into consecutive regs. */
4066 } else if (ainfo->storage == RegTypeStructByVal) {
4067 int doffset = inst->inst_offset;
4071 if (mono_class_from_mono_type (inst->inst_vtype))
4072 size = mono_class_native_size (mono_class_from_mono_type (inst->inst_vtype), NULL);
4073 for (cur_reg = 0; cur_reg < ainfo->size; ++cur_reg) {
4074 if (arm_is_imm12 (doffset)) {
4075 ARM_LDR_IMM (code, ainfo->reg + cur_reg, inst->inst_basereg, doffset);
4077 code = mono_arm_emit_load_imm (code, ARMREG_IP, doffset);
4078 ARM_LDR_REG_REG (code, ainfo->reg + cur_reg, inst->inst_basereg, ARMREG_IP);
4080 soffset += sizeof (gpointer);
4081 doffset += sizeof (gpointer);
4086 } else if (ainfo->storage == RegTypeStructByAddr) {
4101 mono_arch_output_basic_block (MonoCompile *cfg, MonoBasicBlock *bb)
4106 guint8 *code = cfg->native_code + cfg->code_len;
4107 MonoInst *last_ins = NULL;
4108 guint last_offset = 0;
4110 int imm8, rot_amount;
4112 /* we don't align basic blocks of loops on arm */
4114 if (cfg->verbose_level > 2)
4115 g_print ("Basic block %d starting at offset 0x%x\n", bb->block_num, bb->native_offset);
4117 cpos = bb->max_offset;
4119 if (cfg->prof_options & MONO_PROFILE_COVERAGE) {
4120 //MonoCoverageInfo *cov = mono_get_coverage_info (cfg->method);
4121 //g_assert (!mono_compile_aot);
4124 // cov->data [bb->dfn].iloffset = bb->cil_code - cfg->cil_code;
4125 /* this is not thread save, but good enough */
4126 /* fixme: howto handle overflows? */
4127 //x86_inc_mem (code, &cov->data [bb->dfn].count);
4130 if (mono_break_at_bb_method && mono_method_desc_full_match (mono_break_at_bb_method, cfg->method) && bb->block_num == mono_break_at_bb_bb_num) {
4131 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_INTERNAL_METHOD,
4132 (gpointer)"mono_break");
4133 code = emit_call_seq (cfg, code);
4136 MONO_BB_FOR_EACH_INS (bb, ins) {
4137 offset = code - cfg->native_code;
4139 max_len = ((guint8 *)ins_get_spec (ins->opcode))[MONO_INST_LEN];
4141 if (offset > (cfg->code_size - max_len - 16)) {
4142 cfg->code_size *= 2;
4143 cfg->native_code = g_realloc (cfg->native_code, cfg->code_size);
4144 code = cfg->native_code + offset;
4146 // if (ins->cil_code)
4147 // g_print ("cil code\n");
4148 mono_debug_record_line_number (cfg, ins, offset);
4150 switch (ins->opcode) {
4151 case OP_MEMORY_BARRIER:
4153 ARM_MOV_REG_IMM8 (code, ARMREG_R0, 0);
4154 ARM_MCR (code, 15, 0, ARMREG_R0, 7, 10, 5);
4158 #ifdef HAVE_AEABI_READ_TP
4159 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_INTERNAL_METHOD,
4160 (gpointer)"__aeabi_read_tp");
4161 code = emit_call_seq (cfg, code);
4163 ARM_LDR_IMM (code, ins->dreg, ARMREG_R0, ins->inst_offset);
4165 g_assert_not_reached ();
4168 case OP_ATOMIC_EXCHANGE_I4:
4169 case OP_ATOMIC_CAS_I4:
4170 case OP_ATOMIC_ADD_I4: {
4174 g_assert (v7_supported);
4177 if (ins->sreg1 != ARMREG_IP && ins->sreg2 != ARMREG_IP && ins->sreg3 != ARMREG_IP)
4179 else if (ins->sreg1 != ARMREG_R0 && ins->sreg2 != ARMREG_R0 && ins->sreg3 != ARMREG_R0)
4181 else if (ins->sreg1 != ARMREG_R1 && ins->sreg2 != ARMREG_R1 && ins->sreg3 != ARMREG_R1)
4185 g_assert (cfg->arch.atomic_tmp_offset != -1);
4186 ARM_STR_IMM (code, tmpreg, cfg->frame_reg, cfg->arch.atomic_tmp_offset);
4188 switch (ins->opcode) {
4189 case OP_ATOMIC_EXCHANGE_I4:
4191 ARM_DMB (code, ARM_DMB_SY);
4192 ARM_LDREX_REG (code, ARMREG_LR, ins->sreg1);
4193 ARM_STREX_REG (code, tmpreg, ins->sreg2, ins->sreg1);
4194 ARM_CMP_REG_IMM (code, tmpreg, 0, 0);
4196 ARM_B_COND (code, ARMCOND_NE, 0);
4197 arm_patch (buf [1], buf [0]);
4199 case OP_ATOMIC_CAS_I4:
4200 ARM_DMB (code, ARM_DMB_SY);
4202 ARM_LDREX_REG (code, ARMREG_LR, ins->sreg1);
4203 ARM_CMP_REG_REG (code, ARMREG_LR, ins->sreg3);
4205 ARM_B_COND (code, ARMCOND_NE, 0);
4206 ARM_STREX_REG (code, tmpreg, ins->sreg2, ins->sreg1);
4207 ARM_CMP_REG_IMM (code, tmpreg, 0, 0);
4209 ARM_B_COND (code, ARMCOND_NE, 0);
4210 arm_patch (buf [2], buf [0]);
4211 arm_patch (buf [1], code);
4213 case OP_ATOMIC_ADD_I4:
4215 ARM_DMB (code, ARM_DMB_SY);
4216 ARM_LDREX_REG (code, ARMREG_LR, ins->sreg1);
4217 ARM_ADD_REG_REG (code, ARMREG_LR, ARMREG_LR, ins->sreg2);
4218 ARM_STREX_REG (code, tmpreg, ARMREG_LR, ins->sreg1);
4219 ARM_CMP_REG_IMM (code, tmpreg, 0, 0);
4221 ARM_B_COND (code, ARMCOND_NE, 0);
4222 arm_patch (buf [1], buf [0]);
4225 g_assert_not_reached ();
4228 ARM_DMB (code, ARM_DMB_SY);
4229 if (tmpreg != ins->dreg)
4230 ARM_LDR_IMM (code, tmpreg, cfg->frame_reg, cfg->arch.atomic_tmp_offset);
4231 ARM_MOV_REG_REG (code, ins->dreg, ARMREG_LR);
4236 ppc_mullw (code, ppc_r4, ins->sreg1, ins->sreg2);
4237 ppc_mulhw (code, ppc_r3, ins->sreg1, ins->sreg2);
4240 ppc_mullw (code, ppc_r4, ins->sreg1, ins->sreg2);
4241 ppc_mulhwu (code, ppc_r3, ins->sreg1, ins->sreg2);
4243 case OP_STOREI1_MEMBASE_IMM:
4244 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_imm & 0xFF);
4245 g_assert (arm_is_imm12 (ins->inst_offset));
4246 ARM_STRB_IMM (code, ARMREG_LR, ins->inst_destbasereg, ins->inst_offset);
4248 case OP_STOREI2_MEMBASE_IMM:
4249 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_imm & 0xFFFF);
4250 g_assert (arm_is_imm8 (ins->inst_offset));
4251 ARM_STRH_IMM (code, ARMREG_LR, ins->inst_destbasereg, ins->inst_offset);
4253 case OP_STORE_MEMBASE_IMM:
4254 case OP_STOREI4_MEMBASE_IMM:
4255 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_imm);
4256 g_assert (arm_is_imm12 (ins->inst_offset));
4257 ARM_STR_IMM (code, ARMREG_LR, ins->inst_destbasereg, ins->inst_offset);
4259 case OP_STOREI1_MEMBASE_REG:
4260 g_assert (arm_is_imm12 (ins->inst_offset));
4261 ARM_STRB_IMM (code, ins->sreg1, ins->inst_destbasereg, ins->inst_offset);
4263 case OP_STOREI2_MEMBASE_REG:
4264 g_assert (arm_is_imm8 (ins->inst_offset));
4265 ARM_STRH_IMM (code, ins->sreg1, ins->inst_destbasereg, ins->inst_offset);
4267 case OP_STORE_MEMBASE_REG:
4268 case OP_STOREI4_MEMBASE_REG:
4269 /* this case is special, since it happens for spill code after lowering has been called */
4270 if (arm_is_imm12 (ins->inst_offset)) {
4271 ARM_STR_IMM (code, ins->sreg1, ins->inst_destbasereg, ins->inst_offset);
4273 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_offset);
4274 ARM_STR_REG_REG (code, ins->sreg1, ins->inst_destbasereg, ARMREG_LR);
4277 case OP_STOREI1_MEMINDEX:
4278 ARM_STRB_REG_REG (code, ins->sreg1, ins->inst_destbasereg, ins->sreg2);
4280 case OP_STOREI2_MEMINDEX:
4281 ARM_STRH_REG_REG (code, ins->sreg1, ins->inst_destbasereg, ins->sreg2);
4283 case OP_STORE_MEMINDEX:
4284 case OP_STOREI4_MEMINDEX:
4285 ARM_STR_REG_REG (code, ins->sreg1, ins->inst_destbasereg, ins->sreg2);
4288 g_assert_not_reached ();
4290 case OP_LOAD_MEMINDEX:
4291 case OP_LOADI4_MEMINDEX:
4292 case OP_LOADU4_MEMINDEX:
4293 ARM_LDR_REG_REG (code, ins->dreg, ins->inst_basereg, ins->sreg2);
4295 case OP_LOADI1_MEMINDEX:
4296 ARM_LDRSB_REG_REG (code, ins->dreg, ins->inst_basereg, ins->sreg2);
4298 case OP_LOADU1_MEMINDEX:
4299 ARM_LDRB_REG_REG (code, ins->dreg, ins->inst_basereg, ins->sreg2);
4301 case OP_LOADI2_MEMINDEX:
4302 ARM_LDRSH_REG_REG (code, ins->dreg, ins->inst_basereg, ins->sreg2);
4304 case OP_LOADU2_MEMINDEX:
4305 ARM_LDRH_REG_REG (code, ins->dreg, ins->inst_basereg, ins->sreg2);
4307 case OP_LOAD_MEMBASE:
4308 case OP_LOADI4_MEMBASE:
4309 case OP_LOADU4_MEMBASE:
4310 /* this case is special, since it happens for spill code after lowering has been called */
4311 if (arm_is_imm12 (ins->inst_offset)) {
4312 ARM_LDR_IMM (code, ins->dreg, ins->inst_basereg, ins->inst_offset);
4314 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_offset);
4315 ARM_LDR_REG_REG (code, ins->dreg, ins->inst_basereg, ARMREG_LR);
4318 case OP_LOADI1_MEMBASE:
4319 g_assert (arm_is_imm8 (ins->inst_offset));
4320 ARM_LDRSB_IMM (code, ins->dreg, ins->inst_basereg, ins->inst_offset);
4322 case OP_LOADU1_MEMBASE:
4323 g_assert (arm_is_imm12 (ins->inst_offset));
4324 ARM_LDRB_IMM (code, ins->dreg, ins->inst_basereg, ins->inst_offset);
4326 case OP_LOADU2_MEMBASE:
4327 g_assert (arm_is_imm8 (ins->inst_offset));
4328 ARM_LDRH_IMM (code, ins->dreg, ins->inst_basereg, ins->inst_offset);
4330 case OP_LOADI2_MEMBASE:
4331 g_assert (arm_is_imm8 (ins->inst_offset));
4332 ARM_LDRSH_IMM (code, ins->dreg, ins->inst_basereg, ins->inst_offset);
4334 case OP_ICONV_TO_I1:
4335 ARM_SHL_IMM (code, ins->dreg, ins->sreg1, 24);
4336 ARM_SAR_IMM (code, ins->dreg, ins->dreg, 24);
4338 case OP_ICONV_TO_I2:
4339 ARM_SHL_IMM (code, ins->dreg, ins->sreg1, 16);
4340 ARM_SAR_IMM (code, ins->dreg, ins->dreg, 16);
4342 case OP_ICONV_TO_U1:
4343 ARM_AND_REG_IMM8 (code, ins->dreg, ins->sreg1, 0xff);
4345 case OP_ICONV_TO_U2:
4346 ARM_SHL_IMM (code, ins->dreg, ins->sreg1, 16);
4347 ARM_SHR_IMM (code, ins->dreg, ins->dreg, 16);
4351 ARM_CMP_REG_REG (code, ins->sreg1, ins->sreg2);
4353 case OP_COMPARE_IMM:
4354 case OP_ICOMPARE_IMM:
4355 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
4356 g_assert (imm8 >= 0);
4357 ARM_CMP_REG_IMM (code, ins->sreg1, imm8, rot_amount);
4361 * gdb does not like encountering the hw breakpoint ins in the debugged code.
4362 * So instead of emitting a trap, we emit a call a C function and place a
4365 //*(int*)code = 0xef9f0001;
4368 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_INTERNAL_METHOD,
4369 (gpointer)"mono_break");
4370 code = emit_call_seq (cfg, code);
4372 case OP_RELAXED_NOP:
4377 case OP_DUMMY_STORE:
4378 case OP_DUMMY_ICONST:
4379 case OP_DUMMY_R8CONST:
4380 case OP_NOT_REACHED:
4383 case OP_IL_SEQ_POINT:
4384 mono_add_seq_point (cfg, bb, ins, code - cfg->native_code);
4386 case OP_SEQ_POINT: {
4388 MonoInst *info_var = cfg->arch.seq_point_info_var;
4389 MonoInst *ss_trigger_page_var = cfg->arch.ss_trigger_page_var;
4390 MonoInst *ss_read_var = cfg->arch.seq_point_read_var;
4391 MonoInst *ss_method_var = cfg->arch.seq_point_ss_method_var;
4392 MonoInst *bp_method_var = cfg->arch.seq_point_bp_method_var;
4394 int dreg = ARMREG_LR;
4396 if (cfg->soft_breakpoints) {
4397 g_assert (!cfg->compile_aot);
4401 * For AOT, we use one got slot per method, which will point to a
4402 * SeqPointInfo structure, containing all the information required
4403 * by the code below.
4405 if (cfg->compile_aot) {
4406 g_assert (info_var);
4407 g_assert (info_var->opcode == OP_REGOFFSET);
4408 g_assert (arm_is_imm12 (info_var->inst_offset));
4411 if (!cfg->soft_breakpoints) {
4413 * Read from the single stepping trigger page. This will cause a
4414 * SIGSEGV when single stepping is enabled.
4415 * We do this _before_ the breakpoint, so single stepping after
4416 * a breakpoint is hit will step to the next IL offset.
4418 g_assert (((guint64)(gsize)ss_trigger_page >> 32) == 0);
4421 if (ins->flags & MONO_INST_SINGLE_STEP_LOC) {
4422 if (cfg->soft_breakpoints) {
4423 /* Load the address of the sequence point trigger variable. */
4426 g_assert (var->opcode == OP_REGOFFSET);
4427 g_assert (arm_is_imm12 (var->inst_offset));
4428 ARM_LDR_IMM (code, dreg, var->inst_basereg, var->inst_offset);
4430 /* Read the value and check whether it is non-zero. */
4431 ARM_LDR_IMM (code, dreg, dreg, 0);
4432 ARM_CMP_REG_IMM (code, dreg, 0, 0);
4434 /* Load the address of the sequence point method. */
4435 var = ss_method_var;
4437 g_assert (var->opcode == OP_REGOFFSET);
4438 g_assert (arm_is_imm12 (var->inst_offset));
4439 ARM_LDR_IMM (code, dreg, var->inst_basereg, var->inst_offset);
4441 /* Call it conditionally. */
4442 ARM_BLX_REG_COND (code, ARMCOND_NE, dreg);
4444 if (cfg->compile_aot) {
4445 /* Load the trigger page addr from the variable initialized in the prolog */
4446 var = ss_trigger_page_var;
4448 g_assert (var->opcode == OP_REGOFFSET);
4449 g_assert (arm_is_imm12 (var->inst_offset));
4450 ARM_LDR_IMM (code, dreg, var->inst_basereg, var->inst_offset);
4452 #ifdef USE_JUMP_TABLES
4453 gpointer *jte = mono_jumptable_add_entry ();
4454 code = mono_arm_load_jumptable_entry (code, jte, dreg);
4455 jte [0] = ss_trigger_page;
4457 ARM_LDR_IMM (code, dreg, ARMREG_PC, 0);
4459 *(int*)code = (int)ss_trigger_page;
4463 ARM_LDR_IMM (code, dreg, dreg, 0);
4467 mono_add_seq_point (cfg, bb, ins, code - cfg->native_code);
4469 if (cfg->soft_breakpoints) {
4470 /* Load the address of the breakpoint method into ip. */
4471 var = bp_method_var;
4473 g_assert (var->opcode == OP_REGOFFSET);
4474 g_assert (arm_is_imm12 (var->inst_offset));
4475 ARM_LDR_IMM (code, dreg, var->inst_basereg, var->inst_offset);
4478 * A placeholder for a possible breakpoint inserted by
4479 * mono_arch_set_breakpoint ().
4482 } else if (cfg->compile_aot) {
4483 guint32 offset = code - cfg->native_code;
4486 ARM_LDR_IMM (code, dreg, info_var->inst_basereg, info_var->inst_offset);
4487 /* Add the offset */
4488 val = ((offset / 4) * sizeof (guint8*)) + MONO_STRUCT_OFFSET (SeqPointInfo, bp_addrs);
4489 /* Load the info->bp_addrs [offset], which is either 0 or the address of a trigger page */
4490 if (arm_is_imm12 ((int)val)) {
4491 ARM_LDR_IMM (code, dreg, dreg, val);
4493 ARM_ADD_REG_IMM (code, dreg, dreg, (val & 0xFF), 0);
4495 ARM_ADD_REG_IMM (code, dreg, dreg, (val & 0xFF00) >> 8, 24);
4497 ARM_ADD_REG_IMM (code, dreg, dreg, (val & 0xFF0000) >> 16, 16);
4498 g_assert (!(val & 0xFF000000));
4500 ARM_LDR_IMM (code, dreg, dreg, 0);
4502 /* What is faster, a branch or a load ? */
4503 ARM_CMP_REG_IMM (code, dreg, 0, 0);
4504 /* The breakpoint instruction */
4505 ARM_LDR_IMM_COND (code, dreg, dreg, 0, ARMCOND_NE);
4508 * A placeholder for a possible breakpoint inserted by
4509 * mono_arch_set_breakpoint ().
4511 for (i = 0; i < 4; ++i)
4518 ARM_ADDS_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4521 ARM_ADD_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4525 ARM_ADCS_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4528 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
4529 g_assert (imm8 >= 0);
4530 ARM_ADDS_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
4534 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
4535 g_assert (imm8 >= 0);
4536 ARM_ADD_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
4540 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
4541 g_assert (imm8 >= 0);
4542 ARM_ADCS_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
4545 ARM_ADD_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4546 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, "OverflowException");
4548 case OP_IADD_OVF_UN:
4549 ARM_ADD_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4550 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, "OverflowException");
4553 ARM_SUB_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4554 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, "OverflowException");
4556 case OP_ISUB_OVF_UN:
4557 ARM_SUB_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4558 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_TRUE, PPC_BR_EQ, "OverflowException");
4560 case OP_ADD_OVF_CARRY:
4561 ARM_ADCS_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4562 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, "OverflowException");
4564 case OP_ADD_OVF_UN_CARRY:
4565 ARM_ADCS_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4566 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, "OverflowException");
4568 case OP_SUB_OVF_CARRY:
4569 ARM_SBCS_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4570 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, "OverflowException");
4572 case OP_SUB_OVF_UN_CARRY:
4573 ARM_SBCS_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4574 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_TRUE, PPC_BR_EQ, "OverflowException");
4578 ARM_SUBS_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4581 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
4582 g_assert (imm8 >= 0);
4583 ARM_SUBS_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
4586 ARM_SUB_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4590 ARM_SBCS_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4594 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
4595 g_assert (imm8 >= 0);
4596 ARM_SUB_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
4600 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
4601 g_assert (imm8 >= 0);
4602 ARM_SBCS_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
4604 case OP_ARM_RSBS_IMM:
4605 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
4606 g_assert (imm8 >= 0);
4607 ARM_RSBS_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
4609 case OP_ARM_RSC_IMM:
4610 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
4611 g_assert (imm8 >= 0);
4612 ARM_RSC_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
4615 ARM_AND_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4619 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
4620 g_assert (imm8 >= 0);
4621 ARM_AND_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
4624 g_assert (v7s_supported);
4625 ARM_SDIV (code, ins->dreg, ins->sreg1, ins->sreg2);
4628 g_assert (v7s_supported);
4629 ARM_UDIV (code, ins->dreg, ins->sreg1, ins->sreg2);
4632 g_assert (v7s_supported);
4633 ARM_SDIV (code, ARMREG_LR, ins->sreg1, ins->sreg2);
4634 ARM_MLS (code, ins->dreg, ARMREG_LR, ins->sreg2, ins->sreg1);
4637 g_assert (v7s_supported);
4638 ARM_UDIV (code, ARMREG_LR, ins->sreg1, ins->sreg2);
4639 ARM_MLS (code, ins->dreg, ARMREG_LR, ins->sreg2, ins->sreg1);
4643 g_assert_not_reached ();
4645 ARM_ORR_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4649 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
4650 g_assert (imm8 >= 0);
4651 ARM_ORR_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
4654 ARM_EOR_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4658 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
4659 g_assert (imm8 >= 0);
4660 ARM_EOR_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
4663 ARM_SHL_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4668 ARM_SHL_IMM (code, ins->dreg, ins->sreg1, (ins->inst_imm & 0x1f));
4669 else if (ins->dreg != ins->sreg1)
4670 ARM_MOV_REG_REG (code, ins->dreg, ins->sreg1);
4673 ARM_SAR_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4678 ARM_SAR_IMM (code, ins->dreg, ins->sreg1, (ins->inst_imm & 0x1f));
4679 else if (ins->dreg != ins->sreg1)
4680 ARM_MOV_REG_REG (code, ins->dreg, ins->sreg1);
4683 case OP_ISHR_UN_IMM:
4685 ARM_SHR_IMM (code, ins->dreg, ins->sreg1, (ins->inst_imm & 0x1f));
4686 else if (ins->dreg != ins->sreg1)
4687 ARM_MOV_REG_REG (code, ins->dreg, ins->sreg1);
4690 ARM_SHR_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4693 ARM_MVN_REG_REG (code, ins->dreg, ins->sreg1);
4696 ARM_RSB_REG_IMM8 (code, ins->dreg, ins->sreg1, 0);
4699 if (ins->dreg == ins->sreg2)
4700 ARM_MUL_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4702 ARM_MUL_REG_REG (code, ins->dreg, ins->sreg2, ins->sreg1);
4705 g_assert_not_reached ();
4708 /* FIXME: handle ovf/ sreg2 != dreg */
4709 ARM_MUL_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4710 /* FIXME: MUL doesn't set the C/O flags on ARM */
4712 case OP_IMUL_OVF_UN:
4713 /* FIXME: handle ovf/ sreg2 != dreg */
4714 ARM_MUL_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
4715 /* FIXME: MUL doesn't set the C/O flags on ARM */
4718 code = mono_arm_emit_load_imm (code, ins->dreg, ins->inst_c0);
4721 /* Load the GOT offset */
4722 mono_add_patch_info (cfg, offset, (MonoJumpInfoType)ins->inst_i1, ins->inst_p0);
4723 ARM_LDR_IMM (code, ins->dreg, ARMREG_PC, 0);
4725 *(gpointer*)code = NULL;
4727 /* Load the value from the GOT */
4728 ARM_LDR_REG_REG (code, ins->dreg, ARMREG_PC, ins->dreg);
4730 case OP_OBJC_GET_SELECTOR:
4731 mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_OBJC_SELECTOR_REF, ins->inst_p0);
4732 ARM_LDR_IMM (code, ins->dreg, ARMREG_PC, 0);
4734 *(gpointer*)code = NULL;
4736 ARM_LDR_REG_REG (code, ins->dreg, ARMREG_PC, ins->dreg);
4738 case OP_ICONV_TO_I4:
4739 case OP_ICONV_TO_U4:
4741 if (ins->dreg != ins->sreg1)
4742 ARM_MOV_REG_REG (code, ins->dreg, ins->sreg1);
4745 int saved = ins->sreg2;
4746 if (ins->sreg2 == ARM_LSW_REG) {
4747 ARM_MOV_REG_REG (code, ARMREG_LR, ins->sreg2);
4750 if (ins->sreg1 != ARM_LSW_REG)
4751 ARM_MOV_REG_REG (code, ARM_LSW_REG, ins->sreg1);
4752 if (saved != ARM_MSW_REG)
4753 ARM_MOV_REG_REG (code, ARM_MSW_REG, saved);
4758 ARM_CPYD (code, ins->dreg, ins->sreg1);
4760 case OP_FCONV_TO_R4:
4762 ARM_CVTD (code, ins->dreg, ins->sreg1);
4763 ARM_CVTS (code, ins->dreg, ins->dreg);
4768 * Keep in sync with mono_arch_emit_epilog
4770 g_assert (!cfg->method->save_lmf);
4772 code = emit_load_volatile_arguments (cfg, code);
4774 code = emit_big_add (code, ARMREG_SP, cfg->frame_reg, cfg->stack_usage);
4776 if (cfg->used_int_regs)
4777 ARM_POP (code, cfg->used_int_regs);
4778 ARM_POP (code, (1 << ARMREG_R7) | (1 << ARMREG_LR));
4780 ARM_POP (code, cfg->used_int_regs | (1 << ARMREG_LR));
4782 mono_add_patch_info (cfg, (guint8*) code - cfg->native_code, MONO_PATCH_INFO_METHOD_JUMP, ins->inst_p0);
4783 if (cfg->compile_aot) {
4784 ARM_LDR_IMM (code, ARMREG_IP, ARMREG_PC, 0);
4786 *(gpointer*)code = NULL;
4788 ARM_LDR_REG_REG (code, ARMREG_PC, ARMREG_PC, ARMREG_IP);
4790 code = mono_arm_patchable_b (code, ARMCOND_AL);
4794 MonoCallInst *call = (MonoCallInst*)ins;
4797 * The stack looks like the following:
4798 * <caller argument area>
4801 * <callee argument area>
4802 * Need to copy the arguments from the callee argument area to
4803 * the caller argument area, and pop the frame.
4805 if (call->stack_usage) {
4806 int i, prev_sp_offset = 0;
4808 /* Compute size of saved registers restored below */
4810 prev_sp_offset = 2 * 4;
4812 prev_sp_offset = 1 * 4;
4813 for (i = 0; i < 16; ++i) {
4814 if (cfg->used_int_regs & (1 << i))
4815 prev_sp_offset += 4;
4818 code = emit_big_add (code, ARMREG_IP, cfg->frame_reg, cfg->stack_usage + prev_sp_offset);
4820 /* Copy arguments on the stack to our argument area */
4821 for (i = 0; i < call->stack_usage; i += sizeof (mgreg_t)) {
4822 ARM_LDR_IMM (code, ARMREG_LR, ARMREG_SP, i);
4823 ARM_STR_IMM (code, ARMREG_LR, ARMREG_IP, i);
4828 * Keep in sync with mono_arch_emit_epilog
4830 g_assert (!cfg->method->save_lmf);
4832 code = emit_big_add (code, ARMREG_SP, cfg->frame_reg, cfg->stack_usage);
4834 if (cfg->used_int_regs)
4835 ARM_POP (code, cfg->used_int_regs);
4836 ARM_POP (code, (1 << ARMREG_R7) | (1 << ARMREG_LR));
4838 ARM_POP (code, cfg->used_int_regs | (1 << ARMREG_LR));
4841 mono_add_patch_info (cfg, (guint8*) code - cfg->native_code, MONO_PATCH_INFO_METHOD_JUMP, call->method);
4842 if (cfg->compile_aot) {
4843 ARM_LDR_IMM (code, ARMREG_IP, ARMREG_PC, 0);
4845 *(gpointer*)code = NULL;
4847 ARM_LDR_REG_REG (code, ARMREG_PC, ARMREG_PC, ARMREG_IP);
4849 code = mono_arm_patchable_b (code, ARMCOND_AL);
4854 /* ensure ins->sreg1 is not NULL */
4855 ARM_LDRB_IMM (code, ARMREG_LR, ins->sreg1, 0);
4858 g_assert (cfg->sig_cookie < 128);
4859 ARM_LDR_IMM (code, ARMREG_IP, cfg->frame_reg, cfg->sig_cookie);
4860 ARM_STR_IMM (code, ARMREG_IP, ins->sreg1, 0);
4869 call = (MonoCallInst*)ins;
4872 code = emit_float_args (cfg, call, code, &max_len, &offset);
4874 if (ins->flags & MONO_INST_HAS_METHOD)
4875 mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_METHOD, call->method);
4877 mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_ABS, call->fptr);
4878 code = emit_call_seq (cfg, code);
4879 ins->flags |= MONO_INST_GC_CALLSITE;
4880 ins->backend.pc_offset = code - cfg->native_code;
4881 code = emit_move_return_value (cfg, ins, code);
4887 case OP_VOIDCALL_REG:
4890 code = emit_float_args (cfg, (MonoCallInst *)ins, code, &max_len, &offset);
4892 code = emit_call_reg (code, ins->sreg1);
4893 ins->flags |= MONO_INST_GC_CALLSITE;
4894 ins->backend.pc_offset = code - cfg->native_code;
4895 code = emit_move_return_value (cfg, ins, code);
4897 case OP_FCALL_MEMBASE:
4898 case OP_LCALL_MEMBASE:
4899 case OP_VCALL_MEMBASE:
4900 case OP_VCALL2_MEMBASE:
4901 case OP_VOIDCALL_MEMBASE:
4902 case OP_CALL_MEMBASE: {
4903 gboolean imt_arg = FALSE;
4905 g_assert (ins->sreg1 != ARMREG_LR);
4906 call = (MonoCallInst*)ins;
4909 code = emit_float_args (cfg, call, code, &max_len, &offset);
4911 if (call->dynamic_imt_arg || call->method->klass->flags & TYPE_ATTRIBUTE_INTERFACE)
4913 if (!arm_is_imm12 (ins->inst_offset))
4914 code = mono_arm_emit_load_imm (code, ARMREG_IP, ins->inst_offset);
4915 #ifdef USE_JUMP_TABLES
4921 ARM_ADD_REG_IMM8 (code, ARMREG_LR, ARMREG_PC, LR_BIAS);
4923 ARM_MOV_REG_REG (code, ARMREG_LR, ARMREG_PC);
4925 if (!arm_is_imm12 (ins->inst_offset))
4926 ARM_LDR_REG_REG (code, ARMREG_PC, ins->sreg1, ARMREG_IP);
4928 ARM_LDR_IMM (code, ARMREG_PC, ins->sreg1, ins->inst_offset);
4931 * We can't embed the method in the code stream in PIC code, or
4933 * Instead, we put it in V5 in code emitted by
4934 * mono_arch_emit_imt_argument (), and embed NULL here to
4935 * signal the IMT thunk that the value is in V5.
4937 #ifdef USE_JUMP_TABLES
4938 /* In case of jumptables we always use value in V5. */
4941 if (call->dynamic_imt_arg)
4942 *((gpointer*)code) = NULL;
4944 *((gpointer*)code) = (gpointer)call->method;
4948 ins->flags |= MONO_INST_GC_CALLSITE;
4949 ins->backend.pc_offset = code - cfg->native_code;
4950 code = emit_move_return_value (cfg, ins, code);
4954 /* round the size to 8 bytes */
4955 ARM_ADD_REG_IMM8 (code, ins->dreg, ins->sreg1, 7);
4956 ARM_BIC_REG_IMM8 (code, ins->dreg, ins->dreg, 7);
4957 ARM_SUB_REG_REG (code, ARMREG_SP, ARMREG_SP, ins->dreg);
4958 /* memzero the area: dreg holds the size, sp is the pointer */
4959 if (ins->flags & MONO_INST_INIT) {
4960 guint8 *start_loop, *branch_to_cond;
4961 ARM_MOV_REG_IMM8 (code, ARMREG_LR, 0);
4962 branch_to_cond = code;
4965 ARM_STR_REG_REG (code, ARMREG_LR, ARMREG_SP, ins->dreg);
4966 arm_patch (branch_to_cond, code);
4967 /* decrement by 4 and set flags */
4968 ARM_SUBS_REG_IMM8 (code, ins->dreg, ins->dreg, sizeof (mgreg_t));
4969 ARM_B_COND (code, ARMCOND_GE, 0);
4970 arm_patch (code - 4, start_loop);
4972 ARM_MOV_REG_REG (code, ins->dreg, ARMREG_SP);
4973 if (cfg->param_area)
4974 code = emit_sub_imm (code, ARMREG_SP, ARMREG_SP, ALIGN_TO (cfg->param_area, MONO_ARCH_FRAME_ALIGNMENT));
4979 MonoInst *var = cfg->dyn_call_var;
4981 g_assert (var->opcode == OP_REGOFFSET);
4982 g_assert (arm_is_imm12 (var->inst_offset));
4984 /* lr = args buffer filled by mono_arch_get_dyn_call_args () */
4985 ARM_MOV_REG_REG( code, ARMREG_LR, ins->sreg1);
4987 ARM_MOV_REG_REG( code, ARMREG_IP, ins->sreg2);
4989 /* Save args buffer */
4990 ARM_STR_IMM (code, ARMREG_LR, var->inst_basereg, var->inst_offset);
4992 /* Set stack slots using R0 as scratch reg */
4993 /* MONO_ARCH_DYN_CALL_PARAM_AREA gives the size of stack space available */
4994 for (i = 0; i < DYN_CALL_STACK_ARGS; ++i) {
4995 ARM_LDR_IMM (code, ARMREG_R0, ARMREG_LR, (PARAM_REGS + i) * sizeof (mgreg_t));
4996 ARM_STR_IMM (code, ARMREG_R0, ARMREG_SP, i * sizeof (mgreg_t));
4999 /* Set argument registers */
5000 for (i = 0; i < PARAM_REGS; ++i)
5001 ARM_LDR_IMM (code, i, ARMREG_LR, i * sizeof (mgreg_t));
5004 ARM_MOV_REG_REG (code, ARMREG_LR, ARMREG_PC);
5005 ARM_MOV_REG_REG (code, ARMREG_PC, ARMREG_IP);
5008 ARM_LDR_IMM (code, ARMREG_IP, var->inst_basereg, var->inst_offset);
5009 ARM_STR_IMM (code, ARMREG_R0, ARMREG_IP, MONO_STRUCT_OFFSET (DynCallArgs, res));
5010 ARM_STR_IMM (code, ARMREG_R1, ARMREG_IP, MONO_STRUCT_OFFSET (DynCallArgs, res2));
5014 if (ins->sreg1 != ARMREG_R0)
5015 ARM_MOV_REG_REG (code, ARMREG_R0, ins->sreg1);
5016 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_INTERNAL_METHOD,
5017 (gpointer)"mono_arch_throw_exception");
5018 code = emit_call_seq (cfg, code);
5022 if (ins->sreg1 != ARMREG_R0)
5023 ARM_MOV_REG_REG (code, ARMREG_R0, ins->sreg1);
5024 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_INTERNAL_METHOD,
5025 (gpointer)"mono_arch_rethrow_exception");
5026 code = emit_call_seq (cfg, code);
5029 case OP_START_HANDLER: {
5030 MonoInst *spvar = mono_find_spvar_for_region (cfg, bb->region);
5033 /* Reserve a param area, see filter-stack.exe */
5034 if (cfg->param_area) {
5035 if ((i = mono_arm_is_rotated_imm8 (cfg->param_area, &rot_amount)) >= 0) {
5036 ARM_SUB_REG_IMM (code, ARMREG_SP, ARMREG_SP, i, rot_amount);
5038 code = mono_arm_emit_load_imm (code, ARMREG_IP, cfg->param_area);
5039 ARM_SUB_REG_REG (code, ARMREG_SP, ARMREG_SP, ARMREG_IP);
5043 if (arm_is_imm12 (spvar->inst_offset)) {
5044 ARM_STR_IMM (code, ARMREG_LR, spvar->inst_basereg, spvar->inst_offset);
5046 code = mono_arm_emit_load_imm (code, ARMREG_IP, spvar->inst_offset);
5047 ARM_STR_REG_REG (code, ARMREG_LR, spvar->inst_basereg, ARMREG_IP);
5051 case OP_ENDFILTER: {
5052 MonoInst *spvar = mono_find_spvar_for_region (cfg, bb->region);
5055 /* Free the param area */
5056 if (cfg->param_area) {
5057 if ((i = mono_arm_is_rotated_imm8 (cfg->param_area, &rot_amount)) >= 0) {
5058 ARM_ADD_REG_IMM (code, ARMREG_SP, ARMREG_SP, i, rot_amount);
5060 code = mono_arm_emit_load_imm (code, ARMREG_IP, cfg->param_area);
5061 ARM_ADD_REG_REG (code, ARMREG_SP, ARMREG_SP, ARMREG_IP);
5065 if (ins->sreg1 != ARMREG_R0)
5066 ARM_MOV_REG_REG (code, ARMREG_R0, ins->sreg1);
5067 if (arm_is_imm12 (spvar->inst_offset)) {
5068 ARM_LDR_IMM (code, ARMREG_IP, spvar->inst_basereg, spvar->inst_offset);
5070 g_assert (ARMREG_IP != spvar->inst_basereg);
5071 code = mono_arm_emit_load_imm (code, ARMREG_IP, spvar->inst_offset);
5072 ARM_LDR_REG_REG (code, ARMREG_IP, spvar->inst_basereg, ARMREG_IP);
5074 ARM_MOV_REG_REG (code, ARMREG_PC, ARMREG_IP);
5077 case OP_ENDFINALLY: {
5078 MonoInst *spvar = mono_find_spvar_for_region (cfg, bb->region);
5081 /* Free the param area */
5082 if (cfg->param_area) {
5083 if ((i = mono_arm_is_rotated_imm8 (cfg->param_area, &rot_amount)) >= 0) {
5084 ARM_ADD_REG_IMM (code, ARMREG_SP, ARMREG_SP, i, rot_amount);
5086 code = mono_arm_emit_load_imm (code, ARMREG_IP, cfg->param_area);
5087 ARM_ADD_REG_REG (code, ARMREG_SP, ARMREG_SP, ARMREG_IP);
5091 if (arm_is_imm12 (spvar->inst_offset)) {
5092 ARM_LDR_IMM (code, ARMREG_IP, spvar->inst_basereg, spvar->inst_offset);
5094 g_assert (ARMREG_IP != spvar->inst_basereg);
5095 code = mono_arm_emit_load_imm (code, ARMREG_IP, spvar->inst_offset);
5096 ARM_LDR_REG_REG (code, ARMREG_IP, spvar->inst_basereg, ARMREG_IP);
5098 ARM_MOV_REG_REG (code, ARMREG_PC, ARMREG_IP);
5101 case OP_CALL_HANDLER:
5102 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_BB, ins->inst_target_bb);
5103 code = mono_arm_patchable_bl (code, ARMCOND_AL);
5104 mono_cfg_add_try_hole (cfg, ins->inst_eh_block, code, bb);
5107 ins->inst_c0 = code - cfg->native_code;
5110 /*if (ins->inst_target_bb->native_offset) {
5112 //x86_jump_code (code, cfg->native_code + ins->inst_target_bb->native_offset);
5114 mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_BB, ins->inst_target_bb);
5115 code = mono_arm_patchable_b (code, ARMCOND_AL);
5119 ARM_MOV_REG_REG (code, ARMREG_PC, ins->sreg1);
5123 * In the normal case we have:
5124 * ldr pc, [pc, ins->sreg1 << 2]
5127 * ldr lr, [pc, ins->sreg1 << 2]
5129 * After follows the data.
5130 * FIXME: add aot support.
5132 mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_SWITCH, ins->inst_p0);
5133 #ifdef USE_JUMP_TABLES
5135 gpointer *jte = mono_jumptable_add_entries (GPOINTER_TO_INT (ins->klass));
5136 code = mono_arm_load_jumptable_entry_addr (code, jte, ARMREG_IP);
5137 ARM_LDR_REG_REG_SHIFT (code, ARMREG_PC, ARMREG_IP, ins->sreg1, ARMSHIFT_LSL, 2);
5141 max_len += 4 * GPOINTER_TO_INT (ins->klass);
5142 if (offset + max_len > (cfg->code_size - 16)) {
5143 cfg->code_size += max_len;
5144 cfg->code_size *= 2;
5145 cfg->native_code = g_realloc (cfg->native_code, cfg->code_size);
5146 code = cfg->native_code + offset;
5148 ARM_LDR_REG_REG_SHIFT (code, ARMREG_PC, ARMREG_PC, ins->sreg1, ARMSHIFT_LSL, 2);
5150 code += 4 * GPOINTER_TO_INT (ins->klass);
5155 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 0, ARMCOND_NE);
5156 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_EQ);
5160 ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
5161 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_LT);
5165 ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
5166 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_LO);
5170 ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
5171 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_GT);
5175 ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
5176 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_HI);
5179 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_NE);
5180 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 0, ARMCOND_EQ);
5183 ARM_MOV_REG_IMM8 (code, ins->dreg, 1);
5184 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 0, ARMCOND_LT);
5187 ARM_MOV_REG_IMM8 (code, ins->dreg, 1);
5188 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 0, ARMCOND_GT);
5192 ARM_MOV_REG_IMM8 (code, ins->dreg, 1);
5193 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 0, ARMCOND_LO);
5195 case OP_COND_EXC_EQ:
5196 case OP_COND_EXC_NE_UN:
5197 case OP_COND_EXC_LT:
5198 case OP_COND_EXC_LT_UN:
5199 case OP_COND_EXC_GT:
5200 case OP_COND_EXC_GT_UN:
5201 case OP_COND_EXC_GE:
5202 case OP_COND_EXC_GE_UN:
5203 case OP_COND_EXC_LE:
5204 case OP_COND_EXC_LE_UN:
5205 EMIT_COND_SYSTEM_EXCEPTION (ins->opcode - OP_COND_EXC_EQ, ins->inst_p1);
5207 case OP_COND_EXC_IEQ:
5208 case OP_COND_EXC_INE_UN:
5209 case OP_COND_EXC_ILT:
5210 case OP_COND_EXC_ILT_UN:
5211 case OP_COND_EXC_IGT:
5212 case OP_COND_EXC_IGT_UN:
5213 case OP_COND_EXC_IGE:
5214 case OP_COND_EXC_IGE_UN:
5215 case OP_COND_EXC_ILE:
5216 case OP_COND_EXC_ILE_UN:
5217 EMIT_COND_SYSTEM_EXCEPTION (ins->opcode - OP_COND_EXC_IEQ, ins->inst_p1);
5220 case OP_COND_EXC_IC:
5221 EMIT_COND_SYSTEM_EXCEPTION_FLAGS (ARMCOND_CS, ins->inst_p1);
5223 case OP_COND_EXC_OV:
5224 case OP_COND_EXC_IOV:
5225 EMIT_COND_SYSTEM_EXCEPTION_FLAGS (ARMCOND_VS, ins->inst_p1);
5227 case OP_COND_EXC_NC:
5228 case OP_COND_EXC_INC:
5229 EMIT_COND_SYSTEM_EXCEPTION_FLAGS (ARMCOND_CC, ins->inst_p1);
5231 case OP_COND_EXC_NO:
5232 case OP_COND_EXC_INO:
5233 EMIT_COND_SYSTEM_EXCEPTION_FLAGS (ARMCOND_VC, ins->inst_p1);
5245 EMIT_COND_BRANCH (ins, ins->opcode - OP_IBEQ);
5248 /* floating point opcodes */
5250 if (cfg->compile_aot) {
5251 ARM_FLDD (code, ins->dreg, ARMREG_PC, 0);
5253 *(guint32*)code = ((guint32*)(ins->inst_p0))[0];
5255 *(guint32*)code = ((guint32*)(ins->inst_p0))[1];
5258 /* FIXME: we can optimize the imm load by dealing with part of
5259 * the displacement in LDFD (aligning to 512).
5261 code = mono_arm_emit_load_imm (code, ARMREG_LR, (guint32)ins->inst_p0);
5262 ARM_FLDD (code, ins->dreg, ARMREG_LR, 0);
5266 if (cfg->compile_aot) {
5267 ARM_FLDS (code, ins->dreg, ARMREG_PC, 0);
5269 *(guint32*)code = ((guint32*)(ins->inst_p0))[0];
5271 ARM_CVTS (code, ins->dreg, ins->dreg);
5273 code = mono_arm_emit_load_imm (code, ARMREG_LR, (guint32)ins->inst_p0);
5274 ARM_FLDS (code, ins->dreg, ARMREG_LR, 0);
5275 ARM_CVTS (code, ins->dreg, ins->dreg);
5278 case OP_STORER8_MEMBASE_REG:
5279 /* This is generated by the local regalloc pass which runs after the lowering pass */
5280 if (!arm_is_fpimm8 (ins->inst_offset)) {
5281 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_offset);
5282 ARM_ADD_REG_REG (code, ARMREG_LR, ARMREG_LR, ins->inst_destbasereg);
5283 ARM_FSTD (code, ins->sreg1, ARMREG_LR, 0);
5285 ARM_FSTD (code, ins->sreg1, ins->inst_destbasereg, ins->inst_offset);
5288 case OP_LOADR8_MEMBASE:
5289 /* This is generated by the local regalloc pass which runs after the lowering pass */
5290 if (!arm_is_fpimm8 (ins->inst_offset)) {
5291 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_offset);
5292 ARM_ADD_REG_REG (code, ARMREG_LR, ARMREG_LR, ins->inst_basereg);
5293 ARM_FLDD (code, ins->dreg, ARMREG_LR, 0);
5295 ARM_FLDD (code, ins->dreg, ins->inst_basereg, ins->inst_offset);
5298 case OP_STORER4_MEMBASE_REG:
5299 g_assert (arm_is_fpimm8 (ins->inst_offset));
5300 code = mono_arm_emit_vfp_scratch_save (cfg, code, vfp_scratch1);
5301 ARM_CVTD (code, vfp_scratch1, ins->sreg1);
5302 ARM_FSTS (code, vfp_scratch1, ins->inst_destbasereg, ins->inst_offset);
5303 code = mono_arm_emit_vfp_scratch_restore (cfg, code, vfp_scratch1);
5305 case OP_LOADR4_MEMBASE:
5306 g_assert (arm_is_fpimm8 (ins->inst_offset));
5307 code = mono_arm_emit_vfp_scratch_save (cfg, code, vfp_scratch1);
5308 ARM_FLDS (code, vfp_scratch1, ins->inst_basereg, ins->inst_offset);
5309 ARM_CVTS (code, ins->dreg, vfp_scratch1);
5310 code = mono_arm_emit_vfp_scratch_restore (cfg, code, vfp_scratch1);
5312 case OP_ICONV_TO_R_UN: {
5313 g_assert_not_reached ();
5316 case OP_ICONV_TO_R4:
5317 code = mono_arm_emit_vfp_scratch_save (cfg, code, vfp_scratch1);
5318 ARM_FMSR (code, vfp_scratch1, ins->sreg1);
5319 ARM_FSITOS (code, vfp_scratch1, vfp_scratch1);
5320 ARM_CVTS (code, ins->dreg, vfp_scratch1);
5321 code = mono_arm_emit_vfp_scratch_restore (cfg, code, vfp_scratch1);
5323 case OP_ICONV_TO_R8:
5324 code = mono_arm_emit_vfp_scratch_save (cfg, code, vfp_scratch1);
5325 ARM_FMSR (code, vfp_scratch1, ins->sreg1);
5326 ARM_FSITOD (code, ins->dreg, vfp_scratch1);
5327 code = mono_arm_emit_vfp_scratch_restore (cfg, code, vfp_scratch1);
5331 MonoType *sig_ret = mini_type_get_underlying_type (NULL, mono_method_signature (cfg->method)->ret);
5332 if (sig_ret->type == MONO_TYPE_R4) {
5333 ARM_CVTD (code, ARM_VFP_F0, ins->sreg1);
5335 if (!IS_HARD_FLOAT) {
5336 ARM_FMRS (code, ARMREG_R0, ARM_VFP_F0);
5339 if (IS_HARD_FLOAT) {
5340 ARM_CPYD (code, ARM_VFP_D0, ins->sreg1);
5342 ARM_FMRRD (code, ARMREG_R0, ARMREG_R1, ins->sreg1);
5347 case OP_FCONV_TO_I1:
5348 code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 1, TRUE);
5350 case OP_FCONV_TO_U1:
5351 code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 1, FALSE);
5353 case OP_FCONV_TO_I2:
5354 code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 2, TRUE);
5356 case OP_FCONV_TO_U2:
5357 code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 2, FALSE);
5359 case OP_FCONV_TO_I4:
5361 code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 4, TRUE);
5363 case OP_FCONV_TO_U4:
5365 code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 4, FALSE);
5367 case OP_FCONV_TO_I8:
5368 case OP_FCONV_TO_U8:
5369 g_assert_not_reached ();
5370 /* Implemented as helper calls */
5372 case OP_LCONV_TO_R_UN:
5373 g_assert_not_reached ();
5374 /* Implemented as helper calls */
5376 case OP_LCONV_TO_OVF_I4_2: {
5377 guint8 *high_bit_not_set, *valid_negative, *invalid_negative, *valid_positive;
5379 * Valid ints: 0xffffffff:8000000 to 00000000:0x7f000000
5382 ARM_CMP_REG_IMM8 (code, ins->sreg1, 0);
5383 high_bit_not_set = code;
5384 ARM_B_COND (code, ARMCOND_GE, 0); /*branch if bit 31 of the lower part is not set*/
5386 ARM_CMN_REG_IMM8 (code, ins->sreg2, 1); /*This have the same effect as CMP reg, 0xFFFFFFFF */
5387 valid_negative = code;
5388 ARM_B_COND (code, ARMCOND_EQ, 0); /*branch if upper part == 0xFFFFFFFF (lower part has bit 31 set) */
5389 invalid_negative = code;
5390 ARM_B_COND (code, ARMCOND_AL, 0);
5392 arm_patch (high_bit_not_set, code);
5394 ARM_CMP_REG_IMM8 (code, ins->sreg2, 0);
5395 valid_positive = code;
5396 ARM_B_COND (code, ARMCOND_EQ, 0); /*branch if upper part == 0 (lower part has bit 31 clear)*/
5398 arm_patch (invalid_negative, code);
5399 EMIT_COND_SYSTEM_EXCEPTION_FLAGS (ARMCOND_AL, "OverflowException");
5401 arm_patch (valid_negative, code);
5402 arm_patch (valid_positive, code);
5404 if (ins->dreg != ins->sreg1)
5405 ARM_MOV_REG_REG (code, ins->dreg, ins->sreg1);
5409 ARM_VFP_ADDD (code, ins->dreg, ins->sreg1, ins->sreg2);
5412 ARM_VFP_SUBD (code, ins->dreg, ins->sreg1, ins->sreg2);
5415 ARM_VFP_MULD (code, ins->dreg, ins->sreg1, ins->sreg2);
5418 ARM_VFP_DIVD (code, ins->dreg, ins->sreg1, ins->sreg2);
5421 ARM_NEGD (code, ins->dreg, ins->sreg1);
5425 g_assert_not_reached ();
5429 ARM_CMPD (code, ins->sreg1, ins->sreg2);
5435 ARM_CMPD (code, ins->sreg1, ins->sreg2);
5438 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 0, ARMCOND_NE);
5439 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_EQ);
5443 ARM_CMPD (code, ins->sreg1, ins->sreg2);
5446 ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
5447 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_MI);
5451 ARM_CMPD (code, ins->sreg1, ins->sreg2);
5454 ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
5455 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_MI);
5456 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_VS);
5460 ARM_CMPD (code, ins->sreg2, ins->sreg1);
5463 ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
5464 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_MI);
5468 ARM_CMPD (code, ins->sreg2, ins->sreg1);
5471 ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
5472 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_MI);
5473 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_VS);
5477 ARM_CMPD (code, ins->sreg1, ins->sreg2);
5480 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_NE);
5481 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 0, ARMCOND_EQ);
5485 ARM_CMPD (code, ins->sreg1, ins->sreg2);
5488 ARM_MOV_REG_IMM8 (code, ins->dreg, 1);
5489 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 0, ARMCOND_MI);
5493 ARM_CMPD (code, ins->sreg2, ins->sreg1);
5496 ARM_MOV_REG_IMM8 (code, ins->dreg, 1);
5497 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 0, ARMCOND_MI);
5500 /* ARM FPA flags table:
5501 * N Less than ARMCOND_MI
5502 * Z Equal ARMCOND_EQ
5503 * C Greater Than or Equal ARMCOND_CS
5504 * V Unordered ARMCOND_VS
5507 EMIT_COND_BRANCH (ins, OP_IBEQ - OP_IBEQ);
5510 EMIT_COND_BRANCH (ins, OP_IBNE_UN - OP_IBEQ);
5513 EMIT_COND_BRANCH_FLAGS (ins, ARMCOND_MI); /* N set */
5516 EMIT_COND_BRANCH_FLAGS (ins, ARMCOND_VS); /* V set */
5517 EMIT_COND_BRANCH_FLAGS (ins, ARMCOND_MI); /* N set */
5523 g_assert_not_reached ();
5527 EMIT_COND_BRANCH_FLAGS (ins, ARMCOND_GE);
5529 /* FPA requires EQ even thou the docs suggests that just CS is enough */
5530 EMIT_COND_BRANCH_FLAGS (ins, ARMCOND_EQ);
5531 EMIT_COND_BRANCH_FLAGS (ins, ARMCOND_CS);
5535 EMIT_COND_BRANCH_FLAGS (ins, ARMCOND_VS); /* V set */
5536 EMIT_COND_BRANCH_FLAGS (ins, ARMCOND_GE);
5541 code = mono_arm_emit_vfp_scratch_save (cfg, code, vfp_scratch1);
5542 code = mono_arm_emit_vfp_scratch_save (cfg, code, vfp_scratch2);
5544 #ifdef USE_JUMP_TABLES
5546 gpointer *jte = mono_jumptable_add_entries (2);
5547 jte [0] = GUINT_TO_POINTER (0xffffffff);
5548 jte [1] = GUINT_TO_POINTER (0x7fefffff);
5549 code = mono_arm_load_jumptable_entry_addr (code, jte, ARMREG_IP);
5550 ARM_FLDD (code, vfp_scratch1, ARMREG_IP, 0);
5553 ARM_ABSD (code, vfp_scratch2, ins->sreg1);
5554 ARM_FLDD (code, vfp_scratch1, ARMREG_PC, 0);
5556 *(guint32*)code = 0xffffffff;
5558 *(guint32*)code = 0x7fefffff;
5561 ARM_CMPD (code, vfp_scratch2, vfp_scratch1);
5563 EMIT_COND_SYSTEM_EXCEPTION_FLAGS (ARMCOND_GT, "ArithmeticException");
5564 ARM_CMPD (code, ins->sreg1, ins->sreg1);
5566 EMIT_COND_SYSTEM_EXCEPTION_FLAGS (ARMCOND_VS, "ArithmeticException");
5567 ARM_CPYD (code, ins->dreg, ins->sreg1);
5569 code = mono_arm_emit_vfp_scratch_restore (cfg, code, vfp_scratch1);
5570 code = mono_arm_emit_vfp_scratch_restore (cfg, code, vfp_scratch2);
5575 case OP_GC_LIVENESS_DEF:
5576 case OP_GC_LIVENESS_USE:
5577 case OP_GC_PARAM_SLOT_LIVENESS_DEF:
5578 ins->backend.pc_offset = code - cfg->native_code;
5580 case OP_GC_SPILL_SLOT_LIVENESS_DEF:
5581 ins->backend.pc_offset = code - cfg->native_code;
5582 bb->spill_slot_defs = g_slist_prepend_mempool (cfg->mempool, bb->spill_slot_defs, ins);
5586 g_warning ("unknown opcode %s in %s()\n", mono_inst_name (ins->opcode), __FUNCTION__);
5587 g_assert_not_reached ();
5590 if ((cfg->opt & MONO_OPT_BRANCH) && ((code - cfg->native_code - offset) > max_len)) {
5591 g_warning ("wrong maximal instruction length of instruction %s (expected %d, got %d)",
5592 mono_inst_name (ins->opcode), max_len, code - cfg->native_code - offset);
5593 g_assert_not_reached ();
5599 last_offset = offset;
5602 cfg->code_len = code - cfg->native_code;
5605 #endif /* DISABLE_JIT */
5607 #ifdef HAVE_AEABI_READ_TP
5608 void __aeabi_read_tp (void);
/*
 * mono_arch_register_lowlevel_calls:
 *
 *   Register the ARM-specific low-level helpers as JIT icalls so generated
 * code can be patched to call them.  The exception-throw trampoline targets
 * are always registered; the EABI TLS accessor is only registered when
 * building natively for a platform that provides it (HAVE_AEABI_READ_TP).
 */
5612 mono_arch_register_lowlevel_calls (void)
5614 	/* The signature doesn't matter */
5615 	mono_register_jit_icall (mono_arm_throw_exception, "mono_arm_throw_exception", mono_create_icall_signature ("void"), TRUE);
5616 	mono_register_jit_icall (mono_arm_throw_exception_by_token, "mono_arm_throw_exception_by_token", mono_create_icall_signature ("void"), TRUE);
5618 #ifndef MONO_CROSS_COMPILE
5619 #ifdef HAVE_AEABI_READ_TP
/* __aeabi_read_tp returns the thread pointer; taking its address only works
 * when running on the target, hence the MONO_CROSS_COMPILE guard above. */
5620 	mono_register_jit_icall (__aeabi_read_tp, "__aeabi_read_tp", mono_create_icall_signature ("void"), TRUE);
/*
 * patch_lis_ori:
 *
 *   Patch a 32 bit immediate that was materialized by a two-instruction
 * load sequence: the high 16 bits go into halfword [1], the low 16 bits
 * into halfword [3] of the instruction pair at IP.
 *
 * NOTE(review): "lis"/"ori" are PowerPC mnemonics, and every use of this
 * macro below sits behind g_assert_not_reached () — this looks like dead
 * code inherited from the PPC backend; confirm before relying on it.
 */
5625 #define patch_lis_ori(ip,val) do {\
5626 		guint16 *__lis_ori = (guint16*)(ip);	\
5627 		__lis_ori [1] = (((guint32)(val)) >> 16) & 0xffff;	\
5628 		__lis_ori [3] = ((guint32)(val)) & 0xffff;	\
/*
 * mono_arch_patch_code:
 *
 *   Walk the MonoJumpInfo patch list JI and apply each patch to the native
 * code of METHOD starting at CODE.  The patch target is resolved with
 * mono_resolve_patch_target () and, for the common cases, written into the
 * instruction stream by arm_patch_general () at the end of the loop.
 * RUN_CCTORS doubles as the "not AOT" flag (compile_aot = !run_cctors).
 */
5632 mono_arch_patch_code (MonoMethod *method, MonoDomain *domain, guint8 *code, MonoJumpInfo *ji, MonoCodeManager *dyn_code_mp, gboolean run_cctors)
5634 	MonoJumpInfo *patch_info;
5635 	gboolean compile_aot = !run_cctors;
5637 	for (patch_info = ji; patch_info; patch_info = patch_info->next) {
5638 		unsigned char *ip = patch_info->ip.i + code;
5639 		const unsigned char *target;
/* Jump tables for OP_SWITCH are handled up front: fill each slot with the
 * absolute address of the target basic block (JIT case only). */
5641 		if (patch_info->type == MONO_PATCH_INFO_SWITCH && !compile_aot) {
5642 #ifdef USE_JUMP_TABLES
5643 			gpointer *jt = mono_jumptable_get_entry (ip);
/* Without jump tables the table is inlined in the code stream, two
 * instructions (8 bytes) after the patched ldr. */
5645 			gpointer *jt = (gpointer*)(ip + 8);
5648 			/* jt is the inlined jump table, 2 instructions after ip
5649 			 * In the normal case we store the absolute addresses,
5650 			 * otherwise the displacements.
5652 			for (i = 0; i < patch_info->data.table->table_size; i++)
5653 				jt [i] = code + (int)patch_info->data.table->table [i];
5658 		switch (patch_info->type) {
5659 		case MONO_PATCH_INFO_BB:
5660 		case MONO_PATCH_INFO_LABEL:
5663 			/* No need to patch these */
5668 		target = mono_resolve_patch_target (method, domain, code, patch_info, run_cctors);
5670 		switch (patch_info->type) {
/* NOTE(review): the cases below that begin with g_assert_not_reached ()
 * are dead paths (patch_lis_ori is PPC-style patching); they appear to be
 * leftovers from another backend kept for reference. */
5671 		case MONO_PATCH_INFO_IP:
5672 			g_assert_not_reached ();
5673 			patch_lis_ori (ip, ip);
5675 		case MONO_PATCH_INFO_METHOD_REL:
5676 			g_assert_not_reached ();
5677 			*((gpointer *)(ip)) = code + patch_info->data.offset;
5679 		case MONO_PATCH_INFO_METHODCONST:
5680 		case MONO_PATCH_INFO_CLASS:
5681 		case MONO_PATCH_INFO_IMAGE:
5682 		case MONO_PATCH_INFO_FIELD:
5683 		case MONO_PATCH_INFO_VTABLE:
5684 		case MONO_PATCH_INFO_IID:
5685 		case MONO_PATCH_INFO_SFLDA:
5686 		case MONO_PATCH_INFO_LDSTR:
5687 		case MONO_PATCH_INFO_TYPE_FROM_HANDLE:
5688 		case MONO_PATCH_INFO_LDTOKEN:
5689 			g_assert_not_reached ();
5690 			/* from OP_AOTCONST : lis + ori */
5691 			patch_lis_ori (ip, target);
5693 		case MONO_PATCH_INFO_R4:
5694 		case MONO_PATCH_INFO_R8:
5695 			g_assert_not_reached ();
5696 			*((gconstpointer *)(ip + 2)) = patch_info->data.target;
5698 		case MONO_PATCH_INFO_EXC_NAME:
5699 			g_assert_not_reached ();
5700 			*((gconstpointer *)(ip + 1)) = patch_info->data.name;
5702 		case MONO_PATCH_INFO_NONE:
5703 		case MONO_PATCH_INFO_BB_OVF:
5704 		case MONO_PATCH_INFO_EXC_OVF:
5705 			/* everything is dealt with at epilog output time */
/* Default path: rewrite the branch/call/load at ip to reference target,
 * flushing the icache as needed (handled inside arm_patch_general). */
5710 		arm_patch_general (domain, ip, target, dyn_code_mp);
5717 * Stack frame layout:
5719 * ------------------- fp
5720 * MonoLMF structure or saved registers
5721 * -------------------
5723 * -------------------
5725 * -------------------
5726 * optional 8 bytes for tracing
5727 * -------------------
5728 * param area size is cfg->param_area
5729 * ------------------- sp
/*
 * mono_arch_emit_prolog:
 *
 *   Emit the native prologue for CFG->method and return the code buffer.
 * Phases, in order:
 *   1. allocate the initial native code buffer and set the initial CFA;
 *   2. (iphone_abi) push r7/lr and keep r7 pointing at that pair, since the
 *      iOS ABI expects r7 as a frame marker (we still use our own frame reg);
 *   3. save callee-saved registers — either cfg->used_int_regs (+lr), or,
 *      for LMF methods, the fixed set 0x5ff0 (r4-r12,lr) which becomes the
 *      register part of the MonoLMF; emit matching unwind/GC-map info;
 *   4. align and allocate the stack frame, point cfg->frame_reg at it;
 *   5. compute bb->max_offset estimates for forward-branch sizing;
 *   6. spill the rgctx register and every argument that arrives in
 *      registers / on the caller's stack into its home slot, per the
 *      ArgInfo storage kind computed by get_call_info ();
 *   7. save the LMF and emit optional tracing/sequence-point setup.
 */
5732 mono_arch_emit_prolog (MonoCompile *cfg)
5734 	MonoMethod *method = cfg->method;
5736 	MonoMethodSignature *sig;
5738 	int alloc_size, orig_alloc_size, pos, max_offset, i, rot_amount;
5743 	int prev_sp_offset, reg_offset;
5745 	if (mono_jit_trace_calls != NULL && mono_trace_eval (method))
/* Initial buffer size is a heuristic; callers realloc on overflow. */
5748 	sig = mono_method_signature (method);
5749 	cfg->code_size = 256 + sig->param_count * 64;
5750 	code = cfg->native_code = g_malloc (cfg->code_size);
5752 	mono_emit_unwind_op_def_cfa (cfg, code, ARMREG_SP, 0);
5754 	alloc_size = cfg->stack_offset;
5760 	 * The iphone uses R7 as the frame pointer, and it points at the saved
5765 	 * We can't use r7 as a frame pointer since it points into the middle of
5766 	 * the frame, so we keep using our own frame pointer.
5767 	 * FIXME: Optimize this.
5769 		ARM_PUSH (code, (1 << ARMREG_R7) | (1 << ARMREG_LR));
5770 		ARM_MOV_REG_REG (code, ARMREG_R7, ARMREG_SP);
5771 		prev_sp_offset += 8; /* r7 and lr */
5772 		mono_emit_unwind_op_def_cfa_offset (cfg, code, prev_sp_offset);
5773 		mono_emit_unwind_op_offset (cfg, code, ARMREG_R7, (- prev_sp_offset) + 0);
/* Non-LMF path: push only the callee-saved registers this method uses. */
5776 	if (!method->save_lmf) {
5778 			/* No need to push LR again */
5779 			if (cfg->used_int_regs)
5780 				ARM_PUSH (code, cfg->used_int_regs);
5782 			ARM_PUSH (code, cfg->used_int_regs | (1 << ARMREG_LR));
5783 			prev_sp_offset += 4;
5785 		for (i = 0; i < 16; ++i) {
5786 			if (cfg->used_int_regs & (1 << i))
5787 				prev_sp_offset += 4;
5789 		mono_emit_unwind_op_def_cfa_offset (cfg, code, prev_sp_offset);
/* Record the save slot of each pushed register for the unwinder, and mark
 * the slots as holding no GC references. */
5791 		for (i = 0; i < 16; ++i) {
5792 			if ((cfg->used_int_regs & (1 << i))) {
5793 				mono_emit_unwind_op_offset (cfg, code, i, (- prev_sp_offset) + reg_offset);
5794 				mini_gc_set_slot_type_from_cfa (cfg, (- prev_sp_offset) + reg_offset, SLOT_NOREF);
5799 			mono_emit_unwind_op_offset (cfg, code, ARMREG_LR, -4);
5800 			mini_gc_set_slot_type_from_cfa (cfg, -4, SLOT_NOREF);
5802 			mono_emit_unwind_op_offset (cfg, code, ARMREG_LR, -4);
5803 			mini_gc_set_slot_type_from_cfa (cfg, -4, SLOT_NOREF);
/* LMF path: save the fixed register set r4-r12,lr (mask 0x5ff0); these ten
 * words form the register area at the end of the MonoLMF structure. */
5806 		ARM_MOV_REG_REG (code, ARMREG_IP, ARMREG_SP);
5807 		ARM_PUSH (code, 0x5ff0);
5808 		prev_sp_offset += 4 * 10; /* all but r0-r3, sp and pc */
5809 		mono_emit_unwind_op_def_cfa_offset (cfg, code, prev_sp_offset);
5811 		for (i = 0; i < 16; ++i) {
5812 			if ((i > ARMREG_R3) && (i != ARMREG_SP) && (i != ARMREG_PC)) {
5813 				/* The original r7 is saved at the start */
5814 				if (!(iphone_abi && i == ARMREG_R7))
5815 					mono_emit_unwind_op_offset (cfg, code, i, (- prev_sp_offset) + reg_offset);
5819 		g_assert (reg_offset == 4 * 10);
5820 		pos += sizeof (MonoLMF) - (4 * 10);
/* Frame allocation: round alloc_size up to MONO_ARCH_FRAME_ALIGNMENT and
 * compensate for the parity of the pushed-register area. */
5824 	orig_alloc_size = alloc_size;
5825 	// align to MONO_ARCH_FRAME_ALIGNMENT bytes
5826 	if (alloc_size & (MONO_ARCH_FRAME_ALIGNMENT - 1)) {
5827 		alloc_size += MONO_ARCH_FRAME_ALIGNMENT - 1;
5828 		alloc_size &= ~(MONO_ARCH_FRAME_ALIGNMENT - 1);
5831 	/* the stack used in the pushed regs */
5832 	if (prev_sp_offset & 4)
5834 	cfg->stack_usage = alloc_size;
/* sub sp, sp, #alloc_size — use an immediate when it fits the rotated
 * imm8 encoding, otherwise materialize it in ip first. */
5836 		if ((i = mono_arm_is_rotated_imm8 (alloc_size, &rot_amount)) >= 0) {
5837 			ARM_SUB_REG_IMM (code, ARMREG_SP, ARMREG_SP, i, rot_amount);
5839 			code = mono_arm_emit_load_imm (code, ARMREG_IP, alloc_size);
5840 			ARM_SUB_REG_REG (code, ARMREG_SP, ARMREG_SP, ARMREG_IP);
5842 		mono_emit_unwind_op_def_cfa_offset (cfg, code, prev_sp_offset + alloc_size);
5844 	if (cfg->frame_reg != ARMREG_SP) {
5845 		ARM_MOV_REG_REG (code, cfg->frame_reg, ARMREG_SP);
5846 		mono_emit_unwind_op_def_cfa_reg (cfg, code, cfg->frame_reg);
5848 	//g_print ("prev_sp_offset: %d, alloc_size:%d\n", prev_sp_offset, alloc_size);
5849 	prev_sp_offset += alloc_size;
/* The alignment padding never holds references. */
5851 	for (i = 0; i < alloc_size - orig_alloc_size; i += 4)
5852 		mini_gc_set_slot_type_from_cfa (cfg, (- prev_sp_offset) + orig_alloc_size + i, SLOT_NOREF);
5854 	/* compute max_offset in order to use short forward jumps
5855 	 * we could skip do it on arm because the immediate displacement
5856 	 * for jumps is large enough, it may be useful later for constant pools
5859 	for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
5860 		MonoInst *ins = bb->code;
5861 		bb->max_offset = max_offset;
5863 		if (cfg->prof_options & MONO_PROFILE_COVERAGE)
5866 		MONO_BB_FOR_EACH_INS (bb, ins)
5867 			max_offset += ((guint8 *)ins_get_spec (ins->opcode))[MONO_INST_LEN];
5870 	/* store runtime generic context */
5871 	if (cfg->rgctx_var) {
5872 		MonoInst *ins = cfg->rgctx_var;
5874 		g_assert (ins->opcode == OP_REGOFFSET);
5876 		if (arm_is_imm12 (ins->inst_offset)) {
5877 			ARM_STR_IMM (code, MONO_ARCH_RGCTX_REG, ins->inst_basereg, ins->inst_offset);
5879 			code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_offset);
5880 			ARM_STR_REG_REG (code, MONO_ARCH_RGCTX_REG, ins->inst_basereg, ARMREG_LR);
5884 	/* load arguments allocated to register from the stack */
5887 	cinfo = get_call_info (cfg->generic_sharing_context, NULL, sig);
/* Valuetype returns: spill the hidden return-address argument register. */
5889 	if (cinfo->vtype_retaddr) {
5890 		ArgInfo *ainfo = &cinfo->ret;
5891 		inst = cfg->vret_addr;
5892 		g_assert (arm_is_imm12 (inst->inst_offset));
5893 		ARM_STR_IMM (code, ainfo->reg, inst->inst_basereg, inst->inst_offset);
5896 	if (sig->call_convention == MONO_CALL_VARARG) {
5897 		ArgInfo *cookie = &cinfo->sig_cookie;
5899 		/* Save the sig cookie address */
5900 		g_assert (cookie->storage == RegTypeBase);
5902 		g_assert (arm_is_imm12 (prev_sp_offset + cookie->offset));
5903 		g_assert (arm_is_imm12 (cfg->sig_cookie));
5904 		ARM_ADD_REG_IMM8 (code, ARMREG_IP, cfg->frame_reg, prev_sp_offset + cookie->offset);
5905 		ARM_STR_IMM (code, ARMREG_IP, cfg->frame_reg, cfg->sig_cookie);
/* Per-argument spilling.  Throughout: ip and lr are used as scratch when an
 * offset does not fit the addressing-mode immediate. */
5908 	for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
5909 		ArgInfo *ainfo = cinfo->args + i;
5910 		inst = cfg->args [pos];
5912 		if (cfg->verbose_level > 2)
5913 			g_print ("Saving argument %d (type: %d)\n", i, ainfo->storage);
/* Argument kept in a register for its whole lifetime (OP_REGVAR):
 * just move/load it into its allocated register. */
5914 		if (inst->opcode == OP_REGVAR) {
5915 			if (ainfo->storage == RegTypeGeneral)
5916 				ARM_MOV_REG_REG (code, inst->dreg, ainfo->reg);
5917 			else if (ainfo->storage == RegTypeFP) {
5918 				g_assert_not_reached ();
5919 			} else if (ainfo->storage == RegTypeBase) {
5920 				if (arm_is_imm12 (prev_sp_offset + ainfo->offset)) {
5921 					ARM_LDR_IMM (code, inst->dreg, ARMREG_SP, (prev_sp_offset + ainfo->offset));
5923 					code = mono_arm_emit_load_imm (code, ARMREG_IP, prev_sp_offset + ainfo->offset);
5924 					ARM_LDR_REG_REG (code, inst->dreg, ARMREG_SP, ARMREG_IP);
5927 				g_assert_not_reached ();
5929 			if (cfg->verbose_level > 2)
5930 				g_print ("Argument %d assigned to register %s\n", pos, mono_arch_regname (inst->dreg));
5932 			/* the argument should be put on the stack: FIXME handle size != word */
/* Incoming register argument → stack slot, sized store per ainfo->size
 * (1 = byte, 2 = halfword, 8 = register pair, default = word). */
5933 			if (ainfo->storage == RegTypeGeneral || ainfo->storage == RegTypeIRegPair || ainfo->storage == RegTypeGSharedVtInReg) {
5934 				switch (ainfo->size) {
5936 					if (arm_is_imm12 (inst->inst_offset))
5937 						ARM_STRB_IMM (code, ainfo->reg, inst->inst_basereg, inst->inst_offset);
5939 						code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
5940 						ARM_STRB_REG_REG (code, ainfo->reg, inst->inst_basereg, ARMREG_IP);
5944 					if (arm_is_imm8 (inst->inst_offset)) {
5945 						ARM_STRH_IMM (code, ainfo->reg, inst->inst_basereg, inst->inst_offset);
5947 						code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
5948 						ARM_STRH_REG_REG (code, ainfo->reg, inst->inst_basereg, ARMREG_IP);
5952 					if (arm_is_imm12 (inst->inst_offset)) {
5953 						ARM_STR_IMM (code, ainfo->reg, inst->inst_basereg, inst->inst_offset);
5955 						code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
5956 						ARM_STR_REG_REG (code, ainfo->reg, inst->inst_basereg, ARMREG_IP);
5958 					if (arm_is_imm12 (inst->inst_offset + 4)) {
5959 						ARM_STR_IMM (code, ainfo->reg + 1, inst->inst_basereg, inst->inst_offset + 4);
5961 						code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset + 4);
5962 						ARM_STR_REG_REG (code, ainfo->reg + 1, inst->inst_basereg, ARMREG_IP);
5966 					if (arm_is_imm12 (inst->inst_offset)) {
5967 						ARM_STR_IMM (code, ainfo->reg, inst->inst_basereg, inst->inst_offset);
5969 						code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
5970 						ARM_STR_REG_REG (code, ainfo->reg, inst->inst_basereg, ARMREG_IP);
/* RegTypeBaseGen: 64-bit argument split between r3 (low word) and the
 * caller's stack (high word) — reassemble it into the local slot. */
5974 			} else if (ainfo->storage == RegTypeBaseGen) {
5975 				if (arm_is_imm12 (prev_sp_offset + ainfo->offset)) {
5976 					ARM_LDR_IMM (code, ARMREG_LR, ARMREG_SP, (prev_sp_offset + ainfo->offset));
5978 					code = mono_arm_emit_load_imm (code, ARMREG_IP, prev_sp_offset + ainfo->offset);
5979 					ARM_LDR_REG_REG (code, ARMREG_LR, ARMREG_SP, ARMREG_IP);
5981 				if (arm_is_imm12 (inst->inst_offset + 4)) {
5982 					ARM_STR_IMM (code, ARMREG_LR, inst->inst_basereg, inst->inst_offset + 4);
5983 					ARM_STR_IMM (code, ARMREG_R3, inst->inst_basereg, inst->inst_offset);
5985 					code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset + 4);
5986 					ARM_STR_REG_REG (code, ARMREG_LR, inst->inst_basereg, ARMREG_IP);
5987 					code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
5988 					ARM_STR_REG_REG (code, ARMREG_R3, inst->inst_basereg, ARMREG_IP);
/* RegTypeBase: argument entirely on the caller's stack — copy it (via lr)
 * into the local slot with a size-appropriate store. */
5990 			} else if (ainfo->storage == RegTypeBase || ainfo->storage == RegTypeGSharedVtOnStack) {
5991 				if (arm_is_imm12 (prev_sp_offset + ainfo->offset)) {
5992 					ARM_LDR_IMM (code, ARMREG_LR, ARMREG_SP, (prev_sp_offset + ainfo->offset));
5994 					code = mono_arm_emit_load_imm (code, ARMREG_IP, prev_sp_offset + ainfo->offset);
5995 					ARM_LDR_REG_REG (code, ARMREG_LR, ARMREG_SP, ARMREG_IP);
5998 				switch (ainfo->size) {
6000 					if (arm_is_imm8 (inst->inst_offset)) {
6001 						ARM_STRB_IMM (code, ARMREG_LR, inst->inst_basereg, inst->inst_offset);
6003 						code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
6004 						ARM_STRB_REG_REG (code, ARMREG_LR, inst->inst_basereg, ARMREG_IP);
6008 					if (arm_is_imm8 (inst->inst_offset)) {
6009 						ARM_STRH_IMM (code, ARMREG_LR, inst->inst_basereg, inst->inst_offset);
6011 						code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
6012 						ARM_STRH_REG_REG (code, ARMREG_LR, inst->inst_basereg, ARMREG_IP);
6016 					if (arm_is_imm12 (inst->inst_offset)) {
6017 						ARM_STR_IMM (code, ARMREG_LR, inst->inst_basereg, inst->inst_offset);
6019 						code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
6020 						ARM_STR_REG_REG (code, ARMREG_LR, inst->inst_basereg, ARMREG_IP);
6022 					if (arm_is_imm12 (prev_sp_offset + ainfo->offset + 4)) {
6023 						ARM_LDR_IMM (code, ARMREG_LR, ARMREG_SP, (prev_sp_offset + ainfo->offset + 4));
6025 						code = mono_arm_emit_load_imm (code, ARMREG_IP, prev_sp_offset + ainfo->offset + 4);
6026 						ARM_LDR_REG_REG (code, ARMREG_LR, ARMREG_SP, ARMREG_IP);
6028 					if (arm_is_imm12 (inst->inst_offset + 4)) {
6029 						ARM_STR_IMM (code, ARMREG_LR, inst->inst_basereg, inst->inst_offset + 4);
6031 						code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset + 4);
6032 						ARM_STR_REG_REG (code, ARMREG_LR, inst->inst_basereg, ARMREG_IP);
6036 					if (arm_is_imm12 (inst->inst_offset)) {
6037 						ARM_STR_IMM (code, ARMREG_LR, inst->inst_basereg, inst->inst_offset);
6039 						code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
6040 						ARM_STR_REG_REG (code, ARMREG_LR, inst->inst_basereg, ARMREG_IP);
/* RegTypeFP: argument in a VFP register (hard-float ABI) — store it with
 * a single/double store through an address built in ip. */
6044 			} else if (ainfo->storage == RegTypeFP) {
6045 				int imm8, rot_amount;
6047 				if ((imm8 = mono_arm_is_rotated_imm8 (inst->inst_offset, &rot_amount)) == -1) {
6048 					code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
6049 					ARM_ADD_REG_REG (code, ARMREG_IP, ARMREG_IP, inst->inst_basereg);
6051 					ARM_ADD_REG_IMM (code, ARMREG_IP, inst->inst_basereg, imm8, rot_amount);
6053 				if (ainfo->size == 8)
6054 					ARM_FSTD (code, ainfo->reg, ARMREG_IP, 0);
6056 					ARM_FSTS (code, ainfo->reg, ARMREG_IP, 0);
/* RegTypeStructByVal: struct passed partly in registers (ainfo->size
 * words) and partly on the stack (ainfo->vtsize words). */
6057 			} else if (ainfo->storage == RegTypeStructByVal) {
6058 				int doffset = inst->inst_offset;
6062 				size = mini_type_stack_size_full (cfg->generic_sharing_context, inst->inst_vtype, NULL, sig->pinvoke);
6063 				for (cur_reg = 0; cur_reg < ainfo->size; ++cur_reg) {
6064 					if (arm_is_imm12 (doffset)) {
6065 						ARM_STR_IMM (code, ainfo->reg + cur_reg, inst->inst_basereg, doffset);
6067 						code = mono_arm_emit_load_imm (code, ARMREG_IP, doffset);
6068 						ARM_STR_REG_REG (code, ainfo->reg + cur_reg, inst->inst_basereg, ARMREG_IP);
6070 					soffset += sizeof (gpointer);
6071 					doffset += sizeof (gpointer);
6073 				if (ainfo->vtsize) {
6074 					/* FIXME: handle overrun! with struct sizes not multiple of 4 */
6075 					//g_print ("emit_memcpy (prev_sp_ofs: %d, ainfo->offset: %d, soffset: %d)\n", prev_sp_offset, ainfo->offset, soffset);
6076 					code = emit_memcpy (code, ainfo->vtsize * sizeof (gpointer), inst->inst_basereg, doffset, ARMREG_SP, prev_sp_offset + ainfo->offset);
6078 			} else if (ainfo->storage == RegTypeStructByAddr) {
6079 				g_assert_not_reached ();
6080 				/* FIXME: handle overrun! with struct sizes not multiple of 4 */
6081 				code = emit_memcpy (code, ainfo->vtsize * sizeof (gpointer), inst->inst_basereg, inst->inst_offset, ainfo->reg, 0);
6083 				g_assert_not_reached ();
6088 	if (method->save_lmf)
6089 		code = emit_save_lmf (cfg, code, alloc_size - lmf_offset);
6092 		code = mono_arch_instrument_prolog (cfg, mono_trace_enter_method, code, TRUE);
/* Sequence-point support: load the SeqPointInfo pointer (from a jump-table
 * entry or an inline GOT-style slot patched at MONO_PATCH_INFO_SEQ_POINT_INFO)
 * into its variable. */
6094 	if (cfg->arch.seq_point_info_var) {
6095 		MonoInst *ins = cfg->arch.seq_point_info_var;
6097 		/* Initialize the variable from a GOT slot */
6098 		mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_SEQ_POINT_INFO, cfg->method);
6099 #ifdef USE_JUMP_TABLES
6101 			gpointer *jte = mono_jumptable_add_entry ();
6102 			code = mono_arm_load_jumptable_entry (code, jte, ARMREG_IP);
6103 			ARM_LDR_IMM (code, ARMREG_R0, ARMREG_IP, 0);
6105 		/** XXX: is it correct? */
6107 			ARM_LDR_IMM (code, ARMREG_R0, ARMREG_PC, 0);
6109 			*(gpointer*)code = NULL;
6112 			ARM_LDR_REG_REG (code, ARMREG_R0, ARMREG_PC, ARMREG_R0);
6114 		g_assert (ins->opcode == OP_REGOFFSET);
6116 		if (arm_is_imm12 (ins->inst_offset)) {
6117 			ARM_STR_IMM (code, ARMREG_R0, ins->inst_basereg, ins->inst_offset);
6119 			code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_offset);
6120 			ARM_STR_REG_REG (code, ARMREG_R0, ins->inst_basereg, ARMREG_LR);
6124 		/* Initialize ss_trigger_page_var */
6125 		if (!cfg->soft_breakpoints) {
6126 			MonoInst *info_var = cfg->arch.seq_point_info_var;
6127 			MonoInst *ss_trigger_page_var = cfg->arch.ss_trigger_page_var;
6128 			int dreg = ARMREG_LR;
6131 			g_assert (info_var->opcode == OP_REGOFFSET);
6132 			g_assert (arm_is_imm12 (info_var->inst_offset));
6134 			ARM_LDR_IMM (code, dreg, info_var->inst_basereg, info_var->inst_offset);
6135 			/* Load the trigger page addr */
6136 			ARM_LDR_IMM (code, dreg, dreg, MONO_STRUCT_OFFSET (SeqPointInfo, ss_trigger_page));
6137 			ARM_STR_IMM (code, dreg, ss_trigger_page_var->inst_basereg, ss_trigger_page_var->inst_offset);
/* Soft-breakpoint support: cache &ss_trigger_var and the single-step /
 * breakpoint wrapper addresses into their stack variables. */
6141 	if (cfg->arch.seq_point_read_var) {
6142 		MonoInst *read_ins = cfg->arch.seq_point_read_var;
6143 		MonoInst *ss_method_ins = cfg->arch.seq_point_ss_method_var;
6144 		MonoInst *bp_method_ins = cfg->arch.seq_point_bp_method_var;
6145 #ifdef USE_JUMP_TABLES
6148 		g_assert (read_ins->opcode == OP_REGOFFSET);
6149 		g_assert (arm_is_imm12 (read_ins->inst_offset));
6150 		g_assert (ss_method_ins->opcode == OP_REGOFFSET);
6151 		g_assert (arm_is_imm12 (ss_method_ins->inst_offset));
6152 		g_assert (bp_method_ins->opcode == OP_REGOFFSET);
6153 		g_assert (arm_is_imm12 (bp_method_ins->inst_offset));
6155 #ifdef USE_JUMP_TABLES
6156 		jte = mono_jumptable_add_entries (3);
6157 		jte [0] = (gpointer)&ss_trigger_var;
6158 		jte [1] = single_step_func_wrapper;
6159 		jte [2] = breakpoint_func_wrapper;
6160 		code = mono_arm_load_jumptable_entry_addr (code, jte, ARMREG_LR);
/* Non-jump-table variant: the three pointers are embedded inline right
 * after a "mov lr, pc", then read back via lr-relative loads below. */
6162 		ARM_MOV_REG_REG (code, ARMREG_LR, ARMREG_PC);
6164 		*(volatile int **)code = &ss_trigger_var;
6166 		*(gpointer*)code = single_step_func_wrapper;
6168 		*(gpointer*)code = breakpoint_func_wrapper;
6172 		ARM_LDR_IMM (code, ARMREG_IP, ARMREG_LR, 0);
6173 		ARM_STR_IMM (code, ARMREG_IP, read_ins->inst_basereg, read_ins->inst_offset);
6174 		ARM_LDR_IMM (code, ARMREG_IP, ARMREG_LR, 4);
6175 		ARM_STR_IMM (code, ARMREG_IP, ss_method_ins->inst_basereg, ss_method_ins->inst_offset);
6176 		ARM_LDR_IMM (code, ARMREG_IP, ARMREG_LR, 8);
6177 		ARM_STR_IMM (code, ARMREG_IP, bp_method_ins->inst_basereg, bp_method_ins->inst_offset);
6180 	cfg->code_len = code - cfg->native_code;
6181 	g_assert (cfg->code_len < cfg->code_size);
/*
 * mono_arch_emit_epilog:
 *
 *   Emit the method epilogue: optional trace-leave instrumentation, loading
 * of a struct-returned-by-value into r0, LMF restoration when the method
 * saved an LMF, and the final stack unwind + register pop that returns to
 * the caller (the saved LR is popped directly into PC).
 * NOTE(review): this view of the file is elided (the embedded source line
 * numbers are non-contiguous), so some statements of the body are missing.
 */
6188 mono_arch_emit_epilog (MonoCompile *cfg)
6190 MonoMethod *method = cfg->method;
6191 int pos, i, rot_amount;
/* Conservative upper bound on epilogue size; grown below for LMF/tracing. */
6192 int max_epilog_size = 16 + 20*4;
6196 if (cfg->method->save_lmf)
6197 max_epilog_size += 128;
6199 if (mono_jit_trace_calls != NULL)
6200 max_epilog_size += 50;
6202 if (cfg->prof_options & MONO_PROFILE_ENTER_LEAVE)
6203 max_epilog_size += 50;
/* Grow the native code buffer until the worst-case epilogue fits. */
6205 while (cfg->code_len + max_epilog_size > (cfg->code_size - 16)) {
6206 cfg->code_size *= 2;
6207 cfg->native_code = g_realloc (cfg->native_code, cfg->code_size);
6208 cfg->stat_code_reallocs++;
6212 * Keep in sync with OP_JMP
6214 code = cfg->native_code + cfg->code_len;
6216 if (mono_jit_trace_calls != NULL && mono_trace_eval (method)) {
6217 code = mono_arch_instrument_epilog (cfg, mono_trace_leave_method, code, TRUE);
6221 /* Load returned vtypes into registers if needed */
6222 cinfo = cfg->arch.cinfo;
6223 if (cinfo->ret.storage == RegTypeStructByVal) {
6224 MonoInst *ins = cfg->ret;
6226 if (arm_is_imm12 (ins->inst_offset)) {
6227 ARM_LDR_IMM (code, ARMREG_R0, ins->inst_basereg, ins->inst_offset);
6229 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_offset);
6230 ARM_LDR_REG_REG (code, ARMREG_R0, ins->inst_basereg, ARMREG_LR);
6234 if (method->save_lmf) {
6235 int lmf_offset, reg, sp_adj, regmask;
6236 /* all but r0-r3, sp and pc */
6237 pos += sizeof (MonoLMF) - (MONO_ARM_NUM_SAVED_REGS * sizeof (mgreg_t));
6240 code = emit_restore_lmf (cfg, code, cfg->stack_usage - lmf_offset);
6242 /* This points to r4 inside MonoLMF->iregs */
6243 sp_adj = (sizeof (MonoLMF) - MONO_ARM_NUM_SAVED_REGS * sizeof (mgreg_t));
/* 0x9ff0 = {r4-r12, pc}: restore callee-saved regs and return in one POP. */
6245 regmask = 0x9ff0; /* restore lr to pc */
6246 /* Skip caller saved registers not used by the method */
6247 while (!(cfg->used_int_regs & (1 << reg)) && reg < ARMREG_FP) {
6248 regmask &= ~(1 << reg);
6253 /* Restored later */
6254 regmask &= ~(1 << ARMREG_PC);
6255 /* point sp at the registers to restore: 10 is 14 -4, because we skip r0-r3 */
6256 code = emit_big_add (code, ARMREG_SP, cfg->frame_reg, cfg->stack_usage - lmf_offset + sp_adj);
6258 ARM_POP (code, regmask);
6260 /* Restore saved r7, restore LR to PC */
6261 /* Skip lr from the lmf */
6262 ARM_ADD_REG_IMM (code, ARMREG_SP, ARMREG_SP, sizeof (gpointer), 0);
6263 ARM_POP (code, (1 << ARMREG_R7) | (1 << ARMREG_PC));
/* Non-LMF path: rewind sp to the saved-register area relative to frame_reg. */
6266 if ((i = mono_arm_is_rotated_imm8 (cfg->stack_usage, &rot_amount)) >= 0) {
6267 ARM_ADD_REG_IMM (code, ARMREG_SP, cfg->frame_reg, i, rot_amount);
6269 code = mono_arm_emit_load_imm (code, ARMREG_IP, cfg->stack_usage);
6270 ARM_ADD_REG_REG (code, ARMREG_SP, cfg->frame_reg, ARMREG_IP);
6274 /* Restore saved gregs */
6275 if (cfg->used_int_regs)
6276 ARM_POP (code, cfg->used_int_regs);
6277 /* Restore saved r7, restore LR to PC */
6278 ARM_POP (code, (1 << ARMREG_R7) | (1 << ARMREG_PC));
6280 ARM_POP (code, cfg->used_int_regs | (1 << ARMREG_PC));
6284 cfg->code_len = code - cfg->native_code;
6286 g_assert (cfg->code_len < cfg->code_size);
/*
 * mono_arch_emit_exceptions:
 *
 *   Emit the out-of-line code which raises corlib exceptions for this
 * method.  Each distinct exception type gets exactly one throw sequence;
 * later throw sites of the same type are patched to branch to the first.
 */
6291 mono_arch_emit_exceptions (MonoCompile *cfg)
6293 MonoJumpInfo *patch_info;
6296 guint8* exc_throw_pos [MONO_EXC_INTRINS_NUM];
6297 guint8 exc_throw_found [MONO_EXC_INTRINS_NUM];
6298 int max_epilog_size = 50;
6300 for (i = 0; i < MONO_EXC_INTRINS_NUM; i++) {
6301 exc_throw_pos [i] = NULL;
6302 exc_throw_found [i] = 0;
6305 /* count the number of exception infos */
6308 * make sure we have enough space for exceptions
6310 for (patch_info = cfg->patch_info; patch_info; patch_info = patch_info->next) {
6311 if (patch_info->type == MONO_PATCH_INFO_EXC) {
6312 i = mini_exception_id_by_name (patch_info->data.target);
6313 if (!exc_throw_found [i]) {
6314 max_epilog_size += 32;
6315 exc_throw_found [i] = TRUE;
/* Grow the code buffer until the throw sequences are guaranteed to fit. */
6320 while (cfg->code_len + max_epilog_size > (cfg->code_size - 16)) {
6321 cfg->code_size *= 2;
6322 cfg->native_code = g_realloc (cfg->native_code, cfg->code_size);
6323 cfg->stat_code_reallocs++;
6326 code = cfg->native_code + cfg->code_len;
6328 /* add code to raise exceptions */
6329 for (patch_info = cfg->patch_info; patch_info; patch_info = patch_info->next) {
6330 switch (patch_info->type) {
6331 case MONO_PATCH_INFO_EXC: {
6332 MonoClass *exc_class;
6333 unsigned char *ip = patch_info->ip.i + cfg->native_code;
6335 i = mini_exception_id_by_name (patch_info->data.target);
/* Reuse an already-emitted throw sequence for this exception type. */
6336 if (exc_throw_pos [i]) {
6337 arm_patch (ip, exc_throw_pos [i]);
6338 patch_info->type = MONO_PATCH_INFO_NONE;
6341 exc_throw_pos [i] = code;
6343 arm_patch (ip, code);
6345 exc_class = mono_class_from_name (mono_defaults.corlib, "System", patch_info->data.name);
6346 g_assert (exc_class);
/* r1 = LR so the throw helper knows the faulting return address. */
6348 ARM_MOV_REG_REG (code, ARMREG_R1, ARMREG_LR);
6349 #ifdef USE_JUMP_TABLES
6351 gpointer *jte = mono_jumptable_add_entries (2);
6352 patch_info->type = MONO_PATCH_INFO_INTERNAL_METHOD;
6353 patch_info->data.name = "mono_arch_throw_corlib_exception";
6354 patch_info->ip.i = code - cfg->native_code;
6355 code = mono_arm_load_jumptable_entry_addr (code, jte, ARMREG_R0);
6356 ARM_LDR_IMM (code, ARMREG_IP, ARMREG_R0, 0);
6357 ARM_LDR_IMM (code, ARMREG_R0, ARMREG_R0, 4);
6358 ARM_BLX_REG (code, ARMREG_IP);
6359 jte [1] = GUINT_TO_POINTER (exc_class->type_token);
/* Non-jumptable path: the type token word is embedded in the code stream. */
6362 ARM_LDR_IMM (code, ARMREG_R0, ARMREG_PC, 0);
6363 patch_info->type = MONO_PATCH_INFO_INTERNAL_METHOD;
6364 patch_info->data.name = "mono_arch_throw_corlib_exception";
6365 patch_info->ip.i = code - cfg->native_code;
6367 *(guint32*)(gpointer)code = exc_class->type_token;
6378 cfg->code_len = code - cfg->native_code;
6380 g_assert (cfg->code_len < cfg->code_size);
6384 #endif /* #ifndef DISABLE_JIT */
/* Arch-specific late-initialization hook (body elided in this view). */
6387 mono_arch_finish_init (void)
/* Free arch-specific parts of the per-thread JIT TLS data (body elided here). */
6392 mono_arch_free_jit_tls_data (MonoJitTlsData *tls)
/* Arch hook for intrinsic replacement of CMETHOD calls (body elided here). */
6397 mono_arch_emit_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
/* Debug hook for printing arch-specific instruction trees (body elided here). */
6404 mono_arch_print_tree (MonoInst *tree, int arity)
/* Return the offset of the patchable word inside CODE (body elided here). */
6414 mono_arch_get_patch_offset (guint8 *code)
/* No-op on ARM: register windows are a SPARC concept (body elided here). */
6421 mono_arch_flush_register_windows (void)
/*
 * mono_arch_emit_imt_argument:
 *
 *   Arrange for the IMT/method argument of CALL to be passed in V5.
 * AOT, LLVM, jumptables or a generic context always force a register
 * argument (dynamic_imt_arg); otherwise the method pointer is emitted
 * as a constant into METHOD_REG.
 */
6428 mono_arch_emit_imt_argument (MonoCompile *cfg, MonoCallInst *call, MonoInst *imt_arg)
6430 int method_reg = mono_alloc_ireg (cfg);
6431 #ifdef USE_JUMP_TABLES
6432 int use_jumptables = TRUE;
6434 int use_jumptables = FALSE;
6437 if (cfg->compile_aot) {
6440 call->dynamic_imt_arg = TRUE;
6443 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, method_reg, imt_arg->dreg);
/* No explicit imt_arg: load the method as an AOT constant instead. */
6445 MONO_INST_NEW (cfg, ins, OP_AOTCONST);
6446 ins->dreg = method_reg;
6447 ins->inst_p0 = call->method;
6448 ins->inst_c1 = MONO_PATCH_INFO_METHODCONST;
6449 MONO_ADD_INS (cfg->cbb, ins);
6451 mono_call_inst_add_outarg_reg (cfg, call, method_reg, ARMREG_V5, FALSE);
6452 } else if (cfg->generic_context || imt_arg || mono_use_llvm || use_jumptables) {
6453 /* Always pass in a register for simplicity */
6454 call->dynamic_imt_arg = TRUE;
6456 cfg->uses_rgctx_reg = TRUE;
6459 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, method_reg, imt_arg->dreg);
/* Plain JIT case: the method pointer is a compile-time constant. */
6463 MONO_INST_NEW (cfg, ins, OP_PCONST);
6464 ins->inst_p0 = call->method;
6465 ins->dreg = method_reg;
6466 MONO_ADD_INS (cfg->cbb, ins);
6469 mono_call_inst_add_outarg_reg (cfg, call, method_reg, ARMREG_V5, FALSE);
6473 #endif /* DISABLE_JIT */
/*
 * mono_arch_find_imt_method:
 *
 *   Recover the IMT method at a call site: from register V5 with
 * jumptables / AOT / gsharedvt, otherwise from the word embedded in the
 * code stream right after the PC-relative LDR that loaded it.
 */
6476 mono_arch_find_imt_method (mgreg_t *regs, guint8 *code)
6478 #ifdef USE_JUMP_TABLES
6479 return (MonoMethod*)regs [ARMREG_V5];
6482 guint32 *code_ptr = (guint32*)code;
6484 method = GUINT_TO_POINTER (code_ptr [1]);
6488 return (MonoMethod*)regs [ARMREG_V5];
6490 /* The IMT value is stored in the code stream right after the LDC instruction. */
6491 /* This is no longer true for the gsharedvt_in trampoline */
6493 if (!IS_LDR_PC (code_ptr [0])) {
6494 g_warning ("invalid code stream, instruction before IMT value is not a LDC in %s() (code %p value 0: 0x%x -1: 0x%x -2: 0x%x)", __FUNCTION__, code, code_ptr [2], code_ptr [1], code_ptr [0]);
6495 g_assert (IS_LDR_PC (code_ptr [0]));
6499 /* This is AOTed code, or the gsharedvt trampoline, the IMT method is in V5 */
6500 return (MonoMethod*)regs [ARMREG_V5];
6502 return (MonoMethod*) method;
/* The static RGCTX trampoline leaves the vtable in MONO_ARCH_RGCTX_REG. */
6507 mono_arch_find_static_call_vtable (mgreg_t *regs, guint8 *code)
6509 return (MonoVTable*) regs [MONO_ARCH_RGCTX_REG];
6512 /* #define ENABLE_WRONG_METHOD_CHECK 1 */
/* Worst-case byte sizes of the fragments emitted by the IMT thunk builder. */
6513 #define BASE_SIZE (6 * 4)
6514 #define BSEARCH_ENTRY_SIZE (4 * 4)
6515 #define CMP_SIZE (3 * 4)
6516 #define BRANCH_SIZE (1 * 4)
6517 #define CALL_SIZE (2 * 4)
6518 #define WMC_SIZE (8 * 4)
/* Byte distance from A to B (B - A), truncated to a signed 32-bit value. */
6519 #define DISTANCE(A, B) (((gint32)(B)) - ((gint32)(A)))
6521 #ifdef USE_JUMP_TABLES
/* Store VALUE into jumptable slot INDEX; each slot may be written only once. */
6523 set_jumptable_element (gpointer *base, guint32 index, gpointer value)
6525 g_assert (base [index] == NULL);
6526 base [index] = value;
/*
 * load_element_with_regbase_cond:
 *
 *   Emit a conditional load of jumptable entry JTI (a word index off BASE)
 * into DREG.  Offsets too large for LDR's imm12 field are materialized in
 * DREG with MOVW/MOVT first, then used as a register offset.
 */
6529 load_element_with_regbase_cond (arminstr_t *code, ARMReg dreg, ARMReg base, guint32 jti, int cond)
6531 if (arm_is_imm12 (jti * 4)) {
6532 ARM_LDR_IMM_COND (code, dreg, base, jti * 4, cond);
6534 ARM_MOVW_REG_IMM_COND (code, dreg, (jti * 4) & 0xffff, cond);
6535 if ((jti * 4) >> 16)
6536 ARM_MOVT_REG_IMM_COND (code, dreg, ((jti * 4) >> 16) & 0xffff, cond);
6537 ARM_LDR_REG_REG_SHIFT_COND (code, dreg, base, dreg, ARMSHIFT_LSL, 0, cond);
/*
 * arm_emit_value_and_patch_ldr:
 *
 *   Emit VALUE into the instruction stream at CODE and patch the earlier
 * PC-relative LDR at TARGET so its imm12 offset addresses it.
 * NOTE(review): DELTA is unsigned, so the "delta >= 0" half of the assert
 * is vacuously true; only the <= 0xFFF (LDR imm12) bound is really checked.
 */
6543 arm_emit_value_and_patch_ldr (arminstr_t *code, arminstr_t *target, guint32 value)
6545 guint32 delta = DISTANCE (target, code);
6547 g_assert (delta >= 0 && delta <= 0xFFF);
6548 *target = *target | delta;
6554 #ifdef ENABLE_WRONG_METHOD_CHECK
/* ENABLE_WRONG_METHOD_CHECK diagnostic: report an IMT key mismatch. */
6556 mini_dump_bad_imt (int input_imt, int compared_imt, int pc)
6558 g_print ("BAD IMT comparing %x with expected %x at ip %x", input_imt, compared_imt, pc);
/*
 * mono_arch_build_imt_thunk:
 *
 *   Build the native IMT thunk for VTABLE: a sequence of compare/branch
 * chunks (one per IMT_ENTRIES item) that dispatch on the IMT method in
 * r0/V5 and jump to the matching vtable slot, target code, or FAIL_TRAMP.
 * Two code shapes exist: a jumptable-based one (USE_JUMP_TABLES) keeping
 * keys/targets in a side table addressed via R2, and a classic one that
 * embeds constants in the code stream and patches PC-relative LDRs.
 * NOTE(review): this view of the file is elided, so some statements
 * (braces, a few sizing lines) of the original body are not shown.
 */
6564 mono_arch_build_imt_thunk (MonoVTable *vtable, MonoDomain *domain, MonoIMTCheckItem **imt_entries, int count,
6565 gpointer fail_tramp)
6568 arminstr_t *code, *start;
6569 #ifdef USE_JUMP_TABLES
6572 gboolean large_offsets = FALSE;
6573 guint32 **constant_pool_starts;
6574 arminstr_t *vtable_target = NULL;
6575 int extra_space = 0;
6577 #ifdef ENABLE_WRONG_METHOD_CHECK
/* --- Pass 1: compute the chunk size of every entry and the total size. --- */
6582 #ifdef USE_JUMP_TABLES
6583 for (i = 0; i < count; ++i) {
6584 MonoIMTCheckItem *item = imt_entries [i];
6585 item->chunk_size += 4 * 16;
6586 if (!item->is_equals)
6587 imt_entries [item->check_target_idx]->compare_done = TRUE;
6588 size += item->chunk_size;
6591 constant_pool_starts = g_new0 (guint32*, count);
6593 for (i = 0; i < count; ++i) {
6594 MonoIMTCheckItem *item = imt_entries [i];
6595 if (item->is_equals) {
6596 gboolean fail_case = !item->check_target_idx && fail_tramp;
6598 if (item->has_target_code || !arm_is_imm12 (DISTANCE (vtable, &vtable->vtable[item->value.vtable_slot]))) {
6599 item->chunk_size += 32;
6600 large_offsets = TRUE;
6603 if (item->check_target_idx || fail_case) {
6604 if (!item->compare_done || fail_case)
6605 item->chunk_size += CMP_SIZE;
6606 item->chunk_size += BRANCH_SIZE;
6608 #ifdef ENABLE_WRONG_METHOD_CHECK
6609 item->chunk_size += WMC_SIZE;
6613 item->chunk_size += 16;
6614 large_offsets = TRUE;
6616 item->chunk_size += CALL_SIZE;
6618 item->chunk_size += BSEARCH_ENTRY_SIZE;
6619 imt_entries [item->check_target_idx]->compare_done = TRUE;
6621 size += item->chunk_size;
6625 size += 4 * count; /* The ARM_ADD_REG_IMM to pop the stack */
/* fail_tramp implies a generic-virtual thunk; otherwise reserve domain code. */
6629 code = mono_method_alloc_generic_virtual_thunk (domain, size);
6631 code = mono_domain_code_reserve (domain, size);
6635 g_print ("Building IMT thunk for class %s %s entries %d code size %d code at %p end %p vtable %p fail_tramp %p\n", vtable->klass->name_space, vtable->klass->name, count, size, start, ((guint8*)start) + size, vtable, fail_tramp);
6636 for (i = 0; i < count; ++i) {
6637 MonoIMTCheckItem *item = imt_entries [i];
6638 g_print ("method %d (%p) %s vtable slot %p is_equals %d chunk size %d\n", i, item->key, ((MonoMethod*)item->key)->name, &vtable->vtable [item->value.vtable_slot], item->is_equals, item->chunk_size);
/* --- Prologue of the thunk: save scratch regs, normalize IMT arg into r0. --- */
6642 #ifdef USE_JUMP_TABLES
6643 ARM_PUSH3 (code, ARMREG_R0, ARMREG_R1, ARMREG_R2);
6644 /* If jumptables we always pass the IMT method in R5 */
6645 ARM_MOV_REG_REG (code, ARMREG_R0, ARMREG_V5);
/* Jumptable layout: slot 0 = vtable, then 3 slots (key/target/jump) per entry. */
6646 #define VTABLE_JTI 0
6647 #define IMT_METHOD_OFFSET 0
6648 #define TARGET_CODE_OFFSET 1
6649 #define JUMP_CODE_OFFSET 2
6650 #define RECORDS_PER_ENTRY 3
6651 #define IMT_METHOD_JTI(idx) (1 + idx * RECORDS_PER_ENTRY + IMT_METHOD_OFFSET)
6652 #define TARGET_CODE_JTI(idx) (1 + idx * RECORDS_PER_ENTRY + TARGET_CODE_OFFSET)
6653 #define JUMP_CODE_JTI(idx) (1 + idx * RECORDS_PER_ENTRY + JUMP_CODE_OFFSET)
6655 jte = mono_jumptable_add_entries (RECORDS_PER_ENTRY * count + 1 /* vtable */);
6656 code = (arminstr_t *) mono_arm_load_jumptable_entry_addr ((guint8 *) code, jte, ARMREG_R2);
6657 ARM_LDR_IMM (code, ARMREG_IP, ARMREG_R2, VTABLE_JTI);
6658 set_jumptable_element (jte, VTABLE_JTI, vtable);
6661 ARM_PUSH4 (code, ARMREG_R0, ARMREG_R1, ARMREG_IP, ARMREG_PC);
6663 ARM_PUSH2 (code, ARMREG_R0, ARMREG_R1);
6664 ARM_LDR_IMM (code, ARMREG_R0, ARMREG_LR, -4);
6665 vtable_target = code;
6666 ARM_LDR_IMM (code, ARMREG_IP, ARMREG_PC, 0);
6668 if (mono_use_llvm) {
6669 /* LLVM always passes the IMT method in R5 */
6670 ARM_MOV_REG_REG (code, ARMREG_R0, ARMREG_V5);
6672 /* R0 == 0 means we are called from AOT code. In this case, V5 contains the IMT method */
6673 ARM_CMP_REG_IMM8 (code, ARMREG_R0, 0);
6674 ARM_MOV_REG_REG_COND (code, ARMREG_R0, ARMREG_V5, ARMCOND_EQ);
/* --- Pass 2: emit each compare/dispatch chunk. --- */
6678 for (i = 0; i < count; ++i) {
6679 MonoIMTCheckItem *item = imt_entries [i];
6680 #ifdef USE_JUMP_TABLES
6681 guint32 imt_method_jti = 0, target_code_jti = 0;
6683 arminstr_t *imt_method = NULL, *vtable_offset_ins = NULL, *target_code_ins = NULL;
6685 gint32 vtable_offset;
6687 item->code_target = (guint8*)code;
6689 if (item->is_equals) {
6690 gboolean fail_case = !item->check_target_idx && fail_tramp;
6692 if (item->check_target_idx || fail_case) {
6693 if (!item->compare_done || fail_case) {
6694 #ifdef USE_JUMP_TABLES
6695 imt_method_jti = IMT_METHOD_JTI (i);
6696 code = load_element_with_regbase_cond (code, ARMREG_R1, ARMREG_R2, imt_method_jti, ARMCOND_AL);
6699 ARM_LDR_IMM (code, ARMREG_R1, ARMREG_PC, 0);
6701 ARM_CMP_REG_REG (code, ARMREG_R0, ARMREG_R1);
6703 #ifdef USE_JUMP_TABLES
6704 code = load_element_with_regbase_cond (code, ARMREG_R1, ARMREG_R2, JUMP_CODE_JTI (i), ARMCOND_NE);
6705 ARM_BX_COND (code, ARMCOND_NE, ARMREG_R1);
6706 item->jmp_code = GUINT_TO_POINTER (JUMP_CODE_JTI (i));
6708 item->jmp_code = (guint8*)code;
6709 ARM_B_COND (code, ARMCOND_NE, 0);
6712 /*Enable the commented code to assert on wrong method*/
6713 #ifdef ENABLE_WRONG_METHOD_CHECK
6714 #ifdef USE_JUMP_TABLES
6715 imt_method_jti = IMT_METHOD_JTI (i);
6716 code = load_element_with_regbase_cond (code, ARMREG_R1, ARMREG_R2, imt_method_jti, ARMCOND_AL);
6719 ARM_LDR_IMM (code, ARMREG_R1, ARMREG_PC, 0);
6721 ARM_CMP_REG_REG (code, ARMREG_R0, ARMREG_R1);
6723 ARM_B_COND (code, ARMCOND_EQ, 0);
6725 /* Define this if your system is so bad that gdb is failing. */
6726 #ifdef BROKEN_DEV_ENV
6727 ARM_MOV_REG_REG (code, ARMREG_R2, ARMREG_PC);
6729 arm_patch (code - 1, mini_dump_bad_imt);
6733 arm_patch (cond, code);
/* Dispatch: explicit target code takes precedence over a vtable slot load. */
6737 if (item->has_target_code) {
6738 /* Load target address */
6739 #ifdef USE_JUMP_TABLES
6740 target_code_jti = TARGET_CODE_JTI (i);
6741 code = load_element_with_regbase_cond (code, ARMREG_R1, ARMREG_R2, target_code_jti, ARMCOND_AL);
6742 /* Restore registers */
6743 ARM_POP3 (code, ARMREG_R0, ARMREG_R1, ARMREG_R2);
6745 ARM_BX (code, ARMREG_R1);
6746 set_jumptable_element (jte, target_code_jti, item->value.target_code);
6748 target_code_ins = code;
6749 ARM_LDR_IMM (code, ARMREG_R1, ARMREG_PC, 0);
6750 /* Save it to the fourth slot */
6751 ARM_STR_IMM (code, ARMREG_R1, ARMREG_SP, 3 * sizeof (gpointer));
6752 /* Restore registers and branch */
6753 ARM_POP4 (code, ARMREG_R0, ARMREG_R1, ARMREG_IP, ARMREG_PC);
6755 code = arm_emit_value_and_patch_ldr (code, target_code_ins, (gsize)item->value.target_code);
6758 vtable_offset = DISTANCE (vtable, &vtable->vtable[item->value.vtable_slot]);
6759 if (!arm_is_imm12 (vtable_offset)) {
6761 * We need to branch to a computed address but we don't have
6762 * a free register to store it, since IP must contain the
6763 * vtable address. So we push the two values to the stack, and
6764 * load them both using LDM.
6766 /* Compute target address */
6767 #ifdef USE_JUMP_TABLES
6768 ARM_MOVW_REG_IMM (code, ARMREG_R1, vtable_offset & 0xffff);
6769 if (vtable_offset >> 16)
6770 ARM_MOVT_REG_IMM (code, ARMREG_R1, (vtable_offset >> 16) & 0xffff);
6771 /* IP had vtable base. */
6772 ARM_LDR_REG_REG (code, ARMREG_IP, ARMREG_IP, ARMREG_R1);
6773 /* Restore registers and branch */
6774 ARM_POP3 (code, ARMREG_R0, ARMREG_R1, ARMREG_R2);
6775 ARM_BX (code, ARMREG_IP);
6777 vtable_offset_ins = code;
6778 ARM_LDR_IMM (code, ARMREG_R1, ARMREG_PC, 0);
6779 ARM_LDR_REG_REG (code, ARMREG_R1, ARMREG_IP, ARMREG_R1);
6780 /* Save it to the fourth slot */
6781 ARM_STR_IMM (code, ARMREG_R1, ARMREG_SP, 3 * sizeof (gpointer));
6782 /* Restore registers and branch */
6783 ARM_POP4 (code, ARMREG_R0, ARMREG_R1, ARMREG_IP, ARMREG_PC);
6785 code = arm_emit_value_and_patch_ldr (code, vtable_offset_ins, vtable_offset);
/* Small (imm12) vtable offset: load the slot and branch directly. */
6788 #ifdef USE_JUMP_TABLES
6789 ARM_LDR_IMM (code, ARMREG_IP, ARMREG_IP, vtable_offset);
6790 ARM_POP3 (code, ARMREG_R0, ARMREG_R1, ARMREG_R2);
6791 ARM_BX (code, ARMREG_IP);
6793 ARM_POP2 (code, ARMREG_R0, ARMREG_R1);
6795 ARM_ADD_REG_IMM8 (code, ARMREG_SP, ARMREG_SP, 2 * sizeof (gpointer));
6796 ARM_LDR_IMM (code, ARMREG_PC, ARMREG_IP, vtable_offset);
/* fail_case: route the not-equal branch to the fail trampoline. */
6802 #ifdef USE_JUMP_TABLES
6803 set_jumptable_element (jte, GPOINTER_TO_UINT (item->jmp_code), code);
6804 target_code_jti = TARGET_CODE_JTI (i);
6805 /* Load target address */
6806 code = load_element_with_regbase_cond (code, ARMREG_R1, ARMREG_R2, target_code_jti, ARMCOND_AL);
6807 /* Restore registers */
6808 ARM_POP3 (code, ARMREG_R0, ARMREG_R1, ARMREG_R2);
6810 ARM_BX (code, ARMREG_R1);
6811 set_jumptable_element (jte, target_code_jti, fail_tramp);
6813 arm_patch (item->jmp_code, (guchar*)code);
6815 target_code_ins = code;
6816 /* Load target address */
6817 ARM_LDR_IMM (code, ARMREG_R1, ARMREG_PC, 0);
6818 /* Save it to the fourth slot */
6819 ARM_STR_IMM (code, ARMREG_R1, ARMREG_SP, 3 * sizeof (gpointer));
6820 /* Restore registers and branch */
6821 ARM_POP4 (code, ARMREG_R0, ARMREG_R1, ARMREG_IP, ARMREG_PC);
6823 code = arm_emit_value_and_patch_ldr (code, target_code_ins, (gsize)fail_tramp);
6825 item->jmp_code = NULL;
6828 #ifdef USE_JUMP_TABLES
6830 set_jumptable_element (jte, imt_method_jti, item->key);
6833 code = arm_emit_value_and_patch_ldr (code, imt_method, (guint32)item->key);
6835 /*must emit after unconditional branch*/
6836 if (vtable_target) {
6837 code = arm_emit_value_and_patch_ldr (code, vtable_target, (guint32)vtable);
6838 item->chunk_size += 4;
6839 vtable_target = NULL;
6842 /*We reserve the space for bsearch IMT values after the first entry with an absolute jump*/
6843 constant_pool_starts [i] = code;
6845 code += extra_space;
/* Non-equals entry: binary-search node, branch taken when key is >= (HS). */
6850 #ifdef USE_JUMP_TABLES
6851 code = load_element_with_regbase_cond (code, ARMREG_R1, ARMREG_R2, IMT_METHOD_JTI (i), ARMCOND_AL);
6852 ARM_CMP_REG_REG (code, ARMREG_R0, ARMREG_R1);
6853 code = load_element_with_regbase_cond (code, ARMREG_R1, ARMREG_R2, JUMP_CODE_JTI (i), ARMCOND_HS);
6854 ARM_BX_COND (code, ARMCOND_HS, ARMREG_R1);
6855 item->jmp_code = GUINT_TO_POINTER (JUMP_CODE_JTI (i));
6857 ARM_LDR_IMM (code, ARMREG_R1, ARMREG_PC, 0);
6858 ARM_CMP_REG_REG (code, ARMREG_R0, ARMREG_R1);
6860 item->jmp_code = (guint8*)code;
6861 ARM_B_COND (code, ARMCOND_HS, 0);
/* --- Pass 3: patch pending branches and fill the bsearch constant pools. --- */
6867 for (i = 0; i < count; ++i) {
6868 MonoIMTCheckItem *item = imt_entries [i];
6869 if (item->jmp_code) {
6870 if (item->check_target_idx)
6871 #ifdef USE_JUMP_TABLES
6872 set_jumptable_element (jte, GPOINTER_TO_UINT (item->jmp_code), imt_entries [item->check_target_idx]->code_target);
6874 arm_patch (item->jmp_code, imt_entries [item->check_target_idx]->code_target);
6877 if (i > 0 && item->is_equals) {
6879 #ifdef USE_JUMP_TABLES
6880 for (j = i - 1; j >= 0 && !imt_entries [j]->is_equals; --j)
6881 set_jumptable_element (jte, IMT_METHOD_JTI (j), imt_entries [j]->key);
6883 arminstr_t *space_start = constant_pool_starts [i];
6884 for (j = i - 1; j >= 0 && !imt_entries [j]->is_equals; --j) {
6885 space_start = arm_emit_value_and_patch_ldr (space_start, (arminstr_t*)imt_entries [j]->code_target, (guint32)imt_entries [j]->key);
6893 char *buff = g_strdup_printf ("thunk_for_class_%s_%s_entries_%d", vtable->klass->name_space, vtable->klass->name, count);
6894 mono_disassemble_code (NULL, (guint8*)start, size, buff);
6899 #ifndef USE_JUMP_TABLES
6900 g_free (constant_pool_starts);
6903 mono_arch_flush_icache ((guint8*)start, size);
6904 mono_profiler_code_buffer_new (start, code - start, MONO_PROFILER_CODE_BUFFER_IMT_TRAMPOLINE, NULL);
6905 mono_stats.imt_thunks_size += code - start;
6907 g_assert (DISTANCE (start, code) <= size);
/* Return the value of integer register REG from the saved context CTX. */
6912 mono_arch_context_get_int_reg (MonoContext *ctx, int reg)
6914 return ctx->regs [reg];
/* Set integer register REG in the saved context CTX to VAL. */
6918 mono_arch_context_set_int_reg (MonoContext *ctx, int reg, mgreg_t val)
6920 ctx->regs [reg] = val;
6924 * mono_arch_get_trampolines:
6926 * Return a list of MonoTrampInfo structures describing arch specific trampolines
/* Delegates to the ARM exception-trampoline list (see the comment above). */
6930 mono_arch_get_trampolines (gboolean aot)
6932 return mono_arm_get_exception_trampolines (aot);
/*
 * mono_arch_install_handler_block_guard:
 *
 *   Replace the saved LR stored in CLAUSE's exvar slot (at bp +
 * exvar_offset) with NEW_VALUE, but only while the current value still
 * points inside JI's code range; otherwise the slot is left untouched.
 */
6936 mono_arch_install_handler_block_guard (MonoJitInfo *ji, MonoJitExceptionInfo *clause, MonoContext *ctx, gpointer new_value)
6943 bp = MONO_CONTEXT_GET_BP (ctx);
6944 lr_loc = (gpointer*)(bp + clause->exvar_offset);
6946 old_value = *lr_loc;
/* Bail out if the stored address is no longer within this method's code. */
6947 if ((char*)old_value < (char*)ji->code_start || (char*)old_value > ((char*)ji->code_start + ji->code_size))
6950 *lr_loc = new_value;
6955 #if defined(MONO_ARCH_SOFT_DEBUG_SUPPORTED)
6957 * mono_arch_set_breakpoint:
6959 * Set a breakpoint at the native code corresponding to JI at NATIVE_OFFSET.
6960 * The location should contain code emitted by OP_SEQ_POINT.
/*
 * mono_arch_set_breakpoint:
 *
 *   Activate the breakpoint at IP using one of three mechanisms:
 * soft breakpoints (patch in a BLX through LR), AOT (record the trigger
 * page in the method's SeqPointInfo table), or JITted code (patch a
 * load from the breakpoint trigger page so execution faults there).
 */
6963 mono_arch_set_breakpoint (MonoJitInfo *ji, guint8 *ip)
6966 guint32 native_offset = ip - (guint8*)ji->code_start;
6967 MonoDebugOptions *opt = mini_get_debug_options ();
6969 if (opt->soft_breakpoints) {
6970 g_assert (!ji->from_aot);
6972 ARM_BLX_REG (code, ARMREG_LR);
6973 mono_arch_flush_icache (code - 4, 4);
6974 } else if (ji->from_aot) {
6975 SeqPointInfo *info = mono_arch_get_seq_point_info (mono_domain_get (), ji->code_start);
/* Sequence points are word-aligned; bp_addrs is indexed by code word. */
6977 g_assert (native_offset % 4 == 0);
6978 g_assert (info->bp_addrs [native_offset / 4] == 0);
6979 info->bp_addrs [native_offset / 4] = bp_trigger_page;
6981 int dreg = ARMREG_LR;
6983 /* Read from another trigger page */
6984 #ifdef USE_JUMP_TABLES
6985 gpointer *jte = mono_jumptable_add_entry ();
6986 code = mono_arm_load_jumptable_entry (code, jte, dreg);
6987 jte [0] = bp_trigger_page;
6989 ARM_LDR_IMM (code, dreg, ARMREG_PC, 0);
6991 *(int*)code = (int)bp_trigger_page;
/* Dereference the trigger page address; presumably this access faults
 * and the fault is recognized as a breakpoint — TODO confirm vs. agent. */
6994 ARM_LDR_IMM (code, dreg, dreg, 0);
6996 mono_arch_flush_icache (code - 16, 16);
6999 /* This is currently implemented by emitting an SWI instruction, which
7000 * qemu/linux seems to convert to a SIGILL.
7002 *(int*)code = (0xef << 24) | 8;
7004 mono_arch_flush_icache (code - 4, 4);
7010 * mono_arch_clear_breakpoint:
7012 * Clear the breakpoint at IP.
/*
 * mono_arch_clear_breakpoint:
 *
 *   Undo mono_arch_set_breakpoint at IP: restore the original code for
 * soft breakpoints, clear the SeqPointInfo slot for AOT, or overwrite
 * the patched instructions (4 words) for JITted code.
 */
7015 mono_arch_clear_breakpoint (MonoJitInfo *ji, guint8 *ip)
7017 MonoDebugOptions *opt = mini_get_debug_options ();
7021 if (opt->soft_breakpoints) {
7022 g_assert (!ji->from_aot);
7025 mono_arch_flush_icache (code - 4, 4);
7026 } else if (ji->from_aot) {
7027 guint32 native_offset = ip - (guint8*)ji->code_start;
7028 SeqPointInfo *info = mono_arch_get_seq_point_info (mono_domain_get (), ji->code_start);
7030 g_assert (native_offset % 4 == 0);
7031 g_assert (info->bp_addrs [native_offset / 4] == bp_trigger_page);
7032 info->bp_addrs [native_offset / 4] = 0;
/* Rewrite the 4 patched instruction words (statement bodies elided here). */
7034 for (i = 0; i < 4; ++i)
7037 mono_arch_flush_icache (ip, code - ip);
7042 * mono_arch_start_single_stepping:
7044 * Start single stepping.
/* Revoke all access to the single-step trigger page so that the loads
 * emitted at sequence points fault, delivering the single-step event. */
7047 mono_arch_start_single_stepping (void)
7049 if (ss_trigger_page)
7050 mono_mprotect (ss_trigger_page, mono_pagesize (), 0);
7056 * mono_arch_stop_single_stepping:
7058 * Stop single stepping.
/* Re-enable reads of the single-step trigger page so sequence-point
 * loads succeed again and single stepping stops. */
7061 mono_arch_stop_single_stepping (void)
7063 if (ss_trigger_page)
7064 mono_mprotect (ss_trigger_page, mono_pagesize (), MONO_MMAP_READ);
/* Signal the kernel delivers for trigger-page faults; platform dependent
 * (the selecting #if/#else lines are elided in this view). */
7070 #define DBG_SIGNAL SIGBUS
7072 #define DBG_SIGNAL SIGSEGV
7076 * mono_arch_is_single_step_event:
7078 * Return whenever the machine state in SIGCTX corresponds to a single
/*
 * mono_arch_is_single_step_event:
 *
 *   Return whether the fault described by INFO/SIGCTX hit the single-step
 * trigger page.  A 128-byte slack tolerates the faulting address being
 * reported slightly past the page start.
 */
7082 mono_arch_is_single_step_event (void *info, void *sigctx)
7084 siginfo_t *sinfo = info;
7086 if (!ss_trigger_page)
7089 /* Sometimes the address is off by 4 */
7090 if (sinfo->si_addr >= ss_trigger_page && (guint8*)sinfo->si_addr <= (guint8*)ss_trigger_page + 128)
7097 * mono_arch_is_breakpoint_event:
7099 * Return whenever the machine state in SIGCTX corresponds to a breakpoint event.
/*
 * mono_arch_is_breakpoint_event:
 *
 *   Return whether the signal described by INFO/SIGCTX is a breakpoint:
 * the expected DBG_SIGNAL whose faulting address falls on (or just past)
 * the breakpoint trigger page.
 */
7102 mono_arch_is_breakpoint_event (void *info, void *sigctx)
7104 siginfo_t *sinfo = info;
7106 if (!ss_trigger_page)
7109 if (sinfo->si_signo == DBG_SIGNAL) {
7110 /* Sometimes the address is off by 4 */
7111 if (sinfo->si_addr >= bp_trigger_page && (guint8*)sinfo->si_addr <= (guint8*)bp_trigger_page + 128)
7121 * mono_arch_skip_breakpoint:
7123 * See mini-amd64.c for docs.
/* Resume after a breakpoint by advancing PC past the 4-byte ARM
 * instruction that triggered it. */
7126 mono_arch_skip_breakpoint (MonoContext *ctx, MonoJitInfo *ji)
7128 MONO_CONTEXT_SET_IP (ctx, (guint8*)MONO_CONTEXT_GET_IP (ctx) + 4);
7132 * mono_arch_skip_single_step:
7134 * See mini-amd64.c for docs.
/* Resume after a single-step fault by advancing PC past the 4-byte
 * trigger-page load. */
7137 mono_arch_skip_single_step (MonoContext *ctx)
7139 MONO_CONTEXT_SET_IP (ctx, (guint8*)MONO_CONTEXT_GET_IP (ctx) + 4);
7142 #endif /* MONO_ARCH_SOFT_DEBUG_SUPPORTED */
7145 * mono_arch_get_seq_point_info:
7147 * See mini-amd64.c for docs.
/*
 * mono_arch_get_seq_point_info:
 *
 *   Lazily create and cache the SeqPointInfo for the method whose code
 * starts at CODE, keyed in the per-domain arch_seq_points hash table.
 * All hash accesses are performed under the domain lock.
 */
7150 mono_arch_get_seq_point_info (MonoDomain *domain, guint8 *code)
7155 // FIXME: Add a free function
7157 mono_domain_lock (domain);
7158 info = g_hash_table_lookup (domain_jit_info (domain)->arch_seq_points,
7160 mono_domain_unlock (domain);
/* Not cached yet: size the bp_addrs table from the method's code size. */
7163 ji = mono_jit_info_table_find (domain, (char*)code);
7166 info = g_malloc0 (sizeof (SeqPointInfo) + ji->code_size);
7168 info->ss_trigger_page = ss_trigger_page;
7169 info->bp_trigger_page = bp_trigger_page;
7171 mono_domain_lock (domain);
7172 g_hash_table_insert (domain_jit_info (domain)->arch_seq_points,
7174 mono_domain_unlock (domain);
/* Initialize an extended LMF: link it to PREV_LMF, tag bit 1 (| 2) in
 * previous_lmf to mark the frame as a MonoLMFExt, and record its sp. */
7181 mono_arch_init_lmf_ext (MonoLMFExt *ext, gpointer prev_lmf)
7183 ext->lmf.previous_lmf = prev_lmf;
7184 /* Mark that this is a MonoLMFExt */
7185 ext->lmf.previous_lmf = (gpointer)(((gssize)ext->lmf.previous_lmf) | 2);
7186 ext->lmf.sp = (gssize)ext;
7190 * mono_arch_set_target:
7192 * Set the target architecture the JIT backend should generate code for, in the form
7193 * of a GNU target triplet. Only used in AOT mode.
/*
 * mono_arch_set_target:
 *
 *   Parse the GNU target triplet MTRIPLE (AOT mode only) and set the
 * architecture feature flags the code generator keys off of.
 * NOTE(review): strstr("armv7") also matches "armv7s" triples, which is
 * harmless here since v7s implies v7.
 */
7196 mono_arch_set_target (char *mtriple)
7198 /* The GNU target triple format is not very well documented */
7199 if (strstr (mtriple, "armv7")) {
7200 v5_supported = TRUE;
7201 v6_supported = TRUE;
7202 v7_supported = TRUE;
7204 if (strstr (mtriple, "armv6")) {
7205 v5_supported = TRUE;
7206 v6_supported = TRUE;
7208 if (strstr (mtriple, "armv7s")) {
7209 v7s_supported = TRUE;
7211 if (strstr (mtriple, "thumbv7s")) {
7212 v5_supported = TRUE;
7213 v6_supported = TRUE;
7214 v7_supported = TRUE;
7215 v7s_supported = TRUE;
7216 thumb_supported = TRUE;
7217 thumb2_supported = TRUE;
7219 if (strstr (mtriple, "darwin") || strstr (mtriple, "ios")) {
7220 v5_supported = TRUE;
7221 v6_supported = TRUE;
7222 thumb_supported = TRUE;
7225 if (strstr (mtriple, "gnueabi"))
7226 eabi_supported = TRUE;
/* Report whether OPCODE has a native implementation on this CPU: the
 * 32-bit atomic ops are gated on ARMv7 support (presumably implemented
 * via LDREX/STREX — confirm against the opcode lowering). */
7230 mono_arch_opcode_supported (int opcode)
7233 case OP_ATOMIC_ADD_I4:
7234 case OP_ATOMIC_EXCHANGE_I4:
7235 case OP_ATOMIC_CAS_I4:
7236 return v7_supported;
7242 #if defined(ENABLE_GSHAREDVT)
7244 #include "../../../mono-extensions/mono/mini/mini-arm-gsharedvt.c"
7246 #endif /* !MONOTOUCH */