2 * mini-arm.c: ARM backend for the Mono code generator
5 * Paolo Molaro (lupus@ximian.com)
6 * Dietmar Maurer (dietmar@ximian.com)
8 * (C) 2003 Ximian, Inc.
13 #include <mono/metadata/appdomain.h>
14 #include <mono/metadata/debug-helpers.h>
15 #include <mono/utils/mono-mmap.h>
22 #include "mono/arch/arm/arm-fpa-codegen.h"
23 #elif defined(ARM_FPU_VFP)
24 #include "mono/arch/arm/arm-vfp-codegen.h"
27 #if defined(__ARM_EABI__) && defined(__linux__) && !defined(PLATFORM_ANDROID)
28 #define HAVE_AEABI_READ_TP 1
31 static gint lmf_tls_offset = -1;
32 static gint lmf_addr_tls_offset = -1;
34 /* This mutex protects architecture specific caches */
35 #define mono_mini_arch_lock() EnterCriticalSection (&mini_arch_mutex)
36 #define mono_mini_arch_unlock() LeaveCriticalSection (&mini_arch_mutex)
37 static CRITICAL_SECTION mini_arch_mutex;
39 static int v5_supported = 0;
40 static int v7_supported = 0;
41 static int thumb_supported = 0;
44 * The code generated for sequence points reads from this location, which is
45 * made read-only when single stepping is enabled.
47 static gpointer ss_trigger_page;
49 /* Enabled breakpoints read from this trigger page */
50 static gpointer bp_trigger_page;
52 /* Structure used by the sequence points in AOTed code */
54 gpointer ss_trigger_page;
55 gpointer bp_trigger_page;
56 guint8* bp_addrs [MONO_ZERO_LEN_ARRAY];
61 * floating point support: on ARM it is a mess, there are at least 3
62 * different setups, each of which is binary-incompatible with the others.
63 * 1) FPA: old and ugly, but unfortunately what current distros use
64 * the double binary format has the two words swapped. 8 double registers.
65 * Implemented usually by kernel emulation.
66 * 2) softfloat: the compiler emulates all the fp ops. Usually uses the
67 * ugly swapped double format (I guess a softfloat-vfp exists, too, though).
68 * 3) VFP: the new and actually sensible and useful FP support. Implemented
69 * in HW or kernel-emulated, requires new tools. I think this is what symbian uses.
71 * The plan is to write the FPA support first. softfloat can be tested in a chroot.
73 int mono_exc_esp_offset = 0;
75 #define arm_is_imm12(v) ((v) > -4096 && (v) < 4096)
76 #define arm_is_imm8(v) ((v) > -256 && (v) < 256)
77 #define arm_is_fpimm8(v) ((v) >= -1020 && (v) <= 1020)
79 #define LDR_MASK ((0xf << ARMCOND_SHIFT) | (3 << 26) | (1 << 22) | (1 << 20) | (15 << 12))
80 #define LDR_PC_VAL ((ARMCOND_AL << ARMCOND_SHIFT) | (1 << 26) | (0 << 22) | (1 << 20) | (15 << 12))
81 #define IS_LDR_PC(val) (((val) & LDR_MASK) == LDR_PC_VAL)
83 #define ADD_LR_PC_4 ((ARMCOND_AL << ARMCOND_SHIFT) | (1 << 25) | (1 << 23) | (ARMREG_PC << 16) | (ARMREG_LR << 12) | 4)
84 #define MOV_LR_PC ((ARMCOND_AL << ARMCOND_SHIFT) | (1 << 24) | (0xa << 20) | (ARMREG_LR << 12) | ARMREG_PC)
87 /* A variant of ARM_LDR_IMM which can handle large offsets */
88 #define ARM_LDR_IMM_GENERAL(code, dreg, basereg, offset, scratch_reg) do { \
89 if (arm_is_imm12 ((offset))) { \
90 ARM_LDR_IMM (code, (dreg), (basereg), (offset)); \
92 g_assert ((scratch_reg) != (basereg)); \
93 code = mono_arm_emit_load_imm (code, (scratch_reg), (offset)); \
94 ARM_LDR_REG_REG (code, (dreg), (basereg), (scratch_reg)); \
98 #define ARM_STR_IMM_GENERAL(code, dreg, basereg, offset, scratch_reg) do { \
99 if (arm_is_imm12 ((offset))) { \
100 ARM_STR_IMM (code, (dreg), (basereg), (offset)); \
102 g_assert ((scratch_reg) != (basereg)); \
103 code = mono_arm_emit_load_imm (code, (scratch_reg), (offset)); \
104 ARM_STR_REG_REG (code, (dreg), (basereg), (scratch_reg)); \
/* Map an integer register number (0..15) to a human-readable ARM register
 * name. (Fragmentary listing: return statements/closing brace not visible.) */
109 mono_arch_regname (int reg)
111 static const char * rnames[] = {
112 "arm_r0", "arm_r1", "arm_r2", "arm_r3", "arm_v1",
113 "arm_v2", "arm_v3", "arm_v4", "arm_v5", "arm_v6",
114 "arm_v7", "arm_fp", "arm_ip", "arm_sp", "arm_lr",
/* Bounds check before indexing the 16-entry name table. */
117 if (reg >= 0 && reg < 16)
/* Map a floating-point register number (0..31) to a human-readable name.
 * (Fragmentary listing: tail of the table and return path not visible.) */
123 mono_arch_fregname (int reg)
125 static const char * rnames[] = {
126 "arm_f0", "arm_f1", "arm_f2", "arm_f3", "arm_f4",
127 "arm_f5", "arm_f6", "arm_f7", "arm_f8", "arm_f9",
128 "arm_f10", "arm_f11", "arm_f12", "arm_f13", "arm_f14",
129 "arm_f15", "arm_f16", "arm_f17", "arm_f18", "arm_f19",
130 "arm_f20", "arm_f21", "arm_f22", "arm_f23", "arm_f24",
131 "arm_f25", "arm_f26", "arm_f27", "arm_f28", "arm_f29",
/* Bounds check before indexing the 32-entry name table. */
134 if (reg >= 0 && reg < 32)
/* Emit code computing dreg = sreg + imm for an arbitrary 32-bit immediate:
 * a single ADD with a rotated 8-bit immediate when imm fits that encoding,
 * otherwise load imm into dreg first and add sreg (hence dreg != sreg on
 * that path). (Fragmentary listing: braces and `return code` not visible.) */
142 emit_big_add (guint8 *code, int dreg, int sreg, int imm)
144 int imm8, rot_amount;
/* Fast path: imm representable as an ARM rotated imm8. */
145 if ((imm8 = mono_arm_is_rotated_imm8 (imm, &rot_amount)) >= 0) {
146 ARM_ADD_REG_IMM (code, dreg, sreg, imm8, rot_amount);
/* Fallback: materialize imm in dreg, then add the base register. */
149 g_assert (dreg != sreg);
150 code = mono_arm_emit_load_imm (code, dreg, imm);
151 ARM_ADD_REG_REG (code, dreg, dreg, sreg);
/* Emit a word-wise copy of `size` bytes from sreg+soffset to dreg+doffset.
 * Large copies (> 4 pointers) use an r0/r1/r2/r3 countdown loop; smaller
 * ones are unrolled using LR as scratch. The r0-r3 clobber is safe only
 * because this is used for incoming stack arguments, per the comment below.
 * (Fragmentary listing: loop/unroll control lines are missing.) */
156 emit_memcpy (guint8 *code, int size, int dreg, int doffset, int sreg, int soffset)
158 /* we can use r0-r3, since this is called only for incoming args on the stack */
159 if (size > sizeof (gpointer) * 4) {
161 code = emit_big_add (code, ARMREG_R0, sreg, soffset);
162 code = emit_big_add (code, ARMREG_R1, dreg, doffset);
/* r2 holds the remaining byte count; loop back-patches the branch below. */
163 start_loop = code = mono_arm_emit_load_imm (code, ARMREG_R2, size);
164 ARM_LDR_IMM (code, ARMREG_R3, ARMREG_R0, 0);
165 ARM_STR_IMM (code, ARMREG_R3, ARMREG_R1, 0);
166 ARM_ADD_REG_IMM8 (code, ARMREG_R0, ARMREG_R0, 4);
167 ARM_ADD_REG_IMM8 (code, ARMREG_R1, ARMREG_R1, 4);
168 ARM_SUBS_REG_IMM8 (code, ARMREG_R2, ARMREG_R2, 4);
/* Branch target patched to start_loop right after emission. */
169 ARM_B_COND (code, ARMCOND_NE, 0);
170 arm_patch (code - 4, start_loop);
/* Unrolled path: only valid while every offset fits an imm12 field. */
173 if (arm_is_imm12 (doffset) && arm_is_imm12 (doffset + size) &&
174 arm_is_imm12 (soffset) && arm_is_imm12 (soffset + size)) {
176 ARM_LDR_IMM (code, ARMREG_LR, sreg, soffset);
177 ARM_STR_IMM (code, ARMREG_LR, dreg, doffset);
/* Offsets too large: rebase into r0/r1 and copy with zero offsets. */
183 code = emit_big_add (code, ARMREG_R0, sreg, soffset);
184 code = emit_big_add (code, ARMREG_R1, dreg, doffset);
185 doffset = soffset = 0;
187 ARM_LDR_IMM (code, ARMREG_LR, ARMREG_R0, soffset);
188 ARM_STR_IMM (code, ARMREG_LR, ARMREG_R1, doffset);
/* By here every byte must have been accounted for. */
194 g_assert (size == 0);
/* Emit an indirect call through `reg`: BLX on one path, otherwise set LR
 * manually and branch by writing the target into PC. NOTE(review): the
 * selecting condition (presumably v5_supported) is not visible in this
 * fragmentary listing — confirm against upstream. */
199 emit_call_reg (guint8 *code, int reg)
202 ARM_BLX_REG (code, reg);
/* Pre-v5 sequence: LR = PC (reads PC+8, i.e. the return address)... */
204 ARM_MOV_REG_REG (code, ARMREG_LR, ARMREG_PC);
/* ...then jump by moving the target register into PC. */
208 ARM_MOV_REG_REG (code, ARMREG_PC, reg);
/* Emit a call whose target is patched later. For dynamic methods the
 * target is stored as an inline literal (the NULL written below) loaded
 * PC-relative into IP, then called through IP. (Fragmentary listing:
 * the non-dynamic branch is not visible here.) */
214 emit_call_seq (MonoCompile *cfg, guint8 *code)
216 if (cfg->method->dynamic) {
217 ARM_LDR_IMM (code, ARMREG_IP, ARMREG_PC, 0);
/* Placeholder literal; patched with the real target address later. */
219 *(gpointer*)code = NULL;
221 code = emit_call_reg (code, ARMREG_IP);
/* Move a call's return value from the ABI return location into ins->dreg.
 * FP returns depend on the configured FP unit: FPA returns in F0; VFP
 * receives the value in r0(/r1) and transfers it into a VFP register
 * (FMSR+CVTS for R4, FMDRR for R8). (Fragmentary listing: other opcode
 * cases and the #if arms are not visible.) */
229 emit_move_return_value (MonoCompile *cfg, MonoInst *ins, guint8 *code)
231 switch (ins->opcode) {
234 case OP_FCALL_MEMBASE:
236 if (ins->dreg != ARM_FPA_F0)
237 ARM_MVFD (code, ins->dreg, ARM_FPA_F0);
238 #elif defined(ARM_FPU_VFP)
/* R4 result: move r0 into a single-precision reg, then widen to double. */
239 if (((MonoCallInst*)ins)->signature->ret->type == MONO_TYPE_R4) {
240 ARM_FMSR (code, ins->dreg, ARMREG_R0);
241 ARM_CVTS (code, ins->dreg, ins->dreg);
/* R8 result: r0/r1 pair moved into a double-precision register. */
243 ARM_FMDRR (code, ARMREG_R0, ARMREG_R1, ins->dreg);
252 #endif /* #ifndef DISABLE_JIT */
255 * mono_arch_get_argument_info:
256 * @csig: a method signature
257 * @param_count: the number of parameters to consider
258 * @arg_info: an array to store the result infos
260 * Gathers information on parameters such as size, alignment and
261 * padding. arg_info should be large enough to hold param_count + 1 entries.
263 * Returns the size of the activation frame.
/* Compute per-argument size/alignment/padding and the total activation
 * frame size for `csig` (see the function comment above). arg_info[0]
 * describes the implicit part (this/vret); arg_info[k+1] describes
 * parameter k. (Fragmentary listing: offset bookkeeping lines missing.) */
266 mono_arch_get_argument_info (MonoMethodSignature *csig, int param_count, MonoJitArgumentInfo *arg_info)
268 int k, frame_size = 0;
269 guint32 size, align, pad;
/* Struct returns consume a hidden pointer slot in the frame. */
272 if (MONO_TYPE_ISSTRUCT (csig->ret)) {
273 frame_size += sizeof (gpointer);
277 arg_info [0].offset = offset;
280 frame_size += sizeof (gpointer);
284 arg_info [0].size = frame_size;
286 for (k = 0; k < param_count; k++) {
287 size = mini_type_stack_size_full (NULL, csig->params [k], &align, csig->pinvoke);
289 /* ignore alignment for now */
/* Pad frame_size up to `align` (align is assumed a power of two). */
292 frame_size += pad = (align - (frame_size & (align - 1))) & (align - 1);
293 arg_info [k].pad = pad;
295 arg_info [k + 1].pad = 0;
296 arg_info [k + 1].size = size;
298 arg_info [k + 1].offset = offset;
/* Final frame padding up to the architecture frame alignment. */
302 align = MONO_ARCH_FRAME_ALIGNMENT;
303 frame_size += pad = (align - (frame_size & (align - 1))) & (align - 1);
304 arg_info [k].pad = pad;
/* Decode an ARM `ldr pc, [rN, #off]` instruction word: extract the base
 * register (bits 16-19) and the 12-bit immediate offset, honoring the U
 * bit (bit 23) for the sign, and return the base register's runtime value
 * with *displacement set to the decoded offset. */
310 decode_vcall_slot_from_ldr (guint32 ldr, mgreg_t *regs, int *displacement)
314 reg = (ldr >> 16 ) & 0xf;
315 offset = ldr & 0xfff;
316 if (((ldr >> 23) & 1) == 0) /*U bit, 0 means negative and 1 positive*/
318 /*g_print ("found vcall at r%d + %d for code at %p 0x%x\n", reg, offset, code, *code);*/
/* Fetch the base register's value from the saved register state. */
319 o = (gpointer)regs [reg];
321 *displacement = offset;
/* Identify the vtable/IMT slot used by the call that returns to code_ptr,
 * by pattern-matching the instruction(s) just before the return address
 * (see the sequence descriptions in the comment block below). Returns the
 * slot base via decode_vcall_slot_from_ldr, or falls through for direct
 * calls, which need no decoding. */
326 mono_arch_get_vcall_slot (guint8 *code_ptr, mgreg_t *regs, int *displacement)
328 guint32* code = (guint32*)code_ptr;
330 /* Locate the address of the method-specific trampoline. The call using
331 the vtable slot that took the processing flow to 'arch_create_jit_trampoline'
332 looks something like this:
341 The call sequence could be also:
344 function pointer literal
348 Note that on ARM5+ we can use one instruction instead of the last two.
349 Therefore, we need to locate the 'ldr rA' instruction to know which
350 register was used to hold the method addrs.
353 /* This is the instruction after "ldc pc, xxx", "mov pc, xxx" or "bl xxx" could be either the IMT value or some other instruction*/
356 /* Three possible code sequences can happen here:
360 * ldr pc, [rX - #offset]
366 * ldr pc, [rX - #offset]
368 * direct branch with bl:
372 * direct branch with mov:
376 * We only need to identify interface and virtual calls, the others can be ignored.
/* Sequence "add lr, pc, #4; ldr pc, [...]": the ldr is one word back. */
379 if (IS_LDR_PC (code [-1]) && code [-2] == ADD_LR_PC_4)
380 return decode_vcall_slot_from_ldr (code [-1], regs, displacement)
/* Sequence "mov lr, pc; ldr pc, [...]": the ldr is at the return address. */
382 if (IS_LDR_PC (code [0]) && code [-1] == MOV_LR_PC)
383 return decode_vcall_slot_from_ldr (code [0], regs, displacement)
388 #define MAX_ARCH_DELEGATE_PARAMS 3
/* Generate a delegate-invoke thunk. has_target variant (12 bytes): load
 * method_ptr into IP, replace `this` (r0) with the delegate target, jump.
 * Non-target variant: load method_ptr, slide arguments r1..rN down one
 * register so the method sees them without the delegate, then jump.
 * NOTE(review): param_count is declared gboolean but used as a count —
 * likely should be int; confirm against upstream. (Fragmentary listing:
 * the if/else structure and return are not visible.) */
391 get_delegate_invoke_impl (gboolean has_target, gboolean param_count, guint32 *code_size)
393 guint8 *code, *start;
396 start = code = mono_global_codeman_reserve (12);
398 /* Replace the this argument with the target */
399 ARM_LDR_IMM (code, ARMREG_IP, ARMREG_R0, G_STRUCT_OFFSET (MonoDelegate, method_ptr));
400 ARM_LDR_IMM (code, ARMREG_R0, ARMREG_R0, G_STRUCT_OFFSET (MonoDelegate, target));
401 ARM_MOV_REG_REG (code, ARMREG_PC, ARMREG_IP);
403 g_assert ((code - start) <= 12);
405 mono_arch_flush_icache (start, 12);
/* Non-target case: one extra MOV per parameter to shift args down. */
409 size = 8 + param_count * 4;
410 start = code = mono_global_codeman_reserve (size);
412 ARM_LDR_IMM (code, ARMREG_IP, ARMREG_R0, G_STRUCT_OFFSET (MonoDelegate, method_ptr));
413 /* slide down the arguments */
414 for (i = 0; i < param_count; ++i) {
415 ARM_MOV_REG_REG (code, (ARMREG_R0 + i), (ARMREG_R0 + i + 1));
417 ARM_MOV_REG_REG (code, ARMREG_PC, ARMREG_IP);
419 g_assert ((code - start) <= size);
421 mono_arch_flush_icache (start, size);
/* Optional out-parameter with the generated code length. */
425 *code_size = code - start;
431 * mono_arch_get_delegate_invoke_impls:
433 * Return a list of MonoAotTrampInfo structures for the delegate invoke impl
/* Build the list of named delegate-invoke trampolines for AOT: one
 * has-target variant plus one per supported parameter count (0..MAX). */
437 mono_arch_get_delegate_invoke_impls (void)
444 code = get_delegate_invoke_impl (TRUE, 0, &code_len);
445 res = g_slist_prepend (res, mono_tramp_info_create (g_strdup ("delegate_invoke_impl_has_target"), code, code_len, NULL, NULL));
447 for (i = 0; i <= MAX_ARCH_DELEGATE_PARAMS; ++i) {
448 code = get_delegate_invoke_impl (FALSE, i, &code_len);
449 res = g_slist_prepend (res, mono_tramp_info_create (g_strdup_printf ("delegate_invoke_impl_target_%d", i), code, code_len, NULL, NULL));
/* Return (and cache) a delegate-invoke thunk matching `sig`. Struct
 * returns are unsupported; the non-target path also bails out for more
 * than MAX_ARCH_DELEGATE_PARAMS parameters or any non-regsize parameter.
 * Caches are guarded by the arch mutex; AOT builds fetch the named
 * trampoline instead of generating code. (Fragmentary listing: early
 * returns and cache-hit branches are not fully visible.) */
456 mono_arch_get_delegate_invoke_impl (MonoMethodSignature *sig, gboolean has_target)
458 guint8 *code, *start;
460 /* FIXME: Support more cases */
461 if (MONO_TYPE_ISSTRUCT (sig->ret))
/* has_target variant uses a single cached thunk. */
465 static guint8* cached = NULL;
466 mono_mini_arch_lock ();
468 mono_mini_arch_unlock ();
473 start = mono_aot_get_trampoline ("delegate_invoke_impl_has_target");
475 start = get_delegate_invoke_impl (TRUE, 0, NULL);
477 mono_mini_arch_unlock ();
/* Non-target variant: one cache slot per parameter count. */
480 static guint8* cache [MAX_ARCH_DELEGATE_PARAMS + 1] = {NULL};
483 if (sig->param_count > MAX_ARCH_DELEGATE_PARAMS)
485 for (i = 0; i < sig->param_count; ++i)
486 if (!mono_is_regsize_var (sig->params [i]))
489 mono_mini_arch_lock ();
490 code = cache [sig->param_count];
492 mono_mini_arch_unlock ();
497 char *name = g_strdup_printf ("delegate_invoke_impl_target_%d", sig->param_count);
498 start = mono_aot_get_trampoline (name);
501 start = get_delegate_invoke_impl (FALSE, sig->param_count, NULL);
503 cache [sig->param_count] = start;
504 mono_mini_arch_unlock ();
/* The `this` argument is always passed in r0 on ARM; recover it from the
 * saved register state of the call site. */
512 mono_arch_get_this_arg_from_call (MonoGenericSharingContext *gsctx, MonoMethodSignature *sig, mgreg_t *regs, guint8 *code)
514 return (gpointer)regs [ARMREG_R0];
518 * Initialize the cpu to execute managed code.
521 mono_arch_cpu_init (void)
526 * Initialize architecture specific code.
/* One-time architecture init: create the arch mutex, allocate the
 * single-step and breakpoint trigger pages (the bp page is made
 * non-accessible so reads fault), and register the ARM exception-throw
 * icalls for AOT. */
529 mono_arch_init (void)
531 InitializeCriticalSection (&mini_arch_mutex);
533 ss_trigger_page = mono_valloc (NULL, mono_pagesize (), MONO_MMAP_READ|MONO_MMAP_32BIT);
534 bp_trigger_page = mono_valloc (NULL, mono_pagesize (), MONO_MMAP_READ|MONO_MMAP_32BIT);
/* Revoke all access: a read from bp_trigger_page traps into the debugger. */
535 mono_mprotect (bp_trigger_page, mono_pagesize (), 0);
537 mono_aot_register_jit_icall ("mono_arm_throw_exception", mono_arm_throw_exception);
538 mono_aot_register_jit_icall ("mono_arm_throw_exception_by_token", mono_arm_throw_exception_by_token);
542 * Cleanup architecture specific code.
545 mono_arch_cleanup (void)
550 * This function returns the optimizations supported on this cpu.
/* Detect CPU capabilities (ARMv5/v7, Thumb) from the MONO_CPU_ARCH
 * environment override or, failing that, by parsing /proc/cpuinfo, and
 * return the supported optimization mask. (Function name typo is the
 * actual upstream symbol — do not rename.) */
553 mono_arch_cpu_optimizazions (guint32 *exclude_mask)
/* Explicit override, e.g. MONO_CPU_ARCH="armv7 thumb". */
556 const char *cpu_arch = getenv ("MONO_CPU_ARCH");
557 if (cpu_arch != NULL) {
558 thumb_supported = strstr (cpu_arch, "thumb") != NULL;
559 if (strncmp (cpu_arch, "armv", 4) == 0) {
560 v5_supported = cpu_arch [4] >= '5';
561 v7_supported = cpu_arch [4] >= '7';
565 thumb_supported = TRUE;
/* Autodetect from the kernel's Processor/Features lines. */
570 FILE *file = fopen ("/proc/cpuinfo", "r");
572 while ((line = fgets (buf, 512, file))) {
573 if (strncmp (line, "Processor", 9) == 0) {
574 char *ver = strstr (line, "(v");
575 if (ver && (ver [2] == '5' || ver [2] == '6' || ver [2] == '7'))
577 if (ver && (ver [2] == '7'))
581 if (strncmp (line, "Features", 8) == 0) {
582 char *th = strstr (line, "thumb");
584 thumb_supported = TRUE;
592 /*printf ("features: v5: %d, thumb: %d\n", v5_supported, thumb_supported);*/
597 /* no arm-specific optimizations yet */
597 /* no arm-specific optimizations yet */
/* True if `t` fits in a single 32-bit integer register (pointers, object
 * references, reference-typed generic instantiations). Value types fall
 * through to the non-regsize cases. */
605 is_regsize_var (MonoType *t) {
608 t = mini_type_get_underlying_type (NULL, t);
615 case MONO_TYPE_FNPTR:
617 case MONO_TYPE_OBJECT:
618 case MONO_TYPE_STRING:
619 case MONO_TYPE_CLASS:
620 case MONO_TYPE_SZARRAY:
621 case MONO_TYPE_ARRAY:
/* Generic instantiations are regsize only when not a valuetype. */
623 case MONO_TYPE_GENERICINST:
624 if (!mono_type_generic_inst_is_valuetype (t))
627 case MONO_TYPE_VALUETYPE:
/* Collect the variables eligible for global register allocation: live,
 * non-volatile, non-indirect locals/args of register size, sorted by
 * liveness range. */
634 mono_arch_get_allocatable_int_vars (MonoCompile *cfg)
639 for (i = 0; i < cfg->num_varinfo; i++) {
640 MonoInst *ins = cfg->varinfo [i];
641 MonoMethodVar *vmv = MONO_VARINFO (cfg, i);
/* Skip unused or dead variables (empty/inverted live range). */
644 if (vmv->range.first_use.abs_pos >= vmv->range.last_use.abs_pos)
647 if (ins->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT) || (ins->opcode != OP_LOCAL && ins->opcode != OP_ARG))
650 /* we can only allocate 32 bit values */
651 if (is_regsize_var (ins->inst_vtype)) {
652 g_assert (MONO_VARINFO (cfg, i)->reg == -1);
653 g_assert (i == vmv->idx);
654 vars = mono_varlist_insert_sorted (cfg, vars, vmv, FALSE);
661 #define USE_EXTRA_TEMPS 0
/* Return the callee-saved integer registers (v1-v5) available to the
 * global register allocator. V5 is withheld when it is reserved for the
 * vtable/rgctx/IMT argument (AOT or rgctx use); see the FIXME below for
 * why methods with calls force uses_rgctx_reg. */
664 mono_arch_get_global_int_regs (MonoCompile *cfg)
669 * FIXME: Interface calls might go through a static rgctx trampoline which
670 * sets V5, but it doesn't save it, so we need to save it ourselves, and
673 if (cfg->flags & MONO_CFG_HAS_CALLS)
674 cfg->uses_rgctx_reg = TRUE;
676 regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_V1));
677 regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_V2));
678 regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_V3));
679 regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_V4));
680 if (!(cfg->compile_aot || cfg->uses_rgctx_reg))
681 /* V5 is reserved for passing the vtable/rgctx/IMT method */
682 regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_V5));
683 /*regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_V6));*/
684 /*regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_V7));*/
690 * mono_arch_regalloc_cost:
692 * Return the cost, in number of memory references, of the action of
693 * allocating the variable VMV into a register during global register
697 mono_arch_regalloc_cost (MonoCompile *cfg, MonoMethodVar *vmv)
703 #endif /* #ifndef DISABLE_JIT */
705 #ifndef __GNUC_PREREQ
706 #define __GNUC_PREREQ(maj, min) (0)
/* Flush the instruction cache for [code, code+size) after emitting or
 * patching machine code. Platform-specific: Darwin's
 * sys_icache_invalidate, GCC >= 4.1's __clear_cache, Android's
 * cacheflush syscall (0xf0002), or a raw swi 0x9f0002 on older Linux.
 * (Fragmentary listing: some #if arms and asm bodies are not visible.) */
710 mono_arch_flush_icache (guint8 *code, gint size)
713 sys_icache_invalidate (code, size);
714 #elif __GNUC_PREREQ(4, 1)
715 __clear_cache (code, code + size);
716 #elif defined(PLATFORM_ANDROID)
717 const int syscall = 0xf0002;
725 : "r" (code), "r" (code + size), "r" (syscall)
726 : "r0", "r1", "r7", "r2"
729 __asm __volatile ("mov r0, %0\n"
732 "swi 0x9f0002 @ sys_cacheflush"
734 : "r" (code), "r" (code + size), "r" (0)
735 : "r0", "r1", "r3" );
752 guint16 vtsize; /* in param area */
755 guint8 size : 4; /* 1, 2, 4, 8, or regs used by RegTypeStructByVal */
761 gboolean vtype_retaddr;
762 /* The index of the vret arg in the argument list */
772 /*#define __alignof__(a) sizeof(a)*/
773 #define __alignof__(type) G_STRUCT_OFFSET(struct { char c; type x; }, x)
/* Assign the next argument to a register or stack slot, advancing *gr /
 * *stack_size. `simple` selects single-word handling; 64-bit values may
 * be split across r3 and the stack (RegTypeBaseGen) or use an aligned
 * register pair (RegTypeIRegPair) depending on the platform's gint64
 * alignment. (Fragmentary listing: the increments of *gr/*stack_size and
 * some #if/else arms are not visible.) */
779 add_general (guint *gr, guint *stack_size, ArgInfo *ainfo, gboolean simple)
/* Simple word argument: registers r0-r3 first, then the caller's stack. */
782 if (*gr > ARMREG_R3) {
783 ainfo->offset = *stack_size;
784 ainfo->reg = ARMREG_SP; /* in the caller */
785 ainfo->storage = RegTypeBase;
788 ainfo->storage = RegTypeGeneral;
792 #if defined(__APPLE__) && defined(MONO_CROSS_COMPILE)
/* 64-bit argument: splitting policy depends on gint64 alignment (4 on
 * Darwin, 8 on EABI). */
795 int i8_align = __alignof__ (gint64);
799 gboolean split = i8_align == 4;
801 gboolean split = TRUE;
804 if (*gr == ARMREG_R3 && split) {
805 /* first word in r3 and the second on the stack */
806 ainfo->offset = *stack_size;
807 ainfo->reg = ARMREG_SP; /* in the caller */
808 ainfo->storage = RegTypeBaseGen;
810 } else if (*gr >= ARMREG_R3) {
812 /* darwin aligns longs to 4 byte only */
818 ainfo->offset = *stack_size;
819 ainfo->reg = ARMREG_SP; /* in the caller */
820 ainfo->storage = RegTypeBase;
/* EABI: skip a register so the pair starts on an even register. */
824 if (i8_align == 8 && ((*gr) & 1))
827 ainfo->storage = RegTypeIRegPair;
836 get_call_info (MonoMemPool *mp, MonoMethodSignature *sig, gboolean is_pinvoke)
839 int n = sig->hasthis + sig->param_count;
840 MonoType *simpletype;
841 guint32 stack_size = 0;
845 cinfo = mono_mempool_alloc0 (mp, sizeof (CallInfo) + (sizeof (ArgInfo) * n));
847 cinfo = g_malloc0 (sizeof (CallInfo) + (sizeof (ArgInfo) * n));
852 /* FIXME: handle returning a struct */
853 if (MONO_TYPE_ISSTRUCT (sig->ret)) {
856 if (is_pinvoke && mono_class_native_size (mono_class_from_mono_type (sig->ret), &align) <= sizeof (gpointer)) {
857 cinfo->ret.storage = RegTypeStructByVal;
859 cinfo->vtype_retaddr = TRUE;
866 * To simplify get_this_arg_reg () and LLVM integration, emit the vret arg after
867 * the first argument, allowing 'this' to be always passed in the first arg reg.
868 * Also do this if the first argument is a reference type, since virtual calls
869 * are sometimes made using calli without sig->hasthis set, like in the delegate
872 if (cinfo->vtype_retaddr && !is_pinvoke && (sig->hasthis || (sig->param_count > 0 && MONO_TYPE_IS_REFERENCE (sig->params [0])))) {
874 add_general (&gr, &stack_size, cinfo->args + 0, TRUE);
876 add_general (&gr, &stack_size, &cinfo->args [sig->hasthis + 0], TRUE);
880 add_general (&gr, &stack_size, &cinfo->ret, TRUE);
881 cinfo->vret_arg_index = 1;
885 add_general (&gr, &stack_size, cinfo->args + 0, TRUE);
889 if (cinfo->vtype_retaddr)
890 add_general (&gr, &stack_size, &cinfo->ret, TRUE);
893 DEBUG(printf("params: %d\n", sig->param_count));
894 for (i = pstart; i < sig->param_count; ++i) {
895 if ((sig->call_convention == MONO_CALL_VARARG) && (i == sig->sentinelpos)) {
896 /* Prevent implicit arguments and sig_cookie from
897 being passed in registers */
899 /* Emit the signature cookie just before the implicit arguments */
900 add_general (&gr, &stack_size, &cinfo->sig_cookie, TRUE);
902 DEBUG(printf("param %d: ", i));
903 if (sig->params [i]->byref) {
904 DEBUG(printf("byref\n"));
905 add_general (&gr, &stack_size, cinfo->args + n, TRUE);
909 simpletype = mini_type_get_underlying_type (NULL, sig->params [i]);
910 switch (simpletype->type) {
911 case MONO_TYPE_BOOLEAN:
914 cinfo->args [n].size = 1;
915 add_general (&gr, &stack_size, cinfo->args + n, TRUE);
921 cinfo->args [n].size = 2;
922 add_general (&gr, &stack_size, cinfo->args + n, TRUE);
927 cinfo->args [n].size = 4;
928 add_general (&gr, &stack_size, cinfo->args + n, TRUE);
934 case MONO_TYPE_FNPTR:
935 case MONO_TYPE_CLASS:
936 case MONO_TYPE_OBJECT:
937 case MONO_TYPE_STRING:
938 case MONO_TYPE_SZARRAY:
939 case MONO_TYPE_ARRAY:
941 cinfo->args [n].size = sizeof (gpointer);
942 add_general (&gr, &stack_size, cinfo->args + n, TRUE);
945 case MONO_TYPE_GENERICINST:
946 if (!mono_type_generic_inst_is_valuetype (simpletype)) {
947 cinfo->args [n].size = sizeof (gpointer);
948 add_general (&gr, &stack_size, cinfo->args + n, TRUE);
953 case MONO_TYPE_TYPEDBYREF:
954 case MONO_TYPE_VALUETYPE: {
960 if (simpletype->type == MONO_TYPE_TYPEDBYREF) {
961 size = sizeof (MonoTypedRef);
962 align = sizeof (gpointer);
964 MonoClass *klass = mono_class_from_mono_type (sig->params [i]);
966 size = mono_class_native_size (klass, &align);
968 size = mono_class_value_size (klass, &align);
970 DEBUG(printf ("load %d bytes struct\n",
971 mono_class_native_size (sig->params [i]->data.klass, NULL)));
974 align_size += (sizeof (gpointer) - 1);
975 align_size &= ~(sizeof (gpointer) - 1);
976 nwords = (align_size + sizeof (gpointer) -1 ) / sizeof (gpointer);
977 cinfo->args [n].storage = RegTypeStructByVal;
978 /* FIXME: align stack_size if needed */
980 if (align >= 8 && (gr & 1))
983 if (gr > ARMREG_R3) {
984 cinfo->args [n].size = 0;
985 cinfo->args [n].vtsize = nwords;
987 int rest = ARMREG_R3 - gr + 1;
988 int n_in_regs = rest >= nwords? nwords: rest;
990 cinfo->args [n].size = n_in_regs;
991 cinfo->args [n].vtsize = nwords - n_in_regs;
992 cinfo->args [n].reg = gr;
996 cinfo->args [n].offset = stack_size;
997 /*g_print ("offset for arg %d at %d\n", n, stack_size);*/
998 stack_size += nwords * sizeof (gpointer);
1005 cinfo->args [n].size = 8;
1006 add_general (&gr, &stack_size, cinfo->args + n, FALSE);
1010 g_error ("Can't trampoline 0x%x", sig->params [i]->type);
1014 /* Handle the case where there are no implicit arguments */
1015 if ((sig->call_convention == MONO_CALL_VARARG) && (i == sig->sentinelpos)) {
1016 /* Prevent implicit arguments and sig_cookie from
1017 being passed in registers */
1019 /* Emit the signature cookie just before the implicit arguments */
1020 add_general (&gr, &stack_size, &cinfo->sig_cookie, TRUE);
1024 simpletype = mini_type_get_underlying_type (NULL, sig->ret);
1025 switch (simpletype->type) {
1026 case MONO_TYPE_BOOLEAN:
1031 case MONO_TYPE_CHAR:
1037 case MONO_TYPE_FNPTR:
1038 case MONO_TYPE_CLASS:
1039 case MONO_TYPE_OBJECT:
1040 case MONO_TYPE_SZARRAY:
1041 case MONO_TYPE_ARRAY:
1042 case MONO_TYPE_STRING:
1043 cinfo->ret.storage = RegTypeGeneral;
1044 cinfo->ret.reg = ARMREG_R0;
1048 cinfo->ret.storage = RegTypeIRegPair;
1049 cinfo->ret.reg = ARMREG_R0;
1053 cinfo->ret.storage = RegTypeFP;
1054 cinfo->ret.reg = ARMREG_R0;
1055 /* FIXME: cinfo->ret.reg = ???;
1056 cinfo->ret.storage = RegTypeFP;*/
1058 case MONO_TYPE_GENERICINST:
1059 if (!mono_type_generic_inst_is_valuetype (simpletype)) {
1060 cinfo->ret.storage = RegTypeGeneral;
1061 cinfo->ret.reg = ARMREG_R0;
1065 case MONO_TYPE_VALUETYPE:
1066 case MONO_TYPE_TYPEDBYREF:
1067 if (cinfo->ret.storage != RegTypeStructByVal)
1068 cinfo->ret.storage = RegTypeStructByAddr;
1070 case MONO_TYPE_VOID:
1073 g_error ("Can't handle as return value 0x%x", sig->ret->type);
1077 /* align stack size to 8 */
1078 DEBUG (printf (" stack size: %d (%d)\n", (stack_size + 15) & ~15, stack_size));
1079 stack_size = (stack_size + 7) & ~7;
1081 cinfo->stack_usage = stack_size;
1088 * Set var information according to the calling convention. arm version.
1089 * The locals var stuff should most likely be split in another method.
1092 mono_arch_allocate_vars (MonoCompile *cfg)
1094 MonoMethodSignature *sig;
1095 MonoMethodHeader *header;
1097 int i, offset, size, align, curinst;
1098 int frame_reg = ARMREG_FP;
1102 sig = mono_method_signature (cfg->method);
1104 if (!cfg->arch.cinfo)
1105 cfg->arch.cinfo = get_call_info (cfg->mempool, sig, sig->pinvoke);
1106 cinfo = cfg->arch.cinfo;
1108 /* FIXME: this will change when we use FP as gcc does */
1109 cfg->flags |= MONO_CFG_HAS_SPILLUP;
1111 /* allow room for the vararg method args: void* and long/double */
1112 if (mono_jit_trace_calls != NULL && mono_trace_eval (cfg->method))
1113 cfg->param_area = MAX (cfg->param_area, sizeof (gpointer)*8);
1115 header = cfg->header;
1118 * We use the frame register also for any method that has
1119 * exception clauses. This way, when the handlers are called,
1120 * the code will reference local variables using the frame reg instead of
1121 * the stack pointer: if we had to restore the stack pointer, we'd
1122 * corrupt the method frames that are already on the stack (since
1123 * filters get called before stack unwinding happens) when the filter
1124 * code would call any method (this also applies to finally etc.).
1126 if ((cfg->flags & MONO_CFG_HAS_ALLOCA) || header->num_clauses)
1127 frame_reg = ARMREG_FP;
1128 cfg->frame_reg = frame_reg;
1129 if (frame_reg != ARMREG_SP) {
1130 cfg->used_int_regs |= 1 << frame_reg;
1133 if (cfg->compile_aot || cfg->uses_rgctx_reg)
1134 /* V5 is reserved for passing the vtable/rgctx/IMT method */
1135 cfg->used_int_regs |= (1 << ARMREG_V5);
1139 if (!MONO_TYPE_ISSTRUCT (sig->ret)) {
1140 switch (mini_type_get_underlying_type (NULL, sig->ret)->type) {
1141 case MONO_TYPE_VOID:
1144 cfg->ret->opcode = OP_REGVAR;
1145 cfg->ret->inst_c0 = ARMREG_R0;
1149 /* local vars are at a positive offset from the stack pointer */
1151 * also note that if the function uses alloca, we use FP
1152 * to point at the local variables.
1154 offset = 0; /* linkage area */
1155 /* align the offset to 16 bytes: not sure this is needed here */
1157 //offset &= ~(8 - 1);
1159 /* add parameter area size for called functions */
1160 offset += cfg->param_area;
1163 if (cfg->flags & MONO_CFG_HAS_FPOUT)
1166 /* allow room to save the return value */
1167 if (mono_jit_trace_calls != NULL && mono_trace_eval (cfg->method))
1170 /* the MonoLMF structure is stored just below the stack pointer */
1171 if (MONO_TYPE_ISSTRUCT (sig->ret)) {
1172 if (cinfo->ret.storage == RegTypeStructByVal) {
1173 cfg->ret->opcode = OP_REGOFFSET;
1174 cfg->ret->inst_basereg = cfg->frame_reg;
1175 offset += sizeof (gpointer) - 1;
1176 offset &= ~(sizeof (gpointer) - 1);
1177 cfg->ret->inst_offset = - offset;
1179 ins = cfg->vret_addr;
1180 offset += sizeof(gpointer) - 1;
1181 offset &= ~(sizeof(gpointer) - 1);
1182 ins->inst_offset = offset;
1183 ins->opcode = OP_REGOFFSET;
1184 ins->inst_basereg = frame_reg;
1185 if (G_UNLIKELY (cfg->verbose_level > 1)) {
1186 printf ("vret_addr =");
1187 mono_print_ins (cfg->vret_addr);
1190 offset += sizeof(gpointer);
1193 /* Allocate these first so they have a small offset, OP_SEQ_POINT depends on this */
1194 if (cfg->arch.seq_point_info_var) {
1197 ins = cfg->arch.seq_point_info_var;
1201 offset += align - 1;
1202 offset &= ~(align - 1);
1203 ins->opcode = OP_REGOFFSET;
1204 ins->inst_basereg = frame_reg;
1205 ins->inst_offset = offset;
1208 ins = cfg->arch.ss_trigger_page_var;
1211 offset += align - 1;
1212 offset &= ~(align - 1);
1213 ins->opcode = OP_REGOFFSET;
1214 ins->inst_basereg = frame_reg;
1215 ins->inst_offset = offset;
1219 curinst = cfg->locals_start;
1220 for (i = curinst; i < cfg->num_varinfo; ++i) {
1221 ins = cfg->varinfo [i];
1222 if ((ins->flags & MONO_INST_IS_DEAD) || ins->opcode == OP_REGVAR || ins->opcode == OP_REGOFFSET)
1225 /* inst->backend.is_pinvoke indicates native sized value types, this is used by the
1226 * pinvoke wrappers when they call functions returning structure */
1227 if (ins->backend.is_pinvoke && MONO_TYPE_ISSTRUCT (ins->inst_vtype) && ins->inst_vtype->type != MONO_TYPE_TYPEDBYREF) {
1228 size = mono_class_native_size (mono_class_from_mono_type (ins->inst_vtype), &ualign);
1232 size = mono_type_size (ins->inst_vtype, &align);
1234 /* FIXME: if a structure is misaligned, our memcpy doesn't work,
1235 * since it loads/stores misaligned words, which don't do the right thing.
1237 if (align < 4 && size >= 4)
1239 offset += align - 1;
1240 offset &= ~(align - 1);
1241 ins->opcode = OP_REGOFFSET;
1242 ins->inst_offset = offset;
1243 ins->inst_basereg = frame_reg;
1245 //g_print ("allocating local %d to %d\n", i, inst->inst_offset);
1250 ins = cfg->args [curinst];
1251 if (ins->opcode != OP_REGVAR) {
1252 ins->opcode = OP_REGOFFSET;
1253 ins->inst_basereg = frame_reg;
1254 offset += sizeof (gpointer) - 1;
1255 offset &= ~(sizeof (gpointer) - 1);
1256 ins->inst_offset = offset;
1257 offset += sizeof (gpointer);
1262 if (sig->call_convention == MONO_CALL_VARARG) {
1266 /* Allocate a local slot to hold the sig cookie address */
1267 offset += align - 1;
1268 offset &= ~(align - 1);
1269 cfg->sig_cookie = offset;
1273 for (i = 0; i < sig->param_count; ++i) {
1274 ins = cfg->args [curinst];
1276 if (ins->opcode != OP_REGVAR) {
1277 ins->opcode = OP_REGOFFSET;
1278 ins->inst_basereg = frame_reg;
1279 size = mini_type_stack_size_full (NULL, sig->params [i], &ualign, sig->pinvoke);
1281 /* FIXME: if a structure is misaligned, our memcpy doesn't work,
1282 * since it loads/stores misaligned words, which don't do the right thing.
1284 if (align < 4 && size >= 4)
1286 /* The code in the prolog () stores words when storing vtypes received in a register */
1287 if (MONO_TYPE_ISSTRUCT (sig->params [i]))
1289 offset += align - 1;
1290 offset &= ~(align - 1);
1291 ins->inst_offset = offset;
1297 /* align the offset to 8 bytes */
1302 cfg->stack_offset = offset;
/* Create arch-specific variables before allocation: the vret address arg
 * for struct returns not passed by value, and (for AOT with sequence
 * points) the seq-point-info and single-step trigger-page locals. */
1306 mono_arch_create_vars (MonoCompile *cfg)
1308 MonoMethodSignature *sig;
1311 sig = mono_method_signature (cfg->method);
/* Call info is computed once and cached on cfg->arch. */
1313 if (!cfg->arch.cinfo)
1314 cfg->arch.cinfo = get_call_info (cfg->mempool, sig, sig->pinvoke);
1315 cinfo = cfg->arch.cinfo;
1317 if (cinfo->ret.storage == RegTypeStructByVal)
1318 cfg->ret_var_is_local = TRUE;
/* Struct returned by hidden pointer: add the vret address argument. */
1320 if (MONO_TYPE_ISSTRUCT (sig->ret) && cinfo->ret.storage != RegTypeStructByVal) {
1321 cfg->vret_addr = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_ARG);
1322 if (G_UNLIKELY (cfg->verbose_level > 1)) {
1323 printf ("vret_addr = ");
1324 mono_print_ins (cfg->vret_addr);
1328 if (cfg->gen_seq_points && cfg->compile_aot) {
1329 MonoInst *ins = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1330 ins->flags |= MONO_INST_VOLATILE;
1331 cfg->arch.seq_point_info_var = ins;
1333 /* Allocate a separate variable for this to save 1 load per seq point */
1334 ins = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1335 ins->flags |= MONO_INST_VOLATILE;
1336 cfg->arch.ss_trigger_page_var = ins;
/* Emit the vararg signature cookie: build a trimmed copy of the call
 * signature containing only the arguments after the sentinel, and store a
 * pointer to it at the cookie's stack slot. Disables AOT (signature
 * tokens unsupported there); no-op for tail calls. */
1341 emit_sig_cookie (MonoCompile *cfg, MonoCallInst *call, CallInfo *cinfo)
1343 MonoMethodSignature *tmp_sig;
1346 if (call->tail_call)
1349 /* FIXME: Add support for signature tokens to AOT */
1350 cfg->disable_aot = TRUE;
1352 g_assert (cinfo->sig_cookie.storage == RegTypeBase);
1355 * mono_ArgIterator_Setup assumes the signature cookie is
1356 * passed first and all the arguments which were before it are
1357 * passed on the stack after the signature. So compensate by
1358 * passing a different signature.
1360 tmp_sig = mono_metadata_signature_dup (call->signature);
1361 tmp_sig->param_count -= call->signature->sentinelpos;
1362 tmp_sig->sentinelpos = 0;
1363 memcpy (tmp_sig->params, call->signature->params + call->signature->sentinelpos, tmp_sig->param_count * sizeof (MonoType*));
1365 MONO_INST_NEW (cfg, sig_arg, OP_ICONST);
1366 sig_arg->dreg = mono_alloc_ireg (cfg);
1367 sig_arg->inst_p0 = tmp_sig;
1368 MONO_ADD_INS (cfg->cbb, sig_arg);
/* Store the cookie pointer at its reserved offset in the outgoing area. */
1370 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, ARMREG_SP, cinfo->sig_cookie.offset, sig_arg->dreg);
/* Translate this backend's calling convention into LLVM's LLVMCallInfo.
 * Only plain integer-register storage is supported; any other return or
 * argument convention disables LLVM compilation for the method with an
 * explanatory message. (Fragmentary listing: some switch cases missing.) */
1375 mono_arch_get_llvm_call_info (MonoCompile *cfg, MonoMethodSignature *sig)
1380 LLVMCallInfo *linfo;
1382 n = sig->param_count + sig->hasthis;
1384 cinfo = get_call_info (cfg->mempool, sig, sig->pinvoke);
1386 linfo = mono_mempool_alloc0 (cfg->mempool, sizeof (LLVMCallInfo) + (sizeof (LLVMArgInfo) * n));
1389 * LLVM always uses the native ABI while we use our own ABI, the
1390 * only difference is the handling of vtypes:
1391 * - we only pass/receive them in registers in some cases, and only
1392 * in 1 or 2 integer registers.
/* Unsupported return conventions bail out to the non-LLVM JIT path. */
1394 if (cinfo->ret.storage != RegTypeGeneral && cinfo->ret.storage != RegTypeNone && cinfo->ret.storage != RegTypeFP && cinfo->ret.storage != RegTypeIRegPair) {
1395 cfg->exception_message = g_strdup ("unknown ret conv");
1396 cfg->disable_llvm = TRUE;
1400 for (i = 0; i < n; ++i) {
1401 ainfo = cinfo->args + i;
1403 linfo->args [i].storage = LLVMArgNone;
1405 switch (ainfo->storage) {
1406 case RegTypeGeneral:
1407 case RegTypeIRegPair:
1409 linfo->args [i].storage = LLVMArgInIReg;
1412 cfg->exception_message = g_strdup_printf ("ainfo->storage (%d)", ainfo->storage);
1413 cfg->disable_llvm = TRUE;
/*
 * mono_arch_emit_call:
 *
 *   Lower a call instruction: for each argument, emit the IR that moves the
 * value into the register(s) or stack slot dictated by the ArgInfo computed
 * by get_call_info ().  Also handles the vararg signature cookie and vtype
 * return address setup.
 */
1423 mono_arch_emit_call (MonoCompile *cfg, MonoCallInst *call)
1426 MonoMethodSignature *sig;
1430 sig = call->signature;
1431 n = sig->param_count + sig->hasthis;
1433 cinfo = get_call_info (NULL, sig, sig->pinvoke);
1435 for (i = 0; i < n; ++i) {
1436 ArgInfo *ainfo = cinfo->args + i;
/* The implicit 'this' argument is treated as a native int. */
1439 if (i >= sig->hasthis)
1440 t = sig->params [i - sig->hasthis];
1442 t = &mono_defaults.int_class->byval_arg;
1443 t = mini_type_get_underlying_type (NULL, t);
1445 if ((sig->call_convention == MONO_CALL_VARARG) && (i == sig->sentinelpos)) {
1446 /* Emit the signature cookie just before the implicit arguments */
1447 emit_sig_cookie (cfg, call, cinfo);
1450 in = call->args [i];
1452 switch (ainfo->storage) {
1453 case RegTypeGeneral:
1454 case RegTypeIRegPair:
/* I8/U8 occupy a register pair: move each 32-bit half separately. */
1455 if (!t->byref && ((t->type == MONO_TYPE_I8) || (t->type == MONO_TYPE_U8))) {
1456 MONO_INST_NEW (cfg, ins, OP_MOVE);
1457 ins->dreg = mono_alloc_ireg (cfg);
1458 ins->sreg1 = in->dreg + 1;
1459 MONO_ADD_INS (cfg->cbb, ins);
1460 mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, ainfo->reg, FALSE);
1462 MONO_INST_NEW (cfg, ins, OP_MOVE);
1463 ins->dreg = mono_alloc_ireg (cfg);
1464 ins->sreg1 = in->dreg + 2;
1465 MONO_ADD_INS (cfg->cbb, ins);
1466 mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, ainfo->reg + 1, FALSE);
1467 } else if (!t->byref && ((t->type == MONO_TYPE_R8) || (t->type == MONO_TYPE_R4))) {
1468 #ifndef MONO_ARCH_SOFT_FLOAT
1472 if (ainfo->size == 4) {
1473 #ifdef MONO_ARCH_SOFT_FLOAT
1474 /* mono_emit_call_args () have already done the r8->r4 conversion */
1475 /* The converted value is in an int vreg */
1476 MONO_INST_NEW (cfg, ins, OP_MOVE);
1477 ins->dreg = mono_alloc_ireg (cfg);
1478 ins->sreg1 = in->dreg;
1479 MONO_ADD_INS (cfg->cbb, ins);
1480 mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, ainfo->reg, FALSE);
/* Hard-float: bounce the value through the stack to get it into an int reg. */
1482 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORER4_MEMBASE_REG, ARMREG_SP, (cfg->param_area - 8), in->dreg);
1483 creg = mono_alloc_ireg (cfg);
1484 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOAD_MEMBASE, creg, ARMREG_SP, (cfg->param_area - 8));
1485 mono_call_inst_add_outarg_reg (cfg, call, creg, ainfo->reg, FALSE);
1488 #ifdef MONO_ARCH_SOFT_FLOAT
/* Soft-float R8: split the double into its two 32-bit words. */
1489 MONO_INST_NEW (cfg, ins, OP_FGETLOW32);
1490 ins->dreg = mono_alloc_ireg (cfg);
1491 ins->sreg1 = in->dreg;
1492 MONO_ADD_INS (cfg->cbb, ins);
1493 mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, ainfo->reg, FALSE);
1495 MONO_INST_NEW (cfg, ins, OP_FGETHIGH32);
1496 ins->dreg = mono_alloc_ireg (cfg);
1497 ins->sreg1 = in->dreg;
1498 MONO_ADD_INS (cfg->cbb, ins);
1499 mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, ainfo->reg + 1, FALSE);
/* Hard-float R8: spill to the param area and reload both words into int regs. */
1501 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORER8_MEMBASE_REG, ARMREG_SP, (cfg->param_area - 8), in->dreg);
1502 creg = mono_alloc_ireg (cfg);
1503 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOAD_MEMBASE, creg, ARMREG_SP, (cfg->param_area - 8));
1504 mono_call_inst_add_outarg_reg (cfg, call, creg, ainfo->reg, FALSE);
1505 creg = mono_alloc_ireg (cfg);
1506 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOAD_MEMBASE, creg, ARMREG_SP, (cfg->param_area - 8 + 4));
1507 mono_call_inst_add_outarg_reg (cfg, call, creg, ainfo->reg + 1, FALSE);
1510 cfg->flags |= MONO_CFG_HAS_FPOUT;
/* Plain 32-bit value in a single register. */
1512 MONO_INST_NEW (cfg, ins, OP_MOVE);
1513 ins->dreg = mono_alloc_ireg (cfg);
1514 ins->sreg1 = in->dreg;
1515 MONO_ADD_INS (cfg->cbb, ins);
1517 mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, ainfo->reg, FALSE);
1520 case RegTypeStructByAddr:
1523 /* FIXME: where is the data allocated? */
1524 arg->backend.reg3 = ainfo->reg;
1525 call->used_iregs |= 1 << ainfo->reg;
1526 g_assert_not_reached ();
/* Vtypes passed (partially) in registers are lowered later via OP_OUTARG_VT. */
1529 case RegTypeStructByVal:
1530 MONO_INST_NEW (cfg, ins, OP_OUTARG_VT);
1531 ins->opcode = OP_OUTARG_VT;
1532 ins->sreg1 = in->dreg;
1533 ins->klass = in->klass;
1534 ins->inst_p0 = call;
1535 ins->inst_p1 = mono_mempool_alloc (cfg->mempool, sizeof (ArgInfo));
1536 memcpy (ins->inst_p1, ainfo, sizeof (ArgInfo));
1537 MONO_ADD_INS (cfg->cbb, ins);
/* Stack-passed arguments: store into the outgoing argument area. */
1540 if (!t->byref && ((t->type == MONO_TYPE_I8) || (t->type == MONO_TYPE_U8))) {
1541 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, ARMREG_SP, ainfo->offset, in->dreg);
1542 } else if (!t->byref && ((t->type == MONO_TYPE_R4) || (t->type == MONO_TYPE_R8))) {
1543 if (t->type == MONO_TYPE_R8) {
1544 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORER8_MEMBASE_REG, ARMREG_SP, ainfo->offset, in->dreg);
1546 #ifdef MONO_ARCH_SOFT_FLOAT
/* Soft-float R4 values live in int vregs, so use an int store. */
1547 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, ARMREG_SP, ainfo->offset, in->dreg);
1549 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORER4_MEMBASE_REG, ARMREG_SP, ainfo->offset, in->dreg);
1553 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, ARMREG_SP, ainfo->offset, in->dreg);
/* Argument split between the last register (r3) and the stack. */
1556 case RegTypeBaseGen:
1557 if (!t->byref && ((t->type == MONO_TYPE_I8) || (t->type == MONO_TYPE_U8))) {
1558 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, ARMREG_SP, ainfo->offset, (G_BYTE_ORDER == G_BIG_ENDIAN) ? in->dreg + 1 : in->dreg + 2);
1559 MONO_INST_NEW (cfg, ins, OP_MOVE);
1560 ins->dreg = mono_alloc_ireg (cfg);
1561 ins->sreg1 = G_BYTE_ORDER == G_BIG_ENDIAN ? in->dreg + 2 : in->dreg + 1;
1562 MONO_ADD_INS (cfg->cbb, ins);
1563 mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, ARMREG_R3, FALSE);
1564 } else if (!t->byref && (t->type == MONO_TYPE_R8)) {
1567 #ifdef MONO_ARCH_SOFT_FLOAT
1568 g_assert_not_reached ();
/* One word of the double goes in r3, the other on the stack. */
1571 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORER8_MEMBASE_REG, ARMREG_SP, (cfg->param_area - 8), in->dreg);
1572 creg = mono_alloc_ireg (cfg);
1573 mono_call_inst_add_outarg_reg (cfg, call, creg, ARMREG_R3, FALSE);
1574 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOAD_MEMBASE, creg, ARMREG_SP, (cfg->param_area - 8));
1575 creg = mono_alloc_ireg (cfg);
1576 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOAD_MEMBASE, creg, ARMREG_SP, (cfg->param_area - 4));
1577 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, ARMREG_SP, ainfo->offset, creg);
1578 cfg->flags |= MONO_CFG_HAS_FPOUT;
1580 g_assert_not_reached ();
1587 arg->backend.reg3 = ainfo->reg;
1588 /* FP args are passed in int regs */
1589 call->used_iregs |= 1 << ainfo->reg;
1590 if (ainfo->size == 8) {
1591 arg->opcode = OP_OUTARG_R8;
1592 call->used_iregs |= 1 << (ainfo->reg + 1);
1594 arg->opcode = OP_OUTARG_R4;
1597 cfg->flags |= MONO_CFG_HAS_FPOUT;
1601 g_assert_not_reached ();
1605 /* Handle the case where there are no implicit arguments */
1606 if (!sig->pinvoke && (sig->call_convention == MONO_CALL_VARARG) && (n == sig->sentinelpos))
1607 emit_sig_cookie (cfg, call, cinfo);
1609 if (sig->ret && MONO_TYPE_ISSTRUCT (sig->ret)) {
1612 if (cinfo->ret.storage == RegTypeStructByVal) {
1613 /* The JIT will transform this into a normal call */
1614 call->vret_in_reg = TRUE;
/* Otherwise pass the vtype return buffer address in the convention's register. */
1616 MONO_INST_NEW (cfg, vtarg, OP_MOVE);
1617 vtarg->sreg1 = call->vret_var->dreg;
1618 vtarg->dreg = mono_alloc_preg (cfg);
1619 MONO_ADD_INS (cfg->cbb, vtarg);
1621 mono_call_inst_add_outarg_reg (cfg, call, vtarg->dreg, cinfo->ret.reg, FALSE);
1625 call->stack_usage = cinfo->stack_usage;
/*
 * mono_arch_emit_outarg_vt:
 *
 *   Lower an OP_OUTARG_VT created by mono_arch_emit_call (): copy the first
 * ainfo->size words of the vtype into consecutive argument registers, then
 * memcpy the overflow (ainfo->vtsize words) into the stack argument area at
 * ainfo->offset.
 */
1631 mono_arch_emit_outarg_vt (MonoCompile *cfg, MonoInst *ins, MonoInst *src)
1633 MonoCallInst *call = (MonoCallInst*)ins->inst_p0;
1634 ArgInfo *ainfo = ins->inst_p1;
1635 int ovf_size = ainfo->vtsize;
1636 int doffset = ainfo->offset;
1637 int i, soffset, dreg;
/* Register portion: one word per argument register. */
1640 for (i = 0; i < ainfo->size; ++i) {
1641 dreg = mono_alloc_ireg (cfg);
1642 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, dreg, src->dreg, soffset);
1643 mono_call_inst_add_outarg_reg (cfg, call, dreg, ainfo->reg + i, FALSE);
1644 soffset += sizeof (gpointer);
1646 //g_print ("vt size: %d at R%d + %d\n", doffset, vt->inst_basereg, vt->inst_offset);
/* Stack portion: copy the remaining words starting where the registers left off. */
1648 mini_emit_memcpy (cfg, ARMREG_SP, doffset, src->dreg, soffset, ovf_size * sizeof (gpointer), 0);
/*
 * mono_arch_emit_setret:
 *
 *   Emit IR that moves VAL into the method's return register(s), depending
 * on the return type and the active FP ABI (soft-float, FPA or VFP).
 */
1652 mono_arch_emit_setret (MonoCompile *cfg, MonoMethod *method, MonoInst *val)
1654 MonoType *ret = mini_type_get_underlying_type (cfg->generic_sharing_context, mono_method_signature (method)->ret);
/* 64-bit returns go in a register pair (r0/r1). */
1657 if (ret->type == MONO_TYPE_I8 || ret->type == MONO_TYPE_U8) {
1660 if (COMPILE_LLVM (cfg)) {
1661 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, cfg->ret->dreg, val->dreg);
1663 MONO_INST_NEW (cfg, ins, OP_SETLRET);
1664 ins->sreg1 = val->dreg + 1;
1665 ins->sreg2 = val->dreg + 2;
1666 MONO_ADD_INS (cfg->cbb, ins);
1670 #ifdef MONO_ARCH_SOFT_FLOAT
1671 if (ret->type == MONO_TYPE_R8) {
1674 MONO_INST_NEW (cfg, ins, OP_SETFRET);
1675 ins->dreg = cfg->ret->dreg;
1676 ins->sreg1 = val->dreg;
1677 MONO_ADD_INS (cfg->cbb, ins);
1680 if (ret->type == MONO_TYPE_R4) {
1681 /* Already converted to an int in method_to_ir () */
1682 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, cfg->ret->dreg, val->dreg);
1685 #elif defined(ARM_FPU_VFP)
1686 if (ret->type == MONO_TYPE_R8 || ret->type == MONO_TYPE_R4) {
1689 MONO_INST_NEW (cfg, ins, OP_SETFRET);
1690 ins->dreg = cfg->ret->dreg;
1691 ins->sreg1 = val->dreg;
1692 MONO_ADD_INS (cfg->cbb, ins);
/* FPA: FP return values can be moved directly. */
1696 if (ret->type == MONO_TYPE_R4 || ret->type == MONO_TYPE_R8) {
1697 MONO_EMIT_NEW_UNALU (cfg, OP_FMOVE, cfg->ret->dreg, val->dreg);
/* Default: plain 32-bit return in r0. */
1704 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, cfg->ret->dreg, val->dreg);
1707 #endif /* #ifndef DISABLE_JIT */
1710 mono_arch_is_inst_imm (gint64 imm)
1715 #define DYN_CALL_STACK_ARGS 6
1718 MonoMethodSignature *sig;
1723 mgreg_t regs [PARAM_REGS + DYN_CALL_STACK_ARGS];
/*
 * dyn_call_supported:
 *
 *   Return whether the dynamic-call (mono_arch_start_dyn_call) machinery can
 * handle this signature: all arguments must fit in PARAM_REGS registers plus
 * DYN_CALL_STACK_ARGS stack slots, and the return/argument conventions must
 * be among the handled cases.
 */
1729 dyn_call_supported (CallInfo *cinfo, MonoMethodSignature *sig)
1733 if (sig->hasthis + sig->param_count > PARAM_REGS + DYN_CALL_STACK_ARGS)
1736 switch (cinfo->ret.storage) {
1738 case RegTypeGeneral:
1739 case RegTypeIRegPair:
1740 case RegTypeStructByAddr:
1745 #elif defined(ARM_FPU_VFP)
1754 for (i = 0; i < cinfo->nargs; ++i) {
1755 switch (cinfo->args [i].storage) {
1756 case RegTypeGeneral:
1758 case RegTypeIRegPair:
/* Stack arguments must fit in the fixed-size regs[] buffer. */
1761 if (cinfo->args [i].offset >= (DYN_CALL_STACK_ARGS * sizeof (gpointer)))
1764 case RegTypeStructByVal:
1765 if (cinfo->args [i].reg + cinfo->args [i].vtsize >= PARAM_REGS + DYN_CALL_STACK_ARGS)
1773 /* FIXME: Can't use cinfo only as it doesn't contain info about I8/float */
1774 for (i = 0; i < sig->param_count; ++i) {
1775 MonoType *t = sig->params [i];
1783 #ifdef MONO_ARCH_SOFT_FLOAT
/*
 * mono_arch_dyn_call_prepare:
 *
 *   Precompute calling-convention info for SIG so dynamic calls can be
 * started cheaply later.  Returns NULL when the signature is not supported
 * by the dyn-call machinery; otherwise the result must be freed with
 * mono_arch_dyn_call_free ().
 */
1802 mono_arch_dyn_call_prepare (MonoMethodSignature *sig)
1804 ArchDynCallInfo *info;
1807 cinfo = get_call_info (NULL, sig, FALSE);
1809 if (!dyn_call_supported (cinfo, sig)) {
1814 info = g_new0 (ArchDynCallInfo, 1);
1815 // FIXME: Preprocess the info to speed up start_dyn_call ()
1817 info->cinfo = cinfo;
1819 return (MonoDynCallInfo*)info;
/*
 * mono_arch_dyn_call_free:
 *
 *   Free an ArchDynCallInfo allocated by mono_arch_dyn_call_prepare ().
 */
1823 mono_arch_dyn_call_free (MonoDynCallInfo *info)
1825 ArchDynCallInfo *ainfo = (ArchDynCallInfo*)info;
1827 g_free (ainfo->cinfo);
/*
 * mono_arch_start_dyn_call:
 *
 *   Marshal the boxed argument array ARGS into the DynCallArgs structure in
 * BUF, laying each value into the register/stack slot assigned by the
 * precomputed CallInfo.  The dyn-call trampoline later loads regs[] into
 * r0-r3 and the stack area before branching to the callee.
 */
1832 mono_arch_start_dyn_call (MonoDynCallInfo *info, gpointer **args, guint8 *ret, guint8 *buf, int buf_len)
1834 ArchDynCallInfo *dinfo = (ArchDynCallInfo*)info;
1835 DynCallArgs *p = (DynCallArgs*)buf;
1836 int arg_index, greg, i, j;
1837 MonoMethodSignature *sig = dinfo->sig;
1839 g_assert (buf_len >= sizeof (DynCallArgs));
/* Vtype returns: the hidden return-buffer address is the first register arg. */
1847 if (dinfo->cinfo->vtype_retaddr)
1848 p->regs [greg ++] = (mgreg_t)ret;
/* The implicit 'this' pointer, when present. */
1851 p->regs [greg ++] = (mgreg_t)*(args [arg_index ++]);
1853 for (i = 0; i < sig->param_count; i++) {
1854 MonoType *t = mono_type_get_underlying_type (sig->params [i]);
1855 gpointer *arg = args [arg_index ++];
1856 ArgInfo *ainfo = &dinfo->cinfo->args [i + sig->hasthis];
/* regs[] holds PARAM_REGS register slots followed by the stack slots. */
1859 if (ainfo->storage == RegTypeGeneral || ainfo->storage == RegTypeIRegPair || ainfo->storage == RegTypeStructByVal)
1861 else if (ainfo->storage == RegTypeBase)
1862 slot = PARAM_REGS + (ainfo->offset / 4);
1864 g_assert_not_reached ();
1867 p->regs [slot] = (mgreg_t)*arg;
1872 case MONO_TYPE_STRING:
1873 case MONO_TYPE_CLASS:
1874 case MONO_TYPE_ARRAY:
1875 case MONO_TYPE_SZARRAY:
1876 case MONO_TYPE_OBJECT:
1880 p->regs [slot] = (mgreg_t)*arg;
1882 case MONO_TYPE_BOOLEAN:
1884 p->regs [slot] = *(guint8*)arg;
1887 p->regs [slot] = *(gint8*)arg;
1890 p->regs [slot] = *(gint16*)arg;
1893 case MONO_TYPE_CHAR:
1894 p->regs [slot] = *(guint16*)arg;
1897 p->regs [slot] = *(gint32*)arg;
1900 p->regs [slot] = *(guint32*)arg;
/* 64-bit values occupy two consecutive slots. */
1904 p->regs [slot ++] = (mgreg_t)arg [0];
1905 p->regs [slot] = (mgreg_t)arg [1];
1908 p->regs [slot] = *(mgreg_t*)arg;
1911 p->regs [slot ++] = (mgreg_t)arg [0];
1912 p->regs [slot] = (mgreg_t)arg [1];
1914 case MONO_TYPE_GENERICINST:
1915 if (MONO_TYPE_IS_REFERENCE (t)) {
1916 p->regs [slot] = (mgreg_t)*arg;
1921 case MONO_TYPE_VALUETYPE:
1922 g_assert (ainfo->storage == RegTypeStructByVal);
/* size == 0 means the vtype is passed entirely on the stack. */
1924 if (ainfo->size == 0)
1925 slot = PARAM_REGS + (ainfo->offset / 4);
1929 for (j = 0; j < ainfo->size + ainfo->vtsize; ++j)
1930 p->regs [slot ++] = ((mgreg_t*)arg) [j];
1933 g_assert_not_reached ();
/*
 * mono_arch_finish_dyn_call:
 *
 *   After a dynamic call returns, convert the raw result (saved by the
 * dyn-call trampoline as res/res2 in the DynCallArgs buffer) to the type
 * required by the signature and store it in the RET buffer.
 */
1939 mono_arch_finish_dyn_call (MonoDynCallInfo *info, guint8 *buf)
1941 ArchDynCallInfo *ainfo = (ArchDynCallInfo*)info;
1942 MonoMethodSignature *sig = ((ArchDynCallInfo*)info)->sig;
1943 guint8 *ret = ((DynCallArgs*)buf)->ret;
1944 mgreg_t res = ((DynCallArgs*)buf)->res;
1945 mgreg_t res2 = ((DynCallArgs*)buf)->res2;
1947 switch (mono_type_get_underlying_type (sig->ret)->type) {
1948 case MONO_TYPE_VOID:
1949 *(gpointer*)ret = NULL;
1951 case MONO_TYPE_STRING:
1952 case MONO_TYPE_CLASS:
1953 case MONO_TYPE_ARRAY:
1954 case MONO_TYPE_SZARRAY:
1955 case MONO_TYPE_OBJECT:
1959 *(gpointer*)ret = (gpointer)res;
1965 case MONO_TYPE_BOOLEAN:
1966 *(guint8*)ret = res;
1969 *(gint16*)ret = res;
1972 case MONO_TYPE_CHAR:
1973 *(guint16*)ret = res;
1976 *(gint32*)ret = res;
1979 *(guint32*)ret = res;
1983 /* This handles endianness as well */
1984 ((gint32*)ret) [0] = res;
1985 ((gint32*)ret) [1] = res2;
1987 case MONO_TYPE_GENERICINST:
1988 if (MONO_TYPE_IS_REFERENCE (sig->ret)) {
1989 *(gpointer*)ret = (gpointer)res;
1994 case MONO_TYPE_VALUETYPE:
1995 g_assert (ainfo->cinfo->vtype_retaddr);
1998 #if defined(ARM_FPU_VFP)
2000 *(float*)ret = *(float*)&res;
2002 case MONO_TYPE_R8: {
/* FIX: was '*(double*)®s' — a mis-encoded '&regs' (HTML '&reg;' entity
 * corruption).  regs[] is filled from res/res2 and reinterpreted as a
 * double. */
2008 *(double*)ret = *(double*)&regs;
2013 g_assert_not_reached ();
2020 * Allow tracing to work with this interface (with an optional argument)
/*
 * mono_arch_instrument_prolog:
 *
 *   Emit a call to the tracing function FUNC at method entry, passing the
 * MonoMethod in r0 and a placeholder (NULL) frame pointer in r1.
 */
2024 mono_arch_instrument_prolog (MonoCompile *cfg, void *func, void *p, gboolean enable_arguments)
2028 code = mono_arm_emit_load_imm (code, ARMREG_R0, (guint32)cfg->method);
2029 ARM_MOV_REG_IMM8 (code, ARMREG_R1, 0); /* NULL ebp for now */
2030 code = mono_arm_emit_load_imm (code, ARMREG_R2, (guint32)func);
2031 code = emit_call_reg (code, ARMREG_R2);
/*
 * mono_arch_instrument_epilog_full:
 *
 *   Emit a call to the tracing function FUNC at method exit.  The return
 * value (r0/r1 or an FP register, depending on the return type) is saved to
 * the param area around the call and restored afterwards.
 */
2044 mono_arch_instrument_epilog_full (MonoCompile *cfg, void *func, void *p, gboolean enable_arguments, gboolean preserve_argument_registers)
2047 int save_mode = SAVE_NONE;
2049 MonoMethod *method = cfg->method;
2050 int rtype = mini_type_get_underlying_type (cfg->generic_sharing_context, mono_method_signature (method)->ret)->type;
2051 int save_offset = cfg->param_area;
/* Grow the code buffer if the epilog instrumentation might not fit. */
2055 offset = code - cfg->native_code;
2056 /* we need about 16 instructions */
2057 if (offset > (cfg->code_size - 16 * 4)) {
2058 cfg->code_size *= 2;
2059 cfg->native_code = g_realloc (cfg->native_code, cfg->code_size);
2060 code = cfg->native_code + offset;
2063 case MONO_TYPE_VOID:
2064 /* special case string .ctor icall */
/* FIX: the condition previously read 'if (strcmp (...) && ...)' — strcmp
 * returns non-zero when the strings DIFFER, so the special case applied to
 * every string-class method EXCEPT ".ctor".  Negate to match only ".ctor". */
2065 if (!strcmp (".ctor", method->name) && method->klass == mono_defaults.string_class)
2066 save_mode = SAVE_ONE;
2068 save_mode = SAVE_NONE;
2072 save_mode = SAVE_TWO;
2076 save_mode = SAVE_FP;
2078 case MONO_TYPE_VALUETYPE:
2079 save_mode = SAVE_STRUCT;
2082 save_mode = SAVE_ONE;
/* Save the return value before the tracing call clobbers r0/r1. */
2086 switch (save_mode) {
2088 ARM_STR_IMM (code, ARMREG_R0, cfg->frame_reg, save_offset);
2089 ARM_STR_IMM (code, ARMREG_R1, cfg->frame_reg, save_offset + 4);
2090 if (enable_arguments) {
2091 ARM_MOV_REG_REG (code, ARMREG_R2, ARMREG_R1);
2092 ARM_MOV_REG_REG (code, ARMREG_R1, ARMREG_R0);
2096 ARM_STR_IMM (code, ARMREG_R0, cfg->frame_reg, save_offset);
2097 if (enable_arguments) {
2098 ARM_MOV_REG_REG (code, ARMREG_R1, ARMREG_R0);
2102 /* FIXME: what reg? */
2103 if (enable_arguments) {
2104 /* FIXME: what reg? */
2108 if (enable_arguments) {
2109 /* FIXME: get the actual address */
2110 ARM_MOV_REG_REG (code, ARMREG_R1, ARMREG_R0);
/* Call FUNC (method, ...) via IP. */
2118 code = mono_arm_emit_load_imm (code, ARMREG_R0, (guint32)cfg->method);
2119 code = mono_arm_emit_load_imm (code, ARMREG_IP, (guint32)func);
2120 code = emit_call_reg (code, ARMREG_IP);
/* Restore the saved return value. */
2122 switch (save_mode) {
2124 ARM_LDR_IMM (code, ARMREG_R0, cfg->frame_reg, save_offset);
2125 ARM_LDR_IMM (code, ARMREG_R1, cfg->frame_reg, save_offset + 4);
2128 ARM_LDR_IMM (code, ARMREG_R0, cfg->frame_reg, save_offset);
2142 * The immediate field for cond branches is big enough for all reasonable methods
2144 #define EMIT_COND_BRANCH_FLAGS(ins,condcode) \
2145 if (0 && ins->inst_true_bb->native_offset) { \
2146 ARM_B_COND (code, (condcode), (code - cfg->native_code + ins->inst_true_bb->native_offset) & 0xffffff); \
2148 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_BB, ins->inst_true_bb); \
2149 ARM_B_COND (code, (condcode), 0); \
2152 #define EMIT_COND_BRANCH(ins,cond) EMIT_COND_BRANCH_FLAGS(ins, branch_cc_table [(cond)])
2154 /* emit an exception if the condition fails
2156 * We assign the extra code used to throw the implicit exceptions
2157 * to cfg->bb_exit as far as the big branch handling is concerned
2159 #define EMIT_COND_SYSTEM_EXCEPTION_FLAGS(condcode,exc_name) \
2161 mono_add_patch_info (cfg, code - cfg->native_code, \
2162 MONO_PATCH_INFO_EXC, exc_name); \
2163 ARM_BL_COND (code, (condcode), 0); \
2166 #define EMIT_COND_SYSTEM_EXCEPTION(cond,exc_name) EMIT_COND_SYSTEM_EXCEPTION_FLAGS(branch_cc_table [(cond)], (exc_name))
2169 mono_arch_peephole_pass_1 (MonoCompile *cfg, MonoBasicBlock *bb)
/*
 * mono_arch_peephole_pass_2:
 *
 *   Local peephole optimizations over a basic block: mainly fold redundant
 * load-after-store / load-after-load pairs into moves or deletions, and
 * remove no-op or mutually-cancelling moves.
 */
2174 mono_arch_peephole_pass_2 (MonoCompile *cfg, MonoBasicBlock *bb)
2176 MonoInst *ins, *n, *last_ins = NULL;
2178 MONO_BB_FOR_EACH_INS_SAFE (bb, n, ins) {
2179 switch (ins->opcode) {
2182 /* Already done by an arch-independent pass */
2184 case OP_LOAD_MEMBASE:
2185 case OP_LOADI4_MEMBASE:
2187 * OP_STORE_MEMBASE_REG reg, offset(basereg)
2188 * OP_LOAD_MEMBASE offset(basereg), reg
/* Load from the slot just stored to: drop the load or turn it into a move. */
2190 if (last_ins && (last_ins->opcode == OP_STOREI4_MEMBASE_REG
2191 || last_ins->opcode == OP_STORE_MEMBASE_REG) &&
2192 ins->inst_basereg == last_ins->inst_destbasereg &&
2193 ins->inst_offset == last_ins->inst_offset) {
2194 if (ins->dreg == last_ins->sreg1) {
2195 MONO_DELETE_INS (bb, ins);
2198 //static int c = 0; printf ("MATCHX %s %d\n", cfg->method->name,c++);
2199 ins->opcode = OP_MOVE;
2200 ins->sreg1 = last_ins->sreg1;
2204 * Note: reg1 must be different from the basereg in the second load
2205 * OP_LOAD_MEMBASE offset(basereg), reg1
2206 * OP_LOAD_MEMBASE offset(basereg), reg2
2208 * OP_LOAD_MEMBASE offset(basereg), reg1
2209 * OP_MOVE reg1, reg2
/* NOTE(review): '} if (...)' (not 'else if') matches upstream; the branches
 * are mutually exclusive in practice because the first one requires a store
 * as last_ins and this one requires a load. */
2211 } if (last_ins && (last_ins->opcode == OP_LOADI4_MEMBASE
2212 || last_ins->opcode == OP_LOAD_MEMBASE) &&
2213 ins->inst_basereg != last_ins->dreg &&
2214 ins->inst_basereg == last_ins->inst_basereg &&
2215 ins->inst_offset == last_ins->inst_offset) {
2217 if (ins->dreg == last_ins->dreg) {
2218 MONO_DELETE_INS (bb, ins);
2221 ins->opcode = OP_MOVE;
2222 ins->sreg1 = last_ins->dreg;
2225 //g_assert_not_reached ();
2229 * OP_STORE_MEMBASE_IMM imm, offset(basereg)
2230 * OP_LOAD_MEMBASE offset(basereg), reg
2232 * OP_STORE_MEMBASE_IMM imm, offset(basereg)
2233 * OP_ICONST reg, imm
2235 } else if (last_ins && (last_ins->opcode == OP_STOREI4_MEMBASE_IMM
2236 || last_ins->opcode == OP_STORE_MEMBASE_IMM) &&
2237 ins->inst_basereg == last_ins->inst_destbasereg &&
2238 ins->inst_offset == last_ins->inst_offset) {
2239 //static int c = 0; printf ("MATCHX %s %d\n", cfg->method->name,c++);
2240 ins->opcode = OP_ICONST;
2241 ins->inst_c0 = last_ins->inst_imm;
2242 g_assert_not_reached (); // check this rule
/* Narrow load after a narrow store: replace with a sign/zero extension. */
2246 case OP_LOADU1_MEMBASE:
2247 case OP_LOADI1_MEMBASE:
2248 if (last_ins && (last_ins->opcode == OP_STOREI1_MEMBASE_REG) &&
2249 ins->inst_basereg == last_ins->inst_destbasereg &&
2250 ins->inst_offset == last_ins->inst_offset) {
2251 ins->opcode = (ins->opcode == OP_LOADI1_MEMBASE) ? OP_ICONV_TO_I1 : OP_ICONV_TO_U1;
2252 ins->sreg1 = last_ins->sreg1;
2255 case OP_LOADU2_MEMBASE:
2256 case OP_LOADI2_MEMBASE:
2257 if (last_ins && (last_ins->opcode == OP_STOREI2_MEMBASE_REG) &&
2258 ins->inst_basereg == last_ins->inst_destbasereg &&
2259 ins->inst_offset == last_ins->inst_offset) {
2260 ins->opcode = (ins->opcode == OP_LOADI2_MEMBASE) ? OP_ICONV_TO_I2 : OP_ICONV_TO_U2;
2261 ins->sreg1 = last_ins->sreg1;
2265 ins->opcode = OP_MOVE;
/* Remove self-moves and a move that undoes the immediately preceding move. */
2269 if (ins->dreg == ins->sreg1) {
2270 MONO_DELETE_INS (bb, ins);
2274 * OP_MOVE sreg, dreg
2275 * OP_MOVE dreg, sreg
2277 if (last_ins && last_ins->opcode == OP_MOVE &&
2278 ins->sreg1 == last_ins->dreg &&
2279 ins->dreg == last_ins->sreg1) {
2280 MONO_DELETE_INS (bb, ins);
2288 bb->last_ins = last_ins;
2292 * the branch_cc_table should maintain the order of these
2306 branch_cc_table [] = {
2320 #define NEW_INS(cfg,dest,op) do { \
2321 MONO_INST_NEW ((cfg), (dest), (op)); \
2322 mono_bblock_insert_before_ins (bb, ins, (dest)); \
/*
 * map_to_reg_reg_op:
 *
 *   Map an opcode that takes an immediate or a membase operand to its
 * register-register equivalent (…_IMM -> plain op, …_MEMBASE -> …_MEMINDEX,
 * …_MEMBASE_IMM -> …_MEMBASE_REG), used by the lowering pass when an
 * immediate does not fit in the instruction encoding.
 */
2326 map_to_reg_reg_op (int op)
2335 case OP_COMPARE_IMM:
2337 case OP_ICOMPARE_IMM:
2351 case OP_LOAD_MEMBASE:
2352 return OP_LOAD_MEMINDEX;
2353 case OP_LOADI4_MEMBASE:
2354 return OP_LOADI4_MEMINDEX;
2355 case OP_LOADU4_MEMBASE:
2356 return OP_LOADU4_MEMINDEX;
2357 case OP_LOADU1_MEMBASE:
2358 return OP_LOADU1_MEMINDEX;
2359 case OP_LOADI2_MEMBASE:
2360 return OP_LOADI2_MEMINDEX;
2361 case OP_LOADU2_MEMBASE:
2362 return OP_LOADU2_MEMINDEX;
2363 case OP_LOADI1_MEMBASE:
2364 return OP_LOADI1_MEMINDEX;
2365 case OP_STOREI1_MEMBASE_REG:
2366 return OP_STOREI1_MEMINDEX;
2367 case OP_STOREI2_MEMBASE_REG:
2368 return OP_STOREI2_MEMINDEX;
2369 case OP_STOREI4_MEMBASE_REG:
2370 return OP_STOREI4_MEMINDEX;
2371 case OP_STORE_MEMBASE_REG:
2372 return OP_STORE_MEMINDEX;
2373 case OP_STORER4_MEMBASE_REG:
2374 return OP_STORER4_MEMINDEX;
2375 case OP_STORER8_MEMBASE_REG:
2376 return OP_STORER8_MEMINDEX;
/* Immediate stores first become register stores (the lowering pass
 * materializes the immediate into a register). */
2377 case OP_STORE_MEMBASE_IMM:
2378 return OP_STORE_MEMBASE_REG;
2379 case OP_STOREI1_MEMBASE_IMM:
2380 return OP_STOREI1_MEMBASE_REG;
2381 case OP_STOREI2_MEMBASE_IMM:
2382 return OP_STOREI2_MEMBASE_REG;
2383 case OP_STOREI4_MEMBASE_IMM:
2384 return OP_STOREI4_MEMBASE_REG;
2386 g_assert_not_reached ();
2390 * Remove from the instruction list the instructions that can't be
2391 * represented with very simple instructions with no register
/*
 * mono_arch_lowering_pass:
 *
 *   Rewrite instructions whose immediates/offsets do not fit the ARM
 * encodings: immediates become OP_ICONST temporaries plus the reg-reg form
 * of the opcode (via map_to_reg_reg_op), large membase offsets become
 * indexed addressing or an ADD_IMM base adjustment for FP loads/stores.
 */
2395 mono_arch_lowering_pass (MonoCompile *cfg, MonoBasicBlock *bb)
2397 MonoInst *ins, *temp, *last_ins = NULL;
2398 int rot_amount, imm8, low_imm;
2400 MONO_BB_FOR_EACH_INS (bb, ins) {
2402 switch (ins->opcode) {
2406 case OP_COMPARE_IMM:
2407 case OP_ICOMPARE_IMM:
/* ARM data-processing immediates are an 8-bit value rotated right by an
 * even amount; anything else must be loaded into a register first. */
2421 if ((imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount)) < 0) {
2422 NEW_INS (cfg, temp, OP_ICONST);
2423 temp->inst_c0 = ins->inst_imm;
2424 temp->dreg = mono_alloc_ireg (cfg);
2425 ins->sreg2 = temp->dreg;
2426 ins->opcode = mono_op_imm_to_op (ins->opcode);
2428 if (ins->opcode == OP_SBB || ins->opcode == OP_ISBB || ins->opcode == OP_SUBCC)
/* Multiply-by-immediate: strength-reduce trivial cases, otherwise
 * materialize the constant (ARM has no mul-immediate). */
2434 if (ins->inst_imm == 1) {
2435 ins->opcode = OP_MOVE;
2438 if (ins->inst_imm == 0) {
2439 ins->opcode = OP_ICONST;
2443 imm8 = mono_is_power_of_two (ins->inst_imm);
2445 ins->opcode = OP_SHL_IMM;
2446 ins->inst_imm = imm8;
2449 NEW_INS (cfg, temp, OP_ICONST);
2450 temp->inst_c0 = ins->inst_imm;
2451 temp->dreg = mono_alloc_ireg (cfg);
2452 ins->sreg2 = temp->dreg;
2453 ins->opcode = OP_IMUL;
2459 if (ins->next && (ins->next->opcode == OP_COND_EXC_C || ins->next->opcode == OP_COND_EXC_IC))
2460 /* ARM sets the C flag to 1 if there was _no_ overflow */
2461 ins->next->opcode = OP_COND_EXC_NC;
2463 case OP_LOCALLOC_IMM:
2464 NEW_INS (cfg, temp, OP_ICONST);
2465 temp->inst_c0 = ins->inst_imm;
2466 temp->dreg = mono_alloc_ireg (cfg);
2467 ins->sreg1 = temp->dreg;
2468 ins->opcode = OP_LOCALLOC;
2470 case OP_LOAD_MEMBASE:
2471 case OP_LOADI4_MEMBASE:
2472 case OP_LOADU4_MEMBASE:
2473 case OP_LOADU1_MEMBASE:
2474 /* we can do two things: load the immed in a register
2475 * and use an indexed load, or see if the immed can be
2476 * represented as an ad_imm + a load with a smaller offset
2477 * that fits. We just do the first for now, optimize later.
2479 if (arm_is_imm12 (ins->inst_offset))
2481 NEW_INS (cfg, temp, OP_ICONST);
2482 temp->inst_c0 = ins->inst_offset;
2483 temp->dreg = mono_alloc_ireg (cfg);
2484 ins->sreg2 = temp->dreg;
2485 ins->opcode = map_to_reg_reg_op (ins->opcode);
/* Halfword/signed-byte loads only have an 8-bit offset encoding. */
2487 case OP_LOADI2_MEMBASE:
2488 case OP_LOADU2_MEMBASE:
2489 case OP_LOADI1_MEMBASE:
2490 if (arm_is_imm8 (ins->inst_offset))
2492 NEW_INS (cfg, temp, OP_ICONST);
2493 temp->inst_c0 = ins->inst_offset;
2494 temp->dreg = mono_alloc_ireg (cfg);
2495 ins->sreg2 = temp->dreg;
2496 ins->opcode = map_to_reg_reg_op (ins->opcode);
2498 case OP_LOADR4_MEMBASE:
2499 case OP_LOADR8_MEMBASE:
2500 if (arm_is_fpimm8 (ins->inst_offset))
/* FP loads have no indexed form: split the offset into a rotated-imm8
 * base adjustment plus a small remaining offset. */
2502 low_imm = ins->inst_offset & 0x1ff;
2503 if ((imm8 = mono_arm_is_rotated_imm8 (ins->inst_offset & ~0x1ff, &rot_amount)) >= 0) {
2504 NEW_INS (cfg, temp, OP_ADD_IMM);
2505 temp->inst_imm = ins->inst_offset & ~0x1ff;
2506 temp->sreg1 = ins->inst_basereg;
2507 temp->dreg = mono_alloc_ireg (cfg);
2508 ins->inst_basereg = temp->dreg;
2509 ins->inst_offset = low_imm;
2512 /* VFP/FPA doesn't have indexed load instructions */
2513 g_assert_not_reached ();
2515 case OP_STORE_MEMBASE_REG:
2516 case OP_STOREI4_MEMBASE_REG:
2517 case OP_STOREI1_MEMBASE_REG:
2518 if (arm_is_imm12 (ins->inst_offset))
2520 NEW_INS (cfg, temp, OP_ICONST);
2521 temp->inst_c0 = ins->inst_offset;
2522 temp->dreg = mono_alloc_ireg (cfg);
2523 ins->sreg2 = temp->dreg;
2524 ins->opcode = map_to_reg_reg_op (ins->opcode);
2526 case OP_STOREI2_MEMBASE_REG:
2527 if (arm_is_imm8 (ins->inst_offset))
2529 NEW_INS (cfg, temp, OP_ICONST);
2530 temp->inst_c0 = ins->inst_offset;
2531 temp->dreg = mono_alloc_ireg (cfg);
2532 ins->sreg2 = temp->dreg;
2533 ins->opcode = map_to_reg_reg_op (ins->opcode);
2535 case OP_STORER4_MEMBASE_REG:
2536 case OP_STORER8_MEMBASE_REG:
2537 if (arm_is_fpimm8 (ins->inst_offset))
2539 low_imm = ins->inst_offset & 0x1ff;
2540 if ((imm8 = mono_arm_is_rotated_imm8 (ins->inst_offset & ~ 0x1ff, &rot_amount)) >= 0 && arm_is_fpimm8 (low_imm)) {
2541 NEW_INS (cfg, temp, OP_ADD_IMM);
2542 temp->inst_imm = ins->inst_offset & ~0x1ff;
2543 temp->sreg1 = ins->inst_destbasereg;
2544 temp->dreg = mono_alloc_ireg (cfg);
2545 ins->inst_destbasereg = temp->dreg;
2546 ins->inst_offset = low_imm;
2549 /*g_print ("fail with: %d (%d, %d)\n", ins->inst_offset, ins->inst_offset & ~0x1ff, low_imm);*/
2550 /* VFP/FPA doesn't have indexed store instructions */
2551 g_assert_not_reached ();
2553 case OP_STORE_MEMBASE_IMM:
2554 case OP_STOREI1_MEMBASE_IMM:
2555 case OP_STOREI2_MEMBASE_IMM:
2556 case OP_STOREI4_MEMBASE_IMM:
/* First materialize the stored immediate... */
2557 NEW_INS (cfg, temp, OP_ICONST);
2558 temp->inst_c0 = ins->inst_imm;
2559 temp->dreg = mono_alloc_ireg (cfg);
2560 ins->sreg1 = temp->dreg;
2561 ins->opcode = map_to_reg_reg_op (ins->opcode);
/* ...then re-lower the resulting register store (its offset may be big). */
2563 goto loop_start; /* make it handle the possibly big ins->inst_offset */
2565 gboolean swap = FALSE;
2569 /* Optimized away */
2574 /* Some fp compares require swapped operands */
2575 switch (ins->next->opcode) {
2577 ins->next->opcode = OP_FBLT;
2581 ins->next->opcode = OP_FBLT_UN;
2585 ins->next->opcode = OP_FBGE;
2589 ins->next->opcode = OP_FBGE_UN;
2597 ins->sreg1 = ins->sreg2;
2606 bb->last_ins = last_ins;
2607 bb->max_vreg = cfg->next_vreg;
/*
 * mono_arch_decompose_long_opts:
 *
 *   Decompose 64-bit opcodes into 32-bit IR.  OP_LNEG becomes
 * RSBS/RSC of the two halves (0 - lo with carry into 0 - hi).
 */
2611 mono_arch_decompose_long_opts (MonoCompile *cfg, MonoInst *long_ins)
2615 if (long_ins->opcode == OP_LNEG) {
2617 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ARM_RSBS_IMM, ins->dreg + 1, ins->sreg1 + 1, 0);
2618 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ARM_RSC_IMM, ins->dreg + 2, ins->sreg1 + 2, 0);
/*
 * emit_float_to_int:
 *
 *   Emit native code that converts the FP value in SREG to an integer in
 * DREG, truncating toward zero, then narrows it to SIZE bytes with the
 * requested signedness (shift pairs emulate sign/zero extension).
 */
2624 emit_float_to_int (MonoCompile *cfg, guchar *code, int dreg, int sreg, int size, gboolean is_signed)
2626 /* sreg is a float, dreg is an integer reg */
2628 ARM_FIXZ (code, dreg, sreg);
2629 #elif defined(ARM_FPU_VFP)
/* VFP: convert in F0, then move the result to the integer register. */
2631 ARM_TOSIZD (code, ARM_VFP_F0, sreg);
2633 ARM_TOUIZD (code, ARM_VFP_F0, sreg);
2634 ARM_FMRS (code, dreg, ARM_VFP_F0);
/* Unsigned narrowing: mask (u1) or shift-left/logical-shift-right (u2). */
2638 ARM_AND_REG_IMM8 (code, dreg, dreg, 0xff);
2639 else if (size == 2) {
2640 ARM_SHL_IMM (code, dreg, dreg, 16);
2641 ARM_SHR_IMM (code, dreg, dreg, 16);
/* Signed narrowing: shift-left/arithmetic-shift-right pairs. */
2645 ARM_SHL_IMM (code, dreg, dreg, 24);
2646 ARM_SAR_IMM (code, dreg, dreg, 24);
2647 } else if (size == 2) {
2648 ARM_SHL_IMM (code, dreg, dreg, 16);
2649 ARM_SAR_IMM (code, dreg, dreg, 16);
2655 #endif /* #ifndef DISABLE_JIT */
2659 const guchar *target;
2664 #define is_call_imm(diff) ((gint)(diff) >= -33554432 && (gint)(diff) <= 33554431)
/*
 * search_thunk_slot:
 *
 *   mono_domain_code_foreach () callback: look through a code chunk's thunk
 * area for either an existing thunk targeting pdata->target or a free slot
 * in which to emit one, then patch the call site to branch to the thunk.
 * Sets pdata->found on success.  Each thunk is 3 words (12 bytes).
 */
2667 search_thunk_slot (void *data, int csize, int bsize, void *user_data) {
2668 PatchData *pdata = (PatchData*)user_data;
2669 guchar *code = data;
2670 guint32 *thunks = data;
2671 guint32 *endthunks = (guint32*)(code + bsize);
2673 int difflow, diffhigh;
2675 /* always ensure a call from pdata->code can reach to the thunks without further thunks */
2676 difflow = (char*)pdata->code - (char*)thunks;
2677 diffhigh = (char*)pdata->code - (char*)endthunks;
2678 if (!((is_call_imm (thunks) && is_call_imm (endthunks)) || (is_call_imm (difflow) && is_call_imm (diffhigh))))
2682 * The thunk is composed of 3 words:
2683 * load constant from thunks [2] into ARM_IP
2686 * Note that the LR register is already setup
2688 //g_print ("thunk nentries: %d\n", ((char*)endthunks - (char*)thunks)/16);
/* pdata->found == 2 means "scan every chunk"; otherwise only the chunk
 * containing the call site. */
2689 if ((pdata->found == 2) || (pdata->code >= code && pdata->code <= code + csize)) {
2690 while (thunks < endthunks) {
2691 //g_print ("looking for target: %p at %p (%08x-%08x)\n", pdata->target, thunks, thunks [0], thunks [1]);
/* Reuse an existing thunk whose target word already matches. */
2692 if (thunks [2] == (guint32)pdata->target) {
2693 arm_patch (pdata->code, (guchar*)thunks);
2694 mono_arch_flush_icache (pdata->code, 4);
2697 } else if ((thunks [0] == 0) && (thunks [1] == 0) && (thunks [2] == 0)) {
2698 /* found a free slot instead: emit thunk */
2699 /* ARMREG_IP is fine to use since this can't be an IMT call
2702 code = (guchar*)thunks;
2703 ARM_LDR_IMM (code, ARMREG_IP, ARMREG_PC, 0);
2704 if (thumb_supported)
2705 ARM_BX (code, ARMREG_IP);
2707 ARM_MOV_REG_REG (code, ARMREG_PC, ARMREG_IP);
2708 thunks [2] = (guint32)pdata->target;
2709 mono_arch_flush_icache ((guchar*)thunks, 12);
2711 arm_patch (pdata->code, (guchar*)thunks);
2712 mono_arch_flush_icache (pdata->code, 4);
2716 /* skip 12 bytes, the size of the thunk */
2720 //g_print ("failed thunk lookup for %p from %p at %p (%d entries)\n", pdata->target, pdata->code, data, count);
/*
 * handle_thunk:
 *
 *   Patch a branch at CODE to reach TARGET through a thunk, searching the
 * domain's code chunks via search_thunk_slot ().  A second pass with
 * pdata.found == 2 widens the search to any available slot; failure to
 * find one is fatal (g_assert).
 */
2726 handle_thunk (MonoDomain *domain, int absolute, guchar *code, const guchar *target)
2731 domain = mono_domain_get ();
2734 pdata.target = target;
2735 pdata.absolute = absolute;
2738 mono_domain_lock (domain);
2739 mono_domain_code_foreach (domain, search_thunk_slot, &pdata);
2742 /* this uses the first available slot */
2744 mono_domain_code_foreach (domain, search_thunk_slot, &pdata);
2746 mono_domain_unlock (domain);
2748 if (pdata.found != 1)
2749 g_print ("thunk failed for %p from %p\n", target, code);
2750 g_assert (pdata.found == 1);
/*
 * arm_patch_general:
 * Patch the call/branch site at CODE so control reaches TARGET.
 * Three shapes are handled, distinguished by decoding the instruction:
 *  - direct B/BL (primary opcode 101b): rewrite the 24-bit signed
 *    displacement in place, converting BL -> BLX when TARGET has its
 *    low bit set (Thumb entry point);
 *  - BX/BLX-register sequences: patch the address constant embedded in
 *    the recognized code sequence;
 *  - ldr-pc constant-pool call sequences: patch the embedded constant.
 * Falls back to handle_thunk () when the displacement is out of range.
 * NOTE(review): many lines are elided in this view (returns, #ifdef arms,
 * some assertions), so the control flow below is incomplete.
 */
2754 arm_patch_general (MonoDomain *domain, guchar *code, const guchar *target)
2756 guint32 *code32 = (void*)code;
2757 guint32 ins = *code32;
/* bits 27:25 select the primary instruction class */
2758 guint32 prim = (ins >> 25) & 7;
2759 guint32 tval = GPOINTER_TO_UINT (target);
2761 //g_print ("patching 0x%08x (0x%08x) to point to 0x%08x\n", code, ins, target);
2762 if (prim == 5) { /* 101b */
2763 /* the diff starts 8 bytes from the branch opcode */
2764 gint diff = target - code - 8;
2766 gint tmask = 0xffffffff;
/* low bit of TARGET set means a Thumb entry point: a BLX is required */
2767 if (tval & 1) { /* entering thumb mode */
2768 diff = target - 1 - code - 8;
2769 g_assert (thumb_supported);
2770 tbits = 0xf << 28; /* bl->blx bit pattern */
2771 g_assert ((ins & (1 << 24))); /* it must be a bl, not b instruction */
2772 /* this low bit of the displacement is moved to bit 24 in the instruction encoding */
2776 tmask = ~(1 << 24); /* clear the link bit */
2777 /*g_print ("blx to thumb: target: %p, code: %p, diff: %d, mask: %x\n", target, code, diff, tmask);*/
/* 33554431 == 2^25 - 1: the largest positive displacement encodable in 24 bits (word-scaled) */
2782 if (diff <= 33554431) {
2784 ins = (ins & 0xff000000) | diff;
2786 *code32 = ins | tbits;
2790 /* diff between 0 and -33554432 */
2791 if (diff >= -33554432) {
2793 ins = (ins & 0xff000000) | (diff & ~0xff000000);
2795 *code32 = ins | tbits;
/* displacement out of range: route the branch through a thunk instead */
2800 handle_thunk (domain, TRUE, code, target);
2805 * The alternative call sequences looks like this:
2807 * ldr ip, [pc] // loads the address constant
2808 * b 1f // jumps around the constant
2809 * address constant embedded in the code
2814 * There are two cases for patching:
2815 * a) at the end of method emission: in this case code points to the start
2816 * of the call sequence
2817 * b) during runtime patching of the call site: in this case code points
2818 * to the mov pc, ip instruction
2820 * We have to handle also the thunk jump code sequence:
2824 * address constant // execution never reaches here
/* 0x12fff1x is the BX-register encoding (mask strips the condition and register fields) */
2826 if ((ins & 0x0ffffff0) == 0x12fff10) {
2827 /* Branch and exchange: the address is constructed in a reg
2828 * We can patch BX when the code sequence is the following:
2829 * ldr ip, [pc, #0] ; 0x8
/* re-emit the expected sequence into ccode so it can be byte-compared against the code stream */
2836 guint8 *emit = (guint8*)ccode;
2837 ARM_LDR_IMM (emit, ARMREG_IP, ARMREG_PC, 0);
2839 ARM_MOV_REG_REG (emit, ARMREG_LR, ARMREG_PC);
2840 ARM_BX (emit, ARMREG_IP);
2842 /*patching from magic trampoline*/
2843 if (ins == ccode [3]) {
2844 g_assert (code32 [-4] == ccode [0]);
2845 g_assert (code32 [-3] == ccode [1]);
2846 g_assert (code32 [-1] == ccode [2]);
/* the address constant lives two words before the BX */
2847 code32 [-2] = (guint32)target;
2850 /*patching from JIT*/
2851 if (ins == ccode [0]) {
2852 g_assert (code32 [1] == ccode [1]);
2853 g_assert (code32 [3] == ccode [2]);
2854 g_assert (code32 [4] == ccode [3]);
2855 code32 [2] = (guint32)target;
/* the sequence at CODE matched neither known BX shape: refuse to guess */
2858 g_assert_not_reached ();
/* 0x12fff3x is the BLX-register encoding */
2859 } else if ((ins & 0x0ffffff0) == 0x12fff30) {
2867 guint8 *emit = (guint8*)ccode;
2868 ARM_LDR_IMM (emit, ARMREG_IP, ARMREG_PC, 0);
2870 ARM_BLX_REG (emit, ARMREG_IP);
2872 g_assert (code32 [-3] == ccode [0]);
2873 g_assert (code32 [-2] == ccode [1]);
2874 g_assert (code32 [0] == ccode [2]);
2876 code32 [-1] = (guint32)target;
/* fall-through case: ldr/mov-pc constant-pool call or thunk jump sequence */
2879 guint32 *tmp = ccode;
2880 guint8 *emit = (guint8*)tmp;
2881 ARM_LDR_IMM (emit, ARMREG_IP, ARMREG_PC, 0);
2882 ARM_MOV_REG_REG (emit, ARMREG_LR, ARMREG_PC);
2883 ARM_MOV_REG_REG (emit, ARMREG_PC, ARMREG_IP);
2884 ARM_BX (emit, ARMREG_IP);
2885 if (ins == ccode [2]) {
2886 g_assert_not_reached (); // should be -2 ...
2887 code32 [-1] = (guint32)target;
2890 if (ins == ccode [0]) {
2891 /* handles both thunk jump code and the far call sequence */
2892 code32 [2] = (guint32)target;
2895 g_assert_not_reached ();
2897 // g_print ("patched with 0x%08x\n", ins);
/*
 * arm_patch:
 * Convenience wrapper around arm_patch_general () for call sites that
 * have no MonoDomain at hand (a NULL domain makes the thunk fallback
 * look it up via mono_domain_get ()).
 */
2901 arm_patch (guchar *code, const guchar *target)
2903 arm_patch_general (NULL, code, target);
2907 * Return the >= 0 uimm8 value if val can be represented with a byte + rotation
2908 * (with the rotation amount in *rot_amount. rot_amount is already adjusted
2909 * to be used with the emit macros.
2910 * Return -1 otherwise.
2913 mono_arm_is_rotated_imm8 (guint32 val, gint *rot_amount)
/* ARM data-processing immediates allow only even rotation amounts, hence the step of 2 */
2916 for (i = 0; i < 31; i+= 2) {
/* rotate val left by i: if the result fits in 8 bits it is encodable (check elided in this view) */
2917 res = (val << (32 - i)) | (val >> i);
2920 *rot_amount = i? 32 - i: 0;
2927 * Emits in code a sequence of instructions that load the value 'val'
2928 * into the dreg register. Uses at most 4 instructions.
2931 mono_arm_emit_load_imm (guint8 *code, int dreg, guint32 val)
2933 int imm8, rot_amount;
/* NOTE(review): elided #if/#else arms select between the strategies below;
 * this first pair loads dreg from a PC-relative constant pool slot */
2935 ARM_LDR_IMM (code, dreg, ARMREG_PC, 0);
2936 /* skip the constant pool */
/* single MOV if val is an 8-bit-rotated immediate */
2942 if ((imm8 = mono_arm_is_rotated_imm8 (val, &rot_amount)) >= 0) {
2943 ARM_MOV_REG_IMM (code, dreg, imm8, rot_amount);
/* single MVN if the bitwise complement is encodable instead */
2944 } else if ((imm8 = mono_arm_is_rotated_imm8 (~val, &rot_amount)) >= 0) {
2945 ARM_MVN_REG_IMM (code, dreg, imm8, rot_amount);
/* MOVW/MOVT pair — presumably gated on v7_supported by an elided check; confirm */
2948 ARM_MOVW_REG_IMM (code, dreg, val & 0xffff);
2950 ARM_MOVT_REG_IMM (code, dreg, (val >> 16) & 0xffff);
/* generic fallback: build val one byte at a time, MOV then up to three rotated ADDs */
2954 ARM_MOV_REG_IMM8 (code, dreg, (val & 0xFF));
2956 ARM_ADD_REG_IMM (code, dreg, dreg, (val & 0xFF00) >> 8, 24);
2958 if (val & 0xFF0000) {
2959 ARM_ADD_REG_IMM (code, dreg, dreg, (val & 0xFF0000) >> 16, 16);
2961 if (val & 0xFF000000) {
2962 ARM_ADD_REG_IMM (code, dreg, dreg, (val & 0xFF000000) >> 24, 8);
/* same ladder when the low byte is zero: start the MOV at the first non-zero byte */
2964 } else if (val & 0xFF00) {
2965 ARM_MOV_REG_IMM (code, dreg, (val & 0xFF00) >> 8, 24);
2966 if (val & 0xFF0000) {
2967 ARM_ADD_REG_IMM (code, dreg, dreg, (val & 0xFF0000) >> 16, 16);
2969 if (val & 0xFF000000) {
2970 ARM_ADD_REG_IMM (code, dreg, dreg, (val & 0xFF000000) >> 24, 8);
2972 } else if (val & 0xFF0000) {
2973 ARM_MOV_REG_IMM (code, dreg, (val & 0xFF0000) >> 16, 16);
2974 if (val & 0xFF000000) {
2975 ARM_ADD_REG_IMM (code, dreg, dreg, (val & 0xFF000000) >> 24, 8);
2978 //g_assert_not_reached ();
/*
 * mono_arm_thumb_supported:
 * Accessor for the file-scope thumb_supported flag (set during CPU
 * feature detection elsewhere in this file); non-zero when the current
 * CPU can execute Thumb code and BL->BLX patching is allowed.
 */
2984 mono_arm_thumb_supported (void)
2986 return thumb_supported;
2992 * emit_load_volatile_arguments:
2994 * Load volatile arguments from the stack to the original input registers.
2995 * Required before a tail call.
2998 emit_load_volatile_arguments (MonoCompile *cfg, guint8 *code)
3000 MonoMethod *method = cfg->method;
3001 MonoMethodSignature *sig;
3006 /* FIXME: Generate intermediate code instead */
3008 sig = mono_method_signature (method);
3010 /* This is the opposite of the code in emit_prolog */
3014 cinfo = get_call_info (NULL, sig, sig->pinvoke);
/* struct returns pass a hidden return-value address: reload it into its register */
3016 if (MONO_TYPE_ISSTRUCT (sig->ret)) {
3017 ArgInfo *ainfo = &cinfo->ret;
3018 inst = cfg->vret_addr;
3019 g_assert (arm_is_imm12 (inst->inst_offset));
3020 ARM_LDR_IMM (code, ainfo->reg, inst->inst_basereg, inst->inst_offset);
/* walk every formal argument (plus 'this') and restore it per its storage class */
3022 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
3023 ArgInfo *ainfo = cinfo->args + i;
3024 inst = cfg->args [pos];
3026 if (cfg->verbose_level > 2)
3027 g_print ("Loading argument %d (type: %d)\n", i, ainfo->storage);
/* argument currently lives in a register */
3028 if (inst->opcode == OP_REGVAR) {
3029 if (ainfo->storage == RegTypeGeneral)
3030 ARM_MOV_REG_REG (code, inst->dreg, ainfo->reg);
3031 else if (ainfo->storage == RegTypeFP) {
3032 g_assert_not_reached ();
3033 } else if (ainfo->storage == RegTypeBase) {
/* reload from the caller's frame; IP is used as scratch when the offset exceeds imm12 */
3037 if (arm_is_imm12 (prev_sp_offset + ainfo->offset)) {
3038 ARM_LDR_IMM (code, inst->dreg, ARMREG_SP, (prev_sp_offset + ainfo->offset));
3040 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
3041 ARM_LDR_REG_REG (code, inst->dreg, ARMREG_SP, ARMREG_IP);
3045 g_assert_not_reached ();
/* argument was spilled to the frame: reload into its original register(s) */
3047 if (ainfo->storage == RegTypeGeneral || ainfo->storage == RegTypeIRegPair) {
3048 switch (ainfo->size) {
/* 8-byte case (elided label): reload both halves of the register pair */
3055 g_assert (arm_is_imm12 (inst->inst_offset));
3056 ARM_LDR_IMM (code, ainfo->reg, inst->inst_basereg, inst->inst_offset);
3057 g_assert (arm_is_imm12 (inst->inst_offset + 4));
3058 ARM_LDR_IMM (code, ainfo->reg + 1, inst->inst_basereg, inst->inst_offset + 4);
3061 if (arm_is_imm12 (inst->inst_offset)) {
3062 ARM_LDR_IMM (code, ainfo->reg, inst->inst_basereg, inst->inst_offset);
3064 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
3065 ARM_LDR_REG_REG (code, ainfo->reg, inst->inst_basereg, ARMREG_IP);
/* NOTE(review): bodies of the next three storage classes are elided in this view */
3069 } else if (ainfo->storage == RegTypeBaseGen) {
3072 } else if (ainfo->storage == RegTypeBase) {
3074 } else if (ainfo->storage == RegTypeFP) {
3075 g_assert_not_reached ();
/* struct passed by value in registers: reload one word per register used */
3076 } else if (ainfo->storage == RegTypeStructByVal) {
3077 int doffset = inst->inst_offset;
3081 if (mono_class_from_mono_type (inst->inst_vtype))
3082 size = mono_class_native_size (mono_class_from_mono_type (inst->inst_vtype), NULL);
3083 for (cur_reg = 0; cur_reg < ainfo->size; ++cur_reg) {
3084 if (arm_is_imm12 (doffset)) {
3085 ARM_LDR_IMM (code, ainfo->reg + cur_reg, inst->inst_basereg, doffset);
3087 code = mono_arm_emit_load_imm (code, ARMREG_IP, doffset);
3088 ARM_LDR_REG_REG (code, ainfo->reg + cur_reg, inst->inst_basereg, ARMREG_IP);
3090 soffset += sizeof (gpointer);
3091 doffset += sizeof (gpointer);
3096 } else if (ainfo->storage == RegTypeStructByAddr) {
3111 mono_arch_output_basic_block (MonoCompile *cfg, MonoBasicBlock *bb)
3116 guint8 *code = cfg->native_code + cfg->code_len;
3117 MonoInst *last_ins = NULL;
3118 guint last_offset = 0;
3120 int imm8, rot_amount;
3122 /* we don't align basic blocks of loops on arm */
3124 if (cfg->verbose_level > 2)
3125 g_print ("Basic block %d starting at offset 0x%x\n", bb->block_num, bb->native_offset);
3127 cpos = bb->max_offset;
3129 if (cfg->prof_options & MONO_PROFILE_COVERAGE) {
3130 //MonoCoverageInfo *cov = mono_get_coverage_info (cfg->method);
3131 //g_assert (!mono_compile_aot);
3134 // cov->data [bb->dfn].iloffset = bb->cil_code - cfg->cil_code;
3135 /* this is not thread save, but good enough */
3136 /* fixme: howto handle overflows? */
3137 //x86_inc_mem (code, &cov->data [bb->dfn].count);
3140 if (mono_break_at_bb_method && mono_method_desc_full_match (mono_break_at_bb_method, cfg->method) && bb->block_num == mono_break_at_bb_bb_num) {
3141 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_INTERNAL_METHOD,
3142 (gpointer)"mono_break");
3143 code = emit_call_seq (cfg, code);
3146 MONO_BB_FOR_EACH_INS (bb, ins) {
3147 offset = code - cfg->native_code;
3149 max_len = ((guint8 *)ins_get_spec (ins->opcode))[MONO_INST_LEN];
3151 if (offset > (cfg->code_size - max_len - 16)) {
3152 cfg->code_size *= 2;
3153 cfg->native_code = g_realloc (cfg->native_code, cfg->code_size);
3154 code = cfg->native_code + offset;
3156 // if (ins->cil_code)
3157 // g_print ("cil code\n");
3158 mono_debug_record_line_number (cfg, ins, offset);
3160 switch (ins->opcode) {
3161 case OP_MEMORY_BARRIER:
3164 #ifdef HAVE_AEABI_READ_TP
3165 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_INTERNAL_METHOD,
3166 (gpointer)"__aeabi_read_tp");
3167 code = emit_call_seq (cfg, code);
3169 ARM_LDR_IMM (code, ins->dreg, ARMREG_R0, ins->inst_offset);
3171 g_assert_not_reached ();
3175 ppc_mullw (code, ppc_r4, ins->sreg1, ins->sreg2);
3176 ppc_mulhw (code, ppc_r3, ins->sreg1, ins->sreg2);
3179 ppc_mullw (code, ppc_r4, ins->sreg1, ins->sreg2);
3180 ppc_mulhwu (code, ppc_r3, ins->sreg1, ins->sreg2);
3182 case OP_STOREI1_MEMBASE_IMM:
3183 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_imm & 0xFF);
3184 g_assert (arm_is_imm12 (ins->inst_offset));
3185 ARM_STRB_IMM (code, ARMREG_LR, ins->inst_destbasereg, ins->inst_offset);
3187 case OP_STOREI2_MEMBASE_IMM:
3188 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_imm & 0xFFFF);
3189 g_assert (arm_is_imm8 (ins->inst_offset));
3190 ARM_STRH_IMM (code, ARMREG_LR, ins->inst_destbasereg, ins->inst_offset);
3192 case OP_STORE_MEMBASE_IMM:
3193 case OP_STOREI4_MEMBASE_IMM:
3194 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_imm);
3195 g_assert (arm_is_imm12 (ins->inst_offset));
3196 ARM_STR_IMM (code, ARMREG_LR, ins->inst_destbasereg, ins->inst_offset);
3198 case OP_STOREI1_MEMBASE_REG:
3199 g_assert (arm_is_imm12 (ins->inst_offset));
3200 ARM_STRB_IMM (code, ins->sreg1, ins->inst_destbasereg, ins->inst_offset);
3202 case OP_STOREI2_MEMBASE_REG:
3203 g_assert (arm_is_imm8 (ins->inst_offset));
3204 ARM_STRH_IMM (code, ins->sreg1, ins->inst_destbasereg, ins->inst_offset);
3206 case OP_STORE_MEMBASE_REG:
3207 case OP_STOREI4_MEMBASE_REG:
3208 /* this case is special, since it happens for spill code after lowering has been called */
3209 if (arm_is_imm12 (ins->inst_offset)) {
3210 ARM_STR_IMM (code, ins->sreg1, ins->inst_destbasereg, ins->inst_offset);
3212 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_offset);
3213 ARM_STR_REG_REG (code, ins->sreg1, ins->inst_destbasereg, ARMREG_LR);
3216 case OP_STOREI1_MEMINDEX:
3217 ARM_STRB_REG_REG (code, ins->sreg1, ins->inst_destbasereg, ins->sreg2);
3219 case OP_STOREI2_MEMINDEX:
3220 ARM_STRH_REG_REG (code, ins->sreg1, ins->inst_destbasereg, ins->sreg2);
3222 case OP_STORE_MEMINDEX:
3223 case OP_STOREI4_MEMINDEX:
3224 ARM_STR_REG_REG (code, ins->sreg1, ins->inst_destbasereg, ins->sreg2);
3227 g_assert_not_reached ();
3229 case OP_LOAD_MEMINDEX:
3230 case OP_LOADI4_MEMINDEX:
3231 case OP_LOADU4_MEMINDEX:
3232 ARM_LDR_REG_REG (code, ins->dreg, ins->inst_basereg, ins->sreg2);
3234 case OP_LOADI1_MEMINDEX:
3235 ARM_LDRSB_REG_REG (code, ins->dreg, ins->inst_basereg, ins->sreg2);
3237 case OP_LOADU1_MEMINDEX:
3238 ARM_LDRB_REG_REG (code, ins->dreg, ins->inst_basereg, ins->sreg2);
3240 case OP_LOADI2_MEMINDEX:
3241 ARM_LDRSH_REG_REG (code, ins->dreg, ins->inst_basereg, ins->sreg2);
3243 case OP_LOADU2_MEMINDEX:
3244 ARM_LDRH_REG_REG (code, ins->dreg, ins->inst_basereg, ins->sreg2);
3246 case OP_LOAD_MEMBASE:
3247 case OP_LOADI4_MEMBASE:
3248 case OP_LOADU4_MEMBASE:
3249 /* this case is special, since it happens for spill code after lowering has been called */
3250 if (arm_is_imm12 (ins->inst_offset)) {
3251 ARM_LDR_IMM (code, ins->dreg, ins->inst_basereg, ins->inst_offset);
3253 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_offset);
3254 ARM_LDR_REG_REG (code, ins->dreg, ins->inst_basereg, ARMREG_LR);
3257 case OP_LOADI1_MEMBASE:
3258 g_assert (arm_is_imm8 (ins->inst_offset));
3259 ARM_LDRSB_IMM (code, ins->dreg, ins->inst_basereg, ins->inst_offset);
3261 case OP_LOADU1_MEMBASE:
3262 g_assert (arm_is_imm12 (ins->inst_offset));
3263 ARM_LDRB_IMM (code, ins->dreg, ins->inst_basereg, ins->inst_offset);
3265 case OP_LOADU2_MEMBASE:
3266 g_assert (arm_is_imm8 (ins->inst_offset));
3267 ARM_LDRH_IMM (code, ins->dreg, ins->inst_basereg, ins->inst_offset);
3269 case OP_LOADI2_MEMBASE:
3270 g_assert (arm_is_imm8 (ins->inst_offset));
3271 ARM_LDRSH_IMM (code, ins->dreg, ins->inst_basereg, ins->inst_offset);
3273 case OP_ICONV_TO_I1:
3274 ARM_SHL_IMM (code, ins->dreg, ins->sreg1, 24);
3275 ARM_SAR_IMM (code, ins->dreg, ins->dreg, 24);
3277 case OP_ICONV_TO_I2:
3278 ARM_SHL_IMM (code, ins->dreg, ins->sreg1, 16);
3279 ARM_SAR_IMM (code, ins->dreg, ins->dreg, 16);
3281 case OP_ICONV_TO_U1:
3282 ARM_AND_REG_IMM8 (code, ins->dreg, ins->sreg1, 0xff);
3284 case OP_ICONV_TO_U2:
3285 ARM_SHL_IMM (code, ins->dreg, ins->sreg1, 16);
3286 ARM_SHR_IMM (code, ins->dreg, ins->dreg, 16);
3290 ARM_CMP_REG_REG (code, ins->sreg1, ins->sreg2);
3292 case OP_COMPARE_IMM:
3293 case OP_ICOMPARE_IMM:
3294 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
3295 g_assert (imm8 >= 0);
3296 ARM_CMP_REG_IMM (code, ins->sreg1, imm8, rot_amount);
3300 * gdb does not like encountering the hw breakpoint ins in the debugged code.
3301 * So instead of emitting a trap, we emit a call a C function and place a
3304 //*(int*)code = 0xef9f0001;
3307 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_INTERNAL_METHOD,
3308 (gpointer)"mono_break");
3309 code = emit_call_seq (cfg, code);
3311 case OP_RELAXED_NOP:
3316 case OP_DUMMY_STORE:
3317 case OP_NOT_REACHED:
3320 case OP_SEQ_POINT: {
3322 MonoInst *info_var = cfg->arch.seq_point_info_var;
3323 MonoInst *ss_trigger_page_var = cfg->arch.ss_trigger_page_var;
3325 int dreg = ARMREG_LR;
3328 * For AOT, we use one got slot per method, which will point to a
3329 * SeqPointInfo structure, containing all the information required
3330 * by the code below.
3332 if (cfg->compile_aot) {
3333 g_assert (info_var);
3334 g_assert (info_var->opcode == OP_REGOFFSET);
3335 g_assert (arm_is_imm12 (info_var->inst_offset));
3339 * Read from the single stepping trigger page. This will cause a
3340 * SIGSEGV when single stepping is enabled.
3341 * We do this _before_ the breakpoint, so single stepping after
3342 * a breakpoint is hit will step to the next IL offset.
3344 g_assert (((guint64)(gsize)ss_trigger_page >> 32) == 0);
3346 if (ins->flags & MONO_INST_SINGLE_STEP_LOC) {
3347 if (cfg->compile_aot) {
3348 /* Load the trigger page addr from the variable initialized in the prolog */
3349 var = ss_trigger_page_var;
3351 g_assert (var->opcode == OP_REGOFFSET);
3352 g_assert (arm_is_imm12 (var->inst_offset));
3353 ARM_LDR_IMM (code, dreg, var->inst_basereg, var->inst_offset);
3355 ARM_LDR_IMM (code, dreg, ARMREG_PC, 0);
3357 *(int*)code = (int)ss_trigger_page;
3360 ARM_LDR_IMM (code, dreg, dreg, 0);
3363 mono_add_seq_point (cfg, bb, ins, code - cfg->native_code);
3365 if (cfg->compile_aot) {
3366 guint32 offset = code - cfg->native_code;
3369 ARM_LDR_IMM (code, dreg, info_var->inst_basereg, info_var->inst_offset);
3370 /* Add the offset */
3371 val = ((offset / 4) * sizeof (guint8*)) + G_STRUCT_OFFSET (SeqPointInfo, bp_addrs);
3372 ARM_ADD_REG_IMM (code, dreg, dreg, (val & 0xFF), 0);
3374 * Have to emit nops to keep the difference between the offset
3375 * stored in seq_points and breakpoint instruction constant,
3376 * mono_arch_get_ip_for_breakpoint () depends on this.
3379 ARM_ADD_REG_IMM (code, dreg, dreg, (val & 0xFF00) >> 8, 24);
3383 ARM_ADD_REG_IMM (code, dreg, dreg, (val & 0xFF0000) >> 16, 16);
3386 g_assert (!(val & 0xFF000000));
3387 /* Load the info->bp_addrs [offset], which is either 0 or the address of a trigger page */
3388 ARM_LDR_IMM (code, dreg, dreg, 0);
3390 /* What is faster, a branch or a load ? */
3391 ARM_CMP_REG_IMM (code, dreg, 0, 0);
3392 /* The breakpoint instruction */
3393 ARM_LDR_IMM_COND (code, dreg, dreg, 0, ARMCOND_NE);
3396 * A placeholder for a possible breakpoint inserted by
3397 * mono_arch_set_breakpoint ().
3399 for (i = 0; i < 4; ++i)
3406 ARM_ADDS_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
3409 ARM_ADD_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
3413 ARM_ADCS_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
3416 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
3417 g_assert (imm8 >= 0);
3418 ARM_ADDS_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
3422 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
3423 g_assert (imm8 >= 0);
3424 ARM_ADD_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
3428 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
3429 g_assert (imm8 >= 0);
3430 ARM_ADCS_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
3433 ARM_ADD_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
3434 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, "OverflowException");
3436 case OP_IADD_OVF_UN:
3437 ARM_ADD_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
3438 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, "OverflowException");
3441 ARM_SUB_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
3442 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, "OverflowException");
3444 case OP_ISUB_OVF_UN:
3445 ARM_SUB_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
3446 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_TRUE, PPC_BR_EQ, "OverflowException");
3448 case OP_ADD_OVF_CARRY:
3449 ARM_ADCS_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
3450 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, "OverflowException");
3452 case OP_ADD_OVF_UN_CARRY:
3453 ARM_ADCS_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
3454 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, "OverflowException");
3456 case OP_SUB_OVF_CARRY:
3457 ARM_SBCS_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
3458 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, "OverflowException");
3460 case OP_SUB_OVF_UN_CARRY:
3461 ARM_SBCS_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
3462 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_TRUE, PPC_BR_EQ, "OverflowException");
3466 ARM_SUBS_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
3469 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
3470 g_assert (imm8 >= 0);
3471 ARM_SUBS_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
3474 ARM_SUB_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
3478 ARM_SBCS_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
3482 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
3483 g_assert (imm8 >= 0);
3484 ARM_SUB_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
3488 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
3489 g_assert (imm8 >= 0);
3490 ARM_SBCS_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
3492 case OP_ARM_RSBS_IMM:
3493 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
3494 g_assert (imm8 >= 0);
3495 ARM_RSBS_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
3497 case OP_ARM_RSC_IMM:
3498 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
3499 g_assert (imm8 >= 0);
3500 ARM_RSC_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
3503 ARM_AND_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
3507 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
3508 g_assert (imm8 >= 0);
3509 ARM_AND_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
3517 /* crappy ARM arch doesn't have a DIV instruction */
3518 g_assert_not_reached ();
3520 ARM_ORR_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
3524 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
3525 g_assert (imm8 >= 0);
3526 ARM_ORR_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
3529 ARM_EOR_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
3533 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
3534 g_assert (imm8 >= 0);
3535 ARM_EOR_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
3538 ARM_SHL_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
3543 ARM_SHL_IMM (code, ins->dreg, ins->sreg1, (ins->inst_imm & 0x1f));
3544 else if (ins->dreg != ins->sreg1)
3545 ARM_MOV_REG_REG (code, ins->dreg, ins->sreg1);
3548 ARM_SAR_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
3553 ARM_SAR_IMM (code, ins->dreg, ins->sreg1, (ins->inst_imm & 0x1f));
3554 else if (ins->dreg != ins->sreg1)
3555 ARM_MOV_REG_REG (code, ins->dreg, ins->sreg1);
3558 case OP_ISHR_UN_IMM:
3560 ARM_SHR_IMM (code, ins->dreg, ins->sreg1, (ins->inst_imm & 0x1f));
3561 else if (ins->dreg != ins->sreg1)
3562 ARM_MOV_REG_REG (code, ins->dreg, ins->sreg1);
3565 ARM_SHR_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
3568 ARM_MVN_REG_REG (code, ins->dreg, ins->sreg1);
3571 ARM_RSB_REG_IMM8 (code, ins->dreg, ins->sreg1, 0);
3574 if (ins->dreg == ins->sreg2)
3575 ARM_MUL_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
3577 ARM_MUL_REG_REG (code, ins->dreg, ins->sreg2, ins->sreg1);
3580 g_assert_not_reached ();
3583 /* FIXME: handle ovf/ sreg2 != dreg */
3584 ARM_MUL_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
3585 /* FIXME: MUL doesn't set the C/O flags on ARM */
3587 case OP_IMUL_OVF_UN:
3588 /* FIXME: handle ovf/ sreg2 != dreg */
3589 ARM_MUL_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
3590 /* FIXME: MUL doesn't set the C/O flags on ARM */
3593 code = mono_arm_emit_load_imm (code, ins->dreg, ins->inst_c0);
3596 /* Load the GOT offset */
3597 mono_add_patch_info (cfg, offset, (MonoJumpInfoType)ins->inst_i1, ins->inst_p0);
3598 ARM_LDR_IMM (code, ins->dreg, ARMREG_PC, 0);
3600 *(gpointer*)code = NULL;
3602 /* Load the value from the GOT */
3603 ARM_LDR_REG_REG (code, ins->dreg, ARMREG_PC, ins->dreg);
3605 case OP_ICONV_TO_I4:
3606 case OP_ICONV_TO_U4:
3608 if (ins->dreg != ins->sreg1)
3609 ARM_MOV_REG_REG (code, ins->dreg, ins->sreg1);
3612 int saved = ins->sreg2;
3613 if (ins->sreg2 == ARM_LSW_REG) {
3614 ARM_MOV_REG_REG (code, ARMREG_LR, ins->sreg2);
3617 if (ins->sreg1 != ARM_LSW_REG)
3618 ARM_MOV_REG_REG (code, ARM_LSW_REG, ins->sreg1);
3619 if (saved != ARM_MSW_REG)
3620 ARM_MOV_REG_REG (code, ARM_MSW_REG, saved);
3625 ARM_MVFD (code, ins->dreg, ins->sreg1);
3626 #elif defined(ARM_FPU_VFP)
3627 ARM_CPYD (code, ins->dreg, ins->sreg1);
3630 case OP_FCONV_TO_R4:
3632 ARM_MVFS (code, ins->dreg, ins->sreg1);
3633 #elif defined(ARM_FPU_VFP)
3634 ARM_CVTD (code, ins->dreg, ins->sreg1);
3635 ARM_CVTS (code, ins->dreg, ins->dreg);
3640 * Keep in sync with mono_arch_emit_epilog
3642 g_assert (!cfg->method->save_lmf);
3644 code = emit_load_volatile_arguments (cfg, code);
3646 code = emit_big_add (code, ARMREG_SP, cfg->frame_reg, cfg->stack_usage);
3647 ARM_POP_NWB (code, cfg->used_int_regs | ((1 << ARMREG_SP)) | ((1 << ARMREG_LR)));
3648 mono_add_patch_info (cfg, (guint8*) code - cfg->native_code, MONO_PATCH_INFO_METHOD_JUMP, ins->inst_p0);
3649 if (cfg->compile_aot) {
3650 ARM_LDR_IMM (code, ARMREG_IP, ARMREG_PC, 0);
3652 *(gpointer*)code = NULL;
3654 ARM_LDR_REG_REG (code, ARMREG_PC, ARMREG_PC, ARMREG_IP);
3660 /* ensure ins->sreg1 is not NULL */
3661 ARM_LDR_IMM (code, ARMREG_LR, ins->sreg1, 0);
3664 g_assert (cfg->sig_cookie < 128);
3665 ARM_LDR_IMM (code, ARMREG_IP, cfg->frame_reg, cfg->sig_cookie);
3666 ARM_STR_IMM (code, ARMREG_IP, ins->sreg1, 0);
3675 call = (MonoCallInst*)ins;
3676 if (ins->flags & MONO_INST_HAS_METHOD)
3677 mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_METHOD, call->method);
3679 mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_ABS, call->fptr);
3680 code = emit_call_seq (cfg, code);
3681 code = emit_move_return_value (cfg, ins, code);
3687 case OP_VOIDCALL_REG:
3689 code = emit_call_reg (code, ins->sreg1);
3690 code = emit_move_return_value (cfg, ins, code);
3692 case OP_FCALL_MEMBASE:
3693 case OP_LCALL_MEMBASE:
3694 case OP_VCALL_MEMBASE:
3695 case OP_VCALL2_MEMBASE:
3696 case OP_VOIDCALL_MEMBASE:
3697 case OP_CALL_MEMBASE:
3698 g_assert (arm_is_imm12 (ins->inst_offset));
3699 g_assert (ins->sreg1 != ARMREG_LR);
3700 call = (MonoCallInst*)ins;
3701 if (call->dynamic_imt_arg || call->method->klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
3702 ARM_ADD_REG_IMM8 (code, ARMREG_LR, ARMREG_PC, 4);
3703 ARM_LDR_IMM (code, ARMREG_PC, ins->sreg1, ins->inst_offset);
3705 * We can't embed the method in the code stream in PIC code, or
3707 * Instead, we put it in V5 in code emitted by
3708 * mono_arch_emit_imt_argument (), and embed NULL here to
3709 * signal the IMT thunk that the value is in V5.
3711 if (call->dynamic_imt_arg)
3712 *((gpointer*)code) = NULL;
3714 *((gpointer*)code) = (gpointer)call->method;
3717 ARM_MOV_REG_REG (code, ARMREG_LR, ARMREG_PC);
3718 ARM_LDR_IMM (code, ARMREG_PC, ins->sreg1, ins->inst_offset);
3720 code = emit_move_return_value (cfg, ins, code);
3723 /* keep alignment */
3724 int alloca_waste = cfg->param_area;
3727 /* round the size to 8 bytes */
3728 ARM_ADD_REG_IMM8 (code, ins->dreg, ins->sreg1, 7);
3729 ARM_BIC_REG_IMM8 (code, ins->dreg, ins->dreg, 7);
3731 ARM_ADD_REG_IMM8 (code, ins->dreg, ins->dreg, alloca_waste);
3732 ARM_SUB_REG_REG (code, ARMREG_SP, ARMREG_SP, ins->dreg);
3733 /* memzero the area: dreg holds the size, sp is the pointer */
3734 if (ins->flags & MONO_INST_INIT) {
3735 guint8 *start_loop, *branch_to_cond;
3736 ARM_MOV_REG_IMM8 (code, ARMREG_LR, 0);
3737 branch_to_cond = code;
3740 ARM_STR_REG_REG (code, ARMREG_LR, ARMREG_SP, ins->dreg);
3741 arm_patch (branch_to_cond, code);
3742 /* decrement by 4 and set flags */
3743 ARM_SUBS_REG_IMM8 (code, ins->dreg, ins->dreg, 4);
3744 ARM_B_COND (code, ARMCOND_GE, 0);
3745 arm_patch (code - 4, start_loop);
3747 ARM_ADD_REG_IMM8 (code, ins->dreg, ARMREG_SP, alloca_waste);
3752 MonoInst *var = cfg->dyn_call_var;
3754 g_assert (var->opcode == OP_REGOFFSET);
3755 g_assert (arm_is_imm12 (var->inst_offset));
3757 /* lr = args buffer filled by mono_arch_get_dyn_call_args () */
3758 ARM_MOV_REG_REG( code, ARMREG_LR, ins->sreg1);
3760 ARM_MOV_REG_REG( code, ARMREG_IP, ins->sreg2);
3762 /* Save args buffer */
3763 ARM_STR_IMM (code, ARMREG_LR, var->inst_basereg, var->inst_offset);
3765 /* Set stack slots using R0 as scratch reg */
3766 /* MONO_ARCH_DYN_CALL_PARAM_AREA gives the size of stack space available */
3767 for (i = 0; i < DYN_CALL_STACK_ARGS; ++i) {
3768 ARM_LDR_IMM (code, ARMREG_R0, ARMREG_LR, (PARAM_REGS + i) * sizeof (gpointer));
3769 ARM_STR_IMM (code, ARMREG_R0, ARMREG_SP, i * sizeof (gpointer));
3772 /* Set argument registers */
3773 for (i = 0; i < PARAM_REGS; ++i)
3774 ARM_LDR_IMM (code, i, ARMREG_LR, i * sizeof (gpointer));
3777 ARM_MOV_REG_REG (code, ARMREG_LR, ARMREG_PC);
3778 ARM_MOV_REG_REG (code, ARMREG_PC, ARMREG_IP);
3781 ARM_LDR_IMM (code, ARMREG_IP, var->inst_basereg, var->inst_offset);
3782 ARM_STR_IMM (code, ARMREG_R0, ARMREG_IP, G_STRUCT_OFFSET (DynCallArgs, res));
3783 ARM_STR_IMM (code, ARMREG_R1, ARMREG_IP, G_STRUCT_OFFSET (DynCallArgs, res2));
3787 if (ins->sreg1 != ARMREG_R0)
3788 ARM_MOV_REG_REG (code, ARMREG_R0, ins->sreg1);
3789 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_INTERNAL_METHOD,
3790 (gpointer)"mono_arch_throw_exception");
3791 code = emit_call_seq (cfg, code);
3795 if (ins->sreg1 != ARMREG_R0)
3796 ARM_MOV_REG_REG (code, ARMREG_R0, ins->sreg1);
3797 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_INTERNAL_METHOD,
3798 (gpointer)"mono_arch_rethrow_exception");
3799 code = emit_call_seq (cfg, code);
3802 case OP_START_HANDLER: {
3803 MonoInst *spvar = mono_find_spvar_for_region (cfg, bb->region);
3805 if (arm_is_imm12 (spvar->inst_offset)) {
3806 ARM_STR_IMM (code, ARMREG_LR, spvar->inst_basereg, spvar->inst_offset);
3808 code = mono_arm_emit_load_imm (code, ARMREG_IP, spvar->inst_offset);
3809 ARM_STR_REG_REG (code, ARMREG_LR, spvar->inst_basereg, ARMREG_IP);
3813 case OP_ENDFILTER: {
3814 MonoInst *spvar = mono_find_spvar_for_region (cfg, bb->region);
3816 if (ins->sreg1 != ARMREG_R0)
3817 ARM_MOV_REG_REG (code, ARMREG_R0, ins->sreg1);
3818 if (arm_is_imm12 (spvar->inst_offset)) {
3819 ARM_LDR_IMM (code, ARMREG_IP, spvar->inst_basereg, spvar->inst_offset);
3821 g_assert (ARMREG_IP != spvar->inst_basereg);
3822 code = mono_arm_emit_load_imm (code, ARMREG_IP, spvar->inst_offset);
3823 ARM_LDR_REG_REG (code, ARMREG_IP, spvar->inst_basereg, ARMREG_IP);
3825 ARM_MOV_REG_REG (code, ARMREG_PC, ARMREG_IP);
3828 case OP_ENDFINALLY: {
3829 MonoInst *spvar = mono_find_spvar_for_region (cfg, bb->region);
3831 if (arm_is_imm12 (spvar->inst_offset)) {
3832 ARM_LDR_IMM (code, ARMREG_IP, spvar->inst_basereg, spvar->inst_offset);
3834 g_assert (ARMREG_IP != spvar->inst_basereg);
3835 code = mono_arm_emit_load_imm (code, ARMREG_IP, spvar->inst_offset);
3836 ARM_LDR_REG_REG (code, ARMREG_IP, spvar->inst_basereg, ARMREG_IP);
3838 ARM_MOV_REG_REG (code, ARMREG_PC, ARMREG_IP);
3841 case OP_CALL_HANDLER:
3842 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_BB, ins->inst_target_bb);
3844 mono_cfg_add_try_hole (cfg, ins->inst_eh_block, code, bb);
3847 ins->inst_c0 = code - cfg->native_code;
3850 /*if (ins->inst_target_bb->native_offset) {
3852 //x86_jump_code (code, cfg->native_code + ins->inst_target_bb->native_offset);
3854 mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_BB, ins->inst_target_bb);
3859 ARM_MOV_REG_REG (code, ARMREG_PC, ins->sreg1);
3863 * In the normal case we have:
3864 * ldr pc, [pc, ins->sreg1 << 2]
3867 * ldr lr, [pc, ins->sreg1 << 2]
3869 * After follows the data.
3870 * FIXME: add aot support.
3872 mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_SWITCH, ins->inst_p0);
3873 max_len += 4 * GPOINTER_TO_INT (ins->klass);
3874 if (offset > (cfg->code_size - max_len - 16)) {
3875 cfg->code_size += max_len;
3876 cfg->code_size *= 2;
3877 cfg->native_code = g_realloc (cfg->native_code, cfg->code_size);
3878 code = cfg->native_code + offset;
3880 ARM_LDR_REG_REG_SHIFT (code, ARMREG_PC, ARMREG_PC, ins->sreg1, ARMSHIFT_LSL, 2);
3882 code += 4 * GPOINTER_TO_INT (ins->klass);
3886 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 0, ARMCOND_NE);
3887 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_EQ);
3891 ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
3892 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_LT);
3896 ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
3897 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_LO);
3901 ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
3902 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_GT);
3906 ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
3907 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_HI);
3909 case OP_COND_EXC_EQ:
3910 case OP_COND_EXC_NE_UN:
3911 case OP_COND_EXC_LT:
3912 case OP_COND_EXC_LT_UN:
3913 case OP_COND_EXC_GT:
3914 case OP_COND_EXC_GT_UN:
3915 case OP_COND_EXC_GE:
3916 case OP_COND_EXC_GE_UN:
3917 case OP_COND_EXC_LE:
3918 case OP_COND_EXC_LE_UN:
3919 EMIT_COND_SYSTEM_EXCEPTION (ins->opcode - OP_COND_EXC_EQ, ins->inst_p1);
3921 case OP_COND_EXC_IEQ:
3922 case OP_COND_EXC_INE_UN:
3923 case OP_COND_EXC_ILT:
3924 case OP_COND_EXC_ILT_UN:
3925 case OP_COND_EXC_IGT:
3926 case OP_COND_EXC_IGT_UN:
3927 case OP_COND_EXC_IGE:
3928 case OP_COND_EXC_IGE_UN:
3929 case OP_COND_EXC_ILE:
3930 case OP_COND_EXC_ILE_UN:
3931 EMIT_COND_SYSTEM_EXCEPTION (ins->opcode - OP_COND_EXC_IEQ, ins->inst_p1);
3934 case OP_COND_EXC_IC:
3935 EMIT_COND_SYSTEM_EXCEPTION_FLAGS (ARMCOND_CS, ins->inst_p1);
3937 case OP_COND_EXC_OV:
3938 case OP_COND_EXC_IOV:
3939 EMIT_COND_SYSTEM_EXCEPTION_FLAGS (ARMCOND_VS, ins->inst_p1);
3941 case OP_COND_EXC_NC:
3942 case OP_COND_EXC_INC:
3943 EMIT_COND_SYSTEM_EXCEPTION_FLAGS (ARMCOND_CC, ins->inst_p1);
3945 case OP_COND_EXC_NO:
3946 case OP_COND_EXC_INO:
3947 EMIT_COND_SYSTEM_EXCEPTION_FLAGS (ARMCOND_VC, ins->inst_p1);
3959 EMIT_COND_BRANCH (ins, ins->opcode - OP_IBEQ);
3962 /* floating point opcodes */
3965 if (cfg->compile_aot) {
3966 ARM_LDFD (code, ins->dreg, ARMREG_PC, 0);
3968 *(guint32*)code = ((guint32*)(ins->inst_p0))[0];
3970 *(guint32*)code = ((guint32*)(ins->inst_p0))[1];
3973 /* FIXME: we can optimize the imm load by dealing with part of
3974 * the displacement in LDFD (aligning to 512).
3976 code = mono_arm_emit_load_imm (code, ARMREG_LR, (guint32)ins->inst_p0);
3977 ARM_LDFD (code, ins->dreg, ARMREG_LR, 0);
3981 if (cfg->compile_aot) {
3982 ARM_LDFS (code, ins->dreg, ARMREG_PC, 0);
3984 *(guint32*)code = ((guint32*)(ins->inst_p0))[0];
3987 code = mono_arm_emit_load_imm (code, ARMREG_LR, (guint32)ins->inst_p0);
3988 ARM_LDFS (code, ins->dreg, ARMREG_LR, 0);
3991 case OP_STORER8_MEMBASE_REG:
3992 /* This is generated by the local regalloc pass which runs after the lowering pass */
3993 if (!arm_is_fpimm8 (ins->inst_offset)) {
3994 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_offset);
3995 ARM_ADD_REG_REG (code, ARMREG_LR, ARMREG_LR, ins->inst_destbasereg);
3996 ARM_STFD (code, ins->sreg1, ARMREG_LR, 0);
3998 ARM_STFD (code, ins->sreg1, ins->inst_destbasereg, ins->inst_offset);
4001 case OP_LOADR8_MEMBASE:
4002 /* This is generated by the local regalloc pass which runs after the lowering pass */
4003 if (!arm_is_fpimm8 (ins->inst_offset)) {
4004 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_offset);
4005 ARM_ADD_REG_REG (code, ARMREG_LR, ARMREG_LR, ins->inst_basereg);
4006 ARM_LDFD (code, ins->dreg, ARMREG_LR, 0);
4008 ARM_LDFD (code, ins->dreg, ins->inst_basereg, ins->inst_offset);
4011 case OP_STORER4_MEMBASE_REG:
4012 g_assert (arm_is_fpimm8 (ins->inst_offset));
4013 ARM_STFS (code, ins->sreg1, ins->inst_destbasereg, ins->inst_offset);
4015 case OP_LOADR4_MEMBASE:
4016 g_assert (arm_is_fpimm8 (ins->inst_offset));
4017 ARM_LDFS (code, ins->dreg, ins->inst_basereg, ins->inst_offset);
4019 case OP_ICONV_TO_R_UN: {
4021 tmpreg = ins->dreg == 0? 1: 0;
4022 ARM_CMP_REG_IMM8 (code, ins->sreg1, 0);
4023 ARM_FLTD (code, ins->dreg, ins->sreg1);
4024 ARM_B_COND (code, ARMCOND_GE, 8);
4025 /* save the temp register */
4026 ARM_SUB_REG_IMM8 (code, ARMREG_SP, ARMREG_SP, 8);
4027 ARM_STFD (code, tmpreg, ARMREG_SP, 0);
4028 ARM_LDFD (code, tmpreg, ARMREG_PC, 12);
4029 ARM_FPA_ADFD (code, ins->dreg, ins->dreg, tmpreg);
4030 ARM_LDFD (code, tmpreg, ARMREG_SP, 0);
4031 ARM_ADD_REG_IMM8 (code, ARMREG_SP, ARMREG_SP, 8);
4032 /* skip the constant pool */
4035 *(int*)code = 0x41f00000;
4040 * ldfltd ftemp, [pc, #8] 0x41f00000 0x00000000
4041 * adfltd fdest, fdest, ftemp
4045 case OP_ICONV_TO_R4:
4046 ARM_FLTS (code, ins->dreg, ins->sreg1);
4048 case OP_ICONV_TO_R8:
4049 ARM_FLTD (code, ins->dreg, ins->sreg1);
4052 #elif defined(ARM_FPU_VFP)
4055 if (cfg->compile_aot) {
4056 ARM_FLDD (code, ins->dreg, ARMREG_PC, 0);
4058 *(guint32*)code = ((guint32*)(ins->inst_p0))[0];
4060 *(guint32*)code = ((guint32*)(ins->inst_p0))[1];
4063 /* FIXME: we can optimize the imm load by dealing with part of
4064 * the displacement in LDFD (aligning to 512).
4066 code = mono_arm_emit_load_imm (code, ARMREG_LR, (guint32)ins->inst_p0);
4067 ARM_FLDD (code, ins->dreg, ARMREG_LR, 0);
4071 if (cfg->compile_aot) {
4072 ARM_FLDS (code, ins->dreg, ARMREG_PC, 0);
4074 *(guint32*)code = ((guint32*)(ins->inst_p0))[0];
4076 ARM_CVTS (code, ins->dreg, ins->dreg);
4078 code = mono_arm_emit_load_imm (code, ARMREG_LR, (guint32)ins->inst_p0);
4079 ARM_FLDS (code, ins->dreg, ARMREG_LR, 0);
4080 ARM_CVTS (code, ins->dreg, ins->dreg);
4083 case OP_STORER8_MEMBASE_REG:
4084 /* This is generated by the local regalloc pass which runs after the lowering pass */
4085 if (!arm_is_fpimm8 (ins->inst_offset)) {
4086 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_offset);
4087 ARM_ADD_REG_REG (code, ARMREG_LR, ARMREG_LR, ins->inst_destbasereg);
4088 ARM_FSTD (code, ins->sreg1, ARMREG_LR, 0);
4090 ARM_FSTD (code, ins->sreg1, ins->inst_destbasereg, ins->inst_offset);
4093 case OP_LOADR8_MEMBASE:
4094 /* This is generated by the local regalloc pass which runs after the lowering pass */
4095 if (!arm_is_fpimm8 (ins->inst_offset)) {
4096 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_offset);
4097 ARM_ADD_REG_REG (code, ARMREG_LR, ARMREG_LR, ins->inst_basereg);
4098 ARM_FLDD (code, ins->dreg, ARMREG_LR, 0);
4100 ARM_FLDD (code, ins->dreg, ins->inst_basereg, ins->inst_offset);
4103 case OP_STORER4_MEMBASE_REG:
4104 g_assert (arm_is_fpimm8 (ins->inst_offset));
4105 ARM_CVTD (code, ARM_VFP_F0, ins->sreg1);
4106 ARM_FSTS (code, ARM_VFP_F0, ins->inst_destbasereg, ins->inst_offset);
4108 case OP_LOADR4_MEMBASE:
4109 g_assert (arm_is_fpimm8 (ins->inst_offset));
4110 ARM_FLDS (code, ARM_VFP_F0, ins->inst_basereg, ins->inst_offset);
4111 ARM_CVTS (code, ins->dreg, ARM_VFP_F0);
4113 case OP_ICONV_TO_R_UN: {
4114 g_assert_not_reached ();
4117 case OP_ICONV_TO_R4:
4118 ARM_FMSR (code, ARM_VFP_F0, ins->sreg1);
4119 ARM_FSITOS (code, ARM_VFP_F0, ARM_VFP_F0);
4120 ARM_CVTS (code, ins->dreg, ARM_VFP_F0);
4122 case OP_ICONV_TO_R8:
4123 ARM_FMSR (code, ARM_VFP_F0, ins->sreg1);
4124 ARM_FSITOD (code, ins->dreg, ARM_VFP_F0);
4128 if (mono_method_signature (cfg->method)->ret->type == MONO_TYPE_R4) {
4129 ARM_CVTD (code, ARM_VFP_F0, ins->sreg1);
4130 ARM_FMRS (code, ARMREG_R0, ARM_VFP_F0);
4132 ARM_FMRRD (code, ARMREG_R0, ARMREG_R1, ins->sreg1);
4138 case OP_FCONV_TO_I1:
4139 code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 1, TRUE);
4141 case OP_FCONV_TO_U1:
4142 code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 1, FALSE);
4144 case OP_FCONV_TO_I2:
4145 code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 2, TRUE);
4147 case OP_FCONV_TO_U2:
4148 code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 2, FALSE);
4150 case OP_FCONV_TO_I4:
4152 code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 4, TRUE);
4154 case OP_FCONV_TO_U4:
4156 code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 4, FALSE);
4158 case OP_FCONV_TO_I8:
4159 case OP_FCONV_TO_U8:
4160 g_assert_not_reached ();
4161 /* Implemented as helper calls */
4163 case OP_LCONV_TO_R_UN:
4164 g_assert_not_reached ();
4165 /* Implemented as helper calls */
4167 case OP_LCONV_TO_OVF_I4_2: {
4168 guint8 *high_bit_not_set, *valid_negative, *invalid_negative, *valid_positive;
4170 * Valid ints: 0xffffffff:8000000 to 00000000:0x7f000000
4173 ARM_CMP_REG_IMM8 (code, ins->sreg1, 0);
4174 high_bit_not_set = code;
4175 ARM_B_COND (code, ARMCOND_GE, 0); /*branch if bit 31 of the lower part is not set*/
4177 ARM_CMN_REG_IMM8 (code, ins->sreg2, 1); /*This have the same effect as CMP reg, 0xFFFFFFFF */
4178 valid_negative = code;
4179 ARM_B_COND (code, ARMCOND_EQ, 0); /*branch if upper part == 0xFFFFFFFF (lower part has bit 31 set) */
4180 invalid_negative = code;
4181 ARM_B_COND (code, ARMCOND_AL, 0);
4183 arm_patch (high_bit_not_set, code);
4185 ARM_CMP_REG_IMM8 (code, ins->sreg2, 0);
4186 valid_positive = code;
4187 ARM_B_COND (code, ARMCOND_EQ, 0); /*branch if upper part == 0 (lower part has bit 31 clear)*/
4189 arm_patch (invalid_negative, code);
4190 EMIT_COND_SYSTEM_EXCEPTION_FLAGS (ARMCOND_AL, "OverflowException");
4192 arm_patch (valid_negative, code);
4193 arm_patch (valid_positive, code);
4195 if (ins->dreg != ins->sreg1)
4196 ARM_MOV_REG_REG (code, ins->dreg, ins->sreg1);
4201 ARM_FPA_ADFD (code, ins->dreg, ins->sreg1, ins->sreg2);
4204 ARM_FPA_SUFD (code, ins->dreg, ins->sreg1, ins->sreg2);
4207 ARM_FPA_MUFD (code, ins->dreg, ins->sreg1, ins->sreg2);
4210 ARM_FPA_DVFD (code, ins->dreg, ins->sreg1, ins->sreg2);
4213 ARM_MNFD (code, ins->dreg, ins->sreg1);
4215 #elif defined(ARM_FPU_VFP)
4217 ARM_VFP_ADDD (code, ins->dreg, ins->sreg1, ins->sreg2);
4220 ARM_VFP_SUBD (code, ins->dreg, ins->sreg1, ins->sreg2);
4223 ARM_VFP_MULD (code, ins->dreg, ins->sreg1, ins->sreg2);
4226 ARM_VFP_DIVD (code, ins->dreg, ins->sreg1, ins->sreg2);
4229 ARM_NEGD (code, ins->dreg, ins->sreg1);
4234 g_assert_not_reached ();
4238 ARM_FCMP (code, ARM_FPA_CMF, ins->sreg1, ins->sreg2);
4239 #elif defined(ARM_FPU_VFP)
4240 ARM_CMPD (code, ins->sreg1, ins->sreg2);
4246 ARM_FCMP (code, ARM_FPA_CMF, ins->sreg1, ins->sreg2);
4247 #elif defined(ARM_FPU_VFP)
4248 ARM_CMPD (code, ins->sreg1, ins->sreg2);
4251 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 0, ARMCOND_NE);
4252 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_EQ);
4256 ARM_FCMP (code, ARM_FPA_CMF, ins->sreg1, ins->sreg2);
4257 #elif defined(ARM_FPU_VFP)
4258 ARM_CMPD (code, ins->sreg1, ins->sreg2);
4261 ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
4262 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_MI);
4266 ARM_FCMP (code, ARM_FPA_CMF, ins->sreg1, ins->sreg2);
4267 #elif defined(ARM_FPU_VFP)
4268 ARM_CMPD (code, ins->sreg1, ins->sreg2);
4271 ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
4272 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_MI);
4273 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_VS);
4278 ARM_FCMP (code, ARM_FPA_CMF, ins->sreg2, ins->sreg1);
4279 #elif defined(ARM_FPU_VFP)
4280 ARM_CMPD (code, ins->sreg2, ins->sreg1);
4283 ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
4284 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_MI);
4289 ARM_FCMP (code, ARM_FPA_CMF, ins->sreg2, ins->sreg1);
4290 #elif defined(ARM_FPU_VFP)
4291 ARM_CMPD (code, ins->sreg2, ins->sreg1);
4294 ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
4295 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_MI);
4296 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_VS);
4298 /* ARM FPA flags table:
4299 * N Less than ARMCOND_MI
4300 * Z Equal ARMCOND_EQ
4301 * C Greater Than or Equal ARMCOND_CS
4302 * V Unordered ARMCOND_VS
4305 EMIT_COND_BRANCH (ins, OP_IBEQ - OP_IBEQ);
4308 EMIT_COND_BRANCH (ins, OP_IBNE_UN - OP_IBEQ);
4311 EMIT_COND_BRANCH_FLAGS (ins, ARMCOND_MI); /* N set */
4314 EMIT_COND_BRANCH_FLAGS (ins, ARMCOND_VS); /* V set */
4315 EMIT_COND_BRANCH_FLAGS (ins, ARMCOND_MI); /* N set */
4321 g_assert_not_reached ();
4325 EMIT_COND_BRANCH_FLAGS (ins, ARMCOND_GE);
4327 /* FPA requires EQ even thou the docs suggests that just CS is enough */
4328 EMIT_COND_BRANCH_FLAGS (ins, ARMCOND_EQ);
4329 EMIT_COND_BRANCH_FLAGS (ins, ARMCOND_CS);
4333 EMIT_COND_BRANCH_FLAGS (ins, ARMCOND_VS); /* V set */
4334 EMIT_COND_BRANCH_FLAGS (ins, ARMCOND_GE);
4339 if (ins->dreg != ins->sreg1)
4340 ARM_MVFD (code, ins->dreg, ins->sreg1);
4341 #elif defined(ARM_FPU_VFP)
4342 ARM_ABSD (code, ARM_VFP_D1, ins->sreg1);
4343 ARM_FLDD (code, ARM_VFP_D0, ARMREG_PC, 0);
4345 *(guint32*)code = 0xffffffff;
4347 *(guint32*)code = 0x7fefffff;
4349 ARM_CMPD (code, ARM_VFP_D1, ARM_VFP_D0);
4351 EMIT_COND_SYSTEM_EXCEPTION_FLAGS (ARMCOND_GT, "ArithmeticException");
4352 ARM_CMPD (code, ins->sreg1, ins->sreg1);
4354 EMIT_COND_SYSTEM_EXCEPTION_FLAGS (ARMCOND_VS, "ArithmeticException");
4356 ARM_CPYD (code, ins->dreg, ins->sreg1);
4361 g_warning ("unknown opcode %s in %s()\n", mono_inst_name (ins->opcode), __FUNCTION__);
4362 g_assert_not_reached ();
4365 if ((cfg->opt & MONO_OPT_BRANCH) && ((code - cfg->native_code - offset) > max_len)) {
4366 g_warning ("wrong maximal instruction length of instruction %s (expected %d, got %d)",
4367 mono_inst_name (ins->opcode), max_len, code - cfg->native_code - offset);
4368 g_assert_not_reached ();
4374 last_offset = offset;
4377 cfg->code_len = code - cfg->native_code;
4380 #endif /* DISABLE_JIT */
4382 #ifdef HAVE_AEABI_READ_TP
4383 void __aeabi_read_tp (void);
4387 mono_arch_register_lowlevel_calls (void)
4389 /* The signature doesn't matter */
4390 mono_register_jit_icall (mono_arm_throw_exception, "mono_arm_throw_exception", mono_create_icall_signature ("void"), TRUE);
4391 mono_register_jit_icall (mono_arm_throw_exception_by_token, "mono_arm_throw_exception_by_token", mono_create_icall_signature ("void"), TRUE);
4393 #ifndef MONO_CROSS_COMPILE
4394 #ifdef HAVE_AEABI_READ_TP
4395 mono_register_jit_icall (__aeabi_read_tp, "__aeabi_read_tp", mono_create_icall_signature ("void"), TRUE);
/*
 * patch_lis_ori:
 * Patch a two-instruction 32-bit constant load in place: the upper 16 bits
 * of VAL go into the immediate half-word of the first instruction and the
 * lower 16 bits into the second.
 * NOTE(review): this matches a PPC lis/ori pair, not an ARM sequence; it is
 * only reached from g_assert_not_reached () paths below — verify before use.
 * The macro as found was unterminated (the closing "} while (0)" was
 * missing and the trailing backslash continued into the next definition).
 */
#define patch_lis_ori(ip,val) do {\
		guint16 *__lis_ori = (guint16*)(ip);	\
		__lis_ori [1] = (((guint32)(val)) >> 16) & 0xffff;	\
		__lis_ori [3] = ((guint32)(val)) & 0xffff;	\
	} while (0)
/*
 * mono_arch_patch_code:
 * Resolve every MonoJumpInfo entry in JI against the generated native CODE
 * of METHOD and patch the corresponding instruction(s) in place.
 * NOTE(review): this view of the function is missing several lines (return
 * type, braces, break statements); only comments were added here.
 */
4407 mono_arch_patch_code (MonoMethod *method, MonoDomain *domain, guint8 *code, MonoJumpInfo *ji, gboolean run_cctors)
4409 MonoJumpInfo *patch_info;
/* run_cctors is FALSE when compiling AOT images */
4410 gboolean compile_aot = !run_cctors;
4412 for (patch_info = ji; patch_info; patch_info = patch_info->next) {
/* ip.i is an offset from the start of the generated code */
4413 unsigned char *ip = patch_info->ip.i + code;
4414 const unsigned char *target;
4416 if (patch_info->type == MONO_PATCH_INFO_SWITCH && !compile_aot) {
4417 gpointer *jt = (gpointer*)(ip + 8);
4419 /* jt is the inlined jump table, 2 instructions after ip
4420 * In the normal case we store the absolute addresses,
4421 * otherwise the displacements.
4423 for (i = 0; i < patch_info->data.table->table_size; i++)
4424 jt [i] = code + (int)patch_info->data.table->table [i];
4427 target = mono_resolve_patch_target (method, domain, code, patch_info, run_cctors);
4430 switch (patch_info->type) {
4431 case MONO_PATCH_INFO_BB:
4432 case MONO_PATCH_INFO_LABEL:
4435 /* No need to patch these */
/* NOTE(review): every case below up to MONO_PATCH_INFO_NONE is guarded by
 * g_assert_not_reached () — presumably dead code inherited from the PPC
 * backend (patch_lis_ori is a lis/ori patcher); verify before removing. */
4440 switch (patch_info->type) {
4441 case MONO_PATCH_INFO_IP:
4442 g_assert_not_reached ();
4443 patch_lis_ori (ip, ip);
4445 case MONO_PATCH_INFO_METHOD_REL:
4446 g_assert_not_reached ();
4447 *((gpointer *)(ip)) = code + patch_info->data.offset;
4449 case MONO_PATCH_INFO_METHODCONST:
4450 case MONO_PATCH_INFO_CLASS:
4451 case MONO_PATCH_INFO_IMAGE:
4452 case MONO_PATCH_INFO_FIELD:
4453 case MONO_PATCH_INFO_VTABLE:
4454 case MONO_PATCH_INFO_IID:
4455 case MONO_PATCH_INFO_SFLDA:
4456 case MONO_PATCH_INFO_LDSTR:
4457 case MONO_PATCH_INFO_TYPE_FROM_HANDLE:
4458 case MONO_PATCH_INFO_LDTOKEN:
4459 g_assert_not_reached ();
4460 /* from OP_AOTCONST : lis + ori */
4461 patch_lis_ori (ip, target);
4463 case MONO_PATCH_INFO_R4:
4464 case MONO_PATCH_INFO_R8:
4465 g_assert_not_reached ();
4466 *((gconstpointer *)(ip + 2)) = patch_info->data.target;
4468 case MONO_PATCH_INFO_EXC_NAME:
4469 g_assert_not_reached ();
4470 *((gconstpointer *)(ip + 1)) = patch_info->data.name;
4472 case MONO_PATCH_INFO_NONE:
4473 case MONO_PATCH_INFO_BB_OVF:
4474 case MONO_PATCH_INFO_EXC_OVF:
4475 /* everything is dealt with at epilog output time */
/* default: the generic ARM patcher rewrites the branch/constant load at ip */
4480 arm_patch_general (domain, ip, target);
4487 * Stack frame layout:
4489 * ------------------- fp
4490 * MonoLMF structure or saved registers
4491 * -------------------
4493 * -------------------
4495 * -------------------
4496 * optional 8 bytes for tracing
4497 * -------------------
4498 * param area size is cfg->param_area
4499 * ------------------- sp
/*
 * mono_arch_emit_prolog:
 * Emit the method prologue: save callee-saved registers (or the full
 * register area of a MonoLMF when method->save_lmf), allocate and align the
 * stack frame, store register-allocated arguments to their home slots, and
 * set up LMF / tracing / sequence-point bookkeeping.
 * NOTE(review): this view of the function is missing several lines (return
 * type, some declarations, braces, else keywords); only comments added.
 */
4502 mono_arch_emit_prolog (MonoCompile *cfg)
4504 MonoMethod *method = cfg->method;
4506 MonoMethodSignature *sig;
4508 int alloc_size, pos, max_offset, i, rot_amount;
4513 int prev_sp_offset, reg_offset;
/* enable per-method tracing when requested */
4515 if (mono_jit_trace_calls != NULL && mono_trace_eval (method))
/* initial code buffer estimate; grown later if the epilog/exceptions need more */
4518 sig = mono_method_signature (method);
4519 cfg->code_size = 256 + sig->param_count * 20;
4520 code = cfg->native_code = g_malloc (cfg->code_size);
/* CFA starts at the incoming sp */
4522 mono_emit_unwind_op_def_cfa (cfg, code, ARMREG_SP, 0);
4524 ARM_MOV_REG_REG (code, ARMREG_IP, ARMREG_SP);
4526 alloc_size = cfg->stack_offset;
4529 if (!method->save_lmf) {
4530 /* We save SP by storing it into IP and saving IP */
4531 ARM_PUSH (code, (cfg->used_int_regs | (1 << ARMREG_IP) | (1 << ARMREG_LR)));
4532 prev_sp_offset = 8; /* ip and lr */
4533 for (i = 0; i < 16; ++i) {
4534 if (cfg->used_int_regs & (1 << i))
4535 prev_sp_offset += 4;
4537 mono_emit_unwind_op_def_cfa_offset (cfg, code, prev_sp_offset);
/* record the unwind offset of every saved register */
4539 for (i = 0; i < 16; ++i) {
4540 if ((cfg->used_int_regs & (1 << i)) || (i == ARMREG_IP) || (i == ARMREG_LR)) {
4541 mono_emit_unwind_op_offset (cfg, code, i, (- prev_sp_offset) + reg_offset);
/* save_lmf path: push r4-r12, lr (mask 0x5ff0) so the LMF holds them */
4546 ARM_PUSH (code, 0x5ff0);
4547 prev_sp_offset = 4 * 10; /* all but r0-r3, sp and pc */
4548 mono_emit_unwind_op_def_cfa_offset (cfg, code, prev_sp_offset);
4550 for (i = 0; i < 16; ++i) {
4551 if ((i > ARMREG_R3) && (i != ARMREG_SP) && (i != ARMREG_PC)) {
4552 mono_emit_unwind_op_offset (cfg, code, i, (- prev_sp_offset) + reg_offset);
/* reserve the rest of the MonoLMF beyond the registers already pushed */
4556 pos += sizeof (MonoLMF) - prev_sp_offset;
4560 // align to MONO_ARCH_FRAME_ALIGNMENT bytes
4561 if (alloc_size & (MONO_ARCH_FRAME_ALIGNMENT - 1)) {
4562 alloc_size += MONO_ARCH_FRAME_ALIGNMENT - 1;
4563 alloc_size &= ~(MONO_ARCH_FRAME_ALIGNMENT - 1);
4566 /* the stack used in the pushed regs */
4567 if (prev_sp_offset & 4)
4569 cfg->stack_usage = alloc_size;
/* subtract the frame size from sp; use an 8-bit rotated imm when possible */
4571 if ((i = mono_arm_is_rotated_imm8 (alloc_size, &rot_amount)) >= 0) {
4572 ARM_SUB_REG_IMM (code, ARMREG_SP, ARMREG_SP, i, rot_amount);
4574 code = mono_arm_emit_load_imm (code, ARMREG_IP, alloc_size);
4575 ARM_SUB_REG_REG (code, ARMREG_SP, ARMREG_SP, ARMREG_IP);
4577 mono_emit_unwind_op_def_cfa_offset (cfg, code, prev_sp_offset + alloc_size);
4579 if (cfg->frame_reg != ARMREG_SP) {
4580 ARM_MOV_REG_REG (code, cfg->frame_reg, ARMREG_SP);
4581 mono_emit_unwind_op_def_cfa_reg (cfg, code, cfg->frame_reg);
4583 //g_print ("prev_sp_offset: %d, alloc_size:%d\n", prev_sp_offset, alloc_size);
4584 prev_sp_offset += alloc_size;
4586 /* compute max_offset in order to use short forward jumps
4587 * we could skip do it on arm because the immediate displacement
4588 * for jumps is large enough, it may be useful later for constant pools
4591 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
4592 MonoInst *ins = bb->code;
4593 bb->max_offset = max_offset;
4595 if (cfg->prof_options & MONO_PROFILE_COVERAGE)
4598 MONO_BB_FOR_EACH_INS (bb, ins)
4599 max_offset += ((guint8 *)ins_get_spec (ins->opcode))[MONO_INST_LEN];
4602 /* store runtime generic context */
4603 if (cfg->rgctx_var) {
4604 MonoInst *ins = cfg->rgctx_var;
4606 g_assert (ins->opcode == OP_REGOFFSET);
4608 if (arm_is_imm12 (ins->inst_offset)) {
4609 ARM_STR_IMM (code, MONO_ARCH_RGCTX_REG, ins->inst_basereg, ins->inst_offset);
4611 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_offset);
4612 ARM_STR_REG_REG (code, MONO_ARCH_RGCTX_REG, ins->inst_basereg, ARMREG_LR);
4616 /* load arguments allocated to register from the stack */
4619 cinfo = get_call_info (NULL, sig, sig->pinvoke);
/* spill the hidden valuetype-return address argument to its stack slot */
4621 if (MONO_TYPE_ISSTRUCT (sig->ret) && cinfo->ret.storage != RegTypeStructByVal) {
4622 ArgInfo *ainfo = &cinfo->ret;
4623 inst = cfg->vret_addr;
4624 g_assert (arm_is_imm12 (inst->inst_offset));
4625 ARM_STR_IMM (code, ainfo->reg, inst->inst_basereg, inst->inst_offset);
4628 if (sig->call_convention == MONO_CALL_VARARG) {
4629 ArgInfo *cookie = &cinfo->sig_cookie;
4631 /* Save the sig cookie address */
4632 g_assert (cookie->storage == RegTypeBase);
4634 g_assert (arm_is_imm12 (prev_sp_offset + cookie->offset));
4635 g_assert (arm_is_imm12 (cfg->sig_cookie));
4636 ARM_ADD_REG_IMM8 (code, ARMREG_IP, cfg->frame_reg, prev_sp_offset + cookie->offset);
4637 ARM_STR_IMM (code, ARMREG_IP, cfg->frame_reg, cfg->sig_cookie);
/* move each incoming argument from its calling-convention location to its home */
4640 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
4641 ArgInfo *ainfo = cinfo->args + i;
4642 inst = cfg->args [pos];
4644 if (cfg->verbose_level > 2)
4645 g_print ("Saving argument %d (type: %d)\n", i, ainfo->storage);
4646 if (inst->opcode == OP_REGVAR) {
4647 if (ainfo->storage == RegTypeGeneral)
4648 ARM_MOV_REG_REG (code, inst->dreg, ainfo->reg);
4649 else if (ainfo->storage == RegTypeFP) {
4650 g_assert_not_reached ();
4651 } else if (ainfo->storage == RegTypeBase) {
4652 if (arm_is_imm12 (prev_sp_offset + ainfo->offset)) {
4653 ARM_LDR_IMM (code, inst->dreg, ARMREG_SP, (prev_sp_offset + ainfo->offset));
/* NOTE(review): the imm12 branch above loads from prev_sp_offset +
 * ainfo->offset but this fallback uses inst->inst_offset — looks
 * inconsistent; verify which offset is correct for large offsets */
4655 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
4656 ARM_LDR_REG_REG (code, inst->dreg, ARMREG_SP, ARMREG_IP);
4659 g_assert_not_reached ();
4661 if (cfg->verbose_level > 2)
4662 g_print ("Argument %d assigned to register %s\n", pos, mono_arch_regname (inst->dreg));
4664 /* the argument should be put on the stack: FIXME handle size != word */
4665 if (ainfo->storage == RegTypeGeneral || ainfo->storage == RegTypeIRegPair) {
4666 switch (ainfo->size) {
4668 if (arm_is_imm12 (inst->inst_offset))
4669 ARM_STRB_IMM (code, ainfo->reg, inst->inst_basereg, inst->inst_offset);
4671 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
4672 ARM_STRB_REG_REG (code, ainfo->reg, inst->inst_basereg, ARMREG_IP);
4676 if (arm_is_imm8 (inst->inst_offset)) {
4677 ARM_STRH_IMM (code, ainfo->reg, inst->inst_basereg, inst->inst_offset);
4679 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
4680 ARM_STRH_REG_REG (code, ainfo->reg, inst->inst_basereg, ARMREG_IP);
/* 8-byte (register pair) case: two word stores */
4684 g_assert (arm_is_imm12 (inst->inst_offset));
4685 ARM_STR_IMM (code, ainfo->reg, inst->inst_basereg, inst->inst_offset);
4686 g_assert (arm_is_imm12 (inst->inst_offset + 4));
4687 ARM_STR_IMM (code, ainfo->reg + 1, inst->inst_basereg, inst->inst_offset + 4);
4690 if (arm_is_imm12 (inst->inst_offset)) {
4691 ARM_STR_IMM (code, ainfo->reg, inst->inst_basereg, inst->inst_offset);
4693 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
4694 ARM_STR_REG_REG (code, ainfo->reg, inst->inst_basereg, ARMREG_IP);
/* RegTypeBaseGen: 64-bit arg split between r3 (low) and the stack (high) */
4698 } else if (ainfo->storage == RegTypeBaseGen) {
4699 g_assert (arm_is_imm12 (prev_sp_offset + ainfo->offset));
4700 g_assert (arm_is_imm12 (inst->inst_offset));
4701 ARM_LDR_IMM (code, ARMREG_LR, ARMREG_SP, (prev_sp_offset + ainfo->offset));
4702 ARM_STR_IMM (code, ARMREG_LR, inst->inst_basereg, inst->inst_offset + 4);
4703 ARM_STR_IMM (code, ARMREG_R3, inst->inst_basereg, inst->inst_offset);
4704 } else if (ainfo->storage == RegTypeBase) {
4705 if (arm_is_imm12 (prev_sp_offset + ainfo->offset)) {
4706 ARM_LDR_IMM (code, ARMREG_LR, ARMREG_SP, (prev_sp_offset + ainfo->offset));
4708 code = mono_arm_emit_load_imm (code, ARMREG_IP, prev_sp_offset + ainfo->offset);
4709 ARM_LDR_REG_REG (code, ARMREG_LR, ARMREG_SP, ARMREG_IP);
4712 switch (ainfo->size) {
4714 if (arm_is_imm8 (inst->inst_offset)) {
4715 ARM_STRB_IMM (code, ARMREG_LR, inst->inst_basereg, inst->inst_offset);
4717 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
4718 ARM_STRB_REG_REG (code, ARMREG_LR, inst->inst_basereg, ARMREG_IP);
4722 if (arm_is_imm8 (inst->inst_offset)) {
4723 ARM_STRH_IMM (code, ARMREG_LR, inst->inst_basereg, inst->inst_offset);
4725 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
4726 ARM_STRH_REG_REG (code, ARMREG_LR, inst->inst_basereg, ARMREG_IP);
/* 8-byte stack argument: copy the two words separately via lr */
4730 if (arm_is_imm12 (inst->inst_offset)) {
4731 ARM_STR_IMM (code, ARMREG_LR, inst->inst_basereg, inst->inst_offset);
4733 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
4734 ARM_STR_REG_REG (code, ARMREG_LR, inst->inst_basereg, ARMREG_IP);
4736 if (arm_is_imm12 (prev_sp_offset + ainfo->offset + 4)) {
4737 ARM_LDR_IMM (code, ARMREG_LR, ARMREG_SP, (prev_sp_offset + ainfo->offset + 4));
4739 code = mono_arm_emit_load_imm (code, ARMREG_IP, prev_sp_offset + ainfo->offset + 4);
4740 ARM_LDR_REG_REG (code, ARMREG_LR, ARMREG_SP, ARMREG_IP);
4742 if (arm_is_imm12 (inst->inst_offset + 4)) {
4743 ARM_STR_IMM (code, ARMREG_LR, inst->inst_basereg, inst->inst_offset + 4);
4745 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset + 4);
4746 ARM_STR_REG_REG (code, ARMREG_LR, inst->inst_basereg, ARMREG_IP);
4750 if (arm_is_imm12 (inst->inst_offset)) {
4751 ARM_STR_IMM (code, ARMREG_LR, inst->inst_basereg, inst->inst_offset);
4753 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
4754 ARM_STR_REG_REG (code, ARMREG_LR, inst->inst_basereg, ARMREG_IP);
4758 } else if (ainfo->storage == RegTypeFP) {
4759 g_assert_not_reached ();
/* struct passed (partially) in registers: store the register part, then
 * memcpy whatever spilled to the caller's out area */
4760 } else if (ainfo->storage == RegTypeStructByVal) {
4761 int doffset = inst->inst_offset;
4765 size = mini_type_stack_size_full (cfg->generic_sharing_context, inst->inst_vtype, NULL, sig->pinvoke);
4766 for (cur_reg = 0; cur_reg < ainfo->size; ++cur_reg) {
4767 if (arm_is_imm12 (doffset)) {
4768 ARM_STR_IMM (code, ainfo->reg + cur_reg, inst->inst_basereg, doffset);
4770 code = mono_arm_emit_load_imm (code, ARMREG_IP, doffset);
4771 ARM_STR_REG_REG (code, ainfo->reg + cur_reg, inst->inst_basereg, ARMREG_IP);
4773 soffset += sizeof (gpointer);
4774 doffset += sizeof (gpointer);
4776 if (ainfo->vtsize) {
4777 /* FIXME: handle overrun! with struct sizes not multiple of 4 */
4778 //g_print ("emit_memcpy (prev_sp_ofs: %d, ainfo->offset: %d, soffset: %d)\n", prev_sp_offset, ainfo->offset, soffset);
4779 code = emit_memcpy (code, ainfo->vtsize * sizeof (gpointer), inst->inst_basereg, doffset, ARMREG_SP, prev_sp_offset + ainfo->offset);
4781 } else if (ainfo->storage == RegTypeStructByAddr) {
4782 g_assert_not_reached ();
4783 /* FIXME: handle overrun! with struct sizes not multiple of 4 */
4784 code = emit_memcpy (code, ainfo->vtsize * sizeof (gpointer), inst->inst_basereg, inst->inst_offset, ainfo->reg, 0);
4786 g_assert_not_reached ();
/* native-to-managed wrappers must attach the thread to the runtime */
4791 if (method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED) {
4792 if (cfg->compile_aot)
4793 /* AOT code is only used in the root domain */
4794 code = mono_arm_emit_load_imm (code, ARMREG_R0, 0);
4796 code = mono_arm_emit_load_imm (code, ARMREG_R0, (guint32)cfg->domain);
4797 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_INTERNAL_METHOD,
4798 (gpointer)"mono_jit_thread_attach");
4799 code = emit_call_seq (cfg, code);
4802 if (method->save_lmf) {
4803 gboolean get_lmf_fast = FALSE;
4805 #ifdef HAVE_AEABI_READ_TP
/* fast path: read the LMF address straight out of TLS via __aeabi_read_tp */
4806 gint32 lmf_addr_tls_offset = mono_get_lmf_addr_tls_offset ();
4808 if (lmf_addr_tls_offset != -1) {
4809 get_lmf_fast = TRUE;
4811 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_INTERNAL_METHOD,
4812 (gpointer)"__aeabi_read_tp");
4813 code = emit_call_seq (cfg, code);
4815 ARM_LDR_IMM (code, ARMREG_R0, ARMREG_R0, lmf_addr_tls_offset);
4816 get_lmf_fast = TRUE;
/* slow path: call the runtime helper */
4819 if (!get_lmf_fast) {
4820 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_INTERNAL_METHOD,
4821 (gpointer)"mono_get_lmf_addr");
4822 code = emit_call_seq (cfg, code);
4824 /* we build the MonoLMF structure on the stack - see mini-arm.h */
4825 /* lmf_offset is the offset from the previous stack pointer,
4826 * alloc_size is the total stack space allocated, so the offset
4827 * of MonoLMF from the current stack ptr is alloc_size - lmf_offset.
4828 * The pointer to the struct is put in r1 (new_lmf).
4829 * r2 is used as scratch
4830 * The callee-saved registers are already in the MonoLMF structure
4832 code = emit_big_add (code, ARMREG_R1, ARMREG_SP, alloc_size - lmf_offset);
4833 /* r0 is the result from mono_get_lmf_addr () */
4834 ARM_STR_IMM (code, ARMREG_R0, ARMREG_R1, G_STRUCT_OFFSET (MonoLMF, lmf_addr));
4835 /* new_lmf->previous_lmf = *lmf_addr */
4836 ARM_LDR_IMM (code, ARMREG_R2, ARMREG_R0, G_STRUCT_OFFSET (MonoLMF, previous_lmf));
4837 ARM_STR_IMM (code, ARMREG_R2, ARMREG_R1, G_STRUCT_OFFSET (MonoLMF, previous_lmf));
4838 /* *(lmf_addr) = r1 */
4839 ARM_STR_IMM (code, ARMREG_R1, ARMREG_R0, G_STRUCT_OFFSET (MonoLMF, previous_lmf));
4840 /* Skip method (only needed for trampoline LMF frames) */
4841 ARM_STR_IMM (code, ARMREG_SP, ARMREG_R1, G_STRUCT_OFFSET (MonoLMF, ebp));
4842 /* save the current IP */
4843 ARM_MOV_REG_REG (code, ARMREG_R2, ARMREG_PC);
4844 ARM_STR_IMM (code, ARMREG_R2, ARMREG_R1, G_STRUCT_OFFSET (MonoLMF, eip));
4848 code = mono_arch_instrument_prolog (cfg, mono_trace_enter_method, code, TRUE);
/* the seq-point info variable is filled from a patched inline constant */
4850 if (cfg->arch.seq_point_info_var) {
4851 MonoInst *ins = cfg->arch.seq_point_info_var;
4853 /* Initialize the variable from a GOT slot */
4854 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_SEQ_POINT_INFO, cfg->method);
4855 ARM_LDR_IMM (code, ARMREG_R0, ARMREG_PC, 0);
4857 *(gpointer*)code = NULL;
4859 ARM_LDR_REG_REG (code, ARMREG_R0, ARMREG_PC, ARMREG_R0);
4861 g_assert (ins->opcode == OP_REGOFFSET);
4863 if (arm_is_imm12 (ins->inst_offset)) {
4864 ARM_STR_IMM (code, ARMREG_R0, ins->inst_basereg, ins->inst_offset);
4866 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_offset);
4867 ARM_STR_REG_REG (code, ARMREG_R0, ins->inst_basereg, ARMREG_LR);
4871 /* Initialize ss_trigger_page_var */
4873 MonoInst *info_var = cfg->arch.seq_point_info_var;
4874 MonoInst *ss_trigger_page_var = cfg->arch.ss_trigger_page_var;
4876 int dreg = ARMREG_LR;
4878 g_assert (info_var->opcode == OP_REGOFFSET);
4879 g_assert (arm_is_imm12 (info_var->inst_offset));
4881 ARM_LDR_IMM (code, dreg, info_var->inst_basereg, info_var->inst_offset);
4882 /* Load the trigger page addr */
4883 ARM_LDR_IMM (code, dreg, dreg, G_STRUCT_OFFSET (SeqPointInfo, ss_trigger_page));
4884 ARM_STR_IMM (code, dreg, ss_trigger_page_var->inst_basereg, ss_trigger_page_var->inst_offset);
4888 cfg->code_len = code - cfg->native_code;
4889 g_assert (cfg->code_len < cfg->code_size);
/*
 * mono_arch_emit_epilog:
 * Emit the method epilogue: pop the LMF (when method->save_lmf), load a
 * struct return value into r0 if needed, deallocate the frame and restore
 * the saved registers, returning by popping lr into pc.
 * NOTE(review): this view of the function is missing several lines (return
 * type, braces, else keywords); only comments were added here.
 */
4896 mono_arch_emit_epilog (MonoCompile *cfg)
4898 MonoMethod *method = cfg->method;
4899 int pos, i, rot_amount;
4900 int max_epilog_size = 16 + 20*4;
4904 if (cfg->method->save_lmf)
4905 max_epilog_size += 128;
4907 if (mono_jit_trace_calls != NULL)
4908 max_epilog_size += 50;
4910 if (cfg->prof_options & MONO_PROFILE_ENTER_LEAVE)
4911 max_epilog_size += 50;
/* grow the native code buffer until the epilog is guaranteed to fit */
4913 while (cfg->code_len + max_epilog_size > (cfg->code_size - 16)) {
4914 cfg->code_size *= 2;
4915 cfg->native_code = g_realloc (cfg->native_code, cfg->code_size);
4916 mono_jit_stats.code_reallocs++;
4920 * Keep in sync with OP_JMP
4922 code = cfg->native_code + cfg->code_len;
4924 if (mono_jit_trace_calls != NULL && mono_trace_eval (method)) {
4925 code = mono_arch_instrument_epilog (cfg, mono_trace_leave_method, code, TRUE);
4929 /* Load returned vtypes into registers if needed */
4930 cinfo = cfg->arch.cinfo;
4931 if (cinfo->ret.storage == RegTypeStructByVal) {
4932 MonoInst *ins = cfg->ret;
4934 if (arm_is_imm12 (ins->inst_offset)) {
4935 ARM_LDR_IMM (code, ARMREG_R0, ins->inst_basereg, ins->inst_offset);
4937 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_offset);
4938 ARM_LDR_REG_REG (code, ARMREG_R0, ins->inst_basereg, ARMREG_LR);
4942 if (method->save_lmf) {
4944 /* all but r0-r3, sp and pc */
4945 pos += sizeof (MonoLMF) - (4 * 10);
4947 /* r2 contains the pointer to the current LMF */
4948 code = emit_big_add (code, ARMREG_R2, cfg->frame_reg, cfg->stack_usage - lmf_offset);
4949 /* ip = previous_lmf */
4950 ARM_LDR_IMM (code, ARMREG_IP, ARMREG_R2, G_STRUCT_OFFSET (MonoLMF, previous_lmf));
/* lr = lmf_addr */
4952 ARM_LDR_IMM (code, ARMREG_LR, ARMREG_R2, G_STRUCT_OFFSET (MonoLMF, lmf_addr));
4953 /* *(lmf_addr) = previous_lmf */
4954 ARM_STR_IMM (code, ARMREG_IP, ARMREG_LR, G_STRUCT_OFFSET (MonoLMF, previous_lmf));
4955 /* FIXME: speedup: there is no actual need to restore the registers if
4956 * we didn't actually change them (idea from Zoltan).
4959 /* point sp at the registers to restore: 10 is 14 -4, because we skip r0-r3 */
4960 ARM_ADD_REG_IMM8 (code, ARMREG_SP, ARMREG_R2, (sizeof (MonoLMF) - 10 * sizeof (gulong)));
4961 ARM_POP_NWB (code, 0xaff0); /* restore ip to sp and lr to pc */
/* non-LMF path: unwind the frame, then pop saved regs with lr -> pc */
4963 if ((i = mono_arm_is_rotated_imm8 (cfg->stack_usage, &rot_amount)) >= 0) {
4964 ARM_ADD_REG_IMM (code, ARMREG_SP, cfg->frame_reg, i, rot_amount);
4966 code = mono_arm_emit_load_imm (code, ARMREG_IP, cfg->stack_usage);
/* NOTE(review): the imm branch above bases the add on cfg->frame_reg but
 * this one adds to ARMREG_SP — looks inconsistent when frame_reg != sp;
 * verify for methods with a dedicated frame register */
4967 ARM_ADD_REG_REG (code, ARMREG_SP, ARMREG_SP, ARMREG_IP);
4969 /* FIXME: add v4 thumb interworking support */
4970 ARM_POP_NWB (code, cfg->used_int_regs | ((1 << ARMREG_SP) | (1 << ARMREG_PC)));
4973 cfg->code_len = code - cfg->native_code;
4975 g_assert (cfg->code_len < cfg->code_size);
4979 /* remove once throw_exception_by_name is eliminated */
/*
 * exception_id_by_name:
 *
 *   Map the name of a corlib exception class to its MONO_EXC_* intrinsic id,
 * used to index the per-exception throw stubs in mono_arch_emit_exceptions ().
 * Aborts via g_error () for any name without an intrinsic id.
 * NOTE(review): the return-type line and braces are elided in this extract;
 * presumably the function is `static int` — confirm against the full source.
 */
4981 exception_id_by_name (const char *name)
4983 if (strcmp (name, "IndexOutOfRangeException") == 0)
4984 return MONO_EXC_INDEX_OUT_OF_RANGE;
4985 if (strcmp (name, "OverflowException") == 0)
4986 return MONO_EXC_OVERFLOW;
4987 if (strcmp (name, "ArithmeticException") == 0)
4988 return MONO_EXC_ARITHMETIC;
4989 if (strcmp (name, "DivideByZeroException") == 0)
4990 return MONO_EXC_DIVIDE_BY_ZERO;
4991 if (strcmp (name, "InvalidCastException") == 0)
4992 return MONO_EXC_INVALID_CAST;
4993 if (strcmp (name, "NullReferenceException") == 0)
4994 return MONO_EXC_NULL_REF;
4995 if (strcmp (name, "ArrayTypeMismatchException") == 0)
4996 return MONO_EXC_ARRAY_TYPE_MISMATCH;
/* Reaching here is a JIT bug: every intrinsic exception must be listed above */
4997 g_error ("Unknown intrinsic exception %s\n", name);
/*
 * mono_arch_emit_exceptions:
 *
 *   Append the out-of-line exception-throwing stubs for CFG to the end of the
 * method's native code.  Each distinct intrinsic exception gets one stub; all
 * branches that raise the same exception are patched to share it.
 * NOTE(review): several interior lines (declarations, braces, the stub's
 * branch emission) are elided in this extract.
 */
5002 mono_arch_emit_exceptions (MonoCompile *cfg)
5004 MonoJumpInfo *patch_info;
5007 guint8* exc_throw_pos [MONO_EXC_INTRINS_NUM];
5008 guint8 exc_throw_found [MONO_EXC_INTRINS_NUM];
5009 int max_epilog_size = 50;
5011 for (i = 0; i < MONO_EXC_INTRINS_NUM; i++) {
5012 exc_throw_pos [i] = NULL;
5013 exc_throw_found [i] = 0;
5016 /* count the number of exception infos */
5019 * make sure we have enough space for exceptions
/* Reserve 32 bytes per distinct exception stub, counted only once per id */
5021 for (patch_info = cfg->patch_info; patch_info; patch_info = patch_info->next) {
5022 if (patch_info->type == MONO_PATCH_INFO_EXC) {
5023 i = exception_id_by_name (patch_info->data.target);
5024 if (!exc_throw_found [i]) {
5025 max_epilog_size += 32;
5026 exc_throw_found [i] = TRUE;
/* Grow the code buffer until the stubs are guaranteed to fit */
5031 while (cfg->code_len + max_epilog_size > (cfg->code_size - 16)) {
5032 cfg->code_size *= 2;
5033 cfg->native_code = g_realloc (cfg->native_code, cfg->code_size);
5034 mono_jit_stats.code_reallocs++;
5037 code = cfg->native_code + cfg->code_len;
5039 /* add code to raise exceptions */
5040 for (patch_info = cfg->patch_info; patch_info; patch_info = patch_info->next) {
5041 switch (patch_info->type) {
5042 case MONO_PATCH_INFO_EXC: {
5043 MonoClass *exc_class;
5044 unsigned char *ip = patch_info->ip.i + cfg->native_code;
5046 i = exception_id_by_name (patch_info->data.target);
/* A stub for this exception already exists: just patch the branch to it */
5047 if (exc_throw_pos [i]) {
5048 arm_patch (ip, exc_throw_pos [i]);
5049 patch_info->type = MONO_PATCH_INFO_NONE;
5052 exc_throw_pos [i] = code;
5054 arm_patch (ip, code);
5056 exc_class = mono_class_from_name (mono_defaults.corlib, "System", patch_info->data.name);
5057 g_assert (exc_class);
/* R1 = return address of the throw site, R0 = ptr to the type token word */
5059 ARM_MOV_REG_REG (code, ARMREG_R1, ARMREG_LR);
5060 ARM_LDR_IMM (code, ARMREG_R0, ARMREG_PC, 0);
/* Repurpose the patch info to call the generic corlib-exception thrower */
5061 patch_info->type = MONO_PATCH_INFO_INTERNAL_METHOD;
5062 patch_info->data.name = "mono_arch_throw_corlib_exception";
5063 patch_info->ip.i = code - cfg->native_code;
/* Inline the exception class token in the code stream for the LDR above */
5065 *(guint32*)(gpointer)code = exc_class->type_token;
5075 cfg->code_len = code - cfg->native_code;
5077 g_assert (cfg->code_len < cfg->code_size);
5081 #endif /* #ifndef DISABLE_JIT */
/* One-time guard for the lazy TLS-offset initialization below */
5083 static gboolean tls_offset_inited = FALSE;
/*
 * mono_arch_setup_jit_tls_data:
 *
 *   Lazily cache the TLS offsets of the LMF and LMF-address slots used by
 * managed-to-native transitions.
 * NOTE(review): the bodies and braces of the following arch stubs are mostly
 * elided in this extract; only their signatures and a few statements remain.
 */
5086 mono_arch_setup_jit_tls_data (MonoJitTlsData *tls)
5088 if (!tls_offset_inited) {
5089 tls_offset_inited = TRUE;
5091 lmf_tls_offset = mono_get_lmf_tls_offset ();
5092 lmf_addr_tls_offset = mono_get_lmf_addr_tls_offset ();
/* Counterpart of setup_jit_tls_data; nothing to free on ARM (body elided) */
5097 mono_arch_free_jit_tls_data (MonoJitTlsData *tls)
/* Hook for arch-specific intrinsics; ARM provides none here (body elided) */
5102 mono_arch_emit_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
/* Debug helper for dumping instruction trees (body elided) */
5109 mono_arch_print_tree (MonoInst *tree, int arity)
/* Returns the IR instruction loading the current MonoDomain, if available */
5115 mono_arch_get_domain_intrinsic (MonoCompile* cfg)
5117 return mono_get_domain_intrinsic (cfg);
/* Offset of the patchable value inside CODE (body elided) */
5121 mono_arch_get_patch_offset (guint8 *code)
/* No register windows on ARM; no-op (body elided) */
5128 mono_arch_flush_register_windows (void)
5132 #ifdef MONO_ARCH_HAVE_IMT
/*
 * mono_arch_emit_imt_argument:
 *
 *   Arrange for the IMT/method argument of CALL to be passed in ARMREG_V5.
 * Under AOT the method is loaded via an AOTCONST patch; otherwise a PCONST
 * with the method pointer (or the explicit IMT_ARG register) is used.
 * NOTE(review): some interior lines (MonoInst declarations, braces) are
 * elided in this extract.
 */
5137 mono_arch_emit_imt_argument (MonoCompile *cfg, MonoCallInst *call, MonoInst *imt_arg)
5139 if (cfg->compile_aot) {
5140 int method_reg = mono_alloc_ireg (cfg);
/* Tell the thunk lookup that the IMT arg is passed dynamically in a reg */
5143 call->dynamic_imt_arg = TRUE;
5146 mono_call_inst_add_outarg_reg (cfg, call, imt_arg->dreg, ARMREG_V5, FALSE);
5148 MONO_INST_NEW (cfg, ins, OP_AOTCONST);
5149 ins->dreg = method_reg;
5150 ins->inst_p0 = call->method;
5151 ins->inst_c1 = MONO_PATCH_INFO_METHODCONST;
5152 MONO_ADD_INS (cfg->cbb, ins);
5154 mono_call_inst_add_outarg_reg (cfg, call, method_reg, ARMREG_V5, FALSE);
5156 } else if (cfg->generic_context || imt_arg) {
5158 /* Always pass in a register for simplicity */
5159 call->dynamic_imt_arg = TRUE;
5161 cfg->uses_rgctx_reg = TRUE;
5164 mono_call_inst_add_outarg_reg (cfg, call, imt_arg->dreg, ARMREG_V5, FALSE);
5167 int method_reg = mono_alloc_preg (cfg);
5169 MONO_INST_NEW (cfg, ins, OP_PCONST);
5170 ins->inst_p0 = call->method;
5171 ins->dreg = method_reg;
5172 MONO_ADD_INS (cfg->cbb, ins);
5174 mono_call_inst_add_outarg_reg (cfg, call, method_reg, ARMREG_V5, FALSE);
5179 #endif /* DISABLE_JIT */
/*
 * mono_arch_find_imt_method:
 *
 *   Recover the IMT method for an interface call: the thunk embeds the method
 * pointer in the code stream right after the LDR PC instruction; a 0 word
 * means AOTed code, in which case the method is passed in register V5.
 */
5182 mono_arch_find_imt_method (mgreg_t *regs, guint8 *code)
5184 guint32 *code_ptr = (guint32*)code;
5186 /* The IMT value is stored in the code stream right after the LDC instruction. */
5187 if (!IS_LDR_PC (code_ptr [0])) {
/* Print context before asserting so the broken code stream can be diagnosed */
5188 g_warning ("invalid code stream, instruction before IMT value is not a LDC in %s() (code %p value 0: 0x%x -1: 0x%x -2: 0x%x)", __FUNCTION__, code, code_ptr [2], code_ptr [1], code_ptr [0]);
5189 g_assert (IS_LDR_PC (code_ptr [0]));
5191 if (code_ptr [1] == 0)
5192 /* This is AOTed code, the IMT method is in V5 */
5193 return (MonoMethod*)regs [ARMREG_V5];
5195 return (MonoMethod*) code_ptr [1];
/*
 * mono_arch_find_static_call_vtable:
 *
 *   The vtable for a static rgctx call is passed in the RGCTX register.
 */
5199 mono_arch_find_static_call_vtable (mgreg_t *regs, guint8 *code)
5201 return (MonoVTable*) regs [MONO_ARCH_RGCTX_REG];
/* Debug switch: emit an extra equality check that traps on a wrong method */
5204 #define ENABLE_WRONG_METHOD_CHECK 0
/* Sizes (in bytes) of the code fragments emitted by the IMT thunk builder;
 * each is a multiple of the 4-byte ARM instruction size */
5205 #define BASE_SIZE (6 * 4)
5206 #define BSEARCH_ENTRY_SIZE (4 * 4)
5207 #define CMP_SIZE (3 * 4)
5208 #define BRANCH_SIZE (1 * 4)
5209 #define CALL_SIZE (2 * 4)
5210 #define WMC_SIZE (5 * 4)
/* Byte distance from A to B; 32-bit only (this is the 32-bit ARM backend) */
5211 #define DISTANCE(A, B) (((gint32)(B)) - ((gint32)(A)))
5214 arm_emit_value_and_patch_ldr (arminstr_t *code, arminstr_t *target, guint32 value)
5216 guint32 delta = DISTANCE (target, code);
5218 g_assert (delta >= 0 && delta <= 0xFFF);
5219 *target = *target | delta;
/*
 * mono_arch_build_imt_thunk:
 *
 *   Build the native IMT/interface dispatch thunk for VTABLE.  Pass 1 sizes
 * each entry's code chunk; pass 2 emits the compare/branch chain (binary
 * search entries use ARMCOND_GE branches), loading vtable slots or target
 * code addresses via PC-relative literals patched with
 * arm_emit_value_and_patch_ldr ().  FAIL_TRAMP, when set, is branched to for
 * a non-matching method (generic virtual thunk case).
 * NOTE(review): many interior lines (else branches, braces, the `start`
 * assignment, the final return) are elided in this extract.
 */
5225 mono_arch_build_imt_thunk (MonoVTable *vtable, MonoDomain *domain, MonoIMTCheckItem **imt_entries, int count,
5226 gpointer fail_tramp)
5228 int size, i, extra_space = 0;
5229 arminstr_t *code, *start, *vtable_target = NULL;
5230 gboolean large_offsets = FALSE;
5231 guint32 **constant_pool_starts;
5234 constant_pool_starts = g_new0 (guint32*, count);
/* Pass 1: compute the chunk size of every entry so the total allocation
 * can be reserved up front */
5236 for (i = 0; i < count; ++i) {
5237 MonoIMTCheckItem *item = imt_entries [i];
5238 if (item->is_equals) {
5239 gboolean fail_case = !item->check_target_idx && fail_tramp;
/* Slots whose offset does not fit an imm12 need the stack-based sequence */
5241 if (item->has_target_code || !arm_is_imm12 (DISTANCE (vtable, &vtable->vtable[item->value.vtable_slot]))) {
5242 item->chunk_size += 32;
5243 large_offsets = TRUE;
5246 if (item->check_target_idx || fail_case) {
5247 if (!item->compare_done || fail_case)
5248 item->chunk_size += CMP_SIZE;
5249 item->chunk_size += BRANCH_SIZE;
5251 #if ENABLE_WRONG_METHOD_CHECK
5252 item->chunk_size += WMC_SIZE;
5256 item->chunk_size += 16;
5257 large_offsets = TRUE;
5259 item->chunk_size += CALL_SIZE;
5261 item->chunk_size += BSEARCH_ENTRY_SIZE;
5262 imt_entries [item->check_target_idx]->compare_done = TRUE;
5264 size += item->chunk_size;
5268 size += 4 * count; /* The ARM_ADD_REG_IMM to pop the stack */
/* fail_tramp implies a generic virtual thunk, allocated from its own pool */
5271 code = mono_method_alloc_generic_virtual_thunk (domain, size);
5273 code = mono_domain_code_reserve (domain, size);
5277 printf ("building IMT thunk for class %s %s entries %d code size %d code at %p end %p vtable %p\n", vtable->klass->name_space, vtable->klass->name, count, size, start, ((guint8*)start) + size, vtable);
5278 for (i = 0; i < count; ++i) {
5279 MonoIMTCheckItem *item = imt_entries [i];
5280 printf ("method %d (%p) %s vtable slot %p is_equals %d chunk size %d\n", i, item->key, item->key->name, &vtable->vtable [item->value.vtable_slot], item->is_equals, item->chunk_size);
/* Prologue: large-offset thunks also save IP/PC so the epilogue can jump
 * through a value stored on the stack */
5285 ARM_PUSH4 (code, ARMREG_R0, ARMREG_R1, ARMREG_IP, ARMREG_PC);
5287 ARM_PUSH2 (code, ARMREG_R0, ARMREG_R1);
5288 ARM_LDR_IMM (code, ARMREG_R0, ARMREG_LR, -4);
5289 vtable_target = code;
5290 ARM_LDR_IMM (code, ARMREG_IP, ARMREG_PC, 0);
5292 /* R0 == 0 means we are called from AOT code. In this case, V5 contains the IMT method */
5293 ARM_CMP_REG_IMM8 (code, ARMREG_R0, 0);
5294 ARM_MOV_REG_REG_COND (code, ARMREG_R0, ARMREG_V5, ARMCOND_EQ);
/* Pass 2: emit the dispatch code for every entry */
5296 for (i = 0; i < count; ++i) {
5297 MonoIMTCheckItem *item = imt_entries [i];
5298 arminstr_t *imt_method = NULL, *vtable_offset_ins = NULL, *target_code_ins = NULL;
5299 gint32 vtable_offset;
5301 item->code_target = (guint8*)code;
5303 if (item->is_equals) {
5304 gboolean fail_case = !item->check_target_idx && fail_tramp;
5306 if (item->check_target_idx || fail_case) {
5307 if (!item->compare_done || fail_case) {
5309 ARM_LDR_IMM (code, ARMREG_R1, ARMREG_PC, 0);
5310 ARM_CMP_REG_REG (code, ARMREG_R0, ARMREG_R1);
5312 item->jmp_code = (guint8*)code;
5313 ARM_B_COND (code, ARMCOND_NE, 0);
5315 /*Enable the commented code to assert on wrong method*/
5316 #if ENABLE_WRONG_METHOD_CHECK
5318 ARM_LDR_IMM (code, ARMREG_R1, ARMREG_PC, 0);
5319 ARM_CMP_REG_REG (code, ARMREG_R0, ARMREG_R1);
5320 ARM_B_COND (code, ARMCOND_NE, 1);
5326 if (item->has_target_code) {
5327 target_code_ins = code;
5328 /* Load target address */
5329 ARM_LDR_IMM (code, ARMREG_R1, ARMREG_PC, 0);
5330 /* Save it to the fourth slot */
5331 ARM_STR_IMM (code, ARMREG_R1, ARMREG_SP, 3 * sizeof (gpointer));
5332 /* Restore registers and branch */
5333 ARM_POP4 (code, ARMREG_R0, ARMREG_R1, ARMREG_IP, ARMREG_PC);
5335 code = arm_emit_value_and_patch_ldr (code, target_code_ins, (gsize)item->value.target_code);
5337 vtable_offset = DISTANCE (vtable, &vtable->vtable[item->value.vtable_slot]);
5338 if (!arm_is_imm12 (vtable_offset)) {
5340 * We need to branch to a computed address but we don't have
5341 * a free register to store it, since IP must contain the
5342 * vtable address. So we push the two values to the stack, and
5343 * load them both using LDM.
5345 /* Compute target address */
5346 vtable_offset_ins = code;
5347 ARM_LDR_IMM (code, ARMREG_R1, ARMREG_PC, 0);
5348 ARM_LDR_REG_REG (code, ARMREG_R1, ARMREG_IP, ARMREG_R1);
5349 /* Save it to the fourth slot */
5350 ARM_STR_IMM (code, ARMREG_R1, ARMREG_SP, 3 * sizeof (gpointer));
5351 /* Restore registers and branch */
5352 ARM_POP4 (code, ARMREG_R0, ARMREG_R1, ARMREG_IP, ARMREG_PC);
5354 code = arm_emit_value_and_patch_ldr (code, vtable_offset_ins, vtable_offset);
/* Small-offset case: pop the scratch regs and jump through the slot */
5356 ARM_POP2 (code, ARMREG_R0, ARMREG_R1);
5358 ARM_ADD_REG_IMM8 (code, ARMREG_SP, ARMREG_SP, 2 * sizeof (gpointer));
5359 ARM_LDR_IMM (code, ARMREG_PC, ARMREG_IP, vtable_offset);
/* fail_case path: patch the NE branch here and jump to the fail trampoline */
5364 arm_patch (item->jmp_code, (guchar*)code);
5366 target_code_ins = code;
5367 /* Load target address */
5368 ARM_LDR_IMM (code, ARMREG_R1, ARMREG_PC, 0);
5369 /* Save it to the fourth slot */
5370 ARM_STR_IMM (code, ARMREG_R1, ARMREG_SP, 3 * sizeof (gpointer));
5371 /* Restore registers and branch */
5372 ARM_POP4 (code, ARMREG_R0, ARMREG_R1, ARMREG_IP, ARMREG_PC);
5374 code = arm_emit_value_and_patch_ldr (code, target_code_ins, (gsize)fail_tramp);
5375 item->jmp_code = NULL;
5379 code = arm_emit_value_and_patch_ldr (code, imt_method, (guint32)item->key);
5381 /*must emit after unconditional branch*/
5382 if (vtable_target) {
5383 code = arm_emit_value_and_patch_ldr (code, vtable_target, (guint32)vtable);
5384 item->chunk_size += 4;
5385 vtable_target = NULL;
5388 /*We reserve the space for bsearch IMT values after the first entry with an absolute jump*/
5389 constant_pool_starts [i] = code;
5391 code += extra_space;
/* Binary-search node: compare against the key and branch on >= */
5395 ARM_LDR_IMM (code, ARMREG_R1, ARMREG_PC, 0);
5396 ARM_CMP_REG_REG (code, ARMREG_R0, ARMREG_R1);
5398 item->jmp_code = (guint8*)code;
5399 ARM_B_COND (code, ARMCOND_GE, 0);
/* Pass 3: patch the search branches and fill in the reserved literal pools */
5404 for (i = 0; i < count; ++i) {
5405 MonoIMTCheckItem *item = imt_entries [i];
5406 if (item->jmp_code) {
5407 if (item->check_target_idx)
5408 arm_patch (item->jmp_code, imt_entries [item->check_target_idx]->code_target);
5410 if (i > 0 && item->is_equals) {
5412 arminstr_t *space_start = constant_pool_starts [i];
5413 for (j = i - 1; j >= 0 && !imt_entries [j]->is_equals; --j) {
5414 space_start = arm_emit_value_and_patch_ldr (space_start, (arminstr_t*)imt_entries [j]->code_target, (guint32)imt_entries [j]->key);
5421 char *buff = g_strdup_printf ("thunk_for_class_%s_%s_entries_%d", vtable->klass->name_space, vtable->klass->name, count);
5422 mono_disassemble_code (NULL, (guint8*)start, size, buff);
5427 g_free (constant_pool_starts);
/* New code must be flushed from the I-cache before it can be executed */
5429 mono_arch_flush_icache ((guint8*)start, size);
5430 mono_stats.imt_thunks_size += code - start;
5432 g_assert (DISTANCE (start, code) <= size);
/*
 * mono_arch_context_get_int_reg:
 *
 *   Fetch integer register REG from CTX; SP is stored separately from the
 * general register array.
 */
5439 mono_arch_context_get_int_reg (MonoContext *ctx, int reg)
5441 if (reg == ARMREG_SP)
5442 return (gpointer)ctx->esp;
5444 return (gpointer)ctx->regs [reg];
5448 * mono_arch_set_breakpoint:
5450 * Set a breakpoint at the native code corresponding to JI at NATIVE_OFFSET.
5451 * The location should contain code emitted by OP_SEQ_POINT.
/*
 * NOTE(review): braces and the if/else selecting between the AOT path
 * (per-method bp_addrs table) and the JIT path (patching a load of the
 * trigger page into the code stream) are elided in this extract.
 */
5454 mono_arch_set_breakpoint (MonoJitInfo *ji, guint8 *ip)
5457 guint32 native_offset = ip - (guint8*)ji->code_start;
5460 SeqPointInfo *info = mono_arch_get_seq_point_info (mono_domain_get (), ji->code_start);
/* AOT case: arm the per-sequence-point slot with the trigger page address */
5462 g_assert (native_offset % 4 == 0);
5463 g_assert (info->bp_addrs [native_offset / 4] == 0);
5464 info->bp_addrs [native_offset / 4] = bp_trigger_page;
5466 int dreg = ARMREG_LR;
5468 /* Read from another trigger page */
5469 ARM_LDR_IMM (code, dreg, ARMREG_PC, 0);
/* Embed the trigger page address in the code stream for the LDR above */
5471 *(int*)code = (int)bp_trigger_page;
5473 ARM_LDR_IMM (code, dreg, dreg, 0);
5475 mono_arch_flush_icache (code - 16, 16);
5478 /* This is currently implemented by emitting an SWI instruction, which
5479 * qemu/linux seems to convert to a SIGILL.
5481 *(int*)code = (0xef << 24) | 8;
5483 mono_arch_flush_icache (code - 4, 4);
5489 * mono_arch_clear_breakpoint:
5491 * Clear the breakpoint at IP.
/*
 * NOTE(review): braces and the non-AOT path (rewriting the patched words
 * back to NOPs inside the elided loop body) are not visible in this extract.
 */
5494 mono_arch_clear_breakpoint (MonoJitInfo *ji, guint8 *ip)
5500 guint32 native_offset = ip - (guint8*)ji->code_start;
5501 SeqPointInfo *info = mono_arch_get_seq_point_info (mono_domain_get (), ji->code_start);
/* AOT case: disarm the slot that mono_arch_set_breakpoint () armed */
5503 g_assert (native_offset % 4 == 0);
5504 g_assert (info->bp_addrs [native_offset / 4] == bp_trigger_page);
5505 info->bp_addrs [native_offset / 4] = 0;
5507 for (i = 0; i < 4; ++i)
5510 mono_arch_flush_icache (ip, code - ip);
5515 * mono_arch_start_single_stepping:
5517 * Start single stepping.
/* Revoke read access so every sequence-point load of the trigger page faults */
5520 mono_arch_start_single_stepping (void)
5522 mono_mprotect (ss_trigger_page, mono_pagesize (), 0);
5526 * mono_arch_stop_single_stepping:
5528 * Stop single stepping.
/* Restore read access: sequence-point loads of the trigger page succeed again */
5531 mono_arch_stop_single_stepping (void)
5533 mono_mprotect (ss_trigger_page, mono_pagesize (), MONO_MMAP_READ);
/*
 * Signal raised when a trigger-page access faults.
 * NOTE(review): the #if/#else selecting between the two definitions
 * (presumably per-platform, SIGBUS vs. SIGSEGV) is elided in this extract —
 * confirm the conditional in the full source.
 */
5537 #define DBG_SIGNAL SIGBUS
5539 #define DBG_SIGNAL SIGSEGV
5543 * mono_arch_is_single_step_event:
5545 * Return whenever the machine state in SIGCTX corresponds to a single
/* True when the faulting address falls on (or just past) the SS trigger page */
5549 mono_arch_is_single_step_event (void *info, void *sigctx)
5551 siginfo_t *sinfo = info;
5553 /* Sometimes the address is off by 4 */
5554 if (sinfo->si_addr >= ss_trigger_page && (guint8*)sinfo->si_addr <= (guint8*)ss_trigger_page + 128)
5561 * mono_arch_is_breakpoint_event:
5563 * Return whenever the machine state in SIGCTX corresponds to a breakpoint event.
/* True when DBG_SIGNAL fired with a fault address on the BP trigger page */
5566 mono_arch_is_breakpoint_event (void *info, void *sigctx)
5568 siginfo_t *sinfo = info;
5570 if (sinfo->si_signo == DBG_SIGNAL) {
5571 /* Sometimes the address is off by 4 */
5572 if (sinfo->si_addr >= bp_trigger_page && (guint8*)sinfo->si_addr <= (guint8*)bp_trigger_page + 128)
/* Map the faulting context IP back to the breakpoint site (body elided in
 * this extract beyond the initial IP fetch) */
5582 mono_arch_get_ip_for_breakpoint (MonoJitInfo *ji, MonoContext *ctx)
5584 guint8 *ip = MONO_CONTEXT_GET_IP (ctx);
/* Map the faulting context IP back to the single-step site (body elided in
 * this extract beyond the initial IP fetch) */
5595 mono_arch_get_ip_for_single_step (MonoJitInfo *ji, MonoContext *ctx)
5597 guint8 *ip = MONO_CONTEXT_GET_IP (ctx);
5605 * mono_arch_skip_breakpoint:
5607 * See mini-amd64.c for docs.
/* Advance past the 4-byte instruction that triggered the breakpoint fault */
5610 mono_arch_skip_breakpoint (MonoContext *ctx)
5612 MONO_CONTEXT_SET_IP (ctx, (guint8*)MONO_CONTEXT_GET_IP (ctx) + 4);
5616 * mono_arch_skip_single_step:
5618 * See mini-amd64.c for docs.
/* Advance past the 4-byte instruction that triggered the single-step fault */
5621 mono_arch_skip_single_step (MonoContext *ctx)
5623 MONO_CONTEXT_SET_IP (ctx, (guint8*)MONO_CONTEXT_GET_IP (ctx) + 4);
5627 * mono_arch_get_seq_point_info:
5629 * See mini-amd64.c for docs.
5632 mono_arch_get_seq_point_info (MonoDomain *domain, guint8 *code)
5637 // FIXME: Add a free function
5639 mono_domain_lock (domain);
5640 info = g_hash_table_lookup (domain_jit_info (domain)->arch_seq_points,
5642 mono_domain_unlock (domain);
5645 ji = mono_jit_info_table_find (domain, (char*)code);
5648 info = g_malloc0 (sizeof (SeqPointInfo) + ji->code_size);
5650 info->ss_trigger_page = ss_trigger_page;
5651 info->bp_trigger_page = bp_trigger_page;
5653 mono_domain_lock (domain);
5654 g_hash_table_insert (domain_jit_info (domain)->arch_seq_points,
5656 mono_domain_unlock (domain);