2 * mini-arm.c: ARM backend for the Mono code generator
5 * Paolo Molaro (lupus@ximian.com)
6 * Dietmar Maurer (dietmar@ximian.com)
8 * (C) 2003 Ximian, Inc.
13 #include <mono/metadata/appdomain.h>
14 #include <mono/metadata/debug-helpers.h>
15 #include <mono/utils/mono-mmap.h>
22 #include "mono/arch/arm/arm-fpa-codegen.h"
23 #elif defined(ARM_FPU_VFP)
24 #include "mono/arch/arm/arm-vfp-codegen.h"
27 #if defined(__ARM_EABI__) && defined(__linux__) && !defined(PLATFORM_ANDROID)
28 #define HAVE_AEABI_READ_TP 1
31 static gint lmf_tls_offset = -1;
32 static gint lmf_addr_tls_offset = -1;
34 /* This mutex protects architecture specific caches */
35 #define mono_mini_arch_lock() EnterCriticalSection (&mini_arch_mutex)
36 #define mono_mini_arch_unlock() LeaveCriticalSection (&mini_arch_mutex)
37 static CRITICAL_SECTION mini_arch_mutex;
39 static int v5_supported = 0;
40 static int v7_supported = 0;
41 static int thumb_supported = 0;
44 * The code generated for sequence points reads from this location, which is
45 * made read-only when single stepping is enabled.
47 static gpointer ss_trigger_page;
49 /* Enabled breakpoints read from this trigger page */
50 static gpointer bp_trigger_page;
52 /* Structure used by the sequence points in AOTed code */
54 gpointer ss_trigger_page;
55 gpointer bp_trigger_page;
56 guint8* bp_addrs [MONO_ZERO_LEN_ARRAY];
61 * floating point support: on ARM it is a mess, there are at least 3
62 * different setups, each of which is binary-incompatible with the others.
63 * 1) FPA: old and ugly, but unfortunately what current distros use
64 * the double binary format has the two words swapped. 8 double registers.
65 * Implemented usually by kernel emulation.
66 * 2) softfloat: the compiler emulates all the fp ops. Usually uses the
67 * ugly swapped double format (I guess a softfloat-vfp exists, too, though).
68 * 3) VFP: the new and actually sensible and useful FP support. Implemented
69 * in HW or kernel-emulated, requires new tools. I think this is what symbian uses.
71 * The plan is to write the FPA support first. softfloat can be tested in a chroot.
73 int mono_exc_esp_offset = 0;
75 #define arm_is_imm12(v) ((v) > -4096 && (v) < 4096)
76 #define arm_is_imm8(v) ((v) > -256 && (v) < 256)
77 #define arm_is_fpimm8(v) ((v) >= -1020 && (v) <= 1020)
79 #define LDR_MASK ((0xf << ARMCOND_SHIFT) | (3 << 26) | (1 << 22) | (1 << 20) | (15 << 12))
80 #define LDR_PC_VAL ((ARMCOND_AL << ARMCOND_SHIFT) | (1 << 26) | (0 << 22) | (1 << 20) | (15 << 12))
81 #define IS_LDR_PC(val) (((val) & LDR_MASK) == LDR_PC_VAL)
83 #define ADD_LR_PC_4 ((ARMCOND_AL << ARMCOND_SHIFT) | (1 << 25) | (1 << 23) | (ARMREG_PC << 16) | (ARMREG_LR << 12) | 4)
84 #define MOV_LR_PC ((ARMCOND_AL << ARMCOND_SHIFT) | (1 << 24) | (0xa << 20) | (ARMREG_LR << 12) | ARMREG_PC)
/*
 * mono_arch_regname:
 * Map an integer register number to its printable name ("arm_r0" .. "arm_lr")
 * for debug/diagnostic output.
 * NOTE(review): excerpt is non-contiguous; the return statements and the
 * out-of-range fallback are not visible here.
 */
88 mono_arch_regname (int reg)
90 static const char * rnames[] = {
91 "arm_r0", "arm_r1", "arm_r2", "arm_r3", "arm_v1",
92 "arm_v2", "arm_v3", "arm_v4", "arm_v5", "arm_v6",
93 "arm_v7", "arm_fp", "arm_ip", "arm_sp", "arm_lr",
/* ARM has 16 core registers; table above covers indices 0..14 plus PC elided. */
96 if (reg >= 0 && reg < 16)
/*
 * mono_arch_fregname:
 * Map a floating-point register number to its printable name
 * ("arm_f0" .. ) for debug/diagnostic output.
 * NOTE(review): excerpt is non-contiguous; the tail of the table and the
 * return statements are not visible here.
 */
102 mono_arch_fregname (int reg)
104 static const char * rnames[] = {
105 "arm_f0", "arm_f1", "arm_f2", "arm_f3", "arm_f4",
106 "arm_f5", "arm_f6", "arm_f7", "arm_f8", "arm_f9",
107 "arm_f10", "arm_f11", "arm_f12", "arm_f13", "arm_f14",
108 "arm_f15", "arm_f16", "arm_f17", "arm_f18", "arm_f19",
109 "arm_f20", "arm_f21", "arm_f22", "arm_f23", "arm_f24",
110 "arm_f25", "arm_f26", "arm_f27", "arm_f28", "arm_f29",
/* 32 FP register names are indexed; out-of-range handling not visible here. */
113 if (reg >= 0 && reg < 32)
/*
 * emit_big_add:
 * Emit code computing DREG = SREG + IMM for an arbitrary 32-bit immediate.
 * Uses a single ADD when IMM fits the ARM rotated 8-bit immediate encoding;
 * otherwise materializes IMM into DREG first and adds SREG (hence the
 * dreg != sreg assertion: DREG is used as a scratch register).
 */
119 emit_big_add (guint8 *code, int dreg, int sreg, int imm)
121 int imm8, rot_amount;
122 if ((imm8 = mono_arm_is_rotated_imm8 (imm, &rot_amount)) >= 0) {
123 ARM_ADD_REG_IMM (code, dreg, sreg, imm8, rot_amount);
/* Immediate does not encode: load it into dreg, then add sreg. */
126 g_assert (dreg != sreg);
127 code = mono_arm_emit_load_imm (code, dreg, imm);
128 ARM_ADD_REG_REG (code, dreg, dreg, sreg);
/*
 * emit_memcpy:
 * Emit a word-by-word copy of SIZE bytes from sreg+soffset to dreg+doffset.
 * Large copies (> 4 pointers) use a countdown loop through r0-r3/lr;
 * small copies are fully unrolled with ldr/str pairs.  Assumes SIZE is a
 * multiple of the word size (see the final assert) and that word alignment
 * holds — presumably guaranteed by the callers (incoming stack args).
 */
133 emit_memcpy (guint8 *code, int size, int dreg, int doffset, int sreg, int soffset)
135 /* we can use r0-r3, since this is called only for incoming args on the stack */
136 if (size > sizeof (gpointer) * 4) {
/* Loop path: r0 = src cursor, r1 = dst cursor, r2 = byte counter. */
138 code = emit_big_add (code, ARMREG_R0, sreg, soffset);
139 code = emit_big_add (code, ARMREG_R1, dreg, doffset);
140 start_loop = code = mono_arm_emit_load_imm (code, ARMREG_R2, size);
141 ARM_LDR_IMM (code, ARMREG_R3, ARMREG_R0, 0);
142 ARM_STR_IMM (code, ARMREG_R3, ARMREG_R1, 0);
143 ARM_ADD_REG_IMM8 (code, ARMREG_R0, ARMREG_R0, 4);
144 ARM_ADD_REG_IMM8 (code, ARMREG_R1, ARMREG_R1, 4);
145 ARM_SUBS_REG_IMM8 (code, ARMREG_R2, ARMREG_R2, 4);
/* Conditional branch back to the top of the loop until the counter hits 0. */
146 ARM_B_COND (code, ARMCOND_NE, 0);
147 arm_patch (code - 4, start_loop);
/* Unrolled path: use immediate offsets directly when they fit in 12 bits. */
150 if (arm_is_imm12 (doffset) && arm_is_imm12 (doffset + size) &&
151 arm_is_imm12 (soffset) && arm_is_imm12 (soffset + size)) {
153 ARM_LDR_IMM (code, ARMREG_LR, sreg, soffset);
154 ARM_STR_IMM (code, ARMREG_LR, dreg, doffset);
/* Offsets too large for imm12: compute base addresses once, then copy. */
160 code = emit_big_add (code, ARMREG_R0, sreg, soffset);
161 code = emit_big_add (code, ARMREG_R1, dreg, doffset);
162 doffset = soffset = 0;
164 ARM_LDR_IMM (code, ARMREG_LR, ARMREG_R0, soffset);
165 ARM_STR_IMM (code, ARMREG_LR, ARMREG_R1, doffset);
/* Whole-word copies only: any remainder would indicate a caller bug. */
171 g_assert (size == 0);
/*
 * emit_call_reg:
 * Emit an indirect call through REG.  One path uses BLX; the other
 * manually places the return address in LR and branches by writing PC.
 * NOTE(review): the preprocessor conditionals selecting between these
 * paths (presumably on v5 support) are elided in this excerpt.
 */
176 emit_call_reg (guint8 *code, int reg)
179 ARM_BLX_REG (code, reg);
/* Pre-BLX sequence: LR <- PC (which reads as current insn + 8), then jump. */
181 ARM_MOV_REG_REG (code, ARMREG_LR, ARMREG_PC);
185 ARM_MOV_REG_REG (code, ARMREG_PC, reg);
/*
 * emit_call_seq:
 * Emit a call whose target is patched in later.  For dynamic methods the
 * target address is embedded as an inline literal loaded into IP (PC-relative
 * ldr) and called indirectly; the NULL literal is a placeholder for patching.
 * NOTE(review): the non-dynamic path is elided in this excerpt.
 */
191 emit_call_seq (MonoCompile *cfg, guint8 *code)
193 if (cfg->method->dynamic) {
194 ARM_LDR_IMM (code, ARMREG_IP, ARMREG_PC, 0);
/* Inline literal slot; filled in by the patcher with the real target. */
196 *(gpointer*)code = NULL;
198 code = emit_call_reg (code, ARMREG_IP);
/*
 * emit_move_return_value:
 * After a call instruction, move the return value from the ABI return
 * location into the instruction's destination register.  FP returns are
 * handled per FPU flavor: FPA returns in F0, VFP receives the value in
 * core registers (r0 / r0:r1) and transfers it into a VFP register.
 * NOTE(review): excerpt is non-contiguous; other opcode cases and the
 * closing preprocessor directives are elided.
 */
206 emit_move_return_value (MonoCompile *cfg, MonoInst *ins, guint8 *code)
208 switch (ins->opcode) {
211 case OP_FCALL_MEMBASE:
213 if (ins->dreg != ARM_FPA_F0)
214 ARM_MVFD (code, ins->dreg, ARM_FPA_F0);
215 #elif defined(ARM_FPU_VFP)
/* VFP: a float comes back in r0; move to a VFP reg and widen to double. */
216 if (((MonoCallInst*)ins)->signature->ret->type == MONO_TYPE_R4) {
217 ARM_FMSR (code, ins->dreg, ARMREG_R0);
218 ARM_CVTS (code, ins->dreg, ins->dreg);
/* Doubles come back in the r0:r1 pair; combine into one VFP register. */
220 ARM_FMDRR (code, ARMREG_R0, ARMREG_R1, ins->dreg);
230 * mono_arch_get_argument_info:
231 * @csig: a method signature
232 * @param_count: the number of parameters to consider
233 * @arg_info: an array to store the result infos
235 * Gathers information on parameters such as size, alignment and
236 * padding. arg_info should be large enough to hold param_count + 1 entries.
238 * Returns the size of the activation frame.
241 mono_arch_get_argument_info (MonoMethodSignature *csig, int param_count, MonoJitArgumentInfo *arg_info)
243 int k, frame_size = 0;
244 guint32 size, align, pad;
/* A struct return occupies a hidden pointer slot at the front of the frame. */
247 if (MONO_TYPE_ISSTRUCT (csig->ret)) {
248 frame_size += sizeof (gpointer);
252 arg_info [0].offset = offset;
/* 'this' pointer, when present, also takes one pointer-sized slot. */
255 frame_size += sizeof (gpointer);
259 arg_info [0].size = frame_size;
261 for (k = 0; k < param_count; k++) {
262 size = mini_type_stack_size_full (NULL, csig->params [k], &align, csig->pinvoke);
264 /* ignore alignment for now */
/* Pad the running frame size up to this parameter's alignment. */
267 frame_size += pad = (align - (frame_size & (align - 1))) & (align - 1);
268 arg_info [k].pad = pad;
270 arg_info [k + 1].pad = 0;
271 arg_info [k + 1].size = size;
273 arg_info [k + 1].offset = offset;
/* Final padding rounds the whole frame to MONO_ARCH_FRAME_ALIGNMENT. */
277 align = MONO_ARCH_FRAME_ALIGNMENT;
278 frame_size += pad = (align - (frame_size & (align - 1))) & (align - 1);
279 arg_info [k].pad = pad;
/*
 * decode_vcall_slot_from_ldr:
 * Given an encoded "ldr pc, [rX, #off]" instruction word, recover the
 * vtable base (the value of rX from the saved register state) and the
 * slot displacement.  Returns the base object pointer and stores the
 * (sign-adjusted) offset in *displacement.
 */
285 decode_vcall_slot_from_ldr (guint32 ldr, mgreg_t *regs, int *displacement)
/* Bits 16-19 hold the base register; bits 0-11 hold the imm12 offset. */
289 reg = (ldr >> 16 ) & 0xf;
290 offset = ldr & 0xfff;
291 if (((ldr >> 23) & 1) == 0) /*U bit, 0 means negative and 1 positive*/
293 /*g_print ("found vcall at r%d + %d for code at %p 0x%x\n", reg, offset, code, *code);*/
294 o = (gpointer)regs [reg];
296 *displacement = offset;
/*
 * mono_arch_get_vcall_slot:
 * Starting from the return address CODE_PTR, identify whether the call
 * that just returned was a virtual/interface call through a vtable slot,
 * and if so decode which register held the vtable and at which offset the
 * slot lives.  Direct calls (bl / mov pc) are ignored (return not visible
 * in this excerpt).
 */
301 mono_arch_get_vcall_slot (guint8 *code_ptr, mgreg_t *regs, int *displacement)
303 guint32* code = (guint32*)code_ptr;
305 /* Locate the address of the method-specific trampoline. The call using
306 the vtable slot that took the processing flow to 'arch_create_jit_trampoline'
307 looks something like this:
316 The call sequence could be also:
319 function pointer literal
323 Note that on ARM5+ we can use one instruction instead of the last two.
324 Therefore, we need to locate the 'ldr rA' instruction to know which
325 register was used to hold the method addrs.
328 /* This is the instruction after "ldr pc, xxx", "mov pc, xxx" or "bl xxx" could be either the IMT value or some other instruction*/
331 /* Three possible code sequences can happen here:
335 * ldr pc, [rX - #offset]
341 * ldr pc, [rX - #offset]
343 * direct branch with bl:
347 * direct branch with mov:
351 * We only need to identify interface and virtual calls, the others can be ignored.
/* Sequence "add lr, pc, #4; ldr pc, [...]": the ldr is one word back. */
354 if (IS_LDR_PC (code [-1]) && code [-2] == ADD_LR_PC_4)
355 return decode_vcall_slot_from_ldr (code [-1], regs, displacement);
/* Sequence "mov lr, pc; ldr pc, [...]": the ldr is at the return address. */
357 if (IS_LDR_PC (code [0]) && code [-1] == MOV_LR_PC)
358 return decode_vcall_slot_from_ldr (code [0], regs, displacement);
363 #define MAX_ARCH_DELEGATE_PARAMS 3
/*
 * get_delegate_invoke_impl:
 * Generate a small thunk used as a delegate Invoke implementation.
 * has_target variant: replaces 'this' (r0) with delegate->target and jumps
 * to delegate->method_ptr.  Static variant: slides the arguments down one
 * register (dropping the delegate in r0) and jumps to method_ptr.
 * If CODE_SIZE is non-NULL the generated length is returned through it.
 * NOTE(review): excerpt is non-contiguous; the branch between the two
 * variants and the return statements are elided.
 */
366 get_delegate_invoke_impl (gboolean has_target, gboolean param_count, guint32 *code_size)
368 guint8 *code, *start;
371 start = code = mono_global_codeman_reserve (12);
373 /* Replace the this argument with the target */
374 ARM_LDR_IMM (code, ARMREG_IP, ARMREG_R0, G_STRUCT_OFFSET (MonoDelegate, method_ptr));
375 ARM_LDR_IMM (code, ARMREG_R0, ARMREG_R0, G_STRUCT_OFFSET (MonoDelegate, target));
376 ARM_MOV_REG_REG (code, ARMREG_PC, ARMREG_IP);
378 g_assert ((code - start) <= 12);
380 mono_arch_flush_icache (start, 12);
/* Static-delegate variant: one mov per parameter plus the load and jump. */
384 size = 8 + param_count * 4;
385 start = code = mono_global_codeman_reserve (size);
387 ARM_LDR_IMM (code, ARMREG_IP, ARMREG_R0, G_STRUCT_OFFSET (MonoDelegate, method_ptr));
388 /* slide down the arguments */
389 for (i = 0; i < param_count; ++i) {
390 ARM_MOV_REG_REG (code, (ARMREG_R0 + i), (ARMREG_R0 + i + 1));
392 ARM_MOV_REG_REG (code, ARMREG_PC, ARMREG_IP);
394 g_assert ((code - start) <= size);
396 mono_arch_flush_icache (start, size);
400 *code_size = code - start;
406 * mono_arch_get_delegate_invoke_impls:
408 * Return a list of MonoAotTrampInfo structures for the delegate invoke impl
/* Builds the has-target thunk plus one static-delegate thunk per arity
 * (0 .. MAX_ARCH_DELEGATE_PARAMS), each registered under a stable name
 * so the AOT compiler can emit and look them up. */
412 mono_arch_get_delegate_invoke_impls (void)
419 code = get_delegate_invoke_impl (TRUE, 0, &code_len);
420 res = g_slist_prepend (res, mono_aot_tramp_info_create (g_strdup ("delegate_invoke_impl_has_target"), code, code_len));
422 for (i = 0; i <= MAX_ARCH_DELEGATE_PARAMS; ++i) {
423 code = get_delegate_invoke_impl (FALSE, i, &code_len);
424 res = g_slist_prepend (res, mono_aot_tramp_info_create (g_strdup_printf ("delegate_invoke_impl_target_%d", i), code, code_len));
/*
 * mono_arch_get_delegate_invoke_impl:
 * Return (and cache) the delegate invoke thunk matching SIG.  In AOT mode
 * the thunk is looked up by name; otherwise it is JITted on first use.
 * Caches are guarded by the arch mutex.  Returns NULL for unsupported
 * signatures (struct returns, too many params, non-regsize params).
 * NOTE(review): excerpt is non-contiguous; cache-hit early returns and
 * some branches are elided.
 */
431 mono_arch_get_delegate_invoke_impl (MonoMethodSignature *sig, gboolean has_target)
433 guint8 *code, *start;
435 /* FIXME: Support more cases */
436 if (MONO_TYPE_ISSTRUCT (sig->ret))
/* has_target case: a single shared thunk, cached in a static pointer. */
440 static guint8* cached = NULL;
441 mono_mini_arch_lock ();
443 mono_mini_arch_unlock ();
448 start = mono_aot_get_named_code ("delegate_invoke_impl_has_target");
450 start = get_delegate_invoke_impl (TRUE, 0, NULL);
452 mono_mini_arch_unlock ();
/* Static-delegate case: one cached thunk per parameter count. */
455 static guint8* cache [MAX_ARCH_DELEGATE_PARAMS + 1] = {NULL};
458 if (sig->param_count > MAX_ARCH_DELEGATE_PARAMS)
/* All params must fit a core register for the slide-down thunk to work. */
460 for (i = 0; i < sig->param_count; ++i)
461 if (!mono_is_regsize_var (sig->params [i]))
464 mono_mini_arch_lock ();
465 code = cache [sig->param_count];
467 mono_mini_arch_unlock ();
472 char *name = g_strdup_printf ("delegate_invoke_impl_target_%d", sig->param_count);
473 start = mono_aot_get_named_code (name);
476 start = get_delegate_invoke_impl (FALSE, sig->param_count, NULL);
478 cache [sig->param_count] = start;
479 mono_mini_arch_unlock ();
/*
 * mono_arch_get_this_arg_from_call:
 * Recover the 'this' pointer from the saved register state at a call site.
 * Normally it is in r0; when the callee returns a struct by value the
 * hidden return-buffer pointer occupies r0 and 'this' shifts to r1.
 */
487 mono_arch_get_this_arg_from_call (MonoGenericSharingContext *gsctx, MonoMethodSignature *sig, mgreg_t *regs, guint8 *code)
489 /* FIXME: handle returning a struct */
490 if (MONO_TYPE_ISSTRUCT (sig->ret))
491 return (gpointer)regs [ARMREG_R1];
492 return (gpointer)regs [ARMREG_R0];
496 * Initialize the cpu to execute managed code.
/* NOTE(review): body elided in this excerpt; appears to be empty or trivial. */
499 mono_arch_cpu_init (void)
504 * Initialize architecture specific code.
/* Sets up the arch mutex and allocates the two trigger pages used by the
 * soft-debugger: the single-step page (made read-only to trigger faults
 * when stepping) and the breakpoint page (never readable). */
507 mono_arch_init (void)
509 InitializeCriticalSection (&mini_arch_mutex);
511 ss_trigger_page = mono_valloc (NULL, mono_pagesize (), MONO_MMAP_READ|MONO_MMAP_32BIT);
512 bp_trigger_page = mono_valloc (NULL, mono_pagesize (), MONO_MMAP_READ|MONO_MMAP_32BIT);
/* Breakpoint page starts with no permissions so any read faults. */
513 mono_mprotect (bp_trigger_page, mono_pagesize (), 0);
517 * Cleanup architecture specific code.
/* NOTE(review): body elided in this excerpt. */
520 mono_arch_cleanup (void)
525 * This function returns the optimizations supported on this cpu.
/* Parses /proc/cpuinfo on Linux to detect the architecture revision
 * (v5/v6/v7) and Thumb support, setting the file-scope feature flags.
 * The misspelled name is the established cross-arch Mono entry point and
 * must not be changed. */
528 mono_arch_cpu_optimizazions (guint32 *exclude_mask)
532 thumb_supported = TRUE;
537 FILE *file = fopen ("/proc/cpuinfo", "r");
539 while ((line = fgets (buf, 512, file))) {
540 if (strncmp (line, "Processor", 9) == 0) {
/* "Processor : ... (v7l)" style lines carry the architecture revision. */
541 char *ver = strstr (line, "(v");
542 if (ver && (ver [2] == '5' || ver [2] == '6' || ver [2] == '7'))
544 if (ver && (ver [2] == '7'))
548 if (strncmp (line, "Features", 8) == 0) {
549 char *th = strstr (line, "thumb");
551 thumb_supported = TRUE;
559 /*printf ("features: v5: %d, thumb: %d\n", v5_supported, thumb_supported);*/
563 /* no arm-specific optimizations yet */
/*
 * is_regsize_var:
 * Return whether a variable of type T fits in a single core register
 * (pointer-sized integers, object references, non-valuetype generic
 * instances).  Used to decide global register allocation eligibility.
 * NOTE(review): excerpt is non-contiguous; several case labels and the
 * return statements are elided.
 */
569 is_regsize_var (MonoType *t) {
572 t = mini_type_get_underlying_type (NULL, t);
579 case MONO_TYPE_FNPTR:
581 case MONO_TYPE_OBJECT:
582 case MONO_TYPE_STRING:
583 case MONO_TYPE_CLASS:
584 case MONO_TYPE_SZARRAY:
585 case MONO_TYPE_ARRAY:
587 case MONO_TYPE_GENERICINST:
/* Reference-typed generic instances are register-sized; valuetypes are not. */
588 if (!mono_type_generic_inst_is_valuetype (t))
591 case MONO_TYPE_VALUETYPE:
/*
 * mono_arch_get_allocatable_int_vars:
 * Collect the method variables eligible for global register allocation:
 * live, non-volatile, non-indirect locals/args of register size.
 * Returns them sorted by liveness range.
 */
598 mono_arch_get_allocatable_int_vars (MonoCompile *cfg)
603 for (i = 0; i < cfg->num_varinfo; i++) {
604 MonoInst *ins = cfg->varinfo [i];
605 MonoMethodVar *vmv = MONO_VARINFO (cfg, i);
/* Skip unused or dead variables (empty or inverted live range). */
608 if (vmv->range.first_use.abs_pos >= vmv->range.last_use.abs_pos)
611 if (ins->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT) || (ins->opcode != OP_LOCAL && ins->opcode != OP_ARG))
614 /* we can only allocate 32 bit values */
615 if (is_regsize_var (ins->inst_vtype)) {
616 g_assert (MONO_VARINFO (cfg, i)->reg == -1);
617 g_assert (i == vmv->idx);
618 vars = mono_varlist_insert_sorted (cfg, vars, vmv, FALSE);
625 #define USE_EXTRA_TEMPS 0
/*
 * mono_arch_get_global_int_regs:
 * Return the list of callee-saved core registers (v1-v5) available for
 * global register allocation.  V5 is withheld when it is reserved for
 * the vtable/rgctx/IMT argument (AOT or rgctx-using methods).
 */
628 mono_arch_get_global_int_regs (MonoCompile *cfg)
633 * FIXME: Interface calls might go through a static rgctx trampoline which
634 * sets V5, but it doesn't save it, so we need to save it ourselves, and
/* Conservatively mark any method that makes calls as using the rgctx reg. */
637 if (cfg->flags & MONO_CFG_HAS_CALLS)
638 cfg->uses_rgctx_reg = TRUE;
640 regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_V1));
641 regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_V2));
642 regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_V3));
643 regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_V4));
644 if (!(cfg->compile_aot || cfg->uses_rgctx_reg))
645 /* V5 is reserved for passing the vtable/rgctx/IMT method */
646 regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_V5));
647 /*regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_V6));*/
648 /*regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_V7));*/
654 * mono_arch_regalloc_cost:
656 * Return the cost, in number of memory references, of the action of
657 * allocating the variable VMV into a register during global register
/* NOTE(review): body elided in this excerpt. */
661 mono_arch_regalloc_cost (MonoCompile *cfg, MonoMethodVar *vmv)
667 #ifndef __GNUC_PREREQ
668 #define __GNUC_PREREQ(maj, min) (0)
/*
 * mono_arch_flush_icache:
 * Flush the instruction cache for [code, code+size) after emitting or
 * patching code.  Platform-specific: Darwin's sys_icache_invalidate,
 * GCC >= 4.1's __clear_cache, Android's cacheflush syscall number, or a
 * raw swi to the Linux sys_cacheflush handler.
 */
672 mono_arch_flush_icache (guint8 *code, gint size)
675 sys_icache_invalidate (code, size);
676 #elif __GNUC_PREREQ(4, 1)
677 __clear_cache (code, code + size);
678 #elif defined(PLATFORM_ANDROID)
679 const int syscall = 0xf0002;
687 : "r" (code), "r" (code + size), "r" (syscall)
/* Generic Linux fallback: invoke sys_cacheflush via software interrupt. */
691 __asm __volatile ("mov r0, %0\n"
694 "swi 0x9f0002 @ sys_cacheflush"
696 : "r" (code), "r" (code + size), "r" (0)
697 : "r0", "r1", "r3" );
714 guint16 vtsize; /* in param area */
716 guint8 regtype : 4; /* 0 general, 1 basereg, 2 floating point register, see RegType* */
717 guint8 size : 4; /* 1, 2, 4, 8, or regs used by RegTypeStructByVal */
724 gboolean vtype_retaddr;
733 /*#define __alignof__(a) sizeof(a)*/
734 #define __alignof__(type) G_STRUCT_OFFSET(struct { char c; type x; }, x)
/*
 * add_general:
 * Assign the next argument to a core register (r0-r3) or to the caller's
 * stack area, per the ARM calling convention.  SIMPLE selects the
 * single-word path; 64-bit values may be split across r3 and the stack
 * (RegTypeBaseGen) or take an even-aligned register pair (RegTypeIRegPair,
 * when the native gint64 alignment is 8).  GR and STACK_SIZE are advanced
 * accordingly (increments elided in this excerpt).
 */
740 add_general (guint *gr, guint *stack_size, ArgInfo *ainfo, gboolean simple)
743 if (*gr > ARMREG_R3) {
744 ainfo->offset = *stack_size;
745 ainfo->reg = ARMREG_SP; /* in the caller */
746 ainfo->regtype = RegTypeBase;
749 ainfo->regtype = RegTypeGeneral;
/* 64-bit argument path: alignment of gint64 decides whether splitting
 * across r3/stack is allowed. */
753 #if defined(__APPLE__) && defined(MONO_CROSS_COMPILE)
756 int i8_align = __alignof__ (gint64);
760 gboolean split = i8_align == 4;
762 gboolean split = TRUE;
765 if (*gr == ARMREG_R3 && split) {
766 /* first word in r3 and the second on the stack */
767 ainfo->offset = *stack_size;
768 ainfo->reg = ARMREG_SP; /* in the caller */
769 ainfo->regtype = RegTypeBaseGen;
771 } else if (*gr >= ARMREG_R3) {
773 /* darwin aligns longs to 4 byte only */
779 ainfo->offset = *stack_size;
780 ainfo->reg = ARMREG_SP; /* in the caller */
781 ainfo->regtype = RegTypeBase;
/* EABI: skip to an even register for 8-byte-aligned 64-bit values. */
785 if (i8_align == 8 && ((*gr) & 1))
788 ainfo->regtype = RegTypeIRegPair;
/*
 * get_call_info:
 * Compute the calling-convention layout for SIG: for each argument and
 * for the return value, decide the register / stack placement (ArgInfo
 * regtype, reg, offset, size) and the total stack space used.  The caller
 * owns the returned CallInfo (g_malloc0'd).
 * NOTE(review): excerpt is non-contiguous; fall-through case labels and
 * several statements are elided.
 */
797 get_call_info (MonoMethodSignature *sig, gboolean is_pinvoke)
800 int n = sig->hasthis + sig->param_count;
801 MonoType *simpletype;
802 guint32 stack_size = 0;
803 CallInfo *cinfo = g_malloc0 (sizeof (CallInfo) + sizeof (ArgInfo) * n);
/* A struct return consumes the first argument slot as a hidden pointer. */
808 /* FIXME: handle returning a struct */
809 if (MONO_TYPE_ISSTRUCT (sig->ret)) {
810 add_general (&gr, &stack_size, &cinfo->ret, TRUE);
811 cinfo->struct_ret = ARMREG_R0;
812 cinfo->vtype_retaddr = TRUE;
/* Implicit 'this' argument. */
817 add_general (&gr, &stack_size, cinfo->args + n, TRUE);
820 DEBUG(printf("params: %d\n", sig->param_count));
821 for (i = 0; i < sig->param_count; ++i) {
822 if ((sig->call_convention == MONO_CALL_VARARG) && (i == sig->sentinelpos)) {
823 /* Prevent implicit arguments and sig_cookie from
824 being passed in registers */
826 /* Emit the signature cookie just before the implicit arguments */
827 add_general (&gr, &stack_size, &cinfo->sig_cookie, TRUE);
829 DEBUG(printf("param %d: ", i));
830 if (sig->params [i]->byref) {
831 DEBUG(printf("byref\n"));
832 add_general (&gr, &stack_size, cinfo->args + n, TRUE);
836 simpletype = mini_type_get_underlying_type (NULL, sig->params [i]);
837 switch (simpletype->type) {
838 case MONO_TYPE_BOOLEAN:
841 cinfo->args [n].size = 1;
842 add_general (&gr, &stack_size, cinfo->args + n, TRUE);
848 cinfo->args [n].size = 2;
849 add_general (&gr, &stack_size, cinfo->args + n, TRUE);
854 cinfo->args [n].size = 4;
855 add_general (&gr, &stack_size, cinfo->args + n, TRUE);
861 case MONO_TYPE_FNPTR:
862 case MONO_TYPE_CLASS:
863 case MONO_TYPE_OBJECT:
864 case MONO_TYPE_STRING:
865 case MONO_TYPE_SZARRAY:
866 case MONO_TYPE_ARRAY:
868 cinfo->args [n].size = sizeof (gpointer);
869 add_general (&gr, &stack_size, cinfo->args + n, TRUE);
872 case MONO_TYPE_GENERICINST:
873 if (!mono_type_generic_inst_is_valuetype (sig->params [i])) {
874 cinfo->args [n].size = sizeof (gpointer);
875 add_general (&gr, &stack_size, cinfo->args + n, TRUE);
/* Valuetype generic instances fall through to the valuetype handling. */
880 case MONO_TYPE_TYPEDBYREF:
881 case MONO_TYPE_VALUETYPE: {
887 if (simpletype->type == MONO_TYPE_TYPEDBYREF) {
888 size = sizeof (MonoTypedRef);
889 align = sizeof (gpointer);
891 MonoClass *klass = mono_class_from_mono_type (sig->params [i]);
893 size = mono_class_native_size (klass, &align);
895 size = mono_class_value_size (klass, &align);
897 DEBUG(printf ("load %d bytes struct\n",
898 mono_class_native_size (sig->params [i]->data.klass, NULL)));
/* Round the struct size up to whole machine words. */
901 align_size += (sizeof (gpointer) - 1);
902 align_size &= ~(sizeof (gpointer) - 1);
903 nwords = (align_size + sizeof (gpointer) -1 ) / sizeof (gpointer);
904 cinfo->args [n].regtype = RegTypeStructByVal;
905 /* FIXME: align stack_size if needed */
/* 8-byte-aligned structs skip to an even register number. */
907 if (align >= 8 && (gr & 1))
910 if (gr > ARMREG_R3) {
911 cinfo->args [n].size = 0;
912 cinfo->args [n].vtsize = nwords;
/* Struct may be split: 'size' words in registers, 'vtsize' on the stack. */
914 int rest = ARMREG_R3 - gr + 1;
915 int n_in_regs = rest >= nwords? nwords: rest;
917 cinfo->args [n].size = n_in_regs;
918 cinfo->args [n].vtsize = nwords - n_in_regs;
919 cinfo->args [n].reg = gr;
923 cinfo->args [n].offset = stack_size;
924 /*g_print ("offset for arg %d at %d\n", n, stack_size);*/
925 stack_size += nwords * sizeof (gpointer);
/* 8-byte values (i8/u8/r8): simple=FALSE engages the pair/split logic. */
932 cinfo->args [n].size = 8;
933 add_general (&gr, &stack_size, cinfo->args + n, FALSE);
937 g_error ("Can't trampoline 0x%x", sig->params [i]->type);
/* Classify the return value. */
942 simpletype = mini_type_get_underlying_type (NULL, sig->ret);
943 switch (simpletype->type) {
944 case MONO_TYPE_BOOLEAN:
955 case MONO_TYPE_FNPTR:
956 case MONO_TYPE_CLASS:
957 case MONO_TYPE_OBJECT:
958 case MONO_TYPE_SZARRAY:
959 case MONO_TYPE_ARRAY:
960 case MONO_TYPE_STRING:
961 cinfo->ret.regtype = RegTypeGeneral;
962 cinfo->ret.reg = ARMREG_R0;
/* 64-bit returns occupy the r0:r1 pair. */
966 cinfo->ret.regtype = RegTypeIRegPair;
967 cinfo->ret.reg = ARMREG_R0;
971 cinfo->ret.regtype = RegTypeFP;
972 cinfo->ret.reg = ARMREG_R0;
973 /* FIXME: cinfo->ret.reg = ???;
974 cinfo->ret.regtype = RegTypeFP;*/
976 case MONO_TYPE_GENERICINST:
977 if (!mono_type_generic_inst_is_valuetype (sig->ret)) {
978 cinfo->ret.regtype = RegTypeGeneral;
979 cinfo->ret.reg = ARMREG_R0;
982 cinfo->ret.regtype = RegTypeStructByAddr;
984 case MONO_TYPE_VALUETYPE:
985 cinfo->ret.regtype = RegTypeStructByAddr;
987 case MONO_TYPE_TYPEDBYREF:
988 cinfo->ret.regtype = RegTypeStructByAddr;
993 g_error ("Can't handle as return value 0x%x", sig->ret->type);
997 /* align stack size to 8 */
998 DEBUG (printf (" stack size: %d (%d)\n", (stack_size + 15) & ~15, stack_size));
999 stack_size = (stack_size + 7) & ~7;
1001 cinfo->stack_usage = stack_size;
1007 * Set var information according to the calling convention. arm version.
1008 * The locals var stuff should most likely be split in another method.
/* Assigns frame offsets (OP_REGOFFSET relative to frame_reg) to the return
 * buffer, all locals and all incoming arguments, and records the total
 * frame size in cfg->stack_offset.  Offsets grow upward from the stack
 * pointer (MONO_CFG_HAS_SPILLUP).
 * NOTE(review): excerpt is non-contiguous; several statements (offset
 * increments, closing braces, some branches) are elided. */
1011 mono_arch_allocate_vars (MonoCompile *cfg)
1013 MonoMethodSignature *sig;
1014 MonoMethodHeader *header;
1016 int i, offset, size, align, curinst;
1017 int frame_reg = ARMREG_FP;
1019 /* FIXME: this will change when we use FP as gcc does */
1020 cfg->flags |= MONO_CFG_HAS_SPILLUP;
1022 /* allow room for the vararg method args: void* and long/double */
1023 if (mono_jit_trace_calls != NULL && mono_trace_eval (cfg->method))
1024 cfg->param_area = MAX (cfg->param_area, sizeof (gpointer)*8);
1026 header = mono_method_get_header (cfg->method);
1029 * We use the frame register also for any method that has
1030 * exception clauses. This way, when the handlers are called,
1031 * the code will reference local variables using the frame reg instead of
1032 * the stack pointer: if we had to restore the stack pointer, we'd
1033 * corrupt the method frames that are already on the stack (since
1034 * filters get called before stack unwinding happens) when the filter
1035 * code would call any method (this also applies to finally etc.).
1037 if ((cfg->flags & MONO_CFG_HAS_ALLOCA) || header->num_clauses)
1038 frame_reg = ARMREG_FP;
1039 cfg->frame_reg = frame_reg;
1040 if (frame_reg != ARMREG_SP) {
1041 cfg->used_int_regs |= 1 << frame_reg;
1044 if (cfg->compile_aot || cfg->uses_rgctx_reg)
1045 /* V5 is reserved for passing the vtable/rgctx/IMT method */
1046 cfg->used_int_regs |= (1 << ARMREG_V5);
1048 sig = mono_method_signature (cfg->method);
/* Scalar returns live in r0 (OP_REGVAR); struct returns handled below. */
1052 if (!MONO_TYPE_ISSTRUCT (sig->ret)) {
1053 /* FIXME: handle long and FP values */
1054 switch (mini_type_get_underlying_type (NULL, sig->ret)->type) {
1055 case MONO_TYPE_VOID:
1058 cfg->ret->opcode = OP_REGVAR;
1059 cfg->ret->inst_c0 = ARMREG_R0;
1063 /* local vars are at a positive offset from the stack pointer */
1065 * also note that if the function uses alloca, we use FP
1066 * to point at the local variables.
1068 offset = 0; /* linkage area */
1069 /* align the offset to 16 bytes: not sure this is needed here */
1071 //offset &= ~(8 - 1);
1073 /* add parameter area size for called functions */
1074 offset += cfg->param_area;
1077 if (cfg->flags & MONO_CFG_HAS_FPOUT)
1080 /* allow room to save the return value */
1081 if (mono_jit_trace_calls != NULL && mono_trace_eval (cfg->method))
1084 /* the MonoLMF structure is stored just below the stack pointer */
1086 if (sig->call_convention == MONO_CALL_VARARG) {
1087 cfg->sig_cookie = 0;
/* Hidden struct-return address gets a pointer-aligned frame slot. */
1090 if (MONO_TYPE_ISSTRUCT (sig->ret)) {
1091 inst = cfg->vret_addr;
1092 offset += sizeof(gpointer) - 1;
1093 offset &= ~(sizeof(gpointer) - 1);
1094 inst->inst_offset = offset;
1095 inst->opcode = OP_REGOFFSET;
1096 inst->inst_basereg = frame_reg;
1097 if (G_UNLIKELY (cfg->verbose_level > 1)) {
1098 printf ("vret_addr =");
1099 mono_print_ins (cfg->vret_addr);
1101 offset += sizeof(gpointer);
1102 if (sig->call_convention == MONO_CALL_VARARG)
1103 cfg->sig_cookie += sizeof (gpointer);
/* Lay out locals: skip dead/register-allocated ones, align each slot. */
1106 curinst = cfg->locals_start;
1107 for (i = curinst; i < cfg->num_varinfo; ++i) {
1108 inst = cfg->varinfo [i];
1109 if ((inst->flags & MONO_INST_IS_DEAD) || inst->opcode == OP_REGVAR)
1112 /* inst->backend.is_pinvoke indicates native sized value types, this is used by the
1113 * pinvoke wrappers when they call functions returning structure */
1114 if (inst->backend.is_pinvoke && MONO_TYPE_ISSTRUCT (inst->inst_vtype) && inst->inst_vtype->type != MONO_TYPE_TYPEDBYREF) {
1116 size = mono_class_native_size (mono_class_from_mono_type (inst->inst_vtype), &ualign);
1120 size = mono_type_size (inst->inst_vtype, &align);
1122 /* FIXME: if a structure is misaligned, our memcpy doesn't work,
1123 * since it loads/stores misaligned words, which don't do the right thing.
1125 if (align < 4 && size >= 4)
1127 offset += align - 1;
1128 offset &= ~(align - 1);
1129 inst->inst_offset = offset;
1130 inst->opcode = OP_REGOFFSET;
1131 inst->inst_basereg = frame_reg;
1133 //g_print ("allocating local %d to %d\n", i, inst->inst_offset);
/* Implicit 'this' argument gets a pointer-sized, pointer-aligned slot. */
1138 inst = cfg->args [curinst];
1139 if (inst->opcode != OP_REGVAR) {
1140 inst->opcode = OP_REGOFFSET;
1141 inst->inst_basereg = frame_reg;
1142 offset += sizeof (gpointer) - 1;
1143 offset &= ~(sizeof (gpointer) - 1);
1144 inst->inst_offset = offset;
1145 offset += sizeof (gpointer);
1146 if (sig->call_convention == MONO_CALL_VARARG)
1147 cfg->sig_cookie += sizeof (gpointer);
/* Explicit parameters follow, each aligned to its natural alignment. */
1152 for (i = 0; i < sig->param_count; ++i) {
1153 inst = cfg->args [curinst];
1154 if (inst->opcode != OP_REGVAR) {
1155 inst->opcode = OP_REGOFFSET;
1156 inst->inst_basereg = frame_reg;
1157 size = mono_type_size (sig->params [i], &align);
1158 /* FIXME: if a structure is misaligned, our memcpy doesn't work,
1159 * since it loads/stores misaligned words, which don't do the right thing.
1161 if (align < 4 && size >= 4)
1163 offset += align - 1;
1164 offset &= ~(align - 1);
1165 inst->inst_offset = offset;
1167 if ((sig->call_convention == MONO_CALL_VARARG) && (i < sig->sentinelpos))
1168 cfg->sig_cookie += size;
1173 /* align the offset to 8 bytes */
1178 cfg->stack_offset = offset;
/*
 * mono_arch_create_vars:
 * Create architecture-specific compile-time variables: the hidden
 * struct-return address (vret_addr), and — for AOT methods with sequence
 * points — cached variables for the seq-point info and the single-step
 * trigger page, so each sequence point avoids reloading them.
 */
1182 mono_arch_create_vars (MonoCompile *cfg)
1184 MonoMethodSignature *sig;
1186 sig = mono_method_signature (cfg->method);
1188 if (MONO_TYPE_ISSTRUCT (sig->ret)) {
1189 cfg->vret_addr = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_ARG);
1190 if (G_UNLIKELY (cfg->verbose_level > 1)) {
1191 printf ("vret_addr = ");
1192 mono_print_ins (cfg->vret_addr);
1196 if (cfg->gen_seq_points && cfg->compile_aot) {
1197 MonoInst *ins = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
/* VOLATILE keeps these in memory so the debugger machinery can find them. */
1198 ins->flags |= MONO_INST_VOLATILE;
1199 cfg->arch.seq_point_info_var = ins;
1201 /* Allocate a separate variable for this to save 1 load per seq point */
1202 ins = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1203 ins->flags |= MONO_INST_VOLATILE;
1204 cfg->arch.ss_trigger_page_var = ins;
/*
 * mono_arch_emit_call:
 * Lower a call instruction: for each argument, emit the moves/stores that
 * place it in the register or stack slot chosen by get_call_info, handle
 * the struct-return hidden pointer, and record the call's stack usage.
 * NOTE(review): excerpt is non-contiguous; several case labels, #else/#endif
 * halves and closing braces are elided.
 */
1209 mono_arch_emit_call (MonoCompile *cfg, MonoCallInst *call)
1212 MonoMethodSignature *sig;
1216 sig = call->signature;
1217 n = sig->param_count + sig->hasthis;
1219 cinfo = get_call_info (sig, sig->pinvoke);
1221 for (i = 0; i < n; ++i) {
1222 ArgInfo *ainfo = cinfo->args + i;
/* Index 0 may be the implicit 'this' (typed as native int). */
1225 if (i >= sig->hasthis)
1226 t = sig->params [i - sig->hasthis];
1228 t = &mono_defaults.int_class->byval_arg;
1229 t = mini_type_get_underlying_type (NULL, t);
1231 if ((sig->call_convention == MONO_CALL_VARARG) && (i == sig->sentinelpos)) {
1236 in = call->args [i];
1238 switch (ainfo->regtype) {
1239 case RegTypeGeneral:
1240 case RegTypeIRegPair:
/* 64-bit values: move both halves (dreg+1 / dreg+2) into a register pair. */
1241 if (!t->byref && ((t->type == MONO_TYPE_I8) || (t->type == MONO_TYPE_U8))) {
1242 MONO_INST_NEW (cfg, ins, OP_MOVE);
1243 ins->dreg = mono_alloc_ireg (cfg);
1244 ins->sreg1 = in->dreg + 1;
1245 MONO_ADD_INS (cfg->cbb, ins);
1246 mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, ainfo->reg, FALSE);
1248 MONO_INST_NEW (cfg, ins, OP_MOVE);
1249 ins->dreg = mono_alloc_ireg (cfg);
1250 ins->sreg1 = in->dreg + 2;
1251 MONO_ADD_INS (cfg->cbb, ins);
1252 mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, ainfo->reg + 1, FALSE);
1253 } else if (!t->byref && ((t->type == MONO_TYPE_R8) || (t->type == MONO_TYPE_R4))) {
1254 #ifndef MONO_ARCH_SOFT_FLOAT
1258 if (ainfo->size == 4) {
1259 #ifdef MONO_ARCH_SOFT_FLOAT
1260 /* mono_emit_call_args () have already done the r8->r4 conversion */
1261 /* The converted value is in an int vreg */
1262 MONO_INST_NEW (cfg, ins, OP_MOVE);
1263 ins->dreg = mono_alloc_ireg (cfg);
1264 ins->sreg1 = in->dreg;
1265 MONO_ADD_INS (cfg->cbb, ins);
1266 mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, ainfo->reg, FALSE);
/* Hard-float r4: bounce through the param area to move FP -> core reg. */
1268 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORER4_MEMBASE_REG, ARMREG_SP, (cfg->param_area - 8), in->dreg);
1269 creg = mono_alloc_ireg (cfg);
1270 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOAD_MEMBASE, creg, ARMREG_SP, (cfg->param_area - 8));
1271 mono_call_inst_add_outarg_reg (cfg, call, creg, ainfo->reg, FALSE);
1274 #ifdef MONO_ARCH_SOFT_FLOAT
/* Soft-float r8: extract the two 32-bit halves into a register pair. */
1275 MONO_INST_NEW (cfg, ins, OP_FGETLOW32);
1276 ins->dreg = mono_alloc_ireg (cfg);
1277 ins->sreg1 = in->dreg;
1278 MONO_ADD_INS (cfg->cbb, ins);
1279 mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, ainfo->reg, FALSE);
1281 MONO_INST_NEW (cfg, ins, OP_FGETHIGH32);
1282 ins->dreg = mono_alloc_ireg (cfg);
1283 ins->sreg1 = in->dreg;
1284 MONO_ADD_INS (cfg->cbb, ins);
1285 mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, ainfo->reg + 1, FALSE);
/* Hard-float r8: store to the param area, reload both words into regs. */
1287 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORER8_MEMBASE_REG, ARMREG_SP, (cfg->param_area - 8), in->dreg);
1288 creg = mono_alloc_ireg (cfg);
1289 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOAD_MEMBASE, creg, ARMREG_SP, (cfg->param_area - 8));
1290 mono_call_inst_add_outarg_reg (cfg, call, creg, ainfo->reg, FALSE);
1291 creg = mono_alloc_ireg (cfg);
1292 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOAD_MEMBASE, creg, ARMREG_SP, (cfg->param_area - 8 + 4));
1293 mono_call_inst_add_outarg_reg (cfg, call, creg, ainfo->reg + 1, FALSE);
1296 cfg->flags |= MONO_CFG_HAS_FPOUT;
/* Plain register-sized argument: a single move. */
1298 MONO_INST_NEW (cfg, ins, OP_MOVE);
1299 ins->dreg = mono_alloc_ireg (cfg);
1300 ins->sreg1 = in->dreg;
1301 MONO_ADD_INS (cfg->cbb, ins);
1303 mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, ainfo->reg, FALSE);
1306 case RegTypeStructByAddr:
1309 /* FIXME: where is the data allocated? */
1310 arg->backend.reg3 = ainfo->reg;
1311 call->used_iregs |= 1 << ainfo->reg;
1312 g_assert_not_reached ();
/* Struct passed by value: deferred to mono_arch_emit_outarg_vt via
 * OP_OUTARG_VT carrying a copy of the ArgInfo. */
1315 case RegTypeStructByVal:
1316 MONO_INST_NEW (cfg, ins, OP_OUTARG_VT);
1317 ins->opcode = OP_OUTARG_VT;
1318 ins->sreg1 = in->dreg;
1319 ins->klass = in->klass;
1320 ins->inst_p0 = call;
1321 ins->inst_p1 = mono_mempool_alloc (cfg->mempool, sizeof (ArgInfo));
1322 memcpy (ins->inst_p1, ainfo, sizeof (ArgInfo));
1323 MONO_ADD_INS (cfg->cbb, ins);
/* Stack-passed argument (RegTypeBase, presumably): store at its offset. */
1326 if (!t->byref && ((t->type == MONO_TYPE_I8) || (t->type == MONO_TYPE_U8))) {
1327 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, ARMREG_SP, ainfo->offset, in->dreg);
1328 } else if (!t->byref && ((t->type == MONO_TYPE_R4) || (t->type == MONO_TYPE_R8))) {
1329 if (t->type == MONO_TYPE_R8) {
1330 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORER8_MEMBASE_REG, ARMREG_SP, ainfo->offset, in->dreg);
1332 #ifdef MONO_ARCH_SOFT_FLOAT
1333 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, ARMREG_SP, ainfo->offset, in->dreg);
1335 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORER4_MEMBASE_REG, ARMREG_SP, ainfo->offset, in->dreg);
1339 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, ARMREG_SP, ainfo->offset, in->dreg);
/* 64-bit value split between r3 and the stack. */
1342 case RegTypeBaseGen:
1343 if (!t->byref && ((t->type == MONO_TYPE_I8) || (t->type == MONO_TYPE_U8))) {
1344 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, ARMREG_SP, ainfo->offset, (G_BYTE_ORDER == G_BIG_ENDIAN) ? in->dreg + 1 : in->dreg + 2);
1345 MONO_INST_NEW (cfg, ins, OP_MOVE);
1346 ins->dreg = mono_alloc_ireg (cfg);
1347 ins->sreg1 = G_BYTE_ORDER == G_BIG_ENDIAN ? in->dreg + 2 : in->dreg + 1;
1348 MONO_ADD_INS (cfg->cbb, ins);
1349 mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, ARMREG_R3, FALSE);
1350 } else if (!t->byref && (t->type == MONO_TYPE_R8)) {
1353 #ifdef MONO_ARCH_SOFT_FLOAT
1354 g_assert_not_reached ();
/* Hard-float r8 split: low word to r3, high word to the stack slot. */
1357 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORER8_MEMBASE_REG, ARMREG_SP, (cfg->param_area - 8), in->dreg);
1358 creg = mono_alloc_ireg (cfg);
1359 mono_call_inst_add_outarg_reg (cfg, call, creg, ARMREG_R3, FALSE);
1360 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOAD_MEMBASE, creg, ARMREG_SP, (cfg->param_area - 8));
1361 creg = mono_alloc_ireg (cfg);
1362 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOAD_MEMBASE, creg, ARMREG_SP, (cfg->param_area - 4));
1363 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, ARMREG_SP, ainfo->offset, creg);
1364 cfg->flags |= MONO_CFG_HAS_FPOUT;
1366 g_assert_not_reached ();
/* FP-register argument class (presumably RegTypeFP; label elided). */
1373 arg->backend.reg3 = ainfo->reg;
1374 /* FP args are passed in int regs */
1375 call->used_iregs |= 1 << ainfo->reg;
1376 if (ainfo->size == 8) {
1377 arg->opcode = OP_OUTARG_R8;
1378 call->used_iregs |= 1 << (ainfo->reg + 1);
1380 arg->opcode = OP_OUTARG_R4;
1383 cfg->flags |= MONO_CFG_HAS_FPOUT;
1387 g_assert_not_reached ();
/* Struct return: pass the return-buffer address in the designated reg. */
1391 if (sig->ret && MONO_TYPE_ISSTRUCT (sig->ret)) {
1394 MONO_INST_NEW (cfg, vtarg, OP_MOVE);
1395 vtarg->sreg1 = call->vret_var->dreg;
1396 vtarg->dreg = mono_alloc_preg (cfg);
1397 MONO_ADD_INS (cfg->cbb, vtarg);
1399 mono_call_inst_add_outarg_reg (cfg, call, vtarg->dreg, cinfo->ret.reg, FALSE);
1402 call->stack_usage = cinfo->stack_usage;
/*
 * mono_arch_emit_outarg_vt:
 * Emits the IR that passes a valuetype call argument: the first
 * ainfo->size pointer-sized words of the struct are loaded into
 * consecutive outgoing registers starting at ainfo->reg; any remaining
 * ovf_size words are copied into the outgoing stack area at doffset.
 * NOTE(review): interior lines are missing from this excerpt; comments
 * cover only the visible statements.
 */
1408 mono_arch_emit_outarg_vt (MonoCompile *cfg, MonoInst *ins, MonoInst *src)
1410 MonoCallInst *call = (MonoCallInst*)ins->inst_p0;
1411 ArgInfo *ainfo = ins->inst_p1;
1412 int ovf_size = ainfo->vtsize;
1413 int doffset = ainfo->offset;
1414 int i, soffset, dreg;
/* pass the leading words of the struct in registers */
1417 for (i = 0; i < ainfo->size; ++i) {
1418 dreg = mono_alloc_ireg (cfg);
1419 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, dreg, src->dreg, soffset);
1420 mono_call_inst_add_outarg_reg (cfg, call, dreg, ainfo->reg + i, FALSE);
1421 soffset += sizeof (gpointer);
1423 //g_print ("vt size: %d at R%d + %d\n", doffset, vt->inst_basereg, vt->inst_offset);
/* copy whatever did not fit in registers into the stack argument area */
1425 mini_emit_memcpy (cfg, ARMREG_SP, doffset, src->dreg, soffset, ovf_size * sizeof (gpointer), 0);
/*
 * mono_arch_emit_setret:
 * Moves the method return value into the ABI return location.
 * Visible cases: I8/U8 use OP_SETLRET with the low/high halves of the
 * long vreg pair; R4/R8 use OP_SETFRET (soft-float and VFP builds) or
 * OP_FMOVE (remaining FP configuration); everything else falls through
 * to a plain OP_MOVE into cfg->ret->dreg.
 * NOTE(review): interior lines are missing from this excerpt.
 */
1429 mono_arch_emit_setret (MonoCompile *cfg, MonoMethod *method, MonoInst *val)
1431 MonoType *ret = mini_type_get_underlying_type (cfg->generic_sharing_context, mono_method_signature (method)->ret);
1434 if (ret->type == MONO_TYPE_I8 || ret->type == MONO_TYPE_U8) {
/* a 64-bit value lives in a vreg pair: dreg + 1 is the low word, dreg + 2 the high word */
1437 MONO_INST_NEW (cfg, ins, OP_SETLRET);
1438 ins->sreg1 = val->dreg + 1;
1439 ins->sreg2 = val->dreg + 2;
1440 MONO_ADD_INS (cfg->cbb, ins);
1443 #ifdef MONO_ARCH_SOFT_FLOAT
1444 if (ret->type == MONO_TYPE_R8) {
1447 MONO_INST_NEW (cfg, ins, OP_SETFRET);
1448 ins->dreg = cfg->ret->dreg;
1449 ins->sreg1 = val->dreg;
1450 MONO_ADD_INS (cfg->cbb, ins);
1453 if (ret->type == MONO_TYPE_R4) {
1454 /* Already converted to an int in method_to_ir () */
1455 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, cfg->ret->dreg, val->dreg);
1458 #elif defined(ARM_FPU_VFP)
1459 if (ret->type == MONO_TYPE_R8 || ret->type == MONO_TYPE_R4) {
1462 MONO_INST_NEW (cfg, ins, OP_SETFRET);
1463 ins->dreg = cfg->ret->dreg;
1464 ins->sreg1 = val->dreg;
1465 MONO_ADD_INS (cfg->cbb, ins);
1469 if (ret->type == MONO_TYPE_R4 || ret->type == MONO_TYPE_R8) {
1470 MONO_EMIT_NEW_UNALU (cfg, OP_FMOVE, cfg->ret->dreg, val->dreg);
/* default: integer/pointer return goes out through a plain move */
1477 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, cfg->ret->dreg, val->dreg);
1481 mono_arch_is_inst_imm (gint64 imm)
1486 #define DYN_CALL_STACK_ARGS 6
1489 MonoMethodSignature *sig;
1494 mgreg_t regs [PARAM_REGS + DYN_CALL_STACK_ARGS];
/*
 * dyn_call_supported:
 * Decides whether the dynamic-call path (mono_arch_start_dyn_call) can
 * marshal this signature. Visible rejections: more arguments than fit
 * in PARAM_REGS + DYN_CALL_STACK_ARGS, stack-passed args whose offset
 * falls outside the DYN_CALL_STACK_ARGS window, and by-value structs
 * that overflow the register+stack window.
 * NOTE(review): many lines (including the return statements and the
 * FP/soft-float cases) are missing from this excerpt — do not infer the
 * full accept/reject policy from what is visible here.
 */
1500 dyn_call_supported (CallInfo *cinfo, MonoMethodSignature *sig)
1504 if (sig->hasthis + sig->param_count > PARAM_REGS + DYN_CALL_STACK_ARGS)
1507 switch (cinfo->ret.regtype) {
1509 case RegTypeGeneral:
1510 case RegTypeIRegPair:
1511 case RegTypeStructByAddr:
1516 #elif defined(ARM_FPU_VFP)
1525 for (i = 0; i < cinfo->nargs; ++i) {
1526 switch (cinfo->args [i].regtype) {
1527 case RegTypeGeneral:
1529 case RegTypeIRegPair:
1532 if (cinfo->args [i].offset >= (DYN_CALL_STACK_ARGS * sizeof (gpointer)))
1535 case RegTypeStructByVal:
1536 if (cinfo->args [i].reg + cinfo->args [i].vtsize >= PARAM_REGS + DYN_CALL_STACK_ARGS)
1544 /* FIXME: Can't use cinfo only as it doesn't contain info about I8/float */
1545 for (i = 0; i < sig->param_count; ++i) {
1546 MonoType *t = sig->params [i];
1554 #ifdef MONO_ARCH_SOFT_FLOAT
/*
 * mono_arch_dyn_call_prepare:
 * Builds the arch-specific dynamic-call descriptor for SIG. Computes
 * the CallInfo, bails out (returning NULL on the missing line after the
 * check, presumably — confirm against upstream) when dyn_call_supported
 * rejects the signature, and otherwise wraps the CallInfo in a
 * heap-allocated ArchDynCallInfo. Caller frees via mono_arch_dyn_call_free.
 */
1573 mono_arch_dyn_call_prepare (MonoMethodSignature *sig)
1575 ArchDynCallInfo *info;
1578 cinfo = get_call_info (sig, FALSE);
1580 if (!dyn_call_supported (cinfo, sig)) {
1585 info = g_new0 (ArchDynCallInfo, 1);
1586 // FIXME: Preprocess the info to speed up start_dyn_call ()
1588 info->cinfo = cinfo;
1590 return (MonoDynCallInfo*)info;
/*
 * mono_arch_dyn_call_free:
 * Releases a descriptor created by mono_arch_dyn_call_prepare. Frees
 * the embedded CallInfo; the g_free of the wrapper itself is presumably
 * on a line missing from this excerpt — confirm against upstream.
 */
1594 mono_arch_dyn_call_free (MonoDynCallInfo *info)
1596 ArchDynCallInfo *ainfo = (ArchDynCallInfo*)info;
1598 g_free (ainfo->cinfo);
/*
 * mono_arch_start_dyn_call:
 * Marshals a dynamic call's arguments from the boxed ARGS array into
 * the DynCallArgs register image at BUF: the vtype return address (if
 * any) and 'this' claim the leading registers, then each parameter is
 * stored into p->regs at a slot derived from its ArgInfo (register
 * number, or PARAM_REGS + offset/4 for stack-passed args), with
 * per-type narrowing/widening in the switch below.
 * NOTE(review): interior lines (including greg/arg_index initialization
 * and several case labels) are missing from this excerpt.
 */
1603 mono_arch_start_dyn_call (MonoDynCallInfo *info, gpointer **args, guint8 *ret, guint8 *buf, int buf_len)
1605 ArchDynCallInfo *dinfo = (ArchDynCallInfo*)info;
1606 DynCallArgs *p = (DynCallArgs*)buf;
1607 int arg_index, greg, i, j;
1608 MonoMethodSignature *sig = dinfo->sig;
1610 g_assert (buf_len >= sizeof (DynCallArgs));
/* hidden first argument: address for a valuetype return */
1618 if (dinfo->cinfo->vtype_retaddr)
1619 p->regs [greg ++] = (mgreg_t)ret;
1622 p->regs [greg ++] = (mgreg_t)*(args [arg_index ++]);
1624 for (i = 0; i < sig->param_count; i++) {
1625 MonoType *t = mono_type_get_underlying_type (sig->params [i]);
1626 gpointer *arg = args [arg_index ++];
1627 ArgInfo *ainfo = &dinfo->cinfo->args [i + sig->hasthis];
/* pick the slot in the unified register+stack image */
1630 if (ainfo->regtype == RegTypeGeneral || ainfo->regtype == RegTypeIRegPair || ainfo->regtype == RegTypeStructByVal)
1632 else if (ainfo->regtype == RegTypeBase)
1633 slot = PARAM_REGS + (ainfo->offset / 4);
1635 g_assert_not_reached ();
1638 p->regs [slot] = (mgreg_t)*arg;
1643 case MONO_TYPE_STRING:
1644 case MONO_TYPE_CLASS:
1645 case MONO_TYPE_ARRAY:
1646 case MONO_TYPE_SZARRAY:
1647 case MONO_TYPE_OBJECT:
/* reference types: pass the unboxed pointer value */
1651 p->regs [slot] = (mgreg_t)*arg;
1653 case MONO_TYPE_BOOLEAN:
1655 p->regs [slot] = *(guint8*)arg;
1658 p->regs [slot] = *(gint8*)arg;
1661 p->regs [slot] = *(gint16*)arg;
1664 case MONO_TYPE_CHAR:
1665 p->regs [slot] = *(guint16*)arg;
1668 p->regs [slot] = *(gint32*)arg;
1671 p->regs [slot] = *(guint32*)arg;
/* 64-bit values occupy two consecutive slots */
1675 p->regs [slot ++] = (mgreg_t)arg [0];
1676 p->regs [slot] = (mgreg_t)arg [1];
1679 p->regs [slot] = *(mgreg_t*)arg;
1682 p->regs [slot ++] = (mgreg_t)arg [0];
1683 p->regs [slot] = (mgreg_t)arg [1];
1685 case MONO_TYPE_GENERICINST:
1686 if (MONO_TYPE_IS_REFERENCE (t)) {
1687 p->regs [slot] = (mgreg_t)*arg;
1692 case MONO_TYPE_VALUETYPE:
1693 g_assert (ainfo->regtype == RegTypeStructByVal);
/* size == 0 means the struct went entirely to the stack */
1695 if (ainfo->size == 0)
1696 slot = PARAM_REGS + (ainfo->offset / 4);
1700 for (j = 0; j < ainfo->size + ainfo->vtsize; ++j)
1701 p->regs [slot ++] = ((mgreg_t*)arg) [j];
1704 g_assert_not_reached ();
/*
 * mono_arch_finish_dyn_call:
 * Unmarshals the result of a dynamic call: reads res/res2 (the raw
 * return register values) out of the DynCallArgs image in BUF and
 * stores them through the 'ret' pointer with the width/signedness the
 * signature's return type demands.
 * NOTE(review): interior lines are missing from this excerpt.
 */
1710 mono_arch_finish_dyn_call (MonoDynCallInfo *info, guint8 *buf)
1712 ArchDynCallInfo *ainfo = (ArchDynCallInfo*)info;
1713 MonoMethodSignature *sig = ((ArchDynCallInfo*)info)->sig;
1714 guint8 *ret = ((DynCallArgs*)buf)->ret;
1715 mgreg_t res = ((DynCallArgs*)buf)->res;
1716 mgreg_t res2 = ((DynCallArgs*)buf)->res2;
1718 switch (mono_type_get_underlying_type (sig->ret)->type) {
1719 case MONO_TYPE_VOID:
1720 *(gpointer*)ret = NULL;
1722 case MONO_TYPE_STRING:
1723 case MONO_TYPE_CLASS:
1724 case MONO_TYPE_ARRAY:
1725 case MONO_TYPE_SZARRAY:
1726 case MONO_TYPE_OBJECT:
1730 *(gpointer*)ret = (gpointer)res;
1736 case MONO_TYPE_BOOLEAN:
1737 *(guint8*)ret = res;
1740 *(gint16*)ret = res;
1743 case MONO_TYPE_CHAR:
1744 *(guint16*)ret = res;
1747 *(gint32*)ret = res;
1750 *(guint32*)ret = res;
/* 64-bit result: write both return registers */
1754 /* This handles endianness as well */
1755 ((gint32*)ret) [0] = res;
1756 ((gint32*)ret) [1] = res2;
1758 case MONO_TYPE_GENERICINST:
1759 if (MONO_TYPE_IS_REFERENCE (sig->ret)) {
1760 *(gpointer*)ret = (gpointer)res;
1765 case MONO_TYPE_VALUETYPE:
1766 g_assert (ainfo->cinfo->vtype_retaddr);
1769 #if defined(ARM_FPU_VFP)
1771 *(float*)ret = *(float*)&res;
1773 case MONO_TYPE_R8: {
/* NOTE(review): '®s' below looks like mojibake for '&regs' (upstream
 * reads *(double*)&regs) — confirm against the original source */
1779 *(double*)ret = *(double*)®s;
1784 g_assert_not_reached ();
1789 * Allow tracing to work with this interface (with an optional argument)
/*
 * mono_arch_instrument_prolog:
 * Emits the call to the tracing 'func' at method entry: loads the
 * method pointer into r0, a NULL placeholder frame pointer into r1,
 * the tracer address into r2, and calls through r2.
 */
1793 mono_arch_instrument_prolog (MonoCompile *cfg, void *func, void *p, gboolean enable_arguments)
1797 code = mono_arm_emit_load_imm (code, ARMREG_R0, (guint32)cfg->method);
1798 ARM_MOV_REG_IMM8 (code, ARMREG_R1, 0); /* NULL ebp for now */
1799 code = mono_arm_emit_load_imm (code, ARMREG_R2, (guint32)func);
1800 code = emit_call_reg (code, ARMREG_R2);
/*
 * mono_arch_instrument_epilog_full:
 * Emits the call to the tracing 'func' at method exit. The return
 * value (r0, r0:r1 pair, FP or struct, chosen from the signature's
 * return type) is spilled to the param area before the call and
 * reloaded afterwards so the tracer cannot clobber it.
 * NOTE(review): interior lines (several case labels, the SAVE_* enum
 * values and the FP save path) are missing from this excerpt.
 */
1813 mono_arch_instrument_epilog_full (MonoCompile *cfg, void *func, void *p, gboolean enable_arguments, gboolean preserve_argument_registers)
1816 int save_mode = SAVE_NONE;
1818 MonoMethod *method = cfg->method;
1819 int rtype = mini_type_get_underlying_type (cfg->generic_sharing_context, mono_method_signature (method)->ret)->type;
1820 int save_offset = cfg->param_area;
/* grow the code buffer if the epilog instrumentation might not fit */
1824 offset = code - cfg->native_code;
1825 /* we need about 16 instructions */
1826 if (offset > (cfg->code_size - 16 * 4)) {
1827 cfg->code_size *= 2;
1828 cfg->native_code = g_realloc (cfg->native_code, cfg->code_size);
1829 code = cfg->native_code + offset;
1832 case MONO_TYPE_VOID:
1833 /* special case string .ctor icall */
1834 if (strcmp (".ctor", method->name) && method->klass == mono_defaults.string_class)
1835 save_mode = SAVE_ONE;
1837 save_mode = SAVE_NONE;
1841 save_mode = SAVE_TWO;
1845 save_mode = SAVE_FP;
1847 case MONO_TYPE_VALUETYPE:
1848 save_mode = SAVE_STRUCT;
1851 save_mode = SAVE_ONE;
/* spill the return value before calling the tracer */
1855 switch (save_mode) {
1857 ARM_STR_IMM (code, ARMREG_R0, cfg->frame_reg, save_offset);
1858 ARM_STR_IMM (code, ARMREG_R1, cfg->frame_reg, save_offset + 4);
1859 if (enable_arguments) {
/* shift the r0:r1 return pair into the tracer's r1:r2 argument slots */
1860 ARM_MOV_REG_REG (code, ARMREG_R2, ARMREG_R1);
1861 ARM_MOV_REG_REG (code, ARMREG_R1, ARMREG_R0);
1865 ARM_STR_IMM (code, ARMREG_R0, cfg->frame_reg, save_offset);
1866 if (enable_arguments) {
1867 ARM_MOV_REG_REG (code, ARMREG_R1, ARMREG_R0);
1871 /* FIXME: what reg? */
1872 if (enable_arguments) {
1873 /* FIXME: what reg? */
1877 if (enable_arguments) {
1878 /* FIXME: get the actual address */
1879 ARM_MOV_REG_REG (code, ARMREG_R1, ARMREG_R0);
/* call the tracer through IP, method pointer in r0 */
1887 code = mono_arm_emit_load_imm (code, ARMREG_R0, (guint32)cfg->method);
1888 code = mono_arm_emit_load_imm (code, ARMREG_IP, (guint32)func);
1889 code = emit_call_reg (code, ARMREG_IP);
/* restore the spilled return value */
1891 switch (save_mode) {
1893 ARM_LDR_IMM (code, ARMREG_R0, cfg->frame_reg, save_offset);
1894 ARM_LDR_IMM (code, ARMREG_R1, cfg->frame_reg, save_offset + 4);
1897 ARM_LDR_IMM (code, ARMREG_R0, cfg->frame_reg, save_offset);
1911 * The immediate field for cond branches is big enough for all reasonable methods
/*
 * EMIT_COND_BRANCH_FLAGS: emits a conditional branch to ins->inst_true_bb.
 * The short-circuit branch (disabled with '0 &&') would branch directly when
 * the target block's native offset is already known; otherwise a patch
 * record (MONO_PATCH_INFO_BB) is registered and a zero-displacement B<cond>
 * placeholder is emitted for later patching.
 * NOTE(review): macro continuation lines are missing from this excerpt.
 */
1913 #define EMIT_COND_BRANCH_FLAGS(ins,condcode) \
1914 if (0 && ins->inst_true_bb->native_offset) { \
1915 ARM_B_COND (code, (condcode), (code - cfg->native_code + ins->inst_true_bb->native_offset) & 0xffffff); \
1917 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_BB, ins->inst_true_bb); \
1918 ARM_B_COND (code, (condcode), 0); \
1921 #define EMIT_COND_BRANCH(ins,cond) EMIT_COND_BRANCH_FLAGS(ins, branch_cc_table [(cond)])
1923 /* emit an exception if condition fails
/*
 * EMIT_COND_SYSTEM_EXCEPTION_FLAGS: registers a MONO_PATCH_INFO_EXC patch
 * and emits a conditional BL placeholder that will be patched to the
 * exception-throwing code.
 */
1925 * We assign the extra code used to throw the implicit exceptions
1926 * to cfg->bb_exit as far as the big branch handling is concerned
1928 #define EMIT_COND_SYSTEM_EXCEPTION_FLAGS(condcode,exc_name) \
1930 mono_add_patch_info (cfg, code - cfg->native_code, \
1931 MONO_PATCH_INFO_EXC, exc_name); \
1932 ARM_BL_COND (code, (condcode), 0); \
1935 #define EMIT_COND_SYSTEM_EXCEPTION(cond,exc_name) EMIT_COND_SYSTEM_EXCEPTION_FLAGS(branch_cc_table [(cond)], (exc_name))
1938 mono_arch_peephole_pass_1 (MonoCompile *cfg, MonoBasicBlock *bb)
/*
 * mono_arch_peephole_pass_2:
 * Local peephole optimizations over one basic block, pairing each
 * instruction with its predecessor (last_ins): store-then-load
 * forwarding (load replaced by a move or a constant), redundant-load
 * elimination, narrow store-then-load turned into a sign/zero extend,
 * and removal of self-moves / move-swap pairs.
 * NOTE(review): interior lines are missing from this excerpt.
 */
1943 mono_arch_peephole_pass_2 (MonoCompile *cfg, MonoBasicBlock *bb)
1945 MonoInst *ins, *n, *last_ins = NULL;
1947 MONO_BB_FOR_EACH_INS_SAFE (bb, n, ins) {
1948 switch (ins->opcode) {
1951 /* Already done by an arch-independent pass */
1953 case OP_LOAD_MEMBASE:
1954 case OP_LOADI4_MEMBASE:
1956 * OP_STORE_MEMBASE_REG reg, offset(basereg)
1957 * OP_LOAD_MEMBASE offset(basereg), reg
/* store-to-load forwarding: reuse the just-stored register */
1959 if (last_ins && (last_ins->opcode == OP_STOREI4_MEMBASE_REG
1960 || last_ins->opcode == OP_STORE_MEMBASE_REG) &&
1961 ins->inst_basereg == last_ins->inst_destbasereg &&
1962 ins->inst_offset == last_ins->inst_offset) {
1963 if (ins->dreg == last_ins->sreg1) {
1964 MONO_DELETE_INS (bb, ins);
1967 //static int c = 0; printf ("MATCHX %s %d\n", cfg->method->name,c++);
1968 ins->opcode = OP_MOVE;
1969 ins->sreg1 = last_ins->sreg1;
1973 * Note: reg1 must be different from the basereg in the second load
1974 * OP_LOAD_MEMBASE offset(basereg), reg1
1975 * OP_LOAD_MEMBASE offset(basereg), reg2
1977 * OP_LOAD_MEMBASE offset(basereg), reg1
1978 * OP_MOVE reg1, reg2
/* duplicate load from the same address: turn the second into a move */
1980 } if (last_ins && (last_ins->opcode == OP_LOADI4_MEMBASE
1981 || last_ins->opcode == OP_LOAD_MEMBASE) &&
1982 ins->inst_basereg != last_ins->dreg &&
1983 ins->inst_basereg == last_ins->inst_basereg &&
1984 ins->inst_offset == last_ins->inst_offset) {
1986 if (ins->dreg == last_ins->dreg) {
1987 MONO_DELETE_INS (bb, ins);
1990 ins->opcode = OP_MOVE;
1991 ins->sreg1 = last_ins->dreg;
1994 //g_assert_not_reached ();
1998 * OP_STORE_MEMBASE_IMM imm, offset(basereg)
1999 * OP_LOAD_MEMBASE offset(basereg), reg
2001 * OP_STORE_MEMBASE_IMM imm, offset(basereg)
2002 * OP_ICONST reg, imm
2004 } else if (last_ins && (last_ins->opcode == OP_STOREI4_MEMBASE_IMM
2005 || last_ins->opcode == OP_STORE_MEMBASE_IMM) &&
2006 ins->inst_basereg == last_ins->inst_destbasereg &&
2007 ins->inst_offset == last_ins->inst_offset) {
2008 //static int c = 0; printf ("MATCHX %s %d\n", cfg->method->name,c++);
2009 ins->opcode = OP_ICONST;
2010 ins->inst_c0 = last_ins->inst_imm;
2011 g_assert_not_reached (); // check this rule
/* narrow store-then-load becomes an in-register extend */
2015 case OP_LOADU1_MEMBASE:
2016 case OP_LOADI1_MEMBASE:
2017 if (last_ins && (last_ins->opcode == OP_STOREI1_MEMBASE_REG) &&
2018 ins->inst_basereg == last_ins->inst_destbasereg &&
2019 ins->inst_offset == last_ins->inst_offset) {
2020 ins->opcode = (ins->opcode == OP_LOADI1_MEMBASE) ? OP_ICONV_TO_I1 : OP_ICONV_TO_U1;
2021 ins->sreg1 = last_ins->sreg1;
2024 case OP_LOADU2_MEMBASE:
2025 case OP_LOADI2_MEMBASE:
2026 if (last_ins && (last_ins->opcode == OP_STOREI2_MEMBASE_REG) &&
2027 ins->inst_basereg == last_ins->inst_destbasereg &&
2028 ins->inst_offset == last_ins->inst_offset) {
2029 ins->opcode = (ins->opcode == OP_LOADI2_MEMBASE) ? OP_ICONV_TO_I2 : OP_ICONV_TO_U2;
2030 ins->sreg1 = last_ins->sreg1;
2034 ins->opcode = OP_MOVE;
/* drop no-op moves and cancel move/counter-move pairs */
2038 if (ins->dreg == ins->sreg1) {
2039 MONO_DELETE_INS (bb, ins);
2043 * OP_MOVE sreg, dreg
2044 * OP_MOVE dreg, sreg
2046 if (last_ins && last_ins->opcode == OP_MOVE &&
2047 ins->sreg1 == last_ins->dreg &&
2048 ins->dreg == last_ins->sreg1) {
2049 MONO_DELETE_INS (bb, ins);
2057 bb->last_ins = last_ins;
2061 * the branch_cc_table should maintain the order of these
2075 branch_cc_table [] = {
/*
 * NEW_INS: allocates a fresh MonoInst of the given opcode and inserts
 * it into the current basic block immediately before 'ins' (used by the
 * lowering pass to materialize constants/addresses ahead of the
 * instruction that needs them).
 * NOTE(review): the macro's closing continuation line is missing from
 * this excerpt.
 */
2089 #define NEW_INS(cfg,dest,op) do { \
2090 MONO_INST_NEW ((cfg), (dest), (op)); \
2091 mono_bblock_insert_before_ins (bb, ins, (dest)); \
/*
 * map_to_reg_reg_op:
 * Maps an opcode whose immediate/offset cannot be encoded directly to
 * its register-operand counterpart: *_MEMBASE loads/stores become
 * *_MEMINDEX (offset in a register), and *_MEMBASE_IMM stores become
 * *_MEMBASE_REG (immediate first loaded into a register by the caller).
 * Aborts on opcodes with no mapping.
 * NOTE(review): the leading case labels (arithmetic _IMM opcodes) are
 * missing from this excerpt.
 */
2095 map_to_reg_reg_op (int op)
2104 case OP_COMPARE_IMM:
2106 case OP_ICOMPARE_IMM:
2120 case OP_LOAD_MEMBASE:
2121 return OP_LOAD_MEMINDEX;
2122 case OP_LOADI4_MEMBASE:
2123 return OP_LOADI4_MEMINDEX;
2124 case OP_LOADU4_MEMBASE:
2125 return OP_LOADU4_MEMINDEX;
2126 case OP_LOADU1_MEMBASE:
2127 return OP_LOADU1_MEMINDEX;
2128 case OP_LOADI2_MEMBASE:
2129 return OP_LOADI2_MEMINDEX;
2130 case OP_LOADU2_MEMBASE:
2131 return OP_LOADU2_MEMINDEX;
2132 case OP_LOADI1_MEMBASE:
2133 return OP_LOADI1_MEMINDEX;
2134 case OP_STOREI1_MEMBASE_REG:
2135 return OP_STOREI1_MEMINDEX;
2136 case OP_STOREI2_MEMBASE_REG:
2137 return OP_STOREI2_MEMINDEX;
2138 case OP_STOREI4_MEMBASE_REG:
2139 return OP_STOREI4_MEMINDEX;
2140 case OP_STORE_MEMBASE_REG:
2141 return OP_STORE_MEMINDEX;
2142 case OP_STORER4_MEMBASE_REG:
2143 return OP_STORER4_MEMINDEX;
2144 case OP_STORER8_MEMBASE_REG:
2145 return OP_STORER8_MEMINDEX;
/* immediate stores are lowered to register stores */
2146 case OP_STORE_MEMBASE_IMM:
2147 return OP_STORE_MEMBASE_REG;
2148 case OP_STOREI1_MEMBASE_IMM:
2149 return OP_STOREI1_MEMBASE_REG;
2150 case OP_STOREI2_MEMBASE_IMM:
2151 return OP_STOREI2_MEMBASE_REG;
2152 case OP_STOREI4_MEMBASE_IMM:
2153 return OP_STOREI4_MEMBASE_REG;
2155 g_assert_not_reached ();
2159 * Remove from the instruction list the instructions that can't be
2160 * represented with very simple instructions with no register
/*
 * mono_arch_lowering_pass:
 * Rewrites instructions whose immediates/offsets cannot be encoded in
 * ARM instruction fields: immediates that are not valid rotated imm8
 * values are materialized into a vreg with OP_ICONST and the opcode is
 * switched to its register form (via mono_op_imm_to_op /
 * map_to_reg_reg_op); MUL_IMM is strength-reduced; FP loads/stores get
 * an ADD_IMM base adjustment since VFP/FPA lack indexed addressing;
 * some FP compares get their branch condition swapped.
 * NOTE(review): interior lines (break statements, some case labels, the
 * loop_start label) are missing from this excerpt.
 */
2164 mono_arch_lowering_pass (MonoCompile *cfg, MonoBasicBlock *bb)
2166 MonoInst *ins, *temp, *last_ins = NULL;
2167 int rot_amount, imm8, low_imm;
2169 MONO_BB_FOR_EACH_INS (bb, ins) {
2171 switch (ins->opcode) {
2175 case OP_COMPARE_IMM:
2176 case OP_ICOMPARE_IMM:
/* immediate not encodable as rotated imm8: load it into a vreg */
2190 if ((imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount)) < 0) {
2191 NEW_INS (cfg, temp, OP_ICONST);
2192 temp->inst_c0 = ins->inst_imm;
2193 temp->dreg = mono_alloc_ireg (cfg);
2194 ins->sreg2 = temp->dreg;
2195 ins->opcode = mono_op_imm_to_op (ins->opcode);
2197 if (ins->opcode == OP_SBB || ins->opcode == OP_ISBB || ins->opcode == OP_SUBCC)
/* MUL_IMM strength reduction: *1 -> move, *0 -> const 0, power of two -> shift */
2203 if (ins->inst_imm == 1) {
2204 ins->opcode = OP_MOVE;
2207 if (ins->inst_imm == 0) {
2208 ins->opcode = OP_ICONST;
2212 imm8 = mono_is_power_of_two (ins->inst_imm);
2214 ins->opcode = OP_SHL_IMM;
2215 ins->inst_imm = imm8;
2218 NEW_INS (cfg, temp, OP_ICONST);
2219 temp->inst_c0 = ins->inst_imm;
2220 temp->dreg = mono_alloc_ireg (cfg);
2221 ins->sreg2 = temp->dreg;
2222 ins->opcode = OP_IMUL;
2228 if (ins->next && (ins->next->opcode == OP_COND_EXC_C || ins->next->opcode == OP_COND_EXC_IC))
2229 /* ARM sets the C flag to 1 if there was _no_ overflow */
2230 ins->next->opcode = OP_COND_EXC_NC;
2232 case OP_LOCALLOC_IMM:
2233 NEW_INS (cfg, temp, OP_ICONST);
2234 temp->inst_c0 = ins->inst_imm;
2235 temp->dreg = mono_alloc_ireg (cfg);
2236 ins->sreg1 = temp->dreg;
2237 ins->opcode = OP_LOCALLOC;
2239 case OP_LOAD_MEMBASE:
2240 case OP_LOADI4_MEMBASE:
2241 case OP_LOADU4_MEMBASE:
2242 case OP_LOADU1_MEMBASE:
2243 /* we can do two things: load the immed in a register
2244 * and use an indexed load, or see if the immed can be
2245 * represented as an ad_imm + a load with a smaller offset
2246 * that fits. We just do the first for now, optimize later.
2248 if (arm_is_imm12 (ins->inst_offset))
2250 NEW_INS (cfg, temp, OP_ICONST);
2251 temp->inst_c0 = ins->inst_offset;
2252 temp->dreg = mono_alloc_ireg (cfg);
2253 ins->sreg2 = temp->dreg;
2254 ins->opcode = map_to_reg_reg_op (ins->opcode);
/* halfword/signed-byte loads only have an 8-bit offset field */
2256 case OP_LOADI2_MEMBASE:
2257 case OP_LOADU2_MEMBASE:
2258 case OP_LOADI1_MEMBASE:
2259 if (arm_is_imm8 (ins->inst_offset))
2261 NEW_INS (cfg, temp, OP_ICONST);
2262 temp->inst_c0 = ins->inst_offset;
2263 temp->dreg = mono_alloc_ireg (cfg);
2264 ins->sreg2 = temp->dreg;
2265 ins->opcode = map_to_reg_reg_op (ins->opcode);
2267 case OP_LOADR4_MEMBASE:
2268 case OP_LOADR8_MEMBASE:
2269 if (arm_is_fpimm8 (ins->inst_offset))
/* split the offset: encodable high part added to the base, low part kept */
2271 low_imm = ins->inst_offset & 0x1ff;
2272 if ((imm8 = mono_arm_is_rotated_imm8 (ins->inst_offset & ~0x1ff, &rot_amount)) >= 0) {
2273 NEW_INS (cfg, temp, OP_ADD_IMM);
2274 temp->inst_imm = ins->inst_offset & ~0x1ff;
2275 temp->sreg1 = ins->inst_basereg;
2276 temp->dreg = mono_alloc_ireg (cfg);
2277 ins->inst_basereg = temp->dreg;
2278 ins->inst_offset = low_imm;
2281 /* VFP/FPA doesn't have indexed load instructions */
2282 g_assert_not_reached ();
2284 case OP_STORE_MEMBASE_REG:
2285 case OP_STOREI4_MEMBASE_REG:
2286 case OP_STOREI1_MEMBASE_REG:
2287 if (arm_is_imm12 (ins->inst_offset))
2289 NEW_INS (cfg, temp, OP_ICONST);
2290 temp->inst_c0 = ins->inst_offset;
2291 temp->dreg = mono_alloc_ireg (cfg);
2292 ins->sreg2 = temp->dreg;
2293 ins->opcode = map_to_reg_reg_op (ins->opcode);
2295 case OP_STOREI2_MEMBASE_REG:
2296 if (arm_is_imm8 (ins->inst_offset))
2298 NEW_INS (cfg, temp, OP_ICONST);
2299 temp->inst_c0 = ins->inst_offset;
2300 temp->dreg = mono_alloc_ireg (cfg);
2301 ins->sreg2 = temp->dreg;
2302 ins->opcode = map_to_reg_reg_op (ins->opcode);
2304 case OP_STORER4_MEMBASE_REG:
2305 case OP_STORER8_MEMBASE_REG:
2306 if (arm_is_fpimm8 (ins->inst_offset))
2308 low_imm = ins->inst_offset & 0x1ff;
2309 if ((imm8 = mono_arm_is_rotated_imm8 (ins->inst_offset & ~ 0x1ff, &rot_amount)) >= 0 && arm_is_fpimm8 (low_imm)) {
2310 NEW_INS (cfg, temp, OP_ADD_IMM);
2311 temp->inst_imm = ins->inst_offset & ~0x1ff;
2312 temp->sreg1 = ins->inst_destbasereg;
2313 temp->dreg = mono_alloc_ireg (cfg);
2314 ins->inst_destbasereg = temp->dreg;
2315 ins->inst_offset = low_imm;
2318 /*g_print ("fail with: %d (%d, %d)\n", ins->inst_offset, ins->inst_offset & ~0x1ff, low_imm);*/
2319 /* VFP/FPA doesn't have indexed store instructions */
2320 g_assert_not_reached ();
/* immediate stores: load the value, then re-lower the (possibly big) offset */
2322 case OP_STORE_MEMBASE_IMM:
2323 case OP_STOREI1_MEMBASE_IMM:
2324 case OP_STOREI2_MEMBASE_IMM:
2325 case OP_STOREI4_MEMBASE_IMM:
2326 NEW_INS (cfg, temp, OP_ICONST);
2327 temp->inst_c0 = ins->inst_imm;
2328 temp->dreg = mono_alloc_ireg (cfg);
2329 ins->sreg1 = temp->dreg;
2330 ins->opcode = map_to_reg_reg_op (ins->opcode);
2332 goto loop_start; /* make it handle the possibly big ins->inst_offset */
2334 gboolean swap = FALSE;
2337 /* Some fp compares require swapped operands */
2338 g_assert (ins->next);
2339 switch (ins->next->opcode) {
2341 ins->next->opcode = OP_FBLT;
2345 ins->next->opcode = OP_FBLT_UN;
2349 ins->next->opcode = OP_FBGE;
2353 ins->next->opcode = OP_FBGE_UN;
2361 ins->sreg1 = ins->sreg2;
2370 bb->last_ins = last_ins;
2371 bb->max_vreg = cfg->next_vreg;
/*
 * mono_arch_decompose_long_opts:
 * Arch-specific decomposition of 64-bit opcodes. Visible case: OP_LNEG
 * is expanded into a reverse-subtract pair — RSBS of the low word from
 * 0 (setting carry) followed by RSC of the high word, i.e. a two's
 * complement negate across the vreg pair (dreg+1 low, dreg+2 high).
 */
2375 mono_arch_decompose_long_opts (MonoCompile *cfg, MonoInst *long_ins)
2379 if (long_ins->opcode == OP_LNEG) {
2381 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ARM_RSBS_IMM, ins->dreg + 1, ins->sreg1 + 1, 0);
2382 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ARM_RSC_IMM, ins->dreg + 2, ins->sreg1 + 2, 0);
/*
 * emit_float_to_int:
 * Emits the float->integer conversion: FPA uses FIXZ directly; VFP
 * converts into scratch register F0 (TOSIZD/TOUIZD for signed/unsigned
 * truncation) and moves the result to the integer dreg with FMRS. The
 * result is then narrowed to 'size' bytes — masks/shift pairs for
 * unsigned, shift/arith-shift pairs for signed.
 */
2388 emit_float_to_int (MonoCompile *cfg, guchar *code, int dreg, int sreg, int size, gboolean is_signed)
2390 /* sreg is a float, dreg is an integer reg */
2392 ARM_FIXZ (code, dreg, sreg);
2393 #elif defined(ARM_FPU_VFP)
2395 ARM_TOSIZD (code, ARM_VFP_F0, sreg);
2397 ARM_TOUIZD (code, ARM_VFP_F0, sreg);
2398 ARM_FMRS (code, dreg, ARM_VFP_F0);
/* unsigned narrowing: mask to 8 bits or shift-pair to 16 bits */
2402 ARM_AND_REG_IMM8 (code, dreg, dreg, 0xff);
2403 else if (size == 2) {
2404 ARM_SHL_IMM (code, dreg, dreg, 16);
2405 ARM_SHR_IMM (code, dreg, dreg, 16);
/* signed narrowing: shift up then arithmetic shift down to sign-extend */
2409 ARM_SHL_IMM (code, dreg, dreg, 24);
2410 ARM_SAR_IMM (code, dreg, dreg, 24);
2411 } else if (size == 2) {
2412 ARM_SHL_IMM (code, dreg, dreg, 16);
2413 ARM_SAR_IMM (code, dreg, dreg, 16);
2421 const guchar *target;
2426 #define is_call_imm(diff) ((gint)(diff) >= -33554432 && (gint)(diff) <= 33554431)
/*
 * search_thunk_slot:
 * mono_domain_code_foreach callback that finds (or allocates) a branch
 * thunk for an out-of-range call. It first verifies the thunk area is
 * itself reachable from pdata->code with a 24-bit branch, then scans
 * the 3-word thunk slots: an existing slot whose constant matches the
 * target is reused; an all-zero slot is filled with a fresh
 * load-IP / jump-IP thunk. Either way the original call site is patched
 * to branch to the thunk and the icache is flushed.
 * NOTE(review): interior lines (including return statements and loop
 * advance) are missing from this excerpt.
 */
2429 search_thunk_slot (void *data, int csize, int bsize, void *user_data) {
2430 PatchData *pdata = (PatchData*)user_data;
2431 guchar *code = data;
2432 guint32 *thunks = data;
2433 guint32 *endthunks = (guint32*)(code + bsize);
2435 int difflow, diffhigh;
2437 /* always ensure a call from pdata->code can reach to the thunks without further thunks */
2438 difflow = (char*)pdata->code - (char*)thunks;
2439 diffhigh = (char*)pdata->code - (char*)endthunks;
2440 if (!((is_call_imm (thunks) && is_call_imm (endthunks)) || (is_call_imm (difflow) && is_call_imm (diffhigh))))
2444 * The thunk is composed of 3 words:
2445 * load constant from thunks [2] into ARM_IP
2448 * Note that the LR register is already setup
2450 //g_print ("thunk nentries: %d\n", ((char*)endthunks - (char*)thunks)/16);
2451 if ((pdata->found == 2) || (pdata->code >= code && pdata->code <= code + csize)) {
2452 while (thunks < endthunks) {
2453 //g_print ("looking for target: %p at %p (%08x-%08x)\n", pdata->target, thunks, thunks [0], thunks [1]);
/* reuse an existing thunk that already jumps to this target */
2454 if (thunks [2] == (guint32)pdata->target) {
2455 arm_patch (pdata->code, (guchar*)thunks);
2456 mono_arch_flush_icache (pdata->code, 4);
2459 } else if ((thunks [0] == 0) && (thunks [1] == 0) && (thunks [2] == 0)) {
2460 /* found a free slot instead: emit thunk */
2461 /* ARMREG_IP is fine to use since this can't be an IMT call
2464 code = (guchar*)thunks;
2465 ARM_LDR_IMM (code, ARMREG_IP, ARMREG_PC, 0);
/* BX is needed to support interworking when thumb code may be the target */
2466 if (thumb_supported)
2467 ARM_BX (code, ARMREG_IP);
2469 ARM_MOV_REG_REG (code, ARMREG_PC, ARMREG_IP);
2470 thunks [2] = (guint32)pdata->target;
2471 mono_arch_flush_icache ((guchar*)thunks, 12);
2473 arm_patch (pdata->code, (guchar*)thunks);
2474 mono_arch_flush_icache (pdata->code, 4);
2478 /* skip 12 bytes, the size of the thunk */
2482 //g_print ("failed thunk lookup for %p from %p at %p (%d entries)\n", pdata->target, pdata->code, data, count);
/*
 * handle_thunk:
 * Patches a branch whose target is out of direct range by routing it
 * through a thunk. Walks the current domain's code chunks with
 * search_thunk_slot under the domain lock (the second foreach pass
 * presumably retries with "use first free slot" semantics — the lines
 * setting that mode are missing here); aborts if no slot was found.
 */
2488 handle_thunk (int absolute, guchar *code, const guchar *target) {
2489 MonoDomain *domain = mono_domain_get ();
2493 pdata.target = target;
2494 pdata.absolute = absolute;
2497 mono_domain_lock (domain);
2498 mono_domain_code_foreach (domain, search_thunk_slot, &pdata);
2501 /* this uses the first available slot */
2503 mono_domain_code_foreach (domain, search_thunk_slot, &pdata);
2505 mono_domain_unlock (domain);
2507 if (pdata.found != 1)
2508 g_print ("thunk failed for %p from %p\n", target, code);
2509 g_assert (pdata.found == 1);
/*
 * arm_patch:
 * Patches the instruction(s) at CODE to transfer control to TARGET.
 * Handles: direct B/BL (24-bit signed displacement, PC+8 relative,
 * with BL->BLX rewriting when the target address has its thumb bit
 * set), falling back to handle_thunk when out of range; and the
 * several load-address-into-IP call sequences (BX / BLX / mov pc,ip),
 * where the embedded address constant is overwritten instead.
 * NOTE(review): interior lines are missing from this excerpt.
 */
2513 arm_patch (guchar *code, const guchar *target)
2515 guint32 *code32 = (void*)code;
2516 guint32 ins = *code32;
2517 guint32 prim = (ins >> 25) & 7;
2518 guint32 tval = GPOINTER_TO_UINT (target);
2520 //g_print ("patching 0x%08x (0x%08x) to point to 0x%08x\n", code, ins, target);
2521 if (prim == 5) { /* 101b */
2522 /* the diff starts 8 bytes from the branch opcode */
2523 gint diff = target - code - 8;
2525 gint tmask = 0xffffffff;
2526 if (tval & 1) { /* entering thumb mode */
2527 diff = target - 1 - code - 8;
2528 g_assert (thumb_supported);
2529 tbits = 0xf << 28; /* bl->blx bit pattern */
2530 g_assert ((ins & (1 << 24))); /* it must be a bl, not b instruction */
2531 /* this low bit of the displacement is moved to bit 24 in the instruction encoding */
2535 tmask = ~(1 << 24); /* clear the link bit */
2536 /*g_print ("blx to thumb: target: %p, code: %p, diff: %d, mask: %x\n", target, code, diff, tmask);*/
/* forward branch: diff must fit in the positive 24-bit range */
2541 if (diff <= 33554431) {
2543 ins = (ins & 0xff000000) | diff;
2545 *code32 = ins | tbits;
2549 /* diff between 0 and -33554432 */
2550 if (diff >= -33554432) {
2552 ins = (ins & 0xff000000) | (diff & ~0xff000000);
2554 *code32 = ins | tbits;
/* out of the 26-byte-range: route the call through a thunk */
2559 handle_thunk (TRUE, code, target);
2564 * The alternative call sequences looks like this:
2566 * ldr ip, [pc] // loads the address constant
2567 * b 1f // jumps around the constant
2568 * address constant embedded in the code
2573 * There are two cases for patching:
2574 * a) at the end of method emission: in this case code points to the start
2575 * of the call sequence
2576 * b) during runtime patching of the call site: in this case code points
2577 * to the mov pc, ip instruction
2579 * We have to handle also the thunk jump code sequence:
2583 * address constant // execution never reaches here
2585 if ((ins & 0x0ffffff0) == 0x12fff10) {
2586 /* Branch and exchange: the address is constructed in a reg
2587 * We can patch BX when the code sequence is the following:
2588 * ldr ip, [pc, #0] ; 0x8
/* rebuild the expected sequence in ccode and match it against memory */
2595 guint8 *emit = (guint8*)ccode;
2596 ARM_LDR_IMM (emit, ARMREG_IP, ARMREG_PC, 0);
2598 ARM_MOV_REG_REG (emit, ARMREG_LR, ARMREG_PC);
2599 ARM_BX (emit, ARMREG_IP);
2601 /*patching from magic trampoline*/
2602 if (ins == ccode [3]) {
2603 g_assert (code32 [-4] == ccode [0]);
2604 g_assert (code32 [-3] == ccode [1]);
2605 g_assert (code32 [-1] == ccode [2]);
2606 code32 [-2] = (guint32)target;
2609 /*patching from JIT*/
2610 if (ins == ccode [0]) {
2611 g_assert (code32 [1] == ccode [1]);
2612 g_assert (code32 [3] == ccode [2]);
2613 g_assert (code32 [4] == ccode [3]);
2614 code32 [2] = (guint32)target;
2617 g_assert_not_reached ();
2618 } else if ((ins & 0x0ffffff0) == 0x12fff30) {
/* BLX reg sequence: same idea, the constant sits one word back */
2626 guint8 *emit = (guint8*)ccode;
2627 ARM_LDR_IMM (emit, ARMREG_IP, ARMREG_PC, 0);
2629 ARM_BLX_REG (emit, ARMREG_IP);
2631 g_assert (code32 [-3] == ccode [0]);
2632 g_assert (code32 [-2] == ccode [1]);
2633 g_assert (code32 [0] == ccode [2]);
2635 code32 [-1] = (guint32)target;
2638 guint32 *tmp = ccode;
2639 guint8 *emit = (guint8*)tmp;
2640 ARM_LDR_IMM (emit, ARMREG_IP, ARMREG_PC, 0);
2641 ARM_MOV_REG_REG (emit, ARMREG_LR, ARMREG_PC);
2642 ARM_MOV_REG_REG (emit, ARMREG_PC, ARMREG_IP);
2643 ARM_BX (emit, ARMREG_IP);
2644 if (ins == ccode [2]) {
2645 g_assert_not_reached (); // should be -2 ...
2646 code32 [-1] = (guint32)target;
2649 if (ins == ccode [0]) {
2650 /* handles both thunk jump code and the far call sequence */
2651 code32 [2] = (guint32)target;
2654 g_assert_not_reached ();
2656 // g_print ("patched with 0x%08x\n", ins);
2660 * Return the >= 0 uimm8 value if val can be represented with a byte + rotation
2661 * (with the rotation amount in *rot_amount. rot_amount is already adjusted
2662 * to be used with the emit macros.
2663 * Return -1 otherwise.
/*
 * mono_arm_is_rotated_imm8:
 * Tests whether VAL is expressible as an ARM data-processing modified
 * immediate (an 8-bit value rotated right by an even amount). Tries
 * every even rotation; on a match stores the emit-macro-adjusted
 * rotation in *rot_amount. Returns the uimm8 on success; the -1 failure
 * return is on a line missing from this excerpt.
 */
2666 mono_arm_is_rotated_imm8 (guint32 val, gint *rot_amount)
2669 for (i = 0; i < 31; i+= 2) {
/* rotate val left by i == rotate right by 32-i, the encoding's direction */
2670 res = (val << (32 - i)) | (val >> i);
2673 *rot_amount = i? 32 - i: 0;
2680 * Emits in code a sequence of instructions that load the value 'val'
2681 * into the dreg register. Uses at most 4 instructions.
/*
 * mono_arm_emit_load_imm:
 * Emits code loading the 32-bit constant VAL into DREG using at most 4
 * instructions: a single MOV/MVN when VAL (or its complement) is a
 * rotated imm8; MOVW/MOVT on cores that support them (v7 path —
 * surrounding #ifdef lines are missing from this excerpt); otherwise a
 * MOV of the low byte followed by up to three ADDs, one per remaining
 * non-zero byte, each rotated into position.
 */
2684 mono_arm_emit_load_imm (guint8 *code, int dreg, guint32 val)
2686 int imm8, rot_amount;
/* (dead/alternate path) load from a literal pool next to the code */
2688 ARM_LDR_IMM (code, dreg, ARMREG_PC, 0);
2689 /* skip the constant pool */
2695 if ((imm8 = mono_arm_is_rotated_imm8 (val, &rot_amount)) >= 0) {
2696 ARM_MOV_REG_IMM (code, dreg, imm8, rot_amount);
2697 } else if ((imm8 = mono_arm_is_rotated_imm8 (~val, &rot_amount)) >= 0) {
2698 ARM_MVN_REG_IMM (code, dreg, imm8, rot_amount);
2701 ARM_MOVW_REG_IMM (code, dreg, val & 0xffff);
2703 ARM_MOVT_REG_IMM (code, dreg, (val >> 16) & 0xffff);
/* byte-by-byte construction: mov low byte, add each higher byte rotated in */
2707 ARM_MOV_REG_IMM8 (code, dreg, (val & 0xFF));
2709 ARM_ADD_REG_IMM (code, dreg, dreg, (val & 0xFF00) >> 8, 24);
2711 if (val & 0xFF0000) {
2712 ARM_ADD_REG_IMM (code, dreg, dreg, (val & 0xFF0000) >> 16, 16);
2714 if (val & 0xFF000000) {
2715 ARM_ADD_REG_IMM (code, dreg, dreg, (val & 0xFF000000) >> 24, 8);
2717 } else if (val & 0xFF00) {
2718 ARM_MOV_REG_IMM (code, dreg, (val & 0xFF00) >> 8, 24);
2719 if (val & 0xFF0000) {
2720 ARM_ADD_REG_IMM (code, dreg, dreg, (val & 0xFF0000) >> 16, 16);
2722 if (val & 0xFF000000) {
2723 ARM_ADD_REG_IMM (code, dreg, dreg, (val & 0xFF000000) >> 24, 8);
2725 } else if (val & 0xFF0000) {
2726 ARM_MOV_REG_IMM (code, dreg, (val & 0xFF0000) >> 16, 16);
2727 if (val & 0xFF000000) {
2728 ARM_ADD_REG_IMM (code, dreg, dreg, (val & 0xFF000000) >> 24, 8);
2731 //g_assert_not_reached ();
2737 * emit_load_volatile_arguments:
2739 * Load volatile arguments from the stack to the original input registers.
2740 * Required before a tail call.
/*
 * emit_load_volatile_arguments:
 * Reloads the method's arguments from their stack homes back into the
 * original incoming registers, mirroring (in reverse) the spills done
 * in emit_prolog. Needed before a tail call so the callee sees its
 * arguments where the ABI expects them.
 * NOTE(review): interior lines are missing from this excerpt (several
 * regtype branches are visibly empty here).
 */
2743 emit_load_volatile_arguments (MonoCompile *cfg, guint8 *code)
2745 MonoMethod *method = cfg->method;
2746 MonoMethodSignature *sig;
2751 /* FIXME: Generate intermediate code instead */
2753 sig = mono_method_signature (method);
2755 /* This is the opposite of the code in emit_prolog */
2759 cinfo = get_call_info (sig, sig->pinvoke);
/* the hidden vtype-return address argument goes back into its register */
2761 if (MONO_TYPE_ISSTRUCT (sig->ret)) {
2762 ArgInfo *ainfo = &cinfo->ret;
2763 inst = cfg->vret_addr;
2764 g_assert (arm_is_imm12 (inst->inst_offset));
2765 ARM_LDR_IMM (code, ainfo->reg, inst->inst_basereg, inst->inst_offset);
2767 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
2768 ArgInfo *ainfo = cinfo->args + i;
2769 inst = cfg->args [pos];
2771 if (cfg->verbose_level > 2)
2772 g_print ("Loading argument %d (type: %d)\n", i, ainfo->regtype);
2773 if (inst->opcode == OP_REGVAR) {
2774 if (ainfo->regtype == RegTypeGeneral)
2775 ARM_MOV_REG_REG (code, inst->dreg, ainfo->reg);
2776 else if (ainfo->regtype == RegTypeFP) {
2777 g_assert_not_reached ();
2778 } else if (ainfo->regtype == RegTypeBase) {
2782 if (arm_is_imm12 (prev_sp_offset + ainfo->offset)) {
2783 ARM_LDR_IMM (code, inst->dreg, ARMREG_SP, (prev_sp_offset + ainfo->offset));
/* offset too big for the 12-bit field: go through IP */
2785 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
2786 ARM_LDR_REG_REG (code, inst->dreg, ARMREG_SP, ARMREG_IP);
2790 g_assert_not_reached ();
2792 if (ainfo->regtype == RegTypeGeneral || ainfo->regtype == RegTypeIRegPair) {
2793 switch (ainfo->size) {
/* 8-byte args reload into the register pair reg/reg+1 */
2800 g_assert (arm_is_imm12 (inst->inst_offset));
2801 ARM_LDR_IMM (code, ainfo->reg, inst->inst_basereg, inst->inst_offset);
2802 g_assert (arm_is_imm12 (inst->inst_offset + 4));
2803 ARM_LDR_IMM (code, ainfo->reg + 1, inst->inst_basereg, inst->inst_offset + 4);
2806 if (arm_is_imm12 (inst->inst_offset)) {
2807 ARM_LDR_IMM (code, ainfo->reg, inst->inst_basereg, inst->inst_offset);
2809 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
2810 ARM_LDR_REG_REG (code, ainfo->reg, inst->inst_basereg, ARMREG_IP);
2814 } else if (ainfo->regtype == RegTypeBaseGen) {
2817 } else if (ainfo->regtype == RegTypeBase) {
2819 } else if (ainfo->regtype == RegTypeFP) {
2820 g_assert_not_reached ();
2821 } else if (ainfo->regtype == RegTypeStructByVal) {
2822 int doffset = inst->inst_offset;
/* reload each register-resident word of a by-value struct */
2826 if (mono_class_from_mono_type (inst->inst_vtype))
2827 size = mono_class_native_size (mono_class_from_mono_type (inst->inst_vtype), NULL);
2828 for (cur_reg = 0; cur_reg < ainfo->size; ++cur_reg) {
2829 if (arm_is_imm12 (doffset)) {
2830 ARM_LDR_IMM (code, ainfo->reg + cur_reg, inst->inst_basereg, doffset);
2832 code = mono_arm_emit_load_imm (code, ARMREG_IP, doffset);
2833 ARM_LDR_REG_REG (code, ainfo->reg + cur_reg, inst->inst_basereg, ARMREG_IP);
2835 soffset += sizeof (gpointer);
2836 doffset += sizeof (gpointer);
2841 } else if (ainfo->regtype == RegTypeStructByAddr) {
2858 mono_arch_output_basic_block (MonoCompile *cfg, MonoBasicBlock *bb)
2863 guint8 *code = cfg->native_code + cfg->code_len;
2864 MonoInst *last_ins = NULL;
2865 guint last_offset = 0;
2867 int imm8, rot_amount;
2869 /* we don't align basic blocks of loops on arm */
2871 if (cfg->verbose_level > 2)
2872 g_print ("Basic block %d starting at offset 0x%x\n", bb->block_num, bb->native_offset);
2874 cpos = bb->max_offset;
2876 if (cfg->prof_options & MONO_PROFILE_COVERAGE) {
2877 //MonoCoverageInfo *cov = mono_get_coverage_info (cfg->method);
2878 //g_assert (!mono_compile_aot);
2881 // cov->data [bb->dfn].iloffset = bb->cil_code - cfg->cil_code;
2882 /* this is not thread save, but good enough */
2883 /* fixme: howto handle overflows? */
2884 //x86_inc_mem (code, &cov->data [bb->dfn].count);
2887 if (mono_break_at_bb_method && mono_method_desc_full_match (mono_break_at_bb_method, cfg->method) && bb->block_num == mono_break_at_bb_bb_num) {
2888 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_INTERNAL_METHOD,
2889 (gpointer)"mono_break");
2890 code = emit_call_seq (cfg, code);
2893 MONO_BB_FOR_EACH_INS (bb, ins) {
2894 offset = code - cfg->native_code;
2896 max_len = ((guint8 *)ins_get_spec (ins->opcode))[MONO_INST_LEN];
2898 if (offset > (cfg->code_size - max_len - 16)) {
2899 cfg->code_size *= 2;
2900 cfg->native_code = g_realloc (cfg->native_code, cfg->code_size);
2901 code = cfg->native_code + offset;
2903 // if (ins->cil_code)
2904 // g_print ("cil code\n");
2905 mono_debug_record_line_number (cfg, ins, offset);
2907 switch (ins->opcode) {
2908 case OP_MEMORY_BARRIER:
2911 #ifdef HAVE_AEABI_READ_TP
2912 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_INTERNAL_METHOD,
2913 (gpointer)"__aeabi_read_tp");
2914 code = emit_call_seq (cfg, code);
2916 ARM_LDR_IMM (code, ins->dreg, ARMREG_R0, ins->inst_offset);
2918 g_assert_not_reached ();
2922 ppc_mullw (code, ppc_r4, ins->sreg1, ins->sreg2);
2923 ppc_mulhw (code, ppc_r3, ins->sreg1, ins->sreg2);
2926 ppc_mullw (code, ppc_r4, ins->sreg1, ins->sreg2);
2927 ppc_mulhwu (code, ppc_r3, ins->sreg1, ins->sreg2);
2929 case OP_STOREI1_MEMBASE_IMM:
2930 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_imm & 0xFF);
2931 g_assert (arm_is_imm12 (ins->inst_offset));
2932 ARM_STRB_IMM (code, ARMREG_LR, ins->inst_destbasereg, ins->inst_offset);
2934 case OP_STOREI2_MEMBASE_IMM:
2935 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_imm & 0xFFFF);
2936 g_assert (arm_is_imm8 (ins->inst_offset));
2937 ARM_STRH_IMM (code, ARMREG_LR, ins->inst_destbasereg, ins->inst_offset);
2939 case OP_STORE_MEMBASE_IMM:
2940 case OP_STOREI4_MEMBASE_IMM:
2941 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_imm);
2942 g_assert (arm_is_imm12 (ins->inst_offset));
2943 ARM_STR_IMM (code, ARMREG_LR, ins->inst_destbasereg, ins->inst_offset);
2945 case OP_STOREI1_MEMBASE_REG:
2946 g_assert (arm_is_imm12 (ins->inst_offset));
2947 ARM_STRB_IMM (code, ins->sreg1, ins->inst_destbasereg, ins->inst_offset);
2949 case OP_STOREI2_MEMBASE_REG:
2950 g_assert (arm_is_imm8 (ins->inst_offset));
2951 ARM_STRH_IMM (code, ins->sreg1, ins->inst_destbasereg, ins->inst_offset);
2953 case OP_STORE_MEMBASE_REG:
2954 case OP_STOREI4_MEMBASE_REG:
2955 /* this case is special, since it happens for spill code after lowering has been called */
2956 if (arm_is_imm12 (ins->inst_offset)) {
2957 ARM_STR_IMM (code, ins->sreg1, ins->inst_destbasereg, ins->inst_offset);
2959 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_offset);
2960 ARM_STR_REG_REG (code, ins->sreg1, ins->inst_destbasereg, ARMREG_LR);
2963 case OP_STOREI1_MEMINDEX:
2964 ARM_STRB_REG_REG (code, ins->sreg1, ins->inst_destbasereg, ins->sreg2);
2966 case OP_STOREI2_MEMINDEX:
2967 ARM_STRH_REG_REG (code, ins->sreg1, ins->inst_destbasereg, ins->sreg2);
2969 case OP_STORE_MEMINDEX:
2970 case OP_STOREI4_MEMINDEX:
2971 ARM_STR_REG_REG (code, ins->sreg1, ins->inst_destbasereg, ins->sreg2);
2974 g_assert_not_reached ();
2976 case OP_LOAD_MEMINDEX:
2977 case OP_LOADI4_MEMINDEX:
2978 case OP_LOADU4_MEMINDEX:
2979 ARM_LDR_REG_REG (code, ins->dreg, ins->inst_basereg, ins->sreg2);
2981 case OP_LOADI1_MEMINDEX:
2982 ARM_LDRSB_REG_REG (code, ins->dreg, ins->inst_basereg, ins->sreg2);
2984 case OP_LOADU1_MEMINDEX:
2985 ARM_LDRB_REG_REG (code, ins->dreg, ins->inst_basereg, ins->sreg2);
2987 case OP_LOADI2_MEMINDEX:
2988 ARM_LDRSH_REG_REG (code, ins->dreg, ins->inst_basereg, ins->sreg2);
2990 case OP_LOADU2_MEMINDEX:
2991 ARM_LDRH_REG_REG (code, ins->dreg, ins->inst_basereg, ins->sreg2);
2993 case OP_LOAD_MEMBASE:
2994 case OP_LOADI4_MEMBASE:
2995 case OP_LOADU4_MEMBASE:
2996 /* this case is special, since it happens for spill code after lowering has been called */
2997 if (arm_is_imm12 (ins->inst_offset)) {
2998 ARM_LDR_IMM (code, ins->dreg, ins->inst_basereg, ins->inst_offset);
3000 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_offset);
3001 ARM_LDR_REG_REG (code, ins->dreg, ins->inst_basereg, ARMREG_LR);
3004 case OP_LOADI1_MEMBASE:
3005 g_assert (arm_is_imm8 (ins->inst_offset));
3006 ARM_LDRSB_IMM (code, ins->dreg, ins->inst_basereg, ins->inst_offset);
3008 case OP_LOADU1_MEMBASE:
3009 g_assert (arm_is_imm12 (ins->inst_offset));
3010 ARM_LDRB_IMM (code, ins->dreg, ins->inst_basereg, ins->inst_offset);
3012 case OP_LOADU2_MEMBASE:
3013 g_assert (arm_is_imm8 (ins->inst_offset));
3014 ARM_LDRH_IMM (code, ins->dreg, ins->inst_basereg, ins->inst_offset);
3016 case OP_LOADI2_MEMBASE:
3017 g_assert (arm_is_imm8 (ins->inst_offset));
3018 ARM_LDRSH_IMM (code, ins->dreg, ins->inst_basereg, ins->inst_offset);
3020 case OP_ICONV_TO_I1:
3021 ARM_SHL_IMM (code, ins->dreg, ins->sreg1, 24);
3022 ARM_SAR_IMM (code, ins->dreg, ins->dreg, 24);
3024 case OP_ICONV_TO_I2:
3025 ARM_SHL_IMM (code, ins->dreg, ins->sreg1, 16);
3026 ARM_SAR_IMM (code, ins->dreg, ins->dreg, 16);
3028 case OP_ICONV_TO_U1:
3029 ARM_AND_REG_IMM8 (code, ins->dreg, ins->sreg1, 0xff);
3031 case OP_ICONV_TO_U2:
3032 ARM_SHL_IMM (code, ins->dreg, ins->sreg1, 16);
3033 ARM_SHR_IMM (code, ins->dreg, ins->dreg, 16);
3037 ARM_CMP_REG_REG (code, ins->sreg1, ins->sreg2);
3039 case OP_COMPARE_IMM:
3040 case OP_ICOMPARE_IMM:
3041 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
3042 g_assert (imm8 >= 0);
3043 ARM_CMP_REG_IMM (code, ins->sreg1, imm8, rot_amount);
3047 * gdb does not like encountering the hw breakpoint ins in the debugged code.
3048 * So instead of emitting a trap, we emit a call a C function and place a
3051 //*(int*)code = 0xef9f0001;
3054 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_INTERNAL_METHOD,
3055 (gpointer)"mono_break");
3056 code = emit_call_seq (cfg, code);
3058 case OP_RELAXED_NOP:
3063 case OP_DUMMY_STORE:
3064 case OP_NOT_REACHED:
3067 case OP_SEQ_POINT: {
3069 MonoInst *info_var = cfg->arch.seq_point_info_var;
3070 MonoInst *ss_trigger_page_var = cfg->arch.ss_trigger_page_var;
3072 int dreg = ARMREG_LR;
3075 * For AOT, we use one got slot per method, which will point to a
3076 * SeqPointInfo structure, containing all the information required
3077 * by the code below.
3079 if (cfg->compile_aot) {
3080 g_assert (info_var);
3081 g_assert (info_var->opcode == OP_REGOFFSET);
3082 g_assert (arm_is_imm12 (info_var->inst_offset));
3086 * Read from the single stepping trigger page. This will cause a
3087 * SIGSEGV when single stepping is enabled.
3088 * We do this _before_ the breakpoint, so single stepping after
3089 * a breakpoint is hit will step to the next IL offset.
3091 g_assert (((guint64)(gsize)ss_trigger_page >> 32) == 0);
3093 if (ins->flags & MONO_INST_SINGLE_STEP_LOC) {
3094 if (cfg->compile_aot) {
3095 /* Load the trigger page addr from the variable initialized in the prolog */
3096 var = ss_trigger_page_var;
3098 g_assert (var->opcode == OP_REGOFFSET);
3099 g_assert (arm_is_imm12 (var->inst_offset));
3100 ARM_LDR_IMM (code, dreg, var->inst_basereg, var->inst_offset);
3102 ARM_LDR_IMM (code, dreg, ARMREG_PC, 0);
3104 *(int*)code = (int)ss_trigger_page;
3107 ARM_LDR_IMM (code, dreg, dreg, 0);
3110 il_offset = ins->inst_imm;
3112 if (!cfg->seq_points)
3113 cfg->seq_points = g_ptr_array_new ();
3114 g_ptr_array_add (cfg->seq_points, GUINT_TO_POINTER (il_offset));
3115 g_ptr_array_add (cfg->seq_points, GUINT_TO_POINTER (code - cfg->native_code));
3117 if (cfg->compile_aot) {
3118 guint32 offset = code - cfg->native_code;
3121 ARM_LDR_IMM (code, dreg, info_var->inst_basereg, info_var->inst_offset);
3122 /* Add the offset */
3123 val = ((offset / 4) * sizeof (guint8*)) + G_STRUCT_OFFSET (SeqPointInfo, bp_addrs);
3124 ARM_ADD_REG_IMM (code, dreg, dreg, (val & 0xFF), 0);
3126 * Have to emit nops to keep the difference between the offset
3127 * stored in seq_points and breakpoint instruction constant,
3128 * mono_arch_get_ip_for_breakpoint () depends on this.
3131 ARM_ADD_REG_IMM (code, dreg, dreg, (val & 0xFF00) >> 8, 24);
3135 ARM_ADD_REG_IMM (code, dreg, dreg, (val & 0xFF0000) >> 16, 16);
3138 g_assert (!(val & 0xFF000000));
3139 /* Load the info->bp_addrs [offset], which is either 0 or the address of a trigger page */
3140 ARM_LDR_IMM (code, dreg, dreg, 0);
3142 /* What is faster, a branch or a load ? */
3143 ARM_CMP_REG_IMM (code, dreg, 0, 0);
3144 /* The breakpoint instruction */
3145 ARM_LDR_IMM_COND (code, dreg, dreg, 0, ARMCOND_NE);
3148 * A placeholder for a possible breakpoint inserted by
3149 * mono_arch_set_breakpoint ().
3151 for (i = 0; i < 4; ++i)
3158 ARM_ADDS_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
3161 ARM_ADD_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
3165 ARM_ADCS_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
3168 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
3169 g_assert (imm8 >= 0);
3170 ARM_ADDS_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
3174 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
3175 g_assert (imm8 >= 0);
3176 ARM_ADD_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
3180 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
3181 g_assert (imm8 >= 0);
3182 ARM_ADCS_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
3185 ARM_ADD_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
3186 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, "OverflowException");
3188 case OP_IADD_OVF_UN:
3189 ARM_ADD_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
3190 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, "OverflowException");
3193 ARM_SUB_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
3194 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, "OverflowException");
3196 case OP_ISUB_OVF_UN:
3197 ARM_SUB_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
3198 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_TRUE, PPC_BR_EQ, "OverflowException");
3200 case OP_ADD_OVF_CARRY:
3201 ARM_ADCS_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
3202 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, "OverflowException");
3204 case OP_ADD_OVF_UN_CARRY:
3205 ARM_ADCS_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
3206 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, "OverflowException");
3208 case OP_SUB_OVF_CARRY:
3209 ARM_SBCS_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
3210 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, "OverflowException");
3212 case OP_SUB_OVF_UN_CARRY:
3213 ARM_SBCS_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
3214 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_TRUE, PPC_BR_EQ, "OverflowException");
3218 ARM_SUBS_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
3221 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
3222 g_assert (imm8 >= 0);
3223 ARM_SUBS_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
3226 ARM_SUB_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
3230 ARM_SBCS_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
3234 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
3235 g_assert (imm8 >= 0);
3236 ARM_SUB_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
3240 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
3241 g_assert (imm8 >= 0);
3242 ARM_SBCS_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
3244 case OP_ARM_RSBS_IMM:
3245 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
3246 g_assert (imm8 >= 0);
3247 ARM_RSBS_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
3249 case OP_ARM_RSC_IMM:
3250 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
3251 g_assert (imm8 >= 0);
3252 ARM_RSC_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
3255 ARM_AND_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
3259 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
3260 g_assert (imm8 >= 0);
3261 ARM_AND_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
3269 /* crappy ARM arch doesn't have a DIV instruction */
3270 g_assert_not_reached ();
3272 ARM_ORR_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
3276 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
3277 g_assert (imm8 >= 0);
3278 ARM_ORR_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
3281 ARM_EOR_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
3285 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
3286 g_assert (imm8 >= 0);
3287 ARM_EOR_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
3290 ARM_SHL_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
3295 ARM_SHL_IMM (code, ins->dreg, ins->sreg1, (ins->inst_imm & 0x1f));
3296 else if (ins->dreg != ins->sreg1)
3297 ARM_MOV_REG_REG (code, ins->dreg, ins->sreg1);
3300 ARM_SAR_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
3305 ARM_SAR_IMM (code, ins->dreg, ins->sreg1, (ins->inst_imm & 0x1f));
3306 else if (ins->dreg != ins->sreg1)
3307 ARM_MOV_REG_REG (code, ins->dreg, ins->sreg1);
3310 case OP_ISHR_UN_IMM:
3312 ARM_SHR_IMM (code, ins->dreg, ins->sreg1, (ins->inst_imm & 0x1f));
3313 else if (ins->dreg != ins->sreg1)
3314 ARM_MOV_REG_REG (code, ins->dreg, ins->sreg1);
3317 ARM_SHR_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
3320 ARM_MVN_REG_REG (code, ins->dreg, ins->sreg1);
3323 ARM_RSB_REG_IMM8 (code, ins->dreg, ins->sreg1, 0);
3326 if (ins->dreg == ins->sreg2)
3327 ARM_MUL_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
3329 ARM_MUL_REG_REG (code, ins->dreg, ins->sreg2, ins->sreg1);
3332 g_assert_not_reached ();
3335 /* FIXME: handle ovf/ sreg2 != dreg */
3336 ARM_MUL_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
3337 /* FIXME: MUL doesn't set the C/O flags on ARM */
3339 case OP_IMUL_OVF_UN:
3340 /* FIXME: handle ovf/ sreg2 != dreg */
3341 ARM_MUL_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
3342 /* FIXME: MUL doesn't set the C/O flags on ARM */
3345 code = mono_arm_emit_load_imm (code, ins->dreg, ins->inst_c0);
3348 /* Load the GOT offset */
3349 mono_add_patch_info (cfg, offset, (MonoJumpInfoType)ins->inst_i1, ins->inst_p0);
3350 ARM_LDR_IMM (code, ins->dreg, ARMREG_PC, 0);
3352 *(gpointer*)code = NULL;
3354 /* Load the value from the GOT */
3355 ARM_LDR_REG_REG (code, ins->dreg, ARMREG_PC, ins->dreg);
3357 case OP_ICONV_TO_I4:
3358 case OP_ICONV_TO_U4:
3360 if (ins->dreg != ins->sreg1)
3361 ARM_MOV_REG_REG (code, ins->dreg, ins->sreg1);
3364 int saved = ins->sreg2;
3365 if (ins->sreg2 == ARM_LSW_REG) {
3366 ARM_MOV_REG_REG (code, ARMREG_LR, ins->sreg2);
3369 if (ins->sreg1 != ARM_LSW_REG)
3370 ARM_MOV_REG_REG (code, ARM_LSW_REG, ins->sreg1);
3371 if (saved != ARM_MSW_REG)
3372 ARM_MOV_REG_REG (code, ARM_MSW_REG, saved);
3377 ARM_MVFD (code, ins->dreg, ins->sreg1);
3378 #elif defined(ARM_FPU_VFP)
3379 ARM_CPYD (code, ins->dreg, ins->sreg1);
3382 case OP_FCONV_TO_R4:
3384 ARM_MVFS (code, ins->dreg, ins->sreg1);
3385 #elif defined(ARM_FPU_VFP)
3386 ARM_CVTD (code, ins->dreg, ins->sreg1);
3387 ARM_CVTS (code, ins->dreg, ins->dreg);
3392 * Keep in sync with mono_arch_emit_epilog
3394 g_assert (!cfg->method->save_lmf);
3396 code = emit_load_volatile_arguments (cfg, code);
3398 code = emit_big_add (code, ARMREG_SP, cfg->frame_reg, cfg->stack_usage);
3399 ARM_POP_NWB (code, cfg->used_int_regs | ((1 << ARMREG_SP)) | ((1 << ARMREG_LR)));
3400 mono_add_patch_info (cfg, (guint8*) code - cfg->native_code, MONO_PATCH_INFO_METHOD_JUMP, ins->inst_p0);
3401 if (cfg->compile_aot) {
3402 ARM_LDR_IMM (code, ARMREG_IP, ARMREG_PC, 0);
3404 *(gpointer*)code = NULL;
3406 ARM_LDR_REG_REG (code, ARMREG_PC, ARMREG_PC, ARMREG_IP);
3412 /* ensure ins->sreg1 is not NULL */
3413 ARM_LDR_IMM (code, ARMREG_LR, ins->sreg1, 0);
3417 if (ppc_is_imm16 (cfg->sig_cookie + cfg->stack_usage)) {
3418 ppc_addi (code, ppc_r11, cfg->frame_reg, cfg->sig_cookie + cfg->stack_usage);
3420 ppc_load (code, ppc_r11, cfg->sig_cookie + cfg->stack_usage);
3421 ppc_add (code, ppc_r11, cfg->frame_reg, ppc_r11);
3423 ppc_stw (code, ppc_r11, 0, ins->sreg1);
3433 call = (MonoCallInst*)ins;
3434 if (ins->flags & MONO_INST_HAS_METHOD)
3435 mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_METHOD, call->method);
3437 mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_ABS, call->fptr);
3438 code = emit_call_seq (cfg, code);
3439 code = emit_move_return_value (cfg, ins, code);
3445 case OP_VOIDCALL_REG:
3447 code = emit_call_reg (code, ins->sreg1);
3448 code = emit_move_return_value (cfg, ins, code);
3450 case OP_FCALL_MEMBASE:
3451 case OP_LCALL_MEMBASE:
3452 case OP_VCALL_MEMBASE:
3453 case OP_VCALL2_MEMBASE:
3454 case OP_VOIDCALL_MEMBASE:
3455 case OP_CALL_MEMBASE:
3456 g_assert (arm_is_imm12 (ins->inst_offset));
3457 g_assert (ins->sreg1 != ARMREG_LR);
3458 call = (MonoCallInst*)ins;
3459 if (call->method->klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
3460 ARM_ADD_REG_IMM8 (code, ARMREG_LR, ARMREG_PC, 4);
3461 ARM_LDR_IMM (code, ARMREG_PC, ins->sreg1, ins->inst_offset);
3463 * We can't embed the method in the code stream in PIC code, or
3465 * Instead, we put it in V5 in code emitted by
3466 * mono_arch_emit_imt_argument (), and embed NULL here to
3467 * signal the IMT thunk that the value is in V5.
3469 if (call->dynamic_imt_arg)
3470 *((gpointer*)code) = NULL;
3472 *((gpointer*)code) = (gpointer)call->method;
3475 ARM_MOV_REG_REG (code, ARMREG_LR, ARMREG_PC);
3476 ARM_LDR_IMM (code, ARMREG_PC, ins->sreg1, ins->inst_offset);
3478 code = emit_move_return_value (cfg, ins, code);
3481 /* keep alignment */
3482 int alloca_waste = cfg->param_area;
3485 /* round the size to 8 bytes */
3486 ARM_ADD_REG_IMM8 (code, ins->dreg, ins->sreg1, 7);
3487 ARM_BIC_REG_IMM8 (code, ins->dreg, ins->dreg, 7);
3489 ARM_ADD_REG_IMM8 (code, ins->dreg, ins->dreg, alloca_waste);
3490 ARM_SUB_REG_REG (code, ARMREG_SP, ARMREG_SP, ins->dreg);
3491 /* memzero the area: dreg holds the size, sp is the pointer */
3492 if (ins->flags & MONO_INST_INIT) {
3493 guint8 *start_loop, *branch_to_cond;
3494 ARM_MOV_REG_IMM8 (code, ARMREG_LR, 0);
3495 branch_to_cond = code;
3498 ARM_STR_REG_REG (code, ARMREG_LR, ARMREG_SP, ins->dreg);
3499 arm_patch (branch_to_cond, code);
3500 /* decrement by 4 and set flags */
3501 ARM_SUBS_REG_IMM8 (code, ins->dreg, ins->dreg, 4);
3502 ARM_B_COND (code, ARMCOND_GE, 0);
3503 arm_patch (code - 4, start_loop);
3505 ARM_ADD_REG_IMM8 (code, ins->dreg, ARMREG_SP, alloca_waste);
3510 MonoInst *var = cfg->dyn_call_var;
3512 g_assert (var->opcode == OP_REGOFFSET);
3513 g_assert (arm_is_imm12 (var->inst_offset));
3515 /* lr = args buffer filled by mono_arch_get_dyn_call_args () */
3516 ARM_MOV_REG_REG( code, ARMREG_LR, ins->sreg1);
3518 ARM_MOV_REG_REG( code, ARMREG_IP, ins->sreg2);
3520 /* Save args buffer */
3521 ARM_STR_IMM (code, ARMREG_LR, var->inst_basereg, var->inst_offset);
3523 /* Set stack slots using R0 as scratch reg */
3524 /* MONO_ARCH_DYN_CALL_PARAM_AREA gives the size of stack space available */
3525 for (i = 0; i < DYN_CALL_STACK_ARGS; ++i) {
3526 ARM_LDR_IMM (code, ARMREG_R0, ARMREG_LR, (PARAM_REGS + i) * sizeof (gpointer));
3527 ARM_STR_IMM (code, ARMREG_R0, ARMREG_SP, i * sizeof (gpointer));
3530 /* Set argument registers */
3531 for (i = 0; i < PARAM_REGS; ++i)
3532 ARM_LDR_IMM (code, i, ARMREG_LR, i * sizeof (gpointer));
3535 ARM_MOV_REG_REG (code, ARMREG_LR, ARMREG_PC);
3536 ARM_MOV_REG_REG (code, ARMREG_PC, ARMREG_IP);
3539 ARM_LDR_IMM (code, ARMREG_IP, var->inst_basereg, var->inst_offset);
3540 ARM_STR_IMM (code, ARMREG_R0, ARMREG_IP, G_STRUCT_OFFSET (DynCallArgs, res));
3541 ARM_STR_IMM (code, ARMREG_R1, ARMREG_IP, G_STRUCT_OFFSET (DynCallArgs, res2));
3545 if (ins->sreg1 != ARMREG_R0)
3546 ARM_MOV_REG_REG (code, ARMREG_R0, ins->sreg1);
3547 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_INTERNAL_METHOD,
3548 (gpointer)"mono_arch_throw_exception");
3549 code = emit_call_seq (cfg, code);
3553 if (ins->sreg1 != ARMREG_R0)
3554 ARM_MOV_REG_REG (code, ARMREG_R0, ins->sreg1);
3555 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_INTERNAL_METHOD,
3556 (gpointer)"mono_arch_rethrow_exception");
3557 code = emit_call_seq (cfg, code);
3560 case OP_START_HANDLER: {
3561 MonoInst *spvar = mono_find_spvar_for_region (cfg, bb->region);
3563 if (arm_is_imm12 (spvar->inst_offset)) {
3564 ARM_STR_IMM (code, ARMREG_LR, spvar->inst_basereg, spvar->inst_offset);
3566 code = mono_arm_emit_load_imm (code, ARMREG_IP, spvar->inst_offset);
3567 ARM_STR_REG_REG (code, ARMREG_LR, spvar->inst_basereg, ARMREG_IP);
3571 case OP_ENDFILTER: {
3572 MonoInst *spvar = mono_find_spvar_for_region (cfg, bb->region);
3574 if (ins->sreg1 != ARMREG_R0)
3575 ARM_MOV_REG_REG (code, ARMREG_R0, ins->sreg1);
3576 if (arm_is_imm12 (spvar->inst_offset)) {
3577 ARM_LDR_IMM (code, ARMREG_IP, spvar->inst_basereg, spvar->inst_offset);
3579 g_assert (ARMREG_IP != spvar->inst_basereg);
3580 code = mono_arm_emit_load_imm (code, ARMREG_IP, spvar->inst_offset);
3581 ARM_LDR_REG_REG (code, ARMREG_IP, spvar->inst_basereg, ARMREG_IP);
3583 ARM_MOV_REG_REG (code, ARMREG_PC, ARMREG_IP);
3586 case OP_ENDFINALLY: {
3587 MonoInst *spvar = mono_find_spvar_for_region (cfg, bb->region);
3589 if (arm_is_imm12 (spvar->inst_offset)) {
3590 ARM_LDR_IMM (code, ARMREG_IP, spvar->inst_basereg, spvar->inst_offset);
3592 g_assert (ARMREG_IP != spvar->inst_basereg);
3593 code = mono_arm_emit_load_imm (code, ARMREG_IP, spvar->inst_offset);
3594 ARM_LDR_REG_REG (code, ARMREG_IP, spvar->inst_basereg, ARMREG_IP);
3596 ARM_MOV_REG_REG (code, ARMREG_PC, ARMREG_IP);
3599 case OP_CALL_HANDLER:
3600 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_BB, ins->inst_target_bb);
3604 ins->inst_c0 = code - cfg->native_code;
3607 /*if (ins->inst_target_bb->native_offset) {
3609 //x86_jump_code (code, cfg->native_code + ins->inst_target_bb->native_offset);
3611 mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_BB, ins->inst_target_bb);
3616 ARM_MOV_REG_REG (code, ARMREG_PC, ins->sreg1);
3620 * In the normal case we have:
3621 * ldr pc, [pc, ins->sreg1 << 2]
3624 * ldr lr, [pc, ins->sreg1 << 2]
3626 * After follows the data.
3627 * FIXME: add aot support.
3629 mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_SWITCH, ins->inst_p0);
3630 max_len += 4 * GPOINTER_TO_INT (ins->klass);
3631 if (offset > (cfg->code_size - max_len - 16)) {
3632 cfg->code_size += max_len;
3633 cfg->code_size *= 2;
3634 cfg->native_code = g_realloc (cfg->native_code, cfg->code_size);
3635 code = cfg->native_code + offset;
3637 ARM_LDR_REG_REG_SHIFT (code, ARMREG_PC, ARMREG_PC, ins->sreg1, ARMSHIFT_LSL, 2);
3639 code += 4 * GPOINTER_TO_INT (ins->klass);
3643 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 0, ARMCOND_NE);
3644 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_EQ);
3648 ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
3649 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_LT);
3653 ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
3654 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_LO);
3658 ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
3659 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_GT);
3663 ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
3664 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_HI);
3666 case OP_COND_EXC_EQ:
3667 case OP_COND_EXC_NE_UN:
3668 case OP_COND_EXC_LT:
3669 case OP_COND_EXC_LT_UN:
3670 case OP_COND_EXC_GT:
3671 case OP_COND_EXC_GT_UN:
3672 case OP_COND_EXC_GE:
3673 case OP_COND_EXC_GE_UN:
3674 case OP_COND_EXC_LE:
3675 case OP_COND_EXC_LE_UN:
3676 EMIT_COND_SYSTEM_EXCEPTION (ins->opcode - OP_COND_EXC_EQ, ins->inst_p1);
3678 case OP_COND_EXC_IEQ:
3679 case OP_COND_EXC_INE_UN:
3680 case OP_COND_EXC_ILT:
3681 case OP_COND_EXC_ILT_UN:
3682 case OP_COND_EXC_IGT:
3683 case OP_COND_EXC_IGT_UN:
3684 case OP_COND_EXC_IGE:
3685 case OP_COND_EXC_IGE_UN:
3686 case OP_COND_EXC_ILE:
3687 case OP_COND_EXC_ILE_UN:
3688 EMIT_COND_SYSTEM_EXCEPTION (ins->opcode - OP_COND_EXC_IEQ, ins->inst_p1);
3691 case OP_COND_EXC_IC:
3692 EMIT_COND_SYSTEM_EXCEPTION_FLAGS (ARMCOND_CS, ins->inst_p1);
3694 case OP_COND_EXC_OV:
3695 case OP_COND_EXC_IOV:
3696 EMIT_COND_SYSTEM_EXCEPTION_FLAGS (ARMCOND_VS, ins->inst_p1);
3698 case OP_COND_EXC_NC:
3699 case OP_COND_EXC_INC:
3700 EMIT_COND_SYSTEM_EXCEPTION_FLAGS (ARMCOND_CC, ins->inst_p1);
3702 case OP_COND_EXC_NO:
3703 case OP_COND_EXC_INO:
3704 EMIT_COND_SYSTEM_EXCEPTION_FLAGS (ARMCOND_VC, ins->inst_p1);
3716 EMIT_COND_BRANCH (ins, ins->opcode - OP_IBEQ);
3719 /* floating point opcodes */
3722 if (cfg->compile_aot) {
3723 ARM_LDFD (code, ins->dreg, ARMREG_PC, 0);
3725 *(guint32*)code = ((guint32*)(ins->inst_p0))[0];
3727 *(guint32*)code = ((guint32*)(ins->inst_p0))[1];
3730 /* FIXME: we can optimize the imm load by dealing with part of
3731 * the displacement in LDFD (aligning to 512).
3733 code = mono_arm_emit_load_imm (code, ARMREG_LR, (guint32)ins->inst_p0);
3734 ARM_LDFD (code, ins->dreg, ARMREG_LR, 0);
3738 if (cfg->compile_aot) {
3739 ARM_LDFS (code, ins->dreg, ARMREG_PC, 0);
3741 *(guint32*)code = ((guint32*)(ins->inst_p0))[0];
3744 code = mono_arm_emit_load_imm (code, ARMREG_LR, (guint32)ins->inst_p0);
3745 ARM_LDFS (code, ins->dreg, ARMREG_LR, 0);
3748 case OP_STORER8_MEMBASE_REG:
3749 /* This is generated by the local regalloc pass which runs after the lowering pass */
3750 if (!arm_is_fpimm8 (ins->inst_offset)) {
3751 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_offset);
3752 ARM_ADD_REG_REG (code, ARMREG_LR, ARMREG_LR, ins->inst_destbasereg);
3753 ARM_STFD (code, ins->sreg1, ARMREG_LR, 0);
3755 ARM_STFD (code, ins->sreg1, ins->inst_destbasereg, ins->inst_offset);
3758 case OP_LOADR8_MEMBASE:
3759 /* This is generated by the local regalloc pass which runs after the lowering pass */
3760 if (!arm_is_fpimm8 (ins->inst_offset)) {
3761 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_offset);
3762 ARM_ADD_REG_REG (code, ARMREG_LR, ARMREG_LR, ins->inst_basereg);
3763 ARM_LDFD (code, ins->dreg, ARMREG_LR, 0);
3765 ARM_LDFD (code, ins->dreg, ins->inst_basereg, ins->inst_offset);
3768 case OP_STORER4_MEMBASE_REG:
3769 g_assert (arm_is_fpimm8 (ins->inst_offset));
3770 ARM_STFS (code, ins->sreg1, ins->inst_destbasereg, ins->inst_offset);
3772 case OP_LOADR4_MEMBASE:
3773 g_assert (arm_is_fpimm8 (ins->inst_offset));
3774 ARM_LDFS (code, ins->dreg, ins->inst_basereg, ins->inst_offset);
3776 case OP_ICONV_TO_R_UN: {
3778 tmpreg = ins->dreg == 0? 1: 0;
3779 ARM_CMP_REG_IMM8 (code, ins->sreg1, 0);
3780 ARM_FLTD (code, ins->dreg, ins->sreg1);
3781 ARM_B_COND (code, ARMCOND_GE, 8);
3782 /* save the temp register */
3783 ARM_SUB_REG_IMM8 (code, ARMREG_SP, ARMREG_SP, 8);
3784 ARM_STFD (code, tmpreg, ARMREG_SP, 0);
3785 ARM_LDFD (code, tmpreg, ARMREG_PC, 12);
3786 ARM_FPA_ADFD (code, ins->dreg, ins->dreg, tmpreg);
3787 ARM_LDFD (code, tmpreg, ARMREG_SP, 0);
3788 ARM_ADD_REG_IMM8 (code, ARMREG_SP, ARMREG_SP, 8);
3789 /* skip the constant pool */
3792 *(int*)code = 0x41f00000;
3797 * ldfltd ftemp, [pc, #8] 0x41f00000 0x00000000
3798 * adfltd fdest, fdest, ftemp
3802 case OP_ICONV_TO_R4:
3803 ARM_FLTS (code, ins->dreg, ins->sreg1);
3805 case OP_ICONV_TO_R8:
3806 ARM_FLTD (code, ins->dreg, ins->sreg1);
3809 #elif defined(ARM_FPU_VFP)
3812 if (cfg->compile_aot) {
3813 ARM_FLDD (code, ins->dreg, ARMREG_PC, 0);
3815 *(guint32*)code = ((guint32*)(ins->inst_p0))[0];
3817 *(guint32*)code = ((guint32*)(ins->inst_p0))[1];
3820 /* FIXME: we can optimize the imm load by dealing with part of
3821 * the displacement in LDFD (aligning to 512).
3823 code = mono_arm_emit_load_imm (code, ARMREG_LR, (guint32)ins->inst_p0);
3824 ARM_FLDD (code, ins->dreg, ARMREG_LR, 0);
3828 if (cfg->compile_aot) {
3829 ARM_FLDS (code, ins->dreg, ARMREG_PC, 0);
3831 *(guint32*)code = ((guint32*)(ins->inst_p0))[0];
3833 ARM_CVTS (code, ins->dreg, ins->dreg);
3835 code = mono_arm_emit_load_imm (code, ARMREG_LR, (guint32)ins->inst_p0);
3836 ARM_FLDS (code, ins->dreg, ARMREG_LR, 0);
3837 ARM_CVTS (code, ins->dreg, ins->dreg);
3840 case OP_STORER8_MEMBASE_REG:
3841 /* This is generated by the local regalloc pass which runs after the lowering pass */
3842 if (!arm_is_fpimm8 (ins->inst_offset)) {
3843 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_offset);
3844 ARM_ADD_REG_REG (code, ARMREG_LR, ARMREG_LR, ins->inst_destbasereg);
3845 ARM_FSTD (code, ins->sreg1, ARMREG_LR, 0);
3847 ARM_FSTD (code, ins->sreg1, ins->inst_destbasereg, ins->inst_offset);
3850 case OP_LOADR8_MEMBASE:
3851 /* This is generated by the local regalloc pass which runs after the lowering pass */
3852 if (!arm_is_fpimm8 (ins->inst_offset)) {
3853 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_offset);
3854 ARM_ADD_REG_REG (code, ARMREG_LR, ARMREG_LR, ins->inst_basereg);
3855 ARM_FLDD (code, ins->dreg, ARMREG_LR, 0);
3857 ARM_FLDD (code, ins->dreg, ins->inst_basereg, ins->inst_offset);
3860 case OP_STORER4_MEMBASE_REG:
3861 g_assert (arm_is_fpimm8 (ins->inst_offset));
3862 ARM_CVTD (code, ARM_VFP_F0, ins->sreg1);
3863 ARM_FSTS (code, ARM_VFP_F0, ins->inst_destbasereg, ins->inst_offset);
3865 case OP_LOADR4_MEMBASE:
3866 g_assert (arm_is_fpimm8 (ins->inst_offset));
3867 ARM_FLDS (code, ARM_VFP_F0, ins->inst_basereg, ins->inst_offset);
3868 ARM_CVTS (code, ins->dreg, ARM_VFP_F0);
3870 case OP_ICONV_TO_R_UN: {
3871 g_assert_not_reached ();
3874 case OP_ICONV_TO_R4:
3875 ARM_FMSR (code, ARM_VFP_F0, ins->sreg1);
3876 ARM_FSITOS (code, ARM_VFP_F0, ARM_VFP_F0);
3877 ARM_CVTS (code, ins->dreg, ARM_VFP_F0);
3879 case OP_ICONV_TO_R8:
3880 ARM_FMSR (code, ARM_VFP_F0, ins->sreg1);
3881 ARM_FSITOD (code, ins->dreg, ARM_VFP_F0);
3885 if (mono_method_signature (cfg->method)->ret->type == MONO_TYPE_R4) {
3886 ARM_CVTD (code, ARM_VFP_F0, ins->sreg1);
3887 ARM_FMRS (code, ARMREG_R0, ARM_VFP_F0);
3889 ARM_FMRRD (code, ARMREG_R0, ARMREG_R1, ins->sreg1);
3895 case OP_FCONV_TO_I1:
3896 code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 1, TRUE);
3898 case OP_FCONV_TO_U1:
3899 code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 1, FALSE);
3901 case OP_FCONV_TO_I2:
3902 code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 2, TRUE);
3904 case OP_FCONV_TO_U2:
3905 code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 2, FALSE);
3907 case OP_FCONV_TO_I4:
3909 code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 4, TRUE);
3911 case OP_FCONV_TO_U4:
3913 code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 4, FALSE);
3915 case OP_FCONV_TO_I8:
3916 case OP_FCONV_TO_U8:
3917 g_assert_not_reached ();
3918 /* Implemented as helper calls */
3920 case OP_LCONV_TO_R_UN:
3921 g_assert_not_reached ();
3922 /* Implemented as helper calls */
3924 case OP_LCONV_TO_OVF_I4_2: {
3925 guint8 *high_bit_not_set, *valid_negative, *invalid_negative, *valid_positive;
3927 * Valid ints: 0xffffffff:8000000 to 00000000:0x7f000000
3930 ARM_CMP_REG_IMM8 (code, ins->sreg1, 0);
3931 high_bit_not_set = code;
3932 ARM_B_COND (code, ARMCOND_GE, 0); /*branch if bit 31 of the lower part is not set*/
3934 ARM_CMN_REG_IMM8 (code, ins->sreg2, 1); /*This have the same effect as CMP reg, 0xFFFFFFFF */
3935 valid_negative = code;
3936 ARM_B_COND (code, ARMCOND_EQ, 0); /*branch if upper part == 0xFFFFFFFF (lower part has bit 31 set) */
3937 invalid_negative = code;
3938 ARM_B_COND (code, ARMCOND_AL, 0);
3940 arm_patch (high_bit_not_set, code);
3942 ARM_CMP_REG_IMM8 (code, ins->sreg2, 0);
3943 valid_positive = code;
3944 ARM_B_COND (code, ARMCOND_EQ, 0); /*branch if upper part == 0 (lower part has bit 31 clear)*/
3946 arm_patch (invalid_negative, code);
3947 EMIT_COND_SYSTEM_EXCEPTION_FLAGS (ARMCOND_AL, "OverflowException");
3949 arm_patch (valid_negative, code);
3950 arm_patch (valid_positive, code);
3952 if (ins->dreg != ins->sreg1)
3953 ARM_MOV_REG_REG (code, ins->dreg, ins->sreg1);
3958 ARM_FPA_ADFD (code, ins->dreg, ins->sreg1, ins->sreg2);
3961 ARM_FPA_SUFD (code, ins->dreg, ins->sreg1, ins->sreg2);
3964 ARM_FPA_MUFD (code, ins->dreg, ins->sreg1, ins->sreg2);
3967 ARM_FPA_DVFD (code, ins->dreg, ins->sreg1, ins->sreg2);
3970 ARM_MNFD (code, ins->dreg, ins->sreg1);
3972 #elif defined(ARM_FPU_VFP)
3974 ARM_VFP_ADDD (code, ins->dreg, ins->sreg1, ins->sreg2);
3977 ARM_VFP_SUBD (code, ins->dreg, ins->sreg1, ins->sreg2);
3980 ARM_VFP_MULD (code, ins->dreg, ins->sreg1, ins->sreg2);
3983 ARM_VFP_DIVD (code, ins->dreg, ins->sreg1, ins->sreg2);
3986 ARM_NEGD (code, ins->dreg, ins->sreg1);
3991 g_assert_not_reached ();
3995 ARM_FCMP (code, ARM_FPA_CMF, ins->sreg1, ins->sreg2);
3996 #elif defined(ARM_FPU_VFP)
3997 ARM_CMPD (code, ins->sreg1, ins->sreg2);
4003 ARM_FCMP (code, ARM_FPA_CMF, ins->sreg1, ins->sreg2);
4004 #elif defined(ARM_FPU_VFP)
4005 ARM_CMPD (code, ins->sreg1, ins->sreg2);
4008 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 0, ARMCOND_NE);
4009 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_EQ);
4013 ARM_FCMP (code, ARM_FPA_CMF, ins->sreg1, ins->sreg2);
4014 #elif defined(ARM_FPU_VFP)
4015 ARM_CMPD (code, ins->sreg1, ins->sreg2);
4018 ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
4019 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_MI);
4023 ARM_FCMP (code, ARM_FPA_CMF, ins->sreg1, ins->sreg2);
4024 #elif defined(ARM_FPU_VFP)
4025 ARM_CMPD (code, ins->sreg1, ins->sreg2);
4028 ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
4029 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_MI);
4030 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_VS);
4035 ARM_FCMP (code, ARM_FPA_CMF, ins->sreg2, ins->sreg1);
4036 #elif defined(ARM_FPU_VFP)
4037 ARM_CMPD (code, ins->sreg2, ins->sreg1);
4040 ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
4041 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_MI);
4046 ARM_FCMP (code, ARM_FPA_CMF, ins->sreg2, ins->sreg1);
4047 #elif defined(ARM_FPU_VFP)
4048 ARM_CMPD (code, ins->sreg2, ins->sreg1);
4051 ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
4052 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_MI);
4053 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_VS);
4055 /* ARM FPA flags table:
4056 * N Less than ARMCOND_MI
4057 * Z Equal ARMCOND_EQ
4058 * C Greater Than or Equal ARMCOND_CS
4059 * V Unordered ARMCOND_VS
4062 EMIT_COND_BRANCH (ins, OP_IBEQ - OP_IBEQ);
4065 EMIT_COND_BRANCH (ins, OP_IBNE_UN - OP_IBEQ);
4068 EMIT_COND_BRANCH_FLAGS (ins, ARMCOND_MI); /* N set */
4071 EMIT_COND_BRANCH_FLAGS (ins, ARMCOND_VS); /* V set */
4072 EMIT_COND_BRANCH_FLAGS (ins, ARMCOND_MI); /* N set */
4078 g_assert_not_reached ();
4082 EMIT_COND_BRANCH_FLAGS (ins, ARMCOND_GE);
4084 /* FPA requires EQ even thou the docs suggests that just CS is enough */
4085 EMIT_COND_BRANCH_FLAGS (ins, ARMCOND_EQ);
4086 EMIT_COND_BRANCH_FLAGS (ins, ARMCOND_CS);
4090 EMIT_COND_BRANCH_FLAGS (ins, ARMCOND_VS); /* V set */
4091 EMIT_COND_BRANCH_FLAGS (ins, ARMCOND_GE);
4096 if (ins->dreg != ins->sreg1)
4097 ARM_MVFD (code, ins->dreg, ins->sreg1);
4098 #elif defined(ARM_FPU_VFP)
4099 ARM_ABSD (code, ARM_VFP_D1, ins->sreg1);
4100 ARM_FLDD (code, ARM_VFP_D0, ARMREG_PC, 0);
4102 *(guint32*)code = 0xffffffff;
4104 *(guint32*)code = 0x7fefffff;
4106 ARM_CMPD (code, ARM_VFP_D1, ARM_VFP_D0);
4108 EMIT_COND_SYSTEM_EXCEPTION_FLAGS (ARMCOND_GT, "ArithmeticException");
4109 ARM_CMPD (code, ins->sreg1, ins->sreg1);
4111 EMIT_COND_SYSTEM_EXCEPTION_FLAGS (ARMCOND_VS, "ArithmeticException");
4113 ARM_CPYD (code, ins->dreg, ins->sreg1);
4118 g_warning ("unknown opcode %s in %s()\n", mono_inst_name (ins->opcode), __FUNCTION__);
4119 g_assert_not_reached ();
4122 if ((cfg->opt & MONO_OPT_BRANCH) && ((code - cfg->native_code - offset) > max_len)) {
4123 g_warning ("wrong maximal instruction length of instruction %s (expected %d, got %d)",
4124 mono_inst_name (ins->opcode), max_len, code - cfg->native_code - offset);
4125 g_assert_not_reached ();
4131 last_offset = offset;
4134 cfg->code_len = code - cfg->native_code;
4137 #endif /* DISABLE_JIT */
4139 #ifdef HAVE_AEABI_READ_TP
/* EABI helper that returns the thread pointer; provided by the C library on
 * linux-eabi (see the HAVE_AEABI_READ_TP guard near the top of the file). */
4140 void __aeabi_read_tp (void);
/*
 * mono_arch_register_lowlevel_calls:
 *   Register the ARM-specific low-level helpers with the JIT icall table so
 * generated code can be patched to call them by name.
 */
4144 mono_arch_register_lowlevel_calls (void)
4146 /* The signature doesn't matter */
4147 mono_register_jit_icall (mono_arm_throw_exception, "mono_arm_throw_exception", mono_create_icall_signature ("void"), TRUE);
4148 mono_register_jit_icall (mono_arm_throw_exception_by_token, "mono_arm_throw_exception_by_token", mono_create_icall_signature ("void"), TRUE);
4150 #ifdef HAVE_AEABI_READ_TP
/* Registered so the fast TLS path in the prolog can emit a call to it. */
4151 mono_register_jit_icall (__aeabi_read_tp, "__aeabi_read_tp", mono_create_icall_signature ("void"), TRUE);
/* patch_lis_ori: patch a lis/ori instruction pair with a 32 bit constant.
 * NOTE(review): lis/ori are PowerPC mnemonics -- this macro appears to be a
 * leftover from the PPC backend; its only uses below sit behind
 * g_assert_not_reached (), so it is effectively dead on ARM. Confirm and
 * consider removing. */
4155 #define patch_lis_ori(ip,val) do {\
4156 guint16 *__lis_ori = (guint16*)(ip); \
4157 __lis_ori [1] = (((guint32)(val)) >> 16) & 0xffff; \
4158 __lis_ori [3] = ((guint32)(val)) & 0xffff; \
/*
 * mono_arch_patch_code:
 *   Walk the jump-info list for a freshly emitted method and patch each
 * recorded site in CODE with its resolved target. Most patch kinds funnel
 * into arm_patch () at the bottom; SWITCH tables are filled in directly.
 */
4162 mono_arch_patch_code (MonoMethod *method, MonoDomain *domain, guint8 *code, MonoJumpInfo *ji, gboolean run_cctors)
4164 MonoJumpInfo *patch_info;
4165 gboolean compile_aot = !run_cctors;
4167 for (patch_info = ji; patch_info; patch_info = patch_info->next) {
4168 unsigned char *ip = patch_info->ip.i + code;
4169 const unsigned char *target;
/* Inlined switch jump tables are patched in place, entry by entry. */
4171 if (patch_info->type == MONO_PATCH_INFO_SWITCH && !compile_aot) {
4172 gpointer *jt = (gpointer*)(ip + 8);
4174 /* jt is the inlined jump table, 2 instructions after ip
4175 * In the normal case we store the absolute addresses,
4176 * otherwise the displacements.
4178 for (i = 0; i < patch_info->data.table->table_size; i++)
4179 jt [i] = code + (int)patch_info->data.table->table [i];
4182 target = mono_resolve_patch_target (method, domain, code, patch_info, run_cctors);
4185 switch (patch_info->type) {
4186 case MONO_PATCH_INFO_BB:
4187 case MONO_PATCH_INFO_LABEL:
4190 /* No need to patch these */
/* NOTE(review): every case in the switch below starts with
 * g_assert_not_reached () -- these look like dead PPC-era paths
 * (patch_lis_ori) kept for reference; the live path is the final
 * arm_patch () call. */
4195 switch (patch_info->type) {
4196 case MONO_PATCH_INFO_IP:
4197 g_assert_not_reached ();
4198 patch_lis_ori (ip, ip);
4200 case MONO_PATCH_INFO_METHOD_REL:
4201 g_assert_not_reached ();
4202 *((gpointer *)(ip)) = code + patch_info->data.offset;
4204 case MONO_PATCH_INFO_METHODCONST:
4205 case MONO_PATCH_INFO_CLASS:
4206 case MONO_PATCH_INFO_IMAGE:
4207 case MONO_PATCH_INFO_FIELD:
4208 case MONO_PATCH_INFO_VTABLE:
4209 case MONO_PATCH_INFO_IID:
4210 case MONO_PATCH_INFO_SFLDA:
4211 case MONO_PATCH_INFO_LDSTR:
4212 case MONO_PATCH_INFO_TYPE_FROM_HANDLE:
4213 case MONO_PATCH_INFO_LDTOKEN:
4214 g_assert_not_reached ();
4215 /* from OP_AOTCONST : lis + ori */
4216 patch_lis_ori (ip, target);
4218 case MONO_PATCH_INFO_R4:
4219 case MONO_PATCH_INFO_R8:
4220 g_assert_not_reached ();
4221 *((gconstpointer *)(ip + 2)) = patch_info->data.target;
4223 case MONO_PATCH_INFO_EXC_NAME:
4224 g_assert_not_reached ();
4225 *((gconstpointer *)(ip + 1)) = patch_info->data.name;
4227 case MONO_PATCH_INFO_NONE:
4228 case MONO_PATCH_INFO_BB_OVF:
4229 case MONO_PATCH_INFO_EXC_OVF:
4230 /* everything is dealt with at epilog output time */
/* Generic case: rewrite the branch/call at ip to reach target. */
4235 arm_patch (ip, target);
4240 * Stack frame layout:
4242 * ------------------- fp
4243 * MonoLMF structure or saved registers
4244 * -------------------
4246 * -------------------
4248 * -------------------
4249 * optional 8 bytes for tracing
4250 * -------------------
4251 * param area size is cfg->param_area
4252 * ------------------- sp
/*
 * mono_arch_emit_prolog:
 *   Emit the method prolog: save callee-saved registers (or the full LMF
 * register set), allocate and align the frame, spill incoming arguments to
 * their home locations, optionally attach the thread / link the LMF, and
 * initialize sequence-point bookkeeping. See the frame layout comment above.
 */
4255 mono_arch_emit_prolog (MonoCompile *cfg)
4257 MonoMethod *method = cfg->method;
4259 MonoMethodSignature *sig;
4261 int alloc_size, pos, max_offset, i, rot_amount;
4266 int prev_sp_offset, reg_offset;
4268 if (mono_jit_trace_calls != NULL && mono_trace_eval (method))
/* Initial buffer size is a heuristic; emit_epilog/emit_exceptions grow it. */
4271 sig = mono_method_signature (method);
4272 cfg->code_size = 256 + sig->param_count * 20;
4273 code = cfg->native_code = g_malloc (cfg->code_size);
4275 mono_emit_unwind_op_def_cfa (cfg, code, ARMREG_SP, 0);
4277 ARM_MOV_REG_REG (code, ARMREG_IP, ARMREG_SP);
4279 alloc_size = cfg->stack_offset;
/* Two frame shapes: a minimal push of used regs + ip/lr, or (save_lmf)
 * a push of everything but r0-r3/sp/pc so the stack doubles as a MonoLMF. */
4282 if (!method->save_lmf) {
4283 /* We save SP by storing it into IP and saving IP */
4284 ARM_PUSH (code, (cfg->used_int_regs | (1 << ARMREG_IP) | (1 << ARMREG_LR)));
4285 prev_sp_offset = 8; /* ip and lr */
4286 for (i = 0; i < 16; ++i) {
4287 if (cfg->used_int_regs & (1 << i))
4288 prev_sp_offset += 4;
4290 mono_emit_unwind_op_def_cfa_offset (cfg, code, prev_sp_offset);
/* Record unwind info for each pushed register. */
4292 for (i = 0; i < 16; ++i) {
4293 if ((cfg->used_int_regs & (1 << i)) || (i == ARMREG_IP) || (i == ARMREG_LR)) {
4294 mono_emit_unwind_op_offset (cfg, code, i, (- prev_sp_offset) + reg_offset);
/* 0x5ff0 = register mask r4-r12 + lr (all but r0-r3, sp and pc). */
4299 ARM_PUSH (code, 0x5ff0);
4300 prev_sp_offset = 4 * 10; /* all but r0-r3, sp and pc */
4301 mono_emit_unwind_op_def_cfa_offset (cfg, code, prev_sp_offset);
4303 for (i = 0; i < 16; ++i) {
4304 if ((i > ARMREG_R3) && (i != ARMREG_SP) && (i != ARMREG_PC)) {
4305 mono_emit_unwind_op_offset (cfg, code, i, (- prev_sp_offset) + reg_offset);
/* Reserve the rest of the MonoLMF beyond the registers just pushed. */
4309 pos += sizeof (MonoLMF) - prev_sp_offset;
4313 // align to MONO_ARCH_FRAME_ALIGNMENT bytes
4314 if (alloc_size & (MONO_ARCH_FRAME_ALIGNMENT - 1)) {
4315 alloc_size += MONO_ARCH_FRAME_ALIGNMENT - 1;
4316 alloc_size &= ~(MONO_ARCH_FRAME_ALIGNMENT - 1);
4319 /* the stack used in the pushed regs */
4320 if (prev_sp_offset & 4)
4322 cfg->stack_usage = alloc_size;
/* Subtract the frame size; use a scratch reg when it doesn't fit the
 * rotated-immediate encoding. */
4324 if ((i = mono_arm_is_rotated_imm8 (alloc_size, &rot_amount)) >= 0) {
4325 ARM_SUB_REG_IMM (code, ARMREG_SP, ARMREG_SP, i, rot_amount);
4327 code = mono_arm_emit_load_imm (code, ARMREG_IP, alloc_size);
4328 ARM_SUB_REG_REG (code, ARMREG_SP, ARMREG_SP, ARMREG_IP);
4330 mono_emit_unwind_op_def_cfa_offset (cfg, code, prev_sp_offset + alloc_size);
4332 if (cfg->frame_reg != ARMREG_SP) {
4333 ARM_MOV_REG_REG (code, cfg->frame_reg, ARMREG_SP);
4334 mono_emit_unwind_op_def_cfa_reg (cfg, code, cfg->frame_reg);
4336 //g_print ("prev_sp_offset: %d, alloc_size:%d\n", prev_sp_offset, alloc_size);
4337 prev_sp_offset += alloc_size;
4339 /* compute max_offset in order to use short forward jumps
4340 * we could skip do it on arm because the immediate displacement
4341 * for jumps is large enough, it may be useful later for constant pools
4344 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
4345 MonoInst *ins = bb->code;
4346 bb->max_offset = max_offset;
4348 if (cfg->prof_options & MONO_PROFILE_COVERAGE)
4351 MONO_BB_FOR_EACH_INS (bb, ins)
4352 max_offset += ((guint8 *)ins_get_spec (ins->opcode))[MONO_INST_LEN];
4355 /* store runtime generic context */
4356 if (cfg->rgctx_var) {
4357 MonoInst *ins = cfg->rgctx_var;
4359 g_assert (ins->opcode == OP_REGOFFSET);
4361 if (arm_is_imm12 (ins->inst_offset)) {
4362 ARM_STR_IMM (code, MONO_ARCH_RGCTX_REG, ins->inst_basereg, ins->inst_offset);
4364 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_offset);
4365 ARM_STR_REG_REG (code, MONO_ARCH_RGCTX_REG, ins->inst_basereg, ARMREG_LR);
4369 /* load arguments allocated to register from the stack */
4372 cinfo = get_call_info (sig, sig->pinvoke);
/* Valuetype returns: store the hidden return-address argument. */
4374 if (MONO_TYPE_ISSTRUCT (sig->ret)) {
4375 ArgInfo *ainfo = &cinfo->ret;
4376 inst = cfg->vret_addr;
4377 g_assert (arm_is_imm12 (inst->inst_offset));
4378 ARM_STR_IMM (code, ainfo->reg, inst->inst_basereg, inst->inst_offset);
/* Spill / move each incoming argument to its allocated home. */
4380 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
4381 ArgInfo *ainfo = cinfo->args + i;
4382 inst = cfg->args [pos];
4384 if (cfg->verbose_level > 2)
4385 g_print ("Saving argument %d (type: %d)\n", i, ainfo->regtype);
4386 if (inst->opcode == OP_REGVAR) {
4387 if (ainfo->regtype == RegTypeGeneral)
4388 ARM_MOV_REG_REG (code, inst->dreg, ainfo->reg);
4389 else if (ainfo->regtype == RegTypeFP) {
4390 g_assert_not_reached ();
4391 } else if (ainfo->regtype == RegTypeBase) {
4392 if (arm_is_imm12 (prev_sp_offset + ainfo->offset)) {
4393 ARM_LDR_IMM (code, inst->dreg, ARMREG_SP, (prev_sp_offset + ainfo->offset));
/* NOTE(review): the in-range branch above loads from
 * prev_sp_offset + ainfo->offset, but this fallback loads
 * inst->inst_offset instead -- looks inconsistent/buggy;
 * confirm against upstream (later Mono uses the same
 * prev_sp_offset + ainfo->offset here). */
4395 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
4396 ARM_LDR_REG_REG (code, inst->dreg, ARMREG_SP, ARMREG_IP);
4399 g_assert_not_reached ();
4401 if (cfg->verbose_level > 2)
4402 g_print ("Argument %d assigned to register %s\n", pos, mono_arch_regname (inst->dreg));
4404 /* the argument should be put on the stack: FIXME handle size != word */
4405 if (ainfo->regtype == RegTypeGeneral || ainfo->regtype == RegTypeIRegPair) {
4406 switch (ainfo->size) {
4408 if (arm_is_imm12 (inst->inst_offset))
4409 ARM_STRB_IMM (code, ainfo->reg, inst->inst_basereg, inst->inst_offset);
4411 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
4412 ARM_STRB_REG_REG (code, ainfo->reg, inst->inst_basereg, ARMREG_IP);
4416 if (arm_is_imm8 (inst->inst_offset)) {
4417 ARM_STRH_IMM (code, ainfo->reg, inst->inst_basereg, inst->inst_offset);
4419 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
4420 ARM_STRH_REG_REG (code, ainfo->reg, inst->inst_basereg, ARMREG_IP);
/* 8-byte case: store the register pair to two adjacent slots. */
4424 g_assert (arm_is_imm12 (inst->inst_offset));
4425 ARM_STR_IMM (code, ainfo->reg, inst->inst_basereg, inst->inst_offset);
4426 g_assert (arm_is_imm12 (inst->inst_offset + 4));
4427 ARM_STR_IMM (code, ainfo->reg + 1, inst->inst_basereg, inst->inst_offset + 4);
4430 if (arm_is_imm12 (inst->inst_offset)) {
4431 ARM_STR_IMM (code, ainfo->reg, inst->inst_basereg, inst->inst_offset);
4433 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
4434 ARM_STR_REG_REG (code, ainfo->reg, inst->inst_basereg, ARMREG_IP);
/* RegTypeBaseGen: 8-byte arg split between r3 and the stack. */
4438 } else if (ainfo->regtype == RegTypeBaseGen) {
4439 g_assert (arm_is_imm12 (prev_sp_offset + ainfo->offset));
4440 g_assert (arm_is_imm12 (inst->inst_offset));
4441 ARM_LDR_IMM (code, ARMREG_LR, ARMREG_SP, (prev_sp_offset + ainfo->offset));
4442 ARM_STR_IMM (code, ARMREG_LR, inst->inst_basereg, inst->inst_offset + 4);
4443 ARM_STR_IMM (code, ARMREG_R3, inst->inst_basereg, inst->inst_offset);
/* RegTypeBase: arg arrived on the caller's stack; copy to its home. */
4444 } else if (ainfo->regtype == RegTypeBase) {
4445 if (arm_is_imm12 (prev_sp_offset + ainfo->offset)) {
4446 ARM_LDR_IMM (code, ARMREG_LR, ARMREG_SP, (prev_sp_offset + ainfo->offset));
4448 code = mono_arm_emit_load_imm (code, ARMREG_IP, prev_sp_offset + ainfo->offset);
4449 ARM_LDR_REG_REG (code, ARMREG_LR, ARMREG_SP, ARMREG_IP);
4452 switch (ainfo->size) {
4454 if (arm_is_imm8 (inst->inst_offset)) {
4455 ARM_STRB_IMM (code, ARMREG_LR, inst->inst_basereg, inst->inst_offset);
4457 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
4458 ARM_STRB_REG_REG (code, ARMREG_LR, inst->inst_basereg, ARMREG_IP);
4462 if (arm_is_imm8 (inst->inst_offset)) {
4463 ARM_STRH_IMM (code, ARMREG_LR, inst->inst_basereg, inst->inst_offset);
4465 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
4466 ARM_STRH_REG_REG (code, ARMREG_LR, inst->inst_basereg, ARMREG_IP);
/* 8-byte case: copy low word, then high word at offset + 4. */
4470 if (arm_is_imm12 (inst->inst_offset)) {
4471 ARM_STR_IMM (code, ARMREG_LR, inst->inst_basereg, inst->inst_offset);
4473 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
4474 ARM_STR_REG_REG (code, ARMREG_LR, inst->inst_basereg, ARMREG_IP);
4476 if (arm_is_imm12 (prev_sp_offset + ainfo->offset + 4)) {
4477 ARM_LDR_IMM (code, ARMREG_LR, ARMREG_SP, (prev_sp_offset + ainfo->offset + 4));
4479 code = mono_arm_emit_load_imm (code, ARMREG_IP, prev_sp_offset + ainfo->offset + 4);
4480 ARM_LDR_REG_REG (code, ARMREG_LR, ARMREG_SP, ARMREG_IP);
4482 if (arm_is_imm12 (inst->inst_offset + 4)) {
4483 ARM_STR_IMM (code, ARMREG_LR, inst->inst_basereg, inst->inst_offset + 4);
4485 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset + 4);
4486 ARM_STR_REG_REG (code, ARMREG_LR, inst->inst_basereg, ARMREG_IP);
4490 if (arm_is_imm12 (inst->inst_offset)) {
4491 ARM_STR_IMM (code, ARMREG_LR, inst->inst_basereg, inst->inst_offset);
4493 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
4494 ARM_STR_REG_REG (code, ARMREG_LR, inst->inst_basereg, ARMREG_IP);
4498 } else if (ainfo->regtype == RegTypeFP) {
4499 g_assert_not_reached ();
/* Struct passed partially/fully in registers: store each register,
 * then memcpy any remainder from the caller's stack area. */
4500 } else if (ainfo->regtype == RegTypeStructByVal) {
4501 int doffset = inst->inst_offset;
4505 size = mini_type_stack_size_full (cfg->generic_sharing_context, inst->inst_vtype, NULL, sig->pinvoke);
4506 for (cur_reg = 0; cur_reg < ainfo->size; ++cur_reg) {
4507 if (arm_is_imm12 (doffset)) {
4508 ARM_STR_IMM (code, ainfo->reg + cur_reg, inst->inst_basereg, doffset);
4510 code = mono_arm_emit_load_imm (code, ARMREG_IP, doffset);
4511 ARM_STR_REG_REG (code, ainfo->reg + cur_reg, inst->inst_basereg, ARMREG_IP);
4513 soffset += sizeof (gpointer);
4514 doffset += sizeof (gpointer);
4516 if (ainfo->vtsize) {
4517 /* FIXME: handle overrun! with struct sizes not multiple of 4 */
4518 //g_print ("emit_memcpy (prev_sp_ofs: %d, ainfo->offset: %d, soffset: %d)\n", prev_sp_offset, ainfo->offset, soffset);
4519 code = emit_memcpy (code, ainfo->vtsize * sizeof (gpointer), inst->inst_basereg, doffset, ARMREG_SP, prev_sp_offset + ainfo->offset);
4521 } else if (ainfo->regtype == RegTypeStructByAddr) {
4522 g_assert_not_reached ();
4523 /* FIXME: handle overrun! with struct sizes not multiple of 4 */
4524 code = emit_memcpy (code, ainfo->vtsize * sizeof (gpointer), inst->inst_basereg, inst->inst_offset, ainfo->reg, 0);
4526 g_assert_not_reached ();
/* Native-to-managed wrappers must attach the thread to the runtime. */
4531 if (method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED) {
4532 if (cfg->compile_aot)
4533 /* AOT code is only used in the root domain */
4534 code = mono_arm_emit_load_imm (code, ARMREG_R0, 0);
4536 code = mono_arm_emit_load_imm (code, ARMREG_R0, (guint32)cfg->domain);
4537 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_INTERNAL_METHOD,
4538 (gpointer)"mono_jit_thread_attach");
4539 code = emit_call_seq (cfg, code);
/* Link this frame's MonoLMF into the per-thread LMF list. The address of
 * the list head comes either from the TLS fast path (__aeabi_read_tp) or
 * from a mono_get_lmf_addr icall. */
4542 if (method->save_lmf) {
4543 gboolean get_lmf_fast = FALSE;
4545 #ifdef HAVE_AEABI_READ_TP
4546 gint32 lmf_addr_tls_offset = mono_get_lmf_addr_tls_offset ();
4548 if (lmf_addr_tls_offset != -1) {
4549 get_lmf_fast = TRUE;
4551 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_INTERNAL_METHOD,
4552 (gpointer)"__aeabi_read_tp");
4553 code = emit_call_seq (cfg, code);
4555 ARM_LDR_IMM (code, ARMREG_R0, ARMREG_R0, lmf_addr_tls_offset);
4556 get_lmf_fast = TRUE;
4559 if (!get_lmf_fast) {
4560 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_INTERNAL_METHOD,
4561 (gpointer)"mono_get_lmf_addr");
4562 code = emit_call_seq (cfg, code);
4564 /* we build the MonoLMF structure on the stack - see mini-arm.h */
4565 /* lmf_offset is the offset from the previous stack pointer,
4566 * alloc_size is the total stack space allocated, so the offset
4567 * of MonoLMF from the current stack ptr is alloc_size - lmf_offset.
4568 * The pointer to the struct is put in r1 (new_lmf).
4569 * r2 is used as scratch
4570 * The callee-saved registers are already in the MonoLMF structure
4572 code = emit_big_add (code, ARMREG_R1, ARMREG_SP, alloc_size - lmf_offset);
4573 /* r0 is the result from mono_get_lmf_addr () */
4574 ARM_STR_IMM (code, ARMREG_R0, ARMREG_R1, G_STRUCT_OFFSET (MonoLMF, lmf_addr));
4575 /* new_lmf->previous_lmf = *lmf_addr */
4576 ARM_LDR_IMM (code, ARMREG_R2, ARMREG_R0, G_STRUCT_OFFSET (MonoLMF, previous_lmf));
4577 ARM_STR_IMM (code, ARMREG_R2, ARMREG_R1, G_STRUCT_OFFSET (MonoLMF, previous_lmf));
4578 /* *(lmf_addr) = r1 */
4579 ARM_STR_IMM (code, ARMREG_R1, ARMREG_R0, G_STRUCT_OFFSET (MonoLMF, previous_lmf));
4580 /* Skip method (only needed for trampoline LMF frames) */
4581 ARM_STR_IMM (code, ARMREG_SP, ARMREG_R1, G_STRUCT_OFFSET (MonoLMF, ebp));
4582 /* save the current IP */
4583 ARM_MOV_REG_REG (code, ARMREG_R2, ARMREG_PC);
4584 ARM_STR_IMM (code, ARMREG_R2, ARMREG_R1, G_STRUCT_OFFSET (MonoLMF, eip));
4588 code = mono_arch_instrument_prolog (cfg, mono_trace_enter_method, code, TRUE);
/* Soft-debug support: load the SeqPointInfo pointer (from an inline GOT
 * slot) into its stack variable. */
4590 if (cfg->arch.seq_point_info_var) {
4591 MonoInst *ins = cfg->arch.seq_point_info_var;
4593 /* Initialize the variable from a GOT slot */
4594 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_SEQ_POINT_INFO, cfg->method);
4595 ARM_LDR_IMM (code, ARMREG_R0, ARMREG_PC, 0);
4597 *(gpointer*)code = NULL;
4599 ARM_LDR_REG_REG (code, ARMREG_R0, ARMREG_PC, ARMREG_R0);
4601 g_assert (ins->opcode == OP_REGOFFSET);
4603 if (arm_is_imm12 (ins->inst_offset)) {
4604 ARM_STR_IMM (code, ARMREG_R0, ins->inst_basereg, ins->inst_offset);
4606 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_offset);
4607 ARM_STR_REG_REG (code, ARMREG_R0, ins->inst_basereg, ARMREG_LR);
4611 /* Initialize ss_trigger_page_var */
4613 MonoInst *info_var = cfg->arch.seq_point_info_var;
4614 MonoInst *ss_trigger_page_var = cfg->arch.ss_trigger_page_var;
4615 int dreg = ARMREG_LR;
4618 g_assert (info_var->opcode == OP_REGOFFSET);
4619 g_assert (arm_is_imm12 (info_var->inst_offset));
4621 ARM_LDR_IMM (code, dreg, info_var->inst_basereg, info_var->inst_offset);
4622 /* Load the trigger page addr */
4623 ARM_LDR_IMM (code, dreg, dreg, G_STRUCT_OFFSET (SeqPointInfo, ss_trigger_page));
4624 ARM_STR_IMM (code, dreg, ss_trigger_page_var->inst_basereg, ss_trigger_page_var->inst_offset);
4628 cfg->code_len = code - cfg->native_code;
4629 g_assert (cfg->code_len < cfg->code_size);
/*
 * mono_arch_emit_epilog:
 *   Emit the method epilog: optionally call the leave tracer, unlink the LMF
 * (restoring callee-saved registers from it) or pop the saved register set,
 * and return by popping pc. Grows the code buffer first if needed.
 */
4636 mono_arch_emit_epilog (MonoCompile *cfg)
4638 MonoMethod *method = cfg->method;
4639 int pos, i, rot_amount;
4640 int max_epilog_size = 16 + 20*4;
4643 if (cfg->method->save_lmf)
4644 max_epilog_size += 128;
4646 if (mono_jit_trace_calls != NULL)
4647 max_epilog_size += 50;
4649 if (cfg->prof_options & MONO_PROFILE_ENTER_LEAVE)
4650 max_epilog_size += 50;
/* Ensure enough room in the buffer for the worst-case epilog. */
4652 while (cfg->code_len + max_epilog_size > (cfg->code_size - 16)) {
4653 cfg->code_size *= 2;
4654 cfg->native_code = g_realloc (cfg->native_code, cfg->code_size);
4655 mono_jit_stats.code_reallocs++;
4659 * Keep in sync with OP_JMP
4661 code = cfg->native_code + cfg->code_len;
4663 if (mono_jit_trace_calls != NULL && mono_trace_eval (method)) {
4664 code = mono_arch_instrument_epilog (cfg, mono_trace_leave_method, code, TRUE);
4668 if (method->save_lmf) {
4670 /* all but r0-r3, sp and pc */
4671 pos += sizeof (MonoLMF) - (4 * 10);
4673 /* r2 contains the pointer to the current LMF */
4674 code = emit_big_add (code, ARMREG_R2, cfg->frame_reg, cfg->stack_usage - lmf_offset);
4675 /* ip = previous_lmf */
4676 ARM_LDR_IMM (code, ARMREG_IP, ARMREG_R2, G_STRUCT_OFFSET (MonoLMF, previous_lmf));
/* lr = lmf_addr */
4678 ARM_LDR_IMM (code, ARMREG_LR, ARMREG_R2, G_STRUCT_OFFSET (MonoLMF, lmf_addr));
4679 /* *(lmf_addr) = previous_lmf */
4680 ARM_STR_IMM (code, ARMREG_IP, ARMREG_LR, G_STRUCT_OFFSET (MonoLMF, previous_lmf));
4681 /* FIXME: speedup: there is no actual need to restore the registers if
4682 * we didn't actually change them (idea from Zoltan).
4685 /* point sp at the registers to restore: 10 is 14 -4, because we skip r0-r3 */
4686 ARM_ADD_REG_IMM8 (code, ARMREG_SP, ARMREG_R2, (sizeof (MonoLMF) - 10 * sizeof (gulong)));
/* 0xaff0 = r4-r11, sp, pc -- popping pc performs the return. */
4687 ARM_POP_NWB (code, 0xaff0); /* restore ip to sp and lr to pc */
/* Non-LMF path: rewind sp past the frame, then pop saved regs + pc. */
4689 if ((i = mono_arm_is_rotated_imm8 (cfg->stack_usage, &rot_amount)) >= 0) {
4690 ARM_ADD_REG_IMM (code, ARMREG_SP, cfg->frame_reg, i, rot_amount);
4692 code = mono_arm_emit_load_imm (code, ARMREG_IP, cfg->stack_usage);
4693 ARM_ADD_REG_REG (code, ARMREG_SP, ARMREG_SP, ARMREG_IP);
4695 /* FIXME: add v4 thumb interworking support */
4696 ARM_POP_NWB (code, cfg->used_int_regs | ((1 << ARMREG_SP) | (1 << ARMREG_PC)));
4699 cfg->code_len = code - cfg->native_code;
4701 g_assert (cfg->code_len < cfg->code_size);
4705 /* remove once throw_exception_by_name is eliminated */
/*
 * exception_id_by_name:
 *   Map a corlib exception class name to its MONO_EXC_* intrinsic id.
 * Aborts via g_error on an unknown name, so callers may assume success.
 */
4707 exception_id_by_name (const char *name)
4709 if (strcmp (name, "IndexOutOfRangeException") == 0)
4710 return MONO_EXC_INDEX_OUT_OF_RANGE;
4711 if (strcmp (name, "OverflowException") == 0)
4712 return MONO_EXC_OVERFLOW;
4713 if (strcmp (name, "ArithmeticException") == 0)
4714 return MONO_EXC_ARITHMETIC;
4715 if (strcmp (name, "DivideByZeroException") == 0)
4716 return MONO_EXC_DIVIDE_BY_ZERO;
4717 if (strcmp (name, "InvalidCastException") == 0)
4718 return MONO_EXC_INVALID_CAST;
4719 if (strcmp (name, "NullReferenceException") == 0)
4720 return MONO_EXC_NULL_REF;
4721 if (strcmp (name, "ArrayTypeMismatchException") == 0)
4722 return MONO_EXC_ARRAY_TYPE_MISMATCH;
4723 g_error ("Unknown intrinsic exception %s\n", name);
/*
 * mono_arch_emit_exceptions:
 *   Emit the out-of-line exception-throwing stubs referenced by the method
 * body. One stub is shared per exception type; earlier throw sites are
 * patched to branch to it.
 */
4728 mono_arch_emit_exceptions (MonoCompile *cfg)
4730 MonoJumpInfo *patch_info;
4733 const guint8* exc_throw_pos [MONO_EXC_INTRINS_NUM] = {NULL};
4734 guint8 exc_throw_found [MONO_EXC_INTRINS_NUM] = {0};
4735 int max_epilog_size = 50;
4737 /* count the number of exception infos */
4740 * make sure we have enough space for exceptions
4742 for (patch_info = cfg->patch_info; patch_info; patch_info = patch_info->next) {
4743 if (patch_info->type == MONO_PATCH_INFO_EXC) {
4744 i = exception_id_by_name (patch_info->data.target);
4745 if (!exc_throw_found [i]) {
4746 max_epilog_size += 32;
4747 exc_throw_found [i] = TRUE;
/* Grow the buffer so all the stubs fit. */
4752 while (cfg->code_len + max_epilog_size > (cfg->code_size - 16)) {
4753 cfg->code_size *= 2;
4754 cfg->native_code = g_realloc (cfg->native_code, cfg->code_size);
4755 mono_jit_stats.code_reallocs++;
4758 code = cfg->native_code + cfg->code_len;
4760 /* add code to raise exceptions */
4761 for (patch_info = cfg->patch_info; patch_info; patch_info = patch_info->next) {
4762 switch (patch_info->type) {
4763 case MONO_PATCH_INFO_EXC: {
4764 MonoClass *exc_class;
4765 unsigned char *ip = patch_info->ip.i + cfg->native_code;
4767 i = exception_id_by_name (patch_info->data.target);
/* Reuse an already-emitted stub for this exception type. */
4768 if (exc_throw_pos [i]) {
4769 arm_patch (ip, exc_throw_pos [i]);
4770 patch_info->type = MONO_PATCH_INFO_NONE;
4773 exc_throw_pos [i] = code;
4775 arm_patch (ip, code);
4777 exc_class = mono_class_from_name (mono_defaults.corlib, "System", patch_info->data.name);
4778 g_assert (exc_class);
/* Stub: r1 = throw-site lr, r0 = token (inlined after the call),
 * then call mono_arch_throw_corlib_exception; the patch info is
 * retargeted at the call so mono_arch_patch_code resolves it. */
4780 ARM_MOV_REG_REG (code, ARMREG_R1, ARMREG_LR);
4781 ARM_LDR_IMM (code, ARMREG_R0, ARMREG_PC, 0);
4782 patch_info->type = MONO_PATCH_INFO_INTERNAL_METHOD;
4783 patch_info->data.name = "mono_arch_throw_corlib_exception";
4784 patch_info->ip.i = code - cfg->native_code;
4786 *(guint32*)(gpointer)code = exc_class->type_token;
4796 cfg->code_len = code - cfg->native_code;
4798 g_assert (cfg->code_len < cfg->code_size);
/* One-shot guard for the TLS offset lookup below. NOTE(review): no locking
 * around the flag -- presumably first call happens before other threads run;
 * confirm (the lookups are idempotent, so a race is likely benign). */
4802 static gboolean tls_offset_inited = FALSE;
/*
 * mono_arch_setup_jit_tls_data:
 *   Cache the LMF-related TLS offsets on first use (see the file-scope
 * lmf_tls_offset / lmf_addr_tls_offset variables).
 */
4805 mono_arch_setup_jit_tls_data (MonoJitTlsData *tls)
4807 if (!tls_offset_inited) {
4808 tls_offset_inited = TRUE;
4810 lmf_tls_offset = mono_get_lmf_tls_offset ();
4811 lmf_addr_tls_offset = mono_get_lmf_addr_tls_offset ();
/* mono_arch_free_jit_tls_data: nothing to free on ARM. */
4816 mono_arch_free_jit_tls_data (MonoJitTlsData *tls)
/* mono_arch_emit_inst_for_method: no ARM intrinsic replacements; returns NULL. */
4821 mono_arch_emit_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
/* mono_arch_print_tree: no arch-specific tree printing on ARM. */
4828 mono_arch_print_tree (MonoInst *tree, int arity)
/* mono_arch_get_domain_intrinsic: delegate to the generic implementation. */
4834 mono_arch_get_domain_intrinsic (MonoCompile* cfg)
4836 return mono_get_domain_intrinsic (cfg);
/* mono_arch_get_patch_offset: offset of the patchable word within a call site. */
4840 mono_arch_get_patch_offset (guint8 *code)
/* mono_arch_flush_register_windows: no-op; ARM has no register windows. */
4847 mono_arch_flush_register_windows (void)
4851 #ifdef MONO_ARCH_HAVE_IMT
/*
 * mono_arch_emit_imt_argument:
 *   Arrange for the IMT/method argument of CALL to be passed in V5. Under
 * AOT or generic sharing the argument is always materialized in the
 * register (dynamic_imt_arg); otherwise the method pointer is read from
 * the code stream by the thunk (see mono_arch_find_imt_method).
 */
4854 mono_arch_emit_imt_argument (MonoCompile *cfg, MonoCallInst *call, MonoInst *imt_arg)
4856 if (cfg->compile_aot) {
4857 int method_reg = mono_alloc_ireg (cfg);
4860 call->dynamic_imt_arg = TRUE;
4863 mono_call_inst_add_outarg_reg (cfg, call, imt_arg->dreg, ARMREG_V5, FALSE);
/* No explicit imt_arg: load the method as an AOT constant into V5. */
4865 MONO_INST_NEW (cfg, ins, OP_AOTCONST);
4866 ins->dreg = method_reg;
4867 ins->inst_p0 = call->method;
4868 ins->inst_c1 = MONO_PATCH_INFO_METHODCONST;
4869 MONO_ADD_INS (cfg->cbb, ins);
4871 mono_call_inst_add_outarg_reg (cfg, call, method_reg, ARMREG_V5, FALSE);
4873 } else if (cfg->generic_context) {
4875 /* Always pass in a register for simplicity */
4876 call->dynamic_imt_arg = TRUE;
4878 cfg->uses_rgctx_reg = TRUE;
4881 mono_call_inst_add_outarg_reg (cfg, call, imt_arg->dreg, ARMREG_V5, FALSE);
4884 int method_reg = mono_alloc_preg (cfg);
4886 MONO_INST_NEW (cfg, ins, OP_PCONST);
4887 ins->inst_p0 = call->method;
4888 ins->dreg = method_reg;
4889 MONO_ADD_INS (cfg->cbb, ins);
4891 mono_call_inst_add_outarg_reg (cfg, call, method_reg, ARMREG_V5, FALSE);
/*
 * mono_arch_find_imt_method:
 *   Recover the IMT method for a call site: it is either inlined in the code
 * stream right after the ldr pc instruction, or (AOT, inlined word == 0)
 * passed in register V5 (see mono_arch_emit_imt_argument).
 * NOTE(review): the warning text says "LDC" but the check is IS_LDR_PC --
 * the message looks like a typo for "LDR" (left untouched here).
 */
4897 mono_arch_find_imt_method (mgreg_t *regs, guint8 *code)
4899 guint32 *code_ptr = (guint32*)code;
4901 /* The IMT value is stored in the code stream right after the LDC instruction. */
4902 if (!IS_LDR_PC (code_ptr [0])) {
4903 g_warning ("invalid code stream, instruction before IMT value is not a LDC in %s() (code %p value 0: 0x%x -1: 0x%x -2: 0x%x)", __FUNCTION__, code, code_ptr [2], code_ptr [1], code_ptr [0]);
4904 g_assert (IS_LDR_PC (code_ptr [0]));
4906 if (code_ptr [1] == 0)
4907 /* This is AOTed code, the IMT method is in V5 */
4908 return (MonoMethod*)regs [ARMREG_V5];
4910 return (MonoMethod*) code_ptr [1];
/* mono_arch_find_this_argument: extract the 'this' pointer from the saved
 * registers at a call site via the generic helper. */
4914 mono_arch_find_this_argument (mgreg_t *regs, MonoMethod *method, MonoGenericSharingContext *gsctx)
4916 return mono_arch_get_this_arg_from_call (gsctx, mono_method_signature (method), regs, NULL);
/* mono_arch_find_static_call_vtable: the vtable is passed in the RGCTX
 * register for static calls. */
4920 mono_arch_find_static_call_vtable (mgreg_t *regs, guint8 *code)
4922 return (MonoVTable*) regs [MONO_ARCH_RGCTX_REG];
/* Debug aid: emit an assert when the IMT thunk is entered with a method it
 * does not handle. Off by default. */
4925 #define ENABLE_WRONG_METHOD_CHECK 0
/* Instruction-count budgets (in bytes, 4 bytes per insn) used by
 * mono_arch_build_imt_thunk below to pre-compute per-entry chunk sizes. */
4926 #define BASE_SIZE (6 * 4)
4927 #define BSEARCH_ENTRY_SIZE (4 * 4)
4928 #define CMP_SIZE (3 * 4)
4929 #define BRANCH_SIZE (1 * 4)
4930 #define CALL_SIZE (2 * 4)
4931 #define WMC_SIZE (5 * 4)
/* Byte distance from A to B; negative when B precedes A. */
4932 #define DISTANCE(A, B) (((gint32)(B)) - ((gint32)(A)))
/*
 * arm_emit_value_and_patch_ldr:
 *   Back-patch a previously emitted pc-relative ldr at TARGET with the
 * distance to the value word being emitted at CODE (must fit the 12-bit
 * immediate offset field of the ldr encoding).
 * NOTE(review): delta is guint32, so the `delta >= 0` half of the assert is
 * vacuously true -- only the upper bound is actually checked. Should be
 * gint32 (or the >= 0 test dropped); confirm against upstream.
 */
4935 arm_emit_value_and_patch_ldr (arminstr_t *code, arminstr_t *target, guint32 value)
4937 guint32 delta = DISTANCE (target, code);
4939 g_assert (delta >= 0 && delta <= 0xFFF);
4940 *target = *target | delta;
/*
 * mono_arch_build_imt_thunk:
 *
 *   Build the native IMT (interface method table) thunk for VTABLE in
 * DOMAIN.  IMT_ENTRIES is a COUNT-element array of checks; for each
 * "is_equals" entry the thunk compares the IMT method (in R0, or in V5 when
 * called from AOT code) against the entry's key and, on a match, jumps to
 * the corresponding vtable slot.  Non-equals entries form binary-search
 * partitions (the ARMCOND_GE branch below).  Returns the start address of
 * the generated code (return statement not visible in this extract).
 *
 * NOTE(review): many physical lines (braces, '#endif's, 'size = BASE_SIZE;'
 * style initializations, the fail_tramp handling, and the final return) were
 * dropped from this extract; 'size' appears uninitialized here only because
 * its initialization line is missing — confirm against the full file.
 */
4946 mono_arch_build_imt_thunk (MonoVTable *vtable, MonoDomain *domain, MonoIMTCheckItem **imt_entries, int count,
4947 gpointer fail_tramp)
4949 int size, i, extra_space = 0;
4950 arminstr_t *code, *start, *vtable_target = NULL;
4951 gboolean large_offsets = FALSE;
4952 guint32 **constant_pool_starts;
/* Per-entry start of the reserved literal-pool space (filled in pass 3). */
4955 constant_pool_starts = g_new0 (guint32*, count);
4958 * We might be called with a fail_tramp from the IMT builder code even if
4959 * MONO_ARCH_HAVE_GENERALIZED_IMT_THUNK is not defined.
4961 //g_assert (!fail_tramp);
/* Pass 1: compute the chunk size of every entry so the total code size can
 * be reserved up front. */
4963 for (i = 0; i < count; ++i) {
4964 MonoIMTCheckItem *item = imt_entries [i];
4965 if (item->is_equals) {
/* Slot offset too large for a 12-bit LDR immediate: the slow push/LDM
 * sequence below costs extra instructions. */
4966 if (!arm_is_imm12 (DISTANCE (vtable, &vtable->vtable[item->value.vtable_slot]))) {
4967 item->chunk_size += 32;
4968 large_offsets = TRUE;
4971 if (item->check_target_idx) {
4972 if (!item->compare_done)
4973 item->chunk_size += CMP_SIZE;
4974 item->chunk_size += BRANCH_SIZE;
4976 #if ENABLE_WRONG_METHOD_CHECK
4977 item->chunk_size += WMC_SIZE;
4980 item->chunk_size += CALL_SIZE;
/* Binary-search (non-equals) entry: compare + conditional branch, and the
 * later comparison it guards needs no separate CMP. */
4982 item->chunk_size += BSEARCH_ENTRY_SIZE;
4983 imt_entries [item->check_target_idx]->compare_done = TRUE;
4985 size += item->chunk_size;
/* Room for the per-entry ARM_ADD_REG_IMM that pops the stack (large-offset
 * path). */
4989 size += 4 * count; /* The ARM_ADD_REG_IMM to pop the stack */
4991 start = code = mono_domain_code_reserve (domain, size);
/* Debug dump of the planned layout (presumably under a DEBUG_IMT-style
 * #if that is missing from this extract — TODO confirm). */
4994 printf ("building IMT thunk for class %s %s entries %d code size %d code at %p end %p vtable %p\n", vtable->klass->name_space, vtable->klass->name, count, size, start, ((guint8*)start) + size, vtable);
4995 for (i = 0; i < count; ++i) {
4996 MonoIMTCheckItem *item = imt_entries [i];
4997 printf ("method %d (%p) %s vtable slot %p is_equals %d chunk size %d\n", i, item->key, item->key->name, &vtable->vtable [item->value.vtable_slot], item->is_equals, item->chunk_size);
/* Prologue: with large offsets we also save IP and PC so the slow path can
 * overwrite the saved PC and return through a POP4; otherwise only R0/R1
 * (the scratch registers) need saving. */
5002 ARM_PUSH4 (code, ARMREG_R0, ARMREG_R1, ARMREG_IP, ARMREG_PC);
5004 ARM_PUSH2 (code, ARMREG_R0, ARMREG_R1);
/* Load the IMT method stored in the literal word just before the call
 * site (LR - 4). */
5005 ARM_LDR_IMM (code, ARMREG_R0, ARMREG_LR, -4);
/* Remember this LDR so the vtable address literal can be patched in later. */
5006 vtable_target = code;
5007 ARM_LDR_IMM (code, ARMREG_IP, ARMREG_PC, 0);
5009 /* R0 == 0 means we are called from AOT code. In this case, V5 contains the IMT method */
5010 ARM_CMP_REG_IMM8 (code, ARMREG_R0, 0);
5011 ARM_MOV_REG_REG_COND (code, ARMREG_R0, ARMREG_V5, ARMCOND_EQ);
/* Pass 2: emit the compare/branch chain. */
5013 for (i = 0; i < count; ++i) {
5014 MonoIMTCheckItem *item = imt_entries [i];
5015 arminstr_t *imt_method = NULL, *vtable_offset_ins = NULL;
5016 gint32 vtable_offset;
5018 item->code_target = (guint8*)code;
5020 if (item->is_equals) {
5021 if (item->check_target_idx) {
5022 if (!item->compare_done) {
/* Load the expected IMT method key from the literal pool and compare. */
5024 ARM_LDR_IMM (code, ARMREG_R1, ARMREG_PC, 0);
5025 ARM_CMP_REG_REG (code, ARMREG_R0, ARMREG_R1);
/* Branch target fixed up in pass 3 via arm_patch (). */
5027 item->jmp_code = (guint8*)code;
5028 ARM_B_COND (code, ARMCOND_NE, 0);
5030 /*Enable the commented code to assert on wrong method*/
5031 #if ENABLE_WRONG_METHOD_CHECK
5033 ARM_LDR_IMM (code, ARMREG_R1, ARMREG_PC, 0);
5034 ARM_CMP_REG_REG (code, ARMREG_R0, ARMREG_R1);
5035 ARM_B_COND (code, ARMCOND_NE, 1);
5041 vtable_offset = DISTANCE (vtable, &vtable->vtable[item->value.vtable_slot]);
5042 if (!arm_is_imm12 (vtable_offset)) {
5044 * We need to branch to a computed address but we don't have
5045 * a free register to store it, since IP must contain the
5046 * vtable address. So we push the two values to the stack, and
5047 * load them both using LDM.
5049 /* Compute target address */
5050 vtable_offset_ins = code;
5051 ARM_LDR_IMM (code, ARMREG_R1, ARMREG_PC, 0);
5052 ARM_LDR_REG_REG (code, ARMREG_R1, ARMREG_IP, ARMREG_R1);
5053 /* Save it to the fourth slot */
5054 ARM_STR_IMM (code, ARMREG_R1, ARMREG_SP, 3 * sizeof (gpointer));
5055 /* Restore registers and branch */
5056 ARM_POP4 (code, ARMREG_R0, ARMREG_R1, ARMREG_IP, ARMREG_PC);
/* Emit the vtable_offset literal and patch the LDR above to reach it. */
5058 code = arm_emit_value_and_patch_ldr (code, vtable_offset_ins, vtable_offset);
/* Small-offset (fast) path: restore scratch regs, drop the saved IP/PC
 * slots, and jump through the vtable slot directly. */
5060 ARM_POP2 (code, ARMREG_R0, ARMREG_R1);
5062 ARM_ADD_REG_IMM8 (code, ARMREG_SP, ARMREG_SP, 2 * sizeof (gpointer));
5063 ARM_LDR_IMM (code, ARMREG_PC, ARMREG_IP, vtable_offset);
/* Emit the IMT method key literal for the compare above.  NOTE(review):
 * the line setting 'imt_method' before this point is missing from the
 * extract. */
5067 code = arm_emit_value_and_patch_ldr (code, imt_method, (guint32)item->key);
5069 /*must emit after unconditional branch*/
5070 if (vtable_target) {
5071 code = arm_emit_value_and_patch_ldr (code, vtable_target, (guint32)vtable);
5072 item->chunk_size += 4;
5073 vtable_target = NULL;
5076 /*We reserve the space for bsearch IMT values after the first entry with an absolute jump*/
5077 constant_pool_starts [i] = code;
5079 code += extra_space;
/* Binary-search node: compare against the pivot key and branch if GE.
 * The pivot literal is filled in during pass 3. */
5083 ARM_LDR_IMM (code, ARMREG_R1, ARMREG_PC, 0);
5084 ARM_CMP_REG_REG (code, ARMREG_R0, ARMREG_R1);
5086 item->jmp_code = (guint8*)code;
5087 ARM_B_COND (code, ARMCOND_GE, 0);
/* Pass 3: patch forward branches and fill the reserved literal pools with
 * the bsearch pivot keys. */
5092 for (i = 0; i < count; ++i) {
5093 MonoIMTCheckItem *item = imt_entries [i];
5094 if (item->jmp_code) {
5095 if (item->check_target_idx)
5096 arm_patch (item->jmp_code, imt_entries [item->check_target_idx]->code_target);
5098 if (i > 0 && item->is_equals) {
5100 arminstr_t *space_start = constant_pool_starts [i];
5101 for (j = i - 1; j >= 0 && !imt_entries [j]->is_equals; --j) {
5102 space_start = arm_emit_value_and_patch_ldr (space_start, (arminstr_t*)imt_entries [j]->code_target, (guint32)imt_entries [j]->key);
/* Optional disassembly dump (presumably under a DEBUG #if — the
 * conditional lines are missing from this extract). */
5109 char *buff = g_strdup_printf ("thunk_for_class_%s_%s_entries_%d", vtable->klass->name_space, vtable->klass->name, count);
5110 mono_disassemble_code (NULL, (guint8*)start, size, buff);
5115 g_free (constant_pool_starts);
/* Make the freshly written code visible to the instruction stream. */
5117 mono_arch_flush_icache ((guint8*)start, size);
5118 mono_stats.imt_thunks_size += code - start;
/* Sanity: we must not have emitted past the reserved region. */
5120 g_assert (DISTANCE (start, code) <= size);
/*
 * mono_arch_context_get_int_reg:
 *
 *   Return the value of integer register REG from the saved context CTX.
 * ctx->regs appears to hold r4-r11 at indices 0-7, with IP at index 8 and
 * LR at index 9, and SP stored separately in ctx->esp — TODO confirm the
 * MonoContext layout for ARM; it is declared outside this file.
 * Aborts on any other register number.
 */
5127 mono_arch_context_get_int_reg (MonoContext *ctx, int reg)
5129 if (reg >= 4 && reg <= 11)
5130 return (gpointer)ctx->regs [reg - 4];
5131 else if (reg == ARMREG_IP)
5132 return (gpointer)ctx->regs [8];
5133 else if (reg == ARMREG_LR)
5134 return (gpointer)ctx->regs [9];
5135 else if (reg == ARMREG_SP)
5136 return (gpointer)ctx->esp;
5138 g_assert_not_reached ();
5144 * mono_arch_set_breakpoint:
5146 * Set a breakpoint at the native code corresponding to JI at NATIVE_OFFSET.
5147 * The location should contain code emitted by OP_SEQ_POINT.
5150 mono_arch_set_breakpoint (MonoJitInfo *ji, guint8 *ip)
5153 guint32 native_offset = ip - (guint8*)ji->code_start;
/* AOT path: instead of patching code, record bp_trigger_page in the
 * per-method SeqPointInfo table that the AOTed sequence-point code reads.
 * NOTE(review): the #ifdef separating this from the code-patching path
 * below is missing from this extract. */
5156 SeqPointInfo *info = mono_arch_get_seq_point_info (mono_domain_get (), ji->code_start);
5158 g_assert (native_offset % 4 == 0);
5159 g_assert (info->bp_addrs [native_offset / 4] == 0);
5160 info->bp_addrs [native_offset / 4] = bp_trigger_page;
/* JIT path: overwrite the OP_SEQ_POINT nop area with
 *   LDR lr, [pc]        ; load &bp_trigger_page from the inline literal
 *   <literal word>      ; bp_trigger_page address
 *   LDR lr, [lr]        ; faulting read when the page is protected
 */
5162 int dreg = ARMREG_LR;
5164 /* Read from another trigger page */
5165 ARM_LDR_IMM (code, dreg, ARMREG_PC, 0);
5167 *(int*)code = (int)bp_trigger_page;
5169 ARM_LDR_IMM (code, dreg, dreg, 0);
5171 mono_arch_flush_icache (code - 16, 16);
5174 /* This is currently implemented by emitting an SWI instruction, which
5175 * qemu/linux seems to convert to a SIGILL.
/* Fallback: a raw SWI #8 encoding (0xef000008). */
5177 *(int*)code = (0xef << 24) | 8;
5179 mono_arch_flush_icache (code - 4, 4);
5185 * mono_arch_clear_breakpoint:
5187 * Clear the breakpoint at IP.
5190 mono_arch_clear_breakpoint (MonoJitInfo *ji, guint8 *ip)
/* AOT path: reset the SeqPointInfo slot so the sequence-point code no
 * longer reads the breakpoint trigger page.  NOTE(review): the #ifdef
 * and the nop-emission statements inside the loop below are missing
 * from this extract. */
5196 guint32 native_offset = ip - (guint8*)ji->code_start;
5197 SeqPointInfo *info = mono_arch_get_seq_point_info (mono_domain_get (), ji->code_start);
5199 g_assert (native_offset % 4 == 0);
5200 g_assert (info->bp_addrs [native_offset / 4] == bp_trigger_page);
5201 info->bp_addrs [native_offset / 4] = 0;
/* JIT path: rewrite the 4 patched instruction words (presumably with nops
 * — the loop body is not visible here). */
5203 for (i = 0; i < 4; ++i)
5206 mono_arch_flush_icache (ip, code - ip);
5211 * mono_arch_start_single_stepping:
5213 * Start single stepping.
5216 mono_arch_start_single_stepping (void)
/* Revoke all access to the single-step trigger page; the load emitted for
 * each sequence point then faults, delivering the step event. */
5218 mono_mprotect (ss_trigger_page, mono_pagesize (), 0);
5222 * mono_arch_stop_single_stepping:
5224 * Stop single stepping.
5227 mono_arch_stop_single_stepping (void)
/* Make the trigger page readable again so sequence-point loads succeed
 * silently. */
5229 mono_mprotect (ss_trigger_page, mono_pagesize (), MONO_MMAP_READ);
/* Signal delivered when a trigger page is accessed.  NOTE(review): the
 * surrounding platform #if/#else/#endif lines (SIGBUS vs. SIGSEGV — SIGBUS
 * presumably for Darwin-style platforms) are missing from this extract. */
5233 #define DBG_SIGNAL SIGBUS
5235 #define DBG_SIGNAL SIGSEGV
5239 * mono_arch_is_single_step_event:
5241 * Return whenever the machine state in SIGCTX corresponds to a single
/* (continuation of the comment above; the "step event." line and the
 * function's return type/braces are missing from this extract) */
5245 mono_arch_is_single_step_event (siginfo_t *info, void *sigctx)
/* A faulting address inside [ss_trigger_page, ss_trigger_page + 128]
 * identifies a single-step fault.  NOTE(review): the si_signo check and
 * the TRUE/FALSE returns are on lines missing from this extract. */
5247 /* Sometimes the address is off by 4 */
5248 if (info->si_addr >= ss_trigger_page && (guint8*)info->si_addr <= (guint8*)ss_trigger_page + 128)
5255 * mono_arch_is_breakpoint_event:
5257 * Return whenever the machine state in SIGCTX corresponds to a breakpoint event.
5260 mono_arch_is_breakpoint_event (siginfo_t *info, void *sigctx)
/* A DBG_SIGNAL whose faulting address falls inside
 * [bp_trigger_page, bp_trigger_page + 128] is a breakpoint hit.
 * NOTE(review): the TRUE/FALSE return lines are missing from this extract. */
5262 if (info->si_signo == DBG_SIGNAL) {
5263 /* Sometimes the address is off by 4 */
5264 if (info->si_addr >= bp_trigger_page && (guint8*)info->si_addr <= (guint8*)bp_trigger_page + 128)
/*
 * mono_arch_get_ip_for_breakpoint:
 *
 *   Compute the IP of the breakpointed instruction from CTX.
 * NOTE(review): everything after the initial IP fetch (the offset
 * adjustment and return) is missing from this extract — consult the
 * full file before relying on this.
 */
5274 mono_arch_get_ip_for_breakpoint (MonoJitInfo *ji, MonoContext *ctx)
5276 guint8 *ip = MONO_CONTEXT_GET_IP (ctx);
/*
 * mono_arch_get_ip_for_single_step:
 *
 *   Compute the IP of the instruction that triggered a single-step event
 * from CTX.  NOTE(review): the adjustment and return statements are
 * missing from this extract.
 */
5287 mono_arch_get_ip_for_single_step (MonoJitInfo *ji, MonoContext *ctx)
5289 guint8 *ip = MONO_CONTEXT_GET_IP (ctx);
5297 * mono_arch_skip_breakpoint:
5299 * See mini-amd64.c for docs.
5302 mono_arch_skip_breakpoint (MonoContext *ctx)
/* Resume past the faulting load by advancing the saved IP one 4-byte ARM
 * instruction. */
5304 MONO_CONTEXT_SET_IP (ctx, (guint8*)MONO_CONTEXT_GET_IP (ctx) + 4);
5308 * mono_arch_skip_single_step:
5310 * See mini-amd64.c for docs.
5313 mono_arch_skip_single_step (MonoContext *ctx)
/* Same as skipping a breakpoint: step over the 4-byte trigger load. */
5315 MONO_CONTEXT_SET_IP (ctx, (guint8*)MONO_CONTEXT_GET_IP (ctx) + 4);
5319 * mono_arch_get_seq_point_info:
5321 * See mini-amd64.c for docs.
5324 mono_arch_get_seq_point_info (MonoDomain *domain, guint8 *code)
5329 // FIXME: Add a free function
5331 mono_domain_lock (domain);
5332 info = g_hash_table_lookup (domain_jit_info (domain)->arch_seq_points,
5334 mono_domain_unlock (domain);
5337 ji = mono_jit_info_table_find (domain, (char*)code);
5340 info = g_malloc0 (sizeof (SeqPointInfo) + ji->code_size);
5342 info->ss_trigger_page = ss_trigger_page;
5343 info->bp_trigger_page = bp_trigger_page;
5345 mono_domain_lock (domain);
5346 g_hash_table_insert (domain_jit_info (domain)->arch_seq_points,
5348 mono_domain_unlock (domain);