2 * mini-arm.c: ARM backend for the Mono code generator
5 * Paolo Molaro (lupus@ximian.com)
6 * Dietmar Maurer (dietmar@ximian.com)
8 * (C) 2003 Ximian, Inc.
13 #include <mono/metadata/appdomain.h>
14 #include <mono/metadata/debug-helpers.h>
15 #include <mono/utils/mono-mmap.h>
22 #include "mono/arch/arm/arm-fpa-codegen.h"
23 #elif defined(ARM_FPU_VFP)
24 #include "mono/arch/arm/arm-vfp-codegen.h"
27 #if defined(__ARM_EABI__) && defined(__linux__) && !defined(PLATFORM_ANDROID)
28 #define HAVE_AEABI_READ_TP 1
/* TLS offsets used to reach the per-thread LMF (Last Managed Frame); -1 means not available. */
31 static gint lmf_tls_offset = -1;
32 static gint lmf_addr_tls_offset = -1;
34 /* This mutex protects architecture specific caches */
35 #define mono_mini_arch_lock() EnterCriticalSection (&mini_arch_mutex)
36 #define mono_mini_arch_unlock() LeaveCriticalSection (&mini_arch_mutex)
37 static CRITICAL_SECTION mini_arch_mutex;
/* CPU feature flags detected at runtime by mono_arch_cpu_optimizazions () */
39 static int v5_supported = 0;
40 static int v7_supported = 0;
41 static int thumb_supported = 0;
44 * The code generated for sequence points reads from this location, which is
45 * made read-only when single stepping is enabled.
47 static gpointer ss_trigger_page;
49 /* Enabled breakpoints read from this trigger page */
50 static gpointer bp_trigger_page;
52 /* Structure used by the sequence points in AOTed code */
54 gpointer ss_trigger_page;
55 gpointer bp_trigger_page;
56 guint8* bp_addrs [MONO_ZERO_LEN_ARRAY]; /* flexible trailing array of per-seq-point breakpoint slots */
61 * floating point support: on ARM it is a mess, there are at least 3
62 * different setups, each of which is binary-incompatible with the others.
63 * 1) FPA: old and ugly, but unfortunately what current distros use
64 * the double binary format has the two words swapped. 8 double registers.
65 * Implemented usually by kernel emulation.
66 * 2) softfloat: the compiler emulates all the fp ops. Usually uses the
67 * ugly swapped double format (I guess a softfloat-vfp exists, too, though).
68 * 3) VFP: the new and actually sensible and useful FP support. Implemented
69 * in HW or kernel-emulated, requires new tools. I think this is what symbian uses.
71 * The plan is to write the FPA support first. softfloat can be tested in a chroot.
73 int mono_exc_esp_offset = 0;
/* Range checks for ARM immediate offset encodings: 12-bit for ldr/str,
 * 8-bit for the halfword/byte forms, and word-multiple 8-bit for VFP loads/stores. */
75 #define arm_is_imm12(v) ((v) > -4096 && (v) < 4096)
76 #define arm_is_imm8(v) ((v) > -256 && (v) < 256)
77 #define arm_is_fpimm8(v) ((v) >= -1020 && (v) <= 1020)
/* Bit patterns used to recognize the instructions emitted for indirect calls
 * ("ldr pc, [...]", "mov lr, pc", "add lr, pc, #4") when decoding vtable call
 * sites in mono_arch_get_vcall_slot (). */
79 #define LDR_MASK ((0xf << ARMCOND_SHIFT) | (3 << 26) | (1 << 22) | (1 << 20) | (15 << 12))
80 #define LDR_PC_VAL ((ARMCOND_AL << ARMCOND_SHIFT) | (1 << 26) | (0 << 22) | (1 << 20) | (15 << 12))
81 #define IS_LDR_PC(val) (((val) & LDR_MASK) == LDR_PC_VAL)
83 #define ADD_LR_PC_4 ((ARMCOND_AL << ARMCOND_SHIFT) | (1 << 25) | (1 << 23) | (ARMREG_PC << 16) | (ARMREG_LR << 12) | 4)
84 #define MOV_LR_PC ((ARMCOND_AL << ARMCOND_SHIFT) | (1 << 24) | (0xa << 20) | (ARMREG_LR << 12) | ARMREG_PC)
/* Return a human-readable name for integer register REG (0..15), for debug output. */
88 mono_arch_regname (int reg)
90 static const char * rnames[] = {
91 "arm_r0", "arm_r1", "arm_r2", "arm_r3", "arm_v1",
92 "arm_v2", "arm_v3", "arm_v4", "arm_v5", "arm_v6",
93 "arm_v7", "arm_fp", "arm_ip", "arm_sp", "arm_lr",
96 if (reg >= 0 && reg < 16)
/* Return a human-readable name for floating point register REG (0..31), for debug output. */
102 mono_arch_fregname (int reg)
104 static const char * rnames[] = {
105 "arm_f0", "arm_f1", "arm_f2", "arm_f3", "arm_f4",
106 "arm_f5", "arm_f6", "arm_f7", "arm_f8", "arm_f9",
107 "arm_f10", "arm_f11", "arm_f12", "arm_f13", "arm_f14",
108 "arm_f15", "arm_f16", "arm_f17", "arm_f18", "arm_f19",
109 "arm_f20", "arm_f21", "arm_f22", "arm_f23", "arm_f24",
110 "arm_f25", "arm_f26", "arm_f27", "arm_f28", "arm_f29",
113 if (reg >= 0 && reg < 32)
/* Emit code computing DREG = SREG + IMM for an arbitrary 32-bit IMM.
 * Uses a single ADD when IMM fits a rotated 8-bit immediate; otherwise
 * materializes IMM into DREG first (so DREG must differ from SREG). */
119 emit_big_add (guint8 *code, int dreg, int sreg, int imm)
121 int imm8, rot_amount;
122 if ((imm8 = mono_arm_is_rotated_imm8 (imm, &rot_amount)) >= 0) {
123 ARM_ADD_REG_IMM (code, dreg, sreg, imm8, rot_amount);
126 g_assert (dreg != sreg);
127 code = mono_arm_emit_load_imm (code, dreg, imm);
128 ARM_ADD_REG_REG (code, dreg, dreg, sreg);
/* Emit an inline word-by-word copy of SIZE bytes from SREG+SOFFSET to
 * DREG+DOFFSET. Large copies (> 4 pointers) use a counted loop clobbering
 * r0-r3; small copies are fully unrolled through LR.
 * NOTE(review): words are copied 4 bytes at a time, so SIZE is presumably
 * expected to be word-aligned when it reaches the loop — confirm at callers. */
133 emit_memcpy (guint8 *code, int size, int dreg, int doffset, int sreg, int soffset)
135 /* we can use r0-r3, since this is called only for incoming args on the stack */
136 if (size > sizeof (gpointer) * 4) {
138 code = emit_big_add (code, ARMREG_R0, sreg, soffset);
139 code = emit_big_add (code, ARMREG_R1, dreg, doffset);
140 start_loop = code = mono_arm_emit_load_imm (code, ARMREG_R2, size);
141 ARM_LDR_IMM (code, ARMREG_R3, ARMREG_R0, 0);
142 ARM_STR_IMM (code, ARMREG_R3, ARMREG_R1, 0);
143 ARM_ADD_REG_IMM8 (code, ARMREG_R0, ARMREG_R0, 4);
144 ARM_ADD_REG_IMM8 (code, ARMREG_R1, ARMREG_R1, 4);
145 ARM_SUBS_REG_IMM8 (code, ARMREG_R2, ARMREG_R2, 4);
/* branch back while the byte counter has not reached zero */
146 ARM_B_COND (code, ARMCOND_NE, 0);
147 arm_patch (code - 4, start_loop);
/* unrolled path: only valid when both source and destination offsets stay ldr/str-encodable */
150 if (arm_is_imm12 (doffset) && arm_is_imm12 (doffset + size) &&
151 arm_is_imm12 (soffset) && arm_is_imm12 (soffset + size)) {
153 ARM_LDR_IMM (code, ARMREG_LR, sreg, soffset);
154 ARM_STR_IMM (code, ARMREG_LR, dreg, doffset);
/* offsets too large to encode: rebase into r0/r1 and copy from offset 0 */
160 code = emit_big_add (code, ARMREG_R0, sreg, soffset);
161 code = emit_big_add (code, ARMREG_R1, dreg, doffset);
162 doffset = soffset = 0;
164 ARM_LDR_IMM (code, ARMREG_LR, ARMREG_R0, soffset);
165 ARM_STR_IMM (code, ARMREG_LR, ARMREG_R1, doffset);
171 g_assert (size == 0);
/* Emit an indirect call through REG: BLX when available, otherwise the
 * classic "mov lr, pc; mov pc, reg" sequence. */
176 emit_call_reg (guint8 *code, int reg)
179 ARM_BLX_REG (code, reg);
181 ARM_MOV_REG_REG (code, ARMREG_LR, ARMREG_PC);
185 ARM_MOV_REG_REG (code, ARMREG_PC, reg);
/* Emit a patchable call site. For dynamic methods the target cannot be
 * branch-patched later, so the address is loaded from an inline literal
 * (initially NULL) into IP and called through the register. */
191 emit_call_seq (MonoCompile *cfg, guint8 *code)
193 if (cfg->method->dynamic) {
194 ARM_LDR_IMM (code, ARMREG_IP, ARMREG_PC, 0);
/* inline literal slot holding the call target; filled in when the target is resolved */
196 *(gpointer*)code = NULL;
198 code = emit_call_reg (code, ARMREG_IP);
/* Move a call's native return value into the destination vreg of INS,
 * according to the FP ABI in use (FPA vs. VFP). For VFP, R4 results come
 * back in r0 and are converted to double; R8 results come back in r0/r1. */
206 emit_move_return_value (MonoCompile *cfg, MonoInst *ins, guint8 *code)
208 switch (ins->opcode) {
211 case OP_FCALL_MEMBASE:
213 if (ins->dreg != ARM_FPA_F0)
214 ARM_MVFD (code, ins->dreg, ARM_FPA_F0)
#if 0
#endif
;
215 #elif defined(ARM_FPU_VFP)
216 if (((MonoCallInst*)ins)->signature->ret->type == MONO_TYPE_R4) {
217 ARM_FMSR (code, ins->dreg, ARMREG_R0);
218 ARM_CVTS (code, ins->dreg, ins->dreg);
220 ARM_FMDRR (code, ARMREG_R0, ARMREG_R1, ins->dreg);
230 * mono_arch_get_argument_info:
231 * @csig: a method signature
232 * @param_count: the number of parameters to consider
233 * @arg_info: an array to store the result infos
235 * Gathers information on parameters such as size, alignment and
236 * padding. arg_info should be large enough to hold param_count + 1 entries.
238 * Returns the size of the activation frame.
241 mono_arch_get_argument_info (MonoMethodSignature *csig, int param_count, MonoJitArgumentInfo *arg_info)
243 int k, frame_size = 0;
244 guint32 size, align, pad;
/* a struct return value is passed via a hidden pointer argument */
247 if (MONO_TYPE_ISSTRUCT (csig->ret)) {
248 frame_size += sizeof (gpointer);
252 arg_info [0].offset = offset;
/* the implicit "this" argument also occupies one pointer slot */
255 frame_size += sizeof (gpointer);
259 arg_info [0].size = frame_size;
261 for (k = 0; k < param_count; k++) {
262 size = mini_type_stack_size_full (NULL, csig->params [k], &align, csig->pinvoke);
264 /* ignore alignment for now */
/* pad frame_size up to the parameter's alignment; pad is the inserted gap */
267 frame_size += pad = (align - (frame_size & (align - 1))) & (align - 1);
268 arg_info [k].pad = pad;
270 arg_info [k + 1].pad = 0;
271 arg_info [k + 1].size = size;
273 arg_info [k + 1].offset = offset;
/* round the whole frame to the architecture frame alignment */
277 align = MONO_ARCH_FRAME_ALIGNMENT;
278 frame_size += pad = (align - (frame_size & (align - 1))) & (align - 1);
279 arg_info [k].pad = pad;
/* Decode an "ldr pc, [rN, #off]" instruction word: extract the base register
 * and (sign-adjusted) 12-bit offset, then return the base register's value
 * from REGS and store the displacement in *DISPLACEMENT. */
285 decode_vcall_slot_from_ldr (guint32 ldr, mgreg_t *regs, int *displacement)
289 reg = (ldr >> 16 ) & 0xf;
290 offset = ldr & 0xfff;
291 if (((ldr >> 23) & 1) == 0) /*U bit, 0 means negative and 1 positive*/
293 /*g_print ("found vcall at r%d + %d for code at %p 0x%x\n", reg, offset, code, *code);*/
294 o = (gpointer)regs [reg];
296 *displacement = offset;
/* Given the return address CODE_PTR of a call, identify whether the call came
 * through a vtable/IMT slot and, if so, return the slot's base address and
 * displacement. Direct (bl / mov pc) calls return nothing to decode. */
301 mono_arch_get_vcall_slot (guint8 *code_ptr, mgreg_t *regs, int *displacement)
303 guint32* code = (guint32*)code_ptr;
305 /* Locate the address of the method-specific trampoline. The call using
306 the vtable slot that took the processing flow to 'arch_create_jit_trampoline'
307 looks something like this:
316 The call sequence could be also:
319 function pointer literal
323 Note that on ARM5+ we can use one instruction instead of the last two.
324 Therefore, we need to locate the 'ldr rA' instruction to know which
325 register was used to hold the method addrs.
328 /* This is the instruction after "ldr pc, xxx", "mov pc, xxx" or "bl xxx" could be either the IMT value or some other instruction*/
331 /* Three possible code sequences can happen here:
335 * ldr pc, [rX - #offset]
341 * ldr pc, [rX - #offset]
343 * direct branch with bl:
347 * direct branch with mov:
351 * We only need to identify interface and virtual calls, the others can be ignored.
/* ARM5+ form: "add lr, pc, #4" immediately precedes the "ldr pc" */
354 if (IS_LDR_PC (code [-1]) && code [-2] == ADD_LR_PC_4)
355 return decode_vcall_slot_from_ldr (code [-1], regs, displacement);
/* pre-ARM5 form: "mov lr, pc" precedes the "ldr pc" */
357 if (IS_LDR_PC (code [0]) && code [-1] == MOV_LR_PC)
358 return decode_vcall_slot_from_ldr (code [0], regs, displacement);
363 #define MAX_ARCH_DELEGATE_PARAMS 3
/* Generate the fast delegate-invoke thunk. With HAS_TARGET, the "this"
 * argument is replaced by delegate->target before jumping to method_ptr;
 * otherwise the PARAM_COUNT register args are slid down one slot.
 * Returns the code start; *CODE_SIZE (if non-NULL) receives its length. */
366 get_delegate_invoke_impl (gboolean has_target, gboolean param_count, guint32 *code_size)
368 guint8 *code, *start;
371 start = code = mono_global_codeman_reserve (12);
373 /* Replace the this argument with the target */
374 ARM_LDR_IMM (code, ARMREG_IP, ARMREG_R0, G_STRUCT_OFFSET (MonoDelegate, method_ptr));
375 ARM_LDR_IMM (code, ARMREG_R0, ARMREG_R0, G_STRUCT_OFFSET (MonoDelegate, target));
376 ARM_MOV_REG_REG (code, ARMREG_PC, ARMREG_IP);
378 g_assert ((code - start) <= 12);
380 mono_arch_flush_icache (start, 12);
/* no target: 2 fixed instructions plus one move per parameter */
384 size = 8 + param_count * 4;
385 start = code = mono_global_codeman_reserve (size);
387 ARM_LDR_IMM (code, ARMREG_IP, ARMREG_R0, G_STRUCT_OFFSET (MonoDelegate, method_ptr));
388 /* slide down the arguments */
389 for (i = 0; i < param_count; ++i) {
390 ARM_MOV_REG_REG (code, (ARMREG_R0 + i), (ARMREG_R0 + i + 1));
392 ARM_MOV_REG_REG (code, ARMREG_PC, ARMREG_IP);
394 g_assert ((code - start) <= size);
396 mono_arch_flush_icache (start, size);
400 *code_size = code - start;
406 * mono_arch_get_delegate_invoke_impls:
408 * Return a list of MonoAotTrampInfo structures for the delegate invoke impl
412 mono_arch_get_delegate_invoke_impls (void)
/* has-target variant, plus one no-target variant per supported arity (0..MAX) */
419 code = get_delegate_invoke_impl (TRUE, 0, &code_len);
420 res = g_slist_prepend (res, mono_aot_tramp_info_create (g_strdup ("delegate_invoke_impl_has_target"), code, code_len));
422 for (i = 0; i <= MAX_ARCH_DELEGATE_PARAMS; ++i) {
423 code = get_delegate_invoke_impl (FALSE, i, &code_len);
424 res = g_slist_prepend (res, mono_aot_tramp_info_create (g_strdup_printf ("delegate_invoke_impl_target_%d", i), code, code_len));
/* Return (and cache) the delegate-invoke thunk matching SIG. Falls back to
 * NULL for unsupported shapes (struct returns, too many or non-regsize
 * parameters). AOT builds look the thunk up by name instead of generating it.
 * The static caches are guarded by the arch mutex. */
431 mono_arch_get_delegate_invoke_impl (MonoMethodSignature *sig, gboolean has_target)
433 guint8 *code, *start;
435 /* FIXME: Support more cases */
436 if (MONO_TYPE_ISSTRUCT (sig->ret))
440 static guint8* cached = NULL;
441 mono_mini_arch_lock ();
443 mono_mini_arch_unlock ();
448 start = mono_aot_get_named_code ("delegate_invoke_impl_has_target");
450 start = get_delegate_invoke_impl (TRUE, 0, NULL);
452 mono_mini_arch_unlock ();
/* no-target path: one cache slot per parameter count */
455 static guint8* cache [MAX_ARCH_DELEGATE_PARAMS + 1] = {NULL};
458 if (sig->param_count > MAX_ARCH_DELEGATE_PARAMS)
460 for (i = 0; i < sig->param_count; ++i)
461 if (!mono_is_regsize_var (sig->params [i]))
464 mono_mini_arch_lock ();
465 code = cache [sig->param_count];
467 mono_mini_arch_unlock ();
472 char *name = g_strdup_printf ("delegate_invoke_impl_target_%d", sig->param_count);
473 start = mono_aot_get_named_code (name);
476 start = get_delegate_invoke_impl (FALSE, sig->param_count, NULL);
478 cache [sig->param_count] = start;
479 mono_mini_arch_unlock ();
/* Recover the "this" argument from the register state at a call site.
 * When the return is a struct, r0 holds the hidden return buffer and
 * "this" moves to r1. */
487 mono_arch_get_this_arg_from_call (MonoGenericSharingContext *gsctx, MonoMethodSignature *sig, mgreg_t *regs, guint8 *code)
489 /* FIXME: handle returning a struct */
490 if (MONO_TYPE_ISSTRUCT (sig->ret))
491 return (gpointer)regs [ARMREG_R1];
492 return (gpointer)regs [ARMREG_R0];
496 * Initialize the cpu to execute managed code.
499 mono_arch_cpu_init (void)
504 * Initialize architecture specific code.
507 mono_arch_init (void)
509 InitializeCriticalSection (&mini_arch_mutex);
/* allocate the single-step and breakpoint trigger pages; the breakpoint
 * page starts with no access so reads fault until a breakpoint is armed */
511 ss_trigger_page = mono_valloc (NULL, mono_pagesize (), MONO_MMAP_READ|MONO_MMAP_32BIT);
512 bp_trigger_page = mono_valloc (NULL, mono_pagesize (), MONO_MMAP_READ|MONO_MMAP_32BIT);
513 mono_mprotect (bp_trigger_page, mono_pagesize (), 0);
517 * Cleanup architecture specific code.
520 mono_arch_cleanup (void)
525 * This function returns the optimizations supported on this cpu.
/* NOTE: the misspelled name is the historical mono_arch API — do not rename. */
528 mono_arch_cpu_optimizazions (guint32 *exclude_mask)
532 thumb_supported = TRUE;
/* detect CPU features by parsing /proc/cpuinfo on Linux */
537 FILE *file = fopen ("/proc/cpuinfo", "r");
539 while ((line = fgets (buf, 512, file))) {
540 if (strncmp (line, "Processor", 9) == 0) {
541 char *ver = strstr (line, "(v");
542 if (ver && (ver [2] == '5' || ver [2] == '6' || ver [2] == '7'))
544 if (ver && (ver [2] == '7'))
548 if (strncmp (line, "Features", 8) == 0) {
549 char *th = strstr (line, "thumb");
551 thumb_supported = TRUE;
559 /*printf ("features: v5: %d, thumb: %d\n", v5_supported, thumb_supported);*/
563 /* no arm-specific optimizations yet */
/* Return whether T fits in a single 32-bit integer register
 * (pointers, references, small ints, non-valuetype generic instances). */
569 is_regsize_var (MonoType *t) {
572 t = mini_type_get_underlying_type (NULL, t);
579 case MONO_TYPE_FNPTR:
581 case MONO_TYPE_OBJECT:
582 case MONO_TYPE_STRING:
583 case MONO_TYPE_CLASS:
584 case MONO_TYPE_SZARRAY:
585 case MONO_TYPE_ARRAY:
587 case MONO_TYPE_GENERICINST:
588 if (!mono_type_generic_inst_is_valuetype (t))
591 case MONO_TYPE_VALUETYPE:
/* Collect the method variables eligible for global register allocation:
 * live, non-volatile, non-indirect locals/args of register size. */
598 mono_arch_get_allocatable_int_vars (MonoCompile *cfg)
603 for (i = 0; i < cfg->num_varinfo; i++) {
604 MonoInst *ins = cfg->varinfo [i];
605 MonoMethodVar *vmv = MONO_VARINFO (cfg, i);
/* unused or already-dead variables have an empty live range */
608 if (vmv->range.first_use.abs_pos >= vmv->range.last_use.abs_pos)
611 if (ins->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT) || (ins->opcode != OP_LOCAL && ins->opcode != OP_ARG))
614 /* we can only allocate 32 bit values */
615 if (is_regsize_var (ins->inst_vtype)) {
616 g_assert (MONO_VARINFO (cfg, i)->reg == -1);
617 g_assert (i == vmv->idx);
618 vars = mono_varlist_insert_sorted (cfg, vars, vmv, FALSE);
625 #define USE_EXTRA_TEMPS 0
/* Return the list of callee-saved integer registers (V1-V4, conditionally V5)
 * available to the global register allocator. */
628 mono_arch_get_global_int_regs (MonoCompile *cfg)
633 * FIXME: Interface calls might go through a static rgctx trampoline which
634 * sets V5, but it doesn't save it, so we need to save it ourselves, and
637 if (cfg->flags & MONO_CFG_HAS_CALLS)
638 cfg->uses_rgctx_reg = TRUE;
640 regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_V1));
641 regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_V2));
642 regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_V3));
643 regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_V4));
644 if (!(cfg->compile_aot || cfg->uses_rgctx_reg))
645 /* V5 is reserved for passing the vtable/rgctx/IMT method */
646 regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_V5));
647 /*regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_V6));*/
648 /*regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_V7));*/
654 * mono_arch_regalloc_cost:
656 * Return the cost, in number of memory references, of the action of
657 * allocating the variable VMV into a register during global register
661 mono_arch_regalloc_cost (MonoCompile *cfg, MonoMethodVar *vmv)
667 #ifndef __GNUC_PREREQ
668 #define __GNUC_PREREQ(maj, min) (0)
/* Flush the instruction cache for the range [code, code+size): required after
 * writing generated code, since ARM has separate I/D caches. Per-platform:
 * Darwin sys_icache_invalidate, GCC >= 4.1 __clear_cache, Android/Linux
 * cacheflush syscalls via inline asm. */
672 mono_arch_flush_icache (guint8 *code, gint size)
675 sys_icache_invalidate (code, size);
676 #elif __GNUC_PREREQ(4, 1)
677 __clear_cache (code, code + size);
678 #elif defined(PLATFORM_ANDROID)
679 const int syscall = 0xf0002;
687 : "r" (code), "r" (code + size), "r" (syscall)
688 : "r0", "r1", "r7", "r2"
691 __asm __volatile ("mov r0, %0\n"
694 "swi 0x9f0002 @ sys_cacheflush"
696 : "r" (code), "r" (code + size), "r" (0)
697 : "r0", "r1", "r3" );
/* Per-argument marshalling info (fragment): where and how a call argument
 * or return value is passed. */
714 guint16 vtsize; /* in param area */
716 guint8 regtype : 4; /* 0 general, 1 basereg, 2 floating point register, see RegType* */
717 guint8 size : 4; /* 1, 2, 4, 8, or regs used by RegTypeStructByVal */
724 gboolean vtype_retaddr;
733 /*#define __alignof__(a) sizeof(a)*/
/* portable alignof: offset of a TYPE member placed after a single char */
734 #define __alignof__(type) G_STRUCT_OFFSET(struct { char c; type x; }, x)
/* Assign the next argument location per the ARM calling convention.
 * SIMPLE arguments take one register (r0-r3) or a stack slot; 64-bit
 * (non-simple) arguments take a register pair, possibly splitting the
 * low word into r3 and the high word onto the stack. Updates *GR and
 * *STACK_SIZE and fills AINFO. */
740 add_general (guint *gr, guint *stack_size, ArgInfo *ainfo, gboolean simple)
743 if (*gr > ARMREG_R3) {
744 ainfo->offset = *stack_size;
745 ainfo->reg = ARMREG_SP; /* in the caller */
746 ainfo->regtype = RegTypeBase;
749 ainfo->regtype = RegTypeGeneral;
753 #if defined(__APPLE__) && defined(MONO_CROSS_COMPILE)
/* whether i8 arguments may be split between r3 and the stack depends on
 * the platform's 64-bit alignment (Darwin aligns longs to 4 bytes) */
756 int i8_align = __alignof__ (gint64);
760 gboolean split = i8_align == 4;
762 gboolean split = TRUE;
765 if (*gr == ARMREG_R3 && split) {
766 /* first word in r3 and the second on the stack */
767 ainfo->offset = *stack_size;
768 ainfo->reg = ARMREG_SP; /* in the caller */
769 ainfo->regtype = RegTypeBaseGen;
771 } else if (*gr >= ARMREG_R3) {
773 /* darwin aligns longs to 4 byte only */
779 ainfo->offset = *stack_size;
780 ainfo->reg = ARMREG_SP; /* in the caller */
781 ainfo->regtype = RegTypeBase;
/* EABI: 8-byte-aligned i8 values must start in an even register */
785 if (i8_align == 8 && ((*gr) & 1))
788 ainfo->regtype = RegTypeIRegPair;
/* Compute the full calling-convention layout for SIG: where each argument
 * and the return value live (registers, register pairs, stack slots, or
 * by-value struct splits), plus the total stack usage. Returns a freshly
 * allocated CallInfo (caller frees). */
797 get_call_info (MonoMethodSignature *sig, gboolean is_pinvoke)
800 int n = sig->hasthis + sig->param_count;
801 MonoType *simpletype;
802 guint32 stack_size = 0;
803 CallInfo *cinfo = g_malloc0 (sizeof (CallInfo) + sizeof (ArgInfo) * n);
808 /* FIXME: handle returning a struct */
/* struct returns consume r0 as a hidden return-address argument */
809 if (MONO_TYPE_ISSTRUCT (sig->ret)) {
810 add_general (&gr, &stack_size, &cinfo->ret, TRUE);
811 cinfo->struct_ret = ARMREG_R0;
812 cinfo->vtype_retaddr = TRUE;
817 add_general (&gr, &stack_size, cinfo->args + n, TRUE);
820 DEBUG(printf("params: %d\n", sig->param_count));
821 for (i = 0; i < sig->param_count; ++i) {
822 if ((sig->call_convention == MONO_CALL_VARARG) && (i == sig->sentinelpos)) {
823 /* Prevent implicit arguments and sig_cookie from
824 being passed in registers */
826 /* Emit the signature cookie just before the implicit arguments */
827 add_general (&gr, &stack_size, &cinfo->sig_cookie, TRUE);
829 DEBUG(printf("param %d: ", i));
830 if (sig->params [i]->byref) {
831 DEBUG(printf("byref\n"));
832 add_general (&gr, &stack_size, cinfo->args + n, TRUE);
836 simpletype = mini_type_get_underlying_type (NULL, sig->params [i]);
837 switch (simpletype->type) {
838 case MONO_TYPE_BOOLEAN:
841 cinfo->args [n].size = 1;
842 add_general (&gr, &stack_size, cinfo->args + n, TRUE);
848 cinfo->args [n].size = 2;
849 add_general (&gr, &stack_size, cinfo->args + n, TRUE);
854 cinfo->args [n].size = 4;
855 add_general (&gr, &stack_size, cinfo->args + n, TRUE);
861 case MONO_TYPE_FNPTR:
862 case MONO_TYPE_CLASS:
863 case MONO_TYPE_OBJECT:
864 case MONO_TYPE_STRING:
865 case MONO_TYPE_SZARRAY:
866 case MONO_TYPE_ARRAY:
868 cinfo->args [n].size = sizeof (gpointer);
869 add_general (&gr, &stack_size, cinfo->args + n, TRUE);
872 case MONO_TYPE_GENERICINST:
873 if (!mono_type_generic_inst_is_valuetype (sig->params [i])) {
874 cinfo->args [n].size = sizeof (gpointer);
875 add_general (&gr, &stack_size, cinfo->args + n, TRUE);
880 case MONO_TYPE_TYPEDBYREF:
881 case MONO_TYPE_VALUETYPE: {
/* value types are passed by value: some words in r0-r3, the rest on the stack */
887 if (simpletype->type == MONO_TYPE_TYPEDBYREF) {
888 size = sizeof (MonoTypedRef);
889 align = sizeof (gpointer);
891 MonoClass *klass = mono_class_from_mono_type (sig->params [i]);
893 size = mono_class_native_size (klass, &align);
895 size = mono_class_value_size (klass, &align);
897 DEBUG(printf ("load %d bytes struct\n",
898 mono_class_native_size (sig->params [i]->data.klass, NULL)));
/* round the struct size up to a whole number of pointer words */
901 align_size += (sizeof (gpointer) - 1);
902 align_size &= ~(sizeof (gpointer) - 1);
903 nwords = (align_size + sizeof (gpointer) -1 ) / sizeof (gpointer);
904 cinfo->args [n].regtype = RegTypeStructByVal;
905 /* FIXME: align stack_size if needed */
/* 8-byte-aligned structs must start in an even register */
907 if (align >= 8 && (gr & 1))
910 if (gr > ARMREG_R3) {
911 cinfo->args [n].size = 0;
912 cinfo->args [n].vtsize = nwords;
914 int rest = ARMREG_R3 - gr + 1;
915 int n_in_regs = rest >= nwords? nwords: rest;
/* size = words in registers, vtsize = words spilled to the param area */
917 cinfo->args [n].size = n_in_regs;
918 cinfo->args [n].vtsize = nwords - n_in_regs;
919 cinfo->args [n].reg = gr;
923 cinfo->args [n].offset = stack_size;
924 /*g_print ("offset for arg %d at %d\n", n, stack_size);*/
925 stack_size += nwords * sizeof (gpointer);
932 cinfo->args [n].size = 8;
933 add_general (&gr, &stack_size, cinfo->args + n, FALSE);
937 g_error ("Can't trampoline 0x%x", sig->params [i]->type);
/* now lay out the return value */
942 simpletype = mini_type_get_underlying_type (NULL, sig->ret);
943 switch (simpletype->type) {
944 case MONO_TYPE_BOOLEAN:
955 case MONO_TYPE_FNPTR:
956 case MONO_TYPE_CLASS:
957 case MONO_TYPE_OBJECT:
958 case MONO_TYPE_SZARRAY:
959 case MONO_TYPE_ARRAY:
960 case MONO_TYPE_STRING:
961 cinfo->ret.regtype = RegTypeGeneral;
962 cinfo->ret.reg = ARMREG_R0;
966 cinfo->ret.regtype = RegTypeIRegPair;
967 cinfo->ret.reg = ARMREG_R0;
971 cinfo->ret.regtype = RegTypeFP;
972 cinfo->ret.reg = ARMREG_R0;
973 /* FIXME: cinfo->ret.reg = ???;
974 cinfo->ret.regtype = RegTypeFP;*/
976 case MONO_TYPE_GENERICINST:
977 if (!mono_type_generic_inst_is_valuetype (sig->ret)) {
978 cinfo->ret.regtype = RegTypeGeneral;
979 cinfo->ret.reg = ARMREG_R0;
982 cinfo->ret.regtype = RegTypeStructByAddr;
984 case MONO_TYPE_VALUETYPE:
985 cinfo->ret.regtype = RegTypeStructByAddr;
987 case MONO_TYPE_TYPEDBYREF:
988 cinfo->ret.regtype = RegTypeStructByAddr;
993 g_error ("Can't handle as return value 0x%x", sig->ret->type);
997 /* align stack size to 8 */
998 DEBUG (printf (" stack size: %d (%d)\n", (stack_size + 15) & ~15, stack_size));
999 stack_size = (stack_size + 7) & ~7;
1001 cinfo->stack_usage = stack_size;
1007 * Set var information according to the calling convention. arm version.
1008 * The locals var stuff should most likely be split in another method.
1011 mono_arch_allocate_vars (MonoCompile *cfg)
1013 MonoMethodSignature *sig;
1014 MonoMethodHeader *header;
1016 int i, offset, size, align, curinst;
1017 int frame_reg = ARMREG_FP;
1019 /* FIXME: this will change when we use FP as gcc does */
1020 cfg->flags |= MONO_CFG_HAS_SPILLUP;
1022 /* allow room for the vararg method args: void* and long/double */
1023 if (mono_jit_trace_calls != NULL && mono_trace_eval (cfg->method))
1024 cfg->param_area = MAX (cfg->param_area, sizeof (gpointer)*8);
1026 header = mono_method_get_header (cfg->method);
1029 * We use the frame register also for any method that has
1030 * exception clauses. This way, when the handlers are called,
1031 * the code will reference local variables using the frame reg instead of
1032 * the stack pointer: if we had to restore the stack pointer, we'd
1033 * corrupt the method frames that are already on the stack (since
1034 * filters get called before stack unwinding happens) when the filter
1035 * code would call any method (this also applies to finally etc.).
1037 if ((cfg->flags & MONO_CFG_HAS_ALLOCA) || header->num_clauses)
1038 frame_reg = ARMREG_FP;
1039 cfg->frame_reg = frame_reg;
1040 if (frame_reg != ARMREG_SP) {
1041 cfg->used_int_regs |= 1 << frame_reg;
1044 if (cfg->compile_aot || cfg->uses_rgctx_reg)
1045 /* V5 is reserved for passing the vtable/rgctx/IMT method */
1046 cfg->used_int_regs |= (1 << ARMREG_V5);
1048 sig = mono_method_signature (cfg->method);
/* scalar returns live directly in r0 */
1052 if (!MONO_TYPE_ISSTRUCT (sig->ret)) {
1053 /* FIXME: handle long and FP values */
1054 switch (mini_type_get_underlying_type (NULL, sig->ret)->type) {
1055 case MONO_TYPE_VOID:
1058 cfg->ret->opcode = OP_REGVAR;
1059 cfg->ret->inst_c0 = ARMREG_R0;
1063 /* local vars are at a positive offset from the stack pointer */
1065 * also note that if the function uses alloca, we use FP
1066 * to point at the local variables.
1068 offset = 0; /* linkage area */
1069 /* align the offset to 16 bytes: not sure this is needed here */
1071 //offset &= ~(8 - 1);
1073 /* add parameter area size for called functions */
1074 offset += cfg->param_area;
1077 if (cfg->flags & MONO_CFG_HAS_FPOUT)
1080 /* allow room to save the return value */
1081 if (mono_jit_trace_calls != NULL && mono_trace_eval (cfg->method))
1084 /* the MonoLMF structure is stored just below the stack pointer */
1086 if (sig->call_convention == MONO_CALL_VARARG) {
1087 cfg->sig_cookie = 0;
/* reserve a pointer-aligned slot for the vtype return address */
1090 if (MONO_TYPE_ISSTRUCT (sig->ret)) {
1091 inst = cfg->vret_addr;
1092 offset += sizeof(gpointer) - 1;
1093 offset &= ~(sizeof(gpointer) - 1);
1094 inst->inst_offset = offset;
1095 inst->opcode = OP_REGOFFSET;
1096 inst->inst_basereg = frame_reg;
1097 if (G_UNLIKELY (cfg->verbose_level > 1)) {
1098 printf ("vret_addr =");
1099 mono_print_ins (cfg->vret_addr);
1101 offset += sizeof(gpointer);
1102 if (sig->call_convention == MONO_CALL_VARARG)
1103 cfg->sig_cookie += sizeof (gpointer);
/* lay out local variables */
1106 curinst = cfg->locals_start;
1107 for (i = curinst; i < cfg->num_varinfo; ++i) {
1108 inst = cfg->varinfo [i];
1109 if ((inst->flags & MONO_INST_IS_DEAD) || inst->opcode == OP_REGVAR)
1112 /* inst->backend.is_pinvoke indicates native sized value types, this is used by the
1113 * pinvoke wrappers when they call functions returning structure */
1114 if (inst->backend.is_pinvoke && MONO_TYPE_ISSTRUCT (inst->inst_vtype) && inst->inst_vtype->type != MONO_TYPE_TYPEDBYREF) {
1116 size = mono_class_native_size (mono_class_from_mono_type (inst->inst_vtype), &ualign);
1120 size = mono_type_size (inst->inst_vtype, &align);
1122 /* FIXME: if a structure is misaligned, our memcpy doesn't work,
1123 * since it loads/stores misaligned words, which don't do the right thing.
1125 if (align < 4 && size >= 4)
1127 offset += align - 1;
1128 offset &= ~(align - 1);
1129 inst->inst_offset = offset;
1130 inst->opcode = OP_REGOFFSET;
1131 inst->inst_basereg = frame_reg;
1133 //g_print ("allocating local %d to %d\n", i, inst->inst_offset);
/* lay out the implicit "this" argument, if any */
1138 inst = cfg->args [curinst];
1139 if (inst->opcode != OP_REGVAR) {
1140 inst->opcode = OP_REGOFFSET;
1141 inst->inst_basereg = frame_reg;
1142 offset += sizeof (gpointer) - 1;
1143 offset &= ~(sizeof (gpointer) - 1);
1144 inst->inst_offset = offset;
1145 offset += sizeof (gpointer);
1146 if (sig->call_convention == MONO_CALL_VARARG)
1147 cfg->sig_cookie += sizeof (gpointer);
/* lay out the declared parameters */
1152 for (i = 0; i < sig->param_count; ++i) {
1153 inst = cfg->args [curinst];
1154 if (inst->opcode != OP_REGVAR) {
1155 inst->opcode = OP_REGOFFSET;
1156 inst->inst_basereg = frame_reg;
1157 size = mono_type_size (sig->params [i], &align);
1158 /* FIXME: if a structure is misaligned, our memcpy doesn't work,
1159 * since it loads/stores misaligned words, which don't do the right thing.
1161 if (align < 4 && size >= 4)
1163 /* The code in the prolog () stores words when storing vtypes received in a register */
1164 if (MONO_TYPE_ISSTRUCT (sig->params [i]))
1166 offset += align - 1;
1167 offset &= ~(align - 1);
1168 inst->inst_offset = offset;
1170 if ((sig->call_convention == MONO_CALL_VARARG) && (i < sig->sentinelpos))
1171 cfg->sig_cookie += size;
1176 /* align the offset to 8 bytes */
1181 cfg->stack_offset = offset;
/* Create architecture-specific variables: the vtype return-address slot,
 * and (for AOT with sequence points) the seq-point info and single-step
 * trigger-page variables, kept volatile so they stay addressable. */
1185 mono_arch_create_vars (MonoCompile *cfg)
1187 MonoMethodSignature *sig;
1189 sig = mono_method_signature (cfg->method);
1191 if (MONO_TYPE_ISSTRUCT (sig->ret)) {
1192 cfg->vret_addr = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_ARG);
1193 if (G_UNLIKELY (cfg->verbose_level > 1)) {
1194 printf ("vret_addr = ");
1195 mono_print_ins (cfg->vret_addr);
1199 if (cfg->gen_seq_points && cfg->compile_aot) {
1200 MonoInst *ins = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1201 ins->flags |= MONO_INST_VOLATILE;
1202 cfg->arch.seq_point_info_var = ins;
1204 /* Allocate a separate variable for this to save 1 load per seq point */
1205 ins = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1206 ins->flags |= MONO_INST_VOLATILE;
1207 cfg->arch.ss_trigger_page_var = ins;
/* Lower CALL's arguments into the locations computed by get_call_info ():
 * moves into r0-r3 (including i8 pairs and soft/hard-float conversions),
 * stores into the outgoing stack area, by-value struct passing via
 * OP_OUTARG_VT, and the hidden vtype return-address register. */
1212 mono_arch_emit_call (MonoCompile *cfg, MonoCallInst *call)
1215 MonoMethodSignature *sig;
1219 sig = call->signature;
1220 n = sig->param_count + sig->hasthis;
1222 cinfo = get_call_info (sig, sig->pinvoke);
1224 for (i = 0; i < n; ++i) {
1225 ArgInfo *ainfo = cinfo->args + i;
/* the implicit "this" argument has no MonoType; treat it as native int */
1228 if (i >= sig->hasthis)
1229 t = sig->params [i - sig->hasthis];
1231 t = &mono_defaults.int_class->byval_arg;
1232 t = mini_type_get_underlying_type (NULL, t);
1234 if ((sig->call_convention == MONO_CALL_VARARG) && (i == sig->sentinelpos)) {
1239 in = call->args [i];
1241 switch (ainfo->regtype) {
1242 case RegTypeGeneral:
1243 case RegTypeIRegPair:
/* 64-bit values: move the two halves into a register pair */
1244 if (!t->byref && ((t->type == MONO_TYPE_I8) || (t->type == MONO_TYPE_U8))) {
1245 MONO_INST_NEW (cfg, ins, OP_MOVE);
1246 ins->dreg = mono_alloc_ireg (cfg);
1247 ins->sreg1 = in->dreg + 1;
1248 MONO_ADD_INS (cfg->cbb, ins);
1249 mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, ainfo->reg, FALSE);
1251 MONO_INST_NEW (cfg, ins, OP_MOVE);
1252 ins->dreg = mono_alloc_ireg (cfg);
1253 ins->sreg1 = in->dreg + 2;
1254 MONO_ADD_INS (cfg->cbb, ins);
1255 mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, ainfo->reg + 1, FALSE);
1256 } else if (!t->byref && ((t->type == MONO_TYPE_R8) || (t->type == MONO_TYPE_R4))) {
1257 #ifndef MONO_ARCH_SOFT_FLOAT
1261 if (ainfo->size == 4) {
1262 #ifdef MONO_ARCH_SOFT_FLOAT
1263 /* mono_emit_call_args () have already done the r8->r4 conversion */
1264 /* The converted value is in an int vreg */
1265 MONO_INST_NEW (cfg, ins, OP_MOVE);
1266 ins->dreg = mono_alloc_ireg (cfg);
1267 ins->sreg1 = in->dreg;
1268 MONO_ADD_INS (cfg->cbb, ins);
1269 mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, ainfo->reg, FALSE);
/* hard float: spill to the param area as r4, then reload into an int reg */
1271 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORER4_MEMBASE_REG, ARMREG_SP, (cfg->param_area - 8), in->dreg);
1272 creg = mono_alloc_ireg (cfg);
1273 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOAD_MEMBASE, creg, ARMREG_SP, (cfg->param_area - 8));
1274 mono_call_inst_add_outarg_reg (cfg, call, creg, ainfo->reg, FALSE);
1277 #ifdef MONO_ARCH_SOFT_FLOAT
/* soft float: extract the r8 halves directly into the register pair */
1278 MONO_INST_NEW (cfg, ins, OP_FGETLOW32);
1279 ins->dreg = mono_alloc_ireg (cfg);
1280 ins->sreg1 = in->dreg;
1281 MONO_ADD_INS (cfg->cbb, ins);
1282 mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, ainfo->reg, FALSE);
1284 MONO_INST_NEW (cfg, ins, OP_FGETHIGH32);
1285 ins->dreg = mono_alloc_ireg (cfg);
1286 ins->sreg1 = in->dreg;
1287 MONO_ADD_INS (cfg->cbb, ins);
1288 mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, ainfo->reg + 1, FALSE);
1290 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORER8_MEMBASE_REG, ARMREG_SP, (cfg->param_area - 8), in->dreg);
1291 creg = mono_alloc_ireg (cfg);
1292 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOAD_MEMBASE, creg, ARMREG_SP, (cfg->param_area - 8));
1293 mono_call_inst_add_outarg_reg (cfg, call, creg, ainfo->reg, FALSE);
1294 creg = mono_alloc_ireg (cfg);
1295 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOAD_MEMBASE, creg, ARMREG_SP, (cfg->param_area - 8 + 4));
1296 mono_call_inst_add_outarg_reg (cfg, call, creg, ainfo->reg + 1, FALSE);
1299 cfg->flags |= MONO_CFG_HAS_FPOUT;
/* plain register-sized argument */
1301 MONO_INST_NEW (cfg, ins, OP_MOVE);
1302 ins->dreg = mono_alloc_ireg (cfg);
1303 ins->sreg1 = in->dreg;
1304 MONO_ADD_INS (cfg->cbb, ins);
1306 mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, ainfo->reg, FALSE);
1309 case RegTypeStructByAddr:
1312 /* FIXME: where is the data allocated? */
1313 arg->backend.reg3 = ainfo->reg;
1314 call->used_iregs |= 1 << ainfo->reg;
1315 g_assert_not_reached ();
1318 case RegTypeStructByVal:
/* defer to mono_arch_emit_outarg_vt via a dedicated OUTARG_VT instruction */
1319 MONO_INST_NEW (cfg, ins, OP_OUTARG_VT);
1320 ins->opcode = OP_OUTARG_VT;
1321 ins->sreg1 = in->dreg;
1322 ins->klass = in->klass;
1323 ins->inst_p0 = call;
1324 ins->inst_p1 = mono_mempool_alloc (cfg->mempool, sizeof (ArgInfo));
1325 memcpy (ins->inst_p1, ainfo, sizeof (ArgInfo));
1326 MONO_ADD_INS (cfg->cbb, ins);
/* RegTypeBase: argument goes entirely into the outgoing stack area */
1329 if (!t->byref && ((t->type == MONO_TYPE_I8) || (t->type == MONO_TYPE_U8))) {
1330 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, ARMREG_SP, ainfo->offset, in->dreg);
1331 } else if (!t->byref && ((t->type == MONO_TYPE_R4) || (t->type == MONO_TYPE_R8))) {
1332 if (t->type == MONO_TYPE_R8) {
1333 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORER8_MEMBASE_REG, ARMREG_SP, ainfo->offset, in->dreg);
1335 #ifdef MONO_ARCH_SOFT_FLOAT
1336 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, ARMREG_SP, ainfo->offset, in->dreg);
1338 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORER4_MEMBASE_REG, ARMREG_SP, ainfo->offset, in->dreg);
1342 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, ARMREG_SP, ainfo->offset, in->dreg);
1345 case RegTypeBaseGen:
/* split i8: one word in r3 and the other on the stack (order depends on endianness) */
1346 if (!t->byref && ((t->type == MONO_TYPE_I8) || (t->type == MONO_TYPE_U8))) {
1347 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, ARMREG_SP, ainfo->offset, (G_BYTE_ORDER == G_BIG_ENDIAN) ? in->dreg + 1 : in->dreg + 2);
1348 MONO_INST_NEW (cfg, ins, OP_MOVE);
1349 ins->dreg = mono_alloc_ireg (cfg);
1350 ins->sreg1 = G_BYTE_ORDER == G_BIG_ENDIAN ? in->dreg + 2 : in->dreg + 1;
1351 MONO_ADD_INS (cfg->cbb, ins);
1352 mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, ARMREG_R3, FALSE);
1353 } else if (!t->byref && (t->type == MONO_TYPE_R8)) {
1356 #ifdef MONO_ARCH_SOFT_FLOAT
1357 g_assert_not_reached ();
1360 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORER8_MEMBASE_REG, ARMREG_SP, (cfg->param_area - 8), in->dreg);
1361 creg = mono_alloc_ireg (cfg);
1362 mono_call_inst_add_outarg_reg (cfg, call, creg, ARMREG_R3, FALSE);
1363 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOAD_MEMBASE, creg, ARMREG_SP, (cfg->param_area - 8));
1364 creg = mono_alloc_ireg (cfg);
1365 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOAD_MEMBASE, creg, ARMREG_SP, (cfg->param_area - 4));
1366 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, ARMREG_SP, ainfo->offset, creg);
1367 cfg->flags |= MONO_CFG_HAS_FPOUT;
1369 g_assert_not_reached ();
1376 arg->backend.reg3 = ainfo->reg;
1377 /* FP args are passed in int regs */
1378 call->used_iregs |= 1 << ainfo->reg;
1379 if (ainfo->size == 8) {
1380 arg->opcode = OP_OUTARG_R8;
1381 call->used_iregs |= 1 << (ainfo->reg + 1);
1383 arg->opcode = OP_OUTARG_R4;
1386 cfg->flags |= MONO_CFG_HAS_FPOUT;
1390 g_assert_not_reached ();
/* pass the address of the vtype return buffer in the designated register */
1394 if (sig->ret && MONO_TYPE_ISSTRUCT (sig->ret)) {
1397 MONO_INST_NEW (cfg, vtarg, OP_MOVE);
1398 vtarg->sreg1 = call->vret_var->dreg;
1399 vtarg->dreg = mono_alloc_preg (cfg);
1400 MONO_ADD_INS (cfg->cbb, vtarg);
1402 mono_call_inst_add_outarg_reg (cfg, call, vtarg->dreg, cinfo->ret.reg, FALSE);
1405 call->stack_usage = cinfo->stack_usage;
/*
 * mono_arch_emit_outarg_vt:
 * Emit IR to pass a valuetype call argument: the first ainfo->size words of
 * SRC go into consecutive integer registers starting at ainfo->reg, and the
 * remaining ovf_size words are copied to the outgoing stack area at
 * ainfo->offset (relative to SP).
 * NOTE(review): this extract is missing interleaved source lines (the leading
 * numbers are the original file's line numbers), so braces and statements
 * between the visible lines are not shown.
 */
1411 mono_arch_emit_outarg_vt (MonoCompile *cfg, MonoInst *ins, MonoInst *src)
1413 MonoCallInst *call = (MonoCallInst*)ins->inst_p0;
1414 ArgInfo *ainfo = ins->inst_p1;
1415 int ovf_size = ainfo->vtsize;
1416 int doffset = ainfo->offset;
1417 int i, soffset, dreg;
/* Load the register-passed part of the struct word by word. */
1420 for (i = 0; i < ainfo->size; ++i) {
1421 dreg = mono_alloc_ireg (cfg);
1422 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, dreg, src->dreg, soffset);
1423 mono_call_inst_add_outarg_reg (cfg, call, dreg, ainfo->reg + i, FALSE);
1424 soffset += sizeof (gpointer);
1426 //g_print ("vt size: %d at R%d + %d\n", doffset, vt->inst_basereg, vt->inst_offset);
/* Copy the part that does not fit in registers to the stack. */
1428 mini_emit_memcpy (cfg, ARMREG_SP, doffset, src->dreg, soffset, ovf_size * sizeof (gpointer), 0);
/*
 * mono_arch_emit_setret:
 * Emit IR that moves VAL into the method's return location. Long returns use
 * the two-word OP_SETLRET; float returns depend on the FP ABI selected at
 * build time (soft-float / VFP / FPA branches below).
 */
1432 mono_arch_emit_setret (MonoCompile *cfg, MonoMethod *method, MonoInst *val)
1434 MonoType *ret = mini_type_get_underlying_type (cfg->generic_sharing_context, mono_method_signature (method)->ret);
/* 64 bit integers are returned in a register pair (sreg1/sreg2). */
1437 if (ret->type == MONO_TYPE_I8 || ret->type == MONO_TYPE_U8) {
1440 MONO_INST_NEW (cfg, ins, OP_SETLRET);
1441 ins->sreg1 = val->dreg + 1;
1442 ins->sreg2 = val->dreg + 2;
1443 MONO_ADD_INS (cfg->cbb, ins);
1446 #ifdef MONO_ARCH_SOFT_FLOAT
/* soft-float: R8 values travel through OP_SETFRET into integer regs. */
1447 if (ret->type == MONO_TYPE_R8) {
1450 MONO_INST_NEW (cfg, ins, OP_SETFRET);
1451 ins->dreg = cfg->ret->dreg;
1452 ins->sreg1 = val->dreg;
1453 MONO_ADD_INS (cfg->cbb, ins);
1456 if (ret->type == MONO_TYPE_R4) {
1457 /* Already converted to an int in method_to_ir () */
1458 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, cfg->ret->dreg, val->dreg);
1461 #elif defined(ARM_FPU_VFP)
/* VFP: both R4 and R8 go through OP_SETFRET. */
1462 if (ret->type == MONO_TYPE_R8 || ret->type == MONO_TYPE_R4) {
1465 MONO_INST_NEW (cfg, ins, OP_SETFRET);
1466 ins->dreg = cfg->ret->dreg;
1467 ins->sreg1 = val->dreg;
1468 MONO_ADD_INS (cfg->cbb, ins);
/* FPA fallback: a plain FP register move suffices. */
1472 if (ret->type == MONO_TYPE_R4 || ret->type == MONO_TYPE_R8) {
1480 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, cfg->ret->dreg, val->dreg);
/*
 * mono_arch_is_inst_imm:
 * Whether IMM can be used as an immediate operand (body elided in this
 * extract; the lowering pass below materializes oversized immediates anyway).
 */
1484 mono_arch_is_inst_imm (gint64 imm)
/* Number of stack slots usable by the dyn-call path in addition to the
 * four core argument registers (r0-r3, PARAM_REGS). */
1489 #define DYN_CALL_STACK_ARGS 6
/* ArchDynCallInfo / DynCallArgs fragments: regs[] holds r0-r3 plus
 * DYN_CALL_STACK_ARGS stack words for mono_arch_start_dyn_call (). */
1492 MonoMethodSignature *sig;
1497 mgreg_t regs [PARAM_REGS + DYN_CALL_STACK_ARGS];
/*
 * dyn_call_supported:
 * Returns whether the dynamic-call (mono_arch_start_dyn_call) fast path can
 * handle SIG given its computed CINFO: all arguments must fit in r0-r3 plus
 * DYN_CALL_STACK_ARGS stack slots, and the return/argument register types
 * must be among the cases handled below.
 */
1503 dyn_call_supported (CallInfo *cinfo, MonoMethodSignature *sig)
/* Too many arguments for the fixed regs[] buffer in DynCallArgs. */
1507 if (sig->hasthis + sig->param_count > PARAM_REGS + DYN_CALL_STACK_ARGS)
1510 switch (cinfo->ret.regtype) {
1512 case RegTypeGeneral:
1513 case RegTypeIRegPair:
1514 case RegTypeStructByAddr:
1519 #elif defined(ARM_FPU_VFP)
1528 for (i = 0; i < cinfo->nargs; ++i) {
1529 switch (cinfo->args [i].regtype) {
1530 case RegTypeGeneral:
1532 case RegTypeIRegPair:
/* Stack-passed args must land within the DYN_CALL_STACK_ARGS window. */
1535 if (cinfo->args [i].offset >= (DYN_CALL_STACK_ARGS * sizeof (gpointer)))
1538 case RegTypeStructByVal:
1539 if (cinfo->args [i].reg + cinfo->args [i].vtsize >= PARAM_REGS + DYN_CALL_STACK_ARGS)
1547 // FIXME: Can't use cinfo only as it doesn't contain info about I8/float */
1548 for (i = 0; i < sig->param_count; ++i) {
1549 MonoType *t = sig->params [i];
1557 #ifdef MONO_ARCH_SOFT_FLOAT
/*
 * mono_arch_dyn_call_prepare:
 * Allocates and returns an ArchDynCallInfo for SIG, or NULL (elided branch)
 * when dyn_call_supported () rejects the signature. The returned info owns
 * its CallInfo and must be released with mono_arch_dyn_call_free ().
 */
1576 mono_arch_dyn_call_prepare (MonoMethodSignature *sig)
1578 ArchDynCallInfo *info;
1581 cinfo = get_call_info (sig, FALSE);
1583 if (!dyn_call_supported (cinfo, sig)) {
1588 info = g_new0 (ArchDynCallInfo, 1);
1589 // FIXME: Preprocess the info to speed up start_dyn_call ()
1591 info->cinfo = cinfo;
1593 return (MonoDynCallInfo*)info;
1597 mono_arch_dyn_call_free (MonoDynCallInfo *info)
1599 ArchDynCallInfo *ainfo = (ArchDynCallInfo*)info;
1601 g_free (ainfo->cinfo);
/*
 * mono_arch_start_dyn_call:
 * Marshals ARGS into the DynCallArgs buffer BUF according to the precomputed
 * call info: the hidden vtype return address (if any) and `this' go first,
 * then each managed argument is widened/split into regs[] slots — slots
 * 0..PARAM_REGS-1 map to r0-r3, higher slots to the first stack words.
 */
1606 mono_arch_start_dyn_call (MonoDynCallInfo *info, gpointer **args, guint8 *ret, guint8 *buf, int buf_len)
1608 ArchDynCallInfo *dinfo = (ArchDynCallInfo*)info;
1609 DynCallArgs *p = (DynCallArgs*)buf;
1610 int arg_index, greg, i, j;
1611 MonoMethodSignature *sig = dinfo->sig;
1613 g_assert (buf_len >= sizeof (DynCallArgs));
/* Hidden pointer for valuetype returns occupies the first register. */
1621 if (dinfo->cinfo->vtype_retaddr)
1622 p->regs [greg ++] = (mgreg_t)ret;
/* `this' pointer, when the signature has one. */
1625 p->regs [greg ++] = (mgreg_t)*(args [arg_index ++]);
1627 for (i = 0; i < sig->param_count; i++) {
1628 MonoType *t = mono_type_get_underlying_type (sig->params [i]);
1629 gpointer *arg = args [arg_index ++];
1630 ArgInfo *ainfo = &dinfo->cinfo->args [i + sig->hasthis];
/* Compute which regs[] slot this argument starts at. */
1633 if (ainfo->regtype == RegTypeGeneral || ainfo->regtype == RegTypeIRegPair || ainfo->regtype == RegTypeStructByVal)
1635 else if (ainfo->regtype == RegTypeBase)
1636 slot = PARAM_REGS + (ainfo->offset / 4);
1638 g_assert_not_reached ();
/* byref args are stored as the raw pointer. */
1641 p->regs [slot] = (mgreg_t)*arg;
1646 case MONO_TYPE_STRING:
1647 case MONO_TYPE_CLASS:
1648 case MONO_TYPE_ARRAY:
1649 case MONO_TYPE_SZARRAY:
1650 case MONO_TYPE_OBJECT:
1654 p->regs [slot] = (mgreg_t)*arg;
1656 case MONO_TYPE_BOOLEAN:
1658 p->regs [slot] = *(guint8*)arg;
1661 p->regs [slot] = *(gint8*)arg;
1664 p->regs [slot] = *(gint16*)arg;
1667 case MONO_TYPE_CHAR:
1668 p->regs [slot] = *(guint16*)arg;
1671 p->regs [slot] = *(gint32*)arg;
1674 p->regs [slot] = *(guint32*)arg;
/* 64-bit values occupy two consecutive slots. */
1678 p->regs [slot ++] = (mgreg_t)arg [0];
1679 p->regs [slot] = (mgreg_t)arg [1];
1682 p->regs [slot] = *(mgreg_t*)arg;
1685 p->regs [slot ++] = (mgreg_t)arg [0];
1686 p->regs [slot] = (mgreg_t)arg [1];
1688 case MONO_TYPE_GENERICINST:
1689 if (MONO_TYPE_IS_REFERENCE (t)) {
1690 p->regs [slot] = (mgreg_t)*arg;
1695 case MONO_TYPE_VALUETYPE:
1696 g_assert (ainfo->regtype == RegTypeStructByVal);
/* size==0 means the struct is passed entirely on the stack. */
1698 if (ainfo->size == 0)
1699 slot = PARAM_REGS + (ainfo->offset / 4);
1703 for (j = 0; j < ainfo->size + ainfo->vtsize; ++j)
1704 p->regs [slot ++] = ((mgreg_t*)arg) [j];
1707 g_assert_not_reached ();
1713 mono_arch_finish_dyn_call (MonoDynCallInfo *info, guint8 *buf)
1715 ArchDynCallInfo *ainfo = (ArchDynCallInfo*)info;
1716 MonoMethodSignature *sig = ((ArchDynCallInfo*)info)->sig;
1717 guint8 *ret = ((DynCallArgs*)buf)->ret;
1718 mgreg_t res = ((DynCallArgs*)buf)->res;
1719 mgreg_t res2 = ((DynCallArgs*)buf)->res2;
1721 switch (mono_type_get_underlying_type (sig->ret)->type) {
1722 case MONO_TYPE_VOID:
1723 *(gpointer*)ret = NULL;
1725 case MONO_TYPE_STRING:
1726 case MONO_TYPE_CLASS:
1727 case MONO_TYPE_ARRAY:
1728 case MONO_TYPE_SZARRAY:
1729 case MONO_TYPE_OBJECT:
1733 *(gpointer*)ret = (gpointer)res;
1739 case MONO_TYPE_BOOLEAN:
1740 *(guint8*)ret = res;
1743 *(gint16*)ret = res;
1746 case MONO_TYPE_CHAR:
1747 *(guint16*)ret = res;
1750 *(gint32*)ret = res;
1753 *(guint32*)ret = res;
1757 /* This handles endianness as well */
1758 ((gint32*)ret) [0] = res;
1759 ((gint32*)ret) [1] = res2;
1761 case MONO_TYPE_GENERICINST:
1762 if (MONO_TYPE_IS_REFERENCE (sig->ret)) {
1763 *(gpointer*)ret = (gpointer)res;
1768 case MONO_TYPE_VALUETYPE:
1769 g_assert (ainfo->cinfo->vtype_retaddr);
1772 #if defined(ARM_FPU_VFP)
1774 *(float*)ret = *(float*)&res;
1776 case MONO_TYPE_R8: {
1782 *(double*)ret = *(double*)®s;
1787 g_assert_not_reached ();
1792 * Allow tracing to work with this interface (with an optional argument)
/*
 * mono_arch_instrument_prolog:
 * Emits a call to FUNC (method, NULL) at method entry for tracing:
 * r0 = method, r1 = 0 (ebp placeholder), r2 = func, then an indirect call.
 */
1796 mono_arch_instrument_prolog (MonoCompile *cfg, void *func, void *p, gboolean enable_arguments)
1800 code = mono_arm_emit_load_imm (code, ARMREG_R0, (guint32)cfg->method);
1801 ARM_MOV_REG_IMM8 (code, ARMREG_R1, 0); /* NULL ebp for now */
1802 code = mono_arm_emit_load_imm (code, ARMREG_R2, (guint32)func);
1803 code = emit_call_reg (code, ARMREG_R2);
/*
 * mono_arch_instrument_epilog_full:
 * Emits a call to FUNC at method exit for tracing. The live return value
 * (r0, r0:r1 pair, FP reg or struct) is spilled to the param area before the
 * call and reloaded afterwards, so the traced method's result is preserved.
 */
1816 mono_arch_instrument_epilog_full (MonoCompile *cfg, void *func, void *p, gboolean enable_arguments, gboolean preserve_argument_registers)
1819 int save_mode = SAVE_NONE;
1821 MonoMethod *method = cfg->method;
1822 int rtype = mini_type_get_underlying_type (cfg->generic_sharing_context, mono_method_signature (method)->ret)->type;
1823 int save_offset = cfg->param_area;
/* Grow the native code buffer if the epilog instrumentation may not fit. */
1827 offset = code - cfg->native_code;
1828 /* we need about 16 instructions */
1829 if (offset > (cfg->code_size - 16 * 4)) {
1830 cfg->code_size *= 2;
1831 cfg->native_code = g_realloc (cfg->native_code, cfg->code_size);
1832 code = cfg->native_code + offset;
1835 case MONO_TYPE_VOID:
1836 /* special case string .ctor icall */
/* NOTE(review): strcmp () returns 0 on a match, so this condition holds for
 * string methods whose name is NOT ".ctor" — that contradicts the comment
 * above; a leading `!' looks missing. Confirm against upstream before
 * changing, since the extract elides the surrounding lines. */
1837 if (strcmp (".ctor", method->name) && method->klass == mono_defaults.string_class)
1838 save_mode = SAVE_ONE;
1840 save_mode = SAVE_NONE;
1844 save_mode = SAVE_TWO;
1848 save_mode = SAVE_FP;
1850 case MONO_TYPE_VALUETYPE:
1851 save_mode = SAVE_STRUCT;
1854 save_mode = SAVE_ONE;
/* Spill the return value and set up the tracer's argument registers. */
1858 switch (save_mode) {
1860 ARM_STR_IMM (code, ARMREG_R0, cfg->frame_reg, save_offset);
1861 ARM_STR_IMM (code, ARMREG_R1, cfg->frame_reg, save_offset + 4);
1862 if (enable_arguments) {
1863 ARM_MOV_REG_REG (code, ARMREG_R2, ARMREG_R1);
1864 ARM_MOV_REG_REG (code, ARMREG_R1, ARMREG_R0);
1868 ARM_STR_IMM (code, ARMREG_R0, cfg->frame_reg, save_offset);
1869 if (enable_arguments) {
1870 ARM_MOV_REG_REG (code, ARMREG_R1, ARMREG_R0);
1874 /* FIXME: what reg? */
1875 if (enable_arguments) {
1876 /* FIXME: what reg? */
1880 if (enable_arguments) {
1881 /* FIXME: get the actual address */
1882 ARM_MOV_REG_REG (code, ARMREG_R1, ARMREG_R0);
/* Call the tracing function: r0 = method, ip = func. */
1890 code = mono_arm_emit_load_imm (code, ARMREG_R0, (guint32)cfg->method);
1891 code = mono_arm_emit_load_imm (code, ARMREG_IP, (guint32)func);
1892 code = emit_call_reg (code, ARMREG_IP);
/* Restore the spilled return value. */
1894 switch (save_mode) {
1896 ARM_LDR_IMM (code, ARMREG_R0, cfg->frame_reg, save_offset);
1897 ARM_LDR_IMM (code, ARMREG_R1, cfg->frame_reg, save_offset + 4);
1900 ARM_LDR_IMM (code, ARMREG_R0, cfg->frame_reg, save_offset);
1914 * The immediate field for cond branches is big enough for all reasonable methods
/* Emit a conditional branch to ins->inst_true_bb; the target is normally
 * unknown yet, so a patch-info entry is recorded and a zero-displacement
 * B<cond> is emitted (the `if (0 && ...)' fast path is disabled). */
1916 #define EMIT_COND_BRANCH_FLAGS(ins,condcode) \
1917 if (0 && ins->inst_true_bb->native_offset) { \
1918 ARM_B_COND (code, (condcode), (code - cfg->native_code + ins->inst_true_bb->native_offset) & 0xffffff); \
1920 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_BB, ins->inst_true_bb); \
1921 ARM_B_COND (code, (condcode), 0); \
1924 #define EMIT_COND_BRANCH(ins,cond) EMIT_COND_BRANCH_FLAGS(ins, branch_cc_table [(cond)])
1926 /* emit an exception if condition is fail
1928 * We assign the extra code used to throw the implicit exceptions
1929 * to cfg->bb_exit as far as the big branch handling is concerned
/* Record an MONO_PATCH_INFO_EXC patch and emit a conditional BL that will be
 * patched to reach the exception-throwing code. */
1931 #define EMIT_COND_SYSTEM_EXCEPTION_FLAGS(condcode,exc_name) \
1933 mono_add_patch_info (cfg, code - cfg->native_code, \
1934 MONO_PATCH_INFO_EXC, exc_name); \
1935 ARM_BL_COND (code, (condcode), 0); \
1938 #define EMIT_COND_SYSTEM_EXCEPTION(cond,exc_name) EMIT_COND_SYSTEM_EXCEPTION_FLAGS(branch_cc_table [(cond)], (exc_name))
/* Pass 1 is empty on ARM (body elided in this extract). */
1941 mono_arch_peephole_pass_1 (MonoCompile *cfg, MonoBasicBlock *bb)
/*
 * mono_arch_peephole_pass_2:
 * Classic store/load forwarding and move elimination over one basic block:
 * a load that immediately follows a store (or another load) of the same
 * [basereg + offset] is rewritten into a register move or constant, and
 * redundant/cyclic OP_MOVEs are deleted.
 */
1946 mono_arch_peephole_pass_2 (MonoCompile *cfg, MonoBasicBlock *bb)
1948 MonoInst *ins, *n, *last_ins = NULL;
1950 MONO_BB_FOR_EACH_INS_SAFE (bb, n, ins) {
1951 switch (ins->opcode) {
1954 /* Already done by an arch-independent pass */
1956 case OP_LOAD_MEMBASE:
1957 case OP_LOADI4_MEMBASE:
/* store reg -> [base+off]; load [base+off] -> dreg  ==>  move reg -> dreg */
1959 * OP_STORE_MEMBASE_REG reg, offset(basereg)
1960 * OP_LOAD_MEMBASE offset(basereg), reg
1962 if (last_ins && (last_ins->opcode == OP_STOREI4_MEMBASE_REG
1963 || last_ins->opcode == OP_STORE_MEMBASE_REG) &&
1964 ins->inst_basereg == last_ins->inst_destbasereg &&
1965 ins->inst_offset == last_ins->inst_offset) {
1966 if (ins->dreg == last_ins->sreg1) {
1967 MONO_DELETE_INS (bb, ins);
1970 //static int c = 0; printf ("MATCHX %s %d\n", cfg->method->name,c++);
1971 ins->opcode = OP_MOVE;
1972 ins->sreg1 = last_ins->sreg1;
1976 * Note: reg1 must be different from the basereg in the second load
1977 * OP_LOAD_MEMBASE offset(basereg), reg1
1978 * OP_LOAD_MEMBASE offset(basereg), reg2
1980 * OP_LOAD_MEMBASE offset(basereg), reg1
1981 * OP_MOVE reg1, reg2
1983 } if (last_ins && (last_ins->opcode == OP_LOADI4_MEMBASE
1984 || last_ins->opcode == OP_LOAD_MEMBASE) &&
1985 ins->inst_basereg != last_ins->dreg &&
1986 ins->inst_basereg == last_ins->inst_basereg &&
1987 ins->inst_offset == last_ins->inst_offset) {
1989 if (ins->dreg == last_ins->dreg) {
1990 MONO_DELETE_INS (bb, ins);
1993 ins->opcode = OP_MOVE;
1994 ins->sreg1 = last_ins->dreg;
1997 //g_assert_not_reached ();
/* store imm -> [base+off]; load [base+off] -> reg  ==>  iconst reg, imm */
2001 * OP_STORE_MEMBASE_IMM imm, offset(basereg)
2002 * OP_LOAD_MEMBASE offset(basereg), reg
2004 * OP_STORE_MEMBASE_IMM imm, offset(basereg)
2005 * OP_ICONST reg, imm
2007 } else if (last_ins && (last_ins->opcode == OP_STOREI4_MEMBASE_IMM
2008 || last_ins->opcode == OP_STORE_MEMBASE_IMM) &&
2009 ins->inst_basereg == last_ins->inst_destbasereg &&
2010 ins->inst_offset == last_ins->inst_offset) {
2011 //static int c = 0; printf ("MATCHX %s %d\n", cfg->method->name,c++);
2012 ins->opcode = OP_ICONST;
2013 ins->inst_c0 = last_ins->inst_imm;
2014 g_assert_not_reached (); // check this rule
/* Narrow loads after narrow stores become sign/zero extensions. */
2018 case OP_LOADU1_MEMBASE:
2019 case OP_LOADI1_MEMBASE:
2020 if (last_ins && (last_ins->opcode == OP_STOREI1_MEMBASE_REG) &&
2021 ins->inst_basereg == last_ins->inst_destbasereg &&
2022 ins->inst_offset == last_ins->inst_offset) {
2023 ins->opcode = (ins->opcode == OP_LOADI1_MEMBASE) ? OP_ICONV_TO_I1 : OP_ICONV_TO_U1;
2024 ins->sreg1 = last_ins->sreg1;
2027 case OP_LOADU2_MEMBASE:
2028 case OP_LOADI2_MEMBASE:
2029 if (last_ins && (last_ins->opcode == OP_STOREI2_MEMBASE_REG) &&
2030 ins->inst_basereg == last_ins->inst_destbasereg &&
2031 ins->inst_offset == last_ins->inst_offset) {
2032 ins->opcode = (ins->opcode == OP_LOADI2_MEMBASE) ? OP_ICONV_TO_I2 : OP_ICONV_TO_U2;
2033 ins->sreg1 = last_ins->sreg1;
2037 ins->opcode = OP_MOVE;
/* Drop no-op moves (dreg == sreg1) and move/counter-move pairs. */
2041 if (ins->dreg == ins->sreg1) {
2042 MONO_DELETE_INS (bb, ins);
2046 * OP_MOVE sreg, dreg
2047 * OP_MOVE dreg, sreg
2049 if (last_ins && last_ins->opcode == OP_MOVE &&
2050 ins->sreg1 == last_ins->dreg &&
2051 ins->dreg == last_ins->sreg1) {
2052 MONO_DELETE_INS (bb, ins);
2060 bb->last_ins = last_ins;
2064 * the branch_cc_table should maintain the order of these
/* Maps Mono branch conditions to ARM condition codes (entries elided). */
2078 branch_cc_table [] = {
/* NEW_INS: allocate a new instruction and insert it before INS in the
 * current basic block (used throughout the lowering pass below). */
2092 #define NEW_INS(cfg,dest,op) do { \
2093 MONO_INST_NEW ((cfg), (dest), (op)); \
2094 mono_bblock_insert_before_ins (bb, ins, (dest)); \
/*
 * map_to_reg_reg_op:
 * Maps a membase/immediate opcode to its register-register equivalent:
 * *_MEMBASE -> *_MEMINDEX (offset supplied in a register) and
 * *_MEMBASE_IMM -> *_MEMBASE_REG (stored value supplied in a register).
 * Used by the lowering pass when an offset/immediate cannot be encoded.
 */
2098 map_to_reg_reg_op (int op)
2107 case OP_COMPARE_IMM:
2109 case OP_ICOMPARE_IMM:
2123 case OP_LOAD_MEMBASE:
2124 return OP_LOAD_MEMINDEX;
2125 case OP_LOADI4_MEMBASE:
2126 return OP_LOADI4_MEMINDEX;
2127 case OP_LOADU4_MEMBASE:
2128 return OP_LOADU4_MEMINDEX;
2129 case OP_LOADU1_MEMBASE:
2130 return OP_LOADU1_MEMINDEX;
2131 case OP_LOADI2_MEMBASE:
2132 return OP_LOADI2_MEMINDEX;
2133 case OP_LOADU2_MEMBASE:
2134 return OP_LOADU2_MEMINDEX;
2135 case OP_LOADI1_MEMBASE:
2136 return OP_LOADI1_MEMINDEX;
2137 case OP_STOREI1_MEMBASE_REG:
2138 return OP_STOREI1_MEMINDEX;
2139 case OP_STOREI2_MEMBASE_REG:
2140 return OP_STOREI2_MEMINDEX;
2141 case OP_STOREI4_MEMBASE_REG:
2142 return OP_STOREI4_MEMINDEX;
2143 case OP_STORE_MEMBASE_REG:
2144 return OP_STORE_MEMINDEX;
2145 case OP_STORER4_MEMBASE_REG:
2146 return OP_STORER4_MEMINDEX;
2147 case OP_STORER8_MEMBASE_REG:
2148 return OP_STORER8_MEMINDEX;
/* Immediate stores keep the membase form; the value moves to a register. */
2149 case OP_STORE_MEMBASE_IMM:
2150 return OP_STORE_MEMBASE_REG;
2151 case OP_STOREI1_MEMBASE_IMM:
2152 return OP_STOREI1_MEMBASE_REG;
2153 case OP_STOREI2_MEMBASE_IMM:
2154 return OP_STOREI2_MEMBASE_REG;
2155 case OP_STOREI4_MEMBASE_IMM:
2156 return OP_STOREI4_MEMBASE_REG;
2158 g_assert_not_reached ();
2162 * Remove from the instruction list the instructions that can't be
2163 * represented with very simple instructions with no register
/*
 * mono_arch_lowering_pass:
 * Rewrites instructions whose immediates/offsets cannot be encoded in ARM
 * instruction fields: immediates that are not valid rotated 8-bit values are
 * materialized with OP_ICONST into a register, membase offsets out of range
 * become memindex forms (via map_to_reg_reg_op), and some FP compares get
 * their operands/branch conditions swapped.
 */
2167 mono_arch_lowering_pass (MonoCompile *cfg, MonoBasicBlock *bb)
2169 MonoInst *ins, *temp, *last_ins = NULL;
2170 int rot_amount, imm8, low_imm;
2172 MONO_BB_FOR_EACH_INS (bb, ins) {
2174 switch (ins->opcode) {
2178 case OP_COMPARE_IMM:
2179 case OP_ICOMPARE_IMM:
/* ALU immediates must be an 8-bit value rotated by an even amount. */
2193 if ((imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount)) < 0) {
2194 NEW_INS (cfg, temp, OP_ICONST);
2195 temp->inst_c0 = ins->inst_imm;
2196 temp->dreg = mono_alloc_ireg (cfg);
2197 ins->sreg2 = temp->dreg;
2198 ins->opcode = mono_op_imm_to_op (ins->opcode);
2200 if (ins->opcode == OP_SBB || ins->opcode == OP_ISBB || ins->opcode == OP_SUBCC)
/* Multiply-by-immediate: strength-reduce 1, 0 and powers of two. */
2206 if (ins->inst_imm == 1) {
2207 ins->opcode = OP_MOVE;
2210 if (ins->inst_imm == 0) {
2211 ins->opcode = OP_ICONST;
2215 imm8 = mono_is_power_of_two (ins->inst_imm);
2217 ins->opcode = OP_SHL_IMM;
2218 ins->inst_imm = imm8;
2221 NEW_INS (cfg, temp, OP_ICONST);
2222 temp->inst_c0 = ins->inst_imm;
2223 temp->dreg = mono_alloc_ireg (cfg);
2224 ins->sreg2 = temp->dreg;
2225 ins->opcode = OP_IMUL;
2231 if (ins->next && (ins->next->opcode == OP_COND_EXC_C || ins->next->opcode == OP_COND_EXC_IC))
2232 /* ARM sets the C flag to 1 if there was _no_ overflow */
2233 ins->next->opcode = OP_COND_EXC_NC;
2235 case OP_LOCALLOC_IMM:
2236 NEW_INS (cfg, temp, OP_ICONST);
2237 temp->inst_c0 = ins->inst_imm;
2238 temp->dreg = mono_alloc_ireg (cfg);
2239 ins->sreg1 = temp->dreg;
2240 ins->opcode = OP_LOCALLOC;
/* Word/byte loads take a 12-bit offset; otherwise go memindex. */
2242 case OP_LOAD_MEMBASE:
2243 case OP_LOADI4_MEMBASE:
2244 case OP_LOADU4_MEMBASE:
2245 case OP_LOADU1_MEMBASE:
2246 /* we can do two things: load the immed in a register
2247 * and use an indexed load, or see if the immed can be
2248 * represented as an ad_imm + a load with a smaller offset
2249 * that fits. We just do the first for now, optimize later.
2251 if (arm_is_imm12 (ins->inst_offset))
2253 NEW_INS (cfg, temp, OP_ICONST);
2254 temp->inst_c0 = ins->inst_offset;
2255 temp->dreg = mono_alloc_ireg (cfg);
2256 ins->sreg2 = temp->dreg;
2257 ins->opcode = map_to_reg_reg_op (ins->opcode);
/* Halfword/signed-byte loads only have an 8-bit offset field. */
2259 case OP_LOADI2_MEMBASE:
2260 case OP_LOADU2_MEMBASE:
2261 case OP_LOADI1_MEMBASE:
2262 if (arm_is_imm8 (ins->inst_offset))
2264 NEW_INS (cfg, temp, OP_ICONST);
2265 temp->inst_c0 = ins->inst_offset;
2266 temp->dreg = mono_alloc_ireg (cfg);
2267 ins->sreg2 = temp->dreg;
2268 ins->opcode = map_to_reg_reg_op (ins->opcode);
/* FP loads: fold the high bits of the offset into an ADD, keep the rest. */
2270 case OP_LOADR4_MEMBASE:
2271 case OP_LOADR8_MEMBASE:
2272 if (arm_is_fpimm8 (ins->inst_offset))
2274 low_imm = ins->inst_offset & 0x1ff;
2275 if ((imm8 = mono_arm_is_rotated_imm8 (ins->inst_offset & ~0x1ff, &rot_amount)) >= 0) {
2276 NEW_INS (cfg, temp, OP_ADD_IMM);
2277 temp->inst_imm = ins->inst_offset & ~0x1ff;
2278 temp->sreg1 = ins->inst_basereg;
2279 temp->dreg = mono_alloc_ireg (cfg);
2280 ins->inst_basereg = temp->dreg;
2281 ins->inst_offset = low_imm;
2284 /* VFP/FPA doesn't have indexed load instructions */
2285 g_assert_not_reached ();
2287 case OP_STORE_MEMBASE_REG:
2288 case OP_STOREI4_MEMBASE_REG:
2289 case OP_STOREI1_MEMBASE_REG:
2290 if (arm_is_imm12 (ins->inst_offset))
2292 NEW_INS (cfg, temp, OP_ICONST);
2293 temp->inst_c0 = ins->inst_offset;
2294 temp->dreg = mono_alloc_ireg (cfg);
2295 ins->sreg2 = temp->dreg;
2296 ins->opcode = map_to_reg_reg_op (ins->opcode);
2298 case OP_STOREI2_MEMBASE_REG:
2299 if (arm_is_imm8 (ins->inst_offset))
2301 NEW_INS (cfg, temp, OP_ICONST);
2302 temp->inst_c0 = ins->inst_offset;
2303 temp->dreg = mono_alloc_ireg (cfg);
2304 ins->sreg2 = temp->dreg;
2305 ins->opcode = map_to_reg_reg_op (ins->opcode);
2307 case OP_STORER4_MEMBASE_REG:
2308 case OP_STORER8_MEMBASE_REG:
2309 if (arm_is_fpimm8 (ins->inst_offset))
2311 low_imm = ins->inst_offset & 0x1ff;
2312 if ((imm8 = mono_arm_is_rotated_imm8 (ins->inst_offset & ~ 0x1ff, &rot_amount)) >= 0 && arm_is_fpimm8 (low_imm)) {
2313 NEW_INS (cfg, temp, OP_ADD_IMM);
2314 temp->inst_imm = ins->inst_offset & ~0x1ff;
2315 temp->sreg1 = ins->inst_destbasereg;
2316 temp->dreg = mono_alloc_ireg (cfg);
2317 ins->inst_destbasereg = temp->dreg;
2318 ins->inst_offset = low_imm;
2321 /*g_print ("fail with: %d (%d, %d)\n", ins->inst_offset, ins->inst_offset & ~0x1ff, low_imm);*/
2322 /* VFP/FPA doesn't have indexed store instructions */
2323 g_assert_not_reached ();
/* Immediate stores: materialize the value, then re-lower the offset. */
2325 case OP_STORE_MEMBASE_IMM:
2326 case OP_STOREI1_MEMBASE_IMM:
2327 case OP_STOREI2_MEMBASE_IMM:
2328 case OP_STOREI4_MEMBASE_IMM:
2329 NEW_INS (cfg, temp, OP_ICONST);
2330 temp->inst_c0 = ins->inst_imm;
2331 temp->dreg = mono_alloc_ireg (cfg);
2332 ins->sreg1 = temp->dreg;
2333 ins->opcode = map_to_reg_reg_op (ins->opcode);
2335 goto loop_start; /* make it handle the possibly big ins->inst_offset */
2337 gboolean swap = FALSE;
2340 /* Some fp compares require swapped operands */
2341 g_assert (ins->next);
2342 switch (ins->next->opcode) {
2344 ins->next->opcode = OP_FBLT;
2348 ins->next->opcode = OP_FBLT_UN;
2352 ins->next->opcode = OP_FBGE;
2356 ins->next->opcode = OP_FBGE_UN;
2364 ins->sreg1 = ins->sreg2;
2373 bb->last_ins = last_ins;
2374 bb->max_vreg = cfg->next_vreg;
2378 mono_arch_decompose_long_opts (MonoCompile *cfg, MonoInst *long_ins)
2382 if (long_ins->opcode == OP_LNEG) {
2384 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ARM_RSBS_IMM, ins->dreg + 1, ins->sreg1 + 1, 0);
2385 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ARM_RSC_IMM, ins->dreg + 2, ins->sreg1 + 2, 0);
/*
 * emit_float_to_int:
 * Emits native code converting the FP value in SREG to an integer in DREG,
 * then truncates/sign-extends to SIZE bytes. Conversion instruction depends
 * on the FP unit (FPA FIX vs VFP FTOSIZD/FTOUIZD via F0).
 */
2391 emit_float_to_int (MonoCompile *cfg, guchar *code, int dreg, int sreg, int size, gboolean is_signed)
2393 /* sreg is a float, dreg is an integer reg */
2395 ARM_FIXZ (code, dreg, sreg);
2396 #elif defined(ARM_FPU_VFP)
2398 ARM_TOSIZD (code, ARM_VFP_F0, sreg);
2400 ARM_TOUIZD (code, ARM_VFP_F0, sreg);
2401 ARM_FMRS (code, dreg, ARM_VFP_F0);
/* Unsigned narrowing: mask (u1) or shift pair (u2). */
2405 ARM_AND_REG_IMM8 (code, dreg, dreg, 0xff);
2406 else if (size == 2) {
2407 ARM_SHL_IMM (code, dreg, dreg, 16);
2408 ARM_SHR_IMM (code, dreg, dreg, 16);
/* Signed narrowing: shift left then arithmetic shift right. */
2412 ARM_SHL_IMM (code, dreg, dreg, 24);
2413 ARM_SAR_IMM (code, dreg, dreg, 24);
2414 } else if (size == 2) {
2415 ARM_SHL_IMM (code, dreg, dreg, 16);
2416 ARM_SAR_IMM (code, dreg, dreg, 16);
/* PatchData fragment: carries target/code/found state for the thunk search. */
2424 const guchar *target;
/* A BL displacement is 24 bits (word-aligned): +/- 32 MB from the call site. */
2429 #define is_call_imm(diff) ((gint)(diff) >= -33554432 && (gint)(diff) <= 33554431)
/*
 * search_thunk_slot:
 * mono_domain_code_foreach () callback. Scans the 12-byte thunk slots of a
 * code chunk for one already pointing at pdata->target (reuse it) or an
 * empty slot (emit a new ldr ip, [pc] / bx|mov pc, ip / target thunk),
 * then patches pdata->code to branch to the thunk. Only chunks reachable
 * from the call site by a 24-bit branch are considered.
 */
2432 search_thunk_slot (void *data, int csize, int bsize, void *user_data) {
2433 PatchData *pdata = (PatchData*)user_data;
2434 guchar *code = data;
2435 guint32 *thunks = data;
2436 guint32 *endthunks = (guint32*)(code + bsize);
2438 int difflow, diffhigh;
2440 /* always ensure a call from pdata->code can reach to the thunks without further thunks */
2441 difflow = (char*)pdata->code - (char*)thunks;
2442 diffhigh = (char*)pdata->code - (char*)endthunks;
2443 if (!((is_call_imm (thunks) && is_call_imm (endthunks)) || (is_call_imm (difflow) && is_call_imm (diffhigh))))
2447 * The thunk is composed of 3 words:
2448 * load constant from thunks [2] into ARM_IP
2451 * Note that the LR register is already setup
2453 //g_print ("thunk nentries: %d\n", ((char*)endthunks - (char*)thunks)/16);
2454 if ((pdata->found == 2) || (pdata->code >= code && pdata->code <= code + csize)) {
2455 while (thunks < endthunks) {
2456 //g_print ("looking for target: %p at %p (%08x-%08x)\n", pdata->target, thunks, thunks [0], thunks [1]);
/* Existing thunk for this target: just repoint the call. */
2457 if (thunks [2] == (guint32)pdata->target) {
2458 arm_patch (pdata->code, (guchar*)thunks);
2459 mono_arch_flush_icache (pdata->code, 4);
2462 } else if ((thunks [0] == 0) && (thunks [1] == 0) && (thunks [2] == 0)) {
2463 /* found a free slot instead: emit thunk */
2464 /* ARMREG_IP is fine to use since this can't be an IMT call
2467 code = (guchar*)thunks;
2468 ARM_LDR_IMM (code, ARMREG_IP, ARMREG_PC, 0);
2469 if (thumb_supported)
2470 ARM_BX (code, ARMREG_IP);
2472 ARM_MOV_REG_REG (code, ARMREG_PC, ARMREG_IP);
2473 thunks [2] = (guint32)pdata->target;
2474 mono_arch_flush_icache ((guchar*)thunks, 12);
2476 arm_patch (pdata->code, (guchar*)thunks);
2477 mono_arch_flush_icache (pdata->code, 4);
2481 /* skip 12 bytes, the size of the thunk */
2485 //g_print ("failed thunk lookup for %p from %p at %p (%d entries)\n", pdata->target, pdata->code, data, count);
/*
 * handle_thunk:
 * Patches CODE to reach TARGET via a branch thunk when the target is out of
 * direct-branch range. Runs search_thunk_slot over the domain's code chunks
 * (a second pass, elided here, takes the first free slot) and asserts that a
 * thunk was found/created.
 */
2491 handle_thunk (MonoDomain *domain, int absolute, guchar *code, const guchar *target)
2496 domain = mono_domain_get ();
2499 pdata.target = target;
2500 pdata.absolute = absolute;
2503 mono_domain_lock (domain);
2504 mono_domain_code_foreach (domain, search_thunk_slot, &pdata);
2507 /* this uses the first available slot */
2509 mono_domain_code_foreach (domain, search_thunk_slot, &pdata);
2511 mono_domain_unlock (domain);
2513 if (pdata.found != 1)
2514 g_print ("thunk failed for %p from %p\n", target, code);
2515 g_assert (pdata.found == 1);
/*
 * arm_patch_general:
 * Patches the instruction(s) at CODE so control reaches TARGET. Handles:
 * direct B/BL (24-bit displacement, with BL->BLX rewriting when entering
 * Thumb), the ldr ip/mov-or-bx call sequences whose address constant is
 * embedded in the code stream, and falls back to a branch thunk when the
 * displacement does not fit. Left byte-identical: the patching logic depends
 * on exact instruction-word layouts and this extract elides several lines.
 */
2519 arm_patch_general (MonoDomain *domain, guchar *code, const guchar *target)
2521 guint32 *code32 = (void*)code;
2522 guint32 ins = *code32;
2523 guint32 prim = (ins >> 25) & 7;
2524 guint32 tval = GPOINTER_TO_UINT (target);
2526 //g_print ("patching 0x%08x (0x%08x) to point to 0x%08x\n", code, ins, target);
2527 if (prim == 5) { /* 101b */
2528 /* the diff starts 8 bytes from the branch opcode */
2529 gint diff = target - code - 8;
2531 gint tmask = 0xffffffff;
2532 if (tval & 1) { /* entering thumb mode */
2533 diff = target - 1 - code - 8;
2534 g_assert (thumb_supported);
2535 tbits = 0xf << 28; /* bl->blx bit pattern */
2536 g_assert ((ins & (1 << 24))); /* it must be a bl, not b instruction */
2537 /* this low bit of the displacement is moved to bit 24 in the instruction encoding */
2541 tmask = ~(1 << 24); /* clear the link bit */
2542 /*g_print ("blx to thumb: target: %p, code: %p, diff: %d, mask: %x\n", target, code, diff, tmask);*/
2547 if (diff <= 33554431) {
2549 ins = (ins & 0xff000000) | diff;
2551 *code32 = ins | tbits;
2555 /* diff between 0 and -33554432 */
2556 if (diff >= -33554432) {
2558 ins = (ins & 0xff000000) | (diff & ~0xff000000);
2560 *code32 = ins | tbits;
/* Displacement out of 24-bit range: go through a thunk. */
2565 handle_thunk (domain, TRUE, code, target);
2570 * The alternative call sequences looks like this:
2572 * ldr ip, [pc] // loads the address constant
2573 * b 1f // jumps around the constant
2574 * address constant embedded in the code
2579 * There are two cases for patching:
2580 * a) at the end of method emission: in this case code points to the start
2581 * of the call sequence
2582 * b) during runtime patching of the call site: in this case code points
2583 * to the mov pc, ip instruction
2585 * We have to handle also the thunk jump code sequence:
2589 * address constant // execution never reaches here
2591 if ((ins & 0x0ffffff0) == 0x12fff10) {
2592 /* Branch and exchange: the address is constructed in a reg
2593 * We can patch BX when the code sequence is the following:
2594 * ldr ip, [pc, #0] ; 0x8
/* Re-emit the expected sequence into ccode[] to验证... verify the site
 * matches before rewriting its embedded address constant. */
2601 guint8 *emit = (guint8*)ccode;
2602 ARM_LDR_IMM (emit, ARMREG_IP, ARMREG_PC, 0);
2604 ARM_MOV_REG_REG (emit, ARMREG_LR, ARMREG_PC);
2605 ARM_BX (emit, ARMREG_IP);
2607 /*patching from magic trampoline*/
2608 if (ins == ccode [3]) {
2609 g_assert (code32 [-4] == ccode [0]);
2610 g_assert (code32 [-3] == ccode [1]);
2611 g_assert (code32 [-1] == ccode [2]);
2612 code32 [-2] = (guint32)target;
2615 /*patching from JIT*/
2616 if (ins == ccode [0]) {
2617 g_assert (code32 [1] == ccode [1]);
2618 g_assert (code32 [3] == ccode [2]);
2619 g_assert (code32 [4] == ccode [3]);
2620 code32 [2] = (guint32)target;
2623 g_assert_not_reached ();
2624 } else if ((ins & 0x0ffffff0) == 0x12fff30) {
/* BLX register form of the same sequence. */
2632 guint8 *emit = (guint8*)ccode;
2633 ARM_LDR_IMM (emit, ARMREG_IP, ARMREG_PC, 0);
2635 ARM_BLX_REG (emit, ARMREG_IP);
2637 g_assert (code32 [-3] == ccode [0]);
2638 g_assert (code32 [-2] == ccode [1]);
2639 g_assert (code32 [0] == ccode [2]);
2641 code32 [-1] = (guint32)target;
2644 guint32 *tmp = ccode;
2645 guint8 *emit = (guint8*)tmp;
2646 ARM_LDR_IMM (emit, ARMREG_IP, ARMREG_PC, 0);
2647 ARM_MOV_REG_REG (emit, ARMREG_LR, ARMREG_PC);
2648 ARM_MOV_REG_REG (emit, ARMREG_PC, ARMREG_IP);
2649 ARM_BX (emit, ARMREG_IP);
2650 if (ins == ccode [2]) {
2651 g_assert_not_reached (); // should be -2 ...
2652 code32 [-1] = (guint32)target;
2655 if (ins == ccode [0]) {
2656 /* handles both thunk jump code and the far call sequence */
2657 code32 [2] = (guint32)target;
2660 g_assert_not_reached ();
2662 // g_print ("patched with 0x%08x\n", ins);
/* arm_patch: convenience wrapper — patch CODE to reach TARGET with no
 * domain context (thunks, if needed, use the current domain). */
2666 arm_patch (guchar *code, const guchar *target)
2668 arm_patch_general (NULL, code, target);
2672 * Return the >= 0 uimm8 value if val can be represented with a byte + rotation
2673 * (with the rotation amount in *rot_amount. rot_amount is already adjusted
2674 * to be used with the emit macros.
2675 * Return -1 otherwise.
2678 mono_arm_is_rotated_imm8 (guint32 val, gint *rot_amount)
/* Try every even rotation (ARM immediates rotate right by 2*n). */
2681 for (i = 0; i < 31; i+= 2) {
2682 res = (val << (32 - i)) | (val >> i);
2685 *rot_amount = i? 32 - i: 0;
2692 * Emits in code a sequence of instructions that load the value 'val'
2693 * into the dreg register. Uses at most 4 instructions.
2696 mono_arm_emit_load_imm (guint8 *code, int dreg, guint32 val)
2698 int imm8, rot_amount;
/* (Elided branch) constant-pool load: ldr dreg, [pc] + skip word. */
2700 ARM_LDR_IMM (code, dreg, ARMREG_PC, 0);
2701 /* skip the constant pool */
/* Single-instruction encodings: rotated imm8 directly or via MVN. */
2707 if ((imm8 = mono_arm_is_rotated_imm8 (val, &rot_amount)) >= 0) {
2708 ARM_MOV_REG_IMM (code, dreg, imm8, rot_amount);
2709 } else if ((imm8 = mono_arm_is_rotated_imm8 (~val, &rot_amount)) >= 0) {
2710 ARM_MVN_REG_IMM (code, dreg, imm8, rot_amount);
/* ARMv7 (v7_supported path): MOVW/MOVT pair. */
2713 ARM_MOVW_REG_IMM (code, dreg, val & 0xffff);
2715 ARM_MOVT_REG_IMM (code, dreg, (val >> 16) & 0xffff);
/* Fallback: build the value a byte at a time with MOV + up to 3 ADDs,
 * starting from the lowest non-zero byte. */
2719 ARM_MOV_REG_IMM8 (code, dreg, (val & 0xFF));
2721 ARM_ADD_REG_IMM (code, dreg, dreg, (val & 0xFF00) >> 8, 24);
2723 if (val & 0xFF0000) {
2724 ARM_ADD_REG_IMM (code, dreg, dreg, (val & 0xFF0000) >> 16, 16);
2726 if (val & 0xFF000000) {
2727 ARM_ADD_REG_IMM (code, dreg, dreg, (val & 0xFF000000) >> 24, 8);
2729 } else if (val & 0xFF00) {
2730 ARM_MOV_REG_IMM (code, dreg, (val & 0xFF00) >> 8, 24);
2731 if (val & 0xFF0000) {
2732 ARM_ADD_REG_IMM (code, dreg, dreg, (val & 0xFF0000) >> 16, 16);
2734 if (val & 0xFF000000) {
2735 ARM_ADD_REG_IMM (code, dreg, dreg, (val & 0xFF000000) >> 24, 8);
2737 } else if (val & 0xFF0000) {
2738 ARM_MOV_REG_IMM (code, dreg, (val & 0xFF0000) >> 16, 16);
2739 if (val & 0xFF000000) {
2740 ARM_ADD_REG_IMM (code, dreg, dreg, (val & 0xFF000000) >> 24, 8);
2743 //g_assert_not_reached ();
2749 * emit_load_volatile_arguments:
2751 * Load volatile arguments from the stack to the original input registers.
2752 * Required before a tail call.
2755 emit_load_volatile_arguments (MonoCompile *cfg, guint8 *code)
2757 MonoMethod *method = cfg->method;
2758 MonoMethodSignature *sig;
2763 /* FIXME: Generate intermediate code instead */
2765 sig = mono_method_signature (method);
2767 /* This is the opposite of the code in emit_prolog */
2771 cinfo = get_call_info (sig, sig->pinvoke);
/* Reload the hidden valuetype return address into its register. */
2773 if (MONO_TYPE_ISSTRUCT (sig->ret)) {
2774 ArgInfo *ainfo = &cinfo->ret;
2775 inst = cfg->vret_addr;
2776 g_assert (arm_is_imm12 (inst->inst_offset));
2777 ARM_LDR_IMM (code, ainfo->reg, inst->inst_basereg, inst->inst_offset);
2779 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
2780 ArgInfo *ainfo = cinfo->args + i;
2781 inst = cfg->args [pos];
2783 if (cfg->verbose_level > 2)
2784 g_print ("Loading argument %d (type: %d)\n", i, ainfo->regtype);
/* Argument kept in a register: move/load it back to its ABI register. */
2785 if (inst->opcode == OP_REGVAR) {
2786 if (ainfo->regtype == RegTypeGeneral)
2787 ARM_MOV_REG_REG (code, inst->dreg, ainfo->reg);
2788 else if (ainfo->regtype == RegTypeFP) {
2789 g_assert_not_reached ();
2790 } else if (ainfo->regtype == RegTypeBase) {
2794 if (arm_is_imm12 (prev_sp_offset + ainfo->offset)) {
2795 ARM_LDR_IMM (code, inst->dreg, ARMREG_SP, (prev_sp_offset + ainfo->offset));
2797 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
2798 ARM_LDR_REG_REG (code, inst->dreg, ARMREG_SP, ARMREG_IP);
2802 g_assert_not_reached ();
/* Argument spilled to the frame: reload into the ABI register(s). */
2804 if (ainfo->regtype == RegTypeGeneral || ainfo->regtype == RegTypeIRegPair) {
2805 switch (ainfo->size) {
/* 8-byte args reload both halves of the register pair. */
2812 g_assert (arm_is_imm12 (inst->inst_offset));
2813 ARM_LDR_IMM (code, ainfo->reg, inst->inst_basereg, inst->inst_offset);
2814 g_assert (arm_is_imm12 (inst->inst_offset + 4));
2815 ARM_LDR_IMM (code, ainfo->reg + 1, inst->inst_basereg, inst->inst_offset + 4);
2818 if (arm_is_imm12 (inst->inst_offset)) {
2819 ARM_LDR_IMM (code, ainfo->reg, inst->inst_basereg, inst->inst_offset);
2821 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
2822 ARM_LDR_REG_REG (code, ainfo->reg, inst->inst_basereg, ARMREG_IP);
2826 } else if (ainfo->regtype == RegTypeBaseGen) {
2829 } else if (ainfo->regtype == RegTypeBase) {
2831 } else if (ainfo->regtype == RegTypeFP) {
2832 g_assert_not_reached ();
2833 } else if (ainfo->regtype == RegTypeStructByVal) {
2834 int doffset = inst->inst_offset;
2838 if (mono_class_from_mono_type (inst->inst_vtype))
2839 size = mono_class_native_size (mono_class_from_mono_type (inst->inst_vtype), NULL);
/* Reload each register-passed word of the struct. */
2840 for (cur_reg = 0; cur_reg < ainfo->size; ++cur_reg) {
2841 if (arm_is_imm12 (doffset)) {
2842 ARM_LDR_IMM (code, ainfo->reg + cur_reg, inst->inst_basereg, doffset);
2844 code = mono_arm_emit_load_imm (code, ARMREG_IP, doffset);
2845 ARM_LDR_REG_REG (code, ainfo->reg + cur_reg, inst->inst_basereg, ARMREG_IP);
2847 soffset += sizeof (gpointer);
2848 doffset += sizeof (gpointer);
2853 } else if (ainfo->regtype == RegTypeStructByAddr) {
2870 mono_arch_output_basic_block (MonoCompile *cfg, MonoBasicBlock *bb)
2875 guint8 *code = cfg->native_code + cfg->code_len;
2876 MonoInst *last_ins = NULL;
2877 guint last_offset = 0;
2879 int imm8, rot_amount;
2881 /* we don't align basic blocks of loops on arm */
2883 if (cfg->verbose_level > 2)
2884 g_print ("Basic block %d starting at offset 0x%x\n", bb->block_num, bb->native_offset);
2886 cpos = bb->max_offset;
2888 if (cfg->prof_options & MONO_PROFILE_COVERAGE) {
2889 //MonoCoverageInfo *cov = mono_get_coverage_info (cfg->method);
2890 //g_assert (!mono_compile_aot);
2893 // cov->data [bb->dfn].iloffset = bb->cil_code - cfg->cil_code;
2894 /* this is not thread safe, but good enough */
2895 /* fixme: how to handle overflows? */
2896 //x86_inc_mem (code, &cov->data [bb->dfn].count);
2899 if (mono_break_at_bb_method && mono_method_desc_full_match (mono_break_at_bb_method, cfg->method) && bb->block_num == mono_break_at_bb_bb_num) {
2900 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_INTERNAL_METHOD,
2901 (gpointer)"mono_break");
2902 code = emit_call_seq (cfg, code);
2905 MONO_BB_FOR_EACH_INS (bb, ins) {
2906 offset = code - cfg->native_code;
2908 max_len = ((guint8 *)ins_get_spec (ins->opcode))[MONO_INST_LEN];
2910 if (offset > (cfg->code_size - max_len - 16)) {
2911 cfg->code_size *= 2;
2912 cfg->native_code = g_realloc (cfg->native_code, cfg->code_size);
2913 code = cfg->native_code + offset;
2915 // if (ins->cil_code)
2916 // g_print ("cil code\n");
2917 mono_debug_record_line_number (cfg, ins, offset);
2919 switch (ins->opcode) {
2920 case OP_MEMORY_BARRIER:
2923 #ifdef HAVE_AEABI_READ_TP
2924 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_INTERNAL_METHOD,
2925 (gpointer)"__aeabi_read_tp");
2926 code = emit_call_seq (cfg, code);
2928 ARM_LDR_IMM (code, ins->dreg, ARMREG_R0, ins->inst_offset);
2930 g_assert_not_reached ();
2934 ppc_mullw (code, ppc_r4, ins->sreg1, ins->sreg2);
2935 ppc_mulhw (code, ppc_r3, ins->sreg1, ins->sreg2);
2938 ppc_mullw (code, ppc_r4, ins->sreg1, ins->sreg2);
2939 ppc_mulhwu (code, ppc_r3, ins->sreg1, ins->sreg2);
2941 case OP_STOREI1_MEMBASE_IMM:
2942 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_imm & 0xFF);
2943 g_assert (arm_is_imm12 (ins->inst_offset));
2944 ARM_STRB_IMM (code, ARMREG_LR, ins->inst_destbasereg, ins->inst_offset);
2946 case OP_STOREI2_MEMBASE_IMM:
2947 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_imm & 0xFFFF);
2948 g_assert (arm_is_imm8 (ins->inst_offset));
2949 ARM_STRH_IMM (code, ARMREG_LR, ins->inst_destbasereg, ins->inst_offset);
2951 case OP_STORE_MEMBASE_IMM:
2952 case OP_STOREI4_MEMBASE_IMM:
2953 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_imm);
2954 g_assert (arm_is_imm12 (ins->inst_offset));
2955 ARM_STR_IMM (code, ARMREG_LR, ins->inst_destbasereg, ins->inst_offset);
2957 case OP_STOREI1_MEMBASE_REG:
2958 g_assert (arm_is_imm12 (ins->inst_offset));
2959 ARM_STRB_IMM (code, ins->sreg1, ins->inst_destbasereg, ins->inst_offset);
2961 case OP_STOREI2_MEMBASE_REG:
2962 g_assert (arm_is_imm8 (ins->inst_offset));
2963 ARM_STRH_IMM (code, ins->sreg1, ins->inst_destbasereg, ins->inst_offset);
2965 case OP_STORE_MEMBASE_REG:
2966 case OP_STOREI4_MEMBASE_REG:
2967 /* this case is special, since it happens for spill code after lowering has been called */
2968 if (arm_is_imm12 (ins->inst_offset)) {
2969 ARM_STR_IMM (code, ins->sreg1, ins->inst_destbasereg, ins->inst_offset);
2971 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_offset);
2972 ARM_STR_REG_REG (code, ins->sreg1, ins->inst_destbasereg, ARMREG_LR);
2975 case OP_STOREI1_MEMINDEX:
2976 ARM_STRB_REG_REG (code, ins->sreg1, ins->inst_destbasereg, ins->sreg2);
2978 case OP_STOREI2_MEMINDEX:
2979 ARM_STRH_REG_REG (code, ins->sreg1, ins->inst_destbasereg, ins->sreg2);
2981 case OP_STORE_MEMINDEX:
2982 case OP_STOREI4_MEMINDEX:
2983 ARM_STR_REG_REG (code, ins->sreg1, ins->inst_destbasereg, ins->sreg2);
2986 g_assert_not_reached ();
2988 case OP_LOAD_MEMINDEX:
2989 case OP_LOADI4_MEMINDEX:
2990 case OP_LOADU4_MEMINDEX:
2991 ARM_LDR_REG_REG (code, ins->dreg, ins->inst_basereg, ins->sreg2);
2993 case OP_LOADI1_MEMINDEX:
2994 ARM_LDRSB_REG_REG (code, ins->dreg, ins->inst_basereg, ins->sreg2);
2996 case OP_LOADU1_MEMINDEX:
2997 ARM_LDRB_REG_REG (code, ins->dreg, ins->inst_basereg, ins->sreg2);
2999 case OP_LOADI2_MEMINDEX:
3000 ARM_LDRSH_REG_REG (code, ins->dreg, ins->inst_basereg, ins->sreg2);
3002 case OP_LOADU2_MEMINDEX:
3003 ARM_LDRH_REG_REG (code, ins->dreg, ins->inst_basereg, ins->sreg2);
3005 case OP_LOAD_MEMBASE:
3006 case OP_LOADI4_MEMBASE:
3007 case OP_LOADU4_MEMBASE:
3008 /* this case is special, since it happens for spill code after lowering has been called */
3009 if (arm_is_imm12 (ins->inst_offset)) {
3010 ARM_LDR_IMM (code, ins->dreg, ins->inst_basereg, ins->inst_offset);
3012 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_offset);
3013 ARM_LDR_REG_REG (code, ins->dreg, ins->inst_basereg, ARMREG_LR);
3016 case OP_LOADI1_MEMBASE:
3017 g_assert (arm_is_imm8 (ins->inst_offset));
3018 ARM_LDRSB_IMM (code, ins->dreg, ins->inst_basereg, ins->inst_offset);
3020 case OP_LOADU1_MEMBASE:
3021 g_assert (arm_is_imm12 (ins->inst_offset));
3022 ARM_LDRB_IMM (code, ins->dreg, ins->inst_basereg, ins->inst_offset);
3024 case OP_LOADU2_MEMBASE:
3025 g_assert (arm_is_imm8 (ins->inst_offset));
3026 ARM_LDRH_IMM (code, ins->dreg, ins->inst_basereg, ins->inst_offset);
3028 case OP_LOADI2_MEMBASE:
3029 g_assert (arm_is_imm8 (ins->inst_offset));
3030 ARM_LDRSH_IMM (code, ins->dreg, ins->inst_basereg, ins->inst_offset);
3032 case OP_ICONV_TO_I1:
3033 ARM_SHL_IMM (code, ins->dreg, ins->sreg1, 24);
3034 ARM_SAR_IMM (code, ins->dreg, ins->dreg, 24);
3036 case OP_ICONV_TO_I2:
3037 ARM_SHL_IMM (code, ins->dreg, ins->sreg1, 16);
3038 ARM_SAR_IMM (code, ins->dreg, ins->dreg, 16);
3040 case OP_ICONV_TO_U1:
3041 ARM_AND_REG_IMM8 (code, ins->dreg, ins->sreg1, 0xff);
3043 case OP_ICONV_TO_U2:
3044 ARM_SHL_IMM (code, ins->dreg, ins->sreg1, 16);
3045 ARM_SHR_IMM (code, ins->dreg, ins->dreg, 16);
3049 ARM_CMP_REG_REG (code, ins->sreg1, ins->sreg2);
3051 case OP_COMPARE_IMM:
3052 case OP_ICOMPARE_IMM:
3053 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
3054 g_assert (imm8 >= 0);
3055 ARM_CMP_REG_IMM (code, ins->sreg1, imm8, rot_amount);
3059 * gdb does not like encountering the hw breakpoint ins in the debugged code.
3060 * So instead of emitting a trap, we emit a call to a C function and place a
3063 //*(int*)code = 0xef9f0001;
3066 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_INTERNAL_METHOD,
3067 (gpointer)"mono_break");
3068 code = emit_call_seq (cfg, code);
3070 case OP_RELAXED_NOP:
3075 case OP_DUMMY_STORE:
3076 case OP_NOT_REACHED:
3079 case OP_SEQ_POINT: {
3081 MonoInst *info_var = cfg->arch.seq_point_info_var;
3082 MonoInst *ss_trigger_page_var = cfg->arch.ss_trigger_page_var;
3084 int dreg = ARMREG_LR;
3087 * For AOT, we use one got slot per method, which will point to a
3088 * SeqPointInfo structure, containing all the information required
3089 * by the code below.
3091 if (cfg->compile_aot) {
3092 g_assert (info_var);
3093 g_assert (info_var->opcode == OP_REGOFFSET);
3094 g_assert (arm_is_imm12 (info_var->inst_offset));
3098 * Read from the single stepping trigger page. This will cause a
3099 * SIGSEGV when single stepping is enabled.
3100 * We do this _before_ the breakpoint, so single stepping after
3101 * a breakpoint is hit will step to the next IL offset.
3103 g_assert (((guint64)(gsize)ss_trigger_page >> 32) == 0);
3105 if (ins->flags & MONO_INST_SINGLE_STEP_LOC) {
3106 if (cfg->compile_aot) {
3107 /* Load the trigger page addr from the variable initialized in the prolog */
3108 var = ss_trigger_page_var;
3110 g_assert (var->opcode == OP_REGOFFSET);
3111 g_assert (arm_is_imm12 (var->inst_offset));
3112 ARM_LDR_IMM (code, dreg, var->inst_basereg, var->inst_offset);
3114 ARM_LDR_IMM (code, dreg, ARMREG_PC, 0);
3116 *(int*)code = (int)ss_trigger_page;
3119 ARM_LDR_IMM (code, dreg, dreg, 0);
3122 il_offset = ins->inst_imm;
3124 if (!cfg->seq_points)
3125 cfg->seq_points = g_ptr_array_new ();
3126 g_ptr_array_add (cfg->seq_points, GUINT_TO_POINTER (il_offset));
3127 g_ptr_array_add (cfg->seq_points, GUINT_TO_POINTER (code - cfg->native_code));
3129 if (cfg->compile_aot) {
3130 guint32 offset = code - cfg->native_code;
3133 ARM_LDR_IMM (code, dreg, info_var->inst_basereg, info_var->inst_offset);
3134 /* Add the offset */
3135 val = ((offset / 4) * sizeof (guint8*)) + G_STRUCT_OFFSET (SeqPointInfo, bp_addrs);
3136 ARM_ADD_REG_IMM (code, dreg, dreg, (val & 0xFF), 0);
3138 * Have to emit nops to keep the difference between the offset
3139 * stored in seq_points and breakpoint instruction constant,
3140 * mono_arch_get_ip_for_breakpoint () depends on this.
3143 ARM_ADD_REG_IMM (code, dreg, dreg, (val & 0xFF00) >> 8, 24);
3147 ARM_ADD_REG_IMM (code, dreg, dreg, (val & 0xFF0000) >> 16, 16);
3150 g_assert (!(val & 0xFF000000));
3151 /* Load the info->bp_addrs [offset], which is either 0 or the address of a trigger page */
3152 ARM_LDR_IMM (code, dreg, dreg, 0);
3154 /* What is faster, a branch or a load ? */
3155 ARM_CMP_REG_IMM (code, dreg, 0, 0);
3156 /* The breakpoint instruction */
3157 ARM_LDR_IMM_COND (code, dreg, dreg, 0, ARMCOND_NE);
3160 * A placeholder for a possible breakpoint inserted by
3161 * mono_arch_set_breakpoint ().
3163 for (i = 0; i < 4; ++i)
3170 ARM_ADDS_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
3173 ARM_ADD_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
3177 ARM_ADCS_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
3180 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
3181 g_assert (imm8 >= 0);
3182 ARM_ADDS_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
3186 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
3187 g_assert (imm8 >= 0);
3188 ARM_ADD_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
3192 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
3193 g_assert (imm8 >= 0);
3194 ARM_ADCS_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
3197 ARM_ADD_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
3198 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, "OverflowException");
3200 case OP_IADD_OVF_UN:
3201 ARM_ADD_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
3202 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, "OverflowException");
3205 ARM_SUB_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
3206 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, "OverflowException");
3208 case OP_ISUB_OVF_UN:
3209 ARM_SUB_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
3210 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_TRUE, PPC_BR_EQ, "OverflowException");
3212 case OP_ADD_OVF_CARRY:
3213 ARM_ADCS_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
3214 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, "OverflowException");
3216 case OP_ADD_OVF_UN_CARRY:
3217 ARM_ADCS_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
3218 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, "OverflowException");
3220 case OP_SUB_OVF_CARRY:
3221 ARM_SBCS_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
3222 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, "OverflowException");
3224 case OP_SUB_OVF_UN_CARRY:
3225 ARM_SBCS_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
3226 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_TRUE, PPC_BR_EQ, "OverflowException");
3230 ARM_SUBS_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
3233 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
3234 g_assert (imm8 >= 0);
3235 ARM_SUBS_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
3238 ARM_SUB_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
3242 ARM_SBCS_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
3246 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
3247 g_assert (imm8 >= 0);
3248 ARM_SUB_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
3252 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
3253 g_assert (imm8 >= 0);
3254 ARM_SBCS_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
3256 case OP_ARM_RSBS_IMM:
3257 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
3258 g_assert (imm8 >= 0);
3259 ARM_RSBS_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
3261 case OP_ARM_RSC_IMM:
3262 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
3263 g_assert (imm8 >= 0);
3264 ARM_RSC_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
3267 ARM_AND_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
3271 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
3272 g_assert (imm8 >= 0);
3273 ARM_AND_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
3281 /* crappy ARM arch doesn't have a DIV instruction */
3282 g_assert_not_reached ();
3284 ARM_ORR_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
3288 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
3289 g_assert (imm8 >= 0);
3290 ARM_ORR_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
3293 ARM_EOR_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
3297 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
3298 g_assert (imm8 >= 0);
3299 ARM_EOR_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
3302 ARM_SHL_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
3307 ARM_SHL_IMM (code, ins->dreg, ins->sreg1, (ins->inst_imm & 0x1f));
3308 else if (ins->dreg != ins->sreg1)
3309 ARM_MOV_REG_REG (code, ins->dreg, ins->sreg1);
3312 ARM_SAR_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
3317 ARM_SAR_IMM (code, ins->dreg, ins->sreg1, (ins->inst_imm & 0x1f));
3318 else if (ins->dreg != ins->sreg1)
3319 ARM_MOV_REG_REG (code, ins->dreg, ins->sreg1);
3322 case OP_ISHR_UN_IMM:
3324 ARM_SHR_IMM (code, ins->dreg, ins->sreg1, (ins->inst_imm & 0x1f));
3325 else if (ins->dreg != ins->sreg1)
3326 ARM_MOV_REG_REG (code, ins->dreg, ins->sreg1);
3329 ARM_SHR_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
3332 ARM_MVN_REG_REG (code, ins->dreg, ins->sreg1);
3335 ARM_RSB_REG_IMM8 (code, ins->dreg, ins->sreg1, 0);
3338 if (ins->dreg == ins->sreg2)
3339 ARM_MUL_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
3341 ARM_MUL_REG_REG (code, ins->dreg, ins->sreg2, ins->sreg1);
3344 g_assert_not_reached ();
3347 /* FIXME: handle ovf/ sreg2 != dreg */
3348 ARM_MUL_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
3349 /* FIXME: MUL doesn't set the C/O flags on ARM */
3351 case OP_IMUL_OVF_UN:
3352 /* FIXME: handle ovf/ sreg2 != dreg */
3353 ARM_MUL_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
3354 /* FIXME: MUL doesn't set the C/O flags on ARM */
3357 code = mono_arm_emit_load_imm (code, ins->dreg, ins->inst_c0);
3360 /* Load the GOT offset */
3361 mono_add_patch_info (cfg, offset, (MonoJumpInfoType)ins->inst_i1, ins->inst_p0);
3362 ARM_LDR_IMM (code, ins->dreg, ARMREG_PC, 0);
3364 *(gpointer*)code = NULL;
3366 /* Load the value from the GOT */
3367 ARM_LDR_REG_REG (code, ins->dreg, ARMREG_PC, ins->dreg);
3369 case OP_ICONV_TO_I4:
3370 case OP_ICONV_TO_U4:
3372 if (ins->dreg != ins->sreg1)
3373 ARM_MOV_REG_REG (code, ins->dreg, ins->sreg1);
3376 int saved = ins->sreg2;
3377 if (ins->sreg2 == ARM_LSW_REG) {
3378 ARM_MOV_REG_REG (code, ARMREG_LR, ins->sreg2);
3381 if (ins->sreg1 != ARM_LSW_REG)
3382 ARM_MOV_REG_REG (code, ARM_LSW_REG, ins->sreg1);
3383 if (saved != ARM_MSW_REG)
3384 ARM_MOV_REG_REG (code, ARM_MSW_REG, saved);
3389 ARM_MVFD (code, ins->dreg, ins->sreg1);
3390 #elif defined(ARM_FPU_VFP)
3391 ARM_CPYD (code, ins->dreg, ins->sreg1);
3394 case OP_FCONV_TO_R4:
3396 ARM_MVFS (code, ins->dreg, ins->sreg1);
3397 #elif defined(ARM_FPU_VFP)
3398 ARM_CVTD (code, ins->dreg, ins->sreg1);
3399 ARM_CVTS (code, ins->dreg, ins->dreg);
3404 * Keep in sync with mono_arch_emit_epilog
3406 g_assert (!cfg->method->save_lmf);
3408 code = emit_load_volatile_arguments (cfg, code);
3410 code = emit_big_add (code, ARMREG_SP, cfg->frame_reg, cfg->stack_usage);
3411 ARM_POP_NWB (code, cfg->used_int_regs | ((1 << ARMREG_SP)) | ((1 << ARMREG_LR)));
3412 mono_add_patch_info (cfg, (guint8*) code - cfg->native_code, MONO_PATCH_INFO_METHOD_JUMP, ins->inst_p0);
3413 if (cfg->compile_aot) {
3414 ARM_LDR_IMM (code, ARMREG_IP, ARMREG_PC, 0);
3416 *(gpointer*)code = NULL;
3418 ARM_LDR_REG_REG (code, ARMREG_PC, ARMREG_PC, ARMREG_IP);
3424 /* ensure ins->sreg1 is not NULL */
3425 ARM_LDR_IMM (code, ARMREG_LR, ins->sreg1, 0);
3429 if (ppc_is_imm16 (cfg->sig_cookie + cfg->stack_usage)) {
3430 ppc_addi (code, ppc_r11, cfg->frame_reg, cfg->sig_cookie + cfg->stack_usage);
3432 ppc_load (code, ppc_r11, cfg->sig_cookie + cfg->stack_usage);
3433 ppc_add (code, ppc_r11, cfg->frame_reg, ppc_r11);
3435 ppc_stw (code, ppc_r11, 0, ins->sreg1);
3445 call = (MonoCallInst*)ins;
3446 if (ins->flags & MONO_INST_HAS_METHOD)
3447 mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_METHOD, call->method);
3449 mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_ABS, call->fptr);
3450 code = emit_call_seq (cfg, code);
3451 code = emit_move_return_value (cfg, ins, code);
3457 case OP_VOIDCALL_REG:
3459 code = emit_call_reg (code, ins->sreg1);
3460 code = emit_move_return_value (cfg, ins, code);
3462 case OP_FCALL_MEMBASE:
3463 case OP_LCALL_MEMBASE:
3464 case OP_VCALL_MEMBASE:
3465 case OP_VCALL2_MEMBASE:
3466 case OP_VOIDCALL_MEMBASE:
3467 case OP_CALL_MEMBASE:
3468 g_assert (arm_is_imm12 (ins->inst_offset));
3469 g_assert (ins->sreg1 != ARMREG_LR);
3470 call = (MonoCallInst*)ins;
3471 if (call->method->klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
3472 ARM_ADD_REG_IMM8 (code, ARMREG_LR, ARMREG_PC, 4);
3473 ARM_LDR_IMM (code, ARMREG_PC, ins->sreg1, ins->inst_offset);
3475 * We can't embed the method in the code stream in PIC code, or
3477 * Instead, we put it in V5 in code emitted by
3478 * mono_arch_emit_imt_argument (), and embed NULL here to
3479 * signal the IMT thunk that the value is in V5.
3481 if (call->dynamic_imt_arg)
3482 *((gpointer*)code) = NULL;
3484 *((gpointer*)code) = (gpointer)call->method;
3487 ARM_MOV_REG_REG (code, ARMREG_LR, ARMREG_PC);
3488 ARM_LDR_IMM (code, ARMREG_PC, ins->sreg1, ins->inst_offset);
3490 code = emit_move_return_value (cfg, ins, code);
3493 /* keep alignment */
3494 int alloca_waste = cfg->param_area;
3497 /* round the size to 8 bytes */
3498 ARM_ADD_REG_IMM8 (code, ins->dreg, ins->sreg1, 7);
3499 ARM_BIC_REG_IMM8 (code, ins->dreg, ins->dreg, 7);
3501 ARM_ADD_REG_IMM8 (code, ins->dreg, ins->dreg, alloca_waste);
3502 ARM_SUB_REG_REG (code, ARMREG_SP, ARMREG_SP, ins->dreg);
3503 /* memzero the area: dreg holds the size, sp is the pointer */
3504 if (ins->flags & MONO_INST_INIT) {
3505 guint8 *start_loop, *branch_to_cond;
3506 ARM_MOV_REG_IMM8 (code, ARMREG_LR, 0);
3507 branch_to_cond = code;
3510 ARM_STR_REG_REG (code, ARMREG_LR, ARMREG_SP, ins->dreg);
3511 arm_patch (branch_to_cond, code);
3512 /* decrement by 4 and set flags */
3513 ARM_SUBS_REG_IMM8 (code, ins->dreg, ins->dreg, 4);
3514 ARM_B_COND (code, ARMCOND_GE, 0);
3515 arm_patch (code - 4, start_loop);
3517 ARM_ADD_REG_IMM8 (code, ins->dreg, ARMREG_SP, alloca_waste);
3522 MonoInst *var = cfg->dyn_call_var;
3524 g_assert (var->opcode == OP_REGOFFSET);
3525 g_assert (arm_is_imm12 (var->inst_offset));
3527 /* lr = args buffer filled by mono_arch_get_dyn_call_args () */
3528 ARM_MOV_REG_REG( code, ARMREG_LR, ins->sreg1);
3530 ARM_MOV_REG_REG( code, ARMREG_IP, ins->sreg2);
3532 /* Save args buffer */
3533 ARM_STR_IMM (code, ARMREG_LR, var->inst_basereg, var->inst_offset);
3535 /* Set stack slots using R0 as scratch reg */
3536 /* MONO_ARCH_DYN_CALL_PARAM_AREA gives the size of stack space available */
3537 for (i = 0; i < DYN_CALL_STACK_ARGS; ++i) {
3538 ARM_LDR_IMM (code, ARMREG_R0, ARMREG_LR, (PARAM_REGS + i) * sizeof (gpointer));
3539 ARM_STR_IMM (code, ARMREG_R0, ARMREG_SP, i * sizeof (gpointer));
3542 /* Set argument registers */
3543 for (i = 0; i < PARAM_REGS; ++i)
3544 ARM_LDR_IMM (code, i, ARMREG_LR, i * sizeof (gpointer));
3547 ARM_MOV_REG_REG (code, ARMREG_LR, ARMREG_PC);
3548 ARM_MOV_REG_REG (code, ARMREG_PC, ARMREG_IP);
3551 ARM_LDR_IMM (code, ARMREG_IP, var->inst_basereg, var->inst_offset);
3552 ARM_STR_IMM (code, ARMREG_R0, ARMREG_IP, G_STRUCT_OFFSET (DynCallArgs, res));
3553 ARM_STR_IMM (code, ARMREG_R1, ARMREG_IP, G_STRUCT_OFFSET (DynCallArgs, res2));
3557 if (ins->sreg1 != ARMREG_R0)
3558 ARM_MOV_REG_REG (code, ARMREG_R0, ins->sreg1);
3559 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_INTERNAL_METHOD,
3560 (gpointer)"mono_arch_throw_exception");
3561 code = emit_call_seq (cfg, code);
3565 if (ins->sreg1 != ARMREG_R0)
3566 ARM_MOV_REG_REG (code, ARMREG_R0, ins->sreg1);
3567 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_INTERNAL_METHOD,
3568 (gpointer)"mono_arch_rethrow_exception");
3569 code = emit_call_seq (cfg, code);
3572 case OP_START_HANDLER: {
3573 MonoInst *spvar = mono_find_spvar_for_region (cfg, bb->region);
3575 if (arm_is_imm12 (spvar->inst_offset)) {
3576 ARM_STR_IMM (code, ARMREG_LR, spvar->inst_basereg, spvar->inst_offset);
3578 code = mono_arm_emit_load_imm (code, ARMREG_IP, spvar->inst_offset);
3579 ARM_STR_REG_REG (code, ARMREG_LR, spvar->inst_basereg, ARMREG_IP);
3583 case OP_ENDFILTER: {
3584 MonoInst *spvar = mono_find_spvar_for_region (cfg, bb->region);
3586 if (ins->sreg1 != ARMREG_R0)
3587 ARM_MOV_REG_REG (code, ARMREG_R0, ins->sreg1);
3588 if (arm_is_imm12 (spvar->inst_offset)) {
3589 ARM_LDR_IMM (code, ARMREG_IP, spvar->inst_basereg, spvar->inst_offset);
3591 g_assert (ARMREG_IP != spvar->inst_basereg);
3592 code = mono_arm_emit_load_imm (code, ARMREG_IP, spvar->inst_offset);
3593 ARM_LDR_REG_REG (code, ARMREG_IP, spvar->inst_basereg, ARMREG_IP);
3595 ARM_MOV_REG_REG (code, ARMREG_PC, ARMREG_IP);
3598 case OP_ENDFINALLY: {
3599 MonoInst *spvar = mono_find_spvar_for_region (cfg, bb->region);
3601 if (arm_is_imm12 (spvar->inst_offset)) {
3602 ARM_LDR_IMM (code, ARMREG_IP, spvar->inst_basereg, spvar->inst_offset);
3604 g_assert (ARMREG_IP != spvar->inst_basereg);
3605 code = mono_arm_emit_load_imm (code, ARMREG_IP, spvar->inst_offset);
3606 ARM_LDR_REG_REG (code, ARMREG_IP, spvar->inst_basereg, ARMREG_IP);
3608 ARM_MOV_REG_REG (code, ARMREG_PC, ARMREG_IP);
3611 case OP_CALL_HANDLER:
3612 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_BB, ins->inst_target_bb);
3616 ins->inst_c0 = code - cfg->native_code;
3619 /*if (ins->inst_target_bb->native_offset) {
3621 //x86_jump_code (code, cfg->native_code + ins->inst_target_bb->native_offset);
3623 mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_BB, ins->inst_target_bb);
3628 ARM_MOV_REG_REG (code, ARMREG_PC, ins->sreg1);
3632 * In the normal case we have:
3633 * ldr pc, [pc, ins->sreg1 << 2]
3636 * ldr lr, [pc, ins->sreg1 << 2]
3638 * After follows the data.
3639 * FIXME: add aot support.
3641 mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_SWITCH, ins->inst_p0);
3642 max_len += 4 * GPOINTER_TO_INT (ins->klass);
3643 if (offset > (cfg->code_size - max_len - 16)) {
3644 cfg->code_size += max_len;
3645 cfg->code_size *= 2;
3646 cfg->native_code = g_realloc (cfg->native_code, cfg->code_size);
3647 code = cfg->native_code + offset;
3649 ARM_LDR_REG_REG_SHIFT (code, ARMREG_PC, ARMREG_PC, ins->sreg1, ARMSHIFT_LSL, 2);
3651 code += 4 * GPOINTER_TO_INT (ins->klass);
3655 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 0, ARMCOND_NE);
3656 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_EQ);
3660 ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
3661 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_LT);
3665 ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
3666 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_LO);
3670 ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
3671 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_GT);
3675 ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
3676 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_HI);
3678 case OP_COND_EXC_EQ:
3679 case OP_COND_EXC_NE_UN:
3680 case OP_COND_EXC_LT:
3681 case OP_COND_EXC_LT_UN:
3682 case OP_COND_EXC_GT:
3683 case OP_COND_EXC_GT_UN:
3684 case OP_COND_EXC_GE:
3685 case OP_COND_EXC_GE_UN:
3686 case OP_COND_EXC_LE:
3687 case OP_COND_EXC_LE_UN:
3688 EMIT_COND_SYSTEM_EXCEPTION (ins->opcode - OP_COND_EXC_EQ, ins->inst_p1);
3690 case OP_COND_EXC_IEQ:
3691 case OP_COND_EXC_INE_UN:
3692 case OP_COND_EXC_ILT:
3693 case OP_COND_EXC_ILT_UN:
3694 case OP_COND_EXC_IGT:
3695 case OP_COND_EXC_IGT_UN:
3696 case OP_COND_EXC_IGE:
3697 case OP_COND_EXC_IGE_UN:
3698 case OP_COND_EXC_ILE:
3699 case OP_COND_EXC_ILE_UN:
3700 EMIT_COND_SYSTEM_EXCEPTION (ins->opcode - OP_COND_EXC_IEQ, ins->inst_p1);
3703 case OP_COND_EXC_IC:
3704 EMIT_COND_SYSTEM_EXCEPTION_FLAGS (ARMCOND_CS, ins->inst_p1);
3706 case OP_COND_EXC_OV:
3707 case OP_COND_EXC_IOV:
3708 EMIT_COND_SYSTEM_EXCEPTION_FLAGS (ARMCOND_VS, ins->inst_p1);
3710 case OP_COND_EXC_NC:
3711 case OP_COND_EXC_INC:
3712 EMIT_COND_SYSTEM_EXCEPTION_FLAGS (ARMCOND_CC, ins->inst_p1);
3714 case OP_COND_EXC_NO:
3715 case OP_COND_EXC_INO:
3716 EMIT_COND_SYSTEM_EXCEPTION_FLAGS (ARMCOND_VC, ins->inst_p1);
3728 EMIT_COND_BRANCH (ins, ins->opcode - OP_IBEQ);
3731 /* floating point opcodes */
3734 if (cfg->compile_aot) {
3735 ARM_LDFD (code, ins->dreg, ARMREG_PC, 0);
3737 *(guint32*)code = ((guint32*)(ins->inst_p0))[0];
3739 *(guint32*)code = ((guint32*)(ins->inst_p0))[1];
3742 /* FIXME: we can optimize the imm load by dealing with part of
3743 * the displacement in LDFD (aligning to 512).
3745 code = mono_arm_emit_load_imm (code, ARMREG_LR, (guint32)ins->inst_p0);
3746 ARM_LDFD (code, ins->dreg, ARMREG_LR, 0);
3750 if (cfg->compile_aot) {
3751 ARM_LDFS (code, ins->dreg, ARMREG_PC, 0);
3753 *(guint32*)code = ((guint32*)(ins->inst_p0))[0];
3756 code = mono_arm_emit_load_imm (code, ARMREG_LR, (guint32)ins->inst_p0);
3757 ARM_LDFS (code, ins->dreg, ARMREG_LR, 0);
3760 case OP_STORER8_MEMBASE_REG:
3761 /* This is generated by the local regalloc pass which runs after the lowering pass */
3762 if (!arm_is_fpimm8 (ins->inst_offset)) {
3763 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_offset);
3764 ARM_ADD_REG_REG (code, ARMREG_LR, ARMREG_LR, ins->inst_destbasereg);
3765 ARM_STFD (code, ins->sreg1, ARMREG_LR, 0);
3767 ARM_STFD (code, ins->sreg1, ins->inst_destbasereg, ins->inst_offset);
3770 case OP_LOADR8_MEMBASE:
3771 /* This is generated by the local regalloc pass which runs after the lowering pass */
3772 if (!arm_is_fpimm8 (ins->inst_offset)) {
3773 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_offset);
3774 ARM_ADD_REG_REG (code, ARMREG_LR, ARMREG_LR, ins->inst_basereg);
3775 ARM_LDFD (code, ins->dreg, ARMREG_LR, 0);
3777 ARM_LDFD (code, ins->dreg, ins->inst_basereg, ins->inst_offset);
3780 case OP_STORER4_MEMBASE_REG:
3781 g_assert (arm_is_fpimm8 (ins->inst_offset));
3782 ARM_STFS (code, ins->sreg1, ins->inst_destbasereg, ins->inst_offset);
3784 case OP_LOADR4_MEMBASE:
3785 g_assert (arm_is_fpimm8 (ins->inst_offset));
3786 ARM_LDFS (code, ins->dreg, ins->inst_basereg, ins->inst_offset);
3788 case OP_ICONV_TO_R_UN: {
3790 tmpreg = ins->dreg == 0? 1: 0;
3791 ARM_CMP_REG_IMM8 (code, ins->sreg1, 0);
3792 ARM_FLTD (code, ins->dreg, ins->sreg1);
3793 ARM_B_COND (code, ARMCOND_GE, 8);
3794 /* save the temp register */
3795 ARM_SUB_REG_IMM8 (code, ARMREG_SP, ARMREG_SP, 8);
3796 ARM_STFD (code, tmpreg, ARMREG_SP, 0);
3797 ARM_LDFD (code, tmpreg, ARMREG_PC, 12);
3798 ARM_FPA_ADFD (code, ins->dreg, ins->dreg, tmpreg);
3799 ARM_LDFD (code, tmpreg, ARMREG_SP, 0);
3800 ARM_ADD_REG_IMM8 (code, ARMREG_SP, ARMREG_SP, 8);
3801 /* skip the constant pool */
3804 *(int*)code = 0x41f00000;
3809 * ldfltd ftemp, [pc, #8] 0x41f00000 0x00000000
3810 * adfltd fdest, fdest, ftemp
3814 case OP_ICONV_TO_R4:
3815 ARM_FLTS (code, ins->dreg, ins->sreg1);
3817 case OP_ICONV_TO_R8:
3818 ARM_FLTD (code, ins->dreg, ins->sreg1);
3821 #elif defined(ARM_FPU_VFP)
3824 if (cfg->compile_aot) {
3825 ARM_FLDD (code, ins->dreg, ARMREG_PC, 0);
3827 *(guint32*)code = ((guint32*)(ins->inst_p0))[0];
3829 *(guint32*)code = ((guint32*)(ins->inst_p0))[1];
3832 /* FIXME: we can optimize the imm load by dealing with part of
3833 * the displacement in LDFD (aligning to 512).
3835 code = mono_arm_emit_load_imm (code, ARMREG_LR, (guint32)ins->inst_p0);
3836 ARM_FLDD (code, ins->dreg, ARMREG_LR, 0);
3840 if (cfg->compile_aot) {
3841 ARM_FLDS (code, ins->dreg, ARMREG_PC, 0);
3843 *(guint32*)code = ((guint32*)(ins->inst_p0))[0];
3845 ARM_CVTS (code, ins->dreg, ins->dreg);
3847 code = mono_arm_emit_load_imm (code, ARMREG_LR, (guint32)ins->inst_p0);
3848 ARM_FLDS (code, ins->dreg, ARMREG_LR, 0);
3849 ARM_CVTS (code, ins->dreg, ins->dreg);
3852 case OP_STORER8_MEMBASE_REG:
3853 /* This is generated by the local regalloc pass which runs after the lowering pass */
3854 if (!arm_is_fpimm8 (ins->inst_offset)) {
3855 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_offset);
3856 ARM_ADD_REG_REG (code, ARMREG_LR, ARMREG_LR, ins->inst_destbasereg);
3857 ARM_FSTD (code, ins->sreg1, ARMREG_LR, 0);
3859 ARM_FSTD (code, ins->sreg1, ins->inst_destbasereg, ins->inst_offset);
3862 case OP_LOADR8_MEMBASE:
3863 /* This is generated by the local regalloc pass which runs after the lowering pass */
3864 if (!arm_is_fpimm8 (ins->inst_offset)) {
3865 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_offset);
3866 ARM_ADD_REG_REG (code, ARMREG_LR, ARMREG_LR, ins->inst_basereg);
3867 ARM_FLDD (code, ins->dreg, ARMREG_LR, 0);
3869 ARM_FLDD (code, ins->dreg, ins->inst_basereg, ins->inst_offset);
3872 case OP_STORER4_MEMBASE_REG:
3873 g_assert (arm_is_fpimm8 (ins->inst_offset));
3874 ARM_CVTD (code, ARM_VFP_F0, ins->sreg1);
3875 ARM_FSTS (code, ARM_VFP_F0, ins->inst_destbasereg, ins->inst_offset);
3877 case OP_LOADR4_MEMBASE:
3878 g_assert (arm_is_fpimm8 (ins->inst_offset));
3879 ARM_FLDS (code, ARM_VFP_F0, ins->inst_basereg, ins->inst_offset);
3880 ARM_CVTS (code, ins->dreg, ARM_VFP_F0);
3882 case OP_ICONV_TO_R_UN: {
3883 g_assert_not_reached ();
3886 case OP_ICONV_TO_R4:
3887 ARM_FMSR (code, ARM_VFP_F0, ins->sreg1);
3888 ARM_FSITOS (code, ARM_VFP_F0, ARM_VFP_F0);
3889 ARM_CVTS (code, ins->dreg, ARM_VFP_F0);
3891 case OP_ICONV_TO_R8:
3892 ARM_FMSR (code, ARM_VFP_F0, ins->sreg1);
3893 ARM_FSITOD (code, ins->dreg, ARM_VFP_F0);
3897 if (mono_method_signature (cfg->method)->ret->type == MONO_TYPE_R4) {
3898 ARM_CVTD (code, ARM_VFP_F0, ins->sreg1);
3899 ARM_FMRS (code, ARMREG_R0, ARM_VFP_F0);
3901 ARM_FMRRD (code, ARMREG_R0, ARMREG_R1, ins->sreg1);
3907 case OP_FCONV_TO_I1:
3908 code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 1, TRUE);
3910 case OP_FCONV_TO_U1:
3911 code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 1, FALSE);
3913 case OP_FCONV_TO_I2:
3914 code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 2, TRUE);
3916 case OP_FCONV_TO_U2:
3917 code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 2, FALSE);
3919 case OP_FCONV_TO_I4:
3921 code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 4, TRUE);
3923 case OP_FCONV_TO_U4:
3925 code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 4, FALSE);
3927 case OP_FCONV_TO_I8:
3928 case OP_FCONV_TO_U8:
3929 g_assert_not_reached ();
3930 /* Implemented as helper calls */
3932 case OP_LCONV_TO_R_UN:
3933 g_assert_not_reached ();
3934 /* Implemented as helper calls */
3936 case OP_LCONV_TO_OVF_I4_2: {
3937 guint8 *high_bit_not_set, *valid_negative, *invalid_negative, *valid_positive;
3939 * Valid ints: 0xffffffff:8000000 to 00000000:0x7f000000
3942 ARM_CMP_REG_IMM8 (code, ins->sreg1, 0);
3943 high_bit_not_set = code;
3944 ARM_B_COND (code, ARMCOND_GE, 0); /*branch if bit 31 of the lower part is not set*/
3946 ARM_CMN_REG_IMM8 (code, ins->sreg2, 1); /*This have the same effect as CMP reg, 0xFFFFFFFF */
3947 valid_negative = code;
3948 ARM_B_COND (code, ARMCOND_EQ, 0); /*branch if upper part == 0xFFFFFFFF (lower part has bit 31 set) */
3949 invalid_negative = code;
3950 ARM_B_COND (code, ARMCOND_AL, 0);
3952 arm_patch (high_bit_not_set, code);
3954 ARM_CMP_REG_IMM8 (code, ins->sreg2, 0);
3955 valid_positive = code;
3956 ARM_B_COND (code, ARMCOND_EQ, 0); /*branch if upper part == 0 (lower part has bit 31 clear)*/
3958 arm_patch (invalid_negative, code);
3959 EMIT_COND_SYSTEM_EXCEPTION_FLAGS (ARMCOND_AL, "OverflowException");
3961 arm_patch (valid_negative, code);
3962 arm_patch (valid_positive, code);
3964 if (ins->dreg != ins->sreg1)
3965 ARM_MOV_REG_REG (code, ins->dreg, ins->sreg1);
3970 ARM_FPA_ADFD (code, ins->dreg, ins->sreg1, ins->sreg2);
3973 ARM_FPA_SUFD (code, ins->dreg, ins->sreg1, ins->sreg2);
3976 ARM_FPA_MUFD (code, ins->dreg, ins->sreg1, ins->sreg2);
3979 ARM_FPA_DVFD (code, ins->dreg, ins->sreg1, ins->sreg2);
3982 ARM_MNFD (code, ins->dreg, ins->sreg1);
3984 #elif defined(ARM_FPU_VFP)
3986 ARM_VFP_ADDD (code, ins->dreg, ins->sreg1, ins->sreg2);
3989 ARM_VFP_SUBD (code, ins->dreg, ins->sreg1, ins->sreg2);
3992 ARM_VFP_MULD (code, ins->dreg, ins->sreg1, ins->sreg2);
3995 ARM_VFP_DIVD (code, ins->dreg, ins->sreg1, ins->sreg2);
3998 ARM_NEGD (code, ins->dreg, ins->sreg1);
4003 g_assert_not_reached ();
4007 ARM_FCMP (code, ARM_FPA_CMF, ins->sreg1, ins->sreg2);
4008 #elif defined(ARM_FPU_VFP)
4009 ARM_CMPD (code, ins->sreg1, ins->sreg2);
4015 ARM_FCMP (code, ARM_FPA_CMF, ins->sreg1, ins->sreg2);
4016 #elif defined(ARM_FPU_VFP)
4017 ARM_CMPD (code, ins->sreg1, ins->sreg2);
4020 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 0, ARMCOND_NE);
4021 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_EQ);
4025 ARM_FCMP (code, ARM_FPA_CMF, ins->sreg1, ins->sreg2);
4026 #elif defined(ARM_FPU_VFP)
4027 ARM_CMPD (code, ins->sreg1, ins->sreg2);
4030 ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
4031 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_MI);
4035 ARM_FCMP (code, ARM_FPA_CMF, ins->sreg1, ins->sreg2);
4036 #elif defined(ARM_FPU_VFP)
4037 ARM_CMPD (code, ins->sreg1, ins->sreg2);
4040 ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
4041 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_MI);
4042 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_VS);
4047 ARM_FCMP (code, ARM_FPA_CMF, ins->sreg2, ins->sreg1);
4048 #elif defined(ARM_FPU_VFP)
4049 ARM_CMPD (code, ins->sreg2, ins->sreg1);
4052 ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
4053 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_MI);
4058 ARM_FCMP (code, ARM_FPA_CMF, ins->sreg2, ins->sreg1);
4059 #elif defined(ARM_FPU_VFP)
4060 ARM_CMPD (code, ins->sreg2, ins->sreg1);
4063 ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
4064 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_MI);
4065 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_VS);
4067 /* ARM FPA flags table:
4068 * N Less than ARMCOND_MI
4069 * Z Equal ARMCOND_EQ
4070 * C Greater Than or Equal ARMCOND_CS
4071 * V Unordered ARMCOND_VS
4074 EMIT_COND_BRANCH (ins, OP_IBEQ - OP_IBEQ);
4077 EMIT_COND_BRANCH (ins, OP_IBNE_UN - OP_IBEQ);
4080 EMIT_COND_BRANCH_FLAGS (ins, ARMCOND_MI); /* N set */
4083 EMIT_COND_BRANCH_FLAGS (ins, ARMCOND_VS); /* V set */
4084 EMIT_COND_BRANCH_FLAGS (ins, ARMCOND_MI); /* N set */
4090 g_assert_not_reached ();
4094 EMIT_COND_BRANCH_FLAGS (ins, ARMCOND_GE);
4096 /* FPA requires EQ even thou the docs suggests that just CS is enough */
4097 EMIT_COND_BRANCH_FLAGS (ins, ARMCOND_EQ);
4098 EMIT_COND_BRANCH_FLAGS (ins, ARMCOND_CS);
4102 EMIT_COND_BRANCH_FLAGS (ins, ARMCOND_VS); /* V set */
4103 EMIT_COND_BRANCH_FLAGS (ins, ARMCOND_GE);
4108 if (ins->dreg != ins->sreg1)
4109 ARM_MVFD (code, ins->dreg, ins->sreg1);
4110 #elif defined(ARM_FPU_VFP)
4111 ARM_ABSD (code, ARM_VFP_D1, ins->sreg1);
4112 ARM_FLDD (code, ARM_VFP_D0, ARMREG_PC, 0);
4114 *(guint32*)code = 0xffffffff;
4116 *(guint32*)code = 0x7fefffff;
4118 ARM_CMPD (code, ARM_VFP_D1, ARM_VFP_D0);
4120 EMIT_COND_SYSTEM_EXCEPTION_FLAGS (ARMCOND_GT, "ArithmeticException");
4121 ARM_CMPD (code, ins->sreg1, ins->sreg1);
4123 EMIT_COND_SYSTEM_EXCEPTION_FLAGS (ARMCOND_VS, "ArithmeticException");
4125 ARM_CPYD (code, ins->dreg, ins->sreg1);
4130 g_warning ("unknown opcode %s in %s()\n", mono_inst_name (ins->opcode), __FUNCTION__);
4131 g_assert_not_reached ();
4134 if ((cfg->opt & MONO_OPT_BRANCH) && ((code - cfg->native_code - offset) > max_len)) {
4135 g_warning ("wrong maximal instruction length of instruction %s (expected %d, got %d)",
4136 mono_inst_name (ins->opcode), max_len, code - cfg->native_code - offset);
4137 g_assert_not_reached ();
4143 last_offset = offset;
4146 cfg->code_len = code - cfg->native_code;
4149 #endif /* DISABLE_JIT */
4151 #ifdef HAVE_AEABI_READ_TP
/* Provided by the ARM EABI runtime (libgcc); declared here since no header exposes it. */
4152 void __aeabi_read_tp (void);
/*
 * mono_arch_register_lowlevel_calls:
 * Register the ARM-specific low-level icalls with the JIT so that patch
 * targets referring to them by name can be resolved later.
 */
4156 mono_arch_register_lowlevel_calls (void)
4158 /* The signature doesn't matter */
4159 mono_register_jit_icall (mono_arm_throw_exception, "mono_arm_throw_exception", mono_create_icall_signature ("void"), TRUE);
4160 mono_register_jit_icall (mono_arm_throw_exception_by_token, "mono_arm_throw_exception_by_token", mono_create_icall_signature ("void"), TRUE);
4162 #ifdef HAVE_AEABI_READ_TP
/* Fast thread-pointer access icall, only on EABI Linux (see HAVE_AEABI_READ_TP above). */
4163 mono_register_jit_icall (__aeabi_read_tp, "__aeabi_read_tp", mono_create_icall_signature ("void"), TRUE);
/*
 * patch_lis_ori:
 * Patch a 32-bit immediate that was materialized as a lis/ori pair.
 * NOTE(review): "lis"/"ori" are PowerPC mnemonics — this macro appears to be
 * copied from the PPC backend; every call site below is preceded by
 * g_assert_not_reached (), so it is effectively dead code on ARM.
 */
4167 #define patch_lis_ori(ip,val) do {\
4168 guint16 *__lis_ori = (guint16*)(ip); \
4169 __lis_ori [1] = (((guint32)(val)) >> 16) & 0xffff; \
4170 __lis_ori [3] = ((guint32)(val)) & 0xffff; \
/*
 * mono_arch_patch_code:
 * Walk the jump-info list JI for METHOD and apply each patch to the emitted
 * native code starting at CODE. Inlined switch jump tables are filled in
 * directly; everything else is resolved via mono_resolve_patch_target () and
 * applied with arm_patch_general ().
 * @run_cctors: when FALSE we are compiling AOT (see compile_aot below).
 */
4174 mono_arch_patch_code (MonoMethod *method, MonoDomain *domain, guint8 *code, MonoJumpInfo *ji, gboolean run_cctors)
4176 MonoJumpInfo *patch_info;
4177 gboolean compile_aot = !run_cctors;
4179 for (patch_info = ji; patch_info; patch_info = patch_info->next) {
4180 unsigned char *ip = patch_info->ip.i + code;
4181 const unsigned char *target;
4183 if (patch_info->type == MONO_PATCH_INFO_SWITCH && !compile_aot) {
4184 gpointer *jt = (gpointer*)(ip + 8);
4186 /* jt is the inlined jump table, 2 instructions after ip
4187 * In the normal case we store the absolute addresses,
4188 * otherwise the displacements.
4190 for (i = 0; i < patch_info->data.table->table_size; i++)
4191 jt [i] = code + (int)patch_info->data.table->table [i];
4194 target = mono_resolve_patch_target (method, domain, code, patch_info, run_cctors);
4197 switch (patch_info->type) {
4198 case MONO_PATCH_INFO_BB:
4199 case MONO_PATCH_INFO_LABEL:
4202 /* No need to patch these */
/* AOT: most patch kinds below are not implemented for ARM AOT — each case
 * asserts before the (PPC-style) patching code it was copied with. */
4207 switch (patch_info->type) {
4208 case MONO_PATCH_INFO_IP:
4209 g_assert_not_reached ();
4210 patch_lis_ori (ip, ip);
4212 case MONO_PATCH_INFO_METHOD_REL:
4213 g_assert_not_reached ();
4214 *((gpointer *)(ip)) = code + patch_info->data.offset;
4216 case MONO_PATCH_INFO_METHODCONST:
4217 case MONO_PATCH_INFO_CLASS:
4218 case MONO_PATCH_INFO_IMAGE:
4219 case MONO_PATCH_INFO_FIELD:
4220 case MONO_PATCH_INFO_VTABLE:
4221 case MONO_PATCH_INFO_IID:
4222 case MONO_PATCH_INFO_SFLDA:
4223 case MONO_PATCH_INFO_LDSTR:
4224 case MONO_PATCH_INFO_TYPE_FROM_HANDLE:
4225 case MONO_PATCH_INFO_LDTOKEN:
4226 g_assert_not_reached ();
4227 /* from OP_AOTCONST : lis + ori */
4228 patch_lis_ori (ip, target);
4230 case MONO_PATCH_INFO_R4:
4231 case MONO_PATCH_INFO_R8:
4232 g_assert_not_reached ();
4233 *((gconstpointer *)(ip + 2)) = patch_info->data.target;
4235 case MONO_PATCH_INFO_EXC_NAME:
4236 g_assert_not_reached ();
4237 *((gconstpointer *)(ip + 1)) = patch_info->data.name;
4239 case MONO_PATCH_INFO_NONE:
4240 case MONO_PATCH_INFO_BB_OVF:
4241 case MONO_PATCH_INFO_EXC_OVF:
4242 /* everything is dealt with at epilog output time */
/* Default JIT path: generic branch/call/constant patching. */
4247 arm_patch_general (domain, ip, target);
4252 * Stack frame layout:
4254 * ------------------- fp
4255 * MonoLMF structure or saved registers
4256 * -------------------
4258 * -------------------
4260 * -------------------
4261 * optional 8 bytes for tracing
4262 * -------------------
4263 * param area size is cfg->param_area
4264 * ------------------- sp
/*
 * mono_arch_emit_prolog:
 * Emit the method prolog: save callee-saved registers (or a full MonoLMF
 * register area when method->save_lmf), allocate the aligned stack frame,
 * emit unwind info for each saved register, spill register-passed arguments
 * into their stack slots, optionally attach the thread / link a new LMF, and
 * initialize the sequence-point variables. Returns (past the visible span)
 * the updated code pointer. See the stack frame layout comment above.
 */
4267 mono_arch_emit_prolog (MonoCompile *cfg)
4269 MonoMethod *method = cfg->method;
4271 MonoMethodSignature *sig;
4273 int alloc_size, pos, max_offset, i, rot_amount;
4278 int prev_sp_offset, reg_offset;
4280 if (mono_jit_trace_calls != NULL && mono_trace_eval (method))
4283 sig = mono_method_signature (method);
/* Initial buffer size guess; grown later by the epilog/exception emitters. */
4284 cfg->code_size = 256 + sig->param_count * 20;
4285 code = cfg->native_code = g_malloc (cfg->code_size);
4287 mono_emit_unwind_op_def_cfa (cfg, code, ARMREG_SP, 0);
4289 ARM_MOV_REG_REG (code, ARMREG_IP, ARMREG_SP);
4291 alloc_size = cfg->stack_offset;
4294 if (!method->save_lmf) {
4295 /* We save SP by storing it into IP and saving IP */
4296 ARM_PUSH (code, (cfg->used_int_regs | (1 << ARMREG_IP) | (1 << ARMREG_LR)));
4297 prev_sp_offset = 8; /* ip and lr */
4298 for (i = 0; i < 16; ++i) {
4299 if (cfg->used_int_regs & (1 << i))
4300 prev_sp_offset += 4;
4302 mono_emit_unwind_op_def_cfa_offset (cfg, code, prev_sp_offset);
/* Record an unwind offset for every register the PUSH above saved. */
4304 for (i = 0; i < 16; ++i) {
4305 if ((cfg->used_int_regs & (1 << i)) || (i == ARMREG_IP) || (i == ARMREG_LR)) {
4306 mono_emit_unwind_op_offset (cfg, code, i, (- prev_sp_offset) + reg_offset);
/* save_lmf path: push r4-r12 and lr (mask 0x5ff0) to form the LMF register area. */
4311 ARM_PUSH (code, 0x5ff0);
4312 prev_sp_offset = 4 * 10; /* all but r0-r3, sp and pc */
4313 mono_emit_unwind_op_def_cfa_offset (cfg, code, prev_sp_offset);
4315 for (i = 0; i < 16; ++i) {
4316 if ((i > ARMREG_R3) && (i != ARMREG_SP) && (i != ARMREG_PC)) {
4317 mono_emit_unwind_op_offset (cfg, code, i, (- prev_sp_offset) + reg_offset);
4321 pos += sizeof (MonoLMF) - prev_sp_offset;
4325 // align to MONO_ARCH_FRAME_ALIGNMENT bytes
4326 if (alloc_size & (MONO_ARCH_FRAME_ALIGNMENT - 1)) {
4327 alloc_size += MONO_ARCH_FRAME_ALIGNMENT - 1;
4328 alloc_size &= ~(MONO_ARCH_FRAME_ALIGNMENT - 1);
4331 /* the stack used in the pushed regs */
4332 if (prev_sp_offset & 4)
4334 cfg->stack_usage = alloc_size;
/* Subtract the frame size from SP; use a rotated imm8 when it fits, else
 * materialize the constant in IP first. */
4336 if ((i = mono_arm_is_rotated_imm8 (alloc_size, &rot_amount)) >= 0) {
4337 ARM_SUB_REG_IMM (code, ARMREG_SP, ARMREG_SP, i, rot_amount);
4339 code = mono_arm_emit_load_imm (code, ARMREG_IP, alloc_size);
4340 ARM_SUB_REG_REG (code, ARMREG_SP, ARMREG_SP, ARMREG_IP);
4342 mono_emit_unwind_op_def_cfa_offset (cfg, code, prev_sp_offset + alloc_size);
4344 if (cfg->frame_reg != ARMREG_SP) {
4345 ARM_MOV_REG_REG (code, cfg->frame_reg, ARMREG_SP);
4346 mono_emit_unwind_op_def_cfa_reg (cfg, code, cfg->frame_reg);
4348 //g_print ("prev_sp_offset: %d, alloc_size:%d\n", prev_sp_offset, alloc_size);
4349 prev_sp_offset += alloc_size;
4351 /* compute max_offset in order to use short forward jumps
4352 * we could skip doing it on arm because the immediate displacement
4353 * for jumps is large enough; it may be useful later for constant pools
4356 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
4357 MonoInst *ins = bb->code;
4358 bb->max_offset = max_offset;
4360 if (cfg->prof_options & MONO_PROFILE_COVERAGE)
4363 MONO_BB_FOR_EACH_INS (bb, ins)
4364 max_offset += ((guint8 *)ins_get_spec (ins->opcode))[MONO_INST_LEN];
4367 /* store runtime generic context */
4368 if (cfg->rgctx_var) {
4369 MonoInst *ins = cfg->rgctx_var;
4371 g_assert (ins->opcode == OP_REGOFFSET);
4373 if (arm_is_imm12 (ins->inst_offset)) {
4374 ARM_STR_IMM (code, MONO_ARCH_RGCTX_REG, ins->inst_basereg, ins->inst_offset);
4376 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_offset);
4377 ARM_STR_REG_REG (code, MONO_ARCH_RGCTX_REG, ins->inst_basereg, ARMREG_LR);
4381 /* load arguments allocated to register from the stack */
4384 cinfo = get_call_info (sig, sig->pinvoke);
4386 if (MONO_TYPE_ISSTRUCT (sig->ret)) {
4387 ArgInfo *ainfo = &cinfo->ret;
4388 inst = cfg->vret_addr;
4389 g_assert (arm_is_imm12 (inst->inst_offset));
4390 ARM_STR_IMM (code, ainfo->reg, inst->inst_basereg, inst->inst_offset);
/* Spill each incoming argument from its arrival register / caller stack slot
 * into the location the register allocator assigned it. */
4392 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
4393 ArgInfo *ainfo = cinfo->args + i;
4394 inst = cfg->args [pos];
4396 if (cfg->verbose_level > 2)
4397 g_print ("Saving argument %d (type: %d)\n", i, ainfo->regtype);
4398 if (inst->opcode == OP_REGVAR) {
4399 if (ainfo->regtype == RegTypeGeneral)
4400 ARM_MOV_REG_REG (code, inst->dreg, ainfo->reg);
4401 else if (ainfo->regtype == RegTypeFP) {
4402 g_assert_not_reached ();
4403 } else if (ainfo->regtype == RegTypeBase) {
4404 if (arm_is_imm12 (prev_sp_offset + ainfo->offset)) {
4405 ARM_LDR_IMM (code, inst->dreg, ARMREG_SP, (prev_sp_offset + ainfo->offset));
/* NOTE(review): the guard above tested prev_sp_offset + ainfo->offset, but
 * this fallback loads inst->inst_offset — looks like it should materialize
 * prev_sp_offset + ainfo->offset instead; confirm against upstream. */
4407 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
4408 ARM_LDR_REG_REG (code, inst->dreg, ARMREG_SP, ARMREG_IP);
4411 g_assert_not_reached ();
4413 if (cfg->verbose_level > 2)
4414 g_print ("Argument %d assigned to register %s\n", pos, mono_arch_regname (inst->dreg));
4416 /* the argument should be put on the stack: FIXME handle size != word */
4417 if (ainfo->regtype == RegTypeGeneral || ainfo->regtype == RegTypeIRegPair) {
4418 switch (ainfo->size) {
4420 if (arm_is_imm12 (inst->inst_offset))
4421 ARM_STRB_IMM (code, ainfo->reg, inst->inst_basereg, inst->inst_offset);
4423 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
4424 ARM_STRB_REG_REG (code, ainfo->reg, inst->inst_basereg, ARMREG_IP);
4428 if (arm_is_imm8 (inst->inst_offset)) {
4429 ARM_STRH_IMM (code, ainfo->reg, inst->inst_basereg, inst->inst_offset);
4431 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
4432 ARM_STRH_REG_REG (code, ainfo->reg, inst->inst_basereg, ARMREG_IP);
/* 8-byte case: store the register pair into two consecutive words. */
4436 g_assert (arm_is_imm12 (inst->inst_offset));
4437 ARM_STR_IMM (code, ainfo->reg, inst->inst_basereg, inst->inst_offset);
4438 g_assert (arm_is_imm12 (inst->inst_offset + 4));
4439 ARM_STR_IMM (code, ainfo->reg + 1, inst->inst_basereg, inst->inst_offset + 4);
4442 if (arm_is_imm12 (inst->inst_offset)) {
4443 ARM_STR_IMM (code, ainfo->reg, inst->inst_basereg, inst->inst_offset);
4445 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
4446 ARM_STR_REG_REG (code, ainfo->reg, inst->inst_basereg, ARMREG_IP);
/* RegTypeBaseGen: 64-bit value split between r3 (low word) and the caller stack. */
4450 } else if (ainfo->regtype == RegTypeBaseGen) {
4451 g_assert (arm_is_imm12 (prev_sp_offset + ainfo->offset));
4452 g_assert (arm_is_imm12 (inst->inst_offset));
4453 ARM_LDR_IMM (code, ARMREG_LR, ARMREG_SP, (prev_sp_offset + ainfo->offset));
4454 ARM_STR_IMM (code, ARMREG_LR, inst->inst_basereg, inst->inst_offset + 4);
4455 ARM_STR_IMM (code, ARMREG_R3, inst->inst_basereg, inst->inst_offset);
/* RegTypeBase: argument arrived entirely on the caller's stack. */
4456 } else if (ainfo->regtype == RegTypeBase) {
4457 if (arm_is_imm12 (prev_sp_offset + ainfo->offset)) {
4458 ARM_LDR_IMM (code, ARMREG_LR, ARMREG_SP, (prev_sp_offset + ainfo->offset));
4460 code = mono_arm_emit_load_imm (code, ARMREG_IP, prev_sp_offset + ainfo->offset);
4461 ARM_LDR_REG_REG (code, ARMREG_LR, ARMREG_SP, ARMREG_IP);
4464 switch (ainfo->size) {
4466 if (arm_is_imm8 (inst->inst_offset)) {
4467 ARM_STRB_IMM (code, ARMREG_LR, inst->inst_basereg, inst->inst_offset);
4469 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
4470 ARM_STRB_REG_REG (code, ARMREG_LR, inst->inst_basereg, ARMREG_IP);
4474 if (arm_is_imm8 (inst->inst_offset)) {
4475 ARM_STRH_IMM (code, ARMREG_LR, inst->inst_basereg, inst->inst_offset);
4477 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
4478 ARM_STRH_REG_REG (code, ARMREG_LR, inst->inst_basereg, ARMREG_IP);
/* 8-byte case: copy both words from the caller frame, one at a time via LR. */
4482 if (arm_is_imm12 (inst->inst_offset)) {
4483 ARM_STR_IMM (code, ARMREG_LR, inst->inst_basereg, inst->inst_offset);
4485 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
4486 ARM_STR_REG_REG (code, ARMREG_LR, inst->inst_basereg, ARMREG_IP);
4488 if (arm_is_imm12 (prev_sp_offset + ainfo->offset + 4)) {
4489 ARM_LDR_IMM (code, ARMREG_LR, ARMREG_SP, (prev_sp_offset + ainfo->offset + 4));
4491 code = mono_arm_emit_load_imm (code, ARMREG_IP, prev_sp_offset + ainfo->offset + 4);
4492 ARM_LDR_REG_REG (code, ARMREG_LR, ARMREG_SP, ARMREG_IP);
4494 if (arm_is_imm12 (inst->inst_offset + 4)) {
4495 ARM_STR_IMM (code, ARMREG_LR, inst->inst_basereg, inst->inst_offset + 4);
4497 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset + 4);
4498 ARM_STR_REG_REG (code, ARMREG_LR, inst->inst_basereg, ARMREG_IP);
4502 if (arm_is_imm12 (inst->inst_offset)) {
4503 ARM_STR_IMM (code, ARMREG_LR, inst->inst_basereg, inst->inst_offset);
4505 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
4506 ARM_STR_REG_REG (code, ARMREG_LR, inst->inst_basereg, ARMREG_IP);
4510 } else if (ainfo->regtype == RegTypeFP) {
4511 g_assert_not_reached ();
/* Struct passed (partially) in registers: store the register part word by
 * word, then memcpy any remainder from the caller stack. */
4512 } else if (ainfo->regtype == RegTypeStructByVal) {
4513 int doffset = inst->inst_offset;
4517 size = mini_type_stack_size_full (cfg->generic_sharing_context, inst->inst_vtype, NULL, sig->pinvoke);
4518 for (cur_reg = 0; cur_reg < ainfo->size; ++cur_reg) {
4519 if (arm_is_imm12 (doffset)) {
4520 ARM_STR_IMM (code, ainfo->reg + cur_reg, inst->inst_basereg, doffset);
4522 code = mono_arm_emit_load_imm (code, ARMREG_IP, doffset);
4523 ARM_STR_REG_REG (code, ainfo->reg + cur_reg, inst->inst_basereg, ARMREG_IP);
4525 soffset += sizeof (gpointer);
4526 doffset += sizeof (gpointer);
4528 if (ainfo->vtsize) {
4529 /* FIXME: handle overrun! with struct sizes not multiple of 4 */
4530 //g_print ("emit_memcpy (prev_sp_ofs: %d, ainfo->offset: %d, soffset: %d)\n", prev_sp_offset, ainfo->offset, soffset);
4531 code = emit_memcpy (code, ainfo->vtsize * sizeof (gpointer), inst->inst_basereg, doffset, ARMREG_SP, prev_sp_offset + ainfo->offset);
4533 } else if (ainfo->regtype == RegTypeStructByAddr) {
4534 g_assert_not_reached ();
4535 /* FIXME: handle overrun! with struct sizes not multiple of 4 */
4536 code = emit_memcpy (code, ainfo->vtsize * sizeof (gpointer), inst->inst_basereg, inst->inst_offset, ainfo->reg, 0);
4538 g_assert_not_reached ();
/* Native-to-managed wrappers must attach the thread to the runtime first. */
4543 if (method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED) {
4544 if (cfg->compile_aot)
4545 /* AOT code is only used in the root domain */
4546 code = mono_arm_emit_load_imm (code, ARMREG_R0, 0);
4548 code = mono_arm_emit_load_imm (code, ARMREG_R0, (guint32)cfg->domain);
4549 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_INTERNAL_METHOD,
4550 (gpointer)"mono_jit_thread_attach");
4551 code = emit_call_seq (cfg, code);
4554 if (method->save_lmf) {
4555 gboolean get_lmf_fast = FALSE;
4557 #ifdef HAVE_AEABI_READ_TP
/* Fast path: read the LMF address straight out of TLS via __aeabi_read_tp. */
4558 gint32 lmf_addr_tls_offset = mono_get_lmf_addr_tls_offset ();
4560 if (lmf_addr_tls_offset != -1) {
4561 get_lmf_fast = TRUE;
4563 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_INTERNAL_METHOD,
4564 (gpointer)"__aeabi_read_tp");
4565 code = emit_call_seq (cfg, code);
4567 ARM_LDR_IMM (code, ARMREG_R0, ARMREG_R0, lmf_addr_tls_offset);
4568 get_lmf_fast = TRUE;
4571 if (!get_lmf_fast) {
4572 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_INTERNAL_METHOD,
4573 (gpointer)"mono_get_lmf_addr");
4574 code = emit_call_seq (cfg, code);
4576 /* we build the MonoLMF structure on the stack - see mini-arm.h */
4577 /* lmf_offset is the offset from the previous stack pointer,
4578 * alloc_size is the total stack space allocated, so the offset
4579 * of MonoLMF from the current stack ptr is alloc_size - lmf_offset.
4580 * The pointer to the struct is put in r1 (new_lmf).
4581 * r2 is used as scratch
4582 * The callee-saved registers are already in the MonoLMF structure
4584 code = emit_big_add (code, ARMREG_R1, ARMREG_SP, alloc_size - lmf_offset);
4585 /* r0 is the result from mono_get_lmf_addr () */
4586 ARM_STR_IMM (code, ARMREG_R0, ARMREG_R1, G_STRUCT_OFFSET (MonoLMF, lmf_addr));
4587 /* new_lmf->previous_lmf = *lmf_addr */
4588 ARM_LDR_IMM (code, ARMREG_R2, ARMREG_R0, G_STRUCT_OFFSET (MonoLMF, previous_lmf));
4589 ARM_STR_IMM (code, ARMREG_R2, ARMREG_R1, G_STRUCT_OFFSET (MonoLMF, previous_lmf));
4590 /* *(lmf_addr) = r1 */
4591 ARM_STR_IMM (code, ARMREG_R1, ARMREG_R0, G_STRUCT_OFFSET (MonoLMF, previous_lmf));
4592 /* Skip method (only needed for trampoline LMF frames) */
4593 ARM_STR_IMM (code, ARMREG_SP, ARMREG_R1, G_STRUCT_OFFSET (MonoLMF, ebp));
4594 /* save the current IP */
4595 ARM_MOV_REG_REG (code, ARMREG_R2, ARMREG_PC);
4596 ARM_STR_IMM (code, ARMREG_R2, ARMREG_R1, G_STRUCT_OFFSET (MonoLMF, eip));
4600 code = mono_arch_instrument_prolog (cfg, mono_trace_enter_method, code, TRUE);
4602 if (cfg->arch.seq_point_info_var) {
4603 MonoInst *ins = cfg->arch.seq_point_info_var;
4605 /* Initialize the variable from a GOT slot */
4606 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_SEQ_POINT_INFO, cfg->method);
4607 ARM_LDR_IMM (code, ARMREG_R0, ARMREG_PC, 0);
4609 *(gpointer*)code = NULL;
4611 ARM_LDR_REG_REG (code, ARMREG_R0, ARMREG_PC, ARMREG_R0);
4613 g_assert (ins->opcode == OP_REGOFFSET);
4615 if (arm_is_imm12 (ins->inst_offset)) {
4616 ARM_STR_IMM (code, ARMREG_R0, ins->inst_basereg, ins->inst_offset);
4618 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_offset);
4619 ARM_STR_REG_REG (code, ARMREG_R0, ins->inst_basereg, ARMREG_LR);
4623 /* Initialize ss_trigger_page_var */
4625 MonoInst *info_var = cfg->arch.seq_point_info_var;
4626 MonoInst *ss_trigger_page_var = cfg->arch.ss_trigger_page_var;
4627 int dreg = ARMREG_LR;
4630 g_assert (info_var->opcode == OP_REGOFFSET);
4631 g_assert (arm_is_imm12 (info_var->inst_offset));
4633 ARM_LDR_IMM (code, dreg, info_var->inst_basereg, info_var->inst_offset);
4634 /* Load the trigger page addr */
4635 ARM_LDR_IMM (code, dreg, dreg, G_STRUCT_OFFSET (SeqPointInfo, ss_trigger_page));
4636 ARM_STR_IMM (code, dreg, ss_trigger_page_var->inst_basereg, ss_trigger_page_var->inst_offset);
4640 cfg->code_len = code - cfg->native_code;
4641 g_assert (cfg->code_len < cfg->code_size);
/*
 * mono_arch_emit_epilog:
 * Emit the method epilog: optional leave-method tracing, LMF unlinking and
 * restore of the saved register area (save_lmf path), or plain frame
 * deallocation plus a POP that restores callee-saved registers and returns
 * by popping into PC. Grows the code buffer first if the estimate does not fit.
 */
4648 mono_arch_emit_epilog (MonoCompile *cfg)
4650 MonoMethod *method = cfg->method;
4651 int pos, i, rot_amount;
4652 int max_epilog_size = 16 + 20*4;
4655 if (cfg->method->save_lmf)
4656 max_epilog_size += 128;
4658 if (mono_jit_trace_calls != NULL)
4659 max_epilog_size += 50;
4661 if (cfg->prof_options & MONO_PROFILE_ENTER_LEAVE)
4662 max_epilog_size += 50;
/* Double the buffer until the worst-case epilog fits (16-byte safety margin). */
4664 while (cfg->code_len + max_epilog_size > (cfg->code_size - 16)) {
4665 cfg->code_size *= 2;
4666 cfg->native_code = g_realloc (cfg->native_code, cfg->code_size);
4667 mono_jit_stats.code_reallocs++;
4671 * Keep in sync with OP_JMP
4673 code = cfg->native_code + cfg->code_len;
4675 if (mono_jit_trace_calls != NULL && mono_trace_eval (method)) {
4676 code = mono_arch_instrument_epilog (cfg, mono_trace_leave_method, code, TRUE);
4680 if (method->save_lmf) {
4682 /* all but r0-r3, sp and pc */
4683 pos += sizeof (MonoLMF) - (4 * 10);
4685 /* r2 contains the pointer to the current LMF */
4686 code = emit_big_add (code, ARMREG_R2, cfg->frame_reg, cfg->stack_usage - lmf_offset);
4687 /* ip = previous_lmf */
4688 ARM_LDR_IMM (code, ARMREG_IP, ARMREG_R2, G_STRUCT_OFFSET (MonoLMF, previous_lmf));
4690 ARM_LDR_IMM (code, ARMREG_LR, ARMREG_R2, G_STRUCT_OFFSET (MonoLMF, lmf_addr));
4691 /* *(lmf_addr) = previous_lmf */
4692 ARM_STR_IMM (code, ARMREG_IP, ARMREG_LR, G_STRUCT_OFFSET (MonoLMF, previous_lmf));
4693 /* FIXME: speedup: there is no actual need to restore the registers if
4694 * we didn't actually change them (idea from Zoltan).
4697 /* point sp at the registers to restore: 10 is 14 -4, because we skip r0-r3 */
4698 ARM_ADD_REG_IMM8 (code, ARMREG_SP, ARMREG_R2, (sizeof (MonoLMF) - 10 * sizeof (gulong)));
4699 ARM_POP_NWB (code, 0xaff0); /* restore ip to sp and lr to pc */
4701 if ((i = mono_arm_is_rotated_imm8 (cfg->stack_usage, &rot_amount)) >= 0) {
4702 ARM_ADD_REG_IMM (code, ARMREG_SP, cfg->frame_reg, i, rot_amount);
/* NOTE(review): this fallback adds to SP itself while the imm branch above
 * bases the add on cfg->frame_reg — only equivalent when frame_reg == SP;
 * confirm against upstream. */
4704 code = mono_arm_emit_load_imm (code, ARMREG_IP, cfg->stack_usage);
4705 ARM_ADD_REG_REG (code, ARMREG_SP, ARMREG_SP, ARMREG_IP);
4707 /* FIXME: add v4 thumb interworking support */
4708 ARM_POP_NWB (code, cfg->used_int_regs | ((1 << ARMREG_SP) | (1 << ARMREG_PC)));
4711 cfg->code_len = code - cfg->native_code;
4713 g_assert (cfg->code_len < cfg->code_size);
4717 /* remove once throw_exception_by_name is eliminated */
/*
 * exception_id_by_name:
 * Map a corlib exception class name to its MONO_EXC_* intrinsic id.
 * Aborts via g_error for any name outside the intrinsic set, so callers
 * may assume a valid index into MONO_EXC_INTRINS_NUM-sized tables.
 */
4719 exception_id_by_name (const char *name)
4721 if (strcmp (name, "IndexOutOfRangeException") == 0)
4722 return MONO_EXC_INDEX_OUT_OF_RANGE;
4723 if (strcmp (name, "OverflowException") == 0)
4724 return MONO_EXC_OVERFLOW;
4725 if (strcmp (name, "ArithmeticException") == 0)
4726 return MONO_EXC_ARITHMETIC;
4727 if (strcmp (name, "DivideByZeroException") == 0)
4728 return MONO_EXC_DIVIDE_BY_ZERO;
4729 if (strcmp (name, "InvalidCastException") == 0)
4730 return MONO_EXC_INVALID_CAST;
4731 if (strcmp (name, "NullReferenceException") == 0)
4732 return MONO_EXC_NULL_REF;
4733 if (strcmp (name, "ArrayTypeMismatchException") == 0)
4734 return MONO_EXC_ARRAY_TYPE_MISMATCH;
4735 g_error ("Unknown intrinsic exception %s\n", name);
/*
 * mono_arch_emit_exceptions:
 * Emit the out-of-line exception-throwing stubs referenced by the method body.
 * Each intrinsic exception class gets at most one stub (deduplicated via
 * exc_throw_pos/exc_throw_found); branches recorded as MONO_PATCH_INFO_EXC
 * are patched to point at the shared stub, which loads the exception type
 * token and calls mono_arch_throw_corlib_exception.
 */
4740 mono_arch_emit_exceptions (MonoCompile *cfg)
4742 MonoJumpInfo *patch_info;
4745 const guint8* exc_throw_pos [MONO_EXC_INTRINS_NUM] = {NULL};
4746 guint8 exc_throw_found [MONO_EXC_INTRINS_NUM] = {0};
4747 int max_epilog_size = 50;
4749 /* count the number of exception infos */
4752 * make sure we have enough space for exceptions
4754 for (patch_info = cfg->patch_info; patch_info; patch_info = patch_info->next) {
4755 if (patch_info->type == MONO_PATCH_INFO_EXC) {
4756 i = exception_id_by_name (patch_info->data.target);
4757 if (!exc_throw_found [i]) {
4758 max_epilog_size += 32;
4759 exc_throw_found [i] = TRUE;
4764 while (cfg->code_len + max_epilog_size > (cfg->code_size - 16)) {
4765 cfg->code_size *= 2;
4766 cfg->native_code = g_realloc (cfg->native_code, cfg->code_size);
4767 mono_jit_stats.code_reallocs++;
4770 code = cfg->native_code + cfg->code_len;
4772 /* add code to raise exceptions */
4773 for (patch_info = cfg->patch_info; patch_info; patch_info = patch_info->next) {
4774 switch (patch_info->type) {
4775 case MONO_PATCH_INFO_EXC: {
4776 MonoClass *exc_class;
4777 unsigned char *ip = patch_info->ip.i + cfg->native_code;
4779 i = exception_id_by_name (patch_info->data.target);
/* Already emitted a stub for this exception: just retarget the branch. */
4780 if (exc_throw_pos [i]) {
4781 arm_patch (ip, exc_throw_pos [i]);
4782 patch_info->type = MONO_PATCH_INFO_NONE;
4785 exc_throw_pos [i] = code;
4787 arm_patch (ip, code);
4789 exc_class = mono_class_from_name (mono_defaults.corlib, "System", patch_info->data.name);
4790 g_assert (exc_class);
/* Stub: r1 = return address (LR), r0 = type token loaded from the code
 * stream, then call the throw helper; the patch info is repurposed to
 * resolve that call. */
4792 ARM_MOV_REG_REG (code, ARMREG_R1, ARMREG_LR);
4793 ARM_LDR_IMM (code, ARMREG_R0, ARMREG_PC, 0);
4794 patch_info->type = MONO_PATCH_INFO_INTERNAL_METHOD;
4795 patch_info->data.name = "mono_arch_throw_corlib_exception";
4796 patch_info->ip.i = code - cfg->native_code;
4798 *(guint32*)(gpointer)code = exc_class->type_token;
4808 cfg->code_len = code - cfg->native_code;
4810 g_assert (cfg->code_len < cfg->code_size);
/* One-shot guard for the TLS-offset initialization below. */
4814 static gboolean tls_offset_inited = FALSE;
/*
 * mono_arch_setup_jit_tls_data:
 * Cache the LMF-related TLS offsets in the file-level statics on first call.
 * NOTE(review): the check-then-set on tls_offset_inited is not locked —
 * presumably callers guarantee single-threaded first use; confirm.
 */
4817 mono_arch_setup_jit_tls_data (MonoJitTlsData *tls)
4819 if (!tls_offset_inited) {
4820 tls_offset_inited = TRUE;
4822 lmf_tls_offset = mono_get_lmf_tls_offset ();
4823 lmf_addr_tls_offset = mono_get_lmf_addr_tls_offset ();
/* Nothing to free on ARM; the TLS offsets cached above are process-global. */
4828 mono_arch_free_jit_tls_data (MonoJitTlsData *tls)
/* No ARM-specific intrinsic instruction replacements. */
4833 mono_arch_emit_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
/* Tree-printing hook: unused on this backend. */
4840 mono_arch_print_tree (MonoInst *tree, int arity)
/* Delegate to the generic helper for loading the current MonoDomain. */
4846 mono_arch_get_domain_intrinsic (MonoCompile* cfg)
4848 return mono_get_domain_intrinsic (cfg);
/* Offset of the patchable word within a patch site. */
4852 mono_arch_get_patch_offset (guint8 *code)
/* ARM has no register windows; nothing to flush. */
4859 mono_arch_flush_register_windows (void)
4863 #ifdef MONO_ARCH_HAVE_IMT
/*
 * mono_arch_emit_imt_argument:
 * Arrange for the IMT/method argument of CALL to be passed in V5.
 * AOT: always pass dynamically (the thunk detects AOT callers); when no
 * explicit imt_arg is given, materialize the method via OP_AOTCONST.
 * Generic-shared code likewise passes in a register; otherwise a plain
 * OP_PCONST of call->method is used.
 */
4866 mono_arch_emit_imt_argument (MonoCompile *cfg, MonoCallInst *call, MonoInst *imt_arg)
4868 if (cfg->compile_aot) {
4869 int method_reg = mono_alloc_ireg (cfg);
4872 call->dynamic_imt_arg = TRUE;
4875 mono_call_inst_add_outarg_reg (cfg, call, imt_arg->dreg, ARMREG_V5, FALSE);
4877 MONO_INST_NEW (cfg, ins, OP_AOTCONST);
4878 ins->dreg = method_reg;
4879 ins->inst_p0 = call->method;
4880 ins->inst_c1 = MONO_PATCH_INFO_METHODCONST;
4881 MONO_ADD_INS (cfg->cbb, ins);
4883 mono_call_inst_add_outarg_reg (cfg, call, method_reg, ARMREG_V5, FALSE);
4885 } else if (cfg->generic_context) {
4887 /* Always pass in a register for simplicity */
4888 call->dynamic_imt_arg = TRUE;
4890 cfg->uses_rgctx_reg = TRUE;
4893 mono_call_inst_add_outarg_reg (cfg, call, imt_arg->dreg, ARMREG_V5, FALSE);
4896 int method_reg = mono_alloc_preg (cfg);
4898 MONO_INST_NEW (cfg, ins, OP_PCONST);
4899 ins->inst_p0 = call->method;
4900 ins->dreg = method_reg;
4901 MONO_ADD_INS (cfg->cbb, ins);
4903 mono_call_inst_add_outarg_reg (cfg, call, method_reg, ARMREG_V5, FALSE);
/*
 * mono_arch_find_imt_method:
 * Recover the IMT method at an IMT call site. CODE points just past a
 * "LDR PC" instruction; the word following it in the code stream is either
 * the method pointer, or 0 for AOT callers (then the method is in V5).
 */
4909 mono_arch_find_imt_method (mgreg_t *regs, guint8 *code)
4911 guint32 *code_ptr = (guint32*)code;
4913 /* The IMT value is stored in the code stream right after the LDC instruction. */
4914 if (!IS_LDR_PC (code_ptr [0])) {
4915 g_warning ("invalid code stream, instruction before IMT value is not a LDC in %s() (code %p value 0: 0x%x -1: 0x%x -2: 0x%x)", __FUNCTION__, code, code_ptr [2], code_ptr [1], code_ptr [0]);
4916 g_assert (IS_LDR_PC (code_ptr [0]));
4918 if (code_ptr [1] == 0)
4919 /* This is AOTed code, the IMT method is in V5 */
4920 return (MonoMethod*)regs [ARMREG_V5];
4922 return (MonoMethod*) code_ptr [1];
/* Recover the 'this' argument of the current call from the register dump. */
4926 mono_arch_find_this_argument (mgreg_t *regs, MonoMethod *method, MonoGenericSharingContext *gsctx)
4928 return mono_arch_get_this_arg_from_call (gsctx, mono_method_signature (method), regs, NULL);
/* Static RGCTX calls pass the vtable in the RGCTX register. */
4932 mono_arch_find_static_call_vtable (mgreg_t *regs, guint8 *code)
4934 return (MonoVTable*) regs [MONO_ARCH_RGCTX_REG];
4937 #define ENABLE_WRONG_METHOD_CHECK 0
/* Worst-case code-size contributions (in bytes, 4 per ARM instruction) used
 * to budget the IMT thunk buffer in mono_arch_build_imt_thunk () below. */
4938 #define BASE_SIZE (6 * 4)
4939 #define BSEARCH_ENTRY_SIZE (4 * 4)
4940 #define CMP_SIZE (3 * 4)
4941 #define BRANCH_SIZE (1 * 4)
4942 #define CALL_SIZE (2 * 4)
4943 #define WMC_SIZE (5 * 4)
/* Byte distance from A to B; negative when B precedes A. */
4944 #define DISTANCE(A, B) (((gint32)(B)) - ((gint32)(A)))
/*
 * arm_emit_value_and_patch_ldr:
 * Emit VALUE as a literal at CODE and patch the earlier PC-relative LDR at
 * TARGET by OR-ing the (12-bit) displacement into its offset field.
 */
4947 arm_emit_value_and_patch_ldr (arminstr_t *code, arminstr_t *target, guint32 value)
/* NOTE(review): delta is unsigned, so the "delta >= 0" half of the assert
 * below is vacuously true; a negative DISTANCE wraps to a huge value and is
 * only caught by the <= 0xFFF check. A gint32 would make the intent real. */
4949 guint32 delta = DISTANCE (target, code);
4951 g_assert (delta >= 0 && delta <= 0xFFF);
4952 *target = *target | delta;
/*
 * mono_arch_build_imt_thunk:
 *
 *   Build the native IMT (interface method table) thunk for VTABLE in DOMAIN.
 * The thunk dispatches on the IMT method (in R0, or V5 when called from AOT
 * code) through the COUNT entries in IMT_ENTRIES, each either a direct
 * equality check or a binary-search comparison node.
 * NOTE(review): this chunk is line-sampled — closing braces, some #endif /
 * else branches and `size` initialization are missing from view; the comments
 * below describe only the visible logic.
 */
4958 mono_arch_build_imt_thunk (MonoVTable *vtable, MonoDomain *domain, MonoIMTCheckItem **imt_entries, int count,
4959 	gpointer fail_tramp)
4961 	int size, i, extra_space = 0;
4962 	arminstr_t *code, *start, *vtable_target = NULL;
4963 	gboolean large_offsets = FALSE;
4964 	guint32 **constant_pool_starts;
4967 	constant_pool_starts = g_new0 (guint32*, count);
4970 	 * We might be called with a fail_tramp from the IMT builder code even if
4971 	 * MONO_ARCH_HAVE_GENERALIZED_IMT_THUNK is not defined.
4973 	//g_assert (!fail_tramp);
	/* Pass 1: size each entry so the total code buffer can be reserved up front. */
4975 	for (i = 0; i < count; ++i) {
4976 		MonoIMTCheckItem *item = imt_entries [i];
4977 		if (item->is_equals) {
			/* A vtable slot too far for a 12-bit LDR offset needs the long
			 * (push/LDM) sequence — budget extra bytes for it. */
4978 			if (!arm_is_imm12 (DISTANCE (vtable, &vtable->vtable[item->value.vtable_slot]))) {
4979 				item->chunk_size += 32;
4980 				large_offsets = TRUE;
4983 			if (item->check_target_idx) {
4984 				if (!item->compare_done)
4985 					item->chunk_size += CMP_SIZE;
4986 				item->chunk_size += BRANCH_SIZE;
4988 #if ENABLE_WRONG_METHOD_CHECK
4989 				item->chunk_size += WMC_SIZE;
4992 			item->chunk_size += CALL_SIZE;
			/* Non-equality node: binary-search compare + conditional branch. */
4994 			item->chunk_size += BSEARCH_ENTRY_SIZE;
4995 			imt_entries [item->check_target_idx]->compare_done = TRUE;
4997 		size += item->chunk_size;
5001 		size += 4 * count; /* The ARM_ADD_REG_IMM to pop the stack */
5003 	start = code = mono_domain_code_reserve (domain, size);
5006 	printf ("building IMT thunk for class %s %s entries %d code size %d code at %p end %p vtable %p\n", vtable->klass->name_space, vtable->klass->name, count, size, start, ((guint8*)start) + size, vtable);
5007 	for (i = 0; i < count; ++i) {
5008 		MonoIMTCheckItem *item = imt_entries [i];
5009 		printf ("method %d (%p) %s vtable slot %p is_equals %d chunk size %d\n", i, item->key, item->key->name, &vtable->vtable [item->value.vtable_slot], item->is_equals, item->chunk_size);
	/* Prologue: save scratch registers (4 slots when large offsets force the
	 * LDM return path, 2 otherwise), fetch the IMT method that the caller
	 * stored right before LR, and load the vtable address into IP. */
5014 	ARM_PUSH4 (code, ARMREG_R0, ARMREG_R1, ARMREG_IP, ARMREG_PC);
5016 	ARM_PUSH2 (code, ARMREG_R0, ARMREG_R1);
5017 	ARM_LDR_IMM (code, ARMREG_R0, ARMREG_LR, -4);
5018 	vtable_target = code;
5019 	ARM_LDR_IMM (code, ARMREG_IP, ARMREG_PC, 0);
5021 	/* R0 == 0 means we are called from AOT code. In this case, V5 contains the IMT method */
5022 	ARM_CMP_REG_IMM8 (code, ARMREG_R0, 0);
5023 	ARM_MOV_REG_REG_COND (code, ARMREG_R0, ARMREG_V5, ARMCOND_EQ);
	/* Pass 2: emit the dispatch code for every entry. */
5025 	for (i = 0; i < count; ++i) {
5026 		MonoIMTCheckItem *item = imt_entries [i];
5027 		arminstr_t *imt_method = NULL, *vtable_offset_ins = NULL;
5028 		gint32 vtable_offset;
5030 		item->code_target = (guint8*)code;
5032 		if (item->is_equals) {
5033 			if (item->check_target_idx) {
5034 				if (!item->compare_done) {
					/* Compare R0 (IMT method) against this entry's key,
					 * loaded PC-relative and patched in later. */
5036 					ARM_LDR_IMM (code, ARMREG_R1, ARMREG_PC, 0);
5037 					ARM_CMP_REG_REG (code, ARMREG_R0, ARMREG_R1);
5039 				item->jmp_code = (guint8*)code;
5040 				ARM_B_COND (code, ARMCOND_NE, 0);
5042 				/*Enable the commented code to assert on wrong method*/
5043 #if ENABLE_WRONG_METHOD_CHECK
5045 				ARM_LDR_IMM (code, ARMREG_R1, ARMREG_PC, 0);
5046 				ARM_CMP_REG_REG (code, ARMREG_R0, ARMREG_R1);
5047 				ARM_B_COND (code, ARMCOND_NE, 1);
5053 			vtable_offset = DISTANCE (vtable, &vtable->vtable[item->value.vtable_slot]);
5054 			if (!arm_is_imm12 (vtable_offset)) {
5056 				 * We need to branch to a computed address but we don't have
5057 				 * a free register to store it, since IP must contain the
5058 				 * vtable address. So we push the two values to the stack, and
5059 				 * load them both using LDM.
5061 				/* Compute target address */
5062 				vtable_offset_ins = code;
5063 				ARM_LDR_IMM (code, ARMREG_R1, ARMREG_PC, 0);
5064 				ARM_LDR_REG_REG (code, ARMREG_R1, ARMREG_IP, ARMREG_R1);
5065 				/* Save it to the fourth slot */
5066 				ARM_STR_IMM (code, ARMREG_R1, ARMREG_SP, 3 * sizeof (gpointer));
5067 				/* Restore registers and branch */
5068 				ARM_POP4 (code, ARMREG_R0, ARMREG_R1, ARMREG_IP, ARMREG_PC);
5070 				code = arm_emit_value_and_patch_ldr (code, vtable_offset_ins, vtable_offset);
				/* Short path: offset fits in 12 bits, branch via LDR into PC. */
5072 				ARM_POP2 (code, ARMREG_R0, ARMREG_R1);
5074 					ARM_ADD_REG_IMM8 (code, ARMREG_SP, ARMREG_SP, 2 * sizeof (gpointer));
5075 				ARM_LDR_IMM (code, ARMREG_PC, ARMREG_IP, vtable_offset);
5079 				code = arm_emit_value_and_patch_ldr (code, imt_method, (guint32)item->key);
5081 			/*must emit after unconditional branch*/
5082 			if (vtable_target) {
5083 				code = arm_emit_value_and_patch_ldr (code, vtable_target, (guint32)vtable);
5084 				item->chunk_size += 4;
5085 				vtable_target = NULL;
5088 			/*We reserve the space for bsearch IMT values after the first entry with an absolute jump*/
5089 			constant_pool_starts [i] = code;
5091 				code += extra_space;
			/* Binary-search node: compare against the key and branch on GE. */
5095 			ARM_LDR_IMM (code, ARMREG_R1, ARMREG_PC, 0);
5096 			ARM_CMP_REG_REG (code, ARMREG_R0, ARMREG_R1);
5098 			item->jmp_code = (guint8*)code;
5099 			ARM_B_COND (code, ARMCOND_GE, 0);
	/* Pass 3: patch forward branches now that all targets are known, and fill
	 * in the constant-pool slots reserved for the bsearch keys. */
5104 	for (i = 0; i < count; ++i) {
5105 		MonoIMTCheckItem *item = imt_entries [i];
5106 		if (item->jmp_code) {
5107 			if (item->check_target_idx)
5108 				arm_patch (item->jmp_code, imt_entries [item->check_target_idx]->code_target);
5110 		if (i > 0 && item->is_equals) {
5112 			arminstr_t *space_start = constant_pool_starts [i];
5113 			for (j = i - 1; j >= 0 && !imt_entries [j]->is_equals; --j) {
5114 				space_start = arm_emit_value_and_patch_ldr (space_start, (arminstr_t*)imt_entries [j]->code_target, (guint32)imt_entries [j]->key);
5121 		char *buff = g_strdup_printf ("thunk_for_class_%s_%s_entries_%d", vtable->klass->name_space, vtable->klass->name, count);
5122 		mono_disassemble_code (NULL, (guint8*)start, size, buff);
5127 	g_free (constant_pool_starts);
	/* Generated code must be flushed from the icache before execution. */
5129 	mono_arch_flush_icache ((guint8*)start, size);
5130 	mono_stats.imt_thunks_size += code - start;
	/* Ensure emission stayed within the buffer reserved by pass 1. */
5132 	g_assert (DISTANCE (start, code) <= size);
/*
 * mono_arch_context_get_int_reg:
 *
 *   Return the value of integer register REG from the machine context CTX.
 * SP is stored separately (ctx->esp); all other registers come from the
 * regs array.
 */
5139 mono_arch_context_get_int_reg (MonoContext *ctx, int reg)
5141 	if (reg == ARMREG_SP)
5142 		return (gpointer)ctx->esp;
5144 		return (gpointer)ctx->regs [reg];
5148  * mono_arch_set_breakpoint:
5150  *   Set a breakpoint at the native code corresponding to JI at NATIVE_OFFSET.
5151  * The location should contain code emitted by OP_SEQ_POINT.
5154 mono_arch_set_breakpoint (MonoJitInfo *ji, guint8 *ip)
5157 	guint32 native_offset = ip - (guint8*)ji->code_start;
	/* AOT path (presumably — the #ifdef lines are missing from this chunk):
	 * record the breakpoint in the per-method SeqPointInfo table that AOTed
	 * sequence-point code indexes by native_offset / 4. */
5160 		SeqPointInfo *info = mono_arch_get_seq_point_info (mono_domain_get (), ji->code_start);
5162 		g_assert (native_offset % 4 == 0);
		/* Must not already have a breakpoint at this offset. */
5163 		g_assert (info->bp_addrs [native_offset / 4] == 0);
5164 		info->bp_addrs [native_offset / 4] = bp_trigger_page;
5166 		int dreg = ARMREG_LR;
5168 		/* Read from another trigger page */
		/* JIT path: patch in a load of bp_trigger_page (PC-relative constant)
		 * followed by a load through it; the load faults because the page is
		 * not readable, raising the breakpoint signal. */
5169 		ARM_LDR_IMM (code, dreg, ARMREG_PC, 0);
5171 		*(int*)code = (int)bp_trigger_page;
5173 		ARM_LDR_IMM (code, dreg, dreg, 0);
5175 		mono_arch_flush_icache (code - 16, 16);
5178 		/* This is currently implemented by emitting an SWI instruction, which
5179 		 * qemu/linux seems to convert to a SIGILL.
		/* Alternative encoding: SWI #8 (0xef000008). */
5181 		*(int*)code = (0xef << 24) | 8;
5183 		mono_arch_flush_icache (code - 4, 4);
5189  * mono_arch_clear_breakpoint:
5191  *   Clear the breakpoint at IP.
5194 mono_arch_clear_breakpoint (MonoJitInfo *ji, guint8 *ip)
	/* AOT path: remove the trigger-page entry previously installed by
	 * mono_arch_set_breakpoint (asserted to be present). */
5200 	guint32 native_offset = ip - (guint8*)ji->code_start;
5201 	SeqPointInfo *info = mono_arch_get_seq_point_info (mono_domain_get (), ji->code_start);
5203 	g_assert (native_offset % 4 == 0);
5204 	g_assert (info->bp_addrs [native_offset / 4] == bp_trigger_page);
5205 	info->bp_addrs [native_offset / 4] = 0;
	/* JIT path: overwrite 4 instructions at IP (loop body missing from this
	 * line-sampled chunk — presumably re-emitting NOPs) and flush the icache. */
5207 	for (i = 0; i < 4; ++i)
5210 	mono_arch_flush_icache (ip, code - ip);
5215  * mono_arch_start_single_stepping:
5217  *   Start single stepping.
5220 mono_arch_start_single_stepping (void)
	/* Revoke all access to the single-step trigger page: the per-sequence-point
	 * read emitted by OP_SEQ_POINT will now fault, signalling the debugger. */
5222 	mono_mprotect (ss_trigger_page, mono_pagesize (), 0);
5226  * mono_arch_stop_single_stepping:
5228  *   Stop single stepping.
5231 mono_arch_stop_single_stepping (void)
	/* Make the trigger page readable again so sequence-point reads succeed
	 * silently and execution proceeds at full speed. */
5233 	mono_mprotect (ss_trigger_page, mono_pagesize (), MONO_MMAP_READ);
/* Signal delivered when a trigger-page access faults. The two definitions are
 * platform-conditional — the #if/#else/#endif lines are missing from this
 * line-sampled chunk; presumably SIGBUS on one platform, SIGSEGV elsewhere. */
5237 #define DBG_SIGNAL SIGBUS
5239 #define DBG_SIGNAL SIGSEGV
5243  * mono_arch_is_single_step_event:
5245  *   Return whenever the machine state in SIGCTX corresponds to a single
5249 mono_arch_is_single_step_event (void *info, void *sigctx)
5251 	siginfo_t *sinfo = info;
	/* A single-step event is a fault whose address falls inside (a 128-byte
	 * tolerance window of) the single-step trigger page. */
5253 	/* Sometimes the address is off by 4 */
5254 	if (sinfo->si_addr >= ss_trigger_page && (guint8*)sinfo->si_addr <= (guint8*)ss_trigger_page + 128)
5261  * mono_arch_is_breakpoint_event:
5263  *   Return whenever the machine state in SIGCTX corresponds to a breakpoint event.
5266 mono_arch_is_breakpoint_event (void *info, void *sigctx)
5268 	siginfo_t *sinfo = info;
	/* A breakpoint event is the debug signal with a fault address inside
	 * (a 128-byte tolerance window of) the breakpoint trigger page. */
5270 	if (sinfo->si_signo == DBG_SIGNAL) {
5271 		/* Sometimes the address is off by 4 */
5272 		if (sinfo->si_addr >= bp_trigger_page && (guint8*)sinfo->si_addr <= (guint8*)bp_trigger_page + 128)
/*
 * mono_arch_get_ip_for_breakpoint:
 *
 *   Compute the IP at which the breakpoint logically occurred, starting from
 * the context's IP. NOTE(review): the adjustment logic that follows is missing
 * from this line-sampled chunk; only the IP fetch is visible.
 */
5282 mono_arch_get_ip_for_breakpoint (MonoJitInfo *ji, MonoContext *ctx)
5284 	guint8 *ip = MONO_CONTEXT_GET_IP (ctx);
/*
 * mono_arch_get_ip_for_single_step:
 *
 *   Compute the IP at which the single-step event logically occurred, starting
 * from the context's IP. NOTE(review): the adjustment logic that follows is
 * missing from this line-sampled chunk; only the IP fetch is visible.
 */
5295 mono_arch_get_ip_for_single_step (MonoJitInfo *ji, MonoContext *ctx)
5297 	guint8 *ip = MONO_CONTEXT_GET_IP (ctx);
5305  * mono_arch_skip_breakpoint:
5307  *   See mini-amd64.c for docs.
5310 mono_arch_skip_breakpoint (MonoContext *ctx)
	/* Resume past the faulting 4-byte ARM instruction by advancing the IP. */
5312 	MONO_CONTEXT_SET_IP (ctx, (guint8*)MONO_CONTEXT_GET_IP (ctx) + 4);
5316  * mono_arch_skip_single_step:
5318  *   See mini-amd64.c for docs.
5321 mono_arch_skip_single_step (MonoContext *ctx)
	/* Resume past the faulting 4-byte ARM instruction by advancing the IP. */
5323 	MONO_CONTEXT_SET_IP (ctx, (guint8*)MONO_CONTEXT_GET_IP (ctx) + 4);
5327 * mono_arch_get_seq_point_info:
5329 * See mini-amd64.c for docs.
5332 mono_arch_get_seq_point_info (MonoDomain *domain, guint8 *code)
5337 // FIXME: Add a free function
5339 mono_domain_lock (domain);
5340 info = g_hash_table_lookup (domain_jit_info (domain)->arch_seq_points,
5342 mono_domain_unlock (domain);
5345 ji = mono_jit_info_table_find (domain, (char*)code);
5348 info = g_malloc0 (sizeof (SeqPointInfo) + ji->code_size);
5350 info->ss_trigger_page = ss_trigger_page;
5351 info->bp_trigger_page = bp_trigger_page;
5353 mono_domain_lock (domain);
5354 g_hash_table_insert (domain_jit_info (domain)->arch_seq_points,
5356 mono_domain_unlock (domain);