2 * mini-arm.c: ARM backend for the Mono code generator
5 * Paolo Molaro (lupus@ximian.com)
6 * Dietmar Maurer (dietmar@ximian.com)
8 * (C) 2003 Ximian, Inc.
13 #include <mono/metadata/appdomain.h>
14 #include <mono/metadata/debug-helpers.h>
21 #include "mono/arch/arm/arm-fpa-codegen.h"
22 #elif defined(ARM_FPU_VFP)
23 #include "mono/arch/arm/arm-vfp-codegen.h"
26 /* This mutex protects architecture specific caches */
27 #define mono_mini_arch_lock() EnterCriticalSection (&mini_arch_mutex)
28 #define mono_mini_arch_unlock() LeaveCriticalSection (&mini_arch_mutex)
29 static CRITICAL_SECTION mini_arch_mutex;
31 static int v5_supported = 0;
32 static int thumb_supported = 0;
34 static int mono_arm_is_rotated_imm8 (guint32 val, gint *rot_amount);
38 * floating point support: on ARM it is a mess, there are at least 3
39 * different setups, each of which is binary-incompatible with the others.
40 * 1) FPA: old and ugly, but unfortunately what current distros use
41 * the double binary format has the two words swapped. 8 double registers.
42 * Implemented usually by kernel emulation.
43 * 2) softfloat: the compiler emulates all the fp ops. Usually uses the
44 * ugly swapped double format (I guess a softfloat-vfp exists, too, though).
45 * 3) VFP: the new and actually sensible and useful FP support. Implemented
46 * in HW or kernel-emulated, requires new tools. I think this is what symbian uses.
48 * The plan is to write the FPA support first. softfloat can be tested in a chroot.
/*
 * Immediate-range predicates and instruction-pattern constants.
 * arm_is_imm12/arm_is_imm8/arm_is_fpimm8 test whether an offset fits the
 * signed immediate field of the corresponding ARM load/store encoding.
 * LDR_MASK/LDR_PC_VAL/IS_LDR_PC match an "ldr pc, [rX, #off]" instruction
 * word; ADD_LR_PC_4 and MOV_LR_PC are the fixed instruction words looked
 * for by mono_arch_get_vcall_slot () when walking back over a call site.
 */
50 int mono_exc_esp_offset = 0;
52 #define arm_is_imm12(v) ((v) > -4096 && (v) < 4096)
53 #define arm_is_imm8(v) ((v) > -256 && (v) < 256)
54 #define arm_is_fpimm8(v) ((v) >= -1020 && (v) <= 1020)
56 #define LDR_MASK ((0xf << ARMCOND_SHIFT) | (3 << 26) | (1 << 22) | (1 << 20) | (15 << 12))
57 #define LDR_PC_VAL ((ARMCOND_AL << ARMCOND_SHIFT) | (1 << 26) | (0 << 22) | (1 << 20) | (15 << 12))
58 #define IS_LDR_PC(val) (((val) & LDR_MASK) == LDR_PC_VAL)
60 #define ADD_LR_PC_4 ((ARMCOND_AL << ARMCOND_SHIFT) | (1 << 25) | (1 << 23) | (ARMREG_PC << 16) | (ARMREG_LR << 12) | 4)
61 #define MOV_LR_PC ((ARMCOND_AL << ARMCOND_SHIFT) | (1 << 24) | (0xa << 20) | (ARMREG_LR << 12) | ARMREG_PC)
64 void mini_emit_memcpy2 (MonoCompile *cfg, int destreg, int doffset, int srcreg, int soffset, int size, int align);
/*
 * mono_arch_regname:
 *   Map an integer register index to a human-readable ARM register name
 *   (for debug/trace output).  Only indices 0-15 are covered by the table.
 * NOTE(review): this chunk is a line-sampled fragment -- the return
 * statements and closing brace are not visible here.
 */
67 mono_arch_regname (int reg)
69 static const char * rnames[] = {
70 "arm_r0", "arm_r1", "arm_r2", "arm_r3", "arm_v1",
71 "arm_v2", "arm_v3", "arm_v4", "arm_v5", "arm_v6",
72 "arm_v7", "arm_fp", "arm_ip", "arm_sp", "arm_lr",
75 if (reg >= 0 && reg < 16)
/*
 * mono_arch_fregname:
 *   Map a floating-point register index to a printable name.  The guard
 *   accepts indices 0-31; the visible table names f0-f29 (remaining
 *   entries presumably follow on elided lines -- TODO confirm).
 */
81 mono_arch_fregname (int reg)
83 static const char * rnames[] = {
84 "arm_f0", "arm_f1", "arm_f2", "arm_f3", "arm_f4",
85 "arm_f5", "arm_f6", "arm_f7", "arm_f8", "arm_f9",
86 "arm_f10", "arm_f11", "arm_f12", "arm_f13", "arm_f14",
87 "arm_f15", "arm_f16", "arm_f17", "arm_f18", "arm_f19",
88 "arm_f20", "arm_f21", "arm_f22", "arm_f23", "arm_f24",
89 "arm_f25", "arm_f26", "arm_f27", "arm_f28", "arm_f29",
92 if (reg >= 0 && reg < 32)
/*
 * emit_big_add:
 *   Emit code computing dreg = sreg + imm.  If imm fits an ARM rotated
 *   8-bit immediate a single ADD is emitted; otherwise imm is first
 *   loaded into dreg and then added to sreg, which is why dreg must not
 *   alias sreg in that path (asserted below).
 */
98 emit_big_add (guint8 *code, int dreg, int sreg, int imm)
100 int imm8, rot_amount;
101 if ((imm8 = mono_arm_is_rotated_imm8 (imm, &rot_amount)) >= 0) {
102 ARM_ADD_REG_IMM (code, dreg, sreg, imm8, rot_amount);
105 g_assert (dreg != sreg);
106 code = mono_arm_emit_load_imm (code, dreg, imm);
107 ARM_ADD_REG_REG (code, dreg, dreg, sreg);
/*
 * emit_memcpy:
 *   Emit a word-by-word copy of 'size' bytes from sreg+soffset to
 *   dreg+doffset.  Large copies (> 4 pointers) use a counted r0-r3 loop;
 *   smaller copies use unrolled ldr/str pairs through LR, rebasing into
 *   r0/r1 first when the offsets do not fit an imm12 field.
 * NOTE(review): size is assumed to be a multiple of the word size at the
 *   point of the loop (the SUBS decrements by 4) -- elided lines likely
 *   handle the sub-word tail; the final g_assert expects size == 0.
 */
112 emit_memcpy (guint8 *code, int size, int dreg, int doffset, int sreg, int soffset)
114 /* we can use r0-r3, since this is called only for incoming args on the stack */
115 if (size > sizeof (gpointer) * 4) {
117 code = emit_big_add (code, ARMREG_R0, sreg, soffset);
118 code = emit_big_add (code, ARMREG_R1, dreg, doffset);
/* copy loop: r0 = src cursor, r1 = dst cursor, r2 = remaining bytes */
119 start_loop = code = mono_arm_emit_load_imm (code, ARMREG_R2, size);
120 ARM_LDR_IMM (code, ARMREG_R3, ARMREG_R0, 0);
121 ARM_STR_IMM (code, ARMREG_R3, ARMREG_R1, 0);
122 ARM_ADD_REG_IMM8 (code, ARMREG_R0, ARMREG_R0, 4);
123 ARM_ADD_REG_IMM8 (code, ARMREG_R1, ARMREG_R1, 4);
124 ARM_SUBS_REG_IMM8 (code, ARMREG_R2, ARMREG_R2, 4);
/* branch back while r2 != 0; target patched immediately below */
125 ARM_B_COND (code, ARMCOND_NE, 0);
126 arm_patch (code - 4, start_loop);
/* small-copy path: offsets must stay within the ldr/str imm12 range */
129 if (arm_is_imm12 (doffset) && arm_is_imm12 (doffset + size) &&
130 arm_is_imm12 (soffset) && arm_is_imm12 (soffset + size)) {
132 ARM_LDR_IMM (code, ARMREG_LR, sreg, soffset);
133 ARM_STR_IMM (code, ARMREG_LR, dreg, doffset);
/* offsets too large: rebase src/dst into r0/r1 and copy from offset 0 */
139 code = emit_big_add (code, ARMREG_R0, sreg, soffset);
140 code = emit_big_add (code, ARMREG_R1, dreg, doffset);
141 doffset = soffset = 0;
143 ARM_LDR_IMM (code, ARMREG_LR, ARMREG_R0, soffset);
144 ARM_STR_IMM (code, ARMREG_LR, ARMREG_R1, doffset);
150 g_assert (size == 0);
/*
 * emit_call_reg:
 *   Emit an indirect call through 'reg'.  One path uses BLX (presumably
 *   guarded by v5_supported on an elided line -- TODO confirm); the
 *   fallback manually sets LR to PC and then jumps via a mov to PC.
 */
155 emit_call_reg (guint8 *code, int reg)
158 ARM_BLX_REG (code, reg);
160 ARM_MOV_REG_REG (code, ARMREG_LR, ARMREG_PC);
164 ARM_MOV_REG_REG (code, ARMREG_PC, reg);
/*
 * emit_call_seq:
 *   Emit a call sequence for dynamic methods: load the target address
 *   from a PC-relative literal slot into IP (the slot is emitted inline
 *   and initialized to NULL, to be patched later), then call through IP.
 */
170 emit_call_seq (MonoCompile *cfg, guint8 *code)
172 if (cfg->method->dynamic) {
173 ARM_LDR_IMM (code, ARMREG_IP, ARMREG_PC, 0);
/* inline literal pool slot; NULL until the real target is patched in */
175 *(gpointer*)code = NULL;
177 code = emit_call_reg (code, ARMREG_IP);
/*
 * emit_move_return_value:
 *   After a call instruction, move the return value from its ABI
 *   register into ins->dreg.  Visible here: the FPA float-call case,
 *   which copies out of ARM_FPA_F0 unless dreg already is F0.
 */
185 emit_move_return_value (MonoCompile *cfg, MonoInst *ins, guint8 *code)
187 switch (ins->opcode) {
190 case OP_FCALL_MEMBASE:
192 if (ins->dreg != ARM_FPA_F0)
193 ARM_MVFD (code, ins->dreg, ARM_FPA_F0);
202 * mono_arch_get_argument_info:
203 * @csig: a method signature
204 * @param_count: the number of parameters to consider
205 * @arg_info: an array to store the result infos
207 * Gathers information on parameters such as size, alignment and
208 * padding. arg_info should be large enough to hold param_count + 1 entries.
210 * Returns the size of the activation frame.
/*
 * mono_arch_get_argument_info:
 *   Compute per-argument size/offset/padding for 'param_count' params of
 *   'csig', filling arg_info[0..param_count], and return the activation
 *   frame size.  A struct return and a 'this' pointer each add one
 *   pointer word to the frame before the explicit parameters.
 */
213 mono_arch_get_argument_info (MonoMethodSignature *csig, int param_count, MonoJitArgumentInfo *arg_info)
215 int k, frame_size = 0;
216 int size, align, pad;
219 if (MONO_TYPE_ISSTRUCT (csig->ret)) {
220 frame_size += sizeof (gpointer);
224 arg_info [0].offset = offset;
227 frame_size += sizeof (gpointer);
231 arg_info [0].size = frame_size;
233 for (k = 0; k < param_count; k++) {
/* pinvoke sigs use native stack sizes; managed sigs use mini sizes
 * (the selecting condition is on an elided line -- TODO confirm) */
236 size = mono_type_native_stack_size (csig->params [k], &align);
238 size = mini_type_stack_size (NULL, csig->params [k], &align);
240 /* ignore alignment for now */
/* round frame_size up to 'align' and record the padding inserted */
243 frame_size += pad = (align - (frame_size & (align - 1))) & (align - 1);
244 arg_info [k].pad = pad;
246 arg_info [k + 1].pad = 0;
247 arg_info [k + 1].size = size;
249 arg_info [k + 1].offset = offset;
/* final frame padding up to the arch frame alignment */
253 align = MONO_ARCH_FRAME_ALIGNMENT;
254 frame_size += pad = (align - (frame_size & (align - 1))) & (align - 1);
255 arg_info [k].pad = pad;
/*
 * decode_vcall_slot_from_ldr:
 *   Decode an "ldr pc, [rN, #off]" instruction word: extract the base
 *   register (bits 19:16) and the 12-bit immediate, negating the offset
 *   when the U bit (bit 23) is clear, and report it via *displacement.
 */
261 decode_vcall_slot_from_ldr (guint32 ldr, gpointer *regs, int *displacement)
265 reg = (ldr >> 16 ) & 0xf;
266 offset = ldr & 0xfff;
267 if (((ldr >> 23) & 1) == 0) /*U bit, 0 means negative and 1 positive*/
269 /*g_print ("found vcall at r%d + %d for code at %p 0x%x\n", reg, offset, code, *code);*/
272 *displacement = offset;
/*
 * mono_arch_get_vcall_slot:
 *   Walk backwards from the return address 'code_ptr' to recognize a
 *   virtual/interface call sequence and recover the vtable slot base
 *   register + displacement.  Direct branches are deliberately ignored.
 */
277 mono_arch_get_vcall_slot (guint8 *code_ptr, gpointer *regs, int *displacement)
279 guint32* code = (guint32*)code_ptr;
281 /* Locate the address of the method-specific trampoline. The call using
282 the vtable slot that took the processing flow to 'arch_create_jit_trampoline'
283 looks something like this:
292 The call sequence could be also:
295 function pointer literal
299 Note that on ARM5+ we can use one instruction instead of the last two.
300 Therefore, we need to locate the 'ldr rA' instruction to know which
301 register was used to hold the method address.
304 /* This is the instruction after "ldr pc, xxx", "mov pc, xxx" or "bl xxx" could be either the IMT value or some other instruction*/
307 /* Three possible code sequences can happen here:
311 * ldr pc, [rX - #offset]
317 * ldr pc, [rX - #offset]
319 * direct branch with bl:
323 * direct branch with mov:
327 * We only need to identify interface and virtual calls, the others can be ignored.
/* case 1: "add lr, pc, #4" immediately before the "ldr pc, ..." */
330 if (IS_LDR_PC (code [-1]) && code [-2] == ADD_LR_PC_4)
331 return decode_vcall_slot_from_ldr (code [-1], regs, displacement);
/* case 2: "mov lr, pc" followed by "ldr pc, ..." at the return address */
333 if (IS_LDR_PC (code [0]) && code [-1] == MOV_LR_PC)
334 return decode_vcall_slot_from_ldr (code [0], regs, displacement);
/*
 * mono_arch_get_vcall_slot_addr:
 *   Convenience wrapper: resolve the vtable base and displacement for
 *   the call site at 'code' and return the slot's absolute address.
 */
340 mono_arch_get_vcall_slot_addr (guint8* code, gpointer *regs)
344 vt = mono_arch_get_vcall_slot (code, regs, &displacement);
347 return (gpointer*)((char*)vt + displacement);
350 #define MAX_ARCH_DELEGATE_PARAMS 3
353 mono_arch_get_delegate_invoke_impl (MonoMethodSignature *sig, gboolean has_target)
355 guint8 *code, *start;
357 /* FIXME: Support more cases */
358 if (MONO_TYPE_ISSTRUCT (sig->ret))
362 static guint8* cached = NULL;
363 mono_mini_arch_lock ();
365 mono_mini_arch_unlock ();
369 start = code = mono_global_codeman_reserve (12);
371 /* Replace the this argument with the target */
372 ARM_LDR_IMM (code, ARMREG_IP, ARMREG_R0, G_STRUCT_OFFSET (MonoDelegate, method_ptr));
373 ARM_LDR_IMM (code, ARMREG_R0, ARMREG_R0, G_STRUCT_OFFSET (MonoDelegate, target));
374 ARM_MOV_REG_REG (code, ARMREG_PC, ARMREG_IP);
376 g_assert ((code - start) <= 12);
378 mono_arch_flush_icache (code, 12);
380 mono_mini_arch_unlock ();
383 static guint8* cache [MAX_ARCH_DELEGATE_PARAMS + 1] = {NULL};
386 if (sig->param_count > MAX_ARCH_DELEGATE_PARAMS)
388 for (i = 0; i < sig->param_count; ++i)
389 if (!mono_is_regsize_var (sig->params [i]))
392 mono_mini_arch_lock ();
393 code = cache [sig->param_count];
395 mono_mini_arch_unlock ();
399 size = 8 + sig->param_count * 4;
400 start = code = mono_global_codeman_reserve (size);
402 ARM_LDR_IMM (code, ARMREG_IP, ARMREG_R0, G_STRUCT_OFFSET (MonoDelegate, method_ptr));
403 /* slide down the arguments */
404 for (i = 0; i < sig->param_count; ++i) {
405 ARM_MOV_REG_REG (code, (ARMREG_R0 + i), (ARMREG_R0 + i + 1));
407 ARM_MOV_REG_REG (code, ARMREG_PC, ARMREG_IP);
409 g_assert ((code - start) <= size);
411 mono_arch_flush_icache (code, size);
412 cache [sig->param_count] = start;
413 mono_mini_arch_unlock ();
/*
 * mono_arch_get_this_arg_from_call:
 *   Recover the 'this' pointer from the saved register state of a call:
 *   normally r0, but r1 when a struct return occupies r0 (hidden
 *   return-buffer pointer passed first).
 */
421 mono_arch_get_this_arg_from_call (MonoGenericSharingContext *gsctx, MonoMethodSignature *sig, gssize *regs, guint8 *code)
423 /* FIXME: handle returning a struct */
424 if (MONO_TYPE_ISSTRUCT (sig->ret))
425 return (gpointer)regs [ARMREG_R1];
426 return (gpointer)regs [ARMREG_R0];
/*
 * Architecture setup/teardown entry points.  Only the signatures and the
 * mutex initialization survive in this fragment; the function bodies are
 * otherwise elided in this chunk.
 */
430 * Initialize the cpu to execute managed code.
433 mono_arch_cpu_init (void)
438 * Initialize architecture specific code.
441 mono_arch_init (void)
/* backs the mono_mini_arch_lock()/unlock() macros defined above */
443 InitializeCriticalSection (&mini_arch_mutex);
447 * Cleanup architecture specific code.
450 mono_arch_cleanup (void)
/*
 * mono_arch_cpu_optimizazions: (sic -- historical name, part of the API)
 *   Detect CPU capabilities by scanning /proc/cpuinfo: the "Processor"
 *   line's "(vN" token sets v5_supported for ARMv5/6/7, and a "thumb"
 *   token on the "Features" line sets thumb_supported.
 * NOTE(review): fgets is called on 'file' -- the NULL check after fopen
 *   is presumably on an elided line; confirm before relying on it.
 */
455 * This function returns the optimizations supported on this cpu.
458 mono_arch_cpu_optimizazions (guint32 *exclude_mask)
462 thumb_supported = TRUE;
467 FILE *file = fopen ("/proc/cpuinfo", "r");
469 while ((line = fgets (buf, 512, file))) {
470 if (strncmp (line, "Processor", 9) == 0) {
471 char *ver = strstr (line, "(v");
472 if (ver && (ver [2] == '5' || ver [2] == '6' || ver [2] == '7')) {
477 if (strncmp (line, "Features", 8) == 0) {
478 char *th = strstr (line, "thumb");
480 thumb_supported = TRUE;
488 /*printf ("features: v5: %d, thumb: %d\n", v5_supported, thumb_supported);*/
492 /* no arm-specific optimizations yet */
/*
 * is_regsize_var:
 *   Return whether a variable of type 't' fits a single 32-bit integer
 *   register (pointers, object refs, non-valuetype generic insts, ...),
 *   after stripping enum indirection via the underlying type.
 */
498 is_regsize_var (MonoType *t) {
501 t = mono_type_get_underlying_type (t);
508 case MONO_TYPE_FNPTR:
510 case MONO_TYPE_OBJECT:
511 case MONO_TYPE_STRING:
512 case MONO_TYPE_CLASS:
513 case MONO_TYPE_SZARRAY:
514 case MONO_TYPE_ARRAY:
516 case MONO_TYPE_GENERICINST:
517 if (!mono_type_generic_inst_is_valuetype (t))
520 case MONO_TYPE_VALUETYPE:
/*
 * mono_arch_get_allocatable_int_vars:
 *   Collect the method variables eligible for global register
 *   allocation: skip dead ranges, volatile/indirect variables and
 *   anything that is not a local or argument; keep only register-sized
 *   values, inserted sorted by the varlist helper.
 */
527 mono_arch_get_allocatable_int_vars (MonoCompile *cfg)
532 for (i = 0; i < cfg->num_varinfo; i++) {
533 MonoInst *ins = cfg->varinfo [i];
534 MonoMethodVar *vmv = MONO_VARINFO (cfg, i);
/* unused variable: live range is empty */
537 if (vmv->range.first_use.abs_pos >= vmv->range.last_use.abs_pos)
540 if (ins->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT) || (ins->opcode != OP_LOCAL && ins->opcode != OP_ARG))
543 /* we can only allocate 32 bit values */
544 if (is_regsize_var (ins->inst_vtype)) {
545 g_assert (MONO_VARINFO (cfg, i)->reg == -1);
546 g_assert (i == vmv->idx);
547 vars = mono_varlist_insert_sorted (cfg, vars, vmv, FALSE);
/*
 * mono_arch_get_global_int_regs:
 *   Return the list of callee-saved integer registers available to the
 *   global register allocator: V1-V4 always, V5 only when not compiling
 *   AOT (under AOT, V5 is reserved for the IMT method and marked used).
 */
554 #define USE_EXTRA_TEMPS 0
557 mono_arch_get_global_int_regs (MonoCompile *cfg)
560 regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_V1));
561 regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_V2));
562 regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_V3));
563 regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_V4));
564 if (cfg->compile_aot)
565 /* V5 is reserved for holding the IMT method */
566 cfg->used_int_regs |= (1 << ARMREG_V5);
568 regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_V5));
569 /*regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_V6));*/
570 /*regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_V7));*/
576 * mono_arch_regalloc_cost:
578 * Return the cost, in number of memory references, of the action of
579 * allocating the variable VMV into a register during global register
583 mono_arch_regalloc_cost (MonoCompile *cfg, MonoMethodVar *vmv)
/*
 * mono_arch_flush_icache:
 *   Make freshly emitted code executable by flushing the instruction
 *   cache for [code, code+size).  One path uses sys_icache_invalidate
 *   (Darwin); the other issues the Linux/ARM sys_cacheflush syscall
 *   (swi 0x9f0002) via inline assembly.  The paths are presumably
 *   selected by elided #ifdefs -- TODO confirm.
 */
590 mono_arch_flush_icache (guint8 *code, gint size)
593 sys_icache_invalidate (code, size);
595 __asm __volatile ("mov r0, %0\n"
598 "swi 0x9f0002 @ sys_cacheflush"
600 : "r" (code), "r" (code + size), "r" (0)
601 : "r0", "r1", "r3" );
616 guint16 vtsize; /* in param area */
618 guint8 regtype : 4; /* 0 general, 1 basereg, 2 floating point register, see RegType* */
619 guint8 size : 4; /* 1, 2, 4, 8, or regs used by RegTypeStructByVal */
/*
 * add_general:
 *   Assign the next slot for an integer argument: in r0-r3 while they
 *   last, otherwise on the caller's stack (RegTypeBase).  For a two-word
 *   value straddling the boundary, the first word goes in r3 and the
 *   second on the stack (RegTypeBaseGen).  'simple' distinguishes
 *   one-word from two-word arguments (selection logic partly elided).
 */
634 add_general (guint *gr, guint *stack_size, ArgInfo *ainfo, gboolean simple)
637 if (*gr > ARMREG_R3) {
638 ainfo->offset = *stack_size;
639 ainfo->reg = ARMREG_SP; /* in the caller */
640 ainfo->regtype = RegTypeBase;
651 /* first word in r3 and the second on the stack */
652 ainfo->offset = *stack_size;
653 ainfo->reg = ARMREG_SP; /* in the caller */
654 ainfo->regtype = RegTypeBaseGen;
656 } else if (*gr >= ARMREG_R3) {
661 ainfo->offset = *stack_size;
662 ainfo->reg = ARMREG_SP; /* in the caller */
663 ainfo->regtype = RegTypeBase;
678 calculate_sizes (MonoMethodSignature *sig, gboolean is_pinvoke)
681 int n = sig->hasthis + sig->param_count;
683 guint32 stack_size = 0;
684 CallInfo *cinfo = g_malloc0 (sizeof (CallInfo) + sizeof (ArgInfo) * n);
688 /* FIXME: handle returning a struct */
689 if (MONO_TYPE_ISSTRUCT (sig->ret)) {
690 add_general (&gr, &stack_size, &cinfo->ret, TRUE);
691 cinfo->struct_ret = ARMREG_R0;
696 add_general (&gr, &stack_size, cinfo->args + n, TRUE);
699 DEBUG(printf("params: %d\n", sig->param_count));
700 for (i = 0; i < sig->param_count; ++i) {
701 if ((sig->call_convention == MONO_CALL_VARARG) && (i == sig->sentinelpos)) {
702 /* Prevent implicit arguments and sig_cookie from
703 being passed in registers */
705 /* Emit the signature cookie just before the implicit arguments */
706 add_general (&gr, &stack_size, &cinfo->sig_cookie, TRUE);
708 DEBUG(printf("param %d: ", i));
709 if (sig->params [i]->byref) {
710 DEBUG(printf("byref\n"));
711 add_general (&gr, &stack_size, cinfo->args + n, TRUE);
715 simpletype = mono_type_get_underlying_type (sig->params [i])->type;
716 switch (simpletype) {
717 case MONO_TYPE_BOOLEAN:
720 cinfo->args [n].size = 1;
721 add_general (&gr, &stack_size, cinfo->args + n, TRUE);
727 cinfo->args [n].size = 2;
728 add_general (&gr, &stack_size, cinfo->args + n, TRUE);
733 cinfo->args [n].size = 4;
734 add_general (&gr, &stack_size, cinfo->args + n, TRUE);
740 case MONO_TYPE_FNPTR:
741 case MONO_TYPE_CLASS:
742 case MONO_TYPE_OBJECT:
743 case MONO_TYPE_STRING:
744 case MONO_TYPE_SZARRAY:
745 case MONO_TYPE_ARRAY:
747 cinfo->args [n].size = sizeof (gpointer);
748 add_general (&gr, &stack_size, cinfo->args + n, TRUE);
751 case MONO_TYPE_GENERICINST:
752 if (!mono_type_generic_inst_is_valuetype (sig->params [i])) {
753 cinfo->args [n].size = sizeof (gpointer);
754 add_general (&gr, &stack_size, cinfo->args + n, TRUE);
759 case MONO_TYPE_TYPEDBYREF:
760 case MONO_TYPE_VALUETYPE: {
765 if (simpletype == MONO_TYPE_TYPEDBYREF) {
766 size = sizeof (MonoTypedRef);
768 MonoClass *klass = mono_class_from_mono_type (sig->params [i]);
770 size = mono_class_native_size (klass, NULL);
772 size = mono_class_value_size (klass, NULL);
774 DEBUG(printf ("load %d bytes struct\n",
775 mono_class_native_size (sig->params [i]->data.klass, NULL)));
778 align_size += (sizeof (gpointer) - 1);
779 align_size &= ~(sizeof (gpointer) - 1);
780 nwords = (align_size + sizeof (gpointer) -1 ) / sizeof (gpointer);
781 cinfo->args [n].regtype = RegTypeStructByVal;
782 /* FIXME: align gr and stack_size if needed */
783 if (gr > ARMREG_R3) {
784 cinfo->args [n].size = 0;
785 cinfo->args [n].vtsize = nwords;
787 int rest = ARMREG_R3 - gr + 1;
788 int n_in_regs = rest >= nwords? nwords: rest;
789 cinfo->args [n].size = n_in_regs;
790 cinfo->args [n].vtsize = nwords - n_in_regs;
791 cinfo->args [n].reg = gr;
794 cinfo->args [n].offset = stack_size;
795 /*g_print ("offset for arg %d at %d\n", n, stack_size);*/
796 stack_size += nwords * sizeof (gpointer);
803 cinfo->args [n].size = 8;
804 add_general (&gr, &stack_size, cinfo->args + n, FALSE);
808 g_error ("Can't trampoline 0x%x", sig->params [i]->type);
813 simpletype = mono_type_get_underlying_type (sig->ret)->type;
814 switch (simpletype) {
815 case MONO_TYPE_BOOLEAN:
826 case MONO_TYPE_FNPTR:
827 case MONO_TYPE_CLASS:
828 case MONO_TYPE_OBJECT:
829 case MONO_TYPE_SZARRAY:
830 case MONO_TYPE_ARRAY:
831 case MONO_TYPE_STRING:
832 cinfo->ret.reg = ARMREG_R0;
836 cinfo->ret.reg = ARMREG_R0;
840 cinfo->ret.reg = ARMREG_R0;
841 /* FIXME: cinfo->ret.reg = ???;
842 cinfo->ret.regtype = RegTypeFP;*/
844 case MONO_TYPE_GENERICINST:
845 if (!mono_type_generic_inst_is_valuetype (sig->ret)) {
846 cinfo->ret.reg = ARMREG_R0;
850 case MONO_TYPE_VALUETYPE:
852 case MONO_TYPE_TYPEDBYREF:
856 g_error ("Can't handle as return value 0x%x", sig->ret->type);
860 /* align stack size to 8 */
861 DEBUG (printf (" stack size: %d (%d)\n", (stack_size + 15) & ~15, stack_size));
862 stack_size = (stack_size + 7) & ~7;
864 cinfo->stack_usage = stack_size;
870 * Set var information according to the calling convention. arm version.
871 * The locals var stuff should most likely be split in another method.
874 mono_arch_allocate_vars (MonoCompile *m)
876 MonoMethodSignature *sig;
877 MonoMethodHeader *header;
879 int i, offset, size, align, curinst;
880 int frame_reg = ARMREG_FP;
882 /* FIXME: this will change when we use FP as gcc does */
883 m->flags |= MONO_CFG_HAS_SPILLUP;
885 /* allow room for the vararg method args: void* and long/double */
886 if (mono_jit_trace_calls != NULL && mono_trace_eval (m->method))
887 m->param_area = MAX (m->param_area, sizeof (gpointer)*8);
889 header = mono_method_get_header (m->method);
892 * We use the frame register also for any method that has
893 * exception clauses. This way, when the handlers are called,
894 * the code will reference local variables using the frame reg instead of
895 * the stack pointer: if we had to restore the stack pointer, we'd
896 * corrupt the method frames that are already on the stack (since
897 * filters get called before stack unwinding happens) when the filter
898 * code would call any method (this also applies to finally etc.).
900 if ((m->flags & MONO_CFG_HAS_ALLOCA) || header->num_clauses)
901 frame_reg = ARMREG_FP;
902 m->frame_reg = frame_reg;
903 if (frame_reg != ARMREG_SP) {
904 m->used_int_regs |= 1 << frame_reg;
907 sig = mono_method_signature (m->method);
911 if (!MONO_TYPE_ISSTRUCT (sig->ret)) {
912 /* FIXME: handle long and FP values */
913 switch (mono_type_get_underlying_type (sig->ret)->type) {
917 m->ret->opcode = OP_REGVAR;
918 m->ret->inst_c0 = ARMREG_R0;
922 /* local vars are at a positive offset from the stack pointer */
924 * also note that if the function uses alloca, we use FP
925 * to point at the local variables.
927 offset = 0; /* linkage area */
928 /* align the offset to 16 bytes: not sure this is needed here */
930 //offset &= ~(8 - 1);
932 /* add parameter area size for called functions */
933 offset += m->param_area;
936 if (m->flags & MONO_CFG_HAS_FPOUT)
939 /* allow room to save the return value */
940 if (mono_jit_trace_calls != NULL && mono_trace_eval (m->method))
943 /* the MonoLMF structure is stored just below the stack pointer */
945 if (sig->call_convention == MONO_CALL_VARARG) {
949 if (MONO_TYPE_ISSTRUCT (sig->ret)) {
951 offset += sizeof(gpointer) - 1;
952 offset &= ~(sizeof(gpointer) - 1);
953 inst->inst_offset = offset;
954 inst->opcode = OP_REGOFFSET;
955 inst->inst_basereg = frame_reg;
956 if (G_UNLIKELY (m->verbose_level > 1)) {
957 printf ("vret_addr =");
958 mono_print_ins (m->vret_addr);
960 offset += sizeof(gpointer);
961 if (sig->call_convention == MONO_CALL_VARARG)
962 m->sig_cookie += sizeof (gpointer);
965 curinst = m->locals_start;
966 for (i = curinst; i < m->num_varinfo; ++i) {
967 inst = m->varinfo [i];
968 if ((inst->flags & MONO_INST_IS_DEAD) || inst->opcode == OP_REGVAR)
971 /* inst->backend.is_pinvoke indicates native sized value types, this is used by the
972 * pinvoke wrappers when they call functions returning structure */
973 if (inst->backend.is_pinvoke && MONO_TYPE_ISSTRUCT (inst->inst_vtype) && inst->inst_vtype->type != MONO_TYPE_TYPEDBYREF)
974 size = mono_class_native_size (mono_class_from_mono_type (inst->inst_vtype), &align);
976 size = mono_type_size (inst->inst_vtype, &align);
978 /* FIXME: if a structure is misaligned, our memcpy doesn't work,
979 * since it loads/stores misaligned words, which don't do the right thing.
981 if (align < 4 && size >= 4)
984 offset &= ~(align - 1);
985 inst->inst_offset = offset;
986 inst->opcode = OP_REGOFFSET;
987 inst->inst_basereg = frame_reg;
989 //g_print ("allocating local %d to %d\n", i, inst->inst_offset);
994 inst = m->args [curinst];
995 if (inst->opcode != OP_REGVAR) {
996 inst->opcode = OP_REGOFFSET;
997 inst->inst_basereg = frame_reg;
998 offset += sizeof (gpointer) - 1;
999 offset &= ~(sizeof (gpointer) - 1);
1000 inst->inst_offset = offset;
1001 offset += sizeof (gpointer);
1002 if (sig->call_convention == MONO_CALL_VARARG)
1003 m->sig_cookie += sizeof (gpointer);
1008 for (i = 0; i < sig->param_count; ++i) {
1009 inst = m->args [curinst];
1010 if (inst->opcode != OP_REGVAR) {
1011 inst->opcode = OP_REGOFFSET;
1012 inst->inst_basereg = frame_reg;
1013 size = mono_type_size (sig->params [i], &align);
1014 /* FIXME: if a structure is misaligned, our memcpy doesn't work,
1015 * since it loads/stores misaligned words, which don't do the right thing.
1017 if (align < 4 && size >= 4)
1019 offset += align - 1;
1020 offset &= ~(align - 1);
1021 inst->inst_offset = offset;
1023 if ((sig->call_convention == MONO_CALL_VARARG) && (i < sig->sentinelpos))
1024 m->sig_cookie += size;
1029 /* align the offset to 8 bytes */
1034 m->stack_offset = offset;
/*
 * mono_arch_create_vars:
 *   Create arch-specific compile variables.  For struct returns,
 *   allocate vret_addr -- an int-sized OP_ARG holding the hidden
 *   return-buffer pointer -- and dump it at high verbosity.
 */
1039 mono_arch_create_vars (MonoCompile *cfg)
1041 MonoMethodSignature *sig;
1043 sig = mono_method_signature (cfg->method);
1045 if (MONO_TYPE_ISSTRUCT (sig->ret)) {
1046 cfg->vret_addr = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_ARG);
1047 if (G_UNLIKELY (cfg->verbose_level > 1)) {
1048 printf ("vret_addr = ");
1049 mono_print_ins (cfg->vret_addr);
1055 * take the arguments and generate the arch-specific
1056 * instructions to properly call the function in call.
1057 * This includes pushing, moving arguments to the right register
1059 * Issue: who does the spilling if needed, and when?
1062 mono_arch_call_opcode (MonoCompile *cfg, MonoBasicBlock* bb, MonoCallInst *call, int is_virtual) {
1064 MonoMethodSignature *sig;
1069 sig = call->signature;
1070 n = sig->param_count + sig->hasthis;
1072 cinfo = calculate_sizes (sig, sig->pinvoke);
1073 if (cinfo->struct_ret)
1074 call->used_iregs |= 1 << cinfo->struct_ret;
1076 for (i = 0; i < n; ++i) {
1077 ainfo = cinfo->args + i;
1078 if ((sig->call_convention == MONO_CALL_VARARG) && (i == sig->sentinelpos)) {
1080 cfg->disable_aot = TRUE;
1082 MONO_INST_NEW (cfg, sig_arg, OP_ICONST);
1083 sig_arg->inst_p0 = call->signature;
1085 MONO_INST_NEW (cfg, arg, OP_OUTARG);
1086 arg->inst_imm = cinfo->sig_cookie.offset;
1087 arg->inst_left = sig_arg;
1089 /* prepend, so they get reversed */
1090 arg->next = call->out_args;
1091 call->out_args = arg;
1093 if (is_virtual && i == 0) {
1094 /* the argument will be attached to the call instruction */
1095 in = call->args [i];
1096 call->used_iregs |= 1 << ainfo->reg;
1098 MONO_INST_NEW (cfg, arg, OP_OUTARG);
1099 in = call->args [i];
1100 arg->cil_code = in->cil_code;
1101 arg->inst_left = in;
1102 arg->inst_right = (MonoInst*)call;
1103 arg->type = in->type;
1104 /* prepend, we'll need to reverse them later */
1105 arg->next = call->out_args;
1106 call->out_args = arg;
1107 if (ainfo->regtype == RegTypeGeneral) {
1108 arg->backend.reg3 = ainfo->reg;
1109 call->used_iregs |= 1 << ainfo->reg;
1110 if (arg->type == STACK_I8)
1111 call->used_iregs |= 1 << (ainfo->reg + 1);
1112 if (arg->type == STACK_R8) {
1113 if (ainfo->size == 4) {
1114 #ifndef MONO_ARCH_SOFT_FLOAT
1115 arg->opcode = OP_OUTARG_R4;
1118 call->used_iregs |= 1 << (ainfo->reg + 1);
1120 cfg->flags |= MONO_CFG_HAS_FPOUT;
1122 } else if (ainfo->regtype == RegTypeStructByAddr) {
1123 /* FIXME: where is the data allocated? */
1124 arg->backend.reg3 = ainfo->reg;
1125 call->used_iregs |= 1 << ainfo->reg;
1126 g_assert_not_reached ();
1127 } else if (ainfo->regtype == RegTypeStructByVal) {
1129 /* mark the used regs */
1130 for (cur_reg = 0; cur_reg < ainfo->size; ++cur_reg) {
1131 call->used_iregs |= 1 << (ainfo->reg + cur_reg);
1133 arg->opcode = OP_OUTARG_VT;
1134 /* vtsize and offset have just 12 bits of encoding in number of words */
1135 g_assert (((ainfo->vtsize | (ainfo->offset / 4)) & 0xfffff000) == 0);
1136 arg->backend.arg_info = ainfo->reg | (ainfo->size << 4) | (ainfo->vtsize << 8) | ((ainfo->offset / 4) << 20);
1137 } else if (ainfo->regtype == RegTypeBase) {
1138 arg->opcode = OP_OUTARG_MEMBASE;
1139 arg->backend.arg_info = (ainfo->offset << 8) | ainfo->size;
1140 } else if (ainfo->regtype == RegTypeBaseGen) {
1141 call->used_iregs |= 1 << ARMREG_R3;
1142 arg->opcode = OP_OUTARG_MEMBASE;
1143 arg->backend.arg_info = (ainfo->offset << 8) | 0xff;
1144 if (arg->type == STACK_R8)
1145 cfg->flags |= MONO_CFG_HAS_FPOUT;
1146 } else if (ainfo->regtype == RegTypeFP) {
1147 arg->backend.reg3 = ainfo->reg;
1148 /* FP args are passed in int regs */
1149 call->used_iregs |= 1 << ainfo->reg;
1150 if (ainfo->size == 8) {
1151 arg->opcode = OP_OUTARG_R8;
1152 call->used_iregs |= 1 << (ainfo->reg + 1);
1154 arg->opcode = OP_OUTARG_R4;
1156 cfg->flags |= MONO_CFG_HAS_FPOUT;
1158 g_assert_not_reached ();
1163 * Reverse the call->out_args list.
1166 MonoInst *prev = NULL, *list = call->out_args, *next;
1173 call->out_args = prev;
1175 call->stack_usage = cinfo->stack_usage;
1176 cfg->param_area = MAX (cfg->param_area, cinfo->stack_usage);
1177 cfg->flags |= MONO_CFG_HAS_CALLS;
1179 * should set more info in call, such as the stack space
1180 * used by the args that needs to be added back to esp
1188 mono_arch_emit_call (MonoCompile *cfg, MonoCallInst *call)
1191 MonoMethodSignature *sig;
1195 sig = call->signature;
1196 n = sig->param_count + sig->hasthis;
1198 cinfo = calculate_sizes (sig, sig->pinvoke);
1200 for (i = 0; i < n; ++i) {
1201 ArgInfo *ainfo = cinfo->args + i;
1204 if (i >= sig->hasthis)
1205 t = sig->params [i - sig->hasthis];
1207 t = &mono_defaults.int_class->byval_arg;
1208 t = mono_type_get_underlying_type (t);
1210 if ((sig->call_convention == MONO_CALL_VARARG) && (i == sig->sentinelpos)) {
1215 in = call->args [i];
1217 switch (ainfo->regtype) {
1218 case RegTypeGeneral:
1219 if (!t->byref && ((t->type == MONO_TYPE_I8) || (t->type == MONO_TYPE_U8))) {
1220 MONO_INST_NEW (cfg, ins, OP_MOVE);
1221 ins->dreg = mono_alloc_ireg (cfg);
1222 ins->sreg1 = in->dreg + 1;
1223 MONO_ADD_INS (cfg->cbb, ins);
1224 mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, ainfo->reg, FALSE);
1226 MONO_INST_NEW (cfg, ins, OP_MOVE);
1227 ins->dreg = mono_alloc_ireg (cfg);
1228 ins->sreg1 = in->dreg + 2;
1229 MONO_ADD_INS (cfg->cbb, ins);
1230 mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, ainfo->reg + 1, FALSE);
1231 } else if (!t->byref && ((t->type == MONO_TYPE_R8) || (t->type == MONO_TYPE_R4))) {
1232 #ifndef MONO_ARCH_SOFT_FLOAT
1236 if (ainfo->size == 4) {
1237 #ifdef MONO_ARCH_SOFT_FLOAT
1238 /* mono_emit_call_args () have already done the r8->r4 conversion */
1239 /* The converted value is in an int vreg */
1240 MONO_INST_NEW (cfg, ins, OP_MOVE);
1241 ins->dreg = mono_alloc_ireg (cfg);
1242 ins->sreg1 = in->dreg;
1243 MONO_ADD_INS (cfg->cbb, ins);
1244 mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, ainfo->reg, FALSE);
1246 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORER4_MEMBASE_REG, ARMREG_SP, (cfg->param_area - 8), in->dreg);
1247 creg = mono_alloc_ireg (cfg);
1248 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOAD_MEMBASE, creg, ARMREG_SP, (cfg->param_area - 8));
1249 mono_call_inst_add_outarg_reg (cfg, call, creg, ainfo->reg, FALSE);
1252 #ifdef MONO_ARCH_SOFT_FLOAT
1253 MONO_INST_NEW (cfg, ins, OP_FGETLOW32);
1254 ins->dreg = mono_alloc_ireg (cfg);
1255 ins->sreg1 = in->dreg;
1256 MONO_ADD_INS (cfg->cbb, ins);
1257 mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, ainfo->reg, FALSE);
1259 MONO_INST_NEW (cfg, ins, OP_FGETHIGH32);
1260 ins->dreg = mono_alloc_ireg (cfg);
1261 ins->sreg1 = in->dreg;
1262 MONO_ADD_INS (cfg->cbb, ins);
1263 mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, ainfo->reg + 1, FALSE);
1265 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORER8_MEMBASE_REG, ARMREG_SP, (cfg->param_area - 8), in->dreg);
1266 creg = mono_alloc_ireg (cfg);
1267 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOAD_MEMBASE, creg, ARMREG_SP, (cfg->param_area - 8));
1268 mono_call_inst_add_outarg_reg (cfg, call, creg, ainfo->reg, FALSE);
1269 creg = mono_alloc_ireg (cfg);
1270 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOAD_MEMBASE, creg, ARMREG_SP, (cfg->param_area - 8 + 4));
1271 mono_call_inst_add_outarg_reg (cfg, call, creg, ainfo->reg + 1, FALSE);
1274 cfg->flags |= MONO_CFG_HAS_FPOUT;
1276 MONO_INST_NEW (cfg, ins, OP_MOVE);
1277 ins->dreg = mono_alloc_ireg (cfg);
1278 ins->sreg1 = in->dreg;
1279 MONO_ADD_INS (cfg->cbb, ins);
1281 mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, ainfo->reg, FALSE);
1284 case RegTypeStructByAddr:
1287 /* FIXME: where is the data allocated? */
1288 arg->backend.reg3 = ainfo->reg;
1289 call->used_iregs |= 1 << ainfo->reg;
1290 g_assert_not_reached ();
1293 case RegTypeStructByVal:
1294 MONO_INST_NEW (cfg, ins, OP_OUTARG_VT);
1295 ins->opcode = OP_OUTARG_VT;
1296 ins->sreg1 = in->dreg;
1297 ins->klass = in->klass;
1298 ins->inst_p0 = call;
1299 ins->inst_p1 = mono_mempool_alloc (cfg->mempool, sizeof (ArgInfo));
1300 memcpy (ins->inst_p1, ainfo, sizeof (ArgInfo));
1301 MONO_ADD_INS (cfg->cbb, ins);
1304 if (!t->byref && ((t->type == MONO_TYPE_I8) || (t->type == MONO_TYPE_U8))) {
1305 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, ARMREG_SP, ainfo->offset, in->dreg);
1306 } else if (!t->byref && ((t->type == MONO_TYPE_R4) || (t->type == MONO_TYPE_R8))) {
1307 if (t->type == MONO_TYPE_R8) {
1308 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORER8_MEMBASE_REG, ARMREG_SP, ainfo->offset, in->dreg);
1310 #ifdef MONO_ARCH_SOFT_FLOAT
1311 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, ARMREG_SP, ainfo->offset, in->dreg);
1313 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORER4_MEMBASE_REG, ARMREG_SP, ainfo->offset, in->dreg);
1317 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, ARMREG_SP, ainfo->offset, in->dreg);
1320 case RegTypeBaseGen:
1321 if (!t->byref && ((t->type == MONO_TYPE_I8) || (t->type == MONO_TYPE_U8))) {
1322 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, ARMREG_SP, ainfo->offset, (G_BYTE_ORDER == G_BIG_ENDIAN) ? in->dreg + 1 : in->dreg + 2);
1323 MONO_INST_NEW (cfg, ins, OP_MOVE);
1324 ins->dreg = mono_alloc_ireg (cfg);
1325 ins->sreg1 = G_BYTE_ORDER == G_BIG_ENDIAN ? in->dreg + 2 : in->dreg + 1;
1326 MONO_ADD_INS (cfg->cbb, ins);
1327 mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, ARMREG_R3, FALSE);
1328 } else if (!t->byref && (t->type == MONO_TYPE_R8)) {
1331 #ifdef MONO_ARCH_SOFT_FLOAT
1332 g_assert_not_reached ();
1335 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORER8_MEMBASE_REG, ARMREG_SP, (cfg->param_area - 8), in->dreg);
1336 creg = mono_alloc_ireg (cfg);
1337 mono_call_inst_add_outarg_reg (cfg, call, creg, ARMREG_R3, FALSE);
1338 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOAD_MEMBASE, creg, ARMREG_SP, (cfg->param_area - 8));
1339 creg = mono_alloc_ireg (cfg);
1340 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOAD_MEMBASE, creg, ARMREG_SP, (cfg->param_area - 4));
1341 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, ARMREG_SP, ainfo->offset, creg);
1342 cfg->flags |= MONO_CFG_HAS_FPOUT;
1344 g_assert_not_reached ();
1351 arg->backend.reg3 = ainfo->reg;
1352 /* FP args are passed in int regs */
1353 call->used_iregs |= 1 << ainfo->reg;
1354 if (ainfo->size == 8) {
1355 arg->opcode = OP_OUTARG_R8;
1356 call->used_iregs |= 1 << (ainfo->reg + 1);
1358 arg->opcode = OP_OUTARG_R4;
1361 cfg->flags |= MONO_CFG_HAS_FPOUT;
1365 g_assert_not_reached ();
1369 if (sig->ret && MONO_TYPE_ISSTRUCT (sig->ret)) {
1372 MONO_INST_NEW (cfg, vtarg, OP_MOVE);
1373 vtarg->sreg1 = call->vret_var->dreg;
1374 vtarg->dreg = mono_alloc_preg (cfg);
1375 MONO_ADD_INS (cfg->cbb, vtarg);
1377 mono_call_inst_add_outarg_reg (cfg, call, vtarg->dreg, cinfo->ret.reg, FALSE);
1380 call->stack_usage = cinfo->stack_usage;
/*
 * mono_arch_emit_outarg_vt:
 * Emits the argument-passing code for a valuetype (struct) call argument.
 * The first ainfo->size pointer-sized words are loaded from the source
 * vtype and attached to the call as outgoing registers starting at
 * ainfo->reg; the remaining ovf_size words are copied to the outgoing
 * stack area at offset doffset.
 * NOTE(review): this excerpt is non-contiguous; some original lines of
 * the body are not visible here.
 */
1386 mono_arch_emit_outarg_vt (MonoCompile *cfg, MonoInst *ins, MonoInst *src)
1388 MonoCallInst *call = (MonoCallInst*)ins->inst_p0;
1389 ArgInfo *ainfo = ins->inst_p1;
1390 int ovf_size = ainfo->vtsize;
1391 int doffset = ainfo->offset;
1392 int i, soffset, dreg;
/* load each register-passed word of the struct and register it with the call */
1395 for (i = 0; i < ainfo->size; ++i) {
1396 dreg = mono_alloc_ireg (cfg);
1397 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, dreg, src->dreg, soffset);
1398 mono_call_inst_add_outarg_reg (cfg, call, dreg, ainfo->reg + i, FALSE);
1399 soffset += sizeof (gpointer);
1401 //g_print ("vt size: %d at R%d + %d\n", doffset, vt->inst_basereg, vt->inst_offset);
/* copy the overflow part (words that did not fit in registers) onto the stack */
1403 mini_emit_memcpy2 (cfg, ARMREG_SP, doffset, src->dreg, soffset, ovf_size * sizeof (gpointer), 0);
/*
 * mono_arch_emit_setret:
 * Moves the computed return value 'val' into the ABI return location
 * for 'method': I8/U8 use a register pair (OP_SETLRET), soft-float
 * doubles go through OP_SETFRET, soft-float R4 and everything else
 * become a plain OP_MOVE into cfg->ret->dreg.
 */
1407 mono_arch_emit_setret (MonoCompile *cfg, MonoMethod *method, MonoInst *val)
1409 MonoType *ret = mono_type_get_underlying_type (mono_method_signature (method)->ret);
/* 64 bit integers live in two consecutive vregs (low word at dreg + 1) */
1412 if (ret->type == MONO_TYPE_I8 || ret->type == MONO_TYPE_U8) {
1415 MONO_INST_NEW (cfg, ins, OP_SETLRET);
1416 ins->sreg1 = val->dreg + 1;
1417 ins->sreg2 = val->dreg + 2;
1418 MONO_ADD_INS (cfg->cbb, ins);
1421 #ifdef MONO_ARCH_SOFT_FLOAT
1422 if (ret->type == MONO_TYPE_R8) {
/* soft-float: the double is held in integer vregs; OP_SETFRET moves it to the return regs */
1425 MONO_INST_NEW (cfg, ins, OP_SETFRET);
1426 ins->dreg = cfg->ret->dreg;
1427 ins->sreg1 = val->dreg;
1428 MONO_ADD_INS (cfg->cbb, ins);
1431 if (ret->type == MONO_TYPE_R4) {
1432 /* Already converted to an int in method_to_ir () */
1433 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, cfg->ret->dreg, val->dreg);
/* default: a single-word return value */
1440 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, cfg->ret->dreg, val->dreg);
1444 mono_arch_is_inst_imm (gint64 imm)
1450 * Allow tracing to work with this interface (with an optional argument)
/*
 * mono_arch_instrument_prolog:
 * Emits a call to the tracing function 'func' at method entry:
 * r0 = MonoMethod being entered, r1 = NULL (no frame pointer is
 * passed yet, see the inline comment), then an indirect call via r2.
 */
1454 mono_arch_instrument_prolog (MonoCompile *cfg, void *func, void *p, gboolean enable_arguments)
1458 code = mono_arm_emit_load_imm (code, ARMREG_R0, (guint32)cfg->method);
1459 ARM_MOV_REG_IMM8 (code, ARMREG_R1, 0); /* NULL ebp for now */
1460 code = mono_arm_emit_load_imm (code, ARMREG_R2, (guint32)func);
1461 code = emit_call_reg (code, ARMREG_R2);
/*
 * mono_arch_instrument_epilog:
 * Emits a call to the tracing function 'func' at method exit.  The
 * return value (still live in r0/r1 or the FP return location) is
 * spilled to the frame at save_offset before the call and reloaded
 * afterwards, according to save_mode derived from the return type.
 * NOTE(review): this excerpt is non-contiguous; some original lines of
 * the body are not visible here.
 */
1474 mono_arch_instrument_epilog (MonoCompile *cfg, void *func, void *p, gboolean enable_arguments)
1477 int save_mode = SAVE_NONE;
1479 MonoMethod *method = cfg->method;
1480 int rtype = mono_type_get_underlying_type (mono_method_signature (method)->ret)->type;
1481 int save_offset = cfg->param_area;
1485 offset = code - cfg->native_code;
1486 /* we need about 16 instructions */
1487 if (offset > (cfg->code_size - 16 * 4)) {
1488 cfg->code_size *= 2;
1489 cfg->native_code = g_realloc (cfg->native_code, cfg->code_size);
1490 code = cfg->native_code + offset;
1493 case MONO_TYPE_VOID:
1494 /* special case string .ctor icall */
/* BUGFIX: was 'strcmp (...) && ...' which is true for every method whose
 * name is NOT ".ctor" — the opposite of the stated special case.
 * strcmp () returns 0 on equality, so the match must be negated. */
1495 if (!strcmp (".ctor", method->name) && method->klass == mono_defaults.string_class)
1496 save_mode = SAVE_ONE;
1498 save_mode = SAVE_NONE;
1502 save_mode = SAVE_TWO;
1506 save_mode = SAVE_FP;
1508 case MONO_TYPE_VALUETYPE:
1509 save_mode = SAVE_STRUCT;
1512 save_mode = SAVE_ONE;
/* spill the live return value around the upcoming call */
1516 switch (save_mode) {
1518 ARM_STR_IMM (code, ARMREG_R0, cfg->frame_reg, save_offset);
1519 ARM_STR_IMM (code, ARMREG_R1, cfg->frame_reg, save_offset + 4);
1520 if (enable_arguments) {
1521 ARM_MOV_REG_REG (code, ARMREG_R2, ARMREG_R1);
1522 ARM_MOV_REG_REG (code, ARMREG_R1, ARMREG_R0);
1526 ARM_STR_IMM (code, ARMREG_R0, cfg->frame_reg, save_offset);
1527 if (enable_arguments) {
1528 ARM_MOV_REG_REG (code, ARMREG_R1, ARMREG_R0);
1532 /* FIXME: what reg? */
1533 if (enable_arguments) {
1534 /* FIXME: what reg? */
1538 if (enable_arguments) {
1539 /* FIXME: get the actual address */
1540 ARM_MOV_REG_REG (code, ARMREG_R1, ARMREG_R0);
/* r0 = method, then call 'func' indirectly through IP */
1548 code = mono_arm_emit_load_imm (code, ARMREG_R0, (guint32)cfg->method);
1549 code = mono_arm_emit_load_imm (code, ARMREG_IP, (guint32)func);
1550 code = emit_call_reg (code, ARMREG_IP);
/* restore the saved return value */
1552 switch (save_mode) {
1554 ARM_LDR_IMM (code, ARMREG_R0, cfg->frame_reg, save_offset);
1555 ARM_LDR_IMM (code, ARMREG_R1, cfg->frame_reg, save_offset + 4);
1558 ARM_LDR_IMM (code, ARMREG_R0, cfg->frame_reg, save_offset);
1572 * The immediate field for cond branches is big enough for all reasonable methods
/*
 * EMIT_COND_BRANCH_FLAGS: emits a conditional branch to a label or basic
 * block.  The target offset is not known yet, so a patch-info entry is
 * recorded and the 24-bit displacement is filled in later by arm_patch ()
 * (the 'if (0 && ...)' direct-offset fast paths are currently disabled).
 */
1574 #define EMIT_COND_BRANCH_FLAGS(ins,condcode) \
1575 if (ins->flags & MONO_INST_BRLABEL) { \
1576 if (0 && ins->inst_i0->inst_c0) { \
1577 ARM_B_COND (code, (condcode), (code - cfg->native_code + ins->inst_i0->inst_c0) & 0xffffff); \
1579 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_LABEL, ins->inst_i0); \
1580 ARM_B_COND (code, (condcode), 0); \
1583 if (0 && ins->inst_true_bb->native_offset) { \
1584 ARM_B_COND (code, (condcode), (code - cfg->native_code + ins->inst_true_bb->native_offset) & 0xffffff); \
1586 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_BB, ins->inst_true_bb); \
1587 ARM_B_COND (code, (condcode), 0); \
/* convenience wrapper: translates a MonoCC condition index via branch_cc_table */
1591 #define EMIT_COND_BRANCH(ins,cond) EMIT_COND_BRANCH_FLAGS(ins, branch_cc_table [(cond)])
1593 /* emit an exception if the condition fails
1595 * We assign the extra code used to throw the implicit exceptions
1596 * to cfg->bb_exit as far as the big branch handling is concerned
/* records a MONO_PATCH_INFO_EXC entry and emits a conditional BL that
 * will be patched to the exception-throwing code */
1598 #define EMIT_COND_SYSTEM_EXCEPTION_FLAGS(condcode,exc_name) \
1600 mono_add_patch_info (cfg, code - cfg->native_code, \
1601 MONO_PATCH_INFO_EXC, exc_name); \
1602 ARM_BL_COND (code, (condcode), 0); \
/* convenience wrapper using the branch_cc_table translation */
1605 #define EMIT_COND_SYSTEM_EXCEPTION(cond,exc_name) EMIT_COND_SYSTEM_EXCEPTION_FLAGS(branch_cc_table [(cond)], (exc_name))
/*
 * mono_arch_peephole_pass_1: first arch-specific peephole pass.
 * NOTE(review): the body is not visible in this excerpt; it appears to
 * be empty or minimal on ARM.
 */
1608 mono_arch_peephole_pass_1 (MonoCompile *cfg, MonoBasicBlock *bb)
/*
 * mono_arch_peephole_pass_2:
 * Second arch-specific peephole pass: walks every instruction in 'bb'
 * and applies local rewrites — strength reduction of multiplies,
 * store-followed-by-load forwarding, redundant load and move removal.
 * NOTE(review): this excerpt is non-contiguous; several case labels and
 * closing braces of the original body are not visible here.
 */
1613 mono_arch_peephole_pass_2 (MonoCompile *cfg, MonoBasicBlock *bb)
1615 MonoInst *ins, *n, *last_ins = NULL;
1617 MONO_BB_FOR_EACH_INS_SAFE (bb, n, ins) {
1618 switch (ins->opcode) {
1621 /* Already done by an arch-independent pass */
1625 /* remove unnecessary multiplication with 1 */
1626 if (ins->inst_imm == 1) {
1627 if (ins->dreg != ins->sreg1) {
1628 ins->opcode = OP_MOVE;
1630 MONO_DELETE_INS (bb, ins);
/* mul by a power of two becomes a left shift */
1634 int power2 = mono_is_power_of_two (ins->inst_imm);
1636 ins->opcode = OP_SHL_IMM;
1637 ins->inst_imm = power2;
1641 case OP_LOAD_MEMBASE:
1642 case OP_LOADI4_MEMBASE:
/* store/load forwarding:
1644 * OP_STORE_MEMBASE_REG reg, offset(basereg)
1645 * OP_LOAD_MEMBASE offset(basereg), reg
 * the load can reuse the stored register */
1647 if (last_ins && (last_ins->opcode == OP_STOREI4_MEMBASE_REG
1648 || last_ins->opcode == OP_STORE_MEMBASE_REG) &&
1649 ins->inst_basereg == last_ins->inst_destbasereg &&
1650 ins->inst_offset == last_ins->inst_offset) {
1651 if (ins->dreg == last_ins->sreg1) {
1652 MONO_DELETE_INS (bb, ins);
1655 //static int c = 0; printf ("MATCHX %s %d\n", cfg->method->name,c++);
1656 ins->opcode = OP_MOVE;
1657 ins->sreg1 = last_ins->sreg1;
/* duplicate-load elimination:
1661 * Note: reg1 must be different from the basereg in the second load
1662 * OP_LOAD_MEMBASE offset(basereg), reg1
1663 * OP_LOAD_MEMBASE offset(basereg), reg2
1665 * OP_LOAD_MEMBASE offset(basereg), reg1
1666 * OP_MOVE reg1, reg2
 */
1668 } if (last_ins && (last_ins->opcode == OP_LOADI4_MEMBASE
1669 || last_ins->opcode == OP_LOAD_MEMBASE) &&
1670 ins->inst_basereg != last_ins->dreg &&
1671 ins->inst_basereg == last_ins->inst_basereg &&
1672 ins->inst_offset == last_ins->inst_offset) {
1674 if (ins->dreg == last_ins->dreg) {
1675 MONO_DELETE_INS (bb, ins);
1678 ins->opcode = OP_MOVE;
1679 ins->sreg1 = last_ins->dreg;
1682 //g_assert_not_reached ();
/* constant-store forwarding (rule is asserted unreachable below — unverified):
1686 * OP_STORE_MEMBASE_IMM imm, offset(basereg)
1687 * OP_LOAD_MEMBASE offset(basereg), reg
1689 * OP_STORE_MEMBASE_IMM imm, offset(basereg)
1690 * OP_ICONST reg, imm
 */
1692 } else if (last_ins && (last_ins->opcode == OP_STOREI4_MEMBASE_IMM
1693 || last_ins->opcode == OP_STORE_MEMBASE_IMM) &&
1694 ins->inst_basereg == last_ins->inst_destbasereg &&
1695 ins->inst_offset == last_ins->inst_offset) {
1696 //static int c = 0; printf ("MATCHX %s %d\n", cfg->method->name,c++);
1697 ins->opcode = OP_ICONST;
1698 ins->inst_c0 = last_ins->inst_imm;
1699 g_assert_not_reached (); // check this rule
/* narrow store/load pairs become sign/zero extensions of the stored reg */
1703 case OP_LOADU1_MEMBASE:
1704 case OP_LOADI1_MEMBASE:
1705 if (last_ins && (last_ins->opcode == OP_STOREI1_MEMBASE_REG) &&
1706 ins->inst_basereg == last_ins->inst_destbasereg &&
1707 ins->inst_offset == last_ins->inst_offset) {
1708 ins->opcode = (ins->opcode == OP_LOADI1_MEMBASE) ? OP_ICONV_TO_I1 : OP_ICONV_TO_U1;
1709 ins->sreg1 = last_ins->sreg1;
1712 case OP_LOADU2_MEMBASE:
1713 case OP_LOADI2_MEMBASE:
1714 if (last_ins && (last_ins->opcode == OP_STOREI2_MEMBASE_REG) &&
1715 ins->inst_basereg == last_ins->inst_destbasereg &&
1716 ins->inst_offset == last_ins->inst_offset) {
1717 ins->opcode = (ins->opcode == OP_LOADI2_MEMBASE) ? OP_ICONV_TO_I2 : OP_ICONV_TO_U2;
1718 ins->sreg1 = last_ins->sreg1;
1722 ins->opcode = OP_MOVE;
/* self-move is a no-op */
1726 if (ins->dreg == ins->sreg1) {
1727 MONO_DELETE_INS (bb, ins);
/* cancel a move immediately undone by its inverse:
1731 * OP_MOVE sreg, dreg
1732 * OP_MOVE dreg, sreg
 */
1734 if (last_ins && last_ins->opcode == OP_MOVE &&
1735 ins->sreg1 == last_ins->dreg &&
1736 ins->dreg == last_ins->sreg1) {
1737 MONO_DELETE_INS (bb, ins);
1745 bb->last_ins = last_ins;
1749 * the branch_cc_table should maintain the order of these
/* maps a Mono condition index to an ARM condition code; the entries
 * themselves are not visible in this excerpt */
1763 branch_cc_table [] = {
/*
 * NEW_INS: allocates a fresh instruction of opcode 'op' into 'dest' and
 * inserts it into the current basic block before 'ins' (used throughout
 * mono_arch_lowering_pass to materialize constants and address bases).
 */
1777 #define NEW_INS(cfg,dest,op) do { \
1778 MONO_INST_NEW ((cfg), (dest), (op)); \
1779 mono_bblock_insert_before_ins (bb, ins, (dest)); \
/*
 * map_to_reg_reg_op:
 * Translates an opcode taking an immediate or a membase operand into
 * its register-register equivalent, for use after the immediate has
 * been loaded into a register by the lowering pass:
 *  - *_MEMBASE loads/stores become *_MEMINDEX,
 *  - *_MEMBASE_IMM stores become *_MEMBASE_REG (the immediate operand
 *    is replaced by a register, the addressing mode is kept).
 * Aborts on opcodes with no mapping.
 */
1783 map_to_reg_reg_op (int op)
1792 case OP_COMPARE_IMM:
1794 case OP_ICOMPARE_IMM:
1808 case OP_LOAD_MEMBASE:
1809 return OP_LOAD_MEMINDEX;
1810 case OP_LOADI4_MEMBASE:
1811 return OP_LOADI4_MEMINDEX;
1812 case OP_LOADU4_MEMBASE:
1813 return OP_LOADU4_MEMINDEX;
1814 case OP_LOADU1_MEMBASE:
1815 return OP_LOADU1_MEMINDEX;
1816 case OP_LOADI2_MEMBASE:
1817 return OP_LOADI2_MEMINDEX;
1818 case OP_LOADU2_MEMBASE:
1819 return OP_LOADU2_MEMINDEX;
1820 case OP_LOADI1_MEMBASE:
1821 return OP_LOADI1_MEMINDEX;
1822 case OP_STOREI1_MEMBASE_REG:
1823 return OP_STOREI1_MEMINDEX;
1824 case OP_STOREI2_MEMBASE_REG:
1825 return OP_STOREI2_MEMINDEX;
1826 case OP_STOREI4_MEMBASE_REG:
1827 return OP_STOREI4_MEMINDEX;
1828 case OP_STORE_MEMBASE_REG:
1829 return OP_STORE_MEMINDEX;
1830 case OP_STORER4_MEMBASE_REG:
1831 return OP_STORER4_MEMINDEX;
1832 case OP_STORER8_MEMBASE_REG:
1833 return OP_STORER8_MEMINDEX;
/* immediate stores: only the source operand becomes a register */
1834 case OP_STORE_MEMBASE_IMM:
1835 return OP_STORE_MEMBASE_REG;
1836 case OP_STOREI1_MEMBASE_IMM:
1837 return OP_STOREI1_MEMBASE_REG;
1838 case OP_STOREI2_MEMBASE_IMM:
1839 return OP_STOREI2_MEMBASE_REG;
1840 case OP_STOREI4_MEMBASE_IMM:
1841 return OP_STOREI4_MEMBASE_REG;
1843 g_assert_not_reached ();
1847 * Remove from the instruction list the instructions that can't be
1848 * represented with very simple instructions with no register
/*
 * mono_arch_lowering_pass:
 * Rewrites instructions whose immediates/offsets cannot be encoded in
 * ARM instruction formats: the immediate is loaded into a fresh vreg
 * with NEW_INS (OP_ICONST) and the opcode is switched to its
 * register-register form via map_to_reg_reg_op ().  Encodable ranges:
 * 12-bit for word/byte membase, 8-bit for halfword/signed byte, and
 * the 8-bit scaled-by-4 range for FP loads/stores (arm_is_fpimm8).
 * Also strength-reduces OP_MUL_IMM.
 * NOTE(review): this excerpt is non-contiguous; several case labels,
 * break statements and closing braces are not visible here.
 */
1852 mono_arch_lowering_pass (MonoCompile *cfg, MonoBasicBlock *bb)
1854 MonoInst *ins, *temp, *last_ins = NULL;
1855 int rot_amount, imm8, low_imm;
1857 /* setup the virtual reg allocator */
1858 if (bb->max_vreg > cfg->rs->next_vreg)
1859 cfg->rs->next_vreg = bb->max_vreg;
1861 MONO_BB_FOR_EACH_INS (bb, ins) {
1863 switch (ins->opcode) {
1867 case OP_COMPARE_IMM:
1868 case OP_ICOMPARE_IMM:
/* ALU immediates must fit the ARM rotated-8-bit encoding; otherwise
 * materialize the constant and use the reg-reg form */
1882 if ((imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount)) < 0) {
1883 NEW_INS (cfg, temp, OP_ICONST);
1884 temp->inst_c0 = ins->inst_imm;
1885 temp->dreg = mono_regstate_next_int (cfg->rs);
1886 ins->sreg2 = temp->dreg;
1888 ins->opcode = mono_op_imm_to_op (ins->opcode);
1890 ins->opcode = map_to_reg_reg_op (ins->opcode);
/* OP_MUL_IMM strength reduction: x*1 -> move, x*0 -> const 0,
 * power of two -> shift, otherwise a real multiply */
1895 if (ins->inst_imm == 1) {
1896 ins->opcode = OP_MOVE;
1899 if (ins->inst_imm == 0) {
1900 ins->opcode = OP_ICONST;
1904 imm8 = mono_is_power_of_two (ins->inst_imm);
1906 ins->opcode = OP_SHL_IMM;
1907 ins->inst_imm = imm8;
1910 NEW_INS (cfg, temp, OP_ICONST);
1911 temp->inst_c0 = ins->inst_imm;
1912 temp->dreg = mono_regstate_next_int (cfg->rs);
1913 ins->sreg2 = temp->dreg;
1914 ins->opcode = OP_IMUL;
1916 case OP_LOCALLOC_IMM:
1917 NEW_INS (cfg, temp, OP_ICONST);
1918 temp->inst_c0 = ins->inst_imm;
1919 temp->dreg = mono_regstate_next_int (cfg->rs);
1920 ins->sreg1 = temp->dreg;
1921 ins->opcode = OP_LOCALLOC;
1923 case OP_LOAD_MEMBASE:
1924 case OP_LOADI4_MEMBASE:
1925 case OP_LOADU4_MEMBASE:
1926 case OP_LOADU1_MEMBASE:
1927 /* we can do two things: load the immed in a register
1928 * and use an indexed load, or see if the immed can be
1929 * represented as an ad_imm + a load with a smaller offset
1930 * that fits. We just do the first for now, optimize later.
1932 if (arm_is_imm12 (ins->inst_offset))
1934 NEW_INS (cfg, temp, OP_ICONST);
1935 temp->inst_c0 = ins->inst_offset;
1936 temp->dreg = mono_regstate_next_int (cfg->rs);
1937 ins->sreg2 = temp->dreg;
1938 ins->opcode = map_to_reg_reg_op (ins->opcode);
/* halfword/signed byte loads only have an 8 bit offset field */
1940 case OP_LOADI2_MEMBASE:
1941 case OP_LOADU2_MEMBASE:
1942 case OP_LOADI1_MEMBASE:
1943 if (arm_is_imm8 (ins->inst_offset))
1945 NEW_INS (cfg, temp, OP_ICONST);
1946 temp->inst_c0 = ins->inst_offset;
1947 temp->dreg = mono_regstate_next_int (cfg->rs);
1948 ins->sreg2 = temp->dreg;
1949 ins->opcode = map_to_reg_reg_op (ins->opcode);
/* FP loads: split the offset into base-adjust + small offset,
 * since there is no indexed FP addressing mode */
1951 case OP_LOADR4_MEMBASE:
1952 case OP_LOADR8_MEMBASE:
1953 if (arm_is_fpimm8 (ins->inst_offset))
1955 low_imm = ins->inst_offset & 0x1ff;
1956 if ((imm8 = mono_arm_is_rotated_imm8 (ins->inst_offset & ~0x1ff, &rot_amount)) >= 0) {
1957 NEW_INS (cfg, temp, OP_ADD_IMM);
1958 temp->inst_imm = ins->inst_offset & ~0x1ff;
1959 temp->sreg1 = ins->inst_basereg;
1960 temp->dreg = mono_regstate_next_int (cfg->rs);
1961 ins->inst_basereg = temp->dreg;
1962 ins->inst_offset = low_imm;
1965 /* VFP/FPA doesn't have indexed load instructions */
1966 g_assert_not_reached ();
1968 case OP_STORE_MEMBASE_REG:
1969 case OP_STOREI4_MEMBASE_REG:
1970 case OP_STOREI1_MEMBASE_REG:
1971 if (arm_is_imm12 (ins->inst_offset))
1973 NEW_INS (cfg, temp, OP_ICONST);
1974 temp->inst_c0 = ins->inst_offset;
1975 temp->dreg = mono_regstate_next_int (cfg->rs);
1976 ins->sreg2 = temp->dreg;
1977 ins->opcode = map_to_reg_reg_op (ins->opcode);
1979 case OP_STOREI2_MEMBASE_REG:
1980 if (arm_is_imm8 (ins->inst_offset))
1982 NEW_INS (cfg, temp, OP_ICONST);
1983 temp->inst_c0 = ins->inst_offset;
1984 temp->dreg = mono_regstate_next_int (cfg->rs);
1985 ins->sreg2 = temp->dreg;
1986 ins->opcode = map_to_reg_reg_op (ins->opcode);
1988 case OP_STORER4_MEMBASE_REG:
1989 case OP_STORER8_MEMBASE_REG:
1990 if (arm_is_fpimm8 (ins->inst_offset))
1992 low_imm = ins->inst_offset & 0x1ff;
1993 if ((imm8 = mono_arm_is_rotated_imm8 (ins->inst_offset & ~ 0x1ff, &rot_amount)) >= 0 && arm_is_fpimm8 (low_imm)) {
1994 NEW_INS (cfg, temp, OP_ADD_IMM);
1995 temp->inst_imm = ins->inst_offset & ~0x1ff;
1996 temp->sreg1 = ins->inst_destbasereg;
1997 temp->dreg = mono_regstate_next_int (cfg->rs);
1998 ins->inst_destbasereg = temp->dreg;
1999 ins->inst_offset = low_imm;
2002 /*g_print ("fail with: %d (%d, %d)\n", ins->inst_offset, ins->inst_offset & ~0x1ff, low_imm);*/
2003 /* VFP/FPA doesn't have indexed store instructions */
2004 g_assert_not_reached ();
/* immediate stores: load the value, then re-lower the (possibly big) offset */
2006 case OP_STORE_MEMBASE_IMM:
2007 case OP_STOREI1_MEMBASE_IMM:
2008 case OP_STOREI2_MEMBASE_IMM:
2009 case OP_STOREI4_MEMBASE_IMM:
2010 NEW_INS (cfg, temp, OP_ICONST);
2011 temp->inst_c0 = ins->inst_imm;
2012 temp->dreg = mono_regstate_next_int (cfg->rs);
2013 ins->sreg1 = temp->dreg;
2014 ins->opcode = map_to_reg_reg_op (ins->opcode);
2016 goto loop_start; /* make it handle the possibly big ins->inst_offset */
2018 gboolean swap = FALSE;
2021 /* Some fp compares require swapped operands */
2022 g_assert (ins->next);
2023 switch (ins->next->opcode) {
2025 ins->next->opcode = OP_FBLT;
2029 ins->next->opcode = OP_FBLT_UN;
2033 ins->next->opcode = OP_FBGE;
2037 ins->next->opcode = OP_FBGE_UN;
2045 ins->sreg1 = ins->sreg2;
2054 bb->last_ins = last_ins;
2055 bb->max_vreg = cfg->rs->next_vreg;
/*
 * emit_float_to_int:
 * Emits code converting the FP register 'sreg' to the integer register
 * 'dreg', then truncates the result to 'size' bytes (zero-extending for
 * unsigned, sign-extending otherwise).  The conversion instruction
 * depends on the FP backend (FPA vs VFP).
 */
2060 emit_float_to_int (MonoCompile *cfg, guchar *code, int dreg, int sreg, int size, gboolean is_signed)
2062 /* sreg is a float, dreg is an integer reg */
2064 ARM_FIXZ (code, dreg, sreg);
2065 #elif defined(ARM_FPU_VFP)
2067 ARM_TOSIZD (code, ARM_VFP_F0, sreg);
2069 ARM_TOUIZD (code, ARM_VFP_F0, sreg);
2070 ARM_FMRS (code, dreg, ARM_VFP_F0);
/* unsigned narrowing: mask (8 bit) or shift left/right logical (16 bit) */
2074 ARM_AND_REG_IMM8 (code, dreg, dreg, 0xff);
2075 else if (size == 2) {
2076 ARM_SHL_IMM (code, dreg, dreg, 16);
2077 ARM_SHR_IMM (code, dreg, dreg, 16);
/* signed narrowing: shift left then arithmetic shift right */
2081 ARM_SHL_IMM (code, dreg, dreg, 24);
2082 ARM_SAR_IMM (code, dreg, dreg, 24);
2083 } else if (size == 2) {
2084 ARM_SHL_IMM (code, dreg, dreg, 16);
2085 ARM_SAR_IMM (code, dreg, dreg, 16);
2093 const guchar *target;
/* true if 'diff' fits in the signed 26-bit (24-bit word) BL displacement */
2098 #define is_call_imm(diff) ((gint)(diff) >= -33554432 && (gint)(diff) <= 33554431)
/*
 * search_thunk_slot:
 * Callback for mono_code_manager_foreach: scans a code chunk's thunk
 * area for either an existing thunk whose target matches pdata->target
 * (reusing it) or a free 3-word slot (emitting a new thunk: load target
 * into IP from thunks [2], then branch to it).  Patches pdata->code to
 * branch to the thunk and flushes the icache.
 */
2101 search_thunk_slot (void *data, int csize, int bsize, void *user_data) {
2102 PatchData *pdata = (PatchData*)user_data;
2103 guchar *code = data;
2104 guint32 *thunks = data;
2105 guint32 *endthunks = (guint32*)(code + bsize);
2107 int difflow, diffhigh;
2109 /* always ensure a call from pdata->code can reach to the thunks without further thunks */
2110 difflow = (char*)pdata->code - (char*)thunks;
2111 diffhigh = (char*)pdata->code - (char*)endthunks;
2112 if (!((is_call_imm (thunks) && is_call_imm (endthunks)) || (is_call_imm (difflow) && is_call_imm (diffhigh))))
2116 * The thunk is composed of 3 words:
2117 * load constant from thunks [2] into ARM_IP
2120 * Note that the LR register is already setup
2122 //g_print ("thunk nentries: %d\n", ((char*)endthunks - (char*)thunks)/16);
2123 if ((pdata->found == 2) || (pdata->code >= code && pdata->code <= code + csize)) {
2124 while (thunks < endthunks) {
2125 //g_print ("looking for target: %p at %p (%08x-%08x)\n", pdata->target, thunks, thunks [0], thunks [1]);
/* reuse an existing thunk with the same target */
2126 if (thunks [2] == (guint32)pdata->target) {
2127 arm_patch (pdata->code, (guchar*)thunks);
2128 mono_arch_flush_icache (pdata->code, 4);
2131 } else if ((thunks [0] == 0) && (thunks [1] == 0) && (thunks [2] == 0)) {
2132 /* found a free slot instead: emit thunk */
2133 /* ARMREG_IP is fine to use since this can't be an IMT call
2136 code = (guchar*)thunks;
2137 ARM_LDR_IMM (code, ARMREG_IP, ARMREG_PC, 0);
2138 if (thumb_supported)
2139 ARM_BX (code, ARMREG_IP);
2141 ARM_MOV_REG_REG (code, ARMREG_PC, ARMREG_IP);
2142 thunks [2] = (guint32)pdata->target;
2143 mono_arch_flush_icache ((guchar*)thunks, 12);
2145 arm_patch (pdata->code, (guchar*)thunks);
2146 mono_arch_flush_icache (pdata->code, 4);
2150 /* skip 12 bytes, the size of the thunk */
2154 //g_print ("failed thunk lookup for %p from %p at %p (%d entries)\n", pdata->target, pdata->code, data, count);
/*
 * handle_thunk:
 * Redirects the branch at 'code' to 'target' through a thunk, used when
 * the target is out of direct-branch range.  Walks the domain's code
 * chunks twice: first looking for an existing/nearby slot, then (with
 * pdata.found == 2, see search_thunk_slot) taking the first free slot.
 * Aborts if no thunk slot could be found.
 */
2160 handle_thunk (int absolute, guchar *code, const guchar *target) {
2161 MonoDomain *domain = mono_domain_get ();
2165 pdata.target = target;
2166 pdata.absolute = absolute;
2169 mono_domain_lock (domain);
2170 mono_code_manager_foreach (domain->code_mp, search_thunk_slot, &pdata);
2173 /* this uses the first available slot */
2175 mono_code_manager_foreach (domain->code_mp, search_thunk_slot, &pdata);
2177 mono_domain_unlock (domain);
2179 if (pdata.found != 1)
2180 g_print ("thunk failed for %p from %p\n", target, code);
2181 g_assert (pdata.found == 1);
/*
 * arm_patch:
 * Patches the instruction(s) at 'code' to transfer control to 'target'.
 * Handles: direct B/BL (24-bit displacement, with BL->BLX rewriting when
 * the target is a Thumb address), the ldr/mov-pc indirect call sequences
 * (rewriting the embedded address constant), and falls back to a thunk
 * via handle_thunk () when the displacement does not fit.
 */
2185 arm_patch (guchar *code, const guchar *target)
2187 guint32 *code32 = (void*)code;
2188 guint32 ins = *code32;
2189 guint32 prim = (ins >> 25) & 7;
2190 guint32 tval = GPOINTER_TO_UINT (target);
2192 //g_print ("patching 0x%08x (0x%08x) to point to 0x%08x\n", code, ins, target);
2193 if (prim == 5) { /* 101b */
2194 /* the diff starts 8 bytes from the branch opcode */
2195 gint diff = target - code - 8;
2197 gint tmask = 0xffffffff;
2198 if (tval & 1) { /* entering thumb mode */
2199 diff = target - 1 - code - 8;
2200 g_assert (thumb_supported);
2201 tbits = 0xf << 28; /* bl->blx bit pattern */
2202 g_assert ((ins & (1 << 24))); /* it must be a bl, not b instruction */
2203 /* this low bit of the displacement is moved to bit 24 in the instruction encoding */
2207 tmask = ~(1 << 24); /* clear the link bit */
2208 /*g_print ("blx to thumb: target: %p, code: %p, diff: %d, mask: %x\n", target, code, diff, tmask);*/
/* positive displacements up to the 24-bit signed maximum */
2213 if (diff <= 33554431) {
2215 ins = (ins & 0xff000000) | diff;
2217 *code32 = ins | tbits;
2221 /* diff between 0 and -33554432 */
2222 if (diff >= -33554432) {
2224 ins = (ins & 0xff000000) | (diff & ~0xff000000);
2226 *code32 = ins | tbits;
/* displacement does not fit: go through a thunk */
2231 handle_thunk (TRUE, code, target);
2236 * The alternative call sequences looks like this:
2238 * ldr ip, [pc] // loads the address constant
2239 * b 1f // jumps around the constant
2240 * address constant embedded in the code
2245 * There are two cases for patching:
2246 * a) at the end of method emission: in this case code points to the start
2247 * of the call sequence
2248 * b) during runtime patching of the call site: in this case code points
2249 * to the mov pc, ip instruction
2251 * We have to handle also the thunk jump code sequence:
2255 * address constant // execution never reaches here
2257 if ((ins & 0x0ffffff0) == 0x12fff10) {
2258 /* Branch and exchange: the address is constructed in a reg
2259 * We can patch BX when the code sequence is the following:
2260 * ldr ip, [pc, #0] ; 0x8
/* re-emit the expected sequence into a scratch buffer to validate the site */
2267 guint8 *emit = (guint8*)ccode;
2268 ARM_LDR_IMM (emit, ARMREG_IP, ARMREG_PC, 0);
2270 ARM_MOV_REG_REG (emit, ARMREG_LR, ARMREG_PC);
2271 ARM_BX (emit, ARMREG_IP);
2273 /*patching from magic trampoline*/
2274 if (ins == ccode [3]) {
2275 g_assert (code32 [-4] == ccode [0]);
2276 g_assert (code32 [-3] == ccode [1]);
2277 g_assert (code32 [-1] == ccode [2]);
2278 code32 [-2] = (guint32)target;
2281 /*patching from JIT*/
2282 if (ins == ccode [0]) {
2283 g_assert (code32 [1] == ccode [1]);
2284 g_assert (code32 [3] == ccode [2]);
2285 g_assert (code32 [4] == ccode [3]);
2286 code32 [2] = (guint32)target;
2289 g_assert_not_reached ();
2290 } else if ((ins & 0x0ffffff0) == 0x12fff30) {
/* BLX reg sequence: validate and rewrite the embedded constant */
2298 guint8 *emit = (guint8*)ccode;
2299 ARM_LDR_IMM (emit, ARMREG_IP, ARMREG_PC, 0);
2301 ARM_BLX_REG (emit, ARMREG_IP);
2303 g_assert (code32 [-3] == ccode [0]);
2304 g_assert (code32 [-2] == ccode [1]);
2305 g_assert (code32 [0] == ccode [2]);
2307 code32 [-1] = (guint32)target;
2310 guint32 *tmp = ccode;
2311 guint8 *emit = (guint8*)tmp;
2312 ARM_LDR_IMM (emit, ARMREG_IP, ARMREG_PC, 0);
2313 ARM_MOV_REG_REG (emit, ARMREG_LR, ARMREG_PC);
2314 ARM_MOV_REG_REG (emit, ARMREG_PC, ARMREG_IP);
2315 ARM_BX (emit, ARMREG_IP);
2316 if (ins == ccode [2]) {
2317 g_assert_not_reached (); // should be -2 ...
2318 code32 [-1] = (guint32)target;
2321 if (ins == ccode [0]) {
2322 /* handles both thunk jump code and the far call sequence */
2323 code32 [2] = (guint32)target;
2326 g_assert_not_reached ();
2328 // g_print ("patched with 0x%08x\n", ins);
2332 * Return the >= 0 uimm8 value if val can be represented with a byte + rotation
2333 * (with the rotation amount in *rot_amount. rot_amount is already adjusted
2334 * to be used with the emit macros.
2335 * Return -1 otherwise.
2338 mono_arm_is_rotated_imm8 (guint32 val, gint *rot_amount)
/* try every even rotation (ARM data-processing immediates rotate right
 * by an even amount); accept when the rotated value fits in 8 bits */
2341 for (i = 0; i < 31; i+= 2) {
2342 res = (val << (32 - i)) | (val >> i);
2345 *rot_amount = i? 32 - i: 0;
2352 * Emits in code a sequence of instructions that load the value 'val'
2353 * into the dreg register. Uses at most 4 instructions.
2356 mono_arm_emit_load_imm (guint8 *code, int dreg, guint32 val)
2358 int imm8, rot_amount;
/* fallback path: load the constant from a literal pool next to the code
 * (NOTE(review): surrounding lines are not visible in this excerpt) */
2360 ARM_LDR_IMM (code, dreg, ARMREG_PC, 0);
2361 /* skip the constant pool */
/* fast paths: a single MOV (or MVN for the bitwise complement) when the
 * value fits the rotated 8 bit immediate encoding */
2367 if ((imm8 = mono_arm_is_rotated_imm8 (val, &rot_amount)) >= 0) {
2368 ARM_MOV_REG_IMM (code, dreg, imm8, rot_amount);
2369 } else if ((imm8 = mono_arm_is_rotated_imm8 (~val, &rot_amount)) >= 0) {
2370 ARM_MVN_REG_IMM (code, dreg, imm8, rot_amount);
/* general case: build the value one byte at a time, starting from the
 * lowest non-zero byte, with MOV + up to three ADDs */
2373 ARM_MOV_REG_IMM8 (code, dreg, (val & 0xFF));
2375 ARM_ADD_REG_IMM (code, dreg, dreg, (val & 0xFF00) >> 8, 24);
2377 if (val & 0xFF0000) {
2378 ARM_ADD_REG_IMM (code, dreg, dreg, (val & 0xFF0000) >> 16, 16);
2380 if (val & 0xFF000000) {
2381 ARM_ADD_REG_IMM (code, dreg, dreg, (val & 0xFF000000) >> 24, 8);
2383 } else if (val & 0xFF00) {
2384 ARM_MOV_REG_IMM (code, dreg, (val & 0xFF00) >> 8, 24);
2385 if (val & 0xFF0000) {
2386 ARM_ADD_REG_IMM (code, dreg, dreg, (val & 0xFF0000) >> 16, 16);
2388 if (val & 0xFF000000) {
2389 ARM_ADD_REG_IMM (code, dreg, dreg, (val & 0xFF000000) >> 24, 8);
2391 } else if (val & 0xFF0000) {
2392 ARM_MOV_REG_IMM (code, dreg, (val & 0xFF0000) >> 16, 16);
2393 if (val & 0xFF000000) {
2394 ARM_ADD_REG_IMM (code, dreg, dreg, (val & 0xFF000000) >> 24, 8);
2397 //g_assert_not_reached ();
2403 * emit_load_volatile_arguments:
2405 * Load volatile arguments from the stack to the original input registers.
2406 * Required before a tail call.
2409 emit_load_volatile_arguments (MonoCompile *cfg, guint8 *code)
2411 MonoMethod *method = cfg->method;
2412 MonoMethodSignature *sig;
2417 /* FIXME: Generate intermediate code instead */
2419 sig = mono_method_signature (method);
2421 /* This is the opposite of the code in emit_prolog */
/* recompute the calling-convention layout used at method entry */
2425 cinfo = calculate_sizes (sig, sig->pinvoke);
2427 if (MONO_TYPE_ISSTRUCT (sig->ret)) {
/* reload the hidden valuetype-return address into its register */
2428 ArgInfo *ainfo = &cinfo->ret;
2429 inst = cfg->vret_addr;
2430 g_assert (arm_is_imm12 (inst->inst_offset));
2431 ARM_LDR_IMM (code, ainfo->reg, inst->inst_basereg, inst->inst_offset);
2433 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
2434 ArgInfo *ainfo = cinfo->args + i;
2435 inst = cfg->args [pos];
2437 if (cfg->verbose_level > 2)
2438 g_print ("Loading argument %d (type: %d)\n", i, ainfo->regtype);
2439 if (inst->opcode == OP_REGVAR) {
2440 if (ainfo->regtype == RegTypeGeneral)
2441 ARM_MOV_REG_REG (code, inst->dreg, ainfo->reg);
2442 else if (ainfo->regtype == RegTypeFP) {
2443 g_assert_not_reached ();
2444 } else if (ainfo->regtype == RegTypeBase) {
2448 if (arm_is_imm12 (prev_sp_offset + ainfo->offset)) {
2449 ARM_LDR_IMM (code, inst->dreg, ARMREG_SP, (prev_sp_offset + ainfo->offset));
2451 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
2452 ARM_LDR_REG_REG (code, inst->dreg, ARMREG_SP, ARMREG_IP);
2456 g_assert_not_reached ();
/* argument was spilled to the frame: reload it into its input reg(s) */
2458 if (ainfo->regtype == RegTypeGeneral) {
2459 switch (ainfo->size) {
/* 8 byte arguments occupy a register pair */
2466 g_assert (arm_is_imm12 (inst->inst_offset));
2467 ARM_LDR_IMM (code, ainfo->reg, inst->inst_basereg, inst->inst_offset);
2468 g_assert (arm_is_imm12 (inst->inst_offset + 4));
2469 ARM_LDR_IMM (code, ainfo->reg + 1, inst->inst_basereg, inst->inst_offset + 4);
2472 if (arm_is_imm12 (inst->inst_offset)) {
2473 ARM_LDR_IMM (code, ainfo->reg, inst->inst_basereg, inst->inst_offset);
2475 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
2476 ARM_LDR_REG_REG (code, ainfo->reg, inst->inst_basereg, ARMREG_IP);
2480 } else if (ainfo->regtype == RegTypeBaseGen) {
2483 } else if (ainfo->regtype == RegTypeBase) {
2486 } else if (ainfo->regtype == RegTypeFP) {
2487 g_assert_not_reached ();
2488 } else if (ainfo->regtype == RegTypeStructByVal) {
2489 int doffset = inst->inst_offset;
2493 if (mono_class_from_mono_type (inst->inst_vtype))
2494 size = mono_class_native_size (mono_class_from_mono_type (inst->inst_vtype), NULL);
/* reload each register-passed word of the struct */
2495 for (cur_reg = 0; cur_reg < ainfo->size; ++cur_reg) {
2496 if (arm_is_imm12 (doffset)) {
2497 ARM_LDR_IMM (code, ainfo->reg + cur_reg, inst->inst_basereg, doffset);
2499 code = mono_arm_emit_load_imm (code, ARMREG_IP, doffset);
2500 ARM_LDR_REG_REG (code, ainfo->reg + cur_reg, inst->inst_basereg, ARMREG_IP);
2502 soffset += sizeof (gpointer);
2503 doffset += sizeof (gpointer);
2508 } else if (ainfo->regtype == RegTypeStructByAddr) {
2523 mono_arch_output_basic_block (MonoCompile *cfg, MonoBasicBlock *bb)
2528 guint8 *code = cfg->native_code + cfg->code_len;
2529 MonoInst *last_ins = NULL;
2530 guint last_offset = 0;
2532 int imm8, rot_amount;
2534 /* we don't align basic blocks of loops on arm */
2536 if (cfg->verbose_level > 2)
2537 g_print ("Basic block %d starting at offset 0x%x\n", bb->block_num, bb->native_offset);
2539 cpos = bb->max_offset;
2541 if (cfg->prof_options & MONO_PROFILE_COVERAGE) {
2542 //MonoCoverageInfo *cov = mono_get_coverage_info (cfg->method);
2543 //g_assert (!mono_compile_aot);
2546 // cov->data [bb->dfn].iloffset = bb->cil_code - cfg->cil_code;
2548 /* this is not thread safe, but good enough */
2548 /* fixme: howto handle overflows? */
2549 //x86_inc_mem (code, &cov->data [bb->dfn].count);
2552 if (mono_break_at_bb_method && mono_method_desc_full_match (mono_break_at_bb_method, cfg->method) && bb->block_num == mono_break_at_bb_bb_num) {
2553 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_INTERNAL_METHOD,
2554 (gpointer)"mono_break");
2555 code = emit_call_seq (cfg, code);
2558 MONO_BB_FOR_EACH_INS (bb, ins) {
2559 offset = code - cfg->native_code;
2561 max_len = ((guint8 *)ins_get_spec (ins->opcode))[MONO_INST_LEN];
2563 if (offset > (cfg->code_size - max_len - 16)) {
2564 cfg->code_size *= 2;
2565 cfg->native_code = g_realloc (cfg->native_code, cfg->code_size);
2566 code = cfg->native_code + offset;
2568 // if (ins->cil_code)
2569 // g_print ("cil code\n");
2570 mono_debug_record_line_number (cfg, ins, offset);
2572 switch (ins->opcode) {
2573 case OP_MEMORY_BARRIER:
2576 g_assert_not_reached ();
2579 ppc_mullw (code, ppc_r4, ins->sreg1, ins->sreg2);
2580 ppc_mulhw (code, ppc_r3, ins->sreg1, ins->sreg2);
2583 ppc_mullw (code, ppc_r4, ins->sreg1, ins->sreg2);
2584 ppc_mulhwu (code, ppc_r3, ins->sreg1, ins->sreg2);
2586 case OP_STOREI1_MEMBASE_IMM:
2587 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_imm & 0xFF);
2588 g_assert (arm_is_imm12 (ins->inst_offset));
2589 ARM_STRB_IMM (code, ARMREG_LR, ins->inst_destbasereg, ins->inst_offset);
2591 case OP_STOREI2_MEMBASE_IMM:
2592 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_imm & 0xFFFF);
2593 g_assert (arm_is_imm8 (ins->inst_offset));
2594 ARM_STRH_IMM (code, ARMREG_LR, ins->inst_destbasereg, ins->inst_offset);
2596 case OP_STORE_MEMBASE_IMM:
2597 case OP_STOREI4_MEMBASE_IMM:
2598 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_imm);
2599 g_assert (arm_is_imm12 (ins->inst_offset));
2600 ARM_STR_IMM (code, ARMREG_LR, ins->inst_destbasereg, ins->inst_offset);
2602 case OP_STOREI1_MEMBASE_REG:
2603 g_assert (arm_is_imm12 (ins->inst_offset));
2604 ARM_STRB_IMM (code, ins->sreg1, ins->inst_destbasereg, ins->inst_offset);
2606 case OP_STOREI2_MEMBASE_REG:
2607 g_assert (arm_is_imm8 (ins->inst_offset));
2608 ARM_STRH_IMM (code, ins->sreg1, ins->inst_destbasereg, ins->inst_offset);
2610 case OP_STORE_MEMBASE_REG:
2611 case OP_STOREI4_MEMBASE_REG:
2612 /* this case is special, since it happens for spill code after lowering has been called */
2613 if (arm_is_imm12 (ins->inst_offset)) {
2614 ARM_STR_IMM (code, ins->sreg1, ins->inst_destbasereg, ins->inst_offset);
2616 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_offset);
2617 ARM_STR_REG_REG (code, ins->sreg1, ins->inst_destbasereg, ARMREG_LR);
2620 case OP_STOREI1_MEMINDEX:
2621 ARM_STRB_REG_REG (code, ins->sreg1, ins->inst_destbasereg, ins->sreg2);
2623 case OP_STOREI2_MEMINDEX:
2624 ARM_STRH_REG_REG (code, ins->sreg1, ins->inst_destbasereg, ins->sreg2);
2626 case OP_STORE_MEMINDEX:
2627 case OP_STOREI4_MEMINDEX:
2628 ARM_STR_REG_REG (code, ins->sreg1, ins->inst_destbasereg, ins->sreg2);
2631 g_assert_not_reached ();
2633 case OP_LOAD_MEMINDEX:
2634 case OP_LOADI4_MEMINDEX:
2635 case OP_LOADU4_MEMINDEX:
2636 ARM_LDR_REG_REG (code, ins->dreg, ins->inst_basereg, ins->sreg2);
2638 case OP_LOADI1_MEMINDEX:
2639 ARM_LDRSB_REG_REG (code, ins->dreg, ins->inst_basereg, ins->sreg2);
2641 case OP_LOADU1_MEMINDEX:
2642 ARM_LDRB_REG_REG (code, ins->dreg, ins->inst_basereg, ins->sreg2);
2644 case OP_LOADI2_MEMINDEX:
2645 ARM_LDRSH_REG_REG (code, ins->dreg, ins->inst_basereg, ins->sreg2);
2647 case OP_LOADU2_MEMINDEX:
2648 ARM_LDRH_REG_REG (code, ins->dreg, ins->inst_basereg, ins->sreg2);
2650 case OP_LOAD_MEMBASE:
2651 case OP_LOADI4_MEMBASE:
2652 case OP_LOADU4_MEMBASE:
2653 /* this case is special, since it happens for spill code after lowering has been called */
2654 if (arm_is_imm12 (ins->inst_offset)) {
2655 ARM_LDR_IMM (code, ins->dreg, ins->inst_basereg, ins->inst_offset);
2657 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_offset);
2658 ARM_LDR_REG_REG (code, ins->dreg, ins->inst_basereg, ARMREG_LR);
2661 case OP_LOADI1_MEMBASE:
2662 g_assert (arm_is_imm8 (ins->inst_offset));
2663 ARM_LDRSB_IMM (code, ins->dreg, ins->inst_basereg, ins->inst_offset);
2665 case OP_LOADU1_MEMBASE:
2666 g_assert (arm_is_imm12 (ins->inst_offset));
2667 ARM_LDRB_IMM (code, ins->dreg, ins->inst_basereg, ins->inst_offset);
2669 case OP_LOADU2_MEMBASE:
2670 g_assert (arm_is_imm8 (ins->inst_offset));
2671 ARM_LDRH_IMM (code, ins->dreg, ins->inst_basereg, ins->inst_offset);
2673 case OP_LOADI2_MEMBASE:
2674 g_assert (arm_is_imm8 (ins->inst_offset));
2675 ARM_LDRSH_IMM (code, ins->dreg, ins->inst_basereg, ins->inst_offset);
2677 case OP_ICONV_TO_I1:
2678 ARM_SHL_IMM (code, ins->dreg, ins->sreg1, 24);
2679 ARM_SAR_IMM (code, ins->dreg, ins->dreg, 24);
2681 case OP_ICONV_TO_I2:
2682 ARM_SHL_IMM (code, ins->dreg, ins->sreg1, 16);
2683 ARM_SAR_IMM (code, ins->dreg, ins->dreg, 16);
2685 case OP_ICONV_TO_U1:
2686 ARM_AND_REG_IMM8 (code, ins->dreg, ins->sreg1, 0xff);
2688 case OP_ICONV_TO_U2:
2689 ARM_SHL_IMM (code, ins->dreg, ins->sreg1, 16);
2690 ARM_SHR_IMM (code, ins->dreg, ins->dreg, 16);
2694 ARM_CMP_REG_REG (code, ins->sreg1, ins->sreg2);
2696 case OP_COMPARE_IMM:
2697 case OP_ICOMPARE_IMM:
2698 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
2699 g_assert (imm8 >= 0);
2700 ARM_CMP_REG_IMM (code, ins->sreg1, imm8, rot_amount);
2704 * gdb does not like encountering the hw breakpoint ins in the debugged code.
2705 * So instead of emitting a trap, we emit a call a C function and place a
2708 //*(int*)code = 0xef9f0001;
2711 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_INTERNAL_METHOD,
2712 (gpointer)"mono_break");
2713 code = emit_call_seq (cfg, code);
2717 case OP_DUMMY_STORE:
2718 case OP_NOT_REACHED:
2723 ARM_ADDS_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
2726 ARM_ADD_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
2730 ARM_ADCS_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
2733 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
2734 g_assert (imm8 >= 0);
2735 ARM_ADDS_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
2739 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
2740 g_assert (imm8 >= 0);
2741 ARM_ADD_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
2745 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
2746 g_assert (imm8 >= 0);
2747 ARM_ADCS_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
2750 ARM_ADD_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
2751 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, "OverflowException");
2753 case OP_IADD_OVF_UN:
2754 ARM_ADD_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
2755 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, "OverflowException");
2758 ARM_SUB_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
2759 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, "OverflowException");
2761 case OP_ISUB_OVF_UN:
2762 ARM_SUB_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
2763 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_TRUE, PPC_BR_EQ, "OverflowException");
2765 case OP_ADD_OVF_CARRY:
2766 ARM_ADCS_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
2767 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, "OverflowException");
2769 case OP_ADD_OVF_UN_CARRY:
2770 ARM_ADCS_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
2771 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, "OverflowException");
2773 case OP_SUB_OVF_CARRY:
2774 ARM_SBCS_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
2775 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, "OverflowException");
2777 case OP_SUB_OVF_UN_CARRY:
2778 ARM_SBCS_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
2779 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_TRUE, PPC_BR_EQ, "OverflowException");
2783 ARM_SUBS_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
2786 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
2787 g_assert (imm8 >= 0);
2788 ARM_SUBS_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
2791 ARM_SUB_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
2795 ARM_SBCS_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
2799 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
2800 g_assert (imm8 >= 0);
2801 ARM_SUB_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
2805 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
2806 g_assert (imm8 >= 0);
2807 ARM_SBCS_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
2809 case OP_ARM_RSBS_IMM:
2810 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
2811 g_assert (imm8 >= 0);
2812 ARM_RSBS_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
2814 case OP_ARM_RSC_IMM:
2815 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
2816 g_assert (imm8 >= 0);
2817 ARM_RSC_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
2820 ARM_AND_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
2824 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
2825 g_assert (imm8 >= 0);
2826 ARM_AND_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
2834 /* crappy ARM arch doesn't have a DIV instruction */
2835 g_assert_not_reached ();
2837 ARM_ORR_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
2841 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
2842 g_assert (imm8 >= 0);
2843 ARM_ORR_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
2846 ARM_EOR_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
2850 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
2851 g_assert (imm8 >= 0);
2852 ARM_EOR_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
2855 ARM_SHL_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
2860 ARM_SHL_IMM (code, ins->dreg, ins->sreg1, (ins->inst_imm & 0x1f));
2861 else if (ins->dreg != ins->sreg1)
2862 ARM_MOV_REG_REG (code, ins->dreg, ins->sreg1);
2865 ARM_SAR_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
2870 ARM_SAR_IMM (code, ins->dreg, ins->sreg1, (ins->inst_imm & 0x1f));
2871 else if (ins->dreg != ins->sreg1)
2872 ARM_MOV_REG_REG (code, ins->dreg, ins->sreg1);
2875 case OP_ISHR_UN_IMM:
2877 ARM_SHR_IMM (code, ins->dreg, ins->sreg1, (ins->inst_imm & 0x1f));
2878 else if (ins->dreg != ins->sreg1)
2879 ARM_MOV_REG_REG (code, ins->dreg, ins->sreg1);
2882 ARM_SHR_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
2885 ARM_MVN_REG_REG (code, ins->dreg, ins->sreg1);
2888 ARM_RSB_REG_IMM8 (code, ins->dreg, ins->sreg1, 0);
2891 if (ins->dreg == ins->sreg2)
2892 ARM_MUL_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
2894 ARM_MUL_REG_REG (code, ins->dreg, ins->sreg2, ins->sreg1);
2897 g_assert_not_reached ();
2900 /* FIXME: handle ovf/ sreg2 != dreg */
2901 ARM_MUL_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
2903 case OP_IMUL_OVF_UN:
2904 /* FIXME: handle ovf/ sreg2 != dreg */
2905 ARM_MUL_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
2908 code = mono_arm_emit_load_imm (code, ins->dreg, ins->inst_c0);
2911 /* Load the GOT offset */
2912 mono_add_patch_info (cfg, offset, (MonoJumpInfoType)ins->inst_i1, ins->inst_p0);
2913 ARM_LDR_IMM (code, ins->dreg, ARMREG_PC, 0);
2915 *(gpointer*)code = NULL;
2917 /* Load the value from the GOT */
2918 ARM_LDR_REG_REG (code, ins->dreg, ARMREG_PC, ins->dreg);
2920 case OP_ICONV_TO_I4:
2921 case OP_ICONV_TO_U4:
2923 if (ins->dreg != ins->sreg1)
2924 ARM_MOV_REG_REG (code, ins->dreg, ins->sreg1);
2927 int saved = ins->sreg2;
2928 if (ins->sreg2 == ARM_LSW_REG) {
2929 ARM_MOV_REG_REG (code, ARMREG_LR, ins->sreg2);
2932 if (ins->sreg1 != ARM_LSW_REG)
2933 ARM_MOV_REG_REG (code, ARM_LSW_REG, ins->sreg1);
2934 if (saved != ARM_MSW_REG)
2935 ARM_MOV_REG_REG (code, ARM_MSW_REG, saved);
2940 ARM_MVFD (code, ins->dreg, ins->sreg1);
2941 #elif defined(ARM_FPU_VFP)
2942 ARM_CPYD (code, ins->dreg, ins->sreg1);
2945 case OP_FCONV_TO_R4:
2947 ARM_MVFS (code, ins->dreg, ins->sreg1);
2948 #elif defined(ARM_FPU_VFP)
2949 ARM_CVTD (code, ins->dreg, ins->sreg1);
2950 ARM_CVTS (code, ins->dreg, ins->dreg);
2955 * Keep in sync with mono_arch_emit_epilog
2957 g_assert (!cfg->method->save_lmf);
2959 code = emit_load_volatile_arguments (cfg, code);
2961 code = emit_big_add (code, ARMREG_SP, cfg->frame_reg, cfg->stack_usage);
2962 ARM_POP_NWB (code, cfg->used_int_regs | ((1 << ARMREG_SP)) | ((1 << ARMREG_LR)));
2963 mono_add_patch_info (cfg, (guint8*) code - cfg->native_code, MONO_PATCH_INFO_METHOD_JUMP, ins->inst_p0);
2967 /* ensure ins->sreg1 is not NULL */
2968 ARM_LDR_IMM (code, ARMREG_LR, ins->sreg1, 0);
2972 if (ppc_is_imm16 (cfg->sig_cookie + cfg->stack_usage)) {
2973 ppc_addi (code, ppc_r11, cfg->frame_reg, cfg->sig_cookie + cfg->stack_usage);
2975 ppc_load (code, ppc_r11, cfg->sig_cookie + cfg->stack_usage);
2976 ppc_add (code, ppc_r11, cfg->frame_reg, ppc_r11);
2978 ppc_stw (code, ppc_r11, 0, ins->sreg1);
2988 call = (MonoCallInst*)ins;
2989 if (ins->flags & MONO_INST_HAS_METHOD)
2990 mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_METHOD, call->method);
2992 mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_ABS, call->fptr);
2993 code = emit_call_seq (cfg, code);
2994 code = emit_move_return_value (cfg, ins, code);
3000 case OP_VOIDCALL_REG:
3002 code = emit_call_reg (code, ins->sreg1);
3003 code = emit_move_return_value (cfg, ins, code);
3005 case OP_FCALL_MEMBASE:
3006 case OP_LCALL_MEMBASE:
3007 case OP_VCALL_MEMBASE:
3008 case OP_VCALL2_MEMBASE:
3009 case OP_VOIDCALL_MEMBASE:
3010 case OP_CALL_MEMBASE:
3011 g_assert (arm_is_imm12 (ins->inst_offset));
3012 g_assert (ins->sreg1 != ARMREG_LR);
3013 call = (MonoCallInst*)ins;
3014 if (call->method->klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
3015 ARM_ADD_REG_IMM8 (code, ARMREG_LR, ARMREG_PC, 4);
3016 ARM_LDR_IMM (code, ARMREG_PC, ins->sreg1, ins->inst_offset);
3017 if (cfg->compile_aot) {
3019 * We can't embed the method in the code stream in PIC code. Instead,
3020 * we put it in V5 in code emitted by mono_arch_emit_imt_argument (),
3021 * and embed NULL here to signal the IMT thunk that the call is made
3024 *((gpointer*)code) = NULL;
3026 *((gpointer*)code) = (gpointer)call->method;
3030 ARM_MOV_REG_REG (code, ARMREG_LR, ARMREG_PC);
3031 ARM_LDR_IMM (code, ARMREG_PC, ins->sreg1, ins->inst_offset);
3033 code = emit_move_return_value (cfg, ins, code);
3036 g_assert_not_reached ();
3039 /* keep alignment */
3040 int alloca_waste = cfg->param_area;
3043 /* round the size to 8 bytes */
3044 ARM_ADD_REG_IMM8 (code, ins->dreg, ins->sreg1, 7);
3045 ARM_BIC_REG_IMM8 (code, ins->dreg, ins->dreg, 7);
3047 ARM_ADD_REG_IMM8 (code, ins->dreg, ins->dreg, alloca_waste);
3048 ARM_SUB_REG_REG (code, ARMREG_SP, ARMREG_SP, ins->dreg);
3049 /* memzero the area: dreg holds the size, sp is the pointer */
3050 if (ins->flags & MONO_INST_INIT) {
3051 guint8 *start_loop, *branch_to_cond;
3052 ARM_MOV_REG_IMM8 (code, ARMREG_LR, 0);
3053 branch_to_cond = code;
3056 ARM_STR_REG_REG (code, ARMREG_LR, ARMREG_SP, ins->dreg);
3057 arm_patch (branch_to_cond, code);
3058 /* decrement by 4 and set flags */
3059 ARM_SUBS_REG_IMM8 (code, ins->dreg, ins->dreg, 4);
3060 ARM_B_COND (code, ARMCOND_GE, 0);
3061 arm_patch (code - 4, start_loop);
3063 ARM_ADD_REG_IMM8 (code, ins->dreg, ARMREG_SP, alloca_waste);
3067 if (ins->sreg1 != ARMREG_R0)
3068 ARM_MOV_REG_REG (code, ARMREG_R0, ins->sreg1);
3069 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_INTERNAL_METHOD,
3070 (gpointer)"mono_arch_throw_exception");
3071 code = emit_call_seq (cfg, code);
3075 if (ins->sreg1 != ARMREG_R0)
3076 ARM_MOV_REG_REG (code, ARMREG_R0, ins->sreg1);
3077 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_INTERNAL_METHOD,
3078 (gpointer)"mono_arch_rethrow_exception");
3079 code = emit_call_seq (cfg, code);
3082 case OP_START_HANDLER: {
3083 MonoInst *spvar = mono_find_spvar_for_region (cfg, bb->region);
3085 if (arm_is_imm12 (spvar->inst_offset)) {
3086 ARM_STR_IMM (code, ARMREG_LR, spvar->inst_basereg, spvar->inst_offset);
3088 code = mono_arm_emit_load_imm (code, ARMREG_IP, spvar->inst_offset);
3089 ARM_STR_REG_REG (code, ARMREG_LR, spvar->inst_basereg, ARMREG_IP);
3093 case OP_ENDFILTER: {
3094 MonoInst *spvar = mono_find_spvar_for_region (cfg, bb->region);
3096 if (ins->sreg1 != ARMREG_R0)
3097 ARM_MOV_REG_REG (code, ARMREG_R0, ins->sreg1);
3098 if (arm_is_imm12 (spvar->inst_offset)) {
3099 ARM_LDR_IMM (code, ARMREG_IP, spvar->inst_basereg, spvar->inst_offset);
3101 g_assert (ARMREG_IP != spvar->inst_basereg);
3102 code = mono_arm_emit_load_imm (code, ARMREG_IP, spvar->inst_offset);
3103 ARM_LDR_REG_REG (code, ARMREG_IP, spvar->inst_basereg, ARMREG_IP);
3105 ARM_MOV_REG_REG (code, ARMREG_PC, ARMREG_IP);
3108 case OP_ENDFINALLY: {
3109 MonoInst *spvar = mono_find_spvar_for_region (cfg, bb->region);
3111 if (arm_is_imm12 (spvar->inst_offset)) {
3112 ARM_LDR_IMM (code, ARMREG_IP, spvar->inst_basereg, spvar->inst_offset);
3114 g_assert (ARMREG_IP != spvar->inst_basereg);
3115 code = mono_arm_emit_load_imm (code, ARMREG_IP, spvar->inst_offset);
3116 ARM_LDR_REG_REG (code, ARMREG_IP, spvar->inst_basereg, ARMREG_IP);
3118 ARM_MOV_REG_REG (code, ARMREG_PC, ARMREG_IP);
3121 case OP_CALL_HANDLER:
3122 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_BB, ins->inst_target_bb);
3126 ins->inst_c0 = code - cfg->native_code;
3129 if (ins->flags & MONO_INST_BRLABEL) {
3130 /*if (ins->inst_i0->inst_c0) {
3132 //x86_jump_code (code, cfg->native_code + ins->inst_i0->inst_c0);
3134 mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_LABEL, ins->inst_i0);
3138 /*if (ins->inst_target_bb->native_offset) {
3140 //x86_jump_code (code, cfg->native_code + ins->inst_target_bb->native_offset);
3142 mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_BB, ins->inst_target_bb);
3148 ARM_MOV_REG_REG (code, ARMREG_PC, ins->sreg1);
3152 * In the normal case we have:
3153 * ldr pc, [pc, ins->sreg1 << 2]
3156 * ldr lr, [pc, ins->sreg1 << 2]
3158 * After follows the data.
3159 * FIXME: add aot support.
3162 mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_SWITCH, ins->inst_p0);
3163 max_len += 4 * GPOINTER_TO_INT (ins->klass);
3164 if (offset > (cfg->code_size - max_len - 16)) {
3165 cfg->code_size += max_len;
3166 cfg->code_size *= 2;
3167 cfg->native_code = g_realloc (cfg->native_code, cfg->code_size);
3168 code = cfg->native_code + offset;
3170 ARM_LDR_REG_REG_SHIFT (code, ARMREG_PC, ARMREG_PC, ins->sreg1, ARMSHIFT_LSL, 2);
3172 code += 4 * GPOINTER_TO_INT (ins->klass);
3176 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 0, ARMCOND_NE);
3177 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_EQ);
3181 ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
3182 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_LT);
3186 ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
3187 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_LO);
3191 ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
3192 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_GT);
3196 ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
3197 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_HI);
3199 case OP_COND_EXC_EQ:
3200 case OP_COND_EXC_NE_UN:
3201 case OP_COND_EXC_LT:
3202 case OP_COND_EXC_LT_UN:
3203 case OP_COND_EXC_GT:
3204 case OP_COND_EXC_GT_UN:
3205 case OP_COND_EXC_GE:
3206 case OP_COND_EXC_GE_UN:
3207 case OP_COND_EXC_LE:
3208 case OP_COND_EXC_LE_UN:
3209 EMIT_COND_SYSTEM_EXCEPTION (ins->opcode - OP_COND_EXC_EQ, ins->inst_p1);
3211 case OP_COND_EXC_IEQ:
3212 case OP_COND_EXC_INE_UN:
3213 case OP_COND_EXC_ILT:
3214 case OP_COND_EXC_ILT_UN:
3215 case OP_COND_EXC_IGT:
3216 case OP_COND_EXC_IGT_UN:
3217 case OP_COND_EXC_IGE:
3218 case OP_COND_EXC_IGE_UN:
3219 case OP_COND_EXC_ILE:
3220 case OP_COND_EXC_ILE_UN:
3221 EMIT_COND_SYSTEM_EXCEPTION (ins->opcode - OP_COND_EXC_IEQ, ins->inst_p1);
3224 case OP_COND_EXC_OV:
3225 case OP_COND_EXC_NC:
3226 case OP_COND_EXC_NO:
3227 case OP_COND_EXC_IC:
3228 case OP_COND_EXC_IOV:
3229 case OP_COND_EXC_INC:
3230 case OP_COND_EXC_INO:
3243 EMIT_COND_BRANCH (ins, ins->opcode - OP_IBEQ);
3246 /* floating point opcodes */
3249 if (cfg->compile_aot) {
3250 ARM_LDFD (code, ins->dreg, ARMREG_PC, 0);
3252 *(guint32*)code = ((guint32*)(ins->inst_p0))[0];
3254 *(guint32*)code = ((guint32*)(ins->inst_p0))[1];
3257 /* FIXME: we can optimize the imm load by dealing with part of
3258 * the displacement in LDFD (aligning to 512).
3260 code = mono_arm_emit_load_imm (code, ARMREG_LR, (guint32)ins->inst_p0);
3261 ARM_LDFD (code, ins->dreg, ARMREG_LR, 0);
3265 if (cfg->compile_aot) {
3266 ARM_LDFS (code, ins->dreg, ARMREG_PC, 0);
3268 *(guint32*)code = ((guint32*)(ins->inst_p0))[0];
3271 code = mono_arm_emit_load_imm (code, ARMREG_LR, (guint32)ins->inst_p0);
3272 ARM_LDFS (code, ins->dreg, ARMREG_LR, 0);
3275 case OP_STORER8_MEMBASE_REG:
3276 /* This is generated by the local regalloc pass which runs after the lowering pass */
3277 if (!arm_is_fpimm8 (ins->inst_offset)) {
3278 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_offset);
3279 ARM_STFD (code, ins->sreg1, ARMREG_LR, 0);
3281 ARM_STFD (code, ins->sreg1, ins->inst_destbasereg, ins->inst_offset);
3284 case OP_LOADR8_MEMBASE:
3285 /* This is generated by the local regalloc pass which runs after the lowering pass */
3286 if (!arm_is_fpimm8 (ins->inst_offset)) {
3287 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_offset);
3288 ARM_LDFD (code, ins->dreg, ARMREG_LR, 0);
3290 ARM_LDFD (code, ins->dreg, ins->inst_basereg, ins->inst_offset);
3293 case OP_STORER4_MEMBASE_REG:
3294 g_assert (arm_is_fpimm8 (ins->inst_offset));
3295 ARM_STFS (code, ins->sreg1, ins->inst_destbasereg, ins->inst_offset);
3297 case OP_LOADR4_MEMBASE:
3298 g_assert (arm_is_fpimm8 (ins->inst_offset));
3299 ARM_LDFS (code, ins->dreg, ins->inst_basereg, ins->inst_offset);
3301 case OP_ICONV_TO_R_UN: {
3303 tmpreg = ins->dreg == 0? 1: 0;
3304 ARM_CMP_REG_IMM8 (code, ins->sreg1, 0);
3305 ARM_FLTD (code, ins->dreg, ins->sreg1);
3306 ARM_B_COND (code, ARMCOND_GE, 8);
3307 /* save the temp register */
3308 ARM_SUB_REG_IMM8 (code, ARMREG_SP, ARMREG_SP, 8);
3309 ARM_STFD (code, tmpreg, ARMREG_SP, 0);
3310 ARM_LDFD (code, tmpreg, ARMREG_PC, 12);
3311 ARM_FPA_ADFD (code, ins->dreg, ins->dreg, tmpreg);
3312 ARM_LDFD (code, tmpreg, ARMREG_SP, 0);
3313 ARM_ADD_REG_IMM8 (code, ARMREG_SP, ARMREG_SP, 8);
3314 /* skip the constant pool */
3317 *(int*)code = 0x41f00000;
3322 * ldfltd ftemp, [pc, #8] 0x41f00000 0x00000000
3323 * adfltd fdest, fdest, ftemp
3327 case OP_ICONV_TO_R4:
3328 ARM_FLTS (code, ins->dreg, ins->sreg1);
3330 case OP_ICONV_TO_R8:
3331 ARM_FLTD (code, ins->dreg, ins->sreg1);
3333 #elif defined(ARM_FPU_VFP)
3335 if (cfg->compile_aot) {
3336 ARM_FLDD (code, ins->dreg, ARMREG_PC, 0);
3338 *(guint32*)code = ((guint32*)(ins->inst_p0))[0];
3340 *(guint32*)code = ((guint32*)(ins->inst_p0))[1];
3343 /* FIXME: we can optimize the imm load by dealing with part of
3344 * the displacement in LDFD (aligning to 512).
3346 code = mono_arm_emit_load_imm (code, ARMREG_LR, (guint32)ins->inst_p0);
3347 ARM_FLDD (code, ins->dreg, ARMREG_LR, 0);
3351 if (cfg->compile_aot) {
3352 ARM_FLDS (code, ins->dreg, ARMREG_PC, 0);
3354 *(guint32*)code = ((guint32*)(ins->inst_p0))[0];
3356 ARM_CVTS (code, ins->dreg, ins->dreg);
3358 code = mono_arm_emit_load_imm (code, ARMREG_LR, (guint32)ins->inst_p0);
3359 ARM_FLDS (code, ins->dreg, ARMREG_LR, 0);
3360 ARM_CVTS (code, ins->dreg, ins->dreg);
3363 case OP_STORER8_MEMBASE_REG:
3364 g_assert (arm_is_fpimm8 (ins->inst_offset));
3365 ARM_FSTD (code, ins->sreg1, ins->inst_destbasereg, ins->inst_offset);
3367 case OP_LOADR8_MEMBASE:
3368 g_assert (arm_is_fpimm8 (ins->inst_offset));
3369 ARM_FLDD (code, ins->dreg, ins->inst_basereg, ins->inst_offset);
3371 case OP_STORER4_MEMBASE_REG:
3372 g_assert (arm_is_fpimm8 (ins->inst_offset));
3373 ARM_FSTS (code, ins->sreg1, ins->inst_destbasereg, ins->inst_offset);
3375 case OP_LOADR4_MEMBASE:
3376 g_assert (arm_is_fpimm8 (ins->inst_offset));
3377 ARM_FLDS (code, ins->dreg, ins->inst_basereg, ins->inst_offset);
3379 case OP_ICONV_TO_R_UN: {
3380 g_assert_not_reached ();
3383 case OP_ICONV_TO_R4:
3384 g_assert_not_reached ();
3385 //ARM_FLTS (code, ins->dreg, ins->sreg1);
3387 case OP_ICONV_TO_R8:
3388 g_assert_not_reached ();
3389 //ARM_FLTD (code, ins->dreg, ins->sreg1);
3392 case OP_FCONV_TO_I1:
3393 code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 1, TRUE);
3395 case OP_FCONV_TO_U1:
3396 code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 1, FALSE);
3398 case OP_FCONV_TO_I2:
3399 code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 2, TRUE);
3401 case OP_FCONV_TO_U2:
3402 code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 2, FALSE);
3404 case OP_FCONV_TO_I4:
3406 code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 4, TRUE);
3408 case OP_FCONV_TO_U4:
3410 code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 4, FALSE);
3412 case OP_FCONV_TO_I8:
3413 case OP_FCONV_TO_U8:
3414 g_assert_not_reached ();
3415 /* Implemented as helper calls */
3417 case OP_LCONV_TO_R_UN:
3418 g_assert_not_reached ();
3419 /* Implemented as helper calls */
3421 case OP_LCONV_TO_OVF_I:
3422 case OP_LCONV_TO_OVF_I4_2: {
3423 guint32 *high_bit_not_set, *valid_negative, *invalid_negative, *valid_positive;
3425 * Valid ints: 0xffffffff:8000000 to 00000000:0x7f000000
3428 ARM_CMP_REG_IMM8 (code, ins->sreg1, 0);
3429 high_bit_not_set = code;
3430 ARM_B_COND (code, ARMCOND_GE, 0); /*branch if bit 31 of the lower part is not set*/
3432 ARM_CMN_REG_IMM8 (code, ins->sreg2, 1); /*This have the same effect as CMP reg, 0xFFFFFFFF */
3433 valid_negative = code;
3434 ARM_B_COND (code, ARMCOND_EQ, 0); /*branch if upper part == 0xFFFFFFFF (lower part has bit 31 set) */
3435 invalid_negative = code;
3436 ARM_B_COND (code, ARMCOND_AL, 0);
3438 arm_patch (high_bit_not_set, code);
3440 ARM_CMP_REG_IMM8 (code, ins->sreg2, 0);
3441 valid_positive = code;
3442 ARM_B_COND (code, ARMCOND_EQ, 0); /*branch if upper part == 0 (lower part has bit 31 clear)*/
3444 arm_patch (invalid_negative, code);
3445 EMIT_COND_SYSTEM_EXCEPTION_FLAGS (ARMCOND_AL, "OverflowException");
3447 arm_patch (valid_negative, code);
3448 arm_patch (valid_positive, code);
3450 if (ins->dreg != ins->sreg1)
3451 ARM_MOV_REG_REG (code, ins->dreg, ins->sreg1);
3456 ARM_FPA_ADFD (code, ins->dreg, ins->sreg1, ins->sreg2);
3459 ARM_FPA_SUFD (code, ins->dreg, ins->sreg1, ins->sreg2);
3462 ARM_FPA_MUFD (code, ins->dreg, ins->sreg1, ins->sreg2);
3465 ARM_FPA_DVFD (code, ins->dreg, ins->sreg1, ins->sreg2);
3468 ARM_MNFD (code, ins->dreg, ins->sreg1);
3470 #elif defined(ARM_FPU_VFP)
3472 ARM_VFP_ADDD (code, ins->dreg, ins->sreg1, ins->sreg2);
3475 ARM_VFP_SUBD (code, ins->dreg, ins->sreg1, ins->sreg2);
3478 ARM_VFP_MULD (code, ins->dreg, ins->sreg1, ins->sreg2);
3481 ARM_VFP_DIVD (code, ins->dreg, ins->sreg1, ins->sreg2);
3484 ARM_NEGD (code, ins->dreg, ins->sreg1);
3489 g_assert_not_reached ();
3493 ARM_FCMP (code, ARM_FPA_CMF, ins->sreg1, ins->sreg2);
3494 #elif defined(ARM_FPU_VFP)
3495 ARM_CMPD (code, ins->sreg1, ins->sreg2);
3500 ARM_FCMP (code, ARM_FPA_CMF, ins->sreg1, ins->sreg2);
3501 #elif defined(ARM_FPU_VFP)
3502 ARM_CMPD (code, ins->sreg1, ins->sreg2);
3504 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 0, ARMCOND_NE);
3505 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_EQ);
3509 ARM_FCMP (code, ARM_FPA_CMF, ins->sreg1, ins->sreg2);
3510 #elif defined(ARM_FPU_VFP)
3511 ARM_CMPD (code, ins->sreg1, ins->sreg2);
3513 ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
3514 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_MI);
3518 ARM_FCMP (code, ARM_FPA_CMF, ins->sreg1, ins->sreg2);
3519 #elif defined(ARM_FPU_VFP)
3520 ARM_CMPD (code, ins->sreg1, ins->sreg2);
3522 ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
3523 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_MI);
3524 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_VS);
3529 ARM_FCMP (code, ARM_FPA_CMF, ins->sreg2, ins->sreg1);
3530 #elif defined(ARM_FPU_VFP)
3531 ARM_CMPD (code, ins->sreg2, ins->sreg1);
3533 ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
3534 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_MI);
3539 ARM_FCMP (code, ARM_FPA_CMF, ins->sreg2, ins->sreg1);
3540 #elif defined(ARM_FPU_VFP)
3541 ARM_CMPD (code, ins->sreg2, ins->sreg1);
3543 ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
3544 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_MI);
3545 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_VS);
3547 /* ARM FPA flags table:
3548 * N Less than ARMCOND_MI
3549 * Z Equal ARMCOND_EQ
3550 * C Greater Than or Equal ARMCOND_CS
3551 * V Unordered ARMCOND_VS
3554 EMIT_COND_BRANCH (ins, OP_IBEQ - OP_IBEQ);
3557 EMIT_COND_BRANCH (ins, OP_IBNE_UN - OP_IBEQ);
3560 EMIT_COND_BRANCH_FLAGS (ins, ARMCOND_MI); /* N set */
3563 EMIT_COND_BRANCH_FLAGS (ins, ARMCOND_VS); /* V set */
3564 EMIT_COND_BRANCH_FLAGS (ins, ARMCOND_MI); /* N set */
3570 g_assert_not_reached ();
3573 EMIT_COND_BRANCH_FLAGS (ins, ARMCOND_CS);
3576 EMIT_COND_BRANCH_FLAGS (ins, ARMCOND_VS); /* V set */
3577 EMIT_COND_BRANCH_FLAGS (ins, ARMCOND_GE);
3582 if (ins->dreg != ins->sreg1)
3583 ARM_MVFD (code, ins->dreg, ins->sreg1);
3585 g_assert_not_reached ();
3590 g_warning ("unknown opcode %s in %s()\n", mono_inst_name (ins->opcode), __FUNCTION__);
3591 g_assert_not_reached ();
3594 if ((cfg->opt & MONO_OPT_BRANCH) && ((code - cfg->native_code - offset) > max_len)) {
3595 g_warning ("wrong maximal instruction length of instruction %s (expected %d, got %d)",
3596 mono_inst_name (ins->opcode), max_len, code - cfg->native_code - offset);
3597 g_assert_not_reached ();
3603 last_offset = offset;
3606 cfg->code_len = code - cfg->native_code;
/*
 * mono_arch_register_lowlevel_calls:
 * Registers the ARM-specific low-level helpers with the JIT icall table.
 * Only mono_arm_throw_exception is registered here; since it is never
 * invoked through a managed-signature wrapper, a dummy "void" signature
 * is sufficient (see the comment below).
 * NOTE(review): the return type and braces of this function are elided
 * in this listing excerpt.
 */
3610 mono_arch_register_lowlevel_calls (void)
3612 /* The signature doesn't matter */
3613 mono_register_jit_icall (mono_arm_throw_exception, "mono_arm_throw_exception", mono_create_icall_signature ("void"), TRUE);
/*
 * patch_lis_ori:
 * Rewrites a PowerPC "lis/ori" instruction pair at `ip` so it materializes
 * the 32-bit constant `val`: halfword [1] receives the high 16 bits and
 * halfword [3] the low 16 bits of the immediate.
 * NOTE(review): this looks like a leftover from the PPC backend this file
 * was derived from — every use below sits behind g_assert_not_reached (),
 * so it should be dead on ARM; confirm before removing.
 */
3616 #define patch_lis_ori(ip,val) do {\
3617 guint16 *__lis_ori = (guint16*)(ip); \
3618 __lis_ori [1] = (((guint32)(val)) >> 16) & 0xffff; \
3619 __lis_ori [3] = ((guint32)(val)) & 0xffff; \
/*
 * mono_arch_patch_code:
 * Walks the jump-info list `ji` for `method` and applies each patch to the
 * freshly emitted native `code`.  SWITCH patches fill in the jump table that
 * was inlined into the code stream; most other kinds are resolved through
 * mono_resolve_patch_target () and applied via arm_patch ().  The cases
 * guarded by g_assert_not_reached () are PPC-era leftovers (lis/ori
 * patching) that are not expected to occur on ARM.
 * NOTE(review): braces and continue/break lines are elided in this listing
 * excerpt, so the exact control flow between cases is not fully visible.
 */
3623 mono_arch_patch_code (MonoMethod *method, MonoDomain *domain, guint8 *code, MonoJumpInfo *ji, gboolean run_cctors)
3625 MonoJumpInfo *patch_info;
3626 gboolean compile_aot = !run_cctors;
3628 for (patch_info = ji; patch_info; patch_info = patch_info->next) {
3629 unsigned char *ip = patch_info->ip.i + code;
3630 const unsigned char *target;
/* JIT (non-AOT) SWITCH: the jump table lives in the code stream itself,
 * two instructions (8 bytes) past the patch site; store absolute targets. */
3632 if (patch_info->type == MONO_PATCH_INFO_SWITCH && !compile_aot) {
3633 gpointer *jt = (gpointer*)(ip + 8);
3635 /* jt is the inlined jump table, 2 instructions after ip
3636 * In the normal case we store the absolute addresses,
3637 * otherwise the displacements.
3639 for (i = 0; i < patch_info->data.table->table_size; i++)
3640 jt [i] = code + (int)patch_info->data.table->table [i];
3643 target = mono_resolve_patch_target (method, domain, code, patch_info, run_cctors);
3646 switch (patch_info->type) {
3647 case MONO_PATCH_INFO_BB:
3648 case MONO_PATCH_INFO_LABEL:
3651 /* No need to patch these */
/* Remaining kinds: most of these cases assert unreachable — the
 * patch_lis_ori/raw-store bodies below are PPC leftovers kept for
 * reference and never taken on ARM. */
3656 switch (patch_info->type) {
3657 case MONO_PATCH_INFO_IP:
3658 g_assert_not_reached ();
3659 patch_lis_ori (ip, ip);
3661 case MONO_PATCH_INFO_METHOD_REL:
3662 g_assert_not_reached ();
3663 *((gpointer *)(ip)) = code + patch_info->data.offset;
3665 case MONO_PATCH_INFO_METHODCONST:
3666 case MONO_PATCH_INFO_CLASS:
3667 case MONO_PATCH_INFO_IMAGE:
3668 case MONO_PATCH_INFO_FIELD:
3669 case MONO_PATCH_INFO_VTABLE:
3670 case MONO_PATCH_INFO_IID:
3671 case MONO_PATCH_INFO_SFLDA:
3672 case MONO_PATCH_INFO_LDSTR:
3673 case MONO_PATCH_INFO_TYPE_FROM_HANDLE:
3674 case MONO_PATCH_INFO_LDTOKEN:
3675 g_assert_not_reached ();
3676 /* from OP_AOTCONST : lis + ori */
3677 patch_lis_ori (ip, target);
3679 case MONO_PATCH_INFO_R4:
3680 case MONO_PATCH_INFO_R8:
3681 g_assert_not_reached ();
3682 *((gconstpointer *)(ip + 2)) = patch_info->data.target;
3684 case MONO_PATCH_INFO_EXC_NAME:
3685 g_assert_not_reached ();
3686 *((gconstpointer *)(ip + 1)) = patch_info->data.name;
3688 case MONO_PATCH_INFO_NONE:
3689 case MONO_PATCH_INFO_BB_OVF:
3690 case MONO_PATCH_INFO_EXC_OVF:
3691 /* everything is dealt with at epilog output time */
/* Default path: rewrite the branch/call at ip to reach target. */
3696 arm_patch (ip, target);
3701 * Stack frame layout:
3703 * ------------------- fp
3704 * MonoLMF structure or saved registers
3705 * -------------------
3707 * -------------------
3709 * -------------------
3710 * optional 8 bytes for tracing
3711 * -------------------
3712 * param area size is cfg->param_area
3713 * ------------------- sp
3716 mono_arch_emit_prolog (MonoCompile *cfg)
3718 MonoMethod *method = cfg->method;
3720 MonoMethodSignature *sig;
3722 int alloc_size, pos, max_offset, i, rot_amount;
3729 if (mono_jit_trace_calls != NULL && mono_trace_eval (method))
3732 sig = mono_method_signature (method);
3733 cfg->code_size = 256 + sig->param_count * 20;
3734 code = cfg->native_code = g_malloc (cfg->code_size);
3736 ARM_MOV_REG_REG (code, ARMREG_IP, ARMREG_SP);
3738 alloc_size = cfg->stack_offset;
3741 if (!method->save_lmf) {
3742 ARM_PUSH (code, (cfg->used_int_regs | (1 << ARMREG_IP) | (1 << ARMREG_LR)));
3743 prev_sp_offset = 8; /* ip and lr */
3744 for (i = 0; i < 16; ++i) {
3745 if (cfg->used_int_regs & (1 << i))
3746 prev_sp_offset += 4;
3749 ARM_PUSH (code, 0x5ff0);
3750 prev_sp_offset = 4 * 10; /* all but r0-r3, sp and pc */
3751 pos += sizeof (MonoLMF) - prev_sp_offset;
3755 // align to MONO_ARCH_FRAME_ALIGNMENT bytes
3756 if (alloc_size & (MONO_ARCH_FRAME_ALIGNMENT - 1)) {
3757 alloc_size += MONO_ARCH_FRAME_ALIGNMENT - 1;
3758 alloc_size &= ~(MONO_ARCH_FRAME_ALIGNMENT - 1);
3761 /* the stack used in the pushed regs */
3762 if (prev_sp_offset & 4)
3764 cfg->stack_usage = alloc_size;
3766 if ((i = mono_arm_is_rotated_imm8 (alloc_size, &rot_amount)) >= 0) {
3767 ARM_SUB_REG_IMM (code, ARMREG_SP, ARMREG_SP, i, rot_amount);
3769 code = mono_arm_emit_load_imm (code, ARMREG_IP, alloc_size);
3770 ARM_SUB_REG_REG (code, ARMREG_SP, ARMREG_SP, ARMREG_IP);
3773 if (cfg->frame_reg != ARMREG_SP)
3774 ARM_MOV_REG_REG (code, cfg->frame_reg, ARMREG_SP);
3775 //g_print ("prev_sp_offset: %d, alloc_size:%d\n", prev_sp_offset, alloc_size);
3776 prev_sp_offset += alloc_size;
3778 /* compute max_offset in order to use short forward jumps
3779 * we could skip do it on arm because the immediate displacement
3780 * for jumps is large enough, it may be useful later for constant pools
3783 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
3784 MonoInst *ins = bb->code;
3785 bb->max_offset = max_offset;
3787 if (cfg->prof_options & MONO_PROFILE_COVERAGE)
3790 MONO_BB_FOR_EACH_INS (bb, ins)
3791 max_offset += ((guint8 *)ins_get_spec (ins->opcode))[MONO_INST_LEN];
3794 /* load arguments allocated to register from the stack */
3797 cinfo = calculate_sizes (sig, sig->pinvoke);
3799 if (MONO_TYPE_ISSTRUCT (sig->ret)) {
3800 ArgInfo *ainfo = &cinfo->ret;
3801 inst = cfg->vret_addr;
3802 g_assert (arm_is_imm12 (inst->inst_offset));
3803 ARM_STR_IMM (code, ainfo->reg, inst->inst_basereg, inst->inst_offset);
3805 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
3806 ArgInfo *ainfo = cinfo->args + i;
3807 inst = cfg->args [pos];
3809 if (cfg->verbose_level > 2)
3810 g_print ("Saving argument %d (type: %d)\n", i, ainfo->regtype);
3811 if (inst->opcode == OP_REGVAR) {
3812 if (ainfo->regtype == RegTypeGeneral)
3813 ARM_MOV_REG_REG (code, inst->dreg, ainfo->reg);
3814 else if (ainfo->regtype == RegTypeFP) {
3815 g_assert_not_reached ();
3816 } else if (ainfo->regtype == RegTypeBase) {
3817 if (arm_is_imm12 (prev_sp_offset + ainfo->offset)) {
3818 ARM_LDR_IMM (code, inst->dreg, ARMREG_SP, (prev_sp_offset + ainfo->offset));
3820 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
3821 ARM_LDR_REG_REG (code, inst->dreg, ARMREG_SP, ARMREG_IP);
3824 g_assert_not_reached ();
3826 if (cfg->verbose_level > 2)
3827 g_print ("Argument %d assigned to register %s\n", pos, mono_arch_regname (inst->dreg));
3829 /* the argument should be put on the stack: FIXME handle size != word */
3830 if (ainfo->regtype == RegTypeGeneral) {
3831 switch (ainfo->size) {
3833 if (arm_is_imm12 (inst->inst_offset))
3834 ARM_STRB_IMM (code, ainfo->reg, inst->inst_basereg, inst->inst_offset);
3836 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
3837 ARM_STRB_REG_REG (code, ainfo->reg, inst->inst_basereg, ARMREG_IP);
3841 if (arm_is_imm8 (inst->inst_offset)) {
3842 ARM_STRH_IMM (code, ainfo->reg, inst->inst_basereg, inst->inst_offset);
3844 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
3845 ARM_STRH_REG_REG (code, ainfo->reg, inst->inst_basereg, ARMREG_IP);
3849 g_assert (arm_is_imm12 (inst->inst_offset));
3850 ARM_STR_IMM (code, ainfo->reg, inst->inst_basereg, inst->inst_offset);
3851 g_assert (arm_is_imm12 (inst->inst_offset + 4));
3852 ARM_STR_IMM (code, ainfo->reg + 1, inst->inst_basereg, inst->inst_offset + 4);
3855 if (arm_is_imm12 (inst->inst_offset)) {
3856 ARM_STR_IMM (code, ainfo->reg, inst->inst_basereg, inst->inst_offset);
3858 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
3859 ARM_STR_REG_REG (code, ainfo->reg, inst->inst_basereg, ARMREG_IP);
3863 } else if (ainfo->regtype == RegTypeBaseGen) {
3864 g_assert (arm_is_imm12 (prev_sp_offset + ainfo->offset));
3865 g_assert (arm_is_imm12 (inst->inst_offset));
3866 ARM_LDR_IMM (code, ARMREG_LR, ARMREG_SP, (prev_sp_offset + ainfo->offset));
3867 ARM_STR_IMM (code, ARMREG_LR, inst->inst_basereg, inst->inst_offset + 4);
3868 ARM_STR_IMM (code, ARMREG_R3, inst->inst_basereg, inst->inst_offset);
3869 } else if (ainfo->regtype == RegTypeBase) {
3870 if (arm_is_imm12 (prev_sp_offset + ainfo->offset)) {
3871 ARM_LDR_IMM (code, ARMREG_LR, ARMREG_SP, (prev_sp_offset + ainfo->offset));
3873 code = mono_arm_emit_load_imm (code, ARMREG_IP, prev_sp_offset + ainfo->offset);
3874 ARM_LDR_REG_REG (code, ARMREG_LR, ARMREG_SP, ARMREG_IP);
3877 switch (ainfo->size) {
3879 if (arm_is_imm8 (inst->inst_offset)) {
3880 ARM_STRB_IMM (code, ARMREG_LR, inst->inst_basereg, inst->inst_offset);
3882 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
3883 ARM_STRB_REG_REG (code, ARMREG_LR, inst->inst_basereg, ARMREG_IP);
3887 if (arm_is_imm8 (inst->inst_offset)) {
3888 ARM_STRH_IMM (code, ARMREG_LR, inst->inst_basereg, inst->inst_offset);
3890 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
3891 ARM_STRH_REG_REG (code, ARMREG_LR, inst->inst_basereg, ARMREG_IP);
3895 if (arm_is_imm12 (inst->inst_offset)) {
3896 ARM_STR_IMM (code, ARMREG_LR, inst->inst_basereg, inst->inst_offset);
3898 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
3899 ARM_STR_REG_REG (code, ARMREG_LR, inst->inst_basereg, ARMREG_IP);
3901 if (arm_is_imm12 (prev_sp_offset + ainfo->offset + 4)) {
3902 ARM_LDR_IMM (code, ARMREG_LR, ARMREG_SP, (prev_sp_offset + ainfo->offset + 4));
3904 code = mono_arm_emit_load_imm (code, ARMREG_IP, prev_sp_offset + ainfo->offset + 4);
3905 ARM_LDR_REG_REG (code, ARMREG_LR, ARMREG_SP, ARMREG_IP);
3907 if (arm_is_imm12 (inst->inst_offset + 4)) {
3908 ARM_STR_IMM (code, ARMREG_LR, inst->inst_basereg, inst->inst_offset + 4);
3910 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset + 4);
3911 ARM_STR_REG_REG (code, ARMREG_LR, inst->inst_basereg, ARMREG_IP);
3915 if (arm_is_imm12 (inst->inst_offset)) {
3916 ARM_STR_IMM (code, ARMREG_LR, inst->inst_basereg, inst->inst_offset);
3918 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
3919 ARM_STR_REG_REG (code, ARMREG_LR, inst->inst_basereg, ARMREG_IP);
3923 } else if (ainfo->regtype == RegTypeFP) {
3924 g_assert_not_reached ();
3925 } else if (ainfo->regtype == RegTypeStructByVal) {
3926 int doffset = inst->inst_offset;
3930 if (mono_class_from_mono_type (inst->inst_vtype))
3931 size = mono_class_native_size (mono_class_from_mono_type (inst->inst_vtype), NULL);
3932 for (cur_reg = 0; cur_reg < ainfo->size; ++cur_reg) {
3933 if (arm_is_imm12 (doffset)) {
3934 ARM_STR_IMM (code, ainfo->reg + cur_reg, inst->inst_basereg, doffset);
3936 code = mono_arm_emit_load_imm (code, ARMREG_IP, doffset);
3937 ARM_STR_REG_REG (code, ainfo->reg + cur_reg, inst->inst_basereg, ARMREG_IP);
3939 soffset += sizeof (gpointer);
3940 doffset += sizeof (gpointer);
3942 if (ainfo->vtsize) {
3943 /* FIXME: handle overrun! with struct sizes not multiple of 4 */
3944 //g_print ("emit_memcpy (prev_sp_ofs: %d, ainfo->offset: %d, soffset: %d)\n", prev_sp_offset, ainfo->offset, soffset);
3945 code = emit_memcpy (code, ainfo->vtsize * sizeof (gpointer), inst->inst_basereg, doffset, ARMREG_SP, prev_sp_offset + ainfo->offset);
3947 } else if (ainfo->regtype == RegTypeStructByAddr) {
3948 g_assert_not_reached ();
3949 /* FIXME: handle overrun! with struct sizes not multiple of 4 */
3950 code = emit_memcpy (code, ainfo->vtsize * sizeof (gpointer), inst->inst_basereg, inst->inst_offset, ainfo->reg, 0);
3952 g_assert_not_reached ();
3957 if (method->save_lmf) {
3959 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_INTERNAL_METHOD,
3960 (gpointer)"mono_get_lmf_addr");
3961 code = emit_call_seq (cfg, code);
3962 /* we build the MonoLMF structure on the stack - see mini-arm.h */
3963 /* lmf_offset is the offset from the previous stack pointer,
3964 * alloc_size is the total stack space allocated, so the offset
3965 * of MonoLMF from the current stack ptr is alloc_size - lmf_offset.
3966 * The pointer to the struct is put in r1 (new_lmf).
3967 * r2 is used as scratch
3968 * The callee-saved registers are already in the MonoLMF structure
3970 code = emit_big_add (code, ARMREG_R1, ARMREG_SP, alloc_size - lmf_offset);
3971 /* r0 is the result from mono_get_lmf_addr () */
3972 ARM_STR_IMM (code, ARMREG_R0, ARMREG_R1, G_STRUCT_OFFSET (MonoLMF, lmf_addr));
3973 /* new_lmf->previous_lmf = *lmf_addr */
3974 ARM_LDR_IMM (code, ARMREG_R2, ARMREG_R0, G_STRUCT_OFFSET (MonoLMF, previous_lmf));
3975 ARM_STR_IMM (code, ARMREG_R2, ARMREG_R1, G_STRUCT_OFFSET (MonoLMF, previous_lmf));
3976 /* *(lmf_addr) = r1 */
3977 ARM_STR_IMM (code, ARMREG_R1, ARMREG_R0, G_STRUCT_OFFSET (MonoLMF, previous_lmf));
3978 /* Skip method (only needed for trampoline LMF frames) */
3979 ARM_STR_IMM (code, ARMREG_SP, ARMREG_R1, G_STRUCT_OFFSET (MonoLMF, ebp));
3980 /* save the current IP */
3981 ARM_MOV_REG_REG (code, ARMREG_R2, ARMREG_PC);
3982 ARM_STR_IMM (code, ARMREG_R2, ARMREG_R1, G_STRUCT_OFFSET (MonoLMF, eip));
3986 code = mono_arch_instrument_prolog (cfg, mono_trace_enter_method, code, TRUE);
3988 cfg->code_len = code - cfg->native_code;
3989 g_assert (cfg->code_len < cfg->code_size);
/*
 * mono_arch_emit_epilog:
 *
 *   Emit the method epilog: optional leave-method tracing, unlinking of the
 * LMF (last managed frame) when method->save_lmf is set, stack unwinding and
 * the final pop that restores the callee-saved registers and returns by
 * popping pc.
 *
 * NOTE(review): this extraction is garbled — the original source line numbers
 * are fused into each line and several lines (return type, braces, else
 * branches) are missing.  Do not compile as-is; restore from upstream.
 */
3996 mono_arch_emit_epilog (MonoCompile *cfg)
3998 MonoMethod *method = cfg->method;
3999 int pos, i, rot_amount;
/* worst-case byte estimate for the code emitted below */
4000 int max_epilog_size = 16 + 20*4;
4003 if (cfg->method->save_lmf)
4004 max_epilog_size += 128;
4006 if (mono_jit_trace_calls != NULL)
4007 max_epilog_size += 50;
4009 if (cfg->prof_options & MONO_PROFILE_ENTER_LEAVE)
4010 max_epilog_size += 50;
/* grow the native code buffer until the epilog is guaranteed to fit */
4012 while (cfg->code_len + max_epilog_size > (cfg->code_size - 16)) {
4013 cfg->code_size *= 2;
4014 cfg->native_code = g_realloc (cfg->native_code, cfg->code_size);
4015 mono_jit_stats.code_reallocs++;
4019 * Keep in sync with OP_JMP
4021 code = cfg->native_code + cfg->code_len;
4023 if (mono_jit_trace_calls != NULL && mono_trace_eval (method)) {
4024 code = mono_arch_instrument_epilog (cfg, mono_trace_leave_method, code, TRUE);
4028 if (method->save_lmf) {
4030 /* all but r0-r3, sp and pc */
4031 pos += sizeof (MonoLMF) - (4 * 10);
4033 /* r2 contains the pointer to the current LMF */
4034 code = emit_big_add (code, ARMREG_R2, cfg->frame_reg, cfg->stack_usage - lmf_offset);
4035 /* ip = previous_lmf */
4036 ARM_LDR_IMM (code, ARMREG_IP, ARMREG_R2, G_STRUCT_OFFSET (MonoLMF, previous_lmf));
/* lr = lmf->lmf_addr */
4038 ARM_LDR_IMM (code, ARMREG_LR, ARMREG_R2, G_STRUCT_OFFSET (MonoLMF, lmf_addr));
4039 /* *(lmf_addr) = previous_lmf */
4040 ARM_STR_IMM (code, ARMREG_IP, ARMREG_LR, G_STRUCT_OFFSET (MonoLMF, previous_lmf));
4041 /* FIXME: speedup: there is no actual need to restore the registers if
4042 * we didn't actually change them (idea from Zoltan).
4045 /* point sp at the registers to restore: 10 is 14 -4, because we skip r0-r3 */
4046 ARM_ADD_REG_IMM8 (code, ARMREG_SP, ARMREG_R2, (sizeof (MonoLMF) - 10 * sizeof (gulong)));
4047 ARM_POP_NWB (code, 0xaff0); /* restore ip to sp and lr to pc */
/* no LMF: unwind by adding the frame size back to sp, then pop the saved regs */
4049 if ((i = mono_arm_is_rotated_imm8 (cfg->stack_usage, &rot_amount)) >= 0) {
4050 ARM_ADD_REG_IMM (code, ARMREG_SP, cfg->frame_reg, i, rot_amount);
4052 code = mono_arm_emit_load_imm (code, ARMREG_IP, cfg->stack_usage);
4053 ARM_ADD_REG_REG (code, ARMREG_SP, ARMREG_SP, ARMREG_IP);
4055 /* FIXME: add v4 thumb interworking support */
4056 ARM_POP_NWB (code, cfg->used_int_regs | ((1 << ARMREG_SP) | (1 << ARMREG_PC)));
4059 cfg->code_len = code - cfg->native_code;
4061 g_assert (cfg->code_len < cfg->code_size);
4065 /* remove once throw_exception_by_name is eliminated */
4067 exception_id_by_name (const char *name)
4069 if (strcmp (name, "IndexOutOfRangeException") == 0)
4070 return MONO_EXC_INDEX_OUT_OF_RANGE;
4071 if (strcmp (name, "OverflowException") == 0)
4072 return MONO_EXC_OVERFLOW;
4073 if (strcmp (name, "ArithmeticException") == 0)
4074 return MONO_EXC_ARITHMETIC;
4075 if (strcmp (name, "DivideByZeroException") == 0)
4076 return MONO_EXC_DIVIDE_BY_ZERO;
4077 if (strcmp (name, "InvalidCastException") == 0)
4078 return MONO_EXC_INVALID_CAST;
4079 if (strcmp (name, "NullReferenceException") == 0)
4080 return MONO_EXC_NULL_REF;
4081 if (strcmp (name, "ArrayTypeMismatchException") == 0)
4082 return MONO_EXC_ARRAY_TYPE_MISMATCH;
4083 g_error ("Unknown intrinsic exception %s\n", name);
/*
 * mono_arch_emit_exceptions:
 *
 *   Append the out-of-line exception-throwing stubs referenced by the
 * method's MONO_PATCH_INFO_EXC patch entries: each distinct corlib
 * exception gets one stub that loads its type token and calls
 * mono_arch_throw_corlib_exception; later throws of the same exception
 * are just branches patched to the existing stub.
 *
 * NOTE(review): garbled extraction — original line numbers are fused into
 * the code and several lines (braces, else branches, the stub's trailing
 * instructions) are missing; restore from upstream before compiling.
 */
4088 mono_arch_emit_exceptions (MonoCompile *cfg)
4090 MonoJumpInfo *patch_info;
/* one slot per intrinsic exception id; filled as stubs are emitted */
4093 const guint8* exc_throw_pos [MONO_EXC_INTRINS_NUM] = {NULL};
4094 guint8 exc_throw_found [MONO_EXC_INTRINS_NUM] = {0};
4095 int max_epilog_size = 50;
4097 /* count the number of exception infos */
4100 * make sure we have enough space for exceptions
4102 for (patch_info = cfg->patch_info; patch_info; patch_info = patch_info->next) {
4103 if (patch_info->type == MONO_PATCH_INFO_EXC) {
4104 i = exception_id_by_name (patch_info->data.target);
4105 if (!exc_throw_found [i]) {
/* 32 bytes reserved per distinct exception stub */
4106 max_epilog_size += 32;
4107 exc_throw_found [i] = TRUE;
4112 while (cfg->code_len + max_epilog_size > (cfg->code_size - 16)) {
4113 cfg->code_size *= 2;
4114 cfg->native_code = g_realloc (cfg->native_code, cfg->code_size);
4115 mono_jit_stats.code_reallocs++;
4118 code = cfg->native_code + cfg->code_len;
4120 /* add code to raise exceptions */
4121 for (patch_info = cfg->patch_info; patch_info; patch_info = patch_info->next) {
4122 switch (patch_info->type) {
4123 case MONO_PATCH_INFO_EXC: {
4124 MonoClass *exc_class;
4125 unsigned char *ip = patch_info->ip.i + cfg->native_code;
4127 i = exception_id_by_name (patch_info->data.target);
/* stub already emitted for this exception: just retarget the branch */
4128 if (exc_throw_pos [i]) {
4129 arm_patch (ip, exc_throw_pos [i]);
4130 patch_info->type = MONO_PATCH_INFO_NONE;
4133 exc_throw_pos [i] = code;
4135 arm_patch (ip, code);
4137 exc_class = mono_class_from_name (mono_defaults.corlib, "System", patch_info->data.name);
4138 g_assert (exc_class);
/* r1 = throw site return address, r0 = type token loaded pc-relative */
4140 ARM_MOV_REG_REG (code, ARMREG_R1, ARMREG_LR);
4141 ARM_LDR_IMM (code, ARMREG_R0, ARMREG_PC, 0);
/* repurpose the patch entry as the call to the throw helper */
4142 patch_info->type = MONO_PATCH_INFO_INTERNAL_METHOD;
4143 patch_info->data.name = "mono_arch_throw_corlib_exception";
4144 patch_info->ip.i = code - cfg->native_code;
4146 *(guint32*)(gpointer)code = exc_class->type_token;
4156 cfg->code_len = code - cfg->native_code;
4158 g_assert (cfg->code_len < cfg->code_size);
/* Per-thread JIT TLS setup/teardown hooks.  NOTE(review): the bodies were
 * lost in this garbled extraction — presumably empty on ARM; confirm against
 * upstream before compiling. */
4163 mono_arch_setup_jit_tls_data (MonoJitTlsData *tls)
4168 mono_arch_free_jit_tls_data (MonoJitTlsData *tls)
/*
 * mono_arch_emit_this_vret_args:
 *
 *   Emit the moves for the implicit call arguments: the 'this' pointer
 * (normally r0, shifted to r1 when a valuetype return buffer address takes
 * r0) and the valuetype return address (always r0).
 *
 * NOTE(review): garbled extraction — the guard around the this_dreg
 * reassignment and the vt_reg != -1 branch were lost; restore from upstream.
 */
4173 mono_arch_emit_this_vret_args (MonoCompile *cfg, MonoCallInst *inst, int this_reg, int this_type, int vt_reg)
4176 int this_dreg = ARMREG_R0;
/* presumably guarded by (vt_reg != -1): the vtype return address uses r0,
 * pushing 'this' to r1 — TODO confirm against upstream */
4179 this_dreg = ARMREG_R1;
4181 /* add the this argument */
4182 if (this_reg != -1) {
4184 MONO_INST_NEW (cfg, this, OP_MOVE);
4185 this->type = this_type;
4186 this->sreg1 = this_reg;
4187 this->dreg = mono_regstate_next_int (cfg->rs);
4188 mono_bblock_add_inst (cfg->cbb, this);
/* bind the move's destination vreg to the chosen argument register */
4189 mono_call_inst_add_outarg_reg (cfg, inst, this->dreg, this_dreg, FALSE);
/* valuetype return buffer address (presumably under if (vt_reg != -1)) */
4194 MONO_INST_NEW (cfg, vtarg, OP_MOVE);
4195 vtarg->type = STACK_MP;
4196 vtarg->sreg1 = vt_reg;
4197 vtarg->dreg = mono_regstate_next_int (cfg->rs);
4198 mono_bblock_add_inst (cfg->cbb, vtarg);
4199 mono_call_inst_add_outarg_reg (cfg, inst, vtarg->dreg, ARMREG_R0, FALSE);
/*
 * NOTE(review): the bodies of the following mono_arch_* entry points were
 * lost in this garbled extraction — only the signature lines remain.
 * Restore from upstream before compiling.
 */
4204 mono_arch_get_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
4210 mono_arch_emit_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
4217 mono_arch_print_tree (MonoInst *tree, int arity)
4222 MonoInst* mono_arch_get_domain_intrinsic (MonoCompile* cfg)
4228 mono_arch_get_thread_intrinsic (MonoCompile* cfg)
4234 mono_arch_get_patch_offset (guint8 *code)
4241 mono_arch_flush_register_windows (void)
/* Encode the (word-aligned, <= 64KB*4) stack usage into the upper bits of
 * jit_info->used_regs. */
4246 mono_arch_fixup_jinfo (MonoCompile *cfg)
4248 /* max encoded stack usage is 64KB * 4 */
4249 g_assert ((cfg->stack_usage & ~(0xffff << 2)) == 0);
4250 cfg->jit_info->used_regs |= cfg->stack_usage << 14;
4253 #ifdef MONO_ARCH_HAVE_IMT
/*
 * mono_arch_emit_imt_argument:
 *
 *   For AOT compilation, materialize the called method as an OP_AOTCONST and
 * pass it in V5 so the IMT thunk can identify it at runtime (see
 * mono_arch_find_imt_method below).
 *
 * NOTE(review): garbled extraction — braces and the MonoInst declaration
 * line are missing; restore from upstream.
 */
4256 mono_arch_emit_imt_argument (MonoCompile *cfg, MonoCallInst *call)
4258 if (cfg->compile_aot) {
4259 int method_reg = mono_regstate_next_int (cfg->rs);
4262 MONO_INST_NEW (cfg, ins, OP_AOTCONST);
4263 ins->dreg = method_reg;
4264 ins->inst_p0 = call->method;
4265 ins->inst_c1 = MONO_PATCH_INFO_METHODCONST;
4266 MONO_ADD_INS (cfg->cbb, ins);
/* V5 is the fixed register the IMT thunk inspects in the AOT case */
4268 mono_call_inst_add_outarg_reg (cfg, call, method_reg, ARMREG_V5, FALSE);
/*
 * mono_arch_find_imt_method:
 *
 *   Recover the interface method from an IMT call site: CODE points into the
 * stream where a pc-relative load precedes an embedded method-pointer word.
 * A 0 word means AOT code, where the method was passed in V5 instead (see
 * mono_arch_emit_imt_argument above).
 */
4273 mono_arch_find_imt_method (gpointer *regs, guint8 *code)
4275 guint32 *code_ptr = (guint32*)code;
4277 /* The IMT value is stored in the code stream right after the LDC instruction. */
4278 if (!IS_LDR_PC (code_ptr [0])) {
4279 g_warning ("invalid code stream, instruction before IMT value is not a LDC in %s() (code %p value 0: 0x%x -1: 0x%x -2: 0x%x)", __FUNCTION__, code, code_ptr [2], code_ptr [1], code_ptr [0]);
4280 g_assert (IS_LDR_PC (code_ptr [0]));
4282 if (code_ptr [1] == 0)
4283 /* This is AOTed code, the IMT method is in V5 */
4284 return (MonoMethod*)regs [ARMREG_V5];
/* JITted code: the method pointer is the word embedded after the load */
4286 return (MonoMethod*) code_ptr [1];
/* Return the 'this' argument of the call whose registers are in REGS, by
 * delegating to the generic this-arg extraction for METHOD's signature. */
4290 mono_arch_find_this_argument (gpointer *regs, MonoMethod *method, MonoGenericSharingContext *gsctx)
4292 return mono_arch_get_this_arg_from_call (gsctx, mono_method_signature (method), (gssize*)regs, NULL);
/* Set to 1 to emit an assertion when an IMT thunk is reached with a method
 * it does not handle. */
4296 #define ENABLE_WRONG_METHOD_CHECK 0
/* Byte sizes of the code fragments emitted by mono_arch_build_imt_thunk ()
 * below, used to pre-compute each entry's chunk_size. */
4297 #define BASE_SIZE (6 * 4)
4298 #define BSEARCH_ENTRY_SIZE (4 * 4)
4299 #define CMP_SIZE (3 * 4)
4300 #define BRANCH_SIZE (1 * 4)
4301 #define CALL_SIZE (2 * 4)
4302 #define WMC_SIZE (5 * 4)
/* signed byte distance from A to B */
4303 #define DISTANCE(A, B) (((gint32)(B)) - ((gint32)(A)))
4306 arm_emit_value_and_patch_ldr (arminstr_t *code, arminstr_t *target, guint32 value)
4308 guint32 delta = DISTANCE (target, code);
4310 g_assert (delta >= 0 && delta <= 0xFFF);
4311 *target = *target | delta;
/*
 * mono_arch_build_imt_thunk:
 *
 *   Build the native IMT dispatch thunk for VTABLE: a sequence of
 * compare-and-branch fragments (with binary-search branches for non-equals
 * entries) that matches the incoming IMT method against IMT_ENTRIES and
 * jumps through the corresponding vtable slot.  On entry the thunk saves
 * r0/r1, loads the IMT method from the call site (or from V5 for AOT
 * callers) and the vtable address from an embedded data word.
 *
 * NOTE(review): garbled extraction — original line numbers are fused into
 * the code and many lines (braces, else branches, size accounting,
 * extra_space handling, the final return) are missing.  Restore from
 * upstream before compiling.
 */
4317 mono_arch_build_imt_thunk (MonoVTable *vtable, MonoDomain *domain, MonoIMTCheckItem **imt_entries, int count)
4319 int size, i, extra_space = 0;
4320 arminstr_t *code, *start, *vtable_target = NULL;
/* first pass: compute each entry's chunk_size and the total thunk size */
4323 for (i = 0; i < count; ++i) {
4324 MonoIMTCheckItem *item = imt_entries [i];
4325 if (item->is_equals) {
4326 g_assert (arm_is_imm12 (DISTANCE (vtable, &vtable->vtable[item->vtable_slot])));
4328 if (item->check_target_idx) {
4329 if (!item->compare_done)
4330 item->chunk_size += CMP_SIZE;
4331 item->chunk_size += BRANCH_SIZE;
4333 #if ENABLE_WRONG_METHOD_CHECK
4334 item->chunk_size += WMC_SIZE;
4337 item->chunk_size += CALL_SIZE;
4339 item->chunk_size += BSEARCH_ENTRY_SIZE;
4340 imt_entries [item->check_target_idx]->compare_done = TRUE;
4342 size += item->chunk_size;
4345 start = code = mono_code_manager_reserve (domain->code_mp, size);
4348 printf ("building IMT thunk for class %s %s entries %d code size %d code at %p end %p vtable %p\n", vtable->klass->name_space, vtable->klass->name, count, size, start, ((guint8*)start) + size, vtable);
4349 for (i = 0; i < count; ++i) {
4350 MonoIMTCheckItem *item = imt_entries [i];
4351 printf ("method %d (%p) %s vtable slot %p is_equals %d chunk size %d\n", i, item->method, item->method->name, &vtable->vtable [item->vtable_slot], item->is_equals, item->chunk_size);
/* thunk entry: save r0/r1, load the IMT method word from the call site
 * and the vtable address from a pc-relative data word (patched below) */
4355 ARM_PUSH2 (code, ARMREG_R0, ARMREG_R1);
4356 ARM_LDR_IMM (code, ARMREG_R0, ARMREG_LR, -4);
4357 vtable_target = code;
4358 ARM_LDR_IMM (code, ARMREG_IP, ARMREG_PC, 0);
4360 /* R0 == 0 means we are called from AOT code. In this case, V5 contains the IMT method */
4361 ARM_CMP_REG_IMM8 (code, ARMREG_R0, 0);
4362 ARM_MOV_REG_REG_COND (code, ARMREG_R0, ARMREG_V5, ARMCOND_EQ);
/* second pass: emit one fragment per entry */
4364 for (i = 0; i < count; ++i) {
4365 MonoIMTCheckItem *item = imt_entries [i];
4366 arminstr_t *imt_method = NULL;
4367 item->code_target = (guint8*)code;
4369 if (item->is_equals) {
4370 if (item->check_target_idx) {
4371 if (!item->compare_done) {
4373 ARM_LDR_IMM (code, ARMREG_R1, ARMREG_PC, 0);
4374 ARM_CMP_REG_REG (code, ARMREG_R0, ARMREG_R1);
4376 item->jmp_code = (guint8*)code;
4377 ARM_B_COND (code, ARMCOND_NE, 0);
/* match: restore r0/r1 and jump through the vtable slot */
4379 ARM_POP2 (code, ARMREG_R0, ARMREG_R1);
4380 ARM_LDR_IMM (code, ARMREG_PC, ARMREG_IP, DISTANCE (vtable, &vtable->vtable[item->vtable_slot]));
4382 /*Enable the commented code to assert on wrong method*/
4383 #if ENABLE_WRONG_METHOD_CHECK
4385 ARM_LDR_IMM (code, ARMREG_R1, ARMREG_PC, 0);
4386 ARM_CMP_REG_REG (code, ARMREG_R0, ARMREG_R1);
4387 ARM_B_COND (code, ARMCOND_NE, 1);
4389 ARM_POP2 (code, ARMREG_R0, ARMREG_R1);
4390 ARM_LDR_IMM (code, ARMREG_PC, ARMREG_IP, DISTANCE (vtable, &vtable->vtable[item->vtable_slot]));
4392 #if ENABLE_WRONG_METHOD_CHECK
/* patch the pc-relative load with the method constant just emitted */
4398 code = arm_emit_value_and_patch_ldr (code, imt_method, (guint32)item->method);
4400 /*must emit after unconditional branch*/
4401 if (vtable_target) {
4402 code = arm_emit_value_and_patch_ldr (code, vtable_target, (guint32)vtable);
4403 item->chunk_size += 4;
4404 vtable_target = NULL;
4407 /*We reserve the space for bsearch IMT values after the first entry with an absolute jump*/
4409 code += extra_space;
/* non-equals (bsearch) entry: compare and branch on >= */
4413 ARM_LDR_IMM (code, ARMREG_R1, ARMREG_PC, 0);
4414 ARM_CMP_REG_REG (code, ARMREG_R0, ARMREG_R1);
4416 item->jmp_code = (guint8*)code;
4417 ARM_B_COND (code, ARMCOND_GE, 0);
/* third pass: patch the forward branches and backfill bsearch constants */
4422 for (i = 0; i < count; ++i) {
4423 MonoIMTCheckItem *item = imt_entries [i];
4424 if (item->jmp_code) {
4425 if (item->check_target_idx)
4426 arm_patch (item->jmp_code, imt_entries [item->check_target_idx]->code_target);
4428 if (i > 0 && item->is_equals) {
4430 arminstr_t *space_start = (arminstr_t*)(item->code_target + item->chunk_size);
4431 for (j = i - 1; j >= 0 && !imt_entries [j]->is_equals; --j) {
4432 space_start = arm_emit_value_and_patch_ldr (space_start, (arminstr_t*)imt_entries [j]->code_target, (guint32)imt_entries [j]->method);
4439 char *buff = g_strdup_printf ("thunk_for_class_%s_%s_entries_%d", vtable->klass->name_space, vtable->klass->name, count);
4440 mono_disassemble_code (NULL, (guint8*)start, size, buff);
4445 mono_arch_flush_icache ((guint8*)start, size);
4446 mono_stats.imt_thunks_size += code - start;
4448 g_assert (DISTANCE (start, code) <= size);
4455 mono_arch_context_get_int_reg (MonoContext *ctx, int reg)
4457 /* FIXME: implement */
4458 g_assert_not_reached ();