2 * mini-arm.c: ARM backend for the Mono code generator
5 * Paolo Molaro (lupus@ximian.com)
6 * Dietmar Maurer (dietmar@ximian.com)
8 * (C) 2003 Ximian, Inc.
13 #include <mono/metadata/appdomain.h>
14 #include <mono/metadata/debug-helpers.h>
21 #include "mono/arch/arm/arm-fpa-codegen.h"
22 #elif defined(ARM_FPU_VFP)
23 #include "mono/arch/arm/arm-vfp-codegen.h"
26 /* This mutex protects architecture specific caches */
27 #define mono_mini_arch_lock() EnterCriticalSection (&mini_arch_mutex)
28 #define mono_mini_arch_unlock() LeaveCriticalSection (&mini_arch_mutex)
29 static CRITICAL_SECTION mini_arch_mutex;
/* CPU feature flags, filled in by mono_arch_cpu_optimizazions () from /proc/cpuinfo. */
31 static int v5_supported = 0;
32 static int thumb_supported = 0;
/* Helper that checks whether VAL fits ARM's rotated 8-bit immediate encoding;
 * defined elsewhere in this file (not visible in this view). */
34 static int mono_arm_is_rotated_imm8 (guint32 val, gint *rot_amount);
38 * floating point support: on ARM it is a mess, there are at least 3
39 * different setups, each of which is binary-incompatible with the others.
40 * 1) FPA: old and ugly, but unfortunately what current distros use
41 * the double binary format has the two words swapped. 8 double registers.
42 * Implemented usually by kernel emulation.
43 * 2) softfloat: the compiler emulates all the fp ops. Usually uses the
44 * ugly swapped double format (I guess a softfloat-vfp exists, too, though).
45 * 3) VFP: the new and actually sensible and useful FP support. Implemented
46 * in HW or kernel-emulated, requires new tools. I think this is what symbian uses.
48 * The plan is to write the FPA support first. softfloat can be tested in a chroot.
/* NOTE(review): presumably consumed by the exception handling code — confirm users. */
50 int mono_exc_esp_offset = 0;
/* Immediate ranges encodable in ARM load/store addressing modes:
 * 12-bit for word LDR/STR, 8-bit for halfword/byte forms, and the
 * scaled 8-bit offset used by FP load/store. */
52 #define arm_is_imm12(v) ((v) > -4096 && (v) < 4096)
53 #define arm_is_imm8(v) ((v) > -256 && (v) < 256)
54 #define arm_is_fpimm8(v) ((v) >= -1020 && (v) <= 1020)
/* Mask/value pair recognizing an "ldr pc, [rX, #imm]" instruction. */
56 #define LDR_MASK ((0xf << ARMCOND_SHIFT) | (3 << 26) | (1 << 22) | (1 << 20) | (15 << 12))
57 #define LDR_PC_VAL ((ARMCOND_AL << ARMCOND_SHIFT) | (1 << 26) | (0 << 22) | (1 << 20) | (15 << 12))
58 #define IS_LDR_PC(val) (((val) & LDR_MASK) == LDR_PC_VAL)
/* Exact encodings of "add lr, pc, #4" and "mov lr, pc" — used to recognize
 * the two indirect-call sequences in mono_arch_get_vcall_slot (). */
60 #define ADD_LR_PC_4 ((ARMCOND_AL << ARMCOND_SHIFT) | (1 << 25) | (1 << 23) | (ARMREG_PC << 16) | (ARMREG_LR << 12) | 4)
61 #define MOV_LR_PC ((ARMCOND_AL << ARMCOND_SHIFT) | (1 << 24) | (0xa << 20) | (ARMREG_LR << 12) | ARMREG_PC)
/* Prototype for the generic memcpy-emission helper defined elsewhere in mini. */
64 void mini_emit_memcpy2 (MonoCompile *cfg, int destreg, int doffset, int srcreg, int soffset, int size, int align);
/*
 * mono_arch_regname:
 * Maps an integer register number to a human-readable name for debug output.
 * NOTE(review): this view is truncated — the table terminator and the
 * out-of-range return path are not visible.
 */
67 mono_arch_regname (int reg)
69 static const char * rnames[] = {
70 "arm_r0", "arm_r1", "arm_r2", "arm_r3", "arm_v1",
71 "arm_v2", "arm_v3", "arm_v4", "arm_v5", "arm_v6",
72 "arm_v7", "arm_fp", "arm_ip", "arm_sp", "arm_lr",
/* Bounds check before indexing the 16-entry name table. */
75 if (reg >= 0 && reg < 16)
/*
 * mono_arch_fregname:
 * Maps a floating point register number to a human-readable name.
 * NOTE(review): table tail and fallback return are outside this truncated view.
 */
81 mono_arch_fregname (int reg)
83 static const char * rnames[] = {
84 "arm_f0", "arm_f1", "arm_f2", "arm_f3", "arm_f4",
85 "arm_f5", "arm_f6", "arm_f7", "arm_f8", "arm_f9",
86 "arm_f10", "arm_f11", "arm_f12", "arm_f13", "arm_f14",
87 "arm_f15", "arm_f16", "arm_f17", "arm_f18", "arm_f19",
88 "arm_f20", "arm_f21", "arm_f22", "arm_f23", "arm_f24",
89 "arm_f25", "arm_f26", "arm_f27", "arm_f28", "arm_f29",
/* Bounds check before indexing the 32-entry name table. */
92 if (reg >= 0 && reg < 32)
/*
 * emit_big_add:
 * Emits code computing DREG = SREG + IMM for an arbitrary immediate.
 * Fast path: a single ADD when IMM fits ARM's rotated imm8 encoding.
 * Slow path: materialize IMM into DREG, then add SREG — hence DREG must
 * differ from SREG (asserted below).
 */
98 emit_big_add (guint8 *code, int dreg, int sreg, int imm)
100 int imm8, rot_amount;
101 if ((imm8 = mono_arm_is_rotated_imm8 (imm, &rot_amount)) >= 0) {
102 ARM_ADD_REG_IMM (code, dreg, sreg, imm8, rot_amount);
/* Slow path: the immediate cannot be encoded directly. */
105 g_assert (dreg != sreg);
106 code = mono_arm_emit_load_imm (code, dreg, imm);
107 ARM_ADD_REG_REG (code, dreg, dreg, sreg);
/*
 * emit_memcpy:
 * Emits a word-by-word copy of SIZE bytes from SREG+SOFFSET to DREG+DOFFSET.
 * Large copies use a runtime loop (clobbering r0-r3); small copies are fully
 * unrolled with LR as scratch. Trailing sub-word bytes are handled in lines
 * not visible in this truncated view; the final assert expects size == 0.
 */
112 emit_memcpy (guint8 *code, int size, int dreg, int doffset, int sreg, int soffset)
114 /* we can use r0-r3, since this is called only for incoming args on the stack */
115 if (size > sizeof (gpointer) * 4) {
/* Runtime copy loop: r0 = src cursor, r1 = dst cursor, r2 = remaining bytes. */
117 code = emit_big_add (code, ARMREG_R0, sreg, soffset);
118 code = emit_big_add (code, ARMREG_R1, dreg, doffset);
119 start_loop = code = mono_arm_emit_load_imm (code, ARMREG_R2, size);
120 ARM_LDR_IMM (code, ARMREG_R3, ARMREG_R0, 0);
121 ARM_STR_IMM (code, ARMREG_R3, ARMREG_R1, 0);
122 ARM_ADD_REG_IMM8 (code, ARMREG_R0, ARMREG_R0, 4);
123 ARM_ADD_REG_IMM8 (code, ARMREG_R1, ARMREG_R1, 4);
124 ARM_SUBS_REG_IMM8 (code, ARMREG_R2, ARMREG_R2, 4);
/* Branch back while r2 != 0; target patched in just below. */
125 ARM_B_COND (code, ARMCOND_NE, 0);
126 arm_patch (code - 4, start_loop);
/* Unrolled copy: only valid if all offsets fit the 12-bit LDR/STR immediate. */
129 if (arm_is_imm12 (doffset) && arm_is_imm12 (doffset + size) &&
130 arm_is_imm12 (soffset) && arm_is_imm12 (soffset + size)) {
132 ARM_LDR_IMM (code, ARMREG_LR, sreg, soffset);
133 ARM_STR_IMM (code, ARMREG_LR, dreg, doffset);
/* Offsets too large: compute base addresses into r0/r1 and restart at 0. */
139 code = emit_big_add (code, ARMREG_R0, sreg, soffset);
140 code = emit_big_add (code, ARMREG_R1, dreg, doffset);
141 doffset = soffset = 0;
143 ARM_LDR_IMM (code, ARMREG_LR, ARMREG_R0, soffset);
144 ARM_STR_IMM (code, ARMREG_LR, ARMREG_R1, doffset);
/* All bytes must have been consumed by the paths above. */
150 g_assert (size == 0);
/*
 * emit_call_reg:
 * Emits an indirect call through REG. Presumably BLX is used when the CPU
 * supports it and "mov lr, pc; mov pc, reg" otherwise — the conditional
 * structure around these alternatives is not visible in this truncated view.
 */
155 emit_call_reg (guint8 *code, int reg)
158 ARM_BLX_REG (code, reg);
/* Fallback sequence: save return address in LR, then branch via PC. */
160 ARM_MOV_REG_REG (code, ARMREG_LR, ARMREG_PC);
164 ARM_MOV_REG_REG (code, ARMREG_PC, reg);
/*
 * emit_call_seq:
 * Emits a call for dynamic methods by loading the (to be patched) target
 * address from an inline literal into IP, then calling through IP.
 * The NULL literal slot is filled in later by the patching code.
 */
170 emit_call_seq (MonoCompile *cfg, guint8 *code)
172 if (cfg->method->dynamic) {
173 ARM_LDR_IMM (code, ARMREG_IP, ARMREG_PC, 0);
/* Reserve an inline word for the call target; patched after emission. */
175 *(gpointer*)code = NULL;
177 code = emit_call_reg (code, ARMREG_IP);
/*
 * emit_move_return_value:
 * Moves the call return value from its fixed ABI location into the
 * destination register of INS. Only the FPA float-call case is visible
 * in this truncated view: results arrive in ARM_FPA_F0.
 */
185 emit_move_return_value (MonoCompile *cfg, MonoInst *ins, guint8 *code)
187 switch (ins->opcode) {
190 case OP_FCALL_MEMBASE:
192 if (ins->dreg != ARM_FPA_F0)
193 ARM_MVFD (code, ins->dreg, ARM_FPA_F0);
202 * mono_arch_get_argument_info:
203 * @csig: a method signature
204 * @param_count: the number of parameters to consider
205 * @arg_info: an array to store the result infos
207 * Gathers information on parameters such as size, alignment and
208 * padding. arg_info should be large enough to hold param_count + 1 entries.
210 * Returns the size of the activation frame.
213 mono_arch_get_argument_info (MonoMethodSignature *csig, int param_count, MonoJitArgumentInfo *arg_info)
215 int k, frame_size = 0;
216 int size, align, pad;
/* Struct returns are passed via a hidden pointer argument. */
219 if (MONO_TYPE_ISSTRUCT (csig->ret)) {
220 frame_size += sizeof (gpointer);
224 arg_info [0].offset = offset;
/* The implicit `this` argument also occupies one pointer slot. */
227 frame_size += sizeof (gpointer);
231 arg_info [0].size = frame_size;
233 for (k = 0; k < param_count; k++) {
/* Native (pinvoke) vs. managed stack size differ for some types. */
236 size = mono_type_native_stack_size (csig->params [k], &align);
238 size = mini_type_stack_size (NULL, csig->params [k], &align);
240 /* ignore alignment for now */
/* Round frame_size up to `align` and record the introduced padding. */
243 frame_size += pad = (align - (frame_size & (align - 1))) & (align - 1);
244 arg_info [k].pad = pad;
246 arg_info [k + 1].pad = 0;
247 arg_info [k + 1].size = size;
249 arg_info [k + 1].offset = offset;
/* Final frame padding up to the architecture frame alignment. */
253 align = MONO_ARCH_FRAME_ALIGNMENT;
254 frame_size += pad = (align - (frame_size & (align - 1))) & (align - 1);
255 arg_info [k].pad = pad;
/*
 * decode_vcall_slot_from_ldr:
 * Decodes an "ldr pc, [rN, #off]" instruction word: extracts the base
 * register (bits 16-19) and the 12-bit offset, returning the vtable slot
 * location via REGS and DISPLACEMENT.
 */
261 decode_vcall_slot_from_ldr (guint32 ldr, gpointer *regs, int *displacement)
265 reg = (ldr >> 16 ) & 0xf;
266 offset = ldr & 0xfff;
267 if (((ldr >> 23) & 1) == 0) /*U bit, 0 means negative and 1 positive*/
269 /*g_print ("found vcall at r%d + %d for code at %p 0x%x\n", reg, offset, code, *code);*/
272 *displacement = offset;
/*
 * mono_arch_get_vcall_slot:
 * Given the return address of a call (CODE_PTR), identifies whether the call
 * went through a vtable/interface slot and, if so, decodes which register and
 * displacement addressed the slot. Returns via REGS/DISPLACEMENT; non-virtual
 * calls are ignored.
 */
277 mono_arch_get_vcall_slot (guint8 *code_ptr, gpointer *regs, int *displacement)
279 guint32* code = (guint32*)code_ptr;
281 /* Locate the address of the method-specific trampoline. The call using
282 the vtable slot that took the processing flow to 'arch_create_jit_trampoline'
283 looks something like this:
292 The call sequence could be also:
295 function pointer literal
299 Note that on ARM5+ we can use one instruction instead of the last two.
300 Therefore, we need to locate the 'ldr rA' instruction to know which
301 register was used to hold the method addrs.
304 /* This is the instruction after "ldr pc, xxx", "mov pc, xxx" or "bl xxx" could be either the IMT value or some other instruction*/
307 /* Three possible code sequences can happen here:
311 * ldr pc, [rX - #offset]
317 * ldr pc, [rX - #offset]
319 * direct branch with bl:
323 * direct branch with mov:
327 * We only need to identify interface and virtual calls, the others can be ignored.
/* Sequence "add lr, pc, #4; ldr pc, [rX, #off]" — slot addressed by code[-1]. */
330 if (IS_LDR_PC (code [-1]) && code [-2] == ADD_LR_PC_4)
331 return decode_vcall_slot_from_ldr (code [-1], regs, displacement);
/* Sequence "mov lr, pc; ldr pc, [rX, #off]" — slot addressed by code[0]. */
333 if (IS_LDR_PC (code [0]) && code [-1] == MOV_LR_PC)
334 return decode_vcall_slot_from_ldr (code [0], regs, displacement);
/*
 * mono_arch_get_vcall_slot_addr:
 * Convenience wrapper around mono_arch_get_vcall_slot () that applies the
 * decoded displacement to the base value and returns the slot address.
 */
340 mono_arch_get_vcall_slot_addr (guint8* code, gpointer *regs)
344 vt = mono_arch_get_vcall_slot (code, regs, &displacement);
347 return (gpointer*)((char*)vt + displacement);
350 #define MAX_ARCH_DELEGATE_PARAMS 3
/*
 * mono_arch_get_delegate_invoke_impl:
 * Returns (and caches) a small trampoline implementing delegate Invoke.
 * HAS_TARGET variant: replaces `this` with the delegate target and jumps to
 * method_ptr. Targetless variant: shifts the register arguments down by one
 * slot and jumps to method_ptr; limited to MAX_ARCH_DELEGATE_PARAMS regsize
 * parameters. Caches are guarded by the arch mutex.
 */
353 mono_arch_get_delegate_invoke_impl (MonoMethodSignature *sig, gboolean has_target)
355 guint8 *code, *start;
357 /* FIXME: Support more cases */
358 if (MONO_TYPE_ISSTRUCT (sig->ret))
/* has_target case: single shared trampoline, created once. */
362 static guint8* cached = NULL;
363 mono_mini_arch_lock ();
365 mono_mini_arch_unlock ();
369 start = code = mono_global_codeman_reserve (12);
371 /* Replace the this argument with the target */
372 ARM_LDR_IMM (code, ARMREG_IP, ARMREG_R0, G_STRUCT_OFFSET (MonoDelegate, method_ptr));
373 ARM_LDR_IMM (code, ARMREG_R0, ARMREG_R0, G_STRUCT_OFFSET (MonoDelegate, target));
374 ARM_MOV_REG_REG (code, ARMREG_PC, ARMREG_IP);
376 g_assert ((code - start) <= 12);
378 mono_arch_flush_icache (code, 12);
380 mono_mini_arch_unlock ();
/* Targetless case: one cached trampoline per parameter count. */
383 static guint8* cache [MAX_ARCH_DELEGATE_PARAMS + 1] = {NULL};
386 if (sig->param_count > MAX_ARCH_DELEGATE_PARAMS)
/* All parameters must fit in a single register. */
388 for (i = 0; i < sig->param_count; ++i)
389 if (!mono_is_regsize_var (sig->params [i]))
392 mono_mini_arch_lock ();
393 code = cache [sig->param_count];
395 mono_mini_arch_unlock ();
/* 2 fixed instructions plus one MOV per shifted argument. */
399 size = 8 + sig->param_count * 4;
400 start = code = mono_global_codeman_reserve (size);
402 ARM_LDR_IMM (code, ARMREG_IP, ARMREG_R0, G_STRUCT_OFFSET (MonoDelegate, method_ptr));
403 /* slide down the arguments */
404 for (i = 0; i < sig->param_count; ++i) {
405 ARM_MOV_REG_REG (code, (ARMREG_R0 + i), (ARMREG_R0 + i + 1));
407 ARM_MOV_REG_REG (code, ARMREG_PC, ARMREG_IP);
409 g_assert ((code - start) <= size);
411 mono_arch_flush_icache (code, size);
412 cache [sig->param_count] = start;
413 mono_mini_arch_unlock ();
/*
 * mono_arch_get_this_arg_from_call:
 * Recovers the `this` argument from the saved register state of a call.
 * Normally `this` is in r0; when the method returns a struct, r0 holds the
 * hidden return-value pointer and `this` is in r1.
 */
421 mono_arch_get_this_arg_from_call (MonoGenericSharingContext *gsctx, MonoMethodSignature *sig, gssize *regs, guint8 *code)
423 /* FIXME: handle returning a struct */
424 if (MONO_TYPE_ISSTRUCT (sig->ret))
425 return (gpointer)regs [ARMREG_R1];
426 return (gpointer)regs [ARMREG_R0];
430 * Initialize the cpu to execute managed code.
/* No ARM-specific per-CPU setup is visible in this view of the function. */
433 mono_arch_cpu_init (void)
438 * Initialize architecture specific code.
441 mono_arch_init (void)
/* Create the mutex protecting the arch-specific caches (delegate trampolines etc.). */
443 InitializeCriticalSection (&mini_arch_mutex);
447 * Cleanup architecture specific code.
/* NOTE(review): body not visible in this view; confirm the mutex is deleted here. */
450 mono_arch_cleanup (void)
455 * This function returns the optimizations supported on this cpu.
458 mono_arch_cpu_optimizazions (guint32 *exclude_mask)
462 thumb_supported = TRUE;
/* Probe CPU capabilities from /proc/cpuinfo (Linux).
 * NOTE(review): the NULL check on `file` and the matching fclose () are in
 * lines not visible in this truncated view — confirm in the full source. */
467 FILE *file = fopen ("/proc/cpuinfo", "r");
469 while ((line = fgets (buf, 512, file))) {
/* "Processor : ... (v5l)" style lines reveal the architecture version. */
470 if (strncmp (line, "Processor", 9) == 0) {
471 char *ver = strstr (line, "(v");
472 if (ver && (ver [2] == '5' || ver [2] == '6' || ver [2] == '7')) {
/* "Features : ... thumb ..." reveals Thumb support. */
477 if (strncmp (line, "Features", 8) == 0) {
478 char *th = strstr (line, "thumb");
480 thumb_supported = TRUE;
488 /*printf ("features: v5: %d, thumb: %d\n", v5_supported, thumb_supported);*/
492 /* no arm-specific optimizations yet */
/*
 * is_regsize_var:
 * Returns whether values of type T fit in a single 32-bit integer register
 * (pointers, object references, non-valuetype generic instances, ...).
 */
498 is_regsize_var (MonoType *t) {
501 t = mono_type_get_underlying_type (t);
508 case MONO_TYPE_FNPTR:
510 case MONO_TYPE_OBJECT:
511 case MONO_TYPE_STRING:
512 case MONO_TYPE_CLASS:
513 case MONO_TYPE_SZARRAY:
514 case MONO_TYPE_ARRAY:
/* Generic instances are regsize only when they are reference types. */
516 case MONO_TYPE_GENERICINST:
517 if (!mono_type_generic_inst_is_valuetype (t))
520 case MONO_TYPE_VALUETYPE:
/*
 * mono_arch_get_allocatable_int_vars:
 * Builds the list of variables eligible for global register allocation:
 * live, non-volatile, non-indirect locals/args of register size.
 */
527 mono_arch_get_allocatable_int_vars (MonoCompile *cfg)
532 for (i = 0; i < cfg->num_varinfo; i++) {
533 MonoInst *ins = cfg->varinfo [i];
534 MonoMethodVar *vmv = MONO_VARINFO (cfg, i);
/* Skip unused variables (empty or inverted live range). */
537 if (vmv->range.first_use.abs_pos >= vmv->range.last_use.abs_pos)
540 if (ins->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT) || (ins->opcode != OP_LOCAL && ins->opcode != OP_ARG))
543 /* we can only allocate 32 bit values */
544 if (is_regsize_var (ins->inst_vtype)) {
545 g_assert (MONO_VARINFO (cfg, i)->reg == -1);
546 g_assert (i == vmv->idx);
/* Insert sorted (by cost/position per the varlist helper's ordering). */
547 vars = mono_varlist_insert_sorted (cfg, vars, vmv, FALSE);
554 #define USE_EXTRA_TEMPS 0
/*
 * mono_arch_get_global_int_regs:
 * Returns the list of callee-saved registers (v1-v5) available to the global
 * register allocator. Under AOT, v5 is withheld and instead marked used.
 */
557 mono_arch_get_global_int_regs (MonoCompile *cfg)
560 regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_V1));
561 regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_V2));
562 regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_V3));
563 regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_V4));
564 if (cfg->compile_aot)
565 /* V5 is reserved for holding the IMT method */
566 cfg->used_int_regs |= (1 << ARMREG_V5);
568 regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_V5));
569 /*regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_V6));*/
570 /*regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_V7));*/
576 * mono_arch_regalloc_cost:
578 * Return the cost, in number of memory references, of the action of
579 * allocating the variable VMV into a register during global register
/* NOTE(review): the body of this function is not visible in this view. */
583 mono_arch_regalloc_cost (MonoCompile *cfg, MonoMethodVar *vmv)
/*
 * mono_arch_flush_icache:
 * Flushes the instruction cache for the freshly emitted code in
 * [code, code + size). Darwin uses sys_icache_invalidate (); Linux uses the
 * ARM-specific sys_cacheflush syscall (swi 0x9f0002) via inline asm.
 */
590 mono_arch_flush_icache (guint8 *code, gint size)
593 sys_icache_invalidate (code, size);
595 __asm __volatile ("mov r0, %0\n"
598 "swi 0x9f0002 @ sys_cacheflush"
/* Inputs: start, end, flags(0); clobbers r0, r1, r3. */
600 : "r" (code), "r" (code + size), "r" (0)
601 : "r0", "r1", "r3" );
/* Fields of the per-argument info struct (struct header not visible in this view). */
616 guint16 vtsize; /* in param area */
618 guint8 regtype : 4; /* 0 general, 1 basereg, 2 floating point register, see RegType* */
619 guint8 size : 4; /* 1, 2, 4, 8, or regs used by RegTypeStructByVal */
/*
 * add_general:
 * Assigns the next argument to a general-purpose register (r0-r3) or to the
 * caller's stack area, advancing *GR / *STACK_SIZE. SIMPLE selects single-word
 * arguments; 64-bit arguments may be split between r3 and the stack
 * (RegTypeBaseGen). Interior branches are missing from this truncated view.
 */
634 add_general (guint *gr, guint *stack_size, ArgInfo *ainfo, gboolean simple)
/* Registers exhausted: the argument lives in the caller's stack frame. */
637 if (*gr > ARMREG_R3) {
638 ainfo->offset = *stack_size;
639 ainfo->reg = ARMREG_SP; /* in the caller */
640 ainfo->regtype = RegTypeBase;
651 /* first word in r3 and the second on the stack */
652 ainfo->offset = *stack_size;
653 ainfo->reg = ARMREG_SP; /* in the caller */
654 ainfo->regtype = RegTypeBaseGen;
/* 64-bit argument starting at r3 or later goes fully on the stack. */
656 } else if (*gr >= ARMREG_R3) {
661 ainfo->offset = *stack_size;
662 ainfo->reg = ARMREG_SP; /* in the caller */
663 ainfo->regtype = RegTypeBase;
/*
 * calculate_sizes:
 * Computes the calling-convention layout (CallInfo) for SIG: which register
 * or stack slot each argument occupies, where the return value lives, and the
 * total stack space used. IS_PINVOKE selects native sizing for value types.
 * Caller owns the returned CallInfo (g_malloc0'd). Interior lines are missing
 * from this truncated view.
 */
678 calculate_sizes (MonoMethodSignature *sig, gboolean is_pinvoke)
681 int n = sig->hasthis + sig->param_count;
683 guint32 stack_size = 0;
/* One trailing ArgInfo per argument, allocated inline after the CallInfo. */
684 CallInfo *cinfo = g_malloc0 (sizeof (CallInfo) + sizeof (ArgInfo) * n);
688 /* FIXME: handle returning a struct */
/* Struct returns consume r0 as a hidden out-pointer argument. */
689 if (MONO_TYPE_ISSTRUCT (sig->ret)) {
690 add_general (&gr, &stack_size, &cinfo->ret, TRUE);
691 cinfo->struct_ret = ARMREG_R0;
696 add_general (&gr, &stack_size, cinfo->args + n, TRUE);
699 DEBUG(printf("params: %d\n", sig->param_count));
700 for (i = 0; i < sig->param_count; ++i) {
701 if ((sig->call_convention == MONO_CALL_VARARG) && (i == sig->sentinelpos)) {
702 /* Prevent implicit arguments and sig_cookie from
703 being passed in registers */
705 /* Emit the signature cookie just before the implicit arguments */
706 add_general (&gr, &stack_size, &cinfo->sig_cookie, TRUE);
708 DEBUG(printf("param %d: ", i));
709 if (sig->params [i]->byref) {
710 DEBUG(printf("byref\n"));
711 add_general (&gr, &stack_size, cinfo->args + n, TRUE);
715 simpletype = mono_type_get_underlying_type (sig->params [i])->type;
716 switch (simpletype) {
717 case MONO_TYPE_BOOLEAN:
720 cinfo->args [n].size = 1;
721 add_general (&gr, &stack_size, cinfo->args + n, TRUE);
727 cinfo->args [n].size = 2;
728 add_general (&gr, &stack_size, cinfo->args + n, TRUE);
733 cinfo->args [n].size = 4;
734 add_general (&gr, &stack_size, cinfo->args + n, TRUE);
/* Pointer-sized reference types. */
740 case MONO_TYPE_FNPTR:
741 case MONO_TYPE_CLASS:
742 case MONO_TYPE_OBJECT:
743 case MONO_TYPE_STRING:
744 case MONO_TYPE_SZARRAY:
745 case MONO_TYPE_ARRAY:
747 cinfo->args [n].size = sizeof (gpointer);
748 add_general (&gr, &stack_size, cinfo->args + n, TRUE);
751 case MONO_TYPE_GENERICINST:
752 if (!mono_type_generic_inst_is_valuetype (sig->params [i])) {
753 cinfo->args [n].size = sizeof (gpointer);
754 add_general (&gr, &stack_size, cinfo->args + n, TRUE);
/* Value types are passed by value, split between r0-r3 and the stack. */
759 case MONO_TYPE_TYPEDBYREF:
760 case MONO_TYPE_VALUETYPE: {
765 if (simpletype == MONO_TYPE_TYPEDBYREF) {
766 size = sizeof (MonoTypedRef);
768 MonoClass *klass = mono_class_from_mono_type (sig->params [i]);
770 size = mono_class_native_size (klass, NULL);
772 size = mono_class_value_size (klass, NULL);
774 DEBUG(printf ("load %d bytes struct\n",
775 mono_class_native_size (sig->params [i]->data.klass, NULL)));
/* Round the struct size up to a whole number of pointer-sized words. */
778 align_size += (sizeof (gpointer) - 1);
779 align_size &= ~(sizeof (gpointer) - 1);
780 nwords = (align_size + sizeof (gpointer) -1 ) / sizeof (gpointer);
781 cinfo->args [n].regtype = RegTypeStructByVal;
782 /* FIXME: align gr and stack_size if needed */
783 if (gr > ARMREG_R3) {
784 cinfo->args [n].size = 0;
785 cinfo->args [n].vtsize = nwords;
/* Some words fit in remaining argument registers; the rest spill to stack. */
787 int rest = ARMREG_R3 - gr + 1;
788 int n_in_regs = rest >= nwords? nwords: rest;
789 cinfo->args [n].size = n_in_regs;
790 cinfo->args [n].vtsize = nwords - n_in_regs;
791 cinfo->args [n].reg = gr;
794 cinfo->args [n].offset = stack_size;
795 /*g_print ("offset for arg %d at %d\n", n, stack_size);*/
796 stack_size += nwords * sizeof (gpointer);
/* 64-bit primitives occupy a register pair (simple == FALSE). */
803 cinfo->args [n].size = 8;
804 add_general (&gr, &stack_size, cinfo->args + n, FALSE);
808 g_error ("Can't trampoline 0x%x", sig->params [i]->type);
/* Return value placement. */
813 simpletype = mono_type_get_underlying_type (sig->ret)->type;
814 switch (simpletype) {
815 case MONO_TYPE_BOOLEAN:
826 case MONO_TYPE_FNPTR:
827 case MONO_TYPE_CLASS:
828 case MONO_TYPE_OBJECT:
829 case MONO_TYPE_SZARRAY:
830 case MONO_TYPE_ARRAY:
831 case MONO_TYPE_STRING:
832 cinfo->ret.reg = ARMREG_R0;
836 cinfo->ret.reg = ARMREG_R0;
840 cinfo->ret.reg = ARMREG_R0;
841 /* FIXME: cinfo->ret.reg = ???;
842 cinfo->ret.regtype = RegTypeFP;*/
844 case MONO_TYPE_GENERICINST:
845 if (!mono_type_generic_inst_is_valuetype (sig->ret)) {
846 cinfo->ret.reg = ARMREG_R0;
850 case MONO_TYPE_VALUETYPE:
852 case MONO_TYPE_TYPEDBYREF:
856 g_error ("Can't handle as return value 0x%x", sig->ret->type);
860 /* align stack size to 8 */
861 DEBUG (printf (" stack size: %d (%d)\n", (stack_size + 15) & ~15, stack_size));
862 stack_size = (stack_size + 7) & ~7;
864 cinfo->stack_usage = stack_size;
870 * Set var information according to the calling convention. arm version.
871 * The locals var stuff should most likely be split in another method.
/*
 * mono_arch_allocate_vars:
 * Assigns a stack offset (or register) to the return value, every local and
 * every incoming argument of the method being compiled. Offsets grow upward
 * from the stack pointer (MONO_CFG_HAS_SPILLUP). Interior lines are missing
 * from this truncated view.
 */
874 mono_arch_allocate_vars (MonoCompile *m)
876 MonoMethodSignature *sig;
877 MonoMethodHeader *header;
879 int i, offset, size, align, curinst;
880 int frame_reg = ARMREG_FP;
882 /* FIXME: this will change when we use FP as gcc does */
883 m->flags |= MONO_CFG_HAS_SPILLUP;
885 /* allow room for the vararg method args: void* and long/double */
886 if (mono_jit_trace_calls != NULL && mono_trace_eval (m->method))
887 m->param_area = MAX (m->param_area, sizeof (gpointer)*8);
889 header = mono_method_get_header (m->method);
892 * We use the frame register also for any method that has
893 * exception clauses. This way, when the handlers are called,
894 * the code will reference local variables using the frame reg instead of
895 * the stack pointer: if we had to restore the stack pointer, we'd
896 * corrupt the method frames that are already on the stack (since
897 * filters get called before stack unwinding happens) when the filter
898 * code would call any method (this also applies to finally etc.).
900 if ((m->flags & MONO_CFG_HAS_ALLOCA) || header->num_clauses)
901 frame_reg = ARMREG_FP;
902 m->frame_reg = frame_reg;
/* A dedicated frame register must be preserved across the method. */
903 if (frame_reg != ARMREG_SP) {
904 m->used_int_regs |= 1 << frame_reg;
907 sig = mono_method_signature (m->method);
/* Scalar return values live in r0 (struct returns handled further below). */
911 if (!MONO_TYPE_ISSTRUCT (sig->ret)) {
912 /* FIXME: handle long and FP values */
913 switch (mono_type_get_underlying_type (sig->ret)->type) {
917 m->ret->opcode = OP_REGVAR;
918 m->ret->inst_c0 = ARMREG_R0;
922 /* local vars are at a positive offset from the stack pointer */
924 * also note that if the function uses alloca, we use FP
925 * to point at the local variables.
927 offset = 0; /* linkage area */
928 /* align the offset to 16 bytes: not sure this is needed here */
930 //offset &= ~(8 - 1);
932 /* add parameter area size for called functions */
933 offset += m->param_area;
936 if (m->flags & MONO_CFG_HAS_FPOUT)
939 /* allow room to save the return value */
940 if (mono_jit_trace_calls != NULL && mono_trace_eval (m->method))
943 /* the MonoLMF structure is stored just below the stack pointer */
945 if (sig->call_convention == MONO_CALL_VARARG) {
/* Slot for the hidden vret address of struct-returning methods. */
949 if (MONO_TYPE_ISSTRUCT (sig->ret)) {
951 offset += sizeof(gpointer) - 1;
952 offset &= ~(sizeof(gpointer) - 1);
953 inst->inst_offset = offset;
954 inst->opcode = OP_REGOFFSET;
955 inst->inst_basereg = frame_reg;
956 if (G_UNLIKELY (m->verbose_level > 1)) {
957 printf ("vret_addr =");
958 mono_print_ins (m->vret_addr);
960 offset += sizeof(gpointer);
961 if (sig->call_convention == MONO_CALL_VARARG)
962 m->sig_cookie += sizeof (gpointer);
/* Lay out the locals. */
965 curinst = m->locals_start;
966 for (i = curinst; i < m->num_varinfo; ++i) {
967 inst = m->varinfo [i];
968 if ((inst->flags & MONO_INST_IS_DEAD) || inst->opcode == OP_REGVAR)
971 /* inst->backend.is_pinvoke indicates native sized value types, this is used by the
972 * pinvoke wrappers when they call functions returning structure */
973 if (inst->backend.is_pinvoke && MONO_TYPE_ISSTRUCT (inst->inst_vtype) && inst->inst_vtype->type != MONO_TYPE_TYPEDBYREF)
974 size = mono_class_native_size (mono_class_from_mono_type (inst->inst_vtype), &align);
976 size = mono_type_size (inst->inst_vtype, &align);
978 /* FIXME: if a structure is misaligned, our memcpy doesn't work,
979 * since it loads/stores misaligned words, which don't do the right thing.
981 if (align < 4 && size >= 4)
984 offset &= ~(align - 1);
985 inst->inst_offset = offset;
986 inst->opcode = OP_REGOFFSET;
987 inst->inst_basereg = frame_reg;
989 //g_print ("allocating local %d to %d\n", i, inst->inst_offset);
/* Slot for the implicit `this` argument (when not register-allocated). */
994 inst = m->args [curinst];
995 if (inst->opcode != OP_REGVAR) {
996 inst->opcode = OP_REGOFFSET;
997 inst->inst_basereg = frame_reg;
998 offset += sizeof (gpointer) - 1;
999 offset &= ~(sizeof (gpointer) - 1);
1000 inst->inst_offset = offset;
1001 offset += sizeof (gpointer);
1002 if (sig->call_convention == MONO_CALL_VARARG)
1003 m->sig_cookie += sizeof (gpointer);
/* Lay out the declared parameters. */
1008 for (i = 0; i < sig->param_count; ++i) {
1009 inst = m->args [curinst];
1010 if (inst->opcode != OP_REGVAR) {
1011 inst->opcode = OP_REGOFFSET;
1012 inst->inst_basereg = frame_reg;
1013 size = mono_type_size (sig->params [i], &align);
1014 /* FIXME: if a structure is misaligned, our memcpy doesn't work,
1015 * since it loads/stores misaligned words, which don't do the right thing.
1017 if (align < 4 && size >= 4)
1019 offset += align - 1;
1020 offset &= ~(align - 1);
1021 inst->inst_offset = offset;
1023 if ((sig->call_convention == MONO_CALL_VARARG) && (i < sig->sentinelpos))
1024 m->sig_cookie += size;
1029 /* align the offset to 8 bytes */
1034 m->stack_offset = offset;
/*
 * mono_arch_create_vars:
 * Creates arch-specific compile-time variables; here, the hidden vret_addr
 * argument used when the method returns a struct by value.
 */
1039 mono_arch_create_vars (MonoCompile *cfg)
1041 MonoMethodSignature *sig;
1043 sig = mono_method_signature (cfg->method);
1045 if (MONO_TYPE_ISSTRUCT (sig->ret)) {
1046 cfg->vret_addr = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_ARG);
1047 if (G_UNLIKELY (cfg->verbose_level > 1)) {
1048 printf ("vret_addr = ");
1049 mono_print_ins (cfg->vret_addr);
1055 * take the arguments and generate the arch-specific
1056 * instructions to properly call the function in call.
1057 * This includes pushing, moving arguments to the right register
1059 * Issue: who does the spilling if needed, and when?
/* Old-IR (tree) argument marshalling; the linear-IR path is mono_arch_emit_call ()
 * below. Interior lines are missing from this truncated view. */
1062 mono_arch_call_opcode (MonoCompile *cfg, MonoBasicBlock* bb, MonoCallInst *call, int is_virtual) {
1064 MonoMethodSignature *sig;
1069 sig = call->signature;
1070 n = sig->param_count + sig->hasthis;
1072 cinfo = calculate_sizes (sig, sig->pinvoke);
/* The hidden struct-return pointer register must be live across the call. */
1073 if (cinfo->struct_ret)
1074 call->used_iregs |= 1 << cinfo->struct_ret;
1076 for (i = 0; i < n; ++i) {
1077 ainfo = cinfo->args + i;
/* Vararg sentinel: emit the signature cookie argument. */
1078 if ((sig->call_convention == MONO_CALL_VARARG) && (i == sig->sentinelpos)) {
1080 cfg->disable_aot = TRUE;
1082 MONO_INST_NEW (cfg, sig_arg, OP_ICONST);
1083 sig_arg->inst_p0 = call->signature;
1085 MONO_INST_NEW (cfg, arg, OP_OUTARG);
1086 arg->inst_imm = cinfo->sig_cookie.offset;
1087 arg->inst_left = sig_arg;
1089 /* prepend, so they get reversed */
1090 arg->next = call->out_args;
1091 call->out_args = arg;
1093 if (is_virtual && i == 0) {
1094 /* the argument will be attached to the call instruction */
1095 in = call->args [i];
1096 call->used_iregs |= 1 << ainfo->reg;
1098 MONO_INST_NEW (cfg, arg, OP_OUTARG);
1099 in = call->args [i];
1100 arg->cil_code = in->cil_code;
1101 arg->inst_left = in;
1102 arg->inst_right = (MonoInst*)call;
1103 arg->type = in->type;
1104 /* prepend, we'll need to reverse them later */
1105 arg->next = call->out_args;
1106 call->out_args = arg;
/* Dispatch on where the calling convention placed this argument. */
1107 if (ainfo->regtype == RegTypeGeneral) {
1108 arg->backend.reg3 = ainfo->reg;
1109 call->used_iregs |= 1 << ainfo->reg;
/* 64-bit values occupy a register pair. */
1110 if (arg->type == STACK_I8)
1111 call->used_iregs |= 1 << (ainfo->reg + 1);
1112 if (arg->type == STACK_R8) {
1113 if (ainfo->size == 4) {
1114 #ifndef MONO_ARCH_SOFT_FLOAT
1115 arg->opcode = OP_OUTARG_R4;
1118 call->used_iregs |= 1 << (ainfo->reg + 1);
1120 cfg->flags |= MONO_CFG_HAS_FPOUT;
1122 } else if (ainfo->regtype == RegTypeStructByAddr) {
1123 /* FIXME: where is the data allocated? */
1124 arg->backend.reg3 = ainfo->reg;
1125 call->used_iregs |= 1 << ainfo->reg;
1126 g_assert_not_reached ();
1127 } else if (ainfo->regtype == RegTypeStructByVal) {
1129 /* mark the used regs */
1130 for (cur_reg = 0; cur_reg < ainfo->size; ++cur_reg) {
1131 call->used_iregs |= 1 << (ainfo->reg + cur_reg);
1133 arg->opcode = OP_OUTARG_VT;
1134 /* vtsize and offset have just 12 bits of encoding in number of words */
1135 g_assert (((ainfo->vtsize | (ainfo->offset / 4)) & 0xfffff000) == 0);
/* Pack reg/size/vtsize/offset into the single backend arg_info word. */
1136 arg->backend.arg_info = ainfo->reg | (ainfo->size << 4) | (ainfo->vtsize << 8) | ((ainfo->offset / 4) << 20);
1137 } else if (ainfo->regtype == RegTypeBase) {
1138 arg->opcode = OP_OUTARG_MEMBASE;
1139 arg->backend.arg_info = (ainfo->offset << 8) | ainfo->size;
1140 } else if (ainfo->regtype == RegTypeBaseGen) {
/* Split 64-bit value: low/high word between r3 and the stack. */
1141 call->used_iregs |= 1 << ARMREG_R3;
1142 arg->opcode = OP_OUTARG_MEMBASE;
1143 arg->backend.arg_info = (ainfo->offset << 8) | 0xff;
1144 if (arg->type == STACK_R8)
1145 cfg->flags |= MONO_CFG_HAS_FPOUT;
1146 } else if (ainfo->regtype == RegTypeFP) {
1147 arg->backend.reg3 = ainfo->reg;
1148 /* FP args are passed in int regs */
1149 call->used_iregs |= 1 << ainfo->reg;
1150 if (ainfo->size == 8) {
1151 arg->opcode = OP_OUTARG_R8;
1152 call->used_iregs |= 1 << (ainfo->reg + 1);
1154 arg->opcode = OP_OUTARG_R4;
1156 cfg->flags |= MONO_CFG_HAS_FPOUT;
1158 g_assert_not_reached ();
1163 * Reverse the call->out_args list.
1166 MonoInst *prev = NULL, *list = call->out_args, *next;
1173 call->out_args = prev;
1175 call->stack_usage = cinfo->stack_usage;
1176 cfg->param_area = MAX (cfg->param_area, cinfo->stack_usage);
1177 cfg->flags |= MONO_CFG_HAS_CALLS;
1179 * should set more info in call, such as the stack space
1180 * used by the args that needs to be added back to esp
/*
 * mono_arch_emit_call:
 * Linear-IR argument marshalling: for each argument of CALL, emits the MOVE /
 * store instructions that place it in the register or stack slot chosen by
 * calculate_sizes (). Also wires up the hidden vret argument for struct
 * returns. Interior lines are missing from this truncated view.
 */
1188 mono_arch_emit_call (MonoCompile *cfg, MonoCallInst *call)
1191 MonoMethodSignature *sig;
1195 sig = call->signature;
1196 n = sig->param_count + sig->hasthis;
1198 cinfo = calculate_sizes (sig, sig->pinvoke);
1200 for (i = 0; i < n; ++i) {
1201 ArgInfo *ainfo = cinfo->args + i;
/* The implicit `this` argument is treated as a plain pointer. */
1204 if (i >= sig->hasthis)
1205 t = sig->params [i - sig->hasthis];
1207 t = &mono_defaults.int_class->byval_arg;
1208 t = mono_type_get_underlying_type (t);
1210 if ((sig->call_convention == MONO_CALL_VARARG) && (i == sig->sentinelpos)) {
1215 in = call->args [i];
1217 switch (ainfo->regtype) {
1218 case RegTypeGeneral:
/* 64-bit integers: move both word vregs into a register pair. */
1219 if (!t->byref && ((t->type == MONO_TYPE_I8) || (t->type == MONO_TYPE_U8))) {
1220 MONO_INST_NEW (cfg, ins, OP_MOVE);
1221 ins->dreg = mono_alloc_ireg (cfg);
1222 ins->sreg1 = in->dreg + 1;
1223 MONO_ADD_INS (cfg->cbb, ins);
1224 mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, ainfo->reg, FALSE);
1226 MONO_INST_NEW (cfg, ins, OP_MOVE);
1227 ins->dreg = mono_alloc_ireg (cfg);
1228 ins->sreg1 = in->dreg + 2;
1229 MONO_ADD_INS (cfg->cbb, ins);
1230 mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, ainfo->reg + 1, FALSE);
/* Floats passed in integer registers (ARM soft-float ABI). */
1231 } else if (!t->byref && ((t->type == MONO_TYPE_R8) || (t->type == MONO_TYPE_R4))) {
1232 #ifndef MONO_ARCH_SOFT_FLOAT
1236 if (ainfo->size == 4) {
1237 #ifdef MONO_ARCH_SOFT_FLOAT
1238 /* mono_emit_call_args () have already done the r8->r4 conversion */
1239 /* The converted value is in an int vreg */
1240 MONO_INST_NEW (cfg, ins, OP_MOVE);
1241 ins->dreg = mono_alloc_ireg (cfg);
1242 ins->sreg1 = in->dreg;
1243 MONO_ADD_INS (cfg->cbb, ins);
1244 mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, ainfo->reg, FALSE);
/* Hard float: bounce the value through the stack to reinterpret it as int. */
1246 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORER4_MEMBASE_REG, ARMREG_SP, (cfg->param_area - 8), in->dreg);
1247 creg = mono_alloc_ireg (cfg);
1248 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOAD_MEMBASE, creg, ARMREG_SP, (cfg->param_area - 8));
1249 mono_call_inst_add_outarg_reg (cfg, call, creg, ainfo->reg, FALSE);
1252 #ifdef MONO_ARCH_SOFT_FLOAT
/* Soft float doubles: extract low/high 32-bit halves into a register pair. */
1253 MONO_INST_NEW (cfg, ins, OP_FGETLOW32);
1254 ins->dreg = mono_alloc_ireg (cfg);
1255 ins->sreg1 = in->dreg;
1256 MONO_ADD_INS (cfg->cbb, ins);
1257 mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, ainfo->reg, FALSE);
1259 MONO_INST_NEW (cfg, ins, OP_FGETHIGH32);
1260 ins->dreg = mono_alloc_ireg (cfg);
1261 ins->sreg1 = in->dreg;
1262 MONO_ADD_INS (cfg->cbb, ins);
1263 mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, ainfo->reg + 1, FALSE);
/* Hard float doubles: store, then reload both words into int registers. */
1265 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORER8_MEMBASE_REG, ARMREG_SP, (cfg->param_area - 8), in->dreg);
1266 creg = mono_alloc_ireg (cfg);
1267 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOAD_MEMBASE, creg, ARMREG_SP, (cfg->param_area - 8));
1268 mono_call_inst_add_outarg_reg (cfg, call, creg, ainfo->reg, FALSE);
1269 creg = mono_alloc_ireg (cfg);
1270 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOAD_MEMBASE, creg, ARMREG_SP, (cfg->param_area - 8 + 4));
1271 mono_call_inst_add_outarg_reg (cfg, call, creg, ainfo->reg + 1, FALSE);
1274 cfg->flags |= MONO_CFG_HAS_FPOUT;
/* Plain regsize value: move into the assigned argument register. */
1276 MONO_INST_NEW (cfg, ins, OP_MOVE);
1277 ins->dreg = mono_alloc_ireg (cfg);
1278 ins->sreg1 = in->dreg;
1279 MONO_ADD_INS (cfg->cbb, ins);
1281 mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, ainfo->reg, FALSE);
1284 case RegTypeStructByAddr:
1287 /* FIXME: where is the data allocated? */
1288 arg->backend.reg3 = ainfo->reg;
1289 call->used_iregs |= 1 << ainfo->reg;
1290 g_assert_not_reached ();
/* By-value structs are expanded later by mono_arch_emit_outarg_vt (). */
1293 case RegTypeStructByVal:
1294 MONO_INST_NEW (cfg, ins, OP_OUTARG_VT);
1295 ins->opcode = OP_OUTARG_VT;
1296 ins->sreg1 = in->dreg;
1297 ins->klass = in->klass;
1298 ins->inst_p0 = call;
1299 ins->inst_p1 = mono_mempool_alloc (cfg->mempool, sizeof (ArgInfo));
1300 memcpy (ins->inst_p1, ainfo, sizeof (ArgInfo));
1301 MONO_ADD_INS (cfg->cbb, ins);
/* Stack-passed argument: store to the outgoing parameter area. */
1304 if (!t->byref && ((t->type == MONO_TYPE_I8) || (t->type == MONO_TYPE_U8))) {
1305 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, ARMREG_SP, ainfo->offset, in->dreg);
1306 } else if (!t->byref && ((t->type == MONO_TYPE_R4) || (t->type == MONO_TYPE_R8))) {
1307 if (t->type == MONO_TYPE_R8) {
1308 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORER8_MEMBASE_REG, ARMREG_SP, ainfo->offset, in->dreg);
1310 #ifdef MONO_ARCH_SOFT_FLOAT
1311 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, ARMREG_SP, ainfo->offset, in->dreg);
1313 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORER4_MEMBASE_REG, ARMREG_SP, ainfo->offset, in->dreg);
1317 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, ARMREG_SP, ainfo->offset, in->dreg);
/* 64-bit value split between r3 and the stack. */
1320 case RegTypeBaseGen:
1321 if (!t->byref && ((t->type == MONO_TYPE_I8) || (t->type == MONO_TYPE_U8))) {
1322 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, ARMREG_SP, ainfo->offset, (G_BYTE_ORDER == G_BIG_ENDIAN) ? in->dreg + 1 : in->dreg + 2);
1323 MONO_INST_NEW (cfg, ins, OP_MOVE);
1324 ins->dreg = mono_alloc_ireg (cfg);
1325 ins->sreg1 = G_BYTE_ORDER == G_BIG_ENDIAN ? in->dreg + 2 : in->dreg + 1;
1326 MONO_ADD_INS (cfg->cbb, ins);
1327 mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, ARMREG_R3, FALSE);
1328 } else if (!t->byref && (t->type == MONO_TYPE_R8)) {
1331 #ifdef MONO_ARCH_SOFT_FLOAT
1332 g_assert_not_reached ();
/* Double split: low word via r3, high word via the stack slot. */
1335 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORER8_MEMBASE_REG, ARMREG_SP, (cfg->param_area - 8), in->dreg);
1336 creg = mono_alloc_ireg (cfg);
1337 mono_call_inst_add_outarg_reg (cfg, call, creg, ARMREG_R3, FALSE);
1338 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOAD_MEMBASE, creg, ARMREG_SP, (cfg->param_area - 8));
1339 creg = mono_alloc_ireg (cfg);
1340 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOAD_MEMBASE, creg, ARMREG_SP, (cfg->param_area - 4));
1341 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, ARMREG_SP, ainfo->offset, creg);
1342 cfg->flags |= MONO_CFG_HAS_FPOUT;
1344 g_assert_not_reached ();
1351 arg->backend.reg3 = ainfo->reg;
1352 /* FP args are passed in int regs */
1353 call->used_iregs |= 1 << ainfo->reg;
1354 if (ainfo->size == 8) {
1355 arg->opcode = OP_OUTARG_R8;
1356 call->used_iregs |= 1 << (ainfo->reg + 1);
1358 arg->opcode = OP_OUTARG_R4;
1361 cfg->flags |= MONO_CFG_HAS_FPOUT;
1365 g_assert_not_reached ();
/* Pass the hidden vret address in the convention's chosen register. */
1369 if (sig->ret && MONO_TYPE_ISSTRUCT (sig->ret)) {
1372 MONO_INST_NEW (cfg, vtarg, OP_MOVE);
1373 vtarg->sreg1 = call->vret_var->dreg;
1374 vtarg->dreg = mono_alloc_preg (cfg);
1375 MONO_ADD_INS (cfg->cbb, vtarg);
1377 mono_call_inst_add_outarg_reg (cfg, call, vtarg->dreg, cinfo->ret.reg, FALSE);
1380 call->stack_usage = cinfo->stack_usage;
/*
 * mono_arch_emit_outarg_vt:
 * Emit the IR that passes a valuetype call argument: the first
 * ainfo->size machine words of the value at src->dreg are loaded into
 * consecutive registers starting at ainfo->reg, and the remaining
 * ovf_size words are copied to the outgoing stack area at doffset
 * off ARMREG_SP.
 */
1386 mono_arch_emit_outarg_vt (MonoCompile *cfg, MonoInst *ins, MonoInst *src)
1388 MonoCallInst *call = (MonoCallInst*)ins->inst_p0;
1389 ArgInfo *ainfo = ins->inst_p1;
1390 int ovf_size = ainfo->vtsize;
1391 int doffset = ainfo->offset;
1392 int i, soffset, dreg;
/* register portion: one word per register, ainfo->size words in total */
1395 for (i = 0; i < ainfo->size; ++i) {
1396 dreg = mono_alloc_ireg (cfg);
1397 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, dreg, src->dreg, soffset);
1398 mono_call_inst_add_outarg_reg (cfg, call, dreg, ainfo->reg + i, FALSE);
1399 soffset += sizeof (gpointer);
1401 //g_print ("vt size: %d at R%d + %d\n", doffset, vt->inst_basereg, vt->inst_offset);
/* overflow portion: copy the rest of the struct onto the stack */
1403 mini_emit_memcpy2 (cfg, ARMREG_SP, doffset, src->dreg, soffset, ovf_size * sizeof (gpointer), 0);
/*
 * mono_arch_emit_setret:
 * Move the computed return value 'val' into the ABI return location:
 * 64-bit integers use OP_SETLRET with a register pair, doubles under
 * soft-float use OP_SETFRET, and everything else is a plain OP_MOVE
 * into cfg->ret->dreg.
 */
1407 mono_arch_emit_setret (MonoCompile *cfg, MonoMethod *method, MonoInst *val)
1409 MonoType *ret = mono_type_get_underlying_type (mono_method_signature (method)->ret);
/* 64-bit result: return in a register pair (low word in dreg + 1) */
1412 if (ret->type == MONO_TYPE_I8 || ret->type == MONO_TYPE_U8) {
1415 MONO_INST_NEW (cfg, ins, OP_SETLRET);
1416 ins->sreg1 = val->dreg + 1;
1417 ins->sreg2 = val->dreg + 2;
1418 MONO_ADD_INS (cfg->cbb, ins);
1421 #ifdef MONO_ARCH_SOFT_FLOAT
/* soft-float: FP results travel in integer registers via OP_SETFRET */
1422 if (ret->type == MONO_TYPE_R8) {
1425 MONO_INST_NEW (cfg, ins, OP_SETFRET);
1426 ins->dreg = cfg->ret->dreg;
1427 ins->sreg1 = val->dreg;
1428 MONO_ADD_INS (cfg->cbb, ins);
1431 if (ret->type == MONO_TYPE_R4) {
1432 /* Already converted to an int in method_to_ir () */
1433 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, cfg->ret->dreg, val->dreg);
/* default: a single-word result is just moved to the return register */
1440 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, cfg->ret->dreg, val->dreg);
/* mono_arch_is_inst_imm: predicate on whether 'imm' is usable as an
 * instruction immediate on this architecture (body not visible here). */
1444 mono_arch_is_inst_imm (gint64 imm)
1450 * Allow tracing to work with this interface (with an optional argument)
/*
 * mono_arch_instrument_prolog:
 * Emit a call to the tracing function 'func' at method entry:
 * r0 = the MonoMethod being compiled, r1 = 0 (no frame pointer passed
 * for now), then call through r2.
 */
1454 mono_arch_instrument_prolog (MonoCompile *cfg, void *func, void *p, gboolean enable_arguments)
1458 code = mono_arm_emit_load_imm (code, ARMREG_R0, (guint32)cfg->method);
1459 ARM_MOV_REG_IMM8 (code, ARMREG_R1, 0); /* NULL ebp for now */
1460 code = mono_arm_emit_load_imm (code, ARMREG_R2, (guint32)func);
1461 code = emit_call_reg (code, ARMREG_R2);
/*
 * mono_arch_instrument_epilog:
 * Emit a call to the tracing function 'func' just before method exit.
 * The live return value (r0/r1 pair, or the FP result) is spilled to the
 * parameter area at save_offset, the tracing call is made with
 * r0 = method, then the value is reloaded so the real epilogue sees it
 * unchanged. 'save_mode' is selected from the method's return type.
 */
1474 mono_arch_instrument_epilog (MonoCompile *cfg, void *func, void *p, gboolean enable_arguments)
1477 int save_mode = SAVE_NONE;
1479 MonoMethod *method = cfg->method;
1480 int rtype = mono_type_get_underlying_type (mono_method_signature (method)->ret)->type;
1481 int save_offset = cfg->param_area;
/* grow the native code buffer if the ~16 instructions below may not fit */
1485 offset = code - cfg->native_code;
1486 /* we need about 16 instructions */
1487 if (offset > (cfg->code_size - 16 * 4)) {
1488 cfg->code_size *= 2;
1489 cfg->native_code = g_realloc (cfg->native_code, cfg->code_size);
1490 code = cfg->native_code + offset;
1493 case MONO_TYPE_VOID:
1494 /* special case string .ctor icall */
/* FIX: was 'strcmp (...)' (nonzero == NOT equal), which selected every
 * String method EXCEPT .ctor; the special case is the .ctor icall itself,
 * which returns the string in r0 despite the void signature. */
1495 if (!strcmp (".ctor", method->name) && method->klass == mono_defaults.string_class)
1496 save_mode = SAVE_ONE;
1498 save_mode = SAVE_NONE;
1502 save_mode = SAVE_TWO;
1506 save_mode = SAVE_FP;
1508 case MONO_TYPE_VALUETYPE:
1509 save_mode = SAVE_STRUCT;
1512 save_mode = SAVE_ONE;
/* spill the return value before clobbering r0-r2 with the trace call */
1516 switch (save_mode) {
1518 ARM_STR_IMM (code, ARMREG_R0, cfg->frame_reg, save_offset);
1519 ARM_STR_IMM (code, ARMREG_R1, cfg->frame_reg, save_offset + 4);
1520 if (enable_arguments) {
1521 ARM_MOV_REG_REG (code, ARMREG_R2, ARMREG_R1);
1522 ARM_MOV_REG_REG (code, ARMREG_R1, ARMREG_R0);
1526 ARM_STR_IMM (code, ARMREG_R0, cfg->frame_reg, save_offset);
1527 if (enable_arguments) {
1528 ARM_MOV_REG_REG (code, ARMREG_R1, ARMREG_R0);
1532 /* FIXME: what reg? */
1533 if (enable_arguments) {
1534 /* FIXME: what reg? */
1538 if (enable_arguments) {
1539 /* FIXME: get the actual address */
1540 ARM_MOV_REG_REG (code, ARMREG_R1, ARMREG_R0);
/* call func (method, ...) through IP */
1548 code = mono_arm_emit_load_imm (code, ARMREG_R0, (guint32)cfg->method);
1549 code = mono_arm_emit_load_imm (code, ARMREG_IP, (guint32)func);
1550 code = emit_call_reg (code, ARMREG_IP);
/* reload the saved return value */
1552 switch (save_mode) {
1554 ARM_LDR_IMM (code, ARMREG_R0, cfg->frame_reg, save_offset);
1555 ARM_LDR_IMM (code, ARMREG_R1, cfg->frame_reg, save_offset + 4);
1558 ARM_LDR_IMM (code, ARMREG_R0, cfg->frame_reg, save_offset);
1572 * The immediate field for cond branches is big enough for all reasonable methods
/* EMIT_COND_BRANCH_FLAGS: emit a conditional branch for 'ins'.  The
 * direct-displacement fast paths are disabled ('if (0 && ...)'); a
 * patch-info entry (LABEL or BB) is recorded and a zero-displacement
 * B<cond> is emitted for later patching by arm_patch (). */
1574 #define EMIT_COND_BRANCH_FLAGS(ins,condcode) \
1575 if (ins->flags & MONO_INST_BRLABEL) { \
1576 if (0 && ins->inst_i0->inst_c0) { \
1577 ARM_B_COND (code, (condcode), (code - cfg->native_code + ins->inst_i0->inst_c0) & 0xffffff); \
1579 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_LABEL, ins->inst_i0); \
1580 ARM_B_COND (code, (condcode), 0); \
1583 if (0 && ins->inst_true_bb->native_offset) { \
1584 ARM_B_COND (code, (condcode), (code - cfg->native_code + ins->inst_true_bb->native_offset) & 0xffffff); \
1586 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_BB, ins->inst_true_bb); \
1587 ARM_B_COND (code, (condcode), 0); \
/* EMIT_COND_BRANCH: same, mapping a generic condition through branch_cc_table */
1591 #define EMIT_COND_BRANCH(ins,cond) EMIT_COND_BRANCH_FLAGS(ins, branch_cc_table [(cond)])
1593 /* emit an exception if condition is fail
1595 * We assign the extra code used to throw the implicit exceptions
1596 * to cfg->bb_exit as far as the big branch handling is concerned
1598 #define EMIT_COND_SYSTEM_EXCEPTION_FLAGS(condcode,exc_name) \
1600 mono_add_patch_info (cfg, code - cfg->native_code, \
1601 MONO_PATCH_INFO_EXC, exc_name); \
1602 ARM_BL_COND (code, (condcode), 0); \
/* EMIT_COND_SYSTEM_EXCEPTION: condition mapped through branch_cc_table */
1605 #define EMIT_COND_SYSTEM_EXCEPTION(cond,exc_name) EMIT_COND_SYSTEM_EXCEPTION_FLAGS(branch_cc_table [(cond)], (exc_name))
/* mono_arch_peephole_pass_1: first arch-specific peephole pass
 * (no work visible in this excerpt). */
1608 mono_arch_peephole_pass_1 (MonoCompile *cfg, MonoBasicBlock *bb)
/*
 * mono_arch_peephole_pass_2:
 * Local peephole optimizations over one basic block:
 *  - multiply by 1 -> move (or delete if dreg == sreg1), multiply by a
 *    power of two -> shift;
 *  - store/load forwarding: a load following a store to the same
 *    basereg+offset becomes a move, an iconst, or is deleted;
 *  - redundant move elimination (move to self, move/move-back pairs).
 */
1613 mono_arch_peephole_pass_2 (MonoCompile *cfg, MonoBasicBlock *bb)
1615 MonoInst *ins, *n, *last_ins = NULL;
1617 MONO_BB_FOR_EACH_INS_SAFE (bb, n, ins) {
1618 switch (ins->opcode) {
1621 /* Already done by an arch-independent pass */
1625 /* remove unnecessary multiplication with 1 */
1626 if (ins->inst_imm == 1) {
1627 if (ins->dreg != ins->sreg1) {
1628 ins->opcode = OP_MOVE;
1630 MONO_DELETE_INS (bb, ins);
/* strength-reduce multiply by a power of two to a left shift */
1634 int power2 = mono_is_power_of_two (ins->inst_imm);
1636 ins->opcode = OP_SHL_IMM;
1637 ins->inst_imm = power2;
1641 case OP_LOAD_MEMBASE:
1642 case OP_LOADI4_MEMBASE:
1644 * OP_STORE_MEMBASE_REG reg, offset(basereg)
1645 * OP_LOAD_MEMBASE offset(basereg), reg
1647 if (last_ins && (last_ins->opcode == OP_STOREI4_MEMBASE_REG
1648 || last_ins->opcode == OP_STORE_MEMBASE_REG) &&
1649 ins->inst_basereg == last_ins->inst_destbasereg &&
1650 ins->inst_offset == last_ins->inst_offset) {
1651 if (ins->dreg == last_ins->sreg1) {
1652 MONO_DELETE_INS (bb, ins);
1655 //static int c = 0; printf ("MATCHX %s %d\n", cfg->method->name,c++);
1656 ins->opcode = OP_MOVE;
1657 ins->sreg1 = last_ins->sreg1;
1661 * Note: reg1 must be different from the basereg in the second load
1662 * OP_LOAD_MEMBASE offset(basereg), reg1
1663 * OP_LOAD_MEMBASE offset(basereg), reg2
1665 * OP_LOAD_MEMBASE offset(basereg), reg1
1666 * OP_MOVE reg1, reg2
1668 } if (last_ins && (last_ins->opcode == OP_LOADI4_MEMBASE
1669 || last_ins->opcode == OP_LOAD_MEMBASE) &&
1670 ins->inst_basereg != last_ins->dreg &&
1671 ins->inst_basereg == last_ins->inst_basereg &&
1672 ins->inst_offset == last_ins->inst_offset) {
1674 if (ins->dreg == last_ins->dreg) {
1675 MONO_DELETE_INS (bb, ins);
1678 ins->opcode = OP_MOVE;
1679 ins->sreg1 = last_ins->dreg;
1682 //g_assert_not_reached ();
1686 * OP_STORE_MEMBASE_IMM imm, offset(basereg)
1687 * OP_LOAD_MEMBASE offset(basereg), reg
1689 * OP_STORE_MEMBASE_IMM imm, offset(basereg)
1690 * OP_ICONST reg, imm
1692 } else if (last_ins && (last_ins->opcode == OP_STOREI4_MEMBASE_IMM
1693 || last_ins->opcode == OP_STORE_MEMBASE_IMM) &&
1694 ins->inst_basereg == last_ins->inst_destbasereg &&
1695 ins->inst_offset == last_ins->inst_offset) {
1696 //static int c = 0; printf ("MATCHX %s %d\n", cfg->method->name,c++);
1697 ins->opcode = OP_ICONST;
1698 ins->inst_c0 = last_ins->inst_imm;
/* deliberately left unreachable until the transformation is validated */
1699 g_assert_not_reached (); // check this rule
1703 case OP_LOADU1_MEMBASE:
1704 case OP_LOADI1_MEMBASE:
/* byte store followed by byte load from the same slot: re-narrow the
 * stored register instead of reloading from memory */
1705 if (last_ins && (last_ins->opcode == OP_STOREI1_MEMBASE_REG) &&
1706 ins->inst_basereg == last_ins->inst_destbasereg &&
1707 ins->inst_offset == last_ins->inst_offset) {
1708 ins->opcode = (ins->opcode == OP_LOADI1_MEMBASE) ? OP_ICONV_TO_I1 : OP_ICONV_TO_U1;
1709 ins->sreg1 = last_ins->sreg1;
1712 case OP_LOADU2_MEMBASE:
1713 case OP_LOADI2_MEMBASE:
1714 if (last_ins && (last_ins->opcode == OP_STOREI2_MEMBASE_REG) &&
1715 ins->inst_basereg == last_ins->inst_destbasereg &&
1716 ins->inst_offset == last_ins->inst_offset) {
1717 ins->opcode = (ins->opcode == OP_LOADI2_MEMBASE) ? OP_ICONV_TO_I2 : OP_ICONV_TO_U2;
1718 ins->sreg1 = last_ins->sreg1;
1722 ins->opcode = OP_MOVE;
/* move to self is a no-op */
1726 if (ins->dreg == ins->sreg1) {
1727 MONO_DELETE_INS (bb, ins);
1731 * OP_MOVE sreg, dreg
1732 * OP_MOVE dreg, sreg
1734 if (last_ins && last_ins->opcode == OP_MOVE &&
1735 ins->sreg1 == last_ins->dreg &&
1736 ins->dreg == last_ins->sreg1) {
1737 MONO_DELETE_INS (bb, ins);
1745 bb->last_ins = last_ins;
/* condition-code lookup table used by EMIT_COND_BRANCH; presumably
 * ARMCOND_* values in the generic CMP_* order — TODO confirm, the
 * initializer is not visible in this excerpt */
1749 * the branch_cc_table should maintain the order of these
1763 branch_cc_table [] = {
/* NEW_INS: allocate a new MonoInst with opcode 'op' into 'dest' and
 * insert it before the current 'ins' in basic block 'bb' (both are
 * expected to be in scope at the expansion site). */
1777 #define NEW_INS(cfg,dest,op) do {	\
1778 MONO_INST_NEW ((cfg), (dest), (op)); \
1779 mono_bblock_insert_before_ins (bb, ins, (dest)); \
/*
 * map_to_reg_reg_op:
 * Map an opcode that takes an immediate (or a membase offset that does
 * not fit an ARM immediate encoding) to its register-register /
 * memindex equivalent, used by the lowering pass once the constant has
 * been materialized in a register.
 * Note the *_MEMBASE_IMM stores map to *_MEMBASE_REG: only the stored
 * value moves to a register; the offset is handled in a separate step.
 */
1783 map_to_reg_reg_op (int op)
1792 case OP_COMPARE_IMM:
1794 case OP_ICOMPARE_IMM:
1808 case OP_LOAD_MEMBASE:
1809 return OP_LOAD_MEMINDEX;
1810 case OP_LOADI4_MEMBASE:
1811 return OP_LOADI4_MEMINDEX;
1812 case OP_LOADU4_MEMBASE:
1813 return OP_LOADU4_MEMINDEX;
1814 case OP_LOADU1_MEMBASE:
1815 return OP_LOADU1_MEMINDEX;
1816 case OP_LOADI2_MEMBASE:
1817 return OP_LOADI2_MEMINDEX;
1818 case OP_LOADU2_MEMBASE:
1819 return OP_LOADU2_MEMINDEX;
1820 case OP_LOADI1_MEMBASE:
1821 return OP_LOADI1_MEMINDEX;
1822 case OP_STOREI1_MEMBASE_REG:
1823 return OP_STOREI1_MEMINDEX;
1824 case OP_STOREI2_MEMBASE_REG:
1825 return OP_STOREI2_MEMINDEX;
1826 case OP_STOREI4_MEMBASE_REG:
1827 return OP_STOREI4_MEMINDEX;
1828 case OP_STORE_MEMBASE_REG:
1829 return OP_STORE_MEMINDEX;
1830 case OP_STORER4_MEMBASE_REG:
1831 return OP_STORER4_MEMINDEX;
1832 case OP_STORER8_MEMBASE_REG:
1833 return OP_STORER8_MEMINDEX;
1834 case OP_STORE_MEMBASE_IMM:
1835 return OP_STORE_MEMBASE_REG;
1836 case OP_STOREI1_MEMBASE_IMM:
1837 return OP_STOREI1_MEMBASE_REG;
1838 case OP_STOREI2_MEMBASE_IMM:
1839 return OP_STOREI2_MEMBASE_REG;
1840 case OP_STOREI4_MEMBASE_IMM:
1841 return OP_STOREI4_MEMBASE_REG;
/* any opcode not listed cannot be lowered this way */
1843 g_assert_not_reached ();
1847 * Remove from the instruction list the instructions that can't be
1848 * represented with very simple instructions with no register
/*
 * mono_arch_lowering_pass:
 * Rewrite instructions whose immediates/offsets cannot be encoded in an
 * ARM instruction: the constant is materialized into a fresh vreg with
 * an inserted OP_ICONST (or the base address is adjusted via OP_ADD_IMM
 * for FP loads/stores) and the opcode is switched to its reg-reg /
 * memindex form via map_to_reg_reg_op ().
 */
1852 mono_arch_lowering_pass (MonoCompile *cfg, MonoBasicBlock *bb)
1854 MonoInst *ins, *temp, *last_ins = NULL;
1855 int rot_amount, imm8, low_imm;
1857 /* setup the virtual reg allocator */
1858 if (bb->max_vreg > cfg->rs->next_vreg)
1859 cfg->rs->next_vreg = bb->max_vreg;
1861 MONO_BB_FOR_EACH_INS (bb, ins) {
1863 switch (ins->opcode) {
/* ALU/compare immediates must fit the ARM rotated-imm8 encoding;
 * otherwise load the constant into a register and use the reg-reg form */
1867 case OP_COMPARE_IMM:
1868 case OP_ICOMPARE_IMM:
1882 if ((imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount)) < 0) {
1883 NEW_INS (cfg, temp, OP_ICONST);
1884 temp->inst_c0 = ins->inst_imm;
1885 temp->dreg = mono_regstate_next_int (cfg->rs);
1886 ins->sreg2 = temp->dreg;
1888 ins->opcode = mono_op_imm_to_op (ins->opcode);
1890 ins->opcode = map_to_reg_reg_op (ins->opcode);
/* multiply by immediate: strength-reduce 1/0/powers of two, otherwise
 * fall back to a real OP_IMUL with the constant in a register */
1895 if (ins->inst_imm == 1) {
1896 ins->opcode = OP_MOVE;
1899 if (ins->inst_imm == 0) {
1900 ins->opcode = OP_ICONST;
1904 imm8 = mono_is_power_of_two (ins->inst_imm);
1906 ins->opcode = OP_SHL_IMM;
1907 ins->inst_imm = imm8;
1910 NEW_INS (cfg, temp, OP_ICONST);
1911 temp->inst_c0 = ins->inst_imm;
1912 temp->dreg = mono_regstate_next_int (cfg->rs);
1913 ins->sreg2 = temp->dreg;
1914 ins->opcode = OP_IMUL;
1916 case OP_LOCALLOC_IMM:
1917 NEW_INS (cfg, temp, OP_ICONST);
1918 temp->inst_c0 = ins->inst_imm;
1919 temp->dreg = mono_regstate_next_int (cfg->rs);
1920 ins->sreg1 = temp->dreg;
1921 ins->opcode = OP_LOCALLOC;
/* word/byte loads: 12-bit offset encoding */
1923 case OP_LOAD_MEMBASE:
1924 case OP_LOADI4_MEMBASE:
1925 case OP_LOADU4_MEMBASE:
1926 case OP_LOADU1_MEMBASE:
1927 /* we can do two things: load the immed in a register
1928 * and use an indexed load, or see if the immed can be
1929 * represented as an ad_imm + a load with a smaller offset
1930 * that fits. We just do the first for now, optimize later.
1932 if (arm_is_imm12 (ins->inst_offset))
1934 NEW_INS (cfg, temp, OP_ICONST);
1935 temp->inst_c0 = ins->inst_offset;
1936 temp->dreg = mono_regstate_next_int (cfg->rs);
1937 ins->sreg2 = temp->dreg;
1938 ins->opcode = map_to_reg_reg_op (ins->opcode);
/* halfword/signed-byte loads: only an 8-bit offset encoding exists */
1940 case OP_LOADI2_MEMBASE:
1941 case OP_LOADU2_MEMBASE:
1942 case OP_LOADI1_MEMBASE:
1943 if (arm_is_imm8 (ins->inst_offset))
1945 NEW_INS (cfg, temp, OP_ICONST);
1946 temp->inst_c0 = ins->inst_offset;
1947 temp->dreg = mono_regstate_next_int (cfg->rs);
1948 ins->sreg2 = temp->dreg;
1949 ins->opcode = map_to_reg_reg_op (ins->opcode);
/* FP loads: no indexed form, so split the offset into an ADD_IMM plus
 * a small in-range displacement */
1951 case OP_LOADR4_MEMBASE:
1952 case OP_LOADR8_MEMBASE:
1953 if (arm_is_fpimm8 (ins->inst_offset))
1955 low_imm = ins->inst_offset & 0x1ff;
1956 if ((imm8 = mono_arm_is_rotated_imm8 (ins->inst_offset & ~0x1ff, &rot_amount)) >= 0) {
1957 NEW_INS (cfg, temp, OP_ADD_IMM);
1958 temp->inst_imm = ins->inst_offset & ~0x1ff;
1959 temp->sreg1 = ins->inst_basereg;
1960 temp->dreg = mono_regstate_next_int (cfg->rs);
1961 ins->inst_basereg = temp->dreg;
1962 ins->inst_offset = low_imm;
1965 /* VFP/FPA doesn't have indexed load instructions */
1966 g_assert_not_reached ();
1968 case OP_STORE_MEMBASE_REG:
1969 case OP_STOREI4_MEMBASE_REG:
1970 case OP_STOREI1_MEMBASE_REG:
1971 if (arm_is_imm12 (ins->inst_offset))
1973 NEW_INS (cfg, temp, OP_ICONST);
1974 temp->inst_c0 = ins->inst_offset;
1975 temp->dreg = mono_regstate_next_int (cfg->rs);
1976 ins->sreg2 = temp->dreg;
1977 ins->opcode = map_to_reg_reg_op (ins->opcode);
1979 case OP_STOREI2_MEMBASE_REG:
1980 if (arm_is_imm8 (ins->inst_offset))
1982 NEW_INS (cfg, temp, OP_ICONST);
1983 temp->inst_c0 = ins->inst_offset;
1984 temp->dreg = mono_regstate_next_int (cfg->rs);
1985 ins->sreg2 = temp->dreg;
1986 ins->opcode = map_to_reg_reg_op (ins->opcode);
1988 case OP_STORER4_MEMBASE_REG:
1989 case OP_STORER8_MEMBASE_REG:
1990 if (arm_is_fpimm8 (ins->inst_offset))
1992 low_imm = ins->inst_offset & 0x1ff;
1993 if ((imm8 = mono_arm_is_rotated_imm8 (ins->inst_offset & ~ 0x1ff, &rot_amount)) >= 0 && arm_is_fpimm8 (low_imm)) {
1994 NEW_INS (cfg, temp, OP_ADD_IMM);
1995 temp->inst_imm = ins->inst_offset & ~0x1ff;
1996 temp->sreg1 = ins->inst_destbasereg;
1997 temp->dreg = mono_regstate_next_int (cfg->rs);
1998 ins->inst_destbasereg = temp->dreg;
1999 ins->inst_offset = low_imm;
2002 /*g_print ("fail with: %d (%d, %d)\n", ins->inst_offset, ins->inst_offset & ~0x1ff, low_imm);*/
2003 /* VFP/FPA doesn't have indexed store instructions */
2004 g_assert_not_reached ();
/* immediate stores: move the value to a register first, then re-process
 * the resulting *_MEMBASE_REG so a big offset is also handled */
2006 case OP_STORE_MEMBASE_IMM:
2007 case OP_STOREI1_MEMBASE_IMM:
2008 case OP_STOREI2_MEMBASE_IMM:
2009 case OP_STOREI4_MEMBASE_IMM:
2010 NEW_INS (cfg, temp, OP_ICONST);
2011 temp->inst_c0 = ins->inst_imm;
2012 temp->dreg = mono_regstate_next_int (cfg->rs);
2013 ins->sreg1 = temp->dreg;
2014 ins->opcode = map_to_reg_reg_op (ins->opcode);
2016 goto loop_start; /* make it handle the possibly big ins->inst_offset */
2018 gboolean swap = FALSE;
2021 /* Some fp compares require swapped operands */
2022 g_assert (ins->next);
2023 switch (ins->next->opcode) {
2025 ins->next->opcode = OP_FBLT;
2029 ins->next->opcode = OP_FBLT_UN;
2033 ins->next->opcode = OP_FBGE;
2037 ins->next->opcode = OP_FBGE_UN;
2045 ins->sreg1 = ins->sreg2;
2054 bb->last_ins = last_ins;
2055 bb->max_vreg = cfg->rs->next_vreg;
/*
 * emit_float_to_int:
 * Convert the FP value in sreg to an integer in dreg, then truncate it
 * to 'size' bytes. FPA converts directly with ARM_FIXZ; VFP converts in
 * F0 (TOSIZD/TOUIZD for signed/unsigned) and moves the bits out with
 * FMRS. Narrowing uses a mask or shift pair: logical shift right for
 * unsigned values, arithmetic shift right to sign-extend signed ones.
 */
2060 emit_float_to_int (MonoCompile *cfg, guchar *code, int dreg, int sreg, int size, gboolean is_signed)
2062 /* sreg is a float, dreg is an integer reg */
2064 ARM_FIXZ (code, dreg, sreg);
2065 #elif defined(ARM_FPU_VFP)
2067 ARM_TOSIZD (code, ARM_VFP_F0, sreg);
2069 ARM_TOUIZD (code, ARM_VFP_F0, sreg);
2070 ARM_FMRS (code, dreg, ARM_VFP_F0);
/* unsigned narrowing: mask (1 byte) or shl/shr pair (2 bytes) */
2074 ARM_AND_REG_IMM8 (code, dreg, dreg, 0xff);
2075 else if (size == 2) {
2076 ARM_SHL_IMM (code, dreg, dreg, 16);
2077 ARM_SHR_IMM (code, dreg, dreg, 16);
/* signed narrowing: shl/sar pair sign-extends the low byte/halfword */
2081 ARM_SHL_IMM (code, dreg, dreg, 24);
2082 ARM_SAR_IMM (code, dreg, dreg, 24);
2083 } else if (size == 2) {
2084 ARM_SHL_IMM (code, dreg, dreg, 16);
2085 ARM_SAR_IMM (code, dreg, dreg, 16);
/* PatchData: bookkeeping passed to search_thunk_slot (struct partially
 * visible in this excerpt). */
2093 const guchar *target;
/* true when 'diff' fits the +/-32MB signed displacement of an ARM B/BL */
2098 #define is_call_imm(diff) ((gint)(diff) >= -33554432 && (gint)(diff) <= 33554431)
/*
 * search_thunk_slot:
 * mono_code_manager_foreach callback. Scans the thunk area of one code
 * chunk for either an existing 3-word thunk that already jumps to
 * pdata->target (reuse it) or an all-zero free slot (emit a fresh
 * "ldr ip, [pc]; bx ip / mov pc, ip; <target>" thunk). On success the
 * call site at pdata->code is patched via arm_patch () and the icache
 * is flushed. The chunk is first checked to be reachable from the call
 * site with a plain B/BL.
 */
2101 search_thunk_slot (void *data, int csize, int bsize, void *user_data) {
2102 PatchData *pdata = (PatchData*)user_data;
2103 guchar *code = data;
2104 guint32 *thunks = data;
2105 guint32 *endthunks = (guint32*)(code + bsize);
2107 int difflow, diffhigh;
2109 /* always ensure a call from pdata->code can reach to the thunks without further thunks */
2110 difflow = (char*)pdata->code - (char*)thunks;
2111 diffhigh = (char*)pdata->code - (char*)endthunks;
2112 if (!((is_call_imm (thunks) && is_call_imm (endthunks)) || (is_call_imm (difflow) && is_call_imm (diffhigh))))
2116 * The thunk is composed of 3 words:
2117 * load constant from thunks [2] into ARM_IP
2120 * Note that the LR register is already setup
2122 //g_print ("thunk nentries: %d\n", ((char*)endthunks - (char*)thunks)/16);
/* found == 2 presumably means "take the first free slot anywhere"
 * (second pass from handle_thunk) — confirm against PatchData users */
2123 if ((pdata->found == 2) || (pdata->code >= code && pdata->code <= code + csize)) {
2124 while (thunks < endthunks) {
2125 //g_print ("looking for target: %p at %p (%08x-%08x)\n", pdata->target, thunks, thunks [0], thunks [1]);
2126 if (thunks [2] == (guint32)pdata->target) {
2127 arm_patch (pdata->code, (guchar*)thunks);
2128 mono_arch_flush_icache (pdata->code, 4);
2131 } else if ((thunks [0] == 0) && (thunks [1] == 0) && (thunks [2] == 0)) {
2132 /* found a free slot instead: emit thunk */
2133 /* ARMREG_IP is fine to use since this can't be an IMT call
2136 code = (guchar*)thunks;
2137 ARM_LDR_IMM (code, ARMREG_IP, ARMREG_PC, 0);
2138 if (thumb_supported)
2139 ARM_BX (code, ARMREG_IP);
2141 ARM_MOV_REG_REG (code, ARMREG_PC, ARMREG_IP);
2142 thunks [2] = (guint32)pdata->target;
2143 mono_arch_flush_icache ((guchar*)thunks, 12);
2145 arm_patch (pdata->code, (guchar*)thunks);
2146 mono_arch_flush_icache (pdata->code, 4);
2150 /* skip 12 bytes, the size of the thunk */
2154 //g_print ("failed thunk lookup for %p from %p at %p (%d entries)\n", pdata->target, pdata->code, data, count);
/*
 * handle_thunk:
 * Patch the out-of-range branch at 'code' so it reaches 'target'
 * through a thunk: walk every code chunk of the current domain with
 * search_thunk_slot (a second foreach pass takes the first available
 * slot), under the domain lock. Aborts if no slot could be found.
 */
2160 handle_thunk (int absolute, guchar *code, const guchar *target) {
2161 MonoDomain *domain = mono_domain_get ();
2165 pdata.target = target;
2166 pdata.absolute = absolute;
2169 mono_domain_lock (domain);
2170 mono_code_manager_foreach (domain->code_mp, search_thunk_slot, &pdata);
2173 /* this uses the first available slot */
2175 mono_code_manager_foreach (domain->code_mp, search_thunk_slot, &pdata);
2177 mono_domain_unlock (domain);
/* pdata.found == 1 on success; anything else is fatal */
2179 if (pdata.found != 1)
2180 g_print ("thunk failed for %p from %p\n", target, code);
2181 g_assert (pdata.found == 1);
/*
 * arm_patch:
 * Patch the instruction at 'code' so control reaches 'target'.
 * - B/BL (primary opcode bits 101): rewrite the 24-bit displacement in
 *   place. A Thumb target (low address bit set) turns BL into BLX when
 *   thumb is supported. Targets outside +/-32MB go through
 *   handle_thunk ().
 * - Register-indirect call sequences (bx/blx via ip): the address
 *   constant embedded next to the "ldr ip, [pc]" is overwritten.
 */
2185 arm_patch (guchar *code, const guchar *target)
2187 guint32 *code32 = (void*)code;
2188 guint32 ins = *code32;
2189 guint32 prim = (ins >> 25) & 7;
2190 guint32 tval = GPOINTER_TO_UINT (target);
2192 //g_print ("patching 0x%08x (0x%08x) to point to 0x%08x\n", code, ins, target);
2193 if (prim == 5) { /* 101b */
2194 /* the diff starts 8 bytes from the branch opcode */
2195 gint diff = target - code - 8;
2197 gint tmask = 0xffffffff;
2198 if (tval & 1) { /* entering thumb mode */
2199 diff = target - 1 - code - 8;
2200 g_assert (thumb_supported);
2201 tbits = 0xf << 28; /* bl->blx bit pattern */
2202 g_assert ((ins & (1 << 24))); /* it must be a bl, not b instruction */
2203 /* this low bit of the displacement is moved to bit 24 in the instruction encoding */
2207 tmask = ~(1 << 24); /* clear the link bit */
2208 /*g_print ("blx to thumb: target: %p, code: %p, diff: %d, mask: %x\n", target, code, diff, tmask);*/
/* forward branch within range: store the positive displacement */
2213 if (diff <= 33554431) {
2215 ins = (ins & 0xff000000) | diff;
2217 *code32 = ins | tbits;
2221 /* diff between 0 and -33554432 */
2222 if (diff >= -33554432) {
2224 ins = (ins & 0xff000000) | (diff & ~0xff000000);
2226 *code32 = ins | tbits;
/* out of range for a direct branch: go through a thunk */
2231 handle_thunk (TRUE, code, target);
2236 * The alternative call sequences look like this:
2238 * ldr ip, [pc] // loads the address constant
2239 * b 1f // jumps around the constant
2240 * address constant embedded in the code
2245 * There are two cases for patching:
2246 * a) at the end of method emission: in this case code points to the start
2247 * of the call sequence
2248 * b) during runtime patching of the call site: in this case code points
2249 * to the mov pc, ip instruction
2251 * We have to handle also the thunk jump code sequence:
2255 * address constant // execution never reaches here
2257 if ((ins & 0x0ffffff0) == 0x12fff10) {
2258 /* Branch and exchange: the address is constructed in a reg
2259 * We can patch BX when the code sequence is the following:
2260 * ldr ip, [pc, #0] ; 0x8
/* re-emit the reference sequence into ccode so we can compare the
 * surrounding words and locate the embedded address constant */
2267 guint8 *emit = (guint8*)ccode;
2268 ARM_LDR_IMM (emit, ARMREG_IP, ARMREG_PC, 0);
2270 ARM_MOV_REG_REG (emit, ARMREG_LR, ARMREG_PC);
2271 ARM_BX (emit, ARMREG_IP);
2273 /*patching from magic trampoline*/
2274 if (ins == ccode [3]) {
2275 g_assert (code32 [-4] == ccode [0]);
2276 g_assert (code32 [-3] == ccode [1]);
2277 g_assert (code32 [-1] == ccode [2]);
2278 code32 [-2] = (guint32)target;
2281 /*patching from JIT*/
2282 if (ins == ccode [0]) {
2283 g_assert (code32 [1] == ccode [1]);
2284 g_assert (code32 [3] == ccode [2]);
2285 g_assert (code32 [4] == ccode [3]);
2286 code32 [2] = (guint32)target;
2289 g_assert_not_reached ();
2290 } else if ((ins & 0x0ffffff0) == 0x12fff30) {
/* BLX-register call sequence */
2298 guint8 *emit = (guint8*)ccode;
2299 ARM_LDR_IMM (emit, ARMREG_IP, ARMREG_PC, 0);
2301 ARM_BLX_REG (emit, ARMREG_IP);
2303 g_assert (code32 [-3] == ccode [0]);
2304 g_assert (code32 [-2] == ccode [1]);
2305 g_assert (code32 [0] == ccode [2]);
2307 code32 [-1] = (guint32)target;
/* fallback: ldr ip / mov lr, pc / mov pc, ip (or bx ip) sequence */
2310 guint32 *tmp = ccode;
2311 guint8 *emit = (guint8*)tmp;
2312 ARM_LDR_IMM (emit, ARMREG_IP, ARMREG_PC, 0);
2313 ARM_MOV_REG_REG (emit, ARMREG_LR, ARMREG_PC);
2314 ARM_MOV_REG_REG (emit, ARMREG_PC, ARMREG_IP);
2315 ARM_BX (emit, ARMREG_IP);
2316 if (ins == ccode [2]) {
2317 g_assert_not_reached (); // should be -2 ...
2318 code32 [-1] = (guint32)target;
2321 if (ins == ccode [0]) {
2322 /* handles both thunk jump code and the far call sequence */
2323 code32 [2] = (guint32)target;
2326 g_assert_not_reached ();
2328 // g_print ("patched with 0x%08x\n", ins);
2332 * Return the >= 0 uimm8 value if val can be represented with a byte + rotation
2333 * (with the rotation amount in *rot_amount. rot_amount is already adjusted
2334 * to be used with the emit macros.
2335 * Return -1 otherwise.
2338 mono_arm_is_rotated_imm8 (guint32 val, gint *rot_amount)
/* try every even rotation (ARM immediates rotate right by 2*i bits) */
2341 for (i = 0; i < 31; i+= 2) {
2342 res = (val << (32 - i)) | (val >> i);
2345 *rot_amount = i? 32 - i: 0;
2352 * Emits in code a sequence of instructions that load the value 'val'
2353 * into the dreg register. Uses at most 4 instructions.
2356 mono_arm_emit_load_imm (guint8 *code, int dreg, guint32 val)
2358 int imm8, rot_amount;
/* constant-pool path: load the value from a literal next to the code */
2360 ARM_LDR_IMM (code, dreg, ARMREG_PC, 0);
2361 /* skip the constant pool */
/* fast paths: the value (or its bitwise complement) fits a rotated imm8 */
2367 if ((imm8 = mono_arm_is_rotated_imm8 (val, &rot_amount)) >= 0) {
2368 ARM_MOV_REG_IMM (code, dreg, imm8, rot_amount);
2369 } else if ((imm8 = mono_arm_is_rotated_imm8 (~val, &rot_amount)) >= 0) {
2370 ARM_MVN_REG_IMM (code, dreg, imm8, rot_amount);
/* general case: build the value byte by byte, a MOV of the lowest
 * non-zero byte followed by up to three rotated-immediate ADDs */
2373 ARM_MOV_REG_IMM8 (code, dreg, (val & 0xFF));
2375 ARM_ADD_REG_IMM (code, dreg, dreg, (val & 0xFF00) >> 8, 24);
2377 if (val & 0xFF0000) {
2378 ARM_ADD_REG_IMM (code, dreg, dreg, (val & 0xFF0000) >> 16, 16);
2380 if (val & 0xFF000000) {
2381 ARM_ADD_REG_IMM (code, dreg, dreg, (val & 0xFF000000) >> 24, 8);
2383 } else if (val & 0xFF00) {
2384 ARM_MOV_REG_IMM (code, dreg, (val & 0xFF00) >> 8, 24);
2385 if (val & 0xFF0000) {
2386 ARM_ADD_REG_IMM (code, dreg, dreg, (val & 0xFF0000) >> 16, 16);
2388 if (val & 0xFF000000) {
2389 ARM_ADD_REG_IMM (code, dreg, dreg, (val & 0xFF000000) >> 24, 8);
2391 } else if (val & 0xFF0000) {
2392 ARM_MOV_REG_IMM (code, dreg, (val & 0xFF0000) >> 16, 16);
2393 if (val & 0xFF000000) {
2394 ARM_ADD_REG_IMM (code, dreg, dreg, (val & 0xFF000000) >> 24, 8);
2397 //g_assert_not_reached ();
2403 * emit_load_volatile_arguments:
2405 * Load volatile arguments from the stack to the original input registers.
2406 * Required before a tail call.
2409 emit_load_volatile_arguments (MonoCompile *cfg, guint8 *code)
2411 MonoMethod *method = cfg->method;
2412 MonoMethodSignature *sig;
2417 /* FIXME: Generate intermediate code instead */
2419 sig = mono_method_signature (method);
2421 /* This is the opposite of the code in emit_prolog */
2425 cinfo = calculate_sizes (sig, sig->pinvoke);
/* a struct return is passed as a hidden address argument: reload it */
2427 if (MONO_TYPE_ISSTRUCT (sig->ret)) {
2428 ArgInfo *ainfo = &cinfo->ret;
2429 inst = cfg->vret_addr;
2430 g_assert (arm_is_imm12 (inst->inst_offset));
2431 ARM_LDR_IMM (code, ainfo->reg, inst->inst_basereg, inst->inst_offset);
2433 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
2434 ArgInfo *ainfo = cinfo->args + i;
2435 inst = cfg->args [pos];
2437 if (cfg->verbose_level > 2)
2438 g_print ("Loading argument %d (type: %d)\n", i, ainfo->regtype);
2439 if (inst->opcode == OP_REGVAR) {
2440 if (ainfo->regtype == RegTypeGeneral)
2441 ARM_MOV_REG_REG (code, inst->dreg, ainfo->reg);
2442 else if (ainfo->regtype == RegTypeFP) {
2443 g_assert_not_reached ();
2444 } else if (ainfo->regtype == RegTypeBase) {
2448 if (arm_is_imm12 (prev_sp_offset + ainfo->offset)) {
2449 ARM_LDR_IMM (code, inst->dreg, ARMREG_SP, (prev_sp_offset + ainfo->offset));
/* NOTE(review): the guarded path above uses prev_sp_offset +
 * ainfo->offset but this fallback loads inst->inst_offset —
 * looks inconsistent; verify which offset is intended */
2451 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
2452 ARM_LDR_REG_REG (code, inst->dreg, ARMREG_SP, ARMREG_IP);
2456 g_assert_not_reached ();
2458 if (ainfo->regtype == RegTypeGeneral) {
2459 switch (ainfo->size) {
/* 8-byte argument: reload both words of the register pair */
2466 g_assert (arm_is_imm12 (inst->inst_offset));
2467 ARM_LDR_IMM (code, ainfo->reg, inst->inst_basereg, inst->inst_offset);
2468 g_assert (arm_is_imm12 (inst->inst_offset + 4));
2469 ARM_LDR_IMM (code, ainfo->reg + 1, inst->inst_basereg, inst->inst_offset + 4);
2472 if (arm_is_imm12 (inst->inst_offset)) {
2473 ARM_LDR_IMM (code, ainfo->reg, inst->inst_basereg, inst->inst_offset);
2475 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
2476 ARM_LDR_REG_REG (code, ainfo->reg, inst->inst_basereg, ARMREG_IP);
2480 } else if (ainfo->regtype == RegTypeBaseGen) {
2483 } else if (ainfo->regtype == RegTypeBase) {
2486 } else if (ainfo->regtype == RegTypeFP) {
2487 g_assert_not_reached ();
2488 } else if (ainfo->regtype == RegTypeStructByVal) {
2489 int doffset = inst->inst_offset;
2493 if (mono_class_from_mono_type (inst->inst_vtype))
2494 size = mono_class_native_size (mono_class_from_mono_type (inst->inst_vtype), NULL);
/* reload each register-passed word of the struct */
2495 for (cur_reg = 0; cur_reg < ainfo->size; ++cur_reg) {
2496 if (arm_is_imm12 (doffset)) {
2497 ARM_LDR_IMM (code, ainfo->reg + cur_reg, inst->inst_basereg, doffset);
2499 code = mono_arm_emit_load_imm (code, ARMREG_IP, doffset);
2500 ARM_LDR_REG_REG (code, ainfo->reg + cur_reg, inst->inst_basereg, ARMREG_IP);
2502 soffset += sizeof (gpointer);
2503 doffset += sizeof (gpointer);
2508 } else if (ainfo->regtype == RegTypeStructByAddr) {
2523 mono_arch_output_basic_block (MonoCompile *cfg, MonoBasicBlock *bb)
2528 guint8 *code = cfg->native_code + cfg->code_len;
2529 MonoInst *last_ins = NULL;
2530 guint last_offset = 0;
2532 int imm8, rot_amount;
2534 /* we don't align basic blocks of loops on arm */
2536 if (cfg->verbose_level > 2)
2537 g_print ("Basic block %d starting at offset 0x%x\n", bb->block_num, bb->native_offset);
2539 cpos = bb->max_offset;
2541 if (cfg->prof_options & MONO_PROFILE_COVERAGE) {
2542 //MonoCoverageInfo *cov = mono_get_coverage_info (cfg->method);
2543 //g_assert (!mono_compile_aot);
2546 // cov->data [bb->dfn].iloffset = bb->cil_code - cfg->cil_code;
2547 /* this is not thread safe, but good enough */
2548 /* fixme: howto handle overflows? */
2549 //x86_inc_mem (code, &cov->data [bb->dfn].count);
2552 if (mono_break_at_bb_method && mono_method_desc_full_match (mono_break_at_bb_method, cfg->method) && bb->block_num == mono_break_at_bb_bb_num) {
2553 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_INTERNAL_METHOD,
2554 (gpointer)"mono_break");
2555 code = emit_call_seq (cfg, code);
2558 MONO_BB_FOR_EACH_INS (bb, ins) {
2559 offset = code - cfg->native_code;
2561 max_len = ((guint8 *)ins_get_spec (ins->opcode))[MONO_INST_LEN];
2563 if (offset > (cfg->code_size - max_len - 16)) {
2564 cfg->code_size *= 2;
2565 cfg->native_code = g_realloc (cfg->native_code, cfg->code_size);
2566 code = cfg->native_code + offset;
2568 // if (ins->cil_code)
2569 // g_print ("cil code\n");
2570 mono_debug_record_line_number (cfg, ins, offset);
2572 switch (ins->opcode) {
2573 case OP_MEMORY_BARRIER:
2576 g_assert_not_reached ();
2579 ppc_mullw (code, ppc_r4, ins->sreg1, ins->sreg2);
2580 ppc_mulhw (code, ppc_r3, ins->sreg1, ins->sreg2);
2583 ppc_mullw (code, ppc_r4, ins->sreg1, ins->sreg2);
2584 ppc_mulhwu (code, ppc_r3, ins->sreg1, ins->sreg2);
2586 case OP_STOREI1_MEMBASE_IMM:
2587 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_imm & 0xFF);
2588 g_assert (arm_is_imm12 (ins->inst_offset));
2589 ARM_STRB_IMM (code, ARMREG_LR, ins->inst_destbasereg, ins->inst_offset);
2591 case OP_STOREI2_MEMBASE_IMM:
2592 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_imm & 0xFFFF);
2593 g_assert (arm_is_imm8 (ins->inst_offset));
2594 ARM_STRH_IMM (code, ARMREG_LR, ins->inst_destbasereg, ins->inst_offset);
2596 case OP_STORE_MEMBASE_IMM:
2597 case OP_STOREI4_MEMBASE_IMM:
2598 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_imm);
2599 g_assert (arm_is_imm12 (ins->inst_offset));
2600 ARM_STR_IMM (code, ARMREG_LR, ins->inst_destbasereg, ins->inst_offset);
2602 case OP_STOREI1_MEMBASE_REG:
2603 g_assert (arm_is_imm12 (ins->inst_offset));
2604 ARM_STRB_IMM (code, ins->sreg1, ins->inst_destbasereg, ins->inst_offset);
2606 case OP_STOREI2_MEMBASE_REG:
2607 g_assert (arm_is_imm8 (ins->inst_offset));
2608 ARM_STRH_IMM (code, ins->sreg1, ins->inst_destbasereg, ins->inst_offset);
2610 case OP_STORE_MEMBASE_REG:
2611 case OP_STOREI4_MEMBASE_REG:
2612 /* this case is special, since it happens for spill code after lowering has been called */
2613 if (arm_is_imm12 (ins->inst_offset)) {
2614 ARM_STR_IMM (code, ins->sreg1, ins->inst_destbasereg, ins->inst_offset);
2616 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_offset);
2617 ARM_STR_REG_REG (code, ins->sreg1, ins->inst_destbasereg, ARMREG_LR);
2620 case OP_STOREI1_MEMINDEX:
2621 ARM_STRB_REG_REG (code, ins->sreg1, ins->inst_destbasereg, ins->sreg2);
2623 case OP_STOREI2_MEMINDEX:
2624 ARM_STRH_REG_REG (code, ins->sreg1, ins->inst_destbasereg, ins->sreg2);
2626 case OP_STORE_MEMINDEX:
2627 case OP_STOREI4_MEMINDEX:
2628 ARM_STR_REG_REG (code, ins->sreg1, ins->inst_destbasereg, ins->sreg2);
2631 g_assert_not_reached ();
2633 case OP_LOAD_MEMINDEX:
2634 case OP_LOADI4_MEMINDEX:
2635 case OP_LOADU4_MEMINDEX:
2636 ARM_LDR_REG_REG (code, ins->dreg, ins->inst_basereg, ins->sreg2);
2638 case OP_LOADI1_MEMINDEX:
2639 ARM_LDRSB_REG_REG (code, ins->dreg, ins->inst_basereg, ins->sreg2);
2641 case OP_LOADU1_MEMINDEX:
2642 ARM_LDRB_REG_REG (code, ins->dreg, ins->inst_basereg, ins->sreg2);
2644 case OP_LOADI2_MEMINDEX:
2645 ARM_LDRSH_REG_REG (code, ins->dreg, ins->inst_basereg, ins->sreg2);
2647 case OP_LOADU2_MEMINDEX:
2648 ARM_LDRH_REG_REG (code, ins->dreg, ins->inst_basereg, ins->sreg2);
2650 case OP_LOAD_MEMBASE:
2651 case OP_LOADI4_MEMBASE:
2652 case OP_LOADU4_MEMBASE:
2653 /* this case is special, since it happens for spill code after lowering has been called */
2654 if (arm_is_imm12 (ins->inst_offset)) {
2655 ARM_LDR_IMM (code, ins->dreg, ins->inst_basereg, ins->inst_offset);
2657 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_offset);
2658 ARM_LDR_REG_REG (code, ins->dreg, ins->inst_basereg, ARMREG_LR);
2661 case OP_LOADI1_MEMBASE:
2662 g_assert (arm_is_imm8 (ins->inst_offset));
2663 ARM_LDRSB_IMM (code, ins->dreg, ins->inst_basereg, ins->inst_offset);
2665 case OP_LOADU1_MEMBASE:
2666 g_assert (arm_is_imm12 (ins->inst_offset));
2667 ARM_LDRB_IMM (code, ins->dreg, ins->inst_basereg, ins->inst_offset);
2669 case OP_LOADU2_MEMBASE:
2670 g_assert (arm_is_imm8 (ins->inst_offset));
2671 ARM_LDRH_IMM (code, ins->dreg, ins->inst_basereg, ins->inst_offset);
2673 case OP_LOADI2_MEMBASE:
2674 g_assert (arm_is_imm8 (ins->inst_offset));
2675 ARM_LDRSH_IMM (code, ins->dreg, ins->inst_basereg, ins->inst_offset);
2677 case OP_ICONV_TO_I1:
2678 ARM_SHL_IMM (code, ins->dreg, ins->sreg1, 24);
2679 ARM_SAR_IMM (code, ins->dreg, ins->dreg, 24);
2681 case OP_ICONV_TO_I2:
2682 ARM_SHL_IMM (code, ins->dreg, ins->sreg1, 16);
2683 ARM_SAR_IMM (code, ins->dreg, ins->dreg, 16);
2685 case OP_ICONV_TO_U1:
2686 ARM_AND_REG_IMM8 (code, ins->dreg, ins->sreg1, 0xff);
2688 case OP_ICONV_TO_U2:
2689 ARM_SHL_IMM (code, ins->dreg, ins->sreg1, 16);
2690 ARM_SHR_IMM (code, ins->dreg, ins->dreg, 16);
2694 ARM_CMP_REG_REG (code, ins->sreg1, ins->sreg2);
2696 case OP_COMPARE_IMM:
2697 case OP_ICOMPARE_IMM:
2698 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
2699 g_assert (imm8 >= 0);
2700 ARM_CMP_REG_IMM (code, ins->sreg1, imm8, rot_amount);
2704 * gdb does not like encountering the hw breakpoint instruction in the debugged code.
2705 * So instead of emitting a trap, we emit a call to a C function and place a
2708 //*(int*)code = 0xef9f0001;
2711 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_INTERNAL_METHOD,
2712 (gpointer)"mono_break");
2713 code = emit_call_seq (cfg, code);
2717 case OP_DUMMY_STORE:
2718 case OP_NOT_REACHED:
2723 ARM_ADDS_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
2726 ARM_ADD_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
2730 ARM_ADCS_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
2733 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
2734 g_assert (imm8 >= 0);
2735 ARM_ADDS_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
2739 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
2740 g_assert (imm8 >= 0);
2741 ARM_ADD_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
2745 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
2746 g_assert (imm8 >= 0);
2747 ARM_ADCS_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
2750 ARM_ADD_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
2751 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, "OverflowException");
2753 case OP_IADD_OVF_UN:
2754 ARM_ADD_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
2755 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, "OverflowException");
2758 ARM_SUB_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
2759 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, "OverflowException");
2761 case OP_ISUB_OVF_UN:
2762 ARM_SUB_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
2763 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_TRUE, PPC_BR_EQ, "OverflowException");
2765 case OP_ADD_OVF_CARRY:
2766 ARM_ADCS_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
2767 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, "OverflowException");
2769 case OP_ADD_OVF_UN_CARRY:
2770 ARM_ADCS_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
2771 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, "OverflowException");
2773 case OP_SUB_OVF_CARRY:
2774 ARM_SBCS_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
2775 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, "OverflowException");
2777 case OP_SUB_OVF_UN_CARRY:
2778 ARM_SBCS_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
2779 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_TRUE, PPC_BR_EQ, "OverflowException");
2783 ARM_SUBS_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
2786 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
2787 g_assert (imm8 >= 0);
2788 ARM_SUBS_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
2791 ARM_SUB_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
2795 ARM_SBCS_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
2799 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
2800 g_assert (imm8 >= 0);
2801 ARM_SUB_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
2805 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
2806 g_assert (imm8 >= 0);
2807 ARM_SBCS_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
2809 case OP_ARM_RSBS_IMM:
2810 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
2811 g_assert (imm8 >= 0);
2812 ARM_RSBS_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
2814 case OP_ARM_RSC_IMM:
2815 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
2816 g_assert (imm8 >= 0);
2817 ARM_RSC_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
2820 ARM_AND_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
2824 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
2825 g_assert (imm8 >= 0);
2826 ARM_AND_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
2834 /* crappy ARM arch doesn't have a DIV instruction */
2835 g_assert_not_reached ();
2837 ARM_ORR_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
2841 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
2842 g_assert (imm8 >= 0);
2843 ARM_ORR_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
2846 ARM_EOR_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
2850 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
2851 g_assert (imm8 >= 0);
2852 ARM_EOR_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
2855 ARM_SHL_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
2860 ARM_SHL_IMM (code, ins->dreg, ins->sreg1, (ins->inst_imm & 0x1f));
2861 else if (ins->dreg != ins->sreg1)
2862 ARM_MOV_REG_REG (code, ins->dreg, ins->sreg1);
2865 ARM_SAR_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
2870 ARM_SAR_IMM (code, ins->dreg, ins->sreg1, (ins->inst_imm & 0x1f));
2871 else if (ins->dreg != ins->sreg1)
2872 ARM_MOV_REG_REG (code, ins->dreg, ins->sreg1);
2875 case OP_ISHR_UN_IMM:
2877 ARM_SHR_IMM (code, ins->dreg, ins->sreg1, (ins->inst_imm & 0x1f));
2878 else if (ins->dreg != ins->sreg1)
2879 ARM_MOV_REG_REG (code, ins->dreg, ins->sreg1);
2882 ARM_SHR_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
2885 ARM_MVN_REG_REG (code, ins->dreg, ins->sreg1);
2888 ARM_RSB_REG_IMM8 (code, ins->dreg, ins->sreg1, 0);
2891 if (ins->dreg == ins->sreg2)
2892 ARM_MUL_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
2894 ARM_MUL_REG_REG (code, ins->dreg, ins->sreg2, ins->sreg1);
2897 g_assert_not_reached ();
2900 /* FIXME: handle ovf/ sreg2 != dreg */
2901 ARM_MUL_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
2903 case OP_IMUL_OVF_UN:
2904 /* FIXME: handle ovf/ sreg2 != dreg */
2905 ARM_MUL_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
2908 code = mono_arm_emit_load_imm (code, ins->dreg, ins->inst_c0);
2911 /* Load the GOT offset */
2912 mono_add_patch_info (cfg, offset, (MonoJumpInfoType)ins->inst_i1, ins->inst_p0);
2913 ARM_LDR_IMM (code, ins->dreg, ARMREG_PC, 0);
2915 *(gpointer*)code = NULL;
2917 /* Load the value from the GOT */
2918 ARM_LDR_REG_REG (code, ins->dreg, ARMREG_PC, ins->dreg);
2920 case OP_ICONV_TO_I4:
2921 case OP_ICONV_TO_U4:
2923 if (ins->dreg != ins->sreg1)
2924 ARM_MOV_REG_REG (code, ins->dreg, ins->sreg1);
2927 int saved = ins->sreg2;
2928 if (ins->sreg2 == ARM_LSW_REG) {
2929 ARM_MOV_REG_REG (code, ARMREG_LR, ins->sreg2);
2932 if (ins->sreg1 != ARM_LSW_REG)
2933 ARM_MOV_REG_REG (code, ARM_LSW_REG, ins->sreg1);
2934 if (saved != ARM_MSW_REG)
2935 ARM_MOV_REG_REG (code, ARM_MSW_REG, saved);
2940 ARM_MVFD (code, ins->dreg, ins->sreg1);
2941 #elif defined(ARM_FPU_VFP)
2942 ARM_CPYD (code, ins->dreg, ins->sreg1);
2945 case OP_FCONV_TO_R4:
2947 ARM_MVFS (code, ins->dreg, ins->sreg1);
2948 #elif defined(ARM_FPU_VFP)
2949 ARM_CVTD (code, ins->dreg, ins->sreg1);
2950 ARM_CVTS (code, ins->dreg, ins->dreg);
2955 * Keep in sync with mono_arch_emit_epilog
2957 g_assert (!cfg->method->save_lmf);
2959 code = emit_load_volatile_arguments (cfg, code);
2961 code = emit_big_add (code, ARMREG_SP, cfg->frame_reg, cfg->stack_usage);
2962 ARM_POP_NWB (code, cfg->used_int_regs | ((1 << ARMREG_SP)) | ((1 << ARMREG_LR)));
2963 mono_add_patch_info (cfg, (guint8*) code - cfg->native_code, MONO_PATCH_INFO_METHOD_JUMP, ins->inst_p0);
2967 /* ensure ins->sreg1 is not NULL */
2968 ARM_LDR_IMM (code, ARMREG_LR, ins->sreg1, 0);
2972 if (ppc_is_imm16 (cfg->sig_cookie + cfg->stack_usage)) {
2973 ppc_addi (code, ppc_r11, cfg->frame_reg, cfg->sig_cookie + cfg->stack_usage);
2975 ppc_load (code, ppc_r11, cfg->sig_cookie + cfg->stack_usage);
2976 ppc_add (code, ppc_r11, cfg->frame_reg, ppc_r11);
2978 ppc_stw (code, ppc_r11, 0, ins->sreg1);
2988 call = (MonoCallInst*)ins;
2989 if (ins->flags & MONO_INST_HAS_METHOD)
2990 mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_METHOD, call->method);
2992 mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_ABS, call->fptr);
2993 code = emit_call_seq (cfg, code);
2994 code = emit_move_return_value (cfg, ins, code);
3000 case OP_VOIDCALL_REG:
3002 code = emit_call_reg (code, ins->sreg1);
3003 code = emit_move_return_value (cfg, ins, code);
3005 case OP_FCALL_MEMBASE:
3006 case OP_LCALL_MEMBASE:
3007 case OP_VCALL_MEMBASE:
3008 case OP_VCALL2_MEMBASE:
3009 case OP_VOIDCALL_MEMBASE:
3010 case OP_CALL_MEMBASE:
3011 g_assert (arm_is_imm12 (ins->inst_offset));
3012 g_assert (ins->sreg1 != ARMREG_LR);
3013 call = (MonoCallInst*)ins;
3014 if (call->method->klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
3015 ARM_ADD_REG_IMM8 (code, ARMREG_LR, ARMREG_PC, 4);
3016 ARM_LDR_IMM (code, ARMREG_PC, ins->sreg1, ins->inst_offset);
3017 if (cfg->compile_aot) {
3019 * We can't embed the method in the code stream in PIC code. Instead,
3020 * we put it in V5 in code emitted by mono_arch_emit_imt_argument (),
3021 * and embed NULL here to signal the IMT thunk that the call is made
3024 *((gpointer*)code) = NULL;
3026 *((gpointer*)code) = (gpointer)call->method;
3030 ARM_MOV_REG_REG (code, ARMREG_LR, ARMREG_PC);
3031 ARM_LDR_IMM (code, ARMREG_PC, ins->sreg1, ins->inst_offset);
3033 code = emit_move_return_value (cfg, ins, code);
3036 g_assert_not_reached ();
3039 /* keep alignment */
3040 int alloca_waste = cfg->param_area;
3043 /* round the size to 8 bytes */
3044 ARM_ADD_REG_IMM8 (code, ins->dreg, ins->sreg1, 7);
3045 ARM_BIC_REG_IMM8 (code, ins->dreg, ins->dreg, 7);
3047 ARM_ADD_REG_IMM8 (code, ins->dreg, ins->dreg, alloca_waste);
3048 ARM_SUB_REG_REG (code, ARMREG_SP, ARMREG_SP, ins->dreg);
3049 /* memzero the area: dreg holds the size, sp is the pointer */
3050 if (ins->flags & MONO_INST_INIT) {
3051 guint8 *start_loop, *branch_to_cond;
3052 ARM_MOV_REG_IMM8 (code, ARMREG_LR, 0);
3053 branch_to_cond = code;
3056 ARM_STR_REG_REG (code, ARMREG_LR, ARMREG_SP, ins->dreg);
3057 arm_patch (branch_to_cond, code);
3058 /* decrement by 4 and set flags */
3059 ARM_SUBS_REG_IMM8 (code, ins->dreg, ins->dreg, 4);
3060 ARM_B_COND (code, ARMCOND_GE, 0);
3061 arm_patch (code - 4, start_loop);
3063 ARM_ADD_REG_IMM8 (code, ins->dreg, ARMREG_SP, alloca_waste);
3067 if (ins->sreg1 != ARMREG_R0)
3068 ARM_MOV_REG_REG (code, ARMREG_R0, ins->sreg1);
3069 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_INTERNAL_METHOD,
3070 (gpointer)"mono_arch_throw_exception");
3071 code = emit_call_seq (cfg, code);
3075 if (ins->sreg1 != ARMREG_R0)
3076 ARM_MOV_REG_REG (code, ARMREG_R0, ins->sreg1);
3077 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_INTERNAL_METHOD,
3078 (gpointer)"mono_arch_rethrow_exception");
3079 code = emit_call_seq (cfg, code);
3082 case OP_START_HANDLER: {
3083 MonoInst *spvar = mono_find_spvar_for_region (cfg, bb->region);
3085 if (arm_is_imm12 (spvar->inst_offset)) {
3086 ARM_STR_IMM (code, ARMREG_LR, spvar->inst_basereg, spvar->inst_offset);
3088 code = mono_arm_emit_load_imm (code, ARMREG_IP, spvar->inst_offset);
3089 ARM_STR_REG_REG (code, ARMREG_LR, spvar->inst_basereg, ARMREG_IP);
3093 case OP_ENDFILTER: {
3094 MonoInst *spvar = mono_find_spvar_for_region (cfg, bb->region);
3096 if (ins->sreg1 != ARMREG_R0)
3097 ARM_MOV_REG_REG (code, ARMREG_R0, ins->sreg1);
3098 if (arm_is_imm12 (spvar->inst_offset)) {
3099 ARM_LDR_IMM (code, ARMREG_IP, spvar->inst_basereg, spvar->inst_offset);
3101 g_assert (ARMREG_IP != spvar->inst_basereg);
3102 code = mono_arm_emit_load_imm (code, ARMREG_IP, spvar->inst_offset);
3103 ARM_LDR_REG_REG (code, ARMREG_IP, spvar->inst_basereg, ARMREG_IP);
3105 ARM_MOV_REG_REG (code, ARMREG_PC, ARMREG_IP);
3108 case OP_ENDFINALLY: {
3109 MonoInst *spvar = mono_find_spvar_for_region (cfg, bb->region);
3111 if (arm_is_imm12 (spvar->inst_offset)) {
3112 ARM_LDR_IMM (code, ARMREG_IP, spvar->inst_basereg, spvar->inst_offset);
3114 g_assert (ARMREG_IP != spvar->inst_basereg);
3115 code = mono_arm_emit_load_imm (code, ARMREG_IP, spvar->inst_offset);
3116 ARM_LDR_REG_REG (code, ARMREG_IP, spvar->inst_basereg, ARMREG_IP);
3118 ARM_MOV_REG_REG (code, ARMREG_PC, ARMREG_IP);
3121 case OP_CALL_HANDLER:
3122 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_BB, ins->inst_target_bb);
3126 ins->inst_c0 = code - cfg->native_code;
3129 if (ins->flags & MONO_INST_BRLABEL) {
3130 /*if (ins->inst_i0->inst_c0) {
3132 //x86_jump_code (code, cfg->native_code + ins->inst_i0->inst_c0);
3134 mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_LABEL, ins->inst_i0);
3138 /*if (ins->inst_target_bb->native_offset) {
3140 //x86_jump_code (code, cfg->native_code + ins->inst_target_bb->native_offset);
3142 mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_BB, ins->inst_target_bb);
3148 ARM_MOV_REG_REG (code, ARMREG_PC, ins->sreg1);
3152 * In the normal case we have:
3153 * ldr pc, [pc, ins->sreg1 << 2]
3156 * ldr lr, [pc, ins->sreg1 << 2]
3158 * The jump table data follows.
3159 * FIXME: add aot support.
3162 mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_SWITCH, ins->inst_p0);
3163 max_len += 4 * GPOINTER_TO_INT (ins->klass);
3164 if (offset > (cfg->code_size - max_len - 16)) {
3165 cfg->code_size += max_len;
3166 cfg->code_size *= 2;
3167 cfg->native_code = g_realloc (cfg->native_code, cfg->code_size);
3168 code = cfg->native_code + offset;
3170 ARM_LDR_REG_REG_SHIFT (code, ARMREG_PC, ARMREG_PC, ins->sreg1, ARMSHIFT_LSL, 2);
3172 code += 4 * GPOINTER_TO_INT (ins->klass);
3176 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 0, ARMCOND_NE);
3177 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_EQ);
3181 ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
3182 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_LT);
3186 ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
3187 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_LO);
3191 ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
3192 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_GT);
3196 ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
3197 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_HI);
3199 case OP_COND_EXC_EQ:
3200 case OP_COND_EXC_NE_UN:
3201 case OP_COND_EXC_LT:
3202 case OP_COND_EXC_LT_UN:
3203 case OP_COND_EXC_GT:
3204 case OP_COND_EXC_GT_UN:
3205 case OP_COND_EXC_GE:
3206 case OP_COND_EXC_GE_UN:
3207 case OP_COND_EXC_LE:
3208 case OP_COND_EXC_LE_UN:
3209 EMIT_COND_SYSTEM_EXCEPTION (ins->opcode - OP_COND_EXC_EQ, ins->inst_p1);
3211 case OP_COND_EXC_IEQ:
3212 case OP_COND_EXC_INE_UN:
3213 case OP_COND_EXC_ILT:
3214 case OP_COND_EXC_ILT_UN:
3215 case OP_COND_EXC_IGT:
3216 case OP_COND_EXC_IGT_UN:
3217 case OP_COND_EXC_IGE:
3218 case OP_COND_EXC_IGE_UN:
3219 case OP_COND_EXC_ILE:
3220 case OP_COND_EXC_ILE_UN:
3221 EMIT_COND_SYSTEM_EXCEPTION (ins->opcode - OP_COND_EXC_IEQ, ins->inst_p1);
3224 case OP_COND_EXC_OV:
3225 case OP_COND_EXC_NC:
3226 case OP_COND_EXC_NO:
3227 case OP_COND_EXC_IC:
3228 case OP_COND_EXC_IOV:
3229 case OP_COND_EXC_INC:
3230 case OP_COND_EXC_INO:
3243 EMIT_COND_BRANCH (ins, ins->opcode - OP_IBEQ);
3246 /* floating point opcodes */
3249 if (cfg->compile_aot) {
3250 ARM_LDFD (code, ins->dreg, ARMREG_PC, 0);
3252 *(guint32*)code = ((guint32*)(ins->inst_p0))[0];
3254 *(guint32*)code = ((guint32*)(ins->inst_p0))[1];
3257 /* FIXME: we can optimize the imm load by dealing with part of
3258 * the displacement in LDFD (aligning to 512).
3260 code = mono_arm_emit_load_imm (code, ARMREG_LR, (guint32)ins->inst_p0);
3261 ARM_LDFD (code, ins->dreg, ARMREG_LR, 0);
3265 if (cfg->compile_aot) {
3266 ARM_LDFS (code, ins->dreg, ARMREG_PC, 0);
3268 *(guint32*)code = ((guint32*)(ins->inst_p0))[0];
3271 code = mono_arm_emit_load_imm (code, ARMREG_LR, (guint32)ins->inst_p0);
3272 ARM_LDFS (code, ins->dreg, ARMREG_LR, 0);
3275 case OP_STORER8_MEMBASE_REG:
3276 /* This is generated by the local regalloc pass which runs after the lowering pass */
3277 if (!arm_is_fpimm8 (ins->inst_offset)) {
3278 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_offset);
3279 ARM_STFD (code, ins->sreg1, ARMREG_LR, 0);
3281 ARM_STFD (code, ins->sreg1, ins->inst_destbasereg, ins->inst_offset);
3284 case OP_LOADR8_MEMBASE:
3285 /* This is generated by the local regalloc pass which runs after the lowering pass */
3286 if (!arm_is_fpimm8 (ins->inst_offset)) {
3287 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_offset);
3288 ARM_LDFD (code, ins->dreg, ARMREG_LR, 0);
3290 ARM_LDFD (code, ins->dreg, ins->inst_basereg, ins->inst_offset);
3293 case OP_STORER4_MEMBASE_REG:
3294 g_assert (arm_is_fpimm8 (ins->inst_offset));
3295 ARM_STFS (code, ins->sreg1, ins->inst_destbasereg, ins->inst_offset);
3297 case OP_LOADR4_MEMBASE:
3298 g_assert (arm_is_fpimm8 (ins->inst_offset));
3299 ARM_LDFS (code, ins->dreg, ins->inst_basereg, ins->inst_offset);
3301 case OP_ICONV_TO_R_UN: {
3303 tmpreg = ins->dreg == 0? 1: 0;
3304 ARM_CMP_REG_IMM8 (code, ins->sreg1, 0);
3305 ARM_FLTD (code, ins->dreg, ins->sreg1);
3306 ARM_B_COND (code, ARMCOND_GE, 8);
3307 /* save the temp register */
3308 ARM_SUB_REG_IMM8 (code, ARMREG_SP, ARMREG_SP, 8);
3309 ARM_STFD (code, tmpreg, ARMREG_SP, 0);
3310 ARM_LDFD (code, tmpreg, ARMREG_PC, 12);
3311 ARM_FPA_ADFD (code, ins->dreg, ins->dreg, tmpreg);
3312 ARM_LDFD (code, tmpreg, ARMREG_SP, 0);
3313 ARM_ADD_REG_IMM8 (code, ARMREG_SP, ARMREG_SP, 8);
3314 /* skip the constant pool */
3317 *(int*)code = 0x41f00000;
3322 * ldfltd ftemp, [pc, #8] 0x41f00000 0x00000000
3323 * adfltd fdest, fdest, ftemp
3327 case OP_ICONV_TO_R4:
3328 ARM_FLTS (code, ins->dreg, ins->sreg1);
3330 case OP_ICONV_TO_R8:
3331 ARM_FLTD (code, ins->dreg, ins->sreg1);
3333 #elif defined(ARM_FPU_VFP)
3335 if (cfg->compile_aot) {
3336 ARM_FLDD (code, ins->dreg, ARMREG_PC, 0);
3338 *(guint32*)code = ((guint32*)(ins->inst_p0))[0];
3340 *(guint32*)code = ((guint32*)(ins->inst_p0))[1];
3343 /* FIXME: we can optimize the imm load by dealing with part of
3344 * the displacement in LDFD (aligning to 512).
3346 code = mono_arm_emit_load_imm (code, ARMREG_LR, (guint32)ins->inst_p0);
3347 ARM_FLDD (code, ins->dreg, ARMREG_LR, 0);
3351 if (cfg->compile_aot) {
3352 ARM_FLDS (code, ins->dreg, ARMREG_PC, 0);
3354 *(guint32*)code = ((guint32*)(ins->inst_p0))[0];
3356 ARM_CVTS (code, ins->dreg, ins->dreg);
3358 code = mono_arm_emit_load_imm (code, ARMREG_LR, (guint32)ins->inst_p0);
3359 ARM_FLDS (code, ins->dreg, ARMREG_LR, 0);
3360 ARM_CVTS (code, ins->dreg, ins->dreg);
3363 case OP_STORER8_MEMBASE_REG:
3364 g_assert (arm_is_fpimm8 (ins->inst_offset));
3365 ARM_FSTD (code, ins->sreg1, ins->inst_destbasereg, ins->inst_offset);
3367 case OP_LOADR8_MEMBASE:
3368 g_assert (arm_is_fpimm8 (ins->inst_offset));
3369 ARM_FLDD (code, ins->dreg, ins->inst_basereg, ins->inst_offset);
3371 case OP_STORER4_MEMBASE_REG:
3372 g_assert (arm_is_fpimm8 (ins->inst_offset));
3373 ARM_FSTS (code, ins->sreg1, ins->inst_destbasereg, ins->inst_offset);
3375 case OP_LOADR4_MEMBASE:
3376 g_assert (arm_is_fpimm8 (ins->inst_offset));
3377 ARM_FLDS (code, ins->dreg, ins->inst_basereg, ins->inst_offset);
3379 case OP_ICONV_TO_R_UN: {
3380 g_assert_not_reached ();
3383 case OP_ICONV_TO_R4:
3384 g_assert_not_reached ();
3385 //ARM_FLTS (code, ins->dreg, ins->sreg1);
3387 case OP_ICONV_TO_R8:
3388 g_assert_not_reached ();
3389 //ARM_FLTD (code, ins->dreg, ins->sreg1);
3392 case OP_FCONV_TO_I1:
3393 code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 1, TRUE);
3395 case OP_FCONV_TO_U1:
3396 code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 1, FALSE);
3398 case OP_FCONV_TO_I2:
3399 code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 2, TRUE);
3401 case OP_FCONV_TO_U2:
3402 code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 2, FALSE);
3404 case OP_FCONV_TO_I4:
3406 code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 4, TRUE);
3408 case OP_FCONV_TO_U4:
3410 code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 4, FALSE);
3412 case OP_FCONV_TO_I8:
3413 case OP_FCONV_TO_U8:
3414 g_assert_not_reached ();
3415 /* Implemented as helper calls */
3417 case OP_LCONV_TO_R_UN:
3418 g_assert_not_reached ();
3419 /* Implemented as helper calls */
3421 case OP_LCONV_TO_OVF_I:
3422 case OP_LCONV_TO_OVF_I4_2: {
3424 guint32 *negative_branch, *msword_positive_branch, *msword_negative_branch, *ovf_ex_target;
3425 // Check if it's negative
3426 ppc_cmpi (code, 0, 0, ins->sreg1, 0);
3427 negative_branch = code;
3428 ppc_bc (code, PPC_BR_TRUE, PPC_BR_LT, 0);
3429 // It's positive: msword == 0
3430 ppc_cmpi (code, 0, 0, ins->sreg2, 0);
3431 msword_positive_branch = code;
3432 ppc_bc (code, PPC_BR_TRUE, PPC_BR_EQ, 0);
3434 ovf_ex_target = code;
3435 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_ALWAYS, 0, "OverflowException");
3437 ppc_patch (negative_branch, code);
3438 ppc_cmpi (code, 0, 0, ins->sreg2, -1);
3439 msword_negative_branch = code;
3440 ppc_bc (code, PPC_BR_FALSE, PPC_BR_EQ, 0);
3441 ppc_patch (msword_negative_branch, ovf_ex_target);
3443 ppc_patch (msword_positive_branch, code);
3444 if (ins->dreg != ins->sreg1)
3445 ppc_mr (code, ins->dreg, ins->sreg1);
3447 if (ins->dreg != ins->sreg1)
3448 ARM_MOV_REG_REG (code, ins->dreg, ins->sreg1);
3453 ARM_FPA_ADFD (code, ins->dreg, ins->sreg1, ins->sreg2);
3456 ARM_FPA_SUFD (code, ins->dreg, ins->sreg1, ins->sreg2);
3459 ARM_FPA_MUFD (code, ins->dreg, ins->sreg1, ins->sreg2);
3462 ARM_FPA_DVFD (code, ins->dreg, ins->sreg1, ins->sreg2);
3465 ARM_MNFD (code, ins->dreg, ins->sreg1);
3467 #elif defined(ARM_FPU_VFP)
3469 ARM_VFP_ADDD (code, ins->dreg, ins->sreg1, ins->sreg2);
3472 ARM_VFP_SUBD (code, ins->dreg, ins->sreg1, ins->sreg2);
3475 ARM_VFP_MULD (code, ins->dreg, ins->sreg1, ins->sreg2);
3478 ARM_VFP_DIVD (code, ins->dreg, ins->sreg1, ins->sreg2);
3481 ARM_NEGD (code, ins->dreg, ins->sreg1);
3486 g_assert_not_reached ();
3490 ARM_FCMP (code, ARM_FPA_CMF, ins->sreg1, ins->sreg2);
3491 #elif defined(ARM_FPU_VFP)
3492 ARM_CMPD (code, ins->sreg1, ins->sreg2);
3497 ARM_FCMP (code, ARM_FPA_CMF, ins->sreg1, ins->sreg2);
3498 #elif defined(ARM_FPU_VFP)
3499 ARM_CMPD (code, ins->sreg1, ins->sreg2);
3501 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 0, ARMCOND_NE);
3502 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_EQ);
3506 ARM_FCMP (code, ARM_FPA_CMF, ins->sreg1, ins->sreg2);
3507 #elif defined(ARM_FPU_VFP)
3508 ARM_CMPD (code, ins->sreg1, ins->sreg2);
3510 ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
3511 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_MI);
3515 ARM_FCMP (code, ARM_FPA_CMF, ins->sreg1, ins->sreg2);
3516 #elif defined(ARM_FPU_VFP)
3517 ARM_CMPD (code, ins->sreg1, ins->sreg2);
3519 ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
3520 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_MI);
3521 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_VS);
3526 ARM_FCMP (code, ARM_FPA_CMF, ins->sreg2, ins->sreg1);
3527 #elif defined(ARM_FPU_VFP)
3528 ARM_CMPD (code, ins->sreg2, ins->sreg1);
3530 ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
3531 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_MI);
3536 ARM_FCMP (code, ARM_FPA_CMF, ins->sreg2, ins->sreg1);
3537 #elif defined(ARM_FPU_VFP)
3538 ARM_CMPD (code, ins->sreg2, ins->sreg1);
3540 ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
3541 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_MI);
3542 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_VS);
3544 /* ARM FPA flags table:
3545 * N Less than ARMCOND_MI
3546 * Z Equal ARMCOND_EQ
3547 * C Greater Than or Equal ARMCOND_CS
3548 * V Unordered ARMCOND_VS
3551 EMIT_COND_BRANCH (ins, OP_IBEQ - OP_IBEQ);
3554 EMIT_COND_BRANCH (ins, OP_IBNE_UN - OP_IBEQ);
3557 EMIT_COND_BRANCH_FLAGS (ins, ARMCOND_MI); /* N set */
3560 EMIT_COND_BRANCH_FLAGS (ins, ARMCOND_VS); /* V set */
3561 EMIT_COND_BRANCH_FLAGS (ins, ARMCOND_MI); /* N set */
3567 g_assert_not_reached ();
3570 EMIT_COND_BRANCH_FLAGS (ins, ARMCOND_CS);
3573 EMIT_COND_BRANCH_FLAGS (ins, ARMCOND_VS); /* V set */
3574 EMIT_COND_BRANCH_FLAGS (ins, ARMCOND_GE);
3579 if (ins->dreg != ins->sreg1)
3580 ARM_MVFD (code, ins->dreg, ins->sreg1);
3582 g_assert_not_reached ();
3587 g_warning ("unknown opcode %s in %s()\n", mono_inst_name (ins->opcode), __FUNCTION__);
3588 g_assert_not_reached ();
3591 if ((cfg->opt & MONO_OPT_BRANCH) && ((code - cfg->native_code - offset) > max_len)) {
3592 g_warning ("wrong maximal instruction length of instruction %s (expected %d, got %d)",
3593 mono_inst_name (ins->opcode), max_len, code - cfg->native_code - offset);
3594 g_assert_not_reached ();
3600 last_offset = offset;
3603 cfg->code_len = code - cfg->native_code;
3607 mono_arch_register_lowlevel_calls (void)
3611 #define patch_lis_ori(ip,val) do {\
3612 guint16 *__lis_ori = (guint16*)(ip); \
3613 __lis_ori [1] = (((guint32)(val)) >> 16) & 0xffff; \
3614 __lis_ori [3] = ((guint32)(val)) & 0xffff; \
3618 mono_arch_patch_code (MonoMethod *method, MonoDomain *domain, guint8 *code, MonoJumpInfo *ji, gboolean run_cctors)
3620 MonoJumpInfo *patch_info;
3621 gboolean compile_aot = !run_cctors;
3623 for (patch_info = ji; patch_info; patch_info = patch_info->next) {
3624 unsigned char *ip = patch_info->ip.i + code;
3625 const unsigned char *target;
3627 if (patch_info->type == MONO_PATCH_INFO_SWITCH && !compile_aot) {
3628 gpointer *jt = (gpointer*)(ip + 8);
3630 /* jt is the inlined jump table, 2 instructions after ip
3631 * In the normal case we store the absolute addresses,
3632 * otherwise the displacements.
3634 for (i = 0; i < patch_info->data.table->table_size; i++)
3635 jt [i] = code + (int)patch_info->data.table->table [i];
3638 target = mono_resolve_patch_target (method, domain, code, patch_info, run_cctors);
3641 switch (patch_info->type) {
3642 case MONO_PATCH_INFO_BB:
3643 case MONO_PATCH_INFO_LABEL:
3646 /* No need to patch these */
3651 switch (patch_info->type) {
3652 case MONO_PATCH_INFO_IP:
3653 g_assert_not_reached ();
3654 patch_lis_ori (ip, ip);
3656 case MONO_PATCH_INFO_METHOD_REL:
3657 g_assert_not_reached ();
3658 *((gpointer *)(ip)) = code + patch_info->data.offset;
3660 case MONO_PATCH_INFO_METHODCONST:
3661 case MONO_PATCH_INFO_CLASS:
3662 case MONO_PATCH_INFO_IMAGE:
3663 case MONO_PATCH_INFO_FIELD:
3664 case MONO_PATCH_INFO_VTABLE:
3665 case MONO_PATCH_INFO_IID:
3666 case MONO_PATCH_INFO_SFLDA:
3667 case MONO_PATCH_INFO_LDSTR:
3668 case MONO_PATCH_INFO_TYPE_FROM_HANDLE:
3669 case MONO_PATCH_INFO_LDTOKEN:
3670 g_assert_not_reached ();
3671 /* from OP_AOTCONST : lis + ori */
3672 patch_lis_ori (ip, target);
3674 case MONO_PATCH_INFO_R4:
3675 case MONO_PATCH_INFO_R8:
3676 g_assert_not_reached ();
3677 *((gconstpointer *)(ip + 2)) = patch_info->data.target;
3679 case MONO_PATCH_INFO_EXC_NAME:
3680 g_assert_not_reached ();
3681 *((gconstpointer *)(ip + 1)) = patch_info->data.name;
3683 case MONO_PATCH_INFO_NONE:
3684 case MONO_PATCH_INFO_BB_OVF:
3685 case MONO_PATCH_INFO_EXC_OVF:
3686 /* everything is dealt with at epilog output time */
3691 arm_patch (ip, target);
3696 * Stack frame layout:
3698 * ------------------- fp
3699 * MonoLMF structure or saved registers
3700 * -------------------
3702 * -------------------
3704 * -------------------
3705 * optional 8 bytes for tracing
3706 * -------------------
3707 * param area size is cfg->param_area
3708 * ------------------- sp
3711 mono_arch_emit_prolog (MonoCompile *cfg)
3713 MonoMethod *method = cfg->method;
3715 MonoMethodSignature *sig;
3717 int alloc_size, pos, max_offset, i, rot_amount;
3724 if (mono_jit_trace_calls != NULL && mono_trace_eval (method))
3727 sig = mono_method_signature (method);
3728 cfg->code_size = 256 + sig->param_count * 20;
3729 code = cfg->native_code = g_malloc (cfg->code_size);
3731 ARM_MOV_REG_REG (code, ARMREG_IP, ARMREG_SP);
3733 alloc_size = cfg->stack_offset;
3736 if (!method->save_lmf) {
3737 ARM_PUSH (code, (cfg->used_int_regs | (1 << ARMREG_IP) | (1 << ARMREG_LR)));
3738 prev_sp_offset = 8; /* ip and lr */
3739 for (i = 0; i < 16; ++i) {
3740 if (cfg->used_int_regs & (1 << i))
3741 prev_sp_offset += 4;
3744 ARM_PUSH (code, 0x5ff0);
3745 prev_sp_offset = 4 * 10; /* all but r0-r3, sp and pc */
3746 pos += sizeof (MonoLMF) - prev_sp_offset;
3750 // align to MONO_ARCH_FRAME_ALIGNMENT bytes
3751 if (alloc_size & (MONO_ARCH_FRAME_ALIGNMENT - 1)) {
3752 alloc_size += MONO_ARCH_FRAME_ALIGNMENT - 1;
3753 alloc_size &= ~(MONO_ARCH_FRAME_ALIGNMENT - 1);
3756 /* the stack used in the pushed regs */
3757 if (prev_sp_offset & 4)
3759 cfg->stack_usage = alloc_size;
3761 if ((i = mono_arm_is_rotated_imm8 (alloc_size, &rot_amount)) >= 0) {
3762 ARM_SUB_REG_IMM (code, ARMREG_SP, ARMREG_SP, i, rot_amount);
3764 code = mono_arm_emit_load_imm (code, ARMREG_IP, alloc_size);
3765 ARM_SUB_REG_REG (code, ARMREG_SP, ARMREG_SP, ARMREG_IP);
3768 if (cfg->frame_reg != ARMREG_SP)
3769 ARM_MOV_REG_REG (code, cfg->frame_reg, ARMREG_SP);
3770 //g_print ("prev_sp_offset: %d, alloc_size:%d\n", prev_sp_offset, alloc_size);
3771 prev_sp_offset += alloc_size;
3773 /* compute max_offset in order to use short forward jumps
3774 * we could skip doing it on ARM because the immediate displacement
3775 * for jumps is large enough, it may be useful later for constant pools
3778 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
3779 MonoInst *ins = bb->code;
3780 bb->max_offset = max_offset;
3782 if (cfg->prof_options & MONO_PROFILE_COVERAGE)
3785 MONO_BB_FOR_EACH_INS (bb, ins)
3786 max_offset += ((guint8 *)ins_get_spec (ins->opcode))[MONO_INST_LEN];
3789 /* load arguments allocated to register from the stack */
3792 cinfo = calculate_sizes (sig, sig->pinvoke);
3794 if (MONO_TYPE_ISSTRUCT (sig->ret)) {
3795 ArgInfo *ainfo = &cinfo->ret;
3796 inst = cfg->vret_addr;
3797 g_assert (arm_is_imm12 (inst->inst_offset));
3798 ARM_STR_IMM (code, ainfo->reg, inst->inst_basereg, inst->inst_offset);
3800 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
3801 ArgInfo *ainfo = cinfo->args + i;
3802 inst = cfg->args [pos];
3804 if (cfg->verbose_level > 2)
3805 g_print ("Saving argument %d (type: %d)\n", i, ainfo->regtype);
3806 if (inst->opcode == OP_REGVAR) {
3807 if (ainfo->regtype == RegTypeGeneral)
3808 ARM_MOV_REG_REG (code, inst->dreg, ainfo->reg);
3809 else if (ainfo->regtype == RegTypeFP) {
3810 g_assert_not_reached ();
3811 } else if (ainfo->regtype == RegTypeBase) {
3812 if (arm_is_imm12 (prev_sp_offset + ainfo->offset)) {
3813 ARM_LDR_IMM (code, inst->dreg, ARMREG_SP, (prev_sp_offset + ainfo->offset));
3815 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
3816 ARM_LDR_REG_REG (code, inst->dreg, ARMREG_SP, ARMREG_IP);
3819 g_assert_not_reached ();
3821 if (cfg->verbose_level > 2)
3822 g_print ("Argument %d assigned to register %s\n", pos, mono_arch_regname (inst->dreg));
3824 /* the argument should be put on the stack: FIXME handle size != word */
3825 if (ainfo->regtype == RegTypeGeneral) {
3826 switch (ainfo->size) {
3828 if (arm_is_imm12 (inst->inst_offset))
3829 ARM_STRB_IMM (code, ainfo->reg, inst->inst_basereg, inst->inst_offset);
3831 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
3832 ARM_STRB_REG_REG (code, ainfo->reg, inst->inst_basereg, ARMREG_IP);
3836 if (arm_is_imm8 (inst->inst_offset)) {
3837 ARM_STRH_IMM (code, ainfo->reg, inst->inst_basereg, inst->inst_offset);
3839 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
3840 ARM_STRH_REG_REG (code, ainfo->reg, inst->inst_basereg, ARMREG_IP);
3844 g_assert (arm_is_imm12 (inst->inst_offset));
3845 ARM_STR_IMM (code, ainfo->reg, inst->inst_basereg, inst->inst_offset);
3846 g_assert (arm_is_imm12 (inst->inst_offset + 4));
3847 ARM_STR_IMM (code, ainfo->reg + 1, inst->inst_basereg, inst->inst_offset + 4);
3850 if (arm_is_imm12 (inst->inst_offset)) {
3851 ARM_STR_IMM (code, ainfo->reg, inst->inst_basereg, inst->inst_offset);
3853 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
3854 ARM_STR_REG_REG (code, ainfo->reg, inst->inst_basereg, ARMREG_IP);
3858 } else if (ainfo->regtype == RegTypeBaseGen) {
3859 g_assert (arm_is_imm12 (prev_sp_offset + ainfo->offset));
3860 g_assert (arm_is_imm12 (inst->inst_offset));
3861 ARM_LDR_IMM (code, ARMREG_LR, ARMREG_SP, (prev_sp_offset + ainfo->offset));
3862 ARM_STR_IMM (code, ARMREG_LR, inst->inst_basereg, inst->inst_offset + 4);
3863 ARM_STR_IMM (code, ARMREG_R3, inst->inst_basereg, inst->inst_offset);
3864 } else if (ainfo->regtype == RegTypeBase) {
3865 if (arm_is_imm12 (prev_sp_offset + ainfo->offset)) {
3866 ARM_LDR_IMM (code, ARMREG_LR, ARMREG_SP, (prev_sp_offset + ainfo->offset));
3868 code = mono_arm_emit_load_imm (code, ARMREG_IP, prev_sp_offset + ainfo->offset);
3869 ARM_LDR_REG_REG (code, ARMREG_LR, ARMREG_SP, ARMREG_IP);
3872 switch (ainfo->size) {
3874 if (arm_is_imm8 (inst->inst_offset)) {
3875 ARM_STRB_IMM (code, ARMREG_LR, inst->inst_basereg, inst->inst_offset);
3877 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
3878 ARM_STRB_REG_REG (code, ARMREG_LR, inst->inst_basereg, ARMREG_IP);
3882 if (arm_is_imm8 (inst->inst_offset)) {
3883 ARM_STRH_IMM (code, ARMREG_LR, inst->inst_basereg, inst->inst_offset);
3885 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
3886 ARM_STRH_REG_REG (code, ARMREG_LR, inst->inst_basereg, ARMREG_IP);
3890 if (arm_is_imm12 (inst->inst_offset)) {
3891 ARM_STR_IMM (code, ARMREG_LR, inst->inst_basereg, inst->inst_offset);
3893 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
3894 ARM_STR_REG_REG (code, ARMREG_LR, inst->inst_basereg, ARMREG_IP);
3896 if (arm_is_imm12 (prev_sp_offset + ainfo->offset + 4)) {
3897 ARM_LDR_IMM (code, ARMREG_LR, ARMREG_SP, (prev_sp_offset + ainfo->offset + 4));
3899 code = mono_arm_emit_load_imm (code, ARMREG_IP, prev_sp_offset + ainfo->offset + 4);
3900 ARM_LDR_REG_REG (code, ARMREG_LR, ARMREG_SP, ARMREG_IP);
3902 if (arm_is_imm12 (inst->inst_offset + 4)) {
3903 ARM_STR_IMM (code, ARMREG_LR, inst->inst_basereg, inst->inst_offset + 4);
3905 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset + 4);
3906 ARM_STR_REG_REG (code, ARMREG_LR, inst->inst_basereg, ARMREG_IP);
3910 if (arm_is_imm12 (inst->inst_offset)) {
3911 ARM_STR_IMM (code, ARMREG_LR, inst->inst_basereg, inst->inst_offset);
3913 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
3914 ARM_STR_REG_REG (code, ARMREG_LR, inst->inst_basereg, ARMREG_IP);
3918 } else if (ainfo->regtype == RegTypeFP) {
3919 g_assert_not_reached ();
3920 } else if (ainfo->regtype == RegTypeStructByVal) {
3921 int doffset = inst->inst_offset;
3925 if (mono_class_from_mono_type (inst->inst_vtype))
3926 size = mono_class_native_size (mono_class_from_mono_type (inst->inst_vtype), NULL);
3927 for (cur_reg = 0; cur_reg < ainfo->size; ++cur_reg) {
3928 if (arm_is_imm12 (doffset)) {
3929 ARM_STR_IMM (code, ainfo->reg + cur_reg, inst->inst_basereg, doffset);
3931 code = mono_arm_emit_load_imm (code, ARMREG_IP, doffset);
3932 ARM_STR_REG_REG (code, ainfo->reg + cur_reg, inst->inst_basereg, ARMREG_IP);
3934 soffset += sizeof (gpointer);
3935 doffset += sizeof (gpointer);
3937 if (ainfo->vtsize) {
3938 /* FIXME: handle overrun! with struct sizes not multiple of 4 */
3939 //g_print ("emit_memcpy (prev_sp_ofs: %d, ainfo->offset: %d, soffset: %d)\n", prev_sp_offset, ainfo->offset, soffset);
3940 code = emit_memcpy (code, ainfo->vtsize * sizeof (gpointer), inst->inst_basereg, doffset, ARMREG_SP, prev_sp_offset + ainfo->offset);
3942 } else if (ainfo->regtype == RegTypeStructByAddr) {
3943 g_assert_not_reached ();
3944 /* FIXME: handle overrun! with struct sizes not multiple of 4 */
3945 code = emit_memcpy (code, ainfo->vtsize * sizeof (gpointer), inst->inst_basereg, inst->inst_offset, ainfo->reg, 0);
3947 g_assert_not_reached ();
3952 if (method->save_lmf) {
3954 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_INTERNAL_METHOD,
3955 (gpointer)"mono_get_lmf_addr");
3956 code = emit_call_seq (cfg, code);
3957 /* we build the MonoLMF structure on the stack - see mini-arm.h */
3958 /* lmf_offset is the offset from the previous stack pointer,
3959 * alloc_size is the total stack space allocated, so the offset
3960 * of MonoLMF from the current stack ptr is alloc_size - lmf_offset.
3961 * The pointer to the struct is put in r1 (new_lmf).
3962 * r2 is used as scratch
3963 * The callee-saved registers are already in the MonoLMF structure
3965 code = emit_big_add (code, ARMREG_R1, ARMREG_SP, alloc_size - lmf_offset);
3966 /* r0 is the result from mono_get_lmf_addr () */
3967 ARM_STR_IMM (code, ARMREG_R0, ARMREG_R1, G_STRUCT_OFFSET (MonoLMF, lmf_addr));
3968 /* new_lmf->previous_lmf = *lmf_addr */
3969 ARM_LDR_IMM (code, ARMREG_R2, ARMREG_R0, G_STRUCT_OFFSET (MonoLMF, previous_lmf));
3970 ARM_STR_IMM (code, ARMREG_R2, ARMREG_R1, G_STRUCT_OFFSET (MonoLMF, previous_lmf));
3971 /* *(lmf_addr) = r1 */
3972 ARM_STR_IMM (code, ARMREG_R1, ARMREG_R0, G_STRUCT_OFFSET (MonoLMF, previous_lmf));
3973 /* Skip method (only needed for trampoline LMF frames) */
3974 ARM_STR_IMM (code, ARMREG_SP, ARMREG_R1, G_STRUCT_OFFSET (MonoLMF, ebp));
3975 /* save the current IP */
3976 ARM_MOV_REG_REG (code, ARMREG_R2, ARMREG_PC);
3977 ARM_STR_IMM (code, ARMREG_R2, ARMREG_R1, G_STRUCT_OFFSET (MonoLMF, eip));
3981 code = mono_arch_instrument_prolog (cfg, mono_trace_enter_method, code, TRUE);
3983 cfg->code_len = code - cfg->native_code;
3984 g_assert (cfg->code_len < cfg->code_size);
/*
 * mono_arch_emit_epilog:
 *   Emit the method epilog: optionally trace the method leave, unlink the
 *   MonoLMF from the per-thread LMF chain (when method->save_lmf), unwind
 *   the stack frame and return by popping the saved registers, including PC.
 *   NOTE(review): this view of the file is elided (return type line, some
 *   braces/else branches are not visible here).
 */
3991 mono_arch_emit_epilog (MonoCompile *cfg)
3993 MonoMethod *method = cfg->method;
3994 int pos, i, rot_amount;
/* conservative upper bound on the epilog size, refined below */
3995 int max_epilog_size = 16 + 20*4;
3998 if (cfg->method->save_lmf)
3999 max_epilog_size += 128;
4001 if (mono_jit_trace_calls != NULL)
4002 max_epilog_size += 50;
4004 if (cfg->prof_options & MONO_PROFILE_ENTER_LEAVE)
4005 max_epilog_size += 50;
/* grow the code buffer (doubling) until the epilog is guaranteed to fit */
4007 while (cfg->code_len + max_epilog_size > (cfg->code_size - 16)) {
4008 cfg->code_size *= 2;
4009 cfg->native_code = g_realloc (cfg->native_code, cfg->code_size);
4010 mono_jit_stats.code_reallocs++;
4014 * Keep in sync with OP_JMP
4016 code = cfg->native_code + cfg->code_len;
4018 if (mono_jit_trace_calls != NULL && mono_trace_eval (method)) {
4019 code = mono_arch_instrument_epilog (cfg, mono_trace_leave_method, code, TRUE);
4023 if (method->save_lmf) {
4025 /* all but r0-r3, sp and pc */
4026 pos += sizeof (MonoLMF) - (4 * 10);
4028 /* r2 contains the pointer to the current LMF */
4029 code = emit_big_add (code, ARMREG_R2, cfg->frame_reg, cfg->stack_usage - lmf_offset);
4030 /* ip = previous_lmf */
4031 ARM_LDR_IMM (code, ARMREG_IP, ARMREG_R2, G_STRUCT_OFFSET (MonoLMF, previous_lmf));
/* lr = lmf_addr (address of the thread's current-LMF pointer) */
4033 ARM_LDR_IMM (code, ARMREG_LR, ARMREG_R2, G_STRUCT_OFFSET (MonoLMF, lmf_addr));
4034 /* *(lmf_addr) = previous_lmf */
4035 ARM_STR_IMM (code, ARMREG_IP, ARMREG_LR, G_STRUCT_OFFSET (MonoLMF, previous_lmf));
4036 /* FIXME: speedup: there is no actual need to restore the registers if
4037 * we didn't actually change them (idea from Zoltan).
4040 /* point sp at the registers to restore: 10 is 14 -4, because we skip r0-r3 */
4041 ARM_ADD_REG_IMM8 (code, ARMREG_SP, ARMREG_R2, (sizeof (MonoLMF) - 10 * sizeof (gulong)));
4042 ARM_POP_NWB (code, 0xaff0); /* restore ip to sp and lr to pc */
/* non-LMF path: pop the frame (presumably an else branch — elided here) */
4044 if ((i = mono_arm_is_rotated_imm8 (cfg->stack_usage, &rot_amount)) >= 0) {
4045 ARM_ADD_REG_IMM (code, ARMREG_SP, cfg->frame_reg, i, rot_amount);
/* stack_usage does not fit a rotated imm8: materialize it in ip first */
4047 code = mono_arm_emit_load_imm (code, ARMREG_IP, cfg->stack_usage);
4048 ARM_ADD_REG_REG (code, ARMREG_SP, ARMREG_SP, ARMREG_IP);
4050 /* FIXME: add v4 thumb interworking support */
/* pop callee-saved regs plus sp/pc; loading pc returns to the caller */
4051 ARM_POP_NWB (code, cfg->used_int_regs | ((1 << ARMREG_SP) | (1 << ARMREG_PC)));
4054 cfg->code_len = code - cfg->native_code;
4056 g_assert (cfg->code_len < cfg->code_size);
4060 /* remove once throw_exception_by_name is eliminated */
/*
 * exception_id_by_name:
 *   Map a System.* exception class name to its MONO_EXC_* intrinsic id.
 *   Aborts (g_error) on a name that is not one of the intrinsic exceptions,
 *   so callers may only pass names emitted by the JIT itself.
 */
4062 exception_id_by_name (const char *name)
4064 if (strcmp (name, "IndexOutOfRangeException") == 0)
4065 return MONO_EXC_INDEX_OUT_OF_RANGE;
4066 if (strcmp (name, "OverflowException") == 0)
4067 return MONO_EXC_OVERFLOW;
4068 if (strcmp (name, "ArithmeticException") == 0)
4069 return MONO_EXC_ARITHMETIC;
4070 if (strcmp (name, "DivideByZeroException") == 0)
4071 return MONO_EXC_DIVIDE_BY_ZERO;
4072 if (strcmp (name, "InvalidCastException") == 0)
4073 return MONO_EXC_INVALID_CAST;
4074 if (strcmp (name, "NullReferenceException") == 0)
4075 return MONO_EXC_NULL_REF;
4076 if (strcmp (name, "ArrayTypeMismatchException") == 0)
4077 return MONO_EXC_ARRAY_TYPE_MISMATCH;
4078 g_error ("Unknown intrinsic exception %s\n", name);
/*
 * mono_arch_emit_exceptions:
 *   Emit the out-of-line exception-raising stubs referenced by the method's
 *   MONO_PATCH_INFO_EXC patch entries. Stubs are deduplicated per intrinsic
 *   exception id: the first occurrence emits a stub, later occurrences are
 *   patched to branch to it. Each stub loads the exception type token and
 *   calls mono_arch_throw_corlib_exception.
 *   NOTE(review): elided file view — some lines of the function are missing.
 */
4083 mono_arch_emit_exceptions (MonoCompile *cfg)
4085 MonoJumpInfo *patch_info;
/* per-exception-id: address of the already-emitted stub (NULL = none yet) */
4088 const guint8* exc_throw_pos [MONO_EXC_INTRINS_NUM] = {NULL};
4089 guint8 exc_throw_found [MONO_EXC_INTRINS_NUM] = {0};
4090 int max_epilog_size = 50;
4092 /* count the number of exception infos */
4095 * make sure we have enough space for exceptions
4097 for (patch_info = cfg->patch_info; patch_info; patch_info = patch_info->next) {
4098 if (patch_info->type == MONO_PATCH_INFO_EXC) {
4099 i = exception_id_by_name (patch_info->data.target);
4100 if (!exc_throw_found [i]) {
/* 32 bytes budgeted per distinct exception stub */
4101 max_epilog_size += 32;
4102 exc_throw_found [i] = TRUE;
/* grow the code buffer (doubling) until all stubs are guaranteed to fit */
4107 while (cfg->code_len + max_epilog_size > (cfg->code_size - 16)) {
4108 cfg->code_size *= 2;
4109 cfg->native_code = g_realloc (cfg->native_code, cfg->code_size);
4110 mono_jit_stats.code_reallocs++;
4113 code = cfg->native_code + cfg->code_len;
4115 /* add code to raise exceptions */
4116 for (patch_info = cfg->patch_info; patch_info; patch_info = patch_info->next) {
4117 switch (patch_info->type) {
4118 case MONO_PATCH_INFO_EXC: {
4119 MonoClass *exc_class;
4120 unsigned char *ip = patch_info->ip.i + cfg->native_code;
4122 i = exception_id_by_name (patch_info->data.target);
4123 if (exc_throw_pos [i]) {
/* a stub for this exception already exists: just branch to it */
4124 arm_patch (ip, exc_throw_pos [i]);
4125 patch_info->type = MONO_PATCH_INFO_NONE;
4128 exc_throw_pos [i] = code;
4130 arm_patch (ip, code);
4132 exc_class = mono_class_from_name (mono_defaults.corlib, "System", patch_info->data.name);
4133 g_assert (exc_class);
/* r1 = faulting return address; r0 = type token loaded from the stream */
4135 ARM_MOV_REG_REG (code, ARMREG_R1, ARMREG_LR);
4136 ARM_LDR_IMM (code, ARMREG_R0, ARMREG_PC, 0);
/* repurpose this patch entry to call the throw helper */
4137 patch_info->type = MONO_PATCH_INFO_INTERNAL_METHOD;
4138 patch_info->data.name = "mono_arch_throw_corlib_exception";
4139 patch_info->ip.i = code - cfg->native_code;
/* inline the type token as data in the instruction stream */
4141 *(guint32*)(gpointer)code = exc_class->type_token;
4151 cfg->code_len = code - cfg->native_code;
4153 g_assert (cfg->code_len < cfg->code_size);
/* Arch hook: per-thread JIT TLS setup (body elided from this view). */
4158 mono_arch_setup_jit_tls_data (MonoJitTlsData *tls)
/* Arch hook: per-thread JIT TLS teardown (body elided from this view). */
4163 mono_arch_free_jit_tls_data (MonoJitTlsData *tls)
/*
 * mono_arch_emit_this_vret_args:
 *   Emit the moves that place the 'this' pointer and the valuetype-return
 *   address into their argument registers for a call. When a vret address is
 *   present it occupies r0, pushing 'this' to r1 (condition elided from this
 *   view — presumably vt_reg != -1; TODO confirm).
 */
4168 mono_arch_emit_this_vret_args (MonoCompile *cfg, MonoCallInst *inst, int this_reg, int this_type, int vt_reg)
4171 int this_dreg = ARMREG_R0;
4174 this_dreg = ARMREG_R1;
4176 /* add the this argument */
4177 if (this_reg != -1) {
/* move 'this' into a fresh vreg and bind it to its hard register */
4179 MONO_INST_NEW (cfg, this, OP_MOVE);
4180 this->type = this_type;
4181 this->sreg1 = this_reg;
4182 this->dreg = mono_regstate_next_int (cfg->rs);
4183 mono_bblock_add_inst (cfg->cbb, this);
4184 mono_call_inst_add_outarg_reg (cfg, inst, this->dreg, this_dreg, FALSE);
/* vret address goes in r0 (guard condition elided from this view) */
4189 MONO_INST_NEW (cfg, vtarg, OP_MOVE);
4190 vtarg->type = STACK_MP;
4191 vtarg->sreg1 = vt_reg;
4192 vtarg->dreg = mono_regstate_next_int (cfg->rs);
4193 mono_bblock_add_inst (cfg->cbb, vtarg);
4194 mono_call_inst_add_outarg_reg (cfg, inst, vtarg->dreg, ARMREG_R0, FALSE);
/* Arch hook: intrinsic substitution for a method call, old IR (body elided). */
4199 mono_arch_get_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
/* Arch hook: intrinsic substitution for a method call, linear IR (body elided). */
4205 mono_arch_emit_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
/* Arch hook: debug pretty-printer for an IR tree (body elided). */
4212 mono_arch_print_tree (MonoInst *tree, int arity)
/* Arch hook: fast inline access to the current domain (body elided). */
4217 MonoInst* mono_arch_get_domain_intrinsic (MonoCompile* cfg)
/* Arch hook: fast inline access to the current thread (body elided). */
4223 mono_arch_get_thread_intrinsic (MonoCompile* cfg)
/* Arch hook: offset of the patchable word inside a code sequence (body elided). */
4229 mono_arch_get_patch_offset (guint8 *code)
/* No register windows on ARM (SPARC-style); hook is a no-op (body elided). */
4236 mono_arch_flush_register_windows (void)
/*
 * mono_arch_fixup_jinfo:
 *   Pack the method's stack usage into the upper bits of jit_info->used_regs
 *   (shifted left by 14). stack_usage must be 4-byte aligned and fit in
 *   16 bits of words, i.e. at most 64KB * 4 — asserted below.
 */
4241 mono_arch_fixup_jinfo (MonoCompile *cfg)
4243 /* max encoded stack usage is 64KB * 4 */
4244 g_assert ((cfg->stack_usage & ~(0xffff << 2)) == 0);
4245 cfg->jit_info->used_regs |= cfg->stack_usage << 14;
4248 #ifdef MONO_ARCH_HAVE_IMT
/*
 * mono_arch_emit_imt_argument:
 *   For AOT compilation, load the called method as an AOT constant into a
 *   fresh vreg and bind it to V5, which the IMT thunk inspects when the
 *   in-stream IMT word is 0 (see mono_arch_find_imt_method). Non-AOT path
 *   is elided from this view.
 */
4251 mono_arch_emit_imt_argument (MonoCompile *cfg, MonoCallInst *call)
4253 if (cfg->compile_aot) {
4254 int method_reg = mono_regstate_next_int (cfg->rs);
4257 MONO_INST_NEW (cfg, ins, OP_AOTCONST);
4258 ins->dreg = method_reg;
4259 ins->inst_p0 = call->method;
4260 ins->inst_c1 = MONO_PATCH_INFO_METHODCONST;
4261 MONO_ADD_INS (cfg->cbb, ins);
/* the IMT method travels in V5 */
4263 mono_call_inst_add_outarg_reg (cfg, call, method_reg, ARMREG_V5, FALSE);
/*
 * mono_arch_find_imt_method:
 *   Recover the IMT method for an interface call site. 'code' points at the
 *   LDR-into-PC instruction of the call sequence; the IMT method pointer is
 *   embedded in the code stream in the following word. A zero word means the
 *   caller is AOT code, in which case the method is passed in V5 instead.
 */
4268 mono_arch_find_imt_method (gpointer *regs, guint8 *code)
4270 guint32 *code_ptr = (guint32*)code;
4272 /* The IMT value is stored in the code stream right after the LDR instruction. */
4273 if (!IS_LDR_PC (code_ptr [0])) {
4274 g_warning ("invalid code stream, instruction before IMT value is not a LDC in %s() (code %p value 0: 0x%x -1: 0x%x -2: 0x%x)", __FUNCTION__, code, code_ptr [2], code_ptr [1], code_ptr [0]);
4275 g_assert (IS_LDR_PC (code_ptr [0]));
4277 if (code_ptr [1] == 0)
4278 /* This is AOTed code, the IMT method is in V5 */
4279 return (MonoMethod*)regs [ARMREG_V5];
4281 return (MonoMethod*) code_ptr [1];
/* Recover the 'this' argument of a call from the saved register state. */
4285 mono_arch_find_this_argument (gpointer *regs, MonoMethod *method, MonoGenericSharingContext *gsctx)
4287 return mono_arch_get_this_arg_from_call (gsctx, mono_method_signature (method), (gssize*)regs, NULL);
/* Set to 1 to emit an assertion when the IMT thunk is entered with a wrong method. */
4291 #define ENABLE_WRONG_METHOD_CHECK 0
/* Byte sizes of the code fragments emitted by mono_arch_build_imt_thunk,
 * used to pre-compute each entry's chunk_size (4 bytes per ARM insn). */
4292 #define BASE_SIZE (6 * 4)
4293 #define BSEARCH_ENTRY_SIZE (4 * 4)
4294 #define CMP_SIZE (3 * 4)
4295 #define BRANCH_SIZE (1 * 4)
4296 #define CALL_SIZE (2 * 4)
4297 #define WMC_SIZE (5 * 4)
/* Signed byte distance from A to B. */
4298 #define DISTANCE(A, B) (((gint32)(B)) - ((gint32)(A)))
/*
 * arm_emit_value_and_patch_ldr:
 *   Store 'value' at 'code' (as an in-stream literal) and patch the LDR at
 *   'target' with the 12-bit offset from 'target' to 'code'.
 *   NOTE(review): delta is guint32, so the `delta >= 0` assert is vacuously
 *   true; a negative distance would wrap and be caught by `<= 0xFFF`, but a
 *   signed type would express the intent. Tail of the function elided here.
 */
4301 arm_emit_value_and_patch_ldr (arminstr_t *code, arminstr_t *target, guint32 value)
4303 guint32 delta = DISTANCE (target, code);
4305 g_assert (delta >= 0 && delta <= 0xFFF);
/* OR the 12-bit immediate offset into the LDR encoding */
4306 *target = *target | delta;
/*
 * mono_arch_build_imt_thunk:
 *   Build a native IMT dispatch thunk for 'vtable': a sequence of
 *   compare-and-branch chunks (with binary-search style GE branches for
 *   non-equals entries) that compares the incoming IMT method against each
 *   entry and jumps through the matching vtable slot. Three passes: size
 *   estimation, code emission, then branch patching / literal back-filling.
 *   NOTE(review): this view of the file is elided; #else/#endif lines,
 *   some braces and several statements are not visible here.
 */
4312 mono_arch_build_imt_thunk (MonoVTable *vtable, MonoDomain *domain, MonoIMTCheckItem **imt_entries, int count)
4314 int size, i, extra_space = 0;
4315 arminstr_t *code, *start, *vtable_target = NULL;
/* pass 1: compute each entry's chunk_size and the total thunk size */
4318 for (i = 0; i < count; ++i) {
4319 MonoIMTCheckItem *item = imt_entries [i];
4320 if (item->is_equals) {
/* the slot offset must fit the 12-bit LDR immediate used below */
4321 g_assert (arm_is_imm12 (DISTANCE (vtable, &vtable->vtable[item->vtable_slot])));
4323 if (item->check_target_idx) {
4324 if (!item->compare_done)
4325 item->chunk_size += CMP_SIZE;
4326 item->chunk_size += BRANCH_SIZE;
4328 #if ENABLE_WRONG_METHOD_CHECK
4329 item->chunk_size += WMC_SIZE;
4332 item->chunk_size += CALL_SIZE;
/* non-equals (bsearch) entry: a compare plus a GE branch */
4334 item->chunk_size += BSEARCH_ENTRY_SIZE;
4335 imt_entries [item->check_target_idx]->compare_done = TRUE;
4337 size += item->chunk_size;
4340 start = code = mono_code_manager_reserve (domain->code_mp, size);
/* debug dump of the planned thunk layout (guard elided from this view) */
4343 printf ("building IMT thunk for class %s %s entries %d code size %d code at %p end %p vtable %p\n", vtable->klass->name_space, vtable->klass->name, count, size, start, ((guint8*)start) + size, vtable);
4344 for (i = 0; i < count; ++i) {
4345 MonoIMTCheckItem *item = imt_entries [i];
4346 printf ("method %d (%p) %s vtable slot %p is_equals %d chunk size %d\n", i, item->method, item->method->name, &vtable->vtable [item->vtable_slot], item->is_equals, item->chunk_size);
/* prolog: save r0/r1, reload the IMT word from the call site (lr - 4),
 * and load the vtable address from a literal patched later */
4350 ARM_PUSH2 (code, ARMREG_R0, ARMREG_R1);
4351 ARM_LDR_IMM (code, ARMREG_R0, ARMREG_LR, -4);
4352 vtable_target = code;
4353 ARM_LDR_IMM (code, ARMREG_IP, ARMREG_PC, 0);
4355 /* R0 == 0 means we are called from AOT code. In this case, V5 contains the IMT method */
4356 ARM_CMP_REG_IMM8 (code, ARMREG_R0, 0);
4357 ARM_MOV_REG_REG_COND (code, ARMREG_R0, ARMREG_V5, ARMCOND_EQ);
/* pass 2: emit one chunk per IMT entry */
4359 for (i = 0; i < count; ++i) {
4360 MonoIMTCheckItem *item = imt_entries [i];
4361 arminstr_t *imt_method = NULL;
4362 item->code_target = (guint8*)code;
4364 if (item->is_equals) {
4365 if (item->check_target_idx) {
4366 if (!item->compare_done) {
/* compare incoming method (r0) against this entry's method literal */
4368 ARM_LDR_IMM (code, ARMREG_R1, ARMREG_PC, 0);
4369 ARM_CMP_REG_REG (code, ARMREG_R0, ARMREG_R1);
/* forward branch to the check_target chunk; patched in pass 3 */
4371 item->jmp_code = (guint8*)code;
4372 ARM_B_COND (code, ARMCOND_NE, 0);
/* match: restore r0/r1 and jump through the vtable slot */
4374 ARM_POP2 (code, ARMREG_R0, ARMREG_R1);
4375 ARM_LDR_IMM (code, ARMREG_PC, ARMREG_IP, DISTANCE (vtable, &vtable->vtable[item->vtable_slot]));
4377 /*Enable the commented code to assert on wrong method*/
4378 #if ENABLE_WRONG_METHOD_CHECK
4380 ARM_LDR_IMM (code, ARMREG_R1, ARMREG_PC, 0);
4381 ARM_CMP_REG_REG (code, ARMREG_R0, ARMREG_R1);
4382 ARM_B_COND (code, ARMCOND_NE, 1);
4384 ARM_POP2 (code, ARMREG_R0, ARMREG_R1);
4385 ARM_LDR_IMM (code, ARMREG_PC, ARMREG_IP, DISTANCE (vtable, &vtable->vtable[item->vtable_slot]));
4387 #if ENABLE_WRONG_METHOD_CHECK
/* back-fill the method literal referenced by the LDR above */
4393 code = arm_emit_value_and_patch_ldr (code, imt_method, (guint32)item->method);
4395 /*must emit after unconditional branch*/
4396 if (vtable_target) {
4397 code = arm_emit_value_and_patch_ldr (code, vtable_target, (guint32)vtable);
4398 item->chunk_size += 4;
4399 vtable_target = NULL;
4402 /*We reserve the space for bsearch IMT values after the first entry with an absolute jump*/
4404 code += extra_space;
/* non-equals entry: compare and branch-if-GE to the check target */
4408 ARM_LDR_IMM (code, ARMREG_R1, ARMREG_PC, 0);
4409 ARM_CMP_REG_REG (code, ARMREG_R0, ARMREG_R1);
4411 item->jmp_code = (guint8*)code;
4412 ARM_B_COND (code, ARMCOND_GE, 0);
/* pass 3: patch forward branches and fill reserved literal slots */
4417 for (i = 0; i < count; ++i) {
4418 MonoIMTCheckItem *item = imt_entries [i];
4419 if (item->jmp_code) {
4420 if (item->check_target_idx)
4421 arm_patch (item->jmp_code, imt_entries [item->check_target_idx]->code_target);
4423 if (i > 0 && item->is_equals) {
/* emit the bsearch method literals of the preceding non-equals run */
4425 arminstr_t *space_start = (arminstr_t*)(item->code_target + item->chunk_size);
4426 for (j = i - 1; j >= 0 && !imt_entries [j]->is_equals; --j) {
4427 space_start = arm_emit_value_and_patch_ldr (space_start, (arminstr_t*)imt_entries [j]->code_target, (guint32)imt_entries [j]->method);
/* optional disassembly dump of the finished thunk (guard elided here) */
4434 char *buff = g_strdup_printf ("thunk_for_class_%s_%s_entries_%d", vtable->klass->name_space, vtable->klass->name, count);
4435 mono_disassemble_code (NULL, (guint8*)start, size, buff);
/* make the freshly written code visible to the instruction fetch path */
4440 mono_arch_flush_icache ((guint8*)start, size);
4441 mono_stats.imt_thunks_size += code - start;
4443 g_assert (DISTANCE (start, code) <= size);
4450 mono_arch_context_get_int_reg (MonoContext *ctx, int reg)
4452 /* FIXME: implement */
4453 g_assert_not_reached ();