2 * mini-arm.c: ARM backend for the Mono code generator
5 * Paolo Molaro (lupus@ximian.com)
6 * Dietmar Maurer (dietmar@ximian.com)
8 * (C) 2003 Ximian, Inc.
13 #include <mono/metadata/appdomain.h>
14 #include <mono/metadata/debug-helpers.h>
21 #include "mono/arch/arm/arm-fpa-codegen.h"
22 #elif defined(ARM_FPU_VFP)
23 #include "mono/arch/arm/arm-vfp-codegen.h"
26 /* This mutex protects architecture specific caches */
27 #define mono_mini_arch_lock() EnterCriticalSection (&mini_arch_mutex)
28 #define mono_mini_arch_unlock() LeaveCriticalSection (&mini_arch_mutex)
29 static CRITICAL_SECTION mini_arch_mutex;
/* CPU feature flags, filled in by mono_arch_cpu_optimizazions () from /proc/cpuinfo */
31 static int v5_supported = 0;
32 static int thumb_supported = 0;
36 * floating point support: on ARM it is a mess, there are at least 3
37 * different setups, each of which binary incompatible with the other.
38 * 1) FPA: old and ugly, but unfortunately what current distros use
39 * the double binary format has the two words swapped. 8 double registers.
40 * Implemented usually by kernel emulation.
41 * 2) softfloat: the compiler emulates all the fp ops. Usually uses the
42 * ugly swapped double format (I guess a softfloat-vfp exists, too, though).
43 * 3) VFP: the new and actually sensible and useful FP support. Implemented
44 * in HW or kernel-emulated, requires new tools. I think this is what symbian uses.
46 * The plan is to write the FPA support first. softfloat can be tested in a chroot.
48 int mono_exc_esp_offset = 0;
/* Ranges encodable as immediates in the ARM load/store addressing modes */
50 #define arm_is_imm12(v) ((v) > -4096 && (v) < 4096)
51 #define arm_is_imm8(v) ((v) > -256 && (v) < 256)
52 #define arm_is_fpimm8(v) ((v) >= -1020 && (v) <= 1020)
/* Bit patterns used to recognize the vtable-call sequences in mono_arch_get_vcall_slot () */
54 #define LDR_MASK ((0xf << ARMCOND_SHIFT) | (3 << 26) | (1 << 22) | (1 << 20) | (15 << 12))
55 #define LDR_PC_VAL ((ARMCOND_AL << ARMCOND_SHIFT) | (1 << 26) | (0 << 22) | (1 << 20) | (15 << 12))
56 #define IS_LDR_PC(val) (((val) & LDR_MASK) == LDR_PC_VAL)
58 #define ADD_LR_PC_4 ((ARMCOND_AL << ARMCOND_SHIFT) | (1 << 25) | (1 << 23) | (ARMREG_PC << 16) | (ARMREG_LR << 12) | 4)
59 #define MOV_LR_PC ((ARMCOND_AL << ARMCOND_SHIFT) | (1 << 24) | (0xa << 20) | (ARMREG_LR << 12) | ARMREG_PC)
62 void mini_emit_memcpy2 (MonoCompile *cfg, int destreg, int doffset, int srcreg, int soffset, int size, int align);
/* Returns a human-readable name for integer register `reg` (0..15). */
65 mono_arch_regname (int reg)
67 static const char * rnames[] = {
68 "arm_r0", "arm_r1", "arm_r2", "arm_r3", "arm_v1",
69 "arm_v2", "arm_v3", "arm_v4", "arm_v5", "arm_v6",
70 "arm_v7", "arm_fp", "arm_ip", "arm_sp", "arm_lr",
73 if (reg >= 0 && reg < 16)
/* Returns a human-readable name for floating point register `reg` (0..31). */
79 mono_arch_fregname (int reg)
81 static const char * rnames[] = {
82 "arm_f0", "arm_f1", "arm_f2", "arm_f3", "arm_f4",
83 "arm_f5", "arm_f6", "arm_f7", "arm_f8", "arm_f9",
84 "arm_f10", "arm_f11", "arm_f12", "arm_f13", "arm_f14",
85 "arm_f15", "arm_f16", "arm_f17", "arm_f18", "arm_f19",
86 "arm_f20", "arm_f21", "arm_f22", "arm_f23", "arm_f24",
87 "arm_f25", "arm_f26", "arm_f27", "arm_f28", "arm_f29",
90 if (reg >= 0 && reg < 32)
/*
 * Emits dreg = sreg + imm for an arbitrary 32 bit immediate: uses a single
 * ADD when `imm` fits the ARM rotated-imm8 encoding, otherwise materializes
 * the constant into dreg first (hence dreg must differ from sreg).
 */
96 emit_big_add (guint8 *code, int dreg, int sreg, int imm)
99 if ((imm8 = mono_arm_is_rotated_imm8 (imm, &rot_amount)) >= 0) {
100 ARM_ADD_REG_IMM (code, dreg, sreg, imm8, rot_amount);
/* dreg is used as a scratch to hold the constant, so it cannot alias sreg */
103 g_assert (dreg != sreg);
104 code = mono_arm_emit_load_imm (code, dreg, imm);
105 ARM_ADD_REG_REG (code, dreg, dreg, sreg);
/*
 * Emits a word-granular copy of `size` bytes from sreg+soffset to dreg+doffset.
 * Large copies (> 4 words) use a counted loop clobbering r0-r3; smaller copies
 * are unrolled with LR as scratch. Only valid for prolog argument shuffling,
 * since it freely clobbers the argument registers.
 */
110 emit_memcpy (guint8 *code, int size, int dreg, int doffset, int sreg, int soffset)
112 /* we can use r0-r3, since this is called only for incoming args on the stack */
113 if (size > sizeof (gpointer) * 4) {
115 code = emit_big_add (code, ARMREG_R0, sreg, soffset);
116 code = emit_big_add (code, ARMREG_R1, dreg, doffset);
/* r2 is the remaining byte count; loop copies one word per iteration */
117 start_loop = code = mono_arm_emit_load_imm (code, ARMREG_R2, size);
118 ARM_LDR_IMM (code, ARMREG_R3, ARMREG_R0, 0);
119 ARM_STR_IMM (code, ARMREG_R3, ARMREG_R1, 0);
120 ARM_ADD_REG_IMM8 (code, ARMREG_R0, ARMREG_R0, 4);
121 ARM_ADD_REG_IMM8 (code, ARMREG_R1, ARMREG_R1, 4);
122 ARM_SUBS_REG_IMM8 (code, ARMREG_R2, ARMREG_R2, 4);
123 ARM_B_COND (code, ARMCOND_NE, 0);
124 arm_patch (code - 4, start_loop);
/* small copy: unrolled word moves when both ends fit the imm12 offset encoding */
127 if (arm_is_imm12 (doffset) && arm_is_imm12 (doffset + size) &&
128 arm_is_imm12 (soffset) && arm_is_imm12 (soffset + size)) {
130 ARM_LDR_IMM (code, ARMREG_LR, sreg, soffset);
131 ARM_STR_IMM (code, ARMREG_LR, dreg, doffset);
/* offsets too large for imm12: compute the base addresses into r0/r1 first */
137 code = emit_big_add (code, ARMREG_R0, sreg, soffset);
138 code = emit_big_add (code, ARMREG_R1, dreg, doffset);
139 doffset = soffset = 0;
141 ARM_LDR_IMM (code, ARMREG_LR, ARMREG_R0, soffset);
142 ARM_STR_IMM (code, ARMREG_LR, ARMREG_R1, doffset);
/* NOTE(review): assumes size was word-aligned on entry; sub-word tail handling, if any, is not visible here — confirm */
148 g_assert (size == 0);
/*
 * Emits an indirect call through `reg`: BLX on ARMv5+, otherwise the classic
 * mov lr, pc / mov pc, reg pair (lr ends up pointing past the branch).
 */
153 emit_call_reg (guint8 *code, int reg)
156 ARM_BLX_REG (code, reg);
158 ARM_MOV_REG_REG (code, ARMREG_LR, ARMREG_PC);
162 ARM_MOV_REG_REG (code, ARMREG_PC, reg);
/*
 * Emits a call sequence; for dynamic methods the target address is loaded
 * from an inline literal (patched later — the NULL slot below is the
 * placeholder) because dynamic code cannot use the normal patch-info path.
 */
168 emit_call_seq (MonoCompile *cfg, guint8 *code)
170 if (cfg->method->dynamic) {
171 ARM_LDR_IMM (code, ARMREG_IP, ARMREG_PC, 0);
/* inline literal pool slot holding the call target, filled in at patch time */
173 *(gpointer*)code = NULL;
175 code = emit_call_reg (code, ARMREG_IP);
/*
 * Moves the return value of a call into the destination vreg of `ins`.
 * For FP calls on FPA the result arrives in f0 and is copied to dreg
 * only when they differ.
 */
183 emit_move_return_value (MonoCompile *cfg, MonoInst *ins, guint8 *code)
185 switch (ins->opcode) {
188 case OP_FCALL_MEMBASE:
190 if (ins->dreg != ARM_FPA_F0)
191 ARM_MVFD (code, ins->dreg, ARM_FPA_F0)
200 * mono_arch_get_argument_info:
201 * @csig: a method signature
202 * @param_count: the number of parameters to consider
203 * @arg_info: an array to store the result infos
205 * Gathers information on parameters such as size, alignment and
206 * padding. arg_info should be large enough to hold param_count + 1 entries.
208 * Returns the size of the activation frame.
211 mono_arch_get_argument_info (MonoMethodSignature *csig, int param_count, MonoJitArgumentInfo *arg_info)
213 int k, frame_size = 0;
214 guint32 size, align, pad;
/* hidden first slot for the valuetype return buffer pointer */
217 if (MONO_TYPE_ISSTRUCT (csig->ret)) {
218 frame_size += sizeof (gpointer);
222 arg_info [0].offset = offset;
225 frame_size += sizeof (gpointer);
229 arg_info [0].size = frame_size;
231 for (k = 0; k < param_count; k++) {
232 size = mini_type_stack_size_full (NULL, csig->params [k], &align, csig->pinvoke);
234 /* ignore alignment for now */
/* pad up to the parameter's natural alignment before placing it */
237 frame_size += pad = (align - (frame_size & (align - 1))) & (align - 1);
238 arg_info [k].pad = pad;
240 arg_info [k + 1].pad = 0;
241 arg_info [k + 1].size = size;
243 arg_info [k + 1].offset = offset;
/* round the whole frame up to the ABI frame alignment */
247 align = MONO_ARCH_FRAME_ALIGNMENT;
248 frame_size += pad = (align - (frame_size & (align - 1))) & (align - 1);
249 arg_info [k].pad = pad;
/*
 * Decodes an "ldr pc, [rN, #off]" instruction word: extracts the base
 * register number (bits 16-19) and the signed 12-bit offset, then returns
 * the base register's saved value from `regs` with the offset in *displacement.
 */
255 decode_vcall_slot_from_ldr (guint32 ldr, gpointer *regs, int *displacement)
259 reg = (ldr >> 16 ) & 0xf;
260 offset = ldr & 0xfff;
261 if (((ldr >> 23) & 1) == 0) /*U bit, 0 means negative and 1 positive*/
263 /*g_print ("found vcall at r%d + %d for code at %p 0x%x\n", reg, offset, code, *code);*/
266 *displacement = offset;
271 mono_arch_get_vcall_slot (guint8 *code_ptr, gpointer *regs, int *displacement)
273 guint32* code = (guint32*)code_ptr;
275 /* Locate the address of the method-specific trampoline. The call using
276 the vtable slot that took the processing flow to 'arch_create_jit_trampoline'
277 looks something like this:
286 The call sequence could be also:
289 function pointer literal
293 Note that on ARM5+ we can use one instruction instead of the last two.
294 Therefore, we need to locate the 'ldr rA' instruction to know which
295 register was used to hold the method addrs.
298 /* This is the instruction after "ldr pc, xxx", "mov pc, xxx" or "bl xxx" could be either the IMT value or some other instruction*/
301 /* Three possible code sequences can happen here:
305 * ldr pc, [rX - #offset]
311 * ldr pc, [rX - #offset]
313 * direct branch with bl:
317 * direct branch with mov:
321 * We only need to identify interface and virtual calls, the others can be ignored.
/* virtual call: the "add lr, pc, #4" precedes the "ldr pc, [...]" */
324 if (IS_LDR_PC (code [-1]) && code [-2] == ADD_LR_PC_4)
325 return decode_vcall_slot_from_ldr (code [-1], regs, displacement);
/* interface call: "mov lr, pc" then "ldr pc, [...]" at the return address itself */
327 if (IS_LDR_PC (code [0]) && code [-1] == MOV_LR_PC)
328 return decode_vcall_slot_from_ldr (code [0], regs, displacement);
/* Returns the address of the vtable slot used by the call at `code`, or the
 * base+displacement resolved by mono_arch_get_vcall_slot (). */
334 mono_arch_get_vcall_slot_addr (guint8* code, gpointer *regs)
338 vt = mono_arch_get_vcall_slot (code, regs, &displacement);
341 return (gpointer*)((char*)vt + displacement);
/* Delegate invoke thunks below only handle up to this many regsize arguments */
344 #define MAX_ARCH_DELEGATE_PARAMS 3
/*
 * Returns (and caches, under the arch lock) a small thunk that invokes a
 * delegate's method_ptr directly. The has_target variant replaces `this`
 * (r0) with the delegate's target; the static variant slides each argument
 * register down by one to drop the delegate argument.
 */
347 mono_arch_get_delegate_invoke_impl (MonoMethodSignature *sig, gboolean has_target)
349 guint8 *code, *start;
351 /* FIXME: Support more cases */
352 if (MONO_TYPE_ISSTRUCT (sig->ret))
/* single shared thunk for all has_target delegates */
356 static guint8* cached = NULL;
357 mono_mini_arch_lock ();
359 mono_mini_arch_unlock ();
/* 3 instructions * 4 bytes */
363 start = code = mono_global_codeman_reserve (12);
365 /* Replace the this argument with the target */
366 ARM_LDR_IMM (code, ARMREG_IP, ARMREG_R0, G_STRUCT_OFFSET (MonoDelegate, method_ptr));
367 ARM_LDR_IMM (code, ARMREG_R0, ARMREG_R0, G_STRUCT_OFFSET (MonoDelegate, target));
368 ARM_MOV_REG_REG (code, ARMREG_PC, ARMREG_IP);
370 g_assert ((code - start) <= 12);
372 mono_arch_flush_icache (code, 12);
374 mono_mini_arch_unlock ();
/* static delegates: one cached thunk per parameter count */
377 static guint8* cache [MAX_ARCH_DELEGATE_PARAMS + 1] = {NULL};
380 if (sig->param_count > MAX_ARCH_DELEGATE_PARAMS)
382 for (i = 0; i < sig->param_count; ++i)
383 if (!mono_is_regsize_var (sig->params [i]))
386 mono_mini_arch_lock ();
387 code = cache [sig->param_count];
389 mono_mini_arch_unlock ();
/* load method_ptr + one MOV per argument + jump */
393 size = 8 + sig->param_count * 4;
394 start = code = mono_global_codeman_reserve (size);
396 ARM_LDR_IMM (code, ARMREG_IP, ARMREG_R0, G_STRUCT_OFFSET (MonoDelegate, method_ptr));
397 /* slide down the arguments */
398 for (i = 0; i < sig->param_count; ++i) {
399 ARM_MOV_REG_REG (code, (ARMREG_R0 + i), (ARMREG_R0 + i + 1));
401 ARM_MOV_REG_REG (code, ARMREG_PC, ARMREG_IP);
403 g_assert ((code - start) <= size);
405 mono_arch_flush_icache (code, size);
406 cache [sig->param_count] = start;
407 mono_mini_arch_unlock ();
/*
 * Extracts the `this` argument from the saved register state of a call:
 * normally in r0, but when the callee returns a struct r0 holds the hidden
 * return-buffer pointer and `this` is shifted into r1.
 */
415 mono_arch_get_this_arg_from_call (MonoGenericSharingContext *gsctx, MonoMethodSignature *sig, gssize *regs, guint8 *code)
417 /* FIXME: handle returning a struct */
418 if (MONO_TYPE_ISSTRUCT (sig->ret))
419 return (gpointer)regs [ARMREG_R1];
420 return (gpointer)regs [ARMREG_R0];
424 * Initialize the cpu to execute managed code.
427 mono_arch_cpu_init (void)
432 * Initialize architecture specific code.
435 mono_arch_init (void)
/* guards the thunk caches used by mono_arch_get_delegate_invoke_impl () */
437 InitializeCriticalSection (&mini_arch_mutex);
441 * Cleanup architecture specific code.
444 mono_arch_cleanup (void)
449 * This function returns the optimizations supported on this cpu.
/* (note: the misspelled name is part of the cross-arch mini API and cannot be changed here) */
452 mono_arch_cpu_optimizazions (guint32 *exclude_mask)
456 thumb_supported = TRUE;
/* probe CPU capabilities from the kernel; sets v5_supported/thumb_supported */
/* NOTE(review): a NULL check on `file` is not visible in this view — confirm fopen failure is handled before the fgets loop */
461 FILE *file = fopen ("/proc/cpuinfo", "r");
463 while ((line = fgets (buf, 512, file))) {
464 if (strncmp (line, "Processor", 9) == 0) {
465 char *ver = strstr (line, "(v");
466 if (ver && (ver [2] == '5' || ver [2] == '6' || ver [2] == '7')) {
471 if (strncmp (line, "Features", 8) == 0) {
472 char *th = strstr (line, "thumb");
474 thumb_supported = TRUE;
482 /*printf ("features: v5: %d, thumb: %d\n", v5_supported, thumb_supported);*/
486 /* no arm-specific optimizations yet */
/* Returns whether `t` fits in a single 32 bit integer register
 * (pointers, object references, non-valuetype generic insts). */
492 is_regsize_var (MonoType *t) {
495 t = mini_type_get_underlying_type (NULL, t);
502 case MONO_TYPE_FNPTR:
504 case MONO_TYPE_OBJECT:
505 case MONO_TYPE_STRING:
506 case MONO_TYPE_CLASS:
507 case MONO_TYPE_SZARRAY:
508 case MONO_TYPE_ARRAY:
510 case MONO_TYPE_GENERICINST:
511 if (!mono_type_generic_inst_is_valuetype (t))
514 case MONO_TYPE_VALUETYPE:
/* Returns the list of local variables eligible for global register
 * allocation, sorted by liveness; only 32 bit regsize locals qualify. */
521 mono_arch_get_allocatable_int_vars (MonoCompile *cfg)
526 for (i = 0; i < cfg->num_varinfo; i++) {
527 MonoInst *ins = cfg->varinfo [i];
528 MonoMethodVar *vmv = MONO_VARINFO (cfg, i);
/* unused or dead variables have an empty live range */
531 if (vmv->range.first_use.abs_pos >= vmv->range.last_use.abs_pos)
534 if (ins->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT) || (ins->opcode != OP_LOCAL && ins->opcode != OP_ARG))
537 /* we can only allocate 32 bit values */
538 if (is_regsize_var (ins->inst_vtype)) {
539 g_assert (MONO_VARINFO (cfg, i)->reg == -1);
540 g_assert (i == vmv->idx);
541 vars = mono_varlist_insert_sorted (cfg, vars, vmv, FALSE);
548 #define USE_EXTRA_TEMPS 0
/* Returns the callee-saved registers available to the global register
 * allocator: v1-v4 always, v5 only when not needed for vtable/rgctx passing. */
551 mono_arch_get_global_int_regs (MonoCompile *cfg)
554 regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_V1));
555 regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_V2));
556 regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_V3));
557 regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_V4));
558 if (!(cfg->compile_aot || cfg->uses_rgctx_reg))
559 /* V5 is reserved for passing the vtable/rgctx/IMT method */
560 regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_V5));
561 /*regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_V6));*/
562 /*regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_V7));*/
568 * mono_arch_regalloc_cost:
570 * Return the cost, in number of memory references, of the action of
571 * allocating the variable VMV into a register during global register
575 mono_arch_regalloc_cost (MonoCompile *cfg, MonoMethodVar *vmv)
/*
 * Flushes the instruction cache for [code, code+size): uses the Darwin
 * sys_icache_invalidate () on one branch and the Linux sys_cacheflush
 * syscall (swi 0x9f0002) on the other.
 */
582 mono_arch_flush_icache (guint8 *code, gint size)
585 sys_icache_invalidate (code, size);
587 __asm __volatile ("mov r0, %0\n"
590 "swi 0x9f0002 @ sys_cacheflush"
592 : "r" (code), "r" (code + size), "r" (0)
593 : "r0", "r1", "r3" );
/* fields of the per-argument info record (presumably ArgInfo — the struct
 * header is not visible here; TODO confirm) */
608 guint16 vtsize; /* in param area */
610 guint8 regtype : 4; /* 0 general, 1 basereg, 2 floating point register, see RegType* */
611 guint8 size : 4; /* 1, 2, 4, 8, or regs used by RegTypeStructByVal */
/*
 * Assigns the next argument slot: into r0-r3 while registers remain,
 * otherwise onto the caller's stack (RegTypeBase). A `simple`==FALSE
 * (64 bit) argument that straddles r3 is split between r3 and the stack
 * (RegTypeBaseGen). Advances *gr / *stack_size accordingly.
 */
626 add_general (guint *gr, guint *stack_size, ArgInfo *ainfo, gboolean simple)
629 if (*gr > ARMREG_R3) {
630 ainfo->offset = *stack_size;
631 ainfo->reg = ARMREG_SP; /* in the caller */
632 ainfo->regtype = RegTypeBase;
643 /* first word in r3 and the second on the stack */
644 ainfo->offset = *stack_size;
645 ainfo->reg = ARMREG_SP; /* in the caller */
646 ainfo->regtype = RegTypeBaseGen;
648 } else if (*gr >= ARMREG_R3) {
653 ainfo->offset = *stack_size;
654 ainfo->reg = ARMREG_SP; /* in the caller */
655 ainfo->regtype = RegTypeBase;
/*
 * Computes the calling-convention layout (CallInfo) for `sig`: which
 * register or stack slot each argument lives in, the return convention,
 * and the total stack usage. The returned CallInfo is heap-allocated
 * with g_malloc0 — the caller owns and must free it.
 */
670 calculate_sizes (MonoMethodSignature *sig, gboolean is_pinvoke)
673 int n = sig->hasthis + sig->param_count;
674 MonoType *simpletype;
675 guint32 stack_size = 0;
676 CallInfo *cinfo = g_malloc0 (sizeof (CallInfo) + sizeof (ArgInfo) * n);
680 /* FIXME: handle returning a struct */
681 if (MONO_TYPE_ISSTRUCT (sig->ret)) {
682 add_general (&gr, &stack_size, &cinfo->ret, TRUE);
683 cinfo->struct_ret = ARMREG_R0;
/* the implicit `this` argument */
688 add_general (&gr, &stack_size, cinfo->args + n, TRUE);
691 DEBUG(printf("params: %d\n", sig->param_count));
692 for (i = 0; i < sig->param_count; ++i) {
693 if ((sig->call_convention == MONO_CALL_VARARG) && (i == sig->sentinelpos)) {
694 /* Prevent implicit arguments and sig_cookie from
695 being passed in registers */
697 /* Emit the signature cookie just before the implicit arguments */
698 add_general (&gr, &stack_size, &cinfo->sig_cookie, TRUE);
700 DEBUG(printf("param %d: ", i));
701 if (sig->params [i]->byref) {
702 DEBUG(printf("byref\n"));
703 add_general (&gr, &stack_size, cinfo->args + n, TRUE);
707 simpletype = mini_type_get_underlying_type (NULL, sig->params [i]);
708 switch (simpletype->type) {
709 case MONO_TYPE_BOOLEAN:
712 cinfo->args [n].size = 1;
713 add_general (&gr, &stack_size, cinfo->args + n, TRUE);
719 cinfo->args [n].size = 2;
720 add_general (&gr, &stack_size, cinfo->args + n, TRUE);
725 cinfo->args [n].size = 4;
726 add_general (&gr, &stack_size, cinfo->args + n, TRUE);
732 case MONO_TYPE_FNPTR:
733 case MONO_TYPE_CLASS:
734 case MONO_TYPE_OBJECT:
735 case MONO_TYPE_STRING:
736 case MONO_TYPE_SZARRAY:
737 case MONO_TYPE_ARRAY:
739 cinfo->args [n].size = sizeof (gpointer);
740 add_general (&gr, &stack_size, cinfo->args + n, TRUE);
743 case MONO_TYPE_GENERICINST:
744 if (!mono_type_generic_inst_is_valuetype (sig->params [i])) {
745 cinfo->args [n].size = sizeof (gpointer);
746 add_general (&gr, &stack_size, cinfo->args + n, TRUE);
751 case MONO_TYPE_TYPEDBYREF:
752 case MONO_TYPE_VALUETYPE: {
757 if (simpletype->type == MONO_TYPE_TYPEDBYREF) {
758 size = sizeof (MonoTypedRef);
760 MonoClass *klass = mono_class_from_mono_type (sig->params [i]);
762 size = mono_class_native_size (klass, NULL);
764 size = mono_class_value_size (klass, NULL);
766 DEBUG(printf ("load %d bytes struct\n",
767 mono_class_native_size (sig->params [i]->data.klass, NULL)));
/* round the struct size up to whole pointer-sized words */
770 align_size += (sizeof (gpointer) - 1);
771 align_size &= ~(sizeof (gpointer) - 1);
772 nwords = (align_size + sizeof (gpointer) -1 ) / sizeof (gpointer);
773 cinfo->args [n].regtype = RegTypeStructByVal;
774 /* FIXME: align gr and stack_size if needed */
775 if (gr > ARMREG_R3) {
776 cinfo->args [n].size = 0;
777 cinfo->args [n].vtsize = nwords;
/* struct split between the remaining registers and the stack */
779 int rest = ARMREG_R3 - gr + 1;
780 int n_in_regs = rest >= nwords? nwords: rest;
781 cinfo->args [n].size = n_in_regs;
782 cinfo->args [n].vtsize = nwords - n_in_regs;
783 cinfo->args [n].reg = gr;
786 cinfo->args [n].offset = stack_size;
787 /*g_print ("offset for arg %d at %d\n", n, stack_size);*/
788 stack_size += nwords * sizeof (gpointer);
/* 64 bit argument: may be split across r3/stack (simple==FALSE) */
795 cinfo->args [n].size = 8;
796 add_general (&gr, &stack_size, cinfo->args + n, FALSE);
800 g_error ("Can't trampoline 0x%x", sig->params [i]->type);
/* return value convention */
805 simpletype = mini_type_get_underlying_type (NULL, sig->ret);
806 switch (simpletype->type) {
807 case MONO_TYPE_BOOLEAN:
818 case MONO_TYPE_FNPTR:
819 case MONO_TYPE_CLASS:
820 case MONO_TYPE_OBJECT:
821 case MONO_TYPE_SZARRAY:
822 case MONO_TYPE_ARRAY:
823 case MONO_TYPE_STRING:
824 cinfo->ret.reg = ARMREG_R0;
828 cinfo->ret.reg = ARMREG_R0;
832 cinfo->ret.reg = ARMREG_R0;
833 /* FIXME: cinfo->ret.reg = ???;
834 cinfo->ret.regtype = RegTypeFP;*/
836 case MONO_TYPE_GENERICINST:
837 if (!mono_type_generic_inst_is_valuetype (sig->ret)) {
838 cinfo->ret.reg = ARMREG_R0;
842 case MONO_TYPE_VALUETYPE:
844 case MONO_TYPE_TYPEDBYREF:
848 g_error ("Can't handle as return value 0x%x", sig->ret->type);
852 /* align stack size to 8 */
853 DEBUG (printf (" stack size: %d (%d)\n", (stack_size + 15) & ~15, stack_size));
854 stack_size = (stack_size + 7) & ~7;
856 cinfo->stack_usage = stack_size;
862 * Set var information according to the calling convention. arm version.
863 * The locals var stuff should most likely be split in another method.
866 mono_arch_allocate_vars (MonoCompile *cfg)
868 MonoMethodSignature *sig;
869 MonoMethodHeader *header;
871 int i, offset, size, align, curinst;
872 int frame_reg = ARMREG_FP;
874 /* FIXME: this will change when we use FP as gcc does */
875 cfg->flags |= MONO_CFG_HAS_SPILLUP;
877 /* allow room for the vararg method args: void* and long/double */
878 if (mono_jit_trace_calls != NULL && mono_trace_eval (cfg->method))
879 cfg->param_area = MAX (cfg->param_area, sizeof (gpointer)*8);
881 header = mono_method_get_header (cfg->method);
884 * We use the frame register also for any method that has
885 * exception clauses. This way, when the handlers are called,
886 * the code will reference local variables using the frame reg instead of
887 * the stack pointer: if we had to restore the stack pointer, we'd
888 * corrupt the method frames that are already on the stack (since
889 * filters get called before stack unwinding happens) when the filter
890 * code would call any method (this also applies to finally etc.).
892 if ((cfg->flags & MONO_CFG_HAS_ALLOCA) || header->num_clauses)
893 frame_reg = ARMREG_FP;
894 cfg->frame_reg = frame_reg;
895 if (frame_reg != ARMREG_SP) {
896 cfg->used_int_regs |= 1 << frame_reg;
/* FIX: the condition must mirror the negated test in
 * mono_arch_get_global_int_regs (): V5 is reserved (marked used so the
 * prolog saves it) exactly when it is NOT handed to the register
 * allocator, i.e. when (compile_aot || uses_rgctx_reg). The previous
 * "!cfg->compile_aot || cfg->uses_rgctx_reg" negated only the first
 * operand and inverted the AOT case. */
899 if (cfg->compile_aot || cfg->uses_rgctx_reg)
900 /* V5 is reserved for passing the vtable/rgctx/IMT method */
901 cfg->used_int_regs |= (1 << ARMREG_V5);
903 sig = mono_method_signature (cfg->method);
/* scalar returns come back in r0 */
907 if (!MONO_TYPE_ISSTRUCT (sig->ret)) {
908 /* FIXME: handle long and FP values */
909 switch (mini_type_get_underlying_type (NULL, sig->ret)->type) {
913 cfg->ret->opcode = OP_REGVAR;
914 cfg->ret->inst_c0 = ARMREG_R0;
918 /* local vars are at a positive offset from the stack pointer */
920 * also note that if the function uses alloca, we use FP
921 * to point at the local variables.
923 offset = 0; /* linkage area */
924 /* align the offset to 16 bytes: not sure this is needed here */
926 //offset &= ~(8 - 1);
928 /* add parameter area size for called functions */
929 offset += cfg->param_area;
932 if (cfg->flags & MONO_CFG_HAS_FPOUT)
935 /* allow room to save the return value */
936 if (mono_jit_trace_calls != NULL && mono_trace_eval (cfg->method))
939 /* the MonoLMF structure is stored just below the stack pointer */
941 if (sig->call_convention == MONO_CALL_VARARG) {
/* hidden valuetype-return address gets its own pointer-aligned slot */
945 if (MONO_TYPE_ISSTRUCT (sig->ret)) {
946 inst = cfg->vret_addr;
947 offset += sizeof(gpointer) - 1;
948 offset &= ~(sizeof(gpointer) - 1);
949 inst->inst_offset = offset;
950 inst->opcode = OP_REGOFFSET;
951 inst->inst_basereg = frame_reg;
952 if (G_UNLIKELY (cfg->verbose_level > 1)) {
953 printf ("vret_addr =");
954 mono_print_ins (cfg->vret_addr);
956 offset += sizeof(gpointer);
957 if (sig->call_convention == MONO_CALL_VARARG)
958 cfg->sig_cookie += sizeof (gpointer);
/* lay out the locals */
961 curinst = cfg->locals_start;
962 for (i = curinst; i < cfg->num_varinfo; ++i) {
963 inst = cfg->varinfo [i];
964 if ((inst->flags & MONO_INST_IS_DEAD) || inst->opcode == OP_REGVAR)
967 /* inst->backend.is_pinvoke indicates native sized value types, this is used by the
968 * pinvoke wrappers when they call functions returning structure */
969 if (inst->backend.is_pinvoke && MONO_TYPE_ISSTRUCT (inst->inst_vtype) && inst->inst_vtype->type != MONO_TYPE_TYPEDBYREF) {
971 size = mono_class_native_size (mono_class_from_mono_type (inst->inst_vtype), &ualign);
975 size = mono_type_size (inst->inst_vtype, &align);
977 /* FIXME: if a structure is misaligned, our memcpy doesn't work,
978 * since it loads/stores misaligned words, which don't do the right thing.
980 if (align < 4 && size >= 4)
983 offset &= ~(align - 1);
984 inst->inst_offset = offset;
985 inst->opcode = OP_REGOFFSET;
986 inst->inst_basereg = frame_reg;
988 //g_print ("allocating local %d to %d\n", i, inst->inst_offset);
/* stack slot for an implicit `this` that was not register-allocated */
993 inst = cfg->args [curinst];
994 if (inst->opcode != OP_REGVAR) {
995 inst->opcode = OP_REGOFFSET;
996 inst->inst_basereg = frame_reg;
997 offset += sizeof (gpointer) - 1;
998 offset &= ~(sizeof (gpointer) - 1);
999 inst->inst_offset = offset;
1000 offset += sizeof (gpointer);
1001 if (sig->call_convention == MONO_CALL_VARARG)
1002 cfg->sig_cookie += sizeof (gpointer);
/* lay out the remaining incoming arguments */
1007 for (i = 0; i < sig->param_count; ++i) {
1008 inst = cfg->args [curinst];
1009 if (inst->opcode != OP_REGVAR) {
1010 inst->opcode = OP_REGOFFSET;
1011 inst->inst_basereg = frame_reg;
1012 size = mono_type_size (sig->params [i], &align);
1013 /* FIXME: if a structure is misaligned, our memcpy doesn't work,
1014 * since it loads/stores misaligned words, which don't do the right thing.
1016 if (align < 4 && size >= 4)
1018 offset += align - 1;
1019 offset &= ~(align - 1);
1020 inst->inst_offset = offset;
1022 if ((sig->call_convention == MONO_CALL_VARARG) && (i < sig->sentinelpos))
1023 cfg->sig_cookie += size;
1028 /* align the offset to 8 bytes */
1033 cfg->stack_offset = offset;
/* Creates arch-specific compile variables: a vret_addr var to hold the
 * hidden return-buffer pointer when the method returns a valuetype. */
1037 mono_arch_create_vars (MonoCompile *cfg)
1039 MonoMethodSignature *sig;
1041 sig = mono_method_signature (cfg->method);
1043 if (MONO_TYPE_ISSTRUCT (sig->ret)) {
1044 cfg->vret_addr = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_ARG);
1045 if (G_UNLIKELY (cfg->verbose_level > 1)) {
1046 printf ("vret_addr = ");
1047 mono_print_ins (cfg->vret_addr);
1053 * take the arguments and generate the arch-specific
1054 * instructions to properly call the function in call.
1055 * This includes pushing, moving arguments to the right register
1057 * Issue: who does the spilling if needed, and when?
/* (old tree-IR code path; mono_arch_emit_call () below is the linear-IR equivalent) */
1060 mono_arch_call_opcode (MonoCompile *cfg, MonoBasicBlock* bb, MonoCallInst *call, int is_virtual) {
1062 MonoMethodSignature *sig;
1067 sig = call->signature;
1068 n = sig->param_count + sig->hasthis;
/* NOTE(review): cinfo is heap-allocated by calculate_sizes (); the g_free is not visible in this view — confirm it is freed on all paths */
1070 cinfo = calculate_sizes (sig, sig->pinvoke);
1071 if (cinfo->struct_ret)
1072 call->used_iregs |= 1 << cinfo->struct_ret;
1074 for (i = 0; i < n; ++i) {
1075 ainfo = cinfo->args + i;
1076 if ((sig->call_convention == MONO_CALL_VARARG) && (i == sig->sentinelpos)) {
1078 cfg->disable_aot = TRUE;
/* emit the signature cookie outarg */
1080 MONO_INST_NEW (cfg, sig_arg, OP_ICONST);
1081 sig_arg->inst_p0 = call->signature;
1083 MONO_INST_NEW (cfg, arg, OP_OUTARG);
1084 arg->inst_imm = cinfo->sig_cookie.offset;
1085 arg->inst_left = sig_arg;
1087 /* prepend, so they get reversed */
1088 arg->next = call->out_args;
1089 call->out_args = arg;
1091 if (is_virtual && i == 0) {
1092 /* the argument will be attached to the call instruction */
1093 in = call->args [i];
1094 call->used_iregs |= 1 << ainfo->reg;
1096 MONO_INST_NEW (cfg, arg, OP_OUTARG);
1097 in = call->args [i];
1098 arg->cil_code = in->cil_code;
1099 arg->inst_left = in;
1100 arg->inst_right = (MonoInst*)call;
1101 arg->type = in->type;
1102 /* prepend, we'll need to reverse them later */
1103 arg->next = call->out_args;
1104 call->out_args = arg;
1105 if (ainfo->regtype == RegTypeGeneral) {
1106 arg->backend.reg3 = ainfo->reg;
1107 call->used_iregs |= 1 << ainfo->reg;
/* 64 bit values occupy a register pair */
1108 if (arg->type == STACK_I8)
1109 call->used_iregs |= 1 << (ainfo->reg + 1);
1110 if (arg->type == STACK_R8) {
1111 if (ainfo->size == 4) {
1112 #ifndef MONO_ARCH_SOFT_FLOAT
1113 arg->opcode = OP_OUTARG_R4;
1116 call->used_iregs |= 1 << (ainfo->reg + 1);
1118 cfg->flags |= MONO_CFG_HAS_FPOUT;
1120 } else if (ainfo->regtype == RegTypeStructByAddr) {
1121 /* FIXME: where is the data allocated? */
1122 arg->backend.reg3 = ainfo->reg;
1123 call->used_iregs |= 1 << ainfo->reg;
1124 g_assert_not_reached ();
1125 } else if (ainfo->regtype == RegTypeStructByVal) {
1127 /* mark the used regs */
1128 for (cur_reg = 0; cur_reg < ainfo->size; ++cur_reg) {
1129 call->used_iregs |= 1 << (ainfo->reg + cur_reg);
1131 arg->opcode = OP_OUTARG_VT;
1132 /* vtsize and offset have just 12 bits of encoding in number of words */
1133 g_assert (((ainfo->vtsize | (ainfo->offset / 4)) & 0xfffff000) == 0);
1134 arg->backend.arg_info = ainfo->reg | (ainfo->size << 4) | (ainfo->vtsize << 8) | ((ainfo->offset / 4) << 20);
1135 } else if (ainfo->regtype == RegTypeBase) {
1136 arg->opcode = OP_OUTARG_MEMBASE;
1137 arg->backend.arg_info = (ainfo->offset << 8) | ainfo->size;
1138 } else if (ainfo->regtype == RegTypeBaseGen) {
/* first word goes in r3, the rest on the stack (0xff marks the split) */
1139 call->used_iregs |= 1 << ARMREG_R3;
1140 arg->opcode = OP_OUTARG_MEMBASE;
1141 arg->backend.arg_info = (ainfo->offset << 8) | 0xff;
1142 if (arg->type == STACK_R8)
1143 cfg->flags |= MONO_CFG_HAS_FPOUT;
1144 } else if (ainfo->regtype == RegTypeFP) {
1145 arg->backend.reg3 = ainfo->reg;
1146 /* FP args are passed in int regs */
1147 call->used_iregs |= 1 << ainfo->reg;
1148 if (ainfo->size == 8) {
1149 arg->opcode = OP_OUTARG_R8;
1150 call->used_iregs |= 1 << (ainfo->reg + 1);
1152 arg->opcode = OP_OUTARG_R4;
1154 cfg->flags |= MONO_CFG_HAS_FPOUT;
1156 g_assert_not_reached ();
1161 * Reverse the call->out_args list.
1164 MonoInst *prev = NULL, *list = call->out_args, *next;
1171 call->out_args = prev;
1173 call->stack_usage = cinfo->stack_usage;
1174 cfg->param_area = MAX (cfg->param_area, cinfo->stack_usage);
1175 cfg->flags |= MONO_CFG_HAS_CALLS;
1177 * should set more info in call, such as the stack space
1178 * used by the args that needs to be added back to esp
/*
 * Linear-IR argument marshalling: emits the MonoInsts that move each call
 * argument into the register/stack slot chosen by calculate_sizes ().
 */
1186 mono_arch_emit_call (MonoCompile *cfg, MonoCallInst *call)
1189 MonoMethodSignature *sig;
1193 sig = call->signature;
1194 n = sig->param_count + sig->hasthis;
/* NOTE(review): cinfo is heap-allocated by calculate_sizes (); the g_free is not visible in this view — confirm it is freed on all paths */
1196 cinfo = calculate_sizes (sig, sig->pinvoke);
1198 for (i = 0; i < n; ++i) {
1199 ArgInfo *ainfo = cinfo->args + i;
/* the implicit `this` (when i < hasthis) is treated as a native int */
1202 if (i >= sig->hasthis)
1203 t = sig->params [i - sig->hasthis];
1205 t = &mono_defaults.int_class->byval_arg;
1206 t = mini_type_get_underlying_type (NULL, t);
1208 if ((sig->call_convention == MONO_CALL_VARARG) && (i == sig->sentinelpos)) {
1213 in = call->args [i];
1215 switch (ainfo->regtype) {
1216 case RegTypeGeneral:
/* 64 bit int: move both halves into a consecutive register pair */
1217 if (!t->byref && ((t->type == MONO_TYPE_I8) || (t->type == MONO_TYPE_U8))) {
1218 MONO_INST_NEW (cfg, ins, OP_MOVE);
1219 ins->dreg = mono_alloc_ireg (cfg);
1220 ins->sreg1 = in->dreg + 1;
1221 MONO_ADD_INS (cfg->cbb, ins);
1222 mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, ainfo->reg, FALSE);
1224 MONO_INST_NEW (cfg, ins, OP_MOVE);
1225 ins->dreg = mono_alloc_ireg (cfg);
1226 ins->sreg1 = in->dreg + 2;
1227 MONO_ADD_INS (cfg->cbb, ins);
1228 mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, ainfo->reg + 1, FALSE);
1229 } else if (!t->byref && ((t->type == MONO_TYPE_R8) || (t->type == MONO_TYPE_R4))) {
1230 #ifndef MONO_ARCH_SOFT_FLOAT
1234 if (ainfo->size == 4) {
1235 #ifdef MONO_ARCH_SOFT_FLOAT
1236 /* mono_emit_call_args () have already done the r8->r4 conversion */
1237 /* The converted value is in an int vreg */
1238 MONO_INST_NEW (cfg, ins, OP_MOVE);
1239 ins->dreg = mono_alloc_ireg (cfg);
1240 ins->sreg1 = in->dreg;
1241 MONO_ADD_INS (cfg->cbb, ins);
1242 mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, ainfo->reg, FALSE);
/* hard float: bounce the value through the param area to reach an int reg */
1244 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORER4_MEMBASE_REG, ARMREG_SP, (cfg->param_area - 8), in->dreg);
1245 creg = mono_alloc_ireg (cfg);
1246 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOAD_MEMBASE, creg, ARMREG_SP, (cfg->param_area - 8));
1247 mono_call_inst_add_outarg_reg (cfg, call, creg, ainfo->reg, FALSE);
1250 #ifdef MONO_ARCH_SOFT_FLOAT
/* soft float doubles: split into low/high 32 bit halves */
1251 MONO_INST_NEW (cfg, ins, OP_FGETLOW32);
1252 ins->dreg = mono_alloc_ireg (cfg);
1253 ins->sreg1 = in->dreg;
1254 MONO_ADD_INS (cfg->cbb, ins);
1255 mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, ainfo->reg, FALSE);
1257 MONO_INST_NEW (cfg, ins, OP_FGETHIGH32);
1258 ins->dreg = mono_alloc_ireg (cfg);
1259 ins->sreg1 = in->dreg;
1260 MONO_ADD_INS (cfg->cbb, ins);
1261 mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, ainfo->reg + 1, FALSE);
/* hard float doubles: spill to the param area and reload both words */
1263 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORER8_MEMBASE_REG, ARMREG_SP, (cfg->param_area - 8), in->dreg);
1264 creg = mono_alloc_ireg (cfg);
1265 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOAD_MEMBASE, creg, ARMREG_SP, (cfg->param_area - 8));
1266 mono_call_inst_add_outarg_reg (cfg, call, creg, ainfo->reg, FALSE);
1267 creg = mono_alloc_ireg (cfg);
1268 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOAD_MEMBASE, creg, ARMREG_SP, (cfg->param_area - 8 + 4));
1269 mono_call_inst_add_outarg_reg (cfg, call, creg, ainfo->reg + 1, FALSE);
1272 cfg->flags |= MONO_CFG_HAS_FPOUT;
/* plain 32 bit value: simple move into the argument register */
1274 MONO_INST_NEW (cfg, ins, OP_MOVE);
1275 ins->dreg = mono_alloc_ireg (cfg);
1276 ins->sreg1 = in->dreg;
1277 MONO_ADD_INS (cfg->cbb, ins);
1279 mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, ainfo->reg, FALSE);
1282 case RegTypeStructByAddr:
1285 /* FIXME: where is the data allocated? */
1286 arg->backend.reg3 = ainfo->reg;
1287 call->used_iregs |= 1 << ainfo->reg;
1288 g_assert_not_reached ();
1291 case RegTypeStructByVal:
1292 MONO_INST_NEW (cfg, ins, OP_OUTARG_VT);
1293 ins->opcode = OP_OUTARG_VT;
1294 ins->sreg1 = in->dreg;
1295 ins->klass = in->klass;
1296 ins->inst_p0 = call;
/* snapshot the ArgInfo — cinfo itself does not outlive this function */
1297 ins->inst_p1 = mono_mempool_alloc (cfg->mempool, sizeof (ArgInfo));
1298 memcpy (ins->inst_p1, ainfo, sizeof (ArgInfo));
1299 MONO_ADD_INS (cfg->cbb, ins);
1302 if (!t->byref && ((t->type == MONO_TYPE_I8) || (t->type == MONO_TYPE_U8))) {
1303 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, ARMREG_SP, ainfo->offset, in->dreg);
1304 } else if (!t->byref && ((t->type == MONO_TYPE_R4) || (t->type == MONO_TYPE_R8))) {
1305 if (t->type == MONO_TYPE_R8) {
1306 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORER8_MEMBASE_REG, ARMREG_SP, ainfo->offset, in->dreg);
1308 #ifdef MONO_ARCH_SOFT_FLOAT
1309 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, ARMREG_SP, ainfo->offset, in->dreg);
1311 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORER4_MEMBASE_REG, ARMREG_SP, ainfo->offset, in->dreg);
1315 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, ARMREG_SP, ainfo->offset, in->dreg);
1318 case RegTypeBaseGen:
/* 64 bit value split across r3 and the first stack slot */
1319 if (!t->byref && ((t->type == MONO_TYPE_I8) || (t->type == MONO_TYPE_U8))) {
1320 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, ARMREG_SP, ainfo->offset, (G_BYTE_ORDER == G_BIG_ENDIAN) ? in->dreg + 1 : in->dreg + 2);
1321 MONO_INST_NEW (cfg, ins, OP_MOVE);
1322 ins->dreg = mono_alloc_ireg (cfg);
1323 ins->sreg1 = G_BYTE_ORDER == G_BIG_ENDIAN ? in->dreg + 2 : in->dreg + 1;
1324 MONO_ADD_INS (cfg->cbb, ins);
1325 mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, ARMREG_R3, FALSE);
1326 } else if (!t->byref && (t->type == MONO_TYPE_R8)) {
1329 #ifdef MONO_ARCH_SOFT_FLOAT
1330 g_assert_not_reached ();
1333 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORER8_MEMBASE_REG, ARMREG_SP, (cfg->param_area - 8), in->dreg);
1334 creg = mono_alloc_ireg (cfg);
1335 mono_call_inst_add_outarg_reg (cfg, call, creg, ARMREG_R3, FALSE);
1336 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOAD_MEMBASE, creg, ARMREG_SP, (cfg->param_area - 8));
1337 creg = mono_alloc_ireg (cfg);
1338 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOAD_MEMBASE, creg, ARMREG_SP, (cfg->param_area - 4));
1339 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, ARMREG_SP, ainfo->offset, creg);
1340 cfg->flags |= MONO_CFG_HAS_FPOUT;
1342 g_assert_not_reached ();
1349 arg->backend.reg3 = ainfo->reg;
1350 /* FP args are passed in int regs */
1351 call->used_iregs |= 1 << ainfo->reg;
1352 if (ainfo->size == 8) {
1353 arg->opcode = OP_OUTARG_R8;
1354 call->used_iregs |= 1 << (ainfo->reg + 1);
1356 arg->opcode = OP_OUTARG_R4;
1359 cfg->flags |= MONO_CFG_HAS_FPOUT;
1363 g_assert_not_reached ();
/* pass the hidden valuetype return-buffer address in the ret register */
1367 if (sig->ret && MONO_TYPE_ISSTRUCT (sig->ret)) {
1370 MONO_INST_NEW (cfg, vtarg, OP_MOVE);
1371 vtarg->sreg1 = call->vret_var->dreg;
1372 vtarg->dreg = mono_alloc_preg (cfg);
1373 MONO_ADD_INS (cfg->cbb, vtarg);
1375 mono_call_inst_add_outarg_reg (cfg, call, vtarg->dreg, cinfo->ret.reg, FALSE);
1378 call->stack_usage = cinfo->stack_usage;
/*
 * mono_arch_emit_outarg_vt:
 * Emit the loads/moves that pass a valuetype argument for an outgoing call.
 * The ArgInfo stashed in ins->inst_p1 (by the call-argument setup code)
 * says how many pointer-sized words go in registers (ainfo->size) and how
 * many overflow to the stack area (ainfo->vtsize, copied below).
 * NOTE(review): several original source lines are elided in this view
 * (e.g. the initialization of 'soffset'); comments describe only what
 * the visible code shows.
 */
1384 mono_arch_emit_outarg_vt (MonoCompile *cfg, MonoInst *ins, MonoInst *src)
1386 MonoCallInst *call = (MonoCallInst*)ins->inst_p0;
1387 ArgInfo *ainfo = ins->inst_p1;
1388 int ovf_size = ainfo->vtsize;
1389 int doffset = ainfo->offset;
1390 int i, soffset, dreg;
/* First ainfo->size words of the struct are loaded into consecutive
 * integer argument registers starting at ainfo->reg. */
1393 for (i = 0; i < ainfo->size; ++i) {
1394 dreg = mono_alloc_ireg (cfg);
1395 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, dreg, src->dreg, soffset);
1396 mono_call_inst_add_outarg_reg (cfg, call, dreg, ainfo->reg + i, FALSE);
1397 soffset += sizeof (gpointer);
1399 //g_print ("vt size: %d at R%d + %d\n", doffset, vt->inst_basereg, vt->inst_offset);
/* Remaining ovf_size words spill to the outgoing stack area at SP+doffset. */
1401 mini_emit_memcpy2 (cfg, ARMREG_SP, doffset, src->dreg, soffset, ovf_size * sizeof (gpointer), 0);
/*
 * mono_arch_emit_setret:
 * Emit IR that moves the method's return value into the ABI return
 * location. I8/U8 returns use OP_SETLRET with the two halves of the
 * vreg pair; under MONO_ARCH_SOFT_FLOAT, R8 uses OP_SETFRET and R4 is
 * already an int (converted in method_to_ir), so a plain move suffices.
 * NOTE(review): intervening lines (returns, #else/#endif) are elided here.
 */
1405 mono_arch_emit_setret (MonoCompile *cfg, MonoMethod *method, MonoInst *val)
1407 MonoType *ret = mini_type_get_underlying_type (cfg->generic_sharing_context, mono_method_signature (method)->ret);
1410 if (ret->type == MONO_TYPE_I8 || ret->type == MONO_TYPE_U8) {
/* 64-bit result: low/high words live in val->dreg + 1 / + 2. */
1413 MONO_INST_NEW (cfg, ins, OP_SETLRET);
1414 ins->sreg1 = val->dreg + 1;
1415 ins->sreg2 = val->dreg + 2;
1416 MONO_ADD_INS (cfg->cbb, ins);
1419 #ifdef MONO_ARCH_SOFT_FLOAT
1420 if (ret->type == MONO_TYPE_R8) {
1423 MONO_INST_NEW (cfg, ins, OP_SETFRET);
1424 ins->dreg = cfg->ret->dreg;
1425 ins->sreg1 = val->dreg;
1426 MONO_ADD_INS (cfg->cbb, ins);
1429 if (ret->type == MONO_TYPE_R4) {
1430 /* Already converted to an int in method_to_ir () */
1431 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, cfg->ret->dreg, val->dreg);
/* Default: plain move into the return vreg. */
1438 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, cfg->ret->dreg, val->dreg);
1442 mono_arch_is_inst_imm (gint64 imm)
1448 * Allow tracing to work with this interface (with an optional argument)
/*
 * mono_arch_instrument_prolog:
 * Emit the enter-method trace call: r0 = method, r1 = 0 (no frame
 * pointer yet), r2 = trace function, then call through r2.
 */
1452 mono_arch_instrument_prolog (MonoCompile *cfg, void *func, void *p, gboolean enable_arguments)
1456 code = mono_arm_emit_load_imm (code, ARMREG_R0, (guint32)cfg->method);
1457 ARM_MOV_REG_IMM8 (code, ARMREG_R1, 0); /* NULL ebp for now */
1458 code = mono_arm_emit_load_imm (code, ARMREG_R2, (guint32)func);
1459 code = emit_call_reg (code, ARMREG_R2);
/*
 * mono_arch_instrument_epilog:
 * Emit the leave-method trace call. The return value (r0/r1 or the FP
 * result) is saved into the parameter area before calling 'func' and
 * restored afterwards, selected by save_mode from the return type.
 * NOTE(review): many case labels/breaks are elided in this view.
 */
1472 mono_arch_instrument_epilog (MonoCompile *cfg, void *func, void *p, gboolean enable_arguments)
1475 int save_mode = SAVE_NONE;
1477 MonoMethod *method = cfg->method;
1478 int rtype = mini_type_get_underlying_type (cfg->generic_sharing_context, mono_method_signature (method)->ret)->type;
1479 int save_offset = cfg->param_area;
/* Grow the native code buffer if fewer than ~16 instructions of room remain. */
1483 offset = code - cfg->native_code;
1484 /* we need about 16 instructions */
1485 if (offset > (cfg->code_size - 16 * 4)) {
1486 cfg->code_size *= 2;
1487 cfg->native_code = g_realloc (cfg->native_code, cfg->code_size);
1488 code = cfg->native_code + offset;
1491 case MONO_TYPE_VOID:
1492 /* special case string .ctor icall */
/* NOTE(review): upstream uses !strcmp here — as written this saves R0
 * only when the name is NOT ".ctor", which looks inverted; verify. */
1493 if (strcmp (".ctor", method->name) && method->klass == mono_defaults.string_class)
1494 save_mode = SAVE_ONE;
1496 save_mode = SAVE_NONE;
1500 save_mode = SAVE_TWO;
1504 save_mode = SAVE_FP;
1506 case MONO_TYPE_VALUETYPE:
1507 save_mode = SAVE_STRUCT;
1510 save_mode = SAVE_ONE;
/* Spill the live return value around the trace call. */
1514 switch (save_mode) {
1516 ARM_STR_IMM (code, ARMREG_R0, cfg->frame_reg, save_offset);
1517 ARM_STR_IMM (code, ARMREG_R1, cfg->frame_reg, save_offset + 4);
1518 if (enable_arguments) {
1519 ARM_MOV_REG_REG (code, ARMREG_R2, ARMREG_R1);
1520 ARM_MOV_REG_REG (code, ARMREG_R1, ARMREG_R0);
1524 ARM_STR_IMM (code, ARMREG_R0, cfg->frame_reg, save_offset);
1525 if (enable_arguments) {
1526 ARM_MOV_REG_REG (code, ARMREG_R1, ARMREG_R0);
1530 /* FIXME: what reg? */
1531 if (enable_arguments) {
1532 /* FIXME: what reg? */
1536 if (enable_arguments) {
1537 /* FIXME: get the actual address */
1538 ARM_MOV_REG_REG (code, ARMREG_R1, ARMREG_R0);
/* r0 = method, ip = trace function, call it. */
1546 code = mono_arm_emit_load_imm (code, ARMREG_R0, (guint32)cfg->method);
1547 code = mono_arm_emit_load_imm (code, ARMREG_IP, (guint32)func);
1548 code = emit_call_reg (code, ARMREG_IP);
/* Reload the saved return value. */
1550 switch (save_mode) {
1552 ARM_LDR_IMM (code, ARMREG_R0, cfg->frame_reg, save_offset);
1553 ARM_LDR_IMM (code, ARMREG_R1, cfg->frame_reg, save_offset + 4);
1556 ARM_LDR_IMM (code, ARMREG_R0, cfg->frame_reg, save_offset);
1570 * The immediate field for cond branches is big enough for all reasonable methods
/* Emit a conditional branch for 'ins'. The non-zero-offset fast paths are
 * disabled ("0 &&"); in practice a patch record (LABEL or BB) is always
 * added and the branch displacement is fixed up later by arm_patch. */
1572 #define EMIT_COND_BRANCH_FLAGS(ins,condcode) \
1573 if (ins->flags & MONO_INST_BRLABEL) { \
1574 if (0 && ins->inst_i0->inst_c0) { \
1575 ARM_B_COND (code, (condcode), (code - cfg->native_code + ins->inst_i0->inst_c0) & 0xffffff); \
1577 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_LABEL, ins->inst_i0); \
1578 ARM_B_COND (code, (condcode), 0); \
1581 if (0 && ins->inst_true_bb->native_offset) { \
1582 ARM_B_COND (code, (condcode), (code - cfg->native_code + ins->inst_true_bb->native_offset) & 0xffffff); \
1584 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_BB, ins->inst_true_bb); \
1585 ARM_B_COND (code, (condcode), 0); \
1589 #define EMIT_COND_BRANCH(ins,cond) EMIT_COND_BRANCH_FLAGS(ins, branch_cc_table [(cond)])
1591 /* emit an exception if condition is fail
1593 * We assign the extra code used to throw the implicit exceptions
1594 * to cfg->bb_exit as far as the big branch handling is concerned
1596 #define EMIT_COND_SYSTEM_EXCEPTION_FLAGS(condcode,exc_name) \
1598 mono_add_patch_info (cfg, code - cfg->native_code, \
1599 MONO_PATCH_INFO_EXC, exc_name); \
1600 ARM_BL_COND (code, (condcode), 0); \
1603 #define EMIT_COND_SYSTEM_EXCEPTION(cond,exc_name) EMIT_COND_SYSTEM_EXCEPTION_FLAGS(branch_cc_table [(cond)], (exc_name))
1606 mono_arch_peephole_pass_1 (MonoCompile *cfg, MonoBasicBlock *bb)
/*
 * mono_arch_peephole_pass_2:
 * Local peephole pass over one basic block: folds trivial multiplies,
 * forwards values across store/load pairs to the same [basereg+offset],
 * collapses redundant loads, and deletes no-op / mutually-cancelling moves.
 * 'last_ins' tracks the previous surviving instruction for pairwise rules.
 * NOTE(review): case labels and break statements are elided in this view.
 */
1611 mono_arch_peephole_pass_2 (MonoCompile *cfg, MonoBasicBlock *bb)
1613 MonoInst *ins, *n, *last_ins = NULL;
1615 MONO_BB_FOR_EACH_INS_SAFE (bb, n, ins) {
1616 switch (ins->opcode) {
1619 /* Already done by an arch-independent pass */
1623 /* remove unnecessary multiplication with 1 */
1624 if (ins->inst_imm == 1) {
1625 if (ins->dreg != ins->sreg1) {
1626 ins->opcode = OP_MOVE;
1628 MONO_DELETE_INS (bb, ins);
/* mul by power of two -> left shift */
1632 int power2 = mono_is_power_of_two (ins->inst_imm);
1634 ins->opcode = OP_SHL_IMM;
1635 ins->inst_imm = power2;
1639 case OP_LOAD_MEMBASE:
1640 case OP_LOADI4_MEMBASE:
1642 * OP_STORE_MEMBASE_REG reg, offset(basereg)
1643 * OP_LOAD_MEMBASE offset(basereg), reg
1645 if (last_ins && (last_ins->opcode == OP_STOREI4_MEMBASE_REG
1646 || last_ins->opcode == OP_STORE_MEMBASE_REG) &&
1647 ins->inst_basereg == last_ins->inst_destbasereg &&
1648 ins->inst_offset == last_ins->inst_offset) {
1649 if (ins->dreg == last_ins->sreg1) {
1650 MONO_DELETE_INS (bb, ins);
1653 //static int c = 0; printf ("MATCHX %s %d\n", cfg->method->name,c++);
1654 ins->opcode = OP_MOVE;
1655 ins->sreg1 = last_ins->sreg1;
1659 * Note: reg1 must be different from the basereg in the second load
1660 * OP_LOAD_MEMBASE offset(basereg), reg1
1661 * OP_LOAD_MEMBASE offset(basereg), reg2
1663 * OP_LOAD_MEMBASE offset(basereg), reg1
1664 * OP_MOVE reg1, reg2
/* NOTE(review): "} if" (no 'else') below matches the historical upstream
 * text; both branches can run for the same ins — verify intent. */
1666 } if (last_ins && (last_ins->opcode == OP_LOADI4_MEMBASE
1667 || last_ins->opcode == OP_LOAD_MEMBASE) &&
1668 ins->inst_basereg != last_ins->dreg &&
1669 ins->inst_basereg == last_ins->inst_basereg &&
1670 ins->inst_offset == last_ins->inst_offset) {
1672 if (ins->dreg == last_ins->dreg) {
1673 MONO_DELETE_INS (bb, ins);
1676 ins->opcode = OP_MOVE;
1677 ins->sreg1 = last_ins->dreg;
1680 //g_assert_not_reached ();
1684 * OP_STORE_MEMBASE_IMM imm, offset(basereg)
1685 * OP_LOAD_MEMBASE offset(basereg), reg
1687 * OP_STORE_MEMBASE_IMM imm, offset(basereg)
1688 * OP_ICONST reg, imm
1690 } else if (last_ins && (last_ins->opcode == OP_STOREI4_MEMBASE_IMM
1691 || last_ins->opcode == OP_STORE_MEMBASE_IMM) &&
1692 ins->inst_basereg == last_ins->inst_destbasereg &&
1693 ins->inst_offset == last_ins->inst_offset) {
1694 //static int c = 0; printf ("MATCHX %s %d\n", cfg->method->name,c++);
1695 ins->opcode = OP_ICONST;
1696 ins->inst_c0 = last_ins->inst_imm;
1697 g_assert_not_reached (); // check this rule
1701 case OP_LOADU1_MEMBASE:
1702 case OP_LOADI1_MEMBASE:
/* Byte store followed by byte load of same slot -> sign/zero extend the
 * stored register instead of reloading. */
1703 if (last_ins && (last_ins->opcode == OP_STOREI1_MEMBASE_REG) &&
1704 ins->inst_basereg == last_ins->inst_destbasereg &&
1705 ins->inst_offset == last_ins->inst_offset) {
1706 ins->opcode = (ins->opcode == OP_LOADI1_MEMBASE) ? OP_ICONV_TO_I1 : OP_ICONV_TO_U1;
1707 ins->sreg1 = last_ins->sreg1;
1710 case OP_LOADU2_MEMBASE:
1711 case OP_LOADI2_MEMBASE:
1712 if (last_ins && (last_ins->opcode == OP_STOREI2_MEMBASE_REG) &&
1713 ins->inst_basereg == last_ins->inst_destbasereg &&
1714 ins->inst_offset == last_ins->inst_offset) {
1715 ins->opcode = (ins->opcode == OP_LOADI2_MEMBASE) ? OP_ICONV_TO_I2 : OP_ICONV_TO_U2;
1716 ins->sreg1 = last_ins->sreg1;
1720 ins->opcode = OP_MOVE;
/* Drop self-moves and a move immediately undone by its inverse. */
1724 if (ins->dreg == ins->sreg1) {
1725 MONO_DELETE_INS (bb, ins);
1729 * OP_MOVE sreg, dreg
1730 * OP_MOVE dreg, sreg
1732 if (last_ins && last_ins->opcode == OP_MOVE &&
1733 ins->sreg1 == last_ins->dreg &&
1734 ins->dreg == last_ins->sreg1) {
1735 MONO_DELETE_INS (bb, ins);
1743 bb->last_ins = last_ins;
1747 * the branch_cc_table should maintain the order of these
/* Maps Mono condition indices to ARM condition codes (entries elided). */
1761 branch_cc_table [] = {
/* Allocate a new MonoInst and insert it before 'ins' in the current bb
 * (relies on 'bb' and 'ins' being in scope at the expansion site). */
1775 #define NEW_INS(cfg,dest,op) do { \
1776 MONO_INST_NEW ((cfg), (dest), (op)); \
1777 mono_bblock_insert_before_ins (bb, ins, (dest)); \
/*
 * map_to_reg_reg_op:
 * Translate an opcode with an immediate or membase operand into the
 * equivalent register-register / memindex form, used by the lowering
 * pass after the immediate has been loaded into a register.
 * Asserts on opcodes with no mapping. (Some cases elided in this view.)
 */
1781 map_to_reg_reg_op (int op)
1790 case OP_COMPARE_IMM:
1792 case OP_ICOMPARE_IMM:
/* Loads: membase -> memindex. */
1806 case OP_LOAD_MEMBASE:
1807 return OP_LOAD_MEMINDEX;
1808 case OP_LOADI4_MEMBASE:
1809 return OP_LOADI4_MEMINDEX;
1810 case OP_LOADU4_MEMBASE:
1811 return OP_LOADU4_MEMINDEX;
1812 case OP_LOADU1_MEMBASE:
1813 return OP_LOADU1_MEMINDEX;
1814 case OP_LOADI2_MEMBASE:
1815 return OP_LOADI2_MEMINDEX;
1816 case OP_LOADU2_MEMBASE:
1817 return OP_LOADU2_MEMINDEX;
1818 case OP_LOADI1_MEMBASE:
1819 return OP_LOADI1_MEMINDEX;
/* Stores: membase+reg -> memindex. */
1820 case OP_STOREI1_MEMBASE_REG:
1821 return OP_STOREI1_MEMINDEX;
1822 case OP_STOREI2_MEMBASE_REG:
1823 return OP_STOREI2_MEMINDEX;
1824 case OP_STOREI4_MEMBASE_REG:
1825 return OP_STOREI4_MEMINDEX;
1826 case OP_STORE_MEMBASE_REG:
1827 return OP_STORE_MEMINDEX;
1828 case OP_STORER4_MEMBASE_REG:
1829 return OP_STORER4_MEMINDEX;
1830 case OP_STORER8_MEMBASE_REG:
1831 return OP_STORER8_MEMINDEX;
/* Stores of immediates: first step down to the _REG form. */
1832 case OP_STORE_MEMBASE_IMM:
1833 return OP_STORE_MEMBASE_REG;
1834 case OP_STOREI1_MEMBASE_IMM:
1835 return OP_STOREI1_MEMBASE_REG;
1836 case OP_STOREI2_MEMBASE_IMM:
1837 return OP_STOREI2_MEMBASE_REG;
1838 case OP_STOREI4_MEMBASE_IMM:
1839 return OP_STOREI4_MEMBASE_REG;
1841 g_assert_not_reached ();
1845 * Remove from the instruction list the instructions that can't be
1846 * represented with very simple instructions with no register
/*
 * mono_arch_lowering_pass:
 * Rewrite instructions whose immediates/offsets don't fit ARM's encodings:
 * immediates that aren't a rotated 8-bit value get materialized into a
 * fresh vreg via OP_ICONST, and membase ops with out-of-range offsets are
 * converted to the memindex forms via map_to_reg_reg_op. FP membase ops
 * (no indexed form on FPA/VFP) instead fold the high bits of the offset
 * into the base register with an ADD_IMM.
 * NOTE(review): many case labels, breaks and #ifdef lines are elided.
 */
1850 mono_arch_lowering_pass (MonoCompile *cfg, MonoBasicBlock *bb)
1852 MonoInst *ins, *temp, *last_ins = NULL;
1853 int rot_amount, imm8, low_imm;
1855 /* setup the virtual reg allocator */
1856 if (bb->max_vreg > cfg->rs->next_vreg)
1857 cfg->rs->next_vreg = bb->max_vreg;
1859 MONO_BB_FOR_EACH_INS (bb, ins) {
1861 switch (ins->opcode) {
1865 case OP_COMPARE_IMM:
1866 case OP_ICOMPARE_IMM:
/* Immediate doesn't fit a rotated imm8: load it into a temp vreg and
 * switch to the register-register opcode. */
1880 if ((imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount)) < 0) {
1881 NEW_INS (cfg, temp, OP_ICONST);
1882 temp->inst_c0 = ins->inst_imm;
1883 temp->dreg = mono_regstate_next_int (cfg->rs);
1884 ins->sreg2 = temp->dreg;
1886 ins->opcode = mono_op_imm_to_op (ins->opcode);
1888 ins->opcode = map_to_reg_reg_op (ins->opcode);
/* Multiply-by-immediate special cases: 1 -> move, 0 -> const,
 * power of two -> shift, otherwise materialize and use OP_IMUL. */
1893 if (ins->inst_imm == 1) {
1894 ins->opcode = OP_MOVE;
1897 if (ins->inst_imm == 0) {
1898 ins->opcode = OP_ICONST;
1902 imm8 = mono_is_power_of_two (ins->inst_imm);
1904 ins->opcode = OP_SHL_IMM;
1905 ins->inst_imm = imm8;
1908 NEW_INS (cfg, temp, OP_ICONST);
1909 temp->inst_c0 = ins->inst_imm;
1910 temp->dreg = mono_regstate_next_int (cfg->rs);
1911 ins->sreg2 = temp->dreg;
1912 ins->opcode = OP_IMUL;
1914 case OP_LOCALLOC_IMM:
1915 NEW_INS (cfg, temp, OP_ICONST);
1916 temp->inst_c0 = ins->inst_imm;
1917 temp->dreg = mono_regstate_next_int (cfg->rs);
1918 ins->sreg1 = temp->dreg;
1919 ins->opcode = OP_LOCALLOC;
1921 case OP_LOAD_MEMBASE:
1922 case OP_LOADI4_MEMBASE:
1923 case OP_LOADU4_MEMBASE:
1924 case OP_LOADU1_MEMBASE:
1925 /* we can do two things: load the immed in a register
1926 * and use an indexed load, or see if the immed can be
1927 * represented as an ad_imm + a load with a smaller offset
1928 * that fits. We just do the first for now, optimize later.
1930 if (arm_is_imm12 (ins->inst_offset))
1932 NEW_INS (cfg, temp, OP_ICONST);
1933 temp->inst_c0 = ins->inst_offset;
1934 temp->dreg = mono_regstate_next_int (cfg->rs);
1935 ins->sreg2 = temp->dreg;
1936 ins->opcode = map_to_reg_reg_op (ins->opcode);
/* Halfword/signed-byte loads only encode an 8-bit offset. */
1938 case OP_LOADI2_MEMBASE:
1939 case OP_LOADU2_MEMBASE:
1940 case OP_LOADI1_MEMBASE:
1941 if (arm_is_imm8 (ins->inst_offset))
1943 NEW_INS (cfg, temp, OP_ICONST);
1944 temp->inst_c0 = ins->inst_offset;
1945 temp->dreg = mono_regstate_next_int (cfg->rs);
1946 ins->sreg2 = temp->dreg;
1947 ins->opcode = map_to_reg_reg_op (ins->opcode);
1949 case OP_LOADR4_MEMBASE:
1950 case OP_LOADR8_MEMBASE:
1951 if (arm_is_fpimm8 (ins->inst_offset))
/* Fold offset high bits into the base reg; keep the low 9 bits. */
1953 low_imm = ins->inst_offset & 0x1ff;
1954 if ((imm8 = mono_arm_is_rotated_imm8 (ins->inst_offset & ~0x1ff, &rot_amount)) >= 0) {
1955 NEW_INS (cfg, temp, OP_ADD_IMM);
1956 temp->inst_imm = ins->inst_offset & ~0x1ff;
1957 temp->sreg1 = ins->inst_basereg;
1958 temp->dreg = mono_regstate_next_int (cfg->rs);
1959 ins->inst_basereg = temp->dreg;
1960 ins->inst_offset = low_imm;
1963 /* VFP/FPA doesn't have indexed load instructions */
1964 g_assert_not_reached ();
1966 case OP_STORE_MEMBASE_REG:
1967 case OP_STOREI4_MEMBASE_REG:
1968 case OP_STOREI1_MEMBASE_REG:
1969 if (arm_is_imm12 (ins->inst_offset))
1971 NEW_INS (cfg, temp, OP_ICONST);
1972 temp->inst_c0 = ins->inst_offset;
1973 temp->dreg = mono_regstate_next_int (cfg->rs);
1974 ins->sreg2 = temp->dreg;
1975 ins->opcode = map_to_reg_reg_op (ins->opcode);
1977 case OP_STOREI2_MEMBASE_REG:
1978 if (arm_is_imm8 (ins->inst_offset))
1980 NEW_INS (cfg, temp, OP_ICONST);
1981 temp->inst_c0 = ins->inst_offset;
1982 temp->dreg = mono_regstate_next_int (cfg->rs);
1983 ins->sreg2 = temp->dreg;
1984 ins->opcode = map_to_reg_reg_op (ins->opcode);
1986 case OP_STORER4_MEMBASE_REG:
1987 case OP_STORER8_MEMBASE_REG:
1988 if (arm_is_fpimm8 (ins->inst_offset))
1990 low_imm = ins->inst_offset & 0x1ff;
1991 if ((imm8 = mono_arm_is_rotated_imm8 (ins->inst_offset & ~ 0x1ff, &rot_amount)) >= 0 && arm_is_fpimm8 (low_imm)) {
1992 NEW_INS (cfg, temp, OP_ADD_IMM);
1993 temp->inst_imm = ins->inst_offset & ~0x1ff;
1994 temp->sreg1 = ins->inst_destbasereg;
1995 temp->dreg = mono_regstate_next_int (cfg->rs);
1996 ins->inst_destbasereg = temp->dreg;
1997 ins->inst_offset = low_imm;
2000 /*g_print ("fail with: %d (%d, %d)\n", ins->inst_offset, ins->inst_offset & ~0x1ff, low_imm);*/
2001 /* VFP/FPA doesn't have indexed store instructions */
2002 g_assert_not_reached ();
/* Store-immediate: materialize the value, then retry as _REG store
 * (loop_start also rechecks the possibly out-of-range offset). */
2004 case OP_STORE_MEMBASE_IMM:
2005 case OP_STOREI1_MEMBASE_IMM:
2006 case OP_STOREI2_MEMBASE_IMM:
2007 case OP_STOREI4_MEMBASE_IMM:
2008 NEW_INS (cfg, temp, OP_ICONST);
2009 temp->inst_c0 = ins->inst_imm;
2010 temp->dreg = mono_regstate_next_int (cfg->rs);
2011 ins->sreg1 = temp->dreg;
2012 ins->opcode = map_to_reg_reg_op (ins->opcode);
2014 goto loop_start; /* make it handle the possibly big ins->inst_offset */
2016 gboolean swap = FALSE;
2019 /* Some fp compares require swapped operands */
2020 g_assert (ins->next);
2021 switch (ins->next->opcode) {
2023 ins->next->opcode = OP_FBLT;
2027 ins->next->opcode = OP_FBLT_UN;
2031 ins->next->opcode = OP_FBGE;
2035 ins->next->opcode = OP_FBGE_UN;
2043 ins->sreg1 = ins->sreg2;
2052 bb->last_ins = last_ins;
2053 bb->max_vreg = cfg->rs->next_vreg;
/*
 * emit_float_to_int:
 * Convert the float in 'sreg' to an integer in 'dreg', then truncate to
 * 'size' bytes with sign (SAR) or zero (SHR/AND) extension per 'is_signed'.
 * FPA uses FIXZ; VFP converts through ARM_VFP_F0 with TOSIZD/TOUIZD.
 * NOTE(review): #ifdef/else/brace lines are elided in this view.
 */
2058 emit_float_to_int (MonoCompile *cfg, guchar *code, int dreg, int sreg, int size, gboolean is_signed)
2060 /* sreg is a float, dreg is an integer reg */
2062 ARM_FIXZ (code, dreg, sreg);
2063 #elif defined(ARM_FPU_VFP)
2065 ARM_TOSIZD (code, ARM_VFP_F0, sreg);
2067 ARM_TOUIZD (code, ARM_VFP_F0, sreg);
2068 ARM_FMRS (code, dreg, ARM_VFP_F0);
/* Unsigned narrowing: mask byte / logical-shift halfword. */
2072 ARM_AND_REG_IMM8 (code, dreg, dreg, 0xff);
2073 else if (size == 2) {
2074 ARM_SHL_IMM (code, dreg, dreg, 16);
2075 ARM_SHR_IMM (code, dreg, dreg, 16);
/* Signed narrowing: shift up then arithmetic-shift back down. */
2079 ARM_SHL_IMM (code, dreg, dreg, 24);
2080 ARM_SAR_IMM (code, dreg, dreg, 24);
2081 } else if (size == 2) {
2082 ARM_SHL_IMM (code, dreg, dreg, 16);
2083 ARM_SAR_IMM (code, dreg, dreg, 16);
/* Field of the PatchData struct (struct head elided in this view). */
2091 const guchar *target;
/* True if 'diff' fits the signed 26-bit (+/-32MB) displacement of an
 * ARM B/BL instruction. */
2096 #define is_call_imm(diff) ((gint)(diff) >= -33554432 && (gint)(diff) <= 33554431)
/*
 * search_thunk_slot:
 * mono_code_manager_foreach callback: within one code chunk, find (or
 * allocate in the chunk's thunk area) a 12-byte thunk that jumps to
 * pdata->target, then patch the call at pdata->code to branch to it.
 * Thunk layout: ldr ip,[pc] / bx|mov pc,ip / target word (thunks[2]).
 * NOTE(review): return statements, pdata->found updates and some braces
 * are elided in this view.
 */
2099 search_thunk_slot (void *data, int csize, int bsize, void *user_data) {
2100 PatchData *pdata = (PatchData*)user_data;
2101 guchar *code = data;
2102 guint32 *thunks = data;
2103 guint32 *endthunks = (guint32*)(code + bsize);
2105 int difflow, diffhigh;
2107 /* always ensure a call from pdata->code can reach to the thunks without further thunks */
2108 difflow = (char*)pdata->code - (char*)thunks;
2109 diffhigh = (char*)pdata->code - (char*)endthunks;
2110 if (!((is_call_imm (thunks) && is_call_imm (endthunks)) || (is_call_imm (difflow) && is_call_imm (diffhigh))))
2114 * The thunk is composed of 3 words:
2115 * load constant from thunks [2] into ARM_IP
2118 * Note that the LR register is already setup
2120 //g_print ("thunk nentries: %d\n", ((char*)endthunks - (char*)thunks)/16);
2121 if ((pdata->found == 2) || (pdata->code >= code && pdata->code <= code + csize)) {
2122 while (thunks < endthunks) {
2123 //g_print ("looking for target: %p at %p (%08x-%08x)\n", pdata->target, thunks, thunks [0], thunks [1]);
/* Reuse an existing thunk that already points at the target. */
2124 if (thunks [2] == (guint32)pdata->target) {
2125 arm_patch (pdata->code, (guchar*)thunks);
2126 mono_arch_flush_icache (pdata->code, 4);
2129 } else if ((thunks [0] == 0) && (thunks [1] == 0) && (thunks [2] == 0)) {
2130 /* found a free slot instead: emit thunk */
2131 /* ARMREG_IP is fine to use since this can't be an IMT call
2134 code = (guchar*)thunks;
2135 ARM_LDR_IMM (code, ARMREG_IP, ARMREG_PC, 0);
2136 if (thumb_supported)
2137 ARM_BX (code, ARMREG_IP);
2139 ARM_MOV_REG_REG (code, ARMREG_PC, ARMREG_IP);
2140 thunks [2] = (guint32)pdata->target;
2141 mono_arch_flush_icache ((guchar*)thunks, 12);
2143 arm_patch (pdata->code, (guchar*)thunks);
2144 mono_arch_flush_icache (pdata->code, 4);
2148 /* skip 12 bytes, the size of the thunk */
2152 //g_print ("failed thunk lookup for %p from %p at %p (%d entries)\n", pdata->target, pdata->code, data, count);
/*
 * handle_thunk:
 * Patch 'code' to reach 'target' via a branch thunk when the target is
 * out of direct-branch range. Walks the domain's code chunks twice under
 * the domain lock: first preferring a chunk near the call site, then
 * (pdata.found == 2 mode, setup elided) taking any free slot. Asserts if
 * no thunk could be found or created.
 */
2158 handle_thunk (int absolute, guchar *code, const guchar *target) {
2159 MonoDomain *domain = mono_domain_get ();
2163 pdata.target = target;
2164 pdata.absolute = absolute;
2167 mono_domain_lock (domain);
2168 mono_code_manager_foreach (domain->code_mp, search_thunk_slot, &pdata);
2171 /* this uses the first available slot */
2173 mono_code_manager_foreach (domain->code_mp, search_thunk_slot, &pdata);
2175 mono_domain_unlock (domain);
2177 if (pdata.found != 1)
2178 g_print ("thunk failed for %p from %p\n", target, code);
2179 g_assert (pdata.found == 1);
/*
 * arm_patch:
 * Retarget the branch/call instruction at 'code' to 'target'.
 * - B/BL (prim == 101b): rewrite the 24-bit displacement in place; a
 *   target with the low bit set means Thumb, so BL is converted to BLX
 *   (requires thumb_supported). Out-of-range targets go through
 *   handle_thunk.
 * - Register-indirect sequences (BX / BLX / mov pc,ip): locate the
 *   embedded address constant relative to 'code' (layout differs when
 *   called at emission time vs. from the magic trampoline) and store the
 *   new target there.
 * NOTE(review): several lines (decls of ccode/tbits, returns, #endif-style
 * closers) are elided in this view.
 */
2183 arm_patch (guchar *code, const guchar *target)
2185 guint32 *code32 = (void*)code;
2186 guint32 ins = *code32;
2187 guint32 prim = (ins >> 25) & 7;
2188 guint32 tval = GPOINTER_TO_UINT (target);
2190 //g_print ("patching 0x%08x (0x%08x) to point to 0x%08x\n", code, ins, target);
2191 if (prim == 5) { /* 101b */
2192 /* the diff starts 8 bytes from the branch opcode */
2193 gint diff = target - code - 8;
2195 gint tmask = 0xffffffff;
2196 if (tval & 1) { /* entering thumb mode */
2197 diff = target - 1 - code - 8;
2198 g_assert (thumb_supported);
2199 tbits = 0xf << 28; /* bl->blx bit pattern */
2200 g_assert ((ins & (1 << 24))); /* it must be a bl, not b instruction */
2201 /* this low bit of the displacement is moved to bit 24 in the instruction encoding */
2205 tmask = ~(1 << 24); /* clear the link bit */
2206 /*g_print ("blx to thumb: target: %p, code: %p, diff: %d, mask: %x\n", target, code, diff, tmask);*/
/* Forward branch within +32MB. */
2211 if (diff <= 33554431) {
2213 ins = (ins & 0xff000000) | diff;
2215 *code32 = ins | tbits;
2219 /* diff between 0 and -33554432 */
2220 if (diff >= -33554432) {
2222 ins = (ins & 0xff000000) | (diff & ~0xff000000);
2224 *code32 = ins | tbits;
/* Out of direct-branch range: go through a thunk. */
2229 handle_thunk (TRUE, code, target);
2234 * The alternative call sequences looks like this:
2236 * ldr ip, [pc] // loads the address constant
2237 * b 1f // jumps around the constant
2238 * address constant embedded in the code
2243 * There are two cases for patching:
2244 * a) at the end of method emission: in this case code points to the start
2245 * of the call sequence
2246 * b) during runtime patching of the call site: in this case code points
2247 * to the mov pc, ip instruction
2249 * We have to handle also the thunk jump code sequence:
2253 * address constant // execution never reaches here
2255 if ((ins & 0x0ffffff0) == 0x12fff10) {
2256 /* Branch and exchange: the address is constructed in a reg
2257 * We can patch BX when the code sequence is the following:
2258 * ldr ip, [pc, #0] ; 0x8
2265 guint8 *emit = (guint8*)ccode;
2266 ARM_LDR_IMM (emit, ARMREG_IP, ARMREG_PC, 0);
2268 ARM_MOV_REG_REG (emit, ARMREG_LR, ARMREG_PC);
2269 ARM_BX (emit, ARMREG_IP);
2271 /*patching from magic trampoline*/
2272 if (ins == ccode [3]) {
2273 g_assert (code32 [-4] == ccode [0]);
2274 g_assert (code32 [-3] == ccode [1]);
2275 g_assert (code32 [-1] == ccode [2]);
2276 code32 [-2] = (guint32)target;
2279 /*patching from JIT*/
2280 if (ins == ccode [0]) {
2281 g_assert (code32 [1] == ccode [1]);
2282 g_assert (code32 [3] == ccode [2]);
2283 g_assert (code32 [4] == ccode [3]);
2284 code32 [2] = (guint32)target;
2287 g_assert_not_reached ();
2288 } else if ((ins & 0x0ffffff0) == 0x12fff30) {
/* BLX reg sequence: constant sits one word before the BLX. */
2296 guint8 *emit = (guint8*)ccode;
2297 ARM_LDR_IMM (emit, ARMREG_IP, ARMREG_PC, 0);
2299 ARM_BLX_REG (emit, ARMREG_IP);
2301 g_assert (code32 [-3] == ccode [0]);
2302 g_assert (code32 [-2] == ccode [1]);
2303 g_assert (code32 [0] == ccode [2]);
2305 code32 [-1] = (guint32)target;
/* Fallback: ldr/mov lr,pc/mov pc,ip (+ bx) far-call or thunk sequence. */
2308 guint32 *tmp = ccode;
2309 guint8 *emit = (guint8*)tmp;
2310 ARM_LDR_IMM (emit, ARMREG_IP, ARMREG_PC, 0);
2311 ARM_MOV_REG_REG (emit, ARMREG_LR, ARMREG_PC);
2312 ARM_MOV_REG_REG (emit, ARMREG_PC, ARMREG_IP);
2313 ARM_BX (emit, ARMREG_IP);
2314 if (ins == ccode [2]) {
2315 g_assert_not_reached (); // should be -2 ...
2316 code32 [-1] = (guint32)target;
2319 if (ins == ccode [0]) {
2320 /* handles both thunk jump code and the far call sequence */
2321 code32 [2] = (guint32)target;
2324 g_assert_not_reached ();
2326 // g_print ("patched with 0x%08x\n", ins);
2330 * Return the >= 0 uimm8 value if val can be represented with a byte + rotation
2331 * (with the rotation amount in *rot_amount. rot_amount is already adjusted
2332 * to be used with the emit macros.
2333 * Return -1 otherwise.
2336 mono_arm_is_rotated_imm8 (guint32 val, gint *rot_amount)
/* Try every even rotation (ARM immediates rotate by multiples of 2);
 * the fits-in-8-bits check and return are elided in this view. */
2339 for (i = 0; i < 31; i+= 2) {
2340 res = (val << (32 - i)) | (val >> i);
2343 *rot_amount = i? 32 - i: 0;
2350 * Emits in code a sequence of instructions that load the value 'val'
2351 * into the dreg register. Uses at most 4 instructions.
2354 mono_arm_emit_load_imm (guint8 *code, int dreg, guint32 val)
2356 int imm8, rot_amount;
/* (Elided path) constant-pool variant: ldr dreg, [pc] then skip the pool. */
2358 ARM_LDR_IMM (code, dreg, ARMREG_PC, 0);
2359 /* skip the constant pool */
/* Fast paths: val or ~val encodable as a rotated imm8 -> single MOV/MVN. */
2365 if ((imm8 = mono_arm_is_rotated_imm8 (val, &rot_amount)) >= 0) {
2366 ARM_MOV_REG_IMM (code, dreg, imm8, rot_amount);
2367 } else if ((imm8 = mono_arm_is_rotated_imm8 (~val, &rot_amount)) >= 0) {
2368 ARM_MVN_REG_IMM (code, dreg, imm8, rot_amount);
/* General case: build the value byte by byte, starting from the lowest
 * non-zero byte (MOV), adding the higher bytes with rotated ADDs. */
2371 ARM_MOV_REG_IMM8 (code, dreg, (val & 0xFF));
2373 ARM_ADD_REG_IMM (code, dreg, dreg, (val & 0xFF00) >> 8, 24);
2375 if (val & 0xFF0000) {
2376 ARM_ADD_REG_IMM (code, dreg, dreg, (val & 0xFF0000) >> 16, 16);
2378 if (val & 0xFF000000) {
2379 ARM_ADD_REG_IMM (code, dreg, dreg, (val & 0xFF000000) >> 24, 8);
2381 } else if (val & 0xFF00) {
2382 ARM_MOV_REG_IMM (code, dreg, (val & 0xFF00) >> 8, 24);
2383 if (val & 0xFF0000) {
2384 ARM_ADD_REG_IMM (code, dreg, dreg, (val & 0xFF0000) >> 16, 16);
2386 if (val & 0xFF000000) {
2387 ARM_ADD_REG_IMM (code, dreg, dreg, (val & 0xFF000000) >> 24, 8);
2389 } else if (val & 0xFF0000) {
2390 ARM_MOV_REG_IMM (code, dreg, (val & 0xFF0000) >> 16, 16);
2391 if (val & 0xFF000000) {
2392 ARM_ADD_REG_IMM (code, dreg, dreg, (val & 0xFF000000) >> 24, 8);
2395 //g_assert_not_reached ();
2401 * emit_load_volatile_arguments:
2403 * Load volatile arguments from the stack to the original input registers.
2404 * Required before a tail call.
2407 emit_load_volatile_arguments (MonoCompile *cfg, guint8 *code)
2409 MonoMethod *method = cfg->method;
2410 MonoMethodSignature *sig;
2415 /* FIXME: Generate intermediate code instead */
2417 sig = mono_method_signature (method);
2419 /* This is the opposite of the code in emit_prolog */
2423 cinfo = calculate_sizes (sig, sig->pinvoke);
/* Struct returns: reload the hidden vret address register. */
2425 if (MONO_TYPE_ISSTRUCT (sig->ret)) {
2426 ArgInfo *ainfo = &cinfo->ret;
2427 inst = cfg->vret_addr;
2428 g_assert (arm_is_imm12 (inst->inst_offset));
2429 ARM_LDR_IMM (code, ainfo->reg, inst->inst_basereg, inst->inst_offset);
2431 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
2432 ArgInfo *ainfo = cinfo->args + i;
2433 inst = cfg->args [pos];
2435 if (cfg->verbose_level > 2)
2436 g_print ("Loading argument %d (type: %d)\n", i, ainfo->regtype);
2437 if (inst->opcode == OP_REGVAR) {
2438 if (ainfo->regtype == RegTypeGeneral)
2439 ARM_MOV_REG_REG (code, inst->dreg, ainfo->reg);
2440 else if (ainfo->regtype == RegTypeFP) {
2441 g_assert_not_reached ();
2442 } else if (ainfo->regtype == RegTypeBase) {
/* NOTE(review): the range check uses prev_sp_offset + ainfo->offset but
 * the fallback loads inst->inst_offset — offsets look inconsistent;
 * verify against upstream before relying on the large-offset path. */
2446 if (arm_is_imm12 (prev_sp_offset + ainfo->offset)) {
2447 ARM_LDR_IMM (code, inst->dreg, ARMREG_SP, (prev_sp_offset + ainfo->offset));
2449 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
2450 ARM_LDR_REG_REG (code, inst->dreg, ARMREG_SP, ARMREG_IP);
2454 g_assert_not_reached ();
2456 if (ainfo->regtype == RegTypeGeneral) {
2457 switch (ainfo->size) {
/* 8-byte args: reload both halves into reg and reg+1. */
2464 g_assert (arm_is_imm12 (inst->inst_offset));
2465 ARM_LDR_IMM (code, ainfo->reg, inst->inst_basereg, inst->inst_offset);
2466 g_assert (arm_is_imm12 (inst->inst_offset + 4));
2467 ARM_LDR_IMM (code, ainfo->reg + 1, inst->inst_basereg, inst->inst_offset + 4);
2470 if (arm_is_imm12 (inst->inst_offset)) {
2471 ARM_LDR_IMM (code, ainfo->reg, inst->inst_basereg, inst->inst_offset);
2473 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
2474 ARM_LDR_REG_REG (code, ainfo->reg, inst->inst_basereg, ARMREG_IP);
2478 } else if (ainfo->regtype == RegTypeBaseGen) {
2481 } else if (ainfo->regtype == RegTypeBase) {
2484 } else if (ainfo->regtype == RegTypeFP) {
2485 g_assert_not_reached ();
2486 } else if (ainfo->regtype == RegTypeStructByVal) {
/* Struct-by-value: reload each register-passed word of the struct. */
2487 int doffset = inst->inst_offset;
2491 if (mono_class_from_mono_type (inst->inst_vtype))
2492 size = mono_class_native_size (mono_class_from_mono_type (inst->inst_vtype), NULL);
2493 for (cur_reg = 0; cur_reg < ainfo->size; ++cur_reg) {
2494 if (arm_is_imm12 (doffset)) {
2495 ARM_LDR_IMM (code, ainfo->reg + cur_reg, inst->inst_basereg, doffset);
2497 code = mono_arm_emit_load_imm (code, ARMREG_IP, doffset);
2498 ARM_LDR_REG_REG (code, ainfo->reg + cur_reg, inst->inst_basereg, ARMREG_IP);
2500 soffset += sizeof (gpointer);
2501 doffset += sizeof (gpointer);
2506 } else if (ainfo->regtype == RegTypeStructByAddr) {
2523 mono_arch_output_basic_block (MonoCompile *cfg, MonoBasicBlock *bb)
2528 guint8 *code = cfg->native_code + cfg->code_len;
2529 MonoInst *last_ins = NULL;
2530 guint last_offset = 0;
2532 int imm8, rot_amount;
2534 /* we don't align basic blocks of loops on arm */
2536 if (cfg->verbose_level > 2)
2537 g_print ("Basic block %d starting at offset 0x%x\n", bb->block_num, bb->native_offset);
2539 cpos = bb->max_offset;
2541 if (cfg->prof_options & MONO_PROFILE_COVERAGE) {
2542 //MonoCoverageInfo *cov = mono_get_coverage_info (cfg->method);
2543 //g_assert (!mono_compile_aot);
2546 // cov->data [bb->dfn].iloffset = bb->cil_code - cfg->cil_code;
2547 /* this is not thread save, but good enough */
2548 /* fixme: howto handle overflows? */
2549 //x86_inc_mem (code, &cov->data [bb->dfn].count);
2552 if (mono_break_at_bb_method && mono_method_desc_full_match (mono_break_at_bb_method, cfg->method) && bb->block_num == mono_break_at_bb_bb_num) {
2553 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_INTERNAL_METHOD,
2554 (gpointer)"mono_break");
2555 code = emit_call_seq (cfg, code);
2558 MONO_BB_FOR_EACH_INS (bb, ins) {
2559 offset = code - cfg->native_code;
2561 max_len = ((guint8 *)ins_get_spec (ins->opcode))[MONO_INST_LEN];
2563 if (offset > (cfg->code_size - max_len - 16)) {
2564 cfg->code_size *= 2;
2565 cfg->native_code = g_realloc (cfg->native_code, cfg->code_size);
2566 code = cfg->native_code + offset;
2568 // if (ins->cil_code)
2569 // g_print ("cil code\n");
2570 mono_debug_record_line_number (cfg, ins, offset);
2572 switch (ins->opcode) {
2573 case OP_MEMORY_BARRIER:
2576 g_assert_not_reached ();
2579 ppc_mullw (code, ppc_r4, ins->sreg1, ins->sreg2);
2580 ppc_mulhw (code, ppc_r3, ins->sreg1, ins->sreg2);
2583 ppc_mullw (code, ppc_r4, ins->sreg1, ins->sreg2);
2584 ppc_mulhwu (code, ppc_r3, ins->sreg1, ins->sreg2);
2586 case OP_STOREI1_MEMBASE_IMM:
2587 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_imm & 0xFF);
2588 g_assert (arm_is_imm12 (ins->inst_offset));
2589 ARM_STRB_IMM (code, ARMREG_LR, ins->inst_destbasereg, ins->inst_offset);
2591 case OP_STOREI2_MEMBASE_IMM:
2592 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_imm & 0xFFFF);
2593 g_assert (arm_is_imm8 (ins->inst_offset));
2594 ARM_STRH_IMM (code, ARMREG_LR, ins->inst_destbasereg, ins->inst_offset);
2596 case OP_STORE_MEMBASE_IMM:
2597 case OP_STOREI4_MEMBASE_IMM:
2598 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_imm);
2599 g_assert (arm_is_imm12 (ins->inst_offset));
2600 ARM_STR_IMM (code, ARMREG_LR, ins->inst_destbasereg, ins->inst_offset);
2602 case OP_STOREI1_MEMBASE_REG:
2603 g_assert (arm_is_imm12 (ins->inst_offset));
2604 ARM_STRB_IMM (code, ins->sreg1, ins->inst_destbasereg, ins->inst_offset);
2606 case OP_STOREI2_MEMBASE_REG:
2607 g_assert (arm_is_imm8 (ins->inst_offset));
2608 ARM_STRH_IMM (code, ins->sreg1, ins->inst_destbasereg, ins->inst_offset);
2610 case OP_STORE_MEMBASE_REG:
2611 case OP_STOREI4_MEMBASE_REG:
2612 /* this case is special, since it happens for spill code after lowering has been called */
2613 if (arm_is_imm12 (ins->inst_offset)) {
2614 ARM_STR_IMM (code, ins->sreg1, ins->inst_destbasereg, ins->inst_offset);
2616 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_offset);
2617 ARM_STR_REG_REG (code, ins->sreg1, ins->inst_destbasereg, ARMREG_LR);
2620 case OP_STOREI1_MEMINDEX:
2621 ARM_STRB_REG_REG (code, ins->sreg1, ins->inst_destbasereg, ins->sreg2);
2623 case OP_STOREI2_MEMINDEX:
2624 ARM_STRH_REG_REG (code, ins->sreg1, ins->inst_destbasereg, ins->sreg2);
2626 case OP_STORE_MEMINDEX:
2627 case OP_STOREI4_MEMINDEX:
2628 ARM_STR_REG_REG (code, ins->sreg1, ins->inst_destbasereg, ins->sreg2);
2631 g_assert_not_reached ();
2633 case OP_LOAD_MEMINDEX:
2634 case OP_LOADI4_MEMINDEX:
2635 case OP_LOADU4_MEMINDEX:
2636 ARM_LDR_REG_REG (code, ins->dreg, ins->inst_basereg, ins->sreg2);
2638 case OP_LOADI1_MEMINDEX:
2639 ARM_LDRSB_REG_REG (code, ins->dreg, ins->inst_basereg, ins->sreg2);
2641 case OP_LOADU1_MEMINDEX:
2642 ARM_LDRB_REG_REG (code, ins->dreg, ins->inst_basereg, ins->sreg2);
2644 case OP_LOADI2_MEMINDEX:
2645 ARM_LDRSH_REG_REG (code, ins->dreg, ins->inst_basereg, ins->sreg2);
2647 case OP_LOADU2_MEMINDEX:
2648 ARM_LDRH_REG_REG (code, ins->dreg, ins->inst_basereg, ins->sreg2);
2650 case OP_LOAD_MEMBASE:
2651 case OP_LOADI4_MEMBASE:
2652 case OP_LOADU4_MEMBASE:
2653 /* this case is special, since it happens for spill code after lowering has been called */
2654 if (arm_is_imm12 (ins->inst_offset)) {
2655 ARM_LDR_IMM (code, ins->dreg, ins->inst_basereg, ins->inst_offset);
2657 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_offset);
2658 ARM_LDR_REG_REG (code, ins->dreg, ins->inst_basereg, ARMREG_LR);
2661 case OP_LOADI1_MEMBASE:
2662 g_assert (arm_is_imm8 (ins->inst_offset));
2663 ARM_LDRSB_IMM (code, ins->dreg, ins->inst_basereg, ins->inst_offset);
2665 case OP_LOADU1_MEMBASE:
2666 g_assert (arm_is_imm12 (ins->inst_offset));
2667 ARM_LDRB_IMM (code, ins->dreg, ins->inst_basereg, ins->inst_offset);
2669 case OP_LOADU2_MEMBASE:
2670 g_assert (arm_is_imm8 (ins->inst_offset));
2671 ARM_LDRH_IMM (code, ins->dreg, ins->inst_basereg, ins->inst_offset);
2673 case OP_LOADI2_MEMBASE:
2674 g_assert (arm_is_imm8 (ins->inst_offset));
2675 ARM_LDRSH_IMM (code, ins->dreg, ins->inst_basereg, ins->inst_offset);
2677 case OP_ICONV_TO_I1:
2678 ARM_SHL_IMM (code, ins->dreg, ins->sreg1, 24);
2679 ARM_SAR_IMM (code, ins->dreg, ins->dreg, 24);
2681 case OP_ICONV_TO_I2:
2682 ARM_SHL_IMM (code, ins->dreg, ins->sreg1, 16);
2683 ARM_SAR_IMM (code, ins->dreg, ins->dreg, 16);
2685 case OP_ICONV_TO_U1:
2686 ARM_AND_REG_IMM8 (code, ins->dreg, ins->sreg1, 0xff);
2688 case OP_ICONV_TO_U2:
2689 ARM_SHL_IMM (code, ins->dreg, ins->sreg1, 16);
2690 ARM_SHR_IMM (code, ins->dreg, ins->dreg, 16);
2694 ARM_CMP_REG_REG (code, ins->sreg1, ins->sreg2);
2696 case OP_COMPARE_IMM:
2697 case OP_ICOMPARE_IMM:
2698 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
2699 g_assert (imm8 >= 0);
2700 ARM_CMP_REG_IMM (code, ins->sreg1, imm8, rot_amount);
2704 * gdb does not like encountering the hw breakpoint ins in the debugged code.
2705 * So instead of emitting a trap, we emit a call a C function and place a
2708 //*(int*)code = 0xef9f0001;
2711 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_INTERNAL_METHOD,
2712 (gpointer)"mono_break");
2713 code = emit_call_seq (cfg, code);
2715 case OP_RELAXED_NOP:
2720 case OP_DUMMY_STORE:
2721 case OP_NOT_REACHED:
2726 ARM_ADDS_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
2729 ARM_ADD_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
2733 ARM_ADCS_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
2736 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
2737 g_assert (imm8 >= 0);
2738 ARM_ADDS_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
2742 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
2743 g_assert (imm8 >= 0);
2744 ARM_ADD_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
2748 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
2749 g_assert (imm8 >= 0);
2750 ARM_ADCS_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
2753 ARM_ADD_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
2754 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, "OverflowException");
2756 case OP_IADD_OVF_UN:
2757 ARM_ADD_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
2758 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, "OverflowException");
2761 ARM_SUB_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
2762 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, "OverflowException");
2764 case OP_ISUB_OVF_UN:
2765 ARM_SUB_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
2766 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_TRUE, PPC_BR_EQ, "OverflowException");
2768 case OP_ADD_OVF_CARRY:
2769 ARM_ADCS_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
2770 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, "OverflowException");
2772 case OP_ADD_OVF_UN_CARRY:
2773 ARM_ADCS_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
2774 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, "OverflowException");
2776 case OP_SUB_OVF_CARRY:
2777 ARM_SBCS_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
2778 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, "OverflowException");
2780 case OP_SUB_OVF_UN_CARRY:
2781 ARM_SBCS_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
2782 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_TRUE, PPC_BR_EQ, "OverflowException");
2786 ARM_SUBS_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
2789 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
2790 g_assert (imm8 >= 0);
2791 ARM_SUBS_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
2794 ARM_SUB_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
2798 ARM_SBCS_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
2802 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
2803 g_assert (imm8 >= 0);
2804 ARM_SUB_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
2808 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
2809 g_assert (imm8 >= 0);
2810 ARM_SBCS_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
2812 case OP_ARM_RSBS_IMM:
2813 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
2814 g_assert (imm8 >= 0);
2815 ARM_RSBS_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
2817 case OP_ARM_RSC_IMM:
2818 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
2819 g_assert (imm8 >= 0);
2820 ARM_RSC_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
2823 ARM_AND_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
2827 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
2828 g_assert (imm8 >= 0);
2829 ARM_AND_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
2837 /* crappy ARM arch doesn't have a DIV instruction */
2838 g_assert_not_reached ();
2840 ARM_ORR_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
2844 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
2845 g_assert (imm8 >= 0);
2846 ARM_ORR_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
2849 ARM_EOR_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
2853 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
2854 g_assert (imm8 >= 0);
2855 ARM_EOR_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
2858 ARM_SHL_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
2863 ARM_SHL_IMM (code, ins->dreg, ins->sreg1, (ins->inst_imm & 0x1f));
2864 else if (ins->dreg != ins->sreg1)
2865 ARM_MOV_REG_REG (code, ins->dreg, ins->sreg1);
2868 ARM_SAR_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
2873 ARM_SAR_IMM (code, ins->dreg, ins->sreg1, (ins->inst_imm & 0x1f));
2874 else if (ins->dreg != ins->sreg1)
2875 ARM_MOV_REG_REG (code, ins->dreg, ins->sreg1);
2878 case OP_ISHR_UN_IMM:
2880 ARM_SHR_IMM (code, ins->dreg, ins->sreg1, (ins->inst_imm & 0x1f));
2881 else if (ins->dreg != ins->sreg1)
2882 ARM_MOV_REG_REG (code, ins->dreg, ins->sreg1);
2885 ARM_SHR_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
2888 ARM_MVN_REG_REG (code, ins->dreg, ins->sreg1);
2891 ARM_RSB_REG_IMM8 (code, ins->dreg, ins->sreg1, 0);
2894 if (ins->dreg == ins->sreg2)
2895 ARM_MUL_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
2897 ARM_MUL_REG_REG (code, ins->dreg, ins->sreg2, ins->sreg1);
2900 g_assert_not_reached ();
2903 /* FIXME: handle ovf/ sreg2 != dreg */
2904 ARM_MUL_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
2906 case OP_IMUL_OVF_UN:
2907 /* FIXME: handle ovf/ sreg2 != dreg */
2908 ARM_MUL_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
2911 code = mono_arm_emit_load_imm (code, ins->dreg, ins->inst_c0);
2914 /* Load the GOT offset */
2915 mono_add_patch_info (cfg, offset, (MonoJumpInfoType)ins->inst_i1, ins->inst_p0);
2916 ARM_LDR_IMM (code, ins->dreg, ARMREG_PC, 0);
2918 *(gpointer*)code = NULL;
2920 /* Load the value from the GOT */
2921 ARM_LDR_REG_REG (code, ins->dreg, ARMREG_PC, ins->dreg);
2923 case OP_ICONV_TO_I4:
2924 case OP_ICONV_TO_U4:
2926 if (ins->dreg != ins->sreg1)
2927 ARM_MOV_REG_REG (code, ins->dreg, ins->sreg1);
2930 int saved = ins->sreg2;
2931 if (ins->sreg2 == ARM_LSW_REG) {
2932 ARM_MOV_REG_REG (code, ARMREG_LR, ins->sreg2);
2935 if (ins->sreg1 != ARM_LSW_REG)
2936 ARM_MOV_REG_REG (code, ARM_LSW_REG, ins->sreg1);
2937 if (saved != ARM_MSW_REG)
2938 ARM_MOV_REG_REG (code, ARM_MSW_REG, saved);
2943 ARM_MVFD (code, ins->dreg, ins->sreg1);
2944 #elif defined(ARM_FPU_VFP)
2945 ARM_CPYD (code, ins->dreg, ins->sreg1);
2948 case OP_FCONV_TO_R4:
2950 ARM_MVFS (code, ins->dreg, ins->sreg1);
2951 #elif defined(ARM_FPU_VFP)
2952 ARM_CVTD (code, ins->dreg, ins->sreg1);
2953 ARM_CVTS (code, ins->dreg, ins->dreg);
2958 * Keep in sync with mono_arch_emit_epilog
2960 g_assert (!cfg->method->save_lmf);
2962 code = emit_load_volatile_arguments (cfg, code);
2964 code = emit_big_add (code, ARMREG_SP, cfg->frame_reg, cfg->stack_usage);
2965 ARM_POP_NWB (code, cfg->used_int_regs | ((1 << ARMREG_SP)) | ((1 << ARMREG_LR)));
2966 mono_add_patch_info (cfg, (guint8*) code - cfg->native_code, MONO_PATCH_INFO_METHOD_JUMP, ins->inst_p0);
2970 /* ensure ins->sreg1 is not NULL */
2971 ARM_LDR_IMM (code, ARMREG_LR, ins->sreg1, 0);
2975 if (ppc_is_imm16 (cfg->sig_cookie + cfg->stack_usage)) {
2976 ppc_addi (code, ppc_r11, cfg->frame_reg, cfg->sig_cookie + cfg->stack_usage);
2978 ppc_load (code, ppc_r11, cfg->sig_cookie + cfg->stack_usage);
2979 ppc_add (code, ppc_r11, cfg->frame_reg, ppc_r11);
2981 ppc_stw (code, ppc_r11, 0, ins->sreg1);
2991 call = (MonoCallInst*)ins;
2992 if (ins->flags & MONO_INST_HAS_METHOD)
2993 mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_METHOD, call->method);
2995 mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_ABS, call->fptr);
2996 code = emit_call_seq (cfg, code);
2997 code = emit_move_return_value (cfg, ins, code);
3003 case OP_VOIDCALL_REG:
3005 code = emit_call_reg (code, ins->sreg1);
3006 code = emit_move_return_value (cfg, ins, code);
3008 case OP_FCALL_MEMBASE:
3009 case OP_LCALL_MEMBASE:
3010 case OP_VCALL_MEMBASE:
3011 case OP_VCALL2_MEMBASE:
3012 case OP_VOIDCALL_MEMBASE:
3013 case OP_CALL_MEMBASE:
3014 g_assert (arm_is_imm12 (ins->inst_offset));
3015 g_assert (ins->sreg1 != ARMREG_LR);
3016 call = (MonoCallInst*)ins;
3017 if (call->method->klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
3018 ARM_ADD_REG_IMM8 (code, ARMREG_LR, ARMREG_PC, 4);
3019 ARM_LDR_IMM (code, ARMREG_PC, ins->sreg1, ins->inst_offset);
3021 * We can't embed the method in the code stream in PIC code, or
3023 * Instead, we put it in V5 in code emitted by
3024 * mono_arch_emit_imt_argument (), and embed NULL here to
3025 * signal the IMT thunk that the value is in V5.
3027 if (call->dynamic_imt_arg)
3028 *((gpointer*)code) = NULL;
3030 *((gpointer*)code) = (gpointer)call->method;
3033 ARM_MOV_REG_REG (code, ARMREG_LR, ARMREG_PC);
3034 ARM_LDR_IMM (code, ARMREG_PC, ins->sreg1, ins->inst_offset);
3036 code = emit_move_return_value (cfg, ins, code);
3039 g_assert_not_reached ();
3042 /* keep alignment */
3043 int alloca_waste = cfg->param_area;
3046 /* round the size to 8 bytes */
3047 ARM_ADD_REG_IMM8 (code, ins->dreg, ins->sreg1, 7);
3048 ARM_BIC_REG_IMM8 (code, ins->dreg, ins->dreg, 7);
3050 ARM_ADD_REG_IMM8 (code, ins->dreg, ins->dreg, alloca_waste);
3051 ARM_SUB_REG_REG (code, ARMREG_SP, ARMREG_SP, ins->dreg);
3052 /* memzero the area: dreg holds the size, sp is the pointer */
3053 if (ins->flags & MONO_INST_INIT) {
3054 guint8 *start_loop, *branch_to_cond;
3055 ARM_MOV_REG_IMM8 (code, ARMREG_LR, 0);
3056 branch_to_cond = code;
3059 ARM_STR_REG_REG (code, ARMREG_LR, ARMREG_SP, ins->dreg);
3060 arm_patch (branch_to_cond, code);
3061 /* decrement by 4 and set flags */
3062 ARM_SUBS_REG_IMM8 (code, ins->dreg, ins->dreg, 4);
3063 ARM_B_COND (code, ARMCOND_GE, 0);
3064 arm_patch (code - 4, start_loop);
3066 ARM_ADD_REG_IMM8 (code, ins->dreg, ARMREG_SP, alloca_waste);
3070 if (ins->sreg1 != ARMREG_R0)
3071 ARM_MOV_REG_REG (code, ARMREG_R0, ins->sreg1);
3072 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_INTERNAL_METHOD,
3073 (gpointer)"mono_arch_throw_exception");
3074 code = emit_call_seq (cfg, code);
3078 if (ins->sreg1 != ARMREG_R0)
3079 ARM_MOV_REG_REG (code, ARMREG_R0, ins->sreg1);
3080 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_INTERNAL_METHOD,
3081 (gpointer)"mono_arch_rethrow_exception");
3082 code = emit_call_seq (cfg, code);
3085 case OP_START_HANDLER: {
3086 MonoInst *spvar = mono_find_spvar_for_region (cfg, bb->region);
3088 if (arm_is_imm12 (spvar->inst_offset)) {
3089 ARM_STR_IMM (code, ARMREG_LR, spvar->inst_basereg, spvar->inst_offset);
3091 code = mono_arm_emit_load_imm (code, ARMREG_IP, spvar->inst_offset);
3092 ARM_STR_REG_REG (code, ARMREG_LR, spvar->inst_basereg, ARMREG_IP);
3096 case OP_ENDFILTER: {
3097 MonoInst *spvar = mono_find_spvar_for_region (cfg, bb->region);
3099 if (ins->sreg1 != ARMREG_R0)
3100 ARM_MOV_REG_REG (code, ARMREG_R0, ins->sreg1);
3101 if (arm_is_imm12 (spvar->inst_offset)) {
3102 ARM_LDR_IMM (code, ARMREG_IP, spvar->inst_basereg, spvar->inst_offset);
3104 g_assert (ARMREG_IP != spvar->inst_basereg);
3105 code = mono_arm_emit_load_imm (code, ARMREG_IP, spvar->inst_offset);
3106 ARM_LDR_REG_REG (code, ARMREG_IP, spvar->inst_basereg, ARMREG_IP);
3108 ARM_MOV_REG_REG (code, ARMREG_PC, ARMREG_IP);
3111 case OP_ENDFINALLY: {
3112 MonoInst *spvar = mono_find_spvar_for_region (cfg, bb->region);
3114 if (arm_is_imm12 (spvar->inst_offset)) {
3115 ARM_LDR_IMM (code, ARMREG_IP, spvar->inst_basereg, spvar->inst_offset);
3117 g_assert (ARMREG_IP != spvar->inst_basereg);
3118 code = mono_arm_emit_load_imm (code, ARMREG_IP, spvar->inst_offset);
3119 ARM_LDR_REG_REG (code, ARMREG_IP, spvar->inst_basereg, ARMREG_IP);
3121 ARM_MOV_REG_REG (code, ARMREG_PC, ARMREG_IP);
3124 case OP_CALL_HANDLER:
3125 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_BB, ins->inst_target_bb);
3129 ins->inst_c0 = code - cfg->native_code;
3132 if (ins->flags & MONO_INST_BRLABEL) {
3133 /*if (ins->inst_i0->inst_c0) {
3135 //x86_jump_code (code, cfg->native_code + ins->inst_i0->inst_c0);
3137 mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_LABEL, ins->inst_i0);
3141 /*if (ins->inst_target_bb->native_offset) {
3143 //x86_jump_code (code, cfg->native_code + ins->inst_target_bb->native_offset);
3145 mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_BB, ins->inst_target_bb);
3151 ARM_MOV_REG_REG (code, ARMREG_PC, ins->sreg1);
3155 * In the normal case we have:
3156 * ldr pc, [pc, ins->sreg1 << 2]
3159 * ldr lr, [pc, ins->sreg1 << 2]
3161 * After follows the data.
3162 * FIXME: add aot support.
3165 mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_SWITCH, ins->inst_p0);
3166 max_len += 4 * GPOINTER_TO_INT (ins->klass);
3167 if (offset > (cfg->code_size - max_len - 16)) {
3168 cfg->code_size += max_len;
3169 cfg->code_size *= 2;
3170 cfg->native_code = g_realloc (cfg->native_code, cfg->code_size);
3171 code = cfg->native_code + offset;
3173 ARM_LDR_REG_REG_SHIFT (code, ARMREG_PC, ARMREG_PC, ins->sreg1, ARMSHIFT_LSL, 2);
3175 code += 4 * GPOINTER_TO_INT (ins->klass);
3179 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 0, ARMCOND_NE);
3180 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_EQ);
3184 ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
3185 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_LT);
3189 ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
3190 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_LO);
3194 ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
3195 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_GT);
3199 ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
3200 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_HI);
3202 case OP_COND_EXC_EQ:
3203 case OP_COND_EXC_NE_UN:
3204 case OP_COND_EXC_LT:
3205 case OP_COND_EXC_LT_UN:
3206 case OP_COND_EXC_GT:
3207 case OP_COND_EXC_GT_UN:
3208 case OP_COND_EXC_GE:
3209 case OP_COND_EXC_GE_UN:
3210 case OP_COND_EXC_LE:
3211 case OP_COND_EXC_LE_UN:
3212 EMIT_COND_SYSTEM_EXCEPTION (ins->opcode - OP_COND_EXC_EQ, ins->inst_p1);
3214 case OP_COND_EXC_IEQ:
3215 case OP_COND_EXC_INE_UN:
3216 case OP_COND_EXC_ILT:
3217 case OP_COND_EXC_ILT_UN:
3218 case OP_COND_EXC_IGT:
3219 case OP_COND_EXC_IGT_UN:
3220 case OP_COND_EXC_IGE:
3221 case OP_COND_EXC_IGE_UN:
3222 case OP_COND_EXC_ILE:
3223 case OP_COND_EXC_ILE_UN:
3224 EMIT_COND_SYSTEM_EXCEPTION (ins->opcode - OP_COND_EXC_IEQ, ins->inst_p1);
3227 case OP_COND_EXC_OV:
3228 case OP_COND_EXC_NC:
3229 case OP_COND_EXC_NO:
3230 case OP_COND_EXC_IC:
3231 case OP_COND_EXC_IOV:
3232 case OP_COND_EXC_INC:
3233 case OP_COND_EXC_INO:
3246 EMIT_COND_BRANCH (ins, ins->opcode - OP_IBEQ);
3249 /* floating point opcodes */
3252 if (cfg->compile_aot) {
3253 ARM_LDFD (code, ins->dreg, ARMREG_PC, 0);
3255 *(guint32*)code = ((guint32*)(ins->inst_p0))[0];
3257 *(guint32*)code = ((guint32*)(ins->inst_p0))[1];
3260 /* FIXME: we can optimize the imm load by dealing with part of
3261 * the displacement in LDFD (aligning to 512).
3263 code = mono_arm_emit_load_imm (code, ARMREG_LR, (guint32)ins->inst_p0);
3264 ARM_LDFD (code, ins->dreg, ARMREG_LR, 0);
3268 if (cfg->compile_aot) {
3269 ARM_LDFS (code, ins->dreg, ARMREG_PC, 0);
3271 *(guint32*)code = ((guint32*)(ins->inst_p0))[0];
3274 code = mono_arm_emit_load_imm (code, ARMREG_LR, (guint32)ins->inst_p0);
3275 ARM_LDFS (code, ins->dreg, ARMREG_LR, 0);
3278 case OP_STORER8_MEMBASE_REG:
3279 /* This is generated by the local regalloc pass which runs after the lowering pass */
3280 if (!arm_is_fpimm8 (ins->inst_offset)) {
3281 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_offset);
3282 ARM_STFD (code, ins->sreg1, ARMREG_LR, 0);
3284 ARM_STFD (code, ins->sreg1, ins->inst_destbasereg, ins->inst_offset);
3287 case OP_LOADR8_MEMBASE:
3288 /* This is generated by the local regalloc pass which runs after the lowering pass */
3289 if (!arm_is_fpimm8 (ins->inst_offset)) {
3290 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_offset);
3291 ARM_LDFD (code, ins->dreg, ARMREG_LR, 0);
3293 ARM_LDFD (code, ins->dreg, ins->inst_basereg, ins->inst_offset);
3296 case OP_STORER4_MEMBASE_REG:
3297 g_assert (arm_is_fpimm8 (ins->inst_offset));
3298 ARM_STFS (code, ins->sreg1, ins->inst_destbasereg, ins->inst_offset);
3300 case OP_LOADR4_MEMBASE:
3301 g_assert (arm_is_fpimm8 (ins->inst_offset));
3302 ARM_LDFS (code, ins->dreg, ins->inst_basereg, ins->inst_offset);
3304 case OP_ICONV_TO_R_UN: {
3306 tmpreg = ins->dreg == 0? 1: 0;
3307 ARM_CMP_REG_IMM8 (code, ins->sreg1, 0);
3308 ARM_FLTD (code, ins->dreg, ins->sreg1);
3309 ARM_B_COND (code, ARMCOND_GE, 8);
3310 /* save the temp register */
3311 ARM_SUB_REG_IMM8 (code, ARMREG_SP, ARMREG_SP, 8);
3312 ARM_STFD (code, tmpreg, ARMREG_SP, 0);
3313 ARM_LDFD (code, tmpreg, ARMREG_PC, 12);
3314 ARM_FPA_ADFD (code, ins->dreg, ins->dreg, tmpreg);
3315 ARM_LDFD (code, tmpreg, ARMREG_SP, 0);
3316 ARM_ADD_REG_IMM8 (code, ARMREG_SP, ARMREG_SP, 8);
3317 /* skip the constant pool */
3320 *(int*)code = 0x41f00000;
3325 * ldfltd ftemp, [pc, #8] 0x41f00000 0x00000000
3326 * adfltd fdest, fdest, ftemp
3330 case OP_ICONV_TO_R4:
3331 ARM_FLTS (code, ins->dreg, ins->sreg1);
3333 case OP_ICONV_TO_R8:
3334 ARM_FLTD (code, ins->dreg, ins->sreg1);
3336 #elif defined(ARM_FPU_VFP)
3338 if (cfg->compile_aot) {
3339 ARM_FLDD (code, ins->dreg, ARMREG_PC, 0);
3341 *(guint32*)code = ((guint32*)(ins->inst_p0))[0];
3343 *(guint32*)code = ((guint32*)(ins->inst_p0))[1];
3346 /* FIXME: we can optimize the imm load by dealing with part of
3347 * the displacement in LDFD (aligning to 512).
3349 code = mono_arm_emit_load_imm (code, ARMREG_LR, (guint32)ins->inst_p0);
3350 ARM_FLDD (code, ins->dreg, ARMREG_LR, 0);
3354 if (cfg->compile_aot) {
3355 ARM_FLDS (code, ins->dreg, ARMREG_PC, 0);
3357 *(guint32*)code = ((guint32*)(ins->inst_p0))[0];
3359 ARM_CVTS (code, ins->dreg, ins->dreg);
3361 code = mono_arm_emit_load_imm (code, ARMREG_LR, (guint32)ins->inst_p0);
3362 ARM_FLDS (code, ins->dreg, ARMREG_LR, 0);
3363 ARM_CVTS (code, ins->dreg, ins->dreg);
3366 case OP_STORER8_MEMBASE_REG:
3367 g_assert (arm_is_fpimm8 (ins->inst_offset));
3368 ARM_FSTD (code, ins->sreg1, ins->inst_destbasereg, ins->inst_offset);
3370 case OP_LOADR8_MEMBASE:
3371 g_assert (arm_is_fpimm8 (ins->inst_offset));
3372 ARM_FLDD (code, ins->dreg, ins->inst_basereg, ins->inst_offset);
3374 case OP_STORER4_MEMBASE_REG:
3375 g_assert (arm_is_fpimm8 (ins->inst_offset));
3376 ARM_FSTS (code, ins->sreg1, ins->inst_destbasereg, ins->inst_offset);
3378 case OP_LOADR4_MEMBASE:
3379 g_assert (arm_is_fpimm8 (ins->inst_offset));
3380 ARM_FLDS (code, ins->dreg, ins->inst_basereg, ins->inst_offset);
3382 case OP_ICONV_TO_R_UN: {
3383 g_assert_not_reached ();
3386 case OP_ICONV_TO_R4:
3387 g_assert_not_reached ();
3388 //ARM_FLTS (code, ins->dreg, ins->sreg1);
3390 case OP_ICONV_TO_R8:
3391 g_assert_not_reached ();
3392 //ARM_FLTD (code, ins->dreg, ins->sreg1);
3395 case OP_FCONV_TO_I1:
3396 code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 1, TRUE);
3398 case OP_FCONV_TO_U1:
3399 code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 1, FALSE);
3401 case OP_FCONV_TO_I2:
3402 code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 2, TRUE);
3404 case OP_FCONV_TO_U2:
3405 code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 2, FALSE);
3407 case OP_FCONV_TO_I4:
3409 code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 4, TRUE);
3411 case OP_FCONV_TO_U4:
3413 code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 4, FALSE);
3415 case OP_FCONV_TO_I8:
3416 case OP_FCONV_TO_U8:
3417 g_assert_not_reached ();
3418 /* Implemented as helper calls */
3420 case OP_LCONV_TO_R_UN:
3421 g_assert_not_reached ();
3422 /* Implemented as helper calls */
3424 case OP_LCONV_TO_OVF_I:
3425 case OP_LCONV_TO_OVF_I4_2: {
3426 guint32 *high_bit_not_set, *valid_negative, *invalid_negative, *valid_positive;
3428 * Valid ints: 0xffffffff:8000000 to 00000000:0x7f000000
3431 ARM_CMP_REG_IMM8 (code, ins->sreg1, 0);
3432 high_bit_not_set = code;
3433 ARM_B_COND (code, ARMCOND_GE, 0); /*branch if bit 31 of the lower part is not set*/
3435 ARM_CMN_REG_IMM8 (code, ins->sreg2, 1); /*This have the same effect as CMP reg, 0xFFFFFFFF */
3436 valid_negative = code;
3437 ARM_B_COND (code, ARMCOND_EQ, 0); /*branch if upper part == 0xFFFFFFFF (lower part has bit 31 set) */
3438 invalid_negative = code;
3439 ARM_B_COND (code, ARMCOND_AL, 0);
3441 arm_patch (high_bit_not_set, code);
3443 ARM_CMP_REG_IMM8 (code, ins->sreg2, 0);
3444 valid_positive = code;
3445 ARM_B_COND (code, ARMCOND_EQ, 0); /*branch if upper part == 0 (lower part has bit 31 clear)*/
3447 arm_patch (invalid_negative, code);
3448 EMIT_COND_SYSTEM_EXCEPTION_FLAGS (ARMCOND_AL, "OverflowException");
3450 arm_patch (valid_negative, code);
3451 arm_patch (valid_positive, code);
3453 if (ins->dreg != ins->sreg1)
3454 ARM_MOV_REG_REG (code, ins->dreg, ins->sreg1);
3459 ARM_FPA_ADFD (code, ins->dreg, ins->sreg1, ins->sreg2);
3462 ARM_FPA_SUFD (code, ins->dreg, ins->sreg1, ins->sreg2);
3465 ARM_FPA_MUFD (code, ins->dreg, ins->sreg1, ins->sreg2);
3468 ARM_FPA_DVFD (code, ins->dreg, ins->sreg1, ins->sreg2);
3471 ARM_MNFD (code, ins->dreg, ins->sreg1);
3473 #elif defined(ARM_FPU_VFP)
3475 ARM_VFP_ADDD (code, ins->dreg, ins->sreg1, ins->sreg2);
3478 ARM_VFP_SUBD (code, ins->dreg, ins->sreg1, ins->sreg2);
3481 ARM_VFP_MULD (code, ins->dreg, ins->sreg1, ins->sreg2);
3484 ARM_VFP_DIVD (code, ins->dreg, ins->sreg1, ins->sreg2);
3487 ARM_NEGD (code, ins->dreg, ins->sreg1);
3492 g_assert_not_reached ();
3496 ARM_FCMP (code, ARM_FPA_CMF, ins->sreg1, ins->sreg2);
3497 #elif defined(ARM_FPU_VFP)
3498 ARM_CMPD (code, ins->sreg1, ins->sreg2);
3503 ARM_FCMP (code, ARM_FPA_CMF, ins->sreg1, ins->sreg2);
3504 #elif defined(ARM_FPU_VFP)
3505 ARM_CMPD (code, ins->sreg1, ins->sreg2);
3507 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 0, ARMCOND_NE);
3508 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_EQ);
3512 ARM_FCMP (code, ARM_FPA_CMF, ins->sreg1, ins->sreg2);
3513 #elif defined(ARM_FPU_VFP)
3514 ARM_CMPD (code, ins->sreg1, ins->sreg2);
3516 ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
3517 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_MI);
3521 ARM_FCMP (code, ARM_FPA_CMF, ins->sreg1, ins->sreg2);
3522 #elif defined(ARM_FPU_VFP)
3523 ARM_CMPD (code, ins->sreg1, ins->sreg2);
3525 ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
3526 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_MI);
3527 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_VS);
3532 ARM_FCMP (code, ARM_FPA_CMF, ins->sreg2, ins->sreg1);
3533 #elif defined(ARM_FPU_VFP)
3534 ARM_CMPD (code, ins->sreg2, ins->sreg1);
3536 ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
3537 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_MI);
3542 ARM_FCMP (code, ARM_FPA_CMF, ins->sreg2, ins->sreg1);
3543 #elif defined(ARM_FPU_VFP)
3544 ARM_CMPD (code, ins->sreg2, ins->sreg1);
3546 ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
3547 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_MI);
3548 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_VS);
3550 /* ARM FPA flags table:
3551 * N Less than ARMCOND_MI
3552 * Z Equal ARMCOND_EQ
3553 * C Greater Than or Equal ARMCOND_CS
3554 * V Unordered ARMCOND_VS
3557 EMIT_COND_BRANCH (ins, OP_IBEQ - OP_IBEQ);
3560 EMIT_COND_BRANCH (ins, OP_IBNE_UN - OP_IBEQ);
3563 EMIT_COND_BRANCH_FLAGS (ins, ARMCOND_MI); /* N set */
3566 EMIT_COND_BRANCH_FLAGS (ins, ARMCOND_VS); /* V set */
3567 EMIT_COND_BRANCH_FLAGS (ins, ARMCOND_MI); /* N set */
3573 g_assert_not_reached ();
3576 EMIT_COND_BRANCH_FLAGS (ins, ARMCOND_CS);
3579 EMIT_COND_BRANCH_FLAGS (ins, ARMCOND_VS); /* V set */
3580 EMIT_COND_BRANCH_FLAGS (ins, ARMCOND_GE);
3585 if (ins->dreg != ins->sreg1)
3586 ARM_MVFD (code, ins->dreg, ins->sreg1);
3588 g_assert_not_reached ();
3593 g_warning ("unknown opcode %s in %s()\n", mono_inst_name (ins->opcode), __FUNCTION__);
3594 g_assert_not_reached ();
3597 if ((cfg->opt & MONO_OPT_BRANCH) && ((code - cfg->native_code - offset) > max_len)) {
3598 g_warning ("wrong maximal instruction length of instruction %s (expected %d, got %d)",
3599 mono_inst_name (ins->opcode), max_len, code - cfg->native_code - offset);
3600 g_assert_not_reached ();
3606 last_offset = offset;
3609 cfg->code_len = code - cfg->native_code;
3612 #endif /* DISABLE_JIT */
/*
 * mono_arch_register_lowlevel_calls:
 *
 * Register the ARM-specific low-level runtime helper
 * mono_arm_throw_exception as a JIT icall so it can later be looked up
 * by name.  The icall signature is a placeholder (see comment below);
 * the helper is never invoked through managed-signature marshalling.
 */
3615 mono_arch_register_lowlevel_calls (void)
3617 /* The signature doesn't matter */
3618 mono_register_jit_icall (mono_arm_throw_exception, "mono_arm_throw_exception", mono_create_icall_signature ("void"), TRUE);
/*
 * patch_lis_ori:
 * Rewrite the two 16-bit immediates of a lis/ori instruction pair at IP
 * with the high and low halfwords of VAL (indices 1 and 3 of the guint16
 * view select the immediate field of each 32-bit instruction).
 * NOTE(review): lis/ori are PPC mnemonics — this looks inherited from the
 * PPC backend; on ARM it is only reachable through the
 * g_assert_not_reached () paths in mono_arch_patch_code () below.
 */
3621 #define patch_lis_ori(ip,val) do {\
3622 guint16 *__lis_ori = (guint16*)(ip); \
3623 __lis_ori [1] = (((guint32)(val)) >> 16) & 0xffff; \
3624 __lis_ori [3] = ((guint32)(val)) & 0xffff; \
/*
 * mono_arch_patch_code:
 *
 * Walk the jump-info list JI and patch every recorded site in METHOD's
 * native code buffer CODE.  Each patch site offset (patch_info->ip.i) is
 * turned into an absolute address, the target is resolved via
 * mono_resolve_patch_target (), and the instruction is fixed up with
 * arm_patch ().  A few patch kinds need no work here, and several cases
 * are dead PPC-style paths guarded by g_assert_not_reached ().
 */
3628 mono_arch_patch_code (MonoMethod *method, MonoDomain *domain, guint8 *code, MonoJumpInfo *ji, gboolean run_cctors)
3630 MonoJumpInfo *patch_info;
/* NOTE(review): AOT compilation is inferred from !run_cctors — confirm
 * this invariant holds for every caller. */
3631 gboolean compile_aot = !run_cctors;
3633 for (patch_info = ji; patch_info; patch_info = patch_info->next) {
/* patch_info->ip.i is a byte offset into the native code buffer. */
3634 unsigned char *ip = patch_info->ip.i + code;
3635 const unsigned char *target;
3637 if (patch_info->type == MONO_PATCH_INFO_SWITCH && !compile_aot) {
/* The switch jump table is inlined in the code stream, 8 bytes
 * (2 instructions) past the patch site; fill it with absolute
 * addresses computed from the per-case offsets. */
3638 gpointer *jt = (gpointer*)(ip + 8);
3640 /* jt is the inlined jump table, 2 instructions after ip
3641 * In the normal case we store the absolute addresses,
3642 * otherwise the displacements.
3644 for (i = 0; i < patch_info->data.table->table_size; i++)
3645 jt [i] = code + (int)patch_info->data.table->table [i];
3648 target = mono_resolve_patch_target (method, domain, code, patch_info, run_cctors);
3651 switch (patch_info->type) {
3652 case MONO_PATCH_INFO_BB:
3653 case MONO_PATCH_INFO_LABEL:
3656 /* No need to patch these */
/* Dead code apparently inherited from the PPC backend: each of the
 * following cases asserts before the lis/ori-style patching would run. */
3661 switch (patch_info->type) {
3662 case MONO_PATCH_INFO_IP:
3663 g_assert_not_reached ();
3664 patch_lis_ori (ip, ip);
3666 case MONO_PATCH_INFO_METHOD_REL:
3667 g_assert_not_reached ();
3668 *((gpointer *)(ip)) = code + patch_info->data.offset;
3670 case MONO_PATCH_INFO_METHODCONST:
3671 case MONO_PATCH_INFO_CLASS:
3672 case MONO_PATCH_INFO_IMAGE:
3673 case MONO_PATCH_INFO_FIELD:
3674 case MONO_PATCH_INFO_VTABLE:
3675 case MONO_PATCH_INFO_IID:
3676 case MONO_PATCH_INFO_SFLDA:
3677 case MONO_PATCH_INFO_LDSTR:
3678 case MONO_PATCH_INFO_TYPE_FROM_HANDLE:
3679 case MONO_PATCH_INFO_LDTOKEN:
3680 g_assert_not_reached ();
3681 /* from OP_AOTCONST : lis + ori */
3682 patch_lis_ori (ip, target);
3684 case MONO_PATCH_INFO_R4:
3685 case MONO_PATCH_INFO_R8:
3686 g_assert_not_reached ();
3687 *((gconstpointer *)(ip + 2)) = patch_info->data.target;
3689 case MONO_PATCH_INFO_EXC_NAME:
3690 g_assert_not_reached ();
3691 *((gconstpointer *)(ip + 1)) = patch_info->data.name;
3693 case MONO_PATCH_INFO_NONE:
3694 case MONO_PATCH_INFO_BB_OVF:
3695 case MONO_PATCH_INFO_EXC_OVF:
3696 /* everything is dealt with at epilog output time */
/* Common path: patch the branch/call/load at ip to reach target. */
3701 arm_patch (ip, target);
3706 * Stack frame layout:
3708 * ------------------- fp
3709 * MonoLMF structure or saved registers
3710 * -------------------
3712 * -------------------
3714 * -------------------
3715 * optional 8 bytes for tracing
3716 * -------------------
3717 * param area size is cfg->param_area
3718 * ------------------- sp
3721 mono_arch_emit_prolog (MonoCompile *cfg)
3723 MonoMethod *method = cfg->method;
3725 MonoMethodSignature *sig;
3727 int alloc_size, pos, max_offset, i, rot_amount;
3734 if (mono_jit_trace_calls != NULL && mono_trace_eval (method))
3737 sig = mono_method_signature (method);
3738 cfg->code_size = 256 + sig->param_count * 20;
3739 code = cfg->native_code = g_malloc (cfg->code_size);
3741 ARM_MOV_REG_REG (code, ARMREG_IP, ARMREG_SP);
3743 alloc_size = cfg->stack_offset;
3746 if (!method->save_lmf) {
3747 ARM_PUSH (code, (cfg->used_int_regs | (1 << ARMREG_IP) | (1 << ARMREG_LR)));
3748 prev_sp_offset = 8; /* ip and lr */
3749 for (i = 0; i < 16; ++i) {
3750 if (cfg->used_int_regs & (1 << i))
3751 prev_sp_offset += 4;
3754 ARM_PUSH (code, 0x5ff0);
3755 prev_sp_offset = 4 * 10; /* all but r0-r3, sp and pc */
3756 pos += sizeof (MonoLMF) - prev_sp_offset;
3760 // align to MONO_ARCH_FRAME_ALIGNMENT bytes
3761 if (alloc_size & (MONO_ARCH_FRAME_ALIGNMENT - 1)) {
3762 alloc_size += MONO_ARCH_FRAME_ALIGNMENT - 1;
3763 alloc_size &= ~(MONO_ARCH_FRAME_ALIGNMENT - 1);
3766 /* the stack used in the pushed regs */
3767 if (prev_sp_offset & 4)
3769 cfg->stack_usage = alloc_size;
3771 if ((i = mono_arm_is_rotated_imm8 (alloc_size, &rot_amount)) >= 0) {
3772 ARM_SUB_REG_IMM (code, ARMREG_SP, ARMREG_SP, i, rot_amount);
3774 code = mono_arm_emit_load_imm (code, ARMREG_IP, alloc_size);
3775 ARM_SUB_REG_REG (code, ARMREG_SP, ARMREG_SP, ARMREG_IP);
3778 if (cfg->frame_reg != ARMREG_SP)
3779 ARM_MOV_REG_REG (code, cfg->frame_reg, ARMREG_SP);
3780 //g_print ("prev_sp_offset: %d, alloc_size:%d\n", prev_sp_offset, alloc_size);
3781 prev_sp_offset += alloc_size;
3783 /* compute max_offset in order to use short forward jumps
3784 * we could skip do it on arm because the immediate displacement
3785 * for jumps is large enough, it may be useful later for constant pools
3788 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
3789 MonoInst *ins = bb->code;
3790 bb->max_offset = max_offset;
3792 if (cfg->prof_options & MONO_PROFILE_COVERAGE)
3795 MONO_BB_FOR_EACH_INS (bb, ins)
3796 max_offset += ((guint8 *)ins_get_spec (ins->opcode))[MONO_INST_LEN];
3799 /* store runtime generic context */
3800 if (cfg->rgctx_var) {
3801 MonoInst *ins = cfg->rgctx_var;
3803 g_assert (ins->opcode == OP_REGOFFSET);
3805 if (arm_is_imm12 (ins->inst_offset)) {
3806 ARM_STR_IMM (code, MONO_ARCH_RGCTX_REG, ins->inst_basereg, ins->inst_offset);
3808 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_offset);
3809 ARM_STR_REG_REG (code, MONO_ARCH_RGCTX_REG, ins->inst_basereg, ARMREG_LR);
3813 /* load arguments allocated to register from the stack */
3816 cinfo = calculate_sizes (sig, sig->pinvoke);
3818 if (MONO_TYPE_ISSTRUCT (sig->ret)) {
3819 ArgInfo *ainfo = &cinfo->ret;
3820 inst = cfg->vret_addr;
3821 g_assert (arm_is_imm12 (inst->inst_offset));
3822 ARM_STR_IMM (code, ainfo->reg, inst->inst_basereg, inst->inst_offset);
3824 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
3825 ArgInfo *ainfo = cinfo->args + i;
3826 inst = cfg->args [pos];
3828 if (cfg->verbose_level > 2)
3829 g_print ("Saving argument %d (type: %d)\n", i, ainfo->regtype);
3830 if (inst->opcode == OP_REGVAR) {
3831 if (ainfo->regtype == RegTypeGeneral)
3832 ARM_MOV_REG_REG (code, inst->dreg, ainfo->reg);
3833 else if (ainfo->regtype == RegTypeFP) {
3834 g_assert_not_reached ();
3835 } else if (ainfo->regtype == RegTypeBase) {
3836 if (arm_is_imm12 (prev_sp_offset + ainfo->offset)) {
3837 ARM_LDR_IMM (code, inst->dreg, ARMREG_SP, (prev_sp_offset + ainfo->offset));
3839 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
3840 ARM_LDR_REG_REG (code, inst->dreg, ARMREG_SP, ARMREG_IP);
3843 g_assert_not_reached ();
3845 if (cfg->verbose_level > 2)
3846 g_print ("Argument %d assigned to register %s\n", pos, mono_arch_regname (inst->dreg));
3848 /* the argument should be put on the stack: FIXME handle size != word */
3849 if (ainfo->regtype == RegTypeGeneral) {
3850 switch (ainfo->size) {
3852 if (arm_is_imm12 (inst->inst_offset))
3853 ARM_STRB_IMM (code, ainfo->reg, inst->inst_basereg, inst->inst_offset);
3855 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
3856 ARM_STRB_REG_REG (code, ainfo->reg, inst->inst_basereg, ARMREG_IP);
3860 if (arm_is_imm8 (inst->inst_offset)) {
3861 ARM_STRH_IMM (code, ainfo->reg, inst->inst_basereg, inst->inst_offset);
3863 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
3864 ARM_STRH_REG_REG (code, ainfo->reg, inst->inst_basereg, ARMREG_IP);
3868 g_assert (arm_is_imm12 (inst->inst_offset));
3869 ARM_STR_IMM (code, ainfo->reg, inst->inst_basereg, inst->inst_offset);
3870 g_assert (arm_is_imm12 (inst->inst_offset + 4));
3871 ARM_STR_IMM (code, ainfo->reg + 1, inst->inst_basereg, inst->inst_offset + 4);
3874 if (arm_is_imm12 (inst->inst_offset)) {
3875 ARM_STR_IMM (code, ainfo->reg, inst->inst_basereg, inst->inst_offset);
3877 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
3878 ARM_STR_REG_REG (code, ainfo->reg, inst->inst_basereg, ARMREG_IP);
3882 } else if (ainfo->regtype == RegTypeBaseGen) {
3883 g_assert (arm_is_imm12 (prev_sp_offset + ainfo->offset));
3884 g_assert (arm_is_imm12 (inst->inst_offset));
3885 ARM_LDR_IMM (code, ARMREG_LR, ARMREG_SP, (prev_sp_offset + ainfo->offset));
3886 ARM_STR_IMM (code, ARMREG_LR, inst->inst_basereg, inst->inst_offset + 4);
3887 ARM_STR_IMM (code, ARMREG_R3, inst->inst_basereg, inst->inst_offset);
3888 } else if (ainfo->regtype == RegTypeBase) {
3889 if (arm_is_imm12 (prev_sp_offset + ainfo->offset)) {
3890 ARM_LDR_IMM (code, ARMREG_LR, ARMREG_SP, (prev_sp_offset + ainfo->offset));
3892 code = mono_arm_emit_load_imm (code, ARMREG_IP, prev_sp_offset + ainfo->offset);
3893 ARM_LDR_REG_REG (code, ARMREG_LR, ARMREG_SP, ARMREG_IP);
3896 switch (ainfo->size) {
3898 if (arm_is_imm8 (inst->inst_offset)) {
3899 ARM_STRB_IMM (code, ARMREG_LR, inst->inst_basereg, inst->inst_offset);
3901 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
3902 ARM_STRB_REG_REG (code, ARMREG_LR, inst->inst_basereg, ARMREG_IP);
3906 if (arm_is_imm8 (inst->inst_offset)) {
3907 ARM_STRH_IMM (code, ARMREG_LR, inst->inst_basereg, inst->inst_offset);
3909 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
3910 ARM_STRH_REG_REG (code, ARMREG_LR, inst->inst_basereg, ARMREG_IP);
3914 if (arm_is_imm12 (inst->inst_offset)) {
3915 ARM_STR_IMM (code, ARMREG_LR, inst->inst_basereg, inst->inst_offset);
3917 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
3918 ARM_STR_REG_REG (code, ARMREG_LR, inst->inst_basereg, ARMREG_IP);
3920 if (arm_is_imm12 (prev_sp_offset + ainfo->offset + 4)) {
3921 ARM_LDR_IMM (code, ARMREG_LR, ARMREG_SP, (prev_sp_offset + ainfo->offset + 4));
3923 code = mono_arm_emit_load_imm (code, ARMREG_IP, prev_sp_offset + ainfo->offset + 4);
3924 ARM_LDR_REG_REG (code, ARMREG_LR, ARMREG_SP, ARMREG_IP);
3926 if (arm_is_imm12 (inst->inst_offset + 4)) {
3927 ARM_STR_IMM (code, ARMREG_LR, inst->inst_basereg, inst->inst_offset + 4);
3929 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset + 4);
3930 ARM_STR_REG_REG (code, ARMREG_LR, inst->inst_basereg, ARMREG_IP);
3934 if (arm_is_imm12 (inst->inst_offset)) {
3935 ARM_STR_IMM (code, ARMREG_LR, inst->inst_basereg, inst->inst_offset);
3937 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
3938 ARM_STR_REG_REG (code, ARMREG_LR, inst->inst_basereg, ARMREG_IP);
3942 } else if (ainfo->regtype == RegTypeFP) {
3943 g_assert_not_reached ();
3944 } else if (ainfo->regtype == RegTypeStructByVal) {
3945 int doffset = inst->inst_offset;
3949 size = mini_type_stack_size_full (cfg->generic_sharing_context, inst->inst_vtype, NULL, sig->pinvoke);
3950 for (cur_reg = 0; cur_reg < ainfo->size; ++cur_reg) {
3951 if (arm_is_imm12 (doffset)) {
3952 ARM_STR_IMM (code, ainfo->reg + cur_reg, inst->inst_basereg, doffset);
3954 code = mono_arm_emit_load_imm (code, ARMREG_IP, doffset);
3955 ARM_STR_REG_REG (code, ainfo->reg + cur_reg, inst->inst_basereg, ARMREG_IP);
3957 soffset += sizeof (gpointer);
3958 doffset += sizeof (gpointer);
3960 if (ainfo->vtsize) {
3961 /* FIXME: handle overrun! with struct sizes not multiple of 4 */
3962 //g_print ("emit_memcpy (prev_sp_ofs: %d, ainfo->offset: %d, soffset: %d)\n", prev_sp_offset, ainfo->offset, soffset);
3963 code = emit_memcpy (code, ainfo->vtsize * sizeof (gpointer), inst->inst_basereg, doffset, ARMREG_SP, prev_sp_offset + ainfo->offset);
3965 } else if (ainfo->regtype == RegTypeStructByAddr) {
3966 g_assert_not_reached ();
3967 /* FIXME: handle overrun! with struct sizes not multiple of 4 */
3968 code = emit_memcpy (code, ainfo->vtsize * sizeof (gpointer), inst->inst_basereg, inst->inst_offset, ainfo->reg, 0);
3970 g_assert_not_reached ();
3975 if (method->save_lmf) {
3977 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_INTERNAL_METHOD,
3978 (gpointer)"mono_get_lmf_addr");
3979 code = emit_call_seq (cfg, code);
3980 /* we build the MonoLMF structure on the stack - see mini-arm.h */
3981 /* lmf_offset is the offset from the previous stack pointer,
3982 * alloc_size is the total stack space allocated, so the offset
3983 * of MonoLMF from the current stack ptr is alloc_size - lmf_offset.
3984 * The pointer to the struct is put in r1 (new_lmf).
3985 * r2 is used as scratch
3986 * The callee-saved registers are already in the MonoLMF structure
3988 code = emit_big_add (code, ARMREG_R1, ARMREG_SP, alloc_size - lmf_offset);
3989 /* r0 is the result from mono_get_lmf_addr () */
3990 ARM_STR_IMM (code, ARMREG_R0, ARMREG_R1, G_STRUCT_OFFSET (MonoLMF, lmf_addr));
3991 /* new_lmf->previous_lmf = *lmf_addr */
3992 ARM_LDR_IMM (code, ARMREG_R2, ARMREG_R0, G_STRUCT_OFFSET (MonoLMF, previous_lmf));
3993 ARM_STR_IMM (code, ARMREG_R2, ARMREG_R1, G_STRUCT_OFFSET (MonoLMF, previous_lmf));
3994 /* *(lmf_addr) = r1 */
3995 ARM_STR_IMM (code, ARMREG_R1, ARMREG_R0, G_STRUCT_OFFSET (MonoLMF, previous_lmf));
3996 /* Skip method (only needed for trampoline LMF frames) */
3997 ARM_STR_IMM (code, ARMREG_SP, ARMREG_R1, G_STRUCT_OFFSET (MonoLMF, ebp));
3998 /* save the current IP */
3999 ARM_MOV_REG_REG (code, ARMREG_R2, ARMREG_PC);
4000 ARM_STR_IMM (code, ARMREG_R2, ARMREG_R1, G_STRUCT_OFFSET (MonoLMF, eip));
4004 code = mono_arch_instrument_prolog (cfg, mono_trace_enter_method, code, TRUE);
4006 cfg->code_len = code - cfg->native_code;
4007 g_assert (cfg->code_len < cfg->code_size);
/*
 * mono_arch_emit_epilog:
 *   Emit the native epilog for CFG's method: pop the saved LMF (when the
 *   method built one), unwind the stack frame, and return to the caller by
 *   popping the callee-saved registers together with PC.
 * NOTE(review): this chunk is a partial extraction; blank lines and some
 * closing braces are elided, so comments below describe only the visible code.
 */
4014 mono_arch_emit_epilog (MonoCompile *cfg)
4016 MonoMethod *method = cfg->method;
4017 int pos, i, rot_amount;
/* conservative upper bound of the code emitted below; used to grow the buffer */
4018 int max_epilog_size = 16 + 20*4;
4021 if (cfg->method->save_lmf)
4022 max_epilog_size += 128;
4024 if (mono_jit_trace_calls != NULL)
4025 max_epilog_size += 50;
4027 if (cfg->prof_options & MONO_PROFILE_ENTER_LEAVE)
4028 max_epilog_size += 50;
/* double the native code buffer until the worst-case epilog fits */
4030 while (cfg->code_len + max_epilog_size > (cfg->code_size - 16)) {
4031 cfg->code_size *= 2;
4032 cfg->native_code = g_realloc (cfg->native_code, cfg->code_size);
4033 mono_jit_stats.code_reallocs++;
4037 * Keep in sync with OP_JMP
4039 code = cfg->native_code + cfg->code_len;
/* emit the leave-method trace hook when tracing is enabled for this method */
4041 if (mono_jit_trace_calls != NULL && mono_trace_eval (method)) {
4042 code = mono_arch_instrument_epilog (cfg, mono_trace_leave_method, code, TRUE);
/* LMF teardown: unlink this frame's MonoLMF from the per-thread LMF list */
4046 if (method->save_lmf) {
4048 /* all but r0-r3, sp and pc */
4049 pos += sizeof (MonoLMF) - (4 * 10);
4051 /* r2 contains the pointer to the current LMF */
4052 code = emit_big_add (code, ARMREG_R2, cfg->frame_reg, cfg->stack_usage - lmf_offset);
4053 /* ip = previous_lmf */
4054 ARM_LDR_IMM (code, ARMREG_IP, ARMREG_R2, G_STRUCT_OFFSET (MonoLMF, previous_lmf));
/* lr = lmf->lmf_addr (address of the thread's LMF list head) */
4056 ARM_LDR_IMM (code, ARMREG_LR, ARMREG_R2, G_STRUCT_OFFSET (MonoLMF, lmf_addr));
4057 /* *(lmf_addr) = previous_lmf */
4058 ARM_STR_IMM (code, ARMREG_IP, ARMREG_LR, G_STRUCT_OFFSET (MonoLMF, previous_lmf));
4059 /* FIXME: speedup: there is no actual need to restore the registers if
4060 * we didn't actually change them (idea from Zoltan).
4063 /* point sp at the registers to restore: 10 is 14 -4, because we skip r0-r3 */
4064 ARM_ADD_REG_IMM8 (code, ARMREG_SP, ARMREG_R2, (sizeof (MonoLMF) - 10 * sizeof (gulong)));
4065 ARM_POP_NWB (code, 0xaff0); /* restore ip to sp and lr to pc */
/* no LMF: unwind the frame by adding stack_usage back onto the stack pointer */
4067 if ((i = mono_arm_is_rotated_imm8 (cfg->stack_usage, &rot_amount)) >= 0) {
4068 ARM_ADD_REG_IMM (code, ARMREG_SP, cfg->frame_reg, i, rot_amount);
/* stack_usage doesn't encode as a rotated imm8: materialize it in ip first.
 * NOTE(review): this path adds to sp while the imm8 path above adds to
 * frame_reg — confirm the two agree when frame_reg != sp. */
4070 code = mono_arm_emit_load_imm (code, ARMREG_IP, cfg->stack_usage);
4071 ARM_ADD_REG_REG (code, ARMREG_SP, ARMREG_SP, ARMREG_IP);
4073 /* FIXME: add v4 thumb interworking support */
/* popping PC together with the callee-saved registers performs the return */
4074 ARM_POP_NWB (code, cfg->used_int_regs | ((1 << ARMREG_SP) | (1 << ARMREG_PC)));
4077 cfg->code_len = code - cfg->native_code;
/* make sure the emitted epilog stayed within the buffer sized above */
4079 g_assert (cfg->code_len < cfg->code_size);
4083 /* remove once throw_exception_by_name is eliminated */
4085 exception_id_by_name (const char *name)
4087 if (strcmp (name, "IndexOutOfRangeException") == 0)
4088 return MONO_EXC_INDEX_OUT_OF_RANGE;
4089 if (strcmp (name, "OverflowException") == 0)
4090 return MONO_EXC_OVERFLOW;
4091 if (strcmp (name, "ArithmeticException") == 0)
4092 return MONO_EXC_ARITHMETIC;
4093 if (strcmp (name, "DivideByZeroException") == 0)
4094 return MONO_EXC_DIVIDE_BY_ZERO;
4095 if (strcmp (name, "InvalidCastException") == 0)
4096 return MONO_EXC_INVALID_CAST;
4097 if (strcmp (name, "NullReferenceException") == 0)
4098 return MONO_EXC_NULL_REF;
4099 if (strcmp (name, "ArrayTypeMismatchException") == 0)
4100 return MONO_EXC_ARRAY_TYPE_MISMATCH;
4101 g_error ("Unknown intrinsic exception %s\n", name);
/*
 * mono_arch_emit_exceptions:
 *   Append the out-of-line exception-throwing stubs to the method's native
 *   code and retarget the in-body branches (MONO_PATCH_INFO_EXC patches) to
 *   them.  Stubs are shared: one per intrinsic exception id per method.
 * NOTE(review): partial extraction — some closing braces/blank lines elided.
 */
4106 mono_arch_emit_exceptions (MonoCompile *cfg)
4108 MonoJumpInfo *patch_info;
/* exc_throw_pos[i]: address of the already-emitted stub for exception id i */
4111 const guint8* exc_throw_pos [MONO_EXC_INTRINS_NUM] = {NULL};
4112 guint8 exc_throw_found [MONO_EXC_INTRINS_NUM] = {0};
4113 int max_epilog_size = 50;
4115 /* count the number of exception infos */
4118 * make sure we have enough space for exceptions
/* reserve 32 bytes for each distinct exception id that needs a stub */
4120 for (patch_info = cfg->patch_info; patch_info; patch_info = patch_info->next) {
4121 if (patch_info->type == MONO_PATCH_INFO_EXC) {
4122 i = exception_id_by_name (patch_info->data.target);
4123 if (!exc_throw_found [i]) {
4124 max_epilog_size += 32;
4125 exc_throw_found [i] = TRUE;
/* grow the native code buffer until the stubs fit */
4130 while (cfg->code_len + max_epilog_size > (cfg->code_size - 16)) {
4131 cfg->code_size *= 2;
4132 cfg->native_code = g_realloc (cfg->native_code, cfg->code_size);
4133 mono_jit_stats.code_reallocs++;
4136 code = cfg->native_code + cfg->code_len;
4138 /* add code to raise exceptions */
4139 for (patch_info = cfg->patch_info; patch_info; patch_info = patch_info->next) {
4140 switch (patch_info->type) {
4141 case MONO_PATCH_INFO_EXC: {
4142 MonoClass *exc_class;
4143 unsigned char *ip = patch_info->ip.i + cfg->native_code;
4145 i = exception_id_by_name (patch_info->data.target);
/* a stub for this exception id was already emitted: just branch to it */
4146 if (exc_throw_pos [i]) {
4147 arm_patch (ip, exc_throw_pos [i]);
4148 patch_info->type = MONO_PATCH_INFO_NONE;
4151 exc_throw_pos [i] = code;
/* retarget the in-body branch to the stub emitted below */
4153 arm_patch (ip, code);
4155 exc_class = mono_class_from_name (mono_defaults.corlib, "System", patch_info->data.name);
4156 g_assert (exc_class);
/* stub: r1 = faulting return address, r0 = type token (loaded pc-relative) */
4158 ARM_MOV_REG_REG (code, ARMREG_R1, ARMREG_LR);
4159 ARM_LDR_IMM (code, ARMREG_R0, ARMREG_PC, 0);
/* reuse this patch entry for the call to the throw helper */
4160 patch_info->type = MONO_PATCH_INFO_INTERNAL_METHOD;
4161 patch_info->data.name = "mono_arch_throw_corlib_exception";
4162 patch_info->ip.i = code - cfg->native_code;
/* the type token is embedded in the code stream right here */
4164 *(guint32*)(gpointer)code = exc_class->type_token;
4174 cfg->code_len = code - cfg->native_code;
4176 g_assert (cfg->code_len < cfg->code_size);
/* Per-thread JIT TLS setup/teardown hooks.  Bodies are not visible in this
 * extraction — presumably empty no-ops on ARM; TODO confirm against the
 * full source. */
4181 mono_arch_setup_jit_tls_data (MonoJitTlsData *tls)
4186 mono_arch_free_jit_tls_data (MonoJitTlsData *tls)
/*
 * mono_arch_emit_this_vret_args:
 *   Add the implicit call arguments: the `this` pointer (when this_reg != -1)
 *   and the valuetype-return address (vt_reg).  Each is moved into a fresh
 *   vreg and bound to its ABI register via mono_call_inst_add_outarg_reg.
 */
4191 mono_arch_emit_this_vret_args (MonoCompile *cfg, MonoCallInst *inst, int this_reg, int this_type, int vt_reg)
/* `this` normally travels in r0 ... */
4194 int this_dreg = ARMREG_R0;
/* ... but shifts to r1 here — presumably when vt_reg occupies r0 for the
 * valuetype return address (the guarding condition is elided in this
 * extraction; TODO confirm). */
4197 this_dreg = ARMREG_R1;
4199 /* add the this argument */
4200 if (this_reg != -1) {
4202 MONO_INST_NEW (cfg, this, OP_MOVE);
4203 this->type = this_type;
4204 this->sreg1 = this_reg;
4205 this->dreg = mono_regstate_next_int (cfg->rs);
4206 mono_bblock_add_inst (cfg->cbb, this);
4207 mono_call_inst_add_outarg_reg (cfg, inst, this->dreg, this_dreg, FALSE);
/* valuetype return buffer address is passed in r0 */
4212 MONO_INST_NEW (cfg, vtarg, OP_MOVE);
4213 vtarg->type = STACK_MP;
4214 vtarg->sreg1 = vt_reg;
4215 vtarg->dreg = mono_regstate_next_int (cfg->rs);
4216 mono_bblock_add_inst (cfg->cbb, vtarg);
4217 mono_call_inst_add_outarg_reg (cfg, inst, vtarg->dreg, ARMREG_R0, FALSE);
/* Architecture hook stubs.  Their bodies are elided in this extraction —
 * presumably trivial (returning NULL/0 or doing nothing) as is usual for
 * these hooks on ARM; TODO confirm against the full source. */
4222 mono_arch_get_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
4228 mono_arch_emit_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
4235 mono_arch_print_tree (MonoInst *tree, int arity)
4240 MonoInst* mono_arch_get_domain_intrinsic (MonoCompile* cfg)
4246 mono_arch_get_thread_intrinsic (MonoCompile* cfg)
4252 mono_arch_get_patch_offset (guint8 *code)
4259 mono_arch_flush_register_windows (void)
/*
 * mono_arch_fixup_jinfo:
 *   Pack the method's frame size into jit_info->used_regs.  The assertion
 *   requires stack_usage to be 4-byte aligned and below 64K words (256KB).
 */
4264 mono_arch_fixup_jinfo (MonoCompile *cfg)
4266 /* max encoded stack usage is 64KB * 4 */
4267 g_assert ((cfg->stack_usage & ~(0xffff << 2)) == 0);
/* since the low two bits are zero, this is (stack_usage >> 2) << 16:
 * the word-granular frame size lands in bits 16..31 of used_regs */
4268 cfg->jit_info->used_regs |= cfg->stack_usage << 14;
4271 #ifdef MONO_ARCH_HAVE_IMT
/*
 * mono_arch_emit_imt_argument:
 *   Arrange for the IMT/method argument to be passed in ARMREG_V5 at the
 *   call site.  Three cases: AOT (load the method via an AOT constant),
 *   generic-sharing (forward the caller-supplied imt_arg), and the normal
 *   JIT case (embed the method pointer as a constant).
 */
4274 mono_arch_emit_imt_argument (MonoCompile *cfg, MonoCallInst *call, MonoInst *imt_arg)
4276 if (cfg->compile_aot) {
4277 int method_reg = mono_regstate_next_int (cfg->rs);
4280 call->dynamic_imt_arg = TRUE;
/* materialize the method as an AOT constant patched at load time */
4282 MONO_INST_NEW (cfg, ins, OP_AOTCONST);
4283 ins->dreg = method_reg;
4284 ins->inst_p0 = call->method;
4285 ins->inst_c1 = MONO_PATCH_INFO_METHODCONST;
4286 MONO_ADD_INS (cfg->cbb, ins);
4288 mono_call_inst_add_outarg_reg (cfg, call, method_reg, ARMREG_V5, FALSE);
4289 } else if (cfg->generic_context) {
4291 /* Always pass in a register for simplicity */
4292 call->dynamic_imt_arg = TRUE;
4294 cfg->uses_rgctx_reg = TRUE;
4297 mono_call_inst_add_outarg_reg (cfg, call, imt_arg->dreg, ARMREG_V5, FALSE);
/* plain JIT case: the method pointer is a compile-time constant */
4300 int method_reg = mono_alloc_preg (cfg);
4302 MONO_INST_NEW (cfg, ins, OP_PCONST);
4303 ins->inst_p0 = call->method;
4304 ins->dreg = method_reg;
4305 MONO_ADD_INS (cfg->cbb, ins);
4307 mono_call_inst_add_outarg_reg (cfg, call, method_reg, ARMREG_V5, FALSE);
/*
 * mono_arch_find_imt_method:
 *   Recover the IMT method for a call site: the method pointer is embedded
 *   in the code stream immediately after the pc-relative ldr that loaded it.
 *   A zero word means AOT-compiled code, where the method travels in V5.
 * NOTE(review): "LDC" in the warning text below looks like a typo for "LDR"
 * (the check is IS_LDR_PC) — confirm before changing the message.
 */
4313 mono_arch_find_imt_method (gpointer *regs, guint8 *code)
4315 guint32 *code_ptr = (guint32*)code;
4317 /* The IMT value is stored in the code stream right after the LDC instruction. */
4318 if (!IS_LDR_PC (code_ptr [0])) {
4319 g_warning ("invalid code stream, instruction before IMT value is not a LDC in %s() (code %p value 0: 0x%x -1: 0x%x -2: 0x%x)", __FUNCTION__, code, code_ptr [2], code_ptr [1], code_ptr [0]);
4320 g_assert (IS_LDR_PC (code_ptr [0]));
4322 if (code_ptr [1] == 0)
4323 /* This is AOTed code, the IMT method is in V5 */
4324 return (MonoMethod*)regs [ARMREG_V5];
4326 return (MonoMethod*) code_ptr [1];
/* Recover the `this` argument of a call from the saved register file by
 * delegating to the generic helper (first integer argument register). */
4330 mono_arch_find_this_argument (gpointer *regs, MonoMethod *method, MonoGenericSharingContext *gsctx)
4332 return mono_arch_get_this_arg_from_call (gsctx, mono_method_signature (method), (gssize*)regs, NULL);
/* For a static rgctx call the vtable travels in the dedicated RGCTX
 * register; just read it back from the saved register file. */
4336 mono_arch_find_static_call_vtable (gpointer *regs, guint8 *code)
4338 return (MonoVTable*) regs [MONO_ARCH_RGCTX_REG];
/* Sizing constants (in bytes, 4 bytes per ARM instruction) for the code
 * chunks emitted by mono_arch_build_imt_thunk below; DISTANCE gives a
 * signed byte distance between two code addresses. */
4341 #define ENABLE_WRONG_METHOD_CHECK 0
4342 #define BASE_SIZE (6 * 4)
4343 #define BSEARCH_ENTRY_SIZE (4 * 4)
4344 #define CMP_SIZE (3 * 4)
4345 #define BRANCH_SIZE (1 * 4)
4346 #define CALL_SIZE (2 * 4)
4347 #define WMC_SIZE (5 * 4)
4348 #define DISTANCE(A, B) (((gint32)(B)) - ((gint32)(A)))
4351 arm_emit_value_and_patch_ldr (arminstr_t *code, arminstr_t *target, guint32 value)
4353 guint32 delta = DISTANCE (target, code);
4355 g_assert (delta >= 0 && delta <= 0xFFF);
4356 *target = *target | delta;
/*
 * mono_arch_build_imt_thunk:
 *   Build the per-vtable IMT dispatch thunk: a sequence of compare/branch
 *   chunks (with binary-search nodes) that matches the incoming IMT method
 *   against the entries and jumps through the right vtable slot.  Constants
 *   (method pointers, the vtable) are embedded in the code stream and
 *   reached via pc-relative ldrs patched by arm_emit_value_and_patch_ldr.
 * NOTE(review): partial extraction — blank lines/braces elided; comments
 * describe only the visible code.
 */
4362 mono_arch_build_imt_thunk (MonoVTable *vtable, MonoDomain *domain, MonoIMTCheckItem **imt_entries, int count)
4364 int size, i, extra_space = 0;
4365 arminstr_t *code, *start, *vtable_target = NULL;
/* first pass: compute each item's chunk size so we can reserve the buffer */
4368 for (i = 0; i < count; ++i) {
4369 MonoIMTCheckItem *item = imt_entries [i];
4370 if (item->is_equals) {
/* the final ldr into pc must reach the vtable slot with a 12-bit offset */
4371 g_assert (arm_is_imm12 (DISTANCE (vtable, &vtable->vtable[item->vtable_slot])));
4373 if (item->check_target_idx) {
4374 if (!item->compare_done)
4375 item->chunk_size += CMP_SIZE;
4376 item->chunk_size += BRANCH_SIZE;
4378 #if ENABLE_WRONG_METHOD_CHECK
4379 item->chunk_size += WMC_SIZE;
4382 item->chunk_size += CALL_SIZE;
/* binary-search node: compare and branch on >= */
4384 item->chunk_size += BSEARCH_ENTRY_SIZE;
4385 imt_entries [item->check_target_idx]->compare_done = TRUE;
4387 size += item->chunk_size;
4390 start = code = mono_code_manager_reserve (domain->code_mp, size);
4393 printf ("building IMT thunk for class %s %s entries %d code size %d code at %p end %p vtable %p\n", vtable->klass->name_space, vtable->klass->name, count, size, start, ((guint8*)start) + size, vtable);
4394 for (i = 0; i < count; ++i) {
4395 MonoIMTCheckItem *item = imt_entries [i];
4396 printf ("method %d (%p) %s vtable slot %p is_equals %d chunk size %d\n", i, item->method, item->method->name, &vtable->vtable [item->vtable_slot], item->is_equals, item->chunk_size);
/* thunk prolog: save r0/r1, load the IMT method embedded after the call-site
 * ldr into r0, and load the vtable address (patched in later) into ip */
4400 ARM_PUSH2 (code, ARMREG_R0, ARMREG_R1);
4401 ARM_LDR_IMM (code, ARMREG_R0, ARMREG_LR, -4);
4402 vtable_target = code;
4403 ARM_LDR_IMM (code, ARMREG_IP, ARMREG_PC, 0);
4405 /* R0 == 0 means we are called from AOT code. In this case, V5 contains the IMT method */
4406 ARM_CMP_REG_IMM8 (code, ARMREG_R0, 0);
4407 ARM_MOV_REG_REG_COND (code, ARMREG_R0, ARMREG_V5, ARMCOND_EQ);
/* second pass: emit one chunk per IMT entry */
4409 for (i = 0; i < count; ++i) {
4410 MonoIMTCheckItem *item = imt_entries [i];
4411 arminstr_t *imt_method = NULL;
4412 item->code_target = (guint8*)code;
4414 if (item->is_equals) {
4415 if (item->check_target_idx) {
/* compare r0 against this entry's method (pc-relative constant) */
4416 if (!item->compare_done) {
4418 ARM_LDR_IMM (code, ARMREG_R1, ARMREG_PC, 0);
4419 ARM_CMP_REG_REG (code, ARMREG_R0, ARMREG_R1);
/* branch target patched to the check_target_idx chunk in the third pass */
4421 item->jmp_code = (guint8*)code;
4422 ARM_B_COND (code, ARMCOND_NE, 0);
/* match: restore r0/r1 and jump through the vtable slot (ldr into pc) */
4424 ARM_POP2 (code, ARMREG_R0, ARMREG_R1);
4425 ARM_LDR_IMM (code, ARMREG_PC, ARMREG_IP, DISTANCE (vtable, &vtable->vtable[item->vtable_slot]));
4427 /*Enable the commented code to assert on wrong method*/
4428 #if ENABLE_WRONG_METHOD_CHECK
4430 ARM_LDR_IMM (code, ARMREG_R1, ARMREG_PC, 0);
4431 ARM_CMP_REG_REG (code, ARMREG_R0, ARMREG_R1);
4432 ARM_B_COND (code, ARMCOND_NE, 1);
4434 ARM_POP2 (code, ARMREG_R0, ARMREG_R1);
4435 ARM_LDR_IMM (code, ARMREG_PC, ARMREG_IP, DISTANCE (vtable, &vtable->vtable[item->vtable_slot]));
4437 #if ENABLE_WRONG_METHOD_CHECK
/* embed this entry's method pointer and patch its pc-relative ldr */
4443 code = arm_emit_value_and_patch_ldr (code, imt_method, (guint32)item->method);
4445 /*must emit after unconditional branch*/
4446 if (vtable_target) {
4447 code = arm_emit_value_and_patch_ldr (code, vtable_target, (guint32)vtable);
4448 item->chunk_size += 4;
4449 vtable_target = NULL;
4452 /*We reserve the space for bsearch IMT values after the first entry with an absolute jump*/
4454 code += extra_space;
/* binary-search node: branch (patched later) when r0 >= this entry's method */
4458 ARM_LDR_IMM (code, ARMREG_R1, ARMREG_PC, 0);
4459 ARM_CMP_REG_REG (code, ARMREG_R0, ARMREG_R1);
4461 item->jmp_code = (guint8*)code;
4462 ARM_B_COND (code, ARMCOND_GE, 0);
/* third pass: resolve forward branches and flush deferred bsearch constants */
4467 for (i = 0; i < count; ++i) {
4468 MonoIMTCheckItem *item = imt_entries [i];
4469 if (item->jmp_code) {
4470 if (item->check_target_idx)
4471 arm_patch (item->jmp_code, imt_entries [item->check_target_idx]->code_target);
4473 if (i > 0 && item->is_equals) {
/* emit the constants of preceding bsearch nodes into this chunk's tail */
4475 arminstr_t *space_start = (arminstr_t*)(item->code_target + item->chunk_size);
4476 for (j = i - 1; j >= 0 && !imt_entries [j]->is_equals; --j) {
4477 space_start = arm_emit_value_and_patch_ldr (space_start, (arminstr_t*)imt_entries [j]->code_target, (guint32)imt_entries [j]->method);
4484 char *buff = g_strdup_printf ("thunk_for_class_%s_%s_entries_%d", vtable->klass->name_space, vtable->klass->name, count);
4485 mono_disassemble_code (NULL, (guint8*)start, size, buff);
/* make the freshly written thunk visible to the instruction stream */
4490 mono_arch_flush_icache ((guint8*)start, size);
4491 mono_stats.imt_thunks_size += code - start;
4493 g_assert (DISTANCE (start, code) <= size);
4500 mono_arch_context_get_int_reg (MonoContext *ctx, int reg)
4502 return ctx->regs [reg];