2 * mini-arm.c: ARM backend for the Mono code generator
5 * Paolo Molaro (lupus@ximian.com)
6 * Dietmar Maurer (dietmar@ximian.com)
8 * (C) 2003 Ximian, Inc.
13 #include <mono/metadata/appdomain.h>
14 #include <mono/metadata/debug-helpers.h>
21 #include "mono/arch/arm/arm-fpa-codegen.h"
22 #elif defined(ARM_FPU_VFP)
23 #include "mono/arch/arm/arm-vfp-codegen.h"
26 /* This mutex protects architecture specific caches */
27 #define mono_mini_arch_lock() EnterCriticalSection (&mini_arch_mutex)
28 #define mono_mini_arch_unlock() LeaveCriticalSection (&mini_arch_mutex)
29 static CRITICAL_SECTION mini_arch_mutex;
/* CPU feature flags, detected at startup (see mono_arch_cpu_optimizazions,
 * which parses /proc/cpuinfo). 0 until detection runs. */
31 static int v5_supported = 0;
32 static int thumb_supported = 0;
36 * floating point support: on ARM it is a mess, there are at least 3
37 * different setups, each of which is binary-incompatible with the others.
38 * 1) FPA: old and ugly, but unfortunately what current distros use
39 * the double binary format has the two words swapped. 8 double registers.
40 * Implemented usually by kernel emulation.
41 * 2) softfloat: the compiler emulates all the fp ops. Usually uses the
42 * ugly swapped double format (I guess a softfloat-vfp exists, too, though).
43 * 3) VFP: the new and actually sensible and useful FP support. Implemented
44 * in HW or kernel-emulated, requires new tools. I think this is what symbian uses.
46 * The plan is to write the FPA support first. softfloat can be tested in a chroot.
48 int mono_exc_esp_offset = 0;
/* Range checks for immediates encodable in ARM load/store and FP
 * load/store instructions (12-bit, 8-bit, and scaled 8-bit FP offsets). */
50 #define arm_is_imm12(v) ((v) > -4096 && (v) < 4096)
51 #define arm_is_imm8(v) ((v) > -256 && (v) < 256)
52 #define arm_is_fpimm8(v) ((v) >= -1020 && (v) <= 1020)
/* Bit patterns used to recognize "ldr pc, [...]" in emitted code:
 * LDR_MASK keeps the condition, instruction-class, B/L bits and Rd field;
 * LDR_PC_VAL is the value those bits take for an unconditional ldr into pc. */
54 #define LDR_MASK ((0xf << ARMCOND_SHIFT) | (3 << 26) | (1 << 22) | (1 << 20) | (15 << 12))
55 #define LDR_PC_VAL ((ARMCOND_AL << ARMCOND_SHIFT) | (1 << 26) | (0 << 22) | (1 << 20) | (15 << 12))
56 #define IS_LDR_PC(val) (((val) & LDR_MASK) == LDR_PC_VAL)
/* Exact encodings of "add lr, pc, #4" and "mov lr, pc", matched when
 * scanning back through a call sequence (see mono_arch_get_vcall_slot). */
58 #define ADD_LR_PC_4 ((ARMCOND_AL << ARMCOND_SHIFT) | (1 << 25) | (1 << 23) | (ARMREG_PC << 16) | (ARMREG_LR << 12) | 4)
59 #define MOV_LR_PC ((ARMCOND_AL << ARMCOND_SHIFT) | (1 << 24) | (0xa << 20) | (ARMREG_LR << 12) | ARMREG_PC)
62 void mini_emit_memcpy2 (MonoCompile *cfg, int destreg, int doffset, int srcreg, int soffset, int size, int align);
/*
 * mono_arch_regname:
 * Return a human-readable name for integer register number REG.
 * NOTE(review): this chunk is a fragmentary extract (interior lines are
 * missing); the visible table lists r0-lr and the bounds check guards a
 * 16-entry lookup — presumably "unknown" is returned otherwise; confirm
 * against the full source.
 */
65 mono_arch_regname (int reg)
67 static const char * rnames[] = {
68 "arm_r0", "arm_r1", "arm_r2", "arm_r3", "arm_v1",
69 "arm_v2", "arm_v3", "arm_v4", "arm_v5", "arm_v6",
70 "arm_v7", "arm_fp", "arm_ip", "arm_sp", "arm_lr",
73 if (reg >= 0 && reg < 16)
/*
 * mono_arch_fregname:
 * Return a human-readable name for floating-point register number REG.
 * The visible table names f0..f29 and the bounds check allows 0..31;
 * NOTE(review): fragmentary extract — the f30/f31 entries and the
 * fallback return are not visible here.
 */
79 mono_arch_fregname (int reg)
81 static const char * rnames[] = {
82 "arm_f0", "arm_f1", "arm_f2", "arm_f3", "arm_f4",
83 "arm_f5", "arm_f6", "arm_f7", "arm_f8", "arm_f9",
84 "arm_f10", "arm_f11", "arm_f12", "arm_f13", "arm_f14",
85 "arm_f15", "arm_f16", "arm_f17", "arm_f18", "arm_f19",
86 "arm_f20", "arm_f21", "arm_f22", "arm_f23", "arm_f24",
87 "arm_f25", "arm_f26", "arm_f27", "arm_f28", "arm_f29",
90 if (reg >= 0 && reg < 32)
/*
 * emit_big_add:
 * Emit code computing DREG = SREG + IMM for an arbitrary 32-bit IMM.
 * If IMM fits an ARM rotated 8-bit immediate, a single ADD is emitted;
 * otherwise IMM is materialized into DREG first (hence dreg != sreg)
 * and added to SREG. Returns the advanced code pointer.
 */
96 emit_big_add (guint8 *code, int dreg, int sreg, int imm)
99 if ((imm8 = mono_arm_is_rotated_imm8 (imm, &rot_amount)) >= 0) {
100 ARM_ADD_REG_IMM (code, dreg, sreg, imm8, rot_amount);
103 g_assert (dreg != sreg);
104 code = mono_arm_emit_load_imm (code, dreg, imm);
105 ARM_ADD_REG_REG (code, dreg, dreg, sreg);
/*
 * emit_memcpy:
 * Emit a word-wise copy of SIZE bytes from SREG+SOFFSET to DREG+DOFFSET.
 * Large copies (> 4 words) use a counted loop clobbering r0-r3 (safe only
 * for incoming stack args, as noted below); smaller copies use unrolled
 * ldr/str pairs through LR, rebasing into r0/r1 first when the offsets do
 * not fit a 12-bit immediate. NOTE(review): fragmentary extract — the
 * word-loop bodies and the sub-word tail handling are not fully visible;
 * the final assert implies only word-multiple sizes reach the end.
 */
110 emit_memcpy (guint8 *code, int size, int dreg, int doffset, int sreg, int soffset)
112 /* we can use r0-r3, since this is called only for incoming args on the stack */
113 if (size > sizeof (gpointer) * 4) {
115 code = emit_big_add (code, ARMREG_R0, sreg, soffset);
116 code = emit_big_add (code, ARMREG_R1, dreg, doffset);
/* r2 counts remaining bytes; loop copies one word per iteration */
117 start_loop = code = mono_arm_emit_load_imm (code, ARMREG_R2, size);
118 ARM_LDR_IMM (code, ARMREG_R3, ARMREG_R0, 0);
119 ARM_STR_IMM (code, ARMREG_R3, ARMREG_R1, 0);
120 ARM_ADD_REG_IMM8 (code, ARMREG_R0, ARMREG_R0, 4);
121 ARM_ADD_REG_IMM8 (code, ARMREG_R1, ARMREG_R1, 4);
122 ARM_SUBS_REG_IMM8 (code, ARMREG_R2, ARMREG_R2, 4);
/* branch back while r2 != 0; target patched immediately below */
123 ARM_B_COND (code, ARMCOND_NE, 0);
124 arm_patch (code - 4, start_loop);
127 if (arm_is_imm12 (doffset) && arm_is_imm12 (doffset + size) &&
128 arm_is_imm12 (soffset) && arm_is_imm12 (soffset + size)) {
130 ARM_LDR_IMM (code, ARMREG_LR, sreg, soffset);
131 ARM_STR_IMM (code, ARMREG_LR, dreg, doffset);
/* offsets too large for imm12: rebase src/dst into r0/r1 */
137 code = emit_big_add (code, ARMREG_R0, sreg, soffset);
138 code = emit_big_add (code, ARMREG_R1, dreg, doffset);
139 doffset = soffset = 0;
141 ARM_LDR_IMM (code, ARMREG_LR, ARMREG_R0, soffset);
142 ARM_STR_IMM (code, ARMREG_LR, ARMREG_R1, doffset);
148 g_assert (size == 0);
/*
 * emit_call_reg:
 * Emit an indirect call through REG. Uses BLX where available; otherwise
 * falls back to the pre-v5 sequence of saving the return address in LR
 * ("mov lr, pc") and jumping via "mov pc, reg". NOTE(review): the
 * selection condition (presumably v5_supported) is not visible in this
 * fragmentary extract.
 */
153 emit_call_reg (guint8 *code, int reg)
156 ARM_BLX_REG (code, reg);
158 ARM_MOV_REG_REG (code, ARMREG_LR, ARMREG_PC);
162 ARM_MOV_REG_REG (code, ARMREG_PC, reg);
/*
 * emit_call_seq:
 * Emit a call whose target is patched in later. For dynamic methods the
 * target is loaded from an inline literal pool slot (initialized to NULL
 * here, filled by the patcher) and called through IP; NOTE(review): the
 * non-dynamic path is not visible in this fragmentary extract.
 */
168 emit_call_seq (MonoCompile *cfg, guint8 *code)
170 if (cfg->method->dynamic) {
171 ARM_LDR_IMM (code, ARMREG_IP, ARMREG_PC, 0);
/* inline literal slot holding the call target; NULL until patched */
173 *(gpointer*)code = NULL;
175 code = emit_call_reg (code, ARMREG_IP);
/*
 * emit_move_return_value:
 * After a call instruction, move the hardware return value into the
 * destination vreg of INS. The visible case handles FP calls under FPA:
 * the result arrives in F0 and is copied to ins->dreg if different.
 * NOTE(review): fragmentary extract — the integer/long cases are missing.
 */
183 emit_move_return_value (MonoCompile *cfg, MonoInst *ins, guint8 *code)
185 switch (ins->opcode) {
188 case OP_FCALL_MEMBASE:
190 if (ins->dreg != ARM_FPA_F0)
191 ARM_MVFD (code, ins->dreg, ARM_FPA_F0);
200 * mono_arch_get_argument_info:
201 * @csig: a method signature
202 * @param_count: the number of parameters to consider
203 * @arg_info: an array to store the result infos
205 * Gathers information on parameters such as size, alignment and
206 * padding. arg_info should be large enough to hold param_count + 1 entries.
208 * Returns the size of the activation frame.
/*
 * See the doc comment above: gathers per-parameter size/alignment/padding
 * into arg_info[0..param_count] and returns the activation frame size.
 * arg_info[0] describes the "this"/return area; entry k+1 describes
 * parameter k. NOTE(review): fragmentary extract — offset bookkeeping
 * lines are missing between the visible statements.
 */
211 mono_arch_get_argument_info (MonoMethodSignature *csig, int param_count, MonoJitArgumentInfo *arg_info)
213 int k, frame_size = 0;
214 guint32 size, align, pad;
/* a struct return is passed as a hidden pointer, occupying one slot */
217 if (MONO_TYPE_ISSTRUCT (csig->ret)) {
218 frame_size += sizeof (gpointer);
222 arg_info [0].offset = offset;
225 frame_size += sizeof (gpointer);
229 arg_info [0].size = frame_size;
231 for (k = 0; k < param_count; k++) {
232 size = mini_type_stack_size_full (NULL, csig->params [k], &align, csig->pinvoke);
234 /* ignore alignment for now */
/* pad frame_size up to the parameter's alignment and record the pad */
237 frame_size += pad = (align - (frame_size & (align - 1))) & (align - 1);
238 arg_info [k].pad = pad;
240 arg_info [k + 1].pad = 0;
241 arg_info [k + 1].size = size;
243 arg_info [k + 1].offset = offset;
/* final padding brings the frame to MONO_ARCH_FRAME_ALIGNMENT */
247 align = MONO_ARCH_FRAME_ALIGNMENT;
248 frame_size += pad = (align - (frame_size & (align - 1))) & (align - 1);
249 arg_info [k].pad = pad;
/*
 * decode_vcall_slot_from_ldr:
 * Decode an "ldr pc, [rN, #off]" instruction word: extract the base
 * register (bits 16-19) and the 12-bit offset, negating the offset when
 * the U bit (bit 23) is clear. Stores the offset in *displacement and
 * NOTE(review) presumably returns the base register's saved value from
 * REGS — the return statement is not visible in this fragmentary extract.
 */
255 decode_vcall_slot_from_ldr (guint32 ldr, gpointer *regs, int *displacement)
259 reg = (ldr >> 16 ) & 0xf;
260 offset = ldr & 0xfff;
261 if (((ldr >> 23) & 1) == 0) /*U bit, 0 means negative and 1 positive*/
263 /*g_print ("found vcall at r%d + %d for code at %p 0x%x\n", reg, offset, code, *code);*/
266 *displacement = offset;
/*
 * mono_arch_get_vcall_slot:
 * Given the return address CODE_PTR of a call, scan the preceding
 * instruction words to recognize an indirect (vtable/interface) call and
 * decode which register + displacement held the slot. Direct branches
 * are ignored (NOTE(review): the fall-through return is not visible in
 * this fragmentary extract).
 */
271 mono_arch_get_vcall_slot (guint8 *code_ptr, gpointer *regs, int *displacement)
273 guint32* code = (guint32*)code_ptr;
275 /* Locate the address of the method-specific trampoline. The call using
276 the vtable slot that took the processing flow to 'arch_create_jit_trampoline'
277 looks something like this:
286 The call sequence could be also:
289 function pointer literal
293 Note that on ARM5+ we can use one instruction instead of the last two.
294 Therefore, we need to locate the 'ldr rA' instruction to know which
295 register was used to hold the method addrs.
298 /* This is the instruction after "ldr pc, xxx", "mov pc, xxx" or "bl xxx" could be either the IMT value or some other instruction*/
301 /* Three possible code sequences can happen here:
305 * ldr pc, [rX - #offset]
311 * ldr pc, [rX - #offset]
313 * direct branch with bl:
317 * direct branch with mov:
321 * We only need to identify interface and virtual calls, the others can be ignored.
/* pattern: "add lr, pc, #4" followed by "ldr pc, [...]" one word back */
324 if (IS_LDR_PC (code [-1]) && code [-2] == ADD_LR_PC_4)
325 return decode_vcall_slot_from_ldr (code [-1], regs, displacement)
326;
/* pattern: "mov lr, pc" followed by "ldr pc, [...]" at the return site */
327 if (IS_LDR_PC (code [0]) && code [-1] == MOV_LR_PC)
328 return decode_vcall_slot_from_ldr (code [0], regs, displacement);
/*
 * mono_arch_get_vcall_slot_addr:
 * Convenience wrapper: resolve the base pointer and displacement of the
 * vtable slot used by the call at CODE and return the slot's address
 * (base + displacement). NOTE(review): fragmentary extract — the NULL
 * check on vt before the addition is not visible here.
 */
334 mono_arch_get_vcall_slot_addr (guint8* code, gpointer *regs)
338 vt = mono_arch_get_vcall_slot (code, regs, &displacement);
341 return (gpointer*)((char*)vt + displacement);
344 #define MAX_ARCH_DELEGATE_PARAMS 3
/*
 * mono_arch_get_delegate_invoke_impl:
 * Return (and cache) a small thunk that invokes a delegate without going
 * through the generic invoke wrapper. has_target == TRUE: replace the
 * "this" argument with delegate->target and jump to method_ptr (fixed
 * 12-byte sequence). has_target == FALSE: shift up to
 * MAX_ARCH_DELEGATE_PARAMS register args down by one slot and jump.
 * Caches are guarded by the arch mutex. Struct returns and non-regsize
 * params are not supported (NULL is returned — NOTE(review): the return
 * statements themselves are not visible in this fragmentary extract).
 */
347 mono_arch_get_delegate_invoke_impl (MonoMethodSignature *sig, gboolean has_target)
349 guint8 *code, *start;
351 /* FIXME: Support more cases */
352 if (MONO_TYPE_ISSTRUCT (sig->ret))
356 static guint8* cached = NULL;
357 mono_mini_arch_lock ();
359 mono_mini_arch_unlock ();
363 start = code = mono_global_codeman_reserve (12);
365 /* Replace the this argument with the target */
366 ARM_LDR_IMM (code, ARMREG_IP, ARMREG_R0, G_STRUCT_OFFSET (MonoDelegate, method_ptr));
367 ARM_LDR_IMM (code, ARMREG_R0, ARMREG_R0, G_STRUCT_OFFSET (MonoDelegate, target));
368 ARM_MOV_REG_REG (code, ARMREG_PC, ARMREG_IP);
370 g_assert ((code - start) <= 12);
372 mono_arch_flush_icache (code, 12);
374 mono_mini_arch_unlock ();
/* no-target case: one cached thunk per parameter count */
377 static guint8* cache [MAX_ARCH_DELEGATE_PARAMS + 1] = {NULL};
380 if (sig->param_count > MAX_ARCH_DELEGATE_PARAMS)
382 for (i = 0; i < sig->param_count; ++i)
383 if (!mono_is_regsize_var (sig->params [i]))
386 mono_mini_arch_lock ();
387 code = cache [sig->param_count];
389 mono_mini_arch_unlock ();
/* 2 fixed instructions + one reg-to-reg move per parameter */
393 size = 8 + sig->param_count * 4;
394 start = code = mono_global_codeman_reserve (size);
396 ARM_LDR_IMM (code, ARMREG_IP, ARMREG_R0, G_STRUCT_OFFSET (MonoDelegate, method_ptr));
397 /* slide down the arguments */
398 for (i = 0; i < sig->param_count; ++i) {
399 ARM_MOV_REG_REG (code, (ARMREG_R0 + i), (ARMREG_R0 + i + 1));
401 ARM_MOV_REG_REG (code, ARMREG_PC, ARMREG_IP);
403 g_assert ((code - start) <= size);
405 mono_arch_flush_icache (code, size);
406 cache [sig->param_count] = start;
407 mono_mini_arch_unlock ();
/*
 * mono_arch_get_this_arg_from_call:
 * Recover the "this" argument from the saved register state of a call.
 * Normally it is in R0; when the callee returns a struct, R0 holds the
 * hidden return-buffer pointer and "this" is shifted to R1.
 */
415 mono_arch_get_this_arg_from_call (MonoGenericSharingContext *gsctx, MonoMethodSignature *sig, gssize *regs, guint8 *code)
417 /* FIXME: handle returning a struct */
418 if (MONO_TYPE_ISSTRUCT (sig->ret))
419 return (gpointer)regs [ARMREG_R1];
420 return (gpointer)regs [ARMREG_R0];
424 * Initialize the cpu to execute managed code.
427 mono_arch_cpu_init (void)
432 * Initialize architecture specific code.
435 mono_arch_init (void)
/* creates the mutex guarding the arch-specific caches (delegate thunks etc.) */
437 InitializeCriticalSection (&mini_arch_mutex);
441 * Cleanup architecture specific code.
444 mono_arch_cleanup (void)
449 * This function returns the optimizations supported on this cpu.
/*
 * Also performs CPU feature detection by parsing /proc/cpuinfo: the
 * "Processor" line's "(vN" token sets v5_supported for v5/v6/v7 cores,
 * and a "thumb" entry in the "Features" line sets thumb_supported.
 * NOTE(review): fragmentary extract — the fopen NULL check and fclose
 * are not visible here; confirm the full source guards the file handle.
 */
452 mono_arch_cpu_optimizazions (guint32 *exclude_mask)
456 thumb_supported = TRUE;
461 FILE *file = fopen ("/proc/cpuinfo", "r");
463 while ((line = fgets (buf, 512, file))) {
464 if (strncmp (line, "Processor", 9) == 0) {
465 char *ver = strstr (line, "(v");
466 if (ver && (ver [2] == '5' || ver [2] == '6' || ver [2] == '7')) {
471 if (strncmp (line, "Features", 8) == 0) {
472 char *th = strstr (line, "thumb");
474 thumb_supported = TRUE;
482 /*printf ("features: v5: %d, thumb: %d\n", v5_supported, thumb_supported);*/
486 /* no arm-specific optimizations yet */
/*
 * is_regsize_var:
 * TRUE when a variable of type T fits in a single 32-bit integer
 * register (ints, pointers, references, non-valuetype generic insts).
 * NOTE(review): fragmentary extract — the byref check, the int cases,
 * and the TRUE/FALSE returns between the visible labels are missing.
 */
492 is_regsize_var (MonoType *t) {
495 t = mini_type_get_underlying_type (NULL, t);
502 case MONO_TYPE_FNPTR:
504 case MONO_TYPE_OBJECT:
505 case MONO_TYPE_STRING:
506 case MONO_TYPE_CLASS:
507 case MONO_TYPE_SZARRAY:
508 case MONO_TYPE_ARRAY:
510 case MONO_TYPE_GENERICINST:
511 if (!mono_type_generic_inst_is_valuetype (t))
514 case MONO_TYPE_VALUETYPE:
/*
 * mono_arch_get_allocatable_int_vars:
 * Build the list of variables eligible for global register allocation:
 * live (non-empty use range), non-volatile, non-indirect locals/args of
 * register size, sorted by mono_varlist_insert_sorted.
 */
521 mono_arch_get_allocatable_int_vars (MonoCompile *cfg)
526 for (i = 0; i < cfg->num_varinfo; i++) {
527 MonoInst *ins = cfg->varinfo [i];
528 MonoMethodVar *vmv = MONO_VARINFO (cfg, i);
/* skip vars with an empty live range */
531 if (vmv->range.first_use.abs_pos >= vmv->range.last_use.abs_pos)
534 if (ins->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT) || (ins->opcode != OP_LOCAL && ins->opcode != OP_ARG))
537 /* we can only allocate 32 bit values */
538 if (is_regsize_var (ins->inst_vtype)) {
539 g_assert (MONO_VARINFO (cfg, i)->reg == -1);
540 g_assert (i == vmv->idx);
541 vars = mono_varlist_insert_sorted (cfg, vars, vmv, FALSE);
548 #define USE_EXTRA_TEMPS 0
/*
 * mono_arch_get_global_int_regs:
 * Return the list of callee-saved integer registers available to the
 * global register allocator: V1-V4 always, plus V5 unless it is reserved
 * (AOT compilation or rgctx use). V6/V7 are deliberately left out.
 */
551 mono_arch_get_global_int_regs (MonoCompile *cfg)
554 regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_V1));
555 regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_V2));
556 regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_V3));
557 regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_V4));
558 if (!(cfg->compile_aot || cfg->uses_rgctx_reg))
559 /* V5 is reserved for passing the vtable/rgctx/IMT method */
560 regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_V5));
561 /*regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_V6));*/
562 /*regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_V7));*/
568 * mono_arch_regalloc_cost:
570 * Return the cost, in number of memory references, of the action of
571 * allocating the variable VMV into a register during global register
575 mono_arch_regalloc_cost (MonoCompile *cfg, MonoMethodVar *vmv)
/*
 * mono_arch_flush_icache:
 * Flush the instruction cache for [code, code+size) after emitting or
 * patching code. Uses sys_icache_invalidate on Darwin, otherwise the
 * Linux sys_cacheflush syscall (swi 0x9f0002) via inline assembly.
 */
582 mono_arch_flush_icache (guint8 *code, gint size)
585 sys_icache_invalidate (code, size);
587 __asm __volatile ("mov r0, %0\n"
590 "swi 0x9f0002 @ sys_cacheflush"
592 : "r" (code), "r" (code + size), "r" (0)
593 : "r0", "r1", "r3" );
/* Per-argument calling-convention info (fragment of the ArgInfo struct;
 * the enclosing declaration is not visible in this extract). */
608 guint16 vtsize; /* in param area */
610 guint8 regtype : 4; /* 0 general, 1 basereg, 2 floating point register, see RegType* */
611 guint8 size : 4; /* 1, 2, 4, 8, or regs used by RegTypeStructByVal */
/*
 * add_general:
 * Assign the next argument to a core register or the stack, advancing
 * *gr / *stack_size. SIMPLE distinguishes single-word values from
 * two-word (long/double) ones: a two-word value may straddle r3 and the
 * stack (RegTypeBaseGen) or go fully on the stack once r3 is consumed.
 * NOTE(review): fragmentary extract — the in-register assignments and
 * the stack_size increments are not visible between these branches.
 */
626 add_general (guint *gr, guint *stack_size, ArgInfo *ainfo, gboolean simple)
629 if (*gr > ARMREG_R3) {
630 ainfo->offset = *stack_size;
631 ainfo->reg = ARMREG_SP; /* in the caller */
632 ainfo->regtype = RegTypeBase;
643 /* first word in r3 and the second on the stack */
644 ainfo->offset = *stack_size;
645 ainfo->reg = ARMREG_SP; /* in the caller */
646 ainfo->regtype = RegTypeBaseGen;
648 } else if (*gr >= ARMREG_R3) {
653 ainfo->offset = *stack_size;
654 ainfo->reg = ARMREG_SP; /* in the caller */
655 ainfo->regtype = RegTypeBase;
/*
 * calculate_sizes:
 * Compute the ARM calling-convention layout for SIG: which register or
 * stack slot each argument and the return value occupy. Allocates and
 * returns a CallInfo with one ArgInfo per argument (plus this/ret),
 * and the total stack usage (8-byte aligned). The caller owns the
 * returned g_malloc0'd memory. NOTE(review): fragmentary extract —
 * several case labels and the `n++` bookkeeping lines are missing.
 */
670 calculate_sizes (MonoMethodSignature *sig, gboolean is_pinvoke)
673 int n = sig->hasthis + sig->param_count;
674 MonoType *simpletype;
675 guint32 stack_size = 0;
676 CallInfo *cinfo = g_malloc0 (sizeof (CallInfo) + sizeof (ArgInfo) * n);
680 /* FIXME: handle returning a struct */
681 if (MONO_TYPE_ISSTRUCT (sig->ret)) {
682 add_general (&gr, &stack_size, &cinfo->ret, TRUE);
683 cinfo->struct_ret = ARMREG_R0;
688 add_general (&gr, &stack_size, cinfo->args + n, TRUE);
691 DEBUG(printf("params: %d\n", sig->param_count));
692 for (i = 0; i < sig->param_count; ++i) {
693 if ((sig->call_convention == MONO_CALL_VARARG) && (i == sig->sentinelpos)) {
694 /* Prevent implicit arguments and sig_cookie from
695 being passed in registers */
697 /* Emit the signature cookie just before the implicit arguments */
698 add_general (&gr, &stack_size, &cinfo->sig_cookie, TRUE);
700 DEBUG(printf("param %d: ", i));
701 if (sig->params [i]->byref) {
702 DEBUG(printf("byref\n"));
703 add_general (&gr, &stack_size, cinfo->args + n, TRUE);
707 simpletype = mini_type_get_underlying_type (NULL, sig->params [i]);
708 switch (simpletype->type) {
709 case MONO_TYPE_BOOLEAN:
712 cinfo->args [n].size = 1;
713 add_general (&gr, &stack_size, cinfo->args + n, TRUE);
719 cinfo->args [n].size = 2;
720 add_general (&gr, &stack_size, cinfo->args + n, TRUE);
725 cinfo->args [n].size = 4;
726 add_general (&gr, &stack_size, cinfo->args + n, TRUE);
732 case MONO_TYPE_FNPTR:
733 case MONO_TYPE_CLASS:
734 case MONO_TYPE_OBJECT:
735 case MONO_TYPE_STRING:
736 case MONO_TYPE_SZARRAY:
737 case MONO_TYPE_ARRAY:
739 cinfo->args [n].size = sizeof (gpointer);
740 add_general (&gr, &stack_size, cinfo->args + n, TRUE);
743 case MONO_TYPE_GENERICINST:
744 if (!mono_type_generic_inst_is_valuetype (sig->params [i])) {
745 cinfo->args [n].size = sizeof (gpointer);
746 add_general (&gr, &stack_size, cinfo->args + n, TRUE);
751 case MONO_TYPE_TYPEDBYREF:
752 case MONO_TYPE_VALUETYPE: {
757 if (simpletype->type == MONO_TYPE_TYPEDBYREF) {
758 size = sizeof (MonoTypedRef);
760 MonoClass *klass = mono_class_from_mono_type (sig->params [i]);
762 size = mono_class_native_size (klass, NULL);
764 size = mono_class_value_size (klass, NULL);
766 DEBUG(printf ("load %d bytes struct\n",
767 mono_class_native_size (sig->params [i]->data.klass, NULL)));
/* round the struct up to whole pointer-sized words */
770 align_size += (sizeof (gpointer) - 1);
771 align_size &= ~(sizeof (gpointer) - 1);
772 nwords = (align_size + sizeof (gpointer) -1 ) / sizeof (gpointer);
773 cinfo->args [n].regtype = RegTypeStructByVal;
774 /* FIXME: align gr and stack_size if needed */
775 if (gr > ARMREG_R3) {
776 cinfo->args [n].size = 0;
777 cinfo->args [n].vtsize = nwords;
/* split the struct: first words in r0-r3, remainder on the stack */
779 int rest = ARMREG_R3 - gr + 1;
780 int n_in_regs = rest >= nwords? nwords: rest;
781 cinfo->args [n].size = n_in_regs;
782 cinfo->args [n].vtsize = nwords - n_in_regs;
783 cinfo->args [n].reg = gr;
786 cinfo->args [n].offset = stack_size;
787 /*g_print ("offset for arg %d at %d\n", n, stack_size);*/
788 stack_size += nwords * sizeof (gpointer);
795 cinfo->args [n].size = 8;
796 add_general (&gr, &stack_size, cinfo->args + n, FALSE);
800 g_error ("Can't trampoline 0x%x", sig->params [i]->type);
/* return value placement */
805 simpletype = mini_type_get_underlying_type (NULL, sig->ret);
806 switch (simpletype->type) {
807 case MONO_TYPE_BOOLEAN:
818 case MONO_TYPE_FNPTR:
819 case MONO_TYPE_CLASS:
820 case MONO_TYPE_OBJECT:
821 case MONO_TYPE_SZARRAY:
822 case MONO_TYPE_ARRAY:
823 case MONO_TYPE_STRING:
824 cinfo->ret.reg = ARMREG_R0;
828 cinfo->ret.reg = ARMREG_R0;
832 cinfo->ret.reg = ARMREG_R0;
833 /* FIXME: cinfo->ret.reg = ???;
834 cinfo->ret.regtype = RegTypeFP;*/
836 case MONO_TYPE_GENERICINST:
837 if (!mono_type_generic_inst_is_valuetype (sig->ret)) {
838 cinfo->ret.reg = ARMREG_R0;
842 case MONO_TYPE_VALUETYPE:
844 case MONO_TYPE_TYPEDBYREF:
848 g_error ("Can't handle as return value 0x%x", sig->ret->type);
852 /* align stack size to 8 */
853 DEBUG (printf (" stack size: %d (%d)\n", (stack_size + 15) & ~15, stack_size));
854 stack_size = (stack_size + 7) & ~7;
856 cinfo->stack_usage = stack_size;
862 * Set var information according to the calling convention. arm version.
863 * The locals var stuff should most likely be split in another method.
/*
 * Assigns a frame slot (OP_REGOFFSET off frame_reg) or a register
 * (OP_REGVAR) to the return value, vret address, locals and arguments,
 * growing `offset` upward (MONO_CFG_HAS_SPILLUP). NOTE(review):
 * fragmentary extract — many bookkeeping lines are missing between the
 * visible statements.
 */
866 mono_arch_allocate_vars (MonoCompile *cfg)
868 MonoMethodSignature *sig;
869 MonoMethodHeader *header;
871 int i, offset, size, align, curinst;
872 int frame_reg = ARMREG_FP;
874 /* FIXME: this will change when we use FP as gcc does */
875 cfg->flags |= MONO_CFG_HAS_SPILLUP;
877 /* allow room for the vararg method args: void* and long/double */
878 if (mono_jit_trace_calls != NULL && mono_trace_eval (cfg->method))
879 cfg->param_area = MAX (cfg->param_area, sizeof (gpointer)*8);
881 header = mono_method_get_header (cfg->method);
884 * We use the frame register also for any method that has
885 * exception clauses. This way, when the handlers are called,
886 * the code will reference local variables using the frame reg instead of
887 * the stack pointer: if we had to restore the stack pointer, we'd
888 * corrupt the method frames that are already on the stack (since
889 * filters get called before stack unwinding happens) when the filter
890 * code would call any method (this also applies to finally etc.).
892 if ((cfg->flags & MONO_CFG_HAS_ALLOCA) || header->num_clauses)
893 frame_reg = ARMREG_FP;
894 cfg->frame_reg = frame_reg;
895 if (frame_reg != ARMREG_SP) {
896 cfg->used_int_regs |= 1 << frame_reg;
/* NOTE(review): this condition differs from the one guarding V5 in
 * mono_arch_get_global_int_regs (!(compile_aot || uses_rgctx_reg));
 * verify against the full source which polarity is intended. */
899 if (!cfg->compile_aot || cfg->uses_rgctx_reg)
900 /* V5 is reserved for passing the vtable/rgctx/IMT method */
901 cfg->used_int_regs |= (1 << ARMREG_V5);
903 sig = mono_method_signature (cfg->method);
907 if (!MONO_TYPE_ISSTRUCT (sig->ret)) {
908 /* FIXME: handle long and FP values */
909 switch (mini_type_get_underlying_type (NULL, sig->ret)->type) {
913 cfg->ret->opcode = OP_REGVAR;
914 cfg->ret->inst_c0 = ARMREG_R0;
918 /* local vars are at a positive offset from the stack pointer */
920 * also note that if the function uses alloca, we use FP
921 * to point at the local variables.
923 offset = 0; /* linkage area */
924 /* align the offset to 16 bytes: not sure this is needed here */
926 //offset &= ~(8 - 1);
928 /* add parameter area size for called functions */
929 offset += cfg->param_area;
932 if (cfg->flags & MONO_CFG_HAS_FPOUT)
935 /* allow room to save the return value */
936 if (mono_jit_trace_calls != NULL && mono_trace_eval (cfg->method))
939 /* the MonoLMF structure is stored just below the stack pointer */
941 if (sig->call_convention == MONO_CALL_VARARG) {
945 if (MONO_TYPE_ISSTRUCT (sig->ret)) {
946 inst = cfg->vret_addr;
/* align to pointer size before placing the vret address slot */
947 offset += sizeof(gpointer) - 1;
948 offset &= ~(sizeof(gpointer) - 1);
949 inst->inst_offset = offset;
950 inst->opcode = OP_REGOFFSET;
951 inst->inst_basereg = frame_reg;
952 if (G_UNLIKELY (cfg->verbose_level > 1)) {
953 printf ("vret_addr =");
954 mono_print_ins (cfg->vret_addr);
956 offset += sizeof(gpointer);
957 if (sig->call_convention == MONO_CALL_VARARG)
958 cfg->sig_cookie += sizeof (gpointer);
961 curinst = cfg->locals_start;
962 for (i = curinst; i < cfg->num_varinfo; ++i) {
963 inst = cfg->varinfo [i];
964 if ((inst->flags & MONO_INST_IS_DEAD) || inst->opcode == OP_REGVAR)
967 /* inst->backend.is_pinvoke indicates native sized value types, this is used by the
968 * pinvoke wrappers when they call functions returning structure */
969 if (inst->backend.is_pinvoke && MONO_TYPE_ISSTRUCT (inst->inst_vtype) && inst->inst_vtype->type != MONO_TYPE_TYPEDBYREF) {
971 size = mono_class_native_size (mono_class_from_mono_type (inst->inst_vtype), &ualign);
975 size = mono_type_size (inst->inst_vtype, &align);
977 /* FIXME: if a structure is misaligned, our memcpy doesn't work,
978 * since it loads/stores misaligned words, which don't do the right thing.
980 if (align < 4 && size >= 4)
983 offset &= ~(align - 1);
984 inst->inst_offset = offset;
985 inst->opcode = OP_REGOFFSET;
986 inst->inst_basereg = frame_reg;
988 //g_print ("allocating local %d to %d\n", i, inst->inst_offset);
/* "this" argument, when present */
993 inst = cfg->args [curinst];
994 if (inst->opcode != OP_REGVAR) {
995 inst->opcode = OP_REGOFFSET;
996 inst->inst_basereg = frame_reg;
997 offset += sizeof (gpointer) - 1;
998 offset &= ~(sizeof (gpointer) - 1);
999 inst->inst_offset = offset;
1000 offset += sizeof (gpointer);
1001 if (sig->call_convention == MONO_CALL_VARARG)
1002 cfg->sig_cookie += sizeof (gpointer);
1007 for (i = 0; i < sig->param_count; ++i) {
1008 inst = cfg->args [curinst];
1009 if (inst->opcode != OP_REGVAR) {
1010 inst->opcode = OP_REGOFFSET;
1011 inst->inst_basereg = frame_reg;
1012 size = mono_type_size (sig->params [i], &align);
1013 /* FIXME: if a structure is misaligned, our memcpy doesn't work,
1014 * since it loads/stores misaligned words, which don't do the right thing.
1016 if (align < 4 && size >= 4)
1018 offset += align - 1;
1019 offset &= ~(align - 1);
1020 inst->inst_offset = offset;
1022 if ((sig->call_convention == MONO_CALL_VARARG) && (i < sig->sentinelpos))
1023 cfg->sig_cookie += size;
1028 /* align the offset to 8 bytes */
1033 cfg->stack_offset = offset;
/*
 * mono_arch_create_vars:
 * Create arch-specific compile-time variables. For struct returns a
 * vret_addr argument variable is created to carry the hidden
 * return-buffer pointer.
 */
1037 mono_arch_create_vars (MonoCompile *cfg)
1039 MonoMethodSignature *sig;
1041 sig = mono_method_signature (cfg->method);
1043 if (MONO_TYPE_ISSTRUCT (sig->ret)) {
1044 cfg->vret_addr = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_ARG);
1045 if (G_UNLIKELY (cfg->verbose_level > 1)) {
1046 printf ("vret_addr = ");
1047 mono_print_ins (cfg->vret_addr);
1053 * take the arguments and generate the arch-specific
1054 * instructions to properly call the function in call.
1055 * This includes pushing, moving arguments to the right register
1057 * Issue: who does the spilling if needed, and when?
/*
 * Old-IR (tree) lowering of call arguments: builds an OP_OUTARG* node
 * per argument based on the ArgInfo regtype computed by calculate_sizes,
 * prepending to call->out_args and reversing the list at the end.
 * NOTE(review): fragmentary extract — closing braces and some opcode
 * assignments are missing between the visible statements.
 */
1060 mono_arch_call_opcode (MonoCompile *cfg, MonoBasicBlock* bb, MonoCallInst *call, int is_virtual) {
1062 MonoMethodSignature *sig;
1067 sig = call->signature;
1068 n = sig->param_count + sig->hasthis;
1070 cinfo = calculate_sizes (sig, sig->pinvoke);
1071 if (cinfo->struct_ret)
1072 call->used_iregs |= 1 << cinfo->struct_ret;
1074 for (i = 0; i < n; ++i) {
1075 ainfo = cinfo->args + i;
1076 if ((sig->call_convention == MONO_CALL_VARARG) && (i == sig->sentinelpos)) {
1078 cfg->disable_aot = TRUE;
1080 MONO_INST_NEW (cfg, sig_arg, OP_ICONST);
1081 sig_arg->inst_p0 = call->signature;
1083 MONO_INST_NEW (cfg, arg, OP_OUTARG);
1084 arg->inst_imm = cinfo->sig_cookie.offset;
1085 arg->inst_left = sig_arg;
1087 /* prepend, so they get reversed */
1088 arg->next = call->out_args;
1089 call->out_args = arg;
1091 if (is_virtual && i == 0) {
1092 /* the argument will be attached to the call instruction */
1093 in = call->args [i];
1094 call->used_iregs |= 1 << ainfo->reg;
1096 MONO_INST_NEW (cfg, arg, OP_OUTARG);
1097 in = call->args [i];
1098 arg->cil_code = in->cil_code;
1099 arg->inst_left = in;
1100 arg->inst_right = (MonoInst*)call;
1101 arg->type = in->type;
1102 /* prepend, we'll need to reverse them later */
1103 arg->next = call->out_args;
1104 call->out_args = arg;
1105 if (ainfo->regtype == RegTypeGeneral) {
1106 arg->backend.reg3 = ainfo->reg;
1107 call->used_iregs |= 1 << ainfo->reg;
/* longs and (soft-float) doubles consume a register pair */
1108 if (arg->type == STACK_I8)
1109 call->used_iregs |= 1 << (ainfo->reg + 1);
1110 if (arg->type == STACK_R8) {
1111 if (ainfo->size == 4) {
1112 #ifndef MONO_ARCH_SOFT_FLOAT
1113 arg->opcode = OP_OUTARG_R4;
1116 call->used_iregs |= 1 << (ainfo->reg + 1);
1118 cfg->flags |= MONO_CFG_HAS_FPOUT;
1120 } else if (ainfo->regtype == RegTypeStructByAddr) {
1121 /* FIXME: where is the data allocated? */
1122 arg->backend.reg3 = ainfo->reg;
1123 call->used_iregs |= 1 << ainfo->reg;
1124 g_assert_not_reached ();
1125 } else if (ainfo->regtype == RegTypeStructByVal) {
1127 /* mark the used regs */
1128 for (cur_reg = 0; cur_reg < ainfo->size; ++cur_reg) {
1129 call->used_iregs |= 1 << (ainfo->reg + cur_reg);
1131 arg->opcode = OP_OUTARG_VT;
1132 /* vtsize and offset have just 12 bits of encoding in number of words */
1133 g_assert (((ainfo->vtsize | (ainfo->offset / 4)) & 0xfffff000) == 0);
1134 arg->backend.arg_info = ainfo->reg | (ainfo->size << 4) | (ainfo->vtsize << 8) | ((ainfo->offset / 4) << 20);
1135 } else if (ainfo->regtype == RegTypeBase) {
1136 arg->opcode = OP_OUTARG_MEMBASE;
1137 arg->backend.arg_info = (ainfo->offset << 8) | ainfo->size;
1138 } else if (ainfo->regtype == RegTypeBaseGen) {
1139 call->used_iregs |= 1 << ARMREG_R3;
1140 arg->opcode = OP_OUTARG_MEMBASE;
/* 0xff size marker flags the split r3/stack (BaseGen) case */
1141 arg->backend.arg_info = (ainfo->offset << 8) | 0xff;
1142 if (arg->type == STACK_R8)
1143 cfg->flags |= MONO_CFG_HAS_FPOUT;
1144 } else if (ainfo->regtype == RegTypeFP) {
1145 arg->backend.reg3 = ainfo->reg;
1146 /* FP args are passed in int regs */
1147 call->used_iregs |= 1 << ainfo->reg;
1148 if (ainfo->size == 8) {
1149 arg->opcode = OP_OUTARG_R8;
1150 call->used_iregs |= 1 << (ainfo->reg + 1);
1152 arg->opcode = OP_OUTARG_R4;
1154 cfg->flags |= MONO_CFG_HAS_FPOUT;
1156 g_assert_not_reached ();
1161 * Reverse the call->out_args list.
1164 MonoInst *prev = NULL, *list = call->out_args, *next;
1171 call->out_args = prev;
1173 call->stack_usage = cinfo->stack_usage;
1174 cfg->param_area = MAX (cfg->param_area, cinfo->stack_usage);
1175 cfg->flags |= MONO_CFG_HAS_CALLS;
1177 * should set more info in call, such as the stack space
1178 * used by the args that needs to be added back to esp
/*
 * mono_arch_emit_call:
 * New-IR (linear) lowering of call arguments: emits MOVE/STORE/LOAD
 * instructions per argument according to its ArgInfo regtype, registering
 * out-arg registers via mono_call_inst_add_outarg_reg. Doubles in core
 * registers are shuffled through a scratch slot at sp+(param_area-8)
 * (or via OP_FGETLOW32/OP_FGETHIGH32 under soft-float). NOTE(review):
 * fragmentary extract — #else/#endif lines and closing braces are missing
 * between the visible statements.
 */
1186 mono_arch_emit_call (MonoCompile *cfg, MonoCallInst *call)
1189 MonoMethodSignature *sig;
1193 sig = call->signature;
1194 n = sig->param_count + sig->hasthis;
1196 cinfo = calculate_sizes (sig, sig->pinvoke);
1198 for (i = 0; i < n; ++i) {
1199 ArgInfo *ainfo = cinfo->args + i;
/* t is the managed type of this argument ("this" maps to intptr) */
1202 if (i >= sig->hasthis)
1203 t = sig->params [i - sig->hasthis];
1205 t = &mono_defaults.int_class->byval_arg;
1206 t = mini_type_get_underlying_type (NULL, t);
1208 if ((sig->call_convention == MONO_CALL_VARARG) && (i == sig->sentinelpos)) {
1213 in = call->args [i];
1215 switch (ainfo->regtype) {
1216 case RegTypeGeneral:
1217 if (!t->byref && ((t->type == MONO_TYPE_I8) || (t->type == MONO_TYPE_U8))) {
/* 64-bit value: low word (dreg+1) then high word (dreg+2) */
1218 MONO_INST_NEW (cfg, ins, OP_MOVE);
1219 ins->dreg = mono_alloc_ireg (cfg);
1220 ins->sreg1 = in->dreg + 1;
1221 MONO_ADD_INS (cfg->cbb, ins);
1222 mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, ainfo->reg, FALSE);
1224 MONO_INST_NEW (cfg, ins, OP_MOVE);
1225 ins->dreg = mono_alloc_ireg (cfg);
1226 ins->sreg1 = in->dreg + 2;
1227 MONO_ADD_INS (cfg->cbb, ins);
1228 mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, ainfo->reg + 1, FALSE);
1229 } else if (!t->byref && ((t->type == MONO_TYPE_R8) || (t->type == MONO_TYPE_R4))) {
1230 #ifndef MONO_ARCH_SOFT_FLOAT
1234 if (ainfo->size == 4) {
1235 #ifdef MONO_ARCH_SOFT_FLOAT
1236 /* mono_emit_call_args () have already done the r8->r4 conversion */
1237 /* The converted value is in an int vreg */
1238 MONO_INST_NEW (cfg, ins, OP_MOVE);
1239 ins->dreg = mono_alloc_ireg (cfg);
1240 ins->sreg1 = in->dreg;
1241 MONO_ADD_INS (cfg->cbb, ins);
1242 mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, ainfo->reg, FALSE);
/* hard-float r4: spill to the scratch slot, reload as int */
1244 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORER4_MEMBASE_REG, ARMREG_SP, (cfg->param_area - 8), in->dreg);
1245 creg = mono_alloc_ireg (cfg);
1246 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOAD_MEMBASE, creg, ARMREG_SP, (cfg->param_area - 8));
1247 mono_call_inst_add_outarg_reg (cfg, call, creg, ainfo->reg, FALSE);
1250 #ifdef MONO_ARCH_SOFT_FLOAT
1251 MONO_INST_NEW (cfg, ins, OP_FGETLOW32);
1252 ins->dreg = mono_alloc_ireg (cfg);
1253 ins->sreg1 = in->dreg;
1254 MONO_ADD_INS (cfg->cbb, ins);
1255 mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, ainfo->reg, FALSE);
1257 MONO_INST_NEW (cfg, ins, OP_FGETHIGH32);
1258 ins->dreg = mono_alloc_ireg (cfg);
1259 ins->sreg1 = in->dreg;
1260 MONO_ADD_INS (cfg->cbb, ins);
1261 mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, ainfo->reg + 1, FALSE);
/* hard-float r8: spill both words, reload into the register pair */
1263 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORER8_MEMBASE_REG, ARMREG_SP, (cfg->param_area - 8), in->dreg);
1264 creg = mono_alloc_ireg (cfg);
1265 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOAD_MEMBASE, creg, ARMREG_SP, (cfg->param_area - 8));
1266 mono_call_inst_add_outarg_reg (cfg, call, creg, ainfo->reg, FALSE);
1267 creg = mono_alloc_ireg (cfg);
1268 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOAD_MEMBASE, creg, ARMREG_SP, (cfg->param_area - 8 + 4));
1269 mono_call_inst_add_outarg_reg (cfg, call, creg, ainfo->reg + 1, FALSE);
1272 cfg->flags |= MONO_CFG_HAS_FPOUT;
1274 MONO_INST_NEW (cfg, ins, OP_MOVE);
1275 ins->dreg = mono_alloc_ireg (cfg);
1276 ins->sreg1 = in->dreg;
1277 MONO_ADD_INS (cfg->cbb, ins);
1279 mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, ainfo->reg, FALSE);
1282 case RegTypeStructByAddr:
1285 /* FIXME: where is the data allocated? */
1286 arg->backend.reg3 = ainfo->reg;
1287 call->used_iregs |= 1 << ainfo->reg;
1288 g_assert_not_reached ();
1291 case RegTypeStructByVal:
/* defer to mono_arch_emit_outarg_vt via OP_OUTARG_VT, passing a
 * private copy of the ArgInfo in inst_p1 */
1292 MONO_INST_NEW (cfg, ins, OP_OUTARG_VT);
1293 ins->opcode = OP_OUTARG_VT;
1294 ins->sreg1 = in->dreg;
1295 ins->klass = in->klass;
1296 ins->inst_p0 = call;
1297 ins->inst_p1 = mono_mempool_alloc (cfg->mempool, sizeof (ArgInfo));
1298 memcpy (ins->inst_p1, ainfo, sizeof (ArgInfo));
1299 MONO_ADD_INS (cfg->cbb, ins);
1302 if (!t->byref && ((t->type == MONO_TYPE_I8) || (t->type == MONO_TYPE_U8))) {
1303 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, ARMREG_SP, ainfo->offset, in->dreg);
1304 } else if (!t->byref && ((t->type == MONO_TYPE_R4) || (t->type == MONO_TYPE_R8))) {
1305 if (t->type == MONO_TYPE_R8) {
1306 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORER8_MEMBASE_REG, ARMREG_SP, ainfo->offset, in->dreg);
1308 #ifdef MONO_ARCH_SOFT_FLOAT
1309 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, ARMREG_SP, ainfo->offset, in->dreg);
1311 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORER4_MEMBASE_REG, ARMREG_SP, ainfo->offset, in->dreg);
1315 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, ARMREG_SP, ainfo->offset, in->dreg);
1318 case RegTypeBaseGen:
/* value split between r3 and the stack */
1319 if (!t->byref && ((t->type == MONO_TYPE_I8) || (t->type == MONO_TYPE_U8))) {
1320 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, ARMREG_SP, ainfo->offset, (G_BYTE_ORDER == G_BIG_ENDIAN) ? in->dreg + 1 : in->dreg + 2);
1321 MONO_INST_NEW (cfg, ins, OP_MOVE);
1322 ins->dreg = mono_alloc_ireg (cfg);
1323 ins->sreg1 = G_BYTE_ORDER == G_BIG_ENDIAN ? in->dreg + 2 : in->dreg + 1;
1324 MONO_ADD_INS (cfg->cbb, ins);
1325 mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, ARMREG_R3, FALSE);
1326 } else if (!t->byref && (t->type == MONO_TYPE_R8)) {
1329 #ifdef MONO_ARCH_SOFT_FLOAT
1330 g_assert_not_reached ();
1333 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORER8_MEMBASE_REG, ARMREG_SP, (cfg->param_area - 8), in->dreg);
1334 creg = mono_alloc_ireg (cfg);
1335 mono_call_inst_add_outarg_reg (cfg, call, creg, ARMREG_R3, FALSE);
1336 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOAD_MEMBASE, creg, ARMREG_SP, (cfg->param_area - 8));
1337 creg = mono_alloc_ireg (cfg);
1338 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOAD_MEMBASE, creg, ARMREG_SP, (cfg->param_area - 4));
1339 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, ARMREG_SP, ainfo->offset, creg);
1340 cfg->flags |= MONO_CFG_HAS_FPOUT;
1342 g_assert_not_reached ();
1349 arg->backend.reg3 = ainfo->reg;
1350 /* FP args are passed in int regs */
1351 call->used_iregs |= 1 << ainfo->reg;
1352 if (ainfo->size == 8) {
1353 arg->opcode = OP_OUTARG_R8;
1354 call->used_iregs |= 1 << (ainfo->reg + 1);
1356 arg->opcode = OP_OUTARG_R4;
1359 cfg->flags |= MONO_CFG_HAS_FPOUT;
1363 g_assert_not_reached ();
/* struct return: pass the return-buffer pointer in the ret register */
1367 if (sig->ret && MONO_TYPE_ISSTRUCT (sig->ret)) {
1370 MONO_INST_NEW (cfg, vtarg, OP_MOVE);
1371 vtarg->sreg1 = call->vret_var->dreg;
1372 vtarg->dreg = mono_alloc_preg (cfg);
1373 MONO_ADD_INS (cfg->cbb, vtarg);
1375 mono_call_inst_add_outarg_reg (cfg, call, vtarg->dreg, cinfo->ret.reg, FALSE);
1378 call->stack_usage = cinfo->stack_usage;
/*
 * mono_arch_emit_outarg_vt:
 * Emit the code that passes a value-type argument for an outgoing call.
 * The first ainfo->size machine words of the source are loaded into
 * consecutive registers starting at ainfo->reg; the remaining ovf_size
 * words (if any) are copied onto the outgoing stack area at SP + doffset.
 * NOTE(review): several original lines are elided from this fragment
 * (e.g. the initialization of soffset); comments describe only the
 * visible code.
 */
1384 mono_arch_emit_outarg_vt (MonoCompile *cfg, MonoInst *ins, MonoInst *src)
1386 MonoCallInst *call = (MonoCallInst*)ins->inst_p0;
1387 ArgInfo *ainfo = ins->inst_p1;
1388 int ovf_size = ainfo->vtsize;
1389 int doffset = ainfo->offset;
1390 int i, soffset, dreg;
/* Load the register-passed portion of the struct word by word. */
1393 for (i = 0; i < ainfo->size; ++i) {
1394 dreg = mono_alloc_ireg (cfg);
1395 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, dreg, src->dreg, soffset);
1396 mono_call_inst_add_outarg_reg (cfg, call, dreg, ainfo->reg + i, FALSE);
1397 soffset += sizeof (gpointer);
1399 //g_print ("vt size: %d at R%d + %d\n", doffset, vt->inst_basereg, vt->inst_offset);
/* Copy the part that does not fit in registers to the stack. */
1401 mini_emit_memcpy2 (cfg, ARMREG_SP, doffset, src->dreg, soffset, ovf_size * sizeof (gpointer), 0);
/*
 * mono_arch_emit_setret:
 * Emit the IR that moves the computed return value 'val' into the
 * location mandated by the calling convention: OP_SETLRET for 64-bit
 * integer results (register pair), OP_SETFRET for R8 under soft-float,
 * and a plain OP_MOVE otherwise.
 * NOTE(review): some lines (braces/returns) are elided in this fragment.
 */
1405 mono_arch_emit_setret (MonoCompile *cfg, MonoMethod *method, MonoInst *val)
1407 MonoType *ret = mini_type_get_underlying_type (cfg->generic_sharing_context, mono_method_signature (method)->ret);
/* 64-bit results live in a two-register pair: vreg+1 / vreg+2. */
1410 if (ret->type == MONO_TYPE_I8 || ret->type == MONO_TYPE_U8) {
1413 MONO_INST_NEW (cfg, ins, OP_SETLRET);
1414 ins->sreg1 = val->dreg + 1;
1415 ins->sreg2 = val->dreg + 2;
1416 MONO_ADD_INS (cfg->cbb, ins);
1419 #ifdef MONO_ARCH_SOFT_FLOAT
1420 if (ret->type == MONO_TYPE_R8) {
1423 MONO_INST_NEW (cfg, ins, OP_SETFRET);
1424 ins->dreg = cfg->ret->dreg;
1425 ins->sreg1 = val->dreg;
1426 MONO_ADD_INS (cfg->cbb, ins);
1429 if (ret->type == MONO_TYPE_R4) {
1430 /* Already converted to an int in method_to_ir () */
1431 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, cfg->ret->dreg, val->dreg);
/* Default: a single-word result is just moved to the return vreg. */
1438 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, cfg->ret->dreg, val->dreg);
/*
 * mono_arch_is_inst_imm:
 * NOTE(review): the body is elided in this fragment; presumably it reports
 * whether 'imm' can be used as an inline immediate operand on this
 * backend -- confirm against the full source.
 */
1442 mono_arch_is_inst_imm (gint64 imm)
1448 * Allow tracing to work with this interface (with an optional argument)
/*
 * mono_arch_instrument_prolog:
 * Emit a call to the tracing function 'func' at method entry:
 * r0 = method, r1 = 0 (no frame pointer is passed for now), and the
 * tracer address is loaded into r2 and called indirectly.
 * NOTE(review): declarations and the return are elided in this fragment.
 */
1452 mono_arch_instrument_prolog (MonoCompile *cfg, void *func, void *p, gboolean enable_arguments)
1456 code = mono_arm_emit_load_imm (code, ARMREG_R0, (guint32)cfg->method);
1457 ARM_MOV_REG_IMM8 (code, ARMREG_R1, 0); /* NULL ebp for now */
1458 code = mono_arm_emit_load_imm (code, ARMREG_R2, (guint32)func);
1459 code = emit_call_reg (code, ARMREG_R2);
/*
 * mono_arch_instrument_epilog:
 * Emit a call to the tracing function 'func' at method exit.  The return
 * value (r0, r0/r1 pair, FP value, or struct pointer depending on the
 * method's return type) is spilled to the frame around the tracer call
 * and reloaded afterwards so the method still returns the right value.
 * NOTE(review): several lines (case labels, breaks, braces) are elided
 * in this fragment; comments describe only the visible code.
 */
1472 mono_arch_instrument_epilog (MonoCompile *cfg, void *func, void *p, gboolean enable_arguments)
1475 int save_mode = SAVE_NONE;
1477 MonoMethod *method = cfg->method;
1478 int rtype = mini_type_get_underlying_type (cfg->generic_sharing_context, mono_method_signature (method)->ret)->type;
1479 int save_offset = cfg->param_area;
/* Make sure the code buffer has room; grow it if necessary. */
1483 offset = code - cfg->native_code;
1484 /* we need about 16 instructions */
1485 if (offset > (cfg->code_size - 16 * 4)) {
1486 cfg->code_size *= 2;
1487 cfg->native_code = g_realloc (cfg->native_code, cfg->code_size);
1488 code = cfg->native_code + offset;
/* Pick the save mode based on the method's return type. */
1491 case MONO_TYPE_VOID:
1492 /* special case string .ctor icall */
1493 if (strcmp (".ctor", method->name) && method->klass == mono_defaults.string_class)
1494 save_mode = SAVE_ONE;
1496 save_mode = SAVE_NONE;
1500 save_mode = SAVE_TWO;
1504 save_mode = SAVE_FP;
1506 case MONO_TYPE_VALUETYPE:
1507 save_mode = SAVE_STRUCT;
1510 save_mode = SAVE_ONE;
/* Spill the live return value registers before calling the tracer. */
1514 switch (save_mode) {
1516 ARM_STR_IMM (code, ARMREG_R0, cfg->frame_reg, save_offset);
1517 ARM_STR_IMM (code, ARMREG_R1, cfg->frame_reg, save_offset + 4);
1518 if (enable_arguments) {
1519 ARM_MOV_REG_REG (code, ARMREG_R2, ARMREG_R1);
1520 ARM_MOV_REG_REG (code, ARMREG_R1, ARMREG_R0);
1524 ARM_STR_IMM (code, ARMREG_R0, cfg->frame_reg, save_offset);
1525 if (enable_arguments) {
1526 ARM_MOV_REG_REG (code, ARMREG_R1, ARMREG_R0);
1530 /* FIXME: what reg? */
1531 if (enable_arguments) {
1532 /* FIXME: what reg? */
1536 if (enable_arguments) {
1537 /* FIXME: get the actual address */
1538 ARM_MOV_REG_REG (code, ARMREG_R1, ARMREG_R0);
/* r0 = method, then call the tracer through IP. */
1546 code = mono_arm_emit_load_imm (code, ARMREG_R0, (guint32)cfg->method);
1547 code = mono_arm_emit_load_imm (code, ARMREG_IP, (guint32)func);
1548 code = emit_call_reg (code, ARMREG_IP);
/* Reload the spilled return value registers. */
1550 switch (save_mode) {
1552 ARM_LDR_IMM (code, ARMREG_R0, cfg->frame_reg, save_offset);
1553 ARM_LDR_IMM (code, ARMREG_R1, cfg->frame_reg, save_offset + 4);
1556 ARM_LDR_IMM (code, ARMREG_R0, cfg->frame_reg, save_offset);
1570 * The immediate field for cond branches is big enough for all reasonable methods
/*
 * EMIT_COND_BRANCH_FLAGS: emit a conditional branch to a label or basic
 * block.  The direct-offset fast paths are disabled ("if (0 && ...)"), so
 * a patch-info entry is always recorded and a branch with 0 displacement
 * is emitted, to be fixed up later by arm_patch ().
 */
1572 #define EMIT_COND_BRANCH_FLAGS(ins,condcode) \
1573 if (ins->flags & MONO_INST_BRLABEL) { \
1574 if (0 && ins->inst_i0->inst_c0) { \
1575 ARM_B_COND (code, (condcode), (code - cfg->native_code + ins->inst_i0->inst_c0) & 0xffffff); \
1577 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_LABEL, ins->inst_i0); \
1578 ARM_B_COND (code, (condcode), 0); \
1581 if (0 && ins->inst_true_bb->native_offset) { \
1582 ARM_B_COND (code, (condcode), (code - cfg->native_code + ins->inst_true_bb->native_offset) & 0xffffff); \
1584 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_BB, ins->inst_true_bb); \
1585 ARM_B_COND (code, (condcode), 0); \
1589 #define EMIT_COND_BRANCH(ins,cond) EMIT_COND_BRANCH_FLAGS(ins, branch_cc_table [(cond)])
1591 /* emit an exception if the condition fails
1593 * We assign the extra code used to throw the implicit exceptions
1594 * to cfg->bb_exit as far as the big branch handling is concerned
/* Record an MONO_PATCH_INFO_EXC patch and emit a conditional BL with 0
 * displacement; arm_patch () points it at the throw code later. */
1596 #define EMIT_COND_SYSTEM_EXCEPTION_FLAGS(condcode,exc_name) \
1598 mono_add_patch_info (cfg, code - cfg->native_code, \
1599 MONO_PATCH_INFO_EXC, exc_name); \
1600 ARM_BL_COND (code, (condcode), 0); \
1603 #define EMIT_COND_SYSTEM_EXCEPTION(cond,exc_name) EMIT_COND_SYSTEM_EXCEPTION_FLAGS(branch_cc_table [(cond)], (exc_name))
/*
 * mono_arch_peephole_pass_1:
 * NOTE(review): the body is elided in this fragment; it appears to be
 * empty or minimal on this backend -- confirm against the full source.
 */
1606 mono_arch_peephole_pass_1 (MonoCompile *cfg, MonoBasicBlock *bb)
/*
 * mono_arch_peephole_pass_2:
 * Local peephole optimizations over one basic block: strength-reduce
 * multiplications by 1 or a power of two, forward stored values to
 * subsequent loads from the same [basereg + offset], collapse duplicate
 * loads, and drop redundant register moves.
 * NOTE(review): several original lines (case labels, breaks, braces) are
 * elided in this fragment; comments describe only the visible code.
 */
1611 mono_arch_peephole_pass_2 (MonoCompile *cfg, MonoBasicBlock *bb)
1613 MonoInst *ins, *n, *last_ins = NULL;
1615 MONO_BB_FOR_EACH_INS_SAFE (bb, n, ins) {
1616 switch (ins->opcode) {
1619 /* Already done by an arch-independent pass */
1623 /* remove unnecessary multiplication with 1 */
1624 if (ins->inst_imm == 1) {
1625 if (ins->dreg != ins->sreg1) {
1626 ins->opcode = OP_MOVE;
1628 MONO_DELETE_INS (bb, ins);
/* multiplication by a power of two becomes a left shift */
1632 int power2 = mono_is_power_of_two (ins->inst_imm);
1634 ins->opcode = OP_SHL_IMM;
1635 ins->inst_imm = power2;
1639 case OP_LOAD_MEMBASE:
1640 case OP_LOADI4_MEMBASE:
/* Store-to-load forwarding:
1642 * OP_STORE_MEMBASE_REG reg, offset(basereg)
1643 * OP_LOAD_MEMBASE offset(basereg), reg
 * The load can reuse the just-stored register. */
1645 if (last_ins && (last_ins->opcode == OP_STOREI4_MEMBASE_REG
1646 || last_ins->opcode == OP_STORE_MEMBASE_REG) &&
1647 ins->inst_basereg == last_ins->inst_destbasereg &&
1648 ins->inst_offset == last_ins->inst_offset) {
1649 if (ins->dreg == last_ins->sreg1) {
1650 MONO_DELETE_INS (bb, ins);
1653 //static int c = 0; printf ("MATCHX %s %d\n", cfg->method->name,c++);
1654 ins->opcode = OP_MOVE;
1655 ins->sreg1 = last_ins->sreg1;
/* Duplicate-load elimination:
1659 * Note: reg1 must be different from the basereg in the second load
1660 * OP_LOAD_MEMBASE offset(basereg), reg1
1661 * OP_LOAD_MEMBASE offset(basereg), reg2
 * becomes
1663 * OP_LOAD_MEMBASE offset(basereg), reg1
1664 * OP_MOVE reg1, reg2
 */
1666 } if (last_ins && (last_ins->opcode == OP_LOADI4_MEMBASE
1667 || last_ins->opcode == OP_LOAD_MEMBASE) &&
1668 ins->inst_basereg != last_ins->dreg &&
1669 ins->inst_basereg == last_ins->inst_basereg &&
1670 ins->inst_offset == last_ins->inst_offset) {
1672 if (ins->dreg == last_ins->dreg) {
1673 MONO_DELETE_INS (bb, ins);
1676 ins->opcode = OP_MOVE;
1677 ins->sreg1 = last_ins->dreg;
1680 //g_assert_not_reached ();
/* Constant forwarding from an immediate store:
1684 * OP_STORE_MEMBASE_IMM imm, offset(basereg)
1685 * OP_LOAD_MEMBASE offset(basereg), reg
 * becomes
1687 * OP_STORE_MEMBASE_IMM imm, offset(basereg)
1688 * OP_ICONST reg, imm
 */
1690 } else if (last_ins && (last_ins->opcode == OP_STOREI4_MEMBASE_IMM
1691 || last_ins->opcode == OP_STORE_MEMBASE_IMM) &&
1692 ins->inst_basereg == last_ins->inst_destbasereg &&
1693 ins->inst_offset == last_ins->inst_offset) {
1694 //static int c = 0; printf ("MATCHX %s %d\n", cfg->method->name,c++);
1695 ins->opcode = OP_ICONST;
1696 ins->inst_c0 = last_ins->inst_imm;
1697 g_assert_not_reached (); // check this rule
/* Narrow store-to-load forwarding: the reload of a just-stored byte
 * becomes a sign/zero extension of the stored register. */
1701 case OP_LOADU1_MEMBASE:
1702 case OP_LOADI1_MEMBASE:
1703 if (last_ins && (last_ins->opcode == OP_STOREI1_MEMBASE_REG) &&
1704 ins->inst_basereg == last_ins->inst_destbasereg &&
1705 ins->inst_offset == last_ins->inst_offset) {
1706 ins->opcode = (ins->opcode == OP_LOADI1_MEMBASE) ? OP_ICONV_TO_I1 : OP_ICONV_TO_U1;
1707 ins->sreg1 = last_ins->sreg1;
1710 case OP_LOADU2_MEMBASE:
1711 case OP_LOADI2_MEMBASE:
1712 if (last_ins && (last_ins->opcode == OP_STOREI2_MEMBASE_REG) &&
1713 ins->inst_basereg == last_ins->inst_destbasereg &&
1714 ins->inst_offset == last_ins->inst_offset) {
1715 ins->opcode = (ins->opcode == OP_LOADI2_MEMBASE) ? OP_ICONV_TO_I2 : OP_ICONV_TO_U2;
1716 ins->sreg1 = last_ins->sreg1;
1720 ins->opcode = OP_MOVE;
/* A move onto itself is dead. */
1724 if (ins->dreg == ins->sreg1) {
1725 MONO_DELETE_INS (bb, ins);
/* A move that exactly undoes the previous move is dead:
1729 * OP_MOVE sreg, dreg
1730 * OP_MOVE dreg, sreg
 */
1732 if (last_ins && last_ins->opcode == OP_MOVE &&
1733 ins->sreg1 == last_ins->dreg &&
1734 ins->dreg == last_ins->sreg1) {
1735 MONO_DELETE_INS (bb, ins);
1743 bb->last_ins = last_ins;
1747 * the branch_cc_table should maintain the order of these
/* Maps a Mono condition index to the ARM condition code used by the
 * EMIT_COND_* macros.  NOTE(review): the initializer entries are elided
 * in this fragment. */
1761 branch_cc_table [] = {
/* NEW_INS: allocate a new MonoInst with the given opcode and insert it
 * into the current basic block right before 'ins' (used by the lowering
 * pass to materialize constants ahead of the instruction that needs them). */
1775 #define NEW_INS(cfg,dest,op) do {	\
1776 MONO_INST_NEW ((cfg), (dest), (op)); \
1777 mono_bblock_insert_before_ins (bb, ins, (dest)); \
/*
 * map_to_reg_reg_op:
 * Return the register-register form of an opcode that takes an immediate
 * or a memory offset: *_MEMBASE loads/stores become *_MEMINDEX (offset in
 * a register), and *_MEMBASE_IMM stores become *_MEMBASE_REG (the
 * immediate value is first loaded into a register by the caller).
 * Used by mono_arch_lowering_pass () when an operand does not fit the
 * ARM instruction encoding.
 * NOTE(review): several case labels are elided in this fragment.
 */
1781 map_to_reg_reg_op (int op)
1790 case OP_COMPARE_IMM:
1792 case OP_ICOMPARE_IMM:
1806 case OP_LOAD_MEMBASE:
1807 return OP_LOAD_MEMINDEX;
1808 case OP_LOADI4_MEMBASE:
1809 return OP_LOADI4_MEMINDEX;
1810 case OP_LOADU4_MEMBASE:
1811 return OP_LOADU4_MEMINDEX;
1812 case OP_LOADU1_MEMBASE:
1813 return OP_LOADU1_MEMINDEX;
1814 case OP_LOADI2_MEMBASE:
1815 return OP_LOADI2_MEMINDEX;
1816 case OP_LOADU2_MEMBASE:
1817 return OP_LOADU2_MEMINDEX;
1818 case OP_LOADI1_MEMBASE:
1819 return OP_LOADI1_MEMINDEX;
1820 case OP_STOREI1_MEMBASE_REG:
1821 return OP_STOREI1_MEMINDEX;
1822 case OP_STOREI2_MEMBASE_REG:
1823 return OP_STOREI2_MEMINDEX;
1824 case OP_STOREI4_MEMBASE_REG:
1825 return OP_STOREI4_MEMINDEX;
1826 case OP_STORE_MEMBASE_REG:
1827 return OP_STORE_MEMINDEX;
1828 case OP_STORER4_MEMBASE_REG:
1829 return OP_STORER4_MEMINDEX;
1830 case OP_STORER8_MEMBASE_REG:
1831 return OP_STORER8_MEMINDEX;
/* Immediate stores keep the membase form but take the value from a reg. */
1832 case OP_STORE_MEMBASE_IMM:
1833 return OP_STORE_MEMBASE_REG;
1834 case OP_STOREI1_MEMBASE_IMM:
1835 return OP_STOREI1_MEMBASE_REG;
1836 case OP_STOREI2_MEMBASE_IMM:
1837 return OP_STOREI2_MEMBASE_REG;
1838 case OP_STOREI4_MEMBASE_IMM:
1839 return OP_STOREI4_MEMBASE_REG;
/* Any opcode not listed above cannot be mapped. */
1841 g_assert_not_reached ();
1845 * Remove from the instruction list the instructions that can't be
1846 * represented with very simple instructions with no register
/*
 * mono_arch_lowering_pass:
 * Rewrite instructions whose immediate operands cannot be encoded in an
 * ARM instruction: the immediate is materialized into a fresh vreg with
 * OP_ICONST (or the base register is adjusted with OP_ADD_IMM for FP
 * loads/stores, which have a smaller offset range) and the opcode is
 * switched to its register-register form via map_to_reg_reg_op ().
 * NOTE(review): many original lines are elided in this fragment (case
 * labels, breaks, closing braces); comments describe the visible code.
 */
1850 mono_arch_lowering_pass (MonoCompile *cfg, MonoBasicBlock *bb)
1852 MonoInst *ins, *temp, *last_ins = NULL;
1853 int rot_amount, imm8, low_imm;
1855 /* setup the virtual reg allocator */
1856 if (bb->max_vreg > cfg->rs->next_vreg)
1857 cfg->rs->next_vreg = bb->max_vreg;
1859 MONO_BB_FOR_EACH_INS (bb, ins) {
1861 switch (ins->opcode) {
1865 case OP_COMPARE_IMM:
1866 case OP_ICOMPARE_IMM:
/* If the immediate is not a valid rotated imm8, load it into a vreg
 * and switch to the register-register form of the opcode. */
1880 if ((imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount)) < 0) {
1881 NEW_INS (cfg, temp, OP_ICONST);
1882 temp->inst_c0 = ins->inst_imm;
1883 temp->dreg = mono_regstate_next_int (cfg->rs);
1884 ins->sreg2 = temp->dreg;
1886 ins->opcode = mono_op_imm_to_op (ins->opcode);
1888 ins->opcode = map_to_reg_reg_op (ins->opcode);
/* Multiply-by-immediate: strength-reduce 1/0/power-of-two, otherwise
 * materialize the constant and use OP_IMUL. */
1893 if (ins->inst_imm == 1) {
1894 ins->opcode = OP_MOVE;
1897 if (ins->inst_imm == 0) {
1898 ins->opcode = OP_ICONST;
1902 imm8 = mono_is_power_of_two (ins->inst_imm);
1904 ins->opcode = OP_SHL_IMM;
1905 ins->inst_imm = imm8;
1908 NEW_INS (cfg, temp, OP_ICONST);
1909 temp->inst_c0 = ins->inst_imm;
1910 temp->dreg = mono_regstate_next_int (cfg->rs);
1911 ins->sreg2 = temp->dreg;
1912 ins->opcode = OP_IMUL;
1914 case OP_LOCALLOC_IMM:
1915 NEW_INS (cfg, temp, OP_ICONST);
1916 temp->inst_c0 = ins->inst_imm;
1917 temp->dreg = mono_regstate_next_int (cfg->rs);
1918 ins->sreg1 = temp->dreg;
1919 ins->opcode = OP_LOCALLOC;
/* Word/byte loads: LDR/LDRB take a 12-bit offset. */
1921 case OP_LOAD_MEMBASE:
1922 case OP_LOADI4_MEMBASE:
1923 case OP_LOADU4_MEMBASE:
1924 case OP_LOADU1_MEMBASE:
1925 /* we can do two things: load the immed in a register
1926 * and use an indexed load, or see if the immed can be
1927 * represented as an ad_imm + a load with a smaller offset
1928 * that fits. We just do the first for now, optimize later.
1930 if (arm_is_imm12 (ins->inst_offset))
1932 NEW_INS (cfg, temp, OP_ICONST);
1933 temp->inst_c0 = ins->inst_offset;
1934 temp->dreg = mono_regstate_next_int (cfg->rs);
1935 ins->sreg2 = temp->dreg;
1936 ins->opcode = map_to_reg_reg_op (ins->opcode);
/* Halfword / signed-byte loads (LDRH/LDRSH/LDRSB): 8-bit offset only. */
1938 case OP_LOADI2_MEMBASE:
1939 case OP_LOADU2_MEMBASE:
1940 case OP_LOADI1_MEMBASE:
1941 if (arm_is_imm8 (ins->inst_offset))
1943 NEW_INS (cfg, temp, OP_ICONST);
1944 temp->inst_c0 = ins->inst_offset;
1945 temp->dreg = mono_regstate_next_int (cfg->rs);
1946 ins->sreg2 = temp->dreg;
1947 ins->opcode = map_to_reg_reg_op (ins->opcode);
/* FP loads: no indexed form, so split the offset into a rotated-imm8
 * base adjustment (OP_ADD_IMM) plus a small remaining offset. */
1949 case OP_LOADR4_MEMBASE:
1950 case OP_LOADR8_MEMBASE:
1951 if (arm_is_fpimm8 (ins->inst_offset))
1953 low_imm = ins->inst_offset & 0x1ff;
1954 if ((imm8 = mono_arm_is_rotated_imm8 (ins->inst_offset & ~0x1ff, &rot_amount)) >= 0) {
1955 NEW_INS (cfg, temp, OP_ADD_IMM);
1956 temp->inst_imm = ins->inst_offset & ~0x1ff;
1957 temp->sreg1 = ins->inst_basereg;
1958 temp->dreg = mono_regstate_next_int (cfg->rs);
1959 ins->inst_basereg = temp->dreg;
1960 ins->inst_offset = low_imm;
1963 /* VFP/FPA doesn't have indexed load instructions */
1964 g_assert_not_reached ();
/* Word/byte stores: 12-bit offset, same strategy as the loads above. */
1966 case OP_STORE_MEMBASE_REG:
1967 case OP_STOREI4_MEMBASE_REG:
1968 case OP_STOREI1_MEMBASE_REG:
1969 if (arm_is_imm12 (ins->inst_offset))
1971 NEW_INS (cfg, temp, OP_ICONST);
1972 temp->inst_c0 = ins->inst_offset;
1973 temp->dreg = mono_regstate_next_int (cfg->rs);
1974 ins->sreg2 = temp->dreg;
1975 ins->opcode = map_to_reg_reg_op (ins->opcode);
1977 case OP_STOREI2_MEMBASE_REG:
1978 if (arm_is_imm8 (ins->inst_offset))
1980 NEW_INS (cfg, temp, OP_ICONST);
1981 temp->inst_c0 = ins->inst_offset;
1982 temp->dreg = mono_regstate_next_int (cfg->rs);
1983 ins->sreg2 = temp->dreg;
1984 ins->opcode = map_to_reg_reg_op (ins->opcode);
1986 case OP_STORER4_MEMBASE_REG:
1987 case OP_STORER8_MEMBASE_REG:
1988 if (arm_is_fpimm8 (ins->inst_offset))
1990 low_imm = ins->inst_offset & 0x1ff;
1991 if ((imm8 = mono_arm_is_rotated_imm8 (ins->inst_offset & ~ 0x1ff, &rot_amount)) >= 0 && arm_is_fpimm8 (low_imm)) {
1992 NEW_INS (cfg, temp, OP_ADD_IMM);
1993 temp->inst_imm = ins->inst_offset & ~0x1ff;
1994 temp->sreg1 = ins->inst_destbasereg;
1995 temp->dreg = mono_regstate_next_int (cfg->rs);
1996 ins->inst_destbasereg = temp->dreg;
1997 ins->inst_offset = low_imm;
2000 /*g_print ("fail with: %d (%d, %d)\n", ins->inst_offset, ins->inst_offset & ~0x1ff, low_imm);*/
2001 /* VFP/FPA doesn't have indexed store instructions */
2002 g_assert_not_reached ();
/* Immediate stores: load the value into a vreg and retry, since the
 * offset may also need lowering (hence the goto). */
2004 case OP_STORE_MEMBASE_IMM:
2005 case OP_STOREI1_MEMBASE_IMM:
2006 case OP_STOREI2_MEMBASE_IMM:
2007 case OP_STOREI4_MEMBASE_IMM:
2008 NEW_INS (cfg, temp, OP_ICONST);
2009 temp->inst_c0 = ins->inst_imm;
2010 temp->dreg = mono_regstate_next_int (cfg->rs);
2011 ins->sreg1 = temp->dreg;
2012 ins->opcode = map_to_reg_reg_op (ins->opcode);
2014 goto loop_start; /* make it handle the possibly big ins->inst_offset */
2016 gboolean swap = FALSE;
2019 /* Some fp compares require swapped operands */
2020 g_assert (ins->next);
2021 switch (ins->next->opcode) {
2023 ins->next->opcode = OP_FBLT;
2027 ins->next->opcode = OP_FBLT_UN;
2031 ins->next->opcode = OP_FBGE;
2035 ins->next->opcode = OP_FBGE_UN;
2043 ins->sreg1 = ins->sreg2;
2052 bb->last_ins = last_ins;
2053 bb->max_vreg = cfg->rs->next_vreg;
/*
 * emit_float_to_int:
 * Emit the float-to-integer conversion: on FPA a single ARM_FIXZ, on VFP
 * a TOSIZD/TOUIZD into F0 followed by an FMRS into the integer register.
 * The result is then truncated/extended to 'size' bytes: masking or
 * shift-pairs for unsigned values, shift-left + arithmetic-shift-right
 * for signed values.
 * NOTE(review): preprocessor branches and braces are partially elided in
 * this fragment.
 */
2058 emit_float_to_int (MonoCompile *cfg, guchar *code, int dreg, int sreg, int size, gboolean is_signed)
2060 /* sreg is a float, dreg is an integer reg */
2062 ARM_FIXZ (code, dreg, sreg);
2063 #elif defined(ARM_FPU_VFP)
2065 ARM_TOSIZD (code, ARM_VFP_F0, sreg);
2067 ARM_TOUIZD (code, ARM_VFP_F0, sreg);
2068 ARM_FMRS (code, dreg, ARM_VFP_F0);
/* Unsigned narrowing: mask to 8 bits, or zero-extend 16 bits via shifts. */
2072 ARM_AND_REG_IMM8 (code, dreg, dreg, 0xff);
2073 else if (size == 2) {
2074 ARM_SHL_IMM (code, dreg, dreg, 16);
2075 ARM_SHR_IMM (code, dreg, dreg, 16);
/* Signed narrowing: sign-extend 8 or 16 bits via shift left + SAR. */
2079 ARM_SHL_IMM (code, dreg, dreg, 24);
2080 ARM_SAR_IMM (code, dreg, dreg, 24);
2081 } else if (size == 2) {
2082 ARM_SHL_IMM (code, dreg, dreg, 16);
2083 ARM_SAR_IMM (code, dreg, dreg, 16);
/* Tail of the PatchData struct used by the thunk search (other fields elided). */
2091 const guchar *target;
/* True if 'diff' fits in the +/-32MB displacement range of an ARM branch
 * (24-bit signed word offset: 2^25 bytes each way). */
2096 #define is_call_imm(diff) ((gint)(diff) >= -33554432 && (gint)(diff) <= 33554431)
/*
 * search_thunk_slot:
 * mono_code_manager_foreach () callback: scan one code chunk's thunk area
 * for either an existing 12-byte thunk that already jumps to pdata->target,
 * or a free (all-zero) slot in which to emit a new one, then patch the
 * call site to branch to that thunk.  A chunk is skipped unless the call
 * site can reach its thunk area with a direct branch.
 * NOTE(review): several lines (returns, loop advance, braces) are elided
 * in this fragment.
 */
2099 search_thunk_slot (void *data, int csize, int bsize, void *user_data) {
2100 PatchData *pdata = (PatchData*)user_data;
2101 guchar *code = data;
2102 guint32 *thunks = data;
2103 guint32 *endthunks = (guint32*)(code + bsize);
2105 int difflow, diffhigh;
2107 /* always ensure a call from pdata->code can reach the thunks without further thunks */
2108 difflow = (char*)pdata->code - (char*)thunks;
2109 diffhigh = (char*)pdata->code - (char*)endthunks;
2110 if (!((is_call_imm (thunks) && is_call_imm (endthunks)) || (is_call_imm (difflow) && is_call_imm (diffhigh))))
2114 * The thunk is composed of 3 words:
2115 * load constant from thunks [2] into ARM_IP
2118 * Note that the LR register is already setup
2120 //g_print ("thunk nentries: %d\n", ((char*)endthunks - (char*)thunks)/16);
2121 if ((pdata->found == 2) || (pdata->code >= code && pdata->code <= code + csize)) {
2122 while (thunks < endthunks) {
2123 //g_print ("looking for target: %p at %p (%08x-%08x)\n", pdata->target, thunks, thunks [0], thunks [1]);
/* Reuse an existing thunk whose embedded target matches. */
2124 if (thunks [2] == (guint32)pdata->target) {
2125 arm_patch (pdata->code, (guchar*)thunks);
2126 mono_arch_flush_icache (pdata->code, 4);
2129 } else if ((thunks [0] == 0) && (thunks [1] == 0) && (thunks [2] == 0)) {
2130 /* found a free slot instead: emit thunk */
2131 /* ARMREG_IP is fine to use since this can't be an IMT call
/* Thunk body: load the target from the trailing constant word into IP,
 * then jump (BX when thumb interworking is supported). */
2134 code = (guchar*)thunks;
2135 ARM_LDR_IMM (code, ARMREG_IP, ARMREG_PC, 0);
2136 if (thumb_supported)
2137 ARM_BX (code, ARMREG_IP);
2139 ARM_MOV_REG_REG (code, ARMREG_PC, ARMREG_IP);
2140 thunks [2] = (guint32)pdata->target;
2141 mono_arch_flush_icache ((guchar*)thunks, 12);
2143 arm_patch (pdata->code, (guchar*)thunks);
2144 mono_arch_flush_icache (pdata->code, 4);
2148 /* skip 12 bytes, the size of the thunk */
2152 //g_print ("failed thunk lookup for %p from %p at %p (%d entries)\n", pdata->target, pdata->code, data, count);
/*
 * handle_thunk:
 * Patch a call at 'code' to reach 'target' through a branch thunk when a
 * direct branch cannot span the distance.  The domain's code chunks are
 * searched (under the domain lock) first for a reachable existing thunk
 * and then, in a second pass, for the first free slot.  Aborts if no
 * thunk can be found or created.
 * NOTE(review): some lines (pdata initialization, the retry condition)
 * are elided in this fragment.
 */
2158 handle_thunk (int absolute, guchar *code, const guchar *target) {
2159 MonoDomain *domain = mono_domain_get ();
2163 pdata.target = target;
2164 pdata.absolute = absolute;
2167 mono_domain_lock (domain);
2168 mono_code_manager_foreach (domain->code_mp, search_thunk_slot, &pdata);
2171 /* this uses the first available slot */
2173 mono_code_manager_foreach (domain->code_mp, search_thunk_slot, &pdata);
2175 mono_domain_unlock (domain);
2177 if (pdata.found != 1)
2178 g_print ("thunk failed for %p from %p\n", target, code);
2179 g_assert (pdata.found == 1);
/*
 * arm_patch:
 * Patch the instruction at 'code' so control reaches 'target'.  Direct
 * B/BL instructions get their 24-bit displacement rewritten (upgraded to
 * BLX when the target is Thumb code); when the target is out of the
 * +/-32MB branch range, handle_thunk () redirects through a thunk.
 * Indirect sequences (mov lr,pc / bx ip and blx ip forms) are recognized
 * by rebuilding the expected instruction words and comparing, then the
 * embedded address constant next to the sequence is overwritten.
 * NOTE(review): many lines (declarations, braces, returns) are elided in
 * this fragment; comments describe the visible code.
 */
2183 arm_patch (guchar *code, const guchar *target)
2185 guint32 *code32 = (void*)code;
2186 guint32 ins = *code32;
2187 guint32 prim = (ins >> 25) & 7;
2188 guint32 tval = GPOINTER_TO_UINT (target);
2190 //g_print ("patching 0x%08x (0x%08x) to point to 0x%08x\n", code, ins, target);
/* Direct branch (B/BL): bits 27-25 == 101. */
2191 if (prim == 5) { /* 101b */
2192 /* the diff starts 8 bytes from the branch opcode */
2193 gint diff = target - code - 8;
2195 gint tmask = 0xffffffff;
2196 if (tval & 1) { /* entering thumb mode */
2197 diff = target - 1 - code - 8;
2198 g_assert (thumb_supported);
2199 tbits = 0xf << 28; /* bl->blx bit pattern */
2200 g_assert ((ins & (1 << 24))); /* it must be a bl, not b instruction */
2201 /* this low bit of the displacement is moved to bit 24 in the instruction encoding */
2205 tmask = ~(1 << 24); /* clear the link bit */
2206 /*g_print ("blx to thumb: target: %p, code: %p, diff: %d, mask: %x\n", target, code, diff, tmask);*/
/* Forward branch: displacement must fit in 25 signed bits (+/-32MB). */
2211 if (diff <= 33554431) {
2213 ins = (ins & 0xff000000) | diff;
2215 *code32 = ins | tbits;
2219 /* diff between 0 and -33554432 */
2220 if (diff >= -33554432) {
2222 ins = (ins & 0xff000000) | (diff & ~0xff000000);
2224 *code32 = ins | tbits;
/* Out of direct-branch range: go through a thunk. */
2229 handle_thunk (TRUE, code, target);
2234 * The alternative call sequences look like this:
2236 * ldr ip, [pc] // loads the address constant
2237 * b 1f // jumps around the constant
2238 * address constant embedded in the code
2243 * There are two cases for patching:
2244 * a) at the end of method emission: in this case code points to the start
2245 * of the call sequence
2246 * b) during runtime patching of the call site: in this case code points
2247 * to the mov pc, ip instruction
2249 * We have to handle also the thunk jump code sequence:
2253 * address constant // execution never reaches here
2255 if ((ins & 0x0ffffff0) == 0x12fff10) {
2256 /* Branch and exchange: the address is constructed in a reg
2257 * We can patch BX when the code sequence is the following:
2258 * ldr ip, [pc, #0] ; 0x8
/* Rebuild the expected instruction words to identify the sequence. */
2265 guint8 *emit = (guint8*)ccode;
2266 ARM_LDR_IMM (emit, ARMREG_IP, ARMREG_PC, 0);
2268 ARM_MOV_REG_REG (emit, ARMREG_LR, ARMREG_PC);
2269 ARM_BX (emit, ARMREG_IP);
2271 /*patching from magic trampoline*/
2272 if (ins == ccode [3]) {
2273 g_assert (code32 [-4] == ccode [0]);
2274 g_assert (code32 [-3] == ccode [1]);
2275 g_assert (code32 [-1] == ccode [2]);
2276 code32 [-2] = (guint32)target;
2279 /*patching from JIT*/
2280 if (ins == ccode [0]) {
2281 g_assert (code32 [1] == ccode [1]);
2282 g_assert (code32 [3] == ccode [2]);
2283 g_assert (code32 [4] == ccode [3]);
2284 code32 [2] = (guint32)target;
2287 g_assert_not_reached ();
/* BLX register form (v5+): same idea, shorter sequence. */
2288 } else if ((ins & 0x0ffffff0) == 0x12fff30) {
2296 guint8 *emit = (guint8*)ccode;
2297 ARM_LDR_IMM (emit, ARMREG_IP, ARMREG_PC, 0);
2299 ARM_BLX_REG (emit, ARMREG_IP);
2301 g_assert (code32 [-3] == ccode [0]);
2302 g_assert (code32 [-2] == ccode [1]);
2303 g_assert (code32 [0] == ccode [2]);
2305 code32 [-1] = (guint32)target;
/* Fallback: ldr ip / mov lr,pc / mov pc,ip (or bx ip) sequence. */
2308 guint32 *tmp = ccode;
2309 guint8 *emit = (guint8*)tmp;
2310 ARM_LDR_IMM (emit, ARMREG_IP, ARMREG_PC, 0);
2311 ARM_MOV_REG_REG (emit, ARMREG_LR, ARMREG_PC);
2312 ARM_MOV_REG_REG (emit, ARMREG_PC, ARMREG_IP);
2313 ARM_BX (emit, ARMREG_IP);
2314 if (ins == ccode [2]) {
2315 g_assert_not_reached (); // should be -2 ...
2316 code32 [-1] = (guint32)target;
2319 if (ins == ccode [0]) {
2320 /* handles both thunk jump code and the far call sequence */
2321 code32 [2] = (guint32)target;
2324 g_assert_not_reached ();
2326 // g_print ("patched with 0x%08x\n", ins);
2330 * Return the >= 0 uimm8 value if val can be represented with a byte + rotation
2331 * (with the rotation amount in *rot_amount. rot_amount is already adjusted
2332 * to be used with the emit macros.
2333 * Return -1 otherwise.
/* ARM data-processing immediates are an 8-bit value rotated right by an
 * even amount; try every even rotation.  NOTE(review): the imm8 range
 * check and the returns are elided in this fragment. */
2336 mono_arm_is_rotated_imm8 (guint32 val, gint *rot_amount)
2339 for (i = 0; i < 31; i+= 2) {
2340 res = (val << (32 - i)) | (val >> i);
2343 *rot_amount = i? 32 - i: 0;
2350 * Emits in code a sequence of instructions that load the value 'val'
2351 * into the dreg register. Uses at most 4 instructions.
/* Strategy (visible code): try a single MOV with a rotated imm8, then a
 * single MVN of the complement; otherwise build the value byte by byte
 * with a MOV followed by up to three rotated-immediate ADDs.  An
 * alternative PC-relative constant-pool load appears above but parts of
 * that path are elided in this fragment. */
2354 mono_arm_emit_load_imm (guint8 *code, int dreg, guint32 val)
2356 int imm8, rot_amount;
2358 ARM_LDR_IMM (code, dreg, ARMREG_PC, 0);
2359 /* skip the constant pool */
2365 if ((imm8 = mono_arm_is_rotated_imm8 (val, &rot_amount)) >= 0) {
2366 ARM_MOV_REG_IMM (code, dreg, imm8, rot_amount);
2367 } else if ((imm8 = mono_arm_is_rotated_imm8 (~val, &rot_amount)) >= 0) {
2368 ARM_MVN_REG_IMM (code, dreg, imm8, rot_amount);
/* General case: low byte first, then add each higher byte in place. */
2371 ARM_MOV_REG_IMM8 (code, dreg, (val & 0xFF));
2373 ARM_ADD_REG_IMM (code, dreg, dreg, (val & 0xFF00) >> 8, 24);
2375 if (val & 0xFF0000) {
2376 ARM_ADD_REG_IMM (code, dreg, dreg, (val & 0xFF0000) >> 16, 16);
2378 if (val & 0xFF000000) {
2379 ARM_ADD_REG_IMM (code, dreg, dreg, (val & 0xFF000000) >> 24, 8);
2381 } else if (val & 0xFF00) {
2382 ARM_MOV_REG_IMM (code, dreg, (val & 0xFF00) >> 8, 24);
2383 if (val & 0xFF0000) {
2384 ARM_ADD_REG_IMM (code, dreg, dreg, (val & 0xFF0000) >> 16, 16);
2386 if (val & 0xFF000000) {
2387 ARM_ADD_REG_IMM (code, dreg, dreg, (val & 0xFF000000) >> 24, 8);
2389 } else if (val & 0xFF0000) {
2390 ARM_MOV_REG_IMM (code, dreg, (val & 0xFF0000) >> 16, 16);
2391 if (val & 0xFF000000) {
2392 ARM_ADD_REG_IMM (code, dreg, dreg, (val & 0xFF000000) >> 24, 8);
2395 //g_assert_not_reached ();
2401 * emit_load_volatile_arguments:
2403 * Load volatile arguments from the stack to the original input registers.
2404 * Required before a tail call.
/* Undoes the argument spilling done in the prolog: for each incoming
 * argument, reload it from its stack slot back into the register(s) the
 * calling convention assigned to it.  NOTE(review): many lines (case
 * labels, braces, RegTypeBase/BaseGen bodies) are elided in this
 * fragment. */
2407 emit_load_volatile_arguments (MonoCompile *cfg, guint8 *code)
2409 MonoMethod *method = cfg->method;
2410 MonoMethodSignature *sig;
2415 /* FIXME: Generate intermediate code instead */
2417 sig = mono_method_signature (method);
2419 /* This is the opposite of the code in emit_prolog */
2423 cinfo = calculate_sizes (sig, sig->pinvoke);
/* Struct returns: reload the hidden return-value address register. */
2425 if (MONO_TYPE_ISSTRUCT (sig->ret)) {
2426 ArgInfo *ainfo = &cinfo->ret;
2427 inst = cfg->vret_addr;
2428 g_assert (arm_is_imm12 (inst->inst_offset));
2429 ARM_LDR_IMM (code, ainfo->reg, inst->inst_basereg, inst->inst_offset);
2431 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
2432 ArgInfo *ainfo = cinfo->args + i;
2433 inst = cfg->args [pos];
2435 if (cfg->verbose_level > 2)
2436 g_print ("Loading argument %d (type: %d)\n", i, ainfo->regtype);
/* Arguments still living in a register just need a move. */
2437 if (inst->opcode == OP_REGVAR) {
2438 if (ainfo->regtype == RegTypeGeneral)
2439 ARM_MOV_REG_REG (code, inst->dreg, ainfo->reg);
2440 else if (ainfo->regtype == RegTypeFP) {
2441 g_assert_not_reached ();
2442 } else if (ainfo->regtype == RegTypeBase) {
2446 if (arm_is_imm12 (prev_sp_offset + ainfo->offset)) {
2447 ARM_LDR_IMM (code, inst->dreg, ARMREG_SP, (prev_sp_offset + ainfo->offset));
2449 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
2450 ARM_LDR_REG_REG (code, inst->dreg, ARMREG_SP, ARMREG_IP);
2454 g_assert_not_reached ();
/* Spilled arguments: reload from the frame into the ABI register(s). */
2456 if (ainfo->regtype == RegTypeGeneral) {
2457 switch (ainfo->size) {
/* 8-byte arguments occupy a register pair: reload both words. */
2464 g_assert (arm_is_imm12 (inst->inst_offset));
2465 ARM_LDR_IMM (code, ainfo->reg, inst->inst_basereg, inst->inst_offset);
2466 g_assert (arm_is_imm12 (inst->inst_offset + 4));
2467 ARM_LDR_IMM (code, ainfo->reg + 1, inst->inst_basereg, inst->inst_offset + 4);
2470 if (arm_is_imm12 (inst->inst_offset)) {
2471 ARM_LDR_IMM (code, ainfo->reg, inst->inst_basereg, inst->inst_offset);
2473 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
2474 ARM_LDR_REG_REG (code, ainfo->reg, inst->inst_basereg, ARMREG_IP);
2478 } else if (ainfo->regtype == RegTypeBaseGen) {
2481 } else if (ainfo->regtype == RegTypeBase) {
2484 } else if (ainfo->regtype == RegTypeFP) {
2485 g_assert_not_reached ();
/* Structs passed by value: reload each word into its register. */
2486 } else if (ainfo->regtype == RegTypeStructByVal) {
2487 int doffset = inst->inst_offset;
2491 if (mono_class_from_mono_type (inst->inst_vtype))
2492 size = mono_class_native_size (mono_class_from_mono_type (inst->inst_vtype), NULL);
2493 for (cur_reg = 0; cur_reg < ainfo->size; ++cur_reg) {
2494 if (arm_is_imm12 (doffset)) {
2495 ARM_LDR_IMM (code, ainfo->reg + cur_reg, inst->inst_basereg, doffset);
2497 code = mono_arm_emit_load_imm (code, ARMREG_IP, doffset);
2498 ARM_LDR_REG_REG (code, ainfo->reg + cur_reg, inst->inst_basereg, ARMREG_IP);
2500 soffset += sizeof (gpointer);
2501 doffset += sizeof (gpointer);
2506 } else if (ainfo->regtype == RegTypeStructByAddr) {
2523 mono_arch_output_basic_block (MonoCompile *cfg, MonoBasicBlock *bb)
2528 guint8 *code = cfg->native_code + cfg->code_len;
2529 MonoInst *last_ins = NULL;
2530 guint last_offset = 0;
2532 int imm8, rot_amount;
2534 /* we don't align basic blocks of loops on arm */
2536 if (cfg->verbose_level > 2)
2537 g_print ("Basic block %d starting at offset 0x%x\n", bb->block_num, bb->native_offset);
2539 cpos = bb->max_offset;
2541 if (cfg->prof_options & MONO_PROFILE_COVERAGE) {
2542 //MonoCoverageInfo *cov = mono_get_coverage_info (cfg->method);
2543 //g_assert (!mono_compile_aot);
2546 // cov->data [bb->dfn].iloffset = bb->cil_code - cfg->cil_code;
2547 /* this is not thread safe, but good enough */
2548 /* FIXME: how to handle overflows? */
2549 //x86_inc_mem (code, &cov->data [bb->dfn].count);
2552 if (mono_break_at_bb_method && mono_method_desc_full_match (mono_break_at_bb_method, cfg->method) && bb->block_num == mono_break_at_bb_bb_num) {
2553 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_INTERNAL_METHOD,
2554 (gpointer)"mono_break");
2555 code = emit_call_seq (cfg, code);
2558 MONO_BB_FOR_EACH_INS (bb, ins) {
2559 offset = code - cfg->native_code;
2561 max_len = ((guint8 *)ins_get_spec (ins->opcode))[MONO_INST_LEN];
2563 if (offset > (cfg->code_size - max_len - 16)) {
2564 cfg->code_size *= 2;
2565 cfg->native_code = g_realloc (cfg->native_code, cfg->code_size);
2566 code = cfg->native_code + offset;
2568 // if (ins->cil_code)
2569 // g_print ("cil code\n");
2570 mono_debug_record_line_number (cfg, ins, offset);
2572 switch (ins->opcode) {
2573 case OP_MEMORY_BARRIER:
2576 g_assert_not_reached ();
2579 ppc_mullw (code, ppc_r4, ins->sreg1, ins->sreg2);
2580 ppc_mulhw (code, ppc_r3, ins->sreg1, ins->sreg2);
2583 ppc_mullw (code, ppc_r4, ins->sreg1, ins->sreg2);
2584 ppc_mulhwu (code, ppc_r3, ins->sreg1, ins->sreg2);
2586 case OP_STOREI1_MEMBASE_IMM:
2587 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_imm & 0xFF);
2588 g_assert (arm_is_imm12 (ins->inst_offset));
2589 ARM_STRB_IMM (code, ARMREG_LR, ins->inst_destbasereg, ins->inst_offset);
2591 case OP_STOREI2_MEMBASE_IMM:
2592 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_imm & 0xFFFF);
2593 g_assert (arm_is_imm8 (ins->inst_offset));
2594 ARM_STRH_IMM (code, ARMREG_LR, ins->inst_destbasereg, ins->inst_offset);
2596 case OP_STORE_MEMBASE_IMM:
2597 case OP_STOREI4_MEMBASE_IMM:
2598 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_imm);
2599 g_assert (arm_is_imm12 (ins->inst_offset));
2600 ARM_STR_IMM (code, ARMREG_LR, ins->inst_destbasereg, ins->inst_offset);
2602 case OP_STOREI1_MEMBASE_REG:
2603 g_assert (arm_is_imm12 (ins->inst_offset));
2604 ARM_STRB_IMM (code, ins->sreg1, ins->inst_destbasereg, ins->inst_offset);
2606 case OP_STOREI2_MEMBASE_REG:
2607 g_assert (arm_is_imm8 (ins->inst_offset));
2608 ARM_STRH_IMM (code, ins->sreg1, ins->inst_destbasereg, ins->inst_offset);
2610 case OP_STORE_MEMBASE_REG:
2611 case OP_STOREI4_MEMBASE_REG:
2612 /* this case is special, since it happens for spill code after lowering has been called */
2613 if (arm_is_imm12 (ins->inst_offset)) {
2614 ARM_STR_IMM (code, ins->sreg1, ins->inst_destbasereg, ins->inst_offset);
2616 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_offset);
2617 ARM_STR_REG_REG (code, ins->sreg1, ins->inst_destbasereg, ARMREG_LR);
2620 case OP_STOREI1_MEMINDEX:
2621 ARM_STRB_REG_REG (code, ins->sreg1, ins->inst_destbasereg, ins->sreg2);
2623 case OP_STOREI2_MEMINDEX:
2624 ARM_STRH_REG_REG (code, ins->sreg1, ins->inst_destbasereg, ins->sreg2);
2626 case OP_STORE_MEMINDEX:
2627 case OP_STOREI4_MEMINDEX:
2628 ARM_STR_REG_REG (code, ins->sreg1, ins->inst_destbasereg, ins->sreg2);
2631 g_assert_not_reached ();
2633 case OP_LOAD_MEMINDEX:
2634 case OP_LOADI4_MEMINDEX:
2635 case OP_LOADU4_MEMINDEX:
2636 ARM_LDR_REG_REG (code, ins->dreg, ins->inst_basereg, ins->sreg2);
2638 case OP_LOADI1_MEMINDEX:
2639 ARM_LDRSB_REG_REG (code, ins->dreg, ins->inst_basereg, ins->sreg2);
2641 case OP_LOADU1_MEMINDEX:
2642 ARM_LDRB_REG_REG (code, ins->dreg, ins->inst_basereg, ins->sreg2);
2644 case OP_LOADI2_MEMINDEX:
2645 ARM_LDRSH_REG_REG (code, ins->dreg, ins->inst_basereg, ins->sreg2);
2647 case OP_LOADU2_MEMINDEX:
2648 ARM_LDRH_REG_REG (code, ins->dreg, ins->inst_basereg, ins->sreg2);
2650 case OP_LOAD_MEMBASE:
2651 case OP_LOADI4_MEMBASE:
2652 case OP_LOADU4_MEMBASE:
2653 /* this case is special, since it happens for spill code after lowering has been called */
2654 if (arm_is_imm12 (ins->inst_offset)) {
2655 ARM_LDR_IMM (code, ins->dreg, ins->inst_basereg, ins->inst_offset);
2657 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_offset);
2658 ARM_LDR_REG_REG (code, ins->dreg, ins->inst_basereg, ARMREG_LR);
2661 case OP_LOADI1_MEMBASE:
2662 g_assert (arm_is_imm8 (ins->inst_offset));
2663 ARM_LDRSB_IMM (code, ins->dreg, ins->inst_basereg, ins->inst_offset);
2665 case OP_LOADU1_MEMBASE:
2666 g_assert (arm_is_imm12 (ins->inst_offset));
2667 ARM_LDRB_IMM (code, ins->dreg, ins->inst_basereg, ins->inst_offset);
2669 case OP_LOADU2_MEMBASE:
2670 g_assert (arm_is_imm8 (ins->inst_offset));
2671 ARM_LDRH_IMM (code, ins->dreg, ins->inst_basereg, ins->inst_offset);
2673 case OP_LOADI2_MEMBASE:
2674 g_assert (arm_is_imm8 (ins->inst_offset));
2675 ARM_LDRSH_IMM (code, ins->dreg, ins->inst_basereg, ins->inst_offset);
2677 case OP_ICONV_TO_I1:
2678 ARM_SHL_IMM (code, ins->dreg, ins->sreg1, 24);
2679 ARM_SAR_IMM (code, ins->dreg, ins->dreg, 24);
2681 case OP_ICONV_TO_I2:
2682 ARM_SHL_IMM (code, ins->dreg, ins->sreg1, 16);
2683 ARM_SAR_IMM (code, ins->dreg, ins->dreg, 16);
2685 case OP_ICONV_TO_U1:
2686 ARM_AND_REG_IMM8 (code, ins->dreg, ins->sreg1, 0xff);
2688 case OP_ICONV_TO_U2:
2689 ARM_SHL_IMM (code, ins->dreg, ins->sreg1, 16);
2690 ARM_SHR_IMM (code, ins->dreg, ins->dreg, 16);
2694 ARM_CMP_REG_REG (code, ins->sreg1, ins->sreg2);
2696 case OP_COMPARE_IMM:
2697 case OP_ICOMPARE_IMM:
2698 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
2699 g_assert (imm8 >= 0);
2700 ARM_CMP_REG_IMM (code, ins->sreg1, imm8, rot_amount);
2704 * gdb does not like encountering the hw breakpoint ins in the debugged code.
2705 * So instead of emitting a trap, we emit a call to a C function and place a
2708 //*(int*)code = 0xef9f0001;
2711 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_INTERNAL_METHOD,
2712 (gpointer)"mono_break");
2713 code = emit_call_seq (cfg, code);
2717 case OP_DUMMY_STORE:
2718 case OP_NOT_REACHED:
2723 ARM_ADDS_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
2726 ARM_ADD_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
2730 ARM_ADCS_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
2733 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
2734 g_assert (imm8 >= 0);
2735 ARM_ADDS_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
2739 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
2740 g_assert (imm8 >= 0);
2741 ARM_ADD_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
2745 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
2746 g_assert (imm8 >= 0);
2747 ARM_ADCS_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
2750 ARM_ADD_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
2751 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, "OverflowException");
2753 case OP_IADD_OVF_UN:
2754 ARM_ADD_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
2755 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, "OverflowException");
2758 ARM_SUB_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
2759 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, "OverflowException");
2761 case OP_ISUB_OVF_UN:
2762 ARM_SUB_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
2763 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_TRUE, PPC_BR_EQ, "OverflowException");
2765 case OP_ADD_OVF_CARRY:
2766 ARM_ADCS_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
2767 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, "OverflowException");
2769 case OP_ADD_OVF_UN_CARRY:
2770 ARM_ADCS_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
2771 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, "OverflowException");
2773 case OP_SUB_OVF_CARRY:
2774 ARM_SBCS_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
2775 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, "OverflowException");
2777 case OP_SUB_OVF_UN_CARRY:
2778 ARM_SBCS_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
2779 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_TRUE, PPC_BR_EQ, "OverflowException");
2783 ARM_SUBS_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
2786 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
2787 g_assert (imm8 >= 0);
2788 ARM_SUBS_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
2791 ARM_SUB_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
2795 ARM_SBCS_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
2799 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
2800 g_assert (imm8 >= 0);
2801 ARM_SUB_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
2805 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
2806 g_assert (imm8 >= 0);
2807 ARM_SBCS_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
2809 case OP_ARM_RSBS_IMM:
2810 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
2811 g_assert (imm8 >= 0);
2812 ARM_RSBS_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
2814 case OP_ARM_RSC_IMM:
2815 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
2816 g_assert (imm8 >= 0);
2817 ARM_RSC_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
2820 ARM_AND_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
2824 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
2825 g_assert (imm8 >= 0);
2826 ARM_AND_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
2834 /* crappy ARM arch doesn't have a DIV instruction */
2835 g_assert_not_reached ();
2837 ARM_ORR_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
2841 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
2842 g_assert (imm8 >= 0);
2843 ARM_ORR_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
2846 ARM_EOR_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
2850 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
2851 g_assert (imm8 >= 0);
2852 ARM_EOR_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
2855 ARM_SHL_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
2860 ARM_SHL_IMM (code, ins->dreg, ins->sreg1, (ins->inst_imm & 0x1f));
2861 else if (ins->dreg != ins->sreg1)
2862 ARM_MOV_REG_REG (code, ins->dreg, ins->sreg1);
2865 ARM_SAR_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
2870 ARM_SAR_IMM (code, ins->dreg, ins->sreg1, (ins->inst_imm & 0x1f));
2871 else if (ins->dreg != ins->sreg1)
2872 ARM_MOV_REG_REG (code, ins->dreg, ins->sreg1);
2875 case OP_ISHR_UN_IMM:
2877 ARM_SHR_IMM (code, ins->dreg, ins->sreg1, (ins->inst_imm & 0x1f));
2878 else if (ins->dreg != ins->sreg1)
2879 ARM_MOV_REG_REG (code, ins->dreg, ins->sreg1);
2882 ARM_SHR_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
2885 ARM_MVN_REG_REG (code, ins->dreg, ins->sreg1);
2888 ARM_RSB_REG_IMM8 (code, ins->dreg, ins->sreg1, 0);
2891 if (ins->dreg == ins->sreg2)
2892 ARM_MUL_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
2894 ARM_MUL_REG_REG (code, ins->dreg, ins->sreg2, ins->sreg1);
2897 g_assert_not_reached ();
2900 /* FIXME: handle ovf/ sreg2 != dreg */
2901 ARM_MUL_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
2903 case OP_IMUL_OVF_UN:
2904 /* FIXME: handle ovf/ sreg2 != dreg */
2905 ARM_MUL_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
2908 code = mono_arm_emit_load_imm (code, ins->dreg, ins->inst_c0);
2911 /* Load the GOT offset */
2912 mono_add_patch_info (cfg, offset, (MonoJumpInfoType)ins->inst_i1, ins->inst_p0);
2913 ARM_LDR_IMM (code, ins->dreg, ARMREG_PC, 0);
2915 *(gpointer*)code = NULL;
2917 /* Load the value from the GOT */
2918 ARM_LDR_REG_REG (code, ins->dreg, ARMREG_PC, ins->dreg);
2920 case OP_ICONV_TO_I4:
2921 case OP_ICONV_TO_U4:
2923 if (ins->dreg != ins->sreg1)
2924 ARM_MOV_REG_REG (code, ins->dreg, ins->sreg1);
2927 int saved = ins->sreg2;
2928 if (ins->sreg2 == ARM_LSW_REG) {
2929 ARM_MOV_REG_REG (code, ARMREG_LR, ins->sreg2);
2932 if (ins->sreg1 != ARM_LSW_REG)
2933 ARM_MOV_REG_REG (code, ARM_LSW_REG, ins->sreg1);
2934 if (saved != ARM_MSW_REG)
2935 ARM_MOV_REG_REG (code, ARM_MSW_REG, saved);
2940 ARM_MVFD (code, ins->dreg, ins->sreg1);
2941 #elif defined(ARM_FPU_VFP)
2942 ARM_CPYD (code, ins->dreg, ins->sreg1);
2945 case OP_FCONV_TO_R4:
2947 ARM_MVFS (code, ins->dreg, ins->sreg1);
2948 #elif defined(ARM_FPU_VFP)
2949 ARM_CVTD (code, ins->dreg, ins->sreg1);
2950 ARM_CVTS (code, ins->dreg, ins->dreg);
2955 * Keep in sync with mono_arch_emit_epilog
2957 g_assert (!cfg->method->save_lmf);
2959 code = emit_load_volatile_arguments (cfg, code);
2961 code = emit_big_add (code, ARMREG_SP, cfg->frame_reg, cfg->stack_usage);
2962 ARM_POP_NWB (code, cfg->used_int_regs | ((1 << ARMREG_SP)) | ((1 << ARMREG_LR)));
2963 mono_add_patch_info (cfg, (guint8*) code - cfg->native_code, MONO_PATCH_INFO_METHOD_JUMP, ins->inst_p0);
2967 /* ensure ins->sreg1 is not NULL */
2968 ARM_LDR_IMM (code, ARMREG_LR, ins->sreg1, 0);
2972 if (ppc_is_imm16 (cfg->sig_cookie + cfg->stack_usage)) {
2973 ppc_addi (code, ppc_r11, cfg->frame_reg, cfg->sig_cookie + cfg->stack_usage);
2975 ppc_load (code, ppc_r11, cfg->sig_cookie + cfg->stack_usage);
2976 ppc_add (code, ppc_r11, cfg->frame_reg, ppc_r11);
2978 ppc_stw (code, ppc_r11, 0, ins->sreg1);
2988 call = (MonoCallInst*)ins;
2989 if (ins->flags & MONO_INST_HAS_METHOD)
2990 mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_METHOD, call->method);
2992 mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_ABS, call->fptr);
2993 code = emit_call_seq (cfg, code);
2994 code = emit_move_return_value (cfg, ins, code);
3000 case OP_VOIDCALL_REG:
3002 code = emit_call_reg (code, ins->sreg1);
3003 code = emit_move_return_value (cfg, ins, code);
3005 case OP_FCALL_MEMBASE:
3006 case OP_LCALL_MEMBASE:
3007 case OP_VCALL_MEMBASE:
3008 case OP_VCALL2_MEMBASE:
3009 case OP_VOIDCALL_MEMBASE:
3010 case OP_CALL_MEMBASE:
3011 g_assert (arm_is_imm12 (ins->inst_offset));
3012 g_assert (ins->sreg1 != ARMREG_LR);
3013 call = (MonoCallInst*)ins;
3014 if (call->method->klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
3015 ARM_ADD_REG_IMM8 (code, ARMREG_LR, ARMREG_PC, 4);
3016 ARM_LDR_IMM (code, ARMREG_PC, ins->sreg1, ins->inst_offset);
3018 * We can't embed the method in the code stream in PIC code, or
3020 * Instead, we put it in V5 in code emitted by
3021 * mono_arch_emit_imt_argument (), and embed NULL here to
3022 * signal the IMT thunk that the value is in V5.
3024 if (call->dynamic_imt_arg)
3025 *((gpointer*)code) = NULL;
3027 *((gpointer*)code) = (gpointer)call->method;
3030 ARM_MOV_REG_REG (code, ARMREG_LR, ARMREG_PC);
3031 ARM_LDR_IMM (code, ARMREG_PC, ins->sreg1, ins->inst_offset);
3033 code = emit_move_return_value (cfg, ins, code);
3036 g_assert_not_reached ();
3039 /* keep alignment */
3040 int alloca_waste = cfg->param_area;
3043 /* round the size to 8 bytes */
3044 ARM_ADD_REG_IMM8 (code, ins->dreg, ins->sreg1, 7);
3045 ARM_BIC_REG_IMM8 (code, ins->dreg, ins->dreg, 7);
3047 ARM_ADD_REG_IMM8 (code, ins->dreg, ins->dreg, alloca_waste);
3048 ARM_SUB_REG_REG (code, ARMREG_SP, ARMREG_SP, ins->dreg);
3049 /* memzero the area: dreg holds the size, sp is the pointer */
3050 if (ins->flags & MONO_INST_INIT) {
3051 guint8 *start_loop, *branch_to_cond;
3052 ARM_MOV_REG_IMM8 (code, ARMREG_LR, 0);
3053 branch_to_cond = code;
3056 ARM_STR_REG_REG (code, ARMREG_LR, ARMREG_SP, ins->dreg);
3057 arm_patch (branch_to_cond, code);
3058 /* decrement by 4 and set flags */
3059 ARM_SUBS_REG_IMM8 (code, ins->dreg, ins->dreg, 4);
3060 ARM_B_COND (code, ARMCOND_GE, 0);
3061 arm_patch (code - 4, start_loop);
3063 ARM_ADD_REG_IMM8 (code, ins->dreg, ARMREG_SP, alloca_waste);
3067 if (ins->sreg1 != ARMREG_R0)
3068 ARM_MOV_REG_REG (code, ARMREG_R0, ins->sreg1);
3069 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_INTERNAL_METHOD,
3070 (gpointer)"mono_arch_throw_exception");
3071 code = emit_call_seq (cfg, code);
3075 if (ins->sreg1 != ARMREG_R0)
3076 ARM_MOV_REG_REG (code, ARMREG_R0, ins->sreg1);
3077 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_INTERNAL_METHOD,
3078 (gpointer)"mono_arch_rethrow_exception");
3079 code = emit_call_seq (cfg, code);
3082 case OP_START_HANDLER: {
3083 MonoInst *spvar = mono_find_spvar_for_region (cfg, bb->region);
3085 if (arm_is_imm12 (spvar->inst_offset)) {
3086 ARM_STR_IMM (code, ARMREG_LR, spvar->inst_basereg, spvar->inst_offset);
3088 code = mono_arm_emit_load_imm (code, ARMREG_IP, spvar->inst_offset);
3089 ARM_STR_REG_REG (code, ARMREG_LR, spvar->inst_basereg, ARMREG_IP);
3093 case OP_ENDFILTER: {
3094 MonoInst *spvar = mono_find_spvar_for_region (cfg, bb->region);
3096 if (ins->sreg1 != ARMREG_R0)
3097 ARM_MOV_REG_REG (code, ARMREG_R0, ins->sreg1);
3098 if (arm_is_imm12 (spvar->inst_offset)) {
3099 ARM_LDR_IMM (code, ARMREG_IP, spvar->inst_basereg, spvar->inst_offset);
3101 g_assert (ARMREG_IP != spvar->inst_basereg);
3102 code = mono_arm_emit_load_imm (code, ARMREG_IP, spvar->inst_offset);
3103 ARM_LDR_REG_REG (code, ARMREG_IP, spvar->inst_basereg, ARMREG_IP);
3105 ARM_MOV_REG_REG (code, ARMREG_PC, ARMREG_IP);
3108 case OP_ENDFINALLY: {
3109 MonoInst *spvar = mono_find_spvar_for_region (cfg, bb->region);
3111 if (arm_is_imm12 (spvar->inst_offset)) {
3112 ARM_LDR_IMM (code, ARMREG_IP, spvar->inst_basereg, spvar->inst_offset);
3114 g_assert (ARMREG_IP != spvar->inst_basereg);
3115 code = mono_arm_emit_load_imm (code, ARMREG_IP, spvar->inst_offset);
3116 ARM_LDR_REG_REG (code, ARMREG_IP, spvar->inst_basereg, ARMREG_IP);
3118 ARM_MOV_REG_REG (code, ARMREG_PC, ARMREG_IP);
3121 case OP_CALL_HANDLER:
3122 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_BB, ins->inst_target_bb);
3126 ins->inst_c0 = code - cfg->native_code;
3129 if (ins->flags & MONO_INST_BRLABEL) {
3130 /*if (ins->inst_i0->inst_c0) {
3132 //x86_jump_code (code, cfg->native_code + ins->inst_i0->inst_c0);
3134 mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_LABEL, ins->inst_i0);
3138 /*if (ins->inst_target_bb->native_offset) {
3140 //x86_jump_code (code, cfg->native_code + ins->inst_target_bb->native_offset);
3142 mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_BB, ins->inst_target_bb);
3148 ARM_MOV_REG_REG (code, ARMREG_PC, ins->sreg1);
3152 * In the normal case we have:
3153 * ldr pc, [pc, ins->sreg1 << 2]
3156 * ldr lr, [pc, ins->sreg1 << 2]
3158 * After follows the data.
3159 * FIXME: add aot support.
3162 mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_SWITCH, ins->inst_p0);
3163 max_len += 4 * GPOINTER_TO_INT (ins->klass);
3164 if (offset > (cfg->code_size - max_len - 16)) {
3165 cfg->code_size += max_len;
3166 cfg->code_size *= 2;
3167 cfg->native_code = g_realloc (cfg->native_code, cfg->code_size);
3168 code = cfg->native_code + offset;
3170 ARM_LDR_REG_REG_SHIFT (code, ARMREG_PC, ARMREG_PC, ins->sreg1, ARMSHIFT_LSL, 2);
3172 code += 4 * GPOINTER_TO_INT (ins->klass);
3176 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 0, ARMCOND_NE);
3177 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_EQ);
3181 ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
3182 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_LT);
3186 ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
3187 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_LO);
3191 ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
3192 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_GT);
3196 ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
3197 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_HI);
3199 case OP_COND_EXC_EQ:
3200 case OP_COND_EXC_NE_UN:
3201 case OP_COND_EXC_LT:
3202 case OP_COND_EXC_LT_UN:
3203 case OP_COND_EXC_GT:
3204 case OP_COND_EXC_GT_UN:
3205 case OP_COND_EXC_GE:
3206 case OP_COND_EXC_GE_UN:
3207 case OP_COND_EXC_LE:
3208 case OP_COND_EXC_LE_UN:
3209 EMIT_COND_SYSTEM_EXCEPTION (ins->opcode - OP_COND_EXC_EQ, ins->inst_p1);
3211 case OP_COND_EXC_IEQ:
3212 case OP_COND_EXC_INE_UN:
3213 case OP_COND_EXC_ILT:
3214 case OP_COND_EXC_ILT_UN:
3215 case OP_COND_EXC_IGT:
3216 case OP_COND_EXC_IGT_UN:
3217 case OP_COND_EXC_IGE:
3218 case OP_COND_EXC_IGE_UN:
3219 case OP_COND_EXC_ILE:
3220 case OP_COND_EXC_ILE_UN:
3221 EMIT_COND_SYSTEM_EXCEPTION (ins->opcode - OP_COND_EXC_IEQ, ins->inst_p1);
3224 case OP_COND_EXC_OV:
3225 case OP_COND_EXC_NC:
3226 case OP_COND_EXC_NO:
3227 case OP_COND_EXC_IC:
3228 case OP_COND_EXC_IOV:
3229 case OP_COND_EXC_INC:
3230 case OP_COND_EXC_INO:
3243 EMIT_COND_BRANCH (ins, ins->opcode - OP_IBEQ);
3246 /* floating point opcodes */
3249 if (cfg->compile_aot) {
3250 ARM_LDFD (code, ins->dreg, ARMREG_PC, 0);
3252 *(guint32*)code = ((guint32*)(ins->inst_p0))[0];
3254 *(guint32*)code = ((guint32*)(ins->inst_p0))[1];
3257 /* FIXME: we can optimize the imm load by dealing with part of
3258 * the displacement in LDFD (aligning to 512).
3260 code = mono_arm_emit_load_imm (code, ARMREG_LR, (guint32)ins->inst_p0);
3261 ARM_LDFD (code, ins->dreg, ARMREG_LR, 0);
3265 if (cfg->compile_aot) {
3266 ARM_LDFS (code, ins->dreg, ARMREG_PC, 0);
3268 *(guint32*)code = ((guint32*)(ins->inst_p0))[0];
3271 code = mono_arm_emit_load_imm (code, ARMREG_LR, (guint32)ins->inst_p0);
3272 ARM_LDFS (code, ins->dreg, ARMREG_LR, 0);
3275 case OP_STORER8_MEMBASE_REG:
3276 /* This is generated by the local regalloc pass which runs after the lowering pass */
3277 if (!arm_is_fpimm8 (ins->inst_offset)) {
3278 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_offset);
3279 ARM_STFD (code, ins->sreg1, ARMREG_LR, 0);
3281 ARM_STFD (code, ins->sreg1, ins->inst_destbasereg, ins->inst_offset);
3284 case OP_LOADR8_MEMBASE:
3285 /* This is generated by the local regalloc pass which runs after the lowering pass */
3286 if (!arm_is_fpimm8 (ins->inst_offset)) {
3287 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_offset);
3288 ARM_LDFD (code, ins->dreg, ARMREG_LR, 0);
3290 ARM_LDFD (code, ins->dreg, ins->inst_basereg, ins->inst_offset);
3293 case OP_STORER4_MEMBASE_REG:
3294 g_assert (arm_is_fpimm8 (ins->inst_offset));
3295 ARM_STFS (code, ins->sreg1, ins->inst_destbasereg, ins->inst_offset);
3297 case OP_LOADR4_MEMBASE:
3298 g_assert (arm_is_fpimm8 (ins->inst_offset));
3299 ARM_LDFS (code, ins->dreg, ins->inst_basereg, ins->inst_offset);
3301 case OP_ICONV_TO_R_UN: {
3303 tmpreg = ins->dreg == 0? 1: 0;
3304 ARM_CMP_REG_IMM8 (code, ins->sreg1, 0);
3305 ARM_FLTD (code, ins->dreg, ins->sreg1);
3306 ARM_B_COND (code, ARMCOND_GE, 8);
3307 /* save the temp register */
3308 ARM_SUB_REG_IMM8 (code, ARMREG_SP, ARMREG_SP, 8);
3309 ARM_STFD (code, tmpreg, ARMREG_SP, 0);
3310 ARM_LDFD (code, tmpreg, ARMREG_PC, 12);
3311 ARM_FPA_ADFD (code, ins->dreg, ins->dreg, tmpreg);
3312 ARM_LDFD (code, tmpreg, ARMREG_SP, 0);
3313 ARM_ADD_REG_IMM8 (code, ARMREG_SP, ARMREG_SP, 8);
3314 /* skip the constant pool */
3317 *(int*)code = 0x41f00000;
3322 * ldfltd ftemp, [pc, #8] 0x41f00000 0x00000000
3323 * adfltd fdest, fdest, ftemp
3327 case OP_ICONV_TO_R4:
3328 ARM_FLTS (code, ins->dreg, ins->sreg1);
3330 case OP_ICONV_TO_R8:
3331 ARM_FLTD (code, ins->dreg, ins->sreg1);
3333 #elif defined(ARM_FPU_VFP)
3335 if (cfg->compile_aot) {
3336 ARM_FLDD (code, ins->dreg, ARMREG_PC, 0);
3338 *(guint32*)code = ((guint32*)(ins->inst_p0))[0];
3340 *(guint32*)code = ((guint32*)(ins->inst_p0))[1];
3343 /* FIXME: we can optimize the imm load by dealing with part of
3344 * the displacement in LDFD (aligning to 512).
3346 code = mono_arm_emit_load_imm (code, ARMREG_LR, (guint32)ins->inst_p0);
3347 ARM_FLDD (code, ins->dreg, ARMREG_LR, 0);
3351 if (cfg->compile_aot) {
3352 ARM_FLDS (code, ins->dreg, ARMREG_PC, 0);
3354 *(guint32*)code = ((guint32*)(ins->inst_p0))[0];
3356 ARM_CVTS (code, ins->dreg, ins->dreg);
3358 code = mono_arm_emit_load_imm (code, ARMREG_LR, (guint32)ins->inst_p0);
3359 ARM_FLDS (code, ins->dreg, ARMREG_LR, 0);
3360 ARM_CVTS (code, ins->dreg, ins->dreg);
3363 case OP_STORER8_MEMBASE_REG:
3364 g_assert (arm_is_fpimm8 (ins->inst_offset));
3365 ARM_FSTD (code, ins->sreg1, ins->inst_destbasereg, ins->inst_offset);
3367 case OP_LOADR8_MEMBASE:
3368 g_assert (arm_is_fpimm8 (ins->inst_offset));
3369 ARM_FLDD (code, ins->dreg, ins->inst_basereg, ins->inst_offset);
3371 case OP_STORER4_MEMBASE_REG:
3372 g_assert (arm_is_fpimm8 (ins->inst_offset));
3373 ARM_FSTS (code, ins->sreg1, ins->inst_destbasereg, ins->inst_offset);
3375 case OP_LOADR4_MEMBASE:
3376 g_assert (arm_is_fpimm8 (ins->inst_offset));
3377 ARM_FLDS (code, ins->dreg, ins->inst_basereg, ins->inst_offset);
3379 case OP_ICONV_TO_R_UN: {
3380 g_assert_not_reached ();
3383 case OP_ICONV_TO_R4:
3384 g_assert_not_reached ();
3385 //ARM_FLTS (code, ins->dreg, ins->sreg1);
3387 case OP_ICONV_TO_R8:
3388 g_assert_not_reached ();
3389 //ARM_FLTD (code, ins->dreg, ins->sreg1);
3392 case OP_FCONV_TO_I1:
3393 code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 1, TRUE);
3395 case OP_FCONV_TO_U1:
3396 code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 1, FALSE);
3398 case OP_FCONV_TO_I2:
3399 code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 2, TRUE);
3401 case OP_FCONV_TO_U2:
3402 code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 2, FALSE);
3404 case OP_FCONV_TO_I4:
3406 code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 4, TRUE);
3408 case OP_FCONV_TO_U4:
3410 code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 4, FALSE);
3412 case OP_FCONV_TO_I8:
3413 case OP_FCONV_TO_U8:
3414 g_assert_not_reached ();
3415 /* Implemented as helper calls */
3417 case OP_LCONV_TO_R_UN:
3418 g_assert_not_reached ();
3419 /* Implemented as helper calls */
3421 case OP_LCONV_TO_OVF_I:
3422 case OP_LCONV_TO_OVF_I4_2: {
3423 guint32 *high_bit_not_set, *valid_negative, *invalid_negative, *valid_positive;
3425 * Valid ints: 0xffffffff:80000000 to 00000000:0x7fffffff
3428 ARM_CMP_REG_IMM8 (code, ins->sreg1, 0);
3429 high_bit_not_set = code;
3430 ARM_B_COND (code, ARMCOND_GE, 0); /*branch if bit 31 of the lower part is not set*/
3432 ARM_CMN_REG_IMM8 (code, ins->sreg2, 1); /*This has the same effect as CMP reg, 0xFFFFFFFF */
3433 valid_negative = code;
3434 ARM_B_COND (code, ARMCOND_EQ, 0); /*branch if upper part == 0xFFFFFFFF (lower part has bit 31 set) */
3435 invalid_negative = code;
3436 ARM_B_COND (code, ARMCOND_AL, 0);
3438 arm_patch (high_bit_not_set, code);
3440 ARM_CMP_REG_IMM8 (code, ins->sreg2, 0);
3441 valid_positive = code;
3442 ARM_B_COND (code, ARMCOND_EQ, 0); /*branch if upper part == 0 (lower part has bit 31 clear)*/
3444 arm_patch (invalid_negative, code);
3445 EMIT_COND_SYSTEM_EXCEPTION_FLAGS (ARMCOND_AL, "OverflowException");
3447 arm_patch (valid_negative, code);
3448 arm_patch (valid_positive, code);
3450 if (ins->dreg != ins->sreg1)
3451 ARM_MOV_REG_REG (code, ins->dreg, ins->sreg1);
3456 ARM_FPA_ADFD (code, ins->dreg, ins->sreg1, ins->sreg2);
3459 ARM_FPA_SUFD (code, ins->dreg, ins->sreg1, ins->sreg2);
3462 ARM_FPA_MUFD (code, ins->dreg, ins->sreg1, ins->sreg2);
3465 ARM_FPA_DVFD (code, ins->dreg, ins->sreg1, ins->sreg2);
3468 ARM_MNFD (code, ins->dreg, ins->sreg1);
3470 #elif defined(ARM_FPU_VFP)
3472 ARM_VFP_ADDD (code, ins->dreg, ins->sreg1, ins->sreg2);
3475 ARM_VFP_SUBD (code, ins->dreg, ins->sreg1, ins->sreg2);
3478 ARM_VFP_MULD (code, ins->dreg, ins->sreg1, ins->sreg2);
3481 ARM_VFP_DIVD (code, ins->dreg, ins->sreg1, ins->sreg2);
3484 ARM_NEGD (code, ins->dreg, ins->sreg1);
3489 g_assert_not_reached ();
3493 ARM_FCMP (code, ARM_FPA_CMF, ins->sreg1, ins->sreg2);
3494 #elif defined(ARM_FPU_VFP)
3495 ARM_CMPD (code, ins->sreg1, ins->sreg2);
3500 ARM_FCMP (code, ARM_FPA_CMF, ins->sreg1, ins->sreg2);
3501 #elif defined(ARM_FPU_VFP)
3502 ARM_CMPD (code, ins->sreg1, ins->sreg2);
3504 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 0, ARMCOND_NE);
3505 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_EQ);
3509 ARM_FCMP (code, ARM_FPA_CMF, ins->sreg1, ins->sreg2);
3510 #elif defined(ARM_FPU_VFP)
3511 ARM_CMPD (code, ins->sreg1, ins->sreg2);
3513 ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
3514 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_MI);
3518 ARM_FCMP (code, ARM_FPA_CMF, ins->sreg1, ins->sreg2);
3519 #elif defined(ARM_FPU_VFP)
3520 ARM_CMPD (code, ins->sreg1, ins->sreg2);
3522 ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
3523 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_MI);
3524 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_VS);
3529 ARM_FCMP (code, ARM_FPA_CMF, ins->sreg2, ins->sreg1);
3530 #elif defined(ARM_FPU_VFP)
3531 ARM_CMPD (code, ins->sreg2, ins->sreg1);
3533 ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
3534 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_MI);
3539 ARM_FCMP (code, ARM_FPA_CMF, ins->sreg2, ins->sreg1);
3540 #elif defined(ARM_FPU_VFP)
3541 ARM_CMPD (code, ins->sreg2, ins->sreg1);
3543 ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
3544 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_MI);
3545 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_VS);
3547 /* ARM FPA flags table:
3548 * N Less than ARMCOND_MI
3549 * Z Equal ARMCOND_EQ
3550 * C Greater Than or Equal ARMCOND_CS
3551 * V Unordered ARMCOND_VS
3554 EMIT_COND_BRANCH (ins, OP_IBEQ - OP_IBEQ);
3557 EMIT_COND_BRANCH (ins, OP_IBNE_UN - OP_IBEQ);
3560 EMIT_COND_BRANCH_FLAGS (ins, ARMCOND_MI); /* N set */
3563 EMIT_COND_BRANCH_FLAGS (ins, ARMCOND_VS); /* V set */
3564 EMIT_COND_BRANCH_FLAGS (ins, ARMCOND_MI); /* N set */
3570 g_assert_not_reached ();
3573 EMIT_COND_BRANCH_FLAGS (ins, ARMCOND_CS);
3576 EMIT_COND_BRANCH_FLAGS (ins, ARMCOND_VS); /* V set */
3577 EMIT_COND_BRANCH_FLAGS (ins, ARMCOND_GE);
3582 if (ins->dreg != ins->sreg1)
3583 ARM_MVFD (code, ins->dreg, ins->sreg1);
3585 g_assert_not_reached ();
3590 g_warning ("unknown opcode %s in %s()\n", mono_inst_name (ins->opcode), __FUNCTION__);
3591 g_assert_not_reached ();
3594 if ((cfg->opt & MONO_OPT_BRANCH) && ((code - cfg->native_code - offset) > max_len)) {
3595 g_warning ("wrong maximal instruction length of instruction %s (expected %d, got %d)",
3596 mono_inst_name (ins->opcode), max_len, code - cfg->native_code - offset);
3597 g_assert_not_reached ();
3603 last_offset = offset;
3606 cfg->code_len = code - cfg->native_code;
3609 #endif /* DISABLE_JIT */
3612 mono_arch_register_lowlevel_calls (void)
3614 /* The signature doesn't matter */
3615 mono_register_jit_icall (mono_arm_throw_exception, "mono_arm_throw_exception", mono_create_icall_signature ("void"), TRUE);
3618 #define patch_lis_ori(ip,val) do {\
3619 guint16 *__lis_ori = (guint16*)(ip); \
3620 __lis_ori [1] = (((guint32)(val)) >> 16) & 0xffff; \
3621 __lis_ori [3] = ((guint32)(val)) & 0xffff; \
3625 mono_arch_patch_code (MonoMethod *method, MonoDomain *domain, guint8 *code, MonoJumpInfo *ji, gboolean run_cctors)
3627 MonoJumpInfo *patch_info;
3628 gboolean compile_aot = !run_cctors;
3630 for (patch_info = ji; patch_info; patch_info = patch_info->next) {
3631 unsigned char *ip = patch_info->ip.i + code;
3632 const unsigned char *target;
3634 if (patch_info->type == MONO_PATCH_INFO_SWITCH && !compile_aot) {
3635 gpointer *jt = (gpointer*)(ip + 8);
3637 /* jt is the inlined jump table, 2 instructions after ip
3638 * In the normal case we store the absolute addresses,
3639 * otherwise the displacements.
3641 for (i = 0; i < patch_info->data.table->table_size; i++)
3642 jt [i] = code + (int)patch_info->data.table->table [i];
3645 target = mono_resolve_patch_target (method, domain, code, patch_info, run_cctors);
3648 switch (patch_info->type) {
3649 case MONO_PATCH_INFO_BB:
3650 case MONO_PATCH_INFO_LABEL:
3653 /* No need to patch these */
3658 switch (patch_info->type) {
3659 case MONO_PATCH_INFO_IP:
3660 g_assert_not_reached ();
3661 patch_lis_ori (ip, ip);
3663 case MONO_PATCH_INFO_METHOD_REL:
3664 g_assert_not_reached ();
3665 *((gpointer *)(ip)) = code + patch_info->data.offset;
3667 case MONO_PATCH_INFO_METHODCONST:
3668 case MONO_PATCH_INFO_CLASS:
3669 case MONO_PATCH_INFO_IMAGE:
3670 case MONO_PATCH_INFO_FIELD:
3671 case MONO_PATCH_INFO_VTABLE:
3672 case MONO_PATCH_INFO_IID:
3673 case MONO_PATCH_INFO_SFLDA:
3674 case MONO_PATCH_INFO_LDSTR:
3675 case MONO_PATCH_INFO_TYPE_FROM_HANDLE:
3676 case MONO_PATCH_INFO_LDTOKEN:
3677 g_assert_not_reached ();
3678 /* from OP_AOTCONST : lis + ori */
3679 patch_lis_ori (ip, target);
3681 case MONO_PATCH_INFO_R4:
3682 case MONO_PATCH_INFO_R8:
3683 g_assert_not_reached ();
3684 *((gconstpointer *)(ip + 2)) = patch_info->data.target;
3686 case MONO_PATCH_INFO_EXC_NAME:
3687 g_assert_not_reached ();
3688 *((gconstpointer *)(ip + 1)) = patch_info->data.name;
3690 case MONO_PATCH_INFO_NONE:
3691 case MONO_PATCH_INFO_BB_OVF:
3692 case MONO_PATCH_INFO_EXC_OVF:
3693 /* everything is dealt with at epilog output time */
3698 arm_patch (ip, target);
3703 * Stack frame layout:
3705 * ------------------- fp
3706 * MonoLMF structure or saved registers
3707 * -------------------
3709 * -------------------
3711 * -------------------
3712 * optional 8 bytes for tracing
3713 * -------------------
3714 * param area size is cfg->param_area
3715 * ------------------- sp
3718 mono_arch_emit_prolog (MonoCompile *cfg)
3720 MonoMethod *method = cfg->method;
3722 MonoMethodSignature *sig;
3724 int alloc_size, pos, max_offset, i, rot_amount;
3731 if (mono_jit_trace_calls != NULL && mono_trace_eval (method))
3734 sig = mono_method_signature (method);
3735 cfg->code_size = 256 + sig->param_count * 20;
3736 code = cfg->native_code = g_malloc (cfg->code_size);
3738 ARM_MOV_REG_REG (code, ARMREG_IP, ARMREG_SP);
3740 alloc_size = cfg->stack_offset;
3743 if (!method->save_lmf) {
3744 ARM_PUSH (code, (cfg->used_int_regs | (1 << ARMREG_IP) | (1 << ARMREG_LR)));
3745 prev_sp_offset = 8; /* ip and lr */
3746 for (i = 0; i < 16; ++i) {
3747 if (cfg->used_int_regs & (1 << i))
3748 prev_sp_offset += 4;
3751 ARM_PUSH (code, 0x5ff0);
3752 prev_sp_offset = 4 * 10; /* all but r0-r3, sp and pc */
3753 pos += sizeof (MonoLMF) - prev_sp_offset;
3757 // align to MONO_ARCH_FRAME_ALIGNMENT bytes
3758 if (alloc_size & (MONO_ARCH_FRAME_ALIGNMENT - 1)) {
3759 alloc_size += MONO_ARCH_FRAME_ALIGNMENT - 1;
3760 alloc_size &= ~(MONO_ARCH_FRAME_ALIGNMENT - 1);
3763 /* the stack used in the pushed regs */
3764 if (prev_sp_offset & 4)
3766 cfg->stack_usage = alloc_size;
3768 if ((i = mono_arm_is_rotated_imm8 (alloc_size, &rot_amount)) >= 0) {
3769 ARM_SUB_REG_IMM (code, ARMREG_SP, ARMREG_SP, i, rot_amount);
3771 code = mono_arm_emit_load_imm (code, ARMREG_IP, alloc_size);
3772 ARM_SUB_REG_REG (code, ARMREG_SP, ARMREG_SP, ARMREG_IP);
3775 if (cfg->frame_reg != ARMREG_SP)
3776 ARM_MOV_REG_REG (code, cfg->frame_reg, ARMREG_SP);
3777 //g_print ("prev_sp_offset: %d, alloc_size:%d\n", prev_sp_offset, alloc_size);
3778 prev_sp_offset += alloc_size;
3780 /* compute max_offset in order to use short forward jumps
3781 * we could skip doing it on ARM because the immediate displacement
3782 * for jumps is large enough, it may be useful later for constant pools
3785 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
3786 MonoInst *ins = bb->code;
3787 bb->max_offset = max_offset;
3789 if (cfg->prof_options & MONO_PROFILE_COVERAGE)
3792 MONO_BB_FOR_EACH_INS (bb, ins)
3793 max_offset += ((guint8 *)ins_get_spec (ins->opcode))[MONO_INST_LEN];
3796 /* store runtime generic context */
3797 if (cfg->rgctx_var) {
3798 MonoInst *ins = cfg->rgctx_var;
3800 g_assert (ins->opcode == OP_REGOFFSET);
3802 if (arm_is_imm12 (ins->inst_offset)) {
3803 ARM_STR_IMM (code, MONO_ARCH_RGCTX_REG, ins->inst_basereg, ins->inst_offset);
3805 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_offset);
3806 ARM_STR_REG_REG (code, MONO_ARCH_RGCTX_REG, ins->inst_basereg, ARMREG_LR);
3810 /* load arguments allocated to register from the stack */
3813 cinfo = calculate_sizes (sig, sig->pinvoke);
3815 if (MONO_TYPE_ISSTRUCT (sig->ret)) {
3816 ArgInfo *ainfo = &cinfo->ret;
3817 inst = cfg->vret_addr;
3818 g_assert (arm_is_imm12 (inst->inst_offset));
3819 ARM_STR_IMM (code, ainfo->reg, inst->inst_basereg, inst->inst_offset);
3821 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
3822 ArgInfo *ainfo = cinfo->args + i;
3823 inst = cfg->args [pos];
3825 if (cfg->verbose_level > 2)
3826 g_print ("Saving argument %d (type: %d)\n", i, ainfo->regtype);
3827 if (inst->opcode == OP_REGVAR) {
3828 if (ainfo->regtype == RegTypeGeneral)
3829 ARM_MOV_REG_REG (code, inst->dreg, ainfo->reg);
3830 else if (ainfo->regtype == RegTypeFP) {
3831 g_assert_not_reached ();
3832 } else if (ainfo->regtype == RegTypeBase) {
3833 if (arm_is_imm12 (prev_sp_offset + ainfo->offset)) {
3834 ARM_LDR_IMM (code, inst->dreg, ARMREG_SP, (prev_sp_offset + ainfo->offset));
3836 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
3837 ARM_LDR_REG_REG (code, inst->dreg, ARMREG_SP, ARMREG_IP);
3840 g_assert_not_reached ();
3842 if (cfg->verbose_level > 2)
3843 g_print ("Argument %d assigned to register %s\n", pos, mono_arch_regname (inst->dreg));
3845 /* the argument should be put on the stack: FIXME handle size != word */
3846 if (ainfo->regtype == RegTypeGeneral) {
3847 switch (ainfo->size) {
3849 if (arm_is_imm12 (inst->inst_offset))
3850 ARM_STRB_IMM (code, ainfo->reg, inst->inst_basereg, inst->inst_offset);
3852 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
3853 ARM_STRB_REG_REG (code, ainfo->reg, inst->inst_basereg, ARMREG_IP);
3857 if (arm_is_imm8 (inst->inst_offset)) {
3858 ARM_STRH_IMM (code, ainfo->reg, inst->inst_basereg, inst->inst_offset);
3860 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
3861 ARM_STRH_REG_REG (code, ainfo->reg, inst->inst_basereg, ARMREG_IP);
3865 g_assert (arm_is_imm12 (inst->inst_offset));
3866 ARM_STR_IMM (code, ainfo->reg, inst->inst_basereg, inst->inst_offset);
3867 g_assert (arm_is_imm12 (inst->inst_offset + 4));
3868 ARM_STR_IMM (code, ainfo->reg + 1, inst->inst_basereg, inst->inst_offset + 4);
3871 if (arm_is_imm12 (inst->inst_offset)) {
3872 ARM_STR_IMM (code, ainfo->reg, inst->inst_basereg, inst->inst_offset);
3874 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
3875 ARM_STR_REG_REG (code, ainfo->reg, inst->inst_basereg, ARMREG_IP);
3879 } else if (ainfo->regtype == RegTypeBaseGen) {
3880 g_assert (arm_is_imm12 (prev_sp_offset + ainfo->offset));
3881 g_assert (arm_is_imm12 (inst->inst_offset));
3882 ARM_LDR_IMM (code, ARMREG_LR, ARMREG_SP, (prev_sp_offset + ainfo->offset));
3883 ARM_STR_IMM (code, ARMREG_LR, inst->inst_basereg, inst->inst_offset + 4);
3884 ARM_STR_IMM (code, ARMREG_R3, inst->inst_basereg, inst->inst_offset);
3885 } else if (ainfo->regtype == RegTypeBase) {
3886 if (arm_is_imm12 (prev_sp_offset + ainfo->offset)) {
3887 ARM_LDR_IMM (code, ARMREG_LR, ARMREG_SP, (prev_sp_offset + ainfo->offset));
3889 code = mono_arm_emit_load_imm (code, ARMREG_IP, prev_sp_offset + ainfo->offset);
3890 ARM_LDR_REG_REG (code, ARMREG_LR, ARMREG_SP, ARMREG_IP);
3893 switch (ainfo->size) {
3895 if (arm_is_imm8 (inst->inst_offset)) {
3896 ARM_STRB_IMM (code, ARMREG_LR, inst->inst_basereg, inst->inst_offset);
3898 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
3899 ARM_STRB_REG_REG (code, ARMREG_LR, inst->inst_basereg, ARMREG_IP);
3903 if (arm_is_imm8 (inst->inst_offset)) {
3904 ARM_STRH_IMM (code, ARMREG_LR, inst->inst_basereg, inst->inst_offset);
3906 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
3907 ARM_STRH_REG_REG (code, ARMREG_LR, inst->inst_basereg, ARMREG_IP);
3911 if (arm_is_imm12 (inst->inst_offset)) {
3912 ARM_STR_IMM (code, ARMREG_LR, inst->inst_basereg, inst->inst_offset);
3914 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
3915 ARM_STR_REG_REG (code, ARMREG_LR, inst->inst_basereg, ARMREG_IP);
3917 if (arm_is_imm12 (prev_sp_offset + ainfo->offset + 4)) {
3918 ARM_LDR_IMM (code, ARMREG_LR, ARMREG_SP, (prev_sp_offset + ainfo->offset + 4));
3920 code = mono_arm_emit_load_imm (code, ARMREG_IP, prev_sp_offset + ainfo->offset + 4);
3921 ARM_LDR_REG_REG (code, ARMREG_LR, ARMREG_SP, ARMREG_IP);
3923 if (arm_is_imm12 (inst->inst_offset + 4)) {
3924 ARM_STR_IMM (code, ARMREG_LR, inst->inst_basereg, inst->inst_offset + 4);
3926 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset + 4);
3927 ARM_STR_REG_REG (code, ARMREG_LR, inst->inst_basereg, ARMREG_IP);
3931 if (arm_is_imm12 (inst->inst_offset)) {
3932 ARM_STR_IMM (code, ARMREG_LR, inst->inst_basereg, inst->inst_offset);
3934 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
3935 ARM_STR_REG_REG (code, ARMREG_LR, inst->inst_basereg, ARMREG_IP);
3939 } else if (ainfo->regtype == RegTypeFP) {
3940 g_assert_not_reached ();
3941 } else if (ainfo->regtype == RegTypeStructByVal) {
3942 int doffset = inst->inst_offset;
3946 size = mini_type_stack_size_full (cfg->generic_sharing_context, inst->inst_vtype, NULL, sig->pinvoke);
3947 for (cur_reg = 0; cur_reg < ainfo->size; ++cur_reg) {
3948 if (arm_is_imm12 (doffset)) {
3949 ARM_STR_IMM (code, ainfo->reg + cur_reg, inst->inst_basereg, doffset);
3951 code = mono_arm_emit_load_imm (code, ARMREG_IP, doffset);
3952 ARM_STR_REG_REG (code, ainfo->reg + cur_reg, inst->inst_basereg, ARMREG_IP);
3954 soffset += sizeof (gpointer);
3955 doffset += sizeof (gpointer);
3957 if (ainfo->vtsize) {
3958 /* FIXME: handle overrun! with struct sizes not multiple of 4 */
3959 //g_print ("emit_memcpy (prev_sp_ofs: %d, ainfo->offset: %d, soffset: %d)\n", prev_sp_offset, ainfo->offset, soffset);
3960 code = emit_memcpy (code, ainfo->vtsize * sizeof (gpointer), inst->inst_basereg, doffset, ARMREG_SP, prev_sp_offset + ainfo->offset);
3962 } else if (ainfo->regtype == RegTypeStructByAddr) {
3963 g_assert_not_reached ();
3964 /* FIXME: handle overrun! with struct sizes not multiple of 4 */
3965 code = emit_memcpy (code, ainfo->vtsize * sizeof (gpointer), inst->inst_basereg, inst->inst_offset, ainfo->reg, 0);
3967 g_assert_not_reached ();
3972 if (method->save_lmf) {
3974 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_INTERNAL_METHOD,
3975 (gpointer)"mono_get_lmf_addr");
3976 code = emit_call_seq (cfg, code);
3977 /* we build the MonoLMF structure on the stack - see mini-arm.h */
3978 /* lmf_offset is the offset from the previous stack pointer,
3979 * alloc_size is the total stack space allocated, so the offset
3980 * of MonoLMF from the current stack ptr is alloc_size - lmf_offset.
3981 * The pointer to the struct is put in r1 (new_lmf).
3982 * r2 is used as scratch
3983 * The callee-saved registers are already in the MonoLMF structure
3985 code = emit_big_add (code, ARMREG_R1, ARMREG_SP, alloc_size - lmf_offset);
3986 /* r0 is the result from mono_get_lmf_addr () */
3987 ARM_STR_IMM (code, ARMREG_R0, ARMREG_R1, G_STRUCT_OFFSET (MonoLMF, lmf_addr));
3988 /* new_lmf->previous_lmf = *lmf_addr */
3989 ARM_LDR_IMM (code, ARMREG_R2, ARMREG_R0, G_STRUCT_OFFSET (MonoLMF, previous_lmf));
3990 ARM_STR_IMM (code, ARMREG_R2, ARMREG_R1, G_STRUCT_OFFSET (MonoLMF, previous_lmf));
3991 /* *(lmf_addr) = r1 */
3992 ARM_STR_IMM (code, ARMREG_R1, ARMREG_R0, G_STRUCT_OFFSET (MonoLMF, previous_lmf));
3993 /* Skip method (only needed for trampoline LMF frames) */
3994 ARM_STR_IMM (code, ARMREG_SP, ARMREG_R1, G_STRUCT_OFFSET (MonoLMF, ebp));
3995 /* save the current IP */
3996 ARM_MOV_REG_REG (code, ARMREG_R2, ARMREG_PC);
3997 ARM_STR_IMM (code, ARMREG_R2, ARMREG_R1, G_STRUCT_OFFSET (MonoLMF, eip));
4001 code = mono_arch_instrument_prolog (cfg, mono_trace_enter_method, code, TRUE);
4003 cfg->code_len = code - cfg->native_code;
4004 g_assert (cfg->code_len < cfg->code_size);
/*
 * mono_arch_emit_epilog:
 *
 *   Emit the method epilog: grow the native code buffer to fit the worst
 * case, optionally emit the leave-method trace hook, unlink the LMF that
 * the prolog installed (when method->save_lmf), then free the frame and
 * pop the saved registers — popping the saved lr straight into pc to
 * perform the return.
 *   NOTE(review): this extract is missing several lines (opening brace,
 * some locals and else branches); comments below cover only visible code.
 */
4011 mono_arch_emit_epilog (MonoCompile *cfg)
4013 MonoMethod *method = cfg->method;
4014 int pos, i, rot_amount;
/* conservative upper bound on the bytes emitted below */
4015 int max_epilog_size = 16 + 20*4;
/* LMF tear-down is the largest optional cost */
4018 if (cfg->method->save_lmf)
4019 max_epilog_size += 128;
4021 if (mono_jit_trace_calls != NULL)
4022 max_epilog_size += 50;
4024 if (cfg->prof_options & MONO_PROFILE_ENTER_LEAVE)
4025 max_epilog_size += 50;
/* double the buffer until the worst-case epilog fits (16-byte slack) */
4027 while (cfg->code_len + max_epilog_size > (cfg->code_size - 16)) {
4028 cfg->code_size *= 2;
4029 cfg->native_code = g_realloc (cfg->native_code, cfg->code_size);
4030 mono_jit_stats.code_reallocs++;
4034 * Keep in sync with OP_JMP
4036 code = cfg->native_code + cfg->code_len;
/* emit the leave-method trace hook when call tracing is active */
4038 if (mono_jit_trace_calls != NULL && mono_trace_eval (method)) {
4039 code = mono_arch_instrument_epilog (cfg, mono_trace_leave_method, code, TRUE);
4043 if (method->save_lmf) {
4045 /* all but r0-r3, sp and pc */
4046 pos += sizeof (MonoLMF) - (4 * 10);
4048 /* r2 contains the pointer to the current LMF */
4049 code = emit_big_add (code, ARMREG_R2, cfg->frame_reg, cfg->stack_usage - lmf_offset);
4050 /* ip = previous_lmf */
4051 ARM_LDR_IMM (code, ARMREG_IP, ARMREG_R2, G_STRUCT_OFFSET (MonoLMF, previous_lmf));
/* lr = lmf->lmf_addr (where the thread's current-LMF pointer lives) */
4053 ARM_LDR_IMM (code, ARMREG_LR, ARMREG_R2, G_STRUCT_OFFSET (MonoLMF, lmf_addr));
4054 /* *(lmf_addr) = previous_lmf */
4055 ARM_STR_IMM (code, ARMREG_IP, ARMREG_LR, G_STRUCT_OFFSET (MonoLMF, previous_lmf));
4056 /* FIXME: speedup: there is no actual need to restore the registers if
4057 * we didn't actually change them (idea from Zoltan).
4060 /* point sp at the registers to restore: 10 is 14 -4, because we skip r0-r3 */
4061 ARM_ADD_REG_IMM8 (code, ARMREG_SP, ARMREG_R2, (sizeof (MonoLMF) - 10 * sizeof (gulong)));
4062 ARM_POP_NWB (code, 0xaff0); /* restore ip to sp and lr to pc */
/* non-LMF path (presumably the else branch — its line is missing here):
 * rewind sp past the frame, preferring a single add with a rotated imm8 */
4064 if ((i = mono_arm_is_rotated_imm8 (cfg->stack_usage, &rot_amount)) >= 0) {
4065 ARM_ADD_REG_IMM (code, ARMREG_SP, cfg->frame_reg, i, rot_amount);
/* stack_usage does not encode as a rotated imm8: load it into ip first */
4067 code = mono_arm_emit_load_imm (code, ARMREG_IP, cfg->stack_usage);
4068 ARM_ADD_REG_REG (code, ARMREG_SP, ARMREG_SP, ARMREG_IP);
4070 /* FIXME: add v4 thumb interworking support */
/* popping the saved lr into pc is the actual return */
4071 ARM_POP_NWB (code, cfg->used_int_regs | ((1 << ARMREG_SP) | (1 << ARMREG_PC)));
4074 cfg->code_len = code - cfg->native_code;
4076 g_assert (cfg->code_len < cfg->code_size);
4080 /* remove once throw_exception_by_name is eliminated */
4082 exception_id_by_name (const char *name)
4084 if (strcmp (name, "IndexOutOfRangeException") == 0)
4085 return MONO_EXC_INDEX_OUT_OF_RANGE;
4086 if (strcmp (name, "OverflowException") == 0)
4087 return MONO_EXC_OVERFLOW;
4088 if (strcmp (name, "ArithmeticException") == 0)
4089 return MONO_EXC_ARITHMETIC;
4090 if (strcmp (name, "DivideByZeroException") == 0)
4091 return MONO_EXC_DIVIDE_BY_ZERO;
4092 if (strcmp (name, "InvalidCastException") == 0)
4093 return MONO_EXC_INVALID_CAST;
4094 if (strcmp (name, "NullReferenceException") == 0)
4095 return MONO_EXC_NULL_REF;
4096 if (strcmp (name, "ArrayTypeMismatchException") == 0)
4097 return MONO_EXC_ARRAY_TYPE_MISMATCH;
4098 g_error ("Unknown intrinsic exception %s\n", name);
/*
 * mono_arch_emit_exceptions:
 *
 *   Emit the out-of-line throw sequences for every MONO_PATCH_INFO_EXC
 * patch recorded during compilation. One throw site is emitted per
 * distinct exception type; later users of the same type are patched to
 * branch to the first site.
 *   NOTE(review): this extract is missing lines (opening brace, some
 * declarations, loop/switch closers); comments cover only visible code.
 */
4103 mono_arch_emit_exceptions (MonoCompile *cfg)
4105 MonoJumpInfo *patch_info;
/* per-exception-id: address of the shared throw site, and whether one is needed */
4108 const guint8* exc_throw_pos [MONO_EXC_INTRINS_NUM] = {NULL};
4109 guint8 exc_throw_found [MONO_EXC_INTRINS_NUM] = {0};
4110 int max_epilog_size = 50;
4112 /* count the number of exception infos */
4115 * make sure we have enough space for exceptions
/* first pass: budget 32 bytes per distinct exception type */
4117 for (patch_info = cfg->patch_info; patch_info; patch_info = patch_info->next) {
4118 if (patch_info->type == MONO_PATCH_INFO_EXC) {
4119 i = exception_id_by_name (patch_info->data.target);
4120 if (!exc_throw_found [i]) {
4121 max_epilog_size += 32;
4122 exc_throw_found [i] = TRUE;
/* grow the native code buffer until the throw code fits */
4127 while (cfg->code_len + max_epilog_size > (cfg->code_size - 16)) {
4128 cfg->code_size *= 2;
4129 cfg->native_code = g_realloc (cfg->native_code, cfg->code_size);
4130 mono_jit_stats.code_reallocs++;
4133 code = cfg->native_code + cfg->code_len;
4135 /* add code to raise exceptions */
4136 for (patch_info = cfg->patch_info; patch_info; patch_info = patch_info->next) {
4137 switch (patch_info->type) {
4138 case MONO_PATCH_INFO_EXC: {
4139 MonoClass *exc_class;
4140 unsigned char *ip = patch_info->ip.i + cfg->native_code;
4142 i = exception_id_by_name (patch_info->data.target);
/* a throw site for this exception already exists: branch there and drop the patch */
4143 if (exc_throw_pos [i]) {
4144 arm_patch (ip, exc_throw_pos [i]);
4145 patch_info->type = MONO_PATCH_INFO_NONE;
/* first occurrence: the code emitted below becomes the shared site */
4148 exc_throw_pos [i] = code;
4150 arm_patch (ip, code);
4152 exc_class = mono_class_from_name (mono_defaults.corlib, "System", patch_info->data.name);
4153 g_assert (exc_class);
/* r1 = faulting return address; r0 = type token loaded from the literal below */
4155 ARM_MOV_REG_REG (code, ARMREG_R1, ARMREG_LR);
4156 ARM_LDR_IMM (code, ARMREG_R0, ARMREG_PC, 0);
/* rewrite this patch into a call to the throw helper */
4157 patch_info->type = MONO_PATCH_INFO_INTERNAL_METHOD;
4158 patch_info->data.name = "mono_arch_throw_corlib_exception";
4159 patch_info->ip.i = code - cfg->native_code;
/* the type token literal the pc-relative LDR above reads */
4161 *(guint32*)(gpointer)code = exc_class->type_token;
4171 cfg->code_len = code - cfg->native_code;
4173 g_assert (cfg->code_len < cfg->code_size);
/* Per-thread JIT TLS setup hook; the body falls on lines missing from this extract. */
4178 mono_arch_setup_jit_tls_data (MonoJitTlsData *tls)
/* Per-thread JIT TLS teardown hook; the body falls on lines missing from this extract. */
4183 mono_arch_free_jit_tls_data (MonoJitTlsData *tls)
/*
 * mono_arch_emit_this_vret_args:
 *
 *   Add the implicit call arguments: a move of the `this` pointer into its
 * fixed argument register, and a move of the valuetype return address
 * (vt_reg) into r0.
 *   NOTE(review): guard conditions for the r1 override and the vtarg block
 * fall on lines missing from this extract — presumably `this` shifts to r1
 * when a vret address occupies r0; confirm against the full source.
 */
4188 mono_arch_emit_this_vret_args (MonoCompile *cfg, MonoCallInst *inst, int this_reg, int this_type, int vt_reg)
4191 int this_dreg = ARMREG_R0;
4194 this_dreg = ARMREG_R1;
4196 /* add the this argument */
4197 if (this_reg != -1) {
/* copy `this` into a fresh vreg and bind it to the fixed argument register */
4199 MONO_INST_NEW (cfg, this, OP_MOVE);
4200 this->type = this_type;
4201 this->sreg1 = this_reg;
4202 this->dreg = mono_regstate_next_int (cfg->rs);
4203 mono_bblock_add_inst (cfg->cbb, this);
4204 mono_call_inst_add_outarg_reg (cfg, inst, this->dreg, this_dreg, FALSE);
/* pass the valuetype return buffer address in r0 */
4209 MONO_INST_NEW (cfg, vtarg, OP_MOVE);
4210 vtarg->type = STACK_MP;
4211 vtarg->sreg1 = vt_reg;
4212 vtarg->dreg = mono_regstate_next_int (cfg->rs);
4213 mono_bblock_add_inst (cfg->cbb, vtarg);
4214 mono_call_inst_add_outarg_reg (cfg, inst, vtarg->dreg, ARMREG_R0, FALSE);
/*
 * Miscellaneous per-architecture entry points. Only the signatures are
 * visible in this extract; the bodies fall on missing lines, so nothing
 * is documented here beyond the declared interfaces.
 */
4219 mono_arch_get_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
4225 mono_arch_emit_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
4232 mono_arch_print_tree (MonoInst *tree, int arity)
4237 MonoInst* mono_arch_get_domain_intrinsic (MonoCompile* cfg)
4243 mono_arch_get_thread_intrinsic (MonoCompile* cfg)
4249 mono_arch_get_patch_offset (guint8 *code)
4256 mono_arch_flush_register_windows (void)
/*
 * mono_arch_fixup_jinfo:
 *
 *   Pack the frame's stack usage into the upper bits of
 * jit_info->used_regs (shifted by 14). The assert requires stack_usage to
 * be 4-byte aligned and below 256KB so it fits the 16-bit field.
 */
4261 mono_arch_fixup_jinfo (MonoCompile *cfg)
4263 /* max encoded stack usage is 64KB * 4 */
4264 g_assert ((cfg->stack_usage & ~(0xffff << 2)) == 0);
4265 cfg->jit_info->used_regs |= cfg->stack_usage << 14;
4268 #ifdef MONO_ARCH_HAVE_IMT
/*
 * mono_arch_emit_imt_argument:
 *
 *   Arrange for the IMT/method identifier to be passed in V5 for
 * interface calls. AOT compilation loads it via an OP_AOTCONST
 * METHODCONST patch; shared-generics code forwards imt_arg's vreg;
 * otherwise the MonoMethod pointer is materialized with OP_PCONST.
 *   NOTE(review): some lines (MonoInst declarations, an else branch) are
 * missing from this extract.
 */
4271 mono_arch_emit_imt_argument (MonoCompile *cfg, MonoCallInst *call, MonoInst *imt_arg)
4273 if (cfg->compile_aot) {
4274 int method_reg = mono_regstate_next_int (cfg->rs);
/* mark the call so the trampoline knows the IMT arg is dynamic */
4277 call->dynamic_imt_arg = TRUE;
/* the method constant is resolved at AOT load time */
4279 MONO_INST_NEW (cfg, ins, OP_AOTCONST);
4280 ins->dreg = method_reg;
4281 ins->inst_p0 = call->method;
4282 ins->inst_c1 = MONO_PATCH_INFO_METHODCONST;
4283 MONO_ADD_INS (cfg->cbb, ins);
4285 mono_call_inst_add_outarg_reg (cfg, call, method_reg, ARMREG_V5, FALSE);
4286 } else if (cfg->generic_context) {
4288 /* Always pass in a register for simplicity */
4289 call->dynamic_imt_arg = TRUE;
4291 cfg->uses_rgctx_reg = TRUE;
4294 mono_call_inst_add_outarg_reg (cfg, call, imt_arg->dreg, ARMREG_V5, FALSE);
/* plain JIT: embed the MonoMethod pointer directly */
4297 int method_reg = mono_alloc_preg (cfg);
4299 MONO_INST_NEW (cfg, ins, OP_PCONST);
4300 ins->inst_p0 = call->method;
4301 ins->dreg = method_reg;
4302 MONO_ADD_INS (cfg->cbb, ins);
4304 mono_call_inst_add_outarg_reg (cfg, call, method_reg, ARMREG_V5, FALSE);
/*
 * mono_arch_find_imt_method:
 *
 *   Recover the IMT method for an interface call site: the caller's code
 * stream contains a pc-relative LDR followed by a literal word holding the
 * MonoMethod pointer. A literal of 0 means AOT code, where the method is
 * passed in V5 instead of being embedded in the code stream.
 */
4310 mono_arch_find_imt_method (gpointer *regs, guint8 *code)
4312 guint32 *code_ptr = (guint32*)code;
4314 /* The IMT value is stored in the code stream right after the LDC instruction. */
/* sanity check: the word preceding the literal must be a pc-relative LDR */
4315 if (!IS_LDR_PC (code_ptr [0])) {
4316 g_warning ("invalid code stream, instruction before IMT value is not a LDC in %s() (code %p value 0: 0x%x -1: 0x%x -2: 0x%x)", __FUNCTION__, code, code_ptr [2], code_ptr [1], code_ptr [0]);
4317 g_assert (IS_LDR_PC (code_ptr [0]));
4319 if (code_ptr [1] == 0)
4320 /* This is AOTed code, the IMT method is in V5 */
4321 return (MonoMethod*)regs [ARMREG_V5];
/* JITted code: the literal itself is the MonoMethod pointer */
4323 return (MonoMethod*) code_ptr [1];
/* Recover the `this` argument of a call from the saved register state by
 * delegating to mono_arch_get_this_arg_from_call with the method's signature. */
4327 mono_arch_find_this_argument (gpointer *regs, MonoMethod *method, MonoGenericSharingContext *gsctx)
4329 return mono_arch_get_this_arg_from_call (gsctx, mono_method_signature (method), (gssize*)regs, NULL);
/* The vtable for static calls is passed in MONO_ARCH_RGCTX_REG; read it
 * back from the saved register state. */
4333 mono_arch_find_static_call_vtable (gpointer *regs, guint8 *code)
4335 return (MonoVTable*) regs [MONO_ARCH_RGCTX_REG];
/*
 * Code-size constants (in bytes, instruction count * 4) used by
 * mono_arch_build_imt_thunk below to budget each IMT entry's chunk.
 */
4338 #define ENABLE_WRONG_METHOD_CHECK 0
4339 #define BASE_SIZE (6 * 4)
4340 #define BSEARCH_ENTRY_SIZE (4 * 4)
4341 #define CMP_SIZE (3 * 4)
4342 #define BRANCH_SIZE (1 * 4)
4343 #define CALL_SIZE (2 * 4)
4344 #define WMC_SIZE (5 * 4)
/* byte distance from A to B (B - A), via 32-bit integer casts */
4345 #define DISTANCE(A, B) (((gint32)(B)) - ((gint32)(A)))
/*
 * arm_emit_value_and_patch_ldr:
 *
 *   Back-patch the pc-relative LDR previously emitted at `target` so its
 * 12-bit immediate reaches the literal placed at `code`.
 *   NOTE(review): since `delta` is guint32, the `delta >= 0` half of the
 * assert is always true — only the <= 0xFFF bound (and wrap-around into a
 * huge unsigned value) is actually checked.
 *   NOTE(review): the lines that store `value` and return are missing
 * from this extract.
 */
4348 arm_emit_value_and_patch_ldr (arminstr_t *code, arminstr_t *target, guint32 value)
4350 guint32 delta = DISTANCE (target, code);
4352 g_assert (delta >= 0 && delta <= 0xFFF);
/* or the byte offset into the LDR's imm12 field */
4353 *target = *target | delta;
/*
 * mono_arch_build_imt_thunk:
 *
 *   Build the native IMT conflict-resolution thunk for `vtable`: a first
 * pass sizes each entry's code chunk, a second pass emits compare/branch
 * chains (with literal MonoMethod pointers placed after unconditional
 * branches) that dispatch through the vtable slot matching the incoming
 * IMT method in r0, and a final pass patches the forward branches.
 *   NOTE(review): this extract is missing many lines (locals such as `j`,
 * `size` init, else branches, #endif/closing braces, the debug-guard
 * conditionals around the printf/disassemble calls, and the return);
 * comments cover only the visible code.
 */
4359 mono_arch_build_imt_thunk (MonoVTable *vtable, MonoDomain *domain, MonoIMTCheckItem **imt_entries, int count)
4361 int size, i, extra_space = 0;
4362 arminstr_t *code, *start, *vtable_target = NULL;
/* pass 1: compute each entry's chunk size so the thunk can be reserved up front */
4365 for (i = 0; i < count; ++i) {
4366 MonoIMTCheckItem *item = imt_entries [i];
4367 if (item->is_equals) {
/* the emitted LDR into pc needs the vtable-slot offset to fit in imm12 */
4368 g_assert (arm_is_imm12 (DISTANCE (vtable, &vtable->vtable[item->vtable_slot])));
4370 if (item->check_target_idx) {
4371 if (!item->compare_done)
4372 item->chunk_size += CMP_SIZE;
4373 item->chunk_size += BRANCH_SIZE;
4375 #if ENABLE_WRONG_METHOD_CHECK
4376 item->chunk_size += WMC_SIZE;
4379 item->chunk_size += CALL_SIZE;
/* bsearch node: compare + conditional branch; its target will not re-compare */
4381 item->chunk_size += BSEARCH_ENTRY_SIZE;
4382 imt_entries [item->check_target_idx]->compare_done = TRUE;
4384 size += item->chunk_size;
4387 start = code = mono_code_manager_reserve (domain->code_mp, size);
4390 printf ("building IMT thunk for class %s %s entries %d code size %d code at %p end %p vtable %p\n", vtable->klass->name_space, vtable->klass->name, count, size, start, ((guint8*)start) + size, vtable);
4391 for (i = 0; i < count; ++i) {
4392 MonoIMTCheckItem *item = imt_entries [i];
4393 printf ("method %d (%p) %s vtable slot %p is_equals %d chunk size %d\n", i, item->method, item->method->name, &vtable->vtable [item->vtable_slot], item->is_equals, item->chunk_size);
/* prolog: save r0/r1, load the IMT method (literal before lr) into r0,
 * and load the vtable pointer (literal patched below) into ip */
4397 ARM_PUSH2 (code, ARMREG_R0, ARMREG_R1);
4398 ARM_LDR_IMM (code, ARMREG_R0, ARMREG_LR, -4);
4399 vtable_target = code;
4400 ARM_LDR_IMM (code, ARMREG_IP, ARMREG_PC, 0);
4402 /* R0 == 0 means we are called from AOT code. In this case, V5 contains the IMT method */
4403 ARM_CMP_REG_IMM8 (code, ARMREG_R0, 0);
4404 ARM_MOV_REG_REG_COND (code, ARMREG_R0, ARMREG_V5, ARMCOND_EQ);
/* pass 2: emit each entry's compare/dispatch chunk */
4406 for (i = 0; i < count; ++i) {
4407 MonoIMTCheckItem *item = imt_entries [i];
4408 arminstr_t *imt_method = NULL;
4409 item->code_target = (guint8*)code;
4411 if (item->is_equals) {
4412 if (item->check_target_idx) {
4413 if (!item->compare_done) {
/* r1 = this entry's method literal; compare against the incoming method */
4415 ARM_LDR_IMM (code, ARMREG_R1, ARMREG_PC, 0);
4416 ARM_CMP_REG_REG (code, ARMREG_R0, ARMREG_R1);
/* forward branch to check_target_idx, patched in pass 3 */
4418 item->jmp_code = (guint8*)code;
4419 ARM_B_COND (code, ARMCOND_NE, 0);
/* match: restore r0/r1 and jump through the vtable slot */
4421 ARM_POP2 (code, ARMREG_R0, ARMREG_R1);
4422 ARM_LDR_IMM (code, ARMREG_PC, ARMREG_IP, DISTANCE (vtable, &vtable->vtable[item->vtable_slot]));
4424 /*Enable the commented code to assert on wrong method*/
4425 #if ENABLE_WRONG_METHOD_CHECK
4427 ARM_LDR_IMM (code, ARMREG_R1, ARMREG_PC, 0);
4428 ARM_CMP_REG_REG (code, ARMREG_R0, ARMREG_R1);
4429 ARM_B_COND (code, ARMCOND_NE, 1);
4431 ARM_POP2 (code, ARMREG_R0, ARMREG_R1);
4432 ARM_LDR_IMM (code, ARMREG_PC, ARMREG_IP, DISTANCE (vtable, &vtable->vtable[item->vtable_slot]));
4434 #if ENABLE_WRONG_METHOD_CHECK
/* place the method literal after the unconditional dispatch above */
4440 code = arm_emit_value_and_patch_ldr (code, imt_method, (guint32)item->method);
4442 /*must emit after unconditional branch*/
4443 if (vtable_target) {
4444 code = arm_emit_value_and_patch_ldr (code, vtable_target, (guint32)vtable);
4445 item->chunk_size += 4;
4446 vtable_target = NULL;
4449 /*We reserve the space for bsearch IMT values after the first entry with an absolute jump*/
4451 code += extra_space;
/* bsearch node: compare and branch-if-greater-or-equal, patched in pass 3 */
4455 ARM_LDR_IMM (code, ARMREG_R1, ARMREG_PC, 0);
4456 ARM_CMP_REG_REG (code, ARMREG_R0, ARMREG_R1);
4458 item->jmp_code = (guint8*)code;
4459 ARM_B_COND (code, ARMCOND_GE, 0);
/* pass 3: resolve forward branches and fill the reserved literal space */
4464 for (i = 0; i < count; ++i) {
4465 MonoIMTCheckItem *item = imt_entries [i];
4466 if (item->jmp_code) {
4467 if (item->check_target_idx)
4468 arm_patch (item->jmp_code, imt_entries [item->check_target_idx]->code_target);
4470 if (i > 0 && item->is_equals) {
/* back-patch the bsearch nodes' method literals into the reserved space */
4472 arminstr_t *space_start = (arminstr_t*)(item->code_target + item->chunk_size);
4473 for (j = i - 1; j >= 0 && !imt_entries [j]->is_equals; --j) {
4474 space_start = arm_emit_value_and_patch_ldr (space_start, (arminstr_t*)imt_entries [j]->code_target, (guint32)imt_entries [j]->method);
4481 char *buff = g_strdup_printf ("thunk_for_class_%s_%s_entries_%d", vtable->klass->name_space, vtable->klass->name, count);
4482 mono_disassemble_code (NULL, (guint8*)start, size, buff);
/* make the freshly written thunk visible to the instruction stream */
4487 mono_arch_flush_icache ((guint8*)start, size);
4488 mono_stats.imt_thunks_size += code - start;
4490 g_assert (DISTANCE (start, code) <= size);
4497 mono_arch_context_get_int_reg (MonoContext *ctx, int reg)
4499 return ctx->regs [reg];