2 * mini-arm.c: ARM backend for the Mono code generator
5 * Paolo Molaro (lupus@ximian.com)
6 * Dietmar Maurer (dietmar@ximian.com)
8 * (C) 2003 Ximian, Inc.
13 #include <mono/metadata/appdomain.h>
14 #include <mono/metadata/debug-helpers.h>
21 #include "mono/arch/arm/arm-fpa-codegen.h"
22 #elif defined(ARM_FPU_VFP)
23 #include "mono/arch/arm/arm-vfp-codegen.h"
26 /* This mutex protects architecture specific caches */
27 #define mono_mini_arch_lock() EnterCriticalSection (&mini_arch_mutex)
28 #define mono_mini_arch_unlock() LeaveCriticalSection (&mini_arch_mutex)
29 static CRITICAL_SECTION mini_arch_mutex;
/* CPU feature flags, filled in at startup (see mono_arch_cpu_optimizazions) */
31 static int v5_supported = 0;
32 static int thumb_supported = 0;
/* forward declaration: tests whether 'val' encodes as an ARM rotated 8-bit immediate */
34 static int mono_arm_is_rotated_imm8 (guint32 val, gint *rot_amount);
38 * floating point support: on ARM it is a mess, there are at least 3
39 * different setups, each of which is binary-incompatible with the others.
40 * 1) FPA: old and ugly, but unfortunately what current distros use
41 * the double binary format has the two words swapped. 8 double registers.
42 * Implemented usually by kernel emulation.
43 * 2) softfloat: the compiler emulates all the fp ops. Usually uses the
44 * ugly swapped double format (I guess a softfloat-vfp exists, too, though).
45 * 3) VFP: the new and actually sensible and useful FP support. Implemented
46 * in HW or kernel-emulated, requires new tools. I think this is what symbian uses.
48 * The plan is to write the FPA support first. softfloat can be tested in a chroot.
50 int mono_exc_esp_offset = 0;
/* immediate ranges accepted by the various ARM load/store addressing modes */
52 #define arm_is_imm12(v) ((v) > -4096 && (v) < 4096)
53 #define arm_is_imm8(v) ((v) > -256 && (v) < 256)
54 #define arm_is_fpimm8(v) ((v) >= -1020 && (v) <= 1020)
/* mask/value pair used to recognize an "ldr pc, [...]" instruction word */
56 #define LDR_MASK ((0xf << ARMCOND_SHIFT) | (3 << 26) | (1 << 22) | (1 << 20) | (15 << 12))
57 #define LDR_PC_VAL ((ARMCOND_AL << ARMCOND_SHIFT) | (1 << 26) | (0 << 22) | (1 << 20) | (15 << 12))
58 #define IS_LDR_PC(val) (((val) & LDR_MASK) == LDR_PC_VAL)
/* exact encodings of "add lr, pc, #4" and "mov lr, pc", used to identify call sequences */
60 #define ADD_LR_PC_4 ((ARMCOND_AL << ARMCOND_SHIFT) | (1 << 25) | (1 << 23) | (ARMREG_PC << 16) | (ARMREG_LR << 12) | 4)
61 #define MOV_LR_PC ((ARMCOND_AL << ARMCOND_SHIFT) | (1 << 24) | (0xa << 20) | (ARMREG_LR << 12) | ARMREG_PC)
/* Return a human-readable name for integer register 'reg'; used by debug output. */
65 mono_arch_regname (int reg)
67 static const char * rnames[] = {
68 "arm_r0", "arm_r1", "arm_r2", "arm_r3", "arm_v1",
69 "arm_v2", "arm_v3", "arm_v4", "arm_v5", "arm_v6",
70 "arm_v7", "arm_fp", "arm_ip", "arm_sp", "arm_lr",
/* only indices 0..15 are valid table entries (pc entry and fallback elided in this excerpt) */
73 if (reg >= 0 && reg < 16)
/* Return a human-readable name for floating-point register 'reg'; used by debug output. */
79 mono_arch_fregname (int reg)
81 static const char * rnames[] = {
82 "arm_f0", "arm_f1", "arm_f2", "arm_f3", "arm_f4",
83 "arm_f5", "arm_f6", "arm_f7", "arm_f8", "arm_f9",
84 "arm_f10", "arm_f11", "arm_f12", "arm_f13", "arm_f14",
85 "arm_f15", "arm_f16", "arm_f17", "arm_f18", "arm_f19",
86 "arm_f20", "arm_f21", "arm_f22", "arm_f23", "arm_f24",
87 "arm_f25", "arm_f26", "arm_f27", "arm_f28", "arm_f29",
/* only indices 0..31 are valid table entries (tail of table and fallback elided in this excerpt) */
90 if (reg >= 0 && reg < 32)
/*
 * Emit code computing dreg = sreg + imm.  When 'imm' fits an ARM rotated
 * 8-bit immediate a single ADD suffices; otherwise 'imm' is first
 * materialized in dreg (hence dreg must differ from sreg) and then added
 * to sreg.  Returns the updated code pointer.
 */
96 emit_big_add (guint8 *code, int dreg, int sreg, int imm)
99 if ((imm8 = mono_arm_is_rotated_imm8 (imm, &rot_amount)) >= 0) {
100 ARM_ADD_REG_IMM (code, dreg, sreg, imm8, rot_amount);
103 g_assert (dreg != sreg);
104 code = mono_arm_emit_load_imm (code, dreg, imm);
105 ARM_ADD_REG_REG (code, dreg, dreg, sreg);
/*
 * Emit code copying 'size' bytes from sreg+soffset to dreg+doffset.
 * Copies larger than 4 words use a word-copy loop through r0-r3;
 * smaller copies go word-by-word through LR.  Clobbers r0-r3 and lr.
 */
110 emit_memcpy (guint8 *code, int size, int dreg, int doffset, int sreg, int soffset)
112 /* we can use r0-r3, since this is called only for incoming args on the stack */
113 if (size > sizeof (gpointer) * 4) {
115 code = emit_big_add (code, ARMREG_R0, sreg, soffset);
116 code = emit_big_add (code, ARMREG_R1, dreg, doffset);
/* r2 = remaining byte count; loop: copy a word, advance both pointers, subtract 4, branch while non-zero */
117 start_loop = code = mono_arm_emit_load_imm (code, ARMREG_R2, size);
118 ARM_LDR_IMM (code, ARMREG_R3, ARMREG_R0, 0);
119 ARM_STR_IMM (code, ARMREG_R3, ARMREG_R1, 0);
120 ARM_ADD_REG_IMM8 (code, ARMREG_R0, ARMREG_R0, 4);
121 ARM_ADD_REG_IMM8 (code, ARMREG_R1, ARMREG_R1, 4);
122 ARM_SUBS_REG_IMM8 (code, ARMREG_R2, ARMREG_R2, 4);
123 ARM_B_COND (code, ARMCOND_NE, 0);
/* back-patch the conditional branch just emitted to target the loop head */
124 arm_patch (code - 4, start_loop);
/* small copy: if both offsets (including offset+size) fit imm12, address the originals directly... */
127 if (arm_is_imm12 (doffset) && arm_is_imm12 (doffset + size) &&
128 arm_is_imm12 (soffset) && arm_is_imm12 (soffset + size)) {
130 ARM_LDR_IMM (code, ARMREG_LR, sreg, soffset);
131 ARM_STR_IMM (code, ARMREG_LR, dreg, doffset);
/* ...otherwise rebase src/dst into r0/r1 so zero-based offsets can be used */
137 code = emit_big_add (code, ARMREG_R0, sreg, soffset);
138 code = emit_big_add (code, ARMREG_R1, dreg, doffset);
139 doffset = soffset = 0;
141 ARM_LDR_IMM (code, ARMREG_LR, ARMREG_R0, soffset);
142 ARM_STR_IMM (code, ARMREG_LR, ARMREG_R1, doffset);
/* NOTE(review): the assert implies 'size' is consumed in full words here; sub-word tail handling is elided in this excerpt */
148 g_assert (size == 0);
/*
 * Emit an indirect call to the address held in 'reg'.  Uses BLX where
 * available; otherwise the return address is set up manually with
 * "mov lr, pc" followed by a jump ("mov pc, reg").
 */
153 emit_call_reg (guint8 *code, int reg)
156 ARM_BLX_REG (code, reg);
/* pc reads two instructions ahead, so lr ends up holding the address right after the jump below */
158 ARM_MOV_REG_REG (code, ARMREG_LR, ARMREG_PC);
162 ARM_MOV_REG_REG (code, ARMREG_PC, reg);
/*
 * Emit a call sequence.  Dynamic methods cannot rely on patch-info based
 * direct calls, so the target address is placed in an inline literal
 * word (NULL here, patched later) that is loaded pc-relative into IP,
 * and the call goes through IP.
 */
168 emit_call_seq (MonoCompile *cfg, guint8 *code)
170 if (cfg->method->dynamic) {
171 ARM_LDR_IMM (code, ARMREG_IP, ARMREG_PC, 0);
173 *(gpointer*)code = NULL;
175 code = emit_call_reg (code, ARMREG_IP);
/*
 * Move a call's return value into the destination register of 'ins'.
 * For FP calls the FPA result arrives in F0 and is copied to ins->dreg
 * only when they differ.  (Other opcode cases elided in this excerpt.)
 */
183 emit_move_return_value (MonoCompile *cfg, MonoInst *ins, guint8 *code)
185 switch (ins->opcode) {
188 case OP_FCALL_MEMBASE:
190 if (ins->dreg != ARM_FPA_F0)
191 ARM_MVFD (code, ins->dreg, ARM_FPA_F0);
200 * mono_arch_get_argument_info:
201 * @csig: a method signature
202 * @param_count: the number of parameters to consider
203 * @arg_info: an array to store the result infos
205 * Gathers information on parameters such as size, alignment and
206 * padding. arg_info should be large enough to hold param_count + 1 entries.
208 * Returns the size of the activation frame.
211 mono_arch_get_argument_info (MonoMethodSignature *csig, int param_count, MonoJitArgumentInfo *arg_info)
213 int k, frame_size = 0;
214 int size, align, pad;
/* a struct return is passed as a hidden pointer argument, taking one slot */
217 if (MONO_TYPE_ISSTRUCT (csig->ret)) {
218 frame_size += sizeof (gpointer);
222 arg_info [0].offset = offset;
/* 'this' (when present) occupies one pointer slot -- surrounding condition elided in this excerpt */
225 frame_size += sizeof (gpointer);
229 arg_info [0].size = frame_size;
231 for (k = 0; k < param_count; k++) {
234 size = mono_type_native_stack_size (csig->params [k], &align);
236 size = mini_type_stack_size (NULL, csig->params [k], &align);
238 /* ignore alignment for now */
/* insert padding so the argument starts on its natural alignment boundary */
241 frame_size += pad = (align - (frame_size & (align - 1))) & (align - 1);
242 arg_info [k].pad = pad;
244 arg_info [k + 1].pad = 0;
245 arg_info [k + 1].size = size;
247 arg_info [k + 1].offset = offset;
/* round the whole frame up to the architecture frame alignment */
251 align = MONO_ARCH_FRAME_ALIGNMENT;
252 frame_size += pad = (align - (frame_size & (align - 1))) & (align - 1);
253 arg_info [k].pad = pad;
/*
 * Decode an "ldr" instruction word: extract the base register (bits
 * 16-19) and the 12-bit immediate offset, negating the offset when the
 * U bit (bit 23) is clear.  The decoded offset is returned through
 * *displacement; the base register's saved value comes from 'regs'.
 */
259 decode_vcall_slot_from_ldr (guint32 ldr, gpointer *regs, int *displacement)
263 reg = (ldr >> 16 ) & 0xf;
264 offset = ldr & 0xfff;
265 if (((ldr >> 23) & 1) == 0) /*U bit, 0 means negative and 1 positive*/
267 /*g_print ("found vcall at r%d + %d for code at %p 0x%x\n", reg, offset, code, *code);*/
270 *displacement = offset;
/*
 * Given the return address of a call ('code_ptr') and the saved register
 * state, identify whether the call came through a vtable/interface slot
 * and, if so, decode which register+offset held the slot.  Direct calls
 * (bl / mov pc) are ignored by returning nothing here (elided).
 */
275 mono_arch_get_vcall_slot (guint8 *code_ptr, gpointer *regs, int *displacement)
277 guint32* code = (guint32*)code_ptr;
279 /* Locate the address of the method-specific trampoline. The call using
280 the vtable slot that took the processing flow to 'arch_create_jit_trampoline'
281 looks something like this:
290 The call sequence could be also:
293 function pointer literal
297 Note that on ARM5+ we can use one instruction instead of the last two.
298 Therefore, we need to locate the 'ldr rA' instruction to know which
299 register was used to hold the method addrs.
302 /* This is the instruction after "ldc pc, xxx", "mov pc, xxx" or "bl xxx" could be either the IMT value or some other instruction*/
305 /* Three possible code sequences can happen here:
309 * ldr pc, [rX - #offset]
315 * ldr pc, [rX - #offset]
317 * direct branch with bl:
321 * direct branch with mov:
325 * We only need to identify interface and virtual calls, the others can be ignored.
/* sequence 1: "add lr, pc, #4" followed by "ldr pc, [...]" -- the ldr is one word back */
328 if (IS_LDR_PC (code [-1]) && code [-2] == ADD_LR_PC_4)
329 return decode_vcall_slot_from_ldr (code [-1], regs, displacement);
/* sequence 2: "mov lr, pc" followed by "ldr pc, [...]" -- the ldr is at the return address itself */
331 if (IS_LDR_PC (code [0]) && code [-1] == MOV_LR_PC)
332 return decode_vcall_slot_from_ldr (code [0], regs, displacement);
/* Return the address of the vtable slot used by the call at 'code' (base + displacement). */
338 mono_arch_get_vcall_slot_addr (guint8* code, gpointer *regs)
342 vt = mono_arch_get_vcall_slot (code, regs, &displacement);
345 return (gpointer*)((char*)vt + displacement);
348 #define MAX_ARCH_DELEGATE_PARAMS 3
/*
 * Return (and cache) a small trampoline that implements delegate Invoke.
 * The has_target variant replaces 'this' (R0) with delegate->target and
 * jumps to delegate->method_ptr; the static variant slides the register
 * arguments down by one and jumps.  Caches are protected by the arch lock.
 */
351 mono_arch_get_delegate_invoke_impl (MonoMethodSignature *sig, gboolean has_target)
353 guint8 *code, *start;
355 /* FIXME: Support more cases */
356 if (MONO_TYPE_ISSTRUCT (sig->ret))
/* has_target case: a single shared 3-instruction stub, cached once */
360 static guint8* cached = NULL;
361 mono_mini_arch_lock ();
363 mono_mini_arch_unlock ();
367 start = code = mono_global_codeman_reserve (12);
369 /* Replace the this argument with the target */
370 ARM_LDR_IMM (code, ARMREG_IP, ARMREG_R0, G_STRUCT_OFFSET (MonoDelegate, method_ptr));
371 ARM_LDR_IMM (code, ARMREG_R0, ARMREG_R0, G_STRUCT_OFFSET (MonoDelegate, target));
372 ARM_MOV_REG_REG (code, ARMREG_PC, ARMREG_IP);
374 g_assert ((code - start) <= 12);
376 mono_arch_flush_icache (code, 12);
378 mono_mini_arch_unlock ();
/* static case: one cached stub per parameter count (all params must be register-sized) */
381 static guint8* cache [MAX_ARCH_DELEGATE_PARAMS + 1] = {NULL};
384 if (sig->param_count > MAX_ARCH_DELEGATE_PARAMS)
386 for (i = 0; i < sig->param_count; ++i)
387 if (!mono_is_regsize_var (sig->params [i]))
390 mono_mini_arch_lock ();
391 code = cache [sig->param_count];
393 mono_mini_arch_unlock ();
/* 2 fixed instructions + one MOV per argument */
397 size = 8 + sig->param_count * 4;
398 start = code = mono_global_codeman_reserve (size);
400 ARM_LDR_IMM (code, ARMREG_IP, ARMREG_R0, G_STRUCT_OFFSET (MonoDelegate, method_ptr));
401 /* slide down the arguments */
402 for (i = 0; i < sig->param_count; ++i) {
403 ARM_MOV_REG_REG (code, (ARMREG_R0 + i), (ARMREG_R0 + i + 1));
405 ARM_MOV_REG_REG (code, ARMREG_PC, ARMREG_IP);
407 g_assert ((code - start) <= size);
409 mono_arch_flush_icache (code, size);
410 cache [sig->param_count] = start;
411 mono_mini_arch_unlock ();
/*
 * Extract the 'this' argument from the saved registers of a call.  When
 * the callee returns a struct, R0 holds the valuetype return address and
 * 'this' is shifted into R1; otherwise 'this' is in R0.
 */
419 mono_arch_get_this_arg_from_call (MonoGenericSharingContext *gsctx, MonoMethodSignature *sig, gssize *regs, guint8 *code)
421 /* FIXME: handle returning a struct */
422 if (MONO_TYPE_ISSTRUCT (sig->ret))
423 return (gpointer)regs [ARMREG_R1];
424 return (gpointer)regs [ARMREG_R0];
428 * Initialize the cpu to execute managed code.
431 mono_arch_cpu_init (void)
436 * Initialize architecture specific code.
439 mono_arch_init (void)
/* this mutex guards the arch-specific caches (e.g. the delegate trampoline caches) */
441 InitializeCriticalSection (&mini_arch_mutex);
445 * Cleanup architecture specific code.
448 mono_arch_cleanup (void)
453 * This function returns the optimizations supported on this cpu.
/* (sic) the misspelled name is the established mono port interface -- do not rename */
456 mono_arch_cpu_optimizazions (guint32 *exclude_mask)
460 thumb_supported = TRUE;
/* probe CPU features by parsing /proc/cpuinfo (Linux); sets v5_supported / thumb_supported */
465 FILE *file = fopen ("/proc/cpuinfo", "r");
467 while ((line = fgets (buf, 512, file))) {
468 if (strncmp (line, "Processor", 9) == 0) {
469 char *ver = strstr (line, "(v");
470 if (ver && (ver [2] == '5' || ver [2] == '6' || ver [2] == '7')) {
475 if (strncmp (line, "Features", 8) == 0) {
476 char *th = strstr (line, "thumb");
478 thumb_supported = TRUE;
486 /*printf ("features: v5: %d, thumb: %d\n", v5_supported, thumb_supported);*/
490 /* no arm-specific optimizations yet */
/* Return whether 't' fits in a single 32-bit integer register (pointers, ints, references). */
496 is_regsize_var (MonoType *t) {
499 t = mono_type_get_underlying_type (t);
506 case MONO_TYPE_FNPTR:
508 case MONO_TYPE_OBJECT:
509 case MONO_TYPE_STRING:
510 case MONO_TYPE_CLASS:
511 case MONO_TYPE_SZARRAY:
512 case MONO_TYPE_ARRAY:
/* generic instantiations count only when they are reference types */
514 case MONO_TYPE_GENERICINST:
515 if (!mono_type_generic_inst_is_valuetype (t))
518 case MONO_TYPE_VALUETYPE:
/*
 * Collect the variables eligible for global register allocation: live,
 * non-volatile, non-indirect locals/arguments of register size, sorted
 * by live range.
 */
525 mono_arch_get_allocatable_int_vars (MonoCompile *cfg)
530 for (i = 0; i < cfg->num_varinfo; i++) {
531 MonoInst *ins = cfg->varinfo [i];
532 MonoMethodVar *vmv = MONO_VARINFO (cfg, i);
/* unused or dead vars (empty live range) are skipped */
535 if (vmv->range.first_use.abs_pos >= vmv->range.last_use.abs_pos)
538 if (ins->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT) || (ins->opcode != OP_LOCAL && ins->opcode != OP_ARG))
541 /* we can only allocate 32 bit values */
542 if (is_regsize_var (ins->inst_vtype)) {
543 g_assert (MONO_VARINFO (cfg, i)->reg == -1);
544 g_assert (i == vmv->idx);
545 vars = mono_varlist_insert_sorted (cfg, vars, vmv, FALSE);
552 #define USE_EXTRA_TEMPS 0
/* Return the list of callee-saved registers usable by the global register allocator. */
555 mono_arch_get_global_int_regs (MonoCompile *cfg)
558 regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_V1));
559 regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_V2));
560 regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_V3));
561 regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_V4));
562 if (cfg->compile_aot)
563 /* V5 is reserved for holding the IMT method */
564 cfg->used_int_regs |= (1 << ARMREG_V5);
566 regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_V5));
567 /*regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_V6));*/
568 /*regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_V7));*/
574 * mono_arch_regalloc_cost:
576 * Return the cost, in number of memory references, of the action of
577 * allocating the variable VMV into a register during global register
/* (body elided in this excerpt) */
581 mono_arch_regalloc_cost (MonoCompile *cfg, MonoMethodVar *vmv)
/*
 * Flush the instruction cache for the freshly generated code range so
 * the CPU does not execute stale instructions.  One branch uses
 * sys_icache_invalidate (presumably the Darwin path -- TODO confirm, the
 * #ifdef is elided here); the other issues the Linux/ARM sys_cacheflush
 * syscall (swi 0x9f0002) via inline assembly.
 */
588 mono_arch_flush_icache (guint8 *code, gint size)
591 sys_icache_invalidate (code, size);
593 __asm __volatile ("mov r0, %0\n"
596 "swi 0x9f0002 @ sys_cacheflush"
598 : "r" (code), "r" (code + size), "r" (0)
599 : "r0", "r1", "r3" );
614 guint16 vtsize; /* in param area */
616 guint8 regtype : 4; /* 0 general, 1 basereg, 2 floating point register, see RegType* */
617 guint8 size : 4; /* 1, 2, 4, 8, or regs used by RegTypeStructByVal */
/*
 * Assign the next argument location: a core register while r0-r3 last,
 * otherwise a caller-frame stack slot (RegTypeBase).  'simple' FALSE
 * means a two-word argument; if only r3 is left its first word goes in
 * r3 and the second on the stack (RegTypeBaseGen).  Updates *gr and
 * *stack_size accordingly.
 */
632 add_general (guint *gr, guint *stack_size, ArgInfo *ainfo, gboolean simple)
635 if (*gr > ARMREG_R3) {
636 ainfo->offset = *stack_size;
637 ainfo->reg = ARMREG_SP; /* in the caller */
638 ainfo->regtype = RegTypeBase;
649 /* first word in r3 and the second on the stack */
650 ainfo->offset = *stack_size;
651 ainfo->reg = ARMREG_SP; /* in the caller */
652 ainfo->regtype = RegTypeBaseGen;
654 } else if (*gr >= ARMREG_R3) {
659 ainfo->offset = *stack_size;
660 ainfo->reg = ARMREG_SP; /* in the caller */
661 ainfo->regtype = RegTypeBase;
/*
 * Build a CallInfo describing where every argument and the return value
 * of 'sig' live under the ARM calling convention: register (r0-r3),
 * caller stack slot, or a split between the two.  'is_pinvoke' selects
 * native rather than managed valuetype sizing.  Caller frees the result.
 */
676 calculate_sizes (MonoMethodSignature *sig, gboolean is_pinvoke)
679 int n = sig->hasthis + sig->param_count;
681 guint32 stack_size = 0;
682 CallInfo *cinfo = g_malloc0 (sizeof (CallInfo) + sizeof (ArgInfo) * n);
686 /* FIXME: handle returning a struct */
687 if (MONO_TYPE_ISSTRUCT (sig->ret)) {
/* struct returns consume the first argument slot as a hidden pointer */
688 add_general (&gr, &stack_size, &cinfo->ret, TRUE);
689 cinfo->struct_ret = ARMREG_R0;
694 add_general (&gr, &stack_size, cinfo->args + n, TRUE);
697 DEBUG(printf("params: %d\n", sig->param_count));
698 for (i = 0; i < sig->param_count; ++i) {
699 if ((sig->call_convention == MONO_CALL_VARARG) && (i == sig->sentinelpos)) {
700 /* Prevent implicit arguments and sig_cookie from
701 being passed in registers */
703 /* Emit the signature cookie just before the implicit arguments */
704 add_general (&gr, &stack_size, &cinfo->sig_cookie, TRUE);
706 DEBUG(printf("param %d: ", i));
707 if (sig->params [i]->byref) {
708 DEBUG(printf("byref\n"));
709 add_general (&gr, &stack_size, cinfo->args + n, TRUE);
713 simpletype = mono_type_get_underlying_type (sig->params [i])->type;
714 switch (simpletype) {
715 case MONO_TYPE_BOOLEAN:
718 cinfo->args [n].size = 1;
719 add_general (&gr, &stack_size, cinfo->args + n, TRUE);
725 cinfo->args [n].size = 2;
726 add_general (&gr, &stack_size, cinfo->args + n, TRUE);
731 cinfo->args [n].size = 4;
732 add_general (&gr, &stack_size, cinfo->args + n, TRUE);
738 case MONO_TYPE_FNPTR:
739 case MONO_TYPE_CLASS:
740 case MONO_TYPE_OBJECT:
741 case MONO_TYPE_STRING:
742 case MONO_TYPE_SZARRAY:
743 case MONO_TYPE_ARRAY:
745 cinfo->args [n].size = sizeof (gpointer);
746 add_general (&gr, &stack_size, cinfo->args + n, TRUE);
749 case MONO_TYPE_GENERICINST:
750 if (!mono_type_generic_inst_is_valuetype (sig->params [i])) {
751 cinfo->args [n].size = sizeof (gpointer);
752 add_general (&gr, &stack_size, cinfo->args + n, TRUE);
757 case MONO_TYPE_TYPEDBYREF:
758 case MONO_TYPE_VALUETYPE: {
763 if (simpletype == MONO_TYPE_TYPEDBYREF) {
764 size = sizeof (MonoTypedRef);
766 MonoClass *klass = mono_class_from_mono_type (sig->params [i]);
/* pinvoke signatures use the native (marshalled) size, managed the CLR size */
768 size = mono_class_native_size (klass, NULL);
770 size = mono_class_value_size (klass, NULL);
772 DEBUG(printf ("load %d bytes struct\n",
773 mono_class_native_size (sig->params [i]->data.klass, NULL)));
/* round the struct size up to whole pointer-sized words */
776 align_size += (sizeof (gpointer) - 1);
777 align_size &= ~(sizeof (gpointer) - 1);
778 nwords = (align_size + sizeof (gpointer) -1 ) / sizeof (gpointer);
779 cinfo->args [n].regtype = RegTypeStructByVal;
780 /* FIXME: align gr and stack_size if needed */
781 if (gr > ARMREG_R3) {
782 cinfo->args [n].size = 0;
783 cinfo->args [n].vtsize = nwords;
/* split the struct: as many words as still fit in r0-r3, remainder on the stack */
785 int rest = ARMREG_R3 - gr + 1;
786 int n_in_regs = rest >= nwords? nwords: rest;
787 cinfo->args [n].size = n_in_regs;
788 cinfo->args [n].vtsize = nwords - n_in_regs;
789 cinfo->args [n].reg = gr;
792 cinfo->args [n].offset = stack_size;
793 /*g_print ("offset for arg %d at %d\n", n, stack_size);*/
794 stack_size += nwords * sizeof (gpointer);
801 cinfo->args [n].size = 8;
802 add_general (&gr, &stack_size, cinfo->args + n, FALSE);
806 g_error ("Can't trampoline 0x%x", sig->params [i]->type);
/* now classify the return value */
811 simpletype = mono_type_get_underlying_type (sig->ret)->type;
812 switch (simpletype) {
813 case MONO_TYPE_BOOLEAN:
824 case MONO_TYPE_FNPTR:
825 case MONO_TYPE_CLASS:
826 case MONO_TYPE_OBJECT:
827 case MONO_TYPE_SZARRAY:
828 case MONO_TYPE_ARRAY:
829 case MONO_TYPE_STRING:
830 cinfo->ret.reg = ARMREG_R0;
834 cinfo->ret.reg = ARMREG_R0;
/* FP results also come back in the integer register pair */
838 cinfo->ret.reg = ARMREG_R0;
839 /* FIXME: cinfo->ret.reg = ???;
840 cinfo->ret.regtype = RegTypeFP;*/
842 case MONO_TYPE_GENERICINST:
843 if (!mono_type_generic_inst_is_valuetype (sig->ret)) {
844 cinfo->ret.reg = ARMREG_R0;
848 case MONO_TYPE_VALUETYPE:
850 case MONO_TYPE_TYPEDBYREF:
854 g_error ("Can't handle as return value 0x%x", sig->ret->type);
858 /* align stack size to 8 */
859 DEBUG (printf (" stack size: %d (%d)\n", (stack_size + 15) & ~15, stack_size));
860 stack_size = (stack_size + 7) & ~7;
862 cinfo->stack_usage = stack_size;
868 * Set var information according to the calling convention. arm version.
869 * The locals var stuff should most likely be split in another method.
872 mono_arch_allocate_vars (MonoCompile *m)
874 MonoMethodSignature *sig;
875 MonoMethodHeader *header;
877 int i, offset, size, align, curinst;
878 int frame_reg = ARMREG_FP;
880 /* FIXME: this will change when we use FP as gcc does */
881 m->flags |= MONO_CFG_HAS_SPILLUP;
883 /* allow room for the vararg method args: void* and long/double */
884 if (mono_jit_trace_calls != NULL && mono_trace_eval (m->method))
885 m->param_area = MAX (m->param_area, sizeof (gpointer)*8);
887 header = mono_method_get_header (m->method);
890 * We use the frame register also for any method that has
891 * exception clauses. This way, when the handlers are called,
892 * the code will reference local variables using the frame reg instead of
893 * the stack pointer: if we had to restore the stack pointer, we'd
894 * corrupt the method frames that are already on the stack (since
895 * filters get called before stack unwinding happens) when the filter
896 * code would call any method (this also applies to finally etc.).
898 if ((m->flags & MONO_CFG_HAS_ALLOCA) || header->num_clauses)
899 frame_reg = ARMREG_FP;
900 m->frame_reg = frame_reg;
901 if (frame_reg != ARMREG_SP) {
902 m->used_int_regs |= 1 << frame_reg;
905 sig = mono_method_signature (m->method);
/* scalar return values live directly in r0 */
909 if (!MONO_TYPE_ISSTRUCT (sig->ret)) {
910 /* FIXME: handle long and FP values */
911 switch (mono_type_get_underlying_type (sig->ret)->type) {
915 m->ret->opcode = OP_REGVAR;
916 m->ret->inst_c0 = ARMREG_R0;
920 /* local vars are at a positive offset from the stack pointer */
922 * also note that if the function uses alloca, we use FP
923 * to point at the local variables.
925 offset = 0; /* linkage area */
926 /* align the offset to 16 bytes: not sure this is needed here */
928 //offset &= ~(8 - 1);
930 /* add parameter area size for called functions */
931 offset += m->param_area;
/* reserve spill space for FP out-args when present -- adjustment amount elided in this excerpt */
934 if (m->flags & MONO_CFG_HAS_FPOUT)
937 /* allow room to save the return value */
938 if (mono_jit_trace_calls != NULL && mono_trace_eval (m->method))
941 /* the MonoLMF structure is stored just below the stack pointer */
943 if (sig->call_convention == MONO_CALL_VARARG) {
/* the struct-return address gets its own pointer-aligned slot */
947 if (MONO_TYPE_ISSTRUCT (sig->ret)) {
949 offset += sizeof(gpointer) - 1;
950 offset &= ~(sizeof(gpointer) - 1);
951 inst->inst_offset = offset;
952 inst->opcode = OP_REGOFFSET;
953 inst->inst_basereg = frame_reg;
954 if (G_UNLIKELY (m->verbose_level > 1)) {
955 printf ("vret_addr =");
956 mono_print_ins (m->vret_addr);
958 offset += sizeof(gpointer);
959 if (sig->call_convention == MONO_CALL_VARARG)
960 m->sig_cookie += sizeof (gpointer);
/* lay out the local variables */
963 curinst = m->locals_start;
964 for (i = curinst; i < m->num_varinfo; ++i) {
965 inst = m->varinfo [i];
966 if ((inst->flags & MONO_INST_IS_DEAD) || inst->opcode == OP_REGVAR)
969 /* inst->backend.is_pinvoke indicates native sized value types, this is used by the
970 * pinvoke wrappers when they call functions returning structure */
971 if (inst->backend.is_pinvoke && MONO_TYPE_ISSTRUCT (inst->inst_vtype) && inst->inst_vtype->type != MONO_TYPE_TYPEDBYREF)
972 size = mono_class_native_size (mono_class_from_mono_type (inst->inst_vtype), &align);
974 size = mono_type_size (inst->inst_vtype, &align);
976 /* FIXME: if a structure is misaligned, our memcpy doesn't work,
977 * since it loads/stores misaligned words, which don't do the right thing.
979 if (align < 4 && size >= 4)
982 offset &= ~(align - 1);
983 inst->inst_offset = offset;
984 inst->opcode = OP_REGOFFSET;
985 inst->inst_basereg = frame_reg;
987 //g_print ("allocating local %d to %d\n", i, inst->inst_offset);
/* lay out 'this' (and any other implicit arg) when it did not get a register */
992 inst = m->args [curinst];
993 if (inst->opcode != OP_REGVAR) {
994 inst->opcode = OP_REGOFFSET;
995 inst->inst_basereg = frame_reg;
996 offset += sizeof (gpointer) - 1;
997 offset &= ~(sizeof (gpointer) - 1);
998 inst->inst_offset = offset;
999 offset += sizeof (gpointer);
1000 if (sig->call_convention == MONO_CALL_VARARG)
1001 m->sig_cookie += sizeof (gpointer);
/* lay out the declared parameters */
1006 for (i = 0; i < sig->param_count; ++i) {
1007 inst = m->args [curinst];
1008 if (inst->opcode != OP_REGVAR) {
1009 inst->opcode = OP_REGOFFSET;
1010 inst->inst_basereg = frame_reg;
1011 size = mono_type_size (sig->params [i], &align);
1012 /* FIXME: if a structure is misaligned, our memcpy doesn't work,
1013 * since it loads/stores misaligned words, which don't do the right thing.
1015 if (align < 4 && size >= 4)
1017 offset += align - 1;
1018 offset &= ~(align - 1);
1019 inst->inst_offset = offset;
1021 if ((sig->call_convention == MONO_CALL_VARARG) && (i < sig->sentinelpos))
1022 m->sig_cookie += size;
1027 /* align the offset to 8 bytes */
1032 m->stack_offset = offset;
/* Create arch-specific compile-time variables: a vret_addr arg for struct returns. */
1037 mono_arch_create_vars (MonoCompile *cfg)
1039 MonoMethodSignature *sig;
1041 sig = mono_method_signature (cfg->method);
1043 if (MONO_TYPE_ISSTRUCT (sig->ret)) {
1044 cfg->vret_addr = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_ARG);
1045 if (G_UNLIKELY (cfg->verbose_level > 1)) {
1046 printf ("vret_addr = ");
1047 mono_print_ins (cfg->vret_addr);
1053 * take the arguments and generate the arch-specific
1054 * instructions to properly call the function in call.
1055 * This includes pushing, moving arguments to the right register
1057 * Issue: who does the spilling if needed, and when?
1060 mono_arch_call_opcode (MonoCompile *cfg, MonoBasicBlock* bb, MonoCallInst *call, int is_virtual) {
1062 MonoMethodSignature *sig;
1067 sig = call->signature;
1068 n = sig->param_count + sig->hasthis;
/* classify every argument location, then emit one OUTARG per argument */
1070 cinfo = calculate_sizes (sig, sig->pinvoke);
1071 if (cinfo->struct_ret)
1072 call->used_iregs |= 1 << cinfo->struct_ret;
1074 for (i = 0; i < n; ++i) {
1075 ainfo = cinfo->args + i;
1076 if ((sig->call_convention == MONO_CALL_VARARG) && (i == sig->sentinelpos)) {
/* the cookie embeds a runtime signature pointer, so AOT cannot be used */
1078 cfg->disable_aot = TRUE;
1080 MONO_INST_NEW (cfg, sig_arg, OP_ICONST);
1081 sig_arg->inst_p0 = call->signature;
1083 MONO_INST_NEW (cfg, arg, OP_OUTARG);
1084 arg->inst_imm = cinfo->sig_cookie.offset;
1085 arg->inst_left = sig_arg;
1086 MONO_INST_LIST_ADD_TAIL (&arg->node, &call->out_args);
1088 if (is_virtual && i == 0) {
1089 /* the argument will be attached to the call instruction */
1090 in = call->args [i];
1091 call->used_iregs |= 1 << ainfo->reg;
1093 MONO_INST_NEW (cfg, arg, OP_OUTARG);
1094 in = call->args [i];
1095 arg->cil_code = in->cil_code;
1096 arg->inst_left = in;
1097 arg->inst_right = (MonoInst*)call;
1098 arg->type = in->type;
1099 MONO_INST_LIST_ADD_TAIL (&arg->node, &call->out_args);
1100 if (ainfo->regtype == RegTypeGeneral) {
1101 arg->backend.reg3 = ainfo->reg;
1102 call->used_iregs |= 1 << ainfo->reg;
/* 64-bit (long/double) arguments occupy a register pair */
1103 if (arg->type == STACK_I8)
1104 call->used_iregs |= 1 << (ainfo->reg + 1);
1105 if (arg->type == STACK_R8) {
1106 if (ainfo->size == 4) {
1107 #ifndef MONO_ARCH_SOFT_FLOAT
1108 arg->opcode = OP_OUTARG_R4;
1111 call->used_iregs |= 1 << (ainfo->reg + 1);
1113 cfg->flags |= MONO_CFG_HAS_FPOUT;
1115 } else if (ainfo->regtype == RegTypeStructByAddr) {
1116 /* FIXME: where is the data allocated? */
1117 arg->backend.reg3 = ainfo->reg;
1118 call->used_iregs |= 1 << ainfo->reg;
1119 g_assert_not_reached ();
1120 } else if (ainfo->regtype == RegTypeStructByVal) {
1122 /* mark the used regs */
1123 for (cur_reg = 0; cur_reg < ainfo->size; ++cur_reg) {
1124 call->used_iregs |= 1 << (ainfo->reg + cur_reg);
1126 arg->opcode = OP_OUTARG_VT;
1127 /* vtsize and offset have just 12 bits of encoding in number of words */
1128 g_assert (((ainfo->vtsize | (ainfo->offset / 4)) & 0xfffff000) == 0);
/* pack reg/size/vtsize/offset into the single backend word decoded at emit time */
1129 arg->backend.arg_info = ainfo->reg | (ainfo->size << 4) | (ainfo->vtsize << 8) | ((ainfo->offset / 4) << 20);
1130 } else if (ainfo->regtype == RegTypeBase) {
1131 arg->opcode = OP_OUTARG_MEMBASE;
1132 arg->backend.arg_info = (ainfo->offset << 8) | ainfo->size;
1133 } else if (ainfo->regtype == RegTypeBaseGen) {
/* first word goes in r3, second on the stack; 0xff marks the split case */
1134 call->used_iregs |= 1 << ARMREG_R3;
1135 arg->opcode = OP_OUTARG_MEMBASE;
1136 arg->backend.arg_info = (ainfo->offset << 8) | 0xff;
1137 if (arg->type == STACK_R8)
1138 cfg->flags |= MONO_CFG_HAS_FPOUT;
1139 } else if (ainfo->regtype == RegTypeFP) {
1140 arg->backend.reg3 = ainfo->reg;
1141 /* FP args are passed in int regs */
1142 call->used_iregs |= 1 << ainfo->reg;
1143 if (ainfo->size == 8) {
1144 arg->opcode = OP_OUTARG_R8;
1145 call->used_iregs |= 1 << (ainfo->reg + 1);
1147 arg->opcode = OP_OUTARG_R4;
1149 cfg->flags |= MONO_CFG_HAS_FPOUT;
1151 g_assert_not_reached ();
1155 call->stack_usage = cinfo->stack_usage;
1156 cfg->param_area = MAX (cfg->param_area, cinfo->stack_usage);
1157 cfg->flags |= MONO_CFG_HAS_CALLS;
1159 * should set more info in call, such as the stack space
1160 * used by the args that needs to be added back to esp
1168 * Allow tracing to work with this interface (with an optional argument)
1172 mono_arch_instrument_prolog (MonoCompile *cfg, void *func, void *p, gboolean enable_arguments)
/* call func(method, NULL): r0 = method, r1 = 0 (no frame pointer passed yet) */
1176 code = mono_arm_emit_load_imm (code, ARMREG_R0, (guint32)cfg->method);
1177 ARM_MOV_REG_IMM8 (code, ARMREG_R1, 0); /* NULL ebp for now */
1178 code = mono_arm_emit_load_imm (code, ARMREG_R2, (guint32)func);
1179 code = emit_call_reg (code, ARMREG_R2);
/*
 * Emit the tracing epilog: preserve the method's return value around a
 * call to 'func', choosing a save strategy (one reg / reg pair / FP /
 * struct) from the return type, and restore it afterwards.
 */
1192 mono_arch_instrument_epilog (MonoCompile *cfg, void *func, void *p, gboolean enable_arguments)
1195 int save_mode = SAVE_NONE;
1197 MonoMethod *method = cfg->method;
1198 int rtype = mono_type_get_underlying_type (mono_method_signature (method)->ret)->type;
1199 int save_offset = cfg->param_area;
/* grow the native code buffer if the ~16 instructions below might not fit */
1203 offset = code - cfg->native_code;
1204 /* we need about 16 instructions */
1205 if (offset > (cfg->code_size - 16 * 4)) {
1206 cfg->code_size *= 2;
1207 cfg->native_code = g_realloc (cfg->native_code, cfg->code_size);
1208 code = cfg->native_code + offset;
1211 case MONO_TYPE_VOID:
1212 /* special case string .ctor icall */
/* NOTE(review): strcmp() is non-zero when the name is NOT ".ctor", which contradicts
 * the comment above -- upstream uses !strcmp here; confirm before relying on this path. */
1213 if (strcmp (".ctor", method->name) && method->klass == mono_defaults.string_class)
1214 save_mode = SAVE_ONE;
1216 save_mode = SAVE_NONE;
1220 save_mode = SAVE_TWO;
1224 save_mode = SAVE_FP;
1226 case MONO_TYPE_VALUETYPE:
1227 save_mode = SAVE_STRUCT;
1230 save_mode = SAVE_ONE;
/* spill the return value into the param area before calling the trace function */
1234 switch (save_mode) {
1236 ARM_STR_IMM (code, ARMREG_R0, cfg->frame_reg, save_offset);
1237 ARM_STR_IMM (code, ARMREG_R1, cfg->frame_reg, save_offset + 4);
1238 if (enable_arguments) {
/* pass the 64-bit result as func's 2nd/3rd arguments */
1239 ARM_MOV_REG_REG (code, ARMREG_R2, ARMREG_R1);
1240 ARM_MOV_REG_REG (code, ARMREG_R1, ARMREG_R0);
1244 ARM_STR_IMM (code, ARMREG_R0, cfg->frame_reg, save_offset);
1245 if (enable_arguments) {
1246 ARM_MOV_REG_REG (code, ARMREG_R1, ARMREG_R0);
1250 /* FIXME: what reg? */
1251 if (enable_arguments) {
1252 /* FIXME: what reg? */
1256 if (enable_arguments) {
1257 /* FIXME: get the actual address */
1258 ARM_MOV_REG_REG (code, ARMREG_R1, ARMREG_R0);
/* call func(method, ...) through IP */
1266 code = mono_arm_emit_load_imm (code, ARMREG_R0, (guint32)cfg->method);
1267 code = mono_arm_emit_load_imm (code, ARMREG_IP, (guint32)func);
1268 code = emit_call_reg (code, ARMREG_IP);
/* reload the saved return value */
1270 switch (save_mode) {
1272 ARM_LDR_IMM (code, ARMREG_R0, cfg->frame_reg, save_offset);
1273 ARM_LDR_IMM (code, ARMREG_R1, cfg->frame_reg, save_offset + 4);
1276 ARM_LDR_IMM (code, ARMREG_R0, cfg->frame_reg, save_offset);
1290 * The immediate field for cond branches is big enough for all reasonable methods
/* Emit a conditional branch to a label or basic block; when the target is not
 * yet known a patch-info entry is recorded and the branch emitted with offset 0. */
1292 #define EMIT_COND_BRANCH_FLAGS(ins,condcode) \
1293 if (ins->flags & MONO_INST_BRLABEL) { \
1294 if (0 && ins->inst_i0->inst_c0) { \
1295 ARM_B_COND (code, (condcode), (code - cfg->native_code + ins->inst_i0->inst_c0) & 0xffffff); \
1297 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_LABEL, ins->inst_i0); \
1298 ARM_B_COND (code, (condcode), 0); \
1301 if (0 && ins->inst_true_bb->native_offset) { \
1302 ARM_B_COND (code, (condcode), (code - cfg->native_code + ins->inst_true_bb->native_offset) & 0xffffff); \
1304 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_BB, ins->inst_true_bb); \
1305 ARM_B_COND (code, (condcode), 0); \
1309 #define EMIT_COND_BRANCH(ins,cond) EMIT_COND_BRANCH_FLAGS(ins, branch_cc_table [(cond)])
1311 /* emit an exception if condition is fail
1313 * We assign the extra code used to throw the implicit exceptions
1314 * to cfg->bb_exit as far as the big branch handling is concerned
1316 #define EMIT_COND_SYSTEM_EXCEPTION_FLAGS(condcode,exc_name) \
1318 mono_add_patch_info (cfg, code - cfg->native_code, \
1319 MONO_PATCH_INFO_EXC, exc_name); \
1320 ARM_BL_COND (code, (condcode), 0); \
1323 #define EMIT_COND_SYSTEM_EXCEPTION(cond,exc_name) EMIT_COND_SYSTEM_EXCEPTION_FLAGS(branch_cc_table [(cond)], (exc_name))
/* First peephole pass (body elided in this excerpt). */
1326 mono_arch_peephole_pass_1 (MonoCompile *cfg, MonoBasicBlock *bb)
/*
 * Second peephole pass: walk each basic block and simplify instruction
 * pairs -- strength-reduce multiplications, forward stores into
 * following loads, and remove redundant moves.
 */
1331 mono_arch_peephole_pass_2 (MonoCompile *cfg, MonoBasicBlock *bb)
1335 MONO_BB_FOR_EACH_INS_SAFE (bb, n, ins) {
1336 MonoInst *last_ins = mono_inst_list_prev (&ins->node, &bb->ins_list);
1338 switch (ins->opcode) {
1340 /* remove unnecessary multiplication with 1 */
1341 if (ins->inst_imm == 1) {
1342 if (ins->dreg != ins->sreg1) {
1343 ins->opcode = OP_MOVE;
1345 MONO_DELETE_INS (bb, ins);
/* multiplication by a power of two becomes a left shift */
1349 int power2 = mono_is_power_of_two (ins->inst_imm);
1351 ins->opcode = OP_SHL_IMM;
1352 ins->inst_imm = power2;
1356 case OP_LOAD_MEMBASE:
1357 case OP_LOADI4_MEMBASE:
1359 * OP_STORE_MEMBASE_REG reg, offset(basereg)
1360 * OP_LOAD_MEMBASE offset(basereg), reg
1362 if (last_ins && (last_ins->opcode == OP_STOREI4_MEMBASE_REG
1363 || last_ins->opcode == OP_STORE_MEMBASE_REG) &&
1364 ins->inst_basereg == last_ins->inst_destbasereg &&
1365 ins->inst_offset == last_ins->inst_offset) {
1366 if (ins->dreg == last_ins->sreg1) {
1367 MONO_DELETE_INS (bb, ins);
1370 //static int c = 0; printf ("MATCHX %s %d\n", cfg->method->name,c++);
1371 ins->opcode = OP_MOVE;
1372 ins->sreg1 = last_ins->sreg1;
1376 * Note: reg1 must be different from the basereg in the second load
1377 * OP_LOAD_MEMBASE offset(basereg), reg1
1378 * OP_LOAD_MEMBASE offset(basereg), reg2
1380 * OP_LOAD_MEMBASE offset(basereg), reg1
1381 * OP_MOVE reg1, reg2
1383 } if (last_ins && (last_ins->opcode == OP_LOADI4_MEMBASE
1384 || last_ins->opcode == OP_LOAD_MEMBASE) &&
1385 ins->inst_basereg != last_ins->dreg &&
1386 ins->inst_basereg == last_ins->inst_basereg &&
1387 ins->inst_offset == last_ins->inst_offset) {
1389 if (ins->dreg == last_ins->dreg) {
1390 MONO_DELETE_INS (bb, ins);
1393 ins->opcode = OP_MOVE;
1394 ins->sreg1 = last_ins->dreg;
1397 //g_assert_not_reached ();
1401 * OP_STORE_MEMBASE_IMM imm, offset(basereg)
1402 * OP_LOAD_MEMBASE offset(basereg), reg
1404 * OP_STORE_MEMBASE_IMM imm, offset(basereg)
1405 * OP_ICONST reg, imm
1407 } else if (last_ins && (last_ins->opcode == OP_STOREI4_MEMBASE_IMM
1408 || last_ins->opcode == OP_STORE_MEMBASE_IMM) &&
1409 ins->inst_basereg == last_ins->inst_destbasereg &&
1410 ins->inst_offset == last_ins->inst_offset) {
1411 //static int c = 0; printf ("MATCHX %s %d\n", cfg->method->name,c++);
1412 ins->opcode = OP_ICONST;
1413 ins->inst_c0 = last_ins->inst_imm;
1414 g_assert_not_reached (); // check this rule
/* narrow load after matching narrow store becomes a sign/zero extension of the stored reg */
1418 case OP_LOADU1_MEMBASE:
1419 case OP_LOADI1_MEMBASE:
1420 if (last_ins && (last_ins->opcode == OP_STOREI1_MEMBASE_REG) &&
1421 ins->inst_basereg == last_ins->inst_destbasereg &&
1422 ins->inst_offset == last_ins->inst_offset) {
1423 ins->opcode = (ins->opcode == OP_LOADI1_MEMBASE) ? OP_ICONV_TO_I1 : OP_ICONV_TO_U1;
1424 ins->sreg1 = last_ins->sreg1;
1427 case OP_LOADU2_MEMBASE:
1428 case OP_LOADI2_MEMBASE:
1429 if (last_ins && (last_ins->opcode == OP_STOREI2_MEMBASE_REG) &&
1430 ins->inst_basereg == last_ins->inst_destbasereg &&
1431 ins->inst_offset == last_ins->inst_offset) {
1432 ins->opcode = (ins->opcode == OP_LOADI2_MEMBASE) ? OP_ICONV_TO_I2 : OP_ICONV_TO_U2;
1433 ins->sreg1 = last_ins->sreg1;
1437 ins->opcode = OP_MOVE;
/* self-move is a no-op */
1441 if (ins->dreg == ins->sreg1) {
1442 MONO_DELETE_INS (bb, ins);
1446 * OP_MOVE sreg, dreg
1447 * OP_MOVE dreg, sreg
1449 if (last_ins && last_ins->opcode == OP_MOVE &&
1450 ins->sreg1 == last_ins->dreg &&
1451 ins->dreg == last_ins->sreg1) {
1452 MONO_DELETE_INS (bb, ins);
1461 * the branch_cc_table should maintain the order of these
1475 branch_cc_table [] = {
/*
 * NEW_INS: allocate a zero-initialized MonoInst of opcode 'op' from the
 * compile's mempool, store it in 'dest', and link it into the basic
 * block's instruction list adjacent to 'ins' (via the embedded list node).
 * Used by the lowering pass to inject OP_ICONST/OP_ADD_IMM helpers.
 */
1490 #define NEW_INS(cfg,ins,dest,op) do { \
1491 (dest) = mono_mempool_alloc0 ((cfg)->mempool, sizeof (MonoInst)); \
1492 (dest)->opcode = (op); \
1493 MONO_INST_LIST_ADD_TAIL (&(dest)->node, &(ins)->node); \
/*
 * map_to_reg_reg_op:
 * Map an immediate/membase opcode to its register-register counterpart,
 * used after the lowering pass has materialized the immediate (or the
 * too-large offset) into a register:
 *  - *_MEMBASE loads/stores  -> *_MEMINDEX (offset now in a register)
 *  - *_MEMBASE_IMM stores    -> *_MEMBASE_REG (value now in a register)
 * Asserts on any opcode it does not know how to map.
 */
1497 map_to_reg_reg_op (int op)
1506 case OP_COMPARE_IMM:
1520 case OP_LOAD_MEMBASE:
1521 return OP_LOAD_MEMINDEX;
1522 case OP_LOADI4_MEMBASE:
1523 return OP_LOADI4_MEMINDEX;
1524 case OP_LOADU4_MEMBASE:
1525 return OP_LOADU4_MEMINDEX;
1526 case OP_LOADU1_MEMBASE:
1527 return OP_LOADU1_MEMINDEX;
1528 case OP_LOADI2_MEMBASE:
1529 return OP_LOADI2_MEMINDEX;
1530 case OP_LOADU2_MEMBASE:
1531 return OP_LOADU2_MEMINDEX;
1532 case OP_LOADI1_MEMBASE:
1533 return OP_LOADI1_MEMINDEX;
1534 case OP_STOREI1_MEMBASE_REG:
1535 return OP_STOREI1_MEMINDEX;
1536 case OP_STOREI2_MEMBASE_REG:
1537 return OP_STOREI2_MEMINDEX;
1538 case OP_STOREI4_MEMBASE_REG:
1539 return OP_STOREI4_MEMINDEX;
1540 case OP_STORE_MEMBASE_REG:
1541 return OP_STORE_MEMINDEX;
1542 case OP_STORER4_MEMBASE_REG:
1543 return OP_STORER4_MEMINDEX;
1544 case OP_STORER8_MEMBASE_REG:
1545 return OP_STORER8_MEMINDEX;
/* store-immediate forms keep the membase addressing; only the immediate
 * operand moves to a register */
1546 case OP_STORE_MEMBASE_IMM:
1547 return OP_STORE_MEMBASE_REG;
1548 case OP_STOREI1_MEMBASE_IMM:
1549 return OP_STOREI1_MEMBASE_REG;
1550 case OP_STOREI2_MEMBASE_IMM:
1551 return OP_STOREI2_MEMBASE_REG;
1552 case OP_STOREI4_MEMBASE_IMM:
1553 return OP_STOREI4_MEMBASE_REG;
1555 g_assert_not_reached ();
1559 * Remove from the instruction list the instructions that can't be
1560 * represented with very simple instructions with no register
/*
 * mono_arch_lowering_pass:
 * Rewrite instructions whose immediates/offsets cannot be encoded in an
 * ARM instruction: the constant is loaded into a fresh vreg (OP_ICONST /
 * OP_ADD_IMM inserted via NEW_INS) and the opcode is switched to its
 * register-register form via map_to_reg_reg_op().
 * Encoding limits used: arm_is_imm12 for word/byte load-store offsets,
 * arm_is_imm8 for halfword/signed-byte, arm_is_fpimm8 for FP load-store,
 * mono_arm_is_rotated_imm8 for data-processing immediates.
 */
1564 mono_arch_lowering_pass (MonoCompile *cfg, MonoBasicBlock *bb)
1566 int rot_amount, imm8, low_imm;
1567 MonoInst *ins, *temp;
1569 /* setup the virtual reg allocator */
1570 if (bb->max_vreg > cfg->rs->next_vreg)
1571 cfg->rs->next_vreg = bb->max_vreg;
1573 MONO_BB_FOR_EACH_INS (bb, ins) {
1577 last_ins = mono_inst_list_prev (&ins->node, &bb->ins_list);
1578 switch (ins->opcode) {
/* ALU/compare immediates: if the value is not an 8-bit rotated
 * immediate, materialize it and use the reg-reg opcode. */
1582 case OP_COMPARE_IMM:
1589 if ((imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount)) < 0) {
1590 NEW_INS (cfg, ins, temp, OP_ICONST);
1591 temp->inst_c0 = ins->inst_imm;
1592 temp->dreg = mono_regstate_next_int (cfg->rs);
1593 ins->sreg2 = temp->dreg;
1594 ins->opcode = map_to_reg_reg_op (ins->opcode);
/* multiply-immediate: strength-reduce x*1 -> move, x*0 -> 0,
 * x*2^k -> shift; otherwise load the constant and emit OP_IMUL. */
1598 if (ins->inst_imm == 1) {
1599 ins->opcode = OP_MOVE;
1602 if (ins->inst_imm == 0) {
1603 ins->opcode = OP_ICONST;
1607 imm8 = mono_is_power_of_two (ins->inst_imm);
1609 ins->opcode = OP_SHL_IMM;
1610 ins->inst_imm = imm8;
1613 NEW_INS (cfg, ins, temp, OP_ICONST);
1614 temp->inst_c0 = ins->inst_imm;
1615 temp->dreg = mono_regstate_next_int (cfg->rs);
1616 ins->sreg2 = temp->dreg;
1617 ins->opcode = OP_IMUL;
1619 case OP_LOAD_MEMBASE:
1620 case OP_LOADI4_MEMBASE:
1621 case OP_LOADU4_MEMBASE:
1622 case OP_LOADU1_MEMBASE:
1623 /* we can do two things: load the immed in a register
1624 * and use an indexed load, or see if the immed can be
1625 * represented as an ad_imm + a load with a smaller offset
1626 * that fits. We just do the first for now, optimize later.
1628 if (arm_is_imm12 (ins->inst_offset))
1630 NEW_INS (cfg, ins, temp, OP_ICONST);
1631 temp->inst_c0 = ins->inst_offset;
1632 temp->dreg = mono_regstate_next_int (cfg->rs);
1633 ins->sreg2 = temp->dreg;
1634 ins->opcode = map_to_reg_reg_op (ins->opcode);
/* halfword / signed byte loads only have an 8-bit offset encoding */
1636 case OP_LOADI2_MEMBASE:
1637 case OP_LOADU2_MEMBASE:
1638 case OP_LOADI1_MEMBASE:
1639 if (arm_is_imm8 (ins->inst_offset))
1641 NEW_INS (cfg, ins, temp, OP_ICONST);
1642 temp->inst_c0 = ins->inst_offset;
1643 temp->dreg = mono_regstate_next_int (cfg->rs);
1644 ins->sreg2 = temp->dreg;
1645 ins->opcode = map_to_reg_reg_op (ins->opcode);
/* FP loads: no indexed addressing exists, so split the offset into a
 * rotated-imm8 high part (added into a temp base) + a low 9-bit part. */
1647 case OP_LOADR4_MEMBASE:
1648 case OP_LOADR8_MEMBASE:
1649 if (arm_is_fpimm8 (ins->inst_offset))
1651 low_imm = ins->inst_offset & 0x1ff;
1652 if ((imm8 = mono_arm_is_rotated_imm8 (ins->inst_offset & ~0x1ff, &rot_amount)) >= 0) {
1653 NEW_INS (cfg, ins, temp, OP_ADD_IMM);
1654 temp->inst_imm = ins->inst_offset & ~0x1ff;
1655 temp->sreg1 = ins->inst_basereg;
1656 temp->dreg = mono_regstate_next_int (cfg->rs);
1657 ins->inst_basereg = temp->dreg;
1658 ins->inst_offset = low_imm;
1661 /* VFP/FPA doesn't have indexed load instructions */
1662 g_assert_not_reached ();
1664 case OP_STORE_MEMBASE_REG:
1665 case OP_STOREI4_MEMBASE_REG:
1666 case OP_STOREI1_MEMBASE_REG:
1667 if (arm_is_imm12 (ins->inst_offset))
1669 NEW_INS (cfg, ins, temp, OP_ICONST);
1670 temp->inst_c0 = ins->inst_offset;
1671 temp->dreg = mono_regstate_next_int (cfg->rs);
1672 ins->sreg2 = temp->dreg;
1673 ins->opcode = map_to_reg_reg_op (ins->opcode);
1675 case OP_STOREI2_MEMBASE_REG:
1676 if (arm_is_imm8 (ins->inst_offset))
1678 NEW_INS (cfg, ins, temp, OP_ICONST);
1679 temp->inst_c0 = ins->inst_offset;
1680 temp->dreg = mono_regstate_next_int (cfg->rs);
1681 ins->sreg2 = temp->dreg;
1682 ins->opcode = map_to_reg_reg_op (ins->opcode);
/* FP stores: same base+low-offset split as the FP loads above */
1684 case OP_STORER4_MEMBASE_REG:
1685 case OP_STORER8_MEMBASE_REG:
1686 if (arm_is_fpimm8 (ins->inst_offset))
1688 low_imm = ins->inst_offset & 0x1ff;
1689 if ((imm8 = mono_arm_is_rotated_imm8 (ins->inst_offset & ~ 0x1ff, &rot_amount)) >= 0 && arm_is_fpimm8 (low_imm)) {
1690 NEW_INS (cfg, ins, temp, OP_ADD_IMM);
1691 temp->inst_imm = ins->inst_offset & ~0x1ff;
1692 temp->sreg1 = ins->inst_destbasereg;
1693 temp->dreg = mono_regstate_next_int (cfg->rs);
1694 ins->inst_destbasereg = temp->dreg;
1695 ins->inst_offset = low_imm;
1698 /*g_print ("fail with: %d (%d, %d)\n", ins->inst_offset, ins->inst_offset & ~0x1ff, low_imm);*/
1699 /* VFP/FPA doesn't have indexed store instructions */
1700 g_assert_not_reached ();
/* store-immediate: load the value, switch to the *_MEMBASE_REG form,
 * then re-run the switch so the (possibly large) offset is handled too */
1702 case OP_STORE_MEMBASE_IMM:
1703 case OP_STOREI1_MEMBASE_IMM:
1704 case OP_STOREI2_MEMBASE_IMM:
1705 case OP_STOREI4_MEMBASE_IMM:
1706 NEW_INS (cfg, ins, temp, OP_ICONST);
1707 temp->inst_c0 = ins->inst_imm;
1708 temp->dreg = mono_regstate_next_int (cfg->rs);
1709 ins->sreg1 = temp->dreg;
1710 ins->opcode = map_to_reg_reg_op (ins->opcode);
1711 goto loop_start; /* make it handle the possibly big ins->inst_offset */
1714 bb->max_vreg = cfg->rs->next_vreg;
/*
 * emit_float_to_int:
 * Emit code converting the FP register 'sreg' to an integer in 'dreg',
 * truncated to 'size' bytes with the given signedness.
 * FPA path uses ARM_FIXZ (round toward zero); VFP converts in F0 then
 * moves to the integer register with FMRS. The tail shifts/masks clamp
 * the result to 1 or 2 bytes (zero- or sign-extending as requested).
 */
1718 emit_float_to_int (MonoCompile *cfg, guchar *code, int dreg, int sreg, int size, gboolean is_signed)
1720 /* sreg is a float, dreg is an integer reg */
1722 ARM_FIXZ (code, dreg, sreg);
1723 #elif defined(ARM_FPU_VFP)
/* TOSIZD/TOUIZD = convert-to-(un)signed-int with round-toward-zero */
1725 ARM_TOSIZD (code, ARM_VFP_F0, sreg);
1727 ARM_TOUIZD (code, ARM_VFP_F0, sreg);
1728 ARM_FMRS (code, dreg, ARM_VFP_F0);
/* unsigned narrowing: mask to 8 bits, or shift-pair to zero-extend 16 */
1732 ARM_AND_REG_IMM8 (code, dreg, dreg, 0xff);
1733 else if (size == 2) {
1734 ARM_SHL_IMM (code, dreg, dreg, 16);
1735 ARM_SHR_IMM (code, dreg, dreg, 16);
/* signed narrowing: shift up then arithmetic-shift down to sign-extend */
1739 ARM_SHL_IMM (code, dreg, dreg, 24);
1740 ARM_SAR_IMM (code, dreg, dreg, 24);
1741 } else if (size == 2) {
1742 ARM_SHL_IMM (code, dreg, dreg, 16);
1743 ARM_SAR_IMM (code, dreg, dreg, 16);
1751 const guchar *target;
/* True if 'diff' fits the +/-32MB reach of an ARM B/BL branch
 * (24-bit signed word offset: -33554432..33554431 bytes). */
1756 #define is_call_imm(diff) ((gint)(diff) >= -33554432 && (gint)(diff) <= 33554431)
/*
 * search_thunk_slot:
 * Callback for mono_code_manager_foreach: scan one code-manager chunk for
 * a 12-byte (3-word) branch thunk that jumps to pdata->target, or a free
 * slot in which to emit a new one, then patch pdata->code to branch to it.
 * Sets pdata->found on success. Returns non-zero (via elided lines) to
 * stop the iteration — NOTE(review): return statements are not visible in
 * this excerpt.
 */
1759 search_thunk_slot (void *data, int csize, int bsize, void *user_data) {
1760 PatchData *pdata = (PatchData*)user_data;
1761 guchar *code = data;
1762 guint32 *thunks = data;
1763 guint32 *endthunks = (guint32*)(code + bsize);
1765 int difflow, diffhigh;
1767 /* always ensure a call from pdata->code can reach to the thunks without further thunks */
1768 difflow = (char*)pdata->code - (char*)thunks;
1769 diffhigh = (char*)pdata->code - (char*)endthunks;
1770 if (!((is_call_imm (thunks) && is_call_imm (endthunks)) || (is_call_imm (difflow) && is_call_imm (diffhigh))))
1774 * The thunk is composed of 3 words:
1775 * load constant from thunks [2] into ARM_IP
1778 * Note that the LR register is already setup
1780 //g_print ("thunk nentries: %d\n", ((char*)endthunks - (char*)thunks)/16);
/* found==2 means "use the first available slot anywhere"; otherwise only
 * look in the chunk that contains the call site */
1781 if ((pdata->found == 2) || (pdata->code >= code && pdata->code <= code + csize)) {
1782 while (thunks < endthunks) {
1783 //g_print ("looking for target: %p at %p (%08x-%08x)\n", pdata->target, thunks, thunks [0], thunks [1]);
/* word [2] of a thunk holds its target address: reuse a matching thunk */
1784 if (thunks [2] == (guint32)pdata->target) {
1785 arm_patch (pdata->code, (guchar*)thunks);
1786 mono_arch_flush_icache (pdata->code, 4);
/* all-zero slot: emit a fresh thunk here (ldr ip, [pc]; bx/mov pc, ip;
 * target word) and flush the icache before patching the call site */
1789 } else if ((thunks [0] == 0) && (thunks [1] == 0) && (thunks [2] == 0)) {
1790 /* found a free slot instead: emit thunk */
1791 /* ARMREG_IP is fine to use since this can't be an IMT call
1794 code = (guchar*)thunks;
1795 ARM_LDR_IMM (code, ARMREG_IP, ARMREG_PC, 0);
1796 if (thumb_supported)
1797 ARM_BX (code, ARMREG_IP);
1799 ARM_MOV_REG_REG (code, ARMREG_PC, ARMREG_IP);
1800 thunks [2] = (guint32)pdata->target;
1801 mono_arch_flush_icache ((guchar*)thunks, 12);
1803 arm_patch (pdata->code, (guchar*)thunks);
1804 mono_arch_flush_icache (pdata->code, 4);
1808 /* skip 12 bytes, the size of the thunk */
1812 //g_print ("failed thunk lookup for %p from %p at %p (%d entries)\n", pdata->target, pdata->code, data, count);
/*
 * handle_thunk:
 * Patch a call at 'code' to reach 'target' via a branch thunk when the
 * target is out of direct-branch range. Iterates the current domain's
 * code manager under the domain lock; a first pass looks for a reusable
 * thunk near the call site, a second pass (visible below as the repeated
 * foreach) takes the first available slot. Aborts if no slot is found.
 */
1818 handle_thunk (int absolute, guchar *code, const guchar *target) {
1819 MonoDomain *domain = mono_domain_get ();
1823 pdata.target = target;
1824 pdata.absolute = absolute;
1827 mono_domain_lock (domain);
1828 mono_code_manager_foreach (domain->code_mp, search_thunk_slot, &pdata);
1831 /* this uses the first available slot */
1833 mono_code_manager_foreach (domain->code_mp, search_thunk_slot, &pdata);
1835 mono_domain_unlock (domain);
1837 if (pdata.found != 1)
1838 g_print ("thunk failed for %p from %p\n", target, code);
1839 g_assert (pdata.found == 1);
/*
 * arm_patch:
 * Patch the instruction(s) at 'code' so control reaches 'target'.
 * Handles three shapes of call/jump site:
 *  1) a direct B/BL (top bits 101): rewrite the 24-bit displacement,
 *     converting BL->BLX when the target has the Thumb bit set;
 *     fall back to handle_thunk() when out of range;
 *  2) a BX/BLX-register sequence where the address is a constant pool
 *     word next to the code: overwrite the constant word;
 *  3) the ldr ip / mov or bx sequence used by thunks and far calls.
 */
1843 arm_patch (guchar *code, const guchar *target)
1845 guint32 *code32 = (void*)code;
1846 guint32 ins = *code32;
/* bits 25..27 select the instruction class; 101b is branch w/ offset */
1847 guint32 prim = (ins >> 25) & 7;
1848 guint32 tval = GPOINTER_TO_UINT (target);
1850 //g_print ("patching 0x%08x (0x%08x) to point to 0x%08x\n", code, ins, target);
1851 if (prim == 5) { /* 101b */
1852 /* the diff starts 8 bytes from the branch opcode */
1853 gint diff = target - code - 8;
1855 gint tmask = 0xffffffff;
1856 if (tval & 1) { /* entering thumb mode */
1857 diff = target - 1 - code - 8;
1858 g_assert (thumb_supported);
1859 tbits = 0xf << 28; /* bl->blx bit pattern */
1860 g_assert ((ins & (1 << 24))); /* it must be a bl, not b instruction */
1861 /* this low bit of the displacement is moved to bit 24 in the instruction encoding */
1865 tmask = ~(1 << 24); /* clear the link bit */
1866 /*g_print ("blx to thumb: target: %p, code: %p, diff: %d, mask: %x\n", target, code, diff, tmask);*/
/* forward branch: diff must fit in +33554431 bytes */
1871 if (diff <= 33554431) {
1873 ins = (ins & 0xff000000) | diff;
1875 *code32 = ins | tbits;
1879 /* diff between 0 and -33554432 */
1880 if (diff >= -33554432) {
1882 ins = (ins & 0xff000000) | (diff & ~0xff000000);
1884 *code32 = ins | tbits;
/* out of direct-branch range: route through a 12-byte thunk */
1889 handle_thunk (TRUE, code, target);
1894 * The alternative call sequences looks like this:
1896 * ldr ip, [pc] // loads the address constant
1897 * b 1f // jumps around the constant
1898 * address constant embedded in the code
1903 * There are two cases for patching:
1904 * a) at the end of method emission: in this case code points to the start
1905 * of the call sequence
1906 * b) during runtime patching of the call site: in this case code points
1907 * to the mov pc, ip instruction
1909 * We have to handle also the thunk jump code sequence:
1913 * address constant // execution never reaches here
1915 if ((ins & 0x0ffffff0) == 0x12fff10) {
1916 /* Branch and exchange: the address is constructed in a reg
1917 * We can patch BX when the code sequence is the following:
1918 * ldr ip, [pc, #0] ; 0x8
/* Re-emit the expected 4-word template into ccode and compare it with
 * the words around 'code' to find which word is the address constant. */
1925 guint8 *emit = (guint8*)ccode;
1926 ARM_LDR_IMM (emit, ARMREG_IP, ARMREG_PC, 0);
1928 ARM_MOV_REG_REG (emit, ARMREG_LR, ARMREG_PC);
1929 ARM_BX (emit, ARMREG_IP);
1931 /*patching from magic trampoline*/
1932 if (ins == ccode [3]) {
1933 g_assert (code32 [-4] == ccode [0]);
1934 g_assert (code32 [-3] == ccode [1]);
1935 g_assert (code32 [-1] == ccode [2]);
1936 code32 [-2] = (guint32)target;
1939 /*patching from JIT*/
1940 if (ins == ccode [0]) {
1941 g_assert (code32 [1] == ccode [1]);
1942 g_assert (code32 [3] == ccode [2]);
1943 g_assert (code32 [4] == ccode [3]);
1944 code32 [2] = (guint32)target;
1947 g_assert_not_reached ();
1948 } else if ((ins & 0x0ffffff0) == 0x12fff30) {
/* BLX-register variant: same template-compare approach */
1956 guint8 *emit = (guint8*)ccode;
1957 ARM_LDR_IMM (emit, ARMREG_IP, ARMREG_PC, 0);
1959 ARM_BLX_REG (emit, ARMREG_IP);
1961 g_assert (code32 [-3] == ccode [0]);
1962 g_assert (code32 [-2] == ccode [1]);
1963 g_assert (code32 [0] == ccode [2]);
1965 code32 [-1] = (guint32)target;
/* ldr ip / mov lr,pc / mov pc,ip / bx ip template (thunk + far call) */
1968 guint32 *tmp = ccode;
1969 guint8 *emit = (guint8*)tmp;
1970 ARM_LDR_IMM (emit, ARMREG_IP, ARMREG_PC, 0);
1971 ARM_MOV_REG_REG (emit, ARMREG_LR, ARMREG_PC);
1972 ARM_MOV_REG_REG (emit, ARMREG_PC, ARMREG_IP);
1973 ARM_BX (emit, ARMREG_IP);
1974 if (ins == ccode [2]) {
1975 g_assert_not_reached (); // should be -2 ...
1976 code32 [-1] = (guint32)target;
1979 if (ins == ccode [0]) {
1980 /* handles both thunk jump code and the far call sequence */
1981 code32 [2] = (guint32)target;
1984 g_assert_not_reached ();
1986 // g_print ("patched with 0x%08x\n", ins);
1990 * Return the >= 0 uimm8 value if val can be represented with a byte + rotation
1991 * (with the rotation amount in *rot_amount. rot_amount is already adjusted
1992 * to be used with the emit macros.
1993 * Return -1 otherwise.
1996 mono_arm_is_rotated_imm8 (guint32 val, gint *rot_amount)
/* Try every even rotation (ARM data-processing immediates are an 8-bit
 * value rotated right by an even amount). */
1999 for (i = 0; i < 31; i+= 2) {
/* NOTE(review): when i == 0 this computes val << 32, which is undefined
 * behavior in C (shift >= type width) — works on common compilers/targets
 * but should be special-cased; verify before relying on it elsewhere. */
2000 res = (val << (32 - i)) | (val >> i);
2003 *rot_amount = i? 32 - i: 0;
2010 * Emits in code a sequence of instructions that load the value 'val'
2011 * into the dreg register. Uses at most 4 instructions.
2014 mono_arm_emit_load_imm (guint8 *code, int dreg, guint32 val)
2016 int imm8, rot_amount;
/* NOTE(review): this ldr-from-constant-pool path appears to be guarded by
 * preprocessor/conditional lines not visible in this excerpt — confirm
 * against the full source before editing. */
2018 ARM_LDR_IMM (code, dreg, ARMREG_PC, 0);
2019 /* skip the constant pool */
/* Fast paths: the value (or its bitwise complement) is a rotated imm8,
 * so a single MOV or MVN suffices. */
2025 if ((imm8 = mono_arm_is_rotated_imm8 (val, &rot_amount)) >= 0) {
2026 ARM_MOV_REG_IMM (code, dreg, imm8, rot_amount);
2027 } else if ((imm8 = mono_arm_is_rotated_imm8 (~val, &rot_amount)) >= 0) {
2028 ARM_MVN_REG_IMM (code, dreg, imm8, rot_amount);
/* General case: build the value byte by byte — MOV the lowest non-zero
 * byte, then ADD each higher byte with the matching rotation, skipping
 * zero bytes so at most 4 instructions are emitted. */
2031 ARM_MOV_REG_IMM8 (code, dreg, (val & 0xFF));
2033 ARM_ADD_REG_IMM (code, dreg, dreg, (val & 0xFF00) >> 8, 24);
2035 if (val & 0xFF0000) {
2036 ARM_ADD_REG_IMM (code, dreg, dreg, (val & 0xFF0000) >> 16, 16);
2038 if (val & 0xFF000000) {
2039 ARM_ADD_REG_IMM (code, dreg, dreg, (val & 0xFF000000) >> 24, 8);
2041 } else if (val & 0xFF00) {
2042 ARM_MOV_REG_IMM (code, dreg, (val & 0xFF00) >> 8, 24);
2043 if (val & 0xFF0000) {
2044 ARM_ADD_REG_IMM (code, dreg, dreg, (val & 0xFF0000) >> 16, 16);
2046 if (val & 0xFF000000) {
2047 ARM_ADD_REG_IMM (code, dreg, dreg, (val & 0xFF000000) >> 24, 8);
2049 } else if (val & 0xFF0000) {
2050 ARM_MOV_REG_IMM (code, dreg, (val & 0xFF0000) >> 16, 16);
2051 if (val & 0xFF000000) {
2052 ARM_ADD_REG_IMM (code, dreg, dreg, (val & 0xFF000000) >> 24, 8);
2055 //g_assert_not_reached ();
2061 * emit_load_volatile_arguments:
2063 * Load volatile arguments from the stack to the original input registers.
2064 * Required before a tail call.
/*
 * Inverse of the argument-spilling done in the prolog: for each incoming
 * argument, reload the stack slot back into the register(s) the calling
 * convention assigned to it (per the ArgInfo from calculate_sizes()).
 * Offsets that don't fit the imm12 encoding are loaded via ARMREG_IP.
 */
2067 emit_load_volatile_arguments (MonoCompile *cfg, guint8 *code)
2069 MonoMethod *method = cfg->method;
2070 MonoMethodSignature *sig;
2075 /* FIXME: Generate intermediate code instead */
2077 sig = mono_method_signature (method);
2079 /* This is the opposite of the code in emit_prolog */
2083 cinfo = calculate_sizes (sig, sig->pinvoke);
/* struct returns: reload the hidden return-value address argument */
2085 if (MONO_TYPE_ISSTRUCT (sig->ret)) {
2086 ArgInfo *ainfo = &cinfo->ret;
2087 inst = cfg->vret_addr;
2088 g_assert (arm_is_imm12 (inst->inst_offset));
2089 ARM_LDR_IMM (code, ainfo->reg, inst->inst_basereg, inst->inst_offset);
2091 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
2092 ArgInfo *ainfo = cinfo->args + i;
2093 inst = cfg->args [pos];
2095 if (cfg->verbose_level > 2)
2096 g_print ("Loading argument %d (type: %d)\n", i, ainfo->regtype);
/* argument lives in a register for the whole method: just move it back */
2097 if (inst->opcode == OP_REGVAR) {
2098 if (ainfo->regtype == RegTypeGeneral)
2099 ARM_MOV_REG_REG (code, inst->dreg, ainfo->reg);
2100 else if (ainfo->regtype == RegTypeFP) {
2101 g_assert_not_reached ();
2102 } else if (ainfo->regtype == RegTypeBase) {
2106 if (arm_is_imm12 (prev_sp_offset + ainfo->offset)) {
2107 ARM_LDR_IMM (code, inst->dreg, ARMREG_SP, (prev_sp_offset + ainfo->offset));
2109 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
2110 ARM_LDR_REG_REG (code, inst->dreg, ARMREG_SP, ARMREG_IP);
2114 g_assert_not_reached ();
/* argument was spilled to a stack slot: reload into its ABI register(s) */
2116 if (ainfo->regtype == RegTypeGeneral) {
2117 switch (ainfo->size) {
/* 8-byte values occupy a register pair: reload both halves */
2124 g_assert (arm_is_imm12 (inst->inst_offset));
2125 ARM_LDR_IMM (code, ainfo->reg, inst->inst_basereg, inst->inst_offset);
2126 g_assert (arm_is_imm12 (inst->inst_offset + 4));
2127 ARM_LDR_IMM (code, ainfo->reg + 1, inst->inst_basereg, inst->inst_offset + 4);
2130 if (arm_is_imm12 (inst->inst_offset)) {
2131 ARM_LDR_IMM (code, ainfo->reg, inst->inst_basereg, inst->inst_offset);
2133 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
2134 ARM_LDR_REG_REG (code, ainfo->reg, inst->inst_basereg, ARMREG_IP);
/* NOTE(review): BaseGen/Base handling bodies are elided in this excerpt */
2138 } else if (ainfo->regtype == RegTypeBaseGen) {
2141 } else if (ainfo->regtype == RegTypeBase) {
2144 } else if (ainfo->regtype == RegTypeFP) {
2145 g_assert_not_reached ();
/* structs passed by value in registers: reload each word-sized piece */
2146 } else if (ainfo->regtype == RegTypeStructByVal) {
2147 int doffset = inst->inst_offset;
2151 if (mono_class_from_mono_type (inst->inst_vtype))
2152 size = mono_class_native_size (mono_class_from_mono_type (inst->inst_vtype), NULL);
2153 for (cur_reg = 0; cur_reg < ainfo->size; ++cur_reg) {
2154 if (arm_is_imm12 (doffset)) {
2155 ARM_LDR_IMM (code, ainfo->reg + cur_reg, inst->inst_basereg, doffset);
2157 code = mono_arm_emit_load_imm (code, ARMREG_IP, doffset);
2158 ARM_LDR_REG_REG (code, ainfo->reg + cur_reg, inst->inst_basereg, ARMREG_IP);
2160 soffset += sizeof (gpointer);
2161 doffset += sizeof (gpointer);
2166 } else if (ainfo->regtype == RegTypeStructByAddr) {
2181 mono_arch_output_basic_block (MonoCompile *cfg, MonoBasicBlock *bb)
2186 guint8 *code = cfg->native_code + cfg->code_len;
2187 guint last_offset = 0;
2189 int imm8, rot_amount;
2191 /* we don't align basic blocks of loops on arm */
2193 if (cfg->verbose_level > 2)
2194 g_print ("Basic block %d starting at offset 0x%x\n", bb->block_num, bb->native_offset);
2196 cpos = bb->max_offset;
2198 if (cfg->prof_options & MONO_PROFILE_COVERAGE) {
2199 //MonoCoverageInfo *cov = mono_get_coverage_info (cfg->method);
2200 //g_assert (!mono_compile_aot);
2203 // cov->data [bb->dfn].iloffset = bb->cil_code - cfg->cil_code;
2204 /* this is not thread save, but good enough */
2205 /* fixme: howto handle overflows? */
2206 //x86_inc_mem (code, &cov->data [bb->dfn].count);
2209 if (mono_break_at_bb_method && mono_method_desc_full_match (mono_break_at_bb_method, cfg->method) && bb->block_num == mono_break_at_bb_bb_num) {
2210 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_INTERNAL_METHOD,
2211 (gpointer)"mono_break");
2212 code = emit_call_seq (cfg, code);
2215 MONO_BB_FOR_EACH_INS (bb, ins) {
2216 offset = code - cfg->native_code;
2218 max_len = ((guint8 *)ins_get_spec (ins->opcode))[MONO_INST_LEN];
2220 if (offset > (cfg->code_size - max_len - 16)) {
2221 cfg->code_size *= 2;
2222 cfg->native_code = g_realloc (cfg->native_code, cfg->code_size);
2223 code = cfg->native_code + offset;
2225 // if (ins->cil_code)
2226 // g_print ("cil code\n");
2227 mono_debug_record_line_number (cfg, ins, offset);
2229 switch (ins->opcode) {
2230 case OP_MEMORY_BARRIER:
2233 g_assert_not_reached ();
2236 ppc_mullw (code, ppc_r4, ins->sreg1, ins->sreg2);
2237 ppc_mulhw (code, ppc_r3, ins->sreg1, ins->sreg2);
2240 ppc_mullw (code, ppc_r4, ins->sreg1, ins->sreg2);
2241 ppc_mulhwu (code, ppc_r3, ins->sreg1, ins->sreg2);
2243 case OP_STOREI1_MEMBASE_IMM:
2244 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_imm & 0xFF);
2245 g_assert (arm_is_imm12 (ins->inst_offset));
2246 ARM_STRB_IMM (code, ARMREG_LR, ins->inst_destbasereg, ins->inst_offset);
2248 case OP_STOREI2_MEMBASE_IMM:
2249 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_imm & 0xFFFF);
2250 g_assert (arm_is_imm8 (ins->inst_offset));
2251 ARM_STRH_IMM (code, ARMREG_LR, ins->inst_destbasereg, ins->inst_offset);
2253 case OP_STORE_MEMBASE_IMM:
2254 case OP_STOREI4_MEMBASE_IMM:
2255 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_imm);
2256 g_assert (arm_is_imm12 (ins->inst_offset));
2257 ARM_STR_IMM (code, ARMREG_LR, ins->inst_destbasereg, ins->inst_offset);
2259 case OP_STOREI1_MEMBASE_REG:
2260 g_assert (arm_is_imm12 (ins->inst_offset));
2261 ARM_STRB_IMM (code, ins->sreg1, ins->inst_destbasereg, ins->inst_offset);
2263 case OP_STOREI2_MEMBASE_REG:
2264 g_assert (arm_is_imm8 (ins->inst_offset));
2265 ARM_STRH_IMM (code, ins->sreg1, ins->inst_destbasereg, ins->inst_offset);
2267 case OP_STORE_MEMBASE_REG:
2268 case OP_STOREI4_MEMBASE_REG:
2269 /* this case is special, since it happens for spill code after lowering has been called */
2270 if (arm_is_imm12 (ins->inst_offset)) {
2271 ARM_STR_IMM (code, ins->sreg1, ins->inst_destbasereg, ins->inst_offset);
2273 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_offset);
2274 ARM_STR_REG_REG (code, ins->sreg1, ins->inst_destbasereg, ARMREG_LR);
2277 case OP_STOREI1_MEMINDEX:
2278 ARM_STRB_REG_REG (code, ins->sreg1, ins->inst_destbasereg, ins->sreg2);
2280 case OP_STOREI2_MEMINDEX:
2281 ARM_STRH_REG_REG (code, ins->sreg1, ins->inst_destbasereg, ins->sreg2);
2283 case OP_STORE_MEMINDEX:
2284 case OP_STOREI4_MEMINDEX:
2285 ARM_STR_REG_REG (code, ins->sreg1, ins->inst_destbasereg, ins->sreg2);
2288 g_assert_not_reached ();
2290 case OP_LOAD_MEMINDEX:
2291 case OP_LOADI4_MEMINDEX:
2292 case OP_LOADU4_MEMINDEX:
2293 ARM_LDR_REG_REG (code, ins->dreg, ins->inst_basereg, ins->sreg2);
2295 case OP_LOADI1_MEMINDEX:
2296 ARM_LDRSB_REG_REG (code, ins->dreg, ins->inst_basereg, ins->sreg2);
2298 case OP_LOADU1_MEMINDEX:
2299 ARM_LDRB_REG_REG (code, ins->dreg, ins->inst_basereg, ins->sreg2);
2301 case OP_LOADI2_MEMINDEX:
2302 ARM_LDRSH_REG_REG (code, ins->dreg, ins->inst_basereg, ins->sreg2);
2304 case OP_LOADU2_MEMINDEX:
2305 ARM_LDRH_REG_REG (code, ins->dreg, ins->inst_basereg, ins->sreg2);
2307 case OP_LOAD_MEMBASE:
2308 case OP_LOADI4_MEMBASE:
2309 case OP_LOADU4_MEMBASE:
2310 /* this case is special, since it happens for spill code after lowering has been called */
2311 if (arm_is_imm12 (ins->inst_offset)) {
2312 ARM_LDR_IMM (code, ins->dreg, ins->inst_basereg, ins->inst_offset);
2314 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_offset);
2315 ARM_LDR_REG_REG (code, ins->dreg, ins->inst_basereg, ARMREG_LR);
2318 case OP_LOADI1_MEMBASE:
2319 g_assert (arm_is_imm8 (ins->inst_offset));
2320 ARM_LDRSB_IMM (code, ins->dreg, ins->inst_basereg, ins->inst_offset);
2322 case OP_LOADU1_MEMBASE:
2323 g_assert (arm_is_imm12 (ins->inst_offset));
2324 ARM_LDRB_IMM (code, ins->dreg, ins->inst_basereg, ins->inst_offset);
2326 case OP_LOADU2_MEMBASE:
2327 g_assert (arm_is_imm8 (ins->inst_offset));
2328 ARM_LDRH_IMM (code, ins->dreg, ins->inst_basereg, ins->inst_offset);
2330 case OP_LOADI2_MEMBASE:
2331 g_assert (arm_is_imm8 (ins->inst_offset));
2332 ARM_LDRSH_IMM (code, ins->dreg, ins->inst_basereg, ins->inst_offset);
2334 case OP_ICONV_TO_I1:
2335 ARM_SHL_IMM (code, ins->dreg, ins->sreg1, 24);
2336 ARM_SAR_IMM (code, ins->dreg, ins->dreg, 24);
2338 case OP_ICONV_TO_I2:
2339 ARM_SHL_IMM (code, ins->dreg, ins->sreg1, 16);
2340 ARM_SAR_IMM (code, ins->dreg, ins->dreg, 16);
2342 case OP_ICONV_TO_U1:
2343 ARM_AND_REG_IMM8 (code, ins->dreg, ins->sreg1, 0xff);
2345 case OP_ICONV_TO_U2:
2346 ARM_SHL_IMM (code, ins->dreg, ins->sreg1, 16);
2347 ARM_SHR_IMM (code, ins->dreg, ins->dreg, 16);
2350 ARM_CMP_REG_REG (code, ins->sreg1, ins->sreg2);
2352 case OP_COMPARE_IMM:
2353 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
2354 g_assert (imm8 >= 0);
2355 ARM_CMP_REG_IMM (code, ins->sreg1, imm8, rot_amount);
2359 * gdb does not like encountering the hw breakpoint ins in the debugged code.
2360 * So instead of emitting a trap, we emit a call a C function and place a
2363 //*(int*)code = 0xef9f0001;
2366 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_INTERNAL_METHOD,
2367 (gpointer)"mono_break");
2368 code = emit_call_seq (cfg, code);
2371 ARM_ADDS_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
2374 ARM_ADD_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
2377 ARM_ADCS_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
2380 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
2381 g_assert (imm8 >= 0);
2382 ARM_ADDS_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
2385 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
2386 g_assert (imm8 >= 0);
2387 ARM_ADD_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
2390 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
2391 g_assert (imm8 >= 0);
2392 ARM_ADCS_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
2395 ARM_ADD_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
2396 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, "OverflowException");
2398 case OP_IADD_OVF_UN:
2399 ARM_ADD_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
2400 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, "OverflowException");
2403 ARM_SUB_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
2404 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, "OverflowException");
2406 case OP_ISUB_OVF_UN:
2407 ARM_SUB_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
2408 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_TRUE, PPC_BR_EQ, "OverflowException");
2410 case OP_ADD_OVF_CARRY:
2411 ARM_ADCS_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
2412 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, "OverflowException");
2414 case OP_ADD_OVF_UN_CARRY:
2415 ARM_ADCS_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
2416 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, "OverflowException");
2418 case OP_SUB_OVF_CARRY:
2419 ARM_SBCS_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
2420 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, "OverflowException");
2422 case OP_SUB_OVF_UN_CARRY:
2423 ARM_SBCS_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
2424 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_TRUE, PPC_BR_EQ, "OverflowException");
2427 ARM_SUBS_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
2430 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
2431 g_assert (imm8 >= 0);
2432 ARM_SUBS_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
2435 ARM_SUB_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
2438 ARM_SBCS_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
2441 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
2442 g_assert (imm8 >= 0);
2443 ARM_SUB_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
2446 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
2447 g_assert (imm8 >= 0);
2448 ARM_SBCS_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
2450 case OP_ARM_RSBS_IMM:
2451 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
2452 g_assert (imm8 >= 0);
2453 ARM_RSBS_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
2455 case OP_ARM_RSC_IMM:
2456 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
2457 g_assert (imm8 >= 0);
2458 ARM_RSC_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
2461 ARM_AND_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
2464 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
2465 g_assert (imm8 >= 0);
2466 ARM_AND_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
2474 /* crappy ARM arch doesn't have a DIV instruction */
2475 g_assert_not_reached ();
2477 ARM_ORR_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
2480 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
2481 g_assert (imm8 >= 0);
2482 ARM_ORR_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
2485 ARM_EOR_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
2488 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
2489 g_assert (imm8 >= 0);
2490 ARM_EOR_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
2493 ARM_SHL_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
2497 ARM_SHL_IMM (code, ins->dreg, ins->sreg1, (ins->inst_imm & 0x1f));
2498 else if (ins->dreg != ins->sreg1)
2499 ARM_MOV_REG_REG (code, ins->dreg, ins->sreg1);
2502 ARM_SAR_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
2506 ARM_SAR_IMM (code, ins->dreg, ins->sreg1, (ins->inst_imm & 0x1f));
2507 else if (ins->dreg != ins->sreg1)
2508 ARM_MOV_REG_REG (code, ins->dreg, ins->sreg1);
2512 ARM_SHR_IMM (code, ins->dreg, ins->sreg1, (ins->inst_imm & 0x1f));
2513 else if (ins->dreg != ins->sreg1)
2514 ARM_MOV_REG_REG (code, ins->dreg, ins->sreg1);
2517 ARM_SHR_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
2520 ARM_MVN_REG_REG (code, ins->dreg, ins->sreg1);
2523 ARM_RSB_REG_IMM8 (code, ins->dreg, ins->sreg1, 0);
2526 if (ins->dreg == ins->sreg2)
2527 ARM_MUL_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
2529 ARM_MUL_REG_REG (code, ins->dreg, ins->sreg2, ins->sreg1);
2532 g_assert_not_reached ();
2535 /* FIXME: handle ovf/ sreg2 != dreg */
2536 ARM_MUL_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
2538 case OP_IMUL_OVF_UN:
2539 /* FIXME: handle ovf/ sreg2 != dreg */
2540 ARM_MUL_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
2543 code = mono_arm_emit_load_imm (code, ins->dreg, ins->inst_c0);
2546 /* Load the GOT offset */
2547 mono_add_patch_info (cfg, offset, (MonoJumpInfoType)ins->inst_i1, ins->inst_p0);
2548 ARM_LDR_IMM (code, ins->dreg, ARMREG_PC, 0);
2550 *(gpointer*)code = NULL;
2552 /* Load the value from the GOT */
2553 ARM_LDR_REG_REG (code, ins->dreg, ARMREG_PC, ins->dreg);
2555 case OP_ICONV_TO_I4:
2556 case OP_ICONV_TO_U4:
2558 if (ins->dreg != ins->sreg1)
2559 ARM_MOV_REG_REG (code, ins->dreg, ins->sreg1);
2562 int saved = ins->sreg2;
2563 if (ins->sreg2 == ARM_LSW_REG) {
2564 ARM_MOV_REG_REG (code, ARMREG_LR, ins->sreg2);
2567 if (ins->sreg1 != ARM_LSW_REG)
2568 ARM_MOV_REG_REG (code, ARM_LSW_REG, ins->sreg1);
2569 if (saved != ARM_MSW_REG)
2570 ARM_MOV_REG_REG (code, ARM_MSW_REG, saved);
2575 ARM_MVFD (code, ins->dreg, ins->sreg1);
2576 #elif defined(ARM_FPU_VFP)
2577 ARM_CPYD (code, ins->dreg, ins->sreg1);
2580 case OP_FCONV_TO_R4:
2582 ARM_MVFS (code, ins->dreg, ins->sreg1);
2583 #elif defined(ARM_FPU_VFP)
2584 ARM_CVTD (code, ins->dreg, ins->sreg1);
2585 ARM_CVTS (code, ins->dreg, ins->dreg);
2590 * Keep in sync with mono_arch_emit_epilog
2592 g_assert (!cfg->method->save_lmf);
2594 code = emit_load_volatile_arguments (cfg, code);
2596 code = emit_big_add (code, ARMREG_SP, cfg->frame_reg, cfg->stack_usage);
2597 ARM_POP_NWB (code, cfg->used_int_regs | ((1 << ARMREG_SP)) | ((1 << ARMREG_LR)));
2598 mono_add_patch_info (cfg, (guint8*) code - cfg->native_code, MONO_PATCH_INFO_METHOD_JUMP, ins->inst_p0);
2602 /* ensure ins->sreg1 is not NULL */
2603 ARM_LDR_IMM (code, ARMREG_LR, ins->sreg1, 0);
2607 if (ppc_is_imm16 (cfg->sig_cookie + cfg->stack_usage)) {
2608 ppc_addi (code, ppc_r11, cfg->frame_reg, cfg->sig_cookie + cfg->stack_usage);
2610 ppc_load (code, ppc_r11, cfg->sig_cookie + cfg->stack_usage);
2611 ppc_add (code, ppc_r11, cfg->frame_reg, ppc_r11);
2613 ppc_stw (code, ppc_r11, 0, ins->sreg1);
2622 call = (MonoCallInst*)ins;
2623 if (ins->flags & MONO_INST_HAS_METHOD)
2624 mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_METHOD, call->method);
2626 mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_ABS, call->fptr);
2627 code = emit_call_seq (cfg, code);
2628 code = emit_move_return_value (cfg, ins, code);
2633 case OP_VOIDCALL_REG:
2635 code = emit_call_reg (code, ins->sreg1);
2636 code = emit_move_return_value (cfg, ins, code);
2638 case OP_FCALL_MEMBASE:
2639 case OP_LCALL_MEMBASE:
2640 case OP_VCALL_MEMBASE:
2641 case OP_VOIDCALL_MEMBASE:
2642 case OP_CALL_MEMBASE:
2643 g_assert (arm_is_imm12 (ins->inst_offset));
2644 g_assert (ins->sreg1 != ARMREG_LR);
2645 call = (MonoCallInst*)ins;
2646 if (call->method->klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
2647 ARM_ADD_REG_IMM8 (code, ARMREG_LR, ARMREG_PC, 4);
2648 ARM_LDR_IMM (code, ARMREG_PC, ins->sreg1, ins->inst_offset);
2649 if (cfg->compile_aot) {
2651 * We can't embed the method in the code stream in PIC code. Instead,
2652 * we put it in V5 in code emitted by mono_arch_emit_imt_argument (),
2653 * and embed NULL here to signal the IMT thunk that the call is made
2656 *((gpointer*)code) = NULL;
2658 *((gpointer*)code) = (gpointer)call->method;
2662 ARM_MOV_REG_REG (code, ARMREG_LR, ARMREG_PC);
2663 ARM_LDR_IMM (code, ARMREG_PC, ins->sreg1, ins->inst_offset);
2665 code = emit_move_return_value (cfg, ins, code);
2668 g_assert_not_reached ();
2671 /* keep alignment */
2672 int alloca_waste = cfg->param_area;
2675 /* round the size to 8 bytes */
2676 ARM_ADD_REG_IMM8 (code, ins->dreg, ins->sreg1, 7);
2677 ARM_BIC_REG_IMM8 (code, ins->dreg, ins->dreg, 7);
2679 ARM_ADD_REG_IMM8 (code, ins->dreg, ins->dreg, alloca_waste);
2680 ARM_SUB_REG_REG (code, ARMREG_SP, ARMREG_SP, ins->dreg);
2681 /* memzero the area: dreg holds the size, sp is the pointer */
2682 if (ins->flags & MONO_INST_INIT) {
2683 guint8 *start_loop, *branch_to_cond;
2684 ARM_MOV_REG_IMM8 (code, ARMREG_LR, 0);
2685 branch_to_cond = code;
2688 ARM_STR_REG_REG (code, ARMREG_LR, ARMREG_SP, ins->dreg);
2689 arm_patch (branch_to_cond, code);
2690 /* decrement by 4 and set flags */
2691 ARM_SUBS_REG_IMM8 (code, ins->dreg, ins->dreg, 4);
2692 ARM_B_COND (code, ARMCOND_GE, 0);
2693 arm_patch (code - 4, start_loop);
2695 ARM_ADD_REG_IMM8 (code, ins->dreg, ARMREG_SP, alloca_waste);
2699 if (ins->sreg1 != ARMREG_R0)
2700 ARM_MOV_REG_REG (code, ARMREG_R0, ins->sreg1);
2701 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_INTERNAL_METHOD,
2702 (gpointer)"mono_arch_throw_exception");
2703 code = emit_call_seq (cfg, code);
2707 if (ins->sreg1 != ARMREG_R0)
2708 ARM_MOV_REG_REG (code, ARMREG_R0, ins->sreg1);
2709 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_INTERNAL_METHOD,
2710 (gpointer)"mono_arch_rethrow_exception");
2711 code = emit_call_seq (cfg, code);
2714 case OP_START_HANDLER:
2715 if (arm_is_imm12 (ins->inst_left->inst_offset)) {
2716 ARM_STR_IMM (code, ARMREG_LR, ins->inst_left->inst_basereg, ins->inst_left->inst_offset);
2718 code = mono_arm_emit_load_imm (code, ARMREG_IP, ins->inst_left->inst_offset);
2719 ARM_STR_REG_REG (code, ARMREG_LR, ins->inst_left->inst_basereg, ARMREG_IP);
2723 if (ins->sreg1 != ARMREG_R0)
2724 ARM_MOV_REG_REG (code, ARMREG_R0, ins->sreg1);
2725 if (arm_is_imm12 (ins->inst_left->inst_offset)) {
2726 ARM_LDR_IMM (code, ARMREG_IP, ins->inst_left->inst_basereg, ins->inst_left->inst_offset);
2728 g_assert (ARMREG_IP != ins->inst_left->inst_basereg);
2729 code = mono_arm_emit_load_imm (code, ARMREG_IP, ins->inst_left->inst_offset);
2730 ARM_LDR_REG_REG (code, ARMREG_IP, ins->inst_left->inst_basereg, ARMREG_IP);
2732 ARM_MOV_REG_REG (code, ARMREG_PC, ARMREG_IP);
2735 if (arm_is_imm12 (ins->inst_left->inst_offset)) {
2736 ARM_LDR_IMM (code, ARMREG_IP, ins->inst_left->inst_basereg, ins->inst_left->inst_offset);
2738 g_assert (ARMREG_IP != ins->inst_left->inst_basereg);
2739 code = mono_arm_emit_load_imm (code, ARMREG_IP, ins->inst_left->inst_offset);
2740 ARM_LDR_REG_REG (code, ARMREG_IP, ins->inst_left->inst_basereg, ARMREG_IP);
2742 ARM_MOV_REG_REG (code, ARMREG_PC, ARMREG_IP);
2744 case OP_CALL_HANDLER:
2745 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_BB, ins->inst_target_bb);
2749 ins->inst_c0 = code - cfg->native_code;
2752 if (ins->flags & MONO_INST_BRLABEL) {
2753 /*if (ins->inst_i0->inst_c0) {
2755 //x86_jump_code (code, cfg->native_code + ins->inst_i0->inst_c0);
2757 mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_LABEL, ins->inst_i0);
2761 /*if (ins->inst_target_bb->native_offset) {
2763 //x86_jump_code (code, cfg->native_code + ins->inst_target_bb->native_offset);
2765 mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_BB, ins->inst_target_bb);
2771 ARM_MOV_REG_REG (code, ARMREG_PC, ins->sreg1);
2775 * In the normal case we have:
2776 * ldr pc, [pc, ins->sreg1 << 2]
2779 * ldr lr, [pc, ins->sreg1 << 2]
2781 * After follows the data.
2782 * FIXME: add aot support.
2784 max_len += 4 * GPOINTER_TO_INT (ins->klass);
2785 if (offset > (cfg->code_size - max_len - 16)) {
2786 cfg->code_size += max_len;
2787 cfg->code_size *= 2;
2788 cfg->native_code = g_realloc (cfg->native_code, cfg->code_size);
2789 code = cfg->native_code + offset;
2791 ARM_LDR_REG_REG_SHIFT (code, ARMREG_PC, ARMREG_PC, ins->sreg1, ARMSHIFT_LSL, 2);
2793 code += 4 * GPOINTER_TO_INT (ins->klass);
2796 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 0, ARMCOND_NE);
2797 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_EQ);
2800 ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
2801 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_LT);
2804 ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
2805 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_LO);
2808 ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
2809 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_GT);
2812 ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
2813 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_HI);
2815 case OP_COND_EXC_EQ:
2816 case OP_COND_EXC_NE_UN:
2817 case OP_COND_EXC_LT:
2818 case OP_COND_EXC_LT_UN:
2819 case OP_COND_EXC_GT:
2820 case OP_COND_EXC_GT_UN:
2821 case OP_COND_EXC_GE:
2822 case OP_COND_EXC_GE_UN:
2823 case OP_COND_EXC_LE:
2824 case OP_COND_EXC_LE_UN:
2825 EMIT_COND_SYSTEM_EXCEPTION (ins->opcode - OP_COND_EXC_EQ, ins->inst_p1);
2828 case OP_COND_EXC_OV:
2829 case OP_COND_EXC_NC:
2830 case OP_COND_EXC_NO:
2831 g_assert_not_reached ();
2843 EMIT_COND_BRANCH (ins, ins->opcode - OP_IBEQ);
2846 /* floating point opcodes */
2849 if (cfg->compile_aot) {
2850 ARM_LDFD (code, ins->dreg, ARMREG_PC, 0);
2852 *(guint32*)code = ((guint32*)(ins->inst_p0))[0];
2854 *(guint32*)code = ((guint32*)(ins->inst_p0))[1];
2857 /* FIXME: we can optimize the imm load by dealing with part of
2858 * the displacement in LDFD (aligning to 512).
2860 code = mono_arm_emit_load_imm (code, ARMREG_LR, (guint32)ins->inst_p0);
2861 ARM_LDFD (code, ins->dreg, ARMREG_LR, 0);
2865 if (cfg->compile_aot) {
2866 ARM_LDFS (code, ins->dreg, ARMREG_PC, 0);
2868 *(guint32*)code = ((guint32*)(ins->inst_p0))[0];
2871 code = mono_arm_emit_load_imm (code, ARMREG_LR, (guint32)ins->inst_p0);
2872 ARM_LDFS (code, ins->dreg, ARMREG_LR, 0);
2875 case OP_STORER8_MEMBASE_REG:
2876 /* This is generated by the local regalloc pass which runs after the lowering pass */
2877 if (!arm_is_fpimm8 (ins->inst_offset)) {
2878 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_offset);
2879 ARM_STFD (code, ins->sreg1, ARMREG_LR, 0);
2881 ARM_STFD (code, ins->sreg1, ins->inst_destbasereg, ins->inst_offset);
2884 case OP_LOADR8_MEMBASE:
2885 /* This is generated by the local regalloc pass which runs after the lowering pass */
2886 if (!arm_is_fpimm8 (ins->inst_offset)) {
2887 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_offset);
2888 ARM_LDFD (code, ins->dreg, ARMREG_LR, 0);
2890 ARM_LDFD (code, ins->dreg, ins->inst_basereg, ins->inst_offset);
2893 case OP_STORER4_MEMBASE_REG:
2894 g_assert (arm_is_fpimm8 (ins->inst_offset));
2895 ARM_STFS (code, ins->sreg1, ins->inst_destbasereg, ins->inst_offset);
2897 case OP_LOADR4_MEMBASE:
2898 g_assert (arm_is_fpimm8 (ins->inst_offset));
2899 ARM_LDFS (code, ins->dreg, ins->inst_basereg, ins->inst_offset);
2901 case OP_ICONV_TO_R_UN: {
2903 tmpreg = ins->dreg == 0? 1: 0;
2904 ARM_CMP_REG_IMM8 (code, ins->sreg1, 0);
2905 ARM_FLTD (code, ins->dreg, ins->sreg1);
2906 ARM_B_COND (code, ARMCOND_GE, 8);
2907 /* save the temp register */
2908 ARM_SUB_REG_IMM8 (code, ARMREG_SP, ARMREG_SP, 8);
2909 ARM_STFD (code, tmpreg, ARMREG_SP, 0);
2910 ARM_LDFD (code, tmpreg, ARMREG_PC, 12);
2911 ARM_FPA_ADFD (code, ins->dreg, ins->dreg, tmpreg);
2912 ARM_LDFD (code, tmpreg, ARMREG_SP, 0);
2913 ARM_ADD_REG_IMM8 (code, ARMREG_SP, ARMREG_SP, 8);
2914 /* skip the constant pool */
2917 *(int*)code = 0x41f00000;
2922 * ldfltd ftemp, [pc, #8] 0x41f00000 0x00000000
2923 * adfltd fdest, fdest, ftemp
2927 case OP_ICONV_TO_R4:
2928 ARM_FLTS (code, ins->dreg, ins->sreg1);
2930 case OP_ICONV_TO_R8:
2931 ARM_FLTD (code, ins->dreg, ins->sreg1);
2933 #elif defined(ARM_FPU_VFP)
2935 if (cfg->compile_aot) {
2936 ARM_FLDD (code, ins->dreg, ARMREG_PC, 0);
2938 *(guint32*)code = ((guint32*)(ins->inst_p0))[0];
2940 *(guint32*)code = ((guint32*)(ins->inst_p0))[1];
2943 /* FIXME: we can optimize the imm load by dealing with part of
2944 * the displacement in LDFD (aligning to 512).
2946 code = mono_arm_emit_load_imm (code, ARMREG_LR, (guint32)ins->inst_p0);
2947 ARM_FLDD (code, ins->dreg, ARMREG_LR, 0);
2951 if (cfg->compile_aot) {
2952 ARM_FLDS (code, ins->dreg, ARMREG_PC, 0);
2954 *(guint32*)code = ((guint32*)(ins->inst_p0))[0];
2956 ARM_CVTS (code, ins->dreg, ins->dreg);
2958 code = mono_arm_emit_load_imm (code, ARMREG_LR, (guint32)ins->inst_p0);
2959 ARM_FLDS (code, ins->dreg, ARMREG_LR, 0);
2960 ARM_CVTS (code, ins->dreg, ins->dreg);
2963 case OP_STORER8_MEMBASE_REG:
2964 g_assert (arm_is_fpimm8 (ins->inst_offset));
2965 ARM_FSTD (code, ins->sreg1, ins->inst_destbasereg, ins->inst_offset);
2967 case OP_LOADR8_MEMBASE:
2968 g_assert (arm_is_fpimm8 (ins->inst_offset));
2969 ARM_FLDD (code, ins->dreg, ins->inst_basereg, ins->inst_offset);
2971 case OP_STORER4_MEMBASE_REG:
2972 g_assert (arm_is_fpimm8 (ins->inst_offset));
2973 ARM_FSTS (code, ins->sreg1, ins->inst_destbasereg, ins->inst_offset);
2975 case OP_LOADR4_MEMBASE:
2976 g_assert (arm_is_fpimm8 (ins->inst_offset));
2977 ARM_FLDS (code, ins->dreg, ins->inst_basereg, ins->inst_offset);
2979 case OP_ICONV_TO_R_UN: {
2980 g_assert_not_reached ();
2983 case OP_ICONV_TO_R4:
2984 g_assert_not_reached ();
2985 //ARM_FLTS (code, ins->dreg, ins->sreg1);
2987 case OP_ICONV_TO_R8:
2988 g_assert_not_reached ();
2989 //ARM_FLTD (code, ins->dreg, ins->sreg1);
2992 case OP_FCONV_TO_I1:
2993 code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 1, TRUE);
2995 case OP_FCONV_TO_U1:
2996 code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 1, FALSE);
2998 case OP_FCONV_TO_I2:
2999 code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 2, TRUE);
3001 case OP_FCONV_TO_U2:
3002 code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 2, FALSE);
3004 case OP_FCONV_TO_I4:
3006 code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 4, TRUE);
3008 case OP_FCONV_TO_U4:
3010 code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 4, FALSE);
3012 case OP_FCONV_TO_I8:
3013 case OP_FCONV_TO_U8:
3014 g_assert_not_reached ();
3015 /* Implemented as helper calls */
3017 case OP_LCONV_TO_R_UN:
3018 g_assert_not_reached ();
3019 /* Implemented as helper calls */
3021 case OP_LCONV_TO_OVF_I: {
3023 guint32 *negative_branch, *msword_positive_branch, *msword_negative_branch, *ovf_ex_target;
3024 // Check if it's negative
3025 ppc_cmpi (code, 0, 0, ins->sreg1, 0);
3026 negative_branch = code;
3027 ppc_bc (code, PPC_BR_TRUE, PPC_BR_LT, 0);
3028 // It's positive: msword == 0
3029 ppc_cmpi (code, 0, 0, ins->sreg2, 0);
3030 msword_positive_branch = code;
3031 ppc_bc (code, PPC_BR_TRUE, PPC_BR_EQ, 0);
3033 ovf_ex_target = code;
3034 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_ALWAYS, 0, "OverflowException");
3036 ppc_patch (negative_branch, code);
3037 ppc_cmpi (code, 0, 0, ins->sreg2, -1);
3038 msword_negative_branch = code;
3039 ppc_bc (code, PPC_BR_FALSE, PPC_BR_EQ, 0);
3040 ppc_patch (msword_negative_branch, ovf_ex_target);
3042 ppc_patch (msword_positive_branch, code);
3043 if (ins->dreg != ins->sreg1)
3044 ppc_mr (code, ins->dreg, ins->sreg1);
3046 if (ins->dreg != ins->sreg1)
3047 ARM_MOV_REG_REG (code, ins->dreg, ins->sreg1);
3052 ARM_FPA_ADFD (code, ins->dreg, ins->sreg1, ins->sreg2);
3055 ARM_FPA_SUFD (code, ins->dreg, ins->sreg1, ins->sreg2);
3058 ARM_FPA_MUFD (code, ins->dreg, ins->sreg1, ins->sreg2);
3061 ARM_FPA_DVFD (code, ins->dreg, ins->sreg1, ins->sreg2);
3064 ARM_MNFD (code, ins->dreg, ins->sreg1);
3066 #elif defined(ARM_FPU_VFP)
3068 ARM_VFP_ADDD (code, ins->dreg, ins->sreg1, ins->sreg2);
3071 ARM_VFP_SUBD (code, ins->dreg, ins->sreg1, ins->sreg2);
3074 ARM_VFP_MULD (code, ins->dreg, ins->sreg1, ins->sreg2);
3077 ARM_VFP_DIVD (code, ins->dreg, ins->sreg1, ins->sreg2);
3080 ARM_NEGD (code, ins->dreg, ins->sreg1);
3085 g_assert_not_reached ();
3088 /* each fp compare op needs to do its own */
3089 g_assert_not_reached ();
3090 //ARM_FCMP (code, ARM_FPA_CMF, ins->sreg1, ins->sreg2);
3094 ARM_FCMP (code, ARM_FPA_CMF, ins->sreg1, ins->sreg2);
3095 #elif defined(ARM_FPU_VFP)
3096 ARM_CMPD (code, ins->sreg1, ins->sreg2);
3098 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 0, ARMCOND_NE);
3099 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_EQ);
3103 ARM_FCMP (code, ARM_FPA_CMF, ins->sreg1, ins->sreg2);
3104 #elif defined(ARM_FPU_VFP)
3105 ARM_CMPD (code, ins->sreg1, ins->sreg2);
3107 ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
3108 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_MI);
3112 ARM_FCMP (code, ARM_FPA_CMF, ins->sreg1, ins->sreg2);
3113 #elif defined(ARM_FPU_VFP)
3114 ARM_CMPD (code, ins->sreg1, ins->sreg2);
3116 ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
3117 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_MI);
3118 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_VS);
3123 ARM_FCMP (code, ARM_FPA_CMF, ins->sreg2, ins->sreg1);
3124 #elif defined(ARM_FPU_VFP)
3125 ARM_CMPD (code, ins->sreg2, ins->sreg1);
3127 ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
3128 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_MI);
3133 ARM_FCMP (code, ARM_FPA_CMF, ins->sreg2, ins->sreg1);
3134 #elif defined(ARM_FPU_VFP)
3135 ARM_CMPD (code, ins->sreg2, ins->sreg1);
3137 ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
3138 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_MI);
3139 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_VS);
3141 /* ARM FPA flags table:
3142 * N Less than ARMCOND_MI
3143 * Z Equal ARMCOND_EQ
3144 * C Greater Than or Equal ARMCOND_CS
3145 * V Unordered ARMCOND_VS
3149 ARM_FCMP (code, ARM_FPA_CMF, ins->sreg1, ins->sreg2);
3150 #elif defined(ARM_FPU_VFP)
3151 ARM_CMPD (code, ins->sreg1, ins->sreg2);
3153 EMIT_COND_BRANCH (ins, OP_IBEQ - OP_IBEQ);
3157 ARM_FCMP (code, ARM_FPA_CMF, ins->sreg1, ins->sreg2);
3158 #elif defined(ARM_FPU_VFP)
3159 ARM_CMPD (code, ins->sreg1, ins->sreg2);
3161 EMIT_COND_BRANCH (ins, OP_IBNE_UN - OP_IBEQ);
3165 ARM_FCMP (code, ARM_FPA_CMF, ins->sreg1, ins->sreg2);
3166 #elif defined(ARM_FPU_VFP)
3167 ARM_CMPD (code, ins->sreg1, ins->sreg2);
3169 EMIT_COND_BRANCH_FLAGS (ins, ARMCOND_MI); /* N set */
3173 ARM_FCMP (code, ARM_FPA_CMF, ins->sreg1, ins->sreg2);
3174 #elif defined(ARM_FPU_VFP)
3175 ARM_CMPD (code, ins->sreg1, ins->sreg2);
3177 EMIT_COND_BRANCH_FLAGS (ins, ARMCOND_VS); /* V set */
3178 EMIT_COND_BRANCH_FLAGS (ins, ARMCOND_MI); /* N set */
3182 ARM_FCMP (code, ARM_FPA_CMF, ins->sreg2, ins->sreg1);
3183 #elif defined(ARM_FPU_VFP)
3184 ARM_CMPD (code, ins->sreg2, ins->sreg1);
3186 EMIT_COND_BRANCH_FLAGS (ins, ARMCOND_MI); /* N set, swapped args */
3190 ARM_FCMP (code, ARM_FPA_CMF, ins->sreg2, ins->sreg1);
3191 #elif defined(ARM_FPU_VFP)
3192 ARM_CMPD (code, ins->sreg2, ins->sreg1);
3194 EMIT_COND_BRANCH_FLAGS (ins, ARMCOND_VS); /* V set */
3195 EMIT_COND_BRANCH_FLAGS (ins, ARMCOND_MI); /* N set, swapped args */
3199 ARM_FCMP (code, ARM_FPA_CMF, ins->sreg1, ins->sreg2);
3200 #elif defined(ARM_FPU_VFP)
3201 ARM_CMPD (code, ins->sreg1, ins->sreg2);
3203 EMIT_COND_BRANCH_FLAGS (ins, ARMCOND_CS);
3207 ARM_FCMP (code, ARM_FPA_CMF, ins->sreg1, ins->sreg2);
3208 #elif defined(ARM_FPU_VFP)
3209 ARM_CMPD (code, ins->sreg1, ins->sreg2);
3211 EMIT_COND_BRANCH_FLAGS (ins, ARMCOND_VS); /* V set */
3212 EMIT_COND_BRANCH_FLAGS (ins, ARMCOND_GE);
3216 ARM_FCMP (code, ARM_FPA_CMF, ins->sreg2, ins->sreg1);
3217 #elif defined(ARM_FPU_VFP)
3218 ARM_CMPD (code, ins->sreg2, ins->sreg1);
3220 EMIT_COND_BRANCH_FLAGS (ins, ARMCOND_CS); /* swapped */
3224 ARM_FCMP (code, ARM_FPA_CMF, ins->sreg2, ins->sreg1);
3225 #elif defined(ARM_FPU_VFP)
3226 ARM_CMPD (code, ins->sreg2, ins->sreg1);
3228 EMIT_COND_BRANCH_FLAGS (ins, ARMCOND_VS); /* V set */
3229 EMIT_COND_BRANCH_FLAGS (ins, ARMCOND_GE); /* swapped */
3233 if (ins->dreg != ins->sreg1)
3234 ARM_MVFD (code, ins->dreg, ins->sreg1);
3236 g_assert_not_reached ();
3241 g_warning ("unknown opcode %s in %s()\n", mono_inst_name (ins->opcode), __FUNCTION__);
3242 g_assert_not_reached ();
3245 if ((cfg->opt & MONO_OPT_BRANCH) && ((code - cfg->native_code - offset) > max_len)) {
3246 g_warning ("wrong maximal instruction length of instruction %s (expected %d, got %d)",
3247 mono_inst_name (ins->opcode), max_len, code - cfg->native_code - offset);
3248 g_assert_not_reached ();
3253 last_offset = offset;
3256 cfg->code_len = code - cfg->native_code;
3260 mono_arch_register_lowlevel_calls (void)
/*
 * patch_lis_ori: rewrite the 16-bit immediate fields of a lis/ori
 * instruction pair at `ip` with the high and low halfwords of `val`.
 * This is a PowerPC leftover; on ARM it is only reached from the
 * g_assert_not_reached() branches in mono_arch_patch_code () below
 * (see the "from OP_AOTCONST : lis + ori" comment there).
 * NOTE(review): the closing "} while (0)" of this macro is elided in
 * this listing (gap in the embedded line numbers).
 */
3264 #define patch_lis_ori(ip,val) do {\
3265 guint16 *__lis_ori = (guint16*)(ip); \
3266 __lis_ori [1] = (((guint32)(val)) >> 16) & 0xffff; \
3267 __lis_ori [3] = ((guint32)(val)) & 0xffff; \
/*
 * mono_arch_patch_code:
 * Resolve and apply every MonoJumpInfo patch in `ji` against the native
 * code of `method` starting at `code`.  compile_aot is derived from
 * !run_cctors; in AOT mode switch tables are not patched here.
 * NOTE(review): this listing elides interior lines (gaps in the embedded
 * line numbers) — braces, `break`s and some statements are missing below;
 * the comments only describe the visible code.
 */
3271 mono_arch_patch_code (MonoMethod *method, MonoDomain *domain, guint8 *code, MonoJumpInfo *ji, gboolean run_cctors)
3273 MonoJumpInfo *patch_info;
3274 gboolean compile_aot = !run_cctors;
3276 for (patch_info = ji; patch_info; patch_info = patch_info->next) {
/* ip.i is an offset from the start of the method's native code */
3277 unsigned char *ip = patch_info->ip.i + code;
3278 const unsigned char *target;
3280 if (patch_info->type == MONO_PATCH_INFO_SWITCH && !compile_aot) {
/* the jump table is inlined 2 instructions (8 bytes) after ip */
3281 gpointer *jt = (gpointer*)(ip + 8);
3283 /* jt is the inlined jump table, 2 instructions after ip
3284 * In the normal case we store the absolute addresses,
3285 * otherwise the displacements.
3287 for (i = 0; i < patch_info->data.table->table_size; i++)
3288 jt [i] = code + (int)patch_info->data.table->table [i];
3291 target = mono_resolve_patch_target (method, domain, code, patch_info, run_cctors);
3294 switch (patch_info->type) {
3295 case MONO_PATCH_INFO_BB:
3296 case MONO_PATCH_INFO_LABEL:
3299 /* No need to patch these */
/* PPC-era leftovers: every branch below starts with
 * g_assert_not_reached (), so the patch_lis_ori / raw-store code that
 * follows each assert is dead on ARM. */
3304 switch (patch_info->type) {
3305 case MONO_PATCH_INFO_IP:
3306 g_assert_not_reached ();
3307 patch_lis_ori (ip, ip);
3309 case MONO_PATCH_INFO_METHOD_REL:
3310 g_assert_not_reached ();
3311 *((gpointer *)(ip)) = code + patch_info->data.offset;
3313 case MONO_PATCH_INFO_METHODCONST:
3314 case MONO_PATCH_INFO_CLASS:
3315 case MONO_PATCH_INFO_IMAGE:
3316 case MONO_PATCH_INFO_FIELD:
3317 case MONO_PATCH_INFO_VTABLE:
3318 case MONO_PATCH_INFO_IID:
3319 case MONO_PATCH_INFO_SFLDA:
3320 case MONO_PATCH_INFO_LDSTR:
3321 case MONO_PATCH_INFO_TYPE_FROM_HANDLE:
3322 case MONO_PATCH_INFO_LDTOKEN:
3323 g_assert_not_reached ();
3324 /* from OP_AOTCONST : lis + ori */
3325 patch_lis_ori (ip, target);
3327 case MONO_PATCH_INFO_R4:
3328 case MONO_PATCH_INFO_R8:
3329 g_assert_not_reached ();
3330 *((gconstpointer *)(ip + 2)) = patch_info->data.target;
3332 case MONO_PATCH_INFO_EXC_NAME:
3333 g_assert_not_reached ();
3334 *((gconstpointer *)(ip + 1)) = patch_info->data.name;
3336 case MONO_PATCH_INFO_NONE:
3337 case MONO_PATCH_INFO_BB_OVF:
3338 case MONO_PATCH_INFO_EXC_OVF:
3339 /* everything is dealt with at epilog output time */
/* default/fall-through: the actual ARM patching is done by arm_patch () */
3344 arm_patch (ip, target);
3349 * Stack frame layout:
3351 * ------------------- fp
3352 * MonoLMF structure or saved registers
3353 * -------------------
3355 * -------------------
3357 * -------------------
3358 * optional 8 bytes for tracing
3359 * -------------------
3360 * param area size is cfg->param_area
3361 * ------------------- sp
/*
 * mono_arch_emit_prolog:
 * Emit the method prolog: save callee-saved registers (or build a MonoLMF
 * frame when method->save_lmf is set), allocate the stack frame, and move
 * incoming arguments from registers / the caller's frame to their home
 * locations.  See the stack-frame-layout comment above.
 * NOTE(review): this listing elides interior lines (gaps in the embedded
 * line numbers); comments describe only the visible code.
 */
3364 mono_arch_emit_prolog (MonoCompile *cfg)
3366 MonoMethod *method = cfg->method;
3368 MonoMethodSignature *sig;
3370 int alloc_size, pos, max_offset, i, rot_amount;
3377 if (mono_jit_trace_calls != NULL && mono_trace_eval (method))
/* initial code buffer sized from the signature; grown later if needed */
3380 sig = mono_method_signature (method);
3381 cfg->code_size = 256 + sig->param_count * 20;
3382 code = cfg->native_code = g_malloc (cfg->code_size);
/* keep the caller's sp in ip so the pushed regs can be addressed */
3384 ARM_MOV_REG_REG (code, ARMREG_IP, ARMREG_SP);
3386 alloc_size = cfg->stack_offset;
3389 if (!method->save_lmf) {
3390 ARM_PUSH (code, (cfg->used_int_regs | (1 << ARMREG_IP) | (1 << ARMREG_LR)));
3391 prev_sp_offset = 8; /* ip and lr */
3392 for (i = 0; i < 16; ++i) {
3393 if (cfg->used_int_regs & (1 << i))
3394 prev_sp_offset += 4;
/* save_lmf path: push r4-r12 and lr (mask 0x5ff0) to seed the MonoLMF */
3397 ARM_PUSH (code, 0x5ff0);
3398 prev_sp_offset = 4 * 10; /* all but r0-r3, sp and pc */
3399 pos += sizeof (MonoLMF) - prev_sp_offset;
3403 // align to MONO_ARCH_FRAME_ALIGNMENT bytes
3404 if (alloc_size & (MONO_ARCH_FRAME_ALIGNMENT - 1)) {
3405 alloc_size += MONO_ARCH_FRAME_ALIGNMENT - 1;
3406 alloc_size &= ~(MONO_ARCH_FRAME_ALIGNMENT - 1);
3409 /* the stack used in the pushed regs */
3410 if (prev_sp_offset & 4)
3412 cfg->stack_usage = alloc_size;
/* subtract the frame size from sp: rotated-imm8 form when encodable */
3414 if ((i = mono_arm_is_rotated_imm8 (alloc_size, &rot_amount)) >= 0) {
3415 ARM_SUB_REG_IMM (code, ARMREG_SP, ARMREG_SP, i, rot_amount);
3417 code = mono_arm_emit_load_imm (code, ARMREG_IP, alloc_size);
3418 ARM_SUB_REG_REG (code, ARMREG_SP, ARMREG_SP, ARMREG_IP);
3421 if (cfg->frame_reg != ARMREG_SP)
3422 ARM_MOV_REG_REG (code, cfg->frame_reg, ARMREG_SP);
3423 //g_print ("prev_sp_offset: %d, alloc_size:%d\n", prev_sp_offset, alloc_size);
3424 prev_sp_offset += alloc_size;
3426 /* compute max_offset in order to use short forward jumps
3427 * we could skip doing it on arm because the immediate displacement
3428 * for jumps is large enough, it may be useful later for constant pools
3431 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
3433 bb->max_offset = max_offset;
3435 if (cfg->prof_options & MONO_PROFILE_COVERAGE)
3438 MONO_BB_FOR_EACH_INS (bb, ins)
3439 max_offset += ((guint8 *)ins_get_spec (ins->opcode))[MONO_INST_LEN];
3442 /* load arguments allocated to register from the stack */
3445 cinfo = calculate_sizes (sig, sig->pinvoke);
/* valuetype return: store the hidden return-address argument */
3447 if (MONO_TYPE_ISSTRUCT (sig->ret)) {
3448 ArgInfo *ainfo = &cinfo->ret;
3449 inst = cfg->vret_addr;
3450 g_assert (arm_is_imm12 (inst->inst_offset));
3451 ARM_STR_IMM (code, ainfo->reg, inst->inst_basereg, inst->inst_offset);
/* move each incoming argument to its home (register or stack slot) */
3453 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
3454 ArgInfo *ainfo = cinfo->args + i;
3455 inst = cfg->args [pos];
3457 if (cfg->verbose_level > 2)
3458 g_print ("Saving argument %d (type: %d)\n", i, ainfo->regtype);
3459 if (inst->opcode == OP_REGVAR) {
3460 if (ainfo->regtype == RegTypeGeneral)
3461 ARM_MOV_REG_REG (code, inst->dreg, ainfo->reg);
3462 else if (ainfo->regtype == RegTypeFP) {
3463 g_assert_not_reached ();
3464 } else if (ainfo->regtype == RegTypeBase) {
3465 if (arm_is_imm12 (prev_sp_offset + ainfo->offset)) {
3466 ARM_LDR_IMM (code, inst->dreg, ARMREG_SP, (prev_sp_offset + ainfo->offset));
/* NOTE(review): likely bug — the imm12 guard above tested
 * prev_sp_offset + ainfo->offset, but this fallback loads
 * inst->inst_offset into ip.  It should presumably be
 * prev_sp_offset + ainfo->offset; confirm against upstream. */
3468 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
3469 ARM_LDR_REG_REG (code, inst->dreg, ARMREG_SP, ARMREG_IP);
3472 g_assert_not_reached ();
3474 if (cfg->verbose_level > 2)
3475 g_print ("Argument %d assigned to register %s\n", pos, mono_arch_regname (inst->dreg));
3477 /* the argument should be put on the stack: FIXME handle size != word */
3478 if (ainfo->regtype == RegTypeGeneral) {
/* argument arrived in a core register: store it with the width-
 * appropriate str variant (strb/strh/str, 8-byte args use two str) */
3479 switch (ainfo->size) {
3481 if (arm_is_imm12 (inst->inst_offset))
3482 ARM_STRB_IMM (code, ainfo->reg, inst->inst_basereg, inst->inst_offset);
3484 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
3485 ARM_STRB_REG_REG (code, ainfo->reg, inst->inst_basereg, ARMREG_IP);
3489 if (arm_is_imm8 (inst->inst_offset)) {
3490 ARM_STRH_IMM (code, ainfo->reg, inst->inst_basereg, inst->inst_offset);
3492 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
3493 ARM_STRH_REG_REG (code, ainfo->reg, inst->inst_basereg, ARMREG_IP);
3497 g_assert (arm_is_imm12 (inst->inst_offset));
3498 ARM_STR_IMM (code, ainfo->reg, inst->inst_basereg, inst->inst_offset);
3499 g_assert (arm_is_imm12 (inst->inst_offset + 4));
3500 ARM_STR_IMM (code, ainfo->reg + 1, inst->inst_basereg, inst->inst_offset + 4);
3503 if (arm_is_imm12 (inst->inst_offset)) {
3504 ARM_STR_IMM (code, ainfo->reg, inst->inst_basereg, inst->inst_offset);
3506 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
3507 ARM_STR_REG_REG (code, ainfo->reg, inst->inst_basereg, ARMREG_IP);
/* RegTypeBaseGen: a 64-bit arg split between r3 and the caller stack */
3511 } else if (ainfo->regtype == RegTypeBaseGen) {
3512 g_assert (arm_is_imm12 (prev_sp_offset + ainfo->offset));
3513 g_assert (arm_is_imm12 (inst->inst_offset));
3514 ARM_LDR_IMM (code, ARMREG_LR, ARMREG_SP, (prev_sp_offset + ainfo->offset));
3515 ARM_STR_IMM (code, ARMREG_LR, inst->inst_basereg, inst->inst_offset + 4);
3516 ARM_STR_IMM (code, ARMREG_R3, inst->inst_basereg, inst->inst_offset);
/* RegTypeBase: argument arrived on the caller's stack; copy via lr */
3517 } else if (ainfo->regtype == RegTypeBase) {
3518 if (arm_is_imm12 (prev_sp_offset + ainfo->offset)) {
3519 ARM_LDR_IMM (code, ARMREG_LR, ARMREG_SP, (prev_sp_offset + ainfo->offset));
3521 code = mono_arm_emit_load_imm (code, ARMREG_IP, prev_sp_offset + ainfo->offset);
3522 ARM_LDR_REG_REG (code, ARMREG_LR, ARMREG_SP, ARMREG_IP);
3525 switch (ainfo->size) {
3527 if (arm_is_imm8 (inst->inst_offset)) {
3528 ARM_STRB_IMM (code, ARMREG_LR, inst->inst_basereg, inst->inst_offset);
3530 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
3531 ARM_STRB_REG_REG (code, ARMREG_LR, inst->inst_basereg, ARMREG_IP);
3535 if (arm_is_imm8 (inst->inst_offset)) {
3536 ARM_STRH_IMM (code, ARMREG_LR, inst->inst_basereg, inst->inst_offset);
3538 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
3539 ARM_STRH_REG_REG (code, ARMREG_LR, inst->inst_basereg, ARMREG_IP);
3543 if (arm_is_imm12 (inst->inst_offset)) {
3544 ARM_STR_IMM (code, ARMREG_LR, inst->inst_basereg, inst->inst_offset);
3546 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
3547 ARM_STR_REG_REG (code, ARMREG_LR, inst->inst_basereg, ARMREG_IP);
/* 8-byte stack arg: copy the second word the same way */
3549 if (arm_is_imm12 (prev_sp_offset + ainfo->offset + 4)) {
3550 ARM_LDR_IMM (code, ARMREG_LR, ARMREG_SP, (prev_sp_offset + ainfo->offset + 4));
3552 code = mono_arm_emit_load_imm (code, ARMREG_IP, prev_sp_offset + ainfo->offset + 4);
3553 ARM_LDR_REG_REG (code, ARMREG_LR, ARMREG_SP, ARMREG_IP);
3555 if (arm_is_imm12 (inst->inst_offset + 4)) {
3556 ARM_STR_IMM (code, ARMREG_LR, inst->inst_basereg, inst->inst_offset + 4);
3558 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset + 4);
3559 ARM_STR_REG_REG (code, ARMREG_LR, inst->inst_basereg, ARMREG_IP);
3563 if (arm_is_imm12 (inst->inst_offset)) {
3564 ARM_STR_IMM (code, ARMREG_LR, inst->inst_basereg, inst->inst_offset);
3566 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
3567 ARM_STR_REG_REG (code, ARMREG_LR, inst->inst_basereg, ARMREG_IP);
3571 } else if (ainfo->regtype == RegTypeFP) {
3572 g_assert_not_reached ();
/* struct passed (partially) in registers: store the register part
 * word by word, then memcpy any remainder from the caller's frame */
3573 } else if (ainfo->regtype == RegTypeStructByVal) {
3574 int doffset = inst->inst_offset;
3578 if (mono_class_from_mono_type (inst->inst_vtype))
3579 size = mono_class_native_size (mono_class_from_mono_type (inst->inst_vtype), NULL);
3580 for (cur_reg = 0; cur_reg < ainfo->size; ++cur_reg) {
3581 if (arm_is_imm12 (doffset)) {
3582 ARM_STR_IMM (code, ainfo->reg + cur_reg, inst->inst_basereg, doffset);
3584 code = mono_arm_emit_load_imm (code, ARMREG_IP, doffset);
3585 ARM_STR_REG_REG (code, ainfo->reg + cur_reg, inst->inst_basereg, ARMREG_IP);
3587 soffset += sizeof (gpointer);
3588 doffset += sizeof (gpointer);
3590 if (ainfo->vtsize) {
3591 /* FIXME: handle overrun! with struct sizes not multiple of 4 */
3592 //g_print ("emit_memcpy (prev_sp_ofs: %d, ainfo->offset: %d, soffset: %d)\n", prev_sp_offset, ainfo->offset, soffset);
3593 code = emit_memcpy (code, ainfo->vtsize * sizeof (gpointer), inst->inst_basereg, doffset, ARMREG_SP, prev_sp_offset + ainfo->offset);
3595 } else if (ainfo->regtype == RegTypeStructByAddr) {
3596 g_assert_not_reached ();
3597 /* FIXME: handle overrun! with struct sizes not multiple of 4 */
3598 code = emit_memcpy (code, ainfo->vtsize * sizeof (gpointer), inst->inst_basereg, inst->inst_offset, ainfo->reg, 0);
3600 g_assert_not_reached ();
/* link the MonoLMF built on the stack into the thread's LMF list */
3605 if (method->save_lmf) {
3607 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_INTERNAL_METHOD,
3608 (gpointer)"mono_get_lmf_addr");
3609 code = emit_call_seq (cfg, code);
3610 /* we build the MonoLMF structure on the stack - see mini-arm.h */
3611 /* lmf_offset is the offset from the previous stack pointer,
3612 * alloc_size is the total stack space allocated, so the offset
3613 * of MonoLMF from the current stack ptr is alloc_size - lmf_offset.
3614 * The pointer to the struct is put in r1 (new_lmf).
3615 * r2 is used as scratch
3616 * The callee-saved registers are already in the MonoLMF structure
3618 code = emit_big_add (code, ARMREG_R1, ARMREG_SP, alloc_size - lmf_offset);
3619 /* r0 is the result from mono_get_lmf_addr () */
3620 ARM_STR_IMM (code, ARMREG_R0, ARMREG_R1, G_STRUCT_OFFSET (MonoLMF, lmf_addr));
3621 /* new_lmf->previous_lmf = *lmf_addr */
3622 ARM_LDR_IMM (code, ARMREG_R2, ARMREG_R0, G_STRUCT_OFFSET (MonoLMF, previous_lmf));
3623 ARM_STR_IMM (code, ARMREG_R2, ARMREG_R1, G_STRUCT_OFFSET (MonoLMF, previous_lmf));
3624 /* *(lmf_addr) = r1 */
3625 ARM_STR_IMM (code, ARMREG_R1, ARMREG_R0, G_STRUCT_OFFSET (MonoLMF, previous_lmf))
3626 /* Skip method (only needed for trampoline LMF frames) */
3627 ARM_STR_IMM (code, ARMREG_SP, ARMREG_R1, G_STRUCT_OFFSET (MonoLMF, ebp));
3628 /* save the current IP */
3629 ARM_MOV_REG_REG (code, ARMREG_R2, ARMREG_PC);
3630 ARM_STR_IMM (code, ARMREG_R2, ARMREG_R1, G_STRUCT_OFFSET (MonoLMF, eip));
3634 code = mono_arch_instrument_prolog (cfg, mono_trace_enter_method, code, TRUE);
3636 cfg->code_len = code - cfg->native_code;
3637 g_assert (cfg->code_len < cfg->code_size);
/*
 * mono_arch_emit_epilog:
 * Emit the method epilog: optional leave-method tracing, unlink the
 * MonoLMF frame when method->save_lmf is set, free the stack frame and
 * restore callee-saved registers, returning by popping pc.
 * Must stay in sync with the OP_JMP tail-call path (see comment below).
 * NOTE(review): this listing elides interior lines (gaps in the embedded
 * line numbers); comments describe only the visible code.
 */
3644 mono_arch_emit_epilog (MonoCompile *cfg)
3646 MonoMethod *method = cfg->method;
3647 int pos, i, rot_amount;
3648 int max_epilog_size = 16 + 20*4;
/* conservative upper bound on the epilog size, used to grow the buffer */
3651 if (cfg->method->save_lmf)
3652 max_epilog_size += 128;
3654 if (mono_jit_trace_calls != NULL)
3655 max_epilog_size += 50;
3657 if (cfg->prof_options & MONO_PROFILE_ENTER_LEAVE)
3658 max_epilog_size += 50;
3660 while (cfg->code_len + max_epilog_size > (cfg->code_size - 16)) {
3661 cfg->code_size *= 2;
3662 cfg->native_code = g_realloc (cfg->native_code, cfg->code_size);
3663 mono_jit_stats.code_reallocs++;
3667 * Keep in sync with OP_JMP
3669 code = cfg->native_code + cfg->code_len;
3671 if (mono_jit_trace_calls != NULL && mono_trace_eval (method)) {
3672 code = mono_arch_instrument_epilog (cfg, mono_trace_leave_method, code, TRUE);
/* save_lmf path: pop the MonoLMF off the thread's LMF list, then
 * restore the callee-saved registers stored inside it */
3676 if (method->save_lmf) {
3678 /* all but r0-r3, sp and pc */
3679 pos += sizeof (MonoLMF) - (4 * 10);
3681 /* r2 contains the pointer to the current LMF */
3682 code = emit_big_add (code, ARMREG_R2, cfg->frame_reg, cfg->stack_usage - lmf_offset);
3683 /* ip = previous_lmf */
3684 ARM_LDR_IMM (code, ARMREG_IP, ARMREG_R2, G_STRUCT_OFFSET (MonoLMF, previous_lmf));
3686 ARM_LDR_IMM (code, ARMREG_LR, ARMREG_R2, G_STRUCT_OFFSET (MonoLMF, lmf_addr));
3687 /* *(lmf_addr) = previous_lmf */
3688 ARM_STR_IMM (code, ARMREG_IP, ARMREG_LR, G_STRUCT_OFFSET (MonoLMF, previous_lmf));
3689 /* FIXME: speedup: there is no actual need to restore the registers if
3690 * we didn't actually change them (idea from Zoltan).
3693 /* point sp at the registers to restore: 10 is 14 -4, because we skip r0-r3 */
3694 ARM_ADD_REG_IMM8 (code, ARMREG_SP, ARMREG_R2, (sizeof (MonoLMF) - 10 * sizeof (gulong)));
3695 ARM_POP_NWB (code, 0xaff0); /* restore ip to sp and lr to pc */
/* non-LMF path: unwind sp past the frame, then pop saved regs + pc */
3697 if ((i = mono_arm_is_rotated_imm8 (cfg->stack_usage, &rot_amount)) >= 0) {
3698 ARM_ADD_REG_IMM (code, ARMREG_SP, cfg->frame_reg, i, rot_amount);
/* NOTE(review): asymmetry — the imm path above adds to cfg->frame_reg
 * while this fallback adds ip to sp itself; these only agree when
 * frame_reg == sp.  Confirm against upstream. */
3700 code = mono_arm_emit_load_imm (code, ARMREG_IP, cfg->stack_usage);
3701 ARM_ADD_REG_REG (code, ARMREG_SP, ARMREG_SP, ARMREG_IP);
3703 /* FIXME: add v4 thumb interworking support */
3704 ARM_POP_NWB (code, cfg->used_int_regs | ((1 << ARMREG_SP) | (1 << ARMREG_PC)));
3707 cfg->code_len = code - cfg->native_code;
3709 g_assert (cfg->code_len < cfg->code_size);
3713 /* remove once throw_exception_by_name is eliminated */
/*
 * exception_id_by_name:
 * Map the class name of a corlib exception raised by implicit checks
 * to its MONO_EXC_* intrinsic id. Aborts (g_error) on an unknown name,
 * since callers only pass the fixed set of intrinsic exception names.
 */
3715 exception_id_by_name (const char *name)
3717 if (strcmp (name, "IndexOutOfRangeException") == 0)
3718 return MONO_EXC_INDEX_OUT_OF_RANGE;
3719 if (strcmp (name, "OverflowException") == 0)
3720 return MONO_EXC_OVERFLOW;
3721 if (strcmp (name, "ArithmeticException") == 0)
3722 return MONO_EXC_ARITHMETIC;
3723 if (strcmp (name, "DivideByZeroException") == 0)
3724 return MONO_EXC_DIVIDE_BY_ZERO;
3725 if (strcmp (name, "InvalidCastException") == 0)
3726 return MONO_EXC_INVALID_CAST;
3727 if (strcmp (name, "NullReferenceException") == 0)
3728 return MONO_EXC_NULL_REF;
3729 if (strcmp (name, "ArrayTypeMismatchException") == 0)
3730 return MONO_EXC_ARRAY_TYPE_MISMATCH;
3731 g_error ("Unknown intrinsic exception %s\n", name);
/*
 * mono_arch_emit_exceptions:
 * Emit the out-of-line throw sequences for implicit exceptions
 * (index-out-of-range, overflow, ...) referenced by MONO_PATCH_INFO_EXC
 * patch entries. Each distinct exception type gets a single throw
 * sequence; later patch sites for the same type just branch to it.
 */
3736 mono_arch_emit_exceptions (MonoCompile *cfg)
3738 MonoJumpInfo *patch_info;
/* exc_throw_pos: address of the emitted throw sequence per exception id;
 * exc_throw_found: whether space was already reserved for that id */
3741 const guint8* exc_throw_pos [MONO_EXC_INTRINS_NUM] = {NULL};
3742 guint8 exc_throw_found [MONO_EXC_INTRINS_NUM] = {0};
3743 int max_epilog_size = 50;
3745 /* count the number of exception infos */
3748 * make sure we have enough space for exceptions
3750 for (patch_info = cfg->patch_info; patch_info; patch_info = patch_info->next) {
3751 if (patch_info->type == MONO_PATCH_INFO_EXC) {
3752 i = exception_id_by_name (patch_info->data.target);
/* reserve space only once per distinct exception type */
3753 if (!exc_throw_found [i]) {
3754 max_epilog_size += 32;
3755 exc_throw_found [i] = TRUE;
/* double the buffer until the throw sequences (plus 16 bytes of slack) fit */
3760 while (cfg->code_len + max_epilog_size > (cfg->code_size - 16)) {
3761 cfg->code_size *= 2;
3762 cfg->native_code = g_realloc (cfg->native_code, cfg->code_size);
3763 mono_jit_stats.code_reallocs++;
3766 code = cfg->native_code + cfg->code_len;
3768 /* add code to raise exceptions */
3769 for (patch_info = cfg->patch_info; patch_info; patch_info = patch_info->next) {
3770 switch (patch_info->type) {
3771 case MONO_PATCH_INFO_EXC: {
3772 MonoClass *exc_class;
3773 unsigned char *ip = patch_info->ip.i + cfg->native_code;
3775 i = exception_id_by_name (patch_info->data.target);
/* a throw sequence for this type already exists: just branch to it */
3776 if (exc_throw_pos [i]) {
3777 arm_patch (ip, exc_throw_pos [i]);
3778 patch_info->type = MONO_PATCH_INFO_NONE;
3781 exc_throw_pos [i] = code;
/* patch the original in-method branch to land on this sequence */
3783 arm_patch (ip, code);
3785 exc_class = mono_class_from_name (mono_defaults.corlib, "System", patch_info->data.name);
3786 g_assert (exc_class);
/* r1 = throwing IP (the LR at the branch site); r0 = type token, loaded
 * PC-relative from the data word emitted below */
3788 ARM_MOV_REG_REG (code, ARMREG_R1, ARMREG_LR);
3789 ARM_LDR_IMM (code, ARMREG_R0, ARMREG_PC, 0);
/* retarget this patch entry at the throw helper (call emission elided in this view) */
3790 patch_info->type = MONO_PATCH_INFO_INTERNAL_METHOD;
3791 patch_info->data.name = "mono_arch_throw_corlib_exception";
3792 patch_info->ip.i = code - cfg->native_code;
/* inline data word read by the PC-relative ldr above */
3794 *(guint32*)(gpointer)code = exc_class->type_token;
3804 cfg->code_len = code - cfg->native_code;
3806 g_assert (cfg->code_len < cfg->code_size);
/* Per-thread JIT TLS initialization hook (body not visible in this view). */
3811 mono_arch_setup_jit_tls_data (MonoJitTlsData *tls)
/* Per-thread JIT TLS teardown hook (body not visible in this view). */
3816 mono_arch_free_jit_tls_data (MonoJitTlsData *tls)
/*
 * mono_arch_emit_this_vret_args:
 * Emit OP_MOVEs that copy the 'this' pointer and (when present) the
 * valuetype-return buffer pointer into fresh vregs and bind them to
 * their ABI registers. The vret buffer is bound to R0; when it is
 * used, 'this' is shifted to R1 (the guarding condition for the shift
 * is elided in this view — presumably vt_reg != -1, TODO confirm).
 */
3821 mono_arch_emit_this_vret_args (MonoCompile *cfg, MonoCallInst *inst, int this_reg, int this_type, int vt_reg)
3824 int this_dreg = ARMREG_R0;
3827 this_dreg = ARMREG_R1;
3829 /* add the this argument */
3830 if (this_reg != -1) {
3832 MONO_INST_NEW (cfg, this, OP_MOVE);
3833 this->type = this_type;
3834 this->sreg1 = this_reg;
3835 this->dreg = mono_regstate_next_int (cfg->rs);
3836 mono_bblock_add_inst (cfg->cbb, this);
3837 mono_call_inst_add_outarg_reg (cfg, inst, this->dreg, this_dreg, FALSE);
/* bind the valuetype return buffer pointer to R0 (guard elided in this view) */
3842 MONO_INST_NEW (cfg, vtarg, OP_MOVE);
3843 vtarg->type = STACK_MP;
3844 vtarg->sreg1 = vt_reg;
3845 vtarg->dreg = mono_regstate_next_int (cfg->rs);
3846 mono_bblock_add_inst (cfg->cbb, vtarg);
3847 mono_call_inst_add_outarg_reg (cfg, inst, vtarg->dreg, ARMREG_R0, FALSE);
/* Arch hook: optional intrinsic replacement for a known method (body not visible in this view). */
3852 mono_arch_get_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
/* Arch hook: debug pretty-printing of an instruction tree node (body not visible in this view). */
3858 mono_arch_print_tree (MonoInst *tree, int arity)
/* Arch hook: optional inline fast path for fetching the current AppDomain (body not visible in this view). */
3863 MonoInst* mono_arch_get_domain_intrinsic (MonoCompile* cfg)
/* Arch hook: optional inline fast path for fetching the current Thread (body not visible in this view). */
3869 mono_arch_get_thread_intrinsic (MonoCompile* cfg)
/* Arch hook: offset of the patchable location within a code sequence (body not visible in this view). */
3875 mono_arch_get_patch_offset (guint8 *code)
/* ARM has no register windows, so there is nothing to flush here. */
3882 mono_arch_flush_register_windows (void)
/*
 * mono_arch_fixup_jinfo:
 * Pack the frame's stack usage (a 4-byte-aligned byte count) into the
 * upper bits of jit_info->used_regs; the low bits presumably hold the
 * saved-register mask (TODO confirm against the unwinder). The assert
 * enforces the encodable maximum of 0xffff words (64KB * 4 bytes).
 */
3887 mono_arch_fixup_jinfo (MonoCompile *cfg)
3889 /* max encoded stack usage is 64KB * 4 */
3890 g_assert ((cfg->stack_usage & ~(0xffff << 2)) == 0);
3891 cfg->jit_info->used_regs |= cfg->stack_usage << 14;
3894 #ifdef MONO_ARCH_HAVE_IMT
/*
 * mono_arch_emit_imt_argument:
 * Under AOT compilation the IMT method pointer cannot be embedded in
 * the code stream, so load it as an AOT constant (patched with
 * MONO_PATCH_INFO_METHODCONST) and pass it to the call in V5.
 * See mono_arch_find_imt_method, which falls back to V5 when the
 * inline data word at the call site is zero.
 */
3897 mono_arch_emit_imt_argument (MonoCompile *cfg, MonoCallInst *call)
3899 if (cfg->compile_aot) {
3900 int method_reg = mono_regstate_next_int (cfg->rs);
3903 MONO_INST_NEW (cfg, ins, OP_AOTCONST);
3904 ins->dreg = method_reg;
3905 ins->inst_p0 = call->method;
3906 ins->inst_c1 = MONO_PATCH_INFO_METHODCONST;
3907 MONO_ADD_INS (cfg->cbb, ins);
/* bind the loaded method pointer to V5 for the call */
3909 mono_call_inst_add_outarg_reg (cfg, call, method_reg, ARMREG_V5, FALSE);
/*
 * mono_arch_find_imt_method:
 * Recover the IMT method at an interface call site. JITted call sites
 * consist of a PC-relative "ldr pc, ..." followed by an inline data
 * word holding the MonoMethod pointer; a zero data word marks AOT
 * code, where the method was passed in V5 instead (see
 * mono_arch_emit_imt_argument).
 */
3914 mono_arch_find_imt_method (gpointer *regs, guint8 *code)
3916 guint32 *code_ptr = (guint32*)code;
3918 /* The IMT value is stored in the code stream right after the LDC instruction. */
/* sanity check: the instruction before the data word must be an ldr into pc */
3919 if (!IS_LDR_PC (code_ptr [0])) {
3920 g_warning ("invalid code stream, instruction before IMT value is not a LDC in %s() (code %p value 0: 0x%x -1: 0x%x -2: 0x%x)", __FUNCTION__, code, code_ptr [2], code_ptr [1], code_ptr [0]);
3921 g_assert (IS_LDR_PC (code_ptr [0]));
3923 if (code_ptr [1] == 0)
3924 /* This is AOTed code, the IMT method is in V5 */
3925 return (MonoMethod*)regs [ARMREG_V5];
3927 return (MonoMethod*) code_ptr [1];
/* Extract the 'this' argument of 'method' from the saved argument registers,
 * delegating the calling-convention details to mono_arch_get_this_arg_from_call. */
3931 mono_arch_find_this_argument (gpointer *regs, MonoMethod *method, MonoGenericSharingContext *gsctx)
3933 return mono_arch_get_this_arg_from_call (gsctx, mono_method_signature (method), (gssize*)regs, NULL);
/* Set to 1 to emit an extra assertion in the IMT thunk when the looked-up
 * method matches no entry. */
3937 #define ENABLE_WRONG_METHOD_CHECK 0
/* Byte sizes of the code fragments emitted by mono_arch_build_imt_thunk,
 * used to pre-compute each entry's chunk size. */
3938 #define BASE_SIZE (6 * 4)
3939 #define BSEARCH_ENTRY_SIZE (4 * 4)
3940 #define CMP_SIZE (3 * 4)
3941 #define BRANCH_SIZE (1 * 4)
3942 #define CALL_SIZE (2 * 4)
3943 #define WMC_SIZE (5 * 4)
/* Byte offset of B relative to A (both truncated to 32-bit signed ints). */
3944 #define DISTANCE(A, B) (((gint32)(B)) - ((gint32)(A)))
/*
 * arm_emit_value_and_patch_ldr:
 * Emit a literal data word at 'code' and patch the earlier PC-relative
 * ldr at 'target' so its 12-bit immediate offset field addresses it
 * (the value-store and offset-adjustment lines are elided in this
 * view — TODO confirm the PC+8 pipeline correction).
 * NOTE(review): 'delta' is unsigned (guint32), so the 'delta >= 0'
 * half of the assertion is always true; a negative distance would
 * wrap around and is only caught if the wrapped value exceeds 0xFFF.
 * A signed type would make the lower-bound check meaningful.
 */
3947 arm_emit_value_and_patch_ldr (arminstr_t *code, arminstr_t *target, guint32 value)
3949 guint32 delta = DISTANCE (target, code);
3951 g_assert (delta >= 0 && delta <= 0xFFF);
3952 *target = *target | delta;
/*
 * mono_arch_build_imt_thunk:
 * Build the IMT trampoline for a vtable: a code blob that loads the
 * incoming IMT method pointer (from the data word after the call
 * site, or from V5 when the word is zero, i.e. AOT code), compares it
 * against the entries' methods — linearly within a chain, with
 * binary-search pivot nodes between chains — and jumps through the
 * matching vtable slot. Three passes: size each entry's chunk, emit
 * the code, then patch branch targets and backfill literal words.
 */
3958 mono_arch_build_imt_thunk (MonoVTable *vtable, MonoDomain *domain, MonoIMTCheckItem **imt_entries, int count)
3960 int size, i, extra_space = 0;
3961 arminstr_t *code, *start, *vtable_target = NULL;
/* pass 1: accumulate each entry's chunk size into 'size' */
3964 for (i = 0; i < count; ++i) {
3965 MonoIMTCheckItem *item = imt_entries [i];
3966 if (item->is_equals) {
/* every vtable slot must be reachable with an ldr's 12-bit immediate */
3967 g_assert (arm_is_imm12 (DISTANCE (vtable, &vtable->vtable[item->vtable_slot])));
3969 if (item->check_target_idx) {
3970 if (!item->compare_done)
3971 item->chunk_size += CMP_SIZE;
3972 item->chunk_size += BRANCH_SIZE;
3974 #if ENABLE_WRONG_METHOD_CHECK
3975 item->chunk_size += WMC_SIZE;
3978 item->chunk_size += CALL_SIZE;
/* binary-search pivot node */
3980 item->chunk_size += BSEARCH_ENTRY_SIZE;
/* the pivot already compared against this method: skip the CMP there */
3981 imt_entries [item->check_target_idx]->compare_done = TRUE;
3983 size += item->chunk_size;
/* allocate the thunk from the domain's code manager */
3986 start = code = mono_code_manager_reserve (domain->code_mp, size);
/* debug dump of the planned thunk layout */
3989 printf ("building IMT thunk for class %s %s entries %d code size %d code at %p end %p vtable %p\n", vtable->klass->name_space, vtable->klass->name, count, size, start, ((guint8*)start) + size, vtable);
3990 for (i = 0; i < count; ++i) {
3991 MonoIMTCheckItem *item = imt_entries [i];
3992 printf ("method %d (%p) %s vtable slot %p is_equals %d chunk size %d\n", i, item->method, item->method->name, &vtable->vtable [item->vtable_slot], item->is_equals, item->chunk_size);
/* thunk prologue: save R0/R1, fetch the IMT method pointer stored right
 * after the call instruction (see mono_arch_find_imt_method) */
3996 ARM_PUSH2 (code, ARMREG_R0, ARMREG_R1);
3997 ARM_LDR_IMM (code, ARMREG_R0, ARMREG_LR, -4);
/* placeholder PC-relative ldr, patched below to load the vtable address into IP */
3998 vtable_target = code;
3999 ARM_LDR_IMM (code, ARMREG_IP, ARMREG_PC, 0);
4001 /* R0 == 0 means we are called from AOT code. In this case, V5 contains the IMT method */
4002 ARM_CMP_REG_IMM8 (code, ARMREG_R0, 0);
4003 ARM_MOV_REG_REG_COND (code, ARMREG_R0, ARMREG_V5, ARMCOND_EQ);
/* pass 2: emit the compare/branch chains and the vtable jumps */
4005 for (i = 0; i < count; ++i) {
4006 MonoIMTCheckItem *item = imt_entries [i];
4007 arminstr_t *imt_method = NULL;
4008 item->code_target = (guint8*)code;
4010 if (item->is_equals) {
4011 if (item->check_target_idx) {
4012 if (!item->compare_done) {
/* compare R0 against this entry's method (literal word backfilled in pass 3) */
4014 ARM_LDR_IMM (code, ARMREG_R1, ARMREG_PC, 0);
4015 ARM_CMP_REG_REG (code, ARMREG_R0, ARMREG_R1);
/* branch to the next check on mismatch; target patched in pass 3 */
4017 item->jmp_code = (guint8*)code;
4018 ARM_B_COND (code, ARMCOND_NE, 0);
/* match: restore scratch regs and jump through the vtable slot (IP = vtable) */
4020 ARM_POP2 (code, ARMREG_R0, ARMREG_R1);
4021 ARM_LDR_IMM (code, ARMREG_PC, ARMREG_IP, DISTANCE (vtable, &vtable->vtable[item->vtable_slot]));
4023 /*Enable the commented code to assert on wrong method*/
4024 #if ENABLE_WRONG_METHOD_CHECK
4026 ARM_LDR_IMM (code, ARMREG_R1, ARMREG_PC, 0);
4027 ARM_CMP_REG_REG (code, ARMREG_R0, ARMREG_R1);
4028 ARM_B_COND (code, ARMCOND_NE, 1);
4030 ARM_POP2 (code, ARMREG_R0, ARMREG_R1);
4031 ARM_LDR_IMM (code, ARMREG_PC, ARMREG_IP, DISTANCE (vtable, &vtable->vtable[item->vtable_slot]));
4033 #if ENABLE_WRONG_METHOD_CHECK
/* emit this entry's method literal and patch the ldr that reads it */
4039 code = arm_emit_value_and_patch_ldr (code, imt_method, (guint32)item->method);
4041 /*must emit after unconditional branch*/
4042 if (vtable_target) {
4043 code = arm_emit_value_and_patch_ldr (code, vtable_target, (guint32)vtable);
4044 item->chunk_size += 4;
4045 vtable_target = NULL;
4048 /*We reserve the space for bsearch IMT values after the first entry with an absolute jump*/
4050 code += extra_space;
/* binary-search pivot: compare against the pivot method and branch right on GE */
4054 ARM_LDR_IMM (code, ARMREG_R1, ARMREG_PC, 0);
4055 ARM_CMP_REG_REG (code, ARMREG_R0, ARMREG_R1);
4057 item->jmp_code = (guint8*)code;
4058 ARM_B_COND (code, ARMCOND_GE, 0);
/* pass 3: patch branch targets, then backfill the literal words of the
 * bsearch nodes preceding each equals entry */
4063 for (i = 0; i < count; ++i) {
4064 MonoIMTCheckItem *item = imt_entries [i];
4065 if (item->jmp_code) {
4066 if (item->check_target_idx)
4067 arm_patch (item->jmp_code, imt_entries [item->check_target_idx]->code_target);
4069 if (i > 0 && item->is_equals) {
4071 arminstr_t *space_start = (arminstr_t*)(item->code_target + item->chunk_size);
4072 for (j = i - 1; j >= 0 && !imt_entries [j]->is_equals; --j) {
4073 space_start = arm_emit_value_and_patch_ldr (space_start, (arminstr_t*)imt_entries [j]->code_target, (guint32)imt_entries [j]->method);
/* optional disassembly dump of the finished thunk */
4080 char *buff = g_strdup_printf ("thunk_for_class_%s_%s_entries_%d", vtable->klass->name_space, vtable->klass->name, count);
4081 mono_disassemble_code (NULL, (guint8*)start, size, buff);
/* make the new code visible to the instruction fetch path */
4086 mono_arch_flush_icache ((guint8*)start, size);
4087 mono_stats.imt_thunks_size += code - start;
4089 g_assert (DISTANCE (start, code) <= size);
4096 mono_arch_context_get_int_reg (MonoContext *ctx, int reg)
4098 /* FIXME: implement */
4099 g_assert_not_reached ();