2 * mini-arm.c: ARM backend for the Mono code generator
5 * Paolo Molaro (lupus@ximian.com)
6 * Dietmar Maurer (dietmar@ximian.com)
8 * (C) 2003 Ximian, Inc.
13 #include <mono/metadata/appdomain.h>
14 #include <mono/metadata/debug-helpers.h>
21 #include "mono/arch/arm/arm-fpa-codegen.h"
22 #elif defined(ARM_FPU_VFP)
23 #include "mono/arch/arm/arm-vfp-codegen.h"
/* TLS offsets for the LMF (last managed frame); -1 means not yet resolved. */
26 static gint lmf_tls_offset = -1;
27 static gint lmf_addr_tls_offset = -1;
29 /* This mutex protects architecture specific caches */
30 #define mono_mini_arch_lock() EnterCriticalSection (&mini_arch_mutex)
31 #define mono_mini_arch_unlock() LeaveCriticalSection (&mini_arch_mutex)
32 static CRITICAL_SECTION mini_arch_mutex;
/* CPU feature flags, detected at runtime in mono_arch_cpu_optimizazions (). */
34 static int v5_supported = 0;
35 static int thumb_supported = 0;
39 * floating point support: on ARM it is a mess, there are at least 3
40 * different setups, each of which is binary incompatible with the others.
41 * 1) FPA: old and ugly, but unfortunately what current distros use
42 * the double binary format has the two words swapped. 8 double registers.
43 * Implemented usually by kernel emulation.
44 * 2) softfloat: the compiler emulates all the fp ops. Usually uses the
45 * ugly swapped double format (I guess a softfloat-vfp exists, too, though).
46 * 3) VFP: the new and actually sensible and useful FP support. Implemented
47 * in HW or kernel-emulated, requires new tools. I think this is what symbian uses.
49 * The plan is to write the FPA support first. softfloat can be tested in a chroot.
51 int mono_exc_esp_offset = 0;
/* Range predicates for ARM load/store immediate offsets:
 * imm12 for word LDR/STR, imm8 for the halfword/byte forms, and
 * fpimm8 for FP load/store offsets (+/-1020 range). */
53 #define arm_is_imm12(v) ((v) > -4096 && (v) < 4096)
54 #define arm_is_imm8(v) ((v) > -256 && (v) < 256)
55 #define arm_is_fpimm8(v) ((v) >= -1020 && (v) <= 1020)
/* Mask/value pair to recognize a "ldr pc, [reg + off]" instruction encoding. */
57 #define LDR_MASK ((0xf << ARMCOND_SHIFT) | (3 << 26) | (1 << 22) | (1 << 20) | (15 << 12))
58 #define LDR_PC_VAL ((ARMCOND_AL << ARMCOND_SHIFT) | (1 << 26) | (0 << 22) | (1 << 20) | (15 << 12))
59 #define IS_LDR_PC(val) (((val) & LDR_MASK) == LDR_PC_VAL)
/* Exact encodings of "add lr, pc, #4" and "mov lr, pc", used below to
 * identify indirect-call sequences when walking back from a return address. */
61 #define ADD_LR_PC_4 ((ARMCOND_AL << ARMCOND_SHIFT) | (1 << 25) | (1 << 23) | (ARMREG_PC << 16) | (ARMREG_LR << 12) | 4)
62 #define MOV_LR_PC ((ARMCOND_AL << ARMCOND_SHIFT) | (1 << 24) | (0xa << 20) | (ARMREG_LR << 12) | ARMREG_PC)
/* Returns a human-readable name for integer register 'reg' (valid range 0-15). */
66 mono_arch_regname (int reg)
68 static const char * rnames[] = {
69 "arm_r0", "arm_r1", "arm_r2", "arm_r3", "arm_v1",
70 "arm_v2", "arm_v3", "arm_v4", "arm_v5", "arm_v6",
71 "arm_v7", "arm_fp", "arm_ip", "arm_sp", "arm_lr",
74 if (reg >= 0 && reg < 16)
/* Returns a human-readable name for floating point register 'reg' (valid range 0-31). */
80 mono_arch_fregname (int reg)
82 static const char * rnames[] = {
83 "arm_f0", "arm_f1", "arm_f2", "arm_f3", "arm_f4",
84 "arm_f5", "arm_f6", "arm_f7", "arm_f8", "arm_f9",
85 "arm_f10", "arm_f11", "arm_f12", "arm_f13", "arm_f14",
86 "arm_f15", "arm_f16", "arm_f17", "arm_f18", "arm_f19",
87 "arm_f20", "arm_f21", "arm_f22", "arm_f23", "arm_f24",
88 "arm_f25", "arm_f26", "arm_f27", "arm_f28", "arm_f29",
91 if (reg >= 0 && reg < 32)
/* Emits code computing dreg = sreg + imm. Uses a single ADD when imm fits
 * the rotated 8-bit immediate encoding; otherwise materializes imm into dreg
 * first, which is why dreg must differ from sreg on that path. */
97 emit_big_add (guint8 *code, int dreg, int sreg, int imm)
100 if ((imm8 = mono_arm_is_rotated_imm8 (imm, &rot_amount)) >= 0) {
101 ARM_ADD_REG_IMM (code, dreg, sreg, imm8, rot_amount);
/* imm does not fit the encoding: load it into dreg, then add sreg. */
104 g_assert (dreg != sreg);
105 code = mono_arm_emit_load_imm (code, dreg, imm);
106 ARM_ADD_REG_REG (code, dreg, dreg, sreg);
/* Emits a word-by-word copy of 'size' bytes from sreg+soffset to dreg+doffset.
 * Large copies use an r0-r3 based loop; small ones use unrolled LR loads/stores.
 * NOTE(review): the final g_assert (size == 0) suggests size must be a multiple
 * of the word size in the visible paths — confirm against the elided lines. */
111 emit_memcpy (guint8 *code, int size, int dreg, int doffset, int sreg, int soffset)
113 /* we can use r0-r3, since this is called only for incoming args on the stack */
114 if (size > sizeof (gpointer) * 4) {
/* Big copy: r0 = src, r1 = dst, r2 = count; loop until SUBS hits zero. */
116 code = emit_big_add (code, ARMREG_R0, sreg, soffset);
117 code = emit_big_add (code, ARMREG_R1, dreg, doffset);
118 start_loop = code = mono_arm_emit_load_imm (code, ARMREG_R2, size);
119 ARM_LDR_IMM (code, ARMREG_R3, ARMREG_R0, 0);
120 ARM_STR_IMM (code, ARMREG_R3, ARMREG_R1, 0);
121 ARM_ADD_REG_IMM8 (code, ARMREG_R0, ARMREG_R0, 4);
122 ARM_ADD_REG_IMM8 (code, ARMREG_R1, ARMREG_R1, 4);
123 ARM_SUBS_REG_IMM8 (code, ARMREG_R2, ARMREG_R2, 4);
/* Branch back to the loop head; target patched just below. */
124 ARM_B_COND (code, ARMCOND_NE, 0);
125 arm_patch (code - 4, start_loop);
/* Small copy, offsets fit the imm12 addressing mode: go through LR directly. */
128 if (arm_is_imm12 (doffset) && arm_is_imm12 (doffset + size) &&
129 arm_is_imm12 (soffset) && arm_is_imm12 (soffset + size)) {
131 ARM_LDR_IMM (code, ARMREG_LR, sreg, soffset);
132 ARM_STR_IMM (code, ARMREG_LR, dreg, doffset);
/* Offsets too large for imm12: compute the addresses into r0/r1 first. */
138 code = emit_big_add (code, ARMREG_R0, sreg, soffset);
139 code = emit_big_add (code, ARMREG_R1, dreg, doffset);
140 doffset = soffset = 0;
142 ARM_LDR_IMM (code, ARMREG_LR, ARMREG_R0, soffset);
143 ARM_STR_IMM (code, ARMREG_LR, ARMREG_R1, doffset);
149 g_assert (size == 0);
/* Emits an indirect call through 'reg'. Uses BLX when available (presumably
 * gated on v5_supported in the elided condition — TODO confirm); otherwise
 * falls back to the classic "mov lr, pc; mov pc, reg" pair. */
154 emit_call_reg (guint8 *code, int reg)
157 ARM_BLX_REG (code, reg);
159 ARM_MOV_REG_REG (code, ARMREG_LR, ARMREG_PC);
163 ARM_MOV_REG_REG (code, ARMREG_PC, reg);
/* Emits a call sequence. For dynamic methods the target address lives in an
 * inline literal slot (patched later, initialized to NULL here) that is loaded
 * into IP and called indirectly. */
169 emit_call_seq (MonoCompile *cfg, guint8 *code)
171 if (cfg->method->dynamic) {
172 ARM_LDR_IMM (code, ARMREG_IP, ARMREG_PC, 0);
/* Reserve the literal slot; the real target is patched in afterwards. */
174 *(gpointer*)code = NULL;
176 code = emit_call_reg (code, ARMREG_IP);
/* Moves a call's return value into ins->dreg. Visible case: FP calls on the
 * FPA unit return in F0, so copy F0 to dreg when they differ. */
184 emit_move_return_value (MonoCompile *cfg, MonoInst *ins, guint8 *code)
186 switch (ins->opcode) {
189 case OP_FCALL_MEMBASE:
191 if (ins->dreg != ARM_FPA_F0)
192 ARM_MVFD (code, ins->dreg, ARM_FPA_F0);
201 * mono_arch_get_argument_info:
202 * @csig: a method signature
203 * @param_count: the number of parameters to consider
204 * @arg_info: an array to store the result infos
206 * Gathers information on parameters such as size, alignment and
207 padding. arg_info should be large enough to hold param_count + 1 entries.
209 * Returns the size of the activation frame.
/* Computes size/alignment/padding info for each parameter of 'csig' into
 * arg_info [0..param_count] and returns the activation frame size.
 * arg_info [0] describes the implicit portion (this/struct-return). */
212 mono_arch_get_argument_info (MonoMethodSignature *csig, int param_count, MonoJitArgumentInfo *arg_info)
214 int k, frame_size = 0;
215 guint32 size, align, pad;
/* Struct returns consume one hidden pointer slot. */
218 if (MONO_TYPE_ISSTRUCT (csig->ret)) {
219 frame_size += sizeof (gpointer);
223 arg_info [0].offset = offset;
226 frame_size += sizeof (gpointer);
230 arg_info [0].size = frame_size;
232 for (k = 0; k < param_count; k++) {
233 size = mini_type_stack_size_full (NULL, csig->params [k], &align, csig->pinvoke);
235 /* ignore alignment for now */
/* Pad the running frame size up to this argument's alignment. */
238 frame_size += pad = (align - (frame_size & (align - 1))) & (align - 1);
239 arg_info [k].pad = pad;
241 arg_info [k + 1].pad = 0;
242 arg_info [k + 1].size = size;
244 arg_info [k + 1].offset = offset;
/* Round the whole frame up to the platform frame alignment. */
248 align = MONO_ARCH_FRAME_ALIGNMENT;
249 frame_size += pad = (align - (frame_size & (align - 1))) & (align - 1);
250 arg_info [k].pad = pad;
/* Decodes a "ldr pc, [rN +/- #off]" instruction word: extracts the base
 * register number (bits 16-19) and the 12-bit offset, negating the offset
 * when the U bit (bit 23) is clear, and stores it into *displacement. */
256 decode_vcall_slot_from_ldr (guint32 ldr, gpointer *regs, int *displacement)
260 reg = (ldr >> 16 ) & 0xf;
261 offset = ldr & 0xfff;
262 if (((ldr >> 23) & 1) == 0) /*U bit, 0 means negative and 1 positive*/
264 /*g_print ("found vcall at r%d + %d for code at %p 0x%x\n", reg, offset, code, *code);*/
267 *displacement = offset;
/* Given a return address 'code_ptr', identifies whether the preceding
 * instructions form a vtable/interface call and, if so, decodes the base
 * register contents and slot displacement of the call. */
272 mono_arch_get_vcall_slot (guint8 *code_ptr, gpointer *regs, int *displacement)
274 guint32* code = (guint32*)code_ptr;
276 /* Locate the address of the method-specific trampoline. The call using
277 the vtable slot that took the processing flow to 'arch_create_jit_trampoline'
278 looks something like this:
287 The call sequence could be also:
290 function pointer literal
294 Note that on ARM5+ we can use one instruction instead of the last two.
295 Therefore, we need to locate the 'ldr rA' instruction to know which
296 register was used to hold the method address.
299 /* This is the instruction after "ldr pc, xxx", "mov pc, xxx" or "bl xxx" could be either the IMT value or some other instruction*/
302 /* Three possible code sequences can happen here:
306 * ldr pc, [rX - #offset]
312 * ldr pc, [rX - #offset]
314 * direct branch with bl:
318 * direct branch with mov:
322 * We only need to identify interface and virtual calls, the others can be ignored.
/* Pattern 1: "add lr, pc, #4" followed by "ldr pc, [...]". */
325 if (IS_LDR_PC (code [-1]) && code [-2] == ADD_LR_PC_4)
326 return decode_vcall_slot_from_ldr (code [-1], regs, displacement);
/* Pattern 2: "mov lr, pc" followed by "ldr pc, [...]". */
328 if (IS_LDR_PC (code [0]) && code [-1] == MOV_LR_PC)
329 return decode_vcall_slot_from_ldr (code [0], regs, displacement);
/* Convenience wrapper: returns the address of the vtable slot used by the
 * call preceding 'code', i.e. base + displacement from mono_arch_get_vcall_slot. */
335 mono_arch_get_vcall_slot_addr (guint8* code, gpointer *regs)
339 vt = mono_arch_get_vcall_slot (code, regs, &displacement);
342 return (gpointer*)((char*)vt + displacement);
345 #define MAX_ARCH_DELEGATE_PARAMS 3
/* Returns (and caches, guarded by mini_arch_mutex) a small thunk implementing
 * delegate Invoke. With a target: replaces 'this' with the delegate target and
 * jumps to method_ptr. Without a target: shifts the register arguments down by
 * one (at most MAX_ARCH_DELEGATE_PARAMS regsize params) and jumps to method_ptr. */
348 mono_arch_get_delegate_invoke_impl (MonoMethodSignature *sig, gboolean has_target)
350 guint8 *code, *start;
352 /* FIXME: Support more cases */
353 if (MONO_TYPE_ISSTRUCT (sig->ret))
356 static guint8* cached = NULL;
358 mono_mini_arch_lock ();
360 mono_mini_arch_unlock ();
/* 3 instructions = 12 bytes for the has_target thunk. */
364 start = code = mono_global_codeman_reserve (12);
366 /* Replace the this argument with the target */
367 ARM_LDR_IMM (code, ARMREG_IP, ARMREG_R0, G_STRUCT_OFFSET (MonoDelegate, method_ptr));
368 ARM_LDR_IMM (code, ARMREG_R0, ARMREG_R0, G_STRUCT_OFFSET (MonoDelegate, target));
369 ARM_MOV_REG_REG (code, ARMREG_PC, ARMREG_IP);
371 g_assert ((code - start) <= 12);
373 mono_arch_flush_icache (start, 12);
375 mono_mini_arch_unlock ();
/* No-target case: one cached thunk per parameter count. */
378 static guint8* cache [MAX_ARCH_DELEGATE_PARAMS + 1] = {NULL};
381 if (sig->param_count > MAX_ARCH_DELEGATE_PARAMS)
383 for (i = 0; i < sig->param_count; ++i)
384 if (!mono_is_regsize_var (sig->params [i]))
387 mono_mini_arch_lock ();
388 code = cache [sig->param_count];
390 mono_mini_arch_unlock ();
394 size = 8 + sig->param_count * 4;
395 start = code = mono_global_codeman_reserve (size);
397 ARM_LDR_IMM (code, ARMREG_IP, ARMREG_R0, G_STRUCT_OFFSET (MonoDelegate, method_ptr));
398 /* slide down the arguments */
399 for (i = 0; i < sig->param_count; ++i) {
400 ARM_MOV_REG_REG (code, (ARMREG_R0 + i), (ARMREG_R0 + i + 1));
402 ARM_MOV_REG_REG (code, ARMREG_PC, ARMREG_IP);
404 g_assert ((code - start) <= size);
406 mono_arch_flush_icache (start, size);
407 cache [sig->param_count] = start;
408 mono_mini_arch_unlock ();
/* Recovers the 'this' argument from the saved register state of a call:
 * normally in R0, but shifted to R1 when a hidden struct-return pointer
 * occupies R0. */
416 mono_arch_get_this_arg_from_call (MonoGenericSharingContext *gsctx, MonoMethodSignature *sig, gssize *regs, guint8 *code)
418 /* FIXME: handle returning a struct */
419 if (MONO_TYPE_ISSTRUCT (sig->ret))
420 return (gpointer)regs [ARMREG_R1];
421 return (gpointer)regs [ARMREG_R0];
425 * Initialize the cpu to execute managed code.
428 mono_arch_cpu_init (void)
433 * Initialize architecture specific code.
436 mono_arch_init (void)
/* Create the mutex used by mono_mini_arch_lock ()/unlock (). */
438 InitializeCriticalSection (&mini_arch_mutex);
442 * Cleanup architecture specific code.
445 mono_arch_cleanup (void)
450 * This function returns the optimizations supported on this cpu.
/* (sic: "optimizazions" is the historical name used across mini backends.)
 * Also probes /proc/cpuinfo to set v5_supported and thumb_supported. */
453 mono_arch_cpu_optimizazions (guint32 *exclude_mask)
457 thumb_supported = TRUE;
/* NOTE(review): a NULL check on 'file' before the fgets loop is not visible
 * in this excerpt — verify the elided lines guard the fopen failure. */
462 FILE *file = fopen ("/proc/cpuinfo", "r");
464 while ((line = fgets (buf, 512, file))) {
465 if (strncmp (line, "Processor", 9) == 0) {
466 char *ver = strstr (line, "(v");
467 if (ver && (ver [2] == '5' || ver [2] == '6' || ver [2] == '7')) {
472 if (strncmp (line, "Features", 8) == 0) {
473 char *th = strstr (line, "thumb");
475 thumb_supported = TRUE;
483 /*printf ("features: v5: %d, thumb: %d\n", v5_supported, thumb_supported);*/
487 /* no arm-specific optimizations yet */
/* Returns whether 't' fits in a single 32-bit integer register (pointers,
 * object references, reference generic instances, enum-like valuetypes). */
493 is_regsize_var (MonoType *t) {
496 t = mini_type_get_underlying_type (NULL, t);
503 case MONO_TYPE_FNPTR:
505 case MONO_TYPE_OBJECT:
506 case MONO_TYPE_STRING:
507 case MONO_TYPE_CLASS:
508 case MONO_TYPE_SZARRAY:
509 case MONO_TYPE_ARRAY:
511 case MONO_TYPE_GENERICINST:
/* Reference-type generic instances are pointer sized. */
512 if (!mono_type_generic_inst_is_valuetype (t))
515 case MONO_TYPE_VALUETYPE:
/* Builds the sorted list of local variables eligible for global register
 * allocation: live, non-volatile, non-indirect locals/args of regsize type. */
522 mono_arch_get_allocatable_int_vars (MonoCompile *cfg)
527 for (i = 0; i < cfg->num_varinfo; i++) {
528 MonoInst *ins = cfg->varinfo [i];
529 MonoMethodVar *vmv = MONO_VARINFO (cfg, i);
/* Skip variables with an empty live range. */
532 if (vmv->range.first_use.abs_pos >= vmv->range.last_use.abs_pos)
535 if (ins->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT) || (ins->opcode != OP_LOCAL && ins->opcode != OP_ARG))
538 /* we can only allocate 32 bit values */
539 if (is_regsize_var (ins->inst_vtype)) {
540 g_assert (MONO_VARINFO (cfg, i)->reg == -1);
541 g_assert (i == vmv->idx);
542 vars = mono_varlist_insert_sorted (cfg, vars, vmv, FALSE);
549 #define USE_EXTRA_TEMPS 0
/* Returns the list of callee-saved integer registers usable by the global
 * register allocator: V1-V4 always; V5 only when it is not reserved for
 * AOT / rgctx (vtable/IMT) passing. */
552 mono_arch_get_global_int_regs (MonoCompile *cfg)
555 regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_V1));
556 regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_V2));
557 regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_V3));
558 regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_V4));
559 if (!(cfg->compile_aot || cfg->uses_rgctx_reg))
560 /* V5 is reserved for passing the vtable/rgctx/IMT method */
561 regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_V5));
562 /*regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_V6));*/
563 /*regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_V7));*/
569 * mono_arch_regalloc_cost:
571 * Return the cost, in number of memory references, of the action of
572 * allocating the variable VMV into a register during global register
/* (allocation; body elided in this excerpt). */
576 mono_arch_regalloc_cost (MonoCompile *cfg, MonoMethodVar *vmv)
582 #ifndef __GNUC_PREREQ
583 #define __GNUC_PREREQ(maj, min) (0)
/* Flushes the instruction cache for [code, code+size) after code generation.
 * Platform dispatch: Darwin sys_icache_invalidate, GCC >= 4.1 __clear_cache,
 * Android/Linux via the cacheflush syscall in inline assembly. */
587 mono_arch_flush_icache (guint8 *code, gint size)
590 sys_icache_invalidate (code, size);
591 #elif __GNUC_PREREQ(4, 1)
592 __clear_cache (code, code + size);
593 #elif defined(PLATFORM_ANDROID)
594 const int syscall = 0xf0002;
602 : "r" (code), "r" (code + size), "r" (syscall)
606 __asm __volatile ("mov r0, %0\n"
609 "swi 0x9f0002 @ sys_cacheflush"
611 : "r" (code), "r" (code + size), "r" (0)
612 : "r0", "r1", "r3" );
/* Fields of the argument-info struct (declaration head elided in this excerpt). */
627 guint16 vtsize; /* in param area */
629 guint8 regtype : 4; /* 0 general, 1 basereg, 2 floating point register, see RegType* */
630 guint8 size : 4; /* 1, 2, 4, 8, or regs used by RegTypeStructByVal */
/* Assigns the next argument described by 'ainfo' to a core register (r0-r3)
 * or a caller stack slot, advancing *gr / *stack_size. 'simple' arguments take
 * one word; 64-bit values may be split with the first word in r3 and the
 * second on the stack (RegTypeBaseGen). */
645 add_general (guint *gr, guint *stack_size, ArgInfo *ainfo, gboolean simple)
/* All argument registers used up: the value lives in the caller's frame. */
648 if (*gr > ARMREG_R3) {
649 ainfo->offset = *stack_size;
650 ainfo->reg = ARMREG_SP; /* in the caller */
651 ainfo->regtype = RegTypeBase;
662 /* first word in r3 and the second on the stack */
663 ainfo->offset = *stack_size;
664 ainfo->reg = ARMREG_SP; /* in the caller */
665 ainfo->regtype = RegTypeBaseGen;
667 } else if (*gr >= ARMREG_R3) {
672 ainfo->offset = *stack_size;
673 ainfo->reg = ARMREG_SP; /* in the caller */
674 ainfo->regtype = RegTypeBase;
/* Computes the ARM calling-convention layout for 'sig': where each argument
 * (and the return value) lives — register, stack, or split — and the total
 * outgoing stack usage. Returns a heap-allocated CallInfo (g_malloc0; caller
 * frees). */
689 calculate_sizes (MonoMethodSignature *sig, gboolean is_pinvoke)
692 int n = sig->hasthis + sig->param_count;
693 MonoType *simpletype;
694 guint32 stack_size = 0;
695 CallInfo *cinfo = g_malloc0 (sizeof (CallInfo) + sizeof (ArgInfo) * n);
699 /* FIXME: handle returning a struct */
/* Struct returns consume a hidden pointer argument, passed first (R0). */
700 if (MONO_TYPE_ISSTRUCT (sig->ret)) {
701 add_general (&gr, &stack_size, &cinfo->ret, TRUE);
702 cinfo->struct_ret = ARMREG_R0;
707 add_general (&gr, &stack_size, cinfo->args + n, TRUE);
710 DEBUG(printf("params: %d\n", sig->param_count));
711 for (i = 0; i < sig->param_count; ++i) {
712 if ((sig->call_convention == MONO_CALL_VARARG) && (i == sig->sentinelpos)) {
713 /* Prevent implicit arguments and sig_cookie from
714 being passed in registers */
716 /* Emit the signature cookie just before the implicit arguments */
717 add_general (&gr, &stack_size, &cinfo->sig_cookie, TRUE);
719 DEBUG(printf("param %d: ", i));
720 if (sig->params [i]->byref) {
721 DEBUG(printf("byref\n"));
722 add_general (&gr, &stack_size, cinfo->args + n, TRUE);
726 simpletype = mini_type_get_underlying_type (NULL, sig->params [i]);
727 switch (simpletype->type) {
728 case MONO_TYPE_BOOLEAN:
731 cinfo->args [n].size = 1;
732 add_general (&gr, &stack_size, cinfo->args + n, TRUE);
738 cinfo->args [n].size = 2;
739 add_general (&gr, &stack_size, cinfo->args + n, TRUE);
744 cinfo->args [n].size = 4;
745 add_general (&gr, &stack_size, cinfo->args + n, TRUE);
751 case MONO_TYPE_FNPTR:
752 case MONO_TYPE_CLASS:
753 case MONO_TYPE_OBJECT:
754 case MONO_TYPE_STRING:
755 case MONO_TYPE_SZARRAY:
756 case MONO_TYPE_ARRAY:
758 cinfo->args [n].size = sizeof (gpointer);
759 add_general (&gr, &stack_size, cinfo->args + n, TRUE);
762 case MONO_TYPE_GENERICINST:
763 if (!mono_type_generic_inst_is_valuetype (sig->params [i])) {
764 cinfo->args [n].size = sizeof (gpointer);
765 add_general (&gr, &stack_size, cinfo->args + n, TRUE);
/* Value types: passed by value, first words in registers, rest on stack. */
770 case MONO_TYPE_TYPEDBYREF:
771 case MONO_TYPE_VALUETYPE: {
776 if (simpletype->type == MONO_TYPE_TYPEDBYREF) {
777 size = sizeof (MonoTypedRef);
779 MonoClass *klass = mono_class_from_mono_type (sig->params [i]);
781 size = mono_class_native_size (klass, NULL);
783 size = mono_class_value_size (klass, NULL);
785 DEBUG(printf ("load %d bytes struct\n",
786 mono_class_native_size (sig->params [i]->data.klass, NULL)));
/* Round the struct size up to a whole number of machine words. */
789 align_size += (sizeof (gpointer) - 1);
790 align_size &= ~(sizeof (gpointer) - 1);
791 nwords = (align_size + sizeof (gpointer) -1 ) / sizeof (gpointer);
792 cinfo->args [n].regtype = RegTypeStructByVal;
793 /* FIXME: align gr and stack_size if needed */
794 if (gr > ARMREG_R3) {
795 cinfo->args [n].size = 0;
796 cinfo->args [n].vtsize = nwords;
/* Part of the struct fits in the remaining registers r<gr>..r3. */
798 int rest = ARMREG_R3 - gr + 1;
799 int n_in_regs = rest >= nwords? nwords: rest;
800 cinfo->args [n].size = n_in_regs;
801 cinfo->args [n].vtsize = nwords - n_in_regs;
802 cinfo->args [n].reg = gr;
805 cinfo->args [n].offset = stack_size;
806 /*g_print ("offset for arg %d at %d\n", n, stack_size);*/
807 stack_size += nwords * sizeof (gpointer);
/* 64-bit values: may be register-pair, split, or fully on the stack. */
814 cinfo->args [n].size = 8;
815 add_general (&gr, &stack_size, cinfo->args + n, FALSE);
819 g_error ("Can't trampoline 0x%x", sig->params [i]->type);
/* Classify the return value location. */
824 simpletype = mini_type_get_underlying_type (NULL, sig->ret);
825 switch (simpletype->type) {
826 case MONO_TYPE_BOOLEAN:
837 case MONO_TYPE_FNPTR:
838 case MONO_TYPE_CLASS:
839 case MONO_TYPE_OBJECT:
840 case MONO_TYPE_SZARRAY:
841 case MONO_TYPE_ARRAY:
842 case MONO_TYPE_STRING:
843 cinfo->ret.reg = ARMREG_R0;
847 cinfo->ret.reg = ARMREG_R0;
851 cinfo->ret.reg = ARMREG_R0;
852 /* FIXME: cinfo->ret.reg = ???;
853 cinfo->ret.regtype = RegTypeFP;*/
855 case MONO_TYPE_GENERICINST:
856 if (!mono_type_generic_inst_is_valuetype (sig->ret)) {
857 cinfo->ret.reg = ARMREG_R0;
861 case MONO_TYPE_VALUETYPE:
863 case MONO_TYPE_TYPEDBYREF:
867 g_error ("Can't handle as return value 0x%x", sig->ret->type);
871 /* align stack size to 8 */
872 DEBUG (printf (" stack size: %d (%d)\n", (stack_size + 15) & ~15, stack_size));
873 stack_size = (stack_size + 7) & ~7;
875 cinfo->stack_usage = stack_size;
881 * Set var information according to the calling convention. arm version.
882 * The locals var stuff should most likely be split in another method.
/* Assigns a frame offset (or register) to the return value, every local and
 * every incoming argument, and records the total frame size in cfg->stack_offset. */
885 mono_arch_allocate_vars (MonoCompile *cfg)
887 MonoMethodSignature *sig;
888 MonoMethodHeader *header;
890 int i, offset, size, align, curinst;
891 int frame_reg = ARMREG_FP;
893 /* FIXME: this will change when we use FP as gcc does */
894 cfg->flags |= MONO_CFG_HAS_SPILLUP;
896 /* allow room for the vararg method args: void* and long/double */
897 if (mono_jit_trace_calls != NULL && mono_trace_eval (cfg->method))
898 cfg->param_area = MAX (cfg->param_area, sizeof (gpointer)*8);
900 header = mono_method_get_header (cfg->method);
903 * We use the frame register also for any method that has
904 * exception clauses. This way, when the handlers are called,
905 * the code will reference local variables using the frame reg instead of
906 * the stack pointer: if we had to restore the stack pointer, we'd
907 * corrupt the method frames that are already on the stack (since
908 * filters get called before stack unwinding happens) when the filter
909 * code would call any method (this also applies to finally etc.).
911 if ((cfg->flags & MONO_CFG_HAS_ALLOCA) || header->num_clauses)
912 frame_reg = ARMREG_FP;
913 cfg->frame_reg = frame_reg;
914 if (frame_reg != ARMREG_SP) {
915 cfg->used_int_regs |= 1 << frame_reg;
/* NOTE(review): condition polarity differs from mono_arch_get_global_int_regs,
 * which uses !(compile_aot || uses_rgctx_reg) — verify this is intended. */
918 if (!cfg->compile_aot || cfg->uses_rgctx_reg)
919 /* V5 is reserved for passing the vtable/rgctx/IMT method */
920 cfg->used_int_regs |= (1 << ARMREG_V5);
922 sig = mono_method_signature (cfg->method);
926 if (!MONO_TYPE_ISSTRUCT (sig->ret)) {
927 /* FIXME: handle long and FP values */
928 switch (mini_type_get_underlying_type (NULL, sig->ret)->type) {
932 cfg->ret->opcode = OP_REGVAR;
933 cfg->ret->inst_c0 = ARMREG_R0;
937 /* local vars are at a positive offset from the stack pointer */
939 * also note that if the function uses alloca, we use FP
940 * to point at the local variables.
942 offset = 0; /* linkage area */
943 /* align the offset to 16 bytes: not sure this is needed here */
945 //offset &= ~(8 - 1);
947 /* add parameter area size for called functions */
948 offset += cfg->param_area;
951 if (cfg->flags & MONO_CFG_HAS_FPOUT)
954 /* allow room to save the return value */
955 if (mono_jit_trace_calls != NULL && mono_trace_eval (cfg->method))
958 /* the MonoLMF structure is stored just below the stack pointer */
960 if (sig->call_convention == MONO_CALL_VARARG) {
/* Place the hidden vtype-return address variable at a word-aligned slot. */
964 if (MONO_TYPE_ISSTRUCT (sig->ret)) {
965 inst = cfg->vret_addr;
966 offset += sizeof(gpointer) - 1;
967 offset &= ~(sizeof(gpointer) - 1);
968 inst->inst_offset = offset;
969 inst->opcode = OP_REGOFFSET;
970 inst->inst_basereg = frame_reg;
971 if (G_UNLIKELY (cfg->verbose_level > 1)) {
972 printf ("vret_addr =");
973 mono_print_ins (cfg->vret_addr);
975 offset += sizeof(gpointer);
976 if (sig->call_convention == MONO_CALL_VARARG)
977 cfg->sig_cookie += sizeof (gpointer);
/* Lay out local variables. */
980 curinst = cfg->locals_start;
981 for (i = curinst; i < cfg->num_varinfo; ++i) {
982 inst = cfg->varinfo [i];
983 if ((inst->flags & MONO_INST_IS_DEAD) || inst->opcode == OP_REGVAR)
986 /* inst->backend.is_pinvoke indicates native sized value types, this is used by the
987 * pinvoke wrappers when they call functions returning structure */
988 if (inst->backend.is_pinvoke && MONO_TYPE_ISSTRUCT (inst->inst_vtype) && inst->inst_vtype->type != MONO_TYPE_TYPEDBYREF) {
990 size = mono_class_native_size (mono_class_from_mono_type (inst->inst_vtype), &ualign);
994 size = mono_type_size (inst->inst_vtype, &align);
996 /* FIXME: if a structure is misaligned, our memcpy doesn't work,
997 * since it loads/stores misaligned words, which don't do the right thing.
999 if (align < 4 && size >= 4)
1001 offset += align - 1;
1002 offset &= ~(align - 1);
1003 inst->inst_offset = offset;
1004 inst->opcode = OP_REGOFFSET;
1005 inst->inst_basereg = frame_reg;
1007 //g_print ("allocating local %d to %d\n", i, inst->inst_offset);
/* Lay out the implicit 'this' argument (when not already in a register). */
1012 inst = cfg->args [curinst];
1013 if (inst->opcode != OP_REGVAR) {
1014 inst->opcode = OP_REGOFFSET;
1015 inst->inst_basereg = frame_reg;
1016 offset += sizeof (gpointer) - 1;
1017 offset &= ~(sizeof (gpointer) - 1);
1018 inst->inst_offset = offset;
1019 offset += sizeof (gpointer);
1020 if (sig->call_convention == MONO_CALL_VARARG)
1021 cfg->sig_cookie += sizeof (gpointer);
/* Lay out the explicit arguments. */
1026 for (i = 0; i < sig->param_count; ++i) {
1027 inst = cfg->args [curinst];
1028 if (inst->opcode != OP_REGVAR) {
1029 inst->opcode = OP_REGOFFSET;
1030 inst->inst_basereg = frame_reg;
1031 size = mono_type_size (sig->params [i], &align);
1032 /* FIXME: if a structure is misaligned, our memcpy doesn't work,
1033 * since it loads/stores misaligned words, which don't do the right thing.
1035 if (align < 4 && size >= 4)
1037 offset += align - 1;
1038 offset &= ~(align - 1);
1039 inst->inst_offset = offset;
1041 if ((sig->call_convention == MONO_CALL_VARARG) && (i < sig->sentinelpos))
1042 cfg->sig_cookie += size;
1047 /* align the offset to 8 bytes */
1052 cfg->stack_offset = offset;
/* Creates architecture-specific compile-time variables; here: the hidden
 * vret_addr argument used when the method returns a value type by reference. */
1056 mono_arch_create_vars (MonoCompile *cfg)
1058 MonoMethodSignature *sig;
1060 sig = mono_method_signature (cfg->method);
1062 if (MONO_TYPE_ISSTRUCT (sig->ret)) {
1063 cfg->vret_addr = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_ARG);
1064 if (G_UNLIKELY (cfg->verbose_level > 1)) {
1065 printf ("vret_addr = ");
1066 mono_print_ins (cfg->vret_addr);
/* Lowers a managed call: for each argument, emits the IR moves/stores that
 * place it where calculate_sizes () decided (register, stack slot, split pair,
 * or by-value struct via OP_OUTARG_VT), then records the stack usage. */
1072 mono_arch_emit_call (MonoCompile *cfg, MonoCallInst *call)
1075 MonoMethodSignature *sig;
1079 sig = call->signature;
1080 n = sig->param_count + sig->hasthis;
1082 cinfo = calculate_sizes (sig, sig->pinvoke);
1084 for (i = 0; i < n; ++i) {
1085 ArgInfo *ainfo = cinfo->args + i;
/* 'this' (when present) is typed as a plain pointer. */
1088 if (i >= sig->hasthis)
1089 t = sig->params [i - sig->hasthis];
1091 t = &mono_defaults.int_class->byval_arg;
1092 t = mini_type_get_underlying_type (NULL, t);
1094 if ((sig->call_convention == MONO_CALL_VARARG) && (i == sig->sentinelpos)) {
1099 in = call->args [i];
1101 switch (ainfo->regtype) {
1102 case RegTypeGeneral:
/* 64-bit integers occupy a register pair (low word in ainfo->reg). */
1103 if (!t->byref && ((t->type == MONO_TYPE_I8) || (t->type == MONO_TYPE_U8))) {
1104 MONO_INST_NEW (cfg, ins, OP_MOVE);
1105 ins->dreg = mono_alloc_ireg (cfg);
1106 ins->sreg1 = in->dreg + 1;
1107 MONO_ADD_INS (cfg->cbb, ins);
1108 mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, ainfo->reg, FALSE);
1110 MONO_INST_NEW (cfg, ins, OP_MOVE);
1111 ins->dreg = mono_alloc_ireg (cfg);
1112 ins->sreg1 = in->dreg + 2;
1113 MONO_ADD_INS (cfg->cbb, ins);
1114 mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, ainfo->reg + 1, FALSE);
1115 } else if (!t->byref && ((t->type == MONO_TYPE_R8) || (t->type == MONO_TYPE_R4))) {
1116 #ifndef MONO_ARCH_SOFT_FLOAT
1120 if (ainfo->size == 4) {
1121 #ifdef MONO_ARCH_SOFT_FLOAT
1122 /* mono_emit_call_args () have already done the r8->r4 conversion */
1123 /* The converted value is in an int vreg */
1124 MONO_INST_NEW (cfg, ins, OP_MOVE);
1125 ins->dreg = mono_alloc_ireg (cfg);
1126 ins->sreg1 = in->dreg;
1127 MONO_ADD_INS (cfg->cbb, ins);
1128 mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, ainfo->reg, FALSE);
/* Hard-float r4: bounce through the scratch area below the param area. */
1130 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORER4_MEMBASE_REG, ARMREG_SP, (cfg->param_area - 8), in->dreg);
1131 creg = mono_alloc_ireg (cfg);
1132 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOAD_MEMBASE, creg, ARMREG_SP, (cfg->param_area - 8));
1133 mono_call_inst_add_outarg_reg (cfg, call, creg, ainfo->reg, FALSE);
1136 #ifdef MONO_ARCH_SOFT_FLOAT
/* Soft-float r8: extract low/high 32-bit halves into the register pair. */
1137 MONO_INST_NEW (cfg, ins, OP_FGETLOW32);
1138 ins->dreg = mono_alloc_ireg (cfg);
1139 ins->sreg1 = in->dreg;
1140 MONO_ADD_INS (cfg->cbb, ins);
1141 mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, ainfo->reg, FALSE);
1143 MONO_INST_NEW (cfg, ins, OP_FGETHIGH32);
1144 ins->dreg = mono_alloc_ireg (cfg);
1145 ins->sreg1 = in->dreg;
1146 MONO_ADD_INS (cfg->cbb, ins);
1147 mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, ainfo->reg + 1, FALSE);
/* Hard-float r8: store, then reload both words into the register pair. */
1149 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORER8_MEMBASE_REG, ARMREG_SP, (cfg->param_area - 8), in->dreg);
1150 creg = mono_alloc_ireg (cfg);
1151 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOAD_MEMBASE, creg, ARMREG_SP, (cfg->param_area - 8));
1152 mono_call_inst_add_outarg_reg (cfg, call, creg, ainfo->reg, FALSE);
1153 creg = mono_alloc_ireg (cfg);
1154 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOAD_MEMBASE, creg, ARMREG_SP, (cfg->param_area - 8 + 4));
1155 mono_call_inst_add_outarg_reg (cfg, call, creg, ainfo->reg + 1, FALSE);
1158 cfg->flags |= MONO_CFG_HAS_FPOUT;
/* Plain regsize argument: move into the assigned register. */
1160 MONO_INST_NEW (cfg, ins, OP_MOVE);
1161 ins->dreg = mono_alloc_ireg (cfg);
1162 ins->sreg1 = in->dreg;
1163 MONO_ADD_INS (cfg->cbb, ins);
1165 mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, ainfo->reg, FALSE);
1168 case RegTypeStructByAddr:
1171 /* FIXME: where is the data allocated? */
1172 arg->backend.reg3 = ainfo->reg;
1173 call->used_iregs |= 1 << ainfo->reg;
1174 g_assert_not_reached ();
/* By-value struct: defer to mono_arch_emit_outarg_vt via OP_OUTARG_VT,
 * carrying a private copy of the ArgInfo in inst_p1. */
1177 case RegTypeStructByVal:
1178 MONO_INST_NEW (cfg, ins, OP_OUTARG_VT);
1179 ins->opcode = OP_OUTARG_VT;
1180 ins->sreg1 = in->dreg;
1181 ins->klass = in->klass;
1182 ins->inst_p0 = call;
1183 ins->inst_p1 = mono_mempool_alloc (cfg->mempool, sizeof (ArgInfo));
1184 memcpy (ins->inst_p1, ainfo, sizeof (ArgInfo));
1185 MONO_ADD_INS (cfg->cbb, ins);
/* Stack-passed argument (RegTypeBase): store to the outgoing slot. */
1188 if (!t->byref && ((t->type == MONO_TYPE_I8) || (t->type == MONO_TYPE_U8))) {
1189 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, ARMREG_SP, ainfo->offset, in->dreg);
1190 } else if (!t->byref && ((t->type == MONO_TYPE_R4) || (t->type == MONO_TYPE_R8))) {
1191 if (t->type == MONO_TYPE_R8) {
1192 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORER8_MEMBASE_REG, ARMREG_SP, ainfo->offset, in->dreg);
1194 #ifdef MONO_ARCH_SOFT_FLOAT
1195 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, ARMREG_SP, ainfo->offset, in->dreg);
1197 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORER4_MEMBASE_REG, ARMREG_SP, ainfo->offset, in->dreg);
1201 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, ARMREG_SP, ainfo->offset, in->dreg);
/* Split 64-bit argument: one word in r3, the other on the stack. */
1204 case RegTypeBaseGen:
1205 if (!t->byref && ((t->type == MONO_TYPE_I8) || (t->type == MONO_TYPE_U8))) {
1206 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, ARMREG_SP, ainfo->offset, (G_BYTE_ORDER == G_BIG_ENDIAN) ? in->dreg + 1 : in->dreg + 2);
1207 MONO_INST_NEW (cfg, ins, OP_MOVE);
1208 ins->dreg = mono_alloc_ireg (cfg);
1209 ins->sreg1 = G_BYTE_ORDER == G_BIG_ENDIAN ? in->dreg + 2 : in->dreg + 1;
1210 MONO_ADD_INS (cfg->cbb, ins);
1211 mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, ARMREG_R3, FALSE);
1212 } else if (!t->byref && (t->type == MONO_TYPE_R8)) {
1215 #ifdef MONO_ARCH_SOFT_FLOAT
1216 g_assert_not_reached ();
1219 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORER8_MEMBASE_REG, ARMREG_SP, (cfg->param_area - 8), in->dreg);
1220 creg = mono_alloc_ireg (cfg);
1221 mono_call_inst_add_outarg_reg (cfg, call, creg, ARMREG_R3, FALSE);
1222 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOAD_MEMBASE, creg, ARMREG_SP, (cfg->param_area - 8));
1223 creg = mono_alloc_ireg (cfg);
1224 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOAD_MEMBASE, creg, ARMREG_SP, (cfg->param_area - 4));
1225 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, ARMREG_SP, ainfo->offset, creg);
1226 cfg->flags |= MONO_CFG_HAS_FPOUT;
1228 g_assert_not_reached ();
1235 arg->backend.reg3 = ainfo->reg;
1236 /* FP args are passed in int regs */
1237 call->used_iregs |= 1 << ainfo->reg;
1238 if (ainfo->size == 8) {
1239 arg->opcode = OP_OUTARG_R8;
1240 call->used_iregs |= 1 << (ainfo->reg + 1);
1242 arg->opcode = OP_OUTARG_R4;
1245 cfg->flags |= MONO_CFG_HAS_FPOUT;
1249 g_assert_not_reached ();
/* Struct return: pass the vret buffer address in the designated register. */
1253 if (sig->ret && MONO_TYPE_ISSTRUCT (sig->ret)) {
1256 MONO_INST_NEW (cfg, vtarg, OP_MOVE);
1257 vtarg->sreg1 = call->vret_var->dreg;
1258 vtarg->dreg = mono_alloc_preg (cfg);
1259 MONO_ADD_INS (cfg->cbb, vtarg);
1261 mono_call_inst_add_outarg_reg (cfg, call, vtarg->dreg, cinfo->ret.reg, FALSE);
1264 call->stack_usage = cinfo->stack_usage;
/* Expands OP_OUTARG_VT: loads the first ainfo->size words of the struct into
 * the assigned argument registers, then copies the overflow (ainfo->vtsize
 * words) into the outgoing stack area at ainfo->offset. */
1270 mono_arch_emit_outarg_vt (MonoCompile *cfg, MonoInst *ins, MonoInst *src)
1272 MonoCallInst *call = (MonoCallInst*)ins->inst_p0;
1273 ArgInfo *ainfo = ins->inst_p1;
1274 int ovf_size = ainfo->vtsize;
1275 int doffset = ainfo->offset;
1276 int i, soffset, dreg;
1279 for (i = 0; i < ainfo->size; ++i) {
1280 dreg = mono_alloc_ireg (cfg);
1281 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, dreg, src->dreg, soffset);
1282 mono_call_inst_add_outarg_reg (cfg, call, dreg, ainfo->reg + i, FALSE);
1283 soffset += sizeof (gpointer);
1285 //g_print ("vt size: %d at R%d + %d\n", doffset, vt->inst_basereg, vt->inst_offset);
1287 mini_emit_memcpy (cfg, ARMREG_SP, doffset, src->dreg, soffset, ovf_size * sizeof (gpointer), 0);
/* Emits the IR that moves 'val' into the method's return location:
 * OP_SETLRET for 64-bit pairs, OP_SETFRET for soft-float doubles, and a
 * plain move otherwise. */
1291 mono_arch_emit_setret (MonoCompile *cfg, MonoMethod *method, MonoInst *val)
1293 MonoType *ret = mini_type_get_underlying_type (cfg->generic_sharing_context, mono_method_signature (method)->ret);
1296 if (ret->type == MONO_TYPE_I8 || ret->type == MONO_TYPE_U8) {
1299 MONO_INST_NEW (cfg, ins, OP_SETLRET);
1300 ins->sreg1 = val->dreg + 1;
1301 ins->sreg2 = val->dreg + 2;
1302 MONO_ADD_INS (cfg->cbb, ins);
1305 #ifdef MONO_ARCH_SOFT_FLOAT
1306 if (ret->type == MONO_TYPE_R8) {
1309 MONO_INST_NEW (cfg, ins, OP_SETFRET);
1310 ins->dreg = cfg->ret->dreg;
1311 ins->sreg1 = val->dreg;
1312 MONO_ADD_INS (cfg->cbb, ins);
1315 if (ret->type == MONO_TYPE_R4) {
1316 /* Already converted to an int in method_to_ir () */
1317 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, cfg->ret->dreg, val->dreg);
1324 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, cfg->ret->dreg, val->dreg);
/* Whether 'imm' can be used as an instruction immediate (body elided here). */
1328 mono_arch_is_inst_imm (gint64 imm)
1334 * Allow tracing to work with this interface (with an optional argument)
/* Emits the enter-trace call: r0 = method, r1 = NULL, then calls 'func'
 * indirectly through r2. */
1338 mono_arch_instrument_prolog (MonoCompile *cfg, void *func, void *p, gboolean enable_arguments)
1342 code = mono_arm_emit_load_imm (code, ARMREG_R0, (guint32)cfg->method);
1343 ARM_MOV_REG_IMM8 (code, ARMREG_R1, 0); /* NULL ebp for now */
1344 code = mono_arm_emit_load_imm (code, ARMREG_R2, (guint32)func);
1345 code = emit_call_reg (code, ARMREG_R2);
/* Emits the leave-trace call: saves the return value (one or two words, or FP)
 * in the frame at save_offset, calls 'func' with the method, then restores the
 * saved return value. */
1358 mono_arch_instrument_epilog (MonoCompile *cfg, void *func, void *p, gboolean enable_arguments)
1361 int save_mode = SAVE_NONE;
1363 MonoMethod *method = cfg->method;
1364 int rtype = mini_type_get_underlying_type (cfg->generic_sharing_context, mono_method_signature (method)->ret)->type;
1365 int save_offset = cfg->param_area;
/* Grow the code buffer if the remaining space cannot hold this sequence. */
1369 offset = code - cfg->native_code;
1370 /* we need about 16 instructions */
1371 if (offset > (cfg->code_size - 16 * 4)) {
1372 cfg->code_size *= 2;
1373 cfg->native_code = g_realloc (cfg->native_code, cfg->code_size);
1374 code = cfg->native_code + offset;
1377 case MONO_TYPE_VOID:
1378 /* special case string .ctor icall */
/* NOTE(review): the comment says ".ctor", but strcmp () != 0 selects methods
 * whose name is NOT ".ctor" — verify whether a '!' is missing here. */
1379 if (strcmp (".ctor", method->name) && method->klass == mono_defaults.string_class)
1380 save_mode = SAVE_ONE;
1382 save_mode = SAVE_NONE;
1386 save_mode = SAVE_TWO;
1390 save_mode = SAVE_FP;
1392 case MONO_TYPE_VALUETYPE:
1393 save_mode = SAVE_STRUCT;
1396 save_mode = SAVE_ONE;
1400 switch (save_mode) {
/* SAVE_TWO: spill the r0/r1 pair, optionally shifting it into r1/r2 so the
 * trace callee receives it as arguments. */
1402 ARM_STR_IMM (code, ARMREG_R0, cfg->frame_reg, save_offset);
1403 ARM_STR_IMM (code, ARMREG_R1, cfg->frame_reg, save_offset + 4);
1404 if (enable_arguments) {
1405 ARM_MOV_REG_REG (code, ARMREG_R2, ARMREG_R1);
1406 ARM_MOV_REG_REG (code, ARMREG_R1, ARMREG_R0);
1410 ARM_STR_IMM (code, ARMREG_R0, cfg->frame_reg, save_offset);
1411 if (enable_arguments) {
1412 ARM_MOV_REG_REG (code, ARMREG_R1, ARMREG_R0);
1416 /* FIXME: what reg? */
1417 if (enable_arguments) {
1418 /* FIXME: what reg? */
1422 if (enable_arguments) {
1423 /* FIXME: get the actual address */
1424 ARM_MOV_REG_REG (code, ARMREG_R1, ARMREG_R0);
/* Call the tracing function with the method as first argument. */
1432 code = mono_arm_emit_load_imm (code, ARMREG_R0, (guint32)cfg->method);
1433 code = mono_arm_emit_load_imm (code, ARMREG_IP, (guint32)func);
1434 code = emit_call_reg (code, ARMREG_IP);
/* Restore the spilled return value. */
1436 switch (save_mode) {
1438 ARM_LDR_IMM (code, ARMREG_R0, cfg->frame_reg, save_offset);
1439 ARM_LDR_IMM (code, ARMREG_R1, cfg->frame_reg, save_offset + 4);
1442 ARM_LDR_IMM (code, ARMREG_R0, cfg->frame_reg, save_offset);
1456 * The immediate field for cond branches is big enough for all reasonable methods
/*
 * Emit a conditional branch to INS's target.  When the target's native
 * offset is already known the displacement is encoded inline; otherwise
 * a patch entry (label or basic block) is recorded and the displacement
 * is left 0 for arm_patch () to fill in later.
 */
1458 #define EMIT_COND_BRANCH_FLAGS(ins,condcode) \
1459 if (ins->flags & MONO_INST_BRLABEL) { \
1460 if (0 && ins->inst_i0->inst_c0) { \
1461 ARM_B_COND (code, (condcode), (code - cfg->native_code + ins->inst_i0->inst_c0) & 0xffffff); \
1463 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_LABEL, ins->inst_i0); \
1464 ARM_B_COND (code, (condcode), 0); \
1467 if (0 && ins->inst_true_bb->native_offset) { \
1468 ARM_B_COND (code, (condcode), (code - cfg->native_code + ins->inst_true_bb->native_offset) & 0xffffff); \
1470 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_BB, ins->inst_true_bb); \
1471 ARM_B_COND (code, (condcode), 0); \
1475 #define EMIT_COND_BRANCH(ins,cond) EMIT_COND_BRANCH_FLAGS(ins, branch_cc_table [(cond)])
1477 /* emit an exception if condition is fail
1479 * We assign the extra code used to throw the implicit exceptions
1480 * to cfg->bb_exit as far as the big branch handling is concerned
1482 #define EMIT_COND_SYSTEM_EXCEPTION_FLAGS(condcode,exc_name) \
1484 mono_add_patch_info (cfg, code - cfg->native_code, \
1485 MONO_PATCH_INFO_EXC, exc_name); \
1486 ARM_BL_COND (code, (condcode), 0); \
1489 #define EMIT_COND_SYSTEM_EXCEPTION(cond,exc_name) EMIT_COND_SYSTEM_EXCEPTION_FLAGS(branch_cc_table [(cond)], (exc_name))
/* mono_arch_peephole_pass_1: early arch-specific peephole pass over BB. */
1492 mono_arch_peephole_pass_1 (MonoCompile *cfg, MonoBasicBlock *bb)
/*
 * mono_arch_peephole_pass_2:
 *   Local peephole optimizations on adjacent instruction pairs:
 *   forward a store's source into a following load of the same slot
 *   (store+load -> store+move), drop a reload of a value just loaded,
 *   turn load-after-store-imm into OP_ICONST, narrow load-after-store
 *   into a sign/zero extension, and delete self moves and move
 *   ping-pongs.
 */
1497 mono_arch_peephole_pass_2 (MonoCompile *cfg, MonoBasicBlock *bb)
1499 MonoInst *ins, *n, *last_ins = NULL;
1501 MONO_BB_FOR_EACH_INS_SAFE (bb, n, ins) {
1502 switch (ins->opcode) {
1505 /* Already done by an arch-independent pass */
1507 case OP_LOAD_MEMBASE:
1508 case OP_LOADI4_MEMBASE:
1510 * OP_STORE_MEMBASE_REG reg, offset(basereg)
1511 * OP_LOAD_MEMBASE offset(basereg), reg
1513 if (last_ins && (last_ins->opcode == OP_STOREI4_MEMBASE_REG
1514 || last_ins->opcode == OP_STORE_MEMBASE_REG) &&
1515 ins->inst_basereg == last_ins->inst_destbasereg &&
1516 ins->inst_offset == last_ins->inst_offset) {
1517 if (ins->dreg == last_ins->sreg1) {
1518 MONO_DELETE_INS (bb, ins);
1521 //static int c = 0; printf ("MATCHX %s %d\n", cfg->method->name,c++);
1522 ins->opcode = OP_MOVE;
1523 ins->sreg1 = last_ins->sreg1;
1527 * Note: reg1 must be different from the basereg in the second load
1528 * OP_LOAD_MEMBASE offset(basereg), reg1
1529 * OP_LOAD_MEMBASE offset(basereg), reg2
1531 * OP_LOAD_MEMBASE offset(basereg), reg1
1532 * OP_MOVE reg1, reg2
1534 } if (last_ins && (last_ins->opcode == OP_LOADI4_MEMBASE
1535 || last_ins->opcode == OP_LOAD_MEMBASE) &&
1536 ins->inst_basereg != last_ins->dreg &&
1537 ins->inst_basereg == last_ins->inst_basereg &&
1538 ins->inst_offset == last_ins->inst_offset) {
1540 if (ins->dreg == last_ins->dreg) {
1541 MONO_DELETE_INS (bb, ins);
1544 ins->opcode = OP_MOVE;
1545 ins->sreg1 = last_ins->dreg;
1548 //g_assert_not_reached ();
1552 * OP_STORE_MEMBASE_IMM imm, offset(basereg)
1553 * OP_LOAD_MEMBASE offset(basereg), reg
1555 * OP_STORE_MEMBASE_IMM imm, offset(basereg)
1556 * OP_ICONST reg, imm
1558 } else if (last_ins && (last_ins->opcode == OP_STOREI4_MEMBASE_IMM
1559 || last_ins->opcode == OP_STORE_MEMBASE_IMM) &&
1560 ins->inst_basereg == last_ins->inst_destbasereg &&
1561 ins->inst_offset == last_ins->inst_offset) {
1562 //static int c = 0; printf ("MATCHX %s %d\n", cfg->method->name,c++);
1563 ins->opcode = OP_ICONST;
1564 ins->inst_c0 = last_ins->inst_imm;
1565 g_assert_not_reached (); // check this rule
1569 case OP_LOADU1_MEMBASE:
1570 case OP_LOADI1_MEMBASE:
1571 if (last_ins && (last_ins->opcode == OP_STOREI1_MEMBASE_REG) &&
1572 ins->inst_basereg == last_ins->inst_destbasereg &&
1573 ins->inst_offset == last_ins->inst_offset) {
1574 ins->opcode = (ins->opcode == OP_LOADI1_MEMBASE) ? OP_ICONV_TO_I1 : OP_ICONV_TO_U1;
1575 ins->sreg1 = last_ins->sreg1;
1578 case OP_LOADU2_MEMBASE:
1579 case OP_LOADI2_MEMBASE:
1580 if (last_ins && (last_ins->opcode == OP_STOREI2_MEMBASE_REG) &&
1581 ins->inst_basereg == last_ins->inst_destbasereg &&
1582 ins->inst_offset == last_ins->inst_offset) {
1583 ins->opcode = (ins->opcode == OP_LOADI2_MEMBASE) ? OP_ICONV_TO_I2 : OP_ICONV_TO_U2;
1584 ins->sreg1 = last_ins->sreg1;
1588 ins->opcode = OP_MOVE;
1592 if (ins->dreg == ins->sreg1) {
1593 MONO_DELETE_INS (bb, ins);
1597 * OP_MOVE sreg, dreg
1598 * OP_MOVE dreg, sreg
1600 if (last_ins && last_ins->opcode == OP_MOVE &&
1601 ins->sreg1 == last_ins->dreg &&
1602 ins->dreg == last_ins->sreg1) {
1603 MONO_DELETE_INS (bb, ins);
1611 bb->last_ins = last_ins;
/* Maps branch condition indices to ARM condition codes; indexed by
 * EMIT_COND_BRANCH / EMIT_COND_SYSTEM_EXCEPTION above. */
1615 * the branch_cc_table should maintain the order of these
1629 branch_cc_table [] = {
/* Allocate DEST with opcode OP and insert it before the current
 * instruction.  Note: the expansion relies on the 'bb' and 'ins'
 * variables being in scope at the call site (the lowering pass loop). */
1643 #define NEW_INS(cfg,dest,op) do { \
1644 MONO_INST_NEW ((cfg), (dest), (op)); \
1645 mono_bblock_insert_before_ins (bb, ins, (dest)); \
/*
 * map_to_reg_reg_op:
 *   Map an opcode that takes an immediate (or a membase offset) to the
 *   corresponding register-register / memindex opcode.  Used by the
 *   lowering pass after the immediate has been materialized into a
 *   vreg with OP_ICONST.
 */
1649 map_to_reg_reg_op (int op)
1658 case OP_COMPARE_IMM:
1660 case OP_ICOMPARE_IMM:
1674 case OP_LOAD_MEMBASE:
1675 return OP_LOAD_MEMINDEX;
1676 case OP_LOADI4_MEMBASE:
1677 return OP_LOADI4_MEMINDEX;
1678 case OP_LOADU4_MEMBASE:
1679 return OP_LOADU4_MEMINDEX;
1680 case OP_LOADU1_MEMBASE:
1681 return OP_LOADU1_MEMINDEX;
1682 case OP_LOADI2_MEMBASE:
1683 return OP_LOADI2_MEMINDEX;
1684 case OP_LOADU2_MEMBASE:
1685 return OP_LOADU2_MEMINDEX;
1686 case OP_LOADI1_MEMBASE:
1687 return OP_LOADI1_MEMINDEX;
1688 case OP_STOREI1_MEMBASE_REG:
1689 return OP_STOREI1_MEMINDEX;
1690 case OP_STOREI2_MEMBASE_REG:
1691 return OP_STOREI2_MEMINDEX;
1692 case OP_STOREI4_MEMBASE_REG:
1693 return OP_STOREI4_MEMINDEX;
1694 case OP_STORE_MEMBASE_REG:
1695 return OP_STORE_MEMINDEX;
1696 case OP_STORER4_MEMBASE_REG:
1697 return OP_STORER4_MEMINDEX;
1698 case OP_STORER8_MEMBASE_REG:
1699 return OP_STORER8_MEMINDEX;
/* _IMM stores first become _REG stores; the lowering pass then loops
 * again to handle a possibly unencodable offset. */
1700 case OP_STORE_MEMBASE_IMM:
1701 return OP_STORE_MEMBASE_REG;
1702 case OP_STOREI1_MEMBASE_IMM:
1703 return OP_STOREI1_MEMBASE_REG;
1704 case OP_STOREI2_MEMBASE_IMM:
1705 return OP_STOREI2_MEMBASE_REG;
1706 case OP_STOREI4_MEMBASE_IMM:
1707 return OP_STOREI4_MEMBASE_REG;
1709 g_assert_not_reached ();
/*
 * mono_arch_lowering_pass:
 *   Rewrite instructions whose immediates or offsets cannot be encoded
 *   in ARM instruction fields: the constant is loaded into a fresh vreg
 *   (NEW_INS + OP_ICONST) and the opcode is switched to its reg-reg /
 *   memindex form via map_to_reg_reg_op ().  Also strength-reduces
 *   multiplications by 0, 1 and powers of two, and canonicalizes fp
 *   compares whose operands must be swapped.
 */
1713 * Remove from the instruction list the instructions that can't be
1714 * represented with very simple instructions with no register
1718 mono_arch_lowering_pass (MonoCompile *cfg, MonoBasicBlock *bb)
1720 MonoInst *ins, *temp, *last_ins = NULL;
1721 int rot_amount, imm8, low_imm;
1723 MONO_BB_FOR_EACH_INS (bb, ins) {
1725 switch (ins->opcode) {
1729 case OP_COMPARE_IMM:
1730 case OP_ICOMPARE_IMM:
/* If the immediate is not a rotated imm8, materialize it and use the
 * register form of the operation. */
1744 if ((imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount)) < 0) {
1745 NEW_INS (cfg, temp, OP_ICONST);
1746 temp->inst_c0 = ins->inst_imm;
1747 temp->dreg = mono_alloc_ireg (cfg);
1748 ins->sreg2 = temp->dreg;
1749 ins->opcode = mono_op_imm_to_op (ins->opcode);
/* Multiplication strength reduction: x*1 -> move, x*0 -> 0,
 * x*2^n -> shift; otherwise fall back to a real OP_IMUL. */
1754 if (ins->inst_imm == 1) {
1755 ins->opcode = OP_MOVE;
1758 if (ins->inst_imm == 0) {
1759 ins->opcode = OP_ICONST;
1763 imm8 = mono_is_power_of_two (ins->inst_imm);
1765 ins->opcode = OP_SHL_IMM;
1766 ins->inst_imm = imm8;
1769 NEW_INS (cfg, temp, OP_ICONST);
1770 temp->inst_c0 = ins->inst_imm;
1771 temp->dreg = mono_alloc_ireg (cfg);
1772 ins->sreg2 = temp->dreg;
1773 ins->opcode = OP_IMUL;
1775 case OP_LOCALLOC_IMM:
1776 NEW_INS (cfg, temp, OP_ICONST);
1777 temp->inst_c0 = ins->inst_imm;
1778 temp->dreg = mono_alloc_ireg (cfg);
1779 ins->sreg1 = temp->dreg;
1780 ins->opcode = OP_LOCALLOC;
1782 case OP_LOAD_MEMBASE:
1783 case OP_LOADI4_MEMBASE:
1784 case OP_LOADU4_MEMBASE:
1785 case OP_LOADU1_MEMBASE:
1786 /* we can do two things: load the immed in a register
1787 * and use an indexed load, or see if the immed can be
1788 * represented as an ad_imm + a load with a smaller offset
1789 * that fits. We just do the first for now, optimize later.
1791 if (arm_is_imm12 (ins->inst_offset))
1793 NEW_INS (cfg, temp, OP_ICONST);
1794 temp->inst_c0 = ins->inst_offset;
1795 temp->dreg = mono_alloc_ireg (cfg);
1796 ins->sreg2 = temp->dreg;
1797 ins->opcode = map_to_reg_reg_op (ins->opcode);
1799 case OP_LOADI2_MEMBASE:
1800 case OP_LOADU2_MEMBASE:
1801 case OP_LOADI1_MEMBASE:
1802 if (arm_is_imm8 (ins->inst_offset))
1804 NEW_INS (cfg, temp, OP_ICONST);
1805 temp->inst_c0 = ins->inst_offset;
1806 temp->dreg = mono_alloc_ireg (cfg);
1807 ins->sreg2 = temp->dreg;
1808 ins->opcode = map_to_reg_reg_op (ins->opcode);
1810 case OP_LOADR4_MEMBASE:
1811 case OP_LOADR8_MEMBASE:
1812 if (arm_is_fpimm8 (ins->inst_offset))
/* Split the offset: add the rotated-imm8 high part to the base with
 * OP_ADD_IMM and keep the low 9 bits as the fp load offset. */
1814 low_imm = ins->inst_offset & 0x1ff;
1815 if ((imm8 = mono_arm_is_rotated_imm8 (ins->inst_offset & ~0x1ff, &rot_amount)) >= 0) {
1816 NEW_INS (cfg, temp, OP_ADD_IMM);
1817 temp->inst_imm = ins->inst_offset & ~0x1ff;
1818 temp->sreg1 = ins->inst_basereg;
1819 temp->dreg = mono_alloc_ireg (cfg);
1820 ins->inst_basereg = temp->dreg;
1821 ins->inst_offset = low_imm;
1824 /* VFP/FPA doesn't have indexed load instructions */
1825 g_assert_not_reached ();
1827 case OP_STORE_MEMBASE_REG:
1828 case OP_STOREI4_MEMBASE_REG:
1829 case OP_STOREI1_MEMBASE_REG:
1830 if (arm_is_imm12 (ins->inst_offset))
1832 NEW_INS (cfg, temp, OP_ICONST);
1833 temp->inst_c0 = ins->inst_offset;
1834 temp->dreg = mono_alloc_ireg (cfg);
1835 ins->sreg2 = temp->dreg;
1836 ins->opcode = map_to_reg_reg_op (ins->opcode);
1838 case OP_STOREI2_MEMBASE_REG:
1839 if (arm_is_imm8 (ins->inst_offset))
1841 NEW_INS (cfg, temp, OP_ICONST);
1842 temp->inst_c0 = ins->inst_offset;
1843 temp->dreg = mono_alloc_ireg (cfg);
1844 ins->sreg2 = temp->dreg;
1845 ins->opcode = map_to_reg_reg_op (ins->opcode);
1847 case OP_STORER4_MEMBASE_REG:
1848 case OP_STORER8_MEMBASE_REG:
1849 if (arm_is_fpimm8 (ins->inst_offset))
1851 low_imm = ins->inst_offset & 0x1ff;
1852 if ((imm8 = mono_arm_is_rotated_imm8 (ins->inst_offset & ~ 0x1ff, &rot_amount)) >= 0 && arm_is_fpimm8 (low_imm)) {
1853 NEW_INS (cfg, temp, OP_ADD_IMM);
1854 temp->inst_imm = ins->inst_offset & ~0x1ff;
1855 temp->sreg1 = ins->inst_destbasereg;
1856 temp->dreg = mono_alloc_ireg (cfg);
1857 ins->inst_destbasereg = temp->dreg;
1858 ins->inst_offset = low_imm;
1861 /*g_print ("fail with: %d (%d, %d)\n", ins->inst_offset, ins->inst_offset & ~0x1ff, low_imm);*/
1862 /* VFP/FPA doesn't have indexed store instructions */
1863 g_assert_not_reached ();
1865 case OP_STORE_MEMBASE_IMM:
1866 case OP_STOREI1_MEMBASE_IMM:
1867 case OP_STOREI2_MEMBASE_IMM:
1868 case OP_STOREI4_MEMBASE_IMM:
/* First turn the immediate store into a register store ... */
1869 NEW_INS (cfg, temp, OP_ICONST);
1870 temp->inst_c0 = ins->inst_imm;
1871 temp->dreg = mono_alloc_ireg (cfg);
1872 ins->sreg1 = temp->dreg;
1873 ins->opcode = map_to_reg_reg_op (ins->opcode);
1875 goto loop_start; /* make it handle the possibly big ins->inst_offset */
1877 gboolean swap = FALSE;
1880 /* Some fp compares require swapped operands */
1881 g_assert (ins->next);
1882 switch (ins->next->opcode) {
1884 ins->next->opcode = OP_FBLT;
1888 ins->next->opcode = OP_FBLT_UN;
1892 ins->next->opcode = OP_FBGE;
1896 ins->next->opcode = OP_FBGE_UN;
1904 ins->sreg1 = ins->sreg2;
1913 bb->last_ins = last_ins;
1914 bb->max_vreg = cfg->next_vreg;
/*
 * emit_float_to_int:
 *   Convert the float in SREG to an integer in DREG, then truncate the
 *   result to SIZE bytes: a mask / shift-pair zero-extends for the
 *   unsigned case, a shift pair sign-extends for the signed one.
 */
1918 emit_float_to_int (MonoCompile *cfg, guchar *code, int dreg, int sreg, int size, gboolean is_signed)
1920 /* sreg is a float, dreg is an integer reg */
1922 ARM_FIXZ (code, dreg, sreg);
1923 #elif defined(ARM_FPU_VFP)
/* VFP: convert (toward zero) into F0, then move F0 to the core reg. */
1925 ARM_TOSIZD (code, ARM_VFP_F0, sreg);
1927 ARM_TOUIZD (code, ARM_VFP_F0, sreg);
1928 ARM_FMRS (code, dreg, ARM_VFP_F0);
1932 ARM_AND_REG_IMM8 (code, dreg, dreg, 0xff);
1933 else if (size == 2) {
1934 ARM_SHL_IMM (code, dreg, dreg, 16);
1935 ARM_SHR_IMM (code, dreg, dreg, 16);
1939 ARM_SHL_IMM (code, dreg, dreg, 24);
1940 ARM_SAR_IMM (code, dreg, dreg, 24);
1941 } else if (size == 2) {
1942 ARM_SHL_IMM (code, dreg, dreg, 16);
1943 ARM_SAR_IMM (code, dreg, dreg, 16);
1951 const guchar *target;
/* B/BL reach: signed 26-bit byte displacement, i.e. +-32 MB. */
1956 #define is_call_imm(diff) ((gint)(diff) >= -33554432 && (gint)(diff) <= 33554431)
/*
 * search_thunk_slot:
 *   Callback for mono_code_manager_foreach (): within a code chunk that
 *   the call site can reach, either find an existing 3-word thunk whose
 *   stored target matches pdata->target, or claim an all-zero free slot
 *   and emit a new thunk there; in both cases the call site is patched
 *   to branch to the thunk.
 */
1959 search_thunk_slot (void *data, int csize, int bsize, void *user_data) {
1960 PatchData *pdata = (PatchData*)user_data;
1961 guchar *code = data;
1962 guint32 *thunks = data;
1963 guint32 *endthunks = (guint32*)(code + bsize);
1965 int difflow, diffhigh;
1967 /* always ensure a call from pdata->code can reach to the thunks without further thunks */
1968 difflow = (char*)pdata->code - (char*)thunks;
1969 diffhigh = (char*)pdata->code - (char*)endthunks;
1970 if (!((is_call_imm (thunks) && is_call_imm (endthunks)) || (is_call_imm (difflow) && is_call_imm (diffhigh))))
1974 * The thunk is composed of 3 words:
1975 * load constant from thunks [2] into ARM_IP
1978 * Note that the LR register is already setup
1980 //g_print ("thunk nentries: %d\n", ((char*)endthunks - (char*)thunks)/16);
1981 if ((pdata->found == 2) || (pdata->code >= code && pdata->code <= code + csize)) {
1982 while (thunks < endthunks) {
1983 //g_print ("looking for target: %p at %p (%08x-%08x)\n", pdata->target, thunks, thunks [0], thunks [1]);
/* Existing thunk for this target: just repoint the call site. */
1984 if (thunks [2] == (guint32)pdata->target) {
1985 arm_patch (pdata->code, (guchar*)thunks);
1986 mono_arch_flush_icache (pdata->code, 4);
1989 } else if ((thunks [0] == 0) && (thunks [1] == 0) && (thunks [2] == 0)) {
1990 /* found a free slot instead: emit thunk */
1991 /* ARMREG_IP is fine to use since this can't be an IMT call
1994 code = (guchar*)thunks;
1995 ARM_LDR_IMM (code, ARMREG_IP, ARMREG_PC, 0);
1996 if (thumb_supported)
1997 ARM_BX (code, ARMREG_IP);
1999 ARM_MOV_REG_REG (code, ARMREG_PC, ARMREG_IP);
2000 thunks [2] = (guint32)pdata->target;
2001 mono_arch_flush_icache ((guchar*)thunks, 12);
2003 arm_patch (pdata->code, (guchar*)thunks);
2004 mono_arch_flush_icache (pdata->code, 4);
2008 /* skip 12 bytes, the size of the thunk */
2012 //g_print ("failed thunk lookup for %p from %p at %p (%d entries)\n", pdata->target, pdata->code, data, count);
/*
 * handle_thunk:
 *   Called when a direct branch from CODE cannot reach TARGET: walk the
 *   domain's code chunks (under the domain lock) to find or create a
 *   reachable thunk.  Aborts if no thunk slot could be found.
 */
2018 handle_thunk (int absolute, guchar *code, const guchar *target) {
2019 MonoDomain *domain = mono_domain_get ();
2023 pdata.target = target;
2024 pdata.absolute = absolute;
2027 mono_domain_lock (domain);
2028 mono_code_manager_foreach (domain->code_mp, search_thunk_slot, &pdata);
2031 /* this uses the first available slot */
2033 mono_code_manager_foreach (domain->code_mp, search_thunk_slot, &pdata);
2035 mono_domain_unlock (domain);
2037 if (pdata.found != 1)
2038 g_print ("thunk failed for %p from %p\n", target, code);
2039 g_assert (pdata.found == 1);
/*
 * arm_patch:
 *   Patch the instruction at CODE to transfer control to TARGET.
 *   Handles direct B/BL (24-bit word displacement, converted to BLX
 *   when entering thumb code) and the ldr-ip based indirect call/jump
 *   sequences; when the displacement does not fit, falls back to a
 *   thunk via handle_thunk ().
 */
2043 arm_patch (guchar *code, const guchar *target)
2045 guint32 *code32 = (void*)code;
2046 guint32 ins = *code32;
2047 guint32 prim = (ins >> 25) & 7;
2048 guint32 tval = GPOINTER_TO_UINT (target);
2050 //g_print ("patching 0x%08x (0x%08x) to point to 0x%08x\n", code, ins, target);
2051 if (prim == 5) { /* 101b */
2052 /* the diff starts 8 bytes from the branch opcode */
2053 gint diff = target - code - 8;
2055 gint tmask = 0xffffffff;
2056 if (tval & 1) { /* entering thumb mode */
2057 diff = target - 1 - code - 8;
2058 g_assert (thumb_supported);
2059 tbits = 0xf << 28; /* bl->blx bit pattern */
2060 g_assert ((ins & (1 << 24))); /* it must be a bl, not b instruction */
2061 /* this low bit of the displacement is moved to bit 24 in the instruction encoding */
2065 tmask = ~(1 << 24); /* clear the link bit */
2066 /*g_print ("blx to thumb: target: %p, code: %p, diff: %d, mask: %x\n", target, code, diff, tmask);*/
/* Forward branch: displacement must fit in 25 signed bits of bytes. */
2071 if (diff <= 33554431) {
2073 ins = (ins & 0xff000000) | diff;
2075 *code32 = ins | tbits;
2079 /* diff between 0 and -33554432 */
2080 if (diff >= -33554432) {
2082 ins = (ins & 0xff000000) | (diff & ~0xff000000);
2084 *code32 = ins | tbits;
/* Out of direct-branch range: route the call through a thunk. */
2089 handle_thunk (TRUE, code, target);
2094 * The alternative call sequences looks like this:
2096 * ldr ip, [pc] // loads the address constant
2097 * b 1f // jumps around the constant
2098 * address constant embedded in the code
2103 * There are two cases for patching:
2104 * a) at the end of method emission: in this case code points to the start
2105 * of the call sequence
2106 * b) during runtime patching of the call site: in this case code points
2107 * to the mov pc, ip instruction
2109 * We have to handle also the thunk jump code sequence:
2113 * address constant // execution never reaches here
2115 if ((ins & 0x0ffffff0) == 0x12fff10) {
2116 /* Branch and exchange: the address is constructed in a reg
2117 * We can patch BX when the code sequence is the following:
2118 * ldr ip, [pc, #0] ; 0x8
2125 guint8 *emit = (guint8*)ccode;
2126 ARM_LDR_IMM (emit, ARMREG_IP, ARMREG_PC, 0);
2128 ARM_MOV_REG_REG (emit, ARMREG_LR, ARMREG_PC);
2129 ARM_BX (emit, ARMREG_IP);
2131 /*patching from magic trampoline*/
2132 if (ins == ccode [3]) {
2133 g_assert (code32 [-4] == ccode [0]);
2134 g_assert (code32 [-3] == ccode [1]);
2135 g_assert (code32 [-1] == ccode [2]);
2136 code32 [-2] = (guint32)target;
2139 /*patching from JIT*/
2140 if (ins == ccode [0]) {
2141 g_assert (code32 [1] == ccode [1]);
2142 g_assert (code32 [3] == ccode [2]);
2143 g_assert (code32 [4] == ccode [3]);
2144 code32 [2] = (guint32)target;
2147 g_assert_not_reached ();
2148 } else if ((ins & 0x0ffffff0) == 0x12fff30) {
/* BLX reg sequence: the constant sits right before the blx. */
2156 guint8 *emit = (guint8*)ccode;
2157 ARM_LDR_IMM (emit, ARMREG_IP, ARMREG_PC, 0);
2159 ARM_BLX_REG (emit, ARMREG_IP);
2161 g_assert (code32 [-3] == ccode [0]);
2162 g_assert (code32 [-2] == ccode [1]);
2163 g_assert (code32 [0] == ccode [2]);
2165 code32 [-1] = (guint32)target;
/* Rebuild the expected mov-lr/mov-pc call sequence and compare so we
 * can tell which word of the site CODE points at. */
2168 guint32 *tmp = ccode;
2169 guint8 *emit = (guint8*)tmp;
2170 ARM_LDR_IMM (emit, ARMREG_IP, ARMREG_PC, 0);
2171 ARM_MOV_REG_REG (emit, ARMREG_LR, ARMREG_PC);
2172 ARM_MOV_REG_REG (emit, ARMREG_PC, ARMREG_IP);
2173 ARM_BX (emit, ARMREG_IP);
2174 if (ins == ccode [2]) {
2175 g_assert_not_reached (); // should be -2 ...
2176 code32 [-1] = (guint32)target;
2179 if (ins == ccode [0]) {
2180 /* handles both thunk jump code and the far call sequence */
2181 code32 [2] = (guint32)target;
2184 g_assert_not_reached ();
2186 // g_print ("patched with 0x%08x\n", ins);
2190 * Return the >= 0 uimm8 value if val can be represented with a byte + rotation
2191 * (with the rotation amount in *rot_amount. rot_amount is already adjusted
2192 * to be used with the emit macros.
2193 * Return -1 otherwise.
2196 mono_arm_is_rotated_imm8 (guint32 val, gint *rot_amount)
2199 for (i = 0; i < 31; i+= 2) {
2200 res = (val << (32 - i)) | (val >> i);
2203 *rot_amount = i? 32 - i: 0;
2210 * Emits in code a sequence of instructions that load the value 'val'
2211 * into the dreg register. Uses at most 4 instructions.
2214 mono_arm_emit_load_imm (guint8 *code, int dreg, guint32 val)
2216 int imm8, rot_amount;
2218 ARM_LDR_IMM (code, dreg, ARMREG_PC, 0);
2219 /* skip the constant pool */
/* Prefer a single MOV (or MVN of the complement) when the value is an
 * ARM rotated-imm8; otherwise build it a byte at a time below. */
2225 if ((imm8 = mono_arm_is_rotated_imm8 (val, &rot_amount)) >= 0) {
2226 ARM_MOV_REG_IMM (code, dreg, imm8, rot_amount);
2227 } else if ((imm8 = mono_arm_is_rotated_imm8 (~val, &rot_amount)) >= 0) {
2228 ARM_MVN_REG_IMM (code, dreg, imm8, rot_amount);
/* General case: mov the low byte, then add each remaining nonzero
 * byte with the appropriate rotation (at most 4 instructions). */
2231 ARM_MOV_REG_IMM8 (code, dreg, (val & 0xFF));
2233 ARM_ADD_REG_IMM (code, dreg, dreg, (val & 0xFF00) >> 8, 24);
2235 if (val & 0xFF0000) {
2236 ARM_ADD_REG_IMM (code, dreg, dreg, (val & 0xFF0000) >> 16, 16);
2238 if (val & 0xFF000000) {
2239 ARM_ADD_REG_IMM (code, dreg, dreg, (val & 0xFF000000) >> 24, 8);
2241 } else if (val & 0xFF00) {
2242 ARM_MOV_REG_IMM (code, dreg, (val & 0xFF00) >> 8, 24);
2243 if (val & 0xFF0000) {
2244 ARM_ADD_REG_IMM (code, dreg, dreg, (val & 0xFF0000) >> 16, 16);
2246 if (val & 0xFF000000) {
2247 ARM_ADD_REG_IMM (code, dreg, dreg, (val & 0xFF000000) >> 24, 8);
2249 } else if (val & 0xFF0000) {
2250 ARM_MOV_REG_IMM (code, dreg, (val & 0xFF0000) >> 16, 16);
2251 if (val & 0xFF000000) {
2252 ARM_ADD_REG_IMM (code, dreg, dreg, (val & 0xFF000000) >> 24, 8);
2255 //g_assert_not_reached ();
2261 * emit_load_volatile_arguments:
2263 * Load volatile arguments from the stack to the original input registers.
2264 * Required before a tail call.
2267 emit_load_volatile_arguments (MonoCompile *cfg, guint8 *code)
2269 MonoMethod *method = cfg->method;
2270 MonoMethodSignature *sig;
2275 /* FIXME: Generate intermediate code instead */
2277 sig = mono_method_signature (method);
2279 /* This is the opposite of the code in emit_prolog */
2283 cinfo = calculate_sizes (sig, sig->pinvoke);
/* A struct return is passed as a hidden address argument: reload the
 * vret address into its designated register. */
2285 if (MONO_TYPE_ISSTRUCT (sig->ret)) {
2286 ArgInfo *ainfo = &cinfo->ret;
2287 inst = cfg->vret_addr;
2288 g_assert (arm_is_imm12 (inst->inst_offset));
2289 ARM_LDR_IMM (code, ainfo->reg, inst->inst_basereg, inst->inst_offset);
2291 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
2292 ArgInfo *ainfo = cinfo->args + i;
2293 inst = cfg->args [pos];
2295 if (cfg->verbose_level > 2)
2296 g_print ("Loading argument %d (type: %d)\n", i, ainfo->regtype);
2297 if (inst->opcode == OP_REGVAR) {
2298 if (ainfo->regtype == RegTypeGeneral)
2299 ARM_MOV_REG_REG (code, inst->dreg, ainfo->reg);
2300 else if (ainfo->regtype == RegTypeFP) {
2301 g_assert_not_reached ();
2302 } else if (ainfo->regtype == RegTypeBase) {
2306 if (arm_is_imm12 (prev_sp_offset + ainfo->offset)) {
2307 ARM_LDR_IMM (code, inst->dreg, ARMREG_SP, (prev_sp_offset + ainfo->offset));
/* NOTE(review): the fast path above addresses
 * prev_sp_offset + ainfo->offset, but this fallback loads
 * inst->inst_offset into IP — confirm both paths are meant to
 * address the same stack slot. */
2309 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
2310 ARM_LDR_REG_REG (code, inst->dreg, ARMREG_SP, ARMREG_IP);
2314 g_assert_not_reached ();
2316 if (ainfo->regtype == RegTypeGeneral) {
2317 switch (ainfo->size) {
/* 8-byte arguments occupy a register pair: reload both words. */
2324 g_assert (arm_is_imm12 (inst->inst_offset));
2325 ARM_LDR_IMM (code, ainfo->reg, inst->inst_basereg, inst->inst_offset);
2326 g_assert (arm_is_imm12 (inst->inst_offset + 4));
2327 ARM_LDR_IMM (code, ainfo->reg + 1, inst->inst_basereg, inst->inst_offset + 4);
2330 if (arm_is_imm12 (inst->inst_offset)) {
2331 ARM_LDR_IMM (code, ainfo->reg, inst->inst_basereg, inst->inst_offset);
2333 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
2334 ARM_LDR_REG_REG (code, ainfo->reg, inst->inst_basereg, ARMREG_IP);
2338 } else if (ainfo->regtype == RegTypeBaseGen) {
2341 } else if (ainfo->regtype == RegTypeBase) {
2344 } else if (ainfo->regtype == RegTypeFP) {
2345 g_assert_not_reached ();
2346 } else if (ainfo->regtype == RegTypeStructByVal) {
2347 int doffset = inst->inst_offset;
2351 if (mono_class_from_mono_type (inst->inst_vtype))
2352 size = mono_class_native_size (mono_class_from_mono_type (inst->inst_vtype), NULL);
/* Reload each register-passed word of the struct. */
2353 for (cur_reg = 0; cur_reg < ainfo->size; ++cur_reg) {
2354 if (arm_is_imm12 (doffset)) {
2355 ARM_LDR_IMM (code, ainfo->reg + cur_reg, inst->inst_basereg, doffset);
2357 code = mono_arm_emit_load_imm (code, ARMREG_IP, doffset);
2358 ARM_LDR_REG_REG (code, ainfo->reg + cur_reg, inst->inst_basereg, ARMREG_IP);
2360 soffset += sizeof (gpointer);
2361 doffset += sizeof (gpointer);
2366 } else if (ainfo->regtype == RegTypeStructByAddr) {
2383 mono_arch_output_basic_block (MonoCompile *cfg, MonoBasicBlock *bb)
2388 guint8 *code = cfg->native_code + cfg->code_len;
2389 MonoInst *last_ins = NULL;
2390 guint last_offset = 0;
2392 int imm8, rot_amount;
2394 /* we don't align basic blocks of loops on arm */
2396 if (cfg->verbose_level > 2)
2397 g_print ("Basic block %d starting at offset 0x%x\n", bb->block_num, bb->native_offset);
2399 cpos = bb->max_offset;
2401 if (cfg->prof_options & MONO_PROFILE_COVERAGE) {
2402 //MonoCoverageInfo *cov = mono_get_coverage_info (cfg->method);
2403 //g_assert (!mono_compile_aot);
2406 // cov->data [bb->dfn].iloffset = bb->cil_code - cfg->cil_code;
2407 /* this is not thread save, but good enough */
2408 /* fixme: howto handle overflows? */
2409 //x86_inc_mem (code, &cov->data [bb->dfn].count);
2412 if (mono_break_at_bb_method && mono_method_desc_full_match (mono_break_at_bb_method, cfg->method) && bb->block_num == mono_break_at_bb_bb_num) {
2413 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_INTERNAL_METHOD,
2414 (gpointer)"mono_break");
2415 code = emit_call_seq (cfg, code);
2418 MONO_BB_FOR_EACH_INS (bb, ins) {
2419 offset = code - cfg->native_code;
2421 max_len = ((guint8 *)ins_get_spec (ins->opcode))[MONO_INST_LEN];
2423 if (offset > (cfg->code_size - max_len - 16)) {
2424 cfg->code_size *= 2;
2425 cfg->native_code = g_realloc (cfg->native_code, cfg->code_size);
2426 code = cfg->native_code + offset;
2428 // if (ins->cil_code)
2429 // g_print ("cil code\n");
2430 mono_debug_record_line_number (cfg, ins, offset);
2432 switch (ins->opcode) {
2433 case OP_MEMORY_BARRIER:
2436 #if defined(__ARM_EABI__) && defined(__linux__)
2437 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_INTERNAL_METHOD,
2438 (gpointer)"__aeabi_read_tp");
2439 code = emit_call_seq (cfg, code);
2441 ARM_LDR_IMM (code, ins->dreg, ARMREG_R0, ins->inst_offset);
2443 g_assert_not_reached ();
2447 ppc_mullw (code, ppc_r4, ins->sreg1, ins->sreg2);
2448 ppc_mulhw (code, ppc_r3, ins->sreg1, ins->sreg2);
2451 ppc_mullw (code, ppc_r4, ins->sreg1, ins->sreg2);
2452 ppc_mulhwu (code, ppc_r3, ins->sreg1, ins->sreg2);
2454 case OP_STOREI1_MEMBASE_IMM:
2455 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_imm & 0xFF);
2456 g_assert (arm_is_imm12 (ins->inst_offset));
2457 ARM_STRB_IMM (code, ARMREG_LR, ins->inst_destbasereg, ins->inst_offset);
2459 case OP_STOREI2_MEMBASE_IMM:
2460 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_imm & 0xFFFF);
2461 g_assert (arm_is_imm8 (ins->inst_offset));
2462 ARM_STRH_IMM (code, ARMREG_LR, ins->inst_destbasereg, ins->inst_offset);
2464 case OP_STORE_MEMBASE_IMM:
2465 case OP_STOREI4_MEMBASE_IMM:
2466 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_imm);
2467 g_assert (arm_is_imm12 (ins->inst_offset));
2468 ARM_STR_IMM (code, ARMREG_LR, ins->inst_destbasereg, ins->inst_offset);
2470 case OP_STOREI1_MEMBASE_REG:
2471 g_assert (arm_is_imm12 (ins->inst_offset));
2472 ARM_STRB_IMM (code, ins->sreg1, ins->inst_destbasereg, ins->inst_offset);
2474 case OP_STOREI2_MEMBASE_REG:
2475 g_assert (arm_is_imm8 (ins->inst_offset));
2476 ARM_STRH_IMM (code, ins->sreg1, ins->inst_destbasereg, ins->inst_offset);
2478 case OP_STORE_MEMBASE_REG:
2479 case OP_STOREI4_MEMBASE_REG:
2480 /* this case is special, since it happens for spill code after lowering has been called */
2481 if (arm_is_imm12 (ins->inst_offset)) {
2482 ARM_STR_IMM (code, ins->sreg1, ins->inst_destbasereg, ins->inst_offset);
2484 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_offset);
2485 ARM_STR_REG_REG (code, ins->sreg1, ins->inst_destbasereg, ARMREG_LR);
2488 case OP_STOREI1_MEMINDEX:
2489 ARM_STRB_REG_REG (code, ins->sreg1, ins->inst_destbasereg, ins->sreg2);
2491 case OP_STOREI2_MEMINDEX:
2492 ARM_STRH_REG_REG (code, ins->sreg1, ins->inst_destbasereg, ins->sreg2);
2494 case OP_STORE_MEMINDEX:
2495 case OP_STOREI4_MEMINDEX:
2496 ARM_STR_REG_REG (code, ins->sreg1, ins->inst_destbasereg, ins->sreg2);
2499 g_assert_not_reached ();
2501 case OP_LOAD_MEMINDEX:
2502 case OP_LOADI4_MEMINDEX:
2503 case OP_LOADU4_MEMINDEX:
2504 ARM_LDR_REG_REG (code, ins->dreg, ins->inst_basereg, ins->sreg2);
2506 case OP_LOADI1_MEMINDEX:
2507 ARM_LDRSB_REG_REG (code, ins->dreg, ins->inst_basereg, ins->sreg2);
2509 case OP_LOADU1_MEMINDEX:
2510 ARM_LDRB_REG_REG (code, ins->dreg, ins->inst_basereg, ins->sreg2);
2512 case OP_LOADI2_MEMINDEX:
2513 ARM_LDRSH_REG_REG (code, ins->dreg, ins->inst_basereg, ins->sreg2);
2515 case OP_LOADU2_MEMINDEX:
2516 ARM_LDRH_REG_REG (code, ins->dreg, ins->inst_basereg, ins->sreg2);
2518 case OP_LOAD_MEMBASE:
2519 case OP_LOADI4_MEMBASE:
2520 case OP_LOADU4_MEMBASE:
2521 /* this case is special, since it happens for spill code after lowering has been called */
2522 if (arm_is_imm12 (ins->inst_offset)) {
2523 ARM_LDR_IMM (code, ins->dreg, ins->inst_basereg, ins->inst_offset);
2525 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_offset);
2526 ARM_LDR_REG_REG (code, ins->dreg, ins->inst_basereg, ARMREG_LR);
2529 case OP_LOADI1_MEMBASE:
2530 g_assert (arm_is_imm8 (ins->inst_offset));
2531 ARM_LDRSB_IMM (code, ins->dreg, ins->inst_basereg, ins->inst_offset);
2533 case OP_LOADU1_MEMBASE:
2534 g_assert (arm_is_imm12 (ins->inst_offset));
2535 ARM_LDRB_IMM (code, ins->dreg, ins->inst_basereg, ins->inst_offset);
2537 case OP_LOADU2_MEMBASE:
2538 g_assert (arm_is_imm8 (ins->inst_offset));
2539 ARM_LDRH_IMM (code, ins->dreg, ins->inst_basereg, ins->inst_offset);
2541 case OP_LOADI2_MEMBASE:
2542 g_assert (arm_is_imm8 (ins->inst_offset));
2543 ARM_LDRSH_IMM (code, ins->dreg, ins->inst_basereg, ins->inst_offset);
2545 case OP_ICONV_TO_I1:
2546 ARM_SHL_IMM (code, ins->dreg, ins->sreg1, 24);
2547 ARM_SAR_IMM (code, ins->dreg, ins->dreg, 24);
2549 case OP_ICONV_TO_I2:
2550 ARM_SHL_IMM (code, ins->dreg, ins->sreg1, 16);
2551 ARM_SAR_IMM (code, ins->dreg, ins->dreg, 16);
2553 case OP_ICONV_TO_U1:
2554 ARM_AND_REG_IMM8 (code, ins->dreg, ins->sreg1, 0xff);
2556 case OP_ICONV_TO_U2:
2557 ARM_SHL_IMM (code, ins->dreg, ins->sreg1, 16);
2558 ARM_SHR_IMM (code, ins->dreg, ins->dreg, 16);
2562 ARM_CMP_REG_REG (code, ins->sreg1, ins->sreg2);
2564 case OP_COMPARE_IMM:
2565 case OP_ICOMPARE_IMM:
2566 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
2567 g_assert (imm8 >= 0);
2568 ARM_CMP_REG_IMM (code, ins->sreg1, imm8, rot_amount);
2572 * gdb does not like encountering the hw breakpoint ins in the debugged code.
2573 * So instead of emitting a trap, we emit a call a C function and place a
2576 //*(int*)code = 0xef9f0001;
2579 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_INTERNAL_METHOD,
2580 (gpointer)"mono_break");
2581 code = emit_call_seq (cfg, code);
2583 case OP_RELAXED_NOP:
2588 case OP_DUMMY_STORE:
2589 case OP_NOT_REACHED:
2594 ARM_ADDS_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
2597 ARM_ADD_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
2601 ARM_ADCS_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
2604 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
2605 g_assert (imm8 >= 0);
2606 ARM_ADDS_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
2610 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
2611 g_assert (imm8 >= 0);
2612 ARM_ADD_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
2616 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
2617 g_assert (imm8 >= 0);
2618 ARM_ADCS_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
2621 ARM_ADD_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
2622 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, "OverflowException");
2624 case OP_IADD_OVF_UN:
2625 ARM_ADD_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
2626 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, "OverflowException");
2629 ARM_SUB_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
2630 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, "OverflowException");
2632 case OP_ISUB_OVF_UN:
2633 ARM_SUB_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
2634 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_TRUE, PPC_BR_EQ, "OverflowException");
2636 case OP_ADD_OVF_CARRY:
2637 ARM_ADCS_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
2638 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, "OverflowException");
2640 case OP_ADD_OVF_UN_CARRY:
2641 ARM_ADCS_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
2642 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, "OverflowException");
2644 case OP_SUB_OVF_CARRY:
2645 ARM_SBCS_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
2646 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, "OverflowException");
2648 case OP_SUB_OVF_UN_CARRY:
2649 ARM_SBCS_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
2650 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_TRUE, PPC_BR_EQ, "OverflowException");
2654 ARM_SUBS_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
2657 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
2658 g_assert (imm8 >= 0);
2659 ARM_SUBS_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
2662 ARM_SUB_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
2666 ARM_SBCS_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
2670 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
2671 g_assert (imm8 >= 0);
2672 ARM_SUB_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
2676 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
2677 g_assert (imm8 >= 0);
2678 ARM_SBCS_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
2680 case OP_ARM_RSBS_IMM:
2681 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
2682 g_assert (imm8 >= 0);
2683 ARM_RSBS_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
2685 case OP_ARM_RSC_IMM:
2686 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
2687 g_assert (imm8 >= 0);
2688 ARM_RSC_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
2691 ARM_AND_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
2695 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
2696 g_assert (imm8 >= 0);
2697 ARM_AND_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
2705 /* crappy ARM arch doesn't have a DIV instruction */
2706 g_assert_not_reached ();
2708 ARM_ORR_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
2712 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
2713 g_assert (imm8 >= 0);
2714 ARM_ORR_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
2717 ARM_EOR_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
2721 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
2722 g_assert (imm8 >= 0);
2723 ARM_EOR_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
2726 ARM_SHL_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
2731 ARM_SHL_IMM (code, ins->dreg, ins->sreg1, (ins->inst_imm & 0x1f));
2732 else if (ins->dreg != ins->sreg1)
2733 ARM_MOV_REG_REG (code, ins->dreg, ins->sreg1);
2736 ARM_SAR_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
2741 ARM_SAR_IMM (code, ins->dreg, ins->sreg1, (ins->inst_imm & 0x1f));
2742 else if (ins->dreg != ins->sreg1)
2743 ARM_MOV_REG_REG (code, ins->dreg, ins->sreg1);
2746 case OP_ISHR_UN_IMM:
2748 ARM_SHR_IMM (code, ins->dreg, ins->sreg1, (ins->inst_imm & 0x1f));
2749 else if (ins->dreg != ins->sreg1)
2750 ARM_MOV_REG_REG (code, ins->dreg, ins->sreg1);
2753 ARM_SHR_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
2756 ARM_MVN_REG_REG (code, ins->dreg, ins->sreg1);
2759 ARM_RSB_REG_IMM8 (code, ins->dreg, ins->sreg1, 0);
2762 if (ins->dreg == ins->sreg2)
2763 ARM_MUL_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
2765 ARM_MUL_REG_REG (code, ins->dreg, ins->sreg2, ins->sreg1);
2768 g_assert_not_reached ();
2771 /* FIXME: handle ovf/ sreg2 != dreg */
2772 ARM_MUL_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
2774 case OP_IMUL_OVF_UN:
2775 /* FIXME: handle ovf/ sreg2 != dreg */
2776 ARM_MUL_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
2779 code = mono_arm_emit_load_imm (code, ins->dreg, ins->inst_c0);
2782 /* Load the GOT offset */
2783 mono_add_patch_info (cfg, offset, (MonoJumpInfoType)ins->inst_i1, ins->inst_p0);
2784 ARM_LDR_IMM (code, ins->dreg, ARMREG_PC, 0);
2786 *(gpointer*)code = NULL;
2788 /* Load the value from the GOT */
2789 ARM_LDR_REG_REG (code, ins->dreg, ARMREG_PC, ins->dreg);
2791 case OP_ICONV_TO_I4:
2792 case OP_ICONV_TO_U4:
2794 if (ins->dreg != ins->sreg1)
2795 ARM_MOV_REG_REG (code, ins->dreg, ins->sreg1);
2798 int saved = ins->sreg2;
2799 if (ins->sreg2 == ARM_LSW_REG) {
2800 ARM_MOV_REG_REG (code, ARMREG_LR, ins->sreg2);
2803 if (ins->sreg1 != ARM_LSW_REG)
2804 ARM_MOV_REG_REG (code, ARM_LSW_REG, ins->sreg1);
2805 if (saved != ARM_MSW_REG)
2806 ARM_MOV_REG_REG (code, ARM_MSW_REG, saved);
2811 ARM_MVFD (code, ins->dreg, ins->sreg1);
2812 #elif defined(ARM_FPU_VFP)
2813 ARM_CPYD (code, ins->dreg, ins->sreg1);
2816 case OP_FCONV_TO_R4:
2818 ARM_MVFS (code, ins->dreg, ins->sreg1);
2819 #elif defined(ARM_FPU_VFP)
2820 ARM_CVTD (code, ins->dreg, ins->sreg1);
2821 ARM_CVTS (code, ins->dreg, ins->dreg);
2826 * Keep in sync with mono_arch_emit_epilog
2828 g_assert (!cfg->method->save_lmf);
2830 code = emit_load_volatile_arguments (cfg, code);
2832 code = emit_big_add (code, ARMREG_SP, cfg->frame_reg, cfg->stack_usage);
2833 ARM_POP_NWB (code, cfg->used_int_regs | ((1 << ARMREG_SP)) | ((1 << ARMREG_LR)));
2834 mono_add_patch_info (cfg, (guint8*) code - cfg->native_code, MONO_PATCH_INFO_METHOD_JUMP, ins->inst_p0);
2838 /* ensure ins->sreg1 is not NULL */
2839 ARM_LDR_IMM (code, ARMREG_LR, ins->sreg1, 0);
2843 if (ppc_is_imm16 (cfg->sig_cookie + cfg->stack_usage)) {
2844 ppc_addi (code, ppc_r11, cfg->frame_reg, cfg->sig_cookie + cfg->stack_usage);
2846 ppc_load (code, ppc_r11, cfg->sig_cookie + cfg->stack_usage);
2847 ppc_add (code, ppc_r11, cfg->frame_reg, ppc_r11);
2849 ppc_stw (code, ppc_r11, 0, ins->sreg1);
2859 call = (MonoCallInst*)ins;
2860 if (ins->flags & MONO_INST_HAS_METHOD)
2861 mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_METHOD, call->method);
2863 mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_ABS, call->fptr);
2864 code = emit_call_seq (cfg, code);
2865 code = emit_move_return_value (cfg, ins, code);
2871 case OP_VOIDCALL_REG:
2873 code = emit_call_reg (code, ins->sreg1);
2874 code = emit_move_return_value (cfg, ins, code);
2876 case OP_FCALL_MEMBASE:
2877 case OP_LCALL_MEMBASE:
2878 case OP_VCALL_MEMBASE:
2879 case OP_VCALL2_MEMBASE:
2880 case OP_VOIDCALL_MEMBASE:
2881 case OP_CALL_MEMBASE:
2882 g_assert (arm_is_imm12 (ins->inst_offset));
2883 g_assert (ins->sreg1 != ARMREG_LR);
2884 call = (MonoCallInst*)ins;
2885 if (call->method->klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
2886 ARM_ADD_REG_IMM8 (code, ARMREG_LR, ARMREG_PC, 4);
2887 ARM_LDR_IMM (code, ARMREG_PC, ins->sreg1, ins->inst_offset);
2889 * We can't embed the method in the code stream in PIC code, or
2891 * Instead, we put it in V5 in code emitted by
2892 * mono_arch_emit_imt_argument (), and embed NULL here to
2893 * signal the IMT thunk that the value is in V5.
2895 if (call->dynamic_imt_arg)
2896 *((gpointer*)code) = NULL;
2898 *((gpointer*)code) = (gpointer)call->method;
2901 ARM_MOV_REG_REG (code, ARMREG_LR, ARMREG_PC);
2902 ARM_LDR_IMM (code, ARMREG_PC, ins->sreg1, ins->inst_offset);
2904 code = emit_move_return_value (cfg, ins, code);
2907 /* keep alignment */
2908 int alloca_waste = cfg->param_area;
2911 /* round the size to 8 bytes */
2912 ARM_ADD_REG_IMM8 (code, ins->dreg, ins->sreg1, 7);
2913 ARM_BIC_REG_IMM8 (code, ins->dreg, ins->dreg, 7);
2915 ARM_ADD_REG_IMM8 (code, ins->dreg, ins->dreg, alloca_waste);
2916 ARM_SUB_REG_REG (code, ARMREG_SP, ARMREG_SP, ins->dreg);
2917 /* memzero the area: dreg holds the size, sp is the pointer */
2918 if (ins->flags & MONO_INST_INIT) {
2919 guint8 *start_loop, *branch_to_cond;
2920 ARM_MOV_REG_IMM8 (code, ARMREG_LR, 0);
2921 branch_to_cond = code;
2924 ARM_STR_REG_REG (code, ARMREG_LR, ARMREG_SP, ins->dreg);
2925 arm_patch (branch_to_cond, code);
2926 /* decrement by 4 and set flags */
2927 ARM_SUBS_REG_IMM8 (code, ins->dreg, ins->dreg, 4);
2928 ARM_B_COND (code, ARMCOND_GE, 0);
2929 arm_patch (code - 4, start_loop);
2931 ARM_ADD_REG_IMM8 (code, ins->dreg, ARMREG_SP, alloca_waste);
2935 if (ins->sreg1 != ARMREG_R0)
2936 ARM_MOV_REG_REG (code, ARMREG_R0, ins->sreg1);
2937 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_INTERNAL_METHOD,
2938 (gpointer)"mono_arch_throw_exception");
2939 code = emit_call_seq (cfg, code);
2943 if (ins->sreg1 != ARMREG_R0)
2944 ARM_MOV_REG_REG (code, ARMREG_R0, ins->sreg1);
2945 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_INTERNAL_METHOD,
2946 (gpointer)"mono_arch_rethrow_exception");
2947 code = emit_call_seq (cfg, code);
2950 case OP_START_HANDLER: {
2951 MonoInst *spvar = mono_find_spvar_for_region (cfg, bb->region);
2953 if (arm_is_imm12 (spvar->inst_offset)) {
2954 ARM_STR_IMM (code, ARMREG_LR, spvar->inst_basereg, spvar->inst_offset);
2956 code = mono_arm_emit_load_imm (code, ARMREG_IP, spvar->inst_offset);
2957 ARM_STR_REG_REG (code, ARMREG_LR, spvar->inst_basereg, ARMREG_IP);
2961 case OP_ENDFILTER: {
2962 MonoInst *spvar = mono_find_spvar_for_region (cfg, bb->region);
2964 if (ins->sreg1 != ARMREG_R0)
2965 ARM_MOV_REG_REG (code, ARMREG_R0, ins->sreg1);
2966 if (arm_is_imm12 (spvar->inst_offset)) {
2967 ARM_LDR_IMM (code, ARMREG_IP, spvar->inst_basereg, spvar->inst_offset);
2969 g_assert (ARMREG_IP != spvar->inst_basereg);
2970 code = mono_arm_emit_load_imm (code, ARMREG_IP, spvar->inst_offset);
2971 ARM_LDR_REG_REG (code, ARMREG_IP, spvar->inst_basereg, ARMREG_IP);
2973 ARM_MOV_REG_REG (code, ARMREG_PC, ARMREG_IP);
2976 case OP_ENDFINALLY: {
2977 MonoInst *spvar = mono_find_spvar_for_region (cfg, bb->region);
2979 if (arm_is_imm12 (spvar->inst_offset)) {
2980 ARM_LDR_IMM (code, ARMREG_IP, spvar->inst_basereg, spvar->inst_offset);
2982 g_assert (ARMREG_IP != spvar->inst_basereg);
2983 code = mono_arm_emit_load_imm (code, ARMREG_IP, spvar->inst_offset);
2984 ARM_LDR_REG_REG (code, ARMREG_IP, spvar->inst_basereg, ARMREG_IP);
2986 ARM_MOV_REG_REG (code, ARMREG_PC, ARMREG_IP);
2989 case OP_CALL_HANDLER:
2990 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_BB, ins->inst_target_bb);
2994 ins->inst_c0 = code - cfg->native_code;
2997 if (ins->flags & MONO_INST_BRLABEL) {
2998 /*if (ins->inst_i0->inst_c0) {
3000 //x86_jump_code (code, cfg->native_code + ins->inst_i0->inst_c0);
3002 mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_LABEL, ins->inst_i0);
3006 /*if (ins->inst_target_bb->native_offset) {
3008 //x86_jump_code (code, cfg->native_code + ins->inst_target_bb->native_offset);
3010 mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_BB, ins->inst_target_bb);
3016 ARM_MOV_REG_REG (code, ARMREG_PC, ins->sreg1);
3020 * In the normal case we have:
3021 * ldr pc, [pc, ins->sreg1 << 2]
3024 * ldr lr, [pc, ins->sreg1 << 2]
3026 * After follows the data.
3027 * FIXME: add aot support.
3029 mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_SWITCH, ins->inst_p0);
3030 max_len += 4 * GPOINTER_TO_INT (ins->klass);
3031 if (offset > (cfg->code_size - max_len - 16)) {
3032 cfg->code_size += max_len;
3033 cfg->code_size *= 2;
3034 cfg->native_code = g_realloc (cfg->native_code, cfg->code_size);
3035 code = cfg->native_code + offset;
3037 ARM_LDR_REG_REG_SHIFT (code, ARMREG_PC, ARMREG_PC, ins->sreg1, ARMSHIFT_LSL, 2);
3039 code += 4 * GPOINTER_TO_INT (ins->klass);
3043 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 0, ARMCOND_NE);
3044 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_EQ);
3048 ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
3049 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_LT);
3053 ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
3054 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_LO);
3058 ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
3059 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_GT);
3063 ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
3064 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_HI);
3066 case OP_COND_EXC_EQ:
3067 case OP_COND_EXC_NE_UN:
3068 case OP_COND_EXC_LT:
3069 case OP_COND_EXC_LT_UN:
3070 case OP_COND_EXC_GT:
3071 case OP_COND_EXC_GT_UN:
3072 case OP_COND_EXC_GE:
3073 case OP_COND_EXC_GE_UN:
3074 case OP_COND_EXC_LE:
3075 case OP_COND_EXC_LE_UN:
3076 EMIT_COND_SYSTEM_EXCEPTION (ins->opcode - OP_COND_EXC_EQ, ins->inst_p1);
3078 case OP_COND_EXC_IEQ:
3079 case OP_COND_EXC_INE_UN:
3080 case OP_COND_EXC_ILT:
3081 case OP_COND_EXC_ILT_UN:
3082 case OP_COND_EXC_IGT:
3083 case OP_COND_EXC_IGT_UN:
3084 case OP_COND_EXC_IGE:
3085 case OP_COND_EXC_IGE_UN:
3086 case OP_COND_EXC_ILE:
3087 case OP_COND_EXC_ILE_UN:
3088 EMIT_COND_SYSTEM_EXCEPTION (ins->opcode - OP_COND_EXC_IEQ, ins->inst_p1);
3091 case OP_COND_EXC_OV:
3092 case OP_COND_EXC_NC:
3093 case OP_COND_EXC_NO:
3094 case OP_COND_EXC_IC:
3095 case OP_COND_EXC_IOV:
3096 case OP_COND_EXC_INC:
3097 case OP_COND_EXC_INO:
3110 EMIT_COND_BRANCH (ins, ins->opcode - OP_IBEQ);
3113 /* floating point opcodes */
3116 if (cfg->compile_aot) {
3117 ARM_LDFD (code, ins->dreg, ARMREG_PC, 0);
3119 *(guint32*)code = ((guint32*)(ins->inst_p0))[0];
3121 *(guint32*)code = ((guint32*)(ins->inst_p0))[1];
3124 /* FIXME: we can optimize the imm load by dealing with part of
3125 * the displacement in LDFD (aligning to 512).
3127 code = mono_arm_emit_load_imm (code, ARMREG_LR, (guint32)ins->inst_p0);
3128 ARM_LDFD (code, ins->dreg, ARMREG_LR, 0);
3132 if (cfg->compile_aot) {
3133 ARM_LDFS (code, ins->dreg, ARMREG_PC, 0);
3135 *(guint32*)code = ((guint32*)(ins->inst_p0))[0];
3138 code = mono_arm_emit_load_imm (code, ARMREG_LR, (guint32)ins->inst_p0);
3139 ARM_LDFS (code, ins->dreg, ARMREG_LR, 0);
3142 case OP_STORER8_MEMBASE_REG:
3143 /* This is generated by the local regalloc pass which runs after the lowering pass */
3144 if (!arm_is_fpimm8 (ins->inst_offset)) {
3145 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_offset);
3146 ARM_STFD (code, ins->sreg1, ARMREG_LR, 0);
3148 ARM_STFD (code, ins->sreg1, ins->inst_destbasereg, ins->inst_offset);
3151 case OP_LOADR8_MEMBASE:
3152 /* This is generated by the local regalloc pass which runs after the lowering pass */
3153 if (!arm_is_fpimm8 (ins->inst_offset)) {
3154 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_offset);
3155 ARM_LDFD (code, ins->dreg, ARMREG_LR, 0);
3157 ARM_LDFD (code, ins->dreg, ins->inst_basereg, ins->inst_offset);
3160 case OP_STORER4_MEMBASE_REG:
3161 g_assert (arm_is_fpimm8 (ins->inst_offset));
3162 ARM_STFS (code, ins->sreg1, ins->inst_destbasereg, ins->inst_offset);
3164 case OP_LOADR4_MEMBASE:
3165 g_assert (arm_is_fpimm8 (ins->inst_offset));
3166 ARM_LDFS (code, ins->dreg, ins->inst_basereg, ins->inst_offset);
3168 case OP_ICONV_TO_R_UN: {
3170 tmpreg = ins->dreg == 0? 1: 0;
3171 ARM_CMP_REG_IMM8 (code, ins->sreg1, 0);
3172 ARM_FLTD (code, ins->dreg, ins->sreg1);
3173 ARM_B_COND (code, ARMCOND_GE, 8);
3174 /* save the temp register */
3175 ARM_SUB_REG_IMM8 (code, ARMREG_SP, ARMREG_SP, 8);
3176 ARM_STFD (code, tmpreg, ARMREG_SP, 0);
3177 ARM_LDFD (code, tmpreg, ARMREG_PC, 12);
3178 ARM_FPA_ADFD (code, ins->dreg, ins->dreg, tmpreg);
3179 ARM_LDFD (code, tmpreg, ARMREG_SP, 0);
3180 ARM_ADD_REG_IMM8 (code, ARMREG_SP, ARMREG_SP, 8);
3181 /* skip the constant pool */
3184 *(int*)code = 0x41f00000;
3189 * ldfltd ftemp, [pc, #8] 0x41f00000 0x00000000
3190 * adfltd fdest, fdest, ftemp
3194 case OP_ICONV_TO_R4:
3195 ARM_FLTS (code, ins->dreg, ins->sreg1);
3197 case OP_ICONV_TO_R8:
3198 ARM_FLTD (code, ins->dreg, ins->sreg1);
3200 #elif defined(ARM_FPU_VFP)
3202 if (cfg->compile_aot) {
3203 ARM_FLDD (code, ins->dreg, ARMREG_PC, 0);
3205 *(guint32*)code = ((guint32*)(ins->inst_p0))[0];
3207 *(guint32*)code = ((guint32*)(ins->inst_p0))[1];
3210 /* FIXME: we can optimize the imm load by dealing with part of
3211 * the displacement in LDFD (aligning to 512).
3213 code = mono_arm_emit_load_imm (code, ARMREG_LR, (guint32)ins->inst_p0);
3214 ARM_FLDD (code, ins->dreg, ARMREG_LR, 0);
3218 if (cfg->compile_aot) {
3219 ARM_FLDS (code, ins->dreg, ARMREG_PC, 0);
3221 *(guint32*)code = ((guint32*)(ins->inst_p0))[0];
3223 ARM_CVTS (code, ins->dreg, ins->dreg);
3225 code = mono_arm_emit_load_imm (code, ARMREG_LR, (guint32)ins->inst_p0);
3226 ARM_FLDS (code, ins->dreg, ARMREG_LR, 0);
3227 ARM_CVTS (code, ins->dreg, ins->dreg);
3230 case OP_STORER8_MEMBASE_REG:
3231 g_assert (arm_is_fpimm8 (ins->inst_offset));
3232 ARM_FSTD (code, ins->sreg1, ins->inst_destbasereg, ins->inst_offset);
3234 case OP_LOADR8_MEMBASE:
3235 g_assert (arm_is_fpimm8 (ins->inst_offset));
3236 ARM_FLDD (code, ins->dreg, ins->inst_basereg, ins->inst_offset);
3238 case OP_STORER4_MEMBASE_REG:
3239 g_assert (arm_is_fpimm8 (ins->inst_offset));
3240 ARM_FSTS (code, ins->sreg1, ins->inst_destbasereg, ins->inst_offset);
3242 case OP_LOADR4_MEMBASE:
3243 g_assert (arm_is_fpimm8 (ins->inst_offset));
3244 ARM_FLDS (code, ins->dreg, ins->inst_basereg, ins->inst_offset);
3246 case OP_ICONV_TO_R_UN: {
3247 g_assert_not_reached ();
3250 case OP_ICONV_TO_R4:
3251 g_assert_not_reached ();
3252 //ARM_FLTS (code, ins->dreg, ins->sreg1);
3254 case OP_ICONV_TO_R8:
3255 g_assert_not_reached ();
3256 //ARM_FLTD (code, ins->dreg, ins->sreg1);
3259 case OP_FCONV_TO_I1:
3260 code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 1, TRUE);
3262 case OP_FCONV_TO_U1:
3263 code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 1, FALSE);
3265 case OP_FCONV_TO_I2:
3266 code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 2, TRUE);
3268 case OP_FCONV_TO_U2:
3269 code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 2, FALSE);
3271 case OP_FCONV_TO_I4:
3273 code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 4, TRUE);
3275 case OP_FCONV_TO_U4:
3277 code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 4, FALSE);
3279 case OP_FCONV_TO_I8:
3280 case OP_FCONV_TO_U8:
3281 g_assert_not_reached ();
3282 /* Implemented as helper calls */
3284 case OP_LCONV_TO_R_UN:
3285 g_assert_not_reached ();
3286 /* Implemented as helper calls */
3288 case OP_LCONV_TO_OVF_I:
3289 case OP_LCONV_TO_OVF_I4_2: {
3290 guint32 *high_bit_not_set, *valid_negative, *invalid_negative, *valid_positive;
3292 * Valid ints: 0xffffffff:0x80000000 to 0x00000000:0x7fffffff
3295 ARM_CMP_REG_IMM8 (code, ins->sreg1, 0);
3296 high_bit_not_set = code;
3297 ARM_B_COND (code, ARMCOND_GE, 0); /*branch if bit 31 of the lower part is not set*/
3299 ARM_CMN_REG_IMM8 (code, ins->sreg2, 1); /*This have the same effect as CMP reg, 0xFFFFFFFF */
3300 valid_negative = code;
3301 ARM_B_COND (code, ARMCOND_EQ, 0); /*branch if upper part == 0xFFFFFFFF (lower part has bit 31 set) */
3302 invalid_negative = code;
3303 ARM_B_COND (code, ARMCOND_AL, 0);
3305 arm_patch (high_bit_not_set, code);
3307 ARM_CMP_REG_IMM8 (code, ins->sreg2, 0);
3308 valid_positive = code;
3309 ARM_B_COND (code, ARMCOND_EQ, 0); /*branch if upper part == 0 (lower part has bit 31 clear)*/
3311 arm_patch (invalid_negative, code);
3312 EMIT_COND_SYSTEM_EXCEPTION_FLAGS (ARMCOND_AL, "OverflowException");
3314 arm_patch (valid_negative, code);
3315 arm_patch (valid_positive, code);
3317 if (ins->dreg != ins->sreg1)
3318 ARM_MOV_REG_REG (code, ins->dreg, ins->sreg1);
3323 ARM_FPA_ADFD (code, ins->dreg, ins->sreg1, ins->sreg2);
3326 ARM_FPA_SUFD (code, ins->dreg, ins->sreg1, ins->sreg2);
3329 ARM_FPA_MUFD (code, ins->dreg, ins->sreg1, ins->sreg2);
3332 ARM_FPA_DVFD (code, ins->dreg, ins->sreg1, ins->sreg2);
3335 ARM_MNFD (code, ins->dreg, ins->sreg1);
3337 #elif defined(ARM_FPU_VFP)
3339 ARM_VFP_ADDD (code, ins->dreg, ins->sreg1, ins->sreg2);
3342 ARM_VFP_SUBD (code, ins->dreg, ins->sreg1, ins->sreg2);
3345 ARM_VFP_MULD (code, ins->dreg, ins->sreg1, ins->sreg2);
3348 ARM_VFP_DIVD (code, ins->dreg, ins->sreg1, ins->sreg2);
3351 ARM_NEGD (code, ins->dreg, ins->sreg1);
3356 g_assert_not_reached ();
3360 ARM_FCMP (code, ARM_FPA_CMF, ins->sreg1, ins->sreg2);
3361 #elif defined(ARM_FPU_VFP)
3362 ARM_CMPD (code, ins->sreg1, ins->sreg2);
3367 ARM_FCMP (code, ARM_FPA_CMF, ins->sreg1, ins->sreg2);
3368 #elif defined(ARM_FPU_VFP)
3369 ARM_CMPD (code, ins->sreg1, ins->sreg2);
3371 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 0, ARMCOND_NE);
3372 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_EQ);
3376 ARM_FCMP (code, ARM_FPA_CMF, ins->sreg1, ins->sreg2);
3377 #elif defined(ARM_FPU_VFP)
3378 ARM_CMPD (code, ins->sreg1, ins->sreg2);
3380 ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
3381 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_MI);
3385 ARM_FCMP (code, ARM_FPA_CMF, ins->sreg1, ins->sreg2);
3386 #elif defined(ARM_FPU_VFP)
3387 ARM_CMPD (code, ins->sreg1, ins->sreg2);
3389 ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
3390 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_MI);
3391 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_VS);
3396 ARM_FCMP (code, ARM_FPA_CMF, ins->sreg2, ins->sreg1);
3397 #elif defined(ARM_FPU_VFP)
3398 ARM_CMPD (code, ins->sreg2, ins->sreg1);
3400 ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
3401 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_MI);
3406 ARM_FCMP (code, ARM_FPA_CMF, ins->sreg2, ins->sreg1);
3407 #elif defined(ARM_FPU_VFP)
3408 ARM_CMPD (code, ins->sreg2, ins->sreg1);
3410 ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
3411 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_MI);
3412 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_VS);
3414 /* ARM FPA flags table:
3415 * N Less than ARMCOND_MI
3416 * Z Equal ARMCOND_EQ
3417 * C Greater Than or Equal ARMCOND_CS
3418 * V Unordered ARMCOND_VS
3421 EMIT_COND_BRANCH (ins, OP_IBEQ - OP_IBEQ);
3424 EMIT_COND_BRANCH (ins, OP_IBNE_UN - OP_IBEQ);
3427 EMIT_COND_BRANCH_FLAGS (ins, ARMCOND_MI); /* N set */
3430 EMIT_COND_BRANCH_FLAGS (ins, ARMCOND_VS); /* V set */
3431 EMIT_COND_BRANCH_FLAGS (ins, ARMCOND_MI); /* N set */
3437 g_assert_not_reached ();
3440 EMIT_COND_BRANCH_FLAGS (ins, ARMCOND_CS);
3443 EMIT_COND_BRANCH_FLAGS (ins, ARMCOND_VS); /* V set */
3444 EMIT_COND_BRANCH_FLAGS (ins, ARMCOND_GE);
3449 if (ins->dreg != ins->sreg1)
3450 ARM_MVFD (code, ins->dreg, ins->sreg1);
3452 g_assert_not_reached ();
3457 g_warning ("unknown opcode %s in %s()\n", mono_inst_name (ins->opcode), __FUNCTION__);
3458 g_assert_not_reached ();
3461 if ((cfg->opt & MONO_OPT_BRANCH) && ((code - cfg->native_code - offset) > max_len)) {
3462 g_warning ("wrong maximal instruction length of instruction %s (expected %d, got %d)",
3463 mono_inst_name (ins->opcode), max_len, code - cfg->native_code - offset);
3464 g_assert_not_reached ();
3470 last_offset = offset;
3473 cfg->code_len = code - cfg->native_code;
3476 #endif /* DISABLE_JIT */
3478 #if defined(__ARM_EABI__) && defined(__linux__)
3479 void __aeabi_read_tp (void);
3483 mono_arch_register_lowlevel_calls (void)
3485 /* The signature doesn't matter */
3486 mono_register_jit_icall (mono_arm_throw_exception, "mono_arm_throw_exception", mono_create_icall_signature ("void"), TRUE);
3488 #if defined(__ARM_EABI__) && defined(__linux__)
3489 mono_register_jit_icall (__aeabi_read_tp, "__aeabi_read_tp", mono_create_icall_signature ("void"), TRUE);
3493 #define patch_lis_ori(ip,val) do {\
3494 guint16 *__lis_ori = (guint16*)(ip); \
3495 __lis_ori [1] = (((guint32)(val)) >> 16) & 0xffff; \
3496 __lis_ori [3] = ((guint32)(val)) & 0xffff; \
3500 mono_arch_patch_code (MonoMethod *method, MonoDomain *domain, guint8 *code, MonoJumpInfo *ji, gboolean run_cctors)
3502 MonoJumpInfo *patch_info;
3503 gboolean compile_aot = !run_cctors;
3505 for (patch_info = ji; patch_info; patch_info = patch_info->next) {
3506 unsigned char *ip = patch_info->ip.i + code;
3507 const unsigned char *target;
3509 if (patch_info->type == MONO_PATCH_INFO_SWITCH && !compile_aot) {
3510 gpointer *jt = (gpointer*)(ip + 8);
3512 /* jt is the inlined jump table, 2 instructions after ip
3513 * In the normal case we store the absolute addresses,
3514 * otherwise the displacements.
3516 for (i = 0; i < patch_info->data.table->table_size; i++)
3517 jt [i] = code + (int)patch_info->data.table->table [i];
3520 target = mono_resolve_patch_target (method, domain, code, patch_info, run_cctors);
3523 switch (patch_info->type) {
3524 case MONO_PATCH_INFO_BB:
3525 case MONO_PATCH_INFO_LABEL:
3528 /* No need to patch these */
3533 switch (patch_info->type) {
3534 case MONO_PATCH_INFO_IP:
3535 g_assert_not_reached ();
3536 patch_lis_ori (ip, ip);
3538 case MONO_PATCH_INFO_METHOD_REL:
3539 g_assert_not_reached ();
3540 *((gpointer *)(ip)) = code + patch_info->data.offset;
3542 case MONO_PATCH_INFO_METHODCONST:
3543 case MONO_PATCH_INFO_CLASS:
3544 case MONO_PATCH_INFO_IMAGE:
3545 case MONO_PATCH_INFO_FIELD:
3546 case MONO_PATCH_INFO_VTABLE:
3547 case MONO_PATCH_INFO_IID:
3548 case MONO_PATCH_INFO_SFLDA:
3549 case MONO_PATCH_INFO_LDSTR:
3550 case MONO_PATCH_INFO_TYPE_FROM_HANDLE:
3551 case MONO_PATCH_INFO_LDTOKEN:
3552 g_assert_not_reached ();
3553 /* from OP_AOTCONST : lis + ori */
3554 patch_lis_ori (ip, target);
3556 case MONO_PATCH_INFO_R4:
3557 case MONO_PATCH_INFO_R8:
3558 g_assert_not_reached ();
3559 *((gconstpointer *)(ip + 2)) = patch_info->data.target;
3561 case MONO_PATCH_INFO_EXC_NAME:
3562 g_assert_not_reached ();
3563 *((gconstpointer *)(ip + 1)) = patch_info->data.name;
3565 case MONO_PATCH_INFO_NONE:
3566 case MONO_PATCH_INFO_BB_OVF:
3567 case MONO_PATCH_INFO_EXC_OVF:
3568 /* everything is dealt with at epilog output time */
3573 arm_patch (ip, target);
3578 * Stack frame layout:
3580 * ------------------- fp
3581 * MonoLMF structure or saved registers
3582 * -------------------
3584 * -------------------
3586 * -------------------
3587 * optional 8 bytes for tracing
3588 * -------------------
3589 * param area size is cfg->param_area
3590 * ------------------- sp
3593 mono_arch_emit_prolog (MonoCompile *cfg)
3595 MonoMethod *method = cfg->method;
3597 MonoMethodSignature *sig;
3599 int alloc_size, pos, max_offset, i, rot_amount;
3606 if (mono_jit_trace_calls != NULL && mono_trace_eval (method))
3609 sig = mono_method_signature (method);
3610 cfg->code_size = 256 + sig->param_count * 20;
3611 code = cfg->native_code = g_malloc (cfg->code_size);
3613 ARM_MOV_REG_REG (code, ARMREG_IP, ARMREG_SP);
3615 alloc_size = cfg->stack_offset;
3618 if (!method->save_lmf) {
3619 ARM_PUSH (code, (cfg->used_int_regs | (1 << ARMREG_IP) | (1 << ARMREG_LR)));
3620 prev_sp_offset = 8; /* ip and lr */
3621 for (i = 0; i < 16; ++i) {
3622 if (cfg->used_int_regs & (1 << i))
3623 prev_sp_offset += 4;
3626 ARM_PUSH (code, 0x5ff0);
3627 prev_sp_offset = 4 * 10; /* all but r0-r3, sp and pc */
3628 pos += sizeof (MonoLMF) - prev_sp_offset;
3632 // align to MONO_ARCH_FRAME_ALIGNMENT bytes
3633 if (alloc_size & (MONO_ARCH_FRAME_ALIGNMENT - 1)) {
3634 alloc_size += MONO_ARCH_FRAME_ALIGNMENT - 1;
3635 alloc_size &= ~(MONO_ARCH_FRAME_ALIGNMENT - 1);
3638 /* the stack used in the pushed regs */
3639 if (prev_sp_offset & 4)
3641 cfg->stack_usage = alloc_size;
3643 if ((i = mono_arm_is_rotated_imm8 (alloc_size, &rot_amount)) >= 0) {
3644 ARM_SUB_REG_IMM (code, ARMREG_SP, ARMREG_SP, i, rot_amount);
3646 code = mono_arm_emit_load_imm (code, ARMREG_IP, alloc_size);
3647 ARM_SUB_REG_REG (code, ARMREG_SP, ARMREG_SP, ARMREG_IP);
3650 if (cfg->frame_reg != ARMREG_SP)
3651 ARM_MOV_REG_REG (code, cfg->frame_reg, ARMREG_SP);
3652 //g_print ("prev_sp_offset: %d, alloc_size:%d\n", prev_sp_offset, alloc_size);
3653 prev_sp_offset += alloc_size;
3655 /* compute max_offset in order to use short forward jumps
3656 * we could skip doing it on arm because the immediate displacement
3657 * for jumps is large enough, it may be useful later for constant pools
3660 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
3661 MonoInst *ins = bb->code;
3662 bb->max_offset = max_offset;
3664 if (cfg->prof_options & MONO_PROFILE_COVERAGE)
3667 MONO_BB_FOR_EACH_INS (bb, ins)
3668 max_offset += ((guint8 *)ins_get_spec (ins->opcode))[MONO_INST_LEN];
3671 /* store runtime generic context */
3672 if (cfg->rgctx_var) {
3673 MonoInst *ins = cfg->rgctx_var;
3675 g_assert (ins->opcode == OP_REGOFFSET);
3677 if (arm_is_imm12 (ins->inst_offset)) {
3678 ARM_STR_IMM (code, MONO_ARCH_RGCTX_REG, ins->inst_basereg, ins->inst_offset);
3680 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_offset);
3681 ARM_STR_REG_REG (code, MONO_ARCH_RGCTX_REG, ins->inst_basereg, ARMREG_LR);
3685 /* load arguments allocated to register from the stack */
3688 cinfo = calculate_sizes (sig, sig->pinvoke);
3690 if (MONO_TYPE_ISSTRUCT (sig->ret)) {
3691 ArgInfo *ainfo = &cinfo->ret;
3692 inst = cfg->vret_addr;
3693 g_assert (arm_is_imm12 (inst->inst_offset));
3694 ARM_STR_IMM (code, ainfo->reg, inst->inst_basereg, inst->inst_offset);
3696 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
3697 ArgInfo *ainfo = cinfo->args + i;
3698 inst = cfg->args [pos];
3700 if (cfg->verbose_level > 2)
3701 g_print ("Saving argument %d (type: %d)\n", i, ainfo->regtype);
3702 if (inst->opcode == OP_REGVAR) {
3703 if (ainfo->regtype == RegTypeGeneral)
3704 ARM_MOV_REG_REG (code, inst->dreg, ainfo->reg);
3705 else if (ainfo->regtype == RegTypeFP) {
3706 g_assert_not_reached ();
3707 } else if (ainfo->regtype == RegTypeBase) {
3708 if (arm_is_imm12 (prev_sp_offset + ainfo->offset)) {
3709 ARM_LDR_IMM (code, inst->dreg, ARMREG_SP, (prev_sp_offset + ainfo->offset));
3711 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
3712 ARM_LDR_REG_REG (code, inst->dreg, ARMREG_SP, ARMREG_IP);
3715 g_assert_not_reached ();
3717 if (cfg->verbose_level > 2)
3718 g_print ("Argument %d assigned to register %s\n", pos, mono_arch_regname (inst->dreg));
3720 /* the argument should be put on the stack: FIXME handle size != word */
3721 if (ainfo->regtype == RegTypeGeneral) {
3722 switch (ainfo->size) {
3724 if (arm_is_imm12 (inst->inst_offset))
3725 ARM_STRB_IMM (code, ainfo->reg, inst->inst_basereg, inst->inst_offset);
3727 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
3728 ARM_STRB_REG_REG (code, ainfo->reg, inst->inst_basereg, ARMREG_IP);
3732 if (arm_is_imm8 (inst->inst_offset)) {
3733 ARM_STRH_IMM (code, ainfo->reg, inst->inst_basereg, inst->inst_offset);
3735 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
3736 ARM_STRH_REG_REG (code, ainfo->reg, inst->inst_basereg, ARMREG_IP);
3740 g_assert (arm_is_imm12 (inst->inst_offset));
3741 ARM_STR_IMM (code, ainfo->reg, inst->inst_basereg, inst->inst_offset);
3742 g_assert (arm_is_imm12 (inst->inst_offset + 4));
3743 ARM_STR_IMM (code, ainfo->reg + 1, inst->inst_basereg, inst->inst_offset + 4);
3746 if (arm_is_imm12 (inst->inst_offset)) {
3747 ARM_STR_IMM (code, ainfo->reg, inst->inst_basereg, inst->inst_offset);
3749 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
3750 ARM_STR_REG_REG (code, ainfo->reg, inst->inst_basereg, ARMREG_IP);
3754 } else if (ainfo->regtype == RegTypeBaseGen) {
3755 g_assert (arm_is_imm12 (prev_sp_offset + ainfo->offset));
3756 g_assert (arm_is_imm12 (inst->inst_offset));
3757 ARM_LDR_IMM (code, ARMREG_LR, ARMREG_SP, (prev_sp_offset + ainfo->offset));
3758 ARM_STR_IMM (code, ARMREG_LR, inst->inst_basereg, inst->inst_offset + 4);
3759 ARM_STR_IMM (code, ARMREG_R3, inst->inst_basereg, inst->inst_offset);
3760 } else if (ainfo->regtype == RegTypeBase) {
3761 if (arm_is_imm12 (prev_sp_offset + ainfo->offset)) {
3762 ARM_LDR_IMM (code, ARMREG_LR, ARMREG_SP, (prev_sp_offset + ainfo->offset));
3764 code = mono_arm_emit_load_imm (code, ARMREG_IP, prev_sp_offset + ainfo->offset);
3765 ARM_LDR_REG_REG (code, ARMREG_LR, ARMREG_SP, ARMREG_IP);
3768 switch (ainfo->size) {
3770 if (arm_is_imm8 (inst->inst_offset)) {
3771 ARM_STRB_IMM (code, ARMREG_LR, inst->inst_basereg, inst->inst_offset);
3773 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
3774 ARM_STRB_REG_REG (code, ARMREG_LR, inst->inst_basereg, ARMREG_IP);
3778 if (arm_is_imm8 (inst->inst_offset)) {
3779 ARM_STRH_IMM (code, ARMREG_LR, inst->inst_basereg, inst->inst_offset);
3781 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
3782 ARM_STRH_REG_REG (code, ARMREG_LR, inst->inst_basereg, ARMREG_IP);
3786 if (arm_is_imm12 (inst->inst_offset)) {
3787 ARM_STR_IMM (code, ARMREG_LR, inst->inst_basereg, inst->inst_offset);
3789 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
3790 ARM_STR_REG_REG (code, ARMREG_LR, inst->inst_basereg, ARMREG_IP);
3792 if (arm_is_imm12 (prev_sp_offset + ainfo->offset + 4)) {
3793 ARM_LDR_IMM (code, ARMREG_LR, ARMREG_SP, (prev_sp_offset + ainfo->offset + 4));
3795 code = mono_arm_emit_load_imm (code, ARMREG_IP, prev_sp_offset + ainfo->offset + 4);
3796 ARM_LDR_REG_REG (code, ARMREG_LR, ARMREG_SP, ARMREG_IP);
3798 if (arm_is_imm12 (inst->inst_offset + 4)) {
3799 ARM_STR_IMM (code, ARMREG_LR, inst->inst_basereg, inst->inst_offset + 4);
3801 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset + 4);
3802 ARM_STR_REG_REG (code, ARMREG_LR, inst->inst_basereg, ARMREG_IP);
3806 if (arm_is_imm12 (inst->inst_offset)) {
3807 ARM_STR_IMM (code, ARMREG_LR, inst->inst_basereg, inst->inst_offset);
3809 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
3810 ARM_STR_REG_REG (code, ARMREG_LR, inst->inst_basereg, ARMREG_IP);
3814 } else if (ainfo->regtype == RegTypeFP) {
3815 g_assert_not_reached ();
3816 } else if (ainfo->regtype == RegTypeStructByVal) {
3817 int doffset = inst->inst_offset;
3821 size = mini_type_stack_size_full (cfg->generic_sharing_context, inst->inst_vtype, NULL, sig->pinvoke);
3822 for (cur_reg = 0; cur_reg < ainfo->size; ++cur_reg) {
3823 if (arm_is_imm12 (doffset)) {
3824 ARM_STR_IMM (code, ainfo->reg + cur_reg, inst->inst_basereg, doffset);
3826 code = mono_arm_emit_load_imm (code, ARMREG_IP, doffset);
3827 ARM_STR_REG_REG (code, ainfo->reg + cur_reg, inst->inst_basereg, ARMREG_IP);
3829 soffset += sizeof (gpointer);
3830 doffset += sizeof (gpointer);
3832 if (ainfo->vtsize) {
3833 /* FIXME: handle overrun! with struct sizes not multiple of 4 */
3834 //g_print ("emit_memcpy (prev_sp_ofs: %d, ainfo->offset: %d, soffset: %d)\n", prev_sp_offset, ainfo->offset, soffset);
3835 code = emit_memcpy (code, ainfo->vtsize * sizeof (gpointer), inst->inst_basereg, doffset, ARMREG_SP, prev_sp_offset + ainfo->offset);
3837 } else if (ainfo->regtype == RegTypeStructByAddr) {
3838 g_assert_not_reached ();
3839 /* FIXME: handle overrun! with struct sizes not multiple of 4 */
3840 code = emit_memcpy (code, ainfo->vtsize * sizeof (gpointer), inst->inst_basereg, inst->inst_offset, ainfo->reg, 0);
3842 g_assert_not_reached ();
3847 if (method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED) {
3848 code = mono_arm_emit_load_imm (code, ARMREG_R0, (guint32)cfg->domain);
3849 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_INTERNAL_METHOD,
3850 (gpointer)"mono_jit_thread_attach");
3851 code = emit_call_seq (cfg, code);
3854 if (method->save_lmf) {
3855 gboolean get_lmf_fast = FALSE;
3857 #if defined(__ARM_EABI__) && defined(__linux__)
3858 gint32 lmf_addr_tls_offset = mono_get_lmf_addr_tls_offset ();
3860 if (lmf_addr_tls_offset != -1) {
3861 get_lmf_fast = TRUE;
3863 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_INTERNAL_METHOD,
3864 (gpointer)"__aeabi_read_tp");
3865 code = emit_call_seq (cfg, code);
3867 ARM_LDR_IMM (code, ARMREG_R0, ARMREG_R0, lmf_addr_tls_offset);
3868 get_lmf_fast = TRUE;
3871 if (!get_lmf_fast) {
3872 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_INTERNAL_METHOD,
3873 (gpointer)"mono_get_lmf_addr");
3874 code = emit_call_seq (cfg, code);
3876 /* we build the MonoLMF structure on the stack - see mini-arm.h */
3877 /* lmf_offset is the offset from the previous stack pointer,
3878 * alloc_size is the total stack space allocated, so the offset
3879 * of MonoLMF from the current stack ptr is alloc_size - lmf_offset.
3880 * The pointer to the struct is put in r1 (new_lmf).
3881 * r2 is used as scratch
3882 * The callee-saved registers are already in the MonoLMF structure
3884 code = emit_big_add (code, ARMREG_R1, ARMREG_SP, alloc_size - lmf_offset);
3885 /* r0 is the result from mono_get_lmf_addr () */
3886 ARM_STR_IMM (code, ARMREG_R0, ARMREG_R1, G_STRUCT_OFFSET (MonoLMF, lmf_addr));
3887 /* new_lmf->previous_lmf = *lmf_addr */
3888 ARM_LDR_IMM (code, ARMREG_R2, ARMREG_R0, G_STRUCT_OFFSET (MonoLMF, previous_lmf));
3889 ARM_STR_IMM (code, ARMREG_R2, ARMREG_R1, G_STRUCT_OFFSET (MonoLMF, previous_lmf));
3890 /* *(lmf_addr) = r1 */
3891 ARM_STR_IMM (code, ARMREG_R1, ARMREG_R0, G_STRUCT_OFFSET (MonoLMF, previous_lmf));
3892 /* Skip method (only needed for trampoline LMF frames) */
3893 ARM_STR_IMM (code, ARMREG_SP, ARMREG_R1, G_STRUCT_OFFSET (MonoLMF, ebp));
3894 /* save the current IP */
3895 ARM_MOV_REG_REG (code, ARMREG_R2, ARMREG_PC);
3896 ARM_STR_IMM (code, ARMREG_R2, ARMREG_R1, G_STRUCT_OFFSET (MonoLMF, eip));
3900 code = mono_arch_instrument_prolog (cfg, mono_trace_enter_method, code, TRUE);
3902 cfg->code_len = code - cfg->native_code;
3903 g_assert (cfg->code_len < cfg->code_size);
/*
 * mono_arch_emit_epilog:
 * Emit the method epilog: optional leave-trace instrumentation, LMF
 * unlinking for save_lmf methods, frame teardown, and a final pop of
 * the callee-saved registers that loads PC to return.
 * NOTE(review): this listing has numbering gaps — closing braces and
 * some declarations (e.g. the `code` variable) are not visible here.
 */
3910 mono_arch_emit_epilog (MonoCompile *cfg)
3912 MonoMethod *method = cfg->method;
3913 int pos, i, rot_amount;
3914 int max_epilog_size = 16 + 20*4;
/* Grow the native code buffer by the worst-case epilog size before emitting. */
3917 if (cfg->method->save_lmf)
3918 max_epilog_size += 128;
3920 if (mono_jit_trace_calls != NULL)
3921 max_epilog_size += 50;
3923 if (cfg->prof_options & MONO_PROFILE_ENTER_LEAVE)
3924 max_epilog_size += 50;
3926 while (cfg->code_len + max_epilog_size > (cfg->code_size - 16)) {
3927 cfg->code_size *= 2;
3928 cfg->native_code = g_realloc (cfg->native_code, cfg->code_size);
3929 mono_jit_stats.code_reallocs++;
3933 * Keep in sync with OP_JMP
3935 code = cfg->native_code + cfg->code_len;
3937 if (mono_jit_trace_calls != NULL && mono_trace_eval (method)) {
3938 code = mono_arch_instrument_epilog (cfg, mono_trace_leave_method, code, TRUE);
3942 if (method->save_lmf) {
3944 /* all but r0-r3, sp and pc */
3945 pos += sizeof (MonoLMF) - (4 * 10);
/* Unlink this frame's MonoLMF from the per-thread LMF chain. */
3947 /* r2 contains the pointer to the current LMF */
3948 code = emit_big_add (code, ARMREG_R2, cfg->frame_reg, cfg->stack_usage - lmf_offset);
3949 /* ip = previous_lmf */
3950 ARM_LDR_IMM (code, ARMREG_IP, ARMREG_R2, G_STRUCT_OFFSET (MonoLMF, previous_lmf));
/* lr = lmf_addr (the per-thread slot holding the LMF list head) */
3952 ARM_LDR_IMM (code, ARMREG_LR, ARMREG_R2, G_STRUCT_OFFSET (MonoLMF, lmf_addr));
3953 /* *(lmf_addr) = previous_lmf */
3954 ARM_STR_IMM (code, ARMREG_IP, ARMREG_LR, G_STRUCT_OFFSET (MonoLMF, previous_lmf));
3955 /* FIXME: speedup: there is no actual need to restore the registers if
3956 * we didn't actually change them (idea from Zoltan).
3959 /* point sp at the registers to restore: 10 is 14 -4, because we skip r0-r3 */
3960 ARM_ADD_REG_IMM8 (code, ARMREG_SP, ARMREG_R2, (sizeof (MonoLMF) - 10 * sizeof (gulong)));
3961 ARM_POP_NWB (code, 0xaff0); /* restore ip to sp and lr to pc */
/* Non-LMF path: add the frame size back to sp, then pop saved regs + pc. */
3963 if ((i = mono_arm_is_rotated_imm8 (cfg->stack_usage, &rot_amount)) >= 0) {
3964 ARM_ADD_REG_IMM (code, ARMREG_SP, cfg->frame_reg, i, rot_amount);
3966 code = mono_arm_emit_load_imm (code, ARMREG_IP, cfg->stack_usage);
3967 ARM_ADD_REG_REG (code, ARMREG_SP, ARMREG_SP, ARMREG_IP);
3969 /* FIXME: add v4 thumb interworking support */
/* Popping into PC performs the actual return. */
3970 ARM_POP_NWB (code, cfg->used_int_regs | ((1 << ARMREG_SP) | (1 << ARMREG_PC)));
3973 cfg->code_len = code - cfg->native_code;
3975 g_assert (cfg->code_len < cfg->code_size);
3979 /* remove once throw_exception_by_name is eliminated */
3981 exception_id_by_name (const char *name)
3983 if (strcmp (name, "IndexOutOfRangeException") == 0)
3984 return MONO_EXC_INDEX_OUT_OF_RANGE;
3985 if (strcmp (name, "OverflowException") == 0)
3986 return MONO_EXC_OVERFLOW;
3987 if (strcmp (name, "ArithmeticException") == 0)
3988 return MONO_EXC_ARITHMETIC;
3989 if (strcmp (name, "DivideByZeroException") == 0)
3990 return MONO_EXC_DIVIDE_BY_ZERO;
3991 if (strcmp (name, "InvalidCastException") == 0)
3992 return MONO_EXC_INVALID_CAST;
3993 if (strcmp (name, "NullReferenceException") == 0)
3994 return MONO_EXC_NULL_REF;
3995 if (strcmp (name, "ArrayTypeMismatchException") == 0)
3996 return MONO_EXC_ARRAY_TYPE_MISMATCH;
3997 g_error ("Unknown intrinsic exception %s\n", name);
/*
 * mono_arch_emit_exceptions:
 * Append the out-of-line exception-throw stubs to the method's native
 * code and retarget the MONO_PATCH_INFO_EXC branches at them. Stubs
 * for the same exception type are emitted once and shared.
 * NOTE(review): this listing has numbering gaps — some braces and
 * declarations are not visible here.
 */
4002 mono_arch_emit_exceptions (MonoCompile *cfg)
4004 MonoJumpInfo *patch_info;
4007 const guint8* exc_throw_pos [MONO_EXC_INTRINS_NUM] = {NULL};
4008 guint8 exc_throw_found [MONO_EXC_INTRINS_NUM] = {0};
4009 int max_epilog_size = 50;
4011 /* count the number of exception infos */
4014 * make sure we have enough space for exceptions
/* First pass: reserve 32 bytes per distinct exception type that needs a stub. */
4016 for (patch_info = cfg->patch_info; patch_info; patch_info = patch_info->next) {
4017 if (patch_info->type == MONO_PATCH_INFO_EXC) {
4018 i = exception_id_by_name (patch_info->data.target);
4019 if (!exc_throw_found [i]) {
4020 max_epilog_size += 32;
4021 exc_throw_found [i] = TRUE;
4026 while (cfg->code_len + max_epilog_size > (cfg->code_size - 16)) {
4027 cfg->code_size *= 2;
4028 cfg->native_code = g_realloc (cfg->native_code, cfg->code_size);
4029 mono_jit_stats.code_reallocs++;
4032 code = cfg->native_code + cfg->code_len;
4034 /* add code to raise exceptions */
4035 for (patch_info = cfg->patch_info; patch_info; patch_info = patch_info->next) {
4036 switch (patch_info->type) {
4037 case MONO_PATCH_INFO_EXC: {
4038 MonoClass *exc_class;
4039 unsigned char *ip = patch_info->ip.i + cfg->native_code;
4041 i = exception_id_by_name (patch_info->data.target);
/* If a stub for this exception already exists, just branch to it. */
4042 if (exc_throw_pos [i]) {
4043 arm_patch (ip, exc_throw_pos [i]);
4044 patch_info->type = MONO_PATCH_INFO_NONE;
4047 exc_throw_pos [i] = code;
4049 arm_patch (ip, code);
4051 exc_class = mono_class_from_name (mono_defaults.corlib, "System", patch_info->data.name);
4052 g_assert (exc_class);
/* Stub: r1 = faulting return address, r0 = PC-relative load of the
 * exception type token, then the patch is repurposed to call the
 * mono_arch_throw_corlib_exception helper. */
4054 ARM_MOV_REG_REG (code, ARMREG_R1, ARMREG_LR);
4055 ARM_LDR_IMM (code, ARMREG_R0, ARMREG_PC, 0);
4056 patch_info->type = MONO_PATCH_INFO_INTERNAL_METHOD;
4057 patch_info->data.name = "mono_arch_throw_corlib_exception";
4058 patch_info->ip.i = code - cfg->native_code;
/* The type token is stored inline in the code stream for the LDR above. */
4060 *(guint32*)(gpointer)code = exc_class->type_token;
4070 cfg->code_len = code - cfg->native_code;
4072 g_assert (cfg->code_len < cfg->code_size);
4076 static gboolean tls_offset_inited = FALSE;
4079 mono_arch_setup_jit_tls_data (MonoJitTlsData *tls)
4081 if (!tls_offset_inited) {
4082 tls_offset_inited = TRUE;
4084 lmf_tls_offset = mono_get_lmf_tls_offset ();
4085 lmf_addr_tls_offset = mono_get_lmf_addr_tls_offset ();
4090 mono_arch_free_jit_tls_data (MonoJitTlsData *tls)
4095 mono_arch_emit_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
4102 mono_arch_print_tree (MonoInst *tree, int arity)
4108 mono_arch_get_domain_intrinsic (MonoCompile* cfg)
4110 return mono_get_domain_intrinsic (cfg);
4114 mono_arch_get_thread_intrinsic (MonoCompile* cfg)
4116 return mono_get_thread_intrinsic (cfg);
4120 mono_arch_get_patch_offset (guint8 *code)
/*
 * mono_arch_flush_register_windows:
 * Register windows are a SPARC concept; this is a no-op on ARM, kept
 * only to satisfy the arch interface.
 */
void
mono_arch_flush_register_windows (void)
{
}
4132 mono_arch_fixup_jinfo (MonoCompile *cfg)
4134 /* max encoded stack usage is 64KB * 4 */
4135 g_assert ((cfg->stack_usage & ~(0xffff << 2)) == 0);
4136 cfg->jit_info->used_regs |= cfg->stack_usage << 14;
4139 #ifdef MONO_ARCH_HAVE_IMT
/*
 * mono_arch_emit_imt_argument:
 * Arrange for the IMT/method identifier to reach the callee: in all
 * three cases a constant (AOT const, the imt_arg vreg, or the method
 * pointer) is forced into register V5 for the call.
 * NOTE(review): listing has numbering gaps — declarations (e.g. `ins`)
 * and some braces are not visible here.
 */
4142 mono_arch_emit_imt_argument (MonoCompile *cfg, MonoCallInst *call, MonoInst *imt_arg)
/* AOT: materialize the method as an AOT constant into a fresh vreg. */
4144 if (cfg->compile_aot) {
4145 int method_reg = mono_alloc_ireg (cfg);
4148 call->dynamic_imt_arg = TRUE;
4150 MONO_INST_NEW (cfg, ins, OP_AOTCONST);
4151 ins->dreg = method_reg;
4152 ins->inst_p0 = call->method;
4153 ins->inst_c1 = MONO_PATCH_INFO_METHODCONST;
4154 MONO_ADD_INS (cfg->cbb, ins);
4156 mono_call_inst_add_outarg_reg (cfg, call, method_reg, ARMREG_V5, FALSE);
/* Generic sharing: the caller supplies the IMT argument in imt_arg. */
4157 } else if (cfg->generic_context) {
4159 /* Always pass in a register for simplicity */
4160 call->dynamic_imt_arg = TRUE;
4162 cfg->uses_rgctx_reg = TRUE;
4165 mono_call_inst_add_outarg_reg (cfg, call, imt_arg->dreg, ARMREG_V5, FALSE);
/* JIT: pass the method pointer itself as a plain pointer constant. */
4168 int method_reg = mono_alloc_preg (cfg);
4170 MONO_INST_NEW (cfg, ins, OP_PCONST);
4171 ins->inst_p0 = call->method;
4172 ins->dreg = method_reg;
4173 MONO_ADD_INS (cfg->cbb, ins);
4175 mono_call_inst_add_outarg_reg (cfg, call, method_reg, ARMREG_V5, FALSE);
/*
 * mono_arch_find_imt_method:
 * Recover the IMT method identifier at an IMT call site: the value word
 * sits in the code stream right after a PC-relative LDR; a zero word
 * means AOT code, where the value is passed in V5 instead.
 * NOTE(review): there is a numbering gap right after the code_ptr
 * declaration — upstream adjusts code_ptr before the IS_LDR_PC check
 * and that line may be missing from this listing; verify.
 */
4181 mono_arch_find_imt_method (gpointer *regs, guint8 *code)
4183 guint32 *code_ptr = (guint32*)code;
4185 /* The IMT value is stored in the code stream right after the LDC instruction. */
4186 if (!IS_LDR_PC (code_ptr [0])) {
4187 g_warning ("invalid code stream, instruction before IMT value is not a LDC in %s() (code %p value 0: 0x%x -1: 0x%x -2: 0x%x)", __FUNCTION__, code, code_ptr [2], code_ptr [1], code_ptr [0]);
4188 g_assert (IS_LDR_PC (code_ptr [0]));
4190 if (code_ptr [1] == 0)
4191 /* This is AOTed code, the IMT method is in V5 */
4192 return (MonoMethod*)regs [ARMREG_V5];
4194 return (MonoMethod*) code_ptr [1];
/*
 * mono_arch_find_this_argument:
 * Recover the 'this' argument of the current call from the saved
 * register array by delegating to the generic calling-convention
 * helper. (Return type and braces are not visible in this listing.)
 */
4198 mono_arch_find_this_argument (gpointer *regs, MonoMethod *method, MonoGenericSharingContext *gsctx)
4200 return mono_arch_get_this_arg_from_call (gsctx, mono_method_signature (method), (gssize*)regs, NULL);
4204 mono_arch_find_static_call_vtable (gpointer *regs, guint8 *code)
4206 return (MonoVTable*) regs [MONO_ARCH_RGCTX_REG];
4209 #define ENABLE_WRONG_METHOD_CHECK 0
/* Byte sizes of the code chunks emitted by mono_arch_build_imt_thunk
 * (each ARM instruction is 4 bytes). */
4210 #define BASE_SIZE (6 * 4)
4211 #define BSEARCH_ENTRY_SIZE (4 * 4)
4212 #define CMP_SIZE (3 * 4)
4213 #define BRANCH_SIZE (1 * 4)
4214 #define CALL_SIZE (2 * 4)
4215 #define WMC_SIZE (5 * 4)
/* Signed byte distance B - A; truncating pointers to gint32 is fine on
 * 32-bit ARM only. */
4216 #define DISTANCE(A, B) (((gint32)(B)) - ((gint32)(A)))
/*
 * arm_emit_value_and_patch_ldr:
 * Emit a literal word at `code` and patch the 12-bit immediate of the
 * PC-relative LDR at `target` to address it.
 * NOTE(review): listing gaps hide statements of this function (the
 * store of `value` and the return); also, `delta` is a guint32 so the
 * `delta >= 0` half of the assert is a tautology — upstream should use
 * a signed type to catch negative distances.
 */
4219 arm_emit_value_and_patch_ldr (arminstr_t *code, arminstr_t *target, guint32 value)
4221 guint32 delta = DISTANCE (target, code);
4223 g_assert (delta >= 0 && delta <= 0xFFF);
4224 *target = *target | delta;
/*
 * mono_arch_build_imt_thunk:
 * Build the native IMT dispatch thunk for `vtable`: a sequence of
 * compare/branch chunks (with bsearch-style GE branches for interior
 * nodes) that matches the incoming IMT method in R0 against each
 * entry's key and jumps through the corresponding vtable slot.
 * NOTE(review): listing has numbering gaps — several braces, #endif
 * lines and declarations (e.g. `j`) are not visible here.
 */
4230 mono_arch_build_imt_thunk (MonoVTable *vtable, MonoDomain *domain, MonoIMTCheckItem **imt_entries, int count,
4231 gpointer fail_tramp)
4233 int size, i, extra_space = 0;
4234 arminstr_t *code, *start, *vtable_target = NULL;
4237 g_assert (!fail_tramp);
/* Pass 1: size each entry's chunk so the code buffer can be reserved. */
4239 for (i = 0; i < count; ++i) {
4240 MonoIMTCheckItem *item = imt_entries [i];
4241 if (item->is_equals) {
4242 g_assert (arm_is_imm12 (DISTANCE (vtable, &vtable->vtable[item->value.vtable_slot])));
4244 if (item->check_target_idx) {
4245 if (!item->compare_done)
4246 item->chunk_size += CMP_SIZE;
4247 item->chunk_size += BRANCH_SIZE;
4249 #if ENABLE_WRONG_METHOD_CHECK
4250 item->chunk_size += WMC_SIZE;
4253 item->chunk_size += CALL_SIZE;
4255 item->chunk_size += BSEARCH_ENTRY_SIZE;
4256 imt_entries [item->check_target_idx]->compare_done = TRUE;
4258 size += item->chunk_size;
4261 start = code = mono_code_manager_reserve (domain->code_mp, size);
4264 printf ("building IMT thunk for class %s %s entries %d code size %d code at %p end %p vtable %p\n", vtable->klass->name_space, vtable->klass->name, count, size, start, ((guint8*)start) + size, vtable);
4265 for (i = 0; i < count; ++i) {
4266 MonoIMTCheckItem *item = imt_entries [i];
4267 printf ("method %d (%p) %s vtable slot %p is_equals %d chunk size %d\n", i, item->key, item->key->name, &vtable->vtable [item->value.vtable_slot], item->is_equals, item->chunk_size);
/* Thunk prologue: save r0/r1, load the IMT value word (stored at lr-4
 * by the call site) into r0, and a PC-relative LDR (patched later) to
 * load the vtable address into ip. */
4271 ARM_PUSH2 (code, ARMREG_R0, ARMREG_R1);
4272 ARM_LDR_IMM (code, ARMREG_R0, ARMREG_LR, -4);
4273 vtable_target = code;
4274 ARM_LDR_IMM (code, ARMREG_IP, ARMREG_PC, 0);
4276 /* R0 == 0 means we are called from AOT code. In this case, V5 contains the IMT method */
4277 ARM_CMP_REG_IMM8 (code, ARMREG_R0, 0);
4278 ARM_MOV_REG_REG_COND (code, ARMREG_R0, ARMREG_V5, ARMCOND_EQ);
/* Pass 2: emit the per-entry compare/branch/jump chunks. */
4280 for (i = 0; i < count; ++i) {
4281 MonoIMTCheckItem *item = imt_entries [i];
4282 arminstr_t *imt_method = NULL;
4283 item->code_target = (guint8*)code;
4285 if (item->is_equals) {
4286 if (item->check_target_idx) {
4287 if (!item->compare_done) {
4289 ARM_LDR_IMM (code, ARMREG_R1, ARMREG_PC, 0);
4290 ARM_CMP_REG_REG (code, ARMREG_R0, ARMREG_R1);
4292 item->jmp_code = (guint8*)code;
4293 ARM_B_COND (code, ARMCOND_NE, 0);
/* Match: restore r0/r1 and jump through the vtable slot (via ip). */
4295 ARM_POP2 (code, ARMREG_R0, ARMREG_R1);
4296 ARM_LDR_IMM (code, ARMREG_PC, ARMREG_IP, DISTANCE (vtable, &vtable->vtable[item->value.vtable_slot]));
4298 /*Enable the commented code to assert on wrong method*/
4299 #if ENABLE_WRONG_METHOD_CHECK
4301 ARM_LDR_IMM (code, ARMREG_R1, ARMREG_PC, 0);
4302 ARM_CMP_REG_REG (code, ARMREG_R0, ARMREG_R1);
4303 ARM_B_COND (code, ARMCOND_NE, 1);
4305 ARM_POP2 (code, ARMREG_R0, ARMREG_R1);
4306 ARM_LDR_IMM (code, ARMREG_PC, ARMREG_IP, DISTANCE (vtable, &vtable->vtable[item->value.vtable_slot]));
4308 #if ENABLE_WRONG_METHOD_CHECK
/* Store the entry's key inline and patch the PC-relative LDR above. */
4314 code = arm_emit_value_and_patch_ldr (code, imt_method, (guint32)item->key);
4316 /*must emit after unconditional branch*/
4317 if (vtable_target) {
4318 code = arm_emit_value_and_patch_ldr (code, vtable_target, (guint32)vtable);
4319 item->chunk_size += 4;
4320 vtable_target = NULL;
4323 /*We reserve the space for bsearch IMT values after the first entry with an absolute jump*/
4325 code += extra_space;
/* Bsearch interior node: compare against the key and branch GE. */
4329 ARM_LDR_IMM (code, ARMREG_R1, ARMREG_PC, 0);
4330 ARM_CMP_REG_REG (code, ARMREG_R0, ARMREG_R1);
4332 item->jmp_code = (guint8*)code;
4333 ARM_B_COND (code, ARMCOND_GE, 0);
/* Pass 3: patch forward branches and backfill literal words for the
 * preceding non-equals entries. */
4338 for (i = 0; i < count; ++i) {
4339 MonoIMTCheckItem *item = imt_entries [i];
4340 if (item->jmp_code) {
4341 if (item->check_target_idx)
4342 arm_patch (item->jmp_code, imt_entries [item->check_target_idx]->code_target);
4344 if (i > 0 && item->is_equals) {
4346 arminstr_t *space_start = (arminstr_t*)(item->code_target + item->chunk_size);
4347 for (j = i - 1; j >= 0 && !imt_entries [j]->is_equals; --j) {
4348 space_start = arm_emit_value_and_patch_ldr (space_start, (arminstr_t*)imt_entries [j]->code_target, (guint32)imt_entries [j]->key);
4355 char *buff = g_strdup_printf ("thunk_for_class_%s_%s_entries_%d", vtable->klass->name_space, vtable->klass->name, count);
4356 mono_disassemble_code (NULL, (guint8*)start, size, buff);
/* New code must be flushed from the I-cache before it can execute. */
4361 mono_arch_flush_icache ((guint8*)start, size);
4362 mono_stats.imt_thunks_size += code - start;
4364 g_assert (DISTANCE (start, code) <= size);
4371 mono_arch_context_get_int_reg (MonoContext *ctx, int reg)
4373 return ctx->regs [reg];