2 * mini-arm.c: ARM backend for the Mono code generator
5 * Paolo Molaro (lupus@ximian.com)
6 * Dietmar Maurer (dietmar@ximian.com)
8 * (C) 2003 Ximian, Inc.
13 #include <mono/metadata/appdomain.h>
14 #include <mono/metadata/debug-helpers.h>
21 #include "mono/arch/arm/arm-fpa-codegen.h"
22 #elif defined(ARM_FPU_VFP)
23 #include "mono/arch/arm/arm-vfp-codegen.h"
26 /* This mutex protects architecture specific caches */
27 #define mono_mini_arch_lock() EnterCriticalSection (&mini_arch_mutex)
28 #define mono_mini_arch_unlock() LeaveCriticalSection (&mini_arch_mutex)
29 static CRITICAL_SECTION mini_arch_mutex;
/* CPU feature flags, written during startup detection (see mono_arch_cpu_optimizazions,
 * which parses /proc/cpuinfo) and read when selecting instruction sequences. */
31 static int v5_supported = 0;
32 static int thumb_supported = 0;
36 * floating point support: on ARM it is a mess, there are at least 3
37 * different setups, each of which is binary incompatible with the others.
38 * 1) FPA: old and ugly, but unfortunately what current distros use
39 *    the double binary format has the two words swapped. 8 double registers.
40 *    Implemented usually by kernel emulation.
41 * 2) softfloat: the compiler emulates all the fp ops. Usually uses the
42 *    ugly swapped double format (I guess a softfloat-vfp exists, too, though).
43 * 3) VFP: the new and actually sensible and useful FP support. Implemented
44 *    in HW or kernel-emulated, requires new tools. I think this is what Symbian uses.
46 * The plan is to write the FPA support first. softfloat can be tested in a chroot.
48 int mono_exc_esp_offset = 0;
/* Range checks for ARM immediate fields: 12-bit load/store offset,
 * 8-bit offset, and the scaled 8-bit offset used by FP load/store. */
50 #define arm_is_imm12(v) ((v) > -4096 && (v) < 4096)
51 #define arm_is_imm8(v) ((v) > -256 && (v) < 256)
52 #define arm_is_fpimm8(v) ((v) >= -1020 && (v) <= 1020)
/* Bit patterns used to recognize "ldr pc, [rX, #imm]" in emitted code
 * (see mono_arch_get_vcall_slot below). */
54 #define LDR_MASK ((0xf << ARMCOND_SHIFT) | (3 << 26) | (1 << 22) | (1 << 20) | (15 << 12))
55 #define LDR_PC_VAL ((ARMCOND_AL << ARMCOND_SHIFT) | (1 << 26) | (0 << 22) | (1 << 20) | (15 << 12))
56 #define IS_LDR_PC(val) (((val) & LDR_MASK) == LDR_PC_VAL)
/* Encodings of the call-setup instructions "add lr, pc, #4" and "mov lr, pc",
 * matched when walking back from a return address. */
58 #define ADD_LR_PC_4 ((ARMCOND_AL << ARMCOND_SHIFT) | (1 << 25) | (1 << 23) | (ARMREG_PC << 16) | (ARMREG_LR << 12) | 4)
59 #define MOV_LR_PC ((ARMCOND_AL << ARMCOND_SHIFT) | (1 << 24) | (0xa << 20) | (ARMREG_LR << 12) | ARMREG_PC)
/*
 * mono_arch_regname:
 * Map an integer register number to a human-readable name for debug output.
 * NOTE(review): this excerpt is missing lines (return type, table terminator,
 * fallback return); the visible bound check admits indices 0..15.
 */
63 mono_arch_regname (int reg)
65 static const char * rnames[] = {
66 "arm_r0", "arm_r1", "arm_r2", "arm_r3", "arm_v1",
67 "arm_v2", "arm_v3", "arm_v4", "arm_v5", "arm_v6",
68 "arm_v7", "arm_fp", "arm_ip", "arm_sp", "arm_lr",
71 if (reg >= 0 && reg < 16)
/*
 * mono_arch_fregname:
 * Map a floating-point register number to a printable name.
 * NOTE(review): lines missing from this excerpt (return type, last table row,
 * fallback return); the bound check admits indices 0..31.
 */
77 mono_arch_fregname (int reg)
79 static const char * rnames[] = {
80 "arm_f0", "arm_f1", "arm_f2", "arm_f3", "arm_f4",
81 "arm_f5", "arm_f6", "arm_f7", "arm_f8", "arm_f9",
82 "arm_f10", "arm_f11", "arm_f12", "arm_f13", "arm_f14",
83 "arm_f15", "arm_f16", "arm_f17", "arm_f18", "arm_f19",
84 "arm_f20", "arm_f21", "arm_f22", "arm_f23", "arm_f24",
85 "arm_f25", "arm_f26", "arm_f27", "arm_f28", "arm_f29",
88 if (reg >= 0 && reg < 32)
/*
 * emit_big_add:
 * Emit "dreg = sreg + imm" for an arbitrary 32-bit immediate: use a single
 * ADD when imm fits a rotated 8-bit encoding, otherwise materialize imm into
 * dreg first and add sreg (hence the dreg != sreg assertion).
 * NOTE(review): declarations of imm8/rot_amount and the closing code are not
 * visible in this excerpt.
 */
94 emit_big_add (guint8 *code, int dreg, int sreg, int imm)
97 if ((imm8 = mono_arm_is_rotated_imm8 (imm, &rot_amount)) >= 0) {
98 ARM_ADD_REG_IMM (code, dreg, sreg, imm8, rot_amount);
101 g_assert (dreg != sreg);
102 code = mono_arm_emit_load_imm (code, dreg, imm);
103 ARM_ADD_REG_REG (code, dreg, dreg, sreg);
/*
 * emit_memcpy:
 * Emit a word-by-word copy of `size` bytes from sreg+soffset to dreg+doffset.
 * Large copies (> 4 pointers) use a countdown loop clobbering r0-r3; smaller
 * ones are unrolled ldr/str pairs through LR. Offsets that do not fit a
 * 12-bit immediate are folded into r0/r1 with emit_big_add first.
 * NOTE(review): assumes word-aligned size/addresses — the final
 * g_assert (size == 0) requires size to be a multiple of the word size
 * (non-word tails are not visible in this excerpt).
 */
108 emit_memcpy (guint8 *code, int size, int dreg, int doffset, int sreg, int soffset)
110 /* we can use r0-r3, since this is called only for incoming args on the stack */
111 if (size > sizeof (gpointer) * 4) {
113 code = emit_big_add (code, ARMREG_R0, sreg, soffset);
114 code = emit_big_add (code, ARMREG_R1, dreg, doffset);
/* r2 = remaining byte count; loop body copies one word and decrements by 4 */
115 start_loop = code = mono_arm_emit_load_imm (code, ARMREG_R2, size);
116 ARM_LDR_IMM (code, ARMREG_R3, ARMREG_R0, 0);
117 ARM_STR_IMM (code, ARMREG_R3, ARMREG_R1, 0);
118 ARM_ADD_REG_IMM8 (code, ARMREG_R0, ARMREG_R0, 4);
119 ARM_ADD_REG_IMM8 (code, ARMREG_R1, ARMREG_R1, 4);
120 ARM_SUBS_REG_IMM8 (code, ARMREG_R2, ARMREG_R2, 4);
/* branch back while r2 != 0; target patched immediately below */
121 ARM_B_COND (code, ARMCOND_NE, 0);
122 arm_patch (code - 4, start_loop);
125 if (arm_is_imm12 (doffset) && arm_is_imm12 (doffset + size) &&
126 arm_is_imm12 (soffset) && arm_is_imm12 (soffset + size)) {
128 ARM_LDR_IMM (code, ARMREG_LR, sreg, soffset);
129 ARM_STR_IMM (code, ARMREG_LR, dreg, doffset);
/* offsets too large for imm12: rebase into r0/r1 and restart at offset 0 */
135 code = emit_big_add (code, ARMREG_R0, sreg, soffset);
136 code = emit_big_add (code, ARMREG_R1, dreg, doffset);
137 doffset = soffset = 0;
139 ARM_LDR_IMM (code, ARMREG_LR, ARMREG_R0, soffset);
140 ARM_STR_IMM (code, ARMREG_LR, ARMREG_R1, doffset);
146 g_assert (size == 0);
/*
 * emit_call_reg:
 * Emit an indirect call through `reg`: BLX when available, otherwise the
 * classic "mov lr, pc; mov pc, reg" pair (lr ends up at the instruction
 * after the mov pc). NOTE(review): the feature test selecting between the
 * two forms is not visible in this excerpt (presumably v5_supported).
 */
151 emit_call_reg (guint8 *code, int reg)
154 ARM_BLX_REG (code, reg);
156 ARM_MOV_REG_REG (code, ARMREG_LR, ARMREG_PC);
160 ARM_MOV_REG_REG (code, ARMREG_PC, reg);
/*
 * emit_call_seq:
 * Emit a call whose target is patched later. For dynamic methods the target
 * address is stored as an inline literal loaded via pc-relative LDR (the NULL
 * written at *code is the placeholder), then called through IP.
 */
166 emit_call_seq (MonoCompile *cfg, guint8 *code)
168 if (cfg->method->dynamic) {
169 ARM_LDR_IMM (code, ARMREG_IP, ARMREG_PC, 0);
171 *(gpointer*)code = NULL;
173 code = emit_call_reg (code, ARMREG_IP);
/*
 * emit_move_return_value:
 * After a call instruction, move the hardware return value into ins->dreg.
 * Visible case: FP calls return in FPA register F0 and are copied out with
 * MVFD when the destination differs. Other opcode cases are not visible in
 * this excerpt.
 */
181 emit_move_return_value (MonoCompile *cfg, MonoInst *ins, guint8 *code)
183 switch (ins->opcode) {
186 case OP_FCALL_MEMBASE:
188 if (ins->dreg != ARM_FPA_F0)
189 ARM_MVFD (code, ins->dreg, ARM_FPA_F0);
198 * mono_arch_get_argument_info:
199 * @csig: a method signature
200 * @param_count: the number of parameters to consider
201 * @arg_info: an array to store the result infos
203 * Gathers information on parameters such as size, alignment and
204 * padding. arg_info should be large enough to hold param_count + 1 entries.
206 * Returns the size of the activation frame.
209 mono_arch_get_argument_info (MonoMethodSignature *csig, int param_count, MonoJitArgumentInfo *arg_info)
211 int k, frame_size = 0;
212 guint32 size, align, pad;
/* struct returns consume one hidden pointer slot in the frame */
215 if (MONO_TYPE_ISSTRUCT (csig->ret)) {
216 frame_size += sizeof (gpointer);
220 arg_info [0].offset = offset;
223 frame_size += sizeof (gpointer);
227 arg_info [0].size = frame_size;
229 for (k = 0; k < param_count; k++) {
230 size = mini_type_stack_size_full (NULL, csig->params [k], &align, csig->pinvoke);
232 /* ignore alignment for now */
/* pad the frame up to this argument's alignment; relies on align being a power of two */
235 frame_size += pad = (align - (frame_size & (align - 1))) & (align - 1);
236 arg_info [k].pad = pad;
238 arg_info [k + 1].pad = 0;
239 arg_info [k + 1].size = size;
241 arg_info [k + 1].offset = offset;
/* final round-up to the ABI frame alignment; leftover pad is charged to the last entry */
245 align = MONO_ARCH_FRAME_ALIGNMENT;
246 frame_size += pad = (align - (frame_size & (align - 1))) & (align - 1);
247 arg_info [k].pad = pad;
/*
 * decode_vcall_slot_from_ldr:
 * Given the encoding of an "ldr pc, [rX, #imm]" instruction, extract the
 * base register (bits 16-19) and the 12-bit offset, negating the offset when
 * the U bit (bit 23) is clear. The decoded displacement is returned via
 * *displacement; the vtable base is presumably looked up in `regs` on the
 * missing lines of this excerpt.
 */
253 decode_vcall_slot_from_ldr (guint32 ldr, gpointer *regs, int *displacement)
257 reg = (ldr >> 16 ) & 0xf;
258 offset = ldr & 0xfff;
259 if (((ldr >> 23) & 1) == 0) /*U bit, 0 means negative and 1 positive*/
261 /*g_print ("found vcall at r%d + %d for code at %p 0x%x\n", reg, offset, code, *code);*/
264 *displacement = offset;
/*
 * mono_arch_get_vcall_slot:
 * Walk backwards from a return address to find the "ldr pc, [rX, #imm]"
 * that performed a virtual/interface call, so the vtable slot can be located.
 */
269 mono_arch_get_vcall_slot (guint8 *code_ptr, gpointer *regs, int *displacement)
271 guint32* code = (guint32*)code_ptr;
273 /* Locate the address of the method-specific trampoline. The call using
274 the vtable slot that took the processing flow to 'arch_create_jit_trampoline'
275 looks something like this:
284 The call sequence could be also:
287 function pointer literal
291 Note that on ARM5+ we can use one instruction instead of the last two.
292 Therefore, we need to locate the 'ldr rA' instruction to know which
293 register was used to hold the method address.
296 /* This is the instruction after "ldr pc, xxx", "mov pc, xxx" or "bl xxx"; it could be either the IMT value or some other instruction */
299 /* Three possible code sequences can happen here:
303 * ldr pc, [rX - #offset]
309 * ldr pc, [rX - #offset]
311 * direct branch with bl:
315 * direct branch with mov:
319 * We only need to identify interface and virtual calls, the others can be ignored.
/* "add lr, pc, #4" immediately before the ldr marks the two-instruction call form */
322 if (IS_LDR_PC (code [-1]) && code [-2] == ADD_LR_PC_4)
323 return decode_vcall_slot_from_ldr (code [-1], regs, displacement);
/* "mov lr, pc" form: the ldr is at the return address itself */
325 if (IS_LDR_PC (code [0]) && code [-1] == MOV_LR_PC)
326 return decode_vcall_slot_from_ldr (code [0], regs, displacement);
/*
 * mono_arch_get_vcall_slot_addr:
 * Convenience wrapper: resolve the vtable base and displacement for the call
 * site at `code` and return the address of the vtable slot itself.
 * NOTE(review): the NULL check on `vt` is not visible in this excerpt.
 */
332 mono_arch_get_vcall_slot_addr (guint8* code, gpointer *regs)
336 vt = mono_arch_get_vcall_slot (code, regs, &displacement);
339 return (gpointer*)((char*)vt + displacement);
/* Maximum parameter count for which a parameter-sliding delegate invoke
 * thunk is generated (r1-r3 can absorb at most 3 slid arguments). */
342 #define MAX_ARCH_DELEGATE_PARAMS 3
/*
 * mono_arch_get_delegate_invoke_impl:
 * Return (and cache, under the arch lock) a small trampoline that invokes a
 * delegate. has_target: replace `this` (r0) with delegate->target and jump to
 * method_ptr. Otherwise: slide args down one register and jump to method_ptr.
 * Only register-sized parameters and non-struct returns are supported.
 */
345 mono_arch_get_delegate_invoke_impl (MonoMethodSignature *sig, gboolean has_target)
347 guint8 *code, *start;
349 /* FIXME: Support more cases */
350 if (MONO_TYPE_ISSTRUCT (sig->ret))
354 static guint8* cached = NULL;
355 mono_mini_arch_lock ();
357 mono_mini_arch_unlock ();
361 start = code = mono_global_codeman_reserve (12);
363 /* Replace the this argument with the target */
364 ARM_LDR_IMM (code, ARMREG_IP, ARMREG_R0, G_STRUCT_OFFSET (MonoDelegate, method_ptr));
365 ARM_LDR_IMM (code, ARMREG_R0, ARMREG_R0, G_STRUCT_OFFSET (MonoDelegate, target));
366 ARM_MOV_REG_REG (code, ARMREG_PC, ARMREG_IP);
368 g_assert ((code - start) <= 12);
/* NOTE(review): `code` points past the emitted sequence here; flushing the
 * icache from `code` rather than `start` looks wrong — confirm against the
 * full source. Same question applies to the second flush below. */
370 mono_arch_flush_icache (code, 12);
372 mono_mini_arch_unlock ();
375 static guint8* cache [MAX_ARCH_DELEGATE_PARAMS + 1] = {NULL};
378 if (sig->param_count > MAX_ARCH_DELEGATE_PARAMS)
380 for (i = 0; i < sig->param_count; ++i)
381 if (!mono_is_regsize_var (sig->params [i]))
384 mono_mini_arch_lock ();
385 code = cache [sig->param_count];
387 mono_mini_arch_unlock ();
391 size = 8 + sig->param_count * 4;
392 start = code = mono_global_codeman_reserve (size);
394 ARM_LDR_IMM (code, ARMREG_IP, ARMREG_R0, G_STRUCT_OFFSET (MonoDelegate, method_ptr));
395 /* slide down the arguments */
396 for (i = 0; i < sig->param_count; ++i) {
397 ARM_MOV_REG_REG (code, (ARMREG_R0 + i), (ARMREG_R0 + i + 1));
399 ARM_MOV_REG_REG (code, ARMREG_PC, ARMREG_IP);
401 g_assert ((code - start) <= size);
403 mono_arch_flush_icache (code, size);
404 cache [sig->param_count] = start;
405 mono_mini_arch_unlock ();
/*
 * mono_arch_get_this_arg_from_call:
 * Recover the `this` pointer from the saved argument registers at a call
 * site: it is in r1 when r0 is occupied by the hidden struct-return pointer,
 * otherwise in r0.
 */
413 mono_arch_get_this_arg_from_call (MonoGenericSharingContext *gsctx, MonoMethodSignature *sig, gssize *regs, guint8 *code)
415 /* FIXME: handle returning a struct */
416 if (MONO_TYPE_ISSTRUCT (sig->ret))
417 return (gpointer)regs [ARMREG_R1];
418 return (gpointer)regs [ARMREG_R0];
422 * Initialize the cpu to execute managed code.
425 mono_arch_cpu_init (void)
430 * Initialize architecture specific code.
/* Sets up the mutex guarding the arch-specific caches (delegate thunk cache etc.). */
433 mono_arch_init (void)
435 InitializeCriticalSection (&mini_arch_mutex);
439 * Cleanup architecture specific code.
442 mono_arch_cleanup (void)
447 * This function returns the optimizations supported on this cpu.
/* Also performs CPU feature detection by parsing /proc/cpuinfo:
 * "Processor ... (v5|v6|v7..." sets v5_supported, a "thumb" entry in the
 * Features line sets thumb_supported. (The function name's spelling is a
 * long-standing part of the public mini interface — do not "fix" it.) */
450 mono_arch_cpu_optimizazions (guint32 *exclude_mask)
454 thumb_supported = TRUE;
/* NOTE(review): fopen can fail (non-Linux, restricted /proc); the NULL check
 * before the fgets loop is not visible in this excerpt — confirm it exists. */
459 FILE *file = fopen ("/proc/cpuinfo", "r");
461 while ((line = fgets (buf, 512, file))) {
462 if (strncmp (line, "Processor", 9) == 0) {
463 char *ver = strstr (line, "(v");
464 if (ver && (ver [2] == '5' || ver [2] == '6' || ver [2] == '7')) {
469 if (strncmp (line, "Features", 8) == 0) {
470 char *th = strstr (line, "thumb");
472 thumb_supported = TRUE;
480 /*printf ("features: v5: %d, thumb: %d\n", v5_supported, thumb_supported);*/
484 /* no arm-specific optimizations yet */
/*
 * is_regsize_var:
 * Whether values of type `t` fit in a single 32-bit integer register and may
 * therefore be allocated to one. Pointer-like and reference types qualify;
 * generic instances qualify when they are not valuetypes. Integer cases and
 * the return statements are on lines missing from this excerpt.
 */
490 is_regsize_var (MonoType *t) {
493 t = mini_type_get_underlying_type (NULL, t);
500 case MONO_TYPE_FNPTR:
502 case MONO_TYPE_OBJECT:
503 case MONO_TYPE_STRING:
504 case MONO_TYPE_CLASS:
505 case MONO_TYPE_SZARRAY:
506 case MONO_TYPE_ARRAY:
508 case MONO_TYPE_GENERICINST:
509 if (!mono_type_generic_inst_is_valuetype (t))
512 case MONO_TYPE_VALUETYPE:
/*
 * mono_arch_get_allocatable_int_vars:
 * Collect the method variables eligible for global register allocation:
 * live (first use before last use), non-volatile, non-indirect locals/args
 * of register-sized type, sorted into `vars`.
 */
519 mono_arch_get_allocatable_int_vars (MonoCompile *cfg)
524 for (i = 0; i < cfg->num_varinfo; i++) {
525 MonoInst *ins = cfg->varinfo [i];
526 MonoMethodVar *vmv = MONO_VARINFO (cfg, i);
/* unused variables: first use not strictly before last use */
529 if (vmv->range.first_use.abs_pos >= vmv->range.last_use.abs_pos)
532 if (ins->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT) || (ins->opcode != OP_LOCAL && ins->opcode != OP_ARG))
535 /* we can only allocate 32 bit values */
536 if (is_regsize_var (ins->inst_vtype)) {
537 g_assert (MONO_VARINFO (cfg, i)->reg == -1);
538 g_assert (i == vmv->idx);
539 vars = mono_varlist_insert_sorted (cfg, vars, vmv, FALSE);
546 #define USE_EXTRA_TEMPS 0
/*
 * mono_arch_get_global_int_regs:
 * Return the list of callee-saved integer registers available to the global
 * register allocator: V1-V4 always, V5 only when it is not reserved for the
 * vtable/rgctx/IMT argument (AOT or rgctx use); V6/V7 are kept back.
 */
549 mono_arch_get_global_int_regs (MonoCompile *cfg)
552 regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_V1));
553 regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_V2));
554 regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_V3));
555 regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_V4));
556 if (!(cfg->compile_aot || cfg->uses_rgctx_reg))
557 /* V5 is reserved for passing the vtable/rgctx/IMT method */
558 regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_V5));
559 /*regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_V6));*/
560 /*regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_V7));*/
566 * mono_arch_regalloc_cost:
568 * Return the cost, in number of memory references, of the action of
569 * allocating the variable VMV into a register during global register
/* (the body is on lines missing from this excerpt) */
573 mono_arch_regalloc_cost (MonoCompile *cfg, MonoMethodVar *vmv)
/*
 * mono_arch_flush_icache:
 * Invalidate the instruction cache for freshly emitted code in
 * [code, code+size). Darwin uses sys_icache_invalidate; Linux issues the
 * sys_cacheflush swi (0x9f0002). The #ifdef selecting between them is on
 * lines missing from this excerpt.
 */
580 mono_arch_flush_icache (guint8 *code, gint size)
583 sys_icache_invalidate (code, size);
585 __asm __volatile ("mov r0, %0\n"
588 "swi 0x9f0002 @ sys_cacheflush"
590 : "r" (code), "r" (code + size), "r" (0)
591 : "r0", "r1", "r3" );
/* Fields of the per-argument descriptor (struct name not visible in this
 * excerpt; presumably ArgInfo, filled in by calculate_sizes/add_general). */
606 guint16 vtsize; /* in param area */
608 guint8 regtype : 4; /* 0 general, 1 basereg, 2 floating point register, see RegType* */
609 guint8 size : 4; /* 1, 2, 4, 8, or regs used by RegTypeStructByVal */
/*
 * add_general:
 * Assign one scalar argument to the next general register (r0-r3) or, when
 * registers are exhausted, to the caller's stack area (RegTypeBase). For
 * `simple` == FALSE (64-bit values) a split placement is possible: first word
 * in r3, second on the stack (RegTypeBaseGen). Register-assignment branches
 * and the *gr/ *stack_size increments are on lines missing from this excerpt.
 */
624 add_general (guint *gr, guint *stack_size, ArgInfo *ainfo, gboolean simple)
627 if (*gr > ARMREG_R3) {
628 ainfo->offset = *stack_size;
629 ainfo->reg = ARMREG_SP; /* in the caller */
630 ainfo->regtype = RegTypeBase;
641 /* first word in r3 and the second on the stack */
642 ainfo->offset = *stack_size;
643 ainfo->reg = ARMREG_SP; /* in the caller */
644 ainfo->regtype = RegTypeBaseGen;
646 } else if (*gr >= ARMREG_R3) {
651 ainfo->offset = *stack_size;
652 ainfo->reg = ARMREG_SP; /* in the caller */
653 ainfo->regtype = RegTypeBase;
/*
 * calculate_sizes:
 * Build the CallInfo/ArgInfo layout for a signature: decide, per argument,
 * whether it travels in r0-r3, on the stack, or split across both, and where
 * the return value lives. The returned CallInfo is g_malloc0'ed — the caller
 * owns and frees it.
 */
668 calculate_sizes (MonoMethodSignature *sig, gboolean is_pinvoke)
671 int n = sig->hasthis + sig->param_count;
672 MonoType *simpletype;
673 guint32 stack_size = 0;
674 CallInfo *cinfo = g_malloc0 (sizeof (CallInfo) + sizeof (ArgInfo) * n);
678 /* FIXME: handle returning a struct */
/* struct returns take a hidden pointer argument in r0 */
679 if (MONO_TYPE_ISSTRUCT (sig->ret)) {
680 add_general (&gr, &stack_size, &cinfo->ret, TRUE);
681 cinfo->struct_ret = ARMREG_R0;
686 add_general (&gr, &stack_size, cinfo->args + n, TRUE);
689 DEBUG(printf("params: %d\n", sig->param_count));
690 for (i = 0; i < sig->param_count; ++i) {
691 if ((sig->call_convention == MONO_CALL_VARARG) && (i == sig->sentinelpos)) {
692 /* Prevent implicit arguments and sig_cookie from
693 being passed in registers */
695 /* Emit the signature cookie just before the implicit arguments */
696 add_general (&gr, &stack_size, &cinfo->sig_cookie, TRUE);
698 DEBUG(printf("param %d: ", i));
699 if (sig->params [i]->byref) {
700 DEBUG(printf("byref\n"));
701 add_general (&gr, &stack_size, cinfo->args + n, TRUE);
705 simpletype = mini_type_get_underlying_type (NULL, sig->params [i]);
706 switch (simpletype->type) {
707 case MONO_TYPE_BOOLEAN:
710 cinfo->args [n].size = 1;
711 add_general (&gr, &stack_size, cinfo->args + n, TRUE);
717 cinfo->args [n].size = 2;
718 add_general (&gr, &stack_size, cinfo->args + n, TRUE);
723 cinfo->args [n].size = 4;
724 add_general (&gr, &stack_size, cinfo->args + n, TRUE);
730 case MONO_TYPE_FNPTR:
731 case MONO_TYPE_CLASS:
732 case MONO_TYPE_OBJECT:
733 case MONO_TYPE_STRING:
734 case MONO_TYPE_SZARRAY:
735 case MONO_TYPE_ARRAY:
737 cinfo->args [n].size = sizeof (gpointer);
738 add_general (&gr, &stack_size, cinfo->args + n, TRUE);
741 case MONO_TYPE_GENERICINST:
742 if (!mono_type_generic_inst_is_valuetype (sig->params [i])) {
743 cinfo->args [n].size = sizeof (gpointer);
744 add_general (&gr, &stack_size, cinfo->args + n, TRUE);
/* valuetypes (and typedbyref) are passed by value, split between registers
 * and the stack as needed */
749 case MONO_TYPE_TYPEDBYREF:
750 case MONO_TYPE_VALUETYPE: {
755 if (simpletype->type == MONO_TYPE_TYPEDBYREF) {
756 size = sizeof (MonoTypedRef);
758 MonoClass *klass = mono_class_from_mono_type (sig->params [i]);
760 size = mono_class_native_size (klass, NULL);
762 size = mono_class_value_size (klass, NULL);
764 DEBUG(printf ("load %d bytes struct\n",
765 mono_class_native_size (sig->params [i]->data.klass, NULL)));
/* round the struct size up to whole words */
768 align_size += (sizeof (gpointer) - 1);
769 align_size &= ~(sizeof (gpointer) - 1);
770 nwords = (align_size + sizeof (gpointer) -1 ) / sizeof (gpointer);
771 cinfo->args [n].regtype = RegTypeStructByVal;
772 /* FIXME: align gr and stack_size if needed */
773 if (gr > ARMREG_R3) {
774 cinfo->args [n].size = 0;
775 cinfo->args [n].vtsize = nwords;
/* size = words in registers, vtsize = words spilled to the param area */
777 int rest = ARMREG_R3 - gr + 1;
778 int n_in_regs = rest >= nwords? nwords: rest;
779 cinfo->args [n].size = n_in_regs;
780 cinfo->args [n].vtsize = nwords - n_in_regs;
781 cinfo->args [n].reg = gr;
784 cinfo->args [n].offset = stack_size;
785 /*g_print ("offset for arg %d at %d\n", n, stack_size);*/
786 stack_size += nwords * sizeof (gpointer);
793 cinfo->args [n].size = 8;
794 add_general (&gr, &stack_size, cinfo->args + n, FALSE);
798 g_error ("Can't trampoline 0x%x", sig->params [i]->type);
/* return value placement */
803 simpletype = mini_type_get_underlying_type (NULL, sig->ret);
804 switch (simpletype->type) {
805 case MONO_TYPE_BOOLEAN:
816 case MONO_TYPE_FNPTR:
817 case MONO_TYPE_CLASS:
818 case MONO_TYPE_OBJECT:
819 case MONO_TYPE_SZARRAY:
820 case MONO_TYPE_ARRAY:
821 case MONO_TYPE_STRING:
822 cinfo->ret.reg = ARMREG_R0;
826 cinfo->ret.reg = ARMREG_R0;
830 cinfo->ret.reg = ARMREG_R0;
831 /* FIXME: cinfo->ret.reg = ???;
832 cinfo->ret.regtype = RegTypeFP;*/
834 case MONO_TYPE_GENERICINST:
835 if (!mono_type_generic_inst_is_valuetype (sig->ret)) {
836 cinfo->ret.reg = ARMREG_R0;
840 case MONO_TYPE_VALUETYPE:
842 case MONO_TYPE_TYPEDBYREF:
846 g_error ("Can't handle as return value 0x%x", sig->ret->type);
850 /* align stack size to 8 */
/* NOTE(review): the DEBUG print rounds with &~15 while the code aligns
 * with &~7 — the printed value can differ from the actual stack_size. */
851 DEBUG (printf (" stack size: %d (%d)\n", (stack_size + 15) & ~15, stack_size));
852 stack_size = (stack_size + 7) & ~7;
854 cinfo->stack_usage = stack_size;
860 * Set var information according to the calling convention. arm version.
861 * The locals var stuff should most likely be split in another method.
/* Assigns stack offsets (or registers) to the return value, vret address,
 * locals and incoming arguments, growing `offset` upwards from the param
 * area (MONO_CFG_HAS_SPILLUP). */
864 mono_arch_allocate_vars (MonoCompile *cfg)
866 MonoMethodSignature *sig;
867 MonoMethodHeader *header;
869 int i, offset, size, align, curinst;
870 int frame_reg = ARMREG_FP;
872 /* FIXME: this will change when we use FP as gcc does */
873 cfg->flags |= MONO_CFG_HAS_SPILLUP;
875 /* allow room for the vararg method args: void* and long/double */
876 if (mono_jit_trace_calls != NULL && mono_trace_eval (cfg->method))
877 cfg->param_area = MAX (cfg->param_area, sizeof (gpointer)*8);
879 header = mono_method_get_header (cfg->method);
882 * We use the frame register also for any method that has
883 * exception clauses. This way, when the handlers are called,
884 * the code will reference local variables using the frame reg instead of
885 * the stack pointer: if we had to restore the stack pointer, we'd
886 * corrupt the method frames that are already on the stack (since
887 * filters get called before stack unwinding happens) when the filter
888 * code would call any method (this also applies to finally etc.).
890 if ((cfg->flags & MONO_CFG_HAS_ALLOCA) || header->num_clauses)
891 frame_reg = ARMREG_FP;
892 cfg->frame_reg = frame_reg;
893 if (frame_reg != ARMREG_SP) {
894 cfg->used_int_regs |= 1 << frame_reg;
/* NOTE(review): this condition (`!cfg->compile_aot || cfg->uses_rgctx_reg`)
 * is not the complement of the test in mono_arch_get_global_int_regs
 * (`!(cfg->compile_aot || cfg->uses_rgctx_reg)`); one of the two likely has
 * a misplaced `!` — verify against the full source. */
897 if (!cfg->compile_aot || cfg->uses_rgctx_reg)
898 /* V5 is reserved for passing the vtable/rgctx/IMT method */
899 cfg->used_int_regs |= (1 << ARMREG_V5);
901 sig = mono_method_signature (cfg->method);
905 if (!MONO_TYPE_ISSTRUCT (sig->ret)) {
906 /* FIXME: handle long and FP values */
907 switch (mini_type_get_underlying_type (NULL, sig->ret)->type) {
911 cfg->ret->opcode = OP_REGVAR;
912 cfg->ret->inst_c0 = ARMREG_R0;
916 /* local vars are at a positive offset from the stack pointer */
918 * also note that if the function uses alloca, we use FP
919 * to point at the local variables.
921 offset = 0; /* linkage area */
922 /* align the offset to 16 bytes: not sure this is needed here */
924 //offset &= ~(8 - 1);
926 /* add parameter area size for called functions */
927 offset += cfg->param_area;
930 if (cfg->flags & MONO_CFG_HAS_FPOUT)
933 /* allow room to save the return value */
934 if (mono_jit_trace_calls != NULL && mono_trace_eval (cfg->method))
937 /* the MonoLMF structure is stored just below the stack pointer */
939 if (sig->call_convention == MONO_CALL_VARARG) {
/* slot for the hidden valuetype-return address */
943 if (MONO_TYPE_ISSTRUCT (sig->ret)) {
944 inst = cfg->vret_addr;
945 offset += sizeof(gpointer) - 1;
946 offset &= ~(sizeof(gpointer) - 1);
947 inst->inst_offset = offset;
948 inst->opcode = OP_REGOFFSET;
949 inst->inst_basereg = frame_reg;
950 if (G_UNLIKELY (cfg->verbose_level > 1)) {
951 printf ("vret_addr =");
952 mono_print_ins (cfg->vret_addr);
954 offset += sizeof(gpointer);
955 if (sig->call_convention == MONO_CALL_VARARG)
956 cfg->sig_cookie += sizeof (gpointer);
/* lay out the locals */
959 curinst = cfg->locals_start;
960 for (i = curinst; i < cfg->num_varinfo; ++i) {
961 inst = cfg->varinfo [i];
962 if ((inst->flags & MONO_INST_IS_DEAD) || inst->opcode == OP_REGVAR)
965 /* inst->backend.is_pinvoke indicates native sized value types, this is used by the
966 * pinvoke wrappers when they call functions returning structure */
967 if (inst->backend.is_pinvoke && MONO_TYPE_ISSTRUCT (inst->inst_vtype) && inst->inst_vtype->type != MONO_TYPE_TYPEDBYREF) {
969 size = mono_class_native_size (mono_class_from_mono_type (inst->inst_vtype), &ualign);
973 size = mono_type_size (inst->inst_vtype, &align);
975 /* FIXME: if a structure is misaligned, our memcpy doesn't work,
976 * since it loads/stores misaligned words, which don't do the right thing.
978 if (align < 4 && size >= 4)
981 offset &= ~(align - 1);
982 inst->inst_offset = offset;
983 inst->opcode = OP_REGOFFSET;
984 inst->inst_basereg = frame_reg;
986 //g_print ("allocating local %d to %d\n", i, inst->inst_offset);
/* the `this` argument (when not already in a register) */
991 inst = cfg->args [curinst];
992 if (inst->opcode != OP_REGVAR) {
993 inst->opcode = OP_REGOFFSET;
994 inst->inst_basereg = frame_reg;
995 offset += sizeof (gpointer) - 1;
996 offset &= ~(sizeof (gpointer) - 1);
997 inst->inst_offset = offset;
998 offset += sizeof (gpointer);
999 if (sig->call_convention == MONO_CALL_VARARG)
1000 cfg->sig_cookie += sizeof (gpointer);
/* the remaining incoming arguments */
1005 for (i = 0; i < sig->param_count; ++i) {
1006 inst = cfg->args [curinst];
1007 if (inst->opcode != OP_REGVAR) {
1008 inst->opcode = OP_REGOFFSET;
1009 inst->inst_basereg = frame_reg;
1010 size = mono_type_size (sig->params [i], &align);
1011 /* FIXME: if a structure is misaligned, our memcpy doesn't work,
1012 * since it loads/stores misaligned words, which don't do the right thing.
1014 if (align < 4 && size >= 4)
1016 offset += align - 1;
1017 offset &= ~(align - 1);
1018 inst->inst_offset = offset;
1020 if ((sig->call_convention == MONO_CALL_VARARG) && (i < sig->sentinelpos))
1021 cfg->sig_cookie += size;
1026 /* align the offset to 8 bytes */
1031 cfg->stack_offset = offset;
/*
 * mono_arch_create_vars:
 * Create arch-specific compile-time variables; here, the hidden vret_addr
 * argument for methods returning a struct by value.
 */
1035 mono_arch_create_vars (MonoCompile *cfg)
1037 MonoMethodSignature *sig;
1039 sig = mono_method_signature (cfg->method);
1041 if (MONO_TYPE_ISSTRUCT (sig->ret)) {
1042 cfg->vret_addr = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_ARG);
1043 if (G_UNLIKELY (cfg->verbose_level > 1)) {
1044 printf ("vret_addr = ");
1045 mono_print_ins (cfg->vret_addr);
/*
 * mono_arch_emit_call:
 * Lower a call: for each argument, emit the moves/stores that place it in
 * the register or stack slot chosen by calculate_sizes, then record the
 * call's stack usage. Long (I8/U8) values occupy a register pair; on
 * soft-float, FP values travel through integer registers/stack slots.
 */
1051 mono_arch_emit_call (MonoCompile *cfg, MonoCallInst *call)
1054 MonoMethodSignature *sig;
1058 sig = call->signature;
1059 n = sig->param_count + sig->hasthis;
1061 cinfo = calculate_sizes (sig, sig->pinvoke);
1063 for (i = 0; i < n; ++i) {
1064 ArgInfo *ainfo = cinfo->args + i;
/* argument 0 may be the implicit `this` */
1067 if (i >= sig->hasthis)
1068 t = sig->params [i - sig->hasthis];
1070 t = &mono_defaults.int_class->byval_arg;
1071 t = mini_type_get_underlying_type (NULL, t);
1073 if ((sig->call_convention == MONO_CALL_VARARG) && (i == sig->sentinelpos)) {
1078 in = call->args [i];
1080 switch (ainfo->regtype) {
1081 case RegTypeGeneral:
/* 64-bit value: low word (dreg+1) and high word (dreg+2) to a register pair */
1082 if (!t->byref && ((t->type == MONO_TYPE_I8) || (t->type == MONO_TYPE_U8))) {
1083 MONO_INST_NEW (cfg, ins, OP_MOVE);
1084 ins->dreg = mono_alloc_ireg (cfg);
1085 ins->sreg1 = in->dreg + 1;
1086 MONO_ADD_INS (cfg->cbb, ins);
1087 mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, ainfo->reg, FALSE);
1089 MONO_INST_NEW (cfg, ins, OP_MOVE);
1090 ins->dreg = mono_alloc_ireg (cfg);
1091 ins->sreg1 = in->dreg + 2;
1092 MONO_ADD_INS (cfg->cbb, ins);
1093 mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, ainfo->reg + 1, FALSE);
1094 } else if (!t->byref && ((t->type == MONO_TYPE_R8) || (t->type == MONO_TYPE_R4))) {
1095 #ifndef MONO_ARCH_SOFT_FLOAT
1099 if (ainfo->size == 4) {
1100 #ifdef MONO_ARCH_SOFT_FLOAT
1101 /* mono_emit_call_args () have already done the r8->r4 conversion */
1102 /* The converted value is in an int vreg */
1103 MONO_INST_NEW (cfg, ins, OP_MOVE);
1104 ins->dreg = mono_alloc_ireg (cfg);
1105 ins->sreg1 = in->dreg;
1106 MONO_ADD_INS (cfg->cbb, ins);
1107 mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, ainfo->reg, FALSE);
/* hard-float R4: bounce through the param area to move FP -> int reg */
1109 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORER4_MEMBASE_REG, ARMREG_SP, (cfg->param_area - 8), in->dreg);
1110 creg = mono_alloc_ireg (cfg);
1111 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOAD_MEMBASE, creg, ARMREG_SP, (cfg->param_area - 8));
1112 mono_call_inst_add_outarg_reg (cfg, call, creg, ainfo->reg, FALSE);
1115 #ifdef MONO_ARCH_SOFT_FLOAT
/* soft-float R8: split the double into its two 32-bit halves */
1116 MONO_INST_NEW (cfg, ins, OP_FGETLOW32);
1117 ins->dreg = mono_alloc_ireg (cfg);
1118 ins->sreg1 = in->dreg;
1119 MONO_ADD_INS (cfg->cbb, ins);
1120 mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, ainfo->reg, FALSE);
1122 MONO_INST_NEW (cfg, ins, OP_FGETHIGH32);
1123 ins->dreg = mono_alloc_ireg (cfg);
1124 ins->sreg1 = in->dreg;
1125 MONO_ADD_INS (cfg->cbb, ins);
1126 mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, ainfo->reg + 1, FALSE);
/* hard-float R8: spill to the param area and reload both words */
1128 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORER8_MEMBASE_REG, ARMREG_SP, (cfg->param_area - 8), in->dreg);
1129 creg = mono_alloc_ireg (cfg);
1130 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOAD_MEMBASE, creg, ARMREG_SP, (cfg->param_area - 8));
1131 mono_call_inst_add_outarg_reg (cfg, call, creg, ainfo->reg, FALSE);
1132 creg = mono_alloc_ireg (cfg);
1133 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOAD_MEMBASE, creg, ARMREG_SP, (cfg->param_area - 8 + 4));
1134 mono_call_inst_add_outarg_reg (cfg, call, creg, ainfo->reg + 1, FALSE);
1137 cfg->flags |= MONO_CFG_HAS_FPOUT;
/* plain register-sized argument */
1139 MONO_INST_NEW (cfg, ins, OP_MOVE);
1140 ins->dreg = mono_alloc_ireg (cfg);
1141 ins->sreg1 = in->dreg;
1142 MONO_ADD_INS (cfg->cbb, ins);
1144 mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, ainfo->reg, FALSE);
1147 case RegTypeStructByAddr:
1150 /* FIXME: where is the data allocated? */
1151 arg->backend.reg3 = ainfo->reg;
1152 call->used_iregs |= 1 << ainfo->reg;
/* not implemented on this backend yet */
1153 g_assert_not_reached ();
1156 case RegTypeStructByVal:
/* deferred: actual reg/stack placement happens in mono_arch_emit_outarg_vt */
1157 MONO_INST_NEW (cfg, ins, OP_OUTARG_VT);
1158 ins->opcode = OP_OUTARG_VT;
1159 ins->sreg1 = in->dreg;
1160 ins->klass = in->klass;
1161 ins->inst_p0 = call;
1162 ins->inst_p1 = mono_mempool_alloc (cfg->mempool, sizeof (ArgInfo));
1163 memcpy (ins->inst_p1, ainfo, sizeof (ArgInfo));
1164 MONO_ADD_INS (cfg->cbb, ins);
/* RegTypeBase: value goes entirely to the caller's stack area */
1167 if (!t->byref && ((t->type == MONO_TYPE_I8) || (t->type == MONO_TYPE_U8))) {
1168 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, ARMREG_SP, ainfo->offset, in->dreg);
1169 } else if (!t->byref && ((t->type == MONO_TYPE_R4) || (t->type == MONO_TYPE_R8))) {
1170 if (t->type == MONO_TYPE_R8) {
1171 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORER8_MEMBASE_REG, ARMREG_SP, ainfo->offset, in->dreg);
1173 #ifdef MONO_ARCH_SOFT_FLOAT
1174 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, ARMREG_SP, ainfo->offset, in->dreg);
1176 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORER4_MEMBASE_REG, ARMREG_SP, ainfo->offset, in->dreg);
1180 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, ARMREG_SP, ainfo->offset, in->dreg);
1183 case RegTypeBaseGen:
/* 64-bit value split between r3 and the stack; word order depends on endianness */
1184 if (!t->byref && ((t->type == MONO_TYPE_I8) || (t->type == MONO_TYPE_U8))) {
1185 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, ARMREG_SP, ainfo->offset, (G_BYTE_ORDER == G_BIG_ENDIAN) ? in->dreg + 1 : in->dreg + 2);
1186 MONO_INST_NEW (cfg, ins, OP_MOVE);
1187 ins->dreg = mono_alloc_ireg (cfg);
1188 ins->sreg1 = G_BYTE_ORDER == G_BIG_ENDIAN ? in->dreg + 2 : in->dreg + 1;
1189 MONO_ADD_INS (cfg->cbb, ins);
1190 mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, ARMREG_R3, FALSE);
1191 } else if (!t->byref && (t->type == MONO_TYPE_R8)) {
1194 #ifdef MONO_ARCH_SOFT_FLOAT
1195 g_assert_not_reached ();
/* hard-float: spill the double, low word to r3, high word to the stack */
1198 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORER8_MEMBASE_REG, ARMREG_SP, (cfg->param_area - 8), in->dreg);
1199 creg = mono_alloc_ireg (cfg);
1200 mono_call_inst_add_outarg_reg (cfg, call, creg, ARMREG_R3, FALSE);
1201 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOAD_MEMBASE, creg, ARMREG_SP, (cfg->param_area - 8));
1202 creg = mono_alloc_ireg (cfg);
1203 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOAD_MEMBASE, creg, ARMREG_SP, (cfg->param_area - 4));
1204 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, ARMREG_SP, ainfo->offset, creg);
1205 cfg->flags |= MONO_CFG_HAS_FPOUT;
1207 g_assert_not_reached ();
/* RegTypeFP (FPA): FP args are passed in int regs */
1214 arg->backend.reg3 = ainfo->reg;
1215 /* FP args are passed in int regs */
1216 call->used_iregs |= 1 << ainfo->reg;
1217 if (ainfo->size == 8) {
1218 arg->opcode = OP_OUTARG_R8;
1219 call->used_iregs |= 1 << (ainfo->reg + 1);
1221 arg->opcode = OP_OUTARG_R4;
1224 cfg->flags |= MONO_CFG_HAS_FPOUT;
1228 g_assert_not_reached ();
/* pass the hidden struct-return address */
1232 if (sig->ret && MONO_TYPE_ISSTRUCT (sig->ret)) {
1235 MONO_INST_NEW (cfg, vtarg, OP_MOVE);
1236 vtarg->sreg1 = call->vret_var->dreg;
1237 vtarg->dreg = mono_alloc_preg (cfg);
1238 MONO_ADD_INS (cfg->cbb, vtarg);
1240 mono_call_inst_add_outarg_reg (cfg, call, vtarg->dreg, cinfo->ret.reg, FALSE);
1243 call->stack_usage = cinfo->stack_usage;
/*
 * mono_arch_emit_outarg_vt:
 * Materialize a by-value struct argument (OP_OUTARG_VT): the first
 * ainfo->size words are loaded into consecutive registers starting at
 * ainfo->reg, and the remaining ovf_size words are copied to the stack
 * param area at ainfo->offset.
 */
1249 mono_arch_emit_outarg_vt (MonoCompile *cfg, MonoInst *ins, MonoInst *src)
1251 MonoCallInst *call = (MonoCallInst*)ins->inst_p0;
1252 ArgInfo *ainfo = ins->inst_p1;
1253 int ovf_size = ainfo->vtsize;
1254 int doffset = ainfo->offset;
1255 int i, soffset, dreg;
1258 for (i = 0; i < ainfo->size; ++i) {
1259 dreg = mono_alloc_ireg (cfg);
1260 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, dreg, src->dreg, soffset);
1261 mono_call_inst_add_outarg_reg (cfg, call, dreg, ainfo->reg + i, FALSE);
1262 soffset += sizeof (gpointer);
1264 //g_print ("vt size: %d at R%d + %d\n", doffset, vt->inst_basereg, vt->inst_offset);
1266 mini_emit_memcpy (cfg, ARMREG_SP, doffset, src->dreg, soffset, ovf_size * sizeof (gpointer), 0);
/*
 * mono_arch_emit_setret:
 * Move `val` into the ABI return location: OP_SETLRET pairs for 64-bit
 * values, OP_SETFRET for soft-float doubles, a plain move for soft-float
 * R4 (already int-converted in method_to_ir) and for everything else.
 */
1270 mono_arch_emit_setret (MonoCompile *cfg, MonoMethod *method, MonoInst *val)
1272 MonoType *ret = mini_type_get_underlying_type (cfg->generic_sharing_context, mono_method_signature (method)->ret);
1275 if (ret->type == MONO_TYPE_I8 || ret->type == MONO_TYPE_U8) {
1278 MONO_INST_NEW (cfg, ins, OP_SETLRET);
1279 ins->sreg1 = val->dreg + 1;
1280 ins->sreg2 = val->dreg + 2;
1281 MONO_ADD_INS (cfg->cbb, ins);
1284 #ifdef MONO_ARCH_SOFT_FLOAT
1285 if (ret->type == MONO_TYPE_R8) {
1288 MONO_INST_NEW (cfg, ins, OP_SETFRET);
1289 ins->dreg = cfg->ret->dreg;
1290 ins->sreg1 = val->dreg;
1291 MONO_ADD_INS (cfg->cbb, ins);
1294 if (ret->type == MONO_TYPE_R4) {
1295 /* Already converted to an int in method_to_ir () */
1296 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, cfg->ret->dreg, val->dreg);
1303 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, cfg->ret->dreg, val->dreg);
/* Whether `imm` can be used as an instruction immediate (body not visible
 * in this excerpt). */
1307 mono_arch_is_inst_imm (gint64 imm)
1313 * Allow tracing to work with this interface (with an optional argument)
/* Emits a call to `func` (the enter-trace callback) passing the MonoMethod
 * in r0, a NULL frame pointer in r1 and `func` itself in r2. */
1317 mono_arch_instrument_prolog (MonoCompile *cfg, void *func, void *p, gboolean enable_arguments)
1321 code = mono_arm_emit_load_imm (code, ARMREG_R0, (guint32)cfg->method);
1322 ARM_MOV_REG_IMM8 (code, ARMREG_R1, 0); /* NULL ebp for now */
1323 code = mono_arm_emit_load_imm (code, ARMREG_R2, (guint32)func);
1324 code = emit_call_reg (code, ARMREG_R2);
/*
 * mono_arch_instrument_epilog:
 * Emit a call to the leave-trace callback `func`, saving the method's return
 * value around the call (SAVE_ONE/SAVE_TWO for r0/r0:r1, SAVE_FP for FP
 * returns) and restoring it afterwards.
 */
1337 mono_arch_instrument_epilog (MonoCompile *cfg, void *func, void *p, gboolean enable_arguments)
1340 int save_mode = SAVE_NONE;
1342 MonoMethod *method = cfg->method;
1343 int rtype = mini_type_get_underlying_type (cfg->generic_sharing_context, mono_method_signature (method)->ret)->type;
1344 int save_offset = cfg->param_area;
/* grow the native code buffer if the epilog sequence may not fit */
1348 offset = code - cfg->native_code;
1349 /* we need about 16 instructions */
1350 if (offset > (cfg->code_size - 16 * 4)) {
1351 cfg->code_size *= 2;
1352 cfg->native_code = g_realloc (cfg->native_code, cfg->code_size);
1353 code = cfg->native_code + offset;
1356 case MONO_TYPE_VOID:
1357 /* special case string .ctor icall */
/* NOTE(review): as written this triggers when the name is NOT ".ctor"
 * (strcmp != 0); a `!` before strcmp seems intended — confirm against the
 * full source. */
1358 if (strcmp (".ctor", method->name) && method->klass == mono_defaults.string_class)
1359 save_mode = SAVE_ONE;
1361 save_mode = SAVE_NONE;
1365 save_mode = SAVE_TWO;
1369 save_mode = SAVE_FP;
1371 case MONO_TYPE_VALUETYPE:
1372 save_mode = SAVE_STRUCT;
1375 save_mode = SAVE_ONE;
/* spill the return value below the frame before calling the tracer */
1379 switch (save_mode) {
1381 ARM_STR_IMM (code, ARMREG_R0, cfg->frame_reg, save_offset);
1382 ARM_STR_IMM (code, ARMREG_R1, cfg->frame_reg, save_offset + 4);
1383 if (enable_arguments) {
1384 ARM_MOV_REG_REG (code, ARMREG_R2, ARMREG_R1);
1385 ARM_MOV_REG_REG (code, ARMREG_R1, ARMREG_R0);
1389 ARM_STR_IMM (code, ARMREG_R0, cfg->frame_reg, save_offset);
1390 if (enable_arguments) {
1391 ARM_MOV_REG_REG (code, ARMREG_R1, ARMREG_R0);
1395 /* FIXME: what reg? */
1396 if (enable_arguments) {
1397 /* FIXME: what reg? */
1401 if (enable_arguments) {
1402 /* FIXME: get the actual address */
1403 ARM_MOV_REG_REG (code, ARMREG_R1, ARMREG_R0);
/* call func (method, ...) */
1411 code = mono_arm_emit_load_imm (code, ARMREG_R0, (guint32)cfg->method);
1412 code = mono_arm_emit_load_imm (code, ARMREG_IP, (guint32)func);
1413 code = emit_call_reg (code, ARMREG_IP);
/* restore the saved return value */
1415 switch (save_mode) {
1417 ARM_LDR_IMM (code, ARMREG_R0, cfg->frame_reg, save_offset);
1418 ARM_LDR_IMM (code, ARMREG_R1, cfg->frame_reg, save_offset + 4);
1421 ARM_LDR_IMM (code, ARMREG_R0, cfg->frame_reg, save_offset);
/*
 * Branch/exception emission helpers.  EMIT_COND_BRANCH_FLAGS emits a
 * conditional ARM branch either resolved immediately (dead "if (0)" fast
 * path) or recorded as a LABEL/BB patch to be fixed up by arm_patch later.
 * EMIT_COND_SYSTEM_EXCEPTION_FLAGS records an MONO_PATCH_INFO_EXC patch and
 * emits a conditional BL to the (not yet known) exception-throw code.
 * Do not add comments inside the macro bodies: the lines are
 * backslash-continued.
 */
1435 * The immediate field for cond branches is big enough for all reasonable methods
1437 #define EMIT_COND_BRANCH_FLAGS(ins,condcode) \
1438 if (ins->flags & MONO_INST_BRLABEL) { \
1439 if (0 && ins->inst_i0->inst_c0) { \
1440 ARM_B_COND (code, (condcode), (code - cfg->native_code + ins->inst_i0->inst_c0) & 0xffffff); \
1442 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_LABEL, ins->inst_i0); \
1443 ARM_B_COND (code, (condcode), 0); \
1446 if (0 && ins->inst_true_bb->native_offset) { \
1447 ARM_B_COND (code, (condcode), (code - cfg->native_code + ins->inst_true_bb->native_offset) & 0xffffff); \
1449 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_BB, ins->inst_true_bb); \
1450 ARM_B_COND (code, (condcode), 0); \
1454 #define EMIT_COND_BRANCH(ins,cond) EMIT_COND_BRANCH_FLAGS(ins, branch_cc_table [(cond)])
1456 /* emit an exception if condition is fail
1458 * We assign the extra code used to throw the implicit exceptions
1459 * to cfg->bb_exit as far as the big branch handling is concerned
1461 #define EMIT_COND_SYSTEM_EXCEPTION_FLAGS(condcode,exc_name) \
1463 mono_add_patch_info (cfg, code - cfg->native_code, \
1464 MONO_PATCH_INFO_EXC, exc_name); \
1465 ARM_BL_COND (code, (condcode), 0); \
1468 #define EMIT_COND_SYSTEM_EXCEPTION(cond,exc_name) EMIT_COND_SYSTEM_EXCEPTION_FLAGS(branch_cc_table [(cond)], (exc_name))
1471 mono_arch_peephole_pass_1 (MonoCompile *cfg, MonoBasicBlock *bb)
/*
 * mono_arch_peephole_pass_2:
 * Post-regalloc peephole pass over one basic block.  Classic
 * store-followed-by-load and redundant-move eliminations: a load that reads
 * back the value just stored to the same [basereg+offset] becomes a move
 * (or is deleted when the registers already match), duplicate loads become
 * moves, and self-moves / move-swap pairs are removed.
 */
1476 mono_arch_peephole_pass_2 (MonoCompile *cfg, MonoBasicBlock *bb)
1478 MonoInst *ins, *n, *last_ins = NULL;
1480 MONO_BB_FOR_EACH_INS_SAFE (bb, n, ins) {
1481 switch (ins->opcode) {
1484 /* Already done by an arch-independent pass */
1486 case OP_LOAD_MEMBASE:
1487 case OP_LOADI4_MEMBASE:
1489 * OP_STORE_MEMBASE_REG reg, offset(basereg)
1490 * OP_LOAD_MEMBASE offset(basereg), reg
/* store X -> [B+O]; load [B+O] -> X  ==>  drop the load (or turn it
 * into a register move when the destination differs). */
1492 if (last_ins && (last_ins->opcode == OP_STOREI4_MEMBASE_REG
1493 || last_ins->opcode == OP_STORE_MEMBASE_REG) &&
1494 ins->inst_basereg == last_ins->inst_destbasereg &&
1495 ins->inst_offset == last_ins->inst_offset) {
1496 if (ins->dreg == last_ins->sreg1) {
1497 MONO_DELETE_INS (bb, ins);
1500 //static int c = 0; printf ("MATCHX %s %d\n", cfg->method->name,c++);
1501 ins->opcode = OP_MOVE;
1502 ins->sreg1 = last_ins->sreg1;
1506 * Note: reg1 must be different from the basereg in the second load
1507 * OP_LOAD_MEMBASE offset(basereg), reg1
1508 * OP_LOAD_MEMBASE offset(basereg), reg2
1510 * OP_LOAD_MEMBASE offset(basereg), reg1
1511 * OP_MOVE reg1, reg2
/* NOTE(review): "} if (" below — this looks like a missing "else";
 * as written the second pattern is also tested after the first one
 * rewrote ins.  Matches a quirk seen in other mini backends; verify
 * against upstream before changing. */
1513 } if (last_ins && (last_ins->opcode == OP_LOADI4_MEMBASE
1514 || last_ins->opcode == OP_LOAD_MEMBASE) &&
1515 ins->inst_basereg != last_ins->dreg &&
1516 ins->inst_basereg == last_ins->inst_basereg &&
1517 ins->inst_offset == last_ins->inst_offset) {
1519 if (ins->dreg == last_ins->dreg) {
1520 MONO_DELETE_INS (bb, ins);
1523 ins->opcode = OP_MOVE;
1524 ins->sreg1 = last_ins->dreg;
1527 //g_assert_not_reached ();
1531 * OP_STORE_MEMBASE_IMM imm, offset(basereg)
1532 * OP_LOAD_MEMBASE offset(basereg), reg
1534 * OP_STORE_MEMBASE_IMM imm, offset(basereg)
1535 * OP_ICONST reg, imm
/* store imm -> [B+O]; load [B+O] -> reg  ==>  ICONST reg, imm.
 * The g_assert_not_reached below deliberately flags this rule as
 * unexercised/unverified. */
1537 } else if (last_ins && (last_ins->opcode == OP_STOREI4_MEMBASE_IMM
1538 || last_ins->opcode == OP_STORE_MEMBASE_IMM) &&
1539 ins->inst_basereg == last_ins->inst_destbasereg &&
1540 ins->inst_offset == last_ins->inst_offset) {
1541 //static int c = 0; printf ("MATCHX %s %d\n", cfg->method->name,c++);
1542 ins->opcode = OP_ICONST;
1543 ins->inst_c0 = last_ins->inst_imm;
1544 g_assert_not_reached (); // check this rule
/* Narrow load after a same-sized narrow store: replace the memory
 * round-trip with a sign/zero-extension of the stored register. */
1548 case OP_LOADU1_MEMBASE:
1549 case OP_LOADI1_MEMBASE:
1550 if (last_ins && (last_ins->opcode == OP_STOREI1_MEMBASE_REG) &&
1551 ins->inst_basereg == last_ins->inst_destbasereg &&
1552 ins->inst_offset == last_ins->inst_offset) {
1553 ins->opcode = (ins->opcode == OP_LOADI1_MEMBASE) ? OP_ICONV_TO_I1 : OP_ICONV_TO_U1;
1554 ins->sreg1 = last_ins->sreg1;
1557 case OP_LOADU2_MEMBASE:
1558 case OP_LOADI2_MEMBASE:
1559 if (last_ins && (last_ins->opcode == OP_STOREI2_MEMBASE_REG) &&
1560 ins->inst_basereg == last_ins->inst_destbasereg &&
1561 ins->inst_offset == last_ins->inst_offset) {
1562 ins->opcode = (ins->opcode == OP_LOADI2_MEMBASE) ? OP_ICONV_TO_I2 : OP_ICONV_TO_U2;
1563 ins->sreg1 = last_ins->sreg1;
1567 ins->opcode = OP_MOVE;
/* Dead moves: mov X, X and the second half of a mov A,B / mov B,A pair. */
1571 if (ins->dreg == ins->sreg1) {
1572 MONO_DELETE_INS (bb, ins);
1576 * OP_MOVE sreg, dreg
1577 * OP_MOVE dreg, sreg
1579 if (last_ins && last_ins->opcode == OP_MOVE &&
1580 ins->sreg1 == last_ins->dreg &&
1581 ins->dreg == last_ins->sreg1) {
1582 MONO_DELETE_INS (bb, ins);
1590 bb->last_ins = last_ins;
/* Maps mini branch condition indices to ARM condition codes; the
 * EMIT_COND_BRANCH/EMIT_COND_SYSTEM_EXCEPTION macros index into it.
 * Initializer entries are elided in this view. */
1594 * the branch_cc_table should maintain the order of these
1608 branch_cc_table [] = {
/* NEW_INS: allocate a new instruction DEST with opcode OP and insert it
 * immediately before the current `ins` of the enclosing loop in `bb`.
 * Used by the lowering pass to materialize constants/addresses. */
1622 #define NEW_INS(cfg,dest,op) do { \
1623 MONO_INST_NEW ((cfg), (dest), (op)); \
1624 mono_bblock_insert_before_ins (bb, ins, (dest)); \
/*
 * map_to_reg_reg_op:
 * Translate a membase (reg+imm) or imm-store opcode to its reg+reg
 * counterpart, used when the immediate does not fit an ARM addressing-mode
 * field: *_MEMBASE -> *_MEMINDEX, and *_MEMBASE_IMM -> *_MEMBASE_REG
 * (the immediate value itself is loaded into a register first).
 * Asserts on opcodes it does not know.
 */
1628 map_to_reg_reg_op (int op)
1637 case OP_COMPARE_IMM:
1639 case OP_ICOMPARE_IMM:
1653 case OP_LOAD_MEMBASE:
1654 return OP_LOAD_MEMINDEX;
1655 case OP_LOADI4_MEMBASE:
1656 return OP_LOADI4_MEMINDEX;
1657 case OP_LOADU4_MEMBASE:
1658 return OP_LOADU4_MEMINDEX;
1659 case OP_LOADU1_MEMBASE:
1660 return OP_LOADU1_MEMINDEX;
1661 case OP_LOADI2_MEMBASE:
1662 return OP_LOADI2_MEMINDEX;
1663 case OP_LOADU2_MEMBASE:
1664 return OP_LOADU2_MEMINDEX;
1665 case OP_LOADI1_MEMBASE:
1666 return OP_LOADI1_MEMINDEX;
1667 case OP_STOREI1_MEMBASE_REG:
1668 return OP_STOREI1_MEMINDEX;
1669 case OP_STOREI2_MEMBASE_REG:
1670 return OP_STOREI2_MEMINDEX;
1671 case OP_STOREI4_MEMBASE_REG:
1672 return OP_STOREI4_MEMINDEX;
1673 case OP_STORE_MEMBASE_REG:
1674 return OP_STORE_MEMINDEX;
1675 case OP_STORER4_MEMBASE_REG:
1676 return OP_STORER4_MEMINDEX;
1677 case OP_STORER8_MEMBASE_REG:
1678 return OP_STORER8_MEMINDEX;
/* Immediate stores become register stores, not memindex: the immediate
 * operand is replaced by a register, the address stays reg+imm. */
1679 case OP_STORE_MEMBASE_IMM:
1680 return OP_STORE_MEMBASE_REG;
1681 case OP_STOREI1_MEMBASE_IMM:
1682 return OP_STOREI1_MEMBASE_REG;
1683 case OP_STOREI2_MEMBASE_IMM:
1684 return OP_STOREI2_MEMBASE_REG;
1685 case OP_STOREI4_MEMBASE_IMM:
1686 return OP_STOREI4_MEMBASE_REG;
1688 g_assert_not_reached ();
1692 * Remove from the instruction list the instructions that can't be
1693 * represented with very simple instructions with no register
/*
 * mono_arch_lowering_pass:
 * Rewrite instructions whose immediates do not fit ARM encodings: an
 * offending immediate is materialized with a NEW_INS OP_ICONST (or an
 * OP_ADD_IMM base adjustment for FP loads/stores) inserted before the
 * instruction, and the instruction is retargeted to the reg/reg form via
 * map_to_reg_reg_op.  The fit checks are arm_is_imm12 (word/byte),
 * arm_is_imm8 (halfword/signed byte) and arm_is_fpimm8 (FP), matching the
 * ARM addressing-mode immediate ranges defined at the top of the file.
 */
1697 mono_arch_lowering_pass (MonoCompile *cfg, MonoBasicBlock *bb)
1699 MonoInst *ins, *temp, *last_ins = NULL;
1700 int rot_amount, imm8, low_imm;
1702 MONO_BB_FOR_EACH_INS (bb, ins) {
1704 switch (ins->opcode) {
/* ALU/compare immediates must be an 8-bit value rotated by an even
 * amount; otherwise load the constant and switch to the reg-reg op. */
1708 case OP_COMPARE_IMM:
1709 case OP_ICOMPARE_IMM:
1723 if ((imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount)) < 0) {
1724 NEW_INS (cfg, temp, OP_ICONST);
1725 temp->inst_c0 = ins->inst_imm;
1726 temp->dreg = mono_alloc_ireg (cfg);
1727 ins->sreg2 = temp->dreg;
1728 ins->opcode = mono_op_imm_to_op (ins->opcode);
/* Multiply-by-immediate: strength-reduce *1 -> move, *0 -> 0,
 * power of two -> shift; otherwise fall back to OP_IMUL. */
1733 if (ins->inst_imm == 1) {
1734 ins->opcode = OP_MOVE;
1737 if (ins->inst_imm == 0) {
1738 ins->opcode = OP_ICONST;
1742 imm8 = mono_is_power_of_two (ins->inst_imm);
1744 ins->opcode = OP_SHL_IMM;
1745 ins->inst_imm = imm8;
1748 NEW_INS (cfg, temp, OP_ICONST);
1749 temp->inst_c0 = ins->inst_imm;
1750 temp->dreg = mono_alloc_ireg (cfg);
1751 ins->sreg2 = temp->dreg;
1752 ins->opcode = OP_IMUL;
1754 case OP_LOCALLOC_IMM:
1755 NEW_INS (cfg, temp, OP_ICONST);
1756 temp->inst_c0 = ins->inst_imm;
1757 temp->dreg = mono_alloc_ireg (cfg);
1758 ins->sreg1 = temp->dreg;
1759 ins->opcode = OP_LOCALLOC;
1761 case OP_LOAD_MEMBASE:
1762 case OP_LOADI4_MEMBASE:
1763 case OP_LOADU4_MEMBASE:
1764 case OP_LOADU1_MEMBASE:
1765 /* we can do two things: load the immed in a register
1766 * and use an indexed load, or see if the immed can be
1767 * represented as an ad_imm + a load with a smaller offset
1768 * that fits. We just do the first for now, optimize later.
1770 if (arm_is_imm12 (ins->inst_offset))
1772 NEW_INS (cfg, temp, OP_ICONST);
1773 temp->inst_c0 = ins->inst_offset;
1774 temp->dreg = mono_alloc_ireg (cfg);
1775 ins->sreg2 = temp->dreg;
1776 ins->opcode = map_to_reg_reg_op (ins->opcode);
/* Halfword/signed-byte loads only get an 8-bit offset field. */
1778 case OP_LOADI2_MEMBASE:
1779 case OP_LOADU2_MEMBASE:
1780 case OP_LOADI1_MEMBASE:
1781 if (arm_is_imm8 (ins->inst_offset))
1783 NEW_INS (cfg, temp, OP_ICONST);
1784 temp->inst_c0 = ins->inst_offset;
1785 temp->dreg = mono_alloc_ireg (cfg);
1786 ins->sreg2 = temp->dreg;
1787 ins->opcode = map_to_reg_reg_op (ins->opcode);
/* FP loads: no indexed form exists, so split the offset into a
 * rotated-imm8 base adjustment plus a small residue (low 9 bits). */
1789 case OP_LOADR4_MEMBASE:
1790 case OP_LOADR8_MEMBASE:
1791 if (arm_is_fpimm8 (ins->inst_offset))
1793 low_imm = ins->inst_offset & 0x1ff;
1794 if ((imm8 = mono_arm_is_rotated_imm8 (ins->inst_offset & ~0x1ff, &rot_amount)) >= 0) {
1795 NEW_INS (cfg, temp, OP_ADD_IMM);
1796 temp->inst_imm = ins->inst_offset & ~0x1ff;
1797 temp->sreg1 = ins->inst_basereg;
1798 temp->dreg = mono_alloc_ireg (cfg);
1799 ins->inst_basereg = temp->dreg;
1800 ins->inst_offset = low_imm;
1803 /* VFP/FPA doesn't have indexed load instructions */
1804 g_assert_not_reached ();
1806 case OP_STORE_MEMBASE_REG:
1807 case OP_STOREI4_MEMBASE_REG:
1808 case OP_STOREI1_MEMBASE_REG:
1809 if (arm_is_imm12 (ins->inst_offset))
1811 NEW_INS (cfg, temp, OP_ICONST);
1812 temp->inst_c0 = ins->inst_offset;
1813 temp->dreg = mono_alloc_ireg (cfg);
1814 ins->sreg2 = temp->dreg;
1815 ins->opcode = map_to_reg_reg_op (ins->opcode);
1817 case OP_STOREI2_MEMBASE_REG:
1818 if (arm_is_imm8 (ins->inst_offset))
1820 NEW_INS (cfg, temp, OP_ICONST);
1821 temp->inst_c0 = ins->inst_offset;
1822 temp->dreg = mono_alloc_ireg (cfg);
1823 ins->sreg2 = temp->dreg;
1824 ins->opcode = map_to_reg_reg_op (ins->opcode);
1826 case OP_STORER4_MEMBASE_REG:
1827 case OP_STORER8_MEMBASE_REG:
1828 if (arm_is_fpimm8 (ins->inst_offset))
1830 low_imm = ins->inst_offset & 0x1ff;
1831 if ((imm8 = mono_arm_is_rotated_imm8 (ins->inst_offset & ~ 0x1ff, &rot_amount)) >= 0 && arm_is_fpimm8 (low_imm)) {
1832 NEW_INS (cfg, temp, OP_ADD_IMM);
1833 temp->inst_imm = ins->inst_offset & ~0x1ff;
1834 temp->sreg1 = ins->inst_destbasereg;
1835 temp->dreg = mono_alloc_ireg (cfg);
1836 ins->inst_destbasereg = temp->dreg;
1837 ins->inst_offset = low_imm;
1840 /*g_print ("fail with: %d (%d, %d)\n", ins->inst_offset, ins->inst_offset & ~0x1ff, low_imm);*/
1841 /* VFP/FPA doesn't have indexed store instructions */
1842 g_assert_not_reached ();
/* Immediate stores: load the value into a register, turn into the
 * *_MEMBASE_REG form, then re-run this case for the offset. */
1844 case OP_STORE_MEMBASE_IMM:
1845 case OP_STOREI1_MEMBASE_IMM:
1846 case OP_STOREI2_MEMBASE_IMM:
1847 case OP_STOREI4_MEMBASE_IMM:
1848 NEW_INS (cfg, temp, OP_ICONST);
1849 temp->inst_c0 = ins->inst_imm;
1850 temp->dreg = mono_alloc_ireg (cfg);
1851 ins->sreg1 = temp->dreg;
1852 ins->opcode = map_to_reg_reg_op (ins->opcode);
1854 goto loop_start; /* make it handle the possibly big ins->inst_offset */
/* FP compare lowering: some conditions only exist with the operands
 * swapped, so rewrite the following branch and swap sreg1/sreg2. */
1856 gboolean swap = FALSE;
1859 /* Some fp compares require swapped operands */
1860 g_assert (ins->next);
1861 switch (ins->next->opcode) {
1863 ins->next->opcode = OP_FBLT;
1867 ins->next->opcode = OP_FBLT_UN;
1871 ins->next->opcode = OP_FBGE;
1875 ins->next->opcode = OP_FBGE_UN;
1883 ins->sreg1 = ins->sreg2;
1892 bb->last_ins = last_ins;
1893 bb->max_vreg = cfg->next_vreg;
/*
 * emit_float_to_int:
 * Emit the float->integer conversion: FPA uses FIXZ directly into dreg;
 * VFP converts into scratch reg ARM_VFP_F0 (signed TOSIZD / unsigned
 * TOUIZD) then transfers to dreg with FMRS.  The result is then truncated
 * to `size` bytes: zero-extension (AND/logical shifts) for unsigned,
 * sign-extension (shift left + arithmetic shift right) for signed.
 */
1897 emit_float_to_int (MonoCompile *cfg, guchar *code, int dreg, int sreg, int size, gboolean is_signed)
1899 /* sreg is a float, dreg is an integer reg */
1901 ARM_FIXZ (code, dreg, sreg);
1902 #elif defined(ARM_FPU_VFP)
1904 ARM_TOSIZD (code, ARM_VFP_F0, sreg);
1906 ARM_TOUIZD (code, ARM_VFP_F0, sreg);
1907 ARM_FMRS (code, dreg, ARM_VFP_F0);
/* Unsigned narrowing: mask to 8 bits or logical-shift pair for 16 bits. */
1911 ARM_AND_REG_IMM8 (code, dreg, dreg, 0xff);
1912 else if (size == 2) {
1913 ARM_SHL_IMM (code, dreg, dreg, 16);
1914 ARM_SHR_IMM (code, dreg, dreg, 16);
/* Signed narrowing: shift left then arithmetic shift right. */
1918 ARM_SHL_IMM (code, dreg, dreg, 24);
1919 ARM_SAR_IMM (code, dreg, dreg, 24);
1920 } else if (size == 2) {
1921 ARM_SHL_IMM (code, dreg, dreg, 16);
1922 ARM_SAR_IMM (code, dreg, dreg, 16);
1930 const guchar *target;
1935 #define is_call_imm(diff) ((gint)(diff) >= -33554432 && (gint)(diff) <= 33554431)
/*
 * search_thunk_slot:
 * mono_code_manager_foreach callback that finds (or creates) a 12-byte
 * branch thunk inside a code chunk's reserved area so that pdata->code can
 * reach pdata->target beyond the +/-32 MB BL range.  Reuses an existing
 * thunk whose target word matches, otherwise claims the first all-zero
 * slot, emits "ldr ip, [pc]; mov pc, ip / bx ip; <target>", flushes the
 * icache and patches the call site to branch to the thunk.
 */
1938 search_thunk_slot (void *data, int csize, int bsize, void *user_data) {
1939 PatchData *pdata = (PatchData*)user_data;
1940 guchar *code = data;
1941 guint32 *thunks = data;
1942 guint32 *endthunks = (guint32*)(code + bsize);
1944 int difflow, diffhigh;
1946 /* always ensure a call from pdata->code can reach to the thunks without further thunks */
1947 difflow = (char*)pdata->code - (char*)thunks;
1948 diffhigh = (char*)pdata->code - (char*)endthunks;
1949 if (!((is_call_imm (thunks) && is_call_imm (endthunks)) || (is_call_imm (difflow) && is_call_imm (diffhigh))))
1953 * The thunk is composed of 3 words:
1954 * load constant from thunks [2] into ARM_IP
1957 * Note that the LR register is already setup
1959 //g_print ("thunk nentries: %d\n", ((char*)endthunks - (char*)thunks)/16);
/* found == 2 appears to request "use the first available chunk"; the
 * second condition restricts the search to the chunk containing the
 * call site — verify against handle_thunk's two-pass usage. */
1960 if ((pdata->found == 2) || (pdata->code >= code && pdata->code <= code + csize)) {
1961 while (thunks < endthunks) {
1962 //g_print ("looking for target: %p at %p (%08x-%08x)\n", pdata->target, thunks, thunks [0], thunks [1]);
1963 if (thunks [2] == (guint32)pdata->target) {
/* Matching thunk already exists: just repoint the call site. */
1964 arm_patch (pdata->code, (guchar*)thunks);
1965 mono_arch_flush_icache (pdata->code, 4);
1968 } else if ((thunks [0] == 0) && (thunks [1] == 0) && (thunks [2] == 0)) {
1969 /* found a free slot instead: emit thunk */
1970 /* ARMREG_IP is fine to use since this can't be an IMT call
1973 code = (guchar*)thunks;
1974 ARM_LDR_IMM (code, ARMREG_IP, ARMREG_PC, 0);
/* BX allows interworking to Thumb targets when supported. */
1975 if (thumb_supported)
1976 ARM_BX (code, ARMREG_IP);
1978 ARM_MOV_REG_REG (code, ARMREG_PC, ARMREG_IP);
1979 thunks [2] = (guint32)pdata->target;
1980 mono_arch_flush_icache ((guchar*)thunks, 12);
1982 arm_patch (pdata->code, (guchar*)thunks);
1983 mono_arch_flush_icache (pdata->code, 4);
1987 /* skip 12 bytes, the size of the thunk */
1991 //g_print ("failed thunk lookup for %p from %p at %p (%d entries)\n", pdata->target, pdata->code, data, count);
/*
 * handle_thunk:
 * Route a call at `code` through a branch thunk when `target` is out of
 * direct BL range.  Walks the domain's code chunks with search_thunk_slot
 * under the domain lock; the second foreach pass (after the comment below)
 * appears to relax the search to any available slot.  Aborts if no thunk
 * could be found or created.
 */
1997 handle_thunk (int absolute, guchar *code, const guchar *target) {
1998 MonoDomain *domain = mono_domain_get ();
2002 pdata.target = target;
2003 pdata.absolute = absolute;
2006 mono_domain_lock (domain);
2007 mono_code_manager_foreach (domain->code_mp, search_thunk_slot, &pdata);
2010 /* this uses the first available slot */
2012 mono_code_manager_foreach (domain->code_mp, search_thunk_slot, &pdata);
2014 mono_domain_unlock (domain);
2016 if (pdata.found != 1)
2017 g_print ("thunk failed for %p from %p\n", target, code);
2018 g_assert (pdata.found == 1);
/*
 * arm_patch:
 * Patch the instruction(s) at `code` to transfer control to `target`.
 * Handles three shapes: (1) a direct B/BL, rewriting the 24-bit signed
 * word displacement (and converting BL->BLX when the target's low bit
 * marks a Thumb entry point), falling back to handle_thunk when the
 * +/-32 MB range is exceeded; (2) the ldr-ip/mov-lr-pc/bx-ip sequence,
 * where only the embedded address constant is rewritten; (3) the
 * ldr-ip/blx-ip sequence and the thunk-jump sequence, likewise rewriting
 * the constant word.  The caller is responsible for icache flushing.
 */
2022 arm_patch (guchar *code, const guchar *target)
2024 guint32 *code32 = (void*)code;
2025 guint32 ins = *code32;
2026 guint32 prim = (ins >> 25) & 7;
2027 guint32 tval = GPOINTER_TO_UINT (target);
2029 //g_print ("patching 0x%08x (0x%08x) to point to 0x%08x\n", code, ins, target);
2030 if (prim == 5) { /* 101b */
2031 /* the diff starts 8 bytes from the branch opcode */
2032 gint diff = target - code - 8;
2034 gint tmask = 0xffffffff;
2035 if (tval & 1) { /* entering thumb mode */
2036 diff = target - 1 - code - 8;
2037 g_assert (thumb_supported);
2038 tbits = 0xf << 28; /* bl->blx bit pattern */
2039 g_assert ((ins & (1 << 24))); /* it must be a bl, not b instruction */
2040 /* this low bit of the displacement is moved to bit 24 in the instruction encoding */
2044 tmask = ~(1 << 24); /* clear the link bit */
2045 /*g_print ("blx to thumb: target: %p, code: %p, diff: %d, mask: %x\n", target, code, diff, tmask);*/
/* NOTE(review): the byte->word conversion of `diff` (>>= 2) sits on
 * lines elided from this view; these masks operate on the word
 * displacement. */
2050 if (diff <= 33554431) {
2052 ins = (ins & 0xff000000) | diff;
2054 *code32 = ins | tbits;
2058 /* diff between 0 and -33554432 */
2059 if (diff >= -33554432) {
2061 ins = (ins & 0xff000000) | (diff & ~0xff000000);
2063 *code32 = ins | tbits;
/* Out of direct branch range: go through a 12-byte thunk. */
2068 handle_thunk (TRUE, code, target);
2073 * The alternative call sequences looks like this:
2075 * ldr ip, [pc] // loads the address constant
2076 * b 1f // jumps around the constant
2077 * address constant embedded in the code
2082 * There are two cases for patching:
2083 * a) at the end of method emission: in this case code points to the start
2084 * of the call sequence
2085 * b) during runtime patching of the call site: in this case code points
2086 * to the mov pc, ip instruction
2088 * We have to handle also the thunk jump code sequence:
2092 * address constant // execution never reaches here
2094 if ((ins & 0x0ffffff0) == 0x12fff10) {
2095 /* Branch and exchange: the address is constructed in a reg
2096 * We can patch BX when the code sequence is the following:
2097 * ldr ip, [pc, #0] ; 0x8
/* Re-emit the expected reference sequence into ccode[] and compare it
 * word-for-word against memory to locate the constant slot safely. */
2104 guint8 *emit = (guint8*)ccode;
2105 ARM_LDR_IMM (emit, ARMREG_IP, ARMREG_PC, 0);
2107 ARM_MOV_REG_REG (emit, ARMREG_LR, ARMREG_PC);
2108 ARM_BX (emit, ARMREG_IP);
2110 /*patching from magic trampoline*/
2111 if (ins == ccode [3]) {
2112 g_assert (code32 [-4] == ccode [0]);
2113 g_assert (code32 [-3] == ccode [1]);
2114 g_assert (code32 [-1] == ccode [2]);
2115 code32 [-2] = (guint32)target;
2118 /*patching from JIT*/
2119 if (ins == ccode [0]) {
2120 g_assert (code32 [1] == ccode [1]);
2121 g_assert (code32 [3] == ccode [2]);
2122 g_assert (code32 [4] == ccode [3]);
2123 code32 [2] = (guint32)target;
2126 g_assert_not_reached ();
2127 } else if ((ins & 0x0ffffff0) == 0x12fff30) {
/* BLX reg form: constant word sits just before the blx. */
2135 guint8 *emit = (guint8*)ccode;
2136 ARM_LDR_IMM (emit, ARMREG_IP, ARMREG_PC, 0);
2138 ARM_BLX_REG (emit, ARMREG_IP);
2140 g_assert (code32 [-3] == ccode [0]);
2141 g_assert (code32 [-2] == ccode [1]);
2142 g_assert (code32 [0] == ccode [2]);
2144 code32 [-1] = (guint32)target;
/* Remaining shapes: ldr/mov-lr/mov-pc (or bx) call sequence and the
 * thunk jump sequence; only the embedded constant is replaced. */
2147 guint32 *tmp = ccode;
2148 guint8 *emit = (guint8*)tmp;
2149 ARM_LDR_IMM (emit, ARMREG_IP, ARMREG_PC, 0);
2150 ARM_MOV_REG_REG (emit, ARMREG_LR, ARMREG_PC);
2151 ARM_MOV_REG_REG (emit, ARMREG_PC, ARMREG_IP);
2152 ARM_BX (emit, ARMREG_IP);
2153 if (ins == ccode [2]) {
2154 g_assert_not_reached (); // should be -2 ...
2155 code32 [-1] = (guint32)target;
2158 if (ins == ccode [0]) {
2159 /* handles both thunk jump code and the far call sequence */
2160 code32 [2] = (guint32)target;
2163 g_assert_not_reached ();
2165 // g_print ("patched with 0x%08x\n", ins);
2169 * Return the >= 0 uimm8 value if val can be represented with a byte + rotation
2170 * (with the rotation amount in *rot_amount. rot_amount is already adjusted
2171 * to be used with the emit macros.
2172 * Return -1 otherwise.
/* Tries every even rotation (ARM data-processing immediates rotate an
 * 8-bit value right by an even amount). */
2175 mono_arm_is_rotated_imm8 (guint32 val, gint *rot_amount)
/* NOTE(review): at i == 0 this computes val << 32, which is undefined
 * behavior in C; presumably it relies on the common "shift is mod 32"
 * hardware behavior plus the OR with (val >> 0) — verify upstream. */
2178 for (i = 0; i < 31; i+= 2) {
2179 res = (val << (32 - i)) | (val >> i);
2182 *rot_amount = i? 32 - i: 0;
2189 * Emits in code a sequence of instructions that load the value 'val'
2190 * into the dreg register. Uses at most 4 instructions.
2193 mono_arm_emit_load_imm (guint8 *code, int dreg, guint32 val)
2195 int imm8, rot_amount;
/* Constant-pool variant (condition elided in this view): load the value
 * from a word embedded right after the instruction. */
2197 ARM_LDR_IMM (code, dreg, ARMREG_PC, 0);
2198 /* skip the constant pool */
/* Fast paths: a single MOV when val is a rotated imm8, or a single MVN
 * when its complement is. */
2204 if ((imm8 = mono_arm_is_rotated_imm8 (val, &rot_amount)) >= 0) {
2205 ARM_MOV_REG_IMM (code, dreg, imm8, rot_amount);
2206 } else if ((imm8 = mono_arm_is_rotated_imm8 (~val, &rot_amount)) >= 0) {
2207 ARM_MVN_REG_IMM (code, dreg, imm8, rot_amount);
/* General case: build the value byte by byte — MOV the lowest present
 * byte, then ADD each higher byte with the matching rotation (24/16/8
 * encode rotations placing the byte at bits 8-15, 16-23 and 24-31). */
2210 ARM_MOV_REG_IMM8 (code, dreg, (val & 0xFF));
2212 ARM_ADD_REG_IMM (code, dreg, dreg, (val & 0xFF00) >> 8, 24);
2214 if (val & 0xFF0000) {
2215 ARM_ADD_REG_IMM (code, dreg, dreg, (val & 0xFF0000) >> 16, 16);
2217 if (val & 0xFF000000) {
2218 ARM_ADD_REG_IMM (code, dreg, dreg, (val & 0xFF000000) >> 24, 8);
2220 } else if (val & 0xFF00) {
2221 ARM_MOV_REG_IMM (code, dreg, (val & 0xFF00) >> 8, 24);
2222 if (val & 0xFF0000) {
2223 ARM_ADD_REG_IMM (code, dreg, dreg, (val & 0xFF0000) >> 16, 16);
2225 if (val & 0xFF000000) {
2226 ARM_ADD_REG_IMM (code, dreg, dreg, (val & 0xFF000000) >> 24, 8);
2228 } else if (val & 0xFF0000) {
2229 ARM_MOV_REG_IMM (code, dreg, (val & 0xFF0000) >> 16, 16);
2230 if (val & 0xFF000000) {
2231 ARM_ADD_REG_IMM (code, dreg, dreg, (val & 0xFF000000) >> 24, 8);
2234 //g_assert_not_reached ();
2240 * emit_load_volatile_arguments:
2242 * Load volatile arguments from the stack to the original input registers.
2243 * Required before a tail call.
2246 emit_load_volatile_arguments (MonoCompile *cfg, guint8 *code)
2248 MonoMethod *method = cfg->method;
2249 MonoMethodSignature *sig;
2254 /* FIXME: Generate intermediate code instead */
2256 sig = mono_method_signature (method);
2258 /* This is the opposite of the code in emit_prolog */
2262 cinfo = calculate_sizes (sig, sig->pinvoke);
/* vtype return: reload the hidden return-buffer address register. */
2264 if (MONO_TYPE_ISSTRUCT (sig->ret)) {
2265 ArgInfo *ainfo = &cinfo->ret;
2266 inst = cfg->vret_addr;
2267 g_assert (arm_is_imm12 (inst->inst_offset));
2268 ARM_LDR_IMM (code, ainfo->reg, inst->inst_basereg, inst->inst_offset);
2270 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
2271 ArgInfo *ainfo = cinfo->args + i;
2272 inst = cfg->args [pos];
2274 if (cfg->verbose_level > 2)
2275 g_print ("Loading argument %d (type: %d)\n", i, ainfo->regtype);
/* Argument kept in a register by the register allocator. */
2276 if (inst->opcode == OP_REGVAR) {
2277 if (ainfo->regtype == RegTypeGeneral)
2278 ARM_MOV_REG_REG (code, inst->dreg, ainfo->reg);
2279 else if (ainfo->regtype == RegTypeFP) {
2280 g_assert_not_reached ();
2281 } else if (ainfo->regtype == RegTypeBase) {
2285 if (arm_is_imm12 (prev_sp_offset + ainfo->offset)) {
2286 ARM_LDR_IMM (code, inst->dreg, ARMREG_SP, (prev_sp_offset + ainfo->offset));
/* NOTE(review): the large-offset fallback loads inst->inst_offset
 * while the imm12 path above uses prev_sp_offset + ainfo->offset —
 * these look inconsistent; verify which offset is intended. */
2288 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
2289 ARM_LDR_REG_REG (code, inst->dreg, ARMREG_SP, ARMREG_IP);
2293 g_assert_not_reached ();
/* Argument spilled to the frame: reload into its original ABI slot. */
2295 if (ainfo->regtype == RegTypeGeneral) {
2296 switch (ainfo->size) {
/* 8-byte case: reload both words into the register pair. */
2303 g_assert (arm_is_imm12 (inst->inst_offset));
2304 ARM_LDR_IMM (code, ainfo->reg, inst->inst_basereg, inst->inst_offset);
2305 g_assert (arm_is_imm12 (inst->inst_offset + 4));
2306 ARM_LDR_IMM (code, ainfo->reg + 1, inst->inst_basereg, inst->inst_offset + 4);
2309 if (arm_is_imm12 (inst->inst_offset)) {
2310 ARM_LDR_IMM (code, ainfo->reg, inst->inst_basereg, inst->inst_offset);
2312 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
2313 ARM_LDR_REG_REG (code, ainfo->reg, inst->inst_basereg, ARMREG_IP);
2317 } else if (ainfo->regtype == RegTypeBaseGen) {
2320 } else if (ainfo->regtype == RegTypeBase) {
2323 } else if (ainfo->regtype == RegTypeFP) {
2324 g_assert_not_reached ();
2325 } else if (ainfo->regtype == RegTypeStructByVal) {
/* Struct passed partially in registers: reload each word. */
2326 int doffset = inst->inst_offset;
2330 if (mono_class_from_mono_type (inst->inst_vtype))
2331 size = mono_class_native_size (mono_class_from_mono_type (inst->inst_vtype), NULL);
2332 for (cur_reg = 0; cur_reg < ainfo->size; ++cur_reg) {
2333 if (arm_is_imm12 (doffset)) {
2334 ARM_LDR_IMM (code, ainfo->reg + cur_reg, inst->inst_basereg, doffset);
2336 code = mono_arm_emit_load_imm (code, ARMREG_IP, doffset);
2337 ARM_LDR_REG_REG (code, ainfo->reg + cur_reg, inst->inst_basereg, ARMREG_IP);
2339 soffset += sizeof (gpointer);
2340 doffset += sizeof (gpointer);
2345 } else if (ainfo->regtype == RegTypeStructByAddr) {
2362 mono_arch_output_basic_block (MonoCompile *cfg, MonoBasicBlock *bb)
2367 guint8 *code = cfg->native_code + cfg->code_len;
2368 MonoInst *last_ins = NULL;
2369 guint last_offset = 0;
2371 int imm8, rot_amount;
2373 /* we don't align basic blocks of loops on arm */
2375 if (cfg->verbose_level > 2)
2376 g_print ("Basic block %d starting at offset 0x%x\n", bb->block_num, bb->native_offset);
2378 cpos = bb->max_offset;
2380 if (cfg->prof_options & MONO_PROFILE_COVERAGE) {
2381 //MonoCoverageInfo *cov = mono_get_coverage_info (cfg->method);
2382 //g_assert (!mono_compile_aot);
2385 // cov->data [bb->dfn].iloffset = bb->cil_code - cfg->cil_code;
2386 /* this is not thread save, but good enough */
2387 /* fixme: howto handle overflows? */
2388 //x86_inc_mem (code, &cov->data [bb->dfn].count);
2391 if (mono_break_at_bb_method && mono_method_desc_full_match (mono_break_at_bb_method, cfg->method) && bb->block_num == mono_break_at_bb_bb_num) {
2392 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_INTERNAL_METHOD,
2393 (gpointer)"mono_break");
2394 code = emit_call_seq (cfg, code);
2397 MONO_BB_FOR_EACH_INS (bb, ins) {
2398 offset = code - cfg->native_code;
2400 max_len = ((guint8 *)ins_get_spec (ins->opcode))[MONO_INST_LEN];
2402 if (offset > (cfg->code_size - max_len - 16)) {
2403 cfg->code_size *= 2;
2404 cfg->native_code = g_realloc (cfg->native_code, cfg->code_size);
2405 code = cfg->native_code + offset;
2407 // if (ins->cil_code)
2408 // g_print ("cil code\n");
2409 mono_debug_record_line_number (cfg, ins, offset);
2411 switch (ins->opcode) {
2412 case OP_MEMORY_BARRIER:
2415 g_assert_not_reached ();
2418 ppc_mullw (code, ppc_r4, ins->sreg1, ins->sreg2);
2419 ppc_mulhw (code, ppc_r3, ins->sreg1, ins->sreg2);
2422 ppc_mullw (code, ppc_r4, ins->sreg1, ins->sreg2);
2423 ppc_mulhwu (code, ppc_r3, ins->sreg1, ins->sreg2);
2425 case OP_STOREI1_MEMBASE_IMM:
2426 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_imm & 0xFF);
2427 g_assert (arm_is_imm12 (ins->inst_offset));
2428 ARM_STRB_IMM (code, ARMREG_LR, ins->inst_destbasereg, ins->inst_offset);
2430 case OP_STOREI2_MEMBASE_IMM:
2431 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_imm & 0xFFFF);
2432 g_assert (arm_is_imm8 (ins->inst_offset));
2433 ARM_STRH_IMM (code, ARMREG_LR, ins->inst_destbasereg, ins->inst_offset);
2435 case OP_STORE_MEMBASE_IMM:
2436 case OP_STOREI4_MEMBASE_IMM:
2437 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_imm);
2438 g_assert (arm_is_imm12 (ins->inst_offset));
2439 ARM_STR_IMM (code, ARMREG_LR, ins->inst_destbasereg, ins->inst_offset);
2441 case OP_STOREI1_MEMBASE_REG:
2442 g_assert (arm_is_imm12 (ins->inst_offset));
2443 ARM_STRB_IMM (code, ins->sreg1, ins->inst_destbasereg, ins->inst_offset);
2445 case OP_STOREI2_MEMBASE_REG:
2446 g_assert (arm_is_imm8 (ins->inst_offset));
2447 ARM_STRH_IMM (code, ins->sreg1, ins->inst_destbasereg, ins->inst_offset);
2449 case OP_STORE_MEMBASE_REG:
2450 case OP_STOREI4_MEMBASE_REG:
2451 /* this case is special, since it happens for spill code after lowering has been called */
2452 if (arm_is_imm12 (ins->inst_offset)) {
2453 ARM_STR_IMM (code, ins->sreg1, ins->inst_destbasereg, ins->inst_offset);
2455 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_offset);
2456 ARM_STR_REG_REG (code, ins->sreg1, ins->inst_destbasereg, ARMREG_LR);
2459 case OP_STOREI1_MEMINDEX:
2460 ARM_STRB_REG_REG (code, ins->sreg1, ins->inst_destbasereg, ins->sreg2);
2462 case OP_STOREI2_MEMINDEX:
2463 ARM_STRH_REG_REG (code, ins->sreg1, ins->inst_destbasereg, ins->sreg2);
2465 case OP_STORE_MEMINDEX:
2466 case OP_STOREI4_MEMINDEX:
2467 ARM_STR_REG_REG (code, ins->sreg1, ins->inst_destbasereg, ins->sreg2);
2470 g_assert_not_reached ();
2472 case OP_LOAD_MEMINDEX:
2473 case OP_LOADI4_MEMINDEX:
2474 case OP_LOADU4_MEMINDEX:
2475 ARM_LDR_REG_REG (code, ins->dreg, ins->inst_basereg, ins->sreg2);
2477 case OP_LOADI1_MEMINDEX:
2478 ARM_LDRSB_REG_REG (code, ins->dreg, ins->inst_basereg, ins->sreg2);
2480 case OP_LOADU1_MEMINDEX:
2481 ARM_LDRB_REG_REG (code, ins->dreg, ins->inst_basereg, ins->sreg2);
2483 case OP_LOADI2_MEMINDEX:
2484 ARM_LDRSH_REG_REG (code, ins->dreg, ins->inst_basereg, ins->sreg2);
2486 case OP_LOADU2_MEMINDEX:
2487 ARM_LDRH_REG_REG (code, ins->dreg, ins->inst_basereg, ins->sreg2);
2489 case OP_LOAD_MEMBASE:
2490 case OP_LOADI4_MEMBASE:
2491 case OP_LOADU4_MEMBASE:
2492 /* this case is special, since it happens for spill code after lowering has been called */
2493 if (arm_is_imm12 (ins->inst_offset)) {
2494 ARM_LDR_IMM (code, ins->dreg, ins->inst_basereg, ins->inst_offset);
2496 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_offset);
2497 ARM_LDR_REG_REG (code, ins->dreg, ins->inst_basereg, ARMREG_LR);
2500 case OP_LOADI1_MEMBASE:
2501 g_assert (arm_is_imm8 (ins->inst_offset));
2502 ARM_LDRSB_IMM (code, ins->dreg, ins->inst_basereg, ins->inst_offset);
2504 case OP_LOADU1_MEMBASE:
2505 g_assert (arm_is_imm12 (ins->inst_offset));
2506 ARM_LDRB_IMM (code, ins->dreg, ins->inst_basereg, ins->inst_offset);
2508 case OP_LOADU2_MEMBASE:
2509 g_assert (arm_is_imm8 (ins->inst_offset));
2510 ARM_LDRH_IMM (code, ins->dreg, ins->inst_basereg, ins->inst_offset);
2512 case OP_LOADI2_MEMBASE:
2513 g_assert (arm_is_imm8 (ins->inst_offset));
2514 ARM_LDRSH_IMM (code, ins->dreg, ins->inst_basereg, ins->inst_offset);
2516 case OP_ICONV_TO_I1:
2517 ARM_SHL_IMM (code, ins->dreg, ins->sreg1, 24);
2518 ARM_SAR_IMM (code, ins->dreg, ins->dreg, 24);
2520 case OP_ICONV_TO_I2:
2521 ARM_SHL_IMM (code, ins->dreg, ins->sreg1, 16);
2522 ARM_SAR_IMM (code, ins->dreg, ins->dreg, 16);
2524 case OP_ICONV_TO_U1:
2525 ARM_AND_REG_IMM8 (code, ins->dreg, ins->sreg1, 0xff);
2527 case OP_ICONV_TO_U2:
2528 ARM_SHL_IMM (code, ins->dreg, ins->sreg1, 16);
2529 ARM_SHR_IMM (code, ins->dreg, ins->dreg, 16);
2533 ARM_CMP_REG_REG (code, ins->sreg1, ins->sreg2);
2535 case OP_COMPARE_IMM:
2536 case OP_ICOMPARE_IMM:
2537 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
2538 g_assert (imm8 >= 0);
2539 ARM_CMP_REG_IMM (code, ins->sreg1, imm8, rot_amount);
2543 * gdb does not like encountering the hw breakpoint ins in the debugged code.
2544 * So instead of emitting a trap, we emit a call a C function and place a
2547 //*(int*)code = 0xef9f0001;
2550 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_INTERNAL_METHOD,
2551 (gpointer)"mono_break");
2552 code = emit_call_seq (cfg, code);
2554 case OP_RELAXED_NOP:
2559 case OP_DUMMY_STORE:
2560 case OP_NOT_REACHED:
2565 ARM_ADDS_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
2568 ARM_ADD_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
2572 ARM_ADCS_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
2575 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
2576 g_assert (imm8 >= 0);
2577 ARM_ADDS_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
2581 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
2582 g_assert (imm8 >= 0);
2583 ARM_ADD_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
2587 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
2588 g_assert (imm8 >= 0);
2589 ARM_ADCS_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
2592 ARM_ADD_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
2593 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, "OverflowException");
2595 case OP_IADD_OVF_UN:
2596 ARM_ADD_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
2597 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, "OverflowException");
2600 ARM_SUB_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
2601 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, "OverflowException");
2603 case OP_ISUB_OVF_UN:
2604 ARM_SUB_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
2605 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_TRUE, PPC_BR_EQ, "OverflowException");
2607 case OP_ADD_OVF_CARRY:
2608 ARM_ADCS_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
2609 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, "OverflowException");
2611 case OP_ADD_OVF_UN_CARRY:
2612 ARM_ADCS_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
2613 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, "OverflowException");
2615 case OP_SUB_OVF_CARRY:
2616 ARM_SBCS_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
2617 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, "OverflowException");
2619 case OP_SUB_OVF_UN_CARRY:
2620 ARM_SBCS_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
2621 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_TRUE, PPC_BR_EQ, "OverflowException");
2625 ARM_SUBS_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
2628 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
2629 g_assert (imm8 >= 0);
2630 ARM_SUBS_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
2633 ARM_SUB_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
2637 ARM_SBCS_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
2641 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
2642 g_assert (imm8 >= 0);
2643 ARM_SUB_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
2647 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
2648 g_assert (imm8 >= 0);
2649 ARM_SBCS_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
2651 case OP_ARM_RSBS_IMM:
2652 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
2653 g_assert (imm8 >= 0);
2654 ARM_RSBS_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
2656 case OP_ARM_RSC_IMM:
2657 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
2658 g_assert (imm8 >= 0);
2659 ARM_RSC_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
2662 ARM_AND_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
2666 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
2667 g_assert (imm8 >= 0);
2668 ARM_AND_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
2676 /* crappy ARM arch doesn't have a DIV instruction */
2677 g_assert_not_reached ();
2679 ARM_ORR_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
2683 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
2684 g_assert (imm8 >= 0);
2685 ARM_ORR_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
2688 ARM_EOR_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
2692 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
2693 g_assert (imm8 >= 0);
2694 ARM_EOR_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
2697 ARM_SHL_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
2702 ARM_SHL_IMM (code, ins->dreg, ins->sreg1, (ins->inst_imm & 0x1f));
2703 else if (ins->dreg != ins->sreg1)
2704 ARM_MOV_REG_REG (code, ins->dreg, ins->sreg1);
2707 ARM_SAR_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
2712 ARM_SAR_IMM (code, ins->dreg, ins->sreg1, (ins->inst_imm & 0x1f));
2713 else if (ins->dreg != ins->sreg1)
2714 ARM_MOV_REG_REG (code, ins->dreg, ins->sreg1);
2717 case OP_ISHR_UN_IMM:
2719 ARM_SHR_IMM (code, ins->dreg, ins->sreg1, (ins->inst_imm & 0x1f));
2720 else if (ins->dreg != ins->sreg1)
2721 ARM_MOV_REG_REG (code, ins->dreg, ins->sreg1);
2724 ARM_SHR_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
2727 ARM_MVN_REG_REG (code, ins->dreg, ins->sreg1);
2730 ARM_RSB_REG_IMM8 (code, ins->dreg, ins->sreg1, 0);
2733 if (ins->dreg == ins->sreg2)
2734 ARM_MUL_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
2736 ARM_MUL_REG_REG (code, ins->dreg, ins->sreg2, ins->sreg1);
2739 g_assert_not_reached ();
2742 /* FIXME: handle ovf/ sreg2 != dreg */
2743 ARM_MUL_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
2745 case OP_IMUL_OVF_UN:
2746 /* FIXME: handle ovf/ sreg2 != dreg */
2747 ARM_MUL_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
2750 code = mono_arm_emit_load_imm (code, ins->dreg, ins->inst_c0);
2753 /* Load the GOT offset */
2754 mono_add_patch_info (cfg, offset, (MonoJumpInfoType)ins->inst_i1, ins->inst_p0);
2755 ARM_LDR_IMM (code, ins->dreg, ARMREG_PC, 0);
2757 *(gpointer*)code = NULL;
2759 /* Load the value from the GOT */
2760 ARM_LDR_REG_REG (code, ins->dreg, ARMREG_PC, ins->dreg);
2762 case OP_ICONV_TO_I4:
2763 case OP_ICONV_TO_U4:
2765 if (ins->dreg != ins->sreg1)
2766 ARM_MOV_REG_REG (code, ins->dreg, ins->sreg1);
2769 int saved = ins->sreg2;
2770 if (ins->sreg2 == ARM_LSW_REG) {
2771 ARM_MOV_REG_REG (code, ARMREG_LR, ins->sreg2);
2774 if (ins->sreg1 != ARM_LSW_REG)
2775 ARM_MOV_REG_REG (code, ARM_LSW_REG, ins->sreg1);
2776 if (saved != ARM_MSW_REG)
2777 ARM_MOV_REG_REG (code, ARM_MSW_REG, saved);
2782 ARM_MVFD (code, ins->dreg, ins->sreg1);
2783 #elif defined(ARM_FPU_VFP)
2784 ARM_CPYD (code, ins->dreg, ins->sreg1);
2787 case OP_FCONV_TO_R4:
2789 ARM_MVFS (code, ins->dreg, ins->sreg1);
2790 #elif defined(ARM_FPU_VFP)
2791 ARM_CVTD (code, ins->dreg, ins->sreg1);
2792 ARM_CVTS (code, ins->dreg, ins->dreg);
2797 * Keep in sync with mono_arch_emit_epilog
2799 g_assert (!cfg->method->save_lmf);
2801 code = emit_load_volatile_arguments (cfg, code);
2803 code = emit_big_add (code, ARMREG_SP, cfg->frame_reg, cfg->stack_usage);
2804 ARM_POP_NWB (code, cfg->used_int_regs | ((1 << ARMREG_SP)) | ((1 << ARMREG_LR)));
2805 mono_add_patch_info (cfg, (guint8*) code - cfg->native_code, MONO_PATCH_INFO_METHOD_JUMP, ins->inst_p0);
2809 /* ensure ins->sreg1 is not NULL */
2810 ARM_LDR_IMM (code, ARMREG_LR, ins->sreg1, 0);
2814 if (ppc_is_imm16 (cfg->sig_cookie + cfg->stack_usage)) {
2815 ppc_addi (code, ppc_r11, cfg->frame_reg, cfg->sig_cookie + cfg->stack_usage);
2817 ppc_load (code, ppc_r11, cfg->sig_cookie + cfg->stack_usage);
2818 ppc_add (code, ppc_r11, cfg->frame_reg, ppc_r11);
2820 ppc_stw (code, ppc_r11, 0, ins->sreg1);
2830 call = (MonoCallInst*)ins;
2831 if (ins->flags & MONO_INST_HAS_METHOD)
2832 mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_METHOD, call->method);
2834 mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_ABS, call->fptr);
2835 code = emit_call_seq (cfg, code);
2836 code = emit_move_return_value (cfg, ins, code);
2842 case OP_VOIDCALL_REG:
2844 code = emit_call_reg (code, ins->sreg1);
2845 code = emit_move_return_value (cfg, ins, code);
2847 case OP_FCALL_MEMBASE:
2848 case OP_LCALL_MEMBASE:
2849 case OP_VCALL_MEMBASE:
2850 case OP_VCALL2_MEMBASE:
2851 case OP_VOIDCALL_MEMBASE:
2852 case OP_CALL_MEMBASE:
2853 g_assert (arm_is_imm12 (ins->inst_offset));
2854 g_assert (ins->sreg1 != ARMREG_LR);
2855 call = (MonoCallInst*)ins;
2856 if (call->method->klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
2857 ARM_ADD_REG_IMM8 (code, ARMREG_LR, ARMREG_PC, 4);
2858 ARM_LDR_IMM (code, ARMREG_PC, ins->sreg1, ins->inst_offset);
2860 * We can't embed the method in the code stream in PIC code, or
2862 * Instead, we put it in V5 in code emitted by
2863 * mono_arch_emit_imt_argument (), and embed NULL here to
2864 * signal the IMT thunk that the value is in V5.
2866 if (call->dynamic_imt_arg)
2867 *((gpointer*)code) = NULL;
2869 *((gpointer*)code) = (gpointer)call->method;
2872 ARM_MOV_REG_REG (code, ARMREG_LR, ARMREG_PC);
2873 ARM_LDR_IMM (code, ARMREG_PC, ins->sreg1, ins->inst_offset);
2875 code = emit_move_return_value (cfg, ins, code);
2878 /* keep alignment */
2879 int alloca_waste = cfg->param_area;
2882 /* round the size to 8 bytes */
2883 ARM_ADD_REG_IMM8 (code, ins->dreg, ins->sreg1, 7);
2884 ARM_BIC_REG_IMM8 (code, ins->dreg, ins->dreg, 7);
2886 ARM_ADD_REG_IMM8 (code, ins->dreg, ins->dreg, alloca_waste);
2887 ARM_SUB_REG_REG (code, ARMREG_SP, ARMREG_SP, ins->dreg);
2888 /* memzero the area: dreg holds the size, sp is the pointer */
2889 if (ins->flags & MONO_INST_INIT) {
2890 guint8 *start_loop, *branch_to_cond;
2891 ARM_MOV_REG_IMM8 (code, ARMREG_LR, 0);
2892 branch_to_cond = code;
2895 ARM_STR_REG_REG (code, ARMREG_LR, ARMREG_SP, ins->dreg);
2896 arm_patch (branch_to_cond, code);
2897 /* decrement by 4 and set flags */
2898 ARM_SUBS_REG_IMM8 (code, ins->dreg, ins->dreg, 4);
2899 ARM_B_COND (code, ARMCOND_GE, 0);
2900 arm_patch (code - 4, start_loop);
2902 ARM_ADD_REG_IMM8 (code, ins->dreg, ARMREG_SP, alloca_waste);
2906 if (ins->sreg1 != ARMREG_R0)
2907 ARM_MOV_REG_REG (code, ARMREG_R0, ins->sreg1);
2908 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_INTERNAL_METHOD,
2909 (gpointer)"mono_arch_throw_exception");
2910 code = emit_call_seq (cfg, code);
2914 if (ins->sreg1 != ARMREG_R0)
2915 ARM_MOV_REG_REG (code, ARMREG_R0, ins->sreg1);
2916 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_INTERNAL_METHOD,
2917 (gpointer)"mono_arch_rethrow_exception");
2918 code = emit_call_seq (cfg, code);
2921 case OP_START_HANDLER: {
2922 MonoInst *spvar = mono_find_spvar_for_region (cfg, bb->region);
2924 if (arm_is_imm12 (spvar->inst_offset)) {
2925 ARM_STR_IMM (code, ARMREG_LR, spvar->inst_basereg, spvar->inst_offset);
2927 code = mono_arm_emit_load_imm (code, ARMREG_IP, spvar->inst_offset);
2928 ARM_STR_REG_REG (code, ARMREG_LR, spvar->inst_basereg, ARMREG_IP);
2932 case OP_ENDFILTER: {
2933 MonoInst *spvar = mono_find_spvar_for_region (cfg, bb->region);
2935 if (ins->sreg1 != ARMREG_R0)
2936 ARM_MOV_REG_REG (code, ARMREG_R0, ins->sreg1);
2937 if (arm_is_imm12 (spvar->inst_offset)) {
2938 ARM_LDR_IMM (code, ARMREG_IP, spvar->inst_basereg, spvar->inst_offset);
2940 g_assert (ARMREG_IP != spvar->inst_basereg);
2941 code = mono_arm_emit_load_imm (code, ARMREG_IP, spvar->inst_offset);
2942 ARM_LDR_REG_REG (code, ARMREG_IP, spvar->inst_basereg, ARMREG_IP);
2944 ARM_MOV_REG_REG (code, ARMREG_PC, ARMREG_IP);
2947 case OP_ENDFINALLY: {
2948 MonoInst *spvar = mono_find_spvar_for_region (cfg, bb->region);
2950 if (arm_is_imm12 (spvar->inst_offset)) {
2951 ARM_LDR_IMM (code, ARMREG_IP, spvar->inst_basereg, spvar->inst_offset);
2953 g_assert (ARMREG_IP != spvar->inst_basereg);
2954 code = mono_arm_emit_load_imm (code, ARMREG_IP, spvar->inst_offset);
2955 ARM_LDR_REG_REG (code, ARMREG_IP, spvar->inst_basereg, ARMREG_IP);
2957 ARM_MOV_REG_REG (code, ARMREG_PC, ARMREG_IP);
2960 case OP_CALL_HANDLER:
2961 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_BB, ins->inst_target_bb);
2965 ins->inst_c0 = code - cfg->native_code;
2968 if (ins->flags & MONO_INST_BRLABEL) {
2969 /*if (ins->inst_i0->inst_c0) {
2971 //x86_jump_code (code, cfg->native_code + ins->inst_i0->inst_c0);
2973 mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_LABEL, ins->inst_i0);
2977 /*if (ins->inst_target_bb->native_offset) {
2979 //x86_jump_code (code, cfg->native_code + ins->inst_target_bb->native_offset);
2981 mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_BB, ins->inst_target_bb);
2987 ARM_MOV_REG_REG (code, ARMREG_PC, ins->sreg1);
2991 * In the normal case we have:
2992 * ldr pc, [pc, ins->sreg1 << 2]
2995 * ldr lr, [pc, ins->sreg1 << 2]
2997 * The jump-table data follows immediately after the instruction.
2998 * FIXME: add aot support.
3000 mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_SWITCH, ins->inst_p0);
3001 max_len += 4 * GPOINTER_TO_INT (ins->klass);
3002 if (offset > (cfg->code_size - max_len - 16)) {
3003 cfg->code_size += max_len;
3004 cfg->code_size *= 2;
3005 cfg->native_code = g_realloc (cfg->native_code, cfg->code_size);
3006 code = cfg->native_code + offset;
3008 ARM_LDR_REG_REG_SHIFT (code, ARMREG_PC, ARMREG_PC, ins->sreg1, ARMSHIFT_LSL, 2);
3010 code += 4 * GPOINTER_TO_INT (ins->klass);
3014 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 0, ARMCOND_NE);
3015 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_EQ);
3019 ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
3020 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_LT);
3024 ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
3025 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_LO);
3029 ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
3030 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_GT);
3034 ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
3035 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_HI);
3037 case OP_COND_EXC_EQ:
3038 case OP_COND_EXC_NE_UN:
3039 case OP_COND_EXC_LT:
3040 case OP_COND_EXC_LT_UN:
3041 case OP_COND_EXC_GT:
3042 case OP_COND_EXC_GT_UN:
3043 case OP_COND_EXC_GE:
3044 case OP_COND_EXC_GE_UN:
3045 case OP_COND_EXC_LE:
3046 case OP_COND_EXC_LE_UN:
3047 EMIT_COND_SYSTEM_EXCEPTION (ins->opcode - OP_COND_EXC_EQ, ins->inst_p1);
3049 case OP_COND_EXC_IEQ:
3050 case OP_COND_EXC_INE_UN:
3051 case OP_COND_EXC_ILT:
3052 case OP_COND_EXC_ILT_UN:
3053 case OP_COND_EXC_IGT:
3054 case OP_COND_EXC_IGT_UN:
3055 case OP_COND_EXC_IGE:
3056 case OP_COND_EXC_IGE_UN:
3057 case OP_COND_EXC_ILE:
3058 case OP_COND_EXC_ILE_UN:
3059 EMIT_COND_SYSTEM_EXCEPTION (ins->opcode - OP_COND_EXC_IEQ, ins->inst_p1);
3062 case OP_COND_EXC_OV:
3063 case OP_COND_EXC_NC:
3064 case OP_COND_EXC_NO:
3065 case OP_COND_EXC_IC:
3066 case OP_COND_EXC_IOV:
3067 case OP_COND_EXC_INC:
3068 case OP_COND_EXC_INO:
3081 EMIT_COND_BRANCH (ins, ins->opcode - OP_IBEQ);
3084 /* floating point opcodes */
3087 if (cfg->compile_aot) {
3088 ARM_LDFD (code, ins->dreg, ARMREG_PC, 0);
3090 *(guint32*)code = ((guint32*)(ins->inst_p0))[0];
3092 *(guint32*)code = ((guint32*)(ins->inst_p0))[1];
3095 /* FIXME: we can optimize the imm load by dealing with part of
3096 * the displacement in LDFD (aligning to 512).
3098 code = mono_arm_emit_load_imm (code, ARMREG_LR, (guint32)ins->inst_p0);
3099 ARM_LDFD (code, ins->dreg, ARMREG_LR, 0);
3103 if (cfg->compile_aot) {
3104 ARM_LDFS (code, ins->dreg, ARMREG_PC, 0);
3106 *(guint32*)code = ((guint32*)(ins->inst_p0))[0];
3109 code = mono_arm_emit_load_imm (code, ARMREG_LR, (guint32)ins->inst_p0);
3110 ARM_LDFS (code, ins->dreg, ARMREG_LR, 0);
3113 case OP_STORER8_MEMBASE_REG:
3114 /* This is generated by the local regalloc pass which runs after the lowering pass */
3115 if (!arm_is_fpimm8 (ins->inst_offset)) {
3116 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_offset);
3117 ARM_STFD (code, ins->sreg1, ARMREG_LR, 0);
3119 ARM_STFD (code, ins->sreg1, ins->inst_destbasereg, ins->inst_offset);
3122 case OP_LOADR8_MEMBASE:
3123 /* This is generated by the local regalloc pass which runs after the lowering pass */
3124 if (!arm_is_fpimm8 (ins->inst_offset)) {
3125 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_offset);
3126 ARM_LDFD (code, ins->dreg, ARMREG_LR, 0);
3128 ARM_LDFD (code, ins->dreg, ins->inst_basereg, ins->inst_offset);
3131 case OP_STORER4_MEMBASE_REG:
3132 g_assert (arm_is_fpimm8 (ins->inst_offset));
3133 ARM_STFS (code, ins->sreg1, ins->inst_destbasereg, ins->inst_offset);
3135 case OP_LOADR4_MEMBASE:
3136 g_assert (arm_is_fpimm8 (ins->inst_offset));
3137 ARM_LDFS (code, ins->dreg, ins->inst_basereg, ins->inst_offset);
3139 case OP_ICONV_TO_R_UN: {
3141 tmpreg = ins->dreg == 0? 1: 0;
3142 ARM_CMP_REG_IMM8 (code, ins->sreg1, 0);
3143 ARM_FLTD (code, ins->dreg, ins->sreg1);
3144 ARM_B_COND (code, ARMCOND_GE, 8);
3145 /* save the temp register */
3146 ARM_SUB_REG_IMM8 (code, ARMREG_SP, ARMREG_SP, 8);
3147 ARM_STFD (code, tmpreg, ARMREG_SP, 0);
3148 ARM_LDFD (code, tmpreg, ARMREG_PC, 12);
3149 ARM_FPA_ADFD (code, ins->dreg, ins->dreg, tmpreg);
3150 ARM_LDFD (code, tmpreg, ARMREG_SP, 0);
3151 ARM_ADD_REG_IMM8 (code, ARMREG_SP, ARMREG_SP, 8);
3152 /* skip the constant pool */
3155 *(int*)code = 0x41f00000;
3160 * ldfltd ftemp, [pc, #8] 0x41f00000 0x00000000
3161 * adfltd fdest, fdest, ftemp
3165 case OP_ICONV_TO_R4:
3166 ARM_FLTS (code, ins->dreg, ins->sreg1);
3168 case OP_ICONV_TO_R8:
3169 ARM_FLTD (code, ins->dreg, ins->sreg1);
3171 #elif defined(ARM_FPU_VFP)
3173 if (cfg->compile_aot) {
3174 ARM_FLDD (code, ins->dreg, ARMREG_PC, 0);
3176 *(guint32*)code = ((guint32*)(ins->inst_p0))[0];
3178 *(guint32*)code = ((guint32*)(ins->inst_p0))[1];
3181 /* FIXME: we can optimize the imm load by dealing with part of
3182 * the displacement in LDFD (aligning to 512).
3184 code = mono_arm_emit_load_imm (code, ARMREG_LR, (guint32)ins->inst_p0);
3185 ARM_FLDD (code, ins->dreg, ARMREG_LR, 0);
3189 if (cfg->compile_aot) {
3190 ARM_FLDS (code, ins->dreg, ARMREG_PC, 0);
3192 *(guint32*)code = ((guint32*)(ins->inst_p0))[0];
3194 ARM_CVTS (code, ins->dreg, ins->dreg);
3196 code = mono_arm_emit_load_imm (code, ARMREG_LR, (guint32)ins->inst_p0);
3197 ARM_FLDS (code, ins->dreg, ARMREG_LR, 0);
3198 ARM_CVTS (code, ins->dreg, ins->dreg);
3201 case OP_STORER8_MEMBASE_REG:
3202 g_assert (arm_is_fpimm8 (ins->inst_offset));
3203 ARM_FSTD (code, ins->sreg1, ins->inst_destbasereg, ins->inst_offset);
3205 case OP_LOADR8_MEMBASE:
3206 g_assert (arm_is_fpimm8 (ins->inst_offset));
3207 ARM_FLDD (code, ins->dreg, ins->inst_basereg, ins->inst_offset);
3209 case OP_STORER4_MEMBASE_REG:
3210 g_assert (arm_is_fpimm8 (ins->inst_offset));
3211 ARM_FSTS (code, ins->sreg1, ins->inst_destbasereg, ins->inst_offset);
3213 case OP_LOADR4_MEMBASE:
3214 g_assert (arm_is_fpimm8 (ins->inst_offset));
3215 ARM_FLDS (code, ins->dreg, ins->inst_basereg, ins->inst_offset);
3217 case OP_ICONV_TO_R_UN: {
3218 g_assert_not_reached ();
3221 case OP_ICONV_TO_R4:
3222 g_assert_not_reached ();
3223 //ARM_FLTS (code, ins->dreg, ins->sreg1);
3225 case OP_ICONV_TO_R8:
3226 g_assert_not_reached ();
3227 //ARM_FLTD (code, ins->dreg, ins->sreg1);
3230 case OP_FCONV_TO_I1:
3231 code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 1, TRUE);
3233 case OP_FCONV_TO_U1:
3234 code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 1, FALSE);
3236 case OP_FCONV_TO_I2:
3237 code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 2, TRUE);
3239 case OP_FCONV_TO_U2:
3240 code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 2, FALSE);
3242 case OP_FCONV_TO_I4:
3244 code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 4, TRUE);
3246 case OP_FCONV_TO_U4:
3248 code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 4, FALSE);
3250 case OP_FCONV_TO_I8:
3251 case OP_FCONV_TO_U8:
3252 g_assert_not_reached ();
3253 /* Implemented as helper calls */
3255 case OP_LCONV_TO_R_UN:
3256 g_assert_not_reached ();
3257 /* Implemented as helper calls */
3259 case OP_LCONV_TO_OVF_I:
3260 case OP_LCONV_TO_OVF_I4_2: {
3261 guint32 *high_bit_not_set, *valid_negative, *invalid_negative, *valid_positive;
3263 * Valid ints: 0xFFFFFFFF:0x80000000 to 0x00000000:0x7FFFFFFF
3266 ARM_CMP_REG_IMM8 (code, ins->sreg1, 0);
3267 high_bit_not_set = code;
3268 ARM_B_COND (code, ARMCOND_GE, 0); /*branch if bit 31 of the lower part is not set*/
3270 ARM_CMN_REG_IMM8 (code, ins->sreg2, 1); /*This have the same effect as CMP reg, 0xFFFFFFFF */
3271 valid_negative = code;
3272 ARM_B_COND (code, ARMCOND_EQ, 0); /*branch if upper part == 0xFFFFFFFF (lower part has bit 31 set) */
3273 invalid_negative = code;
3274 ARM_B_COND (code, ARMCOND_AL, 0);
3276 arm_patch (high_bit_not_set, code);
3278 ARM_CMP_REG_IMM8 (code, ins->sreg2, 0);
3279 valid_positive = code;
3280 ARM_B_COND (code, ARMCOND_EQ, 0); /*branch if upper part == 0 (lower part has bit 31 clear)*/
3282 arm_patch (invalid_negative, code);
3283 EMIT_COND_SYSTEM_EXCEPTION_FLAGS (ARMCOND_AL, "OverflowException");
3285 arm_patch (valid_negative, code);
3286 arm_patch (valid_positive, code);
3288 if (ins->dreg != ins->sreg1)
3289 ARM_MOV_REG_REG (code, ins->dreg, ins->sreg1);
3294 ARM_FPA_ADFD (code, ins->dreg, ins->sreg1, ins->sreg2);
3297 ARM_FPA_SUFD (code, ins->dreg, ins->sreg1, ins->sreg2);
3300 ARM_FPA_MUFD (code, ins->dreg, ins->sreg1, ins->sreg2);
3303 ARM_FPA_DVFD (code, ins->dreg, ins->sreg1, ins->sreg2);
3306 ARM_MNFD (code, ins->dreg, ins->sreg1);
3308 #elif defined(ARM_FPU_VFP)
3310 ARM_VFP_ADDD (code, ins->dreg, ins->sreg1, ins->sreg2);
3313 ARM_VFP_SUBD (code, ins->dreg, ins->sreg1, ins->sreg2);
3316 ARM_VFP_MULD (code, ins->dreg, ins->sreg1, ins->sreg2);
3319 ARM_VFP_DIVD (code, ins->dreg, ins->sreg1, ins->sreg2);
3322 ARM_NEGD (code, ins->dreg, ins->sreg1);
3327 g_assert_not_reached ();
3331 ARM_FCMP (code, ARM_FPA_CMF, ins->sreg1, ins->sreg2);
3332 #elif defined(ARM_FPU_VFP)
3333 ARM_CMPD (code, ins->sreg1, ins->sreg2);
3338 ARM_FCMP (code, ARM_FPA_CMF, ins->sreg1, ins->sreg2);
3339 #elif defined(ARM_FPU_VFP)
3340 ARM_CMPD (code, ins->sreg1, ins->sreg2);
3342 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 0, ARMCOND_NE);
3343 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_EQ);
3347 ARM_FCMP (code, ARM_FPA_CMF, ins->sreg1, ins->sreg2);
3348 #elif defined(ARM_FPU_VFP)
3349 ARM_CMPD (code, ins->sreg1, ins->sreg2);
3351 ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
3352 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_MI);
3356 ARM_FCMP (code, ARM_FPA_CMF, ins->sreg1, ins->sreg2);
3357 #elif defined(ARM_FPU_VFP)
3358 ARM_CMPD (code, ins->sreg1, ins->sreg2);
3360 ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
3361 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_MI);
3362 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_VS);
3367 ARM_FCMP (code, ARM_FPA_CMF, ins->sreg2, ins->sreg1);
3368 #elif defined(ARM_FPU_VFP)
3369 ARM_CMPD (code, ins->sreg2, ins->sreg1);
3371 ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
3372 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_MI);
3377 ARM_FCMP (code, ARM_FPA_CMF, ins->sreg2, ins->sreg1);
3378 #elif defined(ARM_FPU_VFP)
3379 ARM_CMPD (code, ins->sreg2, ins->sreg1);
3381 ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
3382 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_MI);
3383 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_VS);
3385 /* ARM FPA flags table:
3386 * N Less than ARMCOND_MI
3387 * Z Equal ARMCOND_EQ
3388 * C Greater Than or Equal ARMCOND_CS
3389 * V Unordered ARMCOND_VS
3392 EMIT_COND_BRANCH (ins, OP_IBEQ - OP_IBEQ);
3395 EMIT_COND_BRANCH (ins, OP_IBNE_UN - OP_IBEQ);
3398 EMIT_COND_BRANCH_FLAGS (ins, ARMCOND_MI); /* N set */
3401 EMIT_COND_BRANCH_FLAGS (ins, ARMCOND_VS); /* V set */
3402 EMIT_COND_BRANCH_FLAGS (ins, ARMCOND_MI); /* N set */
3408 g_assert_not_reached ();
3411 EMIT_COND_BRANCH_FLAGS (ins, ARMCOND_CS);
3414 EMIT_COND_BRANCH_FLAGS (ins, ARMCOND_VS); /* V set */
3415 EMIT_COND_BRANCH_FLAGS (ins, ARMCOND_GE);
3420 if (ins->dreg != ins->sreg1)
3421 ARM_MVFD (code, ins->dreg, ins->sreg1);
3423 g_assert_not_reached ();
3428 g_warning ("unknown opcode %s in %s()\n", mono_inst_name (ins->opcode), __FUNCTION__);
3429 g_assert_not_reached ();
3432 if ((cfg->opt & MONO_OPT_BRANCH) && ((code - cfg->native_code - offset) > max_len)) {
3433 g_warning ("wrong maximal instruction length of instruction %s (expected %d, got %d)",
3434 mono_inst_name (ins->opcode), max_len, code - cfg->native_code - offset);
3435 g_assert_not_reached ();
3441 last_offset = offset;
3444 cfg->code_len = code - cfg->native_code;
3447 #endif /* DISABLE_JIT */
3450 mono_arch_register_lowlevel_calls (void)
3452 /* The signature doesn't matter */
3453 mono_register_jit_icall (mono_arm_throw_exception, "mono_arm_throw_exception", mono_create_icall_signature ("void"), TRUE);
3456 #define patch_lis_ori(ip,val) do {\
3457 guint16 *__lis_ori = (guint16*)(ip); \
3458 __lis_ori [1] = (((guint32)(val)) >> 16) & 0xffff; \
3459 __lis_ori [3] = ((guint32)(val)) & 0xffff; \
3463 mono_arch_patch_code (MonoMethod *method, MonoDomain *domain, guint8 *code, MonoJumpInfo *ji, gboolean run_cctors)
3465 MonoJumpInfo *patch_info;
3466 gboolean compile_aot = !run_cctors;
3468 for (patch_info = ji; patch_info; patch_info = patch_info->next) {
3469 unsigned char *ip = patch_info->ip.i + code;
3470 const unsigned char *target;
3472 if (patch_info->type == MONO_PATCH_INFO_SWITCH && !compile_aot) {
3473 gpointer *jt = (gpointer*)(ip + 8);
3475 /* jt is the inlined jump table, 2 instructions after ip
3476 * In the normal case we store the absolute addresses,
3477 * otherwise the displacements.
3479 for (i = 0; i < patch_info->data.table->table_size; i++)
3480 jt [i] = code + (int)patch_info->data.table->table [i];
3483 target = mono_resolve_patch_target (method, domain, code, patch_info, run_cctors);
3486 switch (patch_info->type) {
3487 case MONO_PATCH_INFO_BB:
3488 case MONO_PATCH_INFO_LABEL:
3491 /* No need to patch these */
3496 switch (patch_info->type) {
3497 case MONO_PATCH_INFO_IP:
3498 g_assert_not_reached ();
3499 patch_lis_ori (ip, ip);
3501 case MONO_PATCH_INFO_METHOD_REL:
3502 g_assert_not_reached ();
3503 *((gpointer *)(ip)) = code + patch_info->data.offset;
3505 case MONO_PATCH_INFO_METHODCONST:
3506 case MONO_PATCH_INFO_CLASS:
3507 case MONO_PATCH_INFO_IMAGE:
3508 case MONO_PATCH_INFO_FIELD:
3509 case MONO_PATCH_INFO_VTABLE:
3510 case MONO_PATCH_INFO_IID:
3511 case MONO_PATCH_INFO_SFLDA:
3512 case MONO_PATCH_INFO_LDSTR:
3513 case MONO_PATCH_INFO_TYPE_FROM_HANDLE:
3514 case MONO_PATCH_INFO_LDTOKEN:
3515 g_assert_not_reached ();
3516 /* from OP_AOTCONST : lis + ori */
3517 patch_lis_ori (ip, target);
3519 case MONO_PATCH_INFO_R4:
3520 case MONO_PATCH_INFO_R8:
3521 g_assert_not_reached ();
3522 *((gconstpointer *)(ip + 2)) = patch_info->data.target;
3524 case MONO_PATCH_INFO_EXC_NAME:
3525 g_assert_not_reached ();
3526 *((gconstpointer *)(ip + 1)) = patch_info->data.name;
3528 case MONO_PATCH_INFO_NONE:
3529 case MONO_PATCH_INFO_BB_OVF:
3530 case MONO_PATCH_INFO_EXC_OVF:
3531 /* everything is dealt with at epilog output time */
3536 arm_patch (ip, target);
3541 * Stack frame layout:
3543 * ------------------- fp
3544 * MonoLMF structure or saved registers
3545 * -------------------
3547 * -------------------
3549 * -------------------
3550 * optional 8 bytes for tracing
3551 * -------------------
3552 * param area size is cfg->param_area
3553 * ------------------- sp
3556 mono_arch_emit_prolog (MonoCompile *cfg)
3558 MonoMethod *method = cfg->method;
3560 MonoMethodSignature *sig;
3562 int alloc_size, pos, max_offset, i, rot_amount;
3569 if (mono_jit_trace_calls != NULL && mono_trace_eval (method))
3572 sig = mono_method_signature (method);
3573 cfg->code_size = 256 + sig->param_count * 20;
3574 code = cfg->native_code = g_malloc (cfg->code_size);
3576 ARM_MOV_REG_REG (code, ARMREG_IP, ARMREG_SP);
3578 alloc_size = cfg->stack_offset;
3581 if (!method->save_lmf) {
3582 ARM_PUSH (code, (cfg->used_int_regs | (1 << ARMREG_IP) | (1 << ARMREG_LR)));
3583 prev_sp_offset = 8; /* ip and lr */
3584 for (i = 0; i < 16; ++i) {
3585 if (cfg->used_int_regs & (1 << i))
3586 prev_sp_offset += 4;
3589 ARM_PUSH (code, 0x5ff0);
3590 prev_sp_offset = 4 * 10; /* all but r0-r3, sp and pc */
3591 pos += sizeof (MonoLMF) - prev_sp_offset;
3595 // align to MONO_ARCH_FRAME_ALIGNMENT bytes
3596 if (alloc_size & (MONO_ARCH_FRAME_ALIGNMENT - 1)) {
3597 alloc_size += MONO_ARCH_FRAME_ALIGNMENT - 1;
3598 alloc_size &= ~(MONO_ARCH_FRAME_ALIGNMENT - 1);
3601 /* the stack used in the pushed regs */
3602 if (prev_sp_offset & 4)
3604 cfg->stack_usage = alloc_size;
3606 if ((i = mono_arm_is_rotated_imm8 (alloc_size, &rot_amount)) >= 0) {
3607 ARM_SUB_REG_IMM (code, ARMREG_SP, ARMREG_SP, i, rot_amount);
3609 code = mono_arm_emit_load_imm (code, ARMREG_IP, alloc_size);
3610 ARM_SUB_REG_REG (code, ARMREG_SP, ARMREG_SP, ARMREG_IP);
3613 if (cfg->frame_reg != ARMREG_SP)
3614 ARM_MOV_REG_REG (code, cfg->frame_reg, ARMREG_SP);
3615 //g_print ("prev_sp_offset: %d, alloc_size:%d\n", prev_sp_offset, alloc_size);
3616 prev_sp_offset += alloc_size;
3618 /* compute max_offset in order to use short forward jumps
3619 * we could skip do it on arm because the immediate displacement
3620 * for jumps is large enough, it may be useful later for constant pools
3623 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
3624 MonoInst *ins = bb->code;
3625 bb->max_offset = max_offset;
3627 if (cfg->prof_options & MONO_PROFILE_COVERAGE)
3630 MONO_BB_FOR_EACH_INS (bb, ins)
3631 max_offset += ((guint8 *)ins_get_spec (ins->opcode))[MONO_INST_LEN];
3634 /* store runtime generic context */
3635 if (cfg->rgctx_var) {
3636 MonoInst *ins = cfg->rgctx_var;
3638 g_assert (ins->opcode == OP_REGOFFSET);
3640 if (arm_is_imm12 (ins->inst_offset)) {
3641 ARM_STR_IMM (code, MONO_ARCH_RGCTX_REG, ins->inst_basereg, ins->inst_offset);
3643 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_offset);
3644 ARM_STR_REG_REG (code, MONO_ARCH_RGCTX_REG, ins->inst_basereg, ARMREG_LR);
3648 /* load arguments allocated to register from the stack */
3651 cinfo = calculate_sizes (sig, sig->pinvoke);
3653 if (MONO_TYPE_ISSTRUCT (sig->ret)) {
3654 ArgInfo *ainfo = &cinfo->ret;
3655 inst = cfg->vret_addr;
3656 g_assert (arm_is_imm12 (inst->inst_offset));
3657 ARM_STR_IMM (code, ainfo->reg, inst->inst_basereg, inst->inst_offset);
3659 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
3660 ArgInfo *ainfo = cinfo->args + i;
3661 inst = cfg->args [pos];
3663 if (cfg->verbose_level > 2)
3664 g_print ("Saving argument %d (type: %d)\n", i, ainfo->regtype);
3665 if (inst->opcode == OP_REGVAR) {
3666 if (ainfo->regtype == RegTypeGeneral)
3667 ARM_MOV_REG_REG (code, inst->dreg, ainfo->reg);
3668 else if (ainfo->regtype == RegTypeFP) {
3669 g_assert_not_reached ();
3670 } else if (ainfo->regtype == RegTypeBase) {
3671 if (arm_is_imm12 (prev_sp_offset + ainfo->offset)) {
3672 ARM_LDR_IMM (code, inst->dreg, ARMREG_SP, (prev_sp_offset + ainfo->offset));
3674 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
3675 ARM_LDR_REG_REG (code, inst->dreg, ARMREG_SP, ARMREG_IP);
3678 g_assert_not_reached ();
3680 if (cfg->verbose_level > 2)
3681 g_print ("Argument %d assigned to register %s\n", pos, mono_arch_regname (inst->dreg));
3683 /* the argument should be put on the stack: FIXME handle size != word */
3684 if (ainfo->regtype == RegTypeGeneral) {
3685 switch (ainfo->size) {
3687 if (arm_is_imm12 (inst->inst_offset))
3688 ARM_STRB_IMM (code, ainfo->reg, inst->inst_basereg, inst->inst_offset);
3690 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
3691 ARM_STRB_REG_REG (code, ainfo->reg, inst->inst_basereg, ARMREG_IP);
3695 if (arm_is_imm8 (inst->inst_offset)) {
3696 ARM_STRH_IMM (code, ainfo->reg, inst->inst_basereg, inst->inst_offset);
3698 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
3699 ARM_STRH_REG_REG (code, ainfo->reg, inst->inst_basereg, ARMREG_IP);
3703 g_assert (arm_is_imm12 (inst->inst_offset));
3704 ARM_STR_IMM (code, ainfo->reg, inst->inst_basereg, inst->inst_offset);
3705 g_assert (arm_is_imm12 (inst->inst_offset + 4));
3706 ARM_STR_IMM (code, ainfo->reg + 1, inst->inst_basereg, inst->inst_offset + 4);
3709 if (arm_is_imm12 (inst->inst_offset)) {
3710 ARM_STR_IMM (code, ainfo->reg, inst->inst_basereg, inst->inst_offset);
3712 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
3713 ARM_STR_REG_REG (code, ainfo->reg, inst->inst_basereg, ARMREG_IP);
3717 } else if (ainfo->regtype == RegTypeBaseGen) {
3718 g_assert (arm_is_imm12 (prev_sp_offset + ainfo->offset));
3719 g_assert (arm_is_imm12 (inst->inst_offset));
3720 ARM_LDR_IMM (code, ARMREG_LR, ARMREG_SP, (prev_sp_offset + ainfo->offset));
3721 ARM_STR_IMM (code, ARMREG_LR, inst->inst_basereg, inst->inst_offset + 4);
3722 ARM_STR_IMM (code, ARMREG_R3, inst->inst_basereg, inst->inst_offset);
3723 } else if (ainfo->regtype == RegTypeBase) {
3724 if (arm_is_imm12 (prev_sp_offset + ainfo->offset)) {
3725 ARM_LDR_IMM (code, ARMREG_LR, ARMREG_SP, (prev_sp_offset + ainfo->offset));
3727 code = mono_arm_emit_load_imm (code, ARMREG_IP, prev_sp_offset + ainfo->offset);
3728 ARM_LDR_REG_REG (code, ARMREG_LR, ARMREG_SP, ARMREG_IP);
3731 switch (ainfo->size) {
3733 if (arm_is_imm8 (inst->inst_offset)) {
3734 ARM_STRB_IMM (code, ARMREG_LR, inst->inst_basereg, inst->inst_offset);
3736 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
3737 ARM_STRB_REG_REG (code, ARMREG_LR, inst->inst_basereg, ARMREG_IP);
3741 if (arm_is_imm8 (inst->inst_offset)) {
3742 ARM_STRH_IMM (code, ARMREG_LR, inst->inst_basereg, inst->inst_offset);
3744 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
3745 ARM_STRH_REG_REG (code, ARMREG_LR, inst->inst_basereg, ARMREG_IP);
3749 if (arm_is_imm12 (inst->inst_offset)) {
3750 ARM_STR_IMM (code, ARMREG_LR, inst->inst_basereg, inst->inst_offset);
3752 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
3753 ARM_STR_REG_REG (code, ARMREG_LR, inst->inst_basereg, ARMREG_IP);
3755 if (arm_is_imm12 (prev_sp_offset + ainfo->offset + 4)) {
3756 ARM_LDR_IMM (code, ARMREG_LR, ARMREG_SP, (prev_sp_offset + ainfo->offset + 4));
3758 code = mono_arm_emit_load_imm (code, ARMREG_IP, prev_sp_offset + ainfo->offset + 4);
3759 ARM_LDR_REG_REG (code, ARMREG_LR, ARMREG_SP, ARMREG_IP);
3761 if (arm_is_imm12 (inst->inst_offset + 4)) {
3762 ARM_STR_IMM (code, ARMREG_LR, inst->inst_basereg, inst->inst_offset + 4);
3764 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset + 4);
3765 ARM_STR_REG_REG (code, ARMREG_LR, inst->inst_basereg, ARMREG_IP);
3769 if (arm_is_imm12 (inst->inst_offset)) {
3770 ARM_STR_IMM (code, ARMREG_LR, inst->inst_basereg, inst->inst_offset);
3772 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
3773 ARM_STR_REG_REG (code, ARMREG_LR, inst->inst_basereg, ARMREG_IP);
3777 } else if (ainfo->regtype == RegTypeFP) {
3778 g_assert_not_reached ();
3779 } else if (ainfo->regtype == RegTypeStructByVal) {
3780 int doffset = inst->inst_offset;
3784 size = mini_type_stack_size_full (cfg->generic_sharing_context, inst->inst_vtype, NULL, sig->pinvoke);
3785 for (cur_reg = 0; cur_reg < ainfo->size; ++cur_reg) {
3786 if (arm_is_imm12 (doffset)) {
3787 ARM_STR_IMM (code, ainfo->reg + cur_reg, inst->inst_basereg, doffset);
3789 code = mono_arm_emit_load_imm (code, ARMREG_IP, doffset);
3790 ARM_STR_REG_REG (code, ainfo->reg + cur_reg, inst->inst_basereg, ARMREG_IP);
3792 soffset += sizeof (gpointer);
3793 doffset += sizeof (gpointer);
3795 if (ainfo->vtsize) {
3796 /* FIXME: handle overrun! with struct sizes not multiple of 4 */
3797 //g_print ("emit_memcpy (prev_sp_ofs: %d, ainfo->offset: %d, soffset: %d)\n", prev_sp_offset, ainfo->offset, soffset);
3798 code = emit_memcpy (code, ainfo->vtsize * sizeof (gpointer), inst->inst_basereg, doffset, ARMREG_SP, prev_sp_offset + ainfo->offset);
3800 } else if (ainfo->regtype == RegTypeStructByAddr) {
3801 g_assert_not_reached ();
3802 /* FIXME: handle overrun! with struct sizes not multiple of 4 */
3803 code = emit_memcpy (code, ainfo->vtsize * sizeof (gpointer), inst->inst_basereg, inst->inst_offset, ainfo->reg, 0);
3805 g_assert_not_reached ();
3810 if (method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED) {
3811 code = mono_arm_emit_load_imm (code, ARMREG_R0, (guint32)cfg->domain);
3812 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_INTERNAL_METHOD,
3813 (gpointer)"mono_jit_thread_attach");
3814 code = emit_call_seq (cfg, code);
3817 if (method->save_lmf) {
3819 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_INTERNAL_METHOD,
3820 (gpointer)"mono_get_lmf_addr");
3821 code = emit_call_seq (cfg, code);
3822 /* we build the MonoLMF structure on the stack - see mini-arm.h */
3823 /* lmf_offset is the offset from the previous stack pointer,
3824 * alloc_size is the total stack space allocated, so the offset
3825 * of MonoLMF from the current stack ptr is alloc_size - lmf_offset.
3826 * The pointer to the struct is put in r1 (new_lmf).
3827 * r2 is used as scratch
3828 * The callee-saved registers are already in the MonoLMF structure
3830 code = emit_big_add (code, ARMREG_R1, ARMREG_SP, alloc_size - lmf_offset);
3831 /* r0 is the result from mono_get_lmf_addr () */
3832 ARM_STR_IMM (code, ARMREG_R0, ARMREG_R1, G_STRUCT_OFFSET (MonoLMF, lmf_addr));
3833 /* new_lmf->previous_lmf = *lmf_addr */
3834 ARM_LDR_IMM (code, ARMREG_R2, ARMREG_R0, G_STRUCT_OFFSET (MonoLMF, previous_lmf));
3835 ARM_STR_IMM (code, ARMREG_R2, ARMREG_R1, G_STRUCT_OFFSET (MonoLMF, previous_lmf));
3836 /* *(lmf_addr) = r1 */
3837 ARM_STR_IMM (code, ARMREG_R1, ARMREG_R0, G_STRUCT_OFFSET (MonoLMF, previous_lmf));
3838 /* Skip method (only needed for trampoline LMF frames) */
3839 ARM_STR_IMM (code, ARMREG_SP, ARMREG_R1, G_STRUCT_OFFSET (MonoLMF, ebp));
3840 /* save the current IP */
3841 ARM_MOV_REG_REG (code, ARMREG_R2, ARMREG_PC);
3842 ARM_STR_IMM (code, ARMREG_R2, ARMREG_R1, G_STRUCT_OFFSET (MonoLMF, eip));
3846 code = mono_arch_instrument_prolog (cfg, mono_trace_enter_method, code, TRUE);
3848 cfg->code_len = code - cfg->native_code;
3849 g_assert (cfg->code_len < cfg->code_size);
/*
 * mono_arch_emit_epilog:
 *
 *   Emit the method epilogue: optional leave-method tracing, LMF unlinking
 * when the method saved an LMF, stack unwinding, and the final multi-register
 * pop that returns by popping the saved PC.
 * NOTE(review): several source lines are elided in this view; comments only
 * describe what the visible code demonstrates.
 */
3856 mono_arch_emit_epilog (MonoCompile *cfg)
3858 MonoMethod *method = cfg->method;
3859 int pos, i, rot_amount;
/* conservative upper bound on the bytes emitted below */
3860 int max_epilog_size = 16 + 20*4;
3863 if (cfg->method->save_lmf)
3864 max_epilog_size += 128;
3866 if (mono_jit_trace_calls != NULL)
3867 max_epilog_size += 50;
3869 if (cfg->prof_options & MONO_PROFILE_ENTER_LEAVE)
3870 max_epilog_size += 50;
/* grow the native code buffer until the epilogue is guaranteed to fit */
3872 while (cfg->code_len + max_epilog_size > (cfg->code_size - 16)) {
3873 cfg->code_size *= 2;
3874 cfg->native_code = g_realloc (cfg->native_code, cfg->code_size);
3875 mono_jit_stats.code_reallocs++;
3879 * Keep in sync with OP_JMP
3881 code = cfg->native_code + cfg->code_len;
3883 if (mono_jit_trace_calls != NULL && mono_trace_eval (method)) {
3884 code = mono_arch_instrument_epilog (cfg, mono_trace_leave_method, code, TRUE);
/* unlink this frame's MonoLMF from the per-thread LMF chain */
3888 if (method->save_lmf) {
3890 /* all but r0-r3, sp and pc */
3891 pos += sizeof (MonoLMF) - (4 * 10);
3893 /* r2 contains the pointer to the current LMF */
3894 code = emit_big_add (code, ARMREG_R2, cfg->frame_reg, cfg->stack_usage - lmf_offset);
3895 /* ip = previous_lmf */
3896 ARM_LDR_IMM (code, ARMREG_IP, ARMREG_R2, G_STRUCT_OFFSET (MonoLMF, previous_lmf));
/* lr = lmf_addr */
3898 ARM_LDR_IMM (code, ARMREG_LR, ARMREG_R2, G_STRUCT_OFFSET (MonoLMF, lmf_addr));
3899 /* *(lmf_addr) = previous_lmf */
3900 ARM_STR_IMM (code, ARMREG_IP, ARMREG_LR, G_STRUCT_OFFSET (MonoLMF, previous_lmf));
3901 /* FIXME: speedup: there is no actual need to restore the registers if
3902 * we didn't actually change them (idea from Zoltan).
3905 /* point sp at the registers to restore: 10 is 14 -4, because we skip r0-r3 */
3906 ARM_ADD_REG_IMM8 (code, ARMREG_SP, ARMREG_R2, (sizeof (MonoLMF) - 10 * sizeof (gulong)));
3907 ARM_POP_NWB (code, 0xaff0); /* restore ip to sp and lr to pc */
/* no LMF: unwind the frame directly off cfg->frame_reg */
3909 if ((i = mono_arm_is_rotated_imm8 (cfg->stack_usage, &rot_amount)) >= 0) {
3910 ARM_ADD_REG_IMM (code, ARMREG_SP, cfg->frame_reg, i, rot_amount);
/* stack usage does not fit a rotated imm8: materialize it in IP first */
3912 code = mono_arm_emit_load_imm (code, ARMREG_IP, cfg->stack_usage);
3913 ARM_ADD_REG_REG (code, ARMREG_SP, ARMREG_SP, ARMREG_IP);
3915 /* FIXME: add v4 thumb interworking support */
/* restore callee-saved regs and return by popping the saved PC */
3916 ARM_POP_NWB (code, cfg->used_int_regs | ((1 << ARMREG_SP) | (1 << ARMREG_PC)));
3919 cfg->code_len = code - cfg->native_code;
3921 g_assert (cfg->code_len < cfg->code_size);
3925 /* remove once throw_exception_by_name is eliminated */
/*
 * exception_id_by_name:
 *
 *   Map a System.* exception class name to its MONO_EXC_* intrinsic id.
 * Aborts via g_error () on a name that has no intrinsic id.
 */
3927 exception_id_by_name (const char *name)
3929 if (strcmp (name, "IndexOutOfRangeException") == 0)
3930 return MONO_EXC_INDEX_OUT_OF_RANGE;
3931 if (strcmp (name, "OverflowException") == 0)
3932 return MONO_EXC_OVERFLOW;
3933 if (strcmp (name, "ArithmeticException") == 0)
3934 return MONO_EXC_ARITHMETIC;
3935 if (strcmp (name, "DivideByZeroException") == 0)
3936 return MONO_EXC_DIVIDE_BY_ZERO;
3937 if (strcmp (name, "InvalidCastException") == 0)
3938 return MONO_EXC_INVALID_CAST;
3939 if (strcmp (name, "NullReferenceException") == 0)
3940 return MONO_EXC_NULL_REF;
3941 if (strcmp (name, "ArrayTypeMismatchException") == 0)
3942 return MONO_EXC_ARRAY_TYPE_MISMATCH;
/* unknown name: fatal, this list must stay in sync with MONO_EXC_INTRINS_NUM */
3943 g_error ("Unknown intrinsic exception %s\n", name);
/*
 * mono_arch_emit_exceptions:
 *
 *   Append the out-of-line exception-throwing stubs to the method's code.
 * Each distinct intrinsic exception gets a single stub; all MONO_PATCH_INFO_EXC
 * branch sites for the same exception are patched to jump to the shared stub,
 * which loads the exception type token and calls
 * mono_arch_throw_corlib_exception.
 * NOTE(review): several source lines are elided in this view.
 */
3948 mono_arch_emit_exceptions (MonoCompile *cfg)
3950 MonoJumpInfo *patch_info;
/* one slot per intrinsic exception: stub address + "already sized" flag */
3953 const guint8* exc_throw_pos [MONO_EXC_INTRINS_NUM] = {NULL};
3954 guint8 exc_throw_found [MONO_EXC_INTRINS_NUM] = {0};
3955 int max_epilog_size = 50;
3957 /* count the number of exception infos */
3960 * make sure we have enough space for exceptions
/* first pass: size the buffer — 32 bytes per distinct exception stub */
3962 for (patch_info = cfg->patch_info; patch_info; patch_info = patch_info->next) {
3963 if (patch_info->type == MONO_PATCH_INFO_EXC) {
3964 i = exception_id_by_name (patch_info->data.target);
3965 if (!exc_throw_found [i]) {
3966 max_epilog_size += 32;
3967 exc_throw_found [i] = TRUE;
3972 while (cfg->code_len + max_epilog_size > (cfg->code_size - 16)) {
3973 cfg->code_size *= 2;
3974 cfg->native_code = g_realloc (cfg->native_code, cfg->code_size);
3975 mono_jit_stats.code_reallocs++;
3978 code = cfg->native_code + cfg->code_len;
3980 /* add code to raise exceptions */
3981 for (patch_info = cfg->patch_info; patch_info; patch_info = patch_info->next) {
3982 switch (patch_info->type) {
3983 case MONO_PATCH_INFO_EXC: {
3984 MonoClass *exc_class;
3985 unsigned char *ip = patch_info->ip.i + cfg->native_code;
3987 i = exception_id_by_name (patch_info->data.target);
/* stub already emitted for this exception: just redirect the branch */
3988 if (exc_throw_pos [i]) {
3989 arm_patch (ip, exc_throw_pos [i]);
3990 patch_info->type = MONO_PATCH_INFO_NONE;
3993 exc_throw_pos [i] = code;
3995 arm_patch (ip, code);
3997 exc_class = mono_class_from_name (mono_defaults.corlib, "System", patch_info->data.name);
3998 g_assert (exc_class);
/* r1 = caller address (lr), r0 = type token loaded from the code stream */
4000 ARM_MOV_REG_REG (code, ARMREG_R1, ARMREG_LR);
4001 ARM_LDR_IMM (code, ARMREG_R0, ARMREG_PC, 0);
/* repurpose the patch entry to call the generic corlib-throw helper */
4002 patch_info->type = MONO_PATCH_INFO_INTERNAL_METHOD;
4003 patch_info->data.name = "mono_arch_throw_corlib_exception";
4004 patch_info->ip.i = code - cfg->native_code;
/* embed the type token inline; the LDR above reads it PC-relative */
4006 *(guint32*)(gpointer)code = exc_class->type_token;
4016 cfg->code_len = code - cfg->native_code;
4018 g_assert (cfg->code_len < cfg->code_size);
/* Arch hook: per-thread JIT TLS setup (body elided in this view). */
4023 mono_arch_setup_jit_tls_data (MonoJitTlsData *tls)
/* Arch hook: free per-thread JIT TLS data (body elided in this view). */
4028 mono_arch_free_jit_tls_data (MonoJitTlsData *tls)
/* Arch hook: emit an intrinsic MonoInst for a known method (body elided in this view). */
4033 mono_arch_emit_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
/* Debug helper: print a MonoInst tree (body elided in this view). */
4040 mono_arch_print_tree (MonoInst *tree, int arity)
/* Arch hook: intrinsic to fetch the current MonoDomain (body elided in this view). */
4045 MonoInst* mono_arch_get_domain_intrinsic (MonoCompile* cfg)
/* Arch hook: intrinsic to fetch the current thread (body elided in this view). */
4051 mono_arch_get_thread_intrinsic (MonoCompile* cfg)
/* Arch hook: offset of the patchable part of a call site (body elided in this view). */
4057 mono_arch_get_patch_offset (guint8 *code)
/* No register windows on ARM — presumably a no-op (body elided in this view). */
4064 mono_arch_flush_register_windows (void)
/*
 * mono_arch_fixup_jinfo:
 *
 *   Pack the (word-aligned) stack usage into the upper bits of
 * jit_info->used_regs so the unwinder can recover it later.
 */
4069 mono_arch_fixup_jinfo (MonoCompile *cfg)
4071 /* max encoded stack usage is 64KB * 4 */
/* stack_usage must be 4-byte aligned and fit in 16 bits after the >>2 implied by the mask */
4072 g_assert ((cfg->stack_usage & ~(0xffff << 2)) == 0);
4073 cfg->jit_info->used_regs |= cfg->stack_usage << 14;
4076 #ifdef MONO_ARCH_HAVE_IMT
/*
 * mono_arch_emit_imt_argument:
 *
 *   Arrange for the IMT/method argument to reach the callee. Three cases:
 * AOT (method constant loaded via OP_AOTCONST into V5), generic-sharing
 * (imt_arg register copied to V5), and the default JIT case (method pointer
 * as an OP_PCONST in V5).
 * NOTE(review): several source lines are elided in this view.
 */
4079 mono_arch_emit_imt_argument (MonoCompile *cfg, MonoCallInst *call, MonoInst *imt_arg)
4081 if (cfg->compile_aot) {
4082 int method_reg = mono_alloc_ireg (cfg);
/* tell the call site the IMT arg is computed at runtime, not embedded */
4085 call->dynamic_imt_arg = TRUE;
4087 MONO_INST_NEW (cfg, ins, OP_AOTCONST);
4088 ins->dreg = method_reg;
4089 ins->inst_p0 = call->method;
4090 ins->inst_c1 = MONO_PATCH_INFO_METHODCONST;
4091 MONO_ADD_INS (cfg->cbb, ins);
/* V5 is the IMT/method argument register on ARM */
4093 mono_call_inst_add_outarg_reg (cfg, call, method_reg, ARMREG_V5, FALSE);
4094 } else if (cfg->generic_context) {
4096 /* Always pass in a register for simplicity */
4097 call->dynamic_imt_arg = TRUE;
4099 cfg->uses_rgctx_reg = TRUE;
4102 mono_call_inst_add_outarg_reg (cfg, call, imt_arg->dreg, ARMREG_V5, FALSE);
/* plain JIT: the MonoMethod pointer is a compile-time constant */
4105 int method_reg = mono_alloc_preg (cfg);
4107 MONO_INST_NEW (cfg, ins, OP_PCONST);
4108 ins->inst_p0 = call->method;
4109 ins->dreg = method_reg;
4110 MONO_ADD_INS (cfg->cbb, ins);
4112 mono_call_inst_add_outarg_reg (cfg, call, method_reg, ARMREG_V5, FALSE);
/*
 * mono_arch_find_imt_method:
 *
 *   Recover the IMT method for a call site: the calling sequence embeds the
 * MonoMethod pointer in the code stream right after a "ldr pc, ..." style
 * instruction; a zero word there means AOT code, where the method is passed
 * in register V5 instead.
 * NOTE(review): the warning string says "LDC" but the check is IS_LDR_PC —
 * looks like a typo in the message; left untouched here.
 */
4118 mono_arch_find_imt_method (gpointer *regs, guint8 *code)
4120 guint32 *code_ptr = (guint32*)code;
4122 /* The IMT value is stored in the code stream right after the LDC instruction. */
4123 if (!IS_LDR_PC (code_ptr [0])) {
4124 g_warning ("invalid code stream, instruction before IMT value is not a LDC in %s() (code %p value 0: 0x%x -1: 0x%x -2: 0x%x)", __FUNCTION__, code, code_ptr [2], code_ptr [1], code_ptr [0]);
4125 g_assert (IS_LDR_PC (code_ptr [0]));
4127 if (code_ptr [1] == 0)
4128 /* This is AOTed code, the IMT method is in V5 */
4129 return (MonoMethod*)regs [ARMREG_V5];
/* JITted code: the method pointer is the literal word after the load */
4131 return (MonoMethod*) code_ptr [1];
/*
 * mono_arch_find_this_argument:
 *
 *   Extract the 'this' argument of METHOD from the saved register state,
 * delegating to the generic get-this-arg-from-call helper.
 */
4135 mono_arch_find_this_argument (gpointer *regs, MonoMethod *method, MonoGenericSharingContext *gsctx)
4137 return mono_arch_get_this_arg_from_call (gsctx, mono_method_signature (method), (gssize*)regs, NULL);
/*
 * mono_arch_find_static_call_vtable:
 *
 *   For a static call through the RGCTX trampoline, the vtable is passed in
 * the arch RGCTX register — just read it back from the saved registers.
 */
4141 mono_arch_find_static_call_vtable (gpointer *regs, guint8 *code)
4143 return (MonoVTable*) regs [MONO_ARCH_RGCTX_REG];
4146 #define ENABLE_WRONG_METHOD_CHECK 0
/* Size constants (in bytes, 4 bytes per ARM instruction) used to pre-compute
 * the IMT thunk layout below. */
4147 #define BASE_SIZE (6 * 4)
4148 #define BSEARCH_ENTRY_SIZE (4 * 4)
4149 #define CMP_SIZE (3 * 4)
4150 #define BRANCH_SIZE (1 * 4)
4151 #define CALL_SIZE (2 * 4)
4152 #define WMC_SIZE (5 * 4)
/* Byte distance from A to B; callers rely on it fitting in 32 bits. */
4153 #define DISTANCE(A, B) (((gint32)(B)) - ((gint32)(A)))
/*
 * arm_emit_value_and_patch_ldr:
 *
 *   Emit VALUE as a literal word at CODE and back-patch the pc-relative
 * LDR at TARGET with the byte offset to it (12-bit immediate field).
 * NOTE(review): delta is guint32, so `delta >= 0` is vacuously true — the
 * assert cannot catch a negative distance; it probably should use a signed
 * type. Left untouched because surrounding lines are elided in this view.
 */
4156 arm_emit_value_and_patch_ldr (arminstr_t *code, arminstr_t *target, guint32 value)
4158 guint32 delta = DISTANCE (target, code);
/* the LDR immediate offset field is 12 bits */
4160 g_assert (delta >= 0 && delta <= 0xFFF);
4161 *target = *target | delta;
/*
 * mono_arch_build_imt_thunk:
 *
 *   Build the native IMT dispatch thunk for VTABLE: a sequence of
 * compare-and-branch entries (with binary-search range checks for larger
 * tables) that compares the incoming IMT method against each entry's key and
 * jumps through the matching vtable slot. Two passes: first size each entry,
 * then emit and back-patch branches and literal words.
 * NOTE(review): many source lines are elided in this view; comments describe
 * only the visible code.
 */
4167 mono_arch_build_imt_thunk (MonoVTable *vtable, MonoDomain *domain, MonoIMTCheckItem **imt_entries, int count,
4168 gpointer fail_tramp)
4170 int size, i, extra_space = 0;
4171 arminstr_t *code, *start, *vtable_target = NULL;
/* fail_tramp (variance/fallback) is not supported by this backend */
4174 g_assert (!fail_tramp);
/* pass 1: compute each entry's chunk size so the thunk can be reserved up front */
4176 for (i = 0; i < count; ++i) {
4177 MonoIMTCheckItem *item = imt_entries [i];
4178 if (item->is_equals) {
/* the vtable-slot load below uses a 12-bit LDR immediate offset */
4179 g_assert (arm_is_imm12 (DISTANCE (vtable, &vtable->vtable[item->value.vtable_slot])));
4181 if (item->check_target_idx) {
4182 if (!item->compare_done)
4183 item->chunk_size += CMP_SIZE;
4184 item->chunk_size += BRANCH_SIZE;
4186 #if ENABLE_WRONG_METHOD_CHECK
4187 item->chunk_size += WMC_SIZE;
4190 item->chunk_size += CALL_SIZE;
/* non-leaf entry: binary-search range check */
4192 item->chunk_size += BSEARCH_ENTRY_SIZE;
4193 imt_entries [item->check_target_idx]->compare_done = TRUE;
4195 size += item->chunk_size;
4198 start = code = mono_code_manager_reserve (domain->code_mp, size);
4201 printf ("building IMT thunk for class %s %s entries %d code size %d code at %p end %p vtable %p\n", vtable->klass->name_space, vtable->klass->name, count, size, start, ((guint8*)start) + size, vtable);
4202 for (i = 0; i < count; ++i) {
4203 MonoIMTCheckItem *item = imt_entries [i];
4204 printf ("method %d (%p) %s vtable slot %p is_equals %d chunk size %d\n", i, item->key, item->key->name, &vtable->vtable [item->value.vtable_slot], item->is_equals, item->chunk_size);
/* prologue: save r0/r1, load the IMT method from the call-site literal */
4208 ARM_PUSH2 (code, ARMREG_R0, ARMREG_R1);
4209 ARM_LDR_IMM (code, ARMREG_R0, ARMREG_LR, -4);
4210 vtable_target = code;
4211 ARM_LDR_IMM (code, ARMREG_IP, ARMREG_PC, 0);
4213 /* R0 == 0 means we are called from AOT code. In this case, V5 contains the IMT method */
4214 ARM_CMP_REG_IMM8 (code, ARMREG_R0, 0);
4215 ARM_MOV_REG_REG_COND (code, ARMREG_R0, ARMREG_V5, ARMCOND_EQ);
/* pass 2: emit the compare/branch body for each entry */
4217 for (i = 0; i < count; ++i) {
4218 MonoIMTCheckItem *item = imt_entries [i];
4219 arminstr_t *imt_method = NULL;
4220 item->code_target = (guint8*)code;
4222 if (item->is_equals) {
4223 if (item->check_target_idx) {
4224 if (!item->compare_done) {
/* compare incoming method (r0) against this entry's key literal */
4226 ARM_LDR_IMM (code, ARMREG_R1, ARMREG_PC, 0);
4227 ARM_CMP_REG_REG (code, ARMREG_R0, ARMREG_R1);
4229 item->jmp_code = (guint8*)code;
4230 ARM_B_COND (code, ARMCOND_NE, 0);
/* match: restore scratch regs and tail-jump through the vtable slot */
4232 ARM_POP2 (code, ARMREG_R0, ARMREG_R1);
4233 ARM_LDR_IMM (code, ARMREG_PC, ARMREG_IP, DISTANCE (vtable, &vtable->vtable[item->value.vtable_slot]));
4235 /*Enable the commented code to assert on wrong method*/
4236 #if ENABLE_WRONG_METHOD_CHECK
4238 ARM_LDR_IMM (code, ARMREG_R1, ARMREG_PC, 0);
4239 ARM_CMP_REG_REG (code, ARMREG_R0, ARMREG_R1);
4240 ARM_B_COND (code, ARMCOND_NE, 1);
4242 ARM_POP2 (code, ARMREG_R0, ARMREG_R1);
4243 ARM_LDR_IMM (code, ARMREG_PC, ARMREG_IP, DISTANCE (vtable, &vtable->vtable[item->value.vtable_slot]));
4245 #if ENABLE_WRONG_METHOD_CHECK
/* patch the pc-relative key load emitted above with the literal we append here */
4251 code = arm_emit_value_and_patch_ldr (code, imt_method, (guint32)item->key);
4253 /*must emit after unconditional branch*/
4254 if (vtable_target) {
4255 code = arm_emit_value_and_patch_ldr (code, vtable_target, (guint32)vtable);
4256 item->chunk_size += 4;
4257 vtable_target = NULL;
4260 /*We reserve the space for bsearch IMT values after the first entry with an absolute jump*/
4262 code += extra_space;
/* non-leaf entry: range check for the binary search over sorted keys */
4266 ARM_LDR_IMM (code, ARMREG_R1, ARMREG_PC, 0);
4267 ARM_CMP_REG_REG (code, ARMREG_R0, ARMREG_R1);
4269 item->jmp_code = (guint8*)code;
4270 ARM_B_COND (code, ARMCOND_GE, 0);
/* pass 3: resolve forward branches and flush pending literal words */
4275 for (i = 0; i < count; ++i) {
4276 MonoIMTCheckItem *item = imt_entries [i];
4277 if (item->jmp_code) {
4278 if (item->check_target_idx)
4279 arm_patch (item->jmp_code, imt_entries [item->check_target_idx]->code_target);
4281 if (i > 0 && item->is_equals) {
/* back-patch key literals of the preceding non-equals (bsearch) entries */
4283 arminstr_t *space_start = (arminstr_t*)(item->code_target + item->chunk_size);
4284 for (j = i - 1; j >= 0 && !imt_entries [j]->is_equals; --j) {
4285 space_start = arm_emit_value_and_patch_ldr (space_start, (arminstr_t*)imt_entries [j]->code_target, (guint32)imt_entries [j]->key);
4292 char *buff = g_strdup_printf ("thunk_for_class_%s_%s_entries_%d", vtable->klass->name_space, vtable->klass->name, count);
4293 mono_disassemble_code (NULL, (guint8*)start, size, buff);
/* make the freshly written thunk visible to the instruction stream */
4298 mono_arch_flush_icache ((guint8*)start, size);
4299 mono_stats.imt_thunks_size += code - start;
4301 g_assert (DISTANCE (start, code) <= size);
4308 mono_arch_context_get_int_reg (MonoContext *ctx, int reg)
4310 return ctx->regs [reg];