2 * mini-arm.c: ARM backend for the Mono code generator
5 * Paolo Molaro (lupus@ximian.com)
6 * Dietmar Maurer (dietmar@ximian.com)
8 * (C) 2003 Ximian, Inc.
13 #include <mono/metadata/appdomain.h>
14 #include <mono/metadata/debug-helpers.h>
21 #include "mono/arch/arm/arm-fpa-codegen.h"
22 #elif defined(ARM_FPU_VFP)
23 #include "mono/arch/arm/arm-vfp-codegen.h"
26 static int v5_supported = 0;
27 static int thumb_supported = 0;
29 static int mono_arm_is_rotated_imm8 (guint32 val, gint *rot_amount);
33 * floating point support: on ARM it is a mess, there are at least 3
34 * different setups, each of which is binary-incompatible with the others.
35 * 1) FPA: old and ugly, but unfortunately what current distros use
36 * the double binary format has the two words swapped. 8 double registers.
37 * Implemented usually by kernel emulation.
38 * 2) softfloat: the compiler emulates all the fp ops. Usually uses the
39 * ugly swapped double format (I guess a softfloat-vfp exists, too, though).
40 * 3) VFP: the new and actually sensible and useful FP support. Implemented
41 * in HW or kernel-emulated, requires new tools. I think this is what symbian uses.
43 * The plan is to write the FPA support first. softfloat can be tested in a chroot.
45 int mono_exc_esp_offset = 0;
47 #define arm_is_imm12(v) ((v) > -4096 && (v) < 4096)
48 #define arm_is_imm8(v) ((v) > -256 && (v) < 256)
49 #define arm_is_fpimm8(v) ((v) >= -1020 && (v) <= 1020)
/*
 * mono_arch_regname:
 * Return a printable name for integer register index @reg (used for
 * debug/diagnostic output).  The table maps r0-r3, v1-v7, fp, ip, sp, lr.
 * NOTE(review): listing is sampled — the 16th table entry (presumably
 * "arm_pc") and the out-of-range fallback return are not visible here.
 */
52 mono_arch_regname (int reg) {
53 static const char * rnames[] = {
54 "arm_r0", "arm_r1", "arm_r2", "arm_r3", "arm_v1",
55 "arm_v2", "arm_v3", "arm_v4", "arm_v5", "arm_v6",
56 "arm_v7", "arm_fp", "arm_ip", "arm_sp", "arm_lr",
/* bounds-check before indexing the 16-entry name table */
59 if (reg >= 0 && reg < 16)
/*
 * mono_arch_fregname:
 * Return a printable name for floating-point register index @reg
 * (f0-f31), for debug output.  NOTE(review): sampled listing — the
 * final table line (f30/f31) and the fallback return are not visible.
 */
65 mono_arch_fregname (int reg) {
66 static const char * rnames[] = {
67 "arm_f0", "arm_f1", "arm_f2", "arm_f3", "arm_f4",
68 "arm_f5", "arm_f6", "arm_f7", "arm_f8", "arm_f9",
69 "arm_f10", "arm_f11", "arm_f12", "arm_f13", "arm_f14",
70 "arm_f15", "arm_f16", "arm_f17", "arm_f18", "arm_f19",
71 "arm_f20", "arm_f21", "arm_f22", "arm_f23", "arm_f24",
72 "arm_f25", "arm_f26", "arm_f27", "arm_f28", "arm_f29",
/* bounds-check before indexing the 32-entry name table */
75 if (reg >= 0 && reg < 32)
/*
 * emit_big_add:
 * Emit machine code computing dreg = sreg + imm.  Fast path: a single
 * ADD when @imm encodes as an ARM rotated 8-bit immediate.  Otherwise
 * @imm is materialized into @dreg first and the two registers are added
 * — which is why @dreg must differ from @sreg on that path (asserted).
 * Returns the advanced code pointer.
 */
81 emit_big_add (guint8 *code, int dreg, int sreg, int imm)
84 if ((imm8 = mono_arm_is_rotated_imm8 (imm, &rot_amount)) >= 0) {
85 ARM_ADD_REG_IMM (code, dreg, sreg, imm8, rot_amount);
88 g_assert (dreg != sreg);
89 code = mono_arm_emit_load_imm (code, dreg, imm);
90 ARM_ADD_REG_REG (code, dreg, dreg, sreg);
/*
 * emit_memcpy:
 * Emit code copying @size bytes from sreg+soffset to dreg+doffset.
 * Copies word-by-word (the trailing g_assert (size == 0) implies @size
 * is expected to be a multiple of 4 by the time the word loop is done —
 * NOTE(review): sub-word tail handling, if any, is on lines not visible
 * in this sampled listing).
 * Large blocks (> 4 words) use an R0-R3 decrement-and-branch loop; this
 * is safe because, per the comment below, it runs only for incoming
 * stack arguments where r0-r3 are dead.
 */
95 emit_memcpy (guint8 *code, int size, int dreg, int doffset, int sreg, int soffset)
97 /* we can use r0-r3, since this is called only for incoming args on the stack */
98 if (size > sizeof (gpointer) * 4) {
100 code = emit_big_add (code, ARMREG_R0, sreg, soffset);
101 code = emit_big_add (code, ARMREG_R1, dreg, doffset);
/* loop: load/store one word, bump pointers, decrement count, branch while != 0 */
102 start_loop = code = mono_arm_emit_load_imm (code, ARMREG_R2, size);
103 ARM_LDR_IMM (code, ARMREG_R3, ARMREG_R0, 0);
104 ARM_STR_IMM (code, ARMREG_R3, ARMREG_R1, 0);
105 ARM_ADD_REG_IMM8 (code, ARMREG_R0, ARMREG_R0, 4);
106 ARM_ADD_REG_IMM8 (code, ARMREG_R1, ARMREG_R1, 4);
107 ARM_SUBS_REG_IMM8 (code, ARMREG_R2, ARMREG_R2, 4);
108 ARM_B_COND (code, ARMCOND_NE, 0);
109 arm_patch (code - 4, start_loop);
/* small copy, offsets fit in 12-bit LDR/STR immediates: unrolled copy via LR */
112 if (arm_is_imm12 (doffset) && arm_is_imm12 (doffset + size) &&
113 arm_is_imm12 (soffset) && arm_is_imm12 (soffset + size)) {
115 ARM_LDR_IMM (code, ARMREG_LR, sreg, soffset);
116 ARM_STR_IMM (code, ARMREG_LR, dreg, doffset);
/* offsets too large for imm12: compute base addresses into r0/r1 first */
122 code = emit_big_add (code, ARMREG_R0, sreg, soffset);
123 code = emit_big_add (code, ARMREG_R1, dreg, doffset);
124 doffset = soffset = 0;
126 ARM_LDR_IMM (code, ARMREG_LR, ARMREG_R0, soffset);
127 ARM_STR_IMM (code, ARMREG_LR, ARMREG_R1, doffset);
133 g_assert (size == 0);
/*
 * emit_call_reg:
 * Emit an indirect call through @reg.  One path uses BLX (ARMv5+,
 * handles ARM/Thumb interworking); the other emulates a call by loading
 * LR with the return address and jumping: MOV lr, pc sets lr to the
 * address two instructions ahead, i.e. past the following MOV pc, reg.
 * NOTE(review): the condition selecting between the two paths (likely
 * the v5_supported flag) is on a line not visible in this listing.
 */
138 emit_call_reg (guint8 *code, int reg)
141 ARM_BLX_REG (code, reg);
143 ARM_MOV_REG_REG (code, ARMREG_LR, ARMREG_PC);
147 ARM_MOV_REG_REG (code, ARMREG_PC, reg);
153 * mono_arch_get_argument_info:
154 * @csig: a method signature
155 * @param_count: the number of parameters to consider
156 * @arg_info: an array to store the result infos
158 * Gathers information on parameters such as size, alignment and
159 * padding. arg_info should be large enough to hold param_count + 1 entries.
161 * Returns the size of the activation frame.
164 mono_arch_get_argument_info (MonoMethodSignature *csig, int param_count, MonoJitArgumentInfo *arg_info)
166 int k, frame_size = 0;
167 int size, align, pad;
/* a struct return value is passed as a hidden pointer argument */
170 if (MONO_TYPE_ISSTRUCT (csig->ret)) {
171 frame_size += sizeof (gpointer);
175 arg_info [0].offset = offset;
/* 'this' pointer, when present, occupies one word */
178 frame_size += sizeof (gpointer);
182 arg_info [0].size = frame_size;
184 for (k = 0; k < param_count; k++) {
/* pinvoke signatures use native sizes, managed ones use stack sizes */
187 size = mono_type_native_stack_size (csig->params [k], &align);
189 size = mono_type_stack_size (csig->params [k], &align);
191 /* ignore alignment for now */
/* pad frame_size up to 'align' (align is assumed to be a power of two) */
194 frame_size += pad = (align - (frame_size & (align - 1))) & (align - 1);
195 arg_info [k].pad = pad;
197 arg_info [k + 1].pad = 0;
198 arg_info [k + 1].size = size;
200 arg_info [k + 1].offset = offset;
/* finally round the whole frame to MONO_ARCH_FRAME_ALIGNMENT */
204 align = MONO_ARCH_FRAME_ALIGNMENT;
205 frame_size += pad = (align - (frame_size & (align - 1))) & (align - 1);
206 arg_info [k].pad = pad;
212 * Initialize the cpu to execute managed code.
215 mono_arch_cpu_init (void)
220 * Initialize architecture specific code.
223 mono_arch_init (void)
228 * Cleanup architecture specific code.
231 mono_arch_cleanup (void)
236 * This function returns the optimizations supported on this cpu.
/*
 * NOTE(review): the misspelled name "optimizazions" is the historical
 * Mono porting-interface name — do not rename it here.
 * Detects CPU capabilities by scanning /proc/cpuinfo (Linux-specific):
 * "Processor ... (vN" with N in 5..7 sets v5_supported; a "thumb" token
 * in the "Features" line sets thumb_supported.
 * NOTE(review): no NULL check on fopen() is visible before the fgets
 * loop — presumably on a sampled-out line; confirm against full source.
 */
239 mono_arch_cpu_optimizazions (guint32 *exclude_mask)
244 FILE *file = fopen ("/proc/cpuinfo", "r");
246 while ((line = fgets (buf, 512, file))) {
247 if (strncmp (line, "Processor", 9) == 0) {
248 char *ver = strstr (line, "(v");
249 if (ver && (ver [2] == '5' || ver [2] == '6' || ver [2] == '7')) {
254 if (strncmp (line, "Features", 8) == 0) {
255 char *th = strstr (line, "thumb");
257 thumb_supported = TRUE;
265 /*printf ("features: v5: %d, thumb: %d\n", v5_supported, thumb_supported);*/
268 /* no arm-specific optimizations yet */
/*
 * is_regsize_var:
 * Return whether a variable of type @t fits in a single 32-bit integer
 * register (pointer-like types, object references, non-valuetype generic
 * instances).  Enum underlying types are resolved first.
 */
274 is_regsize_var (MonoType *t) {
277 t = mono_type_get_underlying_type (t);
284 case MONO_TYPE_FNPTR:
286 case MONO_TYPE_OBJECT:
287 case MONO_TYPE_STRING:
288 case MONO_TYPE_CLASS:
289 case MONO_TYPE_SZARRAY:
290 case MONO_TYPE_ARRAY:
292 case MONO_TYPE_GENERICINST:
/* a generic instance is register-sized only if it is a reference type */
293 if (!mono_type_generic_inst_is_valuetype (t))
296 case MONO_TYPE_VALUETYPE:
/*
 * mono_arch_get_allocatable_int_vars:
 * Build the list of variables eligible for global register allocation:
 * live-range must be non-empty, the var must not be volatile/indirect,
 * must be a local or argument, and must be 32-bit register-sized.
 * Returns a GList of MonoMethodVar*, sorted by mono_varlist_insert_sorted.
 */
303 mono_arch_get_allocatable_int_vars (MonoCompile *cfg)
308 for (i = 0; i < cfg->num_varinfo; i++) {
309 MonoInst *ins = cfg->varinfo [i];
310 MonoMethodVar *vmv = MONO_VARINFO (cfg, i);
/* unused variables don't need to be allocated to a register */
313 if (vmv->range.first_use.abs_pos >= vmv->range.last_use.abs_pos)
316 if (ins->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT) || (ins->opcode != OP_LOCAL && ins->opcode != OP_ARG))
319 /* we can only allocate 32 bit values */
320 if (is_regsize_var (ins->inst_vtype)) {
321 g_assert (MONO_VARINFO (cfg, i)->reg == -1);
322 g_assert (i == vmv->idx);
323 vars = mono_varlist_insert_sorted (cfg, vars, vmv, FALSE);
330 #define USE_EXTRA_TEMPS 0
/*
 * mono_arch_get_global_int_regs:
 * Return the list of callee-saved integer registers available to the
 * global register allocator: v1-v5.  v6/v7 are deliberately left out
 * (commented below) — presumably reserved as scratch/temporaries;
 * confirm against the full file.
 */
333 mono_arch_get_global_int_regs (MonoCompile *cfg)
336 regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_V1));
337 regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_V2));
338 regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_V3));
339 regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_V4));
340 regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_V5));
341 /*regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_V6));*/
342 /*regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_V7));*/
348 * mono_arch_regalloc_cost:
350 * Return the cost, in number of memory references, of the action of
351 * allocating the variable VMV into a register during global register
/* NOTE(review): function body is not visible in this sampled listing. */
355 mono_arch_regalloc_cost (MonoCompile *cfg, MonoMethodVar *vmv)
/*
 * mono_arch_flush_icache:
 * Flush the instruction cache for [code, code+size) so freshly emitted
 * machine code becomes executable.  Uses the ARM Linux sys_cacheflush
 * syscall (swi 0x9f0002) via inline asm; r0/r1 carry the range and a
 * third operand is 0 (flags).  Clobbers r0, r1, r3.
 */
362 mono_arch_flush_icache (guint8 *code, gint size)
364 __asm __volatile ("mov r0, %0\n"
367 "swi 0x9f0002 @ sys_cacheflush"
369 : "r" (code), "r" (code + size), "r" (0)
370 : "r0", "r1", "r3" );
374 #define NOT_IMPLEMENTED(x) \
375 g_error ("FIXME: %s is not yet implemented. (trampoline)", x);
388 guint16 vtsize; /* in param area */
390 guint8 regtype : 4; /* 0 general, 1 basereg, 2 floating point register, see RegType* */
391 guint8 size : 4; /* 1, 2, 4, 8, or regs used by RegTypeStructByVal */
/*
 * add_general:
 * Assign the next argument slot following the ARM calling convention:
 * use registers r0-r3 while available (tracked via *gr), then fall back
 * to the caller's stack area (RegTypeBase, offset = *stack_size).
 * @simple: TRUE for one-word args; FALSE for two-word (long/double)
 * args, which may be split between r3 and the stack (RegTypeBaseGen).
 */
406 add_general (guint *gr, guint *stack_size, ArgInfo *ainfo, gboolean simple)
/* single-word argument, registers exhausted: goes on the stack */
409 if (*gr > ARMREG_R3) {
410 ainfo->offset = *stack_size;
411 ainfo->reg = ARMREG_SP; /* in the caller */
412 ainfo->regtype = RegTypeBase;
423 /* first word in r3 and the second on the stack */
424 ainfo->offset = *stack_size;
425 ainfo->reg = ARMREG_SP; /* in the caller */
426 ainfo->regtype = RegTypeBaseGen;
/* two-word argument with no registers left: fully on the stack */
428 } else if (*gr > ARMREG_R3) {
433 ainfo->offset = *stack_size;
434 ainfo->reg = ARMREG_SP; /* in the caller */
435 ainfo->regtype = RegTypeBase;
/*
 * calculate_sizes:
 * Compute the call-site layout (CallInfo) for @sig: for each argument,
 * which register or stack slot it occupies (via add_general), its size,
 * and for value types the by-value split between r0-r3 and the stack;
 * also record where the return value lives and the total stack usage.
 * The returned CallInfo is g_malloc0'ed — caller frees.
 */
450 calculate_sizes (MonoMethodSignature *sig, gboolean is_pinvoke)
453 int n = sig->hasthis + sig->param_count;
455 guint32 stack_size = 0;
456 CallInfo *cinfo = g_malloc0 (sizeof (CallInfo) + sizeof (ArgInfo) * n);
460 /* FIXME: handle returning a struct */
/* struct returns consume a hidden pointer argument slot (in r0) */
461 if (MONO_TYPE_ISSTRUCT (sig->ret)) {
462 add_general (&gr, &stack_size, &cinfo->ret, TRUE);
463 cinfo->struct_ret = ARMREG_R0;
468 add_general (&gr, &stack_size, cinfo->args + n, TRUE);
471 DEBUG(printf("params: %d\n", sig->param_count));
472 for (i = 0; i < sig->param_count; ++i) {
473 if ((sig->call_convention == MONO_CALL_VARARG) && (i == sig->sentinelpos)) {
474 /* Prevent implicit arguments and sig_cookie from
475 being passed in registers */
477 /* Emit the signature cookie just before the implicit arguments */
478 add_general (&gr, &stack_size, &cinfo->sig_cookie, TRUE);
480 DEBUG(printf("param %d: ", i));
/* byref args are always a single pointer, whatever the pointee type */
481 if (sig->params [i]->byref) {
482 DEBUG(printf("byref\n"));
483 add_general (&gr, &stack_size, cinfo->args + n, TRUE);
487 simpletype = mono_type_get_underlying_type (sig->params [i])->type;
488 switch (simpletype) {
489 case MONO_TYPE_BOOLEAN:
492 cinfo->args [n].size = 1;
493 add_general (&gr, &stack_size, cinfo->args + n, TRUE);
499 cinfo->args [n].size = 2;
500 add_general (&gr, &stack_size, cinfo->args + n, TRUE);
505 cinfo->args [n].size = 4;
506 add_general (&gr, &stack_size, cinfo->args + n, TRUE);
512 case MONO_TYPE_FNPTR:
513 case MONO_TYPE_CLASS:
514 case MONO_TYPE_OBJECT:
515 case MONO_TYPE_STRING:
516 case MONO_TYPE_SZARRAY:
517 case MONO_TYPE_ARRAY:
519 cinfo->args [n].size = sizeof (gpointer);
520 add_general (&gr, &stack_size, cinfo->args + n, TRUE);
523 case MONO_TYPE_GENERICINST:
524 if (!mono_type_generic_inst_is_valuetype (sig->params [i])) {
525 cinfo->args [n].size = sizeof (gpointer);
526 add_general (&gr, &stack_size, cinfo->args + n, TRUE);
531 case MONO_TYPE_TYPEDBYREF:
532 case MONO_TYPE_VALUETYPE: {
537 if (simpletype == MONO_TYPE_TYPEDBYREF) {
538 size = sizeof (MonoTypedRef);
540 MonoClass *klass = mono_class_from_mono_type (sig->params [i]);
542 size = mono_class_native_size (klass, NULL);
544 size = mono_class_value_size (klass, NULL);
546 DEBUG(printf ("load %d bytes struct\n",
547 mono_class_native_size (sig->params [i]->data.klass, NULL)));
/* round the struct size up to whole pointer-sized words */
550 align_size += (sizeof (gpointer) - 1);
551 align_size &= ~(sizeof (gpointer) - 1);
552 nwords = (align_size + sizeof (gpointer) -1 ) / sizeof (gpointer);
553 cinfo->args [n].regtype = RegTypeStructByVal;
554 /* FIXME: align gr and stack_size if needed */
/* no registers left: whole struct lives in the stack param area */
555 if (gr > ARMREG_R3) {
556 cinfo->args [n].size = 0;
557 cinfo->args [n].vtsize = nwords;
/* split: first n_in_regs words in r<gr>..r3, remainder on the stack */
559 int rest = ARMREG_R3 - gr + 1;
560 int n_in_regs = rest >= nwords? nwords: rest;
561 cinfo->args [n].size = n_in_regs;
562 cinfo->args [n].vtsize = nwords - n_in_regs;
563 cinfo->args [n].reg = gr;
566 cinfo->args [n].offset = stack_size;
567 /*g_print ("offset for arg %d at %d\n", n, stack_size);*/
568 stack_size += nwords * sizeof (gpointer);
/* 8-byte values (long/double): two words, possibly split with stack */
575 cinfo->args [n].size = 8;
576 add_general (&gr, &stack_size, cinfo->args + n, FALSE);
580 g_error ("Can't trampoline 0x%x", sig->params [i]->type);
/* now classify the return value */
585 simpletype = mono_type_get_underlying_type (sig->ret)->type;
586 switch (simpletype) {
587 case MONO_TYPE_BOOLEAN:
598 case MONO_TYPE_FNPTR:
599 case MONO_TYPE_CLASS:
600 case MONO_TYPE_OBJECT:
601 case MONO_TYPE_SZARRAY:
602 case MONO_TYPE_ARRAY:
603 case MONO_TYPE_STRING:
604 cinfo->ret.reg = ARMREG_R0;
608 cinfo->ret.reg = ARMREG_R0;
612 cinfo->ret.reg = ARMREG_R0;
613 /* FIXME: cinfo->ret.reg = ???;
614 cinfo->ret.regtype = RegTypeFP;*/
616 case MONO_TYPE_GENERICINST:
617 if (!mono_type_generic_inst_is_valuetype (sig->ret)) {
618 cinfo->ret.reg = ARMREG_R0;
622 case MONO_TYPE_VALUETYPE:
624 case MONO_TYPE_TYPEDBYREF:
628 g_error ("Can't handle as return value 0x%x", sig->ret->type);
632 /* align stack size to 8 */
/* NOTE(review): the DEBUG print rounds to 16 while the code rounds to
 * 8 — the printed value can disagree with the actual stack_usage. */
633 DEBUG (printf (" stack size: %d (%d)\n", (stack_size + 15) & ~15, stack_size));
634 stack_size = (stack_size + 7) & ~7;
636 cinfo->stack_usage = stack_size;
642 * Set var information according to the calling convention. arm version.
643 * The locals var stuff should most likely be split in another method.
/*
 * mono_arch_allocate_vars:
 * Assign stack offsets (or registers) to the return value, locals and
 * incoming arguments of the method being compiled.  Offsets grow upward
 * from the frame register (MONO_CFG_HAS_SPILLUP).
 */
646 mono_arch_allocate_vars (MonoCompile *m)
648 MonoMethodSignature *sig;
649 MonoMethodHeader *header;
651 int i, offset, size, align, curinst;
652 int frame_reg = ARMREG_FP;
654 /* FIXME: this will change when we use FP as gcc does */
655 m->flags |= MONO_CFG_HAS_SPILLUP;
657 /* allow room for the vararg method args: void* and long/double */
658 if (mono_jit_trace_calls != NULL && mono_trace_eval (m->method))
659 m->param_area = MAX (m->param_area, sizeof (gpointer)*8);
661 header = mono_method_get_header (m->method);
664 * We use the frame register also for any method that has
665 * exception clauses. This way, when the handlers are called,
666 * the code will reference local variables using the frame reg instead of
667 * the stack pointer: if we had to restore the stack pointer, we'd
668 * corrupt the method frames that are already on the stack (since
669 * filters get called before stack unwinding happens) when the filter
670 * code would call any method (this also applies to finally etc.).
672 if ((m->flags & MONO_CFG_HAS_ALLOCA) || header->num_clauses)
673 frame_reg = ARMREG_FP;
674 m->frame_reg = frame_reg;
/* the frame register becomes a used callee-saved register */
675 if (frame_reg != ARMREG_SP) {
676 m->used_int_regs |= 1 << frame_reg;
679 sig = mono_method_signature (m->method);
/* struct returns: the hidden return pointer is in r0 */
683 if (MONO_TYPE_ISSTRUCT (sig->ret)) {
684 m->ret->opcode = OP_REGVAR;
685 m->ret->inst_c0 = ARMREG_R0;
687 /* FIXME: handle long and FP values */
688 switch (mono_type_get_underlying_type (sig->ret)->type) {
692 m->ret->opcode = OP_REGVAR;
693 m->ret->inst_c0 = ARMREG_R0;
697 /* local vars are at a positive offset from the stack pointer */
699 * also note that if the function uses alloca, we use FP
700 * to point at the local variables.
702 offset = 0; /* linkage area */
703 /* align the offset to 16 bytes: not sure this is needed here */
705 //offset &= ~(8 - 1);
707 /* add parameter area size for called functions */
708 offset += m->param_area;
711 if (m->flags & MONO_CFG_HAS_FPOUT)
714 /* allow room to save the return value */
715 if (mono_jit_trace_calls != NULL && mono_trace_eval (m->method))
718 /* the MonoLMF structure is stored just below the stack pointer */
720 if (sig->call_convention == MONO_CALL_VARARG) {
/* struct-return methods need a pointer-aligned slot for the ret pointer */
724 if (MONO_TYPE_ISSTRUCT (sig->ret)) {
726 offset += sizeof(gpointer) - 1;
727 offset &= ~(sizeof(gpointer) - 1);
728 inst->inst_offset = offset;
729 inst->opcode = OP_REGOFFSET;
730 inst->inst_basereg = frame_reg;
731 offset += sizeof(gpointer);
732 if (sig->call_convention == MONO_CALL_VARARG)
733 m->sig_cookie += sizeof (gpointer);
/* lay out the locals */
736 curinst = m->locals_start;
737 for (i = curinst; i < m->num_varinfo; ++i) {
738 inst = m->varinfo [i];
739 if ((inst->flags & MONO_INST_IS_DEAD) || inst->opcode == OP_REGVAR)
742 /* inst->backend.is_pinvoke indicates native sized value types, this is used by the
743 * pinvoke wrappers when they call functions returning structure */
744 if (inst->backend.is_pinvoke && MONO_TYPE_ISSTRUCT (inst->inst_vtype) && inst->inst_vtype->type != MONO_TYPE_TYPEDBYREF)
745 size = mono_class_native_size (mono_class_from_mono_type (inst->inst_vtype), &align);
747 size = mono_type_size (inst->inst_vtype, &align);
749 /* FIXME: if a structure is misaligned, our memcpy doesn't work,
750 * since it loads/stores misaligned words, which don't do the right thing.
752 if (align < 4 && size >= 4)
755 offset &= ~(align - 1);
756 inst->inst_offset = offset;
757 inst->opcode = OP_REGOFFSET;
758 inst->inst_basereg = frame_reg;
760 //g_print ("allocating local %d to %d\n", i, inst->inst_offset);
/* the implicit 'this' argument, when not already in a register */
765 inst = m->args [curinst];
766 if (inst->opcode != OP_REGVAR) {
767 inst->opcode = OP_REGOFFSET;
768 inst->inst_basereg = frame_reg;
769 offset += sizeof (gpointer) - 1;
770 offset &= ~(sizeof (gpointer) - 1);
771 inst->inst_offset = offset;
772 offset += sizeof (gpointer);
773 if (sig->call_convention == MONO_CALL_VARARG)
774 m->sig_cookie += sizeof (gpointer);
/* lay out the explicit arguments */
779 for (i = 0; i < sig->param_count; ++i) {
780 inst = m->args [curinst];
781 if (inst->opcode != OP_REGVAR) {
782 inst->opcode = OP_REGOFFSET;
783 inst->inst_basereg = frame_reg;
784 size = mono_type_size (sig->params [i], &align);
785 /* FIXME: if a structure is misaligned, our memcpy doesn't work,
786 * since it loads/stores misaligned words, which don't do the right thing.
788 if (align < 4 && size >= 4)
791 offset &= ~(align - 1);
792 inst->inst_offset = offset;
794 if ((sig->call_convention == MONO_CALL_VARARG) && (i < sig->sentinelpos))
795 m->sig_cookie += size;
800 /* align the offset to 8 bytes */
805 m->stack_offset = offset;
809 /* Fixme: we need an alignment solution for enter_method and mono_arch_call_opcode,
810 * currently alignment in mono_arch_call_opcode is computed without arch_get_argument_info
814 * take the arguments and generate the arch-specific
815 * instructions to properly call the function in call.
816 * This includes pushing, moving arguments to the right register
818 * Issue: who does the spilling if needed, and when?
/*
 * mono_arch_call_opcode:
 * Lower a managed call: for each argument, build an OP_OUTARG* tree
 * instruction whose backend info encodes the register/stack slot chosen
 * by calculate_sizes(), record the caller-clobbered registers in
 * call->used_iregs, and accumulate the call's stack usage.
 */
821 mono_arch_call_opcode (MonoCompile *cfg, MonoBasicBlock* bb, MonoCallInst *call, int is_virtual) {
823 MonoMethodSignature *sig;
828 sig = call->signature;
829 n = sig->param_count + sig->hasthis;
831 cinfo = calculate_sizes (sig, sig->pinvoke);
832 if (cinfo->struct_ret)
833 call->used_iregs |= 1 << cinfo->struct_ret;
835 for (i = 0; i < n; ++i) {
836 ainfo = cinfo->args + i;
/* at the vararg sentinel, emit the signature cookie outarg */
837 if ((sig->call_convention == MONO_CALL_VARARG) && (i == sig->sentinelpos)) {
839 cfg->disable_aot = TRUE;
841 MONO_INST_NEW (cfg, sig_arg, OP_ICONST);
842 sig_arg->inst_p0 = call->signature;
844 MONO_INST_NEW (cfg, arg, OP_OUTARG);
845 arg->inst_imm = cinfo->sig_cookie.offset;
846 arg->inst_left = sig_arg;
848 /* prepend, so they get reversed */
849 arg->next = call->out_args;
850 call->out_args = arg;
852 if (is_virtual && i == 0) {
853 /* the argument will be attached to the call instruction */
855 call->used_iregs |= 1 << ainfo->reg;
857 MONO_INST_NEW (cfg, arg, OP_OUTARG);
859 arg->cil_code = in->cil_code;
861 arg->inst_right = (MonoInst*)call;
862 arg->type = in->type;
863 /* prepend, we'll need to reverse them later */
864 arg->next = call->out_args;
865 call->out_args = arg;
/* plain register argument; longs use reg and reg+1 */
866 if (ainfo->regtype == RegTypeGeneral) {
867 arg->backend.reg3 = ainfo->reg;
868 call->used_iregs |= 1 << ainfo->reg;
869 if (arg->type == STACK_I8)
870 call->used_iregs |= 1 << (ainfo->reg + 1);
871 if (arg->type == STACK_R8) {
872 if (ainfo->size == 4) {
873 #ifndef MONO_ARCH_SOFT_FLOAT
874 arg->opcode = OP_OUTARG_R4;
877 call->used_iregs |= 1 << (ainfo->reg + 1);
879 cfg->flags |= MONO_CFG_HAS_FPOUT;
881 } else if (ainfo->regtype == RegTypeStructByAddr) {
882 /* FIXME: where is the data allocated? */
883 arg->backend.reg3 = ainfo->reg;
884 call->used_iregs |= 1 << ainfo->reg;
885 g_assert_not_reached ();
886 } else if (ainfo->regtype == RegTypeStructByVal) {
888 /* mark the used regs */
889 for (cur_reg = 0; cur_reg < ainfo->size; ++cur_reg) {
890 call->used_iregs |= 1 << (ainfo->reg + cur_reg);
892 arg->opcode = OP_OUTARG_VT;
893 /* vtsize and offset have just 12 bits of encoding in number of words */
894 g_assert (((ainfo->vtsize | (ainfo->offset / 4)) & 0xfffff000) == 0);
895 arg->backend.arg_info = ainfo->reg | (ainfo->size << 4) | (ainfo->vtsize << 8) | ((ainfo->offset / 4) << 20);
896 } else if (ainfo->regtype == RegTypeBase) {
897 arg->opcode = OP_OUTARG_MEMBASE;
898 arg->backend.arg_info = (ainfo->offset << 8) | ainfo->size;
899 } else if (ainfo->regtype == RegTypeBaseGen) {
/* split long/double: high word in r3, rest on the stack (size 0xff) */
900 call->used_iregs |= 1 << ARMREG_R3;
901 arg->opcode = OP_OUTARG_MEMBASE;
902 arg->backend.arg_info = (ainfo->offset << 8) | 0xff;
903 if (arg->type == STACK_R8)
904 cfg->flags |= MONO_CFG_HAS_FPOUT;
905 } else if (ainfo->regtype == RegTypeFP) {
906 arg->backend.reg3 = ainfo->reg;
907 /* FP args are passed in int regs */
908 call->used_iregs |= 1 << ainfo->reg;
909 if (ainfo->size == 8) {
910 arg->opcode = OP_OUTARG_R8;
911 call->used_iregs |= 1 << (ainfo->reg + 1);
913 arg->opcode = OP_OUTARG_R4;
915 cfg->flags |= MONO_CFG_HAS_FPOUT;
917 g_assert_not_reached ();
922 * Reverse the call->out_args list.
925 MonoInst *prev = NULL, *list = call->out_args, *next;
932 call->out_args = prev;
934 call->stack_usage = cinfo->stack_usage;
935 cfg->param_area = MAX (cfg->param_area, cinfo->stack_usage);
936 cfg->flags |= MONO_CFG_HAS_CALLS;
938 * should set more info in call, such as the stack space
939 * used by the args that needs to be added back to esp
947 * Allow tracing to work with this interface (with an optional argument)
/*
 * mono_arch_instrument_prolog:
 * Emit the enter_method trace call: r0 = method, r1 = NULL (ebp
 * placeholder), then call @func indirectly through r2.
 */
951 mono_arch_instrument_prolog (MonoCompile *cfg, void *func, void *p, gboolean enable_arguments)
955 code = mono_arm_emit_load_imm (code, ARMREG_R0, (guint32)cfg->method);
956 ARM_MOV_REG_IMM8 (code, ARMREG_R1, 0); /* NULL ebp for now */
957 code = mono_arm_emit_load_imm (code, ARMREG_R2, (guint32)func);
958 code = emit_call_reg (code, ARMREG_R2);
/*
 * mono_arch_instrument_epilog:
 * Emit the leave_method trace call: save the return value (r0, r0/r1,
 * or FP reg depending on the return type), call @func, then restore it.
 */
971 mono_arch_instrument_epilog (MonoCompile *cfg, void *func, void *p, gboolean enable_arguments)
974 int save_mode = SAVE_NONE;
976 MonoMethod *method = cfg->method;
977 int rtype = mono_type_get_underlying_type (mono_method_signature (method)->ret)->type;
978 int save_offset = cfg->param_area;
/* grow the code buffer if fewer than ~16 instructions of room remain */
982 offset = code - cfg->native_code;
983 /* we need about 16 instructions */
984 if (offset > (cfg->code_size - 16 * 4)) {
986 cfg->native_code = g_realloc (cfg->native_code, cfg->code_size);
987 code = cfg->native_code + offset;
991 /* special case string .ctor icall */
/* NOTE(review): strcmp() returns 0 on a match, so this condition is
 * true for every String method EXCEPT .ctor — other Mono backends use
 * !strcmp here.  Looks inverted; confirm against the full source. */
992 if (strcmp (".ctor", method->name) && method->klass == mono_defaults.string_class)
993 save_mode = SAVE_ONE;
995 save_mode = SAVE_NONE;
999 save_mode = SAVE_TWO;
1003 save_mode = SAVE_FP;
1005 case MONO_TYPE_VALUETYPE:
1006 save_mode = SAVE_STRUCT;
1009 save_mode = SAVE_ONE;
/* spill the live return value into the param area before the call */
1013 switch (save_mode) {
1015 ARM_STR_IMM (code, ARMREG_R0, cfg->frame_reg, save_offset);
1016 ARM_STR_IMM (code, ARMREG_R1, cfg->frame_reg, save_offset + 4);
1017 if (enable_arguments) {
1018 ARM_MOV_REG_REG (code, ARMREG_R2, ARMREG_R1);
1019 ARM_MOV_REG_REG (code, ARMREG_R1, ARMREG_R0);
1023 ARM_STR_IMM (code, ARMREG_R0, cfg->frame_reg, save_offset);
1024 if (enable_arguments) {
1025 ARM_MOV_REG_REG (code, ARMREG_R1, ARMREG_R0);
1029 /* FIXME: what reg? */
1030 if (enable_arguments) {
1031 /* FIXME: what reg? */
1035 if (enable_arguments) {
1036 /* FIXME: get the actual address */
1037 ARM_MOV_REG_REG (code, ARMREG_R1, ARMREG_R0);
/* call func(method, ...) through IP, then reload the saved value */
1045 code = mono_arm_emit_load_imm (code, ARMREG_R0, (guint32)cfg->method);
1046 code = mono_arm_emit_load_imm (code, ARMREG_IP, (guint32)func);
1047 code = emit_call_reg (code, ARMREG_IP);
1049 switch (save_mode) {
1051 ARM_LDR_IMM (code, ARMREG_R0, cfg->frame_reg, save_offset);
1052 ARM_LDR_IMM (code, ARMREG_R1, cfg->frame_reg, save_offset + 4);
1055 ARM_LDR_IMM (code, ARMREG_R0, cfg->frame_reg, save_offset);
1069 * The immediate field for cond branches is big enough for all reasonable methods
/* Emit a conditional branch to a label or basic block.  When the target
 * offset is already known the branch could be emitted directly (those
 * paths are disabled with "0 &&"); otherwise a patch-info entry is
 * recorded and a zero-displacement B<cond> placeholder is emitted. */
1071 #define EMIT_COND_BRANCH_FLAGS(ins,condcode) \
1072 if (ins->flags & MONO_INST_BRLABEL) { \
1073 if (0 && ins->inst_i0->inst_c0) { \
1074 ARM_B_COND (code, (condcode), (code - cfg->native_code + ins->inst_i0->inst_c0) & 0xffffff); \
1076 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_LABEL, ins->inst_i0); \
1077 ARM_B_COND (code, (condcode), 0); \
1080 if (0 && ins->inst_true_bb->native_offset) { \
1081 ARM_B_COND (code, (condcode), (code - cfg->native_code + ins->inst_true_bb->native_offset) & 0xffffff); \
1083 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_BB, ins->inst_true_bb); \
1084 ARM_B_COND (code, (condcode), 0); \
1088 #define EMIT_COND_BRANCH(ins,cond) EMIT_COND_BRANCH_FLAGS(ins, branch_cc_table [(cond)])
1090 /* emit an exception if condition is fail
1092 * We assign the extra code used to throw the implicit exceptions
1093 * to cfg->bb_exit as far as the big branch handling is concerned
/* Record an MONO_PATCH_INFO_EXC patch and emit a conditional BL
 * placeholder that will be patched to the exception-throw code. */
1095 #define EMIT_COND_SYSTEM_EXCEPTION_FLAGS(condcode,exc_name) \
1097 mono_add_patch_info (cfg, code - cfg->native_code, \
1098 MONO_PATCH_INFO_EXC, exc_name); \
1099 ARM_BL_COND (code, (condcode), 0); \
1102 #define EMIT_COND_SYSTEM_EXCEPTION(cond,exc_name) EMIT_COND_SYSTEM_EXCEPTION_FLAGS(branch_cc_table [(cond)], (exc_name))
/*
 * peephole_pass:
 * Local window optimizations over one basic block: strength-reduce
 * multiplies by 1 / powers of two, forward stored values to subsequent
 * loads from the same [basereg+offset], collapse redundant load pairs,
 * and delete self/inverse moves.  Instructions are removed by splicing
 * last_ins->next past them.
 */
1105 peephole_pass (MonoCompile *cfg, MonoBasicBlock *bb)
1107 MonoInst *ins, *last_ins = NULL;
1112 switch (ins->opcode) {
1114 /* remove unnecessary multiplication with 1 */
1115 if (ins->inst_imm == 1) {
1116 if (ins->dreg != ins->sreg1) {
1117 ins->opcode = OP_MOVE;
1119 last_ins->next = ins->next;
/* mul by power of two => shift left */
1124 int power2 = mono_is_power_of_two (ins->inst_imm);
1126 ins->opcode = OP_SHL_IMM;
1127 ins->inst_imm = power2;
1131 case OP_LOAD_MEMBASE:
1132 case OP_LOADI4_MEMBASE:
/* store-to-load forwarding:
1134 * OP_STORE_MEMBASE_REG reg, offset(basereg)
1135 * OP_LOAD_MEMBASE offset(basereg), reg
*/
1137 if (last_ins && (last_ins->opcode == OP_STOREI4_MEMBASE_REG
1138 || last_ins->opcode == OP_STORE_MEMBASE_REG) &&
1139 ins->inst_basereg == last_ins->inst_destbasereg &&
1140 ins->inst_offset == last_ins->inst_offset) {
/* load target equals stored source: the load is a no-op, drop it */
1141 if (ins->dreg == last_ins->sreg1) {
1142 last_ins->next = ins->next;
1146 //static int c = 0; printf ("MATCHX %s %d\n", cfg->method->name,c++);
1147 ins->opcode = OP_MOVE;
1148 ins->sreg1 = last_ins->sreg1;
1152 * Note: reg1 must be different from the basereg in the second load
1153 * OP_LOAD_MEMBASE offset(basereg), reg1
1154 * OP_LOAD_MEMBASE offset(basereg), reg2
1156 * OP_LOAD_MEMBASE offset(basereg), reg1
1157 * OP_MOVE reg1, reg2
1159 } if (last_ins && (last_ins->opcode == OP_LOADI4_MEMBASE
1160 || last_ins->opcode == OP_LOAD_MEMBASE) &&
1161 ins->inst_basereg != last_ins->dreg &&
1162 ins->inst_basereg == last_ins->inst_basereg &&
1163 ins->inst_offset == last_ins->inst_offset) {
1165 if (ins->dreg == last_ins->dreg) {
1166 last_ins->next = ins->next;
1170 ins->opcode = OP_MOVE;
1171 ins->sreg1 = last_ins->dreg;
1174 //g_assert_not_reached ();
/* constant-store forwarding:
1178 * OP_STORE_MEMBASE_IMM imm, offset(basereg)
1179 * OP_LOAD_MEMBASE offset(basereg), reg
1181 * OP_STORE_MEMBASE_IMM imm, offset(basereg)
1182 * OP_ICONST reg, imm
*/
1184 } else if (last_ins && (last_ins->opcode == OP_STOREI4_MEMBASE_IMM
1185 || last_ins->opcode == OP_STORE_MEMBASE_IMM) &&
1186 ins->inst_basereg == last_ins->inst_destbasereg &&
1187 ins->inst_offset == last_ins->inst_offset) {
1188 //static int c = 0; printf ("MATCHX %s %d\n", cfg->method->name,c++);
1189 ins->opcode = OP_ICONST;
1190 ins->inst_c0 = last_ins->inst_imm;
1191 g_assert_not_reached (); // check this rule
/* narrow store followed by narrow load: replace with sign/zero extend */
1195 case OP_LOADU1_MEMBASE:
1196 case OP_LOADI1_MEMBASE:
1197 if (last_ins && (last_ins->opcode == OP_STOREI1_MEMBASE_REG) &&
1198 ins->inst_basereg == last_ins->inst_destbasereg &&
1199 ins->inst_offset == last_ins->inst_offset) {
1200 ins->opcode = (ins->opcode == OP_LOADI1_MEMBASE) ? CEE_CONV_I1 : CEE_CONV_U1;
1201 ins->sreg1 = last_ins->sreg1;
1204 case OP_LOADU2_MEMBASE:
1205 case OP_LOADI2_MEMBASE:
1206 if (last_ins && (last_ins->opcode == OP_STOREI2_MEMBASE_REG) &&
1207 ins->inst_basereg == last_ins->inst_destbasereg &&
1208 ins->inst_offset == last_ins->inst_offset) {
1209 ins->opcode = (ins->opcode == OP_LOADI2_MEMBASE) ? CEE_CONV_I2 : CEE_CONV_U2;
1210 ins->sreg1 = last_ins->sreg1;
1217 ins->opcode = OP_MOVE;
/* drop moves where dreg == sreg */
1221 if (ins->dreg == ins->sreg1) {
1223 last_ins->next = ins->next;
/* drop the second of an inverse move pair:
1228 * OP_MOVE sreg, dreg
1229 * OP_MOVE dreg, sreg
*/
1231 if (last_ins && last_ins->opcode == OP_MOVE &&
1232 ins->sreg1 == last_ins->dreg &&
1233 ins->dreg == last_ins->sreg1) {
1234 last_ins->next = ins->next;
1243 bb->last_ins = last_ins;
1247 * the branch_cc_table should maintain the order of these
1261 branch_cc_table [] = {
/*
 * insert_after_ins:
 * Splice @to_insert into @bb's instruction list immediately after @ins;
 * when @ins is NULL (visible via the bb->code branch) @to_insert becomes
 * the new list head.  NOTE(review): the NULL-check line itself is not
 * visible in this sampled listing.
 */
1277 insert_after_ins (MonoBasicBlock *bb, MonoInst *ins, MonoInst *to_insert)
1281 bb->code = to_insert;
1282 to_insert->next = ins;
1284 to_insert->next = ins->next;
1285 ins->next = to_insert;
/* Allocate a zeroed MonoInst from the compile mempool, set its opcode
 * and insert it after last_ins (captured from the enclosing scope). */
1289 #define NEW_INS(cfg,dest,op) do { \
1290 (dest) = mono_mempool_alloc0 ((cfg)->mempool, sizeof (MonoInst)); \
1291 (dest)->opcode = (op); \
1292 insert_after_ins (bb, last_ins, (dest)); \
/*
 * map_to_reg_reg_op:
 * Map an opcode with an immediate/membase operand to its two-register
 * equivalent, used by the lowering pass after the immediate has been
 * loaded into a register: *_MEMBASE loads/stores become *_MEMINDEX, and
 * *_MEMBASE_IMM stores become *_MEMBASE_REG.  Aborts on unmapped opcodes.
 */
1296 map_to_reg_reg_op (int op)
1305 case OP_COMPARE_IMM:
1319 case OP_LOAD_MEMBASE:
1320 return OP_LOAD_MEMINDEX;
1321 case OP_LOADI4_MEMBASE:
1322 return OP_LOADI4_MEMINDEX;
1323 case OP_LOADU4_MEMBASE:
1324 return OP_LOADU4_MEMINDEX;
1325 case OP_LOADU1_MEMBASE:
1326 return OP_LOADU1_MEMINDEX;
1327 case OP_LOADI2_MEMBASE:
1328 return OP_LOADI2_MEMINDEX;
1329 case OP_LOADU2_MEMBASE:
1330 return OP_LOADU2_MEMINDEX;
1331 case OP_LOADI1_MEMBASE:
1332 return OP_LOADI1_MEMINDEX;
1333 case OP_STOREI1_MEMBASE_REG:
1334 return OP_STOREI1_MEMINDEX;
1335 case OP_STOREI2_MEMBASE_REG:
1336 return OP_STOREI2_MEMINDEX;
1337 case OP_STOREI4_MEMBASE_REG:
1338 return OP_STOREI4_MEMINDEX;
1339 case OP_STORE_MEMBASE_REG:
1340 return OP_STORE_MEMINDEX;
1341 case OP_STORER4_MEMBASE_REG:
1342 return OP_STORER4_MEMINDEX;
1343 case OP_STORER8_MEMBASE_REG:
1344 return OP_STORER8_MEMINDEX;
/* immediate stores drop to register stores (the imm gets its own reg) */
1345 case OP_STORE_MEMBASE_IMM:
1346 return OP_STORE_MEMBASE_REG;
1347 case OP_STOREI1_MEMBASE_IMM:
1348 return OP_STOREI1_MEMBASE_REG;
1349 case OP_STOREI2_MEMBASE_IMM:
1350 return OP_STOREI2_MEMBASE_REG;
1351 case OP_STOREI4_MEMBASE_IMM:
1352 return OP_STOREI4_MEMBASE_REG;
1354 g_assert_not_reached ();
1358 * Remove from the instruction list the instructions that can't be
1359 * represented with very simple instructions with no register
/*
 * Lowering pass: rewrites instructions whose immediate operands cannot be
 * encoded directly in ARM instructions into sequences that first materialize
 * the constant in a fresh virtual register (OP_ICONST) and then use the
 * register-register form of the operation (map_to_reg_reg_op).
 * NOTE(review): this extract is missing interleaved lines (case labels,
 * break statements, braces); comments below are limited to what the
 * visible code establishes.
 */
1363 mono_arch_lowering_pass (MonoCompile *cfg, MonoBasicBlock *bb)
1365 MonoInst *ins, *temp, *last_ins = NULL;
1366 int rot_amount, imm8, low_imm;
1368 /* setup the virtual reg allocator */
1369 if (bb->max_vreg > cfg->rs->next_vreg)
1370 cfg->rs->next_vreg = bb->max_vreg;
1375 switch (ins->opcode) {
1379 case OP_COMPARE_IMM:
/* If the immediate cannot be encoded as an 8-bit value rotated by an even
 * amount (the ARM data-processing immediate form), load it into a new
 * vreg and switch to the reg-reg variant of the opcode. */
1386 if ((imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount)) < 0) {
1387 NEW_INS (cfg, temp, OP_ICONST);
1388 temp->inst_c0 = ins->inst_imm;
1389 temp->dreg = mono_regstate_next_int (cfg->rs);
1390 ins->sreg2 = temp->dreg;
1391 ins->opcode = map_to_reg_reg_op (ins->opcode);
/* Multiply-by-immediate (case label dropped in this extract):
 * strength-reduce the trivial cases first. */
1395 if (ins->inst_imm == 1) {
1396 ins->opcode = OP_MOVE; /* x * 1 == x */
1399 if (ins->inst_imm == 0) {
1400 ins->opcode = OP_ICONST; /* x * 0 == 0 */
/* Power-of-two multiplier becomes a left shift. */
1404 imm8 = mono_is_power_of_two (ins->inst_imm);
1406 ins->opcode = OP_SHL_IMM;
1407 ins->inst_imm = imm8;
/* General case: materialize the multiplier and fall back to reg-reg MUL. */
1410 NEW_INS (cfg, temp, OP_ICONST);
1411 temp->inst_c0 = ins->inst_imm;
1412 temp->dreg = mono_regstate_next_int (cfg->rs);
1413 ins->sreg2 = temp->dreg;
1414 ins->opcode = CEE_MUL;
1416 case OP_LOAD_MEMBASE:
1417 case OP_LOADI4_MEMBASE:
1418 case OP_LOADU4_MEMBASE:
1419 case OP_LOADU1_MEMBASE:
1420 /* we can do two things: load the immed in a register
1421 * and use an indexed load, or see if the immed can be
1422 * represented as an ad_imm + a load with a smaller offset
1423 * that fits. We just do the first for now, optimize later.
/* Word/byte loads accept a 12-bit immediate offset; otherwise use an
 * indexed load with the offset in a vreg. */
1425 if (arm_is_imm12 (ins->inst_offset))
1427 NEW_INS (cfg, temp, OP_ICONST);
1428 temp->inst_c0 = ins->inst_offset;
1429 temp->dreg = mono_regstate_next_int (cfg->rs);
1430 ins->sreg2 = temp->dreg;
1431 ins->opcode = map_to_reg_reg_op (ins->opcode);
/* Halfword / signed-byte loads only take an 8-bit immediate offset. */
1433 case OP_LOADI2_MEMBASE:
1434 case OP_LOADU2_MEMBASE:
1435 case OP_LOADI1_MEMBASE:
1436 if (arm_is_imm8 (ins->inst_offset))
1438 NEW_INS (cfg, temp, OP_ICONST);
1439 temp->inst_c0 = ins->inst_offset;
1440 temp->dreg = mono_regstate_next_int (cfg->rs);
1441 ins->sreg2 = temp->dreg;
1442 ins->opcode = map_to_reg_reg_op (ins->opcode);
/* FP loads: offset must fit the coprocessor 8-bit*4 range. If not,
 * split it into a rotated-imm8 base adjustment plus a small residue. */
1444 case OP_LOADR4_MEMBASE:
1445 case OP_LOADR8_MEMBASE:
1446 if (arm_is_fpimm8 (ins->inst_offset))
1448 low_imm = ins->inst_offset & 0x1ff;
1449 if ((imm8 = mono_arm_is_rotated_imm8 (ins->inst_offset & ~0x1ff, &rot_amount)) >= 0) {
1450 NEW_INS (cfg, temp, OP_ADD_IMM);
1451 temp->inst_imm = ins->inst_offset & ~0x1ff;
1452 temp->sreg1 = ins->inst_basereg;
1453 temp->dreg = mono_regstate_next_int (cfg->rs);
1454 ins->inst_basereg = temp->dreg;
1455 ins->inst_offset = low_imm;
1458 /* VFP/FPA doesn't have indexed load instructions */
1459 g_assert_not_reached ();
1461 case OP_STORE_MEMBASE_REG:
1462 case OP_STOREI4_MEMBASE_REG:
1463 case OP_STOREI1_MEMBASE_REG:
1464 if (arm_is_imm12 (ins->inst_offset))
1466 NEW_INS (cfg, temp, OP_ICONST);
1467 temp->inst_c0 = ins->inst_offset;
1468 temp->dreg = mono_regstate_next_int (cfg->rs);
1469 ins->sreg2 = temp->dreg;
1470 ins->opcode = map_to_reg_reg_op (ins->opcode);
1472 case OP_STOREI2_MEMBASE_REG:
1473 if (arm_is_imm8 (ins->inst_offset))
1475 NEW_INS (cfg, temp, OP_ICONST);
1476 temp->inst_c0 = ins->inst_offset;
1477 temp->dreg = mono_regstate_next_int (cfg->rs);
1478 ins->sreg2 = temp->dreg;
1479 ins->opcode = map_to_reg_reg_op (ins->opcode);
/* FP stores: same base-adjustment trick as the FP loads above, but the
 * residue must itself still be fpimm8-encodable. */
1481 case OP_STORER4_MEMBASE_REG:
1482 case OP_STORER8_MEMBASE_REG:
1483 if (arm_is_fpimm8 (ins->inst_offset))
1485 low_imm = ins->inst_offset & 0x1ff;
1486 if ((imm8 = mono_arm_is_rotated_imm8 (ins->inst_offset & ~ 0x1ff, &rot_amount)) >= 0 && arm_is_fpimm8 (low_imm)) {
1487 NEW_INS (cfg, temp, OP_ADD_IMM);
1488 temp->inst_imm = ins->inst_offset & ~0x1ff;
1489 temp->sreg1 = ins->inst_destbasereg;
1490 temp->dreg = mono_regstate_next_int (cfg->rs);
1491 ins->inst_destbasereg = temp->dreg;
1492 ins->inst_offset = low_imm;
1495 /*g_print ("fail with: %d (%d, %d)\n", ins->inst_offset, ins->inst_offset & ~0x1ff, low_imm);*/
1496 /* VFP/FPA doesn't have indexed store instructions */
1497 g_assert_not_reached ();
/* Store-immediate: load the value into a vreg and re-process the
 * instruction as a store-register (which may still need offset lowering,
 * hence the goto below). */
1499 case OP_STORE_MEMBASE_IMM:
1500 case OP_STOREI1_MEMBASE_IMM:
1501 case OP_STOREI2_MEMBASE_IMM:
1502 case OP_STOREI4_MEMBASE_IMM:
1503 NEW_INS (cfg, temp, OP_ICONST);
1504 temp->inst_c0 = ins->inst_imm;
1505 temp->dreg = mono_regstate_next_int (cfg->rs);
1506 ins->sreg1 = temp->dreg;
1507 ins->opcode = map_to_reg_reg_op (ins->opcode);
1509 goto loop_start; /* make it handle the possibly big ins->inst_offset */
/* Publish the updated instruction list tail and vreg high-water mark. */
1514 bb->last_ins = last_ins;
1515 bb->max_vreg = cfg->rs->next_vreg;
/*
 * Local register allocation entry point for this backend: first lower
 * unencodable immediates/offsets (see mono_arch_lowering_pass above), then
 * run the architecture-independent local allocator on the basic block.
 */
1520 mono_arch_local_regalloc (MonoCompile *cfg, MonoBasicBlock *bb)
1524 mono_arch_lowering_pass (cfg, bb);
1525 mono_local_regalloc (cfg, bb);
/*
 * Emit code converting the FP register 'sreg' to an integer in 'dreg',
 * then truncate/sign-extend the result to 'size' bytes (1, 2 or 4)
 * according to 'is_signed'. Separate emission paths exist for the FPA
 * and VFP floating-point models (#ifdef arms partly dropped in this
 * extract).
 */
1529 emit_float_to_int (MonoCompile *cfg, guchar *code, int dreg, int sreg, int size, gboolean is_signed)
1531 /* sreg is a float, dreg is an integer reg */
/* FPA path: FIX with round-toward-zero. */
1533 ARM_FIXZ (code, dreg, sreg);
1534 #elif defined(ARM_FPU_VFP)
/* VFP path: convert to signed or unsigned int in a scratch VFP reg,
 * then move the bits to the integer register. */
1536 ARM_TOSIZD (code, ARM_VFP_F0, sreg);
1538 ARM_TOUIZD (code, ARM_VFP_F0, sreg);
1539 ARM_FMRS (code, dreg, ARM_VFP_F0);
/* Unsigned narrowing: mask to 8 bits, or shift-left/logical-shift-right
 * to keep the low 16 bits (branch heads dropped in this extract). */
1543 ARM_AND_REG_IMM8 (code, dreg, dreg, 0xff);
1544 else if (size == 2) {
1545 ARM_SHL_IMM (code, dreg, dreg, 16);
1546 ARM_SHR_IMM (code, dreg, dreg, 16);
/* Signed narrowing: shift-left then arithmetic-shift-right to
 * sign-extend from 8 or 16 bits. */
1550 ARM_SHL_IMM (code, dreg, dreg, 24);
1551 ARM_SAR_IMM (code, dreg, dreg, 24);
1552 } else if (size == 2) {
1553 ARM_SHL_IMM (code, dreg, dreg, 16);
1554 ARM_SAR_IMM (code, dreg, dreg, 16);
1562 const guchar *target;
1567 #define is_call_imm(diff) ((gint)(diff) >= -33554432 && (gint)(diff) <= 33554431)
/*
 * Callback for mono_code_manager_foreach: scan one code chunk for a branch
 * thunk slot usable for pdata->target. A thunk is 3 words (12 bytes):
 * a load of the target constant into IP, a jump through IP, and the target
 * address itself in thunks [2]. If a thunk for the target already exists,
 * or a free (all-zero) slot is found, the call site at pdata->code is
 * patched to branch to the thunk. Returns via pdata->found (visible
 * assignments to it were dropped in this extract).
 */
1570 search_thunk_slot (void *data, int csize, int bsize, void *user_data) {
1571 PatchData *pdata = (PatchData*)user_data;
1572 guchar *code = data;
1573 guint32 *thunks = data;
1574 guint32 *endthunks = (guint32*)(code + bsize);
1576 int difflow, diffhigh;
1578 /* always ensure a call from pdata->code can reach to the thunks without further thunks */
1579 difflow = (char*)pdata->code - (char*)thunks;
1580 diffhigh = (char*)pdata->code - (char*)endthunks;
/* Reject chunks whose thunk area is outside the +/-32MB BL range of the
 * call site (is_call_imm checks the 24-bit branch displacement range). */
1581 if (!((is_call_imm (thunks) && is_call_imm (endthunks)) || (is_call_imm (difflow) && is_call_imm (diffhigh))))
1585 * The thunk is composed of 3 words:
1586 * load constant from thunks [2] into ARM_IP
1589 * Note that the LR register is already setup
1591 //g_print ("thunk nentries: %d\n", ((char*)endthunks - (char*)thunks)/16);
1592 if ((pdata->found == 2) || (pdata->code >= code && pdata->code <= code + csize)) {
1593 while (thunks < endthunks) {
1594 //g_print ("looking for target: %p at %p (%08x-%08x)\n", pdata->target, thunks, thunks [0], thunks [1]);
/* Existing thunk for this target: reuse it. */
1595 if (thunks [2] == (guint32)pdata->target) {
1596 arm_patch (pdata->code, (guchar*)thunks);
1597 mono_arch_flush_icache (pdata->code, 4);
1600 } else if ((thunks [0] == 0) && (thunks [1] == 0) && (thunks [2] == 0)) {
1601 /* found a free slot instead: emit thunk */
1602 code = (guchar*)thunks;
1603 ARM_LDR_IMM (code, ARMREG_IP, ARMREG_PC, 0);
/* BX is used when Thumb interworking may be needed; plain mov pc, ip
 * otherwise. */
1604 if (thumb_supported)
1605 ARM_BX (code, ARMREG_IP);
1607 ARM_MOV_REG_REG (code, ARMREG_PC, ARMREG_IP);
1608 thunks [2] = (guint32)pdata->target;
/* Flush the freshly written 12-byte thunk, then patch and flush the
 * 4-byte call site. */
1609 mono_arch_flush_icache ((guchar*)thunks, 12);
1611 arm_patch (pdata->code, (guchar*)thunks);
1612 mono_arch_flush_icache (pdata->code, 4);
1616 /* skip 12 bytes, the size of the thunk */
1620 //g_print ("failed thunk lookup for %p from %p at %p (%d entries)\n", pdata->target, pdata->code, data, count);
/*
 * Find or create a branch thunk for 'target' reachable from the call site
 * 'code'. Walks the domain's code-manager chunks with search_thunk_slot
 * under the domain lock; a second foreach pass is made if the first did
 * not succeed (the pdata setup between the passes was dropped in this
 * extract). Aborts if no thunk slot could be found or created.
 */
1626 handle_thunk (int absolute, guchar *code, const guchar *target) {
1627 MonoDomain *domain = mono_domain_get ();
1631 pdata.target = target;
1632 pdata.absolute = absolute;
/* The domain lock serializes access to the shared code manager. */
1635 mono_domain_lock (domain);
1636 mono_code_manager_foreach (domain->code_mp, search_thunk_slot, &pdata);
1639 /* this uses the first available slot */
1641 mono_code_manager_foreach (domain->code_mp, search_thunk_slot, &pdata);
1643 mono_domain_unlock (domain);
/* pdata.found == 1 means the call site was successfully patched. */
1645 if (pdata.found != 1)
1646 g_print ("thunk failed for %p from %p\n", target, code);
1647 g_assert (pdata.found == 1);
/*
 * Patch the instruction (or call sequence) at 'code' so it transfers
 * control to 'target'. Handles:
 *  - B/BL instructions (top bits 101): rewrite the signed 24-bit
 *    displacement in place, converting BL to BLX when the target's low
 *    bit indicates Thumb; falls back to a thunk when out of range;
 *  - the ldr ip / mov-or-bx sequences used for far calls and thunk
 *    jumps: store the target into the embedded address constant.
 * NOTE(review): several lines (returns, #ifdefs, parts of the ccode
 * comparison logic) were dropped in this extract.
 */
1651 arm_patch (guchar *code, const guchar *target)
1653 guint32 *code32 = (void*)code;
1654 guint32 ins = *code32;
/* Bits 27:25 select the instruction class; 101b is B/BL. */
1655 guint32 prim = (ins >> 25) & 7;
1656 guint32 tval = GPOINTER_TO_UINT (target);
1658 //g_print ("patching 0x%08x (0x%08x) to point to 0x%08x\n", code, ins, target);
1659 if (prim == 5) { /* 101b */
1660 /* the diff starts 8 bytes from the branch opcode */
1661 gint diff = target - code - 8;
1663 gint tmask = 0xffffffff;
1664 if (tval & 1) { /* entering thumb mode */
1665 diff = target - 1 - code - 8;
1666 g_assert (thumb_supported);
1667 tbits = 0xf << 28; /* bl->blx bit pattern */
1668 g_assert ((ins & (1 << 24))); /* it must be a bl, not b instruction */
1669 /* this low bit of the displacement is moved to bit 24 in the instruction encoding */
1673 tmask = ~(1 << 24); /* clear the link bit */
1674 /*g_print ("blx to thumb: target: %p, code: %p, diff: %d, mask: %x\n", target, code, diff, tmask);*/
/* Forward branch: displacement fits the 24-bit signed field
 * (upper bound 2^25 - 1 bytes, checked after >>2 scaling — the
 * scaling lines were dropped in this extract). */
1679 if (diff <= 33554431) {
1681 ins = (ins & 0xff000000) | diff;
1683 *code32 = ins | tbits;
1687 /* diff between 0 and -33554432 */
/* Backward branch: keep the two's-complement low 24 bits. */
1688 if (diff >= -33554432) {
1690 ins = (ins & 0xff000000) | (diff & ~0xff000000);
1692 *code32 = ins | tbits;
/* Out of direct-branch range: route the call through a thunk. */
1697 handle_thunk (TRUE, code, target);
1702 * The alternative call sequences looks like this:
1704 * ldr ip, [pc] // loads the address constant
1705 * b 1f // jumps around the constant
1706 * address constant embedded in the code
1711 * There are two cases for patching:
1712 * a) at the end of method emission: in this case code points to the start
1713 * of the call sequence
1714 * b) during runtime patching of the call site: in this case code points
1715 * to the mov pc, ip instruction
1717 * We have to handle also the thunk jump code sequence:
1721 * address constant // execution never reaches here
1723 if ((ins & 0x0ffffff0) == 0x12fff10) {
1724 /* branch and exchange: the address is constructed in a reg */
1725 g_assert_not_reached ();
/* Build reference encodings of the expected sequence into ccode[] so the
 * instruction at the patch site can be identified by comparison. */
1728 guint32 *tmp = ccode;
1729 guint8 *emit = (guint8*)tmp;
1730 ARM_LDR_IMM (emit, ARMREG_IP, ARMREG_PC, 0);
1731 ARM_MOV_REG_REG (emit, ARMREG_LR, ARMREG_PC);
1732 ARM_MOV_REG_REG (emit, ARMREG_PC, ARMREG_IP);
1733 ARM_BX (emit, ARMREG_IP);
/* 'code' points at the mov pc, ip: the address constant precedes it. */
1734 if (ins == ccode [2]) {
1735 g_assert_not_reached (); // should be -2 ...
1736 code32 [-1] = (guint32)target;
/* 'code' points at the ldr: the constant is two words further on. */
1739 if (ins == ccode [0]) {
1740 /* handles both thunk jump code and the far call sequence */
1741 code32 [2] = (guint32)target;
1744 g_assert_not_reached ();
1746 // g_print ("patched with 0x%08x\n", ins);
/*
 * ARM data-processing instructions encode immediates as an 8-bit value
 * rotated right by an even amount (0..30). This helper tests whether
 * 'val' has such an encoding.
 */
1750 * Return the >= 0 uimm8 value if val can be represented with a byte + rotation
1751 * (with the rotation amount in *rot_amount. rot_amount is already adjusted
1752 * to be used with the emit macros.
1753 * Return -1 otherwise.
1756 mono_arm_is_rotated_imm8 (guint32 val, gint *rot_amount)
/* Try every even rotation; res is val rotated left by i (i.e. undoing a
 * rotate-right-by-i encoding). The fits-in-8-bits check on res was
 * dropped in this extract. */
1759 for (i = 0; i < 31; i+= 2) {
1760 res = (val << (32 - i)) | (val >> i);
/* Convert the left-rotation i back into the encoder's rotate-right
 * amount (0 stays 0). */
1763 *rot_amount = i? 32 - i: 0;
/*
 * Load a 32-bit constant into 'dreg'. Fast paths use a single MOV or MVN
 * when 'val' (or its complement) is a rotated 8-bit immediate; otherwise
 * the value is built byte by byte with MOV + up to three ADDs, each byte
 * supplied as an 8-bit immediate with the appropriate rotation.
 * (A PC-relative constant-pool variant exists behind a dropped #ifdef.)
 */
1770 * Emits in code a sequence of instructions that load the value 'val'
1771 * into the dreg register. Uses at most 4 instructions.
1774 mono_arm_emit_load_imm (guint8 *code, int dreg, guint32 val)
1776 int imm8, rot_amount;
/* Constant-pool path (its surrounding #if was dropped in this extract):
 * load the value from memory just past this instruction. */
1778 ARM_LDR_IMM (code, dreg, ARMREG_PC, 0);
1779 /* skip the constant pool */
/* One-instruction encodings: rotated imm8, or MVN of the complement. */
1785 if ((imm8 = mono_arm_is_rotated_imm8 (val, &rot_amount)) >= 0) {
1786 ARM_MOV_REG_IMM (code, dreg, imm8, rot_amount);
1787 } else if ((imm8 = mono_arm_is_rotated_imm8 (~val, &rot_amount)) >= 0) {
1788 ARM_MVN_REG_IMM (code, dreg, imm8, rot_amount);
/* General path: start from the lowest non-zero byte and OR in the higher
 * bytes via ADD with rotation (24 = rotate placing bits 8-15, 16 = bits
 * 16-23, 8 = bits 24-31). */
1791 ARM_MOV_REG_IMM8 (code, dreg, (val & 0xFF));
1793 ARM_ADD_REG_IMM (code, dreg, dreg, (val & 0xFF00) >> 8, 24);
1795 if (val & 0xFF0000) {
1796 ARM_ADD_REG_IMM (code, dreg, dreg, (val & 0xFF0000) >> 16, 16);
1798 if (val & 0xFF000000) {
1799 ARM_ADD_REG_IMM (code, dreg, dreg, (val & 0xFF000000) >> 24, 8);
/* Same construction when the lowest byte is zero: start at byte 1 ... */
1801 } else if (val & 0xFF00) {
1802 ARM_MOV_REG_IMM (code, dreg, (val & 0xFF00) >> 8, 24);
1803 if (val & 0xFF0000) {
1804 ARM_ADD_REG_IMM (code, dreg, dreg, (val & 0xFF0000) >> 16, 16);
1806 if (val & 0xFF000000) {
1807 ARM_ADD_REG_IMM (code, dreg, dreg, (val & 0xFF000000) >> 24, 8);
/* ... or at byte 2 when the low halfword is zero. */
1809 } else if (val & 0xFF0000) {
1810 ARM_MOV_REG_IMM (code, dreg, (val & 0xFF0000) >> 16, 16);
1811 if (val & 0xFF000000) {
1812 ARM_ADD_REG_IMM (code, dreg, dreg, (val & 0xFF000000) >> 24, 8);
1815 //g_assert_not_reached ();
1821 mono_arch_output_basic_block (MonoCompile *cfg, MonoBasicBlock *bb)
1826 guint8 *code = cfg->native_code + cfg->code_len;
1827 MonoInst *last_ins = NULL;
1828 guint last_offset = 0;
1830 int imm8, rot_amount;
1832 if (cfg->opt & MONO_OPT_PEEPHOLE)
1833 peephole_pass (cfg, bb);
1835 /* we don't align basic blocks of loops on arm */
1837 if (cfg->verbose_level > 2)
1838 g_print ("Basic block %d starting at offset 0x%x\n", bb->block_num, bb->native_offset);
1840 cpos = bb->max_offset;
1842 if (cfg->prof_options & MONO_PROFILE_COVERAGE) {
1843 //MonoCoverageInfo *cov = mono_get_coverage_info (cfg->method);
1844 //g_assert (!mono_compile_aot);
1847 // cov->data [bb->dfn].iloffset = bb->cil_code - cfg->cil_code;
1848 /* this is not thread save, but good enough */
1849 /* fixme: howto handle overflows? */
1850 //x86_inc_mem (code, &cov->data [bb->dfn].count);
1855 offset = code - cfg->native_code;
1857 max_len = ((guint8 *)ins_get_spec (ins->opcode))[MONO_INST_LEN];
1859 if (offset > (cfg->code_size - max_len - 16)) {
1860 cfg->code_size *= 2;
1861 cfg->native_code = g_realloc (cfg->native_code, cfg->code_size);
1862 code = cfg->native_code + offset;
1864 // if (ins->cil_code)
1865 // g_print ("cil code\n");
1866 mono_debug_record_line_number (cfg, ins, offset);
1868 switch (ins->opcode) {
1869 case OP_MEMORY_BARRIER:
1872 g_assert_not_reached ();
1875 ppc_mullw (code, ppc_r4, ins->sreg1, ins->sreg2);
1876 ppc_mulhw (code, ppc_r3, ins->sreg1, ins->sreg2);
1879 ppc_mullw (code, ppc_r4, ins->sreg1, ins->sreg2);
1880 ppc_mulhwu (code, ppc_r3, ins->sreg1, ins->sreg2);
1882 case OP_STOREI1_MEMBASE_IMM:
1883 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_imm & 0xFF);
1884 g_assert (arm_is_imm12 (ins->inst_offset));
1885 ARM_STRB_IMM (code, ARMREG_LR, ins->inst_destbasereg, ins->inst_offset);
1887 case OP_STOREI2_MEMBASE_IMM:
1888 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_imm & 0xFFFF);
1889 g_assert (arm_is_imm8 (ins->inst_offset));
1890 ARM_STRH_IMM (code, ARMREG_LR, ins->inst_destbasereg, ins->inst_offset);
1892 case OP_STORE_MEMBASE_IMM:
1893 case OP_STOREI4_MEMBASE_IMM:
1894 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_imm);
1895 g_assert (arm_is_imm12 (ins->inst_offset));
1896 ARM_STR_IMM (code, ARMREG_LR, ins->inst_destbasereg, ins->inst_offset);
1898 case OP_STOREI1_MEMBASE_REG:
1899 g_assert (arm_is_imm12 (ins->inst_offset));
1900 ARM_STRB_IMM (code, ins->sreg1, ins->inst_destbasereg, ins->inst_offset);
1902 case OP_STOREI2_MEMBASE_REG:
1903 g_assert (arm_is_imm8 (ins->inst_offset));
1904 ARM_STRH_IMM (code, ins->sreg1, ins->inst_destbasereg, ins->inst_offset);
1906 case OP_STORE_MEMBASE_REG:
1907 case OP_STOREI4_MEMBASE_REG:
1908 /* this case is special, since it happens for spill code after lowering has been called */
1909 if (arm_is_imm12 (ins->inst_offset)) {
1910 ARM_STR_IMM (code, ins->sreg1, ins->inst_destbasereg, ins->inst_offset);
1912 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_offset);
1913 ARM_STR_REG_REG (code, ins->sreg1, ins->inst_destbasereg, ARMREG_LR);
1916 case OP_STOREI1_MEMINDEX:
1917 ARM_STRB_REG_REG (code, ins->sreg1, ins->inst_destbasereg, ins->sreg2);
1919 case OP_STOREI2_MEMINDEX:
1920 /* note: the args are reversed in the macro */
1921 ARM_STRH_REG_REG (code, ins->inst_destbasereg, ins->sreg1, ins->sreg2);
1923 case OP_STORE_MEMINDEX:
1924 case OP_STOREI4_MEMINDEX:
1925 ARM_STR_REG_REG (code, ins->sreg1, ins->inst_destbasereg, ins->sreg2);
1930 g_assert_not_reached ();
1933 g_assert_not_reached ();
1935 case OP_LOAD_MEMINDEX:
1936 case OP_LOADI4_MEMINDEX:
1937 case OP_LOADU4_MEMINDEX:
1938 ARM_LDR_REG_REG (code, ins->dreg, ins->inst_basereg, ins->sreg2);
1940 case OP_LOADI1_MEMINDEX:
1941 /* note: the args are reversed in the macro */
1942 ARM_LDRSB_REG_REG (code, ins->inst_basereg, ins->dreg, ins->sreg2);
1944 case OP_LOADU1_MEMINDEX:
1945 ARM_LDRB_REG_REG (code, ins->dreg, ins->inst_basereg, ins->sreg2);
1947 case OP_LOADI2_MEMINDEX:
1948 /* note: the args are reversed in the macro */
1949 ARM_LDRSH_REG_REG (code, ins->inst_basereg, ins->dreg, ins->sreg2);
1951 case OP_LOADU2_MEMINDEX:
1952 /* note: the args are reversed in the macro */
1953 ARM_LDRH_REG_REG (code, ins->inst_basereg, ins->dreg, ins->sreg2);
1955 case OP_LOAD_MEMBASE:
1956 case OP_LOADI4_MEMBASE:
1957 case OP_LOADU4_MEMBASE:
1958 /* this case is special, since it happens for spill code after lowering has been called */
1959 if (arm_is_imm12 (ins->inst_offset)) {
1960 ARM_LDR_IMM (code, ins->dreg, ins->inst_basereg, ins->inst_offset);
1962 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_offset);
1963 ARM_LDR_REG_REG (code, ins->dreg, ins->inst_basereg, ARMREG_LR);
1966 case OP_LOADI1_MEMBASE:
1967 g_assert (arm_is_imm8 (ins->inst_offset));
1968 ARM_LDRSB_IMM (code, ins->dreg, ins->inst_basereg, ins->inst_offset);
1970 case OP_LOADU1_MEMBASE:
1971 g_assert (arm_is_imm12 (ins->inst_offset));
1972 ARM_LDRB_IMM (code, ins->dreg, ins->inst_basereg, ins->inst_offset);
1974 case OP_LOADU2_MEMBASE:
1975 g_assert (arm_is_imm8 (ins->inst_offset));
1976 ARM_LDRH_IMM (code, ins->dreg, ins->inst_basereg, ins->inst_offset);
1978 case OP_LOADI2_MEMBASE:
1979 g_assert (arm_is_imm8 (ins->inst_offset));
1980 ARM_LDRSH_IMM (code, ins->dreg, ins->inst_basereg, ins->inst_offset);
1983 ARM_SHL_IMM (code, ins->dreg, ins->sreg1, 24);
1984 ARM_SAR_IMM (code, ins->dreg, ins->dreg, 24);
1987 ARM_SHL_IMM (code, ins->dreg, ins->sreg1, 16);
1988 ARM_SAR_IMM (code, ins->dreg, ins->dreg, 16);
1991 ARM_AND_REG_IMM8 (code, ins->dreg, ins->sreg1, 0xff);
1994 ARM_SHL_IMM (code, ins->dreg, ins->sreg1, 16);
1995 ARM_SHR_IMM (code, ins->dreg, ins->dreg, 16);
1998 ARM_CMP_REG_REG (code, ins->sreg1, ins->sreg2);
2000 case OP_COMPARE_IMM:
2001 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
2002 g_assert (imm8 >= 0);
2003 ARM_CMP_REG_IMM (code, ins->sreg1, imm8, rot_amount);
2006 *(int*)code = 0xe7f001f0;
2007 *(int*)code = 0xef9f0001;
2012 ARM_ADDS_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
2015 ARM_ADD_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
2018 ARM_ADCS_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
2021 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
2022 g_assert (imm8 >= 0);
2023 ARM_ADDS_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
2026 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
2027 g_assert (imm8 >= 0);
2028 ARM_ADD_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
2031 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
2032 g_assert (imm8 >= 0);
2033 ARM_ADCS_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
2036 ARM_ADD_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
2037 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, "OverflowException");
2039 case CEE_ADD_OVF_UN:
2040 ARM_ADD_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
2041 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, "OverflowException");
2044 ARM_SUB_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
2045 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, "OverflowException");
2047 case CEE_SUB_OVF_UN:
2048 ARM_SUB_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
2049 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_TRUE, PPC_BR_EQ, "OverflowException");
2051 case OP_ADD_OVF_CARRY:
2052 ARM_ADCS_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
2053 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, "OverflowException");
2055 case OP_ADD_OVF_UN_CARRY:
2056 ARM_ADCS_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
2057 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, "OverflowException");
2059 case OP_SUB_OVF_CARRY:
2060 ARM_SBCS_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
2061 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, "OverflowException");
2063 case OP_SUB_OVF_UN_CARRY:
2064 ARM_SBCS_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
2065 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_TRUE, PPC_BR_EQ, "OverflowException");
2068 ARM_SUBS_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
2071 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
2072 g_assert (imm8 >= 0);
2073 ARM_SUBS_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
2076 ARM_SUB_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
2079 ARM_SBCS_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
2082 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
2083 g_assert (imm8 >= 0);
2084 ARM_SUB_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
2087 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
2088 g_assert (imm8 >= 0);
2089 ARM_SBCS_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
2091 case OP_ARM_RSBS_IMM:
2092 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
2093 g_assert (imm8 >= 0);
2094 ARM_RSBS_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
2096 case OP_ARM_RSC_IMM:
2097 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
2098 g_assert (imm8 >= 0);
2099 ARM_RSC_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
2102 ARM_AND_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
2105 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
2106 g_assert (imm8 >= 0);
2107 ARM_AND_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
2115 /* crappy ARM arch doesn't have a DIV instruction */
2116 g_assert_not_reached ();
2118 ARM_ORR_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
2121 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
2122 g_assert (imm8 >= 0);
2123 ARM_ORR_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
2126 ARM_EOR_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
2129 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
2130 g_assert (imm8 >= 0);
2131 ARM_EOR_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
2134 ARM_SHL_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
2138 ARM_SHL_IMM (code, ins->dreg, ins->sreg1, (ins->inst_imm & 0x1f));
2141 ARM_SAR_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
2145 ARM_SAR_IMM (code, ins->dreg, ins->sreg1, (ins->inst_imm & 0x1f));
2149 ARM_SHR_IMM (code, ins->dreg, ins->sreg1, (ins->inst_imm & 0x1f));
2152 ARM_SHR_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
2155 ARM_MVN_REG_REG (code, ins->dreg, ins->sreg1);
2158 ARM_RSB_REG_IMM8 (code, ins->dreg, ins->sreg1, 0);
2161 if (ins->dreg == ins->sreg2)
2162 ARM_MUL_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
2164 ARM_MUL_REG_REG (code, ins->dreg, ins->sreg2, ins->sreg1);
2167 g_assert_not_reached ();
2170 /* FIXME: handle ovf/ sreg2 != dreg */
2171 ARM_MUL_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
2173 case CEE_MUL_OVF_UN:
2174 /* FIXME: handle ovf/ sreg2 != dreg */
2175 ARM_MUL_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
2179 code = mono_arm_emit_load_imm (code, ins->dreg, ins->inst_c0);
2182 g_assert_not_reached ();
2183 mono_add_patch_info (cfg, offset, (MonoJumpInfoType)ins->inst_i1, ins->inst_p0);
2189 if (ins->dreg != ins->sreg1)
2190 ARM_MOV_REG_REG (code, ins->dreg, ins->sreg1);
2193 int saved = ins->sreg2;
2194 if (ins->sreg2 == ARM_LSW_REG) {
2195 ARM_MOV_REG_REG (code, ARMREG_LR, ins->sreg2);
2198 if (ins->sreg1 != ARM_LSW_REG)
2199 ARM_MOV_REG_REG (code, ARM_LSW_REG, ins->sreg1);
2200 if (saved != ARM_MSW_REG)
2201 ARM_MOV_REG_REG (code, ARM_MSW_REG, saved);
2207 ARM_MVFD (code, ins->dreg, ins->sreg1);
2208 #elif defined(ARM_FPU_VFP)
2209 ARM_CPYD (code, ins->dreg, ins->sreg1);
2212 case OP_FCONV_TO_R4:
2214 ARM_MVFS (code, ins->dreg, ins->sreg1);
2215 #elif defined(ARM_FPU_VFP)
2216 ARM_CVTD (code, ins->dreg, ins->sreg1);
2217 ARM_CVTS (code, ins->dreg, ins->dreg);
2222 * Keep in sync with mono_arch_emit_epilog
2224 g_assert (!cfg->method->save_lmf);
2225 code = emit_big_add (code, ARMREG_SP, cfg->frame_reg, cfg->stack_usage);
2226 ARM_POP_NWB (code, cfg->used_int_regs | ((1 << ARMREG_SP)) | ((1 << ARMREG_LR)));
2227 mono_add_patch_info (cfg, (guint8*) code - cfg->native_code, MONO_PATCH_INFO_METHOD_JUMP, ins->inst_p0);
2231 /* ensure ins->sreg1 is not NULL */
2232 ARM_LDR_IMM (code, ARMREG_LR, ins->sreg1, 0);
2236 if (ppc_is_imm16 (cfg->sig_cookie + cfg->stack_usage)) {
2237 ppc_addi (code, ppc_r11, cfg->frame_reg, cfg->sig_cookie + cfg->stack_usage);
2239 ppc_load (code, ppc_r11, cfg->sig_cookie + cfg->stack_usage);
2240 ppc_add (code, ppc_r11, cfg->frame_reg, ppc_r11);
2242 ppc_stw (code, ppc_r11, 0, ins->sreg1);
2251 call = (MonoCallInst*)ins;
2252 if (ins->flags & MONO_INST_HAS_METHOD)
2253 mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_METHOD, call->method);
2255 mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_ABS, call->fptr);
2256 if (cfg->method->dynamic) {
2257 ARM_LDR_IMM (code, ARMREG_IP, ARMREG_PC, 0);
2259 *(gpointer*)code = NULL;
2261 code = emit_call_reg (code, ARMREG_IP);
2269 case OP_VOIDCALL_REG:
2271 code = emit_call_reg (code, ins->sreg1);
2273 case OP_FCALL_MEMBASE:
2274 case OP_LCALL_MEMBASE:
2275 case OP_VCALL_MEMBASE:
2276 case OP_VOIDCALL_MEMBASE:
2277 case OP_CALL_MEMBASE:
2278 g_assert (arm_is_imm12 (ins->inst_offset));
2279 g_assert (ins->sreg1 != ARMREG_LR);
2280 ARM_MOV_REG_REG (code, ARMREG_LR, ARMREG_PC);
2281 ARM_LDR_IMM (code, ARMREG_PC, ins->sreg1, ins->inst_offset);
2284 g_assert_not_reached ();
2287 /* keep alignment */
2288 int alloca_waste = cfg->param_area;
2291 /* round the size to 8 bytes */
2292 ARM_ADD_REG_IMM8 (code, ins->dreg, ins->sreg1, 7);
2293 ARM_BIC_REG_IMM8 (code, ins->dreg, ins->sreg1, 7);
2294 ARM_ADD_REG_IMM8 (code, ins->dreg, ins->dreg, alloca_waste);
2295 ARM_SUB_REG_REG (code, ARMREG_SP, ARMREG_SP, ins->dreg);
2296 /* memzero the area: dreg holds the size, sp is the pointer */
2297 if (ins->flags & MONO_INST_INIT) {
2298 guint8 *start_loop, *branch_to_cond;
2299 ARM_MOV_REG_IMM8 (code, ARMREG_LR, 0);
2300 branch_to_cond = code;
2303 ARM_STR_REG_REG (code, ARMREG_LR, ARMREG_SP, ins->dreg);
2304 arm_patch (branch_to_cond, code);
2305 /* decrement by 4 and set flags */
2306 ARM_SUBS_REG_IMM8 (code, ins->dreg, ins->dreg, 4);
2307 ARM_B_COND (code, ARMCOND_LT, 0);
2308 arm_patch (code - 4, start_loop);
2310 ARM_ADD_REG_IMM8 (code, ins->dreg, ARMREG_SP, alloca_waste);
2314 g_assert_not_reached ();
2315 ARM_MOV_REG_REG (code, ARMREG_PC, ARMREG_LR);
2318 if (ins->sreg1 != ARMREG_R0)
2319 ARM_MOV_REG_REG (code, ARMREG_R0, ins->sreg1);
2320 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_INTERNAL_METHOD,
2321 (gpointer)"mono_arch_throw_exception");
2322 if (cfg->method->dynamic) {
2323 ARM_LDR_IMM (code, ARMREG_IP, ARMREG_PC, 0);
2325 *(gpointer*)code = NULL;
2327 code = emit_call_reg (code, ARMREG_IP);
2334 if (ins->sreg1 != ARMREG_R0)
2335 ARM_MOV_REG_REG (code, ARMREG_R0, ins->sreg1);
2336 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_INTERNAL_METHOD,
2337 (gpointer)"mono_arch_rethrow_exception");
2338 if (cfg->method->dynamic) {
2339 ARM_LDR_IMM (code, ARMREG_IP, ARMREG_PC, 0);
2341 *(gpointer*)code = NULL;
2343 code = emit_call_reg (code, ARMREG_IP);
2349 case OP_START_HANDLER:
2350 if (arm_is_imm12 (ins->inst_left->inst_offset)) {
2351 ARM_STR_IMM (code, ARMREG_LR, ins->inst_left->inst_basereg, ins->inst_left->inst_offset);
2353 code = mono_arm_emit_load_imm (code, ARMREG_IP, ins->inst_left->inst_offset);
2354 ARM_STR_REG_REG (code, ARMREG_LR, ins->inst_left->inst_basereg, ARMREG_IP);
2358 if (ins->sreg1 != ARMREG_R0)
2359 ARM_MOV_REG_REG (code, ARMREG_R0, ins->sreg1);
2360 if (arm_is_imm12 (ins->inst_left->inst_offset)) {
2361 ARM_LDR_IMM (code, ARMREG_IP, ins->inst_left->inst_basereg, ins->inst_left->inst_offset);
2363 g_assert (ARMREG_IP != ins->inst_left->inst_basereg);
2364 code = mono_arm_emit_load_imm (code, ARMREG_IP, ins->inst_left->inst_offset);
2365 ARM_LDR_REG_REG (code, ARMREG_IP, ins->inst_left->inst_basereg, ARMREG_IP);
2367 ARM_MOV_REG_REG (code, ARMREG_PC, ARMREG_IP);
2370 if (arm_is_imm12 (ins->inst_left->inst_offset)) {
2371 ARM_LDR_IMM (code, ARMREG_IP, ins->inst_left->inst_basereg, ins->inst_left->inst_offset);
2373 g_assert (ARMREG_IP != ins->inst_left->inst_basereg);
2374 code = mono_arm_emit_load_imm (code, ARMREG_IP, ins->inst_left->inst_offset);
2375 ARM_LDR_REG_REG (code, ARMREG_IP, ins->inst_left->inst_basereg, ARMREG_IP);
2377 ARM_MOV_REG_REG (code, ARMREG_PC, ARMREG_IP);
2379 case OP_CALL_HANDLER:
2380 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_BB, ins->inst_target_bb);
2384 ins->inst_c0 = code - cfg->native_code;
2387 if (ins->flags & MONO_INST_BRLABEL) {
2388 /*if (ins->inst_i0->inst_c0) {
2390 //x86_jump_code (code, cfg->native_code + ins->inst_i0->inst_c0);
2392 mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_LABEL, ins->inst_i0);
2396 /*if (ins->inst_target_bb->native_offset) {
2398 //x86_jump_code (code, cfg->native_code + ins->inst_target_bb->native_offset);
2400 mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_BB, ins->inst_target_bb);
2406 ARM_MOV_REG_REG (code, ARMREG_PC, ins->sreg1);
2410 * In the normal case we have:
2411 * ldr pc, [pc, ins->sreg1 << 2]
2414 * ldr lr, [pc, ins->sreg1 << 2]
2416 * After follows the data.
2417 * FIXME: add aot support.
2419 max_len += 4 * GPOINTER_TO_INT (ins->klass);
2420 if (offset > (cfg->code_size - max_len - 16)) {
2421 cfg->code_size += max_len;
2422 cfg->code_size *= 2;
2423 cfg->native_code = g_realloc (cfg->native_code, cfg->code_size);
2424 code = cfg->native_code + offset;
2426 ARM_LDR_REG_REG_SHIFT (code, ARMREG_PC, ARMREG_PC, ins->sreg1, ARMSHIFT_LSL, 2);
2428 code += 4 * GPOINTER_TO_INT (ins->klass);
2431 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 0, ARMCOND_NE);
2432 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_EQ);
2435 ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
2436 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_LT);
2439 ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
2440 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_LO);
2443 ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
2444 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_GT);
2447 ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
2448 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_HI);
2450 case OP_COND_EXC_EQ:
2451 case OP_COND_EXC_NE_UN:
2452 case OP_COND_EXC_LT:
2453 case OP_COND_EXC_LT_UN:
2454 case OP_COND_EXC_GT:
2455 case OP_COND_EXC_GT_UN:
2456 case OP_COND_EXC_GE:
2457 case OP_COND_EXC_GE_UN:
2458 case OP_COND_EXC_LE:
2459 case OP_COND_EXC_LE_UN:
2460 EMIT_COND_SYSTEM_EXCEPTION (ins->opcode - OP_COND_EXC_EQ, ins->inst_p1);
2463 case OP_COND_EXC_OV:
2464 case OP_COND_EXC_NC:
2465 case OP_COND_EXC_NO:
2466 g_assert_not_reached ();
2478 EMIT_COND_BRANCH (ins, ins->opcode - CEE_BEQ);
2481 /* floating point opcodes */
2484 /* FIXME: we can optimize the imm load by dealing with part of
2485 * the displacement in LDFD (aligning to 512).
2487 code = mono_arm_emit_load_imm (code, ARMREG_LR, (guint32)ins->inst_p0);
2488 ARM_LDFD (code, ins->dreg, ARMREG_LR, 0);
2491 code = mono_arm_emit_load_imm (code, ARMREG_LR, (guint32)ins->inst_p0);
2492 ARM_LDFS (code, ins->dreg, ARMREG_LR, 0);
2494 case OP_STORER8_MEMBASE_REG:
2495 g_assert (arm_is_fpimm8 (ins->inst_offset));
2496 ARM_STFD (code, ins->sreg1, ins->inst_destbasereg, ins->inst_offset);
2498 case OP_LOADR8_MEMBASE:
2499 g_assert (arm_is_fpimm8 (ins->inst_offset));
2500 ARM_LDFD (code, ins->dreg, ins->inst_basereg, ins->inst_offset);
2502 case OP_STORER4_MEMBASE_REG:
2503 g_assert (arm_is_fpimm8 (ins->inst_offset));
2504 ARM_STFS (code, ins->sreg1, ins->inst_destbasereg, ins->inst_offset);
2506 case OP_LOADR4_MEMBASE:
2507 g_assert (arm_is_fpimm8 (ins->inst_offset));
2508 ARM_LDFS (code, ins->dreg, ins->inst_basereg, ins->inst_offset);
2510 case CEE_CONV_R_UN: {
2512 tmpreg = ins->dreg == 0? 1: 0;
2513 ARM_CMP_REG_IMM8 (code, ins->sreg1, 0);
2514 ARM_FLTD (code, ins->dreg, ins->sreg1);
2515 ARM_B_COND (code, ARMCOND_GE, 8);
2516 /* save the temp register */
2517 ARM_SUB_REG_IMM8 (code, ARMREG_SP, ARMREG_SP, 8);
2518 ARM_STFD (code, tmpreg, ARMREG_SP, 0);
2519 ARM_LDFD (code, tmpreg, ARMREG_PC, 12);
2520 ARM_FPA_ADFD (code, ins->dreg, ins->dreg, tmpreg);
2521 ARM_LDFD (code, tmpreg, ARMREG_SP, 0);
2522 ARM_ADD_REG_IMM8 (code, ARMREG_SP, ARMREG_SP, 8);
2523 /* skip the constant pool */
2526 *(int*)code = 0x41f00000;
2531 * ldfltd ftemp, [pc, #8] 0x41f00000 0x00000000
2532 * adfltd fdest, fdest, ftemp
2537 ARM_FLTS (code, ins->dreg, ins->sreg1);
2540 ARM_FLTD (code, ins->dreg, ins->sreg1);
2542 #elif defined(ARM_FPU_VFP)
2544 /* FIXME: we can optimize the imm load by dealing with part of
2545 * the displacement in LDFD (aligning to 512).
2547 code = mono_arm_emit_load_imm (code, ARMREG_LR, (guint32)ins->inst_p0);
2548 ARM_FLDD (code, ins->dreg, ARMREG_LR, 0);
2551 code = mono_arm_emit_load_imm (code, ARMREG_LR, (guint32)ins->inst_p0);
2552 ARM_FLDS (code, ins->dreg, ARMREG_LR, 0);
2553 ARM_CVTS (code, ins->dreg, ins->dreg);
2555 case OP_STORER8_MEMBASE_REG:
2556 g_assert (arm_is_fpimm8 (ins->inst_offset));
2557 ARM_FSTD (code, ins->sreg1, ins->inst_destbasereg, ins->inst_offset);
2559 case OP_LOADR8_MEMBASE:
2560 g_assert (arm_is_fpimm8 (ins->inst_offset));
2561 ARM_FLDD (code, ins->dreg, ins->inst_basereg, ins->inst_offset);
2563 case OP_STORER4_MEMBASE_REG:
2564 g_assert (arm_is_fpimm8 (ins->inst_offset));
2565 ARM_FSTS (code, ins->sreg1, ins->inst_destbasereg, ins->inst_offset);
2567 case OP_LOADR4_MEMBASE:
2568 g_assert (arm_is_fpimm8 (ins->inst_offset));
2569 ARM_FLDS (code, ins->dreg, ins->inst_basereg, ins->inst_offset);
2571 case CEE_CONV_R_UN: {
2572 g_assert_not_reached ();
2576 g_assert_not_reached ();
2577 //ARM_FLTS (code, ins->dreg, ins->sreg1);
2580 g_assert_not_reached ();
2581 //ARM_FLTD (code, ins->dreg, ins->sreg1);
2584 case OP_FCONV_TO_I1:
2585 code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 1, TRUE);
2587 case OP_FCONV_TO_U1:
2588 code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 1, FALSE);
2590 case OP_FCONV_TO_I2:
2591 code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 2, TRUE);
2593 case OP_FCONV_TO_U2:
2594 code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 2, FALSE);
2596 case OP_FCONV_TO_I4:
2598 code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 4, TRUE);
2600 case OP_FCONV_TO_U4:
2602 code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 4, FALSE);
2604 case OP_FCONV_TO_I8:
2605 case OP_FCONV_TO_U8:
2606 g_assert_not_reached ();
2607 /* Implemented as helper calls */
2609 case OP_LCONV_TO_R_UN:
2610 g_assert_not_reached ();
2611 /* Implemented as helper calls */
2613 case OP_LCONV_TO_OVF_I: {
2615 guint32 *negative_branch, *msword_positive_branch, *msword_negative_branch, *ovf_ex_target;
2616 // Check if its negative
2617 ppc_cmpi (code, 0, 0, ins->sreg1, 0);
2618 negative_branch = code;
2619 ppc_bc (code, PPC_BR_TRUE, PPC_BR_LT, 0);
2620 // Its positive msword == 0
2621 ppc_cmpi (code, 0, 0, ins->sreg2, 0);
2622 msword_positive_branch = code;
2623 ppc_bc (code, PPC_BR_TRUE, PPC_BR_EQ, 0);
2625 ovf_ex_target = code;
2626 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_ALWAYS, 0, "OverflowException");
2628 ppc_patch (negative_branch, code);
2629 ppc_cmpi (code, 0, 0, ins->sreg2, -1);
2630 msword_negative_branch = code;
2631 ppc_bc (code, PPC_BR_FALSE, PPC_BR_EQ, 0);
2632 ppc_patch (msword_negative_branch, ovf_ex_target);
2634 ppc_patch (msword_positive_branch, code);
2635 if (ins->dreg != ins->sreg1)
2636 ppc_mr (code, ins->dreg, ins->sreg1);
2638 if (ins->dreg != ins->sreg1)
2639 ARM_MOV_REG_REG (code, ins->dreg, ins->sreg1);
2644 ARM_FPA_ADFD (code, ins->dreg, ins->sreg1, ins->sreg2);
2647 ARM_FPA_SUFD (code, ins->dreg, ins->sreg1, ins->sreg2);
2650 ARM_FPA_MUFD (code, ins->dreg, ins->sreg1, ins->sreg2);
2653 ARM_FPA_DVFD (code, ins->dreg, ins->sreg1, ins->sreg2);
2656 ARM_MNFD (code, ins->dreg, ins->sreg1);
2658 #elif defined(ARM_FPU_VFP)
2660 ARM_VFP_ADDD (code, ins->dreg, ins->sreg1, ins->sreg2);
2663 ARM_VFP_SUBD (code, ins->dreg, ins->sreg1, ins->sreg2);
2666 ARM_VFP_MULD (code, ins->dreg, ins->sreg1, ins->sreg2);
2669 ARM_VFP_DIVD (code, ins->dreg, ins->sreg1, ins->sreg2);
2672 ARM_NEGD (code, ins->dreg, ins->sreg1);
2677 g_assert_not_reached ();
2680 /* each fp compare op needs to do its own */
2681 g_assert_not_reached ();
2682 //ARM_FCMP (code, ARM_FPA_CMF, ins->sreg1, ins->sreg2);
2686 ARM_FCMP (code, ARM_FPA_CMF, ins->sreg1, ins->sreg2);
2687 #elif defined(ARM_FPU_VFP)
2688 ARM_CMPD (code, ins->sreg1, ins->sreg2);
2690 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 0, ARMCOND_NE);
2691 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_EQ);
2695 ARM_FCMP (code, ARM_FPA_CMF, ins->sreg1, ins->sreg2);
2696 #elif defined(ARM_FPU_VFP)
2697 ARM_CMPD (code, ins->sreg1, ins->sreg2);
2699 ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
2700 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_MI);
2704 ARM_FCMP (code, ARM_FPA_CMF, ins->sreg1, ins->sreg2);
2705 #elif defined(ARM_FPU_VFP)
2706 ARM_CMPD (code, ins->sreg1, ins->sreg2);
2708 ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
2709 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_MI);
2710 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_VS);
2715 ARM_FCMP (code, ARM_FPA_CMF, ins->sreg2, ins->sreg1);
2716 #elif defined(ARM_FPU_VFP)
2717 ARM_CMPD (code, ins->sreg2, ins->sreg1);
2719 ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
2720 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_MI);
2725 ARM_FCMP (code, ARM_FPA_CMF, ins->sreg2, ins->sreg1);
2726 #elif defined(ARM_FPU_VFP)
2727 ARM_CMPD (code, ins->sreg2, ins->sreg1);
2729 ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
2730 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_MI);
2731 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_VS);
2733 /* ARM FPA flags table:
2734 * N Less than ARMCOND_MI
2735 * Z Equal ARMCOND_EQ
2736 * C Greater Than or Equal ARMCOND_CS
2737 * V Unordered ARMCOND_VS
2741 ARM_FCMP (code, ARM_FPA_CMF, ins->sreg1, ins->sreg2);
2742 #elif defined(ARM_FPU_VFP)
2743 ARM_CMPD (code, ins->sreg1, ins->sreg2);
2745 EMIT_COND_BRANCH (ins, CEE_BEQ - CEE_BEQ);
2749 ARM_FCMP (code, ARM_FPA_CMF, ins->sreg1, ins->sreg2);
2750 #elif defined(ARM_FPU_VFP)
2751 ARM_CMPD (code, ins->sreg1, ins->sreg2);
2753 EMIT_COND_BRANCH (ins, CEE_BNE_UN - CEE_BEQ);
2757 ARM_FCMP (code, ARM_FPA_CMF, ins->sreg1, ins->sreg2);
2758 #elif defined(ARM_FPU_VFP)
2759 ARM_CMPD (code, ins->sreg1, ins->sreg2);
2761 EMIT_COND_BRANCH_FLAGS (ins, ARMCOND_MI); /* N set */
2765 ARM_FCMP (code, ARM_FPA_CMF, ins->sreg1, ins->sreg2);
2766 #elif defined(ARM_FPU_VFP)
2767 ARM_CMPD (code, ins->sreg1, ins->sreg2);
2769 EMIT_COND_BRANCH_FLAGS (ins, ARMCOND_VS); /* V set */
2770 EMIT_COND_BRANCH_FLAGS (ins, ARMCOND_MI); /* N set */
2774 ARM_FCMP (code, ARM_FPA_CMF, ins->sreg2, ins->sreg1);
2775 #elif defined(ARM_FPU_VFP)
2776 ARM_CMPD (code, ins->sreg2, ins->sreg1);
2778 EMIT_COND_BRANCH_FLAGS (ins, ARMCOND_MI); /* N set, swapped args */
2782 ARM_FCMP (code, ARM_FPA_CMF, ins->sreg2, ins->sreg1);
2783 #elif defined(ARM_FPU_VFP)
2784 ARM_CMPD (code, ins->sreg2, ins->sreg1);
2786 EMIT_COND_BRANCH_FLAGS (ins, ARMCOND_VS); /* V set */
2787 EMIT_COND_BRANCH_FLAGS (ins, ARMCOND_MI); /* N set, swapped args */
2791 ARM_FCMP (code, ARM_FPA_CMF, ins->sreg1, ins->sreg2);
2792 #elif defined(ARM_FPU_VFP)
2793 ARM_CMPD (code, ins->sreg1, ins->sreg2);
2795 EMIT_COND_BRANCH_FLAGS (ins, ARMCOND_CS);
2799 ARM_FCMP (code, ARM_FPA_CMF, ins->sreg1, ins->sreg2);
2800 #elif defined(ARM_FPU_VFP)
2801 ARM_CMPD (code, ins->sreg1, ins->sreg2);
2803 EMIT_COND_BRANCH_FLAGS (ins, ARMCOND_VS); /* V set */
2804 EMIT_COND_BRANCH_FLAGS (ins, ARMCOND_GE);
2808 ARM_FCMP (code, ARM_FPA_CMF, ins->sreg2, ins->sreg1);
2809 #elif defined(ARM_FPU_VFP)
2810 ARM_CMPD (code, ins->sreg2, ins->sreg1);
2812 EMIT_COND_BRANCH_FLAGS (ins, ARMCOND_CS); /* swapped */
2816 ARM_FCMP (code, ARM_FPA_CMF, ins->sreg2, ins->sreg1);
2817 #elif defined(ARM_FPU_VFP)
2818 ARM_CMPD (code, ins->sreg2, ins->sreg1);
2820 EMIT_COND_BRANCH_FLAGS (ins, ARMCOND_VS); /* V set */
2821 EMIT_COND_BRANCH_FLAGS (ins, ARMCOND_GE); /* swapped */
2824 /*ppc_stfd (code, ins->sreg1, -8, ppc_sp);
2825 ppc_lwz (code, ppc_r11, -8, ppc_sp);
2826 ppc_rlwinm (code, ppc_r11, ppc_r11, 0, 1, 31);
2827 ppc_addis (code, ppc_r11, ppc_r11, -32752);
2828 ppc_rlwinmd (code, ppc_r11, ppc_r11, 1, 31, 31);
2829 EMIT_COND_SYSTEM_EXCEPTION (CEE_BEQ - CEE_BEQ, "ArithmeticException");*/
2830 g_assert_not_reached ();
2834 g_warning ("unknown opcode %s in %s()\n", mono_inst_name (ins->opcode), __FUNCTION__);
2835 g_assert_not_reached ();
2838 if ((cfg->opt & MONO_OPT_BRANCH) && ((code - cfg->native_code - offset) > max_len)) {
2839 g_warning ("wrong maximal instruction length of instruction %s (expected %d, got %d)",
2840 mono_inst_name (ins->opcode), max_len, code - cfg->native_code - offset);
2841 g_assert_not_reached ();
2847 last_offset = offset;
2852 cfg->code_len = code - cfg->native_code;
/* Register backend-specific low-level runtime helper calls.
 * No-op on ARM (function body is elided in this listing). */
2856 mono_arch_register_lowlevel_calls (void)
/*
 * patch_lis_ori: patch an in-place 32-bit constant load emitted as a
 * lis/ori instruction pair, writing the high halfword of VAL into the
 * immediate field of the first instruction and the low halfword into
 * the second.
 * NOTE(review): this is a PPC-style patching helper kept in the ARM
 * backend; the closing of the do/while wrapper is not visible in this
 * listing — confirm against the full file.
 */
2860 #define patch_lis_ori(ip,val) do {\
2861 guint16 *__lis_ori = (guint16*)(ip); \
2862 __lis_ori [1] = (((guint32)(val)) >> 16) & 0xffff; \
2863 __lis_ori [3] = ((guint32)(val)) & 0xffff; \
/*
 * mono_arch_patch_code:
 * Walk the jump-info list JI recorded during code generation for METHOD
 * and patch each site in the finished native CODE with its resolved
 * target. RUN_CCTORS is forwarded to mono_resolve_patch_target ().
 * NOTE(review): several lines (braces, break statements) are elided in
 * this listing; the comments below describe only the visible logic.
 */
2867 mono_arch_patch_code (MonoMethod *method, MonoDomain *domain, guint8 *code, MonoJumpInfo *ji, gboolean run_cctors)
2869 MonoJumpInfo *patch_info;
2871 for (patch_info = ji; patch_info; patch_info = patch_info->next) {
2872 unsigned char *ip = patch_info->ip.i + code;
2873 const unsigned char *target;
/* A switch is emitted with an inline jump table right after the branch
 * instruction: fill each slot with the absolute address computed from
 * the per-case offset stored at compile time. */
2875 if (patch_info->type == MONO_PATCH_INFO_SWITCH) {
2876 gpointer *jt = (gpointer*)(ip + 8);
2878 /* jt is the inlined jump table, 2 instructions after ip
2879 * In the normal case we store the absolute addresses,
2880 * otherwise the displacements.
2882 for (i = 0; i < patch_info->data.table->table_size; i++) {
2883 jt [i] = code + (int)patch_info->data.table->table [i];
2887 target = mono_resolve_patch_target (method, domain, code, patch_info, run_cctors);
2889 switch (patch_info->type) {
/* The cases guarded by g_assert_not_reached () below are PPC-style
 * patching strategies not (yet) used by the ARM backend. */
2890 case MONO_PATCH_INFO_IP:
2891 g_assert_not_reached ();
2892 patch_lis_ori (ip, ip);
2894 case MONO_PATCH_INFO_METHOD_REL:
2895 g_assert_not_reached ();
2896 *((gpointer *)(ip)) = code + patch_info->data.offset;
2898 case MONO_PATCH_INFO_METHODCONST:
2899 case MONO_PATCH_INFO_CLASS:
2900 case MONO_PATCH_INFO_IMAGE:
2901 case MONO_PATCH_INFO_FIELD:
2902 case MONO_PATCH_INFO_VTABLE:
2903 case MONO_PATCH_INFO_IID:
2904 case MONO_PATCH_INFO_SFLDA:
2905 case MONO_PATCH_INFO_LDSTR:
2906 case MONO_PATCH_INFO_TYPE_FROM_HANDLE:
2907 case MONO_PATCH_INFO_LDTOKEN:
2908 g_assert_not_reached ();
2909 /* from OP_AOTCONST : lis + ori */
2910 patch_lis_ori (ip, target);
2912 case MONO_PATCH_INFO_R4:
2913 case MONO_PATCH_INFO_R8:
2914 g_assert_not_reached ();
2915 *((gconstpointer *)(ip + 2)) = patch_info->data.target;
2917 case MONO_PATCH_INFO_EXC_NAME:
2918 g_assert_not_reached ();
2919 *((gconstpointer *)(ip + 1)) = patch_info->data.name;
2921 case MONO_PATCH_INFO_NONE:
2922 case MONO_PATCH_INFO_BB_OVF:
2923 case MONO_PATCH_INFO_EXC_OVF:
2924 /* everything is dealt with at epilog output time */
/* Default path for ARM: patch the branch/call instruction at ip to
 * reach the resolved target. */
2929 arm_patch (ip, target);
2934 * Stack frame layout:
2936 * ------------------- fp
2937 * MonoLMF structure or saved registers
2938 * -------------------
2940 * -------------------
2942 * -------------------
2943 * optional 8 bytes for tracing
2944 * -------------------
2945 * param area size is cfg->param_area
2946 * ------------------- sp
/*
 * mono_arch_emit_prolog:
 * Emit the method prolog for CFG: save callee-saved registers (or build
 * a MonoLMF frame when method->save_lmf is set), allocate and align the
 * stack frame, move incoming arguments from their calling-convention
 * locations (registers or caller stack) into their home slots, and
 * optionally emit tracing instrumentation.
 * Returns the updated code pointer via cfg->native_code/cfg->code_len.
 * NOTE(review): this listing is a partial dump — several declarations,
 * braces and else-branches are elided; comments annotate only the
 * visible lines.
 */
2949 mono_arch_emit_prolog (MonoCompile *cfg)
2951 MonoMethod *method = cfg->method;
2953 MonoMethodSignature *sig;
2955 int alloc_size, pos, max_offset, i, rot_amount;
2962 if (mono_jit_trace_calls != NULL && mono_trace_eval (method))
/* Initial code-buffer size is a heuristic based on the argument count. */
2965 sig = mono_method_signature (method);
2966 cfg->code_size = 256 + sig->param_count * 20;
2967 code = cfg->native_code = g_malloc (cfg->code_size);
/* Save the incoming sp in ip so the pushed ip slot records the
 * caller's stack pointer. */
2969 ARM_MOV_REG_REG (code, ARMREG_IP, ARMREG_SP);
2971 alloc_size = cfg->stack_offset;
2974 if (!method->save_lmf) {
/* Plain frame: push the used callee-saved registers plus ip and lr,
 * and compute how much stack the pushes consumed. */
2975 ARM_PUSH (code, (cfg->used_int_regs | (1 << ARMREG_IP) | (1 << ARMREG_LR)));
2976 prev_sp_offset = 8; /* ip and lr */
2977 for (i = 0; i < 16; ++i) {
2978 if (cfg->used_int_regs & (1 << i))
2979 prev_sp_offset += 4;
/* LMF frame: 0x5ff0 pushes r4-r12 and lr (all but r0-r3, sp and pc)
 * so the callee-saved registers land directly inside the MonoLMF. */
2982 ARM_PUSH (code, 0x5ff0);
2983 prev_sp_offset = 4 * 10; /* all but r0-r3, sp and pc */
2984 pos += sizeof (MonoLMF) - prev_sp_offset;
2988 // align to MONO_ARCH_FRAME_ALIGNMENT bytes
2989 if (alloc_size & (MONO_ARCH_FRAME_ALIGNMENT - 1)) {
2990 alloc_size += MONO_ARCH_FRAME_ALIGNMENT - 1;
2991 alloc_size &= ~(MONO_ARCH_FRAME_ALIGNMENT - 1);
2994 /* the stack used in the pushed regs */
2995 if (prev_sp_offset & 4)
2997 cfg->stack_usage = alloc_size;
/* Subtract the frame size from sp; use a rotated-imm8 encoding when
 * possible, otherwise load the size into ip first. */
2999 if ((i = mono_arm_is_rotated_imm8 (alloc_size, &rot_amount)) >= 0) {
3000 ARM_SUB_REG_IMM (code, ARMREG_SP, ARMREG_SP, i, rot_amount);
3002 code = mono_arm_emit_load_imm (code, ARMREG_IP, alloc_size);
3003 ARM_SUB_REG_REG (code, ARMREG_SP, ARMREG_SP, ARMREG_IP);
3006 if (cfg->frame_reg != ARMREG_SP)
3007 ARM_MOV_REG_REG (code, cfg->frame_reg, ARMREG_SP);
3008 //g_print ("prev_sp_offset: %d, alloc_size:%d\n", prev_sp_offset, alloc_size);
3009 prev_sp_offset += alloc_size;
3011 /* compute max_offset in order to use short forward jumps
3012 * we could skip do it on arm because the immediate displacement
3013 * for jumps is large enough, it may be useful later for constant pools
3016 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
3017 MonoInst *ins = bb->code;
3018 bb->max_offset = max_offset;
3020 if (cfg->prof_options & MONO_PROFILE_COVERAGE)
3024 max_offset += ((guint8 *)ins_get_spec (ins->opcode))[MONO_INST_LEN];
3029 /* load arguments allocated to register from the stack */
3032 cinfo = calculate_sizes (sig, sig->pinvoke);
/* Valuetype return: store the hidden return-buffer pointer register
 * into its stack slot. */
3034 if (MONO_TYPE_ISSTRUCT (sig->ret)) {
3035 ArgInfo *ainfo = &cinfo->ret;
3037 g_assert (arm_is_imm12 (inst->inst_offset));
3038 ARM_STR_IMM (code, ainfo->reg, inst->inst_basereg, inst->inst_offset);
/* Move each incoming argument (including the implicit this) from its
 * calling-convention location to the slot the regalloc assigned it. */
3040 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
3041 ArgInfo *ainfo = cinfo->args + i;
3042 inst = cfg->args [pos];
3044 if (cfg->verbose_level > 2)
3045 g_print ("Saving argument %d (type: %d)\n", i, ainfo->regtype);
/* Argument lives in a register for the whole method. */
3046 if (inst->opcode == OP_REGVAR) {
3047 if (ainfo->regtype == RegTypeGeneral)
3048 ARM_MOV_REG_REG (code, inst->dreg, ainfo->reg);
3049 else if (ainfo->regtype == RegTypeFP) {
3050 g_assert_not_reached ();
3051 } else if (ainfo->regtype == RegTypeBase) {
3052 g_assert (arm_is_imm12 (prev_sp_offset + ainfo->offset));
3053 ARM_LDR_IMM (code, inst->dreg, ARMREG_SP, (prev_sp_offset + ainfo->offset));
3055 g_assert_not_reached ();
3057 if (cfg->verbose_level > 2)
3058 g_print ("Argument %d assigned to register %s\n", pos, mono_arch_regname (inst->dreg));
3060 /* the argument should be put on the stack: FIXME handle size != word */
/* Incoming register argument stored to its stack home, sized store
 * chosen by ainfo->size (1 = byte, 2 = halfword, 8 = two words). */
3061 if (ainfo->regtype == RegTypeGeneral) {
3062 switch (ainfo->size) {
3064 if (arm_is_imm12 (inst->inst_offset))
3065 ARM_STRB_IMM (code, ainfo->reg, inst->inst_basereg, inst->inst_offset);
3067 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
3068 ARM_STRB_REG_REG (code, ainfo->reg, inst->inst_basereg, ARMREG_IP);
3072 if (arm_is_imm8 (inst->inst_offset)) {
3073 ARM_STRH_IMM (code, ainfo->reg, inst->inst_basereg, inst->inst_offset);
3075 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
3076 ARM_ADD_REG_REG (code, ARMREG_IP, ARMREG_IP, inst->inst_basereg);
3077 ARM_STRH_IMM (code, ainfo->reg, ARMREG_IP, 0);
3081 g_assert (arm_is_imm12 (inst->inst_offset));
3082 ARM_STR_IMM (code, ainfo->reg, inst->inst_basereg, inst->inst_offset);
3083 g_assert (arm_is_imm12 (inst->inst_offset + 4));
3084 ARM_STR_IMM (code, ainfo->reg + 1, inst->inst_basereg, inst->inst_offset + 4);
3087 if (arm_is_imm12 (inst->inst_offset)) {
3088 ARM_STR_IMM (code, ainfo->reg, inst->inst_basereg, inst->inst_offset);
3090 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
3091 ARM_STR_REG_REG (code, ainfo->reg, inst->inst_basereg, ARMREG_IP);
/* 64-bit argument split between the last register (r3) and the
 * caller's stack: store both halves. */
3095 } else if (ainfo->regtype == RegTypeBaseGen) {
3096 g_assert (arm_is_imm12 (prev_sp_offset + ainfo->offset));
3097 g_assert (arm_is_imm12 (inst->inst_offset));
3098 ARM_LDR_IMM (code, ARMREG_LR, ARMREG_SP, (prev_sp_offset + ainfo->offset));
3099 ARM_STR_IMM (code, ARMREG_LR, inst->inst_basereg, inst->inst_offset + 4);
3100 ARM_STR_IMM (code, ARMREG_R3, inst->inst_basereg, inst->inst_offset);
/* Argument passed on the caller's stack: load it through sp (lr used
 * as scratch) and store to the local slot. */
3101 } else if (ainfo->regtype == RegTypeBase) {
3102 g_assert (arm_is_imm12 (prev_sp_offset + ainfo->offset));
3103 switch (ainfo->size) {
3105 ARM_LDR_IMM (code, ARMREG_LR, ARMREG_SP, (prev_sp_offset + ainfo->offset));
3106 g_assert (arm_is_imm12 (inst->inst_offset));
3107 ARM_STRB_IMM (code, ARMREG_LR, inst->inst_basereg, inst->inst_offset);
3110 ARM_LDR_IMM (code, ARMREG_LR, ARMREG_SP, (prev_sp_offset + ainfo->offset));
3111 if (arm_is_imm8 (inst->inst_offset)) {
3112 ARM_STRH_IMM (code, ARMREG_LR, inst->inst_basereg, inst->inst_offset);
3114 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
3115 ARM_ADD_REG_REG (code, ARMREG_IP, ARMREG_IP, inst->inst_basereg);
3116 ARM_STRH_IMM (code, ARMREG_LR, ARMREG_IP, 0);
3120 g_assert (arm_is_imm12 (inst->inst_offset));
3121 ARM_LDR_IMM (code, ARMREG_LR, ARMREG_SP, (prev_sp_offset + ainfo->offset));
3122 ARM_STR_IMM (code, ARMREG_LR, inst->inst_basereg, inst->inst_offset);
3123 g_assert (arm_is_imm12 (prev_sp_offset + ainfo->offset + 4));
3124 g_assert (arm_is_imm12 (inst->inst_offset + 4));
3125 ARM_LDR_IMM (code, ARMREG_LR, ARMREG_SP, (prev_sp_offset + ainfo->offset + 4));
3126 ARM_STR_IMM (code, ARMREG_LR, inst->inst_basereg, inst->inst_offset + 4);
3129 g_assert (arm_is_imm12 (inst->inst_offset));
3130 ARM_LDR_IMM (code, ARMREG_LR, ARMREG_SP, (prev_sp_offset + ainfo->offset));
3131 ARM_STR_IMM (code, ARMREG_LR, inst->inst_basereg, inst->inst_offset);
3134 } else if (ainfo->regtype == RegTypeFP) {
3135 g_assert_not_reached ();
/* Struct passed (partly) in registers: store the register words, then
 * memcpy any remainder from the caller's stack area. */
3136 } else if (ainfo->regtype == RegTypeStructByVal) {
3137 int doffset = inst->inst_offset;
3141 if (mono_class_from_mono_type (inst->inst_vtype))
3142 size = mono_class_native_size (mono_class_from_mono_type (inst->inst_vtype), NULL);
3143 for (cur_reg = 0; cur_reg < ainfo->size; ++cur_reg) {
3144 g_assert (arm_is_imm12 (doffset));
3145 ARM_STR_IMM (code, ainfo->reg + cur_reg, inst->inst_basereg, doffset);
3146 soffset += sizeof (gpointer);
3147 doffset += sizeof (gpointer);
3149 if (ainfo->vtsize) {
3150 /* FIXME: handle overrun! with struct sizes not multiple of 4 */
3151 //g_print ("emit_memcpy (prev_sp_ofs: %d, ainfo->offset: %d, soffset: %d)\n", prev_sp_offset, ainfo->offset, soffset);
3152 code = emit_memcpy (code, ainfo->vtsize * sizeof (gpointer), inst->inst_basereg, doffset, ARMREG_SP, prev_sp_offset + ainfo->offset);
3154 } else if (ainfo->regtype == RegTypeStructByAddr) {
3155 g_assert_not_reached ();
3156 /* FIXME: handle overrun! with struct sizes not multiple of 4 */
3157 code = emit_memcpy (code, ainfo->vtsize * sizeof (gpointer), inst->inst_basereg, inst->inst_offset, ainfo->reg, 0);
3159 g_assert_not_reached ();
/* LMF setup: call mono_get_lmf_addr and link the on-stack MonoLMF into
 * the LMF chain (see the inline comments below). */
3164 if (method->save_lmf) {
3166 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_INTERNAL_METHOD,
3167 (gpointer)"mono_get_lmf_addr");
3168 if (cfg->method->dynamic) {
/* Dynamic methods cannot be patched in place, so load the helper
 * address from an inline constant slot and call through ip. */
3169 ARM_LDR_IMM (code, ARMREG_IP, ARMREG_PC, 0);
3171 *(gpointer*)code = NULL;
3173 code = emit_call_reg (code, ARMREG_IP);
3177 /* we build the MonoLMF structure on the stack - see mini-arm.h */
3178 /* lmf_offset is the offset from the previous stack pointer,
3179 * alloc_size is the total stack space allocated, so the offset
3180 * of MonoLMF from the current stack ptr is alloc_size - lmf_offset.
3181 * The pointer to the struct is put in r1 (new_lmf).
3182 * r2 is used as scratch
3183 * The callee-saved registers are already in the MonoLMF structure
3185 code = emit_big_add (code, ARMREG_R1, ARMREG_SP, alloc_size - lmf_offset);
3186 /* r0 is the result from mono_get_lmf_addr () */
3187 ARM_STR_IMM (code, ARMREG_R0, ARMREG_R1, G_STRUCT_OFFSET (MonoLMF, lmf_addr));
3188 /* new_lmf->previous_lmf = *lmf_addr */
3189 ARM_LDR_IMM (code, ARMREG_R2, ARMREG_R0, G_STRUCT_OFFSET (MonoLMF, previous_lmf));
3190 ARM_STR_IMM (code, ARMREG_R2, ARMREG_R1, G_STRUCT_OFFSET (MonoLMF, previous_lmf));
3191 /* *(lmf_addr) = r1 */
3192 ARM_STR_IMM (code, ARMREG_R1, ARMREG_R0, G_STRUCT_OFFSET (MonoLMF, previous_lmf));
3193 /* save method info */
3194 code = mono_arm_emit_load_imm (code, ARMREG_R2, GPOINTER_TO_INT (method));
3195 ARM_STR_IMM (code, ARMREG_R2, ARMREG_R1, G_STRUCT_OFFSET (MonoLMF, method));
3196 ARM_STR_IMM (code, ARMREG_SP, ARMREG_R1, G_STRUCT_OFFSET (MonoLMF, ebp));
3197 /* save the current IP */
3198 ARM_MOV_REG_REG (code, ARMREG_R2, ARMREG_PC);
3199 ARM_STR_IMM (code, ARMREG_R2, ARMREG_R1, G_STRUCT_OFFSET (MonoLMF, eip));
3203 code = mono_arch_instrument_prolog (cfg, mono_trace_enter_method, code, TRUE);
3205 cfg->code_len = code - cfg->native_code;
3206 g_assert (cfg->code_len < cfg->code_size);
/*
 * mono_arch_emit_epilog:
 * Emit the method epilog for CFG: optionally emit leave tracing,
 * unlink the MonoLMF from the LMF chain when one was set up in the
 * prolog, free the stack frame and restore the callee-saved registers,
 * returning to the caller by popping lr into pc.
 * NOTE(review): partial listing — some braces/else lines are elided.
 */
3213 mono_arch_emit_epilog (MonoCompile *cfg)
3215 MonoMethod *method = cfg->method;
3216 int pos, i, rot_amount;
3217 int max_epilog_size = 16 + 20*4;
/* Conservative upper bound on the epilog size, then grow the code
 * buffer until it fits. */
3220 if (cfg->method->save_lmf)
3221 max_epilog_size += 128;
3223 if (mono_jit_trace_calls != NULL)
3224 max_epilog_size += 50;
3226 if (cfg->prof_options & MONO_PROFILE_ENTER_LEAVE)
3227 max_epilog_size += 50;
3229 while (cfg->code_len + max_epilog_size > (cfg->code_size - 16)) {
3230 cfg->code_size *= 2;
3231 cfg->native_code = g_realloc (cfg->native_code, cfg->code_size);
3232 mono_jit_stats.code_reallocs++;
3236 * Keep in sync with OP_JMP
3238 code = cfg->native_code + cfg->code_len;
3240 if (mono_jit_trace_calls != NULL && mono_trace_eval (method)) {
3241 code = mono_arch_instrument_epilog (cfg, mono_trace_leave_method, code, TRUE);
/* LMF teardown: pop this frame's MonoLMF off the per-thread LMF chain
 * and restore the saved registers from the structure itself. */
3245 if (method->save_lmf) {
3247 /* all but r0-r3, sp and pc */
3248 pos += sizeof (MonoLMF) - (4 * 10);
3250 /* r2 contains the pointer to the current LMF */
3251 code = emit_big_add (code, ARMREG_R2, cfg->frame_reg, cfg->stack_usage - lmf_offset);
3252 /* ip = previous_lmf */
3253 ARM_LDR_IMM (code, ARMREG_IP, ARMREG_R2, G_STRUCT_OFFSET (MonoLMF, previous_lmf));
3255 ARM_LDR_IMM (code, ARMREG_LR, ARMREG_R2, G_STRUCT_OFFSET (MonoLMF, lmf_addr));
3256 /* *(lmf_addr) = previous_lmf */
3257 ARM_STR_IMM (code, ARMREG_IP, ARMREG_LR, G_STRUCT_OFFSET (MonoLMF, previous_lmf));
3258 /* FIXME: speedup: there is no actual need to restore the registers if
3259 * we didn't actually change them (idea from Zoltan).
3262 /* point sp at the registers to restore: 10 is 14 -4, because we skip r0-r3 */
3263 ARM_ADD_REG_IMM8 (code, ARMREG_SP, ARMREG_R2, (sizeof (MonoLMF) - 10 * sizeof (gulong)));
3264 ARM_POP_NWB (code, 0xaff0); /* restore ip to sp and lr to pc */
/* Plain frame: add the frame size back to sp (rotated-imm8 when it
 * encodes, otherwise via ip), then pop saved regs, sp and pc. */
3266 if ((i = mono_arm_is_rotated_imm8 (cfg->stack_usage, &rot_amount)) >= 0) {
3267 ARM_ADD_REG_IMM (code, ARMREG_SP, cfg->frame_reg, i, rot_amount);
3269 code = mono_arm_emit_load_imm (code, ARMREG_IP, cfg->stack_usage);
3270 ARM_ADD_REG_REG (code, ARMREG_SP, ARMREG_SP, ARMREG_IP);
3272 /* FIXME: add v4 thumb interworking support */
3273 ARM_POP_NWB (code, cfg->used_int_regs | ((1 << ARMREG_SP) | (1 << ARMREG_PC)));
3276 cfg->code_len = code - cfg->native_code;
3278 g_assert (cfg->code_len < cfg->code_size);
3282 /* remove once throw_exception_by_name is eliminated */
/*
 * exception_id_by_name:
 * Map an intrinsic exception class NAME to its MONO_EXC_* id.
 * Aborts via g_error () on an unknown name, so callers may rely on a
 * valid id being returned.
 */
3284 exception_id_by_name (const char *name)
3286 if (strcmp (name, "IndexOutOfRangeException") == 0)
3287 return MONO_EXC_INDEX_OUT_OF_RANGE;
3288 if (strcmp (name, "OverflowException") == 0)
3289 return MONO_EXC_OVERFLOW;
3290 if (strcmp (name, "ArithmeticException") == 0)
3291 return MONO_EXC_ARITHMETIC;
3292 if (strcmp (name, "DivideByZeroException") == 0)
3293 return MONO_EXC_DIVIDE_BY_ZERO;
3294 if (strcmp (name, "InvalidCastException") == 0)
3295 return MONO_EXC_INVALID_CAST;
3296 if (strcmp (name, "NullReferenceException") == 0)
3297 return MONO_EXC_NULL_REF;
3298 if (strcmp (name, "ArrayTypeMismatchException") == 0)
3299 return MONO_EXC_ARRAY_TYPE_MISMATCH;
3300 g_error ("Unknown intrinsic exception %s\n", name);
/*
 * mono_arch_emit_exceptions:
 * Emit the out-of-line exception-throwing stubs for CFG. Each distinct
 * intrinsic exception gets one stub (12 bytes budgeted per stub);
 * conditional branches recorded as MONO_PATCH_INFO_EXC are patched to
 * jump to the shared stub, which loads the exception name into r0 and
 * calls mono_arch_throw_exception_by_name.
 * NOTE(review): partial listing — some lines are elided.
 */
3305 mono_arch_emit_exceptions (MonoCompile *cfg)
3307 MonoJumpInfo *patch_info;
3310 const guint8* exc_throw_pos [MONO_EXC_INTRINS_NUM] = {NULL};
3311 guint8 exc_throw_found [MONO_EXC_INTRINS_NUM] = {0};
3312 int max_epilog_size = 50;
3314 /* count the number of exception infos */
3317 * make sure we have enough space for exceptions
3318 * 12 is the simulated call to throw_exception_by_name
/* First pass: budget space for one stub per distinct exception id. */
3320 for (patch_info = cfg->patch_info; patch_info; patch_info = patch_info->next) {
3321 if (patch_info->type == MONO_PATCH_INFO_EXC) {
3322 i = exception_id_by_name (patch_info->data.target);
3323 if (!exc_throw_found [i]) {
3324 max_epilog_size += 12;
3325 exc_throw_found [i] = TRUE;
3330 while (cfg->code_len + max_epilog_size > (cfg->code_size - 16)) {
3331 cfg->code_size *= 2;
3332 cfg->native_code = g_realloc (cfg->native_code, cfg->code_size);
3333 mono_jit_stats.code_reallocs++;
3336 code = cfg->native_code + cfg->code_len;
3338 /* add code to raise exceptions */
3339 for (patch_info = cfg->patch_info; patch_info; patch_info = patch_info->next) {
3340 switch (patch_info->type) {
3341 case MONO_PATCH_INFO_EXC: {
3342 unsigned char *ip = patch_info->ip.i + cfg->native_code;
3343 const char *ex_name = patch_info->data.target;
3344 i = exception_id_by_name (patch_info->data.target);
/* A stub for this exception already exists: just retarget the branch
 * and drop the patch entry. */
3345 if (exc_throw_pos [i]) {
3346 arm_patch (ip, exc_throw_pos [i]);
3347 patch_info->type = MONO_PATCH_INFO_NONE;
3350 exc_throw_pos [i] = code;
3352 arm_patch (ip, code);
3353 //*(int*)code = 0xef9f0001;
3355 /*mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_EXC_NAME, patch_info->data.target);*/
/* Stub body: load the exception-name pointer (stored inline right
 * after the stub) into r0, then the patch entry is retyped so the
 * generic patcher emits the call to the throw helper. */
3356 ARM_LDR_IMM (code, ARMREG_R0, ARMREG_PC, 0);
3357 /* we got here from a conditional call, so the calling ip is set in lr already */
3358 patch_info->type = MONO_PATCH_INFO_INTERNAL_METHOD;
3359 patch_info->data.name = "mono_arch_throw_exception_by_name";
3360 patch_info->ip.i = code - cfg->native_code;
3362 *(gconstpointer*)code = ex_name;
3372 cfg->code_len = code - cfg->native_code;
3374 g_assert (cfg->code_len < cfg->code_size);
/* Arch hook to initialize per-thread JIT TLS data (body elided in this
 * listing; presumably a no-op on ARM — confirm against the full file). */
3379 mono_arch_setup_jit_tls_data (MonoJitTlsData *tls)
/* Arch hook to release per-thread JIT TLS data (body elided in this
 * listing; presumably a no-op on ARM — confirm against the full file). */
3384 mono_arch_free_jit_tls_data (MonoJitTlsData *tls)
/*
 * mono_arch_emit_this_vret_args:
 * Emit the IR that places the implicit call arguments for INST: the
 * `this` pointer (THIS_REG, typed THIS_TYPE) and, for valuetype
 * returns, the return-buffer pointer (VT_REG). `this` normally goes in
 * r0; the visible code moves it to r1 when the vret buffer takes r0.
 * A value of -1 for THIS_REG/VT_REG means the argument is absent.
 */
3389 mono_arch_emit_this_vret_args (MonoCompile *cfg, MonoCallInst *inst, int this_reg, int this_type, int vt_reg)
3392 int this_dreg = ARMREG_R0;
3395 this_dreg = ARMREG_R1;
3397 /* add the this argument */
3398 if (this_reg != -1) {
3400 MONO_INST_NEW (cfg, this, OP_SETREG);
3401 this->type = this_type;
3402 this->sreg1 = this_reg;
3403 this->dreg = mono_regstate_next_int (cfg->rs);
3404 mono_bblock_add_inst (cfg->cbb, this);
3405 mono_call_inst_add_outarg_reg (cfg, inst, this->dreg, this_dreg, FALSE);
/* Valuetype return: pass the return-buffer address in r0. */
3410 MONO_INST_NEW (cfg, vtarg, OP_SETREG);
3411 vtarg->type = STACK_MP;
3412 vtarg->sreg1 = vt_reg;
3413 vtarg->dreg = mono_regstate_next_int (cfg->rs);
3414 mono_bblock_add_inst (cfg->cbb, vtarg);
3415 mono_call_inst_add_outarg_reg (cfg, inst, vtarg->dreg, ARMREG_R0, FALSE);
/*
 * mono_arch_get_inst_for_method:
 * Return an arch-specific intrinsic IR node for CMETHOD, or NULL when
 * no intrinsic applies. The only visible mapping is
 * Thread.MemoryBarrier -> OP_MEMORY_BARRIER.
 */
3420 mono_arch_get_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
3422 MonoInst *ins = NULL;
3423 if (cmethod->klass == mono_defaults.thread_class &&
3424 strcmp (cmethod->name, "MemoryBarrier") == 0) {
3425 MONO_INST_NEW (cfg, ins, OP_MEMORY_BARRIER);
/* Debug hook: print arch-specific info for an IR tree node (body elided
 * in this listing). */
3431 mono_arch_print_tree (MonoInst *tree, int arity)
/* Return an IR intrinsic for fetching the current MonoDomain, or NULL
 * when unsupported (body elided in this listing). */
3436 MonoInst* mono_arch_get_domain_intrinsic (MonoCompile* cfg)
/* Return an IR intrinsic for fetching the current thread object, or
 * NULL when unsupported (body elided in this listing). */
3442 mono_arch_get_thread_intrinsic (MonoCompile* cfg)
/* Flush register windows — relevant only on SPARC-like targets; a
 * no-op on ARM (body elided in this listing). */
3448 mono_arch_flush_register_windows (void)
/*
 * mono_arch_fixup_jinfo:
 * Encode the method's stack usage into the jit-info used_regs field:
 * stack_usage must be word-aligned and at most 64KB * 4, and is packed
 * into the bits above position 14.
 */
3453 mono_arch_fixup_jinfo (MonoCompile *cfg)
3455 /* max encoded stack usage is 64KB * 4 */
3456 g_assert ((cfg->stack_usage & ~(0xffff << 2)) == 0);
3457 cfg->jit_info->used_regs |= cfg->stack_usage << 14;