2 * mini-arm.c: ARM backend for the Mono code generator
5 * Paolo Molaro (lupus@ximian.com)
6 * Dietmar Maurer (dietmar@ximian.com)
8 * (C) 2003 Ximian, Inc.
13 #include <mono/metadata/appdomain.h>
14 #include <mono/metadata/debug-helpers.h>
15 #include <mono/utils/mono-mmap.h>
22 #include "mono/arch/arm/arm-fpa-codegen.h"
23 #elif defined(ARM_FPU_VFP)
24 #include "mono/arch/arm/arm-vfp-codegen.h"
27 #if defined(__ARM_EABI__) && defined(__linux__) && !defined(PLATFORM_ANDROID)
28 #define HAVE_AEABI_READ_TP 1
31 static gint lmf_tls_offset = -1;
32 static gint lmf_addr_tls_offset = -1;
34 /* This mutex protects architecture specific caches */
35 #define mono_mini_arch_lock() EnterCriticalSection (&mini_arch_mutex)
36 #define mono_mini_arch_unlock() LeaveCriticalSection (&mini_arch_mutex)
37 static CRITICAL_SECTION mini_arch_mutex;
39 static int v5_supported = 0;
40 static int v7_supported = 0;
41 static int thumb_supported = 0;
44 * The code generated for sequence points reads from this location, which is
45 * made read-only when single stepping is enabled.
47 static gpointer ss_trigger_page;
49 /* Enabled breakpoints read from this trigger page */
50 static gpointer bp_trigger_page;
52 /* Structure used by the sequence points in AOTed code */
54 gpointer ss_trigger_page;
55 gpointer bp_trigger_page;
56 guint8* bp_addrs [MONO_ZERO_LEN_ARRAY];
61 * floating point support: on ARM it is a mess, there are at least 3
62 * different setups, each of which binary incompat with the other.
63 * 1) FPA: old and ugly, but unfortunately what current distros use
64 * the double binary format has the two words swapped. 8 double registers.
65 * Implemented usually by kernel emulation.
66 * 2) softfloat: the compiler emulates all the fp ops. Usually uses the
67 * ugly swapped double format (I guess a softfloat-vfp exists, too, though).
68 * 3) VFP: the new and actually sensible and useful FP support. Implemented
69 * in HW or kernel-emulated, requires new tools. I think this is what symbian uses.
71 * The plan is to write the FPA support first. softfloat can be tested in a chroot.
73 int mono_exc_esp_offset = 0;
75 #define arm_is_imm12(v) ((v) > -4096 && (v) < 4096)
76 #define arm_is_imm8(v) ((v) > -256 && (v) < 256)
77 #define arm_is_fpimm8(v) ((v) >= -1020 && (v) <= 1020)
79 #define LDR_MASK ((0xf << ARMCOND_SHIFT) | (3 << 26) | (1 << 22) | (1 << 20) | (15 << 12))
80 #define LDR_PC_VAL ((ARMCOND_AL << ARMCOND_SHIFT) | (1 << 26) | (0 << 22) | (1 << 20) | (15 << 12))
81 #define IS_LDR_PC(val) (((val) & LDR_MASK) == LDR_PC_VAL)
83 #define ADD_LR_PC_4 ((ARMCOND_AL << ARMCOND_SHIFT) | (1 << 25) | (1 << 23) | (ARMREG_PC << 16) | (ARMREG_LR << 12) | 4)
84 #define MOV_LR_PC ((ARMCOND_AL << ARMCOND_SHIFT) | (1 << 24) | (0xa << 20) | (ARMREG_LR << 12) | ARMREG_PC)
87 /* A variant of ARM_LDR_IMM which can handle large offsets */
88 #define ARM_LDR_IMM_GENERAL(code, dreg, basereg, offset, scratch_reg) do { \
89 if (arm_is_imm12 ((offset))) { \
90 ARM_LDR_IMM (code, (dreg), (basereg), (offset)); \
92 g_assert ((scratch_reg) != (basereg)); \
93 code = mono_arm_emit_load_imm (code, (scratch_reg), (offset)); \
94 ARM_LDR_REG_REG (code, (dreg), (basereg), (scratch_reg)); \
98 #define ARM_STR_IMM_GENERAL(code, dreg, basereg, offset, scratch_reg) do { \
99 if (arm_is_imm12 ((offset))) { \
100 ARM_STR_IMM (code, (dreg), (basereg), (offset)); \
102 g_assert ((scratch_reg) != (basereg)); \
103 code = mono_arm_emit_load_imm (code, (scratch_reg), (offset)); \
104 ARM_STR_REG_REG (code, (dreg), (basereg), (scratch_reg)); \
/*
 * mono_arch_regname: map an integer register number to a printable name.
 * NOTE(review): this chunk is a sampled extract — the return type, the
 * trailing table entries, the return statements and the closing braces
 * are missing. Confirm against the complete mini-arm.c before editing.
 */
109 mono_arch_regname (int reg)
111 static const char * rnames[] = {
112 "arm_r0", "arm_r1", "arm_r2", "arm_r3", "arm_v1",
113 "arm_v2", "arm_v3", "arm_v4", "arm_v5", "arm_v6",
114 "arm_v7", "arm_fp", "arm_ip", "arm_sp", "arm_lr",
117 if (reg >= 0 && reg < 16)
/*
 * mono_arch_fregname: map a floating point register number (0..31) to a
 * printable name.
 * NOTE(review): sampled extract — return type, last table rows, returns
 * and braces are missing from this view; verify against the full source.
 */
123 mono_arch_fregname (int reg)
125 static const char * rnames[] = {
126 "arm_f0", "arm_f1", "arm_f2", "arm_f3", "arm_f4",
127 "arm_f5", "arm_f6", "arm_f7", "arm_f8", "arm_f9",
128 "arm_f10", "arm_f11", "arm_f12", "arm_f13", "arm_f14",
129 "arm_f15", "arm_f16", "arm_f17", "arm_f18", "arm_f19",
130 "arm_f20", "arm_f21", "arm_f22", "arm_f23", "arm_f24",
131 "arm_f25", "arm_f26", "arm_f27", "arm_f28", "arm_f29",
134 if (reg >= 0 && reg < 32)
/*
 * emit_big_add: emit code computing dreg = sreg + imm. Uses a single
 * ADD with a rotated 8-bit immediate when the constant fits; otherwise
 * materializes imm into dreg first (hence the dreg != sreg assert) and
 * adds the registers.
 * NOTE(review): sampled extract — the else/return/braces are missing.
 */
140 emit_big_add (guint8 *code, int dreg, int sreg, int imm)
142 int imm8, rot_amount;
143 if ((imm8 = mono_arm_is_rotated_imm8 (imm, &rot_amount)) >= 0) {
144 ARM_ADD_REG_IMM (code, dreg, sreg, imm8, rot_amount);
147 g_assert (dreg != sreg);
148 code = mono_arm_emit_load_imm (code, dreg, imm);
149 ARM_ADD_REG_REG (code, dreg, dreg, sreg);
/*
 * emit_memcpy: emit a word-by-word copy of 'size' bytes from
 * sreg+soffset to dreg+doffset. Large copies (> 4 pointer words) use a
 * countdown loop clobbering r0-r3; smaller copies are unrolled through
 * LR, falling back to rebasing into r0/r1 when the offsets do not fit
 * an imm12.
 * NOTE(review): sampled extract — loop bodies, else branches, the final
 * size decrements and closing braces are missing from this view.
 */
154 emit_memcpy (guint8 *code, int size, int dreg, int doffset, int sreg, int soffset)
156 /* we can use r0-r3, since this is called only for incoming args on the stack */
157 if (size > sizeof (gpointer) * 4) {
159 code = emit_big_add (code, ARMREG_R0, sreg, soffset);
160 code = emit_big_add (code, ARMREG_R1, dreg, doffset);
161 start_loop = code = mono_arm_emit_load_imm (code, ARMREG_R2, size);
162 ARM_LDR_IMM (code, ARMREG_R3, ARMREG_R0, 0);
163 ARM_STR_IMM (code, ARMREG_R3, ARMREG_R1, 0);
164 ARM_ADD_REG_IMM8 (code, ARMREG_R0, ARMREG_R0, 4);
165 ARM_ADD_REG_IMM8 (code, ARMREG_R1, ARMREG_R1, 4);
166 ARM_SUBS_REG_IMM8 (code, ARMREG_R2, ARMREG_R2, 4);
167 ARM_B_COND (code, ARMCOND_NE, 0);
168 arm_patch (code - 4, start_loop);
171 if (arm_is_imm12 (doffset) && arm_is_imm12 (doffset + size) &&
172 arm_is_imm12 (soffset) && arm_is_imm12 (soffset + size)) {
174 ARM_LDR_IMM (code, ARMREG_LR, sreg, soffset);
175 ARM_STR_IMM (code, ARMREG_LR, dreg, doffset);
181 code = emit_big_add (code, ARMREG_R0, sreg, soffset);
182 code = emit_big_add (code, ARMREG_R1, dreg, doffset);
183 doffset = soffset = 0;
185 ARM_LDR_IMM (code, ARMREG_LR, ARMREG_R0, soffset);
186 ARM_STR_IMM (code, ARMREG_LR, ARMREG_R1, doffset);
192 g_assert (size == 0);
/*
 * emit_call_reg: emit an indirect call through 'reg'. Presumably uses
 * BLX on ARMv5+ and the classic "mov lr, pc; mov pc, reg" pair
 * otherwise — the surrounding conditional lines are missing from this
 * sampled extract, so confirm against the full source.
 */
197 emit_call_reg (guint8 *code, int reg)
200 ARM_BLX_REG (code, reg);
202 ARM_MOV_REG_REG (code, ARMREG_LR, ARMREG_PC);
206 ARM_MOV_REG_REG (code, ARMREG_PC, reg);
/*
 * emit_call_seq: emit a call sequence for cfg->method. For dynamic
 * methods the target address is embedded as an inline literal (patched
 * later; NULL placeholder here), loaded into IP via a PC-relative LDR,
 * and called through emit_call_reg. The non-dynamic path is missing
 * from this sampled extract — verify against the full source.
 */
212 emit_call_seq (MonoCompile *cfg, guint8 *code)
214 if (cfg->method->dynamic) {
215 ARM_LDR_IMM (code, ARMREG_IP, ARMREG_PC, 0);
217 *(gpointer*)code = NULL;
219 code = emit_call_reg (code, ARMREG_IP);
/*
 * emit_move_return_value: after a call instruction, move the return
 * value from the ABI return location into ins->dreg. The visible FCALL
 * case moves FPA f0 (ARM_FPU_FPA) or rebuilds the VFP value from
 * r0/r1 (FMSR+CVTS for R4, FMDRR for R8).
 * NOTE(review): sampled extract — the switch's other opcode cases,
 * #endif lines and braces are missing from this view.
 */
227 emit_move_return_value (MonoCompile *cfg, MonoInst *ins, guint8 *code)
229 switch (ins->opcode) {
232 case OP_FCALL_MEMBASE:
234 if (ins->dreg != ARM_FPA_F0)
235 ARM_MVFD (code, ins->dreg, ARM_FPA_F0);
236 #elif defined(ARM_FPU_VFP)
237 if (((MonoCallInst*)ins)->signature->ret->type == MONO_TYPE_R4) {
238 ARM_FMSR (code, ins->dreg, ARMREG_R0);
239 ARM_CVTS (code, ins->dreg, ins->dreg);
241 ARM_FMDRR (code, ARMREG_R0, ARMREG_R1, ins->dreg);
/* NOTE(review): sampled extract — declarations (offset), struct-return
 * handling lines, offset updates inside the loop and the final return
 * are missing from this view; confirm against the full mini-arm.c. */
251 * mono_arch_get_argument_info:
252 * @csig: a method signature
253 * @param_count: the number of parameters to consider
254 * @arg_info: an array to store the result infos
256 * Gathers information on parameters such as size, alignment and
257 * padding. arg_info should be large enought to hold param_count + 1 entries.
259 * Returns the size of the activation frame.
262 mono_arch_get_argument_info (MonoMethodSignature *csig, int param_count, MonoJitArgumentInfo *arg_info)
264 int k, frame_size = 0;
265 guint32 size, align, pad;
268 if (MONO_TYPE_ISSTRUCT (csig->ret)) {
269 frame_size += sizeof (gpointer);
273 arg_info [0].offset = offset;
276 frame_size += sizeof (gpointer);
280 arg_info [0].size = frame_size;
282 for (k = 0; k < param_count; k++) {
283 size = mini_type_stack_size_full (NULL, csig->params [k], &align, csig->pinvoke);
285 /* ignore alignment for now */
288 frame_size += pad = (align - (frame_size & (align - 1))) & (align - 1);
289 arg_info [k].pad = pad;
291 arg_info [k + 1].pad = 0;
292 arg_info [k + 1].size = size;
294 arg_info [k + 1].offset = offset;
298 align = MONO_ARCH_FRAME_ALIGNMENT;
299 frame_size += pad = (align - (frame_size & (align - 1))) & (align - 1);
300 arg_info [k].pad = pad;
/*
 * decode_vcall_slot_from_ldr: given an A32 LDR instruction encoding,
 * recover the base register (bits 16-19) and the 12-bit immediate
 * offset, negating the offset when the U bit (bit 23) is clear, and
 * return the base register's current value from 'regs' with the offset
 * stored through 'displacement'.
 * NOTE(review): sampled extract — declarations, the offset negation
 * statement, the return and braces are missing from this view.
 */
306 decode_vcall_slot_from_ldr (guint32 ldr, mgreg_t *regs, int *displacement)
310 reg = (ldr >> 16 ) & 0xf;
311 offset = ldr & 0xfff;
312 if (((ldr >> 23) & 1) == 0) /*U bit, 0 means negative and 1 positive*/
314 /*g_print ("found vcall at r%d + %d for code at %p 0x%x\n", reg, offset, code, *code);*/
315 o = (gpointer)regs [reg];
317 *displacement = offset;
/*
 * mono_arch_get_vcall_slot: given the return address of a virtual/IMT
 * call, pattern-match the preceding instructions to locate the vtable
 * slot that was called through, and decode it via
 * decode_vcall_slot_from_ldr. Direct bl/mov branches are ignored
 * (return path not visible in this extract).
 * NOTE(review): sampled extract — large parts of the explanatory
 * comment, the fallthrough return and braces are missing.
 */
322 mono_arch_get_vcall_slot (guint8 *code_ptr, mgreg_t *regs, int *displacement)
324 guint32* code = (guint32*)code_ptr;
326 /* Locate the address of the method-specific trampoline. The call using
327 the vtable slot that took the processing flow to 'arch_create_jit_trampoline'
328 looks something like this:
337 The call sequence could be also:
340 function pointer literal
344 Note that on ARM5+ we can use one instruction instead of the last two.
345 Therefore, we need to locate the 'ldr rA' instruction to know which
346 register was used to hold the method addrs.
349 /* This is the instruction after "ldc pc, xxx", "mov pc, xxx" or "bl xxx" could be either the IMT value or some other instruction*/
352 /* Three possible code sequences can happen here:
356 * ldr pc, [rX - #offset]
362 * ldr pc, [rX - #offset]
364 * direct branch with bl:
368 * direct branch with mov:
372 * We only need to identify interface and virtual calls, the others can be ignored.
375 if (IS_LDR_PC (code [-1]) && code [-2] == ADD_LR_PC_4)
376 return decode_vcall_slot_from_ldr (code [-1], regs, displacement);
378 if (IS_LDR_PC (code [0]) && code [-1] == MOV_LR_PC)
379 return decode_vcall_slot_from_ldr (code [0], regs, displacement);
384 #define MAX_ARCH_DELEGATE_PARAMS 3
/*
 * get_delegate_invoke_impl: generate a small delegate-invoke thunk.
 * has_target: load method_ptr into IP, replace the this argument (r0)
 * with the delegate target, and jump. Otherwise: load method_ptr and
 * slide each argument register down by one to drop the delegate this.
 * NOTE(review): param_count is declared gboolean but is used as an
 * integer count (size = 8 + param_count * 4; loop bound) — likely
 * should be int; confirm against upstream before changing.
 * NOTE(review): sampled extract — the has_target/else split, returns
 * and braces are missing from this view.
 */
387 get_delegate_invoke_impl (gboolean has_target, gboolean param_count, guint32 *code_size)
389 guint8 *code, *start;
392 start = code = mono_global_codeman_reserve (12);
394 /* Replace the this argument with the target */
395 ARM_LDR_IMM (code, ARMREG_IP, ARMREG_R0, G_STRUCT_OFFSET (MonoDelegate, method_ptr));
396 ARM_LDR_IMM (code, ARMREG_R0, ARMREG_R0, G_STRUCT_OFFSET (MonoDelegate, target));
397 ARM_MOV_REG_REG (code, ARMREG_PC, ARMREG_IP);
399 g_assert ((code - start) <= 12);
401 mono_arch_flush_icache (start, 12);
405 size = 8 + param_count * 4;
406 start = code = mono_global_codeman_reserve (size);
408 ARM_LDR_IMM (code, ARMREG_IP, ARMREG_R0, G_STRUCT_OFFSET (MonoDelegate, method_ptr));
409 /* slide down the arguments */
410 for (i = 0; i < param_count; ++i) {
411 ARM_MOV_REG_REG (code, (ARMREG_R0 + i), (ARMREG_R0 + i + 1));
413 ARM_MOV_REG_REG (code, ARMREG_PC, ARMREG_IP);
415 g_assert ((code - start) <= size);
417 mono_arch_flush_icache (start, size);
421 *code_size = code - start;
/* NOTE(review): sampled extract — declarations (res, code, code_len, i),
 * the return statement and braces are missing from this view. */
427 * mono_arch_get_delegate_invoke_impls:
429 * Return a list of MonoAotTrampInfo structures for the delegate invoke impl
433 mono_arch_get_delegate_invoke_impls (void)
440 code = get_delegate_invoke_impl (TRUE, 0, &code_len);
441 res = g_slist_prepend (res, mono_aot_tramp_info_create (g_strdup ("delegate_invoke_impl_has_target"), code, code_len));
443 for (i = 0; i <= MAX_ARCH_DELEGATE_PARAMS; ++i) {
444 code = get_delegate_invoke_impl (FALSE, i, &code_len);
445 res = g_slist_prepend (res, mono_aot_tramp_info_create (g_strdup_printf ("delegate_invoke_impl_target_%d", i), code, code_len));
/*
 * mono_arch_get_delegate_invoke_impl: return (and cache) a delegate
 * invoke thunk for 'sig'. Only regsize parameters up to
 * MAX_ARCH_DELEGATE_PARAMS are supported; struct returns are rejected.
 * Caches are guarded by the arch mutex; AOT builds look the thunk up
 * by name instead of generating it.
 * NOTE(review): sampled extract — cache-hit checks, early returns,
 * has_target/else structure and braces are missing from this view.
 */
452 mono_arch_get_delegate_invoke_impl (MonoMethodSignature *sig, gboolean has_target)
454 guint8 *code, *start;
456 /* FIXME: Support more cases */
457 if (MONO_TYPE_ISSTRUCT (sig->ret))
461 static guint8* cached = NULL;
462 mono_mini_arch_lock ();
464 mono_mini_arch_unlock ();
469 start = mono_aot_get_named_code ("delegate_invoke_impl_has_target");
471 start = get_delegate_invoke_impl (TRUE, 0, NULL);
473 mono_mini_arch_unlock ();
476 static guint8* cache [MAX_ARCH_DELEGATE_PARAMS + 1] = {NULL};
479 if (sig->param_count > MAX_ARCH_DELEGATE_PARAMS)
481 for (i = 0; i < sig->param_count; ++i)
482 if (!mono_is_regsize_var (sig->params [i]))
485 mono_mini_arch_lock ();
486 code = cache [sig->param_count];
488 mono_mini_arch_unlock ();
493 char *name = g_strdup_printf ("delegate_invoke_impl_target_%d", sig->param_count);
494 start = mono_aot_get_named_code (name);
497 start = get_delegate_invoke_impl (FALSE, sig->param_count, NULL);
499 cache [sig->param_count] = start;
500 mono_mini_arch_unlock ();
/*
 * mono_arch_get_this_arg_from_call: recover the 'this' argument from
 * the saved register state of a call: it is in r1 when r0 is occupied
 * by a struct-return address, otherwise in r0.
 */
508 mono_arch_get_this_arg_from_call (MonoGenericSharingContext *gsctx, MonoMethodSignature *sig, mgreg_t *regs, guint8 *code)
510 /* FIXME: handle returning a struct */
511 if (MONO_TYPE_ISSTRUCT (sig->ret))
512 return (gpointer)regs [ARMREG_R1];
513 return (gpointer)regs [ARMREG_R0];
/* NOTE(review): body and braces missing from this sampled extract. */
517 * Initialize the cpu to execute managed code.
520 mono_arch_cpu_init (void)
/* NOTE(review): sampled extract — braces/return missing. The visible
 * code initializes the arch mutex and allocates the single-step and
 * breakpoint trigger pages; the bp page is mprotected to no access so
 * reads fault when breakpoints are armed. */
525 * Initialize architecture specific code.
528 mono_arch_init (void)
530 InitializeCriticalSection (&mini_arch_mutex);
532 ss_trigger_page = mono_valloc (NULL, mono_pagesize (), MONO_MMAP_READ|MONO_MMAP_32BIT);
533 bp_trigger_page = mono_valloc (NULL, mono_pagesize (), MONO_MMAP_READ|MONO_MMAP_32BIT);
534 mono_mprotect (bp_trigger_page, mono_pagesize (), 0);
/* NOTE(review): body and braces missing from this sampled extract. */
538 * Cleanup architecture specific code.
541 mono_arch_cleanup (void)
/* NOTE(review): sampled extract — the v5/v7/thumb flag assignments,
 * fclose, the NULL-check on fopen's result (presumably present in the
 * missing lines — confirm), the exclude_mask handling and the return
 * are all missing from this view. The visible code probes
 * /proc/cpuinfo for the architecture version and thumb support. */
546 * This function returns the optimizations supported on this cpu.
549 mono_arch_cpu_optimizazions (guint32 *exclude_mask)
553 thumb_supported = TRUE;
558 FILE *file = fopen ("/proc/cpuinfo", "r");
560 while ((line = fgets (buf, 512, file))) {
561 if (strncmp (line, "Processor", 9) == 0) {
562 char *ver = strstr (line, "(v");
563 if (ver && (ver [2] == '5' || ver [2] == '6' || ver [2] == '7'))
565 if (ver && (ver [2] == '7'))
569 if (strncmp (line, "Features", 8) == 0) {
570 char *th = strstr (line, "thumb");
572 thumb_supported = TRUE;
580 /*printf ("features: v5: %d, thumb: %d\n", v5_supported, thumb_supported);*/
584 /* no arm-specific optimizations yet */
/*
 * is_regsize_var: return whether a value of type 't' fits in one
 * 32-bit integer register (int-sized primitives, pointers, object
 * references, non-valuetype generic instances).
 * NOTE(review): sampled extract — most case labels, the TRUE/FALSE
 * returns and braces are missing from this view.
 */
590 is_regsize_var (MonoType *t) {
593 t = mini_type_get_underlying_type (NULL, t);
600 case MONO_TYPE_FNPTR:
602 case MONO_TYPE_OBJECT:
603 case MONO_TYPE_STRING:
604 case MONO_TYPE_CLASS:
605 case MONO_TYPE_SZARRAY:
606 case MONO_TYPE_ARRAY:
608 case MONO_TYPE_GENERICINST:
609 if (!mono_type_generic_inst_is_valuetype (t))
612 case MONO_TYPE_VALUETYPE:
/*
 * mono_arch_get_allocatable_int_vars: collect the method variables that
 * are candidates for global register allocation: live, non-volatile,
 * non-indirect OP_LOCAL/OP_ARG vars whose type fits in a 32-bit
 * register, sorted by liveness.
 * NOTE(review): sampled extract — declarations, 'continue' statements,
 * the return and braces are missing from this view.
 */
619 mono_arch_get_allocatable_int_vars (MonoCompile *cfg)
624 for (i = 0; i < cfg->num_varinfo; i++) {
625 MonoInst *ins = cfg->varinfo [i];
626 MonoMethodVar *vmv = MONO_VARINFO (cfg, i);
629 if (vmv->range.first_use.abs_pos >= vmv->range.last_use.abs_pos)
632 if (ins->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT) || (ins->opcode != OP_LOCAL && ins->opcode != OP_ARG))
635 /* we can only allocate 32 bit values */
636 if (is_regsize_var (ins->inst_vtype)) {
637 g_assert (MONO_VARINFO (cfg, i)->reg == -1);
638 g_assert (i == vmv->idx);
639 vars = mono_varlist_insert_sorted (cfg, vars, vmv, FALSE);
/* NOTE(review): sampled extract — declarations, the return and braces
 * are missing from this view. */
646 #define USE_EXTRA_TEMPS 0
/*
 * mono_arch_get_global_int_regs: return the callee-saved registers
 * (v1-v4, plus v5 unless it is reserved for the rgctx/IMT argument or
 * AOT) available for global register allocation.
 */
649 mono_arch_get_global_int_regs (MonoCompile *cfg)
654 * FIXME: Interface calls might go through a static rgctx trampoline which
655 * sets V5, but it doesn't save it, so we need to save it ourselves, and
658 if (cfg->flags & MONO_CFG_HAS_CALLS)
659 cfg->uses_rgctx_reg = TRUE;
661 regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_V1));
662 regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_V2));
663 regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_V3));
664 regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_V4));
665 if (!(cfg->compile_aot || cfg->uses_rgctx_reg))
666 /* V5 is reserved for passing the vtable/rgctx/IMT method */
667 regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_V5));
668 /*regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_V6));*/
669 /*regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_V7));*/
/* NOTE(review): function body and braces missing from this sampled extract. */
675 * mono_arch_regalloc_cost:
677 * Return the cost, in number of memory references, of the action of
678 * allocating the variable VMV into a register during global register
682 mono_arch_regalloc_cost (MonoCompile *cfg, MonoMethodVar *vmv)
688 #ifndef __GNUC_PREREQ
689 #define __GNUC_PREREQ(maj, min) (0)
/*
 * mono_arch_flush_icache: flush/invalidate the instruction cache for
 * the freshly generated code in [code, code+size). Platform-specific:
 * sys_icache_invalidate (Darwin), GCC's __clear_cache, an Android
 * cacheflush syscall, or a raw swi 0x9f0002 (Linux sys_cacheflush).
 * NOTE(review): sampled extract — #ifdef framing, most of the inline
 * asm statements, #endif lines and braces are missing from this view;
 * do not edit the asm without the full source.
 */
693 mono_arch_flush_icache (guint8 *code, gint size)
696 sys_icache_invalidate (code, size);
697 #elif __GNUC_PREREQ(4, 1)
698 __clear_cache (code, code + size);
699 #elif defined(PLATFORM_ANDROID)
700 const int syscall = 0xf0002;
708 : "r" (code), "r" (code + size), "r" (syscall)
709 : "r0", "r1", "r7", "r2"
712 __asm __volatile ("mov r0, %0\n"
715 "swi 0x9f0002 @ sys_cacheflush"
717 : "r" (code), "r" (code + size), "r" (0)
718 : "r0", "r1", "r3" );
735 guint16 vtsize; /* in param area */
738 guint8 size : 4; /* 1, 2, 4, 8, or regs used by RegTypeStructByVal */
745 gboolean vtype_retaddr;
754 /*#define __alignof__(a) sizeof(a)*/
755 #define __alignof__(type) G_STRUCT_OFFSET(struct { char c; type x; }, x)
/*
 * add_general: assign the next argument slot per the ARM calling
 * convention. 'simple' (word-sized) args take the next of r0-r3 or
 * spill to the stack (RegTypeBase). 64-bit args take an even register
 * pair (RegTypeIRegPair) when i8 alignment is 8, may be split between
 * r3 and the stack (RegTypeBaseGen), or go fully on the stack.
 * NOTE(review): sampled extract — gr/stack_size increments, #else and
 * #endif lines and braces are missing from this view; the exact
 * accounting cannot be verified from here.
 */
761 add_general (guint *gr, guint *stack_size, ArgInfo *ainfo, gboolean simple)
764 if (*gr > ARMREG_R3) {
765 ainfo->offset = *stack_size;
766 ainfo->reg = ARMREG_SP; /* in the caller */
767 ainfo->storage = RegTypeBase;
770 ainfo->storage = RegTypeGeneral;
774 #if defined(__APPLE__) && defined(MONO_CROSS_COMPILE)
777 int i8_align = __alignof__ (gint64);
781 gboolean split = i8_align == 4;
783 gboolean split = TRUE;
786 if (*gr == ARMREG_R3 && split) {
787 /* first word in r3 and the second on the stack */
788 ainfo->offset = *stack_size;
789 ainfo->reg = ARMREG_SP; /* in the caller */
790 ainfo->storage = RegTypeBaseGen;
792 } else if (*gr >= ARMREG_R3) {
794 /* darwin aligns longs to 4 byte only */
800 ainfo->offset = *stack_size;
801 ainfo->reg = ARMREG_SP; /* in the caller */
802 ainfo->storage = RegTypeBase;
806 if (i8_align == 8 && ((*gr) & 1))
809 ainfo->storage = RegTypeIRegPair;
/*
 * get_call_info: compute the ABI classification (CallInfo) for 'sig':
 * where each argument lives (registers r0-r3, register pairs, stack,
 * by-value struct split across regs+stack) and how the return value is
 * conveyed, plus the total stack space the arguments occupy.
 * NOTE(review): this chunk is a sampled extract — declarations (gr, i,
 * align, cinfo), #else/#endif lines, gr/stack_size/n increments, many
 * case labels, break statements and braces are missing throughout.
 * Treat the visible lines as a skeleton only; consult the full
 * mini-arm.c before any modification.
 */
818 get_call_info (MonoMemPool *mp, MonoMethodSignature *sig, gboolean is_pinvoke)
821 int n = sig->hasthis + sig->param_count;
822 MonoType *simpletype;
823 guint32 stack_size = 0;
827 cinfo = mono_mempool_alloc0 (mp, sizeof (CallInfo) + (sizeof (ArgInfo) * n));
829 cinfo = g_malloc0 (sizeof (CallInfo) + (sizeof (ArgInfo) * n));
/* Struct returns: small pinvoke structs come back by value in r0,
 * otherwise a hidden return-address argument is added. */
834 /* FIXME: handle returning a struct */
835 if (MONO_TYPE_ISSTRUCT (sig->ret)) {
838 if (is_pinvoke && mono_class_native_size (mono_class_from_mono_type (sig->ret), &align) <= sizeof (gpointer)) {
839 cinfo->ret.storage = RegTypeStructByVal;
841 add_general (&gr, &stack_size, &cinfo->ret, TRUE);
842 cinfo->struct_ret = ARMREG_R0;
843 cinfo->vtype_retaddr = TRUE;
849 add_general (&gr, &stack_size, cinfo->args + n, TRUE);
852 DEBUG(printf("params: %d\n", sig->param_count));
853 for (i = 0; i < sig->param_count; ++i) {
854 if ((sig->call_convention == MONO_CALL_VARARG) && (i == sig->sentinelpos)) {
855 /* Prevent implicit arguments and sig_cookie from
856 being passed in registers */
858 /* Emit the signature cookie just before the implicit arguments */
859 add_general (&gr, &stack_size, &cinfo->sig_cookie, TRUE);
861 DEBUG(printf("param %d: ", i));
862 if (sig->params [i]->byref) {
863 DEBUG(printf("byref\n"));
864 add_general (&gr, &stack_size, cinfo->args + n, TRUE);
868 simpletype = mini_type_get_underlying_type (NULL, sig->params [i]);
869 switch (simpletype->type) {
870 case MONO_TYPE_BOOLEAN:
873 cinfo->args [n].size = 1;
874 add_general (&gr, &stack_size, cinfo->args + n, TRUE);
880 cinfo->args [n].size = 2;
881 add_general (&gr, &stack_size, cinfo->args + n, TRUE);
886 cinfo->args [n].size = 4;
887 add_general (&gr, &stack_size, cinfo->args + n, TRUE);
893 case MONO_TYPE_FNPTR:
894 case MONO_TYPE_CLASS:
895 case MONO_TYPE_OBJECT:
896 case MONO_TYPE_STRING:
897 case MONO_TYPE_SZARRAY:
898 case MONO_TYPE_ARRAY:
900 cinfo->args [n].size = sizeof (gpointer);
901 add_general (&gr, &stack_size, cinfo->args + n, TRUE);
904 case MONO_TYPE_GENERICINST:
905 if (!mono_type_generic_inst_is_valuetype (sig->params [i])) {
906 cinfo->args [n].size = sizeof (gpointer);
907 add_general (&gr, &stack_size, cinfo->args + n, TRUE);
912 case MONO_TYPE_TYPEDBYREF:
913 case MONO_TYPE_VALUETYPE: {
919 if (simpletype->type == MONO_TYPE_TYPEDBYREF) {
920 size = sizeof (MonoTypedRef);
921 align = sizeof (gpointer);
923 MonoClass *klass = mono_class_from_mono_type (sig->params [i]);
925 size = mono_class_native_size (klass, &align);
927 size = mono_class_value_size (klass, &align);
929 DEBUG(printf ("load %d bytes struct\n",
930 mono_class_native_size (sig->params [i]->data.klass, NULL)));
/* By-value structs: round the size up to whole words, pass as many
 * words as fit in the remaining regs, the rest on the stack. */
933 align_size += (sizeof (gpointer) - 1);
934 align_size &= ~(sizeof (gpointer) - 1);
935 nwords = (align_size + sizeof (gpointer) -1 ) / sizeof (gpointer);
936 cinfo->args [n].storage = RegTypeStructByVal;
937 /* FIXME: align stack_size if needed */
939 if (align >= 8 && (gr & 1))
942 if (gr > ARMREG_R3) {
943 cinfo->args [n].size = 0;
944 cinfo->args [n].vtsize = nwords;
946 int rest = ARMREG_R3 - gr + 1;
947 int n_in_regs = rest >= nwords? nwords: rest;
949 cinfo->args [n].size = n_in_regs;
950 cinfo->args [n].vtsize = nwords - n_in_regs;
951 cinfo->args [n].reg = gr;
955 cinfo->args [n].offset = stack_size;
956 /*g_print ("offset for arg %d at %d\n", n, stack_size);*/
957 stack_size += nwords * sizeof (gpointer);
964 cinfo->args [n].size = 8;
965 add_general (&gr, &stack_size, cinfo->args + n, FALSE);
969 g_error ("Can't trampoline 0x%x", sig->params [i]->type);
973 /* Handle the case where there are no implicit arguments */
974 if ((sig->call_convention == MONO_CALL_VARARG) && (i == sig->sentinelpos)) {
975 /* Prevent implicit arguments and sig_cookie from
976 being passed in registers */
978 /* Emit the signature cookie just before the implicit arguments */
979 add_general (&gr, &stack_size, &cinfo->sig_cookie, TRUE);
/* Classify the return value. */
983 simpletype = mini_type_get_underlying_type (NULL, sig->ret);
984 switch (simpletype->type) {
985 case MONO_TYPE_BOOLEAN:
996 case MONO_TYPE_FNPTR:
997 case MONO_TYPE_CLASS:
998 case MONO_TYPE_OBJECT:
999 case MONO_TYPE_SZARRAY:
1000 case MONO_TYPE_ARRAY:
1001 case MONO_TYPE_STRING:
1002 cinfo->ret.storage = RegTypeGeneral;
1003 cinfo->ret.reg = ARMREG_R0;
1007 cinfo->ret.storage = RegTypeIRegPair;
1008 cinfo->ret.reg = ARMREG_R0;
1012 cinfo->ret.storage = RegTypeFP;
1013 cinfo->ret.reg = ARMREG_R0;
1014 /* FIXME: cinfo->ret.reg = ???;
1015 cinfo->ret.storage = RegTypeFP;*/
1017 case MONO_TYPE_GENERICINST:
1018 if (!mono_type_generic_inst_is_valuetype (sig->ret)) {
1019 cinfo->ret.storage = RegTypeGeneral;
1020 cinfo->ret.reg = ARMREG_R0;
1024 case MONO_TYPE_VALUETYPE:
1025 case MONO_TYPE_TYPEDBYREF:
1026 if (cinfo->ret.storage != RegTypeStructByVal)
1027 cinfo->ret.storage = RegTypeStructByAddr;
1029 case MONO_TYPE_VOID:
1032 g_error ("Can't handle as return value 0x%x", sig->ret->type);
1036 /* align stack size to 8 */
1037 DEBUG (printf (" stack size: %d (%d)\n", (stack_size + 15) & ~15, stack_size));
1038 stack_size = (stack_size + 7) & ~7;
1040 cinfo->stack_usage = stack_size;
/* NOTE(review): this chunk is a sampled extract — declarations
 * (inst, cinfo, ualign), many offset/size updates, else branches,
 * curinst increments, closing braces and the trailing alignment code
 * are missing throughout. Treat the visible lines as a skeleton only;
 * consult the full mini-arm.c before any modification. */
1046 * Set var information according to the calling convention. arm version.
1047 * The locals var stuff should most likely be split in another method.
1050 mono_arch_allocate_vars (MonoCompile *cfg)
1052 MonoMethodSignature *sig;
1053 MonoMethodHeader *header;
1055 int i, offset, size, align, curinst;
1056 int frame_reg = ARMREG_FP;
1060 sig = mono_method_signature (cfg->method);
1062 if (!cfg->arch.cinfo)
1063 cfg->arch.cinfo = get_call_info (cfg->mempool, sig, sig->pinvoke);
1064 cinfo = cfg->arch.cinfo;
1066 /* FIXME: this will change when we use FP as gcc does */
1067 cfg->flags |= MONO_CFG_HAS_SPILLUP;
1069 /* allow room for the vararg method args: void* and long/double */
1070 if (mono_jit_trace_calls != NULL && mono_trace_eval (cfg->method))
1071 cfg->param_area = MAX (cfg->param_area, sizeof (gpointer)*8);
1073 header = mono_method_get_header (cfg->method);
1076 * We use the frame register also for any method that has
1077 * exception clauses. This way, when the handlers are called,
1078 * the code will reference local variables using the frame reg instead of
1079 * the stack pointer: if we had to restore the stack pointer, we'd
1080 * corrupt the method frames that are already on the stack (since
1081 * filters get called before stack unwinding happens) when the filter
1082 * code would call any method (this also applies to finally etc.).
1084 if ((cfg->flags & MONO_CFG_HAS_ALLOCA) || header->num_clauses)
1085 frame_reg = ARMREG_FP;
1086 cfg->frame_reg = frame_reg;
1087 if (frame_reg != ARMREG_SP) {
1088 cfg->used_int_regs |= 1 << frame_reg;
1091 if (cfg->compile_aot || cfg->uses_rgctx_reg)
1092 /* V5 is reserved for passing the vtable/rgctx/IMT method */
1093 cfg->used_int_regs |= (1 << ARMREG_V5);
/* Scalar returns live in r0 (REGVAR). */
1097 if (!MONO_TYPE_ISSTRUCT (sig->ret)) {
1098 switch (mini_type_get_underlying_type (NULL, sig->ret)->type) {
1099 case MONO_TYPE_VOID:
1102 cfg->ret->opcode = OP_REGVAR;
1103 cfg->ret->inst_c0 = ARMREG_R0;
1107 /* local vars are at a positive offset from the stack pointer */
1109 * also note that if the function uses alloca, we use FP
1110 * to point at the local variables.
1112 offset = 0; /* linkage area */
1113 /* align the offset to 16 bytes: not sure this is needed here */
1115 //offset &= ~(8 - 1);
1117 /* add parameter area size for called functions */
1118 offset += cfg->param_area;
1121 if (cfg->flags & MONO_CFG_HAS_FPOUT)
1124 /* allow room to save the return value */
1125 if (mono_jit_trace_calls != NULL && mono_trace_eval (cfg->method))
1128 /* the MonoLMF structure is stored just below the stack pointer */
1129 if (MONO_TYPE_ISSTRUCT (sig->ret)) {
1130 if (cinfo->ret.storage == RegTypeStructByVal) {
1131 cfg->ret->opcode = OP_REGOFFSET;
1132 cfg->ret->inst_basereg = cfg->frame_reg;
1133 offset += sizeof (gpointer) - 1;
1134 offset &= ~(sizeof (gpointer) - 1);
1135 cfg->ret->inst_offset = - offset;
1137 inst = cfg->vret_addr;
1138 offset += sizeof(gpointer) - 1;
1139 offset &= ~(sizeof(gpointer) - 1);
1140 inst->inst_offset = offset;
1141 inst->opcode = OP_REGOFFSET;
1142 inst->inst_basereg = frame_reg;
1143 if (G_UNLIKELY (cfg->verbose_level > 1)) {
1144 printf ("vret_addr =");
1145 mono_print_ins (cfg->vret_addr);
1148 offset += sizeof(gpointer);
/* Assign frame offsets to locals. */
1151 curinst = cfg->locals_start;
1152 for (i = curinst; i < cfg->num_varinfo; ++i) {
1153 inst = cfg->varinfo [i];
1154 if ((inst->flags & MONO_INST_IS_DEAD) || inst->opcode == OP_REGVAR)
1157 /* inst->backend.is_pinvoke indicates native sized value types, this is used by the
1158 * pinvoke wrappers when they call functions returning structure */
1159 if (inst->backend.is_pinvoke && MONO_TYPE_ISSTRUCT (inst->inst_vtype) && inst->inst_vtype->type != MONO_TYPE_TYPEDBYREF) {
1160 size = mono_class_native_size (mono_class_from_mono_type (inst->inst_vtype), &ualign);
1164 size = mono_type_size (inst->inst_vtype, &align);
1166 /* FIXME: if a structure is misaligned, our memcpy doesn't work,
1167 * since it loads/stores misaligned words, which don't do the right thing.
1169 if (align < 4 && size >= 4)
1171 offset += align - 1;
1172 offset &= ~(align - 1);
1173 inst->inst_offset = offset;
1174 inst->opcode = OP_REGOFFSET;
1175 inst->inst_basereg = frame_reg;
1177 //g_print ("allocating local %d to %d\n", i, inst->inst_offset);
/* Assign frame offsets to incoming arguments (this + params). */
1182 inst = cfg->args [curinst];
1183 if (inst->opcode != OP_REGVAR) {
1184 inst->opcode = OP_REGOFFSET;
1185 inst->inst_basereg = frame_reg;
1186 offset += sizeof (gpointer) - 1;
1187 offset &= ~(sizeof (gpointer) - 1);
1188 inst->inst_offset = offset;
1189 offset += sizeof (gpointer);
1194 if (sig->call_convention == MONO_CALL_VARARG) {
1198 /* Allocate a local slot to hold the sig cookie address */
1199 offset += align - 1;
1200 offset &= ~(align - 1);
1201 cfg->sig_cookie = offset;
1205 for (i = 0; i < sig->param_count; ++i) {
1206 inst = cfg->args [curinst];
1208 if (inst->opcode != OP_REGVAR) {
1209 inst->opcode = OP_REGOFFSET;
1210 inst->inst_basereg = frame_reg;
1211 size = mini_type_stack_size_full (NULL, sig->params [i], &ualign, sig->pinvoke);
1213 /* FIXME: if a structure is misaligned, our memcpy doesn't work,
1214 * since it loads/stores misaligned words, which don't do the right thing.
1216 if (align < 4 && size >= 4)
1218 /* The code in the prolog () stores words when storing vtypes received in a register */
1219 if (MONO_TYPE_ISSTRUCT (sig->params [i]))
1221 offset += align - 1;
1222 offset &= ~(align - 1);
1223 inst->inst_offset = offset;
1229 /* align the offset to 8 bytes */
1234 cfg->stack_offset = offset;
/*
 * mono_arch_create_vars: create special compile-time variables: the
 * vret_addr arg for struct returns passed by address, and (for AOT
 * with sequence points) volatile locals caching the seq-point info and
 * the single-step trigger page.
 * NOTE(review): sampled extract — closing braces and some declarations
 * are missing from this view.
 */
1238 mono_arch_create_vars (MonoCompile *cfg)
1240 MonoMethodSignature *sig;
1243 sig = mono_method_signature (cfg->method);
1245 if (!cfg->arch.cinfo)
1246 cfg->arch.cinfo = get_call_info (cfg->mempool, sig, sig->pinvoke);
1247 cinfo = cfg->arch.cinfo;
1249 if (cinfo->ret.storage == RegTypeStructByVal)
1250 cfg->ret_var_is_local = TRUE;
1252 if (MONO_TYPE_ISSTRUCT (sig->ret) && cinfo->ret.storage != RegTypeStructByVal) {
1253 cfg->vret_addr = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_ARG);
1254 if (G_UNLIKELY (cfg->verbose_level > 1)) {
1255 printf ("vret_addr = ");
1256 mono_print_ins (cfg->vret_addr);
1260 if (cfg->gen_seq_points && cfg->compile_aot) {
1261 MonoInst *ins = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1262 ins->flags |= MONO_INST_VOLATILE;
1263 cfg->arch.seq_point_info_var = ins;
1265 /* Allocate a separate variable for this to save 1 load per seq point */
1266 ins = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1267 ins->flags |= MONO_INST_VOLATILE;
1268 cfg->arch.ss_trigger_page_var = ins;
/*
 * emit_sig_cookie: emit the vararg signature cookie for a call: build
 * a trimmed copy of the signature (params after the sentinel only) and
 * store its address at the cookie's stack slot. Skipped for tail
 * calls; disables AOT since signature tokens aren't supported there.
 * NOTE(review): sampled extract — 'return' after the tail_call check,
 * the sig_arg declaration and braces are missing from this view.
 */
1273 emit_sig_cookie (MonoCompile *cfg, MonoCallInst *call, CallInfo *cinfo)
1275 MonoMethodSignature *tmp_sig;
1278 if (call->tail_call)
1281 /* FIXME: Add support for signature tokens to AOT */
1282 cfg->disable_aot = TRUE;
1284 g_assert (cinfo->sig_cookie.storage == RegTypeBase);
1287 * mono_ArgIterator_Setup assumes the signature cookie is
1288 * passed first and all the arguments which were before it are
1289 * passed on the stack after the signature. So compensate by
1290 * passing a different signature.
1292 tmp_sig = mono_metadata_signature_dup (call->signature);
1293 tmp_sig->param_count -= call->signature->sentinelpos;
1294 tmp_sig->sentinelpos = 0;
1295 memcpy (tmp_sig->params, call->signature->params + call->signature->sentinelpos, tmp_sig->param_count * sizeof (MonoType*));
1297 MONO_INST_NEW (cfg, sig_arg, OP_ICONST);
1298 sig_arg->dreg = mono_alloc_ireg (cfg);
1299 sig_arg->inst_p0 = tmp_sig;
1300 MONO_ADD_INS (cfg->cbb, sig_arg);
1302 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, ARMREG_SP, cinfo->sig_cookie.offset, sig_arg->dreg);
/*
 * mono_arch_get_llvm_call_info: translate our CallInfo classification
 * into LLVMCallInfo; marks the compile as disable_llvm when a return
 * or argument convention LLVM cannot model is encountered (anything
 * beyond plain integer registers / register pairs).
 * NOTE(review): sampled extract — declarations, break statements, the
 * return and braces are missing from this view.
 */
1307 mono_arch_get_llvm_call_info (MonoCompile *cfg, MonoMethodSignature *sig)
1312 LLVMCallInfo *linfo;
1314 n = sig->param_count + sig->hasthis;
1316 cinfo = get_call_info (cfg->mempool, sig, sig->pinvoke);
1318 linfo = mono_mempool_alloc0 (cfg->mempool, sizeof (LLVMCallInfo) + (sizeof (LLVMArgInfo) * n));
1321 * LLVM always uses the native ABI while we use our own ABI, the
1322 * only difference is the handling of vtypes:
1323 * - we only pass/receive them in registers in some cases, and only
1324 * in 1 or 2 integer registers.
1326 if (cinfo->ret.storage != RegTypeGeneral && cinfo->ret.storage != RegTypeNone && cinfo->ret.storage != RegTypeFP) {
1327 cfg->exception_message = g_strdup ("unknown ret conv");
1328 cfg->disable_llvm = TRUE;
1332 for (i = 0; i < n; ++i) {
1333 ainfo = cinfo->args + i;
1335 linfo->args [i].storage = LLVMArgNone;
1337 switch (ainfo->storage) {
1338 case RegTypeGeneral:
1339 case RegTypeIRegPair:
1340 linfo->args [i].storage = LLVMArgInIReg;
1343 cfg->exception_message = g_strdup_printf ("ainfo->storage (%d)", ainfo->storage);
1344 cfg->disable_llvm = TRUE;
/*
 * mono_arch_emit_call:
 *
 *   Emit IR to move the arguments of CALL into the registers / stack slots
 * dictated by the ARM calling convention computed by get_call_info ().
 * Handles I8/U8 register pairs, soft-float and hard-float fp arguments,
 * vtypes passed by value/address, vararg signature cookies, and the
 * hidden vtype-return address register.
 */
mono_arch_emit_call (MonoCompile *cfg, MonoCallInst *call)
	MonoMethodSignature *sig;

	sig = call->signature;
	n = sig->param_count + sig->hasthis;

	cinfo = get_call_info (NULL, sig, sig->pinvoke);

	for (i = 0; i < n; ++i) {
		ArgInfo *ainfo = cinfo->args + i;

		/* For the implicit 'this' argument use a plain native int type */
		if (i >= sig->hasthis)
			t = sig->params [i - sig->hasthis];
			t = &mono_defaults.int_class->byval_arg;
		t = mini_type_get_underlying_type (NULL, t);

		if ((sig->call_convention == MONO_CALL_VARARG) && (i == sig->sentinelpos)) {
			/* Emit the signature cookie just before the implicit arguments */
			emit_sig_cookie (cfg, call, cinfo);

		in = call->args [i];

		switch (ainfo->storage) {
		case RegTypeGeneral:
		case RegTypeIRegPair:
			if (!t->byref && ((t->type == MONO_TYPE_I8) || (t->type == MONO_TYPE_U8))) {
				/* 64 bit value: move the two word vregs into a register pair */
				MONO_INST_NEW (cfg, ins, OP_MOVE);
				ins->dreg = mono_alloc_ireg (cfg);
				ins->sreg1 = in->dreg + 1;
				MONO_ADD_INS (cfg->cbb, ins);
				mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, ainfo->reg, FALSE);

				MONO_INST_NEW (cfg, ins, OP_MOVE);
				ins->dreg = mono_alloc_ireg (cfg);
				ins->sreg1 = in->dreg + 2;
				MONO_ADD_INS (cfg->cbb, ins);
				mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, ainfo->reg + 1, FALSE);
			} else if (!t->byref && ((t->type == MONO_TYPE_R8) || (t->type == MONO_TYPE_R4))) {
#ifndef MONO_ARCH_SOFT_FLOAT
				if (ainfo->size == 4) {
#ifdef MONO_ARCH_SOFT_FLOAT
					/* mono_emit_call_args () have already done the r8->r4 conversion */
					/* The converted value is in an int vreg */
					MONO_INST_NEW (cfg, ins, OP_MOVE);
					ins->dreg = mono_alloc_ireg (cfg);
					ins->sreg1 = in->dreg;
					MONO_ADD_INS (cfg->cbb, ins);
					mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, ainfo->reg, FALSE);
					/* Hard float: bounce the r4 through the param area to get it into an int reg */
					MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORER4_MEMBASE_REG, ARMREG_SP, (cfg->param_area - 8), in->dreg);
					creg = mono_alloc_ireg (cfg);
					MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOAD_MEMBASE, creg, ARMREG_SP, (cfg->param_area - 8));
					mono_call_inst_add_outarg_reg (cfg, call, creg, ainfo->reg, FALSE);
#ifdef MONO_ARCH_SOFT_FLOAT
					/* Soft float r8: split the fp vreg into its two 32 bit halves */
					MONO_INST_NEW (cfg, ins, OP_FGETLOW32);
					ins->dreg = mono_alloc_ireg (cfg);
					ins->sreg1 = in->dreg;
					MONO_ADD_INS (cfg->cbb, ins);
					mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, ainfo->reg, FALSE);

					MONO_INST_NEW (cfg, ins, OP_FGETHIGH32);
					ins->dreg = mono_alloc_ireg (cfg);
					ins->sreg1 = in->dreg;
					MONO_ADD_INS (cfg->cbb, ins);
					mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, ainfo->reg + 1, FALSE);
					/* Hard float r8: bounce through the param area into two int regs */
					MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORER8_MEMBASE_REG, ARMREG_SP, (cfg->param_area - 8), in->dreg);
					creg = mono_alloc_ireg (cfg);
					MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOAD_MEMBASE, creg, ARMREG_SP, (cfg->param_area - 8));
					mono_call_inst_add_outarg_reg (cfg, call, creg, ainfo->reg, FALSE);
					creg = mono_alloc_ireg (cfg);
					MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOAD_MEMBASE, creg, ARMREG_SP, (cfg->param_area - 8 + 4));
					mono_call_inst_add_outarg_reg (cfg, call, creg, ainfo->reg + 1, FALSE);
				cfg->flags |= MONO_CFG_HAS_FPOUT;
				/* Simple word-sized argument */
				MONO_INST_NEW (cfg, ins, OP_MOVE);
				ins->dreg = mono_alloc_ireg (cfg);
				ins->sreg1 = in->dreg;
				MONO_ADD_INS (cfg->cbb, ins);

				mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, ainfo->reg, FALSE);
		case RegTypeStructByAddr:
			/* FIXME: where is the data allocated? */
			arg->backend.reg3 = ainfo->reg;
			call->used_iregs |= 1 << ainfo->reg;
			g_assert_not_reached ();
		case RegTypeStructByVal:
			/* Deferred to mono_arch_emit_outarg_vt () via OP_OUTARG_VT */
			MONO_INST_NEW (cfg, ins, OP_OUTARG_VT);
			ins->opcode = OP_OUTARG_VT;
			ins->sreg1 = in->dreg;
			ins->klass = in->klass;
			ins->inst_p0 = call;
			ins->inst_p1 = mono_mempool_alloc (cfg->mempool, sizeof (ArgInfo));
			memcpy (ins->inst_p1, ainfo, sizeof (ArgInfo));
			MONO_ADD_INS (cfg->cbb, ins);
			/* Stack-passed argument: store at its offset from SP */
			if (!t->byref && ((t->type == MONO_TYPE_I8) || (t->type == MONO_TYPE_U8))) {
				MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, ARMREG_SP, ainfo->offset, in->dreg);
			} else if (!t->byref && ((t->type == MONO_TYPE_R4) || (t->type == MONO_TYPE_R8))) {
				if (t->type == MONO_TYPE_R8) {
					MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORER8_MEMBASE_REG, ARMREG_SP, ainfo->offset, in->dreg);
#ifdef MONO_ARCH_SOFT_FLOAT
					/* Soft float r4 lives in an int vreg already */
					MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, ARMREG_SP, ainfo->offset, in->dreg);
					MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORER4_MEMBASE_REG, ARMREG_SP, ainfo->offset, in->dreg);
				MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, ARMREG_SP, ainfo->offset, in->dreg);
		case RegTypeBaseGen:
			/* Argument split between the last register (r3) and the stack */
			if (!t->byref && ((t->type == MONO_TYPE_I8) || (t->type == MONO_TYPE_U8))) {
				MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, ARMREG_SP, ainfo->offset, (G_BYTE_ORDER == G_BIG_ENDIAN) ? in->dreg + 1 : in->dreg + 2);
				MONO_INST_NEW (cfg, ins, OP_MOVE);
				ins->dreg = mono_alloc_ireg (cfg);
				ins->sreg1 = G_BYTE_ORDER == G_BIG_ENDIAN ? in->dreg + 2 : in->dreg + 1;
				MONO_ADD_INS (cfg->cbb, ins);
				mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, ARMREG_R3, FALSE);
			} else if (!t->byref && (t->type == MONO_TYPE_R8)) {
#ifdef MONO_ARCH_SOFT_FLOAT
				g_assert_not_reached ();
				/* Bounce the double through the param area: low word to r3, high word to the stack */
				MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORER8_MEMBASE_REG, ARMREG_SP, (cfg->param_area - 8), in->dreg);
				creg = mono_alloc_ireg (cfg);
				mono_call_inst_add_outarg_reg (cfg, call, creg, ARMREG_R3, FALSE);
				MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOAD_MEMBASE, creg, ARMREG_SP, (cfg->param_area - 8));
				creg = mono_alloc_ireg (cfg);
				MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOAD_MEMBASE, creg, ARMREG_SP, (cfg->param_area - 4));
				MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, ARMREG_SP, ainfo->offset, creg);
				cfg->flags |= MONO_CFG_HAS_FPOUT;
				g_assert_not_reached ();
			/* FP args are passed in int regs */
			arg->backend.reg3 = ainfo->reg;
			call->used_iregs |= 1 << ainfo->reg;
			if (ainfo->size == 8) {
				arg->opcode = OP_OUTARG_R8;
				call->used_iregs |= 1 << (ainfo->reg + 1);
				arg->opcode = OP_OUTARG_R4;
			cfg->flags |= MONO_CFG_HAS_FPOUT;
			g_assert_not_reached ();

	/* Handle the case where there are no implicit arguments */
	if (!sig->pinvoke && (sig->call_convention == MONO_CALL_VARARG) && (n == sig->sentinelpos))
		emit_sig_cookie (cfg, call, cinfo);

	if (sig->ret && MONO_TYPE_ISSTRUCT (sig->ret)) {
		if (cinfo->ret.storage == RegTypeStructByVal) {
			/* The JIT will transform this into a normal call */
			call->vret_in_reg = TRUE;
			/* Pass the vtype return address in the convention's return-address register */
			MONO_INST_NEW (cfg, vtarg, OP_MOVE);
			vtarg->sreg1 = call->vret_var->dreg;
			vtarg->dreg = mono_alloc_preg (cfg);
			MONO_ADD_INS (cfg->cbb, vtarg);

			mono_call_inst_add_outarg_reg (cfg, call, vtarg->dreg, cinfo->ret.reg, FALSE);

	call->stack_usage = cinfo->stack_usage;
/*
 * mono_arch_emit_outarg_vt:
 *
 *   Lower an OP_OUTARG_VT created by mono_arch_emit_call (): copy the
 * register-passed part of the vtype SRC into argument registers word by
 * word, and memcpy the overflow part to its stack slot.
 * ins->inst_p0 is the MonoCallInst, ins->inst_p1 the saved ArgInfo.
 */
mono_arch_emit_outarg_vt (MonoCompile *cfg, MonoInst *ins, MonoInst *src)
	MonoCallInst *call = (MonoCallInst*)ins->inst_p0;
	ArgInfo *ainfo = ins->inst_p1;
	int ovf_size = ainfo->vtsize;	/* words that did not fit in registers */
	int doffset = ainfo->offset;	/* stack offset for the overflow part */
	int i, soffset, dreg;

	/* ainfo->size words go into consecutive registers starting at ainfo->reg */
	for (i = 0; i < ainfo->size; ++i) {
		dreg = mono_alloc_ireg (cfg);
		MONO_EMIT_NEW_LOAD_MEMBASE (cfg, dreg, src->dreg, soffset);
		mono_call_inst_add_outarg_reg (cfg, call, dreg, ainfo->reg + i, FALSE);
		soffset += sizeof (gpointer);
	//g_print ("vt size: %d at R%d + %d\n", doffset, vt->inst_basereg, vt->inst_offset);
		/* Remaining words are copied to the outgoing stack area */
		mini_emit_memcpy (cfg, ARMREG_SP, doffset, src->dreg, soffset, ovf_size * sizeof (gpointer), 0);
/*
 * mono_arch_emit_setret:
 *
 *   Emit IR to move VAL into the method's return location.  I8/U8 values
 * use the r0/r1 pair (OP_SETLRET); fp values depend on the fp model
 * (soft float, VFP, FPA); everything else is a plain OP_MOVE.
 */
mono_arch_emit_setret (MonoCompile *cfg, MonoMethod *method, MonoInst *val)
	MonoType *ret = mini_type_get_underlying_type (cfg->generic_sharing_context, mono_method_signature (method)->ret);

	if (ret->type == MONO_TYPE_I8 || ret->type == MONO_TYPE_U8) {
		/* 64 bit return: both word vregs feed OP_SETLRET */
		MONO_INST_NEW (cfg, ins, OP_SETLRET);
		ins->sreg1 = val->dreg + 1;
		ins->sreg2 = val->dreg + 2;
		MONO_ADD_INS (cfg->cbb, ins);
#ifdef MONO_ARCH_SOFT_FLOAT
	if (ret->type == MONO_TYPE_R8) {
		MONO_INST_NEW (cfg, ins, OP_SETFRET);
		ins->dreg = cfg->ret->dreg;
		ins->sreg1 = val->dreg;
		MONO_ADD_INS (cfg->cbb, ins);
	if (ret->type == MONO_TYPE_R4) {
		/* Already converted to an int in method_to_ir () */
		MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, cfg->ret->dreg, val->dreg);
#elif defined(ARM_FPU_VFP)
	if (ret->type == MONO_TYPE_R8 || ret->type == MONO_TYPE_R4) {
		MONO_INST_NEW (cfg, ins, OP_SETFRET);
		ins->dreg = cfg->ret->dreg;
		ins->sreg1 = val->dreg;
		MONO_ADD_INS (cfg->cbb, ins);
	/* FPA: a plain fp register move suffices */
	if (ret->type == MONO_TYPE_R4 || ret->type == MONO_TYPE_R8) {
		MONO_EMIT_NEW_UNALU (cfg, OP_FMOVE, cfg->ret->dreg, val->dreg);
	/* Default: word-sized return in the return register */
	MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, cfg->ret->dreg, val->dreg);
1635 mono_arch_is_inst_imm (gint64 imm)
1640 #define DYN_CALL_STACK_ARGS 6
1643 MonoMethodSignature *sig;
1648 mgreg_t regs [PARAM_REGS + DYN_CALL_STACK_ARGS];
/*
 * dyn_call_supported:
 *
 *   Return whether the dyn-call machinery (mono_arch_start_dyn_call /
 * mono_arch_finish_dyn_call) can handle a call with this signature and
 * calling-convention info: all arguments must fit in the param registers
 * plus DYN_CALL_STACK_ARGS stack slots, and the storage kinds must be
 * ones the marshalling code understands.
 */
dyn_call_supported (CallInfo *cinfo, MonoMethodSignature *sig)
	/* Everything must fit in regs + the small fixed stack-arg area */
	if (sig->hasthis + sig->param_count > PARAM_REGS + DYN_CALL_STACK_ARGS)

	switch (cinfo->ret.storage) {
	case RegTypeGeneral:
	case RegTypeIRegPair:
	case RegTypeStructByAddr:
#elif defined(ARM_FPU_VFP)

	for (i = 0; i < cinfo->nargs; ++i) {
		switch (cinfo->args [i].storage) {
		case RegTypeGeneral:
		case RegTypeIRegPair:
			/* Stack args beyond the fixed area are not supported */
			if (cinfo->args [i].offset >= (DYN_CALL_STACK_ARGS * sizeof (gpointer)))
		case RegTypeStructByVal:
			if (cinfo->args [i].reg + cinfo->args [i].vtsize >= PARAM_REGS + DYN_CALL_STACK_ARGS)

	// FIXME: Can't use cinfo only as it doesn't contain info about I8/float
	for (i = 0; i < sig->param_count; ++i) {
		MonoType *t = sig->params [i];
#ifdef MONO_ARCH_SOFT_FLOAT
/*
 * mono_arch_dyn_call_prepare:
 *
 *   Precompute the per-signature info used by mono_arch_start_dyn_call ().
 * Returns NULL-equivalent failure path when the signature is not supported
 * (see dyn_call_supported ()).  The returned info is owned by the caller
 * and released with mono_arch_dyn_call_free ().
 */
mono_arch_dyn_call_prepare (MonoMethodSignature *sig)
	ArchDynCallInfo *info;

	cinfo = get_call_info (NULL, sig, FALSE);

	if (!dyn_call_supported (cinfo, sig)) {

	info = g_new0 (ArchDynCallInfo, 1);
	// FIXME: Preprocess the info to speed up start_dyn_call ()
	info->cinfo = cinfo;

	return (MonoDynCallInfo*)info;
/*
 * mono_arch_dyn_call_free:
 *
 *   Release the info allocated by mono_arch_dyn_call_prepare ().
 */
mono_arch_dyn_call_free (MonoDynCallInfo *info)
	ArchDynCallInfo *ainfo = (ArchDynCallInfo*)info;

	/* cinfo was malloc-ed by get_call_info (NULL, ...) */
	g_free (ainfo->cinfo);
/*
 * mono_arch_start_dyn_call:
 *
 *   Marshal the boxed argument array ARGS into the DynCallArgs structure
 * in BUF, laying out each value in the register/stack-slot image that the
 * dyn-call trampoline will load before jumping to the callee.  RET receives
 * the hidden vtype return address when the signature needs one.
 */
mono_arch_start_dyn_call (MonoDynCallInfo *info, gpointer **args, guint8 *ret, guint8 *buf, int buf_len)
	ArchDynCallInfo *dinfo = (ArchDynCallInfo*)info;
	DynCallArgs *p = (DynCallArgs*)buf;
	int arg_index, greg, i, j;
	MonoMethodSignature *sig = dinfo->sig;

	g_assert (buf_len >= sizeof (DynCallArgs));

	/* Hidden vtype-return address goes in the first register */
	if (dinfo->cinfo->vtype_retaddr)
		p->regs [greg ++] = (mgreg_t)ret;

	/* Implicit 'this' */
		p->regs [greg ++] = (mgreg_t)*(args [arg_index ++]);

	for (i = 0; i < sig->param_count; i++) {
		MonoType *t = mono_type_get_underlying_type (sig->params [i]);
		gpointer *arg = args [arg_index ++];
		ArgInfo *ainfo = &dinfo->cinfo->args [i + sig->hasthis];

		/* Compute the p->regs slot this argument occupies */
		if (ainfo->storage == RegTypeGeneral || ainfo->storage == RegTypeIRegPair || ainfo->storage == RegTypeStructByVal)
		else if (ainfo->storage == RegTypeBase)
			slot = PARAM_REGS + (ainfo->offset / 4);
			g_assert_not_reached ();

		/* byref arguments store the pointer itself */
			p->regs [slot] = (mgreg_t)*arg;

		case MONO_TYPE_STRING:
		case MONO_TYPE_CLASS:
		case MONO_TYPE_ARRAY:
		case MONO_TYPE_SZARRAY:
		case MONO_TYPE_OBJECT:
			p->regs [slot] = (mgreg_t)*arg;
		case MONO_TYPE_BOOLEAN:
			p->regs [slot] = *(guint8*)arg;
			p->regs [slot] = *(gint8*)arg;
			p->regs [slot] = *(gint16*)arg;
		case MONO_TYPE_CHAR:
			p->regs [slot] = *(guint16*)arg;
			p->regs [slot] = *(gint32*)arg;
			p->regs [slot] = *(guint32*)arg;
			/* 64 bit values occupy two consecutive slots */
			p->regs [slot ++] = (mgreg_t)arg [0];
			p->regs [slot] = (mgreg_t)arg [1];
			p->regs [slot] = *(mgreg_t*)arg;
			p->regs [slot ++] = (mgreg_t)arg [0];
			p->regs [slot] = (mgreg_t)arg [1];
		case MONO_TYPE_GENERICINST:
			if (MONO_TYPE_IS_REFERENCE (t)) {
				p->regs [slot] = (mgreg_t)*arg;
		case MONO_TYPE_VALUETYPE:
			g_assert (ainfo->storage == RegTypeStructByVal);

			/* size == 0: the vtype lives entirely in the stack-arg area */
			if (ainfo->size == 0)
				slot = PARAM_REGS + (ainfo->offset / 4);

			/* Copy the whole vtype (register + overflow words) */
			for (j = 0; j < ainfo->size + ainfo->vtsize; ++j)
				p->regs [slot ++] = ((mgreg_t*)arg) [j];
			g_assert_not_reached ();
/*
 * mono_arch_finish_dyn_call:
 *
 *   After the dyn-call trampoline returns, convert the raw register result
 * (res/res2 saved in BUF) back into the properly-typed value pointed to by
 * the DynCallArgs ret field, switching on the signature's return type.
 */
mono_arch_finish_dyn_call (MonoDynCallInfo *info, guint8 *buf)
	ArchDynCallInfo *ainfo = (ArchDynCallInfo*)info;
	MonoMethodSignature *sig = ((ArchDynCallInfo*)info)->sig;
	guint8 *ret = ((DynCallArgs*)buf)->ret;
	mgreg_t res = ((DynCallArgs*)buf)->res;
	mgreg_t res2 = ((DynCallArgs*)buf)->res2;

	switch (mono_type_get_underlying_type (sig->ret)->type) {
	case MONO_TYPE_VOID:
		*(gpointer*)ret = NULL;
	case MONO_TYPE_STRING:
	case MONO_TYPE_CLASS:
	case MONO_TYPE_ARRAY:
	case MONO_TYPE_SZARRAY:
	case MONO_TYPE_OBJECT:
		*(gpointer*)ret = (gpointer)res;
	case MONO_TYPE_BOOLEAN:
		*(guint8*)ret = res;
		*(gint16*)ret = res;
	case MONO_TYPE_CHAR:
		*(guint16*)ret = res;
		*(gint32*)ret = res;
		*(guint32*)ret = res;
		/* This handles endianness as well */
		((gint32*)ret) [0] = res;
		((gint32*)ret) [1] = res2;
	case MONO_TYPE_GENERICINST:
		if (MONO_TYPE_IS_REFERENCE (sig->ret)) {
			*(gpointer*)ret = (gpointer)res;
	case MONO_TYPE_VALUETYPE:
		/* Callee already wrote through the hidden return address */
		g_assert (ainfo->cinfo->vtype_retaddr);
#if defined(ARM_FPU_VFP)
		*(float*)ret = *(float*)&res;
	case MONO_TYPE_R8: {
		/* NOTE(review): "®s" below looks like an HTML-entity-mangled
		 * "&regs" (&reg -> (R)) — restore before compiling. */
		*(double*)ret = *(double*)®s;
		g_assert_not_reached ();
1943 * Allow tracing to work with this interface (with an optional argument)
/*
 * mono_arch_instrument_prolog:
 *
 *   Emit a call to FUNC (method, NULL) at method entry for tracing.
 * r0 = the MonoMethod, r1 = 0 (no frame pointer passed for now).
 */
mono_arch_instrument_prolog (MonoCompile *cfg, void *func, void *p, gboolean enable_arguments)
	code = mono_arm_emit_load_imm (code, ARMREG_R0, (guint32)cfg->method);
	ARM_MOV_REG_IMM8 (code, ARMREG_R1, 0); /* NULL ebp for now */
	code = mono_arm_emit_load_imm (code, ARMREG_R2, (guint32)func);
	code = emit_call_reg (code, ARMREG_R2);
/*
 * mono_arch_instrument_epilog_full:
 *
 *   Emit a call to FUNC at method exit for tracing.  The return value
 * (in r0/r1 or an fp reg, depending on the return type) is saved in the
 * param area around the call and restored afterwards; save_mode selects
 * how many words / which kind of value must be preserved.
 */
mono_arch_instrument_epilog_full (MonoCompile *cfg, void *func, void *p, gboolean enable_arguments, gboolean preserve_argument_registers)
	int save_mode = SAVE_NONE;
	MonoMethod *method = cfg->method;
	int rtype = mini_type_get_underlying_type (cfg->generic_sharing_context, mono_method_signature (method)->ret)->type;
	int save_offset = cfg->param_area;

	offset = code - cfg->native_code;
	/* we need about 16 instructions */
	if (offset > (cfg->code_size - 16 * 4)) {
		cfg->code_size *= 2;
		cfg->native_code = g_realloc (cfg->native_code, cfg->code_size);
		code = cfg->native_code + offset;

	case MONO_TYPE_VOID:
		/* special case string .ctor icall */
		/* NOTE(review): strcmp () returns 0 on match, so this condition
		 * reads as "name is NOT .ctor" — expected !strcmp here; confirm
		 * against the upstream source. */
		if (strcmp (".ctor", method->name) && method->klass == mono_defaults.string_class)
			save_mode = SAVE_ONE;
			save_mode = SAVE_NONE;
		save_mode = SAVE_TWO;
		save_mode = SAVE_FP;
	case MONO_TYPE_VALUETYPE:
		save_mode = SAVE_STRUCT;
		save_mode = SAVE_ONE;

	/* Save the return value before clobbering r0-r2 for the call */
	switch (save_mode) {
		ARM_STR_IMM (code, ARMREG_R0, cfg->frame_reg, save_offset);
		ARM_STR_IMM (code, ARMREG_R1, cfg->frame_reg, save_offset + 4);
		if (enable_arguments) {
			/* Pass the two result words as arguments 2 and 3 */
			ARM_MOV_REG_REG (code, ARMREG_R2, ARMREG_R1);
			ARM_MOV_REG_REG (code, ARMREG_R1, ARMREG_R0);
		ARM_STR_IMM (code, ARMREG_R0, cfg->frame_reg, save_offset);
		if (enable_arguments) {
			ARM_MOV_REG_REG (code, ARMREG_R1, ARMREG_R0);
		/* FIXME: what reg? */
		if (enable_arguments) {
			/* FIXME: what reg? */
		if (enable_arguments) {
			/* FIXME: get the actual address */
			ARM_MOV_REG_REG (code, ARMREG_R1, ARMREG_R0);

	/* r0 = method, then call FUNC through IP */
	code = mono_arm_emit_load_imm (code, ARMREG_R0, (guint32)cfg->method);
	code = mono_arm_emit_load_imm (code, ARMREG_IP, (guint32)func);
	code = emit_call_reg (code, ARMREG_IP);

	/* Restore the saved return value */
	switch (save_mode) {
		ARM_LDR_IMM (code, ARMREG_R0, cfg->frame_reg, save_offset);
		ARM_LDR_IMM (code, ARMREG_R1, cfg->frame_reg, save_offset + 4);
		ARM_LDR_IMM (code, ARMREG_R0, cfg->frame_reg, save_offset);
2065 * The immediate field for cond branches is big enough for all reasonable methods
2067 #define EMIT_COND_BRANCH_FLAGS(ins,condcode) \
2068 if (0 && ins->inst_true_bb->native_offset) { \
2069 ARM_B_COND (code, (condcode), (code - cfg->native_code + ins->inst_true_bb->native_offset) & 0xffffff); \
2071 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_BB, ins->inst_true_bb); \
2072 ARM_B_COND (code, (condcode), 0); \
2075 #define EMIT_COND_BRANCH(ins,cond) EMIT_COND_BRANCH_FLAGS(ins, branch_cc_table [(cond)])
2077 /* emit an exception if condition is fail
2079 * We assign the extra code used to throw the implicit exceptions
2080 * to cfg->bb_exit as far as the big branch handling is concerned
2082 #define EMIT_COND_SYSTEM_EXCEPTION_FLAGS(condcode,exc_name) \
2084 mono_add_patch_info (cfg, code - cfg->native_code, \
2085 MONO_PATCH_INFO_EXC, exc_name); \
2086 ARM_BL_COND (code, (condcode), 0); \
2089 #define EMIT_COND_SYSTEM_EXCEPTION(cond,exc_name) EMIT_COND_SYSTEM_EXCEPTION_FLAGS(branch_cc_table [(cond)], (exc_name))
/* First arch-specific peephole pass, run over each basic block. */
mono_arch_peephole_pass_1 (MonoCompile *cfg, MonoBasicBlock *bb)
/*
 * mono_arch_peephole_pass_2:
 *
 *   Local peephole optimizations over one basic block: forward stored
 * values into following loads from the same [basereg + offset], collapse
 * redundant loads and back-to-back reversed moves, and delete no-op moves.
 */
mono_arch_peephole_pass_2 (MonoCompile *cfg, MonoBasicBlock *bb)
	MonoInst *ins, *n, *last_ins = NULL;

	MONO_BB_FOR_EACH_INS_SAFE (bb, n, ins) {
		switch (ins->opcode) {
		/* Already done by an arch-independent pass */
		case OP_LOAD_MEMBASE:
		case OP_LOADI4_MEMBASE:
		 * OP_STORE_MEMBASE_REG reg, offset(basereg)
		 * OP_LOAD_MEMBASE offset(basereg), reg
			/* store followed by load of the same slot: forward the stored reg */
			if (last_ins && (last_ins->opcode == OP_STOREI4_MEMBASE_REG
					 || last_ins->opcode == OP_STORE_MEMBASE_REG) &&
			    ins->inst_basereg == last_ins->inst_destbasereg &&
			    ins->inst_offset == last_ins->inst_offset) {
				if (ins->dreg == last_ins->sreg1) {
					/* Load target already holds the value: drop the load */
					MONO_DELETE_INS (bb, ins);
					//static int c = 0; printf ("MATCHX %s %d\n", cfg->method->name,c++);
					ins->opcode = OP_MOVE;
					ins->sreg1 = last_ins->sreg1;

			 * Note: reg1 must be different from the basereg in the second load
			 * OP_LOAD_MEMBASE offset(basereg), reg1
			 * OP_LOAD_MEMBASE offset(basereg), reg2
			 * OP_LOAD_MEMBASE offset(basereg), reg1
			 * OP_MOVE reg1, reg2
			} if (last_ins && (last_ins->opcode == OP_LOADI4_MEMBASE
					   || last_ins->opcode == OP_LOAD_MEMBASE) &&
			      ins->inst_basereg != last_ins->dreg &&
			      ins->inst_basereg == last_ins->inst_basereg &&
			      ins->inst_offset == last_ins->inst_offset) {

				if (ins->dreg == last_ins->dreg) {
					/* Identical load into the same reg: delete it */
					MONO_DELETE_INS (bb, ins);
					ins->opcode = OP_MOVE;
					ins->sreg1 = last_ins->dreg;

				//g_assert_not_reached ();

			 * OP_STORE_MEMBASE_IMM imm, offset(basereg)
			 * OP_LOAD_MEMBASE offset(basereg), reg
			 * OP_STORE_MEMBASE_IMM imm, offset(basereg)
			 * OP_ICONST reg, imm
			} else if (last_ins && (last_ins->opcode == OP_STOREI4_MEMBASE_IMM
						|| last_ins->opcode == OP_STORE_MEMBASE_IMM) &&
				   ins->inst_basereg == last_ins->inst_destbasereg &&
				   ins->inst_offset == last_ins->inst_offset) {
				//static int c = 0; printf ("MATCHX %s %d\n", cfg->method->name,c++);
				ins->opcode = OP_ICONST;
				ins->inst_c0 = last_ins->inst_imm;
				g_assert_not_reached (); // check this rule
		case OP_LOADU1_MEMBASE:
		case OP_LOADI1_MEMBASE:
			/* byte store + byte load: replace the load with a sign/zero extension */
			if (last_ins && (last_ins->opcode == OP_STOREI1_MEMBASE_REG) &&
			    ins->inst_basereg == last_ins->inst_destbasereg &&
			    ins->inst_offset == last_ins->inst_offset) {
				ins->opcode = (ins->opcode == OP_LOADI1_MEMBASE) ? OP_ICONV_TO_I1 : OP_ICONV_TO_U1;
				ins->sreg1 = last_ins->sreg1;
		case OP_LOADU2_MEMBASE:
		case OP_LOADI2_MEMBASE:
			/* halfword store + load: same idea with 16 bit extensions */
			if (last_ins && (last_ins->opcode == OP_STOREI2_MEMBASE_REG) &&
			    ins->inst_basereg == last_ins->inst_destbasereg &&
			    ins->inst_offset == last_ins->inst_offset) {
				ins->opcode = (ins->opcode == OP_LOADI2_MEMBASE) ? OP_ICONV_TO_I2 : OP_ICONV_TO_U2;
				ins->sreg1 = last_ins->sreg1;
			ins->opcode = OP_MOVE;
			/* Self-move is a no-op */
			if (ins->dreg == ins->sreg1) {
				MONO_DELETE_INS (bb, ins);
			 * OP_MOVE sreg, dreg
			 * OP_MOVE dreg, sreg
			if (last_ins && last_ins->opcode == OP_MOVE &&
			    ins->sreg1 == last_ins->dreg &&
			    ins->dreg == last_ins->sreg1) {
				MONO_DELETE_INS (bb, ins);

	bb->last_ins = last_ins;
2215 * the branch_cc_table should maintain the order of these
2229 branch_cc_table [] = {
2243 #define NEW_INS(cfg,dest,op) do { \
2244 MONO_INST_NEW ((cfg), (dest), (op)); \
2245 mono_bblock_insert_before_ins (bb, ins, (dest)); \
/*
 * map_to_reg_reg_op:
 *
 *   Map a membase (reg + immediate offset) or immediate-operand opcode to
 * its register-register equivalent: loads/stores become MEMINDEX forms,
 * and _IMM stores become _REG stores.  Used by the lowering pass when an
 * immediate does not fit in the instruction encoding.
 */
map_to_reg_reg_op (int op)
	case OP_COMPARE_IMM:
	case OP_ICOMPARE_IMM:
	case OP_LOAD_MEMBASE:
		return OP_LOAD_MEMINDEX;
	case OP_LOADI4_MEMBASE:
		return OP_LOADI4_MEMINDEX;
	case OP_LOADU4_MEMBASE:
		return OP_LOADU4_MEMINDEX;
	case OP_LOADU1_MEMBASE:
		return OP_LOADU1_MEMINDEX;
	case OP_LOADI2_MEMBASE:
		return OP_LOADI2_MEMINDEX;
	case OP_LOADU2_MEMBASE:
		return OP_LOADU2_MEMINDEX;
	case OP_LOADI1_MEMBASE:
		return OP_LOADI1_MEMINDEX;
	case OP_STOREI1_MEMBASE_REG:
		return OP_STOREI1_MEMINDEX;
	case OP_STOREI2_MEMBASE_REG:
		return OP_STOREI2_MEMINDEX;
	case OP_STOREI4_MEMBASE_REG:
		return OP_STOREI4_MEMINDEX;
	case OP_STORE_MEMBASE_REG:
		return OP_STORE_MEMINDEX;
	case OP_STORER4_MEMBASE_REG:
		return OP_STORER4_MEMINDEX;
	case OP_STORER8_MEMBASE_REG:
		return OP_STORER8_MEMINDEX;
	/* _IMM stores are first turned into _REG stores (the immediate
	 * is materialized into a register by the lowering pass) */
	case OP_STORE_MEMBASE_IMM:
		return OP_STORE_MEMBASE_REG;
	case OP_STOREI1_MEMBASE_IMM:
		return OP_STOREI1_MEMBASE_REG;
	case OP_STOREI2_MEMBASE_IMM:
		return OP_STOREI2_MEMBASE_REG;
	case OP_STOREI4_MEMBASE_IMM:
		return OP_STOREI4_MEMBASE_REG;
	g_assert_not_reached ();
2313 * Remove from the instruction list the instructions that can't be
2314 * represented with very simple instructions with no register
/*
 * mono_arch_lowering_pass:
 *
 *   Rewrite instructions in BB into forms the ARM code emitter can encode:
 * immediates that don't fit the rotated-imm8 encoding are materialized
 * with OP_ICONST into a register, over-large membase offsets are turned
 * into MEMINDEX (or base+offset split for fp loads/stores, which have no
 * indexed form), multiplies by constants are strength-reduced, and some
 * fp compare+branch pairs get their operands swapped.
 */
mono_arch_lowering_pass (MonoCompile *cfg, MonoBasicBlock *bb)
	MonoInst *ins, *temp, *last_ins = NULL;
	int rot_amount, imm8, low_imm;

	MONO_BB_FOR_EACH_INS (bb, ins) {
		switch (ins->opcode) {
		case OP_COMPARE_IMM:
		case OP_ICOMPARE_IMM:
			/* Immediate not encodable as rotated imm8: load it into a reg */
			if ((imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount)) < 0) {
				NEW_INS (cfg, temp, OP_ICONST);
				temp->inst_c0 = ins->inst_imm;
				temp->dreg = mono_alloc_ireg (cfg);
				ins->sreg2 = temp->dreg;
				ins->opcode = mono_op_imm_to_op (ins->opcode);
			if (ins->opcode == OP_SBB || ins->opcode == OP_ISBB || ins->opcode == OP_SUBCC)
			/* Strength-reduce multiplications by a constant */
			if (ins->inst_imm == 1) {
				ins->opcode = OP_MOVE;
			if (ins->inst_imm == 0) {
				ins->opcode = OP_ICONST;
			imm8 = mono_is_power_of_two (ins->inst_imm);
				/* Power of two: shift instead of multiply */
				ins->opcode = OP_SHL_IMM;
				ins->inst_imm = imm8;
			NEW_INS (cfg, temp, OP_ICONST);
			temp->inst_c0 = ins->inst_imm;
			temp->dreg = mono_alloc_ireg (cfg);
			ins->sreg2 = temp->dreg;
			ins->opcode = OP_IMUL;
			if (ins->next && (ins->next->opcode == OP_COND_EXC_C || ins->next->opcode == OP_COND_EXC_IC))
				/* ARM sets the C flag to 1 if there was _no_ overflow */
				ins->next->opcode = OP_COND_EXC_NC;
		case OP_LOCALLOC_IMM:
			/* LOCALLOC only exists in register form */
			NEW_INS (cfg, temp, OP_ICONST);
			temp->inst_c0 = ins->inst_imm;
			temp->dreg = mono_alloc_ireg (cfg);
			ins->sreg1 = temp->dreg;
			ins->opcode = OP_LOCALLOC;
		case OP_LOAD_MEMBASE:
		case OP_LOADI4_MEMBASE:
		case OP_LOADU4_MEMBASE:
		case OP_LOADU1_MEMBASE:
			/* we can do two things: load the immed in a register
			 * and use an indexed load, or see if the immed can be
			 * represented as an ad_imm + a load with a smaller offset
			 * that fits. We just do the first for now, optimize later.
			if (arm_is_imm12 (ins->inst_offset))
			NEW_INS (cfg, temp, OP_ICONST);
			temp->inst_c0 = ins->inst_offset;
			temp->dreg = mono_alloc_ireg (cfg);
			ins->sreg2 = temp->dreg;
			ins->opcode = map_to_reg_reg_op (ins->opcode);
		case OP_LOADI2_MEMBASE:
		case OP_LOADU2_MEMBASE:
		case OP_LOADI1_MEMBASE:
			/* halfword/signed-byte loads only have an 8 bit offset field */
			if (arm_is_imm8 (ins->inst_offset))
			NEW_INS (cfg, temp, OP_ICONST);
			temp->inst_c0 = ins->inst_offset;
			temp->dreg = mono_alloc_ireg (cfg);
			ins->sreg2 = temp->dreg;
			ins->opcode = map_to_reg_reg_op (ins->opcode);
		case OP_LOADR4_MEMBASE:
		case OP_LOADR8_MEMBASE:
			if (arm_is_fpimm8 (ins->inst_offset))
			/* Split the offset: add the high part to a temp base reg,
			 * keep the low 9 bits as the load offset */
			low_imm = ins->inst_offset & 0x1ff;
			if ((imm8 = mono_arm_is_rotated_imm8 (ins->inst_offset & ~0x1ff, &rot_amount)) >= 0) {
				NEW_INS (cfg, temp, OP_ADD_IMM);
				temp->inst_imm = ins->inst_offset & ~0x1ff;
				temp->sreg1 = ins->inst_basereg;
				temp->dreg = mono_alloc_ireg (cfg);
				ins->inst_basereg = temp->dreg;
				ins->inst_offset = low_imm;
			/* VFP/FPA doesn't have indexed load instructions */
			g_assert_not_reached ();
		case OP_STORE_MEMBASE_REG:
		case OP_STOREI4_MEMBASE_REG:
		case OP_STOREI1_MEMBASE_REG:
			if (arm_is_imm12 (ins->inst_offset))
			NEW_INS (cfg, temp, OP_ICONST);
			temp->inst_c0 = ins->inst_offset;
			temp->dreg = mono_alloc_ireg (cfg);
			ins->sreg2 = temp->dreg;
			ins->opcode = map_to_reg_reg_op (ins->opcode);
		case OP_STOREI2_MEMBASE_REG:
			if (arm_is_imm8 (ins->inst_offset))
			NEW_INS (cfg, temp, OP_ICONST);
			temp->inst_c0 = ins->inst_offset;
			temp->dreg = mono_alloc_ireg (cfg);
			ins->sreg2 = temp->dreg;
			ins->opcode = map_to_reg_reg_op (ins->opcode);
		case OP_STORER4_MEMBASE_REG:
		case OP_STORER8_MEMBASE_REG:
			if (arm_is_fpimm8 (ins->inst_offset))
			/* Same base+offset split as the fp loads above */
			low_imm = ins->inst_offset & 0x1ff;
			if ((imm8 = mono_arm_is_rotated_imm8 (ins->inst_offset & ~ 0x1ff, &rot_amount)) >= 0 && arm_is_fpimm8 (low_imm)) {
				NEW_INS (cfg, temp, OP_ADD_IMM);
				temp->inst_imm = ins->inst_offset & ~0x1ff;
				temp->sreg1 = ins->inst_destbasereg;
				temp->dreg = mono_alloc_ireg (cfg);
				ins->inst_destbasereg = temp->dreg;
				ins->inst_offset = low_imm;
			/*g_print ("fail with: %d (%d, %d)\n", ins->inst_offset, ins->inst_offset & ~0x1ff, low_imm);*/
			/* VFP/FPA doesn't have indexed store instructions */
			g_assert_not_reached ();
		case OP_STORE_MEMBASE_IMM:
		case OP_STOREI1_MEMBASE_IMM:
		case OP_STOREI2_MEMBASE_IMM:
		case OP_STOREI4_MEMBASE_IMM:
			/* Materialize the stored immediate, then re-lower as a _REG store */
			NEW_INS (cfg, temp, OP_ICONST);
			temp->inst_c0 = ins->inst_imm;
			temp->dreg = mono_alloc_ireg (cfg);
			ins->sreg1 = temp->dreg;
			ins->opcode = map_to_reg_reg_op (ins->opcode);
			goto loop_start; /* make it handle the possibly big ins->inst_offset */
			gboolean swap = FALSE;

			/* Optimized away */

			/* Some fp compares require swapped operands */
			switch (ins->next->opcode) {
				ins->next->opcode = OP_FBLT;
				ins->next->opcode = OP_FBLT_UN;
				ins->next->opcode = OP_FBGE;
				ins->next->opcode = OP_FBGE_UN;
				ins->sreg1 = ins->sreg2;

	bb->last_ins = last_ins;
	bb->max_vreg = cfg->next_vreg;
/*
 * mono_arch_decompose_long_opts:
 *
 *   Decompose 64 bit operations into 32 bit ARM instruction pairs.
 * OP_LNEG becomes RSBS on the low word followed by RSC on the high word,
 * so the borrow from the low-word negation propagates.
 */
mono_arch_decompose_long_opts (MonoCompile *cfg, MonoInst *long_ins)
	if (long_ins->opcode == OP_LNEG) {
		MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ARM_RSBS_IMM, ins->dreg + 1, ins->sreg1 + 1, 0);
		MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ARM_RSC_IMM, ins->dreg + 2, ins->sreg1 + 2, 0);
/*
 * emit_float_to_int:
 *
 *   Emit native code converting the fp value in SREG to an integer of
 * SIZE bytes (sign- or zero-extended per IS_SIGNED) in DREG.  Uses the
 * FPA or VFP conversion instruction, then masks/shifts to the target width.
 */
emit_float_to_int (MonoCompile *cfg, guchar *code, int dreg, int sreg, int size, gboolean is_signed)
	/* sreg is a float, dreg is an integer reg */
	ARM_FIXZ (code, dreg, sreg);
#elif defined(ARM_FPU_VFP)
	/* VFP: convert into F0, then move the bits to the integer reg */
	ARM_TOSIZD (code, ARM_VFP_F0, sreg);
	ARM_TOUIZD (code, ARM_VFP_F0, sreg);
	ARM_FMRS (code, dreg, ARM_VFP_F0);
	/* Narrow to the requested size: mask (unsigned) ... */
	ARM_AND_REG_IMM8 (code, dreg, dreg, 0xff);
	else if (size == 2) {
		ARM_SHL_IMM (code, dreg, dreg, 16);
		ARM_SHR_IMM (code, dreg, dreg, 16);
	/* ... or shift left then arithmetic-shift right (signed) */
	ARM_SHL_IMM (code, dreg, dreg, 24);
	ARM_SAR_IMM (code, dreg, dreg, 24);
	} else if (size == 2) {
		ARM_SHL_IMM (code, dreg, dreg, 16);
		ARM_SAR_IMM (code, dreg, dreg, 16);
2580 const guchar *target;
2585 #define is_call_imm(diff) ((gint)(diff) >= -33554432 && (gint)(diff) <= 33554431)
/*
 * search_thunk_slot:
 *
 *   mono_domain_code_foreach () callback: scan one code chunk's thunk area
 * for either an existing thunk targeting pdata->target, or a free slot in
 * which to emit a new one, then patch the call site to branch to the thunk.
 * Each thunk is 12 bytes: load target from thunks [2] into IP, jump, data.
 */
search_thunk_slot (void *data, int csize, int bsize, void *user_data) {
	PatchData *pdata = (PatchData*)user_data;
	guchar *code = data;
	guint32 *thunks = data;
	guint32 *endthunks = (guint32*)(code + bsize);
	int difflow, diffhigh;

	/* always ensure a call from pdata->code can reach to the thunks without further thunks */
	difflow = (char*)pdata->code - (char*)thunks;
	diffhigh = (char*)pdata->code - (char*)endthunks;
	if (!((is_call_imm (thunks) && is_call_imm (endthunks)) || (is_call_imm (difflow) && is_call_imm (diffhigh))))

	 * The thunk is composed of 3 words:
	 * load constant from thunks [2] into ARM_IP
	 * Note that the LR register is already setup
	//g_print ("thunk nentries: %d\n", ((char*)endthunks - (char*)thunks)/16);
	if ((pdata->found == 2) || (pdata->code >= code && pdata->code <= code + csize)) {
		while (thunks < endthunks) {
			//g_print ("looking for target: %p at %p (%08x-%08x)\n", pdata->target, thunks, thunks [0], thunks [1]);
			if (thunks [2] == (guint32)pdata->target) {
				/* Reuse an existing thunk for the same target */
				arm_patch (pdata->code, (guchar*)thunks);
				mono_arch_flush_icache (pdata->code, 4);
			} else if ((thunks [0] == 0) && (thunks [1] == 0) && (thunks [2] == 0)) {
				/* found a free slot instead: emit thunk */
				/* ARMREG_IP is fine to use since this can't be an IMT call
				code = (guchar*)thunks;
				ARM_LDR_IMM (code, ARMREG_IP, ARMREG_PC, 0);
				if (thumb_supported)
					ARM_BX (code, ARMREG_IP);
					ARM_MOV_REG_REG (code, ARMREG_PC, ARMREG_IP);
				thunks [2] = (guint32)pdata->target;
				mono_arch_flush_icache ((guchar*)thunks, 12);

				arm_patch (pdata->code, (guchar*)thunks);
				mono_arch_flush_icache (pdata->code, 4);
			/* skip 12 bytes, the size of the thunk */
	//g_print ("failed thunk lookup for %p from %p at %p (%d entries)\n", pdata->target, pdata->code, data, count);
/*
 * handle_thunk:
 *
 *   Patch CODE to reach TARGET through a branch thunk when the target is
 * out of direct-branch range.  Walks the domain's code chunks twice under
 * the domain lock: first looking near the call site, then (pdata.found == 2)
 * taking the first available slot anywhere.  Aborts if no slot is found.
 */
handle_thunk (MonoDomain *domain, int absolute, guchar *code, const guchar *target)
		domain = mono_domain_get ();

	pdata.target = target;
	pdata.absolute = absolute;

	mono_domain_lock (domain);
	mono_domain_code_foreach (domain, search_thunk_slot, &pdata);

		/* this uses the first available slot */
		mono_domain_code_foreach (domain, search_thunk_slot, &pdata);
	mono_domain_unlock (domain);

	if (pdata.found != 1)
		g_print ("thunk failed for %p from %p\n", target, code);
	g_assert (pdata.found == 1);
/*
 * arm_patch_general:
 *
 *   Patch the branch/call at CODE to transfer to TARGET.  Handles direct
 * B/BL (rewriting the 24 bit displacement, switching BL->BLX for Thumb
 * targets, falling back to a thunk when out of range) and the various
 * load-constant-and-jump sequences (BX/BLX through IP), where the embedded
 * address word is overwritten instead.
 */
arm_patch_general (MonoDomain *domain, guchar *code, const guchar *target)
	guint32 *code32 = (void*)code;
	guint32 ins = *code32;
	guint32 prim = (ins >> 25) & 7;	/* top opcode bits: 101b == branch */
	guint32 tval = GPOINTER_TO_UINT (target);

	//g_print ("patching 0x%08x (0x%08x) to point to 0x%08x\n", code, ins, target);
	if (prim == 5) { /* 101b */
		/* the diff starts 8 bytes from the branch opcode */
		gint diff = target - code - 8;
		gint tmask = 0xffffffff;
		if (tval & 1) { /* entering thumb mode */
			diff = target - 1 - code - 8;
			g_assert (thumb_supported);
			tbits = 0xf << 28; /* bl->blx bit pattern */
			g_assert ((ins & (1 << 24))); /* it must be a bl, not b instruction */
			/* this low bit of the displacement is moved to bit 24 in the instruction encoding */
			tmask = ~(1 << 24); /* clear the link bit */
			/*g_print ("blx to thumb: target: %p, code: %p, diff: %d, mask: %x\n", target, code, diff, tmask);*/
		/* Positive displacement within the 24 bit range */
		if (diff <= 33554431) {
			ins = (ins & 0xff000000) | diff;
			*code32 = ins | tbits;
		/* diff between 0 and -33554432 */
		if (diff >= -33554432) {
			ins = (ins & 0xff000000) | (diff & ~0xff000000);
			*code32 = ins | tbits;
		/* Out of range: go through a thunk */
		handle_thunk (domain, TRUE, code, target);

	 * The alternative call sequences look like this:
	 * ldr ip, [pc] // loads the address constant
	 * b 1f // jumps around the constant
	 * address constant embedded in the code
	 * There are two cases for patching:
	 * a) at the end of method emission: in this case code points to the start
	 * of the call sequence
	 * b) during runtime patching of the call site: in this case code points
	 * to the mov pc, ip instruction
	 * We have to handle also the thunk jump code sequence:
	 * address constant // execution never reaches here
	if ((ins & 0x0ffffff0) == 0x12fff10) {
		/* Branch and exchange: the address is constructed in a reg
		 * We can patch BX when the code sequence is the following:
		 *  ldr     ip, [pc, #0]    ; 0x8
		/* Re-emit the expected sequence into ccode to compare against */
		guint8 *emit = (guint8*)ccode;
		ARM_LDR_IMM (emit, ARMREG_IP, ARMREG_PC, 0);
		ARM_MOV_REG_REG (emit, ARMREG_LR, ARMREG_PC);
		ARM_BX (emit, ARMREG_IP);

		/*patching from magic trampoline*/
		if (ins == ccode [3]) {
			g_assert (code32 [-4] == ccode [0]);
			g_assert (code32 [-3] == ccode [1]);
			g_assert (code32 [-1] == ccode [2]);
			code32 [-2] = (guint32)target;
		/*patching from JIT*/
		if (ins == ccode [0]) {
			g_assert (code32 [1] == ccode [1]);
			g_assert (code32 [3] == ccode [2]);
			g_assert (code32 [4] == ccode [3]);
			code32 [2] = (guint32)target;
		g_assert_not_reached ();
	} else if ((ins & 0x0ffffff0) == 0x12fff30) {
		/* BLX-through-register sequence: address word precedes the BLX */
		guint8 *emit = (guint8*)ccode;
		ARM_LDR_IMM (emit, ARMREG_IP, ARMREG_PC, 0);
		ARM_BLX_REG (emit, ARMREG_IP);

		g_assert (code32 [-3] == ccode [0]);
		g_assert (code32 [-2] == ccode [1]);
		g_assert (code32 [0] == ccode [2]);

		code32 [-1] = (guint32)target;
		/* ldr/mov lr/mov pc (or bx) far-call and thunk sequences */
		guint32 *tmp = ccode;
		guint8 *emit = (guint8*)tmp;
		ARM_LDR_IMM (emit, ARMREG_IP, ARMREG_PC, 0);
		ARM_MOV_REG_REG (emit, ARMREG_LR, ARMREG_PC);
		ARM_MOV_REG_REG (emit, ARMREG_PC, ARMREG_IP);
		ARM_BX (emit, ARMREG_IP);
		if (ins == ccode [2]) {
			g_assert_not_reached (); // should be -2 ...
			code32 [-1] = (guint32)target;
		if (ins == ccode [0]) {
			/* handles both thunk jump code and the far call sequence */
			code32 [2] = (guint32)target;
		g_assert_not_reached ();
//	g_print ("patched with 0x%08x\n", ins);
/*
 * arm_patch:
 * Patch the instruction (or call/branch sequence) at @code so that it
 * transfers control to @target. Thin convenience wrapper around
 * arm_patch_general () passing a NULL domain; presumably that means no
 * thunk area is available from a domain code manager — confirm against
 * arm_patch_general ()/handle_thunk (), which take the domain argument.
 */
2822 arm_patch (guchar *code, const guchar *target)
2824 	arm_patch_general (NULL, code, target);
2828 * Return the >= 0 uimm8 value if val can be represented with a byte + rotation
2829 * (with the rotation amount in *rot_amount. rot_amount is already adjusted
2830 * to be used with the emit macros.
2831 * Return -1 otherwise.
2834 mono_arm_is_rotated_imm8 (guint32 val, gint *rot_amount)
	/* ARM data-processing immediates are an 8-bit value rotated right by an
	 * even amount, so only even rotation counts need to be tried. */
2837 	for (i = 0; i < 31; i+= 2) {
	/* rotate val right by i bits.
	 * NOTE(review): when i == 0 this evaluates `val << 32`, which is
	 * undefined behavior in C — works on the compilers historically used
	 * here, but worth confirming/guarding. */
2838 		res = (val << (32 - i)) | (val >> i);
	/* found a fit: store the rotate amount in the form the ARM_*_REG_IMM
	 * emit macros expect, mapping the i == 0 case to 0 */
2841 			*rot_amount = i? 32 - i: 0;
2848 * Emits in code a sequence of instructions that load the value 'val'
2849 * into the dreg register. Uses at most 4 instructions.
2852 mono_arm_emit_load_imm (guint8 *code, int dreg, guint32 val)
2854 	int imm8, rot_amount;
	/* PC-relative load of the constant placed right after this instruction */
2856 	ARM_LDR_IMM (code, dreg, ARMREG_PC, 0);
2857 	/* skip the constant pool */
	/* fast path: val (or its bitwise complement, via MVN) fits in a single
	 * rotated-imm8 MOV — covers the vast majority of constants */
2863 	if ((imm8 = mono_arm_is_rotated_imm8 (val, &rot_amount)) >= 0) {
2864 		ARM_MOV_REG_IMM (code, dreg, imm8, rot_amount);
2865 	} else if ((imm8 = mono_arm_is_rotated_imm8 (~val, &rot_amount)) >= 0) {
2866 		ARM_MVN_REG_IMM (code, dreg, imm8, rot_amount);
	/* MOVW/MOVT pair: loads any 32-bit value in two instructions; these
	 * encodings presumably require a newer architecture (v6T2/v7) — the
	 * guarding condition is not visible here, confirm against v7_supported */
2869 		ARM_MOVW_REG_IMM (code, dreg, val & 0xffff);
2871 			ARM_MOVT_REG_IMM (code, dreg, (val >> 16) & 0xffff);
	/* generic fallback: build the value byte by byte, one MOV followed by
	 * up to three ADDs, each contributing one byte at its rotation */
2875 			ARM_MOV_REG_IMM8 (code, dreg, (val & 0xFF));
2877 				ARM_ADD_REG_IMM (code, dreg, dreg, (val & 0xFF00) >> 8, 24);
2879 			if (val & 0xFF0000) {
2880 				ARM_ADD_REG_IMM (code, dreg, dreg, (val & 0xFF0000) >> 16, 16);
2882 			if (val & 0xFF000000) {
2883 				ARM_ADD_REG_IMM (code, dreg, dreg, (val & 0xFF000000) >> 24, 8);
	/* low byte is zero: start the sequence from whichever byte is set */
2885 	} else if (val & 0xFF00) {
2886 		ARM_MOV_REG_IMM (code, dreg, (val & 0xFF00) >> 8, 24);
2887 		if (val & 0xFF0000) {
2888 			ARM_ADD_REG_IMM (code, dreg, dreg, (val & 0xFF0000) >> 16, 16);
2890 		if (val & 0xFF000000) {
2891 			ARM_ADD_REG_IMM (code, dreg, dreg, (val & 0xFF000000) >> 24, 8);
2893 	} else if (val & 0xFF0000) {
2894 		ARM_MOV_REG_IMM (code, dreg, (val & 0xFF0000) >> 16, 16);
2895 		if (val & 0xFF000000) {
2896 			ARM_ADD_REG_IMM (code, dreg, dreg, (val & 0xFF000000) >> 24, 8);
2899 	//g_assert_not_reached ();
/*
 * mono_arm_thumb_supported:
 * Returns non-zero if the Thumb instruction set was detected as available
 * on this CPU (reports the file-scope thumb_supported flag; the branch
 * patching code also asserts on it when retargeting a bl to Thumb code).
 */
2905 mono_arm_thumb_supported (void)
2907 	return thumb_supported;
2911 * emit_load_volatile_arguments:
2913 * Load volatile arguments from the stack to the original input registers.
2914 * Required before a tail call.
2917 emit_load_volatile_arguments (MonoCompile *cfg, guint8 *code)
2919 	MonoMethod *method = cfg->method;
2920 	MonoMethodSignature *sig;
2925 	/* FIXME: Generate intermediate code instead */
2927 	sig = mono_method_signature (method);
2929 	/* This is the opposite of the code in emit_prolog */
2933 	cinfo = get_call_info (NULL, sig, sig->pinvoke);
	/* valuetype return: reload the hidden vret address argument into the
	 * register the calling convention assigned it (cinfo->ret.reg) */
2935 	if (MONO_TYPE_ISSTRUCT (sig->ret)) {
2936 		ArgInfo *ainfo = &cinfo->ret;
2937 		inst = cfg->vret_addr;
2938 		g_assert (arm_is_imm12 (inst->inst_offset));
2939 		ARM_LDR_IMM (code, ainfo->reg, inst->inst_basereg, inst->inst_offset);
	/* walk every argument (including the implicit 'this') and move its
	 * current value back to where the callee expects to receive it */
2941 	for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
2942 		ArgInfo *ainfo = cinfo->args + i;
2943 		inst = cfg->args [pos];
2945 		if (cfg->verbose_level > 2)
2946 			g_print ("Loading argument %d (type: %d)\n", i, ainfo->storage);
	/* argument lives in a register in this method: move it directly */
2947 		if (inst->opcode == OP_REGVAR) {
2948 			if (ainfo->storage == RegTypeGeneral)
2949 				ARM_MOV_REG_REG (code, inst->dreg, ainfo->reg);
2950 			else if (ainfo->storage == RegTypeFP) {
2951 				g_assert_not_reached ();
2952 			} else if (ainfo->storage == RegTypeBase) {
	/* incoming slot was on the caller's stack: reload relative to SP,
	 * going through IP when the offset does not fit an imm12 */
2956 				if (arm_is_imm12 (prev_sp_offset + ainfo->offset)) {
2957 					ARM_LDR_IMM (code, inst->dreg, ARMREG_SP, (prev_sp_offset + ainfo->offset));
2959 					code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
2960 					ARM_LDR_REG_REG (code, inst->dreg, ARMREG_SP, ARMREG_IP);
2964 				g_assert_not_reached ();
	/* argument was spilled to this frame: reload it into its input reg(s) */
2966 			if (ainfo->storage == RegTypeGeneral || ainfo->storage == RegTypeIRegPair) {
2967 				switch (ainfo->size) {
	/* 8-byte argument: reload both halves into the register pair */
2974 					g_assert (arm_is_imm12 (inst->inst_offset));
2975 					ARM_LDR_IMM (code, ainfo->reg, inst->inst_basereg, inst->inst_offset);
2976 					g_assert (arm_is_imm12 (inst->inst_offset + 4));
2977 					ARM_LDR_IMM (code, ainfo->reg + 1, inst->inst_basereg, inst->inst_offset + 4);
2980 					if (arm_is_imm12 (inst->inst_offset)) {
2981 						ARM_LDR_IMM (code, ainfo->reg, inst->inst_basereg, inst->inst_offset);
2983 						code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
2984 						ARM_LDR_REG_REG (code, ainfo->reg, inst->inst_basereg, ARMREG_IP);
2988 			} else if (ainfo->storage == RegTypeBaseGen) {
2991 			} else if (ainfo->storage == RegTypeBase) {
2993 			} else if (ainfo->storage == RegTypeFP) {
2994 				g_assert_not_reached ();
	/* struct passed by value in registers: copy it back one word at a
	 * time from the frame into consecutive argument registers */
2995 			} else if (ainfo->storage == RegTypeStructByVal) {
2996 				int doffset = inst->inst_offset;
3000 				if (mono_class_from_mono_type (inst->inst_vtype))
3001 					size = mono_class_native_size (mono_class_from_mono_type (inst->inst_vtype), NULL);
3002 				for (cur_reg = 0; cur_reg < ainfo->size; ++cur_reg) {
3003 					if (arm_is_imm12 (doffset)) {
3004 						ARM_LDR_IMM (code, ainfo->reg + cur_reg, inst->inst_basereg, doffset);
3006 						code = mono_arm_emit_load_imm (code, ARMREG_IP, doffset);
3007 						ARM_LDR_REG_REG (code, ainfo->reg + cur_reg, inst->inst_basereg, ARMREG_IP);
3009 					soffset += sizeof (gpointer);
3010 					doffset += sizeof (gpointer);
3015 			} else if (ainfo->storage == RegTypeStructByAddr) {
3032 mono_arch_output_basic_block (MonoCompile *cfg, MonoBasicBlock *bb)
3037 guint8 *code = cfg->native_code + cfg->code_len;
3038 MonoInst *last_ins = NULL;
3039 guint last_offset = 0;
3041 int imm8, rot_amount;
3043 /* we don't align basic blocks of loops on arm */
3045 if (cfg->verbose_level > 2)
3046 g_print ("Basic block %d starting at offset 0x%x\n", bb->block_num, bb->native_offset);
3048 cpos = bb->max_offset;
3050 if (cfg->prof_options & MONO_PROFILE_COVERAGE) {
3051 //MonoCoverageInfo *cov = mono_get_coverage_info (cfg->method);
3052 //g_assert (!mono_compile_aot);
3055 // cov->data [bb->dfn].iloffset = bb->cil_code - cfg->cil_code;
3056 /* this is not thread save, but good enough */
3057 /* fixme: howto handle overflows? */
3058 //x86_inc_mem (code, &cov->data [bb->dfn].count);
3061 if (mono_break_at_bb_method && mono_method_desc_full_match (mono_break_at_bb_method, cfg->method) && bb->block_num == mono_break_at_bb_bb_num) {
3062 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_INTERNAL_METHOD,
3063 (gpointer)"mono_break");
3064 code = emit_call_seq (cfg, code);
3067 MONO_BB_FOR_EACH_INS (bb, ins) {
3068 offset = code - cfg->native_code;
3070 max_len = ((guint8 *)ins_get_spec (ins->opcode))[MONO_INST_LEN];
3072 if (offset > (cfg->code_size - max_len - 16)) {
3073 cfg->code_size *= 2;
3074 cfg->native_code = g_realloc (cfg->native_code, cfg->code_size);
3075 code = cfg->native_code + offset;
3077 // if (ins->cil_code)
3078 // g_print ("cil code\n");
3079 mono_debug_record_line_number (cfg, ins, offset);
3081 switch (ins->opcode) {
3082 case OP_MEMORY_BARRIER:
3085 #ifdef HAVE_AEABI_READ_TP
3086 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_INTERNAL_METHOD,
3087 (gpointer)"__aeabi_read_tp");
3088 code = emit_call_seq (cfg, code);
3090 ARM_LDR_IMM (code, ins->dreg, ARMREG_R0, ins->inst_offset);
3092 g_assert_not_reached ();
3096 ppc_mullw (code, ppc_r4, ins->sreg1, ins->sreg2);
3097 ppc_mulhw (code, ppc_r3, ins->sreg1, ins->sreg2);
3100 ppc_mullw (code, ppc_r4, ins->sreg1, ins->sreg2);
3101 ppc_mulhwu (code, ppc_r3, ins->sreg1, ins->sreg2);
3103 case OP_STOREI1_MEMBASE_IMM:
3104 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_imm & 0xFF);
3105 g_assert (arm_is_imm12 (ins->inst_offset));
3106 ARM_STRB_IMM (code, ARMREG_LR, ins->inst_destbasereg, ins->inst_offset);
3108 case OP_STOREI2_MEMBASE_IMM:
3109 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_imm & 0xFFFF);
3110 g_assert (arm_is_imm8 (ins->inst_offset));
3111 ARM_STRH_IMM (code, ARMREG_LR, ins->inst_destbasereg, ins->inst_offset);
3113 case OP_STORE_MEMBASE_IMM:
3114 case OP_STOREI4_MEMBASE_IMM:
3115 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_imm);
3116 g_assert (arm_is_imm12 (ins->inst_offset));
3117 ARM_STR_IMM (code, ARMREG_LR, ins->inst_destbasereg, ins->inst_offset);
3119 case OP_STOREI1_MEMBASE_REG:
3120 g_assert (arm_is_imm12 (ins->inst_offset));
3121 ARM_STRB_IMM (code, ins->sreg1, ins->inst_destbasereg, ins->inst_offset);
3123 case OP_STOREI2_MEMBASE_REG:
3124 g_assert (arm_is_imm8 (ins->inst_offset));
3125 ARM_STRH_IMM (code, ins->sreg1, ins->inst_destbasereg, ins->inst_offset);
3127 case OP_STORE_MEMBASE_REG:
3128 case OP_STOREI4_MEMBASE_REG:
3129 /* this case is special, since it happens for spill code after lowering has been called */
3130 if (arm_is_imm12 (ins->inst_offset)) {
3131 ARM_STR_IMM (code, ins->sreg1, ins->inst_destbasereg, ins->inst_offset);
3133 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_offset);
3134 ARM_STR_REG_REG (code, ins->sreg1, ins->inst_destbasereg, ARMREG_LR);
3137 case OP_STOREI1_MEMINDEX:
3138 ARM_STRB_REG_REG (code, ins->sreg1, ins->inst_destbasereg, ins->sreg2);
3140 case OP_STOREI2_MEMINDEX:
3141 ARM_STRH_REG_REG (code, ins->sreg1, ins->inst_destbasereg, ins->sreg2);
3143 case OP_STORE_MEMINDEX:
3144 case OP_STOREI4_MEMINDEX:
3145 ARM_STR_REG_REG (code, ins->sreg1, ins->inst_destbasereg, ins->sreg2);
3148 g_assert_not_reached ();
3150 case OP_LOAD_MEMINDEX:
3151 case OP_LOADI4_MEMINDEX:
3152 case OP_LOADU4_MEMINDEX:
3153 ARM_LDR_REG_REG (code, ins->dreg, ins->inst_basereg, ins->sreg2);
3155 case OP_LOADI1_MEMINDEX:
3156 ARM_LDRSB_REG_REG (code, ins->dreg, ins->inst_basereg, ins->sreg2);
3158 case OP_LOADU1_MEMINDEX:
3159 ARM_LDRB_REG_REG (code, ins->dreg, ins->inst_basereg, ins->sreg2);
3161 case OP_LOADI2_MEMINDEX:
3162 ARM_LDRSH_REG_REG (code, ins->dreg, ins->inst_basereg, ins->sreg2);
3164 case OP_LOADU2_MEMINDEX:
3165 ARM_LDRH_REG_REG (code, ins->dreg, ins->inst_basereg, ins->sreg2);
3167 case OP_LOAD_MEMBASE:
3168 case OP_LOADI4_MEMBASE:
3169 case OP_LOADU4_MEMBASE:
3170 /* this case is special, since it happens for spill code after lowering has been called */
3171 if (arm_is_imm12 (ins->inst_offset)) {
3172 ARM_LDR_IMM (code, ins->dreg, ins->inst_basereg, ins->inst_offset);
3174 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_offset);
3175 ARM_LDR_REG_REG (code, ins->dreg, ins->inst_basereg, ARMREG_LR);
3178 case OP_LOADI1_MEMBASE:
3179 g_assert (arm_is_imm8 (ins->inst_offset));
3180 ARM_LDRSB_IMM (code, ins->dreg, ins->inst_basereg, ins->inst_offset);
3182 case OP_LOADU1_MEMBASE:
3183 g_assert (arm_is_imm12 (ins->inst_offset));
3184 ARM_LDRB_IMM (code, ins->dreg, ins->inst_basereg, ins->inst_offset);
3186 case OP_LOADU2_MEMBASE:
3187 g_assert (arm_is_imm8 (ins->inst_offset));
3188 ARM_LDRH_IMM (code, ins->dreg, ins->inst_basereg, ins->inst_offset);
3190 case OP_LOADI2_MEMBASE:
3191 g_assert (arm_is_imm8 (ins->inst_offset));
3192 ARM_LDRSH_IMM (code, ins->dreg, ins->inst_basereg, ins->inst_offset);
3194 case OP_ICONV_TO_I1:
3195 ARM_SHL_IMM (code, ins->dreg, ins->sreg1, 24);
3196 ARM_SAR_IMM (code, ins->dreg, ins->dreg, 24);
3198 case OP_ICONV_TO_I2:
3199 ARM_SHL_IMM (code, ins->dreg, ins->sreg1, 16);
3200 ARM_SAR_IMM (code, ins->dreg, ins->dreg, 16);
3202 case OP_ICONV_TO_U1:
3203 ARM_AND_REG_IMM8 (code, ins->dreg, ins->sreg1, 0xff);
3205 case OP_ICONV_TO_U2:
3206 ARM_SHL_IMM (code, ins->dreg, ins->sreg1, 16);
3207 ARM_SHR_IMM (code, ins->dreg, ins->dreg, 16);
3211 ARM_CMP_REG_REG (code, ins->sreg1, ins->sreg2);
3213 case OP_COMPARE_IMM:
3214 case OP_ICOMPARE_IMM:
3215 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
3216 g_assert (imm8 >= 0);
3217 ARM_CMP_REG_IMM (code, ins->sreg1, imm8, rot_amount);
3221 * gdb does not like encountering the hw breakpoint ins in the debugged code.
3222 * So instead of emitting a trap, we emit a call a C function and place a
3225 //*(int*)code = 0xef9f0001;
3228 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_INTERNAL_METHOD,
3229 (gpointer)"mono_break");
3230 code = emit_call_seq (cfg, code);
3232 case OP_RELAXED_NOP:
3237 case OP_DUMMY_STORE:
3238 case OP_NOT_REACHED:
3241 case OP_SEQ_POINT: {
3243 MonoInst *info_var = cfg->arch.seq_point_info_var;
3244 MonoInst *ss_trigger_page_var = cfg->arch.ss_trigger_page_var;
3246 int dreg = ARMREG_LR;
3249 * For AOT, we use one got slot per method, which will point to a
3250 * SeqPointInfo structure, containing all the information required
3251 * by the code below.
3253 if (cfg->compile_aot) {
3254 g_assert (info_var);
3255 g_assert (info_var->opcode == OP_REGOFFSET);
3256 g_assert (arm_is_imm12 (info_var->inst_offset));
3260 * Read from the single stepping trigger page. This will cause a
3261 * SIGSEGV when single stepping is enabled.
3262 * We do this _before_ the breakpoint, so single stepping after
3263 * a breakpoint is hit will step to the next IL offset.
3265 g_assert (((guint64)(gsize)ss_trigger_page >> 32) == 0);
3267 if (ins->flags & MONO_INST_SINGLE_STEP_LOC) {
3268 if (cfg->compile_aot) {
3269 /* Load the trigger page addr from the variable initialized in the prolog */
3270 var = ss_trigger_page_var;
3272 g_assert (var->opcode == OP_REGOFFSET);
3273 g_assert (arm_is_imm12 (var->inst_offset));
3274 ARM_LDR_IMM (code, dreg, var->inst_basereg, var->inst_offset);
3276 ARM_LDR_IMM (code, dreg, ARMREG_PC, 0);
3278 *(int*)code = (int)ss_trigger_page;
3281 ARM_LDR_IMM (code, dreg, dreg, 0);
3284 il_offset = ins->inst_imm;
3286 if (!cfg->seq_points)
3287 cfg->seq_points = g_ptr_array_new ();
3288 g_ptr_array_add (cfg->seq_points, GUINT_TO_POINTER (il_offset));
3289 g_ptr_array_add (cfg->seq_points, GUINT_TO_POINTER (code - cfg->native_code));
3291 if (cfg->compile_aot) {
3292 guint32 offset = code - cfg->native_code;
3295 ARM_LDR_IMM (code, dreg, info_var->inst_basereg, info_var->inst_offset);
3296 /* Add the offset */
3297 val = ((offset / 4) * sizeof (guint8*)) + G_STRUCT_OFFSET (SeqPointInfo, bp_addrs);
3298 ARM_ADD_REG_IMM (code, dreg, dreg, (val & 0xFF), 0);
3300 * Have to emit nops to keep the difference between the offset
3301 * stored in seq_points and breakpoint instruction constant,
3302 * mono_arch_get_ip_for_breakpoint () depends on this.
3305 ARM_ADD_REG_IMM (code, dreg, dreg, (val & 0xFF00) >> 8, 24);
3309 ARM_ADD_REG_IMM (code, dreg, dreg, (val & 0xFF0000) >> 16, 16);
3312 g_assert (!(val & 0xFF000000));
3313 /* Load the info->bp_addrs [offset], which is either 0 or the address of a trigger page */
3314 ARM_LDR_IMM (code, dreg, dreg, 0);
3316 /* What is faster, a branch or a load ? */
3317 ARM_CMP_REG_IMM (code, dreg, 0, 0);
3318 /* The breakpoint instruction */
3319 ARM_LDR_IMM_COND (code, dreg, dreg, 0, ARMCOND_NE);
3322 * A placeholder for a possible breakpoint inserted by
3323 * mono_arch_set_breakpoint ().
3325 for (i = 0; i < 4; ++i)
3332 ARM_ADDS_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
3335 ARM_ADD_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
3339 ARM_ADCS_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
3342 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
3343 g_assert (imm8 >= 0);
3344 ARM_ADDS_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
3348 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
3349 g_assert (imm8 >= 0);
3350 ARM_ADD_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
3354 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
3355 g_assert (imm8 >= 0);
3356 ARM_ADCS_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
3359 ARM_ADD_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
3360 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, "OverflowException");
3362 case OP_IADD_OVF_UN:
3363 ARM_ADD_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
3364 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, "OverflowException");
3367 ARM_SUB_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
3368 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, "OverflowException");
3370 case OP_ISUB_OVF_UN:
3371 ARM_SUB_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
3372 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_TRUE, PPC_BR_EQ, "OverflowException");
3374 case OP_ADD_OVF_CARRY:
3375 ARM_ADCS_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
3376 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, "OverflowException");
3378 case OP_ADD_OVF_UN_CARRY:
3379 ARM_ADCS_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
3380 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, "OverflowException");
3382 case OP_SUB_OVF_CARRY:
3383 ARM_SBCS_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
3384 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, "OverflowException");
3386 case OP_SUB_OVF_UN_CARRY:
3387 ARM_SBCS_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
3388 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_TRUE, PPC_BR_EQ, "OverflowException");
3392 ARM_SUBS_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
3395 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
3396 g_assert (imm8 >= 0);
3397 ARM_SUBS_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
3400 ARM_SUB_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
3404 ARM_SBCS_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
3408 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
3409 g_assert (imm8 >= 0);
3410 ARM_SUB_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
3414 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
3415 g_assert (imm8 >= 0);
3416 ARM_SBCS_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
3418 case OP_ARM_RSBS_IMM:
3419 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
3420 g_assert (imm8 >= 0);
3421 ARM_RSBS_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
3423 case OP_ARM_RSC_IMM:
3424 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
3425 g_assert (imm8 >= 0);
3426 ARM_RSC_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
3429 ARM_AND_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
3433 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
3434 g_assert (imm8 >= 0);
3435 ARM_AND_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
3443 /* crappy ARM arch doesn't have a DIV instruction */
3444 g_assert_not_reached ();
3446 ARM_ORR_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
3450 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
3451 g_assert (imm8 >= 0);
3452 ARM_ORR_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
3455 ARM_EOR_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
3459 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
3460 g_assert (imm8 >= 0);
3461 ARM_EOR_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
3464 ARM_SHL_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
3469 ARM_SHL_IMM (code, ins->dreg, ins->sreg1, (ins->inst_imm & 0x1f));
3470 else if (ins->dreg != ins->sreg1)
3471 ARM_MOV_REG_REG (code, ins->dreg, ins->sreg1);
3474 ARM_SAR_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
3479 ARM_SAR_IMM (code, ins->dreg, ins->sreg1, (ins->inst_imm & 0x1f));
3480 else if (ins->dreg != ins->sreg1)
3481 ARM_MOV_REG_REG (code, ins->dreg, ins->sreg1);
3484 case OP_ISHR_UN_IMM:
3486 ARM_SHR_IMM (code, ins->dreg, ins->sreg1, (ins->inst_imm & 0x1f));
3487 else if (ins->dreg != ins->sreg1)
3488 ARM_MOV_REG_REG (code, ins->dreg, ins->sreg1);
3491 ARM_SHR_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
3494 ARM_MVN_REG_REG (code, ins->dreg, ins->sreg1);
3497 ARM_RSB_REG_IMM8 (code, ins->dreg, ins->sreg1, 0);
3500 if (ins->dreg == ins->sreg2)
3501 ARM_MUL_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
3503 ARM_MUL_REG_REG (code, ins->dreg, ins->sreg2, ins->sreg1);
3506 g_assert_not_reached ();
3509 /* FIXME: handle ovf/ sreg2 != dreg */
3510 ARM_MUL_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
3511 /* FIXME: MUL doesn't set the C/O flags on ARM */
3513 case OP_IMUL_OVF_UN:
3514 /* FIXME: handle ovf/ sreg2 != dreg */
3515 ARM_MUL_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
3516 /* FIXME: MUL doesn't set the C/O flags on ARM */
3519 code = mono_arm_emit_load_imm (code, ins->dreg, ins->inst_c0);
3522 /* Load the GOT offset */
3523 mono_add_patch_info (cfg, offset, (MonoJumpInfoType)ins->inst_i1, ins->inst_p0);
3524 ARM_LDR_IMM (code, ins->dreg, ARMREG_PC, 0);
3526 *(gpointer*)code = NULL;
3528 /* Load the value from the GOT */
3529 ARM_LDR_REG_REG (code, ins->dreg, ARMREG_PC, ins->dreg);
3531 case OP_ICONV_TO_I4:
3532 case OP_ICONV_TO_U4:
3534 if (ins->dreg != ins->sreg1)
3535 ARM_MOV_REG_REG (code, ins->dreg, ins->sreg1);
3538 int saved = ins->sreg2;
3539 if (ins->sreg2 == ARM_LSW_REG) {
3540 ARM_MOV_REG_REG (code, ARMREG_LR, ins->sreg2);
3543 if (ins->sreg1 != ARM_LSW_REG)
3544 ARM_MOV_REG_REG (code, ARM_LSW_REG, ins->sreg1);
3545 if (saved != ARM_MSW_REG)
3546 ARM_MOV_REG_REG (code, ARM_MSW_REG, saved);
3551 ARM_MVFD (code, ins->dreg, ins->sreg1);
3552 #elif defined(ARM_FPU_VFP)
3553 ARM_CPYD (code, ins->dreg, ins->sreg1);
3556 case OP_FCONV_TO_R4:
3558 ARM_MVFS (code, ins->dreg, ins->sreg1);
3559 #elif defined(ARM_FPU_VFP)
3560 ARM_CVTD (code, ins->dreg, ins->sreg1);
3561 ARM_CVTS (code, ins->dreg, ins->dreg);
3566 * Keep in sync with mono_arch_emit_epilog
3568 g_assert (!cfg->method->save_lmf);
3570 code = emit_load_volatile_arguments (cfg, code);
3572 code = emit_big_add (code, ARMREG_SP, cfg->frame_reg, cfg->stack_usage);
3573 ARM_POP_NWB (code, cfg->used_int_regs | ((1 << ARMREG_SP)) | ((1 << ARMREG_LR)));
3574 mono_add_patch_info (cfg, (guint8*) code - cfg->native_code, MONO_PATCH_INFO_METHOD_JUMP, ins->inst_p0);
3575 if (cfg->compile_aot) {
3576 ARM_LDR_IMM (code, ARMREG_IP, ARMREG_PC, 0);
3578 *(gpointer*)code = NULL;
3580 ARM_LDR_REG_REG (code, ARMREG_PC, ARMREG_PC, ARMREG_IP);
3586 /* ensure ins->sreg1 is not NULL */
3587 ARM_LDR_IMM (code, ARMREG_LR, ins->sreg1, 0);
3590 g_assert (cfg->sig_cookie < 128);
3591 ARM_LDR_IMM (code, ARMREG_IP, cfg->frame_reg, cfg->sig_cookie);
3592 ARM_STR_IMM (code, ARMREG_IP, ins->sreg1, 0);
3601 call = (MonoCallInst*)ins;
3602 if (ins->flags & MONO_INST_HAS_METHOD)
3603 mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_METHOD, call->method);
3605 mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_ABS, call->fptr);
3606 code = emit_call_seq (cfg, code);
3607 code = emit_move_return_value (cfg, ins, code);
3613 case OP_VOIDCALL_REG:
3615 code = emit_call_reg (code, ins->sreg1);
3616 code = emit_move_return_value (cfg, ins, code);
3618 case OP_FCALL_MEMBASE:
3619 case OP_LCALL_MEMBASE:
3620 case OP_VCALL_MEMBASE:
3621 case OP_VCALL2_MEMBASE:
3622 case OP_VOIDCALL_MEMBASE:
3623 case OP_CALL_MEMBASE:
3624 g_assert (arm_is_imm12 (ins->inst_offset));
3625 g_assert (ins->sreg1 != ARMREG_LR);
3626 call = (MonoCallInst*)ins;
3627 if (call->method->klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
3628 ARM_ADD_REG_IMM8 (code, ARMREG_LR, ARMREG_PC, 4);
3629 ARM_LDR_IMM (code, ARMREG_PC, ins->sreg1, ins->inst_offset);
3631 * We can't embed the method in the code stream in PIC code, or
3633 * Instead, we put it in V5 in code emitted by
3634 * mono_arch_emit_imt_argument (), and embed NULL here to
3635 * signal the IMT thunk that the value is in V5.
3637 if (call->dynamic_imt_arg)
3638 *((gpointer*)code) = NULL;
3640 *((gpointer*)code) = (gpointer)call->method;
3643 ARM_MOV_REG_REG (code, ARMREG_LR, ARMREG_PC);
3644 ARM_LDR_IMM (code, ARMREG_PC, ins->sreg1, ins->inst_offset);
3646 code = emit_move_return_value (cfg, ins, code);
3649 /* keep alignment */
3650 int alloca_waste = cfg->param_area;
3653 /* round the size to 8 bytes */
3654 ARM_ADD_REG_IMM8 (code, ins->dreg, ins->sreg1, 7);
3655 ARM_BIC_REG_IMM8 (code, ins->dreg, ins->dreg, 7);
3657 ARM_ADD_REG_IMM8 (code, ins->dreg, ins->dreg, alloca_waste);
3658 ARM_SUB_REG_REG (code, ARMREG_SP, ARMREG_SP, ins->dreg);
3659 /* memzero the area: dreg holds the size, sp is the pointer */
3660 if (ins->flags & MONO_INST_INIT) {
3661 guint8 *start_loop, *branch_to_cond;
3662 ARM_MOV_REG_IMM8 (code, ARMREG_LR, 0);
3663 branch_to_cond = code;
3666 ARM_STR_REG_REG (code, ARMREG_LR, ARMREG_SP, ins->dreg);
3667 arm_patch (branch_to_cond, code);
3668 /* decrement by 4 and set flags */
3669 ARM_SUBS_REG_IMM8 (code, ins->dreg, ins->dreg, 4);
3670 ARM_B_COND (code, ARMCOND_GE, 0);
3671 arm_patch (code - 4, start_loop);
3673 ARM_ADD_REG_IMM8 (code, ins->dreg, ARMREG_SP, alloca_waste);
3678 MonoInst *var = cfg->dyn_call_var;
3680 g_assert (var->opcode == OP_REGOFFSET);
3681 g_assert (arm_is_imm12 (var->inst_offset));
3683 /* lr = args buffer filled by mono_arch_get_dyn_call_args () */
3684 ARM_MOV_REG_REG( code, ARMREG_LR, ins->sreg1);
3686 ARM_MOV_REG_REG( code, ARMREG_IP, ins->sreg2);
3688 /* Save args buffer */
3689 ARM_STR_IMM (code, ARMREG_LR, var->inst_basereg, var->inst_offset);
3691 /* Set stack slots using R0 as scratch reg */
3692 /* MONO_ARCH_DYN_CALL_PARAM_AREA gives the size of stack space available */
3693 for (i = 0; i < DYN_CALL_STACK_ARGS; ++i) {
3694 ARM_LDR_IMM (code, ARMREG_R0, ARMREG_LR, (PARAM_REGS + i) * sizeof (gpointer));
3695 ARM_STR_IMM (code, ARMREG_R0, ARMREG_SP, i * sizeof (gpointer));
3698 /* Set argument registers */
3699 for (i = 0; i < PARAM_REGS; ++i)
3700 ARM_LDR_IMM (code, i, ARMREG_LR, i * sizeof (gpointer));
3703 ARM_MOV_REG_REG (code, ARMREG_LR, ARMREG_PC);
3704 ARM_MOV_REG_REG (code, ARMREG_PC, ARMREG_IP);
3707 ARM_LDR_IMM (code, ARMREG_IP, var->inst_basereg, var->inst_offset);
3708 ARM_STR_IMM (code, ARMREG_R0, ARMREG_IP, G_STRUCT_OFFSET (DynCallArgs, res));
3709 ARM_STR_IMM (code, ARMREG_R1, ARMREG_IP, G_STRUCT_OFFSET (DynCallArgs, res2));
3713 if (ins->sreg1 != ARMREG_R0)
3714 ARM_MOV_REG_REG (code, ARMREG_R0, ins->sreg1);
3715 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_INTERNAL_METHOD,
3716 (gpointer)"mono_arch_throw_exception");
3717 code = emit_call_seq (cfg, code);
3721 if (ins->sreg1 != ARMREG_R0)
3722 ARM_MOV_REG_REG (code, ARMREG_R0, ins->sreg1);
3723 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_INTERNAL_METHOD,
3724 (gpointer)"mono_arch_rethrow_exception");
3725 code = emit_call_seq (cfg, code);
3728 case OP_START_HANDLER: {
3729 MonoInst *spvar = mono_find_spvar_for_region (cfg, bb->region);
3731 if (arm_is_imm12 (spvar->inst_offset)) {
3732 ARM_STR_IMM (code, ARMREG_LR, spvar->inst_basereg, spvar->inst_offset);
3734 code = mono_arm_emit_load_imm (code, ARMREG_IP, spvar->inst_offset);
3735 ARM_STR_REG_REG (code, ARMREG_LR, spvar->inst_basereg, ARMREG_IP);
3739 case OP_ENDFILTER: {
3740 MonoInst *spvar = mono_find_spvar_for_region (cfg, bb->region);
3742 if (ins->sreg1 != ARMREG_R0)
3743 ARM_MOV_REG_REG (code, ARMREG_R0, ins->sreg1);
3744 if (arm_is_imm12 (spvar->inst_offset)) {
3745 ARM_LDR_IMM (code, ARMREG_IP, spvar->inst_basereg, spvar->inst_offset);
3747 g_assert (ARMREG_IP != spvar->inst_basereg);
3748 code = mono_arm_emit_load_imm (code, ARMREG_IP, spvar->inst_offset);
3749 ARM_LDR_REG_REG (code, ARMREG_IP, spvar->inst_basereg, ARMREG_IP);
3751 ARM_MOV_REG_REG (code, ARMREG_PC, ARMREG_IP);
3754 case OP_ENDFINALLY: {
3755 MonoInst *spvar = mono_find_spvar_for_region (cfg, bb->region);
3757 if (arm_is_imm12 (spvar->inst_offset)) {
3758 ARM_LDR_IMM (code, ARMREG_IP, spvar->inst_basereg, spvar->inst_offset);
3760 g_assert (ARMREG_IP != spvar->inst_basereg);
3761 code = mono_arm_emit_load_imm (code, ARMREG_IP, spvar->inst_offset);
3762 ARM_LDR_REG_REG (code, ARMREG_IP, spvar->inst_basereg, ARMREG_IP);
3764 ARM_MOV_REG_REG (code, ARMREG_PC, ARMREG_IP);
3767 case OP_CALL_HANDLER:
3768 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_BB, ins->inst_target_bb);
3772 ins->inst_c0 = code - cfg->native_code;
3775 /*if (ins->inst_target_bb->native_offset) {
3777 //x86_jump_code (code, cfg->native_code + ins->inst_target_bb->native_offset);
3779 mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_BB, ins->inst_target_bb);
3784 ARM_MOV_REG_REG (code, ARMREG_PC, ins->sreg1);
3788 * In the normal case we have:
3789 * ldr pc, [pc, ins->sreg1 << 2]
3792 * ldr lr, [pc, ins->sreg1 << 2]
3794 * After follows the data.
3795 * FIXME: add aot support.
3797 mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_SWITCH, ins->inst_p0);
3798 max_len += 4 * GPOINTER_TO_INT (ins->klass);
3799 if (offset > (cfg->code_size - max_len - 16)) {
3800 cfg->code_size += max_len;
3801 cfg->code_size *= 2;
3802 cfg->native_code = g_realloc (cfg->native_code, cfg->code_size);
3803 code = cfg->native_code + offset;
3805 ARM_LDR_REG_REG_SHIFT (code, ARMREG_PC, ARMREG_PC, ins->sreg1, ARMSHIFT_LSL, 2);
3807 code += 4 * GPOINTER_TO_INT (ins->klass);
3811 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 0, ARMCOND_NE);
3812 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_EQ);
3816 ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
3817 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_LT);
3821 ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
3822 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_LO);
3826 ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
3827 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_GT);
3831 ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
3832 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_HI);
3834 case OP_COND_EXC_EQ:
3835 case OP_COND_EXC_NE_UN:
3836 case OP_COND_EXC_LT:
3837 case OP_COND_EXC_LT_UN:
3838 case OP_COND_EXC_GT:
3839 case OP_COND_EXC_GT_UN:
3840 case OP_COND_EXC_GE:
3841 case OP_COND_EXC_GE_UN:
3842 case OP_COND_EXC_LE:
3843 case OP_COND_EXC_LE_UN:
3844 EMIT_COND_SYSTEM_EXCEPTION (ins->opcode - OP_COND_EXC_EQ, ins->inst_p1);
3846 case OP_COND_EXC_IEQ:
3847 case OP_COND_EXC_INE_UN:
3848 case OP_COND_EXC_ILT:
3849 case OP_COND_EXC_ILT_UN:
3850 case OP_COND_EXC_IGT:
3851 case OP_COND_EXC_IGT_UN:
3852 case OP_COND_EXC_IGE:
3853 case OP_COND_EXC_IGE_UN:
3854 case OP_COND_EXC_ILE:
3855 case OP_COND_EXC_ILE_UN:
3856 EMIT_COND_SYSTEM_EXCEPTION (ins->opcode - OP_COND_EXC_IEQ, ins->inst_p1);
3859 case OP_COND_EXC_IC:
3860 EMIT_COND_SYSTEM_EXCEPTION_FLAGS (ARMCOND_CS, ins->inst_p1);
3862 case OP_COND_EXC_OV:
3863 case OP_COND_EXC_IOV:
3864 EMIT_COND_SYSTEM_EXCEPTION_FLAGS (ARMCOND_VS, ins->inst_p1);
3866 case OP_COND_EXC_NC:
3867 case OP_COND_EXC_INC:
3868 EMIT_COND_SYSTEM_EXCEPTION_FLAGS (ARMCOND_CC, ins->inst_p1);
3870 case OP_COND_EXC_NO:
3871 case OP_COND_EXC_INO:
3872 EMIT_COND_SYSTEM_EXCEPTION_FLAGS (ARMCOND_VC, ins->inst_p1);
3884 EMIT_COND_BRANCH (ins, ins->opcode - OP_IBEQ);
3887 /* floating point opcodes */
3890 if (cfg->compile_aot) {
3891 ARM_LDFD (code, ins->dreg, ARMREG_PC, 0);
3893 *(guint32*)code = ((guint32*)(ins->inst_p0))[0];
3895 *(guint32*)code = ((guint32*)(ins->inst_p0))[1];
3898 /* FIXME: we can optimize the imm load by dealing with part of
3899 * the displacement in LDFD (aligning to 512).
3901 code = mono_arm_emit_load_imm (code, ARMREG_LR, (guint32)ins->inst_p0);
3902 ARM_LDFD (code, ins->dreg, ARMREG_LR, 0);
3906 if (cfg->compile_aot) {
3907 ARM_LDFS (code, ins->dreg, ARMREG_PC, 0);
3909 *(guint32*)code = ((guint32*)(ins->inst_p0))[0];
3912 code = mono_arm_emit_load_imm (code, ARMREG_LR, (guint32)ins->inst_p0);
3913 ARM_LDFS (code, ins->dreg, ARMREG_LR, 0);
3916 case OP_STORER8_MEMBASE_REG:
3917 /* This is generated by the local regalloc pass which runs after the lowering pass */
3918 if (!arm_is_fpimm8 (ins->inst_offset)) {
3919 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_offset);
3920 ARM_ADD_REG_REG (code, ARMREG_LR, ARMREG_LR, ins->inst_destbasereg);
3921 ARM_STFD (code, ins->sreg1, ARMREG_LR, 0);
3923 ARM_STFD (code, ins->sreg1, ins->inst_destbasereg, ins->inst_offset);
3926 case OP_LOADR8_MEMBASE:
3927 /* This is generated by the local regalloc pass which runs after the lowering pass */
3928 if (!arm_is_fpimm8 (ins->inst_offset)) {
3929 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_offset);
3930 ARM_ADD_REG_REG (code, ARMREG_LR, ARMREG_LR, ins->inst_basereg);
3931 ARM_LDFD (code, ins->dreg, ARMREG_LR, 0);
3933 ARM_LDFD (code, ins->dreg, ins->inst_basereg, ins->inst_offset);
3936 case OP_STORER4_MEMBASE_REG:
3937 g_assert (arm_is_fpimm8 (ins->inst_offset));
3938 ARM_STFS (code, ins->sreg1, ins->inst_destbasereg, ins->inst_offset);
3940 case OP_LOADR4_MEMBASE:
3941 g_assert (arm_is_fpimm8 (ins->inst_offset));
3942 ARM_LDFS (code, ins->dreg, ins->inst_basereg, ins->inst_offset);
3944 case OP_ICONV_TO_R_UN: {
3946 tmpreg = ins->dreg == 0? 1: 0;
3947 ARM_CMP_REG_IMM8 (code, ins->sreg1, 0);
3948 ARM_FLTD (code, ins->dreg, ins->sreg1);
3949 ARM_B_COND (code, ARMCOND_GE, 8);
3950 /* save the temp register */
3951 ARM_SUB_REG_IMM8 (code, ARMREG_SP, ARMREG_SP, 8);
3952 ARM_STFD (code, tmpreg, ARMREG_SP, 0);
3953 ARM_LDFD (code, tmpreg, ARMREG_PC, 12);
3954 ARM_FPA_ADFD (code, ins->dreg, ins->dreg, tmpreg);
3955 ARM_LDFD (code, tmpreg, ARMREG_SP, 0);
3956 ARM_ADD_REG_IMM8 (code, ARMREG_SP, ARMREG_SP, 8);
3957 /* skip the constant pool */
3960 *(int*)code = 0x41f00000;
3965 * ldfltd ftemp, [pc, #8] 0x41f00000 0x00000000
3966 * adfltd fdest, fdest, ftemp
3970 case OP_ICONV_TO_R4:
3971 ARM_FLTS (code, ins->dreg, ins->sreg1);
3973 case OP_ICONV_TO_R8:
3974 ARM_FLTD (code, ins->dreg, ins->sreg1);
3977 #elif defined(ARM_FPU_VFP)
3980 if (cfg->compile_aot) {
3981 ARM_FLDD (code, ins->dreg, ARMREG_PC, 0);
3983 *(guint32*)code = ((guint32*)(ins->inst_p0))[0];
3985 *(guint32*)code = ((guint32*)(ins->inst_p0))[1];
3988 /* FIXME: we can optimize the imm load by dealing with part of
3989 * the displacement in LDFD (aligning to 512).
3991 code = mono_arm_emit_load_imm (code, ARMREG_LR, (guint32)ins->inst_p0);
3992 ARM_FLDD (code, ins->dreg, ARMREG_LR, 0);
3996 if (cfg->compile_aot) {
3997 ARM_FLDS (code, ins->dreg, ARMREG_PC, 0);
3999 *(guint32*)code = ((guint32*)(ins->inst_p0))[0];
4001 ARM_CVTS (code, ins->dreg, ins->dreg);
4003 code = mono_arm_emit_load_imm (code, ARMREG_LR, (guint32)ins->inst_p0);
4004 ARM_FLDS (code, ins->dreg, ARMREG_LR, 0);
4005 ARM_CVTS (code, ins->dreg, ins->dreg);
4008 case OP_STORER8_MEMBASE_REG:
4009 /* This is generated by the local regalloc pass which runs after the lowering pass */
4010 if (!arm_is_fpimm8 (ins->inst_offset)) {
4011 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_offset);
4012 ARM_ADD_REG_REG (code, ARMREG_LR, ARMREG_LR, ins->inst_destbasereg);
4013 ARM_FSTD (code, ins->sreg1, ARMREG_LR, 0);
4015 ARM_FSTD (code, ins->sreg1, ins->inst_destbasereg, ins->inst_offset);
4018 case OP_LOADR8_MEMBASE:
4019 /* This is generated by the local regalloc pass which runs after the lowering pass */
4020 if (!arm_is_fpimm8 (ins->inst_offset)) {
4021 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_offset);
4022 ARM_ADD_REG_REG (code, ARMREG_LR, ARMREG_LR, ins->inst_basereg);
4023 ARM_FLDD (code, ins->dreg, ARMREG_LR, 0);
4025 ARM_FLDD (code, ins->dreg, ins->inst_basereg, ins->inst_offset);
4028 case OP_STORER4_MEMBASE_REG:
4029 g_assert (arm_is_fpimm8 (ins->inst_offset));
4030 ARM_CVTD (code, ARM_VFP_F0, ins->sreg1);
4031 ARM_FSTS (code, ARM_VFP_F0, ins->inst_destbasereg, ins->inst_offset);
4033 case OP_LOADR4_MEMBASE:
4034 g_assert (arm_is_fpimm8 (ins->inst_offset));
4035 ARM_FLDS (code, ARM_VFP_F0, ins->inst_basereg, ins->inst_offset);
4036 ARM_CVTS (code, ins->dreg, ARM_VFP_F0);
4038 case OP_ICONV_TO_R_UN: {
4039 g_assert_not_reached ();
4042 case OP_ICONV_TO_R4:
4043 ARM_FMSR (code, ARM_VFP_F0, ins->sreg1);
4044 ARM_FSITOS (code, ARM_VFP_F0, ARM_VFP_F0);
4045 ARM_CVTS (code, ins->dreg, ARM_VFP_F0);
4047 case OP_ICONV_TO_R8:
4048 ARM_FMSR (code, ARM_VFP_F0, ins->sreg1);
4049 ARM_FSITOD (code, ins->dreg, ARM_VFP_F0);
4053 if (mono_method_signature (cfg->method)->ret->type == MONO_TYPE_R4) {
4054 ARM_CVTD (code, ARM_VFP_F0, ins->sreg1);
4055 ARM_FMRS (code, ARMREG_R0, ARM_VFP_F0);
4057 ARM_FMRRD (code, ARMREG_R0, ARMREG_R1, ins->sreg1);
4063 case OP_FCONV_TO_I1:
4064 code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 1, TRUE);
4066 case OP_FCONV_TO_U1:
4067 code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 1, FALSE);
4069 case OP_FCONV_TO_I2:
4070 code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 2, TRUE);
4072 case OP_FCONV_TO_U2:
4073 code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 2, FALSE);
4075 case OP_FCONV_TO_I4:
4077 code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 4, TRUE);
4079 case OP_FCONV_TO_U4:
4081 code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 4, FALSE);
4083 case OP_FCONV_TO_I8:
4084 case OP_FCONV_TO_U8:
4085 g_assert_not_reached ();
4086 /* Implemented as helper calls */
4088 case OP_LCONV_TO_R_UN:
4089 g_assert_not_reached ();
4090 /* Implemented as helper calls */
4092 case OP_LCONV_TO_OVF_I4_2: {
4093 guint8 *high_bit_not_set, *valid_negative, *invalid_negative, *valid_positive;
4095 * Valid ints: 0xffffffff:0x80000000 to 0x00000000:0x7fffffff
4098 ARM_CMP_REG_IMM8 (code, ins->sreg1, 0);
4099 high_bit_not_set = code;
4100 ARM_B_COND (code, ARMCOND_GE, 0); /*branch if bit 31 of the lower part is not set*/
4102 ARM_CMN_REG_IMM8 (code, ins->sreg2, 1); /*This have the same effect as CMP reg, 0xFFFFFFFF */
4103 valid_negative = code;
4104 ARM_B_COND (code, ARMCOND_EQ, 0); /*branch if upper part == 0xFFFFFFFF (lower part has bit 31 set) */
4105 invalid_negative = code;
4106 ARM_B_COND (code, ARMCOND_AL, 0);
4108 arm_patch (high_bit_not_set, code);
4110 ARM_CMP_REG_IMM8 (code, ins->sreg2, 0);
4111 valid_positive = code;
4112 ARM_B_COND (code, ARMCOND_EQ, 0); /*branch if upper part == 0 (lower part has bit 31 clear)*/
4114 arm_patch (invalid_negative, code);
4115 EMIT_COND_SYSTEM_EXCEPTION_FLAGS (ARMCOND_AL, "OverflowException");
4117 arm_patch (valid_negative, code);
4118 arm_patch (valid_positive, code);
4120 if (ins->dreg != ins->sreg1)
4121 ARM_MOV_REG_REG (code, ins->dreg, ins->sreg1);
4126 ARM_FPA_ADFD (code, ins->dreg, ins->sreg1, ins->sreg2);
4129 ARM_FPA_SUFD (code, ins->dreg, ins->sreg1, ins->sreg2);
4132 ARM_FPA_MUFD (code, ins->dreg, ins->sreg1, ins->sreg2);
4135 ARM_FPA_DVFD (code, ins->dreg, ins->sreg1, ins->sreg2);
4138 ARM_MNFD (code, ins->dreg, ins->sreg1);
4140 #elif defined(ARM_FPU_VFP)
4142 ARM_VFP_ADDD (code, ins->dreg, ins->sreg1, ins->sreg2);
4145 ARM_VFP_SUBD (code, ins->dreg, ins->sreg1, ins->sreg2);
4148 ARM_VFP_MULD (code, ins->dreg, ins->sreg1, ins->sreg2);
4151 ARM_VFP_DIVD (code, ins->dreg, ins->sreg1, ins->sreg2);
4154 ARM_NEGD (code, ins->dreg, ins->sreg1);
4159 g_assert_not_reached ();
4163 ARM_FCMP (code, ARM_FPA_CMF, ins->sreg1, ins->sreg2);
4164 #elif defined(ARM_FPU_VFP)
4165 ARM_CMPD (code, ins->sreg1, ins->sreg2);
4171 ARM_FCMP (code, ARM_FPA_CMF, ins->sreg1, ins->sreg2);
4172 #elif defined(ARM_FPU_VFP)
4173 ARM_CMPD (code, ins->sreg1, ins->sreg2);
4176 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 0, ARMCOND_NE);
4177 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_EQ);
4181 ARM_FCMP (code, ARM_FPA_CMF, ins->sreg1, ins->sreg2);
4182 #elif defined(ARM_FPU_VFP)
4183 ARM_CMPD (code, ins->sreg1, ins->sreg2);
4186 ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
4187 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_MI);
4191 ARM_FCMP (code, ARM_FPA_CMF, ins->sreg1, ins->sreg2);
4192 #elif defined(ARM_FPU_VFP)
4193 ARM_CMPD (code, ins->sreg1, ins->sreg2);
4196 ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
4197 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_MI);
4198 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_VS);
4203 ARM_FCMP (code, ARM_FPA_CMF, ins->sreg2, ins->sreg1);
4204 #elif defined(ARM_FPU_VFP)
4205 ARM_CMPD (code, ins->sreg2, ins->sreg1);
4208 ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
4209 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_MI);
4214 ARM_FCMP (code, ARM_FPA_CMF, ins->sreg2, ins->sreg1);
4215 #elif defined(ARM_FPU_VFP)
4216 ARM_CMPD (code, ins->sreg2, ins->sreg1);
4219 ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
4220 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_MI);
4221 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_VS);
4223 /* ARM FPA flags table:
4224 * N Less than ARMCOND_MI
4225 * Z Equal ARMCOND_EQ
4226 * C Greater Than or Equal ARMCOND_CS
4227 * V Unordered ARMCOND_VS
4230 EMIT_COND_BRANCH (ins, OP_IBEQ - OP_IBEQ);
4233 EMIT_COND_BRANCH (ins, OP_IBNE_UN - OP_IBEQ);
4236 EMIT_COND_BRANCH_FLAGS (ins, ARMCOND_MI); /* N set */
4239 EMIT_COND_BRANCH_FLAGS (ins, ARMCOND_VS); /* V set */
4240 EMIT_COND_BRANCH_FLAGS (ins, ARMCOND_MI); /* N set */
4246 g_assert_not_reached ();
4250 EMIT_COND_BRANCH_FLAGS (ins, ARMCOND_GE);
4252 /* FPA requires EQ even thou the docs suggests that just CS is enough */
4253 EMIT_COND_BRANCH_FLAGS (ins, ARMCOND_EQ);
4254 EMIT_COND_BRANCH_FLAGS (ins, ARMCOND_CS);
4258 EMIT_COND_BRANCH_FLAGS (ins, ARMCOND_VS); /* V set */
4259 EMIT_COND_BRANCH_FLAGS (ins, ARMCOND_GE);
4264 if (ins->dreg != ins->sreg1)
4265 ARM_MVFD (code, ins->dreg, ins->sreg1);
4266 #elif defined(ARM_FPU_VFP)
4267 ARM_ABSD (code, ARM_VFP_D1, ins->sreg1);
4268 ARM_FLDD (code, ARM_VFP_D0, ARMREG_PC, 0);
4270 *(guint32*)code = 0xffffffff;
4272 *(guint32*)code = 0x7fefffff;
4274 ARM_CMPD (code, ARM_VFP_D1, ARM_VFP_D0);
4276 EMIT_COND_SYSTEM_EXCEPTION_FLAGS (ARMCOND_GT, "ArithmeticException");
4277 ARM_CMPD (code, ins->sreg1, ins->sreg1);
4279 EMIT_COND_SYSTEM_EXCEPTION_FLAGS (ARMCOND_VS, "ArithmeticException");
4281 ARM_CPYD (code, ins->dreg, ins->sreg1);
4286 g_warning ("unknown opcode %s in %s()\n", mono_inst_name (ins->opcode), __FUNCTION__);
4287 g_assert_not_reached ();
4290 if ((cfg->opt & MONO_OPT_BRANCH) && ((code - cfg->native_code - offset) > max_len)) {
4291 g_warning ("wrong maximal instruction length of instruction %s (expected %d, got %d)",
4292 mono_inst_name (ins->opcode), max_len, code - cfg->native_code - offset);
4293 g_assert_not_reached ();
4299 last_offset = offset;
4302 cfg->code_len = code - cfg->native_code;
4305 #endif /* DISABLE_JIT */
4307 #ifdef HAVE_AEABI_READ_TP
4308 void __aeabi_read_tp (void);
/*
 * Register the ARM-specific low-level helpers with the JIT icall table so
 * generated code can call them by name.  The dummy "void" signature is fine
 * because these are only ever reached via hand-built call sequences.
 * NOTE(review): this listing is elided (line-number gaps); code left untouched.
 */
4312 mono_arch_register_lowlevel_calls (void)
4314 /* The signature doesn't matter */
4315 mono_register_jit_icall (mono_arm_throw_exception, "mono_arm_throw_exception", mono_create_icall_signature ("void"), TRUE);
4316 mono_register_jit_icall (mono_arm_throw_exception_by_token, "mono_arm_throw_exception_by_token", mono_create_icall_signature ("void"), TRUE);
4318 #ifdef HAVE_AEABI_READ_TP
/* __aeabi_read_tp is registered only on EABI Linux (see HAVE_AEABI_READ_TP
 * in the file header); it is used for fast TLS access in the prolog. */
4319 mono_register_jit_icall (__aeabi_read_tp, "__aeabi_read_tp", mono_create_icall_signature ("void"), TRUE);
/*
 * NOTE(review): `lis`/`ori` are PowerPC instructions, not ARM -- this macro
 * appears to be carried over from the PPC backend.  Every call site below
 * (mono_arch_patch_code) is preceded by g_assert_not_reached (), so on ARM
 * this is effectively dead code: it patches the two 16-bit immediates of a
 * lis+ori pair with the high and low halves of `val`.
 */
4323 #define patch_lis_ori(ip,val) do {\
4324 guint16 *__lis_ori = (guint16*)(ip); \
4325 __lis_ori [1] = (((guint32)(val)) >> 16) & 0xffff; \
4326 __lis_ori [3] = ((guint32)(val)) & 0xffff; \
/*
 * mono_arch_patch_code:
 *   Walk the jump-info list @ji of a freshly emitted method and resolve every
 *   patch site in @code to its final target.  @run_cctors is FALSE for AOT
 *   compilation (compile_aot below), in which case inline jump tables are not
 *   filled in here.  Most concrete patching is delegated to
 *   arm_patch_general () at the bottom; the lis/ori cases are PPC leftovers
 *   guarded by g_assert_not_reached ().
 * NOTE(review): this listing is elided (line-number gaps); code left untouched.
 */
4330 mono_arch_patch_code (MonoMethod *method, MonoDomain *domain, guint8 *code, MonoJumpInfo *ji, gboolean run_cctors)
4332 MonoJumpInfo *patch_info;
4333 gboolean compile_aot = !run_cctors;
4335 for (patch_info = ji; patch_info; patch_info = patch_info->next) {
4336 unsigned char *ip = patch_info->ip.i + code;
4337 const unsigned char *target;
4339 if (patch_info->type == MONO_PATCH_INFO_SWITCH && !compile_aot) {
4340 gpointer *jt = (gpointer*)(ip + 8);
4342 /* jt is the inlined jump table, 2 instructions after ip
4343 * In the normal case we store the absolute addresses,
4344 * otherwise the displacements.
4346 for (i = 0; i < patch_info->data.table->table_size; i++)
4347 jt [i] = code + (int)patch_info->data.table->table [i];
4350 target = mono_resolve_patch_target (method, domain, code, patch_info, run_cctors);
4353 switch (patch_info->type) {
4354 case MONO_PATCH_INFO_BB:
4355 case MONO_PATCH_INFO_LABEL:
4358 /* No need to patch these */
/* The cases below that call patch_lis_ori () are dead on ARM: each is
 * preceded by g_assert_not_reached (). */
4363 switch (patch_info->type) {
4364 case MONO_PATCH_INFO_IP:
4365 g_assert_not_reached ();
4366 patch_lis_ori (ip, ip);
4368 case MONO_PATCH_INFO_METHOD_REL:
4369 g_assert_not_reached ();
4370 *((gpointer *)(ip)) = code + patch_info->data.offset;
4372 case MONO_PATCH_INFO_METHODCONST:
4373 case MONO_PATCH_INFO_CLASS:
4374 case MONO_PATCH_INFO_IMAGE:
4375 case MONO_PATCH_INFO_FIELD:
4376 case MONO_PATCH_INFO_VTABLE:
4377 case MONO_PATCH_INFO_IID:
4378 case MONO_PATCH_INFO_SFLDA:
4379 case MONO_PATCH_INFO_LDSTR:
4380 case MONO_PATCH_INFO_TYPE_FROM_HANDLE:
4381 case MONO_PATCH_INFO_LDTOKEN:
4382 g_assert_not_reached ();
4383 /* from OP_AOTCONST : lis + ori */
4384 patch_lis_ori (ip, target);
4386 case MONO_PATCH_INFO_R4:
4387 case MONO_PATCH_INFO_R8:
4388 g_assert_not_reached ();
4389 *((gconstpointer *)(ip + 2)) = patch_info->data.target;
4391 case MONO_PATCH_INFO_EXC_NAME:
4392 g_assert_not_reached ();
4393 *((gconstpointer *)(ip + 1)) = patch_info->data.name;
4395 case MONO_PATCH_INFO_NONE:
4396 case MONO_PATCH_INFO_BB_OVF:
4397 case MONO_PATCH_INFO_EXC_OVF:
4398 /* everything is dealt with at epilog output time */
/* Default path: all real ARM patching (branches, constant loads) happens
 * in arm_patch_general (). */
4403 arm_patch_general (domain, ip, target);
4408 * Stack frame layout:
4410 * ------------------- fp
4411 * MonoLMF structure or saved registers
4412 * -------------------
4414 * -------------------
4416 * -------------------
4417 * optional 8 bytes for tracing
4418 * -------------------
4419 * param area size is cfg->param_area
4420 * ------------------- sp
/*
 * mono_arch_emit_prolog:
 *   Emit the method prolog: save callee registers (or a full MonoLMF frame
 *   when method->save_lmf), allocate the aligned stack frame, store incoming
 *   arguments to their home locations, and set up optional bits (rgctx,
 *   thread attach for native-to-managed wrappers, LMF linking, sequence-point
 *   variables).  See the stack-frame-layout comment above this function.
 * NOTE(review): this listing is elided (line-number gaps: missing else
 *   branches, braces, case labels); code left byte-identical.
 */
4423 mono_arch_emit_prolog (MonoCompile *cfg)
4425 MonoMethod *method = cfg->method;
4427 MonoMethodSignature *sig;
4429 int alloc_size, pos, max_offset, i, rot_amount;
4434 int prev_sp_offset, reg_offset;
4436 if (mono_jit_trace_calls != NULL && mono_trace_eval (method))
4439 sig = mono_method_signature (method);
/* Initial buffer size heuristic; grown later by epilog/exception emitters. */
4440 cfg->code_size = 256 + sig->param_count * 20;
4441 code = cfg->native_code = g_malloc (cfg->code_size);
4443 mono_emit_unwind_op_def_cfa (cfg, code, ARMREG_SP, 0);
4445 ARM_MOV_REG_REG (code, ARMREG_IP, ARMREG_SP);
4447 alloc_size = cfg->stack_offset;
/* --- register save area: plain push, or full LMF-shaped push --- */
4450 if (!method->save_lmf) {
4451 /* We save SP by storing it into IP and saving IP */
4452 ARM_PUSH (code, (cfg->used_int_regs | (1 << ARMREG_IP) | (1 << ARMREG_LR)));
4453 prev_sp_offset = 8; /* ip and lr */
4454 for (i = 0; i < 16; ++i) {
4455 if (cfg->used_int_regs & (1 << i))
4456 prev_sp_offset += 4;
4458 mono_emit_unwind_op_def_cfa_offset (cfg, code, prev_sp_offset);
4460 for (i = 0; i < 16; ++i) {
4461 if ((cfg->used_int_regs & (1 << i)) || (i == ARMREG_IP) || (i == ARMREG_LR)) {
4462 mono_emit_unwind_op_offset (cfg, code, i, (- prev_sp_offset) + reg_offset);
/* save_lmf path: push r4-r12,lr (mask 0x5ff0) so the stack matches MonoLMF. */
4467 ARM_PUSH (code, 0x5ff0);
4468 prev_sp_offset = 4 * 10; /* all but r0-r3, sp and pc */
4469 mono_emit_unwind_op_def_cfa_offset (cfg, code, prev_sp_offset);
4471 for (i = 0; i < 16; ++i) {
4472 if ((i > ARMREG_R3) && (i != ARMREG_SP) && (i != ARMREG_PC)) {
4473 mono_emit_unwind_op_offset (cfg, code, i, (- prev_sp_offset) + reg_offset);
4477 pos += sizeof (MonoLMF) - prev_sp_offset;
/* --- frame allocation, kept MONO_ARCH_FRAME_ALIGNMENT-aligned --- */
4481 // align to MONO_ARCH_FRAME_ALIGNMENT bytes
4482 if (alloc_size & (MONO_ARCH_FRAME_ALIGNMENT - 1)) {
4483 alloc_size += MONO_ARCH_FRAME_ALIGNMENT - 1;
4484 alloc_size &= ~(MONO_ARCH_FRAME_ALIGNMENT - 1);
4487 /* the stack used in the pushed regs */
4488 if (prev_sp_offset & 4)
4490 cfg->stack_usage = alloc_size;
/* Subtract the frame size; use a rotated imm8 when it encodes, else load
 * the constant into IP first. */
4492 if ((i = mono_arm_is_rotated_imm8 (alloc_size, &rot_amount)) >= 0) {
4493 ARM_SUB_REG_IMM (code, ARMREG_SP, ARMREG_SP, i, rot_amount);
4495 code = mono_arm_emit_load_imm (code, ARMREG_IP, alloc_size);
4496 ARM_SUB_REG_REG (code, ARMREG_SP, ARMREG_SP, ARMREG_IP);
4498 mono_emit_unwind_op_def_cfa_offset (cfg, code, prev_sp_offset + alloc_size);
4500 if (cfg->frame_reg != ARMREG_SP) {
4501 ARM_MOV_REG_REG (code, cfg->frame_reg, ARMREG_SP);
4502 mono_emit_unwind_op_def_cfa_reg (cfg, code, cfg->frame_reg);
4504 //g_print ("prev_sp_offset: %d, alloc_size:%d\n", prev_sp_offset, alloc_size);
4505 prev_sp_offset += alloc_size;
4507 /* compute max_offset in order to use short forward jumps
4508 * we could skip do it on arm because the immediate displacement
4509 * for jumps is large enough, it may be useful later for constant pools
4512 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
4513 MonoInst *ins = bb->code;
4514 bb->max_offset = max_offset;
4516 if (cfg->prof_options & MONO_PROFILE_COVERAGE)
4519 MONO_BB_FOR_EACH_INS (bb, ins)
4520 max_offset += ((guint8 *)ins_get_spec (ins->opcode))[MONO_INST_LEN];
4523 /* store runtime generic context */
4524 if (cfg->rgctx_var) {
4525 MonoInst *ins = cfg->rgctx_var;
4527 g_assert (ins->opcode == OP_REGOFFSET);
4529 if (arm_is_imm12 (ins->inst_offset)) {
4530 ARM_STR_IMM (code, MONO_ARCH_RGCTX_REG, ins->inst_basereg, ins->inst_offset);
4532 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_offset);
4533 ARM_STR_REG_REG (code, MONO_ARCH_RGCTX_REG, ins->inst_basereg, ARMREG_LR);
4537 /* load arguments allocated to register from the stack */
4540 cinfo = get_call_info (NULL, sig, sig->pinvoke);
4542 if (MONO_TYPE_ISSTRUCT (sig->ret) && cinfo->ret.storage != RegTypeStructByVal) {
4543 ArgInfo *ainfo = &cinfo->ret;
4544 inst = cfg->vret_addr;
4545 g_assert (arm_is_imm12 (inst->inst_offset));
4546 ARM_STR_IMM (code, ainfo->reg, inst->inst_basereg, inst->inst_offset);
4549 if (sig->call_convention == MONO_CALL_VARARG) {
4550 ArgInfo *cookie = &cinfo->sig_cookie;
4552 /* Save the sig cookie address */
4553 g_assert (cookie->storage == RegTypeBase);
4555 g_assert (arm_is_imm12 (prev_sp_offset + cookie->offset));
4556 g_assert (arm_is_imm12 (cfg->sig_cookie));
4557 ARM_ADD_REG_IMM8 (code, ARMREG_IP, cfg->frame_reg, prev_sp_offset + cookie->offset);
4558 ARM_STR_IMM (code, ARMREG_IP, cfg->frame_reg, cfg->sig_cookie);
/* --- per-argument homing: registers/stack slots -> local frame --- */
4561 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
4562 ArgInfo *ainfo = cinfo->args + i;
4563 inst = cfg->args [pos];
4565 if (cfg->verbose_level > 2)
4566 g_print ("Saving argument %d (type: %d)\n", i, ainfo->storage);
4567 if (inst->opcode == OP_REGVAR) {
4568 if (ainfo->storage == RegTypeGeneral)
4569 ARM_MOV_REG_REG (code, inst->dreg, ainfo->reg);
4570 else if (ainfo->storage == RegTypeFP) {
4571 g_assert_not_reached ();
4572 } else if (ainfo->storage == RegTypeBase) {
4573 if (arm_is_imm12 (prev_sp_offset + ainfo->offset)) {
4574 ARM_LDR_IMM (code, inst->dreg, ARMREG_SP, (prev_sp_offset + ainfo->offset));
/* NOTE(review): this fallback uses inst->inst_offset while the imm12 path
 * above uses prev_sp_offset + ainfo->offset -- looks inconsistent; confirm
 * against upstream mini-arm.c before touching. */
4576 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
4577 ARM_LDR_REG_REG (code, inst->dreg, ARMREG_SP, ARMREG_IP);
4580 g_assert_not_reached ();
4582 if (cfg->verbose_level > 2)
4583 g_print ("Argument %d assigned to register %s\n", pos, mono_arch_regname (inst->dreg));
4585 /* the argument should be put on the stack: FIXME handle size != word */
4586 if (ainfo->storage == RegTypeGeneral || ainfo->storage == RegTypeIRegPair) {
4587 switch (ainfo->size) {
4589 if (arm_is_imm12 (inst->inst_offset))
4590 ARM_STRB_IMM (code, ainfo->reg, inst->inst_basereg, inst->inst_offset);
4592 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
4593 ARM_STRB_REG_REG (code, ainfo->reg, inst->inst_basereg, ARMREG_IP);
4597 if (arm_is_imm8 (inst->inst_offset)) {
4598 ARM_STRH_IMM (code, ainfo->reg, inst->inst_basereg, inst->inst_offset);
4600 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
4601 ARM_STRH_REG_REG (code, ainfo->reg, inst->inst_basereg, ARMREG_IP);
/* 8-byte case: store the register pair to two consecutive words. */
4605 g_assert (arm_is_imm12 (inst->inst_offset));
4606 ARM_STR_IMM (code, ainfo->reg, inst->inst_basereg, inst->inst_offset);
4607 g_assert (arm_is_imm12 (inst->inst_offset + 4));
4608 ARM_STR_IMM (code, ainfo->reg + 1, inst->inst_basereg, inst->inst_offset + 4);
4611 if (arm_is_imm12 (inst->inst_offset)) {
4612 ARM_STR_IMM (code, ainfo->reg, inst->inst_basereg, inst->inst_offset);
4614 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
4615 ARM_STR_REG_REG (code, ainfo->reg, inst->inst_basereg, ARMREG_IP);
/* RegTypeBaseGen: a 64-bit arg split between r3 and the caller's stack. */
4619 } else if (ainfo->storage == RegTypeBaseGen) {
4620 g_assert (arm_is_imm12 (prev_sp_offset + ainfo->offset));
4621 g_assert (arm_is_imm12 (inst->inst_offset));
4622 ARM_LDR_IMM (code, ARMREG_LR, ARMREG_SP, (prev_sp_offset + ainfo->offset));
4623 ARM_STR_IMM (code, ARMREG_LR, inst->inst_basereg, inst->inst_offset + 4);
4624 ARM_STR_IMM (code, ARMREG_R3, inst->inst_basereg, inst->inst_offset);
4625 } else if (ainfo->storage == RegTypeBase) {
4626 if (arm_is_imm12 (prev_sp_offset + ainfo->offset)) {
4627 ARM_LDR_IMM (code, ARMREG_LR, ARMREG_SP, (prev_sp_offset + ainfo->offset));
4629 code = mono_arm_emit_load_imm (code, ARMREG_IP, prev_sp_offset + ainfo->offset);
4630 ARM_LDR_REG_REG (code, ARMREG_LR, ARMREG_SP, ARMREG_IP);
4633 switch (ainfo->size) {
4635 if (arm_is_imm8 (inst->inst_offset)) {
4636 ARM_STRB_IMM (code, ARMREG_LR, inst->inst_basereg, inst->inst_offset);
4638 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
4639 ARM_STRB_REG_REG (code, ARMREG_LR, inst->inst_basereg, ARMREG_IP);
4643 if (arm_is_imm8 (inst->inst_offset)) {
4644 ARM_STRH_IMM (code, ARMREG_LR, inst->inst_basereg, inst->inst_offset);
4646 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
4647 ARM_STRH_REG_REG (code, ARMREG_LR, inst->inst_basereg, ARMREG_IP);
/* 8-byte stack arg: copy low word, then reload/copy the high word. */
4651 if (arm_is_imm12 (inst->inst_offset)) {
4652 ARM_STR_IMM (code, ARMREG_LR, inst->inst_basereg, inst->inst_offset);
4654 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
4655 ARM_STR_REG_REG (code, ARMREG_LR, inst->inst_basereg, ARMREG_IP);
4657 if (arm_is_imm12 (prev_sp_offset + ainfo->offset + 4)) {
4658 ARM_LDR_IMM (code, ARMREG_LR, ARMREG_SP, (prev_sp_offset + ainfo->offset + 4));
4660 code = mono_arm_emit_load_imm (code, ARMREG_IP, prev_sp_offset + ainfo->offset + 4);
4661 ARM_LDR_REG_REG (code, ARMREG_LR, ARMREG_SP, ARMREG_IP);
4663 if (arm_is_imm12 (inst->inst_offset + 4)) {
4664 ARM_STR_IMM (code, ARMREG_LR, inst->inst_basereg, inst->inst_offset + 4);
4666 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset + 4);
4667 ARM_STR_REG_REG (code, ARMREG_LR, inst->inst_basereg, ARMREG_IP);
4671 if (arm_is_imm12 (inst->inst_offset)) {
4672 ARM_STR_IMM (code, ARMREG_LR, inst->inst_basereg, inst->inst_offset);
4674 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
4675 ARM_STR_REG_REG (code, ARMREG_LR, inst->inst_basereg, ARMREG_IP);
4679 } else if (ainfo->storage == RegTypeFP) {
4680 g_assert_not_reached ();
/* Struct passed (partially) in registers: spill the register part word by
 * word, then memcpy the remainder from the caller's stack. */
4681 } else if (ainfo->storage == RegTypeStructByVal) {
4682 int doffset = inst->inst_offset;
4686 size = mini_type_stack_size_full (cfg->generic_sharing_context, inst->inst_vtype, NULL, sig->pinvoke);
4687 for (cur_reg = 0; cur_reg < ainfo->size; ++cur_reg) {
4688 if (arm_is_imm12 (doffset)) {
4689 ARM_STR_IMM (code, ainfo->reg + cur_reg, inst->inst_basereg, doffset);
4691 code = mono_arm_emit_load_imm (code, ARMREG_IP, doffset);
4692 ARM_STR_REG_REG (code, ainfo->reg + cur_reg, inst->inst_basereg, ARMREG_IP);
4694 soffset += sizeof (gpointer);
4695 doffset += sizeof (gpointer);
4697 if (ainfo->vtsize) {
4698 /* FIXME: handle overrun! with struct sizes not multiple of 4 */
4699 //g_print ("emit_memcpy (prev_sp_ofs: %d, ainfo->offset: %d, soffset: %d)\n", prev_sp_offset, ainfo->offset, soffset);
4700 code = emit_memcpy (code, ainfo->vtsize * sizeof (gpointer), inst->inst_basereg, doffset, ARMREG_SP, prev_sp_offset + ainfo->offset);
4702 } else if (ainfo->storage == RegTypeStructByAddr) {
4703 g_assert_not_reached ();
4704 /* FIXME: handle overrun! with struct sizes not multiple of 4 */
4705 code = emit_memcpy (code, ainfo->vtsize * sizeof (gpointer), inst->inst_basereg, inst->inst_offset, ainfo->reg, 0);
4707 g_assert_not_reached ();
/* --- native-to-managed wrapper: attach the thread to the runtime --- */
4712 if (method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED) {
4713 if (cfg->compile_aot)
4714 /* AOT code is only used in the root domain */
4715 code = mono_arm_emit_load_imm (code, ARMREG_R0, 0);
4717 code = mono_arm_emit_load_imm (code, ARMREG_R0, (guint32)cfg->domain);
4718 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_INTERNAL_METHOD,
4719 (gpointer)"mono_jit_thread_attach");
4720 code = emit_call_seq (cfg, code);
/* --- link this frame's MonoLMF into the per-thread LMF list --- */
4723 if (method->save_lmf) {
4724 gboolean get_lmf_fast = FALSE;
4726 #ifdef HAVE_AEABI_READ_TP
4727 gint32 lmf_addr_tls_offset = mono_get_lmf_addr_tls_offset ();
4729 if (lmf_addr_tls_offset != -1) {
4730 get_lmf_fast = TRUE;
4732 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_INTERNAL_METHOD,
4733 (gpointer)"__aeabi_read_tp");
4734 code = emit_call_seq (cfg, code);
4736 ARM_LDR_IMM (code, ARMREG_R0, ARMREG_R0, lmf_addr_tls_offset);
4737 get_lmf_fast = TRUE;
4740 if (!get_lmf_fast) {
4741 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_INTERNAL_METHOD,
4742 (gpointer)"mono_get_lmf_addr");
4743 code = emit_call_seq (cfg, code);
4745 /* we build the MonoLMF structure on the stack - see mini-arm.h */
4746 /* lmf_offset is the offset from the previous stack pointer,
4747 * alloc_size is the total stack space allocated, so the offset
4748 * of MonoLMF from the current stack ptr is alloc_size - lmf_offset.
4749 * The pointer to the struct is put in r1 (new_lmf).
4750 * r2 is used as scratch
4751 * The callee-saved registers are already in the MonoLMF structure
4753 code = emit_big_add (code, ARMREG_R1, ARMREG_SP, alloc_size - lmf_offset);
4754 /* r0 is the result from mono_get_lmf_addr () */
4755 ARM_STR_IMM (code, ARMREG_R0, ARMREG_R1, G_STRUCT_OFFSET (MonoLMF, lmf_addr));
4756 /* new_lmf->previous_lmf = *lmf_addr */
4757 ARM_LDR_IMM (code, ARMREG_R2, ARMREG_R0, G_STRUCT_OFFSET (MonoLMF, previous_lmf));
4758 ARM_STR_IMM (code, ARMREG_R2, ARMREG_R1, G_STRUCT_OFFSET (MonoLMF, previous_lmf));
4759 /* *(lmf_addr) = r1 */
4760 ARM_STR_IMM (code, ARMREG_R1, ARMREG_R0, G_STRUCT_OFFSET (MonoLMF, previous_lmf));
4761 /* Skip method (only needed for trampoline LMF frames) */
4762 ARM_STR_IMM (code, ARMREG_SP, ARMREG_R1, G_STRUCT_OFFSET (MonoLMF, ebp));
4763 /* save the current IP */
4764 ARM_MOV_REG_REG (code, ARMREG_R2, ARMREG_PC);
4765 ARM_STR_IMM (code, ARMREG_R2, ARMREG_R1, G_STRUCT_OFFSET (MonoLMF, eip));
4769 code = mono_arch_instrument_prolog (cfg, mono_trace_enter_method, code, TRUE);
/* --- soft-debugger support: materialize seq-point info variables --- */
4771 if (cfg->arch.seq_point_info_var) {
4772 MonoInst *ins = cfg->arch.seq_point_info_var;
4774 /* Initialize the variable from a GOT slot */
4775 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_SEQ_POINT_INFO, cfg->method);
4776 ARM_LDR_IMM (code, ARMREG_R0, ARMREG_PC, 0);
4778 *(gpointer*)code = NULL;
4780 ARM_LDR_REG_REG (code, ARMREG_R0, ARMREG_PC, ARMREG_R0);
4782 g_assert (ins->opcode == OP_REGOFFSET);
4784 if (arm_is_imm12 (ins->inst_offset)) {
4785 ARM_STR_IMM (code, ARMREG_R0, ins->inst_basereg, ins->inst_offset);
4787 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_offset);
4788 ARM_STR_REG_REG (code, ARMREG_R0, ins->inst_basereg, ARMREG_LR);
4792 /* Initialize ss_trigger_page_var */
4794 MonoInst *info_var = cfg->arch.seq_point_info_var;
4795 MonoInst *ss_trigger_page_var = cfg->arch.ss_trigger_page_var;
4796 int dreg = ARMREG_LR;
4799 g_assert (info_var->opcode == OP_REGOFFSET);
4800 g_assert (arm_is_imm12 (info_var->inst_offset));
4802 ARM_LDR_IMM (code, dreg, info_var->inst_basereg, info_var->inst_offset);
4803 /* Load the trigger page addr */
4804 ARM_LDR_IMM (code, dreg, dreg, G_STRUCT_OFFSET (SeqPointInfo, ss_trigger_page));
4805 ARM_STR_IMM (code, dreg, ss_trigger_page_var->inst_basereg, ss_trigger_page_var->inst_offset);
4809 cfg->code_len = code - cfg->native_code;
4810 g_assert (cfg->code_len < cfg->code_size);
/*
 * mono_arch_emit_epilog:
 *   Emit the method epilog: optional trace hook, move struct return values
 *   into r0, unlink the MonoLMF (if one was built in the prolog), free the
 *   frame and pop the saved registers, returning by popping into pc.
 *   Grows cfg->native_code first so the epilog always fits.
 * NOTE(review): this listing is elided (line-number gaps); code left untouched.
 */
4817 mono_arch_emit_epilog (MonoCompile *cfg)
4819 MonoMethod *method = cfg->method;
4820 int pos, i, rot_amount;
4821 int max_epilog_size = 16 + 20*4;
4825 if (cfg->method->save_lmf)
4826 max_epilog_size += 128;
4828 if (mono_jit_trace_calls != NULL)
4829 max_epilog_size += 50;
4831 if (cfg->prof_options & MONO_PROFILE_ENTER_LEAVE)
4832 max_epilog_size += 50;
/* Ensure the buffer can hold the worst-case epilog (plus 16 bytes slack). */
4834 while (cfg->code_len + max_epilog_size > (cfg->code_size - 16)) {
4835 cfg->code_size *= 2;
4836 cfg->native_code = g_realloc (cfg->native_code, cfg->code_size);
4837 mono_jit_stats.code_reallocs++;
4841 * Keep in sync with OP_JMP
4843 code = cfg->native_code + cfg->code_len;
4845 if (mono_jit_trace_calls != NULL && mono_trace_eval (method)) {
4846 code = mono_arch_instrument_epilog (cfg, mono_trace_leave_method, code, TRUE);
4850 /* Load returned vtypes into registers if needed */
4851 cinfo = cfg->arch.cinfo;
4852 if (cinfo->ret.storage == RegTypeStructByVal) {
4853 MonoInst *ins = cfg->ret;
4855 if (arm_is_imm12 (ins->inst_offset)) {
4856 ARM_LDR_IMM (code, ARMREG_R0, ins->inst_basereg, ins->inst_offset);
4858 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_offset);
4859 ARM_LDR_REG_REG (code, ARMREG_R0, ins->inst_basereg, ARMREG_LR);
/* --- LMF teardown: pop the frame and restore via the LMF copy --- */
4863 if (method->save_lmf) {
4865 /* all but r0-r3, sp and pc */
4866 pos += sizeof (MonoLMF) - (4 * 10);
4868 /* r2 contains the pointer to the current LMF */
4869 code = emit_big_add (code, ARMREG_R2, cfg->frame_reg, cfg->stack_usage - lmf_offset);
4870 /* ip = previous_lmf */
4871 ARM_LDR_IMM (code, ARMREG_IP, ARMREG_R2, G_STRUCT_OFFSET (MonoLMF, previous_lmf));
4873 ARM_LDR_IMM (code, ARMREG_LR, ARMREG_R2, G_STRUCT_OFFSET (MonoLMF, lmf_addr));
4874 /* *(lmf_addr) = previous_lmf */
4875 ARM_STR_IMM (code, ARMREG_IP, ARMREG_LR, G_STRUCT_OFFSET (MonoLMF, previous_lmf));
4876 /* FIXME: speedup: there is no actual need to restore the registers if
4877 * we didn't actually change them (idea from Zoltan).
4880 /* point sp at the registers to restore: 10 is 14 -4, because we skip r0-r3 */
4881 ARM_ADD_REG_IMM8 (code, ARMREG_SP, ARMREG_R2, (sizeof (MonoLMF) - 10 * sizeof (gulong)));
4882 ARM_POP_NWB (code, 0xaff0); /* restore ip to sp and lr to pc */
/* --- non-LMF teardown: add frame size back, pop saved regs into pc --- */
4884 if ((i = mono_arm_is_rotated_imm8 (cfg->stack_usage, &rot_amount)) >= 0) {
4885 ARM_ADD_REG_IMM (code, ARMREG_SP, cfg->frame_reg, i, rot_amount);
4887 code = mono_arm_emit_load_imm (code, ARMREG_IP, cfg->stack_usage);
4888 ARM_ADD_REG_REG (code, ARMREG_SP, ARMREG_SP, ARMREG_IP);
4890 /* FIXME: add v4 thumb interworking support */
4891 ARM_POP_NWB (code, cfg->used_int_regs | ((1 << ARMREG_SP) | (1 << ARMREG_PC)));
4894 cfg->code_len = code - cfg->native_code;
4896 g_assert (cfg->code_len < cfg->code_size);
4900 /* remove once throw_exception_by_name is eliminated */
/*
 * exception_id_by_name:
 *   Map a corlib exception class name to its MONO_EXC_* intrinsic id, used
 *   by mono_arch_emit_exceptions () to share one throw sequence per class.
 *   Aborts via g_error () on an unknown name -- callers must only pass names
 *   emitted by this backend.
 */
4902 exception_id_by_name (const char *name)
4904 if (strcmp (name, "IndexOutOfRangeException") == 0)
4905 return MONO_EXC_INDEX_OUT_OF_RANGE;
4906 if (strcmp (name, "OverflowException") == 0)
4907 return MONO_EXC_OVERFLOW;
4908 if (strcmp (name, "ArithmeticException") == 0)
4909 return MONO_EXC_ARITHMETIC;
4910 if (strcmp (name, "DivideByZeroException") == 0)
4911 return MONO_EXC_DIVIDE_BY_ZERO;
4912 if (strcmp (name, "InvalidCastException") == 0)
4913 return MONO_EXC_INVALID_CAST;
4914 if (strcmp (name, "NullReferenceException") == 0)
4915 return MONO_EXC_NULL_REF;
4916 if (strcmp (name, "ArrayTypeMismatchException") == 0)
4917 return MONO_EXC_ARRAY_TYPE_MISMATCH;
4918 g_error ("Unknown intrinsic exception %s\n", name);
/*
 * mono_arch_emit_exceptions:
 *   Append the out-of-line exception-throwing stubs after the method body.
 *   Each distinct exception class gets one shared stub (tracked in
 *   exc_throw_pos); every MONO_PATCH_INFO_EXC branch in the body is patched
 *   to jump to its class's stub, which loads the class token and calls
 *   mono_arch_throw_corlib_exception.
 * NOTE(review): this listing is elided (line-number gaps); code left untouched.
 */
4923 mono_arch_emit_exceptions (MonoCompile *cfg)
4925 MonoJumpInfo *patch_info;
4928 const guint8* exc_throw_pos [MONO_EXC_INTRINS_NUM] = {NULL};
4929 guint8 exc_throw_found [MONO_EXC_INTRINS_NUM] = {0};
4930 int max_epilog_size = 50;
4932 /* count the number of exception infos */
4935 * make sure we have enough space for exceptions
4937 for (patch_info = cfg->patch_info; patch_info; patch_info = patch_info->next) {
4938 if (patch_info->type == MONO_PATCH_INFO_EXC) {
4939 i = exception_id_by_name (patch_info->data.target);
4940 if (!exc_throw_found [i]) {
4941 max_epilog_size += 32;
4942 exc_throw_found [i] = TRUE;
4947 while (cfg->code_len + max_epilog_size > (cfg->code_size - 16)) {
4948 cfg->code_size *= 2;
4949 cfg->native_code = g_realloc (cfg->native_code, cfg->code_size);
4950 mono_jit_stats.code_reallocs++;
4953 code = cfg->native_code + cfg->code_len;
4955 /* add code to raise exceptions */
4956 for (patch_info = cfg->patch_info; patch_info; patch_info = patch_info->next) {
4957 switch (patch_info->type) {
4958 case MONO_PATCH_INFO_EXC: {
4959 MonoClass *exc_class;
4960 unsigned char *ip = patch_info->ip.i + cfg->native_code;
4962 i = exception_id_by_name (patch_info->data.target);
/* Reuse an already-emitted stub for this exception class if one exists. */
4963 if (exc_throw_pos [i]) {
4964 arm_patch (ip, exc_throw_pos [i]);
4965 patch_info->type = MONO_PATCH_INFO_NONE;
4968 exc_throw_pos [i] = code;
4970 arm_patch (ip, code);
4972 exc_class = mono_class_from_name (mono_defaults.corlib, "System", patch_info->data.name);
4973 g_assert (exc_class);
/* Stub: r1 = faulting return address (lr), r0 = class token loaded from
 * the inline word after the call; the patch_info is retargeted so the
 * call lands on mono_arch_throw_corlib_exception. */
4975 ARM_MOV_REG_REG (code, ARMREG_R1, ARMREG_LR);
4976 ARM_LDR_IMM (code, ARMREG_R0, ARMREG_PC, 0);
4977 patch_info->type = MONO_PATCH_INFO_INTERNAL_METHOD;
4978 patch_info->data.name = "mono_arch_throw_corlib_exception";
4979 patch_info->ip.i = code - cfg->native_code;
4981 *(guint32*)(gpointer)code = exc_class->type_token;
4991 cfg->code_len = code - cfg->native_code;
4993 g_assert (cfg->code_len < cfg->code_size);
4997 static gboolean tls_offset_inited = FALSE;
5000 mono_arch_setup_jit_tls_data (MonoJitTlsData *tls)
5002 if (!tls_offset_inited) {
5003 tls_offset_inited = TRUE;
5005 lmf_tls_offset = mono_get_lmf_tls_offset ();
5006 lmf_addr_tls_offset = mono_get_lmf_addr_tls_offset ();
/* Per-thread JIT TLS teardown; nothing to release on ARM (body elided in this excerpt). */
5011 mono_arch_free_jit_tls_data (MonoJitTlsData *tls)
/* Arch-specific intrinsics hook; ARM provides none here — presumably returns NULL (body elided). */
5016 mono_arch_emit_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
/* Debug pretty-printer hook for the old tree-based IR (body elided in this excerpt). */
5023 mono_arch_print_tree (MonoInst *tree, int arity)
/* Delegates to the generic helper to load the current MonoDomain. */
5029 mono_arch_get_domain_intrinsic (MonoCompile* cfg)
5031 return mono_get_domain_intrinsic (cfg);
/* Offset of the patchable instruction at CODE (body elided in this excerpt). */
5035 mono_arch_get_patch_offset (guint8 *code)
/* No register windows on ARM: no-op (body elided in this excerpt). */
5042 mono_arch_flush_register_windows (void)
5046 #ifdef MONO_ARCH_HAVE_IMT
/*
 * mono_arch_emit_imt_argument:
 *
 *   Arrange for the IMT/method argument of CALL to be passed in V5.
 * Three cases: AOT compilation, generic-sharing context, and the plain
 * JIT case (constant method pointer).  NOTE(review): some lines (MonoInst
 * declarations, if/else braces) are elided in this excerpt.
 */
5049 mono_arch_emit_imt_argument (MonoCompile *cfg, MonoCallInst *call, MonoInst *imt_arg)
5051 if (cfg->compile_aot) {
5052 int method_reg = mono_alloc_ireg (cfg);
/* the IMT argument must always be materialized in a register under AOT */
5055 call->dynamic_imt_arg = TRUE;
5058 mono_call_inst_add_outarg_reg (cfg, call, imt_arg->dreg, ARMREG_V5, FALSE);
/* no explicit imt_arg: load the method as an AOT constant instead */
5060 MONO_INST_NEW (cfg, ins, OP_AOTCONST);
5061 ins->dreg = method_reg;
5062 ins->inst_p0 = call->method;
5063 ins->inst_c1 = MONO_PATCH_INFO_METHODCONST;
5064 MONO_ADD_INS (cfg->cbb, ins);
5066 mono_call_inst_add_outarg_reg (cfg, call, method_reg, ARMREG_V5, FALSE);
5068 } else if (cfg->generic_context) {
5070 /* Always pass in a register for simplicity */
5071 call->dynamic_imt_arg = TRUE;
5073 cfg->uses_rgctx_reg = TRUE;
5076 mono_call_inst_add_outarg_reg (cfg, call, imt_arg->dreg, ARMREG_V5, FALSE);
/* plain JIT case: the method pointer is a compile-time constant */
5079 int method_reg = mono_alloc_preg (cfg);
5081 MONO_INST_NEW (cfg, ins, OP_PCONST);
5082 ins->inst_p0 = call->method;
5083 ins->dreg = method_reg;
5084 MONO_ADD_INS (cfg->cbb, ins);
5086 mono_call_inst_add_outarg_reg (cfg, call, method_reg, ARMREG_V5, FALSE);
/*
 * mono_arch_find_imt_method:
 *
 *   Recover the IMT method from the call site at CODE: the JIT embeds
 * the method pointer in the code stream immediately after an LDR-from-PC
 * instruction; a zero slot means AOT code, where the method is in V5.
 */
5092 mono_arch_find_imt_method (mgreg_t *regs, guint8 *code)
5094 guint32 *code_ptr = (guint32*)code;
5096 /* The IMT value is stored in the code stream right after the LDC instruction. */
5097 if (!IS_LDR_PC (code_ptr [0])) {
/* NOTE(review): the warning labels the words 0/-1/-2 but prints code_ptr [2]/[1]/[0] — verify ordering */
5098 g_warning ("invalid code stream, instruction before IMT value is not a LDC in %s() (code %p value 0: 0x%x -1: 0x%x -2: 0x%x)", __FUNCTION__, code, code_ptr [2], code_ptr [1], code_ptr [0]);
5099 g_assert (IS_LDR_PC (code_ptr [0]));
5101 if (code_ptr [1] == 0)
5102 /* This is AOTed code, the IMT method is in V5 */
5103 return (MonoMethod*)regs [ARMREG_V5];
5105 return (MonoMethod*) code_ptr [1];
/* Extract the 'this' argument of METHOD from the saved registers at a call site. */
5109 mono_arch_find_this_argument (mgreg_t *regs, MonoMethod *method, MonoGenericSharingContext *gsctx)
5111 return mono_arch_get_this_arg_from_call (gsctx, mono_method_signature (method), regs, NULL);
/* For static RGCTX calls the vtable is passed in the dedicated RGCTX register. */
5115 mono_arch_find_static_call_vtable (mgreg_t *regs, guint8 *code)
5117 return (MonoVTable*) regs [MONO_ARCH_RGCTX_REG];
/* Set to 1 to emit an assertion in IMT thunks when called with a wrong method. */
5120 #define ENABLE_WRONG_METHOD_CHECK 0
/* Code-size budgets for the pieces of an IMT thunk, in bytes (N ARM instructions * 4). */
5121 #define BASE_SIZE (6 * 4)
5122 #define BSEARCH_ENTRY_SIZE (4 * 4)
5123 #define CMP_SIZE (3 * 4)
5124 #define BRANCH_SIZE (1 * 4)
5125 #define CALL_SIZE (2 * 4)
5126 #define WMC_SIZE (5 * 4)
/* Signed byte distance from A to B; assumes both fit in 32-bit addresses. */
5127 #define DISTANCE(A, B) (((gint32)(B)) - ((gint32)(A)))
/*
 * arm_emit_value_and_patch_ldr:
 *
 *   Emit VALUE into the code stream at CODE and patch the earlier
 * LDR-from-PC at TARGET so its 12-bit immediate offset reaches it.
 * (Emission of the value itself is elided in this excerpt.)
 */
5130 arm_emit_value_and_patch_ldr (arminstr_t *code, arminstr_t *target, guint32 value)
5132 guint32 delta = DISTANCE (target, code);
/* NOTE(review): delta is unsigned, so 'delta >= 0' is always true — a negative
 * distance wraps and is only caught by the 0xFFF bound; consider a signed check. */
5134 g_assert (delta >= 0 && delta <= 0xFFF);
/* OR the byte offset into the LDR's imm12 field */
5135 *target = *target | delta;
/*
 * mono_arch_build_imt_thunk:
 *
 *   Build the native IMT dispatch thunk for VTABLE: a sequence of
 * compare-and-branch checks (linear and bsearch-style) over IMT_ENTRIES
 * that jumps through the matching vtable slot.  Two passes: first size
 * each entry's chunk, then emit the code.  NOTE(review): many original
 * lines (braces, #else/#endif, debug guards) are elided in this excerpt;
 * comments describe the visible statements only.
 */
5141 mono_arch_build_imt_thunk (MonoVTable *vtable, MonoDomain *domain, MonoIMTCheckItem **imt_entries, int count,
5142 gpointer fail_tramp)
5144 int size, i, extra_space = 0;
5145 arminstr_t *code, *start, *vtable_target = NULL;
5146 gboolean large_offsets = FALSE;
5147 guint32 **constant_pool_starts;
5150 constant_pool_starts = g_new0 (guint32*, count);
5153 * We might be called with a fail_tramp from the IMT builder code even if
5154 * MONO_ARCH_HAVE_GENERALIZED_IMT_THUNK is not defined.
5156 //g_assert (!fail_tramp);
/* sizing pass: accumulate each entry's chunk_size into the total */
5158 for (i = 0; i < count; ++i) {
5159 MonoIMTCheckItem *item = imt_entries [i];
5160 if (item->is_equals) {
/* vtable slot too far for a 12-bit LDR offset: needs the long sequence */
5161 if (!arm_is_imm12 (DISTANCE (vtable, &vtable->vtable[item->value.vtable_slot]))) {
5162 item->chunk_size += 32;
5163 large_offsets = TRUE;
5166 if (item->check_target_idx) {
5167 if (!item->compare_done)
5168 item->chunk_size += CMP_SIZE;
5169 item->chunk_size += BRANCH_SIZE;
5171 #if ENABLE_WRONG_METHOD_CHECK
5172 item->chunk_size += WMC_SIZE;
5175 item->chunk_size += CALL_SIZE;
/* non-equals entry: a bsearch range check */
5177 item->chunk_size += BSEARCH_ENTRY_SIZE;
5178 imt_entries [item->check_target_idx]->compare_done = TRUE;
5180 size += item->chunk_size;
5184 size += 4 * count; /* The ARM_ADD_REG_IMM to pop the stack */
5186 start = code = mono_domain_code_reserve (domain, size);
5189 printf ("building IMT thunk for class %s %s entries %d code size %d code at %p end %p vtable %p\n", vtable->klass->name_space, vtable->klass->name, count, size, start, ((guint8*)start) + size, vtable);
5190 for (i = 0; i < count; ++i) {
5191 MonoIMTCheckItem *item = imt_entries [i];
5192 printf ("method %d (%p) %s vtable slot %p is_equals %d chunk size %d\n", i, item->key, item->key->name, &vtable->vtable [item->value.vtable_slot], item->is_equals, item->chunk_size);
/* prologue: save scratch registers (4 slots when large offsets need a PC slot) */
5197 ARM_PUSH4 (code, ARMREG_R0, ARMREG_R1, ARMREG_IP, ARMREG_PC);
5199 ARM_PUSH2 (code, ARMREG_R0, ARMREG_R1);
/* load the IMT method pointer embedded just before the return address */
5200 ARM_LDR_IMM (code, ARMREG_R0, ARMREG_LR, -4);
5201 vtable_target = code;
5202 ARM_LDR_IMM (code, ARMREG_IP, ARMREG_PC, 0);
5204 /* R0 == 0 means we are called from AOT code. In this case, V5 contains the IMT method */
5205 ARM_CMP_REG_IMM8 (code, ARMREG_R0, 0);
5206 ARM_MOV_REG_REG_COND (code, ARMREG_R0, ARMREG_V5, ARMCOND_EQ);
/* emission pass: one chunk per entry */
5208 for (i = 0; i < count; ++i) {
5209 MonoIMTCheckItem *item = imt_entries [i];
5210 arminstr_t *imt_method = NULL, *vtable_offset_ins = NULL;
5211 gint32 vtable_offset;
5213 item->code_target = (guint8*)code;
5215 if (item->is_equals) {
5216 if (item->check_target_idx) {
5217 if (!item->compare_done) {
/* compare R0 (IMT method) against this entry's key */
5219 ARM_LDR_IMM (code, ARMREG_R1, ARMREG_PC, 0);
5220 ARM_CMP_REG_REG (code, ARMREG_R0, ARMREG_R1);
5222 item->jmp_code = (guint8*)code;
5223 ARM_B_COND (code, ARMCOND_NE, 0);
5225 /*Enable the commented code to assert on wrong method*/
5226 #if ENABLE_WRONG_METHOD_CHECK
5228 ARM_LDR_IMM (code, ARMREG_R1, ARMREG_PC, 0);
5229 ARM_CMP_REG_REG (code, ARMREG_R0, ARMREG_R1);
5230 ARM_B_COND (code, ARMCOND_NE, 1);
5236 vtable_offset = DISTANCE (vtable, &vtable->vtable[item->value.vtable_slot]);
5237 if (!arm_is_imm12 (vtable_offset)) {
5239 * We need to branch to a computed address but we don't have
5240 * a free register to store it, since IP must contain the
5241 * vtable address. So we push the two values to the stack, and
5242 * load them both using LDM.
5244 /* Compute target address */
5245 vtable_offset_ins = code;
5246 ARM_LDR_IMM (code, ARMREG_R1, ARMREG_PC, 0);
5247 ARM_LDR_REG_REG (code, ARMREG_R1, ARMREG_IP, ARMREG_R1);
5248 /* Save it to the fourth slot */
5249 ARM_STR_IMM (code, ARMREG_R1, ARMREG_SP, 3 * sizeof (gpointer));
5250 /* Restore registers and branch */
5251 ARM_POP4 (code, ARMREG_R0, ARMREG_R1, ARMREG_IP, ARMREG_PC);
5253 code = arm_emit_value_and_patch_ldr (code, vtable_offset_ins, vtable_offset);
/* short path: offset fits in the LDR immediate */
5255 ARM_POP2 (code, ARMREG_R0, ARMREG_R1);
/* drop the two extra stack slots reserved by the large-offset prologue */
5257 ARM_ADD_REG_IMM8 (code, ARMREG_SP, ARMREG_SP, 2 * sizeof (gpointer));
5258 ARM_LDR_IMM (code, ARMREG_PC, ARMREG_IP, vtable_offset);
5262 code = arm_emit_value_and_patch_ldr (code, imt_method, (guint32)item->key);
5264 /*must emit after unconditional branch*/
5265 if (vtable_target) {
5266 code = arm_emit_value_and_patch_ldr (code, vtable_target, (guint32)vtable);
5267 item->chunk_size += 4;
5268 vtable_target = NULL;
5271 /*We reserve the space for bsearch IMT values after the first entry with an absolute jump*/
5272 constant_pool_starts [i] = code;
5274 code += extra_space;
/* bsearch entry: range check, branch forward when key is out of range */
5278 ARM_LDR_IMM (code, ARMREG_R1, ARMREG_PC, 0);
5279 ARM_CMP_REG_REG (code, ARMREG_R0, ARMREG_R1);
5281 item->jmp_code = (guint8*)code;
5282 ARM_B_COND (code, ARMCOND_GE, 0);
/* fixup pass: resolve forward branches and backfill constant pools */
5287 for (i = 0; i < count; ++i) {
5288 MonoIMTCheckItem *item = imt_entries [i];
5289 if (item->jmp_code) {
5290 if (item->check_target_idx)
5291 arm_patch (item->jmp_code, imt_entries [item->check_target_idx]->code_target);
5293 if (i > 0 && item->is_equals) {
5295 arminstr_t *space_start = constant_pool_starts [i];
5296 for (j = i - 1; j >= 0 && !imt_entries [j]->is_equals; --j) {
5297 space_start = arm_emit_value_and_patch_ldr (space_start, (arminstr_t*)imt_entries [j]->code_target, (guint32)imt_entries [j]->key);
5304 char *buff = g_strdup_printf ("thunk_for_class_%s_%s_entries_%d", vtable->klass->name_space, vtable->klass->name, count);
5305 mono_disassemble_code (NULL, (guint8*)start, size, buff);
5310 g_free (constant_pool_starts);
/* new code must be visible to the instruction stream */
5312 mono_arch_flush_icache ((guint8*)start, size);
5313 mono_stats.imt_thunks_size += code - start;
5315 g_assert (DISTANCE (start, code) <= size);
5322 mono_arch_context_get_int_reg (MonoContext *ctx, int reg)
5324 if (reg == ARMREG_SP)
5325 return (gpointer)ctx->esp;
5327 return (gpointer)ctx->regs [reg];
/* NOTE(review): the conditional-compilation directives separating the AOT,
 * trigger-page and SWI strategies below are elided in this excerpt. */
5331 * mono_arch_set_breakpoint:
5333 * Set a breakpoint at the native code corresponding to JI at NATIVE_OFFSET.
5334 * The location should contain code emitted by OP_SEQ_POINT.
5337 mono_arch_set_breakpoint (MonoJitInfo *ji, guint8 *ip)
5340 guint32 native_offset = ip - (guint8*)ji->code_start;
/* AOT path: flip the per-offset slot in the SeqPointInfo table to the trigger page */
5343 SeqPointInfo *info = mono_arch_get_seq_point_info (mono_domain_get (), ji->code_start);
5345 g_assert (native_offset % 4 == 0);
5346 g_assert (info->bp_addrs [native_offset / 4] == 0);
5347 info->bp_addrs [native_offset / 4] = bp_trigger_page;
5349 int dreg = ARMREG_LR;
5351 /* Read from another trigger page */
/* patch in: load the trigger page address from the code stream, then load through it */
5352 ARM_LDR_IMM (code, dreg, ARMREG_PC, 0);
5354 *(int*)code = (int)bp_trigger_page;
5356 ARM_LDR_IMM (code, dreg, dreg, 0);
5358 mono_arch_flush_icache (code - 16, 16);
5361 /* This is currently implemented by emitting an SWI instruction, which
5362 * qemu/linux seems to convert to a SIGILL.
/* SWI #8 encoding: condition AL, opcode 0xef in the top byte */
5364 *(int*)code = (0xef << 24) | 8;
5366 mono_arch_flush_icache (code - 4, 4);
5372 * mono_arch_clear_breakpoint:
5374 * Clear the breakpoint at IP.
5377 mono_arch_clear_breakpoint (MonoJitInfo *ji, guint8 *ip)
/* AOT path: reset the per-offset slot in the SeqPointInfo table */
5383 guint32 native_offset = ip - (guint8*)ji->code_start;
5384 SeqPointInfo *info = mono_arch_get_seq_point_info (mono_domain_get (), ji->code_start);
5386 g_assert (native_offset % 4 == 0);
5387 g_assert (info->bp_addrs [native_offset / 4] == bp_trigger_page);
5388 info->bp_addrs [native_offset / 4] = 0;
/* JIT path: overwrite the 4 breakpoint instructions (loop body elided in this excerpt) */
5390 for (i = 0; i < 4; ++i)
5393 mono_arch_flush_icache (ip, code - ip);
5398 * mono_arch_start_single_stepping:
5400 * Start single stepping.
5403 mono_arch_start_single_stepping (void)
/* revoke all access: sequence-point reads of the page now fault, signalling a step */
5405 mono_mprotect (ss_trigger_page, mono_pagesize (), 0);
5409 * mono_arch_stop_single_stepping:
5411 * Stop single stepping.
5414 mono_arch_stop_single_stepping (void)
/* restore read access so sequence-point loads no longer fault */
5416 mono_mprotect (ss_trigger_page, mono_pagesize (), MONO_MMAP_READ);
/* Signal delivered for trigger-page faults; platform-dependent (the
 * surrounding #if/#else selecting between the two is elided in this excerpt). */
5420 #define DBG_SIGNAL SIGBUS
5422 #define DBG_SIGNAL SIGSEGV
5426 * mono_arch_is_single_step_event:
5428 * Return whenever the machine state in SIGCTX corresponds to a single
5432 mono_arch_is_single_step_event (void *info, void *sigctx)
5434 siginfo_t *sinfo = info;
5436 /* Sometimes the address is off by 4 */
/* fault address within the single-step trigger page (with 128-byte slack) → step event */
5437 if (sinfo->si_addr >= ss_trigger_page && (guint8*)sinfo->si_addr <= (guint8*)ss_trigger_page + 128)
5444 * mono_arch_is_breakpoint_event:
5446 * Return whenever the machine state in SIGCTX corresponds to a breakpoint event.
5449 mono_arch_is_breakpoint_event (void *info, void *sigctx)
5451 siginfo_t *sinfo = info;
/* only the debugger signal qualifies; then match against the bp trigger page */
5453 if (sinfo->si_signo == DBG_SIGNAL) {
5454 /* Sometimes the address is off by 4 */
5455 if (sinfo->si_addr >= bp_trigger_page && (guint8*)sinfo->si_addr <= (guint8*)bp_trigger_page + 128)
/* Map the faulting context IP back to the sequence-point address for a
 * breakpoint (adjustment logic elided in this excerpt). */
5465 mono_arch_get_ip_for_breakpoint (MonoJitInfo *ji, MonoContext *ctx)
5467 guint8 *ip = MONO_CONTEXT_GET_IP (ctx);
/* Map the faulting context IP back to the sequence-point address for a
 * single-step event (adjustment logic elided in this excerpt). */
5478 mono_arch_get_ip_for_single_step (MonoJitInfo *ji, MonoContext *ctx)
5480 guint8 *ip = MONO_CONTEXT_GET_IP (ctx);
5488 * mono_arch_skip_breakpoint:
5490 * See mini-amd64.c for docs.
5493 mono_arch_skip_breakpoint (MonoContext *ctx)
/* advance past the faulting instruction: one 4-byte ARM instruction */
5495 MONO_CONTEXT_SET_IP (ctx, (guint8*)MONO_CONTEXT_GET_IP (ctx) + 4);
5499 * mono_arch_skip_single_step:
5501 * See mini-amd64.c for docs.
5504 mono_arch_skip_single_step (MonoContext *ctx)
/* advance past the faulting instruction: one 4-byte ARM instruction */
5506 MONO_CONTEXT_SET_IP (ctx, (guint8*)MONO_CONTEXT_GET_IP (ctx) + 4);
5510 * mono_arch_get_seq_point_info:
5512 * See mini-amd64.c for docs.
5515 mono_arch_get_seq_point_info (MonoDomain *domain, guint8 *code)
5520 // FIXME: Add a free function
5522 mono_domain_lock (domain);
5523 info = g_hash_table_lookup (domain_jit_info (domain)->arch_seq_points,
5525 mono_domain_unlock (domain);
5528 ji = mono_jit_info_table_find (domain, (char*)code);
5531 info = g_malloc0 (sizeof (SeqPointInfo) + ji->code_size);
5533 info->ss_trigger_page = ss_trigger_page;
5534 info->bp_trigger_page = bp_trigger_page;
5536 mono_domain_lock (domain);
5537 g_hash_table_insert (domain_jit_info (domain)->arch_seq_points,
5539 mono_domain_unlock (domain);