/*
 * mini-arm.c: ARM backend for the Mono code generator
 *
 * Authors:
 *   Paolo Molaro (lupus@ximian.com)
 *   Dietmar Maurer (dietmar@ximian.com)
 *
 * (C) 2003 Ximian, Inc.
 */
13 #include <mono/metadata/appdomain.h>
14 #include <mono/metadata/debug-helpers.h>
15 #include <mono/utils/mono-mmap.h>
22 #include "mono/arch/arm/arm-fpa-codegen.h"
23 #elif defined(ARM_FPU_VFP)
24 #include "mono/arch/arm/arm-vfp-codegen.h"
27 #if defined(__ARM_EABI__) && defined(__linux__) && !defined(PLATFORM_ANDROID)
28 #define HAVE_AEABI_READ_TP 1
31 static gint lmf_tls_offset = -1;
32 static gint lmf_addr_tls_offset = -1;
34 /* This mutex protects architecture specific caches */
35 #define mono_mini_arch_lock() EnterCriticalSection (&mini_arch_mutex)
36 #define mono_mini_arch_unlock() LeaveCriticalSection (&mini_arch_mutex)
37 static CRITICAL_SECTION mini_arch_mutex;
39 static int v5_supported = 0;
40 static int v7_supported = 0;
41 static int thumb_supported = 0;
/*
 * The code generated for sequence points reads from this location, which is
 * made read-only when single stepping is enabled.
 */
47 static gpointer ss_trigger_page;
49 /* Enabled breakpoints read from this trigger page */
50 static gpointer bp_trigger_page;
52 /* Structure used by the sequence points in AOTed code */
54 gpointer ss_trigger_page;
55 gpointer bp_trigger_page;
56 guint8* bp_addrs [MONO_ZERO_LEN_ARRAY];
/*
 * floating point support: on ARM it is a mess, there are at least 3
 * different setups, each of which is binary incompatible with the others.
 * 1) FPA: old and ugly, but unfortunately what current distros use
 *    the double binary format has the two words swapped. 8 double registers.
 *    Implemented usually by kernel emulation.
 * 2) softfloat: the compiler emulates all the fp ops. Usually uses the
 *    ugly swapped double format (I guess a softfloat-vfp exists, too, though).
 * 3) VFP: the new and actually sensible and useful FP support. Implemented
 *    in HW or kernel-emulated, requires new tools. I think this is what symbian uses.
 *
 * The plan is to write the FPA support first. softfloat can be tested in a chroot.
 */
73 int mono_exc_esp_offset = 0;
75 #define arm_is_imm12(v) ((v) > -4096 && (v) < 4096)
76 #define arm_is_imm8(v) ((v) > -256 && (v) < 256)
77 #define arm_is_fpimm8(v) ((v) >= -1020 && (v) <= 1020)
79 #define LDR_MASK ((0xf << ARMCOND_SHIFT) | (3 << 26) | (1 << 22) | (1 << 20) | (15 << 12))
80 #define LDR_PC_VAL ((ARMCOND_AL << ARMCOND_SHIFT) | (1 << 26) | (0 << 22) | (1 << 20) | (15 << 12))
81 #define IS_LDR_PC(val) (((val) & LDR_MASK) == LDR_PC_VAL)
83 #define ADD_LR_PC_4 ((ARMCOND_AL << ARMCOND_SHIFT) | (1 << 25) | (1 << 23) | (ARMREG_PC << 16) | (ARMREG_LR << 12) | 4)
84 #define MOV_LR_PC ((ARMCOND_AL << ARMCOND_SHIFT) | (1 << 24) | (0xa << 20) | (ARMREG_LR << 12) | ARMREG_PC)
87 /* A variant of ARM_LDR_IMM which can handle large offsets */
88 #define ARM_LDR_IMM_GENERAL(code, dreg, basereg, offset, scratch_reg) do { \
89 if (arm_is_imm12 ((offset))) { \
90 ARM_LDR_IMM (code, (dreg), (basereg), (offset)); \
92 g_assert ((scratch_reg) != (basereg)); \
93 code = mono_arm_emit_load_imm (code, (scratch_reg), (offset)); \
94 ARM_LDR_REG_REG (code, (dreg), (basereg), (scratch_reg)); \
98 #define ARM_STR_IMM_GENERAL(code, dreg, basereg, offset, scratch_reg) do { \
99 if (arm_is_imm12 ((offset))) { \
100 ARM_STR_IMM (code, (dreg), (basereg), (offset)); \
102 g_assert ((scratch_reg) != (basereg)); \
103 code = mono_arm_emit_load_imm (code, (scratch_reg), (offset)); \
104 ARM_STR_REG_REG (code, (dreg), (basereg), (scratch_reg)); \
/*
 * mono_arch_regname:
 *
 *   Return a printable name for the integer register REG (0..15),
 * or "unknown" when REG is out of range. Used only for debug output.
 * NOTE(review): the excerpt had dropped the "arm_pc" entry, the return
 * statements and the braces; reconstructed here to match the 16-entry
 * table implied by the `reg < 16` bound.
 */
const char*
mono_arch_regname (int reg)
{
	static const char * rnames[] = {
		"arm_r0", "arm_r1", "arm_r2", "arm_r3", "arm_v1",
		"arm_v2", "arm_v3", "arm_v4", "arm_v5", "arm_v6",
		"arm_v7", "arm_fp", "arm_ip", "arm_sp", "arm_lr",
		"arm_pc"
	};
	if (reg >= 0 && reg < 16)
		return rnames [reg];
	return "unknown";
}
/*
 * mono_arch_fregname:
 *
 *   Return a printable name for the floating point register REG (0..31),
 * or "unknown" when REG is out of range. Used only for debug output.
 * NOTE(review): the excerpt had dropped the "arm_f30"/"arm_f31" entries,
 * the return statements and the braces; reconstructed to match the
 * 32-entry table implied by the `reg < 32` bound.
 */
const char*
mono_arch_fregname (int reg)
{
	static const char * rnames[] = {
		"arm_f0", "arm_f1", "arm_f2", "arm_f3", "arm_f4",
		"arm_f5", "arm_f6", "arm_f7", "arm_f8", "arm_f9",
		"arm_f10", "arm_f11", "arm_f12", "arm_f13", "arm_f14",
		"arm_f15", "arm_f16", "arm_f17", "arm_f18", "arm_f19",
		"arm_f20", "arm_f21", "arm_f22", "arm_f23", "arm_f24",
		"arm_f25", "arm_f26", "arm_f27", "arm_f28", "arm_f29",
		"arm_f30", "arm_f31"
	};
	if (reg >= 0 && reg < 32)
		return rnames [reg];
	return "unknown";
}
140 emit_big_add (guint8 *code, int dreg, int sreg, int imm)
142 int imm8, rot_amount;
143 if ((imm8 = mono_arm_is_rotated_imm8 (imm, &rot_amount)) >= 0) {
144 ARM_ADD_REG_IMM (code, dreg, sreg, imm8, rot_amount);
147 g_assert (dreg != sreg);
148 code = mono_arm_emit_load_imm (code, dreg, imm);
149 ARM_ADD_REG_REG (code, dreg, dreg, sreg);
/*
 * emit_memcpy:
 *
 *   Emit code copying SIZE bytes from SREG+SOFFSET to DREG+DOFFSET.
 * Large copies (> 4 words) use an r0-r3 based loop; small copies are
 * unrolled word loads/stores through LR.
 * NOTE(review): this excerpt is missing interior lines (see the gaps in
 * the residual line numbers); the surviving code is kept verbatim.
 */
154 emit_memcpy (guint8 *code, int size, int dreg, int doffset, int sreg, int soffset)
156 /* we can use r0-r3, since this is called only for incoming args on the stack */
157 if (size > sizeof (gpointer) * 4) {
/* Loop variant: r0 = src cursor, r1 = dst cursor, r2 = remaining bytes. */
159 code = emit_big_add (code, ARMREG_R0, sreg, soffset);
160 code = emit_big_add (code, ARMREG_R1, dreg, doffset);
161 start_loop = code = mono_arm_emit_load_imm (code, ARMREG_R2, size);
162 ARM_LDR_IMM (code, ARMREG_R3, ARMREG_R0, 0);
163 ARM_STR_IMM (code, ARMREG_R3, ARMREG_R1, 0);
164 ARM_ADD_REG_IMM8 (code, ARMREG_R0, ARMREG_R0, 4);
165 ARM_ADD_REG_IMM8 (code, ARMREG_R1, ARMREG_R1, 4);
166 ARM_SUBS_REG_IMM8 (code, ARMREG_R2, ARMREG_R2, 4);
/* Branch back to the loop head while the subtract left non-zero. */
167 ARM_B_COND (code, ARMCOND_NE, 0);
168 arm_patch (code - 4, start_loop);
/* Unrolled variant: only usable while both offsets stay imm12-encodable. */
171 if (arm_is_imm12 (doffset) && arm_is_imm12 (doffset + size) &&
172 arm_is_imm12 (soffset) && arm_is_imm12 (soffset + size)) {
174 ARM_LDR_IMM (code, ARMREG_LR, sreg, soffset);
175 ARM_STR_IMM (code, ARMREG_LR, dreg, doffset);
/* Offsets too large: rebase into r0/r1 and restart from offset 0. */
181 code = emit_big_add (code, ARMREG_R0, sreg, soffset);
182 code = emit_big_add (code, ARMREG_R1, dreg, doffset);
183 doffset = soffset = 0;
185 ARM_LDR_IMM (code, ARMREG_LR, ARMREG_R0, soffset);
186 ARM_STR_IMM (code, ARMREG_LR, ARMREG_R1, doffset);
/* The copy is word-granular, so the whole size must have been consumed. */
192 g_assert (size == 0);
/*
 * emit_call_reg:
 *
 *   Emit an indirect call through REG: BLX on CPUs that support it,
 * otherwise the manual LR=PC / PC=REG sequence.
 * NOTE(review): interior lines (the v5 check and returns) are missing
 * from this excerpt; code kept verbatim.
 */
197 emit_call_reg (guint8 *code, int reg)
200 ARM_BLX_REG (code, reg);
/* Pre-v5 fallback: save the return address manually, then jump. */
202 ARM_MOV_REG_REG (code, ARMREG_LR, ARMREG_PC);
206 ARM_MOV_REG_REG (code, ARMREG_PC, reg);
/*
 * emit_call_seq:
 *
 *   Emit a call for dynamic methods: load the (later-patched) target
 * address from an inline literal into IP, then call through IP.
 * NOTE(review): interior lines are missing from this excerpt; code kept
 * verbatim.
 */
212 emit_call_seq (MonoCompile *cfg, guint8 *code)
214 if (cfg->method->dynamic) {
215 ARM_LDR_IMM (code, ARMREG_IP, ARMREG_PC, 0);
/* Reserve the literal slot the LDR above reads; patched with the target. */
217 *(gpointer*)code = NULL;
219 code = emit_call_reg (code, ARMREG_IP);
/*
 * emit_move_return_value:
 *
 *   Emit code moving a call's return value from the ABI return location
 * into INS->dreg; the FP cases differ per FPU flavor (FPA vs VFP).
 * NOTE(review): interior lines (other opcode cases, #ifdef heads) are
 * missing from this excerpt; code kept verbatim.
 */
227 emit_move_return_value (MonoCompile *cfg, MonoInst *ins, guint8 *code)
229 switch (ins->opcode) {
232 case OP_FCALL_MEMBASE:
/* FPA: result arrives in F0; move it only if the destination differs. */
234 if (ins->dreg != ARM_FPA_F0)
235 ARM_MVFD (code, ins->dreg, ARM_FPA_F0);
236 #elif defined(ARM_FPU_VFP)
/* VFP: the value arrives in r0(/r1); transfer into the VFP register. */
237 if (((MonoCallInst*)ins)->signature->ret->type == MONO_TYPE_R4) {
238 ARM_FMSR (code, ins->dreg, ARMREG_R0);
239 ARM_CVTS (code, ins->dreg, ins->dreg);
241 ARM_FMDRR (code, ARMREG_R0, ARMREG_R1, ins->dreg);
251 * mono_arch_get_argument_info:
252 * @csig: a method signature
253 * @param_count: the number of parameters to consider
254 * @arg_info: an array to store the result infos
256 * Gathers information on parameters such as size, alignment and
257 * padding. arg_info should be large enought to hold param_count + 1 entries.
259 * Returns the size of the activation frame.
/*
 * See the comment above: fills ARG_INFO (param_count + 1 entries) with
 * size/offset/padding of each stack argument and returns the activation
 * frame size.
 * NOTE(review): interior lines are missing from this excerpt; code kept
 * verbatim.
 */
262 mono_arch_get_argument_info (MonoMethodSignature *csig, int param_count, MonoJitArgumentInfo *arg_info)
264 int k, frame_size = 0;
265 guint32 size, align, pad;
/* A struct return consumes one hidden pointer slot in the frame. */
268 if (MONO_TYPE_ISSTRUCT (csig->ret)) {
269 frame_size += sizeof (gpointer);
273 arg_info [0].offset = offset;
276 frame_size += sizeof (gpointer);
280 arg_info [0].size = frame_size;
282 for (k = 0; k < param_count; k++) {
283 size = mini_type_stack_size_full (NULL, csig->params [k], &align, csig->pinvoke);
285 /* ignore alignment for now */
/* Pad the running frame size up to this argument's alignment. */
288 frame_size += pad = (align - (frame_size & (align - 1))) & (align - 1);
289 arg_info [k].pad = pad;
291 arg_info [k + 1].pad = 0;
292 arg_info [k + 1].size = size;
294 arg_info [k + 1].offset = offset;
/* Final padding rounds the frame to MONO_ARCH_FRAME_ALIGNMENT. */
298 align = MONO_ARCH_FRAME_ALIGNMENT;
299 frame_size += pad = (align - (frame_size & (align - 1))) & (align - 1);
300 arg_info [k].pad = pad;
/*
 * decode_vcall_slot_from_ldr:
 *
 *   Decode an `ldr pc, [rX, #imm]` instruction word: extract the base
 * register and imm12 offset, return the base register's runtime value
 * (from REGS) and store the signed offset in *DISPLACEMENT.
 * NOTE(review): interior lines are missing from this excerpt; code kept
 * verbatim.
 */
306 decode_vcall_slot_from_ldr (guint32 ldr, mgreg_t *regs, int *displacement)
/* Bits 16-19 = base register; low 12 bits = unsigned offset. */
310 reg = (ldr >> 16 ) & 0xf;
311 offset = ldr & 0xfff;
312 if (((ldr >> 23) & 1) == 0) /*U bit, 0 means negative and 1 positive*/
314 /*g_print ("found vcall at r%d + %d for code at %p 0x%x\n", reg, offset, code, *code);*/
315 o = (gpointer)regs [reg];
317 *displacement = offset;
/*
 * mono_arch_get_vcall_slot:
 *
 *   Given the return address of a call (CODE_PTR), recognize the calling
 * instruction sequence and, for vtable/interface calls done via
 * `ldr pc, [rX + #off]`, return the vtable base and displacement.
 * NOTE(review): interior lines of the explanatory comment are missing
 * from this excerpt; code kept verbatim.
 */
322 mono_arch_get_vcall_slot (guint8 *code_ptr, mgreg_t *regs, int *displacement)
324 guint32* code = (guint32*)code_ptr;
326 /* Locate the address of the method-specific trampoline. The call using
327 the vtable slot that took the processing flow to 'arch_create_jit_trampoline'
328 looks something like this:
337 The call sequence could be also:
340 function pointer literal
344 Note that on ARM5+ we can use one instruction instead of the last two.
345 Therefore, we need to locate the 'ldr rA' instruction to know which
346 register was used to hold the method addrs.
*/
349 /* This is the instruction after "ldc pc, xxx", "mov pc, xxx" or "bl xxx" could be either the IMT value or some other instruction*/
352 /* Three possible code sequences can happen here:
356 * ldr pc, [rX - #offset]
362 * ldr pc, [rX - #offset]
364 * direct branch with bl:
368 * direct branch with mov:
372 * We only need to identify interface and virtual calls, the others can be ignored.
*/
/* IMT case: the word before the ldr is the add lr, pc, #4 marker. */
375 if (IS_LDR_PC (code [-1]) && code [-2] == ADD_LR_PC_4)
376 return decode_vcall_slot_from_ldr (code [-1], regs, displacement);
/* Virtual call case: mov lr, pc immediately precedes the ldr. */
378 if (IS_LDR_PC (code [0]) && code [-1] == MOV_LR_PC)
379 return decode_vcall_slot_from_ldr (code [0], regs, displacement);
384 #define MAX_ARCH_DELEGATE_PARAMS 3
/*
 * get_delegate_invoke_impl:
 *
 *   Generate the delegate invoke thunk. With HAS_TARGET the `this`
 * argument is replaced with the delegate target; otherwise the first
 * PARAM_COUNT arguments are slid down one register. Optionally returns
 * the generated code size through CODE_SIZE.
 * NOTE(review): PARAM_COUNT is declared gboolean here although it is used
 * as an integer count — confirm against upstream (likely guint32).
 * Interior lines are missing from this excerpt; code kept verbatim.
 */
387 get_delegate_invoke_impl (gboolean has_target, gboolean param_count, guint32 *code_size)
389 guint8 *code, *start;
392 start = code = mono_global_codeman_reserve (12);
394 /* Replace the this argument with the target */
395 ARM_LDR_IMM (code, ARMREG_IP, ARMREG_R0, G_STRUCT_OFFSET (MonoDelegate, method_ptr));
396 ARM_LDR_IMM (code, ARMREG_R0, ARMREG_R0, G_STRUCT_OFFSET (MonoDelegate, target));
397 ARM_MOV_REG_REG (code, ARMREG_PC, ARMREG_IP);
399 g_assert ((code - start) <= 12);
401 mono_arch_flush_icache (start, 12);
/* No-target variant: 2 fixed instructions + one MOV per parameter. */
405 size = 8 + param_count * 4;
406 start = code = mono_global_codeman_reserve (size);
408 ARM_LDR_IMM (code, ARMREG_IP, ARMREG_R0, G_STRUCT_OFFSET (MonoDelegate, method_ptr));
409 /* slide down the arguments */
410 for (i = 0; i < param_count; ++i) {
411 ARM_MOV_REG_REG (code, (ARMREG_R0 + i), (ARMREG_R0 + i + 1));
413 ARM_MOV_REG_REG (code, ARMREG_PC, ARMREG_IP);
415 g_assert ((code - start) <= size);
417 mono_arch_flush_icache (start, size);
421 *code_size = code - start;
427 * mono_arch_get_delegate_invoke_impls:
429 * Return a list of MonoAotTrampInfo structures for the delegate invoke impl
/*
 * mono_arch_get_delegate_invoke_impls:
 *
 *   Build the AOT trampoline-info list: one has-target thunk plus one
 * no-target thunk per supported parameter count (0..MAX_ARCH_DELEGATE_PARAMS).
 * NOTE(review): interior lines are missing from this excerpt; code kept
 * verbatim.
 */
433 mono_arch_get_delegate_invoke_impls (void)
440 code = get_delegate_invoke_impl (TRUE, 0, &code_len);
441 res = g_slist_prepend (res, mono_aot_tramp_info_create (g_strdup ("delegate_invoke_impl_has_target"), code, code_len));
443 for (i = 0; i <= MAX_ARCH_DELEGATE_PARAMS; ++i) {
444 code = get_delegate_invoke_impl (FALSE, i, &code_len);
445 res = g_slist_prepend (res, mono_aot_tramp_info_create (g_strdup_printf ("delegate_invoke_impl_target_%d", i), code, code_len));
/*
 * mono_arch_get_delegate_invoke_impl:
 *
 *   Return (and cache under the arch lock) the delegate invoke thunk for
 * SIG, using the AOT named-code variant when available, otherwise
 * generating it via get_delegate_invoke_impl ().
 * NOTE(review): interior lines are missing from this excerpt; code kept
 * verbatim.
 */
452 mono_arch_get_delegate_invoke_impl (MonoMethodSignature *sig, gboolean has_target)
454 guint8 *code, *start;
456 /* FIXME: Support more cases */
457 if (MONO_TYPE_ISSTRUCT (sig->ret))
/* has_target path: one shared cached thunk. */
461 static guint8* cached = NULL;
462 mono_mini_arch_lock ();
464 mono_mini_arch_unlock ();
469 start = mono_aot_get_named_code ("delegate_invoke_impl_has_target");
471 start = get_delegate_invoke_impl (TRUE, 0, NULL);
473 mono_mini_arch_unlock ();
/* no-target path: one cached thunk per parameter count. */
476 static guint8* cache [MAX_ARCH_DELEGATE_PARAMS + 1] = {NULL};
479 if (sig->param_count > MAX_ARCH_DELEGATE_PARAMS)
/* Only register-sized parameters can be slid down in registers. */
481 for (i = 0; i < sig->param_count; ++i)
482 if (!mono_is_regsize_var (sig->params [i]))
485 mono_mini_arch_lock ();
486 code = cache [sig->param_count];
488 mono_mini_arch_unlock ();
493 char *name = g_strdup_printf ("delegate_invoke_impl_target_%d", sig->param_count);
494 start = mono_aot_get_named_code (name);
497 start = get_delegate_invoke_impl (FALSE, sig->param_count, NULL);
499 cache [sig->param_count] = start;
500 mono_mini_arch_unlock ();
508 mono_arch_get_this_arg_from_call (MonoGenericSharingContext *gsctx, MonoMethodSignature *sig, mgreg_t *regs, guint8 *code)
510 /* FIXME: handle returning a struct */
511 if (MONO_TYPE_ISSTRUCT (sig->ret))
512 return (gpointer)regs [ARMREG_R1];
513 return (gpointer)regs [ARMREG_R0];
517 * Initialize the cpu to execute managed code.
520 mono_arch_cpu_init (void)
525 * Initialize architecture specific code.
/*
 * mono_arch_init:
 *
 *   One-time backend initialization: create the arch mutex and allocate
 * the single-step / breakpoint trigger pages (the bp page is made
 * inaccessible so reads fault when breakpoints are armed).
 * NOTE(review): interior lines are missing from this excerpt; code kept
 * verbatim.
 */
528 mono_arch_init (void)
530 InitializeCriticalSection (&mini_arch_mutex);
532 ss_trigger_page = mono_valloc (NULL, mono_pagesize (), MONO_MMAP_READ|MONO_MMAP_32BIT);
533 bp_trigger_page = mono_valloc (NULL, mono_pagesize (), MONO_MMAP_READ|MONO_MMAP_32BIT);
534 mono_mprotect (bp_trigger_page, mono_pagesize (), 0);
538 * Cleanup architecture specific code.
541 mono_arch_cleanup (void)
546 * This function returns the optimizations supported on this cpu.
/*
 * mono_arch_cpu_optimizazions: (sic - historical spelling kept for ABI)
 *
 *   Detect CPU capabilities by parsing /proc/cpuinfo, setting the
 * v5/v7/thumb support globals, and return the optimization mask.
 * NOTE(review): interior lines (including a probable NULL check on the
 * fopen result and the fclose) are missing from this excerpt; code kept
 * verbatim.
 */
549 mono_arch_cpu_optimizazions (guint32 *exclude_mask)
553 thumb_supported = TRUE;
558 FILE *file = fopen ("/proc/cpuinfo", "r");
560 while ((line = fgets (buf, 512, file))) {
/* "Processor : ARMv7 ... (v7l)" - the "(vN" suffix gives the arch rev. */
561 if (strncmp (line, "Processor", 9) == 0) {
562 char *ver = strstr (line, "(v");
563 if (ver && (ver [2] == '5' || ver [2] == '6' || ver [2] == '7'))
565 if (ver && (ver [2] == '7'))
569 if (strncmp (line, "Features", 8) == 0) {
570 char *th = strstr (line, "thumb");
572 thumb_supported = TRUE;
580 /*printf ("features: v5: %d, thumb: %d\n", v5_supported, thumb_supported);*/
584 /* no arm-specific optimizations yet */
/*
 * is_regsize_var:
 *
 *   Return whether type T fits in a single 32-bit integer register
 * (pointer-like types and non-valuetype generic instances qualify).
 * NOTE(review): interior lines (switch head, integer cases, returns) are
 * missing from this excerpt; code kept verbatim.
 */
590 is_regsize_var (MonoType *t) {
593 t = mini_type_get_underlying_type (NULL, t);
600 case MONO_TYPE_FNPTR:
602 case MONO_TYPE_OBJECT:
603 case MONO_TYPE_STRING:
604 case MONO_TYPE_CLASS:
605 case MONO_TYPE_SZARRAY:
606 case MONO_TYPE_ARRAY:
/* Generic instances are register-sized only when not valuetypes. */
608 case MONO_TYPE_GENERICINST:
609 if (!mono_type_generic_inst_is_valuetype (t))
612 case MONO_TYPE_VALUETYPE:
/*
 * mono_arch_get_allocatable_int_vars:
 *
 *   Collect the method variables eligible for global register allocation:
 * live, non-volatile, non-indirect locals/args of register size.
 * NOTE(review): interior lines are missing from this excerpt; code kept
 * verbatim.
 */
619 mono_arch_get_allocatable_int_vars (MonoCompile *cfg)
624 for (i = 0; i < cfg->num_varinfo; i++) {
625 MonoInst *ins = cfg->varinfo [i];
626 MonoMethodVar *vmv = MONO_VARINFO (cfg, i);
/* Skip variables with an empty live range. */
629 if (vmv->range.first_use.abs_pos >= vmv->range.last_use.abs_pos)
632 if (ins->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT) || (ins->opcode != OP_LOCAL && ins->opcode != OP_ARG))
635 /* we can only allocate 32 bit values */
636 if (is_regsize_var (ins->inst_vtype)) {
637 g_assert (MONO_VARINFO (cfg, i)->reg == -1);
638 g_assert (i == vmv->idx);
639 vars = mono_varlist_insert_sorted (cfg, vars, vmv, FALSE);
646 #define USE_EXTRA_TEMPS 0
/*
 * mono_arch_get_global_int_regs:
 *
 *   Return the list of callee-saved integer registers (V1-V4, and V5
 * when it is not reserved for the vtable/rgctx/IMT argument) available
 * to the global register allocator.
 * NOTE(review): interior lines are missing from this excerpt; code kept
 * verbatim.
 */
649 mono_arch_get_global_int_regs (MonoCompile *cfg)
654 * FIXME: Interface calls might go through a static rgctx trampoline which
655 * sets V5, but it doesn't save it, so we need to save it ourselves, and
658 if (cfg->flags & MONO_CFG_HAS_CALLS)
659 cfg->uses_rgctx_reg = TRUE;
661 regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_V1));
662 regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_V2));
663 regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_V3));
664 regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_V4));
665 if (!(cfg->compile_aot || cfg->uses_rgctx_reg))
666 /* V5 is reserved for passing the vtable/rgctx/IMT method */
667 regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_V5));
668 /*regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_V6));*/
669 /*regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_V7));*/
675 * mono_arch_regalloc_cost:
677 * Return the cost, in number of memory references, of the action of
678 * allocating the variable VMV into a register during global register
682 mono_arch_regalloc_cost (MonoCompile *cfg, MonoMethodVar *vmv)
688 #ifndef __GNUC_PREREQ
689 #define __GNUC_PREREQ(maj, min) (0)
/*
 * mono_arch_flush_icache:
 *
 *   Flush the instruction cache for [code, code+size) so newly emitted
 * machine code becomes visible to execution. Platform-specific:
 * Darwin's sys_icache_invalidate, GCC's __clear_cache, or the Linux/
 * Android cacheflush syscall via inline asm.
 * NOTE(review): interior lines (#if head, asm bodies) are missing from
 * this excerpt; code kept verbatim.
 */
693 mono_arch_flush_icache (guint8 *code, gint size)
696 sys_icache_invalidate (code, size);
697 #elif __GNUC_PREREQ(4, 1)
698 __clear_cache (code, code + size);
699 #elif defined(PLATFORM_ANDROID)
700 const int syscall = 0xf0002;
708 : "r" (code), "r" (code + size), "r" (syscall)
709 : "r0", "r1", "r7", "r2"
712 __asm __volatile ("mov r0, %0\n"
715 "swi 0x9f0002 @ sys_cacheflush"
717 : "r" (code), "r" (code + size), "r" (0)
718 : "r0", "r1", "r3" );
735 guint16 vtsize; /* in param area */
738 guint8 size : 4; /* 1, 2, 4, 8, or regs used by RegTypeStructByVal */
745 gboolean vtype_retaddr;
754 /*#define __alignof__(a) sizeof(a)*/
755 #define __alignof__(type) G_STRUCT_OFFSET(struct { char c; type x; }, x)
/*
 * add_general:
 *
 *   Assign the next argument to a register or the stack. SIMPLE means a
 * single-word argument; otherwise a 64-bit value is placed in an even/odd
 * register pair, split between r3 and the stack, or fully on the stack,
 * depending on remaining registers and platform i8 alignment.
 * NOTE(review): interior lines are missing from this excerpt; code kept
 * verbatim.
 */
761 add_general (guint *gr, guint *stack_size, ArgInfo *ainfo, gboolean simple)
/* Single word: r0-r3 first, then the caller's stack area. */
764 if (*gr > ARMREG_R3) {
765 ainfo->offset = *stack_size;
766 ainfo->reg = ARMREG_SP; /* in the caller */
767 ainfo->storage = RegTypeBase;
770 ainfo->storage = RegTypeGeneral;
774 #if defined(__APPLE__) && defined(MONO_CROSS_COMPILE)
777 int i8_align = __alignof__ (gint64);
781 gboolean split = i8_align == 4;
783 gboolean split = TRUE;
/* 64-bit: may straddle r3/stack when splitting is allowed. */
786 if (*gr == ARMREG_R3 && split) {
787 /* first word in r3 and the second on the stack */
788 ainfo->offset = *stack_size;
789 ainfo->reg = ARMREG_SP; /* in the caller */
790 ainfo->storage = RegTypeBaseGen;
792 } else if (*gr >= ARMREG_R3) {
794 /* darwin aligns longs to 4 byte only */
800 ainfo->offset = *stack_size;
801 ainfo->reg = ARMREG_SP; /* in the caller */
802 ainfo->storage = RegTypeBase;
/* EABI: skip to an even register for 8-byte-aligned pairs. */
806 if (i8_align == 8 && ((*gr) & 1))
809 ainfo->storage = RegTypeIRegPair;
/*
 * get_call_info:
 *
 *   Compute the ARM calling-convention layout for SIG: where each
 * argument and the return value live (registers, register pairs, stack,
 * by-value structs) and the total stack usage. Allocates the CallInfo
 * from MP when given, else from the GLib heap.
 * NOTE(review): many interior lines are missing from this excerpt (case
 * labels, breaks, closing braces); the surviving code is kept verbatim.
 */
818 get_call_info (MonoMemPool *mp, MonoMethodSignature *sig, gboolean is_pinvoke)
821 int n = sig->hasthis + sig->param_count;
822 MonoType *simpletype;
823 guint32 stack_size = 0;
827 cinfo = mono_mempool_alloc0 (mp, sizeof (CallInfo) + (sizeof (ArgInfo) * n));
829 cinfo = g_malloc0 (sizeof (CallInfo) + (sizeof (ArgInfo) * n));
834 /* FIXME: handle returning a struct */
835 if (MONO_TYPE_ISSTRUCT (sig->ret)) {
/* Small pinvoke structs come back by value; others via hidden r0 pointer. */
838 if (is_pinvoke && mono_class_native_size (mono_class_from_mono_type (sig->ret), &align) <= sizeof (gpointer)) {
839 cinfo->ret.storage = RegTypeStructByVal;
841 add_general (&gr, &stack_size, &cinfo->ret, TRUE);
842 cinfo->struct_ret = ARMREG_R0;
843 cinfo->vtype_retaddr = TRUE;
/* `this` is passed like an ordinary pointer argument. */
849 add_general (&gr, &stack_size, cinfo->args + n, TRUE);
852 DEBUG(printf("params: %d\n", sig->param_count));
853 for (i = 0; i < sig->param_count; ++i) {
854 if ((sig->call_convention == MONO_CALL_VARARG) && (i == sig->sentinelpos)) {
855 /* Prevent implicit arguments and sig_cookie from
856 being passed in registers */
858 /* Emit the signature cookie just before the implicit arguments */
859 add_general (&gr, &stack_size, &cinfo->sig_cookie, TRUE);
861 DEBUG(printf("param %d: ", i));
862 if (sig->params [i]->byref) {
863 DEBUG(printf("byref\n"));
864 add_general (&gr, &stack_size, cinfo->args + n, TRUE);
868 simpletype = mini_type_get_underlying_type (NULL, sig->params [i]);
869 switch (simpletype->type) {
870 case MONO_TYPE_BOOLEAN:
873 cinfo->args [n].size = 1;
874 add_general (&gr, &stack_size, cinfo->args + n, TRUE);
880 cinfo->args [n].size = 2;
881 add_general (&gr, &stack_size, cinfo->args + n, TRUE);
886 cinfo->args [n].size = 4;
887 add_general (&gr, &stack_size, cinfo->args + n, TRUE);
893 case MONO_TYPE_FNPTR:
894 case MONO_TYPE_CLASS:
895 case MONO_TYPE_OBJECT:
896 case MONO_TYPE_STRING:
897 case MONO_TYPE_SZARRAY:
898 case MONO_TYPE_ARRAY:
900 cinfo->args [n].size = sizeof (gpointer);
901 add_general (&gr, &stack_size, cinfo->args + n, TRUE);
904 case MONO_TYPE_GENERICINST:
905 if (!mono_type_generic_inst_is_valuetype (simpletype)) {
906 cinfo->args [n].size = sizeof (gpointer);
907 add_general (&gr, &stack_size, cinfo->args + n, TRUE);
912 case MONO_TYPE_TYPEDBYREF:
913 case MONO_TYPE_VALUETYPE: {
919 if (simpletype->type == MONO_TYPE_TYPEDBYREF) {
920 size = sizeof (MonoTypedRef);
921 align = sizeof (gpointer);
923 MonoClass *klass = mono_class_from_mono_type (sig->params [i]);
925 size = mono_class_native_size (klass, &align);
927 size = mono_class_value_size (klass, &align);
929 DEBUG(printf ("load %d bytes struct\n",
930 mono_class_native_size (sig->params [i]->data.klass, NULL)));
/* Round the struct size up to whole words. */
933 align_size += (sizeof (gpointer) - 1);
934 align_size &= ~(sizeof (gpointer) - 1);
935 nwords = (align_size + sizeof (gpointer) -1 ) / sizeof (gpointer);
936 cinfo->args [n].storage = RegTypeStructByVal;
937 /* FIXME: align stack_size if needed */
939 if (align >= 8 && (gr & 1))
/* Struct words go into remaining r0-r3, the rest spills to the stack. */
942 if (gr > ARMREG_R3) {
943 cinfo->args [n].size = 0;
944 cinfo->args [n].vtsize = nwords;
946 int rest = ARMREG_R3 - gr + 1;
947 int n_in_regs = rest >= nwords? nwords: rest;
949 cinfo->args [n].size = n_in_regs;
950 cinfo->args [n].vtsize = nwords - n_in_regs;
951 cinfo->args [n].reg = gr;
955 cinfo->args [n].offset = stack_size;
956 /*g_print ("offset for arg %d at %d\n", n, stack_size);*/
957 stack_size += nwords * sizeof (gpointer);
965 add_general (&gr, &stack_size, cinfo->args + n, FALSE);
969 g_error ("Can't trampoline 0x%x", sig->params [i]->type);
973 /* Handle the case where there are no implicit arguments */
974 if ((sig->call_convention == MONO_CALL_VARARG) && (i == sig->sentinelpos)) {
975 /* Prevent implicit arguments and sig_cookie from
976 being passed in registers */
978 /* Emit the signature cookie just before the implicit arguments */
979 add_general (&gr, &stack_size, &cinfo->sig_cookie, TRUE);
/* Return value placement. */
983 simpletype = mini_type_get_underlying_type (NULL, sig->ret);
984 switch (simpletype->type) {
985 case MONO_TYPE_BOOLEAN:
996 case MONO_TYPE_FNPTR:
997 case MONO_TYPE_CLASS:
998 case MONO_TYPE_OBJECT:
999 case MONO_TYPE_SZARRAY:
1000 case MONO_TYPE_ARRAY:
1001 case MONO_TYPE_STRING:
1002 cinfo->ret.storage = RegTypeGeneral;
1003 cinfo->ret.reg = ARMREG_R0;
1007 cinfo->ret.storage = RegTypeIRegPair;
1008 cinfo->ret.reg = ARMREG_R0;
1012 cinfo->ret.storage = RegTypeFP;
1013 cinfo->ret.reg = ARMREG_R0;
1014 /* FIXME: cinfo->ret.reg = ???;
1015 cinfo->ret.storage = RegTypeFP;*/
1017 case MONO_TYPE_GENERICINST:
1018 if (!mono_type_generic_inst_is_valuetype (simpletype)) {
1019 cinfo->ret.storage = RegTypeGeneral;
1020 cinfo->ret.reg = ARMREG_R0;
1024 case MONO_TYPE_VALUETYPE:
1025 case MONO_TYPE_TYPEDBYREF:
1026 if (cinfo->ret.storage != RegTypeStructByVal)
1027 cinfo->ret.storage = RegTypeStructByAddr;
1029 case MONO_TYPE_VOID:
1032 g_error ("Can't handle as return value 0x%x", sig->ret->type);
1036 /* align stack size to 8 */
1037 DEBUG (printf (" stack size: %d (%d)\n", (stack_size + 15) & ~15, stack_size));
1038 stack_size = (stack_size + 7) & ~7;
1040 cinfo->stack_usage = stack_size;
1046 * Set var information according to the calling convention. arm version.
1047 * The locals var stuff should most likely be split in another method.
/*
 * mono_arch_allocate_vars:
 *
 *   Assign a frame slot (or register) to the return value, every local
 * and every incoming argument, choosing the frame register and the final
 * stack_offset. Offsets grow upward (MONO_CFG_HAS_SPILLUP).
 * NOTE(review): many interior lines are missing from this excerpt
 * (declarations, else branches, closing braces); code kept verbatim.
 */
1050 mono_arch_allocate_vars (MonoCompile *cfg)
1052 MonoMethodSignature *sig;
1053 MonoMethodHeader *header;
1055 int i, offset, size, align, curinst;
1056 int frame_reg = ARMREG_FP;
1060 sig = mono_method_signature (cfg->method);
/* Calling-convention info is computed once and cached on cfg->arch. */
1062 if (!cfg->arch.cinfo)
1063 cfg->arch.cinfo = get_call_info (cfg->mempool, sig, sig->pinvoke);
1064 cinfo = cfg->arch.cinfo;
1066 /* FIXME: this will change when we use FP as gcc does */
1067 cfg->flags |= MONO_CFG_HAS_SPILLUP;
1069 /* allow room for the vararg method args: void* and long/double */
1070 if (mono_jit_trace_calls != NULL && mono_trace_eval (cfg->method))
1071 cfg->param_area = MAX (cfg->param_area, sizeof (gpointer)*8);
1073 header = mono_method_get_header (cfg->method);
/*
1076 * We use the frame register also for any method that has
1077 * exception clauses. This way, when the handlers are called,
1078 * the code will reference local variables using the frame reg instead of
1079 * the stack pointer: if we had to restore the stack pointer, we'd
1080 * corrupt the method frames that are already on the stack (since
1081 * filters get called before stack unwinding happens) when the filter
1082 * code would call any method (this also applies to finally etc.).
*/
1084 if ((cfg->flags & MONO_CFG_HAS_ALLOCA) || header->num_clauses)
1085 frame_reg = ARMREG_FP;
1086 cfg->frame_reg = frame_reg;
1087 if (frame_reg != ARMREG_SP) {
1088 cfg->used_int_regs |= 1 << frame_reg;
1091 if (cfg->compile_aot || cfg->uses_rgctx_reg)
1092 /* V5 is reserved for passing the vtable/rgctx/IMT method */
1093 cfg->used_int_regs |= (1 << ARMREG_V5);
/* Non-struct returns live directly in r0. */
1097 if (!MONO_TYPE_ISSTRUCT (sig->ret)) {
1098 switch (mini_type_get_underlying_type (NULL, sig->ret)->type) {
1099 case MONO_TYPE_VOID:
1102 cfg->ret->opcode = OP_REGVAR;
1103 cfg->ret->inst_c0 = ARMREG_R0;
1107 /* local vars are at a positive offset from the stack pointer */
/*
1109 * also note that if the function uses alloca, we use FP
1110 * to point at the local variables.
*/
1112 offset = 0; /* linkage area */
1113 /* align the offset to 16 bytes: not sure this is needed here */
1115 //offset &= ~(8 - 1);
1117 /* add parameter area size for called functions */
1118 offset += cfg->param_area;
1121 if (cfg->flags & MONO_CFG_HAS_FPOUT)
1124 /* allow room to save the return value */
1125 if (mono_jit_trace_calls != NULL && mono_trace_eval (cfg->method))
1128 /* the MonoLMF structure is stored just below the stack pointer */
1129 if (MONO_TYPE_ISSTRUCT (sig->ret)) {
1130 if (cinfo->ret.storage == RegTypeStructByVal) {
1131 cfg->ret->opcode = OP_REGOFFSET;
1132 cfg->ret->inst_basereg = cfg->frame_reg;
1133 offset += sizeof (gpointer) - 1;
1134 offset &= ~(sizeof (gpointer) - 1);
1135 cfg->ret->inst_offset = - offset;
/* By-address struct return: give vret_addr a word-aligned slot. */
1137 inst = cfg->vret_addr;
1138 offset += sizeof(gpointer) - 1;
1139 offset &= ~(sizeof(gpointer) - 1);
1140 inst->inst_offset = offset;
1141 inst->opcode = OP_REGOFFSET;
1142 inst->inst_basereg = frame_reg;
1143 if (G_UNLIKELY (cfg->verbose_level > 1)) {
1144 printf ("vret_addr =");
1145 mono_print_ins (cfg->vret_addr);
1148 offset += sizeof(gpointer);
/* Lay out every local that was not register-allocated. */
1151 curinst = cfg->locals_start;
1152 for (i = curinst; i < cfg->num_varinfo; ++i) {
1153 inst = cfg->varinfo [i];
1154 if ((inst->flags & MONO_INST_IS_DEAD) || inst->opcode == OP_REGVAR)
1157 /* inst->backend.is_pinvoke indicates native sized value types, this is used by the
1158 * pinvoke wrappers when they call functions returning structure */
1159 if (inst->backend.is_pinvoke && MONO_TYPE_ISSTRUCT (inst->inst_vtype) && inst->inst_vtype->type != MONO_TYPE_TYPEDBYREF) {
1160 size = mono_class_native_size (mono_class_from_mono_type (inst->inst_vtype), &ualign);
1164 size = mono_type_size (inst->inst_vtype, &align);
1166 /* FIXME: if a structure is misaligned, our memcpy doesn't work,
1167 * since it loads/stores misaligned words, which don't do the right thing.
*/
1169 if (align < 4 && size >= 4)
1171 offset += align - 1;
1172 offset &= ~(align - 1);
1173 inst->inst_offset = offset;
1174 inst->opcode = OP_REGOFFSET;
1175 inst->inst_basereg = frame_reg;
1177 //g_print ("allocating local %d to %d\n", i, inst->inst_offset);
/* Give `this` a word-aligned stack slot when it isn't a REGVAR. */
1182 inst = cfg->args [curinst];
1183 if (inst->opcode != OP_REGVAR) {
1184 inst->opcode = OP_REGOFFSET;
1185 inst->inst_basereg = frame_reg;
1186 offset += sizeof (gpointer) - 1;
1187 offset &= ~(sizeof (gpointer) - 1);
1188 inst->inst_offset = offset;
1189 offset += sizeof (gpointer);
1194 if (sig->call_convention == MONO_CALL_VARARG) {
1198 /* Allocate a local slot to hold the sig cookie address */
1199 offset += align - 1;
1200 offset &= ~(align - 1);
1201 cfg->sig_cookie = offset;
/* Lay out the remaining incoming arguments. */
1205 for (i = 0; i < sig->param_count; ++i) {
1206 inst = cfg->args [curinst];
1208 if (inst->opcode != OP_REGVAR) {
1209 inst->opcode = OP_REGOFFSET;
1210 inst->inst_basereg = frame_reg;
1211 size = mini_type_stack_size_full (NULL, sig->params [i], &ualign, sig->pinvoke);
1213 /* FIXME: if a structure is misaligned, our memcpy doesn't work,
1214 * since it loads/stores misaligned words, which don't do the right thing.
*/
1216 if (align < 4 && size >= 4)
1218 /* The code in the prolog () stores words when storing vtypes received in a register */
1219 if (MONO_TYPE_ISSTRUCT (sig->params [i]))
1221 offset += align - 1;
1222 offset &= ~(align - 1);
1223 inst->inst_offset = offset;
1229 /* align the offset to 8 bytes */
1234 cfg->stack_offset = offset;
/*
 * mono_arch_create_vars:
 *
 *   Create arch-specific compile-time variables: the vret_addr argument
 * for by-address struct returns, and (for AOT with sequence points) the
 * seq_point_info / ss_trigger_page locals.
 * NOTE(review): interior lines are missing from this excerpt; code kept
 * verbatim.
 */
1238 mono_arch_create_vars (MonoCompile *cfg)
1240 MonoMethodSignature *sig;
1243 sig = mono_method_signature (cfg->method);
1245 if (!cfg->arch.cinfo)
1246 cfg->arch.cinfo = get_call_info (cfg->mempool, sig, sig->pinvoke);
1247 cinfo = cfg->arch.cinfo;
1249 if (cinfo->ret.storage == RegTypeStructByVal)
1250 cfg->ret_var_is_local = TRUE;
1252 if (MONO_TYPE_ISSTRUCT (sig->ret) && cinfo->ret.storage != RegTypeStructByVal) {
1253 cfg->vret_addr = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_ARG);
1254 if (G_UNLIKELY (cfg->verbose_level > 1)) {
1255 printf ("vret_addr = ");
1256 mono_print_ins (cfg->vret_addr);
1260 if (cfg->gen_seq_points && cfg->compile_aot) {
1261 MonoInst *ins = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1262 ins->flags |= MONO_INST_VOLATILE;
1263 cfg->arch.seq_point_info_var = ins;
1265 /* Allocate a separate variable for this to save 1 load per seq point */
1266 ins = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1267 ins->flags |= MONO_INST_VOLATILE;
1268 cfg->arch.ss_trigger_page_var = ins;
/*
 * emit_sig_cookie:
 *
 *   For vararg calls, build a trimmed copy of the signature (implicit
 * args removed) and store a pointer to it at the sig-cookie stack slot
 * so mono_ArgIterator_Setup can walk the variable arguments.
 * NOTE(review): interior lines are missing from this excerpt; code kept
 * verbatim.
 */
1273 emit_sig_cookie (MonoCompile *cfg, MonoCallInst *call, CallInfo *cinfo)
1275 MonoMethodSignature *tmp_sig;
1278 if (call->tail_call)
1281 /* FIXME: Add support for signature tokens to AOT */
1282 cfg->disable_aot = TRUE;
1284 g_assert (cinfo->sig_cookie.storage == RegTypeBase);
/*
1287 * mono_ArgIterator_Setup assumes the signature cookie is
1288 * passed first and all the arguments which were before it are
1289 * passed on the stack after the signature. So compensate by
1290 * passing a different signature.
*/
1292 tmp_sig = mono_metadata_signature_dup (call->signature);
1293 tmp_sig->param_count -= call->signature->sentinelpos;
1294 tmp_sig->sentinelpos = 0;
1295 memcpy (tmp_sig->params, call->signature->params + call->signature->sentinelpos, tmp_sig->param_count * sizeof (MonoType*));
1297 MONO_INST_NEW (cfg, sig_arg, OP_ICONST);
1298 sig_arg->dreg = mono_alloc_ireg (cfg);
1299 sig_arg->inst_p0 = tmp_sig;
1300 MONO_ADD_INS (cfg->cbb, sig_arg);
1302 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, ARMREG_SP, cinfo->sig_cookie.offset, sig_arg->dreg);
/*
 * mono_arch_get_llvm_call_info:
 *
 *   Translate our CallInfo into LLVMCallInfo; disables LLVM compilation
 * for any storage kind the LLVM backend can't express on ARM.
 * NOTE(review): interior lines are missing from this excerpt; code kept
 * verbatim.
 */
1307 mono_arch_get_llvm_call_info (MonoCompile *cfg, MonoMethodSignature *sig)
1312 LLVMCallInfo *linfo;
1314 n = sig->param_count + sig->hasthis;
1316 cinfo = get_call_info (cfg->mempool, sig, sig->pinvoke);
1318 linfo = mono_mempool_alloc0 (cfg->mempool, sizeof (LLVMCallInfo) + (sizeof (LLVMArgInfo) * n));
/*
1321 * LLVM always uses the native ABI while we use our own ABI, the
1322 * only difference is the handling of vtypes:
1323 * - we only pass/receive them in registers in some cases, and only
1324 * in 1 or 2 integer registers.
*/
1326 if (cinfo->ret.storage != RegTypeGeneral && cinfo->ret.storage != RegTypeNone && cinfo->ret.storage != RegTypeFP && cinfo->ret.storage != RegTypeIRegPair) {
1327 cfg->exception_message = g_strdup ("unknown ret conv");
1328 cfg->disable_llvm = TRUE;
1332 for (i = 0; i < n; ++i) {
1333 ainfo = cinfo->args + i;
1335 linfo->args [i].storage = LLVMArgNone;
1337 switch (ainfo->storage) {
1338 case RegTypeGeneral:
1339 case RegTypeIRegPair:
1341 linfo->args [i].storage = LLVMArgInIReg;
/* Any other storage kind is unsupported: fall back to the JIT. */
1344 cfg->exception_message = g_strdup_printf ("ainfo->storage (%d)", ainfo->storage);
1345 cfg->disable_llvm = TRUE;
/*
 * mono_arch_emit_call:
 * Emit IR that moves every argument of CALL into the register or stack
 * slot selected by get_call_info (): I8/U8 in register pairs, R4/R8 via
 * int regs (soft-float) or a SP bounce buffer, vtypes via OP_OUTARG_VT,
 * stack args via *_MEMBASE stores, plus the vararg signature cookie and
 * the vret address for struct returns.
 */
1355 mono_arch_emit_call (MonoCompile *cfg, MonoCallInst *call)
1358 MonoMethodSignature *sig;
1362 sig = call->signature;
1363 n = sig->param_count + sig->hasthis;
1365 cinfo = get_call_info (NULL, sig, sig->pinvoke);
1367 for (i = 0; i < n; ++i) {
1368 ArgInfo *ainfo = cinfo->args + i;
/* The implicit 'this' argument is typed as a native int. */
1371 if (i >= sig->hasthis)
1372 t = sig->params [i - sig->hasthis];
1374 t = &mono_defaults.int_class->byval_arg;
1375 t = mini_type_get_underlying_type (NULL, t);
1377 if ((sig->call_convention == MONO_CALL_VARARG) && (i == sig->sentinelpos)) {
1378 /* Emit the signature cookie just before the implicit arguments */
1379 emit_sig_cookie (cfg, call, cinfo);
1382 in = call->args [i];
1384 switch (ainfo->storage) {
1385 case RegTypeGeneral:
1386 case RegTypeIRegPair:
/* 64-bit integers occupy two consecutive vregs (dreg+1 / dreg+2) and two arg regs. */
1387 if (!t->byref && ((t->type == MONO_TYPE_I8) || (t->type == MONO_TYPE_U8))) {
1388 MONO_INST_NEW (cfg, ins, OP_MOVE);
1389 ins->dreg = mono_alloc_ireg (cfg);
1390 ins->sreg1 = in->dreg + 1;
1391 MONO_ADD_INS (cfg->cbb, ins);
1392 mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, ainfo->reg, FALSE);
1394 MONO_INST_NEW (cfg, ins, OP_MOVE);
1395 ins->dreg = mono_alloc_ireg (cfg);
1396 ins->sreg1 = in->dreg + 2;
1397 MONO_ADD_INS (cfg->cbb, ins);
1398 mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, ainfo->reg + 1, FALSE);
1399 } else if (!t->byref && ((t->type == MONO_TYPE_R8) || (t->type == MONO_TYPE_R4))) {
1400 #ifndef MONO_ARCH_SOFT_FLOAT
1404 if (ainfo->size == 4) {
1405 #ifdef MONO_ARCH_SOFT_FLOAT
1406 /* mono_emit_call_args () have already done the r8->r4 conversion */
1407 /* The converted value is in an int vreg */
1408 MONO_INST_NEW (cfg, ins, OP_MOVE);
1409 ins->dreg = mono_alloc_ireg (cfg);
1410 ins->sreg1 = in->dreg;
1411 MONO_ADD_INS (cfg->cbb, ins);
1412 mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, ainfo->reg, FALSE);
/* Hard-float R4: bounce through the param area on the stack to move FP -> int reg. */
1414 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORER4_MEMBASE_REG, ARMREG_SP, (cfg->param_area - 8), in->dreg);
1415 creg = mono_alloc_ireg (cfg);
1416 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOAD_MEMBASE, creg, ARMREG_SP, (cfg->param_area - 8));
1417 mono_call_inst_add_outarg_reg (cfg, call, creg, ainfo->reg, FALSE);
1420 #ifdef MONO_ARCH_SOFT_FLOAT
/* Soft-float R8: split the double vreg into low/high 32-bit halves. */
1421 MONO_INST_NEW (cfg, ins, OP_FGETLOW32);
1422 ins->dreg = mono_alloc_ireg (cfg);
1423 ins->sreg1 = in->dreg;
1424 MONO_ADD_INS (cfg->cbb, ins);
1425 mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, ainfo->reg, FALSE);
1427 MONO_INST_NEW (cfg, ins, OP_FGETHIGH32);
1428 ins->dreg = mono_alloc_ireg (cfg);
1429 ins->sreg1 = in->dreg;
1430 MONO_ADD_INS (cfg->cbb, ins);
1431 mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, ainfo->reg + 1, FALSE);
/* Hard-float R8: bounce both words through the param area. */
1433 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORER8_MEMBASE_REG, ARMREG_SP, (cfg->param_area - 8), in->dreg);
1434 creg = mono_alloc_ireg (cfg);
1435 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOAD_MEMBASE, creg, ARMREG_SP, (cfg->param_area - 8));
1436 mono_call_inst_add_outarg_reg (cfg, call, creg, ainfo->reg, FALSE);
1437 creg = mono_alloc_ireg (cfg);
1438 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOAD_MEMBASE, creg, ARMREG_SP, (cfg->param_area - 8 + 4));
1439 mono_call_inst_add_outarg_reg (cfg, call, creg, ainfo->reg + 1, FALSE);
1442 cfg->flags |= MONO_CFG_HAS_FPOUT;
/* Plain pointer-sized argument: simple move into the designated register. */
1444 MONO_INST_NEW (cfg, ins, OP_MOVE);
1445 ins->dreg = mono_alloc_ireg (cfg);
1446 ins->sreg1 = in->dreg;
1447 MONO_ADD_INS (cfg->cbb, ins);
1449 mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, ainfo->reg, FALSE);
1452 case RegTypeStructByAddr:
1455 /* FIXME: where is the data allocated? */
1456 arg->backend.reg3 = ainfo->reg;
1457 call->used_iregs |= 1 << ainfo->reg;
1458 g_assert_not_reached ();
1461 case RegTypeStructByVal:
/* Lowered later by mono_arch_emit_outarg_vt (); ArgInfo is copied for it. */
1462 MONO_INST_NEW (cfg, ins, OP_OUTARG_VT);
1463 ins->opcode = OP_OUTARG_VT;
1464 ins->sreg1 = in->dreg;
1465 ins->klass = in->klass;
1466 ins->inst_p0 = call;
1467 ins->inst_p1 = mono_mempool_alloc (cfg->mempool, sizeof (ArgInfo));
1468 memcpy (ins->inst_p1, ainfo, sizeof (ArgInfo));
1469 MONO_ADD_INS (cfg->cbb, ins);
/* Stack-passed argument: store it at its offset from SP. */
1472 if (!t->byref && ((t->type == MONO_TYPE_I8) || (t->type == MONO_TYPE_U8))) {
1473 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, ARMREG_SP, ainfo->offset, in->dreg);
1474 } else if (!t->byref && ((t->type == MONO_TYPE_R4) || (t->type == MONO_TYPE_R8))) {
1475 if (t->type == MONO_TYPE_R8) {
1476 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORER8_MEMBASE_REG, ARMREG_SP, ainfo->offset, in->dreg);
1478 #ifdef MONO_ARCH_SOFT_FLOAT
1479 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, ARMREG_SP, ainfo->offset, in->dreg);
1481 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORER4_MEMBASE_REG, ARMREG_SP, ainfo->offset, in->dreg);
1485 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, ARMREG_SP, ainfo->offset, in->dreg);
1488 case RegTypeBaseGen:
/* Argument split between the last register (R3) and the first stack slot. */
1489 if (!t->byref && ((t->type == MONO_TYPE_I8) || (t->type == MONO_TYPE_U8))) {
1490 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, ARMREG_SP, ainfo->offset, (G_BYTE_ORDER == G_BIG_ENDIAN) ? in->dreg + 1 : in->dreg + 2);
1491 MONO_INST_NEW (cfg, ins, OP_MOVE);
1492 ins->dreg = mono_alloc_ireg (cfg);
1493 ins->sreg1 = G_BYTE_ORDER == G_BIG_ENDIAN ? in->dreg + 2 : in->dreg + 1;
1494 MONO_ADD_INS (cfg->cbb, ins);
1495 mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, ARMREG_R3, FALSE);
1496 } else if (!t->byref && (t->type == MONO_TYPE_R8)) {
1499 #ifdef MONO_ARCH_SOFT_FLOAT
1500 g_assert_not_reached ();
1503 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORER8_MEMBASE_REG, ARMREG_SP, (cfg->param_area - 8), in->dreg);
1504 creg = mono_alloc_ireg (cfg);
1505 mono_call_inst_add_outarg_reg (cfg, call, creg, ARMREG_R3, FALSE);
1506 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOAD_MEMBASE, creg, ARMREG_SP, (cfg->param_area - 8));
1507 creg = mono_alloc_ireg (cfg);
1508 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOAD_MEMBASE, creg, ARMREG_SP, (cfg->param_area - 4));
1509 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, ARMREG_SP, ainfo->offset, creg);
1510 cfg->flags |= MONO_CFG_HAS_FPOUT;
1512 g_assert_not_reached ();
/* FP args are passed in int regs on this convention. */
1519 arg->backend.reg3 = ainfo->reg;
1520 /* FP args are passed in int regs */
1521 call->used_iregs |= 1 << ainfo->reg;
1522 if (ainfo->size == 8) {
1523 arg->opcode = OP_OUTARG_R8;
1524 call->used_iregs |= 1 << (ainfo->reg + 1);
1526 arg->opcode = OP_OUTARG_R4;
1529 cfg->flags |= MONO_CFG_HAS_FPOUT;
1533 g_assert_not_reached ();
1537 /* Handle the case where there are no implicit arguments */
1538 if (!sig->pinvoke && (sig->call_convention == MONO_CALL_VARARG) && (n == sig->sentinelpos))
1539 emit_sig_cookie (cfg, call, cinfo);
1541 if (sig->ret && MONO_TYPE_ISSTRUCT (sig->ret)) {
1544 if (cinfo->ret.storage == RegTypeStructByVal) {
1545 /* The JIT will transform this into a normal call */
1546 call->vret_in_reg = TRUE;
/* Struct return by address: pass the vret buffer address in its register. */
1548 MONO_INST_NEW (cfg, vtarg, OP_MOVE);
1549 vtarg->sreg1 = call->vret_var->dreg;
1550 vtarg->dreg = mono_alloc_preg (cfg);
1551 MONO_ADD_INS (cfg->cbb, vtarg);
1553 mono_call_inst_add_outarg_reg (cfg, call, vtarg->dreg, cinfo->ret.reg, FALSE);
1557 call->stack_usage = cinfo->stack_usage;
/*
 * mono_arch_emit_outarg_vt:
 * Lower an OP_OUTARG_VT: copy the first ainfo->size words of the vtype
 * into consecutive argument registers, then memcpy the overflow part
 * (ovf_size words) to its stack slot at SP + doffset.
 */
1563 mono_arch_emit_outarg_vt (MonoCompile *cfg, MonoInst *ins, MonoInst *src)
1565 MonoCallInst *call = (MonoCallInst*)ins->inst_p0;
1566 ArgInfo *ainfo = ins->inst_p1;
1567 int ovf_size = ainfo->vtsize;
1568 int doffset = ainfo->offset;
1569 int i, soffset, dreg;
1572 for (i = 0; i < ainfo->size; ++i) {
1573 dreg = mono_alloc_ireg (cfg);
1574 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, dreg, src->dreg, soffset);
1575 mono_call_inst_add_outarg_reg (cfg, call, dreg, ainfo->reg + i, FALSE);
1576 soffset += sizeof (gpointer);
1578 //g_print ("vt size: %d at R%d + %d\n", doffset, vt->inst_basereg, vt->inst_offset);
1580 mini_emit_memcpy (cfg, ARMREG_SP, doffset, src->dreg, soffset, ovf_size * sizeof (gpointer), 0);
/*
 * mono_arch_emit_setret:
 * Emit IR that moves VAL into the method's return location:
 * SETLRET for 64-bit ints, SETFRET / moves for floats depending on the
 * FP configuration (soft-float vs VFP vs FPA), plain OP_MOVE otherwise.
 */
1584 mono_arch_emit_setret (MonoCompile *cfg, MonoMethod *method, MonoInst *val)
1586 MonoType *ret = mini_type_get_underlying_type (cfg->generic_sharing_context, mono_method_signature (method)->ret);
1589 if (ret->type == MONO_TYPE_I8 || ret->type == MONO_TYPE_U8) {
1592 if (COMPILE_LLVM (cfg)) {
1593 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, cfg->ret->dreg, val->dreg);
1595 MONO_INST_NEW (cfg, ins, OP_SETLRET);
1596 ins->sreg1 = val->dreg + 1;
1597 ins->sreg2 = val->dreg + 2;
1598 MONO_ADD_INS (cfg->cbb, ins);
1602 #ifdef MONO_ARCH_SOFT_FLOAT
1603 if (ret->type == MONO_TYPE_R8) {
1606 MONO_INST_NEW (cfg, ins, OP_SETFRET);
1607 ins->dreg = cfg->ret->dreg;
1608 ins->sreg1 = val->dreg;
1609 MONO_ADD_INS (cfg->cbb, ins);
1612 if (ret->type == MONO_TYPE_R4) {
1613 /* Already converted to an int in method_to_ir () */
1614 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, cfg->ret->dreg, val->dreg);
1617 #elif defined(ARM_FPU_VFP)
1618 if (ret->type == MONO_TYPE_R8 || ret->type == MONO_TYPE_R4) {
1621 MONO_INST_NEW (cfg, ins, OP_SETFRET);
1622 ins->dreg = cfg->ret->dreg;
1623 ins->sreg1 = val->dreg;
1624 MONO_ADD_INS (cfg->cbb, ins);
/* FPA (the #else branch): FP values move register-to-register directly. */
1628 if (ret->type == MONO_TYPE_R4 || ret->type == MONO_TYPE_R8) {
1629 MONO_EMIT_NEW_UNALU (cfg, OP_FMOVE, cfg->ret->dreg, val->dreg);
1636 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, cfg->ret->dreg, val->dreg);
1640 mono_arch_is_inst_imm (gint64 imm)
1645 #define DYN_CALL_STACK_ARGS 6
1648 MonoMethodSignature *sig;
1653 mgreg_t regs [PARAM_REGS + DYN_CALL_STACK_ARGS];
/*
 * dyn_call_supported:
 * Decide whether SIG can go through the dynamic-call path: all args must
 * fit in PARAM_REGS + DYN_CALL_STACK_ARGS slots and use conventions the
 * start/finish_dyn_call marshallers understand.
 */
1659 dyn_call_supported (CallInfo *cinfo, MonoMethodSignature *sig)
1663 if (sig->hasthis + sig->param_count > PARAM_REGS + DYN_CALL_STACK_ARGS)
1666 switch (cinfo->ret.storage) {
1668 case RegTypeGeneral:
1669 case RegTypeIRegPair:
1670 case RegTypeStructByAddr:
1675 #elif defined(ARM_FPU_VFP)
1684 for (i = 0; i < cinfo->nargs; ++i) {
1685 switch (cinfo->args [i].storage) {
1686 case RegTypeGeneral:
1688 case RegTypeIRegPair:
/* Stack args beyond the DynCallArgs regs array are not supported. */
1691 if (cinfo->args [i].offset >= (DYN_CALL_STACK_ARGS * sizeof (gpointer)))
1694 case RegTypeStructByVal:
1695 if (cinfo->args [i].reg + cinfo->args [i].vtsize >= PARAM_REGS + DYN_CALL_STACK_ARGS)
1703 // FIXME: Can't use cinfo only as it doesn't contain info about I8/float */
1704 for (i = 0; i < sig->param_count; ++i) {
1705 MonoType *t = sig->params [i];
1713 #ifdef MONO_ARCH_SOFT_FLOAT
/*
 * mono_arch_dyn_call_prepare:
 * Allocate and fill an ArchDynCallInfo for SIG, or return NULL (after
 * freeing the CallInfo, in the elided branch) when the dynamic-call path
 * can't handle the signature.  Caller frees via mono_arch_dyn_call_free.
 */
1732 mono_arch_dyn_call_prepare (MonoMethodSignature *sig)
1734 ArchDynCallInfo *info;
1737 cinfo = get_call_info (NULL, sig, FALSE);
1739 if (!dyn_call_supported (cinfo, sig)) {
1744 info = g_new0 (ArchDynCallInfo, 1);
1745 // FIXME: Preprocess the info to speed up start_dyn_call ()
1747 info->cinfo = cinfo;
1749 return (MonoDynCallInfo*)info;
/*
 * mono_arch_dyn_call_free:
 * Release an ArchDynCallInfo created by mono_arch_dyn_call_prepare
 * (frees the embedded CallInfo; the info struct itself is freed in the
 * elided following line).
 */
1753 mono_arch_dyn_call_free (MonoDynCallInfo *info)
1755 ArchDynCallInfo *ainfo = (ArchDynCallInfo*)info;
1757 g_free (ainfo->cinfo);
/*
 * mono_arch_start_dyn_call:
 * Marshal ARGS into the DynCallArgs buffer BUF: the vret address first
 * (when the return is a by-address struct), then 'this', then each
 * parameter widened/split into the regs[] slots that the dyn-call
 * trampoline will load into r0-r3 and the stack.
 */
1762 mono_arch_start_dyn_call (MonoDynCallInfo *info, gpointer **args, guint8 *ret, guint8 *buf, int buf_len)
1764 ArchDynCallInfo *dinfo = (ArchDynCallInfo*)info;
1765 DynCallArgs *p = (DynCallArgs*)buf;
1766 int arg_index, greg, i, j;
1767 MonoMethodSignature *sig = dinfo->sig;
1769 g_assert (buf_len >= sizeof (DynCallArgs));
1777 if (dinfo->cinfo->vtype_retaddr)
1778 p->regs [greg ++] = (mgreg_t)ret;
1781 p->regs [greg ++] = (mgreg_t)*(args [arg_index ++]);
1783 for (i = 0; i < sig->param_count; i++) {
1784 MonoType *t = mono_type_get_underlying_type (sig->params [i]);
1785 gpointer *arg = args [arg_index ++];
1786 ArgInfo *ainfo = &dinfo->cinfo->args [i + sig->hasthis];
/* Map the ArgInfo location to an index into p->regs []. */
1789 if (ainfo->storage == RegTypeGeneral || ainfo->storage == RegTypeIRegPair || ainfo->storage == RegTypeStructByVal)
1791 else if (ainfo->storage == RegTypeBase)
1792 slot = PARAM_REGS + (ainfo->offset / 4);
1794 g_assert_not_reached ();
1797 p->regs [slot] = (mgreg_t)*arg;
1802 case MONO_TYPE_STRING:
1803 case MONO_TYPE_CLASS:
1804 case MONO_TYPE_ARRAY:
1805 case MONO_TYPE_SZARRAY:
1806 case MONO_TYPE_OBJECT:
1810 p->regs [slot] = (mgreg_t)*arg;
1812 case MONO_TYPE_BOOLEAN:
1814 p->regs [slot] = *(guint8*)arg;
1817 p->regs [slot] = *(gint8*)arg;
/* Sub-word values are widened to a full machine word. */
1820 p->regs [slot] = *(gint16*)arg;
1823 case MONO_TYPE_CHAR:
1824 p->regs [slot] = *(guint16*)arg;
1827 p->regs [slot] = *(gint32*)arg;
1830 p->regs [slot] = *(guint32*)arg;
/* I8/U8 (and R8 below) occupy two consecutive slots. */
1834 p->regs [slot ++] = (mgreg_t)arg [0];
1835 p->regs [slot] = (mgreg_t)arg [1];
1838 p->regs [slot] = *(mgreg_t*)arg;
1841 p->regs [slot ++] = (mgreg_t)arg [0];
1842 p->regs [slot] = (mgreg_t)arg [1];
1844 case MONO_TYPE_GENERICINST:
1845 if (MONO_TYPE_IS_REFERENCE (t)) {
1846 p->regs [slot] = (mgreg_t)*arg;
1851 case MONO_TYPE_VALUETYPE:
1852 g_assert (ainfo->storage == RegTypeStructByVal);
/* size == 0 means the whole struct was pushed to the stack area. */
1854 if (ainfo->size == 0)
1855 slot = PARAM_REGS + (ainfo->offset / 4);
1859 for (j = 0; j < ainfo->size + ainfo->vtsize; ++j)
1860 p->regs [slot ++] = ((mgreg_t*)arg) [j];
1863 g_assert_not_reached ();
/*
 * mono_arch_finish_dyn_call:
 * After the dyn-call trampoline returns, copy the raw result words
 * (res/res2 from the DynCallArgs buffer) into *ret with the width and
 * signedness implied by sig->ret.
 *
 * FIX(review): line 1938 read "*(double*)®s" — an encoding corruption
 * where the HTML entity "&reg;" swallowed "&regs"; restored to "&regs"
 * (regs is declared in an elided line of this function).
 */
1869 mono_arch_finish_dyn_call (MonoDynCallInfo *info, guint8 *buf)
1871 ArchDynCallInfo *ainfo = (ArchDynCallInfo*)info;
1872 MonoMethodSignature *sig = ((ArchDynCallInfo*)info)->sig;
1873 guint8 *ret = ((DynCallArgs*)buf)->ret;
1874 mgreg_t res = ((DynCallArgs*)buf)->res;
1875 mgreg_t res2 = ((DynCallArgs*)buf)->res2;
1877 switch (mono_type_get_underlying_type (sig->ret)->type) {
1878 case MONO_TYPE_VOID:
1879 *(gpointer*)ret = NULL;
1881 case MONO_TYPE_STRING:
1882 case MONO_TYPE_CLASS:
1883 case MONO_TYPE_ARRAY:
1884 case MONO_TYPE_SZARRAY:
1885 case MONO_TYPE_OBJECT:
1889 *(gpointer*)ret = (gpointer)res;
1895 case MONO_TYPE_BOOLEAN:
1896 *(guint8*)ret = res;
1899 *(gint16*)ret = res;
1902 case MONO_TYPE_CHAR:
1903 *(guint16*)ret = res;
1906 *(gint32*)ret = res;
1909 *(guint32*)ret = res;
1913 /* This handles endianness as well */
1914 ((gint32*)ret) [0] = res;
1915 ((gint32*)ret) [1] = res2;
1917 case MONO_TYPE_GENERICINST:
1918 if (MONO_TYPE_IS_REFERENCE (sig->ret)) {
1919 *(gpointer*)ret = (gpointer)res;
1924 case MONO_TYPE_VALUETYPE:
1925 g_assert (ainfo->cinfo->vtype_retaddr);
1928 #if defined(ARM_FPU_VFP)
1930 *(float*)ret = *(float*)&res;
1932 case MONO_TYPE_R8: {
1938 *(double*)ret = *(double*)&regs;
1943 g_assert_not_reached ();
/*
 * Allow tracing to work with this interface (with an optional argument)
 */
/*
 * mono_arch_instrument_prolog:
 * Emit a call to FUNC (the enter-trace hook) with the method in r0, a
 * NULL placeholder frame pointer in r1, and the hook address in r2.
 */
1952 mono_arch_instrument_prolog (MonoCompile *cfg, void *func, void *p, gboolean enable_arguments)
1956 code = mono_arm_emit_load_imm (code, ARMREG_R0, (guint32)cfg->method);
1957 ARM_MOV_REG_IMM8 (code, ARMREG_R1, 0); /* NULL ebp for now */
1958 code = mono_arm_emit_load_imm (code, ARMREG_R2, (guint32)func);
1959 code = emit_call_reg (code, ARMREG_R2);
/*
 * mono_arch_instrument_epilog_full:
 * Emit a call to FUNC (the leave-trace hook) before the method returns,
 * saving the return value around the call according to its type
 * (one reg, reg pair, FP reg, or struct) and restoring it afterwards.
 *
 * FIX(review): line 1993 tested "strcmp (".ctor", method->name) && ..."
 * which is true for every String method EXCEPT .ctor, contradicting the
 * "special case string .ctor icall" comment; the string .ctor icall
 * returns the new object in r0 despite a void signature, so the match
 * must be equality: "!strcmp".
 */
1972 mono_arch_instrument_epilog_full (MonoCompile *cfg, void *func, void *p, gboolean enable_arguments, gboolean preserve_argument_registers)
1975 int save_mode = SAVE_NONE;
1977 MonoMethod *method = cfg->method;
1978 int rtype = mini_type_get_underlying_type (cfg->generic_sharing_context, mono_method_signature (method)->ret)->type;
1979 int save_offset = cfg->param_area;
/* Grow the code buffer if the ~16 instructions below might not fit. */
1983 offset = code - cfg->native_code;
1984 /* we need about 16 instructions */
1985 if (offset > (cfg->code_size - 16 * 4)) {
1986 cfg->code_size *= 2;
1987 cfg->native_code = g_realloc (cfg->native_code, cfg->code_size);
1988 code = cfg->native_code + offset;
1991 case MONO_TYPE_VOID:
1992 /* special case string .ctor icall */
1993 if (!strcmp (".ctor", method->name) && method->klass == mono_defaults.string_class)
1994 save_mode = SAVE_ONE;
1996 save_mode = SAVE_NONE;
2000 save_mode = SAVE_TWO;
2004 save_mode = SAVE_FP;
2006 case MONO_TYPE_VALUETYPE:
2007 save_mode = SAVE_STRUCT;
2010 save_mode = SAVE_ONE;
/* Spill the return value to the param area before calling the hook. */
2014 switch (save_mode) {
2016 ARM_STR_IMM (code, ARMREG_R0, cfg->frame_reg, save_offset);
2017 ARM_STR_IMM (code, ARMREG_R1, cfg->frame_reg, save_offset + 4);
2018 if (enable_arguments) {
2019 ARM_MOV_REG_REG (code, ARMREG_R2, ARMREG_R1);
2020 ARM_MOV_REG_REG (code, ARMREG_R1, ARMREG_R0);
2024 ARM_STR_IMM (code, ARMREG_R0, cfg->frame_reg, save_offset);
2025 if (enable_arguments) {
2026 ARM_MOV_REG_REG (code, ARMREG_R1, ARMREG_R0);
2030 /* FIXME: what reg? */
2031 if (enable_arguments) {
2032 /* FIXME: what reg? */
2036 if (enable_arguments) {
2037 /* FIXME: get the actual address */
2038 ARM_MOV_REG_REG (code, ARMREG_R1, ARMREG_R0);
2046 code = mono_arm_emit_load_imm (code, ARMREG_R0, (guint32)cfg->method);
2047 code = mono_arm_emit_load_imm (code, ARMREG_IP, (guint32)func);
2048 code = emit_call_reg (code, ARMREG_IP);
/* Restore the saved return value. */
2050 switch (save_mode) {
2052 ARM_LDR_IMM (code, ARMREG_R0, cfg->frame_reg, save_offset);
2053 ARM_LDR_IMM (code, ARMREG_R1, cfg->frame_reg, save_offset + 4);
2056 ARM_LDR_IMM (code, ARMREG_R0, cfg->frame_reg, save_offset);
/*
 * Branch/exception emission macros.  Conditional branches are always
 * emitted as forward patches (MONO_PATCH_INFO_BB / MONO_PATCH_INFO_EXC);
 * the direct-offset path below is disabled with "0 &&".
 */
2070 * The immediate field for cond branches is big enough for all reasonable methods
2072 #define EMIT_COND_BRANCH_FLAGS(ins,condcode) \
2073 if (0 && ins->inst_true_bb->native_offset) { \
2074 ARM_B_COND (code, (condcode), (code - cfg->native_code + ins->inst_true_bb->native_offset) & 0xffffff); \
2076 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_BB, ins->inst_true_bb); \
2077 ARM_B_COND (code, (condcode), 0); \
2080 #define EMIT_COND_BRANCH(ins,cond) EMIT_COND_BRANCH_FLAGS(ins, branch_cc_table [(cond)])
2082 /* emit an exception if condition is fail
2084 * We assign the extra code used to throw the implicit exceptions
2085 * to cfg->bb_exit as far as the big branch handling is concerned
2087 #define EMIT_COND_SYSTEM_EXCEPTION_FLAGS(condcode,exc_name) \
2089 mono_add_patch_info (cfg, code - cfg->native_code, \
2090 MONO_PATCH_INFO_EXC, exc_name); \
2091 ARM_BL_COND (code, (condcode), 0); \
2094 #define EMIT_COND_SYSTEM_EXCEPTION(cond,exc_name) EMIT_COND_SYSTEM_EXCEPTION_FLAGS(branch_cc_table [(cond)], (exc_name))
2097 mono_arch_peephole_pass_1 (MonoCompile *cfg, MonoBasicBlock *bb)
/*
 * mono_arch_peephole_pass_2:
 * Local store/load forwarding and move cleanup within a basic block:
 * a load that re-reads what the previous instruction stored becomes a
 * move (or is deleted); redundant and back-to-back inverse moves are
 * removed.
 */
2102 mono_arch_peephole_pass_2 (MonoCompile *cfg, MonoBasicBlock *bb)
2104 MonoInst *ins, *n, *last_ins = NULL;
2106 MONO_BB_FOR_EACH_INS_SAFE (bb, n, ins) {
2107 switch (ins->opcode) {
2110 /* Already done by an arch-independent pass */
2112 case OP_LOAD_MEMBASE:
2113 case OP_LOADI4_MEMBASE:
/* Forward a just-stored register into the load that re-reads it:
2115 * OP_STORE_MEMBASE_REG reg, offset(basereg)
2116 * OP_LOAD_MEMBASE offset(basereg), reg
 */
2118 if (last_ins && (last_ins->opcode == OP_STOREI4_MEMBASE_REG
2119 || last_ins->opcode == OP_STORE_MEMBASE_REG) &&
2120 ins->inst_basereg == last_ins->inst_destbasereg &&
2121 ins->inst_offset == last_ins->inst_offset) {
2122 if (ins->dreg == last_ins->sreg1) {
2123 MONO_DELETE_INS (bb, ins);
2126 //static int c = 0; printf ("MATCHX %s %d\n", cfg->method->name,c++);
2127 ins->opcode = OP_MOVE;
2128 ins->sreg1 = last_ins->sreg1;
2132 * Note: reg1 must be different from the basereg in the second load
2133 * OP_LOAD_MEMBASE offset(basereg), reg1
2134 * OP_LOAD_MEMBASE offset(basereg), reg2
2136 * OP_LOAD_MEMBASE offset(basereg), reg1
2137 * OP_MOVE reg1, reg2
2139 } if (last_ins && (last_ins->opcode == OP_LOADI4_MEMBASE
2140 || last_ins->opcode == OP_LOAD_MEMBASE) &&
2141 ins->inst_basereg != last_ins->dreg &&
2142 ins->inst_basereg == last_ins->inst_basereg &&
2143 ins->inst_offset == last_ins->inst_offset) {
2145 if (ins->dreg == last_ins->dreg) {
2146 MONO_DELETE_INS (bb, ins);
2149 ins->opcode = OP_MOVE;
2150 ins->sreg1 = last_ins->dreg;
2153 //g_assert_not_reached ();
2157 * OP_STORE_MEMBASE_IMM imm, offset(basereg)
2158 * OP_LOAD_MEMBASE offset(basereg), reg
2160 * OP_STORE_MEMBASE_IMM imm, offset(basereg)
2161 * OP_ICONST reg, imm
2163 } else if (last_ins && (last_ins->opcode == OP_STOREI4_MEMBASE_IMM
2164 || last_ins->opcode == OP_STORE_MEMBASE_IMM) &&
2165 ins->inst_basereg == last_ins->inst_destbasereg &&
2166 ins->inst_offset == last_ins->inst_offset) {
2167 //static int c = 0; printf ("MATCHX %s %d\n", cfg->method->name,c++);
2168 ins->opcode = OP_ICONST;
2169 ins->inst_c0 = last_ins->inst_imm;
2170 g_assert_not_reached (); // check this rule
/* Narrow loads after a matching narrow store become sign/zero extensions. */
2174 case OP_LOADU1_MEMBASE:
2175 case OP_LOADI1_MEMBASE:
2176 if (last_ins && (last_ins->opcode == OP_STOREI1_MEMBASE_REG) &&
2177 ins->inst_basereg == last_ins->inst_destbasereg &&
2178 ins->inst_offset == last_ins->inst_offset) {
2179 ins->opcode = (ins->opcode == OP_LOADI1_MEMBASE) ? OP_ICONV_TO_I1 : OP_ICONV_TO_U1;
2180 ins->sreg1 = last_ins->sreg1;
2183 case OP_LOADU2_MEMBASE:
2184 case OP_LOADI2_MEMBASE:
2185 if (last_ins && (last_ins->opcode == OP_STOREI2_MEMBASE_REG) &&
2186 ins->inst_basereg == last_ins->inst_destbasereg &&
2187 ins->inst_offset == last_ins->inst_offset) {
2188 ins->opcode = (ins->opcode == OP_LOADI2_MEMBASE) ? OP_ICONV_TO_I2 : OP_ICONV_TO_U2;
2189 ins->sreg1 = last_ins->sreg1;
2193 ins->opcode = OP_MOVE;
/* Delete self-moves and a move that immediately undoes the previous one. */
2197 if (ins->dreg == ins->sreg1) {
2198 MONO_DELETE_INS (bb, ins);
2202 * OP_MOVE sreg, dreg
2203 * OP_MOVE dreg, sreg
2205 if (last_ins && last_ins->opcode == OP_MOVE &&
2206 ins->sreg1 == last_ins->dreg &&
2207 ins->dreg == last_ins->sreg1) {
2208 MONO_DELETE_INS (bb, ins);
2216 bb->last_ins = last_ins;
2220 * the branch_cc_table should maintain the order of these
2234 branch_cc_table [] = {
/* Allocate DEST with opcode OP and insert it before INS in the current bb. */
2248 #define NEW_INS(cfg,dest,op) do { \
2249 MONO_INST_NEW ((cfg), (dest), (op)); \
2250 mono_bblock_insert_before_ins (bb, ins, (dest)); \
/*
 * map_to_reg_reg_op:
 * Map an opcode with an immediate/membase operand to its all-register
 * form: *_MEMBASE -> *_MEMINDEX, *_MEMBASE_IMM -> *_MEMBASE_REG, and
 * (in elided lines) *_IMM -> register variants.  Used by the lowering
 * pass once the immediate has been materialized into a register.
 */
2254 map_to_reg_reg_op (int op)
2263 case OP_COMPARE_IMM:
2265 case OP_ICOMPARE_IMM:
2279 case OP_LOAD_MEMBASE:
2280 return OP_LOAD_MEMINDEX;
2281 case OP_LOADI4_MEMBASE:
2282 return OP_LOADI4_MEMINDEX;
2283 case OP_LOADU4_MEMBASE:
2284 return OP_LOADU4_MEMINDEX;
2285 case OP_LOADU1_MEMBASE:
2286 return OP_LOADU1_MEMINDEX;
2287 case OP_LOADI2_MEMBASE:
2288 return OP_LOADI2_MEMINDEX;
2289 case OP_LOADU2_MEMBASE:
2290 return OP_LOADU2_MEMINDEX;
2291 case OP_LOADI1_MEMBASE:
2292 return OP_LOADI1_MEMINDEX;
2293 case OP_STOREI1_MEMBASE_REG:
2294 return OP_STOREI1_MEMINDEX;
2295 case OP_STOREI2_MEMBASE_REG:
2296 return OP_STOREI2_MEMINDEX;
2297 case OP_STOREI4_MEMBASE_REG:
2298 return OP_STOREI4_MEMINDEX;
2299 case OP_STORE_MEMBASE_REG:
2300 return OP_STORE_MEMINDEX;
2301 case OP_STORER4_MEMBASE_REG:
2302 return OP_STORER4_MEMINDEX;
2303 case OP_STORER8_MEMBASE_REG:
2304 return OP_STORER8_MEMINDEX;
2305 case OP_STORE_MEMBASE_IMM:
2306 return OP_STORE_MEMBASE_REG;
2307 case OP_STOREI1_MEMBASE_IMM:
2308 return OP_STOREI1_MEMBASE_REG;
2309 case OP_STOREI2_MEMBASE_IMM:
2310 return OP_STOREI2_MEMBASE_REG;
2311 case OP_STOREI4_MEMBASE_IMM:
2312 return OP_STOREI4_MEMBASE_REG;
2314 g_assert_not_reached ();
2318 * Remove from the instruction list the instructions that can't be
2319 * represented with very simple instructions with no register
/*
 * mono_arch_lowering_pass:
 * Rewrite instructions whose immediates don't fit ARM's encodings:
 * materialize the constant via NEW_INS (OP_ICONST / OP_ADD_IMM) and
 * switch the instruction to its register form (map_to_reg_reg_op),
 * plus a few strength reductions (MUL by 0/1/power-of-two) and FP
 * compare-operand swaps.
 */
2323 mono_arch_lowering_pass (MonoCompile *cfg, MonoBasicBlock *bb)
2325 MonoInst *ins, *temp, *last_ins = NULL;
2326 int rot_amount, imm8, low_imm;
2328 MONO_BB_FOR_EACH_INS (bb, ins) {
2330 switch (ins->opcode) {
2334 case OP_COMPARE_IMM:
2335 case OP_ICOMPARE_IMM:
/* Immediates not expressible as a rotated 8-bit value go into a register. */
2349 if ((imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount)) < 0) {
2350 NEW_INS (cfg, temp, OP_ICONST);
2351 temp->inst_c0 = ins->inst_imm;
2352 temp->dreg = mono_alloc_ireg (cfg);
2353 ins->sreg2 = temp->dreg;
2354 ins->opcode = mono_op_imm_to_op (ins->opcode);
2356 if (ins->opcode == OP_SBB || ins->opcode == OP_ISBB || ins->opcode == OP_SUBCC)
/* MUL strength reduction: x*1 -> move, x*0 -> 0, x*2^k -> shift. */
2362 if (ins->inst_imm == 1) {
2363 ins->opcode = OP_MOVE;
2366 if (ins->inst_imm == 0) {
2367 ins->opcode = OP_ICONST;
2371 imm8 = mono_is_power_of_two (ins->inst_imm);
2373 ins->opcode = OP_SHL_IMM;
2374 ins->inst_imm = imm8;
2377 NEW_INS (cfg, temp, OP_ICONST);
2378 temp->inst_c0 = ins->inst_imm;
2379 temp->dreg = mono_alloc_ireg (cfg);
2380 ins->sreg2 = temp->dreg;
2381 ins->opcode = OP_IMUL;
2387 if (ins->next && (ins->next->opcode == OP_COND_EXC_C || ins->next->opcode == OP_COND_EXC_IC))
2388 /* ARM sets the C flag to 1 if there was _no_ overflow */
2389 ins->next->opcode = OP_COND_EXC_NC;
2391 case OP_LOCALLOC_IMM:
2392 NEW_INS (cfg, temp, OP_ICONST);
2393 temp->inst_c0 = ins->inst_imm;
2394 temp->dreg = mono_alloc_ireg (cfg);
2395 ins->sreg1 = temp->dreg;
2396 ins->opcode = OP_LOCALLOC;
2398 case OP_LOAD_MEMBASE:
2399 case OP_LOADI4_MEMBASE:
2400 case OP_LOADU4_MEMBASE:
2401 case OP_LOADU1_MEMBASE:
2402 /* we can do two things: load the immed in a register
2403 * and use an indexed load, or see if the immed can be
2404 * represented as an ad_imm + a load with a smaller offset
2405 * that fits. We just do the first for now, optimize later.
2407 if (arm_is_imm12 (ins->inst_offset))
2409 NEW_INS (cfg, temp, OP_ICONST);
2410 temp->inst_c0 = ins->inst_offset;
2411 temp->dreg = mono_alloc_ireg (cfg);
2412 ins->sreg2 = temp->dreg;
2413 ins->opcode = map_to_reg_reg_op (ins->opcode);
/* Halfword/signed-byte loads only have an 8-bit offset encoding. */
2415 case OP_LOADI2_MEMBASE:
2416 case OP_LOADU2_MEMBASE:
2417 case OP_LOADI1_MEMBASE:
2418 if (arm_is_imm8 (ins->inst_offset))
2420 NEW_INS (cfg, temp, OP_ICONST);
2421 temp->inst_c0 = ins->inst_offset;
2422 temp->dreg = mono_alloc_ireg (cfg);
2423 ins->sreg2 = temp->dreg;
2424 ins->opcode = map_to_reg_reg_op (ins->opcode);
/* FP loads: split the offset into base-adjust + small offset. */
2426 case OP_LOADR4_MEMBASE:
2427 case OP_LOADR8_MEMBASE:
2428 if (arm_is_fpimm8 (ins->inst_offset))
2430 low_imm = ins->inst_offset & 0x1ff;
2431 if ((imm8 = mono_arm_is_rotated_imm8 (ins->inst_offset & ~0x1ff, &rot_amount)) >= 0) {
2432 NEW_INS (cfg, temp, OP_ADD_IMM);
2433 temp->inst_imm = ins->inst_offset & ~0x1ff;
2434 temp->sreg1 = ins->inst_basereg;
2435 temp->dreg = mono_alloc_ireg (cfg);
2436 ins->inst_basereg = temp->dreg;
2437 ins->inst_offset = low_imm;
2440 /* VFP/FPA doesn't have indexed load instructions */
2441 g_assert_not_reached ();
2443 case OP_STORE_MEMBASE_REG:
2444 case OP_STOREI4_MEMBASE_REG:
2445 case OP_STOREI1_MEMBASE_REG:
2446 if (arm_is_imm12 (ins->inst_offset))
2448 NEW_INS (cfg, temp, OP_ICONST);
2449 temp->inst_c0 = ins->inst_offset;
2450 temp->dreg = mono_alloc_ireg (cfg);
2451 ins->sreg2 = temp->dreg;
2452 ins->opcode = map_to_reg_reg_op (ins->opcode);
2454 case OP_STOREI2_MEMBASE_REG:
2455 if (arm_is_imm8 (ins->inst_offset))
2457 NEW_INS (cfg, temp, OP_ICONST);
2458 temp->inst_c0 = ins->inst_offset;
2459 temp->dreg = mono_alloc_ireg (cfg);
2460 ins->sreg2 = temp->dreg;
2461 ins->opcode = map_to_reg_reg_op (ins->opcode);
2463 case OP_STORER4_MEMBASE_REG:
2464 case OP_STORER8_MEMBASE_REG:
2465 if (arm_is_fpimm8 (ins->inst_offset))
2467 low_imm = ins->inst_offset & 0x1ff;
2468 if ((imm8 = mono_arm_is_rotated_imm8 (ins->inst_offset & ~ 0x1ff, &rot_amount)) >= 0 && arm_is_fpimm8 (low_imm)) {
2469 NEW_INS (cfg, temp, OP_ADD_IMM);
2470 temp->inst_imm = ins->inst_offset & ~0x1ff;
2471 temp->sreg1 = ins->inst_destbasereg;
2472 temp->dreg = mono_alloc_ireg (cfg);
2473 ins->inst_destbasereg = temp->dreg;
2474 ins->inst_offset = low_imm;
2477 /*g_print ("fail with: %d (%d, %d)\n", ins->inst_offset, ins->inst_offset & ~0x1ff, low_imm);*/
2478 /* VFP/FPA doesn't have indexed store instructions */
2479 g_assert_not_reached ();
/* Immediate stores: materialize the value, then re-lower the offset. */
2481 case OP_STORE_MEMBASE_IMM:
2482 case OP_STOREI1_MEMBASE_IMM:
2483 case OP_STOREI2_MEMBASE_IMM:
2484 case OP_STOREI4_MEMBASE_IMM:
2485 NEW_INS (cfg, temp, OP_ICONST);
2486 temp->inst_c0 = ins->inst_imm;
2487 temp->dreg = mono_alloc_ireg (cfg);
2488 ins->sreg1 = temp->dreg;
2489 ins->opcode = map_to_reg_reg_op (ins->opcode);
2491 goto loop_start; /* make it handle the possibly big ins->inst_offset */
2493 gboolean swap = FALSE;
2497 /* Optimized away */
2502 /* Some fp compares require swapped operands */
2503 switch (ins->next->opcode) {
2505 ins->next->opcode = OP_FBLT;
2509 ins->next->opcode = OP_FBLT_UN;
2513 ins->next->opcode = OP_FBGE;
2517 ins->next->opcode = OP_FBGE_UN;
2525 ins->sreg1 = ins->sreg2;
2534 bb->last_ins = last_ins;
2535 bb->max_vreg = cfg->next_vreg;
/*
 * mono_arch_decompose_long_opts:
 * Decompose 64-bit ops: LNEG becomes RSBS/RSC of the two 32-bit halves
 * (reverse-subtract from 0 with carry propagation).
 */
2539 mono_arch_decompose_long_opts (MonoCompile *cfg, MonoInst *long_ins)
2543 if (long_ins->opcode == OP_LNEG) {
2545 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ARM_RSBS_IMM, ins->dreg + 1, ins->sreg1 + 1, 0);
2546 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ARM_RSC_IMM, ins->dreg + 2, ins->sreg1 + 2, 0);
/*
 * emit_float_to_int:
 * Emit code converting the float in SREG to an integer in DREG
 * (FPA FIXZ or VFP TOSIZD/TOUIZD via F0), then truncate/extend DREG
 * to SIZE bytes with the required signedness using shifts/masking.
 */
2552 emit_float_to_int (MonoCompile *cfg, guchar *code, int dreg, int sreg, int size, gboolean is_signed)
2554 /* sreg is a float, dreg is an integer reg */
2556 ARM_FIXZ (code, dreg, sreg);
2557 #elif defined(ARM_FPU_VFP)
2559 ARM_TOSIZD (code, ARM_VFP_F0, sreg);
2561 ARM_TOUIZD (code, ARM_VFP_F0, sreg);
2562 ARM_FMRS (code, dreg, ARM_VFP_F0);
/* Unsigned narrowing: mask (u1) or shift left/right logical (u2). */
2566 ARM_AND_REG_IMM8 (code, dreg, dreg, 0xff);
2567 else if (size == 2) {
2568 ARM_SHL_IMM (code, dreg, dreg, 16);
2569 ARM_SHR_IMM (code, dreg, dreg, 16);
/* Signed narrowing: shift left then arithmetic shift right. */
2573 ARM_SHL_IMM (code, dreg, dreg, 24);
2574 ARM_SAR_IMM (code, dreg, dreg, 24);
2575 } else if (size == 2) {
2576 ARM_SHL_IMM (code, dreg, dreg, 16);
2577 ARM_SAR_IMM (code, dreg, dreg, 16);
2585 const guchar *target;
2590 #define is_call_imm(diff) ((gint)(diff) >= -33554432 && (gint)(diff) <= 33554431)
/*
 * search_thunk_slot:
 * mono_domain_code_foreach callback: within one code chunk, find an
 * existing 3-word thunk jumping to pdata->target (or a free slot and
 * emit one), then patch pdata->code to branch to the thunk.  The chunk
 * must itself be reachable from the call site with a direct branch.
 */
2593 search_thunk_slot (void *data, int csize, int bsize, void *user_data) {
2594 PatchData *pdata = (PatchData*)user_data;
2595 guchar *code = data;
2596 guint32 *thunks = data;
2597 guint32 *endthunks = (guint32*)(code + bsize);
2599 int difflow, diffhigh;
2601 /* always ensure a call from pdata->code can reach to the thunks without further thunks */
2602 difflow = (char*)pdata->code - (char*)thunks;
2603 diffhigh = (char*)pdata->code - (char*)endthunks;
2604 if (!((is_call_imm (thunks) && is_call_imm (endthunks)) || (is_call_imm (difflow) && is_call_imm (diffhigh))))
2608 * The thunk is composed of 3 words:
2609 * load constant from thunks [2] into ARM_IP
2612 * Note that the LR register is already setup
2614 //g_print ("thunk nentries: %d\n", ((char*)endthunks - (char*)thunks)/16);
2615 if ((pdata->found == 2) || (pdata->code >= code && pdata->code <= code + csize)) {
2616 while (thunks < endthunks) {
2617 //g_print ("looking for target: %p at %p (%08x-%08x)\n", pdata->target, thunks, thunks [0], thunks [1]);
/* Reuse an existing thunk whose constant word already equals the target. */
2618 if (thunks [2] == (guint32)pdata->target) {
2619 arm_patch (pdata->code, (guchar*)thunks);
2620 mono_arch_flush_icache (pdata->code, 4);
2623 } else if ((thunks [0] == 0) && (thunks [1] == 0) && (thunks [2] == 0)) {
2624 /* found a free slot instead: emit thunk */
2625 /* ARMREG_IP is fine to use since this can't be an IMT call
2628 code = (guchar*)thunks;
2629 ARM_LDR_IMM (code, ARMREG_IP, ARMREG_PC, 0);
2630 if (thumb_supported)
2631 ARM_BX (code, ARMREG_IP);
2633 ARM_MOV_REG_REG (code, ARMREG_PC, ARMREG_IP);
2634 thunks [2] = (guint32)pdata->target;
2635 mono_arch_flush_icache ((guchar*)thunks, 12);
2637 arm_patch (pdata->code, (guchar*)thunks);
2638 mono_arch_flush_icache (pdata->code, 4);
2642 /* skip 12 bytes, the size of the thunk */
2646 //g_print ("failed thunk lookup for %p from %p at %p (%d entries)\n", pdata->target, pdata->code, data, count);
/*
 * handle_thunk:
 * Patch CODE to reach TARGET via a branch thunk when the target is out
 * of direct-branch range: walk the domain's code chunks twice if needed
 * (pdata.found == 2 on the second pass uses the first available slot)
 * and assert that a thunk was found or created.
 */
2652 handle_thunk (MonoDomain *domain, int absolute, guchar *code, const guchar *target)
2657 domain = mono_domain_get ();
2660 pdata.target = target;
2661 pdata.absolute = absolute;
2664 mono_domain_lock (domain);
2665 mono_domain_code_foreach (domain, search_thunk_slot, &pdata);
2668 /* this uses the first available slot */
2670 mono_domain_code_foreach (domain, search_thunk_slot, &pdata);
2672 mono_domain_unlock (domain);
2674 if (pdata.found != 1)
2675 g_print ("thunk failed for %p from %p\n", target, code);
2676 g_assert (pdata.found == 1);
/*
 * arm_patch_general:
 * Patch the branch/call at CODE to reach TARGET.  Handles three shapes:
 * a direct B/BL (rewrite the 24-bit displacement, or BLX for Thumb
 * targets, falling back to a thunk when out of range), a BX-based
 * sequence, and a BLX_REG / "mov pc, ip" sequence where the embedded
 * address constant is rewritten in place.
 */
2680 arm_patch_general (MonoDomain *domain, guchar *code, const guchar *target)
2682 guint32 *code32 = (void*)code;
2683 guint32 ins = *code32;
2684 guint32 prim = (ins >> 25) & 7;
2685 guint32 tval = GPOINTER_TO_UINT (target);
2687 //g_print ("patching 0x%08x (0x%08x) to point to 0x%08x\n", code, ins, target);
2688 if (prim == 5) { /* 101b */
2689 /* the diff starts 8 bytes from the branch opcode */
2690 gint diff = target - code - 8;
2692 gint tmask = 0xffffffff;
2693 if (tval & 1) { /* entering thumb mode */
2694 diff = target - 1 - code - 8;
2695 g_assert (thumb_supported);
2696 tbits = 0xf << 28; /* bl->blx bit pattern */
2697 g_assert ((ins & (1 << 24))); /* it must be a bl, not b instruction */
2698 /* this low bit of the displacement is moved to bit 24 in the instruction encoding */
2702 tmask = ~(1 << 24); /* clear the link bit */
2703 /*g_print ("blx to thumb: target: %p, code: %p, diff: %d, mask: %x\n", target, code, diff, tmask);*/
/* Forward branch: displacement must fit in 24 bits (+/-32MB). */
2708 if (diff <= 33554431) {
2710 ins = (ins & 0xff000000) | diff;
2712 *code32 = ins | tbits;
2716 /* diff between 0 and -33554432 */
2717 if (diff >= -33554432) {
2719 ins = (ins & 0xff000000) | (diff & ~0xff000000);
2721 *code32 = ins | tbits;
/* Out of direct range: route the branch through a thunk. */
2726 handle_thunk (domain, TRUE, code, target);
2731 * The alternative call sequences looks like this:
2733 * ldr ip, [pc] // loads the address constant
2734 * b 1f // jumps around the constant
2735 * address constant embedded in the code
2740 * There are two cases for patching:
2741 * a) at the end of method emission: in this case code points to the start
2742 * of the call sequence
2743 * b) during runtime patching of the call site: in this case code points
2744 * to the mov pc, ip instruction
2746 * We have to handle also the thunk jump code sequence:
2750 * address constant // execution never reaches here
2752 if ((ins & 0x0ffffff0) == 0x12fff10) {
2753 /* Branch and exchange: the address is constructed in a reg
2754 * We can patch BX when the code sequence is the following:
2755 * ldr ip, [pc, #0] ; 0x8
/* Re-emit the expected sequence into ccode[] to compare against memory. */
2762 guint8 *emit = (guint8*)ccode;
2763 ARM_LDR_IMM (emit, ARMREG_IP, ARMREG_PC, 0);
2765 ARM_MOV_REG_REG (emit, ARMREG_LR, ARMREG_PC);
2766 ARM_BX (emit, ARMREG_IP);
2768 /*patching from magic trampoline*/
2769 if (ins == ccode [3]) {
2770 g_assert (code32 [-4] == ccode [0]);
2771 g_assert (code32 [-3] == ccode [1]);
2772 g_assert (code32 [-1] == ccode [2]);
2773 code32 [-2] = (guint32)target;
2776 /*patching from JIT*/
2777 if (ins == ccode [0]) {
2778 g_assert (code32 [1] == ccode [1]);
2779 g_assert (code32 [3] == ccode [2]);
2780 g_assert (code32 [4] == ccode [3]);
2781 code32 [2] = (guint32)target;
2784 g_assert_not_reached ();
2785 } else if ((ins & 0x0ffffff0) == 0x12fff30) {
2793 guint8 *emit = (guint8*)ccode;
2794 ARM_LDR_IMM (emit, ARMREG_IP, ARMREG_PC, 0);
2796 ARM_BLX_REG (emit, ARMREG_IP);
2798 g_assert (code32 [-3] == ccode [0]);
2799 g_assert (code32 [-2] == ccode [1]);
2800 g_assert (code32 [0] == ccode [2]);
2802 code32 [-1] = (guint32)target;
2805 guint32 *tmp = ccode;
2806 guint8 *emit = (guint8*)tmp;
2807 ARM_LDR_IMM (emit, ARMREG_IP, ARMREG_PC, 0);
2808 ARM_MOV_REG_REG (emit, ARMREG_LR, ARMREG_PC);
2809 ARM_MOV_REG_REG (emit, ARMREG_PC, ARMREG_IP);
2810 ARM_BX (emit, ARMREG_IP);
2811 if (ins == ccode [2]) {
2812 g_assert_not_reached (); // should be -2 ...
2813 code32 [-1] = (guint32)target;
2816 if (ins == ccode [0]) {
2817 /* handles both thunk jump code and the far call sequence */
2818 code32 [2] = (guint32)target;
2821 g_assert_not_reached ();
2823 // g_print ("patched with 0x%08x\n", ins);
/*
 * arm_patch:
 * Convenience wrapper around arm_patch_general () for call sites that have
 * no MonoDomain at hand; passing NULL means an out-of-range branch cannot
 * allocate a thunk (arm_patch_general would need the domain for that).
 */
2827 arm_patch (guchar *code, const guchar *target)
2829 arm_patch_general (NULL, code, target);
2833 * Return the >= 0 uimm8 value if val can be represented with a byte + rotation
2834 * (with the rotation amount in *rot_amount. rot_amount is already adjusted
2835 * to be used with the emit macros.
2836 * Return -1 otherwise.
/*
 * ARM data-processing immediates are an 8-bit value rotated right by an
 * even amount (0, 2, ..., 30); this scans every legal rotation.
 */
2839 mono_arm_is_rotated_imm8 (guint32 val, gint *rot_amount)
2842 for (i = 0; i < 31; i+= 2) {
/* Rotate val left by i bits; if the result fits in 8 bits (checked on a
 * line missing from this extraction), val is encodable with rotation i.
 * NOTE(review): when i == 0 this evaluates val << 32, which is undefined
 * behavior in C — it happens to yield 0 on the compilers this targets,
 * making the expression equal to val; worth confirming/fixing upstream. */
2843 res = (val << (32 - i)) | (val >> i);
/* The instruction encodes a rotate-RIGHT amount, hence 32 - i for the
 * left-rotation i found above (i == 0 maps to rotation 0). */
2846 *rot_amount = i? 32 - i: 0;
2853 * Emits in code a sequence of instructions that load the value 'val'
2854 * into the dreg register. Uses at most 4 instructions.
/*
 * Returns the updated code pointer. Strategies, tried in order (some of the
 * guarding conditions fall on lines missing from this extraction):
 *   - PC-relative ldr from an inline constant pool;
 *   - single MOV/MVN with a rotated 8-bit immediate;
 *   - MOVW/MOVT pair (presumably only when v7_supported — confirm, the
 *     guard line is not visible here);
 *   - MOV of the low byte plus up to three ADDs, one per nonzero byte.
 */
2857 mono_arm_emit_load_imm (guint8 *code, int dreg, guint32 val)
2859 int imm8, rot_amount;
/* Load the constant PC-relative; the constant word is emitted inline. */
2861 ARM_LDR_IMM (code, dreg, ARMREG_PC, 0);
2862 /* skip the constant pool */
/* One-instruction encodings: val directly, or ~val via MVN. */
2868 if ((imm8 = mono_arm_is_rotated_imm8 (val, &rot_amount)) >= 0) {
2869 ARM_MOV_REG_IMM (code, dreg, imm8, rot_amount);
2870 } else if ((imm8 = mono_arm_is_rotated_imm8 (~val, &rot_amount)) >= 0) {
2871 ARM_MVN_REG_IMM (code, dreg, imm8, rot_amount);
/* movw/movt loads any 32-bit value in two instructions. */
2874 ARM_MOVW_REG_IMM (code, dreg, val & 0xffff);
2876 ARM_MOVT_REG_IMM (code, dreg, (val >> 16) & 0xffff);
/* Byte-at-a-time fallback: seed with the lowest nonzero byte, then ADD
 * each remaining byte using the matching even rotate amount. */
2880 ARM_MOV_REG_IMM8 (code, dreg, (val & 0xFF));
2882 ARM_ADD_REG_IMM (code, dreg, dreg, (val & 0xFF00) >> 8, 24);
2884 if (val & 0xFF0000) {
2885 ARM_ADD_REG_IMM (code, dreg, dreg, (val & 0xFF0000) >> 16, 16);
2887 if (val & 0xFF000000) {
2888 ARM_ADD_REG_IMM (code, dreg, dreg, (val & 0xFF000000) >> 24, 8);
/* Same ladder when byte 0 is zero and byte 1 is the lowest nonzero byte. */
2890 } else if (val & 0xFF00) {
2891 ARM_MOV_REG_IMM (code, dreg, (val & 0xFF00) >> 8, 24);
2892 if (val & 0xFF0000) {
2893 ARM_ADD_REG_IMM (code, dreg, dreg, (val & 0xFF0000) >> 16, 16);
2895 if (val & 0xFF000000) {
2896 ARM_ADD_REG_IMM (code, dreg, dreg, (val & 0xFF000000) >> 24, 8);
/* And when the lowest nonzero byte is byte 2. */
2898 } else if (val & 0xFF0000) {
2899 ARM_MOV_REG_IMM (code, dreg, (val & 0xFF0000) >> 16, 16);
2900 if (val & 0xFF000000) {
2901 ARM_ADD_REG_IMM (code, dreg, dreg, (val & 0xFF000000) >> 24, 8);
2904 //g_assert_not_reached ();
/*
 * mono_arm_thumb_supported:
 * Accessor for the file-scope thumb_supported flag (set during CPU feature
 * detection elsewhere in this file), so other translation units can query
 * whether ARM/Thumb interworking may be emitted.
 */
2910 mono_arm_thumb_supported (void)
2912 return thumb_supported;
2916 * emit_load_volatile_arguments:
2918 * Load volatile arguments from the stack to the original input registers.
2919 * Required before a tail call.
/*
 * Walks the call signature exactly as emit_prolog does, but in reverse
 * direction: arguments the prolog spilled to stack slots (or to register
 * variables) are reloaded into the registers/stack positions the calling
 * convention expects, so the tail-called method sees a freshly set up
 * argument state. Returns the updated code pointer.
 * NOTE(review): interior lines are missing from this extraction; several
 * storage-class branches below are visibly truncated.
 */
2922 emit_load_volatile_arguments (MonoCompile *cfg, guint8 *code)
2924 MonoMethod *method = cfg->method;
2925 MonoMethodSignature *sig;
2930 /* FIXME: Generate intermediate code instead */
2932 sig = mono_method_signature (method);
2934 /* This is the opposite of the code in emit_prolog */
2938 cinfo = get_call_info (NULL, sig, sig->pinvoke);
/* Valuetype return: reload the hidden return-buffer address into its
 * designated register. */
2940 if (MONO_TYPE_ISSTRUCT (sig->ret)) {
2941 ArgInfo *ainfo = &cinfo->ret;
2942 inst = cfg->vret_addr;
2943 g_assert (arm_is_imm12 (inst->inst_offset));
2944 ARM_LDR_IMM (code, ainfo->reg, inst->inst_basereg, inst->inst_offset);
/* One iteration per formal argument, including the implicit 'this'. */
2946 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
2947 ArgInfo *ainfo = cinfo->args + i;
/* NOTE(review): indexed by 'pos' while the loop counts with 'i' — pos is
 * presumably kept in sync on a missing line; confirm against full source. */
2948 inst = cfg->args [pos];
2950 if (cfg->verbose_level > 2)
2951 g_print ("Loading argument %d (type: %d)\n", i, ainfo->storage);
/* Argument lives in a register variable: move it back to the ABI register. */
2952 if (inst->opcode == OP_REGVAR) {
2953 if (ainfo->storage == RegTypeGeneral)
2954 ARM_MOV_REG_REG (code, inst->dreg, ainfo->reg);
2955 else if (ainfo->storage == RegTypeFP) {
2956 g_assert_not_reached ();
2957 } else if (ainfo->storage == RegTypeBase) {
/* Caller-frame stack argument: reload relative to the pre-call SP. */
2961 if (arm_is_imm12 (prev_sp_offset + ainfo->offset)) {
2962 ARM_LDR_IMM (code, inst->dreg, ARMREG_SP, (prev_sp_offset + ainfo->offset));
/* Offset too large for a 12-bit immediate: materialize it in IP first. */
2964 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
2965 ARM_LDR_REG_REG (code, inst->dreg, ARMREG_SP, ARMREG_IP);
2969 g_assert_not_reached ();
/* Argument was spilled to the local frame: reload into its ABI register(s). */
2971 if (ainfo->storage == RegTypeGeneral || ainfo->storage == RegTypeIRegPair) {
2972 switch (ainfo->size) {
/* 8-byte case: reload both halves of the register pair. */
2979 g_assert (arm_is_imm12 (inst->inst_offset));
2980 ARM_LDR_IMM (code, ainfo->reg, inst->inst_basereg, inst->inst_offset);
2981 g_assert (arm_is_imm12 (inst->inst_offset + 4));
2982 ARM_LDR_IMM (code, ainfo->reg + 1, inst->inst_basereg, inst->inst_offset + 4);
/* Default (word-sized) case. */
2985 if (arm_is_imm12 (inst->inst_offset)) {
2986 ARM_LDR_IMM (code, ainfo->reg, inst->inst_basereg, inst->inst_offset);
2988 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
2989 ARM_LDR_REG_REG (code, ainfo->reg, inst->inst_basereg, ARMREG_IP);
/* Remaining storage classes — bodies largely missing from this extraction. */
2993 } else if (ainfo->storage == RegTypeBaseGen) {
2996 } else if (ainfo->storage == RegTypeBase) {
2998 } else if (ainfo->storage == RegTypeFP) {
2999 g_assert_not_reached ();
/* Struct passed by value in consecutive registers: reload each word. */
3000 } else if (ainfo->storage == RegTypeStructByVal) {
3001 int doffset = inst->inst_offset;
3005 if (mono_class_from_mono_type (inst->inst_vtype))
3006 size = mono_class_native_size (mono_class_from_mono_type (inst->inst_vtype), NULL);
3007 for (cur_reg = 0; cur_reg < ainfo->size; ++cur_reg) {
3008 if (arm_is_imm12 (doffset)) {
3009 ARM_LDR_IMM (code, ainfo->reg + cur_reg, inst->inst_basereg, doffset);
3011 code = mono_arm_emit_load_imm (code, ARMREG_IP, doffset);
3012 ARM_LDR_REG_REG (code, ainfo->reg + cur_reg, inst->inst_basereg, ARMREG_IP);
3014 soffset += sizeof (gpointer);
3015 doffset += sizeof (gpointer);
3020 } else if (ainfo->storage == RegTypeStructByAddr) {
3037 mono_arch_output_basic_block (MonoCompile *cfg, MonoBasicBlock *bb)
3042 guint8 *code = cfg->native_code + cfg->code_len;
3043 MonoInst *last_ins = NULL;
3044 guint last_offset = 0;
3046 int imm8, rot_amount;
3048 /* we don't align basic blocks of loops on arm */
3050 if (cfg->verbose_level > 2)
3051 g_print ("Basic block %d starting at offset 0x%x\n", bb->block_num, bb->native_offset);
3053 cpos = bb->max_offset;
3055 if (cfg->prof_options & MONO_PROFILE_COVERAGE) {
3056 //MonoCoverageInfo *cov = mono_get_coverage_info (cfg->method);
3057 //g_assert (!mono_compile_aot);
3060 // cov->data [bb->dfn].iloffset = bb->cil_code - cfg->cil_code;
3061 /* this is not thread save, but good enough */
3062 /* fixme: howto handle overflows? */
3063 //x86_inc_mem (code, &cov->data [bb->dfn].count);
3066 if (mono_break_at_bb_method && mono_method_desc_full_match (mono_break_at_bb_method, cfg->method) && bb->block_num == mono_break_at_bb_bb_num) {
3067 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_INTERNAL_METHOD,
3068 (gpointer)"mono_break");
3069 code = emit_call_seq (cfg, code);
3072 MONO_BB_FOR_EACH_INS (bb, ins) {
3073 offset = code - cfg->native_code;
3075 max_len = ((guint8 *)ins_get_spec (ins->opcode))[MONO_INST_LEN];
3077 if (offset > (cfg->code_size - max_len - 16)) {
3078 cfg->code_size *= 2;
3079 cfg->native_code = g_realloc (cfg->native_code, cfg->code_size);
3080 code = cfg->native_code + offset;
3082 // if (ins->cil_code)
3083 // g_print ("cil code\n");
3084 mono_debug_record_line_number (cfg, ins, offset);
3086 switch (ins->opcode) {
3087 case OP_MEMORY_BARRIER:
3090 #ifdef HAVE_AEABI_READ_TP
3091 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_INTERNAL_METHOD,
3092 (gpointer)"__aeabi_read_tp");
3093 code = emit_call_seq (cfg, code);
3095 ARM_LDR_IMM (code, ins->dreg, ARMREG_R0, ins->inst_offset);
3097 g_assert_not_reached ();
3101 ppc_mullw (code, ppc_r4, ins->sreg1, ins->sreg2);
3102 ppc_mulhw (code, ppc_r3, ins->sreg1, ins->sreg2);
3105 ppc_mullw (code, ppc_r4, ins->sreg1, ins->sreg2);
3106 ppc_mulhwu (code, ppc_r3, ins->sreg1, ins->sreg2);
3108 case OP_STOREI1_MEMBASE_IMM:
3109 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_imm & 0xFF);
3110 g_assert (arm_is_imm12 (ins->inst_offset));
3111 ARM_STRB_IMM (code, ARMREG_LR, ins->inst_destbasereg, ins->inst_offset);
3113 case OP_STOREI2_MEMBASE_IMM:
3114 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_imm & 0xFFFF);
3115 g_assert (arm_is_imm8 (ins->inst_offset));
3116 ARM_STRH_IMM (code, ARMREG_LR, ins->inst_destbasereg, ins->inst_offset);
3118 case OP_STORE_MEMBASE_IMM:
3119 case OP_STOREI4_MEMBASE_IMM:
3120 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_imm);
3121 g_assert (arm_is_imm12 (ins->inst_offset));
3122 ARM_STR_IMM (code, ARMREG_LR, ins->inst_destbasereg, ins->inst_offset);
3124 case OP_STOREI1_MEMBASE_REG:
3125 g_assert (arm_is_imm12 (ins->inst_offset));
3126 ARM_STRB_IMM (code, ins->sreg1, ins->inst_destbasereg, ins->inst_offset);
3128 case OP_STOREI2_MEMBASE_REG:
3129 g_assert (arm_is_imm8 (ins->inst_offset));
3130 ARM_STRH_IMM (code, ins->sreg1, ins->inst_destbasereg, ins->inst_offset);
3132 case OP_STORE_MEMBASE_REG:
3133 case OP_STOREI4_MEMBASE_REG:
3134 /* this case is special, since it happens for spill code after lowering has been called */
3135 if (arm_is_imm12 (ins->inst_offset)) {
3136 ARM_STR_IMM (code, ins->sreg1, ins->inst_destbasereg, ins->inst_offset);
3138 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_offset);
3139 ARM_STR_REG_REG (code, ins->sreg1, ins->inst_destbasereg, ARMREG_LR);
3142 case OP_STOREI1_MEMINDEX:
3143 ARM_STRB_REG_REG (code, ins->sreg1, ins->inst_destbasereg, ins->sreg2);
3145 case OP_STOREI2_MEMINDEX:
3146 ARM_STRH_REG_REG (code, ins->sreg1, ins->inst_destbasereg, ins->sreg2);
3148 case OP_STORE_MEMINDEX:
3149 case OP_STOREI4_MEMINDEX:
3150 ARM_STR_REG_REG (code, ins->sreg1, ins->inst_destbasereg, ins->sreg2);
3153 g_assert_not_reached ();
3155 case OP_LOAD_MEMINDEX:
3156 case OP_LOADI4_MEMINDEX:
3157 case OP_LOADU4_MEMINDEX:
3158 ARM_LDR_REG_REG (code, ins->dreg, ins->inst_basereg, ins->sreg2);
3160 case OP_LOADI1_MEMINDEX:
3161 ARM_LDRSB_REG_REG (code, ins->dreg, ins->inst_basereg, ins->sreg2);
3163 case OP_LOADU1_MEMINDEX:
3164 ARM_LDRB_REG_REG (code, ins->dreg, ins->inst_basereg, ins->sreg2);
3166 case OP_LOADI2_MEMINDEX:
3167 ARM_LDRSH_REG_REG (code, ins->dreg, ins->inst_basereg, ins->sreg2);
3169 case OP_LOADU2_MEMINDEX:
3170 ARM_LDRH_REG_REG (code, ins->dreg, ins->inst_basereg, ins->sreg2);
3172 case OP_LOAD_MEMBASE:
3173 case OP_LOADI4_MEMBASE:
3174 case OP_LOADU4_MEMBASE:
3175 /* this case is special, since it happens for spill code after lowering has been called */
3176 if (arm_is_imm12 (ins->inst_offset)) {
3177 ARM_LDR_IMM (code, ins->dreg, ins->inst_basereg, ins->inst_offset);
3179 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_offset);
3180 ARM_LDR_REG_REG (code, ins->dreg, ins->inst_basereg, ARMREG_LR);
3183 case OP_LOADI1_MEMBASE:
3184 g_assert (arm_is_imm8 (ins->inst_offset));
3185 ARM_LDRSB_IMM (code, ins->dreg, ins->inst_basereg, ins->inst_offset);
3187 case OP_LOADU1_MEMBASE:
3188 g_assert (arm_is_imm12 (ins->inst_offset));
3189 ARM_LDRB_IMM (code, ins->dreg, ins->inst_basereg, ins->inst_offset);
3191 case OP_LOADU2_MEMBASE:
3192 g_assert (arm_is_imm8 (ins->inst_offset));
3193 ARM_LDRH_IMM (code, ins->dreg, ins->inst_basereg, ins->inst_offset);
3195 case OP_LOADI2_MEMBASE:
3196 g_assert (arm_is_imm8 (ins->inst_offset));
3197 ARM_LDRSH_IMM (code, ins->dreg, ins->inst_basereg, ins->inst_offset);
3199 case OP_ICONV_TO_I1:
3200 ARM_SHL_IMM (code, ins->dreg, ins->sreg1, 24);
3201 ARM_SAR_IMM (code, ins->dreg, ins->dreg, 24);
3203 case OP_ICONV_TO_I2:
3204 ARM_SHL_IMM (code, ins->dreg, ins->sreg1, 16);
3205 ARM_SAR_IMM (code, ins->dreg, ins->dreg, 16);
3207 case OP_ICONV_TO_U1:
3208 ARM_AND_REG_IMM8 (code, ins->dreg, ins->sreg1, 0xff);
3210 case OP_ICONV_TO_U2:
3211 ARM_SHL_IMM (code, ins->dreg, ins->sreg1, 16);
3212 ARM_SHR_IMM (code, ins->dreg, ins->dreg, 16);
3216 ARM_CMP_REG_REG (code, ins->sreg1, ins->sreg2);
3218 case OP_COMPARE_IMM:
3219 case OP_ICOMPARE_IMM:
3220 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
3221 g_assert (imm8 >= 0);
3222 ARM_CMP_REG_IMM (code, ins->sreg1, imm8, rot_amount);
3226 * gdb does not like encountering the hw breakpoint ins in the debugged code.
3227 * So instead of emitting a trap, we emit a call a C function and place a
3230 //*(int*)code = 0xef9f0001;
3233 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_INTERNAL_METHOD,
3234 (gpointer)"mono_break");
3235 code = emit_call_seq (cfg, code);
3237 case OP_RELAXED_NOP:
3242 case OP_DUMMY_STORE:
3243 case OP_NOT_REACHED:
3246 case OP_SEQ_POINT: {
3248 MonoInst *info_var = cfg->arch.seq_point_info_var;
3249 MonoInst *ss_trigger_page_var = cfg->arch.ss_trigger_page_var;
3251 int dreg = ARMREG_LR;
3254 * For AOT, we use one got slot per method, which will point to a
3255 * SeqPointInfo structure, containing all the information required
3256 * by the code below.
3258 if (cfg->compile_aot) {
3259 g_assert (info_var);
3260 g_assert (info_var->opcode == OP_REGOFFSET);
3261 g_assert (arm_is_imm12 (info_var->inst_offset));
3265 * Read from the single stepping trigger page. This will cause a
3266 * SIGSEGV when single stepping is enabled.
3267 * We do this _before_ the breakpoint, so single stepping after
3268 * a breakpoint is hit will step to the next IL offset.
3270 g_assert (((guint64)(gsize)ss_trigger_page >> 32) == 0);
3272 if (ins->flags & MONO_INST_SINGLE_STEP_LOC) {
3273 if (cfg->compile_aot) {
3274 /* Load the trigger page addr from the variable initialized in the prolog */
3275 var = ss_trigger_page_var;
3277 g_assert (var->opcode == OP_REGOFFSET);
3278 g_assert (arm_is_imm12 (var->inst_offset));
3279 ARM_LDR_IMM (code, dreg, var->inst_basereg, var->inst_offset);
3281 ARM_LDR_IMM (code, dreg, ARMREG_PC, 0);
3283 *(int*)code = (int)ss_trigger_page;
3286 ARM_LDR_IMM (code, dreg, dreg, 0);
3289 mono_add_seq_point (cfg, bb, ins, code - cfg->native_code);
3291 if (cfg->compile_aot) {
3292 guint32 offset = code - cfg->native_code;
3295 ARM_LDR_IMM (code, dreg, info_var->inst_basereg, info_var->inst_offset);
3296 /* Add the offset */
3297 val = ((offset / 4) * sizeof (guint8*)) + G_STRUCT_OFFSET (SeqPointInfo, bp_addrs);
3298 ARM_ADD_REG_IMM (code, dreg, dreg, (val & 0xFF), 0);
3300 * Have to emit nops to keep the difference between the offset
3301 * stored in seq_points and breakpoint instruction constant,
3302 * mono_arch_get_ip_for_breakpoint () depends on this.
3305 ARM_ADD_REG_IMM (code, dreg, dreg, (val & 0xFF00) >> 8, 24);
3309 ARM_ADD_REG_IMM (code, dreg, dreg, (val & 0xFF0000) >> 16, 16);
3312 g_assert (!(val & 0xFF000000));
3313 /* Load the info->bp_addrs [offset], which is either 0 or the address of a trigger page */
3314 ARM_LDR_IMM (code, dreg, dreg, 0);
3316 /* What is faster, a branch or a load ? */
3317 ARM_CMP_REG_IMM (code, dreg, 0, 0);
3318 /* The breakpoint instruction */
3319 ARM_LDR_IMM_COND (code, dreg, dreg, 0, ARMCOND_NE);
3322 * A placeholder for a possible breakpoint inserted by
3323 * mono_arch_set_breakpoint ().
3325 for (i = 0; i < 4; ++i)
3332 ARM_ADDS_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
3335 ARM_ADD_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
3339 ARM_ADCS_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
3342 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
3343 g_assert (imm8 >= 0);
3344 ARM_ADDS_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
3348 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
3349 g_assert (imm8 >= 0);
3350 ARM_ADD_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
3354 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
3355 g_assert (imm8 >= 0);
3356 ARM_ADCS_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
3359 ARM_ADD_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
3360 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, "OverflowException");
3362 case OP_IADD_OVF_UN:
3363 ARM_ADD_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
3364 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, "OverflowException");
3367 ARM_SUB_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
3368 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, "OverflowException");
3370 case OP_ISUB_OVF_UN:
3371 ARM_SUB_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
3372 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_TRUE, PPC_BR_EQ, "OverflowException");
3374 case OP_ADD_OVF_CARRY:
3375 ARM_ADCS_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
3376 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, "OverflowException");
3378 case OP_ADD_OVF_UN_CARRY:
3379 ARM_ADCS_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
3380 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, "OverflowException");
3382 case OP_SUB_OVF_CARRY:
3383 ARM_SBCS_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
3384 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, "OverflowException");
3386 case OP_SUB_OVF_UN_CARRY:
3387 ARM_SBCS_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
3388 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_TRUE, PPC_BR_EQ, "OverflowException");
3392 ARM_SUBS_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
3395 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
3396 g_assert (imm8 >= 0);
3397 ARM_SUBS_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
3400 ARM_SUB_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
3404 ARM_SBCS_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
3408 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
3409 g_assert (imm8 >= 0);
3410 ARM_SUB_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
3414 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
3415 g_assert (imm8 >= 0);
3416 ARM_SBCS_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
3418 case OP_ARM_RSBS_IMM:
3419 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
3420 g_assert (imm8 >= 0);
3421 ARM_RSBS_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
3423 case OP_ARM_RSC_IMM:
3424 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
3425 g_assert (imm8 >= 0);
3426 ARM_RSC_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
3429 ARM_AND_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
3433 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
3434 g_assert (imm8 >= 0);
3435 ARM_AND_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
3443 /* crappy ARM arch doesn't have a DIV instruction */
3444 g_assert_not_reached ();
3446 ARM_ORR_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
3450 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
3451 g_assert (imm8 >= 0);
3452 ARM_ORR_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
3455 ARM_EOR_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
3459 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
3460 g_assert (imm8 >= 0);
3461 ARM_EOR_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
3464 ARM_SHL_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
3469 ARM_SHL_IMM (code, ins->dreg, ins->sreg1, (ins->inst_imm & 0x1f));
3470 else if (ins->dreg != ins->sreg1)
3471 ARM_MOV_REG_REG (code, ins->dreg, ins->sreg1);
3474 ARM_SAR_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
3479 ARM_SAR_IMM (code, ins->dreg, ins->sreg1, (ins->inst_imm & 0x1f));
3480 else if (ins->dreg != ins->sreg1)
3481 ARM_MOV_REG_REG (code, ins->dreg, ins->sreg1);
3484 case OP_ISHR_UN_IMM:
3486 ARM_SHR_IMM (code, ins->dreg, ins->sreg1, (ins->inst_imm & 0x1f));
3487 else if (ins->dreg != ins->sreg1)
3488 ARM_MOV_REG_REG (code, ins->dreg, ins->sreg1);
3491 ARM_SHR_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
3494 ARM_MVN_REG_REG (code, ins->dreg, ins->sreg1);
3497 ARM_RSB_REG_IMM8 (code, ins->dreg, ins->sreg1, 0);
3500 if (ins->dreg == ins->sreg2)
3501 ARM_MUL_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
3503 ARM_MUL_REG_REG (code, ins->dreg, ins->sreg2, ins->sreg1);
3506 g_assert_not_reached ();
3509 /* FIXME: handle ovf/ sreg2 != dreg */
3510 ARM_MUL_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
3511 /* FIXME: MUL doesn't set the C/O flags on ARM */
3513 case OP_IMUL_OVF_UN:
3514 /* FIXME: handle ovf/ sreg2 != dreg */
3515 ARM_MUL_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
3516 /* FIXME: MUL doesn't set the C/O flags on ARM */
3519 code = mono_arm_emit_load_imm (code, ins->dreg, ins->inst_c0);
3522 /* Load the GOT offset */
3523 mono_add_patch_info (cfg, offset, (MonoJumpInfoType)ins->inst_i1, ins->inst_p0);
3524 ARM_LDR_IMM (code, ins->dreg, ARMREG_PC, 0);
3526 *(gpointer*)code = NULL;
3528 /* Load the value from the GOT */
3529 ARM_LDR_REG_REG (code, ins->dreg, ARMREG_PC, ins->dreg);
3531 case OP_ICONV_TO_I4:
3532 case OP_ICONV_TO_U4:
3534 if (ins->dreg != ins->sreg1)
3535 ARM_MOV_REG_REG (code, ins->dreg, ins->sreg1);
3538 int saved = ins->sreg2;
3539 if (ins->sreg2 == ARM_LSW_REG) {
3540 ARM_MOV_REG_REG (code, ARMREG_LR, ins->sreg2);
3543 if (ins->sreg1 != ARM_LSW_REG)
3544 ARM_MOV_REG_REG (code, ARM_LSW_REG, ins->sreg1);
3545 if (saved != ARM_MSW_REG)
3546 ARM_MOV_REG_REG (code, ARM_MSW_REG, saved);
3551 ARM_MVFD (code, ins->dreg, ins->sreg1);
3552 #elif defined(ARM_FPU_VFP)
3553 ARM_CPYD (code, ins->dreg, ins->sreg1);
3556 case OP_FCONV_TO_R4:
3558 ARM_MVFS (code, ins->dreg, ins->sreg1);
3559 #elif defined(ARM_FPU_VFP)
3560 ARM_CVTD (code, ins->dreg, ins->sreg1);
3561 ARM_CVTS (code, ins->dreg, ins->dreg);
3566 * Keep in sync with mono_arch_emit_epilog
3568 g_assert (!cfg->method->save_lmf);
3570 code = emit_load_volatile_arguments (cfg, code);
3572 code = emit_big_add (code, ARMREG_SP, cfg->frame_reg, cfg->stack_usage);
3573 ARM_POP_NWB (code, cfg->used_int_regs | ((1 << ARMREG_SP)) | ((1 << ARMREG_LR)));
3574 mono_add_patch_info (cfg, (guint8*) code - cfg->native_code, MONO_PATCH_INFO_METHOD_JUMP, ins->inst_p0);
3575 if (cfg->compile_aot) {
3576 ARM_LDR_IMM (code, ARMREG_IP, ARMREG_PC, 0);
3578 *(gpointer*)code = NULL;
3580 ARM_LDR_REG_REG (code, ARMREG_PC, ARMREG_PC, ARMREG_IP);
3586 /* ensure ins->sreg1 is not NULL */
3587 ARM_LDR_IMM (code, ARMREG_LR, ins->sreg1, 0);
3590 g_assert (cfg->sig_cookie < 128);
3591 ARM_LDR_IMM (code, ARMREG_IP, cfg->frame_reg, cfg->sig_cookie);
3592 ARM_STR_IMM (code, ARMREG_IP, ins->sreg1, 0);
3601 call = (MonoCallInst*)ins;
3602 if (ins->flags & MONO_INST_HAS_METHOD)
3603 mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_METHOD, call->method);
3605 mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_ABS, call->fptr);
3606 code = emit_call_seq (cfg, code);
3607 code = emit_move_return_value (cfg, ins, code);
3613 case OP_VOIDCALL_REG:
3615 code = emit_call_reg (code, ins->sreg1);
3616 code = emit_move_return_value (cfg, ins, code);
3618 case OP_FCALL_MEMBASE:
3619 case OP_LCALL_MEMBASE:
3620 case OP_VCALL_MEMBASE:
3621 case OP_VCALL2_MEMBASE:
3622 case OP_VOIDCALL_MEMBASE:
3623 case OP_CALL_MEMBASE:
3624 g_assert (arm_is_imm12 (ins->inst_offset));
3625 g_assert (ins->sreg1 != ARMREG_LR);
3626 call = (MonoCallInst*)ins;
3627 if (call->method->klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
3628 ARM_ADD_REG_IMM8 (code, ARMREG_LR, ARMREG_PC, 4);
3629 ARM_LDR_IMM (code, ARMREG_PC, ins->sreg1, ins->inst_offset);
3631 * We can't embed the method in the code stream in PIC code, or
3633 * Instead, we put it in V5 in code emitted by
3634 * mono_arch_emit_imt_argument (), and embed NULL here to
3635 * signal the IMT thunk that the value is in V5.
3637 if (call->dynamic_imt_arg)
3638 *((gpointer*)code) = NULL;
3640 *((gpointer*)code) = (gpointer)call->method;
3643 ARM_MOV_REG_REG (code, ARMREG_LR, ARMREG_PC);
3644 ARM_LDR_IMM (code, ARMREG_PC, ins->sreg1, ins->inst_offset);
3646 code = emit_move_return_value (cfg, ins, code);
3649 /* keep alignment */
3650 int alloca_waste = cfg->param_area;
3653 /* round the size to 8 bytes */
3654 ARM_ADD_REG_IMM8 (code, ins->dreg, ins->sreg1, 7);
3655 ARM_BIC_REG_IMM8 (code, ins->dreg, ins->dreg, 7);
3657 ARM_ADD_REG_IMM8 (code, ins->dreg, ins->dreg, alloca_waste);
3658 ARM_SUB_REG_REG (code, ARMREG_SP, ARMREG_SP, ins->dreg);
3659 /* memzero the area: dreg holds the size, sp is the pointer */
3660 if (ins->flags & MONO_INST_INIT) {
3661 guint8 *start_loop, *branch_to_cond;
3662 ARM_MOV_REG_IMM8 (code, ARMREG_LR, 0);
3663 branch_to_cond = code;
3666 ARM_STR_REG_REG (code, ARMREG_LR, ARMREG_SP, ins->dreg);
3667 arm_patch (branch_to_cond, code);
3668 /* decrement by 4 and set flags */
3669 ARM_SUBS_REG_IMM8 (code, ins->dreg, ins->dreg, 4);
3670 ARM_B_COND (code, ARMCOND_GE, 0);
3671 arm_patch (code - 4, start_loop);
3673 ARM_ADD_REG_IMM8 (code, ins->dreg, ARMREG_SP, alloca_waste);
3678 MonoInst *var = cfg->dyn_call_var;
3680 g_assert (var->opcode == OP_REGOFFSET);
3681 g_assert (arm_is_imm12 (var->inst_offset));
3683 /* lr = args buffer filled by mono_arch_get_dyn_call_args () */
3684 ARM_MOV_REG_REG( code, ARMREG_LR, ins->sreg1);
3686 ARM_MOV_REG_REG( code, ARMREG_IP, ins->sreg2);
3688 /* Save args buffer */
3689 ARM_STR_IMM (code, ARMREG_LR, var->inst_basereg, var->inst_offset);
3691 /* Set stack slots using R0 as scratch reg */
3692 /* MONO_ARCH_DYN_CALL_PARAM_AREA gives the size of stack space available */
3693 for (i = 0; i < DYN_CALL_STACK_ARGS; ++i) {
3694 ARM_LDR_IMM (code, ARMREG_R0, ARMREG_LR, (PARAM_REGS + i) * sizeof (gpointer));
3695 ARM_STR_IMM (code, ARMREG_R0, ARMREG_SP, i * sizeof (gpointer));
3698 /* Set argument registers */
3699 for (i = 0; i < PARAM_REGS; ++i)
3700 ARM_LDR_IMM (code, i, ARMREG_LR, i * sizeof (gpointer));
3703 ARM_MOV_REG_REG (code, ARMREG_LR, ARMREG_PC);
3704 ARM_MOV_REG_REG (code, ARMREG_PC, ARMREG_IP);
3707 ARM_LDR_IMM (code, ARMREG_IP, var->inst_basereg, var->inst_offset);
3708 ARM_STR_IMM (code, ARMREG_R0, ARMREG_IP, G_STRUCT_OFFSET (DynCallArgs, res));
3709 ARM_STR_IMM (code, ARMREG_R1, ARMREG_IP, G_STRUCT_OFFSET (DynCallArgs, res2));
3713 if (ins->sreg1 != ARMREG_R0)
3714 ARM_MOV_REG_REG (code, ARMREG_R0, ins->sreg1);
3715 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_INTERNAL_METHOD,
3716 (gpointer)"mono_arch_throw_exception");
3717 code = emit_call_seq (cfg, code);
3721 if (ins->sreg1 != ARMREG_R0)
3722 ARM_MOV_REG_REG (code, ARMREG_R0, ins->sreg1);
3723 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_INTERNAL_METHOD,
3724 (gpointer)"mono_arch_rethrow_exception");
3725 code = emit_call_seq (cfg, code);
3728 case OP_START_HANDLER: {
3729 MonoInst *spvar = mono_find_spvar_for_region (cfg, bb->region);
3731 if (arm_is_imm12 (spvar->inst_offset)) {
3732 ARM_STR_IMM (code, ARMREG_LR, spvar->inst_basereg, spvar->inst_offset);
3734 code = mono_arm_emit_load_imm (code, ARMREG_IP, spvar->inst_offset);
3735 ARM_STR_REG_REG (code, ARMREG_LR, spvar->inst_basereg, ARMREG_IP);
3739 case OP_ENDFILTER: {
3740 MonoInst *spvar = mono_find_spvar_for_region (cfg, bb->region);
3742 if (ins->sreg1 != ARMREG_R0)
3743 ARM_MOV_REG_REG (code, ARMREG_R0, ins->sreg1);
3744 if (arm_is_imm12 (spvar->inst_offset)) {
3745 ARM_LDR_IMM (code, ARMREG_IP, spvar->inst_basereg, spvar->inst_offset);
3747 g_assert (ARMREG_IP != spvar->inst_basereg);
3748 code = mono_arm_emit_load_imm (code, ARMREG_IP, spvar->inst_offset);
3749 ARM_LDR_REG_REG (code, ARMREG_IP, spvar->inst_basereg, ARMREG_IP);
3751 ARM_MOV_REG_REG (code, ARMREG_PC, ARMREG_IP);
3754 case OP_ENDFINALLY: {
3755 MonoInst *spvar = mono_find_spvar_for_region (cfg, bb->region);
3757 if (arm_is_imm12 (spvar->inst_offset)) {
3758 ARM_LDR_IMM (code, ARMREG_IP, spvar->inst_basereg, spvar->inst_offset);
3760 g_assert (ARMREG_IP != spvar->inst_basereg);
3761 code = mono_arm_emit_load_imm (code, ARMREG_IP, spvar->inst_offset);
3762 ARM_LDR_REG_REG (code, ARMREG_IP, spvar->inst_basereg, ARMREG_IP);
3764 ARM_MOV_REG_REG (code, ARMREG_PC, ARMREG_IP);
3767 case OP_CALL_HANDLER:
3768 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_BB, ins->inst_target_bb);
3772 ins->inst_c0 = code - cfg->native_code;
3775 /*if (ins->inst_target_bb->native_offset) {
3777 //x86_jump_code (code, cfg->native_code + ins->inst_target_bb->native_offset);
3779 mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_BB, ins->inst_target_bb);
3784 ARM_MOV_REG_REG (code, ARMREG_PC, ins->sreg1);
3788 * In the normal case we have:
3789 * ldr pc, [pc, ins->sreg1 << 2]
3792 * ldr lr, [pc, ins->sreg1 << 2]
3794 * After follows the data.
3795 * FIXME: add aot support.
3797 mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_SWITCH, ins->inst_p0);
3798 max_len += 4 * GPOINTER_TO_INT (ins->klass);
3799 if (offset > (cfg->code_size - max_len - 16)) {
3800 cfg->code_size += max_len;
3801 cfg->code_size *= 2;
3802 cfg->native_code = g_realloc (cfg->native_code, cfg->code_size);
3803 code = cfg->native_code + offset;
3805 ARM_LDR_REG_REG_SHIFT (code, ARMREG_PC, ARMREG_PC, ins->sreg1, ARMSHIFT_LSL, 2);
3807 code += 4 * GPOINTER_TO_INT (ins->klass);
3811 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 0, ARMCOND_NE);
3812 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_EQ);
3816 ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
3817 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_LT);
3821 ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
3822 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_LO);
3826 ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
3827 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_GT);
3831 ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
3832 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_HI);
3834 case OP_COND_EXC_EQ:
3835 case OP_COND_EXC_NE_UN:
3836 case OP_COND_EXC_LT:
3837 case OP_COND_EXC_LT_UN:
3838 case OP_COND_EXC_GT:
3839 case OP_COND_EXC_GT_UN:
3840 case OP_COND_EXC_GE:
3841 case OP_COND_EXC_GE_UN:
3842 case OP_COND_EXC_LE:
3843 case OP_COND_EXC_LE_UN:
3844 EMIT_COND_SYSTEM_EXCEPTION (ins->opcode - OP_COND_EXC_EQ, ins->inst_p1);
3846 case OP_COND_EXC_IEQ:
3847 case OP_COND_EXC_INE_UN:
3848 case OP_COND_EXC_ILT:
3849 case OP_COND_EXC_ILT_UN:
3850 case OP_COND_EXC_IGT:
3851 case OP_COND_EXC_IGT_UN:
3852 case OP_COND_EXC_IGE:
3853 case OP_COND_EXC_IGE_UN:
3854 case OP_COND_EXC_ILE:
3855 case OP_COND_EXC_ILE_UN:
3856 EMIT_COND_SYSTEM_EXCEPTION (ins->opcode - OP_COND_EXC_IEQ, ins->inst_p1);
3859 case OP_COND_EXC_IC:
3860 EMIT_COND_SYSTEM_EXCEPTION_FLAGS (ARMCOND_CS, ins->inst_p1);
3862 case OP_COND_EXC_OV:
3863 case OP_COND_EXC_IOV:
3864 EMIT_COND_SYSTEM_EXCEPTION_FLAGS (ARMCOND_VS, ins->inst_p1);
3866 case OP_COND_EXC_NC:
3867 case OP_COND_EXC_INC:
3868 EMIT_COND_SYSTEM_EXCEPTION_FLAGS (ARMCOND_CC, ins->inst_p1);
3870 case OP_COND_EXC_NO:
3871 case OP_COND_EXC_INO:
3872 EMIT_COND_SYSTEM_EXCEPTION_FLAGS (ARMCOND_VC, ins->inst_p1);
3884 EMIT_COND_BRANCH (ins, ins->opcode - OP_IBEQ);
3887 /* floating point opcodes */
3890 if (cfg->compile_aot) {
3891 ARM_LDFD (code, ins->dreg, ARMREG_PC, 0);
3893 *(guint32*)code = ((guint32*)(ins->inst_p0))[0];
3895 *(guint32*)code = ((guint32*)(ins->inst_p0))[1];
3898 /* FIXME: we can optimize the imm load by dealing with part of
3899 * the displacement in LDFD (aligning to 512).
3901 code = mono_arm_emit_load_imm (code, ARMREG_LR, (guint32)ins->inst_p0);
3902 ARM_LDFD (code, ins->dreg, ARMREG_LR, 0);
3906 if (cfg->compile_aot) {
3907 ARM_LDFS (code, ins->dreg, ARMREG_PC, 0);
3909 *(guint32*)code = ((guint32*)(ins->inst_p0))[0];
3912 code = mono_arm_emit_load_imm (code, ARMREG_LR, (guint32)ins->inst_p0);
3913 ARM_LDFS (code, ins->dreg, ARMREG_LR, 0);
3916 case OP_STORER8_MEMBASE_REG:
3917 /* This is generated by the local regalloc pass which runs after the lowering pass */
3918 if (!arm_is_fpimm8 (ins->inst_offset)) {
3919 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_offset);
3920 ARM_ADD_REG_REG (code, ARMREG_LR, ARMREG_LR, ins->inst_destbasereg);
3921 ARM_STFD (code, ins->sreg1, ARMREG_LR, 0);
3923 ARM_STFD (code, ins->sreg1, ins->inst_destbasereg, ins->inst_offset);
3926 case OP_LOADR8_MEMBASE:
3927 /* This is generated by the local regalloc pass which runs after the lowering pass */
3928 if (!arm_is_fpimm8 (ins->inst_offset)) {
3929 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_offset);
3930 ARM_ADD_REG_REG (code, ARMREG_LR, ARMREG_LR, ins->inst_basereg);
3931 ARM_LDFD (code, ins->dreg, ARMREG_LR, 0);
3933 ARM_LDFD (code, ins->dreg, ins->inst_basereg, ins->inst_offset);
3936 case OP_STORER4_MEMBASE_REG:
3937 g_assert (arm_is_fpimm8 (ins->inst_offset));
3938 ARM_STFS (code, ins->sreg1, ins->inst_destbasereg, ins->inst_offset);
3940 case OP_LOADR4_MEMBASE:
3941 g_assert (arm_is_fpimm8 (ins->inst_offset));
3942 ARM_LDFS (code, ins->dreg, ins->inst_basereg, ins->inst_offset);
3944 case OP_ICONV_TO_R_UN: {
3946 tmpreg = ins->dreg == 0? 1: 0;
3947 ARM_CMP_REG_IMM8 (code, ins->sreg1, 0);
3948 ARM_FLTD (code, ins->dreg, ins->sreg1);
3949 ARM_B_COND (code, ARMCOND_GE, 8);
3950 /* save the temp register */
3951 ARM_SUB_REG_IMM8 (code, ARMREG_SP, ARMREG_SP, 8);
3952 ARM_STFD (code, tmpreg, ARMREG_SP, 0);
3953 ARM_LDFD (code, tmpreg, ARMREG_PC, 12);
3954 ARM_FPA_ADFD (code, ins->dreg, ins->dreg, tmpreg);
3955 ARM_LDFD (code, tmpreg, ARMREG_SP, 0);
3956 ARM_ADD_REG_IMM8 (code, ARMREG_SP, ARMREG_SP, 8);
3957 /* skip the constant pool */
3960 *(int*)code = 0x41f00000;
3965 * ldfltd ftemp, [pc, #8] 0x41f00000 0x00000000
3966 * adfltd fdest, fdest, ftemp
3970 case OP_ICONV_TO_R4:
3971 ARM_FLTS (code, ins->dreg, ins->sreg1);
3973 case OP_ICONV_TO_R8:
3974 ARM_FLTD (code, ins->dreg, ins->sreg1);
3977 #elif defined(ARM_FPU_VFP)
3980 if (cfg->compile_aot) {
3981 ARM_FLDD (code, ins->dreg, ARMREG_PC, 0);
3983 *(guint32*)code = ((guint32*)(ins->inst_p0))[0];
3985 *(guint32*)code = ((guint32*)(ins->inst_p0))[1];
3988 /* FIXME: we can optimize the imm load by dealing with part of
3989 * the displacement in LDFD (aligning to 512).
3991 code = mono_arm_emit_load_imm (code, ARMREG_LR, (guint32)ins->inst_p0);
3992 ARM_FLDD (code, ins->dreg, ARMREG_LR, 0);
3996 if (cfg->compile_aot) {
3997 ARM_FLDS (code, ins->dreg, ARMREG_PC, 0);
3999 *(guint32*)code = ((guint32*)(ins->inst_p0))[0];
4001 ARM_CVTS (code, ins->dreg, ins->dreg);
4003 code = mono_arm_emit_load_imm (code, ARMREG_LR, (guint32)ins->inst_p0);
4004 ARM_FLDS (code, ins->dreg, ARMREG_LR, 0);
4005 ARM_CVTS (code, ins->dreg, ins->dreg);
4008 case OP_STORER8_MEMBASE_REG:
4009 /* This is generated by the local regalloc pass which runs after the lowering pass */
4010 if (!arm_is_fpimm8 (ins->inst_offset)) {
4011 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_offset);
4012 ARM_ADD_REG_REG (code, ARMREG_LR, ARMREG_LR, ins->inst_destbasereg);
4013 ARM_FSTD (code, ins->sreg1, ARMREG_LR, 0);
4015 ARM_FSTD (code, ins->sreg1, ins->inst_destbasereg, ins->inst_offset);
4018 case OP_LOADR8_MEMBASE:
4019 /* This is generated by the local regalloc pass which runs after the lowering pass */
4020 if (!arm_is_fpimm8 (ins->inst_offset)) {
4021 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_offset);
4022 ARM_ADD_REG_REG (code, ARMREG_LR, ARMREG_LR, ins->inst_basereg);
4023 ARM_FLDD (code, ins->dreg, ARMREG_LR, 0);
4025 ARM_FLDD (code, ins->dreg, ins->inst_basereg, ins->inst_offset);
4028 case OP_STORER4_MEMBASE_REG:
4029 g_assert (arm_is_fpimm8 (ins->inst_offset));
4030 ARM_CVTD (code, ARM_VFP_F0, ins->sreg1);
4031 ARM_FSTS (code, ARM_VFP_F0, ins->inst_destbasereg, ins->inst_offset);
4033 case OP_LOADR4_MEMBASE:
4034 g_assert (arm_is_fpimm8 (ins->inst_offset));
4035 ARM_FLDS (code, ARM_VFP_F0, ins->inst_basereg, ins->inst_offset);
4036 ARM_CVTS (code, ins->dreg, ARM_VFP_F0);
4038 case OP_ICONV_TO_R_UN: {
4039 g_assert_not_reached ();
4042 case OP_ICONV_TO_R4:
4043 ARM_FMSR (code, ARM_VFP_F0, ins->sreg1);
4044 ARM_FSITOS (code, ARM_VFP_F0, ARM_VFP_F0);
4045 ARM_CVTS (code, ins->dreg, ARM_VFP_F0);
4047 case OP_ICONV_TO_R8:
4048 ARM_FMSR (code, ARM_VFP_F0, ins->sreg1);
4049 ARM_FSITOD (code, ins->dreg, ARM_VFP_F0);
4053 if (mono_method_signature (cfg->method)->ret->type == MONO_TYPE_R4) {
4054 ARM_CVTD (code, ARM_VFP_F0, ins->sreg1);
4055 ARM_FMRS (code, ARMREG_R0, ARM_VFP_F0);
4057 ARM_FMRRD (code, ARMREG_R0, ARMREG_R1, ins->sreg1);
4063 case OP_FCONV_TO_I1:
4064 code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 1, TRUE);
4066 case OP_FCONV_TO_U1:
4067 code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 1, FALSE);
4069 case OP_FCONV_TO_I2:
4070 code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 2, TRUE);
4072 case OP_FCONV_TO_U2:
4073 code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 2, FALSE);
4075 case OP_FCONV_TO_I4:
4077 code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 4, TRUE);
4079 case OP_FCONV_TO_U4:
4081 code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 4, FALSE);
4083 case OP_FCONV_TO_I8:
4084 case OP_FCONV_TO_U8:
4085 g_assert_not_reached ();
4086 /* Implemented as helper calls */
4088 case OP_LCONV_TO_R_UN:
4089 g_assert_not_reached ();
4090 /* Implemented as helper calls */
4092 case OP_LCONV_TO_OVF_I4_2: {
4093 guint8 *high_bit_not_set, *valid_negative, *invalid_negative, *valid_positive;
4095 * Valid ints: 0xffffffff:8000000 to 00000000:0x7f000000
4098 ARM_CMP_REG_IMM8 (code, ins->sreg1, 0);
4099 high_bit_not_set = code;
4100 ARM_B_COND (code, ARMCOND_GE, 0); /*branch if bit 31 of the lower part is not set*/
4102 ARM_CMN_REG_IMM8 (code, ins->sreg2, 1); /*This have the same effect as CMP reg, 0xFFFFFFFF */
4103 valid_negative = code;
4104 ARM_B_COND (code, ARMCOND_EQ, 0); /*branch if upper part == 0xFFFFFFFF (lower part has bit 31 set) */
4105 invalid_negative = code;
4106 ARM_B_COND (code, ARMCOND_AL, 0);
4108 arm_patch (high_bit_not_set, code);
4110 ARM_CMP_REG_IMM8 (code, ins->sreg2, 0);
4111 valid_positive = code;
4112 ARM_B_COND (code, ARMCOND_EQ, 0); /*branch if upper part == 0 (lower part has bit 31 clear)*/
4114 arm_patch (invalid_negative, code);
4115 EMIT_COND_SYSTEM_EXCEPTION_FLAGS (ARMCOND_AL, "OverflowException");
4117 arm_patch (valid_negative, code);
4118 arm_patch (valid_positive, code);
4120 if (ins->dreg != ins->sreg1)
4121 ARM_MOV_REG_REG (code, ins->dreg, ins->sreg1);
4126 ARM_FPA_ADFD (code, ins->dreg, ins->sreg1, ins->sreg2);
4129 ARM_FPA_SUFD (code, ins->dreg, ins->sreg1, ins->sreg2);
4132 ARM_FPA_MUFD (code, ins->dreg, ins->sreg1, ins->sreg2);
4135 ARM_FPA_DVFD (code, ins->dreg, ins->sreg1, ins->sreg2);
4138 ARM_MNFD (code, ins->dreg, ins->sreg1);
4140 #elif defined(ARM_FPU_VFP)
4142 ARM_VFP_ADDD (code, ins->dreg, ins->sreg1, ins->sreg2);
4145 ARM_VFP_SUBD (code, ins->dreg, ins->sreg1, ins->sreg2);
4148 ARM_VFP_MULD (code, ins->dreg, ins->sreg1, ins->sreg2);
4151 ARM_VFP_DIVD (code, ins->dreg, ins->sreg1, ins->sreg2);
4154 ARM_NEGD (code, ins->dreg, ins->sreg1);
4159 g_assert_not_reached ();
4163 ARM_FCMP (code, ARM_FPA_CMF, ins->sreg1, ins->sreg2);
4164 #elif defined(ARM_FPU_VFP)
4165 ARM_CMPD (code, ins->sreg1, ins->sreg2);
4171 ARM_FCMP (code, ARM_FPA_CMF, ins->sreg1, ins->sreg2);
4172 #elif defined(ARM_FPU_VFP)
4173 ARM_CMPD (code, ins->sreg1, ins->sreg2);
4176 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 0, ARMCOND_NE);
4177 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_EQ);
4181 ARM_FCMP (code, ARM_FPA_CMF, ins->sreg1, ins->sreg2);
4182 #elif defined(ARM_FPU_VFP)
4183 ARM_CMPD (code, ins->sreg1, ins->sreg2);
4186 ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
4187 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_MI);
4191 ARM_FCMP (code, ARM_FPA_CMF, ins->sreg1, ins->sreg2);
4192 #elif defined(ARM_FPU_VFP)
4193 ARM_CMPD (code, ins->sreg1, ins->sreg2);
4196 ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
4197 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_MI);
4198 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_VS);
4203 ARM_FCMP (code, ARM_FPA_CMF, ins->sreg2, ins->sreg1);
4204 #elif defined(ARM_FPU_VFP)
4205 ARM_CMPD (code, ins->sreg2, ins->sreg1);
4208 ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
4209 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_MI);
4214 ARM_FCMP (code, ARM_FPA_CMF, ins->sreg2, ins->sreg1);
4215 #elif defined(ARM_FPU_VFP)
4216 ARM_CMPD (code, ins->sreg2, ins->sreg1);
4219 ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
4220 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_MI);
4221 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_VS);
4223 /* ARM FPA flags table:
4224 * N Less than ARMCOND_MI
4225 * Z Equal ARMCOND_EQ
4226 * C Greater Than or Equal ARMCOND_CS
4227 * V Unordered ARMCOND_VS
4230 EMIT_COND_BRANCH (ins, OP_IBEQ - OP_IBEQ);
4233 EMIT_COND_BRANCH (ins, OP_IBNE_UN - OP_IBEQ);
4236 EMIT_COND_BRANCH_FLAGS (ins, ARMCOND_MI); /* N set */
4239 EMIT_COND_BRANCH_FLAGS (ins, ARMCOND_VS); /* V set */
4240 EMIT_COND_BRANCH_FLAGS (ins, ARMCOND_MI); /* N set */
4246 g_assert_not_reached ();
4250 EMIT_COND_BRANCH_FLAGS (ins, ARMCOND_GE);
4252 /* FPA requires EQ even thou the docs suggests that just CS is enough */
4253 EMIT_COND_BRANCH_FLAGS (ins, ARMCOND_EQ);
4254 EMIT_COND_BRANCH_FLAGS (ins, ARMCOND_CS);
4258 EMIT_COND_BRANCH_FLAGS (ins, ARMCOND_VS); /* V set */
4259 EMIT_COND_BRANCH_FLAGS (ins, ARMCOND_GE);
4264 if (ins->dreg != ins->sreg1)
4265 ARM_MVFD (code, ins->dreg, ins->sreg1);
4266 #elif defined(ARM_FPU_VFP)
4267 ARM_ABSD (code, ARM_VFP_D1, ins->sreg1);
4268 ARM_FLDD (code, ARM_VFP_D0, ARMREG_PC, 0);
4270 *(guint32*)code = 0xffffffff;
4272 *(guint32*)code = 0x7fefffff;
4274 ARM_CMPD (code, ARM_VFP_D1, ARM_VFP_D0);
4276 EMIT_COND_SYSTEM_EXCEPTION_FLAGS (ARMCOND_GT, "ArithmeticException");
4277 ARM_CMPD (code, ins->sreg1, ins->sreg1);
4279 EMIT_COND_SYSTEM_EXCEPTION_FLAGS (ARMCOND_VS, "ArithmeticException");
4281 ARM_CPYD (code, ins->dreg, ins->sreg1);
4286 g_warning ("unknown opcode %s in %s()\n", mono_inst_name (ins->opcode), __FUNCTION__);
4287 g_assert_not_reached ();
4290 if ((cfg->opt & MONO_OPT_BRANCH) && ((code - cfg->native_code - offset) > max_len)) {
4291 g_warning ("wrong maximal instruction length of instruction %s (expected %d, got %d)",
4292 mono_inst_name (ins->opcode), max_len, code - cfg->native_code - offset);
4293 g_assert_not_reached ();
4299 last_offset = offset;
4302 cfg->code_len = code - cfg->native_code;
4305 #endif /* DISABLE_JIT */
4307 #ifdef HAVE_AEABI_READ_TP
4308 void __aeabi_read_tp (void);
/*
 * Register the ARM-specific low-level helpers as JIT icalls so generated
 * code can call them by name.  The signatures passed here are dummies
 * (see the comment below); only the symbol/name binding matters.
 */
4312 mono_arch_register_lowlevel_calls (void)
4314 /* The signature doesn't matter */
4315 mono_register_jit_icall (mono_arm_throw_exception, "mono_arm_throw_exception", mono_create_icall_signature ("void"), TRUE);
4316 mono_register_jit_icall (mono_arm_throw_exception_by_token, "mono_arm_throw_exception_by_token", mono_create_icall_signature ("void"), TRUE);
/* __aeabi_read_tp is only available on a native EABI Linux build (see HAVE_AEABI_READ_TP). */
4318 #ifndef MONO_CROSS_COMPILE
4319 #ifdef HAVE_AEABI_READ_TP
4320 mono_register_jit_icall (__aeabi_read_tp, "__aeabi_read_tp", mono_create_icall_signature ("void"), TRUE);
/*
 * patch_lis_ori: patch the 16-bit immediates of a lis/ori instruction pair
 * at `ip` with the 32-bit value `val` (high half-word into slot 1, low
 * half-word into slot 3).  NOTE(review): this looks inherited from the PPC
 * backend; on ARM it is only reached from g_assert_not_reached () paths in
 * mono_arch_patch_code () below.
 */
4325 #define patch_lis_ori(ip,val) do {\
4326 guint16 *__lis_ori = (guint16*)(ip); \
4327 __lis_ori [1] = (((guint32)(val)) >> 16) & 0xffff; \
4328 __lis_ori [3] = ((guint32)(val)) & 0xffff; \
/*
 * mono_arch_patch_code:
 *
 *   Walk the jump-info list `ji` for the method's generated code and resolve
 * every patch site to its final target address.  `run_cctors` is FALSE when
 * compiling AOT (cctors must not run at compile time), and doubles as the
 * AOT flag below.  Most entries end up in arm_patch_general (); the
 * g_assert_not_reached () cases are encodings not used by this backend.
 */
4332 mono_arch_patch_code (MonoMethod *method, MonoDomain *domain, guint8 *code, MonoJumpInfo *ji, gboolean run_cctors)
4334 MonoJumpInfo *patch_info;
4335 gboolean compile_aot = !run_cctors;
4337 for (patch_info = ji; patch_info; patch_info = patch_info->next) {
4338 unsigned char *ip = patch_info->ip.i + code;
4339 const unsigned char *target;
/* Inlined switch jump tables are filled in directly, without a resolved target. */
4341 if (patch_info->type == MONO_PATCH_INFO_SWITCH && !compile_aot) {
4342 gpointer *jt = (gpointer*)(ip + 8);
4344 /* jt is the inlined jump table, 2 instructions after ip
4345 * In the normal case we store the absolute addresses,
4346 * otherwise the displacements.
4348 for (i = 0; i < patch_info->data.table->table_size; i++)
4349 jt [i] = code + (int)patch_info->data.table->table [i];
4352 target = mono_resolve_patch_target (method, domain, code, patch_info, run_cctors);
4355 switch (patch_info->type) {
4356 case MONO_PATCH_INFO_BB:
4357 case MONO_PATCH_INFO_LABEL:
4360 /* No need to patch these */
/* Second switch: patch kinds with special (mostly unused-on-ARM) encodings. */
4365 switch (patch_info->type) {
4366 case MONO_PATCH_INFO_IP:
4367 g_assert_not_reached ();
4368 patch_lis_ori (ip, ip);
4370 case MONO_PATCH_INFO_METHOD_REL:
4371 g_assert_not_reached ();
4372 *((gpointer *)(ip)) = code + patch_info->data.offset;
4374 case MONO_PATCH_INFO_METHODCONST:
4375 case MONO_PATCH_INFO_CLASS:
4376 case MONO_PATCH_INFO_IMAGE:
4377 case MONO_PATCH_INFO_FIELD:
4378 case MONO_PATCH_INFO_VTABLE:
4379 case MONO_PATCH_INFO_IID:
4380 case MONO_PATCH_INFO_SFLDA:
4381 case MONO_PATCH_INFO_LDSTR:
4382 case MONO_PATCH_INFO_TYPE_FROM_HANDLE:
4383 case MONO_PATCH_INFO_LDTOKEN:
4384 g_assert_not_reached ();
4385 /* from OP_AOTCONST : lis + ori */
4386 patch_lis_ori (ip, target);
4388 case MONO_PATCH_INFO_R4:
4389 case MONO_PATCH_INFO_R8:
4390 g_assert_not_reached ();
4391 *((gconstpointer *)(ip + 2)) = patch_info->data.target;
4393 case MONO_PATCH_INFO_EXC_NAME:
4394 g_assert_not_reached ();
4395 *((gconstpointer *)(ip + 1)) = patch_info->data.name;
4397 case MONO_PATCH_INFO_NONE:
4398 case MONO_PATCH_INFO_BB_OVF:
4399 case MONO_PATCH_INFO_EXC_OVF:
4400 /* everything is dealt with at epilog output time */
/* Default: generic ARM branch/address patching. */
4405 arm_patch_general (domain, ip, target);
4410 * Stack frame layout:
4412 * ------------------- fp
4413 * MonoLMF structure or saved registers
4414 * -------------------
4416 * -------------------
4418 * -------------------
4419 * optional 8 bytes for tracing
4420 * -------------------
4421 * param area size is cfg->param_area
4422 * ------------------- sp
/*
 * mono_arch_emit_prolog:
 *
 *   Emit the method prologue: push callee-saved registers (or build a full
 * MonoLMF when method->save_lmf is set), allocate the stack frame described
 * in the layout comment above, move incoming arguments from their calling-
 * convention locations (registers / caller stack) into their home slots,
 * and emit the optional thread-attach, LMF-linking, tracing and
 * sequence-point initialization code.  Emits unwind info alongside every
 * SP/frame adjustment.
 */
4425 mono_arch_emit_prolog (MonoCompile *cfg)
4427 MonoMethod *method = cfg->method;
4429 MonoMethodSignature *sig;
4431 int alloc_size, pos, max_offset, i, rot_amount;
4436 int prev_sp_offset, reg_offset;
4438 if (mono_jit_trace_calls != NULL && mono_trace_eval (method))
/* Initial buffer: a rough worst-case estimate, grown later by the epilog/exception emitters. */
4441 sig = mono_method_signature (method);
4442 cfg->code_size = 256 + sig->param_count * 20;
4443 code = cfg->native_code = g_malloc (cfg->code_size);
4445 mono_emit_unwind_op_def_cfa (cfg, code, ARMREG_SP, 0);
4447 ARM_MOV_REG_REG (code, ARMREG_IP, ARMREG_SP);
4449 alloc_size = cfg->stack_offset;
/* Two prologue shapes: a minimal push of used registers, or a full register
 * save area that doubles as the MonoLMF when save_lmf is set. */
4452 if (!method->save_lmf) {
4453 /* We save SP by storing it into IP and saving IP */
4454 ARM_PUSH (code, (cfg->used_int_regs | (1 << ARMREG_IP) | (1 << ARMREG_LR)));
4455 prev_sp_offset = 8; /* ip and lr */
4456 for (i = 0; i < 16; ++i) {
4457 if (cfg->used_int_regs & (1 << i))
4458 prev_sp_offset += 4;
4460 mono_emit_unwind_op_def_cfa_offset (cfg, code, prev_sp_offset);
4462 for (i = 0; i < 16; ++i) {
4463 if ((cfg->used_int_regs & (1 << i)) || (i == ARMREG_IP) || (i == ARMREG_LR)) {
4464 mono_emit_unwind_op_offset (cfg, code, i, (- prev_sp_offset) + reg_offset);
/* 0x5ff0 = register list r4-r12 + lr, i.e. "all but r0-r3, sp and pc". */
4469 ARM_PUSH (code, 0x5ff0);
4470 prev_sp_offset = 4 * 10; /* all but r0-r3, sp and pc */
4471 mono_emit_unwind_op_def_cfa_offset (cfg, code, prev_sp_offset);
4473 for (i = 0; i < 16; ++i) {
4474 if ((i > ARMREG_R3) && (i != ARMREG_SP) && (i != ARMREG_PC)) {
4475 mono_emit_unwind_op_offset (cfg, code, i, (- prev_sp_offset) + reg_offset);
4479 pos += sizeof (MonoLMF) - prev_sp_offset;
4483 // align to MONO_ARCH_FRAME_ALIGNMENT bytes
4484 if (alloc_size & (MONO_ARCH_FRAME_ALIGNMENT - 1)) {
4485 alloc_size += MONO_ARCH_FRAME_ALIGNMENT - 1;
4486 alloc_size &= ~(MONO_ARCH_FRAME_ALIGNMENT - 1);
4489 /* the stack used in the pushed regs */
4490 if (prev_sp_offset & 4)
4492 cfg->stack_usage = alloc_size;
/* Subtract the frame size; use an immediate form when it fits a rotated imm8. */
4494 if ((i = mono_arm_is_rotated_imm8 (alloc_size, &rot_amount)) >= 0) {
4495 ARM_SUB_REG_IMM (code, ARMREG_SP, ARMREG_SP, i, rot_amount);
4497 code = mono_arm_emit_load_imm (code, ARMREG_IP, alloc_size);
4498 ARM_SUB_REG_REG (code, ARMREG_SP, ARMREG_SP, ARMREG_IP);
4500 mono_emit_unwind_op_def_cfa_offset (cfg, code, prev_sp_offset + alloc_size);
4502 if (cfg->frame_reg != ARMREG_SP) {
4503 ARM_MOV_REG_REG (code, cfg->frame_reg, ARMREG_SP);
4504 mono_emit_unwind_op_def_cfa_reg (cfg, code, cfg->frame_reg);
4506 //g_print ("prev_sp_offset: %d, alloc_size:%d\n", prev_sp_offset, alloc_size);
4507 prev_sp_offset += alloc_size;
4509 /* compute max_offset in order to use short forward jumps
4510 * we could skip do it on arm because the immediate displacement
4511 * for jumps is large enough, it may be useful later for constant pools
4514 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
4515 MonoInst *ins = bb->code;
4516 bb->max_offset = max_offset;
4518 if (cfg->prof_options & MONO_PROFILE_COVERAGE)
4521 MONO_BB_FOR_EACH_INS (bb, ins)
4522 max_offset += ((guint8 *)ins_get_spec (ins->opcode))[MONO_INST_LEN];
4525 /* store runtime generic context */
4526 if (cfg->rgctx_var) {
4527 MonoInst *ins = cfg->rgctx_var;
4529 g_assert (ins->opcode == OP_REGOFFSET);
4531 if (arm_is_imm12 (ins->inst_offset)) {
4532 ARM_STR_IMM (code, MONO_ARCH_RGCTX_REG, ins->inst_basereg, ins->inst_offset);
4534 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_offset);
4535 ARM_STR_REG_REG (code, MONO_ARCH_RGCTX_REG, ins->inst_basereg, ARMREG_LR);
4539 /* load arguments allocated to register from the stack */
4542 cinfo = get_call_info (NULL, sig, sig->pinvoke);
4544 if (MONO_TYPE_ISSTRUCT (sig->ret) && cinfo->ret.storage != RegTypeStructByVal) {
4545 ArgInfo *ainfo = &cinfo->ret;
4546 inst = cfg->vret_addr;
4547 g_assert (arm_is_imm12 (inst->inst_offset));
4548 ARM_STR_IMM (code, ainfo->reg, inst->inst_basereg, inst->inst_offset);
4551 if (sig->call_convention == MONO_CALL_VARARG) {
4552 ArgInfo *cookie = &cinfo->sig_cookie;
4554 /* Save the sig cookie address */
4555 g_assert (cookie->storage == RegTypeBase);
4557 g_assert (arm_is_imm12 (prev_sp_offset + cookie->offset));
4558 g_assert (arm_is_imm12 (cfg->sig_cookie));
4559 ARM_ADD_REG_IMM8 (code, ARMREG_IP, cfg->frame_reg, prev_sp_offset + cookie->offset);
4560 ARM_STR_IMM (code, ARMREG_IP, cfg->frame_reg, cfg->sig_cookie);
/* Move each incoming argument from its ABI location (register, register
 * pair, caller stack, split reg/stack, or by-value struct registers) into
 * the slot or register the register allocator assigned to it. */
4563 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
4564 ArgInfo *ainfo = cinfo->args + i;
4565 inst = cfg->args [pos];
4567 if (cfg->verbose_level > 2)
4568 g_print ("Saving argument %d (type: %d)\n", i, ainfo->storage);
4569 if (inst->opcode == OP_REGVAR) {
4570 if (ainfo->storage == RegTypeGeneral)
4571 ARM_MOV_REG_REG (code, inst->dreg, ainfo->reg);
4572 else if (ainfo->storage == RegTypeFP) {
4573 g_assert_not_reached ();
4574 } else if (ainfo->storage == RegTypeBase) {
4575 if (arm_is_imm12 (prev_sp_offset + ainfo->offset)) {
4576 ARM_LDR_IMM (code, inst->dreg, ARMREG_SP, (prev_sp_offset + ainfo->offset));
/* NOTE(review): this fallback loads inst->inst_offset where the if-branch
 * used prev_sp_offset + ainfo->offset — looks inconsistent; confirm upstream. */
4578 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
4579 ARM_LDR_REG_REG (code, inst->dreg, ARMREG_SP, ARMREG_IP);
4582 g_assert_not_reached ();
4584 if (cfg->verbose_level > 2)
4585 g_print ("Argument %d assigned to register %s\n", pos, mono_arch_regname (inst->dreg));
4587 /* the argument should be put on the stack: FIXME handle size != word */
4588 if (ainfo->storage == RegTypeGeneral || ainfo->storage == RegTypeIRegPair) {
4589 switch (ainfo->size) {
4591 if (arm_is_imm12 (inst->inst_offset))
4592 ARM_STRB_IMM (code, ainfo->reg, inst->inst_basereg, inst->inst_offset);
4594 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
4595 ARM_STRB_REG_REG (code, ainfo->reg, inst->inst_basereg, ARMREG_IP);
4599 if (arm_is_imm8 (inst->inst_offset)) {
4600 ARM_STRH_IMM (code, ainfo->reg, inst->inst_basereg, inst->inst_offset);
4602 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
4603 ARM_STRH_REG_REG (code, ainfo->reg, inst->inst_basereg, ARMREG_IP);
/* 8-byte case: store both registers of the pair. */
4607 g_assert (arm_is_imm12 (inst->inst_offset));
4608 ARM_STR_IMM (code, ainfo->reg, inst->inst_basereg, inst->inst_offset);
4609 g_assert (arm_is_imm12 (inst->inst_offset + 4));
4610 ARM_STR_IMM (code, ainfo->reg + 1, inst->inst_basereg, inst->inst_offset + 4);
4613 if (arm_is_imm12 (inst->inst_offset)) {
4614 ARM_STR_IMM (code, ainfo->reg, inst->inst_basereg, inst->inst_offset);
4616 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
4617 ARM_STR_REG_REG (code, ainfo->reg, inst->inst_basereg, ARMREG_IP);
/* RegTypeBaseGen: 64-bit value split between r3 and the caller's stack. */
4621 } else if (ainfo->storage == RegTypeBaseGen) {
4622 g_assert (arm_is_imm12 (prev_sp_offset + ainfo->offset));
4623 g_assert (arm_is_imm12 (inst->inst_offset));
4624 ARM_LDR_IMM (code, ARMREG_LR, ARMREG_SP, (prev_sp_offset + ainfo->offset));
4625 ARM_STR_IMM (code, ARMREG_LR, inst->inst_basereg, inst->inst_offset + 4);
4626 ARM_STR_IMM (code, ARMREG_R3, inst->inst_basereg, inst->inst_offset);
4627 } else if (ainfo->storage == RegTypeBase) {
4628 if (arm_is_imm12 (prev_sp_offset + ainfo->offset)) {
4629 ARM_LDR_IMM (code, ARMREG_LR, ARMREG_SP, (prev_sp_offset + ainfo->offset));
4631 code = mono_arm_emit_load_imm (code, ARMREG_IP, prev_sp_offset + ainfo->offset);
4632 ARM_LDR_REG_REG (code, ARMREG_LR, ARMREG_SP, ARMREG_IP);
4635 switch (ainfo->size) {
4637 if (arm_is_imm8 (inst->inst_offset)) {
4638 ARM_STRB_IMM (code, ARMREG_LR, inst->inst_basereg, inst->inst_offset);
4640 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
4641 ARM_STRB_REG_REG (code, ARMREG_LR, inst->inst_basereg, ARMREG_IP);
4645 if (arm_is_imm8 (inst->inst_offset)) {
4646 ARM_STRH_IMM (code, ARMREG_LR, inst->inst_basereg, inst->inst_offset);
4648 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
4649 ARM_STRH_REG_REG (code, ARMREG_LR, inst->inst_basereg, ARMREG_IP);
/* 8-byte stack argument: copy low word then high word via LR. */
4653 if (arm_is_imm12 (inst->inst_offset)) {
4654 ARM_STR_IMM (code, ARMREG_LR, inst->inst_basereg, inst->inst_offset);
4656 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
4657 ARM_STR_REG_REG (code, ARMREG_LR, inst->inst_basereg, ARMREG_IP);
4659 if (arm_is_imm12 (prev_sp_offset + ainfo->offset + 4)) {
4660 ARM_LDR_IMM (code, ARMREG_LR, ARMREG_SP, (prev_sp_offset + ainfo->offset + 4));
4662 code = mono_arm_emit_load_imm (code, ARMREG_IP, prev_sp_offset + ainfo->offset + 4);
4663 ARM_LDR_REG_REG (code, ARMREG_LR, ARMREG_SP, ARMREG_IP);
4665 if (arm_is_imm12 (inst->inst_offset + 4)) {
4666 ARM_STR_IMM (code, ARMREG_LR, inst->inst_basereg, inst->inst_offset + 4);
4668 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset + 4);
4669 ARM_STR_REG_REG (code, ARMREG_LR, inst->inst_basereg, ARMREG_IP);
4673 if (arm_is_imm12 (inst->inst_offset)) {
4674 ARM_STR_IMM (code, ARMREG_LR, inst->inst_basereg, inst->inst_offset);
4676 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
4677 ARM_STR_REG_REG (code, ARMREG_LR, inst->inst_basereg, ARMREG_IP);
4681 } else if (ainfo->storage == RegTypeFP) {
4682 g_assert_not_reached ();
4683 } else if (ainfo->storage == RegTypeStructByVal) {
4684 int doffset = inst->inst_offset;
/* By-value struct: spill the register portion word by word, then memcpy
 * any remainder that was passed on the caller's stack. */
4688 size = mini_type_stack_size_full (cfg->generic_sharing_context, inst->inst_vtype, NULL, sig->pinvoke);
4689 for (cur_reg = 0; cur_reg < ainfo->size; ++cur_reg) {
4690 if (arm_is_imm12 (doffset)) {
4691 ARM_STR_IMM (code, ainfo->reg + cur_reg, inst->inst_basereg, doffset);
4693 code = mono_arm_emit_load_imm (code, ARMREG_IP, doffset);
4694 ARM_STR_REG_REG (code, ainfo->reg + cur_reg, inst->inst_basereg, ARMREG_IP);
4696 soffset += sizeof (gpointer);
4697 doffset += sizeof (gpointer);
4699 if (ainfo->vtsize) {
4700 /* FIXME: handle overrun! with struct sizes not multiple of 4 */
4701 //g_print ("emit_memcpy (prev_sp_ofs: %d, ainfo->offset: %d, soffset: %d)\n", prev_sp_offset, ainfo->offset, soffset);
4702 code = emit_memcpy (code, ainfo->vtsize * sizeof (gpointer), inst->inst_basereg, doffset, ARMREG_SP, prev_sp_offset + ainfo->offset);
4704 } else if (ainfo->storage == RegTypeStructByAddr) {
4705 g_assert_not_reached ();
4706 /* FIXME: handle overrun! with struct sizes not multiple of 4 */
4707 code = emit_memcpy (code, ainfo->vtsize * sizeof (gpointer), inst->inst_basereg, inst->inst_offset, ainfo->reg, 0);
4709 g_assert_not_reached ();
/* Native-to-managed wrappers must attach the calling thread to the runtime. */
4714 if (method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED) {
4715 if (cfg->compile_aot)
4716 /* AOT code is only used in the root domain */
4717 code = mono_arm_emit_load_imm (code, ARMREG_R0, 0);
4719 code = mono_arm_emit_load_imm (code, ARMREG_R0, (guint32)cfg->domain);
4720 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_INTERNAL_METHOD,
4721 (gpointer)"mono_jit_thread_attach");
4722 code = emit_call_seq (cfg, code);
/* Link the on-stack MonoLMF into the per-thread LMF list; use the TLS
 * fast path via __aeabi_read_tp when available. */
4725 if (method->save_lmf) {
4726 gboolean get_lmf_fast = FALSE;
4728 #ifdef HAVE_AEABI_READ_TP
4729 gint32 lmf_addr_tls_offset = mono_get_lmf_addr_tls_offset ();
4731 if (lmf_addr_tls_offset != -1) {
4732 get_lmf_fast = TRUE;
4734 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_INTERNAL_METHOD,
4735 (gpointer)"__aeabi_read_tp");
4736 code = emit_call_seq (cfg, code);
4738 ARM_LDR_IMM (code, ARMREG_R0, ARMREG_R0, lmf_addr_tls_offset);
4739 get_lmf_fast = TRUE;
4742 if (!get_lmf_fast) {
4743 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_INTERNAL_METHOD,
4744 (gpointer)"mono_get_lmf_addr");
4745 code = emit_call_seq (cfg, code);
4747 /* we build the MonoLMF structure on the stack - see mini-arm.h */
4748 /* lmf_offset is the offset from the previous stack pointer,
4749 * alloc_size is the total stack space allocated, so the offset
4750 * of MonoLMF from the current stack ptr is alloc_size - lmf_offset.
4751 * The pointer to the struct is put in r1 (new_lmf).
4752 * r2 is used as scratch
4753 * The callee-saved registers are already in the MonoLMF structure
4755 code = emit_big_add (code, ARMREG_R1, ARMREG_SP, alloc_size - lmf_offset);
4756 /* r0 is the result from mono_get_lmf_addr () */
4757 ARM_STR_IMM (code, ARMREG_R0, ARMREG_R1, G_STRUCT_OFFSET (MonoLMF, lmf_addr));
4758 /* new_lmf->previous_lmf = *lmf_addr */
4759 ARM_LDR_IMM (code, ARMREG_R2, ARMREG_R0, G_STRUCT_OFFSET (MonoLMF, previous_lmf));
4760 ARM_STR_IMM (code, ARMREG_R2, ARMREG_R1, G_STRUCT_OFFSET (MonoLMF, previous_lmf));
4761 /* *(lmf_addr) = r1 */
4762 ARM_STR_IMM (code, ARMREG_R1, ARMREG_R0, G_STRUCT_OFFSET (MonoLMF, previous_lmf));
4763 /* Skip method (only needed for trampoline LMF frames) */
4764 ARM_STR_IMM (code, ARMREG_SP, ARMREG_R1, G_STRUCT_OFFSET (MonoLMF, ebp));
4765 /* save the current IP */
4766 ARM_MOV_REG_REG (code, ARMREG_R2, ARMREG_PC);
4767 ARM_STR_IMM (code, ARMREG_R2, ARMREG_R1, G_STRUCT_OFFSET (MonoLMF, eip));
4771 code = mono_arch_instrument_prolog (cfg, mono_trace_enter_method, code, TRUE);
/* Sequence-point support: load the SeqPointInfo pointer from a GOT slot
 * into its stack variable (AOT case). */
4773 if (cfg->arch.seq_point_info_var) {
4774 MonoInst *ins = cfg->arch.seq_point_info_var;
4776 /* Initialize the variable from a GOT slot */
4777 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_SEQ_POINT_INFO, cfg->method);
4778 ARM_LDR_IMM (code, ARMREG_R0, ARMREG_PC, 0);
4780 *(gpointer*)code = NULL;
4782 ARM_LDR_REG_REG (code, ARMREG_R0, ARMREG_PC, ARMREG_R0);
4784 g_assert (ins->opcode == OP_REGOFFSET);
4786 if (arm_is_imm12 (ins->inst_offset)) {
4787 ARM_STR_IMM (code, ARMREG_R0, ins->inst_basereg, ins->inst_offset);
4789 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_offset);
4790 ARM_STR_REG_REG (code, ARMREG_R0, ins->inst_basereg, ARMREG_LR);
4794 /* Initialize ss_trigger_page_var */
4796 MonoInst *info_var = cfg->arch.seq_point_info_var;
4797 MonoInst *ss_trigger_page_var = cfg->arch.ss_trigger_page_var;
4798 int dreg = ARMREG_LR;
4801 g_assert (info_var->opcode == OP_REGOFFSET);
4802 g_assert (arm_is_imm12 (info_var->inst_offset));
4804 ARM_LDR_IMM (code, dreg, info_var->inst_basereg, info_var->inst_offset);
4805 /* Load the trigger page addr */
4806 ARM_LDR_IMM (code, dreg, dreg, G_STRUCT_OFFSET (SeqPointInfo, ss_trigger_page));
4807 ARM_STR_IMM (code, dreg, ss_trigger_page_var->inst_basereg, ss_trigger_page_var->inst_offset);
4811 cfg->code_len = code - cfg->native_code;
4812 g_assert (cfg->code_len < cfg->code_size);
/*
 * mono_arch_emit_epilog:
 *
 *   Emit the method epilogue: optional leave-tracing, loading of by-value
 * struct returns into r0, unlinking of the MonoLMF (when save_lmf), stack
 * deallocation and restoration of the callee-saved registers, returning by
 * popping the saved lr into pc.  Grows the code buffer first so the epilog
 * always fits.
 */
4819 mono_arch_emit_epilog (MonoCompile *cfg)
4821 MonoMethod *method = cfg->method;
4822 int pos, i, rot_amount;
4823 int max_epilog_size = 16 + 20*4;
4827 if (cfg->method->save_lmf)
4828 max_epilog_size += 128;
4830 if (mono_jit_trace_calls != NULL)
4831 max_epilog_size += 50;
4833 if (cfg->prof_options & MONO_PROFILE_ENTER_LEAVE)
4834 max_epilog_size += 50;
/* Double the buffer until the worst-case epilog fits (16-byte safety margin). */
4836 while (cfg->code_len + max_epilog_size > (cfg->code_size - 16)) {
4837 cfg->code_size *= 2;
4838 cfg->native_code = g_realloc (cfg->native_code, cfg->code_size);
4839 mono_jit_stats.code_reallocs++;
4843 * Keep in sync with OP_JMP
4845 code = cfg->native_code + cfg->code_len;
4847 if (mono_jit_trace_calls != NULL && mono_trace_eval (method)) {
4848 code = mono_arch_instrument_epilog (cfg, mono_trace_leave_method, code, TRUE);
4852 /* Load returned vtypes into registers if needed */
4853 cinfo = cfg->arch.cinfo;
4854 if (cinfo->ret.storage == RegTypeStructByVal) {
4855 MonoInst *ins = cfg->ret;
4857 if (arm_is_imm12 (ins->inst_offset)) {
4858 ARM_LDR_IMM (code, ARMREG_R0, ins->inst_basereg, ins->inst_offset);
4860 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_offset);
4861 ARM_LDR_REG_REG (code, ARMREG_R0, ins->inst_basereg, ARMREG_LR);
/* LMF path: pop the LMF off the per-thread list, then restore registers
 * directly from the MonoLMF save area. */
4865 if (method->save_lmf) {
4867 /* all but r0-r3, sp and pc */
4868 pos += sizeof (MonoLMF) - (4 * 10);
4870 /* r2 contains the pointer to the current LMF */
4871 code = emit_big_add (code, ARMREG_R2, cfg->frame_reg, cfg->stack_usage - lmf_offset);
4872 /* ip = previous_lmf */
4873 ARM_LDR_IMM (code, ARMREG_IP, ARMREG_R2, G_STRUCT_OFFSET (MonoLMF, previous_lmf));
4875 ARM_LDR_IMM (code, ARMREG_LR, ARMREG_R2, G_STRUCT_OFFSET (MonoLMF, lmf_addr));
4876 /* *(lmf_addr) = previous_lmf */
4877 ARM_STR_IMM (code, ARMREG_IP, ARMREG_LR, G_STRUCT_OFFSET (MonoLMF, previous_lmf));
4878 /* FIXME: speedup: there is no actual need to restore the registers if
4879 * we didn't actually change them (idea from Zoltan).
4882 /* point sp at the registers to restore: 10 is 14 -4, because we skip r0-r3 */
4883 ARM_ADD_REG_IMM8 (code, ARMREG_SP, ARMREG_R2, (sizeof (MonoLMF) - 10 * sizeof (gulong)));
4884 ARM_POP_NWB (code, 0xaff0); /* restore ip to sp and lr to pc */
/* Non-LMF path: free the frame and pop the saved registers, returning via pc. */
4886 if ((i = mono_arm_is_rotated_imm8 (cfg->stack_usage, &rot_amount)) >= 0) {
4887 ARM_ADD_REG_IMM (code, ARMREG_SP, cfg->frame_reg, i, rot_amount);
4889 code = mono_arm_emit_load_imm (code, ARMREG_IP, cfg->stack_usage);
4890 ARM_ADD_REG_REG (code, ARMREG_SP, ARMREG_SP, ARMREG_IP);
4892 /* FIXME: add v4 thumb interworking support */
4893 ARM_POP_NWB (code, cfg->used_int_regs | ((1 << ARMREG_SP) | (1 << ARMREG_PC)));
4896 cfg->code_len = code - cfg->native_code;
4898 g_assert (cfg->code_len < cfg->code_size);
4902 /* remove once throw_exception_by_name is eliminated */
4904 exception_id_by_name (const char *name)
4906 if (strcmp (name, "IndexOutOfRangeException") == 0)
4907 return MONO_EXC_INDEX_OUT_OF_RANGE;
4908 if (strcmp (name, "OverflowException") == 0)
4909 return MONO_EXC_OVERFLOW;
4910 if (strcmp (name, "ArithmeticException") == 0)
4911 return MONO_EXC_ARITHMETIC;
4912 if (strcmp (name, "DivideByZeroException") == 0)
4913 return MONO_EXC_DIVIDE_BY_ZERO;
4914 if (strcmp (name, "InvalidCastException") == 0)
4915 return MONO_EXC_INVALID_CAST;
4916 if (strcmp (name, "NullReferenceException") == 0)
4917 return MONO_EXC_NULL_REF;
4918 if (strcmp (name, "ArrayTypeMismatchException") == 0)
4919 return MONO_EXC_ARRAY_TYPE_MISMATCH;
4920 g_error ("Unknown intrinsic exception %s\n", name);
/*
 * mono_arch_emit_exceptions:
 *
 *   Append the out-of-line exception-throwing stubs referenced by the
 * method's MONO_PATCH_INFO_EXC patch entries.  Stubs are shared: the first
 * occurrence of each intrinsic exception id emits the throw sequence, later
 * occurrences are just branch-patched to it.  Each stub loads the exception
 * type token and the throwing address, then calls
 * mono_arch_throw_corlib_exception.
 */
4925 mono_arch_emit_exceptions (MonoCompile *cfg)
4927 MonoJumpInfo *patch_info;
4930 const guint8* exc_throw_pos [MONO_EXC_INTRINS_NUM] = {NULL};
4931 guint8 exc_throw_found [MONO_EXC_INTRINS_NUM] = {0};
4932 int max_epilog_size = 50;
4934 /* count the number of exception infos */
4937 * make sure we have enough space for exceptions
4939 for (patch_info = cfg->patch_info; patch_info; patch_info = patch_info->next) {
4940 if (patch_info->type == MONO_PATCH_INFO_EXC) {
4941 i = exception_id_by_name (patch_info->data.target);
4942 if (!exc_throw_found [i]) {
4943 max_epilog_size += 32;
4944 exc_throw_found [i] = TRUE;
/* Grow the buffer until the estimated stub code fits (16-byte margin). */
4949 while (cfg->code_len + max_epilog_size > (cfg->code_size - 16)) {
4950 cfg->code_size *= 2;
4951 cfg->native_code = g_realloc (cfg->native_code, cfg->code_size);
4952 mono_jit_stats.code_reallocs++;
4955 code = cfg->native_code + cfg->code_len;
4957 /* add code to raise exceptions */
4958 for (patch_info = cfg->patch_info; patch_info; patch_info = patch_info->next) {
4959 switch (patch_info->type) {
4960 case MONO_PATCH_INFO_EXC: {
4961 MonoClass *exc_class;
4962 unsigned char *ip = patch_info->ip.i + cfg->native_code;
4964 i = exception_id_by_name (patch_info->data.target);
/* Reuse an already-emitted stub for this exception id, if any. */
4965 if (exc_throw_pos [i]) {
4966 arm_patch (ip, exc_throw_pos [i]);
4967 patch_info->type = MONO_PATCH_INFO_NONE;
4970 exc_throw_pos [i] = code;
4972 arm_patch (ip, code);
4974 exc_class = mono_class_from_name (mono_defaults.corlib, "System", patch_info->data.name);
4975 g_assert (exc_class);
/* r1 = return address of the faulting site; r0 = type token (loaded
 * pc-relative from the word emitted below). The patch entry is rewritten
 * to point at the call to mono_arch_throw_corlib_exception. */
4977 ARM_MOV_REG_REG (code, ARMREG_R1, ARMREG_LR);
4978 ARM_LDR_IMM (code, ARMREG_R0, ARMREG_PC, 0);
4979 patch_info->type = MONO_PATCH_INFO_INTERNAL_METHOD;
4980 patch_info->data.name = "mono_arch_throw_corlib_exception";
4981 patch_info->ip.i = code - cfg->native_code;
4983 *(guint32*)(gpointer)code = exc_class->type_token;
4993 cfg->code_len = code - cfg->native_code;
4995 g_assert (cfg->code_len < cfg->code_size);
/* One-shot guard for resolving the process-wide LMF TLS offsets. */
4999 static gboolean tls_offset_inited = FALSE;
/*
 * mono_arch_setup_jit_tls_data:
 *   Per-thread JIT TLS setup hook; the LMF TLS offsets are global, so they
 * are resolved only on the first call.
 * NOTE(review): the check-then-set on tls_offset_inited is not obviously
 * race-free from this excerpt — confirm the first call is serialized.
 */
5002 mono_arch_setup_jit_tls_data (MonoJitTlsData *tls)
5004 if (!tls_offset_inited) {
5005 tls_offset_inited = TRUE;
5007 lmf_tls_offset = mono_get_lmf_tls_offset ();
5008 lmf_addr_tls_offset = mono_get_lmf_addr_tls_offset ();
/* Per-thread JIT TLS teardown hook; body not visible in this excerpt. */
5013 mono_arch_free_jit_tls_data (MonoJitTlsData *tls)
/* Arch-specific method intrinsics hook — body not visible here; presumably
 * returns NULL on ARM, TODO confirm against the full source. */
5018 mono_arch_emit_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
/* Debug pretty-printer hook for tree IR nodes. */
5025 mono_arch_print_tree (MonoInst *tree, int arity)
/* Returns the IR instruction that loads the current MonoDomain; delegates
 * to the generic helper. */
5031 mono_arch_get_domain_intrinsic (MonoCompile* cfg)
5033 return mono_get_domain_intrinsic (cfg);
/* Offset of the patchable word at a call site; body not visible here. */
5037 mono_arch_get_patch_offset (guint8 *code)
/* ARM has no register windows (SPARC-style hook): nothing to flush. */
5044 mono_arch_flush_register_windows (void)
5048 #ifdef MONO_ARCH_HAVE_IMT
/*
 * mono_arch_emit_imt_argument:
 *
 *   Arrange for the IMT/method argument to be passed to an interface call
 * in ARMREG_V5. Three visible cases: AOT compilation (uses an OP_AOTCONST
 * so the method constant is patchable), a generic context (pass the given
 * imt_arg register directly), and the plain JIT case (materialize the
 * MonoMethod* with OP_PCONST).
 * NOTE(review): the `} else {` / closing lines between the branches are
 * missing from this excerpt, so the exact branch nesting should be
 * confirmed against the full source.
 */
5051 mono_arch_emit_imt_argument (MonoCompile *cfg, MonoCallInst *call, MonoInst *imt_arg)
5053 if (cfg->compile_aot) {
5054 int method_reg = mono_alloc_ireg (cfg);
5057 call->dynamic_imt_arg = TRUE;
5060 mono_call_inst_add_outarg_reg (cfg, call, imt_arg->dreg, ARMREG_V5, FALSE);
/* AOT: emit a patchable constant load of the method. */
5062 MONO_INST_NEW (cfg, ins, OP_AOTCONST);
5063 ins->dreg = method_reg;
5064 ins->inst_p0 = call->method;
5065 ins->inst_c1 = MONO_PATCH_INFO_METHODCONST;
5066 MONO_ADD_INS (cfg->cbb, ins);
5068 mono_call_inst_add_outarg_reg (cfg, call, method_reg, ARMREG_V5, FALSE);
5070 } else if (cfg->generic_context) {
5072 /* Always pass in a register for simplicity */
5073 call->dynamic_imt_arg = TRUE;
5075 cfg->uses_rgctx_reg = TRUE;
5078 mono_call_inst_add_outarg_reg (cfg, call, imt_arg->dreg, ARMREG_V5, FALSE);
5081 int method_reg = mono_alloc_preg (cfg);
/* JIT: the method pointer is known now, embed it directly. */
5083 MONO_INST_NEW (cfg, ins, OP_PCONST);
5084 ins->inst_p0 = call->method;
5085 ins->dreg = method_reg;
5086 MONO_ADD_INS (cfg->cbb, ins);
5088 mono_call_inst_add_outarg_reg (cfg, call, method_reg, ARMREG_V5, FALSE);
/*
 * mono_arch_find_imt_method:
 *
 *   Recover the IMT method for the call site at CODE. The JIT embeds the
 * MonoMethod* as a literal word right after a PC-relative LDR; AOT code
 * stores a 0 word there instead and passes the method in V5.
 */
5094 mono_arch_find_imt_method (mgreg_t *regs, guint8 *code)
5096 guint32 *code_ptr = (guint32*)code;
5098 /* The IMT value is stored in the code stream right after the LDR instruction. */
/* Sanity check: the word before the literal must be an LDR from PC.
 * (The g_warning text says "LDC"; that looks like a typo for LDR.) */
5099 if (!IS_LDR_PC (code_ptr [0])) {
5100 g_warning ("invalid code stream, instruction before IMT value is not a LDC in %s() (code %p value 0: 0x%x -1: 0x%x -2: 0x%x)", __FUNCTION__, code, code_ptr [2], code_ptr [1], code_ptr [0]);
5101 g_assert (IS_LDR_PC (code_ptr [0]));
5103 if (code_ptr [1] == 0)
5104 /* This is AOTed code, the IMT method is in V5 */
5105 return (MonoMethod*)regs [ARMREG_V5];
5107 return (MonoMethod*) code_ptr [1];
/* The static-rgctx trampoline leaves the vtable in MONO_ARCH_RGCTX_REG;
 * just read it back out of the saved register state. */
5111 mono_arch_find_static_call_vtable (mgreg_t *regs, guint8 *code)
5113 return (MonoVTable*) regs [MONO_ARCH_RGCTX_REG];
/* Debug aid: compile in an extra equality check in the IMT thunk. */
5116 #define ENABLE_WRONG_METHOD_CHECK 0
/* Code-size budgets for mono_arch_build_imt_thunk, expressed as
 * (instruction count * 4 bytes per ARM instruction). */
5117 #define BASE_SIZE (6 * 4)
5118 #define BSEARCH_ENTRY_SIZE (4 * 4)
5119 #define CMP_SIZE (3 * 4)
5120 #define BRANCH_SIZE (1 * 4)
5121 #define CALL_SIZE (2 * 4)
5122 #define WMC_SIZE (5 * 4)
/* Byte distance between two addresses via 32-bit casts — fine on 32-bit
 * ARM, but would truncate pointers on a 64-bit host. */
5123 #define DISTANCE(A, B) (((gint32)(B)) - ((gint32)(A)))
/*
 * arm_emit_value_and_patch_ldr:
 *
 *   Emit VALUE as a literal word at CODE and back-patch the earlier
 * PC-relative LDR at TARGET with the 12-bit immediate offset to it.
 * NOTE(review): `delta` is guint32, so `delta >= 0` is always true — a
 * negative distance would wrap and only be caught by the <= 0xFFF bound;
 * a gint32 would make the assert meaningful. The literal store and the
 * return statement are in lines missing from this excerpt.
 */
5126 arm_emit_value_and_patch_ldr (arminstr_t *code, arminstr_t *target, guint32 value)
5128 guint32 delta = DISTANCE (target, code);
5130 g_assert (delta >= 0 && delta <= 0xFFF);
5131 *target = *target | delta;
/*
 * mono_arch_build_imt_thunk:
 *
 *   Build the native IMT dispatch thunk for VTABLE: a sequence of
 * compare-and-branch entries (with binary-search range checks) that maps
 * the incoming IMT method (R0, or V5 for AOT callers) to a vtable slot and
 * jumps through it. Pass 1 sizes each entry; pass 2 emits code; pass 3
 * back-patches branch targets and fills the reserved constant pools.
 * Intricate, order-dependent code-emission — documented in place only.
 * NOTE(review): many structural lines (else branches, closers, #endif,
 * BASE_SIZE accounting) are missing from this excerpt.
 */
5137 mono_arch_build_imt_thunk (MonoVTable *vtable, MonoDomain *domain, MonoIMTCheckItem **imt_entries, int count,
5138 gpointer fail_tramp)
5140 int size, i, extra_space = 0;
5141 arminstr_t *code, *start, *vtable_target = NULL;
5142 gboolean large_offsets = FALSE;
5143 guint32 **constant_pool_starts;
5146 constant_pool_starts = g_new0 (guint32*, count);
5149 * We might be called with a fail_tramp from the IMT builder code even if
5150 * MONO_ARCH_HAVE_GENERALIZED_IMT_THUNK is not defined.
5152 //g_assert (!fail_tramp);
/* Pass 1: compute each entry's chunk_size and the total thunk size. */
5154 for (i = 0; i < count; ++i) {
5155 MonoIMTCheckItem *item = imt_entries [i];
5156 if (item->is_equals) {
/* Vtable slot too far for an imm12 LDR offset: budget the long-form path. */
5157 if (!arm_is_imm12 (DISTANCE (vtable, &vtable->vtable[item->value.vtable_slot]))) {
5158 item->chunk_size += 32;
5159 large_offsets = TRUE;
5162 if (item->check_target_idx) {
5163 if (!item->compare_done)
5164 item->chunk_size += CMP_SIZE;
5165 item->chunk_size += BRANCH_SIZE;
5167 #if ENABLE_WRONG_METHOD_CHECK
5168 item->chunk_size += WMC_SIZE;
5171 item->chunk_size += CALL_SIZE;
/* Non-equals entry: a bsearch range check; its target needs no compare. */
5173 item->chunk_size += BSEARCH_ENTRY_SIZE;
5174 imt_entries [item->check_target_idx]->compare_done = TRUE;
5176 size += item->chunk_size;
5180 size += 4 * count; /* The ARM_ADD_REG_IMM to pop the stack */
5182 start = code = mono_domain_code_reserve (domain, size);
5185 printf ("building IMT thunk for class %s %s entries %d code size %d code at %p end %p vtable %p\n", vtable->klass->name_space, vtable->klass->name, count, size, start, ((guint8*)start) + size, vtable);
5186 for (i = 0; i < count; ++i) {
5187 MonoIMTCheckItem *item = imt_entries [i];
5188 printf ("method %d (%p) %s vtable slot %p is_equals %d chunk size %d\n", i, item->key, item->key->name, &vtable->vtable [item->value.vtable_slot], item->is_equals, item->chunk_size);
/* Prologue: save scratch regs (4 slots when a computed-branch slot is
 * needed for large offsets, 2 otherwise), then load the IMT method from
 * the literal after the caller's LDR and the vtable address from a pool. */
5193 ARM_PUSH4 (code, ARMREG_R0, ARMREG_R1, ARMREG_IP, ARMREG_PC);
5195 ARM_PUSH2 (code, ARMREG_R0, ARMREG_R1);
5196 ARM_LDR_IMM (code, ARMREG_R0, ARMREG_LR, -4);
5197 vtable_target = code;
5198 ARM_LDR_IMM (code, ARMREG_IP, ARMREG_PC, 0);
5200 /* R0 == 0 means we are called from AOT code. In this case, V5 contains the IMT method */
5201 ARM_CMP_REG_IMM8 (code, ARMREG_R0, 0);
5202 ARM_MOV_REG_REG_COND (code, ARMREG_R0, ARMREG_V5, ARMCOND_EQ);
/* Pass 2: emit the per-entry compare/branch/dispatch code. */
5204 for (i = 0; i < count; ++i) {
5205 MonoIMTCheckItem *item = imt_entries [i];
5206 arminstr_t *imt_method = NULL, *vtable_offset_ins = NULL;
5207 gint32 vtable_offset;
5209 item->code_target = (guint8*)code;
5211 if (item->is_equals) {
5212 if (item->check_target_idx) {
5213 if (!item->compare_done) {
5215 ARM_LDR_IMM (code, ARMREG_R1, ARMREG_PC, 0);
5216 ARM_CMP_REG_REG (code, ARMREG_R0, ARMREG_R1);
/* Branch to the fallthrough entry on mismatch; patched in pass 3. */
5218 item->jmp_code = (guint8*)code;
5219 ARM_B_COND (code, ARMCOND_NE, 0);
5221 /*Enable the commented code to assert on wrong method*/
5222 #if ENABLE_WRONG_METHOD_CHECK
5224 ARM_LDR_IMM (code, ARMREG_R1, ARMREG_PC, 0);
5225 ARM_CMP_REG_REG (code, ARMREG_R0, ARMREG_R1);
5226 ARM_B_COND (code, ARMCOND_NE, 1);
5232 vtable_offset = DISTANCE (vtable, &vtable->vtable[item->value.vtable_slot]);
5233 if (!arm_is_imm12 (vtable_offset)) {
5235 * We need to branch to a computed address but we don't have
5236 * a free register to store it, since IP must contain the
5237 * vtable address. So we push the two values to the stack, and
5238 * load them both using LDM.
5240 /* Compute target address */
5241 vtable_offset_ins = code;
5242 ARM_LDR_IMM (code, ARMREG_R1, ARMREG_PC, 0);
5243 ARM_LDR_REG_REG (code, ARMREG_R1, ARMREG_IP, ARMREG_R1);
5244 /* Save it to the fourth slot */
5245 ARM_STR_IMM (code, ARMREG_R1, ARMREG_SP, 3 * sizeof (gpointer));
5246 /* Restore registers and branch */
5247 ARM_POP4 (code, ARMREG_R0, ARMREG_R1, ARMREG_IP, ARMREG_PC);
5249 code = arm_emit_value_and_patch_ldr (code, vtable_offset_ins, vtable_offset);
/* Short-offset path: restore regs, drop the reserved slots, and jump
 * straight through the vtable slot. */
5251 ARM_POP2 (code, ARMREG_R0, ARMREG_R1);
5253 ARM_ADD_REG_IMM8 (code, ARMREG_SP, ARMREG_SP, 2 * sizeof (gpointer));
5254 ARM_LDR_IMM (code, ARMREG_PC, ARMREG_IP, vtable_offset);
5258 code = arm_emit_value_and_patch_ldr (code, imt_method, (guint32)item->key);
5260 /*must emit after unconditional branch*/
5261 if (vtable_target) {
5262 code = arm_emit_value_and_patch_ldr (code, vtable_target, (guint32)vtable);
5263 item->chunk_size += 4;
5264 vtable_target = NULL;
5267 /*We reserve the space for bsearch IMT values after the first entry with an absolute jump*/
5268 constant_pool_starts [i] = code;
5270 code += extra_space;
/* Non-equals entry: bsearch range check, branch on >= to the subtree. */
5274 ARM_LDR_IMM (code, ARMREG_R1, ARMREG_PC, 0);
5275 ARM_CMP_REG_REG (code, ARMREG_R0, ARMREG_R1);
5277 item->jmp_code = (guint8*)code;
5278 ARM_B_COND (code, ARMCOND_GE, 0);
/* Pass 3: patch the recorded branches and backfill the constant pools of
 * the preceding non-equals entries. */
5283 for (i = 0; i < count; ++i) {
5284 MonoIMTCheckItem *item = imt_entries [i];
5285 if (item->jmp_code) {
5286 if (item->check_target_idx)
5287 arm_patch (item->jmp_code, imt_entries [item->check_target_idx]->code_target);
5289 if (i > 0 && item->is_equals) {
5291 arminstr_t *space_start = constant_pool_starts [i];
5292 for (j = i - 1; j >= 0 && !imt_entries [j]->is_equals; --j) {
5293 space_start = arm_emit_value_and_patch_ldr (space_start, (arminstr_t*)imt_entries [j]->code_target, (guint32)imt_entries [j]->key);
5300 char *buff = g_strdup_printf ("thunk_for_class_%s_%s_entries_%d", vtable->klass->name_space, vtable->klass->name, count);
5301 mono_disassemble_code (NULL, (guint8*)start, size, buff);
5306 g_free (constant_pool_starts);
/* New executable code: flush I-cache before anything can jump to it. */
5308 mono_arch_flush_icache ((guint8*)start, size);
5309 mono_stats.imt_thunks_size += code - start;
5311 g_assert (DISTANCE (start, code) <= size);
5318 mono_arch_context_get_int_reg (MonoContext *ctx, int reg)
5320 if (reg == ARMREG_SP)
5321 return (gpointer)ctx->esp;
5323 return (gpointer)ctx->regs [reg];
5327 * mono_arch_set_breakpoint:
5329 * Set a breakpoint at the native code corresponding to JI at NATIVE_OFFSET.
5330 * The location should contain code emitted by OP_SEQ_POINT.
/*
 * NOTE(review): the #ifdef lines separating the AOT path (bp_addrs slot
 * table) from the JIT path (code patching) are missing from this excerpt.
 */
5333 mono_arch_set_breakpoint (MonoJitInfo *ji, guint8 *ip)
5336 guint32 native_offset = ip - (guint8*)ji->code_start;
/* AOT path: flip the per-offset slot so the AOTed sequence point reads
 * the breakpoint trigger page and faults. */
5339 SeqPointInfo *info = mono_arch_get_seq_point_info (mono_domain_get (), ji->code_start);
5341 g_assert (native_offset % 4 == 0);
5342 g_assert (info->bp_addrs [native_offset / 4] == 0);
5343 info->bp_addrs [native_offset / 4] = bp_trigger_page;
/* JIT path: patch the sequence point to load from the (unreadable)
 * breakpoint trigger page, turning execution into a fault. */
5345 int dreg = ARMREG_LR;
5347 /* Read from another trigger page */
5348 ARM_LDR_IMM (code, dreg, ARMREG_PC, 0);
5350 *(int*)code = (int)bp_trigger_page;
5352 ARM_LDR_IMM (code, dreg, dreg, 0);
5354 mono_arch_flush_icache (code - 16, 16);
5357 /* This is currently implemented by emitting an SWI instruction, which
5358 * qemu/linux seems to convert to a SIGILL.
5360 *(int*)code = (0xef << 24) | 8;
5362 mono_arch_flush_icache (code - 4, 4);
5368 * mono_arch_clear_breakpoint:
5370 * Clear the breakpoint at IP.
/*
 * Mirror of mono_arch_set_breakpoint: the AOT path zeroes the bp_addrs
 * slot; the JIT path rewrites the patched words (loop body lines are
 * missing from this excerpt — presumably NOPs, TODO confirm) and flushes
 * the I-cache over the rewritten range.
 */
5373 mono_arch_clear_breakpoint (MonoJitInfo *ji, guint8 *ip)
5379 guint32 native_offset = ip - (guint8*)ji->code_start;
5380 SeqPointInfo *info = mono_arch_get_seq_point_info (mono_domain_get (), ji->code_start);
5382 g_assert (native_offset % 4 == 0);
5383 g_assert (info->bp_addrs [native_offset / 4] == bp_trigger_page);
5384 info->bp_addrs [native_offset / 4] = 0;
5386 for (i = 0; i < 4; ++i)
5389 mono_arch_flush_icache (ip, code - ip);
5394 * mono_arch_start_single_stepping:
5396 * Start single stepping.
/* Revoke all access to the single-step trigger page, so every OP_SEQ_POINT
 * read of it faults and is reported as a single-step event. */
5399 mono_arch_start_single_stepping (void)
5401 mono_mprotect (ss_trigger_page, mono_pagesize (), 0);
5405 * mono_arch_stop_single_stepping:
5407 * Stop single stepping.
/* Restore read access to the trigger page: sequence-point loads succeed
 * again and stepping stops. */
5410 mono_arch_stop_single_stepping (void)
5412 mono_mprotect (ss_trigger_page, mono_pagesize (), MONO_MMAP_READ);
/* Signal delivered when a trigger-page access faults; platform-dependent
 * (the surrounding #if/#else lines are not visible in this excerpt). */
5416 #define DBG_SIGNAL SIGBUS
5418 #define DBG_SIGNAL SIGSEGV
5422 * mono_arch_is_single_step_event:
5424 * Return whenever the machine state in SIGCTX corresponds to a single
/*
 * Classify a fault as a single-step event by checking whether the faulting
 * address lies within the single-step trigger page (with 128 bytes slack).
 * NOTE(review): no si_signo check is visible here, unlike the breakpoint
 * variant below — confirm whether that line is simply missing from this
 * excerpt.
 */
5428 mono_arch_is_single_step_event (void *info, void *sigctx)
5430 siginfo_t *sinfo = info;
5432 /* Sometimes the address is off by 4 */
5433 if (sinfo->si_addr >= ss_trigger_page && (guint8*)sinfo->si_addr <= (guint8*)ss_trigger_page + 128)
5440 * mono_arch_is_breakpoint_event:
5442 * Return whenever the machine state in SIGCTX corresponds to a breakpoint event.
/* A breakpoint event is DBG_SIGNAL with a faulting address inside the
 * breakpoint trigger page (128 bytes of slack for the off-by-4 case). */
5445 mono_arch_is_breakpoint_event (void *info, void *sigctx)
5447 siginfo_t *sinfo = info;
5449 if (sinfo->si_signo == DBG_SIGNAL) {
5450 /* Sometimes the address is off by 4 */
5451 if (sinfo->si_addr >= bp_trigger_page && (guint8*)sinfo->si_addr <= (guint8*)bp_trigger_page + 128)
/* Map the fault IP back to the breakpoint/step location; the adjustment
 * and return lines are missing from this excerpt. */
5461 mono_arch_get_ip_for_breakpoint (MonoJitInfo *ji, MonoContext *ctx)
5463 guint8 *ip = MONO_CONTEXT_GET_IP (ctx);
/* Same for single-step events. */
5474 mono_arch_get_ip_for_single_step (MonoJitInfo *ji, MonoContext *ctx)
5476 guint8 *ip = MONO_CONTEXT_GET_IP (ctx);
5484 * mono_arch_skip_breakpoint:
5486 * See mini-amd64.c for docs.
/* Resume past the faulting instruction: advance IP by one 4-byte ARM
 * instruction. */
5489 mono_arch_skip_breakpoint (MonoContext *ctx)
5491 MONO_CONTEXT_SET_IP (ctx, (guint8*)MONO_CONTEXT_GET_IP (ctx) + 4);
5495 * mono_arch_skip_single_step:
5497 * See mini-amd64.c for docs.
/* Identical to skip_breakpoint: step IP over the 4-byte trigger load. */
5500 mono_arch_skip_single_step (MonoContext *ctx)
5502 MONO_CONTEXT_SET_IP (ctx, (guint8*)MONO_CONTEXT_GET_IP (ctx) + 4);
5506 * mono_arch_get_seq_point_info:
5508 * See mini-amd64.c for docs.
/*
 * Lazily build (and cache in the domain's arch_seq_points hash, under the
 * domain lock) the SeqPointInfo for CODE: trigger-page pointers plus a
 * bp_addrs slot per 4-byte instruction (allocation is sized by
 * ji->code_size). NOTE(review): the function's tail (return and closing
 * brace) lies beyond this excerpt.
 */
5511 mono_arch_get_seq_point_info (MonoDomain *domain, guint8 *code)
5516 // FIXME: Add a free function
5518 mono_domain_lock (domain);
5519 info = g_hash_table_lookup (domain_jit_info (domain)->arch_seq_points,
5521 mono_domain_unlock (domain);
5524 ji = mono_jit_info_table_find (domain, (char*)code);
5527 info = g_malloc0 (sizeof (SeqPointInfo) + ji->code_size);
5529 info->ss_trigger_page = ss_trigger_page;
5530 info->bp_trigger_page = bp_trigger_page;
5532 mono_domain_lock (domain);
5533 g_hash_table_insert (domain_jit_info (domain)->arch_seq_points,
5535 mono_domain_unlock (domain);