2 * mini-arm.c: ARM backend for the Mono code generator
5 * Paolo Molaro (lupus@ximian.com)
6 * Dietmar Maurer (dietmar@ximian.com)
8 * (C) 2003 Ximian, Inc.
13 #include <mono/metadata/appdomain.h>
14 #include <mono/metadata/debug-helpers.h>
15 #include <mono/utils/mono-mmap.h>
22 #include "mono/arch/arm/arm-fpa-codegen.h"
23 #elif defined(ARM_FPU_VFP)
24 #include "mono/arch/arm/arm-vfp-codegen.h"
27 #if defined(__ARM_EABI__) && defined(__linux__) && !defined(PLATFORM_ANDROID)
28 #define HAVE_AEABI_READ_TP 1
/*
 * NOTE(review): this file is a partial listing — the embedded line numbers
 * are non-contiguous, so lines are missing throughout. Comments describe
 * only what the visible code shows.
 */
/* TLS offsets for the LMF and the LMF-address slot; -1 means not yet resolved. */
31 static gint lmf_tls_offset = -1;
32 static gint lmf_addr_tls_offset = -1;
34 /* This mutex protects architecture specific caches */
35 #define mono_mini_arch_lock() EnterCriticalSection (&mini_arch_mutex)
36 #define mono_mini_arch_unlock() LeaveCriticalSection (&mini_arch_mutex)
37 static CRITICAL_SECTION mini_arch_mutex;
/* CPU feature flags, set from /proc/cpuinfo in mono_arch_cpu_optimizazions () below. */
39 static int v5_supported = 0;
40 static int v7_supported = 0;
41 static int thumb_supported = 0;
44 * The code generated for sequence points reads from this location, which is
45 * made read-only when single stepping is enabled.
47 static gpointer ss_trigger_page;
49 /* Enabled breakpoints read from this trigger page */
50 static gpointer bp_trigger_page;
52 /* Structure used by the sequence points in AOTed code */
/* NOTE(review): the struct declaration line itself is missing from this listing. */
54 gpointer ss_trigger_page;
55 gpointer bp_trigger_page;
56 guint8* bp_addrs [MONO_ZERO_LEN_ARRAY];
61 * floating point support: on ARM it is a mess, there are at least 3
62 * different setups, each of which binary incompat with the other.
63 * 1) FPA: old and ugly, but unfortunately what current distros use
64 * the double binary format has the two words swapped. 8 double registers.
65 * Implemented usually by kernel emulation.
66 * 2) softfloat: the compiler emulates all the fp ops. Usually uses the
67 * ugly swapped double format (I guess a softfloat-vfp exists, too, though).
68 * 3) VFP: the new and actually sensible and useful FP support. Implemented
69 * in HW or kernel-emulated, requires new tools. I think this is what symbian uses.
71 * The plan is to write the FPA support first. softfloat can be tested in a chroot.
73 int mono_exc_esp_offset = 0;
/* Ranges encodable as immediates in ARM load/store instructions. */
75 #define arm_is_imm12(v) ((v) > -4096 && (v) < 4096)
76 #define arm_is_imm8(v) ((v) > -256 && (v) < 256)
77 #define arm_is_fpimm8(v) ((v) >= -1020 && (v) <= 1020)
/* Mask/value pair recognizing an "ldr pc, [...]" instruction word. */
79 #define LDR_MASK ((0xf << ARMCOND_SHIFT) | (3 << 26) | (1 << 22) | (1 << 20) | (15 << 12))
80 #define LDR_PC_VAL ((ARMCOND_AL << ARMCOND_SHIFT) | (1 << 26) | (0 << 22) | (1 << 20) | (15 << 12))
81 #define IS_LDR_PC(val) (((val) & LDR_MASK) == LDR_PC_VAL)
/* Exact instruction encodings matched by mono_arch_get_vcall_slot () below. */
83 #define ADD_LR_PC_4 ((ARMCOND_AL << ARMCOND_SHIFT) | (1 << 25) | (1 << 23) | (ARMREG_PC << 16) | (ARMREG_LR << 12) | 4)
84 #define MOV_LR_PC ((ARMCOND_AL << ARMCOND_SHIFT) | (1 << 24) | (0xa << 20) | (ARMREG_LR << 12) | ARMREG_PC)
87 /* A variant of ARM_LDR_IMM which can handle large offsets */
/* If the offset fits in 12 bits, emit one ldr; otherwise load the offset into
 * scratch_reg first and use a register-offset load. scratch_reg must differ
 * from basereg. NOTE(review): the else/closing lines are missing from this listing. */
88 #define ARM_LDR_IMM_GENERAL(code, dreg, basereg, offset, scratch_reg) do { \
89 if (arm_is_imm12 ((offset))) { \
90 ARM_LDR_IMM (code, (dreg), (basereg), (offset)); \
92 g_assert ((scratch_reg) != (basereg)); \
93 code = mono_arm_emit_load_imm (code, (scratch_reg), (offset)); \
94 ARM_LDR_REG_REG (code, (dreg), (basereg), (scratch_reg)); \
/* Store counterpart of ARM_LDR_IMM_GENERAL, same large-offset strategy. */
98 #define ARM_STR_IMM_GENERAL(code, dreg, basereg, offset, scratch_reg) do { \
99 if (arm_is_imm12 ((offset))) { \
100 ARM_STR_IMM (code, (dreg), (basereg), (offset)); \
102 g_assert ((scratch_reg) != (basereg)); \
103 code = mono_arm_emit_load_imm (code, (scratch_reg), (offset)); \
104 ARM_STR_REG_REG (code, (dreg), (basereg), (scratch_reg)); \
/* Return a human-readable name for integer register 'reg' (0-15).
 * NOTE(review): return type, "arm_pc" entry and return statements are
 * missing from this listing. */
109 mono_arch_regname (int reg)
111 static const char * rnames[] = {
112 "arm_r0", "arm_r1", "arm_r2", "arm_r3", "arm_v1",
113 "arm_v2", "arm_v3", "arm_v4", "arm_v5", "arm_v6",
114 "arm_v7", "arm_fp", "arm_ip", "arm_sp", "arm_lr",
117 if (reg >= 0 && reg < 16)
/* Return a human-readable name for float register 'reg' (0-31).
 * NOTE(review): the last table entries and the return statements are
 * missing from this listing. */
123 mono_arch_fregname (int reg)
125 static const char * rnames[] = {
126 "arm_f0", "arm_f1", "arm_f2", "arm_f3", "arm_f4",
127 "arm_f5", "arm_f6", "arm_f7", "arm_f8", "arm_f9",
128 "arm_f10", "arm_f11", "arm_f12", "arm_f13", "arm_f14",
129 "arm_f15", "arm_f16", "arm_f17", "arm_f18", "arm_f19",
130 "arm_f20", "arm_f21", "arm_f22", "arm_f23", "arm_f24",
131 "arm_f25", "arm_f26", "arm_f27", "arm_f28", "arm_f29",
134 if (reg >= 0 && reg < 32)
/* Emit "dreg = sreg + imm" for an arbitrary immediate: use a single
 * add with a rotated 8-bit immediate when encodable, otherwise load the
 * immediate into dreg first (hence dreg must differ from sreg) and add. */
142 emit_big_add (guint8 *code, int dreg, int sreg, int imm)
144 int imm8, rot_amount;
145 if ((imm8 = mono_arm_is_rotated_imm8 (imm, &rot_amount)) >= 0) {
146 ARM_ADD_REG_IMM (code, dreg, sreg, imm8, rot_amount);
149 g_assert (dreg != sreg);
150 code = mono_arm_emit_load_imm (code, dreg, imm);
151 ARM_ADD_REG_REG (code, dreg, dreg, sreg);
/* Emit a word-wise copy of 'size' bytes from sreg+soffset to dreg+doffset.
 * Large copies (> 4 words) use a counted loop over r0-r3; smaller copies are
 * unrolled using LR as scratch. NOTE(review): listing is truncated — the
 * assert at the end suggests size is expected to be word-aligned on entry. */
156 emit_memcpy (guint8 *code, int size, int dreg, int doffset, int sreg, int soffset)
158 /* we can use r0-r3, since this is called only for incoming args on the stack */
159 if (size > sizeof (gpointer) * 4) {
161 code = emit_big_add (code, ARMREG_R0, sreg, soffset);
162 code = emit_big_add (code, ARMREG_R1, dreg, doffset);
163 start_loop = code = mono_arm_emit_load_imm (code, ARMREG_R2, size);
/* loop body: copy one word, bump pointers, decrement count, branch back while != 0 */
164 ARM_LDR_IMM (code, ARMREG_R3, ARMREG_R0, 0);
165 ARM_STR_IMM (code, ARMREG_R3, ARMREG_R1, 0);
166 ARM_ADD_REG_IMM8 (code, ARMREG_R0, ARMREG_R0, 4);
167 ARM_ADD_REG_IMM8 (code, ARMREG_R1, ARMREG_R1, 4);
168 ARM_SUBS_REG_IMM8 (code, ARMREG_R2, ARMREG_R2, 4);
169 ARM_B_COND (code, ARMCOND_NE, 0);
170 arm_patch (code - 4, start_loop);
/* unrolled path: offsets must stay encodable as 12-bit immediates */
173 if (arm_is_imm12 (doffset) && arm_is_imm12 (doffset + size) &&
174 arm_is_imm12 (soffset) && arm_is_imm12 (soffset + size)) {
176 ARM_LDR_IMM (code, ARMREG_LR, sreg, soffset);
177 ARM_STR_IMM (code, ARMREG_LR, dreg, doffset);
/* offsets too large: materialize base+offset into r0/r1 and copy from there */
183 code = emit_big_add (code, ARMREG_R0, sreg, soffset);
184 code = emit_big_add (code, ARMREG_R1, dreg, doffset);
185 doffset = soffset = 0;
187 ARM_LDR_IMM (code, ARMREG_LR, ARMREG_R0, soffset);
188 ARM_STR_IMM (code, ARMREG_LR, ARMREG_R1, doffset);
194 g_assert (size == 0);
/* Emit an indirect call through 'reg': blx when available, otherwise the
 * classic "mov lr, pc; mov pc, reg" pair. NOTE(review): the condition
 * selecting between the two paths (presumably v5_supported) is missing
 * from this listing — confirm against the full source. */
199 emit_call_reg (guint8 *code, int reg)
202 ARM_BLX_REG (code, reg);
204 ARM_MOV_REG_REG (code, ARMREG_LR, ARMREG_PC);
208 ARM_MOV_REG_REG (code, ARMREG_PC, reg);
/* Emit a call sequence. For dynamic methods the callee address cannot be
 * patched in place, so a NULL literal pool slot is emitted after the load
 * and called through IP. NOTE(review): the non-dynamic path is missing
 * from this listing. */
214 emit_call_seq (MonoCompile *cfg, guint8 *code)
216 if (cfg->method->dynamic) {
217 ARM_LDR_IMM (code, ARMREG_IP, ARMREG_PC, 0);
219 *(gpointer*)code = NULL;
221 code = emit_call_reg (code, ARMREG_IP);
/* Move a call's return value from its ABI location into ins->dreg.
 * Only the FP-return cases are visible here: FPA returns in F0, VFP
 * re-packs r0 (R4) or r0:r1 (R8) into the destination FP register. */
229 emit_move_return_value (MonoCompile *cfg, MonoInst *ins, guint8 *code)
231 switch (ins->opcode) {
234 case OP_FCALL_MEMBASE:
236 if (ins->dreg != ARM_FPA_F0)
237 ARM_MVFD (code, ins->dreg, ARM_FPA_F0)
238 #elif defined(ARM_FPU_VFP)
239 if (((MonoCallInst*)ins)->signature->ret->type == MONO_TYPE_R4) {
240 ARM_FMSR (code, ins->dreg, ARMREG_R0);
241 ARM_CVTS (code, ins->dreg, ins->dreg);
243 ARM_FMDRR (code, ARMREG_R0, ARMREG_R1, ins->dreg);
252 #endif /* #ifndef DISABLE_JIT */
255 * mono_arch_get_argument_info:
256 * @csig: a method signature
257 * @param_count: the number of parameters to consider
258 * @arg_info: an array to store the result infos
260 * Gathers information on parameters such as size, alignment and
261 * padding. arg_info should be large enought to hold param_count + 1 entries.
263 * Returns the size of the activation frame.
266 mono_arch_get_argument_info (MonoMethodSignature *csig, int param_count, MonoJitArgumentInfo *arg_info)
268 int k, frame_size = 0;
269 guint32 size, align, pad;
/* a struct return adds a hidden pointer slot to the frame */
272 if (MONO_TYPE_ISSTRUCT (csig->ret)) {
273 frame_size += sizeof (gpointer);
277 arg_info [0].offset = offset;
/* NOTE(review): presumably the 'this' pointer slot — the guarding
 * condition is missing from this listing. */
280 frame_size += sizeof (gpointer);
284 arg_info [0].size = frame_size;
286 for (k = 0; k < param_count; k++) {
287 size = mini_type_stack_size_full (NULL, csig->params [k], &align, csig->pinvoke);
289 /* ignore alignment for now */
/* pad frame_size up to 'align' and record the padding per argument */
292 frame_size += pad = (align - (frame_size & (align - 1))) & (align - 1);
293 arg_info [k].pad = pad;
295 arg_info [k + 1].pad = 0;
296 arg_info [k + 1].size = size;
298 arg_info [k + 1].offset = offset;
/* final frame alignment */
302 align = MONO_ARCH_FRAME_ALIGNMENT;
303 frame_size += pad = (align - (frame_size & (align - 1))) & (align - 1);
304 arg_info [k].pad = pad;
/* Decode an "ldr pc, [rX, #off]" instruction word: extract the base register
 * and 12-bit offset, honour the U (sign) bit, and return the base register's
 * runtime value with *displacement set to the (signed) offset. */
310 decode_vcall_slot_from_ldr (guint32 ldr, mgreg_t *regs, int *displacement)
314 reg = (ldr >> 16 ) & 0xf;
315 offset = ldr & 0xfff;
316 if (((ldr >> 23) & 1) == 0) /*U bit, 0 means negative and 1 positive*/
318 /*g_print ("found vcall at r%d + %d for code at %p 0x%x\n", reg, offset, code, *code);*/
319 o = (gpointer)regs [reg];
321 *displacement = offset;
/* Given the return address of a virtual/interface call, identify the
 * "ldr pc" instruction that performed it and decode the vtable slot
 * address from it. Direct branches are ignored (returns fall through). */
326 mono_arch_get_vcall_slot (guint8 *code_ptr, mgreg_t *regs, int *displacement)
328 guint32* code = (guint32*)code_ptr;
330 /* Locate the address of the method-specific trampoline. The call using
331 the vtable slot that took the processing flow to 'arch_create_jit_trampoline'
332 looks something like this:
341 The call sequence could be also:
344 function pointer literal
348 Note that on ARM5+ we can use one instruction instead of the last two.
349 Therefore, we need to locate the 'ldr rA' instruction to know which
350 register was used to hold the method addrs.
353 /* This is the instruction after "ldc pc, xxx", "mov pc, xxx" or "bl xxx" could be either the IMT value or some other instruction*/
356 /* Three possible code sequences can happen here:
360 * ldr pc, [rX - #offset]
366 * ldr pc, [rX - #offset]
368 * direct branch with bl:
372 * direct branch with mov:
376 * We only need to identify interface and virtual calls, the others can be ignored.
/* interface call: "add lr, pc, #4" precedes the "ldr pc" */
379 if (IS_LDR_PC (code [-1]) && code [-2] == ADD_LR_PC_4)
380 return decode_vcall_slot_from_ldr (code [-1], regs, displacement);
/* virtual call: "mov lr, pc" precedes the "ldr pc" at the return address */
382 if (IS_LDR_PC (code [0]) && code [-1] == MOV_LR_PC)
383 return decode_vcall_slot_from_ldr (code [0], regs, displacement);
388 #define MAX_ARCH_DELEGATE_PARAMS 3
/* Generate a small thunk for Delegate.Invoke. has_target: load method_ptr
 * into IP, replace 'this' (r0) with the delegate target, jump. Otherwise:
 * load method_ptr and slide each argument register down by one to drop the
 * delegate argument. Returns the thunk; *code_size gets its length.
 * NOTE(review): param_count is declared gboolean but used as a count —
 * looks like it should be int; confirm against the full source. */
391 get_delegate_invoke_impl (gboolean has_target, gboolean param_count, guint32 *code_size)
393 guint8 *code, *start;
396 start = code = mono_global_codeman_reserve (12);
398 /* Replace the this argument with the target */
399 ARM_LDR_IMM (code, ARMREG_IP, ARMREG_R0, G_STRUCT_OFFSET (MonoDelegate, method_ptr));
400 ARM_LDR_IMM (code, ARMREG_R0, ARMREG_R0, G_STRUCT_OFFSET (MonoDelegate, target));
401 ARM_MOV_REG_REG (code, ARMREG_PC, ARMREG_IP);
403 g_assert ((code - start) <= 12);
405 mono_arch_flush_icache (start, 12);
/* no-target variant: 2 fixed instructions + one mov per parameter */
409 size = 8 + param_count * 4;
410 start = code = mono_global_codeman_reserve (size);
412 ARM_LDR_IMM (code, ARMREG_IP, ARMREG_R0, G_STRUCT_OFFSET (MonoDelegate, method_ptr));
413 /* slide down the arguments */
414 for (i = 0; i < param_count; ++i) {
415 ARM_MOV_REG_REG (code, (ARMREG_R0 + i), (ARMREG_R0 + i + 1));
417 ARM_MOV_REG_REG (code, ARMREG_PC, ARMREG_IP);
419 g_assert ((code - start) <= size);
421 mono_arch_flush_icache (start, size);
425 *code_size = code - start;
431 * mono_arch_get_delegate_invoke_impls:
433 * Return a list of MonoAotTrampInfo structures for the delegate invoke impl
437 mono_arch_get_delegate_invoke_impls (void)
/* one has-target thunk plus one no-target thunk per supported arity */
444 code = get_delegate_invoke_impl (TRUE, 0, &code_len);
445 res = g_slist_prepend (res, mono_aot_tramp_info_create (g_strdup ("delegate_invoke_impl_has_target"), code, code_len));
447 for (i = 0; i <= MAX_ARCH_DELEGATE_PARAMS; ++i) {
448 code = get_delegate_invoke_impl (FALSE, i, &code_len);
449 res = g_slist_prepend (res, mono_aot_tramp_info_create (g_strdup_printf ("delegate_invoke_impl_target_%d", i), code, code_len));
/* Return (and cache) a delegate-invoke thunk matching 'sig'. Struct returns
 * and signatures with non-regsize or too many parameters are rejected.
 * Caches are protected by the arch mutex; AOT builds fetch named code
 * instead of generating it. NOTE(review): listing is truncated — cache
 * hit/early-return lines are missing. */
456 mono_arch_get_delegate_invoke_impl (MonoMethodSignature *sig, gboolean has_target)
458 guint8 *code, *start;
460 /* FIXME: Support more cases */
461 if (MONO_TYPE_ISSTRUCT (sig->ret))
465 static guint8* cached = NULL;
466 mono_mini_arch_lock ();
468 mono_mini_arch_unlock ();
473 start = mono_aot_get_named_code ("delegate_invoke_impl_has_target");
475 start = get_delegate_invoke_impl (TRUE, 0, NULL);
477 mono_mini_arch_unlock ();
480 static guint8* cache [MAX_ARCH_DELEGATE_PARAMS + 1] = {NULL};
483 if (sig->param_count > MAX_ARCH_DELEGATE_PARAMS)
485 for (i = 0; i < sig->param_count; ++i)
486 if (!mono_is_regsize_var (sig->params [i]))
489 mono_mini_arch_lock ();
490 code = cache [sig->param_count];
492 mono_mini_arch_unlock ();
497 char *name = g_strdup_printf ("delegate_invoke_impl_target_%d", sig->param_count);
498 start = mono_aot_get_named_code (name);
501 start = get_delegate_invoke_impl (FALSE, sig->param_count, NULL);
503 cache [sig->param_count] = start;
504 mono_mini_arch_unlock ();
/* Recover the 'this' argument from the saved register state of a call:
 * normally r0, but r1 when r0 carries the hidden struct-return pointer. */
512 mono_arch_get_this_arg_from_call (MonoGenericSharingContext *gsctx, MonoMethodSignature *sig, mgreg_t *regs, guint8 *code)
514 /* FIXME: handle returning a struct */
515 if (MONO_TYPE_ISSTRUCT (sig->ret))
516 return (gpointer)regs [ARMREG_R1];
517 return (gpointer)regs [ARMREG_R0];
521 * Initialize the cpu to execute managed code.
524 mono_arch_cpu_init (void)
529 * Initialize architecture specific code.
532 mono_arch_init (void)
534 InitializeCriticalSection (&mini_arch_mutex);
/* single-step page stays readable; breakpoint page is made inaccessible so
 * reads from it fault when a breakpoint is armed */
536 ss_trigger_page = mono_valloc (NULL, mono_pagesize (), MONO_MMAP_READ|MONO_MMAP_32BIT);
537 bp_trigger_page = mono_valloc (NULL, mono_pagesize (), MONO_MMAP_READ|MONO_MMAP_32BIT);
538 mono_mprotect (bp_trigger_page, mono_pagesize (), 0);
542 * Cleanup architecture specific code.
545 mono_arch_cleanup (void)
550 * This function returns the optimizations supported on this cpu.
/* (sic: "optimizazions" is the historical name used across Mono backends.)
 * Probes /proc/cpuinfo for the architecture version and thumb support. */
553 mono_arch_cpu_optimizazions (guint32 *exclude_mask)
557 thumb_supported = TRUE;
562 FILE *file = fopen ("/proc/cpuinfo", "r");
/* NOTE(review): the NULL check on 'file' is not visible in this listing —
 * confirm it exists in the full source. */
564 while ((line = fgets (buf, 512, file))) {
565 if (strncmp (line, "Processor", 9) == 0) {
566 char *ver = strstr (line, "(v");
567 if (ver && (ver [2] == '5' || ver [2] == '6' || ver [2] == '7'))
569 if (ver && (ver [2] == '7'))
573 if (strncmp (line, "Features", 8) == 0) {
574 char *th = strstr (line, "thumb");
576 thumb_supported = TRUE;
584 /*printf ("features: v5: %d, thumb: %d\n", v5_supported, thumb_supported);*/
588 /* no arm-specific optimizations yet */
/* Return whether 't' fits in a single integer register (pointers, object
 * references, non-valuetype generic instances). NOTE(review): the integer
 * cases and return statements are missing from this listing. */
596 is_regsize_var (MonoType *t) {
599 t = mini_type_get_underlying_type (NULL, t);
606 case MONO_TYPE_FNPTR:
608 case MONO_TYPE_OBJECT:
609 case MONO_TYPE_STRING:
610 case MONO_TYPE_CLASS:
611 case MONO_TYPE_SZARRAY:
612 case MONO_TYPE_ARRAY:
614 case MONO_TYPE_GENERICINST:
615 if (!mono_type_generic_inst_is_valuetype (t))
618 case MONO_TYPE_VALUETYPE:
/* Collect the variables eligible for global register allocation: live,
 * register-sized locals/args that are neither volatile nor indirect. */
625 mono_arch_get_allocatable_int_vars (MonoCompile *cfg)
630 for (i = 0; i < cfg->num_varinfo; i++) {
631 MonoInst *ins = cfg->varinfo [i];
632 MonoMethodVar *vmv = MONO_VARINFO (cfg, i);
/* unused variables (empty live range) are skipped */
635 if (vmv->range.first_use.abs_pos >= vmv->range.last_use.abs_pos)
638 if (ins->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT) || (ins->opcode != OP_LOCAL && ins->opcode != OP_ARG))
641 /* we can only allocate 32 bit values */
642 if (is_regsize_var (ins->inst_vtype)) {
643 g_assert (MONO_VARINFO (cfg, i)->reg == -1);
644 g_assert (i == vmv->idx);
645 vars = mono_varlist_insert_sorted (cfg, vars, vmv, FALSE);
652 #define USE_EXTRA_TEMPS 0
/* Return the list of callee-saved registers available to the global
 * register allocator (V1-V4, plus V5 unless it is reserved for the
 * vtable/rgctx/IMT argument). */
655 mono_arch_get_global_int_regs (MonoCompile *cfg)
660 * FIXME: Interface calls might go through a static rgctx trampoline which
661 * sets V5, but it doesn't save it, so we need to save it ourselves, and
664 if (cfg->flags & MONO_CFG_HAS_CALLS)
665 cfg->uses_rgctx_reg = TRUE;
667 regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_V1));
668 regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_V2));
669 regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_V3));
670 regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_V4));
671 if (!(cfg->compile_aot || cfg->uses_rgctx_reg))
672 /* V5 is reserved for passing the vtable/rgctx/IMT method */
673 regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_V5));
674 /*regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_V6));*/
675 /*regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_V7));*/
681 * mono_arch_regalloc_cost:
683 * Return the cost, in number of memory references, of the action of
684 * allocating the variable VMV into a register during global register
/* NOTE(review): the function body is missing from this listing. */
688 mono_arch_regalloc_cost (MonoCompile *cfg, MonoMethodVar *vmv)
694 #endif /* #ifndef DISABLE_JIT */
696 #ifndef __GNUC_PREREQ
697 #define __GNUC_PREREQ(maj, min) (0)
/* Flush the instruction cache for freshly generated code, using the best
 * mechanism available per platform: Darwin's sys_icache_invalidate, GCC's
 * __clear_cache builtin, or the Linux/Android cacheflush syscall via swi. */
701 mono_arch_flush_icache (guint8 *code, gint size)
704 sys_icache_invalidate (code, size);
705 #elif __GNUC_PREREQ(4, 1)
706 __clear_cache (code, code + size);
707 #elif defined(PLATFORM_ANDROID)
708 const int syscall = 0xf0002;
716 : "r" (code), "r" (code + size), "r" (syscall)
717 : "r0", "r1", "r7", "r2"
720 __asm __volatile ("mov r0, %0\n"
723 "swi 0x9f0002 @ sys_cacheflush"
725 : "r" (code), "r" (code + size), "r" (0)
726 : "r0", "r1", "r3" );
/* NOTE(review): fragments of the ArgInfo/CallInfo declarations — most
 * fields are missing from this listing. */
743 guint16 vtsize; /* in param area */
746 guint8 size : 4; /* 1, 2, 4, 8, or regs used by RegTypeStructByVal */
753 gboolean vtype_retaddr;
762 /*#define __alignof__(a) sizeof(a)*/
/* portable alignof: offset of 'x' after a single char in a struct */
763 #define __alignof__(type) G_STRUCT_OFFSET(struct { char c; type x; }, x)
/* Assign the next argument to a register or the stack per the ARM calling
 * convention. 'simple' arguments take one register (r0-r3) or one stack
 * slot; 64-bit arguments take an (aligned) register pair, may split across
 * r3 and the stack, or go fully on the stack. Updates *gr / *stack_size. */
769 add_general (guint *gr, guint *stack_size, ArgInfo *ainfo, gboolean simple)
772 if (*gr > ARMREG_R3) {
773 ainfo->offset = *stack_size;
774 ainfo->reg = ARMREG_SP; /* in the caller */
775 ainfo->storage = RegTypeBase;
778 ainfo->storage = RegTypeGeneral;
782 #if defined(__APPLE__) && defined(MONO_CROSS_COMPILE)
/* whether a 64-bit value may split between r3 and the stack depends on the
 * platform's long-long alignment */
785 int i8_align = __alignof__ (gint64);
789 gboolean split = i8_align == 4;
791 gboolean split = TRUE;
794 if (*gr == ARMREG_R3 && split) {
795 /* first word in r3 and the second on the stack */
796 ainfo->offset = *stack_size;
797 ainfo->reg = ARMREG_SP; /* in the caller */
798 ainfo->storage = RegTypeBaseGen;
800 } else if (*gr >= ARMREG_R3) {
802 /* darwin aligns longs to 4 byte only */
808 ainfo->offset = *stack_size;
809 ainfo->reg = ARMREG_SP; /* in the caller */
810 ainfo->storage = RegTypeBase;
/* skip an odd register so the pair starts even when 8-byte aligned */
814 if (i8_align == 8 && ((*gr) & 1))
817 ainfo->storage = RegTypeIRegPair;
/* Compute the calling-convention layout (CallInfo) for 'sig': where the
 * return value lives, where each argument goes (register, register pair,
 * by-value struct split across regs/stack, or stack), the vararg signature
 * cookie position, and the total stack usage. Allocated from 'mp' when
 * given, else heap-allocated. NOTE(review): this listing is truncated;
 * several case labels and closing braces are missing. */
826 get_call_info (MonoMemPool *mp, MonoMethodSignature *sig, gboolean is_pinvoke)
829 int n = sig->hasthis + sig->param_count;
830 MonoType *simpletype;
831 guint32 stack_size = 0;
835 cinfo = mono_mempool_alloc0 (mp, sizeof (CallInfo) + (sizeof (ArgInfo) * n));
837 cinfo = g_malloc0 (sizeof (CallInfo) + (sizeof (ArgInfo) * n));
842 /* FIXME: handle returning a struct */
843 if (MONO_TYPE_ISSTRUCT (sig->ret)) {
/* small pinvoke structs are returned by value in registers; everything
 * else gets a hidden return-address argument in r0 */
846 if (is_pinvoke && mono_class_native_size (mono_class_from_mono_type (sig->ret), &align) <= sizeof (gpointer)) {
847 cinfo->ret.storage = RegTypeStructByVal;
849 add_general (&gr, &stack_size, &cinfo->ret, TRUE);
850 cinfo->struct_ret = ARMREG_R0;
851 cinfo->vtype_retaddr = TRUE;
857 add_general (&gr, &stack_size, cinfo->args + n, TRUE);
860 DEBUG(printf("params: %d\n", sig->param_count));
861 for (i = 0; i < sig->param_count; ++i) {
862 if ((sig->call_convention == MONO_CALL_VARARG) && (i == sig->sentinelpos)) {
863 /* Prevent implicit arguments and sig_cookie from
864 being passed in registers */
866 /* Emit the signature cookie just before the implicit arguments */
867 add_general (&gr, &stack_size, &cinfo->sig_cookie, TRUE);
869 DEBUG(printf("param %d: ", i));
870 if (sig->params [i]->byref) {
871 DEBUG(printf("byref\n"));
872 add_general (&gr, &stack_size, cinfo->args + n, TRUE);
876 simpletype = mini_type_get_underlying_type (NULL, sig->params [i]);
877 switch (simpletype->type) {
878 case MONO_TYPE_BOOLEAN:
881 cinfo->args [n].size = 1;
882 add_general (&gr, &stack_size, cinfo->args + n, TRUE);
888 cinfo->args [n].size = 2;
889 add_general (&gr, &stack_size, cinfo->args + n, TRUE);
894 cinfo->args [n].size = 4;
895 add_general (&gr, &stack_size, cinfo->args + n, TRUE);
901 case MONO_TYPE_FNPTR:
902 case MONO_TYPE_CLASS:
903 case MONO_TYPE_OBJECT:
904 case MONO_TYPE_STRING:
905 case MONO_TYPE_SZARRAY:
906 case MONO_TYPE_ARRAY:
908 cinfo->args [n].size = sizeof (gpointer);
909 add_general (&gr, &stack_size, cinfo->args + n, TRUE);
912 case MONO_TYPE_GENERICINST:
913 if (!mono_type_generic_inst_is_valuetype (simpletype)) {
914 cinfo->args [n].size = sizeof (gpointer);
915 add_general (&gr, &stack_size, cinfo->args + n, TRUE);
920 case MONO_TYPE_TYPEDBYREF:
921 case MONO_TYPE_VALUETYPE: {
927 if (simpletype->type == MONO_TYPE_TYPEDBYREF) {
928 size = sizeof (MonoTypedRef);
929 align = sizeof (gpointer);
931 MonoClass *klass = mono_class_from_mono_type (sig->params [i]);
933 size = mono_class_native_size (klass, &align);
935 size = mono_class_value_size (klass, &align);
937 DEBUG(printf ("load %d bytes struct\n",
938 mono_class_native_size (sig->params [i]->data.klass, NULL)));
/* round the struct size up to whole pointer-sized words */
941 align_size += (sizeof (gpointer) - 1);
942 align_size &= ~(sizeof (gpointer) - 1);
943 nwords = (align_size + sizeof (gpointer) -1 ) / sizeof (gpointer);
944 cinfo->args [n].storage = RegTypeStructByVal;
945 /* FIXME: align stack_size if needed */
947 if (align >= 8 && (gr & 1))
/* split the struct between the remaining registers and the stack */
950 if (gr > ARMREG_R3) {
951 cinfo->args [n].size = 0;
952 cinfo->args [n].vtsize = nwords;
954 int rest = ARMREG_R3 - gr + 1;
955 int n_in_regs = rest >= nwords? nwords: rest;
957 cinfo->args [n].size = n_in_regs;
958 cinfo->args [n].vtsize = nwords - n_in_regs;
959 cinfo->args [n].reg = gr;
963 cinfo->args [n].offset = stack_size;
964 /*g_print ("offset for arg %d at %d\n", n, stack_size);*/
965 stack_size += nwords * sizeof (gpointer);
972 cinfo->args [n].size = 8;
973 add_general (&gr, &stack_size, cinfo->args + n, FALSE);
977 g_error ("Can't trampoline 0x%x", sig->params [i]->type);
981 /* Handle the case where there are no implicit arguments */
982 if ((sig->call_convention == MONO_CALL_VARARG) && (i == sig->sentinelpos)) {
983 /* Prevent implicit arguments and sig_cookie from
984 being passed in registers */
986 /* Emit the signature cookie just before the implicit arguments */
987 add_general (&gr, &stack_size, &cinfo->sig_cookie, TRUE);
/* classify the return value */
991 simpletype = mini_type_get_underlying_type (NULL, sig->ret);
992 switch (simpletype->type) {
993 case MONO_TYPE_BOOLEAN:
1004 case MONO_TYPE_FNPTR:
1005 case MONO_TYPE_CLASS:
1006 case MONO_TYPE_OBJECT:
1007 case MONO_TYPE_SZARRAY:
1008 case MONO_TYPE_ARRAY:
1009 case MONO_TYPE_STRING:
1010 cinfo->ret.storage = RegTypeGeneral;
1011 cinfo->ret.reg = ARMREG_R0;
1015 cinfo->ret.storage = RegTypeIRegPair;
1016 cinfo->ret.reg = ARMREG_R0;
1020 cinfo->ret.storage = RegTypeFP;
1021 cinfo->ret.reg = ARMREG_R0;
1022 /* FIXME: cinfo->ret.reg = ???;
1023 cinfo->ret.storage = RegTypeFP;*/
1025 case MONO_TYPE_GENERICINST:
1026 if (!mono_type_generic_inst_is_valuetype (simpletype)) {
1027 cinfo->ret.storage = RegTypeGeneral;
1028 cinfo->ret.reg = ARMREG_R0;
1032 case MONO_TYPE_VALUETYPE:
1033 case MONO_TYPE_TYPEDBYREF:
1034 if (cinfo->ret.storage != RegTypeStructByVal)
1035 cinfo->ret.storage = RegTypeStructByAddr;
1037 case MONO_TYPE_VOID:
1040 g_error ("Can't handle as return value 0x%x", sig->ret->type);
1044 /* align stack size to 8 */
1045 DEBUG (printf (" stack size: %d (%d)\n", (stack_size + 15) & ~15, stack_size));
1046 stack_size = (stack_size + 7) & ~7;
1048 cinfo->stack_usage = stack_size;
1055 * Set var information according to the calling convention. arm version.
1056 * The locals var stuff should most likely be split in another method.
/* Lays out the stack frame: picks the frame register, assigns offsets to
 * the return value slot, seq-point variables, locals and arguments, and
 * records the final frame size in cfg->stack_offset. Offsets grow upward
 * (MONO_CFG_HAS_SPILLUP). NOTE(review): this listing is truncated; several
 * branches and closing braces are missing. */
1059 mono_arch_allocate_vars (MonoCompile *cfg)
1061 MonoMethodSignature *sig;
1062 MonoMethodHeader *header;
1064 int i, offset, size, align, curinst;
1065 int frame_reg = ARMREG_FP;
1069 sig = mono_method_signature (cfg->method);
1071 if (!cfg->arch.cinfo)
1072 cfg->arch.cinfo = get_call_info (cfg->mempool, sig, sig->pinvoke);
1073 cinfo = cfg->arch.cinfo;
1075 /* FIXME: this will change when we use FP as gcc does */
1076 cfg->flags |= MONO_CFG_HAS_SPILLUP;
1078 /* allow room for the vararg method args: void* and long/double */
1079 if (mono_jit_trace_calls != NULL && mono_trace_eval (cfg->method))
1080 cfg->param_area = MAX (cfg->param_area, sizeof (gpointer)*8);
1082 header = mono_method_get_header (cfg->method);
1085 * We use the frame register also for any method that has
1086 * exception clauses. This way, when the handlers are called,
1087 * the code will reference local variables using the frame reg instead of
1088 * the stack pointer: if we had to restore the stack pointer, we'd
1089 * corrupt the method frames that are already on the stack (since
1090 * filters get called before stack unwinding happens) when the filter
1091 * code would call any method (this also applies to finally etc.).
1093 if ((cfg->flags & MONO_CFG_HAS_ALLOCA) || header->num_clauses)
1094 frame_reg = ARMREG_FP;
1095 cfg->frame_reg = frame_reg;
1096 if (frame_reg != ARMREG_SP) {
1097 cfg->used_int_regs |= 1 << frame_reg;
1100 if (cfg->compile_aot || cfg->uses_rgctx_reg)
1101 /* V5 is reserved for passing the vtable/rgctx/IMT method */
1102 cfg->used_int_regs |= (1 << ARMREG_V5);
/* scalar returns live in r0 (register variable) */
1106 if (!MONO_TYPE_ISSTRUCT (sig->ret)) {
1107 switch (mini_type_get_underlying_type (NULL, sig->ret)->type) {
1108 case MONO_TYPE_VOID:
1111 cfg->ret->opcode = OP_REGVAR;
1112 cfg->ret->inst_c0 = ARMREG_R0;
1116 /* local vars are at a positive offset from the stack pointer */
1118 * also note that if the function uses alloca, we use FP
1119 * to point at the local variables.
1121 offset = 0; /* linkage area */
1122 /* align the offset to 16 bytes: not sure this is needed here */
1124 //offset &= ~(8 - 1);
1126 /* add parameter area size for called functions */
1127 offset += cfg->param_area;
1130 if (cfg->flags & MONO_CFG_HAS_FPOUT)
1133 /* allow room to save the return value */
1134 if (mono_jit_trace_calls != NULL && mono_trace_eval (cfg->method))
1137 /* the MonoLMF structure is stored just below the stack pointer */
1138 if (MONO_TYPE_ISSTRUCT (sig->ret)) {
1139 if (cinfo->ret.storage == RegTypeStructByVal) {
1140 cfg->ret->opcode = OP_REGOFFSET;
1141 cfg->ret->inst_basereg = cfg->frame_reg;
1142 offset += sizeof (gpointer) - 1;
1143 offset &= ~(sizeof (gpointer) - 1);
1144 cfg->ret->inst_offset = - offset;
1146 ins = cfg->vret_addr;
1147 offset += sizeof(gpointer) - 1;
1148 offset &= ~(sizeof(gpointer) - 1);
1149 ins->inst_offset = offset;
1150 ins->opcode = OP_REGOFFSET;
1151 ins->inst_basereg = frame_reg;
1152 if (G_UNLIKELY (cfg->verbose_level > 1)) {
1153 printf ("vret_addr =");
1154 mono_print_ins (cfg->vret_addr);
1157 offset += sizeof(gpointer);
1160 /* Allocate these first so they have a small offset, OP_SEQ_POINT depends on this */
1161 if (cfg->arch.seq_point_info_var) {
1164 ins = cfg->arch.seq_point_info_var;
1168 offset += align - 1;
1169 offset &= ~(align - 1);
1170 ins->opcode = OP_REGOFFSET;
1171 ins->inst_basereg = frame_reg;
1172 ins->inst_offset = offset;
1175 ins = cfg->arch.ss_trigger_page_var;
1178 offset += align - 1;
1179 offset &= ~(align - 1);
1180 ins->opcode = OP_REGOFFSET;
1181 ins->inst_basereg = frame_reg;
1182 ins->inst_offset = offset;
/* lay out local variables */
1186 curinst = cfg->locals_start;
1187 for (i = curinst; i < cfg->num_varinfo; ++i) {
1188 ins = cfg->varinfo [i];
1189 if ((ins->flags & MONO_INST_IS_DEAD) || ins->opcode == OP_REGVAR || ins->opcode == OP_REGOFFSET)
1192 /* inst->backend.is_pinvoke indicates native sized value types, this is used by the
1193 * pinvoke wrappers when they call functions returning structure */
1194 if (ins->backend.is_pinvoke && MONO_TYPE_ISSTRUCT (ins->inst_vtype) && ins->inst_vtype->type != MONO_TYPE_TYPEDBYREF) {
1195 size = mono_class_native_size (mono_class_from_mono_type (ins->inst_vtype), &ualign);
1199 size = mono_type_size (ins->inst_vtype, &align);
1201 /* FIXME: if a structure is misaligned, our memcpy doesn't work,
1202 * since it loads/stores misaligned words, which don't do the right thing.
1204 if (align < 4 && size >= 4)
1206 offset += align - 1;
1207 offset &= ~(align - 1);
1208 ins->opcode = OP_REGOFFSET;
1209 ins->inst_offset = offset;
1210 ins->inst_basereg = frame_reg;
1212 //g_print ("allocating local %d to %d\n", i, inst->inst_offset);
/* lay out the 'this' argument, if present */
1217 ins = cfg->args [curinst];
1218 if (ins->opcode != OP_REGVAR) {
1219 ins->opcode = OP_REGOFFSET;
1220 ins->inst_basereg = frame_reg;
1221 offset += sizeof (gpointer) - 1;
1222 offset &= ~(sizeof (gpointer) - 1);
1223 ins->inst_offset = offset;
1224 offset += sizeof (gpointer);
1229 if (sig->call_convention == MONO_CALL_VARARG) {
1233 /* Allocate a local slot to hold the sig cookie address */
1234 offset += align - 1;
1235 offset &= ~(align - 1);
1236 cfg->sig_cookie = offset;
/* lay out the remaining arguments */
1240 for (i = 0; i < sig->param_count; ++i) {
1241 ins = cfg->args [curinst];
1243 if (ins->opcode != OP_REGVAR) {
1244 ins->opcode = OP_REGOFFSET;
1245 ins->inst_basereg = frame_reg;
1246 size = mini_type_stack_size_full (NULL, sig->params [i], &ualign, sig->pinvoke);
1248 /* FIXME: if a structure is misaligned, our memcpy doesn't work,
1249 * since it loads/stores misaligned words, which don't do the right thing.
1251 if (align < 4 && size >= 4)
1253 /* The code in the prolog () stores words when storing vtypes received in a register */
1254 if (MONO_TYPE_ISSTRUCT (sig->params [i]))
1256 offset += align - 1;
1257 offset &= ~(align - 1);
1258 ins->inst_offset = offset;
1264 /* align the offset to 8 bytes */
1269 cfg->stack_offset = offset;
/* Create the backend-specific variables a method needs before allocation:
 * the vret address argument for struct returns, and (for AOT sequence
 * points) the seq_point_info and ss_trigger_page locals. */
1273 mono_arch_create_vars (MonoCompile *cfg)
1275 MonoMethodSignature *sig;
1278 sig = mono_method_signature (cfg->method);
1280 if (!cfg->arch.cinfo)
1281 cfg->arch.cinfo = get_call_info (cfg->mempool, sig, sig->pinvoke);
1282 cinfo = cfg->arch.cinfo;
1284 if (cinfo->ret.storage == RegTypeStructByVal)
1285 cfg->ret_var_is_local = TRUE;
1287 if (MONO_TYPE_ISSTRUCT (sig->ret) && cinfo->ret.storage != RegTypeStructByVal) {
1288 cfg->vret_addr = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_ARG);
1289 if (G_UNLIKELY (cfg->verbose_level > 1)) {
1290 printf ("vret_addr = ");
1291 mono_print_ins (cfg->vret_addr);
1295 if (cfg->gen_seq_points && cfg->compile_aot) {
1296 MonoInst *ins = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1297 ins->flags |= MONO_INST_VOLATILE;
1298 cfg->arch.seq_point_info_var = ins;
1300 /* Allocate a separate variable for this to save 1 load per seq point */
1301 ins = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1302 ins->flags |= MONO_INST_VOLATILE;
1303 cfg->arch.ss_trigger_page_var = ins;
/* Emit the vararg signature cookie: build a trimmed copy of the call
 * signature (dropping everything before the sentinel) and store a pointer
 * to it at the cookie's stack offset. No-op for tail calls. */
1308 emit_sig_cookie (MonoCompile *cfg, MonoCallInst *call, CallInfo *cinfo)
1310 MonoMethodSignature *tmp_sig;
1313 if (call->tail_call)
1316 /* FIXME: Add support for signature tokens to AOT */
1317 cfg->disable_aot = TRUE;
1319 g_assert (cinfo->sig_cookie.storage == RegTypeBase);
1322 * mono_ArgIterator_Setup assumes the signature cookie is
1323 * passed first and all the arguments which were before it are
1324 * passed on the stack after the signature. So compensate by
1325 * passing a different signature.
1327 tmp_sig = mono_metadata_signature_dup (call->signature);
1328 tmp_sig->param_count -= call->signature->sentinelpos;
1329 tmp_sig->sentinelpos = 0;
1330 memcpy (tmp_sig->params, call->signature->params + call->signature->sentinelpos, tmp_sig->param_count * sizeof (MonoType*));
1332 MONO_INST_NEW (cfg, sig_arg, OP_ICONST);
1333 sig_arg->dreg = mono_alloc_ireg (cfg);
1334 sig_arg->inst_p0 = tmp_sig;
1335 MONO_ADD_INS (cfg->cbb, sig_arg);
1337 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, ARMREG_SP, cinfo->sig_cookie.offset, sig_arg->dreg);
/* Translate the native CallInfo into LLVM's argument classification.
 * Only simple register/register-pair arguments are supported; anything
 * else disables the LLVM path with an explanatory message. */
1342 mono_arch_get_llvm_call_info (MonoCompile *cfg, MonoMethodSignature *sig)
1347 LLVMCallInfo *linfo;
1349 n = sig->param_count + sig->hasthis;
1351 cinfo = get_call_info (cfg->mempool, sig, sig->pinvoke);
1353 linfo = mono_mempool_alloc0 (cfg->mempool, sizeof (LLVMCallInfo) + (sizeof (LLVMArgInfo) * n));
1356 * LLVM always uses the native ABI while we use our own ABI, the
1357 * only difference is the handling of vtypes:
1358 * - we only pass/receive them in registers in some cases, and only
1359 * in 1 or 2 integer registers.
1361 if (cinfo->ret.storage != RegTypeGeneral && cinfo->ret.storage != RegTypeNone && cinfo->ret.storage != RegTypeFP && cinfo->ret.storage != RegTypeIRegPair) {
1362 cfg->exception_message = g_strdup ("unknown ret conv");
1363 cfg->disable_llvm = TRUE;
1367 for (i = 0; i < n; ++i) {
1368 ainfo = cinfo->args + i;
1370 linfo->args [i].storage = LLVMArgNone;
1372 switch (ainfo->storage) {
1373 case RegTypeGeneral:
1374 case RegTypeIRegPair:
1376 linfo->args [i].storage = LLVMArgInIReg;
1379 cfg->exception_message = g_strdup_printf ("ainfo->storage (%d)", ainfo->storage);
1380 cfg->disable_llvm = TRUE;
/*
 * mono_arch_emit_call:
 *
 *   Emit the IR that moves each call argument into its ABI-mandated
 * location (r0-r3, register pairs, or the outgoing stack area at
 * ARMREG_SP + offset), as classified by get_call_info ().  Also emits the
 * vararg signature cookie and the valuetype-return address when needed,
 * and records the call's total stack usage.
 *
 * NOTE(review): subsampled dump — else branches, breaks and closing
 * braces are missing between the visible statements.
 */
1390 mono_arch_emit_call (MonoCompile *cfg, MonoCallInst *call)
1393 MonoMethodSignature *sig;
1397 sig = call->signature;
1398 n = sig->param_count + sig->hasthis;
1400 cinfo = get_call_info (NULL, sig, sig->pinvoke);
1402 for (i = 0; i < n; ++i) {
1403 ArgInfo *ainfo = cinfo->args + i;
/* For the implicit 'this' slot there is no entry in sig->params; treat it
 * as a native int. */
1406 if (i >= sig->hasthis)
1407 t = sig->params [i - sig->hasthis];
1409 t = &mono_defaults.int_class->byval_arg;
1410 t = mini_type_get_underlying_type (NULL, t);
/* For varargs, the signature cookie goes out right before the first
 * implicit (variadic) argument. */
1412 if ((sig->call_convention == MONO_CALL_VARARG) && (i == sig->sentinelpos)) {
1413 /* Emit the signature cookie just before the implicit arguments */
1414 emit_sig_cookie (cfg, call, cinfo);
1417 in = call->args [i];
1419 switch (ainfo->storage) {
1420 case RegTypeGeneral:
1421 case RegTypeIRegPair:
/* 64-bit integers occupy two consecutive registers; vregs dreg+1/dreg+2
 * are the ls/ms halves of the long vreg pair. */
1422 if (!t->byref && ((t->type == MONO_TYPE_I8) || (t->type == MONO_TYPE_U8))) {
1423 MONO_INST_NEW (cfg, ins, OP_MOVE);
1424 ins->dreg = mono_alloc_ireg (cfg);
1425 ins->sreg1 = in->dreg + 1;
1426 MONO_ADD_INS (cfg->cbb, ins);
1427 mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, ainfo->reg, FALSE);
1429 MONO_INST_NEW (cfg, ins, OP_MOVE);
1430 ins->dreg = mono_alloc_ireg (cfg);
1431 ins->sreg1 = in->dreg + 2;
1432 MONO_ADD_INS (cfg->cbb, ins);
1433 mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, ainfo->reg + 1, FALSE);
1434 } else if (!t->byref && ((t->type == MONO_TYPE_R8) || (t->type == MONO_TYPE_R4))) {
1435 #ifndef MONO_ARCH_SOFT_FLOAT
/* Floats passed in integer registers (softfp calling convention). */
1439 if (ainfo->size == 4) {
1440 #ifdef MONO_ARCH_SOFT_FLOAT
1441 /* mono_emit_call_args () have already done the r8->r4 conversion */
1442 /* The converted value is in an int vreg */
1443 MONO_INST_NEW (cfg, ins, OP_MOVE);
1444 ins->dreg = mono_alloc_ireg (cfg);
1445 ins->sreg1 = in->dreg;
1446 MONO_ADD_INS (cfg->cbb, ins);
1447 mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, ainfo->reg, FALSE);
/* Hard-float build: bounce the value through the scratch area at
 * param_area - 8 to move it from an FP vreg into an int register. */
1449 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORER4_MEMBASE_REG, ARMREG_SP, (cfg->param_area - 8), in->dreg);
1450 creg = mono_alloc_ireg (cfg);
1451 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOAD_MEMBASE, creg, ARMREG_SP, (cfg->param_area - 8));
1452 mono_call_inst_add_outarg_reg (cfg, call, creg, ainfo->reg, FALSE);
1455 #ifdef MONO_ARCH_SOFT_FLOAT
/* Soft-float double: extract the two 32-bit halves directly. */
1456 MONO_INST_NEW (cfg, ins, OP_FGETLOW32);
1457 ins->dreg = mono_alloc_ireg (cfg);
1458 ins->sreg1 = in->dreg;
1459 MONO_ADD_INS (cfg->cbb, ins);
1460 mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, ainfo->reg, FALSE);
1462 MONO_INST_NEW (cfg, ins, OP_FGETHIGH32);
1463 ins->dreg = mono_alloc_ireg (cfg);
1464 ins->sreg1 = in->dreg;
1465 MONO_ADD_INS (cfg->cbb, ins);
1466 mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, ainfo->reg + 1, FALSE);
/* Hard-float double: spill to the scratch slot, reload both words into
 * the register pair. */
1468 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORER8_MEMBASE_REG, ARMREG_SP, (cfg->param_area - 8), in->dreg);
1469 creg = mono_alloc_ireg (cfg);
1470 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOAD_MEMBASE, creg, ARMREG_SP, (cfg->param_area - 8));
1471 mono_call_inst_add_outarg_reg (cfg, call, creg, ainfo->reg, FALSE);
1472 creg = mono_alloc_ireg (cfg);
1473 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOAD_MEMBASE, creg, ARMREG_SP, (cfg->param_area - 8 + 4));
1474 mono_call_inst_add_outarg_reg (cfg, call, creg, ainfo->reg + 1, FALSE);
1477 cfg->flags |= MONO_CFG_HAS_FPOUT;
/* Simple scalar: a single register move. */
1479 MONO_INST_NEW (cfg, ins, OP_MOVE);
1480 ins->dreg = mono_alloc_ireg (cfg);
1481 ins->sreg1 = in->dreg;
1482 MONO_ADD_INS (cfg->cbb, ins);
1484 mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, ainfo->reg, FALSE);
1487 case RegTypeStructByAddr:
/* Unreached: passing structs by hidden address is not wired up here. */
1490 /* FIXME: where si the data allocated? */
1491 arg->backend.reg3 = ainfo->reg;
1492 call->used_iregs |= 1 << ainfo->reg;
1493 g_assert_not_reached ();
1496 case RegTypeStructByVal:
/* Deferred to mono_arch_emit_outarg_vt () via OP_OUTARG_VT; the ArgInfo
 * is copied into the mempool so it survives until lowering. */
1497 MONO_INST_NEW (cfg, ins, OP_OUTARG_VT);
1498 ins->opcode = OP_OUTARG_VT;
1499 ins->sreg1 = in->dreg;
1500 ins->klass = in->klass;
1501 ins->inst_p0 = call;
1502 ins->inst_p1 = mono_mempool_alloc (cfg->mempool, sizeof (ArgInfo));
1503 memcpy (ins->inst_p1, ainfo, sizeof (ArgInfo));
1504 MONO_ADD_INS (cfg->cbb, ins);
/* Stack-passed arguments: store into the outgoing area at sp+offset,
 * choosing the store opcode by the argument's type. */
1507 if (!t->byref && ((t->type == MONO_TYPE_I8) || (t->type == MONO_TYPE_U8))) {
1508 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, ARMREG_SP, ainfo->offset, in->dreg);
1509 } else if (!t->byref && ((t->type == MONO_TYPE_R4) || (t->type == MONO_TYPE_R8))) {
1510 if (t->type == MONO_TYPE_R8) {
1511 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORER8_MEMBASE_REG, ARMREG_SP, ainfo->offset, in->dreg);
1513 #ifdef MONO_ARCH_SOFT_FLOAT
/* Soft-float: the r4 value already lives in an int vreg. */
1514 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, ARMREG_SP, ainfo->offset, in->dreg);
1516 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORER4_MEMBASE_REG, ARMREG_SP, ainfo->offset, in->dreg);
1520 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, ARMREG_SP, ainfo->offset, in->dreg);
1523 case RegTypeBaseGen:
/* Argument straddles the register/stack boundary: one word goes on the
 * stack, the other into r3.  Which half goes where depends on
 * endianness. */
1524 if (!t->byref && ((t->type == MONO_TYPE_I8) || (t->type == MONO_TYPE_U8))) {
1525 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, ARMREG_SP, ainfo->offset, (G_BYTE_ORDER == G_BIG_ENDIAN) ? in->dreg + 1 : in->dreg + 2);
1526 MONO_INST_NEW (cfg, ins, OP_MOVE);
1527 ins->dreg = mono_alloc_ireg (cfg);
1528 ins->sreg1 = G_BYTE_ORDER == G_BIG_ENDIAN ? in->dreg + 2 : in->dreg + 1;
1529 MONO_ADD_INS (cfg->cbb, ins);
1530 mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, ARMREG_R3, FALSE);
1531 } else if (!t->byref && (t->type == MONO_TYPE_R8)) {
1534 #ifdef MONO_ARCH_SOFT_FLOAT
1535 g_assert_not_reached ();
/* Hard-float split double: spill, low word to r3, high word to stack. */
1538 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORER8_MEMBASE_REG, ARMREG_SP, (cfg->param_area - 8), in->dreg);
1539 creg = mono_alloc_ireg (cfg);
1540 mono_call_inst_add_outarg_reg (cfg, call, creg, ARMREG_R3, FALSE);
1541 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOAD_MEMBASE, creg, ARMREG_SP, (cfg->param_area - 8));
1542 creg = mono_alloc_ireg (cfg);
1543 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOAD_MEMBASE, creg, ARMREG_SP, (cfg->param_area - 4));
1544 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, ARMREG_SP, ainfo->offset, creg);
1545 cfg->flags |= MONO_CFG_HAS_FPOUT;
1547 g_assert_not_reached ();
/* FP-register arguments (FPA): passed in int regs on this ABI. */
1554 arg->backend.reg3 = ainfo->reg;
1555 /* FP args are passed in int regs */
1556 call->used_iregs |= 1 << ainfo->reg;
1557 if (ainfo->size == 8) {
1558 arg->opcode = OP_OUTARG_R8;
1559 call->used_iregs |= 1 << (ainfo->reg + 1);
1561 arg->opcode = OP_OUTARG_R4;
1564 cfg->flags |= MONO_CFG_HAS_FPOUT;
1568 g_assert_not_reached ();
1572 /* Handle the case where there are no implicit arguments */
1573 if (!sig->pinvoke && (sig->call_convention == MONO_CALL_VARARG) && (n == sig->sentinelpos))
1574 emit_sig_cookie (cfg, call, cinfo);
/* Valuetype returns: either returned in registers (small structs, the
 * JIT rewrites the call) or via a hidden return-address argument. */
1576 if (sig->ret && MONO_TYPE_ISSTRUCT (sig->ret)) {
1579 if (cinfo->ret.storage == RegTypeStructByVal) {
1580 /* The JIT will transform this into a normal call */
1581 call->vret_in_reg = TRUE;
1583 MONO_INST_NEW (cfg, vtarg, OP_MOVE);
1584 vtarg->sreg1 = call->vret_var->dreg;
1585 vtarg->dreg = mono_alloc_preg (cfg);
1586 MONO_ADD_INS (cfg->cbb, vtarg);
1588 mono_call_inst_add_outarg_reg (cfg, call, vtarg->dreg, cinfo->ret.reg, FALSE);
1592 call->stack_usage = cinfo->stack_usage;
/*
 * mono_arch_emit_outarg_vt:
 *
 *   Lower an OP_OUTARG_VT instruction (created by mono_arch_emit_call for
 * RegTypeStructByVal arguments): the first ainfo->size words of the
 * struct are loaded into consecutive argument registers starting at
 * ainfo->reg, and any overflow (ovf_size words) is copied onto the
 * outgoing stack area at sp+doffset with mini_emit_memcpy ().
 *
 * NOTE(review): subsampled dump — the guard around the memcpy call and
 * some initialization lines are not visible here.
 */
1598 mono_arch_emit_outarg_vt (MonoCompile *cfg, MonoInst *ins, MonoInst *src)
1600 MonoCallInst *call = (MonoCallInst*)ins->inst_p0;
1601 ArgInfo *ainfo = ins->inst_p1;
1602 int ovf_size = ainfo->vtsize;
1603 int doffset = ainfo->offset;
1604 int i, soffset, dreg;
/* Load the register-resident portion word by word. */
1607 for (i = 0; i < ainfo->size; ++i) {
1608 dreg = mono_alloc_ireg (cfg);
1609 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, dreg, src->dreg, soffset);
1610 mono_call_inst_add_outarg_reg (cfg, call, dreg, ainfo->reg + i, FALSE);
1611 soffset += sizeof (gpointer);
1613 //g_print ("vt size: %d at R%d + %d\n", doffset, vt->inst_basereg, vt->inst_offset);
/* Copy the remainder of the struct to the stack. */
1615 mini_emit_memcpy (cfg, ARMREG_SP, doffset, src->dreg, soffset, ovf_size * sizeof (gpointer), 0);
/*
 * mono_arch_emit_setret:
 *
 *   Emit the IR that moves the method's return value into the ABI return
 * location.  Longs use OP_SETLRET (register pair r0/r1); floating point
 * handling depends on the FP configuration selected at build time
 * (soft-float / VFP / FPA); everything else is a plain register move into
 * cfg->ret->dreg.
 */
1619 mono_arch_emit_setret (MonoCompile *cfg, MonoMethod *method, MonoInst *val)
1621 MonoType *ret = mini_type_get_underlying_type (cfg->generic_sharing_context, mono_method_signature (method)->ret);
1624 if (ret->type == MONO_TYPE_I8 || ret->type == MONO_TYPE_U8) {
/* LLVM models the long return itself; otherwise emit an explicit
 * pair move (dreg+1/dreg+2 are the halves of the long vreg). */
1627 if (COMPILE_LLVM (cfg)) {
1628 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, cfg->ret->dreg, val->dreg);
1630 MONO_INST_NEW (cfg, ins, OP_SETLRET);
1631 ins->sreg1 = val->dreg + 1;
1632 ins->sreg2 = val->dreg + 2;
1633 MONO_ADD_INS (cfg->cbb, ins);
1637 #ifdef MONO_ARCH_SOFT_FLOAT
1638 if (ret->type == MONO_TYPE_R8) {
1641 MONO_INST_NEW (cfg, ins, OP_SETFRET);
1642 ins->dreg = cfg->ret->dreg;
1643 ins->sreg1 = val->dreg;
1644 MONO_ADD_INS (cfg->cbb, ins);
1647 if (ret->type == MONO_TYPE_R4) {
1648 /* Already converted to an int in method_to_ir () */
1649 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, cfg->ret->dreg, val->dreg);
1652 #elif defined(ARM_FPU_VFP)
/* VFP: OP_SETFRET performs any needed r8->r4 narrowing. */
1653 if (ret->type == MONO_TYPE_R8 || ret->type == MONO_TYPE_R4) {
1656 MONO_INST_NEW (cfg, ins, OP_SETFRET);
1657 ins->dreg = cfg->ret->dreg;
1658 ins->sreg1 = val->dreg;
1659 MONO_ADD_INS (cfg->cbb, ins);
/* FPA fallback: a plain FP register move suffices. */
1663 if (ret->type == MONO_TYPE_R4 || ret->type == MONO_TYPE_R8) {
1664 MONO_EMIT_NEW_UNALU (cfg, OP_FMOVE, cfg->ret->dreg, val->dreg);
1671 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, cfg->ret->dreg, val->dreg);
1674 #endif /* #ifndef DISABLE_JIT */
1677 mono_arch_is_inst_imm (gint64 imm)
1682 #define DYN_CALL_STACK_ARGS 6
1685 MonoMethodSignature *sig;
1690 mgreg_t regs [PARAM_REGS + DYN_CALL_STACK_ARGS];
/*
 * dyn_call_supported:
 *
 *   Decide whether a signature can be invoked through the dynamic-call
 * machinery (mono_arch_start_dyn_call / mono_arch_finish_dyn_call):
 * all arguments must fit in the PARAM_REGS registers plus at most
 * DYN_CALL_STACK_ARGS stack slots, and the return/argument storage kinds
 * must be ones the dyn-call code knows how to marshal.
 *
 * NOTE(review): subsampled dump — the 'return FALSE/TRUE' lines and
 * several case labels are not visible here.
 */
1696 dyn_call_supported (CallInfo *cinfo, MonoMethodSignature *sig)
/* Too many total arguments to fit regs + reserved stack slots. */
1700 if (sig->hasthis + sig->param_count > PARAM_REGS + DYN_CALL_STACK_ARGS)
1703 switch (cinfo->ret.storage) {
1705 case RegTypeGeneral:
1706 case RegTypeIRegPair:
1707 case RegTypeStructByAddr:
1712 #elif defined(ARM_FPU_VFP)
1721 for (i = 0; i < cinfo->nargs; ++i) {
1722 switch (cinfo->args [i].storage) {
1723 case RegTypeGeneral:
1725 case RegTypeIRegPair:
/* Stack arguments beyond the reserved area cannot be marshalled. */
1728 if (cinfo->args [i].offset >= (DYN_CALL_STACK_ARGS * sizeof (gpointer)))
1731 case RegTypeStructByVal:
1732 if (cinfo->args [i].reg + cinfo->args [i].vtsize >= PARAM_REGS + DYN_CALL_STACK_ARGS)
1740 // FIXME: Can't use cinfo only as it doesn't contain info about I8/float */
1741 for (i = 0; i < sig->param_count; ++i) {
1742 MonoType *t = sig->params [i];
1750 #ifdef MONO_ARCH_SOFT_FLOAT
/*
 * mono_arch_dyn_call_prepare:
 *
 *   Build the per-signature state used by the dynamic-call path.
 * Returns NULL when the signature cannot be handled (see
 * dyn_call_supported ()); otherwise returns a heap-allocated
 * ArchDynCallInfo owning the CallInfo.  Freed by
 * mono_arch_dyn_call_free ().
 */
1769 mono_arch_dyn_call_prepare (MonoMethodSignature *sig)
1771 ArchDynCallInfo *info;
1774 cinfo = get_call_info (NULL, sig, FALSE);
/* Unsupported signature: the (not visible here) cleanup path frees
 * cinfo and returns NULL. */
1776 if (!dyn_call_supported (cinfo, sig)) {
1781 info = g_new0 (ArchDynCallInfo, 1);
1782 // FIXME: Preprocess the info to speed up start_dyn_call ()
1784 info->cinfo = cinfo;
1786 return (MonoDynCallInfo*)info;
/*
 * mono_arch_dyn_call_free:
 *
 *   Release the state created by mono_arch_dyn_call_prepare ().
 * (The g_free of 'ainfo' itself is presumably on a line not visible in
 * this dump.)
 */
1790 mono_arch_dyn_call_free (MonoDynCallInfo *info)
1792 ArchDynCallInfo *ainfo = (ArchDynCallInfo*)info;
1794 g_free (ainfo->cinfo);
/*
 * mono_arch_start_dyn_call:
 *
 *   Marshal a dynamic call's arguments from the boxed 'args' array into
 * the DynCallArgs buffer 'buf' (register image in p->regs: PARAM_REGS
 * register slots followed by DYN_CALL_STACK_ARGS stack slots).  The
 * generated start_dyn_call trampoline later loads this image before
 * branching to the callee.  'ret' is stashed for the hidden valuetype
 * return address when cinfo->vtype_retaddr is set.
 *
 * NOTE(review): subsampled dump — case labels, breaks and some
 * assignments are missing between the visible statements.
 */
1799 mono_arch_start_dyn_call (MonoDynCallInfo *info, gpointer **args, guint8 *ret, guint8 *buf, int buf_len)
1801 ArchDynCallInfo *dinfo = (ArchDynCallInfo*)info;
1802 DynCallArgs *p = (DynCallArgs*)buf;
1803 int arg_index, greg, i, j;
1804 MonoMethodSignature *sig = dinfo->sig;
1806 g_assert (buf_len >= sizeof (DynCallArgs));
/* Hidden valuetype-return address occupies the first register slot. */
1814 if (dinfo->cinfo->vtype_retaddr)
1815 p->regs [greg ++] = (mgreg_t)ret;
/* Implicit 'this', when present, is the next argument. */
1818 p->regs [greg ++] = (mgreg_t)*(args [arg_index ++]);
1820 for (i = 0; i < sig->param_count; i++) {
1821 MonoType *t = mono_type_get_underlying_type (sig->params [i]);
1822 gpointer *arg = args [arg_index ++];
1823 ArgInfo *ainfo = &dinfo->cinfo->args [i + sig->hasthis];
/* Map the ABI location to an index into p->regs: register args map
 * directly, stack args land after the PARAM_REGS entries. */
1826 if (ainfo->storage == RegTypeGeneral || ainfo->storage == RegTypeIRegPair || ainfo->storage == RegTypeStructByVal)
1828 else if (ainfo->storage == RegTypeBase)
1829 slot = PARAM_REGS + (ainfo->offset / 4);
1831 g_assert_not_reached ();
/* byref arguments are passed as the pointer value itself. */
1834 p->regs [slot] = (mgreg_t)*arg;
1839 case MONO_TYPE_STRING:
1840 case MONO_TYPE_CLASS:
1841 case MONO_TYPE_ARRAY:
1842 case MONO_TYPE_SZARRAY:
1843 case MONO_TYPE_OBJECT:
/* Reference types: store the object pointer. */
1847 p->regs [slot] = (mgreg_t)*arg;
1849 case MONO_TYPE_BOOLEAN:
/* Narrow integer types are widened (sign/zero extended) into a full
 * register slot. */
1851 p->regs [slot] = *(guint8*)arg;
1854 p->regs [slot] = *(gint8*)arg;
1857 p->regs [slot] = *(gint16*)arg;
1860 case MONO_TYPE_CHAR:
1861 p->regs [slot] = *(guint16*)arg;
1864 p->regs [slot] = *(gint32*)arg;
1867 p->regs [slot] = *(guint32*)arg;
/* 64-bit values occupy two consecutive slots. */
1871 p->regs [slot ++] = (mgreg_t)arg [0];
1872 p->regs [slot] = (mgreg_t)arg [1];
1875 p->regs [slot] = *(mgreg_t*)arg;
1878 p->regs [slot ++] = (mgreg_t)arg [0];
1879 p->regs [slot] = (mgreg_t)arg [1];
1881 case MONO_TYPE_GENERICINST:
1882 if (MONO_TYPE_IS_REFERENCE (t)) {
1883 p->regs [slot] = (mgreg_t)*arg;
1888 case MONO_TYPE_VALUETYPE:
/* Structs are only supported by-value here; copy every word
 * (register part + stack overflow part) into consecutive slots. */
1889 g_assert (ainfo->storage == RegTypeStructByVal);
1891 if (ainfo->size == 0)
1892 slot = PARAM_REGS + (ainfo->offset / 4);
1896 for (j = 0; j < ainfo->size + ainfo->vtsize; ++j)
1897 p->regs [slot ++] = ((mgreg_t*)arg) [j];
1900 g_assert_not_reached ();
/*
 * mono_arch_finish_dyn_call:
 *
 *   Unmarshal a dynamic call's return value from the DynCallArgs buffer
 * (res/res2 hold r0/r1 as captured by the trampoline) into the caller's
 * 'ret' buffer, widening/narrowing according to the signature's return
 * type.
 *
 * NOTE(review): subsampled dump — case labels and breaks are missing
 * between the visible statements.
 */
1906 mono_arch_finish_dyn_call (MonoDynCallInfo *info, guint8 *buf)
1908 ArchDynCallInfo *ainfo = (ArchDynCallInfo*)info;
1909 MonoMethodSignature *sig = ((ArchDynCallInfo*)info)->sig;
1910 guint8 *ret = ((DynCallArgs*)buf)->ret;
1911 mgreg_t res = ((DynCallArgs*)buf)->res;
1912 mgreg_t res2 = ((DynCallArgs*)buf)->res2;
1914 switch (mono_type_get_underlying_type (sig->ret)->type) {
1915 case MONO_TYPE_VOID:
1916 *(gpointer*)ret = NULL;
1918 case MONO_TYPE_STRING:
1919 case MONO_TYPE_CLASS:
1920 case MONO_TYPE_ARRAY:
1921 case MONO_TYPE_SZARRAY:
1922 case MONO_TYPE_OBJECT:
1926 *(gpointer*)ret = (gpointer)res;
1932 case MONO_TYPE_BOOLEAN:
1933 *(guint8*)ret = res;
1936 *(gint16*)ret = res;
1939 case MONO_TYPE_CHAR:
1940 *(guint16*)ret = res;
1943 *(gint32*)ret = res;
1946 *(guint32*)ret = res;
/* 64-bit return: write both halves; storing as two gint32 stores
 * matches the register layout regardless of endianness. */
1950 /* This handles endianness as well */
1951 ((gint32*)ret) [0] = res;
1952 ((gint32*)ret) [1] = res2;
1954 case MONO_TYPE_GENERICINST:
1955 if (MONO_TYPE_IS_REFERENCE (sig->ret)) {
1956 *(gpointer*)ret = (gpointer)res;
1961 case MONO_TYPE_VALUETYPE:
/* Struct returns were written through the hidden return address. */
1962 g_assert (ainfo->cinfo->vtype_retaddr);
1965 #if defined(ARM_FPU_VFP)
/* Type-pun the raw register bits back into float/double. */
1967 *(float*)ret = *(float*)&res;
1969 case MONO_TYPE_R8: {
/* NOTE(review): '®s' below looks like mojibake for '&regs' (a local
 * mgreg_t regs[2] holding res/res2 in upstream) — confirm against the
 * unmangled source before building. */
1975 *(double*)ret = *(double*)®s;
1980 g_assert_not_reached ();
1987 * Allow tracing to work with this interface (with an optional argument)
/*
 * mono_arch_instrument_prolog:
 *
 *   Emit a call to the tracing function 'func (method, NULL)' at method
 * entry: r0 = method, r1 = 0 (placeholder frame pointer), target loaded
 * into r2 and called indirectly.
 */
1991 mono_arch_instrument_prolog (MonoCompile *cfg, void *func, void *p, gboolean enable_arguments)
1995 code = mono_arm_emit_load_imm (code, ARMREG_R0, (guint32)cfg->method);
1996 ARM_MOV_REG_IMM8 (code, ARMREG_R1, 0); /* NULL ebp for now */
1997 code = mono_arm_emit_load_imm (code, ARMREG_R2, (guint32)func);
1998 code = emit_call_reg (code, ARMREG_R2);
/*
 * mono_arch_instrument_epilog_full:
 *
 *   Emit a call to the tracing function at method exit.  The live return
 * value (r0, r0/r1 pair, FP value or struct address) is saved to the
 * frame's param area around the call and restored afterwards, so tracing
 * does not clobber the method's result.
 */
2011 mono_arch_instrument_epilog_full (MonoCompile *cfg, void *func, void *p, gboolean enable_arguments, gboolean preserve_argument_registers)
2014 int save_mode = SAVE_NONE;
2016 MonoMethod *method = cfg->method;
2017 int rtype = mini_type_get_underlying_type (cfg->generic_sharing_context, mono_method_signature (method)->ret)->type;
2018 int save_offset = cfg->param_area;
/* Grow the native code buffer if fewer than ~16 instructions remain. */
2022 offset = code - cfg->native_code;
2023 /* we need about 16 instructions */
2024 if (offset > (cfg->code_size - 16 * 4)) {
2025 cfg->code_size *= 2;
2026 cfg->native_code = g_realloc (cfg->native_code, cfg->code_size);
2027 code = cfg->native_code + offset;
2030 case MONO_TYPE_VOID:
2031 /* special case string .ctor icall */
/* NOTE(review): 'strcmp (...) &&' is truthy when the name is NOT
 * ".ctor", which contradicts the comment above; upstream uses
 * 'strcmp (...) == 0'.  Confirm against the unmangled source. */
2032 if (strcmp (".ctor", method->name) && method->klass == mono_defaults.string_class)
2033 save_mode = SAVE_ONE;
2035 save_mode = SAVE_NONE;
2039 save_mode = SAVE_TWO;
2043 save_mode = SAVE_FP;
2045 case MONO_TYPE_VALUETYPE:
2046 save_mode = SAVE_STRUCT;
2049 save_mode = SAVE_ONE;
/* Save the return value, then set up the tracer's arguments. */
2053 switch (save_mode) {
2055 ARM_STR_IMM (code, ARMREG_R0, cfg->frame_reg, save_offset);
2056 ARM_STR_IMM (code, ARMREG_R1, cfg->frame_reg, save_offset + 4);
2057 if (enable_arguments) {
2058 ARM_MOV_REG_REG (code, ARMREG_R2, ARMREG_R1);
2059 ARM_MOV_REG_REG (code, ARMREG_R1, ARMREG_R0);
2063 ARM_STR_IMM (code, ARMREG_R0, cfg->frame_reg, save_offset);
2064 if (enable_arguments) {
2065 ARM_MOV_REG_REG (code, ARMREG_R1, ARMREG_R0);
2069 /* FIXME: what reg? */
2070 if (enable_arguments) {
2071 /* FIXME: what reg? */
2075 if (enable_arguments) {
2076 /* FIXME: get the actual address */
2077 ARM_MOV_REG_REG (code, ARMREG_R1, ARMREG_R0);
/* Call the tracer: r0 = method, target via IP. */
2085 code = mono_arm_emit_load_imm (code, ARMREG_R0, (guint32)cfg->method);
2086 code = mono_arm_emit_load_imm (code, ARMREG_IP, (guint32)func);
2087 code = emit_call_reg (code, ARMREG_IP);
/* Restore the saved return value. */
2089 switch (save_mode) {
2091 ARM_LDR_IMM (code, ARMREG_R0, cfg->frame_reg, save_offset);
2092 ARM_LDR_IMM (code, ARMREG_R1, cfg->frame_reg, save_offset + 4);
2095 ARM_LDR_IMM (code, ARMREG_R0, cfg->frame_reg, save_offset);
2109 * The immediate field for cond branches is big enough for all reasonable methods
/*
 * EMIT_COND_BRANCH_FLAGS: emit a conditional branch to a basic block.
 * The direct-offset path is disabled ('0 &&'), so a patch-info entry is
 * always recorded and the branch displacement is filled in later.
 */
2111 #define EMIT_COND_BRANCH_FLAGS(ins,condcode) \
2112 if (0 && ins->inst_true_bb->native_offset) { \
2113 ARM_B_COND (code, (condcode), (code - cfg->native_code + ins->inst_true_bb->native_offset) & 0xffffff); \
2115 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_BB, ins->inst_true_bb); \
2116 ARM_B_COND (code, (condcode), 0); \
2119 #define EMIT_COND_BRANCH(ins,cond) EMIT_COND_BRANCH_FLAGS(ins, branch_cc_table [(cond)])
2121 /* emit an exception if condition is fail
2123 * We assign the extra code used to throw the implicit exceptions
2124 * to cfg->bb_exit as far as the big branch handling is concerned
/*
 * EMIT_COND_SYSTEM_EXCEPTION_FLAGS: conditionally branch-and-link to an
 * exception-throwing stub (resolved later via MONO_PATCH_INFO_EXC).
 */
2126 #define EMIT_COND_SYSTEM_EXCEPTION_FLAGS(condcode,exc_name) \
2128 mono_add_patch_info (cfg, code - cfg->native_code, \
2129 MONO_PATCH_INFO_EXC, exc_name); \
2130 ARM_BL_COND (code, (condcode), 0); \
2133 #define EMIT_COND_SYSTEM_EXCEPTION(cond,exc_name) EMIT_COND_SYSTEM_EXCEPTION_FLAGS(branch_cc_table [(cond)], (exc_name))
2136 mono_arch_peephole_pass_1 (MonoCompile *cfg, MonoBasicBlock *bb)
/*
 * mono_arch_peephole_pass_2:
 *
 *   Per-basic-block peephole optimizations over adjacent instruction
 * pairs: forwarding a just-stored value into a following load,
 * collapsing redundant load/load pairs, and removing no-op or
 * mutually-cancelling moves.
 *
 * NOTE(review): subsampled dump — breaks, else lines and some case
 * labels are missing between the visible statements.
 */
2141 mono_arch_peephole_pass_2 (MonoCompile *cfg, MonoBasicBlock *bb)
2143 MonoInst *ins, *n, *last_ins = NULL;
2145 MONO_BB_FOR_EACH_INS_SAFE (bb, n, ins) {
2146 switch (ins->opcode) {
2149 /* Already done by an arch-independent pass */
2151 case OP_LOAD_MEMBASE:
2152 case OP_LOADI4_MEMBASE:
/* store reg -> [base+off]; load [base+off] -> dreg
 * becomes a register move (or nothing if dreg == stored reg). */
2154 * OP_STORE_MEMBASE_REG reg, offset(basereg)
2155 * OP_LOAD_MEMBASE offset(basereg), reg
2157 if (last_ins && (last_ins->opcode == OP_STOREI4_MEMBASE_REG
2158 || last_ins->opcode == OP_STORE_MEMBASE_REG) &&
2159 ins->inst_basereg == last_ins->inst_destbasereg &&
2160 ins->inst_offset == last_ins->inst_offset) {
2161 if (ins->dreg == last_ins->sreg1) {
2162 MONO_DELETE_INS (bb, ins);
2165 //static int c = 0; printf ("MATCHX %s %d\n", cfg->method->name,c++);
2166 ins->opcode = OP_MOVE;
2167 ins->sreg1 = last_ins->sreg1;
2171 * Note: reg1 must be different from the basereg in the second load
2172 * OP_LOAD_MEMBASE offset(basereg), reg1
2173 * OP_LOAD_MEMBASE offset(basereg), reg2
2175 * OP_LOAD_MEMBASE offset(basereg), reg1
2176 * OP_MOVE reg1, reg2
2178 } if (last_ins && (last_ins->opcode == OP_LOADI4_MEMBASE
2179 || last_ins->opcode == OP_LOAD_MEMBASE) &&
2180 ins->inst_basereg != last_ins->dreg &&
2181 ins->inst_basereg == last_ins->inst_basereg &&
2182 ins->inst_offset == last_ins->inst_offset) {
2184 if (ins->dreg == last_ins->dreg) {
2185 MONO_DELETE_INS (bb, ins);
2188 ins->opcode = OP_MOVE;
2189 ins->sreg1 = last_ins->dreg;
2192 //g_assert_not_reached ();
/* store imm -> [base+off]; load [base+off] -> reg
 * would become an ICONST; rule currently guarded by an assert. */
2196 * OP_STORE_MEMBASE_IMM imm, offset(basereg)
2197 * OP_LOAD_MEMBASE offset(basereg), reg
2199 * OP_STORE_MEMBASE_IMM imm, offset(basereg)
2200 * OP_ICONST reg, imm
2202 } else if (last_ins && (last_ins->opcode == OP_STOREI4_MEMBASE_IMM
2203 || last_ins->opcode == OP_STORE_MEMBASE_IMM) &&
2204 ins->inst_basereg == last_ins->inst_destbasereg &&
2205 ins->inst_offset == last_ins->inst_offset) {
2206 //static int c = 0; printf ("MATCHX %s %d\n", cfg->method->name,c++);
2207 ins->opcode = OP_ICONST;
2208 ins->inst_c0 = last_ins->inst_imm;
2209 g_assert_not_reached (); // check this rule
/* Narrow load after a same-location narrow store: turn the load into
 * a sign/zero extension of the stored register. */
2213 case OP_LOADU1_MEMBASE:
2214 case OP_LOADI1_MEMBASE:
2215 if (last_ins && (last_ins->opcode == OP_STOREI1_MEMBASE_REG) &&
2216 ins->inst_basereg == last_ins->inst_destbasereg &&
2217 ins->inst_offset == last_ins->inst_offset) {
2218 ins->opcode = (ins->opcode == OP_LOADI1_MEMBASE) ? OP_ICONV_TO_I1 : OP_ICONV_TO_U1;
2219 ins->sreg1 = last_ins->sreg1;
2222 case OP_LOADU2_MEMBASE:
2223 case OP_LOADI2_MEMBASE:
2224 if (last_ins && (last_ins->opcode == OP_STOREI2_MEMBASE_REG) &&
2225 ins->inst_basereg == last_ins->inst_destbasereg &&
2226 ins->inst_offset == last_ins->inst_offset) {
2227 ins->opcode = (ins->opcode == OP_LOADI2_MEMBASE) ? OP_ICONV_TO_I2 : OP_ICONV_TO_U2;
2228 ins->sreg1 = last_ins->sreg1;
2232 ins->opcode = OP_MOVE;
/* Drop self-moves and move/move-back pairs. */
2236 if (ins->dreg == ins->sreg1) {
2237 MONO_DELETE_INS (bb, ins);
2241 * OP_MOVE sreg, dreg
2242 * OP_MOVE dreg, sreg
2244 if (last_ins && last_ins->opcode == OP_MOVE &&
2245 ins->sreg1 == last_ins->dreg &&
2246 ins->dreg == last_ins->sreg1) {
2247 MONO_DELETE_INS (bb, ins);
2255 bb->last_ins = last_ins;
2259 * the branch_cc_table should maintain the order of these
2273 branch_cc_table [] = {
2287 #define NEW_INS(cfg,dest,op) do { \
2288 MONO_INST_NEW ((cfg), (dest), (op)); \
2289 mono_bblock_insert_before_ins (bb, ins, (dest)); \
/*
 * map_to_reg_reg_op:
 *
 *   Map a membase (register + immediate offset) or immediate-operand
 * opcode to the equivalent register-register form: *_MEMBASE becomes
 * *_MEMINDEX (offset materialized in a register by the lowering pass),
 * and *_MEMBASE_IMM becomes *_MEMBASE_REG (immediate materialized in a
 * register).  Asserts on opcodes with no reg-reg counterpart.
 */
2293 map_to_reg_reg_op (int op)
2302 case OP_COMPARE_IMM:
2304 case OP_ICOMPARE_IMM:
2318 case OP_LOAD_MEMBASE:
2319 return OP_LOAD_MEMINDEX;
2320 case OP_LOADI4_MEMBASE:
2321 return OP_LOADI4_MEMINDEX;
2322 case OP_LOADU4_MEMBASE:
2323 return OP_LOADU4_MEMINDEX;
2324 case OP_LOADU1_MEMBASE:
2325 return OP_LOADU1_MEMINDEX;
2326 case OP_LOADI2_MEMBASE:
2327 return OP_LOADI2_MEMINDEX;
2328 case OP_LOADU2_MEMBASE:
2329 return OP_LOADU2_MEMINDEX;
2330 case OP_LOADI1_MEMBASE:
2331 return OP_LOADI1_MEMINDEX;
2332 case OP_STOREI1_MEMBASE_REG:
2333 return OP_STOREI1_MEMINDEX;
2334 case OP_STOREI2_MEMBASE_REG:
2335 return OP_STOREI2_MEMINDEX;
2336 case OP_STOREI4_MEMBASE_REG:
2337 return OP_STOREI4_MEMINDEX;
2338 case OP_STORE_MEMBASE_REG:
2339 return OP_STORE_MEMINDEX;
2340 case OP_STORER4_MEMBASE_REG:
2341 return OP_STORER4_MEMINDEX;
2342 case OP_STORER8_MEMBASE_REG:
2343 return OP_STORER8_MEMINDEX;
/* Immediate stores keep the membase addressing; only the stored value
 * moves from an immediate into a register. */
2344 case OP_STORE_MEMBASE_IMM:
2345 return OP_STORE_MEMBASE_REG;
2346 case OP_STOREI1_MEMBASE_IMM:
2347 return OP_STOREI1_MEMBASE_REG;
2348 case OP_STOREI2_MEMBASE_IMM:
2349 return OP_STOREI2_MEMBASE_REG;
2350 case OP_STOREI4_MEMBASE_IMM:
2351 return OP_STOREI4_MEMBASE_REG;
2353 g_assert_not_reached ();
2357 * Remove from the instruction list the instructions that can't be
2358 * represented with very simple instructions with no register
/*
 * mono_arch_lowering_pass:
 *
 *   Rewrite instructions whose immediates don't fit the ARM encodings:
 * immediates that are not valid rotated 8-bit constants (or don't fit
 * the addressing-mode offset fields imm12 / imm8 / fpimm8) are
 * materialized into a temp register via OP_ICONST / OP_ADD_IMM, and the
 * opcode is switched to its register form (see map_to_reg_reg_op ()).
 * Also strength-reduces MUL_IMM and swaps operands of some fp compares.
 *
 * NOTE(review): subsampled dump — case labels, breaks and the
 * 'loop_start:' label are missing between the visible statements.
 */
2362 mono_arch_lowering_pass (MonoCompile *cfg, MonoBasicBlock *bb)
2364 MonoInst *ins, *temp, *last_ins = NULL;
2365 int rot_amount, imm8, low_imm;
2367 MONO_BB_FOR_EACH_INS (bb, ins) {
2369 switch (ins->opcode) {
2373 case OP_COMPARE_IMM:
2374 case OP_ICOMPARE_IMM:
/* ALU immediate does not encode as a rotated imm8: load it into a temp
 * register and switch to the register-register opcode. */
2388 if ((imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount)) < 0) {
2389 NEW_INS (cfg, temp, OP_ICONST);
2390 temp->inst_c0 = ins->inst_imm;
2391 temp->dreg = mono_alloc_ireg (cfg);
2392 ins->sreg2 = temp->dreg;
2393 ins->opcode = mono_op_imm_to_op (ins->opcode);
2395 if (ins->opcode == OP_SBB || ins->opcode == OP_ISBB || ins->opcode == OP_SUBCC)
/* MUL_IMM strength reduction: *1 -> move, *0 -> const 0,
 * power of two -> shift, otherwise materialize and use IMUL. */
2401 if (ins->inst_imm == 1) {
2402 ins->opcode = OP_MOVE;
2405 if (ins->inst_imm == 0) {
2406 ins->opcode = OP_ICONST;
2410 imm8 = mono_is_power_of_two (ins->inst_imm);
2412 ins->opcode = OP_SHL_IMM;
2413 ins->inst_imm = imm8;
2416 NEW_INS (cfg, temp, OP_ICONST);
2417 temp->inst_c0 = ins->inst_imm;
2418 temp->dreg = mono_alloc_ireg (cfg);
2419 ins->sreg2 = temp->dreg;
2420 ins->opcode = OP_IMUL;
2426 if (ins->next && (ins->next->opcode == OP_COND_EXC_C || ins->next->opcode == OP_COND_EXC_IC))
2427 /* ARM sets the C flag to 1 if there was _no_ overflow */
2428 ins->next->opcode = OP_COND_EXC_NC;
2430 case OP_LOCALLOC_IMM:
2431 NEW_INS (cfg, temp, OP_ICONST);
2432 temp->inst_c0 = ins->inst_imm;
2433 temp->dreg = mono_alloc_ireg (cfg);
2434 ins->sreg1 = temp->dreg;
2435 ins->opcode = OP_LOCALLOC;
2437 case OP_LOAD_MEMBASE:
2438 case OP_LOADI4_MEMBASE:
2439 case OP_LOADU4_MEMBASE:
2440 case OP_LOADU1_MEMBASE:
2441 /* we can do two things: load the immed in a register
2442 * and use an indexed load, or see if the immed can be
2443 * represented as an ad_imm + a load with a smaller offset
2444 * that fits. We just do the first for now, optimize later.
2446 if (arm_is_imm12 (ins->inst_offset))
2448 NEW_INS (cfg, temp, OP_ICONST);
2449 temp->inst_c0 = ins->inst_offset;
2450 temp->dreg = mono_alloc_ireg (cfg);
2451 ins->sreg2 = temp->dreg;
2452 ins->opcode = map_to_reg_reg_op (ins->opcode);
/* Halfword / signed-byte loads have only an 8-bit offset field. */
2454 case OP_LOADI2_MEMBASE:
2455 case OP_LOADU2_MEMBASE:
2456 case OP_LOADI1_MEMBASE:
2457 if (arm_is_imm8 (ins->inst_offset))
2459 NEW_INS (cfg, temp, OP_ICONST);
2460 temp->inst_c0 = ins->inst_offset;
2461 temp->dreg = mono_alloc_ireg (cfg);
2462 ins->sreg2 = temp->dreg;
2463 ins->opcode = map_to_reg_reg_op (ins->opcode);
/* FP loads: no indexed form exists, so split the offset into a
 * rotated-imm8 base adjustment plus a small (9-bit) residual. */
2465 case OP_LOADR4_MEMBASE:
2466 case OP_LOADR8_MEMBASE:
2467 if (arm_is_fpimm8 (ins->inst_offset))
2469 low_imm = ins->inst_offset & 0x1ff;
2470 if ((imm8 = mono_arm_is_rotated_imm8 (ins->inst_offset & ~0x1ff, &rot_amount)) >= 0) {
2471 NEW_INS (cfg, temp, OP_ADD_IMM);
2472 temp->inst_imm = ins->inst_offset & ~0x1ff;
2473 temp->sreg1 = ins->inst_basereg;
2474 temp->dreg = mono_alloc_ireg (cfg);
2475 ins->inst_basereg = temp->dreg;
2476 ins->inst_offset = low_imm;
2479 /* VFP/FPA doesn't have indexed load instructions */
2480 g_assert_not_reached ();
2482 case OP_STORE_MEMBASE_REG:
2483 case OP_STOREI4_MEMBASE_REG:
2484 case OP_STOREI1_MEMBASE_REG:
2485 if (arm_is_imm12 (ins->inst_offset))
2487 NEW_INS (cfg, temp, OP_ICONST);
2488 temp->inst_c0 = ins->inst_offset;
2489 temp->dreg = mono_alloc_ireg (cfg);
2490 ins->sreg2 = temp->dreg;
2491 ins->opcode = map_to_reg_reg_op (ins->opcode);
2493 case OP_STOREI2_MEMBASE_REG:
2494 if (arm_is_imm8 (ins->inst_offset))
2496 NEW_INS (cfg, temp, OP_ICONST);
2497 temp->inst_c0 = ins->inst_offset;
2498 temp->dreg = mono_alloc_ireg (cfg);
2499 ins->sreg2 = temp->dreg;
2500 ins->opcode = map_to_reg_reg_op (ins->opcode);
2502 case OP_STORER4_MEMBASE_REG:
2503 case OP_STORER8_MEMBASE_REG:
2504 if (arm_is_fpimm8 (ins->inst_offset))
2506 low_imm = ins->inst_offset & 0x1ff;
2507 if ((imm8 = mono_arm_is_rotated_imm8 (ins->inst_offset & ~ 0x1ff, &rot_amount)) >= 0 && arm_is_fpimm8 (low_imm)) {
2508 NEW_INS (cfg, temp, OP_ADD_IMM);
2509 temp->inst_imm = ins->inst_offset & ~0x1ff;
2510 temp->sreg1 = ins->inst_destbasereg;
2511 temp->dreg = mono_alloc_ireg (cfg);
2512 ins->inst_destbasereg = temp->dreg;
2513 ins->inst_offset = low_imm;
2516 /*g_print ("fail with: %d (%d, %d)\n", ins->inst_offset, ins->inst_offset & ~0x1ff, low_imm);*/
2517 /* VFP/FPA doesn't have indexed store instructions */
2518 g_assert_not_reached ();
/* Immediate stores: materialize the value, then re-run the case for
 * this (now *_MEMBASE_REG) opcode to also fix a too-big offset. */
2520 case OP_STORE_MEMBASE_IMM:
2521 case OP_STOREI1_MEMBASE_IMM:
2522 case OP_STOREI2_MEMBASE_IMM:
2523 case OP_STOREI4_MEMBASE_IMM:
2524 NEW_INS (cfg, temp, OP_ICONST);
2525 temp->inst_c0 = ins->inst_imm;
2526 temp->dreg = mono_alloc_ireg (cfg);
2527 ins->sreg1 = temp->dreg;
2528 ins->opcode = map_to_reg_reg_op (ins->opcode);
2530 goto loop_start; /* make it handle the possibly big ins->inst_offset */
2532 gboolean swap = FALSE;
2536 /* Optimized away */
/* Some fp compare conditions only exist with operands swapped: invert
 * the following branch's condition and swap sreg1/sreg2. */
2541 /* Some fp compares require swapped operands */
2542 switch (ins->next->opcode) {
2544 ins->next->opcode = OP_FBLT;
2548 ins->next->opcode = OP_FBLT_UN;
2552 ins->next->opcode = OP_FBGE;
2556 ins->next->opcode = OP_FBGE_UN;
2564 ins->sreg1 = ins->sreg2;
2573 bb->last_ins = last_ins;
2574 bb->max_vreg = cfg->next_vreg;
/*
 * mono_arch_decompose_long_opts:
 *
 *   Decompose 64-bit IR ops into 32-bit pairs.  OP_LNEG becomes
 * RSBS/RSC with 0 (negate low word setting carry, then the high word
 * with borrow).  dreg+1/dreg+2 address the two halves of a long vreg.
 * (The 'ins = long_ins' aliasing assignment is presumably on a line not
 * visible in this dump.)
 */
2578 mono_arch_decompose_long_opts (MonoCompile *cfg, MonoInst *long_ins)
2582 if (long_ins->opcode == OP_LNEG) {
2584 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ARM_RSBS_IMM, ins->dreg + 1, ins->sreg1 + 1, 0);
2585 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ARM_RSC_IMM, ins->dreg + 2, ins->sreg1 + 2, 0);
/*
 * emit_float_to_int:
 *
 *   Emit native code converting the FP register 'sreg' to an integer in
 * 'dreg', truncating, then narrow the result to 'size' bytes with the
 * requested signedness (zero-extend via AND/logical shifts, sign-extend
 * via arithmetic shifts).  FPA uses FIXZ; VFP converts through F0.
 */
2591 emit_float_to_int (MonoCompile *cfg, guchar *code, int dreg, int sreg, int size, gboolean is_signed)
2593 /* sreg is a float, dreg is an integer reg */
2595 ARM_FIXZ (code, dreg, sreg);
2596 #elif defined(ARM_FPU_VFP)
2598 ARM_TOSIZD (code, ARM_VFP_F0, sreg);
2600 ARM_TOUIZD (code, ARM_VFP_F0, sreg);
2601 ARM_FMRS (code, dreg, ARM_VFP_F0);
/* Unsigned narrowing: mask (byte) or shift-pair (halfword). */
2605 ARM_AND_REG_IMM8 (code, dreg, dreg, 0xff);
2606 else if (size == 2) {
2607 ARM_SHL_IMM (code, dreg, dreg, 16);
2608 ARM_SHR_IMM (code, dreg, dreg, 16);
/* Signed narrowing: shift left then arithmetic shift right. */
2612 ARM_SHL_IMM (code, dreg, dreg, 24);
2613 ARM_SAR_IMM (code, dreg, dreg, 24);
2614 } else if (size == 2) {
2615 ARM_SHL_IMM (code, dreg, dreg, 16);
2616 ARM_SAR_IMM (code, dreg, dreg, 16);
2622 #endif /* #ifndef DISABLE_JIT */
2626 const guchar *target;
2631 #define is_call_imm(diff) ((gint)(diff) >= -33554432 && (gint)(diff) <= 33554431)
/*
 * search_thunk_slot:
 *
 *   mono_domain_code_foreach () callback.  Scans a code chunk's thunk
 * area for either an existing thunk targeting pdata->target (reuse) or a
 * free 3-word slot (emit a new thunk: ldr ip, [pc]; bx/mov pc, ip;
 * target word), then patches pdata->code to branch to the thunk.  Only
 * considers chunks the B/BL immediate (±32 MB) can actually reach.
 */
2634 search_thunk_slot (void *data, int csize, int bsize, void *user_data) {
2635 PatchData *pdata = (PatchData*)user_data;
2636 guchar *code = data;
2637 guint32 *thunks = data;
2638 guint32 *endthunks = (guint32*)(code + bsize);
2640 int difflow, diffhigh;
2642 /* always ensure a call from pdata->code can reach to the thunks without further thunks */
2643 difflow = (char*)pdata->code - (char*)thunks;
2644 diffhigh = (char*)pdata->code - (char*)endthunks;
2645 if (!((is_call_imm (thunks) && is_call_imm (endthunks)) || (is_call_imm (difflow) && is_call_imm (diffhigh))))
2649 * The thunk is composed of 3 words:
2650 * load constant from thunks [2] into ARM_IP
2653 * Note that the LR register is already setup
2655 //g_print ("thunk nentries: %d\n", ((char*)endthunks - (char*)thunks)/16);
/* pdata->found == 2 means "search every chunk"; otherwise restrict the
 * search to the chunk containing the call site. */
2656 if ((pdata->found == 2) || (pdata->code >= code && pdata->code <= code + csize)) {
2657 while (thunks < endthunks) {
2658 //g_print ("looking for target: %p at %p (%08x-%08x)\n", pdata->target, thunks, thunks [0], thunks [1]);
2659 if (thunks [2] == (guint32)pdata->target) {
/* Existing thunk for this target: just redirect the call site. */
2660 arm_patch (pdata->code, (guchar*)thunks);
2661 mono_arch_flush_icache (pdata->code, 4);
2664 } else if ((thunks [0] == 0) && (thunks [1] == 0) && (thunks [2] == 0)) {
2665 /* found a free slot instead: emit thunk */
2666 /* ARMREG_IP is fine to use since this can't be an IMT call
2669 code = (guchar*)thunks;
2670 ARM_LDR_IMM (code, ARMREG_IP, ARMREG_PC, 0);
/* BX preserves interworking on Thumb-capable cores. */
2671 if (thumb_supported)
2672 ARM_BX (code, ARMREG_IP);
2674 ARM_MOV_REG_REG (code, ARMREG_PC, ARMREG_IP);
2675 thunks [2] = (guint32)pdata->target;
2676 mono_arch_flush_icache ((guchar*)thunks, 12);
2678 arm_patch (pdata->code, (guchar*)thunks);
2679 mono_arch_flush_icache (pdata->code, 4);
2683 /* skip 12 bytes, the size of the thunk */
2687 //g_print ("failed thunk lookup for %p from %p at %p (%d entries)\n", pdata->target, pdata->code, data, count);
/*
 * handle_thunk:
 *
 *   Patch a call at 'code' to reach 'target' that is outside the direct
 * branch range, by finding/creating a thunk in the domain's code area
 * (see search_thunk_slot ()).  First tries an in-range slot; the second
 * foreach pass (pdata.found == 2) takes any free slot.  Asserts if no
 * thunk could be placed.
 */
2693 handle_thunk (MonoDomain *domain, int absolute, guchar *code, const guchar *target)
2698 domain = mono_domain_get ();
2701 pdata.target = target;
2702 pdata.absolute = absolute;
/* Hold the domain lock across both search passes. */
2705 mono_domain_lock (domain);
2706 mono_domain_code_foreach (domain, search_thunk_slot, &pdata);
2709 /* this uses the first available slot */
2711 mono_domain_code_foreach (domain, search_thunk_slot, &pdata);
2713 mono_domain_unlock (domain);
2715 if (pdata.found != 1)
2716 g_print ("thunk failed for %p from %p\n", target, code);
2717 g_assert (pdata.found == 1);
/*
 * arm_patch_general:
 * Rewrites the instruction(s) at CODE so that control transfers to TARGET.
 * Three code shapes are handled:
 *   1) a direct B/BL, patched in place when the signed 24-bit word
 *      displacement (±32MB) fits — converted to BLX when TARGET is Thumb;
 *   2) a BX/BLX-through-IP sequence whose address constant is embedded in
 *      the instruction stream — the constant slot is rewritten;
 *   3) otherwise the call is redirected through a thunk slot
 *      (handle_thunk ()), which needs DOMAIN for the code-chunk walk.
 */
2721 arm_patch_general (MonoDomain *domain, guchar *code, const guchar *target)
2723 guint32 *code32 = (void*)code;
2724 guint32 ins = *code32;
/* Bits 27-25 select the instruction class; 101b is the B/BL encoding. */
2725 guint32 prim = (ins >> 25) & 7;
2726 guint32 tval = GPOINTER_TO_UINT (target);
2728 //g_print ("patching 0x%08x (0x%08x) to point to 0x%08x\n", code, ins, target);
2729 if (prim == 5) { /* 101b */
2730 /* the diff starts 8 bytes from the branch opcode */
2731 gint diff = target - code - 8;
2733 gint tmask = 0xffffffff;
/* Bit 0 of a code address flags a Thumb entry point. */
2734 if (tval & 1) { /* entering thumb mode */
2735 diff = target - 1 - code - 8;
2736 g_assert (thumb_supported);
/* Force the condition field to 1111b, turning BL into BLX. */
2737 tbits = 0xf << 28; /* bl->blx bit pattern */
2738 g_assert ((ins & (1 << 24))); /* it must be a bl, not b instruction */
2739 /* this low bit of the displacement is moved to bit 24 in the instruction encoding */
2743 tmask = ~(1 << 24); /* clear the link bit */
2744 /*g_print ("blx to thumb: target: %p, code: %p, diff: %d, mask: %x\n", target, code, diff, tmask);*/
/* 33554431 == 2^25 - 1: the largest byte displacement the 24-bit
 * word-offset field can encode in the forward direction. */
2749 if (diff <= 33554431) {
2751 ins = (ins & 0xff000000) | diff;
2753 *code32 = ins | tbits;
2757 /* diff between 0 and -33554432 */
2758 if (diff >= -33554432) {
2760 ins = (ins & 0xff000000) | (diff & ~0xff000000);
2762 *code32 = ins | tbits;
/* Displacement out of range: fall back to a thunk. */
2767 handle_thunk (domain, TRUE, code, target);
2772 * The alternative call sequences looks like this:
2774 * ldr ip, [pc] // loads the address constant
2775 * b 1f // jumps around the constant
2776 * address constant embedded in the code
2781 * There are two cases for patching:
2782 * a) at the end of method emission: in this case code points to the start
2783 * of the call sequence
2784 * b) during runtime patching of the call site: in this case code points
2785 * to the mov pc, ip instruction
2787 * We have to handle also the thunk jump code sequence:
2791 * address constant // execution never reaches here
/* Masked compare against the BX-register encoding (cond field ignored). */
2793 if ((ins & 0x0ffffff0) == 0x12fff10) {
2794 /* Branch and exchange: the address is constructed in a reg
2795 * We can patch BX when the code sequence is the following:
2796 * ldr ip, [pc, #0] ; 0x8
/* Re-emit the expected sequence into ccode[] so the raw words can be
 * compared against what is actually in the code stream. */
2803 guint8 *emit = (guint8*)ccode;
2804 ARM_LDR_IMM (emit, ARMREG_IP, ARMREG_PC, 0);
2806 ARM_MOV_REG_REG (emit, ARMREG_LR, ARMREG_PC);
2807 ARM_BX (emit, ARMREG_IP);
2809 /*patching from magic trampoline*/
/* code points at the final BX: the constant slot is behind us. */
2810 if (ins == ccode [3]) {
2811 g_assert (code32 [-4] == ccode [0]);
2812 g_assert (code32 [-3] == ccode [1]);
2813 g_assert (code32 [-1] == ccode [2]);
2814 code32 [-2] = (guint32)target;
2817 /*patching from JIT*/
/* code points at the sequence start: the constant slot is ahead. */
2818 if (ins == ccode [0]) {
2819 g_assert (code32 [1] == ccode [1]);
2820 g_assert (code32 [3] == ccode [2]);
2821 g_assert (code32 [4] == ccode [3]);
2822 code32 [2] = (guint32)target;
/* Unrecognized BX sequence — refuse to patch blindly. */
2825 g_assert_not_reached ();
/* BLX-register encoding (cond field ignored). */
2826 } else if ((ins & 0x0ffffff0) == 0x12fff30) {
2834 guint8 *emit = (guint8*)ccode;
2835 ARM_LDR_IMM (emit, ARMREG_IP, ARMREG_PC, 0);
2837 ARM_BLX_REG (emit, ARMREG_IP);
2839 g_assert (code32 [-3] == ccode [0]);
2840 g_assert (code32 [-2] == ccode [1]);
2841 g_assert (code32 [0] == ccode [2]);
2843 code32 [-1] = (guint32)target;
/* Generic ldr/mov-pc sequence (covers the thunk jump shape as well). */
2846 guint32 *tmp = ccode;
2847 guint8 *emit = (guint8*)tmp;
2848 ARM_LDR_IMM (emit, ARMREG_IP, ARMREG_PC, 0);
2849 ARM_MOV_REG_REG (emit, ARMREG_LR, ARMREG_PC);
2850 ARM_MOV_REG_REG (emit, ARMREG_PC, ARMREG_IP);
2851 ARM_BX (emit, ARMREG_IP);
2852 if (ins == ccode [2]) {
2853 g_assert_not_reached (); // should be -2 ...
2854 code32 [-1] = (guint32)target;
2857 if (ins == ccode [0]) {
2858 /* handles both thunk jump code and the far call sequence */
2859 code32 [2] = (guint32)target;
2862 g_assert_not_reached ();
2864 // g_print ("patched with 0x%08x\n", ins);
/*
 * arm_patch:
 * Convenience wrapper: patch CODE to branch to TARGET with no explicit
 * domain.  If a thunk ends up being required, handle_thunk () presumably
 * falls back to the current domain — see arm_patch_general ().
 */
2868 arm_patch (guchar *code, const guchar *target)
2870 arm_patch_general (NULL, code, target);
2874 * Return the >= 0 uimm8 value if val can be represented with a byte + rotation
2875 * (with the rotation amount in *rot_amount. rot_amount is already adjusted
2876 * to be used with the emit macros.
2877 * Return -1 otherwise.
2880 mono_arm_is_rotated_imm8 (guint32 val, gint *rot_amount)
/* ARM data-processing immediates are an 8-bit value rotated right by an
 * even amount (0..30); try every even rotation. */
2883 for (i = 0; i < 31; i+= 2) {
/* Rotate val left by i (== rotate right by 32-i) to see if the
 * significant bits collapse into the low byte. */
2884 res = (val << (32 - i)) | (val >> i);
2887 *rot_amount = i? 32 - i: 0;
2894 * Emits in code a sequence of instructions that load the value 'val'
2895 * into the dreg register. Uses at most 4 instructions.
2898 mono_arm_emit_load_imm (guint8 *code, int dreg, guint32 val)
2900 int imm8, rot_amount;
/* NOTE(review): this pc-relative path (constant embedded in the stream)
 * appears to be conditionally compiled — its guard is not visible here. */
2902 ARM_LDR_IMM (code, dreg, ARMREG_PC, 0);
2903 /* skip the constant pool */
/* Fast paths: the value (or its complement) fits an 8-bit rotated
 * immediate, so a single MOV/MVN suffices. */
2909 if ((imm8 = mono_arm_is_rotated_imm8 (val, &rot_amount)) >= 0) {
2910 ARM_MOV_REG_IMM (code, dreg, imm8, rot_amount);
2911 } else if ((imm8 = mono_arm_is_rotated_imm8 (~val, &rot_amount)) >= 0) {
2912 ARM_MVN_REG_IMM (code, dreg, imm8, rot_amount);
/* MOVW/MOVT pair builds any 32-bit constant in two instructions
 * (guard elided — presumably v7_supported; TODO confirm). */
2915 ARM_MOVW_REG_IMM (code, dreg, val & 0xffff);
2917 ARM_MOVT_REG_IMM (code, dreg, (val >> 16) & 0xffff);
/* Fallback: build the constant byte by byte, one MOV plus up to
 * three ADDs with rotated immediates (rotation picks the byte lane). */
2921 ARM_MOV_REG_IMM8 (code, dreg, (val & 0xFF));
2923 ARM_ADD_REG_IMM (code, dreg, dreg, (val & 0xFF00) >> 8, 24);
2925 if (val & 0xFF0000) {
2926 ARM_ADD_REG_IMM (code, dreg, dreg, (val & 0xFF0000) >> 16, 16);
2928 if (val & 0xFF000000) {
2929 ARM_ADD_REG_IMM (code, dreg, dreg, (val & 0xFF000000) >> 24, 8);
2931 } else if (val & 0xFF00) {
2932 ARM_MOV_REG_IMM (code, dreg, (val & 0xFF00) >> 8, 24);
2933 if (val & 0xFF0000) {
2934 ARM_ADD_REG_IMM (code, dreg, dreg, (val & 0xFF0000) >> 16, 16);
2936 if (val & 0xFF000000) {
2937 ARM_ADD_REG_IMM (code, dreg, dreg, (val & 0xFF000000) >> 24, 8);
2939 } else if (val & 0xFF0000) {
2940 ARM_MOV_REG_IMM (code, dreg, (val & 0xFF0000) >> 16, 16);
2941 if (val & 0xFF000000) {
2942 ARM_ADD_REG_IMM (code, dreg, dreg, (val & 0xFF000000) >> 24, 8);
2945 //g_assert_not_reached ();
/*
 * mono_arm_thumb_supported:
 * Returns the cached thumb_supported flag (the file-scope variable is
 * initialized to 0 and set during arch initialization, not visible here).
 */
2951 mono_arm_thumb_supported (void)
2953 return thumb_supported;
2959 * emit_load_volatile_arguments:
2961 * Load volatile arguments from the stack to the original input registers.
2962 * Required before a tail call.
2965 emit_load_volatile_arguments (MonoCompile *cfg, guint8 *code)
2967 MonoMethod *method = cfg->method;
2968 MonoMethodSignature *sig;
2973 /* FIXME: Generate intermediate code instead */
2975 sig = mono_method_signature (method);
2977 /* This is the opposite of the code in emit_prolog */
/* Recompute the calling-convention layout to know where each argument
 * was spilled and which register it originally arrived in. */
2981 cinfo = get_call_info (NULL, sig, sig->pinvoke);
/* Valuetype return: reload the hidden return-buffer address register. */
2983 if (MONO_TYPE_ISSTRUCT (sig->ret)) {
2984 ArgInfo *ainfo = &cinfo->ret;
2985 inst = cfg->vret_addr;
2986 g_assert (arm_is_imm12 (inst->inst_offset));
2987 ARM_LDR_IMM (code, ainfo->reg, inst->inst_basereg, inst->inst_offset);
2989 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
2990 ArgInfo *ainfo = cinfo->args + i;
2991 inst = cfg->args [pos];
2993 if (cfg->verbose_level > 2)
2994 g_print ("Loading argument %d (type: %d)\n", i, ainfo->storage);
/* Argument lives in a register variable: move it back directly. */
2995 if (inst->opcode == OP_REGVAR) {
2996 if (ainfo->storage == RegTypeGeneral)
2997 ARM_MOV_REG_REG (code, inst->dreg, ainfo->reg);
2998 else if (ainfo->storage == RegTypeFP) {
2999 g_assert_not_reached ();
3000 } else if (ainfo->storage == RegTypeBase) {
/* Caller passed it on the stack: reload relative to sp. */
3004 if (arm_is_imm12 (prev_sp_offset + ainfo->offset)) {
3005 ARM_LDR_IMM (code, inst->dreg, ARMREG_SP, (prev_sp_offset + ainfo->offset));
/* Offset too large for the imm12 form: materialize it in IP. */
3007 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
3008 ARM_LDR_REG_REG (code, inst->dreg, ARMREG_SP, ARMREG_IP);
3012 g_assert_not_reached ();
/* Argument was spilled to the frame: reload into its input register(s). */
3014 if (ainfo->storage == RegTypeGeneral || ainfo->storage == RegTypeIRegPair) {
3015 switch (ainfo->size) {
/* 8-byte case: reload both halves of the register pair. */
3022 g_assert (arm_is_imm12 (inst->inst_offset));
3023 ARM_LDR_IMM (code, ainfo->reg, inst->inst_basereg, inst->inst_offset);
3024 g_assert (arm_is_imm12 (inst->inst_offset + 4));
3025 ARM_LDR_IMM (code, ainfo->reg + 1, inst->inst_basereg, inst->inst_offset + 4);
3028 if (arm_is_imm12 (inst->inst_offset)) {
3029 ARM_LDR_IMM (code, ainfo->reg, inst->inst_basereg, inst->inst_offset);
3031 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
3032 ARM_LDR_REG_REG (code, ainfo->reg, inst->inst_basereg, ARMREG_IP);
3036 } else if (ainfo->storage == RegTypeBaseGen) {
3039 } else if (ainfo->storage == RegTypeBase) {
3041 } else if (ainfo->storage == RegTypeFP) {
3042 g_assert_not_reached ();
/* Struct passed (partly) in registers: reload word by word. */
3043 } else if (ainfo->storage == RegTypeStructByVal) {
3044 int doffset = inst->inst_offset;
3048 if (mono_class_from_mono_type (inst->inst_vtype))
3049 size = mono_class_native_size (mono_class_from_mono_type (inst->inst_vtype), NULL);
3050 for (cur_reg = 0; cur_reg < ainfo->size; ++cur_reg) {
3051 if (arm_is_imm12 (doffset)) {
3052 ARM_LDR_IMM (code, ainfo->reg + cur_reg, inst->inst_basereg, doffset);
3054 code = mono_arm_emit_load_imm (code, ARMREG_IP, doffset);
3055 ARM_LDR_REG_REG (code, ainfo->reg + cur_reg, inst->inst_basereg, ARMREG_IP);
3057 soffset += sizeof (gpointer);
3058 doffset += sizeof (gpointer);
3063 } else if (ainfo->storage == RegTypeStructByAddr) {
3078 mono_arch_output_basic_block (MonoCompile *cfg, MonoBasicBlock *bb)
3083 guint8 *code = cfg->native_code + cfg->code_len;
3084 MonoInst *last_ins = NULL;
3085 guint last_offset = 0;
3087 int imm8, rot_amount;
3089 /* we don't align basic blocks of loops on arm */
3091 if (cfg->verbose_level > 2)
3092 g_print ("Basic block %d starting at offset 0x%x\n", bb->block_num, bb->native_offset);
3094 cpos = bb->max_offset;
3096 if (cfg->prof_options & MONO_PROFILE_COVERAGE) {
3097 //MonoCoverageInfo *cov = mono_get_coverage_info (cfg->method);
3098 //g_assert (!mono_compile_aot);
3101 // cov->data [bb->dfn].iloffset = bb->cil_code - cfg->cil_code;
3102 /* this is not thread save, but good enough */
3103 /* fixme: howto handle overflows? */
3104 //x86_inc_mem (code, &cov->data [bb->dfn].count);
3107 if (mono_break_at_bb_method && mono_method_desc_full_match (mono_break_at_bb_method, cfg->method) && bb->block_num == mono_break_at_bb_bb_num) {
3108 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_INTERNAL_METHOD,
3109 (gpointer)"mono_break");
3110 code = emit_call_seq (cfg, code);
3113 MONO_BB_FOR_EACH_INS (bb, ins) {
3114 offset = code - cfg->native_code;
3116 max_len = ((guint8 *)ins_get_spec (ins->opcode))[MONO_INST_LEN];
3118 if (offset > (cfg->code_size - max_len - 16)) {
3119 cfg->code_size *= 2;
3120 cfg->native_code = g_realloc (cfg->native_code, cfg->code_size);
3121 code = cfg->native_code + offset;
3123 // if (ins->cil_code)
3124 // g_print ("cil code\n");
3125 mono_debug_record_line_number (cfg, ins, offset);
3127 switch (ins->opcode) {
3128 case OP_MEMORY_BARRIER:
3131 #ifdef HAVE_AEABI_READ_TP
3132 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_INTERNAL_METHOD,
3133 (gpointer)"__aeabi_read_tp");
3134 code = emit_call_seq (cfg, code);
3136 ARM_LDR_IMM (code, ins->dreg, ARMREG_R0, ins->inst_offset);
3138 g_assert_not_reached ();
3142 ppc_mullw (code, ppc_r4, ins->sreg1, ins->sreg2);
3143 ppc_mulhw (code, ppc_r3, ins->sreg1, ins->sreg2);
3146 ppc_mullw (code, ppc_r4, ins->sreg1, ins->sreg2);
3147 ppc_mulhwu (code, ppc_r3, ins->sreg1, ins->sreg2);
3149 case OP_STOREI1_MEMBASE_IMM:
3150 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_imm & 0xFF);
3151 g_assert (arm_is_imm12 (ins->inst_offset));
3152 ARM_STRB_IMM (code, ARMREG_LR, ins->inst_destbasereg, ins->inst_offset);
3154 case OP_STOREI2_MEMBASE_IMM:
3155 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_imm & 0xFFFF);
3156 g_assert (arm_is_imm8 (ins->inst_offset));
3157 ARM_STRH_IMM (code, ARMREG_LR, ins->inst_destbasereg, ins->inst_offset);
3159 case OP_STORE_MEMBASE_IMM:
3160 case OP_STOREI4_MEMBASE_IMM:
3161 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_imm);
3162 g_assert (arm_is_imm12 (ins->inst_offset));
3163 ARM_STR_IMM (code, ARMREG_LR, ins->inst_destbasereg, ins->inst_offset);
3165 case OP_STOREI1_MEMBASE_REG:
3166 g_assert (arm_is_imm12 (ins->inst_offset));
3167 ARM_STRB_IMM (code, ins->sreg1, ins->inst_destbasereg, ins->inst_offset);
3169 case OP_STOREI2_MEMBASE_REG:
3170 g_assert (arm_is_imm8 (ins->inst_offset));
3171 ARM_STRH_IMM (code, ins->sreg1, ins->inst_destbasereg, ins->inst_offset);
3173 case OP_STORE_MEMBASE_REG:
3174 case OP_STOREI4_MEMBASE_REG:
3175 /* this case is special, since it happens for spill code after lowering has been called */
3176 if (arm_is_imm12 (ins->inst_offset)) {
3177 ARM_STR_IMM (code, ins->sreg1, ins->inst_destbasereg, ins->inst_offset);
3179 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_offset);
3180 ARM_STR_REG_REG (code, ins->sreg1, ins->inst_destbasereg, ARMREG_LR);
3183 case OP_STOREI1_MEMINDEX:
3184 ARM_STRB_REG_REG (code, ins->sreg1, ins->inst_destbasereg, ins->sreg2);
3186 case OP_STOREI2_MEMINDEX:
3187 ARM_STRH_REG_REG (code, ins->sreg1, ins->inst_destbasereg, ins->sreg2);
3189 case OP_STORE_MEMINDEX:
3190 case OP_STOREI4_MEMINDEX:
3191 ARM_STR_REG_REG (code, ins->sreg1, ins->inst_destbasereg, ins->sreg2);
3194 g_assert_not_reached ();
3196 case OP_LOAD_MEMINDEX:
3197 case OP_LOADI4_MEMINDEX:
3198 case OP_LOADU4_MEMINDEX:
3199 ARM_LDR_REG_REG (code, ins->dreg, ins->inst_basereg, ins->sreg2);
3201 case OP_LOADI1_MEMINDEX:
3202 ARM_LDRSB_REG_REG (code, ins->dreg, ins->inst_basereg, ins->sreg2);
3204 case OP_LOADU1_MEMINDEX:
3205 ARM_LDRB_REG_REG (code, ins->dreg, ins->inst_basereg, ins->sreg2);
3207 case OP_LOADI2_MEMINDEX:
3208 ARM_LDRSH_REG_REG (code, ins->dreg, ins->inst_basereg, ins->sreg2);
3210 case OP_LOADU2_MEMINDEX:
3211 ARM_LDRH_REG_REG (code, ins->dreg, ins->inst_basereg, ins->sreg2);
3213 case OP_LOAD_MEMBASE:
3214 case OP_LOADI4_MEMBASE:
3215 case OP_LOADU4_MEMBASE:
3216 /* this case is special, since it happens for spill code after lowering has been called */
3217 if (arm_is_imm12 (ins->inst_offset)) {
3218 ARM_LDR_IMM (code, ins->dreg, ins->inst_basereg, ins->inst_offset);
3220 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_offset);
3221 ARM_LDR_REG_REG (code, ins->dreg, ins->inst_basereg, ARMREG_LR);
3224 case OP_LOADI1_MEMBASE:
3225 g_assert (arm_is_imm8 (ins->inst_offset));
3226 ARM_LDRSB_IMM (code, ins->dreg, ins->inst_basereg, ins->inst_offset);
3228 case OP_LOADU1_MEMBASE:
3229 g_assert (arm_is_imm12 (ins->inst_offset));
3230 ARM_LDRB_IMM (code, ins->dreg, ins->inst_basereg, ins->inst_offset);
3232 case OP_LOADU2_MEMBASE:
3233 g_assert (arm_is_imm8 (ins->inst_offset));
3234 ARM_LDRH_IMM (code, ins->dreg, ins->inst_basereg, ins->inst_offset);
3236 case OP_LOADI2_MEMBASE:
3237 g_assert (arm_is_imm8 (ins->inst_offset));
3238 ARM_LDRSH_IMM (code, ins->dreg, ins->inst_basereg, ins->inst_offset);
3240 case OP_ICONV_TO_I1:
3241 ARM_SHL_IMM (code, ins->dreg, ins->sreg1, 24);
3242 ARM_SAR_IMM (code, ins->dreg, ins->dreg, 24);
3244 case OP_ICONV_TO_I2:
3245 ARM_SHL_IMM (code, ins->dreg, ins->sreg1, 16);
3246 ARM_SAR_IMM (code, ins->dreg, ins->dreg, 16);
3248 case OP_ICONV_TO_U1:
3249 ARM_AND_REG_IMM8 (code, ins->dreg, ins->sreg1, 0xff);
3251 case OP_ICONV_TO_U2:
3252 ARM_SHL_IMM (code, ins->dreg, ins->sreg1, 16);
3253 ARM_SHR_IMM (code, ins->dreg, ins->dreg, 16);
3257 ARM_CMP_REG_REG (code, ins->sreg1, ins->sreg2);
3259 case OP_COMPARE_IMM:
3260 case OP_ICOMPARE_IMM:
3261 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
3262 g_assert (imm8 >= 0);
3263 ARM_CMP_REG_IMM (code, ins->sreg1, imm8, rot_amount);
3267 * gdb does not like encountering the hw breakpoint ins in the debugged code.
3268 * So instead of emitting a trap, we emit a call a C function and place a
3271 //*(int*)code = 0xef9f0001;
3274 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_INTERNAL_METHOD,
3275 (gpointer)"mono_break");
3276 code = emit_call_seq (cfg, code);
3278 case OP_RELAXED_NOP:
3283 case OP_DUMMY_STORE:
3284 case OP_NOT_REACHED:
3287 case OP_SEQ_POINT: {
3289 MonoInst *info_var = cfg->arch.seq_point_info_var;
3290 MonoInst *ss_trigger_page_var = cfg->arch.ss_trigger_page_var;
3292 int dreg = ARMREG_LR;
3295 * For AOT, we use one got slot per method, which will point to a
3296 * SeqPointInfo structure, containing all the information required
3297 * by the code below.
3299 if (cfg->compile_aot) {
3300 g_assert (info_var);
3301 g_assert (info_var->opcode == OP_REGOFFSET);
3302 g_assert (arm_is_imm12 (info_var->inst_offset));
3306 * Read from the single stepping trigger page. This will cause a
3307 * SIGSEGV when single stepping is enabled.
3308 * We do this _before_ the breakpoint, so single stepping after
3309 * a breakpoint is hit will step to the next IL offset.
3311 g_assert (((guint64)(gsize)ss_trigger_page >> 32) == 0);
3313 if (ins->flags & MONO_INST_SINGLE_STEP_LOC) {
3314 if (cfg->compile_aot) {
3315 /* Load the trigger page addr from the variable initialized in the prolog */
3316 var = ss_trigger_page_var;
3318 g_assert (var->opcode == OP_REGOFFSET);
3319 g_assert (arm_is_imm12 (var->inst_offset));
3320 ARM_LDR_IMM (code, dreg, var->inst_basereg, var->inst_offset);
3322 ARM_LDR_IMM (code, dreg, ARMREG_PC, 0);
3324 *(int*)code = (int)ss_trigger_page;
3327 ARM_LDR_IMM (code, dreg, dreg, 0);
3330 mono_add_seq_point (cfg, bb, ins, code - cfg->native_code);
3332 if (cfg->compile_aot) {
3333 guint32 offset = code - cfg->native_code;
3336 ARM_LDR_IMM (code, dreg, info_var->inst_basereg, info_var->inst_offset);
3337 /* Add the offset */
3338 val = ((offset / 4) * sizeof (guint8*)) + G_STRUCT_OFFSET (SeqPointInfo, bp_addrs);
3339 ARM_ADD_REG_IMM (code, dreg, dreg, (val & 0xFF), 0);
3341 * Have to emit nops to keep the difference between the offset
3342 * stored in seq_points and breakpoint instruction constant,
3343 * mono_arch_get_ip_for_breakpoint () depends on this.
3346 ARM_ADD_REG_IMM (code, dreg, dreg, (val & 0xFF00) >> 8, 24);
3350 ARM_ADD_REG_IMM (code, dreg, dreg, (val & 0xFF0000) >> 16, 16);
3353 g_assert (!(val & 0xFF000000));
3354 /* Load the info->bp_addrs [offset], which is either 0 or the address of a trigger page */
3355 ARM_LDR_IMM (code, dreg, dreg, 0);
3357 /* What is faster, a branch or a load ? */
3358 ARM_CMP_REG_IMM (code, dreg, 0, 0);
3359 /* The breakpoint instruction */
3360 ARM_LDR_IMM_COND (code, dreg, dreg, 0, ARMCOND_NE);
3363 * A placeholder for a possible breakpoint inserted by
3364 * mono_arch_set_breakpoint ().
3366 for (i = 0; i < 4; ++i)
3373 ARM_ADDS_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
3376 ARM_ADD_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
3380 ARM_ADCS_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
3383 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
3384 g_assert (imm8 >= 0);
3385 ARM_ADDS_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
3389 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
3390 g_assert (imm8 >= 0);
3391 ARM_ADD_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
3395 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
3396 g_assert (imm8 >= 0);
3397 ARM_ADCS_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
3400 ARM_ADD_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
3401 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, "OverflowException");
3403 case OP_IADD_OVF_UN:
3404 ARM_ADD_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
3405 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, "OverflowException");
3408 ARM_SUB_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
3409 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, "OverflowException");
3411 case OP_ISUB_OVF_UN:
3412 ARM_SUB_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
3413 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_TRUE, PPC_BR_EQ, "OverflowException");
3415 case OP_ADD_OVF_CARRY:
3416 ARM_ADCS_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
3417 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, "OverflowException");
3419 case OP_ADD_OVF_UN_CARRY:
3420 ARM_ADCS_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
3421 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, "OverflowException");
3423 case OP_SUB_OVF_CARRY:
3424 ARM_SBCS_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
3425 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, "OverflowException");
3427 case OP_SUB_OVF_UN_CARRY:
3428 ARM_SBCS_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
3429 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_TRUE, PPC_BR_EQ, "OverflowException");
3433 ARM_SUBS_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
3436 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
3437 g_assert (imm8 >= 0);
3438 ARM_SUBS_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
3441 ARM_SUB_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
3445 ARM_SBCS_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
3449 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
3450 g_assert (imm8 >= 0);
3451 ARM_SUB_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
3455 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
3456 g_assert (imm8 >= 0);
3457 ARM_SBCS_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
3459 case OP_ARM_RSBS_IMM:
3460 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
3461 g_assert (imm8 >= 0);
3462 ARM_RSBS_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
3464 case OP_ARM_RSC_IMM:
3465 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
3466 g_assert (imm8 >= 0);
3467 ARM_RSC_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
3470 ARM_AND_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
3474 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
3475 g_assert (imm8 >= 0);
3476 ARM_AND_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
3484 /* crappy ARM arch doesn't have a DIV instruction */
3485 g_assert_not_reached ();
3487 ARM_ORR_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
3491 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
3492 g_assert (imm8 >= 0);
3493 ARM_ORR_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
3496 ARM_EOR_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
3500 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
3501 g_assert (imm8 >= 0);
3502 ARM_EOR_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
3505 ARM_SHL_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
3510 ARM_SHL_IMM (code, ins->dreg, ins->sreg1, (ins->inst_imm & 0x1f));
3511 else if (ins->dreg != ins->sreg1)
3512 ARM_MOV_REG_REG (code, ins->dreg, ins->sreg1);
3515 ARM_SAR_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
3520 ARM_SAR_IMM (code, ins->dreg, ins->sreg1, (ins->inst_imm & 0x1f));
3521 else if (ins->dreg != ins->sreg1)
3522 ARM_MOV_REG_REG (code, ins->dreg, ins->sreg1);
3525 case OP_ISHR_UN_IMM:
3527 ARM_SHR_IMM (code, ins->dreg, ins->sreg1, (ins->inst_imm & 0x1f));
3528 else if (ins->dreg != ins->sreg1)
3529 ARM_MOV_REG_REG (code, ins->dreg, ins->sreg1);
3532 ARM_SHR_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
3535 ARM_MVN_REG_REG (code, ins->dreg, ins->sreg1);
3538 ARM_RSB_REG_IMM8 (code, ins->dreg, ins->sreg1, 0);
3541 if (ins->dreg == ins->sreg2)
3542 ARM_MUL_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
3544 ARM_MUL_REG_REG (code, ins->dreg, ins->sreg2, ins->sreg1);
3547 g_assert_not_reached ();
3550 /* FIXME: handle ovf/ sreg2 != dreg */
3551 ARM_MUL_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
3552 /* FIXME: MUL doesn't set the C/O flags on ARM */
3554 case OP_IMUL_OVF_UN:
3555 /* FIXME: handle ovf/ sreg2 != dreg */
3556 ARM_MUL_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
3557 /* FIXME: MUL doesn't set the C/O flags on ARM */
3560 code = mono_arm_emit_load_imm (code, ins->dreg, ins->inst_c0);
3563 /* Load the GOT offset */
3564 mono_add_patch_info (cfg, offset, (MonoJumpInfoType)ins->inst_i1, ins->inst_p0);
3565 ARM_LDR_IMM (code, ins->dreg, ARMREG_PC, 0);
3567 *(gpointer*)code = NULL;
3569 /* Load the value from the GOT */
3570 ARM_LDR_REG_REG (code, ins->dreg, ARMREG_PC, ins->dreg);
3572 case OP_ICONV_TO_I4:
3573 case OP_ICONV_TO_U4:
3575 if (ins->dreg != ins->sreg1)
3576 ARM_MOV_REG_REG (code, ins->dreg, ins->sreg1);
3579 int saved = ins->sreg2;
3580 if (ins->sreg2 == ARM_LSW_REG) {
3581 ARM_MOV_REG_REG (code, ARMREG_LR, ins->sreg2);
3584 if (ins->sreg1 != ARM_LSW_REG)
3585 ARM_MOV_REG_REG (code, ARM_LSW_REG, ins->sreg1);
3586 if (saved != ARM_MSW_REG)
3587 ARM_MOV_REG_REG (code, ARM_MSW_REG, saved);
3592 ARM_MVFD (code, ins->dreg, ins->sreg1);
3593 #elif defined(ARM_FPU_VFP)
3594 ARM_CPYD (code, ins->dreg, ins->sreg1);
3597 case OP_FCONV_TO_R4:
3599 ARM_MVFS (code, ins->dreg, ins->sreg1);
3600 #elif defined(ARM_FPU_VFP)
3601 ARM_CVTD (code, ins->dreg, ins->sreg1);
3602 ARM_CVTS (code, ins->dreg, ins->dreg);
3607 * Keep in sync with mono_arch_emit_epilog
3609 g_assert (!cfg->method->save_lmf);
3611 code = emit_load_volatile_arguments (cfg, code);
3613 code = emit_big_add (code, ARMREG_SP, cfg->frame_reg, cfg->stack_usage);
3614 ARM_POP_NWB (code, cfg->used_int_regs | ((1 << ARMREG_SP)) | ((1 << ARMREG_LR)));
3615 mono_add_patch_info (cfg, (guint8*) code - cfg->native_code, MONO_PATCH_INFO_METHOD_JUMP, ins->inst_p0);
3616 if (cfg->compile_aot) {
3617 ARM_LDR_IMM (code, ARMREG_IP, ARMREG_PC, 0);
3619 *(gpointer*)code = NULL;
3621 ARM_LDR_REG_REG (code, ARMREG_PC, ARMREG_PC, ARMREG_IP);
3627 /* ensure ins->sreg1 is not NULL */
3628 ARM_LDR_IMM (code, ARMREG_LR, ins->sreg1, 0);
3631 g_assert (cfg->sig_cookie < 128);
3632 ARM_LDR_IMM (code, ARMREG_IP, cfg->frame_reg, cfg->sig_cookie);
3633 ARM_STR_IMM (code, ARMREG_IP, ins->sreg1, 0);
3642 call = (MonoCallInst*)ins;
3643 if (ins->flags & MONO_INST_HAS_METHOD)
3644 mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_METHOD, call->method);
3646 mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_ABS, call->fptr);
3647 code = emit_call_seq (cfg, code);
3648 code = emit_move_return_value (cfg, ins, code);
3654 case OP_VOIDCALL_REG:
3656 code = emit_call_reg (code, ins->sreg1);
3657 code = emit_move_return_value (cfg, ins, code);
3659 case OP_FCALL_MEMBASE:
3660 case OP_LCALL_MEMBASE:
3661 case OP_VCALL_MEMBASE:
3662 case OP_VCALL2_MEMBASE:
3663 case OP_VOIDCALL_MEMBASE:
3664 case OP_CALL_MEMBASE:
3665 g_assert (arm_is_imm12 (ins->inst_offset));
3666 g_assert (ins->sreg1 != ARMREG_LR);
3667 call = (MonoCallInst*)ins;
3668 if (call->method->klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
3669 ARM_ADD_REG_IMM8 (code, ARMREG_LR, ARMREG_PC, 4);
3670 ARM_LDR_IMM (code, ARMREG_PC, ins->sreg1, ins->inst_offset);
3672 * We can't embed the method in the code stream in PIC code, or
3674 * Instead, we put it in V5 in code emitted by
3675 * mono_arch_emit_imt_argument (), and embed NULL here to
3676 * signal the IMT thunk that the value is in V5.
3678 if (call->dynamic_imt_arg)
3679 *((gpointer*)code) = NULL;
3681 *((gpointer*)code) = (gpointer)call->method;
3684 ARM_MOV_REG_REG (code, ARMREG_LR, ARMREG_PC);
3685 ARM_LDR_IMM (code, ARMREG_PC, ins->sreg1, ins->inst_offset);
3687 code = emit_move_return_value (cfg, ins, code);
3690 /* keep alignment */
3691 int alloca_waste = cfg->param_area;
3694 /* round the size to 8 bytes */
3695 ARM_ADD_REG_IMM8 (code, ins->dreg, ins->sreg1, 7);
3696 ARM_BIC_REG_IMM8 (code, ins->dreg, ins->dreg, 7);
3698 ARM_ADD_REG_IMM8 (code, ins->dreg, ins->dreg, alloca_waste);
3699 ARM_SUB_REG_REG (code, ARMREG_SP, ARMREG_SP, ins->dreg);
3700 /* memzero the area: dreg holds the size, sp is the pointer */
3701 if (ins->flags & MONO_INST_INIT) {
3702 guint8 *start_loop, *branch_to_cond;
3703 ARM_MOV_REG_IMM8 (code, ARMREG_LR, 0);
3704 branch_to_cond = code;
3707 ARM_STR_REG_REG (code, ARMREG_LR, ARMREG_SP, ins->dreg);
3708 arm_patch (branch_to_cond, code);
3709 /* decrement by 4 and set flags */
3710 ARM_SUBS_REG_IMM8 (code, ins->dreg, ins->dreg, 4);
3711 ARM_B_COND (code, ARMCOND_GE, 0);
3712 arm_patch (code - 4, start_loop);
3714 ARM_ADD_REG_IMM8 (code, ins->dreg, ARMREG_SP, alloca_waste);
3719 MonoInst *var = cfg->dyn_call_var;
3721 g_assert (var->opcode == OP_REGOFFSET);
3722 g_assert (arm_is_imm12 (var->inst_offset));
3724 /* lr = args buffer filled by mono_arch_get_dyn_call_args () */
3725 ARM_MOV_REG_REG( code, ARMREG_LR, ins->sreg1);
3727 ARM_MOV_REG_REG( code, ARMREG_IP, ins->sreg2);
3729 /* Save args buffer */
3730 ARM_STR_IMM (code, ARMREG_LR, var->inst_basereg, var->inst_offset);
3732 /* Set stack slots using R0 as scratch reg */
3733 /* MONO_ARCH_DYN_CALL_PARAM_AREA gives the size of stack space available */
3734 for (i = 0; i < DYN_CALL_STACK_ARGS; ++i) {
3735 ARM_LDR_IMM (code, ARMREG_R0, ARMREG_LR, (PARAM_REGS + i) * sizeof (gpointer));
3736 ARM_STR_IMM (code, ARMREG_R0, ARMREG_SP, i * sizeof (gpointer));
3739 /* Set argument registers */
3740 for (i = 0; i < PARAM_REGS; ++i)
3741 ARM_LDR_IMM (code, i, ARMREG_LR, i * sizeof (gpointer));
3744 ARM_MOV_REG_REG (code, ARMREG_LR, ARMREG_PC);
3745 ARM_MOV_REG_REG (code, ARMREG_PC, ARMREG_IP);
3748 ARM_LDR_IMM (code, ARMREG_IP, var->inst_basereg, var->inst_offset);
3749 ARM_STR_IMM (code, ARMREG_R0, ARMREG_IP, G_STRUCT_OFFSET (DynCallArgs, res));
3750 ARM_STR_IMM (code, ARMREG_R1, ARMREG_IP, G_STRUCT_OFFSET (DynCallArgs, res2));
3754 if (ins->sreg1 != ARMREG_R0)
3755 ARM_MOV_REG_REG (code, ARMREG_R0, ins->sreg1);
3756 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_INTERNAL_METHOD,
3757 (gpointer)"mono_arch_throw_exception");
3758 code = emit_call_seq (cfg, code);
3762 if (ins->sreg1 != ARMREG_R0)
3763 ARM_MOV_REG_REG (code, ARMREG_R0, ins->sreg1);
3764 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_INTERNAL_METHOD,
3765 (gpointer)"mono_arch_rethrow_exception");
3766 code = emit_call_seq (cfg, code);
3769 case OP_START_HANDLER: {
3770 MonoInst *spvar = mono_find_spvar_for_region (cfg, bb->region);
3772 if (arm_is_imm12 (spvar->inst_offset)) {
3773 ARM_STR_IMM (code, ARMREG_LR, spvar->inst_basereg, spvar->inst_offset);
3775 code = mono_arm_emit_load_imm (code, ARMREG_IP, spvar->inst_offset);
3776 ARM_STR_REG_REG (code, ARMREG_LR, spvar->inst_basereg, ARMREG_IP);
3780 case OP_ENDFILTER: {
3781 MonoInst *spvar = mono_find_spvar_for_region (cfg, bb->region);
3783 if (ins->sreg1 != ARMREG_R0)
3784 ARM_MOV_REG_REG (code, ARMREG_R0, ins->sreg1);
3785 if (arm_is_imm12 (spvar->inst_offset)) {
3786 ARM_LDR_IMM (code, ARMREG_IP, spvar->inst_basereg, spvar->inst_offset);
3788 g_assert (ARMREG_IP != spvar->inst_basereg);
3789 code = mono_arm_emit_load_imm (code, ARMREG_IP, spvar->inst_offset);
3790 ARM_LDR_REG_REG (code, ARMREG_IP, spvar->inst_basereg, ARMREG_IP);
3792 ARM_MOV_REG_REG (code, ARMREG_PC, ARMREG_IP);
3795 case OP_ENDFINALLY: {
3796 MonoInst *spvar = mono_find_spvar_for_region (cfg, bb->region);
3798 if (arm_is_imm12 (spvar->inst_offset)) {
3799 ARM_LDR_IMM (code, ARMREG_IP, spvar->inst_basereg, spvar->inst_offset);
3801 g_assert (ARMREG_IP != spvar->inst_basereg);
3802 code = mono_arm_emit_load_imm (code, ARMREG_IP, spvar->inst_offset);
3803 ARM_LDR_REG_REG (code, ARMREG_IP, spvar->inst_basereg, ARMREG_IP);
3805 ARM_MOV_REG_REG (code, ARMREG_PC, ARMREG_IP);
3808 case OP_CALL_HANDLER:
3809 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_BB, ins->inst_target_bb);
3813 ins->inst_c0 = code - cfg->native_code;
3816 /*if (ins->inst_target_bb->native_offset) {
3818 //x86_jump_code (code, cfg->native_code + ins->inst_target_bb->native_offset);
3820 mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_BB, ins->inst_target_bb);
3825 ARM_MOV_REG_REG (code, ARMREG_PC, ins->sreg1);
3829 * In the normal case we have:
3830 * ldr pc, [pc, ins->sreg1 << 2]
3833 * ldr lr, [pc, ins->sreg1 << 2]
3835 * After follows the data.
3836 * FIXME: add aot support.
3838 mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_SWITCH, ins->inst_p0);
3839 max_len += 4 * GPOINTER_TO_INT (ins->klass);
3840 if (offset > (cfg->code_size - max_len - 16)) {
3841 cfg->code_size += max_len;
3842 cfg->code_size *= 2;
3843 cfg->native_code = g_realloc (cfg->native_code, cfg->code_size);
3844 code = cfg->native_code + offset;
3846 ARM_LDR_REG_REG_SHIFT (code, ARMREG_PC, ARMREG_PC, ins->sreg1, ARMSHIFT_LSL, 2);
3848 code += 4 * GPOINTER_TO_INT (ins->klass);
3852 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 0, ARMCOND_NE);
3853 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_EQ);
3857 ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
3858 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_LT);
3862 ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
3863 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_LO);
3867 ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
3868 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_GT);
3872 ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
3873 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_HI);
3875 case OP_COND_EXC_EQ:
3876 case OP_COND_EXC_NE_UN:
3877 case OP_COND_EXC_LT:
3878 case OP_COND_EXC_LT_UN:
3879 case OP_COND_EXC_GT:
3880 case OP_COND_EXC_GT_UN:
3881 case OP_COND_EXC_GE:
3882 case OP_COND_EXC_GE_UN:
3883 case OP_COND_EXC_LE:
3884 case OP_COND_EXC_LE_UN:
3885 EMIT_COND_SYSTEM_EXCEPTION (ins->opcode - OP_COND_EXC_EQ, ins->inst_p1);
3887 case OP_COND_EXC_IEQ:
3888 case OP_COND_EXC_INE_UN:
3889 case OP_COND_EXC_ILT:
3890 case OP_COND_EXC_ILT_UN:
3891 case OP_COND_EXC_IGT:
3892 case OP_COND_EXC_IGT_UN:
3893 case OP_COND_EXC_IGE:
3894 case OP_COND_EXC_IGE_UN:
3895 case OP_COND_EXC_ILE:
3896 case OP_COND_EXC_ILE_UN:
3897 EMIT_COND_SYSTEM_EXCEPTION (ins->opcode - OP_COND_EXC_IEQ, ins->inst_p1);
3900 case OP_COND_EXC_IC:
3901 EMIT_COND_SYSTEM_EXCEPTION_FLAGS (ARMCOND_CS, ins->inst_p1);
3903 case OP_COND_EXC_OV:
3904 case OP_COND_EXC_IOV:
3905 EMIT_COND_SYSTEM_EXCEPTION_FLAGS (ARMCOND_VS, ins->inst_p1);
3907 case OP_COND_EXC_NC:
3908 case OP_COND_EXC_INC:
3909 EMIT_COND_SYSTEM_EXCEPTION_FLAGS (ARMCOND_CC, ins->inst_p1);
3911 case OP_COND_EXC_NO:
3912 case OP_COND_EXC_INO:
3913 EMIT_COND_SYSTEM_EXCEPTION_FLAGS (ARMCOND_VC, ins->inst_p1);
3925 EMIT_COND_BRANCH (ins, ins->opcode - OP_IBEQ);
3928 /* floating point opcodes */
3931 if (cfg->compile_aot) {
3932 ARM_LDFD (code, ins->dreg, ARMREG_PC, 0);
3934 *(guint32*)code = ((guint32*)(ins->inst_p0))[0];
3936 *(guint32*)code = ((guint32*)(ins->inst_p0))[1];
3939 /* FIXME: we can optimize the imm load by dealing with part of
3940 * the displacement in LDFD (aligning to 512).
3942 code = mono_arm_emit_load_imm (code, ARMREG_LR, (guint32)ins->inst_p0);
3943 ARM_LDFD (code, ins->dreg, ARMREG_LR, 0);
3947 if (cfg->compile_aot) {
3948 ARM_LDFS (code, ins->dreg, ARMREG_PC, 0);
3950 *(guint32*)code = ((guint32*)(ins->inst_p0))[0];
3953 code = mono_arm_emit_load_imm (code, ARMREG_LR, (guint32)ins->inst_p0);
3954 ARM_LDFS (code, ins->dreg, ARMREG_LR, 0);
3957 case OP_STORER8_MEMBASE_REG:
3958 /* This is generated by the local regalloc pass which runs after the lowering pass */
3959 if (!arm_is_fpimm8 (ins->inst_offset)) {
3960 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_offset);
3961 ARM_ADD_REG_REG (code, ARMREG_LR, ARMREG_LR, ins->inst_destbasereg);
3962 ARM_STFD (code, ins->sreg1, ARMREG_LR, 0);
3964 ARM_STFD (code, ins->sreg1, ins->inst_destbasereg, ins->inst_offset);
3967 case OP_LOADR8_MEMBASE:
3968 /* This is generated by the local regalloc pass which runs after the lowering pass */
3969 if (!arm_is_fpimm8 (ins->inst_offset)) {
3970 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_offset);
3971 ARM_ADD_REG_REG (code, ARMREG_LR, ARMREG_LR, ins->inst_basereg);
3972 ARM_LDFD (code, ins->dreg, ARMREG_LR, 0);
3974 ARM_LDFD (code, ins->dreg, ins->inst_basereg, ins->inst_offset);
3977 case OP_STORER4_MEMBASE_REG:
3978 g_assert (arm_is_fpimm8 (ins->inst_offset));
3979 ARM_STFS (code, ins->sreg1, ins->inst_destbasereg, ins->inst_offset);
3981 case OP_LOADR4_MEMBASE:
3982 g_assert (arm_is_fpimm8 (ins->inst_offset));
3983 ARM_LDFS (code, ins->dreg, ins->inst_basereg, ins->inst_offset);
3985 case OP_ICONV_TO_R_UN: {
3987 tmpreg = ins->dreg == 0? 1: 0;
3988 ARM_CMP_REG_IMM8 (code, ins->sreg1, 0);
3989 ARM_FLTD (code, ins->dreg, ins->sreg1);
3990 ARM_B_COND (code, ARMCOND_GE, 8);
3991 /* save the temp register */
3992 ARM_SUB_REG_IMM8 (code, ARMREG_SP, ARMREG_SP, 8);
3993 ARM_STFD (code, tmpreg, ARMREG_SP, 0);
3994 ARM_LDFD (code, tmpreg, ARMREG_PC, 12);
3995 ARM_FPA_ADFD (code, ins->dreg, ins->dreg, tmpreg);
3996 ARM_LDFD (code, tmpreg, ARMREG_SP, 0);
3997 ARM_ADD_REG_IMM8 (code, ARMREG_SP, ARMREG_SP, 8);
3998 /* skip the constant pool */
4001 *(int*)code = 0x41f00000;
4006 * ldfltd ftemp, [pc, #8] 0x41f00000 0x00000000
4007 * adfltd fdest, fdest, ftemp
4011 case OP_ICONV_TO_R4:
4012 ARM_FLTS (code, ins->dreg, ins->sreg1);
4014 case OP_ICONV_TO_R8:
4015 ARM_FLTD (code, ins->dreg, ins->sreg1);
4018 #elif defined(ARM_FPU_VFP)
4021 if (cfg->compile_aot) {
4022 ARM_FLDD (code, ins->dreg, ARMREG_PC, 0);
4024 *(guint32*)code = ((guint32*)(ins->inst_p0))[0];
4026 *(guint32*)code = ((guint32*)(ins->inst_p0))[1];
4029 /* FIXME: we can optimize the imm load by dealing with part of
4030 * the displacement in LDFD (aligning to 512).
4032 code = mono_arm_emit_load_imm (code, ARMREG_LR, (guint32)ins->inst_p0);
4033 ARM_FLDD (code, ins->dreg, ARMREG_LR, 0);
4037 if (cfg->compile_aot) {
4038 ARM_FLDS (code, ins->dreg, ARMREG_PC, 0);
4040 *(guint32*)code = ((guint32*)(ins->inst_p0))[0];
4042 ARM_CVTS (code, ins->dreg, ins->dreg);
4044 code = mono_arm_emit_load_imm (code, ARMREG_LR, (guint32)ins->inst_p0);
4045 ARM_FLDS (code, ins->dreg, ARMREG_LR, 0);
4046 ARM_CVTS (code, ins->dreg, ins->dreg);
4049 case OP_STORER8_MEMBASE_REG:
4050 /* This is generated by the local regalloc pass which runs after the lowering pass */
4051 if (!arm_is_fpimm8 (ins->inst_offset)) {
4052 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_offset);
4053 ARM_ADD_REG_REG (code, ARMREG_LR, ARMREG_LR, ins->inst_destbasereg);
4054 ARM_FSTD (code, ins->sreg1, ARMREG_LR, 0);
4056 ARM_FSTD (code, ins->sreg1, ins->inst_destbasereg, ins->inst_offset);
4059 case OP_LOADR8_MEMBASE:
4060 /* This is generated by the local regalloc pass which runs after the lowering pass */
4061 if (!arm_is_fpimm8 (ins->inst_offset)) {
4062 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_offset);
4063 ARM_ADD_REG_REG (code, ARMREG_LR, ARMREG_LR, ins->inst_basereg);
4064 ARM_FLDD (code, ins->dreg, ARMREG_LR, 0);
4066 ARM_FLDD (code, ins->dreg, ins->inst_basereg, ins->inst_offset);
4069 case OP_STORER4_MEMBASE_REG:
4070 g_assert (arm_is_fpimm8 (ins->inst_offset));
4071 ARM_CVTD (code, ARM_VFP_F0, ins->sreg1);
4072 ARM_FSTS (code, ARM_VFP_F0, ins->inst_destbasereg, ins->inst_offset);
4074 case OP_LOADR4_MEMBASE:
4075 g_assert (arm_is_fpimm8 (ins->inst_offset));
4076 ARM_FLDS (code, ARM_VFP_F0, ins->inst_basereg, ins->inst_offset);
4077 ARM_CVTS (code, ins->dreg, ARM_VFP_F0);
4079 case OP_ICONV_TO_R_UN: {
4080 g_assert_not_reached ();
4083 case OP_ICONV_TO_R4:
4084 ARM_FMSR (code, ARM_VFP_F0, ins->sreg1);
4085 ARM_FSITOS (code, ARM_VFP_F0, ARM_VFP_F0);
4086 ARM_CVTS (code, ins->dreg, ARM_VFP_F0);
4088 case OP_ICONV_TO_R8:
4089 ARM_FMSR (code, ARM_VFP_F0, ins->sreg1);
4090 ARM_FSITOD (code, ins->dreg, ARM_VFP_F0);
4094 if (mono_method_signature (cfg->method)->ret->type == MONO_TYPE_R4) {
4095 ARM_CVTD (code, ARM_VFP_F0, ins->sreg1);
4096 ARM_FMRS (code, ARMREG_R0, ARM_VFP_F0);
4098 ARM_FMRRD (code, ARMREG_R0, ARMREG_R1, ins->sreg1);
4104 case OP_FCONV_TO_I1:
4105 code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 1, TRUE);
4107 case OP_FCONV_TO_U1:
4108 code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 1, FALSE);
4110 case OP_FCONV_TO_I2:
4111 code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 2, TRUE);
4113 case OP_FCONV_TO_U2:
4114 code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 2, FALSE);
4116 case OP_FCONV_TO_I4:
4118 code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 4, TRUE);
4120 case OP_FCONV_TO_U4:
4122 code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 4, FALSE);
4124 case OP_FCONV_TO_I8:
4125 case OP_FCONV_TO_U8:
4126 g_assert_not_reached ();
4127 /* Implemented as helper calls */
4129 case OP_LCONV_TO_R_UN:
4130 g_assert_not_reached ();
4131 /* Implemented as helper calls */
4133 case OP_LCONV_TO_OVF_I4_2: {
4134 guint8 *high_bit_not_set, *valid_negative, *invalid_negative, *valid_positive;
4136 * Valid ints: 0xffffffff:8000000 to 00000000:0x7f000000
4139 ARM_CMP_REG_IMM8 (code, ins->sreg1, 0);
4140 high_bit_not_set = code;
4141 ARM_B_COND (code, ARMCOND_GE, 0); /*branch if bit 31 of the lower part is not set*/
4143 ARM_CMN_REG_IMM8 (code, ins->sreg2, 1); /*This have the same effect as CMP reg, 0xFFFFFFFF */
4144 valid_negative = code;
4145 ARM_B_COND (code, ARMCOND_EQ, 0); /*branch if upper part == 0xFFFFFFFF (lower part has bit 31 set) */
4146 invalid_negative = code;
4147 ARM_B_COND (code, ARMCOND_AL, 0);
4149 arm_patch (high_bit_not_set, code);
4151 ARM_CMP_REG_IMM8 (code, ins->sreg2, 0);
4152 valid_positive = code;
4153 ARM_B_COND (code, ARMCOND_EQ, 0); /*branch if upper part == 0 (lower part has bit 31 clear)*/
4155 arm_patch (invalid_negative, code);
4156 EMIT_COND_SYSTEM_EXCEPTION_FLAGS (ARMCOND_AL, "OverflowException");
4158 arm_patch (valid_negative, code);
4159 arm_patch (valid_positive, code);
4161 if (ins->dreg != ins->sreg1)
4162 ARM_MOV_REG_REG (code, ins->dreg, ins->sreg1);
4167 ARM_FPA_ADFD (code, ins->dreg, ins->sreg1, ins->sreg2);
4170 ARM_FPA_SUFD (code, ins->dreg, ins->sreg1, ins->sreg2);
4173 ARM_FPA_MUFD (code, ins->dreg, ins->sreg1, ins->sreg2);
4176 ARM_FPA_DVFD (code, ins->dreg, ins->sreg1, ins->sreg2);
4179 ARM_MNFD (code, ins->dreg, ins->sreg1);
4181 #elif defined(ARM_FPU_VFP)
4183 ARM_VFP_ADDD (code, ins->dreg, ins->sreg1, ins->sreg2);
4186 ARM_VFP_SUBD (code, ins->dreg, ins->sreg1, ins->sreg2);
4189 ARM_VFP_MULD (code, ins->dreg, ins->sreg1, ins->sreg2);
4192 ARM_VFP_DIVD (code, ins->dreg, ins->sreg1, ins->sreg2);
4195 ARM_NEGD (code, ins->dreg, ins->sreg1);
4200 g_assert_not_reached ();
4204 ARM_FCMP (code, ARM_FPA_CMF, ins->sreg1, ins->sreg2);
4205 #elif defined(ARM_FPU_VFP)
4206 ARM_CMPD (code, ins->sreg1, ins->sreg2);
4212 ARM_FCMP (code, ARM_FPA_CMF, ins->sreg1, ins->sreg2);
4213 #elif defined(ARM_FPU_VFP)
4214 ARM_CMPD (code, ins->sreg1, ins->sreg2);
4217 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 0, ARMCOND_NE);
4218 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_EQ);
4222 ARM_FCMP (code, ARM_FPA_CMF, ins->sreg1, ins->sreg2);
4223 #elif defined(ARM_FPU_VFP)
4224 ARM_CMPD (code, ins->sreg1, ins->sreg2);
4227 ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
4228 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_MI);
4232 ARM_FCMP (code, ARM_FPA_CMF, ins->sreg1, ins->sreg2);
4233 #elif defined(ARM_FPU_VFP)
4234 ARM_CMPD (code, ins->sreg1, ins->sreg2);
4237 ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
4238 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_MI);
4239 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_VS);
4244 ARM_FCMP (code, ARM_FPA_CMF, ins->sreg2, ins->sreg1);
4245 #elif defined(ARM_FPU_VFP)
4246 ARM_CMPD (code, ins->sreg2, ins->sreg1);
4249 ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
4250 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_MI);
4255 ARM_FCMP (code, ARM_FPA_CMF, ins->sreg2, ins->sreg1);
4256 #elif defined(ARM_FPU_VFP)
4257 ARM_CMPD (code, ins->sreg2, ins->sreg1);
4260 ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
4261 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_MI);
4262 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_VS);
4264 /* ARM FPA flags table:
4265 * N Less than ARMCOND_MI
4266 * Z Equal ARMCOND_EQ
4267 * C Greater Than or Equal ARMCOND_CS
4268 * V Unordered ARMCOND_VS
4271 EMIT_COND_BRANCH (ins, OP_IBEQ - OP_IBEQ);
4274 EMIT_COND_BRANCH (ins, OP_IBNE_UN - OP_IBEQ);
4277 EMIT_COND_BRANCH_FLAGS (ins, ARMCOND_MI); /* N set */
4280 EMIT_COND_BRANCH_FLAGS (ins, ARMCOND_VS); /* V set */
4281 EMIT_COND_BRANCH_FLAGS (ins, ARMCOND_MI); /* N set */
4287 g_assert_not_reached ();
4291 EMIT_COND_BRANCH_FLAGS (ins, ARMCOND_GE);
4293 /* FPA requires EQ even thou the docs suggests that just CS is enough */
4294 EMIT_COND_BRANCH_FLAGS (ins, ARMCOND_EQ);
4295 EMIT_COND_BRANCH_FLAGS (ins, ARMCOND_CS);
4299 EMIT_COND_BRANCH_FLAGS (ins, ARMCOND_VS); /* V set */
4300 EMIT_COND_BRANCH_FLAGS (ins, ARMCOND_GE);
4305 if (ins->dreg != ins->sreg1)
4306 ARM_MVFD (code, ins->dreg, ins->sreg1);
4307 #elif defined(ARM_FPU_VFP)
4308 ARM_ABSD (code, ARM_VFP_D1, ins->sreg1);
4309 ARM_FLDD (code, ARM_VFP_D0, ARMREG_PC, 0);
4311 *(guint32*)code = 0xffffffff;
4313 *(guint32*)code = 0x7fefffff;
4315 ARM_CMPD (code, ARM_VFP_D1, ARM_VFP_D0);
4317 EMIT_COND_SYSTEM_EXCEPTION_FLAGS (ARMCOND_GT, "ArithmeticException");
4318 ARM_CMPD (code, ins->sreg1, ins->sreg1);
4320 EMIT_COND_SYSTEM_EXCEPTION_FLAGS (ARMCOND_VS, "ArithmeticException");
4322 ARM_CPYD (code, ins->dreg, ins->sreg1);
4327 g_warning ("unknown opcode %s in %s()\n", mono_inst_name (ins->opcode), __FUNCTION__);
4328 g_assert_not_reached ();
4331 if ((cfg->opt & MONO_OPT_BRANCH) && ((code - cfg->native_code - offset) > max_len)) {
4332 g_warning ("wrong maximal instruction length of instruction %s (expected %d, got %d)",
4333 mono_inst_name (ins->opcode), max_len, code - cfg->native_code - offset);
4334 g_assert_not_reached ();
4340 last_offset = offset;
4343 cfg->code_len = code - cfg->native_code;
4346 #endif /* DISABLE_JIT */
4348 #ifdef HAVE_AEABI_READ_TP
4349 void __aeabi_read_tp (void);
/*
 * mono_arch_register_lowlevel_calls:
 * Registers the ARM-specific low-level runtime helpers with the JIT icall
 * table: the two exception-throw entry points and, when HAVE_AEABI_READ_TP
 * is available, __aeabi_read_tp for fast TLS access.
 */
4353 mono_arch_register_lowlevel_calls (void)
4355 /* The signature doesn't matter */
4356 mono_register_jit_icall (mono_arm_throw_exception, "mono_arm_throw_exception", mono_create_icall_signature ("void"), TRUE);
4357 mono_register_jit_icall (mono_arm_throw_exception_by_token, "mono_arm_throw_exception_by_token", mono_create_icall_signature ("void"), TRUE);
/* __aeabi_read_tp must resolve to a real symbol, so it cannot be
 * registered when cross-compiling. */
4359 #ifndef MONO_CROSS_COMPILE
4360 #ifdef HAVE_AEABI_READ_TP
4361 mono_register_jit_icall (__aeabi_read_tp, "__aeabi_read_tp", mono_create_icall_signature ("void"), TRUE);
/*
 * patch_lis_ori:
 * Writes the 32-bit value 'val' into the 16-bit immediate fields of a
 * two-instruction lis/ori-style sequence at 'ip' (high half into slot 1,
 * low half into slot 3).
 * NOTE(review): this looks inherited from the PPC backend; on ARM every
 * call site below is preceded by g_assert_not_reached (), so it appears
 * to be dead code here -- confirm before relying on it.
 */
4366 #define patch_lis_ori(ip,val) do {\
4367 guint16 *__lis_ori = (guint16*)(ip); \
4368 __lis_ori [1] = (((guint32)(val)) >> 16) & 0xffff; \
4369 __lis_ori [3] = ((guint32)(val)) & 0xffff; \
/*
 * mono_arch_patch_code:
 * Walks the MonoJumpInfo list 'ji' of a freshly JITted method and resolves
 * every patch site in 'code'. Inlined switch jump tables are filled in
 * directly; everything else is resolved via mono_resolve_patch_target ()
 * and applied with arm_patch_general (). compile_aot is inferred from
 * run_cctors (AOT compilation does not run class constructors here).
 */
4373 mono_arch_patch_code (MonoMethod *method, MonoDomain *domain, guint8 *code, MonoJumpInfo *ji, gboolean run_cctors)
4375 MonoJumpInfo *patch_info;
4376 gboolean compile_aot = !run_cctors;
4378 for (patch_info = ji; patch_info; patch_info = patch_info->next) {
4379 unsigned char *ip = patch_info->ip.i + code;
4380 const unsigned char *target;
4382 if (patch_info->type == MONO_PATCH_INFO_SWITCH && !compile_aot) {
4383 gpointer *jt = (gpointer*)(ip + 8);
4385 /* jt is the inlined jump table, 2 instructions after ip
4386 * In the normal case we store the absolute addresses,
4387 * otherwise the displacements.
4389 for (i = 0; i < patch_info->data.table->table_size; i++)
4390 jt [i] = code + (int)patch_info->data.table->table [i];
4393 target = mono_resolve_patch_target (method, domain, code, patch_info, run_cctors);
4396 switch (patch_info->type) {
4397 case MONO_PATCH_INFO_BB:
4398 case MONO_PATCH_INFO_LABEL:
4401 /* No need to patch these */
/* The cases below are dead on ARM: each is guarded by
 * g_assert_not_reached (). They appear to be leftovers from the PPC
 * backend (patch_lis_ori, fixed ip offsets). */
4406 switch (patch_info->type) {
4407 case MONO_PATCH_INFO_IP:
4408 g_assert_not_reached ();
4409 patch_lis_ori (ip, ip);
4411 case MONO_PATCH_INFO_METHOD_REL:
4412 g_assert_not_reached ();
4413 *((gpointer *)(ip)) = code + patch_info->data.offset;
4415 case MONO_PATCH_INFO_METHODCONST:
4416 case MONO_PATCH_INFO_CLASS:
4417 case MONO_PATCH_INFO_IMAGE:
4418 case MONO_PATCH_INFO_FIELD:
4419 case MONO_PATCH_INFO_VTABLE:
4420 case MONO_PATCH_INFO_IID:
4421 case MONO_PATCH_INFO_SFLDA:
4422 case MONO_PATCH_INFO_LDSTR:
4423 case MONO_PATCH_INFO_TYPE_FROM_HANDLE:
4424 case MONO_PATCH_INFO_LDTOKEN:
4425 g_assert_not_reached ();
4426 /* from OP_AOTCONST : lis + ori */
4427 patch_lis_ori (ip, target);
4429 case MONO_PATCH_INFO_R4:
4430 case MONO_PATCH_INFO_R8:
4431 g_assert_not_reached ();
4432 *((gconstpointer *)(ip + 2)) = patch_info->data.target;
4434 case MONO_PATCH_INFO_EXC_NAME:
4435 g_assert_not_reached ();
4436 *((gconstpointer *)(ip + 1)) = patch_info->data.name;
4438 case MONO_PATCH_INFO_NONE:
4439 case MONO_PATCH_INFO_BB_OVF:
4440 case MONO_PATCH_INFO_EXC_OVF:
4441 /* everything is dealt with at epilog output time */
/* Default: patch the branch/load instruction at ip to reach target. */
4446 arm_patch_general (domain, ip, target);
4453 * Stack frame layout:
4455 * ------------------- fp
4456 * MonoLMF structure or saved registers
4457 * -------------------
4459 * -------------------
4461 * -------------------
4462 * optional 8 bytes for tracing
4463 * -------------------
4464 * param area size is cfg->param_area
4465 * ------------------- sp
/*
 * mono_arch_emit_prolog:
 * Emits the method prolog for 'cfg': pushes callee-saved registers (or a
 * full register set when method->save_lmf is set), emits unwind info,
 * allocates the MONO_ARCH_FRAME_ALIGNMENT-aligned stack frame, spills
 * incoming arguments from registers / caller stack into their home slots,
 * and optionally sets up the thread-attach call, the LMF linkage and
 * sequence-point support. See the stack frame layout comment above.
 * Returns (in the sampled-away tail) the updated code pointer.
 */
4468 mono_arch_emit_prolog (MonoCompile *cfg)
4470 MonoMethod *method = cfg->method;
4472 MonoMethodSignature *sig;
4474 int alloc_size, pos, max_offset, i, rot_amount;
4479 int prev_sp_offset, reg_offset;
4481 if (mono_jit_trace_calls != NULL && mono_trace_eval (method))
/* Initial buffer: grows later if needed (see emit_epilog/emit_exceptions). */
4484 sig = mono_method_signature (method);
4485 cfg->code_size = 256 + sig->param_count * 20;
4486 code = cfg->native_code = g_malloc (cfg->code_size);
4488 mono_emit_unwind_op_def_cfa (cfg, code, ARMREG_SP, 0);
4490 ARM_MOV_REG_REG (code, ARMREG_IP, ARMREG_SP);
4492 alloc_size = cfg->stack_offset;
/* Save registers: either only the used callee-saved set, or (save_lmf)
 * everything needed to fill a MonoLMF. prev_sp_offset tracks how far SP
 * moved so unwind offsets and argument offsets stay correct. */
4495 if (!method->save_lmf) {
4496 /* We save SP by storing it into IP and saving IP */
4497 ARM_PUSH (code, (cfg->used_int_regs | (1 << ARMREG_IP) | (1 << ARMREG_LR)));
4498 prev_sp_offset = 8; /* ip and lr */
4499 for (i = 0; i < 16; ++i) {
4500 if (cfg->used_int_regs & (1 << i))
4501 prev_sp_offset += 4;
4503 mono_emit_unwind_op_def_cfa_offset (cfg, code, prev_sp_offset);
4505 for (i = 0; i < 16; ++i) {
4506 if ((cfg->used_int_regs & (1 << i)) || (i == ARMREG_IP) || (i == ARMREG_LR)) {
4507 mono_emit_unwind_op_offset (cfg, code, i, (- prev_sp_offset) + reg_offset);
/* save_lmf: push r4-r12, lr (mask 0x5ff0 = all but r0-r3, sp, pc). */
4512 ARM_PUSH (code, 0x5ff0);
4513 prev_sp_offset = 4 * 10; /* all but r0-r3, sp and pc */
4514 mono_emit_unwind_op_def_cfa_offset (cfg, code, prev_sp_offset);
4516 for (i = 0; i < 16; ++i) {
4517 if ((i > ARMREG_R3) && (i != ARMREG_SP) && (i != ARMREG_PC)) {
4518 mono_emit_unwind_op_offset (cfg, code, i, (- prev_sp_offset) + reg_offset);
4522 pos += sizeof (MonoLMF) - prev_sp_offset;
4526 // align to MONO_ARCH_FRAME_ALIGNMENT bytes
4527 if (alloc_size & (MONO_ARCH_FRAME_ALIGNMENT - 1)) {
4528 alloc_size += MONO_ARCH_FRAME_ALIGNMENT - 1;
4529 alloc_size &= ~(MONO_ARCH_FRAME_ALIGNMENT - 1);
4532 /* the stack used in the pushed regs */
4533 if (prev_sp_offset & 4)
4535 cfg->stack_usage = alloc_size;
/* Allocate the frame: subtract alloc_size from SP, using a rotated
 * immediate when it fits, otherwise a scratch load into IP. */
4537 if ((i = mono_arm_is_rotated_imm8 (alloc_size, &rot_amount)) >= 0) {
4538 ARM_SUB_REG_IMM (code, ARMREG_SP, ARMREG_SP, i, rot_amount);
4540 code = mono_arm_emit_load_imm (code, ARMREG_IP, alloc_size);
4541 ARM_SUB_REG_REG (code, ARMREG_SP, ARMREG_SP, ARMREG_IP);
4543 mono_emit_unwind_op_def_cfa_offset (cfg, code, prev_sp_offset + alloc_size);
4545 if (cfg->frame_reg != ARMREG_SP) {
4546 ARM_MOV_REG_REG (code, cfg->frame_reg, ARMREG_SP);
4547 mono_emit_unwind_op_def_cfa_reg (cfg, code, cfg->frame_reg);
4549 //g_print ("prev_sp_offset: %d, alloc_size:%d\n", prev_sp_offset, alloc_size);
4550 prev_sp_offset += alloc_size;
4552 /* compute max_offset in order to use short forward jumps
4553 * we could skip do it on arm because the immediate displacement
4554 * for jumps is large enough, it may be useful later for constant pools
4557 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
4558 MonoInst *ins = bb->code;
4559 bb->max_offset = max_offset;
4561 if (cfg->prof_options & MONO_PROFILE_COVERAGE)
4564 MONO_BB_FOR_EACH_INS (bb, ins)
4565 max_offset += ((guint8 *)ins_get_spec (ins->opcode))[MONO_INST_LEN];
4568 /* store runtime generic context */
4569 if (cfg->rgctx_var) {
4570 MonoInst *ins = cfg->rgctx_var;
4572 g_assert (ins->opcode == OP_REGOFFSET);
4574 if (arm_is_imm12 (ins->inst_offset)) {
4575 ARM_STR_IMM (code, MONO_ARCH_RGCTX_REG, ins->inst_basereg, ins->inst_offset);
4577 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_offset);
4578 ARM_STR_REG_REG (code, MONO_ARCH_RGCTX_REG, ins->inst_basereg, ARMREG_LR);
4582 /* load arguments allocated to register from the stack */
4585 cinfo = get_call_info (NULL, sig, sig->pinvoke);
4587 if (MONO_TYPE_ISSTRUCT (sig->ret) && cinfo->ret.storage != RegTypeStructByVal) {
4588 ArgInfo *ainfo = &cinfo->ret;
4589 inst = cfg->vret_addr;
4590 g_assert (arm_is_imm12 (inst->inst_offset));
4591 ARM_STR_IMM (code, ainfo->reg, inst->inst_basereg, inst->inst_offset);
4594 if (sig->call_convention == MONO_CALL_VARARG) {
4595 ArgInfo *cookie = &cinfo->sig_cookie;
4597 /* Save the sig cookie address */
4598 g_assert (cookie->storage == RegTypeBase);
4600 g_assert (arm_is_imm12 (prev_sp_offset + cookie->offset));
4601 g_assert (arm_is_imm12 (cfg->sig_cookie));
4602 ARM_ADD_REG_IMM8 (code, ARMREG_IP, cfg->frame_reg, prev_sp_offset + cookie->offset);
4603 ARM_STR_IMM (code, ARMREG_IP, cfg->frame_reg, cfg->sig_cookie);
/* Spill each incoming argument (this pointer included) from its
 * ABI location (register, register pair, caller stack, struct-by-val
 * registers) into its assigned home slot or register. */
4606 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
4607 ArgInfo *ainfo = cinfo->args + i;
4608 inst = cfg->args [pos];
4610 if (cfg->verbose_level > 2)
4611 g_print ("Saving argument %d (type: %d)\n", i, ainfo->storage);
4612 if (inst->opcode == OP_REGVAR) {
4613 if (ainfo->storage == RegTypeGeneral)
4614 ARM_MOV_REG_REG (code, inst->dreg, ainfo->reg);
4615 else if (ainfo->storage == RegTypeFP) {
4616 g_assert_not_reached ();
4617 } else if (ainfo->storage == RegTypeBase) {
4618 if (arm_is_imm12 (prev_sp_offset + ainfo->offset)) {
4619 ARM_LDR_IMM (code, inst->dreg, ARMREG_SP, (prev_sp_offset + ainfo->offset));
/* NOTE(review): this fallback loads inst->inst_offset while the
 * imm12 fast path above uses prev_sp_offset + ainfo->offset --
 * these look inconsistent; confirm which offset is intended. */
4621 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
4622 ARM_LDR_REG_REG (code, inst->dreg, ARMREG_SP, ARMREG_IP);
4625 g_assert_not_reached ();
4627 if (cfg->verbose_level > 2)
4628 g_print ("Argument %d assigned to register %s\n", pos, mono_arch_regname (inst->dreg));
4630 /* the argument should be put on the stack: FIXME handle size != word */
4631 if (ainfo->storage == RegTypeGeneral || ainfo->storage == RegTypeIRegPair) {
4632 switch (ainfo->size) {
4634 if (arm_is_imm12 (inst->inst_offset))
4635 ARM_STRB_IMM (code, ainfo->reg, inst->inst_basereg, inst->inst_offset);
4637 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
4638 ARM_STRB_REG_REG (code, ainfo->reg, inst->inst_basereg, ARMREG_IP);
4642 if (arm_is_imm8 (inst->inst_offset)) {
4643 ARM_STRH_IMM (code, ainfo->reg, inst->inst_basereg, inst->inst_offset);
4645 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
4646 ARM_STRH_REG_REG (code, ainfo->reg, inst->inst_basereg, ARMREG_IP);
/* 8-byte case: store both registers of the pair. */
4650 g_assert (arm_is_imm12 (inst->inst_offset));
4651 ARM_STR_IMM (code, ainfo->reg, inst->inst_basereg, inst->inst_offset);
4652 g_assert (arm_is_imm12 (inst->inst_offset + 4));
4653 ARM_STR_IMM (code, ainfo->reg + 1, inst->inst_basereg, inst->inst_offset + 4);
4656 if (arm_is_imm12 (inst->inst_offset)) {
4657 ARM_STR_IMM (code, ainfo->reg, inst->inst_basereg, inst->inst_offset);
4659 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
4660 ARM_STR_REG_REG (code, ainfo->reg, inst->inst_basereg, ARMREG_IP);
/* RegTypeBaseGen: 64-bit arg split between r3 (low word) and the
 * caller's stack (high word). */
4664 } else if (ainfo->storage == RegTypeBaseGen) {
4665 g_assert (arm_is_imm12 (prev_sp_offset + ainfo->offset));
4666 g_assert (arm_is_imm12 (inst->inst_offset));
4667 ARM_LDR_IMM (code, ARMREG_LR, ARMREG_SP, (prev_sp_offset + ainfo->offset));
4668 ARM_STR_IMM (code, ARMREG_LR, inst->inst_basereg, inst->inst_offset + 4);
4669 ARM_STR_IMM (code, ARMREG_R3, inst->inst_basereg, inst->inst_offset);
4670 } else if (ainfo->storage == RegTypeBase) {
4671 if (arm_is_imm12 (prev_sp_offset + ainfo->offset)) {
4672 ARM_LDR_IMM (code, ARMREG_LR, ARMREG_SP, (prev_sp_offset + ainfo->offset));
4674 code = mono_arm_emit_load_imm (code, ARMREG_IP, prev_sp_offset + ainfo->offset);
4675 ARM_LDR_REG_REG (code, ARMREG_LR, ARMREG_SP, ARMREG_IP);
4678 switch (ainfo->size) {
4680 if (arm_is_imm8 (inst->inst_offset)) {
4681 ARM_STRB_IMM (code, ARMREG_LR, inst->inst_basereg, inst->inst_offset);
4683 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
4684 ARM_STRB_REG_REG (code, ARMREG_LR, inst->inst_basereg, ARMREG_IP);
4688 if (arm_is_imm8 (inst->inst_offset)) {
4689 ARM_STRH_IMM (code, ARMREG_LR, inst->inst_basereg, inst->inst_offset);
4691 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
4692 ARM_STRH_REG_REG (code, ARMREG_LR, inst->inst_basereg, ARMREG_IP);
4696 if (arm_is_imm12 (inst->inst_offset)) {
4697 ARM_STR_IMM (code, ARMREG_LR, inst->inst_basereg, inst->inst_offset);
4699 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
4700 ARM_STR_REG_REG (code, ARMREG_LR, inst->inst_basereg, ARMREG_IP);
/* 8-byte stack arg: copy the second word as well. */
4702 if (arm_is_imm12 (prev_sp_offset + ainfo->offset + 4)) {
4703 ARM_LDR_IMM (code, ARMREG_LR, ARMREG_SP, (prev_sp_offset + ainfo->offset + 4));
4705 code = mono_arm_emit_load_imm (code, ARMREG_IP, prev_sp_offset + ainfo->offset + 4);
4706 ARM_LDR_REG_REG (code, ARMREG_LR, ARMREG_SP, ARMREG_IP);
4708 if (arm_is_imm12 (inst->inst_offset + 4)) {
4709 ARM_STR_IMM (code, ARMREG_LR, inst->inst_basereg, inst->inst_offset + 4);
4711 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset + 4);
4712 ARM_STR_REG_REG (code, ARMREG_LR, inst->inst_basereg, ARMREG_IP);
4716 if (arm_is_imm12 (inst->inst_offset)) {
4717 ARM_STR_IMM (code, ARMREG_LR, inst->inst_basereg, inst->inst_offset);
4719 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
4720 ARM_STR_REG_REG (code, ARMREG_LR, inst->inst_basereg, ARMREG_IP);
4724 } else if (ainfo->storage == RegTypeFP) {
4725 g_assert_not_reached ();
4726 } else if (ainfo->storage == RegTypeStructByVal) {
4727 int doffset = inst->inst_offset;
4731 size = mini_type_stack_size_full (cfg->generic_sharing_context, inst->inst_vtype, NULL, sig->pinvoke);
4732 for (cur_reg = 0; cur_reg < ainfo->size; ++cur_reg) {
4733 if (arm_is_imm12 (doffset)) {
4734 ARM_STR_IMM (code, ainfo->reg + cur_reg, inst->inst_basereg, doffset);
4736 code = mono_arm_emit_load_imm (code, ARMREG_IP, doffset);
4737 ARM_STR_REG_REG (code, ainfo->reg + cur_reg, inst->inst_basereg, ARMREG_IP);
4739 soffset += sizeof (gpointer);
4740 doffset += sizeof (gpointer);
4742 if (ainfo->vtsize) {
4743 /* FIXME: handle overrun! with struct sizes not multiple of 4 */
4744 //g_print ("emit_memcpy (prev_sp_ofs: %d, ainfo->offset: %d, soffset: %d)\n", prev_sp_offset, ainfo->offset, soffset);
4745 code = emit_memcpy (code, ainfo->vtsize * sizeof (gpointer), inst->inst_basereg, doffset, ARMREG_SP, prev_sp_offset + ainfo->offset);
4747 } else if (ainfo->storage == RegTypeStructByAddr) {
4748 g_assert_not_reached ();
4749 /* FIXME: handle overrun! with struct sizes not multiple of 4 */
4750 code = emit_memcpy (code, ainfo->vtsize * sizeof (gpointer), inst->inst_basereg, inst->inst_offset, ainfo->reg, 0);
4752 g_assert_not_reached ();
/* Native-to-managed wrappers must attach the thread to the runtime;
 * r0 carries the target domain (0 = root domain in AOT). */
4757 if (method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED) {
4758 if (cfg->compile_aot)
4759 /* AOT code is only used in the root domain */
4760 code = mono_arm_emit_load_imm (code, ARMREG_R0, 0);
4762 code = mono_arm_emit_load_imm (code, ARMREG_R0, (guint32)cfg->domain);
4763 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_INTERNAL_METHOD,
4764 (gpointer)"mono_jit_thread_attach");
4765 code = emit_call_seq (cfg, code);
/* Build and link the MonoLMF: obtain the per-thread lmf_addr (via the
 * fast __aeabi_read_tp TLS path when possible, otherwise by calling
 * mono_get_lmf_addr), then splice the new LMF into the list. */
4768 if (method->save_lmf) {
4769 gboolean get_lmf_fast = FALSE;
4771 #ifdef HAVE_AEABI_READ_TP
4772 gint32 lmf_addr_tls_offset = mono_get_lmf_addr_tls_offset ();
4774 if (lmf_addr_tls_offset != -1) {
4775 get_lmf_fast = TRUE;
4777 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_INTERNAL_METHOD,
4778 (gpointer)"__aeabi_read_tp");
4779 code = emit_call_seq (cfg, code);
4781 ARM_LDR_IMM (code, ARMREG_R0, ARMREG_R0, lmf_addr_tls_offset);
4782 get_lmf_fast = TRUE;
4785 if (!get_lmf_fast) {
4786 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_INTERNAL_METHOD,
4787 (gpointer)"mono_get_lmf_addr");
4788 code = emit_call_seq (cfg, code);
4790 /* we build the MonoLMF structure on the stack - see mini-arm.h */
4791 /* lmf_offset is the offset from the previous stack pointer,
4792 * alloc_size is the total stack space allocated, so the offset
4793 * of MonoLMF from the current stack ptr is alloc_size - lmf_offset.
4794 * The pointer to the struct is put in r1 (new_lmf).
4795 * r2 is used as scratch
4796 * The callee-saved registers are already in the MonoLMF structure
4798 code = emit_big_add (code, ARMREG_R1, ARMREG_SP, alloc_size - lmf_offset);
4799 /* r0 is the result from mono_get_lmf_addr () */
4800 ARM_STR_IMM (code, ARMREG_R0, ARMREG_R1, G_STRUCT_OFFSET (MonoLMF, lmf_addr));
4801 /* new_lmf->previous_lmf = *lmf_addr */
4802 ARM_LDR_IMM (code, ARMREG_R2, ARMREG_R0, G_STRUCT_OFFSET (MonoLMF, previous_lmf));
4803 ARM_STR_IMM (code, ARMREG_R2, ARMREG_R1, G_STRUCT_OFFSET (MonoLMF, previous_lmf));
4804 /* *(lmf_addr) = r1 */
4805 ARM_STR_IMM (code, ARMREG_R1, ARMREG_R0, G_STRUCT_OFFSET (MonoLMF, previous_lmf));
4806 /* Skip method (only needed for trampoline LMF frames) */
4807 ARM_STR_IMM (code, ARMREG_SP, ARMREG_R1, G_STRUCT_OFFSET (MonoLMF, ebp));
4808 /* save the current IP */
4809 ARM_MOV_REG_REG (code, ARMREG_R2, ARMREG_PC);
4810 ARM_STR_IMM (code, ARMREG_R2, ARMREG_R1, G_STRUCT_OFFSET (MonoLMF, eip));
4814 code = mono_arch_instrument_prolog (cfg, mono_trace_enter_method, code, TRUE);
/* Sequence point support: load the SeqPointInfo pointer from an
 * inline GOT-style slot and spill it to its variable. */
4816 if (cfg->arch.seq_point_info_var) {
4817 MonoInst *ins = cfg->arch.seq_point_info_var;
4819 /* Initialize the variable from a GOT slot */
4820 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_SEQ_POINT_INFO, cfg->method);
4821 ARM_LDR_IMM (code, ARMREG_R0, ARMREG_PC, 0);
4823 *(gpointer*)code = NULL;
4825 ARM_LDR_REG_REG (code, ARMREG_R0, ARMREG_PC, ARMREG_R0);
4827 g_assert (ins->opcode == OP_REGOFFSET);
4829 if (arm_is_imm12 (ins->inst_offset)) {
4830 ARM_STR_IMM (code, ARMREG_R0, ins->inst_basereg, ins->inst_offset);
4832 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_offset);
4833 ARM_STR_REG_REG (code, ARMREG_R0, ins->inst_basereg, ARMREG_LR);
4837 /* Initialize ss_trigger_page_var */
4839 MonoInst *info_var = cfg->arch.seq_point_info_var;
4840 MonoInst *ss_trigger_page_var = cfg->arch.ss_trigger_page_var;
4841 int dreg = ARMREG_LR;
4844 g_assert (info_var->opcode == OP_REGOFFSET);
4845 g_assert (arm_is_imm12 (info_var->inst_offset));
4847 ARM_LDR_IMM (code, dreg, info_var->inst_basereg, info_var->inst_offset);
4848 /* Load the trigger page addr */
4849 ARM_LDR_IMM (code, dreg, dreg, G_STRUCT_OFFSET (SeqPointInfo, ss_trigger_page));
4850 ARM_STR_IMM (code, dreg, ss_trigger_page_var->inst_basereg, ss_trigger_page_var->inst_offset);
4854 cfg->code_len = code - cfg->native_code;
4855 g_assert (cfg->code_len < cfg->code_size);
/*
 * mono_arch_emit_epilog:
 * Emits the method epilog: grows the code buffer if the worst-case epilog
 * would not fit, emits optional leave-method tracing, loads struct-by-val
 * return values into r0(+), unlinks the MonoLMF (when save_lmf) and pops
 * the saved registers, returning by restoring pc.
 */
4862 mono_arch_emit_epilog (MonoCompile *cfg)
4864 MonoMethod *method = cfg->method;
4865 int pos, i, rot_amount;
/* Worst-case size estimate; grown below if LMF/tracing/profiling add code. */
4866 int max_epilog_size = 16 + 20*4;
4870 if (cfg->method->save_lmf)
4871 max_epilog_size += 128;
4873 if (mono_jit_trace_calls != NULL)
4874 max_epilog_size += 50;
4876 if (cfg->prof_options & MONO_PROFILE_ENTER_LEAVE)
4877 max_epilog_size += 50;
4879 while (cfg->code_len + max_epilog_size > (cfg->code_size - 16)) {
4880 cfg->code_size *= 2;
4881 cfg->native_code = g_realloc (cfg->native_code, cfg->code_size);
4882 mono_jit_stats.code_reallocs++;
4886 * Keep in sync with OP_JMP
4888 code = cfg->native_code + cfg->code_len;
4890 if (mono_jit_trace_calls != NULL && mono_trace_eval (method)) {
4891 code = mono_arch_instrument_epilog (cfg, mono_trace_leave_method, code, TRUE);
4895 /* Load returned vtypes into registers if needed */
4896 cinfo = cfg->arch.cinfo;
4897 if (cinfo->ret.storage == RegTypeStructByVal) {
4898 MonoInst *ins = cfg->ret;
4900 if (arm_is_imm12 (ins->inst_offset)) {
4901 ARM_LDR_IMM (code, ARMREG_R0, ins->inst_basereg, ins->inst_offset);
4903 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_offset);
4904 ARM_LDR_REG_REG (code, ARMREG_R0, ins->inst_basereg, ARMREG_LR);
/* LMF path: unlink the frame's MonoLMF from the per-thread list, then
 * restore the callee-saved registers from the LMF itself. */
4908 if (method->save_lmf) {
4910 /* all but r0-r3, sp and pc */
4911 pos += sizeof (MonoLMF) - (4 * 10);
4913 /* r2 contains the pointer to the current LMF */
4914 code = emit_big_add (code, ARMREG_R2, cfg->frame_reg, cfg->stack_usage - lmf_offset);
4915 /* ip = previous_lmf */
4916 ARM_LDR_IMM (code, ARMREG_IP, ARMREG_R2, G_STRUCT_OFFSET (MonoLMF, previous_lmf));
4918 ARM_LDR_IMM (code, ARMREG_LR, ARMREG_R2, G_STRUCT_OFFSET (MonoLMF, lmf_addr));
4919 /* *(lmf_addr) = previous_lmf */
4920 ARM_STR_IMM (code, ARMREG_IP, ARMREG_LR, G_STRUCT_OFFSET (MonoLMF, previous_lmf));
4921 /* FIXME: speedup: there is no actual need to restore the registers if
4922 * we didn't actually change them (idea from Zoltan).
4925 /* point sp at the registers to restore: 10 is 14 -4, because we skip r0-r3 */
4926 ARM_ADD_REG_IMM8 (code, ARMREG_SP, ARMREG_R2, (sizeof (MonoLMF) - 10 * sizeof (gulong)));
4927 ARM_POP_NWB (code, 0xaff0); /* restore ip to sp and lr to pc */
/* Non-LMF path: pop the frame and restore saved regs, returning via pc. */
4929 if ((i = mono_arm_is_rotated_imm8 (cfg->stack_usage, &rot_amount)) >= 0) {
4930 ARM_ADD_REG_IMM (code, ARMREG_SP, cfg->frame_reg, i, rot_amount);
/* NOTE(review): this fallback adds to ARMREG_SP while the imm8 fast
 * path above bases the add on cfg->frame_reg -- inconsistent when
 * frame_reg != SP; confirm intended base register. */
4932 code = mono_arm_emit_load_imm (code, ARMREG_IP, cfg->stack_usage);
4933 ARM_ADD_REG_REG (code, ARMREG_SP, ARMREG_SP, ARMREG_IP);
4935 /* FIXME: add v4 thumb interworking support */
4936 ARM_POP_NWB (code, cfg->used_int_regs | ((1 << ARMREG_SP) | (1 << ARMREG_PC)));
4939 cfg->code_len = code - cfg->native_code;
4941 g_assert (cfg->code_len < cfg->code_size);
4945 /* remove once throw_exception_by_name is eliminated */
/*
 * exception_id_by_name:
 * Maps a System.* corlib exception class name to its MONO_EXC_* intrinsic
 * id, used to index the exc_throw_pos/exc_throw_found tables in
 * mono_arch_emit_exceptions (). Aborts via g_error () on an unknown name,
 * so callers must only pass names this backend itself emits.
 */
4947 exception_id_by_name (const char *name)
4949 if (strcmp (name, "IndexOutOfRangeException") == 0)
4950 return MONO_EXC_INDEX_OUT_OF_RANGE;
4951 if (strcmp (name, "OverflowException") == 0)
4952 return MONO_EXC_OVERFLOW;
4953 if (strcmp (name, "ArithmeticException") == 0)
4954 return MONO_EXC_ARITHMETIC;
4955 if (strcmp (name, "DivideByZeroException") == 0)
4956 return MONO_EXC_DIVIDE_BY_ZERO;
4957 if (strcmp (name, "InvalidCastException") == 0)
4958 return MONO_EXC_INVALID_CAST;
4959 if (strcmp (name, "NullReferenceException") == 0)
4960 return MONO_EXC_NULL_REF;
4961 if (strcmp (name, "ArrayTypeMismatchException") == 0)
4962 return MONO_EXC_ARRAY_TYPE_MISMATCH;
4963 g_error ("Unknown intrinsic exception %s\n", name);
/*
 * Append the out-of-line exception-throwing stubs to the method's
 * native code.  First pass: size the epilog area (32 bytes per distinct
 * intrinsic exception) and grow cfg->native_code if needed.  Second
 * pass: for each MONO_PATCH_INFO_EXC patch, either branch-patch the
 * faulting site to an already-emitted stub for the same exception, or
 * emit a new stub that loads the exception type token and calls
 * mono_arch_throw_corlib_exception.
 */
4968 mono_arch_emit_exceptions (MonoCompile *cfg)
4970 MonoJumpInfo *patch_info;
/* One slot per intrinsic exception: stub address, and whether it was counted. */
4973 const guint8* exc_throw_pos [MONO_EXC_INTRINS_NUM] = {NULL};
4974 guint8 exc_throw_found [MONO_EXC_INTRINS_NUM] = {0};
4975 int max_epilog_size = 50;
4977 /* count the number of exception infos */
4980 * make sure we have enough space for exceptions
4982 for (patch_info = cfg->patch_info; patch_info; patch_info = patch_info->next) {
4983 if (patch_info->type == MONO_PATCH_INFO_EXC) {
4984 i = exception_id_by_name (patch_info->data.target);
4985 if (!exc_throw_found [i]) {
/* Each distinct exception stub is budgeted at 32 bytes. */
4986 max_epilog_size += 32;
4987 exc_throw_found [i] = TRUE;
/* Double the code buffer until the stubs fit (16 bytes of slack kept). */
4992 while (cfg->code_len + max_epilog_size > (cfg->code_size - 16)) {
4993 cfg->code_size *= 2;
4994 cfg->native_code = g_realloc (cfg->native_code, cfg->code_size);
4995 mono_jit_stats.code_reallocs++;
4998 code = cfg->native_code + cfg->code_len;
5000 /* add code to raise exceptions */
5001 for (patch_info = cfg->patch_info; patch_info; patch_info = patch_info->next) {
5002 switch (patch_info->type) {
5003 case MONO_PATCH_INFO_EXC: {
5004 MonoClass *exc_class;
5005 unsigned char *ip = patch_info->ip.i + cfg->native_code;
5007 i = exception_id_by_name (patch_info->data.target);
/* Reuse an existing stub for this exception id if one was emitted. */
5008 if (exc_throw_pos [i]) {
5009 arm_patch (ip, exc_throw_pos [i]);
5010 patch_info->type = MONO_PATCH_INFO_NONE;
5013 exc_throw_pos [i] = code;
5015 arm_patch (ip, code);
5017 exc_class = mono_class_from_name (mono_defaults.corlib, "System", patch_info->data.name);
5018 g_assert (exc_class);
/* R1 = return address of the faulting site; R0 = type token loaded PC-relative. */
5020 ARM_MOV_REG_REG (code, ARMREG_R1, ARMREG_LR);
5021 ARM_LDR_IMM (code, ARMREG_R0, ARMREG_PC, 0);
/* Retarget this patch at the helper call emitted here. */
5022 patch_info->type = MONO_PATCH_INFO_INTERNAL_METHOD;
5023 patch_info->data.name = "mono_arch_throw_corlib_exception";
5024 patch_info->ip.i = code - cfg->native_code;
/* Inline literal consumed by the PC-relative LDR above. */
5026 *(guint32*)(gpointer)code = exc_class->type_token;
5036 cfg->code_len = code - cfg->native_code;
5038 g_assert (cfg->code_len < cfg->code_size);
5042 #endif /* #ifndef DISABLE_JIT */
/* One-shot guard for caching the LMF TLS offsets below. */
5044 static gboolean tls_offset_inited = FALSE;
/*
 * Per-thread JIT TLS setup hook.  On first call, caches the TLS offsets
 * of the LMF and LMF-address slots into the file-scope variables
 * lmf_tls_offset / lmf_addr_tls_offset.
 * NOTE(review): the check-then-set on tls_offset_inited is not locked
 * here — presumably callers serialize this (e.g. under the JIT init
 * path); confirm before relying on it from concurrent threads.
 */
5047 mono_arch_setup_jit_tls_data (MonoJitTlsData *tls)
5049 if (!tls_offset_inited) {
5050 tls_offset_inited = TRUE;
5052 lmf_tls_offset = mono_get_lmf_tls_offset ();
5053 lmf_addr_tls_offset = mono_get_lmf_addr_tls_offset ();
/* Per-thread JIT TLS teardown hook (body elided in this view). */
5058 mono_arch_free_jit_tls_data (MonoJitTlsData *tls)
/* Arch-specific intrinsic lowering hook for method calls (body elided in this view). */
5063 mono_arch_emit_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
/* Debug pretty-printer for an IR tree node (body elided in this view). */
5070 mono_arch_print_tree (MonoInst *tree, int arity)
/* Return the IR instruction that loads the current MonoDomain, if the arch has one. */
5076 mono_arch_get_domain_intrinsic (MonoCompile* cfg)
5078 return mono_get_domain_intrinsic (cfg);
/* Offset of the patchable word within a call site (body elided in this view). */
5082 mono_arch_get_patch_offset (guint8 *code)
/* No register windows on ARM — no-op hook (body elided in this view). */
5089 mono_arch_flush_register_windows (void)
5093 #ifdef MONO_ARCH_HAVE_IMT
/*
 * Arrange for the IMT/method identifier to be passed to an interface
 * call in register V5.  Three cases:
 *   - AOT: always pass dynamically; use imt_arg's vreg if present,
 *     otherwise materialize the method via OP_AOTCONST.
 *   - generic context (JIT): pass imt_arg's vreg and mark the rgctx
 *     register as used.
 *   - plain JIT: load the MonoMethod pointer as an OP_PCONST.
 * In every case the value ends up as an out-arg in ARMREG_V5.
 */
5098 mono_arch_emit_imt_argument (MonoCompile *cfg, MonoCallInst *call, MonoInst *imt_arg)
5100 if (cfg->compile_aot) {
5101 int method_reg = mono_alloc_ireg (cfg);
5104 call->dynamic_imt_arg = TRUE;
5107 mono_call_inst_add_outarg_reg (cfg, call, imt_arg->dreg, ARMREG_V5, FALSE);
/* No explicit imt_arg: emit an AOT constant holding the method. */
5109 MONO_INST_NEW (cfg, ins, OP_AOTCONST);
5110 ins->dreg = method_reg;
5111 ins->inst_p0 = call->method;
5112 ins->inst_c1 = MONO_PATCH_INFO_METHODCONST;
5113 MONO_ADD_INS (cfg->cbb, ins);
5115 mono_call_inst_add_outarg_reg (cfg, call, method_reg, ARMREG_V5, FALSE);
5117 } else if (cfg->generic_context) {
5119 /* Always pass in a register for simplicity */
5120 call->dynamic_imt_arg = TRUE;
5122 cfg->uses_rgctx_reg = TRUE;
5125 mono_call_inst_add_outarg_reg (cfg, call, imt_arg->dreg, ARMREG_V5, FALSE);
5128 int method_reg = mono_alloc_preg (cfg);
/* JIT case: the MonoMethod* is a plain pointer constant. */
5130 MONO_INST_NEW (cfg, ins, OP_PCONST);
5131 ins->inst_p0 = call->method;
5132 ins->dreg = method_reg;
5133 MONO_ADD_INS (cfg->cbb, ins);
5135 mono_call_inst_add_outarg_reg (cfg, call, method_reg, ARMREG_V5, FALSE);
5140 #endif /* DISABLE_JIT */
/*
 * Recover the IMT method for an interface call site.  The call site is
 * expected to be a PC-relative LDR followed by an inline literal: that
 * literal is the MonoMethod*, or 0 for AOT code, in which case the
 * method was passed in register V5 (see mono_arch_emit_imt_argument).
 * NOTE(review): the warning text says "LDC" but the check is
 * IS_LDR_PC — almost certainly a typo for "LDR"; left untouched here
 * since it is a runtime string.
 */
5143 mono_arch_find_imt_method (mgreg_t *regs, guint8 *code)
5145 guint32 *code_ptr = (guint32*)code;
5147 /* The IMT value is stored in the code stream right after the LDC instruction. */
5148 if (!IS_LDR_PC (code_ptr [0])) {
5149 g_warning ("invalid code stream, instruction before IMT value is not a LDC in %s() (code %p value 0: 0x%x -1: 0x%x -2: 0x%x)", __FUNCTION__, code, code_ptr [2], code_ptr [1], code_ptr [0]);
5150 g_assert (IS_LDR_PC (code_ptr [0]));
5152 if (code_ptr [1] == 0)
5153 /* This is AOTed code, the IMT method is in V5 */
5154 return (MonoMethod*)regs [ARMREG_V5];
5156 return (MonoMethod*) code_ptr [1];
/* The vtable for a static rgctx call travels in the dedicated rgctx register. */
5160 mono_arch_find_static_call_vtable (mgreg_t *regs, guint8 *code)
5162 return (MonoVTable*) regs [MONO_ARCH_RGCTX_REG];
/* Set to 1 to emit an extra equality check that traps on IMT mispredicts. */
5165 #define ENABLE_WRONG_METHOD_CHECK 0
/*
 * Size budgets (in bytes; ARM instructions are 4 bytes each) used by
 * mono_arch_build_imt_thunk to pre-compute the thunk's code size
 * before emission.
 */
5166 #define BASE_SIZE (6 * 4)
5167 #define BSEARCH_ENTRY_SIZE (4 * 4)
5168 #define CMP_SIZE (3 * 4)
5169 #define BRANCH_SIZE (1 * 4)
5170 #define CALL_SIZE (2 * 4)
5171 #define WMC_SIZE (5 * 4)
/* Byte distance B - A, evaluated via 32-bit casts (32-bit ARM pointers). */
5172 #define DISTANCE(A, B) (((gint32)(B)) - ((gint32)(A)))
/*
 * Emit `value` as an inline literal at `code` and back-patch the
 * earlier PC-relative LDR at `target` with the (12-bit) byte offset to
 * that literal.
 * NOTE(review): `delta >= 0` is vacuously true because delta is
 * unsigned — a negative distance wraps to a huge value and is only
 * (partially) caught by the <= 0xFFF check.  Consider making delta
 * signed, or dropping the dead half of the assert.
 */
5175 arm_emit_value_and_patch_ldr (arminstr_t *code, arminstr_t *target, guint32 value)
5177 guint32 delta = DISTANCE (target, code);
5179 g_assert (delta >= 0 && delta <= 0xFFF);
/* OR the literal-pool offset into the LDR's 12-bit immediate field. */
5180 *target = *target | delta;
/*
 * Build the native IMT dispatch thunk for `vtable`: a binary-search
 * tree over imt_entries keyed on the IMT method pointer, ending in an
 * indirect jump through the matching vtable slot.
 *
 * Structure: a sizing pass sums per-entry chunk sizes (using the
 * CMP/BRANCH/CALL/BSEARCH size macros above), then code is reserved in
 * `domain` and a second pass emits the compare/branch ladder.  Inline
 * literals (IMT keys, the vtable address, large vtable offsets) are
 * emitted after unconditional branches and back-patched into the
 * PC-relative LDRs by arm_emit_value_and_patch_ldr.  A final pass
 * patches the bsearch branch targets and fills the reserved constant
 * pools.
 *
 * NOTE(review): the initial assignment of `size` (presumably
 * size = BASE_SIZE) and several brace/#ifdef lines are elided in this
 * view; the control flow below must be read against the full source.
 */
5186 mono_arch_build_imt_thunk (MonoVTable *vtable, MonoDomain *domain, MonoIMTCheckItem **imt_entries, int count,
5187 gpointer fail_tramp)
5189 int size, i, extra_space = 0;
5190 arminstr_t *code, *start, *vtable_target = NULL;
5191 gboolean large_offsets = FALSE;
5192 guint32 **constant_pool_starts;
/* One constant-pool start per entry; filled lazily during emission. */
5195 constant_pool_starts = g_new0 (guint32*, count);
5198 * We might be called with a fail_tramp from the IMT builder code even if
5199 * MONO_ARCH_HAVE_GENERALIZED_IMT_THUNK is not defined.
5201 //g_assert (!fail_tramp);
/* Sizing pass: accumulate each entry's chunk_size. */
5203 for (i = 0; i < count; ++i) {
5204 MonoIMTCheckItem *item = imt_entries [i];
5205 if (item->is_equals) {
/* Vtable slot too far for a 12-bit LDR offset: needs the slow push/LDM path. */
5206 if (!arm_is_imm12 (DISTANCE (vtable, &vtable->vtable[item->value.vtable_slot]))) {
5207 item->chunk_size += 32;
5208 large_offsets = TRUE;
5211 if (item->check_target_idx) {
5212 if (!item->compare_done)
5213 item->chunk_size += CMP_SIZE;
5214 item->chunk_size += BRANCH_SIZE;
5216 #if ENABLE_WRONG_METHOD_CHECK
5217 item->chunk_size += WMC_SIZE;
5220 item->chunk_size += CALL_SIZE;
/* Non-equals node: a bsearch compare-and-branch entry. */
5222 item->chunk_size += BSEARCH_ENTRY_SIZE;
5223 imt_entries [item->check_target_idx]->compare_done = TRUE;
5225 size += item->chunk_size;
5229 size += 4 * count; /* The ARM_ADD_REG_IMM to pop the stack */
5231 start = code = mono_domain_code_reserve (domain, size);
5234 printf ("building IMT thunk for class %s %s entries %d code size %d code at %p end %p vtable %p\n", vtable->klass->name_space, vtable->klass->name, count, size, start, ((guint8*)start) + size, vtable);
5235 for (i = 0; i < count; ++i) {
5236 MonoIMTCheckItem *item = imt_entries [i];
5237 printf ("method %d (%p) %s vtable slot %p is_equals %d chunk size %d\n", i, item->key, item->key->name, &vtable->vtable [item->value.vtable_slot], item->is_equals, item->chunk_size);
/* Prologue: save scratch regs (4-slot variant leaves room for the jump target). */
5242 ARM_PUSH4 (code, ARMREG_R0, ARMREG_R1, ARMREG_IP, ARMREG_PC);
5244 ARM_PUSH2 (code, ARMREG_R0, ARMREG_R1);
/* R0 = IMT method pointer, loaded from the literal after the call site. */
5245 ARM_LDR_IMM (code, ARMREG_R0, ARMREG_LR, -4);
/* IP = vtable address; the LDR is patched later with the literal's offset. */
5246 vtable_target = code;
5247 ARM_LDR_IMM (code, ARMREG_IP, ARMREG_PC, 0);
5249 /* R0 == 0 means we are called from AOT code. In this case, V5 contains the IMT method */
5250 ARM_CMP_REG_IMM8 (code, ARMREG_R0, 0);
5251 ARM_MOV_REG_REG_COND (code, ARMREG_R0, ARMREG_V5, ARMCOND_EQ);
/* Emission pass: one compare/branch chunk per entry. */
5253 for (i = 0; i < count; ++i) {
5254 MonoIMTCheckItem *item = imt_entries [i];
5255 arminstr_t *imt_method = NULL, *vtable_offset_ins = NULL;
5256 gint32 vtable_offset;
5258 item->code_target = (guint8*)code;
5260 if (item->is_equals) {
5261 if (item->check_target_idx) {
5262 if (!item->compare_done) {
/* R1 = this entry's IMT key (PC-relative literal, patched below). */
5264 ARM_LDR_IMM (code, ARMREG_R1, ARMREG_PC, 0);
5265 ARM_CMP_REG_REG (code, ARMREG_R0, ARMREG_R1);
/* Forward branch to the check_target entry; patched in the final pass. */
5267 item->jmp_code = (guint8*)code;
5268 ARM_B_COND (code, ARMCOND_NE, 0);
5270 /*Enable the commented code to assert on wrong method*/
5271 #if ENABLE_WRONG_METHOD_CHECK
5273 ARM_LDR_IMM (code, ARMREG_R1, ARMREG_PC, 0);
5274 ARM_CMP_REG_REG (code, ARMREG_R0, ARMREG_R1);
5275 ARM_B_COND (code, ARMCOND_NE, 1);
5281 vtable_offset = DISTANCE (vtable, &vtable->vtable[item->value.vtable_slot]);
5282 if (!arm_is_imm12 (vtable_offset)) {
5284 * We need to branch to a computed address but we don't have
5285 * a free register to store it, since IP must contain the
5286 * vtable address. So we push the two values to the stack, and
5287 * load them both using LDM.
5289 /* Compute target address */
5290 vtable_offset_ins = code;
5291 ARM_LDR_IMM (code, ARMREG_R1, ARMREG_PC, 0);
5292 ARM_LDR_REG_REG (code, ARMREG_R1, ARMREG_IP, ARMREG_R1);
5293 /* Save it to the fourth slot */
5294 ARM_STR_IMM (code, ARMREG_R1, ARMREG_SP, 3 * sizeof (gpointer));
5295 /* Restore registers and branch */
5296 ARM_POP4 (code, ARMREG_R0, ARMREG_R1, ARMREG_IP, ARMREG_PC);
5298 code = arm_emit_value_and_patch_ldr (code, vtable_offset_ins, vtable_offset);
/* Small-offset fast path: pop scratch regs and jump through the slot directly. */
5300 ARM_POP2 (code, ARMREG_R0, ARMREG_R1);
5302 ARM_ADD_REG_IMM8 (code, ARMREG_SP, ARMREG_SP, 2 * sizeof (gpointer));
5303 ARM_LDR_IMM (code, ARMREG_PC, ARMREG_IP, vtable_offset);
/* Emit the IMT key literal and patch the compare's LDR. */
5307 code = arm_emit_value_and_patch_ldr (code, imt_method, (guint32)item->key);
5309 /*must emit after unconditional branch*/
/* The vtable literal can only go after the first unconditional jump. */
5310 if (vtable_target) {
5311 code = arm_emit_value_and_patch_ldr (code, vtable_target, (guint32)vtable);
5312 item->chunk_size += 4;
5313 vtable_target = NULL;
5316 /*We reserve the space for bsearch IMT values after the first entry with an absolute jump*/
5317 constant_pool_starts [i] = code;
5319 code += extra_space;
/* Bsearch node: compare against pivot key and branch on >=. */
5323 ARM_LDR_IMM (code, ARMREG_R1, ARMREG_PC, 0);
5324 ARM_CMP_REG_REG (code, ARMREG_R0, ARMREG_R1);
5326 item->jmp_code = (guint8*)code;
5327 ARM_B_COND (code, ARMCOND_GE, 0);
/* Final pass: resolve forward branches and fill the reserved constant pools. */
5332 for (i = 0; i < count; ++i) {
5333 MonoIMTCheckItem *item = imt_entries [i];
5334 if (item->jmp_code) {
5335 if (item->check_target_idx)
5336 arm_patch (item->jmp_code, imt_entries [item->check_target_idx]->code_target);
5338 if (i > 0 && item->is_equals) {
5340 arminstr_t *space_start = constant_pool_starts [i];
/* Walk back over the preceding bsearch nodes and emit their pivot keys. */
5341 for (j = i - 1; j >= 0 && !imt_entries [j]->is_equals; --j) {
5342 space_start = arm_emit_value_and_patch_ldr (space_start, (arminstr_t*)imt_entries [j]->code_target, (guint32)imt_entries [j]->key);
5349 char *buff = g_strdup_printf ("thunk_for_class_%s_%s_entries_%d", vtable->klass->name_space, vtable->klass->name, count);
5350 mono_disassemble_code (NULL, (guint8*)start, size, buff);
5355 g_free (constant_pool_starts);
/* New code was written: flush the instruction cache before it can run. */
5357 mono_arch_flush_icache ((guint8*)start, size);
5358 mono_stats.imt_thunks_size += code - start;
5360 g_assert (DISTANCE (start, code) <= size);
/*
 * Fetch an integer register value out of a saved MonoContext.
 * SP lives in its own ctx field; all other registers come from the
 * regs array indexed by ARM register number.
 */
5367 mono_arch_context_get_int_reg (MonoContext *ctx, int reg)
5369 if (reg == ARMREG_SP)
5370 return (gpointer)ctx->esp;
5372 return (gpointer)ctx->regs [reg];
5376 * mono_arch_set_breakpoint:
5378 * Set a breakpoint at the native code corresponding to JI at NATIVE_OFFSET.
5379 * The location should contain code emitted by OP_SEQ_POINT.
/*
 * NOTE(review): this function has several build-variant bodies selected
 * by preprocessor conditionals that are elided in this view:
 *  (1) soft breakpoints — record bp_trigger_page in the method's
 *      SeqPointInfo table so the OP_SEQ_POINT load faults;
 *  (2) native patching — rewrite the seq-point site to load from
 *      bp_trigger_page (a read from a protected page raises the signal);
 *  (3) SWI fallback for targets where (2) is unavailable.
 */
5382 mono_arch_set_breakpoint (MonoJitInfo *ji, guint8 *ip)
5385 guint32 native_offset = ip - (guint8*)ji->code_start;
5388 SeqPointInfo *info = mono_arch_get_seq_point_info (mono_domain_get (), ji->code_start);
/* Seq points are instruction-aligned; the slot must not already be armed. */
5390 g_assert (native_offset % 4 == 0);
5391 g_assert (info->bp_addrs [native_offset / 4] == 0);
5392 info->bp_addrs [native_offset / 4] = bp_trigger_page;
5394 int dreg = ARMREG_LR;
5396 /* Read from another trigger page */
5397 ARM_LDR_IMM (code, dreg, ARMREG_PC, 0);
/* Inline literal: address of the breakpoint trigger page. */
5399 *(int*)code = (int)bp_trigger_page;
5401 ARM_LDR_IMM (code, dreg, dreg, 0);
5403 mono_arch_flush_icache (code - 16, 16);
5406 /* This is currently implemented by emitting an SWI instruction, which
5407 * qemu/linux seems to convert to a SIGILL.
5409 *(int*)code = (0xef << 24) | 8;
5411 mono_arch_flush_icache (code - 4, 4);
5417 * mono_arch_clear_breakpoint:
5419 * Clear the breakpoint at IP.
/*
 * Mirror of mono_arch_set_breakpoint: either clear the soft-breakpoint
 * slot in the SeqPointInfo table, or (native variant, the loop below —
 * its body is elided in this view) rewrite the patched instructions
 * back to NOPs and flush the icache.
 */
5422 mono_arch_clear_breakpoint (MonoJitInfo *ji, guint8 *ip)
5428 guint32 native_offset = ip - (guint8*)ji->code_start;
5429 SeqPointInfo *info = mono_arch_get_seq_point_info (mono_domain_get (), ji->code_start);
5431 g_assert (native_offset % 4 == 0);
/* The slot must currently be armed with the trigger page. */
5432 g_assert (info->bp_addrs [native_offset / 4] == bp_trigger_page);
5433 info->bp_addrs [native_offset / 4] = 0;
/* Native variant: restore the 4 instructions written by set_breakpoint. */
5435 for (i = 0; i < 4; ++i)
5438 mono_arch_flush_icache (ip, code - ip);
5443 * mono_arch_start_single_stepping:
5445 * Start single stepping.
/* Revoke all access to the SS trigger page so every OP_SEQ_POINT read faults. */
5448 mono_arch_start_single_stepping (void)
5450 mono_mprotect (ss_trigger_page, mono_pagesize (), 0);
5454 * mono_arch_stop_single_stepping:
5456 * Stop single stepping.
/* Make the SS trigger page readable again so seq-point reads succeed silently. */
5459 mono_arch_stop_single_stepping (void)
5461 mono_mprotect (ss_trigger_page, mono_pagesize (), MONO_MMAP_READ);
/* Signal raised by a faulting trigger-page read; platform-dependent
 * (the selecting #if/#else lines are elided in this view). */
5465 #define DBG_SIGNAL SIGBUS
5467 #define DBG_SIGNAL SIGSEGV
5471 * mono_arch_is_single_step_event:
5473 * Return whenever the machine state in SIGCTX corresponds to a single
/*
 * True when the fault address lies in the single-step trigger page.
 * The 128-byte slack tolerates the reported address being off by a
 * few words, per the comment below.
 */
5477 mono_arch_is_single_step_event (void *info, void *sigctx)
5479 siginfo_t *sinfo = info;
5481 /* Sometimes the address is off by 4 */
5482 if (sinfo->si_addr >= ss_trigger_page && (guint8*)sinfo->si_addr <= (guint8*)ss_trigger_page + 128)
5489 * mono_arch_is_breakpoint_event:
5491 * Return whenever the machine state in SIGCTX corresponds to a breakpoint event.
/*
 * True when the signal is DBG_SIGNAL and the fault address lies in the
 * breakpoint trigger page (same 128-byte slack as the single-step check).
 */
5494 mono_arch_is_breakpoint_event (void *info, void *sigctx)
5496 siginfo_t *sinfo = info;
5498 if (sinfo->si_signo == DBG_SIGNAL) {
5499 /* Sometimes the address is off by 4 */
5500 if (sinfo->si_addr >= bp_trigger_page && (guint8*)sinfo->si_addr <= (guint8*)bp_trigger_page + 128)
/* Map the faulting IP back to the start of the breakpoint's seq point
 * (adjustment logic elided in this view). */
5510 mono_arch_get_ip_for_breakpoint (MonoJitInfo *ji, MonoContext *ctx)
5512 guint8 *ip = MONO_CONTEXT_GET_IP (ctx);
/* Same mapping for single-step events (adjustment logic elided in this view). */
5523 mono_arch_get_ip_for_single_step (MonoJitInfo *ji, MonoContext *ctx)
5525 guint8 *ip = MONO_CONTEXT_GET_IP (ctx);
5533 * mono_arch_skip_breakpoint:
5535 * See mini-amd64.c for docs.
/* Advance past the 4-byte faulting load so execution resumes after the breakpoint. */
5538 mono_arch_skip_breakpoint (MonoContext *ctx)
5540 MONO_CONTEXT_SET_IP (ctx, (guint8*)MONO_CONTEXT_GET_IP (ctx) + 4);
5544 * mono_arch_skip_single_step:
5546 * See mini-amd64.c for docs.
/* Advance past the 4-byte faulting load so execution resumes after the seq point. */
5549 mono_arch_skip_single_step (MonoContext *ctx)
5551 MONO_CONTEXT_SET_IP (ctx, (guint8*)MONO_CONTEXT_GET_IP (ctx) + 4);
5555 * mono_arch_get_seq_point_info:
5557 * See mini-amd64.c for docs.
5560 mono_arch_get_seq_point_info (MonoDomain *domain, guint8 *code)
5565 // FIXME: Add a free function
5567 mono_domain_lock (domain);
5568 info = g_hash_table_lookup (domain_jit_info (domain)->arch_seq_points,
5570 mono_domain_unlock (domain);
5573 ji = mono_jit_info_table_find (domain, (char*)code);
5576 info = g_malloc0 (sizeof (SeqPointInfo) + ji->code_size);
5578 info->ss_trigger_page = ss_trigger_page;
5579 info->bp_trigger_page = bp_trigger_page;
5581 mono_domain_lock (domain);
5582 g_hash_table_insert (domain_jit_info (domain)->arch_seq_points,
5584 mono_domain_unlock (domain);