2 * mini-arm.c: ARM backend for the Mono code generator
5 * Paolo Molaro (lupus@ximian.com)
6 * Dietmar Maurer (dietmar@ximian.com)
8 * (C) 2003 Ximian, Inc.
13 #include <mono/metadata/appdomain.h>
14 #include <mono/metadata/debug-helpers.h>
15 #include <mono/utils/mono-mmap.h>
22 #include "mono/arch/arm/arm-fpa-codegen.h"
23 #elif defined(ARM_FPU_VFP)
24 #include "mono/arch/arm/arm-vfp-codegen.h"
27 #if defined(__ARM_EABI__) && defined(__linux__) && !defined(PLATFORM_ANDROID)
28 #define HAVE_AEABI_READ_TP 1
31 static gint lmf_tls_offset = -1;
32 static gint lmf_addr_tls_offset = -1;
34 /* This mutex protects architecture specific caches */
35 #define mono_mini_arch_lock() EnterCriticalSection (&mini_arch_mutex)
36 #define mono_mini_arch_unlock() LeaveCriticalSection (&mini_arch_mutex)
37 static CRITICAL_SECTION mini_arch_mutex;
39 static int v5_supported = 0;
40 static int v7_supported = 0;
41 static int thumb_supported = 0;
44 * The code generated for sequence points reads from this location, which is
45 * made read-only when single stepping is enabled.
47 static gpointer ss_trigger_page;
49 /* Enabled breakpoints read from this trigger page */
50 static gpointer bp_trigger_page;
52 /* Structure used by the sequence points in AOTed code */
54 gpointer ss_trigger_page;
55 gpointer bp_trigger_page;
56 guint8* bp_addrs [MONO_ZERO_LEN_ARRAY];
61 * floating point support: on ARM it is a mess, there are at least 3
62 * different setups, each of which is binary incompatible with the others.
63 * 1) FPA: old and ugly, but unfortunately what current distros use
64 * the double binary format has the two words swapped. 8 double registers.
65 * Implemented usually by kernel emulation.
66 * 2) softfloat: the compiler emulates all the fp ops. Usually uses the
67 * ugly swapped double format (I guess a softfloat-vfp exists, too, though).
68 * 3) VFP: the new and actually sensible and useful FP support. Implemented
69 * in HW or kernel-emulated, requires new tools. I think this is what symbian uses.
71 * The plan is to write the FPA support first. softfloat can be tested in a chroot.
73 int mono_exc_esp_offset = 0;
75 #define arm_is_imm12(v) ((v) > -4096 && (v) < 4096)
76 #define arm_is_imm8(v) ((v) > -256 && (v) < 256)
77 #define arm_is_fpimm8(v) ((v) >= -1020 && (v) <= 1020)
79 #define LDR_MASK ((0xf << ARMCOND_SHIFT) | (3 << 26) | (1 << 22) | (1 << 20) | (15 << 12))
80 #define LDR_PC_VAL ((ARMCOND_AL << ARMCOND_SHIFT) | (1 << 26) | (0 << 22) | (1 << 20) | (15 << 12))
81 #define IS_LDR_PC(val) (((val) & LDR_MASK) == LDR_PC_VAL)
83 #define ADD_LR_PC_4 ((ARMCOND_AL << ARMCOND_SHIFT) | (1 << 25) | (1 << 23) | (ARMREG_PC << 16) | (ARMREG_LR << 12) | 4)
84 #define MOV_LR_PC ((ARMCOND_AL << ARMCOND_SHIFT) | (1 << 24) | (0xa << 20) | (ARMREG_LR << 12) | ARMREG_PC)
87 /* A variant of ARM_LDR_IMM which can handle large offsets */
88 #define ARM_LDR_IMM_GENERAL(code, dreg, basereg, offset, scratch_reg) do { \
89 if (arm_is_imm12 ((offset))) { \
90 ARM_LDR_IMM (code, (dreg), (basereg), (offset)); \
92 g_assert ((scratch_reg) != (basereg)); \
93 code = mono_arm_emit_load_imm (code, (scratch_reg), (offset)); \
94 ARM_LDR_REG_REG (code, (dreg), (basereg), (scratch_reg)); \
98 #define ARM_STR_IMM_GENERAL(code, dreg, basereg, offset, scratch_reg) do { \
99 if (arm_is_imm12 ((offset))) { \
100 ARM_STR_IMM (code, (dreg), (basereg), (offset)); \
102 g_assert ((scratch_reg) != (basereg)); \
103 code = mono_arm_emit_load_imm (code, (scratch_reg), (offset)); \
104 ARM_STR_REG_REG (code, (dreg), (basereg), (scratch_reg)); \
109 mono_arch_regname (int reg)
111 static const char * rnames[] = {
112 "arm_r0", "arm_r1", "arm_r2", "arm_r3", "arm_v1",
113 "arm_v2", "arm_v3", "arm_v4", "arm_v5", "arm_v6",
114 "arm_v7", "arm_fp", "arm_ip", "arm_sp", "arm_lr",
117 if (reg >= 0 && reg < 16)
123 mono_arch_fregname (int reg)
125 static const char * rnames[] = {
126 "arm_f0", "arm_f1", "arm_f2", "arm_f3", "arm_f4",
127 "arm_f5", "arm_f6", "arm_f7", "arm_f8", "arm_f9",
128 "arm_f10", "arm_f11", "arm_f12", "arm_f13", "arm_f14",
129 "arm_f15", "arm_f16", "arm_f17", "arm_f18", "arm_f19",
130 "arm_f20", "arm_f21", "arm_f22", "arm_f23", "arm_f24",
131 "arm_f25", "arm_f26", "arm_f27", "arm_f28", "arm_f29",
134 if (reg >= 0 && reg < 32)
142 emit_big_add (guint8 *code, int dreg, int sreg, int imm)
144 int imm8, rot_amount;
145 if ((imm8 = mono_arm_is_rotated_imm8 (imm, &rot_amount)) >= 0) {
146 ARM_ADD_REG_IMM (code, dreg, sreg, imm8, rot_amount);
149 g_assert (dreg != sreg);
150 code = mono_arm_emit_load_imm (code, dreg, imm);
151 ARM_ADD_REG_REG (code, dreg, dreg, sreg);
156 emit_memcpy (guint8 *code, int size, int dreg, int doffset, int sreg, int soffset)
158 /* we can use r0-r3, since this is called only for incoming args on the stack */
159 if (size > sizeof (gpointer) * 4) {
161 code = emit_big_add (code, ARMREG_R0, sreg, soffset);
162 code = emit_big_add (code, ARMREG_R1, dreg, doffset);
163 start_loop = code = mono_arm_emit_load_imm (code, ARMREG_R2, size);
164 ARM_LDR_IMM (code, ARMREG_R3, ARMREG_R0, 0);
165 ARM_STR_IMM (code, ARMREG_R3, ARMREG_R1, 0);
166 ARM_ADD_REG_IMM8 (code, ARMREG_R0, ARMREG_R0, 4);
167 ARM_ADD_REG_IMM8 (code, ARMREG_R1, ARMREG_R1, 4);
168 ARM_SUBS_REG_IMM8 (code, ARMREG_R2, ARMREG_R2, 4);
169 ARM_B_COND (code, ARMCOND_NE, 0);
170 arm_patch (code - 4, start_loop);
173 if (arm_is_imm12 (doffset) && arm_is_imm12 (doffset + size) &&
174 arm_is_imm12 (soffset) && arm_is_imm12 (soffset + size)) {
176 ARM_LDR_IMM (code, ARMREG_LR, sreg, soffset);
177 ARM_STR_IMM (code, ARMREG_LR, dreg, doffset);
183 code = emit_big_add (code, ARMREG_R0, sreg, soffset);
184 code = emit_big_add (code, ARMREG_R1, dreg, doffset);
185 doffset = soffset = 0;
187 ARM_LDR_IMM (code, ARMREG_LR, ARMREG_R0, soffset);
188 ARM_STR_IMM (code, ARMREG_LR, ARMREG_R1, doffset);
194 g_assert (size == 0);
199 emit_call_reg (guint8 *code, int reg)
202 ARM_BLX_REG (code, reg);
204 ARM_MOV_REG_REG (code, ARMREG_LR, ARMREG_PC);
208 ARM_MOV_REG_REG (code, ARMREG_PC, reg);
214 emit_call_seq (MonoCompile *cfg, guint8 *code)
216 if (cfg->method->dynamic) {
217 ARM_LDR_IMM (code, ARMREG_IP, ARMREG_PC, 0);
219 *(gpointer*)code = NULL;
221 code = emit_call_reg (code, ARMREG_IP);
229 emit_move_return_value (MonoCompile *cfg, MonoInst *ins, guint8 *code)
231 switch (ins->opcode) {
234 case OP_FCALL_MEMBASE:
236 if (ins->dreg != ARM_FPA_F0)
237 ARM_MVFD (code, ins->dreg, ARM_FPA_F0);
238 #elif defined(ARM_FPU_VFP)
239 if (((MonoCallInst*)ins)->signature->ret->type == MONO_TYPE_R4) {
240 ARM_FMSR (code, ins->dreg, ARMREG_R0);
241 ARM_CVTS (code, ins->dreg, ins->dreg);
243 ARM_FMDRR (code, ARMREG_R0, ARMREG_R1, ins->dreg);
252 #endif /* #ifndef DISABLE_JIT */
255 * mono_arch_get_argument_info:
256 * @csig: a method signature
257 * @param_count: the number of parameters to consider
258 * @arg_info: an array to store the result infos
260 * Gathers information on parameters such as size, alignment and
261 * padding. arg_info should be large enough to hold param_count + 1 entries.
263 * Returns the size of the activation frame.
266 mono_arch_get_argument_info (MonoMethodSignature *csig, int param_count, MonoJitArgumentInfo *arg_info)
268 int k, frame_size = 0;
269 guint32 size, align, pad;
272 if (MONO_TYPE_ISSTRUCT (csig->ret)) {
273 frame_size += sizeof (gpointer);
277 arg_info [0].offset = offset;
280 frame_size += sizeof (gpointer);
284 arg_info [0].size = frame_size;
286 for (k = 0; k < param_count; k++) {
287 size = mini_type_stack_size_full (NULL, csig->params [k], &align, csig->pinvoke);
289 /* ignore alignment for now */
292 frame_size += pad = (align - (frame_size & (align - 1))) & (align - 1);
293 arg_info [k].pad = pad;
295 arg_info [k + 1].pad = 0;
296 arg_info [k + 1].size = size;
298 arg_info [k + 1].offset = offset;
302 align = MONO_ARCH_FRAME_ALIGNMENT;
303 frame_size += pad = (align - (frame_size & (align - 1))) & (align - 1);
304 arg_info [k].pad = pad;
310 decode_vcall_slot_from_ldr (guint32 ldr, mgreg_t *regs, int *displacement)
314 reg = (ldr >> 16 ) & 0xf;
315 offset = ldr & 0xfff;
316 if (((ldr >> 23) & 1) == 0) /*U bit, 0 means negative and 1 positive*/
318 /*g_print ("found vcall at r%d + %d for code at %p 0x%x\n", reg, offset, code, *code);*/
319 o = (gpointer)regs [reg];
321 *displacement = offset;
326 mono_arch_get_vcall_slot (guint8 *code_ptr, mgreg_t *regs, int *displacement)
328 guint32* code = (guint32*)code_ptr;
330 /* Locate the address of the method-specific trampoline. The call using
331 the vtable slot that took the processing flow to 'arch_create_jit_trampoline'
332 looks something like this:
341 The call sequence could be also:
344 function pointer literal
348 Note that on ARM5+ we can use one instruction instead of the last two.
349 Therefore, we need to locate the 'ldr rA' instruction to know which
350 register was used to hold the method addrs.
353 /* This is the instruction after "ldr pc, xxx", "mov pc, xxx" or "bl xxx"; it could be either the IMT value or some other instruction */
356 /* Three possible code sequences can happen here:
360 * ldr pc, [rX - #offset]
366 * ldr pc, [rX - #offset]
368 * direct branch with bl:
372 * direct branch with mov:
376 * We only need to identify interface and virtual calls, the others can be ignored.
379 if (IS_LDR_PC (code [-1]) && code [-2] == ADD_LR_PC_4)
380 return decode_vcall_slot_from_ldr (code [-1], regs, displacement);
382 if (IS_LDR_PC (code [0]) && code [-1] == MOV_LR_PC)
383 return decode_vcall_slot_from_ldr (code [0], regs, displacement);
388 #define MAX_ARCH_DELEGATE_PARAMS 3
391 get_delegate_invoke_impl (gboolean has_target, gboolean param_count, guint32 *code_size)
393 guint8 *code, *start;
396 start = code = mono_global_codeman_reserve (12);
398 /* Replace the this argument with the target */
399 ARM_LDR_IMM (code, ARMREG_IP, ARMREG_R0, G_STRUCT_OFFSET (MonoDelegate, method_ptr));
400 ARM_LDR_IMM (code, ARMREG_R0, ARMREG_R0, G_STRUCT_OFFSET (MonoDelegate, target));
401 ARM_MOV_REG_REG (code, ARMREG_PC, ARMREG_IP);
403 g_assert ((code - start) <= 12);
405 mono_arch_flush_icache (start, 12);
409 size = 8 + param_count * 4;
410 start = code = mono_global_codeman_reserve (size);
412 ARM_LDR_IMM (code, ARMREG_IP, ARMREG_R0, G_STRUCT_OFFSET (MonoDelegate, method_ptr));
413 /* slide down the arguments */
414 for (i = 0; i < param_count; ++i) {
415 ARM_MOV_REG_REG (code, (ARMREG_R0 + i), (ARMREG_R0 + i + 1));
417 ARM_MOV_REG_REG (code, ARMREG_PC, ARMREG_IP);
419 g_assert ((code - start) <= size);
421 mono_arch_flush_icache (start, size);
425 *code_size = code - start;
431 * mono_arch_get_delegate_invoke_impls:
433 * Return a list of MonoAotTrampInfo structures for the delegate invoke impl
437 mono_arch_get_delegate_invoke_impls (void)
444 code = get_delegate_invoke_impl (TRUE, 0, &code_len);
445 res = g_slist_prepend (res, mono_aot_tramp_info_create (g_strdup ("delegate_invoke_impl_has_target"), code, code_len));
447 for (i = 0; i <= MAX_ARCH_DELEGATE_PARAMS; ++i) {
448 code = get_delegate_invoke_impl (FALSE, i, &code_len);
449 res = g_slist_prepend (res, mono_aot_tramp_info_create (g_strdup_printf ("delegate_invoke_impl_target_%d", i), code, code_len));
456 mono_arch_get_delegate_invoke_impl (MonoMethodSignature *sig, gboolean has_target)
458 guint8 *code, *start;
460 /* FIXME: Support more cases */
461 if (MONO_TYPE_ISSTRUCT (sig->ret))
465 static guint8* cached = NULL;
466 mono_mini_arch_lock ();
468 mono_mini_arch_unlock ();
473 start = mono_aot_get_named_code ("delegate_invoke_impl_has_target");
475 start = get_delegate_invoke_impl (TRUE, 0, NULL);
477 mono_mini_arch_unlock ();
480 static guint8* cache [MAX_ARCH_DELEGATE_PARAMS + 1] = {NULL};
483 if (sig->param_count > MAX_ARCH_DELEGATE_PARAMS)
485 for (i = 0; i < sig->param_count; ++i)
486 if (!mono_is_regsize_var (sig->params [i]))
489 mono_mini_arch_lock ();
490 code = cache [sig->param_count];
492 mono_mini_arch_unlock ();
497 char *name = g_strdup_printf ("delegate_invoke_impl_target_%d", sig->param_count);
498 start = mono_aot_get_named_code (name);
501 start = get_delegate_invoke_impl (FALSE, sig->param_count, NULL);
503 cache [sig->param_count] = start;
504 mono_mini_arch_unlock ();
512 mono_arch_get_this_arg_from_call (MonoGenericSharingContext *gsctx, MonoMethodSignature *sig, mgreg_t *regs, guint8 *code)
514 /* FIXME: handle returning a struct */
515 if (MONO_TYPE_ISSTRUCT (sig->ret))
516 return (gpointer)regs [ARMREG_R1];
517 return (gpointer)regs [ARMREG_R0];
521 * Initialize the cpu to execute managed code.
524 mono_arch_cpu_init (void)
529 * Initialize architecture specific code.
532 mono_arch_init (void)
534 InitializeCriticalSection (&mini_arch_mutex);
536 ss_trigger_page = mono_valloc (NULL, mono_pagesize (), MONO_MMAP_READ|MONO_MMAP_32BIT);
537 bp_trigger_page = mono_valloc (NULL, mono_pagesize (), MONO_MMAP_READ|MONO_MMAP_32BIT);
538 mono_mprotect (bp_trigger_page, mono_pagesize (), 0);
542 * Cleanup architecture specific code.
545 mono_arch_cleanup (void)
550 * This function returns the optimizations supported on this cpu.
553 mono_arch_cpu_optimizazions (guint32 *exclude_mask)
556 const char *cpu_arch = getenv ("MONO_CPU_ARCH");
557 if (cpu_arch != NULL) {
558 thumb_supported = strstr (cpu_arch, "thumb") != NULL;
559 if (strncmp (cpu_arch, "armv", 4) == 0) {
560 v5_supported = cpu_arch [4] >= '5';
561 v7_supported = cpu_arch [4] >= '7';
565 thumb_supported = TRUE;
570 FILE *file = fopen ("/proc/cpuinfo", "r");
572 while ((line = fgets (buf, 512, file))) {
573 if (strncmp (line, "Processor", 9) == 0) {
574 char *ver = strstr (line, "(v");
575 if (ver && (ver [2] == '5' || ver [2] == '6' || ver [2] == '7'))
577 if (ver && (ver [2] == '7'))
581 if (strncmp (line, "Features", 8) == 0) {
582 char *th = strstr (line, "thumb");
584 thumb_supported = TRUE;
592 /*printf ("features: v5: %d, thumb: %d\n", v5_supported, thumb_supported);*/
597 /* no arm-specific optimizations yet */
605 is_regsize_var (MonoType *t) {
608 t = mini_type_get_underlying_type (NULL, t);
615 case MONO_TYPE_FNPTR:
617 case MONO_TYPE_OBJECT:
618 case MONO_TYPE_STRING:
619 case MONO_TYPE_CLASS:
620 case MONO_TYPE_SZARRAY:
621 case MONO_TYPE_ARRAY:
623 case MONO_TYPE_GENERICINST:
624 if (!mono_type_generic_inst_is_valuetype (t))
627 case MONO_TYPE_VALUETYPE:
634 mono_arch_get_allocatable_int_vars (MonoCompile *cfg)
639 for (i = 0; i < cfg->num_varinfo; i++) {
640 MonoInst *ins = cfg->varinfo [i];
641 MonoMethodVar *vmv = MONO_VARINFO (cfg, i);
644 if (vmv->range.first_use.abs_pos >= vmv->range.last_use.abs_pos)
647 if (ins->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT) || (ins->opcode != OP_LOCAL && ins->opcode != OP_ARG))
650 /* we can only allocate 32 bit values */
651 if (is_regsize_var (ins->inst_vtype)) {
652 g_assert (MONO_VARINFO (cfg, i)->reg == -1);
653 g_assert (i == vmv->idx);
654 vars = mono_varlist_insert_sorted (cfg, vars, vmv, FALSE);
661 #define USE_EXTRA_TEMPS 0
664 mono_arch_get_global_int_regs (MonoCompile *cfg)
669 * FIXME: Interface calls might go through a static rgctx trampoline which
670 * sets V5, but it doesn't save it, so we need to save it ourselves, and
673 if (cfg->flags & MONO_CFG_HAS_CALLS)
674 cfg->uses_rgctx_reg = TRUE;
676 regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_V1));
677 regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_V2));
678 regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_V3));
679 regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_V4));
680 if (!(cfg->compile_aot || cfg->uses_rgctx_reg))
681 /* V5 is reserved for passing the vtable/rgctx/IMT method */
682 regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_V5));
683 /*regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_V6));*/
684 /*regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_V7));*/
690 * mono_arch_regalloc_cost:
692 * Return the cost, in number of memory references, of the action of
693 * allocating the variable VMV into a register during global register
697 mono_arch_regalloc_cost (MonoCompile *cfg, MonoMethodVar *vmv)
703 #endif /* #ifndef DISABLE_JIT */
705 #ifndef __GNUC_PREREQ
706 #define __GNUC_PREREQ(maj, min) (0)
710 mono_arch_flush_icache (guint8 *code, gint size)
713 sys_icache_invalidate (code, size);
714 #elif __GNUC_PREREQ(4, 1)
715 __clear_cache (code, code + size);
716 #elif defined(PLATFORM_ANDROID)
717 const int syscall = 0xf0002;
725 : "r" (code), "r" (code + size), "r" (syscall)
726 : "r0", "r1", "r7", "r2"
729 __asm __volatile ("mov r0, %0\n"
732 "swi 0x9f0002 @ sys_cacheflush"
734 : "r" (code), "r" (code + size), "r" (0)
735 : "r0", "r1", "r3" );
752 guint16 vtsize; /* in param area */
755 guint8 size : 4; /* 1, 2, 4, 8, or regs used by RegTypeStructByVal */
762 gboolean vtype_retaddr;
771 /*#define __alignof__(a) sizeof(a)*/
772 #define __alignof__(type) G_STRUCT_OFFSET(struct { char c; type x; }, x)
778 add_general (guint *gr, guint *stack_size, ArgInfo *ainfo, gboolean simple)
781 if (*gr > ARMREG_R3) {
782 ainfo->offset = *stack_size;
783 ainfo->reg = ARMREG_SP; /* in the caller */
784 ainfo->storage = RegTypeBase;
787 ainfo->storage = RegTypeGeneral;
791 #if defined(__APPLE__) && defined(MONO_CROSS_COMPILE)
794 int i8_align = __alignof__ (gint64);
798 gboolean split = i8_align == 4;
800 gboolean split = TRUE;
803 if (*gr == ARMREG_R3 && split) {
804 /* first word in r3 and the second on the stack */
805 ainfo->offset = *stack_size;
806 ainfo->reg = ARMREG_SP; /* in the caller */
807 ainfo->storage = RegTypeBaseGen;
809 } else if (*gr >= ARMREG_R3) {
811 /* darwin aligns longs to 4 byte only */
817 ainfo->offset = *stack_size;
818 ainfo->reg = ARMREG_SP; /* in the caller */
819 ainfo->storage = RegTypeBase;
823 if (i8_align == 8 && ((*gr) & 1))
826 ainfo->storage = RegTypeIRegPair;
835 get_call_info (MonoMemPool *mp, MonoMethodSignature *sig, gboolean is_pinvoke)
838 int n = sig->hasthis + sig->param_count;
839 MonoType *simpletype;
840 guint32 stack_size = 0;
844 cinfo = mono_mempool_alloc0 (mp, sizeof (CallInfo) + (sizeof (ArgInfo) * n));
846 cinfo = g_malloc0 (sizeof (CallInfo) + (sizeof (ArgInfo) * n));
851 /* FIXME: handle returning a struct */
852 if (MONO_TYPE_ISSTRUCT (sig->ret)) {
855 if (is_pinvoke && mono_class_native_size (mono_class_from_mono_type (sig->ret), &align) <= sizeof (gpointer)) {
856 cinfo->ret.storage = RegTypeStructByVal;
858 add_general (&gr, &stack_size, &cinfo->ret, TRUE);
859 cinfo->struct_ret = ARMREG_R0;
860 cinfo->vtype_retaddr = TRUE;
866 add_general (&gr, &stack_size, cinfo->args + n, TRUE);
869 DEBUG(printf("params: %d\n", sig->param_count));
870 for (i = 0; i < sig->param_count; ++i) {
871 if ((sig->call_convention == MONO_CALL_VARARG) && (i == sig->sentinelpos)) {
872 /* Prevent implicit arguments and sig_cookie from
873 being passed in registers */
875 /* Emit the signature cookie just before the implicit arguments */
876 add_general (&gr, &stack_size, &cinfo->sig_cookie, TRUE);
878 DEBUG(printf("param %d: ", i));
879 if (sig->params [i]->byref) {
880 DEBUG(printf("byref\n"));
881 add_general (&gr, &stack_size, cinfo->args + n, TRUE);
885 simpletype = mini_type_get_underlying_type (NULL, sig->params [i]);
886 switch (simpletype->type) {
887 case MONO_TYPE_BOOLEAN:
890 cinfo->args [n].size = 1;
891 add_general (&gr, &stack_size, cinfo->args + n, TRUE);
897 cinfo->args [n].size = 2;
898 add_general (&gr, &stack_size, cinfo->args + n, TRUE);
903 cinfo->args [n].size = 4;
904 add_general (&gr, &stack_size, cinfo->args + n, TRUE);
910 case MONO_TYPE_FNPTR:
911 case MONO_TYPE_CLASS:
912 case MONO_TYPE_OBJECT:
913 case MONO_TYPE_STRING:
914 case MONO_TYPE_SZARRAY:
915 case MONO_TYPE_ARRAY:
917 cinfo->args [n].size = sizeof (gpointer);
918 add_general (&gr, &stack_size, cinfo->args + n, TRUE);
921 case MONO_TYPE_GENERICINST:
922 if (!mono_type_generic_inst_is_valuetype (simpletype)) {
923 cinfo->args [n].size = sizeof (gpointer);
924 add_general (&gr, &stack_size, cinfo->args + n, TRUE);
929 case MONO_TYPE_TYPEDBYREF:
930 case MONO_TYPE_VALUETYPE: {
936 if (simpletype->type == MONO_TYPE_TYPEDBYREF) {
937 size = sizeof (MonoTypedRef);
938 align = sizeof (gpointer);
940 MonoClass *klass = mono_class_from_mono_type (sig->params [i]);
942 size = mono_class_native_size (klass, &align);
944 size = mono_class_value_size (klass, &align);
946 DEBUG(printf ("load %d bytes struct\n",
947 mono_class_native_size (sig->params [i]->data.klass, NULL)));
950 align_size += (sizeof (gpointer) - 1);
951 align_size &= ~(sizeof (gpointer) - 1);
952 nwords = (align_size + sizeof (gpointer) -1 ) / sizeof (gpointer);
953 cinfo->args [n].storage = RegTypeStructByVal;
954 /* FIXME: align stack_size if needed */
956 if (align >= 8 && (gr & 1))
959 if (gr > ARMREG_R3) {
960 cinfo->args [n].size = 0;
961 cinfo->args [n].vtsize = nwords;
963 int rest = ARMREG_R3 - gr + 1;
964 int n_in_regs = rest >= nwords? nwords: rest;
966 cinfo->args [n].size = n_in_regs;
967 cinfo->args [n].vtsize = nwords - n_in_regs;
968 cinfo->args [n].reg = gr;
972 cinfo->args [n].offset = stack_size;
973 /*g_print ("offset for arg %d at %d\n", n, stack_size);*/
974 stack_size += nwords * sizeof (gpointer);
981 cinfo->args [n].size = 8;
982 add_general (&gr, &stack_size, cinfo->args + n, FALSE);
986 g_error ("Can't trampoline 0x%x", sig->params [i]->type);
990 /* Handle the case where there are no implicit arguments */
991 if ((sig->call_convention == MONO_CALL_VARARG) && (i == sig->sentinelpos)) {
992 /* Prevent implicit arguments and sig_cookie from
993 being passed in registers */
995 /* Emit the signature cookie just before the implicit arguments */
996 add_general (&gr, &stack_size, &cinfo->sig_cookie, TRUE);
1000 simpletype = mini_type_get_underlying_type (NULL, sig->ret);
1001 switch (simpletype->type) {
1002 case MONO_TYPE_BOOLEAN:
1007 case MONO_TYPE_CHAR:
1013 case MONO_TYPE_FNPTR:
1014 case MONO_TYPE_CLASS:
1015 case MONO_TYPE_OBJECT:
1016 case MONO_TYPE_SZARRAY:
1017 case MONO_TYPE_ARRAY:
1018 case MONO_TYPE_STRING:
1019 cinfo->ret.storage = RegTypeGeneral;
1020 cinfo->ret.reg = ARMREG_R0;
1024 cinfo->ret.storage = RegTypeIRegPair;
1025 cinfo->ret.reg = ARMREG_R0;
1029 cinfo->ret.storage = RegTypeFP;
1030 cinfo->ret.reg = ARMREG_R0;
1031 /* FIXME: cinfo->ret.reg = ???;
1032 cinfo->ret.storage = RegTypeFP;*/
1034 case MONO_TYPE_GENERICINST:
1035 if (!mono_type_generic_inst_is_valuetype (simpletype)) {
1036 cinfo->ret.storage = RegTypeGeneral;
1037 cinfo->ret.reg = ARMREG_R0;
1041 case MONO_TYPE_VALUETYPE:
1042 case MONO_TYPE_TYPEDBYREF:
1043 if (cinfo->ret.storage != RegTypeStructByVal)
1044 cinfo->ret.storage = RegTypeStructByAddr;
1046 case MONO_TYPE_VOID:
1049 g_error ("Can't handle as return value 0x%x", sig->ret->type);
1053 /* align stack size to 8 */
1054 DEBUG (printf (" stack size: %d (%d)\n", (stack_size + 15) & ~15, stack_size));
1055 stack_size = (stack_size + 7) & ~7;
1057 cinfo->stack_usage = stack_size;
1064 * Set var information according to the calling convention. arm version.
1065 * The locals var stuff should most likely be split in another method.
1068 mono_arch_allocate_vars (MonoCompile *cfg)
1070 MonoMethodSignature *sig;
1071 MonoMethodHeader *header;
1073 int i, offset, size, align, curinst;
1074 int frame_reg = ARMREG_FP;
1078 sig = mono_method_signature (cfg->method);
1080 if (!cfg->arch.cinfo)
1081 cfg->arch.cinfo = get_call_info (cfg->mempool, sig, sig->pinvoke);
1082 cinfo = cfg->arch.cinfo;
1084 /* FIXME: this will change when we use FP as gcc does */
1085 cfg->flags |= MONO_CFG_HAS_SPILLUP;
1087 /* allow room for the vararg method args: void* and long/double */
1088 if (mono_jit_trace_calls != NULL && mono_trace_eval (cfg->method))
1089 cfg->param_area = MAX (cfg->param_area, sizeof (gpointer)*8);
1091 header = cfg->header;
1094 * We use the frame register also for any method that has
1095 * exception clauses. This way, when the handlers are called,
1096 * the code will reference local variables using the frame reg instead of
1097 * the stack pointer: if we had to restore the stack pointer, we'd
1098 * corrupt the method frames that are already on the stack (since
1099 * filters get called before stack unwinding happens) when the filter
1100 * code would call any method (this also applies to finally etc.).
1102 if ((cfg->flags & MONO_CFG_HAS_ALLOCA) || header->num_clauses)
1103 frame_reg = ARMREG_FP;
1104 cfg->frame_reg = frame_reg;
1105 if (frame_reg != ARMREG_SP) {
1106 cfg->used_int_regs |= 1 << frame_reg;
1109 if (cfg->compile_aot || cfg->uses_rgctx_reg)
1110 /* V5 is reserved for passing the vtable/rgctx/IMT method */
1111 cfg->used_int_regs |= (1 << ARMREG_V5);
1115 if (!MONO_TYPE_ISSTRUCT (sig->ret)) {
1116 switch (mini_type_get_underlying_type (NULL, sig->ret)->type) {
1117 case MONO_TYPE_VOID:
1120 cfg->ret->opcode = OP_REGVAR;
1121 cfg->ret->inst_c0 = ARMREG_R0;
1125 /* local vars are at a positive offset from the stack pointer */
1127 * also note that if the function uses alloca, we use FP
1128 * to point at the local variables.
1130 offset = 0; /* linkage area */
1131 /* align the offset to 16 bytes: not sure this is needed here */
1133 //offset &= ~(8 - 1);
1135 /* add parameter area size for called functions */
1136 offset += cfg->param_area;
1139 if (cfg->flags & MONO_CFG_HAS_FPOUT)
1142 /* allow room to save the return value */
1143 if (mono_jit_trace_calls != NULL && mono_trace_eval (cfg->method))
1146 /* the MonoLMF structure is stored just below the stack pointer */
1147 if (MONO_TYPE_ISSTRUCT (sig->ret)) {
1148 if (cinfo->ret.storage == RegTypeStructByVal) {
1149 cfg->ret->opcode = OP_REGOFFSET;
1150 cfg->ret->inst_basereg = cfg->frame_reg;
1151 offset += sizeof (gpointer) - 1;
1152 offset &= ~(sizeof (gpointer) - 1);
1153 cfg->ret->inst_offset = - offset;
1155 ins = cfg->vret_addr;
1156 offset += sizeof(gpointer) - 1;
1157 offset &= ~(sizeof(gpointer) - 1);
1158 ins->inst_offset = offset;
1159 ins->opcode = OP_REGOFFSET;
1160 ins->inst_basereg = frame_reg;
1161 if (G_UNLIKELY (cfg->verbose_level > 1)) {
1162 printf ("vret_addr =");
1163 mono_print_ins (cfg->vret_addr);
1166 offset += sizeof(gpointer);
1169 /* Allocate these first so they have a small offset, OP_SEQ_POINT depends on this */
1170 if (cfg->arch.seq_point_info_var) {
1173 ins = cfg->arch.seq_point_info_var;
1177 offset += align - 1;
1178 offset &= ~(align - 1);
1179 ins->opcode = OP_REGOFFSET;
1180 ins->inst_basereg = frame_reg;
1181 ins->inst_offset = offset;
1184 ins = cfg->arch.ss_trigger_page_var;
1187 offset += align - 1;
1188 offset &= ~(align - 1);
1189 ins->opcode = OP_REGOFFSET;
1190 ins->inst_basereg = frame_reg;
1191 ins->inst_offset = offset;
1195 curinst = cfg->locals_start;
1196 for (i = curinst; i < cfg->num_varinfo; ++i) {
1197 ins = cfg->varinfo [i];
1198 if ((ins->flags & MONO_INST_IS_DEAD) || ins->opcode == OP_REGVAR || ins->opcode == OP_REGOFFSET)
1201 /* inst->backend.is_pinvoke indicates native sized value types, this is used by the
1202 * pinvoke wrappers when they call functions returning structure */
1203 if (ins->backend.is_pinvoke && MONO_TYPE_ISSTRUCT (ins->inst_vtype) && ins->inst_vtype->type != MONO_TYPE_TYPEDBYREF) {
1204 size = mono_class_native_size (mono_class_from_mono_type (ins->inst_vtype), &ualign);
1208 size = mono_type_size (ins->inst_vtype, &align);
1210 /* FIXME: if a structure is misaligned, our memcpy doesn't work,
1211 * since it loads/stores misaligned words, which don't do the right thing.
1213 if (align < 4 && size >= 4)
1215 offset += align - 1;
1216 offset &= ~(align - 1);
1217 ins->opcode = OP_REGOFFSET;
1218 ins->inst_offset = offset;
1219 ins->inst_basereg = frame_reg;
1221 //g_print ("allocating local %d to %d\n", i, inst->inst_offset);
1226 ins = cfg->args [curinst];
1227 if (ins->opcode != OP_REGVAR) {
1228 ins->opcode = OP_REGOFFSET;
1229 ins->inst_basereg = frame_reg;
1230 offset += sizeof (gpointer) - 1;
1231 offset &= ~(sizeof (gpointer) - 1);
1232 ins->inst_offset = offset;
1233 offset += sizeof (gpointer);
1238 if (sig->call_convention == MONO_CALL_VARARG) {
1242 /* Allocate a local slot to hold the sig cookie address */
1243 offset += align - 1;
1244 offset &= ~(align - 1);
1245 cfg->sig_cookie = offset;
1249 for (i = 0; i < sig->param_count; ++i) {
1250 ins = cfg->args [curinst];
1252 if (ins->opcode != OP_REGVAR) {
1253 ins->opcode = OP_REGOFFSET;
1254 ins->inst_basereg = frame_reg;
1255 size = mini_type_stack_size_full (NULL, sig->params [i], &ualign, sig->pinvoke);
1257 /* FIXME: if a structure is misaligned, our memcpy doesn't work,
1258 * since it loads/stores misaligned words, which don't do the right thing.
1260 if (align < 4 && size >= 4)
1262 /* The code in the prolog () stores words when storing vtypes received in a register */
1263 if (MONO_TYPE_ISSTRUCT (sig->params [i]))
1265 offset += align - 1;
1266 offset &= ~(align - 1);
1267 ins->inst_offset = offset;
1273 /* align the offset to 8 bytes */
1278 cfg->stack_offset = offset;
1282 mono_arch_create_vars (MonoCompile *cfg)
1284 MonoMethodSignature *sig;
1287 sig = mono_method_signature (cfg->method);
1289 if (!cfg->arch.cinfo)
1290 cfg->arch.cinfo = get_call_info (cfg->mempool, sig, sig->pinvoke);
1291 cinfo = cfg->arch.cinfo;
1293 if (cinfo->ret.storage == RegTypeStructByVal)
1294 cfg->ret_var_is_local = TRUE;
1296 if (MONO_TYPE_ISSTRUCT (sig->ret) && cinfo->ret.storage != RegTypeStructByVal) {
1297 cfg->vret_addr = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_ARG);
1298 if (G_UNLIKELY (cfg->verbose_level > 1)) {
1299 printf ("vret_addr = ");
1300 mono_print_ins (cfg->vret_addr);
1304 if (cfg->gen_seq_points && cfg->compile_aot) {
1305 MonoInst *ins = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1306 ins->flags |= MONO_INST_VOLATILE;
1307 cfg->arch.seq_point_info_var = ins;
1309 /* Allocate a separate variable for this to save 1 load per seq point */
1310 ins = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
1311 ins->flags |= MONO_INST_VOLATILE;
1312 cfg->arch.ss_trigger_page_var = ins;
1317 emit_sig_cookie (MonoCompile *cfg, MonoCallInst *call, CallInfo *cinfo)
1319 MonoMethodSignature *tmp_sig;
1322 if (call->tail_call)
1325 /* FIXME: Add support for signature tokens to AOT */
1326 cfg->disable_aot = TRUE;
1328 g_assert (cinfo->sig_cookie.storage == RegTypeBase);
1331 * mono_ArgIterator_Setup assumes the signature cookie is
1332 * passed first and all the arguments which were before it are
1333 * passed on the stack after the signature. So compensate by
1334 * passing a different signature.
1336 tmp_sig = mono_metadata_signature_dup (call->signature);
1337 tmp_sig->param_count -= call->signature->sentinelpos;
1338 tmp_sig->sentinelpos = 0;
1339 memcpy (tmp_sig->params, call->signature->params + call->signature->sentinelpos, tmp_sig->param_count * sizeof (MonoType*));
1341 MONO_INST_NEW (cfg, sig_arg, OP_ICONST);
1342 sig_arg->dreg = mono_alloc_ireg (cfg);
1343 sig_arg->inst_p0 = tmp_sig;
1344 MONO_ADD_INS (cfg->cbb, sig_arg);
1346 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, ARMREG_SP, cinfo->sig_cookie.offset, sig_arg->dreg);
/*
 * mono_arch_get_llvm_call_info:
 *   Translate this backend's CallInfo into the LLVMCallInfo format used by
 *   the LLVM backend.  Only plain integer-register conventions
 *   (RegTypeGeneral / RegTypeIRegPair) are supported for arguments; any
 *   other storage kind, and any unsupported return convention, sets
 *   cfg->disable_llvm with an explanatory message so the method falls back
 *   to the non-LLVM JIT.
 *   NOTE(review): elided listing — declarations of n/i/cinfo/ainfo and
 *   several braces/returns are on lines not visible here.
 */
1351 mono_arch_get_llvm_call_info (MonoCompile *cfg, MonoMethodSignature *sig)
1356 LLVMCallInfo *linfo;
1358 n = sig->param_count + sig->hasthis;
1360 cinfo = get_call_info (cfg->mempool, sig, sig->pinvoke);
1362 linfo = mono_mempool_alloc0 (cfg->mempool, sizeof (LLVMCallInfo) + (sizeof (LLVMArgInfo) * n));
1365 * LLVM always uses the native ABI while we use our own ABI, the
1366 * only difference is the handling of vtypes:
1367 * - we only pass/receive them in registers in some cases, and only
1368 * in 1 or 2 integer registers.
1370 if (cinfo->ret.storage != RegTypeGeneral && cinfo->ret.storage != RegTypeNone && cinfo->ret.storage != RegTypeFP && cinfo->ret.storage != RegTypeIRegPair) {
1371 cfg->exception_message = g_strdup ("unknown ret conv");
1372 cfg->disable_llvm = TRUE;
1376 for (i = 0; i < n; ++i) {
1377 ainfo = cinfo->args + i;
1379 linfo->args [i].storage = LLVMArgNone;
1381 switch (ainfo->storage) {
1382 case RegTypeGeneral:
1383 case RegTypeIRegPair:
1385 linfo->args [i].storage = LLVMArgInIReg;
1388 cfg->exception_message = g_strdup_printf ("ainfo->storage (%d)", ainfo->storage);
1389 cfg->disable_llvm = TRUE;
/*
 * mono_arch_emit_call:
 *   Lower a call instruction's arguments into ARM ABI locations, as decided
 *   by get_call_info ():
 *     - RegTypeGeneral/RegTypeIRegPair: moves into argument registers
 *       (I8/U8 use a register pair; R4/R8 handling differs between
 *       MONO_ARCH_SOFT_FLOAT and hard-float builds, bouncing doubles
 *       through a scratch slot at cfg->param_area - 8 when needed);
 *     - RegTypeStructByVal: emits an OP_OUTARG_VT handled later by
 *       mono_arch_emit_outarg_vt ();
 *     - RegTypeBase(/Gen): stores onto the outgoing stack area at
 *       ainfo->offset, with the split reg/stack case for I8 and R8;
 *   It also emits the vararg signature cookie at the sentinel position and
 *   sets up the valuetype return (vret) register/variable.
 *   Finally records cinfo->stack_usage on the call.
 *   NOTE(review): heavily elided listing — case labels (e.g. RegTypeBase),
 *   #else/#endif lines, breaks and braces sit on lines not visible here;
 *   do not infer control flow from adjacency alone.
 */
1399 mono_arch_emit_call (MonoCompile *cfg, MonoCallInst *call)
1402 MonoMethodSignature *sig;
1406 sig = call->signature;
1407 n = sig->param_count + sig->hasthis;
1409 cinfo = get_call_info (NULL, sig, sig->pinvoke);
1411 for (i = 0; i < n; ++i) {
1412 ArgInfo *ainfo = cinfo->args + i;
1415 if (i >= sig->hasthis)
1416 t = sig->params [i - sig->hasthis];
1418 t = &mono_defaults.int_class->byval_arg;
1419 t = mini_type_get_underlying_type (NULL, t);
1421 if ((sig->call_convention == MONO_CALL_VARARG) && (i == sig->sentinelpos)) {
1422 /* Emit the signature cookie just before the implicit arguments */
1423 emit_sig_cookie (cfg, call, cinfo);
1426 in = call->args [i];
1428 switch (ainfo->storage) {
1429 case RegTypeGeneral:
1430 case RegTypeIRegPair:
1431 if (!t->byref && ((t->type == MONO_TYPE_I8) || (t->type == MONO_TYPE_U8))) {
1432 MONO_INST_NEW (cfg, ins, OP_MOVE);
1433 ins->dreg = mono_alloc_ireg (cfg);
1434 ins->sreg1 = in->dreg + 1;
1435 MONO_ADD_INS (cfg->cbb, ins);
1436 mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, ainfo->reg, FALSE);
1438 MONO_INST_NEW (cfg, ins, OP_MOVE);
1439 ins->dreg = mono_alloc_ireg (cfg);
1440 ins->sreg1 = in->dreg + 2;
1441 MONO_ADD_INS (cfg->cbb, ins);
1442 mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, ainfo->reg + 1, FALSE);
1443 } else if (!t->byref && ((t->type == MONO_TYPE_R8) || (t->type == MONO_TYPE_R4))) {
1444 #ifndef MONO_ARCH_SOFT_FLOAT
1448 if (ainfo->size == 4) {
1449 #ifdef MONO_ARCH_SOFT_FLOAT
1450 /* mono_emit_call_args () have already done the r8->r4 conversion */
1451 /* The converted value is in an int vreg */
1452 MONO_INST_NEW (cfg, ins, OP_MOVE);
1453 ins->dreg = mono_alloc_ireg (cfg);
1454 ins->sreg1 = in->dreg;
1455 MONO_ADD_INS (cfg->cbb, ins);
1456 mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, ainfo->reg, FALSE);
1458 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORER4_MEMBASE_REG, ARMREG_SP, (cfg->param_area - 8), in->dreg);
1459 creg = mono_alloc_ireg (cfg);
1460 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOAD_MEMBASE, creg, ARMREG_SP, (cfg->param_area - 8));
1461 mono_call_inst_add_outarg_reg (cfg, call, creg, ainfo->reg, FALSE);
1464 #ifdef MONO_ARCH_SOFT_FLOAT
1465 MONO_INST_NEW (cfg, ins, OP_FGETLOW32);
1466 ins->dreg = mono_alloc_ireg (cfg);
1467 ins->sreg1 = in->dreg;
1468 MONO_ADD_INS (cfg->cbb, ins);
1469 mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, ainfo->reg, FALSE);
1471 MONO_INST_NEW (cfg, ins, OP_FGETHIGH32);
1472 ins->dreg = mono_alloc_ireg (cfg);
1473 ins->sreg1 = in->dreg;
1474 MONO_ADD_INS (cfg->cbb, ins);
1475 mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, ainfo->reg + 1, FALSE);
1477 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORER8_MEMBASE_REG, ARMREG_SP, (cfg->param_area - 8), in->dreg);
1478 creg = mono_alloc_ireg (cfg);
1479 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOAD_MEMBASE, creg, ARMREG_SP, (cfg->param_area - 8));
1480 mono_call_inst_add_outarg_reg (cfg, call, creg, ainfo->reg, FALSE);
1481 creg = mono_alloc_ireg (cfg);
1482 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOAD_MEMBASE, creg, ARMREG_SP, (cfg->param_area - 8 + 4));
1483 mono_call_inst_add_outarg_reg (cfg, call, creg, ainfo->reg + 1, FALSE);
1486 cfg->flags |= MONO_CFG_HAS_FPOUT;
1488 MONO_INST_NEW (cfg, ins, OP_MOVE);
1489 ins->dreg = mono_alloc_ireg (cfg);
1490 ins->sreg1 = in->dreg;
1491 MONO_ADD_INS (cfg->cbb, ins);
1493 mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, ainfo->reg, FALSE);
1496 case RegTypeStructByAddr:
1499 /* FIXME: where si the data allocated? */
1500 arg->backend.reg3 = ainfo->reg;
1501 call->used_iregs |= 1 << ainfo->reg;
1502 g_assert_not_reached ();
1505 case RegTypeStructByVal:
1506 MONO_INST_NEW (cfg, ins, OP_OUTARG_VT);
1507 ins->opcode = OP_OUTARG_VT;
1508 ins->sreg1 = in->dreg;
1509 ins->klass = in->klass;
1510 ins->inst_p0 = call;
1511 ins->inst_p1 = mono_mempool_alloc (cfg->mempool, sizeof (ArgInfo));
1512 memcpy (ins->inst_p1, ainfo, sizeof (ArgInfo));
1513 MONO_ADD_INS (cfg->cbb, ins);
1516 if (!t->byref && ((t->type == MONO_TYPE_I8) || (t->type == MONO_TYPE_U8))) {
1517 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, ARMREG_SP, ainfo->offset, in->dreg);
1518 } else if (!t->byref && ((t->type == MONO_TYPE_R4) || (t->type == MONO_TYPE_R8))) {
1519 if (t->type == MONO_TYPE_R8) {
1520 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORER8_MEMBASE_REG, ARMREG_SP, ainfo->offset, in->dreg);
1522 #ifdef MONO_ARCH_SOFT_FLOAT
1523 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, ARMREG_SP, ainfo->offset, in->dreg);
1525 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORER4_MEMBASE_REG, ARMREG_SP, ainfo->offset, in->dreg);
1529 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, ARMREG_SP, ainfo->offset, in->dreg);
1532 case RegTypeBaseGen:
1533 if (!t->byref && ((t->type == MONO_TYPE_I8) || (t->type == MONO_TYPE_U8))) {
1534 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, ARMREG_SP, ainfo->offset, (G_BYTE_ORDER == G_BIG_ENDIAN) ? in->dreg + 1 : in->dreg + 2);
1535 MONO_INST_NEW (cfg, ins, OP_MOVE);
1536 ins->dreg = mono_alloc_ireg (cfg);
1537 ins->sreg1 = G_BYTE_ORDER == G_BIG_ENDIAN ? in->dreg + 2 : in->dreg + 1;
1538 MONO_ADD_INS (cfg->cbb, ins);
1539 mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, ARMREG_R3, FALSE);
1540 } else if (!t->byref && (t->type == MONO_TYPE_R8)) {
1543 #ifdef MONO_ARCH_SOFT_FLOAT
1544 g_assert_not_reached ();
1547 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORER8_MEMBASE_REG, ARMREG_SP, (cfg->param_area - 8), in->dreg);
1548 creg = mono_alloc_ireg (cfg);
1549 mono_call_inst_add_outarg_reg (cfg, call, creg, ARMREG_R3, FALSE);
1550 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOAD_MEMBASE, creg, ARMREG_SP, (cfg->param_area - 8));
1551 creg = mono_alloc_ireg (cfg);
1552 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOAD_MEMBASE, creg, ARMREG_SP, (cfg->param_area - 4));
1553 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, ARMREG_SP, ainfo->offset, creg);
1554 cfg->flags |= MONO_CFG_HAS_FPOUT;
1556 g_assert_not_reached ();
1563 arg->backend.reg3 = ainfo->reg;
1564 /* FP args are passed in int regs */
1565 call->used_iregs |= 1 << ainfo->reg;
1566 if (ainfo->size == 8) {
1567 arg->opcode = OP_OUTARG_R8;
1568 call->used_iregs |= 1 << (ainfo->reg + 1);
1570 arg->opcode = OP_OUTARG_R4;
1573 cfg->flags |= MONO_CFG_HAS_FPOUT;
1577 g_assert_not_reached ();
1581 /* Handle the case where there are no implicit arguments */
1582 if (!sig->pinvoke && (sig->call_convention == MONO_CALL_VARARG) && (n == sig->sentinelpos))
1583 emit_sig_cookie (cfg, call, cinfo);
1585 if (sig->ret && MONO_TYPE_ISSTRUCT (sig->ret)) {
1588 if (cinfo->ret.storage == RegTypeStructByVal) {
1589 /* The JIT will transform this into a normal call */
1590 call->vret_in_reg = TRUE;
1592 MONO_INST_NEW (cfg, vtarg, OP_MOVE);
1593 vtarg->sreg1 = call->vret_var->dreg;
1594 vtarg->dreg = mono_alloc_preg (cfg);
1595 MONO_ADD_INS (cfg->cbb, vtarg);
1597 mono_call_inst_add_outarg_reg (cfg, call, vtarg->dreg, cinfo->ret.reg, FALSE);
1601 call->stack_usage = cinfo->stack_usage;
/*
 * mono_arch_emit_outarg_vt:
 *   Expand an OP_OUTARG_VT (emitted by mono_arch_emit_call for
 *   RegTypeStructByVal arguments): the first ainfo->size pointer-sized
 *   words of the struct are loaded into consecutive argument registers
 *   starting at ainfo->reg; the remaining ovf_size words (if any) are
 *   memcpy'd onto the outgoing stack area at ainfo->offset.
 *   NOTE(review): elided listing — the guard around the memcpy (if any)
 *   and the soffset initialization are on lines not visible here.
 */
1607 mono_arch_emit_outarg_vt (MonoCompile *cfg, MonoInst *ins, MonoInst *src)
1609 MonoCallInst *call = (MonoCallInst*)ins->inst_p0;
1610 ArgInfo *ainfo = ins->inst_p1;
1611 int ovf_size = ainfo->vtsize;
1612 int doffset = ainfo->offset;
1613 int i, soffset, dreg;
1616 for (i = 0; i < ainfo->size; ++i) {
1617 dreg = mono_alloc_ireg (cfg);
1618 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, dreg, src->dreg, soffset);
1619 mono_call_inst_add_outarg_reg (cfg, call, dreg, ainfo->reg + i, FALSE);
1620 soffset += sizeof (gpointer);
1622 //g_print ("vt size: %d at R%d + %d\n", doffset, vt->inst_basereg, vt->inst_offset);
1624 mini_emit_memcpy (cfg, ARMREG_SP, doffset, src->dreg, soffset, ovf_size * sizeof (gpointer), 0);
/*
 * mono_arch_emit_setret:
 *   Emit the IR that moves a method's return value into the ABI return
 *   location: I8/U8 uses OP_SETLRET (register pair) except under LLVM;
 *   R4/R8 handling is FP-configuration dependent (SOFT_FLOAT vs VFP vs
 *   the remaining FPA path), using OP_SETFRET or OP_FMOVE; everything
 *   else falls through to a plain OP_MOVE into cfg->ret.
 *   NOTE(review): elided listing — returns, braces and the #else lines
 *   separating the FP variants are not visible here.
 */
1628 mono_arch_emit_setret (MonoCompile *cfg, MonoMethod *method, MonoInst *val)
1630 MonoType *ret = mini_type_get_underlying_type (cfg->generic_sharing_context, mono_method_signature (method)->ret);
1633 if (ret->type == MONO_TYPE_I8 || ret->type == MONO_TYPE_U8) {
1636 if (COMPILE_LLVM (cfg)) {
1637 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, cfg->ret->dreg, val->dreg);
1639 MONO_INST_NEW (cfg, ins, OP_SETLRET);
1640 ins->sreg1 = val->dreg + 1;
1641 ins->sreg2 = val->dreg + 2;
1642 MONO_ADD_INS (cfg->cbb, ins);
1646 #ifdef MONO_ARCH_SOFT_FLOAT
1647 if (ret->type == MONO_TYPE_R8) {
1650 MONO_INST_NEW (cfg, ins, OP_SETFRET);
1651 ins->dreg = cfg->ret->dreg;
1652 ins->sreg1 = val->dreg;
1653 MONO_ADD_INS (cfg->cbb, ins);
1656 if (ret->type == MONO_TYPE_R4) {
1657 /* Already converted to an int in method_to_ir () */
1658 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, cfg->ret->dreg, val->dreg);
1661 #elif defined(ARM_FPU_VFP)
1662 if (ret->type == MONO_TYPE_R8 || ret->type == MONO_TYPE_R4) {
1665 MONO_INST_NEW (cfg, ins, OP_SETFRET);
1666 ins->dreg = cfg->ret->dreg;
1667 ins->sreg1 = val->dreg;
1668 MONO_ADD_INS (cfg->cbb, ins);
1672 if (ret->type == MONO_TYPE_R4 || ret->type == MONO_TYPE_R8) {
1673 MONO_EMIT_NEW_UNALU (cfg, OP_FMOVE, cfg->ret->dreg, val->dreg);
1680 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, cfg->ret->dreg, val->dreg);
1683 #endif /* #ifndef DISABLE_JIT */
1686 mono_arch_is_inst_imm (gint64 imm)
1691 #define DYN_CALL_STACK_ARGS 6
1694 MonoMethodSignature *sig;
1699 mgreg_t regs [PARAM_REGS + DYN_CALL_STACK_ARGS];
/*
 * dyn_call_supported:
 *   Decide whether mono_arch_dyn_call_prepare () can handle SIG with the
 *   calling convention described by CINFO.  Rejects signatures whose
 *   arguments don't fit in PARAM_REGS + DYN_CALL_STACK_ARGS slots, return
 *   conventions other than the listed ones, and (FP-config dependent)
 *   some float parameter types.
 *   NOTE(review): elided listing — the FALSE/TRUE returns and several
 *   case labels are on lines not visible here.
 */
1705 dyn_call_supported (CallInfo *cinfo, MonoMethodSignature *sig)
1709 if (sig->hasthis + sig->param_count > PARAM_REGS + DYN_CALL_STACK_ARGS)
1712 switch (cinfo->ret.storage) {
1714 case RegTypeGeneral:
1715 case RegTypeIRegPair:
1716 case RegTypeStructByAddr:
1721 #elif defined(ARM_FPU_VFP)
1730 for (i = 0; i < cinfo->nargs; ++i) {
1731 switch (cinfo->args [i].storage) {
1732 case RegTypeGeneral:
1734 case RegTypeIRegPair:
1737 if (cinfo->args [i].offset >= (DYN_CALL_STACK_ARGS * sizeof (gpointer)))
1740 case RegTypeStructByVal:
1741 if (cinfo->args [i].reg + cinfo->args [i].vtsize >= PARAM_REGS + DYN_CALL_STACK_ARGS)
1749 // FIXME: Can't use cinfo only as it doesn't contain info about I8/float */
1750 for (i = 0; i < sig->param_count; ++i) {
1751 MonoType *t = sig->params [i];
1759 #ifdef MONO_ARCH_SOFT_FLOAT
/*
 * mono_arch_dyn_call_prepare:
 *   Precompute the ArchDynCallInfo for dynamically invoking a method with
 *   signature SIG via mono_arch_start_dyn_call ().  Returns NULL when the
 *   signature is not supported (see dyn_call_supported ()).
 *   Ownership: the returned info (and the CallInfo it holds) is freed by
 *   mono_arch_dyn_call_free ().
 *   NOTE(review): elided listing — the NULL-return path frees cinfo on a
 *   line not visible here; verify against the full source.
 */
1778 mono_arch_dyn_call_prepare (MonoMethodSignature *sig)
1780 ArchDynCallInfo *info;
1783 cinfo = get_call_info (NULL, sig, FALSE);
1785 if (!dyn_call_supported (cinfo, sig)) {
1790 info = g_new0 (ArchDynCallInfo, 1);
1791 // FIXME: Preprocess the info to speed up start_dyn_call ()
1793 info->cinfo = cinfo;
1795 return (MonoDynCallInfo*)info;
/*
 * mono_arch_dyn_call_free:
 *   Release an ArchDynCallInfo created by mono_arch_dyn_call_prepare ():
 *   frees the embedded CallInfo (and, on a line elided from this view,
 *   presumably the info struct itself — confirm against the full source).
 */
1799 mono_arch_dyn_call_free (MonoDynCallInfo *info)
1801 ArchDynCallInfo *ainfo = (ArchDynCallInfo*)info;
1803 g_free (ainfo->cinfo);
/*
 * mono_arch_start_dyn_call:
 *   Marshal ARGS into the DynCallArgs buffer BUF for a dynamic invocation:
 *   the vtype return address (if any) and the this-pointer go into the
 *   first register slots, then each argument is copied into its slot —
 *   register slots for RegTypeGeneral/IRegPair/StructByVal, slots past
 *   PARAM_REGS for RegTypeBase stack arguments.  64-bit and R8 values
 *   occupy two consecutive slots; valuetypes are copied word by word.
 *   NOTE(review): elided listing — several case labels (I8/U8/R4/R8 etc.),
 *   breaks and the greg/arg_index initialization are not visible here.
 */
1808 mono_arch_start_dyn_call (MonoDynCallInfo *info, gpointer **args, guint8 *ret, guint8 *buf, int buf_len)
1810 ArchDynCallInfo *dinfo = (ArchDynCallInfo*)info;
1811 DynCallArgs *p = (DynCallArgs*)buf;
1812 int arg_index, greg, i, j;
1813 MonoMethodSignature *sig = dinfo->sig;
1815 g_assert (buf_len >= sizeof (DynCallArgs));
1823 if (dinfo->cinfo->vtype_retaddr)
1824 p->regs [greg ++] = (mgreg_t)ret;
1827 p->regs [greg ++] = (mgreg_t)*(args [arg_index ++]);
1829 for (i = 0; i < sig->param_count; i++) {
1830 MonoType *t = mono_type_get_underlying_type (sig->params [i]);
1831 gpointer *arg = args [arg_index ++];
1832 ArgInfo *ainfo = &dinfo->cinfo->args [i + sig->hasthis];
1835 if (ainfo->storage == RegTypeGeneral || ainfo->storage == RegTypeIRegPair || ainfo->storage == RegTypeStructByVal)
1837 else if (ainfo->storage == RegTypeBase)
1838 slot = PARAM_REGS + (ainfo->offset / 4);
1840 g_assert_not_reached ();
1843 p->regs [slot] = (mgreg_t)*arg;
1848 case MONO_TYPE_STRING:
1849 case MONO_TYPE_CLASS:
1850 case MONO_TYPE_ARRAY:
1851 case MONO_TYPE_SZARRAY:
1852 case MONO_TYPE_OBJECT:
1856 p->regs [slot] = (mgreg_t)*arg;
1858 case MONO_TYPE_BOOLEAN:
1860 p->regs [slot] = *(guint8*)arg;
1863 p->regs [slot] = *(gint8*)arg;
1866 p->regs [slot] = *(gint16*)arg;
1869 case MONO_TYPE_CHAR:
1870 p->regs [slot] = *(guint16*)arg;
1873 p->regs [slot] = *(gint32*)arg;
1876 p->regs [slot] = *(guint32*)arg;
1880 p->regs [slot ++] = (mgreg_t)arg [0];
1881 p->regs [slot] = (mgreg_t)arg [1];
1884 p->regs [slot] = *(mgreg_t*)arg;
1887 p->regs [slot ++] = (mgreg_t)arg [0];
1888 p->regs [slot] = (mgreg_t)arg [1];
1890 case MONO_TYPE_GENERICINST:
1891 if (MONO_TYPE_IS_REFERENCE (t)) {
1892 p->regs [slot] = (mgreg_t)*arg;
1897 case MONO_TYPE_VALUETYPE:
1898 g_assert (ainfo->storage == RegTypeStructByVal);
1900 if (ainfo->size == 0)
1901 slot = PARAM_REGS + (ainfo->offset / 4);
1905 for (j = 0; j < ainfo->size + ainfo->vtsize; ++j)
1906 p->regs [slot ++] = ((mgreg_t*)arg) [j];
1909 g_assert_not_reached ();
/*
 * mono_arch_finish_dyn_call:
 *   Copy the raw result of a dynamic invocation out of the DynCallArgs
 *   buffer into the caller-supplied return slot, widening/narrowing per
 *   the signature's return type.  res/res2 hold the two result registers;
 *   I8/U8 fills both 32-bit halves; valuetype returns were already written
 *   through the vtype return address; R4/R8 handling is FP-configuration
 *   dependent (VFP path visible below).
 *   NOTE(review): elided listing — case labels for I1/U1/I4/U4/I8 etc.,
 *   breaks, and parts of the R8 path are on lines not visible here.
 */
1915 mono_arch_finish_dyn_call (MonoDynCallInfo *info, guint8 *buf)
1917 ArchDynCallInfo *ainfo = (ArchDynCallInfo*)info;
1918 MonoMethodSignature *sig = ((ArchDynCallInfo*)info)->sig;
1919 guint8 *ret = ((DynCallArgs*)buf)->ret;
1920 mgreg_t res = ((DynCallArgs*)buf)->res;
1921 mgreg_t res2 = ((DynCallArgs*)buf)->res2;
1923 switch (mono_type_get_underlying_type (sig->ret)->type) {
1924 case MONO_TYPE_VOID:
1925 *(gpointer*)ret = NULL;
1927 case MONO_TYPE_STRING:
1928 case MONO_TYPE_CLASS:
1929 case MONO_TYPE_ARRAY:
1930 case MONO_TYPE_SZARRAY:
1931 case MONO_TYPE_OBJECT:
1935 *(gpointer*)ret = (gpointer)res;
1941 case MONO_TYPE_BOOLEAN:
1942 *(guint8*)ret = res;
1945 *(gint16*)ret = res;
1948 case MONO_TYPE_CHAR:
1949 *(guint16*)ret = res;
1952 *(gint32*)ret = res;
1955 *(guint32*)ret = res;
1959 /* This handles endianness as well */
1960 ((gint32*)ret) [0] = res;
1961 ((gint32*)ret) [1] = res2;
1963 case MONO_TYPE_GENERICINST:
1964 if (MONO_TYPE_IS_REFERENCE (sig->ret)) {
1965 *(gpointer*)ret = (gpointer)res;
1970 case MONO_TYPE_VALUETYPE:
1971 g_assert (ainfo->cinfo->vtype_retaddr);
1974 #if defined(ARM_FPU_VFP)
1976 *(float*)ret = *(float*)&res;
1978 case MONO_TYPE_R8: {
1984 *(double*)ret = *(double*)&regs;
1989 g_assert_not_reached ();
1996 * Allow tracing to work with this interface (with an optional argument)
/*
 * mono_arch_instrument_prolog:
 *   Emit the tracing hook at method entry: r0 = the MonoMethod being
 *   entered, r1 = NULL (placeholder for the frame pointer), then call
 *   FUNC through r2.  enable_arguments handling, if any, is on lines not
 *   visible in this elided view.
 */
2000 mono_arch_instrument_prolog (MonoCompile *cfg, void *func, void *p, gboolean enable_arguments)
2004 code = mono_arm_emit_load_imm (code, ARMREG_R0, (guint32)cfg->method);
2005 ARM_MOV_REG_IMM8 (code, ARMREG_R1, 0); /* NULL ebp for now */
2006 code = mono_arm_emit_load_imm (code, ARMREG_R2, (guint32)func);
2007 code = emit_call_reg (code, ARMREG_R2);
/*
 * mono_arch_instrument_epilog_full:
 *   Emit the tracing hook at method exit.  Because the return value lives
 *   in r0/r1 (or an FP location) and the hook call clobbers them, the
 *   value is first spilled to cfg->param_area (save_offset), the hook FUNC
 *   is called with r0 = the MonoMethod, and the value is reloaded after.
 *   save_mode is picked from the return type (SAVE_NONE/ONE/TWO/FP/STRUCT);
 *   note the string-.ctor special case below.  Grows cfg->native_code if
 *   fewer than ~16 instruction slots remain.
 *   NOTE(review): elided listing — several case labels, breaks and the
 *   SAVE_FP save/restore lines are not visible here.
 */
2020 mono_arch_instrument_epilog_full (MonoCompile *cfg, void *func, void *p, gboolean enable_arguments, gboolean preserve_argument_registers)
2023 int save_mode = SAVE_NONE;
2025 MonoMethod *method = cfg->method;
2026 int rtype = mini_type_get_underlying_type (cfg->generic_sharing_context, mono_method_signature (method)->ret)->type;
2027 int save_offset = cfg->param_area;
2031 offset = code - cfg->native_code;
2032 /* we need about 16 instructions */
2033 if (offset > (cfg->code_size - 16 * 4)) {
2034 cfg->code_size *= 2;
2035 cfg->native_code = g_realloc (cfg->native_code, cfg->code_size);
2036 code = cfg->native_code + offset;
2039 case MONO_TYPE_VOID:
2040 /* special case string .ctor icall */
2041 if (strcmp (".ctor", method->name) && method->klass == mono_defaults.string_class)
2042 save_mode = SAVE_ONE;
2044 save_mode = SAVE_NONE;
2048 save_mode = SAVE_TWO;
2052 save_mode = SAVE_FP;
2054 case MONO_TYPE_VALUETYPE:
2055 save_mode = SAVE_STRUCT;
2058 save_mode = SAVE_ONE;
2062 switch (save_mode) {
2064 ARM_STR_IMM (code, ARMREG_R0, cfg->frame_reg, save_offset);
2065 ARM_STR_IMM (code, ARMREG_R1, cfg->frame_reg, save_offset + 4);
2066 if (enable_arguments) {
2067 ARM_MOV_REG_REG (code, ARMREG_R2, ARMREG_R1);
2068 ARM_MOV_REG_REG (code, ARMREG_R1, ARMREG_R0);
2072 ARM_STR_IMM (code, ARMREG_R0, cfg->frame_reg, save_offset);
2073 if (enable_arguments) {
2074 ARM_MOV_REG_REG (code, ARMREG_R1, ARMREG_R0);
2078 /* FIXME: what reg? */
2079 if (enable_arguments) {
2080 /* FIXME: what reg? */
2084 if (enable_arguments) {
2085 /* FIXME: get the actual address */
2086 ARM_MOV_REG_REG (code, ARMREG_R1, ARMREG_R0);
2094 code = mono_arm_emit_load_imm (code, ARMREG_R0, (guint32)cfg->method);
2095 code = mono_arm_emit_load_imm (code, ARMREG_IP, (guint32)func);
2096 code = emit_call_reg (code, ARMREG_IP);
2098 switch (save_mode) {
2100 ARM_LDR_IMM (code, ARMREG_R0, cfg->frame_reg, save_offset);
2101 ARM_LDR_IMM (code, ARMREG_R1, cfg->frame_reg, save_offset + 4);
2104 ARM_LDR_IMM (code, ARMREG_R0, cfg->frame_reg, save_offset);
2118 * The immediate field for cond branches is big enough for all reasonable methods
2120 #define EMIT_COND_BRANCH_FLAGS(ins,condcode) \
2121 if (0 && ins->inst_true_bb->native_offset) { \
2122 ARM_B_COND (code, (condcode), (code - cfg->native_code + ins->inst_true_bb->native_offset) & 0xffffff); \
2124 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_BB, ins->inst_true_bb); \
2125 ARM_B_COND (code, (condcode), 0); \
2128 #define EMIT_COND_BRANCH(ins,cond) EMIT_COND_BRANCH_FLAGS(ins, branch_cc_table [(cond)])
2130 /* emit an exception if condition is fail
2132 * We assign the extra code used to throw the implicit exceptions
2133 * to cfg->bb_exit as far as the big branch handling is concerned
2135 #define EMIT_COND_SYSTEM_EXCEPTION_FLAGS(condcode,exc_name) \
2137 mono_add_patch_info (cfg, code - cfg->native_code, \
2138 MONO_PATCH_INFO_EXC, exc_name); \
2139 ARM_BL_COND (code, (condcode), 0); \
2142 #define EMIT_COND_SYSTEM_EXCEPTION(cond,exc_name) EMIT_COND_SYSTEM_EXCEPTION_FLAGS(branch_cc_table [(cond)], (exc_name))
2145 mono_arch_peephole_pass_1 (MonoCompile *cfg, MonoBasicBlock *bb)
/*
 * mono_arch_peephole_pass_2:
 *   Second (post-regalloc) peephole pass over BB.  Classic store/load
 *   forwarding: a load from the address just stored becomes an OP_MOVE
 *   (or is deleted when src == dst), a reload of a just-loaded slot
 *   becomes a move, narrow loads after matching narrow stores become
 *   sign/zero-extension ops, and redundant/ping-pong OP_MOVEs are deleted.
 *   last_ins tracks the previous surviving instruction for the pairwise
 *   pattern matches; bb->last_ins is fixed up at the end.
 *   NOTE(review): elided listing — several opcode case labels, breaks and
 *   braces (and the last_ins update) are on lines not visible here.
 */
2150 mono_arch_peephole_pass_2 (MonoCompile *cfg, MonoBasicBlock *bb)
2152 MonoInst *ins, *n, *last_ins = NULL;
2154 MONO_BB_FOR_EACH_INS_SAFE (bb, n, ins) {
2155 switch (ins->opcode) {
2158 /* Already done by an arch-independent pass */
2160 case OP_LOAD_MEMBASE:
2161 case OP_LOADI4_MEMBASE:
2163 * OP_STORE_MEMBASE_REG reg, offset(basereg)
2164 * OP_LOAD_MEMBASE offset(basereg), reg
2166 if (last_ins && (last_ins->opcode == OP_STOREI4_MEMBASE_REG
2167 || last_ins->opcode == OP_STORE_MEMBASE_REG) &&
2168 ins->inst_basereg == last_ins->inst_destbasereg &&
2169 ins->inst_offset == last_ins->inst_offset) {
2170 if (ins->dreg == last_ins->sreg1) {
2171 MONO_DELETE_INS (bb, ins);
2174 //static int c = 0; printf ("MATCHX %s %d\n", cfg->method->name,c++);
2175 ins->opcode = OP_MOVE;
2176 ins->sreg1 = last_ins->sreg1;
2180 * Note: reg1 must be different from the basereg in the second load
2181 * OP_LOAD_MEMBASE offset(basereg), reg1
2182 * OP_LOAD_MEMBASE offset(basereg), reg2
2184 * OP_LOAD_MEMBASE offset(basereg), reg1
2185 * OP_MOVE reg1, reg2
2187 } if (last_ins && (last_ins->opcode == OP_LOADI4_MEMBASE
2188 || last_ins->opcode == OP_LOAD_MEMBASE) &&
2189 ins->inst_basereg != last_ins->dreg &&
2190 ins->inst_basereg == last_ins->inst_basereg &&
2191 ins->inst_offset == last_ins->inst_offset) {
2193 if (ins->dreg == last_ins->dreg) {
2194 MONO_DELETE_INS (bb, ins);
2197 ins->opcode = OP_MOVE;
2198 ins->sreg1 = last_ins->dreg;
2201 //g_assert_not_reached ();
2205 * OP_STORE_MEMBASE_IMM imm, offset(basereg)
2206 * OP_LOAD_MEMBASE offset(basereg), reg
2208 * OP_STORE_MEMBASE_IMM imm, offset(basereg)
2209 * OP_ICONST reg, imm
2211 } else if (last_ins && (last_ins->opcode == OP_STOREI4_MEMBASE_IMM
2212 || last_ins->opcode == OP_STORE_MEMBASE_IMM) &&
2213 ins->inst_basereg == last_ins->inst_destbasereg &&
2214 ins->inst_offset == last_ins->inst_offset) {
2215 //static int c = 0; printf ("MATCHX %s %d\n", cfg->method->name,c++);
2216 ins->opcode = OP_ICONST;
2217 ins->inst_c0 = last_ins->inst_imm;
2218 g_assert_not_reached (); // check this rule
2222 case OP_LOADU1_MEMBASE:
2223 case OP_LOADI1_MEMBASE:
2224 if (last_ins && (last_ins->opcode == OP_STOREI1_MEMBASE_REG) &&
2225 ins->inst_basereg == last_ins->inst_destbasereg &&
2226 ins->inst_offset == last_ins->inst_offset) {
2227 ins->opcode = (ins->opcode == OP_LOADI1_MEMBASE) ? OP_ICONV_TO_I1 : OP_ICONV_TO_U1;
2228 ins->sreg1 = last_ins->sreg1;
2231 case OP_LOADU2_MEMBASE:
2232 case OP_LOADI2_MEMBASE:
2233 if (last_ins && (last_ins->opcode == OP_STOREI2_MEMBASE_REG) &&
2234 ins->inst_basereg == last_ins->inst_destbasereg &&
2235 ins->inst_offset == last_ins->inst_offset) {
2236 ins->opcode = (ins->opcode == OP_LOADI2_MEMBASE) ? OP_ICONV_TO_I2 : OP_ICONV_TO_U2;
2237 ins->sreg1 = last_ins->sreg1;
2241 ins->opcode = OP_MOVE;
2245 if (ins->dreg == ins->sreg1) {
2246 MONO_DELETE_INS (bb, ins);
2250 * OP_MOVE sreg, dreg
2251 * OP_MOVE dreg, sreg
2253 if (last_ins && last_ins->opcode == OP_MOVE &&
2254 ins->sreg1 == last_ins->dreg &&
2255 ins->dreg == last_ins->sreg1) {
2256 MONO_DELETE_INS (bb, ins);
2264 bb->last_ins = last_ins;
2268 * the branch_cc_table should maintain the order of these
2282 branch_cc_table [] = {
2296 #define NEW_INS(cfg,dest,op) do { \
2297 MONO_INST_NEW ((cfg), (dest), (op)); \
2298 mono_bblock_insert_before_ins (bb, ins, (dest)); \
/*
 * map_to_reg_reg_op:
 *   Map a membase (base + immediate offset) or immediate-operand opcode to
 *   its register-register equivalent, used by mono_arch_lowering_pass ()
 *   when the immediate does not fit an ARM addressing-mode encoding:
 *   *_MEMBASE loads/stores become *_MEMINDEX, and *_MEMBASE_IMM stores
 *   become *_MEMBASE_REG (the immediate is first materialized into a vreg
 *   by the caller).  Asserts on any unmapped opcode.
 *   NOTE(review): elided listing — the switch header, some *_IMM cases and
 *   their returns are on lines not visible here.
 */
2302 map_to_reg_reg_op (int op)
2311 case OP_COMPARE_IMM:
2313 case OP_ICOMPARE_IMM:
2327 case OP_LOAD_MEMBASE:
2328 return OP_LOAD_MEMINDEX;
2329 case OP_LOADI4_MEMBASE:
2330 return OP_LOADI4_MEMINDEX;
2331 case OP_LOADU4_MEMBASE:
2332 return OP_LOADU4_MEMINDEX;
2333 case OP_LOADU1_MEMBASE:
2334 return OP_LOADU1_MEMINDEX;
2335 case OP_LOADI2_MEMBASE:
2336 return OP_LOADI2_MEMINDEX;
2337 case OP_LOADU2_MEMBASE:
2338 return OP_LOADU2_MEMINDEX;
2339 case OP_LOADI1_MEMBASE:
2340 return OP_LOADI1_MEMINDEX;
2341 case OP_STOREI1_MEMBASE_REG:
2342 return OP_STOREI1_MEMINDEX;
2343 case OP_STOREI2_MEMBASE_REG:
2344 return OP_STOREI2_MEMINDEX;
2345 case OP_STOREI4_MEMBASE_REG:
2346 return OP_STOREI4_MEMINDEX;
2347 case OP_STORE_MEMBASE_REG:
2348 return OP_STORE_MEMINDEX;
2349 case OP_STORER4_MEMBASE_REG:
2350 return OP_STORER4_MEMINDEX;
2351 case OP_STORER8_MEMBASE_REG:
2352 return OP_STORER8_MEMINDEX;
2353 case OP_STORE_MEMBASE_IMM:
2354 return OP_STORE_MEMBASE_REG;
2355 case OP_STOREI1_MEMBASE_IMM:
2356 return OP_STOREI1_MEMBASE_REG;
2357 case OP_STOREI2_MEMBASE_IMM:
2358 return OP_STOREI2_MEMBASE_REG;
2359 case OP_STOREI4_MEMBASE_IMM:
2360 return OP_STOREI4_MEMBASE_REG;
2362 g_assert_not_reached ();
2366 * Remove from the instruction list the instructions that can't be
2367 * represented with very simple instructions with no register
/*
 * mono_arch_lowering_pass:
 *   Rewrite BB's instructions into forms directly encodable on ARM:
 *     - immediate operands that are not valid rotated 8-bit immediates
 *       (mono_arm_is_rotated_imm8) are materialized into a vreg via a new
 *       OP_ICONST and the op switched to its reg-reg form;
 *     - MUL by 1/0/power-of-two becomes MOVE/ICONST/SHL_IMM, otherwise a
 *       plain OP_IMUL with a materialized constant;
 *     - overflow checks after *OVF are flipped (ARM sets C = 1 on *no*
 *       borrow/overflow for these, hence OP_COND_EXC_NC);
 *     - membase loads/stores whose offset exceeds the imm12/imm8/fpimm8
 *       encoding range are converted via map_to_reg_reg_op (), or for
 *       FP accesses split into ADD_IMM of the aligned part + small offset
 *       (VFP/FPA have no indexed load/store forms — hence the asserts);
 *     - *_MEMBASE_IMM stores first materialize the immediate and loop
 *       again (goto loop_start) so the offset also gets handled;
 *     - some FP compares get their operands swapped and the following
 *       branch opcode adjusted (FBLT/FBGE and _UN variants).
 *   Updates bb->last_ins and bb->max_vreg at the end.
 *   NOTE(review): heavily elided listing — case labels (e.g. the arith-imm
 *   and MUL_IMM groups), breaks, braces, the loop_start label and the
 *   operand-swap tail are on lines not visible here.
 */
2371 mono_arch_lowering_pass (MonoCompile *cfg, MonoBasicBlock *bb)
2373 MonoInst *ins, *temp, *last_ins = NULL;
2374 int rot_amount, imm8, low_imm;
2376 MONO_BB_FOR_EACH_INS (bb, ins) {
2378 switch (ins->opcode) {
2382 case OP_COMPARE_IMM:
2383 case OP_ICOMPARE_IMM:
2397 if ((imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount)) < 0) {
2398 NEW_INS (cfg, temp, OP_ICONST);
2399 temp->inst_c0 = ins->inst_imm;
2400 temp->dreg = mono_alloc_ireg (cfg);
2401 ins->sreg2 = temp->dreg;
2402 ins->opcode = mono_op_imm_to_op (ins->opcode);
2404 if (ins->opcode == OP_SBB || ins->opcode == OP_ISBB || ins->opcode == OP_SUBCC)
2410 if (ins->inst_imm == 1) {
2411 ins->opcode = OP_MOVE;
2414 if (ins->inst_imm == 0) {
2415 ins->opcode = OP_ICONST;
2419 imm8 = mono_is_power_of_two (ins->inst_imm);
2421 ins->opcode = OP_SHL_IMM;
2422 ins->inst_imm = imm8;
2425 NEW_INS (cfg, temp, OP_ICONST);
2426 temp->inst_c0 = ins->inst_imm;
2427 temp->dreg = mono_alloc_ireg (cfg);
2428 ins->sreg2 = temp->dreg;
2429 ins->opcode = OP_IMUL;
2435 if (ins->next && (ins->next->opcode == OP_COND_EXC_C || ins->next->opcode == OP_COND_EXC_IC))
2436 /* ARM sets the C flag to 1 if there was _no_ overflow */
2437 ins->next->opcode = OP_COND_EXC_NC;
2439 case OP_LOCALLOC_IMM:
2440 NEW_INS (cfg, temp, OP_ICONST);
2441 temp->inst_c0 = ins->inst_imm;
2442 temp->dreg = mono_alloc_ireg (cfg);
2443 ins->sreg1 = temp->dreg;
2444 ins->opcode = OP_LOCALLOC;
2446 case OP_LOAD_MEMBASE:
2447 case OP_LOADI4_MEMBASE:
2448 case OP_LOADU4_MEMBASE:
2449 case OP_LOADU1_MEMBASE:
2450 /* we can do two things: load the immed in a register
2451 * and use an indexed load, or see if the immed can be
2452 * represented as an ad_imm + a load with a smaller offset
2453 * that fits. We just do the first for now, optimize later.
2455 if (arm_is_imm12 (ins->inst_offset))
2457 NEW_INS (cfg, temp, OP_ICONST);
2458 temp->inst_c0 = ins->inst_offset;
2459 temp->dreg = mono_alloc_ireg (cfg);
2460 ins->sreg2 = temp->dreg;
2461 ins->opcode = map_to_reg_reg_op (ins->opcode);
2463 case OP_LOADI2_MEMBASE:
2464 case OP_LOADU2_MEMBASE:
2465 case OP_LOADI1_MEMBASE:
2466 if (arm_is_imm8 (ins->inst_offset))
2468 NEW_INS (cfg, temp, OP_ICONST);
2469 temp->inst_c0 = ins->inst_offset;
2470 temp->dreg = mono_alloc_ireg (cfg);
2471 ins->sreg2 = temp->dreg;
2472 ins->opcode = map_to_reg_reg_op (ins->opcode);
2474 case OP_LOADR4_MEMBASE:
2475 case OP_LOADR8_MEMBASE:
2476 if (arm_is_fpimm8 (ins->inst_offset))
2478 low_imm = ins->inst_offset & 0x1ff;
2479 if ((imm8 = mono_arm_is_rotated_imm8 (ins->inst_offset & ~0x1ff, &rot_amount)) >= 0) {
2480 NEW_INS (cfg, temp, OP_ADD_IMM);
2481 temp->inst_imm = ins->inst_offset & ~0x1ff;
2482 temp->sreg1 = ins->inst_basereg;
2483 temp->dreg = mono_alloc_ireg (cfg);
2484 ins->inst_basereg = temp->dreg;
2485 ins->inst_offset = low_imm;
2488 /* VFP/FPA doesn't have indexed load instructions */
2489 g_assert_not_reached ();
2491 case OP_STORE_MEMBASE_REG:
2492 case OP_STOREI4_MEMBASE_REG:
2493 case OP_STOREI1_MEMBASE_REG:
2494 if (arm_is_imm12 (ins->inst_offset))
2496 NEW_INS (cfg, temp, OP_ICONST);
2497 temp->inst_c0 = ins->inst_offset;
2498 temp->dreg = mono_alloc_ireg (cfg);
2499 ins->sreg2 = temp->dreg;
2500 ins->opcode = map_to_reg_reg_op (ins->opcode);
2502 case OP_STOREI2_MEMBASE_REG:
2503 if (arm_is_imm8 (ins->inst_offset))
2505 NEW_INS (cfg, temp, OP_ICONST);
2506 temp->inst_c0 = ins->inst_offset;
2507 temp->dreg = mono_alloc_ireg (cfg);
2508 ins->sreg2 = temp->dreg;
2509 ins->opcode = map_to_reg_reg_op (ins->opcode);
2511 case OP_STORER4_MEMBASE_REG:
2512 case OP_STORER8_MEMBASE_REG:
2513 if (arm_is_fpimm8 (ins->inst_offset))
2515 low_imm = ins->inst_offset & 0x1ff;
2516 if ((imm8 = mono_arm_is_rotated_imm8 (ins->inst_offset & ~ 0x1ff, &rot_amount)) >= 0 && arm_is_fpimm8 (low_imm)) {
2517 NEW_INS (cfg, temp, OP_ADD_IMM);
2518 temp->inst_imm = ins->inst_offset & ~0x1ff;
2519 temp->sreg1 = ins->inst_destbasereg;
2520 temp->dreg = mono_alloc_ireg (cfg);
2521 ins->inst_destbasereg = temp->dreg;
2522 ins->inst_offset = low_imm;
2525 /*g_print ("fail with: %d (%d, %d)\n", ins->inst_offset, ins->inst_offset & ~0x1ff, low_imm);*/
2526 /* VFP/FPA doesn't have indexed store instructions */
2527 g_assert_not_reached ();
2529 case OP_STORE_MEMBASE_IMM:
2530 case OP_STOREI1_MEMBASE_IMM:
2531 case OP_STOREI2_MEMBASE_IMM:
2532 case OP_STOREI4_MEMBASE_IMM:
2533 NEW_INS (cfg, temp, OP_ICONST);
2534 temp->inst_c0 = ins->inst_imm;
2535 temp->dreg = mono_alloc_ireg (cfg);
2536 ins->sreg1 = temp->dreg;
2537 ins->opcode = map_to_reg_reg_op (ins->opcode);
2539 goto loop_start; /* make it handle the possibly big ins->inst_offset */
2541 gboolean swap = FALSE;
2545 /* Optimized away */
2550 /* Some fp compares require swapped operands */
2551 switch (ins->next->opcode) {
2553 ins->next->opcode = OP_FBLT;
2557 ins->next->opcode = OP_FBLT_UN;
2561 ins->next->opcode = OP_FBGE;
2565 ins->next->opcode = OP_FBGE_UN;
2573 ins->sreg1 = ins->sreg2;
2582 bb->last_ins = last_ins;
2583 bb->max_vreg = cfg->next_vreg;
/*
 * mono_arch_decompose_long_opts:
 *   Decompose 64-bit IR ops into 32-bit pairs.  The visible case lowers
 *   OP_LNEG into RSBS/RSC with 0 (reverse-subtract with borrow across the
 *   low/high half vregs).  NOTE(review): elided listing — the NULLIFY_INS
 *   or equivalent tail of the LNEG case is not visible here.
 */
2587 mono_arch_decompose_long_opts (MonoCompile *cfg, MonoInst *long_ins)
2591 if (long_ins->opcode == OP_LNEG) {
2593 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ARM_RSBS_IMM, ins->dreg + 1, ins->sreg1 + 1, 0);
2594 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ARM_RSC_IMM, ins->dreg + 2, ins->sreg1 + 2, 0);
/*
 * emit_float_to_int:
 *   Emit native code converting the float in SREG to an integer in DREG.
 *   FPA uses ARM_FIXZ; VFP converts through scratch reg F0 (TOSIZD/TOUIZD
 *   then FMRS).  The result is then truncated to SIZE bytes: unsigned via
 *   mask (1 byte) or shl/shr pair (2 bytes), signed via shl/sar pair.
 *   NOTE(review): elided listing — the #endif and the signed/unsigned
 *   branch structure are on lines not visible here.
 */
2600 emit_float_to_int (MonoCompile *cfg, guchar *code, int dreg, int sreg, int size, gboolean is_signed)
2602 /* sreg is a float, dreg is an integer reg */
2604 ARM_FIXZ (code, dreg, sreg);
2605 #elif defined(ARM_FPU_VFP)
2607 ARM_TOSIZD (code, ARM_VFP_F0, sreg);
2609 ARM_TOUIZD (code, ARM_VFP_F0, sreg);
2610 ARM_FMRS (code, dreg, ARM_VFP_F0);
2614 ARM_AND_REG_IMM8 (code, dreg, dreg, 0xff);
2615 else if (size == 2) {
2616 ARM_SHL_IMM (code, dreg, dreg, 16);
2617 ARM_SHR_IMM (code, dreg, dreg, 16);
2621 ARM_SHL_IMM (code, dreg, dreg, 24);
2622 ARM_SAR_IMM (code, dreg, dreg, 24);
2623 } else if (size == 2) {
2624 ARM_SHL_IMM (code, dreg, dreg, 16);
2625 ARM_SAR_IMM (code, dreg, dreg, 16);
2631 #endif /* #ifndef DISABLE_JIT */
2635 const guchar *target;
2640 #define is_call_imm(diff) ((gint)(diff) >= -33554432 && (gint)(diff) <= 33554431)
/*
 * search_thunk_slot:
 *   mono_domain_code_foreach () callback used by handle_thunk ().  Scans a
 *   code chunk's thunk area for either an existing 12-byte thunk targeting
 *   pdata->target, or a free (all-zero) slot in which to emit one
 *   (ldr ip, [pc] / bx|mov pc, ip / target-address word), then patches the
 *   original call site to branch to the thunk.  A thunk is only usable if
 *   it is within B/BL range (is_call_imm) of pdata->code.  pdata->found
 *   communicates the result back to the caller.
 *   NOTE(review): elided listing — the early return, the pdata->found
 *   assignments, and the thunk-pointer advance are on lines not visible
 *   here.
 */
2643 search_thunk_slot (void *data, int csize, int bsize, void *user_data) {
2644 PatchData *pdata = (PatchData*)user_data;
2645 guchar *code = data;
2646 guint32 *thunks = data;
2647 guint32 *endthunks = (guint32*)(code + bsize);
2649 int difflow, diffhigh;
2651 /* always ensure a call from pdata->code can reach to the thunks without further thunks */
2652 difflow = (char*)pdata->code - (char*)thunks;
2653 diffhigh = (char*)pdata->code - (char*)endthunks;
2654 if (!((is_call_imm (thunks) && is_call_imm (endthunks)) || (is_call_imm (difflow) && is_call_imm (diffhigh))))
2658 * The thunk is composed of 3 words:
2659 * load constant from thunks [2] into ARM_IP
2662 * Note that the LR register is already setup
2664 //g_print ("thunk nentries: %d\n", ((char*)endthunks - (char*)thunks)/16);
2665 if ((pdata->found == 2) || (pdata->code >= code && pdata->code <= code + csize)) {
2666 while (thunks < endthunks) {
2667 //g_print ("looking for target: %p at %p (%08x-%08x)\n", pdata->target, thunks, thunks [0], thunks [1]);
2668 if (thunks [2] == (guint32)pdata->target) {
2669 arm_patch (pdata->code, (guchar*)thunks);
2670 mono_arch_flush_icache (pdata->code, 4);
2673 } else if ((thunks [0] == 0) && (thunks [1] == 0) && (thunks [2] == 0)) {
2674 /* found a free slot instead: emit thunk */
2675 /* ARMREG_IP is fine to use since this can't be an IMT call
2678 code = (guchar*)thunks;
2679 ARM_LDR_IMM (code, ARMREG_IP, ARMREG_PC, 0);
2680 if (thumb_supported)
2681 ARM_BX (code, ARMREG_IP);
2683 ARM_MOV_REG_REG (code, ARMREG_PC, ARMREG_IP);
2684 thunks [2] = (guint32)pdata->target;
2685 mono_arch_flush_icache ((guchar*)thunks, 12);
2687 arm_patch (pdata->code, (guchar*)thunks);
2688 mono_arch_flush_icache (pdata->code, 4);
2692 /* skip 12 bytes, the size of the thunk */
2696 //g_print ("failed thunk lookup for %p from %p at %p (%d entries)\n", pdata->target, pdata->code, data, count);
/*
 * handle_thunk:
 *   Patch CODE to reach TARGET through a branch thunk when the target is
 *   out of direct B/BL range.  Walks the domain's code chunks with
 *   search_thunk_slot () under the domain lock; a second pass (with
 *   pdata.found == 2, set on a line elided here) takes the first available
 *   slot anywhere.  Aborts via g_assert if no thunk slot could be found
 *   or created.
 */
2702 handle_thunk (MonoDomain *domain, int absolute, guchar *code, const guchar *target)
2707 domain = mono_domain_get ();
2710 pdata.target = target;
2711 pdata.absolute = absolute;
2714 mono_domain_lock (domain);
2715 mono_domain_code_foreach (domain, search_thunk_slot, &pdata);
2718 /* this uses the first available slot */
2720 mono_domain_code_foreach (domain, search_thunk_slot, &pdata);
2722 mono_domain_unlock (domain);
2724 if (pdata.found != 1)
2725 g_print ("thunk failed for %p from %p\n", target, code);
2726 g_assert (pdata.found == 1);
/*
 * arm_patch_general:
 * Patch the instruction (or multi-instruction call sequence) at CODE so that
 * control transfers to TARGET.  Three shapes are handled:
 *   1) direct b/bl (top bits 101): rewrite the 24-bit displacement in place,
 *      converting bl -> blx when TARGET has the Thumb bit set;
 *   2) bx/blx through a register with an inline address constant: rewrite the
 *      embedded constant after asserting the surrounding code matches one of
 *      the known emitted templates;
 *   3) out-of-range direct branches: redirect through a thunk (handle_thunk).
 * NOTE(review): this extract is missing lines (variable declarations such as
 * 'tbits' and 'ccode', some assertions and braces); comments below only state
 * what the visible code shows.
 */
2730 arm_patch_general (MonoDomain *domain, guchar *code, const guchar *target)
2732 guint32 *code32 = (void*)code;
2733 guint32 ins = *code32;
/* bits 27:25 select the primary opcode class; 101b is b/bl */
2734 guint32 prim = (ins >> 25) & 7;
2735 guint32 tval = GPOINTER_TO_UINT (target);
2737 //g_print ("patching 0x%08x (0x%08x) to point to 0x%08x\n", code, ins, target);
2738 if (prim == 5) { /* 101b */
2739 /* the diff starts 8 bytes from the branch opcode */
/* ARM PC reads as the instruction address + 8 during execution */
2740 gint diff = target - code - 8;
2742 gint tmask = 0xffffffff;
2743 if (tval & 1) { /* entering thumb mode */
/* strip the Thumb marker bit from the target before computing the offset */
2744 diff = target - 1 - code - 8;
2745 g_assert (thumb_supported);
2746 tbits = 0xf << 28; /* bl->blx bit pattern */
2747 g_assert ((ins & (1 << 24))); /* it must be a bl, not b instruction */
2748 /* this low bit of the displacement is moved to bit 24 in the instruction encoding */
2752 tmask = ~(1 << 24); /* clear the link bit */
2753 /*g_print ("blx to thumb: target: %p, code: %p, diff: %d, mask: %x\n", target, code, diff, tmask);*/
/* forward branch: 33554431 bytes is the +32MB-ish limit of the 24-bit
 * word displacement field */
2758 if (diff <= 33554431) {
2760 ins = (ins & 0xff000000) | diff;
2762 *code32 = ins | tbits;
2766 /* diff between 0 and -33554432 */
/* backward branch within the -32MB limit: mask the displacement into the
 * low 24 bits, preserving the condition/opcode byte */
2767 if (diff >= -33554432) {
2769 ins = (ins & 0xff000000) | (diff & ~0xff000000);
2771 *code32 = ins | tbits;
/* out of direct-branch range: go through a thunk slot instead */
2776 handle_thunk (domain, TRUE, code, target);
2781 * The alternative call sequences looks like this:
2783 * ldr ip, [pc] // loads the address constant
2784 * b 1f // jumps around the constant
2785 * address constant embedded in the code
2790 * There are two cases for patching:
2791 * a) at the end of method emission: in this case code points to the start
2792 * of the call sequence
2793 * b) during runtime patching of the call site: in this case code points
2794 * to the mov pc, ip instruction
2796 * We have to handle also the thunk jump code sequence:
2800 * address constant // execution never reaches here
/* 0xe12fff1x is the bx-register encoding (condition bits masked off) */
2802 if ((ins & 0x0ffffff0) == 0x12fff10) {
2803 /* Branch and exchange: the address is constructed in a reg
2804 * We can patch BX when the code sequence is the following:
2805 * ldr ip, [pc, #0] ; 0x8
/* re-emit the expected template into ccode so we can compare it
 * word-by-word against the code being patched */
2812 guint8 *emit = (guint8*)ccode;
2813 ARM_LDR_IMM (emit, ARMREG_IP, ARMREG_PC, 0);
2815 ARM_MOV_REG_REG (emit, ARMREG_LR, ARMREG_PC);
2816 ARM_BX (emit, ARMREG_IP);
2818 /*patching from magic trampoline*/
/* CODE points at the final bx: the address constant sits at code32[-2] */
2819 if (ins == ccode [3]) {
2820 g_assert (code32 [-4] == ccode [0]);
2821 g_assert (code32 [-3] == ccode [1]);
2822 g_assert (code32 [-1] == ccode [2]);
2823 code32 [-2] = (guint32)target;
2826 /*patching from JIT*/
/* CODE points at the start of the sequence: constant is at code32[2] */
2827 if (ins == ccode [0]) {
2828 g_assert (code32 [1] == ccode [1]);
2829 g_assert (code32 [3] == ccode [2]);
2830 g_assert (code32 [4] == ccode [3]);
2831 code32 [2] = (guint32)target;
/* unrecognized bx sequence — refuse to guess */
2834 g_assert_not_reached ();
/* 0xe12fff3x is the blx-register encoding */
2835 } else if ((ins & 0x0ffffff0) == 0x12fff30) {
2843 guint8 *emit = (guint8*)ccode;
2844 ARM_LDR_IMM (emit, ARMREG_IP, ARMREG_PC, 0);
2846 ARM_BLX_REG (emit, ARMREG_IP);
2848 g_assert (code32 [-3] == ccode [0]);
2849 g_assert (code32 [-2] == ccode [1]);
2850 g_assert (code32 [0] == ccode [2]);
2852 code32 [-1] = (guint32)target;
/* legacy mov-pc sequence (and the thunk jump / far call shapes) */
2855 guint32 *tmp = ccode;
2856 guint8 *emit = (guint8*)tmp;
2857 ARM_LDR_IMM (emit, ARMREG_IP, ARMREG_PC, 0);
2858 ARM_MOV_REG_REG (emit, ARMREG_LR, ARMREG_PC);
2859 ARM_MOV_REG_REG (emit, ARMREG_PC, ARMREG_IP);
2860 ARM_BX (emit, ARMREG_IP);
2861 if (ins == ccode [2]) {
2862 g_assert_not_reached (); // should be -2 ...
2863 code32 [-1] = (guint32)target;
2866 if (ins == ccode [0]) {
2867 /* handles both thunk jump code and the far call sequence */
2868 code32 [2] = (guint32)target;
2871 g_assert_not_reached ();
2873 // g_print ("patched with 0x%08x\n", ins);
/*
 * arm_patch:
 * Convenience wrapper around arm_patch_general () with no explicit domain;
 * if a thunk ends up being needed, handle_thunk () falls back to the
 * current domain (see mono_domain_get there).
 */
2877 arm_patch (guchar *code, const guchar *target)
2879 arm_patch_general (NULL, code, target);
2883 * Return the >= 0 uimm8 value if val can be represented with a byte + rotation
2884 * (with the rotation amount in *rot_amount. rot_amount is already adjusted
2885 * to be used with the emit macros.
2886 * Return -1 otherwise.
2889 mono_arm_is_rotated_imm8 (guint32 val, gint *rot_amount)
/* try every even rotation: ARM data-processing immediates are an 8-bit
 * value rotated right by an even amount */
2892 for (i = 0; i < 31; i+= 2) {
/* rotate val left by i, i.e. undo a rotate-right by i.
 * NOTE(review): for i == 0 this evaluates (val << 32), which is undefined
 * in ISO C for 32-bit val — relies on the compiler's behavior; verify. */
2893 res = (val << (32 - i)) | (val >> i);
/* convert the left-rotation count back into the right-rotation amount
 * the emit macros expect (0 stays 0) */
2896 *rot_amount = i? 32 - i: 0;
2903 * Emits in code a sequence of instructions that load the value 'val'
2904 * into the dreg register. Uses at most 4 instructions.
2907 mono_arm_emit_load_imm (guint8 *code, int dreg, guint32 val)
2909 int imm8, rot_amount;
/* constant-pool path: load the value pc-relative (the pool word follows)
 * NOTE(review): the guard selecting this path is missing from this extract */
2911 ARM_LDR_IMM (code, dreg, ARMREG_PC, 0);
2912 /* skip the constant pool */
/* single-instruction forms: a rotated 8-bit immediate, or its complement
 * loaded via mvn */
2918 if ((imm8 = mono_arm_is_rotated_imm8 (val, &rot_amount)) >= 0) {
2919 ARM_MOV_REG_IMM (code, dreg, imm8, rot_amount);
2920 } else if ((imm8 = mono_arm_is_rotated_imm8 (~val, &rot_amount)) >= 0) {
2921 ARM_MVN_REG_IMM (code, dreg, imm8, rot_amount);
/* movw/movt pair builds any 32-bit value in two instructions
 * (presumably gated on v7_supported — the condition is not visible here) */
2924 ARM_MOVW_REG_IMM (code, dreg, val & 0xffff);
2926 ARM_MOVT_REG_IMM (code, dreg, (val >> 16) & 0xffff);
/* fallback: assemble the value one byte at a time, mov for the lowest
 * non-zero byte then add for each higher byte (rotation encodes the
 * byte position) */
2930 ARM_MOV_REG_IMM8 (code, dreg, (val & 0xFF));
2932 ARM_ADD_REG_IMM (code, dreg, dreg, (val & 0xFF00) >> 8, 24);
2934 if (val & 0xFF0000) {
2935 ARM_ADD_REG_IMM (code, dreg, dreg, (val & 0xFF0000) >> 16, 16);
2937 if (val & 0xFF000000) {
2938 ARM_ADD_REG_IMM (code, dreg, dreg, (val & 0xFF000000) >> 24, 8);
/* value has no low byte: start from the second byte instead */
2940 } else if (val & 0xFF00) {
2941 ARM_MOV_REG_IMM (code, dreg, (val & 0xFF00) >> 8, 24);
2942 if (val & 0xFF0000) {
2943 ARM_ADD_REG_IMM (code, dreg, dreg, (val & 0xFF0000) >> 16, 16);
2945 if (val & 0xFF000000) {
2946 ARM_ADD_REG_IMM (code, dreg, dreg, (val & 0xFF000000) >> 24, 8);
/* only the upper two bytes are set */
2948 } else if (val & 0xFF0000) {
2949 ARM_MOV_REG_IMM (code, dreg, (val & 0xFF0000) >> 16, 16);
2950 if (val & 0xFF000000) {
2951 ARM_ADD_REG_IMM (code, dreg, dreg, (val & 0xFF000000) >> 24, 8);
2954 //g_assert_not_reached ();
/*
 * mono_arm_thumb_supported:
 * Returns the cached thumb_supported flag (non-zero when Thumb interworking
 * is available; set during arch initialization, not visible in this extract).
 */
2960 mono_arm_thumb_supported (void)
2962 return thumb_supported;
2968 * emit_load_volatile_arguments:
2970 * Load volatile arguments from the stack to the original input registers.
2971 * Required before a tail call.
2974 emit_load_volatile_arguments (MonoCompile *cfg, guint8 *code)
2976 MonoMethod *method = cfg->method;
2977 MonoMethodSignature *sig;
2982 /* FIXME: Generate intermediate code instead */
2984 sig = mono_method_signature (method);
2986 /* This is the opposite of the code in emit_prolog */
/* recompute the calling convention to learn where each argument lives */
2990 cinfo = get_call_info (NULL, sig, sig->pinvoke);
/* struct returns: reload the hidden vret address into its register */
2992 if (MONO_TYPE_ISSTRUCT (sig->ret)) {
2993 ArgInfo *ainfo = &cinfo->ret;
2994 inst = cfg->vret_addr;
2995 g_assert (arm_is_imm12 (inst->inst_offset));
2996 ARM_LDR_IMM (code, ainfo->reg, inst->inst_basereg, inst->inst_offset);
/* walk every formal argument (plus 'this') and restore it to the
 * location the callee expects */
2998 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
2999 ArgInfo *ainfo = cinfo->args + i;
3000 inst = cfg->args [pos];
3002 if (cfg->verbose_level > 2)
3003 g_print ("Loading argument %d (type: %d)\n", i, ainfo->storage);
/* argument was register-allocated: move/load it back into the
 * incoming argument register */
3004 if (inst->opcode == OP_REGVAR) {
3005 if (ainfo->storage == RegTypeGeneral)
3006 ARM_MOV_REG_REG (code, inst->dreg, ainfo->reg);
3007 else if (ainfo->storage == RegTypeFP) {
3008 g_assert_not_reached ();
3009 } else if (ainfo->storage == RegTypeBase) {
/* stack-passed argument: reload from the caller frame area
 * (large offsets go through IP as a scratch index) */
3013 if (arm_is_imm12 (prev_sp_offset + ainfo->offset)) {
3014 ARM_LDR_IMM (code, inst->dreg, ARMREG_SP, (prev_sp_offset + ainfo->offset));
3016 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
3017 ARM_LDR_REG_REG (code, inst->dreg, ARMREG_SP, ARMREG_IP);
3021 g_assert_not_reached ();
/* argument lives in a stack slot: reload into its register(s) */
3023 if (ainfo->storage == RegTypeGeneral || ainfo->storage == RegTypeIRegPair) {
3024 switch (ainfo->size) {
/* 8-byte case: two consecutive words into a register pair */
3031 g_assert (arm_is_imm12 (inst->inst_offset));
3032 ARM_LDR_IMM (code, ainfo->reg, inst->inst_basereg, inst->inst_offset);
3033 g_assert (arm_is_imm12 (inst->inst_offset + 4));
3034 ARM_LDR_IMM (code, ainfo->reg + 1, inst->inst_basereg, inst->inst_offset + 4);
/* default: a single word load, using IP when the offset is large */
3037 if (arm_is_imm12 (inst->inst_offset)) {
3038 ARM_LDR_IMM (code, ainfo->reg, inst->inst_basereg, inst->inst_offset);
3040 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
3041 ARM_LDR_REG_REG (code, ainfo->reg, inst->inst_basereg, ARMREG_IP);
/* remaining storage kinds: bodies are missing from this extract —
 * NOTE(review): confirm handling against the full source */
3045 } else if (ainfo->storage == RegTypeBaseGen) {
3048 } else if (ainfo->storage == RegTypeBase) {
3050 } else if (ainfo->storage == RegTypeFP) {
3051 g_assert_not_reached ();
/* struct passed by value in registers: reload each word */
3052 } else if (ainfo->storage == RegTypeStructByVal) {
3053 int doffset = inst->inst_offset;
3057 if (mono_class_from_mono_type (inst->inst_vtype))
3058 size = mono_class_native_size (mono_class_from_mono_type (inst->inst_vtype), NULL);
3059 for (cur_reg = 0; cur_reg < ainfo->size; ++cur_reg) {
3060 if (arm_is_imm12 (doffset)) {
3061 ARM_LDR_IMM (code, ainfo->reg + cur_reg, inst->inst_basereg, doffset);
3063 code = mono_arm_emit_load_imm (code, ARMREG_IP, doffset);
3064 ARM_LDR_REG_REG (code, ainfo->reg + cur_reg, inst->inst_basereg, ARMREG_IP);
3066 soffset += sizeof (gpointer);
3067 doffset += sizeof (gpointer);
3072 } else if (ainfo->storage == RegTypeStructByAddr) {
3087 mono_arch_output_basic_block (MonoCompile *cfg, MonoBasicBlock *bb)
3092 guint8 *code = cfg->native_code + cfg->code_len;
3093 MonoInst *last_ins = NULL;
3094 guint last_offset = 0;
3096 int imm8, rot_amount;
3098 /* we don't align basic blocks of loops on arm */
3100 if (cfg->verbose_level > 2)
3101 g_print ("Basic block %d starting at offset 0x%x\n", bb->block_num, bb->native_offset);
3103 cpos = bb->max_offset;
3105 if (cfg->prof_options & MONO_PROFILE_COVERAGE) {
3106 //MonoCoverageInfo *cov = mono_get_coverage_info (cfg->method);
3107 //g_assert (!mono_compile_aot);
3110 // cov->data [bb->dfn].iloffset = bb->cil_code - cfg->cil_code;
3111 /* this is not thread save, but good enough */
3112 /* fixme: howto handle overflows? */
3113 //x86_inc_mem (code, &cov->data [bb->dfn].count);
3116 if (mono_break_at_bb_method && mono_method_desc_full_match (mono_break_at_bb_method, cfg->method) && bb->block_num == mono_break_at_bb_bb_num) {
3117 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_INTERNAL_METHOD,
3118 (gpointer)"mono_break");
3119 code = emit_call_seq (cfg, code);
3122 MONO_BB_FOR_EACH_INS (bb, ins) {
3123 offset = code - cfg->native_code;
3125 max_len = ((guint8 *)ins_get_spec (ins->opcode))[MONO_INST_LEN];
3127 if (offset > (cfg->code_size - max_len - 16)) {
3128 cfg->code_size *= 2;
3129 cfg->native_code = g_realloc (cfg->native_code, cfg->code_size);
3130 code = cfg->native_code + offset;
3132 // if (ins->cil_code)
3133 // g_print ("cil code\n");
3134 mono_debug_record_line_number (cfg, ins, offset);
3136 switch (ins->opcode) {
3137 case OP_MEMORY_BARRIER:
3140 #ifdef HAVE_AEABI_READ_TP
3141 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_INTERNAL_METHOD,
3142 (gpointer)"__aeabi_read_tp");
3143 code = emit_call_seq (cfg, code);
3145 ARM_LDR_IMM (code, ins->dreg, ARMREG_R0, ins->inst_offset);
3147 g_assert_not_reached ();
3151 ppc_mullw (code, ppc_r4, ins->sreg1, ins->sreg2);
3152 ppc_mulhw (code, ppc_r3, ins->sreg1, ins->sreg2);
3155 ppc_mullw (code, ppc_r4, ins->sreg1, ins->sreg2);
3156 ppc_mulhwu (code, ppc_r3, ins->sreg1, ins->sreg2);
3158 case OP_STOREI1_MEMBASE_IMM:
3159 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_imm & 0xFF);
3160 g_assert (arm_is_imm12 (ins->inst_offset));
3161 ARM_STRB_IMM (code, ARMREG_LR, ins->inst_destbasereg, ins->inst_offset);
3163 case OP_STOREI2_MEMBASE_IMM:
3164 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_imm & 0xFFFF);
3165 g_assert (arm_is_imm8 (ins->inst_offset));
3166 ARM_STRH_IMM (code, ARMREG_LR, ins->inst_destbasereg, ins->inst_offset);
3168 case OP_STORE_MEMBASE_IMM:
3169 case OP_STOREI4_MEMBASE_IMM:
3170 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_imm);
3171 g_assert (arm_is_imm12 (ins->inst_offset));
3172 ARM_STR_IMM (code, ARMREG_LR, ins->inst_destbasereg, ins->inst_offset);
3174 case OP_STOREI1_MEMBASE_REG:
3175 g_assert (arm_is_imm12 (ins->inst_offset));
3176 ARM_STRB_IMM (code, ins->sreg1, ins->inst_destbasereg, ins->inst_offset);
3178 case OP_STOREI2_MEMBASE_REG:
3179 g_assert (arm_is_imm8 (ins->inst_offset));
3180 ARM_STRH_IMM (code, ins->sreg1, ins->inst_destbasereg, ins->inst_offset);
3182 case OP_STORE_MEMBASE_REG:
3183 case OP_STOREI4_MEMBASE_REG:
3184 /* this case is special, since it happens for spill code after lowering has been called */
3185 if (arm_is_imm12 (ins->inst_offset)) {
3186 ARM_STR_IMM (code, ins->sreg1, ins->inst_destbasereg, ins->inst_offset);
3188 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_offset);
3189 ARM_STR_REG_REG (code, ins->sreg1, ins->inst_destbasereg, ARMREG_LR);
3192 case OP_STOREI1_MEMINDEX:
3193 ARM_STRB_REG_REG (code, ins->sreg1, ins->inst_destbasereg, ins->sreg2);
3195 case OP_STOREI2_MEMINDEX:
3196 ARM_STRH_REG_REG (code, ins->sreg1, ins->inst_destbasereg, ins->sreg2);
3198 case OP_STORE_MEMINDEX:
3199 case OP_STOREI4_MEMINDEX:
3200 ARM_STR_REG_REG (code, ins->sreg1, ins->inst_destbasereg, ins->sreg2);
3203 g_assert_not_reached ();
3205 case OP_LOAD_MEMINDEX:
3206 case OP_LOADI4_MEMINDEX:
3207 case OP_LOADU4_MEMINDEX:
3208 ARM_LDR_REG_REG (code, ins->dreg, ins->inst_basereg, ins->sreg2);
3210 case OP_LOADI1_MEMINDEX:
3211 ARM_LDRSB_REG_REG (code, ins->dreg, ins->inst_basereg, ins->sreg2);
3213 case OP_LOADU1_MEMINDEX:
3214 ARM_LDRB_REG_REG (code, ins->dreg, ins->inst_basereg, ins->sreg2);
3216 case OP_LOADI2_MEMINDEX:
3217 ARM_LDRSH_REG_REG (code, ins->dreg, ins->inst_basereg, ins->sreg2);
3219 case OP_LOADU2_MEMINDEX:
3220 ARM_LDRH_REG_REG (code, ins->dreg, ins->inst_basereg, ins->sreg2);
3222 case OP_LOAD_MEMBASE:
3223 case OP_LOADI4_MEMBASE:
3224 case OP_LOADU4_MEMBASE:
3225 /* this case is special, since it happens for spill code after lowering has been called */
3226 if (arm_is_imm12 (ins->inst_offset)) {
3227 ARM_LDR_IMM (code, ins->dreg, ins->inst_basereg, ins->inst_offset);
3229 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_offset);
3230 ARM_LDR_REG_REG (code, ins->dreg, ins->inst_basereg, ARMREG_LR);
3233 case OP_LOADI1_MEMBASE:
3234 g_assert (arm_is_imm8 (ins->inst_offset));
3235 ARM_LDRSB_IMM (code, ins->dreg, ins->inst_basereg, ins->inst_offset);
3237 case OP_LOADU1_MEMBASE:
3238 g_assert (arm_is_imm12 (ins->inst_offset));
3239 ARM_LDRB_IMM (code, ins->dreg, ins->inst_basereg, ins->inst_offset);
3241 case OP_LOADU2_MEMBASE:
3242 g_assert (arm_is_imm8 (ins->inst_offset));
3243 ARM_LDRH_IMM (code, ins->dreg, ins->inst_basereg, ins->inst_offset);
3245 case OP_LOADI2_MEMBASE:
3246 g_assert (arm_is_imm8 (ins->inst_offset));
3247 ARM_LDRSH_IMM (code, ins->dreg, ins->inst_basereg, ins->inst_offset);
3249 case OP_ICONV_TO_I1:
3250 ARM_SHL_IMM (code, ins->dreg, ins->sreg1, 24);
3251 ARM_SAR_IMM (code, ins->dreg, ins->dreg, 24);
3253 case OP_ICONV_TO_I2:
3254 ARM_SHL_IMM (code, ins->dreg, ins->sreg1, 16);
3255 ARM_SAR_IMM (code, ins->dreg, ins->dreg, 16);
3257 case OP_ICONV_TO_U1:
3258 ARM_AND_REG_IMM8 (code, ins->dreg, ins->sreg1, 0xff);
3260 case OP_ICONV_TO_U2:
3261 ARM_SHL_IMM (code, ins->dreg, ins->sreg1, 16);
3262 ARM_SHR_IMM (code, ins->dreg, ins->dreg, 16);
3266 ARM_CMP_REG_REG (code, ins->sreg1, ins->sreg2);
3268 case OP_COMPARE_IMM:
3269 case OP_ICOMPARE_IMM:
3270 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
3271 g_assert (imm8 >= 0);
3272 ARM_CMP_REG_IMM (code, ins->sreg1, imm8, rot_amount);
3276 * gdb does not like encountering the hw breakpoint ins in the debugged code.
3277 * So instead of emitting a trap, we emit a call a C function and place a
3280 //*(int*)code = 0xef9f0001;
3283 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_INTERNAL_METHOD,
3284 (gpointer)"mono_break");
3285 code = emit_call_seq (cfg, code);
3287 case OP_RELAXED_NOP:
3292 case OP_DUMMY_STORE:
3293 case OP_NOT_REACHED:
3296 case OP_SEQ_POINT: {
3298 MonoInst *info_var = cfg->arch.seq_point_info_var;
3299 MonoInst *ss_trigger_page_var = cfg->arch.ss_trigger_page_var;
3301 int dreg = ARMREG_LR;
3304 * For AOT, we use one got slot per method, which will point to a
3305 * SeqPointInfo structure, containing all the information required
3306 * by the code below.
3308 if (cfg->compile_aot) {
3309 g_assert (info_var);
3310 g_assert (info_var->opcode == OP_REGOFFSET);
3311 g_assert (arm_is_imm12 (info_var->inst_offset));
3315 * Read from the single stepping trigger page. This will cause a
3316 * SIGSEGV when single stepping is enabled.
3317 * We do this _before_ the breakpoint, so single stepping after
3318 * a breakpoint is hit will step to the next IL offset.
3320 g_assert (((guint64)(gsize)ss_trigger_page >> 32) == 0);
3322 if (ins->flags & MONO_INST_SINGLE_STEP_LOC) {
3323 if (cfg->compile_aot) {
3324 /* Load the trigger page addr from the variable initialized in the prolog */
3325 var = ss_trigger_page_var;
3327 g_assert (var->opcode == OP_REGOFFSET);
3328 g_assert (arm_is_imm12 (var->inst_offset));
3329 ARM_LDR_IMM (code, dreg, var->inst_basereg, var->inst_offset);
3331 ARM_LDR_IMM (code, dreg, ARMREG_PC, 0);
3333 *(int*)code = (int)ss_trigger_page;
3336 ARM_LDR_IMM (code, dreg, dreg, 0);
3339 mono_add_seq_point (cfg, bb, ins, code - cfg->native_code);
3341 if (cfg->compile_aot) {
3342 guint32 offset = code - cfg->native_code;
3345 ARM_LDR_IMM (code, dreg, info_var->inst_basereg, info_var->inst_offset);
3346 /* Add the offset */
3347 val = ((offset / 4) * sizeof (guint8*)) + G_STRUCT_OFFSET (SeqPointInfo, bp_addrs);
3348 ARM_ADD_REG_IMM (code, dreg, dreg, (val & 0xFF), 0);
3350 * Have to emit nops to keep the difference between the offset
3351 * stored in seq_points and breakpoint instruction constant,
3352 * mono_arch_get_ip_for_breakpoint () depends on this.
3355 ARM_ADD_REG_IMM (code, dreg, dreg, (val & 0xFF00) >> 8, 24);
3359 ARM_ADD_REG_IMM (code, dreg, dreg, (val & 0xFF0000) >> 16, 16);
3362 g_assert (!(val & 0xFF000000));
3363 /* Load the info->bp_addrs [offset], which is either 0 or the address of a trigger page */
3364 ARM_LDR_IMM (code, dreg, dreg, 0);
3366 /* What is faster, a branch or a load ? */
3367 ARM_CMP_REG_IMM (code, dreg, 0, 0);
3368 /* The breakpoint instruction */
3369 ARM_LDR_IMM_COND (code, dreg, dreg, 0, ARMCOND_NE);
3372 * A placeholder for a possible breakpoint inserted by
3373 * mono_arch_set_breakpoint ().
3375 for (i = 0; i < 4; ++i)
3382 ARM_ADDS_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
3385 ARM_ADD_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
3389 ARM_ADCS_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
3392 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
3393 g_assert (imm8 >= 0);
3394 ARM_ADDS_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
3398 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
3399 g_assert (imm8 >= 0);
3400 ARM_ADD_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
3404 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
3405 g_assert (imm8 >= 0);
3406 ARM_ADCS_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
3409 ARM_ADD_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
3410 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, "OverflowException");
3412 case OP_IADD_OVF_UN:
3413 ARM_ADD_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
3414 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, "OverflowException");
3417 ARM_SUB_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
3418 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, "OverflowException");
3420 case OP_ISUB_OVF_UN:
3421 ARM_SUB_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
3422 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_TRUE, PPC_BR_EQ, "OverflowException");
3424 case OP_ADD_OVF_CARRY:
3425 ARM_ADCS_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
3426 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, "OverflowException");
3428 case OP_ADD_OVF_UN_CARRY:
3429 ARM_ADCS_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
3430 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, "OverflowException");
3432 case OP_SUB_OVF_CARRY:
3433 ARM_SBCS_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
3434 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, "OverflowException");
3436 case OP_SUB_OVF_UN_CARRY:
3437 ARM_SBCS_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
3438 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_TRUE, PPC_BR_EQ, "OverflowException");
3442 ARM_SUBS_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
3445 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
3446 g_assert (imm8 >= 0);
3447 ARM_SUBS_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
3450 ARM_SUB_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
3454 ARM_SBCS_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
3458 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
3459 g_assert (imm8 >= 0);
3460 ARM_SUB_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
3464 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
3465 g_assert (imm8 >= 0);
3466 ARM_SBCS_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
3468 case OP_ARM_RSBS_IMM:
3469 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
3470 g_assert (imm8 >= 0);
3471 ARM_RSBS_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
3473 case OP_ARM_RSC_IMM:
3474 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
3475 g_assert (imm8 >= 0);
3476 ARM_RSC_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
3479 ARM_AND_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
3483 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
3484 g_assert (imm8 >= 0);
3485 ARM_AND_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
3493 /* crappy ARM arch doesn't have a DIV instruction */
3494 g_assert_not_reached ();
3496 ARM_ORR_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
3500 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
3501 g_assert (imm8 >= 0);
3502 ARM_ORR_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
3505 ARM_EOR_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
3509 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
3510 g_assert (imm8 >= 0);
3511 ARM_EOR_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
3514 ARM_SHL_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
3519 ARM_SHL_IMM (code, ins->dreg, ins->sreg1, (ins->inst_imm & 0x1f));
3520 else if (ins->dreg != ins->sreg1)
3521 ARM_MOV_REG_REG (code, ins->dreg, ins->sreg1);
3524 ARM_SAR_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
3529 ARM_SAR_IMM (code, ins->dreg, ins->sreg1, (ins->inst_imm & 0x1f));
3530 else if (ins->dreg != ins->sreg1)
3531 ARM_MOV_REG_REG (code, ins->dreg, ins->sreg1);
3534 case OP_ISHR_UN_IMM:
3536 ARM_SHR_IMM (code, ins->dreg, ins->sreg1, (ins->inst_imm & 0x1f));
3537 else if (ins->dreg != ins->sreg1)
3538 ARM_MOV_REG_REG (code, ins->dreg, ins->sreg1);
3541 ARM_SHR_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
3544 ARM_MVN_REG_REG (code, ins->dreg, ins->sreg1);
3547 ARM_RSB_REG_IMM8 (code, ins->dreg, ins->sreg1, 0);
3550 if (ins->dreg == ins->sreg2)
3551 ARM_MUL_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
3553 ARM_MUL_REG_REG (code, ins->dreg, ins->sreg2, ins->sreg1);
3556 g_assert_not_reached ();
3559 /* FIXME: handle ovf/ sreg2 != dreg */
3560 ARM_MUL_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
3561 /* FIXME: MUL doesn't set the C/O flags on ARM */
3563 case OP_IMUL_OVF_UN:
3564 /* FIXME: handle ovf/ sreg2 != dreg */
3565 ARM_MUL_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
3566 /* FIXME: MUL doesn't set the C/O flags on ARM */
3569 code = mono_arm_emit_load_imm (code, ins->dreg, ins->inst_c0);
3572 /* Load the GOT offset */
3573 mono_add_patch_info (cfg, offset, (MonoJumpInfoType)ins->inst_i1, ins->inst_p0);
3574 ARM_LDR_IMM (code, ins->dreg, ARMREG_PC, 0);
3576 *(gpointer*)code = NULL;
3578 /* Load the value from the GOT */
3579 ARM_LDR_REG_REG (code, ins->dreg, ARMREG_PC, ins->dreg);
3581 case OP_ICONV_TO_I4:
3582 case OP_ICONV_TO_U4:
3584 if (ins->dreg != ins->sreg1)
3585 ARM_MOV_REG_REG (code, ins->dreg, ins->sreg1);
3588 int saved = ins->sreg2;
3589 if (ins->sreg2 == ARM_LSW_REG) {
3590 ARM_MOV_REG_REG (code, ARMREG_LR, ins->sreg2);
3593 if (ins->sreg1 != ARM_LSW_REG)
3594 ARM_MOV_REG_REG (code, ARM_LSW_REG, ins->sreg1);
3595 if (saved != ARM_MSW_REG)
3596 ARM_MOV_REG_REG (code, ARM_MSW_REG, saved);
3601 ARM_MVFD (code, ins->dreg, ins->sreg1);
3602 #elif defined(ARM_FPU_VFP)
3603 ARM_CPYD (code, ins->dreg, ins->sreg1);
3606 case OP_FCONV_TO_R4:
3608 ARM_MVFS (code, ins->dreg, ins->sreg1);
3609 #elif defined(ARM_FPU_VFP)
3610 ARM_CVTD (code, ins->dreg, ins->sreg1);
3611 ARM_CVTS (code, ins->dreg, ins->dreg);
3616 * Keep in sync with mono_arch_emit_epilog
3618 g_assert (!cfg->method->save_lmf);
3620 code = emit_load_volatile_arguments (cfg, code);
3622 code = emit_big_add (code, ARMREG_SP, cfg->frame_reg, cfg->stack_usage);
3623 ARM_POP_NWB (code, cfg->used_int_regs | ((1 << ARMREG_SP)) | ((1 << ARMREG_LR)));
3624 mono_add_patch_info (cfg, (guint8*) code - cfg->native_code, MONO_PATCH_INFO_METHOD_JUMP, ins->inst_p0);
3625 if (cfg->compile_aot) {
3626 ARM_LDR_IMM (code, ARMREG_IP, ARMREG_PC, 0);
3628 *(gpointer*)code = NULL;
3630 ARM_LDR_REG_REG (code, ARMREG_PC, ARMREG_PC, ARMREG_IP);
3636 /* ensure ins->sreg1 is not NULL */
3637 ARM_LDR_IMM (code, ARMREG_LR, ins->sreg1, 0);
3640 g_assert (cfg->sig_cookie < 128);
3641 ARM_LDR_IMM (code, ARMREG_IP, cfg->frame_reg, cfg->sig_cookie);
3642 ARM_STR_IMM (code, ARMREG_IP, ins->sreg1, 0);
3651 call = (MonoCallInst*)ins;
3652 if (ins->flags & MONO_INST_HAS_METHOD)
3653 mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_METHOD, call->method);
3655 mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_ABS, call->fptr);
3656 code = emit_call_seq (cfg, code);
3657 code = emit_move_return_value (cfg, ins, code);
3663 case OP_VOIDCALL_REG:
3665 code = emit_call_reg (code, ins->sreg1);
3666 code = emit_move_return_value (cfg, ins, code);
3668 case OP_FCALL_MEMBASE:
3669 case OP_LCALL_MEMBASE:
3670 case OP_VCALL_MEMBASE:
3671 case OP_VCALL2_MEMBASE:
3672 case OP_VOIDCALL_MEMBASE:
3673 case OP_CALL_MEMBASE:
3674 g_assert (arm_is_imm12 (ins->inst_offset));
3675 g_assert (ins->sreg1 != ARMREG_LR);
3676 call = (MonoCallInst*)ins;
3677 if (call->method->klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
3678 ARM_ADD_REG_IMM8 (code, ARMREG_LR, ARMREG_PC, 4);
3679 ARM_LDR_IMM (code, ARMREG_PC, ins->sreg1, ins->inst_offset);
3681 * We can't embed the method in the code stream in PIC code, or
3683 * Instead, we put it in V5 in code emitted by
3684 * mono_arch_emit_imt_argument (), and embed NULL here to
3685 * signal the IMT thunk that the value is in V5.
3687 if (call->dynamic_imt_arg)
3688 *((gpointer*)code) = NULL;
3690 *((gpointer*)code) = (gpointer)call->method;
3693 ARM_MOV_REG_REG (code, ARMREG_LR, ARMREG_PC);
3694 ARM_LDR_IMM (code, ARMREG_PC, ins->sreg1, ins->inst_offset);
3696 code = emit_move_return_value (cfg, ins, code);
3699 /* keep alignment */
3700 int alloca_waste = cfg->param_area;
3703 /* round the size to 8 bytes */
3704 ARM_ADD_REG_IMM8 (code, ins->dreg, ins->sreg1, 7);
3705 ARM_BIC_REG_IMM8 (code, ins->dreg, ins->dreg, 7);
3707 ARM_ADD_REG_IMM8 (code, ins->dreg, ins->dreg, alloca_waste);
3708 ARM_SUB_REG_REG (code, ARMREG_SP, ARMREG_SP, ins->dreg);
3709 /* memzero the area: dreg holds the size, sp is the pointer */
3710 if (ins->flags & MONO_INST_INIT) {
3711 guint8 *start_loop, *branch_to_cond;
3712 ARM_MOV_REG_IMM8 (code, ARMREG_LR, 0);
3713 branch_to_cond = code;
3716 ARM_STR_REG_REG (code, ARMREG_LR, ARMREG_SP, ins->dreg);
3717 arm_patch (branch_to_cond, code);
3718 /* decrement by 4 and set flags */
3719 ARM_SUBS_REG_IMM8 (code, ins->dreg, ins->dreg, 4);
3720 ARM_B_COND (code, ARMCOND_GE, 0);
3721 arm_patch (code - 4, start_loop);
3723 ARM_ADD_REG_IMM8 (code, ins->dreg, ARMREG_SP, alloca_waste);
3728 MonoInst *var = cfg->dyn_call_var;
3730 g_assert (var->opcode == OP_REGOFFSET);
3731 g_assert (arm_is_imm12 (var->inst_offset));
3733 /* lr = args buffer filled by mono_arch_get_dyn_call_args () */
3734 ARM_MOV_REG_REG( code, ARMREG_LR, ins->sreg1);
3736 ARM_MOV_REG_REG( code, ARMREG_IP, ins->sreg2);
3738 /* Save args buffer */
3739 ARM_STR_IMM (code, ARMREG_LR, var->inst_basereg, var->inst_offset);
3741 /* Set stack slots using R0 as scratch reg */
3742 /* MONO_ARCH_DYN_CALL_PARAM_AREA gives the size of stack space available */
3743 for (i = 0; i < DYN_CALL_STACK_ARGS; ++i) {
3744 ARM_LDR_IMM (code, ARMREG_R0, ARMREG_LR, (PARAM_REGS + i) * sizeof (gpointer));
3745 ARM_STR_IMM (code, ARMREG_R0, ARMREG_SP, i * sizeof (gpointer));
3748 /* Set argument registers */
3749 for (i = 0; i < PARAM_REGS; ++i)
3750 ARM_LDR_IMM (code, i, ARMREG_LR, i * sizeof (gpointer));
3753 ARM_MOV_REG_REG (code, ARMREG_LR, ARMREG_PC);
3754 ARM_MOV_REG_REG (code, ARMREG_PC, ARMREG_IP);
3757 ARM_LDR_IMM (code, ARMREG_IP, var->inst_basereg, var->inst_offset);
3758 ARM_STR_IMM (code, ARMREG_R0, ARMREG_IP, G_STRUCT_OFFSET (DynCallArgs, res));
3759 ARM_STR_IMM (code, ARMREG_R1, ARMREG_IP, G_STRUCT_OFFSET (DynCallArgs, res2));
3763 if (ins->sreg1 != ARMREG_R0)
3764 ARM_MOV_REG_REG (code, ARMREG_R0, ins->sreg1);
3765 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_INTERNAL_METHOD,
3766 (gpointer)"mono_arch_throw_exception");
3767 code = emit_call_seq (cfg, code);
3771 if (ins->sreg1 != ARMREG_R0)
3772 ARM_MOV_REG_REG (code, ARMREG_R0, ins->sreg1);
3773 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_INTERNAL_METHOD,
3774 (gpointer)"mono_arch_rethrow_exception");
3775 code = emit_call_seq (cfg, code);
3778 case OP_START_HANDLER: {
3779 MonoInst *spvar = mono_find_spvar_for_region (cfg, bb->region);
3781 if (arm_is_imm12 (spvar->inst_offset)) {
3782 ARM_STR_IMM (code, ARMREG_LR, spvar->inst_basereg, spvar->inst_offset);
3784 code = mono_arm_emit_load_imm (code, ARMREG_IP, spvar->inst_offset);
3785 ARM_STR_REG_REG (code, ARMREG_LR, spvar->inst_basereg, ARMREG_IP);
3789 case OP_ENDFILTER: {
3790 MonoInst *spvar = mono_find_spvar_for_region (cfg, bb->region);
3792 if (ins->sreg1 != ARMREG_R0)
3793 ARM_MOV_REG_REG (code, ARMREG_R0, ins->sreg1);
3794 if (arm_is_imm12 (spvar->inst_offset)) {
3795 ARM_LDR_IMM (code, ARMREG_IP, spvar->inst_basereg, spvar->inst_offset);
3797 g_assert (ARMREG_IP != spvar->inst_basereg);
3798 code = mono_arm_emit_load_imm (code, ARMREG_IP, spvar->inst_offset);
3799 ARM_LDR_REG_REG (code, ARMREG_IP, spvar->inst_basereg, ARMREG_IP);
3801 ARM_MOV_REG_REG (code, ARMREG_PC, ARMREG_IP);
3804 case OP_ENDFINALLY: {
3805 MonoInst *spvar = mono_find_spvar_for_region (cfg, bb->region);
3807 if (arm_is_imm12 (spvar->inst_offset)) {
3808 ARM_LDR_IMM (code, ARMREG_IP, spvar->inst_basereg, spvar->inst_offset);
3810 g_assert (ARMREG_IP != spvar->inst_basereg);
3811 code = mono_arm_emit_load_imm (code, ARMREG_IP, spvar->inst_offset);
3812 ARM_LDR_REG_REG (code, ARMREG_IP, spvar->inst_basereg, ARMREG_IP);
3814 ARM_MOV_REG_REG (code, ARMREG_PC, ARMREG_IP);
3817 case OP_CALL_HANDLER:
3818 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_BB, ins->inst_target_bb);
3822 ins->inst_c0 = code - cfg->native_code;
3825 /*if (ins->inst_target_bb->native_offset) {
3827 //x86_jump_code (code, cfg->native_code + ins->inst_target_bb->native_offset);
3829 mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_BB, ins->inst_target_bb);
3834 ARM_MOV_REG_REG (code, ARMREG_PC, ins->sreg1);
3838 * In the normal case we have:
3839 * ldr pc, [pc, ins->sreg1 << 2]
3842 * ldr lr, [pc, ins->sreg1 << 2]
3844 * After follows the data.
3845 * FIXME: add aot support.
3847 mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_SWITCH, ins->inst_p0);
3848 max_len += 4 * GPOINTER_TO_INT (ins->klass);
3849 if (offset > (cfg->code_size - max_len - 16)) {
3850 cfg->code_size += max_len;
3851 cfg->code_size *= 2;
3852 cfg->native_code = g_realloc (cfg->native_code, cfg->code_size);
3853 code = cfg->native_code + offset;
3855 ARM_LDR_REG_REG_SHIFT (code, ARMREG_PC, ARMREG_PC, ins->sreg1, ARMSHIFT_LSL, 2);
3857 code += 4 * GPOINTER_TO_INT (ins->klass);
3861 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 0, ARMCOND_NE);
3862 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_EQ);
3866 ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
3867 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_LT);
3871 ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
3872 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_LO);
3876 ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
3877 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_GT);
3881 ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
3882 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_HI);
3884 case OP_COND_EXC_EQ:
3885 case OP_COND_EXC_NE_UN:
3886 case OP_COND_EXC_LT:
3887 case OP_COND_EXC_LT_UN:
3888 case OP_COND_EXC_GT:
3889 case OP_COND_EXC_GT_UN:
3890 case OP_COND_EXC_GE:
3891 case OP_COND_EXC_GE_UN:
3892 case OP_COND_EXC_LE:
3893 case OP_COND_EXC_LE_UN:
3894 EMIT_COND_SYSTEM_EXCEPTION (ins->opcode - OP_COND_EXC_EQ, ins->inst_p1);
3896 case OP_COND_EXC_IEQ:
3897 case OP_COND_EXC_INE_UN:
3898 case OP_COND_EXC_ILT:
3899 case OP_COND_EXC_ILT_UN:
3900 case OP_COND_EXC_IGT:
3901 case OP_COND_EXC_IGT_UN:
3902 case OP_COND_EXC_IGE:
3903 case OP_COND_EXC_IGE_UN:
3904 case OP_COND_EXC_ILE:
3905 case OP_COND_EXC_ILE_UN:
3906 EMIT_COND_SYSTEM_EXCEPTION (ins->opcode - OP_COND_EXC_IEQ, ins->inst_p1);
3909 case OP_COND_EXC_IC:
3910 EMIT_COND_SYSTEM_EXCEPTION_FLAGS (ARMCOND_CS, ins->inst_p1);
3912 case OP_COND_EXC_OV:
3913 case OP_COND_EXC_IOV:
3914 EMIT_COND_SYSTEM_EXCEPTION_FLAGS (ARMCOND_VS, ins->inst_p1);
3916 case OP_COND_EXC_NC:
3917 case OP_COND_EXC_INC:
3918 EMIT_COND_SYSTEM_EXCEPTION_FLAGS (ARMCOND_CC, ins->inst_p1);
3920 case OP_COND_EXC_NO:
3921 case OP_COND_EXC_INO:
3922 EMIT_COND_SYSTEM_EXCEPTION_FLAGS (ARMCOND_VC, ins->inst_p1);
3934 EMIT_COND_BRANCH (ins, ins->opcode - OP_IBEQ);
3937 /* floating point opcodes */
3940 if (cfg->compile_aot) {
3941 ARM_LDFD (code, ins->dreg, ARMREG_PC, 0);
3943 *(guint32*)code = ((guint32*)(ins->inst_p0))[0];
3945 *(guint32*)code = ((guint32*)(ins->inst_p0))[1];
3948 /* FIXME: we can optimize the imm load by dealing with part of
3949 * the displacement in LDFD (aligning to 512).
3951 code = mono_arm_emit_load_imm (code, ARMREG_LR, (guint32)ins->inst_p0);
3952 ARM_LDFD (code, ins->dreg, ARMREG_LR, 0);
3956 if (cfg->compile_aot) {
3957 ARM_LDFS (code, ins->dreg, ARMREG_PC, 0);
3959 *(guint32*)code = ((guint32*)(ins->inst_p0))[0];
3962 code = mono_arm_emit_load_imm (code, ARMREG_LR, (guint32)ins->inst_p0);
3963 ARM_LDFS (code, ins->dreg, ARMREG_LR, 0);
3966 case OP_STORER8_MEMBASE_REG:
3967 /* This is generated by the local regalloc pass which runs after the lowering pass */
3968 if (!arm_is_fpimm8 (ins->inst_offset)) {
3969 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_offset);
3970 ARM_ADD_REG_REG (code, ARMREG_LR, ARMREG_LR, ins->inst_destbasereg);
3971 ARM_STFD (code, ins->sreg1, ARMREG_LR, 0);
3973 ARM_STFD (code, ins->sreg1, ins->inst_destbasereg, ins->inst_offset);
3976 case OP_LOADR8_MEMBASE:
3977 /* This is generated by the local regalloc pass which runs after the lowering pass */
3978 if (!arm_is_fpimm8 (ins->inst_offset)) {
3979 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_offset);
3980 ARM_ADD_REG_REG (code, ARMREG_LR, ARMREG_LR, ins->inst_basereg);
3981 ARM_LDFD (code, ins->dreg, ARMREG_LR, 0);
3983 ARM_LDFD (code, ins->dreg, ins->inst_basereg, ins->inst_offset);
3986 case OP_STORER4_MEMBASE_REG:
3987 g_assert (arm_is_fpimm8 (ins->inst_offset));
3988 ARM_STFS (code, ins->sreg1, ins->inst_destbasereg, ins->inst_offset);
3990 case OP_LOADR4_MEMBASE:
3991 g_assert (arm_is_fpimm8 (ins->inst_offset));
3992 ARM_LDFS (code, ins->dreg, ins->inst_basereg, ins->inst_offset);
3994 case OP_ICONV_TO_R_UN: {
3996 tmpreg = ins->dreg == 0? 1: 0;
3997 ARM_CMP_REG_IMM8 (code, ins->sreg1, 0);
3998 ARM_FLTD (code, ins->dreg, ins->sreg1);
3999 ARM_B_COND (code, ARMCOND_GE, 8);
4000 /* save the temp register */
4001 ARM_SUB_REG_IMM8 (code, ARMREG_SP, ARMREG_SP, 8);
4002 ARM_STFD (code, tmpreg, ARMREG_SP, 0);
4003 ARM_LDFD (code, tmpreg, ARMREG_PC, 12);
4004 ARM_FPA_ADFD (code, ins->dreg, ins->dreg, tmpreg);
4005 ARM_LDFD (code, tmpreg, ARMREG_SP, 0);
4006 ARM_ADD_REG_IMM8 (code, ARMREG_SP, ARMREG_SP, 8);
4007 /* skip the constant pool */
4010 *(int*)code = 0x41f00000;
4015 * ldfltd ftemp, [pc, #8] 0x41f00000 0x00000000
4016 * adfltd fdest, fdest, ftemp
4020 case OP_ICONV_TO_R4:
4021 ARM_FLTS (code, ins->dreg, ins->sreg1);
4023 case OP_ICONV_TO_R8:
4024 ARM_FLTD (code, ins->dreg, ins->sreg1);
4027 #elif defined(ARM_FPU_VFP)
4030 if (cfg->compile_aot) {
4031 ARM_FLDD (code, ins->dreg, ARMREG_PC, 0);
4033 *(guint32*)code = ((guint32*)(ins->inst_p0))[0];
4035 *(guint32*)code = ((guint32*)(ins->inst_p0))[1];
4038 /* FIXME: we can optimize the imm load by dealing with part of
4039 * the displacement in LDFD (aligning to 512).
4041 code = mono_arm_emit_load_imm (code, ARMREG_LR, (guint32)ins->inst_p0);
4042 ARM_FLDD (code, ins->dreg, ARMREG_LR, 0);
4046 if (cfg->compile_aot) {
4047 ARM_FLDS (code, ins->dreg, ARMREG_PC, 0);
4049 *(guint32*)code = ((guint32*)(ins->inst_p0))[0];
4051 ARM_CVTS (code, ins->dreg, ins->dreg);
4053 code = mono_arm_emit_load_imm (code, ARMREG_LR, (guint32)ins->inst_p0);
4054 ARM_FLDS (code, ins->dreg, ARMREG_LR, 0);
4055 ARM_CVTS (code, ins->dreg, ins->dreg);
4058 case OP_STORER8_MEMBASE_REG:
4059 /* This is generated by the local regalloc pass which runs after the lowering pass */
4060 if (!arm_is_fpimm8 (ins->inst_offset)) {
4061 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_offset);
4062 ARM_ADD_REG_REG (code, ARMREG_LR, ARMREG_LR, ins->inst_destbasereg);
4063 ARM_FSTD (code, ins->sreg1, ARMREG_LR, 0);
4065 ARM_FSTD (code, ins->sreg1, ins->inst_destbasereg, ins->inst_offset);
4068 case OP_LOADR8_MEMBASE:
4069 /* This is generated by the local regalloc pass which runs after the lowering pass */
4070 if (!arm_is_fpimm8 (ins->inst_offset)) {
4071 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_offset);
4072 ARM_ADD_REG_REG (code, ARMREG_LR, ARMREG_LR, ins->inst_basereg);
4073 ARM_FLDD (code, ins->dreg, ARMREG_LR, 0);
4075 ARM_FLDD (code, ins->dreg, ins->inst_basereg, ins->inst_offset);
4078 case OP_STORER4_MEMBASE_REG:
4079 g_assert (arm_is_fpimm8 (ins->inst_offset));
4080 ARM_CVTD (code, ARM_VFP_F0, ins->sreg1);
4081 ARM_FSTS (code, ARM_VFP_F0, ins->inst_destbasereg, ins->inst_offset);
4083 case OP_LOADR4_MEMBASE:
4084 g_assert (arm_is_fpimm8 (ins->inst_offset));
4085 ARM_FLDS (code, ARM_VFP_F0, ins->inst_basereg, ins->inst_offset);
4086 ARM_CVTS (code, ins->dreg, ARM_VFP_F0);
4088 case OP_ICONV_TO_R_UN: {
4089 g_assert_not_reached ();
4092 case OP_ICONV_TO_R4:
4093 ARM_FMSR (code, ARM_VFP_F0, ins->sreg1);
4094 ARM_FSITOS (code, ARM_VFP_F0, ARM_VFP_F0);
4095 ARM_CVTS (code, ins->dreg, ARM_VFP_F0);
4097 case OP_ICONV_TO_R8:
4098 ARM_FMSR (code, ARM_VFP_F0, ins->sreg1);
4099 ARM_FSITOD (code, ins->dreg, ARM_VFP_F0);
4103 if (mono_method_signature (cfg->method)->ret->type == MONO_TYPE_R4) {
4104 ARM_CVTD (code, ARM_VFP_F0, ins->sreg1);
4105 ARM_FMRS (code, ARMREG_R0, ARM_VFP_F0);
4107 ARM_FMRRD (code, ARMREG_R0, ARMREG_R1, ins->sreg1);
4113 case OP_FCONV_TO_I1:
4114 code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 1, TRUE);
4116 case OP_FCONV_TO_U1:
4117 code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 1, FALSE);
4119 case OP_FCONV_TO_I2:
4120 code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 2, TRUE);
4122 case OP_FCONV_TO_U2:
4123 code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 2, FALSE);
4125 case OP_FCONV_TO_I4:
4127 code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 4, TRUE);
4129 case OP_FCONV_TO_U4:
4131 code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 4, FALSE);
4133 case OP_FCONV_TO_I8:
4134 case OP_FCONV_TO_U8:
4135 g_assert_not_reached ();
4136 /* Implemented as helper calls */
4138 case OP_LCONV_TO_R_UN:
4139 g_assert_not_reached ();
4140 /* Implemented as helper calls */
4142 case OP_LCONV_TO_OVF_I4_2: {
4143 guint8 *high_bit_not_set, *valid_negative, *invalid_negative, *valid_positive;
4145 * Valid ints: 0xffffffff:8000000 to 00000000:0x7f000000
4148 ARM_CMP_REG_IMM8 (code, ins->sreg1, 0);
4149 high_bit_not_set = code;
4150 ARM_B_COND (code, ARMCOND_GE, 0); /*branch if bit 31 of the lower part is not set*/
4152 ARM_CMN_REG_IMM8 (code, ins->sreg2, 1); /*This have the same effect as CMP reg, 0xFFFFFFFF */
4153 valid_negative = code;
4154 ARM_B_COND (code, ARMCOND_EQ, 0); /*branch if upper part == 0xFFFFFFFF (lower part has bit 31 set) */
4155 invalid_negative = code;
4156 ARM_B_COND (code, ARMCOND_AL, 0);
4158 arm_patch (high_bit_not_set, code);
4160 ARM_CMP_REG_IMM8 (code, ins->sreg2, 0);
4161 valid_positive = code;
4162 ARM_B_COND (code, ARMCOND_EQ, 0); /*branch if upper part == 0 (lower part has bit 31 clear)*/
4164 arm_patch (invalid_negative, code);
4165 EMIT_COND_SYSTEM_EXCEPTION_FLAGS (ARMCOND_AL, "OverflowException");
4167 arm_patch (valid_negative, code);
4168 arm_patch (valid_positive, code);
4170 if (ins->dreg != ins->sreg1)
4171 ARM_MOV_REG_REG (code, ins->dreg, ins->sreg1);
4176 ARM_FPA_ADFD (code, ins->dreg, ins->sreg1, ins->sreg2);
4179 ARM_FPA_SUFD (code, ins->dreg, ins->sreg1, ins->sreg2);
4182 ARM_FPA_MUFD (code, ins->dreg, ins->sreg1, ins->sreg2);
4185 ARM_FPA_DVFD (code, ins->dreg, ins->sreg1, ins->sreg2);
4188 ARM_MNFD (code, ins->dreg, ins->sreg1);
4190 #elif defined(ARM_FPU_VFP)
4192 ARM_VFP_ADDD (code, ins->dreg, ins->sreg1, ins->sreg2);
4195 ARM_VFP_SUBD (code, ins->dreg, ins->sreg1, ins->sreg2);
4198 ARM_VFP_MULD (code, ins->dreg, ins->sreg1, ins->sreg2);
4201 ARM_VFP_DIVD (code, ins->dreg, ins->sreg1, ins->sreg2);
4204 ARM_NEGD (code, ins->dreg, ins->sreg1);
4209 g_assert_not_reached ();
4213 ARM_FCMP (code, ARM_FPA_CMF, ins->sreg1, ins->sreg2);
4214 #elif defined(ARM_FPU_VFP)
4215 ARM_CMPD (code, ins->sreg1, ins->sreg2);
4221 ARM_FCMP (code, ARM_FPA_CMF, ins->sreg1, ins->sreg2);
4222 #elif defined(ARM_FPU_VFP)
4223 ARM_CMPD (code, ins->sreg1, ins->sreg2);
4226 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 0, ARMCOND_NE);
4227 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_EQ);
4231 ARM_FCMP (code, ARM_FPA_CMF, ins->sreg1, ins->sreg2);
4232 #elif defined(ARM_FPU_VFP)
4233 ARM_CMPD (code, ins->sreg1, ins->sreg2);
4236 ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
4237 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_MI);
4241 ARM_FCMP (code, ARM_FPA_CMF, ins->sreg1, ins->sreg2);
4242 #elif defined(ARM_FPU_VFP)
4243 ARM_CMPD (code, ins->sreg1, ins->sreg2);
4246 ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
4247 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_MI);
4248 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_VS);
4253 ARM_FCMP (code, ARM_FPA_CMF, ins->sreg2, ins->sreg1);
4254 #elif defined(ARM_FPU_VFP)
4255 ARM_CMPD (code, ins->sreg2, ins->sreg1);
4258 ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
4259 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_MI);
4264 ARM_FCMP (code, ARM_FPA_CMF, ins->sreg2, ins->sreg1);
4265 #elif defined(ARM_FPU_VFP)
4266 ARM_CMPD (code, ins->sreg2, ins->sreg1);
4269 ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
4270 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_MI);
4271 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_VS);
4273 /* ARM FPA flags table:
4274 * N Less than ARMCOND_MI
4275 * Z Equal ARMCOND_EQ
4276 * C Greater Than or Equal ARMCOND_CS
4277 * V Unordered ARMCOND_VS
4280 EMIT_COND_BRANCH (ins, OP_IBEQ - OP_IBEQ);
4283 EMIT_COND_BRANCH (ins, OP_IBNE_UN - OP_IBEQ);
4286 EMIT_COND_BRANCH_FLAGS (ins, ARMCOND_MI); /* N set */
4289 EMIT_COND_BRANCH_FLAGS (ins, ARMCOND_VS); /* V set */
4290 EMIT_COND_BRANCH_FLAGS (ins, ARMCOND_MI); /* N set */
4296 g_assert_not_reached ();
4300 EMIT_COND_BRANCH_FLAGS (ins, ARMCOND_GE);
4302 /* FPA requires EQ even thou the docs suggests that just CS is enough */
4303 EMIT_COND_BRANCH_FLAGS (ins, ARMCOND_EQ);
4304 EMIT_COND_BRANCH_FLAGS (ins, ARMCOND_CS);
4308 EMIT_COND_BRANCH_FLAGS (ins, ARMCOND_VS); /* V set */
4309 EMIT_COND_BRANCH_FLAGS (ins, ARMCOND_GE);
4314 if (ins->dreg != ins->sreg1)
4315 ARM_MVFD (code, ins->dreg, ins->sreg1);
4316 #elif defined(ARM_FPU_VFP)
4317 ARM_ABSD (code, ARM_VFP_D1, ins->sreg1);
4318 ARM_FLDD (code, ARM_VFP_D0, ARMREG_PC, 0);
4320 *(guint32*)code = 0xffffffff;
4322 *(guint32*)code = 0x7fefffff;
4324 ARM_CMPD (code, ARM_VFP_D1, ARM_VFP_D0);
4326 EMIT_COND_SYSTEM_EXCEPTION_FLAGS (ARMCOND_GT, "ArithmeticException");
4327 ARM_CMPD (code, ins->sreg1, ins->sreg1);
4329 EMIT_COND_SYSTEM_EXCEPTION_FLAGS (ARMCOND_VS, "ArithmeticException");
4331 ARM_CPYD (code, ins->dreg, ins->sreg1);
4336 g_warning ("unknown opcode %s in %s()\n", mono_inst_name (ins->opcode), __FUNCTION__);
4337 g_assert_not_reached ();
4340 if ((cfg->opt & MONO_OPT_BRANCH) && ((code - cfg->native_code - offset) > max_len)) {
4341 g_warning ("wrong maximal instruction length of instruction %s (expected %d, got %d)",
4342 mono_inst_name (ins->opcode), max_len, code - cfg->native_code - offset);
4343 g_assert_not_reached ();
4349 last_offset = offset;
4352 cfg->code_len = code - cfg->native_code;
4355 #endif /* DISABLE_JIT */
4357 #ifdef HAVE_AEABI_READ_TP
/* Toolchain-provided ARM EABI helper that returns the thread pointer.
 * Declared here only so its address can be registered as a JIT icall below;
 * the C signature is irrelevant (see comment at the registration site). */
4358 void __aeabi_read_tp (void);
/*
 * Register the ARM-specific low-level helpers with the JIT icall table so
 * generated code can call them by name. The dummy "void" signature is
 * deliberate: these entries are looked up by address/name only.
 */
4362 mono_arch_register_lowlevel_calls (void)
4364 /* The signature doesn't matter */
4365 mono_register_jit_icall (mono_arm_throw_exception, "mono_arm_throw_exception", mono_create_icall_signature ("void"), TRUE);
4366 mono_register_jit_icall (mono_arm_throw_exception_by_token, "mono_arm_throw_exception_by_token", mono_create_icall_signature ("void"), TRUE);
4368 #ifndef MONO_CROSS_COMPILE
4369 #ifdef HAVE_AEABI_READ_TP
/* Only meaningful on a real EABI Linux target (see HAVE_AEABI_READ_TP in the
 * file header); skipped when cross-compiling since the symbol may not exist. */
4370 mono_register_jit_icall (__aeabi_read_tp, "__aeabi_read_tp", mono_create_icall_signature ("void"), TRUE);
/*
 * Patch the 16-bit immediates of a lis/ori style 2-instruction pair at `ip`
 * with the high and low halves of `val`. NOTE(review): "lis/ori" is PowerPC
 * terminology and every use below is guarded by g_assert_not_reached (), so
 * this looks like dead code inherited from the PPC backend — confirm before
 * relying on it. Indexing as guint16[1]/[3] assumes little-endian word layout.
 */
4375 #define patch_lis_ori(ip,val) do {\
4376 guint16 *__lis_ori = (guint16*)(ip); \
4377 __lis_ori [1] = (((guint32)(val)) >> 16) & 0xffff; \
4378 __lis_ori [3] = ((guint32)(val)) & 0xffff; \
/*
 * Resolve and apply all patch-site fixups recorded for `code` (the start of
 * the method's native code). For each MonoJumpInfo: compute the target via
 * mono_resolve_patch_target () and splice it into the instruction stream with
 * arm_patch_general (). Two special cases are handled before the generic path:
 * inline switch jump tables, and patch kinds that need no work at all.
 *
 * method/domain: context for target resolution.
 * ji:            linked list of patch records (ip offsets relative to `code`).
 * run_cctors:    FALSE when emitting AOT code (compile_aot below).
 */
4382 mono_arch_patch_code (MonoMethod *method, MonoDomain *domain, guint8 *code, MonoJumpInfo *ji, gboolean run_cctors)
4384 MonoJumpInfo *patch_info;
4385 gboolean compile_aot = !run_cctors;
4387 for (patch_info = ji; patch_info; patch_info = patch_info->next) {
4388 unsigned char *ip = patch_info->ip.i + code;
4389 const unsigned char *target;
4391 if (patch_info->type == MONO_PATCH_INFO_SWITCH && !compile_aot) {
/* The jump table was emitted inline right after the dispatch instructions
 * (see the OP_SWITCH emission earlier in this file); fill in absolute
 * addresses now that final code placement is known. */
4392 gpointer *jt = (gpointer*)(ip + 8);
4394 /* jt is the inlined jump table, 2 instructions after ip
4395 * In the normal case we store the absolute addresses,
4396 * otherwise the displacements.
4398 for (i = 0; i < patch_info->data.table->table_size; i++)
4399 jt [i] = code + (int)patch_info->data.table->table [i];
4402 target = mono_resolve_patch_target (method, domain, code, patch_info, run_cctors);
4405 switch (patch_info->type) {
4406 case MONO_PATCH_INFO_BB:
4407 case MONO_PATCH_INFO_LABEL:
4410 /* No need to patch these */
4415 switch (patch_info->type) {
/* The cases below that start with g_assert_not_reached () are leftovers
 * from another backend (the lis/ori pair is PowerPC-style); they are not
 * expected to be produced for ARM. */
4416 case MONO_PATCH_INFO_IP:
4417 g_assert_not_reached ();
4418 patch_lis_ori (ip, ip);
4420 case MONO_PATCH_INFO_METHOD_REL:
4421 g_assert_not_reached ();
4422 *((gpointer *)(ip)) = code + patch_info->data.offset;
4424 case MONO_PATCH_INFO_METHODCONST:
4425 case MONO_PATCH_INFO_CLASS:
4426 case MONO_PATCH_INFO_IMAGE:
4427 case MONO_PATCH_INFO_FIELD:
4428 case MONO_PATCH_INFO_VTABLE:
4429 case MONO_PATCH_INFO_IID:
4430 case MONO_PATCH_INFO_SFLDA:
4431 case MONO_PATCH_INFO_LDSTR:
4432 case MONO_PATCH_INFO_TYPE_FROM_HANDLE:
4433 case MONO_PATCH_INFO_LDTOKEN:
4434 g_assert_not_reached ();
4435 /* from OP_AOTCONST : lis + ori */
4436 patch_lis_ori (ip, target);
4438 case MONO_PATCH_INFO_R4:
4439 case MONO_PATCH_INFO_R8:
4440 g_assert_not_reached ();
4441 *((gconstpointer *)(ip + 2)) = patch_info->data.target;
4443 case MONO_PATCH_INFO_EXC_NAME:
4444 g_assert_not_reached ();
4445 *((gconstpointer *)(ip + 1)) = patch_info->data.name;
4447 case MONO_PATCH_INFO_NONE:
4448 case MONO_PATCH_INFO_BB_OVF:
4449 case MONO_PATCH_INFO_EXC_OVF:
4450 /* everything is dealt with at epilog output time */
/* Generic path: rewrite the branch/load at ip to reference target. */
4455 arm_patch_general (domain, ip, target);
4462 * Stack frame layout:
4464 * ------------------- fp
4465 * MonoLMF structure or saved registers
4466 * -------------------
4468 * -------------------
4470 * -------------------
4471 * optional 8 bytes for tracing
4472 * -------------------
4473 * param area size is cfg->param_area
4474 * ------------------- sp
/*
 * Emit the method prolog: allocate the native code buffer, push the
 * callee-saved registers (or the larger register set needed for an LMF
 * frame), carve out the stack frame, spill incoming arguments to their
 * home locations, and optionally link an LMF, attach the thread, and set
 * up sequence-point bookkeeping. Unwind ops are recorded alongside every
 * SP/frame change so the stack walker can unwind through the prolog.
 * See the "Stack frame layout" comment just above this function.
 * Returns the updated `code` pointer (end of the emitted prolog).
 */
4477 mono_arch_emit_prolog (MonoCompile *cfg)
4479 MonoMethod *method = cfg->method;
4481 MonoMethodSignature *sig;
4483 int alloc_size, pos, max_offset, i, rot_amount;
4488 int prev_sp_offset, reg_offset;
4490 if (mono_jit_trace_calls != NULL && mono_trace_eval (method))
/* Initial buffer size is a heuristic; overflow is handled by reallocs
 * elsewhere (see the epilog/exception emitters). */
4493 sig = mono_method_signature (method);
4494 cfg->code_size = 256 + sig->param_count * 20;
4495 code = cfg->native_code = g_malloc (cfg->code_size);
4497 mono_emit_unwind_op_def_cfa (cfg, code, ARMREG_SP, 0);
4499 ARM_MOV_REG_REG (code, ARMREG_IP, ARMREG_SP);
4501 alloc_size = cfg->stack_offset;
4504 if (!method->save_lmf) {
4505 /* We save SP by storing it into IP and saving IP */
4506 ARM_PUSH (code, (cfg->used_int_regs | (1 << ARMREG_IP) | (1 << ARMREG_LR)));
4507 prev_sp_offset = 8; /* ip and lr */
4508 for (i = 0; i < 16; ++i) {
4509 if (cfg->used_int_regs & (1 << i))
4510 prev_sp_offset += 4;
4512 mono_emit_unwind_op_def_cfa_offset (cfg, code, prev_sp_offset);
/* Record a save-slot unwind op for each register the PUSH stored. */
4514 for (i = 0; i < 16; ++i) {
4515 if ((cfg->used_int_regs & (1 << i)) || (i == ARMREG_IP) || (i == ARMREG_LR)) {
4516 mono_emit_unwind_op_offset (cfg, code, i, (- prev_sp_offset) + reg_offset);
/* LMF frame: push r4-r12 and lr (mask 0x5ff0) so the callee-saved state
 * already sits where the MonoLMF layout expects it. */
4521 ARM_PUSH (code, 0x5ff0);
4522 prev_sp_offset = 4 * 10; /* all but r0-r3, sp and pc */
4523 mono_emit_unwind_op_def_cfa_offset (cfg, code, prev_sp_offset);
4525 for (i = 0; i < 16; ++i) {
4526 if ((i > ARMREG_R3) && (i != ARMREG_SP) && (i != ARMREG_PC)) {
4527 mono_emit_unwind_op_offset (cfg, code, i, (- prev_sp_offset) + reg_offset);
4531 pos += sizeof (MonoLMF) - prev_sp_offset;
4535 // align to MONO_ARCH_FRAME_ALIGNMENT bytes
4536 if (alloc_size & (MONO_ARCH_FRAME_ALIGNMENT - 1)) {
4537 alloc_size += MONO_ARCH_FRAME_ALIGNMENT - 1;
4538 alloc_size &= ~(MONO_ARCH_FRAME_ALIGNMENT - 1);
4541 /* the stack used in the pushed regs */
4542 if (prev_sp_offset & 4)
4544 cfg->stack_usage = alloc_size;
/* Drop SP by alloc_size; use a rotated-imm8 SUB when it encodes, else
 * materialize the constant in IP first. */
4546 if ((i = mono_arm_is_rotated_imm8 (alloc_size, &rot_amount)) >= 0) {
4547 ARM_SUB_REG_IMM (code, ARMREG_SP, ARMREG_SP, i, rot_amount);
4549 code = mono_arm_emit_load_imm (code, ARMREG_IP, alloc_size);
4550 ARM_SUB_REG_REG (code, ARMREG_SP, ARMREG_SP, ARMREG_IP);
4552 mono_emit_unwind_op_def_cfa_offset (cfg, code, prev_sp_offset + alloc_size);
4554 if (cfg->frame_reg != ARMREG_SP) {
4555 ARM_MOV_REG_REG (code, cfg->frame_reg, ARMREG_SP);
4556 mono_emit_unwind_op_def_cfa_reg (cfg, code, cfg->frame_reg);
4558 //g_print ("prev_sp_offset: %d, alloc_size:%d\n", prev_sp_offset, alloc_size);
4559 prev_sp_offset += alloc_size;
4561 /* compute max_offset in order to use short forward jumps
4562 * we could skip do it on arm because the immediate displacement
4563 * for jumps is large enough, it may be useful later for constant pools
4566 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
4567 MonoInst *ins = bb->code;
4568 bb->max_offset = max_offset;
4570 if (cfg->prof_options & MONO_PROFILE_COVERAGE)
4573 MONO_BB_FOR_EACH_INS (bb, ins)
4574 max_offset += ((guint8 *)ins_get_spec (ins->opcode))[MONO_INST_LEN];
4577 /* store runtime generic context */
4578 if (cfg->rgctx_var) {
4579 MonoInst *ins = cfg->rgctx_var;
4581 g_assert (ins->opcode == OP_REGOFFSET);
4583 if (arm_is_imm12 (ins->inst_offset)) {
4584 ARM_STR_IMM (code, MONO_ARCH_RGCTX_REG, ins->inst_basereg, ins->inst_offset);
4586 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_offset);
4587 ARM_STR_REG_REG (code, MONO_ARCH_RGCTX_REG, ins->inst_basereg, ARMREG_LR);
4591 /* load arguments allocated to register from the stack */
4594 cinfo = get_call_info (NULL, sig, sig->pinvoke);
/* Valuetype returns passed by hidden pointer: spill the vret address. */
4596 if (MONO_TYPE_ISSTRUCT (sig->ret) && cinfo->ret.storage != RegTypeStructByVal) {
4597 ArgInfo *ainfo = &cinfo->ret;
4598 inst = cfg->vret_addr;
4599 g_assert (arm_is_imm12 (inst->inst_offset));
4600 ARM_STR_IMM (code, ainfo->reg, inst->inst_basereg, inst->inst_offset);
4603 if (sig->call_convention == MONO_CALL_VARARG) {
4604 ArgInfo *cookie = &cinfo->sig_cookie;
4606 /* Save the sig cookie address */
4607 g_assert (cookie->storage == RegTypeBase);
4609 g_assert (arm_is_imm12 (prev_sp_offset + cookie->offset));
4610 g_assert (arm_is_imm12 (cfg->sig_cookie));
4611 ARM_ADD_REG_IMM8 (code, ARMREG_IP, cfg->frame_reg, prev_sp_offset + cookie->offset);
4612 ARM_STR_IMM (code, ARMREG_IP, cfg->frame_reg, cfg->sig_cookie);
/* Move each incoming argument from its ABI location (register or caller
 * stack) to the slot/register the register allocator chose for it. */
4615 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
4616 ArgInfo *ainfo = cinfo->args + i;
4617 inst = cfg->args [pos];
4619 if (cfg->verbose_level > 2)
4620 g_print ("Saving argument %d (type: %d)\n", i, ainfo->storage);
4621 if (inst->opcode == OP_REGVAR) {
4622 if (ainfo->storage == RegTypeGeneral)
4623 ARM_MOV_REG_REG (code, inst->dreg, ainfo->reg);
4624 else if (ainfo->storage == RegTypeFP) {
4625 g_assert_not_reached ();
4626 } else if (ainfo->storage == RegTypeBase) {
4627 if (arm_is_imm12 (prev_sp_offset + ainfo->offset)) {
4628 ARM_LDR_IMM (code, inst->dreg, ARMREG_SP, (prev_sp_offset + ainfo->offset));
/* NOTE(review): the fallback loads inst->inst_offset rather than
 * prev_sp_offset + ainfo->offset like the imm12 path above — looks
 * inconsistent; verify against upstream before changing. */
4630 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
4631 ARM_LDR_REG_REG (code, inst->dreg, ARMREG_SP, ARMREG_IP);
4634 g_assert_not_reached ();
4636 if (cfg->verbose_level > 2)
4637 g_print ("Argument %d assigned to register %s\n", pos, mono_arch_regname (inst->dreg));
4639 /* the argument should be put on the stack: FIXME handle size != word */
4640 if (ainfo->storage == RegTypeGeneral || ainfo->storage == RegTypeIRegPair) {
/* Store from the incoming argument register(s), sized by ainfo->size. */
4641 switch (ainfo->size) {
4643 if (arm_is_imm12 (inst->inst_offset))
4644 ARM_STRB_IMM (code, ainfo->reg, inst->inst_basereg, inst->inst_offset);
4646 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
4647 ARM_STRB_REG_REG (code, ainfo->reg, inst->inst_basereg, ARMREG_IP);
4651 if (arm_is_imm8 (inst->inst_offset)) {
4652 ARM_STRH_IMM (code, ainfo->reg, inst->inst_basereg, inst->inst_offset);
4654 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
4655 ARM_STRH_REG_REG (code, ainfo->reg, inst->inst_basereg, ARMREG_IP);
/* 8-byte case: store the register pair to two consecutive words. */
4659 g_assert (arm_is_imm12 (inst->inst_offset));
4660 ARM_STR_IMM (code, ainfo->reg, inst->inst_basereg, inst->inst_offset);
4661 g_assert (arm_is_imm12 (inst->inst_offset + 4));
4662 ARM_STR_IMM (code, ainfo->reg + 1, inst->inst_basereg, inst->inst_offset + 4);
4665 if (arm_is_imm12 (inst->inst_offset)) {
4666 ARM_STR_IMM (code, ainfo->reg, inst->inst_basereg, inst->inst_offset);
4668 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
4669 ARM_STR_REG_REG (code, ainfo->reg, inst->inst_basereg, ARMREG_IP);
/* RegTypeBaseGen: 64-bit value split between r3 (low word) and the
 * caller's stack (high word). */
4673 } else if (ainfo->storage == RegTypeBaseGen) {
4674 g_assert (arm_is_imm12 (prev_sp_offset + ainfo->offset));
4675 g_assert (arm_is_imm12 (inst->inst_offset));
4676 ARM_LDR_IMM (code, ARMREG_LR, ARMREG_SP, (prev_sp_offset + ainfo->offset));
4677 ARM_STR_IMM (code, ARMREG_LR, inst->inst_basereg, inst->inst_offset + 4);
4678 ARM_STR_IMM (code, ARMREG_R3, inst->inst_basereg, inst->inst_offset);
4679 } else if (ainfo->storage == RegTypeBase) {
/* Argument arrived entirely on the caller's stack: load via LR, then
 * store to its home slot with the size-appropriate instruction. */
4680 if (arm_is_imm12 (prev_sp_offset + ainfo->offset)) {
4681 ARM_LDR_IMM (code, ARMREG_LR, ARMREG_SP, (prev_sp_offset + ainfo->offset));
4683 code = mono_arm_emit_load_imm (code, ARMREG_IP, prev_sp_offset + ainfo->offset);
4684 ARM_LDR_REG_REG (code, ARMREG_LR, ARMREG_SP, ARMREG_IP);
4687 switch (ainfo->size) {
4689 if (arm_is_imm8 (inst->inst_offset)) {
4690 ARM_STRB_IMM (code, ARMREG_LR, inst->inst_basereg, inst->inst_offset);
4692 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
4693 ARM_STRB_REG_REG (code, ARMREG_LR, inst->inst_basereg, ARMREG_IP);
4697 if (arm_is_imm8 (inst->inst_offset)) {
4698 ARM_STRH_IMM (code, ARMREG_LR, inst->inst_basereg, inst->inst_offset);
4700 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
4701 ARM_STRH_REG_REG (code, ARMREG_LR, inst->inst_basereg, ARMREG_IP);
4705 if (arm_is_imm12 (inst->inst_offset)) {
4706 ARM_STR_IMM (code, ARMREG_LR, inst->inst_basereg, inst->inst_offset);
4708 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
4709 ARM_STR_REG_REG (code, ARMREG_LR, inst->inst_basereg, ARMREG_IP);
/* Second word of an 8-byte stack argument. */
4711 if (arm_is_imm12 (prev_sp_offset + ainfo->offset + 4)) {
4712 ARM_LDR_IMM (code, ARMREG_LR, ARMREG_SP, (prev_sp_offset + ainfo->offset + 4));
4714 code = mono_arm_emit_load_imm (code, ARMREG_IP, prev_sp_offset + ainfo->offset + 4);
4715 ARM_LDR_REG_REG (code, ARMREG_LR, ARMREG_SP, ARMREG_IP);
4717 if (arm_is_imm12 (inst->inst_offset + 4)) {
4718 ARM_STR_IMM (code, ARMREG_LR, inst->inst_basereg, inst->inst_offset + 4);
4720 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset + 4);
4721 ARM_STR_REG_REG (code, ARMREG_LR, inst->inst_basereg, ARMREG_IP);
4725 if (arm_is_imm12 (inst->inst_offset)) {
4726 ARM_STR_IMM (code, ARMREG_LR, inst->inst_basereg, inst->inst_offset);
4728 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
4729 ARM_STR_REG_REG (code, ARMREG_LR, inst->inst_basereg, ARMREG_IP);
4733 } else if (ainfo->storage == RegTypeFP) {
4734 g_assert_not_reached ();
/* Struct passed partially/fully in registers: store the register part
 * word by word, then memcpy any remainder from the caller's stack. */
4735 } else if (ainfo->storage == RegTypeStructByVal) {
4736 int doffset = inst->inst_offset;
4740 size = mini_type_stack_size_full (cfg->generic_sharing_context, inst->inst_vtype, NULL, sig->pinvoke);
4741 for (cur_reg = 0; cur_reg < ainfo->size; ++cur_reg) {
4742 if (arm_is_imm12 (doffset)) {
4743 ARM_STR_IMM (code, ainfo->reg + cur_reg, inst->inst_basereg, doffset);
4745 code = mono_arm_emit_load_imm (code, ARMREG_IP, doffset);
4746 ARM_STR_REG_REG (code, ainfo->reg + cur_reg, inst->inst_basereg, ARMREG_IP);
4748 soffset += sizeof (gpointer);
4749 doffset += sizeof (gpointer);
4751 if (ainfo->vtsize) {
4752 /* FIXME: handle overrun! with struct sizes not multiple of 4 */
4753 //g_print ("emit_memcpy (prev_sp_ofs: %d, ainfo->offset: %d, soffset: %d)\n", prev_sp_offset, ainfo->offset, soffset);
4754 code = emit_memcpy (code, ainfo->vtsize * sizeof (gpointer), inst->inst_basereg, doffset, ARMREG_SP, prev_sp_offset + ainfo->offset);
4756 } else if (ainfo->storage == RegTypeStructByAddr) {
4757 g_assert_not_reached ();
4758 /* FIXME: handle overrun! with struct sizes not multiple of 4 */
4759 code = emit_memcpy (code, ainfo->vtsize * sizeof (gpointer), inst->inst_basereg, inst->inst_offset, ainfo->reg, 0);
4761 g_assert_not_reached ();
/* Native-to-managed wrappers must attach the calling thread to the
 * runtime before any managed code runs. */
4766 if (method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED) {
4767 if (cfg->compile_aot)
4768 /* AOT code is only used in the root domain */
4769 code = mono_arm_emit_load_imm (code, ARMREG_R0, 0);
4771 code = mono_arm_emit_load_imm (code, ARMREG_R0, (guint32)cfg->domain);
4772 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_INTERNAL_METHOD,
4773 (gpointer)"mono_jit_thread_attach");
4774 code = emit_call_seq (cfg, code);
4777 if (method->save_lmf) {
4778 gboolean get_lmf_fast = FALSE;
4780 #ifdef HAVE_AEABI_READ_TP
/* Fast path: fetch the LMF address straight out of TLS via the EABI
 * thread-pointer helper, avoiding the mono_get_lmf_addr call. */
4781 gint32 lmf_addr_tls_offset = mono_get_lmf_addr_tls_offset ();
4783 if (lmf_addr_tls_offset != -1) {
4784 get_lmf_fast = TRUE;
4786 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_INTERNAL_METHOD,
4787 (gpointer)"__aeabi_read_tp");
4788 code = emit_call_seq (cfg, code);
4790 ARM_LDR_IMM (code, ARMREG_R0, ARMREG_R0, lmf_addr_tls_offset);
4791 get_lmf_fast = TRUE;
4794 if (!get_lmf_fast) {
4795 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_INTERNAL_METHOD,
4796 (gpointer)"mono_get_lmf_addr");
4797 code = emit_call_seq (cfg, code);
4799 /* we build the MonoLMF structure on the stack - see mini-arm.h */
4800 /* lmf_offset is the offset from the previous stack pointer,
4801 * alloc_size is the total stack space allocated, so the offset
4802 * of MonoLMF from the current stack ptr is alloc_size - lmf_offset.
4803 * The pointer to the struct is put in r1 (new_lmf).
4804 * r2 is used as scratch
4805 * The callee-saved registers are already in the MonoLMF structure
4807 code = emit_big_add (code, ARMREG_R1, ARMREG_SP, alloc_size - lmf_offset);
4808 /* r0 is the result from mono_get_lmf_addr () */
4809 ARM_STR_IMM (code, ARMREG_R0, ARMREG_R1, G_STRUCT_OFFSET (MonoLMF, lmf_addr));
4810 /* new_lmf->previous_lmf = *lmf_addr */
4811 ARM_LDR_IMM (code, ARMREG_R2, ARMREG_R0, G_STRUCT_OFFSET (MonoLMF, previous_lmf));
4812 ARM_STR_IMM (code, ARMREG_R2, ARMREG_R1, G_STRUCT_OFFSET (MonoLMF, previous_lmf));
4813 /* *(lmf_addr) = r1 */
4814 ARM_STR_IMM (code, ARMREG_R1, ARMREG_R0, G_STRUCT_OFFSET (MonoLMF, previous_lmf));
4815 /* Skip method (only needed for trampoline LMF frames) */
4816 ARM_STR_IMM (code, ARMREG_SP, ARMREG_R1, G_STRUCT_OFFSET (MonoLMF, ebp));
4817 /* save the current IP */
4818 ARM_MOV_REG_REG (code, ARMREG_R2, ARMREG_PC);
4819 ARM_STR_IMM (code, ARMREG_R2, ARMREG_R1, G_STRUCT_OFFSET (MonoLMF, eip));
4823 code = mono_arch_instrument_prolog (cfg, mono_trace_enter_method, code, TRUE);
/* Soft-debug support: load the SeqPointInfo pointer (from a GOT slot in
 * AOT) into its stack variable for use by sequence-point code. */
4825 if (cfg->arch.seq_point_info_var) {
4826 MonoInst *ins = cfg->arch.seq_point_info_var;
4828 /* Initialize the variable from a GOT slot */
4829 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_SEQ_POINT_INFO, cfg->method);
4830 ARM_LDR_IMM (code, ARMREG_R0, ARMREG_PC, 0);
4832 *(gpointer*)code = NULL;
4834 ARM_LDR_REG_REG (code, ARMREG_R0, ARMREG_PC, ARMREG_R0);
4836 g_assert (ins->opcode == OP_REGOFFSET);
4838 if (arm_is_imm12 (ins->inst_offset)) {
4839 ARM_STR_IMM (code, ARMREG_R0, ins->inst_basereg, ins->inst_offset);
4841 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_offset);
4842 ARM_STR_REG_REG (code, ARMREG_R0, ins->inst_basereg, ARMREG_LR);
4846 /* Initialize ss_trigger_page_var */
4848 MonoInst *info_var = cfg->arch.seq_point_info_var;
4849 MonoInst *ss_trigger_page_var = cfg->arch.ss_trigger_page_var;
4850 int dreg = ARMREG_LR;
4853 g_assert (info_var->opcode == OP_REGOFFSET);
4854 g_assert (arm_is_imm12 (info_var->inst_offset));
4856 ARM_LDR_IMM (code, dreg, info_var->inst_basereg, info_var->inst_offset);
4857 /* Load the trigger page addr */
4858 ARM_LDR_IMM (code, dreg, dreg, G_STRUCT_OFFSET (SeqPointInfo, ss_trigger_page));
4859 ARM_STR_IMM (code, dreg, ss_trigger_page_var->inst_basereg, ss_trigger_page_var->inst_offset);
4863 cfg->code_len = code - cfg->native_code;
4864 g_assert (cfg->code_len < cfg->code_size);
/*
 * Emit the method epilog: grow the code buffer if needed, emit the
 * leave-method trace hook, reload struct return values into r0, unlink
 * the LMF (when one was pushed in the prolog), tear down the stack frame
 * and return by popping the saved lr into pc.
 */
4871 mono_arch_emit_epilog (MonoCompile *cfg)
4873 MonoMethod *method = cfg->method;
4874 int pos, i, rot_amount;
/* Conservative upper bound for the epilog size; bumped below for the
 * LMF / tracing / profiling variants. */
4875 int max_epilog_size = 16 + 20*4;
4879 if (cfg->method->save_lmf)
4880 max_epilog_size += 128;
4882 if (mono_jit_trace_calls != NULL)
4883 max_epilog_size += 50;
4885 if (cfg->prof_options & MONO_PROFILE_ENTER_LEAVE)
4886 max_epilog_size += 50;
/* Double the buffer until the worst-case epilog fits (16-byte slack). */
4888 while (cfg->code_len + max_epilog_size > (cfg->code_size - 16)) {
4889 cfg->code_size *= 2;
4890 cfg->native_code = g_realloc (cfg->native_code, cfg->code_size);
4891 mono_jit_stats.code_reallocs++;
4895 * Keep in sync with OP_JMP
4897 code = cfg->native_code + cfg->code_len;
4899 if (mono_jit_trace_calls != NULL && mono_trace_eval (method)) {
4900 code = mono_arch_instrument_epilog (cfg, mono_trace_leave_method, code, TRUE);
4904 /* Load returned vtypes into registers if needed */
4905 cinfo = cfg->arch.cinfo;
4906 if (cinfo->ret.storage == RegTypeStructByVal) {
4907 MonoInst *ins = cfg->ret;
4909 if (arm_is_imm12 (ins->inst_offset)) {
4910 ARM_LDR_IMM (code, ARMREG_R0, ins->inst_basereg, ins->inst_offset);
4912 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_offset);
4913 ARM_LDR_REG_REG (code, ARMREG_R0, ins->inst_basereg, ARMREG_LR);
4917 if (method->save_lmf) {
4919 /* all but r0-r3, sp and pc */
4920 pos += sizeof (MonoLMF) - (4 * 10);
4922 /* r2 contains the pointer to the current LMF */
4923 code = emit_big_add (code, ARMREG_R2, cfg->frame_reg, cfg->stack_usage - lmf_offset);
4924 /* ip = previous_lmf */
4925 ARM_LDR_IMM (code, ARMREG_IP, ARMREG_R2, G_STRUCT_OFFSET (MonoLMF, previous_lmf));
4927 ARM_LDR_IMM (code, ARMREG_LR, ARMREG_R2, G_STRUCT_OFFSET (MonoLMF, lmf_addr));
4928 /* *(lmf_addr) = previous_lmf */
4929 ARM_STR_IMM (code, ARMREG_IP, ARMREG_LR, G_STRUCT_OFFSET (MonoLMF, previous_lmf));
4930 /* FIXME: speedup: there is no actual need to restore the registers if
4931 * we didn't actually change them (idea from Zoltan).
4934 /* point sp at the registers to restore: 10 is 14 -4, because we skip r0-r3 */
4935 ARM_ADD_REG_IMM8 (code, ARMREG_SP, ARMREG_R2, (sizeof (MonoLMF) - 10 * sizeof (gulong)));
4936 ARM_POP_NWB (code, 0xaff0); /* restore ip to sp and lr to pc */
/* Non-LMF frame: rewind SP past the locals (rotated-imm8 ADD when the
 * constant encodes, otherwise via IP), then pop saved regs + return. */
4938 if ((i = mono_arm_is_rotated_imm8 (cfg->stack_usage, &rot_amount)) >= 0) {
4939 ARM_ADD_REG_IMM (code, ARMREG_SP, cfg->frame_reg, i, rot_amount);
4941 code = mono_arm_emit_load_imm (code, ARMREG_IP, cfg->stack_usage);
4942 ARM_ADD_REG_REG (code, ARMREG_SP, ARMREG_SP, ARMREG_IP);
4944 /* FIXME: add v4 thumb interworking support */
4945 ARM_POP_NWB (code, cfg->used_int_regs | ((1 << ARMREG_SP) | (1 << ARMREG_PC)));
4948 cfg->code_len = code - cfg->native_code;
4950 g_assert (cfg->code_len < cfg->code_size);
4954 /* remove once throw_exception_by_name is eliminated */
/*
 * Map a corlib exception class name to its MONO_EXC_* intrinsic id, used
 * by mono_arch_emit_exceptions below to dedupe per-exception throw stubs.
 * Aborts via g_error on any name outside the known intrinsic set, so there
 * is no failure return value.
 */
4956 exception_id_by_name (const char *name)
4958 if (strcmp (name, "IndexOutOfRangeException") == 0)
4959 return MONO_EXC_INDEX_OUT_OF_RANGE;
4960 if (strcmp (name, "OverflowException") == 0)
4961 return MONO_EXC_OVERFLOW;
4962 if (strcmp (name, "ArithmeticException") == 0)
4963 return MONO_EXC_ARITHMETIC;
4964 if (strcmp (name, "DivideByZeroException") == 0)
4965 return MONO_EXC_DIVIDE_BY_ZERO;
4966 if (strcmp (name, "InvalidCastException") == 0)
4967 return MONO_EXC_INVALID_CAST;
4968 if (strcmp (name, "NullReferenceException") == 0)
4969 return MONO_EXC_NULL_REF;
4970 if (strcmp (name, "ArrayTypeMismatchException") == 0)
4971 return MONO_EXC_ARRAY_TYPE_MISMATCH;
4972 g_error ("Unknown intrinsic exception %s\n", name);
/* Appends the out-of-line stubs that raise intrinsic exceptions
 * (IndexOutOfRange, Overflow, ...) for every MONO_PATCH_INFO_EXC patch,
 * growing the native code buffer first so the stubs are guaranteed to fit. */
4977 mono_arch_emit_exceptions (MonoCompile *cfg)
4979 MonoJumpInfo *patch_info;
/* One shared throw stub per distinct exception type; exc_throw_pos caches
 * the stub address, exc_throw_found marks types already accounted for. */
4982 const guint8* exc_throw_pos [MONO_EXC_INTRINS_NUM] = {NULL};
4983 guint8 exc_throw_found [MONO_EXC_INTRINS_NUM] = {0};
4984 int max_epilog_size = 50;
4986 /* count the number of exception infos */
4989 * make sure we have enough space for exceptions
4991 for (patch_info = cfg->patch_info; patch_info; patch_info = patch_info->next) {
4992 if (patch_info->type == MONO_PATCH_INFO_EXC) {
4993 i = exception_id_by_name (patch_info->data.target);
4994 if (!exc_throw_found [i]) {
/* 32 bytes reserved per distinct type: the stub emitted in the loop below. */
4995 max_epilog_size += 32;
4996 exc_throw_found [i] = TRUE;
/* Double the buffer until the worst-case stub size fits (16-byte slack). */
5001 while (cfg->code_len + max_epilog_size > (cfg->code_size - 16)) {
5002 cfg->code_size *= 2;
5003 cfg->native_code = g_realloc (cfg->native_code, cfg->code_size);
5004 mono_jit_stats.code_reallocs++;
5007 code = cfg->native_code + cfg->code_len;
5009 /* add code to raise exceptions */
5010 for (patch_info = cfg->patch_info; patch_info; patch_info = patch_info->next) {
5011 switch (patch_info->type) {
5012 case MONO_PATCH_INFO_EXC: {
5013 MonoClass *exc_class;
5014 unsigned char *ip = patch_info->ip.i + cfg->native_code;
5016 i = exception_id_by_name (patch_info->data.target);
/* If a stub for this exception type was already emitted, just branch to it
 * and retire this patch. */
5017 if (exc_throw_pos [i]) {
5018 arm_patch (ip, exc_throw_pos [i]);
5019 patch_info->type = MONO_PATCH_INFO_NONE;
5022 exc_throw_pos [i] = code;
5024 arm_patch (ip, code);
5026 exc_class = mono_class_from_name (mono_defaults.corlib, "System", patch_info->data.name);
5027 g_assert (exc_class);
/* R1 = faulting return address; R0 is loaded from the inline literal below. */
5029 ARM_MOV_REG_REG (code, ARMREG_R1, ARMREG_LR);
5030 ARM_LDR_IMM (code, ARMREG_R0, ARMREG_PC, 0);
/* Repurpose the patch to target the generic corlib-exception throw helper. */
5031 patch_info->type = MONO_PATCH_INFO_INTERNAL_METHOD;
5032 patch_info->data.name = "mono_arch_throw_corlib_exception";
5033 patch_info->ip.i = code - cfg->native_code;
/* Inline literal word: the exception class token read by the LDR above. */
5035 *(guint32*)(gpointer)code = exc_class->type_token;
5045 cfg->code_len = code - cfg->native_code;
5047 g_assert (cfg->code_len < cfg->code_size);
5051 #endif /* #ifndef DISABLE_JIT */
5053 static gboolean tls_offset_inited = FALSE;
5056 mono_arch_setup_jit_tls_data (MonoJitTlsData *tls)
5058 if (!tls_offset_inited) {
5059 tls_offset_inited = TRUE;
5061 lmf_tls_offset = mono_get_lmf_tls_offset ();
5062 lmf_addr_tls_offset = mono_get_lmf_addr_tls_offset ();
/* Per-thread JIT TLS teardown hook; body not visible in this excerpt
 * (presumably empty on ARM — nothing arch-specific to free; confirm). */
5067 mono_arch_free_jit_tls_data (MonoJitTlsData *tls)
/* Arch hook to replace known methods with intrinsic instructions;
 * body not visible in this excerpt. */
5072 mono_arch_emit_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
/* Debug hook for printing arch-specific instruction trees;
 * body not visible in this excerpt. */
5079 mono_arch_print_tree (MonoInst *tree, int arity)
/* Delegates to the generic helper that loads the current MonoDomain. */
5085 mono_arch_get_domain_intrinsic (MonoCompile* cfg)
5087 return mono_get_domain_intrinsic (cfg);
/* Returns the offset of the patchable location inside CODE;
 * body not visible in this excerpt. */
5091 mono_arch_get_patch_offset (guint8 *code)
/* Register-window flush hook (a SPARC concept); body not visible here,
 * presumably a no-op on ARM — confirm against the full source. */
5098 mono_arch_flush_register_windows (void)
5102 #ifdef MONO_ARCH_HAVE_IMT
/* Arranges for the IMT/method argument of CALL to be passed in V5, which is
 * where the IMT thunk and mono_arch_find_imt_method () expect to find it. */
5107 mono_arch_emit_imt_argument (MonoCompile *cfg, MonoCallInst *call, MonoInst *imt_arg)
/* AOT case: the argument must travel in a register, since no constant can be
 * embedded in the (position-independent) code stream. */
5109 if (cfg->compile_aot) {
5110 int method_reg = mono_alloc_ireg (cfg);
5113 call->dynamic_imt_arg = TRUE;
5116 mono_call_inst_add_outarg_reg (cfg, call, imt_arg->dreg, ARMREG_V5, FALSE);
/* No explicit imt_arg: materialize the callee as an AOT method constant. */
5118 MONO_INST_NEW (cfg, ins, OP_AOTCONST);
5119 ins->dreg = method_reg;
5120 ins->inst_p0 = call->method;
5121 ins->inst_c1 = MONO_PATCH_INFO_METHODCONST;
5122 MONO_ADD_INS (cfg->cbb, ins);
5124 mono_call_inst_add_outarg_reg (cfg, call, method_reg, ARMREG_V5, FALSE);
5126 } else if (cfg->generic_context) {
5128 /* Always pass in a register for simplicity */
5129 call->dynamic_imt_arg = TRUE;
5131 cfg->uses_rgctx_reg = TRUE;
5134 mono_call_inst_add_outarg_reg (cfg, call, imt_arg->dreg, ARMREG_V5, FALSE);
/* Plain JIT case: embed the method pointer as an ordinary constant. */
5137 int method_reg = mono_alloc_preg (cfg);
5139 MONO_INST_NEW (cfg, ins, OP_PCONST);
5140 ins->inst_p0 = call->method;
5141 ins->dreg = method_reg;
5142 MONO_ADD_INS (cfg->cbb, ins);
5144 mono_call_inst_add_outarg_reg (cfg, call, method_reg, ARMREG_V5, FALSE);
5149 #endif /* DISABLE_JIT */
/* Recovers the MonoMethod used for an interface call at the call site CODE:
 * the JIT stores it as a literal word right after the pc-relative LDR. */
5152 mono_arch_find_imt_method (mgreg_t *regs, guint8 *code)
5154 guint32 *code_ptr = (guint32*)code;
5156 /* The IMT value is stored in the code stream right after the LDC instruction. */
5157 if (!IS_LDR_PC (code_ptr [0])) {
5158 g_warning ("invalid code stream, instruction before IMT value is not a LDC in %s() (code %p value 0: 0x%x -1: 0x%x -2: 0x%x)", __FUNCTION__, code, code_ptr [2], code_ptr [1], code_ptr [0]);
5159 g_assert (IS_LDR_PC (code_ptr [0]));
/* A zero literal is the AOT marker: the method is passed in V5 instead
 * (see mono_arch_emit_imt_argument). */
5161 if (code_ptr [1] == 0)
5162 /* This is AOTed code, the IMT method is in V5 */
5163 return (MonoMethod*)regs [ARMREG_V5];
5165 return (MonoMethod*) code_ptr [1];
/* For static (rgctx) calls the vtable travels in the RGCTX register;
 * simply read it back from the saved register state. */
5169 mono_arch_find_static_call_vtable (mgreg_t *regs, guint8 *code)
5171 return (MonoVTable*) regs [MONO_ARCH_RGCTX_REG];
/* Set to 1 to emit an assertion trap when the IMT thunk is entered with a
 * method it does not know about. */
5174 #define ENABLE_WRONG_METHOD_CHECK 0
/* Byte sizes of the code fragments emitted by mono_arch_build_imt_thunk ();
 * used to precompute each entry's chunk_size before reserving code memory. */
5175 #define BASE_SIZE (6 * 4)
5176 #define BSEARCH_ENTRY_SIZE (4 * 4)
5177 #define CMP_SIZE (3 * 4)
5178 #define BRANCH_SIZE (1 * 4)
5179 #define CALL_SIZE (2 * 4)
5180 #define WMC_SIZE (5 * 4)
/* Signed byte distance from A to B; truncates pointers to 32 bits
 * (fine here: this backend only targets 32-bit ARM). */
5181 #define DISTANCE(A, B) (((gint32)(B)) - ((gint32)(A)))
5184 arm_emit_value_and_patch_ldr (arminstr_t *code, arminstr_t *target, guint32 value)
5186 guint32 delta = DISTANCE (target, code);
5188 g_assert (delta >= 0 && delta <= 0xFFF);
5189 *target = *target | delta;
/* Builds the IMT dispatch thunk for VTABLE: a chain of compare-and-branch
 * entries (with bsearch-style skips) that matches the incoming IMT method in
 * R0 against each entry's key and jumps through the proper vtable slot.
 * Two passes: first size each entry, then emit the code. */
5195 mono_arch_build_imt_thunk (MonoVTable *vtable, MonoDomain *domain, MonoIMTCheckItem **imt_entries, int count,
5196 gpointer fail_tramp)
5198 int size, i, extra_space = 0;
5199 arminstr_t *code, *start, *vtable_target = NULL;
5200 gboolean large_offsets = FALSE;
/* Per-entry pointers into the emitted code where literal-pool values for the
 * preceding bsearch entries are placed (see the patch-up loop below). */
5201 guint32 **constant_pool_starts;
5204 constant_pool_starts = g_new0 (guint32*, count);
5207 * We might be called with a fail_tramp from the IMT builder code even if
5208 * MONO_ARCH_HAVE_GENERALIZED_IMT_THUNK is not defined.
5210 //g_assert (!fail_tramp);
/* Pass 1: compute each entry's chunk_size so the total reservation is known. */
5212 for (i = 0; i < count; ++i) {
5213 MonoIMTCheckItem *item = imt_entries [i];
5214 if (item->is_equals) {
/* Vtable slot too far for a 12-bit LDR offset: needs the push/LDM sequence. */
5215 if (!arm_is_imm12 (DISTANCE (vtable, &vtable->vtable[item->value.vtable_slot]))) {
5216 item->chunk_size += 32;
5217 large_offsets = TRUE;
5220 if (item->check_target_idx) {
5221 if (!item->compare_done)
5222 item->chunk_size += CMP_SIZE;
5223 item->chunk_size += BRANCH_SIZE;
5225 #if ENABLE_WRONG_METHOD_CHECK
5226 item->chunk_size += WMC_SIZE;
5229 item->chunk_size += CALL_SIZE;
5231 item->chunk_size += BSEARCH_ENTRY_SIZE;
5232 imt_entries [item->check_target_idx]->compare_done = TRUE;
5234 size += item->chunk_size;
5238 size += 4 * count; /* The ARM_ADD_REG_IMM to pop the stack */
5240 start = code = mono_domain_code_reserve (domain, size);
5243 printf ("building IMT thunk for class %s %s entries %d code size %d code at %p end %p vtable %p\n", vtable->klass->name_space, vtable->klass->name, count, size, start, ((guint8*)start) + size, vtable);
5244 for (i = 0; i < count; ++i) {
5245 MonoIMTCheckItem *item = imt_entries [i];
5246 printf ("method %d (%p) %s vtable slot %p is_equals %d chunk size %d\n", i, item->key, item->key->name, &vtable->vtable [item->value.vtable_slot], item->is_equals, item->chunk_size);
/* Prologue: save scratch registers (four of them when large offsets force
 * the push/LDM dispatch, two otherwise) and load the vtable address into IP
 * from a literal patched in later (vtable_target). */
5251 ARM_PUSH4 (code, ARMREG_R0, ARMREG_R1, ARMREG_IP, ARMREG_PC);
5253 ARM_PUSH2 (code, ARMREG_R0, ARMREG_R1);
5254 ARM_LDR_IMM (code, ARMREG_R0, ARMREG_LR, -4);
5255 vtable_target = code;
5256 ARM_LDR_IMM (code, ARMREG_IP, ARMREG_PC, 0);
5258 /* R0 == 0 means we are called from AOT code. In this case, V5 contains the IMT method */
5259 ARM_CMP_REG_IMM8 (code, ARMREG_R0, 0);
5260 ARM_MOV_REG_REG_COND (code, ARMREG_R0, ARMREG_V5, ARMCOND_EQ);
/* Pass 2: emit the compare/branch/call code for each entry. */
5262 for (i = 0; i < count; ++i) {
5263 MonoIMTCheckItem *item = imt_entries [i];
5264 arminstr_t *imt_method = NULL, *vtable_offset_ins = NULL;
5265 gint32 vtable_offset;
5267 item->code_target = (guint8*)code;
5269 if (item->is_equals) {
5270 if (item->check_target_idx) {
5271 if (!item->compare_done) {
/* Compare the incoming method (R0) against this entry's key, loaded from a
 * pc-relative literal patched below. */
5273 ARM_LDR_IMM (code, ARMREG_R1, ARMREG_PC, 0);
5274 ARM_CMP_REG_REG (code, ARMREG_R0, ARMREG_R1);
5276 item->jmp_code = (guint8*)code;
5277 ARM_B_COND (code, ARMCOND_NE, 0);
5279 /*Enable the commented code to assert on wrong method*/
5280 #if ENABLE_WRONG_METHOD_CHECK
5282 ARM_LDR_IMM (code, ARMREG_R1, ARMREG_PC, 0);
5283 ARM_CMP_REG_REG (code, ARMREG_R0, ARMREG_R1);
5284 ARM_B_COND (code, ARMCOND_NE, 1);
5290 vtable_offset = DISTANCE (vtable, &vtable->vtable[item->value.vtable_slot]);
5291 if (!arm_is_imm12 (vtable_offset)) {
5293 * We need to branch to a computed address but we don't have
5294 * a free register to store it, since IP must contain the
5295 * vtable address. So we push the two values to the stack, and
5296 * load them both using LDM.
5298 /* Compute target address */
5299 vtable_offset_ins = code;
5300 ARM_LDR_IMM (code, ARMREG_R1, ARMREG_PC, 0);
5301 ARM_LDR_REG_REG (code, ARMREG_R1, ARMREG_IP, ARMREG_R1);
5302 /* Save it to the fourth slot */
5303 ARM_STR_IMM (code, ARMREG_R1, ARMREG_SP, 3 * sizeof (gpointer));
5304 /* Restore registers and branch */
5305 ARM_POP4 (code, ARMREG_R0, ARMREG_R1, ARMREG_IP, ARMREG_PC);
5307 code = arm_emit_value_and_patch_ldr (code, vtable_offset_ins, vtable_offset);
/* Small-offset path: restore scratch registers, drop the extra prologue
 * slots, and jump straight through the vtable slot. */
5309 ARM_POP2 (code, ARMREG_R0, ARMREG_R1);
5311 ARM_ADD_REG_IMM8 (code, ARMREG_SP, ARMREG_SP, 2 * sizeof (gpointer));
5312 ARM_LDR_IMM (code, ARMREG_PC, ARMREG_IP, vtable_offset);
/* Emit the literal words referenced by the LDRs above (must come after the
 * unconditional branch so they are never executed as instructions). */
5316 code = arm_emit_value_and_patch_ldr (code, imt_method, (guint32)item->key);
5318 /*must emit after unconditional branch*/
5319 if (vtable_target) {
5320 code = arm_emit_value_and_patch_ldr (code, vtable_target, (guint32)vtable);
5321 item->chunk_size += 4;
5322 vtable_target = NULL;
5325 /*We reserve the space for bsearch IMT values after the first entry with an absolute jump*/
5326 constant_pool_starts [i] = code;
5328 code += extra_space;
/* bsearch entry: branch past this group when the incoming key is >= the
 * pivot key (loaded from a literal patched in the fix-up loop below). */
5332 ARM_LDR_IMM (code, ARMREG_R1, ARMREG_PC, 0);
5333 ARM_CMP_REG_REG (code, ARMREG_R0, ARMREG_R1);
5335 item->jmp_code = (guint8*)code;
5336 ARM_B_COND (code, ARMCOND_GE, 0);
/* Fix-up pass: patch the forward branches and fill the reserved constant
 * pools with the bsearch pivot keys. */
5341 for (i = 0; i < count; ++i) {
5342 MonoIMTCheckItem *item = imt_entries [i];
5343 if (item->jmp_code) {
5344 if (item->check_target_idx)
5345 arm_patch (item->jmp_code, imt_entries [item->check_target_idx]->code_target);
5347 if (i > 0 && item->is_equals) {
5349 arminstr_t *space_start = constant_pool_starts [i];
5350 for (j = i - 1; j >= 0 && !imt_entries [j]->is_equals; --j) {
5351 space_start = arm_emit_value_and_patch_ldr (space_start, (arminstr_t*)imt_entries [j]->code_target, (guint32)imt_entries [j]->key);
5358 char *buff = g_strdup_printf ("thunk_for_class_%s_%s_entries_%d", vtable->klass->name_space, vtable->klass->name, count);
5359 mono_disassemble_code (NULL, (guint8*)start, size, buff);
5364 g_free (constant_pool_starts);
5366 mono_arch_flush_icache ((guint8*)start, size);
5367 mono_stats.imt_thunks_size += code - start;
5369 g_assert (DISTANCE (start, code) <= size);
5376 mono_arch_context_get_int_reg (MonoContext *ctx, int reg)
5378 if (reg == ARMREG_SP)
5379 return (gpointer)ctx->esp;
5381 return (gpointer)ctx->regs [reg];
5385 * mono_arch_set_breakpoint:
5387 * Set a breakpoint at the native code corresponding to JI at NATIVE_OFFSET.
5388 * The location should contain code emitted by OP_SEQ_POINT.
5391 mono_arch_set_breakpoint (MonoJitInfo *ji, guint8 *ip)
5394 guint32 native_offset = ip - (guint8*)ji->code_start;
/* AOT path: mark the slot in the per-method SeqPointInfo table so the
 * sequence-point code reads from the (protected) breakpoint trigger page.
 * NOTE(review): the #ifdef lines selecting the paths are not visible here. */
5397 SeqPointInfo *info = mono_arch_get_seq_point_info (mono_domain_get (), ji->code_start);
5399 g_assert (native_offset % 4 == 0);
5400 g_assert (info->bp_addrs [native_offset / 4] == 0);
5401 info->bp_addrs [native_offset / 4] = bp_trigger_page;
/* JIT path: overwrite the OP_SEQ_POINT nop slot with a load from the
 * breakpoint trigger page, which faults while breakpoints are armed. */
5403 int dreg = ARMREG_LR;
5405 /* Read from another trigger page */
5406 ARM_LDR_IMM (code, dreg, ARMREG_PC, 0);
/* Inline literal: address of the trigger page, consumed by the LDR above. */
5408 *(int*)code = (int)bp_trigger_page;
5410 ARM_LDR_IMM (code, dreg, dreg, 0);
5412 mono_arch_flush_icache (code - 16, 16);
5415 /* This is currently implemented by emitting an SWI instruction, which
5416 * qemu/linux seems to convert to a SIGILL.
5418 *(int*)code = (0xef << 24) | 8;
5420 mono_arch_flush_icache (code - 4, 4);
5426 * mono_arch_clear_breakpoint:
5428 * Clear the breakpoint at IP.
5431 mono_arch_clear_breakpoint (MonoJitInfo *ji, guint8 *ip)
/* AOT path: reset the SeqPointInfo slot so the sequence point no longer
 * reads from the trigger page. */
5437 guint32 native_offset = ip - (guint8*)ji->code_start;
5438 SeqPointInfo *info = mono_arch_get_seq_point_info (mono_domain_get (), ji->code_start);
5440 g_assert (native_offset % 4 == 0);
5441 g_assert (info->bp_addrs [native_offset / 4] == bp_trigger_page);
5442 info->bp_addrs [native_offset / 4] = 0;
/* JIT path: rewrite the 4 patched instruction slots (loop body emitting
 * nops is not visible in this excerpt) and flush the icache. */
5444 for (i = 0; i < 4; ++i)
5447 mono_arch_flush_icache (ip, code - ip);
5452 * mono_arch_start_single_stepping:
5454 * Start single stepping.
5457 mono_arch_start_single_stepping (void)
/* Revoke all access to the trigger page: the per-sequence-point read then
 * faults, which the signal handler interprets as a single-step event. */
5459 mono_mprotect (ss_trigger_page, mono_pagesize (), 0);
5463 * mono_arch_stop_single_stepping:
5465 * Stop single stepping.
5468 mono_arch_stop_single_stepping (void)
/* Make the trigger page readable again so sequence-point reads succeed. */
5470 mono_mprotect (ss_trigger_page, mono_pagesize (), MONO_MMAP_READ);
/* Signal delivered when a trigger-page read faults; OS dependent (the
 * #ifdef/#else lines selecting between these are not visible in this excerpt). */
5474 #define DBG_SIGNAL SIGBUS
5476 #define DBG_SIGNAL SIGSEGV
5480 * mono_arch_is_single_step_event:
5482 * Return whenever the machine state in SIGCTX corresponds to a single
5486 mono_arch_is_single_step_event (void *info, void *sigctx)
5488 siginfo_t *sinfo = info;
5490 /* Sometimes the address is off by 4 */
/* Accept any faulting address within 128 bytes of the page start to cover
 * the off-by-4 cases; the TRUE/FALSE returns are not visible in this excerpt. */
5491 if (sinfo->si_addr >= ss_trigger_page && (guint8*)sinfo->si_addr <= (guint8*)ss_trigger_page + 128)
5498 * mono_arch_is_breakpoint_event:
5500 * Return whenever the machine state in SIGCTX corresponds to a breakpoint event.
5503 mono_arch_is_breakpoint_event (void *info, void *sigctx)
5505 siginfo_t *sinfo = info;
/* Only the OS-specific debug signal qualifies; then match the faulting
 * address against the breakpoint trigger page (with 128-byte slack). */
5507 if (sinfo->si_signo == DBG_SIGNAL) {
5508 /* Sometimes the address is off by 4 */
5509 if (sinfo->si_addr >= bp_trigger_page && (guint8*)sinfo->si_addr <= (guint8*)bp_trigger_page + 128)
/* Map the fault IP in CTX back to the IL-level breakpoint location;
 * the adjustment logic is not visible in this excerpt. */
5519 mono_arch_get_ip_for_breakpoint (MonoJitInfo *ji, MonoContext *ctx)
5521 guint8 *ip = MONO_CONTEXT_GET_IP (ctx);
/* Map the fault IP in CTX back to the single-step location;
 * the adjustment logic is not visible in this excerpt. */
5532 mono_arch_get_ip_for_single_step (MonoJitInfo *ji, MonoContext *ctx)
5534 guint8 *ip = MONO_CONTEXT_GET_IP (ctx);
5542 * mono_arch_skip_breakpoint:
5544 * See mini-amd64.c for docs.
5547 mono_arch_skip_breakpoint (MonoContext *ctx)
/* Advance the resume IP past the 4-byte faulting load so execution continues. */
5549 MONO_CONTEXT_SET_IP (ctx, (guint8*)MONO_CONTEXT_GET_IP (ctx) + 4);
5553 * mono_arch_skip_single_step:
5555 * See mini-amd64.c for docs.
5558 mono_arch_skip_single_step (MonoContext *ctx)
/* Advance the resume IP past the 4-byte faulting load so execution continues. */
5560 MONO_CONTEXT_SET_IP (ctx, (guint8*)MONO_CONTEXT_GET_IP (ctx) + 4);
5564 * mono_arch_get_seq_point_info:
5566 * See mini-amd64.c for docs.
5569 mono_arch_get_seq_point_info (MonoDomain *domain, guint8 *code)
5574 // FIXME: Add a free function
5576 mono_domain_lock (domain);
5577 info = g_hash_table_lookup (domain_jit_info (domain)->arch_seq_points,
5579 mono_domain_unlock (domain);
5582 ji = mono_jit_info_table_find (domain, (char*)code);
5585 info = g_malloc0 (sizeof (SeqPointInfo) + ji->code_size);
5587 info->ss_trigger_page = ss_trigger_page;
5588 info->bp_trigger_page = bp_trigger_page;
5590 mono_domain_lock (domain);
5591 g_hash_table_insert (domain_jit_info (domain)->arch_seq_points,
5593 mono_domain_unlock (domain);