2 * mini-arm.c: ARM backend for the Mono code generator
5 * Paolo Molaro (lupus@ximian.com)
6 * Dietmar Maurer (dietmar@ximian.com)
8 * (C) 2003 Ximian, Inc.
13 #include <mono/metadata/appdomain.h>
14 #include <mono/metadata/debug-helpers.h>
15 #include <mono/utils/mono-mmap.h>
22 #include "mono/arch/arm/arm-fpa-codegen.h"
23 #elif defined(ARM_FPU_VFP)
24 #include "mono/arch/arm/arm-vfp-codegen.h"
27 #if defined(__ARM_EABI__) && defined(__linux__) && !defined(PLATFORM_ANDROID)
28 #define HAVE_AEABI_READ_TP 1
/* TLS slot offsets for the current LMF and the LMF-address pointer; -1 = unknown/unavailable */
static gint lmf_tls_offset = -1;
static gint lmf_addr_tls_offset = -1;

/* This mutex protects architecture specific caches */
#define mono_mini_arch_lock() EnterCriticalSection (&mini_arch_mutex)
#define mono_mini_arch_unlock() LeaveCriticalSection (&mini_arch_mutex)
static CRITICAL_SECTION mini_arch_mutex;

/* CPU capability flags, filled in by mono_arch_cpu_optimizazions () */
static int v5_supported = 0;
static int v7_supported = 0;
static int thumb_supported = 0;

/*
 * The code generated for sequence points reads from this location, which is
 * made read-only when single stepping is enabled.
 */
static gpointer ss_trigger_page;

/* Enabled breakpoints read from this trigger page */
static gpointer bp_trigger_page;

/* Structure used by the sequence points in AOTed code */
/* NOTE(review): the next three declarations are members of a struct whose
 * opening/closing lines are outside this view of the file. */
gpointer ss_trigger_page;
gpointer bp_trigger_page;
guint8* bp_addrs [MONO_ZERO_LEN_ARRAY];

/*
 * floating point support: on ARM it is a mess, there are at least 3
 * different setups, each of which binary incompat with the other.
 * 1) FPA: old and ugly, but unfortunately what current distros use
 *    the double binary format has the two words swapped. 8 double registers.
 *    Implemented usually by kernel emulation.
 * 2) softfloat: the compiler emulates all the fp ops. Usually uses the
 *    ugly swapped double format (I guess a softfloat-vfp exists, too, though).
 * 3) VFP: the new and actually sensible and useful FP support. Implemented
 *    in HW or kernel-emulated, requires new tools. I think this is what symbian uses.
 *
 * The plan is to write the FPA support first. softfloat can be tested in a chroot.
 */
int mono_exc_esp_offset = 0;

/* Immediate ranges accepted by the ARM load/store addressing modes */
#define arm_is_imm12(v) ((v) > -4096 && (v) < 4096)
#define arm_is_imm8(v) ((v) > -256 && (v) < 256)
#define arm_is_fpimm8(v) ((v) >= -1020 && (v) <= 1020)

/* Mask/value pair recognizing an "ldr pc, [rX +/- #imm]" instruction word */
#define LDR_MASK ((0xf << ARMCOND_SHIFT) | (3 << 26) | (1 << 22) | (1 << 20) | (15 << 12))
#define LDR_PC_VAL ((ARMCOND_AL << ARMCOND_SHIFT) | (1 << 26) | (0 << 22) | (1 << 20) | (15 << 12))
#define IS_LDR_PC(val) (((val) & LDR_MASK) == LDR_PC_VAL)

/* Exact encodings of "add lr, pc, #4" and "mov lr, pc", used when scanning call sites */
#define ADD_LR_PC_4 ((ARMCOND_AL << ARMCOND_SHIFT) | (1 << 25) | (1 << 23) | (ARMREG_PC << 16) | (ARMREG_LR << 12) | 4)
#define MOV_LR_PC ((ARMCOND_AL << ARMCOND_SHIFT) | (1 << 24) | (0xa << 20) | (ARMREG_LR << 12) | ARMREG_PC)

/* A variant of ARM_LDR_IMM which can handle large offsets */
#define ARM_LDR_IMM_GENERAL(code, dreg, basereg, offset, scratch_reg) do { \
	if (arm_is_imm12 ((offset))) { \
		ARM_LDR_IMM (code, (dreg), (basereg), (offset)); \
		g_assert ((scratch_reg) != (basereg)); \
		code = mono_arm_emit_load_imm (code, (scratch_reg), (offset)); \
		ARM_LDR_REG_REG (code, (dreg), (basereg), (scratch_reg)); \

#define ARM_STR_IMM_GENERAL(code, dreg, basereg, offset, scratch_reg) do { \
	if (arm_is_imm12 ((offset))) { \
		ARM_STR_IMM (code, (dreg), (basereg), (offset)); \
		g_assert ((scratch_reg) != (basereg)); \
		code = mono_arm_emit_load_imm (code, (scratch_reg), (offset)); \
		ARM_STR_REG_REG (code, (dreg), (basereg), (scratch_reg)); \
/*
 * mono_arch_regname:
 *   Map an integer register number to a printable name for
 *   diagnostics/tracing.  Only indices 0-15 are in the table;
 *   out-of-range handling is not visible in this view.
 */
mono_arch_regname (int reg)
	static const char * rnames[] = {
		"arm_r0", "arm_r1", "arm_r2", "arm_r3", "arm_v1",
		"arm_v2", "arm_v3", "arm_v4", "arm_v5", "arm_v6",
		"arm_v7", "arm_fp", "arm_ip", "arm_sp", "arm_lr",
	/* 16 core registers on ARM */
	if (reg >= 0 && reg < 16)
/*
 * mono_arch_fregname:
 *   Map a floating-point register number to a printable name for
 *   diagnostics/tracing.  The table covers f0-f29 as visible here;
 *   the guard accepts 0-31.
 */
mono_arch_fregname (int reg)
	static const char * rnames[] = {
		"arm_f0", "arm_f1", "arm_f2", "arm_f3", "arm_f4",
		"arm_f5", "arm_f6", "arm_f7", "arm_f8", "arm_f9",
		"arm_f10", "arm_f11", "arm_f12", "arm_f13", "arm_f14",
		"arm_f15", "arm_f16", "arm_f17", "arm_f18", "arm_f19",
		"arm_f20", "arm_f21", "arm_f22", "arm_f23", "arm_f24",
		"arm_f25", "arm_f26", "arm_f27", "arm_f28", "arm_f29",
	if (reg >= 0 && reg < 32)
/*
 * emit_big_add:
 *   Emit "dreg = sreg + imm".  Uses a single ADD when IMM fits the
 *   rotated 8-bit immediate encoding; otherwise loads IMM into DREG
 *   first and adds the registers.  Returns the updated code pointer.
 */
emit_big_add (guint8 *code, int dreg, int sreg, int imm)
	int imm8, rot_amount;
	if ((imm8 = mono_arm_is_rotated_imm8 (imm, &rot_amount)) >= 0) {
		ARM_ADD_REG_IMM (code, dreg, sreg, imm8, rot_amount);
	/* DREG doubles as the scratch register for the immediate, so it
	 * must differ from SREG */
	g_assert (dreg != sreg);
	code = mono_arm_emit_load_imm (code, dreg, imm);
	ARM_ADD_REG_REG (code, dreg, dreg, sreg);
/*
 * emit_memcpy:
 *   Emit code copying SIZE bytes from sreg+soffset to dreg+doffset in
 *   word-sized chunks.  Copies larger than 4 words use a runtime loop
 *   on r0-r3; smaller copies are unrolled using LR as scratch.
 */
emit_memcpy (guint8 *code, int size, int dreg, int doffset, int sreg, int soffset)
	/* we can use r0-r3, since this is called only for incoming args on the stack */
	if (size > sizeof (gpointer) * 4) {
		code = emit_big_add (code, ARMREG_R0, sreg, soffset);
		code = emit_big_add (code, ARMREG_R1, dreg, doffset);
		start_loop = code = mono_arm_emit_load_imm (code, ARMREG_R2, size);
		/* copy one word per iteration, counting r2 (remaining bytes) down */
		ARM_LDR_IMM (code, ARMREG_R3, ARMREG_R0, 0);
		ARM_STR_IMM (code, ARMREG_R3, ARMREG_R1, 0);
		ARM_ADD_REG_IMM8 (code, ARMREG_R0, ARMREG_R0, 4);
		ARM_ADD_REG_IMM8 (code, ARMREG_R1, ARMREG_R1, 4);
		ARM_SUBS_REG_IMM8 (code, ARMREG_R2, ARMREG_R2, 4);
		/* branch back while not zero; displacement patched just below */
		ARM_B_COND (code, ARMCOND_NE, 0);
		arm_patch (code - 4, start_loop);
	if (arm_is_imm12 (doffset) && arm_is_imm12 (doffset + size) &&
			arm_is_imm12 (soffset) && arm_is_imm12 (soffset + size)) {
		ARM_LDR_IMM (code, ARMREG_LR, sreg, soffset);
		ARM_STR_IMM (code, ARMREG_LR, dreg, doffset);
	/* offsets too large for the imm12 encoding: materialize the
	 * effective addresses in r0/r1 and restart from offset 0 */
	code = emit_big_add (code, ARMREG_R0, sreg, soffset);
	code = emit_big_add (code, ARMREG_R1, dreg, doffset);
	doffset = soffset = 0;
	ARM_LDR_IMM (code, ARMREG_LR, ARMREG_R0, soffset);
	ARM_STR_IMM (code, ARMREG_LR, ARMREG_R1, doffset);
	/* the whole size must have been consumed in word-sized steps */
	g_assert (size == 0);
/*
 * emit_call_reg:
 *   Emit an indirect call through REG.  Uses BLX where available;
 *   the fallback saves the return address into LR by hand before
 *   branching via "mov pc, reg".
 */
emit_call_reg (guint8 *code, int reg)
	ARM_BLX_REG (code, reg);
	/* fallback: LR = PC (reads as current insn + 8), then jump */
	ARM_MOV_REG_REG (code, ARMREG_LR, ARMREG_PC);
	ARM_MOV_REG_REG (code, ARMREG_PC, reg);
/*
 * emit_call_seq:
 *   Emit a call sequence whose target is filled in later by the patcher.
 *   For dynamic methods the target is an inline literal loaded into IP
 *   (initially NULL) and called indirectly.
 */
emit_call_seq (MonoCompile *cfg, guint8 *code)
	if (cfg->method->dynamic) {
		ARM_LDR_IMM (code, ARMREG_IP, ARMREG_PC, 0);
		/* inline literal slot for the call target; NULL until patched */
		*(gpointer*)code = NULL;
		code = emit_call_reg (code, ARMREG_IP);
/*
 * emit_move_return_value:
 *   After a call, move the ABI return value into INS->dreg.  FP returns
 *   depend on the FPU backend: FPA returns in F0; with VFP the value
 *   arrives in r0 (r4) or r0:r1 (r8) and is moved into a VFP register.
 */
emit_move_return_value (MonoCompile *cfg, MonoInst *ins, guint8 *code)
	switch (ins->opcode) {
	case OP_FCALL_MEMBASE:
		if (ins->dreg != ARM_FPA_F0)
			ARM_MVFD (code, ins->dreg, ARM_FPA_F0);
#elif defined(ARM_FPU_VFP)
		if (((MonoCallInst*)ins)->signature->ret->type == MONO_TYPE_R4) {
			/* single-precision result in r0: move to VFP reg, then
			 * convert (presumably r4->r8 — confirm in arm-vfp-codegen.h) */
			ARM_FMSR (code, ins->dreg, ARMREG_R0);
			ARM_CVTS (code, ins->dreg, ins->dreg);
			/* double result in r0:r1: combine into a VFP double reg */
			ARM_FMDRR (code, ARMREG_R0, ARMREG_R1, ins->dreg);

#endif /* #ifndef DISABLE_JIT */
/*
 * mono_arch_get_argument_info:
 * @csig: a method signature
 * @param_count: the number of parameters to consider
 * @arg_info: an array to store the result infos
 *
 * Gathers information on parameters such as size, alignment and
 * padding. arg_info should be large enought to hold param_count + 1 entries.
 *
 * Returns the size of the activation frame.
 */
mono_arch_get_argument_info (MonoMethodSignature *csig, int param_count, MonoJitArgumentInfo *arg_info)
	int k, frame_size = 0;
	guint32 size, align, pad;
	/* struct returns consume one pointer (hidden return address arg) */
	if (MONO_TYPE_ISSTRUCT (csig->ret)) {
		frame_size += sizeof (gpointer);
	arg_info [0].offset = offset;
	/* one more pointer — presumably the implicit this argument;
	 * the surrounding condition is not visible in this view */
	frame_size += sizeof (gpointer);
	arg_info [0].size = frame_size;
	for (k = 0; k < param_count; k++) {
		size = mini_type_stack_size_full (NULL, csig->params [k], &align, csig->pinvoke);
		/* ignore alignment for now */
		/* pad so this argument starts at its natural alignment */
		frame_size += pad = (align - (frame_size & (align - 1))) & (align - 1);
		arg_info [k].pad = pad;
		arg_info [k + 1].pad = 0;
		arg_info [k + 1].size = size;
		arg_info [k + 1].offset = offset;
	/* round the whole frame up to MONO_ARCH_FRAME_ALIGNMENT */
	align = MONO_ARCH_FRAME_ALIGNMENT;
	frame_size += pad = (align - (frame_size & (align - 1))) & (align - 1);
	arg_info [k].pad = pad;
/*
 * decode_vcall_slot_from_ldr:
 *   Given an "ldr rD, [rN +/- #imm]" instruction word, recover the
 *   vtable slot: read the base register value from REGS and return the
 *   decoded (signed) offset through *displacement.
 */
decode_vcall_slot_from_ldr (guint32 ldr, mgreg_t *regs, int *displacement)
	reg = (ldr >> 16 ) & 0xf;	/* Rn field: base register number */
	offset = ldr & 0xfff;	/* 12-bit immediate */
	if (((ldr >> 23) & 1) == 0) /*U bit, 0 means negative and 1 positive*/
	/*g_print ("found vcall at r%d + %d for code at %p 0x%x\n", reg, offset, code, *code);*/
	o = (gpointer)regs [reg];
	*displacement = offset;
/*
 * mono_arch_get_vcall_slot:
 *   Inspect the instructions just before CODE_PTR to decide whether the
 *   call was made through a vtable/IMT slot, and if so decode which
 *   register+offset held the slot (see decode_vcall_slot_from_ldr).
 */
mono_arch_get_vcall_slot (guint8 *code_ptr, mgreg_t *regs, int *displacement)
	guint32* code = (guint32*)code_ptr;

	/* Locate the address of the method-specific trampoline. The call using
	   the vtable slot that took the processing flow to 'arch_create_jit_trampoline'
	   looks something like this:

	   The call sequence could be also:

	   function pointer literal

	   Note that on ARM5+ we can use one instruction instead of the last two.
	   Therefore, we need to locate the 'ldr rA' instruction to know which
	   register was used to hold the method addrs.
	*/

	/* This is the instruction after "ldc pc, xxx", "mov pc, xxx" or "bl xxx" could be either the IMT value or some other instruction*/

	/* Three possible code sequences can happen here:
	 * ldr pc, [rX - #offset]
	 * ldr pc, [rX - #offset]
	 * direct branch with bl:
	 * direct branch with mov:
	 * We only need to identify interface and virtual calls, the others can be ignored.
	 */
	/* "add lr, pc, #4" followed by "ldr pc, [...]": slot load is the ldr itself */
	if (IS_LDR_PC (code [-1]) && code [-2] == ADD_LR_PC_4)
		return decode_vcall_slot_from_ldr (code [-1], regs, displacement);
	/* "mov lr, pc" followed by "ldr pc, [...]": the ldr is at code [0] */
	if (IS_LDR_PC (code [0]) && code [-1] == MOV_LR_PC)
		return decode_vcall_slot_from_ldr (code [0], regs, displacement);
#define MAX_ARCH_DELEGATE_PARAMS 3

/*
 * get_delegate_invoke_impl:
 *   Generate the small trampoline backing Delegate.Invoke.  The
 *   has-target variant replaces the this argument with delegate->target
 *   and jumps to method_ptr; the no-target variant slides the explicit
 *   arguments down one register before jumping.  When CODE_SIZE is
 *   non-NULL the generated size is returned through it.
 *
 * NOTE(review): param_count is declared gboolean but is used as a count
 * (0..MAX_ARCH_DELEGATE_PARAMS) — looks like it should be an int; confirm
 * against callers before changing.
 */
get_delegate_invoke_impl (gboolean has_target, gboolean param_count, guint32 *code_size)
	guint8 *code, *start;
	start = code = mono_global_codeman_reserve (12);
	/* Replace the this argument with the target */
	ARM_LDR_IMM (code, ARMREG_IP, ARMREG_R0, G_STRUCT_OFFSET (MonoDelegate, method_ptr));
	ARM_LDR_IMM (code, ARMREG_R0, ARMREG_R0, G_STRUCT_OFFSET (MonoDelegate, target));
	ARM_MOV_REG_REG (code, ARMREG_PC, ARMREG_IP);
	g_assert ((code - start) <= 12);
	mono_arch_flush_icache (start, 12);

	/* no-target case: 2 insns plus one 4-byte mov per shifted parameter */
	size = 8 + param_count * 4;
	start = code = mono_global_codeman_reserve (size);
	ARM_LDR_IMM (code, ARMREG_IP, ARMREG_R0, G_STRUCT_OFFSET (MonoDelegate, method_ptr));
	/* slide down the arguments */
	for (i = 0; i < param_count; ++i) {
		ARM_MOV_REG_REG (code, (ARMREG_R0 + i), (ARMREG_R0 + i + 1));
	ARM_MOV_REG_REG (code, ARMREG_PC, ARMREG_IP);
	g_assert ((code - start) <= size);
	mono_arch_flush_icache (start, size);
	*code_size = code - start;
/*
 * mono_arch_get_delegate_invoke_impls:
 *
 * Return a list of MonoAotTrampInfo structures for the delegate invoke impl
 * trampolines: the has-target variant plus one no-target variant per
 * supported parameter count.
 */
mono_arch_get_delegate_invoke_impls (void)
	code = get_delegate_invoke_impl (TRUE, 0, &code_len);
	res = g_slist_prepend (res, mono_aot_tramp_info_create (g_strdup ("delegate_invoke_impl_has_target"), code, code_len));
	/* no-target variants for arity 0..MAX_ARCH_DELEGATE_PARAMS */
	for (i = 0; i <= MAX_ARCH_DELEGATE_PARAMS; ++i) {
		code = get_delegate_invoke_impl (FALSE, i, &code_len);
		res = g_slist_prepend (res, mono_aot_tramp_info_create (g_strdup_printf ("delegate_invoke_impl_target_%d", i), code, code_len));
/*
 * mono_arch_get_delegate_invoke_impl:
 *   Return (and cache) the delegate invoke trampoline matching SIG.
 *   In AOT mode the named precompiled code is looked up instead of
 *   generating it.  Caches are guarded by the arch mutex.
 */
mono_arch_get_delegate_invoke_impl (MonoMethodSignature *sig, gboolean has_target)
	guint8 *code, *start;

	/* FIXME: Support more cases */
	if (MONO_TYPE_ISSTRUCT (sig->ret))

	/* has-target variant: one shared trampoline, cached once */
	static guint8* cached = NULL;
	mono_mini_arch_lock ();
	mono_mini_arch_unlock ();
	start = mono_aot_get_named_code ("delegate_invoke_impl_has_target");
	start = get_delegate_invoke_impl (TRUE, 0, NULL);
	mono_mini_arch_unlock ();

	/* no-target variant: one cache slot per parameter count */
	static guint8* cache [MAX_ARCH_DELEGATE_PARAMS + 1] = {NULL};
	if (sig->param_count > MAX_ARCH_DELEGATE_PARAMS)
	/* all parameters must fit a core register */
	for (i = 0; i < sig->param_count; ++i)
		if (!mono_is_regsize_var (sig->params [i]))
	mono_mini_arch_lock ();
	code = cache [sig->param_count];
	mono_mini_arch_unlock ();
	char *name = g_strdup_printf ("delegate_invoke_impl_target_%d", sig->param_count);
	start = mono_aot_get_named_code (name);
	start = get_delegate_invoke_impl (FALSE, sig->param_count, NULL);
	cache [sig->param_count] = start;
	mono_mini_arch_unlock ();
/*
 * mono_arch_get_this_arg_from_call:
 *   Recover the this pointer from the saved register state of a call
 *   site.  When the signature returns a struct, r0 carries the hidden
 *   return-value address, so this is in r1; otherwise this is in r0.
 */
mono_arch_get_this_arg_from_call (MonoGenericSharingContext *gsctx, MonoMethodSignature *sig, mgreg_t *regs, guint8 *code)
	/* FIXME: handle returning a struct */
	if (MONO_TYPE_ISSTRUCT (sig->ret))
		return (gpointer)regs [ARMREG_R1];
	return (gpointer)regs [ARMREG_R0];
/*
 * Initialize the cpu to execute managed code.
 * (No ARM-specific per-CPU setup is visible in this view.)
 */
mono_arch_cpu_init (void)
/*
 * Initialize architecture specific code.
 * Sets up the arch mutex and the sequence-point trigger pages.
 */
mono_arch_init (void)
	InitializeCriticalSection (&mini_arch_mutex);

	/* single-step page: readable now, made read-only/protected when stepping */
	ss_trigger_page = mono_valloc (NULL, mono_pagesize (), MONO_MMAP_READ|MONO_MMAP_32BIT);
	bp_trigger_page = mono_valloc (NULL, mono_pagesize (), MONO_MMAP_READ|MONO_MMAP_32BIT);
	/* breakpoint page: all access removed so reads from it trap */
	mono_mprotect (bp_trigger_page, mono_pagesize (), 0);
/*
 * Cleanup architecture specific code.
 */
mono_arch_cleanup (void)
/*
 * This function returns the optimizations supported on this cpu.
 * Side effect: probes the CPU (MONO_CPU_ARCH override, else /proc/cpuinfo)
 * and fills in v5_supported/v7_supported/thumb_supported.
 * (Name typo "optimizazions" is the established API name — do not rename.)
 */
mono_arch_cpu_optimizazions (guint32 *exclude_mask)
	/* explicit override, e.g. MONO_CPU_ARCH="armv6 thumb" */
	const char *cpu_arch = getenv ("MONO_CPU_ARCH");
	if (cpu_arch != NULL) {
		thumb_supported = strstr (cpu_arch, "thumb") != NULL;
		if (strncmp (cpu_arch, "armv", 4) == 0) {
			v5_supported = cpu_arch [4] >= '5';
			v7_supported = cpu_arch [4] >= '7';
	thumb_supported = TRUE;

	/* autodetect from the kernel's CPU description */
	FILE *file = fopen ("/proc/cpuinfo", "r");
	while ((line = fgets (buf, 512, file))) {
		if (strncmp (line, "Processor", 9) == 0) {
			/* e.g. "Processor : ARMv7 Processor rev 2 (v7l)" */
			char *ver = strstr (line, "(v");
			if (ver && (ver [2] == '5' || ver [2] == '6' || ver [2] == '7'))
			if (ver && (ver [2] == '7'))
		if (strncmp (line, "Features", 8) == 0) {
			char *th = strstr (line, "thumb");
			thumb_supported = TRUE;
	/*printf ("features: v5: %d, thumb: %d\n", v5_supported, thumb_supported);*/

	/* no arm-specific optimizations yet */
/*
 * is_regsize_var:
 *   Return whether values of type T fit in (and may live in) a single
 *   core register: pointers, object references, and non-valuetype
 *   generic instances.
 */
is_regsize_var (MonoType *t) {
	t = mini_type_get_underlying_type (NULL, t);
	case MONO_TYPE_FNPTR:
	case MONO_TYPE_OBJECT:
	case MONO_TYPE_STRING:
	case MONO_TYPE_CLASS:
	case MONO_TYPE_SZARRAY:
	case MONO_TYPE_ARRAY:
	case MONO_TYPE_GENERICINST:
		/* reference-type instantiations are register-sized like objects */
		if (!mono_type_generic_inst_is_valuetype (t))
	case MONO_TYPE_VALUETYPE:
/*
 * mono_arch_get_allocatable_int_vars:
 *   Collect the method variables eligible for global register
 *   allocation: live, non-volatile, non-indirect locals/args whose
 *   type fits a core register.  Returns a sorted GList of MonoMethodVar.
 */
mono_arch_get_allocatable_int_vars (MonoCompile *cfg)
	for (i = 0; i < cfg->num_varinfo; i++) {
		MonoInst *ins = cfg->varinfo [i];
		MonoMethodVar *vmv = MONO_VARINFO (cfg, i);
		/* unused or dead vars have an empty live range */
		if (vmv->range.first_use.abs_pos >= vmv->range.last_use.abs_pos)
		if (ins->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT) || (ins->opcode != OP_LOCAL && ins->opcode != OP_ARG))
		/* we can only allocate 32 bit values */
		if (is_regsize_var (ins->inst_vtype)) {
			g_assert (MONO_VARINFO (cfg, i)->reg == -1);
			g_assert (i == vmv->idx);
			vars = mono_varlist_insert_sorted (cfg, vars, vmv, FALSE);
#define USE_EXTRA_TEMPS 0

/*
 * mono_arch_get_global_int_regs:
 *   Return the list of callee-saved core registers (v1-v4, plus v5 when
 *   it is not reserved) available to the global register allocator.
 */
mono_arch_get_global_int_regs (MonoCompile *cfg)
	/*
	 * FIXME: Interface calls might go through a static rgctx trampoline which
	 * sets V5, but it doesn't save it, so we need to save it ourselves, and
	 */
	if (cfg->flags & MONO_CFG_HAS_CALLS)
		cfg->uses_rgctx_reg = TRUE;

	regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_V1));
	regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_V2));
	regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_V3));
	regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_V4));
	if (!(cfg->compile_aot || cfg->uses_rgctx_reg))
		/* V5 is reserved for passing the vtable/rgctx/IMT method */
		regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_V5));
	/*regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_V6));*/
	/*regs = g_list_prepend (regs, GUINT_TO_POINTER (ARMREG_V7));*/
/*
 * mono_arch_regalloc_cost:
 *
 * Return the cost, in number of memory references, of the action of
 * allocating the variable VMV into a register during global register
 * allocation.
 */
mono_arch_regalloc_cost (MonoCompile *cfg, MonoMethodVar *vmv)
#endif /* #ifndef DISABLE_JIT */

/* glibc feature-test helper; define a no-op fallback for other libcs */
#ifndef __GNUC_PREREQ
#define __GNUC_PREREQ(maj, min) (0)

/*
 * mono_arch_flush_icache:
 *   Flush the instruction cache for [code, code+size) so newly emitted
 *   machine code is visible to instruction fetch.  Per-platform: Darwin
 *   libc, the GCC builtin, Android cacheflush syscall, or raw Linux swi.
 */
mono_arch_flush_icache (guint8 *code, gint size)
	sys_icache_invalidate (code, size);
#elif __GNUC_PREREQ(4, 1)
	__clear_cache (code, code + size);
#elif defined(PLATFORM_ANDROID)
	/* __ARM_NR_cacheflush private syscall number */
	const int syscall = 0xf0002;
	: "r" (code), "r" (code + size), "r" (syscall)
	: "r0", "r1", "r7", "r2"
	__asm __volatile ("mov r0, %0\n"
		"swi 0x9f0002 @ sys_cacheflush"
		: "r" (code), "r" (code + size), "r" (0)
		: "r0", "r1", "r3" );
/* NOTE(review): these are members of the ArgInfo/CallInfo declarations;
 * the enclosing struct lines are outside this view of the file. */
guint16 vtsize; /* in param area */
guint8 size : 4; /* 1, 2, 4, 8, or regs used by RegTypeStructByVal */
/* set when a vtype return is passed via a hidden address argument
 * (see get_call_info) */
gboolean vtype_retaddr;

/*#define __alignof__(a) sizeof(a)*/
/* portable alignment probe: offset of x after a single char member */
#define __alignof__(type) G_STRUCT_OFFSET(struct { char c; type x; }, x)
/*
 * add_general:
 *   Assign the next argument position following the ARM calling
 *   convention.  SIMPLE arguments take one core register (r0-r3) or a
 *   stack slot; 64-bit values take a register pair (possibly split
 *   r3 + stack, or aligned to an even register on EABI).
 *   Updates *gr (next free register) and *stack_size.
 */
add_general (guint *gr, guint *stack_size, ArgInfo *ainfo, gboolean simple)
	if (*gr > ARMREG_R3) {
		/* registers exhausted: argument lives in the caller's outgoing area */
		ainfo->offset = *stack_size;
		ainfo->reg = ARMREG_SP; /* in the caller */
		ainfo->storage = RegTypeBase;
	ainfo->storage = RegTypeGeneral;

#if defined(__APPLE__) && defined(MONO_CROSS_COMPILE)
	/* 64-bit case: whether a long may be split reg/stack depends on
	 * the platform's 8-byte alignment rules */
	int i8_align = __alignof__ (gint64);
	gboolean split = i8_align == 4;
	gboolean split = TRUE;
	if (*gr == ARMREG_R3 && split) {
		/* first word in r3 and the second on the stack */
		ainfo->offset = *stack_size;
		ainfo->reg = ARMREG_SP; /* in the caller */
		ainfo->storage = RegTypeBaseGen;
	} else if (*gr >= ARMREG_R3) {
		/* darwin aligns longs to 4 byte only */
		ainfo->offset = *stack_size;
		ainfo->reg = ARMREG_SP; /* in the caller */
		ainfo->storage = RegTypeBase;
	/* EABI: 8-byte-aligned pairs start at an even register */
	if (i8_align == 8 && ((*gr) & 1))
	ainfo->storage = RegTypeIRegPair;
/*
 * get_call_info:
 *   Build a CallInfo describing where every argument and the return
 *   value of SIG live under the ARM calling convention (registers,
 *   register pairs, stack slots, by-val structs).  Allocated from MP
 *   when given, otherwise malloc'ed (caller frees).
 */
get_call_info (MonoMemPool *mp, MonoMethodSignature *sig, gboolean is_pinvoke)
	int n = sig->hasthis + sig->param_count;
	MonoType *simpletype;
	guint32 stack_size = 0;

	cinfo = mono_mempool_alloc0 (mp, sizeof (CallInfo) + (sizeof (ArgInfo) * n));
	cinfo = g_malloc0 (sizeof (CallInfo) + (sizeof (ArgInfo) * n));

	/* FIXME: handle returning a struct */
	if (MONO_TYPE_ISSTRUCT (sig->ret)) {
		/* tiny pinvoke structs are returned by value in registers */
		if (is_pinvoke && mono_class_native_size (mono_class_from_mono_type (sig->ret), &align) <= sizeof (gpointer)) {
			cinfo->ret.storage = RegTypeStructByVal;
		/* otherwise a hidden return-address argument is passed first */
		add_general (&gr, &stack_size, &cinfo->ret, TRUE);
		cinfo->struct_ret = ARMREG_R0;
		cinfo->vtype_retaddr = TRUE;

	/* implicit this argument */
	add_general (&gr, &stack_size, cinfo->args + n, TRUE);

	DEBUG(printf("params: %d\n", sig->param_count));
	for (i = 0; i < sig->param_count; ++i) {
		if ((sig->call_convention == MONO_CALL_VARARG) && (i == sig->sentinelpos)) {
			/* Prevent implicit arguments and sig_cookie from
			   being passed in registers */
			/* Emit the signature cookie just before the implicit arguments */
			add_general (&gr, &stack_size, &cinfo->sig_cookie, TRUE);
		DEBUG(printf("param %d: ", i));
		if (sig->params [i]->byref) {
			DEBUG(printf("byref\n"));
			add_general (&gr, &stack_size, cinfo->args + n, TRUE);
		simpletype = mini_type_get_underlying_type (NULL, sig->params [i]);
		switch (simpletype->type) {
		case MONO_TYPE_BOOLEAN:
			cinfo->args [n].size = 1;
			add_general (&gr, &stack_size, cinfo->args + n, TRUE);
			cinfo->args [n].size = 2;
			add_general (&gr, &stack_size, cinfo->args + n, TRUE);
			cinfo->args [n].size = 4;
			add_general (&gr, &stack_size, cinfo->args + n, TRUE);
		case MONO_TYPE_FNPTR:
		case MONO_TYPE_CLASS:
		case MONO_TYPE_OBJECT:
		case MONO_TYPE_STRING:
		case MONO_TYPE_SZARRAY:
		case MONO_TYPE_ARRAY:
			cinfo->args [n].size = sizeof (gpointer);
			add_general (&gr, &stack_size, cinfo->args + n, TRUE);
		case MONO_TYPE_GENERICINST:
			/* reference instantiations are passed like objects */
			if (!mono_type_generic_inst_is_valuetype (simpletype)) {
				cinfo->args [n].size = sizeof (gpointer);
				add_general (&gr, &stack_size, cinfo->args + n, TRUE);
		case MONO_TYPE_TYPEDBYREF:
		case MONO_TYPE_VALUETYPE: {
			if (simpletype->type == MONO_TYPE_TYPEDBYREF) {
				size = sizeof (MonoTypedRef);
				align = sizeof (gpointer);
				MonoClass *klass = mono_class_from_mono_type (sig->params [i]);
				size = mono_class_native_size (klass, &align);
				size = mono_class_value_size (klass, &align);
			DEBUG(printf ("load %d bytes struct\n",
				      mono_class_native_size (sig->params [i]->data.klass, NULL)));
			/* round the struct size up to whole pointer words */
			align_size += (sizeof (gpointer) - 1);
			align_size &= ~(sizeof (gpointer) - 1);
			nwords = (align_size + sizeof (gpointer) -1 ) / sizeof (gpointer);
			cinfo->args [n].storage = RegTypeStructByVal;
			/* FIXME: align stack_size if needed */
			/* 8-byte-aligned structs start at an even register */
			if (align >= 8 && (gr & 1))
			if (gr > ARMREG_R3) {
				/* entirely on the stack */
				cinfo->args [n].size = 0;
				cinfo->args [n].vtsize = nwords;
				/* partially in registers, the rest in the param area */
				int rest = ARMREG_R3 - gr + 1;
				int n_in_regs = rest >= nwords? nwords: rest;
				cinfo->args [n].size = n_in_regs;
				cinfo->args [n].vtsize = nwords - n_in_regs;
				cinfo->args [n].reg = gr;
			cinfo->args [n].offset = stack_size;
			/*g_print ("offset for arg %d at %d\n", n, stack_size);*/
			stack_size += nwords * sizeof (gpointer);
			/* 64-bit: takes a register pair (simple == FALSE) */
			cinfo->args [n].size = 8;
			add_general (&gr, &stack_size, cinfo->args + n, FALSE);
			g_error ("Can't trampoline 0x%x", sig->params [i]->type);

	/* Handle the case where there are no implicit arguments */
	if ((sig->call_convention == MONO_CALL_VARARG) && (i == sig->sentinelpos)) {
		/* Prevent implicit arguments and sig_cookie from
		   being passed in registers */
		/* Emit the signature cookie just before the implicit arguments */
		add_general (&gr, &stack_size, &cinfo->sig_cookie, TRUE);

	/* return value placement */
	simpletype = mini_type_get_underlying_type (NULL, sig->ret);
	switch (simpletype->type) {
	case MONO_TYPE_BOOLEAN:
	case MONO_TYPE_CHAR:
	case MONO_TYPE_FNPTR:
	case MONO_TYPE_CLASS:
	case MONO_TYPE_OBJECT:
	case MONO_TYPE_SZARRAY:
	case MONO_TYPE_ARRAY:
	case MONO_TYPE_STRING:
		cinfo->ret.storage = RegTypeGeneral;
		cinfo->ret.reg = ARMREG_R0;
		/* 64-bit result in r0:r1 */
		cinfo->ret.storage = RegTypeIRegPair;
		cinfo->ret.reg = ARMREG_R0;
		cinfo->ret.storage = RegTypeFP;
		cinfo->ret.reg = ARMREG_R0;
		/* FIXME: cinfo->ret.reg = ???;
		   cinfo->ret.storage = RegTypeFP;*/
	case MONO_TYPE_GENERICINST:
		if (!mono_type_generic_inst_is_valuetype (simpletype)) {
			cinfo->ret.storage = RegTypeGeneral;
			cinfo->ret.reg = ARMREG_R0;
	case MONO_TYPE_VALUETYPE:
	case MONO_TYPE_TYPEDBYREF:
		if (cinfo->ret.storage != RegTypeStructByVal)
			cinfo->ret.storage = RegTypeStructByAddr;
	case MONO_TYPE_VOID:
		g_error ("Can't handle as return value 0x%x", sig->ret->type);

	/* align stack size to 8 */
	DEBUG (printf ("      stack size: %d (%d)\n", (stack_size + 15) & ~15, stack_size));
	stack_size = (stack_size + 7) & ~7;
	cinfo->stack_usage = stack_size;
/*
 * Set var information according to the calling convention. arm version.
 * The locals var stuff should most likely be split in another method.
 *
 * Assigns a frame register and stack offsets (positive, stack grows
 * upward from the frame base here) to the return value, locals and
 * incoming arguments of CFG->method.
 */
mono_arch_allocate_vars (MonoCompile *cfg)
	MonoMethodSignature *sig;
	MonoMethodHeader *header;
	int i, offset, size, align, curinst;
	int frame_reg = ARMREG_FP;

	sig = mono_method_signature (cfg->method);

	/* the CallInfo is computed once and cached on the arch-specific cfg */
	if (!cfg->arch.cinfo)
		cfg->arch.cinfo = get_call_info (cfg->mempool, sig, sig->pinvoke);
	cinfo = cfg->arch.cinfo;

	/* FIXME: this will change when we use FP as gcc does */
	cfg->flags |= MONO_CFG_HAS_SPILLUP;

	/* allow room for the vararg method args: void* and long/double */
	if (mono_jit_trace_calls != NULL && mono_trace_eval (cfg->method))
		cfg->param_area = MAX (cfg->param_area, sizeof (gpointer)*8);

	header = cfg->header;

	/*
	 * We use the frame register also for any method that has
	 * exception clauses. This way, when the handlers are called,
	 * the code will reference local variables using the frame reg instead of
	 * the stack pointer: if we had to restore the stack pointer, we'd
	 * corrupt the method frames that are already on the stack (since
	 * filters get called before stack unwinding happens) when the filter
	 * code would call any method (this also applies to finally etc.).
	 */
	if ((cfg->flags & MONO_CFG_HAS_ALLOCA) || header->num_clauses)
		frame_reg = ARMREG_FP;
	cfg->frame_reg = frame_reg;
	if (frame_reg != ARMREG_SP) {
		cfg->used_int_regs |= 1 << frame_reg;

	if (cfg->compile_aot || cfg->uses_rgctx_reg)
		/* V5 is reserved for passing the vtable/rgctx/IMT method */
		cfg->used_int_regs |= (1 << ARMREG_V5);

	/* scalar returns live directly in r0 */
	if (!MONO_TYPE_ISSTRUCT (sig->ret)) {
		switch (mini_type_get_underlying_type (NULL, sig->ret)->type) {
		case MONO_TYPE_VOID:
			cfg->ret->opcode = OP_REGVAR;
			cfg->ret->inst_c0 = ARMREG_R0;

	/* local vars are at a positive offset from the stack pointer */
	/*
	 * also note that if the function uses alloca, we use FP
	 * to point at the local variables.
	 */
	offset = 0; /* linkage area */
	/* align the offset to 16 bytes: not sure this is needed here */
	//offset &= ~(8 - 1);

	/* add parameter area size for called functions */
	offset += cfg->param_area;
	if (cfg->flags & MONO_CFG_HAS_FPOUT)

	/* allow room to save the return value */
	if (mono_jit_trace_calls != NULL && mono_trace_eval (cfg->method))

	/* the MonoLMF structure is stored just below the stack pointer */
	if (MONO_TYPE_ISSTRUCT (sig->ret)) {
		if (cinfo->ret.storage == RegTypeStructByVal) {
			/* by-val struct return: reserve a word-aligned slot below the frame */
			cfg->ret->opcode = OP_REGOFFSET;
			cfg->ret->inst_basereg = cfg->frame_reg;
			offset += sizeof (gpointer) - 1;
			offset &= ~(sizeof (gpointer) - 1);
			cfg->ret->inst_offset = - offset;
			/* by-addr struct return: vret_addr gets its own stack slot */
			ins = cfg->vret_addr;
			offset += sizeof(gpointer) - 1;
			offset &= ~(sizeof(gpointer) - 1);
			ins->inst_offset = offset;
			ins->opcode = OP_REGOFFSET;
			ins->inst_basereg = frame_reg;
			if (G_UNLIKELY (cfg->verbose_level > 1)) {
				printf ("vret_addr =");
				mono_print_ins (cfg->vret_addr);
			offset += sizeof(gpointer);

	/* Allocate these first so they have a small offset, OP_SEQ_POINT depends on this */
	if (cfg->arch.seq_point_info_var) {
		ins = cfg->arch.seq_point_info_var;
		offset += align - 1;
		offset &= ~(align - 1);
		ins->opcode = OP_REGOFFSET;
		ins->inst_basereg = frame_reg;
		ins->inst_offset = offset;

	ins = cfg->arch.ss_trigger_page_var;
	offset += align - 1;
	offset &= ~(align - 1);
	ins->opcode = OP_REGOFFSET;
	ins->inst_basereg = frame_reg;
	ins->inst_offset = offset;

	/* assign stack slots to the remaining locals */
	curinst = cfg->locals_start;
	for (i = curinst; i < cfg->num_varinfo; ++i) {
		ins = cfg->varinfo [i];
		if ((ins->flags & MONO_INST_IS_DEAD) || ins->opcode == OP_REGVAR || ins->opcode == OP_REGOFFSET)

		/* inst->backend.is_pinvoke indicates native sized value types, this is used by the
		 * pinvoke wrappers when they call functions returning structure */
		if (ins->backend.is_pinvoke && MONO_TYPE_ISSTRUCT (ins->inst_vtype) && ins->inst_vtype->type != MONO_TYPE_TYPEDBYREF) {
			size = mono_class_native_size (mono_class_from_mono_type (ins->inst_vtype), &ualign);
			size = mono_type_size (ins->inst_vtype, &align);

		/* FIXME: if a structure is misaligned, our memcpy doesn't work,
		 * since it loads/stores misaligned words, which don't do the right thing.
		 */
		if (align < 4 && size >= 4)
		offset += align - 1;
		offset &= ~(align - 1);
		ins->opcode = OP_REGOFFSET;
		ins->inst_offset = offset;
		ins->inst_basereg = frame_reg;
		//g_print ("allocating local %d to %d\n", i, inst->inst_offset);

	/* this argument, when not in a register */
	ins = cfg->args [curinst];
	if (ins->opcode != OP_REGVAR) {
		ins->opcode = OP_REGOFFSET;
		ins->inst_basereg = frame_reg;
		offset += sizeof (gpointer) - 1;
		offset &= ~(sizeof (gpointer) - 1);
		ins->inst_offset = offset;
		offset += sizeof (gpointer);

	if (sig->call_convention == MONO_CALL_VARARG) {
		/* Allocate a local slot to hold the sig cookie address */
		offset += align - 1;
		offset &= ~(align - 1);
		cfg->sig_cookie = offset;

	/* explicit parameters */
	for (i = 0; i < sig->param_count; ++i) {
		ins = cfg->args [curinst];
		if (ins->opcode != OP_REGVAR) {
			ins->opcode = OP_REGOFFSET;
			ins->inst_basereg = frame_reg;
			size = mini_type_stack_size_full (NULL, sig->params [i], &ualign, sig->pinvoke);
			/* FIXME: if a structure is misaligned, our memcpy doesn't work,
			 * since it loads/stores misaligned words, which don't do the right thing.
			 */
			if (align < 4 && size >= 4)
			/* The code in the prolog () stores words when storing vtypes received in a register */
			if (MONO_TYPE_ISSTRUCT (sig->params [i]))
			offset += align - 1;
			offset &= ~(align - 1);
			ins->inst_offset = offset;

	/* align the offset to 8 bytes */
	cfg->stack_offset = offset;
/*
 * mono_arch_create_vars:
 *   Create arch-specific compile-time variables: the vret_addr arg for
 *   by-address struct returns, and (for AOT + sequence points) the
 *   seq_point_info / ss_trigger_page locals read by OP_SEQ_POINT.
 */
mono_arch_create_vars (MonoCompile *cfg)
	MonoMethodSignature *sig;

	sig = mono_method_signature (cfg->method);

	if (!cfg->arch.cinfo)
		cfg->arch.cinfo = get_call_info (cfg->mempool, sig, sig->pinvoke);
	cinfo = cfg->arch.cinfo;

	/* by-val struct returns are materialized locally, not via a caller slot */
	if (cinfo->ret.storage == RegTypeStructByVal)
		cfg->ret_var_is_local = TRUE;

	if (MONO_TYPE_ISSTRUCT (sig->ret) && cinfo->ret.storage != RegTypeStructByVal) {
		cfg->vret_addr = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_ARG);
		if (G_UNLIKELY (cfg->verbose_level > 1)) {
			printf ("vret_addr = ");
			mono_print_ins (cfg->vret_addr);

	if (cfg->gen_seq_points && cfg->compile_aot) {
		MonoInst *ins = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
		ins->flags |= MONO_INST_VOLATILE;
		cfg->arch.seq_point_info_var = ins;

		/* Allocate a separate variable for this to save 1 load per seq point */
		ins = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
		ins->flags |= MONO_INST_VOLATILE;
		cfg->arch.ss_trigger_page_var = ins;
/*
 * emit_sig_cookie:
 *   For vararg calls, store the "signature cookie" (a trimmed copy of
 *   the call signature) at its reserved stack slot so mono_ArgIterator
 *   can find the trailing arguments.  No-op for tail calls.
 */
emit_sig_cookie (MonoCompile *cfg, MonoCallInst *call, CallInfo *cinfo)
	MonoMethodSignature *tmp_sig;

	if (call->tail_call)

	/* FIXME: Add support for signature tokens to AOT */
	cfg->disable_aot = TRUE;

	g_assert (cinfo->sig_cookie.storage == RegTypeBase);

	/*
	 * mono_ArgIterator_Setup assumes the signature cookie is
	 * passed first and all the arguments which were before it are
	 * passed on the stack after the signature. So compensate by
	 * passing a different signature.
	 */
	tmp_sig = mono_metadata_signature_dup (call->signature);
	tmp_sig->param_count -= call->signature->sentinelpos;
	tmp_sig->sentinelpos = 0;
	memcpy (tmp_sig->params, call->signature->params + call->signature->sentinelpos, tmp_sig->param_count * sizeof (MonoType*));

	MONO_INST_NEW (cfg, sig_arg, OP_ICONST);
	sig_arg->dreg = mono_alloc_ireg (cfg);
	sig_arg->inst_p0 = tmp_sig;
	MONO_ADD_INS (cfg->cbb, sig_arg);

	MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, ARMREG_SP, cinfo->sig_cookie.offset, sig_arg->dreg);
/*
 * mono_arch_get_llvm_call_info:
 *   Translate the ARM CallInfo for SIG into LLVM argument descriptors.
 *   Unsupported storage kinds (vtypes etc.) disable the LLVM backend
 *   for this method via cfg->disable_llvm.
 */
mono_arch_get_llvm_call_info (MonoCompile *cfg, MonoMethodSignature *sig)
	LLVMCallInfo *linfo;

	n = sig->param_count + sig->hasthis;

	cinfo = get_call_info (cfg->mempool, sig, sig->pinvoke);

	linfo = mono_mempool_alloc0 (cfg->mempool, sizeof (LLVMCallInfo) + (sizeof (LLVMArgInfo) * n));

	/*
	 * LLVM always uses the native ABI while we use our own ABI, the
	 * only difference is the handling of vtypes:
	 * - we only pass/receive them in registers in some cases, and only
	 *   in 1 or 2 integer registers.
	 */
	if (cinfo->ret.storage != RegTypeGeneral && cinfo->ret.storage != RegTypeNone && cinfo->ret.storage != RegTypeFP && cinfo->ret.storage != RegTypeIRegPair) {
		cfg->exception_message = g_strdup ("unknown ret conv");
		cfg->disable_llvm = TRUE;

	for (i = 0; i < n; ++i) {
		ainfo = cinfo->args + i;

		linfo->args [i].storage = LLVMArgNone;

		switch (ainfo->storage) {
		case RegTypeGeneral:
		case RegTypeIRegPair:
			linfo->args [i].storage = LLVMArgInIReg;
			/* anything else (FP, vtypes, base slots) is unsupported here */
			cfg->exception_message = g_strdup_printf ("ainfo->storage (%d)", ainfo->storage);
			cfg->disable_llvm = TRUE;
/*
 * mono_arch_emit_call:
 *
 *   Emit the IR which moves each call argument into its ABI location
 * (core registers r0-r3, register pairs, the stack, or split reg/stack for
 * RegTypeBaseGen) according to the ArgInfo computed by get_call_info ().
 * Also emits the vararg signature cookie and the valuetype return-address
 * setup. The soft-float/FPA/VFP variants are selected with #ifdefs; note
 * that this chunk is missing interior lines (else-branches, 'break's),
 * so the control flow shown here is incomplete.
 */
1399 mono_arch_emit_call (MonoCompile *cfg, MonoCallInst *call)
1402 MonoMethodSignature *sig;
1406 sig = call->signature;
1407 n = sig->param_count + sig->hasthis;
1409 cinfo = get_call_info (NULL, sig, sig->pinvoke);
1411 for (i = 0; i < n; ++i) {
1412 ArgInfo *ainfo = cinfo->args + i;
/* The implicit 'this' argument is typed as a native int. */
1415 if (i >= sig->hasthis)
1416 t = sig->params [i - sig->hasthis];
1418 t = &mono_defaults.int_class->byval_arg;
1419 t = mini_type_get_underlying_type (NULL, t);
1421 if ((sig->call_convention == MONO_CALL_VARARG) && (i == sig->sentinelpos)) {
1422 /* Emit the signature cookie just before the implicit arguments */
1423 emit_sig_cookie (cfg, call, cinfo);
1426 in = call->args [i];
1428 switch (ainfo->storage) {
1429 case RegTypeGeneral:
1430 case RegTypeIRegPair:
/* 64 bit ints are passed in two consecutive core registers (low word first). */
1431 if (!t->byref && ((t->type == MONO_TYPE_I8) || (t->type == MONO_TYPE_U8))) {
1432 MONO_INST_NEW (cfg, ins, OP_MOVE);
1433 ins->dreg = mono_alloc_ireg (cfg);
1434 ins->sreg1 = in->dreg + 1;
1435 MONO_ADD_INS (cfg->cbb, ins);
1436 mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, ainfo->reg, FALSE);
1438 MONO_INST_NEW (cfg, ins, OP_MOVE);
1439 ins->dreg = mono_alloc_ireg (cfg);
1440 ins->sreg1 = in->dreg + 2;
1441 MONO_ADD_INS (cfg->cbb, ins);
1442 mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, ainfo->reg + 1, FALSE);
1443 } else if (!t->byref && ((t->type == MONO_TYPE_R8) || (t->type == MONO_TYPE_R4))) {
1444 #ifndef MONO_ARCH_SOFT_FLOAT
1448 if (ainfo->size == 4) {
1449 #ifdef MONO_ARCH_SOFT_FLOAT
1450 /* mono_emit_call_args () have already done the r8->r4 conversion */
1451 /* The converted value is in an int vreg */
1452 MONO_INST_NEW (cfg, ins, OP_MOVE);
1453 ins->dreg = mono_alloc_ireg (cfg);
1454 ins->sreg1 = in->dreg;
1455 MONO_ADD_INS (cfg->cbb, ins);
1456 mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, ainfo->reg, FALSE);
/* Hard-float: spill the R4 to the scratch area at SP and reload it into a core reg. */
1458 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORER4_MEMBASE_REG, ARMREG_SP, (cfg->param_area - 8), in->dreg);
1459 creg = mono_alloc_ireg (cfg);
1460 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOAD_MEMBASE, creg, ARMREG_SP, (cfg->param_area - 8));
1461 mono_call_inst_add_outarg_reg (cfg, call, creg, ainfo->reg, FALSE);
1464 #ifdef MONO_ARCH_SOFT_FLOAT
/* Soft-float R8: extract the two 32 bit halves into a core register pair. */
1465 MONO_INST_NEW (cfg, ins, OP_FGETLOW32);
1466 ins->dreg = mono_alloc_ireg (cfg);
1467 ins->sreg1 = in->dreg;
1468 MONO_ADD_INS (cfg->cbb, ins);
1469 mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, ainfo->reg, FALSE);
1471 MONO_INST_NEW (cfg, ins, OP_FGETHIGH32);
1472 ins->dreg = mono_alloc_ireg (cfg);
1473 ins->sreg1 = in->dreg;
1474 MONO_ADD_INS (cfg->cbb, ins);
1475 mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, ainfo->reg + 1, FALSE);
/* Hard-float R8: spill to the scratch area and reload both words into core regs. */
1477 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORER8_MEMBASE_REG, ARMREG_SP, (cfg->param_area - 8), in->dreg);
1478 creg = mono_alloc_ireg (cfg);
1479 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOAD_MEMBASE, creg, ARMREG_SP, (cfg->param_area - 8));
1480 mono_call_inst_add_outarg_reg (cfg, call, creg, ainfo->reg, FALSE);
1481 creg = mono_alloc_ireg (cfg);
1482 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOAD_MEMBASE, creg, ARMREG_SP, (cfg->param_area - 8 + 4));
1483 mono_call_inst_add_outarg_reg (cfg, call, creg, ainfo->reg + 1, FALSE);
1486 cfg->flags |= MONO_CFG_HAS_FPOUT;
/* Plain 32 bit value: a single move into the argument register. */
1488 MONO_INST_NEW (cfg, ins, OP_MOVE);
1489 ins->dreg = mono_alloc_ireg (cfg);
1490 ins->sreg1 = in->dreg;
1491 MONO_ADD_INS (cfg->cbb, ins);
1493 mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, ainfo->reg, FALSE);
1496 case RegTypeStructByAddr:
1499 /* FIXME: where is the data allocated? */
1500 arg->backend.reg3 = ainfo->reg;
1501 call->used_iregs |= 1 << ainfo->reg;
1502 g_assert_not_reached ();
1505 case RegTypeStructByVal:
/* Defer to mono_arch_emit_outarg_vt () via OP_OUTARG_VT; the ArgInfo is copied
 * into the mempool so it outlives this function's local cinfo. */
1506 MONO_INST_NEW (cfg, ins, OP_OUTARG_VT);
1507 ins->opcode = OP_OUTARG_VT;
1508 ins->sreg1 = in->dreg;
1509 ins->klass = in->klass;
1510 ins->inst_p0 = call;
1511 ins->inst_p1 = mono_mempool_alloc (cfg->mempool, sizeof (ArgInfo));
1512 memcpy (ins->inst_p1, ainfo, sizeof (ArgInfo));
1513 MONO_ADD_INS (cfg->cbb, ins);
/* Stack-passed arguments (RegTypeBase, presumably - the case label is on a missing line). */
1516 if (!t->byref && ((t->type == MONO_TYPE_I8) || (t->type == MONO_TYPE_U8))) {
1517 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, ARMREG_SP, ainfo->offset, in->dreg);
1518 } else if (!t->byref && ((t->type == MONO_TYPE_R4) || (t->type == MONO_TYPE_R8))) {
1519 if (t->type == MONO_TYPE_R8) {
1520 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORER8_MEMBASE_REG, ARMREG_SP, ainfo->offset, in->dreg);
1522 #ifdef MONO_ARCH_SOFT_FLOAT
/* Soft-float: the R4 already lives in an int vreg, store it as an int. */
1523 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, ARMREG_SP, ainfo->offset, in->dreg);
1525 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORER4_MEMBASE_REG, ARMREG_SP, ainfo->offset, in->dreg);
1529 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, ARMREG_SP, ainfo->offset, in->dreg);
1532 case RegTypeBaseGen:
/* Argument split across the last core register (r3) and the stack. */
1533 if (!t->byref && ((t->type == MONO_TYPE_I8) || (t->type == MONO_TYPE_U8))) {
1534 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, ARMREG_SP, ainfo->offset, (G_BYTE_ORDER == G_BIG_ENDIAN) ? in->dreg + 1 : in->dreg + 2);
1535 MONO_INST_NEW (cfg, ins, OP_MOVE);
1536 ins->dreg = mono_alloc_ireg (cfg);
1537 ins->sreg1 = G_BYTE_ORDER == G_BIG_ENDIAN ? in->dreg + 2 : in->dreg + 1;
1538 MONO_ADD_INS (cfg->cbb, ins);
1539 mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, ARMREG_R3, FALSE);
1540 } else if (!t->byref && (t->type == MONO_TYPE_R8)) {
1543 #ifdef MONO_ARCH_SOFT_FLOAT
1544 g_assert_not_reached ();
/* Hard-float: spill the double, put the first word in r3 and the second on the stack. */
1547 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORER8_MEMBASE_REG, ARMREG_SP, (cfg->param_area - 8), in->dreg);
1548 creg = mono_alloc_ireg (cfg);
1549 mono_call_inst_add_outarg_reg (cfg, call, creg, ARMREG_R3, FALSE);
1550 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOAD_MEMBASE, creg, ARMREG_SP, (cfg->param_area - 8));
1551 creg = mono_alloc_ireg (cfg);
1552 MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOAD_MEMBASE, creg, ARMREG_SP, (cfg->param_area - 4));
1553 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, ARMREG_SP, ainfo->offset, creg);
1554 cfg->flags |= MONO_CFG_HAS_FPOUT;
1556 g_assert_not_reached ();
/* RegTypeFP path (case label on a missing line): FP args are passed in int regs here. */
1563 arg->backend.reg3 = ainfo->reg;
1564 /* FP args are passed in int regs */
1565 call->used_iregs |= 1 << ainfo->reg;
1566 if (ainfo->size == 8) {
1567 arg->opcode = OP_OUTARG_R8;
1568 call->used_iregs |= 1 << (ainfo->reg + 1);
1570 arg->opcode = OP_OUTARG_R4;
1573 cfg->flags |= MONO_CFG_HAS_FPOUT;
1577 g_assert_not_reached ();
1581 /* Handle the case where there are no implicit arguments */
1582 if (!sig->pinvoke && (sig->call_convention == MONO_CALL_VARARG) && (n == sig->sentinelpos))
1583 emit_sig_cookie (cfg, call, cinfo);
1585 if (sig->ret && MONO_TYPE_ISSTRUCT (sig->ret)) {
1588 if (cinfo->ret.storage == RegTypeStructByVal) {
1589 /* The JIT will transform this into a normal call */
1590 call->vret_in_reg = TRUE;
/* Otherwise pass the valuetype return buffer address in the designated register. */
1592 MONO_INST_NEW (cfg, vtarg, OP_MOVE);
1593 vtarg->sreg1 = call->vret_var->dreg;
1594 vtarg->dreg = mono_alloc_preg (cfg);
1595 MONO_ADD_INS (cfg->cbb, vtarg);
1597 mono_call_inst_add_outarg_reg (cfg, call, vtarg->dreg, cinfo->ret.reg, FALSE);
1601 call->stack_usage = cinfo->stack_usage;
/*
 * mono_arch_emit_outarg_vt:
 *
 *   Emit the IR which passes a valuetype argument: the first ainfo->size
 * words are loaded into consecutive core registers starting at ainfo->reg,
 * and the overflow part (ovf_size words) is copied to the stack at
 * ainfo->offset. Interior lines are missing from this chunk (soffset
 * initialization is not visible).
 */
1607 mono_arch_emit_outarg_vt (MonoCompile *cfg, MonoInst *ins, MonoInst *src)
1609 MonoCallInst *call = (MonoCallInst*)ins->inst_p0;
1610 ArgInfo *ainfo = ins->inst_p1;
1611 int ovf_size = ainfo->vtsize;
1612 int doffset = ainfo->offset;
1613 int i, soffset, dreg;
1616 for (i = 0; i < ainfo->size; ++i) {
1617 dreg = mono_alloc_ireg (cfg);
1618 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, dreg, src->dreg, soffset);
1619 mono_call_inst_add_outarg_reg (cfg, call, dreg, ainfo->reg + i, FALSE);
1620 soffset += sizeof (gpointer);
1622 //g_print ("vt size: %d at R%d + %d\n", doffset, vt->inst_basereg, vt->inst_offset);
/* Copy the part which did not fit in registers onto the outgoing stack area. */
1624 mini_emit_memcpy (cfg, ARMREG_SP, doffset, src->dreg, soffset, ovf_size * sizeof (gpointer), 0);
/*
 * mono_arch_emit_setret:
 *
 *   Emit the IR which moves VAL into the return location of METHOD:
 * OP_SETLRET for 64 bit returns (register pair), OP_SETFRET / OP_MOVE /
 * OP_FMOVE for floating point depending on the FP configuration
 * (soft-float / VFP / FPA), plain OP_MOVE otherwise.
 */
1628 mono_arch_emit_setret (MonoCompile *cfg, MonoMethod *method, MonoInst *val)
1630 MonoType *ret = mini_type_get_underlying_type (cfg->generic_sharing_context, mono_method_signature (method)->ret);
1633 if (ret->type == MONO_TYPE_I8 || ret->type == MONO_TYPE_U8) {
1636 if (COMPILE_LLVM (cfg)) {
1637 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, cfg->ret->dreg, val->dreg);
/* JIT path: the two halves of the long live in vregs dreg+1/dreg+2. */
1639 MONO_INST_NEW (cfg, ins, OP_SETLRET);
1640 ins->sreg1 = val->dreg + 1;
1641 ins->sreg2 = val->dreg + 2;
1642 MONO_ADD_INS (cfg->cbb, ins);
1646 #ifdef MONO_ARCH_SOFT_FLOAT
1647 if (ret->type == MONO_TYPE_R8) {
1650 MONO_INST_NEW (cfg, ins, OP_SETFRET);
1651 ins->dreg = cfg->ret->dreg;
1652 ins->sreg1 = val->dreg;
1653 MONO_ADD_INS (cfg->cbb, ins);
1656 if (ret->type == MONO_TYPE_R4) {
1657 /* Already converted to an int in method_to_ir () */
1658 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, cfg->ret->dreg, val->dreg);
1661 #elif defined(ARM_FPU_VFP)
1662 if (ret->type == MONO_TYPE_R8 || ret->type == MONO_TYPE_R4) {
1665 MONO_INST_NEW (cfg, ins, OP_SETFRET);
1666 ins->dreg = cfg->ret->dreg;
1667 ins->sreg1 = val->dreg;
1668 MONO_ADD_INS (cfg->cbb, ins);
/* FPA (the remaining #else branch): a plain FP register move suffices. */
1672 if (ret->type == MONO_TYPE_R4 || ret->type == MONO_TYPE_R8) {
1673 MONO_EMIT_NEW_UNALU (cfg, OP_FMOVE, cfg->ret->dreg, val->dreg);
1680 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, cfg->ret->dreg, val->dreg);
1683 #endif /* #ifndef DISABLE_JIT */
/*
 * mono_arch_is_inst_imm:
 *   Whether IMM can be encoded as an instruction immediate (body not
 * visible in this chunk).
 */
1686 mono_arch_is_inst_imm (gint64 imm)
/* Max number of call arguments passed on the stack by the dyn-call path. */
1691 #define DYN_CALL_STACK_ARGS 6
/* Fields of the dyn-call argument buffer (struct declaration is on missing lines). */
1694 MonoMethodSignature *sig;
1699 mgreg_t regs [PARAM_REGS + DYN_CALL_STACK_ARGS];
/*
 * dyn_call_supported:
 *
 *   Return whether the dynamic-call path (mono_arch_start_dyn_call etc.)
 * can handle SIG/CINFO: all arguments must fit in the core registers plus
 * the DYN_CALL_STACK_ARGS stack slots, and the return convention must be
 * one of the supported kinds.
 */
1705 dyn_call_supported (CallInfo *cinfo, MonoMethodSignature *sig)
1709 if (sig->hasthis + sig->param_count > PARAM_REGS + DYN_CALL_STACK_ARGS)
1712 switch (cinfo->ret.storage) {
1714 case RegTypeGeneral:
1715 case RegTypeIRegPair:
1716 case RegTypeStructByAddr:
1721 #elif defined(ARM_FPU_VFP)
/* Per-argument check: stack arguments must fit in the fixed-size buffer. */
1730 for (i = 0; i < cinfo->nargs; ++i) {
1731 switch (cinfo->args [i].storage) {
1732 case RegTypeGeneral:
1734 case RegTypeIRegPair:
1737 if (cinfo->args [i].offset >= (DYN_CALL_STACK_ARGS * sizeof (gpointer)))
1740 case RegTypeStructByVal:
1741 if (cinfo->args [i].reg + cinfo->args [i].vtsize >= PARAM_REGS + DYN_CALL_STACK_ARGS)
1749 // FIXME: Can't use cinfo only as it doesn't contain info about I8/float */
1750 for (i = 0; i < sig->param_count; ++i) {
1751 MonoType *t = sig->params [i];
1759 #ifdef MONO_ARCH_SOFT_FLOAT
/*
 * mono_arch_dyn_call_prepare:
 *
 *   Compute the call info for SIG and return an ArchDynCallInfo usable by
 * mono_arch_start_dyn_call (), or NULL (presumably - the failure branch
 * body is on missing lines) when the signature is not supported.
 * The returned info must be freed with mono_arch_dyn_call_free ().
 */
1778 mono_arch_dyn_call_prepare (MonoMethodSignature *sig)
1780 ArchDynCallInfo *info;
1783 cinfo = get_call_info (NULL, sig, FALSE);
1785 if (!dyn_call_supported (cinfo, sig)) {
1790 info = g_new0 (ArchDynCallInfo, 1);
1791 // FIXME: Preprocess the info to speed up start_dyn_call ()
1793 info->cinfo = cinfo;
1795 return (MonoDynCallInfo*)info;
/*
 * mono_arch_dyn_call_free:
 *   Free an ArchDynCallInfo returned by mono_arch_dyn_call_prepare ()
 * (the g_free of AINFO itself is on a missing line).
 */
1799 mono_arch_dyn_call_free (MonoDynCallInfo *info)
1801 ArchDynCallInfo *ainfo = (ArchDynCallInfo*)info;
1803 g_free (ainfo->cinfo);
/*
 * mono_arch_start_dyn_call:
 *
 *   Marshal ARGS into the DynCallArgs buffer BUF according to INFO so the
 * dyn-call trampoline can load p->regs into r0-r3 and the stack slots.
 * Slot layout: slots [0, PARAM_REGS) map to core registers, slots from
 * PARAM_REGS on map to stack words (ainfo->offset / 4). A valuetype return
 * address, if any, occupies the first register.
 */
1808 mono_arch_start_dyn_call (MonoDynCallInfo *info, gpointer **args, guint8 *ret, guint8 *buf, int buf_len)
1810 ArchDynCallInfo *dinfo = (ArchDynCallInfo*)info;
1811 DynCallArgs *p = (DynCallArgs*)buf;
1812 int arg_index, greg, i, j;
1813 MonoMethodSignature *sig = dinfo->sig;
1814 
1815 g_assert (buf_len >= sizeof (DynCallArgs));
/* Hidden return-buffer pointer goes first, then the 'this' argument. */
1823 if (dinfo->cinfo->vtype_retaddr)
1824 p->regs [greg ++] = (mgreg_t)ret;
1827 p->regs [greg ++] = (mgreg_t)*(args [arg_index ++]);
1829 for (i = 0; i < sig->param_count; i++) {
1830 MonoType *t = mono_type_get_underlying_type (sig->params [i]);
1831 gpointer *arg = args [arg_index ++];
1832 ArgInfo *ainfo = &dinfo->cinfo->args [i + sig->hasthis];
/* Map the ABI location to a slot index in p->regs. */
1835 if (ainfo->storage == RegTypeGeneral || ainfo->storage == RegTypeIRegPair || ainfo->storage == RegTypeStructByVal)
1837 else if (ainfo->storage == RegTypeBase)
1838 slot = PARAM_REGS + (ainfo->offset / 4);
1840 g_assert_not_reached ();
/* byref arguments (presumably - the condition is on a missing line) are passed as-is. */
1843 p->regs [slot] = (mgreg_t)*arg;
1848 case MONO_TYPE_STRING:
1849 case MONO_TYPE_CLASS:
1850 case MONO_TYPE_ARRAY:
1851 case MONO_TYPE_SZARRAY:
1852 case MONO_TYPE_OBJECT:
1856 p->regs [slot] = (mgreg_t)*arg;
/* Narrow integer types are widened with the correct signedness. */
1858 case MONO_TYPE_BOOLEAN:
1860 p->regs [slot] = *(guint8*)arg;
1863 p->regs [slot] = *(gint8*)arg;
1866 p->regs [slot] = *(gint16*)arg;
1869 case MONO_TYPE_CHAR:
1870 p->regs [slot] = *(guint16*)arg;
1873 p->regs [slot] = *(gint32*)arg;
1876 p->regs [slot] = *(guint32*)arg;
/* 64 bit values occupy two consecutive slots. */
1880 p->regs [slot ++] = (mgreg_t)arg [0];
1881 p->regs [slot] = (mgreg_t)arg [1];
1884 p->regs [slot] = *(mgreg_t*)arg;
1887 p->regs [slot ++] = (mgreg_t)arg [0];
1888 p->regs [slot] = (mgreg_t)arg [1];
1890 case MONO_TYPE_GENERICINST:
1891 if (MONO_TYPE_IS_REFERENCE (t)) {
1892 p->regs [slot] = (mgreg_t)*arg;
1897 case MONO_TYPE_VALUETYPE:
1898 g_assert (ainfo->storage == RegTypeStructByVal);
/* An empty register part means the struct lives entirely on the stack. */
1900 if (ainfo->size == 0)
1901 slot = PARAM_REGS + (ainfo->offset / 4);
1905 for (j = 0; j < ainfo->size + ainfo->vtsize; ++j)
1906 p->regs [slot ++] = ((mgreg_t*)arg) [j];
1909 g_assert_not_reached ();
/*
 * mono_arch_finish_dyn_call:
 *
 *   Store the result of a dynamic call (left in BUF by the trampoline as
 * res/res2, i.e. r0/r1) into the return buffer RET, converting to the
 * width required by the signature's return type.
 */
1915 mono_arch_finish_dyn_call (MonoDynCallInfo *info, guint8 *buf)
1917 ArchDynCallInfo *ainfo = (ArchDynCallInfo*)info;
1918 MonoMethodSignature *sig = ((ArchDynCallInfo*)info)->sig;
1919 guint8 *ret = ((DynCallArgs*)buf)->ret;
1920 mgreg_t res = ((DynCallArgs*)buf)->res;
1921 mgreg_t res2 = ((DynCallArgs*)buf)->res2;
1923 switch (mono_type_get_underlying_type (sig->ret)->type) {
1924 case MONO_TYPE_VOID:
1925 *(gpointer*)ret = NULL;
1927 case MONO_TYPE_STRING:
1928 case MONO_TYPE_CLASS:
1929 case MONO_TYPE_ARRAY:
1930 case MONO_TYPE_SZARRAY:
1931 case MONO_TYPE_OBJECT:
1935 *(gpointer*)ret = (gpointer)res;
1941 case MONO_TYPE_BOOLEAN:
1942 *(guint8*)ret = res;
1945 *(gint16*)ret = res;
1948 case MONO_TYPE_CHAR:
1949 *(guint16*)ret = res;
1952 *(gint32*)ret = res;
1955 *(guint32*)ret = res;
/* 64 bit results: r0 is the low word, r1 the high word. */
1959 /* This handles endianness as well */
1960 ((gint32*)ret) [0] = res;
1961 ((gint32*)ret) [1] = res2;
1963 case MONO_TYPE_GENERICINST:
1964 if (MONO_TYPE_IS_REFERENCE (sig->ret)) {
1965 *(gpointer*)ret = (gpointer)res;
1970 case MONO_TYPE_VALUETYPE:
/* Value types are written through the hidden return-address argument; nothing to do here. */
1971 g_assert (ainfo->cinfo->vtype_retaddr);
1974 #if defined(ARM_FPU_VFP)
1976 *(float*)ret = *(float*)&res;
1978 case MONO_TYPE_R8: {
/* NOTE(review): the line below contains a mis-encoded token ("®s"); upstream
 * reads "*(double*)&regs" - verify against the original source. */
1984 *(double*)ret = *(double*)®s;
1989 g_assert_not_reached ();
1996 * Allow tracing to work with this interface (with an optional argument)
/*
 * mono_arch_instrument_prolog:
 *   Emit a call to FUNC (cfg->method, NULL) at method entry for tracing.
 * r0 = method, r1 = frame pointer (currently always NULL), r2 = callee.
 */
2000 mono_arch_instrument_prolog (MonoCompile *cfg, void *func, void *p, gboolean enable_arguments)
2004 code = mono_arm_emit_load_imm (code, ARMREG_R0, (guint32)cfg->method);
2005 ARM_MOV_REG_IMM8 (code, ARMREG_R1, 0); /* NULL ebp for now */
2006 code = mono_arm_emit_load_imm (code, ARMREG_R2, (guint32)func);
2007 code = emit_call_reg (code, ARMREG_R2);
/*
 * mono_arch_instrument_epilog_full:
 *
 *   Emit a call to FUNC just before method exit for tracing. The return
 * value registers are spilled to cfg->param_area before the call and
 * restored afterwards; how much is saved depends on the return type
 * (SAVE_NONE / SAVE_ONE / SAVE_TWO / SAVE_FP / SAVE_STRUCT).
 */
2020 mono_arch_instrument_epilog_full (MonoCompile *cfg, void *func, void *p, gboolean enable_arguments, gboolean preserve_argument_registers)
2023 int save_mode = SAVE_NONE;
2025 MonoMethod *method = cfg->method;
2026 int rtype = mini_type_get_underlying_type (cfg->generic_sharing_context, mono_method_signature (method)->ret)->type;
2027 int save_offset = cfg->param_area;
/* Grow the native code buffer if the ~16 instructions we emit might not fit. */
2031 offset = code - cfg->native_code;
2032 /* we need about 16 instructions */
2033 if (offset > (cfg->code_size - 16 * 4)) {
2034 cfg->code_size *= 2;
2035 cfg->native_code = g_realloc (cfg->native_code, cfg->code_size);
2036 code = cfg->native_code + offset;
2039 case MONO_TYPE_VOID:
2040 /* special case string .ctor icall
 * NOTE(review): strcmp () != 0 means the name is NOT ".ctor" - this
 * condition looks inverted; verify against upstream before relying on it. */
2041 if (strcmp (".ctor", method->name) && method->klass == mono_defaults.string_class)
2042 save_mode = SAVE_ONE;
2044 save_mode = SAVE_NONE;
2048 save_mode = SAVE_TWO;
2052 save_mode = SAVE_FP;
2054 case MONO_TYPE_VALUETYPE:
2055 save_mode = SAVE_STRUCT;
2058 save_mode = SAVE_ONE;
/* Spill the live return registers, and expose them as trace arguments if requested. */
2062 switch (save_mode) {
2064 ARM_STR_IMM (code, ARMREG_R0, cfg->frame_reg, save_offset);
2065 ARM_STR_IMM (code, ARMREG_R1, cfg->frame_reg, save_offset + 4);
2066 if (enable_arguments) {
2067 ARM_MOV_REG_REG (code, ARMREG_R2, ARMREG_R1);
2068 ARM_MOV_REG_REG (code, ARMREG_R1, ARMREG_R0);
2072 ARM_STR_IMM (code, ARMREG_R0, cfg->frame_reg, save_offset);
2073 if (enable_arguments) {
2074 ARM_MOV_REG_REG (code, ARMREG_R1, ARMREG_R0);
2078 /* FIXME: what reg? */
2079 if (enable_arguments) {
2080 /* FIXME: what reg? */
2084 if (enable_arguments) {
2085 /* FIXME: get the actual address */
2086 ARM_MOV_REG_REG (code, ARMREG_R1, ARMREG_R0);
/* Call the trace function with the method in r0. */
2094 code = mono_arm_emit_load_imm (code, ARMREG_R0, (guint32)cfg->method);
2095 code = mono_arm_emit_load_imm (code, ARMREG_IP, (guint32)func);
2096 code = emit_call_reg (code, ARMREG_IP);
/* Restore the saved return value registers. */
2098 switch (save_mode) {
2100 ARM_LDR_IMM (code, ARMREG_R0, cfg->frame_reg, save_offset);
2101 ARM_LDR_IMM (code, ARMREG_R1, cfg->frame_reg, save_offset + 4);
2104 ARM_LDR_IMM (code, ARMREG_R0, cfg->frame_reg, save_offset);
2118 * The immediate field for cond branches is big enough for all reasonable methods
/* Emit a conditional branch to ins->inst_true_bb. The direct-encoding fast
 * path is deliberately disabled ("0 &&"); a patch entry is always recorded
 * and the branch target filled in later. */
2120 #define EMIT_COND_BRANCH_FLAGS(ins,condcode) \
2121 if (0 && ins->inst_true_bb->native_offset) { \
2122 ARM_B_COND (code, (condcode), (code - cfg->native_code + ins->inst_true_bb->native_offset) & 0xffffff); \
2124 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_BB, ins->inst_true_bb); \
2125 ARM_B_COND (code, (condcode), 0); \
2128 #define EMIT_COND_BRANCH(ins,cond) EMIT_COND_BRANCH_FLAGS(ins, branch_cc_table [(cond)])
2130 /* emit an exception if condition is fail
2132 * We assign the extra code used to throw the implicit exceptions
2133 * to cfg->bb_exit as far as the big branch handling is concerned
/* Emit a conditional BL to an exception-throwing stub, patched in later via
 * a MONO_PATCH_INFO_EXC entry. */
2135 #define EMIT_COND_SYSTEM_EXCEPTION_FLAGS(condcode,exc_name) \
2137 mono_add_patch_info (cfg, code - cfg->native_code, \
2138 MONO_PATCH_INFO_EXC, exc_name); \
2139 ARM_BL_COND (code, (condcode), 0); \
2142 #define EMIT_COND_SYSTEM_EXCEPTION(cond,exc_name) EMIT_COND_SYSTEM_EXCEPTION_FLAGS(branch_cc_table [(cond)], (exc_name))
/* First peephole pass: no ARM-specific work is done here (body on missing lines). */
2145 mono_arch_peephole_pass_1 (MonoCompile *cfg, MonoBasicBlock *bb)
/*
 * mono_arch_peephole_pass_2:
 *
 *   Local peephole optimizations over BB: forward stored values into
 * subsequent loads from the same [basereg + offset] location, collapse
 * redundant loads, turn store-imm/load pairs into OP_ICONST, and delete
 * no-op or back-and-forth register moves.
 */
2150 mono_arch_peephole_pass_2 (MonoCompile *cfg, MonoBasicBlock *bb)
2152 MonoInst *ins, *n, *last_ins = NULL;
2154 MONO_BB_FOR_EACH_INS_SAFE (bb, n, ins) {
2155 switch (ins->opcode) {
2158 /* Already done by an arch-independent pass */
2160 case OP_LOAD_MEMBASE:
2161 case OP_LOADI4_MEMBASE:
2163 * OP_STORE_MEMBASE_REG reg, offset(basereg)
2164 * OP_LOAD_MEMBASE offset(basereg), reg
2166 if (last_ins && (last_ins->opcode == OP_STOREI4_MEMBASE_REG
2167 || last_ins->opcode == OP_STORE_MEMBASE_REG) &&
2168 ins->inst_basereg == last_ins->inst_destbasereg &&
2169 ins->inst_offset == last_ins->inst_offset) {
/* Load of the value just stored: drop the load, or turn it into a move. */
2170 if (ins->dreg == last_ins->sreg1) {
2171 MONO_DELETE_INS (bb, ins);
2174 //static int c = 0; printf ("MATCHX %s %d\n", cfg->method->name,c++);
2175 ins->opcode = OP_MOVE;
2176 ins->sreg1 = last_ins->sreg1;
2180 * Note: reg1 must be different from the basereg in the second load
2181 * OP_LOAD_MEMBASE offset(basereg), reg1
2182 * OP_LOAD_MEMBASE offset(basereg), reg2
2184 * OP_LOAD_MEMBASE offset(basereg), reg1
2185 * OP_MOVE reg1, reg2
2187 } if (last_ins && (last_ins->opcode == OP_LOADI4_MEMBASE
2188 || last_ins->opcode == OP_LOAD_MEMBASE) &&
2189 ins->inst_basereg != last_ins->dreg &&
2190 ins->inst_basereg == last_ins->inst_basereg &&
2191 ins->inst_offset == last_ins->inst_offset) {
2193 if (ins->dreg == last_ins->dreg) {
2194 MONO_DELETE_INS (bb, ins);
2197 ins->opcode = OP_MOVE;
2198 ins->sreg1 = last_ins->dreg;
2201 //g_assert_not_reached ();
2205 * OP_STORE_MEMBASE_IMM imm, offset(basereg)
2206 * OP_LOAD_MEMBASE offset(basereg), reg
2208 * OP_STORE_MEMBASE_IMM imm, offset(basereg)
2209 * OP_ICONST reg, imm
2211 } else if (last_ins && (last_ins->opcode == OP_STOREI4_MEMBASE_IMM
2212 || last_ins->opcode == OP_STORE_MEMBASE_IMM) &&
2213 ins->inst_basereg == last_ins->inst_destbasereg &&
2214 ins->inst_offset == last_ins->inst_offset) {
2215 //static int c = 0; printf ("MATCHX %s %d\n", cfg->method->name,c++);
2216 ins->opcode = OP_ICONST;
2217 ins->inst_c0 = last_ins->inst_imm;
/* This rule has apparently never fired; the assert guards against untested behavior. */
2218 g_assert_not_reached (); // check this rule
/* Narrow loads after a same-width store become sign/zero extensions of the stored reg. */
2222 case OP_LOADU1_MEMBASE:
2223 case OP_LOADI1_MEMBASE:
2224 if (last_ins && (last_ins->opcode == OP_STOREI1_MEMBASE_REG) &&
2225 ins->inst_basereg == last_ins->inst_destbasereg &&
2226 ins->inst_offset == last_ins->inst_offset) {
2227 ins->opcode = (ins->opcode == OP_LOADI1_MEMBASE) ? OP_ICONV_TO_I1 : OP_ICONV_TO_U1;
2228 ins->sreg1 = last_ins->sreg1;
2231 case OP_LOADU2_MEMBASE:
2232 case OP_LOADI2_MEMBASE:
2233 if (last_ins && (last_ins->opcode == OP_STOREI2_MEMBASE_REG) &&
2234 ins->inst_basereg == last_ins->inst_destbasereg &&
2235 ins->inst_offset == last_ins->inst_offset) {
2236 ins->opcode = (ins->opcode == OP_LOADI2_MEMBASE) ? OP_ICONV_TO_I2 : OP_ICONV_TO_U2;
2237 ins->sreg1 = last_ins->sreg1;
2241 ins->opcode = OP_MOVE;
/* Self-moves and move/counter-move pairs are dead. */
2245 if (ins->dreg == ins->sreg1) {
2246 MONO_DELETE_INS (bb, ins);
2250 * OP_MOVE sreg, dreg
2251 * OP_MOVE dreg, sreg
2253 if (last_ins && last_ins->opcode == OP_MOVE &&
2254 ins->sreg1 == last_ins->dreg &&
2255 ins->dreg == last_ins->sreg1) {
2256 MONO_DELETE_INS (bb, ins);
2264 bb->last_ins = last_ins;
2268 * the branch_cc_table should maintain the order of these
/* Maps mini branch condition indices to ARM condition codes (entries on missing lines). */
2282 branch_cc_table [] = {
/* Allocate a new instruction and insert it before INS in the current bb
 * (relies on 'bb' and 'ins' being in scope at the expansion site). */
2296 #define NEW_INS(cfg,dest,op) do { \
2297 MONO_INST_NEW ((cfg), (dest), (op)); \
2298 mono_bblock_insert_before_ins (bb, ins, (dest)); \
/*
 * map_to_reg_reg_op:
 *
 *   Map an opcode with an immediate/membase operand to its register-register
 * equivalent: _IMM ops to their reg forms, _MEMBASE loads/stores to
 * _MEMINDEX, and _MEMBASE_IMM stores to _MEMBASE_REG (the immediate is
 * loaded into a register by the caller, mono_arch_lowering_pass).
 */
2302 map_to_reg_reg_op (int op)
2311 case OP_COMPARE_IMM:
2313 case OP_ICOMPARE_IMM:
2327 case OP_LOAD_MEMBASE:
2328 return OP_LOAD_MEMINDEX;
2329 case OP_LOADI4_MEMBASE:
2330 return OP_LOADI4_MEMINDEX;
2331 case OP_LOADU4_MEMBASE:
2332 return OP_LOADU4_MEMINDEX;
2333 case OP_LOADU1_MEMBASE:
2334 return OP_LOADU1_MEMINDEX;
2335 case OP_LOADI2_MEMBASE:
2336 return OP_LOADI2_MEMINDEX;
2337 case OP_LOADU2_MEMBASE:
2338 return OP_LOADU2_MEMINDEX;
2339 case OP_LOADI1_MEMBASE:
2340 return OP_LOADI1_MEMINDEX;
2341 case OP_STOREI1_MEMBASE_REG:
2342 return OP_STOREI1_MEMINDEX;
2343 case OP_STOREI2_MEMBASE_REG:
2344 return OP_STOREI2_MEMINDEX;
2345 case OP_STOREI4_MEMBASE_REG:
2346 return OP_STOREI4_MEMINDEX;
2347 case OP_STORE_MEMBASE_REG:
2348 return OP_STORE_MEMINDEX;
2349 case OP_STORER4_MEMBASE_REG:
2350 return OP_STORER4_MEMINDEX;
2351 case OP_STORER8_MEMBASE_REG:
2352 return OP_STORER8_MEMINDEX;
/* Store-immediate variants become store-register; the caller materializes the imm. */
2353 case OP_STORE_MEMBASE_IMM:
2354 return OP_STORE_MEMBASE_REG;
2355 case OP_STOREI1_MEMBASE_IMM:
2356 return OP_STOREI1_MEMBASE_REG;
2357 case OP_STOREI2_MEMBASE_IMM:
2358 return OP_STOREI2_MEMBASE_REG;
2359 case OP_STOREI4_MEMBASE_IMM:
2360 return OP_STOREI4_MEMBASE_REG;
2362 g_assert_not_reached ();
2366 * Remove from the instruction list the instructions that can't be
2367 * represented with very simple instructions with no register
/*
 * mono_arch_lowering_pass:
 *
 *   Lower instructions whose operands cannot be encoded by ARM: immediates
 * that are not valid rotated 8-bit constants are materialized via
 * OP_ICONST into a fresh vreg; membase offsets outside the imm12/imm8/
 * fpimm8 ranges are rewritten to memindex forms or to a base-register
 * adjustment (OP_ADD_IMM) plus a small residual offset. Also performs a
 * few strength reductions (mul by 0/1/power-of-two) and fixes up
 * FP compare/branch pairs that need swapped operands.
 */
2371 mono_arch_lowering_pass (MonoCompile *cfg, MonoBasicBlock *bb)
2373 MonoInst *ins, *temp, *last_ins = NULL;
2374 int rot_amount, imm8, low_imm;
2376 MONO_BB_FOR_EACH_INS (bb, ins) {
2378 switch (ins->opcode) {
2382 case OP_COMPARE_IMM:
2383 case OP_ICOMPARE_IMM:
/* ARM data-processing immediates are an 8-bit value rotated by an even amount. */
2397 if ((imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount)) < 0) {
2398 NEW_INS (cfg, temp, OP_ICONST);
2399 temp->inst_c0 = ins->inst_imm;
2400 temp->dreg = mono_alloc_ireg (cfg);
2401 ins->sreg2 = temp->dreg;
2402 ins->opcode = mono_op_imm_to_op (ins->opcode);
2404 if (ins->opcode == OP_SBB || ins->opcode == OP_ISBB || ins->opcode == OP_SUBCC)
/* Multiply strength reduction: x*1 -> move, x*0 -> const 0, x*2^k -> shift. */
2410 if (ins->inst_imm == 1) {
2411 ins->opcode = OP_MOVE;
2414 if (ins->inst_imm == 0) {
2415 ins->opcode = OP_ICONST;
2419 imm8 = mono_is_power_of_two (ins->inst_imm);
2421 ins->opcode = OP_SHL_IMM;
2422 ins->inst_imm = imm8;
2425 NEW_INS (cfg, temp, OP_ICONST);
2426 temp->inst_c0 = ins->inst_imm;
2427 temp->dreg = mono_alloc_ireg (cfg);
2428 ins->sreg2 = temp->dreg;
2429 ins->opcode = OP_IMUL;
2435 if (ins->next && (ins->next->opcode == OP_COND_EXC_C || ins->next->opcode == OP_COND_EXC_IC))
2436 /* ARM sets the C flag to 1 if there was _no_ overflow */
2437 ins->next->opcode = OP_COND_EXC_NC;
2439 case OP_LOCALLOC_IMM:
2440 NEW_INS (cfg, temp, OP_ICONST);
2441 temp->inst_c0 = ins->inst_imm;
2442 temp->dreg = mono_alloc_ireg (cfg);
2443 ins->sreg1 = temp->dreg;
2444 ins->opcode = OP_LOCALLOC;
2446 case OP_LOAD_MEMBASE:
2447 case OP_LOADI4_MEMBASE:
2448 case OP_LOADU4_MEMBASE:
2449 case OP_LOADU1_MEMBASE:
2450 /* we can do two things: load the immed in a register
2451 * and use an indexed load, or see if the immed can be
2452 * represented as an ad_imm + a load with a smaller offset
2453 * that fits. We just do the first for now, optimize later.
2455 if (arm_is_imm12 (ins->inst_offset))
2457 NEW_INS (cfg, temp, OP_ICONST);
2458 temp->inst_c0 = ins->inst_offset;
2459 temp->dreg = mono_alloc_ireg (cfg);
2460 ins->sreg2 = temp->dreg;
2461 ins->opcode = map_to_reg_reg_op (ins->opcode);
/* Halfword/signed-byte loads only take an 8-bit offset (LDRH/LDRSB encoding). */
2463 case OP_LOADI2_MEMBASE:
2464 case OP_LOADU2_MEMBASE:
2465 case OP_LOADI1_MEMBASE:
2466 if (arm_is_imm8 (ins->inst_offset))
2468 NEW_INS (cfg, temp, OP_ICONST);
2469 temp->inst_c0 = ins->inst_offset;
2470 temp->dreg = mono_alloc_ireg (cfg);
2471 ins->sreg2 = temp->dreg;
2472 ins->opcode = map_to_reg_reg_op (ins->opcode);
2474 case OP_LOADR4_MEMBASE:
2475 case OP_LOADR8_MEMBASE:
2476 if (arm_is_fpimm8 (ins->inst_offset))
/* Split the offset into a rotated-imm8 base adjustment plus a 9-bit residue. */
2478 low_imm = ins->inst_offset & 0x1ff;
2479 if ((imm8 = mono_arm_is_rotated_imm8 (ins->inst_offset & ~0x1ff, &rot_amount)) >= 0) {
2480 NEW_INS (cfg, temp, OP_ADD_IMM);
2481 temp->inst_imm = ins->inst_offset & ~0x1ff;
2482 temp->sreg1 = ins->inst_basereg;
2483 temp->dreg = mono_alloc_ireg (cfg);
2484 ins->inst_basereg = temp->dreg;
2485 ins->inst_offset = low_imm;
2488 /* VFP/FPA doesn't have indexed load instructions */
2489 g_assert_not_reached ();
2491 case OP_STORE_MEMBASE_REG:
2492 case OP_STOREI4_MEMBASE_REG:
2493 case OP_STOREI1_MEMBASE_REG:
2494 if (arm_is_imm12 (ins->inst_offset))
2496 NEW_INS (cfg, temp, OP_ICONST);
2497 temp->inst_c0 = ins->inst_offset;
2498 temp->dreg = mono_alloc_ireg (cfg);
2499 ins->sreg2 = temp->dreg;
2500 ins->opcode = map_to_reg_reg_op (ins->opcode);
2502 case OP_STOREI2_MEMBASE_REG:
2503 if (arm_is_imm8 (ins->inst_offset))
2505 NEW_INS (cfg, temp, OP_ICONST);
2506 temp->inst_c0 = ins->inst_offset;
2507 temp->dreg = mono_alloc_ireg (cfg);
2508 ins->sreg2 = temp->dreg;
2509 ins->opcode = map_to_reg_reg_op (ins->opcode);
2511 case OP_STORER4_MEMBASE_REG:
2512 case OP_STORER8_MEMBASE_REG:
2513 if (arm_is_fpimm8 (ins->inst_offset))
2515 low_imm = ins->inst_offset & 0x1ff;
2516 if ((imm8 = mono_arm_is_rotated_imm8 (ins->inst_offset & ~ 0x1ff, &rot_amount)) >= 0 && arm_is_fpimm8 (low_imm)) {
2517 NEW_INS (cfg, temp, OP_ADD_IMM);
2518 temp->inst_imm = ins->inst_offset & ~0x1ff;
2519 temp->sreg1 = ins->inst_destbasereg;
2520 temp->dreg = mono_alloc_ireg (cfg);
2521 ins->inst_destbasereg = temp->dreg;
2522 ins->inst_offset = low_imm;
2525 /*g_print ("fail with: %d (%d, %d)\n", ins->inst_offset, ins->inst_offset & ~0x1ff, low_imm);*/
2526 /* VFP/FPA doesn't have indexed store instructions */
2527 g_assert_not_reached ();
2529 case OP_STORE_MEMBASE_IMM:
2530 case OP_STOREI1_MEMBASE_IMM:
2531 case OP_STOREI2_MEMBASE_IMM:
2532 case OP_STOREI4_MEMBASE_IMM:
2533 NEW_INS (cfg, temp, OP_ICONST);
2534 temp->inst_c0 = ins->inst_imm;
2535 temp->dreg = mono_alloc_ireg (cfg);
2536 ins->sreg1 = temp->dreg;
2537 ins->opcode = map_to_reg_reg_op (ins->opcode);
2539 goto loop_start; /* make it handle the possibly big ins->inst_offset */
2541 gboolean swap = FALSE;
2545 /* Optimized away */
2550 /* Some fp compares require swapped operands */
2551 switch (ins->next->opcode) {
2553 ins->next->opcode = OP_FBLT;
2557 ins->next->opcode = OP_FBLT_UN;
2561 ins->next->opcode = OP_FBGE;
2565 ins->next->opcode = OP_FBGE_UN;
2573 ins->sreg1 = ins->sreg2;
2582 bb->last_ins = last_ins;
2583 bb->max_vreg = cfg->next_vreg;
/*
 * mono_arch_decompose_long_opts:
 *
 *   Decompose 64 bit opcodes into 32 bit pairs. OP_LNEG becomes
 * RSBS/RSC with 0 (reverse-subtract with carry), negating the low and
 * high words as a unit. NOTE(review): the body references 'ins', declared
 * on lines missing from this chunk.
 */
2587 mono_arch_decompose_long_opts (MonoCompile *cfg, MonoInst *long_ins)
2591 if (long_ins->opcode == OP_LNEG) {
2593 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ARM_RSBS_IMM, ins->dreg + 1, ins->sreg1 + 1, 0);
2594 MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ARM_RSC_IMM, ins->dreg + 2, ins->sreg1 + 2, 0);
/*
 * emit_float_to_int:
 *
 *   Emit native code converting the FP register SREG to the integer
 * register DREG, truncated/clamped to SIZE bytes with the requested
 * signedness (the final shifts sign- or zero-extend the narrow result).
 */
2600 emit_float_to_int (MonoCompile *cfg, guchar *code, int dreg, int sreg, int size, gboolean is_signed)
2602 /* sreg is a float, dreg is an integer reg */
2604 ARM_FIXZ (code, dreg, sreg);
2605 #elif defined(ARM_FPU_VFP)
/* VFP: convert to (un)signed int in a scratch VFP reg, then move to the core reg. */
2607 ARM_TOSIZD (code, ARM_VFP_F0, sreg);
2609 ARM_TOUIZD (code, ARM_VFP_F0, sreg);
2610 ARM_FMRS (code, dreg, ARM_VFP_F0);
/* Unsigned narrowing: mask (size 1) or shift-pair zero-extension (size 2). */
2614 ARM_AND_REG_IMM8 (code, dreg, dreg, 0xff);
2615 else if (size == 2) {
2616 ARM_SHL_IMM (code, dreg, dreg, 16);
2617 ARM_SHR_IMM (code, dreg, dreg, 16);
/* Signed narrowing: shift left then arithmetic shift right. */
2621 ARM_SHL_IMM (code, dreg, dreg, 24);
2622 ARM_SAR_IMM (code, dreg, dreg, 24);
2623 } else if (size == 2) {
2624 ARM_SHL_IMM (code, dreg, dreg, 16);
2625 ARM_SAR_IMM (code, dreg, dreg, 16);
2631 #endif /* #ifndef DISABLE_JIT */
/* Field of the PatchData struct (declaration on missing lines). */
2635 const guchar *target;
/* ARM B/BL encodes a signed 24-bit word offset: reachable range is +/-32MB
 * (33554431 == 2^25 - 1 bytes). */
2640 #define is_call_imm(diff) ((gint)(diff) >= -33554432 && (gint)(diff) <= 33554431)
/*
 * search_thunk_slot:
 *
 *   mono_domain_code_foreach () callback: look inside one code chunk for a
 * branch thunk targeting pdata->target, or a free 12-byte slot in which to
 * emit one, and patch pdata->code to branch to it. A thunk is three words:
 * a PC-relative load of the target into IP, a jump through IP, and the
 * target address itself. Sets pdata->found on success.
 */
2643 search_thunk_slot (void *data, int csize, int bsize, void *user_data) {
2644 PatchData *pdata = (PatchData*)user_data;
2645 guchar *code = data;
2646 guint32 *thunks = data;
2647 guint32 *endthunks = (guint32*)(code + bsize);
2649 int difflow, diffhigh;
2651 /* always ensure a call from pdata->code can reach to the thunks without further thunks */
2652 difflow = (char*)pdata->code - (char*)thunks;
2653 diffhigh = (char*)pdata->code - (char*)endthunks;
2654 if (!((is_call_imm (thunks) && is_call_imm (endthunks)) || (is_call_imm (difflow) && is_call_imm (diffhigh))))
2658 * The thunk is composed of 3 words:
2659 * load constant from thunks [2] into ARM_IP
2662 * Note that the LR register is already setup
2664 //g_print ("thunk nentries: %d\n", ((char*)endthunks - (char*)thunks)/16);
2665 if ((pdata->found == 2) || (pdata->code >= code && pdata->code <= code + csize)) {
2666 while (thunks < endthunks) {
2667 //g_print ("looking for target: %p at %p (%08x-%08x)\n", pdata->target, thunks, thunks [0], thunks [1]);
/* Reuse an existing thunk which already points at the target. */
2668 if (thunks [2] == (guint32)pdata->target) {
2669 arm_patch (pdata->code, (guchar*)thunks);
2670 mono_arch_flush_icache (pdata->code, 4);
2673 } else if ((thunks [0] == 0) && (thunks [1] == 0) && (thunks [2] == 0)) {
2674 /* found a free slot instead: emit thunk */
2675 /* ARMREG_IP is fine to use since this can't be an IMT call
2678 code = (guchar*)thunks;
2679 ARM_LDR_IMM (code, ARMREG_IP, ARMREG_PC, 0);
/* BX is needed for correct interworking when Thumb code may be the target. */
2680 if (thumb_supported)
2681 ARM_BX (code, ARMREG_IP);
2683 ARM_MOV_REG_REG (code, ARMREG_PC, ARMREG_IP);
2684 thunks [2] = (guint32)pdata->target;
2685 mono_arch_flush_icache ((guchar*)thunks, 12);
2687 arm_patch (pdata->code, (guchar*)thunks);
2688 mono_arch_flush_icache (pdata->code, 4);
2692 /* skip 12 bytes, the size of the thunk */
2696 //g_print ("failed thunk lookup for %p from %p at %p (%d entries)\n", pdata->target, pdata->code, data, count);
/*
 * handle_thunk:
 *
 *   Patch CODE to reach TARGET through a branch thunk when the target is
 * out of direct-branch range. Walks the domain's code chunks twice if
 * necessary (the second pass uses the first available free slot); aborts
 * if no thunk can be found or created.
 */
2702 handle_thunk (MonoDomain *domain, int absolute, guchar *code, const guchar *target)
2707 domain = mono_domain_get ();
2710 pdata.target = target;
2711 pdata.absolute = absolute;
2714 mono_domain_lock (domain);
2715 mono_domain_code_foreach (domain, search_thunk_slot, &pdata);
2718 /* this uses the first available slot */
2720 mono_domain_code_foreach (domain, search_thunk_slot, &pdata);
2722 mono_domain_unlock (domain);
2724 if (pdata.found != 1)
2725 g_print ("thunk failed for %p from %p\n", target, code);
2726 g_assert (pdata.found == 1);
/*
 * arm_patch_general:
 * Patch the instruction (sequence) at CODE so control reaches TARGET.
 * Handles: direct B/BL branches (rewriting the 24-bit displacement, and
 * converting BL->BLX when TARGET's low bit marks a Thumb entry point);
 * register-indirect BX/BLX sequences that load the destination from an
 * address constant embedded in the code stream; and falls back to a
 * thunk in DOMAIN (handle_thunk) when a direct branch cannot span the
 * distance.  NOTE(review): several lines are elided in this view.
 */
2730 arm_patch_general (MonoDomain *domain, guchar *code, const guchar *target)
2732 guint32 *code32 = (void*)code;
2733 guint32 ins = *code32;
/* Bits 27:25 select the instruction class; 101b is the B/BL branch class. */
2734 guint32 prim = (ins >> 25) & 7;
2735 guint32 tval = GPOINTER_TO_UINT (target);
2737 //g_print ("patching 0x%08x (0x%08x) to point to 0x%08x\n", code, ins, target);
2738 if (prim == 5) { /* 101b */
2739 /* the diff starts 8 bytes from the branch opcode */
2740 gint diff = target - code - 8;
2742 gint tmask = 0xffffffff;
2743 if (tval & 1) { /* entering thumb mode */
/* Strip the Thumb bit from the destination before computing the offset. */
2744 diff = target - 1 - code - 8;
2745 g_assert (thumb_supported);
2746 tbits = 0xf << 28; /* bl->blx bit pattern */
2747 g_assert ((ins & (1 << 24))); /* it must be a bl, not b instruction */
2748 /* this low bit of the displacement is moved to bit 24 in the instruction encoding */
2752 tmask = ~(1 << 24); /* clear the link bit */
2753 /*g_print ("blx to thumb: target: %p, code: %p, diff: %d, mask: %x\n", target, code, diff, tmask);*/
/* Forward branch: 33554431 = 2^25-1, the +32MB limit of the encoding. */
2758 if (diff <= 33554431) {
2760 ins = (ins & 0xff000000) | diff;
2762 *code32 = ins | tbits;
2766 /* diff between 0 and -33554432 */
2767 if (diff >= -33554432) {
2769 ins = (ins & 0xff000000) | (diff & ~0xff000000);
2771 *code32 = ins | tbits;
/* Out of direct-branch range: route the call through a thunk slot. */
2776 handle_thunk (domain, TRUE, code, target);
2781 * The alternative call sequences looks like this:
2783 * ldr ip, [pc] // loads the address constant
2784 * b 1f // jumps around the constant
2785 * address constant embedded in the code
2790 * There are two cases for patching:
2791 * a) at the end of method emission: in this case code points to the start
2792 * of the call sequence
2793 * b) during runtime patching of the call site: in this case code points
2794 * to the mov pc, ip instruction
2796 * We have to handle also the thunk jump code sequence:
2800 * address constant // execution never reaches here
/* 0x12fff1x is the BX <reg> encoding. */
2802 if ((ins & 0x0ffffff0) == 0x12fff10) {
2803 /* Branch and exchange: the address is constructed in a reg
2804 * We can patch BX when the code sequence is the following:
2805 * ldr ip, [pc, #0] ; 0x8
/* Re-emit the expected sequence into ccode[] so the words at the call
 * site can be compared against it. */
2812 guint8 *emit = (guint8*)ccode;
2813 ARM_LDR_IMM (emit, ARMREG_IP, ARMREG_PC, 0);
2815 ARM_MOV_REG_REG (emit, ARMREG_LR, ARMREG_PC);
2816 ARM_BX (emit, ARMREG_IP);
2818 /*patching from magic trampoline*/
2819 if (ins == ccode [3]) {
2820 g_assert (code32 [-4] == ccode [0]);
2821 g_assert (code32 [-3] == ccode [1]);
2822 g_assert (code32 [-1] == ccode [2]);
/* Overwrite the embedded address constant, not the instruction itself. */
2823 code32 [-2] = (guint32)target;
2826 /*patching from JIT*/
2827 if (ins == ccode [0]) {
2828 g_assert (code32 [1] == ccode [1]);
2829 g_assert (code32 [3] == ccode [2]);
2830 g_assert (code32 [4] == ccode [3]);
2831 code32 [2] = (guint32)target;
/* Unknown BX sequence: refuse to patch rather than corrupt code. */
2834 g_assert_not_reached ();
/* 0x12fff3x is the BLX <reg> encoding. */
2835 } else if ((ins & 0x0ffffff0) == 0x12fff30) {
2843 guint8 *emit = (guint8*)ccode;
2844 ARM_LDR_IMM (emit, ARMREG_IP, ARMREG_PC, 0);
2846 ARM_BLX_REG (emit, ARMREG_IP);
2848 g_assert (code32 [-3] == ccode [0]);
2849 g_assert (code32 [-2] == ccode [1]);
2850 g_assert (code32 [0] == ccode [2]);
2852 code32 [-1] = (guint32)target;
/* Constant-pool call sequence: ldr ip,[pc] / mov lr,pc / mov pc,ip. */
2855 guint32 *tmp = ccode;
2856 guint8 *emit = (guint8*)tmp;
2857 ARM_LDR_IMM (emit, ARMREG_IP, ARMREG_PC, 0);
2858 ARM_MOV_REG_REG (emit, ARMREG_LR, ARMREG_PC);
2859 ARM_MOV_REG_REG (emit, ARMREG_PC, ARMREG_IP);
2860 ARM_BX (emit, ARMREG_IP);
2861 if (ins == ccode [2]) {
2862 g_assert_not_reached (); // should be -2 ...
2863 code32 [-1] = (guint32)target;
2866 if (ins == ccode [0]) {
2867 /* handles both thunk jump code and the far call sequence */
2868 code32 [2] = (guint32)target;
2871 g_assert_not_reached ();
2873 // g_print ("patched with 0x%08x\n", ins);
/*
 * arm_patch:
 * Convenience wrapper: patch CODE to reach TARGET with no domain
 * context (no thunk allocation domain supplied).
 */
2877 arm_patch (guchar *code, const guchar *target)
2879 arm_patch_general (NULL, code, target);
2883  * Return the >= 0 uimm8 value if val can be represented with a byte + rotation
2884  * (with the rotation amount in *rot_amount. rot_amount is already adjusted
2885  * to be used with the emit macros.
2886  * Return -1 otherwise.
2889 mono_arm_is_rotated_imm8 (guint32 val, gint *rot_amount)
/* ARM data-processing immediates are an 8-bit value rotated right by an
 * even amount; try every even rotation (0, 2, ..., 30). */
2892 for (i = 0; i < 31; i+= 2) {
/* Rotate val left by i, i.e. undo a rotate-right of i. */
2893 res = (val << (32 - i)) | (val >> i);
/* The emit macros take the rotate-right amount, hence 32 - i. */
2896 *rot_amount = i? 32 - i: 0;
2903 * Emits in code a sequence of instructions that load the value 'val'
2904 * into the dreg register. Uses at most 4 instructions.
2907 mono_arm_emit_load_imm (guint8 *code, int dreg, guint32 val)
2909 int imm8, rot_amount;
/* Constant-pool load variant — NOTE(review): the preprocessor guard
 * around these two lines is elided in this view; confirm in the full
 * source before assuming it is live. */
2911 ARM_LDR_IMM (code, dreg, ARMREG_PC, 0);
2912 /* skip the constant pool */
/* Strategy 1: a single MOV with a rotated 8-bit immediate. */
2918 if ((imm8 = mono_arm_is_rotated_imm8 (val, &rot_amount)) >= 0) {
2919 ARM_MOV_REG_IMM (code, dreg, imm8, rot_amount);
/* Strategy 2: MVN of the complement when ~val fits the encoding. */
2920 } else if ((imm8 = mono_arm_is_rotated_imm8 (~val, &rot_amount)) >= 0) {
2921 ARM_MVN_REG_IMM (code, dreg, imm8, rot_amount);
/* Strategy 3: MOVW/MOVT halfword moves — presumably gated on
 * v7_supported; the guard line is elided here (TODO confirm). */
2924 ARM_MOVW_REG_IMM (code, dreg, val & 0xffff);
2926 ARM_MOVT_REG_IMM (code, dreg, (val >> 16) & 0xffff);
/* Strategy 4: build the value byte by byte — MOV of the lowest set
 * byte, then up to three ADDs for the remaining bytes (max 4 insns). */
2930 ARM_MOV_REG_IMM8 (code, dreg, (val & 0xFF));
2932 ARM_ADD_REG_IMM (code, dreg, dreg, (val & 0xFF00) >> 8, 24);
2934 if (val & 0xFF0000) {
2935 ARM_ADD_REG_IMM (code, dreg, dreg, (val & 0xFF0000) >> 16, 16);
2937 if (val & 0xFF000000) {
2938 ARM_ADD_REG_IMM (code, dreg, dreg, (val & 0xFF000000) >> 24, 8);
2940 } else if (val & 0xFF00) {
2941 ARM_MOV_REG_IMM (code, dreg, (val & 0xFF00) >> 8, 24);
2942 if (val & 0xFF0000) {
2943 ARM_ADD_REG_IMM (code, dreg, dreg, (val & 0xFF0000) >> 16, 16);
2945 if (val & 0xFF000000) {
2946 ARM_ADD_REG_IMM (code, dreg, dreg, (val & 0xFF000000) >> 24, 8);
2948 } else if (val & 0xFF0000) {
2949 ARM_MOV_REG_IMM (code, dreg, (val & 0xFF0000) >> 16, 16);
2950 if (val & 0xFF000000) {
2951 ARM_ADD_REG_IMM (code, dreg, dreg, (val & 0xFF000000) >> 24, 8);
2954 //g_assert_not_reached ();
/*
 * mono_arm_thumb_supported:
 * Returns the cached thumb_supported flag (set during backend
 * initialization, outside this view) indicating whether Thumb
 * interworking may be used when patching branches.
 */
2960 mono_arm_thumb_supported (void)
2962 return thumb_supported;
2968 * emit_load_volatile_arguments:
2970 * Load volatile arguments from the stack to the original input registers.
2971 * Required before a tail call.
2974 emit_load_volatile_arguments (MonoCompile *cfg, guint8 *code)
2976 MonoMethod *method = cfg->method;
2977 MonoMethodSignature *sig;
2982 /* FIXME: Generate intermediate code instead */
2984 sig = mono_method_signature (method);
2986 /* This is the opposite of the code in emit_prolog */
/* Recompute the calling convention to learn where each argument was
 * originally passed (register, pair, stack, by-val struct, ...). */
2990 cinfo = get_call_info (NULL, sig, sig->pinvoke);
/* Valuetype return: reload the hidden return-buffer address register. */
2992 if (MONO_TYPE_ISSTRUCT (sig->ret)) {
2993 ArgInfo *ainfo = &cinfo->ret;
2994 inst = cfg->vret_addr;
2995 g_assert (arm_is_imm12 (inst->inst_offset));
2996 ARM_LDR_IMM (code, ainfo->reg, inst->inst_basereg, inst->inst_offset);
/* Walk every argument (including the implicit 'this'). */
2998 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
2999 ArgInfo *ainfo = cinfo->args + i;
3000 inst = cfg->args [pos];
3002 if (cfg->verbose_level > 2)
3003 g_print ("Loading argument %d (type: %d)\n", i, ainfo->storage);
/* Argument lives in a register variable: move it back to its
 * original incoming register. */
3004 if (inst->opcode == OP_REGVAR) {
3005 if (ainfo->storage == RegTypeGeneral)
3006 ARM_MOV_REG_REG (code, inst->dreg, ainfo->reg);
3007 else if (ainfo->storage == RegTypeFP) {
3008 g_assert_not_reached ();
3009 } else if (ainfo->storage == RegTypeBase) {
3013 if (arm_is_imm12 (prev_sp_offset + ainfo->offset)) {
3014 ARM_LDR_IMM (code, inst->dreg, ARMREG_SP, (prev_sp_offset + ainfo->offset));
/* Offset too large for the 12-bit immediate: go through IP. */
3016 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
3017 ARM_LDR_REG_REG (code, inst->dreg, ARMREG_SP, ARMREG_IP);
3021 g_assert_not_reached ();
/* Argument was spilled to a stack slot: reload into its register(s). */
3023 if (ainfo->storage == RegTypeGeneral || ainfo->storage == RegTypeIRegPair) {
3024 switch (ainfo->size) {
/* 8-byte argument: reload both halves of the register pair. */
3031 g_assert (arm_is_imm12 (inst->inst_offset));
3032 ARM_LDR_IMM (code, ainfo->reg, inst->inst_basereg, inst->inst_offset);
3033 g_assert (arm_is_imm12 (inst->inst_offset + 4));
3034 ARM_LDR_IMM (code, ainfo->reg + 1, inst->inst_basereg, inst->inst_offset + 4);
3037 if (arm_is_imm12 (inst->inst_offset)) {
3038 ARM_LDR_IMM (code, ainfo->reg, inst->inst_basereg, inst->inst_offset);
3040 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
3041 ARM_LDR_REG_REG (code, ainfo->reg, inst->inst_basereg, ARMREG_IP);
/* NOTE(review): the RegTypeBaseGen / RegTypeBase bodies are elided in
 * this view of the source. */
3045 } else if (ainfo->storage == RegTypeBaseGen) {
3048 } else if (ainfo->storage == RegTypeBase) {
3050 } else if (ainfo->storage == RegTypeFP) {
3051 g_assert_not_reached ();
/* Struct passed (partially) in registers: reload each word. */
3052 } else if (ainfo->storage == RegTypeStructByVal) {
3053 int doffset = inst->inst_offset;
3057 if (mono_class_from_mono_type (inst->inst_vtype))
3058 size = mono_class_native_size (mono_class_from_mono_type (inst->inst_vtype), NULL);
3059 for (cur_reg = 0; cur_reg < ainfo->size; ++cur_reg) {
3060 if (arm_is_imm12 (doffset)) {
3061 ARM_LDR_IMM (code, ainfo->reg + cur_reg, inst->inst_basereg, doffset);
3063 code = mono_arm_emit_load_imm (code, ARMREG_IP, doffset);
3064 ARM_LDR_REG_REG (code, ainfo->reg + cur_reg, inst->inst_basereg, ARMREG_IP);
3066 soffset += sizeof (gpointer);
3067 doffset += sizeof (gpointer);
3072 } else if (ainfo->storage == RegTypeStructByAddr) {
3087 mono_arch_output_basic_block (MonoCompile *cfg, MonoBasicBlock *bb)
3092 guint8 *code = cfg->native_code + cfg->code_len;
3093 MonoInst *last_ins = NULL;
3094 guint last_offset = 0;
3096 int imm8, rot_amount;
3098 /* we don't align basic blocks of loops on arm */
3100 if (cfg->verbose_level > 2)
3101 g_print ("Basic block %d starting at offset 0x%x\n", bb->block_num, bb->native_offset);
3103 cpos = bb->max_offset;
3105 if (cfg->prof_options & MONO_PROFILE_COVERAGE) {
3106 //MonoCoverageInfo *cov = mono_get_coverage_info (cfg->method);
3107 //g_assert (!mono_compile_aot);
3110 // cov->data [bb->dfn].iloffset = bb->cil_code - cfg->cil_code;
3111 /* this is not thread save, but good enough */
3112 /* fixme: howto handle overflows? */
3113 //x86_inc_mem (code, &cov->data [bb->dfn].count);
3116 if (mono_break_at_bb_method && mono_method_desc_full_match (mono_break_at_bb_method, cfg->method) && bb->block_num == mono_break_at_bb_bb_num) {
3117 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_INTERNAL_METHOD,
3118 (gpointer)"mono_break");
3119 code = emit_call_seq (cfg, code);
3122 MONO_BB_FOR_EACH_INS (bb, ins) {
3123 offset = code - cfg->native_code;
3125 max_len = ((guint8 *)ins_get_spec (ins->opcode))[MONO_INST_LEN];
3127 if (offset > (cfg->code_size - max_len - 16)) {
3128 cfg->code_size *= 2;
3129 cfg->native_code = g_realloc (cfg->native_code, cfg->code_size);
3130 code = cfg->native_code + offset;
3132 // if (ins->cil_code)
3133 // g_print ("cil code\n");
3134 mono_debug_record_line_number (cfg, ins, offset);
3136 switch (ins->opcode) {
3137 case OP_MEMORY_BARRIER:
3140 #ifdef HAVE_AEABI_READ_TP
3141 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_INTERNAL_METHOD,
3142 (gpointer)"__aeabi_read_tp");
3143 code = emit_call_seq (cfg, code);
3145 ARM_LDR_IMM (code, ins->dreg, ARMREG_R0, ins->inst_offset);
3147 g_assert_not_reached ();
3151 ppc_mullw (code, ppc_r4, ins->sreg1, ins->sreg2);
3152 ppc_mulhw (code, ppc_r3, ins->sreg1, ins->sreg2);
3155 ppc_mullw (code, ppc_r4, ins->sreg1, ins->sreg2);
3156 ppc_mulhwu (code, ppc_r3, ins->sreg1, ins->sreg2);
3158 case OP_STOREI1_MEMBASE_IMM:
3159 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_imm & 0xFF);
3160 g_assert (arm_is_imm12 (ins->inst_offset));
3161 ARM_STRB_IMM (code, ARMREG_LR, ins->inst_destbasereg, ins->inst_offset);
3163 case OP_STOREI2_MEMBASE_IMM:
3164 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_imm & 0xFFFF);
3165 g_assert (arm_is_imm8 (ins->inst_offset));
3166 ARM_STRH_IMM (code, ARMREG_LR, ins->inst_destbasereg, ins->inst_offset);
3168 case OP_STORE_MEMBASE_IMM:
3169 case OP_STOREI4_MEMBASE_IMM:
3170 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_imm);
3171 g_assert (arm_is_imm12 (ins->inst_offset));
3172 ARM_STR_IMM (code, ARMREG_LR, ins->inst_destbasereg, ins->inst_offset);
3174 case OP_STOREI1_MEMBASE_REG:
3175 g_assert (arm_is_imm12 (ins->inst_offset));
3176 ARM_STRB_IMM (code, ins->sreg1, ins->inst_destbasereg, ins->inst_offset);
3178 case OP_STOREI2_MEMBASE_REG:
3179 g_assert (arm_is_imm8 (ins->inst_offset));
3180 ARM_STRH_IMM (code, ins->sreg1, ins->inst_destbasereg, ins->inst_offset);
3182 case OP_STORE_MEMBASE_REG:
3183 case OP_STOREI4_MEMBASE_REG:
3184 /* this case is special, since it happens for spill code after lowering has been called */
3185 if (arm_is_imm12 (ins->inst_offset)) {
3186 ARM_STR_IMM (code, ins->sreg1, ins->inst_destbasereg, ins->inst_offset);
3188 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_offset);
3189 ARM_STR_REG_REG (code, ins->sreg1, ins->inst_destbasereg, ARMREG_LR);
3192 case OP_STOREI1_MEMINDEX:
3193 ARM_STRB_REG_REG (code, ins->sreg1, ins->inst_destbasereg, ins->sreg2);
3195 case OP_STOREI2_MEMINDEX:
3196 ARM_STRH_REG_REG (code, ins->sreg1, ins->inst_destbasereg, ins->sreg2);
3198 case OP_STORE_MEMINDEX:
3199 case OP_STOREI4_MEMINDEX:
3200 ARM_STR_REG_REG (code, ins->sreg1, ins->inst_destbasereg, ins->sreg2);
3203 g_assert_not_reached ();
3205 case OP_LOAD_MEMINDEX:
3206 case OP_LOADI4_MEMINDEX:
3207 case OP_LOADU4_MEMINDEX:
3208 ARM_LDR_REG_REG (code, ins->dreg, ins->inst_basereg, ins->sreg2);
3210 case OP_LOADI1_MEMINDEX:
3211 ARM_LDRSB_REG_REG (code, ins->dreg, ins->inst_basereg, ins->sreg2);
3213 case OP_LOADU1_MEMINDEX:
3214 ARM_LDRB_REG_REG (code, ins->dreg, ins->inst_basereg, ins->sreg2);
3216 case OP_LOADI2_MEMINDEX:
3217 ARM_LDRSH_REG_REG (code, ins->dreg, ins->inst_basereg, ins->sreg2);
3219 case OP_LOADU2_MEMINDEX:
3220 ARM_LDRH_REG_REG (code, ins->dreg, ins->inst_basereg, ins->sreg2);
3222 case OP_LOAD_MEMBASE:
3223 case OP_LOADI4_MEMBASE:
3224 case OP_LOADU4_MEMBASE:
3225 /* this case is special, since it happens for spill code after lowering has been called */
3226 if (arm_is_imm12 (ins->inst_offset)) {
3227 ARM_LDR_IMM (code, ins->dreg, ins->inst_basereg, ins->inst_offset);
3229 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_offset);
3230 ARM_LDR_REG_REG (code, ins->dreg, ins->inst_basereg, ARMREG_LR);
3233 case OP_LOADI1_MEMBASE:
3234 g_assert (arm_is_imm8 (ins->inst_offset));
3235 ARM_LDRSB_IMM (code, ins->dreg, ins->inst_basereg, ins->inst_offset);
3237 case OP_LOADU1_MEMBASE:
3238 g_assert (arm_is_imm12 (ins->inst_offset));
3239 ARM_LDRB_IMM (code, ins->dreg, ins->inst_basereg, ins->inst_offset);
3241 case OP_LOADU2_MEMBASE:
3242 g_assert (arm_is_imm8 (ins->inst_offset));
3243 ARM_LDRH_IMM (code, ins->dreg, ins->inst_basereg, ins->inst_offset);
3245 case OP_LOADI2_MEMBASE:
3246 g_assert (arm_is_imm8 (ins->inst_offset));
3247 ARM_LDRSH_IMM (code, ins->dreg, ins->inst_basereg, ins->inst_offset);
3249 case OP_ICONV_TO_I1:
3250 ARM_SHL_IMM (code, ins->dreg, ins->sreg1, 24);
3251 ARM_SAR_IMM (code, ins->dreg, ins->dreg, 24);
3253 case OP_ICONV_TO_I2:
3254 ARM_SHL_IMM (code, ins->dreg, ins->sreg1, 16);
3255 ARM_SAR_IMM (code, ins->dreg, ins->dreg, 16);
3257 case OP_ICONV_TO_U1:
3258 ARM_AND_REG_IMM8 (code, ins->dreg, ins->sreg1, 0xff);
3260 case OP_ICONV_TO_U2:
3261 ARM_SHL_IMM (code, ins->dreg, ins->sreg1, 16);
3262 ARM_SHR_IMM (code, ins->dreg, ins->dreg, 16);
3266 ARM_CMP_REG_REG (code, ins->sreg1, ins->sreg2);
3268 case OP_COMPARE_IMM:
3269 case OP_ICOMPARE_IMM:
3270 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
3271 g_assert (imm8 >= 0);
3272 ARM_CMP_REG_IMM (code, ins->sreg1, imm8, rot_amount);
3276 * gdb does not like encountering the hw breakpoint ins in the debugged code.
3277 * So instead of emitting a trap, we emit a call a C function and place a
3280 //*(int*)code = 0xef9f0001;
3283 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_INTERNAL_METHOD,
3284 (gpointer)"mono_break");
3285 code = emit_call_seq (cfg, code);
3287 case OP_RELAXED_NOP:
3292 case OP_DUMMY_STORE:
3293 case OP_NOT_REACHED:
3296 case OP_SEQ_POINT: {
3298 MonoInst *info_var = cfg->arch.seq_point_info_var;
3299 MonoInst *ss_trigger_page_var = cfg->arch.ss_trigger_page_var;
3301 int dreg = ARMREG_LR;
3304 * For AOT, we use one got slot per method, which will point to a
3305 * SeqPointInfo structure, containing all the information required
3306 * by the code below.
3308 if (cfg->compile_aot) {
3309 g_assert (info_var);
3310 g_assert (info_var->opcode == OP_REGOFFSET);
3311 g_assert (arm_is_imm12 (info_var->inst_offset));
3315 * Read from the single stepping trigger page. This will cause a
3316 * SIGSEGV when single stepping is enabled.
3317 * We do this _before_ the breakpoint, so single stepping after
3318 * a breakpoint is hit will step to the next IL offset.
3320 g_assert (((guint64)(gsize)ss_trigger_page >> 32) == 0);
3322 if (ins->flags & MONO_INST_SINGLE_STEP_LOC) {
3323 if (cfg->compile_aot) {
3324 /* Load the trigger page addr from the variable initialized in the prolog */
3325 var = ss_trigger_page_var;
3327 g_assert (var->opcode == OP_REGOFFSET);
3328 g_assert (arm_is_imm12 (var->inst_offset));
3329 ARM_LDR_IMM (code, dreg, var->inst_basereg, var->inst_offset);
3331 ARM_LDR_IMM (code, dreg, ARMREG_PC, 0);
3333 *(int*)code = (int)ss_trigger_page;
3336 ARM_LDR_IMM (code, dreg, dreg, 0);
3339 mono_add_seq_point (cfg, bb, ins, code - cfg->native_code);
3341 if (cfg->compile_aot) {
3342 guint32 offset = code - cfg->native_code;
3345 ARM_LDR_IMM (code, dreg, info_var->inst_basereg, info_var->inst_offset);
3346 /* Add the offset */
3347 val = ((offset / 4) * sizeof (guint8*)) + G_STRUCT_OFFSET (SeqPointInfo, bp_addrs);
3348 ARM_ADD_REG_IMM (code, dreg, dreg, (val & 0xFF), 0);
3350 * Have to emit nops to keep the difference between the offset
3351 * stored in seq_points and breakpoint instruction constant,
3352 * mono_arch_get_ip_for_breakpoint () depends on this.
3355 ARM_ADD_REG_IMM (code, dreg, dreg, (val & 0xFF00) >> 8, 24);
3359 ARM_ADD_REG_IMM (code, dreg, dreg, (val & 0xFF0000) >> 16, 16);
3362 g_assert (!(val & 0xFF000000));
3363 /* Load the info->bp_addrs [offset], which is either 0 or the address of a trigger page */
3364 ARM_LDR_IMM (code, dreg, dreg, 0);
3366 /* What is faster, a branch or a load ? */
3367 ARM_CMP_REG_IMM (code, dreg, 0, 0);
3368 /* The breakpoint instruction */
3369 ARM_LDR_IMM_COND (code, dreg, dreg, 0, ARMCOND_NE);
3372 * A placeholder for a possible breakpoint inserted by
3373 * mono_arch_set_breakpoint ().
3375 for (i = 0; i < 4; ++i)
3382 ARM_ADDS_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
3385 ARM_ADD_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
3389 ARM_ADCS_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
3392 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
3393 g_assert (imm8 >= 0);
3394 ARM_ADDS_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
3398 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
3399 g_assert (imm8 >= 0);
3400 ARM_ADD_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
3404 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
3405 g_assert (imm8 >= 0);
3406 ARM_ADCS_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
3409 ARM_ADD_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
3410 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, "OverflowException");
3412 case OP_IADD_OVF_UN:
3413 ARM_ADD_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
3414 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, "OverflowException");
3417 ARM_SUB_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
3418 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, "OverflowException");
3420 case OP_ISUB_OVF_UN:
3421 ARM_SUB_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
3422 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_TRUE, PPC_BR_EQ, "OverflowException");
3424 case OP_ADD_OVF_CARRY:
3425 ARM_ADCS_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
3426 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, "OverflowException");
3428 case OP_ADD_OVF_UN_CARRY:
3429 ARM_ADCS_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
3430 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, "OverflowException");
3432 case OP_SUB_OVF_CARRY:
3433 ARM_SBCS_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
3434 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, "OverflowException");
3436 case OP_SUB_OVF_UN_CARRY:
3437 ARM_SBCS_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
3438 //EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_TRUE, PPC_BR_EQ, "OverflowException");
3442 ARM_SUBS_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
3445 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
3446 g_assert (imm8 >= 0);
3447 ARM_SUBS_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
3450 ARM_SUB_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
3454 ARM_SBCS_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
3458 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
3459 g_assert (imm8 >= 0);
3460 ARM_SUB_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
3464 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
3465 g_assert (imm8 >= 0);
3466 ARM_SBCS_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
3468 case OP_ARM_RSBS_IMM:
3469 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
3470 g_assert (imm8 >= 0);
3471 ARM_RSBS_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
3473 case OP_ARM_RSC_IMM:
3474 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
3475 g_assert (imm8 >= 0);
3476 ARM_RSC_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
3479 ARM_AND_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
3483 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
3484 g_assert (imm8 >= 0);
3485 ARM_AND_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
3493 /* crappy ARM arch doesn't have a DIV instruction */
3494 g_assert_not_reached ();
3496 ARM_ORR_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
3500 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
3501 g_assert (imm8 >= 0);
3502 ARM_ORR_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
3505 ARM_EOR_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
3509 imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
3510 g_assert (imm8 >= 0);
3511 ARM_EOR_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
3514 ARM_SHL_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
3519 ARM_SHL_IMM (code, ins->dreg, ins->sreg1, (ins->inst_imm & 0x1f));
3520 else if (ins->dreg != ins->sreg1)
3521 ARM_MOV_REG_REG (code, ins->dreg, ins->sreg1);
3524 ARM_SAR_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
3529 ARM_SAR_IMM (code, ins->dreg, ins->sreg1, (ins->inst_imm & 0x1f));
3530 else if (ins->dreg != ins->sreg1)
3531 ARM_MOV_REG_REG (code, ins->dreg, ins->sreg1);
3534 case OP_ISHR_UN_IMM:
3536 ARM_SHR_IMM (code, ins->dreg, ins->sreg1, (ins->inst_imm & 0x1f));
3537 else if (ins->dreg != ins->sreg1)
3538 ARM_MOV_REG_REG (code, ins->dreg, ins->sreg1);
3541 ARM_SHR_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
3544 ARM_MVN_REG_REG (code, ins->dreg, ins->sreg1);
3547 ARM_RSB_REG_IMM8 (code, ins->dreg, ins->sreg1, 0);
3550 if (ins->dreg == ins->sreg2)
3551 ARM_MUL_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
3553 ARM_MUL_REG_REG (code, ins->dreg, ins->sreg2, ins->sreg1);
3556 g_assert_not_reached ();
3559 /* FIXME: handle ovf/ sreg2 != dreg */
3560 ARM_MUL_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
3561 /* FIXME: MUL doesn't set the C/O flags on ARM */
3563 case OP_IMUL_OVF_UN:
3564 /* FIXME: handle ovf/ sreg2 != dreg */
3565 ARM_MUL_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
3566 /* FIXME: MUL doesn't set the C/O flags on ARM */
3569 code = mono_arm_emit_load_imm (code, ins->dreg, ins->inst_c0);
3572 /* Load the GOT offset */
3573 mono_add_patch_info (cfg, offset, (MonoJumpInfoType)ins->inst_i1, ins->inst_p0);
3574 ARM_LDR_IMM (code, ins->dreg, ARMREG_PC, 0);
3576 *(gpointer*)code = NULL;
3578 /* Load the value from the GOT */
3579 ARM_LDR_REG_REG (code, ins->dreg, ARMREG_PC, ins->dreg);
3581 case OP_ICONV_TO_I4:
3582 case OP_ICONV_TO_U4:
3584 if (ins->dreg != ins->sreg1)
3585 ARM_MOV_REG_REG (code, ins->dreg, ins->sreg1);
3588 int saved = ins->sreg2;
3589 if (ins->sreg2 == ARM_LSW_REG) {
3590 ARM_MOV_REG_REG (code, ARMREG_LR, ins->sreg2);
3593 if (ins->sreg1 != ARM_LSW_REG)
3594 ARM_MOV_REG_REG (code, ARM_LSW_REG, ins->sreg1);
3595 if (saved != ARM_MSW_REG)
3596 ARM_MOV_REG_REG (code, ARM_MSW_REG, saved);
3601 ARM_MVFD (code, ins->dreg, ins->sreg1);
3602 #elif defined(ARM_FPU_VFP)
3603 ARM_CPYD (code, ins->dreg, ins->sreg1);
3606 case OP_FCONV_TO_R4:
3608 ARM_MVFS (code, ins->dreg, ins->sreg1);
3609 #elif defined(ARM_FPU_VFP)
3610 ARM_CVTD (code, ins->dreg, ins->sreg1);
3611 ARM_CVTS (code, ins->dreg, ins->dreg);
3616 * Keep in sync with mono_arch_emit_epilog
3618 g_assert (!cfg->method->save_lmf);
3620 code = emit_load_volatile_arguments (cfg, code);
3622 code = emit_big_add (code, ARMREG_SP, cfg->frame_reg, cfg->stack_usage);
3623 ARM_POP_NWB (code, cfg->used_int_regs | ((1 << ARMREG_SP)) | ((1 << ARMREG_LR)));
3624 mono_add_patch_info (cfg, (guint8*) code - cfg->native_code, MONO_PATCH_INFO_METHOD_JUMP, ins->inst_p0);
3625 if (cfg->compile_aot) {
3626 ARM_LDR_IMM (code, ARMREG_IP, ARMREG_PC, 0);
3628 *(gpointer*)code = NULL;
3630 ARM_LDR_REG_REG (code, ARMREG_PC, ARMREG_PC, ARMREG_IP);
3636 /* ensure ins->sreg1 is not NULL */
3637 ARM_LDR_IMM (code, ARMREG_LR, ins->sreg1, 0);
3640 g_assert (cfg->sig_cookie < 128);
3641 ARM_LDR_IMM (code, ARMREG_IP, cfg->frame_reg, cfg->sig_cookie);
3642 ARM_STR_IMM (code, ARMREG_IP, ins->sreg1, 0);
3651 call = (MonoCallInst*)ins;
3652 if (ins->flags & MONO_INST_HAS_METHOD)
3653 mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_METHOD, call->method);
3655 mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_ABS, call->fptr);
3656 code = emit_call_seq (cfg, code);
3657 code = emit_move_return_value (cfg, ins, code);
3663 case OP_VOIDCALL_REG:
3665 code = emit_call_reg (code, ins->sreg1);
3666 code = emit_move_return_value (cfg, ins, code);
3668 case OP_FCALL_MEMBASE:
3669 case OP_LCALL_MEMBASE:
3670 case OP_VCALL_MEMBASE:
3671 case OP_VCALL2_MEMBASE:
3672 case OP_VOIDCALL_MEMBASE:
3673 case OP_CALL_MEMBASE:
3674 g_assert (arm_is_imm12 (ins->inst_offset));
3675 g_assert (ins->sreg1 != ARMREG_LR);
3676 call = (MonoCallInst*)ins;
3677 if (call->method->klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
3678 ARM_ADD_REG_IMM8 (code, ARMREG_LR, ARMREG_PC, 4);
3679 ARM_LDR_IMM (code, ARMREG_PC, ins->sreg1, ins->inst_offset);
3681 * We can't embed the method in the code stream in PIC code, or
3683 * Instead, we put it in V5 in code emitted by
3684 * mono_arch_emit_imt_argument (), and embed NULL here to
3685 * signal the IMT thunk that the value is in V5.
3687 if (call->dynamic_imt_arg)
3688 *((gpointer*)code) = NULL;
3690 *((gpointer*)code) = (gpointer)call->method;
3693 ARM_MOV_REG_REG (code, ARMREG_LR, ARMREG_PC);
3694 ARM_LDR_IMM (code, ARMREG_PC, ins->sreg1, ins->inst_offset);
3696 code = emit_move_return_value (cfg, ins, code);
3699 /* keep alignment */
3700 int alloca_waste = cfg->param_area;
3703 /* round the size to 8 bytes */
3704 ARM_ADD_REG_IMM8 (code, ins->dreg, ins->sreg1, 7);
3705 ARM_BIC_REG_IMM8 (code, ins->dreg, ins->dreg, 7);
3707 ARM_ADD_REG_IMM8 (code, ins->dreg, ins->dreg, alloca_waste);
3708 ARM_SUB_REG_REG (code, ARMREG_SP, ARMREG_SP, ins->dreg);
3709 /* memzero the area: dreg holds the size, sp is the pointer */
3710 if (ins->flags & MONO_INST_INIT) {
3711 guint8 *start_loop, *branch_to_cond;
3712 ARM_MOV_REG_IMM8 (code, ARMREG_LR, 0);
3713 branch_to_cond = code;
3716 ARM_STR_REG_REG (code, ARMREG_LR, ARMREG_SP, ins->dreg);
3717 arm_patch (branch_to_cond, code);
3718 /* decrement by 4 and set flags */
3719 ARM_SUBS_REG_IMM8 (code, ins->dreg, ins->dreg, 4);
3720 ARM_B_COND (code, ARMCOND_GE, 0);
3721 arm_patch (code - 4, start_loop);
3723 ARM_ADD_REG_IMM8 (code, ins->dreg, ARMREG_SP, alloca_waste);
3728 MonoInst *var = cfg->dyn_call_var;
3730 g_assert (var->opcode == OP_REGOFFSET);
3731 g_assert (arm_is_imm12 (var->inst_offset));
3733 /* lr = args buffer filled by mono_arch_get_dyn_call_args () */
3734 ARM_MOV_REG_REG( code, ARMREG_LR, ins->sreg1);
3736 ARM_MOV_REG_REG( code, ARMREG_IP, ins->sreg2);
3738 /* Save args buffer */
3739 ARM_STR_IMM (code, ARMREG_LR, var->inst_basereg, var->inst_offset);
3741 /* Set stack slots using R0 as scratch reg */
3742 /* MONO_ARCH_DYN_CALL_PARAM_AREA gives the size of stack space available */
3743 for (i = 0; i < DYN_CALL_STACK_ARGS; ++i) {
3744 ARM_LDR_IMM (code, ARMREG_R0, ARMREG_LR, (PARAM_REGS + i) * sizeof (gpointer));
3745 ARM_STR_IMM (code, ARMREG_R0, ARMREG_SP, i * sizeof (gpointer));
3748 /* Set argument registers */
3749 for (i = 0; i < PARAM_REGS; ++i)
3750 ARM_LDR_IMM (code, i, ARMREG_LR, i * sizeof (gpointer));
3753 ARM_MOV_REG_REG (code, ARMREG_LR, ARMREG_PC);
3754 ARM_MOV_REG_REG (code, ARMREG_PC, ARMREG_IP);
3757 ARM_LDR_IMM (code, ARMREG_IP, var->inst_basereg, var->inst_offset);
3758 ARM_STR_IMM (code, ARMREG_R0, ARMREG_IP, G_STRUCT_OFFSET (DynCallArgs, res));
3759 ARM_STR_IMM (code, ARMREG_R1, ARMREG_IP, G_STRUCT_OFFSET (DynCallArgs, res2));
3763 if (ins->sreg1 != ARMREG_R0)
3764 ARM_MOV_REG_REG (code, ARMREG_R0, ins->sreg1);
3765 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_INTERNAL_METHOD,
3766 (gpointer)"mono_arch_throw_exception");
3767 code = emit_call_seq (cfg, code);
3771 if (ins->sreg1 != ARMREG_R0)
3772 ARM_MOV_REG_REG (code, ARMREG_R0, ins->sreg1);
3773 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_INTERNAL_METHOD,
3774 (gpointer)"mono_arch_rethrow_exception");
3775 code = emit_call_seq (cfg, code);
3778 case OP_START_HANDLER: {
3779 MonoInst *spvar = mono_find_spvar_for_region (cfg, bb->region);
3781 if (arm_is_imm12 (spvar->inst_offset)) {
3782 ARM_STR_IMM (code, ARMREG_LR, spvar->inst_basereg, spvar->inst_offset);
3784 code = mono_arm_emit_load_imm (code, ARMREG_IP, spvar->inst_offset);
3785 ARM_STR_REG_REG (code, ARMREG_LR, spvar->inst_basereg, ARMREG_IP);
3789 case OP_ENDFILTER: {
3790 MonoInst *spvar = mono_find_spvar_for_region (cfg, bb->region);
3792 if (ins->sreg1 != ARMREG_R0)
3793 ARM_MOV_REG_REG (code, ARMREG_R0, ins->sreg1);
3794 if (arm_is_imm12 (spvar->inst_offset)) {
3795 ARM_LDR_IMM (code, ARMREG_IP, spvar->inst_basereg, spvar->inst_offset);
3797 g_assert (ARMREG_IP != spvar->inst_basereg);
3798 code = mono_arm_emit_load_imm (code, ARMREG_IP, spvar->inst_offset);
3799 ARM_LDR_REG_REG (code, ARMREG_IP, spvar->inst_basereg, ARMREG_IP);
3801 ARM_MOV_REG_REG (code, ARMREG_PC, ARMREG_IP);
3804 case OP_ENDFINALLY: {
3805 MonoInst *spvar = mono_find_spvar_for_region (cfg, bb->region);
3807 if (arm_is_imm12 (spvar->inst_offset)) {
3808 ARM_LDR_IMM (code, ARMREG_IP, spvar->inst_basereg, spvar->inst_offset);
3810 g_assert (ARMREG_IP != spvar->inst_basereg);
3811 code = mono_arm_emit_load_imm (code, ARMREG_IP, spvar->inst_offset);
3812 ARM_LDR_REG_REG (code, ARMREG_IP, spvar->inst_basereg, ARMREG_IP);
3814 ARM_MOV_REG_REG (code, ARMREG_PC, ARMREG_IP);
3817 case OP_CALL_HANDLER:
3818 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_BB, ins->inst_target_bb);
3820 mono_cfg_add_try_hole (cfg, ins->inst_eh_block, code, bb);
3823 ins->inst_c0 = code - cfg->native_code;
3826 /*if (ins->inst_target_bb->native_offset) {
3828 //x86_jump_code (code, cfg->native_code + ins->inst_target_bb->native_offset);
3830 mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_BB, ins->inst_target_bb);
3835 ARM_MOV_REG_REG (code, ARMREG_PC, ins->sreg1);
3839 * In the normal case we have:
3840 * ldr pc, [pc, ins->sreg1 << 2]
3843 * ldr lr, [pc, ins->sreg1 << 2]
3845 * After follows the data.
3846 * FIXME: add aot support.
3848 mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_SWITCH, ins->inst_p0);
3849 max_len += 4 * GPOINTER_TO_INT (ins->klass);
3850 if (offset > (cfg->code_size - max_len - 16)) {
3851 cfg->code_size += max_len;
3852 cfg->code_size *= 2;
3853 cfg->native_code = g_realloc (cfg->native_code, cfg->code_size);
3854 code = cfg->native_code + offset;
3856 ARM_LDR_REG_REG_SHIFT (code, ARMREG_PC, ARMREG_PC, ins->sreg1, ARMSHIFT_LSL, 2);
3858 code += 4 * GPOINTER_TO_INT (ins->klass);
3862 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 0, ARMCOND_NE);
3863 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_EQ);
3867 ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
3868 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_LT);
3872 ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
3873 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_LO);
3877 ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
3878 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_GT);
3882 ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
3883 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_HI);
3885 case OP_COND_EXC_EQ:
3886 case OP_COND_EXC_NE_UN:
3887 case OP_COND_EXC_LT:
3888 case OP_COND_EXC_LT_UN:
3889 case OP_COND_EXC_GT:
3890 case OP_COND_EXC_GT_UN:
3891 case OP_COND_EXC_GE:
3892 case OP_COND_EXC_GE_UN:
3893 case OP_COND_EXC_LE:
3894 case OP_COND_EXC_LE_UN:
3895 EMIT_COND_SYSTEM_EXCEPTION (ins->opcode - OP_COND_EXC_EQ, ins->inst_p1);
3897 case OP_COND_EXC_IEQ:
3898 case OP_COND_EXC_INE_UN:
3899 case OP_COND_EXC_ILT:
3900 case OP_COND_EXC_ILT_UN:
3901 case OP_COND_EXC_IGT:
3902 case OP_COND_EXC_IGT_UN:
3903 case OP_COND_EXC_IGE:
3904 case OP_COND_EXC_IGE_UN:
3905 case OP_COND_EXC_ILE:
3906 case OP_COND_EXC_ILE_UN:
3907 EMIT_COND_SYSTEM_EXCEPTION (ins->opcode - OP_COND_EXC_IEQ, ins->inst_p1);
3910 case OP_COND_EXC_IC:
3911 EMIT_COND_SYSTEM_EXCEPTION_FLAGS (ARMCOND_CS, ins->inst_p1);
3913 case OP_COND_EXC_OV:
3914 case OP_COND_EXC_IOV:
3915 EMIT_COND_SYSTEM_EXCEPTION_FLAGS (ARMCOND_VS, ins->inst_p1);
3917 case OP_COND_EXC_NC:
3918 case OP_COND_EXC_INC:
3919 EMIT_COND_SYSTEM_EXCEPTION_FLAGS (ARMCOND_CC, ins->inst_p1);
3921 case OP_COND_EXC_NO:
3922 case OP_COND_EXC_INO:
3923 EMIT_COND_SYSTEM_EXCEPTION_FLAGS (ARMCOND_VC, ins->inst_p1);
3935 EMIT_COND_BRANCH (ins, ins->opcode - OP_IBEQ);
3938 /* floating point opcodes */
3941 if (cfg->compile_aot) {
3942 ARM_LDFD (code, ins->dreg, ARMREG_PC, 0);
3944 *(guint32*)code = ((guint32*)(ins->inst_p0))[0];
3946 *(guint32*)code = ((guint32*)(ins->inst_p0))[1];
3949 /* FIXME: we can optimize the imm load by dealing with part of
3950 * the displacement in LDFD (aligning to 512).
3952 code = mono_arm_emit_load_imm (code, ARMREG_LR, (guint32)ins->inst_p0);
3953 ARM_LDFD (code, ins->dreg, ARMREG_LR, 0);
3957 if (cfg->compile_aot) {
3958 ARM_LDFS (code, ins->dreg, ARMREG_PC, 0);
3960 *(guint32*)code = ((guint32*)(ins->inst_p0))[0];
3963 code = mono_arm_emit_load_imm (code, ARMREG_LR, (guint32)ins->inst_p0);
3964 ARM_LDFS (code, ins->dreg, ARMREG_LR, 0);
3967 case OP_STORER8_MEMBASE_REG:
3968 /* This is generated by the local regalloc pass which runs after the lowering pass */
3969 if (!arm_is_fpimm8 (ins->inst_offset)) {
3970 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_offset);
3971 ARM_ADD_REG_REG (code, ARMREG_LR, ARMREG_LR, ins->inst_destbasereg);
3972 ARM_STFD (code, ins->sreg1, ARMREG_LR, 0);
3974 ARM_STFD (code, ins->sreg1, ins->inst_destbasereg, ins->inst_offset);
3977 case OP_LOADR8_MEMBASE:
3978 /* This is generated by the local regalloc pass which runs after the lowering pass */
3979 if (!arm_is_fpimm8 (ins->inst_offset)) {
3980 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_offset);
3981 ARM_ADD_REG_REG (code, ARMREG_LR, ARMREG_LR, ins->inst_basereg);
3982 ARM_LDFD (code, ins->dreg, ARMREG_LR, 0);
3984 ARM_LDFD (code, ins->dreg, ins->inst_basereg, ins->inst_offset);
3987 case OP_STORER4_MEMBASE_REG:
3988 g_assert (arm_is_fpimm8 (ins->inst_offset));
3989 ARM_STFS (code, ins->sreg1, ins->inst_destbasereg, ins->inst_offset);
3991 case OP_LOADR4_MEMBASE:
3992 g_assert (arm_is_fpimm8 (ins->inst_offset));
3993 ARM_LDFS (code, ins->dreg, ins->inst_basereg, ins->inst_offset);
3995 case OP_ICONV_TO_R_UN: {
3997 tmpreg = ins->dreg == 0? 1: 0;
3998 ARM_CMP_REG_IMM8 (code, ins->sreg1, 0);
3999 ARM_FLTD (code, ins->dreg, ins->sreg1);
4000 ARM_B_COND (code, ARMCOND_GE, 8);
4001 /* save the temp register */
4002 ARM_SUB_REG_IMM8 (code, ARMREG_SP, ARMREG_SP, 8);
4003 ARM_STFD (code, tmpreg, ARMREG_SP, 0);
4004 ARM_LDFD (code, tmpreg, ARMREG_PC, 12);
4005 ARM_FPA_ADFD (code, ins->dreg, ins->dreg, tmpreg);
4006 ARM_LDFD (code, tmpreg, ARMREG_SP, 0);
4007 ARM_ADD_REG_IMM8 (code, ARMREG_SP, ARMREG_SP, 8);
4008 /* skip the constant pool */
4011 *(int*)code = 0x41f00000;
4016 * ldfltd ftemp, [pc, #8] 0x41f00000 0x00000000
4017 * adfltd fdest, fdest, ftemp
4021 case OP_ICONV_TO_R4:
4022 ARM_FLTS (code, ins->dreg, ins->sreg1);
4024 case OP_ICONV_TO_R8:
4025 ARM_FLTD (code, ins->dreg, ins->sreg1);
4028 #elif defined(ARM_FPU_VFP)
4031 if (cfg->compile_aot) {
4032 ARM_FLDD (code, ins->dreg, ARMREG_PC, 0);
4034 *(guint32*)code = ((guint32*)(ins->inst_p0))[0];
4036 *(guint32*)code = ((guint32*)(ins->inst_p0))[1];
4039 /* FIXME: we can optimize the imm load by dealing with part of
4040 * the displacement in LDFD (aligning to 512).
4042 code = mono_arm_emit_load_imm (code, ARMREG_LR, (guint32)ins->inst_p0);
4043 ARM_FLDD (code, ins->dreg, ARMREG_LR, 0);
4047 if (cfg->compile_aot) {
4048 ARM_FLDS (code, ins->dreg, ARMREG_PC, 0);
4050 *(guint32*)code = ((guint32*)(ins->inst_p0))[0];
4052 ARM_CVTS (code, ins->dreg, ins->dreg);
4054 code = mono_arm_emit_load_imm (code, ARMREG_LR, (guint32)ins->inst_p0);
4055 ARM_FLDS (code, ins->dreg, ARMREG_LR, 0);
4056 ARM_CVTS (code, ins->dreg, ins->dreg);
4059 case OP_STORER8_MEMBASE_REG:
4060 /* This is generated by the local regalloc pass which runs after the lowering pass */
4061 if (!arm_is_fpimm8 (ins->inst_offset)) {
4062 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_offset);
4063 ARM_ADD_REG_REG (code, ARMREG_LR, ARMREG_LR, ins->inst_destbasereg);
4064 ARM_FSTD (code, ins->sreg1, ARMREG_LR, 0);
4066 ARM_FSTD (code, ins->sreg1, ins->inst_destbasereg, ins->inst_offset);
4069 case OP_LOADR8_MEMBASE:
4070 /* This is generated by the local regalloc pass which runs after the lowering pass */
4071 if (!arm_is_fpimm8 (ins->inst_offset)) {
4072 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_offset);
4073 ARM_ADD_REG_REG (code, ARMREG_LR, ARMREG_LR, ins->inst_basereg);
4074 ARM_FLDD (code, ins->dreg, ARMREG_LR, 0);
4076 ARM_FLDD (code, ins->dreg, ins->inst_basereg, ins->inst_offset);
4079 case OP_STORER4_MEMBASE_REG:
4080 g_assert (arm_is_fpimm8 (ins->inst_offset));
4081 ARM_CVTD (code, ARM_VFP_F0, ins->sreg1);
4082 ARM_FSTS (code, ARM_VFP_F0, ins->inst_destbasereg, ins->inst_offset);
4084 case OP_LOADR4_MEMBASE:
4085 g_assert (arm_is_fpimm8 (ins->inst_offset));
4086 ARM_FLDS (code, ARM_VFP_F0, ins->inst_basereg, ins->inst_offset);
4087 ARM_CVTS (code, ins->dreg, ARM_VFP_F0);
4089 case OP_ICONV_TO_R_UN: {
4090 g_assert_not_reached ();
4093 case OP_ICONV_TO_R4:
4094 ARM_FMSR (code, ARM_VFP_F0, ins->sreg1);
4095 ARM_FSITOS (code, ARM_VFP_F0, ARM_VFP_F0);
4096 ARM_CVTS (code, ins->dreg, ARM_VFP_F0);
4098 case OP_ICONV_TO_R8:
4099 ARM_FMSR (code, ARM_VFP_F0, ins->sreg1);
4100 ARM_FSITOD (code, ins->dreg, ARM_VFP_F0);
4104 if (mono_method_signature (cfg->method)->ret->type == MONO_TYPE_R4) {
4105 ARM_CVTD (code, ARM_VFP_F0, ins->sreg1);
4106 ARM_FMRS (code, ARMREG_R0, ARM_VFP_F0);
4108 ARM_FMRRD (code, ARMREG_R0, ARMREG_R1, ins->sreg1);
4114 case OP_FCONV_TO_I1:
4115 code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 1, TRUE);
4117 case OP_FCONV_TO_U1:
4118 code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 1, FALSE);
4120 case OP_FCONV_TO_I2:
4121 code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 2, TRUE);
4123 case OP_FCONV_TO_U2:
4124 code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 2, FALSE);
4126 case OP_FCONV_TO_I4:
4128 code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 4, TRUE);
4130 case OP_FCONV_TO_U4:
4132 code = emit_float_to_int (cfg, code, ins->dreg, ins->sreg1, 4, FALSE);
4134 case OP_FCONV_TO_I8:
4135 case OP_FCONV_TO_U8:
4136 g_assert_not_reached ();
4137 /* Implemented as helper calls */
4139 case OP_LCONV_TO_R_UN:
4140 g_assert_not_reached ();
4141 /* Implemented as helper calls */
4143 case OP_LCONV_TO_OVF_I4_2: {
4144 guint8 *high_bit_not_set, *valid_negative, *invalid_negative, *valid_positive;
4146 * Valid ints: 0xffffffff:80000000 to 00000000:0x7fffffff
4149 ARM_CMP_REG_IMM8 (code, ins->sreg1, 0);
4150 high_bit_not_set = code;
4151 ARM_B_COND (code, ARMCOND_GE, 0); /*branch if bit 31 of the lower part is not set*/
4153 ARM_CMN_REG_IMM8 (code, ins->sreg2, 1); /*This has the same effect as CMP reg, 0xFFFFFFFF */
4154 valid_negative = code;
4155 ARM_B_COND (code, ARMCOND_EQ, 0); /*branch if upper part == 0xFFFFFFFF (lower part has bit 31 set) */
4156 invalid_negative = code;
4157 ARM_B_COND (code, ARMCOND_AL, 0);
4159 arm_patch (high_bit_not_set, code);
4161 ARM_CMP_REG_IMM8 (code, ins->sreg2, 0);
4162 valid_positive = code;
4163 ARM_B_COND (code, ARMCOND_EQ, 0); /*branch if upper part == 0 (lower part has bit 31 clear)*/
4165 arm_patch (invalid_negative, code);
4166 EMIT_COND_SYSTEM_EXCEPTION_FLAGS (ARMCOND_AL, "OverflowException");
4168 arm_patch (valid_negative, code);
4169 arm_patch (valid_positive, code);
4171 if (ins->dreg != ins->sreg1)
4172 ARM_MOV_REG_REG (code, ins->dreg, ins->sreg1);
4177 ARM_FPA_ADFD (code, ins->dreg, ins->sreg1, ins->sreg2);
4180 ARM_FPA_SUFD (code, ins->dreg, ins->sreg1, ins->sreg2);
4183 ARM_FPA_MUFD (code, ins->dreg, ins->sreg1, ins->sreg2);
4186 ARM_FPA_DVFD (code, ins->dreg, ins->sreg1, ins->sreg2);
4189 ARM_MNFD (code, ins->dreg, ins->sreg1);
4191 #elif defined(ARM_FPU_VFP)
4193 ARM_VFP_ADDD (code, ins->dreg, ins->sreg1, ins->sreg2);
4196 ARM_VFP_SUBD (code, ins->dreg, ins->sreg1, ins->sreg2);
4199 ARM_VFP_MULD (code, ins->dreg, ins->sreg1, ins->sreg2);
4202 ARM_VFP_DIVD (code, ins->dreg, ins->sreg1, ins->sreg2);
4205 ARM_NEGD (code, ins->dreg, ins->sreg1);
4210 g_assert_not_reached ();
4214 ARM_FCMP (code, ARM_FPA_CMF, ins->sreg1, ins->sreg2);
4215 #elif defined(ARM_FPU_VFP)
4216 ARM_CMPD (code, ins->sreg1, ins->sreg2);
4222 ARM_FCMP (code, ARM_FPA_CMF, ins->sreg1, ins->sreg2);
4223 #elif defined(ARM_FPU_VFP)
4224 ARM_CMPD (code, ins->sreg1, ins->sreg2);
4227 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 0, ARMCOND_NE);
4228 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_EQ);
4232 ARM_FCMP (code, ARM_FPA_CMF, ins->sreg1, ins->sreg2);
4233 #elif defined(ARM_FPU_VFP)
4234 ARM_CMPD (code, ins->sreg1, ins->sreg2);
4237 ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
4238 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_MI);
4242 ARM_FCMP (code, ARM_FPA_CMF, ins->sreg1, ins->sreg2);
4243 #elif defined(ARM_FPU_VFP)
4244 ARM_CMPD (code, ins->sreg1, ins->sreg2);
4247 ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
4248 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_MI);
4249 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_VS);
4254 ARM_FCMP (code, ARM_FPA_CMF, ins->sreg2, ins->sreg1);
4255 #elif defined(ARM_FPU_VFP)
4256 ARM_CMPD (code, ins->sreg2, ins->sreg1);
4259 ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
4260 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_MI);
4265 ARM_FCMP (code, ARM_FPA_CMF, ins->sreg2, ins->sreg1);
4266 #elif defined(ARM_FPU_VFP)
4267 ARM_CMPD (code, ins->sreg2, ins->sreg1);
4270 ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
4271 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_MI);
4272 ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_VS);
4274 /* ARM FPA flags table:
4275 * N Less than ARMCOND_MI
4276 * Z Equal ARMCOND_EQ
4277 * C Greater Than or Equal ARMCOND_CS
4278 * V Unordered ARMCOND_VS
4281 EMIT_COND_BRANCH (ins, OP_IBEQ - OP_IBEQ);
4284 EMIT_COND_BRANCH (ins, OP_IBNE_UN - OP_IBEQ);
4287 EMIT_COND_BRANCH_FLAGS (ins, ARMCOND_MI); /* N set */
4290 EMIT_COND_BRANCH_FLAGS (ins, ARMCOND_VS); /* V set */
4291 EMIT_COND_BRANCH_FLAGS (ins, ARMCOND_MI); /* N set */
4297 g_assert_not_reached ();
4301 EMIT_COND_BRANCH_FLAGS (ins, ARMCOND_GE);
4303 /* FPA requires EQ even though the docs suggest that just CS is enough */
4304 EMIT_COND_BRANCH_FLAGS (ins, ARMCOND_EQ);
4305 EMIT_COND_BRANCH_FLAGS (ins, ARMCOND_CS);
4309 EMIT_COND_BRANCH_FLAGS (ins, ARMCOND_VS); /* V set */
4310 EMIT_COND_BRANCH_FLAGS (ins, ARMCOND_GE);
4315 if (ins->dreg != ins->sreg1)
4316 ARM_MVFD (code, ins->dreg, ins->sreg1);
4317 #elif defined(ARM_FPU_VFP)
4318 ARM_ABSD (code, ARM_VFP_D1, ins->sreg1);
4319 ARM_FLDD (code, ARM_VFP_D0, ARMREG_PC, 0);
4321 *(guint32*)code = 0xffffffff;
4323 *(guint32*)code = 0x7fefffff;
4325 ARM_CMPD (code, ARM_VFP_D1, ARM_VFP_D0);
4327 EMIT_COND_SYSTEM_EXCEPTION_FLAGS (ARMCOND_GT, "ArithmeticException");
4328 ARM_CMPD (code, ins->sreg1, ins->sreg1);
4330 EMIT_COND_SYSTEM_EXCEPTION_FLAGS (ARMCOND_VS, "ArithmeticException");
4332 ARM_CPYD (code, ins->dreg, ins->sreg1);
4337 g_warning ("unknown opcode %s in %s()\n", mono_inst_name (ins->opcode), __FUNCTION__);
4338 g_assert_not_reached ();
4341 if ((cfg->opt & MONO_OPT_BRANCH) && ((code - cfg->native_code - offset) > max_len)) {
4342 g_warning ("wrong maximal instruction length of instruction %s (expected %d, got %d)",
4343 mono_inst_name (ins->opcode), max_len, code - cfg->native_code - offset);
4344 g_assert_not_reached ();
4350 last_offset = offset;
4353 cfg->code_len = code - cfg->native_code;
4356 #endif /* DISABLE_JIT */
4358 #ifdef HAVE_AEABI_READ_TP
4359 void __aeabi_read_tp (void);
4363 mono_arch_register_lowlevel_calls (void)
4365 /* The signature doesn't matter */
4366 mono_register_jit_icall (mono_arm_throw_exception, "mono_arm_throw_exception", mono_create_icall_signature ("void"), TRUE);
4367 mono_register_jit_icall (mono_arm_throw_exception_by_token, "mono_arm_throw_exception_by_token", mono_create_icall_signature ("void"), TRUE);
4369 #ifndef MONO_CROSS_COMPILE
4370 #ifdef HAVE_AEABI_READ_TP
4371 mono_register_jit_icall (__aeabi_read_tp, "__aeabi_read_tp", mono_create_icall_signature ("void"), TRUE);
4376 #define patch_lis_ori(ip,val) do {\
4377 guint16 *__lis_ori = (guint16*)(ip); \
4378 __lis_ori [1] = (((guint32)(val)) >> 16) & 0xffff; \
4379 __lis_ori [3] = ((guint32)(val)) & 0xffff; \
4383 mono_arch_patch_code (MonoMethod *method, MonoDomain *domain, guint8 *code, MonoJumpInfo *ji, gboolean run_cctors)
4385 MonoJumpInfo *patch_info;
4386 gboolean compile_aot = !run_cctors;
4388 for (patch_info = ji; patch_info; patch_info = patch_info->next) {
4389 unsigned char *ip = patch_info->ip.i + code;
4390 const unsigned char *target;
4392 if (patch_info->type == MONO_PATCH_INFO_SWITCH && !compile_aot) {
4393 gpointer *jt = (gpointer*)(ip + 8);
4395 /* jt is the inlined jump table, 2 instructions after ip
4396 * In the normal case we store the absolute addresses,
4397 * otherwise the displacements.
4399 for (i = 0; i < patch_info->data.table->table_size; i++)
4400 jt [i] = code + (int)patch_info->data.table->table [i];
4403 target = mono_resolve_patch_target (method, domain, code, patch_info, run_cctors);
4406 switch (patch_info->type) {
4407 case MONO_PATCH_INFO_BB:
4408 case MONO_PATCH_INFO_LABEL:
4411 /* No need to patch these */
4416 switch (patch_info->type) {
4417 case MONO_PATCH_INFO_IP:
4418 g_assert_not_reached ();
4419 patch_lis_ori (ip, ip);
4421 case MONO_PATCH_INFO_METHOD_REL:
4422 g_assert_not_reached ();
4423 *((gpointer *)(ip)) = code + patch_info->data.offset;
4425 case MONO_PATCH_INFO_METHODCONST:
4426 case MONO_PATCH_INFO_CLASS:
4427 case MONO_PATCH_INFO_IMAGE:
4428 case MONO_PATCH_INFO_FIELD:
4429 case MONO_PATCH_INFO_VTABLE:
4430 case MONO_PATCH_INFO_IID:
4431 case MONO_PATCH_INFO_SFLDA:
4432 case MONO_PATCH_INFO_LDSTR:
4433 case MONO_PATCH_INFO_TYPE_FROM_HANDLE:
4434 case MONO_PATCH_INFO_LDTOKEN:
4435 g_assert_not_reached ();
4436 /* from OP_AOTCONST : lis + ori */
4437 patch_lis_ori (ip, target);
4439 case MONO_PATCH_INFO_R4:
4440 case MONO_PATCH_INFO_R8:
4441 g_assert_not_reached ();
4442 *((gconstpointer *)(ip + 2)) = patch_info->data.target;
4444 case MONO_PATCH_INFO_EXC_NAME:
4445 g_assert_not_reached ();
4446 *((gconstpointer *)(ip + 1)) = patch_info->data.name;
4448 case MONO_PATCH_INFO_NONE:
4449 case MONO_PATCH_INFO_BB_OVF:
4450 case MONO_PATCH_INFO_EXC_OVF:
4451 /* everything is dealt with at epilog output time */
4456 arm_patch_general (domain, ip, target);
4463 * Stack frame layout:
4465 * ------------------- fp
4466 * MonoLMF structure or saved registers
4467 * -------------------
4469 * -------------------
4471 * -------------------
4472 * optional 8 bytes for tracing
4473 * -------------------
4474 * param area size is cfg->param_area
4475 * ------------------- sp
4478 mono_arch_emit_prolog (MonoCompile *cfg)
4480 MonoMethod *method = cfg->method;
4482 MonoMethodSignature *sig;
4484 int alloc_size, pos, max_offset, i, rot_amount;
4489 int prev_sp_offset, reg_offset;
4491 if (mono_jit_trace_calls != NULL && mono_trace_eval (method))
4494 sig = mono_method_signature (method);
4495 cfg->code_size = 256 + sig->param_count * 20;
4496 code = cfg->native_code = g_malloc (cfg->code_size);
4498 mono_emit_unwind_op_def_cfa (cfg, code, ARMREG_SP, 0);
4500 ARM_MOV_REG_REG (code, ARMREG_IP, ARMREG_SP);
4502 alloc_size = cfg->stack_offset;
4505 if (!method->save_lmf) {
4506 /* We save SP by storing it into IP and saving IP */
4507 ARM_PUSH (code, (cfg->used_int_regs | (1 << ARMREG_IP) | (1 << ARMREG_LR)));
4508 prev_sp_offset = 8; /* ip and lr */
4509 for (i = 0; i < 16; ++i) {
4510 if (cfg->used_int_regs & (1 << i))
4511 prev_sp_offset += 4;
4513 mono_emit_unwind_op_def_cfa_offset (cfg, code, prev_sp_offset);
4515 for (i = 0; i < 16; ++i) {
4516 if ((cfg->used_int_regs & (1 << i)) || (i == ARMREG_IP) || (i == ARMREG_LR)) {
4517 mono_emit_unwind_op_offset (cfg, code, i, (- prev_sp_offset) + reg_offset);
4522 ARM_PUSH (code, 0x5ff0);
4523 prev_sp_offset = 4 * 10; /* all but r0-r3, sp and pc */
4524 mono_emit_unwind_op_def_cfa_offset (cfg, code, prev_sp_offset);
4526 for (i = 0; i < 16; ++i) {
4527 if ((i > ARMREG_R3) && (i != ARMREG_SP) && (i != ARMREG_PC)) {
4528 mono_emit_unwind_op_offset (cfg, code, i, (- prev_sp_offset) + reg_offset);
4532 pos += sizeof (MonoLMF) - prev_sp_offset;
4536 // align to MONO_ARCH_FRAME_ALIGNMENT bytes
4537 if (alloc_size & (MONO_ARCH_FRAME_ALIGNMENT - 1)) {
4538 alloc_size += MONO_ARCH_FRAME_ALIGNMENT - 1;
4539 alloc_size &= ~(MONO_ARCH_FRAME_ALIGNMENT - 1);
4542 /* the stack used in the pushed regs */
4543 if (prev_sp_offset & 4)
4545 cfg->stack_usage = alloc_size;
4547 if ((i = mono_arm_is_rotated_imm8 (alloc_size, &rot_amount)) >= 0) {
4548 ARM_SUB_REG_IMM (code, ARMREG_SP, ARMREG_SP, i, rot_amount);
4550 code = mono_arm_emit_load_imm (code, ARMREG_IP, alloc_size);
4551 ARM_SUB_REG_REG (code, ARMREG_SP, ARMREG_SP, ARMREG_IP);
4553 mono_emit_unwind_op_def_cfa_offset (cfg, code, prev_sp_offset + alloc_size);
4555 if (cfg->frame_reg != ARMREG_SP) {
4556 ARM_MOV_REG_REG (code, cfg->frame_reg, ARMREG_SP);
4557 mono_emit_unwind_op_def_cfa_reg (cfg, code, cfg->frame_reg);
4559 //g_print ("prev_sp_offset: %d, alloc_size:%d\n", prev_sp_offset, alloc_size);
4560 prev_sp_offset += alloc_size;
4562 /* compute max_offset in order to use short forward jumps
4563 * we could skip do it on arm because the immediate displacement
4564 * for jumps is large enough, it may be useful later for constant pools
4567 for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
4568 MonoInst *ins = bb->code;
4569 bb->max_offset = max_offset;
4571 if (cfg->prof_options & MONO_PROFILE_COVERAGE)
4574 MONO_BB_FOR_EACH_INS (bb, ins)
4575 max_offset += ((guint8 *)ins_get_spec (ins->opcode))[MONO_INST_LEN];
4578 /* store runtime generic context */
4579 if (cfg->rgctx_var) {
4580 MonoInst *ins = cfg->rgctx_var;
4582 g_assert (ins->opcode == OP_REGOFFSET);
4584 if (arm_is_imm12 (ins->inst_offset)) {
4585 ARM_STR_IMM (code, MONO_ARCH_RGCTX_REG, ins->inst_basereg, ins->inst_offset);
4587 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_offset);
4588 ARM_STR_REG_REG (code, MONO_ARCH_RGCTX_REG, ins->inst_basereg, ARMREG_LR);
4592 /* load arguments allocated to register from the stack */
4595 cinfo = get_call_info (NULL, sig, sig->pinvoke);
4597 if (MONO_TYPE_ISSTRUCT (sig->ret) && cinfo->ret.storage != RegTypeStructByVal) {
4598 ArgInfo *ainfo = &cinfo->ret;
4599 inst = cfg->vret_addr;
4600 g_assert (arm_is_imm12 (inst->inst_offset));
4601 ARM_STR_IMM (code, ainfo->reg, inst->inst_basereg, inst->inst_offset);
4604 if (sig->call_convention == MONO_CALL_VARARG) {
4605 ArgInfo *cookie = &cinfo->sig_cookie;
4607 /* Save the sig cookie address */
4608 g_assert (cookie->storage == RegTypeBase);
4610 g_assert (arm_is_imm12 (prev_sp_offset + cookie->offset));
4611 g_assert (arm_is_imm12 (cfg->sig_cookie));
4612 ARM_ADD_REG_IMM8 (code, ARMREG_IP, cfg->frame_reg, prev_sp_offset + cookie->offset);
4613 ARM_STR_IMM (code, ARMREG_IP, cfg->frame_reg, cfg->sig_cookie);
4616 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
4617 ArgInfo *ainfo = cinfo->args + i;
4618 inst = cfg->args [pos];
4620 if (cfg->verbose_level > 2)
4621 g_print ("Saving argument %d (type: %d)\n", i, ainfo->storage);
4622 if (inst->opcode == OP_REGVAR) {
4623 if (ainfo->storage == RegTypeGeneral)
4624 ARM_MOV_REG_REG (code, inst->dreg, ainfo->reg);
4625 else if (ainfo->storage == RegTypeFP) {
4626 g_assert_not_reached ();
4627 } else if (ainfo->storage == RegTypeBase) {
4628 if (arm_is_imm12 (prev_sp_offset + ainfo->offset)) {
4629 ARM_LDR_IMM (code, inst->dreg, ARMREG_SP, (prev_sp_offset + ainfo->offset));
4631 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
4632 ARM_LDR_REG_REG (code, inst->dreg, ARMREG_SP, ARMREG_IP);
4635 g_assert_not_reached ();
4637 if (cfg->verbose_level > 2)
4638 g_print ("Argument %d assigned to register %s\n", pos, mono_arch_regname (inst->dreg));
4640 /* the argument should be put on the stack: FIXME handle size != word */
4641 if (ainfo->storage == RegTypeGeneral || ainfo->storage == RegTypeIRegPair) {
4642 switch (ainfo->size) {
4644 if (arm_is_imm12 (inst->inst_offset))
4645 ARM_STRB_IMM (code, ainfo->reg, inst->inst_basereg, inst->inst_offset);
4647 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
4648 ARM_STRB_REG_REG (code, ainfo->reg, inst->inst_basereg, ARMREG_IP);
4652 if (arm_is_imm8 (inst->inst_offset)) {
4653 ARM_STRH_IMM (code, ainfo->reg, inst->inst_basereg, inst->inst_offset);
4655 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
4656 ARM_STRH_REG_REG (code, ainfo->reg, inst->inst_basereg, ARMREG_IP);
4660 g_assert (arm_is_imm12 (inst->inst_offset));
4661 ARM_STR_IMM (code, ainfo->reg, inst->inst_basereg, inst->inst_offset);
4662 g_assert (arm_is_imm12 (inst->inst_offset + 4));
4663 ARM_STR_IMM (code, ainfo->reg + 1, inst->inst_basereg, inst->inst_offset + 4);
4666 if (arm_is_imm12 (inst->inst_offset)) {
4667 ARM_STR_IMM (code, ainfo->reg, inst->inst_basereg, inst->inst_offset);
4669 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
4670 ARM_STR_REG_REG (code, ainfo->reg, inst->inst_basereg, ARMREG_IP);
4674 } else if (ainfo->storage == RegTypeBaseGen) {
4675 g_assert (arm_is_imm12 (prev_sp_offset + ainfo->offset));
4676 g_assert (arm_is_imm12 (inst->inst_offset));
4677 ARM_LDR_IMM (code, ARMREG_LR, ARMREG_SP, (prev_sp_offset + ainfo->offset));
4678 ARM_STR_IMM (code, ARMREG_LR, inst->inst_basereg, inst->inst_offset + 4);
4679 ARM_STR_IMM (code, ARMREG_R3, inst->inst_basereg, inst->inst_offset);
4680 } else if (ainfo->storage == RegTypeBase) {
4681 if (arm_is_imm12 (prev_sp_offset + ainfo->offset)) {
4682 ARM_LDR_IMM (code, ARMREG_LR, ARMREG_SP, (prev_sp_offset + ainfo->offset));
4684 code = mono_arm_emit_load_imm (code, ARMREG_IP, prev_sp_offset + ainfo->offset);
4685 ARM_LDR_REG_REG (code, ARMREG_LR, ARMREG_SP, ARMREG_IP);
4688 switch (ainfo->size) {
4690 if (arm_is_imm8 (inst->inst_offset)) {
4691 ARM_STRB_IMM (code, ARMREG_LR, inst->inst_basereg, inst->inst_offset);
4693 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
4694 ARM_STRB_REG_REG (code, ARMREG_LR, inst->inst_basereg, ARMREG_IP);
4698 if (arm_is_imm8 (inst->inst_offset)) {
4699 ARM_STRH_IMM (code, ARMREG_LR, inst->inst_basereg, inst->inst_offset);
4701 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
4702 ARM_STRH_REG_REG (code, ARMREG_LR, inst->inst_basereg, ARMREG_IP);
4706 if (arm_is_imm12 (inst->inst_offset)) {
4707 ARM_STR_IMM (code, ARMREG_LR, inst->inst_basereg, inst->inst_offset);
4709 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
4710 ARM_STR_REG_REG (code, ARMREG_LR, inst->inst_basereg, ARMREG_IP);
4712 if (arm_is_imm12 (prev_sp_offset + ainfo->offset + 4)) {
4713 ARM_LDR_IMM (code, ARMREG_LR, ARMREG_SP, (prev_sp_offset + ainfo->offset + 4));
4715 code = mono_arm_emit_load_imm (code, ARMREG_IP, prev_sp_offset + ainfo->offset + 4);
4716 ARM_LDR_REG_REG (code, ARMREG_LR, ARMREG_SP, ARMREG_IP);
4718 if (arm_is_imm12 (inst->inst_offset + 4)) {
4719 ARM_STR_IMM (code, ARMREG_LR, inst->inst_basereg, inst->inst_offset + 4);
4721 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset + 4);
4722 ARM_STR_REG_REG (code, ARMREG_LR, inst->inst_basereg, ARMREG_IP);
4726 if (arm_is_imm12 (inst->inst_offset)) {
4727 ARM_STR_IMM (code, ARMREG_LR, inst->inst_basereg, inst->inst_offset);
4729 code = mono_arm_emit_load_imm (code, ARMREG_IP, inst->inst_offset);
4730 ARM_STR_REG_REG (code, ARMREG_LR, inst->inst_basereg, ARMREG_IP);
4734 } else if (ainfo->storage == RegTypeFP) {
4735 g_assert_not_reached ();
4736 } else if (ainfo->storage == RegTypeStructByVal) {
4737 int doffset = inst->inst_offset;
4741 size = mini_type_stack_size_full (cfg->generic_sharing_context, inst->inst_vtype, NULL, sig->pinvoke);
4742 for (cur_reg = 0; cur_reg < ainfo->size; ++cur_reg) {
4743 if (arm_is_imm12 (doffset)) {
4744 ARM_STR_IMM (code, ainfo->reg + cur_reg, inst->inst_basereg, doffset);
4746 code = mono_arm_emit_load_imm (code, ARMREG_IP, doffset);
4747 ARM_STR_REG_REG (code, ainfo->reg + cur_reg, inst->inst_basereg, ARMREG_IP);
4749 soffset += sizeof (gpointer);
4750 doffset += sizeof (gpointer);
4752 if (ainfo->vtsize) {
4753 /* FIXME: handle overrun! with struct sizes not multiple of 4 */
4754 //g_print ("emit_memcpy (prev_sp_ofs: %d, ainfo->offset: %d, soffset: %d)\n", prev_sp_offset, ainfo->offset, soffset);
4755 code = emit_memcpy (code, ainfo->vtsize * sizeof (gpointer), inst->inst_basereg, doffset, ARMREG_SP, prev_sp_offset + ainfo->offset);
4757 } else if (ainfo->storage == RegTypeStructByAddr) {
4758 g_assert_not_reached ();
4759 /* FIXME: handle overrun! with struct sizes not multiple of 4 */
4760 code = emit_memcpy (code, ainfo->vtsize * sizeof (gpointer), inst->inst_basereg, inst->inst_offset, ainfo->reg, 0);
4762 g_assert_not_reached ();
4767 if (method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED) {
4768 if (cfg->compile_aot)
4769 /* AOT code is only used in the root domain */
4770 code = mono_arm_emit_load_imm (code, ARMREG_R0, 0);
4772 code = mono_arm_emit_load_imm (code, ARMREG_R0, (guint32)cfg->domain);
4773 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_INTERNAL_METHOD,
4774 (gpointer)"mono_jit_thread_attach");
4775 code = emit_call_seq (cfg, code);
4778 if (method->save_lmf) {
4779 gboolean get_lmf_fast = FALSE;
4781 #ifdef HAVE_AEABI_READ_TP
4782 gint32 lmf_addr_tls_offset = mono_get_lmf_addr_tls_offset ();
4784 if (lmf_addr_tls_offset != -1) {
4785 get_lmf_fast = TRUE;
4787 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_INTERNAL_METHOD,
4788 (gpointer)"__aeabi_read_tp");
4789 code = emit_call_seq (cfg, code);
4791 ARM_LDR_IMM (code, ARMREG_R0, ARMREG_R0, lmf_addr_tls_offset);
4792 get_lmf_fast = TRUE;
4795 if (!get_lmf_fast) {
4796 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_INTERNAL_METHOD,
4797 (gpointer)"mono_get_lmf_addr");
4798 code = emit_call_seq (cfg, code);
4800 /* we build the MonoLMF structure on the stack - see mini-arm.h */
4801 /* lmf_offset is the offset from the previous stack pointer,
4802 * alloc_size is the total stack space allocated, so the offset
4803 * of MonoLMF from the current stack ptr is alloc_size - lmf_offset.
4804 * The pointer to the struct is put in r1 (new_lmf).
4805 * r2 is used as scratch
4806 * The callee-saved registers are already in the MonoLMF structure
4808 code = emit_big_add (code, ARMREG_R1, ARMREG_SP, alloc_size - lmf_offset);
4809 /* r0 is the result from mono_get_lmf_addr () */
4810 ARM_STR_IMM (code, ARMREG_R0, ARMREG_R1, G_STRUCT_OFFSET (MonoLMF, lmf_addr));
4811 /* new_lmf->previous_lmf = *lmf_addr */
4812 ARM_LDR_IMM (code, ARMREG_R2, ARMREG_R0, G_STRUCT_OFFSET (MonoLMF, previous_lmf));
4813 ARM_STR_IMM (code, ARMREG_R2, ARMREG_R1, G_STRUCT_OFFSET (MonoLMF, previous_lmf));
4814 /* *(lmf_addr) = r1 */
4815 ARM_STR_IMM (code, ARMREG_R1, ARMREG_R0, G_STRUCT_OFFSET (MonoLMF, previous_lmf));
4816 /* Skip method (only needed for trampoline LMF frames) */
4817 ARM_STR_IMM (code, ARMREG_SP, ARMREG_R1, G_STRUCT_OFFSET (MonoLMF, ebp));
4818 /* save the current IP */
4819 ARM_MOV_REG_REG (code, ARMREG_R2, ARMREG_PC);
4820 ARM_STR_IMM (code, ARMREG_R2, ARMREG_R1, G_STRUCT_OFFSET (MonoLMF, eip));
4824 code = mono_arch_instrument_prolog (cfg, mono_trace_enter_method, code, TRUE);
4826 if (cfg->arch.seq_point_info_var) {
4827 MonoInst *ins = cfg->arch.seq_point_info_var;
4829 /* Initialize the variable from a GOT slot */
4830 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_SEQ_POINT_INFO, cfg->method);
4831 ARM_LDR_IMM (code, ARMREG_R0, ARMREG_PC, 0);
4833 *(gpointer*)code = NULL;
4835 ARM_LDR_REG_REG (code, ARMREG_R0, ARMREG_PC, ARMREG_R0);
4837 g_assert (ins->opcode == OP_REGOFFSET);
4839 if (arm_is_imm12 (ins->inst_offset)) {
4840 ARM_STR_IMM (code, ARMREG_R0, ins->inst_basereg, ins->inst_offset);
4842 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_offset);
4843 ARM_STR_REG_REG (code, ARMREG_R0, ins->inst_basereg, ARMREG_LR);
4847 /* Initialize ss_trigger_page_var */
4849 MonoInst *info_var = cfg->arch.seq_point_info_var;
4850 MonoInst *ss_trigger_page_var = cfg->arch.ss_trigger_page_var;
4851 int dreg = ARMREG_LR;
4854 g_assert (info_var->opcode == OP_REGOFFSET);
4855 g_assert (arm_is_imm12 (info_var->inst_offset));
4857 ARM_LDR_IMM (code, dreg, info_var->inst_basereg, info_var->inst_offset);
4858 /* Load the trigger page addr */
4859 ARM_LDR_IMM (code, dreg, dreg, G_STRUCT_OFFSET (SeqPointInfo, ss_trigger_page));
4860 ARM_STR_IMM (code, dreg, ss_trigger_page_var->inst_basereg, ss_trigger_page_var->inst_offset);
4864 cfg->code_len = code - cfg->native_code;
4865 g_assert (cfg->code_len < cfg->code_size);
4872 mono_arch_emit_epilog (MonoCompile *cfg)
4874 MonoMethod *method = cfg->method;
4875 int pos, i, rot_amount;
4876 int max_epilog_size = 16 + 20*4;
4880 if (cfg->method->save_lmf)
4881 max_epilog_size += 128;
4883 if (mono_jit_trace_calls != NULL)
4884 max_epilog_size += 50;
4886 if (cfg->prof_options & MONO_PROFILE_ENTER_LEAVE)
4887 max_epilog_size += 50;
4889 while (cfg->code_len + max_epilog_size > (cfg->code_size - 16)) {
4890 cfg->code_size *= 2;
4891 cfg->native_code = g_realloc (cfg->native_code, cfg->code_size);
4892 mono_jit_stats.code_reallocs++;
4896 * Keep in sync with OP_JMP
4898 code = cfg->native_code + cfg->code_len;
4900 if (mono_jit_trace_calls != NULL && mono_trace_eval (method)) {
4901 code = mono_arch_instrument_epilog (cfg, mono_trace_leave_method, code, TRUE);
4905 /* Load returned vtypes into registers if needed */
4906 cinfo = cfg->arch.cinfo;
4907 if (cinfo->ret.storage == RegTypeStructByVal) {
4908 MonoInst *ins = cfg->ret;
4910 if (arm_is_imm12 (ins->inst_offset)) {
4911 ARM_LDR_IMM (code, ARMREG_R0, ins->inst_basereg, ins->inst_offset);
4913 code = mono_arm_emit_load_imm (code, ARMREG_LR, ins->inst_offset);
4914 ARM_LDR_REG_REG (code, ARMREG_R0, ins->inst_basereg, ARMREG_LR);
4918 if (method->save_lmf) {
4920 /* all but r0-r3, sp and pc */
4921 pos += sizeof (MonoLMF) - (4 * 10);
4923 /* r2 contains the pointer to the current LMF */
4924 code = emit_big_add (code, ARMREG_R2, cfg->frame_reg, cfg->stack_usage - lmf_offset);
4925 /* ip = previous_lmf */
4926 ARM_LDR_IMM (code, ARMREG_IP, ARMREG_R2, G_STRUCT_OFFSET (MonoLMF, previous_lmf));
4928 ARM_LDR_IMM (code, ARMREG_LR, ARMREG_R2, G_STRUCT_OFFSET (MonoLMF, lmf_addr));
4929 /* *(lmf_addr) = previous_lmf */
4930 ARM_STR_IMM (code, ARMREG_IP, ARMREG_LR, G_STRUCT_OFFSET (MonoLMF, previous_lmf));
4931 /* FIXME: speedup: there is no actual need to restore the registers if
4932 * we didn't actually change them (idea from Zoltan).
4935 /* point sp at the registers to restore: 10 is 14 -4, because we skip r0-r3 */
4936 ARM_ADD_REG_IMM8 (code, ARMREG_SP, ARMREG_R2, (sizeof (MonoLMF) - 10 * sizeof (gulong)));
4937 ARM_POP_NWB (code, 0xaff0); /* restore ip to sp and lr to pc */
4939 if ((i = mono_arm_is_rotated_imm8 (cfg->stack_usage, &rot_amount)) >= 0) {
4940 ARM_ADD_REG_IMM (code, ARMREG_SP, cfg->frame_reg, i, rot_amount);
4942 code = mono_arm_emit_load_imm (code, ARMREG_IP, cfg->stack_usage);
4943 ARM_ADD_REG_REG (code, ARMREG_SP, ARMREG_SP, ARMREG_IP);
4945 /* FIXME: add v4 thumb interworking support */
4946 ARM_POP_NWB (code, cfg->used_int_regs | ((1 << ARMREG_SP) | (1 << ARMREG_PC)));
4949 cfg->code_len = code - cfg->native_code;
4951 g_assert (cfg->code_len < cfg->code_size);
4955 /* remove once throw_exception_by_name is eliminated */
/*
 * Map a System.* exception class name to its MONO_EXC_* intrinsic id.
 * Aborts the process via g_error() if the name is not one of the
 * intrinsic exceptions the JIT knows how to throw inline.
 * NOTE(review): this listing is missing lines (gaps in embedded numbers);
 * the return type / braces are not visible here.
 */
4957 exception_id_by_name (const char *name)
4959 if (strcmp (name, "IndexOutOfRangeException") == 0)
4960 return MONO_EXC_INDEX_OUT_OF_RANGE;
4961 if (strcmp (name, "OverflowException") == 0)
4962 return MONO_EXC_OVERFLOW;
4963 if (strcmp (name, "ArithmeticException") == 0)
4964 return MONO_EXC_ARITHMETIC;
4965 if (strcmp (name, "DivideByZeroException") == 0)
4966 return MONO_EXC_DIVIDE_BY_ZERO;
4967 if (strcmp (name, "InvalidCastException") == 0)
4968 return MONO_EXC_INVALID_CAST;
4969 if (strcmp (name, "NullReferenceException") == 0)
4970 return MONO_EXC_NULL_REF;
4971 if (strcmp (name, "ArrayTypeMismatchException") == 0)
4972 return MONO_EXC_ARRAY_TYPE_MISMATCH;
/* unreachable for known intrinsics; hard failure signals a JIT bug */
4973 g_error ("Unknown intrinsic exception %s\n", name);
/*
 * Emit the out-of-line exception-throwing stubs for a compiled method.
 * First pass sizes the epilog (one 32-byte stub per distinct intrinsic
 * exception), then the code buffer is grown as needed and the stubs are
 * emitted, rewriting each MONO_PATCH_INFO_EXC patch into a call to
 * mono_arch_throw_corlib_exception with the exception's type token
 * placed inline in the code stream.
 */
4978 mono_arch_emit_exceptions (MonoCompile *cfg)
4980 MonoJumpInfo *patch_info;
4983 const guint8* exc_throw_pos [MONO_EXC_INTRINS_NUM] = {NULL};
4984 guint8 exc_throw_found [MONO_EXC_INTRINS_NUM] = {0};
4985 int max_epilog_size = 50;
4987 /* count the number of exception infos */
4990 * make sure we have enough space for exceptions
/* pass 1: one stub per distinct exception id; 32 bytes reserved each */
4992 for (patch_info = cfg->patch_info; patch_info; patch_info = patch_info->next) {
4993 if (patch_info->type == MONO_PATCH_INFO_EXC) {
4994 i = exception_id_by_name (patch_info->data.target);
4995 if (!exc_throw_found [i]) {
4996 max_epilog_size += 32;
4997 exc_throw_found [i] = TRUE;
/* grow the native code buffer until the epilog fits (16-byte slack) */
5002 while (cfg->code_len + max_epilog_size > (cfg->code_size - 16)) {
5003 cfg->code_size *= 2;
5004 cfg->native_code = g_realloc (cfg->native_code, cfg->code_size);
5005 mono_jit_stats.code_reallocs++;
5008 code = cfg->native_code + cfg->code_len;
5010 /* add code to raise exceptions */
5011 for (patch_info = cfg->patch_info; patch_info; patch_info = patch_info->next) {
5012 switch (patch_info->type) {
5013 case MONO_PATCH_INFO_EXC: {
5014 MonoClass *exc_class;
5015 unsigned char *ip = patch_info->ip.i + cfg->native_code;
5017 i = exception_id_by_name (patch_info->data.target);
/* reuse an already-emitted stub for the same exception id */
5018 if (exc_throw_pos [i]) {
5019 arm_patch (ip, exc_throw_pos [i]);
5020 patch_info->type = MONO_PATCH_INFO_NONE;
5023 exc_throw_pos [i] = code;
5025 arm_patch (ip, code);
5027 exc_class = mono_class_from_name (mono_defaults.corlib, "System", patch_info->data.name);
5028 g_assert (exc_class);
/* R1 = throw-site return address; R0 = type token loaded PC-relative */
5030 ARM_MOV_REG_REG (code, ARMREG_R1, ARMREG_LR);
5031 ARM_LDR_IMM (code, ARMREG_R0, ARMREG_PC, 0);
/* retarget this patch at the internal throw helper */
5032 patch_info->type = MONO_PATCH_INFO_INTERNAL_METHOD;
5033 patch_info->data.name = "mono_arch_throw_corlib_exception";
5034 patch_info->ip.i = code - cfg->native_code;
/* inline literal consumed by the PC-relative LDR above */
5036 *(guint32*)(gpointer)code = exc_class->type_token;
5046 cfg->code_len = code - cfg->native_code;
5048 g_assert (cfg->code_len < cfg->code_size);
5052 #endif /* #ifndef DISABLE_JIT */
/* One-shot guard for the lazy TLS-offset initialization below.
 * NOTE(review): not visibly synchronized here — presumably callers
 * serialize, or a benign race is accepted; confirm against callers. */
5054 static gboolean tls_offset_inited = FALSE;
/*
 * Per-thread JIT setup hook: caches the TLS offsets of the LMF and
 * LMF-address slots (used for fast LMF access) on first call.
 */
5057 mono_arch_setup_jit_tls_data (MonoJitTlsData *tls)
5059 if (!tls_offset_inited) {
5060 tls_offset_inited = TRUE;
5062 lmf_tls_offset = mono_get_lmf_tls_offset ();
5063 lmf_addr_tls_offset = mono_get_lmf_addr_tls_offset ();
/* Per-thread JIT teardown hook; body not visible in this listing. */
5068 mono_arch_free_jit_tls_data (MonoJitTlsData *tls)
/* Arch hook for emitting intrinsic replacements of method calls;
 * visible portion suggests a stub (no body shown). */
5073 mono_arch_emit_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
/* Debug pretty-printer hook for instruction trees. */
5080 mono_arch_print_tree (MonoInst *tree, int arity)
/* Returns the generic (non-arch-specific) domain intrinsic. */
5086 mono_arch_get_domain_intrinsic (MonoCompile* cfg)
5088 return mono_get_domain_intrinsic (cfg);
/* Offset of the patchable location inside a call sequence at CODE. */
5092 mono_arch_get_patch_offset (guint8 *code)
/* No register windows on ARM — no-op (SPARC-era hook). */
5099 mono_arch_flush_register_windows (void)
5103 #ifdef MONO_ARCH_HAVE_IMT
/*
 * Arrange for the IMT/method argument of CALL to be passed in V5.
 * Three cases are visible: AOT (load the method via an AOTCONST),
 * shared generic code (pass imt_arg directly, marking rgctx use),
 * and the plain JIT case (materialize the method as a PCONST).
 * NOTE(review): lines are missing from this listing; the branch
 * structure between the cases is partly reconstructed from context.
 */
5108 mono_arch_emit_imt_argument (MonoCompile *cfg, MonoCallInst *call, MonoInst *imt_arg)
5110 if (cfg->compile_aot) {
5111 int method_reg = mono_alloc_ireg (cfg);
5114 call->dynamic_imt_arg = TRUE;
5117 mono_call_inst_add_outarg_reg (cfg, call, imt_arg->dreg, ARMREG_V5, FALSE);
/* AOT: method pointer comes from an AOT constant patched at load time */
5119 MONO_INST_NEW (cfg, ins, OP_AOTCONST);
5120 ins->dreg = method_reg;
5121 ins->inst_p0 = call->method;
5122 ins->inst_c1 = MONO_PATCH_INFO_METHODCONST;
5123 MONO_ADD_INS (cfg->cbb, ins);
5125 mono_call_inst_add_outarg_reg (cfg, call, method_reg, ARMREG_V5, FALSE);
5127 } else if (cfg->generic_context) {
5129 /* Always pass in a register for simplicity */
5130 call->dynamic_imt_arg = TRUE;
5132 cfg->uses_rgctx_reg = TRUE;
5135 mono_call_inst_add_outarg_reg (cfg, call, imt_arg->dreg, ARMREG_V5, FALSE);
/* plain JIT: embed the method pointer as an immediate constant */
5138 int method_reg = mono_alloc_preg (cfg);
5140 MONO_INST_NEW (cfg, ins, OP_PCONST);
5141 ins->inst_p0 = call->method;
5142 ins->dreg = method_reg;
5143 MONO_ADD_INS (cfg->cbb, ins);
5145 mono_call_inst_add_outarg_reg (cfg, call, method_reg, ARMREG_V5, FALSE);
5150 #endif /* DISABLE_JIT */
/*
 * Recover the IMT method for a call site: the JIT emits an LDR-PC
 * instruction followed by the method pointer as an inline literal.
 * A zero literal marks AOTed code, where the method travels in V5.
 */
5153 mono_arch_find_imt_method (mgreg_t *regs, guint8 *code)
5155 guint32 *code_ptr = (guint32*)code;
5157 /* The IMT value is stored in the code stream right after the LDC instruction. */
5158 if (!IS_LDR_PC (code_ptr [0])) {
/* dump surrounding words before asserting, to aid debugging */
5159 g_warning ("invalid code stream, instruction before IMT value is not a LDC in %s() (code %p value 0: 0x%x -1: 0x%x -2: 0x%x)", __FUNCTION__, code, code_ptr [2], code_ptr [1], code_ptr [0]);
5160 g_assert (IS_LDR_PC (code_ptr [0]));
5162 if (code_ptr [1] == 0)
5163 /* This is AOTed code, the IMT method is in V5 */
5164 return (MonoMethod*)regs [ARMREG_V5];
5166 return (MonoMethod*) code_ptr [1];
/* The static-call vtable is simply whatever the RGCTX register holds. */
5170 mono_arch_find_static_call_vtable (mgreg_t *regs, guint8 *code)
5172 return (MonoVTable*) regs [MONO_ARCH_RGCTX_REG];
/* Size budget (in bytes; 4 bytes per ARM instruction) for the pieces of
 * an IMT thunk, used by mono_arch_build_imt_thunk below to pre-compute
 * the code-reservation size. */
5175 #define ENABLE_WRONG_METHOD_CHECK 0
5176 #define BASE_SIZE (6 * 4)
5177 #define BSEARCH_ENTRY_SIZE (4 * 4)
5178 #define CMP_SIZE (3 * 4)
5179 #define BRANCH_SIZE (1 * 4)
5180 #define CALL_SIZE (2 * 4)
5181 #define WMC_SIZE (5 * 4)
/* Signed byte distance from A to B (assumes 32-bit pointers). */
5182 #define DISTANCE(A, B) (((gint32)(B)) - ((gint32)(A)))
/*
 * Emit VALUE as an inline literal at CODE and back-patch the earlier
 * PC-relative LDR at TARGET with the 12-bit offset to it.
 * NOTE(review): delta is guint32, so `delta >= 0` is vacuously true —
 * only the <= 0xFFF bound actually constrains anything here.
 */
5185 arm_emit_value_and_patch_ldr (arminstr_t *code, arminstr_t *target, guint32 value)
5187 guint32 delta = DISTANCE (target, code);
5189 g_assert (delta >= 0 && delta <= 0xFFF);
/* OR the offset into the LDR's immediate field */
5190 *target = *target | delta;
/*
 * Build the native IMT dispatch thunk for VTABLE: a sequence of
 * compare-and-branch entries (binary-search shaped, driven by
 * imt_entries) that compares the incoming IMT method (R0, or V5 for
 * AOT callers) against each key and jumps through the matching vtable
 * slot. Sizing pass first, then emission, then jump back-patching.
 * NOTE(review): many lines are missing from this listing (else
 * branches, brace closings); comments describe only visible code.
 */
5196 mono_arch_build_imt_thunk (MonoVTable *vtable, MonoDomain *domain, MonoIMTCheckItem **imt_entries, int count,
5197 gpointer fail_tramp)
5199 int size, i, extra_space = 0;
5200 arminstr_t *code, *start, *vtable_target = NULL;
5201 gboolean large_offsets = FALSE;
5202 guint32 **constant_pool_starts;
5205 constant_pool_starts = g_new0 (guint32*, count);
5208 * We might be called with a fail_tramp from the IMT builder code even if
5209 * MONO_ARCH_HAVE_GENERALIZED_IMT_THUNK is not defined.
5211 //g_assert (!fail_tramp);
/* pass 1: accumulate per-entry chunk sizes into the total reservation */
5213 for (i = 0; i < count; ++i) {
5214 MonoIMTCheckItem *item = imt_entries [i];
5215 if (item->is_equals) {
/* vtable slot too far for an imm12 LDR offset: needs the slow path */
5216 if (!arm_is_imm12 (DISTANCE (vtable, &vtable->vtable[item->value.vtable_slot]))) {
5217 item->chunk_size += 32;
5218 large_offsets = TRUE;
5221 if (item->check_target_idx) {
5222 if (!item->compare_done)
5223 item->chunk_size += CMP_SIZE;
5224 item->chunk_size += BRANCH_SIZE;
5226 #if ENABLE_WRONG_METHOD_CHECK
5227 item->chunk_size += WMC_SIZE;
5230 item->chunk_size += CALL_SIZE;
5232 item->chunk_size += BSEARCH_ENTRY_SIZE;
5233 imt_entries [item->check_target_idx]->compare_done = TRUE;
5235 size += item->chunk_size;
5239 size += 4 * count; /* The ARM_ADD_REG_IMM to pop the stack */
5241 start = code = mono_domain_code_reserve (domain, size);
5244 printf ("building IMT thunk for class %s %s entries %d code size %d code at %p end %p vtable %p\n", vtable->klass->name_space, vtable->klass->name, count, size, start, ((guint8*)start) + size, vtable);
5245 for (i = 0; i < count; ++i) {
5246 MonoIMTCheckItem *item = imt_entries [i];
5247 printf ("method %d (%p) %s vtable slot %p is_equals %d chunk size %d\n", i, item->key, item->key->name, &vtable->vtable [item->value.vtable_slot], item->is_equals, item->chunk_size);
/* prologue: save scratch regs (4-reg variant when large offsets need
 * the stack-based indirect-branch trick) and load the vtable into IP */
5252 ARM_PUSH4 (code, ARMREG_R0, ARMREG_R1, ARMREG_IP, ARMREG_PC);
5254 ARM_PUSH2 (code, ARMREG_R0, ARMREG_R1);
5255 ARM_LDR_IMM (code, ARMREG_R0, ARMREG_LR, -4);
5256 vtable_target = code;
5257 ARM_LDR_IMM (code, ARMREG_IP, ARMREG_PC, 0);
5259 /* R0 == 0 means we are called from AOT code. In this case, V5 contains the IMT method */
5260 ARM_CMP_REG_IMM8 (code, ARMREG_R0, 0);
5261 ARM_MOV_REG_REG_COND (code, ARMREG_R0, ARMREG_V5, ARMCOND_EQ);
/* pass 2: emit one chunk per IMT entry */
5263 for (i = 0; i < count; ++i) {
5264 MonoIMTCheckItem *item = imt_entries [i];
5265 arminstr_t *imt_method = NULL, *vtable_offset_ins = NULL;
5266 gint32 vtable_offset;
5268 item->code_target = (guint8*)code;
5270 if (item->is_equals) {
5271 if (item->check_target_idx) {
5272 if (!item->compare_done) {
5274 ARM_LDR_IMM (code, ARMREG_R1, ARMREG_PC, 0);
5275 ARM_CMP_REG_REG (code, ARMREG_R0, ARMREG_R1);
/* forward branch, patched once the target chunk exists */
5277 item->jmp_code = (guint8*)code;
5278 ARM_B_COND (code, ARMCOND_NE, 0);
5280 /*Enable the commented code to assert on wrong method*/
5281 #if ENABLE_WRONG_METHOD_CHECK
5283 ARM_LDR_IMM (code, ARMREG_R1, ARMREG_PC, 0);
5284 ARM_CMP_REG_REG (code, ARMREG_R0, ARMREG_R1);
5285 ARM_B_COND (code, ARMCOND_NE, 1);
5291 vtable_offset = DISTANCE (vtable, &vtable->vtable[item->value.vtable_slot]);
5292 if (!arm_is_imm12 (vtable_offset)) {
5294 * We need to branch to a computed address but we don't have
5295 * a free register to store it, since IP must contain the
5296 * vtable address. So we push the two values to the stack, and
5297 * load them both using LDM.
5299 /* Compute target address */
5300 vtable_offset_ins = code;
5301 ARM_LDR_IMM (code, ARMREG_R1, ARMREG_PC, 0);
5302 ARM_LDR_REG_REG (code, ARMREG_R1, ARMREG_IP, ARMREG_R1);
5303 /* Save it to the fourth slot */
5304 ARM_STR_IMM (code, ARMREG_R1, ARMREG_SP, 3 * sizeof (gpointer));
5305 /* Restore registers and branch */
5306 ARM_POP4 (code, ARMREG_R0, ARMREG_R1, ARMREG_IP, ARMREG_PC);
5308 code = arm_emit_value_and_patch_ldr (code, vtable_offset_ins, vtable_offset);
/* small-offset path: pop scratch and jump through the slot directly */
5310 ARM_POP2 (code, ARMREG_R0, ARMREG_R1);
5312 ARM_ADD_REG_IMM8 (code, ARMREG_SP, ARMREG_SP, 2 * sizeof (gpointer));
5313 ARM_LDR_IMM (code, ARMREG_PC, ARMREG_IP, vtable_offset);
5317 code = arm_emit_value_and_patch_ldr (code, imt_method, (guint32)item->key);
5319 /*must emit after unconditional branch*/
5320 if (vtable_target) {
5321 code = arm_emit_value_and_patch_ldr (code, vtable_target, (guint32)vtable);
5322 item->chunk_size += 4;
5323 vtable_target = NULL;
5326 /*We reserve the space for bsearch IMT values after the first entry with an absolute jump*/
5327 constant_pool_starts [i] = code;
5329 code += extra_space;
/* bsearch-node entry: compare key, branch on >= to the subtree */
5333 ARM_LDR_IMM (code, ARMREG_R1, ARMREG_PC, 0);
5334 ARM_CMP_REG_REG (code, ARMREG_R0, ARMREG_R1);
5336 item->jmp_code = (guint8*)code;
5337 ARM_B_COND (code, ARMCOND_GE, 0);
/* pass 3: patch forward branches and fill the literal pools */
5342 for (i = 0; i < count; ++i) {
5343 MonoIMTCheckItem *item = imt_entries [i];
5344 if (item->jmp_code) {
5345 if (item->check_target_idx)
5346 arm_patch (item->jmp_code, imt_entries [item->check_target_idx]->code_target);
5348 if (i > 0 && item->is_equals) {
5350 arminstr_t *space_start = constant_pool_starts [i];
5351 for (j = i - 1; j >= 0 && !imt_entries [j]->is_equals; --j) {
5352 space_start = arm_emit_value_and_patch_ldr (space_start, (arminstr_t*)imt_entries [j]->code_target, (guint32)imt_entries [j]->key);
5359 char *buff = g_strdup_printf ("thunk_for_class_%s_%s_entries_%d", vtable->klass->name_space, vtable->klass->name, count);
5360 mono_disassemble_code (NULL, (guint8*)start, size, buff);
5365 g_free (constant_pool_starts);
/* new code must be flushed before it can be executed */
5367 mono_arch_flush_icache ((guint8*)start, size);
5368 mono_stats.imt_thunks_size += code - start;
5370 g_assert (DISTANCE (start, code) <= size);
/*
 * Fetch integer register REG from a saved MonoContext; SP is stored
 * separately from the general register array.
 */
5377 mono_arch_context_get_int_reg (MonoContext *ctx, int reg)
5379 if (reg == ARMREG_SP)
5380 return (gpointer)ctx->esp;
5382 return (gpointer)ctx->regs [reg];
5386 * mono_arch_set_breakpoint:
5388 * Set a breakpoint at the native code corresponding to JI at NATIVE_OFFSET.
5389 * The location should contain code emitted by OP_SEQ_POINT.
/* Two strategies are visible: (a) AOT/seq-point-info path — record the
 * bp trigger page in the per-method bp_addrs table; (b) patch the code
 * in place to dereference the bp trigger page (or, in another variant,
 * write an SWI that the kernel/qemu turns into SIGILL).
 * NOTE(review): the #ifdef/else structure between the variants is not
 * visible in this listing. */
5392 mono_arch_set_breakpoint (MonoJitInfo *ji, guint8 *ip)
5395 guint32 native_offset = ip - (guint8*)ji->code_start;
5398 SeqPointInfo *info = mono_arch_get_seq_point_info (mono_domain_get (), ji->code_start);
5400 g_assert (native_offset % 4 == 0);
5401 g_assert (info->bp_addrs [native_offset / 4] == 0);
5402 info->bp_addrs [native_offset / 4] = bp_trigger_page;
5404 int dreg = ARMREG_LR;
5406 /* Read from another trigger page */
5407 ARM_LDR_IMM (code, dreg, ARMREG_PC, 0);
5409 *(int*)code = (int)bp_trigger_page;
/* faulting load from the read-protected trigger page raises the bp */
5411 ARM_LDR_IMM (code, dreg, dreg, 0);
5413 mono_arch_flush_icache (code - 16, 16);
5416 /* This is currently implemented by emitting an SWI instruction, which
5417 * qemu/linux seems to convert to a SIGILL.
5419 *(int*)code = (0xef << 24) | 8;
5421 mono_arch_flush_icache (code - 4, 4);
5427 * mono_arch_clear_breakpoint:
5429 * Clear the breakpoint at IP.
/* Mirror of set_breakpoint: clear the bp_addrs slot on the seq-point
 * path, or (code-patching path) rewrite the 4 patched words — the loop
 * body at 5445 is not visible in this listing. */
5432 mono_arch_clear_breakpoint (MonoJitInfo *ji, guint8 *ip)
5438 guint32 native_offset = ip - (guint8*)ji->code_start;
5439 SeqPointInfo *info = mono_arch_get_seq_point_info (mono_domain_get (), ji->code_start);
5441 g_assert (native_offset % 4 == 0);
5442 g_assert (info->bp_addrs [native_offset / 4] == bp_trigger_page);
5443 info->bp_addrs [native_offset / 4] = 0;
5445 for (i = 0; i < 4; ++i)
5448 mono_arch_flush_icache (ip, code - ip);
5453 * mono_arch_start_single_stepping:
5455 * Start single stepping.
/* Revoke all access to the ss trigger page so every seq-point read faults. */
5458 mono_arch_start_single_stepping (void)
5460 mono_mprotect (ss_trigger_page, mono_pagesize (), 0);
5464 * mono_arch_stop_single_stepping:
5466 * Stop single stepping.
/* Restore read access so seq-point reads stop faulting. */
5469 mono_arch_stop_single_stepping (void)
5471 mono_mprotect (ss_trigger_page, mono_pagesize (), MONO_MMAP_READ);
/* Signal delivered for trigger-page faults; platform-dependent
 * (the #if condition between the two choices is not visible here). */
5475 #define DBG_SIGNAL SIGBUS
5477 #define DBG_SIGNAL SIGSEGV
5481 * mono_arch_is_single_step_event:
5483 * Return whenever the machine state in SIGCTX corresponds to a single
/* True when the fault address falls within (a fuzzed window of) the
 * single-step trigger page. */
5487 mono_arch_is_single_step_event (void *info, void *sigctx)
5489 siginfo_t *sinfo = info;
5491 /* Sometimes the address is off by 4 */
5492 if (sinfo->si_addr >= ss_trigger_page && (guint8*)sinfo->si_addr <= (guint8*)ss_trigger_page + 128)
5499 * mono_arch_is_breakpoint_event:
5501 * Return whenever the machine state in SIGCTX corresponds to a breakpoint event.
/* True when DBG_SIGNAL fired with a fault address inside (a fuzzed
 * window of) the breakpoint trigger page. */
5504 mono_arch_is_breakpoint_event (void *info, void *sigctx)
5506 siginfo_t *sinfo = info;
5508 if (sinfo->si_signo == DBG_SIGNAL) {
5509 /* Sometimes the address is off by 4 */
5510 if (sinfo->si_addr >= bp_trigger_page && (guint8*)sinfo->si_addr <= (guint8*)bp_trigger_page + 128)
/* Translate the faulting context IP to the logical breakpoint IP;
 * the adjustment logic is not visible in this listing. */
5520 mono_arch_get_ip_for_breakpoint (MonoJitInfo *ji, MonoContext *ctx)
5522 guint8 *ip = MONO_CONTEXT_GET_IP (ctx);
/* Same translation for single-step events; body likewise elided. */
5533 mono_arch_get_ip_for_single_step (MonoJitInfo *ji, MonoContext *ctx)
5535 guint8 *ip = MONO_CONTEXT_GET_IP (ctx);
5543 * mono_arch_skip_breakpoint:
5545 * See mini-amd64.c for docs.
/* Advance past the 4-byte faulting instruction to resume execution. */
5548 mono_arch_skip_breakpoint (MonoContext *ctx)
5550 MONO_CONTEXT_SET_IP (ctx, (guint8*)MONO_CONTEXT_GET_IP (ctx) + 4);
5554 * mono_arch_skip_single_step:
5556 * See mini-amd64.c for docs.
/* Advance past the 4-byte faulting instruction to resume execution. */
5559 mono_arch_skip_single_step (MonoContext *ctx)
5561 MONO_CONTEXT_SET_IP (ctx, (guint8*)MONO_CONTEXT_GET_IP (ctx) + 4);
5565 * mono_arch_get_seq_point_info:
5567 * See mini-amd64.c for docs.
5570 mono_arch_get_seq_point_info (MonoDomain *domain, guint8 *code)
5575 // FIXME: Add a free function
5577 mono_domain_lock (domain);
5578 info = g_hash_table_lookup (domain_jit_info (domain)->arch_seq_points,
5580 mono_domain_unlock (domain);
5583 ji = mono_jit_info_table_find (domain, (char*)code);
5586 info = g_malloc0 (sizeof (SeqPointInfo) + ji->code_size);
5588 info->ss_trigger_page = ss_trigger_page;
5589 info->bp_trigger_page = bp_trigger_page;
5591 mono_domain_lock (domain);
5592 g_hash_table_insert (domain_jit_info (domain)->arch_seq_points,
5594 mono_domain_unlock (domain);