2 * mini-sparc.c: Sparc backend for the Mono code generator
5 * Paolo Molaro (lupus@ximian.com)
6 * Dietmar Maurer (dietmar@ximian.com)
9 * Christopher Taylor (ct@gentoo.org)
10 * Mark Crichton (crichton@gimp.org)
11 * Zoltan Varga (vargaz@freemail.hu)
13 * (C) 2003 Ximian, Inc.
21 #include <sys/systeminfo.h>
28 #include <mono/metadata/appdomain.h>
29 #include <mono/metadata/debug-helpers.h>
30 #include <mono/metadata/tokentype.h>
31 #include <mono/utils/mono-math.h>
33 #include "mini-sparc.h"
35 #include "cpu-sparc.h"
36 #include "jit-icalls.h"
40 * Sparc V9 means two things:
41 * - the instruction set
44 * V9 instructions are only usable if the underlying processor is 64 bit. Most Sparc
45 * processors in use are 64 bit processors. The V9 ABI is only usable if the
46 * mono executable is a 64 bit executable. So it would make sense to use the 64 bit
47 * instructions without using the 64 bit ABI.
52 * - %i0..%i<n> hold the incoming arguments, these are never written by JITted
53 * code. Unused input registers are used for global register allocation.
54 * - %o0..%o5 and %l7 is used for local register allocation and passing arguments
55 * - %l0..%l6 is used for global register allocation
56 * - %o7 and %g1 is used as scratch registers in opcodes
57 * - all floating point registers are used for local register allocation except %f0.
58 * Only double precision registers are used.
60 * - fp registers %d0..%d30 are used for parameter passing, and %d32..%d62 are
61 * used for local allocation.
66 * - doubles and longs must be stored in dword aligned locations
70 * The following things are not implemented or do not work:
71 * - some fp arithmetic corner cases
72 * The following tests in mono/mini are expected to fail:
73 * - test_0_simple_double_casts
74 * This test casts (guint64)-1 to double and then back to guint64 again.
75 * Under x86, it returns 0, while under sparc it returns -1.
77 * In addition to this, the runtime requires the trunc function, or its
78 * solaris counterpart, aintl, to do some double->int conversions. If this
79 * function is not available, it is emulated somewhat, but the results can be
85 * - optimize sparc_set according to the memory model
86 * - when non-AOT compiling, compute patch targets immediately so we don't
87 * have to emit the 6 byte template.
89 * - struct arguments/returns
94 * - sparc_call_simple can't be used in a lot of places since the displacement
95 * might not fit into an imm30.
96 * - g1 can't be used in a lot of places since it is used as a scratch reg in
98 * - sparc_f0 can't be used as a scratch register on V9
99 * - the %d34..%d62 fp registers are encoded as: %dx = %f(x - 32 + 1), ie.
101 * - ldind.i4/u4 needs to sign extend/clear out upper word -> slows things down
102 * - ins->dreg can't be used as a scratch register in r4 opcodes since it might
103 * be a double precision register which has no single precision part.
104 * - passing/returning structs is hard to implement, because:
105 * - the spec is very hard to understand
106 * - it requires knowledge about the fields of structure, needs to handle
107 * nested structures etc.
111 * Possible optimizations:
112 * - delay slot scheduling
113 * - allocate large constants to registers
114 * - add more mul/div/rem optimizations
118 #define MONO_SPARC_THR_TLS 1
122 * There was a 64 bit bug in glib-2.2: g_bit_nth_msf (0, -1) would return 32,
123 * causing infinite loops in dominator computation. So glib-2.4 is required.
126 #if GLIB_MAJOR_VERSION == 2 && GLIB_MINOR_VERSION < 4
127 #error "glib 2.4 or later is required for 64 bit mode."
131 #define ALIGN_TO(val,align) (((val) + ((align) - 1)) & ~((align) - 1))
133 #define SIGNAL_STACK_SIZE (64 * 1024)
135 #define STACK_BIAS MONO_SPARC_STACK_BIAS
139 /* %g1 is used by sparc_set */
140 #define GP_SCRATCH_REG sparc_g4
141 /* %f0 is used for parameter passing */
142 #define FP_SCRATCH_REG sparc_f30
143 #define ARGS_OFFSET (STACK_BIAS + 128)
147 #define FP_SCRATCH_REG sparc_f0
148 #define ARGS_OFFSET 68
149 #define GP_SCRATCH_REG sparc_g1
153 /* Whether the CPU supports v9 instructions */
154 static gboolean sparcv9 = FALSE;
156 /* Whether this is a 64-bit executable */
158 static gboolean v64 = TRUE;
160 static gboolean v64 = FALSE;
163 static gpointer mono_arch_get_lmf_addr (void);
166 mono_arch_regname (int reg) {
167 static const char * rnames[] = {
168 "sparc_g0", "sparc_g1", "sparc_g2", "sparc_g3", "sparc_g4",
169 "sparc_g5", "sparc_g6", "sparc_g7", "sparc_o0", "sparc_o1",
170 "sparc_o2", "sparc_o3", "sparc_o4", "sparc_o5", "sparc_sp",
171 "sparc_call", "sparc_l0", "sparc_l1", "sparc_l2", "sparc_l3",
172 "sparc_l4", "sparc_l5", "sparc_l6", "sparc_l7", "sparc_i0",
173 "sparc_i1", "sparc_i2", "sparc_i3", "sparc_i4", "sparc_i5",
174 "sparc_fp", "sparc_retadr"
176 if (reg >= 0 && reg < 32)
182 mono_arch_fregname (int reg) {
183 static const char *rnames [] = {
184 "sparc_f0", "sparc_f1", "sparc_f2", "sparc_f3", "sparc_f4",
185 "sparc_f5", "sparc_f6", "sparc_f7", "sparc_f8", "sparc_f9",
186 "sparc_f10", "sparc_f11", "sparc_f12", "sparc_f13", "sparc_f14",
187 "sparc_f15", "sparc_f16", "sparc_f17", "sparc_f18", "sparc_f19",
188 "sparc_f20", "sparc_f21", "sparc_f22", "sparc_f23", "sparc_f24",
189 "sparc_f25", "sparc_f26", "sparc_f27", "sparc_f28", "sparc_f29",
190 "sparc_f30", "sparc_f31"
193 if (reg >= 0 && reg < 32)
200 * Initialize the cpu to execute managed code.
203 mono_arch_cpu_init (void)
206 /* make sure sparcv9 is initialized for embedded use */
207 mono_arch_cpu_optimizations(&dummy);
211 * Initialize architecture specific code.
214 mono_arch_init (void)
219 * Cleanup architecture specific code.
222 mono_arch_cleanup (void)
227 * This function returns the optimizations supported on this cpu.
230 mono_arch_cpu_optimizations (guint32 *exclude_mask)
238 if (!sysinfo (SI_ISALIST, buf, 1024))
239 g_assert_not_reached ();
241 /* From glibc. If the getpagesize is 8192, we're on sparc64, which
242 * (in)directly implies that we're a v9 or better.
243 * Improvements to this are greatly accepted...
244 * Also, we don't differentiate between v7 and v8. I sense SIGILL
245 * sniffing in my future.
247 if (getpagesize() == 8192)
248 strcpy (buf, "sparcv9");
250 strcpy (buf, "sparcv8");
254 * On some processors, the cmov instructions are even slower than the
257 if (strstr (buf, "sparcv9")) {
258 opts |= MONO_OPT_CMOV | MONO_OPT_FCMOV;
262 *exclude_mask |= MONO_OPT_CMOV | MONO_OPT_FCMOV;
268 * This function tests for all SIMD functions supported.
270 * Returns a bitmask corresponding to all supported versions.
274 mono_arch_cpu_enumerate_simd_versions (void)
276 /* SIMD is currently unimplemented */
281 #define flushi(addr) __asm__ __volatile__ ("iflush %0"::"r"(addr):"memory")
282 #else /* assume Sun's compiler */
283 static void flushi(void *addr)
290 void sync_instruction_memory(caddr_t addr, int len);
294 mono_arch_flush_icache (guint8 *code, gint size)
297 /* Hopefully this is optimized based on the actual CPU */
298 sync_instruction_memory (code, size);
300 gulong start = (gulong) code;
301 gulong end = start + size;
304 /* Sparcv9 chips only need flushes on 32 byte
305 * cacheline boundaries.
307 * Sparcv8 needs a flush every 8 bytes.
309 align = (sparcv9 ? 32 : 8);
311 start &= ~(align - 1);
312 end = (end + (align - 1)) & ~(align - 1);
314 while (start < end) {
316 __asm__ __volatile__ ("iflush %0"::"r"(start));
328 * Flush all register windows to memory. Every register window is saved to
329 * a 16 word area on the stack pointed to by its %sp register.
332 mono_sparc_flushw (void)
334 static guint32 start [64];
335 static int inited = 0;
337 static void (*flushw) (void);
342 sparc_save_imm (code, sparc_sp, -160, sparc_sp);
345 sparc_restore_simple (code);
347 g_assert ((code - start) < 64);
349 mono_arch_flush_icache ((guint8*)start, (guint8*)code - (guint8*)start);
351 flushw = (gpointer)start;
360 mono_arch_flush_register_windows (void)
362 mono_sparc_flushw ();
366 mono_arch_is_inst_imm (gint64 imm)
368 return sparc_is_imm13 (imm);
372 mono_sparc_is_v9 (void) {
377 mono_sparc_is_sparc64 (void) {
389 ArgInFloatReg, /* V9 only */
390 ArgInDoubleReg /* V9 only */
395 /* This needs to be offset by %i0 or %o0 depending on caller/callee */
398 guint32 vt_offset; /* for valuetypes */
416 add_general (guint32 *gr, guint32 *stack_size, ArgInfo *ainfo, gboolean pair)
418 ainfo->offset = *stack_size;
421 if (*gr >= PARAM_REGS) {
422 ainfo->storage = ArgOnStack;
425 ainfo->storage = ArgInIReg;
430 /* Allways reserve stack space for parameters passed in registers */
431 (*stack_size) += sizeof (gpointer);
434 if (*gr < PARAM_REGS - 1) {
435 /* A pair of registers */
436 ainfo->storage = ArgInIRegPair;
440 else if (*gr >= PARAM_REGS) {
441 /* A pair of stack locations */
442 ainfo->storage = ArgOnStackPair;
445 ainfo->storage = ArgInSplitRegStack;
450 (*stack_size) += 2 * sizeof (gpointer);
456 #define FLOAT_PARAM_REGS 32
459 add_float (guint32 *gr, guint32 *stack_size, ArgInfo *ainfo, gboolean single)
461 ainfo->offset = *stack_size;
464 if (*gr >= FLOAT_PARAM_REGS) {
465 ainfo->storage = ArgOnStack;
468 /* A single is passed in an even numbered fp register */
469 ainfo->storage = ArgInFloatReg;
470 ainfo->reg = *gr + 1;
475 if (*gr < FLOAT_PARAM_REGS) {
476 /* A double register */
477 ainfo->storage = ArgInDoubleReg;
482 ainfo->storage = ArgOnStack;
486 (*stack_size) += sizeof (gpointer);
494 * Obtain information about a call according to the calling convention.
495 * For V8, see the "System V ABI, Sparc Processor Supplement" Sparc V8 version
496 * document for more information.
497 * For V9, see the "Low Level System Information (64-bit psABI)" chapter in
498 * the 'Sparc Compliance Definition 2.4' document.
501 get_call_info (MonoCompile *cfg, MonoMethodSignature *sig, gboolean is_pinvoke)
504 int n = sig->hasthis + sig->param_count;
505 guint32 stack_size = 0;
508 MonoGenericSharingContext *gsctx = cfg ? cfg->generic_sharing_context : NULL;
510 cinfo = g_malloc0 (sizeof (CallInfo) + (sizeof (ArgInfo) * n));
516 if (MONO_TYPE_ISSTRUCT ((sig->ret))) {
517 /* The address of the return value is passed in %o0 */
518 add_general (&gr, &stack_size, &cinfo->ret, FALSE);
519 cinfo->ret.reg += sparc_i0;
520 /* FIXME: Pass this after this as on other platforms */
527 add_general (&gr, &stack_size, cinfo->args + 0, FALSE);
529 if ((sig->call_convention == MONO_CALL_VARARG) && (n == 0)) {
532 /* Emit the signature cookie just before the implicit arguments */
533 add_general (&gr, &stack_size, &cinfo->sig_cookie, FALSE);
536 for (i = 0; i < sig->param_count; ++i) {
537 ArgInfo *ainfo = &cinfo->args [sig->hasthis + i];
540 if ((sig->call_convention == MONO_CALL_VARARG) && (i == sig->sentinelpos)) {
543 /* Emit the signature cookie just before the implicit arguments */
544 add_general (&gr, &stack_size, &cinfo->sig_cookie, FALSE);
547 DEBUG(printf("param %d: ", i));
548 if (sig->params [i]->byref) {
549 DEBUG(printf("byref\n"));
551 add_general (&gr, &stack_size, ainfo, FALSE);
554 ptype = mono_type_get_underlying_type (sig->params [i]);
555 ptype = mini_get_basic_type_from_generic (gsctx, ptype);
556 switch (ptype->type) {
557 case MONO_TYPE_BOOLEAN:
560 add_general (&gr, &stack_size, ainfo, FALSE);
561 /* the value is in the ls byte */
562 ainfo->offset += sizeof (gpointer) - 1;
567 add_general (&gr, &stack_size, ainfo, FALSE);
568 /* the value is in the ls word */
569 ainfo->offset += sizeof (gpointer) - 2;
573 add_general (&gr, &stack_size, ainfo, FALSE);
574 /* the value is in the ls dword */
575 ainfo->offset += sizeof (gpointer) - 4;
580 case MONO_TYPE_FNPTR:
581 case MONO_TYPE_CLASS:
582 case MONO_TYPE_OBJECT:
583 case MONO_TYPE_STRING:
584 case MONO_TYPE_SZARRAY:
585 case MONO_TYPE_ARRAY:
586 add_general (&gr, &stack_size, ainfo, FALSE);
588 case MONO_TYPE_GENERICINST:
589 if (!mono_type_generic_inst_is_valuetype (ptype)) {
590 add_general (&gr, &stack_size, ainfo, FALSE);
594 case MONO_TYPE_VALUETYPE:
599 add_general (&gr, &stack_size, ainfo, FALSE);
601 case MONO_TYPE_TYPEDBYREF:
602 add_general (&gr, &stack_size, ainfo, FALSE);
607 add_general (&gr, &stack_size, ainfo, FALSE);
609 add_general (&gr, &stack_size, ainfo, TRUE);
614 add_float (&fr, &stack_size, ainfo, TRUE);
617 /* single precision values are passed in integer registers */
618 add_general (&gr, &stack_size, ainfo, FALSE);
623 add_float (&fr, &stack_size, ainfo, FALSE);
626 /* double precision values are passed in a pair of registers */
627 add_general (&gr, &stack_size, ainfo, TRUE);
631 g_assert_not_reached ();
635 if (!sig->pinvoke && (sig->call_convention == MONO_CALL_VARARG) && (n > 0) && (sig->sentinelpos == sig->param_count)) {
638 /* Emit the signature cookie just before the implicit arguments */
639 add_general (&gr, &stack_size, &cinfo->sig_cookie, FALSE);
643 ret_type = mono_type_get_underlying_type (sig->ret);
644 ret_type = mini_get_basic_type_from_generic (gsctx, ret_type);
645 switch (ret_type->type) {
646 case MONO_TYPE_BOOLEAN:
657 case MONO_TYPE_FNPTR:
658 case MONO_TYPE_CLASS:
659 case MONO_TYPE_OBJECT:
660 case MONO_TYPE_SZARRAY:
661 case MONO_TYPE_ARRAY:
662 case MONO_TYPE_STRING:
663 cinfo->ret.storage = ArgInIReg;
664 cinfo->ret.reg = sparc_i0;
671 cinfo->ret.storage = ArgInIReg;
672 cinfo->ret.reg = sparc_i0;
676 cinfo->ret.storage = ArgInIRegPair;
677 cinfo->ret.reg = sparc_i0;
684 cinfo->ret.storage = ArgInFReg;
685 cinfo->ret.reg = sparc_f0;
687 case MONO_TYPE_GENERICINST:
688 if (!mono_type_generic_inst_is_valuetype (ret_type)) {
689 cinfo->ret.storage = ArgInIReg;
690 cinfo->ret.reg = sparc_i0;
696 case MONO_TYPE_VALUETYPE:
705 cinfo->ret.storage = ArgOnStack;
707 case MONO_TYPE_TYPEDBYREF:
710 /* Same as a valuetype with size 24 */
717 cinfo->ret.storage = ArgOnStack;
722 g_error ("Can't handle as return value 0x%x", sig->ret->type);
725 cinfo->stack_usage = stack_size;
726 cinfo->reg_usage = gr;
731 mono_arch_get_allocatable_int_vars (MonoCompile *cfg)
737 * FIXME: If an argument is allocated to a register, then load it from the
738 * stack in the prolog.
741 for (i = 0; i < cfg->num_varinfo; i++) {
742 MonoInst *ins = cfg->varinfo [i];
743 MonoMethodVar *vmv = MONO_VARINFO (cfg, i);
746 if (vmv->range.first_use.abs_pos >= vmv->range.last_use.abs_pos)
749 /* FIXME: Make arguments on stack allocateable to registers */
750 if (ins->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT) || (ins->opcode == OP_REGVAR) || (ins->opcode == OP_ARG))
753 if (mono_is_regsize_var (ins->inst_vtype)) {
754 g_assert (MONO_VARINFO (cfg, i)->reg == -1);
755 g_assert (i == vmv->idx);
757 vars = mono_varlist_insert_sorted (cfg, vars, vmv, FALSE);
765 mono_arch_get_global_int_regs (MonoCompile *cfg)
769 MonoMethodSignature *sig;
772 sig = mono_method_signature (cfg->method);
774 cinfo = get_call_info (cfg, sig, FALSE);
776 /* Use unused input registers */
777 for (i = cinfo->reg_usage; i < 6; ++i)
778 regs = g_list_prepend (regs, GUINT_TO_POINTER (sparc_i0 + i));
780 /* Use %l0..%l6 as global registers */
781 for (i = sparc_l0; i < sparc_l7; ++i)
782 regs = g_list_prepend (regs, GUINT_TO_POINTER (i));
790 * mono_arch_regalloc_cost:
792 * Return the cost, in number of memory references, of the action of
793 * allocating the variable VMV into a register during global register
797 mono_arch_regalloc_cost (MonoCompile *cfg, MonoMethodVar *vmv)
803 * Set var information according to the calling convention. sparc version.
804 * The locals var stuff should most likely be split in another method.
808 mono_arch_allocate_vars (MonoCompile *cfg)
810 MonoMethodSignature *sig;
811 MonoMethodHeader *header;
813 int i, offset, size, align, curinst;
816 header = cfg->header;
818 sig = mono_method_signature (cfg->method);
820 cinfo = get_call_info (cfg, sig, FALSE);
822 if (sig->ret->type != MONO_TYPE_VOID) {
823 switch (cinfo->ret.storage) {
826 cfg->ret->opcode = OP_REGVAR;
827 cfg->ret->inst_c0 = cinfo->ret.reg;
829 case ArgInIRegPair: {
830 MonoType *t = mono_type_get_underlying_type (sig->ret);
831 if (((t->type == MONO_TYPE_I8) || (t->type == MONO_TYPE_U8))) {
832 MonoInst *low = get_vreg_to_inst (cfg, cfg->ret->dreg + 1);
833 MonoInst *high = get_vreg_to_inst (cfg, cfg->ret->dreg + 2);
835 low->opcode = OP_REGVAR;
836 low->dreg = cinfo->ret.reg + 1;
837 high->opcode = OP_REGVAR;
838 high->dreg = cinfo->ret.reg;
840 cfg->ret->opcode = OP_REGVAR;
841 cfg->ret->inst_c0 = cinfo->ret.reg;
846 g_assert_not_reached ();
849 cfg->vret_addr->opcode = OP_REGOFFSET;
850 cfg->vret_addr->inst_basereg = sparc_fp;
851 cfg->vret_addr->inst_offset = 64;
857 cfg->ret->dreg = cfg->ret->inst_c0;
861 * We use the ABI calling conventions for managed code as well.
862 * Exception: valuetypes are never returned in registers on V9.
863 * FIXME: Use something more optimized.
866 /* Locals are allocated backwards from %fp */
867 cfg->frame_reg = sparc_fp;
871 * Reserve a stack slot for holding information used during exception
874 if (header->num_clauses)
875 offset += sizeof (gpointer) * 2;
877 if (cfg->method->save_lmf) {
878 offset += sizeof (MonoLMF);
879 cfg->arch.lmf_offset = offset;
882 curinst = cfg->locals_start;
883 for (i = curinst; i < cfg->num_varinfo; ++i) {
884 inst = cfg->varinfo [i];
886 if ((inst->opcode == OP_REGVAR) || (inst->opcode == OP_REGOFFSET)) {
887 //g_print ("allocating local %d to %s\n", i, mono_arch_regname (inst->dreg));
891 if (inst->flags & MONO_INST_IS_DEAD)
894 /* inst->backend.is_pinvoke indicates native sized value types, this is used by the
895 * pinvoke wrappers when they call functions returning structure */
896 if (inst->backend.is_pinvoke && MONO_TYPE_ISSTRUCT (inst->inst_vtype) && inst->inst_vtype->type != MONO_TYPE_TYPEDBYREF)
897 size = mono_class_native_size (mono_class_from_mono_type (inst->inst_vtype), &align);
899 size = mini_type_stack_size (cfg->generic_sharing_context, inst->inst_vtype, &align);
902 * This is needed since structures containing doubles must be doubleword
904 * FIXME: Do this only if needed.
906 if (MONO_TYPE_ISSTRUCT (inst->inst_vtype))
910 * variables are accessed as negative offsets from %fp, so increase
911 * the offset before assigning it to a variable
916 offset &= ~(align - 1);
917 inst->opcode = OP_REGOFFSET;
918 inst->inst_basereg = sparc_fp;
919 inst->inst_offset = STACK_BIAS + -offset;
921 //g_print ("allocating local %d to [%s - %d]\n", i, mono_arch_regname (inst->inst_basereg), - inst->inst_offset);
924 if (sig->call_convention == MONO_CALL_VARARG) {
925 cfg->sig_cookie = cinfo->sig_cookie.offset + ARGS_OFFSET;
928 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
929 inst = cfg->args [i];
930 if (inst->opcode != OP_REGVAR) {
931 ArgInfo *ainfo = &cinfo->args [i];
932 gboolean inreg = TRUE;
936 if (sig->hasthis && (i == 0))
937 arg_type = &mono_defaults.object_class->byval_arg;
939 arg_type = sig->params [i - sig->hasthis];
942 if (!arg_type->byref && ((arg_type->type == MONO_TYPE_R4)
943 || (arg_type->type == MONO_TYPE_R8)))
945 * Since float arguments are passed in integer registers, we need to
946 * save them to the stack in the prolog.
951 /* FIXME: Allocate volatile arguments to registers */
952 /* FIXME: This makes the argument holding a vtype address into volatile */
953 if (inst->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT))
956 if (MONO_TYPE_ISSTRUCT (arg_type))
957 /* FIXME: this isn't needed */
960 inst->opcode = OP_REGOFFSET;
963 storage = ArgOnStack;
965 storage = ainfo->storage;
969 inst->opcode = OP_REGVAR;
970 inst->dreg = sparc_i0 + ainfo->reg;
973 if (inst->type == STACK_I8) {
974 MonoInst *low = get_vreg_to_inst (cfg, inst->dreg + 1);
975 MonoInst *high = get_vreg_to_inst (cfg, inst->dreg + 2);
977 low->opcode = OP_REGVAR;
978 low->dreg = sparc_i0 + ainfo->reg + 1;
979 high->opcode = OP_REGVAR;
980 high->dreg = sparc_i0 + ainfo->reg;
982 inst->opcode = OP_REGVAR;
983 inst->dreg = sparc_i0 + ainfo->reg;
988 * Since float regs are volatile, we save the arguments to
989 * the stack in the prolog.
990 * FIXME: Avoid this if the method contains no calls.
994 case ArgInSplitRegStack:
995 /* Split arguments are saved to the stack in the prolog */
996 inst->opcode = OP_REGOFFSET;
997 /* in parent frame */
998 inst->inst_basereg = sparc_fp;
999 inst->inst_offset = ainfo->offset + ARGS_OFFSET;
1001 if (!arg_type->byref && (arg_type->type == MONO_TYPE_R8)) {
1003 * It is very hard to load doubles from non-doubleword aligned
1004 * memory locations. So if the offset is misaligned, we copy the
1005 * argument to a stack location in the prolog.
1007 if ((inst->inst_offset - STACK_BIAS) % 8) {
1008 inst->inst_basereg = sparc_fp;
1011 offset += align - 1;
1012 offset &= ~(align - 1);
1013 inst->inst_offset = STACK_BIAS + -offset;
1022 if (MONO_TYPE_ISSTRUCT (arg_type)) {
1023 /* Add a level of indirection */
1025 * It would be easier to add OP_LDIND_I here, but ldind_i instructions
1026 * are destructively modified in a lot of places in inssel.brg.
1029 MONO_INST_NEW (cfg, indir, 0);
1031 inst->opcode = OP_VTARG_ADDR;
1032 inst->inst_left = indir;
1038 * spillvars are stored between the normal locals and the storage reserved
1042 cfg->stack_offset = offset;
1048 mono_arch_create_vars (MonoCompile *cfg)
1050 MonoMethodSignature *sig;
1052 sig = mono_method_signature (cfg->method);
1054 if (MONO_TYPE_ISSTRUCT ((sig->ret))) {
1055 cfg->vret_addr = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_ARG);
1056 if (G_UNLIKELY (cfg->verbose_level > 1)) {
1057 printf ("vret_addr = ");
1058 mono_print_ins (cfg->vret_addr);
1062 if (!sig->ret->byref && (sig->ret->type == MONO_TYPE_I8 || sig->ret->type == MONO_TYPE_U8)) {
1063 MonoInst *low = get_vreg_to_inst (cfg, cfg->ret->dreg + 1);
1064 MonoInst *high = get_vreg_to_inst (cfg, cfg->ret->dreg + 2);
1066 low->flags |= MONO_INST_VOLATILE;
1067 high->flags |= MONO_INST_VOLATILE;
1070 /* Add a properly aligned dword for use by int<->float conversion opcodes */
1071 cfg->arch.float_spill_slot = mono_compile_create_var (cfg, &mono_defaults.double_class->byval_arg, OP_ARG);
1072 ((MonoInst*)cfg->arch.float_spill_slot)->flags |= MONO_INST_VOLATILE;
1076 add_outarg_reg (MonoCompile *cfg, MonoCallInst *call, ArgStorage storage, int reg, guint32 sreg)
1080 MONO_INST_NEW (cfg, arg, 0);
1086 arg->opcode = OP_MOVE;
1087 arg->dreg = mono_alloc_ireg (cfg);
1089 mono_call_inst_add_outarg_reg (cfg, call, arg->dreg, reg, FALSE);
1092 arg->opcode = OP_FMOVE;
1093 arg->dreg = mono_alloc_freg (cfg);
1095 mono_call_inst_add_outarg_reg (cfg, call, arg->dreg, reg, TRUE);
1098 g_assert_not_reached ();
1101 MONO_ADD_INS (cfg->cbb, arg);
1105 add_outarg_load (MonoCompile *cfg, MonoCallInst *call, int opcode, int basereg, int offset, int reg)
1107 int dreg = mono_alloc_ireg (cfg);
1109 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, dreg, sparc_sp, offset);
1111 mono_call_inst_add_outarg_reg (cfg, call, dreg, reg, FALSE);
1115 emit_pass_long (MonoCompile *cfg, MonoCallInst *call, ArgInfo *ainfo, MonoInst *in)
1117 int offset = ARGS_OFFSET + ainfo->offset;
1119 switch (ainfo->storage) {
1121 add_outarg_reg (cfg, call, ArgInIReg, sparc_o0 + ainfo->reg + 1, in->dreg + 1);
1122 add_outarg_reg (cfg, call, ArgInIReg, sparc_o0 + ainfo->reg, in->dreg + 2);
1124 case ArgOnStackPair:
1125 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, sparc_sp, offset, in->dreg + 2);
1126 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, sparc_sp, offset + 4, in->dreg + 1);
1128 case ArgInSplitRegStack:
1129 add_outarg_reg (cfg, call, ArgInIReg, sparc_o0 + ainfo->reg, in->dreg + 2);
1130 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, sparc_sp, offset + 4, in->dreg + 1);
1133 g_assert_not_reached ();
1138 emit_pass_double (MonoCompile *cfg, MonoCallInst *call, ArgInfo *ainfo, MonoInst *in)
1140 int offset = ARGS_OFFSET + ainfo->offset;
1142 switch (ainfo->storage) {
1144 /* floating-point <-> integer transfer must go through memory */
1145 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORER8_MEMBASE_REG, sparc_sp, offset, in->dreg);
1147 /* Load into a register pair */
1148 add_outarg_load (cfg, call, OP_LOADI4_MEMBASE, sparc_sp, offset, sparc_o0 + ainfo->reg);
1149 add_outarg_load (cfg, call, OP_LOADI4_MEMBASE, sparc_sp, offset + 4, sparc_o0 + ainfo->reg + 1);
1151 case ArgOnStackPair:
1152 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORER8_MEMBASE_REG, sparc_sp, offset, in->dreg);
1154 case ArgInSplitRegStack:
1155 /* floating-point <-> integer transfer must go through memory */
1156 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORER8_MEMBASE_REG, sparc_sp, offset, in->dreg);
1157 /* Load most significant word into register */
1158 add_outarg_load (cfg, call, OP_LOADI4_MEMBASE, sparc_sp, offset, sparc_o0 + ainfo->reg);
1161 g_assert_not_reached ();
1166 emit_pass_float (MonoCompile *cfg, MonoCallInst *call, ArgInfo *ainfo, MonoInst *in)
1168 int offset = ARGS_OFFSET + ainfo->offset;
1170 switch (ainfo->storage) {
1172 /* floating-point <-> integer transfer must go through memory */
1173 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORER4_MEMBASE_REG, sparc_sp, offset, in->dreg);
1174 add_outarg_load (cfg, call, OP_LOADI4_MEMBASE, sparc_sp, offset, sparc_o0 + ainfo->reg);
1177 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORER4_MEMBASE_REG, sparc_sp, offset, in->dreg);
1180 g_assert_not_reached ();
1185 emit_pass_other (MonoCompile *cfg, MonoCallInst *call, ArgInfo *ainfo, MonoType *arg_type, MonoInst *in);
1188 emit_pass_vtype (MonoCompile *cfg, MonoCallInst *call, CallInfo *cinfo, ArgInfo *ainfo, MonoType *arg_type, MonoInst *in, gboolean pinvoke)
1191 guint32 align, offset, pad, size;
1193 if (arg_type->type == MONO_TYPE_TYPEDBYREF) {
1194 size = sizeof (MonoTypedRef);
1195 align = sizeof (gpointer);
1198 size = mono_type_native_stack_size (&in->klass->byval_arg, &align);
1201 * Other backends use mono_type_stack_size (), but that
1202 * aligns the size to 8, which is larger than the size of
1203 * the source, leading to reads of invalid memory if the
1204 * source is at the end of address space.
1206 size = mono_class_value_size (in->klass, &align);
1209 /* The first 6 argument locations are reserved */
1210 if (cinfo->stack_usage < 6 * sizeof (gpointer))
1211 cinfo->stack_usage = 6 * sizeof (gpointer);
1213 offset = ALIGN_TO ((ARGS_OFFSET - STACK_BIAS) + cinfo->stack_usage, align);
1214 pad = offset - ((ARGS_OFFSET - STACK_BIAS) + cinfo->stack_usage);
1216 cinfo->stack_usage += size;
1217 cinfo->stack_usage += pad;
1220 * We use OP_OUTARG_VT to copy the valuetype to a stack location, then
1221 * use the normal OUTARG opcodes to pass the address of the location to
1225 MONO_INST_NEW (cfg, arg, OP_OUTARG_VT);
1226 arg->sreg1 = in->dreg;
1227 arg->klass = in->klass;
1228 arg->backend.size = size;
1229 arg->inst_p0 = call;
1230 arg->inst_p1 = mono_mempool_alloc (cfg->mempool, sizeof (ArgInfo));
1231 memcpy (arg->inst_p1, ainfo, sizeof (ArgInfo));
1232 ((ArgInfo*)(arg->inst_p1))->offset = STACK_BIAS + offset;
1233 MONO_ADD_INS (cfg->cbb, arg);
1235 MONO_INST_NEW (cfg, arg, OP_ADD_IMM);
1236 arg->dreg = mono_alloc_preg (cfg);
1237 arg->sreg1 = sparc_sp;
1238 arg->inst_imm = STACK_BIAS + offset;
1239 MONO_ADD_INS (cfg->cbb, arg);
1241 emit_pass_other (cfg, call, ainfo, NULL, arg);
1246 emit_pass_other (MonoCompile *cfg, MonoCallInst *call, ArgInfo *ainfo, MonoType *arg_type, MonoInst *in)
1248 int offset = ARGS_OFFSET + ainfo->offset;
1251 switch (ainfo->storage) {
1253 add_outarg_reg (cfg, call, ArgInIReg, sparc_o0 + ainfo->reg, in->dreg);
1260 opcode = OP_STOREI1_MEMBASE_REG;
1261 else if (offset & 0x2)
1262 opcode = OP_STOREI2_MEMBASE_REG;
1264 opcode = OP_STOREI4_MEMBASE_REG;
1265 MONO_EMIT_NEW_STORE_MEMBASE (cfg, opcode, sparc_sp, offset, in->dreg);
1269 g_assert_not_reached ();
1274 emit_sig_cookie (MonoCompile *cfg, MonoCallInst *call, CallInfo *cinfo)
1276 MonoMethodSignature *tmp_sig;
1279 * mono_ArgIterator_Setup assumes the signature cookie is
1280 * passed first and all the arguments which were before it are
1281 * passed on the stack after the signature. So compensate by
1282 * passing a different signature.
1284 tmp_sig = mono_metadata_signature_dup (call->signature);
1285 tmp_sig->param_count -= call->signature->sentinelpos;
1286 tmp_sig->sentinelpos = 0;
1287 memcpy (tmp_sig->params, call->signature->params + call->signature->sentinelpos, tmp_sig->param_count * sizeof (MonoType*));
1289 /* FIXME: Add support for signature tokens to AOT */
1290 cfg->disable_aot = TRUE;
1291 /* We allways pass the signature on the stack for simplicity */
1292 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, sparc_sp, ARGS_OFFSET + cinfo->sig_cookie.offset, tmp_sig);
1296 mono_arch_emit_call (MonoCompile *cfg, MonoCallInst *call)
1299 MonoMethodSignature *sig;
1303 guint32 extra_space = 0;
1305 sig = call->signature;
1306 n = sig->param_count + sig->hasthis;
1308 cinfo = get_call_info (cfg, sig, sig->pinvoke);
1310 if (sig->ret && MONO_TYPE_ISSTRUCT (sig->ret)) {
1311 /* Set the 'struct/union return pointer' location on the stack */
1312 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, sparc_sp, 64, call->vret_var->dreg);
1315 for (i = 0; i < n; ++i) {
1318 ainfo = cinfo->args + i;
1320 if ((sig->call_convention == MONO_CALL_VARARG) && (i == sig->sentinelpos)) {
1321 /* Emit the signature cookie just before the first implicit argument */
1322 emit_sig_cookie (cfg, call, cinfo);
1325 in = call->args [i];
1327 if (sig->hasthis && (i == 0))
1328 arg_type = &mono_defaults.object_class->byval_arg;
1330 arg_type = sig->params [i - sig->hasthis];
1332 arg_type = mono_type_get_underlying_type (arg_type);
1333 if ((i >= sig->hasthis) && (MONO_TYPE_ISSTRUCT(sig->params [i - sig->hasthis])))
1334 emit_pass_vtype (cfg, call, cinfo, ainfo, arg_type, in, sig->pinvoke);
1335 else if (!arg_type->byref && ((arg_type->type == MONO_TYPE_I8) || (arg_type->type == MONO_TYPE_U8)))
1336 emit_pass_long (cfg, call, ainfo, in);
1337 else if (!arg_type->byref && (arg_type->type == MONO_TYPE_R8))
1338 emit_pass_double (cfg, call, ainfo, in);
1339 else if (!arg_type->byref && (arg_type->type == MONO_TYPE_R4))
1340 emit_pass_float (cfg, call, ainfo, in);
1342 emit_pass_other (cfg, call, ainfo, arg_type, in);
1345 /* Handle the case where there are no implicit arguments */
1346 if (!sig->pinvoke && (sig->call_convention == MONO_CALL_VARARG) && (n == sig->sentinelpos)) {
1347 emit_sig_cookie (cfg, call, cinfo);
1350 call->stack_usage = cinfo->stack_usage + extra_space;
1356 mono_arch_emit_outarg_vt (MonoCompile *cfg, MonoInst *ins, MonoInst *src)
1358 ArgInfo *ainfo = (ArgInfo*)ins->inst_p1;
1359 int size = ins->backend.size;
1361 mini_emit_memcpy (cfg, sparc_sp, ainfo->offset, src->dreg, 0, size, 0);
1365 mono_arch_emit_setret (MonoCompile *cfg, MonoMethod *method, MonoInst *val)
1367 CallInfo *cinfo = get_call_info (cfg, mono_method_signature (method), FALSE);
1368 MonoType *ret = mini_type_get_underlying_type (cfg->generic_sharing_context, mono_method_signature (method)->ret);
1370 switch (cinfo->ret.storage) {
1372 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, cfg->ret->dreg, val->dreg);
1375 if (ret->type == MONO_TYPE_I8 || ret->type == MONO_TYPE_U8) {
1376 MONO_EMIT_NEW_UNALU (cfg, OP_LMOVE, cfg->ret->dreg, val->dreg);
1378 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, cfg->ret->dreg + 2, val->dreg + 2);
1379 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, cfg->ret->dreg + 1, val->dreg + 1);
1383 if (ret->type == MONO_TYPE_R4)
1384 MONO_EMIT_NEW_UNALU (cfg, OP_SETFRET, cfg->ret->dreg, val->dreg);
1386 MONO_EMIT_NEW_UNALU (cfg, OP_FMOVE, cfg->ret->dreg, val->dreg);
1389 g_assert_not_reached ();
1395 int cond_to_sparc_cond [][3] = {
1396 {sparc_be, sparc_be, sparc_fbe},
1397 {sparc_bne, sparc_bne, 0},
1398 {sparc_ble, sparc_ble, sparc_fble},
1399 {sparc_bge, sparc_bge, sparc_fbge},
1400 {sparc_bl, sparc_bl, sparc_fbl},
1401 {sparc_bg, sparc_bg, sparc_fbg},
1402 {sparc_bleu, sparc_bleu, 0},
1403 {sparc_beu, sparc_beu, 0},
1404 {sparc_blu, sparc_blu, sparc_fbl},
1405 {sparc_bgu, sparc_bgu, sparc_fbg}
1408 /* Map opcode to the sparc condition codes */
1409 static inline SparcCond
1410 opcode_to_sparc_cond (int opcode)
/* Overflow/carry exception opcodes are handled by dedicated cases
 * (their return statements are not visible in this listing). */
1416 case OP_COND_EXC_OV:
1417 case OP_COND_EXC_IOV:
1420 case OP_COND_EXC_IC:
1422 case OP_COND_EXC_NO:
1423 case OP_COND_EXC_NC:
/* Everything else: derive the relation and the operand kind from the
 * opcode and look the SPARC condition up in cond_to_sparc_cond []. */
1426 rel = mono_opcode_to_cond (opcode);
1427 t = mono_opcode_to_type (opcode, -1);
1429 return cond_to_sparc_cond [rel][t];
/*
 * COMPUTE_DISP: compute `disp', the word (instruction) displacement from
 * the current code pointer to ins->inst_true_bb.  If the target block has
 * not been emitted yet (native_offset == 0), a MONO_PATCH_INFO_BB entry is
 * registered so the branch can be fixed up later by sparc_patch ().
 */
1436 #define COMPUTE_DISP(ins) \
1437 if (ins->inst_true_bb->native_offset) \
1438 disp = (ins->inst_true_bb->native_offset - ((guint8*)code - cfg->native_code)) >> 2; \
1441 mono_add_patch_info (cfg, (guint8*)code - cfg->native_code, MONO_PATCH_INFO_BB, ins->inst_true_bb); \
/* Default integer condition-code set: 64-bit %xcc on sparcv9 builds,
 * 32-bit %icc otherwise (the two defines below sit in the two arms of a
 * preprocessor conditional not visible in this listing). */
1445 #define DEFAULT_ICC sparc_xcc_short
1447 #define DEFAULT_ICC sparc_icc_short
/* V9 variant: branch-with-prediction (BPcc) on an explicit cc register;
 * the 19-bit displacement is asserted, prediction bit = "taken" for
 * backward/known targets (disp != 0). */
1451 #define EMIT_COND_BRANCH_ICC(ins,cond,annul,filldelay,icc) \
1455 COMPUTE_DISP(ins); \
1456 predict = (disp != 0) ? 1 : 0; \
1457 g_assert (sparc_is_imm19 (disp)); \
1458 sparc_branchp (code, (annul), cond, icc, (predict), disp); \
1459 if (filldelay) sparc_nop (code); \
1461 #define EMIT_COND_BRANCH(ins,cond,annul,filldelay) EMIT_COND_BRANCH_ICC ((ins), (cond), (annul), (filldelay), (sparc_xcc_short))
/* Float conditional branch (FBfcc); still a 19-bit displacement here. */
1462 #define EMIT_FLOAT_COND_BRANCH(ins,cond,annul,filldelay) \
1466 COMPUTE_DISP(ins); \
1467 predict = (disp != 0) ? 1 : 0; \
1468 g_assert (sparc_is_imm19 (disp)); \
1469 sparc_fbranch (code, (annul), cond, disp); \
1470 if (filldelay) sparc_nop (code); \
/* Non-V9 variants: plain Bicc/FBfcc with a 22-bit displacement; the
 * icc-selecting form must never be used on V8. */
1473 #define EMIT_COND_BRANCH_ICC(ins,cond,annul,filldelay,icc) g_assert_not_reached ()
1474 #define EMIT_COND_BRANCH_GENERAL(ins,bop,cond,annul,filldelay) \
1477 COMPUTE_DISP(ins); \
1478 g_assert (sparc_is_imm22 (disp)); \
1479 sparc_ ## bop (code, (annul), cond, disp); \
1480 if (filldelay) sparc_nop (code); \
1482 #define EMIT_COND_BRANCH(ins,cond,annul,filldelay) EMIT_COND_BRANCH_GENERAL((ins),branch,(cond),annul,filldelay)
1483 #define EMIT_FLOAT_COND_BRANCH(ins,cond,annul,filldelay) EMIT_COND_BRANCH_GENERAL((ins),fbranch,(cond),annul,filldelay)
/* Predicted branch on DEFAULT_ICC; same shape as EMIT_COND_BRANCH_ICC
 * but the condition-code register is implied. */
1486 #define EMIT_COND_BRANCH_PREDICTED(ins,cond,annul,filldelay) \
1490 COMPUTE_DISP(ins); \
1491 predict = (disp != 0) ? 1 : 0; \
1492 g_assert (sparc_is_imm19 (disp)); \
1493 sparc_branchp (code, (annul), (cond), DEFAULT_ICC, (predict), disp); \
1494 if (filldelay) sparc_nop (code); \
/* Branch-on-register (BPr, e.g. brz/brnz): tests ins->sreg1 directly
 * against zero instead of the condition codes.  NOTE(review): the imm22
 * assert looks generous — BPr only encodes a 16-bit displacement; the
 * peephole pass below limits its use to small methods, presumably for
 * this reason.  Confirm against the V9 manual. */
1497 #define EMIT_COND_BRANCH_BPR(ins,bop,predict,annul,filldelay) \
1500 COMPUTE_DISP(ins); \
1501 g_assert (sparc_is_imm22 (disp)); \
1502 sparc_ ## bop (code, (annul), (predict), ins->sreg1, disp); \
1503 if (filldelay) sparc_nop (code); \
1506 /* emit an exception if condition is fail */
1508 * We put the exception throwing code out-of-line, at the end of the method
/* Register a MONO_PATCH_INFO_EXC patch at the branch site, then emit a
 * conditional branch with displacement 0 — sparc_patch () retargets it to
 * the out-of-line throw code when the method is finished.  On sparcv9 a
 * BPcc with an explicit cc set is used when the caller asked for one. */
1510 #define EMIT_COND_SYSTEM_EXCEPTION_GENERAL(ins,cond,sexc_name,filldelay,icc) do { \
1511 mono_add_patch_info (cfg, (guint8*)(code) - (cfg)->native_code, \
1512 MONO_PATCH_INFO_EXC, sexc_name); \
1513 if (sparcv9 && ((icc) != sparc_icc_short)) { \
1514 sparc_branchp (code, 0, (cond), (icc), 0, 0); \
1517 sparc_branch (code, 0, cond, 0); \
1519 if (filldelay) sparc_nop (code); \
1522 #define EMIT_COND_SYSTEM_EXCEPTION(ins,cond,sexc_name) EMIT_COND_SYSTEM_EXCEPTION_GENERAL(ins,cond,sexc_name,TRUE,DEFAULT_ICC)
/* Same idea, but using a branch-on-register instruction (tests
 * ins->sreg1 against zero) instead of the condition codes. */
1524 #define EMIT_COND_SYSTEM_EXCEPTION_BPR(ins,bop,sexc_name) do { \
1525 mono_add_patch_info (cfg, (guint8*)(code) - (cfg)->native_code, \
1526 MONO_PATCH_INFO_EXC, sexc_name); \
1527 sparc_ ## bop (code, FALSE, FALSE, ins->sreg1, 0); \
/* ALU op with an immediate operand: use the imm13 form when the constant
 * fits, otherwise materialize it in the scratch register %o7 first. */
1531 #define EMIT_ALU_IMM(ins,op,setcc) do { \
1532 if (sparc_is_imm13 ((ins)->inst_imm)) \
1533 sparc_ ## op ## _imm (code, (setcc), (ins)->sreg1, ins->inst_imm, (ins)->dreg); \
1535 sparc_set (code, ins->inst_imm, sparc_o7); \
1536 sparc_ ## op (code, (setcc), (ins)->sreg1, sparc_o7, (ins)->dreg); \
/* Load from base+offset; same imm13-or-scratch pattern for the offset. */
1540 #define EMIT_LOAD_MEMBASE(ins,op) do { \
1541 if (sparc_is_imm13 (ins->inst_offset)) \
1542 sparc_ ## op ## _imm (code, ins->inst_basereg, ins->inst_offset, ins->dreg); \
1544 sparc_set (code, ins->inst_offset, sparc_o7); \
1545 sparc_ ## op (code, ins->inst_basereg, sparc_o7, ins->dreg); \
/* Store an immediate: a zero immediate can be stored straight from %g0,
 * anything else goes through %o7; the offset goes through GP_SCRATCH_REG
 * when it does not fit in 13 bits (both scratch registers are needed). */
1550 #define EMIT_STORE_MEMBASE_IMM(ins,op) do { \
1552 if (ins->inst_imm == 0) \
1555 sparc_set (code, ins->inst_imm, sparc_o7); \
1558 if (!sparc_is_imm13 (ins->inst_offset)) { \
1559 sparc_set (code, ins->inst_offset, GP_SCRATCH_REG); \
1560 sparc_ ## op (code, sreg, ins->inst_destbasereg, GP_SCRATCH_REG); \
1563 sparc_ ## op ## _imm (code, sreg, ins->inst_destbasereg, ins->inst_offset); \
/* Store a register: only the offset may need the scratch register. */
1566 #define EMIT_STORE_MEMBASE_REG(ins,op) do { \
1567 if (!sparc_is_imm13 (ins->inst_offset)) { \
1568 sparc_set (code, ins->inst_offset, sparc_o7); \
1569 sparc_ ## op (code, ins->sreg1, ins->inst_destbasereg, sparc_o7); \
1572 sparc_ ## op ## _imm (code, ins->sreg1, ins->inst_destbasereg, ins->inst_offset); \
/* Emit a call: either a patchable set+jmpl template (address filled in
 * later) or a simple pc-relative call instruction. */
1575 #define EMIT_CALL() do { \
1577 sparc_set_template (code, sparc_o7); \
1578 sparc_jmpl (code, sparc_o7, sparc_g0, sparc_o7); \
1581 sparc_call_simple (code, 0); \
1587 * A call template is 7 instructions long, so we want to avoid it if possible.
/*
 * emit_call:
 *   Emit a call to a target described by (patch_type, data).  The first
 *   arm (disabled with `0 &&') would resolve the target eagerly on 64-bit
 *   non-AOT builds and emit a direct set+jmpl; the live path just records
 *   a patch entry and emits a call to be fixed up later.
 */
1590 emit_call (MonoCompile *cfg, guint32 *code, guint32 patch_type, gconstpointer data)
1594 /* FIXME: This only works if the target method is already compiled */
1595 if (0 && v64 && !cfg->compile_aot) {
1596 MonoJumpInfo patch_info;
1598 patch_info.type = patch_type;
1599 patch_info.data.target = data;
1601 target = mono_resolve_patch_target (cfg->method, cfg->domain, NULL, &patch_info, FALSE);
1603 /* FIXME: Add optimizations if the target is close enough */
1604 sparc_set (code, target, sparc_o7);
1605 sparc_jmpl (code, sparc_o7, sparc_g0, sparc_o7);
/* Live path: register the patch at the current offset; the actual call
 * instruction is patched in once the target address is known. */
1609 mono_add_patch_info (cfg, (guint8*)code - cfg->native_code, patch_type, data);
/* First peephole pass over a basic block (no transformations visible in
 * this listing — presumably a no-op on this backend; confirm upstream). */
1617 mono_arch_peephole_pass_1 (MonoCompile *cfg, MonoBasicBlock *bb)
/*
 * mono_arch_peephole_pass_2:
 *   Second, pattern-matching peephole pass: walks the instructions of BB
 *   pairwise (last_ins, ins) and rewrites/deletes redundant sequences —
 *   store-then-load forwarding, duplicate loads, no-op moves, merging of
 *   adjacent zero stores, and (on V9) compare-with-zero + branch into the
 *   single-instruction BPr forms.
 */
1622 mono_arch_peephole_pass_2 (MonoCompile *cfg, MonoBasicBlock *bb)
1624 MonoInst *ins, *n, *last_ins = NULL;
1627 MONO_BB_FOR_EACH_INS_SAFE (bb, n, ins) {
1628 switch (ins->opcode) {
1630 /* remove unnecessary multiplication with 1 */
1631 if (ins->inst_imm == 1) {
1632 if (ins->dreg != ins->sreg1) {
1633 ins->opcode = OP_MOVE;
1635 MONO_DELETE_INS (bb, ins);
1641 case OP_LOAD_MEMBASE:
1642 case OP_LOADI4_MEMBASE:
/* Store followed by a load from the same [basereg+offset]: forward the
 * stored register (delete the load, or turn it into a move). */
1644 * OP_STORE_MEMBASE_REG reg, offset(basereg)
1645 * OP_LOAD_MEMBASE offset(basereg), reg
1647 if (last_ins && (last_ins->opcode == OP_STOREI4_MEMBASE_REG
1648 || last_ins->opcode == OP_STORE_MEMBASE_REG) &&
1649 ins->inst_basereg == last_ins->inst_destbasereg &&
1650 ins->inst_offset == last_ins->inst_offset) {
1651 if (ins->dreg == last_ins->sreg1) {
1652 MONO_DELETE_INS (bb, ins);
1655 //static int c = 0; printf ("MATCHX %s %d\n", cfg->method->name,c++);
1656 ins->opcode = OP_MOVE;
1657 ins->sreg1 = last_ins->sreg1;
/* Two identical loads in a row: reuse the first load's destination. */
1661 * Note: reg1 must be different from the basereg in the second load
1662 * OP_LOAD_MEMBASE offset(basereg), reg1
1663 * OP_LOAD_MEMBASE offset(basereg), reg2
1665 * OP_LOAD_MEMBASE offset(basereg), reg1
1666 * OP_MOVE reg1, reg2
1668 } if (last_ins && (last_ins->opcode == OP_LOADI4_MEMBASE
1669 || last_ins->opcode == OP_LOAD_MEMBASE) &&
1670 ins->inst_basereg != last_ins->dreg &&
1671 ins->inst_basereg == last_ins->inst_basereg &&
1672 ins->inst_offset == last_ins->inst_offset) {
1674 if (ins->dreg == last_ins->dreg) {
1675 MONO_DELETE_INS (bb, ins);
1678 ins->opcode = OP_MOVE;
1679 ins->sreg1 = last_ins->dreg;
1682 //g_assert_not_reached ();
/* Immediate store followed by a load of the same slot: the load becomes
 * an OP_ICONST.  NOTE(review): this rule is guarded by an unconditional
 * g_assert_not_reached () below — apparently never exercised; verify
 * before relying on it. */
1686 * OP_STORE_MEMBASE_IMM imm, offset(basereg)
1687 * OP_LOAD_MEMBASE offset(basereg), reg
1689 * OP_STORE_MEMBASE_IMM imm, offset(basereg)
1690 * OP_ICONST reg, imm
1692 } else if (last_ins && (last_ins->opcode == OP_STOREI4_MEMBASE_IMM
1693 || last_ins->opcode == OP_STORE_MEMBASE_IMM) &&
1694 ins->inst_basereg == last_ins->inst_destbasereg &&
1695 ins->inst_offset == last_ins->inst_offset) {
1696 //static int c = 0; printf ("MATCHX %s %d\n", cfg->method->name,c++);
1697 ins->opcode = OP_ICONST;
1698 ins->inst_c0 = last_ins->inst_imm;
1699 g_assert_not_reached (); // check this rule
/* Byte/halfword store-to-load forwarding, same shape as above. */
1704 case OP_LOADI1_MEMBASE:
1705 if (last_ins && (last_ins->opcode == OP_STOREI1_MEMBASE_REG) &&
1706 ins->inst_basereg == last_ins->inst_destbasereg &&
1707 ins->inst_offset == last_ins->inst_offset) {
1708 if (ins->dreg == last_ins->sreg1) {
1709 MONO_DELETE_INS (bb, ins);
1712 //static int c = 0; printf ("MATCHX %s %d\n", cfg->method->name,c++);
1713 ins->opcode = OP_MOVE;
1714 ins->sreg1 = last_ins->sreg1;
1718 case OP_LOADI2_MEMBASE:
1719 if (last_ins && (last_ins->opcode == OP_STOREI2_MEMBASE_REG) &&
1720 ins->inst_basereg == last_ins->inst_destbasereg &&
1721 ins->inst_offset == last_ins->inst_offset) {
1722 if (ins->dreg == last_ins->sreg1) {
1723 MONO_DELETE_INS (bb, ins);
1726 //static int c = 0; printf ("MATCHX %s %d\n", cfg->method->name,c++);
1727 ins->opcode = OP_MOVE;
1728 ins->sreg1 = last_ins->sreg1;
1732 case OP_STOREI4_MEMBASE_IMM:
1733 /* Convert pairs of 0 stores to a dword 0 store */
1734 /* Used when initializing temporaries */
1735 /* We know sparc_fp is dword aligned */
1736 if (last_ins && (last_ins->opcode == OP_STOREI4_MEMBASE_IMM) &&
1737 (ins->inst_destbasereg == last_ins->inst_destbasereg) &&
1738 (ins->inst_destbasereg == sparc_fp) &&
1739 (ins->inst_offset < 0) &&
1740 ((ins->inst_offset % 8) == 0) &&
1741 ((ins->inst_offset == last_ins->inst_offset - 4)) &&
1742 (ins->inst_imm == 0) &&
1743 (last_ins->inst_imm == 0)) {
1745 last_ins->opcode = OP_STOREI8_MEMBASE_IMM;
1746 last_ins->inst_offset = ins->inst_offset;
1747 MONO_DELETE_INS (bb, ins);
1758 case OP_COND_EXC_EQ:
1759 case OP_COND_EXC_GE:
1760 case OP_COND_EXC_GT:
1761 case OP_COND_EXC_LE:
1762 case OP_COND_EXC_LT:
1763 case OP_COND_EXC_NE_UN:
1765 * Convert compare with zero+branch to BRcc
1768 * This only works in 64 bit mode, since it examines all 64
1769 * bits of the register.
1770 * Only do this if the method is small since BPr only has a 16bit
1773 if (v64 && (cfg->header->code_size < 10000) && last_ins &&
1774 (last_ins->opcode == OP_COMPARE_IMM) &&
1775 (last_ins->inst_imm == 0)) {
1776 switch (ins->opcode) {
1778 ins->opcode = OP_SPARC_BRZ;
1781 ins->opcode = OP_SPARC_BRNZ;
1784 ins->opcode = OP_SPARC_BRLZ;
1787 ins->opcode = OP_SPARC_BRGZ;
1790 ins->opcode = OP_SPARC_BRGEZ;
1793 ins->opcode = OP_SPARC_BRLEZ;
1795 case OP_COND_EXC_EQ:
1796 ins->opcode = OP_SPARC_COND_EXC_EQZ;
1798 case OP_COND_EXC_GE:
1799 ins->opcode = OP_SPARC_COND_EXC_GEZ;
1801 case OP_COND_EXC_GT:
1802 ins->opcode = OP_SPARC_COND_EXC_GTZ;
1804 case OP_COND_EXC_LE:
1805 ins->opcode = OP_SPARC_COND_EXC_LEZ;
1807 case OP_COND_EXC_LT:
1808 ins->opcode = OP_SPARC_COND_EXC_LTZ;
1810 case OP_COND_EXC_NE_UN:
1811 ins->opcode = OP_SPARC_COND_EXC_NEZ;
1814 g_assert_not_reached ();
/* The new BPr-style opcode tests the compared register directly, so the
 * compare instruction itself can be deleted. */
1816 ins->sreg1 = last_ins->sreg1;
1818 MONO_DELETE_INS (bb, ins);
/* OP_MOVE cleanups: self-move, and a move that undoes the previous one. */
1826 if (ins->dreg == ins->sreg1) {
1827 MONO_DELETE_INS (bb, ins);
1831 * OP_MOVE sreg, dreg
1832 * OP_MOVE dreg, sreg
1834 if (last_ins && last_ins->opcode == OP_MOVE &&
1835 ins->sreg1 == last_ins->dreg &&
1836 ins->dreg == last_ins->sreg1) {
1837 MONO_DELETE_INS (bb, ins);
1845 bb->last_ins = last_ins;
/*
 * mono_arch_decompose_long_opts:
 *   Decompose 64-bit IR opcodes into 32-bit register-pair operations.
 *   Visible here: long negation as 0 - low (setting carry via OP_SUBCC)
 *   followed by 0 - high - borrow (OP_SBB) on the dreg+1/dreg+2 pair.
 */
1849 mono_arch_decompose_long_opts (MonoCompile *cfg, MonoInst *ins)
1851 switch (ins->opcode) {
1853 MONO_EMIT_NEW_BIALU (cfg, OP_SUBCC, ins->dreg + 1, 0, ins->sreg1 + 1);
1854 MONO_EMIT_NEW_BIALU (cfg, OP_SBB, ins->dreg + 2, 0, ins->sreg1 + 2);
/* Architecture-specific lowering pass over BB (body mostly not visible
 * in this listing). */
1863 mono_arch_lowering_pass (MonoCompile *cfg, MonoBasicBlock *bb)
1867 /* FIXME: Strange loads from the stack in basic-float.cs:test_2_rem */
/*
 * sparc_patch:
 *   Retarget the instruction (or instruction sequence) at CODE so that it
 *   refers to TARGET.  Decodes the instruction's op/op2/op3 fields and
 *   rewrites the appropriate displacement or immediate field in place;
 *   multi-instruction address-materialization sequences (sethi+or, etc.)
 *   are re-emitted with sparc_set ().
 */
1870 sparc_patch (guint32 *code, const gpointer target)
1873 guint32 ins = *code;
1874 guint32 op = ins >> 30;
1875 guint32 op2 = (ins >> 22) & 0x7;
1876 guint32 rd = (ins >> 25) & 0x1f;
1877 guint8* target8 = (guint8*)target;
/* Word displacement from the patch site to the target. */
1878 gint64 disp = (target8 - (guint8*)code) >> 2;
1881 // g_print ("patching 0x%08x (0x%08x) to point to 0x%08x\n", code, ins, target);
/* op=0, op2=2: Bicc — rewrite the low 22 displacement bits. */
1883 if ((op == 0) && (op2 == 2)) {
1884 if (!sparc_is_imm22 (disp))
1887 *code = ((ins >> 22) << 22) | (disp & 0x3fffff);
/* op=0, op2=1: BPcc — 19-bit displacement field. */
1889 else if ((op == 0) && (op2 == 1)) {
1890 if (!sparc_is_imm19 (disp))
1893 *code = ((ins >> 19) << 19) | (disp & 0x7ffff);
/* op=0, op2=3: BPr — 16-bit displacement split into a 2-bit high field
 * (bits 21-20) and a 14-bit low field. */
1895 else if ((op == 0) && (op2 == 3)) {
1896 if (!sparc_is_imm16 (disp))
1899 *code &= ~(0x180000 | 0x3fff);
1900 *code |= ((disp << 21) & (0x180000)) | (disp & 0x3fff);
/* op=0, op2=6: FBfcc — 22-bit displacement like Bicc. */
1902 else if ((op == 0) && (op2 == 6)) {
1903 if (!sparc_is_imm22 (disp))
1906 *code = ((ins >> 22) << 22) | (disp & 0x3fffff);
/* op=0, op2=4: sethi — part of an address-materialization sequence;
 * dispatch on the following instruction. */
1908 else if ((op == 0) && (op2 == 4)) {
1909 guint32 ins2 = code [1];
1911 if (((ins2 >> 30) == 2) && (((ins2 >> 19) & 0x3f) == 2)) {
1912 /* sethi followed by or */
1914 sparc_set (p, target8, rd);
1915 while (p <= (code + 1))
1918 else if (ins2 == 0x01000000) {
1919 /* sethi followed by nop */
1921 sparc_set (p, target8, rd);
1922 while (p <= (code + 1))
1925 else if ((sparc_inst_op (ins2) == 3) && (sparc_inst_imm (ins2))) {
1926 /* sethi followed by load/store */
/* Patch the sethi's 22 high bits and the memory op's 10 low bits. */
1928 guint32 t = (guint32)target8;
1929 *code &= ~(0x3fffff);
1931 *(code + 1) &= ~(0x3ff);
1932 *(code + 1) |= (t & 0x3ff);
/* Longer sparc_set-style sequence (sethi %g1 / sethi / or / or):
 * re-emit starting at the second instruction. */
1936 (sparc_inst_rd (ins) == sparc_g1) &&
1937 (sparc_inst_op (c [1]) == 0) && (sparc_inst_op2 (c [1]) == 4) &&
1938 (sparc_inst_op (c [2]) == 2) && (sparc_inst_op3 (c [2]) == 2) &&
1939 (sparc_inst_op (c [3]) == 2) && (sparc_inst_op3 (c [3]) == 2))
1943 reg = sparc_inst_rd (c [1]);
1944 sparc_set (p, target8, reg);
1948 else if ((sparc_inst_op (ins2) == 2) && (sparc_inst_op3 (ins2) == 0x38) &&
1949 (sparc_inst_imm (ins2))) {
1950 /* sethi followed by jmpl */
1952 guint32 t = (guint32)target8;
1953 *code &= ~(0x3fffff);
1955 *(code + 1) &= ~(0x3ff);
1956 *(code + 1) |= (t & 0x3ff);
/* op=1: CALL — re-emit with the new 30-bit pc-relative displacement. */
1962 else if (op == 01) {
1963 gint64 disp = (target8 - (guint8*)code) >> 2;
1965 if (!sparc_is_imm30 (disp))
1967 sparc_call_simple (code, target8 - (guint8*)code);
/* op=2, op3=2, imm form: or-immediate — target must fit in simm13. */
1969 else if ((op == 2) && (sparc_inst_op3 (ins) == 0x2) && sparc_inst_imm (ins)) {
1971 g_assert (sparc_is_imm13 (target8));
1973 *code |= (guint32)target8;
1975 else if ((sparc_inst_op (ins) == 2) && (sparc_inst_op3 (ins) == 0x7)) {
1976 /* sparc_set case 5. */
1980 reg = sparc_inst_rd (c [3]);
1981 sparc_set (p, target, reg);
1988 // g_print ("patched with 0x%08x\n", ins);
1992 * mono_sparc_emit_save_lmf:
1994 * Emit the code necessary to push a new entry onto the lmf stack. Used by
1995 * trampolines as well.
/* On entry %o0 holds the address of the LMF-address slot; the new MonoLMF
 * lives in the current frame at %fp + lmf_offset. */
1998 mono_sparc_emit_save_lmf (guint32 *code, guint32 lmf_offset)
/* lmf->lmf_addr = %o0 */
2001 sparc_sti_imm (code, sparc_o0, sparc_fp, lmf_offset + G_STRUCT_OFFSET (MonoLMF, lmf_addr));
2002 /* Save previous_lmf */
2003 sparc_ldi (code, sparc_o0, sparc_g0, sparc_o7);
2004 sparc_sti_imm (code, sparc_o7, sparc_fp, lmf_offset + G_STRUCT_OFFSET (MonoLMF, previous_lmf));
/* *lmf_addr = &lmf — make the new entry the head of the LMF list. */
2006 sparc_add_imm (code, FALSE, sparc_fp, lmf_offset, sparc_o7);
2007 sparc_sti (code, sparc_o7, sparc_o0, sparc_g0);
/* mono_sparc_emit_restore_lmf: pop the frame's MonoLMF entry off the LMF
 * list by storing its previous_lmf back through its lmf_addr pointer. */
2013 mono_sparc_emit_restore_lmf (guint32 *code, guint32 lmf_offset)
2015 /* Load previous_lmf */
2016 sparc_ldi_imm (code, sparc_fp, lmf_offset + G_STRUCT_OFFSET (MonoLMF, previous_lmf), sparc_l0);
2018 sparc_ldi_imm (code, sparc_fp, lmf_offset + G_STRUCT_OFFSET (MonoLMF, lmf_addr), sparc_l1);
2019 /* *(lmf) = previous_lmf */
2020 sparc_sti (code, sparc_l0, sparc_l1, sparc_g0);
/* emit_save_sp_to_lmf: record the current %sp in the frame's MonoLMF,
 * done at each call site rather than in the prolog (see comment below). */
2025 emit_save_sp_to_lmf (MonoCompile *cfg, guint32 *code)
2028 * Since register windows are saved to the current value of %sp, we need to
2029 * set the sp field in the lmf before the call, not in the prolog.
2031 if (cfg->method->save_lmf) {
2032 gint32 lmf_offset = MONO_SPARC_STACK_BIAS - cfg->arch.lmf_offset;
2035 sparc_sti_imm (code, sparc_sp, sparc_fp, lmf_offset + G_STRUCT_OFFSET (MonoLMF, sp));
/* emit_vret_token: after a pinvoke call returning a struct, emit the
 * `unimp <size>' marker instruction required by the SPARC V8 ABI for
 * struct-returning functions (the callee skips over it). */
2042 emit_vret_token (MonoGenericSharingContext *gsctx, MonoInst *ins, guint32 *code)
2044 MonoCallInst *call = (MonoCallInst*)ins;
2048 * The sparc ABI requires that calls to functions which return a structure
2049 * contain an additional unimpl instruction which is checked by the callee.
2051 if (call->signature->pinvoke && MONO_TYPE_ISSTRUCT(call->signature->ret)) {
2052 if (call->signature->ret->type == MONO_TYPE_TYPEDBYREF)
2053 size = mini_type_stack_size (gsctx, call->signature->ret, NULL);
2055 size = mono_class_native_size (call->signature->ret->data.klass, NULL);
/* The struct size is encoded in the low 12 bits of the unimp insn. */
2056 sparc_unimp (code, size & 0xfff);
/* emit_move_return_value: after a call, move the ABI return registers
 * (%o0/%o1 for ints/longs, %f0/%f1 for floats) into the call
 * instruction's destination vreg. */
2063 emit_move_return_value (MonoInst *ins, guint32 *code)
2065 /* Move return value to the target register */
2066 /* FIXME: do more things in the local reg allocator */
2067 switch (ins->opcode) {
2069 case OP_VOIDCALL_REG:
2070 case OP_VOIDCALL_MEMBASE:
/* Plain calls: the register allocator already forced dreg == %o0. */
2074 case OP_CALL_MEMBASE:
2075 g_assert (ins->dreg == sparc_o0);
2079 case OP_LCALL_MEMBASE:
2081 * ins->dreg is the least significant reg due to the lreg: LCALL rule
2082 * in inssel-long32.brg.
/* 64-bit return: low half moved from %o0, high half pinned to %o1. */
2085 sparc_mov_reg_reg (code, sparc_o0, ins->dreg);
2087 g_assert (ins->dreg == sparc_o1);
2092 case OP_FCALL_MEMBASE:
/* Float return: R4 results are widened to double (fstod) since the JIT
 * only works with double-precision registers internally. */
2094 if (((MonoCallInst*)ins)->signature->ret->type == MONO_TYPE_R4) {
2095 sparc_fmovs (code, sparc_f0, ins->dreg);
2096 sparc_fstod (code, ins->dreg, ins->dreg);
2099 sparc_fmovd (code, sparc_f0, ins->dreg);
2101 sparc_fmovs (code, sparc_f0, ins->dreg);
2102 if (((MonoCallInst*)ins)->signature->ret->type == MONO_TYPE_R4)
2103 sparc_fstod (code, ins->dreg, ins->dreg);
2105 sparc_fmovs (code, sparc_f1, ins->dreg + 1);
/* Value-type returns need no register move here. */
2110 case OP_VCALL_MEMBASE:
2113 case OP_VCALL2_MEMBASE:
2123 * emit_load_volatile_arguments:
2125 * Load volatile arguments from the stack to the original input registers.
2126 * Required before a tail call.
2129 emit_load_volatile_arguments (MonoCompile *cfg, guint32 *code)
2131 MonoMethod *method = cfg->method;
2132 MonoMethodSignature *sig;
2137 /* FIXME: Generate intermediate code instead */
2139 sig = mono_method_signature (method);
2141 cinfo = get_call_info (cfg, sig, FALSE);
2143 /* This is the opposite of the code in emit_prolog */
2145 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
2146 ArgInfo *ainfo = cinfo->args + i;
2147 gint32 stack_offset;
2150 inst = cfg->args [i];
/* Implicit `this' is argument 0 when present. */
2152 if (sig->hasthis && (i == 0))
2153 arg_type = &mono_defaults.object_class->byval_arg;
2155 arg_type = sig->params [i - sig->hasthis];
2157 stack_offset = ainfo->offset + ARGS_OFFSET;
2158 ireg = sparc_i0 + ainfo->reg;
/* Split reg/stack argument: restore the stack half (register half is %i5). */
2160 if (ainfo->storage == ArgInSplitRegStack) {
2161 g_assert (inst->opcode == OP_REGOFFSET);
2163 if (!sparc_is_imm13 (stack_offset))
2165 sparc_st_imm (code, inst->inst_basereg, stack_offset, sparc_i5);
/* 32-bit doubles: passed in an int register pair or on the stack. */
2168 if (!v64 && !arg_type->byref && (arg_type->type == MONO_TYPE_R8)) {
2169 if (ainfo->storage == ArgInIRegPair) {
2170 if (!sparc_is_imm13 (inst->inst_offset + 4))
2172 sparc_ld_imm (code, inst->inst_basereg, inst->inst_offset, ireg);
2173 sparc_ld_imm (code, inst->inst_basereg, inst->inst_offset + 4, ireg + 1);
2176 if (ainfo->storage == ArgInSplitRegStack) {
2177 if (stack_offset != inst->inst_offset) {
2178 sparc_ld_imm (code, inst->inst_basereg, inst->inst_offset, sparc_i5);
2179 sparc_ld_imm (code, inst->inst_basereg, inst->inst_offset + 4, sparc_o7);
2180 sparc_st_imm (code, sparc_o7, sparc_fp, stack_offset + 4);
2185 if (ainfo->storage == ArgOnStackPair) {
2186 if (stack_offset != inst->inst_offset) {
2187 /* stack_offset is not dword aligned, so we need to make a copy */
2188 sparc_ld_imm (code, inst->inst_basereg, inst->inst_offset, sparc_o7);
2189 sparc_st_imm (code, sparc_o7, sparc_fp, stack_offset);
2191 sparc_ld_imm (code, inst->inst_basereg, inst->inst_offset + 4, sparc_o7);
2192 sparc_st_imm (code, sparc_o7, sparc_fp, stack_offset + 4);
2197 g_assert_not_reached ();
/* Integer argument spilled to the stack: reload into its %i register,
 * picking the load width from the slot's alignment within the arg area. */
2200 if ((ainfo->storage == ArgInIReg) && (inst->opcode != OP_REGVAR)) {
2201 /* Argument in register, but need to be saved to stack */
2202 if (!sparc_is_imm13 (stack_offset))
2204 if ((stack_offset - ARGS_OFFSET) & 0x1)
2205 /* FIXME: Is this ldsb or ldub ? */
2206 sparc_ldsb_imm (code, inst->inst_basereg, stack_offset, ireg);
2208 if ((stack_offset - ARGS_OFFSET) & 0x2)
2209 sparc_ldsh_imm (code, inst->inst_basereg, stack_offset, ireg);
2211 if ((stack_offset - ARGS_OFFSET) & 0x4)
2212 sparc_ld_imm (code, inst->inst_basereg, stack_offset, ireg);
2215 sparc_ldx_imm (code, inst->inst_basereg, stack_offset, ireg);
2217 sparc_ld_imm (code, inst->inst_basereg, stack_offset, ireg);
2220 else if ((ainfo->storage == ArgInIRegPair) && (inst->opcode != OP_REGVAR)) {
2221 /* Argument in regpair, but need to be saved to stack */
2222 if (!sparc_is_imm13 (inst->inst_offset + 4))
/* NOTE(review): the second line stores instead of loading (st vs ld) and
 * swaps operand roles relative to the first — looks asymmetric for a
 * "reload" path; confirm against the prolog code this mirrors. */
2224 sparc_ld_imm (code, inst->inst_basereg, inst->inst_offset, ireg);
2225 sparc_st_imm (code, inst->inst_basereg, inst->inst_offset + 4, ireg + 1);
/* Float/double register arguments: nothing to do here. */
2227 else if ((ainfo->storage == ArgInFloatReg) && (inst->opcode != OP_REGVAR)) {
2230 else if ((ainfo->storage == ArgInDoubleReg) && (inst->opcode != OP_REGVAR)) {
2234 if ((ainfo->storage == ArgInSplitRegStack) || (ainfo->storage == ArgOnStack))
2235 if (inst->opcode == OP_REGVAR)
2236 /* FIXME: Load the argument into memory */
2246 * mono_sparc_is_virtual_call:
2248 * Determine whenever the instruction at CODE is a virtual call.
2251 mono_sparc_is_virtual_call (guint32 *code)
/* A register-indirect call is jmpl: op == 2, op3 == 0x38. */
2258 if ((sparc_inst_op (*code) == 0x2) && (sparc_inst_op3 (*code) == 0x38)) {
2260 * Register indirect call. If it is a virtual call, then the
2261 * instruction in the delay slot is a special kind of nop.
2264 /* Construct special nop */
/* The marker is `or %g0, 0xca, %g0' — built here and compared against
 * the delay-slot instruction. */
2265 sparc_or_imm (p, FALSE, sparc_g0, 0xca, sparc_g0);
2268 if (code [1] == p [0])
/* Per-item code-size estimates (in 4-byte instruction words) used to size
 * the IMT thunk buffer in mono_arch_build_imt_thunk () below. */
2276 #define BR_SMALL_SIZE 2
2277 #define BR_LARGE_SIZE 2
2278 #define JUMP_IMM_SIZE 5
/* Set to 1 to emit an extra key comparison that traps on a wrong method. */
2279 #define ENABLE_WRONG_METHOD_CHECK 0
2282 * LOCKING: called with the domain lock held
/*
 * mono_arch_build_imt_thunk:
 *   Build the interface-method-table dispatch thunk: a compare-and-branch
 *   search over IMT_ENTRIES that matches MONO_ARCH_IMT_REG against each
 *   item's key and jumps through the corresponding vtable slot (or to
 *   FAIL_TRAMP).  Two passes: size estimation, then emission, followed by
 *   branch fixups.
 */
2285 mono_arch_build_imt_thunk (MonoVTable *vtable, MonoDomain *domain, MonoIMTCheckItem **imt_entries, int count,
2286 gpointer fail_tramp)
2290 guint32 *code, *start;
/* Pass 1: accumulate the buffer size from per-item chunk estimates. */
2292 for (i = 0; i < count; ++i) {
2293 MonoIMTCheckItem *item = imt_entries [i];
2294 if (item->is_equals) {
2295 if (item->check_target_idx) {
2296 if (!item->compare_done)
2297 item->chunk_size += CMP_SIZE;
2298 item->chunk_size += BR_SMALL_SIZE + JUMP_IMM_SIZE;
2301 item->chunk_size += 16;
2302 item->chunk_size += JUMP_IMM_SIZE;
2303 #if ENABLE_WRONG_METHOD_CHECK
2304 item->chunk_size += CMP_SIZE + BR_SMALL_SIZE + 1;
2308 item->chunk_size += CMP_SIZE + BR_LARGE_SIZE;
2309 imt_entries [item->check_target_idx]->compare_done = TRUE;
2311 size += item->chunk_size;
2314 code = mono_method_alloc_generic_virtual_thunk (domain, size * 4);
2316 code = mono_domain_code_reserve (domain, size * 4);
/* Pass 2: emit the compare/branch/jump chunks. */
2318 for (i = 0; i < count; ++i) {
2319 MonoIMTCheckItem *item = imt_entries [i];
2320 item->code_target = (guint8*)code;
2321 if (item->is_equals) {
2322 gboolean fail_case = !item->check_target_idx && fail_tramp;
2324 if (item->check_target_idx || fail_case) {
2325 if (!item->compare_done || fail_case) {
2326 sparc_set (code, (guint32)item->key, sparc_g5);
2327 sparc_cmp (code, MONO_ARCH_IMT_REG, sparc_g5);
2329 item->jmp_code = (guint8*)code;
2330 sparc_branch (code, 0, sparc_bne, 0);
2332 if (item->has_target_code) {
/* NOTE(review): sparc_f5 is a *float* register; the parallel branch
 * below and the jmpl both use sparc_g5 — this looks like a typo for
 * sparc_g5.  Confirm upstream before relying on the has_target_code
 * path. */
2333 sparc_set (code, item->value.target_code, sparc_f5);
2335 sparc_set (code, ((guint32)(&(vtable->vtable [item->value.vtable_slot]))), sparc_g5);
2336 sparc_ld (code, sparc_g5, 0, sparc_g5);
2338 sparc_jmpl (code, sparc_g5, sparc_g0, sparc_g0);
/* fail_case: retarget the miss branch to a jump to fail_tramp. */
2342 sparc_patch (item->jmp_code, code);
2343 sparc_set (code, fail_tramp, sparc_g5);
2344 sparc_jmpl (code, sparc_g5, sparc_g0, sparc_g0);
2346 item->jmp_code = NULL;
2349 /* enable the commented code to assert on wrong method */
2350 #if ENABLE_WRONG_METHOD_CHECK
2351 g_assert_not_reached ();
2353 sparc_set (code, ((guint32)(&(vtable->vtable [item->value.vtable_slot]))), sparc_g5);
2354 sparc_ld (code, sparc_g5, 0, sparc_g5);
2355 sparc_jmpl (code, sparc_g5, sparc_g0, sparc_g0);
2357 #if ENABLE_WRONG_METHOD_CHECK
2358 g_assert_not_reached ();
/* Non-equals node: binary-search step, branch if key is above. */
2362 sparc_set (code, (guint32)item->key, sparc_g5);
2363 sparc_cmp (code, MONO_ARCH_IMT_REG, sparc_g5);
2364 item->jmp_code = (guint8*)code;
2365 sparc_branch (code, 0, sparc_beu, 0);
2369 /* patch the branches to get to the target items */
2370 for (i = 0; i < count; ++i) {
2371 MonoIMTCheckItem *item = imt_entries [i];
2372 if (item->jmp_code) {
2373 if (item->check_target_idx) {
2374 sparc_patch ((guint32*)item->jmp_code, imt_entries [item->check_target_idx]->code_target);
2379 mono_arch_flush_icache ((guint8*)start, (code - start) * 4);
2381 mono_stats.imt_thunks_size += (code - start) * 4;
2382 g_assert (code - start <= size);
/* mono_arch_find_imt_method: recover the interface MonoMethod from the
 * saved register state of an IMT call site (it is passed in %g1). */
2387 mono_arch_find_imt_method (mgreg_t *regs, guint8 *code)
2390 g_assert_not_reached ();
2393 return (MonoMethod*)regs [sparc_g1];
/* mono_arch_get_this_arg_from_call: fetch the `this' argument (%o0) from
 * the saved register state; flush register windows first so the saved
 * values are visible in memory. */
2397 mono_arch_get_this_arg_from_call (mgreg_t *regs, guint8 *code)
2399 mono_sparc_flushw ();
2401 return (gpointer)regs [sparc_o0];
2405 * Some conventions used in the following code.
2406 * 2) The only scratch registers we have are o7 and g1. We try to
2407 * stick to o7 when we can, and use g1 when necessary.
2411 mono_arch_output_basic_block (MonoCompile *cfg, MonoBasicBlock *bb)
2416 guint32 *code = (guint32*)(cfg->native_code + cfg->code_len);
2417 MonoInst *last_ins = NULL;
2421 if (cfg->verbose_level > 2)
2422 g_print ("Basic block %d starting at offset 0x%x\n", bb->block_num, bb->native_offset);
2424 cpos = bb->max_offset;
2426 if (cfg->prof_options & MONO_PROFILE_COVERAGE) {
2430 MONO_BB_FOR_EACH_INS (bb, ins) {
2433 offset = (guint8*)code - cfg->native_code;
2435 spec = ins_get_spec (ins->opcode);
2437 max_len = ((guint8 *)spec)[MONO_INST_LEN];
2439 if (offset > (cfg->code_size - max_len - 16)) {
2440 cfg->code_size *= 2;
2441 cfg->native_code = g_realloc (cfg->native_code, cfg->code_size);
2442 code = (guint32*)(cfg->native_code + offset);
2444 code_start = (guint8*)code;
2445 // if (ins->cil_code)
2446 // g_print ("cil code\n");
2447 mono_debug_record_line_number (cfg, ins, offset);
2449 switch (ins->opcode) {
2450 case OP_STOREI1_MEMBASE_IMM:
2451 EMIT_STORE_MEMBASE_IMM (ins, stb);
2453 case OP_STOREI2_MEMBASE_IMM:
2454 EMIT_STORE_MEMBASE_IMM (ins, sth);
2456 case OP_STORE_MEMBASE_IMM:
2457 EMIT_STORE_MEMBASE_IMM (ins, sti);
2459 case OP_STOREI4_MEMBASE_IMM:
2460 EMIT_STORE_MEMBASE_IMM (ins, st);
2462 case OP_STOREI8_MEMBASE_IMM:
2464 EMIT_STORE_MEMBASE_IMM (ins, stx);
2466 /* Only generated by peephole opts */
2467 g_assert ((ins->inst_offset % 8) == 0);
2468 g_assert (ins->inst_imm == 0);
2469 EMIT_STORE_MEMBASE_IMM (ins, stx);
2472 case OP_STOREI1_MEMBASE_REG:
2473 EMIT_STORE_MEMBASE_REG (ins, stb);
2475 case OP_STOREI2_MEMBASE_REG:
2476 EMIT_STORE_MEMBASE_REG (ins, sth);
2478 case OP_STOREI4_MEMBASE_REG:
2479 EMIT_STORE_MEMBASE_REG (ins, st);
2481 case OP_STOREI8_MEMBASE_REG:
2483 EMIT_STORE_MEMBASE_REG (ins, stx);
2485 /* Only used by OP_MEMSET */
2486 EMIT_STORE_MEMBASE_REG (ins, std);
2489 case OP_STORE_MEMBASE_REG:
2490 EMIT_STORE_MEMBASE_REG (ins, sti);
2493 sparc_set (code, ins->inst_c0, ins->dreg);
2494 sparc_ld (code, ins->dreg, sparc_g0, ins->dreg);
2496 case OP_LOADI4_MEMBASE:
2498 EMIT_LOAD_MEMBASE (ins, ldsw);
2500 EMIT_LOAD_MEMBASE (ins, ld);
2503 case OP_LOADU4_MEMBASE:
2504 EMIT_LOAD_MEMBASE (ins, ld);
2506 case OP_LOADU1_MEMBASE:
2507 EMIT_LOAD_MEMBASE (ins, ldub);
2509 case OP_LOADI1_MEMBASE:
2510 EMIT_LOAD_MEMBASE (ins, ldsb);
2512 case OP_LOADU2_MEMBASE:
2513 EMIT_LOAD_MEMBASE (ins, lduh);
2515 case OP_LOADI2_MEMBASE:
2516 EMIT_LOAD_MEMBASE (ins, ldsh);
2518 case OP_LOAD_MEMBASE:
2520 EMIT_LOAD_MEMBASE (ins, ldx);
2522 EMIT_LOAD_MEMBASE (ins, ld);
2526 case OP_LOADI8_MEMBASE:
2527 EMIT_LOAD_MEMBASE (ins, ldx);
2530 case OP_ICONV_TO_I1:
2531 sparc_sll_imm (code, ins->sreg1, 24, sparc_o7);
2532 sparc_sra_imm (code, sparc_o7, 24, ins->dreg);
2534 case OP_ICONV_TO_I2:
2535 sparc_sll_imm (code, ins->sreg1, 16, sparc_o7);
2536 sparc_sra_imm (code, sparc_o7, 16, ins->dreg);
2538 case OP_ICONV_TO_U1:
2539 sparc_and_imm (code, FALSE, ins->sreg1, 0xff, ins->dreg);
2541 case OP_ICONV_TO_U2:
2542 sparc_sll_imm (code, ins->sreg1, 16, sparc_o7);
2543 sparc_srl_imm (code, sparc_o7, 16, ins->dreg);
2545 case OP_LCONV_TO_OVF_U4:
2546 case OP_ICONV_TO_OVF_U4:
2547 /* Only used on V9 */
2548 sparc_cmp_imm (code, ins->sreg1, 0);
2549 mono_add_patch_info (cfg, (guint8*)(code) - (cfg)->native_code,
2550 MONO_PATCH_INFO_EXC, "OverflowException");
2551 sparc_branchp (code, 0, sparc_bl, sparc_xcc_short, 0, 0);
2553 sparc_set (code, 1, sparc_o7);
2554 sparc_sllx_imm (code, sparc_o7, 32, sparc_o7);
2555 sparc_cmp (code, ins->sreg1, sparc_o7);
2556 mono_add_patch_info (cfg, (guint8*)(code) - (cfg)->native_code,
2557 MONO_PATCH_INFO_EXC, "OverflowException");
2558 sparc_branchp (code, 0, sparc_bge, sparc_xcc_short, 0, 0);
2560 sparc_mov_reg_reg (code, ins->sreg1, ins->dreg);
2562 case OP_LCONV_TO_OVF_I4_UN:
2563 case OP_ICONV_TO_OVF_I4_UN:
2564 /* Only used on V9 */
2570 sparc_cmp (code, ins->sreg1, ins->sreg2);
2572 case OP_COMPARE_IMM:
2573 case OP_ICOMPARE_IMM:
2574 if (sparc_is_imm13 (ins->inst_imm))
2575 sparc_cmp_imm (code, ins->sreg1, ins->inst_imm);
2577 sparc_set (code, ins->inst_imm, sparc_o7);
2578 sparc_cmp (code, ins->sreg1, sparc_o7);
2583 * gdb does not like encountering 'ta 1' in the debugged code. So
2584 * instead of emitting a trap, we emit a call a C function and place a
2587 //sparc_ta (code, 1);
2588 mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_ABS, mono_break);
2593 sparc_add (code, TRUE, ins->sreg1, ins->sreg2, ins->dreg);
2596 sparc_add (code, FALSE, ins->sreg1, ins->sreg2, ins->dreg);
2601 /* according to inssel-long32.brg, this should set cc */
2602 EMIT_ALU_IMM (ins, add, TRUE);
2606 /* according to inssel-long32.brg, this should set cc */
2607 sparc_addx (code, TRUE, ins->sreg1, ins->sreg2, ins->dreg);
2611 EMIT_ALU_IMM (ins, addx, TRUE);
2615 sparc_sub (code, TRUE, ins->sreg1, ins->sreg2, ins->dreg);
2618 sparc_sub (code, FALSE, ins->sreg1, ins->sreg2, ins->dreg);
2623 /* according to inssel-long32.brg, this should set cc */
2624 EMIT_ALU_IMM (ins, sub, TRUE);
2628 /* according to inssel-long32.brg, this should set cc */
2629 sparc_subx (code, TRUE, ins->sreg1, ins->sreg2, ins->dreg);
2633 EMIT_ALU_IMM (ins, subx, TRUE);
2636 sparc_and (code, FALSE, ins->sreg1, ins->sreg2, ins->dreg);
2640 EMIT_ALU_IMM (ins, and, FALSE);
2643 /* Sign extend sreg1 into %y */
2644 sparc_sra_imm (code, ins->sreg1, 31, sparc_o7);
2645 sparc_wry (code, sparc_o7, sparc_g0);
2646 sparc_sdiv (code, TRUE, ins->sreg1, ins->sreg2, ins->dreg);
2647 EMIT_COND_SYSTEM_EXCEPTION_GENERAL (code, sparc_boverflow, "ArithmeticException", TRUE, sparc_icc_short);
2650 sparc_wry (code, sparc_g0, sparc_g0);
2651 sparc_udiv (code, FALSE, ins->sreg1, ins->sreg2, ins->dreg);
2657 /* Transform division into a shift */
2658 for (i = 1; i < 30; ++i) {
2660 if (ins->inst_imm == imm)
2666 sparc_srl_imm (code, ins->sreg1, 31, sparc_o7);
2667 sparc_add (code, FALSE, ins->sreg1, sparc_o7, ins->dreg);
2668 sparc_sra_imm (code, ins->dreg, 1, ins->dreg);
2671 /* http://compilers.iecc.com/comparch/article/93-04-079 */
2672 sparc_sra_imm (code, ins->sreg1, 31, sparc_o7);
2673 sparc_srl_imm (code, sparc_o7, 32 - i, sparc_o7);
2674 sparc_add (code, FALSE, ins->sreg1, sparc_o7, ins->dreg);
2675 sparc_sra_imm (code, ins->dreg, i, ins->dreg);
2679 /* Sign extend sreg1 into %y */
2680 sparc_sra_imm (code, ins->sreg1, 31, sparc_o7);
2681 sparc_wry (code, sparc_o7, sparc_g0);
2682 EMIT_ALU_IMM (ins, sdiv, TRUE);
2683 EMIT_COND_SYSTEM_EXCEPTION_GENERAL (code, sparc_boverflow, "ArithmeticException", TRUE, sparc_icc_short);
2687 case OP_IDIV_UN_IMM:
2688 sparc_wry (code, sparc_g0, sparc_g0);
2689 EMIT_ALU_IMM (ins, udiv, FALSE);
2692 /* Sign extend sreg1 into %y */
2693 sparc_sra_imm (code, ins->sreg1, 31, sparc_o7);
2694 sparc_wry (code, sparc_o7, sparc_g0);
2695 sparc_sdiv (code, TRUE, ins->sreg1, ins->sreg2, sparc_o7);
2696 EMIT_COND_SYSTEM_EXCEPTION_GENERAL (code, sparc_boverflow, "ArithmeticException", TRUE, sparc_icc_short);
2697 sparc_smul (code, FALSE, ins->sreg2, sparc_o7, sparc_o7);
2698 sparc_sub (code, FALSE, ins->sreg1, sparc_o7, ins->dreg);
2701 sparc_wry (code, sparc_g0, sparc_g0);
2702 sparc_udiv (code, FALSE, ins->sreg1, ins->sreg2, sparc_o7);
2703 sparc_umul (code, FALSE, ins->sreg2, sparc_o7, sparc_o7);
2704 sparc_sub (code, FALSE, ins->sreg1, sparc_o7, ins->dreg);
2708 /* Sign extend sreg1 into %y */
2709 sparc_sra_imm (code, ins->sreg1, 31, sparc_o7);
2710 sparc_wry (code, sparc_o7, sparc_g0);
2711 if (!sparc_is_imm13 (ins->inst_imm)) {
2712 sparc_set (code, ins->inst_imm, GP_SCRATCH_REG);
2713 sparc_sdiv (code, TRUE, ins->sreg1, GP_SCRATCH_REG, sparc_o7);
2714 EMIT_COND_SYSTEM_EXCEPTION_GENERAL (code, sparc_boverflow, "ArithmeticException", TRUE, sparc_icc_short);
2715 sparc_smul (code, FALSE, sparc_o7, GP_SCRATCH_REG, sparc_o7);
2718 sparc_sdiv_imm (code, TRUE, ins->sreg1, ins->inst_imm, sparc_o7);
2719 EMIT_COND_SYSTEM_EXCEPTION_GENERAL (code, sparc_boverflow, "ArithmeticException", TRUE, sparc_icc_short);
2720 sparc_smul_imm (code, FALSE, sparc_o7, ins->inst_imm, sparc_o7);
2722 sparc_sub (code, FALSE, ins->sreg1, sparc_o7, ins->dreg);
2724 case OP_IREM_UN_IMM:
2725 sparc_set (code, ins->inst_imm, GP_SCRATCH_REG);
2726 sparc_wry (code, sparc_g0, sparc_g0);
2727 sparc_udiv (code, FALSE, ins->sreg1, GP_SCRATCH_REG, sparc_o7);
2728 sparc_umul (code, FALSE, GP_SCRATCH_REG, sparc_o7, sparc_o7);
2729 sparc_sub (code, FALSE, ins->sreg1, sparc_o7, ins->dreg);
2732 sparc_or (code, FALSE, ins->sreg1, ins->sreg2, ins->dreg);
2736 EMIT_ALU_IMM (ins, or, FALSE);
2739 sparc_xor (code, FALSE, ins->sreg1, ins->sreg2, ins->dreg);
2743 EMIT_ALU_IMM (ins, xor, FALSE);
2746 sparc_sll (code, ins->sreg1, ins->sreg2, ins->dreg);
2750 if (ins->inst_imm < (1 << 5))
2751 sparc_sll_imm (code, ins->sreg1, ins->inst_imm, ins->dreg);
2753 sparc_set (code, ins->inst_imm, sparc_o7);
2754 sparc_sll (code, ins->sreg1, sparc_o7, ins->dreg);
2758 sparc_sra (code, ins->sreg1, ins->sreg2, ins->dreg);
2762 if (ins->inst_imm < (1 << 5))
2763 sparc_sra_imm (code, ins->sreg1, ins->inst_imm, ins->dreg);
2765 sparc_set (code, ins->inst_imm, sparc_o7);
2766 sparc_sra (code, ins->sreg1, sparc_o7, ins->dreg);
2770 case OP_ISHR_UN_IMM:
2771 if (ins->inst_imm < (1 << 5))
2772 sparc_srl_imm (code, ins->sreg1, ins->inst_imm, ins->dreg);
2774 sparc_set (code, ins->inst_imm, sparc_o7);
2775 sparc_srl (code, ins->sreg1, sparc_o7, ins->dreg);
2779 sparc_srl (code, ins->sreg1, ins->sreg2, ins->dreg);
2782 sparc_sllx (code, ins->sreg1, ins->sreg2, ins->dreg);
2785 if (ins->inst_imm < (1 << 6))
2786 sparc_sllx_imm (code, ins->sreg1, ins->inst_imm, ins->dreg);
2788 sparc_set (code, ins->inst_imm, sparc_o7);
2789 sparc_sllx (code, ins->sreg1, sparc_o7, ins->dreg);
2793 sparc_srax (code, ins->sreg1, ins->sreg2, ins->dreg);
2796 if (ins->inst_imm < (1 << 6))
2797 sparc_srax_imm (code, ins->sreg1, ins->inst_imm, ins->dreg);
2799 sparc_set (code, ins->inst_imm, sparc_o7);
2800 sparc_srax (code, ins->sreg1, sparc_o7, ins->dreg);
2804 sparc_srlx (code, ins->sreg1, ins->sreg2, ins->dreg);
2806 case OP_LSHR_UN_IMM:
2807 if (ins->inst_imm < (1 << 6))
2808 sparc_srlx_imm (code, ins->sreg1, ins->inst_imm, ins->dreg);
2810 sparc_set (code, ins->inst_imm, sparc_o7);
2811 sparc_srlx (code, ins->sreg1, sparc_o7, ins->dreg);
2815 /* can't use sparc_not */
2816 sparc_xnor (code, FALSE, ins->sreg1, sparc_g0, ins->dreg);
2819 /* can't use sparc_neg */
2820 sparc_sub (code, FALSE, sparc_g0, ins->sreg1, ins->dreg);
2823 sparc_smul (code, FALSE, ins->sreg1, ins->sreg2, ins->dreg);
2829 if ((ins->inst_imm == 1) && (ins->sreg1 == ins->dreg))
2832 /* Transform multiplication into a shift */
2833 for (i = 0; i < 30; ++i) {
2835 if (ins->inst_imm == imm)
2839 sparc_sll_imm (code, ins->sreg1, i, ins->dreg);
2841 EMIT_ALU_IMM (ins, smul, FALSE);
2845 sparc_smul (code, TRUE, ins->sreg1, ins->sreg2, ins->dreg);
2846 sparc_rdy (code, sparc_g1);
2847 sparc_sra_imm (code, ins->dreg, 31, sparc_o7);
2848 sparc_cmp (code, sparc_g1, sparc_o7);
2849 EMIT_COND_SYSTEM_EXCEPTION_GENERAL (ins, sparc_bne, "OverflowException", TRUE, sparc_icc_short);
2851 case OP_IMUL_OVF_UN:
2852 sparc_umul (code, TRUE, ins->sreg1, ins->sreg2, ins->dreg);
2853 sparc_rdy (code, sparc_o7);
2854 sparc_cmp (code, sparc_o7, sparc_g0);
2855 EMIT_COND_SYSTEM_EXCEPTION_GENERAL (ins, sparc_bne, "OverflowException", TRUE, sparc_icc_short);
2858 sparc_set (code, ins->inst_c0, ins->dreg);
2861 sparc_set (code, ins->inst_l, ins->dreg);
2864 mono_add_patch_info (cfg, offset, (MonoJumpInfoType)ins->inst_i1, ins->inst_p0);
2865 sparc_set_template (code, ins->dreg);
2868 mono_add_patch_info (cfg, offset, (MonoJumpInfoType)ins->inst_i1, ins->inst_p0);
2869 sparc_set_template (code, ins->dreg);
2871 case OP_ICONV_TO_I4:
2872 case OP_ICONV_TO_U4:
2874 if (ins->sreg1 != ins->dreg)
2875 sparc_mov_reg_reg (code, ins->sreg1, ins->dreg);
2879 if (ins->sreg1 != ins->dreg)
2880 sparc_fmovd (code, ins->sreg1, ins->dreg);
2882 sparc_fmovs (code, ins->sreg1, ins->dreg);
2883 sparc_fmovs (code, ins->sreg1 + 1, ins->dreg + 1);
2887 if (cfg->method->save_lmf)
2890 code = emit_load_volatile_arguments (cfg, code);
2891 mono_add_patch_info (cfg, (guint8*)code - cfg->native_code, MONO_PATCH_INFO_METHOD_JUMP, ins->inst_p0);
2892 sparc_set_template (code, sparc_o7);
2893 sparc_jmpl (code, sparc_o7, sparc_g0, sparc_g0);
2894 /* Restore parent frame in delay slot */
2895 sparc_restore_imm (code, sparc_g0, 0, sparc_g0);
2898 /* ensure ins->sreg1 is not NULL */
2899 /* Might be misaligned in case of vtypes so use a byte load */
2900 sparc_ldsb_imm (code, ins->sreg1, 0, sparc_g0);
2903 sparc_add_imm (code, FALSE, sparc_fp, cfg->sig_cookie, sparc_o7);
2904 sparc_sti_imm (code, sparc_o7, ins->sreg1, 0);
2912 call = (MonoCallInst*)ins;
2913 g_assert (!call->virtual);
2914 code = emit_save_sp_to_lmf (cfg, code);
2915 if (ins->flags & MONO_INST_HAS_METHOD)
2916 code = emit_call (cfg, code, MONO_PATCH_INFO_METHOD, call->method);
2918 code = emit_call (cfg, code, MONO_PATCH_INFO_ABS, call->fptr);
2920 code = emit_vret_token (cfg->generic_sharing_context, ins, code);
2921 code = emit_move_return_value (ins, code);
2927 case OP_VOIDCALL_REG:
2929 call = (MonoCallInst*)ins;
2930 code = emit_save_sp_to_lmf (cfg, code);
2931 sparc_jmpl (code, ins->sreg1, sparc_g0, sparc_callsite);
2933 * We emit a special kind of nop in the delay slot to tell the
2934 * trampoline code that this is a virtual call, thus an unbox
2935 * trampoline might need to be called.
2938 sparc_or_imm (code, FALSE, sparc_g0, 0xca, sparc_g0);
2942 code = emit_vret_token (cfg->generic_sharing_context, ins, code);
2943 code = emit_move_return_value (ins, code);
2945 case OP_FCALL_MEMBASE:
2946 case OP_LCALL_MEMBASE:
2947 case OP_VCALL_MEMBASE:
2948 case OP_VCALL2_MEMBASE:
2949 case OP_VOIDCALL_MEMBASE:
2950 case OP_CALL_MEMBASE:
2951 call = (MonoCallInst*)ins;
2952 code = emit_save_sp_to_lmf (cfg, code);
2953 if (sparc_is_imm13 (ins->inst_offset)) {
2954 sparc_ldi_imm (code, ins->inst_basereg, ins->inst_offset, sparc_o7);
2956 sparc_set (code, ins->inst_offset, sparc_o7);
2957 sparc_ldi (code, ins->inst_basereg, sparc_o7, sparc_o7);
2959 sparc_jmpl (code, sparc_o7, sparc_g0, sparc_callsite);
2961 sparc_or_imm (code, FALSE, sparc_g0, 0xca, sparc_g0);
2965 code = emit_vret_token (cfg->generic_sharing_context, ins, code);
2966 code = emit_move_return_value (ins, code);
2969 if (mono_method_signature (cfg->method)->ret->type == MONO_TYPE_R4)
2970 sparc_fdtos (code, ins->sreg1, sparc_f0);
2973 sparc_fmovd (code, ins->sreg1, ins->dreg);
2975 /* FIXME: Why not use fmovd ? */
2976 sparc_fmovs (code, ins->sreg1, ins->dreg);
2977 sparc_fmovs (code, ins->sreg1 + 1, ins->dreg + 1);
2985 #ifdef MONO_ARCH_SIGSEGV_ON_ALTSTACK
2986 /* Perform stack touching */
2990 /* Keep alignment */
2991 /* Add 4 to compensate for the rounding of localloc_offset */
2992 sparc_add_imm (code, FALSE, ins->sreg1, 4 + MONO_ARCH_LOCALLOC_ALIGNMENT - 1, ins->dreg);
2993 sparc_set (code, ~(MONO_ARCH_LOCALLOC_ALIGNMENT - 1), sparc_o7);
2994 sparc_and (code, FALSE, ins->dreg, sparc_o7, ins->dreg);
2996 if ((ins->flags & MONO_INST_INIT) && (ins->sreg1 == ins->dreg)) {
2998 size_reg = sparc_g4;
3000 size_reg = sparc_g1;
3002 sparc_mov_reg_reg (code, ins->dreg, size_reg);
3005 size_reg = ins->sreg1;
3007 sparc_sub (code, FALSE, sparc_sp, ins->dreg, ins->dreg);
3008 /* Keep %sp valid at all times */
3009 sparc_mov_reg_reg (code, ins->dreg, sparc_sp);
3010 /* Round localloc_offset too so the result is at least 8 aligned */
3011 offset2 = ALIGN_TO (cfg->arch.localloc_offset, 8);
3012 g_assert (sparc_is_imm13 (MONO_SPARC_STACK_BIAS + offset2));
3013 sparc_add_imm (code, FALSE, ins->dreg, MONO_SPARC_STACK_BIAS + offset2, ins->dreg);
3015 if (ins->flags & MONO_INST_INIT) {
3017 /* Initialize memory region */
3018 sparc_cmp_imm (code, size_reg, 0);
3020 sparc_branch (code, 0, sparc_be, 0);
3022 sparc_set (code, 0, sparc_o7);
3023 sparc_sub_imm (code, 0, size_reg, sparcv9 ? 8 : 4, size_reg);
3027 sparc_stx (code, sparc_g0, ins->dreg, sparc_o7);
3029 sparc_st (code, sparc_g0, ins->dreg, sparc_o7);
3030 sparc_cmp (code, sparc_o7, size_reg);
3032 sparc_branch (code, 0, sparc_bl, 0);
3033 sparc_patch (br [2], br [1]);
3035 sparc_add_imm (code, 0, sparc_o7, sparcv9 ? 8 : 4, sparc_o7);
3036 sparc_patch (br [0], code);
3040 case OP_LOCALLOC_IMM: {
3041 gint32 offset = ins->inst_imm;
3044 #ifdef MONO_ARCH_SIGSEGV_ON_ALTSTACK
3045 /* Perform stack touching */
3049 /* To compensate for the rounding of localloc_offset */
3050 offset += sizeof (gpointer);
3051 offset = ALIGN_TO (offset, MONO_ARCH_FRAME_ALIGNMENT);
3052 if (sparc_is_imm13 (offset))
3053 sparc_sub_imm (code, FALSE, sparc_sp, offset, sparc_sp);
3055 sparc_set (code, offset, sparc_o7);
3056 sparc_sub (code, FALSE, sparc_sp, sparc_o7, sparc_sp);
3058 /* Round localloc_offset too so the result is at least 8 aligned */
3059 offset2 = ALIGN_TO (cfg->arch.localloc_offset, 8);
3060 g_assert (sparc_is_imm13 (MONO_SPARC_STACK_BIAS + offset2));
3061 sparc_add_imm (code, FALSE, sparc_sp, MONO_SPARC_STACK_BIAS + offset2, ins->dreg);
3062 if ((ins->flags & MONO_INST_INIT) && (offset > 0)) {
3068 while (i < offset) {
3070 sparc_stx_imm (code, sparc_g0, ins->dreg, i);
3074 sparc_st_imm (code, sparc_g0, ins->dreg, i);
3080 sparc_set (code, offset, sparc_o7);
3081 sparc_sub_imm (code, 0, sparc_o7, sparcv9 ? 8 : 4, sparc_o7);
3082 /* beginning of loop */
3085 sparc_stx (code, sparc_g0, ins->dreg, sparc_o7);
3087 sparc_st (code, sparc_g0, ins->dreg, sparc_o7);
3088 sparc_cmp_imm (code, sparc_o7, 0);
3090 sparc_branch (code, 0, sparc_bne, 0);
3092 sparc_sub_imm (code, 0, sparc_o7, sparcv9 ? 8 : 4, sparc_o7);
3093 sparc_patch (br [1], br [0]);
3099 sparc_mov_reg_reg (code, ins->sreg1, sparc_o0);
3100 mono_add_patch_info (cfg, (guint8*)code - cfg->native_code, MONO_PATCH_INFO_INTERNAL_METHOD,
3101 (gpointer)"mono_arch_throw_exception");
3105 sparc_mov_reg_reg (code, ins->sreg1, sparc_o0);
3106 mono_add_patch_info (cfg, (guint8*)code - cfg->native_code, MONO_PATCH_INFO_INTERNAL_METHOD,
3107 (gpointer)"mono_arch_rethrow_exception");
3110 case OP_START_HANDLER: {
3112 * The START_HANDLER instruction marks the beginning of a handler
3113 * block. It is called using a call instruction, so %o7 contains
3114 * the return address. Since the handler executes in the same stack
3115 * frame as the method itself, we can't use save/restore to save
3116 * the return address. Instead, we save it into a dedicated
3119 MonoInst *spvar = mono_find_spvar_for_region (cfg, bb->region);
3120 if (!sparc_is_imm13 (spvar->inst_offset)) {
3121 sparc_set (code, spvar->inst_offset, GP_SCRATCH_REG);
3122 sparc_sti (code, sparc_o7, spvar->inst_basereg, GP_SCRATCH_REG);
3125 sparc_sti_imm (code, sparc_o7, spvar->inst_basereg, spvar->inst_offset);
3128 case OP_ENDFILTER: {
3129 MonoInst *spvar = mono_find_spvar_for_region (cfg, bb->region);
3130 if (!sparc_is_imm13 (spvar->inst_offset)) {
3131 sparc_set (code, spvar->inst_offset, GP_SCRATCH_REG);
3132 sparc_ldi (code, spvar->inst_basereg, GP_SCRATCH_REG, sparc_o7);
3135 sparc_ldi_imm (code, spvar->inst_basereg, spvar->inst_offset, sparc_o7);
3136 sparc_jmpl_imm (code, sparc_o7, 8, sparc_g0);
3138 sparc_mov_reg_reg (code, ins->sreg1, sparc_o0);
3141 case OP_ENDFINALLY: {
3142 MonoInst *spvar = mono_find_spvar_for_region (cfg, bb->region);
3143 if (!sparc_is_imm13 (spvar->inst_offset)) {
3144 sparc_set (code, spvar->inst_offset, GP_SCRATCH_REG);
3145 sparc_ldi (code, spvar->inst_basereg, GP_SCRATCH_REG, sparc_o7);
3148 sparc_ldi_imm (code, spvar->inst_basereg, spvar->inst_offset, sparc_o7);
3149 sparc_jmpl_imm (code, sparc_o7, 8, sparc_g0);
3153 case OP_CALL_HANDLER:
3154 mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_BB, ins->inst_target_bb);
3155 /* This is a jump inside the method, so call_simple works even on V9 */
3156 sparc_call_simple (code, 0);
3158 mono_cfg_add_try_hole (cfg, ins->inst_eh_block, code, bb);
3161 ins->inst_c0 = (guint8*)code - cfg->native_code;
3163 case OP_RELAXED_NOP:
3166 case OP_DUMMY_STORE:
3167 case OP_NOT_REACHED:
3171 //g_print ("target: %p, next: %p, curr: %p, last: %p\n", ins->inst_target_bb, bb->next_bb, ins, bb->last_ins);
3172 if ((ins->inst_target_bb == bb->next_bb) && ins == bb->last_ins)
3174 if (ins->inst_target_bb->native_offset) {
3175 gint32 disp = (ins->inst_target_bb->native_offset - ((guint8*)code - cfg->native_code)) >> 2;
3176 g_assert (sparc_is_imm22 (disp));
3177 sparc_branch (code, 1, sparc_ba, disp);
3179 mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_BB, ins->inst_target_bb);
3180 sparc_branch (code, 1, sparc_ba, 0);
3185 sparc_jmp (code, ins->sreg1, sparc_g0);
3193 if (v64 && (cfg->opt & MONO_OPT_CMOV)) {
3194 sparc_clr_reg (code, ins->dreg);
3195 sparc_movcc_imm (code, sparc_xcc, opcode_to_sparc_cond (ins->opcode), 1, ins->dreg);
3198 sparc_clr_reg (code, ins->dreg);
3200 sparc_branchp (code, 1, opcode_to_sparc_cond (ins->opcode), DEFAULT_ICC, 0, 2);
3202 sparc_branch (code, 1, opcode_to_sparc_cond (ins->opcode), 2);
3205 sparc_set (code, 1, ins->dreg);
3213 if (v64 && (cfg->opt & MONO_OPT_CMOV)) {
3214 sparc_clr_reg (code, ins->dreg);
3215 sparc_movcc_imm (code, sparc_icc, opcode_to_sparc_cond (ins->opcode), 1, ins->dreg);
3218 sparc_clr_reg (code, ins->dreg);
3219 sparc_branchp (code, 1, opcode_to_sparc_cond (ins->opcode), sparc_icc_short, 0, 2);
3221 sparc_set (code, 1, ins->dreg);
3224 case OP_COND_EXC_EQ:
3225 case OP_COND_EXC_NE_UN:
3226 case OP_COND_EXC_LT:
3227 case OP_COND_EXC_LT_UN:
3228 case OP_COND_EXC_GT:
3229 case OP_COND_EXC_GT_UN:
3230 case OP_COND_EXC_GE:
3231 case OP_COND_EXC_GE_UN:
3232 case OP_COND_EXC_LE:
3233 case OP_COND_EXC_LE_UN:
3234 case OP_COND_EXC_OV:
3235 case OP_COND_EXC_NO:
3237 case OP_COND_EXC_NC:
3238 case OP_COND_EXC_IEQ:
3239 case OP_COND_EXC_INE_UN:
3240 case OP_COND_EXC_ILT:
3241 case OP_COND_EXC_ILT_UN:
3242 case OP_COND_EXC_IGT:
3243 case OP_COND_EXC_IGT_UN:
3244 case OP_COND_EXC_IGE:
3245 case OP_COND_EXC_IGE_UN:
3246 case OP_COND_EXC_ILE:
3247 case OP_COND_EXC_ILE_UN:
3248 case OP_COND_EXC_IOV:
3249 case OP_COND_EXC_INO:
3250 case OP_COND_EXC_IC:
3251 case OP_COND_EXC_INC:
3255 EMIT_COND_SYSTEM_EXCEPTION (ins, opcode_to_sparc_cond (ins->opcode), ins->inst_p1);
3258 case OP_SPARC_COND_EXC_EQZ:
3259 EMIT_COND_SYSTEM_EXCEPTION_BPR (ins, brz, ins->inst_p1);
3261 case OP_SPARC_COND_EXC_GEZ:
3262 EMIT_COND_SYSTEM_EXCEPTION_BPR (ins, brgez, ins->inst_p1);
3264 case OP_SPARC_COND_EXC_GTZ:
3265 EMIT_COND_SYSTEM_EXCEPTION_BPR (ins, brgz, ins->inst_p1);
3267 case OP_SPARC_COND_EXC_LEZ:
3268 EMIT_COND_SYSTEM_EXCEPTION_BPR (ins, brlez, ins->inst_p1);
3270 case OP_SPARC_COND_EXC_LTZ:
3271 EMIT_COND_SYSTEM_EXCEPTION_BPR (ins, brlz, ins->inst_p1);
3273 case OP_SPARC_COND_EXC_NEZ:
3274 EMIT_COND_SYSTEM_EXCEPTION_BPR (ins, brnz, ins->inst_p1);
3288 EMIT_COND_BRANCH_PREDICTED (ins, opcode_to_sparc_cond (ins->opcode), 1, 1);
3290 EMIT_COND_BRANCH (ins, opcode_to_sparc_cond (ins->opcode), 1, 1);
3295 EMIT_COND_BRANCH_BPR (ins, brz, 1, 1, 1);
3297 case OP_SPARC_BRLEZ:
3298 EMIT_COND_BRANCH_BPR (ins, brlez, 1, 1, 1);
3301 EMIT_COND_BRANCH_BPR (ins, brlz, 1, 1, 1);
3304 EMIT_COND_BRANCH_BPR (ins, brnz, 1, 1, 1);
3307 EMIT_COND_BRANCH_BPR (ins, brgz, 1, 1, 1);
3309 case OP_SPARC_BRGEZ:
3310 EMIT_COND_BRANCH_BPR (ins, brgez, 1, 1, 1);
3313 /* floating point opcodes */
3315 mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_R8, ins->inst_p0);
3317 sparc_set_template (code, sparc_o7);
3319 sparc_sethi (code, 0, sparc_o7);
3321 sparc_lddf_imm (code, sparc_o7, 0, ins->dreg);
3324 mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_R4, ins->inst_p0);
3326 sparc_set_template (code, sparc_o7);
3328 sparc_sethi (code, 0, sparc_o7);
3330 sparc_ldf_imm (code, sparc_o7, 0, FP_SCRATCH_REG);
3332 /* Extend to double */
3333 sparc_fstod (code, FP_SCRATCH_REG, ins->dreg);
3335 case OP_STORER8_MEMBASE_REG:
3336 if (!sparc_is_imm13 (ins->inst_offset + 4)) {
3337 sparc_set (code, ins->inst_offset, sparc_o7);
3338 /* SPARCV9 handles misaligned fp loads/stores */
3339 if (!v64 && (ins->inst_offset % 8)) {
3341 sparc_add (code, FALSE, ins->inst_destbasereg, sparc_o7, sparc_o7);
3342 sparc_stf (code, ins->sreg1, sparc_o7, sparc_g0);
3343 sparc_stf_imm (code, ins->sreg1 + 1, sparc_o7, 4);
3345 sparc_stdf (code, ins->sreg1, ins->inst_destbasereg, sparc_o7);
3348 if (!v64 && (ins->inst_offset % 8)) {
3350 sparc_stf_imm (code, ins->sreg1, ins->inst_destbasereg, ins->inst_offset);
3351 sparc_stf_imm (code, ins->sreg1 + 1, ins->inst_destbasereg, ins->inst_offset + 4);
3353 sparc_stdf_imm (code, ins->sreg1, ins->inst_destbasereg, ins->inst_offset);
3356 case OP_LOADR8_MEMBASE:
3357 EMIT_LOAD_MEMBASE (ins, lddf);
3359 case OP_STORER4_MEMBASE_REG:
3360 /* This requires a double->single conversion */
3361 sparc_fdtos (code, ins->sreg1, FP_SCRATCH_REG);
3362 if (!sparc_is_imm13 (ins->inst_offset)) {
3363 sparc_set (code, ins->inst_offset, sparc_o7);
3364 sparc_stf (code, FP_SCRATCH_REG, ins->inst_destbasereg, sparc_o7);
3367 sparc_stf_imm (code, FP_SCRATCH_REG, ins->inst_destbasereg, ins->inst_offset);
3369 case OP_LOADR4_MEMBASE: {
3370 /* ldf needs a single precision register */
3371 int dreg = ins->dreg;
3372 ins->dreg = FP_SCRATCH_REG;
3373 EMIT_LOAD_MEMBASE (ins, ldf);
3375 /* Extend to double */
3376 sparc_fstod (code, FP_SCRATCH_REG, ins->dreg);
3379 case OP_ICONV_TO_R4: {
3380 MonoInst *spill = cfg->arch.float_spill_slot;
3381 gint32 reg = spill->inst_basereg;
3382 gint32 offset = spill->inst_offset;
3384 g_assert (spill->opcode == OP_REGOFFSET);
3386 if (!sparc_is_imm13 (offset)) {
3387 sparc_set (code, offset, sparc_o7);
3388 sparc_stx (code, ins->sreg1, reg, offset);
3389 sparc_lddf (code, reg, offset, FP_SCRATCH_REG);
3391 sparc_stx_imm (code, ins->sreg1, reg, offset);
3392 sparc_lddf_imm (code, reg, offset, FP_SCRATCH_REG);
3394 sparc_fxtos (code, FP_SCRATCH_REG, FP_SCRATCH_REG);
3396 if (!sparc_is_imm13 (offset)) {
3397 sparc_set (code, offset, sparc_o7);
3398 sparc_st (code, ins->sreg1, reg, sparc_o7);
3399 sparc_ldf (code, reg, sparc_o7, FP_SCRATCH_REG);
3401 sparc_st_imm (code, ins->sreg1, reg, offset);
3402 sparc_ldf_imm (code, reg, offset, FP_SCRATCH_REG);
3404 sparc_fitos (code, FP_SCRATCH_REG, FP_SCRATCH_REG);
3406 sparc_fstod (code, FP_SCRATCH_REG, ins->dreg);
3409 case OP_ICONV_TO_R8: {
3410 MonoInst *spill = cfg->arch.float_spill_slot;
3411 gint32 reg = spill->inst_basereg;
3412 gint32 offset = spill->inst_offset;
3414 g_assert (spill->opcode == OP_REGOFFSET);
3417 if (!sparc_is_imm13 (offset)) {
3418 sparc_set (code, offset, sparc_o7);
3419 sparc_stx (code, ins->sreg1, reg, sparc_o7);
3420 sparc_lddf (code, reg, sparc_o7, FP_SCRATCH_REG);
3422 sparc_stx_imm (code, ins->sreg1, reg, offset);
3423 sparc_lddf_imm (code, reg, offset, FP_SCRATCH_REG);
3425 sparc_fxtod (code, FP_SCRATCH_REG, ins->dreg);
3427 if (!sparc_is_imm13 (offset)) {
3428 sparc_set (code, offset, sparc_o7);
3429 sparc_st (code, ins->sreg1, reg, sparc_o7);
3430 sparc_ldf (code, reg, sparc_o7, FP_SCRATCH_REG);
3432 sparc_st_imm (code, ins->sreg1, reg, offset);
3433 sparc_ldf_imm (code, reg, offset, FP_SCRATCH_REG);
3435 sparc_fitod (code, FP_SCRATCH_REG, ins->dreg);
3439 case OP_FCONV_TO_I1:
3440 case OP_FCONV_TO_U1:
3441 case OP_FCONV_TO_I2:
3442 case OP_FCONV_TO_U2:
3447 case OP_FCONV_TO_I4:
3448 case OP_FCONV_TO_U4: {
3449 MonoInst *spill = cfg->arch.float_spill_slot;
3450 gint32 reg = spill->inst_basereg;
3451 gint32 offset = spill->inst_offset;
3453 g_assert (spill->opcode == OP_REGOFFSET);
3455 sparc_fdtoi (code, ins->sreg1, FP_SCRATCH_REG);
3456 if (!sparc_is_imm13 (offset)) {
3457 sparc_set (code, offset, sparc_o7);
3458 sparc_stdf (code, FP_SCRATCH_REG, reg, sparc_o7);
3459 sparc_ld (code, reg, sparc_o7, ins->dreg);
3461 sparc_stdf_imm (code, FP_SCRATCH_REG, reg, offset);
3462 sparc_ld_imm (code, reg, offset, ins->dreg);
3465 switch (ins->opcode) {
3466 case OP_FCONV_TO_I1:
3467 case OP_FCONV_TO_U1:
3468 sparc_and_imm (code, 0, ins->dreg, 0xff, ins->dreg);
3470 case OP_FCONV_TO_I2:
3471 case OP_FCONV_TO_U2:
3472 sparc_set (code, 0xffff, sparc_o7);
3473 sparc_and (code, 0, ins->dreg, sparc_o7, ins->dreg);
3480 case OP_FCONV_TO_I8:
3481 case OP_FCONV_TO_U8:
3483 g_assert_not_reached ();
3485 case OP_FCONV_TO_R4:
3486 /* FIXME: Change precision ? */
3488 sparc_fmovd (code, ins->sreg1, ins->dreg);
3490 sparc_fmovs (code, ins->sreg1, ins->dreg);
3491 sparc_fmovs (code, ins->sreg1 + 1, ins->dreg + 1);
3494 case OP_LCONV_TO_R_UN: {
3496 g_assert_not_reached ();
3499 case OP_LCONV_TO_OVF_I:
3500 case OP_LCONV_TO_OVF_I4_2: {
3501 guint32 *br [3], *label [1];
3504 * Valid ints: 0xffffffff:0x80000000 to 0x00000000:0x7fffffff
3506 sparc_cmp_imm (code, ins->sreg1, 0);
3508 sparc_branch (code, 1, sparc_bneg, 0);
3512 /* ms word must be 0 */
3513 sparc_cmp_imm (code, ins->sreg2, 0);
3515 sparc_branch (code, 1, sparc_be, 0);
3520 EMIT_COND_SYSTEM_EXCEPTION (ins, sparc_ba, "OverflowException");
3523 sparc_patch (br [0], code);
3525 /* ms word must be 0xffffffff */
3526 sparc_cmp_imm (code, ins->sreg2, -1);
3528 sparc_branch (code, 1, sparc_bne, 0);
3530 sparc_patch (br [2], label [0]);
3533 sparc_patch (br [1], code);
3534 if (ins->sreg1 != ins->dreg)
3535 sparc_mov_reg_reg (code, ins->sreg1, ins->dreg);
3539 sparc_faddd (code, ins->sreg1, ins->sreg2, ins->dreg);
3542 sparc_fsubd (code, ins->sreg1, ins->sreg2, ins->dreg);
3545 sparc_fmuld (code, ins->sreg1, ins->sreg2, ins->dreg);
3548 sparc_fdivd (code, ins->sreg1, ins->sreg2, ins->dreg);
3552 sparc_fnegd (code, ins->sreg1, ins->dreg);
3554 /* FIXME: Why not use fnegd ? */
3555 sparc_fnegs (code, ins->sreg1, ins->dreg);
3559 sparc_fdivd (code, ins->sreg1, ins->sreg2, FP_SCRATCH_REG);
3560 sparc_fmuld (code, ins->sreg2, FP_SCRATCH_REG, FP_SCRATCH_REG);
3561 sparc_fsubd (code, ins->sreg1, FP_SCRATCH_REG, ins->dreg);
3564 sparc_fcmpd (code, ins->sreg1, ins->sreg2);
3571 sparc_fcmpd (code, ins->sreg1, ins->sreg2);
3572 sparc_clr_reg (code, ins->dreg);
3573 switch (ins->opcode) {
3576 sparc_fbranch (code, 1, opcode_to_sparc_cond (ins->opcode), 4);
3578 sparc_set (code, 1, ins->dreg);
3579 sparc_fbranch (code, 1, sparc_fbu, 2);
3581 sparc_set (code, 1, ins->dreg);
3584 sparc_fbranch (code, 1, opcode_to_sparc_cond (ins->opcode), 2);
3586 sparc_set (code, 1, ins->dreg);
3592 EMIT_FLOAT_COND_BRANCH (ins, opcode_to_sparc_cond (ins->opcode), 1, 1);
3595 /* clt.un + brfalse */
3597 sparc_fbranch (code, 1, sparc_fbul, 0);
3600 EMIT_FLOAT_COND_BRANCH (ins, sparc_fba, 1, 1);
3601 sparc_patch (p, (guint8*)code);
3605 /* cgt.un + brfalse */
3607 sparc_fbranch (code, 1, sparc_fbug, 0);
3610 EMIT_FLOAT_COND_BRANCH (ins, sparc_fba, 1, 1);
3611 sparc_patch (p, (guint8*)code);
3615 EMIT_FLOAT_COND_BRANCH (ins, sparc_fbne, 1, 1);
3616 EMIT_FLOAT_COND_BRANCH (ins, sparc_fbu, 1, 1);
3619 EMIT_FLOAT_COND_BRANCH (ins, sparc_fbl, 1, 1);
3620 EMIT_FLOAT_COND_BRANCH (ins, sparc_fbu, 1, 1);
3623 EMIT_FLOAT_COND_BRANCH (ins, sparc_fbg, 1, 1);
3624 EMIT_FLOAT_COND_BRANCH (ins, sparc_fbu, 1, 1);
3627 EMIT_FLOAT_COND_BRANCH (ins, sparc_fbge, 1, 1);
3628 EMIT_FLOAT_COND_BRANCH (ins, sparc_fbu, 1, 1);
3631 EMIT_FLOAT_COND_BRANCH (ins, sparc_fble, 1, 1);
3632 EMIT_FLOAT_COND_BRANCH (ins, sparc_fbu, 1, 1);
3635 MonoInst *spill = cfg->arch.float_spill_slot;
3636 gint32 reg = spill->inst_basereg;
3637 gint32 offset = spill->inst_offset;
3639 g_assert (spill->opcode == OP_REGOFFSET);
3641 if (!sparc_is_imm13 (offset)) {
3642 sparc_set (code, offset, sparc_o7);
3643 sparc_stdf (code, ins->sreg1, reg, sparc_o7);
3644 sparc_lduh (code, reg, sparc_o7, sparc_o7);
3646 sparc_stdf_imm (code, ins->sreg1, reg, offset);
3647 sparc_lduh_imm (code, reg, offset, sparc_o7);
3649 sparc_srl_imm (code, sparc_o7, 4, sparc_o7);
3650 sparc_and_imm (code, FALSE, sparc_o7, 2047, sparc_o7);
3651 sparc_cmp_imm (code, sparc_o7, 2047);
3652 EMIT_COND_SYSTEM_EXCEPTION (ins, sparc_be, "ArithmeticException");
3654 sparc_fmovd (code, ins->sreg1, ins->dreg);
3656 sparc_fmovs (code, ins->sreg1, ins->dreg);
3657 sparc_fmovs (code, ins->sreg1 + 1, ins->dreg + 1);
3662 case OP_MEMORY_BARRIER:
3663 sparc_membar (code, sparc_membar_all);
3668 g_warning ("unknown opcode %s in %s()\n", mono_inst_name (ins->opcode), __FUNCTION__);
3670 g_warning ("%s:%d: unknown opcode %s\n", __FILE__, __LINE__, mono_inst_name (ins->opcode));
3672 g_assert_not_reached ();
3675 if ((((guint8*)code) - code_start) > max_len) {
3676 g_warning ("wrong maximal instruction length of instruction %s (expected %d, got %d)",
3677 mono_inst_name (ins->opcode), max_len, ((guint8*)code) - code_start);
3678 g_assert_not_reached ();
3686 cfg->code_len = (guint8*)code - cfg->native_code;
3690 mono_arch_register_lowlevel_calls (void)
3692 mono_register_jit_icall (mono_arch_get_lmf_addr, "mono_arch_get_lmf_addr", NULL, TRUE);
3696 mono_arch_patch_code (MonoMethod *method, MonoDomain *domain, guint8 *code, MonoJumpInfo *ji, MonoCodeManager *dyn_code_mp, gboolean run_cctors)
3698 MonoJumpInfo *patch_info;
3700 /* FIXME: Move part of this to arch independent code */
3701 for (patch_info = ji; patch_info; patch_info = patch_info->next) {
3702 unsigned char *ip = patch_info->ip.i + code;
3705 target = mono_resolve_patch_target (method, domain, code, patch_info, run_cctors);
3707 switch (patch_info->type) {
3708 case MONO_PATCH_INFO_NONE:
3710 case MONO_PATCH_INFO_CLASS_INIT: {
3711 guint32 *ip2 = (guint32*)ip;
3712 /* Might already been changed to a nop */
3714 sparc_set_template (ip2, sparc_o7);
3715 sparc_jmpl (ip2, sparc_o7, sparc_g0, sparc_o7);
3717 sparc_call_simple (ip2, 0);
3721 case MONO_PATCH_INFO_METHOD_JUMP: {
3722 guint32 *ip2 = (guint32*)ip;
3723 /* Might already been patched */
3724 sparc_set_template (ip2, sparc_o7);
3730 sparc_patch ((guint32*)ip, target);
3735 mono_arch_instrument_prolog (MonoCompile *cfg, void *func, void *p, gboolean enable_arguments)
3738 guint32 *code = (guint32*)p;
3739 MonoMethodSignature *sig = mono_method_signature (cfg->method);
3742 /* Save registers to stack */
3743 for (i = 0; i < 6; ++i)
3744 sparc_sti_imm (code, sparc_i0 + i, sparc_fp, ARGS_OFFSET + (i * sizeof (gpointer)));
3746 cinfo = get_call_info (cfg, sig, FALSE);
3748 /* Save float regs on V9, since they are caller saved */
3749 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
3750 ArgInfo *ainfo = cinfo->args + i;
3751 gint32 stack_offset;
3753 stack_offset = ainfo->offset + ARGS_OFFSET;
3755 if (ainfo->storage == ArgInFloatReg) {
3756 if (!sparc_is_imm13 (stack_offset))
3758 sparc_stf_imm (code, ainfo->reg, sparc_fp, stack_offset);
3760 else if (ainfo->storage == ArgInDoubleReg) {
3761 /* The offset is guaranteed to be aligned by the ABI rules */
3762 sparc_stdf_imm (code, ainfo->reg, sparc_fp, stack_offset);
3766 sparc_set (code, cfg->method, sparc_o0);
3767 sparc_add_imm (code, FALSE, sparc_fp, MONO_SPARC_STACK_BIAS, sparc_o1);
3769 mono_add_patch_info (cfg, (guint8*)code-cfg->native_code, MONO_PATCH_INFO_ABS, func);
3772 /* Restore float regs on V9 */
3773 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
3774 ArgInfo *ainfo = cinfo->args + i;
3775 gint32 stack_offset;
3777 stack_offset = ainfo->offset + ARGS_OFFSET;
3779 if (ainfo->storage == ArgInFloatReg) {
3780 if (!sparc_is_imm13 (stack_offset))
3782 sparc_ldf_imm (code, sparc_fp, stack_offset, ainfo->reg);
3784 else if (ainfo->storage == ArgInDoubleReg) {
3785 /* The offset is guaranteed to be aligned by the ABI rules */
3786 sparc_lddf_imm (code, sparc_fp, stack_offset, ainfo->reg);
3804 mono_arch_instrument_epilog_full (MonoCompile *cfg, void *func, void *p, gboolean enable_arguments, gboolean preserve_argument_registers)
3806 guint32 *code = (guint32*)p;
3807 int save_mode = SAVE_NONE;
3808 MonoMethod *method = cfg->method;
3810 switch (mono_type_get_underlying_type (mono_method_signature (method)->ret)->type) {
3811 case MONO_TYPE_VOID:
3812 /* special case string .ctor icall */
3813 if (strcmp (".ctor", method->name) && method->klass == mono_defaults.string_class)
3814 save_mode = SAVE_ONE;
3816 save_mode = SAVE_NONE;
3821 save_mode = SAVE_ONE;
3823 save_mode = SAVE_TWO;
3828 save_mode = SAVE_FP;
3830 case MONO_TYPE_VALUETYPE:
3831 save_mode = SAVE_STRUCT;
3834 save_mode = SAVE_ONE;
3838 /* Save the result to the stack and also put it into the output registers */
3840 switch (save_mode) {
3843 sparc_st_imm (code, sparc_i0, sparc_fp, 68);
3844 sparc_st_imm (code, sparc_i0, sparc_fp, 72);
3845 sparc_mov_reg_reg (code, sparc_i0, sparc_o1);
3846 sparc_mov_reg_reg (code, sparc_i1, sparc_o2);
3849 sparc_sti_imm (code, sparc_i0, sparc_fp, ARGS_OFFSET);
3850 sparc_mov_reg_reg (code, sparc_i0, sparc_o1);
3854 sparc_stdf_imm (code, sparc_f0, sparc_fp, ARGS_OFFSET);
3856 sparc_stdf_imm (code, sparc_f0, sparc_fp, 72);
3857 sparc_ld_imm (code, sparc_fp, 72, sparc_o1);
3858 sparc_ld_imm (code, sparc_fp, 72 + 4, sparc_o2);
3863 sparc_mov_reg_reg (code, sparc_i0, sparc_o1);
3865 sparc_ld_imm (code, sparc_fp, 64, sparc_o1);
3873 sparc_set (code, cfg->method, sparc_o0);
3875 mono_add_patch_info (cfg, (guint8*)code - cfg->native_code, MONO_PATCH_INFO_ABS, func);
3878 /* Restore result */
3880 switch (save_mode) {
3882 sparc_ld_imm (code, sparc_fp, 68, sparc_i0);
3883 sparc_ld_imm (code, sparc_fp, 72, sparc_i0);
3886 sparc_ldi_imm (code, sparc_fp, ARGS_OFFSET, sparc_i0);
3889 sparc_lddf_imm (code, sparc_fp, ARGS_OFFSET, sparc_f0);
3900 mono_arch_emit_prolog (MonoCompile *cfg)
3902 MonoMethod *method = cfg->method;
3903 MonoMethodSignature *sig;
3909 cfg->code_size = 256;
3910 cfg->native_code = g_malloc (cfg->code_size);
3911 code = (guint32*)cfg->native_code;
3913 /* FIXME: Generate intermediate code instead */
3915 offset = cfg->stack_offset;
3916 offset += (16 * sizeof (gpointer)); /* register save area */
3918 offset += 4; /* struct/union return pointer */
3921 /* add parameter area size for called functions */
3922 if (cfg->param_area < (6 * sizeof (gpointer)))
3923 /* Reserve space for the first 6 arguments even if it is unused */
3924 offset += 6 * sizeof (gpointer);
3926 offset += cfg->param_area;
3928 /* align the stack size */
3929 offset = ALIGN_TO (offset, MONO_ARCH_FRAME_ALIGNMENT);
3932 * localloc'd memory is stored between the local variables (whose
3933 * size is given by cfg->stack_offset), and between the space reserved
3936 cfg->arch.localloc_offset = offset - cfg->stack_offset;
3938 cfg->stack_offset = offset;
3940 #ifdef MONO_ARCH_SIGSEGV_ON_ALTSTACK
3941 /* Perform stack touching */
3945 if (!sparc_is_imm13 (- cfg->stack_offset)) {
3946 /* Can't use sparc_o7 here, since we're still in the caller's frame */
3947 sparc_set (code, (- cfg->stack_offset), GP_SCRATCH_REG);
3948 sparc_save (code, sparc_sp, GP_SCRATCH_REG, sparc_sp);
3951 sparc_save_imm (code, sparc_sp, - cfg->stack_offset, sparc_sp);
3954 if (strstr (cfg->method->name, "foo")) {
3955 mono_add_patch_info (cfg, (guint8*)code - cfg->native_code, MONO_PATCH_INFO_ABS, mono_sparc_break);
3956 sparc_call_simple (code, 0);
3961 sig = mono_method_signature (method);
3963 cinfo = get_call_info (cfg, sig, FALSE);
3965 /* Keep in sync with emit_load_volatile_arguments */
3966 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
3967 ArgInfo *ainfo = cinfo->args + i;
3968 gint32 stack_offset;
3970 inst = cfg->args [i];
3972 if (sig->hasthis && (i == 0))
3973 arg_type = &mono_defaults.object_class->byval_arg;
3975 arg_type = sig->params [i - sig->hasthis];
3977 stack_offset = ainfo->offset + ARGS_OFFSET;
3979 /* Save the split arguments so they will reside entirely on the stack */
3980 if (ainfo->storage == ArgInSplitRegStack) {
3981 /* Save the register to the stack */
3982 g_assert (inst->opcode == OP_REGOFFSET);
3983 if (!sparc_is_imm13 (stack_offset))
3985 sparc_st_imm (code, sparc_i5, inst->inst_basereg, stack_offset);
3988 if (!v64 && !arg_type->byref && (arg_type->type == MONO_TYPE_R8)) {
3989 /* Save the argument to a dword aligned stack location */
3991 * stack_offset contains the offset of the argument on the stack.
3992 * inst->inst_offset contains the dword aligned offset where the value
3995 if (ainfo->storage == ArgInIRegPair) {
3996 if (!sparc_is_imm13 (inst->inst_offset + 4))
3998 sparc_st_imm (code, sparc_i0 + ainfo->reg, inst->inst_basereg, inst->inst_offset);
3999 sparc_st_imm (code, sparc_i0 + ainfo->reg + 1, inst->inst_basereg, inst->inst_offset + 4);
4002 if (ainfo->storage == ArgInSplitRegStack) {
4004 g_assert_not_reached ();
4006 if (stack_offset != inst->inst_offset) {
4007 /* stack_offset is not dword aligned, so we need to make a copy */
4008 sparc_st_imm (code, sparc_i5, inst->inst_basereg, inst->inst_offset);
4009 sparc_ld_imm (code, sparc_fp, stack_offset + 4, sparc_o7);
4010 sparc_st_imm (code, sparc_o7, inst->inst_basereg, inst->inst_offset + 4);
4014 if (ainfo->storage == ArgOnStackPair) {
4016 g_assert_not_reached ();
4018 if (stack_offset != inst->inst_offset) {
4019 /* stack_offset is not dword aligned, so we need to make a copy */
4020 sparc_ld_imm (code, sparc_fp, stack_offset, sparc_o7);
4021 sparc_st_imm (code, sparc_o7, inst->inst_basereg, inst->inst_offset);
4022 sparc_ld_imm (code, sparc_fp, stack_offset + 4, sparc_o7);
4023 sparc_st_imm (code, sparc_o7, inst->inst_basereg, inst->inst_offset + 4);
4027 g_assert_not_reached ();
4030 if ((ainfo->storage == ArgInIReg) && (inst->opcode != OP_REGVAR)) {
4031 /* Argument in register, but need to be saved to stack */
4032 if (!sparc_is_imm13 (stack_offset))
4034 if ((stack_offset - ARGS_OFFSET) & 0x1)
4035 sparc_stb_imm (code, sparc_i0 + ainfo->reg, inst->inst_basereg, stack_offset);
4037 if ((stack_offset - ARGS_OFFSET) & 0x2)
4038 sparc_sth_imm (code, sparc_i0 + ainfo->reg, inst->inst_basereg, stack_offset);
4040 if ((stack_offset - ARGS_OFFSET) & 0x4)
4041 sparc_st_imm (code, sparc_i0 + ainfo->reg, inst->inst_basereg, stack_offset);
4044 sparc_stx_imm (code, sparc_i0 + ainfo->reg, inst->inst_basereg, stack_offset);
4046 sparc_st_imm (code, sparc_i0 + ainfo->reg, inst->inst_basereg, stack_offset);
4050 if ((ainfo->storage == ArgInIRegPair) && (inst->opcode != OP_REGVAR)) {
4054 /* Argument in regpair, but need to be saved to stack */
4055 if (!sparc_is_imm13 (inst->inst_offset + 4))
4057 sparc_st_imm (code, sparc_i0 + ainfo->reg, inst->inst_basereg, inst->inst_offset);
4058 sparc_st_imm (code, sparc_i0 + ainfo->reg + 1, inst->inst_basereg, inst->inst_offset + 4);
4060 else if ((ainfo->storage == ArgInFloatReg) && (inst->opcode != OP_REGVAR)) {
4061 if (!sparc_is_imm13 (stack_offset))
4063 sparc_stf_imm (code, ainfo->reg, inst->inst_basereg, inst->inst_offset);
4065 else if ((ainfo->storage == ArgInDoubleReg) && (inst->opcode != OP_REGVAR)) {
4066 /* The offset is guaranteed to be aligned by the ABI rules */
4067 sparc_stdf_imm (code, ainfo->reg, inst->inst_basereg, inst->inst_offset);
4070 if ((ainfo->storage == ArgInFloatReg) && (inst->opcode == OP_REGVAR)) {
4071 /* Need to move into the a double precision register */
4072 sparc_fstod (code, ainfo->reg, ainfo->reg - 1);
4075 if ((ainfo->storage == ArgInSplitRegStack) || (ainfo->storage == ArgOnStack))
4076 if (inst->opcode == OP_REGVAR)
4077 /* FIXME: Load the argument into memory */
4083 if (cfg->method->save_lmf) {
4084 gint32 lmf_offset = STACK_BIAS - cfg->arch.lmf_offset;
4087 mono_add_patch_info (cfg, (guint8*)code - cfg->native_code, MONO_PATCH_INFO_IP, NULL);
4088 sparc_set_template (code, sparc_o7);
4089 sparc_sti_imm (code, sparc_o7, sparc_fp, lmf_offset + G_STRUCT_OFFSET (MonoLMF, ip));
4091 sparc_sti_imm (code, sparc_sp, sparc_fp, lmf_offset + G_STRUCT_OFFSET (MonoLMF, sp));
4093 sparc_sti_imm (code, sparc_fp, sparc_fp, lmf_offset + G_STRUCT_OFFSET (MonoLMF, ebp));
4095 /* FIXME: add a relocation for this */
4096 sparc_set (code, cfg->method, sparc_o7);
4097 sparc_sti_imm (code, sparc_o7, sparc_fp, lmf_offset + G_STRUCT_OFFSET (MonoLMF, method));
4099 mono_add_patch_info (cfg, (guint8*)code - cfg->native_code, MONO_PATCH_INFO_INTERNAL_METHOD,
4100 (gpointer)"mono_arch_get_lmf_addr");
4103 code = (guint32*)mono_sparc_emit_save_lmf (code, lmf_offset);
4106 if (mono_jit_trace_calls != NULL && mono_trace_eval (method))
4107 code = mono_arch_instrument_prolog (cfg, mono_trace_enter_method, code, TRUE);
4109 cfg->code_len = (guint8*)code - cfg->native_code;
4111 g_assert (cfg->code_len <= cfg->code_size);
4113 return (guint8*)code;
/*
 * mono_arch_emit_epilog:
 *
 *   Emit the method epilogue into cfg->native_code: optional leave-method
 * tracing, LMF restoration, the V8 struct-return ABI return adjustment, and
 * the final 'restore' instruction, folding the previous instruction into the
 * restore when possible.
 * NOTE(review): this extraction has elided lines between the numbered lines
 * below (declarations, braces, some statements); comments are limited to what
 * the visible code shows.
 */
4117 mono_arch_emit_epilog (MonoCompile *cfg)
4119 MonoMethod *method = cfg->method;
/* Conservative upper bound, in bytes, on the code emitted below */
4122 int max_epilog_size = 16 + 20 * 4;
4124 if (cfg->method->save_lmf)
4125 max_epilog_size += 128;
4127 if (mono_jit_trace_calls != NULL)
4128 max_epilog_size += 50;
4130 if (cfg->prof_options & MONO_PROFILE_ENTER_LEAVE)
4131 max_epilog_size += 50;
/* Grow the native code buffer until the epilogue is guaranteed to fit */
4133 while (cfg->code_len + max_epilog_size > (cfg->code_size - 16)) {
4134 cfg->code_size *= 2;
4135 cfg->native_code = g_realloc (cfg->native_code, cfg->code_size);
4136 cfg->stat_code_reallocs++;
4139 code = (guint32*)(cfg->native_code + cfg->code_len);
4141 if (mono_jit_trace_calls != NULL && mono_trace_eval (method))
4142 code = mono_arch_instrument_epilog (cfg, mono_trace_leave_method, code, TRUE);
/* Pop this method's LMF frame before returning */
4144 if (cfg->method->save_lmf) {
4145 gint32 lmf_offset = STACK_BIAS - cfg->arch.lmf_offset;
4147 code = mono_sparc_emit_restore_lmf (code, lmf_offset);
4151 * The V8 ABI requires that calls to functions which return a structure
/* Return to %i7 + 12 — presumably to skip the extra word the V8 ABI places
 * after struct-returning call sites; TODO confirm against the V8 ABI spec. */
4154 if (!v64 && mono_method_signature (cfg->method)->pinvoke && MONO_TYPE_ISSTRUCT(mono_method_signature (cfg->method)->ret))
4155 sparc_jmpl_imm (code, sparc_i7, 12, sparc_g0);
4159 /* Only fold last instruction into the restore if the exit block has an in count of 1
4160 and the previous block hasn't been optimized away since it may have an in count > 1 */
4161 if (cfg->bb_exit->in_count == 1 && cfg->bb_exit->in_bb[0]->native_offset != cfg->bb_exit->native_offset)
4165 * FIXME: The last instruction might have a branch pointing into it like in
4166 * int_ceq sparc_i0 <-
4170 /* Try folding last instruction into the restore */
/* Case 1: 'or reg, imm, %i0' (op 0x2, op3 0x2, immediate form, rd == %i0) */
4171 if (can_fold && (sparc_inst_op (code [-2]) == 0x2) && (sparc_inst_op3 (code [-2]) == 0x2) && sparc_inst_imm (code [-2]) && (sparc_inst_rd (code [-2]) == sparc_i0)) {
4172 /* or reg, imm, %i0 */
4173 int reg = sparc_inst_rs1 (code [-2]);
/* Sign-extend the 13 bit immediate to 32 bits (shift up to bit 31, arithmetic shift back) */
4174 int imm = (((gint32)(sparc_inst_imm13 (code [-2]))) << 19) >> 19;
4175 code [-2] = code [-1];
/* The restore writes the folded value directly into the caller's %o0 */
4177 sparc_restore_imm (code, reg, imm, sparc_o0);
/* Case 2: 'or reg, reg, %i0' (register form) */
4180 if (can_fold && (sparc_inst_op (code [-2]) == 0x2) && (sparc_inst_op3 (code [-2]) == 0x2) && (!sparc_inst_imm (code [-2])) && (sparc_inst_rd (code [-2]) == sparc_i0)) {
4181 /* or reg, reg, %i0 */
4182 int reg1 = sparc_inst_rs1 (code [-2]);
4183 int reg2 = sparc_inst_rs2 (code [-2]);
4184 code [-2] = code [-1];
4186 sparc_restore (code, reg1, reg2, sparc_o0);
/* Nothing foldable: plain 'restore %g0, 0, %g0' */
4189 sparc_restore_imm (code, sparc_g0, 0, sparc_g0);
4191 cfg->code_len = (guint8*)code - cfg->native_code;
4193 g_assert (cfg->code_len < cfg->code_size);
/*
 * mono_arch_emit_exceptions:
 *
 *   Emit the out-of-line throw sequences for MONO_PATCH_INFO_EXC patches at
 * the end of the method's native code. Throw sequences for the same exception
 * class are shared: up to 16 classes are cached in exc_classes [] and later
 * throws branch back to the first sequence, passing a per-throw-site offset
 * in %o1.
 * NOTE(review): this extraction has elided lines between the numbered lines
 * below; comments are limited to what the visible code shows.
 */
4198 mono_arch_emit_exceptions (MonoCompile *cfg)
4200 MonoJumpInfo *patch_info;
/* Cache of already-emitted throw sequences, keyed by exception class */
4205 MonoClass *exc_classes [16];
4206 guint8 *exc_throw_start [16], *exc_throw_end [16];
4208 /* Compute needed space */
4209 for (patch_info = cfg->patch_info; patch_info; patch_info = patch_info->next) {
4210 if (patch_info->type == MONO_PATCH_INFO_EXC)
4215 * make sure we have enough space for exceptions
4218 code_size = exc_count * (20 * 4);
4220 code_size = exc_count * 24;
/* Grow the native code buffer until the throw sequences are guaranteed to fit */
4223 while (cfg->code_len + code_size > (cfg->code_size - 16)) {
4224 cfg->code_size *= 2;
4225 cfg->native_code = g_realloc (cfg->native_code, cfg->code_size);
4226 cfg->stat_code_reallocs++;
4229 code = (guint32*)(cfg->native_code + cfg->code_len);
4231 for (patch_info = cfg->patch_info; patch_info; patch_info = patch_info->next) {
4232 switch (patch_info->type) {
4233 case MONO_PATCH_INFO_EXC: {
4234 MonoClass *exc_class;
4235 guint32 *buf, *buf2;
4236 guint32 throw_ip, type_idx;
/* Patch the branch at the throw site to target the code emitted here */
4239 sparc_patch ((guint32*)(cfg->native_code + patch_info->ip.i), code);
4241 exc_class = mono_class_from_name (mono_defaults.corlib, "System", patch_info->data.name);
4242 g_assert (exc_class);
/* Pass the type index (token minus the TYPEDEF tag) instead of the full token */
4243 type_idx = exc_class->type_token - MONO_TOKEN_TYPE_DEF;
4244 throw_ip = patch_info->ip.i;
4246 /* Find a throw sequence for the same exception class */
4247 for (i = 0; i < nthrows; ++i)
4248 if (exc_classes [i] == exc_class)
/* Reuse: load this site's ip offset (in instruction words) into %o1 and
 * branch to the shared sequence */
4252 guint32 throw_offset = (((guint8*)exc_throw_end [i] - cfg->native_code) - throw_ip) >> 2;
4253 if (!sparc_is_imm13 (throw_offset))
4254 sparc_set32 (code, throw_offset, sparc_o1);
4256 disp = (exc_throw_start [i] - (guint8*)code) >> 2;
4257 g_assert (sparc_is_imm22 (disp));
4258 sparc_branch (code, 0, sparc_ba, disp);
/* imm13 form fits in the branch delay slot */
4259 if (sparc_is_imm13 (throw_offset))
4260 sparc_set32 (code, throw_offset, sparc_o1);
/* Already handled here; prevent the generic patcher from touching it again */
4263 patch_info->type = MONO_PATCH_INFO_NONE;
4266 /* Emit the template for setting o1 */
4268 if (sparc_is_imm13 (((((guint8*)code - cfg->native_code) - throw_ip) >> 2) - 8))
4269 /* Can use a short form */
4272 sparc_set_template (code, sparc_o1);
/* Remember this sequence so later throws of the same class can reuse it */
4276 exc_classes [nthrows] = exc_class;
4277 exc_throw_start [nthrows] = (guint8*)code;
4281 mono_add_patch_info (cfg, (guint8*)code - cfg->native_code, MONO_PATCH_INFO_ABS, mono_sparc_break);
4285 /* first arg = type token */
4286 /* Pass the type index to reduce the size of the sparc_set */
4287 if (!sparc_is_imm13 (type_idx))
4288 sparc_set32 (code, type_idx, sparc_o0);
4290 /* second arg = offset between the throw ip and the current ip */
4291 /* On sparc, the saved ip points to the call instruction */
4292 disp = (((guint8*)code - cfg->native_code) - throw_ip) >> 2;
4293 sparc_set32 (buf, disp, sparc_o1);
4298 exc_throw_end [nthrows] = (guint8*)code;
/* Redirect the patch to the common corlib-exception thrower */
4302 patch_info->data.name = "mono_arch_throw_corlib_exception";
4303 patch_info->type = MONO_PATCH_INFO_INTERNAL_METHOD;
4304 patch_info->ip.i = (guint8*)code - cfg->native_code;
4308 if (sparc_is_imm13 (type_idx)) {
4309 /* Put it into the delay slot */
4312 sparc_set32 (code, type_idx, sparc_o0);
/* The imm13 sparc_set32 must be exactly one instruction to fit the delay slot */
4313 g_assert (code - buf == 1);
4324 cfg->code_len = (guint8*)code - cfg->native_code;
4326 g_assert (cfg->code_len < cfg->code_size);
/* One-time-init flag for lmf_addr_key; set under the appropriate guard in
 * mono_arch_finish_init (). */
4330 gboolean lmf_addr_key_inited = FALSE;
/* Thread-local-storage key holding the address of the current thread's LMF.
 * Solaris thr_* TLS is used when MONO_SPARC_THR_TLS is defined, pthread TLS
 * otherwise. */
4332 #ifdef MONO_SPARC_THR_TLS
4333 thread_key_t lmf_addr_key;
4335 pthread_key_t lmf_addr_key;
/*
 * mono_arch_get_lmf_addr:
 *
 *   Return the address of the current thread's LMF, read from the
 * lmf_addr_key TLS slot. Uses raw thr_/pthread_ TLS accessors rather than
 * the runtime's IO layer for speed.
 */
4339 mono_arch_get_lmf_addr (void)
4341 /* This is perf critical so we bypass the IO layer */
4342 /* The thr_... functions seem to be somewhat faster */
4343 #ifdef MONO_SPARC_THR_TLS
4345 thr_getspecific (lmf_addr_key, &res);
4348 return pthread_getspecific (lmf_addr_key);
4352 #ifdef MONO_ARCH_SIGSEGV_ON_ALTSTACK
4355 * There seems to be no way to determine stack boundaries under solaris,
4356 * so it's not possible to determine whenever a SIGSEGV is caused by stack
4359 #error "--with-sigaltstack=yes not supported on solaris"
/*
 * mono_arch_finish_init:
 *
 *   Lazily create the TLS key used by mono_arch_get_lmf_addr (). The key
 * creation must succeed (asserted).
 * NOTE(review): the thr_setspecific/pthread_setspecific lines at the bottom
 * reference a 'tls' parameter not visible here — they presumably belong to a
 * separate per-thread setup function whose signature was lost in this
 * extraction; confirm against the full file.
 */
4364 mono_arch_finish_init (void)
4366 if (!lmf_addr_key_inited) {
4369 lmf_addr_key_inited = TRUE;
4371 #ifdef MONO_SPARC_THR_TLS
4372 res = thr_keycreate (&lmf_addr_key, NULL);
4374 res = pthread_key_create (&lmf_addr_key, NULL);
/* Key creation is not allowed to fail */
4376 g_assert (res == 0);
/* Publish the address of this thread's LMF into the TLS slot */
4380 #ifdef MONO_SPARC_THR_TLS
4381 thr_setspecific (lmf_addr_key, &tls->lmf);
4383 pthread_setspecific (lmf_addr_key, &tls->lmf);
/* Free per-thread JIT TLS data; body not visible in this extraction
 * (presumably empty on this backend — confirm against the full file). */
4388 mono_arch_free_jit_tls_data (MonoJitTlsData *tls)
/* Give the backend a chance to replace a call to cmethod with an inlined
 * instruction sequence; 'ins' stays NULL when no intrinsic applies (rest of
 * the body elided in this extraction). */
4393 mono_arch_emit_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
4395 MonoInst *ins = NULL;
4401 * mono_arch_get_argument_info:
4402 * @csig: a method signature
4403 * @param_count: the number of parameters to consider
4404 * @arg_info: an array to store the result infos
4406 * Gathers information on parameters such as size, alignment and
4407  * padding. arg_info should be large enough to hold param_count + 1 entries.
4409 * Returns the size of the activation frame.
/* Fill arg_info [] with the stack offset and size of each parameter of csig
 * (slot 0 is the implicit 'this' when present, slot k+1 is parameter k).
 * Offsets are computed from the unbiased frame pointer: ARGS_OFFSET minus the
 * SPARC stack bias plus the argument's offset from get_call_info ().
 * NOTE(review): declarations and the return statement are elided in this
 * extraction. */
4412 mono_arch_get_argument_info (MonoGenericSharingContext *gsctx, MonoMethodSignature *csig, int param_count, MonoJitArgumentInfo *arg_info)
4418 cinfo = get_call_info (NULL, csig, FALSE);
4420 if (csig->hasthis) {
4421 ainfo = &cinfo->args [0];
4422 arg_info [0].offset = ARGS_OFFSET - MONO_SPARC_STACK_BIAS + ainfo->offset;
4425 for (k = 0; k < param_count; k++) {
4426 ainfo = &cinfo->args [k + csig->hasthis];
4428 arg_info [k + 1].offset = ARGS_OFFSET - MONO_SPARC_STACK_BIAS + ainfo->offset;
4429 arg_info [k + 1].size = mono_type_size (csig->params [k], &align);
/* Backend hook for printing arch-specific instruction trees; body not
 * visible in this extraction. */
4438 mono_arch_print_tree (MonoInst *tree, int arity)
/* Backend hook returning an intrinsic instruction for fetching the current
 * domain; body not visible in this extraction (presumably returns NULL —
 * confirm against the full file). */
4443 MonoInst* mono_arch_get_domain_intrinsic (MonoCompile* cfg)
4449 mono_arch_context_get_int_reg (MonoContext *ctx, int reg)
4451 /* FIXME: implement */
4452 g_assert_not_reached ();