2 * mini-sparc.c: Sparc backend for the Mono code generator
5 * Paolo Molaro (lupus@ximian.com)
6 * Dietmar Maurer (dietmar@ximian.com)
9 * Christopher Taylor (ct@gentoo.org)
10 * Mark Crichton (crichton@gimp.org)
11 * Zoltan Varga (vargaz@freemail.hu)
13 * (C) 2003 Ximian, Inc.
21 #include <sys/systeminfo.h>
28 #include <mono/metadata/appdomain.h>
29 #include <mono/metadata/debug-helpers.h>
30 #include <mono/metadata/tokentype.h>
31 #include <mono/utils/mono-math.h>
33 #include "mini-sparc.h"
36 #include "cpu-sparc.h"
39 * Sparc V9 means two things:
40 * - the instruction set
43 * V9 instructions are only usable if the underlying processor is 64 bit. Most Sparc
44 * processors in use are 64 bit processors. The V9 ABI is only usable if the
45 * mono executable is a 64 bit executable. So it would make sense to use the 64 bit
46 * instructions without using the 64 bit ABI.
51 * - %i0..%i<n> hold the incoming arguments, these are never written by JITted
52 * code. Unused input registers are used for global register allocation.
53 * - %o0..%o5 and %l7 is used for local register allocation and passing arguments
54 * - %l0..%l6 is used for global register allocation
55 * - %o7 and %g1 is used as scratch registers in opcodes
56 * - all floating point registers are used for local register allocation except %f0.
57 * Only double precision registers are used.
59 * - fp registers %d0..%d30 are used for parameter passing, and %d32..%d62 are
60 * used for local allocation.
65 * - doubles and longs must be stored in dword aligned locations
69 * The following things are not implemented or do not work:
70 * - some fp arithmetic corner cases
71 * The following tests in mono/mini are expected to fail:
72 * - test_0_simple_double_casts
73 * This test casts (guint64)-1 to double and then back to guint64 again.
74 * Under x86, it returns 0, while under sparc it returns -1.
76 * In addition to this, the runtime requires the trunc function, or its
77 * solaris counterpart, aintl, to do some double->int conversions. If this
78 * function is not available, it is emulated somewhat, but the results can be
84 * - optimize sparc_set according to the memory model
85 * - when non-AOT compiling, compute patch targets immediately so we don't
86 * have to emit the 6 byte template.
88 * - struct arguments/returns
93 * - sparc_call_simple can't be used in a lot of places since the displacement
94 * might not fit into an imm30.
95 * - g1 can't be used in a lot of places since it is used as a scratch reg in
97 * - sparc_f0 can't be used as a scratch register on V9
98 * - the %d34..%d62 fp registers are encoded as: %dx = %f(x - 32 + 1), ie.
100 * - ldind.i4/u4 needs to sign extend/clear out upper word -> slows things down
101 * - ins->dreg can't be used as a scratch register in r4 opcodes since it might
102 * be a double precision register which has no single precision part.
103 * - passing/returning structs is hard to implement, because:
104 * - the spec is very hard to understand
105 * - it requires knowledge about the fields of structure, needs to handle
106 * nested structures etc.
110 * Possible optimizations:
111 * - delay slot scheduling
112 * - allocate large constants to registers
113 * - add more mul/div/rem optimizations
117 #define MONO_SPARC_THR_TLS 1
121 * There was a 64 bit bug in glib-2.2: g_bit_nth_msf (0, -1) would return 32,
122 * causing infinite loops in dominator computation. So glib-2.4 is required.
125 #if GLIB_MAJOR_VERSION == 2 && GLIB_MINOR_VERSION < 4
126 #error "glib 2.4 or later is required for 64 bit mode."
/* Marker for unimplemented code paths: aborts at runtime via g_assert_not_reached (). */
130 #define NOT_IMPLEMENTED do { g_assert_not_reached (); } while (0)
/* Round VAL up to the next multiple of ALIGN; ALIGN must be a power of two. */
132 #define ALIGN_TO(val,align) (((val) + ((align) - 1)) & ~((align) - 1))
/* Size of the stack area reserved for signal handling — presumably an alternate signal stack; confirm against the runtime's signal setup. */
134 #define SIGNAL_STACK_SIZE (64 * 1024)
/* Bias applied to %sp/%fp-relative offsets on the 64-bit ABI (value comes from mini-sparc.h). */
136 #define STACK_BIAS MONO_SPARC_STACK_BIAS
140 /* %g1 is used by sparc_set */
141 #define GP_SCRATCH_REG sparc_g4
142 /* %f0 is used for parameter passing */
143 #define FP_SCRATCH_REG sparc_f30
144 #define ARGS_OFFSET (STACK_BIAS + 128)
148 #define FP_SCRATCH_REG sparc_f0
149 #define ARGS_OFFSET 68
150 #define GP_SCRATCH_REG sparc_g1
154 /* Whether the CPU supports v9 instructions */
155 static gboolean sparcv9 = FALSE;
157 /* Whether this is a 64bit executable */
159 static gboolean v64 = TRUE;
161 static gboolean v64 = FALSE;
164 static gpointer mono_arch_get_lmf_addr (void);
167 mono_spillvar_offset_float (MonoCompile *cfg, int spillvar);
/*
 * mono_arch_regname:
 * @reg: integer register number (0..31)
 *
 * Return a human-readable name for the SPARC integer register REG,
 * for use in debugging/tracing output. Out-of-range values yield
 * "unknown". The visible fragment was truncated (missing return type
 * and return statements); this restores the complete definition.
 */
const char*
mono_arch_regname (int reg) {
	static const char * rnames[] = {
		"sparc_g0", "sparc_g1", "sparc_g2", "sparc_g3", "sparc_g4",
		"sparc_g5", "sparc_g6", "sparc_g7", "sparc_o0", "sparc_o1",
		"sparc_o2", "sparc_o3", "sparc_o4", "sparc_o5", "sparc_sp",
		"sparc_call", "sparc_l0", "sparc_l1", "sparc_l2", "sparc_l3",
		"sparc_l4", "sparc_l5", "sparc_l6", "sparc_l7", "sparc_i0",
		"sparc_i1", "sparc_i2", "sparc_i3", "sparc_i4", "sparc_i5",
		"sparc_fp", "sparc_retadr"
	};
	if (reg >= 0 && reg < 32)
		return rnames [reg];
	return "unknown";
}
/*
 * mono_arch_fregname:
 * @reg: floating point register number (0..31)
 *
 * Return a human-readable name for the SPARC floating point register
 * REG, for use in debugging/tracing output. Out-of-range values yield
 * "unknown". The visible fragment was truncated (missing return type
 * and return statements); this restores the complete definition.
 */
const char*
mono_arch_fregname (int reg) {
	static const char *rnames [] = {
		"sparc_f0", "sparc_f1", "sparc_f2", "sparc_f3", "sparc_f4",
		"sparc_f5", "sparc_f6", "sparc_f7", "sparc_f8", "sparc_f9",
		"sparc_f10", "sparc_f11", "sparc_f12", "sparc_f13", "sparc_f14",
		"sparc_f15", "sparc_f16", "sparc_f17", "sparc_f18", "sparc_f19",
		"sparc_f20", "sparc_f21", "sparc_f22", "sparc_f23", "sparc_f24",
		"sparc_f25", "sparc_f26", "sparc_f27", "sparc_f28", "sparc_f29",
		"sparc_f30", "sparc_f31"
	};
	if (reg >= 0 && reg < 32)
		return rnames [reg];
	return "unknown";
}
204 * Initialize the cpu to execute managed code.
207 mono_arch_cpu_init (void)
210 /* make sure sparcv9 is initialized for embedded use */
211 mono_arch_cpu_optimizazions(&dummy);
215 * This function returns the optimizations supported on this cpu.
218 mono_arch_cpu_optimizazions (guint32 *exclude_mask)
226 if (!sysinfo (SI_ISALIST, buf, 1024))
227 g_assert_not_reached ();
229 /* From glibc. If the getpagesize is 8192, we're on sparc64, which
230 * (in)directly implies that we're a v9 or better.
231 * Improvements to this are greatly accepted...
232 * Also, we don't differentiate between v7 and v8. I sense SIGILL
233 * sniffing in my future.
235 if (getpagesize() == 8192)
236 strcpy (buf, "sparcv9");
238 strcpy (buf, "sparcv8");
242 * On some processors, the cmov instructions are even slower than the
245 if (strstr (buf, "sparcv9")) {
246 opts |= MONO_OPT_CMOV | MONO_OPT_FCMOV;
250 *exclude_mask |= MONO_OPT_CMOV | MONO_OPT_FCMOV;
256 mono_sparc_break (void)
261 #define flushi(addr) __asm__ __volatile__ ("iflush %0"::"r"(addr):"memory")
262 #else /* assume Sun's compiler */
263 static void flushi(void *addr)
270 void sync_instruction_memory(caddr_t addr, int len);
274 mono_arch_flush_icache (guint8 *code, gint size)
277 /* Hopefully this is optimized based on the actual CPU */
278 sync_instruction_memory (code, size);
280 guint64 *p = (guint64*)code;
281 guint64 *end = (guint64*)(code + ((size + 8) /8));
284 * FIXME: Flushing code in dword chunks is _slow_.
288 __asm__ __volatile__ ("iflush %0"::"r"(p++));
298 * Flush all register windows to memory. Every register window is saved to
299 * a 16 word area on the stack pointed to by its %sp register.
302 mono_sparc_flushw (void)
304 static guint32 start [64];
305 static int inited = 0;
307 static void (*flushw) (void);
312 sparc_save_imm (code, sparc_sp, -160, sparc_sp);
315 sparc_restore_simple (code);
317 g_assert ((code - start) < 64);
319 flushw = (gpointer)start;
/*
 * mono_arch_flush_register_windows:
 *
 * Arch-independent entry point that spills all active SPARC register
 * windows to their stack save areas (see mono_sparc_flushw) so saved
 * registers can be inspected in memory. The visible fragment was
 * truncated (missing braces); this restores the complete definition.
 */
void
mono_arch_flush_register_windows (void)
{
	mono_sparc_flushw ();
}
334 mono_arch_is_inst_imm (gint64 imm)
336 return sparc_is_imm13 (imm);
340 mono_sparc_is_v9 (void) {
345 mono_sparc_is_sparc64 (void) {
357 ArgInFloatReg, /* V9 only */
358 ArgInDoubleReg /* V9 only */
363 /* This needs to be offset by %i0 or %o0 depending on caller/callee */
366 guint32 vt_offset; /* for valuetypes */
384 add_general (guint32 *gr, guint32 *stack_size, ArgInfo *ainfo, gboolean pair)
386 ainfo->offset = *stack_size;
389 if (*gr >= PARAM_REGS) {
390 ainfo->storage = ArgOnStack;
393 ainfo->storage = ArgInIReg;
398 /* Always reserve stack space for parameters passed in registers */
399 (*stack_size) += sizeof (gpointer);
402 if (*gr < PARAM_REGS - 1) {
403 /* A pair of registers */
404 ainfo->storage = ArgInIRegPair;
408 else if (*gr >= PARAM_REGS) {
409 /* A pair of stack locations */
410 ainfo->storage = ArgOnStackPair;
413 ainfo->storage = ArgInSplitRegStack;
418 (*stack_size) += 2 * sizeof (gpointer);
424 #define FLOAT_PARAM_REGS 32
427 add_float (guint32 *gr, guint32 *stack_size, ArgInfo *ainfo, gboolean single)
429 ainfo->offset = *stack_size;
432 if (*gr >= FLOAT_PARAM_REGS) {
433 ainfo->storage = ArgOnStack;
436 /* A single is passed in an even numbered fp register */
437 ainfo->storage = ArgInFloatReg;
438 ainfo->reg = *gr + 1;
443 if (*gr < FLOAT_PARAM_REGS) {
444 /* A double register */
445 ainfo->storage = ArgInDoubleReg;
450 ainfo->storage = ArgOnStack;
454 (*stack_size) += sizeof (gpointer);
462 * Obtain information about a call according to the calling convention.
463 * For V8, see the "System V ABI, Sparc Processor Supplement" Sparc V8 version
464 * document for more information.
465 * For V9, see the "Low Level System Information (64-bit psABI)" chapter in
466 * the 'Sparc Compliance Definition 2.4' document.
469 get_call_info (MonoMethodSignature *sig, gboolean is_pinvoke)
472 int n = sig->hasthis + sig->param_count;
473 guint32 stack_size = 0;
476 cinfo = g_malloc0 (sizeof (CallInfo) + (sizeof (ArgInfo) * n));
482 if (MONO_TYPE_ISSTRUCT ((sig->ret))) {
483 /* The address of the return value is passed in %o0 */
484 add_general (&gr, &stack_size, &cinfo->ret, FALSE);
485 cinfo->ret.reg += sparc_i0;
491 add_general (&gr, &stack_size, cinfo->args + 0, FALSE);
493 if ((sig->call_convention == MONO_CALL_VARARG) && (n == 0)) {
496 /* Emit the signature cookie just before the implicit arguments */
497 add_general (&gr, &stack_size, &cinfo->sig_cookie, FALSE);
500 for (i = 0; i < sig->param_count; ++i) {
501 ArgInfo *ainfo = &cinfo->args [sig->hasthis + i];
503 if ((sig->call_convention == MONO_CALL_VARARG) && (i == sig->sentinelpos)) {
506 /* Emit the signature cookie just before the implicit arguments */
507 add_general (&gr, &stack_size, &cinfo->sig_cookie, FALSE);
510 DEBUG(printf("param %d: ", i));
511 if (sig->params [i]->byref) {
512 DEBUG(printf("byref\n"));
514 add_general (&gr, &stack_size, ainfo, FALSE);
517 switch (mono_type_get_underlying_type (sig->params [i])->type) {
518 case MONO_TYPE_BOOLEAN:
521 add_general (&gr, &stack_size, ainfo, FALSE);
522 /* the value is in the ls byte */
523 ainfo->offset += sizeof (gpointer) - 1;
528 add_general (&gr, &stack_size, ainfo, FALSE);
529 /* the value is in the ls word */
530 ainfo->offset += sizeof (gpointer) - 2;
534 add_general (&gr, &stack_size, ainfo, FALSE);
535 /* the value is in the ls dword */
536 ainfo->offset += sizeof (gpointer) - 4;
541 case MONO_TYPE_FNPTR:
542 case MONO_TYPE_CLASS:
543 case MONO_TYPE_OBJECT:
544 case MONO_TYPE_STRING:
545 case MONO_TYPE_SZARRAY:
546 case MONO_TYPE_ARRAY:
547 add_general (&gr, &stack_size, ainfo, FALSE);
549 case MONO_TYPE_VALUETYPE:
554 add_general (&gr, &stack_size, ainfo, FALSE);
556 case MONO_TYPE_TYPEDBYREF:
557 add_general (&gr, &stack_size, ainfo, FALSE);
562 add_general (&gr, &stack_size, ainfo, FALSE);
564 add_general (&gr, &stack_size, ainfo, TRUE);
569 add_float (&fr, &stack_size, ainfo, TRUE);
572 /* single precision values are passed in integer registers */
573 add_general (&gr, &stack_size, ainfo, FALSE);
578 add_float (&fr, &stack_size, ainfo, FALSE);
581 /* double precision values are passed in a pair of registers */
582 add_general (&gr, &stack_size, ainfo, TRUE);
586 g_assert_not_reached ();
590 if (!sig->pinvoke && (sig->call_convention == MONO_CALL_VARARG) && (n > 0) && (sig->sentinelpos == sig->param_count)) {
593 /* Emit the signature cookie just before the implicit arguments */
594 add_general (&gr, &stack_size, &cinfo->sig_cookie, FALSE);
599 switch (mono_type_get_underlying_type (sig->ret)->type) {
600 case MONO_TYPE_BOOLEAN:
611 case MONO_TYPE_FNPTR:
612 case MONO_TYPE_CLASS:
613 case MONO_TYPE_OBJECT:
614 case MONO_TYPE_SZARRAY:
615 case MONO_TYPE_ARRAY:
616 case MONO_TYPE_STRING:
617 cinfo->ret.storage = ArgInIReg;
618 cinfo->ret.reg = sparc_i0;
625 cinfo->ret.storage = ArgInIReg;
626 cinfo->ret.reg = sparc_i0;
630 cinfo->ret.storage = ArgInIRegPair;
631 cinfo->ret.reg = sparc_i0;
638 cinfo->ret.storage = ArgInFReg;
639 cinfo->ret.reg = sparc_f0;
641 case MONO_TYPE_VALUETYPE:
650 cinfo->ret.storage = ArgOnStack;
652 case MONO_TYPE_TYPEDBYREF:
655 /* Same as a valuetype with size 24 */
662 cinfo->ret.storage = ArgOnStack;
667 g_error ("Can't handle as return value 0x%x", sig->ret->type);
671 cinfo->stack_usage = stack_size;
672 cinfo->reg_usage = gr;
677 is_regsize_var (MonoType *t) {
680 switch (mono_type_get_underlying_type (t)->type) {
681 case MONO_TYPE_BOOLEAN:
692 case MONO_TYPE_OBJECT:
693 case MONO_TYPE_STRING:
694 case MONO_TYPE_CLASS:
695 case MONO_TYPE_SZARRAY:
696 case MONO_TYPE_ARRAY:
698 case MONO_TYPE_VALUETYPE:
710 mono_arch_get_allocatable_int_vars (MonoCompile *cfg)
716 * FIXME: If an argument is allocated to a register, then load it from the
717 * stack in the prolog.
720 for (i = 0; i < cfg->num_varinfo; i++) {
721 MonoInst *ins = cfg->varinfo [i];
722 MonoMethodVar *vmv = MONO_VARINFO (cfg, i);
725 if (vmv->range.first_use.abs_pos >= vmv->range.last_use.abs_pos)
728 /* FIXME: Make arguments on stack allocateable to registers */
729 if (ins->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT) || (ins->opcode == OP_REGVAR) || (ins->opcode == OP_ARG))
732 if (is_regsize_var (ins->inst_vtype)) {
733 g_assert (MONO_VARINFO (cfg, i)->reg == -1);
734 g_assert (i == vmv->idx);
736 vars = mono_varlist_insert_sorted (cfg, vars, vmv, FALSE);
744 mono_arch_get_global_int_regs (MonoCompile *cfg)
748 MonoMethodSignature *sig;
751 sig = mono_method_signature (cfg->method);
753 cinfo = get_call_info (sig, FALSE);
755 /* Use unused input registers */
756 for (i = cinfo->reg_usage; i < 6; ++i)
757 regs = g_list_prepend (regs, GUINT_TO_POINTER (sparc_i0 + i));
759 /* Use %l0..%l6 as global registers */
760 for (i = sparc_l0; i < sparc_l7; ++i)
761 regs = g_list_prepend (regs, GUINT_TO_POINTER (i));
769 * mono_arch_regalloc_cost:
771 * Return the cost, in number of memory references, of the action of
772 * allocating the variable VMV into a register during global register
776 mono_arch_regalloc_cost (MonoCompile *cfg, MonoMethodVar *vmv)
782 * Set var information according to the calling convention. sparc version.
783 * The locals var stuff should most likely be split in another method.
786 mono_arch_allocate_vars (MonoCompile *m)
788 MonoMethodSignature *sig;
789 MonoMethodHeader *header;
791 int i, offset, size, align, curinst;
794 header = mono_method_get_header (m->method);
796 sig = mono_method_signature (m->method);
798 cinfo = get_call_info (sig, FALSE);
800 if (sig->ret->type != MONO_TYPE_VOID) {
801 switch (cinfo->ret.storage) {
805 m->ret->opcode = OP_REGVAR;
806 m->ret->inst_c0 = cinfo->ret.reg;
810 g_assert_not_reached ();
813 m->ret->opcode = OP_REGOFFSET;
814 m->ret->inst_basereg = sparc_fp;
815 m->ret->inst_offset = 64;
821 m->ret->dreg = m->ret->inst_c0;
825 * We use the ABI calling conventions for managed code as well.
826 * Exception: valuetypes are never returned in registers on V9.
827 * FIXME: Use something more optimized.
830 /* Locals are allocated backwards from %fp */
831 m->frame_reg = sparc_fp;
835 * Reserve a stack slot for holding information used during exception
838 if (header->num_clauses)
839 offset += sizeof (gpointer) * 2;
841 if (m->method->save_lmf) {
842 offset += sizeof (MonoLMF);
843 m->arch.lmf_offset = offset;
846 curinst = m->locals_start;
847 for (i = curinst; i < m->num_varinfo; ++i) {
848 inst = m->varinfo [i];
850 if (inst->opcode == OP_REGVAR) {
851 //g_print ("allocating local %d to %s\n", i, mono_arch_regname (inst->dreg));
855 if (inst->flags & MONO_INST_IS_DEAD)
858 /* inst->unused indicates native sized value types, this is used by the
859 * pinvoke wrappers when they call functions returning structure */
860 if (inst->unused && MONO_TYPE_ISSTRUCT (inst->inst_vtype) && inst->inst_vtype->type != MONO_TYPE_TYPEDBYREF)
861 size = mono_class_native_size (inst->inst_vtype->data.klass, &align);
863 size = mono_type_stack_size (inst->inst_vtype, &align);
866 * This is needed since structures containing doubles must be doubleword
868 * FIXME: Do this only if needed.
870 if (MONO_TYPE_ISSTRUCT (inst->inst_vtype))
874 * variables are accessed as negative offsets from %fp, so increase
875 * the offset before assigning it to a variable
880 offset &= ~(align - 1);
881 inst->opcode = OP_REGOFFSET;
882 inst->inst_basereg = sparc_fp;
883 inst->inst_offset = STACK_BIAS + -offset;
885 //g_print ("allocating local %d to [%s - %d]\n", i, mono_arch_regname (inst->inst_basereg), - inst->inst_offset);
888 if (sig->call_convention == MONO_CALL_VARARG) {
889 m->sig_cookie = cinfo->sig_cookie.offset + ARGS_OFFSET;
892 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
893 inst = m->varinfo [i];
894 if (inst->opcode != OP_REGVAR) {
895 ArgInfo *ainfo = &cinfo->args [i];
896 gboolean inreg = TRUE;
900 if (sig->hasthis && (i == 0))
901 arg_type = &mono_defaults.object_class->byval_arg;
903 arg_type = sig->params [i - sig->hasthis];
906 if (!arg_type->byref && ((arg_type->type == MONO_TYPE_R4)
907 || (arg_type->type == MONO_TYPE_R8)))
909 * Since float arguments are passed in integer registers, we need to
910 * save them to the stack in the prolog.
915 /* FIXME: Allocate volatile arguments to registers */
916 if (inst->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT))
919 if (MONO_TYPE_ISSTRUCT (arg_type))
920 /* FIXME: this isn't needed */
923 inst->opcode = OP_REGOFFSET;
926 storage = ArgOnStack;
928 storage = ainfo->storage;
933 inst->opcode = OP_REGVAR;
934 inst->dreg = sparc_i0 + ainfo->reg;
939 * Since float regs are volatile, we save the arguments to
940 * the stack in the prolog.
941 * FIXME: Avoid this if the method contains no calls.
945 case ArgInSplitRegStack:
946 /* Split arguments are saved to the stack in the prolog */
947 inst->opcode = OP_REGOFFSET;
948 /* in parent frame */
949 inst->inst_basereg = sparc_fp;
950 inst->inst_offset = ainfo->offset + ARGS_OFFSET;
952 if (!arg_type->byref && (arg_type->type == MONO_TYPE_R8)) {
954 * It is very hard to load doubles from non-doubleword aligned
955 * memory locations. So if the offset is misaligned, we copy the
956 * argument to a stack location in the prolog.
958 if ((inst->inst_offset - STACK_BIAS) % 8) {
959 inst->inst_basereg = sparc_fp;
963 offset &= ~(align - 1);
964 inst->inst_offset = STACK_BIAS + -offset;
973 if (MONO_TYPE_ISSTRUCT (arg_type)) {
974 /* Add a level of indirection */
976 * It would be easier to add OP_LDIND_I here, but ldind_i instructions
977 * are destructively modified in a lot of places in inssel.brg.
980 MONO_INST_NEW (m, indir, 0);
982 inst->opcode = OP_SPARC_INARG_VT;
983 inst->inst_left = indir;
989 * spillvars are stored between the normal locals and the storage reserved
993 m->stack_offset = offset;
995 /* Add a properly aligned dword for use by int<->float conversion opcodes */
997 mono_spillvar_offset_float (m, 0);
1003 make_group (MonoCompile *cfg, MonoInst *left, int basereg, int offset)
1007 MONO_INST_NEW (cfg, group, OP_GROUP);
1008 group->inst_left = left;
1009 group->inst_basereg = basereg;
1010 group->inst_imm = offset;
1016 * take the arguments and generate the arch-specific
1017 * instructions to properly call the function in call.
1018 * This includes pushing, moving arguments to the right register
1022 mono_arch_call_opcode (MonoCompile *cfg, MonoBasicBlock* bb, MonoCallInst *call, int is_virtual) {
1024 MonoMethodSignature *sig;
1028 guint32 extra_space = 0;
1030 sig = call->signature;
1031 n = sig->param_count + sig->hasthis;
1033 cinfo = get_call_info (sig, sig->pinvoke);
1035 for (i = 0; i < n; ++i) {
1036 ainfo = cinfo->args + i;
1038 if ((sig->call_convention == MONO_CALL_VARARG) && (i == sig->sentinelpos)) {
1039 /* Emit the signature cookie just before the first implicit argument */
1041 MonoMethodSignature *tmp_sig;
1044 * mono_ArgIterator_Setup assumes the signature cookie is
1045 * passed first and all the arguments which were before it are
1046 * passed on the stack after the signature. So compensate by
1047 * passing a different signature.
1049 tmp_sig = mono_metadata_signature_dup (call->signature);
1050 tmp_sig->param_count -= call->signature->sentinelpos;
1051 tmp_sig->sentinelpos = 0;
1052 memcpy (tmp_sig->params, call->signature->params + call->signature->sentinelpos, tmp_sig->param_count * sizeof (MonoType*));
1054 /* FIXME: Add support for signature tokens to AOT */
1055 cfg->disable_aot = TRUE;
1056 /* We always pass the signature on the stack for simplicity */
1057 MONO_INST_NEW (cfg, arg, OP_SPARC_OUTARG_MEM);
1058 arg->inst_right = make_group (cfg, (MonoInst*)call, sparc_sp, ARGS_OFFSET + cinfo->sig_cookie.offset);
1059 MONO_INST_NEW (cfg, sig_arg, OP_ICONST);
1060 sig_arg->inst_p0 = tmp_sig;
1061 arg->inst_left = sig_arg;
1062 arg->type = STACK_PTR;
1063 /* prepend, so they get reversed */
1064 arg->next = call->out_args;
1065 call->out_args = arg;
1068 if (is_virtual && i == 0) {
1069 /* the argument will be attached to the call instruction */
1070 in = call->args [i];
1072 MONO_INST_NEW (cfg, arg, OP_OUTARG);
1073 in = call->args [i];
1074 arg->cil_code = in->cil_code;
1075 arg->inst_left = in;
1076 arg->type = in->type;
1077 /* prepend, we'll need to reverse them later */
1078 arg->next = call->out_args;
1079 call->out_args = arg;
1081 if ((i >= sig->hasthis) && (MONO_TYPE_ISSTRUCT(sig->params [i - sig->hasthis]))) {
1084 guint32 offset, pad;
1092 if (sig->params [i - sig->hasthis]->type == MONO_TYPE_TYPEDBYREF) {
1093 size = sizeof (MonoTypedRef);
1094 align = sizeof (gpointer);
1098 size = mono_type_native_stack_size (&in->klass->byval_arg, &align);
1100 size = mono_type_stack_size (&in->klass->byval_arg, &align);
1103 * We use OP_OUTARG_VT to copy the valuetype to a stack location, then
1104 * use the normal OUTARG opcodes to pass the address of the location to
1107 MONO_INST_NEW (cfg, inst, OP_OUTARG_VT);
1108 inst->inst_left = in;
1110 /* The first 6 argument locations are reserved */
1111 if (cinfo->stack_usage < 6 * sizeof (gpointer))
1112 cinfo->stack_usage = 6 * sizeof (gpointer);
1114 offset = ALIGN_TO ((ARGS_OFFSET - STACK_BIAS) + cinfo->stack_usage, align);
1115 pad = offset - ((ARGS_OFFSET - STACK_BIAS) + cinfo->stack_usage);
1117 inst->inst_c1 = STACK_BIAS + offset;
1118 inst->unused = size;
1119 arg->inst_left = inst;
1121 cinfo->stack_usage += size;
1122 cinfo->stack_usage += pad;
1125 arg->inst_right = make_group (cfg, (MonoInst*)call, sparc_sp, ARGS_OFFSET + ainfo->offset);
1127 switch (ainfo->storage) {
1131 if (ainfo->storage == ArgInIRegPair)
1132 arg->opcode = OP_SPARC_OUTARG_REGPAIR;
1133 arg->unused = sparc_o0 + ainfo->reg;
1134 call->used_iregs |= 1 << ainfo->reg;
1136 if ((i >= sig->hasthis) && !sig->params [i - sig->hasthis]->byref && ((sig->params [i - sig->hasthis]->type == MONO_TYPE_R8) || (sig->params [i - sig->hasthis]->type == MONO_TYPE_R4))) {
1137 /* An fp value is passed in an ireg */
1139 if (arg->opcode == OP_SPARC_OUTARG_REGPAIR)
1140 arg->opcode = OP_SPARC_OUTARG_REGPAIR_FLOAT;
1142 arg->opcode = OP_SPARC_OUTARG_FLOAT;
1145 * The OUTARG (freg) implementation needs an extra dword to store
1146 * the temporary value.
1152 arg->opcode = OP_SPARC_OUTARG_MEM;
1154 case ArgOnStackPair:
1155 arg->opcode = OP_SPARC_OUTARG_MEMPAIR;
1157 case ArgInSplitRegStack:
1158 arg->opcode = OP_SPARC_OUTARG_SPLIT_REG_STACK;
1159 arg->unused = sparc_o0 + ainfo->reg;
1160 call->used_iregs |= 1 << ainfo->reg;
1163 arg->opcode = OP_SPARC_OUTARG_FLOAT_REG;
1164 arg->unused = sparc_f0 + ainfo->reg;
1166 case ArgInDoubleReg:
1167 arg->opcode = OP_SPARC_OUTARG_DOUBLE_REG;
1168 arg->unused = sparc_f0 + ainfo->reg;
1177 * Reverse the call->out_args list.
1180 MonoInst *prev = NULL, *list = call->out_args, *next;
1187 call->out_args = prev;
1189 call->stack_usage = cinfo->stack_usage + extra_space;
1190 call->out_ireg_args = NULL;
1191 call->out_freg_args = NULL;
1192 cfg->param_area = MAX (cfg->param_area, call->stack_usage);
1193 cfg->flags |= MONO_CFG_HAS_CALLS;
1199 /* Map opcode to the sparc condition codes */
1200 static inline SparcCond
1201 opcode_to_sparc_cond (int opcode)
1223 case OP_COND_EXC_EQ:
1226 case OP_COND_EXC_NE_UN:
1233 case OP_COND_EXC_LT:
1239 case OP_COND_EXC_LT_UN:
1245 case OP_COND_EXC_GT:
1251 case OP_COND_EXC_GT_UN:
1255 case OP_COND_EXC_GE:
1259 case OP_COND_EXC_GE_UN:
1263 case OP_COND_EXC_LE:
1267 case OP_COND_EXC_LE_UN:
1269 case OP_COND_EXC_OV:
1270 case OP_COND_EXC_IOV:
1273 case OP_COND_EXC_IC:
1275 case OP_COND_EXC_NO:
1276 case OP_COND_EXC_NC:
1279 g_assert_not_reached ();
1284 #define COMPUTE_DISP(ins) \
1285 if (ins->flags & MONO_INST_BRLABEL) { \
1286 if (ins->inst_i0->inst_c0) \
1287 disp = (ins->inst_i0->inst_c0 - ((guint8*)code - cfg->native_code)) >> 2; \
1290 mono_add_patch_info (cfg, (guint8*)code - cfg->native_code, MONO_PATCH_INFO_LABEL, ins->inst_i0); \
1293 if (ins->inst_true_bb->native_offset) \
1294 disp = (ins->inst_true_bb->native_offset - ((guint8*)code - cfg->native_code)) >> 2; \
1297 mono_add_patch_info (cfg, (guint8*)code - cfg->native_code, MONO_PATCH_INFO_BB, ins->inst_true_bb); \
1302 #define DEFAULT_ICC sparc_xcc_short
1304 #define DEFAULT_ICC sparc_icc_short
1308 #define EMIT_COND_BRANCH_ICC(ins,cond,annul,filldelay,icc) \
1312 COMPUTE_DISP(ins); \
1313 predict = (disp != 0) ? 1 : 0; \
1314 g_assert (sparc_is_imm19 (disp)); \
1315 sparc_branchp (code, (annul), cond, icc, (predict), disp); \
1316 if (filldelay) sparc_nop (code); \
1318 #define EMIT_COND_BRANCH(ins,cond,annul,filldelay) EMIT_COND_BRANCH_ICC ((ins), (cond), (annul), (filldelay), (sparc_xcc_short))
1319 #define EMIT_FLOAT_COND_BRANCH(ins,cond,annul,filldelay) \
1323 COMPUTE_DISP(ins); \
1324 predict = (disp != 0) ? 1 : 0; \
1325 g_assert (sparc_is_imm19 (disp)); \
1326 sparc_fbranch (code, (annul), cond, disp); \
1327 if (filldelay) sparc_nop (code); \
1330 #define EMIT_COND_BRANCH_ICC(ins,cond,annul,filldelay,icc) g_assert_not_reached ()
1331 #define EMIT_COND_BRANCH_GENERAL(ins,bop,cond,annul,filldelay) \
1334 COMPUTE_DISP(ins); \
1335 g_assert (sparc_is_imm22 (disp)); \
1336 sparc_ ## bop (code, (annul), cond, disp); \
1337 if (filldelay) sparc_nop (code); \
1339 #define EMIT_COND_BRANCH(ins,cond,annul,filldelay) EMIT_COND_BRANCH_GENERAL((ins),branch,(cond),annul,filldelay)
1340 #define EMIT_FLOAT_COND_BRANCH(ins,cond,annul,filldelay) EMIT_COND_BRANCH_GENERAL((ins),fbranch,(cond),annul,filldelay)
1343 #define EMIT_COND_BRANCH_PREDICTED(ins,cond,annul,filldelay) \
1347 COMPUTE_DISP(ins); \
1348 predict = (disp != 0) ? 1 : 0; \
1349 g_assert (sparc_is_imm19 (disp)); \
1350 sparc_branchp (code, (annul), (cond), DEFAULT_ICC, (predict), disp); \
1351 if (filldelay) sparc_nop (code); \
1354 #define EMIT_COND_BRANCH_BPR(ins,bop,predict,annul,filldelay) \
1357 COMPUTE_DISP(ins); \
1358 g_assert (sparc_is_imm22 (disp)); \
1359 sparc_ ## bop (code, (annul), (predict), ins->sreg1, disp); \
1360 if (filldelay) sparc_nop (code); \
1363 /* emit an exception if condition is fail */
1365 * We put the exception throwing code out-of-line, at the end of the method
1367 #define EMIT_COND_SYSTEM_EXCEPTION_GENERAL(ins,cond,sexc_name,filldelay,icc) do { \
1368 mono_add_patch_info (cfg, (guint8*)(code) - (cfg)->native_code, \
1369 MONO_PATCH_INFO_EXC, sexc_name); \
1371 sparc_branchp (code, 0, (cond), (icc), 0, 0); \
1374 sparc_branch (code, 0, cond, 0); \
1376 if (filldelay) sparc_nop (code); \
1379 #define EMIT_COND_SYSTEM_EXCEPTION(ins,cond,sexc_name) EMIT_COND_SYSTEM_EXCEPTION_GENERAL(ins,cond,sexc_name,TRUE,DEFAULT_ICC)
1381 #define EMIT_COND_SYSTEM_EXCEPTION_BPR(ins,bop,sexc_name) do { \
1382 mono_add_patch_info (cfg, (guint8*)(code) - (cfg)->native_code, \
1383 MONO_PATCH_INFO_EXC, sexc_name); \
1384 sparc_ ## bop (code, FALSE, FALSE, ins->sreg1, 0); \
1388 #define EMIT_ALU_IMM(ins,op,setcc) do { \
1389 if (sparc_is_imm13 ((ins)->inst_imm)) \
1390 sparc_ ## op ## _imm (code, (setcc), (ins)->sreg1, ins->inst_imm, (ins)->dreg); \
1392 sparc_set (code, ins->inst_imm, sparc_o7); \
1393 sparc_ ## op (code, (setcc), (ins)->sreg1, sparc_o7, (ins)->dreg); \
1397 #define EMIT_LOAD_MEMBASE(ins,op) do { \
1398 if (sparc_is_imm13 (ins->inst_offset)) \
1399 sparc_ ## op ## _imm (code, ins->inst_basereg, ins->inst_offset, ins->dreg); \
1401 sparc_set (code, ins->inst_offset, sparc_o7); \
1402 sparc_ ## op (code, ins->inst_basereg, sparc_o7, ins->dreg); \
1407 #define EMIT_STORE_MEMBASE_IMM(ins,op) do { \
1409 if (ins->inst_imm == 0) \
1412 sparc_set (code, ins->inst_imm, sparc_o7); \
1415 if (!sparc_is_imm13 (ins->inst_offset)) { \
1416 sparc_set (code, ins->inst_offset, GP_SCRATCH_REG); \
1417 sparc_ ## op (code, sreg, ins->inst_destbasereg, GP_SCRATCH_REG); \
1420 sparc_ ## op ## _imm (code, sreg, ins->inst_destbasereg, ins->inst_offset); \
1423 #define EMIT_STORE_MEMBASE_REG(ins,op) do { \
1424 if (!sparc_is_imm13 (ins->inst_offset)) { \
1425 sparc_set (code, ins->inst_offset, sparc_o7); \
1426 sparc_ ## op (code, ins->sreg1, ins->inst_destbasereg, sparc_o7); \
1429 sparc_ ## op ## _imm (code, ins->sreg1, ins->inst_destbasereg, ins->inst_offset); \
1432 #define EMIT_CALL() do { \
1434 sparc_set_template (code, sparc_o7); \
1435 sparc_jmpl (code, sparc_o7, sparc_g0, sparc_o7); \
1438 sparc_call_simple (code, 0); \
1444 * A call template is 7 instructions long, so we want to avoid it if possible.
1447 emit_call (MonoCompile *cfg, guint32 *code, guint32 patch_type, gconstpointer data)
1451 /* FIXME: This only works if the target method is already compiled */
1452 if (0 && v64 && !cfg->compile_aot) {
1453 MonoJumpInfo patch_info;
1455 patch_info.type = patch_type;
1456 patch_info.data.target = data;
1458 target = mono_resolve_patch_target (cfg->method, cfg->domain, NULL, &patch_info, FALSE);
1460 /* FIXME: Add optimizations if the target is close enough */
1461 sparc_set (code, target, sparc_o7);
1462 sparc_jmpl (code, sparc_o7, sparc_g0, sparc_o7);
1466 mono_add_patch_info (cfg, (guint8*)code - cfg->native_code, patch_type, data);
/*
 * peephole_pass:
 *
 *   Perform simple peephole optimizations over the instruction list of BB.
 * Each instruction is examined together with the previous one (last_ins);
 * matching pairs are rewritten in place or unlinked from the list.
 */
1474 peephole_pass (MonoCompile *cfg, MonoBasicBlock *bb)
1476 MonoInst *ins, *last_ins = NULL;
1481 switch (ins->opcode) {
1483 /* remove unnecessary multiplication with 1 */
1484 if (ins->inst_imm == 1) {
1485 if (ins->dreg != ins->sreg1) {
1486 ins->opcode = OP_MOVE;
1488 last_ins->next = ins->next;
/*
 * Load-after-store elimination: a load from the exact address just stored
 * to is replaced by a register move (or dropped entirely when the store
 * source and the load destination are the same register).
 */
1495 case OP_LOAD_MEMBASE:
1496 case OP_LOADI4_MEMBASE:
1498 * OP_STORE_MEMBASE_REG reg, offset(basereg)
1499 * OP_LOAD_MEMBASE offset(basereg), reg
1501 if (last_ins && (last_ins->opcode == OP_STOREI4_MEMBASE_REG
1502 || last_ins->opcode == OP_STORE_MEMBASE_REG) &&
1503 ins->inst_basereg == last_ins->inst_destbasereg &&
1504 ins->inst_offset == last_ins->inst_offset) {
1505 if (ins->dreg == last_ins->sreg1) {
1506 last_ins->next = ins->next;
1510 //static int c = 0; printf ("MATCHX %s %d\n", cfg->method->name,c++);
1511 ins->opcode = OP_MOVE;
1512 ins->sreg1 = last_ins->sreg1;
1516 * Note: reg1 must be different from the basereg in the second load
1517 * OP_LOAD_MEMBASE offset(basereg), reg1
1518 * OP_LOAD_MEMBASE offset(basereg), reg2
1520 * OP_LOAD_MEMBASE offset(basereg), reg1
1521 * OP_MOVE reg1, reg2
1523 } if (last_ins && (last_ins->opcode == OP_LOADI4_MEMBASE
1524 || last_ins->opcode == OP_LOAD_MEMBASE) &&
1525 ins->inst_basereg != last_ins->dreg &&
1526 ins->inst_basereg == last_ins->inst_basereg &&
1527 ins->inst_offset == last_ins->inst_offset) {
1529 if (ins->dreg == last_ins->dreg) {
1530 last_ins->next = ins->next;
1534 ins->opcode = OP_MOVE;
1535 ins->sreg1 = last_ins->dreg;
1538 //g_assert_not_reached ();
1542 * OP_STORE_MEMBASE_IMM imm, offset(basereg)
1543 * OP_LOAD_MEMBASE offset(basereg), reg
1545 * OP_STORE_MEMBASE_IMM imm, offset(basereg)
1546 * OP_ICONST reg, imm
1548 } else if (last_ins && (last_ins->opcode == OP_STOREI4_MEMBASE_IMM
1549 || last_ins->opcode == OP_STORE_MEMBASE_IMM) &&
1550 ins->inst_basereg == last_ins->inst_destbasereg &&
1551 ins->inst_offset == last_ins->inst_offset) {
1552 //static int c = 0; printf ("MATCHX %s %d\n", cfg->method->name,c++);
1553 ins->opcode = OP_ICONST;
1554 ins->inst_c0 = last_ins->inst_imm;
1555 g_assert_not_reached (); // check this rule
/* Same load-after-store folding for byte-sized accesses. */
1560 case OP_LOADU1_MEMBASE:
1561 case OP_LOADI1_MEMBASE:
1562 if (last_ins && (last_ins->opcode == OP_STOREI1_MEMBASE_REG) &&
1563 ins->inst_basereg == last_ins->inst_destbasereg &&
1564 ins->inst_offset == last_ins->inst_offset) {
1565 if (ins->dreg == last_ins->sreg1) {
1566 last_ins->next = ins->next;
1570 //static int c = 0; printf ("MATCHX %s %d\n", cfg->method->name,c++);
1571 ins->opcode = OP_MOVE;
1572 ins->sreg1 = last_ins->sreg1;
/* Same load-after-store folding for halfword-sized accesses. */
1576 case OP_LOADU2_MEMBASE:
1577 case OP_LOADI2_MEMBASE:
1578 if (last_ins && (last_ins->opcode == OP_STOREI2_MEMBASE_REG) &&
1579 ins->inst_basereg == last_ins->inst_destbasereg &&
1580 ins->inst_offset == last_ins->inst_offset) {
1581 if (ins->dreg == last_ins->sreg1) {
1582 last_ins->next = ins->next;
1586 //static int c = 0; printf ("MATCHX %s %d\n", cfg->method->name,c++);
1587 ins->opcode = OP_MOVE;
1588 ins->sreg1 = last_ins->sreg1;
1592 case OP_STOREI4_MEMBASE_IMM:
1593 /* Convert pairs of 0 stores to a dword 0 store */
1594 /* Used when initializing temporaries */
1595 /* We know sparc_fp is dword aligned */
1596 if (last_ins && (last_ins->opcode == OP_STOREI4_MEMBASE_IMM) &&
1597 (ins->inst_destbasereg == last_ins->inst_destbasereg) &&
1598 (ins->inst_destbasereg == sparc_fp) &&
1599 (ins->inst_offset < 0) &&
1600 ((ins->inst_offset % 8) == 0) &&
1601 ((ins->inst_offset == last_ins->inst_offset - 4)) &&
1602 (ins->inst_imm == 0) &&
1603 (last_ins->inst_imm == 0)) {
1605 last_ins->opcode = OP_STOREI8_MEMBASE_IMM;
1606 last_ins->inst_offset = ins->inst_offset;
1607 last_ins->next = ins->next;
1619 case OP_COND_EXC_EQ:
1620 case OP_COND_EXC_GE:
1621 case OP_COND_EXC_GT:
1622 case OP_COND_EXC_LE:
1623 case OP_COND_EXC_LT:
1624 case OP_COND_EXC_NE_UN:
1626 * Convert compare with zero+branch to BRcc
1629 * This only works in 64 bit mode, since it examines all 64
1630 * bits of the register.
1631 * Only do this if the method is small since BPr only has a 16bit
1634 if (v64 && (mono_method_get_header (cfg->method)->code_size < 10000) && last_ins &&
1635 (last_ins->opcode == OP_COMPARE_IMM) &&
1636 (last_ins->inst_imm == 0)) {
1637 MonoInst *next = ins->next;
1638 switch (ins->opcode) {
1640 ins->opcode = OP_SPARC_BRZ;
1643 ins->opcode = OP_SPARC_BRNZ;
1646 ins->opcode = OP_SPARC_BRLZ;
1649 ins->opcode = OP_SPARC_BRGZ;
1652 ins->opcode = OP_SPARC_BRGEZ;
1655 ins->opcode = OP_SPARC_BRLEZ;
1657 case OP_COND_EXC_EQ:
1658 ins->opcode = OP_SPARC_COND_EXC_EQZ;
1660 case OP_COND_EXC_GE:
1661 ins->opcode = OP_SPARC_COND_EXC_GEZ;
1663 case OP_COND_EXC_GT:
1664 ins->opcode = OP_SPARC_COND_EXC_GTZ;
1666 case OP_COND_EXC_LE:
1667 ins->opcode = OP_SPARC_COND_EXC_LEZ;
1669 case OP_COND_EXC_LT:
1670 ins->opcode = OP_SPARC_COND_EXC_LTZ;
1672 case OP_COND_EXC_NE_UN:
1673 ins->opcode = OP_SPARC_COND_EXC_NEZ;
1676 g_assert_not_reached ();
/* The folded BRcc/COND_EXC op tests the compare's source register directly. */
1678 ins->sreg1 = last_ins->sreg1;
1680 last_ins->next = next;
/* Eliminate moves to self and adjacent move pairs that cancel out. */
1691 if (ins->dreg == ins->sreg1) {
1693 last_ins->next = ins->next;
1698 * OP_MOVE sreg, dreg
1699 * OP_MOVE dreg, sreg
1701 if (last_ins && last_ins->opcode == OP_MOVE &&
1702 ins->sreg1 == last_ins->dreg &&
1703 ins->dreg == last_ins->sreg1) {
1704 last_ins->next = ins->next;
/* Unlinking instructions above may have changed the block's tail. */
1713 bb->last_ins = last_ins;
1716 static const char*const * ins_spec = sparc_desc;
/*
 * get_ins_spec:
 *
 *   Return the machine description entry for OPCODE, falling back to the
 * CEE_ADD entry when the opcode has no description of its own.
 */
1718 static inline const char*
1719 get_ins_spec (int opcode)
1721 if (ins_spec [opcode])
1722 return ins_spec [opcode];
1724 return ins_spec [CEE_ADD];
/*
 * mono_spillvar_offset_float:
 *
 *   Return the frame offset (including MONO_SPARC_STACK_BIAS) of float spill
 * slot SPILLVAR, allocating new 8-byte aligned, 8-byte sized slots below the
 * frame pointer on demand.  Slot records live in cfg->spill_info_float and
 * are allocated from the compile's mempool, so they need no explicit free.
 */
1728 mono_spillvar_offset_float (MonoCompile *cfg, int spillvar)
1730 MonoSpillInfo **si, *info;
1733 si = &cfg->spill_info_float;
1735 while (i <= spillvar) {
1738 *si = info = mono_mempool_alloc (cfg->mempool, sizeof (MonoSpillInfo));
/* Grow the frame by one double and keep it dword aligned (ABI requirement). */
1740 cfg->stack_offset += sizeof (double);
1741 cfg->stack_offset = ALIGN_TO (cfg->stack_offset, 8);
1742 info->offset = - cfg->stack_offset;
1746 return MONO_SPARC_STACK_BIAS + (*si)->offset;
1752 g_assert_not_reached ();
1756 /* FIXME: Strange loads from the stack in basic-float.cs:test_2_rem */
/*
 * mono_arch_local_regalloc:
 *
 *   Architecture entry point for local register allocation; delegates to
 * the generic local allocator.
 */
1759 mono_arch_local_regalloc (MonoCompile *cfg, MonoBasicBlock *bb)
1761 mono_local_regalloc (cfg, bb);
/*
 * sparc_patch:
 *
 *   Patch the instruction (or instruction sequence) at CODE so that it
 * transfers control to / materializes TARGET.  The instruction kind is
 * decoded from its op/op2/op3 fields and the appropriate displacement or
 * immediate bits are rewritten in place.  DISP is the word displacement
 * used by the branch formats.
 */
1765 sparc_patch (guint32 *code, const gpointer target)
1768 guint32 ins = *code;
1769 guint32 op = ins >> 30;
1770 guint32 op2 = (ins >> 22) & 0x7;
1771 guint32 rd = (ins >> 25) & 0x1f;
1772 guint8* target8 = (guint8*)target;
/* Branch displacements are in words, hence the >> 2. */
1773 gint64 disp = (target8 - (guint8*)code) >> 2;
1776 // g_print ("patching 0x%08x (0x%08x) to point to 0x%08x\n", code, ins, target);
/* op==0, op2==2: Bicc branch, 22-bit word displacement. */
1778 if ((op == 0) && (op2 == 2)) {
1779 if (!sparc_is_imm22 (disp))
1782 *code = ((ins >> 22) << 22) | (disp & 0x3fffff);
/* op==0, op2==1: BPcc branch, 19-bit word displacement. */
1784 else if ((op == 0) && (op2 == 1)) {
1785 if (!sparc_is_imm19 (disp))
1788 *code = ((ins >> 19) << 19) | (disp & 0x7ffff);
/* op==0, op2==3: BPr branch; the 16-bit displacement is split into a
 * 2-bit high field (bits 20-21) and a 14-bit low field. */
1790 else if ((op == 0) && (op2 == 3)) {
1791 if (!sparc_is_imm16 (disp))
1794 *code &= ~(0x180000 | 0x3fff);
1795 *code |= ((disp << 21) & (0x180000)) | (disp & 0x3fff);
/* op==0, op2==6: FBfcc branch, 22-bit word displacement. */
1797 else if ((op == 0) && (op2 == 6)) {
1798 if (!sparc_is_imm22 (disp))
1801 *code = ((ins >> 22) << 22) | (disp & 0x3fffff);
/* op==0, op2==4: sethi; look at the following instruction to decide how
 * the (sethi, ...) pair materializes its constant. */
1803 else if ((op == 0) && (op2 == 4)) {
1804 guint32 ins2 = code [1];
1806 if (((ins2 >> 30) == 2) && (((ins2 >> 19) & 0x3f) == 2)) {
1807 /* sethi followed by or */
1809 sparc_set (p, target8, rd);
1810 while (p <= (code + 1))
1813 else if (ins2 == 0x01000000) {
1814 /* sethi followed by nop */
1816 sparc_set (p, target8, rd);
1817 while (p <= (code + 1))
1820 else if ((sparc_inst_op (ins2) == 3) && (sparc_inst_imm (ins2))) {
1821 /* sethi followed by load/store */
/* Rewrite the sethi's 22 high bits and the memory op's 10 low bits. */
1823 guint32 t = (guint32)target8;
1824 *code &= ~(0x3fffff);
1826 *(code + 1) &= ~(0x3ff);
1827 *(code + 1) |= (t & 0x3ff);
/* 4-instruction sparc_set sequence loading through %g1. */
1831 (sparc_inst_rd (ins) == sparc_g1) &&
1832 (sparc_inst_op (c [1]) == 0) && (sparc_inst_op2 (c [1]) == 4) &&
1833 (sparc_inst_op (c [2]) == 2) && (sparc_inst_op3 (c [2]) == 2) &&
1834 (sparc_inst_op (c [3]) == 2) && (sparc_inst_op3 (c [3]) == 2))
1838 reg = sparc_inst_rd (c [1]);
1839 sparc_set (p, target8, reg);
1843 else if ((sparc_inst_op (ins2) == 2) && (sparc_inst_op3 (ins2) == 0x38) &&
1844 (sparc_inst_imm (ins2))) {
1845 /* sethi followed by jmpl */
1847 guint32 t = (guint32)target8;
1848 *code &= ~(0x3fffff);
1850 *(code + 1) &= ~(0x3ff);
1851 *(code + 1) |= (t & 0x3ff);
/* op==1: call instruction, 30-bit word displacement. */
1857 else if (op == 01) {
1858 gint64 disp = (target8 - (guint8*)code) >> 2;
1860 if (!sparc_is_imm30 (disp))
1862 sparc_call_simple (code, target8 - (guint8*)code);
/* op==2, op3==2: add-with-immediate; patch the 13-bit immediate. */
1864 else if ((op == 2) && (sparc_inst_op3 (ins) == 0x2) && sparc_inst_imm (ins)) {
1866 g_assert (sparc_is_imm13 (target8));
1868 *code |= (guint32)target8;
1870 else if ((sparc_inst_op (ins) == 2) && (sparc_inst_op3 (ins) == 0x7)) {
1871 /* sparc_set case 5. */
1875 reg = sparc_inst_rd (c [3]);
1876 sparc_set (p, target, reg);
1883 // g_print ("patched with 0x%08x\n", ins);
1887 * mono_sparc_emit_save_lmf:
1889 * Emit the code necessary to push a new entry onto the lmf stack. Used by
1890 * trampolines as well.
/*
 * Emit code that links a new MonoLMF (at %fp + LMF_OFFSET) into the LMF
 * list.  On entry %o0 holds the address of the thread's lmf_addr pointer
 * (it is stored into the lmf_addr field and dereferenced below).  Returns
 * the updated code pointer via the sparc_* emitter macros.
 */
1893 mono_sparc_emit_save_lmf (guint32 *code, guint32 lmf_offset)
/* lmf->lmf_addr = <address in %o0> */
1896 sparc_sti_imm (code, sparc_o0, sparc_fp, lmf_offset + G_STRUCT_OFFSET (MonoLMF, lmf_addr));
1897 /* Save previous_lmf */
1898 sparc_ldi (code, sparc_o0, sparc_g0, sparc_o7);
1899 sparc_sti_imm (code, sparc_o7, sparc_fp, lmf_offset + G_STRUCT_OFFSET (MonoLMF, previous_lmf));
/* *lmf_addr = &lmf: make this frame's LMF the current one. */
1901 sparc_add_imm (code, FALSE, sparc_fp, lmf_offset, sparc_o7);
1902 sparc_sti (code, sparc_o7, sparc_o0, sparc_g0);
/*
 * Emit code that unlinks the MonoLMF at %fp + LMF_OFFSET from the LMF
 * list, i.e. the inverse of mono_sparc_emit_save_lmf.  Clobbers %l0/%l1.
 */
1908 mono_sparc_emit_restore_lmf (guint32 *code, guint32 lmf_offset)
1910 /* Load previous_lmf */
1911 sparc_ldi_imm (code, sparc_fp, lmf_offset + G_STRUCT_OFFSET (MonoLMF, previous_lmf), sparc_l0);
/* Load lmf_addr */
1913 sparc_ldi_imm (code, sparc_fp, lmf_offset + G_STRUCT_OFFSET (MonoLMF, lmf_addr), sparc_l1);
1914 /* *(lmf) = previous_lmf */
1915 sparc_sti (code, sparc_l0, sparc_l1, sparc_g0);
/*
 * emit_save_sp_to_lmf:
 *
 *   If the method saves an LMF, emit code that records the current %sp in
 * the lmf's sp field immediately before a call.  No code is emitted for
 * methods without save_lmf.
 */
1920 emit_save_sp_to_lmf (MonoCompile *cfg, guint32 *code)
1923 * Since register windows are saved to the current value of %sp, we need to
1924 * set the sp field in the lmf before the call, not in the prolog.
1926 if (cfg->method->save_lmf) {
1927 gint32 lmf_offset = MONO_SPARC_STACK_BIAS - cfg->arch.lmf_offset;
1930 sparc_sti_imm (code, sparc_sp, sparc_fp, lmf_offset + G_STRUCT_OFFSET (MonoLMF, sp));
/*
 * emit_vret_token:
 *
 *   After a pinvoke call that returns a structure, emit the 'unimp <size>'
 * marker instruction the SPARC ABI requires; native callees skip over it
 * when returning a struct.  The low 12 bits encode the struct's size.
 */
1937 emit_vret_token (MonoInst *ins, guint32 *code)
1939 MonoCallInst *call = (MonoCallInst*)ins;
1943 * The sparc ABI requires that calls to functions which return a structure
1944 * contain an additional unimpl instruction which is checked by the callee.
1946 if (call->signature->pinvoke && MONO_TYPE_ISSTRUCT(call->signature->ret)) {
1947 if (call->signature->ret->type == MONO_TYPE_TYPEDBYREF)
1948 size = mono_type_stack_size (call->signature->ret, NULL);
1950 size = mono_class_native_size (call->signature->ret->data.klass, NULL);
1951 sparc_unimp (code, size & 0xfff);
/*
 * emit_move_return_value:
 *
 *   After a call instruction INS, emit code moving the value returned in
 * the ABI registers (%o0/%o1 for integers and longs, %f0/%f1 for floats)
 * into the call's destination register(s).
 */
1958 emit_move_return_value (MonoInst *ins, guint32 *code)
1960 /* Move return value to the target register */
1961 /* FIXME: do more things in the local reg allocator */
1962 switch (ins->opcode) {
1964 case OP_VOIDCALL_REG:
1965 case OP_VOIDCALL_MEMBASE:
/* Plain calls: the allocator must already have targeted %o0. */
1969 case OP_CALL_MEMBASE:
1970 g_assert (ins->dreg == sparc_o0);
1974 case OP_LCALL_MEMBASE:
1976 * ins->dreg is the least significant reg due to the lreg: LCALL rule
1977 * in inssel-long32.brg.
1980 sparc_mov_reg_reg (code, sparc_o0, ins->dreg);
1982 g_assert (ins->dreg == sparc_o1);
/* Float calls: result arrives in %f0 (single) or %f0/%f1 (double);
 * R4 results are widened to double for the rest of the JIT. */
1987 case OP_FCALL_MEMBASE:
1989 if (((MonoCallInst*)ins)->signature->ret->type == MONO_TYPE_R4) {
1990 sparc_fmovs (code, sparc_f0, ins->dreg);
1991 sparc_fstod (code, ins->dreg, ins->dreg);
1994 sparc_fmovd (code, sparc_f0, ins->dreg);
1996 sparc_fmovs (code, sparc_f0, ins->dreg);
1997 if (((MonoCallInst*)ins)->signature->ret->type == MONO_TYPE_R4)
1998 sparc_fstod (code, ins->dreg, ins->dreg);
2000 sparc_fmovs (code, sparc_f1, ins->dreg + 1);
2005 case OP_VCALL_MEMBASE:
2015 * emit_load_volatile_arguments:
2017 * Load volatile arguments from the stack to the original input registers.
2018 * Required before a tail call.
/*
 * emit_load_volatile_arguments:
 *
 *   Emit code reloading each argument from its stack home back into the
 * incoming register(s) assigned by the calling convention (the inverse of
 * the spilling done in emit_prolog).  Needed before a tail call so the
 * callee sees the original argument registers.  Uses %i5/%o7 as scratch.
 */
2021 emit_load_volatile_arguments (MonoCompile *cfg, guint32 *code)
2023 MonoMethod *method = cfg->method;
2024 MonoMethodSignature *sig;
2029 /* FIXME: Generate intermediate code instead */
2031 sig = mono_method_signature (method);
2033 cinfo = get_call_info (sig, FALSE);
2035 /* This is the opposite of the code in emit_prolog */
2037 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
2038 ArgInfo *ainfo = cinfo->args + i;
2039 gint32 stack_offset;
2041 inst = cfg->varinfo [i];
/* Argument 0 is the implicit 'this' when present. */
2043 if (sig->hasthis && (i == 0))
2044 arg_type = &mono_defaults.object_class->byval_arg;
2046 arg_type = sig->params [i - sig->hasthis];
2048 stack_offset = ainfo->offset + ARGS_OFFSET;
2049 ireg = sparc_i0 + ainfo->reg;
2051 if (ainfo->storage == ArgInSplitRegStack) {
2052 g_assert (inst->opcode == OP_REGOFFSET);
2054 if (!sparc_is_imm13 (stack_offset))
2056 sparc_st_imm (code, inst->inst_basereg, stack_offset, sparc_i5);
/* 32-bit mode doubles live in two 4-byte halves. */
2059 if (!v64 && !arg_type->byref && (arg_type->type == MONO_TYPE_R8)) {
2060 if (ainfo->storage == ArgInIRegPair) {
2061 if (!sparc_is_imm13 (inst->inst_offset + 4))
2063 sparc_ld_imm (code, inst->inst_basereg, inst->inst_offset, ireg);
2064 sparc_ld_imm (code, inst->inst_basereg, inst->inst_offset + 4, ireg + 1);
2067 if (ainfo->storage == ArgInSplitRegStack) {
2068 if (stack_offset != inst->inst_offset) {
2069 sparc_ld_imm (code, inst->inst_basereg, inst->inst_offset, sparc_i5);
2070 sparc_ld_imm (code, inst->inst_basereg, inst->inst_offset + 4, sparc_o7);
2071 sparc_st_imm (code, sparc_o7, sparc_fp, stack_offset + 4);
2076 if (ainfo->storage == ArgOnStackPair) {
2077 if (stack_offset != inst->inst_offset) {
2078 /* stack_offset is not dword aligned, so we need to make a copy */
2079 sparc_ld_imm (code, inst->inst_basereg, inst->inst_offset, sparc_o7);
2080 sparc_st_imm (code, sparc_o7, sparc_fp, stack_offset);
2082 sparc_ld_imm (code, inst->inst_basereg, inst->inst_offset + 4, sparc_o7);
2083 sparc_st_imm (code, sparc_o7, sparc_fp, stack_offset + 4);
2088 g_assert_not_reached ();
2091 if ((ainfo->storage == ArgInIReg) && (inst->opcode != OP_REGVAR)) {
2092 /* Argument in register, but need to be saved to stack */
2093 if (!sparc_is_imm13 (stack_offset))
/* Pick the load width from the slot's alignment within the args area. */
2095 if ((stack_offset - ARGS_OFFSET) & 0x1)
2096 /* FIXME: Is this ldsb or ldub ? */
2097 sparc_ldsb_imm (code, inst->inst_basereg, stack_offset, ireg);
2099 if ((stack_offset - ARGS_OFFSET) & 0x2)
2100 sparc_ldsh_imm (code, inst->inst_basereg, stack_offset, ireg);
2102 if ((stack_offset - ARGS_OFFSET) & 0x4)
2103 sparc_ld_imm (code, inst->inst_basereg, stack_offset, ireg);
2106 sparc_ldx_imm (code, inst->inst_basereg, stack_offset, ireg);
2108 sparc_ld_imm (code, inst->inst_basereg, stack_offset, ireg);
2111 else if ((ainfo->storage == ArgInIRegPair) && (inst->opcode != OP_REGVAR)) {
2112 /* Argument in regpair, but need to be saved to stack */
2113 if (!sparc_is_imm13 (inst->inst_offset + 4))
2115 sparc_ld_imm (code, inst->inst_basereg, inst->inst_offset, ireg);
2116 sparc_st_imm (code, inst->inst_basereg, inst->inst_offset + 4, ireg + 1);
2118 else if ((ainfo->storage == ArgInFloatReg) && (inst->opcode != OP_REGVAR)) {
2121 else if ((ainfo->storage == ArgInDoubleReg) && (inst->opcode != OP_REGVAR)) {
2125 if ((ainfo->storage == ArgInSplitRegStack) || (ainfo->storage == ArgOnStack))
2126 if (inst->opcode == OP_REGVAR)
2127 /* FIXME: Load the argument into memory */
2137 * mono_sparc_is_virtual_call:
2139 * Determine whether the instruction at CODE is a virtual call.
/*
 * Returns whether the instruction at CODE is the register-indirect call
 * (jmpl, op==2/op3==0x38) used for virtual calls.  Virtual call sites are
 * identified by the marker nop ('or %g0, 0xca, %g0') that the code
 * generator places in the call's delay slot.
 */
2142 mono_sparc_is_virtual_call (guint32 *code)
2149 if ((sparc_inst_op (*code) == 0x2) && (sparc_inst_op3 (*code) == 0x38)) {
2151 * Register indirect call. If it is a virtual call, then the
2152 * instruction in the delay slot is a special kind of nop.
2155 /* Construct special nop */
2156 sparc_or_imm (p, FALSE, sparc_g0, 0xca, sparc_g0);
/* Compare the delay-slot instruction against the freshly built marker. */
2159 if (code [1] == p [0])
2167 * mono_arch_get_vcall_slot_addr:
2169 * Determine the vtable slot used by a virtual call.
/*
 * mono_arch_get_vcall_slot_addr:
 *
 *   Given the address of a virtual call site (CODE8) and the saved register
 * state (REGS, indexed from %o0), recover the address of the vtable slot
 * the call loaded its target from, by decoding the preceding
 * 'ld [base + disp], reg; jmpl reg' pair.
 */
2172 mono_arch_get_vcall_slot_addr (guint8 *code8, gpointer *regs)
2174 guint32 *code = (guint32*)(gpointer)code8;
2175 guint32 ins = code [0];
2176 guint32 prev_ins = code [-1];
/* Flush register windows so REGS reflects the caller's registers. */
2178 mono_sparc_flushw ();
2180 if (!mono_sparc_is_virtual_call (code))
2183 if ((sparc_inst_op (ins) == 0x2) && (sparc_inst_op3 (ins) == 0x38)) {
2184 if ((sparc_inst_op (prev_ins) == 0x3) && (sparc_inst_op3 (prev_ins) == 0 || sparc_inst_op3 (prev_ins) == 0xb)) {
2185 /* ld [r1 + CONST ], r2; call r2 */
2186 guint32 base = sparc_inst_rs1 (prev_ins);
2187 guint32 disp = sparc_inst_imm13 (prev_ins);
/* The load's destination must be the register the jmpl jumps through. */
2190 g_assert (sparc_inst_rd (prev_ins) == sparc_inst_rs1 (ins));
2192 g_assert ((base >= sparc_o0) && (base <= sparc_i7));
2194 base_val = regs [base - sparc_o0];
2196 return (gpointer)((guint8*)base_val + disp);
2199 g_assert_not_reached ();
2202 g_assert_not_reached ();
2208 * Some conventions used in the following code.
2209 * 2) The only scratch registers we have are o7 and g1. We try to
2210 * stick to o7 when we can, and use g1 when necessary.
2214 mono_arch_output_basic_block (MonoCompile *cfg, MonoBasicBlock *bb)
2219 guint32 *code = (guint32*)(cfg->native_code + cfg->code_len);
2220 MonoInst *last_ins = NULL;
2224 if (cfg->opt & MONO_OPT_PEEPHOLE)
2225 peephole_pass (cfg, bb);
2227 if (cfg->verbose_level > 2)
2228 g_print ("Basic block %d starting at offset 0x%x\n", bb->block_num, bb->native_offset);
2230 cpos = bb->max_offset;
2232 if (cfg->prof_options & MONO_PROFILE_COVERAGE) {
2240 offset = (guint8*)code - cfg->native_code;
2242 spec = ins_spec [ins->opcode];
2244 spec = ins_spec [CEE_ADD];
2246 max_len = ((guint8 *)spec)[MONO_INST_LEN];
2248 if (offset > (cfg->code_size - max_len - 16)) {
2249 cfg->code_size *= 2;
2250 cfg->native_code = g_realloc (cfg->native_code, cfg->code_size);
2251 code = (guint32*)(cfg->native_code + offset);
2253 code_start = (guint8*)code;
2254 // if (ins->cil_code)
2255 // g_print ("cil code\n");
2256 mono_debug_record_line_number (cfg, ins, offset);
2258 switch (ins->opcode) {
2259 case OP_STOREI1_MEMBASE_IMM:
2260 EMIT_STORE_MEMBASE_IMM (ins, stb);
2262 case OP_STOREI2_MEMBASE_IMM:
2263 EMIT_STORE_MEMBASE_IMM (ins, sth);
2265 case OP_STORE_MEMBASE_IMM:
2266 EMIT_STORE_MEMBASE_IMM (ins, sti);
2268 case OP_STOREI4_MEMBASE_IMM:
2269 EMIT_STORE_MEMBASE_IMM (ins, st);
2271 case OP_STOREI8_MEMBASE_IMM:
2273 EMIT_STORE_MEMBASE_IMM (ins, stx);
2275 /* Only generated by peephole opts */
2276 g_assert ((ins->inst_offset % 8) == 0);
2277 g_assert (ins->inst_imm == 0);
2278 EMIT_STORE_MEMBASE_IMM (ins, stx);
2281 case OP_STOREI1_MEMBASE_REG:
2282 EMIT_STORE_MEMBASE_REG (ins, stb);
2284 case OP_STOREI2_MEMBASE_REG:
2285 EMIT_STORE_MEMBASE_REG (ins, sth);
2287 case OP_STOREI4_MEMBASE_REG:
2288 EMIT_STORE_MEMBASE_REG (ins, st);
2290 case OP_STOREI8_MEMBASE_REG:
2292 EMIT_STORE_MEMBASE_REG (ins, stx);
2294 /* Only used by OP_MEMSET */
2295 EMIT_STORE_MEMBASE_REG (ins, std);
2298 case OP_STORE_MEMBASE_REG:
2299 EMIT_STORE_MEMBASE_REG (ins, sti);
2303 sparc_ldx (code, ins->inst_c0, sparc_g0, ins->dreg);
2305 sparc_ld (code, ins->inst_c0, sparc_g0, ins->dreg);
2310 sparc_ldsw (code, ins->inst_c0, sparc_g0, ins->dreg);
2312 sparc_ld (code, ins->inst_c0, sparc_g0, ins->dreg);
2316 sparc_ld (code, ins->inst_c0, sparc_g0, ins->dreg);
2319 sparc_set (code, ins->inst_c0, ins->dreg);
2320 sparc_ld (code, ins->dreg, sparc_g0, ins->dreg);
2322 case OP_LOADI4_MEMBASE:
2324 EMIT_LOAD_MEMBASE (ins, ldsw);
2326 EMIT_LOAD_MEMBASE (ins, ld);
2329 case OP_LOADU4_MEMBASE:
2330 EMIT_LOAD_MEMBASE (ins, ld);
2332 case OP_LOADU1_MEMBASE:
2333 EMIT_LOAD_MEMBASE (ins, ldub);
2335 case OP_LOADI1_MEMBASE:
2336 EMIT_LOAD_MEMBASE (ins, ldsb);
2338 case OP_LOADU2_MEMBASE:
2339 EMIT_LOAD_MEMBASE (ins, lduh);
2341 case OP_LOADI2_MEMBASE:
2342 EMIT_LOAD_MEMBASE (ins, ldsh);
2344 case OP_LOAD_MEMBASE:
2346 EMIT_LOAD_MEMBASE (ins, ldx);
2348 EMIT_LOAD_MEMBASE (ins, ld);
2352 case OP_LOADI8_MEMBASE:
2353 EMIT_LOAD_MEMBASE (ins, ldx);
2357 sparc_sll_imm (code, ins->sreg1, 24, sparc_o7);
2358 sparc_sra_imm (code, sparc_o7, 24, ins->dreg);
2361 sparc_sll_imm (code, ins->sreg1, 16, sparc_o7);
2362 sparc_sra_imm (code, sparc_o7, 16, ins->dreg);
2365 sparc_and_imm (code, FALSE, ins->sreg1, 0xff, ins->dreg);
2368 sparc_sll_imm (code, ins->sreg1, 16, sparc_o7);
2369 sparc_srl_imm (code, sparc_o7, 16, ins->dreg);
2371 case CEE_CONV_OVF_U4:
2372 /* Only used on V9 */
2373 sparc_cmp_imm (code, ins->sreg1, 0);
2374 mono_add_patch_info (cfg, (guint8*)(code) - (cfg)->native_code,
2375 MONO_PATCH_INFO_EXC, "OverflowException");
2376 sparc_branchp (code, 0, sparc_bl, sparc_xcc_short, 0, 0);
2378 sparc_set (code, 1, sparc_o7);
2379 sparc_sllx_imm (code, sparc_o7, 32, sparc_o7);
2380 sparc_cmp (code, ins->sreg1, sparc_o7);
2381 mono_add_patch_info (cfg, (guint8*)(code) - (cfg)->native_code,
2382 MONO_PATCH_INFO_EXC, "OverflowException");
2383 sparc_branchp (code, 0, sparc_bge, sparc_xcc_short, 0, 0);
2385 sparc_mov_reg_reg (code, ins->sreg1, ins->dreg);
2387 case CEE_CONV_OVF_I4_UN:
2388 /* Only used on V9 */
2393 /* Only used on V9 */
2394 sparc_srl_imm (code, ins->sreg1, 0, ins->dreg);
2398 /* Only used on V9 */
2399 sparc_sra_imm (code, ins->sreg1, 0, ins->dreg);
2404 sparc_cmp (code, ins->sreg1, ins->sreg2);
2406 case OP_COMPARE_IMM:
2407 case OP_ICOMPARE_IMM:
2408 if (sparc_is_imm13 (ins->inst_imm))
2409 sparc_cmp_imm (code, ins->sreg1, ins->inst_imm);
2411 sparc_set (code, ins->inst_imm, sparc_o7);
2412 sparc_cmp (code, ins->sreg1, sparc_o7);
2415 case OP_X86_TEST_NULL:
2416 sparc_cmp_imm (code, ins->sreg1, 0);
2420 * gdb does not like encountering 'ta 1' in the debugged code. So
2421 * instead of emitting a trap, we emit a call a C function and place a
2424 //sparc_ta (code, 1);
2425 mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_ABS, mono_sparc_break);
2430 sparc_add (code, TRUE, ins->sreg1, ins->sreg2, ins->dreg);
2434 sparc_add (code, FALSE, ins->sreg1, ins->sreg2, ins->dreg);
2439 /* according to inssel-long32.brg, this should set cc */
2440 EMIT_ALU_IMM (ins, add, TRUE);
2444 /* according to inssel-long32.brg, this should set cc */
2445 sparc_addx (code, TRUE, ins->sreg1, ins->sreg2, ins->dreg);
2449 EMIT_ALU_IMM (ins, addx, TRUE);
2453 sparc_sub (code, TRUE, ins->sreg1, ins->sreg2, ins->dreg);
2457 sparc_sub (code, FALSE, ins->sreg1, ins->sreg2, ins->dreg);
2462 /* according to inssel-long32.brg, this should set cc */
2463 EMIT_ALU_IMM (ins, sub, TRUE);
2467 /* according to inssel-long32.brg, this should set cc */
2468 sparc_subx (code, TRUE, ins->sreg1, ins->sreg2, ins->dreg);
2472 EMIT_ALU_IMM (ins, subx, TRUE);
2476 sparc_and (code, FALSE, ins->sreg1, ins->sreg2, ins->dreg);
2480 EMIT_ALU_IMM (ins, and, FALSE);
2484 /* Sign extend sreg1 into %y */
2485 sparc_sra_imm (code, ins->sreg1, 31, sparc_o7);
2486 sparc_wry (code, sparc_o7, sparc_g0);
2487 sparc_sdiv (code, TRUE, ins->sreg1, ins->sreg2, ins->dreg);
2488 EMIT_COND_SYSTEM_EXCEPTION_GENERAL (code, sparc_boverflow, "ArithmeticException", TRUE, sparc_icc_short);
2492 sparc_wry (code, sparc_g0, sparc_g0);
2493 sparc_udiv (code, FALSE, ins->sreg1, ins->sreg2, ins->dreg);
2498 /* Transform division into a shift */
2499 for (i = 1; i < 30; ++i) {
2501 if (ins->inst_imm == imm)
2507 sparc_srl_imm (code, ins->sreg1, 31, sparc_o7);
2508 sparc_add (code, FALSE, ins->sreg1, sparc_o7, ins->dreg);
2509 sparc_sra_imm (code, ins->dreg, 1, ins->dreg);
2512 /* http://compilers.iecc.com/comparch/article/93-04-079 */
2513 sparc_sra_imm (code, ins->sreg1, 31, sparc_o7);
2514 sparc_srl_imm (code, sparc_o7, 32 - i, sparc_o7);
2515 sparc_add (code, FALSE, ins->sreg1, sparc_o7, ins->dreg);
2516 sparc_sra_imm (code, ins->dreg, i, ins->dreg);
2520 /* Sign extend sreg1 into %y */
2521 sparc_sra_imm (code, ins->sreg1, 31, sparc_o7);
2522 sparc_wry (code, sparc_o7, sparc_g0);
2523 EMIT_ALU_IMM (ins, sdiv, TRUE);
2524 EMIT_COND_SYSTEM_EXCEPTION_GENERAL (code, sparc_boverflow, "ArithmeticException", TRUE, sparc_icc_short);
2530 /* Sign extend sreg1 into %y */
2531 sparc_sra_imm (code, ins->sreg1, 31, sparc_o7);
2532 sparc_wry (code, sparc_o7, sparc_g0);
2533 sparc_sdiv (code, TRUE, ins->sreg1, ins->sreg2, sparc_o7);
2534 EMIT_COND_SYSTEM_EXCEPTION_GENERAL (code, sparc_boverflow, "ArithmeticException", TRUE, sparc_icc_short);
2535 sparc_smul (code, FALSE, ins->sreg2, sparc_o7, sparc_o7);
2536 sparc_sub (code, FALSE, ins->sreg1, sparc_o7, ins->dreg);
2540 sparc_wry (code, sparc_g0, sparc_g0);
2541 sparc_udiv (code, FALSE, ins->sreg1, ins->sreg2, sparc_o7);
2542 sparc_umul (code, FALSE, ins->sreg2, sparc_o7, sparc_o7);
2543 sparc_sub (code, FALSE, ins->sreg1, sparc_o7, ins->dreg);
2547 /* Sign extend sreg1 into %y */
2548 sparc_sra_imm (code, ins->sreg1, 31, sparc_o7);
2549 sparc_wry (code, sparc_o7, sparc_g0);
2550 if (!sparc_is_imm13 (ins->inst_imm)) {
2551 sparc_set (code, ins->inst_imm, GP_SCRATCH_REG);
2552 sparc_sdiv (code, TRUE, ins->sreg1, GP_SCRATCH_REG, sparc_o7);
2553 EMIT_COND_SYSTEM_EXCEPTION_GENERAL (code, sparc_boverflow, "ArithmeticException", TRUE, sparc_icc_short);
2554 sparc_smul (code, FALSE, sparc_o7, GP_SCRATCH_REG, sparc_o7);
2557 sparc_sdiv_imm (code, TRUE, ins->sreg1, ins->inst_imm, sparc_o7);
2558 EMIT_COND_SYSTEM_EXCEPTION_GENERAL (code, sparc_boverflow, "ArithmeticException", TRUE, sparc_icc_short);
2559 sparc_smul_imm (code, FALSE, sparc_o7, ins->inst_imm, sparc_o7);
2561 sparc_sub (code, FALSE, ins->sreg1, sparc_o7, ins->dreg);
2565 sparc_or (code, FALSE, ins->sreg1, ins->sreg2, ins->dreg);
2569 EMIT_ALU_IMM (ins, or, FALSE);
2573 sparc_xor (code, FALSE, ins->sreg1, ins->sreg2, ins->dreg);
2577 EMIT_ALU_IMM (ins, xor, FALSE);
2581 sparc_sll (code, ins->sreg1, ins->sreg2, ins->dreg);
2585 if (ins->inst_imm < (1 << 5))
2586 sparc_sll_imm (code, ins->sreg1, ins->inst_imm, ins->dreg);
2588 sparc_set (code, ins->inst_imm, sparc_o7);
2589 sparc_sll (code, ins->sreg1, sparc_o7, ins->dreg);
2594 sparc_sra (code, ins->sreg1, ins->sreg2, ins->dreg);
2598 if (ins->inst_imm < (1 << 5))
2599 sparc_sra_imm (code, ins->sreg1, ins->inst_imm, ins->dreg);
2601 sparc_set (code, ins->inst_imm, sparc_o7);
2602 sparc_sra (code, ins->sreg1, sparc_o7, ins->dreg);
2606 case OP_ISHR_UN_IMM:
2607 if (ins->inst_imm < (1 << 5))
2608 sparc_srl_imm (code, ins->sreg1, ins->inst_imm, ins->dreg);
2610 sparc_set (code, ins->inst_imm, sparc_o7);
2611 sparc_srl (code, ins->sreg1, sparc_o7, ins->dreg);
2616 sparc_srl (code, ins->sreg1, ins->sreg2, ins->dreg);
2619 sparc_sllx (code, ins->sreg1, ins->sreg2, ins->dreg);
2622 if (ins->inst_imm < (1 << 6))
2623 sparc_sllx_imm (code, ins->sreg1, ins->inst_imm, ins->dreg);
2625 sparc_set (code, ins->inst_imm, sparc_o7);
2626 sparc_sllx (code, ins->sreg1, sparc_o7, ins->dreg);
2630 sparc_srax (code, ins->sreg1, ins->sreg2, ins->dreg);
2633 if (ins->inst_imm < (1 << 6))
2634 sparc_srax_imm (code, ins->sreg1, ins->inst_imm, ins->dreg);
2636 sparc_set (code, ins->inst_imm, sparc_o7);
2637 sparc_srax (code, ins->sreg1, sparc_o7, ins->dreg);
2641 sparc_srlx (code, ins->sreg1, ins->sreg2, ins->dreg);
2643 case OP_LSHR_UN_IMM:
2644 if (ins->inst_imm < (1 << 6))
2645 sparc_srlx_imm (code, ins->sreg1, ins->inst_imm, ins->dreg);
2647 sparc_set (code, ins->inst_imm, sparc_o7);
2648 sparc_srlx (code, ins->sreg1, sparc_o7, ins->dreg);
2653 /* can't use sparc_not */
2654 sparc_xnor (code, FALSE, ins->sreg1, sparc_g0, ins->dreg);
2658 /* can't use sparc_neg */
2659 sparc_sub (code, FALSE, sparc_g0, ins->sreg1, ins->dreg);
2663 sparc_smul (code, FALSE, ins->sreg1, ins->sreg2, ins->dreg);
2669 if ((ins->inst_imm == 1) && (ins->sreg1 == ins->dreg))
2672 /* Transform multiplication into a shift */
2673 for (i = 0; i < 30; ++i) {
2675 if (ins->inst_imm == imm)
2679 sparc_sll_imm (code, ins->sreg1, i, ins->dreg);
2681 EMIT_ALU_IMM (ins, smul, FALSE);
2686 sparc_smul (code, TRUE, ins->sreg1, ins->sreg2, ins->dreg);
2687 sparc_rdy (code, sparc_g1);
2688 sparc_sra_imm (code, ins->dreg, 31, sparc_o7);
2689 sparc_cmp (code, sparc_g1, sparc_o7);
2690 EMIT_COND_SYSTEM_EXCEPTION_GENERAL (ins, sparc_bne, "OverflowException", TRUE, sparc_icc_short);
2692 case CEE_MUL_OVF_UN:
2693 case OP_IMUL_OVF_UN:
2694 sparc_umul (code, TRUE, ins->sreg1, ins->sreg2, ins->dreg);
2695 sparc_rdy (code, sparc_o7);
2696 sparc_cmp (code, sparc_o7, sparc_g0);
2697 EMIT_COND_SYSTEM_EXCEPTION_GENERAL (ins, sparc_bne, "OverflowException", TRUE, sparc_icc_short);
2701 sparc_set (code, ins->inst_c0, ins->dreg);
2704 sparc_set (code, ins->inst_l, ins->dreg);
2707 mono_add_patch_info (cfg, offset, (MonoJumpInfoType)ins->inst_i1, ins->inst_p0);
2708 sparc_set_template (code, ins->dreg);
2714 if (ins->sreg1 != ins->dreg)
2715 sparc_mov_reg_reg (code, ins->sreg1, ins->dreg);
2718 /* Only used on V9 */
2719 if (ins->sreg1 != ins->dreg)
2720 sparc_fmovd (code, ins->sreg1, ins->dreg);
2722 case OP_SPARC_SETFREG_FLOAT:
2723 /* Only used on V9 */
2724 sparc_fdtos (code, ins->sreg1, ins->dreg);
2727 if (cfg->method->save_lmf)
2730 code = emit_load_volatile_arguments (cfg, code);
2731 mono_add_patch_info (cfg, (guint8*)code - cfg->native_code, MONO_PATCH_INFO_METHOD_JUMP, ins->inst_p0);
2732 sparc_set_template (code, sparc_o7);
2733 sparc_jmpl (code, sparc_o7, sparc_g0, sparc_g0);
2734 /* Restore parent frame in delay slot */
2735 sparc_restore_imm (code, sparc_g0, 0, sparc_g0);
2738 /* ensure ins->sreg1 is not NULL */
2739 sparc_ld_imm (code, ins->sreg1, 0, sparc_g0);
2742 sparc_add_imm (code, FALSE, sparc_fp, cfg->sig_cookie, sparc_o7);
2743 sparc_sti_imm (code, sparc_o7, ins->sreg1, 0);
2750 call = (MonoCallInst*)ins;
2751 g_assert (!call->virtual);
2752 code = emit_save_sp_to_lmf (cfg, code);
2753 if (ins->flags & MONO_INST_HAS_METHOD)
2754 code = emit_call (cfg, code, MONO_PATCH_INFO_METHOD, call->method);
2756 code = emit_call (cfg, code, MONO_PATCH_INFO_ABS, call->fptr);
2758 code = emit_vret_token (ins, code);
2759 code = emit_move_return_value (ins, code);
2764 case OP_VOIDCALL_REG:
2766 call = (MonoCallInst*)ins;
2767 code = emit_save_sp_to_lmf (cfg, code);
2768 sparc_jmpl (code, ins->sreg1, sparc_g0, sparc_callsite);
2770 * We emit a special kind of nop in the delay slot to tell the
2771 * trampoline code that this is a virtual call, thus an unbox
2772 * trampoline might need to be called.
2775 sparc_or_imm (code, FALSE, sparc_g0, 0xca, sparc_g0);
2779 code = emit_vret_token (ins, code);
2780 code = emit_move_return_value (ins, code);
2782 case OP_FCALL_MEMBASE:
2783 case OP_LCALL_MEMBASE:
2784 case OP_VCALL_MEMBASE:
2785 case OP_VOIDCALL_MEMBASE:
2786 case OP_CALL_MEMBASE:
2787 call = (MonoCallInst*)ins;
2788 g_assert (sparc_is_imm13 (ins->inst_offset));
2789 code = emit_save_sp_to_lmf (cfg, code);
2790 sparc_ldi_imm (code, ins->inst_basereg, ins->inst_offset, sparc_o7);
2791 sparc_jmpl (code, sparc_o7, sparc_g0, sparc_callsite);
2793 sparc_or_imm (code, FALSE, sparc_g0, 0xca, sparc_g0);
2797 code = emit_vret_token (ins, code);
2798 code = emit_move_return_value (ins, code);
2801 if (mono_method_signature (cfg->method)->ret->type == MONO_TYPE_R4)
2802 sparc_fdtos (code, ins->sreg1, sparc_f0);
2805 sparc_fmovd (code, ins->sreg1, ins->dreg);
2807 /* FIXME: Why not use fmovd ? */
2808 sparc_fmovs (code, ins->sreg1, ins->dreg);
2809 sparc_fmovs (code, ins->sreg1 + 1, ins->dreg + 1);
2814 g_assert_not_reached ();
2819 #ifdef MONO_ARCH_SIGSEGV_ON_ALTSTACK
2820 /* Perform stack touching */
2824 /* Keep alignment */
2825 sparc_add_imm (code, FALSE, ins->sreg1, MONO_ARCH_FRAME_ALIGNMENT - 1, ins->dreg);
2826 sparc_set (code, ~(MONO_ARCH_FRAME_ALIGNMENT - 1), sparc_o7);
2827 sparc_and (code, FALSE, ins->dreg, sparc_o7, ins->dreg);
2829 if ((ins->flags & MONO_INST_INIT) && (ins->sreg1 == ins->dreg)) {
2831 size_reg = sparc_g4;
2833 size_reg = sparc_g1;
2835 sparc_mov_reg_reg (code, ins->dreg, size_reg);
2838 size_reg = ins->sreg1;
2840 sparc_sub (code, FALSE, sparc_sp, ins->dreg, ins->dreg);
2841 /* Keep %sp valid at all times */
2842 sparc_mov_reg_reg (code, ins->dreg, sparc_sp);
2843 g_assert (sparc_is_imm13 (MONO_SPARC_STACK_BIAS + cfg->arch.localloc_offset));
2844 sparc_add_imm (code, FALSE, ins->dreg, MONO_SPARC_STACK_BIAS + cfg->arch.localloc_offset, ins->dreg);
2846 if (ins->flags & MONO_INST_INIT) {
2848 /* Initialize memory region */
2849 sparc_cmp_imm (code, size_reg, 0);
2851 sparc_branch (code, 0, sparc_be, 0);
2853 sparc_set (code, 0, sparc_o7);
2854 sparc_sub_imm (code, 0, size_reg, sparcv9 ? 8 : 4, size_reg);
2858 sparc_stx (code, sparc_g0, ins->dreg, sparc_o7);
2860 sparc_st (code, sparc_g0, ins->dreg, sparc_o7);
2861 sparc_cmp (code, sparc_o7, size_reg);
2863 sparc_branch (code, 0, sparc_bl, 0);
2864 sparc_patch (br [2], br [1]);
2866 sparc_add_imm (code, 0, sparc_o7, sparcv9 ? 8 : 4, sparc_o7);
2867 sparc_patch (br [0], code);
2871 case OP_SPARC_LOCALLOC_IMM: {
2872 gint32 offset = ins->inst_c0;
2874 #ifdef MONO_ARCH_SIGSEGV_ON_ALTSTACK
2875 /* Perform stack touching */
2879 offset = ALIGN_TO (offset, MONO_ARCH_FRAME_ALIGNMENT);
2880 if (sparc_is_imm13 (offset))
2881 sparc_sub_imm (code, FALSE, sparc_sp, offset, sparc_sp);
2883 sparc_set (code, offset, sparc_o7);
2884 sparc_sub (code, FALSE, sparc_sp, sparc_o7, sparc_sp);
2886 g_assert (sparc_is_imm13 (MONO_SPARC_STACK_BIAS + cfg->arch.localloc_offset));
2887 sparc_add_imm (code, FALSE, sparc_sp, MONO_SPARC_STACK_BIAS + cfg->arch.localloc_offset, ins->dreg);
2888 if ((ins->flags & MONO_INST_INIT) && (offset > 0)) {
2894 while (i < offset) {
2896 sparc_stx_imm (code, sparc_g0, ins->dreg, i);
2900 sparc_st_imm (code, sparc_g0, ins->dreg, i);
2906 sparc_set (code, offset, sparc_o7);
2907 sparc_sub_imm (code, 0, sparc_o7, sparcv9 ? 8 : 4, sparc_o7);
2908 /* beginning of loop */
2911 sparc_stx (code, sparc_g0, ins->dreg, sparc_o7);
2913 sparc_st (code, sparc_g0, ins->dreg, sparc_o7);
2914 sparc_cmp_imm (code, sparc_o7, 0);
2916 sparc_branch (code, 0, sparc_bne, 0);
2918 sparc_sub_imm (code, 0, sparc_o7, sparcv9 ? 8 : 4, sparc_o7);
2919 sparc_patch (br [1], br [0]);
2925 /* The return is done in the epilog */
2926 g_assert_not_reached ();
2929 sparc_mov_reg_reg (code, ins->sreg1, sparc_o0);
2930 mono_add_patch_info (cfg, (guint8*)code - cfg->native_code, MONO_PATCH_INFO_INTERNAL_METHOD,
2931 (gpointer)"mono_arch_throw_exception");
2935 sparc_mov_reg_reg (code, ins->sreg1, sparc_o0);
2936 mono_add_patch_info (cfg, (guint8*)code - cfg->native_code, MONO_PATCH_INFO_INTERNAL_METHOD,
2937 (gpointer)"mono_arch_rethrow_exception");
2940 case OP_START_HANDLER: {
2942 * The START_HANDLER instruction marks the beginning of a handler
2943 * block. It is called using a call instruction, so %o7 contains
2944 * the return address. Since the handler executes in the same stack
2945 * frame as the method itself, we can't use save/restore to save
2946 * the return address. Instead, we save it into a dedicated
2949 MonoInst *spvar = mono_find_spvar_for_region (cfg, bb->region);
2950 if (!sparc_is_imm13 (spvar->inst_offset)) {
2951 sparc_set (code, spvar->inst_offset, GP_SCRATCH_REG);
2952 sparc_sti (code, sparc_o7, spvar->inst_basereg, GP_SCRATCH_REG);
2955 sparc_sti_imm (code, sparc_o7, spvar->inst_basereg, spvar->inst_offset);
2958 case OP_ENDFILTER: {
2959 MonoInst *spvar = mono_find_spvar_for_region (cfg, bb->region);
2960 if (!sparc_is_imm13 (spvar->inst_offset)) {
2961 sparc_set (code, spvar->inst_offset, GP_SCRATCH_REG);
2962 sparc_ldi (code, spvar->inst_basereg, GP_SCRATCH_REG, sparc_o7);
2965 sparc_ldi_imm (code, spvar->inst_basereg, spvar->inst_offset, sparc_o7);
2966 sparc_jmpl_imm (code, sparc_o7, 8, sparc_g0);
2968 sparc_mov_reg_reg (code, ins->sreg1, sparc_o0);
2971 case CEE_ENDFINALLY: {
2972 MonoInst *spvar = mono_find_spvar_for_region (cfg, bb->region);
2973 if (!sparc_is_imm13 (spvar->inst_offset)) {
2974 sparc_set (code, spvar->inst_offset, GP_SCRATCH_REG);
2975 sparc_ldi (code, spvar->inst_basereg, GP_SCRATCH_REG, sparc_o7);
2978 sparc_ldi_imm (code, spvar->inst_basereg, spvar->inst_offset, sparc_o7);
2979 sparc_jmpl_imm (code, sparc_o7, 8, sparc_g0);
2983 case OP_CALL_HANDLER:
2984 mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_BB, ins->inst_target_bb);
2985 /* This is a jump inside the method, so call_simple works even on V9 */
2986 sparc_call_simple (code, 0);
2990 ins->inst_c0 = (guint8*)code - cfg->native_code;
2993 //g_print ("target: %p, next: %p, curr: %p, last: %p\n", ins->inst_target_bb, bb->next_bb, ins, bb->last_ins);
2994 if ((ins->inst_target_bb == bb->next_bb) && ins == bb->last_ins)
2996 if (ins->flags & MONO_INST_BRLABEL) {
2997 if (ins->inst_i0->inst_c0) {
2998 gint32 disp = (ins->inst_i0->inst_c0 - ((guint8*)code - cfg->native_code)) >> 2;
2999 g_assert (sparc_is_imm22 (disp));
3000 sparc_branch (code, 1, sparc_ba, disp);
3002 mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_LABEL, ins->inst_i0);
3003 sparc_branch (code, 1, sparc_ba, 0);
3006 if (ins->inst_target_bb->native_offset) {
3007 gint32 disp = (ins->inst_target_bb->native_offset - ((guint8*)code - cfg->native_code)) >> 2;
3008 g_assert (sparc_is_imm22 (disp));
3009 sparc_branch (code, 1, sparc_ba, disp);
3011 mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_BB, ins->inst_target_bb);
3012 sparc_branch (code, 1, sparc_ba, 0);
3018 sparc_jmp (code, ins->sreg1, sparc_g0);
3026 if (v64 && (cfg->opt & MONO_OPT_CMOV)) {
3027 sparc_clr_reg (code, ins->dreg);
3028 sparc_movcc_imm (code, sparc_xcc, opcode_to_sparc_cond (ins->opcode), 1, ins->dreg);
3031 sparc_clr_reg (code, ins->dreg);
3033 sparc_branchp (code, 1, opcode_to_sparc_cond (ins->opcode), DEFAULT_ICC, 0, 2);
3035 sparc_branch (code, 1, opcode_to_sparc_cond (ins->opcode), 2);
3038 sparc_set (code, 1, ins->dreg);
3046 if (v64 && (cfg->opt & MONO_OPT_CMOV)) {
3047 sparc_clr_reg (code, ins->dreg);
3048 sparc_movcc_imm (code, sparc_icc, opcode_to_sparc_cond (ins->opcode), 1, ins->dreg);
3051 sparc_clr_reg (code, ins->dreg);
3052 sparc_branchp (code, 1, opcode_to_sparc_cond (ins->opcode), sparc_icc_short, 0, 2);
3054 sparc_set (code, 1, ins->dreg);
3057 case OP_COND_EXC_EQ:
3058 case OP_COND_EXC_NE_UN:
3059 case OP_COND_EXC_LT:
3060 case OP_COND_EXC_LT_UN:
3061 case OP_COND_EXC_GT:
3062 case OP_COND_EXC_GT_UN:
3063 case OP_COND_EXC_GE:
3064 case OP_COND_EXC_GE_UN:
3065 case OP_COND_EXC_LE:
3066 case OP_COND_EXC_LE_UN:
3067 case OP_COND_EXC_OV:
3068 case OP_COND_EXC_NO:
3070 case OP_COND_EXC_NC:
3071 EMIT_COND_SYSTEM_EXCEPTION (ins, opcode_to_sparc_cond (ins->opcode), ins->inst_p1);
3073 case OP_SPARC_COND_EXC_EQZ:
3074 EMIT_COND_SYSTEM_EXCEPTION_BPR (ins, brz, ins->inst_p1);
3076 case OP_SPARC_COND_EXC_GEZ:
3077 EMIT_COND_SYSTEM_EXCEPTION_BPR (ins, brgez, ins->inst_p1);
3079 case OP_SPARC_COND_EXC_GTZ:
3080 EMIT_COND_SYSTEM_EXCEPTION_BPR (ins, brgz, ins->inst_p1);
3082 case OP_SPARC_COND_EXC_LEZ:
3083 EMIT_COND_SYSTEM_EXCEPTION_BPR (ins, brlez, ins->inst_p1);
3085 case OP_SPARC_COND_EXC_LTZ:
3086 EMIT_COND_SYSTEM_EXCEPTION_BPR (ins, brlz, ins->inst_p1);
3088 case OP_SPARC_COND_EXC_NEZ:
3089 EMIT_COND_SYSTEM_EXCEPTION_BPR (ins, brnz, ins->inst_p1);
3091 case OP_COND_EXC_IOV:
3092 case OP_COND_EXC_IC:
3093 EMIT_COND_SYSTEM_EXCEPTION_GENERAL (ins, opcode_to_sparc_cond (ins->opcode), ins->inst_p1, TRUE, sparc_icc_short);
3106 EMIT_COND_BRANCH_PREDICTED (ins, opcode_to_sparc_cond (ins->opcode), 1, 1);
3108 EMIT_COND_BRANCH (ins, opcode_to_sparc_cond (ins->opcode), 1, 1);
3122 /* Only used on V9 */
3123 EMIT_COND_BRANCH_ICC (ins, opcode_to_sparc_cond (ins->opcode), 1, 1, sparc_icc_short);
3128 EMIT_COND_BRANCH_BPR (ins, brz, 1, 1, 1);
3130 case OP_SPARC_BRLEZ:
3131 EMIT_COND_BRANCH_BPR (ins, brlez, 1, 1, 1);
3134 EMIT_COND_BRANCH_BPR (ins, brlz, 1, 1, 1);
3137 EMIT_COND_BRANCH_BPR (ins, brnz, 1, 1, 1);
3140 EMIT_COND_BRANCH_BPR (ins, brgz, 1, 1, 1);
3142 case OP_SPARC_BRGEZ:
3143 EMIT_COND_BRANCH_BPR (ins, brgez, 1, 1, 1);
3146 /* floating point opcodes */
3148 mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_R8, ins->inst_p0);
3150 sparc_set_template (code, sparc_o7);
3152 sparc_sethi (code, 0, sparc_o7);
3154 sparc_lddf_imm (code, sparc_o7, 0, ins->dreg);
3157 mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_R4, ins->inst_p0);
3159 sparc_set_template (code, sparc_o7);
3161 sparc_sethi (code, 0, sparc_o7);
3163 sparc_ldf_imm (code, sparc_o7, 0, FP_SCRATCH_REG);
3165 /* Extend to double */
3166 sparc_fstod (code, FP_SCRATCH_REG, ins->dreg);
3168 case OP_STORER8_MEMBASE_REG:
3169 if (!sparc_is_imm13 (ins->inst_offset + 4)) {
3170 sparc_set (code, ins->inst_offset, sparc_o7);
3171 /* SPARCV9 handles misaligned fp loads/stores */
3172 if (!v64 && (ins->inst_offset % 8)) {
3174 sparc_add (code, FALSE, ins->inst_destbasereg, sparc_o7, sparc_o7);
3175 sparc_stf (code, ins->sreg1, sparc_o7, sparc_g0);
3176 sparc_stf_imm (code, ins->sreg1 + 1, sparc_o7, 4);
3178 sparc_stdf (code, ins->sreg1, ins->inst_destbasereg, sparc_o7);
3181 if (!v64 && (ins->inst_offset % 8)) {
3183 sparc_stf_imm (code, ins->sreg1, ins->inst_destbasereg, ins->inst_offset);
3184 sparc_stf_imm (code, ins->sreg1 + 1, ins->inst_destbasereg, ins->inst_offset + 4);
3186 sparc_stdf_imm (code, ins->sreg1, ins->inst_destbasereg, ins->inst_offset);
3189 case OP_LOADR8_MEMBASE:
3190 EMIT_LOAD_MEMBASE (ins, lddf);
3192 case OP_STORER4_MEMBASE_REG:
3193 /* This requires a double->single conversion */
3194 sparc_fdtos (code, ins->sreg1, FP_SCRATCH_REG);
3195 if (!sparc_is_imm13 (ins->inst_offset)) {
3196 sparc_set (code, ins->inst_offset, sparc_o7);
3197 sparc_stf (code, FP_SCRATCH_REG, ins->inst_destbasereg, sparc_o7);
3200 sparc_stf_imm (code, FP_SCRATCH_REG, ins->inst_destbasereg, ins->inst_offset);
3202 case OP_LOADR4_MEMBASE: {
3203 /* ldf needs a single precision register */
3204 int dreg = ins->dreg;
3205 ins->dreg = FP_SCRATCH_REG;
3206 EMIT_LOAD_MEMBASE (ins, ldf);
3208 /* Extend to double */
3209 sparc_fstod (code, FP_SCRATCH_REG, ins->dreg);
3214 sparc_fmovd (code, ins->sreg1, ins->dreg);
3216 sparc_fmovs (code, ins->sreg1, ins->dreg);
3217 sparc_fmovs (code, ins->sreg1 + 1, ins->dreg + 1);
3221 gint32 offset = mono_spillvar_offset_float (cfg, 0);
3222 if (!sparc_is_imm13 (offset))
3225 sparc_stx_imm (code, ins->sreg1, sparc_sp, offset);
3226 sparc_lddf_imm (code, sparc_sp, offset, FP_SCRATCH_REG);
3227 sparc_fxtos (code, FP_SCRATCH_REG, FP_SCRATCH_REG);
3229 sparc_st_imm (code, ins->sreg1, sparc_sp, offset);
3230 sparc_ldf_imm (code, sparc_sp, offset, FP_SCRATCH_REG);
3231 sparc_fitos (code, FP_SCRATCH_REG, FP_SCRATCH_REG);
3233 sparc_fstod (code, FP_SCRATCH_REG, ins->dreg);
3237 gint32 offset = mono_spillvar_offset_float (cfg, 0);
3238 if (!sparc_is_imm13 (offset))
3241 sparc_stx_imm (code, ins->sreg1, sparc_sp, offset);
3242 sparc_lddf_imm (code, sparc_sp, offset, FP_SCRATCH_REG);
3243 sparc_fxtod (code, FP_SCRATCH_REG, ins->dreg);
3245 sparc_st_imm (code, ins->sreg1, sparc_sp, offset);
3246 sparc_ldf_imm (code, sparc_sp, offset, FP_SCRATCH_REG);
3247 sparc_fitod (code, FP_SCRATCH_REG, ins->dreg);
3251 case OP_FCONV_TO_I1:
3252 case OP_FCONV_TO_U1:
3253 case OP_FCONV_TO_I2:
3254 case OP_FCONV_TO_U2:
3259 case OP_FCONV_TO_I4:
3260 case OP_FCONV_TO_U4: {
3261 gint32 offset = mono_spillvar_offset_float (cfg, 0);
3262 if (!sparc_is_imm13 (offset))
3264 sparc_fdtoi (code, ins->sreg1, FP_SCRATCH_REG);
3265 sparc_stdf_imm (code, FP_SCRATCH_REG, sparc_sp, offset);
3266 sparc_ld_imm (code, sparc_sp, offset, ins->dreg);
3268 switch (ins->opcode) {
3269 case OP_FCONV_TO_I1:
3270 case OP_FCONV_TO_U1:
3271 sparc_and_imm (code, 0, ins->dreg, 0xff, ins->dreg);
3273 case OP_FCONV_TO_I2:
3274 case OP_FCONV_TO_U2:
3275 sparc_set (code, 0xffff, sparc_o7);
3276 sparc_and (code, 0, ins->dreg, sparc_o7, ins->dreg);
3283 case OP_FCONV_TO_I8:
3284 case OP_FCONV_TO_U8:
3286 g_assert_not_reached ();
3290 g_assert_not_reached ();
3292 case OP_LCONV_TO_R_UN: {
3294 g_assert_not_reached ();
3297 case OP_LCONV_TO_OVF_I: {
3298 guint32 *br [3], *label [1];
3301 * Valid ints: 0xffffffff:80000000 to 00000000:7fffffff
3303 sparc_cmp_imm (code, ins->sreg1, 0);
3305 sparc_branch (code, 1, sparc_bneg, 0);
3309 /* ms word must be 0 */
3310 sparc_cmp_imm (code, ins->sreg2, 0);
3312 sparc_branch (code, 1, sparc_be, 0);
3317 EMIT_COND_SYSTEM_EXCEPTION (ins, sparc_ba, "OverflowException");
3320 sparc_patch (br [0], code);
3322 /* ms word must be 0xffffffff */
3323 sparc_cmp_imm (code, ins->sreg2, -1);
3325 sparc_branch (code, 1, sparc_bne, 0);
3327 sparc_patch (br [2], label [0]);
3330 sparc_patch (br [1], code);
3331 if (ins->sreg1 != ins->dreg)
3332 sparc_mov_reg_reg (code, ins->sreg1, ins->dreg);
3336 sparc_faddd (code, ins->sreg1, ins->sreg2, ins->dreg);
3339 sparc_fsubd (code, ins->sreg1, ins->sreg2, ins->dreg);
3342 sparc_fmuld (code, ins->sreg1, ins->sreg2, ins->dreg);
3345 sparc_fdivd (code, ins->sreg1, ins->sreg2, ins->dreg);
3349 sparc_fnegd (code, ins->sreg1, ins->dreg);
3351 /* FIXME: why don't use fnegd ? */
3352 sparc_fnegs (code, ins->sreg1, ins->dreg);
3356 sparc_fdivd (code, ins->sreg1, ins->sreg2, FP_SCRATCH_REG);
3357 sparc_fmuld (code, ins->sreg2, FP_SCRATCH_REG, FP_SCRATCH_REG);
3358 sparc_fsubd (code, ins->sreg1, FP_SCRATCH_REG, ins->dreg);
3361 sparc_fcmpd (code, ins->sreg1, ins->sreg2);
3368 sparc_fcmpd (code, ins->sreg1, ins->sreg2);
3369 sparc_clr_reg (code, ins->dreg);
3370 switch (ins->opcode) {
3373 sparc_fbranch (code, 1, opcode_to_sparc_cond (ins->opcode), 4);
3375 sparc_set (code, 1, ins->dreg);
3376 sparc_fbranch (code, 1, sparc_fbu, 2);
3378 sparc_set (code, 1, ins->dreg);
3381 sparc_fbranch (code, 1, opcode_to_sparc_cond (ins->opcode), 2);
3383 sparc_set (code, 1, ins->dreg);
3389 EMIT_FLOAT_COND_BRANCH (ins, opcode_to_sparc_cond (ins->opcode), 1, 1);
3392 /* clt.un + brfalse */
3394 sparc_fbranch (code, 1, sparc_fbul, 0);
3397 EMIT_FLOAT_COND_BRANCH (ins, sparc_fba, 1, 1);
3398 sparc_patch (p, (guint8*)code);
3402 /* cgt.un + brfalse */
3404 sparc_fbranch (code, 1, sparc_fbug, 0);
3407 EMIT_FLOAT_COND_BRANCH (ins, sparc_fba, 1, 1);
3408 sparc_patch (p, (guint8*)code);
3412 EMIT_FLOAT_COND_BRANCH (ins, sparc_fbne, 1, 1);
3413 EMIT_FLOAT_COND_BRANCH (ins, sparc_fbu, 1, 1);
3416 EMIT_FLOAT_COND_BRANCH (ins, sparc_fbl, 1, 1);
3417 EMIT_FLOAT_COND_BRANCH (ins, sparc_fbu, 1, 1);
3420 EMIT_FLOAT_COND_BRANCH (ins, sparc_fbg, 1, 1);
3421 EMIT_FLOAT_COND_BRANCH (ins, sparc_fbu, 1, 1);
3424 EMIT_FLOAT_COND_BRANCH (ins, sparc_fbge, 1, 1);
3425 EMIT_FLOAT_COND_BRANCH (ins, sparc_fbu, 1, 1);
3428 EMIT_FLOAT_COND_BRANCH (ins, sparc_fble, 1, 1);
3429 EMIT_FLOAT_COND_BRANCH (ins, sparc_fbu, 1, 1);
3431 case CEE_CKFINITE: {
3432 gint32 offset = mono_spillvar_offset_float (cfg, 0);
3433 if (!sparc_is_imm13 (offset))
3435 sparc_stdf_imm (code, ins->sreg1, sparc_sp, offset);
3436 sparc_lduh_imm (code, sparc_sp, offset, sparc_o7);
3437 sparc_srl_imm (code, sparc_o7, 4, sparc_o7);
3438 sparc_and_imm (code, FALSE, sparc_o7, 2047, sparc_o7);
3439 sparc_cmp_imm (code, sparc_o7, 2047);
3440 EMIT_COND_SYSTEM_EXCEPTION (ins, sparc_be, "ArithmeticException");
3442 sparc_fmovd (code, ins->sreg1, ins->dreg);
3444 sparc_fmovs (code, ins->sreg1, ins->dreg);
3445 sparc_fmovs (code, ins->sreg1 + 1, ins->dreg + 1);
3451 g_warning ("unknown opcode %s in %s()\n", mono_inst_name (ins->opcode), __FUNCTION__);
3453 g_warning ("%s:%d: unknown opcode %s\n", __FILE__, __LINE__, mono_inst_name (ins->opcode));
3455 g_assert_not_reached ();
3458 if ((((guint8*)code) - code_start) > max_len) {
3459 g_warning ("wrong maximal instruction length of instruction %s (expected %d, got %d)",
3460 mono_inst_name (ins->opcode), max_len, ((guint8*)code) - code_start);
3461 g_assert_not_reached ();
3471 cfg->code_len = (guint8*)code - cfg->native_code;
/*
 * mono_arch_register_lowlevel_calls:
 *
 *   Register the sparc specific low-level runtime helpers as JIT icalls so
 * the rest of the runtime can resolve them by name.
 * NOTE(review): sparse extraction -- the return type line and the function
 * braces are not visible in this chunk.
 */
3475 mono_arch_register_lowlevel_calls (void)
3477 mono_register_jit_icall (mono_sparc_break, "mono_sparc_break", NULL, TRUE);
3478 mono_register_jit_icall (mono_arch_get_lmf_addr, "mono_arch_get_lmf_addr", NULL, TRUE);
/*
 * mono_arch_patch_code:
 *
 *   Walk the MonoJumpInfo list JI and patch the already-emitted native code
 * of METHOD with the resolved target addresses.  Most patch kinds fall
 * through to the generic sparc_patch () at the bottom of the loop;
 * CLASS_INIT and METHOD_JUMP call sites need their multi-instruction
 * templates rewritten first.
 * NOTE(review): sparse extraction -- the declaration of `target`, the
 * break statements and closing braces of the switch are not visible here.
 */
3482 mono_arch_patch_code (MonoMethod *method, MonoDomain *domain, guint8 *code, MonoJumpInfo *ji, gboolean run_cctors)
3484 MonoJumpInfo *patch_info;
3486 /* FIXME: Move part of this to arch independent code */
3487 for (patch_info = ji; patch_info; patch_info = patch_info->next) {
/* patch_info->ip.i is an offset from the start of the method's native code */
3488 unsigned char *ip = patch_info->ip.i + code;
3491 target = mono_resolve_patch_target (method, domain, code, patch_info, run_cctors);
3493 switch (patch_info->type) {
3494 case MONO_PATCH_INFO_NONE:
3496 case MONO_PATCH_INFO_CLASS_INIT: {
3497 guint32 *ip2 = (guint32*)ip;
3498 /* May already have been changed to a nop */
3500 sparc_set_template (ip2, sparc_o7);
3501 sparc_jmpl (ip2, sparc_o7, sparc_g0, sparc_o7);
3503 sparc_call_simple (ip2, 0);
3507 case MONO_PATCH_INFO_METHOD_JUMP: {
3508 guint32 *ip2 = (guint32*)ip;
3509 /* May already have been patched */
3510 sparc_set_template (ip2, sparc_o7);
/* Default/common path: let the generic patcher rewrite the instruction */
3516 sparc_patch ((guint32*)ip, target);
/*
 * mono_arch_instrument_prolog:
 *
 *   Emit instrumentation code at method entry which calls FUNC (the trace
 * enter hook) with the method and a pointer to the incoming arguments,
 * returning the advanced code pointer.  The six integer input registers
 * are spilled to their stack slots first; float/double argument registers
 * (caller saved under the V9 ABI) are saved before and reloaded after the
 * call.
 * NOTE(review): sparse extraction -- local declarations (i, cinfo), braces,
 * the emitted call itself and the return are not visible in this chunk.
 */
3521 mono_arch_instrument_prolog (MonoCompile *cfg, void *func, void *p, gboolean enable_arguments)
3524 guint32 *code = (guint32*)p;
3525 MonoMethodSignature *sig = mono_method_signature (cfg->method);
3528 /* Save registers to stack */
3529 for (i = 0; i < 6; ++i)
3530 sparc_sti_imm (code, sparc_i0 + i, sparc_fp, ARGS_OFFSET + (i * sizeof (gpointer)))
3532 cinfo = get_call_info (sig, FALSE);
3534 /* Save float regs on V9, since they are caller saved */
3535 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
3536 ArgInfo *ainfo = cinfo->args + i;
3537 gint32 stack_offset;
3539 stack_offset = ainfo->offset + ARGS_OFFSET;
3541 if (ainfo->storage == ArgInFloatReg) {
/* Offsets beyond imm13 range are not emitted here -- the hidden line
 * presumably bails out (NOT_IMPLEMENTED); TODO confirm against upstream */
3542 if (!sparc_is_imm13 (stack_offset))
3544 sparc_stf_imm (code, ainfo->reg, sparc_fp, stack_offset);
3546 else if (ainfo->storage == ArgInDoubleReg) {
3547 /* The offset is guaranteed to be aligned by the ABI rules */
3548 sparc_stdf_imm (code, ainfo->reg, sparc_fp, stack_offset);
/* Set up the two arguments of the hook: the method and the argument area */
3552 sparc_set (code, cfg->method, sparc_o0);
3553 sparc_add_imm (code, FALSE, sparc_fp, MONO_SPARC_STACK_BIAS, sparc_o1);
3555 mono_add_patch_info (cfg, (guint8*)code-cfg->native_code, MONO_PATCH_INFO_ABS, func);
3558 /* Restore float regs on V9 */
3559 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
3560 ArgInfo *ainfo = cinfo->args + i;
3561 gint32 stack_offset;
3563 stack_offset = ainfo->offset + ARGS_OFFSET;
3565 if (ainfo->storage == ArgInFloatReg) {
3566 if (!sparc_is_imm13 (stack_offset))
3568 sparc_ldf_imm (code, sparc_fp, stack_offset, ainfo->reg);
3570 else if (ainfo->storage == ArgInDoubleReg) {
3571 /* The offset is guaranteed to be aligned by the ABI rules */
3572 sparc_lddf_imm (code, sparc_fp, stack_offset, ainfo->reg);
/*
 * mono_arch_instrument_epilog:
 *
 *   Emit instrumentation code at method exit which calls FUNC (the trace
 * leave hook) with the method and its return value.  The return value is
 * first classified by type into a save_mode, spilled to the stack, passed
 * in the outgoing registers, and restored after the call.
 * NOTE(review): sparse extraction -- case labels, breaks and braces of both
 * switches are not visible in this chunk.
 */
3590 mono_arch_instrument_epilog (MonoCompile *cfg, void *func, void *p, gboolean enable_arguments)
3592 guint32 *code = (guint32*)p;
3593 int save_mode = SAVE_NONE;
3594 MonoMethod *method = cfg->method;
3596 switch (mono_type_get_underlying_type (mono_method_signature (method)->ret)->type) {
3597 case MONO_TYPE_VOID:
3598 /* special case string .ctor icall */
3599 if (strcmp (".ctor", method->name) && method->klass == mono_defaults.string_class)
3600 save_mode = SAVE_ONE;
3602 save_mode = SAVE_NONE;
3607 save_mode = SAVE_ONE;
3609 save_mode = SAVE_TWO;
3614 save_mode = SAVE_FP;
3616 case MONO_TYPE_VALUETYPE:
3617 save_mode = SAVE_STRUCT;
3620 save_mode = SAVE_ONE;
3624 /* Save the result to the stack and also put it into the output registers */
3626 switch (save_mode) {
/* SAVE_TWO: a 64 bit value returned in a register pair.
 * NOTE(review): both stores below use sparc_i0; the second store and the
 * mov from sparc_i1 suggest the second store should involve %i1 -- either
 * a latent bug or extraction damage, verify against upstream before use. */
3629 sparc_st_imm (code, sparc_i0, sparc_fp, 68);
3630 sparc_st_imm (code, sparc_i0, sparc_fp, 72);
3631 sparc_mov_reg_reg (code, sparc_i0, sparc_o1);
3632 sparc_mov_reg_reg (code, sparc_i1, sparc_o2);
3635 sparc_sti_imm (code, sparc_i0, sparc_fp, ARGS_OFFSET);
3636 sparc_mov_reg_reg (code, sparc_i0, sparc_o1);
/* SAVE_FP: spill %f0/%f1 and reload the two words into %o1/%o2 */
3640 sparc_stdf_imm (code, sparc_f0, sparc_fp, ARGS_OFFSET);
3642 sparc_stdf_imm (code, sparc_f0, sparc_fp, 72);
3643 sparc_ld_imm (code, sparc_fp, 72, sparc_o1);
3644 sparc_ld_imm (code, sparc_fp, 72 + 4, sparc_o2);
3649 sparc_mov_reg_reg (code, sparc_i0, sparc_o1);
3651 sparc_ld_imm (code, sparc_fp, 64, sparc_o1);
/* Call the leave hook with the method as the first argument */
3659 sparc_set (code, cfg->method, sparc_o0);
3661 mono_add_patch_info (cfg, (guint8*)code - cfg->native_code, MONO_PATCH_INFO_ABS, func);
3664 /* Restore result */
3666 switch (save_mode) {
/* NOTE(review): both reloads target sparc_i0 -- mirrors the suspicious
 * duplicated store above; second load likely meant for %i1, confirm. */
3668 sparc_ld_imm (code, sparc_fp, 68, sparc_i0);
3669 sparc_ld_imm (code, sparc_fp, 72, sparc_i0);
3672 sparc_ldi_imm (code, sparc_fp, ARGS_OFFSET, sparc_i0);
3675 sparc_lddf_imm (code, sparc_fp, ARGS_OFFSET, sparc_f0);
/*
 * mono_arch_emit_prolog:
 *
 *   Emit the native prolog for CFG's method: compute the final frame size,
 * emit the sparc `save` establishing the register window and frame, move
 * incoming arguments from their ABI locations to the locations the rest of
 * the JITted code expects, and optionally initialize the LMF and the trace
 * enter hook.  Returns the advanced code pointer.
 * NOTE(review): sparse extraction -- declarations (code, offset, i, inst,
 * arg_type, cinfo), `else` lines, braces and several guard bodies are not
 * visible in this chunk.
 */
3686 mono_arch_emit_prolog (MonoCompile *cfg)
3688 MonoMethod *method = cfg->method;
3689 MonoMethodSignature *sig;
3695 cfg->code_size = 256;
3696 cfg->native_code = g_malloc (cfg->code_size);
3697 code = (guint32*)cfg->native_code;
3699 /* FIXME: Generate intermediate code instead */
/* Frame layout: locals, register save area, struct return pointer,
 * outgoing parameter area -- all rolled into `offset` */
3701 offset = cfg->stack_offset;
3702 offset += (16 * sizeof (gpointer)); /* register save area */
3704 offset += 4; /* struct/union return pointer */
3707 /* add parameter area size for called functions */
3708 if (cfg->param_area < (6 * sizeof (gpointer)))
3709 /* Reserve space for the first 6 arguments even if it is unused */
3710 offset += 6 * sizeof (gpointer);
3712 offset += cfg->param_area;
3714 /* align the stack size */
3715 offset = ALIGN_TO (offset, MONO_ARCH_FRAME_ALIGNMENT);
3718 * localloc'd memory is stored between the local variables (whose
3719 * size is given by cfg->stack_offset), and between the space reserved
3722 cfg->arch.localloc_offset = offset - cfg->stack_offset;
3724 cfg->stack_offset = offset;
3726 #ifdef MONO_ARCH_SIGSEGV_ON_ALTSTACK
3727 /* Perform stack touching */
/* Emit the `save` instruction which allocates the frame */
3731 if (!sparc_is_imm13 (- cfg->stack_offset)) {
3732 /* Can't use sparc_o7 here, since we're still in the caller's frame */
3733 sparc_set (code, (- cfg->stack_offset), GP_SCRATCH_REG);
3734 sparc_save (code, sparc_sp, GP_SCRATCH_REG, sparc_sp);
3737 sparc_save_imm (code, sparc_sp, - cfg->stack_offset, sparc_sp);
/* Debug helper: break into methods whose name contains "foo" */
3740 if (strstr (cfg->method->name, "foo")) {
3741 mono_add_patch_info (cfg, (guint8*)code - cfg->native_code, MONO_PATCH_INFO_ABS, mono_sparc_break);
3742 sparc_call_simple (code, 0);
3747 sig = mono_method_signature (method);
3749 cinfo = get_call_info (sig, FALSE);
3751 /* Keep in sync with emit_load_volatile_arguments */
3752 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
3753 ArgInfo *ainfo = cinfo->args + i;
3754 gint32 stack_offset;
3756 inst = cfg->varinfo [i];
3758 if (sig->hasthis && (i == 0))
3759 arg_type = &mono_defaults.object_class->byval_arg;
3761 arg_type = sig->params [i - sig->hasthis];
3763 stack_offset = ainfo->offset + ARGS_OFFSET;
3765 /* Save the split arguments so they will reside entirely on the stack */
3766 if (ainfo->storage == ArgInSplitRegStack) {
3767 /* Save the register to the stack */
3768 g_assert (inst->opcode == OP_REGOFFSET);
3769 if (!sparc_is_imm13 (stack_offset))
3771 sparc_st_imm (code, sparc_i5, inst->inst_basereg, stack_offset);
/* V8 only: doubles must end up in dword aligned slots */
3774 if (!v64 && !arg_type->byref && (arg_type->type == MONO_TYPE_R8)) {
3775 /* Save the argument to a dword aligned stack location */
3777 * stack_offset contains the offset of the argument on the stack.
3778 * inst->inst_offset contains the dword aligned offset where the value
3781 if (ainfo->storage == ArgInIRegPair) {
3782 if (!sparc_is_imm13 (inst->inst_offset + 4))
3784 sparc_st_imm (code, sparc_i0 + ainfo->reg, inst->inst_basereg, inst->inst_offset);
3785 sparc_st_imm (code, sparc_i0 + ainfo->reg + 1, inst->inst_basereg, inst->inst_offset + 4);
3788 if (ainfo->storage == ArgInSplitRegStack) {
3790 g_assert_not_reached ();
3792 if (stack_offset != inst->inst_offset) {
3793 /* stack_offset is not dword aligned, so we need to make a copy */
3794 sparc_st_imm (code, sparc_i5, inst->inst_basereg, inst->inst_offset);
3795 sparc_ld_imm (code, sparc_fp, stack_offset + 4, sparc_o7);
3796 sparc_st_imm (code, sparc_o7, inst->inst_basereg, inst->inst_offset + 4);
3800 if (ainfo->storage == ArgOnStackPair) {
3802 g_assert_not_reached ();
3804 if (stack_offset != inst->inst_offset) {
3805 /* stack_offset is not dword aligned, so we need to make a copy */
3806 sparc_ld_imm (code, sparc_fp, stack_offset, sparc_o7);
3807 sparc_st_imm (code, sparc_o7, inst->inst_basereg, inst->inst_offset);
3808 sparc_ld_imm (code, sparc_fp, stack_offset + 4, sparc_o7);
3809 sparc_st_imm (code, sparc_o7, inst->inst_basereg, inst->inst_offset + 4);
3813 g_assert_not_reached ();
3816 if ((ainfo->storage == ArgInIReg) && (inst->opcode != OP_REGVAR)) {
3817 /* Argument in register, but need to be saved to stack */
3818 if (!sparc_is_imm13 (stack_offset))
/* Store with the width implied by the slot alignment so small
 * arguments land in the correct bytes of the slot */
3820 if ((stack_offset - ARGS_OFFSET) & 0x1)
3821 sparc_stb_imm (code, sparc_i0 + ainfo->reg, inst->inst_basereg, stack_offset);
3823 if ((stack_offset - ARGS_OFFSET) & 0x2)
3824 sparc_sth_imm (code, sparc_i0 + ainfo->reg, inst->inst_basereg, stack_offset);
3826 if ((stack_offset - ARGS_OFFSET) & 0x4)
3827 sparc_st_imm (code, sparc_i0 + ainfo->reg, inst->inst_basereg, stack_offset);
3830 sparc_stx_imm (code, sparc_i0 + ainfo->reg, inst->inst_basereg, stack_offset);
3832 sparc_st_imm (code, sparc_i0 + ainfo->reg, inst->inst_basereg, stack_offset);
3836 if ((ainfo->storage == ArgInIRegPair) && (inst->opcode != OP_REGVAR)) {
3840 /* Argument in regpair, but need to be saved to stack */
3841 if (!sparc_is_imm13 (inst->inst_offset + 4))
3843 sparc_st_imm (code, sparc_i0 + ainfo->reg, inst->inst_basereg, inst->inst_offset);
3844 sparc_st_imm (code, sparc_i0 + ainfo->reg + 1, inst->inst_basereg, inst->inst_offset + 4);
3846 else if ((ainfo->storage == ArgInFloatReg) && (inst->opcode != OP_REGVAR)) {
3847 if (!sparc_is_imm13 (stack_offset))
3849 sparc_stf_imm (code, ainfo->reg, inst->inst_basereg, inst->inst_offset);
3851 else if ((ainfo->storage == ArgInDoubleReg) && (inst->opcode != OP_REGVAR)) {
3852 /* The offset is guaranteed to be aligned by the ABI rules */
3853 sparc_stdf_imm (code, ainfo->reg, inst->inst_basereg, inst->inst_offset);
3856 if ((ainfo->storage == ArgInFloatReg) && (inst->opcode == OP_REGVAR)) {
3857 /* Need to move it into a double precision register */
3858 sparc_fstod (code, ainfo->reg, ainfo->reg - 1);
3861 if ((ainfo->storage == ArgInSplitRegStack) || (ainfo->storage == ArgOnStack))
3862 if (inst->opcode == OP_REGVAR)
3863 /* FIXME: Load the argument into memory */
/* Build the LMF (Last Managed Frame) on the stack if required */
3869 if (cfg->method->save_lmf) {
3870 gint32 lmf_offset = STACK_BIAS - cfg->arch.lmf_offset;
3873 mono_add_patch_info (cfg, (guint8*)code - cfg->native_code, MONO_PATCH_INFO_IP, NULL);
3874 sparc_set_template (code, sparc_o7);
3875 sparc_sti_imm (code, sparc_o7, sparc_fp, lmf_offset + G_STRUCT_OFFSET (MonoLMF, ip));
3877 sparc_sti_imm (code, sparc_sp, sparc_fp, lmf_offset + G_STRUCT_OFFSET (MonoLMF, sp));
3879 sparc_sti_imm (code, sparc_fp, sparc_fp, lmf_offset + G_STRUCT_OFFSET (MonoLMF, ebp));
3881 /* FIXME: add a relocation for this */
3882 sparc_set (code, cfg->method, sparc_o7);
3883 sparc_sti_imm (code, sparc_o7, sparc_fp, lmf_offset + G_STRUCT_OFFSET (MonoLMF, method));
3885 mono_add_patch_info (cfg, (guint8*)code - cfg->native_code, MONO_PATCH_INFO_INTERNAL_METHOD,
3886 (gpointer)"mono_arch_get_lmf_addr");
3889 code = (guint32*)mono_sparc_emit_save_lmf (code, lmf_offset);
3892 if (mono_jit_trace_calls != NULL && mono_trace_eval (method))
3893 code = mono_arch_instrument_prolog (cfg, mono_trace_enter_method, code, TRUE);
3895 cfg->code_len = (guint8*)code - cfg->native_code;
3897 g_assert (cfg->code_len <= cfg->code_size);
3899 return (guint8*)code;
/*
 * mono_arch_emit_epilog:
 *
 *   Emit the native epilog: optional trace-leave instrumentation, LMF
 * restore, the V8 struct-return quirk, and the final ret/restore pair --
 * folding the instruction which sets %i0 into the `restore` when possible.
 * The code buffer is grown first so the epilog always fits.
 * NOTE(review): sparse extraction -- declarations (code, can_fold), the
 * `ret` emission and several braces are not visible in this chunk.
 */
3903 mono_arch_emit_epilog (MonoCompile *cfg)
3905 MonoMethod *method = cfg->method;
3908 int max_epilog_size = 16 + 20 * 4;
3910 if (cfg->method->save_lmf)
3911 max_epilog_size += 128;
3913 if (mono_jit_trace_calls != NULL)
3914 max_epilog_size += 50;
3916 if (cfg->prof_options & MONO_PROFILE_ENTER_LEAVE)
3917 max_epilog_size += 50;
/* Grow the code buffer until the worst-case epilog fits */
3919 while (cfg->code_len + max_epilog_size > (cfg->code_size - 16)) {
3920 cfg->code_size *= 2;
3921 cfg->native_code = g_realloc (cfg->native_code, cfg->code_size);
3922 mono_jit_stats.code_reallocs++;
3925 code = (guint32*)(cfg->native_code + cfg->code_len);
3927 if (mono_jit_trace_calls != NULL && mono_trace_eval (method))
3928 code = mono_arch_instrument_epilog (cfg, mono_trace_leave_method, code, TRUE);
3930 if (cfg->method->save_lmf) {
3931 gint32 lmf_offset = STACK_BIAS - cfg->arch.lmf_offset;
3933 code = mono_sparc_emit_restore_lmf (code, lmf_offset);
3937 * The V8 ABI requires that calls to functions which return a structure
/* ... skip the unimp instruction following the call site (ret + 12) */
3940 if (!v64 && mono_method_signature (cfg->method)->pinvoke && MONO_TYPE_ISSTRUCT(mono_method_signature (cfg->method)->ret))
3941 sparc_jmpl_imm (code, sparc_i7, 12, sparc_g0);
3945 /* Only fold last instruction into the restore if the exit block has an in count of 1
3946 and the previous block hasn't been optimized away since it may have an in count > 1 */
3947 if (cfg->bb_exit->in_count == 1 && cfg->bb_exit->in_bb[0]->native_offset != cfg->bb_exit->native_offset)
3950 /* Try folding last instruction into the restore */
/* code [-2] decoded as `or reg, imm, %i0` (op 0x2, op3 0x2, imm form):
 * replace it with the delay-slot instruction and fold into restore */
3951 if (can_fold && (sparc_inst_op (code [-2]) == 0x2) && (sparc_inst_op3 (code [-2]) == 0x2) && sparc_inst_imm (code [-2]) && (sparc_inst_rd (code [-2]) == sparc_i0)) {
3952 /* or reg, imm, %i0 */
3953 int reg = sparc_inst_rs1 (code [-2]);
3954 int imm = sparc_inst_imm13 (code [-2]);
3955 code [-2] = code [-1];
3957 sparc_restore_imm (code, reg, imm, sparc_o0);
3960 if (can_fold && (sparc_inst_op (code [-2]) == 0x2) && (sparc_inst_op3 (code [-2]) == 0x2) && (!sparc_inst_imm (code [-2])) && (sparc_inst_rd (code [-2]) == sparc_i0)) {
3961 /* or reg, reg, %i0 */
3962 int reg1 = sparc_inst_rs1 (code [-2]);
3963 int reg2 = sparc_inst_rs2 (code [-2]);
3964 code [-2] = code [-1];
3966 sparc_restore (code, reg1, reg2, sparc_o0);
/* No folding possible: plain `restore %g0, 0, %g0` */
3969 sparc_restore_imm (code, sparc_g0, 0, sparc_g0);
3971 cfg->code_len = (guint8*)code - cfg->native_code;
3973 g_assert (cfg->code_len < cfg->code_size);
/*
 * mono_arch_emit_exceptions:
 *
 *   Emit the out-of-line exception throw sequences referenced by the
 * method's MONO_PATCH_INFO_EXC patch entries.  Throw sequences for the
 * same exception class are shared: later sites just set %o1 (the ip
 * offset) and branch to the first sequence.  Each sequence ends up calling
 * mono_arch_throw_corlib_exception with the type index and the offset of
 * the throwing call site.
 * NOTE(review): sparse extraction -- declarations (code, exc_count, nthrows,
 * i, disp), loop/branch glue and several braces are not visible here.
 */
3978 mono_arch_emit_exceptions (MonoCompile *cfg)
3980 MonoJumpInfo *patch_info;
3985 MonoClass *exc_classes [16];
3986 guint8 *exc_throw_start [16], *exc_throw_end [16];
3988 /* Compute needed space */
3989 for (patch_info = cfg->patch_info; patch_info; patch_info = patch_info->next) {
3990 if (patch_info->type == MONO_PATCH_INFO_EXC)
3995 * make sure we have enough space for exceptions
3998 code_size = exc_count * (20 * 4);
4000 code_size = exc_count * 24;
4003 while (cfg->code_len + code_size > (cfg->code_size - 16)) {
4004 cfg->code_size *= 2;
4005 cfg->native_code = g_realloc (cfg->native_code, cfg->code_size);
4006 mono_jit_stats.code_reallocs++;
4009 code = (guint32*)(cfg->native_code + cfg->code_len);
4011 for (patch_info = cfg->patch_info; patch_info; patch_info = patch_info->next) {
4012 switch (patch_info->type) {
4013 case MONO_PATCH_INFO_EXC: {
4014 MonoClass *exc_class;
4015 guint32 *buf, *buf2;
4016 guint32 throw_ip, type_idx;
/* Patch the in-method branch to point at the sequence emitted below */
4019 sparc_patch ((guint32*)(cfg->native_code + patch_info->ip.i), code);
4021 exc_class = mono_class_from_name (mono_defaults.corlib, "System", patch_info->data.name);
/* NOTE(review): exc_class is dereferenced on the next line before the
 * g_assert (exc_class) guard -- the assert comes too late to help if the
 * class lookup fails; worth reordering upstream. */
4022 type_idx = exc_class->type_token - MONO_TOKEN_TYPE_DEF;
4023 g_assert (exc_class);
4024 throw_ip = patch_info->ip.i;
4026 /* Find a throw sequence for the same exception class */
4027 for (i = 0; i < nthrows; ++i)
4028 if (exc_classes [i] == exc_class)
/* Reuse: set %o1 to this site's offset and branch to the shared code */
4032 guint32 throw_offset = (((guint8*)exc_throw_end [i] - cfg->native_code) - throw_ip) >> 2;
4033 if (!sparc_is_imm13 (throw_offset))
4034 sparc_set32 (code, throw_offset, sparc_o1);
4036 disp = (exc_throw_start [i] - (guint8*)code) >> 2;
4037 g_assert (sparc_is_imm22 (disp));
4038 sparc_branch (code, 0, sparc_ba, disp);
/* Small offsets fit into the branch delay slot */
4039 if (sparc_is_imm13 (throw_offset))
4040 sparc_set32 (code, throw_offset, sparc_o1);
4043 patch_info->type = MONO_PATCH_INFO_NONE;
4046 /* Emit the template for setting o1 */
4048 if (sparc_is_imm13 (((((guint8*)code - cfg->native_code) - throw_ip) >> 2) - 8))
4049 /* Can use a short form */
4052 sparc_set_template (code, sparc_o1);
/* Remember this sequence so later sites for the same class can share it */
4056 exc_classes [nthrows] = exc_class;
4057 exc_throw_start [nthrows] = (guint8*)code;
4061 mono_add_patch_info (cfg, (guint8*)code - cfg->native_code, MONO_PATCH_INFO_ABS, mono_sparc_break);
4065 /* first arg = type token */
4066 /* Pass the type index to reduce the size of the sparc_set */
4067 if (!sparc_is_imm13 (type_idx))
4068 sparc_set32 (code, type_idx, sparc_o0);
4070 /* second arg = offset between the throw ip and the current ip */
4071 /* On sparc, the saved ip points to the call instruction */
4072 disp = (((guint8*)code - cfg->native_code) - throw_ip) >> 2;
4073 sparc_set32 (buf, disp, sparc_o1);
4078 exc_throw_end [nthrows] = (guint8*)code;
/* Convert the patch entry into a call to the throw helper */
4082 patch_info->data.name = "mono_arch_throw_corlib_exception";
4083 patch_info->type = MONO_PATCH_INFO_INTERNAL_METHOD;
4084 patch_info->ip.i = (guint8*)code - cfg->native_code;
4088 if (sparc_is_imm13 (type_idx)) {
4089 /* Put it into the delay slot */
4092 sparc_set32 (code, type_idx, sparc_o0);
4093 g_assert (code - buf == 1);
4104 cfg->code_len = (guint8*)code - cfg->native_code;
4106 g_assert (cfg->code_len < cfg->code_size);
/* TLS key holding a pointer to the current thread's LMF (Last Managed
 * Frame), written in mono_arch_setup_jit_tls_data () and read in
 * mono_arch_get_lmf_addr ().  Solaris threads (thr_*) or pthreads are
 * selected at compile time via MONO_SPARC_THR_TLS. */
4110 gboolean lmf_addr_key_inited = FALSE;
4112 #ifdef MONO_SPARC_THR_TLS
4113 thread_key_t lmf_addr_key;
4115 pthread_key_t lmf_addr_key;
/*
 * mono_arch_get_lmf_addr:
 *
 *   Return the address of the calling thread's LMF pointer, read straight
 * from thread-local storage.
 * NOTE(review): sparse extraction -- the `res` declaration, its return on
 * the thr_* path, and the closing brace are not visible in this chunk.
 */
4119 mono_arch_get_lmf_addr (void)
4121 /* This is perf critical so we bypass the IO layer */
4122 /* The thr_... functions seem to be somewhat faster */
4123 #ifdef MONO_SPARC_THR_TLS
4125 thr_getspecific (lmf_addr_key, &res);
4128 return pthread_getspecific (lmf_addr_key);
4132 #ifdef MONO_ARCH_SIGSEGV_ON_ALTSTACK
4135 * There seems to be no way to determine stack boundaries under solaris,
4136 * so it's not possible to determine whether a SIGSEGV is caused by stack
4139 #error "--with-sigaltstack=yes not supported on solaris"
/*
 * setup_stack:
 *
 *   mmap an alternate signal stack for TLS and install it with
 * sigaltstack (), so stack-overflow SIGSEGVs can be handled on a valid
 * stack.  Only compiled when MONO_ARCH_SIGSEGV_ON_ALTSTACK is defined
 * (and rejected outright on solaris via the #error above).
 * NOTE(review): sparse extraction -- the two sigaltstack calls below are
 * alternative #ifdef branches (struct sigaltstack vs. stack_t) whose
 * preprocessor lines are not visible in this chunk.
 */
4142 setup_stack (MonoJitTlsData *tls)
4145 struct sigaltstack sa;
4150 /* Setup an alternate signal stack */
4151 tls->signal_stack = mmap (0, SIGNAL_STACK_SIZE, PROT_READ|PROT_WRITE|PROT_EXEC, MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
4152 tls->signal_stack_size = SIGNAL_STACK_SIZE;
4155 sa.ss_sp = tls->signal_stack;
4156 sa.ss_size = SIGNAL_STACK_SIZE;
4158 g_assert (sigaltstack (&sa, NULL) == 0);
4160 sigstk.ss_sp = tls->signal_stack;
4161 sigstk.ss_size = SIGNAL_STACK_SIZE;
4162 sigstk.ss_flags = 0;
4163 g_assert (sigaltstack (&sigstk, NULL) == 0);
/*
 * mono_arch_setup_jit_tls_data:
 *
 * Per-thread JIT initialization: lazily creates the lmf_addr TLS key, then
 * stores the address of this thread's LMF (&tls->lmf) into it so that
 * mono_arch_get_lmf_addr () can retrieve it cheaply.
 * NOTE(review): the check-then-set on lmf_addr_key_inited is not itself
 * synchronized on the visible lines — presumably thread attach is
 * serialized by the caller; confirm.
 */
4170 mono_arch_setup_jit_tls_data (MonoJitTlsData *tls)
4172 if (!lmf_addr_key_inited) {
4175 lmf_addr_key_inited = TRUE;
4177 #ifdef MONO_SPARC_THR_TLS
4178 res = thr_keycreate (&lmf_addr_key, NULL);
4180 res = pthread_key_create (&lmf_addr_key, NULL);
/* Key creation must succeed; there is no fallback path */
4182 g_assert (res == 0);
4186 #ifdef MONO_SPARC_THR_TLS
4187 thr_setspecific (lmf_addr_key, &tls->lmf);
4189 pthread_setspecific (lmf_addr_key, &tls->lmf);
/* NOTE(review): under MONO_ARCH_SIGSEGV_ON_ALTSTACK the alternate signal
 * stack is presumably installed here via setup_stack () — the call itself
 * is on an elided line; confirm in the full file. */
4192 #ifdef MONO_ARCH_SIGSEGV_ON_ALTSTACK
4198 mono_arch_free_jit_tls_data (MonoJitTlsData *tls)
/*
 * mono_arch_emit_this_vret_args:
 *
 * Emits the IR which passes the 'this' pointer and the valuetype return
 * buffer pointer (@vt_reg) to a call.  When a vtype is returned in a
 * register position, @vt_reg is copied into a fresh vreg pinned to %o0,
 * which pushes 'this' over to %o1; alternatively the pointer is stored to
 * the ABI struct-return slot at %sp + 64.
 * NOTE(review): the surrounding branch structure (the vt_reg != -1 test,
 * the choice between the register and stack paths, closing braces) is on
 * elided lines — the two alternatives below are assumed mutually
 * exclusive; confirm against the full file.
 */
4203 mono_arch_emit_this_vret_args (MonoCompile *cfg, MonoCallInst *call, int this_reg, int this_type, int vt_reg)
/* Default: 'this' goes in the first outgoing register */
4205 int this_out_reg = sparc_o0;
/* Copy the vtype return pointer into a fresh vreg ... */
4210 MONO_INST_NEW (cfg, ins, OP_SETREG);
4211 ins->sreg1 = vt_reg;
4212 ins->dreg = mono_regstate_next_int (cfg->rs);
4213 mono_bblock_add_inst (cfg->cbb, ins);
/* ... and pin it to %o0, so 'this' shifts to %o1 */
4215 mono_call_inst_add_outarg_reg (call, ins->dreg, sparc_o0, FALSE);
4217 this_out_reg = sparc_o1;
4219 /* Set the 'struct/union return pointer' location on the stack */
4220 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, sparc_sp, 64, vt_reg);
4224 /* add the this argument */
4225 if (this_reg != -1) {
/* Copy 'this' into a fresh vreg pinned to the chosen out register */
4227 MONO_INST_NEW (cfg, this, OP_SETREG);
4228 this->type = this_type;
4229 this->sreg1 = this_reg;
4230 this->dreg = mono_regstate_next_int (cfg->rs);
4231 mono_bblock_add_inst (cfg->cbb, this);
4233 mono_call_inst_add_outarg_reg (call, this->dreg, this_out_reg, FALSE);
4239 mono_arch_get_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
4245 * mono_arch_get_argument_info:
4246 * @csig: a method signature
4247 * @param_count: the number of parameters to consider
4248 * @arg_info: an array to store the result infos
4250 * Gathers information on parameters such as size, alignment and
4251 * padding. arg_info should be large enough to hold param_count + 1 entries.
4253 * Returns the size of the activation frame.
/*
 * Fills @arg_info with the offset and size of 'this' (slot 0) and each of
 * the first @param_count parameters, based on the sparc calling-convention
 * layout computed by get_call_info ().
 * NOTE(review): local declarations, closing braces, the frame-size return
 * value and the (presumed) g_free (cinfo) are on lines elided from this
 * chunk — confirm cinfo is released in the full file.
 */
4256 mono_arch_get_argument_info (MonoMethodSignature *csig, int param_count, MonoJitArgumentInfo *arg_info)
/* Compute the sparc argument layout for this signature */
4262 cinfo = get_call_info (csig, FALSE);
4264 if (csig->hasthis) {
4265 ainfo = &cinfo->args [0];
/* Translate the call-info offset into an unbiased frame offset */
4266 arg_info [0].offset = ARGS_OFFSET - MONO_SPARC_STACK_BIAS + ainfo->offset;
4269 for (k = 0; k < param_count; k++) {
/* args [] includes the implicit 'this', hence the csig->hasthis shift */
4270 ainfo = &cinfo->args [k + csig->hasthis];
4272 arg_info [k + 1].offset = ARGS_OFFSET - MONO_SPARC_STACK_BIAS + ainfo->offset;
4273 arg_info [k + 1].size = mono_type_size (csig->params [k], &align);
4282 mono_arch_print_tree (MonoInst *tree, int arity)
4287 MonoInst* mono_arch_get_domain_intrinsic (MonoCompile* cfg)
4292 MonoInst* mono_arch_get_thread_intrinsic (MonoCompile* cfg)