2 * mini-sparc.c: Sparc backend for the Mono code generator
5 * Paolo Molaro (lupus@ximian.com)
6 * Dietmar Maurer (dietmar@ximian.com)
9 * Christopher Taylor (ct@gentoo.org)
10 * Mark Crichton (crichton@gimp.org)
11 * Zoltan Varga (vargaz@freemail.hu)
13 * (C) 2003 Ximian, Inc.
21 #include <sys/systeminfo.h>
28 #include <mono/metadata/appdomain.h>
29 #include <mono/metadata/debug-helpers.h>
30 #include <mono/metadata/tokentype.h>
31 #include <mono/utils/mono-math.h>
33 #include "mini-sparc.h"
36 #include "cpu-sparc.h"
37 #include "jit-icalls.h"
40 * Sparc V9 means two things:
41 * - the instruction set
44 * V9 instructions are only usable if the underlying processor is 64 bit. Most Sparc
45 * processors in use are 64 bit processors. The V9 ABI is only usable if the
46 * mono executable is a 64 bit executable. So it would make sense to use the 64 bit
47 * instructions without using the 64 bit ABI.
52 * - %i0..%i<n> hold the incoming arguments, these are never written by JITted
53 * code. Unused input registers are used for global register allocation.
54 * - %o0..%o5 and %l7 is used for local register allocation and passing arguments
55 * - %l0..%l6 is used for global register allocation
56 * - %o7 and %g1 is used as scratch registers in opcodes
57 * - all floating point registers are used for local register allocation except %f0.
58 * Only double precision registers are used.
60 * - fp registers %d0..%d30 are used for parameter passing, and %d32..%d62 are
61 * used for local allocation.
66 * - doubles and longs must be stored in dword aligned locations
70 * The following things are not implemented or do not work:
71 * - some fp arithmetic corner cases
72 * The following tests in mono/mini are expected to fail:
73 * - test_0_simple_double_casts
74 * This test casts (guint64)-1 to double and then back to guint64 again.
75 * Under x86, it returns 0, while under sparc it returns -1.
77 * In addition to this, the runtime requires the trunc function, or its
78 * solaris counterpart, aintl, to do some double->int conversions. If this
79 * function is not available, it is emulated somewhat, but the results can be
85 * - optimize sparc_set according to the memory model
86 * - when non-AOT compiling, compute patch targets immediately so we don't
87 * have to emit the 6 byte template.
89 * - struct arguments/returns
94 * - sparc_call_simple can't be used in a lot of places since the displacement
95 * might not fit into an imm30.
96 * - g1 can't be used in a lot of places since it is used as a scratch reg in
98 * - sparc_f0 can't be used as a scratch register on V9
99 * - the %d34..%d62 fp registers are encoded as: %dx = %f(x - 32 + 1), ie.
101 * - ldind.i4/u4 needs to sign extend/clear out upper word -> slows things down
102 * ins->dreg can't be used as a scratch register in r4 opcodes since it might
103 * be a double precision register which has no single precision part.
104 * - passing/returning structs is hard to implement, because:
105 * - the spec is very hard to understand
106 * - it requires knowledge about the fields of structure, needs to handle
107 * nested structures etc.
111 * Possible optimizations:
112 * - delay slot scheduling
113 * - allocate large constants to registers
114 * - add more mul/div/rem optimizations
118 #define MONO_SPARC_THR_TLS 1
122 * There was a 64 bit bug in glib-2.2: g_bit_nth_msf (0, -1) would return 32,
123 * causing infinite loops in dominator computation. So glib-2.4 is required.
126 #if GLIB_MAJOR_VERSION == 2 && GLIB_MINOR_VERSION < 4
127 #error "glib 2.4 or later is required for 64 bit mode."
131 #define ALIGN_TO(val,align) (((val) + ((align) - 1)) & ~((align) - 1))
133 #define SIGNAL_STACK_SIZE (64 * 1024)
135 #define STACK_BIAS MONO_SPARC_STACK_BIAS
139 /* %g1 is used by sparc_set */
140 #define GP_SCRATCH_REG sparc_g4
141 /* %f0 is used for parameter passing */
142 #define FP_SCRATCH_REG sparc_f30
143 #define ARGS_OFFSET (STACK_BIAS + 128)
147 #define FP_SCRATCH_REG sparc_f0
148 #define ARGS_OFFSET 68
149 #define GP_SCRATCH_REG sparc_g1
153 /* Whether the CPU supports v9 instructions */
154 static gboolean sparcv9 = FALSE;
156 /* Whether this is a 64-bit executable */
158 static gboolean v64 = TRUE;
160 static gboolean v64 = FALSE;
163 static gpointer mono_arch_get_lmf_addr (void);
166 mono_spillvar_offset_float (MonoCompile *cfg, int spillvar);
/*
 * Map an integer register number (0..31) to its symbolic name for
 * disassembly/debug output. NOTE(review): the return statements fall
 * outside this view — presumably the name, or a fallback for
 * out-of-range values, is returned; confirm against the full file.
 */
169 mono_arch_regname (int reg) {
170 static const char * rnames[] = {
171 "sparc_g0", "sparc_g1", "sparc_g2", "sparc_g3", "sparc_g4",
172 "sparc_g5", "sparc_g6", "sparc_g7", "sparc_o0", "sparc_o1",
173 "sparc_o2", "sparc_o3", "sparc_o4", "sparc_o5", "sparc_sp",
174 "sparc_call", "sparc_l0", "sparc_l1", "sparc_l2", "sparc_l3",
175 "sparc_l4", "sparc_l5", "sparc_l6", "sparc_l7", "sparc_i0",
176 "sparc_i1", "sparc_i2", "sparc_i3", "sparc_i4", "sparc_i5",
177 "sparc_fp", "sparc_retadr"
/* Only the 32 architectural integer registers have names. */
179 if (reg >= 0 && reg < 32)
/*
 * Map a floating point register number (0..31) to its symbolic name for
 * disassembly/debug output. Parallel to mono_arch_regname above.
 */
185 mono_arch_fregname (int reg) {
186 static const char *rnames [] = {
187 "sparc_f0", "sparc_f1", "sparc_f2", "sparc_f3", "sparc_f4",
188 "sparc_f5", "sparc_f6", "sparc_f7", "sparc_f8", "sparc_f9",
189 "sparc_f10", "sparc_f11", "sparc_f12", "sparc_f13", "sparc_f14",
190 "sparc_f15", "sparc_f16", "sparc_f17", "sparc_f18", "sparc_f19",
191 "sparc_f20", "sparc_f21", "sparc_f22", "sparc_f23", "sparc_f24",
192 "sparc_f25", "sparc_f26", "sparc_f27", "sparc_f28", "sparc_f29",
193 "sparc_f30", "sparc_f31"
/* Only the 32 single-precision register names are tabulated. */
196 if (reg >= 0 && reg < 32)
203 * Initialize the cpu to execute managed code.
/*
 * Initialize the cpu to execute managed code. On sparc this just forces
 * CPU-feature detection (the sparcv9 flag) by querying the optimization
 * mask once; the result itself is discarded.
 */
206 mono_arch_cpu_init (void)
209 /* make sure sparcv9 is initialized for embedded use */
210 mono_arch_cpu_optimizazions(&dummy);
214 * Initialize architecture specific code.
217 mono_arch_init (void)
222 * Cleanup architecture specific code.
225 mono_arch_cleanup (void)
230 * This function returns the optimizations supported on this cpu.
/*
 * Return the optimization flags supported by this CPU and fill
 * *exclude_mask with the optimizations that must be disabled.
 * The ISA level is detected either via sysinfo(SI_ISALIST) (Solaris)
 * or via a page-size heuristic (see comment below).
 * Note: the function name keeps its historical misspelling; callers
 * elsewhere depend on it.
 */
233 mono_arch_cpu_optimizazions (guint32 *exclude_mask)
241 if (!sysinfo (SI_ISALIST, buf, 1024))
242 g_assert_not_reached ();
244 /* From glibc. If the getpagesize is 8192, we're on sparc64, which
245 * (in)directly implies that we're a v9 or better.
246 * Improvements to this are greatly accepted...
247 * Also, we don't differentiate between v7 and v8. I sense SIGILL
248 * sniffing in my future.
250 if (getpagesize() == 8192)
251 strcpy (buf, "sparcv9");
253 strcpy (buf, "sparcv8");
257 * On some processors, the cmov instructions are even slower than the
/* cmov/fcmov only exist (and only pay off) on v9-class processors */
260 if (strstr (buf, "sparcv9")) {
261 opts |= MONO_OPT_CMOV | MONO_OPT_FCMOV;
265 *exclude_mask |= MONO_OPT_CMOV | MONO_OPT_FCMOV;
271 #define flushi(addr) __asm__ __volatile__ ("iflush %0"::"r"(addr):"memory")
272 #else /* assume Sun's compiler */
273 static void flushi(void *addr)
280 void sync_instruction_memory(caddr_t addr, int len);
/*
 * Flush the instruction cache for the range [code, code + size) so newly
 * emitted machine code becomes visible to the instruction fetch unit.
 * Uses the libc helper when available, otherwise issues iflush per
 * cacheline (32 bytes on v9, 8 bytes on v8).
 */
284 mono_arch_flush_icache (guint8 *code, gint size)
287 /* Hopefully this is optimized based on the actual CPU */
288 sync_instruction_memory (code, size);
290 gulong start = (gulong) code;
291 gulong end = start + size;
294 /* Sparcv9 chips only need flushes on 32 byte
295 * cacheline boundaries.
297 * Sparcv8 needs a flush every 8 bytes.
299 align = (sparcv9 ? 32 : 8);
/* round the range outward to cacheline boundaries */
301 start &= ~(align - 1);
302 end = (end + (align - 1)) & ~(align - 1);
304 while (start < end) {
306 __asm__ __volatile__ ("iflush %0"::"r"(start));
318 * Flush all register windows to memory. Every register window is saved to
319 * a 16 word area on the stack pointed to by its %sp register.
/*
 * Flush all register windows to memory by lazily JIT-compiling a tiny
 * save/flushw/restore stub into the static 'start' buffer on first use,
 * then calling it through the cached 'flushw' function pointer.
 * NOTE(review): looks single-threaded-init only ('inited' is a plain
 * int, no locking visible) — confirm callers serialize the first call.
 */
322 mono_sparc_flushw (void)
324 static guint32 start [64];
325 static int inited = 0;
327 static void (*flushw) (void);
332 sparc_save_imm (code, sparc_sp, -160, sparc_sp);
335 sparc_restore_simple (code);
/* the stub must fit in the 64-word static buffer */
337 g_assert ((code - start) < 64);
339 mono_arch_flush_icache ((guint8*)start, (guint8*)code - (guint8*)start);
341 flushw = (gpointer)start;
/* Architecture hook: spill all register windows to the stack. */
350 mono_arch_flush_register_windows (void)
352 mono_sparc_flushw ();
/* Whether IMM can be used as an immediate operand (sparc imm13). */
356 mono_arch_is_inst_imm (gint64 imm)
358 return sparc_is_imm13 (imm);
362 mono_sparc_is_v9 (void) {
367 mono_sparc_is_sparc64 (void) {
379 ArgInFloatReg, /* V9 only */
380 ArgInDoubleReg /* V9 only */
385 /* This needs to be offset by %i0 or %o0 depending on caller/callee */
388 guint32 vt_offset; /* for valuetypes */
/*
 * Assign the next integer argument to a register and/or stack slot per
 * the sparc calling convention, recording the decision in *ainfo.
 * gr: in/out count of integer param registers used so far.
 * stack_size: in/out running size of the outgoing argument area.
 * pair: TRUE for a two-word (64-bit on v8) argument.
 */
406 add_general (guint32 *gr, guint32 *stack_size, ArgInfo *ainfo, gboolean pair)
408 ainfo->offset = *stack_size;
411 if (*gr >= PARAM_REGS) {
412 ainfo->storage = ArgOnStack;
415 ainfo->storage = ArgInIReg;
420 /* Always reserve stack space for parameters passed in registers */
421 (*stack_size) += sizeof (gpointer);
424 if (*gr < PARAM_REGS - 1) {
425 /* A pair of registers */
426 ainfo->storage = ArgInIRegPair;
430 else if (*gr >= PARAM_REGS) {
431 /* A pair of stack locations */
432 ainfo->storage = ArgOnStackPair;
/* one register left: low half in a reg, high half on the stack */
435 ainfo->storage = ArgInSplitRegStack;
440 (*stack_size) += 2 * sizeof (gpointer);
446 #define FLOAT_PARAM_REGS 32
/*
 * Assign the next floating point argument (V9 fp-register convention)
 * to a register or stack slot, recording the decision in *ainfo.
 * gr: in/out count of fp param registers used so far.
 * single: TRUE for a single-precision (R4) argument.
 */
449 add_float (guint32 *gr, guint32 *stack_size, ArgInfo *ainfo, gboolean single)
451 ainfo->offset = *stack_size;
454 if (*gr >= FLOAT_PARAM_REGS) {
455 ainfo->storage = ArgOnStack;
458 /* A single is passed in an even numbered fp register */
459 ainfo->storage = ArgInFloatReg;
460 ainfo->reg = *gr + 1;
465 if (*gr < FLOAT_PARAM_REGS) {
466 /* A double register */
467 ainfo->storage = ArgInDoubleReg;
472 ainfo->storage = ArgOnStack;
476 (*stack_size) += sizeof (gpointer);
484 * Obtain information about a call according to the calling convention.
485 * For V8, see the "System V ABI, Sparc Processor Supplement" Sparc V8 version
486 * document for more information.
487 * For V9, see the "Low Level System Information (64-bit psABI)" chapter in
488 * the 'Sparc Compliance Definition 2.4' document.
/*
 * Compute argument/return-value placement for SIG per the sparc ABI
 * (see the V8/V9 references cited above). Allocates the CallInfo plus
 * one ArgInfo per argument in a single block; presumably the caller
 * owns and g_free()s the result — confirm against callers.
 */
491 get_call_info (MonoCompile *cfg, MonoMethodSignature *sig, gboolean is_pinvoke)
494 int n = sig->hasthis + sig->param_count;
495 guint32 stack_size = 0;
498 MonoGenericSharingContext *gsctx = cfg ? cfg->generic_sharing_context : NULL;
500 cinfo = g_malloc0 (sizeof (CallInfo) + (sizeof (ArgInfo) * n));
506 if (MONO_TYPE_ISSTRUCT ((sig->ret))) {
507 /* The address of the return value is passed in %o0 */
508 add_general (&gr, &stack_size, &cinfo->ret, FALSE);
509 cinfo->ret.reg += sparc_i0;
515 add_general (&gr, &stack_size, cinfo->args + 0, FALSE);
517 if ((sig->call_convention == MONO_CALL_VARARG) && (n == 0)) {
520 /* Emit the signature cookie just before the implicit arguments */
521 add_general (&gr, &stack_size, &cinfo->sig_cookie, FALSE);
524 for (i = 0; i < sig->param_count; ++i) {
525 ArgInfo *ainfo = &cinfo->args [sig->hasthis + i];
528 if ((sig->call_convention == MONO_CALL_VARARG) && (i == sig->sentinelpos)) {
531 /* Emit the signature cookie just before the implicit arguments */
532 add_general (&gr, &stack_size, &cinfo->sig_cookie, FALSE);
535 DEBUG(printf("param %d: ", i));
536 if (sig->params [i]->byref) {
537 DEBUG(printf("byref\n"));
539 add_general (&gr, &stack_size, ainfo, FALSE);
542 ptype = mono_type_get_underlying_type (sig->params [i]);
543 ptype = mini_get_basic_type_from_generic (gsctx, ptype);
544 switch (ptype->type) {
545 case MONO_TYPE_BOOLEAN:
548 add_general (&gr, &stack_size, ainfo, FALSE);
549 /* the value is in the ls byte */
550 ainfo->offset += sizeof (gpointer) - 1;
555 add_general (&gr, &stack_size, ainfo, FALSE);
556 /* the value is in the ls word */
557 ainfo->offset += sizeof (gpointer) - 2;
561 add_general (&gr, &stack_size, ainfo, FALSE);
562 /* the value is in the ls dword */
563 ainfo->offset += sizeof (gpointer) - 4;
568 case MONO_TYPE_FNPTR:
569 case MONO_TYPE_CLASS:
570 case MONO_TYPE_OBJECT:
571 case MONO_TYPE_STRING:
572 case MONO_TYPE_SZARRAY:
573 case MONO_TYPE_ARRAY:
574 add_general (&gr, &stack_size, ainfo, FALSE);
576 case MONO_TYPE_GENERICINST:
577 if (!mono_type_generic_inst_is_valuetype (sig->params [i])) {
578 add_general (&gr, &stack_size, ainfo, FALSE);
582 case MONO_TYPE_VALUETYPE:
587 add_general (&gr, &stack_size, ainfo, FALSE);
589 case MONO_TYPE_TYPEDBYREF:
590 add_general (&gr, &stack_size, ainfo, FALSE);
595 add_general (&gr, &stack_size, ainfo, FALSE);
597 add_general (&gr, &stack_size, ainfo, TRUE);
602 add_float (&fr, &stack_size, ainfo, TRUE);
605 /* single precision values are passed in integer registers */
606 add_general (&gr, &stack_size, ainfo, FALSE);
611 add_float (&fr, &stack_size, ainfo, FALSE);
614 /* double precision values are passed in a pair of registers */
615 add_general (&gr, &stack_size, ainfo, TRUE);
619 g_assert_not_reached ();
623 if (!sig->pinvoke && (sig->call_convention == MONO_CALL_VARARG) && (n > 0) && (sig->sentinelpos == sig->param_count)) {
626 /* Emit the signature cookie just before the implicit arguments */
627 add_general (&gr, &stack_size, &cinfo->sig_cookie, FALSE);
631 ret_type = mono_type_get_underlying_type (sig->ret);
632 ret_type = mini_get_basic_type_from_generic (gsctx, ret_type);
633 switch (ret_type->type) {
634 case MONO_TYPE_BOOLEAN:
645 case MONO_TYPE_FNPTR:
646 case MONO_TYPE_CLASS:
647 case MONO_TYPE_OBJECT:
648 case MONO_TYPE_SZARRAY:
649 case MONO_TYPE_ARRAY:
650 case MONO_TYPE_STRING:
651 cinfo->ret.storage = ArgInIReg;
652 cinfo->ret.reg = sparc_i0;
659 cinfo->ret.storage = ArgInIReg;
660 cinfo->ret.reg = sparc_i0;
664 cinfo->ret.storage = ArgInIRegPair;
665 cinfo->ret.reg = sparc_i0;
672 cinfo->ret.storage = ArgInFReg;
673 cinfo->ret.reg = sparc_f0;
675 case MONO_TYPE_GENERICINST:
676 if (!mono_type_generic_inst_is_valuetype (sig->ret)) {
677 cinfo->ret.storage = ArgInIReg;
678 cinfo->ret.reg = sparc_i0;
684 case MONO_TYPE_VALUETYPE:
693 cinfo->ret.storage = ArgOnStack;
695 case MONO_TYPE_TYPEDBYREF:
698 /* Same as a valuetype with size 24 */
705 cinfo->ret.storage = ArgOnStack;
710 g_error ("Can't handle as return value 0x%x", sig->ret->type);
713 cinfo->stack_usage = stack_size;
714 cinfo->reg_usage = gr;
/*
 * Return the list of variables in CFG that are candidates for global
 * integer register allocation, sorted by mono_varlist_insert_sorted.
 * Variables with empty live ranges, volatile/indirect flags, existing
 * register assignments, or non-regsize types are excluded.
 */
719 mono_arch_get_allocatable_int_vars (MonoCompile *cfg)
725 * FIXME: If an argument is allocated to a register, then load it from the
726 * stack in the prolog.
729 for (i = 0; i < cfg->num_varinfo; i++) {
730 MonoInst *ins = cfg->varinfo [i];
731 MonoMethodVar *vmv = MONO_VARINFO (cfg, i);
/* skip variables whose live range is empty */
734 if (vmv->range.first_use.abs_pos >= vmv->range.last_use.abs_pos)
737 /* FIXME: Make arguments on stack allocatable to registers */
738 if (ins->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT) || (ins->opcode == OP_REGVAR) || (ins->opcode == OP_ARG))
741 if (mono_is_regsize_var (ins->inst_vtype)) {
742 g_assert (MONO_VARINFO (cfg, i)->reg == -1);
743 g_assert (i == vmv->idx);
745 vars = mono_varlist_insert_sorted (cfg, vars, vmv, FALSE);
/*
 * Return the integer registers usable for global register allocation:
 * the input registers not consumed by incoming arguments, plus
 * %l0..%l6 (%l7 is reserved for local allocation, see header comment).
 */
753 mono_arch_get_global_int_regs (MonoCompile *cfg)
757 MonoMethodSignature *sig;
760 sig = mono_method_signature (cfg->method);
762 cinfo = get_call_info (cfg, sig, FALSE);
764 /* Use unused input registers */
765 for (i = cinfo->reg_usage; i < 6; ++i)
766 regs = g_list_prepend (regs, GUINT_TO_POINTER (sparc_i0 + i));
768 /* Use %l0..%l6 as global registers */
769 for (i = sparc_l0; i < sparc_l7; ++i)
770 regs = g_list_prepend (regs, GUINT_TO_POINTER (i));
778 * mono_arch_regalloc_cost:
780 * Return the cost, in number of memory references, of the action of
781 * allocating the variable VMV into a register during global register
785 mono_arch_regalloc_cost (MonoCompile *cfg, MonoMethodVar *vmv)
791 * Set var information according to the calling convention. sparc version.
792 * The locals var stuff should most likely be split in another method.
/*
 * Assign stack offsets / registers to the method's return value, locals
 * and arguments according to the sparc calling convention (see the
 * comment above). Locals grow downward from %fp; arguments live in the
 * caller's frame at ARGS_OFFSET.
 */
795 mono_arch_allocate_vars (MonoCompile *m)
797 MonoMethodSignature *sig;
798 MonoMethodHeader *header;
800 int i, offset, size, align, curinst;
803 header = mono_method_get_header (m->method);
805 sig = mono_method_signature (m->method);
807 cinfo = get_call_info (m, sig, FALSE);
809 if (sig->ret->type != MONO_TYPE_VOID) {
810 switch (cinfo->ret.storage) {
814 m->ret->opcode = OP_REGVAR;
815 m->ret->inst_c0 = cinfo->ret.reg;
819 g_assert_not_reached ();
822 m->ret->opcode = OP_REGOFFSET;
823 m->ret->inst_basereg = sparc_fp;
824 m->ret->inst_offset = 64;
830 m->ret->dreg = m->ret->inst_c0;
834 * We use the ABI calling conventions for managed code as well.
835 * Exception: valuetypes are never returned in registers on V9.
836 * FIXME: Use something more optimized.
839 /* Locals are allocated backwards from %fp */
840 m->frame_reg = sparc_fp;
844 * Reserve a stack slot for holding information used during exception
847 if (header->num_clauses)
848 offset += sizeof (gpointer) * 2;
850 if (m->method->save_lmf) {
851 offset += sizeof (MonoLMF);
852 m->arch.lmf_offset = offset;
855 curinst = m->locals_start;
856 for (i = curinst; i < m->num_varinfo; ++i) {
857 inst = m->varinfo [i];
859 if (inst->opcode == OP_REGVAR) {
860 //g_print ("allocating local %d to %s\n", i, mono_arch_regname (inst->dreg));
864 if (inst->flags & MONO_INST_IS_DEAD)
867 /* inst->backend.is_pinvoke indicates native sized value types, this is used by the
868 * pinvoke wrappers when they call functions returning structure */
869 if (inst->backend.is_pinvoke && MONO_TYPE_ISSTRUCT (inst->inst_vtype) && inst->inst_vtype->type != MONO_TYPE_TYPEDBYREF)
870 size = mono_class_native_size (inst->inst_vtype->data.klass, &align);
872 size = mini_type_stack_size (m->generic_sharing_context, inst->inst_vtype, &align);
875 * This is needed since structures containing doubles must be doubleword
877 * FIXME: Do this only if needed.
879 if (MONO_TYPE_ISSTRUCT (inst->inst_vtype))
883 * variables are accessed as negative offsets from %fp, so increase
884 * the offset before assigning it to a variable
889 offset &= ~(align - 1);
890 inst->opcode = OP_REGOFFSET;
891 inst->inst_basereg = sparc_fp;
892 inst->inst_offset = STACK_BIAS + -offset;
894 //g_print ("allocating local %d to [%s - %d]\n", i, mono_arch_regname (inst->inst_basereg), - inst->inst_offset);
897 if (sig->call_convention == MONO_CALL_VARARG) {
898 m->sig_cookie = cinfo->sig_cookie.offset + ARGS_OFFSET;
901 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
903 if (inst->opcode != OP_REGVAR) {
904 ArgInfo *ainfo = &cinfo->args [i];
905 gboolean inreg = TRUE;
909 if (sig->hasthis && (i == 0))
910 arg_type = &mono_defaults.object_class->byval_arg;
912 arg_type = sig->params [i - sig->hasthis];
915 if (!arg_type->byref && ((arg_type->type == MONO_TYPE_R4)
916 || (arg_type->type == MONO_TYPE_R8)))
918 * Since float arguments are passed in integer registers, we need to
919 * save them to the stack in the prolog.
924 /* FIXME: Allocate volatile arguments to registers */
925 if (inst->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT))
928 if (MONO_TYPE_ISSTRUCT (arg_type))
929 /* FIXME: this isn't needed */
932 inst->opcode = OP_REGOFFSET;
935 storage = ArgOnStack;
937 storage = ainfo->storage;
942 inst->opcode = OP_REGVAR;
943 inst->dreg = sparc_i0 + ainfo->reg;
948 * Since float regs are volatile, we save the arguments to
949 * the stack in the prolog.
950 * FIXME: Avoid this if the method contains no calls.
954 case ArgInSplitRegStack:
955 /* Split arguments are saved to the stack in the prolog */
956 inst->opcode = OP_REGOFFSET;
957 /* in parent frame */
958 inst->inst_basereg = sparc_fp;
959 inst->inst_offset = ainfo->offset + ARGS_OFFSET;
961 if (!arg_type->byref && (arg_type->type == MONO_TYPE_R8)) {
963 * It is very hard to load doubles from non-doubleword aligned
964 * memory locations. So if the offset is misaligned, we copy the
965 * argument to a stack location in the prolog.
967 if ((inst->inst_offset - STACK_BIAS) % 8) {
968 inst->inst_basereg = sparc_fp;
972 offset &= ~(align - 1);
973 inst->inst_offset = STACK_BIAS + -offset;
982 if (MONO_TYPE_ISSTRUCT (arg_type)) {
983 /* Add a level of indirection */
985 * It would be easier to add OP_LDIND_I here, but ldind_i instructions
986 * are destructively modified in a lot of places in inssel.brg.
989 MONO_INST_NEW (m, indir, 0);
991 inst->opcode = OP_SPARC_INARG_VT;
992 inst->inst_left = indir;
998 * spillvars are stored between the normal locals and the storage reserved
1002 m->stack_offset = offset;
1004 /* Add a properly aligned dword for use by int<->float conversion opcodes */
1006 mono_spillvar_offset_float (m, 0);
/*
 * Build an OP_GROUP instruction tying LEFT to a (basereg, offset)
 * address pair; used by the outgoing-argument code to describe where an
 * argument is stored.
 */
1012 make_group (MonoCompile *cfg, MonoInst *left, int basereg, int offset)
1016 MONO_INST_NEW (cfg, group, OP_GROUP);
1017 group->inst_left = left;
1018 group->inst_basereg = basereg;
1019 group->inst_imm = offset;
/*
 * Emit the vararg signature cookie argument for CALL: a trimmed copy of
 * the signature (implicit args only) pushed to the stack slot reserved
 * in cinfo->sig_cookie. Note tmp_sig is allocated here and referenced
 * by the OP_ICONST; its lifetime is managed by the compilation, not
 * freed here.
 */
1025 emit_sig_cookie (MonoCompile *cfg, MonoCallInst *call, CallInfo *cinfo)
1028 MonoMethodSignature *tmp_sig;
1032 * mono_ArgIterator_Setup assumes the signature cookie is
1033 * passed first and all the arguments which were before it are
1034 * passed on the stack after the signature. So compensate by
1035 * passing a different signature.
1037 tmp_sig = mono_metadata_signature_dup (call->signature);
1038 tmp_sig->param_count -= call->signature->sentinelpos;
1039 tmp_sig->sentinelpos = 0;
1040 memcpy (tmp_sig->params, call->signature->params + call->signature->sentinelpos, tmp_sig->param_count * sizeof (MonoType*));
1042 /* FIXME: Add support for signature tokens to AOT */
1043 cfg->disable_aot = TRUE;
1044 /* We always pass the signature on the stack for simplicity */
1045 MONO_INST_NEW (cfg, arg, OP_SPARC_OUTARG_MEM);
1046 arg->inst_right = make_group (cfg, (MonoInst*)call, sparc_sp, ARGS_OFFSET + cinfo->sig_cookie.offset);
1047 MONO_INST_NEW (cfg, sig_arg, OP_ICONST);
1048 sig_arg->inst_p0 = tmp_sig;
1049 arg->inst_left = sig_arg;
1050 arg->type = STACK_PTR;
1051 MONO_INST_LIST_ADD_TAIL (&arg->node, &call->out_args);
1055 * take the arguments and generate the arch-specific
1056 * instructions to properly call the function in call.
1057 * This includes pushing, moving arguments to the right register
/*
 * Lower CALL's arguments into arch-specific OUTARG instructions,
 * assigning each argument to a register/stack slot via get_call_info
 * and appending the resulting MonoInsts to call->out_args.
 */
1061 mono_arch_call_opcode (MonoCompile *cfg, MonoBasicBlock* bb, MonoCallInst *call, int is_virtual) {
1063 MonoMethodSignature *sig;
1067 guint32 extra_space = 0;
1069 sig = call->signature;
1070 n = sig->param_count + sig->hasthis;
1072 cinfo = get_call_info (cfg, sig, sig->pinvoke);
1074 for (i = 0; i < n; ++i) {
1075 ainfo = cinfo->args + i;
1077 if ((sig->call_convention == MONO_CALL_VARARG) && (i == sig->sentinelpos)) {
1078 /* Emit the signature cookie just before the first implicit argument */
1079 emit_sig_cookie (cfg, call, cinfo);
1082 if (is_virtual && i == 0) {
1083 /* the argument will be attached to the call instruction */
1084 in = call->args [i];
1086 MONO_INST_NEW (cfg, arg, OP_OUTARG);
1087 in = call->args [i];
1088 arg->cil_code = in->cil_code;
1089 arg->inst_left = in;
1090 arg->type = in->type;
1091 MONO_INST_LIST_ADD_TAIL (&arg->node, &call->out_args);
1093 if ((i >= sig->hasthis) && (MONO_TYPE_ISSTRUCT(sig->params [i - sig->hasthis]))) {
1096 guint32 offset, pad;
1104 if (sig->params [i - sig->hasthis]->type == MONO_TYPE_TYPEDBYREF) {
1105 size = sizeof (MonoTypedRef);
1106 align = sizeof (gpointer);
1110 size = mono_type_native_stack_size (&in->klass->byval_arg, &align);
1113 * Can't use mini_type_stack_size (), but that
1114 * aligns the size to sizeof (gpointer), which is larger
1115 * than the size of the source, leading to reads of invalid
1116 * memory if the source is at the end of address space or
1119 size = mono_class_value_size (in->klass, &align);
1123 * We use OP_OUTARG_VT to copy the valuetype to a stack location, then
1124 * use the normal OUTARG opcodes to pass the address of the location to
1127 MONO_INST_NEW (cfg, inst, OP_OUTARG_VT);
1128 inst->inst_left = in;
1130 /* The first 6 argument locations are reserved */
1131 if (cinfo->stack_usage < 6 * sizeof (gpointer))
1132 cinfo->stack_usage = 6 * sizeof (gpointer);
1134 offset = ALIGN_TO ((ARGS_OFFSET - STACK_BIAS) + cinfo->stack_usage, align);
1135 pad = offset - ((ARGS_OFFSET - STACK_BIAS) + cinfo->stack_usage);
1137 inst->inst_c1 = STACK_BIAS + offset;
1138 inst->backend.size = size;
1139 arg->inst_left = inst;
1141 cinfo->stack_usage += size;
1142 cinfo->stack_usage += pad;
1145 arg->inst_right = make_group (cfg, (MonoInst*)call, sparc_sp, ARGS_OFFSET + ainfo->offset);
1147 switch (ainfo->storage) {
1151 if (ainfo->storage == ArgInIRegPair)
1152 arg->opcode = OP_SPARC_OUTARG_REGPAIR;
1153 arg->backend.reg3 = sparc_o0 + ainfo->reg;
1154 call->used_iregs |= 1 << ainfo->reg;
1156 if ((i >= sig->hasthis) && !sig->params [i - sig->hasthis]->byref && ((sig->params [i - sig->hasthis]->type == MONO_TYPE_R8) || (sig->params [i - sig->hasthis]->type == MONO_TYPE_R4))) {
1157 /* An fp value is passed in an ireg */
1159 if (arg->opcode == OP_SPARC_OUTARG_REGPAIR)
1160 arg->opcode = OP_SPARC_OUTARG_REGPAIR_FLOAT;
1162 arg->opcode = OP_SPARC_OUTARG_FLOAT;
1165 * The OUTARG (freg) implementation needs an extra dword to store
1166 * the temporary value.
1172 arg->opcode = OP_SPARC_OUTARG_MEM;
1174 case ArgOnStackPair:
1175 arg->opcode = OP_SPARC_OUTARG_MEMPAIR;
1177 case ArgInSplitRegStack:
1178 arg->opcode = OP_SPARC_OUTARG_SPLIT_REG_STACK;
1179 arg->backend.reg3 = sparc_o0 + ainfo->reg;
1180 call->used_iregs |= 1 << ainfo->reg;
1183 arg->opcode = OP_SPARC_OUTARG_FLOAT_REG;
1184 arg->backend.reg3 = sparc_f0 + ainfo->reg;
1186 case ArgInDoubleReg:
1187 arg->opcode = OP_SPARC_OUTARG_DOUBLE_REG;
1188 arg->backend.reg3 = sparc_f0 + ainfo->reg;
1196 /* Handle the case where there are no implicit arguments */
1197 if (!sig->pinvoke && (sig->call_convention == MONO_CALL_VARARG) && (n == sig->sentinelpos)) {
1198 emit_sig_cookie (cfg, call, cinfo);
1201 call->stack_usage = cinfo->stack_usage + extra_space;
1202 call->out_ireg_args = NULL;
1203 call->out_freg_args = NULL;
1204 cfg->param_area = MAX (cfg->param_area, call->stack_usage);
1205 cfg->flags |= MONO_CFG_HAS_CALLS;
1211 /* Map opcode to the sparc condition codes */
1212 static inline SparcCond
1213 opcode_to_sparc_cond (int opcode)
1235 case OP_COND_EXC_EQ:
1238 case OP_COND_EXC_NE_UN:
1245 case OP_COND_EXC_LT:
1251 case OP_COND_EXC_LT_UN:
1257 case OP_COND_EXC_GT:
1263 case OP_COND_EXC_GT_UN:
1267 case OP_COND_EXC_GE:
1271 case OP_COND_EXC_GE_UN:
1275 case OP_COND_EXC_LE:
1279 case OP_COND_EXC_LE_UN:
1281 case OP_COND_EXC_OV:
1282 case OP_COND_EXC_IOV:
1285 case OP_COND_EXC_IC:
1287 case OP_COND_EXC_NO:
1288 case OP_COND_EXC_NC:
1291 g_assert_not_reached ();
1296 #define COMPUTE_DISP(ins) \
1297 if (ins->flags & MONO_INST_BRLABEL) { \
1298 if (ins->inst_i0->inst_c0) \
1299 disp = (ins->inst_i0->inst_c0 - ((guint8*)code - cfg->native_code)) >> 2; \
1302 mono_add_patch_info (cfg, (guint8*)code - cfg->native_code, MONO_PATCH_INFO_LABEL, ins->inst_i0); \
1305 if (ins->inst_true_bb->native_offset) \
1306 disp = (ins->inst_true_bb->native_offset - ((guint8*)code - cfg->native_code)) >> 2; \
1309 mono_add_patch_info (cfg, (guint8*)code - cfg->native_code, MONO_PATCH_INFO_BB, ins->inst_true_bb); \
1314 #define DEFAULT_ICC sparc_xcc_short
1316 #define DEFAULT_ICC sparc_icc_short
1320 #define EMIT_COND_BRANCH_ICC(ins,cond,annul,filldelay,icc) \
1324 COMPUTE_DISP(ins); \
1325 predict = (disp != 0) ? 1 : 0; \
1326 g_assert (sparc_is_imm19 (disp)); \
1327 sparc_branchp (code, (annul), cond, icc, (predict), disp); \
1328 if (filldelay) sparc_nop (code); \
1330 #define EMIT_COND_BRANCH(ins,cond,annul,filldelay) EMIT_COND_BRANCH_ICC ((ins), (cond), (annul), (filldelay), (sparc_xcc_short))
1331 #define EMIT_FLOAT_COND_BRANCH(ins,cond,annul,filldelay) \
1335 COMPUTE_DISP(ins); \
1336 predict = (disp != 0) ? 1 : 0; \
1337 g_assert (sparc_is_imm19 (disp)); \
1338 sparc_fbranch (code, (annul), cond, disp); \
1339 if (filldelay) sparc_nop (code); \
1342 #define EMIT_COND_BRANCH_ICC(ins,cond,annul,filldelay,icc) g_assert_not_reached ()
1343 #define EMIT_COND_BRANCH_GENERAL(ins,bop,cond,annul,filldelay) \
1346 COMPUTE_DISP(ins); \
1347 g_assert (sparc_is_imm22 (disp)); \
1348 sparc_ ## bop (code, (annul), cond, disp); \
1349 if (filldelay) sparc_nop (code); \
1351 #define EMIT_COND_BRANCH(ins,cond,annul,filldelay) EMIT_COND_BRANCH_GENERAL((ins),branch,(cond),annul,filldelay)
1352 #define EMIT_FLOAT_COND_BRANCH(ins,cond,annul,filldelay) EMIT_COND_BRANCH_GENERAL((ins),fbranch,(cond),annul,filldelay)
1355 #define EMIT_COND_BRANCH_PREDICTED(ins,cond,annul,filldelay) \
1359 COMPUTE_DISP(ins); \
1360 predict = (disp != 0) ? 1 : 0; \
1361 g_assert (sparc_is_imm19 (disp)); \
1362 sparc_branchp (code, (annul), (cond), DEFAULT_ICC, (predict), disp); \
1363 if (filldelay) sparc_nop (code); \
1366 #define EMIT_COND_BRANCH_BPR(ins,bop,predict,annul,filldelay) \
1369 COMPUTE_DISP(ins); \
1370 g_assert (sparc_is_imm22 (disp)); \
1371 sparc_ ## bop (code, (annul), (predict), ins->sreg1, disp); \
1372 if (filldelay) sparc_nop (code); \
1375 /* emit an exception if condition is fail */
1377 * We put the exception throwing code out-of-line, at the end of the method
1379 #define EMIT_COND_SYSTEM_EXCEPTION_GENERAL(ins,cond,sexc_name,filldelay,icc) do { \
1380 mono_add_patch_info (cfg, (guint8*)(code) - (cfg)->native_code, \
1381 MONO_PATCH_INFO_EXC, sexc_name); \
1382 if (sparcv9 && ((icc) != sparc_icc_short)) { \
1383 sparc_branchp (code, 0, (cond), (icc), 0, 0); \
1386 sparc_branch (code, 0, cond, 0); \
1388 if (filldelay) sparc_nop (code); \
1391 #define EMIT_COND_SYSTEM_EXCEPTION(ins,cond,sexc_name) EMIT_COND_SYSTEM_EXCEPTION_GENERAL(ins,cond,sexc_name,TRUE,DEFAULT_ICC)
1393 #define EMIT_COND_SYSTEM_EXCEPTION_BPR(ins,bop,sexc_name) do { \
1394 mono_add_patch_info (cfg, (guint8*)(code) - (cfg)->native_code, \
1395 MONO_PATCH_INFO_EXC, sexc_name); \
1396 sparc_ ## bop (code, FALSE, FALSE, ins->sreg1, 0); \
1400 #define EMIT_ALU_IMM(ins,op,setcc) do { \
1401 if (sparc_is_imm13 ((ins)->inst_imm)) \
1402 sparc_ ## op ## _imm (code, (setcc), (ins)->sreg1, ins->inst_imm, (ins)->dreg); \
1404 sparc_set (code, ins->inst_imm, sparc_o7); \
1405 sparc_ ## op (code, (setcc), (ins)->sreg1, sparc_o7, (ins)->dreg); \
1409 #define EMIT_LOAD_MEMBASE(ins,op) do { \
1410 if (sparc_is_imm13 (ins->inst_offset)) \
1411 sparc_ ## op ## _imm (code, ins->inst_basereg, ins->inst_offset, ins->dreg); \
1413 sparc_set (code, ins->inst_offset, sparc_o7); \
1414 sparc_ ## op (code, ins->inst_basereg, sparc_o7, ins->dreg); \
1419 #define EMIT_STORE_MEMBASE_IMM(ins,op) do { \
1421 if (ins->inst_imm == 0) \
1424 sparc_set (code, ins->inst_imm, sparc_o7); \
1427 if (!sparc_is_imm13 (ins->inst_offset)) { \
1428 sparc_set (code, ins->inst_offset, GP_SCRATCH_REG); \
1429 sparc_ ## op (code, sreg, ins->inst_destbasereg, GP_SCRATCH_REG); \
1432 sparc_ ## op ## _imm (code, sreg, ins->inst_destbasereg, ins->inst_offset); \
1435 #define EMIT_STORE_MEMBASE_REG(ins,op) do { \
1436 if (!sparc_is_imm13 (ins->inst_offset)) { \
1437 sparc_set (code, ins->inst_offset, sparc_o7); \
1438 sparc_ ## op (code, ins->sreg1, ins->inst_destbasereg, sparc_o7); \
1441 sparc_ ## op ## _imm (code, ins->sreg1, ins->inst_destbasereg, ins->inst_offset); \
1444 #define EMIT_CALL() do { \
1446 sparc_set_template (code, sparc_o7); \
1447 sparc_jmpl (code, sparc_o7, sparc_g0, sparc_o7); \
1450 sparc_call_simple (code, 0); \
1456 * A call template is 7 instructions long, so we want to avoid it if possible.
/*
 * Emit a call to the target described by (patch_type, data). The direct
 * sparc_set/jmpl path is deliberately disabled ('0 &&') because it only
 * works when the target is already compiled; instead a patch info entry
 * is recorded so the 7-instruction call template gets patched later.
 */
1459 emit_call (MonoCompile *cfg, guint32 *code, guint32 patch_type, gconstpointer data)
1463 /* FIXME: This only works if the target method is already compiled */
1464 if (0 && v64 && !cfg->compile_aot) {
1465 MonoJumpInfo patch_info;
1467 patch_info.type = patch_type;
1468 patch_info.data.target = data;
1470 target = mono_resolve_patch_target (cfg->method, cfg->domain, NULL, &patch_info, FALSE);
1472 /* FIXME: Add optimizations if the target is close enough */
1473 sparc_set (code, target, sparc_o7);
1474 sparc_jmpl (code, sparc_o7, sparc_g0, sparc_o7);
1478 mono_add_patch_info (cfg, (guint8*)code - cfg->native_code, patch_type, data);
/*
 * Local peephole optimizations over BB's instruction list: each
 * instruction is inspected together with its predecessor (last_ins) to
 * fold redundant store/load pairs, loads of just-loaded values, and
 * multiplications by one into moves/constants.
 */
1486 peephole_pass (MonoCompile *cfg, MonoBasicBlock *bb)
1490 MONO_INST_LIST_FOR_EACH_ENTRY_SAFE (ins, n, &bb->ins_list, node) {
1491 MonoInst *last_ins = mono_inst_list_prev (&ins->node, &bb->ins_list);
1493 switch (ins->opcode) {
1495 /* remove unnecessary multiplication with 1 */
1496 if (ins->inst_imm == 1) {
1497 if (ins->dreg != ins->sreg1) {
1498 ins->opcode = OP_MOVE;
1506 case OP_LOAD_MEMBASE:
1507 case OP_LOADI4_MEMBASE:
1509 * OP_STORE_MEMBASE_REG reg, offset(basereg)
1510 * OP_LOAD_MEMBASE offset(basereg), reg
1512 if (last_ins && (last_ins->opcode == OP_STOREI4_MEMBASE_REG
1513 || last_ins->opcode == OP_STORE_MEMBASE_REG) &&
1514 ins->inst_basereg == last_ins->inst_destbasereg &&
1515 ins->inst_offset == last_ins->inst_offset) {
1516 if (ins->dreg == last_ins->sreg1) {
1520 //static int c = 0; printf ("MATCHX %s %d\n", cfg->method->name,c++);
1521 ins->opcode = OP_MOVE;
1522 ins->sreg1 = last_ins->sreg1;
1526 * Note: reg1 must be different from the basereg in the second load
1527 * OP_LOAD_MEMBASE offset(basereg), reg1
1528 * OP_LOAD_MEMBASE offset(basereg), reg2
1530 * OP_LOAD_MEMBASE offset(basereg), reg1
1531 * OP_MOVE reg1, reg2
1533 } if (last_ins && (last_ins->opcode == OP_LOADI4_MEMBASE
1534 || last_ins->opcode == OP_LOAD_MEMBASE) &&
1535 ins->inst_basereg != last_ins->dreg &&
1536 ins->inst_basereg == last_ins->inst_basereg &&
1537 ins->inst_offset == last_ins->inst_offset) {
1539 if (ins->dreg == last_ins->dreg) {
1543 ins->opcode = OP_MOVE;
1544 ins->sreg1 = last_ins->dreg;
1547 //g_assert_not_reached ();
1551 * OP_STORE_MEMBASE_IMM imm, offset(basereg)
1552 * OP_LOAD_MEMBASE offset(basereg), reg
1554 * OP_STORE_MEMBASE_IMM imm, offset(basereg)
1555 * OP_ICONST reg, imm
1557 } else if (last_ins && (last_ins->opcode == OP_STOREI4_MEMBASE_IMM
1558 || last_ins->opcode == OP_STORE_MEMBASE_IMM) &&
1559 ins->inst_basereg == last_ins->inst_destbasereg &&
1560 ins->inst_offset == last_ins->inst_offset) {
1561 //static int c = 0; printf ("MATCHX %s %d\n", cfg->method->name,c++);
1562 ins->opcode = OP_ICONST;
1563 ins->inst_c0 = last_ins->inst_imm;
1564 g_assert_not_reached (); // check this rule
1569 case OP_LOADI1_MEMBASE:
1570 if (last_ins && (last_ins->opcode == OP_STOREI1_MEMBASE_REG) &&
1571 ins->inst_basereg == last_ins->inst_destbasereg &&
1572 ins->inst_offset == last_ins->inst_offset) {
1573 if (ins->dreg == last_ins->sreg1) {
1577 //static int c = 0; printf ("MATCHX %s %d\n", cfg->method->name,c++);
1578 ins->opcode = OP_MOVE;
1579 ins->sreg1 = last_ins->sreg1;
1583 case OP_LOADI2_MEMBASE:
1584 if (last_ins && (last_ins->opcode == OP_STOREI2_MEMBASE_REG) &&
1585 ins->inst_basereg == last_ins->inst_destbasereg &&
1586 ins->inst_offset == last_ins->inst_offset) {
1587 if (ins->dreg == last_ins->sreg1) {
1591 //static int c = 0; printf ("MATCHX %s %d\n", cfg->method->name,c++);
1592 ins->opcode = OP_MOVE;
1593 ins->sreg1 = last_ins->sreg1;
1597 case OP_STOREI4_MEMBASE_IMM:
1598 /* Convert pairs of 0 stores to a dword 0 store */
1599 /* Used when initializing temporaries */
1600 /* We know sparc_fp is dword aligned */
1601 if (last_ins && (last_ins->opcode == OP_STOREI4_MEMBASE_IMM) &&
1602 (ins->inst_destbasereg == last_ins->inst_destbasereg) &&
1603 (ins->inst_destbasereg == sparc_fp) &&
1604 (ins->inst_offset < 0) &&
1605 ((ins->inst_offset % 8) == 0) &&
1606 ((ins->inst_offset == last_ins->inst_offset - 4)) &&
1607 (ins->inst_imm == 0) &&
1608 (last_ins->inst_imm == 0)) {
1610 last_ins->opcode = OP_STOREI8_MEMBASE_IMM;
1611 last_ins->inst_offset = ins->inst_offset;
1623 case OP_COND_EXC_EQ:
1624 case OP_COND_EXC_GE:
1625 case OP_COND_EXC_GT:
1626 case OP_COND_EXC_LE:
1627 case OP_COND_EXC_LT:
1628 case OP_COND_EXC_NE_UN:
1630 * Convert compare with zero+branch to BRcc
1633 * This only works in 64 bit mode, since it examines all 64
1634 * bits of the register.
1635 * Only do this if the method is small since BPr only has a 16bit
1638 if (v64 && (mono_method_get_header (cfg->method)->code_size < 10000) && last_ins &&
1639 (last_ins->opcode == OP_COMPARE_IMM) &&
1640 (last_ins->inst_imm == 0)) {
1641 switch (ins->opcode) {
1643 ins->opcode = OP_SPARC_BRZ;
1646 ins->opcode = OP_SPARC_BRNZ;
1649 ins->opcode = OP_SPARC_BRLZ;
1652 ins->opcode = OP_SPARC_BRGZ;
1655 ins->opcode = OP_SPARC_BRGEZ;
1658 ins->opcode = OP_SPARC_BRLEZ;
1660 case OP_COND_EXC_EQ:
1661 ins->opcode = OP_SPARC_COND_EXC_EQZ;
1663 case OP_COND_EXC_GE:
1664 ins->opcode = OP_SPARC_COND_EXC_GEZ;
1666 case OP_COND_EXC_GT:
1667 ins->opcode = OP_SPARC_COND_EXC_GTZ;
1669 case OP_COND_EXC_LE:
1670 ins->opcode = OP_SPARC_COND_EXC_LEZ;
1672 case OP_COND_EXC_LT:
1673 ins->opcode = OP_SPARC_COND_EXC_LTZ;
1675 case OP_COND_EXC_NE_UN:
1676 ins->opcode = OP_SPARC_COND_EXC_NEZ;
1679 g_assert_not_reached ();
1681 last_ins->data = ins->data;
1682 last_ins->opcode = ins->opcode;
1683 last_ins->type = ins->type;
1684 last_ins->ssa_op = ins->ssa_op;
1685 last_ins->flags = ins->flags;
1686 last_ins->dreg = ins->dreg;
1687 last_ins->sreg2 = ins->sreg2;
1688 last_ins->backend = ins->backend;
1689 last_ins->klass = ins->klass;
1690 last_ins->cil_code = ins->cil_code;
1701 if (ins->dreg == ins->sreg1) {
1706 * OP_MOVE sreg, dreg
1707 * OP_MOVE dreg, sreg
1709 if (last_ins && last_ins->opcode == OP_MOVE &&
1710 ins->sreg1 == last_ins->dreg &&
1711 ins->dreg == last_ins->sreg1) {
/*
 * mono_spillvar_offset_float:
 *   Return the %fp-relative (stack-bias adjusted) offset of the float spill
 * slot SPILLVAR, allocating an 8 byte, 8-aligned slot on first use. Only a
 * single float spill slot is supported (spillvar must be 0).
 */
mono_spillvar_offset_float (MonoCompile *cfg, int spillvar)
MonoSpillInfo **si, *info;
g_assert (spillvar == 0);
si = &cfg->spill_info_float;
/* Allocate lazily from the compile mempool; grow the frame by 8 bytes. */
*si = info = mono_mempool_alloc (cfg->mempool, sizeof (MonoSpillInfo));
cfg->stack_offset += sizeof (double);
cfg->stack_offset = ALIGN_TO (cfg->stack_offset, 8);
/* Slots live below %fp, hence the negative offset. */
info->offset = - cfg->stack_offset;
return MONO_SPARC_STACK_BIAS + (*si)->offset;
/* FIXME: Strange loads from the stack in basic-float.cs:test_2_rem */

/*
 * mono_arch_local_regalloc:
 *   Arch entry point for local register allocation; delegates to the
 * generic allocator.
 */
mono_arch_local_regalloc (MonoCompile *cfg, MonoBasicBlock *bb)
mono_local_regalloc (cfg, bb);
/*
 * sparc_patch:
 *   Patch the instruction (or instruction sequence) at CODE so it transfers
 * control to / references TARGET. The instruction is decoded by its op/op2
 * fields and the appropriate displacement or immediate is rewritten in
 * place. Each branch form asserts that the displacement fits its field
 * width. NOTE(review): the failure/else lines of several branches are
 * missing from this view.
 */
sparc_patch (guint32 *code, const gpointer target)
guint32 ins = *code;
guint32 op = ins >> 30;
guint32 op2 = (ins >> 22) & 0x7;
guint32 rd = (ins >> 25) & 0x1f;
guint8* target8 = (guint8*)target;
/* Word displacement from the patched instruction to the target. */
gint64 disp = (target8 - (guint8*)code) >> 2;
// g_print ("patching 0x%08x (0x%08x) to point to 0x%08x\n", code, ins, target);
/* Branch form with a 22 bit displacement. */
if ((op == 0) && (op2 == 2)) {
if (!sparc_is_imm22 (disp))
*code = ((ins >> 22) << 22) | (disp & 0x3fffff);
/* Branch form with a 19 bit displacement. */
else if ((op == 0) && (op2 == 1)) {
if (!sparc_is_imm19 (disp))
*code = ((ins >> 19) << 19) | (disp & 0x7ffff);
/* Branch-on-register form: 16 bit displacement split into two fields. */
else if ((op == 0) && (op2 == 3)) {
if (!sparc_is_imm16 (disp))
*code &= ~(0x180000 | 0x3fff);
*code |= ((disp << 21) & (0x180000)) | (disp & 0x3fff);
/* FP branch form with a 22 bit displacement. */
else if ((op == 0) && (op2 == 6)) {
if (!sparc_is_imm22 (disp))
*code = ((ins >> 22) << 22) | (disp & 0x3fffff);
/* sethi: dispatch further on the following instruction. */
else if ((op == 0) && (op2 == 4)) {
guint32 ins2 = code [1];
if (((ins2 >> 30) == 2) && (((ins2 >> 19) & 0x3f) == 2)) {
/* sethi followed by or */
sparc_set (p, target8, rd);
/* Pad the remainder of the old sequence (loop body not visible). */
while (p <= (code + 1))
else if (ins2 == 0x01000000) {
/* sethi followed by nop */
sparc_set (p, target8, rd);
while (p <= (code + 1))
else if ((sparc_inst_op (ins2) == 3) && (sparc_inst_imm (ins2))) {
/* sethi followed by load/store */
/* Rewrite the hi22/lo10 halves of the 32 bit constant in place. */
guint32 t = (guint32)target8;
*code &= ~(0x3fffff);
*(code + 1) &= ~(0x3ff);
*(code + 1) |= (t & 0x3ff);
/* Tail of a multi-instruction sparc_set sequence check (head missing
 * from this view): sethi %g1 followed by two arithmetic ops. */
(sparc_inst_rd (ins) == sparc_g1) &&
(sparc_inst_op (c [1]) == 0) && (sparc_inst_op2 (c [1]) == 4) &&
(sparc_inst_op (c [2]) == 2) && (sparc_inst_op3 (c [2]) == 2) &&
(sparc_inst_op (c [3]) == 2) && (sparc_inst_op3 (c [3]) == 2))
reg = sparc_inst_rd (c [1]);
sparc_set (p, target8, reg);
else if ((sparc_inst_op (ins2) == 2) && (sparc_inst_op3 (ins2) == 0x38) &&
(sparc_inst_imm (ins2))) {
/* sethi followed by jmpl */
guint32 t = (guint32)target8;
*code &= ~(0x3fffff);
*(code + 1) &= ~(0x3ff);
*(code + 1) |= (t & 0x3ff);
/* call instruction: 30 bit pc-relative word displacement. */
else if (op == 01) {
gint64 disp = (target8 - (guint8*)code) >> 2;
if (!sparc_is_imm30 (disp))
sparc_call_simple (code, target8 - (guint8*)code);
/* Arithmetic op with immediate: patch the 13 bit immediate field. */
else if ((op == 2) && (sparc_inst_op3 (ins) == 0x2) && sparc_inst_imm (ins)) {
g_assert (sparc_is_imm13 (target8));
*code |= (guint32)target8;
else if ((sparc_inst_op (ins) == 2) && (sparc_inst_op3 (ins) == 0x7)) {
/* sparc_set case 5. */
reg = sparc_inst_rd (c [3]);
sparc_set (p, target, reg);
// g_print ("patched with 0x%08x\n", ins);
/*
 * mono_sparc_emit_save_lmf:
 *
 *   Emit the code neccesary to push a new entry onto the lmf stack. Used by
 * trampolines as well. On entry, %o0 is expected to hold the lmf_addr
 * (established by code not visible in this view). Returns the advanced
 * code pointer.
 */
mono_sparc_emit_save_lmf (guint32 *code, guint32 lmf_offset)
/* lmf->lmf_addr = lmf_addr (currently in %o0) */
sparc_sti_imm (code, sparc_o0, sparc_fp, lmf_offset + G_STRUCT_OFFSET (MonoLMF, lmf_addr));
/* Save previous_lmf */
sparc_ldi (code, sparc_o0, sparc_g0, sparc_o7);
sparc_sti_imm (code, sparc_o7, sparc_fp, lmf_offset + G_STRUCT_OFFSET (MonoLMF, previous_lmf));
/* *lmf_addr = &lmf: link the new entry at the head of the lmf chain. */
sparc_add_imm (code, FALSE, sparc_fp, lmf_offset, sparc_o7);
sparc_sti (code, sparc_o7, sparc_o0, sparc_g0);
/*
 * mono_sparc_emit_restore_lmf:
 *   Emit code to pop the current MonoLMF entry (at %fp + lmf_offset) off the
 * lmf stack by re-linking its predecessor. Counterpart of
 * mono_sparc_emit_save_lmf. Returns the advanced code pointer.
 */
mono_sparc_emit_restore_lmf (guint32 *code, guint32 lmf_offset)
/* Load previous_lmf */
sparc_ldi_imm (code, sparc_fp, lmf_offset + G_STRUCT_OFFSET (MonoLMF, previous_lmf), sparc_l0);
/* Load the per-thread lmf_addr saved by the prolog. */
sparc_ldi_imm (code, sparc_fp, lmf_offset + G_STRUCT_OFFSET (MonoLMF, lmf_addr), sparc_l1);
/* *(lmf) = previous_lmf */
sparc_sti (code, sparc_l0, sparc_l1, sparc_g0);
/*
 * emit_save_sp_to_lmf:
 *   If the method saves an LMF, store the current %sp into its sp field
 * immediately before a call. Returns the (possibly advanced) code pointer.
 */
emit_save_sp_to_lmf (MonoCompile *cfg, guint32 *code)
/*
 * Since register windows are saved to the current value of %sp, we need to
 * set the sp field in the lmf before the call, not in the prolog.
 */
if (cfg->method->save_lmf) {
gint32 lmf_offset = MONO_SPARC_STACK_BIAS - cfg->arch.lmf_offset;
/* lmf->sp = %sp */
sparc_sti_imm (code, sparc_sp, sparc_fp, lmf_offset + G_STRUCT_OFFSET (MonoLMF, sp));
/*
 * emit_vret_token:
 *   After a pinvoke call returning a structure, emit the 'unimp <size>'
 * marker instruction the SPARC ABI requires at the return site. Returns
 * the advanced code pointer.
 */
emit_vret_token (MonoGenericSharingContext *gsctx, MonoInst *ins, guint32 *code)
MonoCallInst *call = (MonoCallInst*)ins;
/*
 * The sparc ABI requires that calls to functions which return a structure
 * contain an additional unimpl instruction which is checked by the callee.
 */
if (call->signature->pinvoke && MONO_TYPE_ISSTRUCT(call->signature->ret)) {
if (call->signature->ret->type == MONO_TYPE_TYPEDBYREF)
size = mini_type_stack_size (gsctx, call->signature->ret, NULL);
size = mono_class_native_size (call->signature->ret->data.klass, NULL);
/* The size is encoded in the low 12 bits of the unimp instruction. */
sparc_unimp (code, size & 0xfff);
/*
 * emit_move_return_value:
 *   After a call instruction, move the return value from the ABI return
 * registers (%o0/%o1 for integers, %f0/%f1 for floats) into ins->dreg.
 * Returns the advanced code pointer. NOTE(review): several case labels and
 * breaks are missing from this view.
 */
emit_move_return_value (MonoInst *ins, guint32 *code)
/* Move return value to the target register */
/* FIXME: do more things in the local reg allocator */
switch (ins->opcode) {
case OP_VOIDCALL_REG:
case OP_VOIDCALL_MEMBASE:
/* Nothing to move for void calls. */
case OP_CALL_MEMBASE:
/* The allocator is expected to have assigned %o0 already. */
g_assert (ins->dreg == sparc_o0);
case OP_LCALL_MEMBASE:
/*
 * ins->dreg is the least significant reg due to the lreg: LCALL rule
 * in inssel-long32.brg.
 */
sparc_mov_reg_reg (code, sparc_o0, ins->dreg);
g_assert (ins->dreg == sparc_o1);
case OP_FCALL_MEMBASE:
/* R4 returns come back single precision and must be widened. */
if (((MonoCallInst*)ins)->signature->ret->type == MONO_TYPE_R4) {
sparc_fmovs (code, sparc_f0, ins->dreg);
sparc_fstod (code, ins->dreg, ins->dreg);
sparc_fmovd (code, sparc_f0, ins->dreg);
/* Non-V9 path: move the two single-precision halves separately. */
sparc_fmovs (code, sparc_f0, ins->dreg);
if (((MonoCallInst*)ins)->signature->ret->type == MONO_TYPE_R4)
sparc_fstod (code, ins->dreg, ins->dreg);
sparc_fmovs (code, sparc_f1, ins->dreg + 1);
case OP_VCALL_MEMBASE:
/*
 * emit_load_volatile_arguments:
 *
 * Load volatile arguments from the stack to the original input registers.
 * Required before a tail call.
 */
/*
 *   Walks the signature's argument infos (mirroring emit_prolog) and, for
 * each argument that the prolog spilled to the stack, reloads it into the
 * %i register (or register pair / split reg+stack slot) it originally
 * arrived in. Returns the advanced code pointer. NOTE(review): several
 * else/closing lines are missing from this view.
 */
emit_load_volatile_arguments (MonoCompile *cfg, guint32 *code)
MonoMethod *method = cfg->method;
MonoMethodSignature *sig;
/* FIXME: Generate intermediate code instead */
sig = mono_method_signature (method);
cinfo = get_call_info (cfg, sig, FALSE);
/* This is the opposite of the code in emit_prolog */
for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
ArgInfo *ainfo = cinfo->args + i;
gint32 stack_offset;
inst = cfg->args [i];
/* The implicit 'this' argument is typed as object. */
if (sig->hasthis && (i == 0))
arg_type = &mono_defaults.object_class->byval_arg;
arg_type = sig->params [i - sig->hasthis];
stack_offset = ainfo->offset + ARGS_OFFSET;
ireg = sparc_i0 + ainfo->reg;
if (ainfo->storage == ArgInSplitRegStack) {
g_assert (inst->opcode == OP_REGOFFSET);
if (!sparc_is_imm13 (stack_offset))
/* Restore the stack half of the split argument into %i5's slot. */
sparc_st_imm (code, inst->inst_basereg, stack_offset, sparc_i5);
/* 32 bit mode: doubles are passed in integer reg pairs / stack pairs. */
if (!v64 && !arg_type->byref && (arg_type->type == MONO_TYPE_R8)) {
if (ainfo->storage == ArgInIRegPair) {
if (!sparc_is_imm13 (inst->inst_offset + 4))
sparc_ld_imm (code, inst->inst_basereg, inst->inst_offset, ireg);
sparc_ld_imm (code, inst->inst_basereg, inst->inst_offset + 4, ireg + 1);
if (ainfo->storage == ArgInSplitRegStack) {
if (stack_offset != inst->inst_offset) {
sparc_ld_imm (code, inst->inst_basereg, inst->inst_offset, sparc_i5);
sparc_ld_imm (code, inst->inst_basereg, inst->inst_offset + 4, sparc_o7);
sparc_st_imm (code, sparc_o7, sparc_fp, stack_offset + 4);
if (ainfo->storage == ArgOnStackPair) {
if (stack_offset != inst->inst_offset) {
/* stack_offset is not dword aligned, so we need to make a copy */
sparc_ld_imm (code, inst->inst_basereg, inst->inst_offset, sparc_o7);
sparc_st_imm (code, sparc_o7, sparc_fp, stack_offset);
sparc_ld_imm (code, inst->inst_basereg, inst->inst_offset + 4, sparc_o7);
sparc_st_imm (code, sparc_o7, sparc_fp, stack_offset + 4);
g_assert_not_reached ();
if ((ainfo->storage == ArgInIReg) && (inst->opcode != OP_REGVAR)) {
/* Argument in register, but need to be saved to stack */
if (!sparc_is_imm13 (stack_offset))
/* Pick the load width from the slot's alignment within the arg area. */
if ((stack_offset - ARGS_OFFSET) & 0x1)
/* FIXME: Is this ldsb or ldub ? */
sparc_ldsb_imm (code, inst->inst_basereg, stack_offset, ireg);
if ((stack_offset - ARGS_OFFSET) & 0x2)
sparc_ldsh_imm (code, inst->inst_basereg, stack_offset, ireg);
if ((stack_offset - ARGS_OFFSET) & 0x4)
sparc_ld_imm (code, inst->inst_basereg, stack_offset, ireg);
sparc_ldx_imm (code, inst->inst_basereg, stack_offset, ireg);
sparc_ld_imm (code, inst->inst_basereg, stack_offset, ireg);
else if ((ainfo->storage == ArgInIRegPair) && (inst->opcode != OP_REGVAR)) {
/* Argument in regpair, but need to be saved to stack */
if (!sparc_is_imm13 (inst->inst_offset + 4))
sparc_ld_imm (code, inst->inst_basereg, inst->inst_offset, ireg);
/* NOTE(review): st here looks asymmetric with the ld above — the R8
 * reg-pair path uses two loads; confirm against the full file whether
 * this should be sparc_ld_imm. */
sparc_st_imm (code, inst->inst_basereg, inst->inst_offset + 4, ireg + 1);
else if ((ainfo->storage == ArgInFloatReg) && (inst->opcode != OP_REGVAR)) {
else if ((ainfo->storage == ArgInDoubleReg) && (inst->opcode != OP_REGVAR)) {
if ((ainfo->storage == ArgInSplitRegStack) || (ainfo->storage == ArgOnStack))
if (inst->opcode == OP_REGVAR)
/* FIXME: Load the argument into memory */
/*
 * mono_sparc_is_virtual_call:
 *
 * Determine whenever the instruction at CODE is a virtual call.
 * Virtual call sites are emitted as an indirect jmpl whose delay slot holds
 * a distinctive nop ('or %g0, 0xca, %g0'); compare the delay-slot word
 * against a freshly constructed copy of that marker.
 */
mono_sparc_is_virtual_call (guint32 *code)
/* jmpl (op 2, op3 0x38) = register-indirect call. */
if ((sparc_inst_op (*code) == 0x2) && (sparc_inst_op3 (*code) == 0x38)) {
/*
 * Register indirect call. If it is a virtual call, then the
 * instruction in the delay slot is a special kind of nop.
 */
/* Construct special nop */
sparc_or_imm (p, FALSE, sparc_g0, 0xca, sparc_g0);
if (code [1] == p [0])
/*
 * mono_arch_get_vcall_slot:
 *
 * Determine the vtable slot used by a virtual call.
 * Decodes the instructions preceding the call site at CODE8 to recover the
 * base register and displacement of the vtable load; returns the base
 * pointer (from REGS) and stores the displacement in *DISPLACEMENT.
 */
mono_arch_get_vcall_slot (guint8 *code8, gpointer *regs, int *displacement)
guint32 *code = (guint32*)(gpointer)code8;
guint32 ins = code [0];
guint32 prev_ins = code [-1];
/* Flush register windows so REGS reflects the caller's state. */
mono_sparc_flushw ();
if (!mono_sparc_is_virtual_call (code))
/* jmpl: look at the instruction that loaded the call target. */
if ((sparc_inst_op (ins) == 0x2) && (sparc_inst_op3 (ins) == 0x38)) {
if ((sparc_inst_op (prev_ins) == 0x3) && (sparc_inst_i (prev_ins) == 1) && (sparc_inst_op3 (prev_ins) == 0 || sparc_inst_op3 (prev_ins) == 0xb)) {
/* ld [r1 + CONST ], r2; call r2 */
guint32 base = sparc_inst_rs1 (prev_ins);
/* Sign-extend the 13 bit immediate. */
gint32 disp = (((gint32)(sparc_inst_imm13 (prev_ins))) << 19) >> 19;
g_assert (sparc_inst_rd (prev_ins) == sparc_inst_rs1 (ins));
g_assert ((base >= sparc_o0) && (base <= sparc_i7));
base_val = regs [base];
*displacement = disp;
return (gpointer)base_val;
else if ((sparc_inst_op (prev_ins) == 0x3) && (sparc_inst_i (prev_ins) == 0) && (sparc_inst_op3 (prev_ins) == 0)) {
/* set r1, ICONST; ld [r1 + r2], r2; call r2 */
/* Decode a sparc_set32 */
guint32 base = sparc_inst_rs1 (prev_ins);
guint32 s1 = code [-3];
guint32 s2 = code [-2];
/* s1 must be a sethi ... */
g_assert (sparc_inst_op (s1) == 0);
g_assert (sparc_inst_op2 (s1) == 4);
/* ... and s2 an or-immediate into the same register. */
g_assert (sparc_inst_op (s2) == 2);
g_assert (sparc_inst_op3 (s2) == 2);
g_assert (sparc_inst_i (s2) == 1);
g_assert (sparc_inst_rs1 (s2) == sparc_inst_rd (s2));
g_assert (sparc_inst_rd (s1) == sparc_inst_rs1 (s2));
/* Reassemble the 32 bit constant from hi22 and lo10 parts. */
disp = ((s1 & 0x3fffff) << 10) | sparc_inst_imm13 (s2);
g_assert ((base >= sparc_o0) && (base <= sparc_i7));
base_val = regs [base];
*displacement = disp;
return (gpointer)base_val;
g_assert_not_reached ();
g_assert_not_reached ();
/*
 * mono_arch_get_vcall_slot_addr:
 *   Convenience wrapper: return the address of the vtable slot used by the
 * virtual call at CODE (base pointer + displacement).
 */
mono_arch_get_vcall_slot_addr (guint8 *code, gpointer *regs)
vt = mono_arch_get_vcall_slot (code, regs, &displacement);
return (gpointer*)((char*)vt + displacement);
/* Instruction-count estimates (in words) used to size IMT thunk chunks. */
#define BR_SMALL_SIZE 2
#define BR_LARGE_SIZE 2
#define JUMP_IMM_SIZE 5
#define ENABLE_WRONG_METHOD_CHECK 0

/*
 * LOCKING: called with the domain lock held
 */
/*
 * mono_arch_build_imt_thunk:
 *   Build an IMT thunk: a compare-and-branch decision chain over
 * IMT_ENTRIES that dispatches through the matching vtable slot. First pass
 * computes per-entry code sizes, second pass emits the code, third pass
 * patches the intra-thunk branches. Returns the thunk's start address.
 * NOTE(review): some loop bodies/else branches are missing from this view.
 */
mono_arch_build_imt_thunk (MonoVTable *vtable, MonoDomain *domain, MonoIMTCheckItem **imt_entries, int count)
guint32 *code, *start;
/* Pass 1: size estimation. */
for (i = 0; i < count; ++i) {
MonoIMTCheckItem *item = imt_entries [i];
if (item->is_equals) {
if (item->check_target_idx) {
if (!item->compare_done)
item->chunk_size += CMP_SIZE;
item->chunk_size += BR_SMALL_SIZE + JUMP_IMM_SIZE;
item->chunk_size += JUMP_IMM_SIZE;
#if ENABLE_WRONG_METHOD_CHECK
item->chunk_size += CMP_SIZE + BR_SMALL_SIZE + 1;
item->chunk_size += CMP_SIZE + BR_LARGE_SIZE;
/* The compare at this node also serves its branch target. */
imt_entries [item->check_target_idx]->compare_done = TRUE;
size += item->chunk_size;
/* size is in words, the code manager takes bytes. */
code = mono_code_manager_reserve (domain->code_mp, size * 4);
/* Pass 2: emission. */
for (i = 0; i < count; ++i) {
MonoIMTCheckItem *item = imt_entries [i];
item->code_target = (guint8*)code;
if (item->is_equals) {
if (item->check_target_idx) {
if (!item->compare_done) {
/* if (IMT_REG != method) branch to the next decision node */
sparc_set (code, (guint32)item->method, sparc_g5);
sparc_cmp (code, MONO_ARCH_IMT_REG, sparc_g5);
item->jmp_code = (guint8*)code;
sparc_branch (code, 0, sparc_bne, 0);
/* Match: load the vtable slot and jump to the method. */
sparc_set (code, ((guint32)(&(vtable->vtable [item->vtable_slot]))), sparc_g5);
sparc_ld (code, sparc_g5, 0, sparc_g5);
sparc_jmpl (code, sparc_g5, sparc_g0, sparc_g0);
/* enable the commented code to assert on wrong method */
#if ENABLE_WRONG_METHOD_CHECK
g_assert_not_reached ();
sparc_set (code, ((guint32)(&(vtable->vtable [item->vtable_slot]))), sparc_g5);
sparc_ld (code, sparc_g5, 0, sparc_g5);
sparc_jmpl (code, sparc_g5, sparc_g0, sparc_g0);
#if ENABLE_WRONG_METHOD_CHECK
g_assert_not_reached ();
/* Range-check node: branch (unsigned) to the other half of the tree. */
sparc_set (code, (guint32)item->method, sparc_g5);
sparc_cmp (code, MONO_ARCH_IMT_REG, sparc_g5);
item->jmp_code = (guint8*)code;
sparc_branch (code, 0, sparc_beu, 0);
/* patch the branches to get to the target items */
for (i = 0; i < count; ++i) {
MonoIMTCheckItem *item = imt_entries [i];
if (item->jmp_code) {
if (item->check_target_idx) {
sparc_patch ((guint32*)item->jmp_code, imt_entries [item->check_target_idx]->code_target);
mono_arch_flush_icache ((guint8*)start, (code - start) * 4);
mono_stats.imt_thunks_size += (code - start) * 4;
g_assert (code - start <= size);
/*
 * mono_arch_find_imt_method:
 *   Return the interface method of the current IMT call, which the thunk
 * convention keeps in %g1. The leading assert guards a configuration in
 * which this should never be reached (condition not visible in this view).
 */
mono_arch_find_imt_method (gpointer *regs, guint8 *code)
g_assert_not_reached ();
return (MonoMethod*)regs [sparc_g1];
/*
 * mono_arch_find_this_argument:
 *   Return the 'this' argument of the current call, which the calling
 * convention passes in %o0. Register windows are flushed first so REGS
 * reflects the caller's registers.
 */
mono_arch_find_this_argument (gpointer *regs, MonoMethod *method)
mono_sparc_flushw ();
return (gpointer)regs [sparc_o0];
2349 * Some conventions used in the following code.
2350 * 2) The only scratch registers we have are o7 and g1. We try to
2351 * stick to o7 when we can, and use g1 when necessary.
2355 mono_arch_output_basic_block (MonoCompile *cfg, MonoBasicBlock *bb)
2360 guint32 *code = (guint32*)(cfg->native_code + cfg->code_len);
2364 if (cfg->opt & MONO_OPT_PEEPHOLE)
2365 peephole_pass (cfg, bb);
2367 if (cfg->verbose_level > 2)
2368 g_print ("Basic block %d starting at offset 0x%x\n", bb->block_num, bb->native_offset);
2370 cpos = bb->max_offset;
2372 if (cfg->prof_options & MONO_PROFILE_COVERAGE) {
2376 MONO_BB_FOR_EACH_INS (bb, ins) {
2379 offset = (guint8*)code - cfg->native_code;
2381 spec = ins_get_spec (ins->opcode);
2383 max_len = ((guint8 *)spec)[MONO_INST_LEN];
2385 if (offset > (cfg->code_size - max_len - 16)) {
2386 cfg->code_size *= 2;
2387 cfg->native_code = g_realloc (cfg->native_code, cfg->code_size);
2388 code = (guint32*)(cfg->native_code + offset);
2390 code_start = (guint8*)code;
2391 // if (ins->cil_code)
2392 // g_print ("cil code\n");
2393 mono_debug_record_line_number (cfg, ins, offset);
2395 switch (ins->opcode) {
2396 case OP_STOREI1_MEMBASE_IMM:
2397 EMIT_STORE_MEMBASE_IMM (ins, stb);
2399 case OP_STOREI2_MEMBASE_IMM:
2400 EMIT_STORE_MEMBASE_IMM (ins, sth);
2402 case OP_STORE_MEMBASE_IMM:
2403 EMIT_STORE_MEMBASE_IMM (ins, sti);
2405 case OP_STOREI4_MEMBASE_IMM:
2406 EMIT_STORE_MEMBASE_IMM (ins, st);
2408 case OP_STOREI8_MEMBASE_IMM:
2410 EMIT_STORE_MEMBASE_IMM (ins, stx);
2412 /* Only generated by peephole opts */
2413 g_assert ((ins->inst_offset % 8) == 0);
2414 g_assert (ins->inst_imm == 0);
2415 EMIT_STORE_MEMBASE_IMM (ins, stx);
2418 case OP_STOREI1_MEMBASE_REG:
2419 EMIT_STORE_MEMBASE_REG (ins, stb);
2421 case OP_STOREI2_MEMBASE_REG:
2422 EMIT_STORE_MEMBASE_REG (ins, sth);
2424 case OP_STOREI4_MEMBASE_REG:
2425 EMIT_STORE_MEMBASE_REG (ins, st);
2427 case OP_STOREI8_MEMBASE_REG:
2429 EMIT_STORE_MEMBASE_REG (ins, stx);
2431 /* Only used by OP_MEMSET */
2432 EMIT_STORE_MEMBASE_REG (ins, std);
2435 case OP_STORE_MEMBASE_REG:
2436 EMIT_STORE_MEMBASE_REG (ins, sti);
2440 sparc_ldx (code, ins->inst_c0, sparc_g0, ins->dreg);
2442 sparc_ld (code, ins->inst_c0, sparc_g0, ins->dreg);
2447 sparc_ldsw (code, ins->inst_c0, sparc_g0, ins->dreg);
2449 sparc_ld (code, ins->inst_c0, sparc_g0, ins->dreg);
2453 sparc_ld (code, ins->inst_c0, sparc_g0, ins->dreg);
2456 sparc_set (code, ins->inst_c0, ins->dreg);
2457 sparc_ld (code, ins->dreg, sparc_g0, ins->dreg);
2459 case OP_LOADI4_MEMBASE:
2461 EMIT_LOAD_MEMBASE (ins, ldsw);
2463 EMIT_LOAD_MEMBASE (ins, ld);
2466 case OP_LOADU4_MEMBASE:
2467 EMIT_LOAD_MEMBASE (ins, ld);
2469 case OP_LOADU1_MEMBASE:
2470 EMIT_LOAD_MEMBASE (ins, ldub);
2472 case OP_LOADI1_MEMBASE:
2473 EMIT_LOAD_MEMBASE (ins, ldsb);
2475 case OP_LOADU2_MEMBASE:
2476 EMIT_LOAD_MEMBASE (ins, lduh);
2478 case OP_LOADI2_MEMBASE:
2479 EMIT_LOAD_MEMBASE (ins, ldsh);
2481 case OP_LOAD_MEMBASE:
2483 EMIT_LOAD_MEMBASE (ins, ldx);
2485 EMIT_LOAD_MEMBASE (ins, ld);
2489 case OP_LOADI8_MEMBASE:
2490 EMIT_LOAD_MEMBASE (ins, ldx);
2494 sparc_sll_imm (code, ins->sreg1, 24, sparc_o7);
2495 sparc_sra_imm (code, sparc_o7, 24, ins->dreg);
2498 sparc_sll_imm (code, ins->sreg1, 16, sparc_o7);
2499 sparc_sra_imm (code, sparc_o7, 16, ins->dreg);
2502 sparc_and_imm (code, FALSE, ins->sreg1, 0xff, ins->dreg);
2505 sparc_sll_imm (code, ins->sreg1, 16, sparc_o7);
2506 sparc_srl_imm (code, sparc_o7, 16, ins->dreg);
2508 case CEE_CONV_OVF_U4:
2509 /* Only used on V9 */
2510 sparc_cmp_imm (code, ins->sreg1, 0);
2511 mono_add_patch_info (cfg, (guint8*)(code) - (cfg)->native_code,
2512 MONO_PATCH_INFO_EXC, "OverflowException");
2513 sparc_branchp (code, 0, sparc_bl, sparc_xcc_short, 0, 0);
2515 sparc_set (code, 1, sparc_o7);
2516 sparc_sllx_imm (code, sparc_o7, 32, sparc_o7);
2517 sparc_cmp (code, ins->sreg1, sparc_o7);
2518 mono_add_patch_info (cfg, (guint8*)(code) - (cfg)->native_code,
2519 MONO_PATCH_INFO_EXC, "OverflowException");
2520 sparc_branchp (code, 0, sparc_bge, sparc_xcc_short, 0, 0);
2522 sparc_mov_reg_reg (code, ins->sreg1, ins->dreg);
2524 case CEE_CONV_OVF_I4_UN:
2525 /* Only used on V9 */
2530 /* Only used on V9 */
2531 sparc_srl_imm (code, ins->sreg1, 0, ins->dreg);
2535 /* Only used on V9 */
2536 sparc_sra_imm (code, ins->sreg1, 0, ins->dreg);
2541 sparc_cmp (code, ins->sreg1, ins->sreg2);
2543 case OP_COMPARE_IMM:
2544 case OP_ICOMPARE_IMM:
2545 if (sparc_is_imm13 (ins->inst_imm))
2546 sparc_cmp_imm (code, ins->sreg1, ins->inst_imm);
2548 sparc_set (code, ins->inst_imm, sparc_o7);
2549 sparc_cmp (code, ins->sreg1, sparc_o7);
2554 * gdb does not like encountering 'ta 1' in the debugged code. So
2555 * instead of emitting a trap, we emit a call a C function and place a
2558 //sparc_ta (code, 1);
2559 mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_ABS, mono_break);
2564 sparc_add (code, TRUE, ins->sreg1, ins->sreg2, ins->dreg);
2568 sparc_add (code, FALSE, ins->sreg1, ins->sreg2, ins->dreg);
2573 /* according to inssel-long32.brg, this should set cc */
2574 EMIT_ALU_IMM (ins, add, TRUE);
2578 /* according to inssel-long32.brg, this should set cc */
2579 sparc_addx (code, TRUE, ins->sreg1, ins->sreg2, ins->dreg);
2583 EMIT_ALU_IMM (ins, addx, TRUE);
2587 sparc_sub (code, TRUE, ins->sreg1, ins->sreg2, ins->dreg);
2591 sparc_sub (code, FALSE, ins->sreg1, ins->sreg2, ins->dreg);
2596 /* according to inssel-long32.brg, this should set cc */
2597 EMIT_ALU_IMM (ins, sub, TRUE);
2601 /* according to inssel-long32.brg, this should set cc */
2602 sparc_subx (code, TRUE, ins->sreg1, ins->sreg2, ins->dreg);
2606 EMIT_ALU_IMM (ins, subx, TRUE);
2610 sparc_and (code, FALSE, ins->sreg1, ins->sreg2, ins->dreg);
2614 EMIT_ALU_IMM (ins, and, FALSE);
2618 /* Sign extend sreg1 into %y */
2619 sparc_sra_imm (code, ins->sreg1, 31, sparc_o7);
2620 sparc_wry (code, sparc_o7, sparc_g0);
2621 sparc_sdiv (code, TRUE, ins->sreg1, ins->sreg2, ins->dreg);
2622 EMIT_COND_SYSTEM_EXCEPTION_GENERAL (code, sparc_boverflow, "ArithmeticException", TRUE, sparc_icc_short);
2626 sparc_wry (code, sparc_g0, sparc_g0);
2627 sparc_udiv (code, FALSE, ins->sreg1, ins->sreg2, ins->dreg);
2632 /* Transform division into a shift */
2633 for (i = 1; i < 30; ++i) {
2635 if (ins->inst_imm == imm)
2641 sparc_srl_imm (code, ins->sreg1, 31, sparc_o7);
2642 sparc_add (code, FALSE, ins->sreg1, sparc_o7, ins->dreg);
2643 sparc_sra_imm (code, ins->dreg, 1, ins->dreg);
2646 /* http://compilers.iecc.com/comparch/article/93-04-079 */
2647 sparc_sra_imm (code, ins->sreg1, 31, sparc_o7);
2648 sparc_srl_imm (code, sparc_o7, 32 - i, sparc_o7);
2649 sparc_add (code, FALSE, ins->sreg1, sparc_o7, ins->dreg);
2650 sparc_sra_imm (code, ins->dreg, i, ins->dreg);
2654 /* Sign extend sreg1 into %y */
2655 sparc_sra_imm (code, ins->sreg1, 31, sparc_o7);
2656 sparc_wry (code, sparc_o7, sparc_g0);
2657 EMIT_ALU_IMM (ins, sdiv, TRUE);
2658 EMIT_COND_SYSTEM_EXCEPTION_GENERAL (code, sparc_boverflow, "ArithmeticException", TRUE, sparc_icc_short);
2664 /* Sign extend sreg1 into %y */
2665 sparc_sra_imm (code, ins->sreg1, 31, sparc_o7);
2666 sparc_wry (code, sparc_o7, sparc_g0);
2667 sparc_sdiv (code, TRUE, ins->sreg1, ins->sreg2, sparc_o7);
2668 EMIT_COND_SYSTEM_EXCEPTION_GENERAL (code, sparc_boverflow, "ArithmeticException", TRUE, sparc_icc_short);
2669 sparc_smul (code, FALSE, ins->sreg2, sparc_o7, sparc_o7);
2670 sparc_sub (code, FALSE, ins->sreg1, sparc_o7, ins->dreg);
2674 sparc_wry (code, sparc_g0, sparc_g0);
2675 sparc_udiv (code, FALSE, ins->sreg1, ins->sreg2, sparc_o7);
2676 sparc_umul (code, FALSE, ins->sreg2, sparc_o7, sparc_o7);
2677 sparc_sub (code, FALSE, ins->sreg1, sparc_o7, ins->dreg);
2681 /* Sign extend sreg1 into %y */
2682 sparc_sra_imm (code, ins->sreg1, 31, sparc_o7);
2683 sparc_wry (code, sparc_o7, sparc_g0);
2684 if (!sparc_is_imm13 (ins->inst_imm)) {
2685 sparc_set (code, ins->inst_imm, GP_SCRATCH_REG);
2686 sparc_sdiv (code, TRUE, ins->sreg1, GP_SCRATCH_REG, sparc_o7);
2687 EMIT_COND_SYSTEM_EXCEPTION_GENERAL (code, sparc_boverflow, "ArithmeticException", TRUE, sparc_icc_short);
2688 sparc_smul (code, FALSE, sparc_o7, GP_SCRATCH_REG, sparc_o7);
2691 sparc_sdiv_imm (code, TRUE, ins->sreg1, ins->inst_imm, sparc_o7);
2692 EMIT_COND_SYSTEM_EXCEPTION_GENERAL (code, sparc_boverflow, "ArithmeticException", TRUE, sparc_icc_short);
2693 sparc_smul_imm (code, FALSE, sparc_o7, ins->inst_imm, sparc_o7);
2695 sparc_sub (code, FALSE, ins->sreg1, sparc_o7, ins->dreg);
2699 sparc_or (code, FALSE, ins->sreg1, ins->sreg2, ins->dreg);
2703 EMIT_ALU_IMM (ins, or, FALSE);
2707 sparc_xor (code, FALSE, ins->sreg1, ins->sreg2, ins->dreg);
2711 EMIT_ALU_IMM (ins, xor, FALSE);
2715 sparc_sll (code, ins->sreg1, ins->sreg2, ins->dreg);
2719 if (ins->inst_imm < (1 << 5))
2720 sparc_sll_imm (code, ins->sreg1, ins->inst_imm, ins->dreg);
2722 sparc_set (code, ins->inst_imm, sparc_o7);
2723 sparc_sll (code, ins->sreg1, sparc_o7, ins->dreg);
2728 sparc_sra (code, ins->sreg1, ins->sreg2, ins->dreg);
2732 if (ins->inst_imm < (1 << 5))
2733 sparc_sra_imm (code, ins->sreg1, ins->inst_imm, ins->dreg);
2735 sparc_set (code, ins->inst_imm, sparc_o7);
2736 sparc_sra (code, ins->sreg1, sparc_o7, ins->dreg);
2740 case OP_ISHR_UN_IMM:
2741 if (ins->inst_imm < (1 << 5))
2742 sparc_srl_imm (code, ins->sreg1, ins->inst_imm, ins->dreg);
2744 sparc_set (code, ins->inst_imm, sparc_o7);
2745 sparc_srl (code, ins->sreg1, sparc_o7, ins->dreg);
2750 sparc_srl (code, ins->sreg1, ins->sreg2, ins->dreg);
2753 sparc_sllx (code, ins->sreg1, ins->sreg2, ins->dreg);
2756 if (ins->inst_imm < (1 << 6))
2757 sparc_sllx_imm (code, ins->sreg1, ins->inst_imm, ins->dreg);
2759 sparc_set (code, ins->inst_imm, sparc_o7);
2760 sparc_sllx (code, ins->sreg1, sparc_o7, ins->dreg);
2764 sparc_srax (code, ins->sreg1, ins->sreg2, ins->dreg);
2767 if (ins->inst_imm < (1 << 6))
2768 sparc_srax_imm (code, ins->sreg1, ins->inst_imm, ins->dreg);
2770 sparc_set (code, ins->inst_imm, sparc_o7);
2771 sparc_srax (code, ins->sreg1, sparc_o7, ins->dreg);
2775 sparc_srlx (code, ins->sreg1, ins->sreg2, ins->dreg);
2777 case OP_LSHR_UN_IMM:
2778 if (ins->inst_imm < (1 << 6))
2779 sparc_srlx_imm (code, ins->sreg1, ins->inst_imm, ins->dreg);
2781 sparc_set (code, ins->inst_imm, sparc_o7);
2782 sparc_srlx (code, ins->sreg1, sparc_o7, ins->dreg);
2787 /* can't use sparc_not */
2788 sparc_xnor (code, FALSE, ins->sreg1, sparc_g0, ins->dreg);
2792 /* can't use sparc_neg */
2793 sparc_sub (code, FALSE, sparc_g0, ins->sreg1, ins->dreg);
2797 sparc_smul (code, FALSE, ins->sreg1, ins->sreg2, ins->dreg);
2803 if ((ins->inst_imm == 1) && (ins->sreg1 == ins->dreg))
2806 /* Transform multiplication into a shift */
2807 for (i = 0; i < 30; ++i) {
2809 if (ins->inst_imm == imm)
2813 sparc_sll_imm (code, ins->sreg1, i, ins->dreg);
2815 EMIT_ALU_IMM (ins, smul, FALSE);
2820 sparc_smul (code, TRUE, ins->sreg1, ins->sreg2, ins->dreg);
2821 sparc_rdy (code, sparc_g1);
2822 sparc_sra_imm (code, ins->dreg, 31, sparc_o7);
2823 sparc_cmp (code, sparc_g1, sparc_o7);
2824 EMIT_COND_SYSTEM_EXCEPTION_GENERAL (ins, sparc_bne, "OverflowException", TRUE, sparc_icc_short);
2826 case CEE_MUL_OVF_UN:
2827 case OP_IMUL_OVF_UN:
2828 sparc_umul (code, TRUE, ins->sreg1, ins->sreg2, ins->dreg);
2829 sparc_rdy (code, sparc_o7);
2830 sparc_cmp (code, sparc_o7, sparc_g0);
2831 EMIT_COND_SYSTEM_EXCEPTION_GENERAL (ins, sparc_bne, "OverflowException", TRUE, sparc_icc_short);
2834 sparc_set (code, ins->inst_c0, ins->dreg);
2837 sparc_set (code, ins->inst_l, ins->dreg);
2840 mono_add_patch_info (cfg, offset, (MonoJumpInfoType)ins->inst_i1, ins->inst_p0);
2841 sparc_set_template (code, ins->dreg);
2846 if (ins->sreg1 != ins->dreg)
2847 sparc_mov_reg_reg (code, ins->sreg1, ins->dreg);
2850 /* Only used on V9 */
2851 if (ins->sreg1 != ins->dreg)
2852 sparc_fmovd (code, ins->sreg1, ins->dreg);
2854 case OP_SPARC_SETFREG_FLOAT:
2855 /* Only used on V9 */
2856 sparc_fdtos (code, ins->sreg1, ins->dreg);
2859 if (cfg->method->save_lmf)
2862 code = emit_load_volatile_arguments (cfg, code);
2863 mono_add_patch_info (cfg, (guint8*)code - cfg->native_code, MONO_PATCH_INFO_METHOD_JUMP, ins->inst_p0);
2864 sparc_set_template (code, sparc_o7);
2865 sparc_jmpl (code, sparc_o7, sparc_g0, sparc_g0);
2866 /* Restore parent frame in delay slot */
2867 sparc_restore_imm (code, sparc_g0, 0, sparc_g0);
2870 /* ensure ins->sreg1 is not NULL */
2871 /* Might be misaligned in case of vtypes so use a byte load */
2872 sparc_ldsb_imm (code, ins->sreg1, 0, sparc_g0);
2875 sparc_add_imm (code, FALSE, sparc_fp, cfg->sig_cookie, sparc_o7);
2876 sparc_sti_imm (code, sparc_o7, ins->sreg1, 0);
2883 call = (MonoCallInst*)ins;
2884 g_assert (!call->virtual);
2885 code = emit_save_sp_to_lmf (cfg, code);
2886 if (ins->flags & MONO_INST_HAS_METHOD)
2887 code = emit_call (cfg, code, MONO_PATCH_INFO_METHOD, call->method);
2889 code = emit_call (cfg, code, MONO_PATCH_INFO_ABS, call->fptr);
2891 code = emit_vret_token (cfg->generic_sharing_context, ins, code);
2892 code = emit_move_return_value (ins, code);
2897 case OP_VOIDCALL_REG:
2899 call = (MonoCallInst*)ins;
2900 code = emit_save_sp_to_lmf (cfg, code);
2901 sparc_jmpl (code, ins->sreg1, sparc_g0, sparc_callsite);
2903 * We emit a special kind of nop in the delay slot to tell the
2904 * trampoline code that this is a virtual call, thus an unbox
2905 * trampoline might need to be called.
2908 sparc_or_imm (code, FALSE, sparc_g0, 0xca, sparc_g0);
2912 code = emit_vret_token (cfg->generic_sharing_context, ins, code);
2913 code = emit_move_return_value (ins, code);
2915 case OP_FCALL_MEMBASE:
2916 case OP_LCALL_MEMBASE:
2917 case OP_VCALL_MEMBASE:
2918 case OP_VOIDCALL_MEMBASE:
2919 case OP_CALL_MEMBASE:
2920 call = (MonoCallInst*)ins;
2921 code = emit_save_sp_to_lmf (cfg, code);
2922 if (sparc_is_imm13 (ins->inst_offset)) {
2923 sparc_ldi_imm (code, ins->inst_basereg, ins->inst_offset, sparc_o7);
2925 sparc_set (code, ins->inst_offset, sparc_o7);
2926 sparc_ldi (code, ins->inst_basereg, sparc_o7, sparc_o7);
2928 sparc_jmpl (code, sparc_o7, sparc_g0, sparc_callsite);
2930 sparc_or_imm (code, FALSE, sparc_g0, 0xca, sparc_g0);
2934 code = emit_vret_token (cfg->generic_sharing_context, ins, code);
2935 code = emit_move_return_value (ins, code);
2938 if (mono_method_signature (cfg->method)->ret->type == MONO_TYPE_R4)
2939 sparc_fdtos (code, ins->sreg1, sparc_f0);
2942 sparc_fmovd (code, ins->sreg1, ins->dreg);
2944 /* FIXME: Why not use fmovd ? */
2945 sparc_fmovs (code, ins->sreg1, ins->dreg);
2946 sparc_fmovs (code, ins->sreg1 + 1, ins->dreg + 1);
2951 g_assert_not_reached ();
2956 #ifdef MONO_ARCH_SIGSEGV_ON_ALTSTACK
2957 /* Perform stack touching */
2961 /* Keep alignment */
2962 sparc_add_imm (code, FALSE, ins->sreg1, MONO_ARCH_LOCALLOC_ALIGNMENT - 1, ins->dreg);
2963 sparc_set (code, ~(MONO_ARCH_LOCALLOC_ALIGNMENT - 1), sparc_o7);
2964 sparc_and (code, FALSE, ins->dreg, sparc_o7, ins->dreg);
2966 if ((ins->flags & MONO_INST_INIT) && (ins->sreg1 == ins->dreg)) {
2968 size_reg = sparc_g4;
2970 size_reg = sparc_g1;
2972 sparc_mov_reg_reg (code, ins->dreg, size_reg);
2975 size_reg = ins->sreg1;
2977 sparc_sub (code, FALSE, sparc_sp, ins->dreg, ins->dreg);
2978 /* Keep %sp valid at all times */
2979 sparc_mov_reg_reg (code, ins->dreg, sparc_sp);
2980 g_assert (sparc_is_imm13 (MONO_SPARC_STACK_BIAS + cfg->arch.localloc_offset));
2981 sparc_add_imm (code, FALSE, ins->dreg, MONO_SPARC_STACK_BIAS + cfg->arch.localloc_offset, ins->dreg);
2983 if (ins->flags & MONO_INST_INIT) {
2985 /* Initialize memory region */
2986 sparc_cmp_imm (code, size_reg, 0);
2988 sparc_branch (code, 0, sparc_be, 0);
2990 sparc_set (code, 0, sparc_o7);
2991 sparc_sub_imm (code, 0, size_reg, sparcv9 ? 8 : 4, size_reg);
2995 sparc_stx (code, sparc_g0, ins->dreg, sparc_o7);
2997 sparc_st (code, sparc_g0, ins->dreg, sparc_o7);
2998 sparc_cmp (code, sparc_o7, size_reg);
3000 sparc_branch (code, 0, sparc_bl, 0);
3001 sparc_patch (br [2], br [1]);
3003 sparc_add_imm (code, 0, sparc_o7, sparcv9 ? 8 : 4, sparc_o7);
3004 sparc_patch (br [0], code);
3008 case OP_SPARC_LOCALLOC_IMM: {
3009 gint32 offset = ins->inst_c0;
3011 #ifdef MONO_ARCH_SIGSEGV_ON_ALTSTACK
3012 /* Perform stack touching */
3016 offset = ALIGN_TO (offset, MONO_ARCH_LOCALLOC_ALIGNMENT);
3017 if (sparc_is_imm13 (offset))
3018 sparc_sub_imm (code, FALSE, sparc_sp, offset, sparc_sp);
3020 sparc_set (code, offset, sparc_o7);
3021 sparc_sub (code, FALSE, sparc_sp, sparc_o7, sparc_sp);
3023 g_assert (sparc_is_imm13 (MONO_SPARC_STACK_BIAS + cfg->arch.localloc_offset));
3024 sparc_add_imm (code, FALSE, sparc_sp, MONO_SPARC_STACK_BIAS + cfg->arch.localloc_offset, ins->dreg);
3025 if ((ins->flags & MONO_INST_INIT) && (offset > 0)) {
3031 while (i < offset) {
3033 sparc_stx_imm (code, sparc_g0, ins->dreg, i);
3037 sparc_st_imm (code, sparc_g0, ins->dreg, i);
3043 sparc_set (code, offset, sparc_o7);
3044 sparc_sub_imm (code, 0, sparc_o7, sparcv9 ? 8 : 4, sparc_o7);
3045 /* beginning of loop */
3048 sparc_stx (code, sparc_g0, ins->dreg, sparc_o7);
3050 sparc_st (code, sparc_g0, ins->dreg, sparc_o7);
3051 sparc_cmp_imm (code, sparc_o7, 0);
3053 sparc_branch (code, 0, sparc_bne, 0);
3055 sparc_sub_imm (code, 0, sparc_o7, sparcv9 ? 8 : 4, sparc_o7);
3056 sparc_patch (br [1], br [0]);
3062 /* The return is done in the epilog */
3063 g_assert_not_reached ();
3066 sparc_mov_reg_reg (code, ins->sreg1, sparc_o0);
3067 mono_add_patch_info (cfg, (guint8*)code - cfg->native_code, MONO_PATCH_INFO_INTERNAL_METHOD,
3068 (gpointer)"mono_arch_throw_exception");
3072 sparc_mov_reg_reg (code, ins->sreg1, sparc_o0);
3073 mono_add_patch_info (cfg, (guint8*)code - cfg->native_code, MONO_PATCH_INFO_INTERNAL_METHOD,
3074 (gpointer)"mono_arch_rethrow_exception");
3077 case OP_START_HANDLER: {
3079 * The START_HANDLER instruction marks the beginning of a handler
3080 * block. It is called using a call instruction, so %o7 contains
3081 * the return address. Since the handler executes in the same stack
3082 * frame as the method itself, we can't use save/restore to save
3083 * the return address. Instead, we save it into a dedicated
3086 MonoInst *spvar = mono_find_spvar_for_region (cfg, bb->region);
3087 if (!sparc_is_imm13 (spvar->inst_offset)) {
3088 sparc_set (code, spvar->inst_offset, GP_SCRATCH_REG);
3089 sparc_sti (code, sparc_o7, spvar->inst_basereg, GP_SCRATCH_REG);
3092 sparc_sti_imm (code, sparc_o7, spvar->inst_basereg, spvar->inst_offset);
3095 case OP_ENDFILTER: {
3096 MonoInst *spvar = mono_find_spvar_for_region (cfg, bb->region);
3097 if (!sparc_is_imm13 (spvar->inst_offset)) {
3098 sparc_set (code, spvar->inst_offset, GP_SCRATCH_REG);
3099 sparc_ldi (code, spvar->inst_basereg, GP_SCRATCH_REG, sparc_o7);
3102 sparc_ldi_imm (code, spvar->inst_basereg, spvar->inst_offset, sparc_o7);
3103 sparc_jmpl_imm (code, sparc_o7, 8, sparc_g0);
3105 sparc_mov_reg_reg (code, ins->sreg1, sparc_o0);
3108 case OP_ENDFINALLY: {
3109 MonoInst *spvar = mono_find_spvar_for_region (cfg, bb->region);
3110 if (!sparc_is_imm13 (spvar->inst_offset)) {
3111 sparc_set (code, spvar->inst_offset, GP_SCRATCH_REG);
3112 sparc_ldi (code, spvar->inst_basereg, GP_SCRATCH_REG, sparc_o7);
3115 sparc_ldi_imm (code, spvar->inst_basereg, spvar->inst_offset, sparc_o7);
3116 sparc_jmpl_imm (code, sparc_o7, 8, sparc_g0);
3120 case OP_CALL_HANDLER:
3121 mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_BB, ins->inst_target_bb);
3122 /* This is a jump inside the method, so call_simple works even on V9 */
3123 sparc_call_simple (code, 0);
3127 ins->inst_c0 = (guint8*)code - cfg->native_code;
3130 if ((ins->inst_target_bb == bb->next_bb) &&
3131 ins->node.next == &bb->ins_list)
3133 if (ins->flags & MONO_INST_BRLABEL) {
3134 if (ins->inst_i0->inst_c0) {
3135 gint32 disp = (ins->inst_i0->inst_c0 - ((guint8*)code - cfg->native_code)) >> 2;
3136 g_assert (sparc_is_imm22 (disp));
3137 sparc_branch (code, 1, sparc_ba, disp);
3139 mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_LABEL, ins->inst_i0);
3140 sparc_branch (code, 1, sparc_ba, 0);
3143 if (ins->inst_target_bb->native_offset) {
3144 gint32 disp = (ins->inst_target_bb->native_offset - ((guint8*)code - cfg->native_code)) >> 2;
3145 g_assert (sparc_is_imm22 (disp));
3146 sparc_branch (code, 1, sparc_ba, disp);
3148 mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_BB, ins->inst_target_bb);
3149 sparc_branch (code, 1, sparc_ba, 0);
3155 sparc_jmp (code, ins->sreg1, sparc_g0);
3163 if (v64 && (cfg->opt & MONO_OPT_CMOV)) {
3164 sparc_clr_reg (code, ins->dreg);
3165 sparc_movcc_imm (code, sparc_xcc, opcode_to_sparc_cond (ins->opcode), 1, ins->dreg);
3168 sparc_clr_reg (code, ins->dreg);
3170 sparc_branchp (code, 1, opcode_to_sparc_cond (ins->opcode), DEFAULT_ICC, 0, 2);
3172 sparc_branch (code, 1, opcode_to_sparc_cond (ins->opcode), 2);
3175 sparc_set (code, 1, ins->dreg);
3183 if (v64 && (cfg->opt & MONO_OPT_CMOV)) {
3184 sparc_clr_reg (code, ins->dreg);
3185 sparc_movcc_imm (code, sparc_icc, opcode_to_sparc_cond (ins->opcode), 1, ins->dreg);
3188 sparc_clr_reg (code, ins->dreg);
3189 sparc_branchp (code, 1, opcode_to_sparc_cond (ins->opcode), sparc_icc_short, 0, 2);
3191 sparc_set (code, 1, ins->dreg);
3194 case OP_COND_EXC_EQ:
3195 case OP_COND_EXC_NE_UN:
3196 case OP_COND_EXC_LT:
3197 case OP_COND_EXC_LT_UN:
3198 case OP_COND_EXC_GT:
3199 case OP_COND_EXC_GT_UN:
3200 case OP_COND_EXC_GE:
3201 case OP_COND_EXC_GE_UN:
3202 case OP_COND_EXC_LE:
3203 case OP_COND_EXC_LE_UN:
3204 case OP_COND_EXC_OV:
3205 case OP_COND_EXC_NO:
3207 case OP_COND_EXC_NC:
3208 EMIT_COND_SYSTEM_EXCEPTION (ins, opcode_to_sparc_cond (ins->opcode), ins->inst_p1);
3210 case OP_SPARC_COND_EXC_EQZ:
3211 EMIT_COND_SYSTEM_EXCEPTION_BPR (ins, brz, ins->inst_p1);
3213 case OP_SPARC_COND_EXC_GEZ:
3214 EMIT_COND_SYSTEM_EXCEPTION_BPR (ins, brgez, ins->inst_p1);
3216 case OP_SPARC_COND_EXC_GTZ:
3217 EMIT_COND_SYSTEM_EXCEPTION_BPR (ins, brgz, ins->inst_p1);
3219 case OP_SPARC_COND_EXC_LEZ:
3220 EMIT_COND_SYSTEM_EXCEPTION_BPR (ins, brlez, ins->inst_p1);
3222 case OP_SPARC_COND_EXC_LTZ:
3223 EMIT_COND_SYSTEM_EXCEPTION_BPR (ins, brlz, ins->inst_p1);
3225 case OP_SPARC_COND_EXC_NEZ:
3226 EMIT_COND_SYSTEM_EXCEPTION_BPR (ins, brnz, ins->inst_p1);
3228 case OP_COND_EXC_IOV:
3229 case OP_COND_EXC_IC:
3230 EMIT_COND_SYSTEM_EXCEPTION_GENERAL (ins, opcode_to_sparc_cond (ins->opcode), ins->inst_p1, TRUE, sparc_icc_short);
3243 EMIT_COND_BRANCH_PREDICTED (ins, opcode_to_sparc_cond (ins->opcode), 1, 1);
3245 EMIT_COND_BRANCH (ins, opcode_to_sparc_cond (ins->opcode), 1, 1);
3259 /* Only used on V9 */
3260 EMIT_COND_BRANCH_ICC (ins, opcode_to_sparc_cond (ins->opcode), 1, 1, sparc_icc_short);
3265 EMIT_COND_BRANCH_BPR (ins, brz, 1, 1, 1);
3267 case OP_SPARC_BRLEZ:
3268 EMIT_COND_BRANCH_BPR (ins, brlez, 1, 1, 1);
3271 EMIT_COND_BRANCH_BPR (ins, brlz, 1, 1, 1);
3274 EMIT_COND_BRANCH_BPR (ins, brnz, 1, 1, 1);
3277 EMIT_COND_BRANCH_BPR (ins, brgz, 1, 1, 1);
3279 case OP_SPARC_BRGEZ:
3280 EMIT_COND_BRANCH_BPR (ins, brgez, 1, 1, 1);
3283 /* floating point opcodes */
3285 mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_R8, ins->inst_p0);
3287 sparc_set_template (code, sparc_o7);
3289 sparc_sethi (code, 0, sparc_o7);
3291 sparc_lddf_imm (code, sparc_o7, 0, ins->dreg);
3294 mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_R4, ins->inst_p0);
3296 sparc_set_template (code, sparc_o7);
3298 sparc_sethi (code, 0, sparc_o7);
3300 sparc_ldf_imm (code, sparc_o7, 0, FP_SCRATCH_REG);
3302 /* Extend to double */
3303 sparc_fstod (code, FP_SCRATCH_REG, ins->dreg);
3305 case OP_STORER8_MEMBASE_REG:
3306 if (!sparc_is_imm13 (ins->inst_offset + 4)) {
3307 sparc_set (code, ins->inst_offset, sparc_o7);
3308 /* SPARCV9 handles misaligned fp loads/stores */
3309 if (!v64 && (ins->inst_offset % 8)) {
3311 sparc_add (code, FALSE, ins->inst_destbasereg, sparc_o7, sparc_o7);
3312 sparc_stf (code, ins->sreg1, sparc_o7, sparc_g0);
3313 sparc_stf_imm (code, ins->sreg1 + 1, sparc_o7, 4);
3315 sparc_stdf (code, ins->sreg1, ins->inst_destbasereg, sparc_o7);
3318 if (!v64 && (ins->inst_offset % 8)) {
3320 sparc_stf_imm (code, ins->sreg1, ins->inst_destbasereg, ins->inst_offset);
3321 sparc_stf_imm (code, ins->sreg1 + 1, ins->inst_destbasereg, ins->inst_offset + 4);
3323 sparc_stdf_imm (code, ins->sreg1, ins->inst_destbasereg, ins->inst_offset);
3326 case OP_LOADR8_MEMBASE:
3327 EMIT_LOAD_MEMBASE (ins, lddf);
3329 case OP_STORER4_MEMBASE_REG:
3330 /* This requires a double->single conversion */
3331 sparc_fdtos (code, ins->sreg1, FP_SCRATCH_REG);
3332 if (!sparc_is_imm13 (ins->inst_offset)) {
3333 sparc_set (code, ins->inst_offset, sparc_o7);
3334 sparc_stf (code, FP_SCRATCH_REG, ins->inst_destbasereg, sparc_o7);
3337 sparc_stf_imm (code, FP_SCRATCH_REG, ins->inst_destbasereg, ins->inst_offset);
3339 case OP_LOADR4_MEMBASE: {
3340 /* ldf needs a single precision register */
3341 int dreg = ins->dreg;
3342 ins->dreg = FP_SCRATCH_REG;
3343 EMIT_LOAD_MEMBASE (ins, ldf);
3345 /* Extend to double */
3346 sparc_fstod (code, FP_SCRATCH_REG, ins->dreg);
3351 sparc_fmovd (code, ins->sreg1, ins->dreg);
3353 sparc_fmovs (code, ins->sreg1, ins->dreg);
3354 sparc_fmovs (code, ins->sreg1 + 1, ins->dreg + 1);
3358 gint32 offset = mono_spillvar_offset_float (cfg, 0);
3360 if (!sparc_is_imm13 (offset)) {
3361 sparc_set (code, offset, sparc_o7);
3362 sparc_stx (code, ins->sreg1, sparc_sp, offset);
3363 sparc_lddf (code, sparc_sp, offset, FP_SCRATCH_REG);
3365 sparc_stx_imm (code, ins->sreg1, sparc_sp, offset);
3366 sparc_lddf_imm (code, sparc_sp, offset, FP_SCRATCH_REG);
3368 sparc_fxtos (code, FP_SCRATCH_REG, FP_SCRATCH_REG);
3370 if (!sparc_is_imm13 (offset)) {
3371 sparc_set (code, offset, sparc_o7);
3372 sparc_st (code, ins->sreg1, sparc_sp, sparc_o7);
3373 sparc_ldf (code, sparc_sp, sparc_o7, FP_SCRATCH_REG);
3375 sparc_st_imm (code, ins->sreg1, sparc_sp, offset);
3376 sparc_ldf_imm (code, sparc_sp, offset, FP_SCRATCH_REG);
3378 sparc_fitos (code, FP_SCRATCH_REG, FP_SCRATCH_REG);
3380 sparc_fstod (code, FP_SCRATCH_REG, ins->dreg);
3384 gint32 offset = mono_spillvar_offset_float (cfg, 0);
3386 if (!sparc_is_imm13 (offset)) {
3387 sparc_set (code, offset, sparc_o7);
3388 sparc_stx (code, ins->sreg1, sparc_sp, sparc_o7);
3389 sparc_lddf (code, sparc_sp, sparc_o7, FP_SCRATCH_REG);
3391 sparc_stx_imm (code, ins->sreg1, sparc_sp, offset);
3392 sparc_lddf_imm (code, sparc_sp, offset, FP_SCRATCH_REG);
3394 sparc_fxtod (code, FP_SCRATCH_REG, ins->dreg);
3396 if (!sparc_is_imm13 (offset)) {
3397 sparc_set (code, offset, sparc_o7);
3398 sparc_st (code, ins->sreg1, sparc_sp, sparc_o7);
3399 sparc_ldf (code, sparc_sp, sparc_o7, FP_SCRATCH_REG);
3401 sparc_st_imm (code, ins->sreg1, sparc_sp, offset);
3402 sparc_ldf_imm (code, sparc_sp, offset, FP_SCRATCH_REG);
3404 sparc_fitod (code, FP_SCRATCH_REG, ins->dreg);
3408 case OP_FCONV_TO_I1:
3409 case OP_FCONV_TO_U1:
3410 case OP_FCONV_TO_I2:
3411 case OP_FCONV_TO_U2:
3416 case OP_FCONV_TO_I4:
3417 case OP_FCONV_TO_U4: {
3418 gint32 offset = mono_spillvar_offset_float (cfg, 0);
3419 sparc_fdtoi (code, ins->sreg1, FP_SCRATCH_REG);
3420 if (!sparc_is_imm13 (offset)) {
3421 sparc_set (code, offset, sparc_o7);
3422 sparc_stdf (code, FP_SCRATCH_REG, sparc_sp, sparc_o7);
3423 sparc_ld (code, sparc_sp, sparc_o7, ins->dreg);
3425 sparc_stdf_imm (code, FP_SCRATCH_REG, sparc_sp, offset);
3426 sparc_ld_imm (code, sparc_sp, offset, ins->dreg);
3429 switch (ins->opcode) {
3430 case OP_FCONV_TO_I1:
3431 case OP_FCONV_TO_U1:
3432 sparc_and_imm (code, 0, ins->dreg, 0xff, ins->dreg);
3434 case OP_FCONV_TO_I2:
3435 case OP_FCONV_TO_U2:
3436 sparc_set (code, 0xffff, sparc_o7);
3437 sparc_and (code, 0, ins->dreg, sparc_o7, ins->dreg);
3444 case OP_FCONV_TO_I8:
3445 case OP_FCONV_TO_U8:
3447 g_assert_not_reached ();
3451 g_assert_not_reached ();
3453 case OP_LCONV_TO_R_UN: {
3455 g_assert_not_reached ();
3458 case OP_LCONV_TO_OVF_I: {
3459 guint32 *br [3], *label [1];
3462 * Valid ints: 0xffffffff:8000000 to 00000000:0x7f000000
3464 sparc_cmp_imm (code, ins->sreg1, 0);
3466 sparc_branch (code, 1, sparc_bneg, 0);
3470 /* ms word must be 0 */
3471 sparc_cmp_imm (code, ins->sreg2, 0);
3473 sparc_branch (code, 1, sparc_be, 0);
3478 EMIT_COND_SYSTEM_EXCEPTION (ins, sparc_ba, "OverflowException");
3481 sparc_patch (br [0], code);
3483 /* ms word must 0xfffffff */
3484 sparc_cmp_imm (code, ins->sreg2, -1);
3486 sparc_branch (code, 1, sparc_bne, 0);
3488 sparc_patch (br [2], label [0]);
3491 sparc_patch (br [1], code);
3492 if (ins->sreg1 != ins->dreg)
3493 sparc_mov_reg_reg (code, ins->sreg1, ins->dreg);
3497 sparc_faddd (code, ins->sreg1, ins->sreg2, ins->dreg);
3500 sparc_fsubd (code, ins->sreg1, ins->sreg2, ins->dreg);
3503 sparc_fmuld (code, ins->sreg1, ins->sreg2, ins->dreg);
3506 sparc_fdivd (code, ins->sreg1, ins->sreg2, ins->dreg);
3510 sparc_fnegd (code, ins->sreg1, ins->dreg);
3512 /* FIXME: why don't use fnegd ? */
3513 sparc_fnegs (code, ins->sreg1, ins->dreg);
3517 sparc_fdivd (code, ins->sreg1, ins->sreg2, FP_SCRATCH_REG);
3518 sparc_fmuld (code, ins->sreg2, FP_SCRATCH_REG, FP_SCRATCH_REG);
3519 sparc_fsubd (code, ins->sreg1, FP_SCRATCH_REG, ins->dreg);
3522 sparc_fcmpd (code, ins->sreg1, ins->sreg2);
3529 sparc_fcmpd (code, ins->sreg1, ins->sreg2);
3530 sparc_clr_reg (code, ins->dreg);
3531 switch (ins->opcode) {
3534 sparc_fbranch (code, 1, opcode_to_sparc_cond (ins->opcode), 4);
3536 sparc_set (code, 1, ins->dreg);
3537 sparc_fbranch (code, 1, sparc_fbu, 2);
3539 sparc_set (code, 1, ins->dreg);
3542 sparc_fbranch (code, 1, opcode_to_sparc_cond (ins->opcode), 2);
3544 sparc_set (code, 1, ins->dreg);
3550 EMIT_FLOAT_COND_BRANCH (ins, opcode_to_sparc_cond (ins->opcode), 1, 1);
3553 /* clt.un + brfalse */
3555 sparc_fbranch (code, 1, sparc_fbul, 0);
3558 EMIT_FLOAT_COND_BRANCH (ins, sparc_fba, 1, 1);
3559 sparc_patch (p, (guint8*)code);
3563 /* cgt.un + brfalse */
3565 sparc_fbranch (code, 1, sparc_fbug, 0);
3568 EMIT_FLOAT_COND_BRANCH (ins, sparc_fba, 1, 1);
3569 sparc_patch (p, (guint8*)code);
3573 EMIT_FLOAT_COND_BRANCH (ins, sparc_fbne, 1, 1);
3574 EMIT_FLOAT_COND_BRANCH (ins, sparc_fbu, 1, 1);
3577 EMIT_FLOAT_COND_BRANCH (ins, sparc_fbl, 1, 1);
3578 EMIT_FLOAT_COND_BRANCH (ins, sparc_fbu, 1, 1);
3581 EMIT_FLOAT_COND_BRANCH (ins, sparc_fbg, 1, 1);
3582 EMIT_FLOAT_COND_BRANCH (ins, sparc_fbu, 1, 1);
3585 EMIT_FLOAT_COND_BRANCH (ins, sparc_fbge, 1, 1);
3586 EMIT_FLOAT_COND_BRANCH (ins, sparc_fbu, 1, 1);
3589 EMIT_FLOAT_COND_BRANCH (ins, sparc_fble, 1, 1);
3590 EMIT_FLOAT_COND_BRANCH (ins, sparc_fbu, 1, 1);
3593 gint32 offset = mono_spillvar_offset_float (cfg, 0);
3594 if (!sparc_is_imm13 (offset)) {
3595 sparc_set (code, offset, sparc_o7);
3596 sparc_stdf (code, ins->sreg1, sparc_sp, sparc_o7);
3597 sparc_lduh (code, sparc_sp, sparc_o7, sparc_o7);
3599 sparc_stdf_imm (code, ins->sreg1, sparc_sp, offset);
3600 sparc_lduh_imm (code, sparc_sp, offset, sparc_o7);
3602 sparc_srl_imm (code, sparc_o7, 4, sparc_o7);
3603 sparc_and_imm (code, FALSE, sparc_o7, 2047, sparc_o7);
3604 sparc_cmp_imm (code, sparc_o7, 2047);
3605 EMIT_COND_SYSTEM_EXCEPTION (ins, sparc_be, "ArithmeticException");
3607 sparc_fmovd (code, ins->sreg1, ins->dreg);
3609 sparc_fmovs (code, ins->sreg1, ins->dreg);
3610 sparc_fmovs (code, ins->sreg1 + 1, ins->dreg + 1);
3615 case OP_MEMORY_BARRIER:
3616 sparc_membar (code, sparc_membar_all);
3621 g_warning ("unknown opcode %s in %s()\n", mono_inst_name (ins->opcode), __FUNCTION__);
3623 g_warning ("%s:%d: unknown opcode %s\n", __FILE__, __LINE__, mono_inst_name (ins->opcode));
3625 g_assert_not_reached ();
3628 if ((((guint8*)code) - code_start) > max_len) {
3629 g_warning ("wrong maximal instruction length of instruction %s (expected %d, got %d)",
3630 mono_inst_name (ins->opcode), max_len, ((guint8*)code) - code_start);
3631 g_assert_not_reached ();
3637 cfg->code_len = (guint8*)code - cfg->native_code;
/*
 * mono_arch_register_lowlevel_calls:
 *
 *   Register the sparc specific low-level runtime helpers with the JIT so
 * generated code can reference them by name. Only mono_arch_get_lmf_addr
 * is needed on this architecture.
 */
3641 mono_arch_register_lowlevel_calls (void)
3643 mono_register_jit_icall (mono_arch_get_lmf_addr, "mono_arch_get_lmf_addr", NULL, TRUE);
/*
 * mono_arch_patch_code:
 *
 *   Apply the jump-info list JI to the native CODE of METHOD: resolve each
 * patch target and rewrite the instruction(s) at the recorded code offset.
 * CLASS_INIT and METHOD_JUMP call sites re-emit a full call template;
 * every other patch kind is handled by the generic sparc_patch () below.
 */
3647 mono_arch_patch_code (MonoMethod *method, MonoDomain *domain, guint8 *code, MonoJumpInfo *ji, gboolean run_cctors)
3649 MonoJumpInfo *patch_info;
3651 /* FIXME: Move part of this to arch independent code */
3652 for (patch_info = ji; patch_info; patch_info = patch_info->next) {
/* patch_info->ip.i is the byte offset of the patch site from the method start */
3653 unsigned char *ip = patch_info->ip.i + code;
3656 target = mono_resolve_patch_target (method, domain, code, patch_info, run_cctors);
3658 switch (patch_info->type) {
3659 case MONO_PATCH_INFO_NONE:
3661 case MONO_PATCH_INFO_CLASS_INIT: {
3662 guint32 *ip2 = (guint32*)ip;
3663 /* Might already been changed to a nop */
/* Re-emit the class-init call site: either the set+jmpl template or a plain
 * call — NOTE(review): the selecting guard (presumably v64) is elided here. */
3665 sparc_set_template (ip2, sparc_o7);
3666 sparc_jmpl (ip2, sparc_o7, sparc_g0, sparc_o7);
3668 sparc_call_simple (ip2, 0);
3672 case MONO_PATCH_INFO_METHOD_JUMP: {
3673 guint32 *ip2 = (guint32*)ip;
3674 /* Might already been patched */
3675 sparc_set_template (ip2, sparc_o7);
/* Default: let sparc_patch () rewrite the branch/set/call at ip */
3681 sparc_patch ((guint32*)ip, target);
/*
 * mono_arch_instrument_prolog:
 *
 *   Emit instrumentation code at method entry which calls FUNC (the
 * enter-method trace hook). The incoming integer argument registers
 * %i0-%i5 are spilled to their reserved stack slots; floating point
 * arguments are spilled too (on V9 the fp argument registers are caller
 * saved) and reloaded after the call.
 */
3686 mono_arch_instrument_prolog (MonoCompile *cfg, void *func, void *p, gboolean enable_arguments)
3689 guint32 *code = (guint32*)p;
3690 MonoMethodSignature *sig = mono_method_signature (cfg->method);
3693 /* Save registers to stack */
3694 for (i = 0; i < 6; ++i)
3695 sparc_sti_imm (code, sparc_i0 + i, sparc_fp, ARGS_OFFSET + (i * sizeof (gpointer)));
3697 cinfo = get_call_info (cfg, sig, FALSE);
3699 /* Save float regs on V9, since they are caller saved */
3700 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
3701 ArgInfo *ainfo = cinfo->args + i;
3702 gint32 stack_offset;
3704 stack_offset = ainfo->offset + ARGS_OFFSET;
3706 if (ainfo->storage == ArgInFloatReg) {
3707 if (!sparc_is_imm13 (stack_offset))
3709 sparc_stf_imm (code, ainfo->reg, sparc_fp, stack_offset);
3711 else if (ainfo->storage == ArgInDoubleReg) {
3712 /* The offset is guaranteed to be aligned by the ABI rules */
3713 sparc_stdf_imm (code, ainfo->reg, sparc_fp, stack_offset);
/* Call FUNC: method in %o0, %o1 = %fp + bias (presumably the base of the
 * saved argument area — TODO confirm against the trace hook's signature) */
3717 sparc_set (code, cfg->method, sparc_o0);
3718 sparc_add_imm (code, FALSE, sparc_fp, MONO_SPARC_STACK_BIAS, sparc_o1);
3720 mono_add_patch_info (cfg, (guint8*)code-cfg->native_code, MONO_PATCH_INFO_ABS, func);
3723 /* Restore float regs on V9 */
3724 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
3725 ArgInfo *ainfo = cinfo->args + i;
3726 gint32 stack_offset;
3728 stack_offset = ainfo->offset + ARGS_OFFSET;
3730 if (ainfo->storage == ArgInFloatReg) {
3731 if (!sparc_is_imm13 (stack_offset))
3733 sparc_ldf_imm (code, sparc_fp, stack_offset, ainfo->reg);
3735 else if (ainfo->storage == ArgInDoubleReg) {
3736 /* The offset is guaranteed to be aligned by the ABI rules */
3737 sparc_lddf_imm (code, sparc_fp, stack_offset, ainfo->reg);
3755 mono_arch_instrument_epilog (MonoCompile *cfg, void *func, void *p, gboolean enable_arguments)
3757 guint32 *code = (guint32*)p;
3758 int save_mode = SAVE_NONE;
3759 MonoMethod *method = cfg->method;
3761 switch (mono_type_get_underlying_type (mono_method_signature (method)->ret)->type) {
3762 case MONO_TYPE_VOID:
3763 /* special case string .ctor icall */
3764 if (strcmp (".ctor", method->name) && method->klass == mono_defaults.string_class)
3765 save_mode = SAVE_ONE;
3767 save_mode = SAVE_NONE;
3772 save_mode = SAVE_ONE;
3774 save_mode = SAVE_TWO;
3779 save_mode = SAVE_FP;
3781 case MONO_TYPE_VALUETYPE:
3782 save_mode = SAVE_STRUCT;
3785 save_mode = SAVE_ONE;
3789 /* Save the result to the stack and also put it into the output registers */
3791 switch (save_mode) {
3794 sparc_st_imm (code, sparc_i0, sparc_fp, 68);
3795 sparc_st_imm (code, sparc_i0, sparc_fp, 72);
3796 sparc_mov_reg_reg (code, sparc_i0, sparc_o1);
3797 sparc_mov_reg_reg (code, sparc_i1, sparc_o2);
3800 sparc_sti_imm (code, sparc_i0, sparc_fp, ARGS_OFFSET);
3801 sparc_mov_reg_reg (code, sparc_i0, sparc_o1);
3805 sparc_stdf_imm (code, sparc_f0, sparc_fp, ARGS_OFFSET);
3807 sparc_stdf_imm (code, sparc_f0, sparc_fp, 72);
3808 sparc_ld_imm (code, sparc_fp, 72, sparc_o1);
3809 sparc_ld_imm (code, sparc_fp, 72 + 4, sparc_o2);
3814 sparc_mov_reg_reg (code, sparc_i0, sparc_o1);
3816 sparc_ld_imm (code, sparc_fp, 64, sparc_o1);
3824 sparc_set (code, cfg->method, sparc_o0);
3826 mono_add_patch_info (cfg, (guint8*)code - cfg->native_code, MONO_PATCH_INFO_ABS, func);
3829 /* Restore result */
3831 switch (save_mode) {
3833 sparc_ld_imm (code, sparc_fp, 68, sparc_i0);
3834 sparc_ld_imm (code, sparc_fp, 72, sparc_i0);
3837 sparc_ldi_imm (code, sparc_fp, ARGS_OFFSET, sparc_i0);
3840 sparc_lddf_imm (code, sparc_fp, ARGS_OFFSET, sparc_f0);
/*
 * mono_arch_emit_prolog:
 *
 *   Emit the native prolog for CFG's method: compute the final frame size
 * (register save area, struct return slot, outgoing parameter area and the
 * localloc area), emit the SAVE instruction which allocates the frame and
 * rotates the register window, spill incoming arguments to the locations
 * chosen by get_call_info (), initialize the LMF when save_lmf is set, and
 * optionally emit the enter-method trace call. Returns the address just
 * past the emitted code.
 */
3851 mono_arch_emit_prolog (MonoCompile *cfg)
3853 MonoMethod *method = cfg->method;
3854 MonoMethodSignature *sig;
3860 cfg->code_size = 256;
3861 cfg->native_code = g_malloc (cfg->code_size);
3862 code = (guint32*)cfg->native_code;
3864 /* FIXME: Generate intermediate code instead */
3866 offset = cfg->stack_offset;
3867 offset += (16 * sizeof (gpointer)); /* register save area */
3869 offset += 4; /* struct/union return pointer */
3872 /* add parameter area size for called functions */
3873 if (cfg->param_area < (6 * sizeof (gpointer)))
3874 /* Reserve space for the first 6 arguments even if it is unused */
3875 offset += 6 * sizeof (gpointer);
3877 offset += cfg->param_area;
3879 /* align the stack size */
3880 offset = ALIGN_TO (offset, MONO_ARCH_FRAME_ALIGNMENT);
3883 * localloc'd memory is stored between the local variables (whose
3884 * size is given by cfg->stack_offset), and between the space reserved
3887 cfg->arch.localloc_offset = offset - cfg->stack_offset;
3889 cfg->stack_offset = offset;
3891 #ifdef MONO_ARCH_SIGSEGV_ON_ALTSTACK
3892 /* Perform stack touching */
/* SAVE allocates the frame; large frames need the size in a register first */
3896 if (!sparc_is_imm13 (- cfg->stack_offset)) {
3897 /* Can't use sparc_o7 here, since we're still in the caller's frame */
3898 sparc_set (code, (- cfg->stack_offset), GP_SCRATCH_REG);
3899 sparc_save (code, sparc_sp, GP_SCRATCH_REG, sparc_sp);
3902 sparc_save_imm (code, sparc_sp, - cfg->stack_offset, sparc_sp);
/* Debugging aid: break into the debugger for methods whose name contains "foo" */
3905 if (strstr (cfg->method->name, "foo")) {
3906 mono_add_patch_info (cfg, (guint8*)code - cfg->native_code, MONO_PATCH_INFO_ABS, mono_sparc_break);
3907 sparc_call_simple (code, 0);
3912 sig = mono_method_signature (method);
3914 cinfo = get_call_info (cfg, sig, FALSE);
3916 /* Keep in sync with emit_load_volatile_arguments */
3917 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
3918 ArgInfo *ainfo = cinfo->args + i;
3919 gint32 stack_offset;
3921 inst = cfg->args [i];
3923 if (sig->hasthis && (i == 0))
3924 arg_type = &mono_defaults.object_class->byval_arg;
3926 arg_type = sig->params [i - sig->hasthis];
3928 stack_offset = ainfo->offset + ARGS_OFFSET;
3930 /* Save the split arguments so they will reside entirely on the stack */
3931 if (ainfo->storage == ArgInSplitRegStack) {
3932 /* Save the register to the stack */
3933 g_assert (inst->opcode == OP_REGOFFSET);
3934 if (!sparc_is_imm13 (stack_offset))
3936 sparc_st_imm (code, sparc_i5, inst->inst_basereg, stack_offset);
/* V8 doubles must end up in a dword aligned slot, which the natural
 * argument position may not be */
3939 if (!v64 && !arg_type->byref && (arg_type->type == MONO_TYPE_R8)) {
3940 /* Save the argument to a dword aligned stack location */
3942 * stack_offset contains the offset of the argument on the stack.
3943 * inst->inst_offset contains the dword aligned offset where the value
3946 if (ainfo->storage == ArgInIRegPair) {
3947 if (!sparc_is_imm13 (inst->inst_offset + 4))
3949 sparc_st_imm (code, sparc_i0 + ainfo->reg, inst->inst_basereg, inst->inst_offset);
3950 sparc_st_imm (code, sparc_i0 + ainfo->reg + 1, inst->inst_basereg, inst->inst_offset + 4);
3953 if (ainfo->storage == ArgInSplitRegStack) {
3955 g_assert_not_reached ();
3957 if (stack_offset != inst->inst_offset) {
3958 /* stack_offset is not dword aligned, so we need to make a copy */
3959 sparc_st_imm (code, sparc_i5, inst->inst_basereg, inst->inst_offset);
3960 sparc_ld_imm (code, sparc_fp, stack_offset + 4, sparc_o7);
3961 sparc_st_imm (code, sparc_o7, inst->inst_basereg, inst->inst_offset + 4);
3965 if (ainfo->storage == ArgOnStackPair) {
3967 g_assert_not_reached ();
3969 if (stack_offset != inst->inst_offset) {
3970 /* stack_offset is not dword aligned, so we need to make a copy */
3971 sparc_ld_imm (code, sparc_fp, stack_offset, sparc_o7);
3972 sparc_st_imm (code, sparc_o7, inst->inst_basereg, inst->inst_offset);
3973 sparc_ld_imm (code, sparc_fp, stack_offset + 4, sparc_o7);
3974 sparc_st_imm (code, sparc_o7, inst->inst_basereg, inst->inst_offset + 4);
3978 g_assert_not_reached ();
3981 if ((ainfo->storage == ArgInIReg) && (inst->opcode != OP_REGVAR)) {
3982 /* Argument in register, but need to be saved to stack */
3983 if (!sparc_is_imm13 (stack_offset))
/* The store width is chosen from the low bits of the slot offset —
 * presumably small arguments sit at the end of their (big-endian) word;
 * NOTE(review): confirm against get_call_info's offset assignment */
3985 if ((stack_offset - ARGS_OFFSET) & 0x1)
3986 sparc_stb_imm (code, sparc_i0 + ainfo->reg, inst->inst_basereg, stack_offset);
3988 if ((stack_offset - ARGS_OFFSET) & 0x2)
3989 sparc_sth_imm (code, sparc_i0 + ainfo->reg, inst->inst_basereg, stack_offset);
3991 if ((stack_offset - ARGS_OFFSET) & 0x4)
3992 sparc_st_imm (code, sparc_i0 + ainfo->reg, inst->inst_basereg, stack_offset);
3995 sparc_stx_imm (code, sparc_i0 + ainfo->reg, inst->inst_basereg, stack_offset);
3997 sparc_st_imm (code, sparc_i0 + ainfo->reg, inst->inst_basereg, stack_offset);
4001 if ((ainfo->storage == ArgInIRegPair) && (inst->opcode != OP_REGVAR)) {
4005 /* Argument in regpair, but need to be saved to stack */
4006 if (!sparc_is_imm13 (inst->inst_offset + 4))
4008 sparc_st_imm (code, sparc_i0 + ainfo->reg, inst->inst_basereg, inst->inst_offset);
4009 sparc_st_imm (code, sparc_i0 + ainfo->reg + 1, inst->inst_basereg, inst->inst_offset + 4);
4011 else if ((ainfo->storage == ArgInFloatReg) && (inst->opcode != OP_REGVAR)) {
4012 if (!sparc_is_imm13 (stack_offset))
4014 sparc_stf_imm (code, ainfo->reg, inst->inst_basereg, inst->inst_offset);
4016 else if ((ainfo->storage == ArgInDoubleReg) && (inst->opcode != OP_REGVAR)) {
4017 /* The offset is guaranteed to be aligned by the ABI rules */
4018 sparc_stdf_imm (code, ainfo->reg, inst->inst_basereg, inst->inst_offset);
4021 if ((ainfo->storage == ArgInFloatReg) && (inst->opcode == OP_REGVAR)) {
4022 /* Need to move into a double precision register */
4023 sparc_fstod (code, ainfo->reg, ainfo->reg - 1);
4026 if ((ainfo->storage == ArgInSplitRegStack) || (ainfo->storage == ArgOnStack))
4027 if (inst->opcode == OP_REGVAR)
4028 /* FIXME: Load the argument into memory */
/* Initialize the LMF (Last Managed Frame) so unwinding from native code
 * can find this frame */
4034 if (cfg->method->save_lmf) {
4035 gint32 lmf_offset = STACK_BIAS - cfg->arch.lmf_offset;
4038 mono_add_patch_info (cfg, (guint8*)code - cfg->native_code, MONO_PATCH_INFO_IP, NULL);
4039 sparc_set_template (code, sparc_o7);
4040 sparc_sti_imm (code, sparc_o7, sparc_fp, lmf_offset + G_STRUCT_OFFSET (MonoLMF, ip));
4042 sparc_sti_imm (code, sparc_sp, sparc_fp, lmf_offset + G_STRUCT_OFFSET (MonoLMF, sp));
4044 sparc_sti_imm (code, sparc_fp, sparc_fp, lmf_offset + G_STRUCT_OFFSET (MonoLMF, ebp));
4046 /* FIXME: add a relocation for this */
4047 sparc_set (code, cfg->method, sparc_o7);
4048 sparc_sti_imm (code, sparc_o7, sparc_fp, lmf_offset + G_STRUCT_OFFSET (MonoLMF, method));
4050 mono_add_patch_info (cfg, (guint8*)code - cfg->native_code, MONO_PATCH_INFO_INTERNAL_METHOD,
4051 (gpointer)"mono_arch_get_lmf_addr");
4054 code = (guint32*)mono_sparc_emit_save_lmf (code, lmf_offset);
4057 if (mono_jit_trace_calls != NULL && mono_trace_eval (method))
4058 code = mono_arch_instrument_prolog (cfg, mono_trace_enter_method, code, TRUE);
4060 cfg->code_len = (guint8*)code - cfg->native_code;
4062 g_assert (cfg->code_len <= cfg->code_size);
4064 return (guint8*)code;
/*
 * mono_arch_emit_epilog:
 *
 *   Emit the method epilog: grow the code buffer if the worst-case epilog
 * would not fit, emit the leave-method trace call and LMF restore when
 * enabled, honor the V8 struct-return convention (return to %i7+12 for
 * pinvoke methods returning a struct), then emit the return sequence,
 * trying to fold a trailing move into %i0 into the RESTORE instruction.
 */
4068 mono_arch_emit_epilog (MonoCompile *cfg)
4070 MonoMethod *method = cfg->method;
/* Conservative upper bound on the epilog size, refined below */
4073 int max_epilog_size = 16 + 20 * 4;
4075 if (cfg->method->save_lmf)
4076 max_epilog_size += 128;
4078 if (mono_jit_trace_calls != NULL)
4079 max_epilog_size += 50;
4081 if (cfg->prof_options & MONO_PROFILE_ENTER_LEAVE)
4082 max_epilog_size += 50;
4084 while (cfg->code_len + max_epilog_size > (cfg->code_size - 16)) {
4085 cfg->code_size *= 2;
4086 cfg->native_code = g_realloc (cfg->native_code, cfg->code_size);
4087 mono_jit_stats.code_reallocs++;
4090 code = (guint32*)(cfg->native_code + cfg->code_len);
4092 if (mono_jit_trace_calls != NULL && mono_trace_eval (method))
4093 code = mono_arch_instrument_epilog (cfg, mono_trace_leave_method, code, TRUE);
4095 if (cfg->method->save_lmf) {
4096 gint32 lmf_offset = STACK_BIAS - cfg->arch.lmf_offset;
4098 code = mono_sparc_emit_restore_lmf (code, lmf_offset);
4102 * The V8 ABI requires that calls to functions which return a structure
4105 if (!v64 && mono_method_signature (cfg->method)->pinvoke && MONO_TYPE_ISSTRUCT(mono_method_signature (cfg->method)->ret))
4106 sparc_jmpl_imm (code, sparc_i7, 12, sparc_g0);
4110 /* Only fold last instruction into the restore if the exit block has an in count of 1
4111 and the previous block hasn't been optimized away since it may have an in count > 1 */
4112 if (cfg->bb_exit->in_count == 1 && cfg->bb_exit->in_bb[0]->native_offset != cfg->bb_exit->native_offset)
4115 /* Try folding last instruction into the restore */
/* Pattern: "or reg, imm13, %i0" (op=2, op3=0x2, imm form, rd=%i0) — the
 * final move into the return register can become "restore reg, imm, %o0" */
4116 if (can_fold && (sparc_inst_op (code [-2]) == 0x2) && (sparc_inst_op3 (code [-2]) == 0x2) && sparc_inst_imm (code [-2]) && (sparc_inst_rd (code [-2]) == sparc_i0)) {
4117 /* or reg, imm, %i0 */
4118 int reg = sparc_inst_rs1 (code [-2]);
/* Sign-extend the 13 bit immediate field to 32 bits */
4119 int imm = (((gint32)(sparc_inst_imm13 (code [-2]))) << 19) >> 19;
/* Move the delay-slot instruction up over the folded move */
4120 code [-2] = code [-1];
4122 sparc_restore_imm (code, reg, imm, sparc_o0);
/* Same folding for the register form: "or reg1, reg2, %i0" */
4125 if (can_fold && (sparc_inst_op (code [-2]) == 0x2) && (sparc_inst_op3 (code [-2]) == 0x2) && (!sparc_inst_imm (code [-2])) && (sparc_inst_rd (code [-2]) == sparc_i0)) {
4126 /* or reg, reg, %i0 */
4127 int reg1 = sparc_inst_rs1 (code [-2]);
4128 int reg2 = sparc_inst_rs2 (code [-2]);
4129 code [-2] = code [-1];
4131 sparc_restore (code, reg1, reg2, sparc_o0);
/* Nothing to fold: plain restore %g0, 0, %g0 */
4134 sparc_restore_imm (code, sparc_g0, 0, sparc_g0);
4136 cfg->code_len = (guint8*)code - cfg->native_code;
4138 g_assert (cfg->code_len < cfg->code_size);
/*
 * mono_arch_emit_exceptions:
 *
 *   Emit the out-of-line throw sequences for every MONO_PATCH_INFO_EXC
 * patch recorded during code generation.  Throw sequences for the same
 * exception class are shared: later throw sites set a per-site ip
 * offset in %o1 and branch into the first emitted sequence.
 * NOTE(review): this excerpt is missing lines (return type, several
 * declarations, braces and emission calls) — consult the full source
 * before modifying.
 */
4143 mono_arch_emit_exceptions (MonoCompile *cfg)
4145 MonoJumpInfo *patch_info;
/* Caches of already emitted throw sequences, one slot per exception class. */
4150 MonoClass *exc_classes [16];
4151 guint8 *exc_throw_start [16], *exc_throw_end [16];
4153 /* Compute needed space */
4154 for (patch_info = cfg->patch_info; patch_info; patch_info = patch_info->next) {
4155 if (patch_info->type == MONO_PATCH_INFO_EXC)
/* make sure we have enough space for exceptions */
4163 code_size = exc_count * (20 * 4);
4165 code_size = exc_count * 24;
/* Grow the code buffer until all throw sequences are guaranteed to fit. */
4168 while (cfg->code_len + code_size > (cfg->code_size - 16)) {
4169 cfg->code_size *= 2;
4170 cfg->native_code = g_realloc (cfg->native_code, cfg->code_size);
4171 mono_jit_stats.code_reallocs++;
4174 code = (guint32*)(cfg->native_code + cfg->code_len);
4176 for (patch_info = cfg->patch_info; patch_info; patch_info = patch_info->next) {
4177 switch (patch_info->type) {
4178 case MONO_PATCH_INFO_EXC: {
4179 MonoClass *exc_class;
4180 guint32 *buf, *buf2;
4181 guint32 throw_ip, type_idx;
/* Redirect the in-line branch at the throw site to the sequence emitted here. */
4184 sparc_patch ((guint32*)(cfg->native_code + patch_info->ip.i), code);
4186 exc_class = mono_class_from_name (mono_defaults.corlib, "System", patch_info->data.name);
4187 g_assert (exc_class);
/* Index relative to MONO_TOKEN_TYPE_DEF is smaller than the raw token. */
4188 type_idx = exc_class->type_token - MONO_TOKEN_TYPE_DEF;
4189 throw_ip = patch_info->ip.i;
4191 /* Find a throw sequence for the same exception class */
4192 for (i = 0; i < nthrows; ++i)
4193 if (exc_classes [i] == exc_class)
/* Reuse path: load the ip offset into %o1 and branch into the shared sequence. */
4197 guint32 throw_offset = (((guint8*)exc_throw_end [i] - cfg->native_code) - throw_ip) >> 2;
4198 if (!sparc_is_imm13 (throw_offset))
4199 sparc_set32 (code, throw_offset, sparc_o1);
4201 disp = (exc_throw_start [i] - (guint8*)code) >> 2;
4202 g_assert (sparc_is_imm22 (disp));
4203 sparc_branch (code, 0, sparc_ba, disp);
/* A 13-bit offset fits in a single instruction, so it can go in the delay slot. */
4204 if (sparc_is_imm13 (throw_offset))
4205 sparc_set32 (code, throw_offset, sparc_o1);
/* Sequence shared; no further patching needed for this site. */
4208 patch_info->type = MONO_PATCH_INFO_NONE;
4211 /* Emit the template for setting o1 */
4213 if (sparc_is_imm13 (((((guint8*)code - cfg->native_code) - throw_ip) >> 2) - 8))
4214 /* Can use a short form */
4217 sparc_set_template (code, sparc_o1);
/* Remember this sequence so later sites of the same class can branch to it. */
4221 exc_classes [nthrows] = exc_class;
4222 exc_throw_start [nthrows] = (guint8*)code;
4226 mono_add_patch_info (cfg, (guint8*)code - cfg->native_code, MONO_PATCH_INFO_ABS, mono_sparc_break);
4230 /* first arg = type token */
4231 /* Pass the type index to reduce the size of the sparc_set */
4232 if (!sparc_is_imm13 (type_idx))
4233 sparc_set32 (code, type_idx, sparc_o0);
4235 /* second arg = offset between the throw ip and the current ip */
4236 /* On sparc, the saved ip points to the call instruction */
4237 disp = (((guint8*)code - cfg->native_code) - throw_ip) >> 2;
4238 sparc_set32 (buf, disp, sparc_o1);
4243 exc_throw_end [nthrows] = (guint8*)code;
/* The actual throw call is resolved later through this patch info. */
4247 patch_info->data.name = "mono_arch_throw_corlib_exception";
4248 patch_info->type = MONO_PATCH_INFO_INTERNAL_METHOD;
4249 patch_info->ip.i = (guint8*)code - cfg->native_code;
4253 if (sparc_is_imm13 (type_idx)) {
4254 /* Put it into the delay slot */
4257 sparc_set32 (code, type_idx, sparc_o0);
4258 g_assert (code - buf == 1);
4269 cfg->code_len = (guint8*)code - cfg->native_code;
4271 g_assert (cfg->code_len < cfg->code_size);
/*
 * TLS key holding the address of the current thread's LMF; lazily
 * created by mono_arch_setup_jit_tls_data and read by
 * mono_arch_get_lmf_addr.
 * NOTE(review): the #else/#endif lines of this conditional are missing
 * from this excerpt — only one of the two declarations is compiled in
 * the full source.
 */
4275 gboolean lmf_addr_key_inited = FALSE;
4277 #ifdef MONO_SPARC_THR_TLS
4278 thread_key_t lmf_addr_key;
4280 pthread_key_t lmf_addr_key;
/*
 * mono_arch_get_lmf_addr:
 *
 *   Return the LMF address stored in the calling thread's TLS slot.
 * NOTE(review): the return type and some lines (e.g. the declaration
 * of 'res' and its return) are missing from this excerpt.
 */
4284 mono_arch_get_lmf_addr (void)
4286 /* This is perf critical so we bypass the IO layer */
4287 /* The thr_... functions seem to be somewhat faster */
4288 #ifdef MONO_SPARC_THR_TLS
4290 thr_getspecific (lmf_addr_key, &res);
4293 return pthread_getspecific (lmf_addr_key);
4297 #ifdef MONO_ARCH_SIGSEGV_ON_ALTSTACK
/*
 * There seems to be no way to determine stack boundaries under solaris,
 * so it's not possible to determine whether a SIGSEGV is caused by stack
 * overflow; altstack-based fault handling is therefore rejected at
 * compile time.
 */
4304 #error "--with-sigaltstack=yes not supported on solaris"
/*
 * mono_arch_setup_jit_tls_data:
 *
 *   Lazily create the LMF TLS key, then store the address of this
 * thread's LMF (&tls->lmf) into it so mono_arch_get_lmf_addr can fetch
 * it cheaply later.
 * NOTE(review): the return type, the declaration of 'res', and the
 * #else/#endif lines of both conditionals are missing from this
 * excerpt.
 */
4309 mono_arch_setup_jit_tls_data (MonoJitTlsData *tls)
4311 if (!lmf_addr_key_inited) {
4314 lmf_addr_key_inited = TRUE;
4316 #ifdef MONO_SPARC_THR_TLS
4317 res = thr_keycreate (&lmf_addr_key, NULL);
4319 res = pthread_key_create (&lmf_addr_key, NULL);
4321 g_assert (res == 0);
4325 #ifdef MONO_SPARC_THR_TLS
4326 thr_setspecific (lmf_addr_key, &tls->lmf);
4328 pthread_setspecific (lmf_addr_key, &tls->lmf);
/*
 * mono_arch_free_jit_tls_data:
 *   Counterpart of mono_arch_setup_jit_tls_data.
 * NOTE(review): the function body is not visible in this excerpt.
 */
4333 mono_arch_free_jit_tls_data (MonoJitTlsData *tls)
/*
 * mono_arch_emit_this_vret_args:
 *
 *   Emit the IR which sets up the 'this' pointer and the valuetype
 * return buffer pointer for a call.  One (not fully visible) path
 * passes the vret pointer in %o0 and shifts 'this' to %o1; the other
 * stores the vret pointer at %sp+64 — presumably the V8 struct-return
 * slot, TODO confirm against the full source.
 * NOTE(review): the return type, the branch structure selecting
 * between the two vret conventions, and some braces are missing from
 * this excerpt.
 */
4338 mono_arch_emit_this_vret_args (MonoCompile *cfg, MonoCallInst *call, int this_reg, int this_type, int vt_reg)
4340 int this_out_reg = sparc_o0;
/* Copy vt_reg into a fresh vreg and bind it to the %o0 out-register. */
4345 MONO_INST_NEW (cfg, ins, OP_MOVE);
4346 ins->sreg1 = vt_reg;
4347 ins->dreg = mono_regstate_next_int (cfg->rs);
4348 mono_bblock_add_inst (cfg->cbb, ins);
4350 mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, sparc_o0, FALSE);
/* The vret pointer occupies %o0, so 'this' moves to %o1. */
4352 this_out_reg = sparc_o1;
4354 /* Set the 'struct/union return pointer' location on the stack */
4355 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, sparc_sp, 64, vt_reg);
4359 /* add the this argument */
4360 if (this_reg != -1) {
4362 MONO_INST_NEW (cfg, this, OP_MOVE);
4363 this->type = this_type;
4364 this->sreg1 = this_reg;
4365 this->dreg = mono_regstate_next_int (cfg->rs);
4366 mono_bblock_add_inst (cfg->cbb, this);
4368 mono_call_inst_add_outarg_reg (cfg, call, this->dreg, this_out_reg, FALSE);
/*
 * mono_arch_get_inst_for_method:
 *
 *   Return an arch-specific intrinsic instruction replacing a call to
 * @cmethod, or NULL when no intrinsic applies.  The only case visible
 * in this excerpt is Thread.MemoryBarrier -> OP_MEMORY_BARRIER.
 * NOTE(review): the tail of the function (e.g. 'return ins;') is not
 * visible in this excerpt.
 */
4374 mono_arch_get_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
4376 MonoInst *ins = NULL;
4378 if (cmethod->klass == mono_defaults.thread_class &&
4379 strcmp (cmethod->name, "MemoryBarrier") == 0) {
4381 MONO_INST_NEW (cfg, ins, OP_MEMORY_BARRIER);
4388 * mono_arch_get_argument_info:
4389 * @csig: a method signature
4390 * @param_count: the number of parameters to consider
4391 * @arg_info: an array to store the result infos
4393 * Gathers information on parameters such as size, alignment and
4394 * padding. arg_info should be large enough to hold param_count + 1 entries.
4396 * Returns the size of the activation frame.
/*
 * See the documentation comment above for the contract of this
 * function (returns the activation frame size).
 * NOTE(review): several lines (return type, local declarations for
 * 'cinfo'/'ainfo'/'k'/'align', pad computations and the final return)
 * are missing from this excerpt.
 */
4399 mono_arch_get_argument_info (MonoMethodSignature *csig, int param_count, MonoJitArgumentInfo *arg_info)
/* Compute the call layout once; args [0] describes the implicit 'this'. */
4405 cinfo = get_call_info (NULL, csig, FALSE);
4407 if (csig->hasthis) {
4408 ainfo = &cinfo->args [0];
/* MONO_SPARC_STACK_BIAS is subtracted — presumably to yield un-biased frame offsets; verify against callers. */
4409 arg_info [0].offset = ARGS_OFFSET - MONO_SPARC_STACK_BIAS + ainfo->offset;
4412 for (k = 0; k < param_count; k++) {
4413 ainfo = &cinfo->args [k + csig->hasthis];
4415 arg_info [k + 1].offset = ARGS_OFFSET - MONO_SPARC_STACK_BIAS + ainfo->offset;
4416 arg_info [k + 1].size = mono_type_size (csig->params [k], &align);
/*
 * mono_arch_print_tree:
 * NOTE(review): the function body is not visible in this excerpt.
 */
4425 mono_arch_print_tree (MonoInst *tree, int arity)
/*
 * mono_arch_get_domain_intrinsic:
 * NOTE(review): the function body is not visible in this excerpt.
 */
4430 MonoInst* mono_arch_get_domain_intrinsic (MonoCompile* cfg)
4435 MonoInst* mono_arch_get_thread_intrinsic (MonoCompile* cfg)