2 * mini-sparc.c: Sparc backend for the Mono code generator
5 * Paolo Molaro (lupus@ximian.com)
6 * Dietmar Maurer (dietmar@ximian.com)
9 * Christopher Taylor (ct@gentoo.org)
10 * Mark Crichton (crichton@gimp.org)
11 * Zoltan Varga (vargaz@freemail.hu)
13 * (C) 2003 Ximian, Inc.
21 #include <sys/systeminfo.h>
28 #include <mono/metadata/appdomain.h>
29 #include <mono/metadata/debug-helpers.h>
30 #include <mono/metadata/tokentype.h>
31 #include <mono/utils/mono-math.h>
33 #include "mini-sparc.h"
36 #include "cpu-sparc.h"
37 #include "jit-icalls.h"
40 * Sparc V9 means two things:
41 * - the instruction set
44 * V9 instructions are only usable if the underlying processor is 64 bit. Most Sparc
45 * processors in use are 64 bit processors. The V9 ABI is only usable if the
46 * mono executable is a 64 bit executable. So it would make sense to use the 64 bit
47 * instructions without using the 64 bit ABI.
52 * - %i0..%i<n> hold the incoming arguments, these are never written by JITted
53 * code. Unused input registers are used for global register allocation.
54 * - %o0..%o5 and %l7 is used for local register allocation and passing arguments
55 * - %l0..%l6 is used for global register allocation
56 * - %o7 and %g1 is used as scratch registers in opcodes
57 * - all floating point registers are used for local register allocation except %f0.
58 * Only double precision registers are used.
60 * - fp registers %d0..%d30 are used for parameter passing, and %d32..%d62 are
61 * used for local allocation.
66 * - doubles and longs must be stored in dword aligned locations
70 * The following things are not implemented or do not work:
71 * - some fp arithmetic corner cases
72 * The following tests in mono/mini are expected to fail:
73 * - test_0_simple_double_casts
74 * This test casts (guint64)-1 to double and then back to guint64 again.
75 * Under x86, it returns 0, while under sparc it returns -1.
77 * In addition to this, the runtime requires the trunc function, or its
78 * solaris counterpart, aintl, to do some double->int conversions. If this
79 * function is not available, it is emulated somewhat, but the results can be
85 * - optimize sparc_set according to the memory model
86 * - when non-AOT compiling, compute patch targets immediately so we don't
87 * have to emit the 6 byte template.
89 * - struct arguments/returns
94 * - sparc_call_simple can't be used in a lot of places since the displacement
95 * might not fit into an imm30.
96 * - g1 can't be used in a lot of places since it is used as a scratch reg in
98 * - sparc_f0 can't be used as a scratch register on V9
99 * - the %d34..%d62 fp registers are encoded as: %dx = %f(x - 32 + 1), ie.
101 * - ldind.i4/u4 needs to sign extend/clear out upper word -> slows things down
102 * - ins->dreg can't be used as a scratch register in r4 opcodes since it might
103 * be a double precision register which has no single precision part.
104 * - passing/returning structs is hard to implement, because:
105 * - the spec is very hard to understand
106 * - it requires knowledge about the fields of structure, needs to handle
107 * nested structures etc.
111 * Possible optimizations:
112 * - delay slot scheduling
113 * - allocate large constants to registers
114 * - add more mul/div/rem optimizations
118 #define MONO_SPARC_THR_TLS 1
122 * There was a 64 bit bug in glib-2.2: g_bit_nth_msf (0, -1) would return 32,
123 * causing infinite loops in dominator computation. So glib-2.4 is required.
126 #if GLIB_MAJOR_VERSION == 2 && GLIB_MINOR_VERSION < 4
127 #error "glib 2.4 or later is required for 64 bit mode."
131 #define ALIGN_TO(val,align) (((val) + ((align) - 1)) & ~((align) - 1))
133 #define SIGNAL_STACK_SIZE (64 * 1024)
135 #define STACK_BIAS MONO_SPARC_STACK_BIAS
139 /* %g1 is used by sparc_set */
140 #define GP_SCRATCH_REG sparc_g4
141 /* %f0 is used for parameter passing */
142 #define FP_SCRATCH_REG sparc_f30
143 #define ARGS_OFFSET (STACK_BIAS + 128)
147 #define FP_SCRATCH_REG sparc_f0
148 #define ARGS_OFFSET 68
149 #define GP_SCRATCH_REG sparc_g1
153 /* Whether the CPU supports v9 instructions */
154 static gboolean sparcv9 = FALSE;
156 /* Whether this is a 64-bit executable */
158 static gboolean v64 = TRUE;
160 static gboolean v64 = FALSE;
163 static gpointer mono_arch_get_lmf_addr (void);
/*
 * mono_arch_regname:
 *
 *   Return the symbolic name of the integer register with index REG.
 * The table is ordered %g0-%g7, %o0-%o7 (with %o6/%o7 shown as
 * sparc_sp/sparc_call), %l0-%l7, %i0-%i7 (with %i6/%i7 shown as
 * sparc_fp/sparc_retadr).
 * NOTE(review): the return statements and the out-of-range fallback are
 * not visible in this excerpt.
 */
166 mono_arch_regname (int reg) {
167 static const char * rnames[] = {
168 "sparc_g0", "sparc_g1", "sparc_g2", "sparc_g3", "sparc_g4",
169 "sparc_g5", "sparc_g6", "sparc_g7", "sparc_o0", "sparc_o1",
170 "sparc_o2", "sparc_o3", "sparc_o4", "sparc_o5", "sparc_sp",
171 "sparc_call", "sparc_l0", "sparc_l1", "sparc_l2", "sparc_l3",
172 "sparc_l4", "sparc_l5", "sparc_l6", "sparc_l7", "sparc_i0",
173 "sparc_i1", "sparc_i2", "sparc_i3", "sparc_i4", "sparc_i5",
174 "sparc_fp", "sparc_retadr"
/* Only indices 0..31 are valid entries in the table above. */
176 if (reg >= 0 && reg < 32)
/*
 * mono_arch_fregname:
 *
 *   Return the symbolic name of the floating point register with index
 * REG (single-precision names %f0-%f31).
 * NOTE(review): the return statements and the out-of-range fallback are
 * not visible in this excerpt.
 */
182 mono_arch_fregname (int reg) {
183 static const char *rnames [] = {
184 "sparc_f0", "sparc_f1", "sparc_f2", "sparc_f3", "sparc_f4",
185 "sparc_f5", "sparc_f6", "sparc_f7", "sparc_f8", "sparc_f9",
186 "sparc_f10", "sparc_f11", "sparc_f12", "sparc_f13", "sparc_f14",
187 "sparc_f15", "sparc_f16", "sparc_f17", "sparc_f18", "sparc_f19",
188 "sparc_f20", "sparc_f21", "sparc_f22", "sparc_f23", "sparc_f24",
189 "sparc_f25", "sparc_f26", "sparc_f27", "sparc_f28", "sparc_f29",
190 "sparc_f30", "sparc_f31"
/* Only indices 0..31 are valid entries in the table above. */
193 if (reg >= 0 && reg < 32)
200 * Initialize the cpu to execute managed code.
/*
 * mono_arch_cpu_init:
 *
 *   Initialize the cpu to execute managed code. On this backend it only
 * forces CPU feature detection by calling mono_arch_cpu_optimizazions ()
 * (which, per the comment below, sets up the 'sparcv9' flag as a side
 * effect) and discards the resulting mask.
 */
203 mono_arch_cpu_init (void)
206 /* make sure sparcv9 is initialized for embedded use */
207 mono_arch_cpu_optimizazions(&dummy);
211 * Initialize architecture specific code.
214 mono_arch_init (void)
219 * Cleanup architecture specific code.
222 mono_arch_cleanup (void)
227 * This function returns the optimizations supported on this cpu.
/*
 * mono_arch_cpu_optimizazions:
 *
 *   Detect the CPU variant and return the JIT optimizations supported on
 * it; optimizations that must not be used are OR-ed into *exclude_mask.
 * Detection uses sysinfo (SI_ISALIST) where available, otherwise falls
 * back to a getpagesize () heuristic.
 * (The misspelling in the name is the established mono_arch API name and
 * must not be changed here.)
 */
230 mono_arch_cpu_optimizazions (guint32 *exclude_mask)
/* Solaris path: query the instruction-set list from the kernel. */
238 if (!sysinfo (SI_ISALIST, buf, 1024))
239 g_assert_not_reached ();
241 /* From glibc. If the getpagesize is 8192, we're on sparc64, which
242 * (in)directly implies that we're a v9 or better.
243 * Improvements to this are greatly accepted...
244 * Also, we don't differentiate between v7 and v8. I sense SIGILL
245 * sniffing in my future.
247 if (getpagesize() == 8192)
248 strcpy (buf, "sparcv9");
250 strcpy (buf, "sparcv8");
254 * On some processors, the cmov instructions are even slower than the
/* v9 CPUs get conditional-move based optimizations; older CPUs exclude them. */
257 if (strstr (buf, "sparcv9")) {
258 opts |= MONO_OPT_CMOV | MONO_OPT_FCMOV;
262 *exclude_mask |= MONO_OPT_CMOV | MONO_OPT_FCMOV;
268 #define flushi(addr) __asm__ __volatile__ ("iflush %0"::"r"(addr):"memory")
269 #else /* assume Sun's compiler */
270 static void flushi(void *addr)
277 void sync_instruction_memory(caddr_t addr, int len);
/*
 * mono_arch_flush_icache:
 *
 *   Flush the instruction cache for the SIZE bytes of generated code
 * starting at CODE. Uses the Solaris sync_instruction_memory () helper
 * when available; otherwise issues one 'iflush' per cacheline, with the
 * cacheline granularity chosen from the detected CPU (32 bytes on v9,
 * 8 bytes on v8).
 */
281 mono_arch_flush_icache (guint8 *code, gint size)
284 /* Hopefully this is optimized based on the actual CPU */
285 sync_instruction_memory (code, size);
287 gulong start = (gulong) code;
288 gulong end = start + size;
291 /* Sparcv9 chips only need flushes on 32 byte
292 * cacheline boundaries.
294 * Sparcv8 needs a flush every 8 bytes.
296 align = (sparcv9 ? 32 : 8);
/* Round the range outwards to cacheline boundaries before flushing. */
298 start &= ~(align - 1);
299 end = (end + (align - 1)) & ~(align - 1);
301 while (start < end) {
303 __asm__ __volatile__ ("iflush %0"::"r"(start));
315 * Flush all register windows to memory. Every register window is saved to
316 * a 16 word area on the stack pointed to by its %sp register.
/*
 * mono_sparc_flushw:
 *
 *   Flush all register windows to memory (every window is saved to the
 * 16-word save area pointed to by its %sp). Implemented by JITting, on
 * first use, a tiny save/flushw/restore stub into the static 'start'
 * buffer and calling it; the stub is cached for later calls.
 */
319 mono_sparc_flushw (void)
321 static guint32 start [64];
322 static int inited = 0;
324 static void (*flushw) (void);
/* Emit the stub: establish a minimal frame, flush windows, return. */
329 sparc_save_imm (code, sparc_sp, -160, sparc_sp);
332 sparc_restore_simple (code);
334 g_assert ((code - start) < 64);
336 mono_arch_flush_icache ((guint8*)start, (guint8*)code - (guint8*)start);
338 flushw = (gpointer)start;
/*
 * mono_arch_flush_register_windows:
 *
 *   Arch-independent entry point; simply delegates to mono_sparc_flushw ().
 */
347 mono_arch_flush_register_windows (void)
349 mono_sparc_flushw ();
/*
 * mono_arch_is_inst_imm:
 *
 *   Return whether IMM fits in the 13-bit signed immediate field of
 * SPARC arithmetic instructions, i.e. whether it can be encoded inline.
 */
353 mono_arch_is_inst_imm (gint64 imm)
355 return sparc_is_imm13 (imm);
/* mono_sparc_is_v9: return whether the detected CPU supports v9
 * instructions (body not visible in this excerpt; presumably returns the
 * file-scope 'sparcv9' flag — TODO confirm). */
359 mono_sparc_is_v9 (void) {
/* mono_sparc_is_sparc64: return whether this is a 64-bit executable
 * (body not visible in this excerpt; presumably returns the file-scope
 * 'v64' flag — TODO confirm). */
364 mono_sparc_is_sparc64 (void) {
376 ArgInFloatReg, /* V9 only */
377 ArgInDoubleReg /* V9 only */
382 /* This needs to be offset by %i0 or %o0 depending on caller/callee */
385 guint32 vt_offset; /* for valuetypes */
/*
 * add_general:
 *
 *   Assign the next integer argument slot to AINFO. Scalar arguments
 * (PAIR == FALSE) go into the next free integer parameter register, or
 * onto the stack once the PARAM_REGS registers are exhausted. 64-bit
 * arguments (PAIR == TRUE) need two consecutive locations: a register
 * pair, a pair of stack slots, or — when exactly one register remains —
 * a register/stack split. *GR and *STACK_SIZE are advanced accordingly.
 */
403 add_general (guint32 *gr, guint32 *stack_size, ArgInfo *ainfo, gboolean pair)
405 ainfo->offset = *stack_size;
408 if (*gr >= PARAM_REGS) {
409 ainfo->storage = ArgOnStack;
412 ainfo->storage = ArgInIReg;
417 /* Always reserve stack space for parameters passed in registers */
418 (*stack_size) += sizeof (gpointer);
421 if (*gr < PARAM_REGS - 1) {
422 /* A pair of registers */
423 ainfo->storage = ArgInIRegPair;
427 else if (*gr >= PARAM_REGS) {
428 /* A pair of stack locations */
429 ainfo->storage = ArgOnStackPair;
/* Exactly one register left: low word in a register, high word on stack. */
432 ainfo->storage = ArgInSplitRegStack;
437 (*stack_size) += 2 * sizeof (gpointer);
443 #define FLOAT_PARAM_REGS 32
/*
 * add_float:
 *
 *   Assign the next floating point argument slot to AINFO (V9 fp
 * parameter passing). Singles go into an odd-numbered fp register
 * (*gr + 1), doubles into a double-precision register; once the
 * FLOAT_PARAM_REGS registers are exhausted the argument goes on the
 * stack. *GR and *STACK_SIZE are advanced accordingly.
 */
446 add_float (guint32 *gr, guint32 *stack_size, ArgInfo *ainfo, gboolean single)
448 ainfo->offset = *stack_size;
451 if (*gr >= FLOAT_PARAM_REGS) {
452 ainfo->storage = ArgOnStack;
455 /* A single is passed in an even numbered fp register */
456 ainfo->storage = ArgInFloatReg;
457 ainfo->reg = *gr + 1;
462 if (*gr < FLOAT_PARAM_REGS) {
463 /* A double register */
464 ainfo->storage = ArgInDoubleReg;
469 ainfo->storage = ArgOnStack;
/* Stack slots are reserved even for register-passed fp arguments. */
473 (*stack_size) += sizeof (gpointer);
481 * Obtain information about a call according to the calling convention.
482 * For V8, see the "System V ABI, Sparc Processor Supplement" Sparc V8 version
483 * document for more information.
484 * For V9, see the "Low Level System Information (64-bit psABI)" chapter in
485 * the 'Sparc Compliance Definition 2.4' document.
/*
 * get_call_info:
 *
 *   Obtain information about a call according to the SPARC calling
 * convention: for each argument of SIG (plus the implicit 'this' and any
 * vararg signature cookie) decide whether it is passed in integer/fp
 * registers, on the stack, or split, and record where the return value
 * lives. Returns a heap-allocated CallInfo (one trailing ArgInfo per
 * argument) that the caller must g_free.
 * See the V8 "System V ABI, SPARC Processor Supplement" and the V9
 * 64-bit psABI chapter of the SCD 2.4 document.
 */
488 get_call_info (MonoCompile *cfg, MonoMethodSignature *sig, gboolean is_pinvoke)
491 int n = sig->hasthis + sig->param_count;
492 guint32 stack_size = 0;
495 MonoGenericSharingContext *gsctx = cfg ? cfg->generic_sharing_context : NULL;
497 cinfo = g_malloc0 (sizeof (CallInfo) + (sizeof (ArgInfo) * n));
/* Struct returns: a hidden pointer argument receives the return buffer. */
503 if (MONO_TYPE_ISSTRUCT ((sig->ret))) {
504 /* The address of the return value is passed in %o0 */
505 add_general (&gr, &stack_size, &cinfo->ret, FALSE);
506 cinfo->ret.reg += sparc_i0;
/* The implicit 'this' argument, if any, comes first. */
512 add_general (&gr, &stack_size, cinfo->args + 0, FALSE);
514 if ((sig->call_convention == MONO_CALL_VARARG) && (n == 0)) {
517 /* Emit the signature cookie just before the implicit arguments */
518 add_general (&gr, &stack_size, &cinfo->sig_cookie, FALSE);
521 for (i = 0; i < sig->param_count; ++i) {
522 ArgInfo *ainfo = &cinfo->args [sig->hasthis + i];
525 if ((sig->call_convention == MONO_CALL_VARARG) && (i == sig->sentinelpos)) {
528 /* Emit the signature cookie just before the implicit arguments */
529 add_general (&gr, &stack_size, &cinfo->sig_cookie, FALSE);
532 DEBUG(printf("param %d: ", i));
533 if (sig->params [i]->byref) {
534 DEBUG(printf("byref\n"));
536 add_general (&gr, &stack_size, ainfo, FALSE);
539 ptype = mono_type_get_underlying_type (sig->params [i]);
540 ptype = mini_get_basic_type_from_generic (gsctx, ptype);
541 switch (ptype->type) {
542 case MONO_TYPE_BOOLEAN:
/* Sub-word values are right-justified in their slot (big-endian). */
545 add_general (&gr, &stack_size, ainfo, FALSE);
546 /* the value is in the ls byte */
547 ainfo->offset += sizeof (gpointer) - 1;
552 add_general (&gr, &stack_size, ainfo, FALSE);
553 /* the value is in the ls word */
554 ainfo->offset += sizeof (gpointer) - 2;
558 add_general (&gr, &stack_size, ainfo, FALSE);
559 /* the value is in the ls dword */
560 ainfo->offset += sizeof (gpointer) - 4;
565 case MONO_TYPE_FNPTR:
566 case MONO_TYPE_CLASS:
567 case MONO_TYPE_OBJECT:
568 case MONO_TYPE_STRING:
569 case MONO_TYPE_SZARRAY:
570 case MONO_TYPE_ARRAY:
571 add_general (&gr, &stack_size, ainfo, FALSE);
573 case MONO_TYPE_GENERICINST:
574 if (!mono_type_generic_inst_is_valuetype (sig->params [i])) {
575 add_general (&gr, &stack_size, ainfo, FALSE);
579 case MONO_TYPE_VALUETYPE:
584 add_general (&gr, &stack_size, ainfo, FALSE);
586 case MONO_TYPE_TYPEDBYREF:
587 add_general (&gr, &stack_size, ainfo, FALSE);
592 add_general (&gr, &stack_size, ainfo, FALSE);
/* 64-bit integers need a pair of locations. */
594 add_general (&gr, &stack_size, ainfo, TRUE);
599 add_float (&fr, &stack_size, ainfo, TRUE);
602 /* single precision values are passed in integer registers */
603 add_general (&gr, &stack_size, ainfo, FALSE);
608 add_float (&fr, &stack_size, ainfo, FALSE);
611 /* double precision values are passed in a pair of registers */
612 add_general (&gr, &stack_size, ainfo, TRUE);
616 g_assert_not_reached ();
620 if (!sig->pinvoke && (sig->call_convention == MONO_CALL_VARARG) && (n > 0) && (sig->sentinelpos == sig->param_count)) {
623 /* Emit the signature cookie just before the implicit arguments */
624 add_general (&gr, &stack_size, &cinfo->sig_cookie, FALSE);
/* Classify the return value. */
628 ret_type = mono_type_get_underlying_type (sig->ret);
629 ret_type = mini_get_basic_type_from_generic (gsctx, ret_type);
630 switch (ret_type->type) {
631 case MONO_TYPE_BOOLEAN:
642 case MONO_TYPE_FNPTR:
643 case MONO_TYPE_CLASS:
644 case MONO_TYPE_OBJECT:
645 case MONO_TYPE_SZARRAY:
646 case MONO_TYPE_ARRAY:
647 case MONO_TYPE_STRING:
648 cinfo->ret.storage = ArgInIReg;
649 cinfo->ret.reg = sparc_i0;
656 cinfo->ret.storage = ArgInIReg;
657 cinfo->ret.reg = sparc_i0;
/* 64-bit integer returns occupy the %i0/%i1 pair. */
661 cinfo->ret.storage = ArgInIRegPair;
662 cinfo->ret.reg = sparc_i0;
669 cinfo->ret.storage = ArgInFReg;
670 cinfo->ret.reg = sparc_f0;
672 case MONO_TYPE_GENERICINST:
673 if (!mono_type_generic_inst_is_valuetype (sig->ret)) {
674 cinfo->ret.storage = ArgInIReg;
675 cinfo->ret.reg = sparc_i0;
681 case MONO_TYPE_VALUETYPE:
690 cinfo->ret.storage = ArgOnStack;
692 case MONO_TYPE_TYPEDBYREF:
695 /* Same as a valuetype with size 24 */
702 cinfo->ret.storage = ArgOnStack;
707 g_error ("Can't handle as return value 0x%x", sig->ret->type);
710 cinfo->stack_usage = stack_size;
711 cinfo->reg_usage = gr;
/*
 * mono_arch_get_allocatable_int_vars:
 *
 *   Return the list of variables in CFG that are candidates for global
 * integer register allocation. Skips variables with empty live ranges,
 * volatile/indirect variables, arguments, and anything already assigned
 * a register; only register-sized variables qualify. The result is kept
 * sorted by mono_varlist_insert_sorted ().
 */
716 mono_arch_get_allocatable_int_vars (MonoCompile *cfg)
722 * FIXME: If an argument is allocated to a register, then load it from the
723 * stack in the prolog.
726 for (i = 0; i < cfg->num_varinfo; i++) {
727 MonoInst *ins = cfg->varinfo [i];
728 MonoMethodVar *vmv = MONO_VARINFO (cfg, i);
/* unused vars: first use at or after last use means an empty range */
731 if (vmv->range.first_use.abs_pos >= vmv->range.last_use.abs_pos)
734 /* FIXME: Make arguments on stack allocateable to registers */
735 if (ins->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT) || (ins->opcode == OP_REGVAR) || (ins->opcode == OP_ARG))
738 if (mono_is_regsize_var (ins->inst_vtype)) {
739 g_assert (MONO_VARINFO (cfg, i)->reg == -1);
740 g_assert (i == vmv->idx);
742 vars = mono_varlist_insert_sorted (cfg, vars, vmv, FALSE);
/*
 * mono_arch_get_global_int_regs:
 *
 *   Return the list of hard registers usable for global register
 * allocation in CFG: the input registers (%i0..) left unused by the
 * method's own parameters, plus %l0..%l6. %l7 is deliberately excluded
 * (it is reserved for local allocation per the header comment).
 */
750 mono_arch_get_global_int_regs (MonoCompile *cfg)
754 MonoMethodSignature *sig;
757 sig = mono_method_signature (cfg->method);
759 cinfo = get_call_info (cfg, sig, FALSE);
761 /* Use unused input registers */
762 for (i = cinfo->reg_usage; i < 6; ++i)
763 regs = g_list_prepend (regs, GUINT_TO_POINTER (sparc_i0 + i));
765 /* Use %l0..%l6 as global registers */
766 for (i = sparc_l0; i < sparc_l7; ++i)
767 regs = g_list_prepend (regs, GUINT_TO_POINTER (i));
775 * mono_arch_regalloc_cost:
777 * Return the cost, in number of memory references, of the action of
778 * allocating the variable VMV into a register during global register
/* mono_arch_regalloc_cost: return the cost, in number of memory
 * references, of allocating VMV to a register during global register
 * allocation (body not visible in this excerpt). */
782 mono_arch_regalloc_cost (MonoCompile *cfg, MonoMethodVar *vmv)
788 * Set var information according to the calling convention. sparc version.
789 * The locals var stuff should most likely be split in another method.
/*
 * mono_arch_allocate_vars:
 *
 *   Set variable information according to the calling convention:
 * decide where the return value lives, lay out locals backwards from
 * %fp (reserving slots for exception handling state and the LMF when
 * needed), and place each incoming argument either in its ABI register
 * or at a stack offset, forcing volatile/float/struct arguments to the
 * stack so the prolog can spill them. Finishes by reserving an aligned
 * dword used by int<->float conversion opcodes and recording the total
 * frame size in cfg->stack_offset.
 */
793 mono_arch_allocate_vars (MonoCompile *cfg)
795 MonoMethodSignature *sig;
796 MonoMethodHeader *header;
798 int i, offset, size, align, curinst;
801 header = mono_method_get_header (cfg->method);
803 sig = mono_method_signature (cfg->method);
805 cinfo = get_call_info (cfg, sig, FALSE);
/* Map the return value onto a hard register or a stack location. */
807 if (sig->ret->type != MONO_TYPE_VOID) {
808 switch (cinfo->ret.storage) {
811 cfg->ret->opcode = OP_REGVAR;
812 cfg->ret->inst_c0 = cinfo->ret.reg;
815 if (cfg->new_ir && ((sig->ret->type == MONO_TYPE_I8) || (sig->ret->type == MONO_TYPE_U8))) {
/* 64-bit returns under the new IR: map the two halves separately. */
816 MonoInst *low = get_vreg_to_inst (cfg, cfg->ret->dreg + 1);
817 MonoInst *high = get_vreg_to_inst (cfg, cfg->ret->dreg + 2);
819 low->opcode = OP_REGVAR;
820 low->dreg = cinfo->ret.reg + 1;
821 high->opcode = OP_REGVAR;
822 high->dreg = cinfo->ret.reg;
824 cfg->ret->opcode = OP_REGVAR;
825 cfg->ret->inst_c0 = cinfo->ret.reg;
829 g_assert_not_reached ();
/* Struct returns: the vret address lives in the caller's frame. */
832 cfg->vret_addr->opcode = OP_REGOFFSET;
833 cfg->vret_addr->inst_basereg = sparc_fp;
834 cfg->vret_addr->inst_offset = 64;
840 cfg->ret->dreg = cfg->ret->inst_c0;
844 * We use the ABI calling conventions for managed code as well.
845 * Exception: valuetypes are never returned in registers on V9.
846 * FIXME: Use something more optimized.
849 /* Locals are allocated backwards from %fp */
850 cfg->frame_reg = sparc_fp;
854 * Reserve a stack slot for holding information used during exception
857 if (header->num_clauses)
858 offset += sizeof (gpointer) * 2;
860 if (cfg->method->save_lmf) {
861 offset += sizeof (MonoLMF);
862 cfg->arch.lmf_offset = offset;
/* Lay out the method's local variables. */
865 curinst = cfg->locals_start;
866 for (i = curinst; i < cfg->num_varinfo; ++i) {
867 inst = cfg->varinfo [i];
869 if ((inst->opcode == OP_REGVAR) || (inst->opcode == OP_REGOFFSET)) {
870 //g_print ("allocating local %d to %s\n", i, mono_arch_regname (inst->dreg));
874 if (inst->flags & MONO_INST_IS_DEAD)
877 /* inst->backend.is_pinvoke indicates native sized value types, this is used by the
878 * pinvoke wrappers when they call functions returning structure */
879 if (inst->backend.is_pinvoke && MONO_TYPE_ISSTRUCT (inst->inst_vtype) && inst->inst_vtype->type != MONO_TYPE_TYPEDBYREF)
880 size = mono_class_native_size (inst->inst_vtype->data.klass, &align);
882 size = mini_type_stack_size (cfg->generic_sharing_context, inst->inst_vtype, &align);
885 * This is needed since structures containing doubles must be doubleword
887 * FIXME: Do this only if needed.
889 if (MONO_TYPE_ISSTRUCT (inst->inst_vtype))
893 * variables are accessed as negative offsets from %fp, so increase
894 * the offset before assigning it to a variable
899 offset &= ~(align - 1);
900 inst->opcode = OP_REGOFFSET;
901 inst->inst_basereg = sparc_fp;
902 inst->inst_offset = STACK_BIAS + -offset;
904 //g_print ("allocating local %d to [%s - %d]\n", i, mono_arch_regname (inst->inst_basereg), - inst->inst_offset);
907 if (sig->call_convention == MONO_CALL_VARARG) {
908 cfg->sig_cookie = cinfo->sig_cookie.offset + ARGS_OFFSET;
/* Place the incoming arguments. */
911 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
912 inst = cfg->args [i];
913 if (inst->opcode != OP_REGVAR) {
914 ArgInfo *ainfo = &cinfo->args [i];
915 gboolean inreg = TRUE;
919 if (sig->hasthis && (i == 0))
920 arg_type = &mono_defaults.object_class->byval_arg;
922 arg_type = sig->params [i - sig->hasthis];
925 if (!arg_type->byref && ((arg_type->type == MONO_TYPE_R4)
926 || (arg_type->type == MONO_TYPE_R8)))
928 * Since float arguments are passed in integer registers, we need to
929 * save them to the stack in the prolog.
934 /* FIXME: Allocate volatile arguments to registers */
935 /* FIXME: This makes the argument holding a vtype address into volatile */
936 if (inst->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT))
939 if (MONO_TYPE_ISSTRUCT (arg_type))
940 /* FIXME: this isn't needed */
943 inst->opcode = OP_REGOFFSET;
946 storage = ArgOnStack;
948 storage = ainfo->storage;
952 inst->opcode = OP_REGVAR;
953 inst->dreg = sparc_i0 + ainfo->reg;
956 if (cfg->new_ir && (inst->type == STACK_I8)) {
/* 64-bit register-pair arguments: map the two halves separately. */
957 MonoInst *low = get_vreg_to_inst (cfg, inst->dreg + 1);
958 MonoInst *high = get_vreg_to_inst (cfg, inst->dreg + 2);
960 low->opcode = OP_REGVAR;
961 low->dreg = sparc_i0 + ainfo->reg + 1;
962 high->opcode = OP_REGVAR;
963 high->dreg = sparc_i0 + ainfo->reg;
965 inst->opcode = OP_REGVAR;
966 inst->dreg = sparc_i0 + ainfo->reg;
971 * Since float regs are volatile, we save the arguments to
972 * the stack in the prolog.
973 * FIXME: Avoid this if the method contains no calls.
977 case ArgInSplitRegStack:
978 /* Split arguments are saved to the stack in the prolog */
979 inst->opcode = OP_REGOFFSET;
980 /* in parent frame */
981 inst->inst_basereg = sparc_fp;
982 inst->inst_offset = ainfo->offset + ARGS_OFFSET;
984 if (!arg_type->byref && (arg_type->type == MONO_TYPE_R8)) {
986 * It is very hard to load doubles from non-doubleword aligned
987 * memory locations. So if the offset is misaligned, we copy the
988 * argument to a stack location in the prolog.
990 if ((inst->inst_offset - STACK_BIAS) % 8) {
991 inst->inst_basereg = sparc_fp;
995 offset &= ~(align - 1);
996 inst->inst_offset = STACK_BIAS + -offset;
1005 if (MONO_TYPE_ISSTRUCT (arg_type)) {
1006 /* Add a level of indirection */
1008 * It would be easier to add OP_LDIND_I here, but ldind_i instructions
1009 * are destructively modified in a lot of places in inssel.brg.
1012 MONO_INST_NEW (cfg, indir, 0);
1014 inst->opcode = OP_VTARG_ADDR;
1015 inst->inst_left = indir;
1020 /* Add a properly aligned dword for use by int<->float conversion opcodes */
1022 offset = ALIGN_TO (offset, 8);
1023 cfg->arch.float_spill_slot_offset = offset;
1026 * spillvars are stored between the normal locals and the storage reserved
1030 cfg->stack_offset = offset;
/*
 * mono_arch_create_vars:
 *
 *   Create arch-specific variables for CFG before allocation: for
 * struct returns, allocate an OP_ARG variable holding the hidden return
 * buffer address (cfg->vret_addr), printing it when verbose.
 */
1036 mono_arch_create_vars (MonoCompile *cfg)
1038 MonoMethodSignature *sig;
1040 sig = mono_method_signature (cfg->method);
1042 if (MONO_TYPE_ISSTRUCT ((sig->ret))) {
1043 cfg->vret_addr = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_ARG);
1044 if (G_UNLIKELY (cfg->verbose_level > 1)) {
1045 printf ("vret_addr = ");
1046 mono_print_ins (cfg->vret_addr);
/*
 * make_group:
 *
 *   Build an OP_GROUP instruction pairing LEFT with a BASEREG+OFFSET
 * memory location (stored in inst_imm); used by the old-IR outarg code
 * to describe where an argument is stored.
 * NOTE(review): the return statement is not visible in this excerpt —
 * presumably the new group instruction is returned.
 */
1052 make_group (MonoCompile *cfg, MonoInst *left, int basereg, int offset)
1056 MONO_INST_NEW (cfg, group, OP_GROUP);
1057 group->inst_left = left;
1058 group->inst_basereg = basereg;
1059 group->inst_imm = offset;
/*
 * emit_sig_cookie:
 *
 *   Old-IR path: emit the vararg signature cookie for CALL. A trimmed
 * copy of the call signature (implicit trailing arguments only) is
 * pushed to the stack slot reserved in cinfo->sig_cookie. Disables AOT
 * because signature tokens are not AOT-able yet.
 */
1065 emit_sig_cookie (MonoCompile *cfg, MonoCallInst *call, CallInfo *cinfo)
1068 MonoMethodSignature *tmp_sig;
1072 * mono_ArgIterator_Setup assumes the signature cookie is
1073 * passed first and all the arguments which were before it are
1074 * passed on the stack after the signature. So compensate by
1075 * passing a different signature.
1077 tmp_sig = mono_metadata_signature_dup (call->signature);
1078 tmp_sig->param_count -= call->signature->sentinelpos;
1079 tmp_sig->sentinelpos = 0;
1080 memcpy (tmp_sig->params, call->signature->params + call->signature->sentinelpos, tmp_sig->param_count * sizeof (MonoType*));
1082 /* FIXME: Add support for signature tokens to AOT */
1083 cfg->disable_aot = TRUE;
1084 /* We always pass the signature on the stack for simplicity */
1085 MONO_INST_NEW (cfg, arg, OP_SPARC_OUTARG_MEM);
1086 arg->inst_right = make_group (cfg, (MonoInst*)call, sparc_sp, ARGS_OFFSET + cinfo->sig_cookie.offset);
1087 MONO_INST_NEW (cfg, sig_arg, OP_ICONST);
1088 sig_arg->inst_p0 = tmp_sig;
1089 arg->inst_left = sig_arg;
1090 arg->type = STACK_PTR;
1091 /* prepend, so they get reversed */
1092 arg->next = call->out_args;
1093 call->out_args = arg;
1097 * take the arguments and generate the arch-specific
1098 * instructions to properly call the function in call.
1099 * This includes pushing, moving arguments to the right register
/*
 * mono_arch_call_opcode:
 *
 *   Old-IR call lowering: take the arguments of CALL and generate the
 * arch-specific OUTARG instructions that place each one in the right
 * register or stack slot according to get_call_info (). Valuetypes are
 * first copied to a stack location via OP_OUTARG_VT and their address
 * passed instead. The out_args list is built by prepending and reversed
 * at the end; the call's total stack usage is recorded in the cfg.
 */
1103 mono_arch_call_opcode (MonoCompile *cfg, MonoBasicBlock* bb, MonoCallInst *call, int is_virtual) {
1105 MonoMethodSignature *sig;
1109 guint32 extra_space = 0;
1111 sig = call->signature;
1112 n = sig->param_count + sig->hasthis;
1114 cinfo = get_call_info (cfg, sig, sig->pinvoke);
1116 for (i = 0; i < n; ++i) {
1117 ainfo = cinfo->args + i;
1119 if ((sig->call_convention == MONO_CALL_VARARG) && (i == sig->sentinelpos)) {
1120 /* Emit the signature cookie just before the first implicit argument */
1121 emit_sig_cookie (cfg, call, cinfo);
1124 if (is_virtual && i == 0) {
1125 /* the argument will be attached to the call instruction */
1126 in = call->args [i];
1128 MONO_INST_NEW (cfg, arg, OP_OUTARG);
1129 in = call->args [i];
1130 arg->cil_code = in->cil_code;
1131 arg->inst_left = in;
1132 arg->type = in->type;
1133 /* prepend, we'll need to reverse them later */
1134 arg->next = call->out_args;
1135 call->out_args = arg;
/* Valuetype arguments: copy to the stack, pass the address. */
1137 if ((i >= sig->hasthis) && (MONO_TYPE_ISSTRUCT(sig->params [i - sig->hasthis]))) {
1140 guint32 offset, pad;
1148 if (sig->params [i - sig->hasthis]->type == MONO_TYPE_TYPEDBYREF) {
1149 size = sizeof (MonoTypedRef);
1150 align = sizeof (gpointer);
1154 size = mono_type_native_stack_size (&in->klass->byval_arg, &align);
1157 * Can't use mini_type_stack_size (), but that
1158 * aligns the size to sizeof (gpointer), which is larger
1159 * than the size of the source, leading to reads of invalid
1160 * memory if the source is at the end of address space or
1163 size = mono_class_value_size (in->klass, &align);
1167 * We use OP_OUTARG_VT to copy the valuetype to a stack location, then
1168 * use the normal OUTARG opcodes to pass the address of the location to
1171 MONO_INST_NEW (cfg, inst, OP_OUTARG_VT);
1172 inst->inst_left = in;
1174 /* The first 6 argument locations are reserved */
1175 if (cinfo->stack_usage < 6 * sizeof (gpointer))
1176 cinfo->stack_usage = 6 * sizeof (gpointer);
1178 offset = ALIGN_TO ((ARGS_OFFSET - STACK_BIAS) + cinfo->stack_usage, align);
1179 pad = offset - ((ARGS_OFFSET - STACK_BIAS) + cinfo->stack_usage);
1181 inst->inst_c1 = STACK_BIAS + offset;
1182 inst->backend.size = size;
1183 arg->inst_left = inst;
1185 cinfo->stack_usage += size;
1186 cinfo->stack_usage += pad;
/* Describe the destination slot of this argument. */
1189 arg->inst_right = make_group (cfg, (MonoInst*)call, sparc_sp, ARGS_OFFSET + ainfo->offset);
1191 switch (ainfo->storage) {
1195 if (ainfo->storage == ArgInIRegPair)
1196 arg->opcode = OP_SPARC_OUTARG_REGPAIR;
1197 arg->backend.reg3 = sparc_o0 + ainfo->reg;
1198 call->used_iregs |= 1 << ainfo->reg;
1200 if ((i >= sig->hasthis) && !sig->params [i - sig->hasthis]->byref && ((sig->params [i - sig->hasthis]->type == MONO_TYPE_R8) || (sig->params [i - sig->hasthis]->type == MONO_TYPE_R4))) {
1201 /* An fp value is passed in an ireg */
1203 if (arg->opcode == OP_SPARC_OUTARG_REGPAIR)
1204 arg->opcode = OP_SPARC_OUTARG_REGPAIR_FLOAT;
1206 arg->opcode = OP_SPARC_OUTARG_FLOAT;
1209 * The OUTARG (freg) implementation needs an extra dword to store
1210 * the temporary value.
1216 arg->opcode = OP_SPARC_OUTARG_MEM;
1218 case ArgOnStackPair:
1219 arg->opcode = OP_SPARC_OUTARG_MEMPAIR;
1221 case ArgInSplitRegStack:
1222 arg->opcode = OP_SPARC_OUTARG_SPLIT_REG_STACK;
1223 arg->backend.reg3 = sparc_o0 + ainfo->reg;
1224 call->used_iregs |= 1 << ainfo->reg;
1227 arg->opcode = OP_SPARC_OUTARG_FLOAT_REG;
1228 arg->backend.reg3 = sparc_f0 + ainfo->reg;
1230 case ArgInDoubleReg:
1231 arg->opcode = OP_SPARC_OUTARG_DOUBLE_REG;
1232 arg->backend.reg3 = sparc_f0 + ainfo->reg;
1240 /* Handle the case where there are no implicit arguments */
1241 if (!sig->pinvoke && (sig->call_convention == MONO_CALL_VARARG) && (n == sig->sentinelpos)) {
1242 emit_sig_cookie (cfg, call, cinfo);
1246 * Reverse the call->out_args list.
1249 MonoInst *prev = NULL, *list = call->out_args, *next;
1256 call->out_args = prev;
1258 call->stack_usage = cinfo->stack_usage + extra_space;
1259 call->out_ireg_args = NULL;
1260 call->out_freg_args = NULL;
1261 cfg->param_area = MAX (cfg->param_area, call->stack_usage);
1262 cfg->flags |= MONO_CFG_HAS_CALLS;
1268 /* FIXME: Remove these later */
1269 #define NEW_LOAD_MEMBASE(cfg,dest,op,dr,base,offset) do { \
1270 MONO_INST_NEW ((cfg), (dest), (op)); \
1271 (dest)->dreg = (dr); \
1272 (dest)->inst_basereg = (base); \
1273 (dest)->inst_offset = (offset); \
1274 (dest)->type = STACK_I4; \
1277 #define EMIT_NEW_LOAD_MEMBASE(cfg,dest,op,dr,base,offset) do { NEW_LOAD_MEMBASE ((cfg), (dest), (op), (dr), (base), (offset)); MONO_ADD_INS ((cfg)->cbb, (dest)); } while (0)
1279 #undef MONO_EMIT_NEW_STORE_MEMBASE_IMM
1280 #define MONO_EMIT_NEW_STORE_MEMBASE_IMM(cfg,op,base,offset,imm) do { \
1282 MONO_INST_NEW ((cfg), (inst), (op)); \
1283 inst->inst_destbasereg = base; \
1284 inst->inst_offset = offset; \
1285 inst->inst_p1 = (gpointer)(gssize)imm; \
1286 MONO_ADD_INS ((cfg)->cbb, inst); \
/*
 * add_outarg_reg2:
 *
 *   New-IR helper: emit a move of virtual register SREG into a fresh
 * vreg and register that vreg as an outgoing hard-register argument
 * (REG) of CALL. STORAGE selects an integer (OP_MOVE) or floating point
 * (OP_FMOVE) transfer.
 */
1290 add_outarg_reg2 (MonoCompile *cfg, MonoCallInst *call, ArgStorage storage, int reg, guint32 sreg)
1294 MONO_INST_NEW (cfg, arg, 0);
1300 arg->opcode = OP_MOVE;
1301 arg->dreg = mono_alloc_ireg (cfg);
1303 mono_call_inst_add_outarg_reg (cfg, call, arg->dreg, reg, FALSE);
1306 arg->opcode = OP_FMOVE;
1307 arg->dreg = mono_alloc_freg (cfg);
1309 mono_call_inst_add_outarg_reg (cfg, call, arg->dreg, reg, TRUE);
1312 g_assert_not_reached ();
1315 MONO_ADD_INS (cfg->cbb, arg);
/*
 * add_outarg_load:
 *
 *   New-IR helper: load a word from %sp+OFFSET into a fresh vreg and
 * register it as the outgoing hard-register argument REG of CALL. Used
 * to move fp values into integer argument registers via memory.
 * NOTE(review): the BASEREG/OPCODE parameters are not used in the
 * visible lines — the load is hardwired to OP_LOAD_MEMBASE off %sp;
 * confirm against the full source.
 */
1319 add_outarg_load (MonoCompile *cfg, MonoCallInst *call, int opcode, int basereg, int offset, int reg)
1322 int dreg = mono_alloc_ireg (cfg);
1324 EMIT_NEW_LOAD_MEMBASE (cfg, arg, OP_LOAD_MEMBASE, dreg, sparc_sp, offset);
1325 MONO_ADD_INS (cfg->cbb, arg);
1327 mono_call_inst_add_outarg_reg (cfg, call, dreg, reg, FALSE);
/*
 * emit_pass_long:
 *
 *   New-IR helper: pass the 64-bit value IN according to AINFO —
 * as a register pair, as two stack words, or split between one register
 * (most significant word) and one stack slot (least significant word).
 * in->dreg + 1 / + 2 address the low/high halves of the vreg pair.
 */
1331 emit_pass_long (MonoCompile *cfg, MonoCallInst *call, ArgInfo *ainfo, MonoInst *in)
1333 int offset = ARGS_OFFSET + ainfo->offset;
1335 switch (ainfo->storage) {
1337 add_outarg_reg2 (cfg, call, ArgInIReg, sparc_o0 + ainfo->reg + 1, in->dreg + 1);
1338 add_outarg_reg2 (cfg, call, ArgInIReg, sparc_o0 + ainfo->reg, in->dreg + 2);
1340 case ArgOnStackPair:
1341 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, sparc_sp, offset, in->dreg + 2);
1342 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, sparc_sp, offset + 4, in->dreg + 1);
1344 case ArgInSplitRegStack:
1345 add_outarg_reg2 (cfg, call, ArgInIReg, sparc_o0 + ainfo->reg, in->dreg + 2);
1346 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, sparc_sp, offset + 4, in->dreg + 1);
1349 g_assert_not_reached ();
/*
 * emit_pass_double:
 *
 *   New-IR helper: pass the double value IN according to AINFO. Since
 * fp<->integer transfers must go through memory on this ABI, the value
 * is always stored to its stack slot first and then (for register or
 * split storage) loaded back into integer argument registers word by
 * word.
 */
1354 emit_pass_double (MonoCompile *cfg, MonoCallInst *call, ArgInfo *ainfo, MonoInst *in)
1356 int offset = ARGS_OFFSET + ainfo->offset;
1358 switch (ainfo->storage) {
1360 /* floating-point <-> integer transfer must go through memory */
1361 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORER8_MEMBASE_REG, sparc_sp, offset, in->dreg);
1363 /* Load into a register pair */
1364 add_outarg_load (cfg, call, OP_LOADI4_MEMBASE, sparc_sp, offset, sparc_o0 + ainfo->reg);
1365 add_outarg_load (cfg, call, OP_LOADI4_MEMBASE, sparc_sp, offset + 4, sparc_o0 + ainfo->reg + 1);
1367 case ArgOnStackPair:
1368 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORER8_MEMBASE_REG, sparc_sp, offset, in->dreg);
1370 case ArgInSplitRegStack:
1371 /* floating-point <-> integer transfer must go through memory */
1372 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORER8_MEMBASE_REG, sparc_sp, offset, in->dreg);
1373 /* Load most significant word into register */
1374 add_outarg_load (cfg, call, OP_LOADI4_MEMBASE, sparc_sp, offset, sparc_o0 + ainfo->reg);
1377 g_assert_not_reached ();
/*
 * emit_pass_float:
 *
 *   New-IR helper: pass the single-precision value IN according to
 * AINFO. The value is stored to its stack slot; for register storage it
 * is then reloaded into the integer argument register (fp<->integer
 * transfers must go through memory).
 */
1382 emit_pass_float (MonoCompile *cfg, MonoCallInst *call, ArgInfo *ainfo, MonoInst *in)
1384 int offset = ARGS_OFFSET + ainfo->offset;
1386 switch (ainfo->storage) {
1388 /* floating-point <-> integer transfer must go through memory */
1389 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORER4_MEMBASE_REG, sparc_sp, offset, in->dreg);
1390 add_outarg_load (cfg, call, OP_LOADI4_MEMBASE, sparc_sp, offset, sparc_o0 + ainfo->reg);
1393 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORER4_MEMBASE_REG, sparc_sp, offset, in->dreg);
1396 g_assert_not_reached ();
1401 emit_pass_other (MonoCompile *cfg, MonoCallInst *call, ArgInfo *ainfo, MonoType *arg_type, MonoInst *in);
/*
 * emit_pass_vtype:
 *
 *   New-IR helper: pass the valuetype IN. The value is copied to a
 * freshly reserved, properly aligned stack location via OP_OUTARG_VT
 * (carrying a private copy of AINFO with the final offset), and the
 * address of that location is then passed as an ordinary pointer
 * argument through emit_pass_other ().
 */
1404 emit_pass_vtype (MonoCompile *cfg, MonoCallInst *call, CallInfo *cinfo, ArgInfo *ainfo, MonoType *arg_type, MonoInst *in, gboolean pinvoke)
1407 guint32 align, offset, pad, size;
1409 if (arg_type->type == MONO_TYPE_TYPEDBYREF) {
1410 size = sizeof (MonoTypedRef);
1411 align = sizeof (gpointer);
1414 size = mono_type_native_stack_size (&in->klass->byval_arg, &align);
1417 * Other backends use mono_type_stack_size (), but that
1418 * aligns the size to 8, which is larger than the size of
1419 * the source, leading to reads of invalid memory if the
1420 * source is at the end of address space.
1422 size = mono_class_value_size (in->klass, &align);
1425 /* The first 6 argument locations are reserved */
1426 if (cinfo->stack_usage < 6 * sizeof (gpointer))
1427 cinfo->stack_usage = 6 * sizeof (gpointer);
1429 offset = ALIGN_TO ((ARGS_OFFSET - STACK_BIAS) + cinfo->stack_usage, align);
1430 pad = offset - ((ARGS_OFFSET - STACK_BIAS) + cinfo->stack_usage);
1432 cinfo->stack_usage += size;
1433 cinfo->stack_usage += pad;
1436 * We use OP_OUTARG_VT to copy the valuetype to a stack location, then
1437 * use the normal OUTARG opcodes to pass the address of the location to
1441 MONO_INST_NEW (cfg, arg, OP_OUTARG_VT);
1442 arg->sreg1 = in->dreg;
1443 arg->klass = in->klass;
1444 arg->backend.size = size;
1445 arg->inst_p0 = call;
1446 arg->inst_p1 = mono_mempool_alloc (cfg->mempool, sizeof (ArgInfo));
1447 memcpy (arg->inst_p1, ainfo, sizeof (ArgInfo));
1448 ((ArgInfo*)(arg->inst_p1))->offset = STACK_BIAS + offset;
1449 MONO_ADD_INS (cfg->cbb, arg);
/* Compute the address of the copy and pass that as the argument. */
1451 MONO_INST_NEW (cfg, arg, OP_ADD_IMM);
1452 arg->dreg = mono_alloc_preg (cfg);
1453 arg->sreg1 = sparc_sp;
1454 arg->inst_imm = STACK_BIAS + offset;
1455 MONO_ADD_INS (cfg->cbb, arg);
1457 emit_pass_other (cfg, call, ainfo, NULL, arg);
/*
 * emit_pass_other:
 *
 *   Emit IR to pass an integer/pointer-sized argument IN, either in an
 * integer out-register or in its stack slot. The store width for stack slots
 * appears to be derived from the slot offset's alignment.
 * NOTE(review): partial extract — the storage-case labels are missing here.
 */
1462 emit_pass_other (MonoCompile *cfg, MonoCallInst *call, ArgInfo *ainfo, MonoType *arg_type, MonoInst *in)
1464 	int offset = ARGS_OFFSET + ainfo->offset;
1467 	switch (ainfo->storage) {
1469 		add_outarg_reg2 (cfg, call, ArgInIReg, sparc_o0 + ainfo->reg, in->dreg);
1476 			opcode = OP_STOREI1_MEMBASE_REG;
1477 		else if (offset & 0x2)
1478 			opcode = OP_STOREI2_MEMBASE_REG;
1480 			opcode = OP_STOREI4_MEMBASE_REG;
1481 		MONO_EMIT_NEW_STORE_MEMBASE (cfg, opcode, sparc_sp, offset, in->dreg);
// Unknown storage kinds are a bug.
1485 		g_assert_not_reached ();
/*
 * emit_sig_cookie2:
 *
 *   Emit IR to pass the vararg signature cookie: build a trimmed signature
 * containing only the post-sentinel parameters and store a pointer to it in
 * the cookie's stack slot. (Typo in original comment below: "allways".)
 */
1490 emit_sig_cookie2 (MonoCompile *cfg, MonoCallInst *call, CallInfo *cinfo)
1492 	MonoMethodSignature *tmp_sig;
1495 	 * mono_ArgIterator_Setup assumes the signature cookie is
1496 	 * passed first and all the arguments which were before it are
1497 	 * passed on the stack after the signature. So compensate by
1498 	 * passing a different signature.
1500 	tmp_sig = mono_metadata_signature_dup (call->signature);
1501 	tmp_sig->param_count -= call->signature->sentinelpos;
1502 	tmp_sig->sentinelpos = 0;
1503 	memcpy (tmp_sig->params, call->signature->params + call->signature->sentinelpos, tmp_sig->param_count * sizeof (MonoType*));
1505 	/* FIXME: Add support for signature tokens to AOT */
1506 	cfg->disable_aot = TRUE;
1507 	/* We allways pass the signature on the stack for simplicity */
1508 	MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, sparc_sp, ARGS_OFFSET + cinfo->sig_cookie.offset, tmp_sig);
/*
 * mono_arch_emit_call:
 *
 *   Emit IR to pass every argument of CALL according to the layout computed
 * by get_call_info, dispatching on the argument's managed type: vtypes,
 * I8/U8 pairs, R8, R4, and everything else. Also stores the struct-return
 * pointer and the vararg signature cookie where required, and records the
 * call's total outgoing stack usage.
 */
1512 mono_arch_emit_call (MonoCompile *cfg, MonoCallInst *call)
1515 	MonoMethodSignature *sig;
1519 	guint32 extra_space = 0;
1521 	sig = call->signature;
1522 	n = sig->param_count + sig->hasthis;
1524 	cinfo = get_call_info (cfg, sig, sig->pinvoke);
1526 	if (sig->ret && MONO_TYPE_ISSTRUCT (sig->ret)) {
1527 		/* Set the 'struct/union return pointer' location on the stack */
1528 		MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, sparc_sp, 64, call->vret_var->dreg);
1531 	for (i = 0; i < n; ++i) {
1534 		ainfo = cinfo->args + i;
1536 		if ((sig->call_convention == MONO_CALL_VARARG) && (i == sig->sentinelpos)) {
1537 			/* Emit the signature cookie just before the first implicit argument */
1538 			emit_sig_cookie2 (cfg, call, cinfo);
1541 		in = call->args [i];
// The implicit 'this' argument is typed as object; i >= sig->hasthis below
// guarantees sig->params is only indexed for explicit parameters.
1543 		if (sig->hasthis && (i == 0))
1544 			arg_type = &mono_defaults.object_class->byval_arg;
1546 			arg_type = sig->params [i - sig->hasthis];
1548 		if ((i >= sig->hasthis) && (MONO_TYPE_ISSTRUCT(sig->params [i - sig->hasthis])))
1549 			emit_pass_vtype (cfg, call, cinfo, ainfo, arg_type, in, sig->pinvoke);
1550 		else if (!arg_type->byref && ((arg_type->type == MONO_TYPE_I8) || (arg_type->type == MONO_TYPE_U8)))
1551 			emit_pass_long (cfg, call, ainfo, in);
1552 		else if (!arg_type->byref && (arg_type->type == MONO_TYPE_R8))
1553 			emit_pass_double (cfg, call, ainfo, in);
1554 		else if (!arg_type->byref && (arg_type->type == MONO_TYPE_R4))
1555 			emit_pass_float (cfg, call, ainfo, in);
1557 			emit_pass_other (cfg, call, ainfo, arg_type, in);
1560 	/* Handle the case where there are no implicit arguments */
1561 	if (!sig->pinvoke && (sig->call_convention == MONO_CALL_VARARG) && (n == sig->sentinelpos)) {
1562 		emit_sig_cookie2 (cfg, call, cinfo);
1565 	call->stack_usage = cinfo->stack_usage + extra_space;
/*
 * mono_arch_emit_outarg_vt:
 *
 *   Lower an OP_OUTARG_VT created by emit_pass_vtype: memcpy the vtype in
 * SRC to its reserved outgoing stack slot (offset stored in ins->inst_p1
 * by emit_pass_vtype, already stack-biased).
 */
1571 mono_arch_emit_outarg_vt (MonoCompile *cfg, MonoInst *ins, MonoInst *src)
1573 	ArgInfo *ainfo = (ArgInfo*)ins->inst_p1;
1574 	int size = ins->backend.size;
1576 	mini_emit_memcpy2 (cfg, sparc_sp, ainfo->offset, src->dreg, 0, size, 0);
/*
 * mono_arch_emit_setret:
 *
 *   Emit IR to move VAL into the method's return location, dispatching on
 * the return storage kind. For long pairs, dreg+1/dreg+2 are the LS/MS
 * halves of the two-vreg long representation. R4 results go through
 * OP_SETFRET to narrow the value.
 * NOTE(review): partial extract — storage case labels are missing here.
 */
1580 mono_arch_emit_setret (MonoCompile *cfg, MonoMethod *method, MonoInst *val)
1582 	CallInfo *cinfo = get_call_info (cfg, mono_method_signature (method), FALSE);
1584 	switch (cinfo->ret.storage) {
1586 		MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, cfg->ret->dreg, val->dreg);
1589 		MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, cfg->ret->dreg, val->dreg + 2);
1590 		MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, cfg->ret->dreg + 1, val->dreg + 1);
1593 		if (mono_method_signature (method)->ret->type == MONO_TYPE_R4)
1594 			MONO_EMIT_NEW_UNALU (cfg, OP_SETFRET, cfg->ret->dreg, val->dreg);
1596 			MONO_EMIT_NEW_UNALU (cfg, OP_FMOVE, cfg->ret->dreg, val->dreg);
1599 		g_assert_not_reached ();
/*
 * Map (relation, operand type) -> SPARC condition code.
 * Columns appear to be: integer cc, integer cc, float cc (fb*). Rows follow
 * mono_opcode_to_cond ordering (EQ, NE, LE, GE, LT, GT, LE_UN, ...).
 * A 0 entry means that relation has no float condition here.
 */
1605 int cond_to_sparc_cond [][3] = {
1606 	{sparc_be, sparc_be, sparc_fbe},
1607 	{sparc_bne, sparc_bne, 0},
1608 	{sparc_ble, sparc_ble, sparc_fble},
1609 	{sparc_bge, sparc_bge, sparc_fbge},
1610 	{sparc_bl, sparc_bl, sparc_fbl},
1611 	{sparc_bg, sparc_bg, sparc_fbg},
1612 	{sparc_bleu, sparc_bleu, 0},
1613 	{sparc_beu, sparc_beu, 0},
1614 	{sparc_blu, sparc_blu, sparc_fbl},
1615 	{sparc_bgu, sparc_bgu, sparc_fbg}
1618 /* Map opcode to the sparc condition codes */
/*
 * opcode_to_sparc_cond:
 *
 *   Translate a branch/cond-exception opcode into a SparcCond via the
 * cond_to_sparc_cond table; overflow/carry opcodes are special-cased.
 * NOTE(review): partial extract — the switch header and several returns
 * are missing from view.
 */
1619 static inline SparcCond
1620 opcode_to_sparc_cond (int opcode)
1626 	case OP_COND_EXC_OV:
1627 	case OP_COND_EXC_IOV:
1630 	case OP_COND_EXC_IC:
1632 	case OP_COND_EXC_NO:
1633 	case OP_COND_EXC_NC:
1636 		rel = mono_opcode_to_cond (opcode);
1637 		t = mono_opcode_to_type (opcode, -1);
1639 		return cond_to_sparc_cond [rel][t];
/*
 * Code-emission helper macros. COMPUTE_DISP computes the word displacement
 * to a branch target (emitting a patch-info entry when the target is not
 * yet known); the EMIT_* macros below emit branches, ALU ops and
 * loads/stores with automatic large-immediate fallback via sparc_set.
 * NOTE(review): partial extract — some continuation lines of these macros
 * are missing from view; comments are only inserted where they cannot
 * interrupt a backslash-continued definition.
 */
1646 #define COMPUTE_DISP(ins) \
1647 if (ins->flags & MONO_INST_BRLABEL) { \
1648 	if (ins->inst_i0->inst_c0) \
1649 		disp = (ins->inst_i0->inst_c0 - ((guint8*)code - cfg->native_code)) >> 2; \
1652 		mono_add_patch_info (cfg, (guint8*)code - cfg->native_code, MONO_PATCH_INFO_LABEL, ins->inst_i0); \
1655 	if (ins->inst_true_bb->native_offset) \
1656 		disp = (ins->inst_true_bb->native_offset - ((guint8*)code - cfg->native_code)) >> 2; \
1659 		mono_add_patch_info (cfg, (guint8*)code - cfg->native_code, MONO_PATCH_INFO_BB, ins->inst_true_bb); \
1664 #define DEFAULT_ICC sparc_xcc_short
/* Non-v9 builds compare on the 32-bit icc instead of xcc. */
1666 #define DEFAULT_ICC sparc_icc_short
1670 #define EMIT_COND_BRANCH_ICC(ins,cond,annul,filldelay,icc) \
1674         COMPUTE_DISP(ins); \
1675         predict = (disp != 0) ? 1 : 0; \
1676         g_assert (sparc_is_imm19 (disp)); \
1677 	sparc_branchp (code, (annul), cond, icc, (predict), disp); \
1678         if (filldelay) sparc_nop (code); \
1680 #define EMIT_COND_BRANCH(ins,cond,annul,filldelay) EMIT_COND_BRANCH_ICC ((ins), (cond), (annul), (filldelay), (sparc_xcc_short))
1681 #define EMIT_FLOAT_COND_BRANCH(ins,cond,annul,filldelay) \
1685         COMPUTE_DISP(ins); \
1686 	predict = (disp != 0) ? 1 : 0; \
1687         g_assert (sparc_is_imm19 (disp)); \
1688 	sparc_fbranch (code, (annul), cond, disp); \
1689         if (filldelay) sparc_nop (code); \
/* Non-v9 fallbacks: plain (non-predicted) branch forms. */
1692 #define EMIT_COND_BRANCH_ICC(ins,cond,annul,filldelay,icc) g_assert_not_reached ()
1693 #define EMIT_COND_BRANCH_GENERAL(ins,bop,cond,annul,filldelay) \
1696         COMPUTE_DISP(ins); \
1697         g_assert (sparc_is_imm22 (disp)); \
1698         sparc_ ## bop (code, (annul), cond, disp); \
1699         if (filldelay) sparc_nop (code); \
1701 #define EMIT_COND_BRANCH(ins,cond,annul,filldelay) EMIT_COND_BRANCH_GENERAL((ins),branch,(cond),annul,filldelay)
/* Float variant dispatches through the fbranch emitter. */
1702 #define EMIT_FLOAT_COND_BRANCH(ins,cond,annul,filldelay) EMIT_COND_BRANCH_GENERAL((ins),fbranch,(cond),annul,filldelay)
1705 #define EMIT_COND_BRANCH_PREDICTED(ins,cond,annul,filldelay) \
1709 	COMPUTE_DISP(ins); \
1710 	predict = (disp != 0) ? 1 : 0; \
1711 	g_assert (sparc_is_imm19 (disp)); \
1712 	sparc_branchp (code, (annul), (cond), DEFAULT_ICC, (predict), disp); \
1713 	if (filldelay) sparc_nop (code); \
1716 #define EMIT_COND_BRANCH_BPR(ins,bop,predict,annul,filldelay) \
1719 	COMPUTE_DISP(ins); \
1720 	g_assert (sparc_is_imm22 (disp)); \
1721 	sparc_ ## bop (code, (annul), (predict), ins->sreg1, disp); \
1722 	if (filldelay) sparc_nop (code); \
1725 /* emit an exception if condition is fail */
1727  * We put the exception throwing code out-of-line, at the end of the method
1729 #define EMIT_COND_SYSTEM_EXCEPTION_GENERAL(ins,cond,sexc_name,filldelay,icc) do { \
1730         mono_add_patch_info (cfg, (guint8*)(code) - (cfg)->native_code, \
1731 			     MONO_PATCH_INFO_EXC, sexc_name); \
1732         if (sparcv9 && ((icc) != sparc_icc_short)) { \
1733            sparc_branchp (code, 0, (cond), (icc), 0, 0); \
1736            sparc_branch (code, 0, cond, 0); \
1738         if (filldelay) sparc_nop (code); \
1741 #define EMIT_COND_SYSTEM_EXCEPTION(ins,cond,sexc_name) EMIT_COND_SYSTEM_EXCEPTION_GENERAL(ins,cond,sexc_name,TRUE,DEFAULT_ICC)
/* BPr (branch-on-register) flavour of the out-of-line exception check. */
1743 #define EMIT_COND_SYSTEM_EXCEPTION_BPR(ins,bop,sexc_name) do { \
1744 		mono_add_patch_info (cfg, (guint8*)(code) - (cfg)->native_code, \
1745 				     MONO_PATCH_INFO_EXC, sexc_name); \
1746 		sparc_ ## bop (code, FALSE, FALSE, ins->sreg1, 0); \
1750 #define EMIT_ALU_IMM(ins,op,setcc) do { \
1751 			if (sparc_is_imm13 ((ins)->inst_imm)) \
1752 				sparc_ ## op ## _imm (code, (setcc), (ins)->sreg1, ins->inst_imm, (ins)->dreg); \
1754 				sparc_set (code, ins->inst_imm, sparc_o7); \
1755 				sparc_ ## op (code, (setcc), (ins)->sreg1, sparc_o7, (ins)->dreg); \
1759 #define EMIT_LOAD_MEMBASE(ins,op) do { \
1760 			if (sparc_is_imm13 (ins->inst_offset)) \
1761 				sparc_ ## op ## _imm (code, ins->inst_basereg, ins->inst_offset, ins->dreg); \
1763 				sparc_set (code, ins->inst_offset, sparc_o7); \
1764 				sparc_ ## op (code, ins->inst_basereg, sparc_o7, ins->dreg); \
1769 #define EMIT_STORE_MEMBASE_IMM(ins,op) do { \
1771 			if (ins->inst_imm == 0) \
1774 				sparc_set (code, ins->inst_imm, sparc_o7); \
1777 			if (!sparc_is_imm13 (ins->inst_offset)) { \
1778 				sparc_set (code, ins->inst_offset, GP_SCRATCH_REG); \
1779 				sparc_ ## op (code, sreg, ins->inst_destbasereg, GP_SCRATCH_REG); \
1782 				sparc_ ## op ## _imm (code, sreg, ins->inst_destbasereg, ins->inst_offset); \
1785 #define EMIT_STORE_MEMBASE_REG(ins,op) do { \
1786 			if (!sparc_is_imm13 (ins->inst_offset)) { \
1787 				sparc_set (code, ins->inst_offset, sparc_o7); \
1788 				sparc_ ## op (code, ins->sreg1, ins->inst_destbasereg, sparc_o7); \
1791 				sparc_ ## op ## _imm (code, ins->sreg1, ins->inst_destbasereg, ins->inst_offset); \
1794 #define EMIT_CALL() do { \
1796 		sparc_set_template (code, sparc_o7); \
1797 		sparc_jmpl (code, sparc_o7, sparc_g0, sparc_o7); \
1800 		sparc_call_simple (code, 0); \
1806  * A call template is 7 instructions long, so we want to avoid it if possible.
1809 emit_call (MonoCompile *cfg, guint32 *code, guint32 patch_type, gconstpointer data)
// emit_call: emit a call to DATA, preferring a short pc-relative call and
// recording a patch-info entry so the target can be fixed up later. The
// set/jmpl path below is disabled ("if (0 && ...)") because it only works
// when the target is already compiled.
1813 	/* FIXME: This only works if the target method is already compiled */
1814 	if (0 && v64 && !cfg->compile_aot) {
1815 		MonoJumpInfo patch_info;
1817 		patch_info.type = patch_type;
1818 		patch_info.data.target = data;
1820 		target = mono_resolve_patch_target (cfg->method, cfg->domain, NULL, &patch_info, FALSE);
1822 		/* FIXME: Add optimizations if the target is close enough */
1823 		sparc_set (code, target, sparc_o7);
1824 		sparc_jmpl (code, sparc_o7, sparc_g0, sparc_o7);
1828 		mono_add_patch_info (cfg, (guint8*)code - cfg->native_code, patch_type, data);
/* First peephole pass: no sparc-specific rules here (empty on this port). */
1836 mono_arch_peephole_pass_1 (MonoCompile *cfg, MonoBasicBlock *bb)
/*
 * mono_arch_peephole_pass_2:
 *
 *   Sparc-specific peephole optimizations over BB:
 *   - drop multiply-by-1 (turned into OP_MOVE or deleted);
 *   - forward stores into following loads of the same [base+offset]
 *     (store/load -> store/move, load/load -> load/move, store-imm/load ->
 *     store-imm/iconst), including the I1/I2 widths;
 *   - merge adjacent zero stores to dword-aligned fp-relative slots into a
 *     single OP_STOREI8_MEMBASE_IMM;
 *   - on v64, fuse compare-with-zero + conditional branch/exception into
 *     the BPr-style OP_SPARC_BR*/COND_EXC_*Z opcodes (only for small
 *     methods, since BPr has a 16-bit displacement);
 *   - remove no-op and mutually-cancelling moves.
 * NOTE(review): partial extract — several case labels, braces and
 * fall-through lines are missing from view; the logic is documented from
 * the visible statements only.
 */
1841 mono_arch_peephole_pass_2 (MonoCompile *cfg, MonoBasicBlock *bb)
1843 	MonoInst *ins, *n, *last_ins = NULL;
1846 	MONO_BB_FOR_EACH_INS_SAFE (bb, n, ins) {
1847 		switch (ins->opcode) {
1849 			/* remove unnecessary multiplication with 1 */
1850 			if (ins->inst_imm == 1) {
1851 				if (ins->dreg != ins->sreg1) {
1852 					ins->opcode = OP_MOVE;
1854 					MONO_DELETE_INS (bb, ins);
1860 		case OP_LOAD_MEMBASE:
1861 		case OP_LOADI4_MEMBASE:
1863 			 * OP_STORE_MEMBASE_REG reg, offset(basereg)
1864 			 * OP_LOAD_MEMBASE offset(basereg), reg
1866 			if (last_ins && (last_ins->opcode == OP_STOREI4_MEMBASE_REG
1867 					 || last_ins->opcode == OP_STORE_MEMBASE_REG) &&
1868 			    ins->inst_basereg == last_ins->inst_destbasereg &&
1869 			    ins->inst_offset == last_ins->inst_offset) {
1870 				if (ins->dreg == last_ins->sreg1) {
1871 					MONO_DELETE_INS (bb, ins);
1874 					//static int c = 0; printf ("MATCHX %s %d\n", cfg->method->name,c++);
1875 					ins->opcode = OP_MOVE;
1876 					ins->sreg1 = last_ins->sreg1;
1880 			 * Note: reg1 must be different from the basereg in the second load
1881 			 * OP_LOAD_MEMBASE offset(basereg), reg1
1882 			 * OP_LOAD_MEMBASE offset(basereg), reg2
1884 			 * OP_LOAD_MEMBASE offset(basereg), reg1
1885 			 * OP_MOVE reg1, reg2
1887 			} if (last_ins && (last_ins->opcode == OP_LOADI4_MEMBASE
1888 					   || last_ins->opcode == OP_LOAD_MEMBASE) &&
1889 			      ins->inst_basereg != last_ins->dreg &&
1890 			      ins->inst_basereg == last_ins->inst_basereg &&
1891 			      ins->inst_offset == last_ins->inst_offset) {
1893 				if (ins->dreg == last_ins->dreg) {
1894 					MONO_DELETE_INS (bb, ins);
1897 					ins->opcode = OP_MOVE;
1898 					ins->sreg1 = last_ins->dreg;
1901 				//g_assert_not_reached ();
1905 			 * OP_STORE_MEMBASE_IMM imm, offset(basereg)
1906 			 * OP_LOAD_MEMBASE offset(basereg), reg
1908 			 * OP_STORE_MEMBASE_IMM imm, offset(basereg)
1909 			 * OP_ICONST reg, imm
1911 			} else if (last_ins && (last_ins->opcode == OP_STOREI4_MEMBASE_IMM
1912 						|| last_ins->opcode == OP_STORE_MEMBASE_IMM) &&
1913 				   ins->inst_basereg == last_ins->inst_destbasereg &&
1914 				   ins->inst_offset == last_ins->inst_offset) {
1915 				//static int c = 0; printf ("MATCHX %s %d\n", cfg->method->name,c++);
1916 				ins->opcode = OP_ICONST;
1917 				ins->inst_c0 = last_ins->inst_imm;
1918 				g_assert_not_reached (); // check this rule
1923 		case OP_LOADI1_MEMBASE:
1924 			if (last_ins && (last_ins->opcode == OP_STOREI1_MEMBASE_REG) &&
1925 			    ins->inst_basereg == last_ins->inst_destbasereg &&
1926 			    ins->inst_offset == last_ins->inst_offset) {
1927 				if (ins->dreg == last_ins->sreg1) {
1928 					MONO_DELETE_INS (bb, ins);
1931 					//static int c = 0; printf ("MATCHX %s %d\n", cfg->method->name,c++);
1932 					ins->opcode = OP_MOVE;
1933 					ins->sreg1 = last_ins->sreg1;
1937 		case OP_LOADI2_MEMBASE:
1938 			if (last_ins && (last_ins->opcode == OP_STOREI2_MEMBASE_REG) &&
1939 			    ins->inst_basereg == last_ins->inst_destbasereg &&
1940 			    ins->inst_offset == last_ins->inst_offset) {
1941 				if (ins->dreg == last_ins->sreg1) {
1942 					MONO_DELETE_INS (bb, ins);
1945 					//static int c = 0; printf ("MATCHX %s %d\n", cfg->method->name,c++);
1946 					ins->opcode = OP_MOVE;
1947 					ins->sreg1 = last_ins->sreg1;
1951 		case OP_STOREI4_MEMBASE_IMM:
1952 			/* Convert pairs of 0 stores to a dword 0 store */
1953 			/* Used when initializing temporaries */
1954 			/* We know sparc_fp is dword aligned */
1955 			if (last_ins && (last_ins->opcode == OP_STOREI4_MEMBASE_IMM) &&
1956 			    (ins->inst_destbasereg == last_ins->inst_destbasereg) &&
1957 			    (ins->inst_destbasereg == sparc_fp) &&
1958 			    (ins->inst_offset < 0) &&
1959 			    ((ins->inst_offset % 8) == 0) &&
1960 			    ((ins->inst_offset == last_ins->inst_offset - 4)) &&
1961 			    (ins->inst_imm == 0) &&
1962 			    (last_ins->inst_imm == 0)) {
1964 				last_ins->opcode = OP_STOREI8_MEMBASE_IMM;
1965 				last_ins->inst_offset = ins->inst_offset;
1966 				MONO_DELETE_INS (bb, ins);
1977 		case OP_COND_EXC_EQ:
1978 		case OP_COND_EXC_GE:
1979 		case OP_COND_EXC_GT:
1980 		case OP_COND_EXC_LE:
1981 		case OP_COND_EXC_LT:
1982 		case OP_COND_EXC_NE_UN:
1984 			 * Convert compare with zero+branch to BRcc
1987 			 * This only works in 64 bit mode, since it examines all 64
1988 			 * bits of the register.
1989 			 * Only do this if the method is small since BPr only has a 16bit
1992 			if (v64 && (mono_method_get_header (cfg->method)->code_size < 10000) && last_ins &&
1993 				(last_ins->opcode == OP_COMPARE_IMM) &&
1994 				(last_ins->inst_imm == 0)) {
1995 				switch (ins->opcode) {
1997 					ins->opcode = OP_SPARC_BRZ;
2000 					ins->opcode = OP_SPARC_BRNZ;
2003 					ins->opcode = OP_SPARC_BRLZ;
2006 					ins->opcode = OP_SPARC_BRGZ;
2009 					ins->opcode = OP_SPARC_BRGEZ;
2012 					ins->opcode = OP_SPARC_BRLEZ;
2014 				case OP_COND_EXC_EQ:
2015 					ins->opcode = OP_SPARC_COND_EXC_EQZ;
2017 				case OP_COND_EXC_GE:
2018 					ins->opcode = OP_SPARC_COND_EXC_GEZ;
2020 				case OP_COND_EXC_GT:
2021 					ins->opcode = OP_SPARC_COND_EXC_GTZ;
2023 				case OP_COND_EXC_LE:
2024 					ins->opcode = OP_SPARC_COND_EXC_LEZ;
2026 				case OP_COND_EXC_LT:
2027 					ins->opcode = OP_SPARC_COND_EXC_LTZ;
2029 				case OP_COND_EXC_NE_UN:
2030 					ins->opcode = OP_SPARC_COND_EXC_NEZ;
2033 					g_assert_not_reached ();
2035 				ins->sreg1 = last_ins->sreg1;
2037 				MONO_DELETE_INS (bb, ins);
2045 			if (ins->dreg == ins->sreg1) {
2046 				MONO_DELETE_INS (bb, ins);
2050 			 * OP_MOVE sreg, dreg
2051 			 * OP_MOVE dreg, sreg
2053 			if (last_ins && last_ins->opcode == OP_MOVE &&
2054 			    ins->sreg1 == last_ins->dreg &&
2055 			    ins->dreg == last_ins->sreg1) {
2056 				MONO_DELETE_INS (bb, ins);
2064 	bb->last_ins = last_ins;
/* Architecture lowering pass: no sparc-specific lowering here (empty). */
2068 mono_arch_lowering_pass (MonoCompile *cfg, MonoBasicBlock *bb)
2072 /* FIXME: Strange loads from the stack in basic-float.cs:test_2_rem */
/*
 * sparc_patch:
 *
 *   Patch the instruction (sequence) at CODE to branch to / load TARGET.
 * Decodes the instruction's op/op2 fields and rewrites the displacement
 * (branch, branch-with-prediction, BPr, fbranch), or regenerates a
 * sethi/or, sethi/nop, sethi/load-store or sethi/jmpl pair, or fixes up a
 * call or an add-immediate. Each branch form asserts its displacement
 * still fits the instruction's immediate field.
 * NOTE(review): partial extract — some patch-failure branches and braces
 * are missing from view.
 */
2075 sparc_patch (guint32 *code, const gpointer target)
2078 	guint32 ins = *code;
2079 	guint32 op = ins >> 30;
2080 	guint32 op2 = (ins >> 22) & 0x7;
2081 	guint32 rd = (ins >> 25) & 0x1f;
2082 	guint8* target8 = (guint8*)target;
2083 	gint64 disp = (target8 - (guint8*)code) >> 2;
2086 	//	g_print ("patching 0x%08x (0x%08x) to point to 0x%08x\n", code, ins, target);
// op==0, op2==2: Bicc — 22-bit displacement.
2088 	if ((op == 0) && (op2 == 2)) {
2089 		if (!sparc_is_imm22 (disp))
2092 		*code = ((ins >> 22) << 22) | (disp & 0x3fffff);
// op==0, op2==1: BPcc — 19-bit displacement.
2094 	else if ((op == 0) && (op2 == 1)) {
2095 		if (!sparc_is_imm19 (disp))
2098 		*code = ((ins >> 19) << 19) | (disp & 0x7ffff);
// op==0, op2==3: BPr — split 16-bit displacement (d16hi/d16lo fields).
2100 	else if ((op == 0) && (op2 == 3)) {
2101 		if (!sparc_is_imm16 (disp))
2104 		*code &= ~(0x180000 | 0x3fff);
2105 		*code |= ((disp << 21) & (0x180000)) | (disp & 0x3fff);
// op==0, op2==6: FBfcc — 22-bit displacement.
2107 	else if ((op == 0) && (op2 == 6)) {
2108 		if (!sparc_is_imm22 (disp))
2111 		*code = ((ins >> 22) << 22) | (disp & 0x3fffff);
// op==0, op2==4: sethi — inspect the following instruction to decide how
// the address-materializing sequence should be rewritten.
2113 	else if ((op == 0) && (op2 == 4)) {
2114 		guint32 ins2 = code [1];
2116 		if (((ins2 >> 30) == 2) && (((ins2 >> 19) & 0x3f) == 2)) {
2117 			/* sethi followed by or */
2119 			sparc_set (p, target8, rd);
2120 			while (p <= (code + 1))
2123 		else if (ins2 == 0x01000000) {
2124 			/* sethi followed by nop */
2126 			sparc_set (p, target8, rd);
2127 			while (p <= (code + 1))
2130 		else if ((sparc_inst_op (ins2) == 3) && (sparc_inst_imm (ins2))) {
2131 			/* sethi followed by load/store */
2133 			guint32 t = (guint32)target8;
2134 			*code &= ~(0x3fffff);
2136 			*(code + 1) &= ~(0x3ff);
2137 			*(code + 1) |= (t & 0x3ff);
2141 				 (sparc_inst_rd (ins) == sparc_g1) &&
2142 				 (sparc_inst_op (c [1]) == 0) && (sparc_inst_op2 (c [1]) == 4) &&
2143 				 (sparc_inst_op (c [2]) == 2) && (sparc_inst_op3 (c [2]) == 2) &&
2144 				 (sparc_inst_op (c [3]) == 2) && (sparc_inst_op3 (c [3]) == 2))
2148 			reg = sparc_inst_rd (c [1]);
2149 			sparc_set (p, target8, reg);
2153 		else if ((sparc_inst_op (ins2) == 2) && (sparc_inst_op3 (ins2) == 0x38) &&
2154 				 (sparc_inst_imm (ins2))) {
2155 			/* sethi followed by jmpl */
2157 			guint32 t = (guint32)target8;
2158 			*code &= ~(0x3fffff);
2160 			*(code + 1) &= ~(0x3ff);
2161 			*(code + 1) |= (t & 0x3ff);
// op==1: CALL — 30-bit pc-relative displacement.
2167 	else if (op == 01) {
2168 		gint64 disp = (target8 - (guint8*)code) >> 2;
2170 		if (!sparc_is_imm30 (disp))
2172 		sparc_call_simple (code, target8 - (guint8*)code);
// add-immediate: patch the 13-bit immediate directly with the target value.
2174 	else if ((op == 2) && (sparc_inst_op3 (ins) == 0x2) && sparc_inst_imm (ins)) {
2176 		g_assert (sparc_is_imm13 (target8));
2178 		*code |= (guint32)target8;
2180 	else if ((sparc_inst_op (ins) == 2) && (sparc_inst_op3 (ins) == 0x7)) {
2181 		/* sparc_set case 5. */
2185 		reg = sparc_inst_rd (c [3]);
2186 		sparc_set (p, target, reg);
2193 	//	g_print ("patched with 0x%08x\n", ins);
2197  * mono_sparc_emit_save_lmf:
2199  *   Emit the code neccesary to push a new entry onto the lmf stack. Used by
2200  * trampolines as well.
// Expects %o0 to hold the lmf_addr (per the visible stores below).
2203 mono_sparc_emit_save_lmf (guint32 *code, guint32 lmf_offset)
// Save lmf_addr into the new LMF entry.
2206 	sparc_sti_imm (code, sparc_o0, sparc_fp, lmf_offset + G_STRUCT_OFFSET (MonoLMF, lmf_addr));
2207 	/* Save previous_lmf */
2208 	sparc_ldi (code, sparc_o0, sparc_g0, sparc_o7);
2209 	sparc_sti_imm (code, sparc_o7, sparc_fp, lmf_offset + G_STRUCT_OFFSET (MonoLMF, previous_lmf));
// Make *lmf_addr point at the new entry (its frame address).
2211 	sparc_add_imm (code, FALSE, sparc_fp, lmf_offset, sparc_o7);
2212 	sparc_sti (code, sparc_o7, sparc_o0, sparc_g0);
/*
 * mono_sparc_emit_restore_lmf:
 *
 *   Emit code to pop the current LMF entry: reload previous_lmf and
 * lmf_addr from the frame, then store previous_lmf back through lmf_addr.
 */
2218 mono_sparc_emit_restore_lmf (guint32 *code, guint32 lmf_offset)
2220 	/* Load previous_lmf */
2221 	sparc_ldi_imm (code, sparc_fp, lmf_offset + G_STRUCT_OFFSET (MonoLMF, previous_lmf), sparc_l0);
2223 	sparc_ldi_imm (code, sparc_fp, lmf_offset + G_STRUCT_OFFSET (MonoLMF, lmf_addr), sparc_l1);
2224 	/* *(lmf) = previous_lmf */
2225 	sparc_sti (code, sparc_l0, sparc_l1, sparc_g0);
/*
 * emit_save_sp_to_lmf:
 *
 *   If the method saves an LMF, store the current %sp into its sp field.
 * Must run immediately before each call (see comment below) rather than
 * once in the prolog.
 */
2230 emit_save_sp_to_lmf (MonoCompile *cfg, guint32 *code)
2233 	 * Since register windows are saved to the current value of %sp, we need to
2234 	 * set the sp field in the lmf before the call, not in the prolog.
2236 	if (cfg->method->save_lmf) {
2237 		gint32 lmf_offset = MONO_SPARC_STACK_BIAS - cfg->arch.lmf_offset;
2240 		sparc_sti_imm (code, sparc_sp, sparc_fp, lmf_offset + G_STRUCT_OFFSET (MonoLMF, sp));
/*
 * emit_vret_token:
 *
 *   After a pinvoke call returning a struct, emit the 'unimp <size>'
 * marker instruction mandated by the SPARC ABI so the callee can verify
 * the expected return-struct size.
 */
2247 emit_vret_token (MonoGenericSharingContext *gsctx, MonoInst *ins, guint32 *code)
2249 	MonoCallInst *call = (MonoCallInst*)ins;
2253 	 * The sparc ABI requires that calls to functions which return a structure
2254 	 * contain an additional unimpl instruction which is checked by the callee.
2256 	if (call->signature->pinvoke && MONO_TYPE_ISSTRUCT(call->signature->ret)) {
2257 		if (call->signature->ret->type == MONO_TYPE_TYPEDBYREF)
2258 			size = mini_type_stack_size (gsctx, call->signature->ret, NULL);
2260 			size = mono_class_native_size (call->signature->ret->data.klass, NULL);
2261 		sparc_unimp (code, size & 0xfff);
/*
 * emit_move_return_value:
 *
 *   After a call, move the hardware return value (%o0/%o1 for ints/longs,
 * %f0/%f1 for floats) into the call instruction's destination vreg(s).
 * R4 results are widened to double via fstod.
 * NOTE(review): partial extract — several case labels and the v64/!v64
 * conditional structure are missing from view.
 */
2268 emit_move_return_value (MonoInst *ins, guint32 *code)
2270 	/* Move return value to the target register */
2271 	/* FIXME: do more things in the local reg allocator */
2272 	switch (ins->opcode) {
2274 	case OP_VOIDCALL_REG:
2275 	case OP_VOIDCALL_MEMBASE:
2279 	case OP_CALL_MEMBASE:
2280 		g_assert (ins->dreg == sparc_o0);
2284 	case OP_LCALL_MEMBASE:
2286 		 * ins->dreg is the least significant reg due to the lreg: LCALL rule
2287 		 * in inssel-long32.brg.
2290 		sparc_mov_reg_reg (code, sparc_o0, ins->dreg);
2292 		g_assert (ins->dreg == sparc_o1);
2297 	case OP_FCALL_MEMBASE:
2299 		if (((MonoCallInst*)ins)->signature->ret->type == MONO_TYPE_R4) {
2300 			sparc_fmovs (code, sparc_f0, ins->dreg);
2301 			sparc_fstod (code, ins->dreg, ins->dreg);
2304 			sparc_fmovd (code, sparc_f0, ins->dreg);
2306 		sparc_fmovs (code, sparc_f0, ins->dreg);
2307 		if (((MonoCallInst*)ins)->signature->ret->type == MONO_TYPE_R4)
2308 			sparc_fstod (code, ins->dreg, ins->dreg);
2310 			sparc_fmovs (code, sparc_f1, ins->dreg + 1);
2315 	case OP_VCALL_MEMBASE:
2318 	case OP_VCALL2_MEMBASE:
2328  * emit_load_volatile_arguments:
2330  *   Load volatile arguments from the stack to the original input registers.
2331  * Required before a tail call.
// Walks the arguments in get_call_info order and, per storage kind,
// reloads each argument's home location back into %i0..%i5 (or restores
// stack/split-stack slots). Mirrors (inverts) the prolog's spill code.
2334 emit_load_volatile_arguments (MonoCompile *cfg, guint32 *code)
2336 	MonoMethod *method = cfg->method;
2337 	MonoMethodSignature *sig;
2342 	/* FIXME: Generate intermediate code instead */
2344 	sig = mono_method_signature (method);
2346 	cinfo = get_call_info (cfg, sig, FALSE);
2348 	/* This is the opposite of the code in emit_prolog */
2350 	for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
2351 		ArgInfo *ainfo = cinfo->args + i;
2352 		gint32 stack_offset;
2355 		inst = cfg->args [i];
2357 		if (sig->hasthis && (i == 0))
2358 			arg_type = &mono_defaults.object_class->byval_arg;
2360 			arg_type = sig->params [i - sig->hasthis];
2362 		stack_offset = ainfo->offset + ARGS_OFFSET;
2363 		ireg = sparc_i0 + ainfo->reg;
2365 		if (ainfo->storage == ArgInSplitRegStack) {
2366 			g_assert (inst->opcode == OP_REGOFFSET);
2368 			if (!sparc_is_imm13 (stack_offset))
// NOTE(review): operand order here (basereg, offset, %i5) does not match
// the (src, base, offset) order used by other sparc_st_imm calls below —
// verify against the sparc_st_imm macro definition.
2370 			sparc_st_imm (code, inst->inst_basereg, stack_offset, sparc_i5);
2373 		if (!v64 && !arg_type->byref && (arg_type->type == MONO_TYPE_R8)) {
2374 			if (ainfo->storage == ArgInIRegPair) {
2375 				if (!sparc_is_imm13 (inst->inst_offset + 4))
2377 				sparc_ld_imm (code, inst->inst_basereg, inst->inst_offset, ireg);
2378 				sparc_ld_imm (code, inst->inst_basereg, inst->inst_offset + 4, ireg + 1);
2381 			if (ainfo->storage == ArgInSplitRegStack) {
2382 				if (stack_offset != inst->inst_offset) {
2383 					sparc_ld_imm (code, inst->inst_basereg, inst->inst_offset, sparc_i5);
2384 					sparc_ld_imm (code, inst->inst_basereg, inst->inst_offset + 4, sparc_o7);
2385 					sparc_st_imm (code, sparc_o7, sparc_fp, stack_offset + 4);
2390 			if (ainfo->storage == ArgOnStackPair) {
2391 				if (stack_offset != inst->inst_offset) {
2392 					/* stack_offset is not dword aligned, so we need to make a copy */
2393 					sparc_ld_imm (code, inst->inst_basereg, inst->inst_offset, sparc_o7);
2394 					sparc_st_imm (code, sparc_o7, sparc_fp, stack_offset);
2396 					sparc_ld_imm (code, inst->inst_basereg, inst->inst_offset + 4, sparc_o7);
2397 					sparc_st_imm (code, sparc_o7, sparc_fp, stack_offset + 4);
2402 				g_assert_not_reached ();
2405 		if ((ainfo->storage == ArgInIReg) && (inst->opcode != OP_REGVAR)) {
2406 			/* Argument in register, but need to be saved to stack */
2407 			if (!sparc_is_imm13 (stack_offset))
// Load width chosen from the slot offset's alignment within the args area.
2409 			if ((stack_offset - ARGS_OFFSET) & 0x1)
2410 				/* FIXME: Is this ldsb or ldub ? */
2411 				sparc_ldsb_imm (code, inst->inst_basereg, stack_offset, ireg);
2413 			if ((stack_offset - ARGS_OFFSET) & 0x2)
2414 				sparc_ldsh_imm (code, inst->inst_basereg, stack_offset, ireg);
2416 			if ((stack_offset - ARGS_OFFSET) & 0x4)
2417 				sparc_ld_imm (code, inst->inst_basereg, stack_offset, ireg);
2420 				sparc_ldx_imm (code, inst->inst_basereg, stack_offset, ireg);
2422 				sparc_ld_imm (code, inst->inst_basereg, stack_offset, ireg);
2425 		else if ((ainfo->storage == ArgInIRegPair) && (inst->opcode != OP_REGVAR)) {
2426 			/* Argument in regpair, but need to be saved to stack */
2427 			if (!sparc_is_imm13 (inst->inst_offset + 4))
2429 			sparc_ld_imm (code, inst->inst_basereg, inst->inst_offset, ireg);
// NOTE(review): this looks like it should be a load (ld_imm) of the second
// half into ireg+1, not a store — verify against the prolog's pair spill.
2430 			sparc_st_imm (code, inst->inst_basereg, inst->inst_offset + 4, ireg + 1);
2432 		else if ((ainfo->storage == ArgInFloatReg) && (inst->opcode != OP_REGVAR)) {
2435 		else if ((ainfo->storage == ArgInDoubleReg) && (inst->opcode != OP_REGVAR)) {
2439 		if ((ainfo->storage == ArgInSplitRegStack) || (ainfo->storage == ArgOnStack))
2440 			if (inst->opcode == OP_REGVAR)
2441 				/* FIXME: Load the argument into memory */
2451  * mono_sparc_is_virtual_call:
2453  *   Determine whenever the instruction at CODE is a virtual call.
// A register-indirect call (op=2, op3=0x38 jmpl) counts as virtual iff its
// delay slot contains the marker nop 'or %g0, 0xca, %g0' constructed below.
2456 mono_sparc_is_virtual_call (guint32 *code)
2463 	if ((sparc_inst_op (*code) == 0x2) && (sparc_inst_op3 (*code) == 0x38)) {
2465 		 * Register indirect call. If it is a virtual call, then the
2466 		 * instruction in the delay slot is a special kind of nop.
2469 		/* Construct special nop */
2470 		sparc_or_imm (p, FALSE, sparc_g0, 0xca, sparc_g0);
2473 		if (code [1] == p [0])
2481  * mono_arch_get_vcall_slot:
2483  *   Determine the vtable slot used by a virtual call.
// Decodes the 'ld [rbase + disp], r; call r' (or 'set IMM, r1; ld [r1+r2]'
// variant) sequence preceding CODE, returning the base register's runtime
// value from REGS and the displacement in *DISPLACEMENT.
2486 mono_arch_get_vcall_slot (guint8 *code8, gpointer *regs, int *displacement)
2488 	guint32 *code = (guint32*)(gpointer)code8;
2489 	guint32 ins = code [0];
2490 	guint32 prev_ins = code [-1];
// Flush register windows so REGS reflects the caller's window.
2492 	mono_sparc_flushw ();
2496 	if (!mono_sparc_is_virtual_call (code))
2499 	if ((sparc_inst_op (ins) == 0x2) && (sparc_inst_op3 (ins) == 0x38)) {
2500 		if ((sparc_inst_op (prev_ins) == 0x3) && (sparc_inst_i (prev_ins) == 1) && (sparc_inst_op3 (prev_ins) == 0 || sparc_inst_op3 (prev_ins) == 0xb)) {
2501 			/* ld [r1 + CONST ], r2; call r2 */
2502 			guint32 base = sparc_inst_rs1 (prev_ins);
// Sign-extend the 13-bit immediate displacement.
2503 			gint32 disp = (((gint32)(sparc_inst_imm13 (prev_ins))) << 19) >> 19;
2506 			g_assert (sparc_inst_rd (prev_ins) == sparc_inst_rs1 (ins));
2508 			g_assert ((base >= sparc_o0) && (base <= sparc_i7));
2510 			base_val = regs [base];
2512 			*displacement = disp;
2514 			return (gpointer)base_val;
2516 		else if ((sparc_inst_op (prev_ins) == 0x3) && (sparc_inst_i (prev_ins) == 0) && (sparc_inst_op3 (prev_ins) == 0)) {
2517 			/* set r1, ICONST; ld [r1 + r2], r2; call r2 */
2518 			/* Decode a sparc_set32 */
2519 			guint32 base = sparc_inst_rs1 (prev_ins);
2522 			guint32 s1 = code [-3];
2523 			guint32 s2 = code [-2];
// Sanity-check the sethi ...
2530 			g_assert (sparc_inst_op (s1) == 0);
2531 			g_assert (sparc_inst_op2 (s1) == 4);
// ... and the or-immediate halves of the sparc_set sequence.
2534 			g_assert (sparc_inst_op (s2) == 2);
2535 			g_assert (sparc_inst_op3 (s2) == 2);
2536 			g_assert (sparc_inst_i (s2) == 1);
2537 			g_assert (sparc_inst_rs1 (s2) == sparc_inst_rd (s2));
2538 			g_assert (sparc_inst_rd (s1) == sparc_inst_rs1 (s2));
2540 			disp = ((s1 & 0x3fffff) << 10) | sparc_inst_imm13 (s2);
2542 			g_assert ((base >= sparc_o0) && (base <= sparc_i7));
2544 			base_val = regs [base];
2546 			*displacement = disp;
2548 			return (gpointer)base_val;
2550 			g_assert_not_reached ();
2553 		g_assert_not_reached ();
/* Return the address of the vtable slot used by the virtual call at CODE. */
2559 mono_arch_get_vcall_slot_addr (guint8 *code, gpointer *regs)
2563 	vt = mono_arch_get_vcall_slot (code, regs, &displacement);
2566 	return (gpointer*)((char*)vt + displacement);
/* Per-item instruction-count estimates (in words) used to size IMT thunks. */
2570 #define BR_SMALL_SIZE 2
2571 #define BR_LARGE_SIZE 2
2572 #define JUMP_IMM_SIZE 5
2573 #define ENABLE_WRONG_METHOD_CHECK 0
2576  * LOCKING: called with the domain lock held
// mono_arch_build_imt_thunk: build an IMT dispatch thunk. Pass 1 sizes each
// entry (compare + branch + jump), pass 2 emits compare-on-MONO_ARCH_IMT_REG
// / branch / indirect-jump-through-vtable-slot code, pass 3 patches the
// inter-entry branches. %g5 is used as the scratch register throughout.
2579 mono_arch_build_imt_thunk (MonoVTable *vtable, MonoDomain *domain, MonoIMTCheckItem **imt_entries, int count)
2583 	guint32 *code, *start;
// Pass 1: compute the total code size.
2585 	for (i = 0; i < count; ++i) {
2586 		MonoIMTCheckItem *item = imt_entries [i];
2587 		if (item->is_equals) {
2588 			if (item->check_target_idx) {
2589 				if (!item->compare_done)
2590 					item->chunk_size += CMP_SIZE;
2591 				item->chunk_size += BR_SMALL_SIZE + JUMP_IMM_SIZE;
2593 				item->chunk_size += JUMP_IMM_SIZE;
2594 #if ENABLE_WRONG_METHOD_CHECK
2595 				item->chunk_size += CMP_SIZE + BR_SMALL_SIZE + 1;
2599 			item->chunk_size += CMP_SIZE + BR_LARGE_SIZE;
2600 			imt_entries [item->check_target_idx]->compare_done = TRUE;
2602 		size += item->chunk_size;
2604 	code = mono_code_manager_reserve (domain->code_mp, size * 4);
// Pass 2: emit the dispatch code.
2607 	for (i = 0; i < count; ++i) {
2608 		MonoIMTCheckItem *item = imt_entries [i];
2609 		item->code_target = (guint8*)code;
2610 		if (item->is_equals) {
2611 			if (item->check_target_idx) {
2612 				if (!item->compare_done) {
2613 					sparc_set (code, (guint32)item->method, sparc_g5);
2614 					sparc_cmp (code, MONO_ARCH_IMT_REG, sparc_g5);
2616 				item->jmp_code = (guint8*)code;
2617 				sparc_branch (code, 0, sparc_bne, 0);
2619 				sparc_set (code, ((guint32)(&(vtable->vtable [item->vtable_slot]))), sparc_g5);
2620 				sparc_ld (code, sparc_g5, 0, sparc_g5);
2621 				sparc_jmpl (code, sparc_g5, sparc_g0, sparc_g0);
2624 				/* enable the commented code to assert on wrong method */
2625 #if ENABLE_WRONG_METHOD_CHECK
2626 				g_assert_not_reached ();
2628 				sparc_set (code, ((guint32)(&(vtable->vtable [item->vtable_slot]))), sparc_g5);
2629 				sparc_ld (code, sparc_g5, 0, sparc_g5);
2630 				sparc_jmpl (code, sparc_g5, sparc_g0, sparc_g0);
2632 #if ENABLE_WRONG_METHOD_CHECK
2633 				g_assert_not_reached ();
2637 			sparc_set (code, (guint32)item->method, sparc_g5);
2638 			sparc_cmp (code, MONO_ARCH_IMT_REG, sparc_g5);
2639 			item->jmp_code = (guint8*)code;
2640 			sparc_branch (code, 0, sparc_beu, 0);
2644 	/* patch the branches to get to the target items */
2645 	for (i = 0; i < count; ++i) {
2646 		MonoIMTCheckItem *item = imt_entries [i];
2647 		if (item->jmp_code) {
2648 			if (item->check_target_idx) {
2649 				sparc_patch ((guint32*)item->jmp_code, imt_entries [item->check_target_idx]->code_target);
2654 	mono_arch_flush_icache ((guint8*)start, (code - start) * 4);
2656 	mono_stats.imt_thunks_size += (code - start) * 4;
2657 	g_assert (code - start <= size);
/*
 * mono_arch_find_imt_method:
 *
 *   Return the interface method of the IMT call at CODE; the thunk receives
 * it in %g1 (MONO_ARCH_IMT_REG). The visible g_assert_not_reached suggests
 * an unsupported configuration path precedes the return — partial extract.
 */
2662 mono_arch_find_imt_method (gpointer *regs, guint8 *code)
2665 	g_assert_not_reached ();
2668 	return (MonoMethod*)regs [sparc_g1];
/*
 * mono_arch_find_this_argument:
 *
 *   Return the 'this' argument of the call at CODE: flush register windows
 * so REGS is current, then read it from %o0 (first outgoing argument).
 */
2672 mono_arch_find_this_argument (gpointer *regs, MonoMethod *method, MonoGenericSharingContext *gsctx)
2674 	mono_sparc_flushw ();
2676 	return (gpointer)regs [sparc_o0];
2680 * Some conventions used in the following code.
2681 * 2) The only scratch registers we have are o7 and g1. We try to
2682 * stick to o7 when we can, and use g1 when necessary.
/*
 * mono_arch_output_basic_block:
 *   Emit native SPARC code for every MonoInst in basic block BB, appending
 *   at cfg->native_code + cfg->code_len and updating cfg->code_len when
 *   done. One switch arm per opcode; the instruction-spec table supplies a
 *   worst-case encoded length (max_len) which is asserted after each
 *   instruction is emitted.
 */
2686 mono_arch_output_basic_block (MonoCompile *cfg, MonoBasicBlock *bb)
2691 guint32 *code = (guint32*)(cfg->native_code + cfg->code_len);
2692 MonoInst *last_ins = NULL;
2696 if (cfg->verbose_level > 2)
2697 g_print ("Basic block %d starting at offset 0x%x\n", bb->block_num, bb->native_offset);
2699 cpos = bb->max_offset;
2701 if (cfg->prof_options & MONO_PROFILE_COVERAGE) {
/* Main emission loop: one iteration per IR instruction in the block. */
2705 MONO_BB_FOR_EACH_INS (bb, ins) {
2708 offset = (guint8*)code - cfg->native_code;
2710 spec = ins_get_spec (ins->opcode);
2712 max_len = ((guint8 *)spec)[MONO_INST_LEN];
/* Grow the code buffer if the worst-case encoding might not fit. */
2714 if (offset > (cfg->code_size - max_len - 16)) {
2715 cfg->code_size *= 2;
2716 cfg->native_code = g_realloc (cfg->native_code, cfg->code_size);
2717 code = (guint32*)(cfg->native_code + offset);
2719 code_start = (guint8*)code;
2720 // if (ins->cil_code)
2721 // g_print ("cil code\n");
2722 mono_debug_record_line_number (cfg, ins, offset);
2724 switch (ins->opcode) {
/* --- stores of immediate values, one emitter macro per operand width --- */
2725 case OP_STOREI1_MEMBASE_IMM:
2726 EMIT_STORE_MEMBASE_IMM (ins, stb);
2728 case OP_STOREI2_MEMBASE_IMM:
2729 EMIT_STORE_MEMBASE_IMM (ins, sth);
2731 case OP_STORE_MEMBASE_IMM:
2732 EMIT_STORE_MEMBASE_IMM (ins, sti);
2734 case OP_STOREI4_MEMBASE_IMM:
2735 EMIT_STORE_MEMBASE_IMM (ins, st);
2737 case OP_STOREI8_MEMBASE_IMM:
2739 EMIT_STORE_MEMBASE_IMM (ins, stx);
2741 /* Only generated by peephole opts */
2742 g_assert ((ins->inst_offset % 8) == 0);
2743 g_assert (ins->inst_imm == 0);
2744 EMIT_STORE_MEMBASE_IMM (ins, stx);
/* --- stores of register values --- */
2747 case OP_STOREI1_MEMBASE_REG:
2748 EMIT_STORE_MEMBASE_REG (ins, stb);
2750 case OP_STOREI2_MEMBASE_REG:
2751 EMIT_STORE_MEMBASE_REG (ins, sth);
2753 case OP_STOREI4_MEMBASE_REG:
2754 EMIT_STORE_MEMBASE_REG (ins, st);
2756 case OP_STOREI8_MEMBASE_REG:
2758 EMIT_STORE_MEMBASE_REG (ins, stx);
2760 /* Only used by OP_MEMSET */
2761 EMIT_STORE_MEMBASE_REG (ins, std);
2764 case OP_STORE_MEMBASE_REG:
2765 EMIT_STORE_MEMBASE_REG (ins, sti);
/* Load from an absolute address: materialize the address, then load. */
2768 sparc_set (code, ins->inst_c0, ins->dreg);
2769 sparc_ld (code, ins->dreg, sparc_g0, ins->dreg);
/* --- loads, sign- or zero-extending per operand width --- */
2771 case OP_LOADI4_MEMBASE:
2773 EMIT_LOAD_MEMBASE (ins, ldsw);
2775 EMIT_LOAD_MEMBASE (ins, ld);
2778 case OP_LOADU4_MEMBASE:
2779 EMIT_LOAD_MEMBASE (ins, ld);
2781 case OP_LOADU1_MEMBASE:
2782 EMIT_LOAD_MEMBASE (ins, ldub);
2784 case OP_LOADI1_MEMBASE:
2785 EMIT_LOAD_MEMBASE (ins, ldsb);
2787 case OP_LOADU2_MEMBASE:
2788 EMIT_LOAD_MEMBASE (ins, lduh);
2790 case OP_LOADI2_MEMBASE:
2791 EMIT_LOAD_MEMBASE (ins, ldsh);
2793 case OP_LOAD_MEMBASE:
2795 EMIT_LOAD_MEMBASE (ins, ldx);
2797 EMIT_LOAD_MEMBASE (ins, ld);
2801 case OP_LOADI8_MEMBASE:
2802 EMIT_LOAD_MEMBASE (ins, ldx);
/* --- narrowing conversions via shift pairs (sign- or zero-extend) --- */
2805 case OP_ICONV_TO_I1:
2806 sparc_sll_imm (code, ins->sreg1, 24, sparc_o7);
2807 sparc_sra_imm (code, sparc_o7, 24, ins->dreg);
2809 case OP_ICONV_TO_I2:
2810 sparc_sll_imm (code, ins->sreg1, 16, sparc_o7);
2811 sparc_sra_imm (code, sparc_o7, 16, ins->dreg);
2813 case OP_ICONV_TO_U1:
2814 sparc_and_imm (code, FALSE, ins->sreg1, 0xff, ins->dreg);
2816 case OP_ICONV_TO_U2:
2817 sparc_sll_imm (code, ins->sreg1, 16, sparc_o7);
2818 sparc_srl_imm (code, sparc_o7, 16, ins->dreg);
2820 case OP_LCONV_TO_OVF_U4:
2821 case OP_ICONV_TO_OVF_U4:
2822 /* Only used on V9 */
/* Range-check against [0, 1<<32): throw OverflowException if out of range. */
2823 sparc_cmp_imm (code, ins->sreg1, 0);
2824 mono_add_patch_info (cfg, (guint8*)(code) - (cfg)->native_code,
2825 MONO_PATCH_INFO_EXC, "OverflowException");
2826 sparc_branchp (code, 0, sparc_bl, sparc_xcc_short, 0, 0);
2828 sparc_set (code, 1, sparc_o7);
2829 sparc_sllx_imm (code, sparc_o7, 32, sparc_o7);
2830 sparc_cmp (code, ins->sreg1, sparc_o7);
2831 mono_add_patch_info (cfg, (guint8*)(code) - (cfg)->native_code,
2832 MONO_PATCH_INFO_EXC, "OverflowException");
2833 sparc_branchp (code, 0, sparc_bge, sparc_xcc_short, 0, 0);
2835 sparc_mov_reg_reg (code, ins->sreg1, ins->dreg);
2837 case OP_LCONV_TO_OVF_I4_UN:
2838 case OP_ICONV_TO_OVF_I4_UN:
2839 /* Only used on V9 */
/* --- compares: immediate form used only when it fits in a simm13 --- */
2845 sparc_cmp (code, ins->sreg1, ins->sreg2);
2847 case OP_COMPARE_IMM:
2848 case OP_ICOMPARE_IMM:
2849 if (sparc_is_imm13 (ins->inst_imm))
2850 sparc_cmp_imm (code, ins->sreg1, ins->inst_imm);
2852 sparc_set (code, ins->inst_imm, sparc_o7);
2853 sparc_cmp (code, ins->sreg1, sparc_o7);
2858 * gdb does not like encountering 'ta 1' in the debugged code. So
2859 * instead of emitting a trap, we emit a call to a C function and place a
2862 //sparc_ta (code, 1);
2863 mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_ABS, mono_break);
/* --- integer ALU ops; second argument of sparc_add/sub selects whether
 * the condition codes are set --- */
2868 sparc_add (code, TRUE, ins->sreg1, ins->sreg2, ins->dreg);
2871 sparc_add (code, FALSE, ins->sreg1, ins->sreg2, ins->dreg);
2876 /* according to inssel-long32.brg, this should set cc */
2877 EMIT_ALU_IMM (ins, add, TRUE);
2881 /* according to inssel-long32.brg, this should set cc */
2882 sparc_addx (code, TRUE, ins->sreg1, ins->sreg2, ins->dreg);
2886 EMIT_ALU_IMM (ins, addx, TRUE);
2890 sparc_sub (code, TRUE, ins->sreg1, ins->sreg2, ins->dreg);
2893 sparc_sub (code, FALSE, ins->sreg1, ins->sreg2, ins->dreg);
2898 /* according to inssel-long32.brg, this should set cc */
2899 EMIT_ALU_IMM (ins, sub, TRUE);
2903 /* according to inssel-long32.brg, this should set cc */
2904 sparc_subx (code, TRUE, ins->sreg1, ins->sreg2, ins->dreg);
2908 EMIT_ALU_IMM (ins, subx, TRUE);
2911 sparc_and (code, FALSE, ins->sreg1, ins->sreg2, ins->dreg);
2915 EMIT_ALU_IMM (ins, and, FALSE);
/* --- division: %y must hold the sign (sdiv) or zero (udiv) extension of
 * the dividend before the divide instruction --- */
2918 /* Sign extend sreg1 into %y */
2919 sparc_sra_imm (code, ins->sreg1, 31, sparc_o7);
2920 sparc_wry (code, sparc_o7, sparc_g0);
2921 sparc_sdiv (code, TRUE, ins->sreg1, ins->sreg2, ins->dreg);
2922 EMIT_COND_SYSTEM_EXCEPTION_GENERAL (code, sparc_boverflow, "ArithmeticException", TRUE, sparc_icc_short);
2925 sparc_wry (code, sparc_g0, sparc_g0);
2926 sparc_udiv (code, FALSE, ins->sreg1, ins->sreg2, ins->dreg);
2932 /* Transform division into a shift */
2933 for (i = 1; i < 30; ++i) {
2935 if (ins->inst_imm == imm)
/* Division by 2: add (sreg1 >> 31) so negative dividends round toward 0. */
2941 sparc_srl_imm (code, ins->sreg1, 31, sparc_o7);
2942 sparc_add (code, FALSE, ins->sreg1, sparc_o7, ins->dreg);
2943 sparc_sra_imm (code, ins->dreg, 1, ins->dreg);
2946 /* http://compilers.iecc.com/comparch/article/93-04-079 */
2947 sparc_sra_imm (code, ins->sreg1, 31, sparc_o7);
2948 sparc_srl_imm (code, sparc_o7, 32 - i, sparc_o7);
2949 sparc_add (code, FALSE, ins->sreg1, sparc_o7, ins->dreg);
2950 sparc_sra_imm (code, ins->dreg, i, ins->dreg);
2954 /* Sign extend sreg1 into %y */
2955 sparc_sra_imm (code, ins->sreg1, 31, sparc_o7);
2956 sparc_wry (code, sparc_o7, sparc_g0);
2957 EMIT_ALU_IMM (ins, sdiv, TRUE);
2958 EMIT_COND_SYSTEM_EXCEPTION_GENERAL (code, sparc_boverflow, "ArithmeticException", TRUE, sparc_icc_short);
2962 case OP_IDIV_UN_IMM:
2963 sparc_wry (code, sparc_g0, sparc_g0);
2964 EMIT_ALU_IMM (ins, udiv, FALSE);
/* --- remainder: computed as sreg1 - (sreg1 / sreg2) * sreg2 --- */
2967 /* Sign extend sreg1 into %y */
2968 sparc_sra_imm (code, ins->sreg1, 31, sparc_o7);
2969 sparc_wry (code, sparc_o7, sparc_g0);
2970 sparc_sdiv (code, TRUE, ins->sreg1, ins->sreg2, sparc_o7);
2971 EMIT_COND_SYSTEM_EXCEPTION_GENERAL (code, sparc_boverflow, "ArithmeticException", TRUE, sparc_icc_short);
2972 sparc_smul (code, FALSE, ins->sreg2, sparc_o7, sparc_o7);
2973 sparc_sub (code, FALSE, ins->sreg1, sparc_o7, ins->dreg);
2976 sparc_wry (code, sparc_g0, sparc_g0);
2977 sparc_udiv (code, FALSE, ins->sreg1, ins->sreg2, sparc_o7);
2978 sparc_umul (code, FALSE, ins->sreg2, sparc_o7, sparc_o7);
2979 sparc_sub (code, FALSE, ins->sreg1, sparc_o7, ins->dreg);
2983 /* Sign extend sreg1 into %y */
2984 sparc_sra_imm (code, ins->sreg1, 31, sparc_o7);
2985 sparc_wry (code, sparc_o7, sparc_g0);
2986 if (!sparc_is_imm13 (ins->inst_imm)) {
2987 sparc_set (code, ins->inst_imm, GP_SCRATCH_REG);
2988 sparc_sdiv (code, TRUE, ins->sreg1, GP_SCRATCH_REG, sparc_o7);
2989 EMIT_COND_SYSTEM_EXCEPTION_GENERAL (code, sparc_boverflow, "ArithmeticException", TRUE, sparc_icc_short);
2990 sparc_smul (code, FALSE, sparc_o7, GP_SCRATCH_REG, sparc_o7);
2993 sparc_sdiv_imm (code, TRUE, ins->sreg1, ins->inst_imm, sparc_o7);
2994 EMIT_COND_SYSTEM_EXCEPTION_GENERAL (code, sparc_boverflow, "ArithmeticException", TRUE, sparc_icc_short);
2995 sparc_smul_imm (code, FALSE, sparc_o7, ins->inst_imm, sparc_o7);
2997 sparc_sub (code, FALSE, ins->sreg1, sparc_o7, ins->dreg);
3000 sparc_or (code, FALSE, ins->sreg1, ins->sreg2, ins->dreg);
3004 EMIT_ALU_IMM (ins, or, FALSE);
3007 sparc_xor (code, FALSE, ins->sreg1, ins->sreg2, ins->dreg);
3011 EMIT_ALU_IMM (ins, xor, FALSE);
/* --- shifts: 32-bit shift counts are 5 bits, 64-bit (x-form) are 6 bits;
 * larger immediates go through a scratch register --- */
3014 sparc_sll (code, ins->sreg1, ins->sreg2, ins->dreg);
3018 if (ins->inst_imm < (1 << 5))
3019 sparc_sll_imm (code, ins->sreg1, ins->inst_imm, ins->dreg);
3021 sparc_set (code, ins->inst_imm, sparc_o7);
3022 sparc_sll (code, ins->sreg1, sparc_o7, ins->dreg);
3026 sparc_sra (code, ins->sreg1, ins->sreg2, ins->dreg);
3030 if (ins->inst_imm < (1 << 5))
3031 sparc_sra_imm (code, ins->sreg1, ins->inst_imm, ins->dreg);
3033 sparc_set (code, ins->inst_imm, sparc_o7);
3034 sparc_sra (code, ins->sreg1, sparc_o7, ins->dreg);
3038 case OP_ISHR_UN_IMM:
3039 if (ins->inst_imm < (1 << 5))
3040 sparc_srl_imm (code, ins->sreg1, ins->inst_imm, ins->dreg);
3042 sparc_set (code, ins->inst_imm, sparc_o7);
3043 sparc_srl (code, ins->sreg1, sparc_o7, ins->dreg);
3047 sparc_srl (code, ins->sreg1, ins->sreg2, ins->dreg);
3050 sparc_sllx (code, ins->sreg1, ins->sreg2, ins->dreg);
3053 if (ins->inst_imm < (1 << 6))
3054 sparc_sllx_imm (code, ins->sreg1, ins->inst_imm, ins->dreg);
3056 sparc_set (code, ins->inst_imm, sparc_o7);
3057 sparc_sllx (code, ins->sreg1, sparc_o7, ins->dreg);
3061 sparc_srax (code, ins->sreg1, ins->sreg2, ins->dreg);
3064 if (ins->inst_imm < (1 << 6))
3065 sparc_srax_imm (code, ins->sreg1, ins->inst_imm, ins->dreg);
3067 sparc_set (code, ins->inst_imm, sparc_o7);
3068 sparc_srax (code, ins->sreg1, sparc_o7, ins->dreg);
3072 sparc_srlx (code, ins->sreg1, ins->sreg2, ins->dreg);
3074 case OP_LSHR_UN_IMM:
3075 if (ins->inst_imm < (1 << 6))
3076 sparc_srlx_imm (code, ins->sreg1, ins->inst_imm, ins->dreg);
3078 sparc_set (code, ins->inst_imm, sparc_o7);
3079 sparc_srlx (code, ins->sreg1, sparc_o7, ins->dreg);
3083 /* can't use sparc_not */
3084 sparc_xnor (code, FALSE, ins->sreg1, sparc_g0, ins->dreg);
3087 /* can't use sparc_neg */
3088 sparc_sub (code, FALSE, sparc_g0, ins->sreg1, ins->dreg);
/* --- multiplication; power-of-two immediates become shifts --- */
3091 sparc_smul (code, FALSE, ins->sreg1, ins->sreg2, ins->dreg);
3097 if ((ins->inst_imm == 1) && (ins->sreg1 == ins->dreg))
3100 /* Transform multiplication into a shift */
3101 for (i = 0; i < 30; ++i) {
3103 if (ins->inst_imm == imm)
3107 sparc_sll_imm (code, ins->sreg1, i, ins->dreg);
3109 EMIT_ALU_IMM (ins, smul, FALSE);
/* Overflow check: the high half of the product (read from %y) must equal
 * the sign extension of the low half (zero for the unsigned case). */
3113 sparc_smul (code, TRUE, ins->sreg1, ins->sreg2, ins->dreg);
3114 sparc_rdy (code, sparc_g1);
3115 sparc_sra_imm (code, ins->dreg, 31, sparc_o7);
3116 sparc_cmp (code, sparc_g1, sparc_o7);
3117 EMIT_COND_SYSTEM_EXCEPTION_GENERAL (ins, sparc_bne, "OverflowException", TRUE, sparc_icc_short);
3119 case OP_IMUL_OVF_UN:
3120 sparc_umul (code, TRUE, ins->sreg1, ins->sreg2, ins->dreg);
3121 sparc_rdy (code, sparc_o7);
3122 sparc_cmp (code, sparc_o7, sparc_g0);
3123 EMIT_COND_SYSTEM_EXCEPTION_GENERAL (ins, sparc_bne, "OverflowException", TRUE, sparc_icc_short);
/* --- constants; patchable constants use the fixed-length set template so
 * the patcher can rewrite them later --- */
3126 sparc_set (code, ins->inst_c0, ins->dreg);
3129 sparc_set (code, ins->inst_l, ins->dreg);
3132 mono_add_patch_info (cfg, offset, (MonoJumpInfoType)ins->inst_i1, ins->inst_p0);
3133 sparc_set_template (code, ins->dreg);
3136 mono_add_patch_info (cfg, offset, (MonoJumpInfoType)ins->inst_i1, ins->inst_p0);
3137 sparc_set_template (code, ins->dreg);
3139 case OP_ICONV_TO_I4:
3140 case OP_ICONV_TO_U4:
3142 if (ins->sreg1 != ins->dreg)
3143 sparc_mov_reg_reg (code, ins->sreg1, ins->dreg);
3147 if (ins->sreg1 != ins->dreg)
3148 sparc_fmovd (code, ins->sreg1, ins->dreg);
3150 sparc_fmovs (code, ins->sreg1, ins->dreg);
3151 sparc_fmovs (code, ins->sreg1 + 1, ins->dreg + 1);
3154 case OP_SPARC_SETFREG_FLOAT:
3155 /* Only used on V9 */
3156 sparc_fdtos (code, ins->sreg1, ins->dreg);
/* Tail call: reload volatile args, jump to the (patched) target and pop
 * this frame with restore in the delay slot. */
3159 if (cfg->method->save_lmf)
3162 code = emit_load_volatile_arguments (cfg, code);
3163 mono_add_patch_info (cfg, (guint8*)code - cfg->native_code, MONO_PATCH_INFO_METHOD_JUMP, ins->inst_p0);
3164 sparc_set_template (code, sparc_o7);
3165 sparc_jmpl (code, sparc_o7, sparc_g0, sparc_g0);
3166 /* Restore parent frame in delay slot */
3167 sparc_restore_imm (code, sparc_g0, 0, sparc_g0);
3170 /* ensure ins->sreg1 is not NULL */
3171 /* Might be misaligned in case of vtypes so use a byte load */
3172 sparc_ldsb_imm (code, ins->sreg1, 0, sparc_g0);
3175 sparc_add_imm (code, FALSE, sparc_fp, cfg->sig_cookie, sparc_o7);
3176 sparc_sti_imm (code, sparc_o7, ins->sreg1, 0);
/* --- direct calls --- */
3184 call = (MonoCallInst*)ins;
3185 g_assert (!call->virtual);
3186 code = emit_save_sp_to_lmf (cfg, code);
3187 if (ins->flags & MONO_INST_HAS_METHOD)
3188 code = emit_call (cfg, code, MONO_PATCH_INFO_METHOD, call->method);
3190 code = emit_call (cfg, code, MONO_PATCH_INFO_ABS, call->fptr);
3192 code = emit_vret_token (cfg->generic_sharing_context, ins, code);
3193 code = emit_move_return_value (ins, code);
/* --- indirect calls through a register --- */
3199 case OP_VOIDCALL_REG:
3201 call = (MonoCallInst*)ins;
3202 code = emit_save_sp_to_lmf (cfg, code);
3203 sparc_jmpl (code, ins->sreg1, sparc_g0, sparc_callsite);
3205 * We emit a special kind of nop in the delay slot to tell the
3206 * trampoline code that this is a virtual call, thus an unbox
3207 * trampoline might need to be called.
3210 sparc_or_imm (code, FALSE, sparc_g0, 0xca, sparc_g0);
3214 code = emit_vret_token (cfg->generic_sharing_context, ins, code);
3215 code = emit_move_return_value (ins, code);
/* --- indirect calls through [basereg + offset] --- */
3217 case OP_FCALL_MEMBASE:
3218 case OP_LCALL_MEMBASE:
3219 case OP_VCALL_MEMBASE:
3220 case OP_VCALL2_MEMBASE:
3221 case OP_VOIDCALL_MEMBASE:
3222 case OP_CALL_MEMBASE:
3223 call = (MonoCallInst*)ins;
3224 code = emit_save_sp_to_lmf (cfg, code);
3225 if (sparc_is_imm13 (ins->inst_offset)) {
3226 sparc_ldi_imm (code, ins->inst_basereg, ins->inst_offset, sparc_o7);
3228 sparc_set (code, ins->inst_offset, sparc_o7);
3229 sparc_ldi (code, ins->inst_basereg, sparc_o7, sparc_o7);
3231 sparc_jmpl (code, sparc_o7, sparc_g0, sparc_callsite);
3233 sparc_or_imm (code, FALSE, sparc_g0, 0xca, sparc_g0);
3237 code = emit_vret_token (cfg->generic_sharing_context, ins, code);
3238 code = emit_move_return_value (ins, code);
3241 if (mono_method_signature (cfg->method)->ret->type == MONO_TYPE_R4)
3242 sparc_fdtos (code, ins->sreg1, sparc_f0);
3245 sparc_fmovd (code, ins->sreg1, ins->dreg);
3247 /* FIXME: Why not use fmovd ? */
3248 sparc_fmovs (code, ins->sreg1, ins->dreg);
3249 sparc_fmovs (code, ins->sreg1 + 1, ins->dreg + 1);
3254 g_assert_not_reached ();
/* --- localloc: allocate ins->sreg1 bytes on the stack, keeping %sp valid
 * and MONO_ARCH_FRAME_ALIGNMENT, optionally zero-initializing --- */
3260 #ifdef MONO_ARCH_SIGSEGV_ON_ALTSTACK
3261 /* Perform stack touching */
3265 /* Keep alignment */
3266 /* Add 4 to compensate for the rounding of localloc_offset */
3267 sparc_add_imm (code, FALSE, ins->sreg1, 4 + MONO_ARCH_LOCALLOC_ALIGNMENT - 1, ins->dreg);
3268 sparc_set (code, ~(MONO_ARCH_LOCALLOC_ALIGNMENT - 1), sparc_o7);
3269 sparc_and (code, FALSE, ins->dreg, sparc_o7, ins->dreg);
3271 if ((ins->flags & MONO_INST_INIT) && (ins->sreg1 == ins->dreg)) {
3273 size_reg = sparc_g4;
3275 size_reg = sparc_g1;
3277 sparc_mov_reg_reg (code, ins->dreg, size_reg);
3280 size_reg = ins->sreg1;
3282 sparc_sub (code, FALSE, sparc_sp, ins->dreg, ins->dreg);
3283 /* Keep %sp valid at all times */
3284 sparc_mov_reg_reg (code, ins->dreg, sparc_sp);
3285 /* Round localloc_offset too so the result is at least 8 aligned */
3286 offset2 = ALIGN_TO (cfg->arch.localloc_offset, 8);
3287 g_assert (sparc_is_imm13 (MONO_SPARC_STACK_BIAS + offset2));
3288 sparc_add_imm (code, FALSE, ins->dreg, MONO_SPARC_STACK_BIAS + offset2, ins->dreg);
3290 if (ins->flags & MONO_INST_INIT) {
3292 /* Initialize memory region */
3293 sparc_cmp_imm (code, size_reg, 0);
3295 sparc_branch (code, 0, sparc_be, 0);
3297 sparc_set (code, 0, sparc_o7);
3298 sparc_sub_imm (code, 0, size_reg, sparcv9 ? 8 : 4, size_reg);
3302 sparc_stx (code, sparc_g0, ins->dreg, sparc_o7);
3304 sparc_st (code, sparc_g0, ins->dreg, sparc_o7);
3305 sparc_cmp (code, sparc_o7, size_reg);
3307 sparc_branch (code, 0, sparc_bl, 0);
3308 sparc_patch (br [2], br [1]);
3310 sparc_add_imm (code, 0, sparc_o7, sparcv9 ? 8 : 4, sparc_o7);
3311 sparc_patch (br [0], code);
3315 case OP_LOCALLOC_IMM: {
3316 gint32 offset = ins->inst_imm;
3319 #ifdef MONO_ARCH_SIGSEGV_ON_ALTSTACK
3320 /* Perform stack touching */
3324 /* To compensate for the rounding of localloc_offset */
3325 offset += sizeof (gpointer);
3326 offset = ALIGN_TO (offset, MONO_ARCH_FRAME_ALIGNMENT);
3327 if (sparc_is_imm13 (offset))
3328 sparc_sub_imm (code, FALSE, sparc_sp, offset, sparc_sp);
3330 sparc_set (code, offset, sparc_o7);
3331 sparc_sub (code, FALSE, sparc_sp, sparc_o7, sparc_sp);
3333 /* Round localloc_offset too so the result is at least 8 aligned */
3334 offset2 = ALIGN_TO (cfg->arch.localloc_offset, 8);
3335 g_assert (sparc_is_imm13 (MONO_SPARC_STACK_BIAS + offset2));
3336 sparc_add_imm (code, FALSE, sparc_sp, MONO_SPARC_STACK_BIAS + offset2, ins->dreg);
3337 if ((ins->flags & MONO_INST_INIT) && (offset > 0)) {
/* Small regions: unrolled stores; large regions: a store loop. */
3343 while (i < offset) {
3345 sparc_stx_imm (code, sparc_g0, ins->dreg, i);
3349 sparc_st_imm (code, sparc_g0, ins->dreg, i);
3355 sparc_set (code, offset, sparc_o7);
3356 sparc_sub_imm (code, 0, sparc_o7, sparcv9 ? 8 : 4, sparc_o7);
3357 /* beginning of loop */
3360 sparc_stx (code, sparc_g0, ins->dreg, sparc_o7);
3362 sparc_st (code, sparc_g0, ins->dreg, sparc_o7);
3363 sparc_cmp_imm (code, sparc_o7, 0);
3365 sparc_branch (code, 0, sparc_bne, 0);
3367 sparc_sub_imm (code, 0, sparc_o7, sparcv9 ? 8 : 4, sparc_o7);
3368 sparc_patch (br [1], br [0]);
/* --- exception throwing: exception object goes in %o0 --- */
3374 sparc_mov_reg_reg (code, ins->sreg1, sparc_o0);
3375 mono_add_patch_info (cfg, (guint8*)code - cfg->native_code, MONO_PATCH_INFO_INTERNAL_METHOD,
3376 (gpointer)"mono_arch_throw_exception");
3380 sparc_mov_reg_reg (code, ins->sreg1, sparc_o0);
3381 mono_add_patch_info (cfg, (guint8*)code - cfg->native_code, MONO_PATCH_INFO_INTERNAL_METHOD,
3382 (gpointer)"mono_arch_rethrow_exception");
3385 case OP_START_HANDLER: {
3387 * The START_HANDLER instruction marks the beginning of a handler
3388 * block. It is called using a call instruction, so %o7 contains
3389 * the return address. Since the handler executes in the same stack
3390 * frame as the method itself, we can't use save/restore to save
3391 * the return address. Instead, we save it into a dedicated
3394 MonoInst *spvar = mono_find_spvar_for_region (cfg, bb->region);
3395 if (!sparc_is_imm13 (spvar->inst_offset)) {
3396 sparc_set (code, spvar->inst_offset, GP_SCRATCH_REG);
3397 sparc_sti (code, sparc_o7, spvar->inst_basereg, GP_SCRATCH_REG);
3400 sparc_sti_imm (code, sparc_o7, spvar->inst_basereg, spvar->inst_offset);
/* ENDFILTER/ENDFINALLY: reload the saved return address and jump past the
 * call instruction (ret addr + 8). */
3403 case OP_ENDFILTER: {
3404 MonoInst *spvar = mono_find_spvar_for_region (cfg, bb->region);
3405 if (!sparc_is_imm13 (spvar->inst_offset)) {
3406 sparc_set (code, spvar->inst_offset, GP_SCRATCH_REG);
3407 sparc_ldi (code, spvar->inst_basereg, GP_SCRATCH_REG, sparc_o7);
3410 sparc_ldi_imm (code, spvar->inst_basereg, spvar->inst_offset, sparc_o7);
3411 sparc_jmpl_imm (code, sparc_o7, 8, sparc_g0);
3413 sparc_mov_reg_reg (code, ins->sreg1, sparc_o0);
3416 case OP_ENDFINALLY: {
3417 MonoInst *spvar = mono_find_spvar_for_region (cfg, bb->region);
3418 if (!sparc_is_imm13 (spvar->inst_offset)) {
3419 sparc_set (code, spvar->inst_offset, GP_SCRATCH_REG);
3420 sparc_ldi (code, spvar->inst_basereg, GP_SCRATCH_REG, sparc_o7);
3423 sparc_ldi_imm (code, spvar->inst_basereg, spvar->inst_offset, sparc_o7);
3424 sparc_jmpl_imm (code, sparc_o7, 8, sparc_g0);
3428 case OP_CALL_HANDLER:
3429 mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_BB, ins->inst_target_bb);
3430 /* This is a jump inside the method, so call_simple works even on V9 */
3431 sparc_call_simple (code, 0);
3435 ins->inst_c0 = (guint8*)code - cfg->native_code;
3439 case OP_DUMMY_STORE:
3440 case OP_NOT_REACHED:
/* --- unconditional branches: resolved immediately when the target offset
 * is known, otherwise recorded as a patch --- */
3444 //g_print ("target: %p, next: %p, curr: %p, last: %p\n", ins->inst_target_bb, bb->next_bb, ins, bb->last_ins);
3445 if ((ins->inst_target_bb == bb->next_bb) && ins == bb->last_ins)
3447 if (ins->flags & MONO_INST_BRLABEL) {
3448 if (ins->inst_i0->inst_c0) {
3449 gint32 disp = (ins->inst_i0->inst_c0 - ((guint8*)code - cfg->native_code)) >> 2;
3450 g_assert (sparc_is_imm22 (disp));
3451 sparc_branch (code, 1, sparc_ba, disp);
3453 mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_LABEL, ins->inst_i0);
3454 sparc_branch (code, 1, sparc_ba, 0);
3457 if (ins->inst_target_bb->native_offset) {
3458 gint32 disp = (ins->inst_target_bb->native_offset - ((guint8*)code - cfg->native_code)) >> 2;
3459 g_assert (sparc_is_imm22 (disp));
3460 sparc_branch (code, 1, sparc_ba, disp);
3462 mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_BB, ins->inst_target_bb);
3463 sparc_branch (code, 1, sparc_ba, 0);
3469 sparc_jmp (code, ins->sreg1, sparc_g0);
/* --- setcc: materialize a condition as 0/1 in dreg; uses conditional move
 * when V9 cmov is available, a branch-over sequence otherwise --- */
3477 if (v64 && (cfg->opt & MONO_OPT_CMOV)) {
3478 sparc_clr_reg (code, ins->dreg);
3479 sparc_movcc_imm (code, sparc_xcc, opcode_to_sparc_cond (ins->opcode), 1, ins->dreg);
3482 sparc_clr_reg (code, ins->dreg);
3484 sparc_branchp (code, 1, opcode_to_sparc_cond (ins->opcode), DEFAULT_ICC, 0, 2);
3486 sparc_branch (code, 1, opcode_to_sparc_cond (ins->opcode), 2);
3489 sparc_set (code, 1, ins->dreg);
3497 if (v64 && (cfg->opt & MONO_OPT_CMOV)) {
3498 sparc_clr_reg (code, ins->dreg);
3499 sparc_movcc_imm (code, sparc_icc, opcode_to_sparc_cond (ins->opcode), 1, ins->dreg);
3502 sparc_clr_reg (code, ins->dreg);
3503 sparc_branchp (code, 1, opcode_to_sparc_cond (ins->opcode), sparc_icc_short, 0, 2);
3505 sparc_set (code, 1, ins->dreg);
/* --- conditional exception throws: branch to a patched-in throw site --- */
3508 case OP_COND_EXC_EQ:
3509 case OP_COND_EXC_NE_UN:
3510 case OP_COND_EXC_LT:
3511 case OP_COND_EXC_LT_UN:
3512 case OP_COND_EXC_GT:
3513 case OP_COND_EXC_GT_UN:
3514 case OP_COND_EXC_GE:
3515 case OP_COND_EXC_GE_UN:
3516 case OP_COND_EXC_LE:
3517 case OP_COND_EXC_LE_UN:
3518 case OP_COND_EXC_OV:
3519 case OP_COND_EXC_NO:
3521 case OP_COND_EXC_NC:
3522 case OP_COND_EXC_IEQ:
3523 case OP_COND_EXC_INE_UN:
3524 case OP_COND_EXC_ILT:
3525 case OP_COND_EXC_ILT_UN:
3526 case OP_COND_EXC_IGT:
3527 case OP_COND_EXC_IGT_UN:
3528 case OP_COND_EXC_IGE:
3529 case OP_COND_EXC_IGE_UN:
3530 case OP_COND_EXC_ILE:
3531 case OP_COND_EXC_ILE_UN:
3532 case OP_COND_EXC_IOV:
3533 case OP_COND_EXC_INO:
3534 case OP_COND_EXC_IC:
3535 case OP_COND_EXC_INC:
3539 EMIT_COND_SYSTEM_EXCEPTION (ins, opcode_to_sparc_cond (ins->opcode), ins->inst_p1);
3542 case OP_SPARC_COND_EXC_EQZ:
3543 EMIT_COND_SYSTEM_EXCEPTION_BPR (ins, brz, ins->inst_p1);
3545 case OP_SPARC_COND_EXC_GEZ:
3546 EMIT_COND_SYSTEM_EXCEPTION_BPR (ins, brgez, ins->inst_p1);
3548 case OP_SPARC_COND_EXC_GTZ:
3549 EMIT_COND_SYSTEM_EXCEPTION_BPR (ins, brgz, ins->inst_p1);
3551 case OP_SPARC_COND_EXC_LEZ:
3552 EMIT_COND_SYSTEM_EXCEPTION_BPR (ins, brlez, ins->inst_p1);
3554 case OP_SPARC_COND_EXC_LTZ:
3555 EMIT_COND_SYSTEM_EXCEPTION_BPR (ins, brlz, ins->inst_p1);
3557 case OP_SPARC_COND_EXC_NEZ:
3558 EMIT_COND_SYSTEM_EXCEPTION_BPR (ins, brnz, ins->inst_p1);
/* --- conditional branches (predicted and register-compare forms) --- */
3572 EMIT_COND_BRANCH_PREDICTED (ins, opcode_to_sparc_cond (ins->opcode), 1, 1);
3574 EMIT_COND_BRANCH (ins, opcode_to_sparc_cond (ins->opcode), 1, 1);
3579 EMIT_COND_BRANCH_BPR (ins, brz, 1, 1, 1);
3581 case OP_SPARC_BRLEZ:
3582 EMIT_COND_BRANCH_BPR (ins, brlez, 1, 1, 1);
3585 EMIT_COND_BRANCH_BPR (ins, brlz, 1, 1, 1);
3588 EMIT_COND_BRANCH_BPR (ins, brnz, 1, 1, 1);
3591 EMIT_COND_BRANCH_BPR (ins, brgz, 1, 1, 1);
3593 case OP_SPARC_BRGEZ:
3594 EMIT_COND_BRANCH_BPR (ins, brgez, 1, 1, 1);
3597 /* floating point opcodes */
/* FP constants are loaded from memory through a patched address in %o7. */
3599 mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_R8, ins->inst_p0);
3601 sparc_set_template (code, sparc_o7);
3603 sparc_sethi (code, 0, sparc_o7);
3605 sparc_lddf_imm (code, sparc_o7, 0, ins->dreg);
3608 mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_R4, ins->inst_p0);
3610 sparc_set_template (code, sparc_o7);
3612 sparc_sethi (code, 0, sparc_o7);
3614 sparc_ldf_imm (code, sparc_o7, 0, FP_SCRATCH_REG);
3616 /* Extend to double */
3617 sparc_fstod (code, FP_SCRATCH_REG, ins->dreg);
3619 case OP_STORER8_MEMBASE_REG:
3620 if (!sparc_is_imm13 (ins->inst_offset + 4)) {
3621 sparc_set (code, ins->inst_offset, sparc_o7);
3622 /* SPARCV9 handles misaligned fp loads/stores */
3623 if (!v64 && (ins->inst_offset % 8)) {
3625 sparc_add (code, FALSE, ins->inst_destbasereg, sparc_o7, sparc_o7);
3626 sparc_stf (code, ins->sreg1, sparc_o7, sparc_g0);
3627 sparc_stf_imm (code, ins->sreg1 + 1, sparc_o7, 4);
3629 sparc_stdf (code, ins->sreg1, ins->inst_destbasereg, sparc_o7);
3632 if (!v64 && (ins->inst_offset % 8)) {
3634 sparc_stf_imm (code, ins->sreg1, ins->inst_destbasereg, ins->inst_offset);
3635 sparc_stf_imm (code, ins->sreg1 + 1, ins->inst_destbasereg, ins->inst_offset + 4);
3637 sparc_stdf_imm (code, ins->sreg1, ins->inst_destbasereg, ins->inst_offset);
3640 case OP_LOADR8_MEMBASE:
3641 EMIT_LOAD_MEMBASE (ins, lddf);
3643 case OP_STORER4_MEMBASE_REG:
3644 /* This requires a double->single conversion */
3645 sparc_fdtos (code, ins->sreg1, FP_SCRATCH_REG);
3646 if (!sparc_is_imm13 (ins->inst_offset)) {
3647 sparc_set (code, ins->inst_offset, sparc_o7);
3648 sparc_stf (code, FP_SCRATCH_REG, ins->inst_destbasereg, sparc_o7);
3651 sparc_stf_imm (code, FP_SCRATCH_REG, ins->inst_destbasereg, ins->inst_offset);
3653 case OP_LOADR4_MEMBASE: {
3654 /* ldf needs a single precision register */
3655 int dreg = ins->dreg;
3656 ins->dreg = FP_SCRATCH_REG;
3657 EMIT_LOAD_MEMBASE (ins, ldf);
3659 /* Extend to double */
3660 sparc_fstod (code, FP_SCRATCH_REG, ins->dreg);
/* int->float conversions: spill the integer to the per-frame float spill
 * slot, reload as fp, then convert in the fp unit. */
3663 case OP_ICONV_TO_R4: {
3664 gint32 offset = cfg->arch.float_spill_slot_offset;
3666 if (!sparc_is_imm13 (offset)) {
3667 sparc_set (code, offset, sparc_o7);
3668 sparc_stx (code, ins->sreg1, sparc_sp, offset);
3669 sparc_lddf (code, sparc_sp, offset, FP_SCRATCH_REG);
3671 sparc_stx_imm (code, ins->sreg1, sparc_sp, offset);
3672 sparc_lddf_imm (code, sparc_sp, offset, FP_SCRATCH_REG);
3674 sparc_fxtos (code, FP_SCRATCH_REG, FP_SCRATCH_REG);
3676 if (!sparc_is_imm13 (offset)) {
3677 sparc_set (code, offset, sparc_o7);
3678 sparc_st (code, ins->sreg1, sparc_sp, sparc_o7);
3679 sparc_ldf (code, sparc_sp, sparc_o7, FP_SCRATCH_REG);
3681 sparc_st_imm (code, ins->sreg1, sparc_sp, offset);
3682 sparc_ldf_imm (code, sparc_sp, offset, FP_SCRATCH_REG);
3684 sparc_fitos (code, FP_SCRATCH_REG, FP_SCRATCH_REG);
3686 sparc_fstod (code, FP_SCRATCH_REG, ins->dreg);
3689 case OP_ICONV_TO_R8: {
3690 gint32 offset = cfg->arch.float_spill_slot_offset;
3692 if (!sparc_is_imm13 (offset)) {
3693 sparc_set (code, offset, sparc_o7);
3694 sparc_stx (code, ins->sreg1, sparc_sp, sparc_o7);
3695 sparc_lddf (code, sparc_sp, sparc_o7, FP_SCRATCH_REG);
3697 sparc_stx_imm (code, ins->sreg1, sparc_sp, offset);
3698 sparc_lddf_imm (code, sparc_sp, offset, FP_SCRATCH_REG);
3700 sparc_fxtod (code, FP_SCRATCH_REG, ins->dreg);
3702 if (!sparc_is_imm13 (offset)) {
3703 sparc_set (code, offset, sparc_o7);
3704 sparc_st (code, ins->sreg1, sparc_sp, sparc_o7);
3705 sparc_ldf (code, sparc_sp, sparc_o7, FP_SCRATCH_REG);
3707 sparc_st_imm (code, ins->sreg1, sparc_sp, offset);
3708 sparc_ldf_imm (code, sparc_sp, offset, FP_SCRATCH_REG);
3710 sparc_fitod (code, FP_SCRATCH_REG, ins->dreg);
/* float->int conversions: convert in fp registers, round-trip through the
 * spill slot, then mask down to the destination width. */
3714 case OP_FCONV_TO_I1:
3715 case OP_FCONV_TO_U1:
3716 case OP_FCONV_TO_I2:
3717 case OP_FCONV_TO_U2:
3722 case OP_FCONV_TO_I4:
3723 case OP_FCONV_TO_U4: {
3724 gint32 offset = cfg->arch.float_spill_slot_offset;
3725 sparc_fdtoi (code, ins->sreg1, FP_SCRATCH_REG);
3726 if (!sparc_is_imm13 (offset)) {
3727 sparc_set (code, offset, sparc_o7);
3728 sparc_stdf (code, FP_SCRATCH_REG, sparc_sp, sparc_o7);
3729 sparc_ld (code, sparc_sp, sparc_o7, ins->dreg);
3731 sparc_stdf_imm (code, FP_SCRATCH_REG, sparc_sp, offset);
3732 sparc_ld_imm (code, sparc_sp, offset, ins->dreg);
3735 switch (ins->opcode) {
3736 case OP_FCONV_TO_I1:
3737 case OP_FCONV_TO_U1:
3738 sparc_and_imm (code, 0, ins->dreg, 0xff, ins->dreg);
3740 case OP_FCONV_TO_I2:
3741 case OP_FCONV_TO_U2:
3742 sparc_set (code, 0xffff, sparc_o7);
3743 sparc_and (code, 0, ins->dreg, sparc_o7, ins->dreg);
3750 case OP_FCONV_TO_I8:
3751 case OP_FCONV_TO_U8:
3753 g_assert_not_reached ();
3755 case OP_FCONV_TO_R4:
3756 /* FIXME: Change precision ? */
3758 sparc_fmovd (code, ins->sreg1, ins->dreg);
3760 sparc_fmovs (code, ins->sreg1, ins->dreg);
3761 sparc_fmovs (code, ins->sreg1 + 1, ins->dreg + 1);
3764 case OP_LCONV_TO_R_UN: {
3766 g_assert_not_reached ();
3769 case OP_LCONV_TO_OVF_I:
3770 case OP_LCONV_TO_OVF_I4_2: {
3771 guint32 *br [3], *label [1];
3774 * Valid range is 0xffffffff:0x80000000 .. 0x00000000:0x7fffffff,
3776 sparc_cmp_imm (code, ins->sreg1, 0);
3778 sparc_branch (code, 1, sparc_bneg, 0);
3782 /* ms word must be 0 */
3783 sparc_cmp_imm (code, ins->sreg2, 0);
3785 sparc_branch (code, 1, sparc_be, 0);
3790 EMIT_COND_SYSTEM_EXCEPTION (ins, sparc_ba, "OverflowException");
3793 sparc_patch (br [0], code);
3795 /* ms word must be 0xffffffff */
3796 sparc_cmp_imm (code, ins->sreg2, -1);
3798 sparc_branch (code, 1, sparc_bne, 0);
3800 sparc_patch (br [2], label [0]);
3803 sparc_patch (br [1], code);
3804 if (ins->sreg1 != ins->dreg)
3805 sparc_mov_reg_reg (code, ins->sreg1, ins->dreg);
/* --- double-precision fp arithmetic --- */
3809 sparc_faddd (code, ins->sreg1, ins->sreg2, ins->dreg);
3812 sparc_fsubd (code, ins->sreg1, ins->sreg2, ins->dreg);
3815 sparc_fmuld (code, ins->sreg1, ins->sreg2, ins->dreg);
3818 sparc_fdivd (code, ins->sreg1, ins->sreg2, ins->dreg);
3822 sparc_fnegd (code, ins->sreg1, ins->dreg);
3824 /* FIXME: why don't use fnegd ? */
3825 sparc_fnegs (code, ins->sreg1, ins->dreg);
/* fp remainder: a - (a / b) * b */
3829 sparc_fdivd (code, ins->sreg1, ins->sreg2, FP_SCRATCH_REG);
3830 sparc_fmuld (code, ins->sreg2, FP_SCRATCH_REG, FP_SCRATCH_REG);
3831 sparc_fsubd (code, ins->sreg1, FP_SCRATCH_REG, ins->dreg);
3834 sparc_fcmpd (code, ins->sreg1, ins->sreg2);
/* fp setcc: compare, then branch-over sequences set dreg to 0/1; the
 * extra fbu branch handles unordered (NaN) operands. */
3841 sparc_fcmpd (code, ins->sreg1, ins->sreg2);
3842 sparc_clr_reg (code, ins->dreg);
3843 switch (ins->opcode) {
3846 sparc_fbranch (code, 1, opcode_to_sparc_cond (ins->opcode), 4);
3848 sparc_set (code, 1, ins->dreg);
3849 sparc_fbranch (code, 1, sparc_fbu, 2);
3851 sparc_set (code, 1, ins->dreg);
3854 sparc_fbranch (code, 1, opcode_to_sparc_cond (ins->opcode), 2);
3856 sparc_set (code, 1, ins->dreg);
3862 EMIT_FLOAT_COND_BRANCH (ins, opcode_to_sparc_cond (ins->opcode), 1, 1);
3865 /* clt.un + brfalse */
3867 sparc_fbranch (code, 1, sparc_fbul, 0);
3870 EMIT_FLOAT_COND_BRANCH (ins, sparc_fba, 1, 1);
3871 sparc_patch (p, (guint8*)code);
3875 /* cgt.un + brfalse */
3877 sparc_fbranch (code, 1, sparc_fbug, 0);
3880 EMIT_FLOAT_COND_BRANCH (ins, sparc_fba, 1, 1);
3881 sparc_patch (p, (guint8*)code);
3885 EMIT_FLOAT_COND_BRANCH (ins, sparc_fbne, 1, 1);
3886 EMIT_FLOAT_COND_BRANCH (ins, sparc_fbu, 1, 1);
3889 EMIT_FLOAT_COND_BRANCH (ins, sparc_fbl, 1, 1);
3890 EMIT_FLOAT_COND_BRANCH (ins, sparc_fbu, 1, 1);
3893 EMIT_FLOAT_COND_BRANCH (ins, sparc_fbg, 1, 1);
3894 EMIT_FLOAT_COND_BRANCH (ins, sparc_fbu, 1, 1);
3897 EMIT_FLOAT_COND_BRANCH (ins, sparc_fbge, 1, 1);
3898 EMIT_FLOAT_COND_BRANCH (ins, sparc_fbu, 1, 1);
3901 EMIT_FLOAT_COND_BRANCH (ins, sparc_fble, 1, 1);
3902 EMIT_FLOAT_COND_BRANCH (ins, sparc_fbu, 1, 1);
/* Finiteness check: extract the biased exponent field of the double and
 * throw ArithmeticException when all exponent bits are set (Inf/NaN). */
3905 gint32 offset = cfg->arch.float_spill_slot_offset;
3906 if (!sparc_is_imm13 (offset)) {
3907 sparc_set (code, offset, sparc_o7);
3908 sparc_stdf (code, ins->sreg1, sparc_sp, sparc_o7);
3909 sparc_lduh (code, sparc_sp, sparc_o7, sparc_o7);
3911 sparc_stdf_imm (code, ins->sreg1, sparc_sp, offset);
3912 sparc_lduh_imm (code, sparc_sp, offset, sparc_o7);
3914 sparc_srl_imm (code, sparc_o7, 4, sparc_o7);
3915 sparc_and_imm (code, FALSE, sparc_o7, 2047, sparc_o7);
3916 sparc_cmp_imm (code, sparc_o7, 2047);
3917 EMIT_COND_SYSTEM_EXCEPTION (ins, sparc_be, "ArithmeticException");
3919 sparc_fmovd (code, ins->sreg1, ins->dreg);
3921 sparc_fmovs (code, ins->sreg1, ins->dreg);
3922 sparc_fmovs (code, ins->sreg1 + 1, ins->dreg + 1);
3927 case OP_MEMORY_BARRIER:
3928 sparc_membar (code, sparc_membar_all);
3933 g_warning ("unknown opcode %s in %s()\n", mono_inst_name (ins->opcode), __FUNCTION__);
3935 g_warning ("%s:%d: unknown opcode %s\n", __FILE__, __LINE__, mono_inst_name (ins->opcode));
3937 g_assert_not_reached ();
/* Sanity check: emitted length must not exceed the spec table's max_len. */
3940 if ((((guint8*)code) - code_start) > max_len) {
3941 g_warning ("wrong maximal instruction length of instruction %s (expected %d, got %d)",
3942 mono_inst_name (ins->opcode), max_len, ((guint8*)code) - code_start);
3943 g_assert_not_reached ();
/* Commit the new end-of-code offset. */
3951 cfg->code_len = (guint8*)code - cfg->native_code;
/*
 * mono_arch_register_lowlevel_calls:
 *   Register the arch-specific low-level helpers needed by generated code
 *   with the JIT icall machinery; here only mono_arch_get_lmf_addr.
 */
3955 mono_arch_register_lowlevel_calls (void)
3957 mono_register_jit_icall (mono_arch_get_lmf_addr, "mono_arch_get_lmf_addr", NULL, TRUE);
/*
 * mono_arch_patch_code:
 *   Walk the jump-info list JI for METHOD and fix up each recorded site in
 *   the emitted code: resolve the target via mono_resolve_patch_target (),
 *   then rewrite the instruction(s) at ip. CLASS_INIT and METHOD_JUMP sites
 *   get special instruction-sequence rewrites; everything else goes through
 *   the generic sparc_patch ().
 */
3961 mono_arch_patch_code (MonoMethod *method, MonoDomain *domain, guint8 *code, MonoJumpInfo *ji, gboolean run_cctors)
3963 MonoJumpInfo *patch_info;
3965 /* FIXME: Move part of this to arch independent code */
3966 for (patch_info = ji; patch_info; patch_info = patch_info->next) {
3967 unsigned char *ip = patch_info->ip.i + code;
3970 target = mono_resolve_patch_target (method, domain, code, patch_info, run_cctors);
3972 switch (patch_info->type) {
3973 case MONO_PATCH_INFO_NONE:
3975 case MONO_PATCH_INFO_CLASS_INIT: {
3976 guint32 *ip2 = (guint32*)ip;
3977 /* Might already have been changed to a nop */
3979 sparc_set_template (ip2, sparc_o7);
3980 sparc_jmpl (ip2, sparc_o7, sparc_g0, sparc_o7);
3982 sparc_call_simple (ip2, 0);
3986 case MONO_PATCH_INFO_METHOD_JUMP: {
3987 guint32 *ip2 = (guint32*)ip;
3988 /* Might already have been patched */
3989 sparc_set_template (ip2, sparc_o7);
3995 sparc_patch ((guint32*)ip, target);
/*
 * mono_arch_instrument_prolog:
 *
 *   Emit the enter-method tracing call at P (the current code pointer).
 * Spills the incoming %i0-%i5 registers (and FP argument registers on V9,
 * which are caller-saved) to the argument area, calls FUNC with the method
 * and the callee frame address, then reloads the FP argument registers.
 * Returns the updated code pointer.
 */
4000 mono_arch_instrument_prolog (MonoCompile *cfg, void *func, void *p, gboolean enable_arguments)
4003 guint32 *code = (guint32*)p;
4004 MonoMethodSignature *sig = mono_method_signature (cfg->method);
4007 /* Save registers to stack */
4008 for (i = 0; i < 6; ++i)
4009 sparc_sti_imm (code, sparc_i0 + i, sparc_fp, ARGS_OFFSET + (i * sizeof (gpointer)));
4011 cinfo = get_call_info (cfg, sig, FALSE);
4013 /* Save float regs on V9, since they are caller saved */
4014 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
4015 ArgInfo *ainfo = cinfo->args + i;
4016 gint32 stack_offset;
4018 stack_offset = ainfo->offset + ARGS_OFFSET;
4020 if (ainfo->storage == ArgInFloatReg) {
/* Offsets beyond imm13 range are not handled here (line elided in extract) */
4021 if (!sparc_is_imm13 (stack_offset))
4023 sparc_stf_imm (code, ainfo->reg, sparc_fp, stack_offset);
4025 else if (ainfo->storage == ArgInDoubleReg) {
4026 /* The offset is guaranteed to be aligned by the ABI rules */
4027 sparc_stdf_imm (code, ainfo->reg, sparc_fp, stack_offset);
/* Arguments to the trace function: the method, and the callee's frame
 * address (%fp adjusted by the V9 stack bias) */
4031 sparc_set (code, cfg->method, sparc_o0);
4032 sparc_add_imm (code, FALSE, sparc_fp, MONO_SPARC_STACK_BIAS, sparc_o1);
4034 mono_add_patch_info (cfg, (guint8*)code-cfg->native_code, MONO_PATCH_INFO_ABS, func);
4037 /* Restore float regs on V9 */
4038 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
4039 ArgInfo *ainfo = cinfo->args + i;
4040 gint32 stack_offset;
4042 stack_offset = ainfo->offset + ARGS_OFFSET;
4044 if (ainfo->storage == ArgInFloatReg) {
4045 if (!sparc_is_imm13 (stack_offset))
4047 sparc_ldf_imm (code, sparc_fp, stack_offset, ainfo->reg);
4049 else if (ainfo->storage == ArgInDoubleReg) {
4050 /* The offset is guaranteed to be aligned by the ABI rules */
4051 sparc_lddf_imm (code, sparc_fp, stack_offset, ainfo->reg);
/*
 * mono_arch_instrument_epilog:
 *
 *   Emit the leave-method tracing call at P. Picks a save mode from the
 * method's return type, spills the return value to the frame and copies it
 * into outgoing registers for FUNC, calls FUNC, then restores the return
 * value. Returns the updated code pointer.
 */
4069 mono_arch_instrument_epilog (MonoCompile *cfg, void *func, void *p, gboolean enable_arguments)
4071 guint32 *code = (guint32*)p;
4072 int save_mode = SAVE_NONE;
4073 MonoMethod *method = cfg->method;
4075 switch (mono_type_get_underlying_type (mono_method_signature (method)->ret)->type) {
4076 case MONO_TYPE_VOID:
4077 /* special case string .ctor icall */
/* NOTE(review): condition looks inverted — 'strcmp(...) == 0' (i.e. !strcmp)
 * is what "name equals .ctor" requires; verify against upstream mini-sparc.c */
4078 if (strcmp (".ctor", method->name) && method->klass == mono_defaults.string_class)
4079 save_mode = SAVE_ONE;
4081 save_mode = SAVE_NONE;
4086 save_mode = SAVE_ONE;
4088 save_mode = SAVE_TWO;
4093 save_mode = SAVE_FP;
4095 case MONO_TYPE_VALUETYPE:
4096 save_mode = SAVE_STRUCT;
4099 save_mode = SAVE_ONE;
4103 /* Save the result to the stack and also put it into the output registers */
4105 switch (save_mode) {
/* SAVE_TWO: 64-bit result in the %i0/%i1 pair.
 * NOTE(review): both stores use sparc_i0 — the second store (offset 72) is
 * expected to spill sparc_i1; likely garbled in this extract, verify. */
4108 sparc_st_imm (code, sparc_i0, sparc_fp, 68);
4109 sparc_st_imm (code, sparc_i0, sparc_fp, 72);
4110 sparc_mov_reg_reg (code, sparc_i0, sparc_o1);
4111 sparc_mov_reg_reg (code, sparc_i1, sparc_o2);
4114 sparc_sti_imm (code, sparc_i0, sparc_fp, ARGS_OFFSET);
4115 sparc_mov_reg_reg (code, sparc_i0, sparc_o1);
/* SAVE_FP: spill %f0/%f1 double and reload its two words into %o1/%o2 */
4119 sparc_stdf_imm (code, sparc_f0, sparc_fp, ARGS_OFFSET);
4121 sparc_stdf_imm (code, sparc_f0, sparc_fp, 72);
4122 sparc_ld_imm (code, sparc_fp, 72, sparc_o1);
4123 sparc_ld_imm (code, sparc_fp, 72 + 4, sparc_o2);
4128 sparc_mov_reg_reg (code, sparc_i0, sparc_o1);
4130 sparc_ld_imm (code, sparc_fp, 64, sparc_o1);
4138 sparc_set (code, cfg->method, sparc_o0);
4140 mono_add_patch_info (cfg, (guint8*)code - cfg->native_code, MONO_PATCH_INFO_ABS, func);
4143 /* Restore result */
4145 switch (save_mode) {
/* NOTE(review): second load targets sparc_i0 again — expected sparc_i1 for
 * the SAVE_TWO pair; likely garbled in this extract, verify. */
4147 sparc_ld_imm (code, sparc_fp, 68, sparc_i0);
4148 sparc_ld_imm (code, sparc_fp, 72, sparc_i0);
4151 sparc_ldi_imm (code, sparc_fp, ARGS_OFFSET, sparc_i0);
4154 sparc_lddf_imm (code, sparc_fp, ARGS_OFFSET, sparc_f0);
/*
 * mono_arch_emit_prolog:
 *
 *   Emit the method prolog: allocate the initial native code buffer,
 * compute the total frame size (locals + register save area + outgoing
 * parameter area, aligned to MONO_ARCH_FRAME_ALIGNMENT), emit the SPARC
 * 'save' instruction, spill incoming arguments to their home stack slots
 * as required by the calling convention, set up the LMF if requested, and
 * optionally emit the enter-method trace call. Returns the updated code
 * pointer (as guint8*).
 */
4165 mono_arch_emit_prolog (MonoCompile *cfg)
4167 MonoMethod *method = cfg->method;
4168 MonoMethodSignature *sig;
4174 cfg->code_size = 256;
4175 cfg->native_code = g_malloc (cfg->code_size);
4176 code = (guint32*)cfg->native_code;
4178 /* FIXME: Generate intermediate code instead */
/* ---- Frame size computation ---- */
4180 offset = cfg->stack_offset;
4181 offset += (16 * sizeof (gpointer)); /* register save area */
4183 offset += 4; /* struct/union return pointer */
4186 /* add parameter area size for called functions */
4187 if (cfg->param_area < (6 * sizeof (gpointer)))
4188 /* Reserve space for the first 6 arguments even if it is unused */
4189 offset += 6 * sizeof (gpointer);
4191 offset += cfg->param_area;
4193 /* align the stack size */
4194 offset = ALIGN_TO (offset, MONO_ARCH_FRAME_ALIGNMENT);
4197 * localloc'd memory is stored between the local variables (whose
4198 * size is given by cfg->stack_offset), and between the space reserved
4201 cfg->arch.localloc_offset = offset - cfg->stack_offset;
4203 cfg->stack_offset = offset;
4205 #ifdef MONO_ARCH_SIGSEGV_ON_ALTSTACK
4206 /* Perform stack touching */
/* ---- Emit the register-window save; large frames need the size in a
 * scratch register since 'save' only takes a 13-bit immediate ---- */
4210 if (!sparc_is_imm13 (- cfg->stack_offset)) {
4211 /* Can't use sparc_o7 here, since we're still in the caller's frame */
4212 sparc_set (code, (- cfg->stack_offset), GP_SCRATCH_REG);
4213 sparc_save (code, sparc_sp, GP_SCRATCH_REG, sparc_sp);
4216 sparc_save_imm (code, sparc_sp, - cfg->stack_offset, sparc_sp);
/* Debug aid: break into the debugger for methods whose name contains "foo" */
4219 if (strstr (cfg->method->name, "foo")) {
4220 mono_add_patch_info (cfg, (guint8*)code - cfg->native_code, MONO_PATCH_INFO_ABS, mono_sparc_break);
4221 sparc_call_simple (code, 0);
4226 sig = mono_method_signature (method);
4228 cinfo = get_call_info (cfg, sig, FALSE);
/* ---- Spill incoming arguments to their stack homes ---- */
4230 /* Keep in sync with emit_load_volatile_arguments */
4231 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
4232 ArgInfo *ainfo = cinfo->args + i;
4233 gint32 stack_offset;
4235 inst = cfg->args [i];
4237 if (sig->hasthis && (i == 0))
4238 arg_type = &mono_defaults.object_class->byval_arg;
4240 arg_type = sig->params [i - sig->hasthis];
4242 stack_offset = ainfo->offset + ARGS_OFFSET;
4244 /* Save the split arguments so they will reside entirely on the stack */
4245 if (ainfo->storage == ArgInSplitRegStack) {
4246 /* Save the register to the stack */
4247 g_assert (inst->opcode == OP_REGOFFSET);
4248 if (!sparc_is_imm13 (stack_offset))
4250 sparc_st_imm (code, sparc_i5, inst->inst_basereg, stack_offset);
/* V8 only: doubles must live at dword-aligned offsets, but the arg area
 * need not be aligned, so copy into the aligned home slot */
4253 if (!v64 && !arg_type->byref && (arg_type->type == MONO_TYPE_R8)) {
4254 /* Save the argument to a dword aligned stack location */
4256 * stack_offset contains the offset of the argument on the stack.
4257 * inst->inst_offset contains the dword aligned offset where the value
4260 if (ainfo->storage == ArgInIRegPair) {
4261 if (!sparc_is_imm13 (inst->inst_offset + 4))
4263 sparc_st_imm (code, sparc_i0 + ainfo->reg, inst->inst_basereg, inst->inst_offset);
4264 sparc_st_imm (code, sparc_i0 + ainfo->reg + 1, inst->inst_basereg, inst->inst_offset + 4);
4267 if (ainfo->storage == ArgInSplitRegStack) {
4269 g_assert_not_reached ();
4271 if (stack_offset != inst->inst_offset) {
4272 /* stack_offset is not dword aligned, so we need to make a copy */
4273 sparc_st_imm (code, sparc_i5, inst->inst_basereg, inst->inst_offset);
4274 sparc_ld_imm (code, sparc_fp, stack_offset + 4, sparc_o7);
4275 sparc_st_imm (code, sparc_o7, inst->inst_basereg, inst->inst_offset + 4);
4279 if (ainfo->storage == ArgOnStackPair) {
4281 g_assert_not_reached ();
4283 if (stack_offset != inst->inst_offset) {
4284 /* stack_offset is not dword aligned, so we need to make a copy */
4285 sparc_ld_imm (code, sparc_fp, stack_offset, sparc_o7);
4286 sparc_st_imm (code, sparc_o7, inst->inst_basereg, inst->inst_offset);
4287 sparc_ld_imm (code, sparc_fp, stack_offset + 4, sparc_o7);
4288 sparc_st_imm (code, sparc_o7, inst->inst_basereg, inst->inst_offset + 4);
4292 g_assert_not_reached ();
4295 if ((ainfo->storage == ArgInIReg) && (inst->opcode != OP_REGVAR)) {
4296 /* Argument in register, but need to be saved to stack */
4297 if (!sparc_is_imm13 (stack_offset))
/* Store width is inferred from the slot's misalignment relative to
 * ARGS_OFFSET: byte, half-word, word, else full register */
4299 if ((stack_offset - ARGS_OFFSET) & 0x1)
4300 sparc_stb_imm (code, sparc_i0 + ainfo->reg, inst->inst_basereg, stack_offset);
4302 if ((stack_offset - ARGS_OFFSET) & 0x2)
4303 sparc_sth_imm (code, sparc_i0 + ainfo->reg, inst->inst_basereg, stack_offset);
4305 if ((stack_offset - ARGS_OFFSET) & 0x4)
4306 sparc_st_imm (code, sparc_i0 + ainfo->reg, inst->inst_basereg, stack_offset);
4309 sparc_stx_imm (code, sparc_i0 + ainfo->reg, inst->inst_basereg, stack_offset);
4311 sparc_st_imm (code, sparc_i0 + ainfo->reg, inst->inst_basereg, stack_offset);
4315 if ((ainfo->storage == ArgInIRegPair) && (inst->opcode != OP_REGVAR)) {
4319 /* Argument in regpair, but need to be saved to stack */
4320 if (!sparc_is_imm13 (inst->inst_offset + 4))
4322 sparc_st_imm (code, sparc_i0 + ainfo->reg, inst->inst_basereg, inst->inst_offset);
4323 sparc_st_imm (code, sparc_i0 + ainfo->reg + 1, inst->inst_basereg, inst->inst_offset + 4);
4325 else if ((ainfo->storage == ArgInFloatReg) && (inst->opcode != OP_REGVAR)) {
4326 if (!sparc_is_imm13 (stack_offset))
4328 sparc_stf_imm (code, ainfo->reg, inst->inst_basereg, inst->inst_offset);
4330 else if ((ainfo->storage == ArgInDoubleReg) && (inst->opcode != OP_REGVAR)) {
4331 /* The offset is guaranteed to be aligned by the ABI rules */
4332 sparc_stdf_imm (code, ainfo->reg, inst->inst_basereg, inst->inst_offset);
4335 if ((ainfo->storage == ArgInFloatReg) && (inst->opcode == OP_REGVAR)) {
4336 /* Need to move into a double precision register */
4337 sparc_fstod (code, ainfo->reg, ainfo->reg - 1);
4340 if ((ainfo->storage == ArgInSplitRegStack) || (ainfo->storage == ArgOnStack))
4341 if (inst->opcode == OP_REGVAR)
4342 /* FIXME: Load the argument into memory */
/* ---- LMF setup: record ip/sp/fp/method and link into the LMF chain ---- */
4348 if (cfg->method->save_lmf) {
4349 gint32 lmf_offset = STACK_BIAS - cfg->arch.lmf_offset;
4352 mono_add_patch_info (cfg, (guint8*)code - cfg->native_code, MONO_PATCH_INFO_IP, NULL);
4353 sparc_set_template (code, sparc_o7);
4354 sparc_sti_imm (code, sparc_o7, sparc_fp, lmf_offset + G_STRUCT_OFFSET (MonoLMF, ip));
4356 sparc_sti_imm (code, sparc_sp, sparc_fp, lmf_offset + G_STRUCT_OFFSET (MonoLMF, sp));
4358 sparc_sti_imm (code, sparc_fp, sparc_fp, lmf_offset + G_STRUCT_OFFSET (MonoLMF, ebp));
4360 /* FIXME: add a relocation for this */
4361 sparc_set (code, cfg->method, sparc_o7);
4362 sparc_sti_imm (code, sparc_o7, sparc_fp, lmf_offset + G_STRUCT_OFFSET (MonoLMF, method));
4364 mono_add_patch_info (cfg, (guint8*)code - cfg->native_code, MONO_PATCH_INFO_INTERNAL_METHOD,
4365 (gpointer)"mono_arch_get_lmf_addr");
4368 code = (guint32*)mono_sparc_emit_save_lmf (code, lmf_offset);
4371 if (mono_jit_trace_calls != NULL && mono_trace_eval (method))
4372 code = mono_arch_instrument_prolog (cfg, mono_trace_enter_method, code, TRUE);
4374 cfg->code_len = (guint8*)code - cfg->native_code;
4376 g_assert (cfg->code_len <= cfg->code_size);
4378 return (guint8*)code;
/*
 * mono_arch_emit_epilog:
 *
 *   Emit the method epilog: grow the code buffer if needed, emit the
 * leave-method trace call and LMF restore when applicable, then emit
 * ret/restore — folding the final move into %i0 into the 'restore'
 * instruction itself when that is provably safe.
 */
4382 mono_arch_emit_epilog (MonoCompile *cfg)
4384 MonoMethod *method = cfg->method;
/* Worst-case epilog size estimate used to grow the buffer up front */
4387 int max_epilog_size = 16 + 20 * 4;
4389 if (cfg->method->save_lmf)
4390 max_epilog_size += 128;
4392 if (mono_jit_trace_calls != NULL)
4393 max_epilog_size += 50;
4395 if (cfg->prof_options & MONO_PROFILE_ENTER_LEAVE)
4396 max_epilog_size += 50;
4398 while (cfg->code_len + max_epilog_size > (cfg->code_size - 16)) {
4399 cfg->code_size *= 2;
4400 cfg->native_code = g_realloc (cfg->native_code, cfg->code_size);
4401 mono_jit_stats.code_reallocs++;
4404 code = (guint32*)(cfg->native_code + cfg->code_len);
4406 if (mono_jit_trace_calls != NULL && mono_trace_eval (method))
4407 code = mono_arch_instrument_epilog (cfg, mono_trace_leave_method, code, TRUE);
4409 if (cfg->method->save_lmf) {
4410 gint32 lmf_offset = STACK_BIAS - cfg->arch.lmf_offset;
4412 code = mono_sparc_emit_restore_lmf (code, lmf_offset);
4416 * The V8 ABI requires that calls to functions which return a structure
4419 if (!v64 && mono_method_signature (cfg->method)->pinvoke && MONO_TYPE_ISSTRUCT(mono_method_signature (cfg->method)->ret))
/* Return to %i7+12 (skipping the caller's unimp instruction) per V8 struct-return ABI */
4420 sparc_jmpl_imm (code, sparc_i7, 12, sparc_g0);
4424 /* Only fold last instruction into the restore if the exit block has an in count of 1
4425 and the previous block hasn't been optimized away since it may have an in count > 1 */
4426 if (cfg->bb_exit->in_count == 1 && cfg->bb_exit->in_bb[0]->native_offset != cfg->bb_exit->native_offset)
4431 * FIXME: The last instruction might have a branch pointing into it like in
4432 * int_ceq sparc_i0 <-
4437 /* Try folding last instruction into the restore */
/* Case 1: 'or reg, imm, %i0' (op=2, op3=2, imm form) becomes 'restore reg, imm, %o0' */
4438 if (can_fold && (sparc_inst_op (code [-2]) == 0x2) && (sparc_inst_op3 (code [-2]) == 0x2) && sparc_inst_imm (code [-2]) && (sparc_inst_rd (code [-2]) == sparc_i0)) {
4439 /* or reg, imm, %i0 */
4440 int reg = sparc_inst_rs1 (code [-2]);
/* Sign-extend the 13-bit immediate field to 32 bits */
4441 int imm = (((gint32)(sparc_inst_imm13 (code [-2]))) << 19) >> 19;
4442 code [-2] = code [-1];
4444 sparc_restore_imm (code, reg, imm, sparc_o0);
/* Case 2: register form 'or reg, reg, %i0' becomes 'restore reg1, reg2, %o0' */
4447 if (can_fold && (sparc_inst_op (code [-2]) == 0x2) && (sparc_inst_op3 (code [-2]) == 0x2) && (!sparc_inst_imm (code [-2])) && (sparc_inst_rd (code [-2]) == sparc_i0)) {
4448 /* or reg, reg, %i0 */
4449 int reg1 = sparc_inst_rs1 (code [-2]);
4450 int reg2 = sparc_inst_rs2 (code [-2]);
4451 code [-2] = code [-1];
4453 sparc_restore (code, reg1, reg2, sparc_o0);
/* Fallback: plain 'restore %g0, 0, %g0' */
4456 sparc_restore_imm (code, sparc_g0, 0, sparc_g0);
4458 cfg->code_len = (guint8*)code - cfg->native_code;
4460 g_assert (cfg->code_len < cfg->code_size);
/*
 * mono_arch_emit_exceptions:
 *
 *   Emit the out-of-line exception-throw sequences appended after the
 * method body. Each MONO_PATCH_INFO_EXC site is patched to branch here;
 * throw sequences for the same exception class are shared — later sites
 * just set %o1 (the ip offset) and branch into the first sequence.
 * The actual throw goes through mono_arch_throw_corlib_exception with
 * the type index in %o0 and the (throw ip - current ip) offset in %o1.
 */
4465 mono_arch_emit_exceptions (MonoCompile *cfg)
4467 MonoJumpInfo *patch_info;
/* Cache of up to 16 distinct exception classes for sequence sharing */
4472 MonoClass *exc_classes [16];
4473 guint8 *exc_throw_start [16], *exc_throw_end [16];
4475 /* Compute needed space */
4476 for (patch_info = cfg->patch_info; patch_info; patch_info = patch_info->next) {
4477 if (patch_info->type == MONO_PATCH_INFO_EXC)
4482 * make sure we have enough space for exceptions
4485 code_size = exc_count * (20 * 4);
4487 code_size = exc_count * 24;
4490 while (cfg->code_len + code_size > (cfg->code_size - 16)) {
4491 cfg->code_size *= 2;
4492 cfg->native_code = g_realloc (cfg->native_code, cfg->code_size);
4493 mono_jit_stats.code_reallocs++;
4496 code = (guint32*)(cfg->native_code + cfg->code_len);
4498 for (patch_info = cfg->patch_info; patch_info; patch_info = patch_info->next) {
4499 switch (patch_info->type) {
4500 case MONO_PATCH_INFO_EXC: {
4501 MonoClass *exc_class;
4502 guint32 *buf, *buf2;
4503 guint32 throw_ip, type_idx;
/* Redirect the in-body branch at ip.i to the sequence emitted below */
4506 sparc_patch ((guint32*)(cfg->native_code + patch_info->ip.i), code);
4508 exc_class = mono_class_from_name (mono_defaults.corlib, "System", patch_info->data.name);
4509 g_assert (exc_class);
/* Pass the TypeDef row index rather than the full token — smaller sparc_set */
4510 type_idx = exc_class->type_token - MONO_TOKEN_TYPE_DEF;
4511 throw_ip = patch_info->ip.i;
4513 /* Find a throw sequence for the same exception class */
4514 for (i = 0; i < nthrows; ++i)
4515 if (exc_classes [i] == exc_class)
/* Shared-sequence path: set %o1 and branch into the existing sequence;
 * the imm13 form can go in the branch delay slot, hence the two orderings */
4519 guint32 throw_offset = (((guint8*)exc_throw_end [i] - cfg->native_code) - throw_ip) >> 2;
4520 if (!sparc_is_imm13 (throw_offset))
4521 sparc_set32 (code, throw_offset, sparc_o1);
4523 disp = (exc_throw_start [i] - (guint8*)code) >> 2;
4524 g_assert (sparc_is_imm22 (disp));
4525 sparc_branch (code, 0, sparc_ba, disp);
4526 if (sparc_is_imm13 (throw_offset))
4527 sparc_set32 (code, throw_offset, sparc_o1);
4530 patch_info->type = MONO_PATCH_INFO_NONE;
4533 /* Emit the template for setting o1 */
4535 if (sparc_is_imm13 (((((guint8*)code - cfg->native_code) - throw_ip) >> 2) - 8))
4536 /* Can use a short form */
4539 sparc_set_template (code, sparc_o1);
4543 exc_classes [nthrows] = exc_class;
4544 exc_throw_start [nthrows] = (guint8*)code;
4548 mono_add_patch_info (cfg, (guint8*)code - cfg->native_code, MONO_PATCH_INFO_ABS, mono_sparc_break);
4552 /* first arg = type token */
4553 /* Pass the type index to reduce the size of the sparc_set */
4554 if (!sparc_is_imm13 (type_idx))
4555 sparc_set32 (code, type_idx, sparc_o0);
4557 /* second arg = offset between the throw ip and the current ip */
4558 /* On sparc, the saved ip points to the call instruction */
4559 disp = (((guint8*)code - cfg->native_code) - throw_ip) >> 2;
4560 sparc_set32 (buf, disp, sparc_o1);
4565 exc_throw_end [nthrows] = (guint8*)code;
/* Retarget this patch entry at the throw helper call just emitted */
4569 patch_info->data.name = "mono_arch_throw_corlib_exception";
4570 patch_info->type = MONO_PATCH_INFO_INTERNAL_METHOD;
4571 patch_info->ip.i = (guint8*)code - cfg->native_code;
4575 if (sparc_is_imm13 (type_idx)) {
4576 /* Put it into the delay slot */
4579 sparc_set32 (code, type_idx, sparc_o0);
4580 g_assert (code - buf == 1);
4591 cfg->code_len = (guint8*)code - cfg->native_code;
4593 g_assert (cfg->code_len < cfg->code_size);
/*
 * TLS key holding the address of the current thread's LMF pointer.
 * Created lazily in mono_arch_setup_jit_tls_data; read in
 * mono_arch_get_lmf_addr. The Solaris threads (thr_*) variant is selected
 * by MONO_SPARC_THR_TLS, otherwise pthreads is used.
 */
4597 gboolean lmf_addr_key_inited = FALSE;
4599 #ifdef MONO_SPARC_THR_TLS
4600 thread_key_t lmf_addr_key;
4602 pthread_key_t lmf_addr_key;
/*
 * mono_arch_get_lmf_addr:
 *
 *   Return the address of the current thread's LMF pointer, stored under
 * lmf_addr_key. Registered as a JIT icall and called from generated code.
 */
4606 mono_arch_get_lmf_addr (void)
4608 /* This is perf critical so we bypass the IO layer */
4609 /* The thr_... functions seem to be somewhat faster */
4610 #ifdef MONO_SPARC_THR_TLS
4612 thr_getspecific (lmf_addr_key, &res);
4615 return pthread_getspecific (lmf_addr_key);
/* Compile-time guard: altstack SIGSEGV handling cannot work here because
 * stack boundaries cannot be queried (see the comment fragment below). */
4619 #ifdef MONO_ARCH_SIGSEGV_ON_ALTSTACK
4622 * There seems to be no way to determine stack boundaries under solaris,
4623 * so it's not possible to determine whenever a SIGSEGV is caused by stack
4626 #error "--with-sigaltstack=yes not supported on solaris"
/*
 * mono_arch_setup_jit_tls_data:
 *
 *   Per-thread JIT TLS setup: lazily create the lmf_addr TLS key, then
 * store the address of this thread's LMF pointer (&tls->lmf) under it.
 * NOTE(review): the lazy init is not thread-safe — lmf_addr_key_inited is
 * a plain gboolean with no lock/atomic visible here; two threads racing
 * through first-call setup could create the key twice. Verify callers
 * serialize thread attach, or guard this with a mutex.
 */
4631 mono_arch_setup_jit_tls_data (MonoJitTlsData *tls)
4633 if (!lmf_addr_key_inited) {
4636 lmf_addr_key_inited = TRUE;
4638 #ifdef MONO_SPARC_THR_TLS
4639 res = thr_keycreate (&lmf_addr_key, NULL);
4641 res = pthread_key_create (&lmf_addr_key, NULL);
4643 g_assert (res == 0);
4647 #ifdef MONO_SPARC_THR_TLS
4648 thr_setspecific (lmf_addr_key, &tls->lmf);
4650 pthread_setspecific (lmf_addr_key, &tls->lmf);
/* Per-thread JIT TLS teardown hook. Body not visible in this extract;
 * presumably a no-op since the TLS value points into caller-owned
 * MonoJitTlsData — verify against the full source. */
4655 mono_arch_free_jit_tls_data (MonoJitTlsData *tls)
/*
 * mono_arch_emit_this_vret_args:
 *
 *   Add the 'this' pointer and/or the valuetype-return buffer pointer as
 * out-arguments of CALL. The vret pointer goes in %o0 (shifting 'this' to
 * %o1) on the register path, or into the struct-return slot at sp+64 on
 * the stack path; 'this' is moved through a fresh vreg bound to its
 * output register.
 */
4660 mono_arch_emit_this_vret_args (MonoCompile *cfg, MonoCallInst *call, int this_reg, int this_type, int vt_reg)
4662 int this_out_reg = sparc_o0;
4667 MONO_INST_NEW (cfg, ins, OP_MOVE);
4668 ins->sreg1 = vt_reg;
4669 ins->dreg = mono_regstate_next_int (cfg->rs);
4670 mono_bblock_add_inst (cfg->cbb, ins);
4672 mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, sparc_o0, FALSE);
/* vret pointer occupies %o0, so 'this' shifts to %o1 */
4674 this_out_reg = sparc_o1;
4676 /* Set the 'struct/union return pointer' location on the stack */
4677 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, sparc_sp, 64, vt_reg);
4681 /* add the this argument */
4682 if (this_reg != -1) {
4684 MONO_INST_NEW (cfg, this, OP_MOVE);
4685 this->type = this_type;
4686 this->sreg1 = this_reg;
4687 this->dreg = mono_regstate_next_int (cfg->rs);
4688 mono_bblock_add_inst (cfg->cbb, this);
4690 mono_call_inst_add_outarg_reg (cfg, call, this->dreg, this_out_reg, FALSE);
/* Intrinsic hook (tree-based JIT): return an arch-specific MonoInst for
 * CMETHOD, or NULL for none. Visible body only initializes ins to NULL —
 * no intrinsics implemented on this backend as far as this extract shows. */
4696 mono_arch_get_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
4698 MonoInst *ins = NULL;
/* Intrinsic hook (linear-IR JIT): same contract as
 * mono_arch_get_inst_for_method; NULL means no arch intrinsic. */
4704 mono_arch_emit_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
4706 MonoInst *ins = NULL;
4712 * mono_arch_get_argument_info:
4713 * @csig: a method signature
4714 * @param_count: the number of parameters to consider
4715 * @arg_info: an array to store the result infos
4717 * Gathers information on parameters such as size, alignment and
4718 * padding. arg_info should be large enough to hold param_count + 1 entries.
4720 * Returns the size of the activation frame.
/*
 * See the doc comment above: fills arg_info[0..param_count] with offsets
 * (relative to the un-biased frame pointer) and sizes for 'this' plus each
 * parameter, using the same get_call_info () layout as prolog emission.
 */
4723 mono_arch_get_argument_info (MonoMethodSignature *csig, int param_count, MonoJitArgumentInfo *arg_info)
4729 cinfo = get_call_info (NULL, csig, FALSE);
4731 if (csig->hasthis) {
4732 ainfo = &cinfo->args [0];
/* Undo the V9 stack bias so offsets are relative to the raw %fp */
4733 arg_info [0].offset = ARGS_OFFSET - MONO_SPARC_STACK_BIAS + ainfo->offset;
4736 for (k = 0; k < param_count; k++) {
4737 ainfo = &cinfo->args [k + csig->hasthis];
4739 arg_info [k + 1].offset = ARGS_OFFSET - MONO_SPARC_STACK_BIAS + ainfo->offset;
4740 arg_info [k + 1].size = mono_type_size (csig->params [k], &align);
/* Arch hook: pretty-print an arch-specific tree node; body elided in this
 * extract (stub on this backend as far as visible). */
4749 mono_arch_print_tree (MonoInst *tree, int arity)
/* Arch hook: fast inline access to the current MonoDomain; body elided —
 * presumably returns NULL (no intrinsic), verify against full source. */
4754 MonoInst* mono_arch_get_domain_intrinsic (MonoCompile* cfg)
/* Arch hook: fast inline access to the current thread; body elided —
 * presumably returns NULL (no intrinsic), verify against full source. */
4759 MonoInst* mono_arch_get_thread_intrinsic (MonoCompile* cfg)
4765 mono_arch_context_get_int_reg (MonoContext *ctx, int reg)
4767 /* FIXME: implement */
4768 g_assert_not_reached ();