3 * Sparc backend for the Mono code generator
6 * Paolo Molaro (lupus@ximian.com)
7 * Dietmar Maurer (dietmar@ximian.com)
10 * Christopher Taylor (ct@gentoo.org)
11 * Mark Crichton (crichton@gimp.org)
12 * Zoltan Varga (vargaz@freemail.hu)
14 * (C) 2003 Ximian, Inc.
28 #include <mono/metadata/appdomain.h>
29 #include <mono/metadata/debug-helpers.h>
30 #include <mono/metadata/tokentype.h>
31 #include <mono/utils/mono-math.h>
32 #include <mono/utils/mono-hwcap.h>
33 #include <mono/utils/unlocked.h>
35 #include "mini-sparc.h"
37 #include "cpu-sparc.h"
38 #include "jit-icalls.h"
42 * Sparc V9 means two things:
43 * - the instruction set
46 * V9 instructions are only usable if the underlying processor is 64 bit. Most Sparc
47 * processors in use are 64 bit processors. The V9 ABI is only usable if the
48 * mono executable is a 64 bit executable. So it would make sense to use the 64 bit
49 * instructions without using the 64 bit ABI.
54 * - %i0..%i<n> hold the incoming arguments, these are never written by JITted
55 * code. Unused input registers are used for global register allocation.
56 * - %o0..%o5 and %l7 is used for local register allocation and passing arguments
57 * - %l0..%l6 is used for global register allocation
58 * - %o7 and %g1 is used as scratch registers in opcodes
59 * - all floating point registers are used for local register allocation except %f0.
60 * Only double precision registers are used.
62 * - fp registers %d0..%d30 are used for parameter passing, and %d32..%d62 are
63 * used for local allocation.
68 * - doubles and longs must be stored in dword aligned locations
72 * The following things are not implemented or do not work:
73 * - some fp arithmetic corner cases
74 * The following tests in mono/mini are expected to fail:
75 * - test_0_simple_double_casts
76 * This test casts (guint64)-1 to double and then back to guint64 again.
77 * Under x86, it returns 0, while under sparc it returns -1.
79 * In addition to this, the runtime requires the trunc function, or its
80 * solaris counterpart, aintl, to do some double->int conversions. If this
81 * function is not available, it is emulated somewhat, but the results can be
87 * - optimize sparc_set according to the memory model
88 * - when non-AOT compiling, compute patch targets immediately so we don't
89 * have to emit the 6 byte template.
91 * - struct arguments/returns
96 * - sparc_call_simple can't be used in a lot of places since the displacement
97 * might not fit into an imm30.
98 * - g1 can't be used in a lot of places since it is used as a scratch reg in
100 * - sparc_f0 can't be used as a scratch register on V9
101 * - the %d34..%d62 fp registers are encoded as: %dx = %f(x - 32 + 1), ie.
103 * - ldind.i4/u4 needs to sign extend/clear out upper word -> slows things down
104 * - ins->dreg can't be used as a scratch register in r4 opcodes since it might
105 * be a double precision register which has no single precision part.
106 * - passing/returning structs is hard to implement, because:
107 * - the spec is very hard to understand
108 * - it requires knowledge about the fields of the structure, needs to handle
109 * nested structures etc.
113 * Possible optimizations:
114 * - delay slot scheduling
115 * - allocate large constants to registers
116 * - add more mul/div/rem optimizations
120 #define MONO_SPARC_THR_TLS 1
124 * There was a 64 bit bug in glib-2.2: g_bit_nth_msf (0, -1) would return 32,
125 * causing infinite loops in dominator computation. So glib-2.4 is required.
128 #if GLIB_MAJOR_VERSION == 2 && GLIB_MINOR_VERSION < 4
129 #error "glib 2.4 or later is required for 64 bit mode."
133 #define ALIGN_TO(val,align) (((val) + ((align) - 1)) & ~((align) - 1))
135 #define SIGNAL_STACK_SIZE (64 * 1024)
137 #define STACK_BIAS MONO_SPARC_STACK_BIAS
141 /* %g1 is used by sparc_set */
142 #define GP_SCRATCH_REG sparc_g4
143 /* %f0 is used for parameter passing */
144 #define FP_SCRATCH_REG sparc_f30
145 #define ARGS_OFFSET (STACK_BIAS + 128)
149 #define FP_SCRATCH_REG sparc_f0
150 #define ARGS_OFFSET 68
151 #define GP_SCRATCH_REG sparc_g1
155 /* Whether this is a 64 bit executable */
157 static gboolean v64 = TRUE;
159 static gboolean v64 = FALSE;
162 static gpointer mono_arch_get_lmf_addr (void);
165 mono_arch_regname (int reg) {
166 static const char * rnames[] = {
167 "sparc_g0", "sparc_g1", "sparc_g2", "sparc_g3", "sparc_g4",
168 "sparc_g5", "sparc_g6", "sparc_g7", "sparc_o0", "sparc_o1",
169 "sparc_o2", "sparc_o3", "sparc_o4", "sparc_o5", "sparc_sp",
170 "sparc_call", "sparc_l0", "sparc_l1", "sparc_l2", "sparc_l3",
171 "sparc_l4", "sparc_l5", "sparc_l6", "sparc_l7", "sparc_i0",
172 "sparc_i1", "sparc_i2", "sparc_i3", "sparc_i4", "sparc_i5",
173 "sparc_fp", "sparc_retadr"
175 if (reg >= 0 && reg < 32)
181 mono_arch_fregname (int reg) {
182 static const char *rnames [] = {
183 "sparc_f0", "sparc_f1", "sparc_f2", "sparc_f3", "sparc_f4",
184 "sparc_f5", "sparc_f6", "sparc_f7", "sparc_f8", "sparc_f9",
185 "sparc_f10", "sparc_f11", "sparc_f12", "sparc_f13", "sparc_f14",
186 "sparc_f15", "sparc_f16", "sparc_f17", "sparc_f18", "sparc_f19",
187 "sparc_f20", "sparc_f21", "sparc_f22", "sparc_f23", "sparc_f24",
188 "sparc_f25", "sparc_f26", "sparc_f27", "sparc_f28", "sparc_f29",
189 "sparc_f30", "sparc_f31"
192 if (reg >= 0 && reg < 32)
199 * Initialize the cpu to execute managed code.
202 mono_arch_cpu_init (void)
207 * Initialize architecture specific code.
210 mono_arch_init (void)
215 * Cleanup architecture specific code.
218 mono_arch_cleanup (void)
223 mono_arch_have_fast_tls (void)
229 * This function returns the optimizations supported on this cpu.
232 mono_arch_cpu_optimizations (guint32 *exclude_mask)
239 * On some processors, the cmov instructions are even slower than the
242 if (mono_hwcap_sparc_is_v9)
243 opts |= MONO_OPT_CMOV | MONO_OPT_FCMOV;
245 *exclude_mask |= MONO_OPT_CMOV | MONO_OPT_FCMOV;
251 * This function tests for all supported SIMD functions.
253 * Returns a bitmask corresponding to all supported versions.
257 mono_arch_cpu_enumerate_simd_versions (void)
259 /* SIMD is currently unimplemented */
264 #define flushi(addr) __asm__ __volatile__ ("iflush %0"::"r"(addr):"memory")
265 #else /* assume Sun's compiler */
266 static void flushi(void *addr)
273 void sync_instruction_memory(caddr_t addr, int len);
277 mono_arch_flush_icache (guint8 *code, gint size)
280 /* Hopefully this is optimized based on the actual CPU */
281 sync_instruction_memory (code, size);
283 gulong start = (gulong) code;
284 gulong end = start + size;
287 /* Sparcv9 chips only need flushes on 32 byte
288 * cacheline boundaries.
290 * Sparcv8 needs a flush every 8 bytes.
292 align = (mono_hwcap_sparc_is_v9 ? 32 : 8);
294 start &= ~(align - 1);
295 end = (end + (align - 1)) & ~(align - 1);
297 while (start < end) {
299 __asm__ __volatile__ ("iflush %0"::"r"(start));
311 * Flush all register windows to memory. Every register window is saved to
312 * a 16 word area on the stack pointed to by its %sp register.
315 mono_sparc_flushw (void)
317 static guint32 start [64];
318 static int inited = 0;
320 static void (*flushw) (void);
325 sparc_save_imm (code, sparc_sp, -160, sparc_sp);
328 sparc_restore_simple (code);
330 g_assert ((code - start) < 64);
332 mono_arch_flush_icache ((guint8*)start, (guint8*)code - (guint8*)start);
334 flushw = (gpointer)start;
343 mono_arch_flush_register_windows (void)
345 mono_sparc_flushw ();
349 mono_arch_is_inst_imm (gint64 imm)
351 return sparc_is_imm13 (imm);
355 mono_sparc_is_v9 (void) {
356 return mono_hwcap_sparc_is_v9;
360 mono_sparc_is_sparc64 (void) {
372 ArgInFloatReg, /* V9 only */
373 ArgInDoubleReg /* V9 only */
378 /* This needs to be offset by %i0 or %o0 depending on caller/callee */
381 guint32 vt_offset; /* for valuetypes */
399 add_general (guint32 *gr, guint32 *stack_size, ArgInfo *ainfo, gboolean pair)
401 ainfo->offset = *stack_size;
404 if (*gr >= PARAM_REGS) {
405 ainfo->storage = ArgOnStack;
408 ainfo->storage = ArgInIReg;
413 /* Always reserve stack space for parameters passed in registers */
414 (*stack_size) += sizeof (gpointer);
417 if (*gr < PARAM_REGS - 1) {
418 /* A pair of registers */
419 ainfo->storage = ArgInIRegPair;
423 else if (*gr >= PARAM_REGS) {
424 /* A pair of stack locations */
425 ainfo->storage = ArgOnStackPair;
428 ainfo->storage = ArgInSplitRegStack;
433 (*stack_size) += 2 * sizeof (gpointer);
439 #define FLOAT_PARAM_REGS 32
442 add_float (guint32 *gr, guint32 *stack_size, ArgInfo *ainfo, gboolean single)
444 ainfo->offset = *stack_size;
447 if (*gr >= FLOAT_PARAM_REGS) {
448 ainfo->storage = ArgOnStack;
451 /* A single is passed in an even numbered fp register */
452 ainfo->storage = ArgInFloatReg;
453 ainfo->reg = *gr + 1;
458 if (*gr < FLOAT_PARAM_REGS) {
459 /* A double register */
460 ainfo->storage = ArgInDoubleReg;
465 ainfo->storage = ArgOnStack;
469 (*stack_size) += sizeof (gpointer);
477 * Obtain information about a call according to the calling convention.
478 * For V8, see the "System V ABI, Sparc Processor Supplement" Sparc V8 version
479 * document for more information.
480 * For V9, see the "Low Level System Information (64-bit psABI)" chapter in
481 * the 'Sparc Compliance Definition 2.4' document.
484 get_call_info (MonoCompile *cfg, MonoMethodSignature *sig, gboolean is_pinvoke)
487 int n = sig->hasthis + sig->param_count;
488 guint32 stack_size = 0;
492 cinfo = g_malloc0 (sizeof (CallInfo) + (sizeof (ArgInfo) * n));
498 if (MONO_TYPE_ISSTRUCT ((sig->ret))) {
499 /* The address of the return value is passed in %o0 */
500 add_general (&gr, &stack_size, &cinfo->ret, FALSE);
501 cinfo->ret.reg += sparc_i0;
502 /* FIXME: Pass this after this as on other platforms */
509 add_general (&gr, &stack_size, cinfo->args + 0, FALSE);
511 if ((sig->call_convention == MONO_CALL_VARARG) && (n == 0)) {
514 /* Emit the signature cookie just before the implicit arguments */
515 add_general (&gr, &stack_size, &cinfo->sig_cookie, FALSE);
518 for (i = 0; i < sig->param_count; ++i) {
519 ArgInfo *ainfo = &cinfo->args [sig->hasthis + i];
522 if ((sig->call_convention == MONO_CALL_VARARG) && (i == sig->sentinelpos)) {
525 /* Emit the signature cookie just before the implicit arguments */
526 add_general (&gr, &stack_size, &cinfo->sig_cookie, FALSE);
529 DEBUG(printf("param %d: ", i));
530 if (sig->params [i]->byref) {
531 DEBUG(printf("byref\n"));
533 add_general (&gr, &stack_size, ainfo, FALSE);
536 ptype = mini_get_underlying_type (sig->params [i]);
537 switch (ptype->type) {
538 case MONO_TYPE_BOOLEAN:
541 add_general (&gr, &stack_size, ainfo, FALSE);
542 /* the value is in the ls byte */
543 ainfo->offset += sizeof (gpointer) - 1;
548 add_general (&gr, &stack_size, ainfo, FALSE);
549 /* the value is in the ls word */
550 ainfo->offset += sizeof (gpointer) - 2;
554 add_general (&gr, &stack_size, ainfo, FALSE);
555 /* the value is in the ls dword */
556 ainfo->offset += sizeof (gpointer) - 4;
561 case MONO_TYPE_FNPTR:
562 case MONO_TYPE_CLASS:
563 case MONO_TYPE_OBJECT:
564 case MONO_TYPE_STRING:
565 case MONO_TYPE_SZARRAY:
566 case MONO_TYPE_ARRAY:
567 add_general (&gr, &stack_size, ainfo, FALSE);
569 case MONO_TYPE_GENERICINST:
570 if (!mono_type_generic_inst_is_valuetype (ptype)) {
571 add_general (&gr, &stack_size, ainfo, FALSE);
575 case MONO_TYPE_VALUETYPE:
580 add_general (&gr, &stack_size, ainfo, FALSE);
582 case MONO_TYPE_TYPEDBYREF:
583 add_general (&gr, &stack_size, ainfo, FALSE);
588 add_general (&gr, &stack_size, ainfo, FALSE);
590 add_general (&gr, &stack_size, ainfo, TRUE);
595 add_float (&fr, &stack_size, ainfo, TRUE);
598 /* single precision values are passed in integer registers */
599 add_general (&gr, &stack_size, ainfo, FALSE);
604 add_float (&fr, &stack_size, ainfo, FALSE);
607 /* double precision values are passed in a pair of registers */
608 add_general (&gr, &stack_size, ainfo, TRUE);
612 g_assert_not_reached ();
616 if (!sig->pinvoke && (sig->call_convention == MONO_CALL_VARARG) && (n > 0) && (sig->sentinelpos == sig->param_count)) {
619 /* Emit the signature cookie just before the implicit arguments */
620 add_general (&gr, &stack_size, &cinfo->sig_cookie, FALSE);
624 ret_type = mini_get_underlying_type (sig->ret);
625 switch (ret_type->type) {
626 case MONO_TYPE_BOOLEAN:
637 case MONO_TYPE_FNPTR:
638 case MONO_TYPE_CLASS:
639 case MONO_TYPE_OBJECT:
640 case MONO_TYPE_SZARRAY:
641 case MONO_TYPE_ARRAY:
642 case MONO_TYPE_STRING:
643 cinfo->ret.storage = ArgInIReg;
644 cinfo->ret.reg = sparc_i0;
651 cinfo->ret.storage = ArgInIReg;
652 cinfo->ret.reg = sparc_i0;
656 cinfo->ret.storage = ArgInIRegPair;
657 cinfo->ret.reg = sparc_i0;
664 cinfo->ret.storage = ArgInFReg;
665 cinfo->ret.reg = sparc_f0;
667 case MONO_TYPE_GENERICINST:
668 if (!mono_type_generic_inst_is_valuetype (ret_type)) {
669 cinfo->ret.storage = ArgInIReg;
670 cinfo->ret.reg = sparc_i0;
676 case MONO_TYPE_VALUETYPE:
685 cinfo->ret.storage = ArgOnStack;
687 case MONO_TYPE_TYPEDBYREF:
690 /* Same as a valuetype with size 24 */
697 cinfo->ret.storage = ArgOnStack;
702 g_error ("Can't handle as return value 0x%x", sig->ret->type);
705 cinfo->stack_usage = stack_size;
706 cinfo->reg_usage = gr;
711 mono_arch_get_allocatable_int_vars (MonoCompile *cfg)
717 * FIXME: If an argument is allocated to a register, then load it from the
718 * stack in the prolog.
721 for (i = 0; i < cfg->num_varinfo; i++) {
722 MonoInst *ins = cfg->varinfo [i];
723 MonoMethodVar *vmv = MONO_VARINFO (cfg, i);
726 if (vmv->range.first_use.abs_pos >= vmv->range.last_use.abs_pos)
729 /* FIXME: Make arguments on stack allocateable to registers */
730 if (ins->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT) || (ins->opcode == OP_REGVAR) || (ins->opcode == OP_ARG))
733 if (mono_is_regsize_var (ins->inst_vtype)) {
734 g_assert (MONO_VARINFO (cfg, i)->reg == -1);
735 g_assert (i == vmv->idx);
737 vars = mono_varlist_insert_sorted (cfg, vars, vmv, FALSE);
745 mono_arch_get_global_int_regs (MonoCompile *cfg)
749 MonoMethodSignature *sig;
752 sig = mono_method_signature (cfg->method);
754 cinfo = get_call_info (cfg, sig, FALSE);
756 /* Use unused input registers */
757 for (i = cinfo->reg_usage; i < 6; ++i)
758 regs = g_list_prepend (regs, GUINT_TO_POINTER (sparc_i0 + i));
760 /* Use %l0..%l6 as global registers */
761 for (i = sparc_l0; i < sparc_l7; ++i)
762 regs = g_list_prepend (regs, GUINT_TO_POINTER (i));
770 * mono_arch_regalloc_cost:
772 * Return the cost, in number of memory references, of the action of
773 * allocating the variable VMV into a register during global register
777 mono_arch_regalloc_cost (MonoCompile *cfg, MonoMethodVar *vmv)
783 * Set var information according to the calling convention. sparc version.
784 * The locals var stuff should most likely be split in another method.
788 mono_arch_allocate_vars (MonoCompile *cfg)
790 MonoMethodSignature *sig;
791 MonoMethodHeader *header;
793 int i, offset, size, align, curinst;
796 header = cfg->header;
798 sig = mono_method_signature (cfg->method);
800 cinfo = get_call_info (cfg, sig, FALSE);
802 if (sig->ret->type != MONO_TYPE_VOID) {
803 switch (cinfo->ret.storage) {
806 cfg->ret->opcode = OP_REGVAR;
807 cfg->ret->inst_c0 = cinfo->ret.reg;
809 case ArgInIRegPair: {
810 MonoType *t = mini_get_underlying_type (sig->ret);
811 if (((t->type == MONO_TYPE_I8) || (t->type == MONO_TYPE_U8))) {
812 MonoInst *low = get_vreg_to_inst (cfg, MONO_LVREG_LS (cfg->ret->dreg));
813 MonoInst *high = get_vreg_to_inst (cfg, MONO_LVREG_MS (cfg->ret->dreg));
815 low->opcode = OP_REGVAR;
816 low->dreg = cinfo->ret.reg + 1;
817 high->opcode = OP_REGVAR;
818 high->dreg = cinfo->ret.reg;
820 cfg->ret->opcode = OP_REGVAR;
821 cfg->ret->inst_c0 = cinfo->ret.reg;
826 g_assert_not_reached ();
829 cfg->vret_addr->opcode = OP_REGOFFSET;
830 cfg->vret_addr->inst_basereg = sparc_fp;
831 cfg->vret_addr->inst_offset = 64;
837 cfg->ret->dreg = cfg->ret->inst_c0;
841 * We use the ABI calling conventions for managed code as well.
842 * Exception: valuetypes are never returned in registers on V9.
843 * FIXME: Use something more optimized.
846 /* Locals are allocated backwards from %fp */
847 cfg->frame_reg = sparc_fp;
851 * Reserve a stack slot for holding information used during exception
854 if (header->num_clauses)
855 offset += sizeof (gpointer) * 2;
857 if (cfg->method->save_lmf) {
858 offset += sizeof (MonoLMF);
859 cfg->arch.lmf_offset = offset;
862 curinst = cfg->locals_start;
863 for (i = curinst; i < cfg->num_varinfo; ++i) {
864 inst = cfg->varinfo [i];
866 if ((inst->opcode == OP_REGVAR) || (inst->opcode == OP_REGOFFSET)) {
867 //g_print ("allocating local %d to %s\n", i, mono_arch_regname (inst->dreg));
871 if (inst->flags & MONO_INST_IS_DEAD)
874 /* inst->backend.is_pinvoke indicates native sized value types, this is used by the
875 * pinvoke wrappers when they call functions returning a structure */
876 if (inst->backend.is_pinvoke && MONO_TYPE_ISSTRUCT (inst->inst_vtype) && inst->inst_vtype->type != MONO_TYPE_TYPEDBYREF)
877 size = mono_class_native_size (mono_class_from_mono_type (inst->inst_vtype), &align);
879 size = mini_type_stack_size (inst->inst_vtype, &align);
882 * This is needed since structures containing doubles must be doubleword
884 * FIXME: Do this only if needed.
886 if (MONO_TYPE_ISSTRUCT (inst->inst_vtype))
890 * variables are accessed as negative offsets from %fp, so increase
891 * the offset before assigning it to a variable
896 offset &= ~(align - 1);
897 inst->opcode = OP_REGOFFSET;
898 inst->inst_basereg = sparc_fp;
899 inst->inst_offset = STACK_BIAS + -offset;
901 //g_print ("allocating local %d to [%s - %d]\n", i, mono_arch_regname (inst->inst_basereg), - inst->inst_offset);
904 if (sig->call_convention == MONO_CALL_VARARG) {
905 cfg->sig_cookie = cinfo->sig_cookie.offset + ARGS_OFFSET;
908 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
909 inst = cfg->args [i];
910 if (inst->opcode != OP_REGVAR) {
911 ArgInfo *ainfo = &cinfo->args [i];
912 gboolean inreg = TRUE;
916 if (sig->hasthis && (i == 0))
917 arg_type = &mono_defaults.object_class->byval_arg;
919 arg_type = sig->params [i - sig->hasthis];
922 if (!arg_type->byref && ((arg_type->type == MONO_TYPE_R4)
923 || (arg_type->type == MONO_TYPE_R8)))
925 * Since float arguments are passed in integer registers, we need to
926 * save them to the stack in the prolog.
931 /* FIXME: Allocate volatile arguments to registers */
932 /* FIXME: This makes the argument holding a vtype address into volatile */
933 if (inst->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT))
936 if (MONO_TYPE_ISSTRUCT (arg_type))
937 /* FIXME: this isn't needed */
940 inst->opcode = OP_REGOFFSET;
943 storage = ArgOnStack;
945 storage = ainfo->storage;
949 inst->opcode = OP_REGVAR;
950 inst->dreg = sparc_i0 + ainfo->reg;
953 if (inst->type == STACK_I8) {
954 MonoInst *low = get_vreg_to_inst (cfg, MONO_LVREG_LS (inst->dreg));
955 MonoInst *high = get_vreg_to_inst (cfg, MONO_LVREG_MS (inst->dreg));
957 low->opcode = OP_REGVAR;
958 low->dreg = sparc_i0 + ainfo->reg + 1;
959 high->opcode = OP_REGVAR;
960 high->dreg = sparc_i0 + ainfo->reg;
962 inst->opcode = OP_REGVAR;
963 inst->dreg = sparc_i0 + ainfo->reg;
968 * Since float regs are volatile, we save the arguments to
969 * the stack in the prolog.
970 * FIXME: Avoid this if the method contains no calls.
974 case ArgInSplitRegStack:
975 /* Split arguments are saved to the stack in the prolog */
976 inst->opcode = OP_REGOFFSET;
977 /* in parent frame */
978 inst->inst_basereg = sparc_fp;
979 inst->inst_offset = ainfo->offset + ARGS_OFFSET;
981 if (!arg_type->byref && (arg_type->type == MONO_TYPE_R8)) {
983 * It is very hard to load doubles from non-doubleword aligned
984 * memory locations. So if the offset is misaligned, we copy the
985 * argument to a stack location in the prolog.
987 if ((inst->inst_offset - STACK_BIAS) % 8) {
988 inst->inst_basereg = sparc_fp;
992 offset &= ~(align - 1);
993 inst->inst_offset = STACK_BIAS + -offset;
1002 if (MONO_TYPE_ISSTRUCT (arg_type)) {
1003 /* Add a level of indirection */
1005 * It would be easier to add OP_LDIND_I here, but ldind_i instructions
1006 * are destructively modified in a lot of places in inssel.brg.
1009 MONO_INST_NEW (cfg, indir, 0);
1011 inst->opcode = OP_VTARG_ADDR;
1012 inst->inst_left = indir;
1018 * spillvars are stored between the normal locals and the storage reserved
1022 cfg->stack_offset = offset;
1028 mono_arch_create_vars (MonoCompile *cfg)
1030 MonoMethodSignature *sig;
1032 sig = mono_method_signature (cfg->method);
1034 if (MONO_TYPE_ISSTRUCT ((sig->ret))) {
1035 cfg->vret_addr = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_ARG);
1036 if (G_UNLIKELY (cfg->verbose_level > 1)) {
1037 printf ("vret_addr = ");
1038 mono_print_ins (cfg->vret_addr);
1042 if (!sig->ret->byref && (sig->ret->type == MONO_TYPE_I8 || sig->ret->type == MONO_TYPE_U8)) {
1043 MonoInst *low = get_vreg_to_inst (cfg, MONO_LVREG_LS (cfg->ret->dreg));
1044 MonoInst *high = get_vreg_to_inst (cfg, MONO_LVREG_MS (cfg->ret->dreg));
1046 low->flags |= MONO_INST_VOLATILE;
1047 high->flags |= MONO_INST_VOLATILE;
1050 /* Add a properly aligned dword for use by int<->float conversion opcodes */
1051 cfg->arch.float_spill_slot = mono_compile_create_var (cfg, &mono_defaults.double_class->byval_arg, OP_ARG);
1052 ((MonoInst*)cfg->arch.float_spill_slot)->flags |= MONO_INST_VOLATILE;
1056 add_outarg_reg (MonoCompile *cfg, MonoCallInst *call, ArgStorage storage, int reg, guint32 sreg)
1060 MONO_INST_NEW (cfg, arg, 0);
1066 arg->opcode = OP_MOVE;
1067 arg->dreg = mono_alloc_ireg (cfg);
1069 mono_call_inst_add_outarg_reg (cfg, call, arg->dreg, reg, FALSE);
1072 arg->opcode = OP_FMOVE;
1073 arg->dreg = mono_alloc_freg (cfg);
1075 mono_call_inst_add_outarg_reg (cfg, call, arg->dreg, reg, TRUE);
1078 g_assert_not_reached ();
1081 MONO_ADD_INS (cfg->cbb, arg);
1085 add_outarg_load (MonoCompile *cfg, MonoCallInst *call, int opcode, int basereg, int offset, int reg)
1087 int dreg = mono_alloc_ireg (cfg);
1089 MONO_EMIT_NEW_LOAD_MEMBASE (cfg, dreg, sparc_sp, offset);
1091 mono_call_inst_add_outarg_reg (cfg, call, dreg, reg, FALSE);
1095 emit_pass_long (MonoCompile *cfg, MonoCallInst *call, ArgInfo *ainfo, MonoInst *in)
1097 int offset = ARGS_OFFSET + ainfo->offset;
1099 switch (ainfo->storage) {
1101 add_outarg_reg (cfg, call, ArgInIReg, sparc_o0 + ainfo->reg + 1, MONO_LVREG_LS (in->dreg));
1102 add_outarg_reg (cfg, call, ArgInIReg, sparc_o0 + ainfo->reg, MONO_LVREG_MS (in->dreg));
1104 case ArgOnStackPair:
1105 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, sparc_sp, offset, MONO_LVREG_MS (in->dreg));
1106 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, sparc_sp, offset + 4, MONO_LVREG_LS (in->dreg));
1108 case ArgInSplitRegStack:
1109 add_outarg_reg (cfg, call, ArgInIReg, sparc_o0 + ainfo->reg, MONO_LVREG_MS (in->dreg));
1110 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, sparc_sp, offset + 4, MONO_LVREG_LS (in->dreg));
1113 g_assert_not_reached ();
1118 emit_pass_double (MonoCompile *cfg, MonoCallInst *call, ArgInfo *ainfo, MonoInst *in)
1120 int offset = ARGS_OFFSET + ainfo->offset;
1122 switch (ainfo->storage) {
1124 /* floating-point <-> integer transfer must go through memory */
1125 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORER8_MEMBASE_REG, sparc_sp, offset, in->dreg);
1127 /* Load into a register pair */
1128 add_outarg_load (cfg, call, OP_LOADI4_MEMBASE, sparc_sp, offset, sparc_o0 + ainfo->reg);
1129 add_outarg_load (cfg, call, OP_LOADI4_MEMBASE, sparc_sp, offset + 4, sparc_o0 + ainfo->reg + 1);
1131 case ArgOnStackPair:
1132 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORER8_MEMBASE_REG, sparc_sp, offset, in->dreg);
1134 case ArgInSplitRegStack:
1135 /* floating-point <-> integer transfer must go through memory */
1136 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORER8_MEMBASE_REG, sparc_sp, offset, in->dreg);
1137 /* Load most significant word into register */
1138 add_outarg_load (cfg, call, OP_LOADI4_MEMBASE, sparc_sp, offset, sparc_o0 + ainfo->reg);
1141 g_assert_not_reached ();
1146 emit_pass_float (MonoCompile *cfg, MonoCallInst *call, ArgInfo *ainfo, MonoInst *in)
1148 int offset = ARGS_OFFSET + ainfo->offset;
1150 switch (ainfo->storage) {
1152 /* floating-point <-> integer transfer must go through memory */
1153 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORER4_MEMBASE_REG, sparc_sp, offset, in->dreg);
1154 add_outarg_load (cfg, call, OP_LOADI4_MEMBASE, sparc_sp, offset, sparc_o0 + ainfo->reg);
1157 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORER4_MEMBASE_REG, sparc_sp, offset, in->dreg);
1160 g_assert_not_reached ();
1165 emit_pass_other (MonoCompile *cfg, MonoCallInst *call, ArgInfo *ainfo, MonoType *arg_type, MonoInst *in);
1168 emit_pass_vtype (MonoCompile *cfg, MonoCallInst *call, CallInfo *cinfo, ArgInfo *ainfo, MonoType *arg_type, MonoInst *in, gboolean pinvoke)
1171 guint32 align, offset, pad, size;
1173 if (arg_type->type == MONO_TYPE_TYPEDBYREF) {
1174 size = sizeof (MonoTypedRef);
1175 align = sizeof (gpointer);
1178 size = mono_type_native_stack_size (&in->klass->byval_arg, &align);
1181 * Other backends use mono_type_stack_size (), but that
1182 * aligns the size to 8, which is larger than the size of
1183 * the source, leading to reads of invalid memory if the
1184 * source is at the end of address space.
1186 size = mono_class_value_size (in->klass, &align);
1189 /* The first 6 argument locations are reserved */
1190 if (cinfo->stack_usage < 6 * sizeof (gpointer))
1191 cinfo->stack_usage = 6 * sizeof (gpointer);
1193 offset = ALIGN_TO ((ARGS_OFFSET - STACK_BIAS) + cinfo->stack_usage, align);
1194 pad = offset - ((ARGS_OFFSET - STACK_BIAS) + cinfo->stack_usage);
1196 cinfo->stack_usage += size;
1197 cinfo->stack_usage += pad;
1200 * We use OP_OUTARG_VT to copy the valuetype to a stack location, then
1201 * use the normal OUTARG opcodes to pass the address of the location to
1205 MONO_INST_NEW (cfg, arg, OP_OUTARG_VT);
1206 arg->sreg1 = in->dreg;
1207 arg->klass = in->klass;
1208 arg->backend.size = size;
1209 arg->inst_p0 = call;
1210 arg->inst_p1 = mono_mempool_alloc (cfg->mempool, sizeof (ArgInfo));
1211 memcpy (arg->inst_p1, ainfo, sizeof (ArgInfo));
1212 ((ArgInfo*)(arg->inst_p1))->offset = STACK_BIAS + offset;
1213 MONO_ADD_INS (cfg->cbb, arg);
1215 MONO_INST_NEW (cfg, arg, OP_ADD_IMM);
1216 arg->dreg = mono_alloc_preg (cfg);
1217 arg->sreg1 = sparc_sp;
1218 arg->inst_imm = STACK_BIAS + offset;
1219 MONO_ADD_INS (cfg->cbb, arg);
1221 emit_pass_other (cfg, call, ainfo, NULL, arg);
1226 emit_pass_other (MonoCompile *cfg, MonoCallInst *call, ArgInfo *ainfo, MonoType *arg_type, MonoInst *in)
1228 int offset = ARGS_OFFSET + ainfo->offset;
1231 switch (ainfo->storage) {
1233 add_outarg_reg (cfg, call, ArgInIReg, sparc_o0 + ainfo->reg, in->dreg);
1240 opcode = OP_STOREI1_MEMBASE_REG;
1241 else if (offset & 0x2)
1242 opcode = OP_STOREI2_MEMBASE_REG;
1244 opcode = OP_STOREI4_MEMBASE_REG;
1245 MONO_EMIT_NEW_STORE_MEMBASE (cfg, opcode, sparc_sp, offset, in->dreg);
1249 g_assert_not_reached ();
1254 emit_sig_cookie (MonoCompile *cfg, MonoCallInst *call, CallInfo *cinfo)
1256 MonoMethodSignature *tmp_sig;
1259 * mono_ArgIterator_Setup assumes the signature cookie is
1260 * passed first and all the arguments which were before it are
1261 * passed on the stack after the signature. So compensate by
1262 * passing a different signature.
1264 tmp_sig = mono_metadata_signature_dup (call->signature);
1265 tmp_sig->param_count -= call->signature->sentinelpos;
1266 tmp_sig->sentinelpos = 0;
1267 memcpy (tmp_sig->params, call->signature->params + call->signature->sentinelpos, tmp_sig->param_count * sizeof (MonoType*));
1269 /* FIXME: Add support for signature tokens to AOT */
1270 cfg->disable_aot = TRUE;
1271 /* We always pass the signature on the stack for simplicity */
1272 MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, sparc_sp, ARGS_OFFSET + cinfo->sig_cookie.offset, tmp_sig);
1276 mono_arch_emit_call (MonoCompile *cfg, MonoCallInst *call)
1279 MonoMethodSignature *sig;
1283 guint32 extra_space = 0;
1285 sig = call->signature;
1286 n = sig->param_count + sig->hasthis;
1288 cinfo = get_call_info (cfg, sig, sig->pinvoke);
1290 if (sig->ret && MONO_TYPE_ISSTRUCT (sig->ret)) {
1291 /* Set the 'struct/union return pointer' location on the stack */
1292 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, sparc_sp, 64, call->vret_var->dreg);
1295 for (i = 0; i < n; ++i) {
1298 ainfo = cinfo->args + i;
1300 if ((sig->call_convention == MONO_CALL_VARARG) && (i == sig->sentinelpos)) {
1301 /* Emit the signature cookie just before the first implicit argument */
1302 emit_sig_cookie (cfg, call, cinfo);
1305 in = call->args [i];
1307 if (sig->hasthis && (i == 0))
1308 arg_type = &mono_defaults.object_class->byval_arg;
1310 arg_type = sig->params [i - sig->hasthis];
1312 arg_type = mini_get_underlying_type (arg_type);
1313 if ((i >= sig->hasthis) && (MONO_TYPE_ISSTRUCT(sig->params [i - sig->hasthis])))
1314 emit_pass_vtype (cfg, call, cinfo, ainfo, arg_type, in, sig->pinvoke);
1315 else if (!arg_type->byref && ((arg_type->type == MONO_TYPE_I8) || (arg_type->type == MONO_TYPE_U8)))
1316 emit_pass_long (cfg, call, ainfo, in);
1317 else if (!arg_type->byref && (arg_type->type == MONO_TYPE_R8))
1318 emit_pass_double (cfg, call, ainfo, in);
1319 else if (!arg_type->byref && (arg_type->type == MONO_TYPE_R4))
1320 emit_pass_float (cfg, call, ainfo, in);
1322 emit_pass_other (cfg, call, ainfo, arg_type, in);
1325 /* Handle the case where there are no implicit arguments */
1326 if (!sig->pinvoke && (sig->call_convention == MONO_CALL_VARARG) && (n == sig->sentinelpos)) {
1327 emit_sig_cookie (cfg, call, cinfo);
1330 call->stack_usage = cinfo->stack_usage + extra_space;
1336 mono_arch_emit_outarg_vt (MonoCompile *cfg, MonoInst *ins, MonoInst *src)
1338 ArgInfo *ainfo = (ArgInfo*)ins->inst_p1;
1339 int size = ins->backend.size;
1341 mini_emit_memcpy (cfg, sparc_sp, ainfo->offset, src->dreg, 0, size, SIZEOF_VOID_P);
1345 mono_arch_emit_setret (MonoCompile *cfg, MonoMethod *method, MonoInst *val)
1347 CallInfo *cinfo = get_call_info (cfg, mono_method_signature (method), FALSE);
1348 MonoType *ret = mini_get_underlying_type (mono_method_signature (method)->ret);
1350 switch (cinfo->ret.storage) {
1352 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, cfg->ret->dreg, val->dreg);
1355 if (ret->type == MONO_TYPE_I8 || ret->type == MONO_TYPE_U8) {
1356 MONO_EMIT_NEW_UNALU (cfg, OP_LMOVE, cfg->ret->dreg, val->dreg);
1358 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, MONO_LVREG_MS (cfg->ret->dreg), MONO_LVREG_MS (val->dreg));
1359 MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, MONO_LVREG_LS (cfg->ret->dreg), MONO_LVREG_LS (val->dreg));
1363 if (ret->type == MONO_TYPE_R4)
1364 MONO_EMIT_NEW_UNALU (cfg, OP_SETFRET, cfg->ret->dreg, val->dreg);
1366 MONO_EMIT_NEW_UNALU (cfg, OP_FMOVE, cfg->ret->dreg, val->dreg);
1369 g_assert_not_reached ();
1375 int cond_to_sparc_cond [][3] = {
1376 {sparc_be, sparc_be, sparc_fbe},
1377 {sparc_bne, sparc_bne, 0},
1378 {sparc_ble, sparc_ble, sparc_fble},
1379 {sparc_bge, sparc_bge, sparc_fbge},
1380 {sparc_bl, sparc_bl, sparc_fbl},
1381 {sparc_bg, sparc_bg, sparc_fbg},
1382 {sparc_bleu, sparc_bleu, 0},
1383 {sparc_beu, sparc_beu, 0},
1384 {sparc_blu, sparc_blu, sparc_fbl},
1385 {sparc_bgu, sparc_bgu, sparc_fbg}
1388 /* Map opcode to the sparc condition codes */
1389 static inline SparcCond
1390 opcode_to_sparc_cond (int opcode)
1396 case OP_COND_EXC_OV:
1397 case OP_COND_EXC_IOV:
1400 case OP_COND_EXC_IC:
1402 case OP_COND_EXC_NO:
1403 case OP_COND_EXC_NC:
1406 rel = mono_opcode_to_cond (opcode);
1407 t = mono_opcode_to_type (opcode, -1);
1409 return cond_to_sparc_cond [rel][t];
/*
 * Branch emission helpers.  COMPUTE_DISP computes the word displacement to
 * the target bb when it is already emitted; otherwise it records a
 * MONO_PATCH_INFO_BB patch (leaving disp == 0, which the predicted forms
 * below also use as the 'not yet resolved' indicator).
 */
1416 #define COMPUTE_DISP(ins) \
1417 if (ins->inst_true_bb->native_offset) \
1418 disp = (ins->inst_true_bb->native_offset - ((guint8*)code - cfg->native_code)) >> 2; \
1421 mono_add_patch_info (cfg, (guint8*)code - cfg->native_code, MONO_PATCH_INFO_BB, ins->inst_true_bb); \
1425 #define DEFAULT_ICC sparc_xcc_short
1427 #define DEFAULT_ICC sparc_icc_short
/* v9 path: branch-with-prediction on an explicit condition code register. */
1431 #define EMIT_COND_BRANCH_ICC(ins,cond,annul,filldelay,icc) \
1435 COMPUTE_DISP(ins); \
1436 predict = (disp != 0) ? 1 : 0; \
1437 g_assert (sparc_is_imm19 (disp)); \
1438 sparc_branchp (code, (annul), cond, icc, (predict), disp); \
1439 if (filldelay) sparc_nop (code); /* fill the delay slot */ \
1441 #define EMIT_COND_BRANCH(ins,cond,annul,filldelay) EMIT_COND_BRANCH_ICC ((ins), (cond), (annul), (filldelay), (sparc_xcc_short))
/* Float branch; same 19-bit displacement limit as the integer form. */
1442 #define EMIT_FLOAT_COND_BRANCH(ins,cond,annul,filldelay) \
1446 COMPUTE_DISP(ins); \
1447 predict = (disp != 0) ? 1 : 0; \
1448 g_assert (sparc_is_imm19 (disp)); \
1449 sparc_fbranch (code, (annul), cond, disp); \
1450 if (filldelay) sparc_nop (code); \
1453 #define EMIT_COND_BRANCH_ICC(ins,cond,annul,filldelay,icc) g_assert_not_reached ()
/* Pre-v9 fallback: plain Bicc/FBfcc with a 22-bit displacement. */
1454 #define EMIT_COND_BRANCH_GENERAL(ins,bop,cond,annul,filldelay) \
1457 COMPUTE_DISP(ins); \
1458 g_assert (sparc_is_imm22 (disp)); \
1459 sparc_ ## bop (code, (annul), cond, disp); \
1460 if (filldelay) sparc_nop (code); \
1462 #define EMIT_COND_BRANCH(ins,cond,annul,filldelay) EMIT_COND_BRANCH_GENERAL((ins),branch,(cond),annul,filldelay)
1463 #define EMIT_FLOAT_COND_BRANCH(ins,cond,annul,filldelay) EMIT_COND_BRANCH_GENERAL((ins),fbranch,(cond),annul,filldelay)
/* Branch with an explicit prediction bit on DEFAULT_ICC. */
1466 #define EMIT_COND_BRANCH_PREDICTED(ins,cond,annul,filldelay) \
1470 COMPUTE_DISP(ins); \
1471 predict = (disp != 0) ? 1 : 0; \
1472 g_assert (sparc_is_imm19 (disp)); \
1473 sparc_branchp (code, (annul), (cond), DEFAULT_ICC, (predict), disp); \
1474 if (filldelay) sparc_nop (code); \
1477 #define EMIT_COND_BRANCH_BPR(ins,bop,predict,annul,filldelay) \
1480 COMPUTE_DISP(ins); \
1481 g_assert (sparc_is_imm22 (disp)); \
1482 sparc_ ## bop (code, (annul), (predict), ins->sreg1, disp); \
1483 if (filldelay) sparc_nop (code); \
1486 /* emit an exception if condition is fail */
1488 * We put the exception throwing code out-of-line, at the end of the method
1490 #define EMIT_COND_SYSTEM_EXCEPTION_GENERAL(ins,cond,sexc_name,filldelay,icc) do { \
1491 mono_add_patch_info (cfg, (guint8*)(code) - (cfg)->native_code, \
1492 MONO_PATCH_INFO_EXC, sexc_name); \
1493 if (mono_hwcap_sparc_is_v9 && ((icc) != sparc_icc_short)) { \
1494 sparc_branchp (code, 0, (cond), (icc), 0, 0); \
1497 sparc_branch (code, 0, cond, 0); \
1499 if (filldelay) sparc_nop (code); \
1502 #define EMIT_COND_SYSTEM_EXCEPTION(ins,cond,sexc_name) EMIT_COND_SYSTEM_EXCEPTION_GENERAL(ins,cond,sexc_name,TRUE,DEFAULT_ICC)
/* BPr form (v9): test a register directly, e.g. brz/brnz. */
1504 #define EMIT_COND_SYSTEM_EXCEPTION_BPR(ins,bop,sexc_name) do { \
1505 mono_add_patch_info (cfg, (guint8*)(code) - (cfg)->native_code, \
1506 MONO_PATCH_INFO_EXC, sexc_name); \
1507 sparc_ ## bop (code, FALSE, FALSE, ins->sreg1, 0); \
1511 #define EMIT_ALU_IMM(ins,op,setcc) do { /* ALU reg,imm; spills imm to %o7 when it doesn't fit imm13 */ \
1512 if (sparc_is_imm13 ((ins)->inst_imm)) \
1513 sparc_ ## op ## _imm (code, (setcc), (ins)->sreg1, ins->inst_imm, (ins)->dreg); \
1515 sparc_set (code, ins->inst_imm, sparc_o7); \
1516 sparc_ ## op (code, (setcc), (ins)->sreg1, sparc_o7, (ins)->dreg); \
1520 #define EMIT_LOAD_MEMBASE(ins,op) do { /* membase load; offset via %o7 if not imm13 */ \
1521 if (sparc_is_imm13 (ins->inst_offset)) \
1522 sparc_ ## op ## _imm (code, ins->inst_basereg, ins->inst_offset, ins->dreg); \
1524 sparc_set (code, ins->inst_offset, sparc_o7); \
1525 sparc_ ## op (code, ins->inst_basereg, sparc_o7, ins->dreg); \
1530 #define EMIT_STORE_MEMBASE_IMM(ins,op) do { /* store an immediate; 0 is special-cased */ \
1532 if (ins->inst_imm == 0) \
1535 sparc_set (code, ins->inst_imm, sparc_o7); \
1538 if (!sparc_is_imm13 (ins->inst_offset)) { \
1539 sparc_set (code, ins->inst_offset, GP_SCRATCH_REG); \
1540 sparc_ ## op (code, sreg, ins->inst_destbasereg, GP_SCRATCH_REG); \
1543 sparc_ ## op ## _imm (code, sreg, ins->inst_destbasereg, ins->inst_offset); \
1546 #define EMIT_STORE_MEMBASE_REG(ins,op) do { /* store a register; offset via %o7 if not imm13 */ \
1547 if (!sparc_is_imm13 (ins->inst_offset)) { \
1548 sparc_set (code, ins->inst_offset, sparc_o7); \
1549 sparc_ ## op (code, ins->sreg1, ins->inst_destbasereg, sparc_o7); \
1552 sparc_ ## op ## _imm (code, ins->sreg1, ins->inst_destbasereg, ins->inst_offset); \
1555 #define EMIT_CALL() do { /* patchable set+jmpl template vs. a simple call */ \
1557 sparc_set_template (code, sparc_o7); \
1558 sparc_jmpl (code, sparc_o7, sparc_g0, sparc_o7); \
1561 sparc_call_simple (code, 0); \
1567 * A call template is 7 instructions long, so we want to avoid it if possible.
/*
 * emit_call:
 *
 *   Emit a call described by PATCH_TYPE/DATA, registering patch info so the
 * real target is filled in later.  The direct sparc_set path is disabled
 * ("if (0 && ...)") since it only works when the target is already compiled.
 */
1570 emit_call (MonoCompile *cfg, guint32 *code, guint32 patch_type, gconstpointer data)
1575 /* FIXME: This only works if the target method is already compiled */
1576 if (0 && v64 && !cfg->compile_aot) {
1577 MonoJumpInfo patch_info;
1579 patch_info.type = patch_type;
1580 patch_info.data.target = data;
1582 target = mono_resolve_patch_target (cfg->method, cfg->domain, NULL, &patch_info, FALSE, &error);
1583 mono_error_raise_exception (&error); /* FIXME: don't raise here */
1585 /* FIXME: Add optimizations if the target is close enough */
1586 sparc_set (code, target, sparc_o7);
1587 sparc_jmpl (code, sparc_o7, sparc_g0, sparc_o7);
/* Normal path: record a patch at the current native offset. */
1591 mono_add_patch_info (cfg, (guint8*)code - cfg->native_code, patch_type, data);
/*
 * mono_arch_peephole_pass_1:
 *
 *   First peephole pass; no sparc-specific optimizations are performed here
 * (no body is visible in this chunk — presumably empty).
 */
1599 mono_arch_peephole_pass_1 (MonoCompile *cfg, MonoBasicBlock *bb)
/*
 * mono_arch_peephole_pass_2:
 *
 *   Second, arch-specific peephole pass over BB: removes mul-by-1, forwards
 * stores to immediately-following loads, merges adjacent zero stores into a
 * dword store, and (v64 only) fuses compare-with-zero + branch/cond-exc
 * into BPr-style OP_SPARC_* opcodes.
 * NOTE(review): this chunk is line-sampled; breaks, some else branches and
 * a few statements between the visible lines are not shown.
 */
1604 mono_arch_peephole_pass_2 (MonoCompile *cfg, MonoBasicBlock *bb)
1606 MonoInst *ins, *n, *last_ins = NULL;
1609 MONO_BB_FOR_EACH_INS_SAFE (bb, n, ins) {
1610 switch (ins->opcode) {
1612 /* remove unnecessary multiplication with 1 */
1613 if (ins->inst_imm == 1) {
1614 if (ins->dreg != ins->sreg1) {
1615 ins->opcode = OP_MOVE;
/* dreg == sreg1: the multiply is a complete no-op. */
1617 MONO_DELETE_INS (bb, ins);
1623 case OP_LOAD_MEMBASE:
1624 case OP_LOADI4_MEMBASE:
/* Store-to-load forwarding: */
1626 * OP_STORE_MEMBASE_REG reg, offset(basereg)
1627 * OP_LOAD_MEMBASE offset(basereg), reg
1629 if (last_ins && (last_ins->opcode == OP_STOREI4_MEMBASE_REG
1630 || last_ins->opcode == OP_STORE_MEMBASE_REG) &&
1631 ins->inst_basereg == last_ins->inst_destbasereg &&
1632 ins->inst_offset == last_ins->inst_offset) {
/* Same register: the load is redundant.  Otherwise turn it into a move. */
1633 if (ins->dreg == last_ins->sreg1) {
1634 MONO_DELETE_INS (bb, ins);
1637 //static int c = 0; printf ("MATCHX %s %d\n", cfg->method->name,c++);
1638 ins->opcode = OP_MOVE;
1639 ins->sreg1 = last_ins->sreg1;
1643 * Note: reg1 must be different from the basereg in the second load
1644 * OP_LOAD_MEMBASE offset(basereg), reg1
1645 * OP_LOAD_MEMBASE offset(basereg), reg2
1647 * OP_LOAD_MEMBASE offset(basereg), reg1
1648 * OP_MOVE reg1, reg2
1650 } if (last_ins && (last_ins->opcode == OP_LOADI4_MEMBASE
1651 || last_ins->opcode == OP_LOAD_MEMBASE) &&
1652 ins->inst_basereg != last_ins->dreg &&
1653 ins->inst_basereg == last_ins->inst_basereg &&
1654 ins->inst_offset == last_ins->inst_offset) {
1656 if (ins->dreg == last_ins->dreg) {
1657 MONO_DELETE_INS (bb, ins);
1660 ins->opcode = OP_MOVE;
1661 ins->sreg1 = last_ins->dreg;
1664 //g_assert_not_reached ();
/* Constant-store forwarding: */
1668 * OP_STORE_MEMBASE_IMM imm, offset(basereg)
1669 * OP_LOAD_MEMBASE offset(basereg), reg
1671 * OP_STORE_MEMBASE_IMM imm, offset(basereg)
1672 * OP_ICONST reg, imm
1674 } else if (last_ins && (last_ins->opcode == OP_STOREI4_MEMBASE_IMM
1675 || last_ins->opcode == OP_STORE_MEMBASE_IMM) &&
1676 ins->inst_basereg == last_ins->inst_destbasereg &&
1677 ins->inst_offset == last_ins->inst_offset) {
1678 //static int c = 0; printf ("MATCHX %s %d\n", cfg->method->name,c++);
1679 ins->opcode = OP_ICONST;
1680 ins->inst_c0 = last_ins->inst_imm;
/* This rule has apparently never been exercised/validated. */
1681 g_assert_not_reached (); // check this rule
1686 case OP_LOADI1_MEMBASE:
1687 if (last_ins && (last_ins->opcode == OP_STOREI1_MEMBASE_REG) &&
1688 ins->inst_basereg == last_ins->inst_destbasereg &&
1689 ins->inst_offset == last_ins->inst_offset) {
1690 if (ins->dreg == last_ins->sreg1) {
1691 MONO_DELETE_INS (bb, ins);
1694 //static int c = 0; printf ("MATCHX %s %d\n", cfg->method->name,c++);
1695 ins->opcode = OP_MOVE;
1696 ins->sreg1 = last_ins->sreg1;
1700 case OP_LOADI2_MEMBASE:
1701 if (last_ins && (last_ins->opcode == OP_STOREI2_MEMBASE_REG) &&
1702 ins->inst_basereg == last_ins->inst_destbasereg &&
1703 ins->inst_offset == last_ins->inst_offset) {
1704 if (ins->dreg == last_ins->sreg1) {
1705 MONO_DELETE_INS (bb, ins);
1708 //static int c = 0; printf ("MATCHX %s %d\n", cfg->method->name,c++);
1709 ins->opcode = OP_MOVE;
1710 ins->sreg1 = last_ins->sreg1;
1714 case OP_STOREI4_MEMBASE_IMM:
1715 /* Convert pairs of 0 stores to a dword 0 store */
1716 /* Used when initializing temporaries */
1717 /* We know sparc_fp is dword aligned */
1718 if (last_ins && (last_ins->opcode == OP_STOREI4_MEMBASE_IMM) &&
1719 (ins->inst_destbasereg == last_ins->inst_destbasereg) &&
1720 (ins->inst_destbasereg == sparc_fp) &&
1721 (ins->inst_offset < 0) &&
1722 ((ins->inst_offset % 8) == 0) &&
1723 ((ins->inst_offset == last_ins->inst_offset - 4)) &&
1724 (ins->inst_imm == 0) &&
1725 (last_ins->inst_imm == 0)) {
1726 if (mono_hwcap_sparc_is_v9) {
1727 last_ins->opcode = OP_STOREI8_MEMBASE_IMM;
1728 last_ins->inst_offset = ins->inst_offset;
1729 MONO_DELETE_INS (bb, ins);
1740 case OP_COND_EXC_EQ:
1741 case OP_COND_EXC_GE:
1742 case OP_COND_EXC_GT:
1743 case OP_COND_EXC_LE:
1744 case OP_COND_EXC_LT:
1745 case OP_COND_EXC_NE_UN:
1747 * Convert compare with zero+branch to BRcc
1750 * This only works in 64 bit mode, since it examines all 64
1751 * bits of the register.
1752 * Only do this if the method is small since BPr only has a 16bit
1755 if (v64 && (cfg->header->code_size < 10000) && last_ins &&
1756 (last_ins->opcode == OP_COMPARE_IMM) &&
1757 (last_ins->inst_imm == 0)) {
1758 switch (ins->opcode) {
1760 ins->opcode = OP_SPARC_BRZ;
1763 ins->opcode = OP_SPARC_BRNZ;
1766 ins->opcode = OP_SPARC_BRLZ;
1769 ins->opcode = OP_SPARC_BRGZ;
1772 ins->opcode = OP_SPARC_BRGEZ;
1775 ins->opcode = OP_SPARC_BRLEZ;
1777 case OP_COND_EXC_EQ:
1778 ins->opcode = OP_SPARC_COND_EXC_EQZ;
1780 case OP_COND_EXC_GE:
1781 ins->opcode = OP_SPARC_COND_EXC_GEZ;
1783 case OP_COND_EXC_GT:
1784 ins->opcode = OP_SPARC_COND_EXC_GTZ;
1786 case OP_COND_EXC_LE:
1787 ins->opcode = OP_SPARC_COND_EXC_LEZ;
1789 case OP_COND_EXC_LT:
1790 ins->opcode = OP_SPARC_COND_EXC_LTZ;
1792 case OP_COND_EXC_NE_UN:
1793 ins->opcode = OP_SPARC_COND_EXC_NEZ;
1796 g_assert_not_reached ();
/* The fused opcode tests the compared register directly. */
1798 ins->sreg1 = last_ins->sreg1;
1800 MONO_DELETE_INS (bb, ins);
/* OP_MOVE: drop self-moves and back-to-back inverse moves. */
1808 if (ins->dreg == ins->sreg1) {
1809 MONO_DELETE_INS (bb, ins);
1813 * OP_MOVE sreg, dreg
1814 * OP_MOVE dreg, sreg
1816 if (last_ins && last_ins->opcode == OP_MOVE &&
1817 ins->sreg1 == last_ins->dreg &&
1818 ins->dreg == last_ins->sreg1) {
1819 MONO_DELETE_INS (bb, ins);
1827 bb->last_ins = last_ins;
/*
 * mono_arch_decompose_long_opts:
 *
 *   Decompose 64-bit IR opcodes into 32-bit register-pair operations.
 */
1831 mono_arch_decompose_long_opts (MonoCompile *cfg, MonoInst *ins)
1833 switch (ins->opcode) {
/* Long negation: 0 - LS with the borrow propagated into 0 - MS. */
1835 MONO_EMIT_NEW_BIALU (cfg, OP_SUBCC, MONO_LVREG_LS (ins->dreg), 0, MONO_LVREG_LS (ins->sreg1));
1836 MONO_EMIT_NEW_BIALU (cfg, OP_SBB, MONO_LVREG_MS (ins->dreg), 0, MONO_LVREG_MS (ins->sreg1));
/*
 * mono_arch_lowering_pass:
 *
 *   Arch-specific IR lowering hook; no sparc lowering is visible here.
 */
1845 mono_arch_lowering_pass (MonoCompile *cfg, MonoBasicBlock *bb)
1849 /* FIXME: Strange loads from the stack in basic-float.cs:test_2_rem */
/*
 * sparc_patch:
 *
 *   Back-patch the instruction (sequence) at CODE so it transfers to, or
 * materializes, TARGET.  Dispatches on the instruction format: Bicc/FBfcc
 * (22-bit disp), BPcc (19-bit disp), BPr (split 16-bit disp), the various
 * sethi-based sparc_set sequences, call, and "mov imm13, reg".
 */
1852 sparc_patch (guint32 *code, const gpointer target)
1855 guint32 ins = *code;
1856 guint32 op = ins >> 30;
1857 guint32 op2 = (ins >> 22) & 0x7;
1858 guint32 rd = (ins >> 25) & 0x1f;
1859 guint8* target8 = (guint8*)target;
1860 gint64 disp = (target8 - (guint8*)code) >> 2;
1863 // g_print ("patching 0x%08x (0x%08x) to point to 0x%08x\n", code, ins, target);
/* Bicc: 22-bit word displacement. */
1865 if ((op == 0) && (op2 == 2)) {
1866 if (!sparc_is_imm22 (disp))
1869 *code = ((ins >> 22) << 22) | (disp & 0x3fffff);
/* BPcc: 19-bit word displacement. */
1871 else if ((op == 0) && (op2 == 1)) {
1872 if (!sparc_is_imm19 (disp))
1875 *code = ((ins >> 19) << 19) | (disp & 0x7ffff);
/* BPr: 16-bit displacement split into d16hi (bits 21:20) / d16lo (13:0).
 * NOTE(review): (disp << 21) & 0x180000 always yields 0 for the high bits
 * (it selects bits below disp[0]); this looks wrong for displacements
 * needing more than 14 bits -- confirm against the V9 BPr encoding. */
1877 else if ((op == 0) && (op2 == 3)) {
1878 if (!sparc_is_imm16 (disp))
1881 *code &= ~(0x180000 | 0x3fff);
1882 *code |= ((disp << 21) & (0x180000)) | (disp & 0x3fff);
/* FBfcc: 22-bit word displacement. */
1884 else if ((op == 0) && (op2 == 6)) {
1885 if (!sparc_is_imm22 (disp))
1888 *code = ((ins >> 22) << 22) | (disp & 0x3fffff);
/* sethi: start of a sparc_set sequence; disambiguate via the next word. */
1890 else if ((op == 0) && (op2 == 4)) {
1891 guint32 ins2 = code [1];
1893 if (((ins2 >> 30) == 2) && (((ins2 >> 19) & 0x3f) == 2)) {
1894 /* sethi followed by or */
1896 sparc_set (p, target8, rd);
1897 while (p <= (code + 1))
1900 else if (ins2 == 0x01000000) {
1901 /* sethi followed by nop */
1903 sparc_set (p, target8, rd);
1904 while (p <= (code + 1))
1907 else if ((sparc_inst_op (ins2) == 3) && (sparc_inst_imm (ins2))) {
1908 /* sethi followed by load/store */
/* Patch the sethi's imm22 and the memory op's low 10 bits in place. */
1910 guint32 t = (guint32)target8;
1911 *code &= ~(0x3fffff);
1913 *(code + 1) &= ~(0x3ff);
1914 *(code + 1) |= (t & 0x3ff);
/* 64-bit sparc_set variant: sethi to %g1, sethi, then two arithmetic ops. */
1918 (sparc_inst_rd (ins) == sparc_g1) &&
1919 (sparc_inst_op (c [1]) == 0) && (sparc_inst_op2 (c [1]) == 4) &&
1920 (sparc_inst_op (c [2]) == 2) && (sparc_inst_op3 (c [2]) == 2) &&
1921 (sparc_inst_op (c [3]) == 2) && (sparc_inst_op3 (c [3]) == 2))
1925 reg = sparc_inst_rd (c [1]);
1926 sparc_set (p, target8, reg);
1930 else if ((sparc_inst_op (ins2) == 2) && (sparc_inst_op3 (ins2) == 0x38) &&
1931 (sparc_inst_imm (ins2))) {
1932 /* sethi followed by jmpl */
1934 guint32 t = (guint32)target8;
1935 *code &= ~(0x3fffff);
1937 *(code + 1) &= ~(0x3ff);
1938 *(code + 1) |= (t & 0x3ff);
/* call: 30-bit word displacement; re-emit the call in place. */
1944 else if (op == 01) {
1945 gint64 disp = (target8 - (guint8*)code) >> 2;
1947 if (!sparc_is_imm30 (disp))
1949 sparc_call_simple (code, target8 - (guint8*)code);
/* "mov imm13, reg" (or with immediate): patch the immediate directly. */
1951 else if ((op == 2) && (sparc_inst_op3 (ins) == 0x2) && sparc_inst_imm (ins)) {
1953 g_assert (sparc_is_imm13 (target8));
1955 *code |= (guint32)target8;
1957 else if ((sparc_inst_op (ins) == 2) && (sparc_inst_op3 (ins) == 0x7)) {
1958 /* sparc_set case 5. */
1962 reg = sparc_inst_rd (c [3]);
1963 sparc_set (p, target, reg);
1970 // g_print ("patched with 0x%08x\n", ins);
1974 * mono_sparc_emit_save_lmf:
1976 * Emit the code necessary to push a new entry onto the lmf stack. Used by
1977 * trampolines as well.
/* Expects %o0 to hold the thread's lmf_addr pointer (see the stores/loads
 * through %o0 below); the new MonoLMF lives at %fp + lmf_offset. */
1980 mono_sparc_emit_save_lmf (guint32 *code, guint32 lmf_offset)
/* lmf->lmf_addr = %o0 */
1983 sparc_sti_imm (code, sparc_o0, sparc_fp, lmf_offset + G_STRUCT_OFFSET (MonoLMF, lmf_addr));
1984 /* Save previous_lmf */
1985 sparc_ldi (code, sparc_o0, sparc_g0, sparc_o7);
1986 sparc_sti_imm (code, sparc_o7, sparc_fp, lmf_offset + G_STRUCT_OFFSET (MonoLMF, previous_lmf));
/* *lmf_addr = &lmf (make the new entry the current one) */
1988 sparc_add_imm (code, FALSE, sparc_fp, lmf_offset, sparc_o7);
1989 sparc_sti (code, sparc_o7, sparc_o0, sparc_g0);
/*
 * mono_sparc_emit_restore_lmf:
 *
 *   Emit code popping the LMF at %fp + lmf_offset off the lmf stack:
 * *lmf->lmf_addr = lmf->previous_lmf.
 */
1995 mono_sparc_emit_restore_lmf (guint32 *code, guint32 lmf_offset)
1997 /* Load previous_lmf */
1998 sparc_ldi_imm (code, sparc_fp, lmf_offset + G_STRUCT_OFFSET (MonoLMF, previous_lmf), sparc_l0);
/* Load lmf_addr */
2000 sparc_ldi_imm (code, sparc_fp, lmf_offset + G_STRUCT_OFFSET (MonoLMF, lmf_addr), sparc_l1);
2001 /* *(lmf) = previous_lmf */
2002 sparc_sti (code, sparc_l0, sparc_l1, sparc_g0);
/*
 * emit_save_sp_to_lmf:
 *
 *   Store the current %sp into the LMF's sp field, right before a call.
 */
2007 emit_save_sp_to_lmf (MonoCompile *cfg, guint32 *code)
2010 * Since register windows are saved to the current value of %sp, we need to
2011 * set the sp field in the lmf before the call, not in the prolog.
2013 if (cfg->method->save_lmf) {
2014 gint32 lmf_offset = MONO_SPARC_STACK_BIAS - cfg->arch.lmf_offset;
2017 sparc_sti_imm (code, sparc_sp, sparc_fp, lmf_offset + G_STRUCT_OFFSET (MonoLMF, sp));
/*
 * emit_vret_token:
 *
 *   For pinvoke calls returning a structure, emit the 'unimp <size>' marker
 * word the sparc ABI requires after the call instruction.
 */
2024 emit_vret_token (MonoInst *ins, guint32 *code)
2026 MonoCallInst *call = (MonoCallInst*)ins;
2030 * The sparc ABI requires that calls to functions which return a structure
2031 * contain an additional unimpl instruction which is checked by the callee.
2033 if (call->signature->pinvoke && MONO_TYPE_ISSTRUCT(call->signature->ret)) {
2034 if (call->signature->ret->type == MONO_TYPE_TYPEDBYREF)
2035 size = mini_type_stack_size (call->signature->ret, NULL);
2037 size = mono_class_native_size (call->signature->ret->data.klass, NULL);
/* The unimp immediate field is 12 bits wide, hence the mask. */
2038 sparc_unimp (code, size & 0xfff);
/*
 * emit_move_return_value:
 *
 *   Emit code moving a call's return value from the sparc return registers
 * (%o0/%o1 for integers/longs, %f0/%f1 for floats) into INS->dreg.
 */
2045 emit_move_return_value (MonoInst *ins, guint32 *code)
2047 /* Move return value to the target register */
2048 /* FIXME: do more things in the local reg allocator */
2049 switch (ins->opcode) {
2051 case OP_VOIDCALL_REG:
2052 case OP_VOIDCALL_MEMBASE:
2056 case OP_CALL_MEMBASE:
2057 g_assert (ins->dreg == sparc_o0);
2061 case OP_LCALL_MEMBASE:
2063 * ins->dreg is the least significant reg due to the lreg: LCALL rule
2064 * in inssel-long32.brg.
/* Fix: the most-significant half returned in %o0 belongs in dreg + 1;
 * moving it into dreg (asserted == %o1 below, i.e. the LS reg) would
 * clobber the least-significant half already sitting there. */
2067 sparc_mov_reg_reg (code, sparc_o0, ins->dreg + 1);
2069 g_assert (ins->dreg == sparc_o1);
2074 case OP_FCALL_MEMBASE:
/* v9: R8 comes back in %d0; R4 returns must be widened with fstod. */
2076 if (((MonoCallInst*)ins)->signature->ret->type == MONO_TYPE_R4) {
2077 sparc_fmovs (code, sparc_f0, ins->dreg);
2078 sparc_fstod (code, ins->dreg, ins->dreg);
2081 sparc_fmovd (code, sparc_f0, ins->dreg);
/* v8: move the %f0/%f1 single pair separately. */
2083 sparc_fmovs (code, sparc_f0, ins->dreg);
2084 if (((MonoCallInst*)ins)->signature->ret->type == MONO_TYPE_R4)
2085 sparc_fstod (code, ins->dreg, ins->dreg);
2087 sparc_fmovs (code, sparc_f1, ins->dreg + 1);
2092 case OP_VCALL_MEMBASE:
2095 case OP_VCALL2_MEMBASE:
2105 * emit_load_volatile_arguments:
2107 * Load volatile arguments from the stack to the original input registers.
2108 * Required before a tail call.
2111 emit_load_volatile_arguments (MonoCompile *cfg, guint32 *code)
2113 MonoMethod *method = cfg->method;
2114 MonoMethodSignature *sig;
2119 /* FIXME: Generate intermediate code instead */
2121 sig = mono_method_signature (method);
2123 cinfo = get_call_info (cfg, sig, FALSE);
2125 /* This is the opposite of the code in emit_prolog */
2127 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
2128 ArgInfo *ainfo = cinfo->args + i;
2129 gint32 stack_offset;
2132 inst = cfg->args [i];
2134 if (sig->hasthis && (i == 0))
2135 arg_type = &mono_defaults.object_class->byval_arg;
2137 arg_type = sig->params [i - sig->hasthis];
2139 stack_offset = ainfo->offset + ARGS_OFFSET;
2140 ireg = sparc_i0 + ainfo->reg;
2142 if (ainfo->storage == ArgInSplitRegStack) {
2143 g_assert (inst->opcode == OP_REGOFFSET);
2145 if (!sparc_is_imm13 (stack_offset))
/* Fix: reload the register half (%i5) from its stack slot; this function
 * restores arguments, and the operand order (base, offset, reg) is the
 * load form -- as a store it would be malformed. */
2147 sparc_ld_imm (code, inst->inst_basereg, stack_offset, sparc_i5);
2150 if (!v64 && !arg_type->byref && (arg_type->type == MONO_TYPE_R8)) {
2151 if (ainfo->storage == ArgInIRegPair) {
2152 if (!sparc_is_imm13 (inst->inst_offset + 4))
2154 sparc_ld_imm (code, inst->inst_basereg, inst->inst_offset, ireg);
2155 sparc_ld_imm (code, inst->inst_basereg, inst->inst_offset + 4, ireg + 1);
2158 if (ainfo->storage == ArgInSplitRegStack) {
2159 if (stack_offset != inst->inst_offset) {
/* Move the value back to its original (caller-visible) slots. */
2160 sparc_ld_imm (code, inst->inst_basereg, inst->inst_offset, sparc_i5);
2161 sparc_ld_imm (code, inst->inst_basereg, inst->inst_offset + 4, sparc_o7);
2162 sparc_st_imm (code, sparc_o7, sparc_fp, stack_offset + 4);
2167 if (ainfo->storage == ArgOnStackPair) {
2168 if (stack_offset != inst->inst_offset) {
2169 /* stack_offset is not dword aligned, so we need to make a copy */
2170 sparc_ld_imm (code, inst->inst_basereg, inst->inst_offset, sparc_o7);
2171 sparc_st_imm (code, sparc_o7, sparc_fp, stack_offset);
2173 sparc_ld_imm (code, inst->inst_basereg, inst->inst_offset + 4, sparc_o7);
2174 sparc_st_imm (code, sparc_o7, sparc_fp, stack_offset + 4);
2179 g_assert_not_reached ();
2182 if ((ainfo->storage == ArgInIReg) && (inst->opcode != OP_REGVAR)) {
2183 /* Argument in register, but need to be saved to stack */
2184 if (!sparc_is_imm13 (stack_offset))
/* Pick the load width/signedness from the slot's alignment. */
2186 if ((stack_offset - ARGS_OFFSET) & 0x1)
2187 /* FIXME: Is this ldsb or ldub ? */
2188 sparc_ldsb_imm (code, inst->inst_basereg, stack_offset, ireg);
2190 if ((stack_offset - ARGS_OFFSET) & 0x2)
2191 sparc_ldsh_imm (code, inst->inst_basereg, stack_offset, ireg);
2193 if ((stack_offset - ARGS_OFFSET) & 0x4)
2194 sparc_ld_imm (code, inst->inst_basereg, stack_offset, ireg);
2197 sparc_ldx_imm (code, inst->inst_basereg, stack_offset, ireg);
2199 sparc_ld_imm (code, inst->inst_basereg, stack_offset, ireg);
2202 else if ((ainfo->storage == ArgInIRegPair) && (inst->opcode != OP_REGVAR)) {
2203 /* Argument in regpair, but need to be saved to stack */
2204 if (!sparc_is_imm13 (inst->inst_offset + 4))
2206 sparc_ld_imm (code, inst->inst_basereg, inst->inst_offset, ireg);
/* Fix: the second half must be loaded like the first; a sparc_st_imm with
 * these (base, offset, reg) operands would emit a bogus store. */
2207 sparc_ld_imm (code, inst->inst_basereg, inst->inst_offset + 4, ireg + 1);
2209 else if ((ainfo->storage == ArgInFloatReg) && (inst->opcode != OP_REGVAR)) {
2212 else if ((ainfo->storage == ArgInDoubleReg) && (inst->opcode != OP_REGVAR)) {
2216 if ((ainfo->storage == ArgInSplitRegStack) || (ainfo->storage == ArgOnStack))
2217 if (inst->opcode == OP_REGVAR)
2218 /* FIXME: Load the argument into memory */
2228 * mono_sparc_is_virtual_call:
2230 * Determine whether the instruction at CODE is a virtual call.
2233 mono_sparc_is_virtual_call (guint32 *code)
/* jmpl (op == 2, op3 == 0x38): a register-indirect call. */
2240 if ((sparc_inst_op (*code) == 0x2) && (sparc_inst_op3 (*code) == 0x38)) {
2242 * Register indirect call. If it is a virtual call, then the
2243 * instruction in the delay slot is a special kind of nop.
2246 /* Construct special nop */
2247 sparc_or_imm (p, FALSE, sparc_g0, 0xca, sparc_g0);
/* Compare the actual delay slot with the freshly built marker nop. */
2250 if (code [1] == p [0])
/* Per-node code-size estimates (in 4-byte instruction words) used to size
 * the IMT trampoline buffer below; CMP_SIZE is defined elsewhere. */
2258 #define BR_SMALL_SIZE 2
2259 #define BR_LARGE_SIZE 2
2260 #define JUMP_IMM_SIZE 5
/* Set to 1 to assert when an IMT lookup reaches a wrong-method slot. */
2261 #define ENABLE_WRONG_METHOD_CHECK 0
2264 * LOCKING: called with the domain lock held
/*
 * mono_arch_build_imt_trampoline:
 *
 *   Build the interface/IMT dispatch thunk: a chain of compare-and-branch
 * nodes on the method key (expected in MONO_ARCH_IMT_REG) which jump
 * through the vtable slot, a target-code address, or FAIL_TRAMP.  %g5 is
 * the integer scratch register used throughout.
 */
2267 mono_arch_build_imt_trampoline (MonoVTable *vtable, MonoDomain *domain, MonoIMTCheckItem **imt_entries, int count,
2268 gpointer fail_tramp)
2272 guint32 *code, *start;
/* First pass: estimate per-node chunk sizes and the total buffer size. */
2274 for (i = 0; i < count; ++i) {
2275 MonoIMTCheckItem *item = imt_entries [i];
2276 if (item->is_equals) {
2277 if (item->check_target_idx) {
2278 if (!item->compare_done)
2279 item->chunk_size += CMP_SIZE;
2280 item->chunk_size += BR_SMALL_SIZE + JUMP_IMM_SIZE;
2283 item->chunk_size += 16;
2284 item->chunk_size += JUMP_IMM_SIZE;
2285 #if ENABLE_WRONG_METHOD_CHECK
2286 item->chunk_size += CMP_SIZE + BR_SMALL_SIZE + 1;
2290 item->chunk_size += CMP_SIZE + BR_LARGE_SIZE;
2291 imt_entries [item->check_target_idx]->compare_done = TRUE;
2293 size += item->chunk_size;
2296 code = mono_method_alloc_generic_virtual_trampoline (domain, size * 4);
2298 code = mono_domain_code_reserve (domain, size * 4);
/* Second pass: emit the nodes. */
2300 for (i = 0; i < count; ++i) {
2301 MonoIMTCheckItem *item = imt_entries [i];
2302 item->code_target = (guint8*)code;
2303 if (item->is_equals) {
2304 gboolean fail_case = !item->check_target_idx && fail_tramp;
2306 if (item->check_target_idx || fail_case) {
2307 if (!item->compare_done || fail_case) {
2308 sparc_set (code, (guint32)item->key, sparc_g5);
2309 sparc_cmp (code, MONO_ARCH_IMT_REG, sparc_g5);
2311 item->jmp_code = (guint8*)code;
2312 sparc_branch (code, 0, sparc_bne, 0);
2314 if (item->has_target_code) {
/* Fix: load the target address into the integer scratch register %g5,
 * which the sparc_jmpl below jumps through; sparc_f5 is a floating point
 * register and cannot be the destination of sparc_set. */
2315 sparc_set (code, item->value.target_code, sparc_g5);
2317 sparc_set (code, ((guint32)(&(vtable->vtable [item->value.vtable_slot]))), sparc_g5);
2318 sparc_ld (code, sparc_g5, 0, sparc_g5);
2320 sparc_jmpl (code, sparc_g5, sparc_g0, sparc_g0);
/* Fail case: patch the miss branch to fall through into FAIL_TRAMP. */
2324 sparc_patch (item->jmp_code, code);
2325 sparc_set (code, fail_tramp, sparc_g5);
2326 sparc_jmpl (code, sparc_g5, sparc_g0, sparc_g0);
2328 item->jmp_code = NULL;
2331 /* enable the commented code to assert on wrong method */
2332 #if ENABLE_WRONG_METHOD_CHECK
2333 g_assert_not_reached ();
2335 sparc_set (code, ((guint32)(&(vtable->vtable [item->value.vtable_slot]))), sparc_g5);
2336 sparc_ld (code, sparc_g5, 0, sparc_g5);
2337 sparc_jmpl (code, sparc_g5, sparc_g0, sparc_g0);
2339 #if ENABLE_WRONG_METHOD_CHECK
2340 g_assert_not_reached ();
/* Non-leaf node: compare the key and branch to a later subtree. */
2344 sparc_set (code, (guint32)item->key, sparc_g5);
2345 sparc_cmp (code, MONO_ARCH_IMT_REG, sparc_g5);
2346 item->jmp_code = (guint8*)code;
2347 sparc_branch (code, 0, sparc_beu, 0);
2351 /* patch the branches to get to the target items */
2352 for (i = 0; i < count; ++i) {
2353 MonoIMTCheckItem *item = imt_entries [i];
2354 if (item->jmp_code) {
2355 if (item->check_target_idx) {
2356 sparc_patch ((guint32*)item->jmp_code, imt_entries [item->check_target_idx]->code_target);
2361 mono_arch_flush_icache ((guint8*)start, (code - start) * 4);
2363 UnlockedAdd (&mono_stats.imt_trampolines_size, (code - start) * 4);
2364 g_assert (code - start <= size);
2366 mono_tramp_info_register (mono_tramp_info_create (NULL, start, code - start, NULL, NULL), domain);
/*
 * mono_arch_find_imt_method:
 *
 *   Return the IMT method pointer the trampoline left in %g1.
 * NOTE(review): the visible g_assert_not_reached () suggests an #ifdef'ed
 * unsupported-configuration branch is sampled out here -- confirm.
 */
2372 mono_arch_find_imt_method (mgreg_t *regs, guint8 *code)
2375 g_assert_not_reached ();
2378 return (MonoMethod*)regs [sparc_g1];
/*
 * mono_arch_get_this_arg_from_call:
 *
 *   Fetch the 'this' argument from the first outgoing register (%o0),
 * flushing the register windows first so REGS reflects the right frame.
 */
2382 mono_arch_get_this_arg_from_call (mgreg_t *regs, guint8 *code)
2384 mono_sparc_flushw ();
2386 return (gpointer)regs [sparc_o0];
2390 * Some conventions used in the following code.
2391 * 2) The only scratch registers we have are o7 and g1. We try to
2392 * stick to o7 when we can, and use g1 when necessary.
2396 mono_arch_output_basic_block (MonoCompile *cfg, MonoBasicBlock *bb)
2401 guint32 *code = (guint32*)(cfg->native_code + cfg->code_len);
2402 MonoInst *last_ins = NULL;
2406 if (cfg->verbose_level > 2)
2407 g_print ("Basic block %d starting at offset 0x%x\n", bb->block_num, bb->native_offset);
2409 cpos = bb->max_offset;
2411 MONO_BB_FOR_EACH_INS (bb, ins) {
2414 offset = (guint8*)code - cfg->native_code;
2416 spec = ins_get_spec (ins->opcode);
2418 max_len = ((guint8 *)spec)[MONO_INST_LEN];
2420 if (offset > (cfg->code_size - max_len - 16)) {
2421 cfg->code_size *= 2;
2422 cfg->native_code = g_realloc (cfg->native_code, cfg->code_size);
2423 code = (guint32*)(cfg->native_code + offset);
2425 code_start = (guint8*)code;
2426 // if (ins->cil_code)
2427 // g_print ("cil code\n");
2428 mono_debug_record_line_number (cfg, ins, offset);
2430 switch (ins->opcode) {
2431 case OP_STOREI1_MEMBASE_IMM:
2432 EMIT_STORE_MEMBASE_IMM (ins, stb);
2434 case OP_STOREI2_MEMBASE_IMM:
2435 EMIT_STORE_MEMBASE_IMM (ins, sth);
2437 case OP_STORE_MEMBASE_IMM:
2438 EMIT_STORE_MEMBASE_IMM (ins, sti);
2440 case OP_STOREI4_MEMBASE_IMM:
2441 EMIT_STORE_MEMBASE_IMM (ins, st);
2443 case OP_STOREI8_MEMBASE_IMM:
2445 EMIT_STORE_MEMBASE_IMM (ins, stx);
2447 /* Only generated by peephole opts */
2448 g_assert ((ins->inst_offset % 8) == 0);
2449 g_assert (ins->inst_imm == 0);
2450 EMIT_STORE_MEMBASE_IMM (ins, stx);
2453 case OP_STOREI1_MEMBASE_REG:
2454 EMIT_STORE_MEMBASE_REG (ins, stb);
2456 case OP_STOREI2_MEMBASE_REG:
2457 EMIT_STORE_MEMBASE_REG (ins, sth);
2459 case OP_STOREI4_MEMBASE_REG:
2460 EMIT_STORE_MEMBASE_REG (ins, st);
2462 case OP_STOREI8_MEMBASE_REG:
2464 EMIT_STORE_MEMBASE_REG (ins, stx);
2466 /* Only used by OP_MEMSET */
2467 EMIT_STORE_MEMBASE_REG (ins, std);
2470 case OP_STORE_MEMBASE_REG:
2471 EMIT_STORE_MEMBASE_REG (ins, sti);
2474 sparc_set (code, ins->inst_c0, ins->dreg);
2475 sparc_ld (code, ins->dreg, sparc_g0, ins->dreg);
2477 case OP_LOADI4_MEMBASE:
2479 EMIT_LOAD_MEMBASE (ins, ldsw);
2481 EMIT_LOAD_MEMBASE (ins, ld);
2484 case OP_LOADU4_MEMBASE:
2485 EMIT_LOAD_MEMBASE (ins, ld);
2487 case OP_LOADU1_MEMBASE:
2488 EMIT_LOAD_MEMBASE (ins, ldub);
2490 case OP_LOADI1_MEMBASE:
2491 EMIT_LOAD_MEMBASE (ins, ldsb);
2493 case OP_LOADU2_MEMBASE:
2494 EMIT_LOAD_MEMBASE (ins, lduh);
2496 case OP_LOADI2_MEMBASE:
2497 EMIT_LOAD_MEMBASE (ins, ldsh);
2499 case OP_LOAD_MEMBASE:
2501 EMIT_LOAD_MEMBASE (ins, ldx);
2503 EMIT_LOAD_MEMBASE (ins, ld);
2507 case OP_LOADI8_MEMBASE:
2508 EMIT_LOAD_MEMBASE (ins, ldx);
2511 case OP_ICONV_TO_I1:
2512 sparc_sll_imm (code, ins->sreg1, 24, sparc_o7);
2513 sparc_sra_imm (code, sparc_o7, 24, ins->dreg);
2515 case OP_ICONV_TO_I2:
2516 sparc_sll_imm (code, ins->sreg1, 16, sparc_o7);
2517 sparc_sra_imm (code, sparc_o7, 16, ins->dreg);
2519 case OP_ICONV_TO_U1:
2520 sparc_and_imm (code, FALSE, ins->sreg1, 0xff, ins->dreg);
2522 case OP_ICONV_TO_U2:
2523 sparc_sll_imm (code, ins->sreg1, 16, sparc_o7);
2524 sparc_srl_imm (code, sparc_o7, 16, ins->dreg);
2526 case OP_LCONV_TO_OVF_U4:
2527 case OP_ICONV_TO_OVF_U4:
2528 /* Only used on V9 */
2529 sparc_cmp_imm (code, ins->sreg1, 0);
2530 mono_add_patch_info (cfg, (guint8*)(code) - (cfg)->native_code,
2531 MONO_PATCH_INFO_EXC, "OverflowException");
2532 sparc_branchp (code, 0, sparc_bl, sparc_xcc_short, 0, 0);
2534 sparc_set (code, 1, sparc_o7);
2535 sparc_sllx_imm (code, sparc_o7, 32, sparc_o7);
2536 sparc_cmp (code, ins->sreg1, sparc_o7);
2537 mono_add_patch_info (cfg, (guint8*)(code) - (cfg)->native_code,
2538 MONO_PATCH_INFO_EXC, "OverflowException");
2539 sparc_branchp (code, 0, sparc_bge, sparc_xcc_short, 0, 0);
2541 sparc_mov_reg_reg (code, ins->sreg1, ins->dreg);
2543 case OP_LCONV_TO_OVF_I4_UN:
2544 case OP_ICONV_TO_OVF_I4_UN:
2545 /* Only used on V9 */
2551 sparc_cmp (code, ins->sreg1, ins->sreg2);
2553 case OP_COMPARE_IMM:
2554 case OP_ICOMPARE_IMM:
2555 if (sparc_is_imm13 (ins->inst_imm))
2556 sparc_cmp_imm (code, ins->sreg1, ins->inst_imm);
2558 sparc_set (code, ins->inst_imm, sparc_o7);
2559 sparc_cmp (code, ins->sreg1, sparc_o7);
2564 * gdb does not like encountering 'ta 1' in the debugged code. So
2565 * instead of emitting a trap, we emit a call a C function and place a
2568 //sparc_ta (code, 1);
2569 mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_ABS, mono_break);
2574 sparc_add (code, TRUE, ins->sreg1, ins->sreg2, ins->dreg);
2577 sparc_add (code, FALSE, ins->sreg1, ins->sreg2, ins->dreg);
2582 /* according to inssel-long32.brg, this should set cc */
2583 EMIT_ALU_IMM (ins, add, TRUE);
2587 /* according to inssel-long32.brg, this should set cc */
2588 sparc_addx (code, TRUE, ins->sreg1, ins->sreg2, ins->dreg);
2592 EMIT_ALU_IMM (ins, addx, TRUE);
2596 sparc_sub (code, TRUE, ins->sreg1, ins->sreg2, ins->dreg);
2599 sparc_sub (code, FALSE, ins->sreg1, ins->sreg2, ins->dreg);
2604 /* according to inssel-long32.brg, this should set cc */
2605 EMIT_ALU_IMM (ins, sub, TRUE);
2609 /* according to inssel-long32.brg, this should set cc */
2610 sparc_subx (code, TRUE, ins->sreg1, ins->sreg2, ins->dreg);
2614 EMIT_ALU_IMM (ins, subx, TRUE);
2617 sparc_and (code, FALSE, ins->sreg1, ins->sreg2, ins->dreg);
2621 EMIT_ALU_IMM (ins, and, FALSE);
2624 /* Sign extend sreg1 into %y */
2625 sparc_sra_imm (code, ins->sreg1, 31, sparc_o7);
2626 sparc_wry (code, sparc_o7, sparc_g0);
2627 sparc_sdiv (code, TRUE, ins->sreg1, ins->sreg2, ins->dreg);
2628 EMIT_COND_SYSTEM_EXCEPTION_GENERAL (code, sparc_boverflow, "ArithmeticException", TRUE, sparc_icc_short);
2631 sparc_wry (code, sparc_g0, sparc_g0);
2632 sparc_udiv (code, FALSE, ins->sreg1, ins->sreg2, ins->dreg);
2638 /* Transform division into a shift */
2639 for (i = 1; i < 30; ++i) {
2641 if (ins->inst_imm == imm)
2647 sparc_srl_imm (code, ins->sreg1, 31, sparc_o7);
2648 sparc_add (code, FALSE, ins->sreg1, sparc_o7, ins->dreg);
2649 sparc_sra_imm (code, ins->dreg, 1, ins->dreg);
2652 /* http://compilers.iecc.com/comparch/article/93-04-079 */
2653 sparc_sra_imm (code, ins->sreg1, 31, sparc_o7);
2654 sparc_srl_imm (code, sparc_o7, 32 - i, sparc_o7);
2655 sparc_add (code, FALSE, ins->sreg1, sparc_o7, ins->dreg);
2656 sparc_sra_imm (code, ins->dreg, i, ins->dreg);
2660 /* Sign extend sreg1 into %y */
2661 sparc_sra_imm (code, ins->sreg1, 31, sparc_o7);
2662 sparc_wry (code, sparc_o7, sparc_g0);
2663 EMIT_ALU_IMM (ins, sdiv, TRUE);
2664 EMIT_COND_SYSTEM_EXCEPTION_GENERAL (code, sparc_boverflow, "ArithmeticException", TRUE, sparc_icc_short);
2668 case OP_IDIV_UN_IMM:
2669 sparc_wry (code, sparc_g0, sparc_g0);
2670 EMIT_ALU_IMM (ins, udiv, FALSE);
2673 /* Sign extend sreg1 into %y */
2674 sparc_sra_imm (code, ins->sreg1, 31, sparc_o7);
2675 sparc_wry (code, sparc_o7, sparc_g0);
2676 sparc_sdiv (code, TRUE, ins->sreg1, ins->sreg2, sparc_o7);
2677 EMIT_COND_SYSTEM_EXCEPTION_GENERAL (code, sparc_boverflow, "ArithmeticException", TRUE, sparc_icc_short);
2678 sparc_smul (code, FALSE, ins->sreg2, sparc_o7, sparc_o7);
2679 sparc_sub (code, FALSE, ins->sreg1, sparc_o7, ins->dreg);
2682 sparc_wry (code, sparc_g0, sparc_g0);
2683 sparc_udiv (code, FALSE, ins->sreg1, ins->sreg2, sparc_o7);
2684 sparc_umul (code, FALSE, ins->sreg2, sparc_o7, sparc_o7);
2685 sparc_sub (code, FALSE, ins->sreg1, sparc_o7, ins->dreg);
2689 /* Sign extend sreg1 into %y */
2690 sparc_sra_imm (code, ins->sreg1, 31, sparc_o7);
2691 sparc_wry (code, sparc_o7, sparc_g0);
2692 if (!sparc_is_imm13 (ins->inst_imm)) {
2693 sparc_set (code, ins->inst_imm, GP_SCRATCH_REG);
2694 sparc_sdiv (code, TRUE, ins->sreg1, GP_SCRATCH_REG, sparc_o7);
2695 EMIT_COND_SYSTEM_EXCEPTION_GENERAL (code, sparc_boverflow, "ArithmeticException", TRUE, sparc_icc_short);
2696 sparc_smul (code, FALSE, sparc_o7, GP_SCRATCH_REG, sparc_o7);
2699 sparc_sdiv_imm (code, TRUE, ins->sreg1, ins->inst_imm, sparc_o7);
2700 EMIT_COND_SYSTEM_EXCEPTION_GENERAL (code, sparc_boverflow, "ArithmeticException", TRUE, sparc_icc_short);
2701 sparc_smul_imm (code, FALSE, sparc_o7, ins->inst_imm, sparc_o7);
2703 sparc_sub (code, FALSE, ins->sreg1, sparc_o7, ins->dreg);
2705 case OP_IREM_UN_IMM:
2706 sparc_set (code, ins->inst_imm, GP_SCRATCH_REG);
2707 sparc_wry (code, sparc_g0, sparc_g0);
2708 sparc_udiv (code, FALSE, ins->sreg1, GP_SCRATCH_REG, sparc_o7);
2709 sparc_umul (code, FALSE, GP_SCRATCH_REG, sparc_o7, sparc_o7);
2710 sparc_sub (code, FALSE, ins->sreg1, sparc_o7, ins->dreg);
2713 sparc_or (code, FALSE, ins->sreg1, ins->sreg2, ins->dreg);
2717 EMIT_ALU_IMM (ins, or, FALSE);
2720 sparc_xor (code, FALSE, ins->sreg1, ins->sreg2, ins->dreg);
2724 EMIT_ALU_IMM (ins, xor, FALSE);
2727 sparc_sll (code, ins->sreg1, ins->sreg2, ins->dreg);
2731 if (ins->inst_imm < (1 << 5))
2732 sparc_sll_imm (code, ins->sreg1, ins->inst_imm, ins->dreg);
2734 sparc_set (code, ins->inst_imm, sparc_o7);
2735 sparc_sll (code, ins->sreg1, sparc_o7, ins->dreg);
2739 sparc_sra (code, ins->sreg1, ins->sreg2, ins->dreg);
2743 if (ins->inst_imm < (1 << 5))
2744 sparc_sra_imm (code, ins->sreg1, ins->inst_imm, ins->dreg);
2746 sparc_set (code, ins->inst_imm, sparc_o7);
2747 sparc_sra (code, ins->sreg1, sparc_o7, ins->dreg);
2751 case OP_ISHR_UN_IMM:
2752 if (ins->inst_imm < (1 << 5))
2753 sparc_srl_imm (code, ins->sreg1, ins->inst_imm, ins->dreg);
2755 sparc_set (code, ins->inst_imm, sparc_o7);
2756 sparc_srl (code, ins->sreg1, sparc_o7, ins->dreg);
2760 sparc_srl (code, ins->sreg1, ins->sreg2, ins->dreg);
2763 sparc_sllx (code, ins->sreg1, ins->sreg2, ins->dreg);
2766 if (ins->inst_imm < (1 << 6))
2767 sparc_sllx_imm (code, ins->sreg1, ins->inst_imm, ins->dreg);
2769 sparc_set (code, ins->inst_imm, sparc_o7);
2770 sparc_sllx (code, ins->sreg1, sparc_o7, ins->dreg);
2774 sparc_srax (code, ins->sreg1, ins->sreg2, ins->dreg);
2777 if (ins->inst_imm < (1 << 6))
2778 sparc_srax_imm (code, ins->sreg1, ins->inst_imm, ins->dreg);
2780 sparc_set (code, ins->inst_imm, sparc_o7);
2781 sparc_srax (code, ins->sreg1, sparc_o7, ins->dreg);
2785 sparc_srlx (code, ins->sreg1, ins->sreg2, ins->dreg);
2787 case OP_LSHR_UN_IMM:
2788 if (ins->inst_imm < (1 << 6))
2789 sparc_srlx_imm (code, ins->sreg1, ins->inst_imm, ins->dreg);
2791 sparc_set (code, ins->inst_imm, sparc_o7);
2792 sparc_srlx (code, ins->sreg1, sparc_o7, ins->dreg);
2796 /* can't use sparc_not */
2797 sparc_xnor (code, FALSE, ins->sreg1, sparc_g0, ins->dreg);
2800 /* can't use sparc_neg */
2801 sparc_sub (code, FALSE, sparc_g0, ins->sreg1, ins->dreg);
2804 sparc_smul (code, FALSE, ins->sreg1, ins->sreg2, ins->dreg);
2810 if ((ins->inst_imm == 1) && (ins->sreg1 == ins->dreg))
2813 /* Transform multiplication into a shift */
2814 for (i = 0; i < 30; ++i) {
2816 if (ins->inst_imm == imm)
2820 sparc_sll_imm (code, ins->sreg1, i, ins->dreg);
2822 EMIT_ALU_IMM (ins, smul, FALSE);
2826 sparc_smul (code, TRUE, ins->sreg1, ins->sreg2, ins->dreg);
2827 sparc_rdy (code, sparc_g1);
2828 sparc_sra_imm (code, ins->dreg, 31, sparc_o7);
2829 sparc_cmp (code, sparc_g1, sparc_o7);
2830 EMIT_COND_SYSTEM_EXCEPTION_GENERAL (ins, sparc_bne, "OverflowException", TRUE, sparc_icc_short);
2832 case OP_IMUL_OVF_UN:
2833 sparc_umul (code, TRUE, ins->sreg1, ins->sreg2, ins->dreg);
2834 sparc_rdy (code, sparc_o7);
2835 sparc_cmp (code, sparc_o7, sparc_g0);
2836 EMIT_COND_SYSTEM_EXCEPTION_GENERAL (ins, sparc_bne, "OverflowException", TRUE, sparc_icc_short);
2839 sparc_set (code, ins->inst_c0, ins->dreg);
2842 sparc_set (code, ins->inst_l, ins->dreg);
2845 mono_add_patch_info (cfg, offset, (MonoJumpInfoType)ins->inst_i1, ins->inst_p0);
2846 sparc_set_template (code, ins->dreg);
2849 mono_add_patch_info (cfg, offset, (MonoJumpInfoType)ins->inst_i1, ins->inst_p0);
2850 sparc_set_template (code, ins->dreg);
2852 case OP_ICONV_TO_I4:
2853 case OP_ICONV_TO_U4:
2855 if (ins->sreg1 != ins->dreg)
2856 sparc_mov_reg_reg (code, ins->sreg1, ins->dreg);
2860 if (ins->sreg1 != ins->dreg)
2861 sparc_fmovd (code, ins->sreg1, ins->dreg);
2863 sparc_fmovs (code, ins->sreg1, ins->dreg);
2864 sparc_fmovs (code, ins->sreg1 + 1, ins->dreg + 1);
2868 if (cfg->method->save_lmf)
2871 code = emit_load_volatile_arguments (cfg, code);
2872 mono_add_patch_info (cfg, (guint8*)code - cfg->native_code, MONO_PATCH_INFO_METHOD_JUMP, ins->inst_p0);
2873 sparc_set_template (code, sparc_o7);
2874 sparc_jmpl (code, sparc_o7, sparc_g0, sparc_g0);
2875 /* Restore parent frame in delay slot */
2876 sparc_restore_imm (code, sparc_g0, 0, sparc_g0);
2879 /* ensure ins->sreg1 is not NULL */
2880 /* Might be misaligned in case of vtypes so use a byte load */
2881 sparc_ldsb_imm (code, ins->sreg1, 0, sparc_g0);
2884 sparc_add_imm (code, FALSE, sparc_fp, cfg->sig_cookie, sparc_o7);
2885 sparc_sti_imm (code, sparc_o7, ins->sreg1, 0);
2893 call = (MonoCallInst*)ins;
2894 g_assert (!call->virtual);
2895 code = emit_save_sp_to_lmf (cfg, code);
2896 if (ins->flags & MONO_INST_HAS_METHOD)
2897 code = emit_call (cfg, code, MONO_PATCH_INFO_METHOD, call->method);
2899 code = emit_call (cfg, code, MONO_PATCH_INFO_ABS, call->fptr);
2901 code = emit_vret_token (ins, code);
2902 code = emit_move_return_value (ins, code);
2908 case OP_VOIDCALL_REG:
2910 call = (MonoCallInst*)ins;
2911 code = emit_save_sp_to_lmf (cfg, code);
2912 sparc_jmpl (code, ins->sreg1, sparc_g0, sparc_callsite);
2914 * We emit a special kind of nop in the delay slot to tell the
2915 * trampoline code that this is a virtual call, thus an unbox
2916 * trampoline might need to be called.
2919 sparc_or_imm (code, FALSE, sparc_g0, 0xca, sparc_g0);
2923 code = emit_vret_token (ins, code);
2924 code = emit_move_return_value (ins, code);
2926 case OP_FCALL_MEMBASE:
2927 case OP_LCALL_MEMBASE:
2928 case OP_VCALL_MEMBASE:
2929 case OP_VCALL2_MEMBASE:
2930 case OP_VOIDCALL_MEMBASE:
2931 case OP_CALL_MEMBASE:
2932 call = (MonoCallInst*)ins;
2933 code = emit_save_sp_to_lmf (cfg, code);
2934 if (sparc_is_imm13 (ins->inst_offset)) {
2935 sparc_ldi_imm (code, ins->inst_basereg, ins->inst_offset, sparc_o7);
2937 sparc_set (code, ins->inst_offset, sparc_o7);
2938 sparc_ldi (code, ins->inst_basereg, sparc_o7, sparc_o7);
2940 sparc_jmpl (code, sparc_o7, sparc_g0, sparc_callsite);
2942 sparc_or_imm (code, FALSE, sparc_g0, 0xca, sparc_g0);
2946 code = emit_vret_token (ins, code);
2947 code = emit_move_return_value (ins, code);
2950 if (mono_method_signature (cfg->method)->ret->type == MONO_TYPE_R4)
2951 sparc_fdtos (code, ins->sreg1, sparc_f0);
2954 sparc_fmovd (code, ins->sreg1, ins->dreg);
2956 /* FIXME: Why not use fmovd ? */
2957 sparc_fmovs (code, ins->sreg1, ins->dreg);
2958 sparc_fmovs (code, ins->sreg1 + 1, ins->dreg + 1);
2966 #ifdef MONO_ARCH_SIGSEGV_ON_ALTSTACK
2967 /* Perform stack touching */
2971 /* Keep alignment */
2972 /* Add 4 to compensate for the rounding of localloc_offset */
2973 sparc_add_imm (code, FALSE, ins->sreg1, 4 + MONO_ARCH_LOCALLOC_ALIGNMENT - 1, ins->dreg);
2974 sparc_set (code, ~(MONO_ARCH_LOCALLOC_ALIGNMENT - 1), sparc_o7);
2975 sparc_and (code, FALSE, ins->dreg, sparc_o7, ins->dreg);
2977 if ((ins->flags & MONO_INST_INIT) && (ins->sreg1 == ins->dreg)) {
2979 size_reg = sparc_g4;
2981 size_reg = sparc_g1;
2983 sparc_mov_reg_reg (code, ins->dreg, size_reg);
2986 size_reg = ins->sreg1;
2988 sparc_sub (code, FALSE, sparc_sp, ins->dreg, ins->dreg);
2989 /* Keep %sp valid at all times */
2990 sparc_mov_reg_reg (code, ins->dreg, sparc_sp);
2991 /* Round localloc_offset too so the result is at least 8 aligned */
2992 offset2 = ALIGN_TO (cfg->arch.localloc_offset, 8);
2993 g_assert (sparc_is_imm13 (MONO_SPARC_STACK_BIAS + offset2));
2994 sparc_add_imm (code, FALSE, ins->dreg, MONO_SPARC_STACK_BIAS + offset2, ins->dreg);
2996 if (ins->flags & MONO_INST_INIT) {
2998 /* Initialize memory region */
2999 sparc_cmp_imm (code, size_reg, 0);
3001 sparc_branch (code, 0, sparc_be, 0);
3003 sparc_set (code, 0, sparc_o7);
3004 sparc_sub_imm (code, 0, size_reg, mono_hwcap_sparc_is_v9 ? 8 : 4, size_reg);
3007 if (mono_hwcap_sparc_is_v9)
3008 sparc_stx (code, sparc_g0, ins->dreg, sparc_o7);
3010 sparc_st (code, sparc_g0, ins->dreg, sparc_o7);
3011 sparc_cmp (code, sparc_o7, size_reg);
3013 sparc_branch (code, 0, sparc_bl, 0);
3014 sparc_patch (br [2], br [1]);
3016 sparc_add_imm (code, 0, sparc_o7, mono_hwcap_sparc_is_v9 ? 8 : 4, sparc_o7);
3017 sparc_patch (br [0], code);
3021 case OP_LOCALLOC_IMM: {
3022 gint32 offset = ins->inst_imm;
3025 #ifdef MONO_ARCH_SIGSEGV_ON_ALTSTACK
3026 /* Perform stack touching */
3030 /* To compensate for the rounding of localloc_offset */
3031 offset += sizeof (gpointer);
3032 offset = ALIGN_TO (offset, MONO_ARCH_FRAME_ALIGNMENT);
3033 if (sparc_is_imm13 (offset))
3034 sparc_sub_imm (code, FALSE, sparc_sp, offset, sparc_sp);
3036 sparc_set (code, offset, sparc_o7);
3037 sparc_sub (code, FALSE, sparc_sp, sparc_o7, sparc_sp);
3039 /* Round localloc_offset too so the result is at least 8 aligned */
3040 offset2 = ALIGN_TO (cfg->arch.localloc_offset, 8);
3041 g_assert (sparc_is_imm13 (MONO_SPARC_STACK_BIAS + offset2));
3042 sparc_add_imm (code, FALSE, sparc_sp, MONO_SPARC_STACK_BIAS + offset2, ins->dreg);
3043 if ((ins->flags & MONO_INST_INIT) && (offset > 0)) {
3049 while (i < offset) {
3050 if (mono_hwcap_sparc_is_v9) {
3051 sparc_stx_imm (code, sparc_g0, ins->dreg, i);
3055 sparc_st_imm (code, sparc_g0, ins->dreg, i);
3061 sparc_set (code, offset, sparc_o7);
3062 sparc_sub_imm (code, 0, sparc_o7, mono_hwcap_sparc_is_v9 ? 8 : 4, sparc_o7);
3063 /* beginning of loop */
3065 if (mono_hwcap_sparc_is_v9)
3066 sparc_stx (code, sparc_g0, ins->dreg, sparc_o7);
3068 sparc_st (code, sparc_g0, ins->dreg, sparc_o7);
3069 sparc_cmp_imm (code, sparc_o7, 0);
3071 sparc_branch (code, 0, sparc_bne, 0);
3073 sparc_sub_imm (code, 0, sparc_o7, mono_hwcap_sparc_is_v9 ? 8 : 4, sparc_o7);
3074 sparc_patch (br [1], br [0]);
3080 sparc_mov_reg_reg (code, ins->sreg1, sparc_o0);
3081 mono_add_patch_info (cfg, (guint8*)code - cfg->native_code, MONO_PATCH_INFO_INTERNAL_METHOD,
3082 (gpointer)"mono_arch_throw_exception");
3086 sparc_mov_reg_reg (code, ins->sreg1, sparc_o0);
3087 mono_add_patch_info (cfg, (guint8*)code - cfg->native_code, MONO_PATCH_INFO_INTERNAL_METHOD,
3088 (gpointer)"mono_arch_rethrow_exception");
3091 case OP_START_HANDLER: {
3093 * The START_HANDLER instruction marks the beginning of a handler
3094 * block. It is called using a call instruction, so %o7 contains
3095 * the return address. Since the handler executes in the same stack
3096 * frame as the method itself, we can't use save/restore to save
3097 * the return address. Instead, we save it into a dedicated
3100 MonoInst *spvar = mono_find_spvar_for_region (cfg, bb->region);
3101 if (!sparc_is_imm13 (spvar->inst_offset)) {
3102 sparc_set (code, spvar->inst_offset, GP_SCRATCH_REG);
3103 sparc_sti (code, sparc_o7, spvar->inst_basereg, GP_SCRATCH_REG);
3106 sparc_sti_imm (code, sparc_o7, spvar->inst_basereg, spvar->inst_offset);
3109 case OP_ENDFILTER: {
3110 MonoInst *spvar = mono_find_spvar_for_region (cfg, bb->region);
3111 if (!sparc_is_imm13 (spvar->inst_offset)) {
3112 sparc_set (code, spvar->inst_offset, GP_SCRATCH_REG);
3113 sparc_ldi (code, spvar->inst_basereg, GP_SCRATCH_REG, sparc_o7);
3116 sparc_ldi_imm (code, spvar->inst_basereg, spvar->inst_offset, sparc_o7);
3117 sparc_jmpl_imm (code, sparc_o7, 8, sparc_g0);
3119 sparc_mov_reg_reg (code, ins->sreg1, sparc_o0);
3122 case OP_ENDFINALLY: {
3123 MonoInst *spvar = mono_find_spvar_for_region (cfg, bb->region);
3124 if (!sparc_is_imm13 (spvar->inst_offset)) {
3125 sparc_set (code, spvar->inst_offset, GP_SCRATCH_REG);
3126 sparc_ldi (code, spvar->inst_basereg, GP_SCRATCH_REG, sparc_o7);
3129 sparc_ldi_imm (code, spvar->inst_basereg, spvar->inst_offset, sparc_o7);
3130 sparc_jmpl_imm (code, sparc_o7, 8, sparc_g0);
3134 case OP_CALL_HANDLER:
3135 mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_BB, ins->inst_target_bb);
3136 /* This is a jump inside the method, so call_simple works even on V9 */
3137 sparc_call_simple (code, 0);
3139 mono_cfg_add_try_hole (cfg, ins->inst_eh_block, code, bb);
3142 ins->inst_c0 = (guint8*)code - cfg->native_code;
3144 case OP_RELAXED_NOP:
3147 case OP_DUMMY_STORE:
3148 case OP_NOT_REACHED:
3152 //g_print ("target: %p, next: %p, curr: %p, last: %p\n", ins->inst_target_bb, bb->next_bb, ins, bb->last_ins);
3153 if ((ins->inst_target_bb == bb->next_bb) && ins == bb->last_ins)
3155 if (ins->inst_target_bb->native_offset) {
3156 gint32 disp = (ins->inst_target_bb->native_offset - ((guint8*)code - cfg->native_code)) >> 2;
3157 g_assert (sparc_is_imm22 (disp));
3158 sparc_branch (code, 1, sparc_ba, disp);
3160 mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_BB, ins->inst_target_bb);
3161 sparc_branch (code, 1, sparc_ba, 0);
3166 sparc_jmp (code, ins->sreg1, sparc_g0);
3174 if (v64 && (cfg->opt & MONO_OPT_CMOV)) {
3175 sparc_clr_reg (code, ins->dreg);
3176 sparc_movcc_imm (code, sparc_xcc, opcode_to_sparc_cond (ins->opcode), 1, ins->dreg);
3179 sparc_clr_reg (code, ins->dreg);
3181 sparc_branchp (code, 1, opcode_to_sparc_cond (ins->opcode), DEFAULT_ICC, 0, 2);
3183 sparc_branch (code, 1, opcode_to_sparc_cond (ins->opcode), 2);
3186 sparc_set (code, 1, ins->dreg);
3194 if (v64 && (cfg->opt & MONO_OPT_CMOV)) {
3195 sparc_clr_reg (code, ins->dreg);
3196 sparc_movcc_imm (code, sparc_icc, opcode_to_sparc_cond (ins->opcode), 1, ins->dreg);
3199 sparc_clr_reg (code, ins->dreg);
3200 sparc_branchp (code, 1, opcode_to_sparc_cond (ins->opcode), sparc_icc_short, 0, 2);
3202 sparc_set (code, 1, ins->dreg);
3205 case OP_COND_EXC_EQ:
3206 case OP_COND_EXC_NE_UN:
3207 case OP_COND_EXC_LT:
3208 case OP_COND_EXC_LT_UN:
3209 case OP_COND_EXC_GT:
3210 case OP_COND_EXC_GT_UN:
3211 case OP_COND_EXC_GE:
3212 case OP_COND_EXC_GE_UN:
3213 case OP_COND_EXC_LE:
3214 case OP_COND_EXC_LE_UN:
3215 case OP_COND_EXC_OV:
3216 case OP_COND_EXC_NO:
3218 case OP_COND_EXC_NC:
3219 case OP_COND_EXC_IEQ:
3220 case OP_COND_EXC_INE_UN:
3221 case OP_COND_EXC_ILT:
3222 case OP_COND_EXC_ILT_UN:
3223 case OP_COND_EXC_IGT:
3224 case OP_COND_EXC_IGT_UN:
3225 case OP_COND_EXC_IGE:
3226 case OP_COND_EXC_IGE_UN:
3227 case OP_COND_EXC_ILE:
3228 case OP_COND_EXC_ILE_UN:
3229 case OP_COND_EXC_IOV:
3230 case OP_COND_EXC_INO:
3231 case OP_COND_EXC_IC:
3232 case OP_COND_EXC_INC:
3236 EMIT_COND_SYSTEM_EXCEPTION (ins, opcode_to_sparc_cond (ins->opcode), ins->inst_p1);
3239 case OP_SPARC_COND_EXC_EQZ:
3240 EMIT_COND_SYSTEM_EXCEPTION_BPR (ins, brz, ins->inst_p1);
3242 case OP_SPARC_COND_EXC_GEZ:
3243 EMIT_COND_SYSTEM_EXCEPTION_BPR (ins, brgez, ins->inst_p1);
3245 case OP_SPARC_COND_EXC_GTZ:
3246 EMIT_COND_SYSTEM_EXCEPTION_BPR (ins, brgz, ins->inst_p1);
3248 case OP_SPARC_COND_EXC_LEZ:
3249 EMIT_COND_SYSTEM_EXCEPTION_BPR (ins, brlez, ins->inst_p1);
3251 case OP_SPARC_COND_EXC_LTZ:
3252 EMIT_COND_SYSTEM_EXCEPTION_BPR (ins, brlz, ins->inst_p1);
3254 case OP_SPARC_COND_EXC_NEZ:
3255 EMIT_COND_SYSTEM_EXCEPTION_BPR (ins, brnz, ins->inst_p1);
3268 if (mono_hwcap_sparc_is_v9)
3269 EMIT_COND_BRANCH_PREDICTED (ins, opcode_to_sparc_cond (ins->opcode), 1, 1);
3271 EMIT_COND_BRANCH (ins, opcode_to_sparc_cond (ins->opcode), 1, 1);
3276 EMIT_COND_BRANCH_BPR (ins, brz, 1, 1, 1);
3278 case OP_SPARC_BRLEZ:
3279 EMIT_COND_BRANCH_BPR (ins, brlez, 1, 1, 1);
3282 EMIT_COND_BRANCH_BPR (ins, brlz, 1, 1, 1);
3285 EMIT_COND_BRANCH_BPR (ins, brnz, 1, 1, 1);
3288 EMIT_COND_BRANCH_BPR (ins, brgz, 1, 1, 1);
3290 case OP_SPARC_BRGEZ:
3291 EMIT_COND_BRANCH_BPR (ins, brgez, 1, 1, 1);
3294 /* floating point opcodes */
3296 mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_R8, ins->inst_p0);
3298 sparc_set_template (code, sparc_o7);
3300 sparc_sethi (code, 0, sparc_o7);
3302 sparc_lddf_imm (code, sparc_o7, 0, ins->dreg);
3305 mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_R4, ins->inst_p0);
3307 sparc_set_template (code, sparc_o7);
3309 sparc_sethi (code, 0, sparc_o7);
3311 sparc_ldf_imm (code, sparc_o7, 0, FP_SCRATCH_REG);
3313 /* Extend to double */
3314 sparc_fstod (code, FP_SCRATCH_REG, ins->dreg);
3316 case OP_STORER8_MEMBASE_REG:
3317 if (!sparc_is_imm13 (ins->inst_offset + 4)) {
3318 sparc_set (code, ins->inst_offset, sparc_o7);
3319 /* SPARCV9 handles misaligned fp loads/stores */
3320 if (!v64 && (ins->inst_offset % 8)) {
3322 sparc_add (code, FALSE, ins->inst_destbasereg, sparc_o7, sparc_o7);
3323 sparc_stf (code, ins->sreg1, sparc_o7, sparc_g0);
3324 sparc_stf_imm (code, ins->sreg1 + 1, sparc_o7, 4);
3326 sparc_stdf (code, ins->sreg1, ins->inst_destbasereg, sparc_o7);
3329 if (!v64 && (ins->inst_offset % 8)) {
3331 sparc_stf_imm (code, ins->sreg1, ins->inst_destbasereg, ins->inst_offset);
3332 sparc_stf_imm (code, ins->sreg1 + 1, ins->inst_destbasereg, ins->inst_offset + 4);
3334 sparc_stdf_imm (code, ins->sreg1, ins->inst_destbasereg, ins->inst_offset);
3337 case OP_LOADR8_MEMBASE:
3338 EMIT_LOAD_MEMBASE (ins, lddf);
3340 case OP_STORER4_MEMBASE_REG:
3341 /* This requires a double->single conversion */
3342 sparc_fdtos (code, ins->sreg1, FP_SCRATCH_REG);
3343 if (!sparc_is_imm13 (ins->inst_offset)) {
3344 sparc_set (code, ins->inst_offset, sparc_o7);
3345 sparc_stf (code, FP_SCRATCH_REG, ins->inst_destbasereg, sparc_o7);
3348 sparc_stf_imm (code, FP_SCRATCH_REG, ins->inst_destbasereg, ins->inst_offset);
3350 case OP_LOADR4_MEMBASE: {
3351 /* ldf needs a single precision register */
3352 int dreg = ins->dreg;
3353 ins->dreg = FP_SCRATCH_REG;
3354 EMIT_LOAD_MEMBASE (ins, ldf);
3356 /* Extend to double */
3357 sparc_fstod (code, FP_SCRATCH_REG, ins->dreg);
3360 case OP_ICONV_TO_R4: {
3361 MonoInst *spill = cfg->arch.float_spill_slot;
3362 gint32 reg = spill->inst_basereg;
3363 gint32 offset = spill->inst_offset;
3365 g_assert (spill->opcode == OP_REGOFFSET);
3367 if (!sparc_is_imm13 (offset)) {
3368 sparc_set (code, offset, sparc_o7);
3369 sparc_stx (code, ins->sreg1, reg, offset);
3370 sparc_lddf (code, reg, offset, FP_SCRATCH_REG);
3372 sparc_stx_imm (code, ins->sreg1, reg, offset);
3373 sparc_lddf_imm (code, reg, offset, FP_SCRATCH_REG);
3375 sparc_fxtos (code, FP_SCRATCH_REG, FP_SCRATCH_REG);
3377 if (!sparc_is_imm13 (offset)) {
3378 sparc_set (code, offset, sparc_o7);
3379 sparc_st (code, ins->sreg1, reg, sparc_o7);
3380 sparc_ldf (code, reg, sparc_o7, FP_SCRATCH_REG);
3382 sparc_st_imm (code, ins->sreg1, reg, offset);
3383 sparc_ldf_imm (code, reg, offset, FP_SCRATCH_REG);
3385 sparc_fitos (code, FP_SCRATCH_REG, FP_SCRATCH_REG);
3387 sparc_fstod (code, FP_SCRATCH_REG, ins->dreg);
3390 case OP_ICONV_TO_R8: {
3391 MonoInst *spill = cfg->arch.float_spill_slot;
3392 gint32 reg = spill->inst_basereg;
3393 gint32 offset = spill->inst_offset;
3395 g_assert (spill->opcode == OP_REGOFFSET);
3398 if (!sparc_is_imm13 (offset)) {
3399 sparc_set (code, offset, sparc_o7);
3400 sparc_stx (code, ins->sreg1, reg, sparc_o7);
3401 sparc_lddf (code, reg, sparc_o7, FP_SCRATCH_REG);
3403 sparc_stx_imm (code, ins->sreg1, reg, offset);
3404 sparc_lddf_imm (code, reg, offset, FP_SCRATCH_REG);
3406 sparc_fxtod (code, FP_SCRATCH_REG, ins->dreg);
3408 if (!sparc_is_imm13 (offset)) {
3409 sparc_set (code, offset, sparc_o7);
3410 sparc_st (code, ins->sreg1, reg, sparc_o7);
3411 sparc_ldf (code, reg, sparc_o7, FP_SCRATCH_REG);
3413 sparc_st_imm (code, ins->sreg1, reg, offset);
3414 sparc_ldf_imm (code, reg, offset, FP_SCRATCH_REG);
3416 sparc_fitod (code, FP_SCRATCH_REG, ins->dreg);
3420 case OP_FCONV_TO_I1:
3421 case OP_FCONV_TO_U1:
3422 case OP_FCONV_TO_I2:
3423 case OP_FCONV_TO_U2:
3428 case OP_FCONV_TO_I4:
3429 case OP_FCONV_TO_U4: {
3430 MonoInst *spill = cfg->arch.float_spill_slot;
3431 gint32 reg = spill->inst_basereg;
3432 gint32 offset = spill->inst_offset;
3434 g_assert (spill->opcode == OP_REGOFFSET);
3436 sparc_fdtoi (code, ins->sreg1, FP_SCRATCH_REG);
3437 if (!sparc_is_imm13 (offset)) {
3438 sparc_set (code, offset, sparc_o7);
3439 sparc_stdf (code, FP_SCRATCH_REG, reg, sparc_o7);
3440 sparc_ld (code, reg, sparc_o7, ins->dreg);
3442 sparc_stdf_imm (code, FP_SCRATCH_REG, reg, offset);
3443 sparc_ld_imm (code, reg, offset, ins->dreg);
3446 switch (ins->opcode) {
3447 case OP_FCONV_TO_I1:
3448 case OP_FCONV_TO_U1:
3449 sparc_and_imm (code, 0, ins->dreg, 0xff, ins->dreg);
3451 case OP_FCONV_TO_I2:
3452 case OP_FCONV_TO_U2:
3453 sparc_set (code, 0xffff, sparc_o7);
3454 sparc_and (code, 0, ins->dreg, sparc_o7, ins->dreg);
3461 case OP_FCONV_TO_I8:
3462 case OP_FCONV_TO_U8:
3464 g_assert_not_reached ();
3466 case OP_FCONV_TO_R4:
3467 /* FIXME: Change precision ? */
3469 sparc_fmovd (code, ins->sreg1, ins->dreg);
3471 sparc_fmovs (code, ins->sreg1, ins->dreg);
3472 sparc_fmovs (code, ins->sreg1 + 1, ins->dreg + 1);
3475 case OP_LCONV_TO_R_UN: {
3477 g_assert_not_reached ();
3480 case OP_LCONV_TO_OVF_I:
3481 case OP_LCONV_TO_OVF_I4_2: {
3482 guint32 *br [3], *label [1];
3485 * Valid ints: 0xffffffff:8000000 to 00000000:0x7f000000
3487 sparc_cmp_imm (code, ins->sreg1, 0);
3489 sparc_branch (code, 1, sparc_bneg, 0);
3493 /* ms word must be 0 */
3494 sparc_cmp_imm (code, ins->sreg2, 0);
3496 sparc_branch (code, 1, sparc_be, 0);
3501 EMIT_COND_SYSTEM_EXCEPTION (ins, sparc_ba, "OverflowException");
3504 sparc_patch (br [0], code);
3506 /* ms word must 0xfffffff */
3507 sparc_cmp_imm (code, ins->sreg2, -1);
3509 sparc_branch (code, 1, sparc_bne, 0);
3511 sparc_patch (br [2], label [0]);
3514 sparc_patch (br [1], code);
3515 if (ins->sreg1 != ins->dreg)
3516 sparc_mov_reg_reg (code, ins->sreg1, ins->dreg);
3520 sparc_faddd (code, ins->sreg1, ins->sreg2, ins->dreg);
3523 sparc_fsubd (code, ins->sreg1, ins->sreg2, ins->dreg);
3526 sparc_fmuld (code, ins->sreg1, ins->sreg2, ins->dreg);
3529 sparc_fdivd (code, ins->sreg1, ins->sreg2, ins->dreg);
3533 sparc_fnegd (code, ins->sreg1, ins->dreg);
3535 /* FIXME: why don't use fnegd ? */
3536 sparc_fnegs (code, ins->sreg1, ins->dreg);
3540 sparc_fdivd (code, ins->sreg1, ins->sreg2, FP_SCRATCH_REG);
3541 sparc_fmuld (code, ins->sreg2, FP_SCRATCH_REG, FP_SCRATCH_REG);
3542 sparc_fsubd (code, ins->sreg1, FP_SCRATCH_REG, ins->dreg);
3545 sparc_fcmpd (code, ins->sreg1, ins->sreg2);
3552 sparc_fcmpd (code, ins->sreg1, ins->sreg2);
3553 sparc_clr_reg (code, ins->dreg);
3554 switch (ins->opcode) {
3557 sparc_fbranch (code, 1, opcode_to_sparc_cond (ins->opcode), 4);
3559 sparc_set (code, 1, ins->dreg);
3560 sparc_fbranch (code, 1, sparc_fbu, 2);
3562 sparc_set (code, 1, ins->dreg);
3565 sparc_fbranch (code, 1, opcode_to_sparc_cond (ins->opcode), 2);
3567 sparc_set (code, 1, ins->dreg);
3573 EMIT_FLOAT_COND_BRANCH (ins, opcode_to_sparc_cond (ins->opcode), 1, 1);
3576 /* clt.un + brfalse */
3578 sparc_fbranch (code, 1, sparc_fbul, 0);
3581 EMIT_FLOAT_COND_BRANCH (ins, sparc_fba, 1, 1);
3582 sparc_patch (p, (guint8*)code);
3586 /* cgt.un + brfalse */
3588 sparc_fbranch (code, 1, sparc_fbug, 0);
3591 EMIT_FLOAT_COND_BRANCH (ins, sparc_fba, 1, 1);
3592 sparc_patch (p, (guint8*)code);
3596 EMIT_FLOAT_COND_BRANCH (ins, sparc_fbne, 1, 1);
3597 EMIT_FLOAT_COND_BRANCH (ins, sparc_fbu, 1, 1);
3600 EMIT_FLOAT_COND_BRANCH (ins, sparc_fbl, 1, 1);
3601 EMIT_FLOAT_COND_BRANCH (ins, sparc_fbu, 1, 1);
3604 EMIT_FLOAT_COND_BRANCH (ins, sparc_fbg, 1, 1);
3605 EMIT_FLOAT_COND_BRANCH (ins, sparc_fbu, 1, 1);
3608 EMIT_FLOAT_COND_BRANCH (ins, sparc_fbge, 1, 1);
3609 EMIT_FLOAT_COND_BRANCH (ins, sparc_fbu, 1, 1);
3612 EMIT_FLOAT_COND_BRANCH (ins, sparc_fble, 1, 1);
3613 EMIT_FLOAT_COND_BRANCH (ins, sparc_fbu, 1, 1);
3616 MonoInst *spill = cfg->arch.float_spill_slot;
3617 gint32 reg = spill->inst_basereg;
3618 gint32 offset = spill->inst_offset;
3620 g_assert (spill->opcode == OP_REGOFFSET);
3622 if (!sparc_is_imm13 (offset)) {
3623 sparc_set (code, offset, sparc_o7);
3624 sparc_stdf (code, ins->sreg1, reg, sparc_o7);
3625 sparc_lduh (code, reg, sparc_o7, sparc_o7);
3627 sparc_stdf_imm (code, ins->sreg1, reg, offset);
3628 sparc_lduh_imm (code, reg, offset, sparc_o7);
3630 sparc_srl_imm (code, sparc_o7, 4, sparc_o7);
3631 sparc_and_imm (code, FALSE, sparc_o7, 2047, sparc_o7);
3632 sparc_cmp_imm (code, sparc_o7, 2047);
3633 EMIT_COND_SYSTEM_EXCEPTION (ins, sparc_be, "OverflowException");
3635 sparc_fmovd (code, ins->sreg1, ins->dreg);
3637 sparc_fmovs (code, ins->sreg1, ins->dreg);
3638 sparc_fmovs (code, ins->sreg1 + 1, ins->dreg + 1);
3643 case OP_MEMORY_BARRIER:
3644 sparc_membar (code, sparc_membar_all);
3646 case OP_GC_SAFE_POINT:
3651 g_warning ("unknown opcode %s in %s()\n", mono_inst_name (ins->opcode), __FUNCTION__);
3653 g_warning ("%s:%d: unknown opcode %s\n", __FILE__, __LINE__, mono_inst_name (ins->opcode));
3655 g_assert_not_reached ();
3658 if ((((guint8*)code) - code_start) > max_len) {
3659 g_warning ("wrong maximal instruction length of instruction %s (expected %d, got %d)",
3660 mono_inst_name (ins->opcode), max_len, ((guint8*)code) - code_start);
3661 g_assert_not_reached ();
3669 cfg->code_len = (guint8*)code - cfg->native_code;
/*
 * mono_arch_register_lowlevel_calls:
 *
 *   Register the architecture specific low-level helpers as JIT icalls so
 * generated code can reference them by name. On sparc the only such helper
 * is mono_arch_get_lmf_addr, used by the LMF (Last Managed Frame) handling
 * in the prolog/epilog code.
 */
3673 mono_arch_register_lowlevel_calls (void)
/* TRUE: the icall is a "save" icall, i.e. it does not throw and needs no wrapper */
3675 	mono_register_jit_icall (mono_arch_get_lmf_addr, "mono_arch_get_lmf_addr", NULL, TRUE);
/*
 * mono_arch_patch_code:
 *
 *   Walk the list of jump-info entries (JI) collected while emitting METHOD
 * and rewrite each recorded patch site inside CODE with its resolved target
 * address. Returns early (via return_if_nok) if resolving a patch target
 * fails.
 */
3679 mono_arch_patch_code (MonoCompile *cfg, MonoMethod *method, MonoDomain *domain, guint8 *code, MonoJumpInfo *ji, gboolean run_cctors, MonoError *error)
3681 MonoJumpInfo *patch_info;
3685 /* FIXME: Move part of this to arch independent code */
3686 for (patch_info = ji; patch_info; patch_info = patch_info->next) {
/* patch_info->ip.i is an offset into the method; convert it to an absolute address */
3687 unsigned char *ip = patch_info->ip.i + code;
3690 target = mono_resolve_patch_target (method, domain, code, patch_info, run_cctors, error);
/* Propagate resolution failures to the caller instead of patching garbage */
3691 return_if_nok (error);
3693 switch (patch_info->type) {
3694 case MONO_PATCH_INFO_NONE:
3696 case MONO_PATCH_INFO_METHOD_JUMP: {
/* Method jumps use the full sethi/or "set" template so they can be re-patched */
3697 guint32 *ip2 = (guint32*)ip;
3698 /* Might already been patched */
3699 sparc_set_template (ip2, sparc_o7);
/* Common case: patch the branch/sethi sequence at IP to point at TARGET */
3705 sparc_patch ((guint32*)ip, target);
/*
 * mono_arch_instrument_prolog:
 *
 *   Emit instrumentation code at method entry which calls the profiler
 * function FUNC with the method being entered (and, when enabled, a pointer
 * to its arguments). P points at the buffer the new code is emitted into;
 * the updated code pointer is the result.
 *   The incoming argument registers are spilled to their stack slots first
 * so the profiler callback cannot clobber them, and floating point argument
 * registers are saved/restored explicitly since they are caller saved on V9.
 */
3710 mono_arch_instrument_prolog (MonoCompile *cfg, void *func, void *p, gboolean enable_arguments)
3713 guint32 *code = (guint32*)p;
3714 MonoMethodSignature *sig = mono_method_signature (cfg->method);
3717 /* Save registers to stack */
/* Spill the 6 integer input registers %i0..%i5 to the register save area */
3718 for (i = 0; i < 6; ++i)
3719 sparc_sti_imm (code, sparc_i0 + i, sparc_fp, ARGS_OFFSET + (i * sizeof (gpointer)));
3721 cinfo = get_call_info (cfg, sig, FALSE);
3723 /* Save float regs on V9, since they are caller saved */
3724 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
3725 ArgInfo *ainfo = cinfo->args + i;
3726 gint32 stack_offset;
3728 stack_offset = ainfo->offset + ARGS_OFFSET;
3730 if (ainfo->storage == ArgInFloatReg) {
/* Offsets too large for a 13 bit immediate are not handled here */
3731 if (!sparc_is_imm13 (stack_offset))
3733 sparc_stf_imm (code, ainfo->reg, sparc_fp, stack_offset);
3735 else if (ainfo->storage == ArgInDoubleReg) {
3736 /* The offset is guaranteed to be aligned by the ABI rules */
3737 sparc_stdf_imm (code, ainfo->reg, sparc_fp, stack_offset);
/* Call FUNC (method, &args): method in %o0, biased frame pointer in %o1 */
3741 sparc_set (code, cfg->method, sparc_o0);
3742 sparc_add_imm (code, FALSE, sparc_fp, MONO_SPARC_STACK_BIAS, sparc_o1);
3744 mono_add_patch_info (cfg, (guint8*)code-cfg->native_code, MONO_PATCH_INFO_ABS, func);
3747 /* Restore float regs on V9 */
3748 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
3749 ArgInfo *ainfo = cinfo->args + i;
3750 gint32 stack_offset;
3752 stack_offset = ainfo->offset + ARGS_OFFSET;
3754 if (ainfo->storage == ArgInFloatReg) {
3755 if (!sparc_is_imm13 (stack_offset))
3757 sparc_ldf_imm (code, sparc_fp, stack_offset, ainfo->reg);
3759 else if (ainfo->storage == ArgInDoubleReg) {
3760 /* The offset is guaranteed to be aligned by the ABI rules */
3761 sparc_lddf_imm (code, sparc_fp, stack_offset, ainfo->reg);
/*
 * mono_arch_instrument_epilog_full:
 *
 *   Emit instrumentation code at method exit which calls the profiler
 * function FUNC. The return value (still live in %i0/%i1 or %f0 at this
 * point) is saved to the stack around the call and restored afterwards;
 * SAVE_MODE selects how much needs saving based on the return type.
 */
3779 mono_arch_instrument_epilog_full (MonoCompile *cfg, void *func, void *p, gboolean enable_arguments, gboolean preserve_argument_registers)
3781 guint32 *code = (guint32*)p;
3782 int save_mode = SAVE_NONE;
3783 MonoMethod *method = cfg->method;
/* Pick a save strategy from the (underlying) managed return type */
3785 switch (mini_get_underlying_type (mono_method_signature (method)->ret)->type) {
3786 case MONO_TYPE_VOID:
3787 /* special case string .ctor icall */
/* NOTE(review): strcmp() is non-zero when the name is NOT ".ctor", which
 * contradicts the comment above — presumably a `!` was intended; confirm
 * against other backends (e.g. mini-x86.c uses !strcmp here). */
3788 if (strcmp (".ctor", method->name) && method->klass == mono_defaults.string_class)
3789 save_mode = SAVE_ONE;
3791 save_mode = SAVE_NONE;
3796 save_mode = SAVE_ONE;
/* 64 bit return value in a register pair (32 bit ABI) */
3798 save_mode = SAVE_TWO;
3803 save_mode = SAVE_FP;
3805 case MONO_TYPE_VALUETYPE:
3806 save_mode = SAVE_STRUCT;
3809 save_mode = SAVE_ONE;
3813 /* Save the result to the stack and also put it into the output registers */
3815 switch (save_mode) {
/* SAVE_TWO: spill the %i0/%i1 pair and pass it to FUNC in %o1/%o2 */
3818 sparc_st_imm (code, sparc_i0, sparc_fp, 68);
3819 sparc_st_imm (code, sparc_i0, sparc_fp, 72);
3820 sparc_mov_reg_reg (code, sparc_i0, sparc_o1);
3821 sparc_mov_reg_reg (code, sparc_i1, sparc_o2);
3824 sparc_sti_imm (code, sparc_i0, sparc_fp, ARGS_OFFSET);
3825 sparc_mov_reg_reg (code, sparc_i0, sparc_o1);
3829 sparc_stdf_imm (code, sparc_f0, sparc_fp, ARGS_OFFSET);
/* Spill %f0 so the two 32 bit halves can be reloaded into %o1/%o2 */
3831 sparc_stdf_imm (code, sparc_f0, sparc_fp, 72);
3832 sparc_ld_imm (code, sparc_fp, 72, sparc_o1);
3833 sparc_ld_imm (code, sparc_fp, 72 + 4, sparc_o2);
3838 sparc_mov_reg_reg (code, sparc_i0, sparc_o1);
/* SAVE_STRUCT: pass the hidden struct-return pointer stored at %fp+64 */
3840 sparc_ld_imm (code, sparc_fp, 64, sparc_o1);
/* Call FUNC (method, ...): method in %o0 */
3848 sparc_set (code, cfg->method, sparc_o0);
3850 mono_add_patch_info (cfg, (guint8*)code - cfg->native_code, MONO_PATCH_INFO_ABS, func);
3853 /* Restore result */
3855 switch (save_mode) {
/* NOTE(review): both SAVE_TWO reloads target %i0 — the second load (from
 * %fp+72) presumably should restore %i1; the low word of a 64 bit return
 * would otherwise be clobbered. Confirm against the matching save above. */
3857 sparc_ld_imm (code, sparc_fp, 68, sparc_i0);
3858 sparc_ld_imm (code, sparc_fp, 72, sparc_i0);
3861 sparc_ldi_imm (code, sparc_fp, ARGS_OFFSET, sparc_i0);
3864 sparc_lddf_imm (code, sparc_fp, ARGS_OFFSET, sparc_f0);
3875 mono_arch_emit_prolog (MonoCompile *cfg)
3877 MonoMethod *method = cfg->method;
3878 MonoMethodSignature *sig;
3884 cfg->code_size = 256;
3885 cfg->native_code = g_malloc (cfg->code_size);
3886 code = (guint32*)cfg->native_code;
3888 /* FIXME: Generate intermediate code instead */
3890 offset = cfg->stack_offset;
3891 offset += (16 * sizeof (gpointer)); /* register save area */
3893 offset += 4; /* struct/union return pointer */
3896 /* add parameter area size for called functions */
3897 if (cfg->param_area < (6 * sizeof (gpointer)))
3898 /* Reserve space for the first 6 arguments even if it is unused */
3899 offset += 6 * sizeof (gpointer);
3901 offset += cfg->param_area;
3903 /* align the stack size */
3904 offset = ALIGN_TO (offset, MONO_ARCH_FRAME_ALIGNMENT);
3907 * localloc'd memory is stored between the local variables (whose
3908 * size is given by cfg->stack_offset), and between the space reserved
3911 cfg->arch.localloc_offset = offset - cfg->stack_offset;
3913 cfg->stack_offset = offset;
3915 #ifdef MONO_ARCH_SIGSEGV_ON_ALTSTACK
3916 /* Perform stack touching */
3920 if (!sparc_is_imm13 (- cfg->stack_offset)) {
3921 /* Can't use sparc_o7 here, since we're still in the caller's frame */
3922 sparc_set (code, (- cfg->stack_offset), GP_SCRATCH_REG);
3923 sparc_save (code, sparc_sp, GP_SCRATCH_REG, sparc_sp);
3926 sparc_save_imm (code, sparc_sp, - cfg->stack_offset, sparc_sp);
3929 if (strstr (cfg->method->name, "foo")) {
3930 mono_add_patch_info (cfg, (guint8*)code - cfg->native_code, MONO_PATCH_INFO_ABS, mono_sparc_break);
3931 sparc_call_simple (code, 0);
3936 sig = mono_method_signature (method);
3938 cinfo = get_call_info (cfg, sig, FALSE);
3940 /* Keep in sync with emit_load_volatile_arguments */
3941 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
3942 ArgInfo *ainfo = cinfo->args + i;
3943 gint32 stack_offset;
3945 inst = cfg->args [i];
3947 if (sig->hasthis && (i == 0))
3948 arg_type = &mono_defaults.object_class->byval_arg;
3950 arg_type = sig->params [i - sig->hasthis];
3952 stack_offset = ainfo->offset + ARGS_OFFSET;
3954 /* Save the split arguments so they will reside entirely on the stack */
3955 if (ainfo->storage == ArgInSplitRegStack) {
3956 /* Save the register to the stack */
3957 g_assert (inst->opcode == OP_REGOFFSET);
3958 if (!sparc_is_imm13 (stack_offset))
3960 sparc_st_imm (code, sparc_i5, inst->inst_basereg, stack_offset);
3963 if (!v64 && !arg_type->byref && (arg_type->type == MONO_TYPE_R8)) {
3964 /* Save the argument to a dword aligned stack location */
3966 * stack_offset contains the offset of the argument on the stack.
3967 * inst->inst_offset contains the dword aligned offset where the value
3970 if (ainfo->storage == ArgInIRegPair) {
3971 if (!sparc_is_imm13 (inst->inst_offset + 4))
3973 sparc_st_imm (code, sparc_i0 + ainfo->reg, inst->inst_basereg, inst->inst_offset);
3974 sparc_st_imm (code, sparc_i0 + ainfo->reg + 1, inst->inst_basereg, inst->inst_offset + 4);
3977 if (ainfo->storage == ArgInSplitRegStack) {
3979 g_assert_not_reached ();
3981 if (stack_offset != inst->inst_offset) {
3982 /* stack_offset is not dword aligned, so we need to make a copy */
3983 sparc_st_imm (code, sparc_i5, inst->inst_basereg, inst->inst_offset);
3984 sparc_ld_imm (code, sparc_fp, stack_offset + 4, sparc_o7);
3985 sparc_st_imm (code, sparc_o7, inst->inst_basereg, inst->inst_offset + 4);
3989 if (ainfo->storage == ArgOnStackPair) {
3991 g_assert_not_reached ();
3993 if (stack_offset != inst->inst_offset) {
3994 /* stack_offset is not dword aligned, so we need to make a copy */
3995 sparc_ld_imm (code, sparc_fp, stack_offset, sparc_o7);
3996 sparc_st_imm (code, sparc_o7, inst->inst_basereg, inst->inst_offset);
3997 sparc_ld_imm (code, sparc_fp, stack_offset + 4, sparc_o7);
3998 sparc_st_imm (code, sparc_o7, inst->inst_basereg, inst->inst_offset + 4);
4002 g_assert_not_reached ();
4005 if ((ainfo->storage == ArgInIReg) && (inst->opcode != OP_REGVAR)) {
4006 /* Argument in register, but need to be saved to stack */
4007 if (!sparc_is_imm13 (stack_offset))
4009 if ((stack_offset - ARGS_OFFSET) & 0x1)
4010 sparc_stb_imm (code, sparc_i0 + ainfo->reg, inst->inst_basereg, stack_offset);
4012 if ((stack_offset - ARGS_OFFSET) & 0x2)
4013 sparc_sth_imm (code, sparc_i0 + ainfo->reg, inst->inst_basereg, stack_offset);
4015 if ((stack_offset - ARGS_OFFSET) & 0x4)
4016 sparc_st_imm (code, sparc_i0 + ainfo->reg, inst->inst_basereg, stack_offset);
4019 sparc_stx_imm (code, sparc_i0 + ainfo->reg, inst->inst_basereg, stack_offset);
4021 sparc_st_imm (code, sparc_i0 + ainfo->reg, inst->inst_basereg, stack_offset);
4025 if ((ainfo->storage == ArgInIRegPair) && (inst->opcode != OP_REGVAR)) {
4029 /* Argument in regpair, but need to be saved to stack */
4030 if (!sparc_is_imm13 (inst->inst_offset + 4))
4032 sparc_st_imm (code, sparc_i0 + ainfo->reg, inst->inst_basereg, inst->inst_offset);
4033 sparc_st_imm (code, sparc_i0 + ainfo->reg + 1, inst->inst_basereg, inst->inst_offset + 4);
4035 else if ((ainfo->storage == ArgInFloatReg) && (inst->opcode != OP_REGVAR)) {
4036 if (!sparc_is_imm13 (stack_offset))
4038 sparc_stf_imm (code, ainfo->reg, inst->inst_basereg, inst->inst_offset);
4040 else if ((ainfo->storage == ArgInDoubleReg) && (inst->opcode != OP_REGVAR)) {
4041 /* The offset is guaranteed to be aligned by the ABI rules */
4042 sparc_stdf_imm (code, ainfo->reg, inst->inst_basereg, inst->inst_offset);
4045 if ((ainfo->storage == ArgInFloatReg) && (inst->opcode == OP_REGVAR)) {
4046 /* Need to move into the a double precision register */
4047 sparc_fstod (code, ainfo->reg, ainfo->reg - 1);
4050 if ((ainfo->storage == ArgInSplitRegStack) || (ainfo->storage == ArgOnStack))
4051 if (inst->opcode == OP_REGVAR)
4052 /* FIXME: Load the argument into memory */
4058 if (cfg->method->save_lmf) {
4059 gint32 lmf_offset = STACK_BIAS - cfg->arch.lmf_offset;
4062 mono_add_patch_info (cfg, (guint8*)code - cfg->native_code, MONO_PATCH_INFO_IP, NULL);
4063 sparc_set_template (code, sparc_o7);
4064 sparc_sti_imm (code, sparc_o7, sparc_fp, lmf_offset + G_STRUCT_OFFSET (MonoLMF, ip));
4066 sparc_sti_imm (code, sparc_sp, sparc_fp, lmf_offset + G_STRUCT_OFFSET (MonoLMF, sp));
4068 sparc_sti_imm (code, sparc_fp, sparc_fp, lmf_offset + G_STRUCT_OFFSET (MonoLMF, ebp));
4070 /* FIXME: add a relocation for this */
4071 sparc_set (code, cfg->method, sparc_o7);
4072 sparc_sti_imm (code, sparc_o7, sparc_fp, lmf_offset + G_STRUCT_OFFSET (MonoLMF, method));
4074 mono_add_patch_info (cfg, (guint8*)code - cfg->native_code, MONO_PATCH_INFO_INTERNAL_METHOD,
4075 (gpointer)"mono_arch_get_lmf_addr");
4078 code = (guint32*)mono_sparc_emit_save_lmf (code, lmf_offset);
4081 if (mono_jit_trace_calls != NULL && mono_trace_eval (method))
4082 code = mono_arch_instrument_prolog (cfg, mono_trace_enter_method, code, TRUE);
4084 cfg->code_len = (guint8*)code - cfg->native_code;
4086 g_assert (cfg->code_len <= cfg->code_size);
4088 return (guint8*)code;
/*
 * mono_arch_emit_epilog:
 *
 *   Emit the method epilogue: optional call tracing, LMF restore, a V8
 * ABI struct-return adjustment, and the final restore/ret sequence. The
 * code tries to fold the last "or ..., %i0" instruction into the restore
 * so the return value ends up in %o0 without an extra move.
 * NOTE(review): this chunk is a sparse extract — interior lines are
 * missing, so comments describe only the code that is visible here.
 */
4092 mono_arch_emit_epilog (MonoCompile *cfg)
4094 MonoMethod *method = cfg->method;
/* Worst-case epilogue size estimate used to grow the code buffer below. */
4097 int max_epilog_size = 16 + 20 * 4;
4099 if (cfg->method->save_lmf)
4100 max_epilog_size += 128;
4102 if (mono_jit_trace_calls != NULL)
4103 max_epilog_size += 50;
/* Double the buffer until the epilogue (plus slack) is guaranteed to fit. */
4105 while (cfg->code_len + max_epilog_size > (cfg->code_size - 16)) {
4106 cfg->code_size *= 2;
4107 cfg->native_code = g_realloc (cfg->native_code, cfg->code_size);
4108 cfg->stat_code_reallocs++;
4111 code = (guint32*)(cfg->native_code + cfg->code_len);
4113 if (mono_jit_trace_calls != NULL && mono_trace_eval (method))
4114 code = mono_arch_instrument_epilog (cfg, mono_trace_leave_method, code, TRUE);
4116 if (cfg->method->save_lmf) {
/* lmf_offset is biased the same way as in the prologue's LMF save. */
4117 gint32 lmf_offset = STACK_BIAS - cfg->arch.lmf_offset;
4119 code = mono_sparc_emit_restore_lmf (code, lmf_offset);
4123 * The V8 ABI requires that calls to functions which return a structure
/* V8 struct-returning pinvoke: return to %i7+12 to skip the unimp word. */
4126 if (!v64 && mono_method_signature (cfg->method)->pinvoke && MONO_TYPE_ISSTRUCT(mono_method_signature (cfg->method)->ret))
4127 sparc_jmpl_imm (code, sparc_i7, 12, sparc_g0);
4131 /* Only fold last instruction into the restore if the exit block has an in count of 1
4132 and the previous block hasn't been optimized away since it may have an in count > 1 */
4133 if (cfg->bb_exit->in_count == 1 && cfg->bb_exit->in_bb[0]->native_offset != cfg->bb_exit->native_offset)
4137 * FIXME: The last instruction might have a branch pointing into it like in
4138 * int_ceq sparc_i0 <-
4142 /* Try folding last instruction into the restore */
/* Case 1: "or reg, imm13, %i0" (op==2, op3==2, i-bit set) — becomes
 * "restore reg, imm, %o0". code[-1] is presumed to be the ret that
 * gets shifted down into code[-2]'s slot. */
4143 if (can_fold && (sparc_inst_op (code [-2]) == 0x2) && (sparc_inst_op3 (code [-2]) == 0x2) && sparc_inst_imm (code [-2]) && (sparc_inst_rd (code [-2]) == sparc_i0)) {
4144 /* or reg, imm, %i0 */
4145 int reg = sparc_inst_rs1 (code [-2]);
/* Sign-extend the 13-bit immediate field to a full gint32. */
4146 int imm = (((gint32)(sparc_inst_imm13 (code [-2]))) << 19) >> 19;
4147 code [-2] = code [-1];
4149 sparc_restore_imm (code, reg, imm, sparc_o0);
/* Case 2: register form "or reg1, reg2, %i0" — same folding, reg variant. */
4152 if (can_fold && (sparc_inst_op (code [-2]) == 0x2) && (sparc_inst_op3 (code [-2]) == 0x2) && (!sparc_inst_imm (code [-2])) && (sparc_inst_rd (code [-2]) == sparc_i0)) {
4153 /* or reg, reg, %i0 */
4154 int reg1 = sparc_inst_rs1 (code [-2]);
4155 int reg2 = sparc_inst_rs2 (code [-2]);
4156 code [-2] = code [-1];
4158 sparc_restore (code, reg1, reg2, sparc_o0);
/* No folding possible: emit a plain "restore %g0, 0, %g0". */
4161 sparc_restore_imm (code, sparc_g0, 0, sparc_g0);
4163 cfg->code_len = (guint8*)code - cfg->native_code;
4165 g_assert (cfg->code_len < cfg->code_size);
/*
 * mono_arch_emit_exceptions:
 *
 *   Emit out-of-line throw sequences for every MONO_PATCH_INFO_EXC patch.
 * Throw sequences for the same exception class are shared: later throw
 * sites branch to the first sequence, passing their own ip offset in %o1.
 * NOTE(review): this chunk is a sparse extract — interior lines are
 * missing, so comments describe only the code that is visible here.
 */
4170 mono_arch_emit_exceptions (MonoCompile *cfg)
4172 MonoJumpInfo *patch_info;
/* Cache of up to 16 distinct exception classes and the code span of the
 * throw sequence emitted for each (start/end used for branch targets). */
4177 MonoClass *exc_classes [16];
4178 guint8 *exc_throw_start [16], *exc_throw_end [16];
4180 /* Compute needed space */
4181 for (patch_info = cfg->patch_info; patch_info; patch_info = patch_info->next) {
4182 if (patch_info->type == MONO_PATCH_INFO_EXC)
4187 * make sure we have enough space for exceptions
/* Two size estimates are visible; presumably one per branch of a missing
 * conditional (v64 vs v8?) — TODO confirm against the full source. */
4190 code_size = exc_count * (20 * 4);
4192 code_size = exc_count * 24;
4195 while (cfg->code_len + code_size > (cfg->code_size - 16)) {
4196 cfg->code_size *= 2;
4197 cfg->native_code = g_realloc (cfg->native_code, cfg->code_size);
4198 cfg->stat_code_reallocs++;
4201 code = (guint32*)(cfg->native_code + cfg->code_len);
4203 for (patch_info = cfg->patch_info; patch_info; patch_info = patch_info->next) {
4204 switch (patch_info->type) {
4205 case MONO_PATCH_INFO_EXC: {
4206 MonoClass *exc_class;
4207 guint32 *buf, *buf2;
4208 guint32 throw_ip, type_idx;
/* Redirect the in-method branch at patch_info->ip to the sequence
 * emitted here. */
4211 sparc_patch ((guint32*)(cfg->native_code + patch_info->ip.i), code);
4213 exc_class = mono_class_load_from_name (mono_defaults.corlib, "System", patch_info->data.name);
/* Compact index: strip the TYPE_DEF token tag so it fits an imm13 more often. */
4214 type_idx = exc_class->type_token - MONO_TOKEN_TYPE_DEF;
4215 throw_ip = patch_info->ip.i;
4217 /* Find a throw sequence for the same exception class */
4218 for (i = 0; i < nthrows; ++i)
4219 if (exc_classes [i] == exc_class)
/* Reuse path: load this site's ip delta (in words) into %o1 and branch
 * to the shared sequence; the set goes in the delay slot when it fits imm13. */
4223 guint32 throw_offset = (((guint8*)exc_throw_end [i] - cfg->native_code) - throw_ip) >> 2;
4224 if (!sparc_is_imm13 (throw_offset))
4225 sparc_set32 (code, throw_offset, sparc_o1);
4227 disp = (exc_throw_start [i] - (guint8*)code) >> 2;
4228 g_assert (sparc_is_imm22 (disp));
4229 sparc_branch (code, 0, sparc_ba, disp);
4230 if (sparc_is_imm13 (throw_offset))
4231 sparc_set32 (code, throw_offset, sparc_o1);
/* Patch handled; neutralize it so later passes skip it. */
4234 patch_info->type = MONO_PATCH_INFO_NONE;
4237 /* Emit the template for setting o1 */
4239 if (sparc_is_imm13 (((((guint8*)code - cfg->native_code) - throw_ip) >> 2) - 8))
4240 /* Can use a short form */
4243 sparc_set_template (code, sparc_o1);
/* First sequence for this class: remember it for reuse (cache cap is 16). */
4247 exc_classes [nthrows] = exc_class;
4248 exc_throw_start [nthrows] = (guint8*)code;
4252 mono_add_patch_info (cfg, (guint8*)code - cfg->native_code, MONO_PATCH_INFO_ABS, mono_sparc_break);
4256 /* first arg = type token */
4257 /* Pass the type index to reduce the size of the sparc_set */
4258 if (!sparc_is_imm13 (type_idx))
4259 sparc_set32 (code, type_idx, sparc_o0);
4261 /* second arg = offset between the throw ip and the current ip */
4262 /* On sparc, the saved ip points to the call instruction */
4263 disp = (((guint8*)code - cfg->native_code) - throw_ip) >> 2;
4264 sparc_set32 (buf, disp, sparc_o1);
4269 exc_throw_end [nthrows] = (guint8*)code;
/* Rewrite the patch into a call to the corlib exception thrower. */
4273 patch_info->data.name = "mono_arch_throw_corlib_exception";
4274 patch_info->type = MONO_PATCH_INFO_INTERNAL_METHOD;
4275 patch_info->ip.i = (guint8*)code - cfg->native_code;
4279 if (sparc_is_imm13 (type_idx)) {
4280 /* Put it into the delay slot */
4283 sparc_set32 (code, type_idx, sparc_o0);
/* The delay-slot set must be exactly one instruction. */
4284 g_assert (code - buf == 1);
4295 cfg->code_len = (guint8*)code - cfg->native_code;
4297 g_assert (cfg->code_len < cfg->code_size);
/*
 * TLS key holding the per-thread address of the current LMF (Last Managed
 * Frame). Two variants: Solaris thr_* keys when MONO_SPARC_THR_TLS is
 * defined, plain pthread keys otherwise.
 */
4301 gboolean lmf_addr_key_inited = FALSE;
4303 #ifdef MONO_SPARC_THR_TLS
4304 thread_key_t lmf_addr_key;
4306 pthread_key_t lmf_addr_key;
/*
 * mono_arch_get_lmf_addr:
 *
 *   Return the calling thread's LMF address stored under lmf_addr_key.
 * Called from JITted code (see the prologue's mono_arch_get_lmf_addr
 * patch), hence the direct TLS access noted below.
 */
4310 mono_arch_get_lmf_addr (void)
4312 /* This is perf critical so we bypass the IO layer */
4313 /* The thr_... functions seem to be somewhat faster */
4314 #ifdef MONO_SPARC_THR_TLS
/* Solaris variant returns the value through an out parameter (res). */
4316 thr_getspecific (lmf_addr_key, &res);
4319 return pthread_getspecific (lmf_addr_key);
4323 #ifdef MONO_ARCH_SIGSEGV_ON_ALTSTACK
4326 * There seems to be no way to determine stack boundaries under solaris,
4327 * so it's not possible to determine whether a SIGSEGV is caused by stack
4330 #error "--with-sigaltstack=yes not supported on solaris"
/*
 * mono_arch_tls_init:
 *
 *   Per-thread setup: lazily create lmf_addr_key, then store the address
 * of this thread's jit_tls->lmf under it so mono_arch_get_lmf_addr can
 * fetch it cheaply.
 * NOTE(review): the lmf_addr_key_inited guard is a plain gboolean with no
 * locking — racy if two threads initialize concurrently; presumably the
 * first call happens before other threads exist. TODO confirm.
 */
4335 mono_arch_tls_init (void)
4337 MonoJitTlsData *jit_tls;
4339 if (!lmf_addr_key_inited) {
4342 lmf_addr_key_inited = TRUE;
4344 #ifdef MONO_SPARC_THR_TLS
4345 res = thr_keycreate (&lmf_addr_key, NULL);
4347 res = pthread_key_create (&lmf_addr_key, NULL);
4349 g_assert (res == 0);
4353 jit_tls = mono_get_jit_tls ();
4355 #ifdef MONO_SPARC_THR_TLS
4356 thr_setspecific (lmf_addr_key, &jit_tls->lmf);
4358 pthread_setspecific (lmf_addr_key, &jit_tls->lmf);
/* mono_arch_finish_init: late arch-specific init hook; body not visible in
 * this extract — presumably empty on SPARC. TODO confirm. */
4363 mono_arch_finish_init (void)
/* mono_arch_free_jit_tls_data: release arch-specific per-thread JIT data;
 * body not visible in this extract — presumably a no-op on SPARC. */
4368 mono_arch_free_jit_tls_data (MonoJitTlsData *tls)
/*
 * mono_arch_emit_inst_for_method:
 *
 *   Arch-specific intrinsic hook: return a MonoInst implementing cmethod
 * inline, or NULL to fall back to a normal call. Only the NULL default is
 * visible here — no SPARC intrinsics appear in this extract.
 */
4373 mono_arch_emit_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
4375 MonoInst *ins = NULL;
4381 * mono_arch_get_argument_info:
4382 * @csig: a method signature
4383 * @param_count: the number of parameters to consider
4384 * @arg_info: an array to store the result infos
4386 * Gathers information on parameters such as size, alignment and
4387 * padding. arg_info should be large enough to hold param_count + 1 entries.
4389 * Returns the size of the activation frame.
/*
 * mono_arch_get_argument_info (see the doc comment above): fill arg_info
 * with the stack offset and size of `this` (slot 0) and each parameter,
 * using the same get_call_info layout the JIT uses for calls. Offsets are
 * rebased from the biased frame pointer (ARGS_OFFSET - MONO_SPARC_STACK_BIAS).
 * NOTE(review): cinfo from get_call_info is not freed in the visible
 * lines — presumably freed on a missing line; verify against full source.
 */
4392 mono_arch_get_argument_info (MonoMethodSignature *csig, int param_count, MonoJitArgumentInfo *arg_info)
4398 cinfo = get_call_info (NULL, csig, FALSE);
4400 if (csig->hasthis) {
4401 ainfo = &cinfo->args [0];
4402 arg_info [0].offset = ARGS_OFFSET - MONO_SPARC_STACK_BIAS + ainfo->offset;
4405 for (k = 0; k < param_count; k++) {
4406 ainfo = &cinfo->args [k + csig->hasthis];
/* arg_info is 1-based for parameters: entry 0 is reserved for `this`. */
4408 arg_info [k + 1].offset = ARGS_OFFSET - MONO_SPARC_STACK_BIAS + ainfo->offset;
4409 arg_info [k + 1].size = mono_type_size (csig->params [k], &align);
/*
 * mono_arch_context_get_int_reg:
 *
 *   Read integer register `reg` out of a saved MonoContext. Unimplemented
 * on SPARC — any caller reaching this aborts.
 */
4418 mono_arch_context_get_int_reg (MonoContext *ctx, int reg)
4420 /* FIXME: implement */
4421 g_assert_not_reached ();
4425 mono_arch_opcode_supported (int opcode)