/*
 * mini-sparc.c: Sparc backend for the Mono code generator
 *
 * Authors:
 *   Paolo Molaro (lupus@ximian.com)
 *   Dietmar Maurer (dietmar@ximian.com)
 *   Christopher Taylor (ct@gentoo.org)
 *   Mark Crichton (crichton@gimp.org)
 *   Zoltan Varga (vargaz@freemail.hu)
 *
 * (C) 2003 Ximian, Inc.
 */
21 #include <sys/systeminfo.h>
25 #include <mono/metadata/appdomain.h>
26 #include <mono/metadata/debug-helpers.h>
27 #include <mono/utils/mono-math.h>
29 #include "mini-sparc.h"
32 #include "cpu-sparc.h"
35 * Sparc V9 means two things:
36 * - the instruction set
39 * V9 instructions are only usable if the underlying processor is 64 bit. Most Sparc
40 * processors in use are 64 bit processors. The V9 ABI is only usable if the
41 * mono executable is a 64 bit executable. So it would make sense to use the 64 bit
42 * instructions without using the 64 bit ABI.
47 * - %i0..%i<n> hold the incoming arguments, these are never written by JITted
48 * code. Unused input registers are used for global register allocation.
49 * - %l0..%l7 is used for local register allocation
50 * - %o0..%o6 is used for outgoing arguments
51 * - %o7 and %g1 is used as scratch registers in opcodes
52 * - all floating point registers are used for local register allocation except %f0.
53 * Only double precision registers are used.
55 * - fp registers %d0..%d30 are used for parameter passing, and %d32..%d62 are
56 * used for local allocation.
61 * - doubles and longs must be stored in dword aligned locations
65 * The following things are not implemented or do not work:
66 * - some fp arithmetic corner cases
67 * The following tests in mono/mini are expected to fail:
68 * - test_0_simple_double_casts
69 * This test casts (guint64)-1 to double and then back to guint64 again.
70 * Under x86, it returns 0, while under sparc it returns -1.
72 * In addition to this, the runtime requires the trunc function, or its
73 * solaris counterpart, aintl, to do some double->int conversions. If this
74 * function is not available, it is emulated somewhat, but the results can be
80 * - optimize sparc_set according to the memory model
81 * - when non-AOT compiling, compute patch targets immediately so we don't
82 * have to emit the 6 byte template.
84 * - struct arguments/returns
89 * - sparc_call_simple can't be used in a lot of places since the displacement
90 * might not fit into an imm30.
91 * - g1 can't be used in a lot of places since it is used as a scratch reg in
93 * - sparc_f0 can't be used as a scratch register on V9
94 * - the %d34..%d62 fp registers are encoded as: %dx = %f(x - 32 + 1), ie.
96 * - ldind.i4/u4 needs to sign extend/clear out upper word -> slows things down
97 * - ins->dreg can't be used as a scatch register in r4 opcodes since it might
98 * be a double precision register which has no single precision part.
99 * - passing/returning structs is hard to implement, because:
100 * - the spec is very hard to understand
101 * - it requires knowledge about the fields of structure, needs to handle
102 * nested structures etc.
106 * Possible optimizations:
107 * - delay slot scheduling
108 * - allocate large constants to registers
109 * - use %o registers for local allocation
110 * - implement unwinding through native frames
111 * - add more mul/div/rem optimizations
115 #define MONO_SPARC_THR_TLS 1
119 * There was a 64 bit bug in glib-2.2: g_bit_nth_msf (0, -1) would return 32,
120 * causing infinite loops in dominator computation. So glib-2.4 is required.
123 #if GLIB_MAJOR_VERSION == 2 && GLIB_MINOR_VERSION < 4
124 #error "glib 2.4 or later is required for 64 bit mode."
128 #define NOT_IMPLEMENTED do { g_assert_not_reached (); } while (0)
130 #define ALIGN_TO(val,align) (((val) + ((align) - 1)) & ~((align) - 1))
132 #define SIGNAL_STACK_SIZE (64 * 1024)
134 #define STACK_BIAS MONO_SPARC_STACK_BIAS
138 /* %g1 is used by sparc_set */
139 #define GP_SCRATCH_REG sparc_g4
140 /* %f0 is used for parameter passing */
141 #define FP_SCRATCH_REG sparc_f30
142 #define ARGS_OFFSET (STACK_BIAS + 128)
146 #define FP_SCRATCH_REG sparc_f0
147 #define ARGS_OFFSET 68
148 #define GP_SCRATCH_REG sparc_g1
152 /* Whenever the CPU supports v9 instructions */
153 static gboolean sparcv9 = FALSE;
155 /* Whenever this is a 64bit executable */
157 static gboolean v64 = TRUE;
159 static gboolean v64 = FALSE;
162 static gpointer mono_arch_get_lmf_addr (void);
165 mono_spillvar_offset_float (MonoCompile *cfg, int spillvar);
168 mono_arch_regname (int reg) {
169 static const char * rnames[] = {
170 "sparc_g0", "sparc_g1", "sparc_g2", "sparc_g3", "sparc_g4",
171 "sparc_g5", "sparc_g6", "sparc_g7", "sparc_o0", "sparc_o1",
172 "sparc_o2", "sparc_o3", "sparc_o4", "sparc_o5", "sparc_sp",
173 "sparc_call", "sparc_l0", "sparc_l1", "sparc_l2", "sparc_l3",
174 "sparc_l4", "sparc_l5", "sparc_l6", "sparc_l7", "sparc_i0",
175 "sparc_i1", "sparc_i2", "sparc_i3", "sparc_i4", "sparc_i5",
176 "sparc_fp", "sparc_retadr"
178 if (reg >= 0 && reg < 32)
184 * Initialize the cpu to execute managed code.
187 mono_arch_cpu_init (void)
190 /* make sure sparcv9 is initialized for embedded use */
191 mono_arch_cpu_optimizazions(&dummy);
195 * This function returns the optimizations supported on this cpu.
198 mono_arch_cpu_optimizazions (guint32 *exclude_mask)
206 if (!sysinfo (SI_ISALIST, buf, 1024))
207 g_assert_not_reached ();
209 /* From glibc. If the getpagesize is 8192, we're on sparc64, which
210 * (in)directly implies that we're a v9 or better.
211 * Improvements to this are greatly accepted...
212 * Also, we don't differentiate between v7 and v8. I sense SIGILL
213 * sniffing in my future.
215 if (getpagesize() == 8192)
216 strcpy (buf, "sparcv9");
218 strcpy (buf, "sparcv8");
222 * On some processors, the cmov instructions are even slower than the
225 if (strstr (buf, "sparcv9")) {
226 opts |= MONO_OPT_CMOV | MONO_OPT_FCMOV;
230 *exclude_mask |= MONO_OPT_CMOV | MONO_OPT_FCMOV;
236 mono_sparc_break (void)
241 #define flushi(addr) __asm__ __volatile__ ("iflush %0"::"r"(addr):"memory")
242 #else /* assume Sun's compiler */
243 static void flushi(void *addr)
250 void sync_instruction_memory(caddr_t addr, int len);
254 mono_arch_flush_icache (guint8 *code, gint size)
257 /* Hopefully this is optimized based on the actual CPU */
258 sync_instruction_memory (code, size);
260 guint64 *p = (guint64*)code;
261 guint64 *end = (guint64*)(code + ((size + 8) /8));
264 * FIXME: Flushing code in dword chunks in _slow_.
268 __asm__ __volatile__ ("iflush %0"::"r"(p++));
278 * Flush all register windows to memory. Every register window is saved to
279 * a 16 word area on the stack pointed to by its %sp register.
282 mono_sparc_flushw (void)
284 static guint32 start [64];
285 static int inited = 0;
287 static void (*flushw) (void);
292 sparc_save_imm (code, sparc_sp, -160, sparc_sp);
295 sparc_restore_simple (code);
297 g_assert ((code - start) < 64);
299 flushw = (gpointer)start;
308 mono_arch_flush_register_windows (void)
310 mono_sparc_flushw ();
314 mono_arch_is_inst_imm (gint64 imm)
316 return sparc_is_imm13 (imm);
320 mono_sparc_is_v9 (void) {
325 mono_sparc_is_sparc64 (void) {
337 ArgInFloatReg, /* V9 only */
338 ArgInDoubleReg /* V9 only */
343 /* This needs to be offset by %i0 or %o0 depending on caller/callee */
346 guint32 vt_offset; /* for valuetypes */
364 add_general (guint32 *gr, guint32 *stack_size, ArgInfo *ainfo, gboolean pair)
366 ainfo->offset = *stack_size;
369 if (*gr >= PARAM_REGS) {
370 ainfo->storage = ArgOnStack;
373 ainfo->storage = ArgInIReg;
378 /* Allways reserve stack space for parameters passed in registers */
379 (*stack_size) += sizeof (gpointer);
382 if (*gr < PARAM_REGS - 1) {
383 /* A pair of registers */
384 ainfo->storage = ArgInIRegPair;
388 else if (*gr >= PARAM_REGS) {
389 /* A pair of stack locations */
390 ainfo->storage = ArgOnStackPair;
393 ainfo->storage = ArgInSplitRegStack;
398 (*stack_size) += 2 * sizeof (gpointer);
404 #define FLOAT_PARAM_REGS 32
407 add_float (guint32 *gr, guint32 *stack_size, ArgInfo *ainfo, gboolean single)
409 ainfo->offset = *stack_size;
412 if (*gr >= FLOAT_PARAM_REGS) {
413 ainfo->storage = ArgOnStack;
416 /* A single is passed in an even numbered fp register */
417 ainfo->storage = ArgInFloatReg;
418 ainfo->reg = *gr + 1;
423 if (*gr < FLOAT_PARAM_REGS) {
424 /* A double register */
425 ainfo->storage = ArgInDoubleReg;
430 ainfo->storage = ArgOnStack;
434 (*stack_size) += sizeof (gpointer);
442 * Obtain information about a call according to the calling convention.
443 * For V8, see the "System V ABI, Sparc Processor Supplement" Sparc V8 version
444 * document for more information.
445 * For V9, see the "Low Level System Information (64-bit psABI)" chapter in
446 * the 'Sparc Compliance Definition 2.4' document.
449 get_call_info (MonoMethodSignature *sig, gboolean is_pinvoke)
452 int n = sig->hasthis + sig->param_count;
453 guint32 stack_size = 0;
456 cinfo = g_malloc0 (sizeof (CallInfo) + (sizeof (ArgInfo) * n));
462 if (MONO_TYPE_ISSTRUCT ((sig->ret))) {
463 /* The address of the return value is passed in %o0 */
464 add_general (&gr, &stack_size, &cinfo->ret, FALSE);
465 cinfo->ret.reg += sparc_i0;
471 add_general (&gr, &stack_size, cinfo->args + 0, FALSE);
473 for (i = 0; i < sig->param_count; ++i) {
474 ArgInfo *ainfo = &cinfo->args [sig->hasthis + i];
476 if ((sig->call_convention == MONO_CALL_VARARG) && (i == sig->sentinelpos)) {
477 /* Emit the signature cookie just before the implicit arguments */
478 add_general (&gr, &stack_size, &cinfo->sig_cookie, FALSE);
479 /* Prevent implicit arguments from being passed in registers */
483 DEBUG(printf("param %d: ", i));
484 if (sig->params [i]->byref) {
485 DEBUG(printf("byref\n"));
487 add_general (&gr, &stack_size, ainfo, FALSE);
490 switch (mono_type_get_underlying_type (sig->params [i])->type) {
491 case MONO_TYPE_BOOLEAN:
494 add_general (&gr, &stack_size, ainfo, FALSE);
495 /* the value is in the ls byte */
496 ainfo->offset += sizeof (gpointer) - 1;
501 add_general (&gr, &stack_size, ainfo, FALSE);
502 /* the value is in the ls word */
503 ainfo->offset += sizeof (gpointer) - 2;
507 add_general (&gr, &stack_size, ainfo, FALSE);
508 /* the value is in the ls dword */
509 ainfo->offset += sizeof (gpointer) - 4;
514 case MONO_TYPE_CLASS:
515 case MONO_TYPE_OBJECT:
516 case MONO_TYPE_STRING:
517 case MONO_TYPE_SZARRAY:
518 case MONO_TYPE_ARRAY:
519 add_general (&gr, &stack_size, ainfo, FALSE);
521 case MONO_TYPE_VALUETYPE:
526 add_general (&gr, &stack_size, ainfo, FALSE);
528 case MONO_TYPE_TYPEDBYREF:
529 add_general (&gr, &stack_size, ainfo, FALSE);
534 add_general (&gr, &stack_size, ainfo, FALSE);
536 add_general (&gr, &stack_size, ainfo, TRUE);
541 add_float (&fr, &stack_size, ainfo, TRUE);
544 /* single precision values are passed in integer registers */
545 add_general (&gr, &stack_size, ainfo, FALSE);
550 add_float (&fr, &stack_size, ainfo, FALSE);
553 /* double precision values are passed in a pair of registers */
554 add_general (&gr, &stack_size, ainfo, TRUE);
558 g_assert_not_reached ();
564 switch (mono_type_get_underlying_type (sig->ret)->type) {
565 case MONO_TYPE_BOOLEAN:
576 case MONO_TYPE_CLASS:
577 case MONO_TYPE_OBJECT:
578 case MONO_TYPE_SZARRAY:
579 case MONO_TYPE_ARRAY:
580 case MONO_TYPE_STRING:
581 cinfo->ret.storage = ArgInIReg;
582 cinfo->ret.reg = sparc_i0;
589 cinfo->ret.storage = ArgInIReg;
590 cinfo->ret.reg = sparc_i0;
594 cinfo->ret.storage = ArgInIRegPair;
595 cinfo->ret.reg = sparc_i0;
602 cinfo->ret.storage = ArgInFReg;
603 cinfo->ret.reg = sparc_f0;
605 case MONO_TYPE_VALUETYPE:
614 cinfo->ret.storage = ArgOnStack;
616 case MONO_TYPE_TYPEDBYREF:
619 /* Same as a valuetype with size 24 */
626 cinfo->ret.storage = ArgOnStack;
631 g_error ("Can't handle as return value 0x%x", sig->ret->type);
635 cinfo->stack_usage = stack_size;
636 cinfo->reg_usage = gr;
641 is_regsize_var (MonoType *t) {
644 switch (mono_type_get_underlying_type (t)->type) {
645 case MONO_TYPE_BOOLEAN:
656 case MONO_TYPE_OBJECT:
657 case MONO_TYPE_STRING:
658 case MONO_TYPE_CLASS:
659 case MONO_TYPE_SZARRAY:
660 case MONO_TYPE_ARRAY:
662 case MONO_TYPE_VALUETYPE:
674 mono_arch_get_allocatable_int_vars (MonoCompile *cfg)
680 * FIXME: If an argument is allocated to a register, then load it from the
681 * stack in the prolog.
684 for (i = 0; i < cfg->num_varinfo; i++) {
685 MonoInst *ins = cfg->varinfo [i];
686 MonoMethodVar *vmv = MONO_VARINFO (cfg, i);
689 if (vmv->range.first_use.abs_pos >= vmv->range.last_use.abs_pos)
692 /* FIXME: Make arguments on stack allocateable to registers */
693 if (ins->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT) || (ins->opcode == OP_REGVAR) || (ins->opcode == OP_ARG))
696 if (is_regsize_var (ins->inst_vtype)) {
697 g_assert (MONO_VARINFO (cfg, i)->reg == -1);
698 g_assert (i == vmv->idx);
700 vars = mono_varlist_insert_sorted (cfg, vars, vmv, FALSE);
708 mono_arch_get_global_int_regs (MonoCompile *cfg)
712 MonoMethodSignature *sig;
715 sig = cfg->method->signature;
717 cinfo = get_call_info (sig, FALSE);
719 /* Use unused input registers */
720 for (i = cinfo->reg_usage; i < 6; ++i)
721 regs = g_list_prepend (regs, GUINT_TO_POINTER (sparc_i0 + i));
723 /* Use %l0..%l3 as global registers */
724 for (i = sparc_l0; i < sparc_l4; ++i)
725 regs = g_list_prepend (regs, GUINT_TO_POINTER (i));
733 * mono_arch_regalloc_cost:
735 * Return the cost, in number of memory references, of the action of
736 * allocating the variable VMV into a register during global register
740 mono_arch_regalloc_cost (MonoCompile *cfg, MonoMethodVar *vmv)
746 * Set var information according to the calling convention. sparc version.
747 * The locals var stuff should most likely be split in another method.
750 mono_arch_allocate_vars (MonoCompile *m)
752 MonoMethodSignature *sig;
753 MonoMethodHeader *header;
755 int i, offset, size, align, curinst;
758 header = ((MonoMethodNormal *)m->method)->header;
760 sig = m->method->signature;
762 cinfo = get_call_info (sig, FALSE);
764 if (sig->ret->type != MONO_TYPE_VOID) {
765 switch (cinfo->ret.storage) {
769 m->ret->opcode = OP_REGVAR;
770 m->ret->inst_c0 = cinfo->ret.reg;
774 g_assert_not_reached ();
777 m->ret->opcode = OP_REGOFFSET;
778 m->ret->inst_basereg = sparc_fp;
779 m->ret->inst_offset = 64;
785 m->ret->dreg = m->ret->inst_c0;
789 * We use the ABI calling conventions for managed code as well.
790 * Exception: valuetypes are never returned in registers on V9.
791 * FIXME: Use something more optimized.
794 /* Locals are allocated backwards from %fp */
795 m->frame_reg = sparc_fp;
799 * Reserve a stack slot for holding information used during exception
802 if (header->num_clauses)
803 offset += sizeof (gpointer) * 2;
805 if (m->method->save_lmf) {
806 offset += sizeof (MonoLMF);
807 m->arch.lmf_offset = offset;
810 curinst = m->locals_start;
811 for (i = curinst; i < m->num_varinfo; ++i) {
812 inst = m->varinfo [i];
814 if (inst->opcode == OP_REGVAR) {
815 //g_print ("allocating local %d to %s\n", i, mono_arch_regname (inst->dreg));
819 /* inst->unused indicates native sized value types, this is used by the
820 * pinvoke wrappers when they call functions returning structure */
821 if (inst->unused && MONO_TYPE_ISSTRUCT (inst->inst_vtype) && inst->inst_vtype->type != MONO_TYPE_TYPEDBYREF)
822 size = mono_class_native_size (inst->inst_vtype->data.klass, &align);
824 size = mono_type_stack_size (inst->inst_vtype, &align);
827 * This is needed since structures containing doubles must be doubleword
829 * FIXME: Do this only if needed.
831 if (MONO_TYPE_ISSTRUCT (inst->inst_vtype))
835 * variables are accessed as negative offsets from %fp, so increase
836 * the offset before assigning it to a variable
841 offset &= ~(align - 1);
842 inst->opcode = OP_REGOFFSET;
843 inst->inst_basereg = sparc_fp;
844 inst->inst_offset = STACK_BIAS + -offset;
846 //g_print ("allocating local %d to [%s - %d]\n", i, mono_arch_regname (inst->inst_basereg), - inst->inst_offset);
849 if (sig->call_convention == MONO_CALL_VARARG) {
850 m->sig_cookie = cinfo->sig_cookie.offset + ARGS_OFFSET;
853 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
854 inst = m->varinfo [i];
855 if (inst->opcode != OP_REGVAR) {
856 ArgInfo *ainfo = &cinfo->args [i];
857 gboolean inreg = TRUE;
861 if (sig->hasthis && (i == 0))
862 arg_type = &mono_defaults.object_class->byval_arg;
864 arg_type = sig->params [i - sig->hasthis];
867 if (!arg_type->byref && ((arg_type->type == MONO_TYPE_R4)
868 || (arg_type->type == MONO_TYPE_R8)))
870 * Since float arguments are passed in integer registers, we need to
871 * save them to the stack in the prolog.
876 /* FIXME: Allocate volatile arguments to registers */
877 if (inst->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT))
880 if (MONO_TYPE_ISSTRUCT (arg_type))
881 /* FIXME: this isn't needed */
884 inst->opcode = OP_REGOFFSET;
887 storage = ArgOnStack;
889 storage = ainfo->storage;
894 inst->opcode = OP_REGVAR;
895 inst->dreg = sparc_i0 + ainfo->reg;
900 * Since float regs are volatile, we save the arguments to
901 * the stack in the prolog.
902 * FIXME: Avoid this if the method contains no calls.
906 case ArgInSplitRegStack:
907 /* Split arguments are saved to the stack in the prolog */
908 inst->opcode = OP_REGOFFSET;
909 /* in parent frame */
910 inst->inst_basereg = sparc_fp;
911 inst->inst_offset = ainfo->offset + ARGS_OFFSET;
913 if (!arg_type->byref && (arg_type->type == MONO_TYPE_R8)) {
915 * It is very hard to load doubles from non-doubleword aligned
916 * memory locations. So if the offset is misaligned, we copy the
917 * argument to a stack location in the prolog.
919 if ((inst->inst_offset - STACK_BIAS) % 8) {
920 inst->inst_basereg = sparc_fp;
924 offset &= ~(align - 1);
925 inst->inst_offset = STACK_BIAS + -offset;
934 if (MONO_TYPE_ISSTRUCT (arg_type)) {
935 /* Add a level of indirection */
937 * It would be easier to add OP_LDIND_I here, but ldind_i instructions
938 * are destructively modified in a lot of places in inssel.brg.
941 MONO_INST_NEW (m, indir, 0);
943 inst->opcode = OP_SPARC_INARG_VT;
944 inst->inst_left = indir;
950 * spillvars are stored between the normal locals and the storage reserved
954 m->stack_offset = offset;
956 /* Add a properly aligned dword for use by int<->float conversion opcodes */
958 mono_spillvar_offset_float (m, 0);
964 * take the arguments and generate the arch-specific
965 * instructions to properly call the function in call.
966 * This includes pushing, moving arguments to the right register
970 mono_arch_call_opcode (MonoCompile *cfg, MonoBasicBlock* bb, MonoCallInst *call, int is_virtual) {
972 MonoMethodSignature *sig;
976 guint32 extra_space = 0;
978 sig = call->signature;
979 n = sig->param_count + sig->hasthis;
981 cinfo = get_call_info (sig, sig->pinvoke);
983 for (i = 0; i < n; ++i) {
984 ainfo = cinfo->args + i;
985 if (is_virtual && i == 0) {
986 /* the argument will be attached to the call instruction */
989 if ((sig->call_convention == MONO_CALL_VARARG) && (i == sig->sentinelpos)) {
990 /* FIXME: Test varargs with 0 implicit args */
991 /* FIXME: Test interaction with hasthis */
992 /* Emit the signature cookie just before the first implicit argument */
994 /* FIXME: Add support for signature tokens to AOT */
995 cfg->disable_aot = TRUE;
996 /* We allways pass the signature on the stack for simplicity */
997 MONO_INST_NEW (cfg, arg, OP_SPARC_OUTARG_MEM);
998 arg->inst_basereg = sparc_sp;
999 arg->inst_imm = ARGS_OFFSET + cinfo->sig_cookie.offset;
1000 MONO_INST_NEW (cfg, sig_arg, OP_ICONST);
1001 sig_arg->inst_p0 = call->signature;
1002 arg->inst_left = sig_arg;
1003 arg->type = STACK_PTR;
1004 /* prepend, so they get reversed */
1005 arg->next = call->out_args;
1006 call->out_args = arg;
1009 MONO_INST_NEW (cfg, arg, OP_OUTARG);
1010 in = call->args [i];
1011 arg->cil_code = in->cil_code;
1012 arg->inst_left = in;
1013 arg->type = in->type;
1014 /* prepend, we'll need to reverse them later */
1015 arg->next = call->out_args;
1016 call->out_args = arg;
1018 if ((i >= sig->hasthis) && (MONO_TYPE_ISSTRUCT(sig->params [i - sig->hasthis]))) {
1021 guint32 offset, pad;
1029 if (sig->params [i - sig->hasthis]->type == MONO_TYPE_TYPEDBYREF) {
1030 size = sizeof (MonoTypedRef);
1031 align = sizeof (gpointer);
1035 size = mono_type_native_stack_size (&in->klass->byval_arg, &align);
1037 size = mono_type_stack_size (&in->klass->byval_arg, &align);
1040 * We use OP_OUTARG_VT to copy the valuetype to a stack location, then
1041 * use the normal OUTARG opcodes to pass the address of the location to
1044 MONO_INST_NEW (cfg, inst, OP_OUTARG_VT);
1045 inst->inst_left = in;
1047 /* The first 6 argument locations are reserved */
1048 if (cinfo->stack_usage < 6 * sizeof (gpointer))
1049 cinfo->stack_usage = 6 * sizeof (gpointer);
1051 offset = ALIGN_TO ((ARGS_OFFSET - STACK_BIAS) + cinfo->stack_usage, align);
1052 pad = offset - ((ARGS_OFFSET - STACK_BIAS) + cinfo->stack_usage);
1054 inst->inst_c1 = STACK_BIAS + offset;
1055 inst->unused = size;
1056 arg->inst_left = inst;
1058 cinfo->stack_usage += size;
1059 cinfo->stack_usage += pad;
1062 switch (ainfo->storage) {
1066 if (ainfo->storage == ArgInIRegPair)
1067 arg->opcode = OP_SPARC_OUTARG_REGPAIR;
1068 arg->unused = sparc_o0 + ainfo->reg;
1069 call->used_iregs |= 1 << ainfo->reg;
1071 if ((i >= sig->hasthis) && (sig->params [i - sig->hasthis]->type == MONO_TYPE_R8)) {
1073 * The OUTARG (freg) implementation needs an extra dword to store
1074 * the temporary value.
1080 arg->opcode = OP_SPARC_OUTARG_MEM;
1082 case ArgOnStackPair:
1083 arg->opcode = OP_SPARC_OUTARG_MEMPAIR;
1085 case ArgInSplitRegStack:
1086 arg->opcode = OP_SPARC_OUTARG_SPLIT_REG_STACK;
1087 arg->unused = sparc_o0 + ainfo->reg;
1088 call->used_iregs |= 1 << ainfo->reg;
1091 arg->opcode = OP_SPARC_OUTARG_FLOAT_REG;
1092 arg->unused = sparc_f0 + ainfo->reg;
1094 case ArgInDoubleReg:
1095 arg->opcode = OP_SPARC_OUTARG_DOUBLE_REG;
1096 arg->unused = sparc_f0 + ainfo->reg;
1102 arg->inst_basereg = sparc_sp;
1103 arg->inst_imm = ARGS_OFFSET + ainfo->offset;
1108 * Reverse the call->out_args list.
1111 MonoInst *prev = NULL, *list = call->out_args, *next;
1118 call->out_args = prev;
1120 call->stack_usage = cinfo->stack_usage + extra_space;
1121 cfg->param_area = MAX (cfg->param_area, call->stack_usage);
1122 cfg->flags |= MONO_CFG_HAS_CALLS;
1128 /* Map opcode to the sparc condition codes */
1129 static inline SparcCond
1130 opcode_to_sparc_cond (int opcode)
1152 case OP_COND_EXC_EQ:
1155 case OP_COND_EXC_NE_UN:
1162 case OP_COND_EXC_LT:
1168 case OP_COND_EXC_LT_UN:
1174 case OP_COND_EXC_GT:
1180 case OP_COND_EXC_GT_UN:
1184 case OP_COND_EXC_GE:
1188 case OP_COND_EXC_GE_UN:
1192 case OP_COND_EXC_LE:
1196 case OP_COND_EXC_LE_UN:
1198 case OP_COND_EXC_OV:
1199 case OP_COND_EXC_IOV:
1202 case OP_COND_EXC_IC:
1204 case OP_COND_EXC_NO:
1205 case OP_COND_EXC_NC:
1208 g_assert_not_reached ();
1213 #define COMPUTE_DISP(ins) \
1214 if (ins->flags & MONO_INST_BRLABEL) { \
1215 if (ins->inst_i0->inst_c0) \
1216 disp = (ins->inst_i0->inst_c0 - ((guint8*)code - cfg->native_code)) >> 2; \
1219 mono_add_patch_info (cfg, (guint8*)code - cfg->native_code, MONO_PATCH_INFO_LABEL, ins->inst_i0); \
1222 if (ins->inst_true_bb->native_offset) \
1223 disp = (ins->inst_true_bb->native_offset - ((guint8*)code - cfg->native_code)) >> 2; \
1226 mono_add_patch_info (cfg, (guint8*)code - cfg->native_code, MONO_PATCH_INFO_BB, ins->inst_true_bb); \
1231 #define DEFAULT_ICC sparc_xcc_short
1233 #define DEFAULT_ICC sparc_icc_short
1237 #define EMIT_COND_BRANCH_ICC(ins,cond,annul,filldelay,icc) \
1241 COMPUTE_DISP(ins); \
1242 predict = (disp != 0) ? 1 : 0; \
1243 g_assert (sparc_is_imm19 (disp)); \
1244 sparc_branchp (code, (annul), cond, icc, (predict), disp); \
1245 if (filldelay) sparc_nop (code); \
1247 #define EMIT_COND_BRANCH(ins,cond,annul,filldelay) EMIT_COND_BRANCH_ICC ((ins), (cond), (annul), (filldelay), (sparc_xcc_short))
1248 #define EMIT_FLOAT_COND_BRANCH(ins,cond,annul,filldelay) \
1252 COMPUTE_DISP(ins); \
1253 predict = (disp != 0) ? 1 : 0; \
1254 g_assert (sparc_is_imm19 (disp)); \
1255 sparc_fbranch (code, (annul), cond, disp); \
1256 if (filldelay) sparc_nop (code); \
1259 #define EMIT_COND_BRANCH_ICC(ins,cond,annul,filldelay,icc) g_assert_not_reached ()
1260 #define EMIT_COND_BRANCH_GENERAL(ins,bop,cond,annul,filldelay) \
1263 COMPUTE_DISP(ins); \
1264 g_assert (sparc_is_imm22 (disp)); \
1265 sparc_ ## bop (code, (annul), cond, disp); \
1266 if (filldelay) sparc_nop (code); \
1268 #define EMIT_COND_BRANCH(ins,cond,annul,filldelay) EMIT_COND_BRANCH_GENERAL((ins),branch,(cond),annul,filldelay)
1269 #define EMIT_FLOAT_COND_BRANCH(ins,cond,annul,filldelay) EMIT_COND_BRANCH_GENERAL((ins),fbranch,(cond),annul,filldelay)
1272 #define EMIT_COND_BRANCH_PREDICTED(ins,cond,annul,filldelay) \
1276 COMPUTE_DISP(ins); \
1277 predict = (disp != 0) ? 1 : 0; \
1278 g_assert (sparc_is_imm19 (disp)); \
1279 sparc_branchp (code, (annul), (cond), DEFAULT_ICC, (predict), disp); \
1280 if (filldelay) sparc_nop (code); \
1283 #define EMIT_COND_BRANCH_BPR(ins,bop,predict,annul,filldelay) \
1286 COMPUTE_DISP(ins); \
1287 g_assert (sparc_is_imm22 (disp)); \
1288 sparc_ ## bop (code, (annul), (predict), ins->sreg1, disp); \
1289 if (filldelay) sparc_nop (code); \
1292 /* emit an exception if condition is fail */
1294 * We put the exception throwing code out-of-line, at the end of the method
1296 #define EMIT_COND_SYSTEM_EXCEPTION_GENERAL(ins,cond,sexc_name,filldelay,icc) do { \
1297 mono_add_patch_info (cfg, (guint8*)(code) - (cfg)->native_code, \
1298 MONO_PATCH_INFO_EXC, sexc_name); \
1300 sparc_branchp (code, 0, (cond), (icc), 0, 0); \
1303 sparc_branch (code, 0, cond, 0); \
1305 if (filldelay) sparc_nop (code); \
1308 #define EMIT_COND_SYSTEM_EXCEPTION(ins,cond,sexc_name) EMIT_COND_SYSTEM_EXCEPTION_GENERAL(ins,cond,sexc_name,TRUE,DEFAULT_ICC)
1310 #define EMIT_COND_SYSTEM_EXCEPTION_BPR(ins,bop,sexc_name) do { \
1311 mono_add_patch_info (cfg, (guint8*)(code) - (cfg)->native_code, \
1312 MONO_PATCH_INFO_EXC, sexc_name); \
1313 sparc_ ## bop (code, FALSE, FALSE, ins->sreg1, 0); \
1317 #define EMIT_ALU_IMM(ins,op,setcc) do { \
1318 if (sparc_is_imm13 ((ins)->inst_imm)) \
1319 sparc_ ## op ## _imm (code, (setcc), (ins)->sreg1, ins->inst_imm, (ins)->dreg); \
1321 sparc_set (code, ins->inst_imm, sparc_o7); \
1322 sparc_ ## op (code, (setcc), (ins)->sreg1, sparc_o7, (ins)->dreg); \
1326 #define EMIT_LOAD_MEMBASE(ins,op) do { \
1327 if (sparc_is_imm13 (ins->inst_offset)) \
1328 sparc_ ## op ## _imm (code, ins->inst_basereg, ins->inst_offset, ins->dreg); \
1330 sparc_set (code, ins->inst_offset, sparc_o7); \
1331 sparc_ ## op (code, ins->inst_basereg, sparc_o7, ins->dreg); \
1336 #define EMIT_STORE_MEMBASE_IMM(ins,op) do { \
1338 if (ins->inst_imm == 0) \
1341 sparc_set (code, ins->inst_imm, sparc_o7); \
1344 if (!sparc_is_imm13 (ins->inst_offset)) { \
1345 sparc_set (code, ins->inst_offset, GP_SCRATCH_REG); \
1346 sparc_ ## op (code, sreg, ins->inst_destbasereg, GP_SCRATCH_REG); \
1349 sparc_ ## op ## _imm (code, sreg, ins->inst_destbasereg, ins->inst_offset); \
1352 #define EMIT_STORE_MEMBASE_REG(ins,op) do { \
1353 if (!sparc_is_imm13 (ins->inst_offset)) { \
1354 sparc_set (code, ins->inst_offset, sparc_o7); \
1355 sparc_ ## op (code, ins->sreg1, ins->inst_destbasereg, sparc_o7); \
1358 sparc_ ## op ## _imm (code, ins->sreg1, ins->inst_destbasereg, ins->inst_offset); \
1361 #define EMIT_CALL() do { \
1363 sparc_set_template (code, sparc_o7); \
1364 sparc_jmpl (code, sparc_o7, sparc_g0, sparc_o7); \
1367 sparc_call_simple (code, 0); \
1372 extern gboolean mono_compile_aot;
1375 * A call template is 7 instructions long, so we want to avoid it if possible.
1378 emit_call (MonoCompile *cfg, guint32 *code, guint32 patch_type, gconstpointer data)
1382 /* FIXME: This only works if the target method is already compiled */
1383 if (0 && v64 && !mono_compile_aot) {
1384 MonoJumpInfo patch_info;
1386 patch_info.type = patch_type;
1387 patch_info.data.target = data;
1389 target = mono_resolve_patch_target (cfg->method, cfg->domain, NULL, &patch_info, FALSE);
1391 /* FIXME: Add optimizations if the target is close enough */
1392 sparc_set (code, target, sparc_o7);
1393 sparc_jmpl (code, sparc_o7, sparc_g0, sparc_o7);
1397 mono_add_patch_info (cfg, (guint8*)code - cfg->native_code, patch_type, data);
1405 peephole_pass (MonoCompile *cfg, MonoBasicBlock *bb)
1407 MonoInst *ins, *last_ins = NULL;
1412 switch (ins->opcode) {
1414 /* remove unnecessary multiplication with 1 */
1415 if (ins->inst_imm == 1) {
1416 if (ins->dreg != ins->sreg1) {
1417 ins->opcode = OP_MOVE;
1419 last_ins->next = ins->next;
1426 case OP_LOAD_MEMBASE:
1427 case OP_LOADI4_MEMBASE:
1429 * OP_STORE_MEMBASE_REG reg, offset(basereg)
1430 * OP_LOAD_MEMBASE offset(basereg), reg
1432 if (last_ins && (last_ins->opcode == OP_STOREI4_MEMBASE_REG
1433 || last_ins->opcode == OP_STORE_MEMBASE_REG) &&
1434 ins->inst_basereg == last_ins->inst_destbasereg &&
1435 ins->inst_offset == last_ins->inst_offset) {
1436 if (ins->dreg == last_ins->sreg1) {
1437 last_ins->next = ins->next;
1441 //static int c = 0; printf ("MATCHX %s %d\n", cfg->method->name,c++);
1442 ins->opcode = OP_MOVE;
1443 ins->sreg1 = last_ins->sreg1;
1447 * Note: reg1 must be different from the basereg in the second load
1448 * OP_LOAD_MEMBASE offset(basereg), reg1
1449 * OP_LOAD_MEMBASE offset(basereg), reg2
1451 * OP_LOAD_MEMBASE offset(basereg), reg1
1452 * OP_MOVE reg1, reg2
1454 } if (last_ins && (last_ins->opcode == OP_LOADI4_MEMBASE
1455 || last_ins->opcode == OP_LOAD_MEMBASE) &&
1456 ins->inst_basereg != last_ins->dreg &&
1457 ins->inst_basereg == last_ins->inst_basereg &&
1458 ins->inst_offset == last_ins->inst_offset) {
1460 if (ins->dreg == last_ins->dreg) {
1461 last_ins->next = ins->next;
1465 ins->opcode = OP_MOVE;
1466 ins->sreg1 = last_ins->dreg;
1469 //g_assert_not_reached ();
1473 * OP_STORE_MEMBASE_IMM imm, offset(basereg)
1474 * OP_LOAD_MEMBASE offset(basereg), reg
1476 * OP_STORE_MEMBASE_IMM imm, offset(basereg)
1477 * OP_ICONST reg, imm
1479 } else if (last_ins && (last_ins->opcode == OP_STOREI4_MEMBASE_IMM
1480 || last_ins->opcode == OP_STORE_MEMBASE_IMM) &&
1481 ins->inst_basereg == last_ins->inst_destbasereg &&
1482 ins->inst_offset == last_ins->inst_offset) {
1483 //static int c = 0; printf ("MATCHX %s %d\n", cfg->method->name,c++);
1484 ins->opcode = OP_ICONST;
1485 ins->inst_c0 = last_ins->inst_imm;
1486 g_assert_not_reached (); // check this rule
1491 case OP_LOADU1_MEMBASE:
1492 case OP_LOADI1_MEMBASE:
1493 if (last_ins && (last_ins->opcode == OP_STOREI1_MEMBASE_REG) &&
1494 ins->inst_basereg == last_ins->inst_destbasereg &&
1495 ins->inst_offset == last_ins->inst_offset) {
1496 if (ins->dreg == last_ins->sreg1) {
1497 last_ins->next = ins->next;
1501 //static int c = 0; printf ("MATCHX %s %d\n", cfg->method->name,c++);
1502 ins->opcode = OP_MOVE;
1503 ins->sreg1 = last_ins->sreg1;
1507 case OP_LOADU2_MEMBASE:
1508 case OP_LOADI2_MEMBASE:
1509 if (last_ins && (last_ins->opcode == OP_STOREI2_MEMBASE_REG) &&
1510 ins->inst_basereg == last_ins->inst_destbasereg &&
1511 ins->inst_offset == last_ins->inst_offset) {
1512 if (ins->dreg == last_ins->sreg1) {
1513 last_ins->next = ins->next;
1517 //static int c = 0; printf ("MATCHX %s %d\n", cfg->method->name,c++);
1518 ins->opcode = OP_MOVE;
1519 ins->sreg1 = last_ins->sreg1;
1523 case OP_STOREI4_MEMBASE_IMM:
1524 /* Convert pairs of 0 stores to a dword 0 store */
1525 /* Used when initializing temporaries */
1526 /* We know sparc_fp is dword aligned */
1527 if (last_ins && (last_ins->opcode == OP_STOREI4_MEMBASE_IMM) &&
1528 (ins->inst_destbasereg == last_ins->inst_destbasereg) &&
1529 (ins->inst_destbasereg == sparc_fp) &&
1530 (ins->inst_offset < 0) &&
1531 ((ins->inst_offset % 8) == 0) &&
1532 ((ins->inst_offset == last_ins->inst_offset - 4)) &&
1533 (ins->inst_imm == 0) &&
1534 (last_ins->inst_imm == 0)) {
1536 last_ins->opcode = OP_STOREI8_MEMBASE_IMM;
1537 last_ins->inst_offset = ins->inst_offset;
1538 last_ins->next = ins->next;
1550 case OP_COND_EXC_EQ:
1551 case OP_COND_EXC_GE:
1552 case OP_COND_EXC_GT:
1553 case OP_COND_EXC_LE:
1554 case OP_COND_EXC_LT:
1555 case OP_COND_EXC_NE_UN:
1557 * Convert compare with zero+branch to BRcc
1560 * This only works in 64 bit mode, since it examines all 64
1561 * bits of the register.
1562 * Only do this if the method is small since BPr only has a 16bit
1565 if (v64 && (((MonoMethodNormal*)cfg->method)->header->code_size < 10000) && last_ins &&
1566 (last_ins->opcode == OP_COMPARE_IMM) &&
1567 (last_ins->inst_imm == 0)) {
1568 MonoInst *next = ins->next;
1569 switch (ins->opcode) {
1571 ins->opcode = OP_SPARC_BRZ;
1574 ins->opcode = OP_SPARC_BRNZ;
1577 ins->opcode = OP_SPARC_BRLZ;
1580 ins->opcode = OP_SPARC_BRGZ;
1583 ins->opcode = OP_SPARC_BRGEZ;
1586 ins->opcode = OP_SPARC_BRLEZ;
1588 case OP_COND_EXC_EQ:
1589 ins->opcode = OP_SPARC_COND_EXC_EQZ;
1591 case OP_COND_EXC_GE:
1592 ins->opcode = OP_SPARC_COND_EXC_GEZ;
1594 case OP_COND_EXC_GT:
1595 ins->opcode = OP_SPARC_COND_EXC_GTZ;
1597 case OP_COND_EXC_LE:
1598 ins->opcode = OP_SPARC_COND_EXC_LEZ;
1600 case OP_COND_EXC_LT:
1601 ins->opcode = OP_SPARC_COND_EXC_LTZ;
1603 case OP_COND_EXC_NE_UN:
1604 ins->opcode = OP_SPARC_COND_EXC_NEZ;
1607 g_assert_not_reached ();
1609 ins->sreg1 = last_ins->sreg1;
1611 last_ins->next = next;
1622 if (ins->dreg == ins->sreg1) {
1624 last_ins->next = ins->next;
1629 * OP_MOVE sreg, dreg
1630 * OP_MOVE dreg, sreg
1632 if (last_ins && last_ins->opcode == OP_MOVE &&
1633 ins->sreg1 == last_ins->dreg &&
1634 ins->dreg == last_ins->sreg1) {
1635 last_ins->next = ins->next;
1644 bb->last_ins = last_ins;
1647 /* Parameters used by the register allocator */
1649 /* Use %l4..%l7 as local registers */
1650 #define ARCH_CALLER_REGS (0xf0<<16)
1653 /* Use %d34..%d62 as the double precision floating point local registers */
1654 /* %d32 has the same encoding as %f1, so %d36%d38 == 0b1010 == 0xa */
1655 #define ARCH_CALLER_FREGS (0xaaaaaaa8)
1657 /* Use %f2..%f30 as the double precision floating point local registers */
1658 #define ARCH_CALLER_FREGS (0x55555554)
1662 #define DEBUG(a) if (cfg->verbose_level > 1) a
1664 #define reg_is_freeable(r) ((1 << (r)) & ARCH_CALLER_REGS)
1665 #define freg_is_freeable(r) (((1) << (r)) & ARCH_CALLER_FREGS)
1674 static const char*const * ins_spec = sparc_desc;
/*
 * get_ins_spec:
 * Return the register-allocator spec string for OPCODE; opcodes with no
 * entry fall back to the CEE_ADD spec (generic integer binop shape).
 * NOTE(review): this excerpt is elided — braces/else lines are missing.
 */
1676 static inline const char*
1677 get_ins_spec (int opcode)
1679 if (ins_spec [opcode])
1680 return ins_spec [opcode];
1682 return ins_spec [CEE_ADD];
/*
 * print_ins:
 * Debug helper: print instruction INS (with index I) formatted by its spec
 * string. Virtual registers (>= MONO_MAX_IREGS) print as "R%d", hard
 * registers by name; a 'b' operand is a base register and prints as
 * [reg + offset]; the clobber spec char is appended when present.
 */
1686 print_ins (int i, MonoInst *ins)
1688 const char *spec = get_ins_spec (ins->opcode);
1689 g_print ("\t%-2d %s", i, mono_inst_name (ins->opcode));
1690 if (spec [MONO_INST_DEST]) {
1691 if (ins->dreg >= MONO_MAX_IREGS)
1692 g_print (" R%d <-", ins->dreg);
1694 if (spec [MONO_INST_DEST] == 'b')
1695 g_print (" [%s + 0x%lx] <-", mono_arch_regname (ins->dreg), (long)ins->inst_offset);
1697 g_print (" %s <-", mono_arch_regname (ins->dreg));
1699 if (spec [MONO_INST_SRC1]) {
1700 if (ins->sreg1 >= MONO_MAX_IREGS)
1701 g_print (" R%d", ins->sreg1);
1703 if (spec [MONO_INST_SRC1] == 'b')
1704 g_print (" [%s + 0x%lx]", mono_arch_regname (ins->sreg1), (long)ins->inst_offset);
1706 g_print (" %s", mono_arch_regname (ins->sreg1));
1708 if (spec [MONO_INST_SRC2]) {
1709 if (ins->sreg2 >= MONO_MAX_IREGS)
1710 g_print (" R%d", ins->sreg2);
1712 g_print (" %s", mono_arch_regname (ins->sreg2));
1714 if (spec [MONO_INST_CLOB])
1715 g_print (" clobbers: %c", spec [MONO_INST_CLOB]);
/*
 * print_regtrack:
 * Debug helper: dump the liveness interval [born_in - last_use] recorded in
 * T for each of the NUM tracked registers (hard regs by name, virtual regs
 * as "R%d").
 */
1720 print_regtrack (RegTrack *t, int num)
1726 for (i = 0; i < num; ++i) {
1729 if (i >= MONO_MAX_IREGS) {
1730 g_snprintf (buf, sizeof(buf), "R%d", i);
1733 r = mono_arch_regname (i);
1734 g_print ("liveness: %s [%d - %d]\n", r, t [i].born_in, t[i].last_use);
1738 typedef struct InstList InstList;
/*
 * inst_list_prepend:
 * Allocate a new InstList node from POOL holding DATA and prepend it to
 * LIST, returning the new head (pool memory; never freed individually).
 */
1746 static inline InstList*
1747 inst_list_prepend (MonoMemPool *pool, InstList *list, MonoInst *data)
1749 InstList *item = mono_mempool_alloc (pool, sizeof (InstList));
1758 #define STACK_OFFSETS_POSITIVE
1761 * returns the offset used by spillvar. It allocates a new
1762 * spill variable if necessary.
/*
 * mono_spillvar_offset:
 * Return the frame offset (biased by MONO_SPARC_STACK_BIAS) of the integer
 * spill slot SPILLVAR, allocating a new pointer-sized slot at a negative
 * offset from the frame and growing cfg->stack_offset when the slot does
 * not exist yet.
 */
1765 mono_spillvar_offset (MonoCompile *cfg, int spillvar)
1767 MonoSpillInfo **si, *info;
1770 si = &cfg->spill_info;
1772 while (i <= spillvar) {
1775 *si = info = mono_mempool_alloc (cfg->mempool, sizeof (MonoSpillInfo));
1777 cfg->stack_offset += sizeof (gpointer);
1778 info->offset = - cfg->stack_offset;
1782 return MONO_SPARC_STACK_BIAS + (*si)->offset;
1788 g_assert_not_reached ();
/*
 * mono_spillvar_offset_float:
 * Float counterpart of mono_spillvar_offset: slots are double-sized and the
 * stack offset is kept 8-byte aligned, as required for SPARC fp loads/stores
 * (see the alignment note at the top of this file).
 */
1793 mono_spillvar_offset_float (MonoCompile *cfg, int spillvar)
1795 MonoSpillInfo **si, *info;
1798 si = &cfg->spill_info_float;
1800 while (i <= spillvar) {
1803 *si = info = mono_mempool_alloc (cfg->mempool, sizeof (MonoSpillInfo));
1805 cfg->stack_offset += sizeof (double);
1806 cfg->stack_offset = ALIGN_TO (cfg->stack_offset, 8);
1807 info->offset = - cfg->stack_offset;
1811 return MONO_SPARC_STACK_BIAS + (*si)->offset;
1817 g_assert_not_reached ();
1822 * Force the spilling of the variable in the symbolic register 'reg'.
/*
 * get_register_force_spilling:
 * Force the variable currently assigned to symbolic register REG out of its
 * hard register: record the spill in iassign, free the hard register, and
 * insert an OP_LOAD_MEMBASE that reloads it from the spill slot after the
 * current instruction. Returns the freed hard register.
 */
G_GNUC_UNUSED static int
1825 get_register_force_spilling (MonoCompile *cfg, InstList *item, MonoInst *ins, int reg)
1830 sel = cfg->rs->iassign [reg];
1831 /*i = cfg->rs->isymbolic [sel];
1832 g_assert (i == reg);*/
1834 spill = ++cfg->spill_count;
/* negative iassign encodes "spilled, slot = -(value)-1" */
1835 cfg->rs->iassign [i] = -spill - 1;
1836 mono_regstate_free_int (cfg->rs, sel);
1837 /* we need to create a spill var and insert a load to sel after the current instruction */
1838 MONO_INST_NEW (cfg, load, OP_LOAD_MEMBASE);
1840 load->inst_basereg = cfg->frame_reg;
1841 load->inst_offset = mono_spillvar_offset (cfg, spill);
/* walk forward to the insertion point (list is reversed) */
1843 while (ins->next != item->prev->data)
1846 load->next = ins->next;
1848 DEBUG (g_print ("SPILLED LOAD (%d at 0x%08lx(%%sp)) R%d (freed %s)\n", spill, (long)load->inst_offset, i, mono_arch_regname (sel)));
/* re-reserve the freed register for the caller */
1849 i = mono_regstate_alloc_int (cfg->rs, 1 << sel);
1850 g_assert (i == sel);
/*
 * get_register_spilling:
 * Pick a hard integer register from REGMASK to hold symbolic register REG,
 * spilling whatever currently occupies it. Registers used by the current
 * instruction (sreg1/sreg2/dreg) are excluded from the candidate mask
 * first; a reload (OP_LOAD_MEMBASE) for the evicted value is inserted after
 * the current instruction. Returns the selected hard register.
 */
1856 get_register_spilling (MonoCompile *cfg, InstList *item, MonoInst *ins, guint32 regmask, int reg)
1861 DEBUG (g_print ("start regmask to assign R%d: 0x%08x (R%d <- R%d R%d)\n", reg, regmask, ins->dreg, ins->sreg1, ins->sreg2));
1862 /* exclude the registers in the current instruction */
1863 if (reg != ins->sreg1 && (reg_is_freeable (ins->sreg1) || (ins->sreg1 >= MONO_MAX_IREGS && cfg->rs->iassign [ins->sreg1] >= 0))) {
1864 if (ins->sreg1 >= MONO_MAX_IREGS)
1865 regmask &= ~ (1 << cfg->rs->iassign [ins->sreg1]);
1867 regmask &= ~ (1 << ins->sreg1);
1868 DEBUG (g_print ("excluding sreg1 %s\n", mono_arch_regname (ins->sreg1)));
1870 if (reg != ins->sreg2 && (reg_is_freeable (ins->sreg2) || (ins->sreg2 >= MONO_MAX_IREGS && cfg->rs->iassign [ins->sreg2] >= 0))) {
1871 if (ins->sreg2 >= MONO_MAX_IREGS)
1872 regmask &= ~ (1 << cfg->rs->iassign [ins->sreg2]);
1874 regmask &= ~ (1 << ins->sreg2);
1875 DEBUG (g_print ("excluding sreg2 %s %d\n", mono_arch_regname (ins->sreg2), ins->sreg2));
1877 if (reg != ins->dreg && reg_is_freeable (ins->dreg)) {
1878 regmask &= ~ (1 << ins->dreg);
1879 DEBUG (g_print ("excluding dreg %s\n", mono_arch_regname (ins->dreg)));
1882 DEBUG (g_print ("available regmask: 0x%08x\n", regmask));
1883 g_assert (regmask); /* need at least a register we can free */
1885 /* we should track prev_use and spill the register that's farther */
1886 for (i = 0; i < MONO_MAX_IREGS; ++i) {
1887 if (regmask & (1 << i)) {
1889 DEBUG (g_print ("selected register %s has assignment %d\n", mono_arch_regname (sel), cfg->rs->iassign [sel]));
1893 i = cfg->rs->isymbolic [sel];
1894 spill = ++cfg->spill_count;
1895 cfg->rs->iassign [i] = -spill - 1;
1896 mono_regstate_free_int (cfg->rs, sel);
1897 /* we need to create a spill var and insert a load to sel after the current instruction */
1898 MONO_INST_NEW (cfg, load, OP_LOAD_MEMBASE);
1900 load->inst_basereg = cfg->frame_reg;
1901 load->inst_offset = mono_spillvar_offset (cfg, spill);
1903 while (ins->next != item->prev->data)
1906 load->next = ins->next;
1908 DEBUG (g_print ("SPILLED LOAD (%d at 0x%08lx(%%sp)) R%d (freed %s)\n", spill, (long)load->inst_offset, i, mono_arch_regname (sel)));
1909 i = mono_regstate_alloc_int (cfg->rs, 1 << sel);
1910 g_assert (i == sel);
/*
 * get_float_register_spilling:
 * Floating-point twin of get_register_spilling: picks a hard fp register
 * from REGMASK for symbolic register REG, evicting the current occupant to
 * an 8-byte spill slot and inserting an OP_LOADR8_MEMBASE reload after the
 * current instruction. Returns the selected hard register.
 */
1916 get_float_register_spilling (MonoCompile *cfg, InstList *item, MonoInst *ins, guint32 regmask, int reg)
1921 DEBUG (g_print ("start regmask to assign R%d: 0x%08x (R%d <- R%d R%d)\n", reg, regmask, ins->dreg, ins->sreg1, ins->sreg2));
1922 /* exclude the registers in the current instruction */
1923 if (reg != ins->sreg1 && (freg_is_freeable (ins->sreg1) || (ins->sreg1 >= MONO_MAX_FREGS && cfg->rs->fassign [ins->sreg1] >= 0))) {
1924 if (ins->sreg1 >= MONO_MAX_FREGS)
1925 regmask &= ~ (1 << cfg->rs->fassign [ins->sreg1]);
1927 regmask &= ~ (1 << ins->sreg1);
1928 DEBUG (g_print ("excluding sreg1 %s\n", mono_arch_regname (ins->sreg1)));
1930 if (reg != ins->sreg2 && (freg_is_freeable (ins->sreg2) || (ins->sreg2 >= MONO_MAX_FREGS && cfg->rs->fassign [ins->sreg2] >= 0))) {
1931 if (ins->sreg2 >= MONO_MAX_FREGS)
1932 regmask &= ~ (1 << cfg->rs->fassign [ins->sreg2]);
1934 regmask &= ~ (1 << ins->sreg2);
1935 DEBUG (g_print ("excluding sreg2 %s %d\n", mono_arch_regname (ins->sreg2), ins->sreg2));
1937 if (reg != ins->dreg && freg_is_freeable (ins->dreg)) {
1938 regmask &= ~ (1 << ins->dreg);
1939 DEBUG (g_print ("excluding dreg %s\n", mono_arch_regname (ins->dreg)));
1942 DEBUG (g_print ("available regmask: 0x%08x\n", regmask));
1943 g_assert (regmask); /* need at least a register we can free */
1945 /* we should track prev_use and spill the register that's farther */
1946 for (i = 0; i < MONO_MAX_FREGS; ++i) {
1947 if (regmask & (1 << i)) {
1949 DEBUG (g_print ("selected register %s has assignment %d\n", mono_arch_regname (sel), cfg->rs->fassign [sel]));
1953 i = cfg->rs->fsymbolic [sel];
1954 spill = ++cfg->spill_count;
1955 cfg->rs->fassign [i] = -spill - 1;
1956 mono_regstate_free_float(cfg->rs, sel);
1957 /* we need to create a spill var and insert a load to sel after the current instruction */
1958 MONO_INST_NEW (cfg, load, OP_LOADR8_MEMBASE);
1960 load->inst_basereg = cfg->frame_reg;
1961 load->inst_offset = mono_spillvar_offset_float (cfg, spill);
1963 while (ins->next != item->prev->data)
1966 load->next = ins->next;
1968 DEBUG (g_print ("SPILLED LOAD (%d at 0x%08lx(%%sp)) R%d (freed %s)\n", spill, (long)load->inst_offset, i, mono_arch_regname (sel)));
1969 i = mono_regstate_alloc_float (cfg->rs, 1 << sel);
1970 g_assert (i == sel);
/*
 * create_copy_ins:
 * Create an OP_MOVE from hard register SRC to DEST and link it in right
 * after INS. Returns the new instruction (presumably; return line elided).
 */
1976 create_copy_ins (MonoCompile *cfg, int dest, int src, MonoInst *ins)
1979 MONO_INST_NEW (cfg, copy, OP_MOVE);
1983 copy->next = ins->next;
1986 DEBUG (g_print ("\tforced copy from %s to %s\n", mono_arch_regname (src), mono_arch_regname (dest)));
/*
 * create_copy_ins_float:
 * Same as create_copy_ins but emits an OP_FMOVE for fp registers.
 */
G_GNUC_UNUSED static MonoInst*
1991 create_copy_ins_float (MonoCompile *cfg, int dest, int src, MonoInst *ins)
1994 MONO_INST_NEW (cfg, copy, OP_FMOVE);
1998 copy->next = ins->next;
2001 DEBUG (g_print ("\tforced copy from %s to %s\n", mono_arch_regname (src), mono_arch_regname (dest)));
/*
 * create_spilled_store:
 * Build an OP_STORE_MEMBASE_REG that saves hard register REG into integer
 * spill slot SPILL, linked after INS (PREV_REG is the symbolic register,
 * used only for the debug message).
 */
2006 create_spilled_store (MonoCompile *cfg, int spill, int reg, int prev_reg, MonoInst *ins)
2009 MONO_INST_NEW (cfg, store, OP_STORE_MEMBASE_REG);
2011 store->inst_destbasereg = cfg->frame_reg;
2012 store->inst_offset = mono_spillvar_offset (cfg, spill);
2014 store->next = ins->next;
2017 DEBUG (g_print ("SPILLED STORE (%d at 0x%08lx(%%sp)) R%d (from %s)\n", spill, (long)store->inst_offset, prev_reg, mono_arch_regname (reg)));
/*
 * create_spilled_store_float:
 * Fp twin of create_spilled_store: OP_STORER8_MEMBASE_REG into a float
 * spill slot.
 */
2022 create_spilled_store_float (MonoCompile *cfg, int spill, int reg, int prev_reg, MonoInst *ins)
2025 MONO_INST_NEW (cfg, store, OP_STORER8_MEMBASE_REG);
2027 store->inst_destbasereg = cfg->frame_reg;
2028 store->inst_offset = mono_spillvar_offset_float (cfg, spill);
2030 store->next = ins->next;
2033 DEBUG (g_print ("SPILLED STORE (%d at 0x%08lx(%%sp)) R%d (from %s)\n", spill, (long)store->inst_offset, prev_reg, mono_arch_regname (reg)));
/*
 * insert_before_ins:
 * Splice TO_INSERT into the instruction list immediately before INS,
 * walking from the previous InstList entry to find INS's predecessor.
 * ITEM->data is updated so later insertions after "the current
 * instruction" land after TO_INSERT as well.
 */
2038 insert_before_ins (MonoInst *ins, InstList *item, MonoInst* to_insert)
2041 g_assert (item->next);
2042 prev = item->next->data;
2044 while (prev->next != ins)
2046 to_insert->next = ins;
2047 prev->next = to_insert;
2049 * needed otherwise in the next instruction we can add an ins to the
2050 * end and that would get past this instruction.
2052 item->data = to_insert;
/*
 * alloc_int_reg:
 * Resolve symbolic register SYM_REG to a hard integer register restricted
 * to ALLOW_MASK, allocating (or spilling to make room) when it has no
 * assignment yet. A negative iassign value means the value was spilled; a
 * spilled store is emitted before returning. Returns the hard register.
 */
G_GNUC_UNUSED static int
2056 alloc_int_reg (MonoCompile *cfg, InstList *curinst, MonoInst *ins, int sym_reg, guint32 allow_mask)
2058 int val = cfg->rs->iassign [sym_reg];
2062 /* the register gets spilled after this inst */
2065 val = mono_regstate_alloc_int (cfg->rs, allow_mask);
2067 val = get_register_spilling (cfg, curinst, ins, allow_mask, sym_reg);
2068 cfg->rs->iassign [sym_reg] = val;
2069 /* add option to store before the instruction for src registers */
2071 create_spilled_store (cfg, spill, val, sym_reg, ins);
2073 cfg->rs->isymbolic [val] = sym_reg;
2077 /* FIXME: Strange loads from the stack in basic-float.cs:test_2_rem */
2080 * Local register allocation.
2081 * We first scan the list of instructions and we save the liveness info of
2082 * each register (when the register is first used, when it's value is set etc.).
2083 * We also reverse the list of instructions (in the InstList list) because assigning
2084 * registers backwards allows for more tricks to be used.
/*
 * mono_arch_local_regalloc:
 * Local (per-basic-block) register allocator. Two phases:
 *   1. a forward pass over BB's instructions recording liveness
 *      (born_in / last_use / prev_use / killed_in) per virtual register,
 *      while reversing the instruction list;
 *   2. a backward pass over the reversed list assigning hard registers to
 *      dreg/sreg1/sreg2, inserting spill stores/reloads as needed, and
 *      honouring call clobbers ('c' in the spec) and SETREG/SETFREG
 *      opcodes that pin calling-convention registers.
 * On !v64, an 'l' (long) destination occupies a sequential regpair and the
 * high half must be allocated adjacent to the low half.
 */
2087 mono_arch_local_regalloc (MonoCompile *cfg, MonoBasicBlock *bb)
2090 MonoRegState *rs = cfg->rs;
2092 RegTrack *reginfo, *reginfof;
2093 RegTrack *reginfo1, *reginfo2, *reginfod;
2094 InstList *tmp, *reversed = NULL;
2096 guint32 src1_mask, src2_mask, dest_mask;
2097 guint32 cur_iregs, cur_fregs;
2099 /* FIXME: Use caller saved regs and %i1-%i2 for allocation */
2103 rs->next_vireg = bb->max_ireg;
2104 rs->next_vfreg = bb->max_freg;
2105 mono_regstate_assign (rs);
2106 reginfo = mono_mempool_alloc0 (cfg->mempool, sizeof (RegTrack) * rs->next_vireg);
2107 reginfof = mono_mempool_alloc0 (cfg->mempool, sizeof (RegTrack) * rs->next_vfreg);
2108 rs->ifree_mask = ARCH_CALLER_REGS;
2109 rs->ffree_mask = ARCH_CALLER_FREGS;
2113 DEBUG (g_print ("LOCAL regalloc: basic block: %d\n", bb->block_num));
2114 /* forward pass on the instructions to collect register liveness info */
2116 spec = ins_spec [ins->opcode];
2119 spec = ins_spec [CEE_ADD];
2121 DEBUG (print_ins (i, ins));
2123 if (spec [MONO_INST_SRC1]) {
2124 if (spec [MONO_INST_SRC1] == 'f')
2125 reginfo1 = reginfof;
2128 reginfo1 [ins->sreg1].prev_use = reginfo1 [ins->sreg1].last_use;
2129 reginfo1 [ins->sreg1].last_use = i;
2133 if (spec [MONO_INST_SRC2]) {
2134 if (spec [MONO_INST_SRC2] == 'f')
2135 reginfo2 = reginfof;
2138 reginfo2 [ins->sreg2].prev_use = reginfo2 [ins->sreg2].last_use;
2139 reginfo2 [ins->sreg2].last_use = i;
2143 if (spec [MONO_INST_DEST]) {
2144 if (spec [MONO_INST_DEST] == 'f')
2145 reginfod = reginfof;
2148 if (spec [MONO_INST_DEST] != 'b') /* it's not just a base register */
2149 reginfod [ins->dreg].killed_in = i;
2150 reginfod [ins->dreg].prev_use = reginfod [ins->dreg].last_use;
2151 reginfod [ins->dreg].last_use = i;
2152 if (reginfod [ins->dreg].born_in == 0 || reginfod [ins->dreg].born_in > i)
2153 reginfod [ins->dreg].born_in = i;
2154 if (!v64 && (spec [MONO_INST_DEST] == 'l')) {
2155 /* result in a regpair, the virtual register is allocated sequentially */
2156 reginfod [ins->dreg + 1].prev_use = reginfod [ins->dreg + 1].last_use;
2157 reginfod [ins->dreg + 1].last_use = i;
2158 if (reginfod [ins->dreg + 1].born_in == 0 || reginfod [ins->dreg + 1].born_in > i)
2159 reginfod [ins->dreg + 1].born_in = i;
2164 reversed = inst_list_prepend (cfg->mempool, reversed, ins);
/* phase 2: backward allocation over the reversed list */
2169 cur_iregs = ARCH_CALLER_REGS;
2170 cur_fregs = ARCH_CALLER_FREGS;
2172 DEBUG (print_regtrack (reginfo, rs->next_vireg));
2173 DEBUG (print_regtrack (reginfof, rs->next_vfreg));
2176 int prev_dreg, prev_sreg1, prev_sreg2;
2179 spec = ins_spec [ins->opcode];
2181 spec = ins_spec [CEE_ADD];
2182 DEBUG (g_print ("processing:"));
2183 DEBUG (print_ins (i, ins));
2185 /* make the register available for allocation: FIXME add fp reg */
2186 if (ins->opcode == OP_SETREG || ins->opcode == OP_SETREGIMM) {
2187 /* Dont free register which can't be allocated */
2188 if (reg_is_freeable (ins->dreg)) {
2189 cur_iregs |= 1 << ins->dreg;
2190 DEBUG (g_print ("adding %d to cur_iregs\n", ins->dreg));
2192 } else if (ins->opcode == OP_SETFREG) {
2193 if (freg_is_freeable (ins->dreg)) {
2194 cur_fregs |= 1 << ins->dreg;
2195 DEBUG (g_print ("adding %d to cur_fregs\n", ins->dreg));
2197 } else if (spec [MONO_INST_CLOB] == 'c') {
2198 MonoCallInst *cinst = (MonoCallInst*)ins;
2199 DEBUG (g_print ("excluding regs 0x%lx from cur_iregs (0x%x)\n", (long)cinst->used_iregs, cur_iregs));
2200 cur_iregs &= ~cinst->used_iregs;
2201 cur_fregs &= ~cinst->used_fregs;
2202 DEBUG (g_print ("available cur_iregs: 0x%x\n", cur_iregs));
2203 /* registers used by the calling convention are excluded from
2204 * allocation: they will be selectively enabled when they are
2205 * assigned by the special SETREG opcodes.
2208 dest_mask = src1_mask = src2_mask = cur_iregs;
/* --- destination register --- */
2213 /* update for use with FP regs... */
2214 if (spec [MONO_INST_DEST] == 'f') {
2215 if (ins->dreg >= MONO_MAX_FREGS) {
2216 val = rs->fassign [ins->dreg];
2217 prev_dreg = ins->dreg;
2221 /* the register gets spilled after this inst */
2224 dest_mask = cur_fregs;
2225 val = mono_regstate_alloc_float (rs, dest_mask);
2227 val = get_float_register_spilling (cfg, tmp, ins, dest_mask, ins->dreg);
2228 rs->fassign [ins->dreg] = val;
2230 create_spilled_store_float (cfg, spill, val, prev_dreg, ins);
2232 DEBUG (g_print ("\tassigned dreg %s to dest R%d\n", mono_arch_regname (val), ins->dreg));
2233 rs->fsymbolic [val] = prev_dreg;
2238 if (freg_is_freeable (ins->dreg) && prev_dreg >= 0 && (reginfo [prev_dreg].born_in >= i || !(cur_fregs & (1 << ins->dreg)))) {
2239 DEBUG (g_print ("\tfreeable %s (R%d) (born in %d)\n", mono_arch_regname (ins->dreg), prev_dreg, reginfo [prev_dreg].born_in));
2240 mono_regstate_free_float (rs, ins->dreg);
2242 } else if (ins->dreg >= MONO_MAX_IREGS) {
2243 val = rs->iassign [ins->dreg];
2244 prev_dreg = ins->dreg;
2248 /* the register gets spilled after this inst */
2251 val = mono_regstate_alloc_int (rs, dest_mask);
2253 val = get_register_spilling (cfg, tmp, ins, dest_mask, ins->dreg);
2254 rs->iassign [ins->dreg] = val;
2256 create_spilled_store (cfg, spill, val, prev_dreg, ins);
2258 DEBUG (g_print ("\tassigned dreg %s to dest R%d\n", mono_arch_regname (val), ins->dreg));
2259 rs->isymbolic [val] = prev_dreg;
2261 if (!v64 && spec [MONO_INST_DEST] == 'l') {
2262 int hreg = prev_dreg + 1;
2263 val = rs->iassign [hreg];
2267 /* the register gets spilled after this inst */
2270 /* The second register must be a pair of the first */
2271 dest_mask = 1 << (rs->iassign [prev_dreg] + 1);
2272 val = mono_regstate_alloc_int (rs, dest_mask);
2274 val = get_register_spilling (cfg, tmp, ins, dest_mask, hreg);
2275 rs->iassign [hreg] = val;
2277 create_spilled_store (cfg, spill, val, hreg, ins);
2280 /* The second register must be a pair of the first */
2281 if (val != rs->iassign [prev_dreg] + 1) {
2282 dest_mask = 1 << (rs->iassign [prev_dreg] + 1);
2284 val = mono_regstate_alloc_int (rs, dest_mask);
2286 val = get_register_spilling (cfg, tmp, ins, dest_mask, hreg);
2288 create_copy_ins (cfg, rs->iassign [hreg], val, ins);
2290 rs->iassign [hreg] = val;
2294 DEBUG (g_print ("\tassigned hreg %s to dest R%d\n", mono_arch_regname (val), hreg));
2295 rs->isymbolic [val] = hreg;
2297 if (reg_is_freeable (val) && hreg >= 0 && (reginfo [hreg].born_in >= i && !(cur_iregs & (1 << val)))) {
2298 DEBUG (g_print ("\tfreeable %s (R%d)\n", mono_arch_regname (val), hreg));
2299 mono_regstate_free_int (rs, val);
2305 if (spec [MONO_INST_DEST] != 'f' && reg_is_freeable (ins->dreg) && prev_dreg >= 0 && (reginfo [prev_dreg].born_in >= i)) {
2306 DEBUG (g_print ("\tfreeable %s (R%d) (born in %d)\n", mono_arch_regname (ins->dreg), prev_dreg, reginfo [prev_dreg].born_in));
2307 mono_regstate_free_int (rs, ins->dreg);
/* --- source register 1 --- */
2313 if (spec [MONO_INST_SRC1] == 'f') {
2314 if (ins->sreg1 >= MONO_MAX_FREGS) {
2315 val = rs->fassign [ins->sreg1];
2316 prev_sreg1 = ins->sreg1;
2320 /* the register gets spilled after this inst */
2323 //g_assert (val == -1); /* source cannot be spilled */
2324 src1_mask = cur_fregs;
2325 val = mono_regstate_alloc_float (rs, src1_mask);
2327 val = get_float_register_spilling (cfg, tmp, ins, src1_mask, ins->sreg1);
2328 rs->fassign [ins->sreg1] = val;
2329 DEBUG (g_print ("\tassigned sreg1 %s to R%d\n", mono_arch_regname (val), ins->sreg1));
2331 MonoInst *store = create_spilled_store_float (cfg, spill, val, prev_sreg1, NULL);
2332 insert_before_ins (ins, tmp, store);
2335 rs->fsymbolic [val] = prev_sreg1;
2340 } else if (ins->sreg1 >= MONO_MAX_IREGS) {
2341 val = rs->iassign [ins->sreg1];
2342 prev_sreg1 = ins->sreg1;
2346 /* the register gets spilled after this inst */
2349 if (0 && (ins->opcode == OP_MOVE) && reg_is_freeable (ins->dreg)) {
2351 * small optimization: the dest register is already allocated
2352 * but the src one is not: we can simply assign the same register
2353 * here and peephole will get rid of the instruction later.
2354 * This optimization may interfere with the clobbering handling:
2355 * it removes a mov operation that will be added again to handle clobbering.
2356 * There are also some other issues; check them with 'make testjit'.
2358 mono_regstate_alloc_int (rs, 1 << ins->dreg);
2359 val = rs->iassign [ins->sreg1] = ins->dreg;
2360 //g_assert (val >= 0);
2361 DEBUG (g_print ("\tfast assigned sreg1 %s to R%d\n", mono_arch_regname (val), ins->sreg1));
2363 //g_assert (val == -1); /* source cannot be spilled */
2364 val = mono_regstate_alloc_int (rs, src1_mask);
2366 val = get_register_spilling (cfg, tmp, ins, src1_mask, ins->sreg1);
2367 rs->iassign [ins->sreg1] = val;
2368 DEBUG (g_print ("\tassigned sreg1 %s to R%d\n", mono_arch_regname (val), ins->sreg1));
2371 MonoInst *store = create_spilled_store (cfg, spill, val, prev_sreg1, NULL);
2372 insert_before_ins (ins, tmp, store);
2375 rs->isymbolic [val] = prev_sreg1;
/* --- source register 2 --- */
2384 if (spec [MONO_INST_SRC2] == 'f') {
2385 if (ins->sreg2 >= MONO_MAX_FREGS) {
2386 val = rs->fassign [ins->sreg2];
2387 prev_sreg2 = ins->sreg2;
2391 /* the register gets spilled after this inst */
2394 src2_mask = cur_fregs;
2395 val = mono_regstate_alloc_float (rs, src2_mask);
2397 val = get_float_register_spilling (cfg, tmp, ins, src2_mask, ins->sreg2);
2398 rs->fassign [ins->sreg2] = val;
2399 DEBUG (g_print ("\tassigned sreg2 %s to R%d\n", mono_arch_regname (val), ins->sreg2));
2401 create_spilled_store_float (cfg, spill, val, prev_sreg2, ins);
2403 rs->fsymbolic [val] = prev_sreg2;
2408 } else if (ins->sreg2 >= MONO_MAX_IREGS) {
2409 val = rs->iassign [ins->sreg2];
2410 prev_sreg2 = ins->sreg2;
2414 /* the register gets spilled after this inst */
2417 val = mono_regstate_alloc_int (rs, src2_mask);
2419 val = get_register_spilling (cfg, tmp, ins, src2_mask, ins->sreg2);
2420 rs->iassign [ins->sreg2] = val;
2421 DEBUG (g_print ("\tassigned sreg2 %s to R%d\n", mono_arch_regname (val), ins->sreg2));
2423 create_spilled_store (cfg, spill, val, prev_sreg2, ins);
2425 rs->isymbolic [val] = prev_sreg2;
/* --- call clobber sanity check --- */
2431 if (spec [MONO_INST_CLOB] == 'c') {
2433 guint32 clob_mask = ARCH_CALLER_REGS;
2434 for (j = 0; j < MONO_MAX_IREGS; ++j) {
2436 if ((clob_mask & s) && !(rs->ifree_mask & s) && j != ins->sreg1) {
2437 //g_warning ("register %s busy at call site\n", mono_arch_regname (j));
2441 /*if (reg_is_freeable (ins->sreg1) && prev_sreg1 >= 0 && reginfo [prev_sreg1].born_in >= i) {
2442 DEBUG (g_print ("freeable %s\n", mono_arch_regname (ins->sreg1)));
2443 mono_regstate_free_int (rs, ins->sreg1);
2445 if (reg_is_freeable (ins->sreg2) && prev_sreg2 >= 0 && reginfo [prev_sreg2].born_in >= i) {
2446 DEBUG (g_print ("freeable %s\n", mono_arch_regname (ins->sreg2)));
2447 mono_regstate_free_int (rs, ins->sreg2);
2450 //DEBUG (print_ins (i, ins));
/*
 * sparc_patch:
 * Back-patch the instruction(s) at CODE so control (or the loaded
 * address) refers to TARGET. Dispatches on the SPARC instruction format:
 * op/op2 combinations select Bicc (imm22), BPcc (imm19), BPr (imm16),
 * FBfcc (imm22), SETHI+{or,nop,load/store,jmpl} sequences, CALL (imm30),
 * ADD-immediate, and the 5-instruction sparc_set sequence. Displacements
 * are word displacements ((target - code) >> 2).
 */
2457 sparc_patch (guint32 *code, const gpointer target)
2460 guint32 ins = *code;
2461 guint32 op = ins >> 30;
2462 guint32 op2 = (ins >> 22) & 0x7;
2463 guint32 rd = (ins >> 25) & 0x1f;
2464 guint8* target8 = (guint8*)target;
2465 gint64 disp = (target8 - (guint8*)code) >> 2;
2468 // g_print ("patching 0x%08x (0x%08x) to point to 0x%08x\n", code, ins, target);
/* Bicc: 22 bit displacement */
2470 if ((op == 0) && (op2 == 2)) {
2471 if (!sparc_is_imm22 (disp))
2474 *code = ((ins >> 22) << 22) | (disp & 0x3fffff);
/* BPcc: 19 bit displacement */
2476 else if ((op == 0) && (op2 == 1)) {
2477 if (!sparc_is_imm19 (disp))
2480 *code = ((ins >> 19) << 19) | (disp & 0x7ffff);
/* BPr: 16 bit displacement split in two fields */
2482 else if ((op == 0) && (op2 == 3)) {
2483 if (!sparc_is_imm16 (disp))
2486 *code &= ~(0x180000 | 0x3fff);
2487 *code |= ((disp << 21) & (0x180000)) | (disp & 0x3fff);
2489 else if ((op == 0) && (op2 == 6)) {
2490 if (!sparc_is_imm22 (disp))
2493 *code = ((ins >> 22) << 22) | (disp & 0x3fffff);
/* SETHI: look at the following instruction to decide the pattern */
2495 else if ((op == 0) && (op2 == 4)) {
2496 guint32 ins2 = code [1];
2498 if (((ins2 >> 30) == 2) && (((ins2 >> 19) & 0x3f) == 2)) {
2499 /* sethi followed by or */
2501 sparc_set (p, target8, rd);
2502 while (p <= (code + 1))
2505 else if (ins2 == 0x01000000) {
2506 /* sethi followed by nop */
2508 sparc_set (p, target8, rd);
2509 while (p <= (code + 1))
2512 else if ((sparc_inst_op (ins2) == 3) && (sparc_inst_imm (ins2))) {
2513 /* sethi followed by load/store */
2515 guint32 t = (guint32)target8;
2516 *code &= ~(0x3fffff);
2518 *(code + 1) &= ~(0x3ff);
2519 *(code + 1) |= (t & 0x3ff);
2523 (sparc_inst_rd (ins) == sparc_g1) &&
2524 (sparc_inst_op (c [1]) == 0) && (sparc_inst_op2 (c [1]) == 4) &&
2525 (sparc_inst_op (c [2]) == 2) && (sparc_inst_op3 (c [2]) == 2) &&
2526 (sparc_inst_op (c [3]) == 2) && (sparc_inst_op3 (c [3]) == 2))
2530 reg = sparc_inst_rd (c [1]);
2531 sparc_set (p, target8, reg);
2535 else if ((sparc_inst_op (ins2) == 2) && (sparc_inst_op3 (ins2) == 0x38) &&
2536 (sparc_inst_imm (ins2))) {
2537 /* sethi followed by jmpl */
2539 guint32 t = (guint32)target8;
2540 *code &= ~(0x3fffff);
2542 *(code + 1) &= ~(0x3ff);
2543 *(code + 1) |= (t & 0x3ff);
/* CALL: 30 bit displacement */
2549 else if (op == 01) {
2550 gint64 disp = (target8 - (guint8*)code) >> 2;
2552 if (!sparc_is_imm30 (disp))
2554 sparc_call_simple (code, target8 - (guint8*)code);
/* add-with-immediate: patch the 13 bit immediate field */
2556 else if ((op == 2) && (sparc_inst_op3 (ins) == 0x2) && sparc_inst_imm (ins)) {
2558 g_assert (sparc_is_imm13 (target8));
2560 *code |= (guint32)target8;
2562 else if ((sparc_inst_op (ins) == 2) && (sparc_inst_op3 (ins) == 0x7)) {
2563 /* sparc_set case 5. */
2567 reg = sparc_inst_rd (c [3]);
2568 sparc_set (p, target, reg);
2575 // g_print ("patched with 0x%08x\n", ins);
2579 * mono_sparc_emit_save_lmf:
2581 * Emit the code necessary to push a new entry onto the lmf stack. Used by
2582 * trampolines as well.
/*
 * mono_sparc_emit_save_lmf:
 * Emit code that pushes a new MonoLMF entry (at frame offset LMF_OFFSET)
 * onto the per-thread LMF stack: store the lmf_addr (in %o0), save the
 * current head as previous_lmf, then make the new entry the head.
 * Returns the advanced code pointer (return line elided in this excerpt).
 */
2585 mono_sparc_emit_save_lmf (guint32 *code, guint32 lmf_offset)
2588 sparc_sti_imm (code, sparc_o0, sparc_fp, lmf_offset + G_STRUCT_OFFSET (MonoLMF, lmf_addr));
2589 /* Save previous_lmf */
2590 sparc_ldi (code, sparc_o0, sparc_g0, sparc_o7);
2591 sparc_sti_imm (code, sparc_o7, sparc_fp, lmf_offset + G_STRUCT_OFFSET (MonoLMF, previous_lmf));
/* Set new lmf: *lmf_addr = &lmf */
2593 sparc_add_imm (code, FALSE, sparc_fp, lmf_offset, sparc_o7);
2594 sparc_sti (code, sparc_o7, sparc_o0, sparc_g0);
/*
 * mono_sparc_emit_restore_lmf:
 * Emit code that pops the MonoLMF entry saved by mono_sparc_emit_save_lmf:
 * reload previous_lmf and lmf_addr from the frame and store previous_lmf
 * back through lmf_addr.
 */
2600 mono_sparc_emit_restore_lmf (guint32 *code, guint32 lmf_offset)
2602 /* Load previous_lmf */
2603 sparc_ldi_imm (code, sparc_fp, lmf_offset + G_STRUCT_OFFSET (MonoLMF, previous_lmf), sparc_l0);
/* Load lmf_addr */
2605 sparc_ldi_imm (code, sparc_fp, lmf_offset + G_STRUCT_OFFSET (MonoLMF, lmf_addr), sparc_l1);
2606 /* *(lmf) = previous_lmf */
2607 sparc_sti (code, sparc_l0, sparc_l1, sparc_g0);
/*
 * emit_save_sp_to_lmf:
 * If the method saves an LMF, store the current %sp into its sp field.
 * Done before each call rather than in the prolog, because register
 * windows are flushed to the %sp value current at the time of the save.
 */
2612 emit_save_sp_to_lmf (MonoCompile *cfg, guint32 *code)
2615 * Since register windows are saved to the current value of %sp, we need to
2616 * set the sp field in the lmf before the call, not in the prolog.
2618 if (cfg->method->save_lmf) {
2619 gint32 lmf_offset = MONO_SPARC_STACK_BIAS - cfg->arch.lmf_offset;
2622 sparc_sti_imm (code, sparc_sp, sparc_fp, lmf_offset + G_STRUCT_OFFSET (MonoLMF, sp));
/*
 * emit_vret_token:
 * For pinvoke calls returning a struct, emit the "unimp <size>"
 * pseudo-instruction after the call site, as the SPARC (V8) ABI requires
 * the callee to check for it when returning a structure.
 */
2629 emit_vret_token (MonoInst *ins, guint32 *code)
2631 MonoCallInst *call = (MonoCallInst*)ins;
2635 * The sparc ABI requires that calls to functions which return a structure
2636 * contain an additional unimpl instruction which is checked by the callee.
2638 if (call->signature->pinvoke && MONO_TYPE_ISSTRUCT(call->signature->ret)) {
2639 if (call->signature->ret->type == MONO_TYPE_TYPEDBYREF)
2640 size = mono_type_stack_size (call->signature->ret, NULL);
2642 size = mono_class_native_size (call->signature->ret->data.klass, NULL);
2643 sparc_unimp (code, size & 0xfff);
/*
 * emit_move_return_value:
 * After a call opcode, move the ABI return value (%o0/%o1 for int/long,
 * %f0/%f1 for fp) into the destination register of INS. R4 results are
 * widened to double with fstod since only double-precision fp registers
 * are used for local allocation.
 */
2650 emit_move_return_value (MonoInst *ins, guint32 *code)
2652 /* Move return value to the target register */
2653 /* FIXME: do this in the local reg allocator */
2654 switch (ins->opcode) {
2656 case OP_VOIDCALL_REG:
2657 case OP_VOIDCALL_MEMBASE:
2661 case OP_CALL_MEMBASE:
2662 sparc_mov_reg_reg (code, sparc_o0, ins->dreg);
2666 case OP_LCALL_MEMBASE:
2668 * ins->dreg is the least significant reg due to the lreg: LCALL rule
2672 sparc_mov_reg_reg (code, sparc_o0, ins->dreg);
2674 sparc_mov_reg_reg (code, sparc_o0, ins->dreg + 1);
2675 sparc_mov_reg_reg (code, sparc_o1, ins->dreg);
2680 case OP_FCALL_MEMBASE:
2682 if (((MonoCallInst*)ins)->signature->ret->type == MONO_TYPE_R4) {
2683 sparc_fmovs (code, sparc_f0, ins->dreg);
2684 sparc_fstod (code, ins->dreg, ins->dreg);
2687 sparc_fmovd (code, sparc_f0, ins->dreg);
2689 sparc_fmovs (code, sparc_f0, ins->dreg);
2690 if (((MonoCallInst*)ins)->signature->ret->type == MONO_TYPE_R4)
2691 sparc_fstod (code, ins->dreg, ins->dreg);
2693 sparc_fmovs (code, sparc_f1, ins->dreg + 1);
2698 case OP_VCALL_MEMBASE:
2708 * emit_load_volatile_arguments:
2710 * Load volatile arguments from the stack to the original input registers.
2711 * Required before a tail call.
/*
 * emit_load_volatile_arguments:
 * Reload the method's incoming arguments from their stack/home locations
 * back into the original %i input registers, reversing what the prolog
 * did. Required before a tail call so the callee sees the arguments where
 * the calling convention expects them. Handles each ArgStorage kind
 * (split reg/stack, reg pair, stack pair, plain register, fp registers).
 */
2714 emit_load_volatile_arguments (MonoCompile *cfg, guint32 *code)
2716 MonoMethod *method = cfg->method;
2717 MonoMethodSignature *sig;
2722 /* FIXME: Generate intermediate code instead */
2724 sig = method->signature;
2726 cinfo = get_call_info (sig, FALSE);
2728 /* This is the opposite of the code in emit_prolog */
2730 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
2731 ArgInfo *ainfo = cinfo->args + i;
2732 gint32 stack_offset;
2734 inst = cfg->varinfo [i];
2736 if (sig->hasthis && (i == 0))
2737 arg_type = &mono_defaults.object_class->byval_arg;
2739 arg_type = sig->params [i - sig->hasthis];
2741 stack_offset = ainfo->offset + ARGS_OFFSET;
2742 ireg = sparc_i0 + ainfo->reg;
2744 if (ainfo->storage == ArgInSplitRegStack) {
2745 g_assert (inst->opcode == OP_REGOFFSET);
2747 if (!sparc_is_imm13 (stack_offset))
2749 sparc_st_imm (code, inst->inst_basereg, stack_offset, sparc_i5);
/* 32 bit: an R8 argument lives in two words */
2752 if (!v64 && !arg_type->byref && (arg_type->type == MONO_TYPE_R8)) {
2753 if (ainfo->storage == ArgInIRegPair) {
2754 if (!sparc_is_imm13 (inst->inst_offset + 4))
2756 sparc_ld_imm (code, inst->inst_basereg, inst->inst_offset, ireg);
2757 sparc_ld_imm (code, inst->inst_basereg, inst->inst_offset + 4, ireg + 1);
2760 if (ainfo->storage == ArgInSplitRegStack) {
2761 if (stack_offset != inst->inst_offset) {
2762 sparc_ld_imm (code, inst->inst_basereg, inst->inst_offset, sparc_i5);
2763 sparc_ld_imm (code, inst->inst_basereg, inst->inst_offset + 4, sparc_o7);
2764 sparc_st_imm (code, sparc_o7, sparc_fp, stack_offset + 4);
2769 if (ainfo->storage == ArgOnStackPair) {
2770 if (stack_offset != inst->inst_offset) {
2771 /* stack_offset is not dword aligned, so we need to make a copy */
2772 sparc_ld_imm (code, inst->inst_basereg, inst->inst_offset, sparc_o7);
2773 sparc_st_imm (code, sparc_o7, sparc_fp, stack_offset);
2775 sparc_ld_imm (code, inst->inst_basereg, inst->inst_offset + 4, sparc_o7);
2776 sparc_st_imm (code, sparc_o7, sparc_fp, stack_offset + 4);
2781 g_assert_not_reached ();
2784 if ((ainfo->storage == ArgInIReg) && (inst->opcode != OP_REGVAR)) {
2785 /* Argument in register, but need to be saved to stack */
2786 if (!sparc_is_imm13 (stack_offset))
/* pick the load width/signedness from the offset alignment */
2788 if ((stack_offset - ARGS_OFFSET) & 0x1)
2789 /* FIXME: Is this ldsb or ldub ? */
2790 sparc_ldsb_imm (code, inst->inst_basereg, stack_offset, ireg);
2792 if ((stack_offset - ARGS_OFFSET) & 0x2)
2793 sparc_ldsh_imm (code, inst->inst_basereg, stack_offset, ireg);
2795 if ((stack_offset - ARGS_OFFSET) & 0x4)
2796 sparc_ld_imm (code, inst->inst_basereg, stack_offset, ireg);
2799 sparc_ldx_imm (code, inst->inst_basereg, stack_offset, ireg);
2801 sparc_ld_imm (code, inst->inst_basereg, stack_offset, ireg);
2804 else if ((ainfo->storage == ArgInIRegPair) && (inst->opcode != OP_REGVAR)) {
2805 /* Argument in regpair, but need to be saved to stack */
2806 if (!sparc_is_imm13 (inst->inst_offset + 4))
2808 sparc_ld_imm (code, inst->inst_basereg, inst->inst_offset, ireg);
2809 sparc_st_imm (code, inst->inst_basereg, inst->inst_offset + 4, ireg + 1);
2811 else if ((ainfo->storage == ArgInFloatReg) && (inst->opcode != OP_REGVAR)) {
2814 else if ((ainfo->storage == ArgInDoubleReg) && (inst->opcode != OP_REGVAR)) {
2818 if ((ainfo->storage == ArgInSplitRegStack) || (ainfo->storage == ArgOnStack))
2819 if (inst->opcode == OP_REGVAR)
2820 /* FIXME: Load the argument into memory */
/*
 * Returns TRUE when the indirect call instruction at CODE is a virtual call.
 * Virtual call sites are tagged at emission time by placing a special marker
 * nop ('or %g0, 0xca, %g0') in the call's delay slot; this function detects
 * that marker.  NOTE(review): several lines of this listing are elided —
 * confirm the exact return paths against the full source.
 */
2830 * mono_sparc_is_virtual_call:
2832 * Determine whenever the instruction at CODE is a virtual call.
2835 mono_sparc_is_virtual_call (guint32 *code)
/* op == 0x2 with op3 == 0x38 is the SPARC JMPL format (register-indirect call). */
2842 if ((sparc_inst_op (*code) == 0x2) && (sparc_inst_op3 (*code) == 0x38)) {
2844 * Register indirect call. If it is a virtual call, then the
2845 * instruction in the delay slot is a special kind of nop.
2848 /* Construct special nop */
/* Assemble the marker nop into a scratch buffer so it can be compared
 * word-for-word with the instruction in the delay slot (code [1]). */
2849 sparc_or_imm (p, FALSE, sparc_g0, 0xca, sparc_g0);
2852 if (code [1] == p [0])
/*
 * Given the address of a virtual call instruction (CODE) and the caller's
 * frame pointer (FP), recover the address of the vtable slot the call loaded
 * its target from, by decoding the 'ld [base + disp], reg; jmpl reg' pair.
 * NOTE(review): intermediate lines are elided in this listing; verify the
 * declaration of base_val and the fallthrough paths against the full source.
 */
2860 * mono_sparc_get_vcall_slot_addr:
2862 * Determine the vtable slot used by a virtual call.
2865 mono_sparc_get_vcall_slot_addr (guint32 *code, gpointer *fp)
2867 guint32 ins = code [0];
2868 guint32 prev_ins = code [-1];
/* Flush the register windows so the caller's locals/ins are in memory at *fp. */
2870 mono_sparc_flushw ();
/* On V9 the frame pointer is biased; undo the bias to index the save area. */
2872 fp = (gpointer*)((guint8*)fp + MONO_SPARC_STACK_BIAS);
/* ins must be a JMPL (indirect call): op == 0x2, op3 == 0x38. */
2874 if ((sparc_inst_op (ins) == 0x2) && (sparc_inst_op3 (ins) == 0x38)) {
/* prev_ins must be a load (lduw/ldx) that produced the call target. */
2875 if ((sparc_inst_op (prev_ins) == 0x3) && (sparc_inst_op3 (prev_ins) == 0 || sparc_inst_op3 (prev_ins) == 0xb)) {
2876 /* ld [r1 + CONST ], r2; call r2 */
2877 guint32 base = sparc_inst_rs1 (prev_ins);
2878 guint32 disp = sparc_inst_imm13 (prev_ins);
/* The register the load wrote must be the one the call jumps through. */
2881 g_assert (sparc_inst_rd (prev_ins) == sparc_inst_rs1 (ins));
/* Only %o0..%i7 can be recovered from the register save area. */
2883 g_assert ((base >= sparc_o0) && (base <= sparc_i7));
/* fp[0..15] holds %l0-%l7/%i0-%i7; 'base - 16' maps %o0 (reg 8)... — TODO
 * confirm this index mapping against the register window layout. */
2885 base_val = fp [base - 16];
2887 return (gpointer)((guint8*)base_val + disp);
2890 g_assert_not_reached ();
2893 g_assert_not_reached ();
/*
 * Emit native SPARC code for every MonoInst in basic block BB, appending to
 * cfg->native_code.  One big switch dispatches on ins->opcode; each case
 * assembles a short instruction sequence via the sparc_* emitter macros.
 * NOTE(review): this listing is decimated — case labels, breaks and else
 * branches between the visible lines are elided; do not infer fallthrough
 * behavior from adjacency here.
 */
2899 * Some conventions used in the following code.
2900 * 2) The only scratch registers we have are o7 and g1. We try to
2901 * stick to o7 when we can, and use g1 when necessary.
2905 mono_arch_output_basic_block (MonoCompile *cfg, MonoBasicBlock *bb)
2910 guint32 *code = (guint32*)(cfg->native_code + cfg->code_len);
2911 MonoInst *last_ins = NULL;
2915 if (cfg->opt & MONO_OPT_PEEPHOLE)
2916 peephole_pass (cfg, bb);
2918 if (cfg->verbose_level > 2)
2919 g_print ("Basic block %d starting at offset 0x%x\n", bb->block_num, bb->native_offset);
2921 cpos = bb->max_offset;
2923 if (cfg->prof_options & MONO_PROFILE_COVERAGE) {
/* Per-instruction loop: look up the ins_spec entry to learn the maximum
 * encoded length, and grow the native code buffer before emitting. */
2931 offset = (guint8*)code - cfg->native_code;
2933 spec = ins_spec [ins->opcode];
2935 spec = ins_spec [CEE_ADD];
2937 max_len = ((guint8 *)spec)[MONO_INST_LEN];
2939 if (offset > (cfg->code_size - max_len - 16)) {
2940 cfg->code_size *= 2;
2941 cfg->native_code = g_realloc (cfg->native_code, cfg->code_size);
2942 code = (guint32*)(cfg->native_code + offset);
2944 code_start = (guint8*)code;
2945 // if (ins->cil_code)
2946 // g_print ("cil code\n");
2947 mono_debug_record_line_number (cfg, ins, offset);
2949 switch (ins->opcode) {
/* --- stores of immediates to [basereg + offset] --- */
2950 case OP_STOREI1_MEMBASE_IMM:
2951 EMIT_STORE_MEMBASE_IMM (ins, stb);
2953 case OP_STOREI2_MEMBASE_IMM:
2954 EMIT_STORE_MEMBASE_IMM (ins, sth);
2956 case OP_STORE_MEMBASE_IMM:
2957 EMIT_STORE_MEMBASE_IMM (ins, sti);
2959 case OP_STOREI4_MEMBASE_IMM:
2960 EMIT_STORE_MEMBASE_IMM (ins, st);
2962 case OP_STOREI8_MEMBASE_IMM:
2964 EMIT_STORE_MEMBASE_IMM (ins, stx);
2966 /* Only generated by peephole opts */
2967 g_assert ((ins->inst_offset % 8) == 0);
2968 g_assert (ins->inst_imm == 0);
2969 EMIT_STORE_MEMBASE_IMM (ins, stx);
/* --- stores of registers to [basereg + offset] --- */
2972 case OP_STOREI1_MEMBASE_REG:
2973 EMIT_STORE_MEMBASE_REG (ins, stb);
2975 case OP_STOREI2_MEMBASE_REG:
2976 EMIT_STORE_MEMBASE_REG (ins, sth);
2978 case OP_STOREI4_MEMBASE_REG:
2979 EMIT_STORE_MEMBASE_REG (ins, st);
2981 case OP_STOREI8_MEMBASE_REG:
2983 EMIT_STORE_MEMBASE_REG (ins, stx);
2985 /* Only used by OP_MEMSET */
2986 EMIT_STORE_MEMBASE_REG (ins, std);
2989 case OP_STORE_MEMBASE_REG:
2990 EMIT_STORE_MEMBASE_REG (ins, sti);
/* Loads from an absolute address held in inst_c0. */
2994 sparc_ldx (code, ins->inst_c0, sparc_g0, ins->dreg);
2996 sparc_ld (code, ins->inst_c0, sparc_g0, ins->dreg);
3001 sparc_ldsw (code, ins->inst_c0, sparc_g0, ins->dreg);
3003 sparc_ld (code, ins->inst_c0, sparc_g0, ins->dreg);
3007 sparc_ld (code, ins->inst_c0, sparc_g0, ins->dreg);
/* Address does not fit an immediate: materialize it in dreg first. */
3010 sparc_set (code, ins->inst_c0, ins->dreg);
3011 sparc_ld (code, ins->dreg, sparc_g0, ins->dreg);
/* --- loads from [basereg + offset], sized and sign/zero-extended --- */
3013 case OP_LOADI4_MEMBASE:
3015 EMIT_LOAD_MEMBASE (ins, ldsw);
3017 EMIT_LOAD_MEMBASE (ins, ld);
3020 case OP_LOADU4_MEMBASE:
3021 EMIT_LOAD_MEMBASE (ins, ld);
3023 case OP_LOADU1_MEMBASE:
3024 EMIT_LOAD_MEMBASE (ins, ldub);
3026 case OP_LOADI1_MEMBASE:
3027 EMIT_LOAD_MEMBASE (ins, ldsb);
3029 case OP_LOADU2_MEMBASE:
3030 EMIT_LOAD_MEMBASE (ins, lduh);
3032 case OP_LOADI2_MEMBASE:
3033 EMIT_LOAD_MEMBASE (ins, ldsh);
3035 case OP_LOAD_MEMBASE:
3037 EMIT_LOAD_MEMBASE (ins, ldx);
3039 EMIT_LOAD_MEMBASE (ins, ld);
3043 case OP_LOADI8_MEMBASE:
3044 EMIT_LOAD_MEMBASE (ins, ldx);
/* In-register sign extension: shift left then arithmetic shift right. */
3048 sparc_sll_imm (code, ins->sreg1, 24, sparc_o7);
3049 sparc_sra_imm (code, sparc_o7, 24, ins->dreg);
3052 sparc_sll_imm (code, ins->sreg1, 16, sparc_o7);
3053 sparc_sra_imm (code, sparc_o7, 16, ins->dreg);
/* Zero extension of a byte is just a mask. */
3056 sparc_and_imm (code, FALSE, ins->sreg1, 0xff, ins->dreg);
3059 sparc_sll_imm (code, ins->sreg1, 16, sparc_o7);
3060 sparc_srl_imm (code, sparc_o7, 16, ins->dreg);
3062 case CEE_CONV_OVF_U4:
3063 /* Only used on V9 */
/* Negative values overflow when converted to u4. */
3064 sparc_cmp_imm (code, ins->sreg1, 0);
3065 mono_add_patch_info (cfg, (guint8*)(code) - (cfg)->native_code,
3066 MONO_PATCH_INFO_EXC, "OverflowException");
3067 sparc_branchp (code, 0, sparc_bl, sparc_xcc_short, 0, 0);
/* Compare against 1 << 32: values with bits above 31 overflow too. */
3069 sparc_set (code, 1, sparc_o7);
3070 sparc_sllx_imm (code, sparc_o7, 32, sparc_o7);
3071 sparc_cmp (code, ins->sreg1, sparc_o7);
3072 mono_add_patch_info (cfg, (guint8*)(code) - (cfg)->native_code,
3073 MONO_PATCH_INFO_EXC, "OverflowException");
3074 sparc_branchp (code, 0, sparc_bge, sparc_xcc_short, 0, 0);
3076 sparc_mov_reg_reg (code, ins->sreg1, ins->dreg);
3078 case CEE_CONV_OVF_I4_UN:
3079 /* Only used on V9 */
3084 /* Only used on V9 */
/* srl by 0 zero-extends the low 32 bits into a 64-bit register. */
3085 sparc_srl_imm (code, ins->sreg1, 0, ins->dreg);
3089 /* Only used on V9 */
/* sra by 0 sign-extends the low 32 bits. */
3090 sparc_sra_imm (code, ins->sreg1, 0, ins->dreg);
/* --- compares: set condition codes for a following branch/set --- */
3095 sparc_cmp (code, ins->sreg1, ins->sreg2);
3097 case OP_COMPARE_IMM:
3098 case OP_ICOMPARE_IMM:
3099 if (sparc_is_imm13 (ins->inst_imm))
3100 sparc_cmp_imm (code, ins->sreg1, ins->inst_imm);
3102 sparc_set (code, ins->inst_imm, sparc_o7);
3103 sparc_cmp (code, ins->sreg1, sparc_o7);
3106 case OP_X86_TEST_NULL:
3107 sparc_cmp_imm (code, ins->sreg1, 0);
3111 * gdb does not like encountering 'ta 1' in the debugged code. So
3112 * instead of emitting a trap, we emit a call a C function and place a
3115 //sparc_ta (code, 1);
3116 mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_ABS, mono_sparc_break);
/* --- integer ALU ops; the TRUE/FALSE argument selects cc-setting --- */
3121 sparc_add (code, TRUE, ins->sreg1, ins->sreg2, ins->dreg);
3125 sparc_add (code, FALSE, ins->sreg1, ins->sreg2, ins->dreg);
3130 /* according to inssel-long32.brg, this should set cc */
3131 EMIT_ALU_IMM (ins, add, TRUE);
3135 /* according to inssel-long32.brg, this should set cc */
3136 sparc_addx (code, TRUE, ins->sreg1, ins->sreg2, ins->dreg);
3140 EMIT_ALU_IMM (ins, addx, TRUE);
3144 sparc_sub (code, TRUE, ins->sreg1, ins->sreg2, ins->dreg);
3148 sparc_sub (code, FALSE, ins->sreg1, ins->sreg2, ins->dreg);
3153 /* according to inssel-long32.brg, this should set cc */
3154 EMIT_ALU_IMM (ins, sub, TRUE);
3158 /* according to inssel-long32.brg, this should set cc */
3159 sparc_subx (code, TRUE, ins->sreg1, ins->sreg2, ins->dreg);
3163 EMIT_ALU_IMM (ins, subx, TRUE);
3167 sparc_and (code, FALSE, ins->sreg1, ins->sreg2, ins->dreg);
3171 EMIT_ALU_IMM (ins, and, FALSE);
/* Signed division: %y must hold the sign extension of the dividend. */
3175 /* Sign extend sreg1 into %y */
3176 sparc_sra_imm (code, ins->sreg1, 31, sparc_o7);
3177 sparc_wry (code, sparc_o7, sparc_g0);
3178 sparc_sdiv (code, TRUE, ins->sreg1, ins->sreg2, ins->dreg);
3179 EMIT_COND_SYSTEM_EXCEPTION_GENERAL (code, sparc_boverflow, "ArithmeticException", TRUE, sparc_icc_short);
/* Unsigned division: %y is cleared instead. */
3183 sparc_wry (code, sparc_g0, sparc_g0);
3184 sparc_udiv (code, FALSE, ins->sreg1, ins->sreg2, ins->dreg);
3189 /* Transform division into a shift */
3190 for (i = 1; i < 30; ++i) {
3192 if (ins->inst_imm == imm)
/* Division by 2: add the sign bit so negative values round toward zero. */
3198 sparc_srl_imm (code, ins->sreg1, 31, sparc_o7);
3199 sparc_add (code, FALSE, ins->sreg1, sparc_o7, ins->dreg);
3200 sparc_sra_imm (code, ins->dreg, 1, ins->dreg);
3203 /* http://compilers.iecc.com/comparch/article/93-04-079 */
3204 sparc_sra_imm (code, ins->sreg1, 31, sparc_o7);
3205 sparc_srl_imm (code, sparc_o7, 32 - i, sparc_o7);
3206 sparc_add (code, FALSE, ins->sreg1, sparc_o7, ins->dreg);
3207 sparc_sra_imm (code, ins->dreg, i, ins->dreg);
3211 /* Sign extend sreg1 into %y */
3212 sparc_sra_imm (code, ins->sreg1, 31, sparc_o7);
3213 sparc_wry (code, sparc_o7, sparc_g0);
3214 EMIT_ALU_IMM (ins, sdiv, TRUE);
3215 EMIT_COND_SYSTEM_EXCEPTION_GENERAL (code, sparc_boverflow, "ArithmeticException", TRUE, sparc_icc_short);
/* Remainder: rem = sreg1 - (sreg1 / sreg2) * sreg2. */
3221 /* Sign extend sreg1 into %y */
3222 sparc_sra_imm (code, ins->sreg1, 31, sparc_o7);
3223 sparc_wry (code, sparc_o7, sparc_g0);
3224 sparc_sdiv (code, TRUE, ins->sreg1, ins->sreg2, sparc_o7);
3225 EMIT_COND_SYSTEM_EXCEPTION_GENERAL (code, sparc_boverflow, "ArithmeticException", TRUE, sparc_icc_short);
3226 sparc_smul (code, FALSE, ins->sreg2, sparc_o7, sparc_o7);
3227 sparc_sub (code, FALSE, ins->sreg1, sparc_o7, ins->dreg);
3231 sparc_wry (code, sparc_g0, sparc_g0);
3232 sparc_udiv (code, FALSE, ins->sreg1, ins->sreg2, sparc_o7);
3233 sparc_umul (code, FALSE, ins->sreg2, sparc_o7, sparc_o7);
3234 sparc_sub (code, FALSE, ins->sreg1, sparc_o7, ins->dreg);
3238 /* Sign extend sreg1 into %y */
3239 sparc_sra_imm (code, ins->sreg1, 31, sparc_o7);
3240 sparc_wry (code, sparc_o7, sparc_g0);
/* Immediate divisor: fall back to a register when it exceeds simm13. */
3241 if (!sparc_is_imm13 (ins->inst_imm)) {
3242 sparc_set (code, ins->inst_imm, GP_SCRATCH_REG);
3243 sparc_sdiv (code, TRUE, ins->sreg1, GP_SCRATCH_REG, sparc_o7);
3244 EMIT_COND_SYSTEM_EXCEPTION_GENERAL (code, sparc_boverflow, "ArithmeticException", TRUE, sparc_icc_short);
3245 sparc_smul (code, FALSE, sparc_o7, GP_SCRATCH_REG, sparc_o7);
3248 sparc_sdiv_imm (code, TRUE, ins->sreg1, ins->inst_imm, sparc_o7);
3249 EMIT_COND_SYSTEM_EXCEPTION_GENERAL (code, sparc_boverflow, "ArithmeticException", TRUE, sparc_icc_short);
3250 sparc_smul_imm (code, FALSE, sparc_o7, ins->inst_imm, sparc_o7);
3252 sparc_sub (code, FALSE, ins->sreg1, sparc_o7, ins->dreg);
3256 sparc_or (code, FALSE, ins->sreg1, ins->sreg2, ins->dreg);
3260 EMIT_ALU_IMM (ins, or, FALSE);
3264 sparc_xor (code, FALSE, ins->sreg1, ins->sreg2, ins->dreg);
3268 EMIT_ALU_IMM (ins, xor, FALSE);
/* --- 32-bit shifts: immediate form only valid for counts < 32 --- */
3272 sparc_sll (code, ins->sreg1, ins->sreg2, ins->dreg);
3276 if (ins->inst_imm < (1 << 5))
3277 sparc_sll_imm (code, ins->sreg1, ins->inst_imm, ins->dreg);
3279 sparc_set (code, ins->inst_imm, sparc_o7);
3280 sparc_sll (code, ins->sreg1, sparc_o7, ins->dreg);
3285 sparc_sra (code, ins->sreg1, ins->sreg2, ins->dreg);
3289 if (ins->inst_imm < (1 << 5))
3290 sparc_sra_imm (code, ins->sreg1, ins->inst_imm, ins->dreg);
3292 sparc_set (code, ins->inst_imm, sparc_o7);
3293 sparc_sra (code, ins->sreg1, sparc_o7, ins->dreg);
3297 case OP_ISHR_UN_IMM:
3298 if (ins->inst_imm < (1 << 5))
3299 sparc_srl_imm (code, ins->sreg1, ins->inst_imm, ins->dreg);
3301 sparc_set (code, ins->inst_imm, sparc_o7);
3302 sparc_srl (code, ins->sreg1, sparc_o7, ins->dreg);
3307 sparc_srl (code, ins->sreg1, ins->sreg2, ins->dreg);
/* --- 64-bit shifts: immediate form valid for counts < 64 --- */
3310 sparc_sllx (code, ins->sreg1, ins->sreg2, ins->dreg);
3313 if (ins->inst_imm < (1 << 6))
3314 sparc_sllx_imm (code, ins->sreg1, ins->inst_imm, ins->dreg);
3316 sparc_set (code, ins->inst_imm, sparc_o7);
3317 sparc_sllx (code, ins->sreg1, sparc_o7, ins->dreg);
3321 sparc_srax (code, ins->sreg1, ins->sreg2, ins->dreg);
3324 if (ins->inst_imm < (1 << 6))
3325 sparc_srax_imm (code, ins->sreg1, ins->inst_imm, ins->dreg);
3327 sparc_set (code, ins->inst_imm, sparc_o7);
3328 sparc_srax (code, ins->sreg1, sparc_o7, ins->dreg);
3332 sparc_srlx (code, ins->sreg1, ins->sreg2, ins->dreg);
3334 case OP_LSHR_UN_IMM:
3335 if (ins->inst_imm < (1 << 6))
3336 sparc_srlx_imm (code, ins->sreg1, ins->inst_imm, ins->dreg);
3338 sparc_set (code, ins->inst_imm, sparc_o7);
3339 sparc_srlx (code, ins->sreg1, sparc_o7, ins->dreg);
3344 /* can't use sparc_not */
3345 sparc_xnor (code, FALSE, ins->sreg1, sparc_g0, ins->dreg);
3349 /* can't use sparc_neg */
3350 sparc_sub (code, FALSE, sparc_g0, ins->sreg1, ins->dreg);
/* --- multiplication --- */
3354 sparc_smul (code, FALSE, ins->sreg1, ins->sreg2, ins->dreg);
3360 if ((ins->inst_imm == 1) && (ins->sreg1 == ins->dreg))
3363 /* Transform multiplication into a shift */
3364 for (i = 0; i < 30; ++i) {
3366 if (ins->inst_imm == imm)
3370 sparc_sll_imm (code, ins->sreg1, i, ins->dreg);
3372 EMIT_ALU_IMM (ins, smul, FALSE);
/* mul.ovf: overflow iff the high word (%y) differs from the sign
 * extension of the low-word result. */
3377 sparc_smul (code, TRUE, ins->sreg1, ins->sreg2, ins->dreg);
3378 sparc_rdy (code, sparc_g1);
3379 sparc_sra_imm (code, ins->dreg, 31, sparc_o7);
3380 sparc_cmp (code, sparc_g1, sparc_o7);
3381 EMIT_COND_SYSTEM_EXCEPTION_GENERAL (ins, sparc_bne, "OverflowException", TRUE, sparc_icc_short);
3383 case CEE_MUL_OVF_UN:
3384 case OP_IMUL_OVF_UN:
/* Unsigned overflow iff the high word (%y) is non-zero. */
3385 sparc_umul (code, TRUE, ins->sreg1, ins->sreg2, ins->dreg);
3386 sparc_rdy (code, sparc_o7);
3387 sparc_cmp (code, sparc_o7, sparc_g0);
3388 EMIT_COND_SYSTEM_EXCEPTION_GENERAL (ins, sparc_bne, "OverflowException", TRUE, sparc_icc_short);
/* --- constants and register moves --- */
3392 sparc_set (code, ins->inst_c0, ins->dreg);
3395 sparc_set (code, ins->inst_l, ins->dreg);
/* AOT constant: emit a patchable 'set' template resolved at patch time. */
3398 mono_add_patch_info (cfg, offset, (MonoJumpInfoType)ins->inst_i1, ins->inst_p0);
3399 sparc_set_template (code, ins->dreg);
3405 if (ins->sreg1 != ins->dreg)
3406 sparc_mov_reg_reg (code, ins->sreg1, ins->dreg);
3409 /* Only used on V9 */
3410 if (ins->sreg1 != ins->dreg)
3411 sparc_fmovd (code, ins->sreg1, ins->dreg);
3413 case OP_SPARC_SETFREG_FLOAT:
3414 /* Only used on V9 */
3415 sparc_fdtos (code, ins->sreg1, ins->dreg);
/* Tail call: reload volatile args, then jump with a patchable target;
 * the 'restore' in the delay slot pops our frame first. */
3418 if (cfg->method->save_lmf)
3421 code = emit_load_volatile_arguments (cfg, code);
3422 mono_add_patch_info (cfg, (guint8*)code - cfg->native_code, MONO_PATCH_INFO_METHOD_JUMP, ins->inst_p0);
3423 sparc_set_template (code, sparc_o7);
3424 sparc_jmpl (code, sparc_o7, sparc_g0, sparc_g0);
3425 /* Restore parent frame in delay slot */
3426 sparc_restore_imm (code, sparc_g0, 0, sparc_g0);
3429 /* ensure ins->sreg1 is not NULL */
/* A load that discards its result (into %g0) faults on NULL. */
3430 sparc_ld_imm (code, ins->sreg1, 0, sparc_g0);
3433 sparc_add_imm (code, FALSE, sparc_fp, cfg->sig_cookie, sparc_o7);
3434 sparc_sti_imm (code, sparc_o7, ins->sreg1, 0);
/* --- direct calls --- */
3441 call = (MonoCallInst*)ins;
3442 g_assert (!call->virtual);
3443 code = emit_save_sp_to_lmf (cfg, code);
3444 if (ins->flags & MONO_INST_HAS_METHOD)
3445 code = emit_call (cfg, code, MONO_PATCH_INFO_METHOD, call->method);
3447 code = emit_call (cfg, code, MONO_PATCH_INFO_ABS, call->fptr);
3449 code = emit_vret_token (ins, code);
3450 code = emit_move_return_value (ins, code);
/* --- indirect calls through a register --- */
3455 case OP_VOIDCALL_REG:
3457 call = (MonoCallInst*)ins;
3458 code = emit_save_sp_to_lmf (cfg, code);
3459 sparc_jmpl (code, ins->sreg1, sparc_g0, sparc_callsite);
3461 * We emit a special kind of nop in the delay slot to tell the
3462 * trampoline code that this is a virtual call, thus an unbox
3463 * trampoline might need to be called.
/* Marker nop matched by mono_sparc_is_virtual_call(). */
3466 sparc_or_imm (code, FALSE, sparc_g0, 0xca, sparc_g0);
3470 code = emit_vret_token (ins, code);
3471 code = emit_move_return_value (ins, code);
/* --- indirect calls through [basereg + offset] (vtable slot) --- */
3473 case OP_FCALL_MEMBASE:
3474 case OP_LCALL_MEMBASE:
3475 case OP_VCALL_MEMBASE:
3476 case OP_VOIDCALL_MEMBASE:
3477 case OP_CALL_MEMBASE:
3478 call = (MonoCallInst*)ins;
3479 g_assert (sparc_is_imm13 (ins->inst_offset));
3480 code = emit_save_sp_to_lmf (cfg, code);
3481 sparc_ldi_imm (code, ins->inst_basereg, ins->inst_offset, sparc_o7);
3482 sparc_jmpl (code, sparc_o7, sparc_g0, sparc_callsite);
3484 sparc_or_imm (code, FALSE, sparc_g0, 0xca, sparc_g0);
3488 code = emit_vret_token (ins, code);
3489 code = emit_move_return_value (ins, code);
/* R4 results are returned narrowed to single precision in %f0. */
3492 if (cfg->method->signature->ret->type == MONO_TYPE_R4)
3493 sparc_fdtos (code, ins->sreg1, sparc_f0);
3496 sparc_fmovd (code, ins->sreg1, ins->dreg);
3498 /* FIXME: Why not use fmovd ? */
3499 sparc_fmovs (code, ins->sreg1, ins->dreg);
3500 sparc_fmovs (code, ins->sreg1 + 1, ins->dreg + 1);
3505 g_assert_not_reached ();
/* localloc: round the size up, bump %sp, return a biased pointer. */
3508 /* Keep alignment */
3509 sparc_add_imm (code, FALSE, ins->sreg1, MONO_ARCH_FRAME_ALIGNMENT - 1, ins->dreg);
3510 sparc_set (code, ~(MONO_ARCH_FRAME_ALIGNMENT - 1), sparc_o7);
3511 sparc_and (code, FALSE, ins->dreg, sparc_o7, ins->dreg);
3512 sparc_sub (code, FALSE, sparc_sp, ins->dreg, ins->dreg);
3513 /* Keep %sp valid at all times */
3514 sparc_mov_reg_reg (code, ins->dreg, sparc_sp);
3515 g_assert (sparc_is_imm13 (cfg->arch.localloc_offset));
3516 sparc_add_imm (code, FALSE, ins->dreg, MONO_SPARC_STACK_BIAS + cfg->arch.localloc_offset, ins->dreg);
3518 case OP_SPARC_LOCALLOC_IMM: {
3519 gint32 offset = ins->inst_c0;
3520 offset = ALIGN_TO (offset, MONO_ARCH_FRAME_ALIGNMENT);
3521 if (sparc_is_imm13 (offset))
3522 sparc_sub_imm (code, FALSE, sparc_sp, offset, sparc_sp);
3524 sparc_set (code, offset, sparc_o7);
3525 sparc_sub (code, FALSE, sparc_sp, sparc_o7, sparc_sp);
3527 sparc_mov_reg_reg (code, sparc_sp, ins->dreg);
3528 g_assert (sparc_is_imm13 (cfg->arch.localloc_offset));
3529 sparc_add_imm (code, FALSE, ins->dreg, MONO_SPARC_STACK_BIAS + cfg->arch.localloc_offset, ins->dreg);
3533 /* The return is done in the epilog */
3534 g_assert_not_reached ();
/* throw: move the exception object to %o0 and call the runtime helper. */
3537 sparc_mov_reg_reg (code, ins->sreg1, sparc_o0);
3538 mono_add_patch_info (cfg, (guint8*)code - cfg->native_code, MONO_PATCH_INFO_INTERNAL_METHOD,
3539 (gpointer)"mono_arch_throw_exception");
3542 case OP_START_HANDLER: {
3544 * The START_HANDLER instruction marks the beginning of a handler
3545 * block. It is called using a call instruction, so %o7 contains
3546 * the return address. Since the handler executes in the same stack
3547 * frame as the method itself, we can't use save/restore to save
3548 * the return address. Instead, we save it into a dedicated
3551 MonoInst *spvar = mono_find_spvar_for_region (cfg, bb->region);
3552 if (!sparc_is_imm13 (spvar->inst_offset)) {
3553 sparc_set (code, spvar->inst_offset, GP_SCRATCH_REG);
3554 sparc_sti (code, sparc_o7, spvar->inst_basereg, GP_SCRATCH_REG);
3557 sparc_sti_imm (code, sparc_o7, spvar->inst_basereg, spvar->inst_offset);
/* endfilter/endfinally: reload the saved return address and jump back
 * past the call (offset 8 skips the call and its delay slot). */
3560 case OP_ENDFILTER: {
3561 MonoInst *spvar = mono_find_spvar_for_region (cfg, bb->region);
3562 if (!sparc_is_imm13 (spvar->inst_offset)) {
3563 sparc_set (code, spvar->inst_offset, GP_SCRATCH_REG);
3564 sparc_ldi (code, spvar->inst_basereg, GP_SCRATCH_REG, sparc_o7);
3567 sparc_ldi_imm (code, spvar->inst_basereg, spvar->inst_offset, sparc_o7);
3568 sparc_jmpl_imm (code, sparc_o7, 8, sparc_g0);
/* Filter result is returned in %o0 (set in the delay slot). */
3570 sparc_mov_reg_reg (code, ins->sreg1, sparc_o0);
3573 case CEE_ENDFINALLY: {
3574 MonoInst *spvar = mono_find_spvar_for_region (cfg, bb->region);
3575 if (!sparc_is_imm13 (spvar->inst_offset)) {
3576 sparc_set (code, spvar->inst_offset, GP_SCRATCH_REG);
3577 sparc_ldi (code, spvar->inst_basereg, GP_SCRATCH_REG, sparc_o7);
3580 sparc_ldi_imm (code, spvar->inst_basereg, spvar->inst_offset, sparc_o7);
3581 sparc_jmpl_imm (code, sparc_o7, 8, sparc_g0);
3585 case OP_CALL_HANDLER:
3586 mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_BB, ins->inst_target_bb);
3587 /* This is a jump inside the method, so call_simple works even on V9 */
3588 sparc_call_simple (code, 0);
/* OP_LABEL: record the current native offset for later branch patching. */
3592 ins->inst_c0 = (guint8*)code - cfg->native_code;
/* --- unconditional branches --- */
3595 //g_print ("target: %p, next: %p, curr: %p, last: %p\n", ins->inst_target_bb, bb->next_bb, ins, bb->last_ins);
/* Branch to the fall-through block at the end of this one is a no-op. */
3596 if ((ins->inst_target_bb == bb->next_bb) && ins == bb->last_ins)
3598 if (ins->flags & MONO_INST_BRLABEL) {
/* Backward branch: target offset already known, emit it directly. */
3599 if (ins->inst_i0->inst_c0) {
3600 gint32 disp = (ins->inst_i0->inst_c0 - ((guint8*)code - cfg->native_code)) >> 2;
3601 g_assert (sparc_is_imm22 (disp));
3602 sparc_branch (code, 1, sparc_ba, disp);
/* Forward branch: leave displacement 0 and patch later. */
3604 mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_LABEL, ins->inst_i0);
3605 sparc_branch (code, 1, sparc_ba, 0);
3608 if (ins->inst_target_bb->native_offset) {
3609 gint32 disp = (ins->inst_target_bb->native_offset - ((guint8*)code - cfg->native_code)) >> 2;
3610 g_assert (sparc_is_imm22 (disp));
3611 sparc_branch (code, 1, sparc_ba, disp);
3613 mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_BB, ins->inst_target_bb);
3614 sparc_branch (code, 1, sparc_ba, 0);
3620 sparc_jmp (code, ins->sreg1, sparc_g0);
/* --- ceq/clt/... (64-bit cc): prefer a conditional move on V9 --- */
3628 if (v64 && (cfg->opt & MONO_OPT_CMOV)) {
3629 sparc_clr_reg (code, ins->dreg);
3630 sparc_movcc_imm (code, sparc_xcc, opcode_to_sparc_cond (ins->opcode), 1, ins->dreg);
/* Fallback: clear dreg, branch over the 'set 1' when the cc fails. */
3633 sparc_clr_reg (code, ins->dreg);
3635 sparc_branchp (code, 1, opcode_to_sparc_cond (ins->opcode), DEFAULT_ICC, 0, 2);
3637 sparc_branch (code, 1, opcode_to_sparc_cond (ins->opcode), 2);
3640 sparc_set (code, 1, ins->dreg);
/* Same pattern for the 32-bit integer condition codes. */
3648 if (v64 && (cfg->opt & MONO_OPT_CMOV)) {
3649 sparc_clr_reg (code, ins->dreg);
3650 sparc_movcc_imm (code, sparc_icc, opcode_to_sparc_cond (ins->opcode), 1, ins->dreg);
3653 sparc_clr_reg (code, ins->dreg);
3654 sparc_branchp (code, 1, opcode_to_sparc_cond (ins->opcode), sparc_icc_short, 0, 2);
3656 sparc_set (code, 1, ins->dreg);
/* --- conditional exception throws keyed off the current cc --- */
3659 case OP_COND_EXC_EQ:
3660 case OP_COND_EXC_NE_UN:
3661 case OP_COND_EXC_LT:
3662 case OP_COND_EXC_LT_UN:
3663 case OP_COND_EXC_GT:
3664 case OP_COND_EXC_GT_UN:
3665 case OP_COND_EXC_GE:
3666 case OP_COND_EXC_GE_UN:
3667 case OP_COND_EXC_LE:
3668 case OP_COND_EXC_LE_UN:
3669 case OP_COND_EXC_OV:
3670 case OP_COND_EXC_NO:
3672 case OP_COND_EXC_NC:
3673 EMIT_COND_SYSTEM_EXCEPTION (ins, opcode_to_sparc_cond (ins->opcode), ins->inst_p1);
/* Register-vs-zero variants use the V9 branch-on-register-value forms. */
3675 case OP_SPARC_COND_EXC_EQZ:
3676 EMIT_COND_SYSTEM_EXCEPTION_BPR (ins, brz, ins->inst_p1);
3678 case OP_SPARC_COND_EXC_GEZ:
3679 EMIT_COND_SYSTEM_EXCEPTION_BPR (ins, brgez, ins->inst_p1);
3681 case OP_SPARC_COND_EXC_GTZ:
3682 EMIT_COND_SYSTEM_EXCEPTION_BPR (ins, brgz, ins->inst_p1);
3684 case OP_SPARC_COND_EXC_LEZ:
3685 EMIT_COND_SYSTEM_EXCEPTION_BPR (ins, brlez, ins->inst_p1);
3687 case OP_SPARC_COND_EXC_LTZ:
3688 EMIT_COND_SYSTEM_EXCEPTION_BPR (ins, brlz, ins->inst_p1);
3690 case OP_SPARC_COND_EXC_NEZ:
3691 EMIT_COND_SYSTEM_EXCEPTION_BPR (ins, brnz, ins->inst_p1);
3693 case OP_COND_EXC_IOV:
3694 case OP_COND_EXC_IC:
3695 EMIT_COND_SYSTEM_EXCEPTION_GENERAL (ins, opcode_to_sparc_cond (ins->opcode), ins->inst_p1, TRUE, sparc_icc_short);
/* --- conditional branches --- */
3708 EMIT_COND_BRANCH_PREDICTED (ins, opcode_to_sparc_cond (ins->opcode), 1, 1);
3710 EMIT_COND_BRANCH (ins, opcode_to_sparc_cond (ins->opcode), 1, 1);
3724 /* Only used on V9 */
3725 EMIT_COND_BRANCH_ICC (ins, opcode_to_sparc_cond (ins->opcode), 1, 1, sparc_icc_short);
3730 EMIT_COND_BRANCH_BPR (ins, brz, 1, 1, 1);
3732 case OP_SPARC_BRLEZ:
3733 EMIT_COND_BRANCH_BPR (ins, brlez, 1, 1, 1);
3736 EMIT_COND_BRANCH_BPR (ins, brlz, 1, 1, 1);
3739 EMIT_COND_BRANCH_BPR (ins, brnz, 1, 1, 1);
3742 EMIT_COND_BRANCH_BPR (ins, brgz, 1, 1, 1);
3744 case OP_SPARC_BRGEZ:
3745 EMIT_COND_BRANCH_BPR (ins, brgez, 1, 1, 1);
3748 /* floating point opcodes */
/* FP constants: the address of the constant is patched into a
 * sethi/or template, then the value is loaded from it. */
3750 mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_R8, ins->inst_p0);
3752 sparc_set_template (code, sparc_o7);
3754 sparc_sethi (code, 0, sparc_o7);
3756 sparc_lddf_imm (code, sparc_o7, 0, ins->dreg);
3759 mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_R4, ins->inst_p0);
3761 sparc_set_template (code, sparc_o7);
3763 sparc_sethi (code, 0, sparc_o7);
3765 sparc_ldf_imm (code, sparc_o7, 0, FP_SCRATCH_REG);
3767 /* Extend to double */
3768 sparc_fstod (code, FP_SCRATCH_REG, ins->dreg);
3770 case OP_STORER8_MEMBASE_REG:
3771 if (!sparc_is_imm13 (ins->inst_offset + 4)) {
3772 sparc_set (code, ins->inst_offset, sparc_o7);
3773 /* SPARCV9 handles misaligned fp loads/stores */
/* On V8 a misaligned double must be stored as two singles. */
3774 if (!v64 && (ins->inst_offset % 8)) {
3776 sparc_add (code, FALSE, ins->inst_destbasereg, sparc_o7, sparc_o7);
3777 sparc_stf (code, ins->sreg1, sparc_o7, sparc_g0);
3778 sparc_stf_imm (code, ins->sreg1 + 1, sparc_o7, 4);
3780 sparc_stdf (code, ins->sreg1, ins->inst_destbasereg, sparc_o7);
3783 if (!v64 && (ins->inst_offset % 8)) {
3785 sparc_stf_imm (code, ins->sreg1, ins->inst_destbasereg, ins->inst_offset);
3786 sparc_stf_imm (code, ins->sreg1 + 1, ins->inst_destbasereg, ins->inst_offset + 4);
3788 sparc_stdf_imm (code, ins->sreg1, ins->inst_destbasereg, ins->inst_offset);
3791 case OP_LOADR8_MEMBASE:
3792 EMIT_LOAD_MEMBASE (ins, lddf);
3794 case OP_STORER4_MEMBASE_REG:
3795 /* This requires a double->single conversion */
3796 sparc_fdtos (code, ins->sreg1, FP_SCRATCH_REG);
3797 if (!sparc_is_imm13 (ins->inst_offset)) {
3798 sparc_set (code, ins->inst_offset, sparc_o7);
3799 sparc_stf (code, FP_SCRATCH_REG, ins->inst_destbasereg, sparc_o7);
3802 sparc_stf_imm (code, FP_SCRATCH_REG, ins->inst_destbasereg, ins->inst_offset);
3804 case OP_LOADR4_MEMBASE: {
3805 /* ldf needs a single precision register */
3806 int dreg = ins->dreg;
3807 ins->dreg = FP_SCRATCH_REG;
3808 EMIT_LOAD_MEMBASE (ins, ldf);
3810 /* Extend to double */
3811 sparc_fstod (code, FP_SCRATCH_REG, ins->dreg);
3816 sparc_fmovd (code, ins->sreg1, ins->dreg);
3818 sparc_fmovs (code, ins->sreg1, ins->dreg);
3819 sparc_fmovs (code, ins->sreg1 + 1, ins->dreg + 1);
/* int->float conversion: bounce the integer through a stack spill slot
 * so it can be reloaded into an FP register, then convert. */
3823 gint32 offset = mono_spillvar_offset_float (cfg, 0);
3824 if (!sparc_is_imm13 (offset))
3827 sparc_stx_imm (code, ins->sreg1, sparc_sp, offset);
3828 sparc_lddf_imm (code, sparc_sp, offset, FP_SCRATCH_REG);
3829 sparc_fxtos (code, FP_SCRATCH_REG, FP_SCRATCH_REG);
3831 sparc_st_imm (code, ins->sreg1, sparc_sp, offset);
3832 sparc_ldf_imm (code, sparc_sp, offset, FP_SCRATCH_REG);
3833 sparc_fitos (code, FP_SCRATCH_REG, FP_SCRATCH_REG);
3835 sparc_fstod (code, FP_SCRATCH_REG, ins->dreg);
3839 gint32 offset = mono_spillvar_offset_float (cfg, 0);
3840 if (!sparc_is_imm13 (offset))
3843 sparc_stx_imm (code, ins->sreg1, sparc_sp, offset);
3844 sparc_lddf_imm (code, sparc_sp, offset, FP_SCRATCH_REG);
3845 sparc_fxtod (code, FP_SCRATCH_REG, ins->dreg);
3847 sparc_st_imm (code, ins->sreg1, sparc_sp, offset);
3848 sparc_ldf_imm (code, sparc_sp, offset, FP_SCRATCH_REG);
3849 sparc_fitod (code, FP_SCRATCH_REG, ins->dreg);
/* float->int: convert in FP regs, spill, reload into the integer dreg. */
3853 case OP_FCONV_TO_I1:
3854 case OP_FCONV_TO_U1:
3855 case OP_FCONV_TO_I2:
3856 case OP_FCONV_TO_U2:
3861 case OP_FCONV_TO_I4:
3862 case OP_FCONV_TO_U4: {
3863 gint32 offset = mono_spillvar_offset_float (cfg, 0);
3864 if (!sparc_is_imm13 (offset))
3866 /* FIXME: Is having the same code for all of these ok ? */
3867 sparc_fdtoi (code, ins->sreg1, FP_SCRATCH_REG);
3868 sparc_stdf_imm (code, FP_SCRATCH_REG, sparc_sp, offset);
3869 sparc_ld_imm (code, sparc_sp, offset, ins->dreg);
3872 case OP_FCONV_TO_I8:
3873 case OP_FCONV_TO_U8:
3875 g_assert_not_reached ();
3879 g_assert_not_reached ();
3881 case OP_LCONV_TO_R_UN: {
3883 g_assert_not_reached ();
/* long->int with overflow check: branch over the throw when the value
 * is in range (ms word 0 for non-negative, -1 for negative values). */
3886 case OP_LCONV_TO_OVF_I: {
3887 guint32 *br [3], *label [1];
3890 * Valid ints: 0xffffffff:8000000 to 00000000:0x7f000000
3892 sparc_cmp_imm (code, ins->sreg1, 0);
3894 sparc_branch (code, 1, sparc_bneg, 0);
3898 /* ms word must be 0 */
3899 sparc_cmp_imm (code, ins->sreg2, 0);
3901 sparc_branch (code, 1, sparc_be, 0);
3906 EMIT_COND_SYSTEM_EXCEPTION (ins, sparc_ba, "OverflowException");
3909 sparc_patch (br [0], code);
3911 /* ms word must 0xfffffff */
3912 sparc_cmp_imm (code, ins->sreg2, -1);
3914 sparc_branch (code, 1, sparc_bne, 0);
3915 sparc_patch (br [2], label [0]);
3918 sparc_patch (br [1], code);
3919 if (ins->sreg1 != ins->dreg)
3920 sparc_mov_reg_reg (code, ins->sreg1, ins->dreg);
/* --- double-precision FP arithmetic --- */
3924 sparc_faddd (code, ins->sreg1, ins->sreg2, ins->dreg);
3927 sparc_fsubd (code, ins->sreg1, ins->sreg2, ins->dreg);
3930 sparc_fmuld (code, ins->sreg1, ins->sreg2, ins->dreg);
3933 sparc_fdivd (code, ins->sreg1, ins->sreg2, ins->dreg);
3937 sparc_fnegd (code, ins->sreg1, ins->dreg);
3939 /* FIXME: why don't use fnegd ? */
3940 sparc_fnegs (code, ins->sreg1, ins->dreg);
/* frem = sreg1 - (sreg1 / sreg2) * sreg2 */
3944 sparc_fdivd (code, ins->sreg1, ins->sreg2, FP_SCRATCH_REG);
3945 sparc_fmuld (code, ins->sreg2, FP_SCRATCH_REG, FP_SCRATCH_REG);
3946 sparc_fsubd (code, ins->sreg1, FP_SCRATCH_REG, ins->dreg);
3949 sparc_fcmpd (code, ins->sreg1, ins->sreg2);
/* FP compare + materialize a 0/1 result via fbranch over 'set 1'. */
3956 sparc_fcmpd (code, ins->sreg1, ins->sreg2);
3957 sparc_clr_reg (code, ins->dreg);
3958 switch (ins->opcode) {
3961 sparc_fbranch (code, 1, opcode_to_sparc_cond (ins->opcode), 4);
3963 sparc_set (code, 1, ins->dreg);
/* Unordered operands also yield 1 for the *_UN variants. */
3964 sparc_fbranch (code, 1, sparc_fbu, 2);
3966 sparc_set (code, 1, ins->dreg);
3969 sparc_fbranch (code, 1, opcode_to_sparc_cond (ins->opcode), 2);
3971 sparc_set (code, 1, ins->dreg);
3977 EMIT_FLOAT_COND_BRANCH (ins, opcode_to_sparc_cond (ins->opcode), 1, 1);
3980 /* clt.un + brfalse */
3982 sparc_fbranch (code, 1, sparc_fbul, 0);
3985 EMIT_FLOAT_COND_BRANCH (ins, sparc_fba, 1, 1);
3986 sparc_patch (p, (guint8*)code);
3990 /* cgt.un + brfalse */
3992 sparc_fbranch (code, 1, sparc_fbug, 0);
3995 EMIT_FLOAT_COND_BRANCH (ins, sparc_fba, 1, 1);
3996 sparc_patch (p, (guint8*)code);
/* fbne/fbl/... do not trigger on unordered: branch on fbu as well. */
4000 EMIT_FLOAT_COND_BRANCH (ins, sparc_fbne, 1, 1);
4001 EMIT_FLOAT_COND_BRANCH (ins, sparc_fbu, 1, 1);
4004 EMIT_FLOAT_COND_BRANCH (ins, sparc_fbl, 1, 1);
4005 EMIT_FLOAT_COND_BRANCH (ins, sparc_fbu, 1, 1);
4008 EMIT_FLOAT_COND_BRANCH (ins, sparc_fbg, 1, 1);
4009 EMIT_FLOAT_COND_BRANCH (ins, sparc_fbu, 1, 1);
4012 EMIT_FLOAT_COND_BRANCH (ins, sparc_fbge, 1, 1);
4013 EMIT_FLOAT_COND_BRANCH (ins, sparc_fbu, 1, 1);
4016 EMIT_FLOAT_COND_BRANCH (ins, sparc_fble, 1, 1);
4017 EMIT_FLOAT_COND_BRANCH (ins, sparc_fbu, 1, 1);
/* ckfinite: spill the double, inspect the exponent field (11 bits);
 * all-ones (2047) means NaN/Inf -> ArithmeticException. */
4019 case CEE_CKFINITE: {
4020 gint32 offset = mono_spillvar_offset_float (cfg, 0);
4021 if (!sparc_is_imm13 (offset))
4023 sparc_stdf_imm (code, ins->sreg1, sparc_sp, offset);
4024 sparc_lduh_imm (code, sparc_sp, offset, sparc_o7);
4025 sparc_srl_imm (code, sparc_o7, 4, sparc_o7);
4026 sparc_and_imm (code, FALSE, sparc_o7, 2047, sparc_o7);
4027 sparc_cmp_imm (code, sparc_o7, 2047);
4028 EMIT_COND_SYSTEM_EXCEPTION (ins, sparc_be, "ArithmeticException");
4030 sparc_fmovd (code, ins->sreg1, ins->dreg);
4032 sparc_fmovs (code, ins->sreg1, ins->dreg);
4033 sparc_fmovs (code, ins->sreg1 + 1, ins->dreg + 1);
4039 g_warning ("unknown opcode %s in %s()\n", mono_inst_name (ins->opcode), __FUNCTION__);
4041 g_warning ("%s:%d: unknown opcode %s\n", __FILE__, __LINE__, mono_inst_name (ins->opcode));
4043 g_assert_not_reached ();
/* Sanity check: the emitted sequence must not exceed the ins_spec length. */
4046 if ((((guint8*)code) - code_start) > max_len) {
4047 g_warning ("wrong maximal instruction length of instruction %s (expected %d, got %d)",
4048 mono_inst_name (ins->opcode), max_len, ((guint8*)code) - code_start);
4049 g_assert_not_reached ();
4059 cfg->code_len = (guint8*)code - cfg->native_code;
/*
 * Register the SPARC-specific low-level helpers with the JIT icall table so
 * generated code can reference them by name (both are registered as
 * save-all / no-wrapper icalls).
 */
4063 mono_arch_register_lowlevel_calls (void)
4065 mono_register_jit_icall (mono_sparc_break, "mono_sparc_break", NULL, TRUE);
4066 mono_register_jit_icall (mono_arch_get_lmf_addr, "mono_arch_get_lmf_addr", NULL, TRUE);
/*
 * Resolve every MonoJumpInfo entry for METHOD's native code: compute the
 * final target of each patch site and rewrite the instruction(s) at ip.
 * Most patch types are handled generically by sparc_patch(); CLASS_INIT and
 * METHOD_JUMP sites re-emit their call/jump sequence in place.
 * NOTE(review): lines are elided in this listing — the exact branch taken
 * for each patch type should be checked against the full source.
 */
4070 mono_arch_patch_code (MonoMethod *method, MonoDomain *domain, guint8 *code, MonoJumpInfo *ji, gboolean run_cctors)
4072 MonoJumpInfo *patch_info;
4074 /* FIXME: Move part of this to arch independent code */
4075 for (patch_info = ji; patch_info; patch_info = patch_info->next) {
/* ip.i is an offset from the start of the method's native code. */
4076 unsigned char *ip = patch_info->ip.i + code;
4079 target = mono_resolve_patch_target (method, domain, code, patch_info, run_cctors);
4081 switch (patch_info->type) {
4082 case MONO_PATCH_INFO_CLASS_INIT: {
4083 guint32 *ip2 = (guint32*)ip;
4084 /* Might already been changed to a nop */
/* Rewrite the class-init call site in place: set template + jmpl
 * on V9, a simple call on V8 — TODO confirm which path is which. */
4086 sparc_set_template (ip2, sparc_o7);
4087 sparc_jmpl (ip2, sparc_o7, sparc_g0, sparc_o7);
4089 sparc_call_simple (ip2, 0);
4093 case MONO_PATCH_INFO_METHOD_JUMP: {
4094 guint32 *ip2 = (guint32*)ip;
4095 /* Might already been patched */
4096 sparc_set_template (ip2, sparc_o7);
/* Default: let sparc_patch() rewrite the branch/set at ip to target. */
4102 sparc_patch ((guint32*)ip, target);
/*
 * mono_arch_instrument_mem_needs:
 *
 * Report, through STACK and CODE, the stack space and code size needed by the
 * instrumentation sequences emitted by mono_arch_instrument_prolog/epilog.
 * (Body elided here; the code-size result is checked against the actual
 * emitted size by an assertion in mono_arch_instrument_prolog.)
 */
4107 mono_arch_instrument_mem_needs (MonoMethod *method, int *stack, int *code)
/*
 * mono_arch_instrument_prolog:
 *
 * Emit, at P, a call to the tracing/profiling function FUNC at method entry.
 * The incoming argument registers are spilled to their home slots first so
 * FUNC (which is an ordinary call and may clobber them) cannot destroy the
 * method's arguments, then restored afterwards.  Returns the updated code
 * pointer.
 */
4114 mono_arch_instrument_prolog (MonoCompile *cfg, void *func, void *p, gboolean enable_arguments)
4116 int i, stack, code_size;
4117 guint32 *code = (guint32*)p;
4118 MonoMethodSignature *sig = cfg->method->signature;
4121 /* Save registers to stack */
4122 for (i = 0; i < 6; ++i)
4123 sparc_sti_imm (code, sparc_i0 + i, sparc_fp, ARGS_OFFSET + (i * sizeof (gpointer)));
4125 cinfo = get_call_info (sig, FALSE);
4127 /* Save float regs on V9, since they are caller saved */
4128 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
4129 ArgInfo *ainfo = cinfo->args + i;
4130 gint32 stack_offset;
4132 stack_offset = ainfo->offset + ARGS_OFFSET;
4134 if (ainfo->storage == ArgInFloatReg) {
/* Store needs a 13-bit signed immediate offset; larger frames are not handled here */
4135 if (!sparc_is_imm13 (stack_offset))
4137 sparc_stf_imm (code, ainfo->reg, sparc_fp, stack_offset);
4139 else if (ainfo->storage == ArgInDoubleReg) {
4140 /* The offset is guaranteed to be aligned by the ABI rules */
4141 sparc_stdf_imm (code, ainfo->reg, sparc_fp, stack_offset);
/* Arguments to FUNC: the method being entered and a pointer to its frame */
4145 sparc_set (code, cfg->method, sparc_o0);
4146 sparc_add_imm (code, FALSE, sparc_fp, MONO_SPARC_STACK_BIAS, sparc_o1);
/* The call target is resolved later through an ABS patch */
4148 mono_add_patch_info (cfg, (guint8*)code-cfg->native_code, MONO_PATCH_INFO_ABS, func);
4151 /* Restore float regs on V9 */
4152 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
4153 ArgInfo *ainfo = cinfo->args + i;
4154 gint32 stack_offset;
4156 stack_offset = ainfo->offset + ARGS_OFFSET;
4158 if (ainfo->storage == ArgInFloatReg) {
4159 if (!sparc_is_imm13 (stack_offset))
4161 sparc_ldf_imm (code, sparc_fp, stack_offset, ainfo->reg);
4163 else if (ainfo->storage == ArgInDoubleReg) {
4164 /* The offset is guaranteed to be aligned by the ABI rules */
4165 sparc_lddf_imm (code, sparc_fp, stack_offset, ainfo->reg);
/* Verify the emitted sequence fits in the size promised to the allocator */
4169 mono_arch_instrument_mem_needs (cfg->method, &stack, &code_size);
4171 g_assert ((code - (guint32*)p) <= (code_size * 4));
/*
 * mono_arch_instrument_epilog:
 *
 * Emit, at P, a call to the tracing/profiling function FUNC at method exit.
 * The method's return value must survive the call, so it is first saved to
 * the stack (and copied into the outgoing registers so FUNC can inspect it),
 * then reloaded after the call.  The save strategy depends on the return
 * type: nothing, one register, a register pair, a float, or a struct.
 * Returns the updated code pointer.
 */
4187 mono_arch_instrument_epilog (MonoCompile *cfg, void *func, void *p, gboolean enable_arguments)
4189 guint32 *code = (guint32*)p;
4190 int save_mode = SAVE_NONE;
4191 MonoMethod *method = cfg->method;
4193 switch (mono_type_get_underlying_type (method->signature->ret)->type) {
4194 case MONO_TYPE_VOID:
4195 /* special case string .ctor icall */
4196 if (strcmp (".ctor", method->name) && method->klass == mono_defaults.string_class)
4197 save_mode = SAVE_ONE;
4199 save_mode = SAVE_NONE;
4204 save_mode = SAVE_ONE;
4206 save_mode = SAVE_TWO;
4211 save_mode = SAVE_FP;
4213 case MONO_TYPE_VALUETYPE:
4214 save_mode = SAVE_STRUCT;
4217 save_mode = SAVE_ONE;
4221 /* Save the result to the stack and also put it into the output registers */
4223 switch (save_mode) {
/* SAVE_TWO: 64 bit result held in the %i0/%i1 pair */
4226 sparc_st_imm (code, sparc_i0, sparc_fp, 68);
4227 sparc_st_imm (code, sparc_i0, sparc_fp, 72);
4228 sparc_mov_reg_reg (code, sparc_i0, sparc_o1);
4229 sparc_mov_reg_reg (code, sparc_i1, sparc_o2);
/* SAVE_ONE: single word/pointer result in %i0 */
4232 sparc_sti_imm (code, sparc_i0, sparc_fp, ARGS_OFFSET);
4233 sparc_mov_reg_reg (code, sparc_i0, sparc_o1);
/* SAVE_FP: fp result in %f0; spilled so the halves can be loaded into int regs */
4237 sparc_stdf_imm (code, sparc_f0, sparc_fp, ARGS_OFFSET);
4239 sparc_stdf_imm (code, sparc_f0, sparc_fp, 72);
4240 sparc_ld_imm (code, sparc_fp, 72, sparc_o1);
4241 sparc_ld_imm (code, sparc_fp, 72 + 4, sparc_o2);
/* SAVE_STRUCT: pass the hidden struct-return pointer along */
4246 sparc_mov_reg_reg (code, sparc_i0, sparc_o1);
4248 sparc_ld_imm (code, sparc_fp, 64, sparc_o1);
/* First argument to FUNC is always the method; call resolved via ABS patch */
4256 sparc_set (code, cfg->method, sparc_o0);
4258 mono_add_patch_info (cfg, (guint8*)code - cfg->native_code, MONO_PATCH_INFO_ABS, func);
4261 /* Restore result */
4263 switch (save_mode) {
4265 sparc_ld_imm (code, sparc_fp, 68, sparc_i0);
4266 sparc_ld_imm (code, sparc_fp, 72, sparc_i0);
4269 sparc_ldi_imm (code, sparc_fp, ARGS_OFFSET, sparc_i0);
4272 sparc_lddf_imm (code, sparc_fp, ARGS_OFFSET, sparc_f0);
/*
 * mono_arch_max_epilog_size:
 *
 * Return an upper bound on the native size of the epilog that will be
 * emitted for CFG, so the caller can size the code buffer.  The base
 * estimate is enlarged for LMF restoration, call tracing, enter/leave
 * profiling, and one throw sequence per exception patch info.
 */
4283 mono_arch_max_epilog_size (MonoCompile *cfg)
4285 int exc_count = 0, max_epilog_size = 16 + 20*4;
4286 MonoJumpInfo *patch_info;
4288 if (cfg->method->save_lmf)
4289 max_epilog_size += 128;
4291 if (mono_jit_trace_calls != NULL)
4292 max_epilog_size += 50;
4294 if (cfg->prof_options & MONO_PROFILE_ENTER_LEAVE)
4295 max_epilog_size += 50;
4297 /* count the number of exception infos */
4299 for (patch_info = cfg->patch_info; patch_info; patch_info = patch_info->next) {
4300 if (patch_info->type == MONO_PATCH_INFO_EXC)
4305 * make sure we have enough space for exceptions
4308 max_epilog_size += exc_count * (20 * 4);
4310 max_epilog_size += exc_count * 24;
4313 return max_epilog_size;
/*
 * mono_arch_emit_prolog:
 *
 * Emit the native prolog for CFG: compute and align the frame size, emit the
 * sparc 'save' that opens the register window and allocates the frame, then
 * move/spill the incoming arguments to the locations the register allocator
 * assigned them (registers, stack slots, or dword-aligned copies for
 * doubles/longs).  Optionally sets up the LMF and entry tracing.
 * Returns a pointer to the end of the emitted code.
 */
4317 mono_arch_emit_prolog (MonoCompile *cfg)
4319 MonoMethod *method = cfg->method;
4320 MonoMethodSignature *sig;
4326 cfg->code_size = 256;
4327 cfg->native_code = g_malloc (cfg->code_size);
4328 code = (guint32*)cfg->native_code;
4330 /* FIXME: Generate intermediate code instead */
/* Frame layout: locals, register save area, struct-return slot, param area */
4332 offset = cfg->stack_offset;
4333 offset += (16 * sizeof (gpointer)); /* register save area */
4335 offset += 4; /* struct/union return pointer */
4338 /* add parameter area size for called functions */
4339 if (cfg->param_area < (6 * sizeof (gpointer)))
4340 /* Reserve space for the first 6 arguments even if it is unused */
4341 offset += 6 * sizeof (gpointer);
4343 offset += cfg->param_area;
4345 /* align the stack size */
4346 offset = ALIGN_TO (offset, MONO_ARCH_FRAME_ALIGNMENT);
4349 * localloc'd memory is stored between the local variables (whose
4350 * size is given by cfg->stack_offset), and between the space reserved
4353 cfg->arch.localloc_offset = offset - cfg->stack_offset;
4355 cfg->stack_offset = offset;
/* 'save' opens a new register window; large frames need the size in a register */
4357 if (!sparc_is_imm13 (- cfg->stack_offset)) {
4358 /* Can't use sparc_o7 here, since we're still in the caller's frame */
4359 sparc_set (code, (- cfg->stack_offset), GP_SCRATCH_REG);
4360 sparc_save (code, sparc_sp, GP_SCRATCH_REG, sparc_sp);
4363 sparc_save_imm (code, sparc_sp, - cfg->stack_offset, sparc_sp);
/* Debug aid: break on entry to methods matching this name */
4366 if (strstr (cfg->method->name, "test_marshal_struct")) {
4367 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_ABS, mono_sparc_break);
4368 sparc_call_simple (code, 0);
4373 sig = method->signature;
4375 cinfo = get_call_info (sig, FALSE);
4377 /* Keep in sync with emit_load_volatile_arguments */
4378 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
4379 ArgInfo *ainfo = cinfo->args + i;
4380 gint32 stack_offset;
4382 inst = cfg->varinfo [i];
4384 if (sig->hasthis && (i == 0))
4385 arg_type = &mono_defaults.object_class->byval_arg;
4387 arg_type = sig->params [i - sig->hasthis];
4389 stack_offset = ainfo->offset + ARGS_OFFSET;
4391 /* Save the split arguments so they will reside entirely on the stack */
4392 if (ainfo->storage == ArgInSplitRegStack) {
4393 /* Save the register to the stack */
4394 g_assert (inst->opcode == OP_REGOFFSET);
4395 if (!sparc_is_imm13 (stack_offset))
4397 sparc_st_imm (code, sparc_i5, inst->inst_basereg, stack_offset);
4400 if (!v64 && !arg_type->byref && (arg_type->type == MONO_TYPE_R8)) {
4401 /* Save the argument to a dword aligned stack location */
4403 * stack_offset contains the offset of the argument on the stack.
4404 * inst->inst_offset contains the dword aligned offset where the value
4407 if (ainfo->storage == ArgInIRegPair) {
4408 if (!sparc_is_imm13 (inst->inst_offset + 4))
4410 sparc_st_imm (code, sparc_i0 + ainfo->reg, inst->inst_basereg, inst->inst_offset);
4411 sparc_st_imm (code, sparc_i0 + ainfo->reg + 1, inst->inst_basereg, inst->inst_offset + 4);
4414 if (ainfo->storage == ArgInSplitRegStack) {
4416 g_assert_not_reached ();
4418 if (stack_offset != inst->inst_offset) {
4419 /* stack_offset is not dword aligned, so we need to make a copy */
4420 sparc_st_imm (code, sparc_i5, inst->inst_basereg, inst->inst_offset);
4421 sparc_ld_imm (code, sparc_fp, stack_offset + 4, sparc_o7);
4422 sparc_st_imm (code, sparc_o7, inst->inst_basereg, inst->inst_offset + 4);
4426 if (ainfo->storage == ArgOnStackPair) {
4428 g_assert_not_reached ();
4430 if (stack_offset != inst->inst_offset) {
4431 /* stack_offset is not dword aligned, so we need to make a copy */
4432 sparc_ld_imm (code, sparc_fp, stack_offset, sparc_o7);
4433 sparc_st_imm (code, sparc_o7, inst->inst_basereg, inst->inst_offset);
4434 sparc_ld_imm (code, sparc_fp, stack_offset + 4, sparc_o7);
4435 sparc_st_imm (code, sparc_o7, inst->inst_basereg, inst->inst_offset + 4);
4439 g_assert_not_reached ();
4442 if ((ainfo->storage == ArgInIReg) && (inst->opcode != OP_REGVAR)) {
4443 /* Argument in register, but need to be saved to stack */
4444 if (!sparc_is_imm13 (stack_offset))
/* Store width chosen from the slot's alignment within the argument area */
4446 if ((stack_offset - ARGS_OFFSET) & 0x1)
4447 sparc_stb_imm (code, sparc_i0 + ainfo->reg, inst->inst_basereg, stack_offset);
4449 if ((stack_offset - ARGS_OFFSET) & 0x2)
4450 sparc_sth_imm (code, sparc_i0 + ainfo->reg, inst->inst_basereg, stack_offset);
4452 if ((stack_offset - ARGS_OFFSET) & 0x4)
4453 sparc_st_imm (code, sparc_i0 + ainfo->reg, inst->inst_basereg, stack_offset);
4456 sparc_stx_imm (code, sparc_i0 + ainfo->reg, inst->inst_basereg, stack_offset);
4458 sparc_st_imm (code, sparc_i0 + ainfo->reg, inst->inst_basereg, stack_offset);
4462 if ((ainfo->storage == ArgInIRegPair) && (inst->opcode != OP_REGVAR)) {
4466 /* Argument in regpair, but need to be saved to stack */
4467 if (!sparc_is_imm13 (inst->inst_offset + 4))
4469 sparc_st_imm (code, sparc_i0 + ainfo->reg, inst->inst_basereg, inst->inst_offset);
4470 sparc_st_imm (code, sparc_i0 + ainfo->reg + 1, inst->inst_basereg, inst->inst_offset + 4);
4472 else if ((ainfo->storage == ArgInFloatReg) && (inst->opcode != OP_REGVAR)) {
4473 if (!sparc_is_imm13 (stack_offset))
4475 sparc_stf_imm (code, ainfo->reg, inst->inst_basereg, inst->inst_offset);
4477 else if ((ainfo->storage == ArgInDoubleReg) && (inst->opcode != OP_REGVAR)) {
4478 /* The offset is guaranteed to be aligned by the ABI rules */
4479 sparc_stdf_imm (code, ainfo->reg, inst->inst_basereg, inst->inst_offset);
4482 if ((ainfo->storage == ArgInFloatReg) && (inst->opcode == OP_REGVAR)) {
4483 /* Need to move into a double precision register */
4484 sparc_fstod (code, ainfo->reg, ainfo->reg - 1);
4487 if ((ainfo->storage == ArgInSplitRegStack) || (ainfo->storage == ArgOnStack))
4488 if (inst->opcode == OP_REGVAR)
4489 /* FIXME: Load the argument into memory */
/* Set up the LMF (Last Managed Frame) so stack walks can cross this frame */
4495 if (cfg->method->save_lmf) {
4496 gint32 lmf_offset = STACK_BIAS - cfg->arch.lmf_offset;
4499 mono_add_patch_info (cfg, (guint8*)code - cfg->native_code, MONO_PATCH_INFO_IP, NULL);
4500 sparc_set_template (code, sparc_o7);
4501 sparc_sti_imm (code, sparc_o7, sparc_fp, lmf_offset + G_STRUCT_OFFSET (MonoLMF, ip));
4503 sparc_sti_imm (code, sparc_sp, sparc_fp, lmf_offset + G_STRUCT_OFFSET (MonoLMF, sp));
4505 sparc_sti_imm (code, sparc_fp, sparc_fp, lmf_offset + G_STRUCT_OFFSET (MonoLMF, ebp));
4507 /* FIXME: add a relocation for this */
4508 sparc_set (code, cfg->method, sparc_o7);
4509 sparc_sti_imm (code, sparc_o7, sparc_fp, lmf_offset + G_STRUCT_OFFSET (MonoLMF, method));
4511 mono_add_patch_info (cfg, (guint8*)code - cfg->native_code, MONO_PATCH_INFO_INTERNAL_METHOD,
4512 (gpointer)"mono_arch_get_lmf_addr");
4515 code = (guint32*)mono_sparc_emit_save_lmf (code, lmf_offset);
4518 if (mono_jit_trace_calls != NULL && mono_trace_eval (method))
4519 code = mono_arch_instrument_prolog (cfg, mono_trace_enter_method, code, TRUE);
4521 cfg->code_len = (guint8*)code - cfg->native_code;
4523 g_assert (cfg->code_len <= cfg->code_size);
4525 return (guint8*)code;
/*
 * mono_arch_emit_epilog:
 *
 * Emit the native epilog for CFG: exit tracing, LMF restoration, the
 * 'restore' that closes the register window (folding the final move into
 * %i0 into the restore when possible), and the out-of-line throw sequences
 * for the method's exception patch infos.
 */
4529 mono_arch_emit_epilog (MonoCompile *cfg)
4531 MonoJumpInfo *patch_info;
4532 MonoMethod *method = cfg->method;
4536 code = (guint32*)(cfg->native_code + cfg->code_len);
4538 if (mono_jit_trace_calls != NULL && mono_trace_eval (method))
4539 code = mono_arch_instrument_epilog (cfg, mono_trace_leave_method, code, TRUE);
4541 if (cfg->method->save_lmf) {
4542 gint32 lmf_offset = STACK_BIAS - cfg->arch.lmf_offset;
4544 code = mono_sparc_emit_restore_lmf (code, lmf_offset);
4548 * The V8 ABI requires that calls to functions which return a structure
4551 if (!v64 && cfg->method->signature->pinvoke && MONO_TYPE_ISSTRUCT(cfg->method->signature->ret))
4552 sparc_jmpl_imm (code, sparc_i7, 12, sparc_g0);
4556 /* Only fold last instruction into the restore if the exit block has an in count of 1
4557 and the previous block hasn't been optimized away since it may have an in count > 1 */
4558 if (cfg->bb_exit->in_count == 1 && cfg->bb_exit->in_bb[0]->native_offset != cfg->bb_exit->native_offset)
4561 /* Try folding last instruction into the restore */
4562 if (can_fold && (sparc_inst_op (code [-2]) == 0x2) && (sparc_inst_op3 (code [-2]) == 0x2) && sparc_inst_imm (code [-2]) && (sparc_inst_rd (code [-2]) == sparc_i0)) {
4563 /* or reg, imm, %i0 */
4564 int reg = sparc_inst_rs1 (code [-2]);
4565 int imm = sparc_inst_imm13 (code [-2]);
4566 code [-2] = code [-1];
/* restore also performs the move: result lands in caller's %o0 */
4568 sparc_restore_imm (code, reg, imm, sparc_o0);
4571 if (can_fold && (sparc_inst_op (code [-2]) == 0x2) && (sparc_inst_op3 (code [-2]) == 0x2) && (!sparc_inst_imm (code [-2])) && (sparc_inst_rd (code [-2]) == sparc_i0)) {
4572 /* or reg, reg, %i0 */
4573 int reg1 = sparc_inst_rs1 (code [-2]);
4574 int reg2 = sparc_inst_rs2 (code [-2]);
4575 code [-2] = code [-1];
4577 sparc_restore (code, reg1, reg2, sparc_o0);
/* No folding possible: plain window restore */
4580 sparc_restore_imm (code, sparc_g0, 0, sparc_g0);
4582 /* add code to raise exceptions */
4583 for (patch_info = cfg->patch_info; patch_info; patch_info = patch_info->next) {
4584 glong offset = patch_info->ip.i;
4586 switch (patch_info->type) {
4587 case MONO_PATCH_INFO_EXC:
/* Redirect the in-method branch here, then call the throw helper with
 * the exception name and the throwing IP's method-relative offset */
4588 sparc_patch ((guint32*)(cfg->native_code + patch_info->ip.i), code);
4589 mono_add_patch_info (cfg, (guint8*)code - cfg->native_code, MONO_PATCH_INFO_EXC_NAME, patch_info->data.target);
4590 sparc_set_template (code, sparc_o0);
4591 mono_add_patch_info (cfg, (guint8*)code - cfg->native_code, MONO_PATCH_INFO_METHOD_REL, (gpointer)offset);
4592 sparc_set_template (code, sparc_o1);
4593 patch_info->type = MONO_PATCH_INFO_INTERNAL_METHOD;
4594 patch_info->data.name = "mono_arch_throw_exception_by_name";
4595 patch_info->ip.i = (guint8*)code - cfg->native_code;
/* One-time-init flag for the TLS key below (set in mono_arch_setup_jit_tls_data) */
4610 gboolean lmf_addr_key_inited = FALSE;
/* TLS key storing each thread's LMF address; Solaris thr_* variant vs pthreads */
4612 #ifdef MONO_SPARC_THR_TLS
4613 thread_key_t lmf_addr_key;
4615 pthread_key_t lmf_addr_key;
/*
 * mono_arch_get_lmf_addr:
 *
 * Return the calling thread's LMF address from thread-local storage.
 * Registered as a low-level icall, so it is called from JITted code.
 */
4619 mono_arch_get_lmf_addr (void)
4621 /* This is perf critical so we bypass the IO layer */
4622 /* The thr_... functions seem to be somewhat faster */
4623 #ifdef MONO_SPARC_THR_TLS
4625 thr_getspecific (lmf_addr_key, &res);
4628 return pthread_getspecific (lmf_addr_key);
/*
 * mono_arch_setup_jit_tls_data:
 *
 * Per-thread JIT initialization: optionally installs an alternate signal
 * stack (for handling SIGSEGV on stack overflow), lazily creates the
 * lmf_addr TLS key, and stores this thread's LMF address in it.
 * NOTE(review): the lazy key creation is not obviously thread-safe — no
 * visible lock around the lmf_addr_key_inited check; confirm callers
 * serialize this.
 */
4633 mono_arch_setup_jit_tls_data (MonoJitTlsData *tls)
4635 #ifdef MONO_ARCH_SIGSEGV_ON_ALTSTACK
4637 struct sigaltstack sa;
4642 printf ("SIGALT!\n");
4643 /* Setup an alternate signal stack */
4644 tls->signal_stack = g_malloc (SIGNAL_STACK_SIZE);
4645 tls->signal_stack_size = SIGNAL_STACK_SIZE;
4648 sa.ss_sp = tls->signal_stack;
4649 sa.ss_size = SIGNAL_STACK_SIZE;
4651 g_assert (sigaltstack (&sa, NULL) == 0);
4653 sigstk.ss_sp = tls->signal_stack;
4654 sigstk.ss_size = SIGNAL_STACK_SIZE;
4655 sigstk.ss_flags = 0;
4656 g_assert (sigaltstack (&sigstk, NULL) == 0);
/* Lazily create the TLS key used by mono_arch_get_lmf_addr */
4660 if (!lmf_addr_key_inited) {
4663 lmf_addr_key_inited = TRUE;
4665 #ifdef MONO_SPARC_THR_TLS
4666 res = thr_keycreate (&lmf_addr_key, NULL);
4668 res = pthread_key_create (&lmf_addr_key, NULL);
4670 g_assert (res == 0);
/* Publish this thread's LMF address */
4674 #ifdef MONO_SPARC_THR_TLS
4675 thr_setspecific (lmf_addr_key, &tls->lmf);
4677 pthread_setspecific (lmf_addr_key, &tls->lmf);
/*
 * mono_arch_free_jit_tls_data:
 *
 * Per-thread JIT teardown counterpart of mono_arch_setup_jit_tls_data
 * (body elided in this view).
 */
4682 mono_arch_free_jit_tls_data (MonoJitTlsData *tls)
/*
 * mono_arch_emit_this_vret_args:
 *
 * Emit the IR that sets up the 'this' pointer and, for valuetype-returning
 * calls, the hidden return-buffer argument.  When a vtype return pointer is
 * present it takes %o0 and 'this' is shifted to %o1; the struct-return
 * pointer is also stored in the caller's stack slot as required by the ABI.
 */
4687 mono_arch_emit_this_vret_args (MonoCompile *cfg, MonoCallInst *inst, int this_reg, int this_type, int vt_reg)
4689 int this_out_reg = sparc_o0;
4694 MONO_INST_NEW (cfg, ins, OP_SETREG);
4695 ins->sreg1 = vt_reg;
4696 ins->dreg = sparc_o0;
4697 mono_bblock_add_inst (cfg->cbb, ins);
4698 this_out_reg = sparc_o1;
4700 /* Set the 'struct/union return pointer' location on the stack */
4701 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, sparc_sp, 64, vt_reg);
4705 /* add the this argument */
4706 if (this_reg != -1) {
4708 MONO_INST_NEW (cfg, this, OP_SETREG);
4709 this->type = this_type;
4710 this->sreg1 = this_reg;
4711 this->dreg = this_out_reg;
4712 mono_bblock_add_inst (cfg->cbb, this);
/*
 * mono_arch_get_opcode_for_method:
 *
 * Arch hook for mapping an intrinsic method call to a specialized opcode
 * (body elided in this view).
 */
4718 mono_arch_get_opcode_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
4724 * mono_arch_get_argument_info:
4725 * @csig: a method signature
4726 * @param_count: the number of parameters to consider
4727 * @arg_info: an array to store the result infos
4729 * Gathers information on parameters such as size, alignment and
4730 * padding. arg_info should be large enough to hold param_count + 1 entries.
4732 * Returns the size of the activation frame.
4735 mono_arch_get_argument_info (MonoMethodSignature *csig, int param_count, MonoJitArgumentInfo *arg_info)
/* Reuse the call-info computed for argument marshalling to derive offsets */
4741 cinfo = get_call_info (csig, FALSE);
/* Slot 0 describes the implicit 'this' argument when present */
4743 if (csig->hasthis) {
4744 ainfo = &cinfo->args [0];
4745 arg_info [0].offset = ARGS_OFFSET - MONO_SPARC_STACK_BIAS + ainfo->offset;
4748 for (k = 0; k < param_count; k++) {
4749 ainfo = &cinfo->args [k + csig->hasthis];
/* arg_info [k + 1] describes parameter k; offsets are unbiased frame offsets */
4751 arg_info [k + 1].offset = ARGS_OFFSET - MONO_SPARC_STACK_BIAS + ainfo->offset;
4752 arg_info [k + 1].size = mono_type_size (csig->params [k], &align);
/*
 * mono_arch_print_tree:
 *
 * Arch hook for printing arch-specific instruction-tree nodes during
 * debugging dumps (body elided in this view).
 */
4761 mono_arch_print_tree (MonoInst *tree, int arity)
/*
 * mono_arch_get_domain_intrinsic:
 *
 * Arch hook returning an inlined IR sequence for fetching the current
 * domain, or NULL when no fast path exists (body elided in this view).
 */
4766 MonoInst* mono_arch_get_domain_intrinsic (MonoCompile* cfg)
4771 MonoInst* mono_arch_get_thread_intrinsic (MonoCompile* cfg)