2 * mini-sparc.c: Sparc backend for the Mono code generator
5 * Paolo Molaro (lupus@ximian.com)
6 * Dietmar Maurer (dietmar@ximian.com)
9 * Christopher Taylor (ct@gentoo.org)
10 * Mark Crichton (crichton@gimp.org)
11 * Zoltan Varga (vargaz@freemail.hu)
13 * (C) 2003 Ximian, Inc.
21 #include <sys/systeminfo.h>
25 #include <mono/metadata/appdomain.h>
26 #include <mono/metadata/debug-helpers.h>
27 #include <mono/utils/mono-math.h>
29 #include "mini-sparc.h"
32 #include "cpu-sparc.h"
35 * Sparc V9 means two things:
36 * - the instruction set
39 * V9 instructions are only usable if the underlying processor is 64 bit. Most Sparc
40 * processors in use are 64 bit processors. The V9 ABI is only usable if the
41 * mono executable is a 64 bit executable. So it would make sense to use the 64 bit
42 * instructions without using the 64 bit ABI.
47 * - %i0..%i<n> hold the incoming arguments, these are never written by JITted
48 * code. Unused input registers are used for global register allocation.
49 * - %l0..%l7 is used for local register allocation
50 * - %o0..%o6 is used for outgoing arguments
51 * - %o7 and %g1 is used as scratch registers in opcodes
52 * - all floating point registers are used for local register allocation except %f0.
53 * Only double precision registers are used.
58 * - doubles and longs must be stored in dword aligned locations
62 * The following things are not implemented or do not work:
63 * - some fp arithmetic corner cases
64 * The following tests in mono/mini are expected to fail:
65 * - test_0_simple_double_casts
66 * This test casts (guint64)-1 to double and then back to guint64 again.
67 * Under x86, it returns 0, while under sparc it returns -1.
69 * In addition to this, the runtime requires the truncl function, or its
70 * solaris counterpart, aintl, to do some double->int conversions. If this
71 * function is not available, it is emulated somewhat, but the results can be
76 * Possible optimizations:
77 * - delay slot scheduling
78 * - allocate large constants to registers
79 * - use %o registers for local allocation
80 * - implement unwinding through native frames
81 * - add more mul/div/rem optimizations
/* NOTE(review): this file is a sampled extract — interior lines are missing
 * and each visible line keeps its original line number as a junk prefix. */
85 #error "Sparc V9 support not yet implemented."
89 #define MONO_SPARC_THR_TLS 1
/* Marker for not-yet-implemented code paths; aborts at runtime. */
92 #define NOT_IMPLEMENTED g_assert_not_reached ();
/* Round VAL up to the next multiple of ALIGN (ALIGN must be a power of 2). */
94 #define ALIGN_TO(val,align) (((val) + ((align) - 1)) & ~((align) - 1))
96 #define SIGNAL_STACK_SIZE (64 * 1024)
98 /* Whether the CPU supports v9 instructions */
99 gboolean sparcv9 = FALSE;
101 static gpointer mono_arch_get_lmf_addr (void);
/* Forward declaration; defined later in this file. */
104 mono_spillvar_offset_float (MonoCompile *cfg, int spillvar);
/* Map an integer register number (0..31) to its printable name; the table
 * follows sparc register-file order: %g0..%g7, %o0..%o7 (with %o6 = sp,
 * %o7 = call), %l0..%l7, %i0..%i7 (with %i6 = fp, %i7 = retadr). */
107 mono_arch_regname (int reg) {
108 static const char * rnames[] = {
109 "sparc_g0", "sparc_g1", "sparc_g2", "sparc_g3", "sparc_g4",
110 "sparc_g5", "sparc_g6", "sparc_g7", "sparc_o0", "sparc_o1",
111 "sparc_o2", "sparc_o3", "sparc_o4", "sparc_o5", "sparc_sp",
112 "sparc_call", "sparc_l0", "sparc_l1", "sparc_l2", "sparc_l3",
113 "sparc_l4", "sparc_l5", "sparc_l6", "sparc_l7", "sparc_i0",
114 "sparc_i1", "sparc_i2", "sparc_i3", "sparc_i4", "sparc_i5",
115 "sparc_fp", "sparc_retadr"
/* Bounds check before indexing the 32-entry name table. */
117 if (reg >= 0 && reg < 32)
123 * Initialize the cpu to execute managed code.
126 mono_arch_cpu_init (void)
131 * This function returns the optimizations supported on this cpu.
/* NOTE(review): "optimizazions" is the historical (misspelled) arch API
 * name shared across backends; do not rename it locally. */
134 mono_arch_cpu_optimizazions (guint32 *exclude_mask)
/* Ask Solaris for the ISA list; failure here is unexpected and fatal. */
142 if (!sysinfo (SI_ISALIST, buf, 1024))
143 g_assert_not_reached ();
145 /* From glibc. If the getpagesize is 8192, we're on sparc64, which
146 * (in)directly implies that we're a v9 or better.
147 * Improvements to this are greatly accepted...
148 * Also, we don't differentiate between v7 and v8. I sense SIGILL
149 * sniffing in my future.
151 if (getpagesize() == 8192)
152 strcpy (buf, "sparcv9");
154 strcpy (buf, "sparcv8");
158 * On some processors, the cmov instructions are even slower than the
/* Conditional moves are only enabled when the ISA string reports v9. */
161 if (strstr (buf, "sparcv9")) {
162 opts |= MONO_OPT_CMOV | MONO_OPT_FCMOV;
166 *exclude_mask |= MONO_OPT_CMOV | MONO_OPT_FCMOV;
172 mono_sparc_break (void)
/* GCC path: one iflush flushes the icache line containing ADDR. */
177 #define flushi(addr) __asm__ __volatile__ ("iflush %0"::"r"(addr):"memory")
178 #else /* assume Sun's compiler */
179 static void flushi(void *addr)
/* Invalidate the instruction cache for [code, code+size) after the JIT
 * writes machine code, so the CPU does not execute stale instructions. */
186 mono_arch_flush_icache (guint8 *code, gint size)
189 /* Hopefully this is optimized based on the actual CPU */
190 sync_instruction_memory (code, size);
192 guint64 *p = (guint64*)code;
/* NOTE(review): "(size + 8) /8" looks suspicious for computing a byte-range
 * end pointer — verify against the full upstream source. */
193 guint64 *end = (guint64*)(code + ((size + 8) /8));
196 * FIXME: Flushing code in dword chunks is _slow_.
200 __asm__ __volatile__ ("iflush %0"::"r"(p++));
210 * Flush all register windows to memory. Every register window is saved to
211 * a 16 word area on the stack pointed to by its %sp register.
214 mono_sparc_flushw (void)
/* Code buffer for a tiny stub generated once; cached in 'flushw'. */
216 static guint32 start [64];
217 static int inited = 0;
219 static void (*flushw) (void);
224 sparc_save_imm (code, sparc_sp, -160, sparc_sp);
227 sparc_restore_simple (code);
/* Ensure the generated stub fits in the static buffer. */
229 g_assert ((code - start) < 64);
231 flushw = (gpointer)start;
/* Arch-independent entry point; delegates to the cached stub above. */
240 mono_arch_flush_register_windows (void)
242 mono_sparc_flushw ();
246 mono_sparc_is_v9 (void) {
262 /* This needs to be offset by %i0 or %o0 depending on caller/callee */
265 guint32 vt_offset; /* for valuetypes */
/* Assign the next argument to an integer register or a stack slot,
 * updating *gr (registers consumed) and *stack_size. PAIR selects 64-bit
 * arguments, which take a register pair, a stack pair, or — when exactly
 * one register remains — a split register/stack location. */
283 add_general (guint32 *gr, guint32 *stack_size, ArgInfo *ainfo, gboolean pair)
285 ainfo->offset = *stack_size;
288 if (*gr >= PARAM_REGS) {
289 ainfo->storage = ArgOnStack;
292 ainfo->storage = ArgInIReg;
297 /* Always reserve stack space for parameters passed in registers */
301 if (*gr < PARAM_REGS - 1) {
302 /* A pair of registers */
303 ainfo->storage = ArgInIRegPair;
307 else if (*gr >= PARAM_REGS) {
308 /* A pair of stack locations */
309 ainfo->storage = ArgOnStackPair;
310 ainfo->offset = *stack_size;
313 ainfo->storage = ArgInSplitRegStack;
315 ainfo->offset = *stack_size;
326 * Obtain information about a call according to the calling convention.
327 * See the "System V ABI, Sparc Processor Supplement" Sparc V8 version document for
/* Classify every argument and the return value of SIG into registers /
 * stack slots. Caller owns the returned CallInfo (g_malloc0'd below). */
331 get_call_info (MonoMethodSignature *sig, gboolean is_pinvoke)
333 guint32 i, gr, simpletype;
334 int n = sig->hasthis + sig->param_count;
335 guint32 stack_size = 0;
/* One trailing ArgInfo per argument, allocated inline after CallInfo. */
338 cinfo = g_malloc0 (sizeof (CallInfo) + (sizeof (ArgInfo) * n));
344 add_general (&gr, &stack_size, cinfo->args + 0, FALSE);
346 for (i = 0; i < sig->param_count; ++i) {
347 ArgInfo *ainfo = &cinfo->args [sig->hasthis + i];
349 if ((sig->call_convention == MONO_CALL_VARARG) && (i == sig->sentinelpos)) {
350 /* Emit the signature cookie just before the implicit arguments */
351 add_general (&gr, &stack_size, &cinfo->sig_cookie, FALSE);
352 /* Prevent implicit arguments from being passed in registers */
356 DEBUG(printf("param %d: ", i));
357 if (sig->params [i]->byref) {
358 DEBUG(printf("byref\n"));
360 add_general (&gr, &stack_size, ainfo, FALSE);
363 simpletype = sig->params [i]->type;
365 switch (simpletype) {
366 case MONO_TYPE_BOOLEAN:
369 add_general (&gr, &stack_size, ainfo, FALSE);
370 /* the value is in the ls byte */
376 add_general (&gr, &stack_size, ainfo, FALSE);
377 /* the value is in the ls word */
385 case MONO_TYPE_CLASS:
386 case MONO_TYPE_OBJECT:
387 case MONO_TYPE_STRING:
388 case MONO_TYPE_SZARRAY:
389 case MONO_TYPE_ARRAY:
390 add_general (&gr, &stack_size, ainfo, FALSE);
392 case MONO_TYPE_VALUETYPE:
/* An enum valuetype is reclassified as its underlying integral type. */
393 if (sig->params [i]->data.klass->enumtype) {
394 simpletype = sig->params [i]->data.klass->enum_basetype->type;
398 add_general (&gr, &stack_size, ainfo, FALSE);
400 case MONO_TYPE_TYPEDBYREF:
401 add_general (&gr, &stack_size, ainfo, FALSE);
405 add_general (&gr, &stack_size, ainfo, TRUE);
408 /* single precision values are passed in integer registers */
409 add_general (&gr, &stack_size, ainfo, FALSE);
412 /* double precision values are passed in a pair of registers */
413 add_general (&gr, &stack_size, ainfo, TRUE);
416 g_assert_not_reached ();
/* Now classify the return value. */
422 simpletype = sig->ret->type;
424 switch (simpletype) {
425 case MONO_TYPE_BOOLEAN:
436 case MONO_TYPE_CLASS:
437 case MONO_TYPE_OBJECT:
438 case MONO_TYPE_SZARRAY:
439 case MONO_TYPE_ARRAY:
440 case MONO_TYPE_STRING:
441 cinfo->ret.storage = ArgInIReg;
442 cinfo->ret.reg = sparc_i0;
448 cinfo->ret.storage = ArgInIRegPair;
449 cinfo->ret.reg = sparc_i0;
455 cinfo->ret.storage = ArgInFReg;
456 cinfo->ret.reg = sparc_f0;
458 case MONO_TYPE_VALUETYPE:
459 if (sig->ret->data.klass->enumtype) {
460 simpletype = sig->ret->data.klass->enum_basetype->type;
463 cinfo->ret.storage = ArgOnStack;
465 case MONO_TYPE_TYPEDBYREF:
466 cinfo->ret.storage = ArgOnStack;
471 g_error ("Can't handle as return value 0x%x", sig->ret->type);
475 cinfo->stack_usage = stack_size;
476 cinfo->reg_usage = gr;
/* Return whether type T fits in a single 32-bit integer register and is
 * therefore a candidate for global register allocation. */
481 is_regsize_var (MonoType *t) {
485 case MONO_TYPE_BOOLEAN:
496 case MONO_TYPE_OBJECT:
497 case MONO_TYPE_STRING:
498 case MONO_TYPE_CLASS:
499 case MONO_TYPE_SZARRAY:
500 case MONO_TYPE_ARRAY:
502 case MONO_TYPE_VALUETYPE:
/* An enum is regsize iff its underlying integral type is. */
503 if (t->data.klass->enumtype)
504 return is_regsize_var (t->data.klass->enum_basetype);
/* Collect the method variables eligible for global integer register
 * allocation, sorted by the register allocator's preference order. */
511 mono_arch_get_allocatable_int_vars (MonoCompile *cfg)
517 * FIXME: If an argument is allocated to a register, then load it from the
518 * stack in the prolog.
521 for (i = 0; i < cfg->num_varinfo; i++) {
522 MonoInst *ins = cfg->varinfo [i];
523 MonoMethodVar *vmv = MONO_VARINFO (cfg, i);
/* Skip unused variables (empty or inverted live range). */
526 if (vmv->range.first_use.abs_pos >= vmv->range.last_use.abs_pos)
529 /* FIXME: Make arguments on stack allocateable to registers */
530 if (ins->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT) || (ins->opcode == OP_REGVAR) || (ins->opcode == OP_ARG))
533 /* we can only allocate 32 bit values */
534 if (is_regsize_var (ins->inst_vtype)) {
535 g_assert (MONO_VARINFO (cfg, i)->reg == -1);
536 g_assert (i == vmv->idx);
538 vars = mono_varlist_insert_sorted (cfg, vars, vmv, FALSE);
/* Return the list of hard registers usable for global allocation:
 * input registers not consumed by incoming arguments, plus %l0..%l3. */
546 mono_arch_get_global_int_regs (MonoCompile *cfg)
550 MonoMethodSignature *sig;
553 sig = cfg->method->signature;
555 cinfo = get_call_info (sig, FALSE);
557 /* Use unused input registers */
558 for (i = cinfo->reg_usage; i < 6; ++i)
559 regs = g_list_prepend (regs, GUINT_TO_POINTER (sparc_i0 + i));
561 /* Use %l0..%l3 as global registers */
/* 16..19 are the raw register numbers of %l0..%l3 (see rnames table). */
562 for (i = 16; i < 20; ++i)
563 regs = g_list_prepend (regs, GUINT_TO_POINTER (i));
571 * mono_arch_regalloc_cost:
573 * Return the cost, in number of memory references, of the action of
574 * allocating the variable VMV into a register during global register
/* Body not visible in this extract. */
578 mono_arch_regalloc_cost (MonoCompile *cfg, MonoMethodVar *vmv)
584 * Set var information according to the calling convention. sparc version.
585 * The locals var stuff should most likely be split in another method.
588 mono_arch_allocate_vars (MonoCompile *m)
590 MonoMethodSignature *sig;
591 MonoMethodHeader *header;
593 int i, offset, size, align, curinst;
596 header = ((MonoMethodNormal *)m->method)->header;
598 sig = m->method->signature;
600 cinfo = get_call_info (sig, FALSE);
/* Decide where the return value lives, based on the classification. */
602 if (sig->ret->type != MONO_TYPE_VOID) {
603 switch (cinfo->ret.storage) {
607 m->ret->opcode = OP_REGVAR;
608 m->ret->inst_c0 = cinfo->ret.reg;
612 m->ret->opcode = OP_REGOFFSET;
613 m->ret->inst_basereg = sparc_fp;
/* 64 = struct-return pointer slot just past the 16-word register save
 * area in the caller's frame (SPARC V8 ABI) — presumably; confirm. */
614 m->ret->inst_offset = 64;
622 * We use the Sparc V8 calling conventions for managed code as well.
623 * FIXME: Use something more optimized.
626 /* Locals are allocated backwards from %fp */
627 m->frame_reg = sparc_fp;
631 * Reserve a stack slot for holding information used during exception
634 if (header->num_clauses)
637 if (m->method->save_lmf) {
638 offset += sizeof (MonoLMF);
639 m->arch.lmf_offset = offset;
/* Allocate stack slots (or keep registers) for locals. */
642 curinst = m->locals_start;
643 for (i = curinst; i < m->num_varinfo; ++i) {
644 inst = m->varinfo [i];
646 if (inst->opcode == OP_REGVAR) {
647 //g_print ("allocating local %d to %s\n", i, mono_arch_regname (inst->dreg));
651 /* inst->unused indicates native sized value types, this is used by the
652 * pinvoke wrappers when they call functions returning structure */
653 if (inst->unused && MONO_TYPE_ISSTRUCT (inst->inst_vtype) && inst->inst_vtype->type != MONO_TYPE_TYPEDBYREF)
654 size = mono_class_native_size (inst->inst_vtype->data.klass, &align);
656 size = mono_type_stack_size (inst->inst_vtype, &align);
659 * This is needed since structures containing doubles must be doubleword
661 * FIXME: Do this only if needed.
663 if (MONO_TYPE_ISSTRUCT (inst->inst_vtype))
667 * variables are accessed as negative offsets from %fp, so increase
668 * the offset before assigning it to a variable
673 offset &= ~(align - 1);
674 inst->opcode = OP_REGOFFSET;
675 inst->inst_basereg = sparc_fp;
676 inst->inst_offset = -offset;
678 //g_print ("allocating local %d to [%s - %d]\n", i, mono_arch_regname (inst->inst_basereg), - inst->inst_offset);
681 if (sig->call_convention == MONO_CALL_VARARG) {
/* 68 = 64-byte register window save area + 4-byte struct-return slot;
 * incoming stack arguments start at %fp+68 (SPARC V8 ABI). */
682 m->sig_cookie = cinfo->sig_cookie.offset + 68;
/* Now place the incoming arguments. */
685 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
686 inst = m->varinfo [i];
687 if (inst->opcode != OP_REGVAR) {
688 ArgInfo *ainfo = &cinfo->args [i];
689 gboolean inreg = TRUE;
692 if (sig->hasthis && (i == 0))
693 arg_type = &mono_defaults.object_class->byval_arg;
695 arg_type = sig->params [i - sig->hasthis];
697 if (!arg_type->byref && ((arg_type->type == MONO_TYPE_R4)
698 || (arg_type->type == MONO_TYPE_R8)))
700 * Since float arguments are passed in integer registers, we need to
701 * save them to the stack in the prolog.
705 /* FIXME: Allocate volatile arguments to registers */
706 if (inst->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT))
709 if (MONO_TYPE_ISSTRUCT (arg_type))
710 /* FIXME: this isn't needed */
713 switch (ainfo->storage) {
717 inst->opcode = OP_REGVAR;
718 inst->dreg = sparc_i0 + ainfo->reg;
726 case ArgInSplitRegStack:
727 /* Split arguments are saved to the stack in the prolog */
728 inst->opcode = OP_REGOFFSET;
729 /* in parent frame */
730 inst->inst_basereg = sparc_fp;
731 inst->inst_offset = ainfo->offset + 68;
733 if (!arg_type->byref && (arg_type->type == MONO_TYPE_R8)) {
735 * It is very hard to load doubles from non-doubleword aligned
736 * memory locations. So if the offset is misaligned, we copy the
737 * argument to a stack location in the prolog.
739 if (inst->inst_offset % 8) {
740 inst->inst_basereg = sparc_fp;
744 offset &= ~(align - 1);
745 inst->inst_offset = -offset;
754 if (MONO_TYPE_ISSTRUCT (arg_type)) {
755 /* Add a level of indirection */
757 * It would be easier to add OP_LDIND_I here, but ldind_i instructions
758 * are destructively modified in a lot of places in inssel.brg.
761 MONO_INST_NEW (m, indir, 0);
763 inst->opcode = OP_SPARC_INARG_VT;
764 inst->inst_left = indir;
770 * spillvars are stored between the normal locals and the storage reserved
774 m->stack_offset = offset;
776 /* Add a properly aligned dword for use by int<->float conversion opcodes */
778 mono_spillvar_offset_float (m, 0);
784 * take the arguments and generate the arch-specific
785 * instructions to properly call the function in call.
786 * This includes pushing, moving arguments to the right register
790 mono_arch_call_opcode (MonoCompile *cfg, MonoBasicBlock* bb, MonoCallInst *call, int is_virtual) {
792 MonoMethodSignature *sig;
796 guint32 extra_space = 0;
798 sig = call->signature;
799 n = sig->param_count + sig->hasthis;
801 cinfo = get_call_info (sig, sig->pinvoke);
803 for (i = 0; i < n; ++i) {
804 ainfo = cinfo->args + i;
805 if (is_virtual && i == 0) {
806 /* the argument will be attached to the call instruction */
809 if ((sig->call_convention == MONO_CALL_VARARG) && (i == sig->sentinelpos)) {
810 /* FIXME: Test varargs with 0 implicit args */
811 /* FIXME: Test interaction with hasthis */
812 /* Emit the signature cookie just before the first implicit argument */
814 /* FIXME: Add support for signature tokens to AOT */
815 cfg->disable_aot = TRUE;
816 /* We always pass the signature on the stack for simplicity */
817 MONO_INST_NEW (cfg, arg, OP_SPARC_OUTARG_MEM);
818 arg->inst_basereg = sparc_sp;
/* 68 = start of the outgoing argument area past the 64-byte register
 * save area and the struct-return slot (SPARC V8 ABI). */
819 arg->inst_imm = 68 + cinfo->sig_cookie.offset;
820 MONO_INST_NEW (cfg, sig_arg, OP_ICONST);
821 sig_arg->inst_p0 = call->signature;
822 arg->inst_left = sig_arg;
823 arg->type = STACK_PTR;
824 /* prepend, so they get reversed */
825 arg->next = call->out_args;
826 call->out_args = arg;
829 MONO_INST_NEW (cfg, arg, OP_OUTARG);
831 arg->cil_code = in->cil_code;
833 arg->type = in->type;
834 /* prepend, we'll need to reverse them later */
835 arg->next = call->out_args;
836 call->out_args = arg;
/* Valuetype arguments: copy to the stack, pass the address. */
838 if ((i >= sig->hasthis) && (MONO_TYPE_ISSTRUCT(sig->params [i - sig->hasthis]))) {
844 if (sig->params [i - sig->hasthis]->type == MONO_TYPE_TYPEDBYREF) {
845 size = sizeof (MonoTypedRef);
850 size = mono_type_native_stack_size (&in->klass->byval_arg, &align);
852 size = mono_type_stack_size (&in->klass->byval_arg, &align);
855 * We use OP_OUTARG_VT to copy the valuetype to a stack location, then
856 * use the normal OUTARG opcodes to pass the address of the location to
859 MONO_INST_NEW (cfg, inst, OP_OUTARG_VT);
860 inst->inst_left = in;
862 /* The first 6 argument locations are reserved */
863 if (cinfo->stack_usage < 24)
864 cinfo->stack_usage = 24;
866 offset = ALIGN_TO (68 + cinfo->stack_usage, align);
867 pad = offset - (68 + cinfo->stack_usage);
869 inst->inst_c1 = offset;
871 arg->inst_left = inst;
873 cinfo->stack_usage += size;
874 cinfo->stack_usage += pad;
/* Pick the concrete OUTARG opcode from the argument's classification. */
877 switch (ainfo->storage) {
881 if (ainfo->storage == ArgInIRegPair)
882 arg->opcode = OP_SPARC_OUTARG_REGPAIR;
883 arg->unused = sparc_o0 + ainfo->reg;
884 /* outgoing arguments begin at sp+68 */
885 arg->inst_basereg = sparc_sp;
886 arg->inst_imm = 68 + ainfo->offset;
887 call->used_iregs |= 1 << ainfo->reg;
889 if ((i >= sig->hasthis) && (sig->params [i - sig->hasthis]->type == MONO_TYPE_R8)) {
891 * The OUTARG (freg) implementation needs an extra dword to store
892 * the temporary value.
898 arg->opcode = OP_SPARC_OUTARG_MEM;
899 arg->inst_basereg = sparc_sp;
900 arg->inst_imm = 68 + ainfo->offset;
903 arg->opcode = OP_SPARC_OUTARG_MEMPAIR;
904 arg->inst_basereg = sparc_sp;
905 arg->inst_imm = 68 + ainfo->offset;
907 case ArgInSplitRegStack:
908 arg->opcode = OP_SPARC_OUTARG_SPLIT_REG_STACK;
909 arg->unused = sparc_o0 + ainfo->reg;
910 arg->inst_basereg = sparc_sp;
911 arg->inst_imm = 68 + ainfo->offset;
912 call->used_iregs |= 1 << ainfo->reg;
921 * Reverse the call->out_args list.
924 MonoInst *prev = NULL, *list = call->out_args, *next;
931 call->out_args = prev;
933 call->stack_usage = cinfo->stack_usage + extra_space;
934 cfg->param_area = MAX (cfg->param_area, call->stack_usage);
935 cfg->flags |= MONO_CFG_HAS_CALLS;
941 /* Map opcode to the sparc condition codes */
942 static inline SparcCond
943 opcode_to_sparc_cond (int opcode)
966 case OP_COND_EXC_NE_UN:
974 case OP_COND_EXC_LT_UN:
982 case OP_COND_EXC_GT_UN:
988 case OP_COND_EXC_GE_UN:
994 case OP_COND_EXC_LE_UN:
1001 case OP_COND_EXC_NO:
1002 case OP_COND_EXC_NC:
/* Any opcode without an explicit mapping is a backend bug. */
1005 g_assert_not_reached ();
/* Compute the branch displacement (in instruction words) to the target of
 * INS, or register a patch-info entry when the target is not yet known. */
1010 #define COMPUTE_DISP(ins) \
1011 if (ins->flags & MONO_INST_BRLABEL) { \
1012 if (ins->inst_i0->inst_c0) \
1013 disp = (ins->inst_i0->inst_c0 - ((guint8*)code - cfg->native_code)) >> 2; \
1016 mono_add_patch_info (cfg, (guint8*)code - cfg->native_code, MONO_PATCH_INFO_LABEL, ins->inst_i0); \
1019 if (ins->inst_true_bb->native_offset) \
1020 disp = (ins->inst_true_bb->native_offset - ((guint8*)code - cfg->native_code)) >> 2; \
1023 mono_add_patch_info (cfg, (guint8*)code - cfg->native_code, MONO_PATCH_INFO_BB, ins->inst_true_bb); \
/* Emit a conditional branch; FILLDELAY pads the delay slot with a nop. */
1027 #define EMIT_COND_BRANCH_GENERAL(ins,bop,cond,annul,filldelay) \
1030 COMPUTE_DISP(ins); \
1031 g_assert (sparc_is_imm22 (disp)); \
1032 sparc_ ## bop (code, (annul), cond, disp); \
1033 if (filldelay) sparc_nop (code); \
1036 #define EMIT_COND_BRANCH(ins,cond,annul,filldelay) EMIT_COND_BRANCH_GENERAL((ins),branch,(cond),annul,filldelay)
1037 #define EMIT_FLOAT_COND_BRANCH(ins,cond,annul,filldelay) EMIT_COND_BRANCH_GENERAL((ins),fbranch,(cond),annul,filldelay)
/* v9 branch-with-prediction form; predicts taken when the target is known. */
1039 #define EMIT_COND_BRANCH_PREDICTED(ins,cond,annul,filldelay) \
1043 COMPUTE_DISP(ins); \
1044 predict = (disp != 0) ? 1 : 0; \
1045 g_assert (sparc_is_imm19 (disp)); \
1046 sparc_branchp (code, (annul), (cond), sparc_icc_short, (predict), disp); \
1047 if (filldelay) sparc_nop (code); \
1050 #define EMIT_COND_BRANCH_BPR(ins,bop,predict,annul,filldelay) \
1053 COMPUTE_DISP(ins); \
1054 g_assert (sparc_is_imm22 (disp)); \
1055 sparc_ ## bop (code, (annul), (predict), ins->sreg2, disp); \
1056 if (filldelay) sparc_nop (code); \
1059 /* emit an exception if condition is fail */
1061 * We put the exception throwing code out-of-line, at the end of the method
1063 #define EMIT_COND_SYSTEM_EXCEPTION(ins,cond,sexc_name) do { \
1064 mono_add_patch_info (cfg, (guint8*)(code) - (cfg)->native_code, \
1065 MONO_PATCH_INFO_EXC, sexc_name); \
1067 sparc_branchp (code, 0, (cond), sparc_icc_short, 0, 0); \
1070 sparc_branch (code, 1, cond, 0); \
/* ALU op with an immediate; falls back to a %o7 temporary when the
 * immediate does not fit in the 13-bit simm field. */
1075 #define EMIT_ALU_IMM(ins,op,setcc) do { \
1076 if (sparc_is_imm13 ((ins)->inst_imm)) \
1077 sparc_ ## op ## _imm (code, (setcc), (ins)->sreg1, ins->inst_imm, (ins)->dreg); \
1079 sparc_set (code, ins->inst_imm, sparc_o7); \
1080 sparc_ ## op (code, (setcc), (ins)->sreg1, sparc_o7, (ins)->dreg); \
1084 #define EMIT_LOAD_MEMBASE(ins,op) do { \
1085 if (sparc_is_imm13 (ins->inst_offset)) \
1086 sparc_ ## op ## _imm (code, ins->inst_basereg, ins->inst_offset, ins->dreg); \
1088 sparc_set (code, ins->inst_offset, sparc_o7); \
1089 sparc_ ## op (code, ins->inst_basereg, sparc_o7, ins->dreg); \
/* Immediate stores: %g0 is used directly when storing zero. */
1094 #define EMIT_STORE_MEMBASE_IMM(ins,op) do { \
1096 if (ins->inst_imm == 0) \
1099 sparc_set (code, ins->inst_imm, sparc_o7); \
1102 if (!sparc_is_imm13 (ins->inst_offset)) { \
1103 sparc_set (code, ins->inst_offset, sparc_g1); \
1104 sparc_ ## op (code, sreg, ins->inst_destbasereg, sparc_g1); \
1107 sparc_ ## op ## _imm (code, sreg, ins->inst_destbasereg, ins->inst_offset); \
1110 #define EMIT_STORE_MEMBASE_REG(ins,op) do { \
1111 if (!sparc_is_imm13 (ins->inst_offset)) { \
1112 sparc_set (code, ins->inst_offset, sparc_o7); \
1113 sparc_ ## op (code, ins->sreg1, ins->inst_destbasereg, sparc_o7); \
1116 sparc_ ## op ## _imm (code, ins->sreg1, ins->inst_destbasereg, ins->inst_offset); \
/* Local peephole optimizer: walks the instruction list of BB and removes
 * or rewrites redundant instruction pairs (store+load, load+load,
 * redundant moves, mul-by-1, paired zero stores). */
1120 peephole_pass (MonoCompile *cfg, MonoBasicBlock *bb)
1122 MonoInst *ins, *last_ins = NULL;
1127 switch (ins->opcode) {
1129 /* remove unnecessary multiplication with 1 */
1130 if (ins->inst_imm == 1) {
1131 if (ins->dreg != ins->sreg1) {
1132 ins->opcode = OP_MOVE;
1134 last_ins->next = ins->next;
1140 case OP_LOAD_MEMBASE:
1141 case OP_LOADI4_MEMBASE:
1143 * OP_STORE_MEMBASE_REG reg, offset(basereg)
1144 * OP_LOAD_MEMBASE offset(basereg), reg
1146 if (last_ins && (last_ins->opcode == OP_STOREI4_MEMBASE_REG
1147 || last_ins->opcode == OP_STORE_MEMBASE_REG) &&
1148 ins->inst_basereg == last_ins->inst_destbasereg &&
1149 ins->inst_offset == last_ins->inst_offset) {
/* Load of the value just stored: drop the load or turn it into a move. */
1150 if (ins->dreg == last_ins->sreg1) {
1151 last_ins->next = ins->next;
1155 //static int c = 0; printf ("MATCHX %s %d\n", cfg->method->name,c++);
1156 ins->opcode = OP_MOVE;
1157 ins->sreg1 = last_ins->sreg1;
1161 * Note: reg1 must be different from the basereg in the second load
1162 * OP_LOAD_MEMBASE offset(basereg), reg1
1163 * OP_LOAD_MEMBASE offset(basereg), reg2
1165 * OP_LOAD_MEMBASE offset(basereg), reg1
1166 * OP_MOVE reg1, reg2
1168 } if (last_ins && (last_ins->opcode == OP_LOADI4_MEMBASE
1169 || last_ins->opcode == OP_LOAD_MEMBASE) &&
1170 ins->inst_basereg != last_ins->dreg &&
1171 ins->inst_basereg == last_ins->inst_basereg &&
1172 ins->inst_offset == last_ins->inst_offset) {
1174 if (ins->dreg == last_ins->dreg) {
1175 last_ins->next = ins->next;
1179 ins->opcode = OP_MOVE;
1180 ins->sreg1 = last_ins->dreg;
1183 //g_assert_not_reached ();
1187 * OP_STORE_MEMBASE_IMM imm, offset(basereg)
1188 * OP_LOAD_MEMBASE offset(basereg), reg
1190 * OP_STORE_MEMBASE_IMM imm, offset(basereg)
1191 * OP_ICONST reg, imm
1193 } else if (last_ins && (last_ins->opcode == OP_STOREI4_MEMBASE_IMM
1194 || last_ins->opcode == OP_STORE_MEMBASE_IMM) &&
1195 ins->inst_basereg == last_ins->inst_destbasereg &&
1196 ins->inst_offset == last_ins->inst_offset) {
1197 //static int c = 0; printf ("MATCHX %s %d\n", cfg->method->name,c++);
1198 ins->opcode = OP_ICONST;
1199 ins->inst_c0 = last_ins->inst_imm;
/* NOTE(review): intentionally left as a trap — this rule is unverified. */
1200 g_assert_not_reached (); // check this rule
1204 case OP_LOADU1_MEMBASE:
1205 case OP_LOADI1_MEMBASE:
1206 if (last_ins && (last_ins->opcode == OP_STOREI1_MEMBASE_REG) &&
1207 ins->inst_basereg == last_ins->inst_destbasereg &&
1208 ins->inst_offset == last_ins->inst_offset) {
1209 if (ins->dreg == last_ins->sreg1) {
1210 last_ins->next = ins->next;
1214 //static int c = 0; printf ("MATCHX %s %d\n", cfg->method->name,c++);
1215 ins->opcode = OP_MOVE;
1216 ins->sreg1 = last_ins->sreg1;
1220 case OP_LOADU2_MEMBASE:
1221 case OP_LOADI2_MEMBASE:
1222 if (last_ins && (last_ins->opcode == OP_STOREI2_MEMBASE_REG) &&
1223 ins->inst_basereg == last_ins->inst_destbasereg &&
1224 ins->inst_offset == last_ins->inst_offset) {
1225 if (ins->dreg == last_ins->sreg1) {
1226 last_ins->next = ins->next;
1230 //static int c = 0; printf ("MATCHX %s %d\n", cfg->method->name,c++);
1231 ins->opcode = OP_MOVE;
1232 ins->sreg1 = last_ins->sreg1;
1236 case OP_STOREI4_MEMBASE_IMM:
1237 /* Convert pairs of 0 stores to a dword 0 store */
1238 /* Used when initializing temporaries */
1239 /* We know sparc_fp is dword aligned */
1240 if (last_ins && (last_ins->opcode == OP_STOREI4_MEMBASE_IMM) &&
1241 (ins->inst_destbasereg == last_ins->inst_destbasereg) &&
1242 (ins->inst_destbasereg == sparc_fp) &&
1243 (ins->inst_offset < 0) &&
1244 ((ins->inst_offset % 8) == 0) &&
1245 ((ins->inst_offset == last_ins->inst_offset - 4)) &&
1246 (ins->inst_imm == 0) &&
1247 (last_ins->inst_imm == 0)) {
1249 last_ins->opcode = OP_STOREI8_MEMBASE_IMM;
1250 last_ins->inst_offset = ins->inst_offset;
1251 last_ins->next = ins->next;
1264 * Convert compare with zero+branch to BRcc
1267 * This only works in 64 bit mode, since it examines all 64
1268 * bits of the register.
/* NOTE(review): the leading "0 &&" disables this v9-only rule. */
1270 if (0 && sparcv9 && last_ins &&
1271 (last_ins->opcode == OP_COMPARE_IMM) &&
1272 (last_ins->inst_imm == 0)) {
1273 MonoInst *next = ins->next;
1274 switch (ins->opcode) {
1276 ins->opcode = OP_SPARC_BRZ;
1279 ins->opcode = OP_SPARC_BRNZ;
1282 ins->opcode = OP_SPARC_BRLZ;
1285 ins->opcode = OP_SPARC_BRGZ;
1288 ins->opcode = OP_SPARC_BRGEZ;
1291 ins->opcode = OP_SPARC_BRLEZ;
1294 g_assert_not_reached ();
1296 ins->sreg2 = last_ins->sreg1;
1298 last_ins->next = next;
/* Remove self-moves and move/counter-move pairs. */
1309 if (ins->dreg == ins->sreg1) {
1311 last_ins->next = ins->next;
1316 * OP_MOVE sreg, dreg
1317 * OP_MOVE dreg, sreg
1319 if (last_ins && last_ins->opcode == OP_MOVE &&
1320 ins->sreg1 == last_ins->dreg &&
1321 ins->dreg == last_ins->sreg1) {
1322 last_ins->next = ins->next;
1331 bb->last_ins = last_ins;
1334 /* Parameters used by the register allocator */
1336 /* Use %l4..%l7 as local registers */
1337 #define ARCH_CALLER_REGS (0xf0<<16)
1338 /* Use %f2..%f30 as the double precision floating point local registers */
1339 #define ARCH_CALLER_FREGS (0x55555554)
/* NOTE(review): unguarded if — callers must not use DEBUG() as the sole
 * body of an outer if/else (dangling-else hazard). */
1342 #define DEBUG(a) if (cfg->verbose_level > 1) a
1344 #define reg_is_freeable(r) ((1 << (r)) & ARCH_CALLER_REGS)
1345 #define freg_is_freeable(r) (((1) << (r)) & ARCH_CALLER_FREGS)
1354 static const char*const * ins_spec = sparc_desc;
/* Debug helper: pretty-print instruction INS (index I) using its spec
 * string; symbolic registers (>= MONO_MAX_IREGS) print as R<n>. */
1357 print_ins (int i, MonoInst *ins)
1359 const char *spec = ins_spec [ins->opcode];
1360 g_print ("\t%-2d %s", i, mono_inst_name (ins->opcode));
1361 if (spec [MONO_INST_DEST]) {
1362 if (ins->dreg >= MONO_MAX_IREGS)
1363 g_print (" R%d <-", ins->dreg);
1365 if (spec [MONO_INST_DEST] == 'b')
1366 g_print (" [%s + 0x%x] <-", mono_arch_regname (ins->dreg), ins->inst_offset);
1368 g_print (" %s <-", mono_arch_regname (ins->dreg));
1370 if (spec [MONO_INST_SRC1]) {
1371 if (ins->sreg1 >= MONO_MAX_IREGS)
1372 g_print (" R%d", ins->sreg1);
1374 if (spec [MONO_INST_SRC1] == 'b')
1375 g_print (" [%s + 0x%x]", mono_arch_regname (ins->sreg1), ins->inst_offset);
1377 g_print (" %s", mono_arch_regname (ins->sreg1));
1379 if (spec [MONO_INST_SRC2]) {
1380 if (ins->sreg2 >= MONO_MAX_IREGS)
1381 g_print (" R%d", ins->sreg2);
1383 g_print (" %s", mono_arch_regname (ins->sreg2));
1385 if (spec [MONO_INST_CLOB])
1386 g_print (" clobbers: %c", spec [MONO_INST_CLOB]);
/* Debug helper: dump the liveness range recorded for each of NUM
 * (hard or symbolic) registers in tracking array T. */
1391 print_regtrack (RegTrack *t, int num)
1397 for (i = 0; i < num; ++i) {
1400 if (i >= MONO_MAX_IREGS) {
1401 g_snprintf (buf, sizeof(buf), "R%d", i);
1404 r = mono_arch_regname (i);
1405 g_print ("liveness: %s [%d - %d]\n", r, t [i].born_in, t[i].last_use);
1409 typedef struct InstList InstList;
/* Prepend DATA to LIST, allocating the node from POOL (no free needed:
 * memory-pool lifetime). */
1417 static inline InstList*
1418 inst_list_prepend (MonoMemPool *pool, InstList *list, MonoInst *data)
1420 InstList *item = mono_mempool_alloc (pool, sizeof (InstList));
1429 #define STACK_OFFSETS_POSITIVE
1432 * returns the offset used by spillvar. It allocates a new
1433 * spill variable if necessary.
1436 mono_spillvar_offset (MonoCompile *cfg, int spillvar)
1438 MonoSpillInfo **si, *info;
1441 si = &cfg->spill_info;
/* Walk (and extend) the spill-info list until slot SPILLVAR exists. */
1443 while (i <= spillvar) {
1446 *si = info = mono_mempool_alloc (cfg->mempool, sizeof (MonoSpillInfo));
1448 cfg->stack_offset += sizeof (gpointer);
/* Spill slots live at negative offsets from the frame pointer. */
1449 info->offset = - cfg->stack_offset;
1453 return (*si)->offset;
1459 g_assert_not_reached ();
/* Like mono_spillvar_offset, but for double-sized float spill slots,
 * which must be 8-byte aligned. */
1464 mono_spillvar_offset_float (MonoCompile *cfg, int spillvar)
1466 MonoSpillInfo **si, *info;
1469 si = &cfg->spill_info_float;
1471 while (i <= spillvar) {
1474 *si = info = mono_mempool_alloc (cfg->mempool, sizeof (MonoSpillInfo));
1476 cfg->stack_offset += sizeof (double);
/* Keep the slot doubleword aligned for fp loads/stores. */
1477 cfg->stack_offset = ALIGN_TO (cfg->stack_offset, 8);
1478 info->offset = - cfg->stack_offset;
1482 return (*si)->offset;
1488 g_assert_not_reached ();
1493 * Force the spilling of the variable in the symbolic register 'reg'.
1496 get_register_force_spilling (MonoCompile *cfg, InstList *item, MonoInst *ins, int reg)
1501 sel = cfg->rs->iassign [reg];
1502 /*i = cfg->rs->isymbolic [sel];
1503 g_assert (i == reg);*/
/* Record the spill slot (negative index encodes "spilled") and free the
 * hard register that was backing the symbolic one. */
1505 spill = ++cfg->spill_count;
1506 cfg->rs->iassign [i] = -spill - 1;
1507 mono_regstate_free_int (cfg->rs, sel);
1508 /* we need to create a spill var and insert a load to sel after the current instruction */
1509 MONO_INST_NEW (cfg, load, OP_LOAD_MEMBASE);
1511 load->inst_basereg = cfg->frame_reg;
1512 load->inst_offset = mono_spillvar_offset (cfg, spill);
/* Find the insertion point: the list is walked in reverse order. */
1514 while (ins->next != item->prev->data)
1517 load->next = ins->next;
1519 DEBUG (g_print ("SPILLED LOAD (%d at 0x%08x(%%sp)) R%d (freed %s)\n", spill, load->inst_offset, i, mono_arch_regname (sel)));
/* Re-reserve the freed register for the current instruction. */
1520 i = mono_regstate_alloc_int (cfg->rs, 1 << sel);
1521 g_assert (i == sel);
/* Allocate an integer hard register for symbolic register REG from
 * REGMASK, spilling some currently-assigned register if none is free.
 * Registers used by the current instruction are excluded first. */
1527 get_register_spilling (MonoCompile *cfg, InstList *item, MonoInst *ins, guint32 regmask, int reg)
1532 DEBUG (g_print ("start regmask to assign R%d: 0x%08x (R%d <- R%d R%d)\n", reg, regmask, ins->dreg, ins->sreg1, ins->sreg2));
1533 /* exclude the registers in the current instruction */
1534 if (reg != ins->sreg1 && (reg_is_freeable (ins->sreg1) || (ins->sreg1 >= MONO_MAX_IREGS && cfg->rs->iassign [ins->sreg1] >= 0))) {
1535 if (ins->sreg1 >= MONO_MAX_IREGS)
1536 regmask &= ~ (1 << cfg->rs->iassign [ins->sreg1]);
1538 regmask &= ~ (1 << ins->sreg1);
1539 DEBUG (g_print ("excluding sreg1 %s\n", mono_arch_regname (ins->sreg1)));
1541 if (reg != ins->sreg2 && (reg_is_freeable (ins->sreg2) || (ins->sreg2 >= MONO_MAX_IREGS && cfg->rs->iassign [ins->sreg2] >= 0))) {
1542 if (ins->sreg2 >= MONO_MAX_IREGS)
1543 regmask &= ~ (1 << cfg->rs->iassign [ins->sreg2]);
1545 regmask &= ~ (1 << ins->sreg2);
1546 DEBUG (g_print ("excluding sreg2 %s %d\n", mono_arch_regname (ins->sreg2), ins->sreg2));
1548 if (reg != ins->dreg && reg_is_freeable (ins->dreg)) {
1549 regmask &= ~ (1 << ins->dreg);
1550 DEBUG (g_print ("excluding dreg %s\n", mono_arch_regname (ins->dreg)));
1553 DEBUG (g_print ("available regmask: 0x%08x\n", regmask));
1554 g_assert (regmask); /* need at least a register we can free */
1556 /* we should track prev_use and spill the register that's farther */
/* Pick the first (lowest-numbered) candidate left in the mask. */
1557 for (i = 0; i < MONO_MAX_IREGS; ++i) {
1558 if (regmask & (1 << i)) {
1560 DEBUG (g_print ("selected register %s has assignment %d\n", mono_arch_regname (sel), cfg->rs->iassign [sel]));
/* Spill the symbolic register currently occupying SEL, then reload it
 * after the current instruction (same pattern as force-spilling above). */
1564 i = cfg->rs->isymbolic [sel];
1565 spill = ++cfg->spill_count;
1566 cfg->rs->iassign [i] = -spill - 1;
1567 mono_regstate_free_int (cfg->rs, sel);
1568 /* we need to create a spill var and insert a load to sel after the current instruction */
1569 MONO_INST_NEW (cfg, load, OP_LOAD_MEMBASE);
1571 load->inst_basereg = cfg->frame_reg;
1572 load->inst_offset = mono_spillvar_offset (cfg, spill);
1574 while (ins->next != item->prev->data)
1577 load->next = ins->next;
1579 DEBUG (g_print ("SPILLED LOAD (%d at 0x%08x(%%sp)) R%d (freed %s)\n", spill, load->inst_offset, i, mono_arch_regname (sel)));
1580 i = mono_regstate_alloc_int (cfg->rs, 1 << sel);
1581 g_assert (i == sel);
/*
 * get_float_register_spilling:
 *
 *   Floating-point counterpart of get_register_spilling (): frees one of
 * the FP hard registers in REGMASK for symbolic register REG.  Registers
 * used by the current instruction INS are excluded; the evicted symbolic
 * register is marked spilled in fassign and an OP_LOADR8_MEMBASE reloading
 * it from its FP spill slot is linked in after INS.  Returns the selected
 * FP hard register.
 */
1587 get_float_register_spilling (MonoCompile *cfg, InstList *item, MonoInst *ins, guint32 regmask, int reg)
1592 	DEBUG (g_print ("start regmask to assign R%d: 0x%08x (R%d <- R%d R%d)\n", reg, regmask, ins->dreg, ins->sreg1, ins->sreg2));
1593 	/* exclude the registers in the current instruction */
1594 	if (reg != ins->sreg1 && (freg_is_freeable (ins->sreg1) || (ins->sreg1 >= MONO_MAX_FREGS && cfg->rs->fassign [ins->sreg1] >= 0))) {
1595 	if (ins->sreg1 >= MONO_MAX_FREGS)
/* symbolic sreg1: exclude the FP hard reg currently assigned to it */
1596 	regmask &= ~ (1 << cfg->rs->fassign [ins->sreg1]);
1598 	regmask &= ~ (1 << ins->sreg1);
1599 	DEBUG (g_print ("excluding sreg1 %s\n", mono_arch_regname (ins->sreg1)));
1601 	if (reg != ins->sreg2 && (freg_is_freeable (ins->sreg2) || (ins->sreg2 >= MONO_MAX_FREGS && cfg->rs->fassign [ins->sreg2] >= 0))) {
1602 	if (ins->sreg2 >= MONO_MAX_FREGS)
1603 	regmask &= ~ (1 << cfg->rs->fassign [ins->sreg2]);
1605 	regmask &= ~ (1 << ins->sreg2);
1606 	DEBUG (g_print ("excluding sreg2 %s %d\n", mono_arch_regname (ins->sreg2), ins->sreg2));
1608 	if (reg != ins->dreg && freg_is_freeable (ins->dreg)) {
1609 	regmask &= ~ (1 << ins->dreg);
1610 	DEBUG (g_print ("excluding dreg %s\n", mono_arch_regname (ins->dreg)));
1613 	DEBUG (g_print ("available regmask: 0x%08x\n", regmask));
1614 	g_assert (regmask); /* need at least a register we can free */
1616 	/* we should track prev_use and spill the register that's farther */
1617 	for (i = 0; i < MONO_MAX_FREGS; ++i) {
1618 	if (regmask & (1 << i)) {
1620 	DEBUG (g_print ("selected register %s has assignment %d\n", mono_arch_regname (sel), cfg->rs->fassign [sel]));
/* evict the current symbolic owner of the chosen register */
1624 	i = cfg->rs->fsymbolic [sel];
1625 	spill = ++cfg->spill_count;
1626 	cfg->rs->fassign [i] = -spill - 1;
1627 	mono_regstate_free_float(cfg->rs, sel);
1628 	/* we need to create a spill var and insert a load to sel after the current instruction */
1629 	MONO_INST_NEW (cfg, load, OP_LOADR8_MEMBASE);
1631 	load->inst_basereg = cfg->frame_reg;
1632 	load->inst_offset = mono_spillvar_offset_float (cfg, spill);
1634 	while (ins->next != item->prev->data)
1637 	load->next = ins->next;
1639 	DEBUG (g_print ("SPILLED LOAD (%d at 0x%08x(%%sp)) R%d (freed %s)\n", spill, load->inst_offset, i, mono_arch_regname (sel)));
/* re-reserve the freed register for the caller */
1640 	i = mono_regstate_alloc_float (cfg->rs, 1 << sel);
1641 	g_assert (i == sel);
/*
 * create_copy_ins:
 *   Create an OP_MOVE copying hard register SRC to DEST and link it into
 * the instruction list after INS.
 */
1647 create_copy_ins (MonoCompile *cfg, int dest, int src, MonoInst *ins)
1650 	MONO_INST_NEW (cfg, copy, OP_MOVE);
1654 	copy->next = ins->next;
1657 	DEBUG (g_print ("\tforced copy from %s to %s\n", mono_arch_regname (src), mono_arch_regname (dest)));
/*
 * create_copy_ins_float:
 *   FP counterpart of create_copy_ins (): create an OP_FMOVE copying FP
 * register SRC to DEST, linked in after INS.
 */
1662 create_copy_ins_float (MonoCompile *cfg, int dest, int src, MonoInst *ins)
1665 	MONO_INST_NEW (cfg, copy, OP_FMOVE);
1669 	copy->next = ins->next;
1672 	DEBUG (g_print ("\tforced copy from %s to %s\n", mono_arch_regname (src), mono_arch_regname (dest)));
/*
 * create_spilled_store:
 *   Create an OP_STORE_MEMBASE_REG that saves hard register REG to the
 * frame slot of spill variable SPILL, linked in after INS.  PREV_REG is the
 * symbolic register being spilled (used for the debug trace).
 */
1677 create_spilled_store (MonoCompile *cfg, int spill, int reg, int prev_reg, MonoInst *ins)
1680 	MONO_INST_NEW (cfg, store, OP_STORE_MEMBASE_REG);
1682 	store->inst_destbasereg = cfg->frame_reg;
1683 	store->inst_offset = mono_spillvar_offset (cfg, spill);
1685 	store->next = ins->next;
1688 	DEBUG (g_print ("SPILLED STORE (%d at 0x%08x(%%sp)) R%d (from %s)\n", spill, store->inst_offset, prev_reg, mono_arch_regname (reg)));
/*
 * create_spilled_store_float:
 *   FP counterpart of create_spilled_store (): OP_STORER8_MEMBASE_REG
 * saving FP register REG to the FP spill slot of SPILL, linked in after INS.
 */
1693 create_spilled_store_float (MonoCompile *cfg, int spill, int reg, int prev_reg, MonoInst *ins)
1696 	MONO_INST_NEW (cfg, store, OP_STORER8_MEMBASE_REG);
1698 	store->inst_destbasereg = cfg->frame_reg;
1699 	store->inst_offset = mono_spillvar_offset_float (cfg, spill);
1701 	store->next = ins->next;
1704 	DEBUG (g_print ("SPILLED STORE (%d at 0x%08x(%%sp)) R%d (from %s)\n", spill, store->inst_offset, prev_reg, mono_arch_regname (reg)));
/*
 * insert_before_ins:
 *   Link TO_INSERT into the instruction list immediately before INS.
 * ITEM is the InstList node for INS in the reversed list, so
 * item->next->data is an instruction that precedes INS in emission order;
 * it is walked forward until its ->next is INS.
 */
1709 insert_before_ins (MonoInst *ins, InstList *item, MonoInst* to_insert)
1712 	g_assert (item->next);
1713 	prev = item->next->data;
1715 	while (prev->next != ins)
1717 	to_insert->next = ins;
1718 	prev->next = to_insert;
/* point the list node at the inserted instruction; this is
1720 	 * needed otherwise in the next instruction we can add an ins to the
1721 	 * end and that would get past this instruction.
 */
1723 	item->data = to_insert;
/*
 * alloc_int_reg:
 *   Return the integer hard register assigned to symbolic register SYM_REG,
 * allocating one from ALLOW_MASK when it has none (spilling via
 * get_register_spilling () when no register in the mask is free).  A
 * negative existing assignment means the reg was spilled: a spill store is
 * emitted after INS to save the new value.
 */
1727 alloc_int_reg (MonoCompile *cfg, InstList *curinst, MonoInst *ins, int sym_reg, guint32 allow_mask)
1729 	int val = cfg->rs->iassign [sym_reg];
1733 	/* the register gets spilled after this inst */
1736 	val = mono_regstate_alloc_int (cfg->rs, allow_mask);
1738 	val = get_register_spilling (cfg, curinst, ins, allow_mask, sym_reg);
1739 	cfg->rs->iassign [sym_reg] = val;
1740 	/* add option to store before the instruction for src registers */
1742 	create_spilled_store (cfg, spill, val, sym_reg, ins);
1744 	cfg->rs->isymbolic [val] = sym_reg;
1748 /* FIXME: Strange loads from the stack in basic-float.cs:test_2_rem */
1751 * Local register allocation.
1752 * We first scan the list of instructions and we save the liveness info of
1753 each register (when the register is first used, when its value is set, etc.).
1754 * We also reverse the list of instructions (in the InstList list) because assigning
1755 * registers backwards allows for more tricks to be used.
/*
 * mono_arch_local_regalloc:
 *
 *   Per-basic-block local register allocator.  Two passes over BB's
 * instructions: a forward pass recording liveness (prev_use/last_use/
 * born_in/killed_in) per symbolic register into reginfo (int) and reginfof
 * (float) while building a reversed instruction list, then a backward pass
 * over the reversed list assigning hard registers to dreg/sreg1/sreg2,
 * inserting spill stores/loads and forced copies as needed.
 */
1758 mono_arch_local_regalloc (MonoCompile *cfg, MonoBasicBlock *bb)
1761 	MonoRegState *rs = cfg->rs;
1763 	RegTrack *reginfo, *reginfof;
1764 	RegTrack *reginfo1, *reginfo2, *reginfod;
1765 	InstList *tmp, *reversed = NULL;
1767 	guint32 src1_mask, src2_mask, dest_mask;
1768 	guint32 cur_iregs, cur_fregs;
1770 	/* FIXME: Use caller saved regs and %i1-%2 for allocation */
/* (re)initialize the register state for this block */
1774 	rs->next_vireg = bb->max_ireg;
1775 	rs->next_vfreg = bb->max_freg;
1776 	mono_regstate_assign (rs);
1777 	reginfo = mono_mempool_alloc0 (cfg->mempool, sizeof (RegTrack) * rs->next_vireg);
1778 	reginfof = mono_mempool_alloc0 (cfg->mempool, sizeof (RegTrack) * rs->next_vfreg);
1779 	rs->ifree_mask = ARCH_CALLER_REGS;
1780 	rs->ffree_mask = ARCH_CALLER_FREGS;
1784 	DEBUG (g_print ("LOCAL regalloc: basic block: %d\n", bb->block_num));
1785 	/* forward pass on the instructions to collect register liveness info */
1787 	spec = ins_spec [ins->opcode];
1789 	mono_print_tree_nl (ins);
1792 	DEBUG (print_ins (i, ins));
1794 	if (spec [MONO_INST_SRC1]) {
/* 'f' in the spec string selects the float tracking table */
1795 	if (spec [MONO_INST_SRC1] == 'f')
1796 	reginfo1 = reginfof;
1799 	reginfo1 [ins->sreg1].prev_use = reginfo1 [ins->sreg1].last_use;
1800 	reginfo1 [ins->sreg1].last_use = i;
1804 	if (spec [MONO_INST_SRC2]) {
1805 	if (spec [MONO_INST_SRC2] == 'f')
1806 	reginfo2 = reginfof;
1809 	reginfo2 [ins->sreg2].prev_use = reginfo2 [ins->sreg2].last_use;
1810 	reginfo2 [ins->sreg2].last_use = i;
1814 	if (spec [MONO_INST_DEST]) {
1815 	if (spec [MONO_INST_DEST] == 'f')
1816 	reginfod = reginfof;
1819 	if (spec [MONO_INST_DEST] != 'b') /* it's not just a base register */
1820 	reginfod [ins->dreg].killed_in = i;
1821 	reginfod [ins->dreg].prev_use = reginfod [ins->dreg].last_use;
1822 	reginfod [ins->dreg].last_use = i;
1823 	if (reginfod [ins->dreg].born_in == 0 || reginfod [ins->dreg].born_in > i)
1824 	reginfod [ins->dreg].born_in = i;
/* 'l' = long result: the pair dreg/dreg+1 is tracked together */
1825 	if (spec [MONO_INST_DEST] == 'l') {
1826 	/* result in eax:edx, the virtual register is allocated sequentially */
1827 	reginfod [ins->dreg + 1].prev_use = reginfod [ins->dreg + 1].last_use;
1828 	reginfod [ins->dreg + 1].last_use = i;
1829 	if (reginfod [ins->dreg + 1].born_in == 0 || reginfod [ins->dreg + 1].born_in > i)
1830 	reginfod [ins->dreg + 1].born_in = i;
/* prepend builds the reversed list consumed by the backward pass */
1835 	reversed = inst_list_prepend (cfg->mempool, reversed, ins);
1840 	cur_iregs = ARCH_CALLER_REGS;
1841 	cur_fregs = ARCH_CALLER_FREGS;
1843 	DEBUG (print_regtrack (reginfo, rs->next_vireg));
1844 	DEBUG (print_regtrack (reginfof, rs->next_vfreg));
/* backward pass: walk the reversed list and assign hard registers */
1847 	int prev_dreg, prev_sreg1, prev_sreg2;
1850 	spec = ins_spec [ins->opcode];
1851 	DEBUG (g_print ("processing:"));
1852 	DEBUG (print_ins (i, ins));
1854 	/* make the register available for allocation: FIXME add fp reg */
1855 	if (ins->opcode == OP_SETREG || ins->opcode == OP_SETREGIMM) {
1856 	/* Dont free register which can't be allocated */
1857 	if (reg_is_freeable (ins->dreg)) {
1858 	cur_iregs |= 1 << ins->dreg;
1859 	DEBUG (g_print ("adding %d to cur_iregs\n", ins->dreg));
1861 	} else if (ins->opcode == OP_SETFREG) {
1862 	if (freg_is_freeable (ins->dreg)) {
1863 	cur_fregs |= 1 << ins->dreg;
1864 	DEBUG (g_print ("adding %d to cur_fregs\n", ins->dreg));
1866 	} else if (spec [MONO_INST_CLOB] == 'c') {
1867 	MonoCallInst *cinst = (MonoCallInst*)ins;
1868 	DEBUG (g_print ("excluding regs 0x%x from cur_iregs (0x%x)\n", cinst->used_iregs, cur_iregs));
1869 	cur_iregs &= ~cinst->used_iregs;
1870 	cur_fregs &= ~cinst->used_fregs;
1871 	DEBUG (g_print ("available cur_iregs: 0x%x\n", cur_iregs));
1872 	/* registers used by the calling convention are excluded from
1873 	 * allocation: they will be selectively enabled when they are
1874 	 * assigned by the special SETREG opcodes.
1877 	dest_mask = src1_mask = src2_mask = cur_iregs;
/* --- destination register --- */
1882 	/* update for use with FP regs... */
1883 	if (spec [MONO_INST_DEST] == 'f') {
1884 	if (ins->dreg >= MONO_MAX_FREGS) {
1885 	val = rs->fassign [ins->dreg];
1886 	prev_dreg = ins->dreg;
1890 	/* the register gets spilled after this inst */
1893 	dest_mask = cur_fregs;
1894 	val = mono_regstate_alloc_float (rs, dest_mask);
1896 	val = get_float_register_spilling (cfg, tmp, ins, dest_mask, ins->dreg);
1897 	rs->fassign [ins->dreg] = val;
1899 	create_spilled_store_float (cfg, spill, val, prev_dreg, ins);
1901 	DEBUG (g_print ("\tassigned dreg %s to dest R%d\n", mono_arch_regname (val), ins->dreg));
1902 	rs->fsymbolic [val] = prev_dreg;
/* NOTE(review): this indexes reginfo (the *int* tracking table) for a
 * float vreg — possibly should be reginfof; confirm against history. */
1907 	if (freg_is_freeable (ins->dreg) && prev_dreg >= 0 && (reginfo [prev_dreg].born_in >= i || !(cur_fregs & (1 << ins->dreg)))) {
1908 	DEBUG (g_print ("\tfreeable %s (R%d) (born in %d)\n", mono_arch_regname (ins->dreg), prev_dreg, reginfo [prev_dreg].born_in));
1909 	mono_regstate_free_float (rs, ins->dreg);
1911 	} else if (ins->dreg >= MONO_MAX_IREGS) {
1912 	val = rs->iassign [ins->dreg];
1913 	prev_dreg = ins->dreg;
1917 	/* the register gets spilled after this inst */
1920 	val = mono_regstate_alloc_int (rs, dest_mask);
1922 	val = get_register_spilling (cfg, tmp, ins, dest_mask, ins->dreg);
1923 	rs->iassign [ins->dreg] = val;
1925 	create_spilled_store (cfg, spill, val, prev_dreg, ins);
1927 	DEBUG (g_print ("\tassigned dreg %s to dest R%d\n", mono_arch_regname (val), ins->dreg));
1928 	rs->isymbolic [val] = prev_dreg;
/* long destination: allocate the high word in the adjacent register */
1930 	if (spec [MONO_INST_DEST] == 'l') {
1931 	int hreg = prev_dreg + 1;
1932 	val = rs->iassign [hreg];
1936 	/* the register gets spilled after this inst */
1939 	/* The second register must be a pair of the first */
1940 	dest_mask = 1 << (rs->iassign [prev_dreg] + 1);
1941 	val = mono_regstate_alloc_int (rs, dest_mask);
1943 	val = get_register_spilling (cfg, tmp, ins, dest_mask, hreg);
1944 	rs->iassign [hreg] = val;
1946 	create_spilled_store (cfg, spill, val, hreg, ins);
1949 	/* The second register must be a pair of the first */
1950 	if (val != rs->iassign [prev_dreg] + 1) {
1951 	dest_mask = 1 << (rs->iassign [prev_dreg] + 1);
1953 	val = mono_regstate_alloc_int (rs, dest_mask);
1955 	val = get_register_spilling (cfg, tmp, ins, dest_mask, hreg);
/* fix up a mismatched high reg with a forced copy */
1957 	create_copy_ins (cfg, rs->iassign [hreg], val, ins);
1959 	rs->iassign [hreg] = val;
1963 	DEBUG (g_print ("\tassigned hreg %s to dest R%d\n", mono_arch_regname (val), hreg));
1964 	rs->isymbolic [val] = hreg;
1966 	if (reg_is_freeable (val) && hreg >= 0 && (reginfo [hreg].born_in >= i && !(cur_iregs & (1 << val)))) {
1967 	DEBUG (g_print ("\tfreeable %s (R%d)\n", mono_arch_regname (val), hreg));
1968 	mono_regstate_free_int (rs, val);
/* dreg born here (scanning backwards): the hard reg becomes free above this point */
1974 	if (spec [MONO_INST_DEST] != 'f' && reg_is_freeable (ins->dreg) && prev_dreg >= 0 && (reginfo [prev_dreg].born_in >= i)) {
1975 	DEBUG (g_print ("\tfreeable %s (R%d) (born in %d)\n", mono_arch_regname (ins->dreg), prev_dreg, reginfo [prev_dreg].born_in));
1976 	mono_regstate_free_int (rs, ins->dreg);
/* --- source register 1 --- */
1982 	if (spec [MONO_INST_SRC1] == 'f') {
1983 	if (ins->sreg1 >= MONO_MAX_FREGS) {
1984 	val = rs->fassign [ins->sreg1];
1985 	prev_sreg1 = ins->sreg1;
1989 	/* the register gets spilled after this inst */
1992 	//g_assert (val == -1); /* source cannot be spilled */
1993 	src1_mask = cur_fregs;
1994 	val = mono_regstate_alloc_float (rs, src1_mask);
1996 	val = get_float_register_spilling (cfg, tmp, ins, src1_mask, ins->sreg1);
1997 	rs->fassign [ins->sreg1] = val;
1998 	DEBUG (g_print ("\tassigned sreg1 %s to R%d\n", mono_arch_regname (val), ins->sreg1));
/* a spilled source must be stored *before* this instruction */
2000 	MonoInst *store = create_spilled_store_float (cfg, spill, val, prev_sreg1, NULL);
2001 	insert_before_ins (ins, tmp, store);
2004 	rs->fsymbolic [val] = prev_sreg1;
2009 	} else if (ins->sreg1 >= MONO_MAX_IREGS) {
2010 	val = rs->iassign [ins->sreg1];
2011 	prev_sreg1 = ins->sreg1;
2015 	/* the register gets spilled after this inst */
/* disabled optimization (note the `0 &&`) */
2018 	if (0 && (ins->opcode == OP_MOVE) && reg_is_freeable (ins->dreg)) {
/*
2020 	 * small optimization: the dest register is already allocated
2021 	 * but the src one is not: we can simply assign the same register
2022 	 * here and peephole will get rid of the instruction later.
2023 	 * This optimization may interfere with the clobbering handling:
2024 	 * it removes a mov operation that will be added again to handle clobbering.
 * There are also some other issues that show up with make testjit.
 */
2027 	mono_regstate_alloc_int (rs, 1 << ins->dreg);
2028 	val = rs->iassign [ins->sreg1] = ins->dreg;
2029 	//g_assert (val >= 0);
2030 	DEBUG (g_print ("\tfast assigned sreg1 %s to R%d\n", mono_arch_regname (val), ins->sreg1));
2032 	//g_assert (val == -1); /* source cannot be spilled */
2033 	val = mono_regstate_alloc_int (rs, src1_mask);
2035 	val = get_register_spilling (cfg, tmp, ins, src1_mask, ins->sreg1);
2036 	rs->iassign [ins->sreg1] = val;
2037 	DEBUG (g_print ("\tassigned sreg1 %s to R%d\n", mono_arch_regname (val), ins->sreg1));
2040 	MonoInst *store = create_spilled_store (cfg, spill, val, prev_sreg1, NULL);
2041 	insert_before_ins (ins, tmp, store);
2044 	rs->isymbolic [val] = prev_sreg1;
/* --- source register 2 --- */
2053 	if (spec [MONO_INST_SRC2] == 'f') {
2054 	if (ins->sreg2 >= MONO_MAX_FREGS) {
2055 	val = rs->fassign [ins->sreg2];
2056 	prev_sreg2 = ins->sreg2;
2060 	/* the register gets spilled after this inst */
2063 	src2_mask = cur_fregs;
2064 	val = mono_regstate_alloc_float (rs, src2_mask);
2066 	val = get_float_register_spilling (cfg, tmp, ins, src2_mask, ins->sreg2);
2067 	rs->fassign [ins->sreg2] = val;
2068 	DEBUG (g_print ("\tassigned sreg2 %s to R%d\n", mono_arch_regname (val), ins->sreg2));
2070 	create_spilled_store_float (cfg, spill, val, prev_sreg2, ins);
2072 	rs->fsymbolic [val] = prev_sreg2;
2077 	} else if (ins->sreg2 >= MONO_MAX_IREGS) {
2078 	val = rs->iassign [ins->sreg2];
2079 	prev_sreg2 = ins->sreg2;
2083 	/* the register gets spilled after this inst */
2086 	val = mono_regstate_alloc_int (rs, src2_mask);
2088 	val = get_register_spilling (cfg, tmp, ins, src2_mask, ins->sreg2);
2089 	rs->iassign [ins->sreg2] = val;
2090 	DEBUG (g_print ("\tassigned sreg2 %s to R%d\n", mono_arch_regname (val), ins->sreg2));
2092 	create_spilled_store (cfg, spill, val, prev_sreg2, ins);
2094 	rs->isymbolic [val] = prev_sreg2;
/* call clobbers: sanity-check that no caller-saved register is still busy */
2100 	if (spec [MONO_INST_CLOB] == 'c') {
2102 	guint32 clob_mask = ARCH_CALLER_REGS;
2103 	for (j = 0; j < MONO_MAX_IREGS; ++j) {
2105 	if ((clob_mask & s) && !(rs->ifree_mask & s) && j != ins->sreg1) {
2106 	//g_warning ("register %s busy at call site\n", mono_arch_regname (j));
2110 	/*if (reg_is_freeable (ins->sreg1) && prev_sreg1 >= 0 && reginfo [prev_sreg1].born_in >= i) {
2111 	DEBUG (g_print ("freeable %s\n", mono_arch_regname (ins->sreg1)));
2112 	mono_regstate_free_int (rs, ins->sreg1);
2114 	if (reg_is_freeable (ins->sreg2) && prev_sreg2 >= 0 && reginfo [prev_sreg2].born_in >= i) {
2115 	DEBUG (g_print ("freeable %s\n", mono_arch_regname (ins->sreg2)));
2116 	mono_regstate_free_int (rs, ins->sreg2);
2119 	//DEBUG (print_ins (i, ins));
/*
 * sparc_patch:
 *
 *   Patch the instruction(s) at CODE so they reference TARGET.  The
 * instruction is decoded via its op (bits 31:30) and op2 (bits 24:22)
 * fields and the matching displacement/immediate field is rewritten.
 * DISP is the word displacement from CODE to TARGET (branch targets are
 * instruction-word relative on SPARC).
 */
2126 sparc_patch (guint8 *code, const guint8 *target)
2128 	guint32 ins = *(guint32*)code;
2129 	guint32 op = ins >> 30;
2130 	guint32 op2 = (ins >> 22) & 0x7;
2131 	guint32 rd = (ins >> 25) & 0x1f;
2132 	gint32 disp = (target - code) >> 2;
2134 	//	g_print ("patching 0x%08x (0x%08x) to point to 0x%08x\n", code, ins, target);
/* op=0, op2=2: branch with 22-bit displacement (Bicc) */
2136 	if ((op == 0) && (op2 == 2)) {
2137 	if (!sparc_is_imm22 (disp))
2140 	*(guint32*)code = ((ins >> 22) << 22) | (disp & 0x3fffff);
/* op=0, op2=1: branch with 19-bit displacement (V9 BPcc) */
2142 	else if ((op == 0) && (op2 == 1)) {
2143 	if (!sparc_is_imm19 (disp))
2146 	*(guint32*)code = ((ins >> 19) << 19) | (disp & 0x7ffff);
/* op=0, op2=3: branch with split 16-bit displacement (V9 BPr) */
2148 	else if ((op == 0) && (op2 == 3)) {
2149 	if (!sparc_is_imm16 (disp))
/* the 16-bit displacement is split: d[15:14] at bits 21:20, d[13:0] at bits 13:0 */
2152 	*(guint32*)code &= ~(0x180000 | 0x3fff);
2153 	*(guint32*)code |= ((disp << 21) & (0x180000)) | (disp & 0x3fff);
/* op=0, op2=6: FP branch with 22-bit displacement (FBfcc) */
2155 	else if ((op == 0) && (op2 == 6)) {
2156 	if (!sparc_is_imm22 (disp))
2159 	*(guint32*)code = ((ins >> 22) << 22) | (disp & 0x3fffff);
/* op=0, op2=4: sethi; the following instruction decides the pattern */
2161 	else if ((op == 0) && (op2 == 4)) {
2162 	guint32 ins2 = *(guint32*)(code + 4);
2164 	if (((ins2 >> 30) == 2) && (((ins2 >> 19) & 0x3f) == 2)) {
2165 	/* sethi followed by or */
2166 	guint32 *p = (guint32*)code;
2167 	sparc_set (p, target, rd);
/* pad any words sparc_set did not emit */
2168 	while (p <= (code + 4))
2171 	else if (ins2 == 0x01000000) {
2172 	/* sethi followed by nop */
2173 	guint32 *p = (guint32*)code;
2174 	sparc_set (p, target, rd);
2175 	while (p <= (code + 4))
2178 	else if ((sparc_inst_op (ins2) == 3) && (sparc_inst_imm (ins2))) {
2179 	/* sethi followed by load/store */
/* write the high 22 bits into the sethi, the low 10 into the memop immediate */
2180 	guint32 t = (guint32)target;
2181 	*(guint32*)code &= ~(0x3fffff);
2182 	*(guint32*)code |= (t >> 10);
2183 	*(guint32*)(code + 4) &= ~(0x3ff);
2184 	*(guint32*)(code + 4) |= (t & 0x3ff);
2186 	else if ((sparc_inst_op (ins2) == 2) && (sparc_inst_op3 (ins2) == 0x38) &&
2187 	(sparc_inst_imm (ins2))) {
2188 	/* sethi followed by jmpl */
2189 	guint32 t = (guint32)target;
2190 	*(guint32*)code &= ~(0x3fffff);
2191 	*(guint32*)code |= (t >> 10);
2192 	*(guint32*)(code + 4) &= ~(0x3ff);
2193 	*(guint32*)(code + 4) |= (t & 0x3ff);
/* op=1: call instruction; re-emit with the new displacement */
2198 	else if (op == 01) {
2199 	sparc_call_simple (code, target - code);
/* op=2, op3=2 with immediate: add-immediate; TARGET is the immediate value here */
2201 	else if ((op == 2) && (sparc_inst_op3 (ins) == 0x2) && sparc_inst_imm (ins)) {
2203 	g_assert (sparc_is_imm13 (target));
2204 	*(guint32*)code &= ~(0x1fff);
2205 	*(guint32*)code |= (guint32)target;
2210 	//	g_print ("patched with 0x%08x\n", ins);
2214 * mono_sparc_emit_save_lmf:
2216 Emit the code necessary to push a new entry onto the lmf stack. Used by
2217 * trampolines as well.
/*
 * mono_sparc_emit_save_lmf:
 *   Emit code linking the LMF at %fp+LMF_OFFSET onto the LMF stack.
 * On entry %o0 holds the address of the lmf_addr variable (set up by the
 * caller of this emitter).  Returns the advanced code pointer.
 */
2220 mono_sparc_emit_save_lmf (guint32 *code, guint32 lmf_offset)
/* lmf->lmf_addr = <address in %o0> */
2223 	sparc_st_imm (code, sparc_o0, sparc_fp, lmf_offset + G_STRUCT_OFFSET (MonoLMF, lmf_addr));
2224 	/* Save previous_lmf */
2225 	sparc_ld (code, sparc_o0, sparc_g0, sparc_o7);
2226 	sparc_st_imm (code, sparc_o7, sparc_fp, lmf_offset + G_STRUCT_OFFSET (MonoLMF, previous_lmf));
/* *lmf_addr = &lmf (make this frame's LMF the current one) */
2228 	sparc_add_imm (code, FALSE, sparc_fp, lmf_offset, sparc_o7);
2229 	sparc_st (code, sparc_o7, sparc_o0, sparc_g0);
/*
 * mono_sparc_emit_restore_lmf:
 *   Emit code unlinking the LMF at %fp+LMF_OFFSET from the LMF stack:
 * *lmf->lmf_addr = lmf->previous_lmf.  Uses %l0/%l1 as scratch.
 */
2235 mono_sparc_emit_restore_lmf (guint32 *code, guint32 lmf_offset)
2237 	/* Load previous_lmf */
2238 	sparc_ld_imm (code, sparc_fp, lmf_offset + G_STRUCT_OFFSET (MonoLMF, previous_lmf), sparc_l0);
/* Load lmf_addr */
2240 	sparc_ld_imm (code, sparc_fp, lmf_offset + G_STRUCT_OFFSET (MonoLMF, lmf_addr), sparc_l1);
2241 	/* *(lmf) = previous_lmf */
2242 	sparc_st (code, sparc_l0, sparc_l1, sparc_g0);
/*
 * emit_save_sp_to_lmf:
 *   If the method saves an LMF, store the current %sp into its sp field.
 * Emitted at call sites rather than in the prolog (see comment below).
 */
2247 emit_save_sp_to_lmf (MonoCompile *cfg, guint32 *code)
/*
2250 	 * Since register windows are saved to the current value of %sp, we need to
2251 	 * set the sp field in the lmf before the call, not in the prolog.
 */
2253 	if (cfg->method->save_lmf) {
2254 	gint32 lmf_offset = - cfg->arch.lmf_offset;
2257 	sparc_st_imm (code, sparc_sp, sparc_fp, lmf_offset + G_STRUCT_OFFSET (MonoLMF, sp));
/*
 * emit_vret_token:
 *   After a pinvoke call returning a structure, emit the 'unimp <size>'
 * marker word the SPARC ABI requires; native callees check it to find the
 * size of the returned struct.
 */
2264 emit_vret_token (MonoInst *ins, guint32 *code)
2266 	MonoCallInst *call = (MonoCallInst*)ins;
/*
2270 	 * The sparc ABI requires that calls to functions which return a structure
2271 	 * contain an additional unimpl instruction which is checked by the callee.
 */
2273 	if (call->signature->pinvoke && MONO_TYPE_ISSTRUCT(call->signature->ret)) {
2274 	if (call->signature->ret->type == MONO_TYPE_TYPEDBYREF)
2275 	size = mono_type_stack_size (call->signature->ret, NULL);
2277 	size = mono_class_native_size (call->signature->ret->data.klass, NULL);
/* the unimp immediate field is 12 bits wide */
2278 	sparc_unimp (code, size & 0xfff);
/*
 * emit_move_return_value:
 *   After a call instruction, move the ABI return value (%o0/%o1 for
 * integers and longs, %f0/%f1 for floats) into the call's dreg.
 */
2285 emit_move_return_value (MonoInst *ins, guint32 *code)
2287 	/* Move return value to the target register */
2288 	/* FIXME: do this in the local reg allocator */
2289 	switch (ins->opcode) {
2291 	case OP_VOIDCALL_REG:
2292 	case OP_VOIDCALL_MEMBASE:
2296 	case OP_CALL_MEMBASE:
2297 	sparc_mov_reg_reg (code, sparc_o0, ins->dreg);
2301 	case OP_LCALL_MEMBASE:
/*
2303 	 * ins->dreg is the least significant reg due to the lreg: LCALL rule
 */
2306 	sparc_mov_reg_reg (code, sparc_o0, ins->dreg + 1);
2307 	sparc_mov_reg_reg (code, sparc_o1, ins->dreg);
2311 	case OP_FCALL_MEMBASE:
2312 	sparc_fmovs (code, sparc_f0, ins->dreg);
/* R4 results are returned as single precision; widen to double */
2313 	if (((MonoCallInst*)ins)->signature->ret->type == MONO_TYPE_R4)
2314 	sparc_fstod (code, ins->dreg, ins->dreg);
2316 	sparc_fmovs (code, sparc_f1, ins->dreg + 1);
2320 	case OP_VCALL_MEMBASE:
2330 * emit_load_volatile_arguments:
2332 * Load volatile arguments from the stack to the original input registers.
2333 * Required before a tail call.
/*
 * emit_load_volatile_arguments:
 *   Reload the method's incoming arguments from their stack/frame homes
 * back into the original input registers (%i0..), undoing the argument
 * stores done by emit_prolog.  Required before a tail call so the callee
 * sees the arguments where the calling convention expects them.
 */
2336 emit_load_volatile_arguments (MonoCompile *cfg, guint32 *code)
2338 	MonoMethod *method = cfg->method;
2339 	MonoMethodSignature *sig;
2344 	/* FIXME: Generate intermediate code instead */
2346 	sig = method->signature;
2348 	cinfo = get_call_info (sig, FALSE);
2350 	/* This is the opposite of the code in emit_prolog */
2352 	for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
2353 	ArgInfo *ainfo = cinfo->args + i;
2354 	guint32 stack_offset;
2356 	inst = cfg->varinfo [i];
2358 	if (sig->hasthis && (i == 0))
2359 	arg_type = mono_defaults.object_class;
2361 	arg_type = sig->params [i - sig->hasthis];
/* 68 = offset of the argument save area in the caller's frame
 * (NOTE(review): matches the SPARC 32-bit frame layout; confirm) */
2363 	stack_offset = ainfo->offset + 68;
2364 	ireg = sparc_i0 + ainfo->reg;
2366 	if (ainfo->storage == ArgInSplitRegStack) {
2367 	g_assert (inst->opcode == OP_REGOFFSET);
2368 	if (!sparc_is_imm13 (stack_offset))
2370 	sparc_st_imm (code, inst->inst_basereg, stack_offset, sparc_i5);
/* doubles need dword-aligned pair handling */
2373 	if (!arg_type->byref && (arg_type->type == MONO_TYPE_R8)) {
2374 	if (ainfo->storage == ArgInIRegPair) {
2375 	if (!sparc_is_imm13 (inst->inst_offset + 4))
2377 	sparc_ld_imm (code, inst->inst_basereg, inst->inst_offset, ireg);
2378 	sparc_ld_imm (code, inst->inst_basereg, inst->inst_offset + 4, ireg + 1);
2381 	if (ainfo->storage == ArgInSplitRegStack) {
2382 	if (stack_offset != inst->inst_offset) {
2383 	sparc_ld_imm (code, inst->inst_basereg, inst->inst_offset, sparc_i5);
2384 	sparc_ld_imm (code, inst->inst_basereg, inst->inst_offset + 4, sparc_o7);
2385 	sparc_st_imm (code, sparc_o7, sparc_fp, stack_offset + 4);
2390 	if (ainfo->storage == ArgOnStackPair) {
2391 	if (stack_offset != inst->inst_offset) {
2392 	/* stack_offset is not dword aligned, so we need to make a copy */
2393 	sparc_ld_imm (code, inst->inst_basereg, inst->inst_offset, sparc_o7);
2394 	sparc_st_imm (code, sparc_o7, sparc_fp, stack_offset);
2396 	sparc_ld_imm (code, inst->inst_basereg, inst->inst_offset + 4, sparc_o7);
2397 	sparc_st_imm (code, sparc_o7, sparc_fp, stack_offset + 4);
2402 	g_assert_not_reached ();
2405 	if ((ainfo->storage == ArgInIReg) && (inst->opcode != OP_REGVAR)) {
2406 	/* Argument in register, but need to be saved to stack */
2407 	if (!sparc_is_imm13 (stack_offset))
/* sub-word arguments live at unaligned offsets; reload with the matching width */
2409 	if (stack_offset & 0x1)
2410 	/* FIXME: Is this ldsb or ldub ? */
2411 	sparc_ldsb_imm (code, inst->inst_basereg, stack_offset, ireg);
2413 	if (stack_offset & 0x2)
2414 	sparc_ldsh_imm (code, inst->inst_basereg, stack_offset, ireg);
2416 	sparc_ld_imm (code, inst->inst_basereg, stack_offset, ireg);
2419 	if ((ainfo->storage == ArgInIRegPair) && (inst->opcode != OP_REGVAR)) {
2420 	/* Argument in regpair, but need to be saved to stack */
2421 	if (!sparc_is_imm13 (inst->inst_offset + 4))
2423 	sparc_ld_imm (code, inst->inst_basereg, inst->inst_offset, ireg);
2424 	sparc_st_imm (code, inst->inst_basereg, inst->inst_offset + 4, ireg + 1);
2427 	if ((ainfo->storage == ArgInSplitRegStack) || (ainfo->storage == ArgOnStack))
2428 	if (inst->opcode == OP_REGVAR)
2429 	/* FIXME: Load the argument into memory */
2439 * mono_sparc_is_virtual_call:
2441 Determine whether the instruction at CODE is a virtual call.
/*
 * mono_sparc_is_virtual_call:
 *   Return whether the instruction at CODE is a virtual call: a register
 * indirect call (op=2, op3=0x38, i.e. jmpl) whose delay slot holds the
 * special marker nop ('or %g0, 0xca, %g0') emitted for virtual calls.
 */
2444 mono_sparc_is_virtual_call (guint32 *code)
2451 	if ((sparc_inst_op (*code) == 0x2) && (sparc_inst_op3 (*code) == 0x38)) {
/*
2453 	 * Register indirect call. If it is a virtual call, then the
2454 	 * instruction in the delay slot is a special kind of nop.
 */
2457 	/* Construct special nop */
2458 	sparc_or_imm (p, FALSE, sparc_g0, 0xca, sparc_g0);
/* compare the delay-slot word against the freshly built marker */
2461 	if (code [1] == p [0])
2469 * mono_sparc_get_vcall_slot_addr:
2471 * Determine the vtable slot used by a virtual call.
/*
 * mono_sparc_get_vcall_slot_addr:
 *   Given CODE pointing at a register-indirect call and FP the caller's
 * register window, recover the address of the vtable slot the call was
 * loaded from.  Recognizes the 'ld [r1 + disp], r2; call r2' sequence and
 * reads the base register's value out of the flushed register window.
 */
2474 mono_sparc_get_vcall_slot_addr (guint32 *code, guint32 *fp)
2476 	guint32 ins = code [0];
2477 	guint32 prev_ins = code [-1];
/* flush register windows so FP reflects the saved register values */
2479 	mono_sparc_flushw ();
2481 	if ((sparc_inst_op (ins) == 0x2) && (sparc_inst_op3 (ins) == 0x38)) {
2482 	if ((sparc_inst_op (prev_ins) == 0x3) && (sparc_inst_op3 (prev_ins) == 0)) {
2483 	/* ld [r1 + CONST ], r2; call r2 */
2484 	guint32 base = sparc_inst_rs1 (prev_ins);
2485 	guint32 disp = sparc_inst_imm13 (prev_ins);
/* the register loaded from must be the one being called through */
2488 	g_assert (sparc_inst_rd (prev_ins) == sparc_inst_rs1 (ins));
/* only %o0-%i7 (regs 8-31) are saved in the window; fp[0] is %l0 (reg 16) */
2490 	g_assert ((base >= sparc_o0) && (base <= sparc_i7));
2492 	base_val = fp [base - 16];
2494 	return (gpointer)((guint8*)base_val + disp);
2497 	g_assert_not_reached ();
2500 	g_assert_not_reached ();
2506 * Some conventions used in the following code.
2507 * 2) The only scratch registers we have are o7 and g1. We try to
2508 * stick to o7 when we can, and use g1 when necessary.
2512 mono_arch_output_basic_block (MonoCompile *cfg, MonoBasicBlock *bb)
2517 guint32 *code = (guint32*)(cfg->native_code + cfg->code_len);
2518 MonoInst *last_ins = NULL;
2521 if (cfg->opt & MONO_OPT_PEEPHOLE)
2522 peephole_pass (cfg, bb);
2524 if (cfg->verbose_level > 2)
2525 g_print ("Basic block %d starting at offset 0x%x\n", bb->block_num, bb->native_offset);
2527 cpos = bb->max_offset;
2529 if (cfg->prof_options & MONO_PROFILE_COVERAGE) {
2537 offset = (guint8*)code - cfg->native_code;
2539 max_len = ((guint8 *)ins_spec [ins->opcode])[MONO_INST_LEN];
2541 if (offset > (cfg->code_size - max_len - 16)) {
2542 cfg->code_size *= 2;
2543 cfg->native_code = g_realloc (cfg->native_code, cfg->code_size);
2544 code = (guint32*)(cfg->native_code + offset);
2546 code_start = (guint8*)code;
2547 // if (ins->cil_code)
2548 // g_print ("cil code\n");
2550 switch (ins->opcode) {
2551 case OP_STOREI1_MEMBASE_IMM:
2552 EMIT_STORE_MEMBASE_IMM (ins, stb);
2554 case OP_STOREI2_MEMBASE_IMM:
2555 EMIT_STORE_MEMBASE_IMM (ins, sth);
2557 case OP_STORE_MEMBASE_IMM:
2558 case OP_STOREI4_MEMBASE_IMM:
2559 EMIT_STORE_MEMBASE_IMM (ins, st);
2561 case OP_STOREI8_MEMBASE_IMM:
2562 /* Only generated by peephole opts */
2563 g_assert ((ins->inst_offset % 8) == 0);
2564 g_assert (ins->inst_imm == 0);
2565 EMIT_STORE_MEMBASE_IMM (ins, stx);
2567 case OP_STOREI1_MEMBASE_REG:
2568 EMIT_STORE_MEMBASE_REG (ins, stb);
2570 case OP_STOREI2_MEMBASE_REG:
2571 EMIT_STORE_MEMBASE_REG (ins, sth);
2573 case OP_STORE_MEMBASE_REG:
2574 case OP_STOREI4_MEMBASE_REG:
2575 EMIT_STORE_MEMBASE_REG (ins, st);
2577 case OP_STOREI8_MEMBASE_REG:
2578 /* Only used by OP_MEMSET */
2579 EMIT_STORE_MEMBASE_REG (ins, std);
2584 sparc_ld (code, ins->inst_p0, sparc_g0, ins->dreg);
2586 /* The cast IS BAD (maybe). But it needs to be done... */
2588 sparc_set (code, (guint)ins->inst_p0, ins->dreg);
2589 sparc_ld (code, ins->dreg, sparc_g0, ins->dreg);
2591 case OP_LOAD_MEMBASE:
2592 case OP_LOADI4_MEMBASE:
2593 case OP_LOADU4_MEMBASE:
2594 EMIT_LOAD_MEMBASE (ins, ld);
2596 case OP_LOADU1_MEMBASE:
2597 EMIT_LOAD_MEMBASE (ins, ldub);
2599 case OP_LOADI1_MEMBASE:
2600 EMIT_LOAD_MEMBASE (ins, ldsb);
2602 case OP_LOADU2_MEMBASE:
2603 EMIT_LOAD_MEMBASE (ins, lduh);
2605 case OP_LOADI2_MEMBASE:
2606 EMIT_LOAD_MEMBASE (ins, ldsh);
2609 sparc_sll_imm (code, ins->sreg1, 24, sparc_o7);
2610 sparc_sra_imm (code, sparc_o7, 24, ins->dreg);
2613 sparc_sll_imm (code, ins->sreg1, 16, sparc_o7);
2614 sparc_sra_imm (code, sparc_o7, 16, ins->dreg);
2616 /* GCC does this one differently. Don't ask me WHY. */
2618 sparc_and_imm (code, FALSE, ins->sreg1, 0xff, ins->dreg);
2621 sparc_sll_imm (code, ins->sreg1, 16, sparc_o7);
2622 sparc_srl_imm (code, sparc_o7, 16, ins->dreg);
2625 sparc_cmp (code, ins->sreg1, ins->sreg2);
2627 case OP_COMPARE_IMM:
2628 if (sparc_is_imm13 (ins->inst_imm))
2629 sparc_cmp_imm (code, ins->sreg1, ins->inst_imm);
2631 sparc_set (code, ins->inst_imm, sparc_o7);
2632 sparc_cmp (code, ins->sreg1, sparc_o7);
2635 case OP_X86_TEST_NULL:
2636 sparc_cmp_imm (code, ins->sreg1, 0);
2640 * gdb does not like encountering 'ta 1' in the debugged code. So
2641 * instead of emitting a trap, we emit a call a C function and place a
2644 mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_ABS, mono_sparc_break);
2645 sparc_call_simple (code, 0);
2649 sparc_add (code, TRUE, ins->sreg1, ins->sreg2, ins->dreg);
2652 sparc_add (code, FALSE, ins->sreg1, ins->sreg2, ins->dreg);
2655 /* according to inssel-long32.brg, this should set cc */
2656 EMIT_ALU_IMM (ins, add, TRUE);
2659 /* according to inssel-long32.brg, this should set cc */
2660 sparc_addx (code, TRUE, ins->sreg1, ins->sreg2, ins->dreg);
2663 EMIT_ALU_IMM (ins, addx, TRUE);
2666 sparc_sub (code, TRUE, ins->sreg1, ins->sreg2, ins->dreg);
2669 sparc_sub (code, FALSE, ins->sreg1, ins->sreg2, ins->dreg);
2672 /* according to inssel-long32.brg, this should set cc */
2673 EMIT_ALU_IMM (ins, sub, TRUE);
2676 /* according to inssel-long32.brg, this should set cc */
2677 sparc_subx (code, TRUE, ins->sreg1, ins->sreg2, ins->dreg);
2680 EMIT_ALU_IMM (ins, subx, TRUE);
2683 sparc_and (code, FALSE, ins->sreg1, ins->sreg2, ins->dreg);
2686 EMIT_ALU_IMM (ins, and, FALSE);
2689 /* Sign extend sreg1 into %y */
2690 sparc_sra_imm (code, ins->sreg1, 31, sparc_o7);
2691 sparc_wry (code, sparc_o7, sparc_g0);
2692 sparc_sdiv (code, TRUE, ins->sreg1, ins->sreg2, ins->dreg);
2693 EMIT_COND_SYSTEM_EXCEPTION (code, sparc_boverflow, "ArithmeticException");
2696 sparc_wry (code, sparc_g0, sparc_g0);
2697 sparc_udiv (code, FALSE, ins->sreg1, ins->sreg2, ins->dreg);
2702 /* Transform division into a shift */
2703 for (i = 1; i < 30; ++i) {
2705 if (ins->inst_imm == imm)
2711 sparc_srl_imm (code, ins->sreg1, 31, sparc_o7);
2712 sparc_add (code, FALSE, ins->sreg1, sparc_o7, ins->dreg);
2713 sparc_sra_imm (code, ins->dreg, 1, ins->dreg);
2716 /* http://compilers.iecc.com/comparch/article/93-04-079 */
2717 sparc_sra_imm (code, ins->sreg1, 31, sparc_o7);
2718 sparc_srl_imm (code, sparc_o7, 32 - i, sparc_o7);
2719 sparc_add (code, FALSE, ins->sreg1, sparc_o7, ins->dreg);
2720 sparc_sra_imm (code, ins->dreg, i, ins->dreg);
2724 /* Sign extend sreg1 into %y */
2725 sparc_sra_imm (code, ins->sreg1, 31, sparc_o7);
2726 sparc_wry (code, sparc_o7, sparc_g0);
2727 EMIT_ALU_IMM (ins, sdiv, TRUE);
2728 EMIT_COND_SYSTEM_EXCEPTION (code, sparc_boverflow, "ArithmeticException");
2733 /* Sign extend sreg1 into %y */
2734 sparc_sra_imm (code, ins->sreg1, 31, sparc_o7);
2735 sparc_wry (code, sparc_o7, sparc_g0);
2736 sparc_sdiv (code, TRUE, ins->sreg1, ins->sreg2, sparc_o7);
2737 EMIT_COND_SYSTEM_EXCEPTION (code, sparc_boverflow, "ArithmeticException");
2738 sparc_smul (code, FALSE, ins->sreg2, sparc_o7, sparc_o7);
2739 sparc_sub (code, FALSE, ins->sreg1, sparc_o7, ins->dreg);
2742 sparc_wry (code, sparc_g0, sparc_g0);
2743 sparc_udiv (code, FALSE, ins->sreg1, ins->sreg2, sparc_o7);
2744 sparc_umul (code, FALSE, ins->sreg2, sparc_o7, sparc_o7);
2745 sparc_sub (code, FALSE, ins->sreg1, sparc_o7, ins->dreg);
2748 /* Sign extend sreg1 into %y */
2749 sparc_sra_imm (code, ins->sreg1, 31, sparc_o7);
2750 sparc_wry (code, sparc_o7, sparc_g0);
2751 if (!sparc_is_imm13 (ins->inst_imm)) {
2752 sparc_set (code, ins->inst_imm, sparc_g1);
2753 sparc_sdiv (code, TRUE, ins->sreg1, sparc_g1, sparc_o7);
2754 EMIT_COND_SYSTEM_EXCEPTION (code, sparc_boverflow, "ArithmeticException");
2755 sparc_smul (code, FALSE, sparc_o7, sparc_g1, sparc_o7);
2758 sparc_sdiv_imm (code, TRUE, ins->sreg1, ins->inst_imm, sparc_o7);
2759 EMIT_COND_SYSTEM_EXCEPTION (code, sparc_boverflow, "ArithmeticException");
2760 sparc_smul_imm (code, FALSE, sparc_o7, ins->inst_imm, sparc_o7);
2762 sparc_sub (code, FALSE, ins->sreg1, sparc_o7, ins->dreg);
2765 sparc_or (code, FALSE, ins->sreg1, ins->sreg2, ins->dreg);
2768 EMIT_ALU_IMM (ins, or, FALSE);
2771 sparc_xor (code, FALSE, ins->sreg1, ins->sreg2, ins->dreg);
2774 EMIT_ALU_IMM (ins, xor, FALSE);
2777 sparc_sll (code, ins->sreg1, ins->sreg2, ins->dreg);
2780 if (sparc_is_imm13 (ins->inst_imm))
2781 sparc_sll_imm (code, ins->sreg1, ins->inst_imm, ins->dreg);
2783 sparc_set (code, ins->inst_imm, sparc_o7);
2784 sparc_sll (code, ins->sreg1, sparc_o7, ins->dreg);
2788 sparc_sra (code, ins->sreg1, ins->sreg2, ins->dreg);
2791 if (sparc_is_imm13 (ins->inst_imm))
2792 sparc_sra_imm (code, ins->sreg1, ins->inst_imm, ins->dreg);
2794 sparc_set (code, ins->inst_imm, sparc_o7);
2795 sparc_sra (code, ins->sreg1, sparc_o7, ins->dreg);
2799 if (sparc_is_imm13 (ins->inst_imm))
2800 sparc_srl_imm (code, ins->sreg1, ins->inst_imm, ins->dreg);
2802 sparc_set (code, ins->inst_imm, sparc_o7);
2803 sparc_srl (code, ins->sreg1, sparc_o7, ins->dreg);
2807 sparc_srl (code, ins->sreg1, ins->sreg2, ins->dreg);
2810 /* can't use sparc_not */
2811 sparc_xnor (code, FALSE, ins->sreg1, sparc_g0, ins->dreg);
2814 /* can't use sparc_neg */
2815 sparc_sub (code, FALSE, sparc_g0, ins->sreg1, ins->dreg);
2818 sparc_smul (code, FALSE, ins->sreg1, ins->sreg2, ins->dreg);
2823 /* Transform multiplication into a shift */
2824 for (i = 1; i < 30; ++i) {
2826 if (ins->inst_imm == imm)
2830 sparc_sll_imm (code, ins->sreg1, i, ins->dreg);
2832 EMIT_ALU_IMM (ins, smul, FALSE);
2836 sparc_smul (code, TRUE, ins->sreg1, ins->sreg2, ins->dreg);
2837 sparc_rdy (code, sparc_g1);
2838 sparc_sra_imm (code, ins->dreg, 31, sparc_o7);
2839 sparc_cmp (code, sparc_g1, sparc_o7);
2840 EMIT_COND_SYSTEM_EXCEPTION (ins, sparc_bne, "OverflowException");
2842 case CEE_MUL_OVF_UN:
2843 sparc_umul (code, TRUE, ins->sreg1, ins->sreg2, ins->dreg);
2844 sparc_rdy (code, sparc_o7);
2845 sparc_cmp (code, sparc_o7, sparc_g0);
2846 EMIT_COND_SYSTEM_EXCEPTION (ins, sparc_bne, "OverflowException");
2850 sparc_set (code, ins->inst_c0, ins->dreg);
2853 mono_add_patch_info (cfg, offset, (MonoJumpInfoType)ins->inst_i1, ins->inst_p0);
2854 sparc_set (code, 0xffffff, ins->dreg);
2860 if (ins->sreg1 != ins->dreg)
2861 sparc_mov_reg_reg (code, ins->sreg1, ins->dreg);
2864 if (cfg->method->save_lmf)
2867 code = emit_load_volatile_arguments (cfg, code);
2868 mono_add_patch_info (cfg, (guint8*)code - cfg->native_code, MONO_PATCH_INFO_METHOD_JUMP, ins->inst_p0);
2869 sparc_set (code, 0xffffff, sparc_o7);
2870 sparc_jmpl (code, sparc_o7, sparc_g0, sparc_g0);
2871 /* Restore parent frame in delay slot */
2872 sparc_restore_imm (code, sparc_g0, 0, sparc_g0);
2875 /* ensure ins->sreg1 is not NULL */
2876 sparc_ld_imm (code, ins->sreg1, 0, sparc_g0);
2879 sparc_add_imm (code, FALSE, sparc_fp, cfg->sig_cookie, sparc_o7);
2880 sparc_st_imm (code, sparc_o7, ins->sreg1, 0);
2887 call = (MonoCallInst*)ins;
2888 g_assert (!call->virtual);
2889 code = emit_save_sp_to_lmf (cfg, code);
2890 if (ins->flags & MONO_INST_HAS_METHOD)
2891 mono_add_patch_info (cfg, (guint8*)code - cfg->native_code, MONO_PATCH_INFO_METHOD, call->method);
2893 mono_add_patch_info (cfg, (guint8*)code - cfg->native_code, MONO_PATCH_INFO_ABS, call->fptr);
2894 sparc_call_simple (code, 0);
2897 code = emit_vret_token (ins, code);
2898 code = emit_move_return_value (ins, code);
2903 case OP_VOIDCALL_REG:
2905 call = (MonoCallInst*)ins;
2906 code = emit_save_sp_to_lmf (cfg, code);
2907 sparc_jmpl (code, ins->sreg1, sparc_g0, sparc_callsite);
2909 * We emit a special kind of nop in the delay slot to tell the
2910 * trampoline code that this is a virtual call, thus an unbox
2911 * trampoline might need to be called.
2914 sparc_or_imm (code, FALSE, sparc_g0, 0xca, sparc_g0);
2918 code = emit_vret_token (ins, code);
2919 code = emit_move_return_value (ins, code);
2921 case OP_FCALL_MEMBASE:
2922 case OP_LCALL_MEMBASE:
2923 case OP_VCALL_MEMBASE:
2924 case OP_VOIDCALL_MEMBASE:
2925 case OP_CALL_MEMBASE:
2926 call = (MonoCallInst*)ins;
2927 g_assert (sparc_is_imm13 (ins->inst_offset));
2928 code = emit_save_sp_to_lmf (cfg, code);
2929 sparc_ld_imm (code, ins->inst_basereg, ins->inst_offset, sparc_o7);
2930 sparc_jmpl (code, sparc_o7, sparc_g0, sparc_callsite);
2932 sparc_or_imm (code, FALSE, sparc_g0, 0xca, sparc_g0);
2936 code = emit_vret_token (ins, code);
2937 code = emit_move_return_value (ins, code);
2940 if (cfg->method->signature->ret->type == MONO_TYPE_R4)
2941 sparc_fdtos (code, ins->sreg1, sparc_f0);
2943 sparc_fmovs (code, ins->sreg1, ins->dreg);
2944 sparc_fmovs (code, ins->sreg1 + 1, ins->dreg + 1);
2948 g_assert_not_reached ();
2951 /* Keep alignment */
2952 sparc_add_imm (code, FALSE, ins->sreg1, MONO_ARCH_FRAME_ALIGNMENT - 1, ins->dreg);
2953 sparc_set (code, ~(MONO_ARCH_FRAME_ALIGNMENT - 1), sparc_o7);
2954 sparc_and (code, FALSE, ins->dreg, sparc_o7, ins->dreg);
2955 sparc_sub (code, FALSE, sparc_sp, ins->dreg, ins->dreg);
2956 /* Keep %sp valid at all times */
2957 sparc_mov_reg_reg (code, ins->dreg, sparc_sp);
2958 g_assert (sparc_is_imm13 (cfg->arch.localloc_offset));
2959 sparc_add_imm (code, FALSE, ins->dreg, cfg->arch.localloc_offset, ins->dreg);
2961 case OP_SPARC_LOCALLOC_IMM: {
2962 guint32 offset = ins->inst_c0;
2963 offset = ALIGN_TO (offset, MONO_ARCH_FRAME_ALIGNMENT);
2964 if (sparc_is_imm13 (offset))
2965 sparc_sub_imm (code, FALSE, sparc_sp, offset, sparc_sp);
2967 sparc_set (code, offset, sparc_o7);
2968 sparc_sub (code, FALSE, sparc_sp, sparc_o7, sparc_sp);
2970 sparc_mov_reg_reg (code, sparc_sp, ins->dreg);
2971 g_assert (sparc_is_imm13 (cfg->arch.localloc_offset));
2972 sparc_add_imm (code, FALSE, ins->dreg, cfg->arch.localloc_offset, ins->dreg);
2976 /* The return is done in the epilog */
2977 g_assert_not_reached ();
2980 mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_INTERNAL_METHOD,
2981 (gpointer)"mono_arch_throw_exception");
2982 sparc_call_simple (code, 0);
2984 sparc_mov_reg_reg (code, ins->sreg1, sparc_o0);
2986 case OP_START_HANDLER: {
2988 * The START_HANDLER instruction marks the beginning of a handler
2989 * block. It is called using a call instruction, so %o7 contains
2990 * the return address. Since the handler executes in the same stack
2991 * frame as the method itself, we can't use save/restore to save
2992 * the return address. Instead, we save it into a dedicated
2995 MonoInst *spvar = mono_find_spvar_for_region (cfg, bb->region);
2996 if (!sparc_is_imm13 (spvar->inst_offset)) {
2997 sparc_set (code, spvar->inst_offset, sparc_g0);
2998 sparc_st (code, sparc_o7, spvar->inst_basereg, sparc_g0);
3001 sparc_st_imm (code, sparc_o7, spvar->inst_basereg, spvar->inst_offset);
3004 case OP_ENDFILTER: {
3005 MonoInst *spvar = mono_find_spvar_for_region (cfg, bb->region);
3006 if (!sparc_is_imm13 (spvar->inst_offset)) {
3007 sparc_set (code, spvar->inst_offset, sparc_g0);
3008 sparc_ld (code, spvar->inst_basereg, sparc_g0, sparc_o7);
3011 sparc_ld_imm (code, spvar->inst_basereg, spvar->inst_offset, sparc_o7);
3012 sparc_jmpl_imm (code, sparc_o7, 8, sparc_g0);
3014 sparc_mov_reg_reg (code, ins->sreg1, sparc_o0);
3017 case CEE_ENDFINALLY: {
3018 MonoInst *spvar = mono_find_spvar_for_region (cfg, bb->region);
3019 if (!sparc_is_imm13 (spvar->inst_offset)) {
3020 sparc_set (code, spvar->inst_offset, sparc_g0);
3021 sparc_ld (code, spvar->inst_basereg, sparc_g0, sparc_o7);
3024 sparc_ld_imm (code, spvar->inst_basereg, spvar->inst_offset, sparc_o7);
3025 sparc_jmpl_imm (code, sparc_o7, 8, sparc_g0);
3029 case OP_CALL_HANDLER:
3030 mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_BB, ins->inst_target_bb);
3031 sparc_call_simple (code, 0);
3035 ins->inst_c0 = (guint8*)code - cfg->native_code;
3038 //g_print ("target: %p, next: %p, curr: %p, last: %p\n", ins->inst_target_bb, bb->next_bb, ins, bb->last_ins);
3039 if ((ins->inst_target_bb == bb->next_bb) && ins == bb->last_ins)
3041 if (ins->flags & MONO_INST_BRLABEL) {
3042 if (ins->inst_i0->inst_c0) {
3043 gint32 disp = (ins->inst_i0->inst_c0 - ((guint8*)code - cfg->native_code)) >> 2;
3044 g_assert (sparc_is_imm22 (disp));
3045 sparc_branch (code, 1, sparc_ba, disp);
3047 mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_LABEL, ins->inst_i0);
3048 sparc_branch (code, 1, sparc_ba, 0);
3051 if (ins->inst_target_bb->native_offset) {
3052 gint32 disp = (ins->inst_target_bb->native_offset - ((guint8*)code - cfg->native_code)) >> 2;
3053 g_assert (sparc_is_imm22 (disp));
3054 sparc_branch (code, 1, sparc_ba, disp);
3056 mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_BB, ins->inst_target_bb);
3057 sparc_branch (code, 1, sparc_ba, 0);
3063 sparc_jmp (code, ins->sreg1, sparc_g0);
3071 //if (cfg->opt & MONO_OPT_CMOV) {
3073 sparc_clr_reg (code, ins->dreg);
3074 sparc_movcc_imm (code, sparc_icc, opcode_to_sparc_cond (ins->opcode), 1, ins->dreg);
3077 sparc_clr_reg (code, ins->dreg);
3078 sparc_branch (code, 1, opcode_to_sparc_cond (ins->opcode), 2);
3080 sparc_set (code, 1, ins->dreg);
3083 case OP_COND_EXC_EQ:
3084 case OP_COND_EXC_NE_UN:
3085 case OP_COND_EXC_LT:
3086 case OP_COND_EXC_LT_UN:
3087 case OP_COND_EXC_GT:
3088 case OP_COND_EXC_GT_UN:
3089 case OP_COND_EXC_GE:
3090 case OP_COND_EXC_GE_UN:
3091 case OP_COND_EXC_LE:
3092 case OP_COND_EXC_LE_UN:
3093 case OP_COND_EXC_OV:
3094 case OP_COND_EXC_NO:
3096 case OP_COND_EXC_NC:
3097 EMIT_COND_SYSTEM_EXCEPTION (ins, opcode_to_sparc_cond (ins->opcode), ins->inst_p1);
3110 EMIT_COND_BRANCH_PREDICTED (ins, opcode_to_sparc_cond (ins->opcode), 1, 1);
3112 EMIT_COND_BRANCH (ins, opcode_to_sparc_cond (ins->opcode), 1, 1);
3116 /* We misuse the macro arguments */
3117 EMIT_COND_BRANCH_BPR (ins, brz, 1, 1, 1);
3119 case OP_SPARC_BRLEZ:
3120 EMIT_COND_BRANCH_BPR (ins, brlez, 1, 1, 1);
3123 EMIT_COND_BRANCH_BPR (ins, brlz, 1, 1, 1);
3126 EMIT_COND_BRANCH_BPR (ins, brnz, 1, 1, 1);
3129 EMIT_COND_BRANCH_BPR (ins, brgz, 1, 1, 1);
3131 case OP_SPARC_BRGEZ:
3132 EMIT_COND_BRANCH_BPR (ins, brgez, 1, 1, 1);
3135 /* floating point opcodes */
3137 mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_R8, ins->inst_p0);
3138 sparc_sethi (code, 0, sparc_o7);
3139 sparc_lddf_imm (code, sparc_o7, 0, ins->dreg);
3142 mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_R4, ins->inst_p0);
3143 sparc_sethi (code, 0, sparc_o7);
3144 sparc_ldf_imm (code, sparc_o7, 0, ins->dreg);
3146 /* Extend to double */
3147 sparc_fstod (code, ins->dreg, ins->dreg);
3149 case OP_STORER8_MEMBASE_REG:
3150 if (!sparc_is_imm13 (ins->inst_offset + 4)) {
3151 sparc_set (code, ins->inst_offset, sparc_o7);
3152 if (ins->inst_offset % 8) {
3154 sparc_add (code, FALSE, ins->inst_destbasereg, sparc_o7, sparc_o7);
3155 sparc_stf (code, ins->sreg1, sparc_o7, sparc_g0);
3156 sparc_stf_imm (code, ins->sreg1 + 1, sparc_o7, 4);
3158 sparc_stdf (code, ins->sreg1, ins->inst_destbasereg, sparc_o7);
3161 if (ins->inst_offset % 8) {
3163 sparc_stf_imm (code, ins->sreg1, ins->inst_destbasereg, ins->inst_offset);
3164 sparc_stf_imm (code, ins->sreg1 + 1, ins->inst_destbasereg, ins->inst_offset + 4);
3166 sparc_stdf_imm (code, ins->sreg1, ins->inst_destbasereg, ins->inst_offset);
3169 case OP_LOADR8_MEMBASE:
3170 g_assert ((ins->inst_offset % 8) == 0);
3171 EMIT_LOAD_MEMBASE (ins, lddf);
3173 case OP_STORER4_MEMBASE_REG:
3174 /* This requires a double->single conversion */
3175 sparc_fdtos (code, ins->sreg1, sparc_f0);
3176 if (!sparc_is_imm13 (ins->inst_offset)) {
3177 sparc_set (code, ins->inst_offset, sparc_o7);
3178 sparc_stf (code, sparc_f0, ins->inst_destbasereg, sparc_o7);
3181 sparc_stf_imm (code, sparc_f0, ins->inst_destbasereg, ins->inst_offset);
3183 case OP_LOADR4_MEMBASE:
3184 EMIT_LOAD_MEMBASE (ins, ldf);
3185 /* Extend to double */
3186 sparc_fstod (code, ins->dreg, ins->dreg);
3189 sparc_fmovs (code, ins->sreg1, ins->dreg);
3190 sparc_fmovs (code, ins->sreg1 + 1, ins->dreg + 1);
3193 guint32 offset = mono_spillvar_offset_float (cfg, 0);
3194 if (!sparc_is_imm13 (offset))
3196 sparc_st_imm (code, ins->sreg1, sparc_sp, offset);
3197 sparc_ldf_imm (code, sparc_sp, offset, sparc_f0);
3198 sparc_fitos (code, sparc_f0, sparc_f0);
3199 sparc_fstod (code, sparc_f0, ins->dreg);
3203 guint32 offset = mono_spillvar_offset_float (cfg, 0);
3204 if (!sparc_is_imm13 (offset))
3206 sparc_st_imm (code, ins->sreg1, sparc_sp, offset);
3207 sparc_ldf_imm (code, sparc_sp, offset, sparc_f0);
3208 sparc_fitod (code, sparc_f0, ins->dreg);
3211 case OP_FCONV_TO_I1:
3212 case OP_FCONV_TO_U1:
3213 case OP_FCONV_TO_I2:
3214 case OP_FCONV_TO_U2:
3215 case OP_FCONV_TO_I4:
3217 case OP_FCONV_TO_U4:
3218 case OP_FCONV_TO_U: {
3219 guint32 offset = mono_spillvar_offset_float (cfg, 0);
3220 if (!sparc_is_imm13 (offset))
3222 /* FIXME: Is having the same code for all of these ok ? */
3223 sparc_fdtoi (code, ins->sreg1, sparc_f0);
3224 sparc_stdf_imm (code, sparc_f0, sparc_sp, offset);
3225 sparc_ld_imm (code, sparc_sp, offset, ins->dreg);
3228 case OP_FCONV_TO_I8:
3229 case OP_FCONV_TO_U8:
3231 g_assert_not_reached ();
3235 g_assert_not_reached ();
3237 case OP_LCONV_TO_R_UN: {
3239 g_assert_not_reached ();
3242 case OP_LCONV_TO_OVF_I: {
3243 guint32 *br [3], *label [1];
3246 * Valid ints: 0xffffffff:8000000 to 00000000:0x7f000000
3248 sparc_cmp_imm (code, ins->sreg1, 0);
3250 sparc_branch (code, 1, sparc_bneg, 0);
3254 /* ms word must be 0 */
3255 sparc_cmp_imm (code, ins->sreg2, 0);
3257 sparc_branch (code, 1, sparc_be, 0);
3262 EMIT_COND_SYSTEM_EXCEPTION (ins, sparc_ba, "OverflowException");
3265 sparc_patch (br [0], code);
3267 /* ms word must 0xfffffff */
3268 sparc_cmp_imm (code, ins->sreg2, -1);
3270 sparc_branch (code, 1, sparc_bne, 0);
3271 sparc_patch (br [2], label [0]);
3274 sparc_patch (br [1], code);
3275 if (ins->sreg1 != ins->dreg)
3276 sparc_mov_reg_reg (code, ins->sreg1, ins->dreg);
3280 sparc_faddd (code, ins->sreg1, ins->sreg2, ins->dreg);
3283 sparc_fsubd (code, ins->sreg1, ins->sreg2, ins->dreg);
3286 sparc_fmuld (code, ins->sreg1, ins->sreg2, ins->dreg);
3289 sparc_fdivd (code, ins->sreg1, ins->sreg2, ins->dreg);
3292 sparc_fnegs (code, ins->sreg1, ins->dreg);
3295 sparc_fdivd (code, ins->sreg1, ins->sreg2, sparc_f0);
3296 sparc_fmuld (code, ins->sreg2, sparc_f0, sparc_f0);
3297 sparc_fsubd (code, ins->sreg1, sparc_f0, ins->dreg);
3300 sparc_fcmpd (code, ins->sreg1, ins->sreg2);
3307 sparc_fcmpd (code, ins->sreg1, ins->sreg2);
3308 sparc_clr_reg (code, ins->dreg);
3309 switch (ins->opcode) {
3312 sparc_fbranch (code, 1, opcode_to_sparc_cond (ins->opcode), 4);
3314 sparc_set (code, 1, ins->dreg);
3315 sparc_fbranch (code, 1, sparc_fbu, 2);
3317 sparc_set (code, 1, ins->dreg);
3320 sparc_fbranch (code, 1, opcode_to_sparc_cond (ins->opcode), 2);
3322 sparc_set (code, 1, ins->dreg);
3328 EMIT_FLOAT_COND_BRANCH (ins, opcode_to_sparc_cond (ins->opcode), 1, 1);
3331 /* clt.un + brfalse */
3333 sparc_fbranch (code, 1, sparc_fbul, 0);
3336 EMIT_FLOAT_COND_BRANCH (ins, sparc_fba, 1, 1);
3337 sparc_patch ((guint8*)p, (guint8*)code);
3341 /* cgt.un + brfalse */
3343 sparc_fbranch (code, 1, sparc_fbug, 0);
3346 EMIT_FLOAT_COND_BRANCH (ins, sparc_fba, 1, 1);
3347 sparc_patch ((guint8*)p, (guint8*)code);
3351 EMIT_FLOAT_COND_BRANCH (ins, sparc_fbne, 1, 1);
3352 EMIT_FLOAT_COND_BRANCH (ins, sparc_fbu, 1, 1);
3355 EMIT_FLOAT_COND_BRANCH (ins, sparc_fbl, 1, 1);
3356 EMIT_FLOAT_COND_BRANCH (ins, sparc_fbu, 1, 1);
3359 EMIT_FLOAT_COND_BRANCH (ins, sparc_fbg, 1, 1);
3360 EMIT_FLOAT_COND_BRANCH (ins, sparc_fbu, 1, 1);
3363 EMIT_FLOAT_COND_BRANCH (ins, sparc_fbge, 1, 1);
3364 EMIT_FLOAT_COND_BRANCH (ins, sparc_fbu, 1, 1);
3367 EMIT_FLOAT_COND_BRANCH (ins, sparc_fble, 1, 1);
3368 EMIT_FLOAT_COND_BRANCH (ins, sparc_fbu, 1, 1);
3370 case CEE_CKFINITE: {
3371 guint32 offset = mono_spillvar_offset_float (cfg, 0);
3372 if (!sparc_is_imm13 (offset))
3374 sparc_stdf_imm (code, ins->sreg1, sparc_sp, offset);
3375 sparc_lduh_imm (code, sparc_sp, offset, sparc_o7);
3376 sparc_srl_imm (code, sparc_o7, 4, sparc_o7);
3377 sparc_and_imm (code, FALSE, sparc_o7, 2047, sparc_o7);
3378 sparc_cmp_imm (code, sparc_o7, 2047);
3379 EMIT_COND_SYSTEM_EXCEPTION (ins, sparc_be, "ArithmeticException");
3380 sparc_fmovs (code, ins->sreg1, ins->dreg);
3381 sparc_fmovs (code, ins->sreg1 + 1, ins->dreg + 1);
3386 g_warning ("unknown opcode %s in %s()\n", mono_inst_name (ins->opcode), __FUNCTION__);
3388 g_warning ("%s:%d: unknown opcode %s\n", __FILE__, __LINE__, mono_inst_name (ins->opcode));
3390 g_assert_not_reached ();
3393 if ((((guint8*)code) - code_start) > max_len) {
3394 g_warning ("wrong maximal instruction length of instruction %s (expected %d, got %d)",
3395 mono_inst_name (ins->opcode), max_len, ((guint8*)code) - code_start);
3396 g_assert_not_reached ();
3406 cfg->code_len = (guint8*)code - cfg->native_code;
/*
 * mono_arch_register_lowlevel_calls:
 *
 *   Register the SPARC-specific runtime helpers as JIT icalls so JITted
 * code can reference them by name through the icall machinery.
 */
3410 mono_arch_register_lowlevel_calls (void)
3412 mono_register_jit_icall (mono_sparc_break, "mono_sparc_break", NULL, TRUE);
3413 mono_register_jit_icall (mono_arch_get_lmf_addr, "mono_arch_get_lmf_addr", NULL, TRUE);
/*
 * mono_arch_patch_code:
 *
 *   Walk the jump-info list JI for METHOD and fix up each recorded patch
 * site in the native CODE buffer. Generic target resolution is delegated to
 * mono_resolve_patch_target (); the switch below handles the patch kinds
 * that need SPARC-specific preprocessing before the final sparc_patch ()
 * of the instruction at IP.
 */
3417 mono_arch_patch_code (MonoMethod *method, MonoDomain *domain, guint8 *code, MonoJumpInfo *ji, gboolean run_cctors)
3419 MonoJumpInfo *patch_info;
3421 /* FIXME: Move part of this to arch independent code */
3422 for (patch_info = ji; patch_info; patch_info = patch_info->next) {
/* ip = absolute address of the instruction to patch (ip.i is an offset) */
3423 unsigned char *ip = patch_info->ip.i + code;
3424 const unsigned char *target = NULL;
3426 target = mono_resolve_patch_target (method, domain, code, patch_info, run_cctors);
3428 switch (patch_info->type) {
3429 case MONO_PATCH_INFO_CLASS_INIT: {
3430 unsigned char *ip2 = ip;
3431 /* Might already been changed to a nop */
3432 sparc_call_simple (ip2, 0);
3435 case MONO_PATCH_INFO_R4: {
/* Copy the float constant to freshly allocated memory so the patched
 * code has a stable address to load it from. */
3436 float *f = g_new0 (float, 1);
3437 *f = *(float*)patch_info->data.target;
3441 case MONO_PATCH_INFO_R8: {
/* Same idea as the R4 case, for double constants. */
3442 double *d = g_new0 (double, 1);
3443 *d = *(double*)patch_info->data.target;
/* Finally rewrite the instruction at ip to reference target. */
3450 sparc_patch (ip, target);
3455 * Allow tracing to work with this interface (with an optional argument)
3459 * This may be needed on some archs or for debugging support.
/*
 * mono_arch_instrument_mem_needs:
 *
 *   Report the extra stack space and code space (in bytes, via *stack and
 * *code) that the tracing instrumentation emitted by
 * mono_arch_instrument_prolog/epilog may require for METHOD.
 */
3462 mono_arch_instrument_mem_needs (MonoMethod *method, int *stack, int *code)
3464 /* no stack room needed now (may be needed for FASTCALL-trace support) */
3466 /* split prolog-epilog requirements? */
3467 *code = 256; /* max bytes needed: check this number */
/*
 * mono_arch_instrument_prolog:
 *
 *   Emit method-enter tracing code at P: spill the six incoming integer
 * argument registers %i0-%i5 to their reserved caller-frame slots
 * (%fp+68..%fp+88), load the MonoMethod and the frame pointer into
 * %o0/%o1, then call FUNC through a patchable sethi/jmpl pair.
 * Returns the updated code pointer.
 */
3471 mono_arch_instrument_prolog (MonoCompile *cfg, void *func, void *p, gboolean enable_arguments)
3473 int stack, code_size;
3474 guint32 *code = (guint32*)p;
3476 /* Save registers to stack */
3477 sparc_st_imm (code, sparc_i0, sparc_fp, 68);
3478 sparc_st_imm (code, sparc_i1, sparc_fp, 72);
3479 sparc_st_imm (code, sparc_i2, sparc_fp, 76);
3480 sparc_st_imm (code, sparc_i3, sparc_fp, 80);
3481 sparc_st_imm (code, sparc_i4, sparc_fp, 84);
3482 sparc_st_imm (code, sparc_i5, sparc_fp, 88);
/* Arguments for FUNC: the method being entered and the current frame. */
3484 sparc_set (code, cfg->method, sparc_o0);
3485 sparc_mov_reg_reg (code, sparc_fp, sparc_o1);
/* The sethi/jmpl pair is patched later with FUNC's absolute address. */
3487 mono_add_patch_info (cfg, (guint8*)code-cfg->native_code, MONO_PATCH_INFO_ABS, func);
3488 sparc_sethi (code, 0, sparc_o7);
3489 sparc_jmpl_imm (code, sparc_o7, 0, sparc_callsite);
/* Sanity check against the size estimate used for code buffer sizing. */
3492 mono_arch_instrument_mem_needs (cfg->method, &stack, &code_size);
3494 g_assert ((code - (guint32*)p) <= (code_size * 4));
/*
 * mono_arch_instrument_epilog:
 *
 *   Emit method-leave tracing code at P. The return value (still live in
 * %i0/%i1 or %f0 at this point) is first saved to the stack and copied into
 * the outgoing registers so FUNC can inspect it, then FUNC is called, and
 * finally the return value is reloaded so the real epilog sees it intact.
 * save_mode encodes where the return value lives, based on the
 * method's return type.
 */
3508 mono_arch_instrument_epilog (MonoCompile *cfg, void *func, void *p, gboolean enable_arguments)
3511 int save_mode = SAVE_NONE;
3512 MonoMethod *method = cfg->method;
3513 int rtype = method->signature->ret->type;
3517 case MONO_TYPE_VOID:
3518 /* special case string .ctor icall */
/* NOTE(review): strcmp () != 0 means the name is NOT ".ctor", which
 * contradicts the comment above — condition may be missing a '!';
 * confirm against upstream. */
3519 if (strcmp (".ctor", method->name) && method->klass == mono_defaults.string_class)
3520 save_mode = SAVE_ONE;
3522 save_mode = SAVE_NONE;
/* 64-bit results occupy the %i0/%i1 register pair. */
3526 save_mode = SAVE_TWO;
3530 save_mode = SAVE_FP;
3532 case MONO_TYPE_VALUETYPE:
/* Enums are handled as their underlying integral type. */
3533 if (method->signature->ret->data.klass->enumtype) {
3534 rtype = method->signature->ret->data.klass->enum_basetype->type;
3537 save_mode = SAVE_STRUCT;
3540 save_mode = SAVE_ONE;
3544 /* Save the result to the stack and also put it into the output registers */
3546 switch (save_mode) {
/* NOTE(review): the second store below writes %i0 again where %i1 would
 * be expected for the high/low pair — looks like a copy/paste slip;
 * confirm against upstream. */
3548 sparc_st_imm (code, sparc_i0, sparc_fp, 68);
3549 sparc_st_imm (code, sparc_i0, sparc_fp, 72);
3550 sparc_mov_reg_reg (code, sparc_i0, sparc_o1);
3551 sparc_mov_reg_reg (code, sparc_i1, sparc_o2);
3554 sparc_st_imm (code, sparc_i0, sparc_fp, 68);
3555 sparc_mov_reg_reg (code, sparc_i0, sparc_o1);
/* SAVE_FP: spill %f0/%f1 as a double, then reload the two word halves
 * into %o1/%o2 for FUNC. NOTE(review): both loads read %fp+72; the
 * second would be expected at %fp+76 — confirm against upstream. */
3558 sparc_stdf_imm (code, sparc_f0, sparc_fp, 72);
3559 sparc_ld_imm (code, sparc_fp, 72, sparc_o1);
3560 sparc_ld_imm (code, sparc_fp, 72, sparc_o2);
/* SAVE_STRUCT: pass the hidden struct-return pointer (at %fp+64). */
3563 sparc_ld_imm (code, sparc_fp, 64, sparc_o1);
3570 sparc_set (code, cfg->method, sparc_o0);
/* Patchable call to FUNC, as in mono_arch_instrument_prolog (). */
3572 mono_add_patch_info (cfg, (guint8*)code-cfg->native_code, MONO_PATCH_INFO_ABS, func);
3573 sparc_sethi (code, 0, sparc_o7);
3574 sparc_jmpl_imm (code, sparc_o7, 0, sparc_callsite);
3577 /* Restore result */
3579 switch (save_mode) {
/* NOTE(review): second load targets %i0 again instead of %i1 —
 * mirrors the suspicious store above; confirm against upstream. */
3581 sparc_ld_imm (code, sparc_fp, 68, sparc_i0);
3582 sparc_ld_imm (code, sparc_fp, 72, sparc_i0);
3585 sparc_ld_imm (code, sparc_fp, 68, sparc_i0);
3588 sparc_lddf_imm (code, sparc_fp, 72, sparc_f0);
/*
 * mono_arch_max_epilog_size:
 *
 *   Return a conservative upper bound (in bytes) on the code the epilog
 * for CFG can emit, so the native code buffer can be sized up front.
 * Includes extra room for LMF restoration, call tracing, profiling, and
 * one exception-throw stub per MONO_PATCH_INFO_EXC patch.
 */
3599 mono_arch_max_epilog_size (MonoCompile *cfg)
3601 int exc_count = 0, max_epilog_size = 16 + 20*4;
3602 MonoJumpInfo *patch_info;
3604 if (cfg->method->save_lmf)
3605 max_epilog_size += 128;
3607 if (mono_jit_trace_calls != NULL)
3608 max_epilog_size += 50;
3610 if (cfg->prof_options & MONO_PROFILE_ENTER_LEAVE)
3611 max_epilog_size += 50;
3613 /* count the number of exception infos */
3615 for (patch_info = cfg->patch_info; patch_info; patch_info = patch_info->next) {
3616 if (patch_info->type == MONO_PATCH_INFO_EXC)
3621 * make sure we have enough space for exceptions
/* 24 bytes per throw stub (set args + call, see mono_arch_emit_epilog). */
3623 max_epilog_size += exc_count * 24;
3625 return max_epilog_size;
/*
 * mono_arch_emit_prolog:
 *
 *   Emit the method prolog for CFG: compute the total frame size, emit the
 * SPARC 'save' instruction that allocates the frame and rotates the
 * register window, then move/copy the incoming arguments into the stack
 * locations or registers the register allocator assigned them, and finally
 * set up the LMF (Last Managed Frame) and optional call tracing.
 */
3629 mono_arch_emit_prolog (MonoCompile *cfg)
3631 MonoMethod *method = cfg->method;
3632 MonoMethodSignature *sig;
3638 cfg->code_size = 256;
3639 code = cfg->native_code = g_malloc (cfg->code_size);
3641 /* FIXME: Generate intermediate code instead */
/* Frame layout: locals (cfg->stack_offset) + 64-byte register save area
 * + 4 bytes for the hidden struct-return pointer + outgoing param area. */
3643 offset = cfg->stack_offset;
3644 offset += 64; /* register save area */
3645 offset += 4; /* struct/union return pointer */
3647 /* add parameter area size for called functions */
3648 if (cfg->param_area < 24)
3649 /* Reserve space for the first 6 arguments even if it is unused */
3652 offset += cfg->param_area;
3654 /* align the stack size to 8 bytes */
3655 offset = ALIGN_TO (offset, MONO_ARCH_FRAME_ALIGNMENT);
3658 * localloc'd memory is stored between the local variables (whose
3659 * size is given by cfg->stack_offset), and between the space reserved
3662 cfg->arch.localloc_offset = offset - cfg->stack_offset;
3664 cfg->stack_offset = offset;
/* 'save' takes a signed 13-bit immediate; larger frames need the size
 * materialized in a register first. */
3666 if (!sparc_is_imm13 (- cfg->stack_offset)) {
3667 /* Can't use sparc_o7 here, since we're still in the caller's frame */
3668 sparc_set (code, (- cfg->stack_offset), sparc_g1);
3669 sparc_save (code, sparc_sp, sparc_g1, sparc_sp);
3672 sparc_save_imm (code, sparc_sp, - cfg->stack_offset, sparc_sp);
/* Debugging aid: break into the debugger for this particular test. */
3674 if (strstr (cfg->method->name, "test_marshal_struct")) {
3675 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_ABS, mono_sparc_break);
3676 sparc_call_simple (code, 0);
3680 sig = method->signature;
3682 cinfo = get_call_info (sig, FALSE);
3684 /* Keep in sync with emit_load_volatile_arguments */
3685 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
3686 ArgInfo *ainfo = cinfo->args + i;
3687 guint32 stack_offset;
3689 inst = cfg->varinfo [i];
3691 if (sig->hasthis && (i == 0))
3692 arg_type = mono_defaults.object_class;
3694 arg_type = sig->params [i - sig->hasthis];
/* +68 skips the register save area + struct-return slot in the caller
 * frame, where the on-stack portion of the arguments lives. */
3696 stack_offset = ainfo->offset + 68;
3698 /* Save the split arguments so they will reside entirely on the stack */
3699 if (ainfo->storage == ArgInSplitRegStack) {
3700 /* Save the register to the stack */
3701 g_assert (inst->opcode == OP_REGOFFSET);
3702 if (!sparc_is_imm13 (stack_offset))
/* %i5 holds the register half of a split register/stack argument. */
3704 sparc_st_imm (code, sparc_i5, inst->inst_basereg, stack_offset);
3707 if (!arg_type->byref && (arg_type->type == MONO_TYPE_R8)) {
3708 /* Save the argument to a dword aligned stack location */
3710 * stack_offset contains the offset of the argument on the stack.
3711 * inst->inst_offset contains the dword aligned offset where the value
3714 if (ainfo->storage == ArgInIRegPair) {
3715 if (!sparc_is_imm13 (inst->inst_offset + 4))
3717 sparc_st_imm (code, sparc_i0 + ainfo->reg, inst->inst_basereg, inst->inst_offset);
3718 sparc_st_imm (code, sparc_i0 + ainfo->reg + 1, inst->inst_basereg, inst->inst_offset + 4);
3721 if (ainfo->storage == ArgInSplitRegStack) {
3722 if (stack_offset != inst->inst_offset) {
3723 /* stack_offset is not dword aligned, so we need to make a copy */
3724 sparc_st_imm (code, sparc_i5, inst->inst_basereg, inst->inst_offset);
3725 sparc_ld_imm (code, sparc_fp, stack_offset + 4, sparc_o7);
3726 sparc_st_imm (code, sparc_o7, inst->inst_basereg, inst->inst_offset + 4);
3730 if (ainfo->storage == ArgOnStackPair) {
3731 if (stack_offset != inst->inst_offset) {
3732 /* stack_offset is not dword aligned, so we need to make a copy */
3733 sparc_ld_imm (code, sparc_fp, stack_offset, sparc_o7);
3734 sparc_st_imm (code, sparc_o7, inst->inst_basereg, inst->inst_offset);
3735 sparc_ld_imm (code, sparc_fp, stack_offset + 4, sparc_o7);
3736 sparc_st_imm (code, sparc_o7, inst->inst_basereg, inst->inst_offset + 4);
3740 g_assert_not_reached ();
/* Scalar argument arrived in a register but was allocated to memory. */
3743 if ((ainfo->storage == ArgInIReg) && (inst->opcode != OP_REGVAR)) {
3744 /* Argument in register, but need to be saved to stack */
3745 if (!sparc_is_imm13 (stack_offset))
/* Store with the width implied by the slot's alignment. */
3747 if (stack_offset & 0x1)
3748 sparc_stb_imm (code, sparc_i0 + ainfo->reg, inst->inst_basereg, stack_offset);
3750 if (stack_offset & 0x2)
3751 sparc_sth_imm (code, sparc_i0 + ainfo->reg, inst->inst_basereg, stack_offset);
3753 sparc_st_imm (code, sparc_i0 + ainfo->reg, inst->inst_basereg, stack_offset);
3756 if ((ainfo->storage == ArgInIRegPair) && (inst->opcode != OP_REGVAR)) {
3757 /* Argument in regpair, but need to be saved to stack */
3758 if (!sparc_is_imm13 (inst->inst_offset + 4))
3760 sparc_st_imm (code, sparc_i0 + ainfo->reg, inst->inst_basereg, inst->inst_offset);
3761 sparc_st_imm (code, sparc_i0 + ainfo->reg + 1, inst->inst_basereg, inst->inst_offset + 4);
3764 if ((ainfo->storage == ArgInSplitRegStack) || (ainfo->storage == ArgOnStack))
3765 if (inst->opcode == OP_REGVAR)
3766 /* FIXME: Load the argument into memory */
/* Build the LMF so stack walks can cross into unmanaged code. */
3772 if (cfg->method->save_lmf) {
3773 gint32 lmf_offset = - cfg->arch.lmf_offset;
/* lmf->ip: current IP, filled in via an IP-relative patch. */
3776 mono_add_patch_info (cfg, (guint8*)code - cfg->native_code, MONO_PATCH_INFO_IP, NULL);
3777 sparc_set (code, 0xfffffff, sparc_o7);
3778 sparc_st_imm (code, sparc_o7, sparc_fp, lmf_offset + G_STRUCT_OFFSET (MonoLMF, ip));
3780 sparc_st_imm (code, sparc_sp, sparc_fp, lmf_offset + G_STRUCT_OFFSET (MonoLMF, sp));
3782 sparc_st_imm (code, sparc_fp, sparc_fp, lmf_offset + G_STRUCT_OFFSET (MonoLMF, ebp));
3784 /* FIXME: add a relocation for this */
3785 sparc_set (code, cfg->method, sparc_o7);
3786 sparc_st_imm (code, sparc_o7, sparc_fp, lmf_offset + G_STRUCT_OFFSET (MonoLMF, method));
/* Call mono_arch_get_lmf_addr () to link this LMF into the chain. */
3788 mono_add_patch_info (cfg, (guint8*)code - cfg->native_code, MONO_PATCH_INFO_INTERNAL_METHOD,
3789 (gpointer)"mono_arch_get_lmf_addr");
3790 sparc_call_simple (code, 0);
3793 code = (guint32*)mono_sparc_emit_save_lmf ((guint32*)code, lmf_offset);
3796 if (mono_jit_trace_calls != NULL && mono_trace_eval (method))
3797 code = mono_arch_instrument_prolog (cfg, mono_trace_enter_method, code, TRUE);
3799 cfg->code_len = code - cfg->native_code;
3801 g_assert (cfg->code_len <= cfg->code_size);
/*
 * mono_arch_emit_epilog:
 *
 *   Emit the method epilog for CFG: optional leave tracing, LMF unlinking,
 * the return + register-window restore, and then one out-of-line throw
 * stub per MONO_PATCH_INFO_EXC patch recorded during code generation.
 */
3807 mono_arch_emit_epilog (MonoCompile *cfg)
3809 MonoJumpInfo *patch_info;
3810 MonoMethod *method = cfg->method;
3813 code = cfg->native_code + cfg->code_len;
3815 if (mono_jit_trace_calls != NULL && mono_trace_eval (method))
3816 code = mono_arch_instrument_epilog (cfg, mono_trace_leave_method, code, TRUE);
/* Pop this frame's LMF off the per-thread LMF chain. */
3818 if (cfg->method->save_lmf) {
3819 gint32 lmf_offset = - cfg->arch.lmf_offset;
3821 code = mono_sparc_emit_restore_lmf (code, lmf_offset);
3825 * The sparc ABI requires that calls to functions which return a structure
/* Struct-returning pinvoke: return to %i7+12 to skip the caller's unimp
 * instruction, per the SPARC aggregate-return convention. */
3828 if (cfg->method->signature->pinvoke && MONO_TYPE_ISSTRUCT(cfg->method->signature->ret))
3829 sparc_jmpl_imm (code, sparc_i7, 12, sparc_g0);
/* Delay slot: restore deallocates the frame / rotates the window back. */
3832 sparc_restore_imm (code, sparc_g0, 0, sparc_g0);
3834 /* add code to raise exceptions */
3835 for (patch_info = cfg->patch_info; patch_info; patch_info = patch_info->next) {
3836 switch (patch_info->type) {
3837 case MONO_PATCH_INFO_EXC:
/* Point the original branch at this stub... */
3838 sparc_patch (cfg->native_code + patch_info->ip.i, code);
/* ...then load the exception name and the faulting IP as arguments
 * (both 0xffffff placeholders are patched later) and call the
 * throw helper. The patch info is reused for that call. */
3839 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_EXC_NAME, patch_info->data.target);
3840 sparc_set (code, 0xffffff, sparc_o0);
3841 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_METHOD_REL, (gpointer)patch_info->ip.i);
3842 sparc_set (code, 0xffffff, sparc_o1);
3843 patch_info->type = MONO_PATCH_INFO_INTERNAL_METHOD;
3844 patch_info->data.name = "mono_arch_throw_exception_by_name";
3845 patch_info->ip.i = code - cfg->native_code;
3846 sparc_call_simple (code, 0);
3855 cfg->code_len = code - cfg->native_code;
3857 g_assert (cfg->code_len < cfg->code_size);
/* TLS key holding the per-thread LMF address; created lazily in
 * mono_arch_setup_jit_tls_data () and read by mono_arch_get_lmf_addr ().
 * Solaris threads (thr_*) or pthreads are selected at compile time. */
3861 gboolean lmf_addr_key_inited = FALSE;
3863 #ifdef MONO_SPARC_THR_TLS
3864 thread_key_t lmf_addr_key;
3866 pthread_key_t lmf_addr_key;
/*
 * mono_arch_get_lmf_addr:
 *
 *   Return the address of the current thread's LMF pointer, fetched
 * straight from thread-local storage via lmf_addr_key.
 */
3870 mono_arch_get_lmf_addr (void)
3872 /* This is perf critical so we bypass the IO layer */
3873 /* The thr_... functions seem to be somewhat faster */
3874 #ifdef MONO_SPARC_THR_TLS
3876 thr_getspecific (lmf_addr_key, &res);
3879 return pthread_getspecific (lmf_addr_key);
/*
 * mono_arch_setup_jit_tls_data:
 *
 *   Per-thread JIT TLS initialization: optionally install an alternate
 * signal stack (so SIGSEGV handlers can run after a stack overflow),
 * lazily create the LMF TLS key, and store this thread's LMF address
 * into it.
 */
3884 mono_arch_setup_jit_tls_data (MonoJitTlsData *tls)
3886 #ifdef MONO_ARCH_SIGSEGV_ON_ALTSTACK
3888 struct sigaltstack sa;
3893 printf ("SIGALT!\n");
3894 /* Setup an alternate signal stack */
3895 tls->signal_stack = g_malloc (SIGNAL_STACK_SIZE);
3896 tls->signal_stack_size = SIGNAL_STACK_SIZE;
3899 sa.ss_sp = tls->signal_stack;
3900 sa.ss_size = SIGNAL_STACK_SIZE;
3902 g_assert (sigaltstack (&sa, NULL) == 0);
3904 sigstk.ss_sp = tls->signal_stack;
3905 sigstk.ss_size = SIGNAL_STACK_SIZE;
3906 sigstk.ss_flags = 0;
3907 g_assert (sigaltstack (&sigstk, NULL) == 0);
/* NOTE(review): this check-then-set of lmf_addr_key_inited is not
 * synchronized — racy if two threads initialize concurrently; confirm
 * callers serialize thread attach. */
3911 if (!lmf_addr_key_inited) {
3914 lmf_addr_key_inited = TRUE;
3916 #ifdef MONO_SPARC_THR_TLS
3917 res = thr_keycreate (&lmf_addr_key, NULL);
3919 res = pthread_key_create (&lmf_addr_key, NULL);
3921 g_assert (res == 0);
/* Publish this thread's LMF address for mono_arch_get_lmf_addr (). */
3925 #ifdef MONO_SPARC_THR_TLS
3926 thr_setspecific (lmf_addr_key, &tls->lmf);
3928 pthread_setspecific (lmf_addr_key, &tls->lmf);
/*
 * mono_arch_free_jit_tls_data:
 *
 *   Per-thread JIT TLS teardown, counterpart of
 * mono_arch_setup_jit_tls_data (). No cleanup work is visible here.
 */
3933 mono_arch_free_jit_tls_data (MonoJitTlsData *tls)
/*
 * mono_arch_emit_this_vret_args:
 *
 *   Emit the IR that sets up the implicit call arguments: move the 'this'
 * pointer (if any) into %o0, and store the valuetype-return pointer (if
 * any) into its dedicated stack slot at %sp+64.
 */
3938 mono_arch_emit_this_vret_args (MonoCompile *cfg, MonoCallInst *inst, int this_reg, int this_type, int vt_reg)
3940 /* add the this argument */
3941 if (this_reg != -1) {
3943 MONO_INST_NEW (cfg, this, OP_SETREG);
3944 this->type = this_type;
3945 this->sreg1 = this_reg;
3946 this->dreg = sparc_o0;
3947 mono_bblock_add_inst (cfg->cbb, this);
3951 /* Set the 'struct/union return pointer' location on the stack */
3952 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, sparc_sp, 64, vt_reg);
/*
 * mono_arch_get_opcode_for_method:
 *
 *   Hook for mapping known methods (intrinsics) to architecture-specific
 * opcodes. No SPARC intrinsics are visible in this extract.
 */
3958 mono_arch_get_opcode_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
3964 * mono_arch_get_argument_info:
3965 * @csig: a method signature
3966 * @param_count: the number of parameters to consider
3967 * @arg_info: an array to store the result infos
3969 * Gathers information on parameters such as size, alignment and
3970 * padding. arg_info should be large enough to hold param_count + 1 entries.
3972 * Returns the size of the activation frame.
/*
 * mono_arch_get_argument_info: (see the comment block above)
 *
 *   Fill ARG_INFO [0..param_count] with the stack offset and size of each
 * parameter of CSIG, reusing get_call_info () for the ABI layout. The +68
 * skips the 64-byte register save area and the struct-return slot, so
 * offsets are relative to the raw stack pointer at call time.
 */
3975 mono_arch_get_argument_info (MonoMethodSignature *csig, int param_count, MonoJitArgumentInfo *arg_info)
3981 cinfo = get_call_info (csig, FALSE);
3983 if (csig->hasthis) {
/* Slot 0 describes the implicit 'this' argument. */
3984 ainfo = &cinfo->args [0];
3985 arg_info [0].offset = 68 + ainfo->offset;
3988 for (k = 0; k < param_count; k++) {
3989 ainfo = &cinfo->args [k + csig->hasthis];
3991 arg_info [k + 1].offset = 68 + ainfo->offset;
3992 arg_info [k + 1].size = mono_type_size (csig->params [k], &align);
4002 mono_arch_print_tree (MonoInst *tree, int arity)