2 * mini-sparc.c: Sparc backend for the Mono code generator
5 * Paolo Molaro (lupus@ximian.com)
6 * Dietmar Maurer (dietmar@ximian.com)
9 * Christopher Taylor (ct@gentoo.org)
10 * Mark Crichton (crichton@gimp.org)
11 * Zoltan Varga (vargaz@freemail.hu)
13 * (C) 2003 Ximian, Inc.
21 #include <sys/systeminfo.h>
25 #include <mono/metadata/appdomain.h>
26 #include <mono/metadata/debug-helpers.h>
27 #include <mono/utils/mono-math.h>
29 #include "mini-sparc.h"
32 #include "cpu-sparc.h"
35 * Sparc V9 means two things:
36 * - the instruction set
39 * V9 instructions are only usable if the underlying processor is 64 bit. Most Sparc
40 * processors in use are 64 bit processors. The V9 ABI is only usable if the
41 * mono executable is a 64 bit executable. So it would make sense to use the 64 bit
42 * instructions without using the 64 bit ABI.
47 * - %i0..%i<n> hold the incoming arguments, these are never written by JITted
48 * code. Unused input registers are used for global register allocation.
49 * - %l0..%l7 is used for local register allocation
50 * - %o0..%o6 is used for outgoing arguments
51 * - %o7 and %g1 are used as scratch registers in opcodes
52 * - all floating point registers are used for local register allocation except %f0.
53 * Only double precision registers are used.
55 * - fp registers %d0..%d30 are used for parameter passing, and %d32..%d62 are
56 * used for local allocation.
61 * - doubles and longs must be stored in dword aligned locations
65 * The following things are not implemented or do not work:
66 * - some fp arithmetic corner cases
67 * The following tests in mono/mini are expected to fail:
68 * - test_0_simple_double_casts
69 * This test casts (guint64)-1 to double and then back to guint64 again.
70 * Under x86, it returns 0, while under sparc it returns -1.
72 * In addition to this, the runtime requires the trunc function, or its
73 * solaris counterpart, aintl, to do some double->int conversions. If this
74 * function is not available, it is emulated somewhat, but the results can be
80 * - optimize sparc_set according to the memory model
81 * - when non-AOT compiling, compute patch targets immediately so we don't
82 * have to emit the 6 byte template.
84 * - struct arguments/returns
89 * - sparc_call_simple can't be used in a lot of places since the displacement
90 * might not fit into an imm30.
91 * - g1 can't be used in a lot of places since it is used as a scratch reg in
93 * - sparc_f0 can't be used as a scratch register on V9
94 * - the %d34..%d62 fp registers are encoded as: %dx = %f(x - 32 + 1), ie.
96 * - ldind.i4/u4 needs to sign extend/clear out upper word -> slows things down
97 * - ins->dreg can't be used as a scratch register in r4 opcodes since it might
98 * be a double precision register which has no single precision part.
99 * - passing/returning structs is hard to implement, because:
100 * - the spec is very hard to understand
101 * - it requires knowledge about the fields of structure, needs to handle
102 * nested structures etc.
106 * Possible optimizations:
107 * - delay slot scheduling
108 * - allocate large constants to registers
109 * - use %o registers for local allocation
110 * - implement unwinding through native frames
111 * - add more mul/div/rem optimizations
/*
 * Backend configuration macros.
 * NOTE(review): this is a sampled fragment — the embedded numbering jumps
 * (115, 119, 123, ...) show lines are missing between entries, including the
 * preprocessor conditionals that must separate the two sets of
 * GP_SCRATCH_REG/FP_SCRATCH_REG/ARGS_OFFSET definitions below
 * (presumably a V9 #if branch vs. a V8 #else branch — confirm against the
 * full file).
 */
115 #define MONO_SPARC_THR_TLS 1
119 * There was a 64 bit bug in glib-2.2: g_bit_nth_msf (0, -1) would return 32,
120 * causing infinite loops in dominator computation. So glib-2.4 is required.
123 #if GLIB_MAJOR_VERSION == 2 && GLIB_MINOR_VERSION < 4
124 #error "glib 2.4 or later is required for 64 bit mode."
/* NOT_IMPLEMENTED aborts via g_assert_not_reached; do{}while(0) makes it statement-safe. */
128 #define NOT_IMPLEMENTED do { g_assert_not_reached (); } while (0)
/* Round val up to the next multiple of align (align must be a power of two). */
130 #define ALIGN_TO(val,align) (((val) + ((align) - 1)) & ~((align) - 1))
132 #define SIGNAL_STACK_SIZE (64 * 1024)
134 #define STACK_BIAS MONO_SPARC_STACK_BIAS
/* First variant (V9, presumably): %g1 is reserved by sparc_set, %f0 by param passing. */
138 /* %g1 is used by sparc_set */
139 #define GP_SCRATCH_REG sparc_g4
140 /* %f0 is used for parameter passing */
141 #define FP_SCRATCH_REG sparc_f30
142 #define ARGS_OFFSET (STACK_BIAS + 128)
/* Second variant (V8, presumably): 68 = 64-byte register save area + 4-byte struct-return slot. */
146 #define FP_SCRATCH_REG sparc_f0
147 #define ARGS_OFFSET 68
148 #define GP_SCRATCH_REG sparc_g1
/*
 * Backend-wide flags and forward declarations.
 * NOTE(review): v64 is defined twice below (TRUE then FALSE); the #if/#else
 * guarding them is among the missing lines — confirm against the full file.
 */
152 /* Whenever the CPU supports v9 instructions */
153 static gboolean sparcv9 = FALSE;
155 /* Whenever this is a 64bit executable */
157 static gboolean v64 = TRUE;
159 static gboolean v64 = FALSE;
162 static gpointer mono_arch_get_lmf_addr (void);
/* Forward decl: returns the stack offset of the float spill slot (used at end of allocate_vars). */
165 mono_spillvar_offset_float (MonoCompile *cfg, int spillvar);
/*
 * mono_arch_regname:
 * Map an integer register number (0..31) to its symbolic SPARC name for
 * debug output. NOTE(review): the return statements (and an out-of-range
 * fallback, if any) are in lines not visible in this fragment.
 */
168 mono_arch_regname (int reg) {
169 static const char * rnames[] = {
170 "sparc_g0", "sparc_g1", "sparc_g2", "sparc_g3", "sparc_g4",
171 "sparc_g5", "sparc_g6", "sparc_g7", "sparc_o0", "sparc_o1",
172 "sparc_o2", "sparc_o3", "sparc_o4", "sparc_o5", "sparc_sp",
173 "sparc_call", "sparc_l0", "sparc_l1", "sparc_l2", "sparc_l3",
174 "sparc_l4", "sparc_l5", "sparc_l6", "sparc_l7", "sparc_i0",
175 "sparc_i1", "sparc_i2", "sparc_i3", "sparc_i4", "sparc_i5",
176 "sparc_fp", "sparc_retadr"
/* Bounds check before indexing rnames; the indexed return is not visible here. */
178 if (reg >= 0 && reg < 32)
/*
 * mono_arch_cpu_init:
 * One-time CPU initialization for executing managed code; delegates to the
 * optimization-detection routine so the sparcv9 flag gets set even for
 * embedded hosts that never query optimizations directly.
 */
184 * Initialize the cpu to execute managed code.
187 mono_arch_cpu_init (void)
190 /* make sure sparcv9 is initialized for embedded use */
191 mono_arch_cpu_optimizazions(&dummy);
/*
 * mono_arch_cpu_optimizazions:  (sic — the misspelling is the established
 * public name; callers elsewhere use it, so it must not be renamed here.)
 * Detect CPU capabilities and return the supported optimization mask;
 * excluded optimizations are reported through *exclude_mask.
 * Detection: Solaris sysinfo(SI_ISALIST) where available, otherwise the
 * glibc page-size heuristic below.
 */
195 * This function returns the optimizations supported on this cpu.
198 mono_arch_cpu_optimizazions (guint32 *exclude_mask)
206 if (!sysinfo (SI_ISALIST, buf, 1024))
207 g_assert_not_reached ();
209 /* From glibc. If the getpagesize is 8192, we're on sparc64, which
210 * (in)directly implies that we're a v9 or better.
211 * Improvements to this are greatly accepted...
212 * Also, we don't differentiate between v7 and v8. I sense SIGILL
213 * sniffing in my future.
215 if (getpagesize() == 8192)
216 strcpy (buf, "sparcv9");
218 strcpy (buf, "sparcv8");
222 * On some processors, the cmov instructions are even slower than the
/* v9: enable conditional moves; otherwise exclude them (pre-v9 lacks CMOV). */
225 if (strstr (buf, "sparcv9")) {
226 opts |= MONO_OPT_CMOV | MONO_OPT_FCMOV;
230 *exclude_mask |= MONO_OPT_CMOV | MONO_OPT_FCMOV;
/*
 * mono_sparc_break / flushi / mono_arch_flush_icache:
 * Instruction-cache maintenance after emitting or patching code.
 * On Solaris, sync_instruction_memory() is used; otherwise the code range is
 * flushed with inline-asm "iflush" in 8-byte steps.
 */
236 mono_sparc_break (void)
241 #define flushi(addr) __asm__ __volatile__ ("iflush %0"::"r"(addr):"memory")
242 #else /* assume Sun's compiler */
243 static void flushi(void *addr)
250 void sync_instruction_memory(caddr_t addr, int len);
254 mono_arch_flush_icache (guint8 *code, gint size)
257 /* Hopefully this is optimized based on the actual CPU */
258 sync_instruction_memory (code, size);
260 guint64 *p = (guint64*)code;
/*
 * NOTE(review): ((size + 8) / 8) is added to a guint8* here, which advances
 * only size/8 bytes, not size bytes — looks like it should be
 * (code + size + 8) or pointer arithmetic on the guint64*. Cannot fix safely
 * in this fragment; verify against the full file.
 */
261 guint64 *end = (guint64*)(code + ((size + 8) /8));
264 * FIXME: Flushing code in dword chunks in _slow_.
268 __asm__ __volatile__ ("iflush %0"::"r"(p++));
/*
 * mono_sparc_flushw / mono_arch_flush_register_windows:
 * Lazily assemble (once) a tiny save/flushw/restore stub into the static
 * `start` buffer and call it to spill all register windows to their stack
 * save areas. -160 is the minimal V9 frame for the save instruction.
 */
278 * Flush all register windows to memory. Every register window is saved to
279 * a 16 word area on the stack pointed to by its %sp register.
282 mono_sparc_flushw (void)
284 static guint32 start [64];
285 static int inited = 0;
287 static void (*flushw) (void);
292 sparc_save_imm (code, sparc_sp, -160, sparc_sp);
295 sparc_restore_simple (code);
/* The stub must fit in the 64-word static buffer. */
297 g_assert ((code - start) < 64);
299 flushw = (gpointer)start;
/* Arch-independent entry point: just delegates to the stub above. */
308 mono_arch_flush_register_windows (void)
310 mono_sparc_flushw ();
/*
 * Small predicates and calling-convention data.
 * - mono_arch_is_inst_imm: TRUE when imm fits in a 13-bit signed immediate
 *   (the SPARC ALU immediate field).
 * - mono_sparc_is_v9 / mono_sparc_is_sparc64: capability queries whose
 *   bodies are in lines not visible in this fragment.
 * - ArgInFloatReg/ArgInDoubleReg are members of the ArgStorage enum;
 *   reg/vt_offset belong to struct ArgInfo (surrounding declarations missing).
 */
314 mono_arch_is_inst_imm (gint64 imm)
316 return sparc_is_imm13 (imm);
320 mono_sparc_is_v9 (void) {
325 mono_sparc_is_sparc64 (void) {
337 ArgInFloatReg, /* V9 only */
338 ArgInDoubleReg /* V9 only */
343 /* This needs to be offset by %i0 or %o0 depending on caller/callee */
346 guint32 vt_offset; /* for valuetypes */
/*
 * add_general:
 * Assign the next integer argument location. *gr is the count of integer
 * param registers used so far, *stack_size the stack bytes consumed.
 * `pair` selects a two-word argument (e.g. a 64-bit value on V8), which can
 * land in a register pair, a stack pair, or split across both.
 * NOTE(review): the branch structure is partially missing in this fragment;
 * the control flow shown is the visible subset only.
 */
364 add_general (guint32 *gr, guint32 *stack_size, ArgInfo *ainfo, gboolean pair)
366 ainfo->offset = *stack_size;
369 if (*gr >= PARAM_REGS) {
370 ainfo->storage = ArgOnStack;
373 ainfo->storage = ArgInIReg;
378 /* Always reserve stack space for parameters passed in registers */
379 (*stack_size) += sizeof (gpointer);
382 if (*gr < PARAM_REGS - 1) {
383 /* A pair of registers */
384 ainfo->storage = ArgInIRegPair;
388 else if (*gr >= PARAM_REGS) {
389 /* A pair of stack locations */
390 ainfo->storage = ArgOnStackPair;
/* Exactly one register left: first half in a register, second on the stack. */
393 ainfo->storage = ArgInSplitRegStack;
398 (*stack_size) += 2 * sizeof (gpointer);
/*
 * add_float:
 * Assign the next floating-point argument location (V9 fp-register
 * convention). `single` selects single precision, which goes into an
 * odd-numbered fp register (*gr + 1); doubles use an even-numbered register.
 * Falls back to the stack when FLOAT_PARAM_REGS are exhausted.
 */
404 #define FLOAT_PARAM_REGS 32
407 add_float (guint32 *gr, guint32 *stack_size, ArgInfo *ainfo, gboolean single)
409 ainfo->offset = *stack_size;
412 if (*gr >= FLOAT_PARAM_REGS) {
413 ainfo->storage = ArgOnStack;
416 /* A single is passed in an even numbered fp register */
417 ainfo->storage = ArgInFloatReg;
418 ainfo->reg = *gr + 1;
423 if (*gr < FLOAT_PARAM_REGS) {
424 /* A double register */
425 ainfo->storage = ArgInDoubleReg;
430 ainfo->storage = ArgOnStack;
434 (*stack_size) += sizeof (gpointer);
/*
 * get_call_info:
 * Build a CallInfo describing where every argument and the return value of
 * `sig` live (register / register pair / fp register / stack), per the V8
 * System V ABI supplement or the V9 psABI. Caller owns (frees) the result.
 * NOTE(review): many case labels and #ifdef branches are missing from this
 * sampled fragment; comments below describe only what is visible.
 */
442 * Obtain information about a call according to the calling convention.
443 * For V8, see the "System V ABI, Sparc Processor Supplement" Sparc V8 version
444 * document for more information.
445 * For V9, see the "Low Level System Information (64-bit psABI)" chapter in
446 * the 'Sparc Compliance Definition 2.4' document.
449 get_call_info (MonoMethodSignature *sig, gboolean is_pinvoke)
451 guint32 i, gr, fr, simpletype;
452 int n = sig->hasthis + sig->param_count;
453 guint32 stack_size = 0;
/* Single allocation: CallInfo header followed by n ArgInfo entries. */
456 cinfo = g_malloc0 (sizeof (CallInfo) + (sizeof (ArgInfo) * n));
/* Non-enum valuetype / typedbyref returns: hidden return-buffer pointer arg. */
462 if (((sig->ret->type == MONO_TYPE_VALUETYPE) && !sig->ret->data.klass->enumtype) || (sig->ret->type == MONO_TYPE_TYPEDBYREF)) {
463 /* The address of the return value is passed in %o0 */
464 add_general (&gr, &stack_size, &cinfo->ret, FALSE);
465 cinfo->ret.reg += sparc_i0;
/* Implicit `this` consumes the first integer argument slot. */
471 add_general (&gr, &stack_size, cinfo->args + 0, FALSE);
473 for (i = 0; i < sig->param_count; ++i) {
474 ArgInfo *ainfo = &cinfo->args [sig->hasthis + i];
476 if ((sig->call_convention == MONO_CALL_VARARG) && (i == sig->sentinelpos)) {
477 /* Emit the signature cookie just before the implicit arguments */
478 add_general (&gr, &stack_size, &cinfo->sig_cookie, FALSE);
479 /* Prevent implicit arguments from being passed in registers */
483 DEBUG(printf("param %d: ", i));
484 if (sig->params [i]->byref) {
485 DEBUG(printf("byref\n"));
487 add_general (&gr, &stack_size, ainfo, FALSE);
490 simpletype = sig->params [i]->type;
492 switch (simpletype) {
493 case MONO_TYPE_BOOLEAN:
496 add_general (&gr, &stack_size, ainfo, FALSE);
/* Sub-word args occupy a full slot; point offset at the significant bytes (big-endian). */
497 /* the value is in the ls byte */
498 ainfo->offset += sizeof (gpointer) - 1;
503 add_general (&gr, &stack_size, ainfo, FALSE);
504 /* the value is in the ls word */
505 ainfo->offset += sizeof (gpointer) - 2;
509 add_general (&gr, &stack_size, ainfo, FALSE);
510 /* the value is in the ls dword */
511 ainfo->offset += sizeof (gpointer) - 4;
516 case MONO_TYPE_CLASS:
517 case MONO_TYPE_OBJECT:
518 case MONO_TYPE_STRING:
519 case MONO_TYPE_SZARRAY:
520 case MONO_TYPE_ARRAY:
521 add_general (&gr, &stack_size, ainfo, FALSE);
523 case MONO_TYPE_VALUETYPE:
/* Enums are re-dispatched as their underlying primitive type. */
524 if (sig->params [i]->data.klass->enumtype) {
525 simpletype = sig->params [i]->data.klass->enum_basetype->type;
533 add_general (&gr, &stack_size, ainfo, FALSE);
535 case MONO_TYPE_TYPEDBYREF:
536 add_general (&gr, &stack_size, ainfo, FALSE);
541 add_general (&gr, &stack_size, ainfo, FALSE);
/* TRUE => 64-bit value needs a register pair on V8. */
543 add_general (&gr, &stack_size, ainfo, TRUE);
548 add_float (&fr, &stack_size, ainfo, TRUE);
551 /* single precision values are passed in integer registers */
552 add_general (&gr, &stack_size, ainfo, FALSE);
557 add_float (&fr, &stack_size, ainfo, FALSE);
560 /* double precision values are passed in a pair of registers */
561 add_general (&gr, &stack_size, ainfo, TRUE);
565 g_assert_not_reached ();
/* ---- return value placement ---- */
571 simpletype = sig->ret->type;
573 switch (simpletype) {
574 case MONO_TYPE_BOOLEAN:
585 case MONO_TYPE_CLASS:
586 case MONO_TYPE_OBJECT:
587 case MONO_TYPE_SZARRAY:
588 case MONO_TYPE_ARRAY:
589 case MONO_TYPE_STRING:
590 cinfo->ret.storage = ArgInIReg;
591 cinfo->ret.reg = sparc_i0;
598 cinfo->ret.storage = ArgInIReg;
599 cinfo->ret.reg = sparc_i0;
/* 64-bit integer return: %i0/%i1 pair. */
603 cinfo->ret.storage = ArgInIRegPair;
604 cinfo->ret.reg = sparc_i0;
611 cinfo->ret.storage = ArgInFReg;
612 cinfo->ret.reg = sparc_f0;
614 case MONO_TYPE_VALUETYPE:
615 if (sig->ret->data.klass->enumtype) {
616 simpletype = sig->ret->data.klass->enum_basetype->type;
627 cinfo->ret.storage = ArgOnStack;
629 case MONO_TYPE_TYPEDBYREF:
632 /* Same as a valuetype with size 24 */
639 cinfo->ret.storage = ArgOnStack;
644 g_error ("Can't handle as return value 0x%x", sig->ret->type);
648 cinfo->stack_usage = stack_size;
649 cinfo->reg_usage = gr;
/*
 * is_regsize_var:
 * TRUE when a variable of type `t` fits in a single integer register and is
 * therefore a candidate for global register allocation. Enums recurse on
 * their underlying type. (Other case labels fall between the visible lines.)
 */
654 is_regsize_var (MonoType *t) {
658 case MONO_TYPE_BOOLEAN:
669 case MONO_TYPE_OBJECT:
670 case MONO_TYPE_STRING:
671 case MONO_TYPE_CLASS:
672 case MONO_TYPE_SZARRAY:
673 case MONO_TYPE_ARRAY:
675 case MONO_TYPE_VALUETYPE:
676 if (t->data.klass->enumtype)
677 return is_regsize_var (t->data.klass->enum_basetype);
/*
 * mono_arch_get_allocatable_int_vars:
 * Collect the method variables eligible for global integer register
 * allocation, sorted by liveness. Skips dead ranges, volatile/indirect
 * variables, those already in registers, and (for now) arguments.
 */
689 mono_arch_get_allocatable_int_vars (MonoCompile *cfg)
695 * FIXME: If an argument is allocated to a register, then load it from the
696 * stack in the prolog.
699 for (i = 0; i < cfg->num_varinfo; i++) {
700 MonoInst *ins = cfg->varinfo [i];
701 MonoMethodVar *vmv = MONO_VARINFO (cfg, i);
/* Dead or never-used variable: nothing to allocate. */
704 if (vmv->range.first_use.abs_pos >= vmv->range.last_use.abs_pos)
707 /* FIXME: Make arguments on stack allocateable to registers */
708 if (ins->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT) || (ins->opcode == OP_REGVAR) || (ins->opcode == OP_ARG))
711 if (is_regsize_var (ins->inst_vtype)) {
712 g_assert (MONO_VARINFO (cfg, i)->reg == -1);
713 g_assert (i == vmv->idx);
715 vars = mono_varlist_insert_sorted (cfg, vars, vmv, FALSE);
/*
 * mono_arch_get_global_int_regs:
 * Return the list of integer registers available for global allocation:
 * the %i registers not consumed by incoming arguments (per get_call_info)
 * plus %l0..%l3.
 */
723 mono_arch_get_global_int_regs (MonoCompile *cfg)
727 MonoMethodSignature *sig;
730 sig = cfg->method->signature;
732 cinfo = get_call_info (sig, FALSE);
734 /* Use unused input registers */
735 for (i = cinfo->reg_usage; i < 6; ++i)
736 regs = g_list_prepend (regs, GUINT_TO_POINTER (sparc_i0 + i));
738 /* Use %l0..%l3 as global registers */
739 for (i = sparc_l0; i < sparc_l4; ++i)
740 regs = g_list_prepend (regs, GUINT_TO_POINTER (i));
/*
 * mono_arch_regalloc_cost:
 * Cost heuristic for the global register allocator; body lies in lines not
 * visible in this fragment.
 */
748 * mono_arch_regalloc_cost:
750 * Return the cost, in number of memory references, of the action of
751 * allocating the variable VMV into a register during global register
755 mono_arch_regalloc_cost (MonoCompile *cfg, MonoMethodVar *vmv)
/*
 * mono_arch_allocate_vars:
 * Decide the home (register or %fp-relative stack slot) of the return
 * value, every local, and every argument, per the calling convention from
 * get_call_info. Locals grow downward from %fp; arguments live in the
 * caller's frame at ARGS_OFFSET unless they must be copied (floats,
 * volatiles, misaligned doubles).
 * NOTE(review): sampled fragment — several branches/case labels are missing
 * between the visible lines.
 */
761 * Set var information according to the calling convention. sparc version.
762 * The locals var stuff should most likely be split in another method.
765 mono_arch_allocate_vars (MonoCompile *m)
767 MonoMethodSignature *sig;
768 MonoMethodHeader *header;
770 int i, offset, size, align, curinst;
773 header = ((MonoMethodNormal *)m->method)->header;
775 sig = m->method->signature;
777 cinfo = get_call_info (sig, FALSE);
/* ---- return value ---- */
779 if (sig->ret->type != MONO_TYPE_VOID) {
780 switch (cinfo->ret.storage) {
784 m->ret->opcode = OP_REGVAR;
785 m->ret->inst_c0 = cinfo->ret.reg;
789 g_assert_not_reached ();
/* Valuetype returned via memory: slot in the caller's frame. */
792 m->ret->opcode = OP_REGOFFSET;
793 m->ret->inst_basereg = sparc_fp;
794 m->ret->inst_offset = 64;
800 m->ret->dreg = m->ret->inst_c0;
804 * We use the ABI calling conventions for managed code as well.
805 * Exception: valuetypes are never returned in registers on V9.
806 * FIXME: Use something more optimized.
809 /* Locals are allocated backwards from %fp */
810 m->frame_reg = sparc_fp;
814 * Reserve a stack slot for holding information used during exception
817 if (header->num_clauses)
818 offset += sizeof (gpointer) * 2;
820 if (m->method->save_lmf) {
821 offset += sizeof (MonoLMF);
822 m->arch.lmf_offset = offset;
/* ---- locals ---- */
825 curinst = m->locals_start;
826 for (i = curinst; i < m->num_varinfo; ++i) {
827 inst = m->varinfo [i];
829 if (inst->opcode == OP_REGVAR) {
830 //g_print ("allocating local %d to %s\n", i, mono_arch_regname (inst->dreg));
834 /* inst->unused indicates native sized value types, this is used by the
835 * pinvoke wrappers when they call functions returning structure */
836 if (inst->unused && MONO_TYPE_ISSTRUCT (inst->inst_vtype) && inst->inst_vtype->type != MONO_TYPE_TYPEDBYREF)
837 size = mono_class_native_size (inst->inst_vtype->data.klass, &align);
839 size = mono_type_stack_size (inst->inst_vtype, &align);
842 * This is needed since structures containing doubles must be doubleword
844 * FIXME: Do this only if needed.
846 if (MONO_TYPE_ISSTRUCT (inst->inst_vtype))
850 * variables are accessed as negative offsets from %fp, so increase
851 * the offset before assigning it to a variable
856 offset &= ~(align - 1);
857 inst->opcode = OP_REGOFFSET;
858 inst->inst_basereg = sparc_fp;
859 inst->inst_offset = STACK_BIAS + -offset;
861 //g_print ("allocating local %d to [%s - %d]\n", i, mono_arch_regname (inst->inst_basereg), - inst->inst_offset);
864 if (sig->call_convention == MONO_CALL_VARARG) {
865 m->sig_cookie = cinfo->sig_cookie.offset + ARGS_OFFSET;
/* ---- arguments ---- */
868 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
869 inst = m->varinfo [i];
870 if (inst->opcode != OP_REGVAR) {
871 ArgInfo *ainfo = &cinfo->args [i];
872 gboolean inreg = TRUE;
876 if (sig->hasthis && (i == 0))
877 arg_type = &mono_defaults.object_class->byval_arg;
879 arg_type = sig->params [i - sig->hasthis];
882 if (!arg_type->byref && ((arg_type->type == MONO_TYPE_R4)
883 || (arg_type->type == MONO_TYPE_R8)))
885 * Since float arguments are passed in integer registers, we need to
886 * save them to the stack in the prolog.
891 /* FIXME: Allocate volatile arguments to registers */
892 if (inst->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT))
895 if (MONO_TYPE_ISSTRUCT (arg_type))
896 /* FIXME: this isn't needed */
899 inst->opcode = OP_REGOFFSET;
902 storage = ArgOnStack;
904 storage = ainfo->storage;
909 inst->opcode = OP_REGVAR;
910 inst->dreg = sparc_i0 + ainfo->reg;
915 * Since float regs are volatile, we save the arguments to
916 * the stack in the prolog.
917 * FIXME: Avoid this if the method contains no calls.
921 case ArgInSplitRegStack:
922 /* Split arguments are saved to the stack in the prolog */
923 inst->opcode = OP_REGOFFSET;
924 /* in parent frame */
925 inst->inst_basereg = sparc_fp;
926 inst->inst_offset = ainfo->offset + ARGS_OFFSET;
928 if (!arg_type->byref && (arg_type->type == MONO_TYPE_R8)) {
930 * It is very hard to load doubles from non-doubleword aligned
931 * memory locations. So if the offset is misaligned, we copy the
932 * argument to a stack location in the prolog.
934 if ((inst->inst_offset - STACK_BIAS) % 8) {
935 inst->inst_basereg = sparc_fp;
939 offset &= ~(align - 1);
940 inst->inst_offset = STACK_BIAS + -offset;
949 if (MONO_TYPE_ISSTRUCT (arg_type)) {
950 /* Add a level of indirection */
952 * It would be easier to add OP_LDIND_I here, but ldind_i instructions
953 * are destructively modified in a lot of places in inssel.brg.
956 MONO_INST_NEW (m, indir, 0);
958 inst->opcode = OP_SPARC_INARG_VT;
959 inst->inst_left = indir;
965 * spillvars are stored between the normal locals and the storage reserved
969 m->stack_offset = offset;
971 /* Add a properly aligned dword for use by int<->float conversion opcodes */
973 mono_spillvar_offset_float (m, 0);
/*
 * mono_arch_call_opcode:
 * Lower a managed call: wrap every argument in an OP_SPARC_OUTARG_* /
 * OP_OUTARG instruction (register, register pair, fp register, or stack
 * slot per get_call_info), handle the vararg signature cookie and valuetype
 * copies, then reverse the prepended out_args list and record stack usage.
 * NOTE(review): sampled fragment — some branches and the list-reversal loop
 * body fall between the visible lines.
 */
979 * take the arguments and generate the arch-specific
980 * instructions to properly call the function in call.
981 * This includes pushing, moving arguments to the right register
985 mono_arch_call_opcode (MonoCompile *cfg, MonoBasicBlock* bb, MonoCallInst *call, int is_virtual) {
987 MonoMethodSignature *sig;
991 guint32 extra_space = 0;
993 sig = call->signature;
994 n = sig->param_count + sig->hasthis;
996 cinfo = get_call_info (sig, sig->pinvoke);
998 for (i = 0; i < n; ++i) {
999 ainfo = cinfo->args + i;
1000 if (is_virtual && i == 0) {
1001 /* the argument will be attached to the call instruction */
1002 in = call->args [i];
1004 if ((sig->call_convention == MONO_CALL_VARARG) && (i == sig->sentinelpos)) {
1005 /* FIXME: Test varargs with 0 implicit args */
1006 /* FIXME: Test interaction with hasthis */
1007 /* Emit the signature cookie just before the first implicit argument */
1009 /* FIXME: Add support for signature tokens to AOT */
1010 cfg->disable_aot = TRUE;
1011 /* We always pass the signature on the stack for simplicity */
1012 MONO_INST_NEW (cfg, arg, OP_SPARC_OUTARG_MEM);
1013 arg->inst_basereg = sparc_sp;
1014 arg->inst_imm = ARGS_OFFSET + cinfo->sig_cookie.offset;
1015 MONO_INST_NEW (cfg, sig_arg, OP_ICONST);
1016 sig_arg->inst_p0 = call->signature;
1017 arg->inst_left = sig_arg;
1018 arg->type = STACK_PTR;
1019 /* prepend, so they get reversed */
1020 arg->next = call->out_args;
1021 call->out_args = arg;
1024 MONO_INST_NEW (cfg, arg, OP_OUTARG);
1025 in = call->args [i];
1026 arg->cil_code = in->cil_code;
1027 arg->inst_left = in;
1028 arg->type = in->type;
1029 /* prepend, we'll need to reverse them later */
1030 arg->next = call->out_args;
1031 call->out_args = arg;
/* ---- valuetype argument: copy to the stack, pass its address ---- */
1033 if ((i >= sig->hasthis) && (MONO_TYPE_ISSTRUCT(sig->params [i - sig->hasthis]))) {
1036 guint32 offset, pad;
1044 if (sig->params [i - sig->hasthis]->type == MONO_TYPE_TYPEDBYREF) {
1045 size = sizeof (MonoTypedRef);
1046 align = sizeof (gpointer);
1050 size = mono_type_native_stack_size (&in->klass->byval_arg, &align);
1052 size = mono_type_stack_size (&in->klass->byval_arg, &align);
1055 * We use OP_OUTARG_VT to copy the valuetype to a stack location, then
1056 * use the normal OUTARG opcodes to pass the address of the location to
1059 MONO_INST_NEW (cfg, inst, OP_OUTARG_VT);
1060 inst->inst_left = in;
1062 /* The first 6 argument locations are reserved */
1063 if (cinfo->stack_usage < 6 * sizeof (gpointer))
1064 cinfo->stack_usage = 6 * sizeof (gpointer);
/* Align the copy area; `pad` is the alignment slack added before it. */
1066 offset = ALIGN_TO ((ARGS_OFFSET - STACK_BIAS) + cinfo->stack_usage, align);
1067 pad = offset - ((ARGS_OFFSET - STACK_BIAS) + cinfo->stack_usage);
1069 inst->inst_c1 = STACK_BIAS + offset;
1070 inst->unused = size;
1071 arg->inst_left = inst;
1073 cinfo->stack_usage += size;
1074 cinfo->stack_usage += pad;
/* ---- pick the outarg opcode from the computed storage class ---- */
1077 switch (ainfo->storage) {
1081 if (ainfo->storage == ArgInIRegPair)
1082 arg->opcode = OP_SPARC_OUTARG_REGPAIR;
1083 arg->unused = sparc_o0 + ainfo->reg;
1084 call->used_iregs |= 1 << ainfo->reg;
1086 if ((i >= sig->hasthis) && (sig->params [i - sig->hasthis]->type == MONO_TYPE_R8)) {
1088 * The OUTARG (freg) implementation needs an extra dword to store
1089 * the temporary value.
1095 arg->opcode = OP_SPARC_OUTARG_MEM;
1097 case ArgOnStackPair:
1098 arg->opcode = OP_SPARC_OUTARG_MEMPAIR;
1100 case ArgInSplitRegStack:
1101 arg->opcode = OP_SPARC_OUTARG_SPLIT_REG_STACK;
1102 arg->unused = sparc_o0 + ainfo->reg;
1103 call->used_iregs |= 1 << ainfo->reg;
1106 arg->opcode = OP_SPARC_OUTARG_FLOAT_REG;
1107 arg->unused = sparc_f0 + ainfo->reg;
1109 case ArgInDoubleReg:
1110 arg->opcode = OP_SPARC_OUTARG_DOUBLE_REG;
1111 arg->unused = sparc_f0 + ainfo->reg;
1117 arg->inst_basereg = sparc_sp;
1118 arg->inst_imm = ARGS_OFFSET + ainfo->offset;
1123 * Reverse the call->out_args list.
1126 MonoInst *prev = NULL, *list = call->out_args, *next;
1133 call->out_args = prev;
1135 call->stack_usage = cinfo->stack_usage + extra_space;
1136 cfg->param_area = MAX (cfg->param_area, call->stack_usage);
1137 cfg->flags |= MONO_CFG_HAS_CALLS;
/*
 * opcode_to_sparc_cond:
 * Translate a Mono branch / conditional-exception opcode to the SPARC
 * condition code it must test. The returned sparc_* condition constants and
 * most `return` lines fall between the visible lines of this fragment;
 * unknown opcodes hit g_assert_not_reached().
 */
1143 /* Map opcode to the sparc condition codes */
1144 static inline SparcCond
1145 opcode_to_sparc_cond (int opcode)
1167 case OP_COND_EXC_EQ:
1170 case OP_COND_EXC_NE_UN:
1177 case OP_COND_EXC_LT:
1183 case OP_COND_EXC_LT_UN:
1189 case OP_COND_EXC_GT:
1195 case OP_COND_EXC_GT_UN:
1199 case OP_COND_EXC_GE:
1203 case OP_COND_EXC_GE_UN:
1207 case OP_COND_EXC_LE:
1211 case OP_COND_EXC_LE_UN:
1213 case OP_COND_EXC_OV:
1214 case OP_COND_EXC_IOV:
1217 case OP_COND_EXC_IC:
1219 case OP_COND_EXC_NO:
1220 case OP_COND_EXC_NC:
1223 g_assert_not_reached ();
/*
 * Code-emission macro family:
 * - COMPUTE_DISP resolves a branch displacement (label or basic block),
 *   emitting a patch-info record when the target is not yet known.
 * - EMIT_COND_BRANCH* / EMIT_FLOAT_COND_BRANCH emit (optionally predicted)
 *   conditional branches; the V9 variants use branchp/imm19, the fallback
 *   variants plain branch/imm22. BPR variants branch on a register value.
 * - EMIT_COND_SYSTEM_EXCEPTION* branch to out-of-line exception throwers
 *   recorded via MONO_PATCH_INFO_EXC.
 * - EMIT_ALU_IMM / EMIT_LOAD_MEMBASE / EMIT_STORE_MEMBASE_* fall back to
 *   sparc_set into a scratch register when an offset/immediate exceeds imm13.
 * - EMIT_CALL emits either a patchable call template or sparc_call_simple.
 * NOTE(review): this is a sampled fragment — #if/#else lines separating the
 * V9 and non-V9 macro variants are missing, and the backslash continuations
 * mean no comments can safely be added inside the bodies below.
 */
1228 #define COMPUTE_DISP(ins) \
1229 if (ins->flags & MONO_INST_BRLABEL) { \
1230 if (ins->inst_i0->inst_c0) \
1231 disp = (ins->inst_i0->inst_c0 - ((guint8*)code - cfg->native_code)) >> 2; \
1234 mono_add_patch_info (cfg, (guint8*)code - cfg->native_code, MONO_PATCH_INFO_LABEL, ins->inst_i0); \
1237 if (ins->inst_true_bb->native_offset) \
1238 disp = (ins->inst_true_bb->native_offset - ((guint8*)code - cfg->native_code)) >> 2; \
1241 mono_add_patch_info (cfg, (guint8*)code - cfg->native_code, MONO_PATCH_INFO_BB, ins->inst_true_bb); \
1246 #define DEFAULT_ICC sparc_xcc_short
1248 #define DEFAULT_ICC sparc_icc_short
1252 #define EMIT_COND_BRANCH_ICC(ins,cond,annul,filldelay,icc) \
1256 COMPUTE_DISP(ins); \
1257 predict = (disp != 0) ? 1 : 0; \
1258 g_assert (sparc_is_imm19 (disp)); \
1259 sparc_branchp (code, (annul), cond, icc, (predict), disp); \
1260 if (filldelay) sparc_nop (code); \
1262 #define EMIT_COND_BRANCH(ins,cond,annul,filldelay) EMIT_COND_BRANCH_ICC ((ins), (cond), (annul), (filldelay), (sparc_xcc_short))
1263 #define EMIT_FLOAT_COND_BRANCH(ins,cond,annul,filldelay) \
1267 COMPUTE_DISP(ins); \
1268 predict = (disp != 0) ? 1 : 0; \
1269 g_assert (sparc_is_imm19 (disp)); \
1270 sparc_fbranch (code, (annul), cond, disp); \
1271 if (filldelay) sparc_nop (code); \
1274 #define EMIT_COND_BRANCH_ICC(ins,cond,annul,filldelay,icc) g_assert_not_reached ()
1275 #define EMIT_COND_BRANCH_GENERAL(ins,bop,cond,annul,filldelay) \
1278 COMPUTE_DISP(ins); \
1279 g_assert (sparc_is_imm22 (disp)); \
1280 sparc_ ## bop (code, (annul), cond, disp); \
1281 if (filldelay) sparc_nop (code); \
1283 #define EMIT_COND_BRANCH(ins,cond,annul,filldelay) EMIT_COND_BRANCH_GENERAL((ins),branch,(cond),annul,filldelay)
1284 #define EMIT_FLOAT_COND_BRANCH(ins,cond,annul,filldelay) EMIT_COND_BRANCH_GENERAL((ins),fbranch,(cond),annul,filldelay)
1287 #define EMIT_COND_BRANCH_PREDICTED(ins,cond,annul,filldelay) \
1291 COMPUTE_DISP(ins); \
1292 predict = (disp != 0) ? 1 : 0; \
1293 g_assert (sparc_is_imm19 (disp)); \
1294 sparc_branchp (code, (annul), (cond), DEFAULT_ICC, (predict), disp); \
1295 if (filldelay) sparc_nop (code); \
1298 #define EMIT_COND_BRANCH_BPR(ins,bop,predict,annul,filldelay) \
1301 COMPUTE_DISP(ins); \
1302 g_assert (sparc_is_imm22 (disp)); \
1303 sparc_ ## bop (code, (annul), (predict), ins->sreg1, disp); \
1304 if (filldelay) sparc_nop (code); \
1307 /* emit an exception if condition is fail */
1309 * We put the exception throwing code out-of-line, at the end of the method
1311 #define EMIT_COND_SYSTEM_EXCEPTION_GENERAL(ins,cond,sexc_name,filldelay,icc) do { \
1312 mono_add_patch_info (cfg, (guint8*)(code) - (cfg)->native_code, \
1313 MONO_PATCH_INFO_EXC, sexc_name); \
1315 sparc_branchp (code, 0, (cond), (icc), 0, 0); \
1318 sparc_branch (code, 0, cond, 0); \
1320 if (filldelay) sparc_nop (code); \
1323 #define EMIT_COND_SYSTEM_EXCEPTION(ins,cond,sexc_name) EMIT_COND_SYSTEM_EXCEPTION_GENERAL(ins,cond,sexc_name,TRUE,DEFAULT_ICC)
1325 #define EMIT_COND_SYSTEM_EXCEPTION_BPR(ins,bop,sexc_name) do { \
1326 mono_add_patch_info (cfg, (guint8*)(code) - (cfg)->native_code, \
1327 MONO_PATCH_INFO_EXC, sexc_name); \
1328 sparc_ ## bop (code, FALSE, FALSE, ins->sreg1, 0); \
1332 #define EMIT_ALU_IMM(ins,op,setcc) do { \
1333 if (sparc_is_imm13 ((ins)->inst_imm)) \
1334 sparc_ ## op ## _imm (code, (setcc), (ins)->sreg1, ins->inst_imm, (ins)->dreg); \
1336 sparc_set (code, ins->inst_imm, sparc_o7); \
1337 sparc_ ## op (code, (setcc), (ins)->sreg1, sparc_o7, (ins)->dreg); \
1341 #define EMIT_LOAD_MEMBASE(ins,op) do { \
1342 if (sparc_is_imm13 (ins->inst_offset)) \
1343 sparc_ ## op ## _imm (code, ins->inst_basereg, ins->inst_offset, ins->dreg); \
1345 sparc_set (code, ins->inst_offset, sparc_o7); \
1346 sparc_ ## op (code, ins->inst_basereg, sparc_o7, ins->dreg); \
1351 #define EMIT_STORE_MEMBASE_IMM(ins,op) do { \
1353 if (ins->inst_imm == 0) \
1356 sparc_set (code, ins->inst_imm, sparc_o7); \
1359 if (!sparc_is_imm13 (ins->inst_offset)) { \
1360 sparc_set (code, ins->inst_offset, GP_SCRATCH_REG); \
1361 sparc_ ## op (code, sreg, ins->inst_destbasereg, GP_SCRATCH_REG); \
1364 sparc_ ## op ## _imm (code, sreg, ins->inst_destbasereg, ins->inst_offset); \
1367 #define EMIT_STORE_MEMBASE_REG(ins,op) do { \
1368 if (!sparc_is_imm13 (ins->inst_offset)) { \
1369 sparc_set (code, ins->inst_offset, sparc_o7); \
1370 sparc_ ## op (code, ins->sreg1, ins->inst_destbasereg, sparc_o7); \
1373 sparc_ ## op ## _imm (code, ins->sreg1, ins->inst_destbasereg, ins->inst_offset); \
1376 #define EMIT_CALL() do { \
1378 sparc_set_template (code, sparc_o7); \
1379 sparc_jmpl (code, sparc_o7, sparc_g0, sparc_o7); \
1382 sparc_call_simple (code, 0); \
/*
 * emit_call:
 * Emit a call to a patch target. The direct-resolution fast path is
 * deliberately disabled ("if (0 && ...)"); the normal path records patch
 * info so the 7-instruction call template can be patched later.
 */
1387 extern gboolean mono_compile_aot;
1390 * A call template is 7 instructions long, so we want to avoid it if possible.
1393 emit_call (MonoCompile *cfg, guint32 *code, guint32 patch_type, gconstpointer data)
1397 /* FIXME: This only works if the target method is already compiled */
/* Dead branch: the leading 0 disables direct target resolution. */
1398 if (0 && v64 && !mono_compile_aot) {
1399 MonoJumpInfo patch_info;
1401 patch_info.type = patch_type;
1402 patch_info.data.target = data;
1404 target = mono_resolve_patch_target (cfg->method, cfg->domain, NULL, &patch_info, FALSE);
1406 /* FIXME: Add optimizations if the target is close enough */
1407 sparc_set (code, target, sparc_o7);
1408 sparc_jmpl (code, sparc_o7, sparc_g0, sparc_o7);
1412 mono_add_patch_info (cfg, (guint8*)code - cfg->native_code, patch_type, data);
/*
 * peephole_pass:
 * Local peephole optimizations over one basic block's instruction list:
 * drop multiply-by-1, forward stores into following loads of the same
 * [basereg+offset], collapse redundant load/load and move/move pairs, merge
 * adjacent zero stores into one dword store, and (on 64-bit) fuse
 * compare-with-zero + branch/cond-exc into SPARC BPr-style opcodes.
 * Instructions are removed by unlinking via last_ins->next.
 * NOTE(review): sampled fragment — several case labels and list-maintenance
 * lines fall between the visible lines.
 */
1420 peephole_pass (MonoCompile *cfg, MonoBasicBlock *bb)
1422 MonoInst *ins, *last_ins = NULL;
1427 switch (ins->opcode) {
1429 /* remove unnecessary multiplication with 1 */
1430 if (ins->inst_imm == 1) {
1431 if (ins->dreg != ins->sreg1) {
1432 ins->opcode = OP_MOVE;
1434 last_ins->next = ins->next;
1441 case OP_LOAD_MEMBASE:
1442 case OP_LOADI4_MEMBASE:
1444 * OP_STORE_MEMBASE_REG reg, offset(basereg)
1445 * OP_LOAD_MEMBASE offset(basereg), reg
1447 if (last_ins && (last_ins->opcode == OP_STOREI4_MEMBASE_REG
1448 || last_ins->opcode == OP_STORE_MEMBASE_REG) &&
1449 ins->inst_basereg == last_ins->inst_destbasereg &&
1450 ins->inst_offset == last_ins->inst_offset) {
/* Same register stored then loaded: the load is a no-op, drop it. */
1451 if (ins->dreg == last_ins->sreg1) {
1452 last_ins->next = ins->next;
1456 //static int c = 0; printf ("MATCHX %s %d\n", cfg->method->name,c++);
1457 ins->opcode = OP_MOVE;
1458 ins->sreg1 = last_ins->sreg1;
1462 * Note: reg1 must be different from the basereg in the second load
1463 * OP_LOAD_MEMBASE offset(basereg), reg1
1464 * OP_LOAD_MEMBASE offset(basereg), reg2
1466 * OP_LOAD_MEMBASE offset(basereg), reg1
1467 * OP_MOVE reg1, reg2
1469 } if (last_ins && (last_ins->opcode == OP_LOADI4_MEMBASE
1470 || last_ins->opcode == OP_LOAD_MEMBASE) &&
1471 ins->inst_basereg != last_ins->dreg &&
1472 ins->inst_basereg == last_ins->inst_basereg &&
1473 ins->inst_offset == last_ins->inst_offset) {
1475 if (ins->dreg == last_ins->dreg) {
1476 last_ins->next = ins->next;
1480 ins->opcode = OP_MOVE;
1481 ins->sreg1 = last_ins->dreg;
1484 //g_assert_not_reached ();
1488 * OP_STORE_MEMBASE_IMM imm, offset(basereg)
1489 * OP_LOAD_MEMBASE offset(basereg), reg
1491 * OP_STORE_MEMBASE_IMM imm, offset(basereg)
1492 * OP_ICONST reg, imm
1494 } else if (last_ins && (last_ins->opcode == OP_STOREI4_MEMBASE_IMM
1495 || last_ins->opcode == OP_STORE_MEMBASE_IMM) &&
1496 ins->inst_basereg == last_ins->inst_destbasereg &&
1497 ins->inst_offset == last_ins->inst_offset) {
1498 //static int c = 0; printf ("MATCHX %s %d\n", cfg->method->name,c++);
1499 ins->opcode = OP_ICONST;
1500 ins->inst_c0 = last_ins->inst_imm;
/* Rule has never been exercised: deliberately trap so it gets validated. */
1501 g_assert_not_reached (); // check this rule
1506 case OP_LOADU1_MEMBASE:
1507 case OP_LOADI1_MEMBASE:
1508 if (last_ins && (last_ins->opcode == OP_STOREI1_MEMBASE_REG) &&
1509 ins->inst_basereg == last_ins->inst_destbasereg &&
1510 ins->inst_offset == last_ins->inst_offset) {
1511 if (ins->dreg == last_ins->sreg1) {
1512 last_ins->next = ins->next;
1516 //static int c = 0; printf ("MATCHX %s %d\n", cfg->method->name,c++);
1517 ins->opcode = OP_MOVE;
1518 ins->sreg1 = last_ins->sreg1;
1522 case OP_LOADU2_MEMBASE:
1523 case OP_LOADI2_MEMBASE:
1524 if (last_ins && (last_ins->opcode == OP_STOREI2_MEMBASE_REG) &&
1525 ins->inst_basereg == last_ins->inst_destbasereg &&
1526 ins->inst_offset == last_ins->inst_offset) {
1527 if (ins->dreg == last_ins->sreg1) {
1528 last_ins->next = ins->next;
1532 //static int c = 0; printf ("MATCHX %s %d\n", cfg->method->name,c++);
1533 ins->opcode = OP_MOVE;
1534 ins->sreg1 = last_ins->sreg1;
1538 case OP_STOREI4_MEMBASE_IMM:
1539 /* Convert pairs of 0 stores to a dword 0 store */
1540 /* Used when initializing temporaries */
1541 /* We know sparc_fp is dword aligned */
1542 if (last_ins && (last_ins->opcode == OP_STOREI4_MEMBASE_IMM) &&
1543 (ins->inst_destbasereg == last_ins->inst_destbasereg) &&
1544 (ins->inst_destbasereg == sparc_fp) &&
1545 (ins->inst_offset < 0) &&
1546 ((ins->inst_offset % 8) == 0) &&
1547 ((ins->inst_offset == last_ins->inst_offset - 4)) &&
1548 (ins->inst_imm == 0) &&
1549 (last_ins->inst_imm == 0)) {
1551 last_ins->opcode = OP_STOREI8_MEMBASE_IMM;
1552 last_ins->inst_offset = ins->inst_offset;
1553 last_ins->next = ins->next;
1565 case OP_COND_EXC_EQ:
1566 case OP_COND_EXC_GE:
1567 case OP_COND_EXC_GT:
1568 case OP_COND_EXC_LE:
1569 case OP_COND_EXC_LT:
1570 case OP_COND_EXC_NE_UN:
1572 * Convert compare with zero+branch to BRcc
1575 * This only works in 64 bit mode, since it examines all 64
1576 * bits of the register.
1577 * Only do this if the method is small since BPr only has a 16bit
1580 if (v64 && (((MonoMethodNormal*)cfg->method)->header->code_size < 10000) && last_ins &&
1581 (last_ins->opcode == OP_COMPARE_IMM) &&
1582 (last_ins->inst_imm == 0)) {
1583 MonoInst *next = ins->next;
1584 switch (ins->opcode) {
1586 ins->opcode = OP_SPARC_BRZ;
1589 ins->opcode = OP_SPARC_BRNZ;
1592 ins->opcode = OP_SPARC_BRLZ;
1595 ins->opcode = OP_SPARC_BRGZ;
1598 ins->opcode = OP_SPARC_BRGEZ;
1601 ins->opcode = OP_SPARC_BRLEZ;
1603 case OP_COND_EXC_EQ:
1604 ins->opcode = OP_SPARC_COND_EXC_EQZ;
1606 case OP_COND_EXC_GE:
1607 ins->opcode = OP_SPARC_COND_EXC_GEZ;
1609 case OP_COND_EXC_GT:
1610 ins->opcode = OP_SPARC_COND_EXC_GTZ;
1612 case OP_COND_EXC_LE:
1613 ins->opcode = OP_SPARC_COND_EXC_LEZ;
1615 case OP_COND_EXC_LT:
1616 ins->opcode = OP_SPARC_COND_EXC_LTZ;
1618 case OP_COND_EXC_NE_UN:
1619 ins->opcode = OP_SPARC_COND_EXC_NEZ;
1622 g_assert_not_reached ();
/* The fused opcode now tests last_ins's source register directly. */
1624 ins->sreg1 = last_ins->sreg1;
1626 last_ins->next = next;
1637 if (ins->dreg == ins->sreg1) {
1639 last_ins->next = ins->next;
1644 * OP_MOVE sreg, dreg
1645 * OP_MOVE dreg, sreg
1647 if (last_ins && last_ins->opcode == OP_MOVE &&
1648 ins->sreg1 == last_ins->dreg &&
1649 ins->dreg == last_ins->sreg1) {
1650 last_ins->next = ins->next;
1659 bb->last_ins = last_ins;
1662 /* Parameters used by the register allocator */
1664 /* Use %l4..%l7 as local registers */
1665 #define ARCH_CALLER_REGS (0xf0<<16)
1668 /* Use %d34..%d62 as the double precision floating point local registers */
1669 /* %d32 has the same encoding as %f1, so %d36%d38 == 0b1010 == 0xa */
1670 #define ARCH_CALLER_FREGS (0xaaaaaaa8)
1672 /* Use %f2..%f30 as the double precision floating point local registers */
1673 #define ARCH_CALLER_FREGS (0x55555554)
1677 #define DEBUG(a) if (cfg->verbose_level > 1) a
1679 #define reg_is_freeable(r) ((1 << (r)) & ARCH_CALLER_REGS)
1680 #define freg_is_freeable(r) (((1) << (r)) & ARCH_CALLER_FREGS)
1689 static const char*const * ins_spec = sparc_desc;
1691 static inline const char*
1692 get_ins_spec (int opcode)
1694 if (ins_spec [opcode])
1695 return ins_spec [opcode];
1697 return ins_spec [CEE_ADD];
1701 print_ins (int i, MonoInst *ins)
1703 const char *spec = get_ins_spec (ins->opcode);
1704 g_print ("\t%-2d %s", i, mono_inst_name (ins->opcode));
1705 if (spec [MONO_INST_DEST]) {
1706 if (ins->dreg >= MONO_MAX_IREGS)
1707 g_print (" R%d <-", ins->dreg);
1709 if (spec [MONO_INST_DEST] == 'b')
1710 g_print (" [%s + 0x%lx] <-", mono_arch_regname (ins->dreg), (long)ins->inst_offset);
1712 g_print (" %s <-", mono_arch_regname (ins->dreg));
1714 if (spec [MONO_INST_SRC1]) {
1715 if (ins->sreg1 >= MONO_MAX_IREGS)
1716 g_print (" R%d", ins->sreg1);
1718 if (spec [MONO_INST_SRC1] == 'b')
1719 g_print (" [%s + 0x%lx]", mono_arch_regname (ins->sreg1), (long)ins->inst_offset);
1721 g_print (" %s", mono_arch_regname (ins->sreg1));
1723 if (spec [MONO_INST_SRC2]) {
1724 if (ins->sreg2 >= MONO_MAX_IREGS)
1725 g_print (" R%d", ins->sreg2);
1727 g_print (" %s", mono_arch_regname (ins->sreg2));
1729 if (spec [MONO_INST_CLOB])
1730 g_print (" clobbers: %c", spec [MONO_INST_CLOB]);
1735 print_regtrack (RegTrack *t, int num)
1741 for (i = 0; i < num; ++i) {
1744 if (i >= MONO_MAX_IREGS) {
1745 g_snprintf (buf, sizeof(buf), "R%d", i);
1748 r = mono_arch_regname (i);
1749 g_print ("liveness: %s [%d - %d]\n", r, t [i].born_in, t[i].last_use);
1753 typedef struct InstList InstList;
1761 static inline InstList*
1762 inst_list_prepend (MonoMemPool *pool, InstList *list, MonoInst *data)
1764 InstList *item = mono_mempool_alloc (pool, sizeof (InstList));
1773 #define STACK_OFFSETS_POSITIVE
1776 * returns the offset used by spillvar. It allocates a new
1777 * spill variable if necessary.
1780 mono_spillvar_offset (MonoCompile *cfg, int spillvar)
1782 MonoSpillInfo **si, *info;
1785 si = &cfg->spill_info;
1787 while (i <= spillvar) {
1790 *si = info = mono_mempool_alloc (cfg->mempool, sizeof (MonoSpillInfo));
1792 cfg->stack_offset += sizeof (gpointer);
1793 info->offset = - cfg->stack_offset;
1797 return MONO_SPARC_STACK_BIAS + (*si)->offset;
1803 g_assert_not_reached ();
1808 mono_spillvar_offset_float (MonoCompile *cfg, int spillvar)
1810 MonoSpillInfo **si, *info;
1813 si = &cfg->spill_info_float;
1815 while (i <= spillvar) {
1818 *si = info = mono_mempool_alloc (cfg->mempool, sizeof (MonoSpillInfo));
1820 cfg->stack_offset += sizeof (double);
1821 cfg->stack_offset = ALIGN_TO (cfg->stack_offset, 8);
1822 info->offset = - cfg->stack_offset;
1826 return MONO_SPARC_STACK_BIAS + (*si)->offset;
1832 g_assert_not_reached ();
1837 * Force the spilling of the variable in the symbolic register 'reg'.
1839 G_GNUC_UNUSED static int
1840 get_register_force_spilling (MonoCompile *cfg, InstList *item, MonoInst *ins, int reg)
1845 sel = cfg->rs->iassign [reg];
1846 /*i = cfg->rs->isymbolic [sel];
1847 g_assert (i == reg);*/
1849 spill = ++cfg->spill_count;
1850 cfg->rs->iassign [i] = -spill - 1;
1851 mono_regstate_free_int (cfg->rs, sel);
1852 /* we need to create a spill var and insert a load to sel after the current instruction */
1853 MONO_INST_NEW (cfg, load, OP_LOAD_MEMBASE);
1855 load->inst_basereg = cfg->frame_reg;
1856 load->inst_offset = mono_spillvar_offset (cfg, spill);
1858 while (ins->next != item->prev->data)
1861 load->next = ins->next;
1863 DEBUG (g_print ("SPILLED LOAD (%d at 0x%08lx(%%sp)) R%d (freed %s)\n", spill, (long)load->inst_offset, i, mono_arch_regname (sel)));
1864 i = mono_regstate_alloc_int (cfg->rs, 1 << sel);
1865 g_assert (i == sel);
1871 get_register_spilling (MonoCompile *cfg, InstList *item, MonoInst *ins, guint32 regmask, int reg)
1876 DEBUG (g_print ("start regmask to assign R%d: 0x%08x (R%d <- R%d R%d)\n", reg, regmask, ins->dreg, ins->sreg1, ins->sreg2));
1877 /* exclude the registers in the current instruction */
1878 if (reg != ins->sreg1 && (reg_is_freeable (ins->sreg1) || (ins->sreg1 >= MONO_MAX_IREGS && cfg->rs->iassign [ins->sreg1] >= 0))) {
1879 if (ins->sreg1 >= MONO_MAX_IREGS)
1880 regmask &= ~ (1 << cfg->rs->iassign [ins->sreg1]);
1882 regmask &= ~ (1 << ins->sreg1);
1883 DEBUG (g_print ("excluding sreg1 %s\n", mono_arch_regname (ins->sreg1)));
1885 if (reg != ins->sreg2 && (reg_is_freeable (ins->sreg2) || (ins->sreg2 >= MONO_MAX_IREGS && cfg->rs->iassign [ins->sreg2] >= 0))) {
1886 if (ins->sreg2 >= MONO_MAX_IREGS)
1887 regmask &= ~ (1 << cfg->rs->iassign [ins->sreg2]);
1889 regmask &= ~ (1 << ins->sreg2);
1890 DEBUG (g_print ("excluding sreg2 %s %d\n", mono_arch_regname (ins->sreg2), ins->sreg2));
1892 if (reg != ins->dreg && reg_is_freeable (ins->dreg)) {
1893 regmask &= ~ (1 << ins->dreg);
1894 DEBUG (g_print ("excluding dreg %s\n", mono_arch_regname (ins->dreg)));
1897 DEBUG (g_print ("available regmask: 0x%08x\n", regmask));
1898 g_assert (regmask); /* need at least a register we can free */
1900 /* we should track prev_use and spill the register that's farther */
1901 for (i = 0; i < MONO_MAX_IREGS; ++i) {
1902 if (regmask & (1 << i)) {
1904 DEBUG (g_print ("selected register %s has assignment %d\n", mono_arch_regname (sel), cfg->rs->iassign [sel]));
1908 i = cfg->rs->isymbolic [sel];
1909 spill = ++cfg->spill_count;
1910 cfg->rs->iassign [i] = -spill - 1;
1911 mono_regstate_free_int (cfg->rs, sel);
1912 /* we need to create a spill var and insert a load to sel after the current instruction */
1913 MONO_INST_NEW (cfg, load, OP_LOAD_MEMBASE);
1915 load->inst_basereg = cfg->frame_reg;
1916 load->inst_offset = mono_spillvar_offset (cfg, spill);
1918 while (ins->next != item->prev->data)
1921 load->next = ins->next;
1923 DEBUG (g_print ("SPILLED LOAD (%d at 0x%08lx(%%sp)) R%d (freed %s)\n", spill, (long)load->inst_offset, i, mono_arch_regname (sel)));
1924 i = mono_regstate_alloc_int (cfg->rs, 1 << sel);
1925 g_assert (i == sel);
1931 get_float_register_spilling (MonoCompile *cfg, InstList *item, MonoInst *ins, guint32 regmask, int reg)
1936 DEBUG (g_print ("start regmask to assign R%d: 0x%08x (R%d <- R%d R%d)\n", reg, regmask, ins->dreg, ins->sreg1, ins->sreg2));
1937 /* exclude the registers in the current instruction */
1938 if (reg != ins->sreg1 && (freg_is_freeable (ins->sreg1) || (ins->sreg1 >= MONO_MAX_FREGS && cfg->rs->fassign [ins->sreg1] >= 0))) {
1939 if (ins->sreg1 >= MONO_MAX_FREGS)
1940 regmask &= ~ (1 << cfg->rs->fassign [ins->sreg1]);
1942 regmask &= ~ (1 << ins->sreg1);
1943 DEBUG (g_print ("excluding sreg1 %s\n", mono_arch_regname (ins->sreg1)));
1945 if (reg != ins->sreg2 && (freg_is_freeable (ins->sreg2) || (ins->sreg2 >= MONO_MAX_FREGS && cfg->rs->fassign [ins->sreg2] >= 0))) {
1946 if (ins->sreg2 >= MONO_MAX_FREGS)
1947 regmask &= ~ (1 << cfg->rs->fassign [ins->sreg2]);
1949 regmask &= ~ (1 << ins->sreg2);
1950 DEBUG (g_print ("excluding sreg2 %s %d\n", mono_arch_regname (ins->sreg2), ins->sreg2));
1952 if (reg != ins->dreg && freg_is_freeable (ins->dreg)) {
1953 regmask &= ~ (1 << ins->dreg);
1954 DEBUG (g_print ("excluding dreg %s\n", mono_arch_regname (ins->dreg)));
1957 DEBUG (g_print ("available regmask: 0x%08x\n", regmask));
1958 g_assert (regmask); /* need at least a register we can free */
1960 /* we should track prev_use and spill the register that's farther */
1961 for (i = 0; i < MONO_MAX_FREGS; ++i) {
1962 if (regmask & (1 << i)) {
1964 DEBUG (g_print ("selected register %s has assignment %d\n", mono_arch_regname (sel), cfg->rs->fassign [sel]));
1968 i = cfg->rs->fsymbolic [sel];
1969 spill = ++cfg->spill_count;
1970 cfg->rs->fassign [i] = -spill - 1;
1971 mono_regstate_free_float(cfg->rs, sel);
1972 /* we need to create a spill var and insert a load to sel after the current instruction */
1973 MONO_INST_NEW (cfg, load, OP_LOADR8_MEMBASE);
1975 load->inst_basereg = cfg->frame_reg;
1976 load->inst_offset = mono_spillvar_offset_float (cfg, spill);
1978 while (ins->next != item->prev->data)
1981 load->next = ins->next;
1983 DEBUG (g_print ("SPILLED LOAD (%d at 0x%08lx(%%sp)) R%d (freed %s)\n", spill, (long)load->inst_offset, i, mono_arch_regname (sel)));
1984 i = mono_regstate_alloc_float (cfg->rs, 1 << sel);
1985 g_assert (i == sel);
1991 create_copy_ins (MonoCompile *cfg, int dest, int src, MonoInst *ins)
1994 MONO_INST_NEW (cfg, copy, OP_MOVE);
1998 copy->next = ins->next;
2001 DEBUG (g_print ("\tforced copy from %s to %s\n", mono_arch_regname (src), mono_arch_regname (dest)));
2005 G_GNUC_UNUSED static MonoInst*
2006 create_copy_ins_float (MonoCompile *cfg, int dest, int src, MonoInst *ins)
2009 MONO_INST_NEW (cfg, copy, OP_FMOVE);
2013 copy->next = ins->next;
2016 DEBUG (g_print ("\tforced copy from %s to %s\n", mono_arch_regname (src), mono_arch_regname (dest)));
2021 create_spilled_store (MonoCompile *cfg, int spill, int reg, int prev_reg, MonoInst *ins)
2024 MONO_INST_NEW (cfg, store, OP_STORE_MEMBASE_REG);
2026 store->inst_destbasereg = cfg->frame_reg;
2027 store->inst_offset = mono_spillvar_offset (cfg, spill);
2029 store->next = ins->next;
2032 DEBUG (g_print ("SPILLED STORE (%d at 0x%08lx(%%sp)) R%d (from %s)\n", spill, (long)store->inst_offset, prev_reg, mono_arch_regname (reg)));
2037 create_spilled_store_float (MonoCompile *cfg, int spill, int reg, int prev_reg, MonoInst *ins)
2040 MONO_INST_NEW (cfg, store, OP_STORER8_MEMBASE_REG);
2042 store->inst_destbasereg = cfg->frame_reg;
2043 store->inst_offset = mono_spillvar_offset_float (cfg, spill);
2045 store->next = ins->next;
2048 DEBUG (g_print ("SPILLED STORE (%d at 0x%08lx(%%sp)) R%d (from %s)\n", spill, (long)store->inst_offset, prev_reg, mono_arch_regname (reg)));
2053 insert_before_ins (MonoInst *ins, InstList *item, MonoInst* to_insert)
2056 g_assert (item->next);
2057 prev = item->next->data;
2059 while (prev->next != ins)
2061 to_insert->next = ins;
2062 prev->next = to_insert;
2064 * needed otherwise in the next instruction we can add an ins to the
2065 * end and that would get past this instruction.
2067 item->data = to_insert;
2070 G_GNUC_UNUSED static int
2071 alloc_int_reg (MonoCompile *cfg, InstList *curinst, MonoInst *ins, int sym_reg, guint32 allow_mask)
2073 int val = cfg->rs->iassign [sym_reg];
2077 /* the register gets spilled after this inst */
2080 val = mono_regstate_alloc_int (cfg->rs, allow_mask);
2082 val = get_register_spilling (cfg, curinst, ins, allow_mask, sym_reg);
2083 cfg->rs->iassign [sym_reg] = val;
2084 /* add option to store before the instruction for src registers */
2086 create_spilled_store (cfg, spill, val, sym_reg, ins);
2088 cfg->rs->isymbolic [val] = sym_reg;
2092 /* FIXME: Strange loads from the stack in basic-float.cs:test_2_rem */
2095 * Local register allocation.
2096 * We first scan the list of instructions and we save the liveness info of
2097 * each register (when the register is first used, when its value is set etc.).
2098 * We also reverse the list of instructions (in the InstList list) because assigning
2099 * registers backwards allows for more tricks to be used.
2102 mono_arch_local_regalloc (MonoCompile *cfg, MonoBasicBlock *bb)
2105 MonoRegState *rs = cfg->rs;
2107 RegTrack *reginfo, *reginfof;
2108 RegTrack *reginfo1, *reginfo2, *reginfod;
2109 InstList *tmp, *reversed = NULL;
2111 guint32 src1_mask, src2_mask, dest_mask;
2112 guint32 cur_iregs, cur_fregs;
2114 /* FIXME: Use caller saved regs and %i1-%i2 for allocation */
2118 rs->next_vireg = bb->max_ireg;
2119 rs->next_vfreg = bb->max_freg;
2120 mono_regstate_assign (rs);
2121 reginfo = mono_mempool_alloc0 (cfg->mempool, sizeof (RegTrack) * rs->next_vireg);
2122 reginfof = mono_mempool_alloc0 (cfg->mempool, sizeof (RegTrack) * rs->next_vfreg);
2123 rs->ifree_mask = ARCH_CALLER_REGS;
2124 rs->ffree_mask = ARCH_CALLER_FREGS;
2128 DEBUG (g_print ("LOCAL regalloc: basic block: %d\n", bb->block_num));
2129 /* forward pass on the instructions to collect register liveness info */
2131 spec = ins_spec [ins->opcode];
2134 spec = ins_spec [CEE_ADD];
2136 DEBUG (print_ins (i, ins));
2138 if (spec [MONO_INST_SRC1]) {
2139 if (spec [MONO_INST_SRC1] == 'f')
2140 reginfo1 = reginfof;
2143 reginfo1 [ins->sreg1].prev_use = reginfo1 [ins->sreg1].last_use;
2144 reginfo1 [ins->sreg1].last_use = i;
2148 if (spec [MONO_INST_SRC2]) {
2149 if (spec [MONO_INST_SRC2] == 'f')
2150 reginfo2 = reginfof;
2153 reginfo2 [ins->sreg2].prev_use = reginfo2 [ins->sreg2].last_use;
2154 reginfo2 [ins->sreg2].last_use = i;
2158 if (spec [MONO_INST_DEST]) {
2159 if (spec [MONO_INST_DEST] == 'f')
2160 reginfod = reginfof;
2163 if (spec [MONO_INST_DEST] != 'b') /* it's not just a base register */
2164 reginfod [ins->dreg].killed_in = i;
2165 reginfod [ins->dreg].prev_use = reginfod [ins->dreg].last_use;
2166 reginfod [ins->dreg].last_use = i;
2167 if (reginfod [ins->dreg].born_in == 0 || reginfod [ins->dreg].born_in > i)
2168 reginfod [ins->dreg].born_in = i;
2169 if (!v64 && (spec [MONO_INST_DEST] == 'l')) {
2170 /* result in a regpair, the virtual register is allocated sequentially */
2171 reginfod [ins->dreg + 1].prev_use = reginfod [ins->dreg + 1].last_use;
2172 reginfod [ins->dreg + 1].last_use = i;
2173 if (reginfod [ins->dreg + 1].born_in == 0 || reginfod [ins->dreg + 1].born_in > i)
2174 reginfod [ins->dreg + 1].born_in = i;
2179 reversed = inst_list_prepend (cfg->mempool, reversed, ins);
2184 cur_iregs = ARCH_CALLER_REGS;
2185 cur_fregs = ARCH_CALLER_FREGS;
2187 DEBUG (print_regtrack (reginfo, rs->next_vireg));
2188 DEBUG (print_regtrack (reginfof, rs->next_vfreg));
2191 int prev_dreg, prev_sreg1, prev_sreg2;
2194 spec = ins_spec [ins->opcode];
2196 spec = ins_spec [CEE_ADD];
2197 DEBUG (g_print ("processing:"));
2198 DEBUG (print_ins (i, ins));
2200 /* make the register available for allocation: FIXME add fp reg */
2201 if (ins->opcode == OP_SETREG || ins->opcode == OP_SETREGIMM) {
2202 /* Dont free register which can't be allocated */
2203 if (reg_is_freeable (ins->dreg)) {
2204 cur_iregs |= 1 << ins->dreg;
2205 DEBUG (g_print ("adding %d to cur_iregs\n", ins->dreg));
2207 } else if (ins->opcode == OP_SETFREG) {
2208 if (freg_is_freeable (ins->dreg)) {
2209 cur_fregs |= 1 << ins->dreg;
2210 DEBUG (g_print ("adding %d to cur_fregs\n", ins->dreg));
2212 } else if (spec [MONO_INST_CLOB] == 'c') {
2213 MonoCallInst *cinst = (MonoCallInst*)ins;
2214 DEBUG (g_print ("excluding regs 0x%lx from cur_iregs (0x%x)\n", (long)cinst->used_iregs, cur_iregs));
2215 cur_iregs &= ~cinst->used_iregs;
2216 cur_fregs &= ~cinst->used_fregs;
2217 DEBUG (g_print ("available cur_iregs: 0x%x\n", cur_iregs));
2218 /* registers used by the calling convention are excluded from
2219 * allocation: they will be selectively enabled when they are
2220 * assigned by the special SETREG opcodes.
2223 dest_mask = src1_mask = src2_mask = cur_iregs;
2228 /* update for use with FP regs... */
2229 if (spec [MONO_INST_DEST] == 'f') {
2230 if (ins->dreg >= MONO_MAX_FREGS) {
2231 val = rs->fassign [ins->dreg];
2232 prev_dreg = ins->dreg;
2236 /* the register gets spilled after this inst */
2239 dest_mask = cur_fregs;
2240 val = mono_regstate_alloc_float (rs, dest_mask);
2242 val = get_float_register_spilling (cfg, tmp, ins, dest_mask, ins->dreg);
2243 rs->fassign [ins->dreg] = val;
2245 create_spilled_store_float (cfg, spill, val, prev_dreg, ins);
2247 DEBUG (g_print ("\tassigned dreg %s to dest R%d\n", mono_arch_regname (val), ins->dreg));
2248 rs->fsymbolic [val] = prev_dreg;
2253 if (freg_is_freeable (ins->dreg) && prev_dreg >= 0 && (reginfo [prev_dreg].born_in >= i || !(cur_fregs & (1 << ins->dreg)))) {
2254 DEBUG (g_print ("\tfreeable %s (R%d) (born in %d)\n", mono_arch_regname (ins->dreg), prev_dreg, reginfo [prev_dreg].born_in));
2255 mono_regstate_free_float (rs, ins->dreg);
2257 } else if (ins->dreg >= MONO_MAX_IREGS) {
2258 val = rs->iassign [ins->dreg];
2259 prev_dreg = ins->dreg;
2263 /* the register gets spilled after this inst */
2266 val = mono_regstate_alloc_int (rs, dest_mask);
2268 val = get_register_spilling (cfg, tmp, ins, dest_mask, ins->dreg);
2269 rs->iassign [ins->dreg] = val;
2271 create_spilled_store (cfg, spill, val, prev_dreg, ins);
2273 DEBUG (g_print ("\tassigned dreg %s to dest R%d\n", mono_arch_regname (val), ins->dreg));
2274 rs->isymbolic [val] = prev_dreg;
2276 if (!v64 && spec [MONO_INST_DEST] == 'l') {
2277 int hreg = prev_dreg + 1;
2278 val = rs->iassign [hreg];
2282 /* the register gets spilled after this inst */
2285 /* The second register must be a pair of the first */
2286 dest_mask = 1 << (rs->iassign [prev_dreg] + 1);
2287 val = mono_regstate_alloc_int (rs, dest_mask);
2289 val = get_register_spilling (cfg, tmp, ins, dest_mask, hreg);
2290 rs->iassign [hreg] = val;
2292 create_spilled_store (cfg, spill, val, hreg, ins);
2295 /* The second register must be a pair of the first */
2296 if (val != rs->iassign [prev_dreg] + 1) {
2297 dest_mask = 1 << (rs->iassign [prev_dreg] + 1);
2299 val = mono_regstate_alloc_int (rs, dest_mask);
2301 val = get_register_spilling (cfg, tmp, ins, dest_mask, hreg);
2303 create_copy_ins (cfg, rs->iassign [hreg], val, ins);
2305 rs->iassign [hreg] = val;
2309 DEBUG (g_print ("\tassigned hreg %s to dest R%d\n", mono_arch_regname (val), hreg));
2310 rs->isymbolic [val] = hreg;
2312 if (reg_is_freeable (val) && hreg >= 0 && (reginfo [hreg].born_in >= i && !(cur_iregs & (1 << val)))) {
2313 DEBUG (g_print ("\tfreeable %s (R%d)\n", mono_arch_regname (val), hreg));
2314 mono_regstate_free_int (rs, val);
2320 if (spec [MONO_INST_DEST] != 'f' && reg_is_freeable (ins->dreg) && prev_dreg >= 0 && (reginfo [prev_dreg].born_in >= i)) {
2321 DEBUG (g_print ("\tfreeable %s (R%d) (born in %d)\n", mono_arch_regname (ins->dreg), prev_dreg, reginfo [prev_dreg].born_in));
2322 mono_regstate_free_int (rs, ins->dreg);
2328 if (spec [MONO_INST_SRC1] == 'f') {
2329 if (ins->sreg1 >= MONO_MAX_FREGS) {
2330 val = rs->fassign [ins->sreg1];
2331 prev_sreg1 = ins->sreg1;
2335 /* the register gets spilled after this inst */
2338 //g_assert (val == -1); /* source cannot be spilled */
2339 src1_mask = cur_fregs;
2340 val = mono_regstate_alloc_float (rs, src1_mask);
2342 val = get_float_register_spilling (cfg, tmp, ins, src1_mask, ins->sreg1);
2343 rs->fassign [ins->sreg1] = val;
2344 DEBUG (g_print ("\tassigned sreg1 %s to R%d\n", mono_arch_regname (val), ins->sreg1));
2346 MonoInst *store = create_spilled_store_float (cfg, spill, val, prev_sreg1, NULL);
2347 insert_before_ins (ins, tmp, store);
2350 rs->fsymbolic [val] = prev_sreg1;
2355 } else if (ins->sreg1 >= MONO_MAX_IREGS) {
2356 val = rs->iassign [ins->sreg1];
2357 prev_sreg1 = ins->sreg1;
2361 /* the register gets spilled after this inst */
2364 if (0 && (ins->opcode == OP_MOVE) && reg_is_freeable (ins->dreg)) {
2366 * small optimization: the dest register is already allocated
2367 * but the src one is not: we can simply assign the same register
2368 * here and peephole will get rid of the instruction later.
2369 * This optimization may interfere with the clobbering handling:
2370 * it removes a mov operation that will be added again to handle clobbering.
2371 * There are also some other issues that show up with make testjit.
2373 mono_regstate_alloc_int (rs, 1 << ins->dreg);
2374 val = rs->iassign [ins->sreg1] = ins->dreg;
2375 //g_assert (val >= 0);
2376 DEBUG (g_print ("\tfast assigned sreg1 %s to R%d\n", mono_arch_regname (val), ins->sreg1));
2378 //g_assert (val == -1); /* source cannot be spilled */
2379 val = mono_regstate_alloc_int (rs, src1_mask);
2381 val = get_register_spilling (cfg, tmp, ins, src1_mask, ins->sreg1);
2382 rs->iassign [ins->sreg1] = val;
2383 DEBUG (g_print ("\tassigned sreg1 %s to R%d\n", mono_arch_regname (val), ins->sreg1));
2386 MonoInst *store = create_spilled_store (cfg, spill, val, prev_sreg1, NULL);
2387 insert_before_ins (ins, tmp, store);
2390 rs->isymbolic [val] = prev_sreg1;
2399 if (spec [MONO_INST_SRC2] == 'f') {
2400 if (ins->sreg2 >= MONO_MAX_FREGS) {
2401 val = rs->fassign [ins->sreg2];
2402 prev_sreg2 = ins->sreg2;
2406 /* the register gets spilled after this inst */
2409 src2_mask = cur_fregs;
2410 val = mono_regstate_alloc_float (rs, src2_mask);
2412 val = get_float_register_spilling (cfg, tmp, ins, src2_mask, ins->sreg2);
2413 rs->fassign [ins->sreg2] = val;
2414 DEBUG (g_print ("\tassigned sreg2 %s to R%d\n", mono_arch_regname (val), ins->sreg2));
2416 create_spilled_store_float (cfg, spill, val, prev_sreg2, ins);
2418 rs->fsymbolic [val] = prev_sreg2;
2423 } else if (ins->sreg2 >= MONO_MAX_IREGS) {
2424 val = rs->iassign [ins->sreg2];
2425 prev_sreg2 = ins->sreg2;
2429 /* the register gets spilled after this inst */
2432 val = mono_regstate_alloc_int (rs, src2_mask);
2434 val = get_register_spilling (cfg, tmp, ins, src2_mask, ins->sreg2);
2435 rs->iassign [ins->sreg2] = val;
2436 DEBUG (g_print ("\tassigned sreg2 %s to R%d\n", mono_arch_regname (val), ins->sreg2));
2438 create_spilled_store (cfg, spill, val, prev_sreg2, ins);
2440 rs->isymbolic [val] = prev_sreg2;
2446 if (spec [MONO_INST_CLOB] == 'c') {
2448 guint32 clob_mask = ARCH_CALLER_REGS;
2449 for (j = 0; j < MONO_MAX_IREGS; ++j) {
2451 if ((clob_mask & s) && !(rs->ifree_mask & s) && j != ins->sreg1) {
2452 //g_warning ("register %s busy at call site\n", mono_arch_regname (j));
2456 /*if (reg_is_freeable (ins->sreg1) && prev_sreg1 >= 0 && reginfo [prev_sreg1].born_in >= i) {
2457 DEBUG (g_print ("freeable %s\n", mono_arch_regname (ins->sreg1)));
2458 mono_regstate_free_int (rs, ins->sreg1);
2460 if (reg_is_freeable (ins->sreg2) && prev_sreg2 >= 0 && reginfo [prev_sreg2].born_in >= i) {
2461 DEBUG (g_print ("freeable %s\n", mono_arch_regname (ins->sreg2)));
2462 mono_regstate_free_int (rs, ins->sreg2);
2465 //DEBUG (print_ins (i, ins));
2472 sparc_patch (guint32 *code, const gpointer target)
2475 guint32 ins = *code;
2476 guint32 op = ins >> 30;
2477 guint32 op2 = (ins >> 22) & 0x7;
2478 guint32 rd = (ins >> 25) & 0x1f;
2479 guint8* target8 = (guint8*)target;
2480 gint64 disp = (target8 - (guint8*)code) >> 2;
2483 // g_print ("patching 0x%08x (0x%08x) to point to 0x%08x\n", code, ins, target);
2485 if ((op == 0) && (op2 == 2)) {
2486 if (!sparc_is_imm22 (disp))
2489 *code = ((ins >> 22) << 22) | (disp & 0x3fffff);
2491 else if ((op == 0) && (op2 == 1)) {
2492 if (!sparc_is_imm19 (disp))
2495 *code = ((ins >> 19) << 19) | (disp & 0x7ffff);
2497 else if ((op == 0) && (op2 == 3)) {
2498 if (!sparc_is_imm16 (disp))
2501 *code &= ~(0x180000 | 0x3fff);
2502 *code |= ((disp << 21) & (0x180000)) | (disp & 0x3fff);
2504 else if ((op == 0) && (op2 == 6)) {
2505 if (!sparc_is_imm22 (disp))
2508 *code = ((ins >> 22) << 22) | (disp & 0x3fffff);
2510 else if ((op == 0) && (op2 == 4)) {
2511 guint32 ins2 = code [1];
2513 if (((ins2 >> 30) == 2) && (((ins2 >> 19) & 0x3f) == 2)) {
2514 /* sethi followed by or */
2516 sparc_set (p, target8, rd);
2517 while (p <= (code + 1))
2520 else if (ins2 == 0x01000000) {
2521 /* sethi followed by nop */
2523 sparc_set (p, target8, rd);
2524 while (p <= (code + 1))
2527 else if ((sparc_inst_op (ins2) == 3) && (sparc_inst_imm (ins2))) {
2528 /* sethi followed by load/store */
2530 guint32 t = (guint32)target8;
2531 *code &= ~(0x3fffff);
2533 *(code + 1) &= ~(0x3ff);
2534 *(code + 1) |= (t & 0x3ff);
2538 (sparc_inst_rd (ins) == sparc_g1) &&
2539 (sparc_inst_op (c [1]) == 0) && (sparc_inst_op2 (c [1]) == 4) &&
2540 (sparc_inst_op (c [2]) == 2) && (sparc_inst_op3 (c [2]) == 2) &&
2541 (sparc_inst_op (c [3]) == 2) && (sparc_inst_op3 (c [3]) == 2))
2545 reg = sparc_inst_rd (c [1]);
2546 sparc_set (p, target8, reg);
2550 else if ((sparc_inst_op (ins2) == 2) && (sparc_inst_op3 (ins2) == 0x38) &&
2551 (sparc_inst_imm (ins2))) {
2552 /* sethi followed by jmpl */
2554 guint32 t = (guint32)target8;
2555 *code &= ~(0x3fffff);
2557 *(code + 1) &= ~(0x3ff);
2558 *(code + 1) |= (t & 0x3ff);
2564 else if (op == 01) {
2565 gint64 disp = (target8 - (guint8*)code) >> 2;
2567 if (!sparc_is_imm30 (disp))
2569 sparc_call_simple (code, target8 - (guint8*)code);
2571 else if ((op == 2) && (sparc_inst_op3 (ins) == 0x2) && sparc_inst_imm (ins)) {
2573 g_assert (sparc_is_imm13 (target8));
2575 *code |= (guint32)target8;
2577 else if ((sparc_inst_op (ins) == 2) && (sparc_inst_op3 (ins) == 0x7)) {
2578 /* sparc_set case 5. */
2582 reg = sparc_inst_rd (c [3]);
2583 sparc_set (p, target, reg);
2590 // g_print ("patched with 0x%08x\n", ins);
2594 * mono_sparc_emit_save_lmf:
2596 * Emit the code necessary to push a new entry onto the lmf stack. Used by
2597 * trampolines as well.
2600 mono_sparc_emit_save_lmf (guint32 *code, guint32 lmf_offset)
2603 sparc_sti_imm (code, sparc_o0, sparc_fp, lmf_offset + G_STRUCT_OFFSET (MonoLMF, lmf_addr));
2604 /* Save previous_lmf */
2605 sparc_ldi (code, sparc_o0, sparc_g0, sparc_o7);
2606 sparc_sti_imm (code, sparc_o7, sparc_fp, lmf_offset + G_STRUCT_OFFSET (MonoLMF, previous_lmf));
2608 sparc_add_imm (code, FALSE, sparc_fp, lmf_offset, sparc_o7);
2609 sparc_sti (code, sparc_o7, sparc_o0, sparc_g0);
2615 mono_sparc_emit_restore_lmf (guint32 *code, guint32 lmf_offset)
2617 /* Load previous_lmf */
2618 sparc_ldi_imm (code, sparc_fp, lmf_offset + G_STRUCT_OFFSET (MonoLMF, previous_lmf), sparc_l0);
2620 sparc_ldi_imm (code, sparc_fp, lmf_offset + G_STRUCT_OFFSET (MonoLMF, lmf_addr), sparc_l1);
2621 /* *(lmf) = previous_lmf */
2622 sparc_sti (code, sparc_l0, sparc_l1, sparc_g0);
2627 emit_save_sp_to_lmf (MonoCompile *cfg, guint32 *code)
2630 * Since register windows are saved to the current value of %sp, we need to
2631 * set the sp field in the lmf before the call, not in the prolog.
2633 if (cfg->method->save_lmf) {
2634 gint32 lmf_offset = MONO_SPARC_STACK_BIAS - cfg->arch.lmf_offset;
2637 sparc_sti_imm (code, sparc_sp, sparc_fp, lmf_offset + G_STRUCT_OFFSET (MonoLMF, sp));
2644 emit_vret_token (MonoInst *ins, guint32 *code)
2646 MonoCallInst *call = (MonoCallInst*)ins;
2650 * The sparc ABI requires that calls to functions which return a structure
2651 * contain an additional unimpl instruction which is checked by the callee.
2653 if (call->signature->pinvoke && MONO_TYPE_ISSTRUCT(call->signature->ret)) {
2654 if (call->signature->ret->type == MONO_TYPE_TYPEDBYREF)
2655 size = mono_type_stack_size (call->signature->ret, NULL);
2657 size = mono_class_native_size (call->signature->ret->data.klass, NULL);
2658 sparc_unimp (code, size & 0xfff);
2665 emit_move_return_value (MonoInst *ins, guint32 *code)
2667 /* Move return value to the target register */
2668 /* FIXME: do this in the local reg allocator */
2669 switch (ins->opcode) {
2671 case OP_VOIDCALL_REG:
2672 case OP_VOIDCALL_MEMBASE:
2676 case OP_CALL_MEMBASE:
2677 sparc_mov_reg_reg (code, sparc_o0, ins->dreg);
2681 case OP_LCALL_MEMBASE:
2683 * ins->dreg is the least significant reg due to the lreg: LCALL rule
2687 sparc_mov_reg_reg (code, sparc_o0, ins->dreg);
2689 sparc_mov_reg_reg (code, sparc_o0, ins->dreg + 1);
2690 sparc_mov_reg_reg (code, sparc_o1, ins->dreg);
2695 case OP_FCALL_MEMBASE:
2697 if (((MonoCallInst*)ins)->signature->ret->type == MONO_TYPE_R4) {
2698 sparc_fmovs (code, sparc_f0, ins->dreg);
2699 sparc_fstod (code, ins->dreg, ins->dreg);
2702 sparc_fmovd (code, sparc_f0, ins->dreg);
2704 sparc_fmovs (code, sparc_f0, ins->dreg);
2705 if (((MonoCallInst*)ins)->signature->ret->type == MONO_TYPE_R4)
2706 sparc_fstod (code, ins->dreg, ins->dreg);
2708 sparc_fmovs (code, sparc_f1, ins->dreg + 1);
2713 case OP_VCALL_MEMBASE:
2723 * emit_load_volatile_arguments:
2725 * Load volatile arguments from the stack to the original input registers.
2726 * Required before a tail call.
/*
 * emit_load_volatile_arguments:
 *
 *   Before a tail call (OP_JMP) the incoming arguments must be put back where
 * the callee expects them: this walks the signature (plus the implicit 'this')
 * and, per ArgInfo storage class, reloads each argument from its stack home
 * into the original %i register / register pair / stack slot — the inverse of
 * the saving done in emit_prolog.
 * NOTE(review): this view of the file is decimated (many original lines are
 * missing between the visible ones); comments below describe only what the
 * visible lines establish.
 */
2729 emit_load_volatile_arguments (MonoCompile *cfg, guint32 *code)
2731 MonoMethod *method = cfg->method;
2732 MonoMethodSignature *sig;
2737 /* FIXME: Generate intermediate code instead */
2739 sig = method->signature;
2741 cinfo = get_call_info (sig, FALSE);
2743 /* This is the opposite of the code in emit_prolog */
2745 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
2746 ArgInfo *ainfo = cinfo->args + i;
2747 gint32 stack_offset;
2749 inst = cfg->varinfo [i];
/* Slot 0 is the implicit 'this' when the signature has one. */
2751 if (sig->hasthis && (i == 0))
2752 arg_type = &mono_defaults.object_class->byval_arg;
2754 arg_type = sig->params [i - sig->hasthis];
2756 stack_offset = ainfo->offset + ARGS_OFFSET;
2757 ireg = sparc_i0 + ainfo->reg;
/* Split reg/stack args: the register half goes back to %i5. */
2759 if (ainfo->storage == ArgInSplitRegStack) {
2760 g_assert (inst->opcode == OP_REGOFFSET);
2762 if (!sparc_is_imm13 (stack_offset))
2764 sparc_st_imm (code, inst->inst_basereg, stack_offset, sparc_i5);
/* 32-bit mode: doubles passed by value live in an int reg pair / stack pair. */
2767 if (!v64 && !arg_type->byref && (arg_type->type == MONO_TYPE_R8)) {
2768 if (ainfo->storage == ArgInIRegPair) {
2769 if (!sparc_is_imm13 (inst->inst_offset + 4))
2771 sparc_ld_imm (code, inst->inst_basereg, inst->inst_offset, ireg);
2772 sparc_ld_imm (code, inst->inst_basereg, inst->inst_offset + 4, ireg + 1);
2775 if (ainfo->storage == ArgInSplitRegStack) {
/* Only copy when the variable was moved away from its original home slot. */
2776 if (stack_offset != inst->inst_offset) {
2777 sparc_ld_imm (code, inst->inst_basereg, inst->inst_offset, sparc_i5);
2778 sparc_ld_imm (code, inst->inst_basereg, inst->inst_offset + 4, sparc_o7);
2779 sparc_st_imm (code, sparc_o7, sparc_fp, stack_offset + 4);
2784 if (ainfo->storage == ArgOnStackPair) {
2785 if (stack_offset != inst->inst_offset) {
2786 /* stack_offset is not dword aligned, so we need to make a copy */
2787 sparc_ld_imm (code, inst->inst_basereg, inst->inst_offset, sparc_o7);
2788 sparc_st_imm (code, sparc_o7, sparc_fp, stack_offset);
2790 sparc_ld_imm (code, inst->inst_basereg, inst->inst_offset + 4, sparc_o7);
2791 sparc_st_imm (code, sparc_o7, sparc_fp, stack_offset + 4);
2796 g_assert_not_reached ();
/* Non-R8 path: reload a single-register argument from its stack home.
 * The (stack_offset - ARGS_OFFSET) alignment bits select the load width;
 * presumably matching how emit_prolog stored it — TODO confirm against the
 * missing prolog code. */
2799 if ((ainfo->storage == ArgInIReg) && (inst->opcode != OP_REGVAR)) {
2800 /* Argument in register, but need to be saved to stack */
2801 if (!sparc_is_imm13 (stack_offset))
2803 if ((stack_offset - ARGS_OFFSET) & 0x1)
2804 /* FIXME: Is this ldsb or ldub ? */
2805 sparc_ldsb_imm (code, inst->inst_basereg, stack_offset, ireg);
2807 if ((stack_offset - ARGS_OFFSET) & 0x2)
2808 sparc_ldsh_imm (code, inst->inst_basereg, stack_offset, ireg);
2810 if ((stack_offset - ARGS_OFFSET) & 0x4)
2811 sparc_ld_imm (code, inst->inst_basereg, stack_offset, ireg);
2814 sparc_ldx_imm (code, inst->inst_basereg, stack_offset, ireg);
2816 sparc_ld_imm (code, inst->inst_basereg, stack_offset, ireg);
2819 else if ((ainfo->storage == ArgInIRegPair) && (inst->opcode != OP_REGVAR)) {
2820 /* Argument in regpair, but need to be saved to stack */
2821 if (!sparc_is_imm13 (inst->inst_offset + 4))
2823 sparc_ld_imm (code, inst->inst_basereg, inst->inst_offset, ireg);
2824 sparc_st_imm (code, inst->inst_basereg, inst->inst_offset + 4, ireg + 1);
2826 else if ((ainfo->storage == ArgInFloatReg) && (inst->opcode != OP_REGVAR)) {
2829 else if ((ainfo->storage == ArgInDoubleReg) && (inst->opcode != OP_REGVAR)) {
2833 if ((ainfo->storage == ArgInSplitRegStack) || (ainfo->storage == ArgOnStack))
2834 if (inst->opcode == OP_REGVAR)
2835 /* FIXME: Load the argument into memory */
2845 * mono_sparc_is_virtual_call:
2847 * Determine whether the instruction at CODE is a virtual call.
/*
 * Recognition works by format decoding: op == 0x2 / op3 == 0x38 is the JMPL
 * (register-indirect call) encoding, and the backend marks virtual call sites
 * by placing a distinctive "or %g0, 0xca, %g0" nop in the delay slot (the
 * same marker emitted in mono_arch_output_basic_block for *CALL_REG /
 * *CALL_MEMBASE).  We rebuild that nop into a local buffer and compare it
 * with the delay-slot word.
 */
2850 mono_sparc_is_virtual_call (guint32 *code)
2857 if ((sparc_inst_op (*code) == 0x2) && (sparc_inst_op3 (*code) == 0x38)) {
2859 * Register indirect call. If it is a virtual call, then the
2860 * instruction in the delay slot is a special kind of nop.
2863 /* Construct special nop */
2864 sparc_or_imm (p, FALSE, sparc_g0, 0xca, sparc_g0);
/* code [1] is the delay slot of the call at code [0]. */
2867 if (code [1] == p [0])
2875 * mono_sparc_get_vcall_slot_addr:
2877 * Determine the vtable slot used by a virtual call.
/*
 * CODE points at the call instruction; FP is the caller's frame pointer.
 * The expected sequence is "ld [base + disp], reg; jmpl reg, ..." — the slot
 * address is recovered by decoding base/disp from the load in the delay-slot
 * predecessor and reading the base register's saved value out of the register
 * window that mono_sparc_flushw () forced to the stack.
 */
2880 mono_sparc_get_vcall_slot_addr (guint32 *code, gpointer *fp)
2882 guint32 ins = code [0];
2883 guint32 prev_ins = code [-1];
/* Flush register windows so the caller's locals/ins are readable from memory. */
2885 mono_sparc_flushw ();
/* On V9 the saved registers sit above the stack bias. */
2887 fp = (gpointer*)((guint8*)fp + MONO_SPARC_STACK_BIAS);
/* op 0x2 / op3 0x38: JMPL, i.e. a register-indirect call. */
2889 if ((sparc_inst_op (ins) == 0x2) && (sparc_inst_op3 (ins) == 0x38)) {
/* op 0x3 / op3 0 or 0xb: a load (ld/ldx) immediately before the call. */
2890 if ((sparc_inst_op (prev_ins) == 0x3) && (sparc_inst_op3 (prev_ins) == 0 || sparc_inst_op3 (prev_ins) == 0xb)) {
2891 /* ld [r1 + CONST ], r2; call r2 */
2892 guint32 base = sparc_inst_rs1 (prev_ins);
2893 guint32 disp = sparc_inst_imm13 (prev_ins);
/* The register loaded must be the one being called through. */
2896 g_assert (sparc_inst_rd (prev_ins) == sparc_inst_rs1 (ins));
/* Only %o0..%i7 can be recovered from the flushed window. */
2898 g_assert ((base >= sparc_o0) && (base <= sparc_i7));
/* fp [0] holds register 16 (%l0); index accordingly. */
2900 base_val = fp [base - 16];
2902 return (gpointer)((guint8*)base_val + disp);
2905 g_assert_not_reached ();
2908 g_assert_not_reached ();
2914 * Some conventions used in the following code.
2915 * 2) The only scratch registers we have are o7 and g1. We try to
2916 * stick to o7 when we can, and use g1 when necessary.
/*
 * mono_arch_output_basic_block:
 *
 *   Emit native SPARC machine code for every MonoInst in basic block BB,
 * appending to cfg->native_code.  One large switch maps each IR opcode to the
 * sparc_* emitter macros from sparc-codegen; code-buffer growth, per-ins
 * length checking and debug line recording are handled around the switch.
 * NOTE(review): this view of the file is decimated — break statements, many
 * case labels, else branches and closing braces are missing between the
 * visible lines.  Comments added below only describe what the visible lines
 * establish.
 */
2920 mono_arch_output_basic_block (MonoCompile *cfg, MonoBasicBlock *bb)
2925 guint32 *code = (guint32*)(cfg->native_code + cfg->code_len);
2926 MonoInst *last_ins = NULL;
2930 if (cfg->opt & MONO_OPT_PEEPHOLE)
2931 peephole_pass (cfg, bb);
2933 if (cfg->verbose_level > 2)
2934 g_print ("Basic block %d starting at offset 0x%x\n", bb->block_num, bb->native_offset);
2936 cpos = bb->max_offset;
2938 if (cfg->prof_options & MONO_PROFILE_COVERAGE) {
2946 offset = (guint8*)code - cfg->native_code;
/* Look up the per-opcode spec (max emitted length etc.) for this ins. */
2948 spec = ins_spec [ins->opcode];
2950 spec = ins_spec [CEE_ADD];
2952 max_len = ((guint8 *)spec)[MONO_INST_LEN];
/* Grow the code buffer when the worst-case emission would not fit. */
2954 if (offset > (cfg->code_size - max_len - 16)) {
2955 cfg->code_size *= 2;
2956 cfg->native_code = g_realloc (cfg->native_code, cfg->code_size);
2957 code = (guint32*)(cfg->native_code + offset);
2959 code_start = (guint8*)code;
2960 // if (ins->cil_code)
2961 // g_print ("cil code\n");
2962 mono_debug_record_line_number (cfg, ins, offset);
2964 switch (ins->opcode) {
/* --- store-immediate-to-membase: width selected by opcode suffix --- */
2965 case OP_STOREI1_MEMBASE_IMM:
2966 EMIT_STORE_MEMBASE_IMM (ins, stb);
2968 case OP_STOREI2_MEMBASE_IMM:
2969 EMIT_STORE_MEMBASE_IMM (ins, sth);
2971 case OP_STORE_MEMBASE_IMM:
2972 EMIT_STORE_MEMBASE_IMM (ins, sti);
2974 case OP_STOREI4_MEMBASE_IMM:
2975 EMIT_STORE_MEMBASE_IMM (ins, st);
2977 case OP_STOREI8_MEMBASE_IMM:
2979 EMIT_STORE_MEMBASE_IMM (ins, stx);
2981 /* Only generated by peephole opts */
2982 g_assert ((ins->inst_offset % 8) == 0);
2983 g_assert (ins->inst_imm == 0);
2984 EMIT_STORE_MEMBASE_IMM (ins, stx);
/* --- store-register-to-membase --- */
2987 case OP_STOREI1_MEMBASE_REG:
2988 EMIT_STORE_MEMBASE_REG (ins, stb);
2990 case OP_STOREI2_MEMBASE_REG:
2991 EMIT_STORE_MEMBASE_REG (ins, sth);
2993 case OP_STOREI4_MEMBASE_REG:
2994 EMIT_STORE_MEMBASE_REG (ins, st);
2996 case OP_STOREI8_MEMBASE_REG:
2998 EMIT_STORE_MEMBASE_REG (ins, stx);
3000 /* Only used by OP_MEMSET */
3001 EMIT_STORE_MEMBASE_REG (ins, std);
3004 case OP_STORE_MEMBASE_REG:
3005 EMIT_STORE_MEMBASE_REG (ins, sti);
/* --- absolute-address loads: inst_c0 is the address --- */
3009 sparc_ldx (code, ins->inst_c0, sparc_g0, ins->dreg);
3011 sparc_ld (code, ins->inst_c0, sparc_g0, ins->dreg);
3016 sparc_ldsw (code, ins->inst_c0, sparc_g0, ins->dreg);
3018 sparc_ld (code, ins->inst_c0, sparc_g0, ins->dreg);
3022 sparc_ld (code, ins->inst_c0, sparc_g0, ins->dreg);
/* Address too large for the inline form: materialize it first. */
3025 sparc_set (code, ins->inst_c0, ins->dreg);
3026 sparc_ld (code, ins->dreg, sparc_g0, ins->dreg);
/* --- membase loads: width/signedness selected by opcode suffix --- */
3028 case OP_LOADI4_MEMBASE:
3030 EMIT_LOAD_MEMBASE (ins, ldsw);
3032 EMIT_LOAD_MEMBASE (ins, ld);
3035 case OP_LOADU4_MEMBASE:
3036 EMIT_LOAD_MEMBASE (ins, ld);
3038 case OP_LOADU1_MEMBASE:
3039 EMIT_LOAD_MEMBASE (ins, ldub);
3041 case OP_LOADI1_MEMBASE:
3042 EMIT_LOAD_MEMBASE (ins, ldsb);
3044 case OP_LOADU2_MEMBASE:
3045 EMIT_LOAD_MEMBASE (ins, lduh);
3047 case OP_LOADI2_MEMBASE:
3048 EMIT_LOAD_MEMBASE (ins, ldsh);
3050 case OP_LOAD_MEMBASE:
3052 EMIT_LOAD_MEMBASE (ins, ldx);
3054 EMIT_LOAD_MEMBASE (ins, ld);
3058 case OP_LOADI8_MEMBASE:
3059 EMIT_LOAD_MEMBASE (ins, ldx);
/* --- narrowing conversions via shift pairs (no byte/half ALU ops) --- */
3063 sparc_sll_imm (code, ins->sreg1, 24, sparc_o7);
3064 sparc_sra_imm (code, sparc_o7, 24, ins->dreg);
3067 sparc_sll_imm (code, ins->sreg1, 16, sparc_o7);
3068 sparc_sra_imm (code, sparc_o7, 16, ins->dreg);
3071 sparc_and_imm (code, FALSE, ins->sreg1, 0xff, ins->dreg);
3074 sparc_sll_imm (code, ins->sreg1, 16, sparc_o7);
3075 sparc_srl_imm (code, sparc_o7, 16, ins->dreg);
/* --- checked conversions: range-test then throw OverflowException --- */
3077 case CEE_CONV_OVF_U4:
3078 /* Only used on V9 */
3079 sparc_cmp_imm (code, ins->sreg1, 0);
3080 mono_add_patch_info (cfg, (guint8*)(code) - (cfg)->native_code,
3081 MONO_PATCH_INFO_EXC, "OverflowException");
3082 sparc_branchp (code, 0, sparc_bl, sparc_xcc_short, 0, 0);
/* Build the constant 1 << 32 for the upper-bound check. */
3084 sparc_set (code, 1, sparc_o7);
3085 sparc_sllx_imm (code, sparc_o7, 32, sparc_o7);
3086 sparc_cmp (code, ins->sreg1, sparc_o7);
3087 mono_add_patch_info (cfg, (guint8*)(code) - (cfg)->native_code,
3088 MONO_PATCH_INFO_EXC, "OverflowException");
3089 sparc_branchp (code, 0, sparc_bge, sparc_xcc_short, 0, 0);
3091 sparc_mov_reg_reg (code, ins->sreg1, ins->dreg);
3093 case CEE_CONV_OVF_I4_UN:
3094 /* Only used on V9 */
3099 /* Only used on V9 */
/* srl/sra by 0 zero-/sign-extend the low 32 bits on V9. */
3100 sparc_srl_imm (code, ins->sreg1, 0, ins->dreg);
3104 /* Only used on V9 */
3105 sparc_sra_imm (code, ins->sreg1, 0, ins->dreg);
/* --- compares: set condition codes for a later branch/set --- */
3110 sparc_cmp (code, ins->sreg1, ins->sreg2);
3112 case OP_COMPARE_IMM:
3113 case OP_ICOMPARE_IMM:
3114 if (sparc_is_imm13 (ins->inst_imm))
3115 sparc_cmp_imm (code, ins->sreg1, ins->inst_imm);
3117 sparc_set (code, ins->inst_imm, sparc_o7);
3118 sparc_cmp (code, ins->sreg1, sparc_o7);
3121 case OP_X86_TEST_NULL:
3122 sparc_cmp_imm (code, ins->sreg1, 0);
3126 * gdb does not like encountering 'ta 1' in the debugged code. So
3127 * instead of emitting a trap, we emit a call a C function and place a
3130 //sparc_ta (code, 1);
3131 mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_ABS, mono_sparc_break);
/* --- integer ALU; the TRUE/FALSE argument selects cc-setting forms --- */
3136 sparc_add (code, TRUE, ins->sreg1, ins->sreg2, ins->dreg);
3140 sparc_add (code, FALSE, ins->sreg1, ins->sreg2, ins->dreg);
3145 /* according to inssel-long32.brg, this should set cc */
3146 EMIT_ALU_IMM (ins, add, TRUE);
3150 /* according to inssel-long32.brg, this should set cc */
3151 sparc_addx (code, TRUE, ins->sreg1, ins->sreg2, ins->dreg);
3155 EMIT_ALU_IMM (ins, addx, TRUE);
3159 sparc_sub (code, TRUE, ins->sreg1, ins->sreg2, ins->dreg);
3163 sparc_sub (code, FALSE, ins->sreg1, ins->sreg2, ins->dreg);
3168 /* according to inssel-long32.brg, this should set cc */
3169 EMIT_ALU_IMM (ins, sub, TRUE);
3173 /* according to inssel-long32.brg, this should set cc */
3174 sparc_subx (code, TRUE, ins->sreg1, ins->sreg2, ins->dreg);
3178 EMIT_ALU_IMM (ins, subx, TRUE);
3182 sparc_and (code, FALSE, ins->sreg1, ins->sreg2, ins->dreg);
3186 EMIT_ALU_IMM (ins, and, FALSE);
/* --- division: sdiv/udiv read the 64-bit dividend from %y:rs1 --- */
3190 /* Sign extend sreg1 into %y */
3191 sparc_sra_imm (code, ins->sreg1, 31, sparc_o7);
3192 sparc_wry (code, sparc_o7, sparc_g0);
3193 sparc_sdiv (code, TRUE, ins->sreg1, ins->sreg2, ins->dreg);
3194 EMIT_COND_SYSTEM_EXCEPTION_GENERAL (code, sparc_boverflow, "ArithmeticException", TRUE, sparc_icc_short);
3198 sparc_wry (code, sparc_g0, sparc_g0);
3199 sparc_udiv (code, FALSE, ins->sreg1, ins->sreg2, ins->dreg);
3204 /* Transform division into a shift */
3205 for (i = 1; i < 30; ++i) {
3207 if (ins->inst_imm == imm)
/* Division by 2: add the sign bit before the arithmetic shift to round
 * towards zero for negative dividends. */
3213 sparc_srl_imm (code, ins->sreg1, 31, sparc_o7);
3214 sparc_add (code, FALSE, ins->sreg1, sparc_o7, ins->dreg);
3215 sparc_sra_imm (code, ins->dreg, 1, ins->dreg);
3218 /* http://compilers.iecc.com/comparch/article/93-04-079 */
3219 sparc_sra_imm (code, ins->sreg1, 31, sparc_o7);
3220 sparc_srl_imm (code, sparc_o7, 32 - i, sparc_o7);
3221 sparc_add (code, FALSE, ins->sreg1, sparc_o7, ins->dreg);
3222 sparc_sra_imm (code, ins->dreg, i, ins->dreg);
3226 /* Sign extend sreg1 into %y */
3227 sparc_sra_imm (code, ins->sreg1, 31, sparc_o7);
3228 sparc_wry (code, sparc_o7, sparc_g0);
3229 EMIT_ALU_IMM (ins, sdiv, TRUE);
3230 EMIT_COND_SYSTEM_EXCEPTION_GENERAL (code, sparc_boverflow, "ArithmeticException", TRUE, sparc_icc_short);
/* --- remainder: rem = sreg1 - (sreg1 / sreg2) * sreg2 --- */
3236 /* Sign extend sreg1 into %y */
3237 sparc_sra_imm (code, ins->sreg1, 31, sparc_o7);
3238 sparc_wry (code, sparc_o7, sparc_g0);
3239 sparc_sdiv (code, TRUE, ins->sreg1, ins->sreg2, sparc_o7);
3240 EMIT_COND_SYSTEM_EXCEPTION_GENERAL (code, sparc_boverflow, "ArithmeticException", TRUE, sparc_icc_short);
3241 sparc_smul (code, FALSE, ins->sreg2, sparc_o7, sparc_o7);
3242 sparc_sub (code, FALSE, ins->sreg1, sparc_o7, ins->dreg);
3246 sparc_wry (code, sparc_g0, sparc_g0);
3247 sparc_udiv (code, FALSE, ins->sreg1, ins->sreg2, sparc_o7);
3248 sparc_umul (code, FALSE, ins->sreg2, sparc_o7, sparc_o7);
3249 sparc_sub (code, FALSE, ins->sreg1, sparc_o7, ins->dreg);
3253 /* Sign extend sreg1 into %y */
3254 sparc_sra_imm (code, ins->sreg1, 31, sparc_o7);
3255 sparc_wry (code, sparc_o7, sparc_g0);
3256 if (!sparc_is_imm13 (ins->inst_imm)) {
3257 sparc_set (code, ins->inst_imm, GP_SCRATCH_REG);
3258 sparc_sdiv (code, TRUE, ins->sreg1, GP_SCRATCH_REG, sparc_o7);
3259 EMIT_COND_SYSTEM_EXCEPTION_GENERAL (code, sparc_boverflow, "ArithmeticException", TRUE, sparc_icc_short);
3260 sparc_smul (code, FALSE, sparc_o7, GP_SCRATCH_REG, sparc_o7);
3263 sparc_sdiv_imm (code, TRUE, ins->sreg1, ins->inst_imm, sparc_o7);
3264 EMIT_COND_SYSTEM_EXCEPTION_GENERAL (code, sparc_boverflow, "ArithmeticException", TRUE, sparc_icc_short);
3265 sparc_smul_imm (code, FALSE, sparc_o7, ins->inst_imm, sparc_o7);
3267 sparc_sub (code, FALSE, ins->sreg1, sparc_o7, ins->dreg);
/* --- bitwise or/xor --- */
3271 sparc_or (code, FALSE, ins->sreg1, ins->sreg2, ins->dreg);
3275 EMIT_ALU_IMM (ins, or, FALSE);
3279 sparc_xor (code, FALSE, ins->sreg1, ins->sreg2, ins->dreg);
3283 EMIT_ALU_IMM (ins, xor, FALSE);
/* --- shifts: 5-bit immediates for 32-bit forms, 6-bit for the x-forms --- */
3287 sparc_sll (code, ins->sreg1, ins->sreg2, ins->dreg);
3291 if (ins->inst_imm < (1 << 5))
3292 sparc_sll_imm (code, ins->sreg1, ins->inst_imm, ins->dreg);
3294 sparc_set (code, ins->inst_imm, sparc_o7);
3295 sparc_sll (code, ins->sreg1, sparc_o7, ins->dreg);
3300 sparc_sra (code, ins->sreg1, ins->sreg2, ins->dreg);
3304 if (ins->inst_imm < (1 << 5))
3305 sparc_sra_imm (code, ins->sreg1, ins->inst_imm, ins->dreg);
3307 sparc_set (code, ins->inst_imm, sparc_o7);
3308 sparc_sra (code, ins->sreg1, sparc_o7, ins->dreg);
3312 case OP_ISHR_UN_IMM:
3313 if (ins->inst_imm < (1 << 5))
3314 sparc_srl_imm (code, ins->sreg1, ins->inst_imm, ins->dreg);
3316 sparc_set (code, ins->inst_imm, sparc_o7);
3317 sparc_srl (code, ins->sreg1, sparc_o7, ins->dreg);
3322 sparc_srl (code, ins->sreg1, ins->sreg2, ins->dreg);
3325 sparc_sllx (code, ins->sreg1, ins->sreg2, ins->dreg);
3328 if (ins->inst_imm < (1 << 6))
3329 sparc_sllx_imm (code, ins->sreg1, ins->inst_imm, ins->dreg);
3331 sparc_set (code, ins->inst_imm, sparc_o7);
3332 sparc_sllx (code, ins->sreg1, sparc_o7, ins->dreg);
3336 sparc_srax (code, ins->sreg1, ins->sreg2, ins->dreg);
3339 if (ins->inst_imm < (1 << 6))
3340 sparc_srax_imm (code, ins->sreg1, ins->inst_imm, ins->dreg);
3342 sparc_set (code, ins->inst_imm, sparc_o7);
3343 sparc_srax (code, ins->sreg1, sparc_o7, ins->dreg);
3347 sparc_srlx (code, ins->sreg1, ins->sreg2, ins->dreg);
3349 case OP_LSHR_UN_IMM:
3350 if (ins->inst_imm < (1 << 6))
3351 sparc_srlx_imm (code, ins->sreg1, ins->inst_imm, ins->dreg);
3353 sparc_set (code, ins->inst_imm, sparc_o7);
3354 sparc_srlx (code, ins->sreg1, sparc_o7, ins->dreg);
3359 /* can't use sparc_not */
3360 sparc_xnor (code, FALSE, ins->sreg1, sparc_g0, ins->dreg);
3364 /* can't use sparc_neg */
3365 sparc_sub (code, FALSE, sparc_g0, ins->sreg1, ins->dreg);
/* --- multiplication, with strength reduction for power-of-two imms --- */
3369 sparc_smul (code, FALSE, ins->sreg1, ins->sreg2, ins->dreg);
3375 if ((ins->inst_imm == 1) && (ins->sreg1 == ins->dreg))
3378 /* Transform multiplication into a shift */
3379 for (i = 0; i < 30; ++i) {
3381 if (ins->inst_imm == imm)
3385 sparc_sll_imm (code, ins->sreg1, i, ins->dreg);
3387 EMIT_ALU_IMM (ins, smul, FALSE);
/* Overflow check: high word (%y) must equal the sign extension of the
 * low-word result. */
3392 sparc_smul (code, TRUE, ins->sreg1, ins->sreg2, ins->dreg);
3393 sparc_rdy (code, sparc_g1);
3394 sparc_sra_imm (code, ins->dreg, 31, sparc_o7);
3395 sparc_cmp (code, sparc_g1, sparc_o7);
3396 EMIT_COND_SYSTEM_EXCEPTION_GENERAL (ins, sparc_bne, "OverflowException", TRUE, sparc_icc_short);
3398 case CEE_MUL_OVF_UN:
3399 case OP_IMUL_OVF_UN:
/* Unsigned overflow: the high word (%y) must be zero. */
3400 sparc_umul (code, TRUE, ins->sreg1, ins->sreg2, ins->dreg);
3401 sparc_rdy (code, sparc_o7);
3402 sparc_cmp (code, sparc_o7, sparc_g0);
3403 EMIT_COND_SYSTEM_EXCEPTION_GENERAL (ins, sparc_bne, "OverflowException", TRUE, sparc_icc_short);
/* --- constants and moves --- */
3407 sparc_set (code, ins->inst_c0, ins->dreg);
3410 sparc_set (code, ins->inst_l, ins->dreg);
/* AOT-style constant: emit a patchable set template. */
3413 mono_add_patch_info (cfg, offset, (MonoJumpInfoType)ins->inst_i1, ins->inst_p0);
3414 sparc_set_template (code, ins->dreg);
3420 if (ins->sreg1 != ins->dreg)
3421 sparc_mov_reg_reg (code, ins->sreg1, ins->dreg);
3424 /* Only used on V9 */
3425 if (ins->sreg1 != ins->dreg)
3426 sparc_fmovd (code, ins->sreg1, ins->dreg);
3428 case OP_SPARC_SETFREG_FLOAT:
3429 /* Only used on V9 */
3430 sparc_fdtos (code, ins->sreg1, ins->dreg);
/* --- tail call (OP_JMP, presumably): reload args, patchable jump,
 * restore the register window in the delay slot --- */
3433 if (cfg->method->save_lmf)
3436 code = emit_load_volatile_arguments (cfg, code);
3437 mono_add_patch_info (cfg, (guint8*)code - cfg->native_code, MONO_PATCH_INFO_METHOD_JUMP, ins->inst_p0);
3438 sparc_set_template (code, sparc_o7);
3439 sparc_jmpl (code, sparc_o7, sparc_g0, sparc_g0);
3440 /* Restore parent frame in delay slot */
3441 sparc_restore_imm (code, sparc_g0, 0, sparc_g0);
3444 /* ensure ins->sreg1 is not NULL */
3445 sparc_ld_imm (code, ins->sreg1, 0, sparc_g0);
3448 sparc_add_imm (code, FALSE, sparc_fp, cfg->sig_cookie, sparc_o7);
3449 sparc_sti_imm (code, sparc_o7, ins->sreg1, 0);
/* --- direct calls --- */
3456 call = (MonoCallInst*)ins;
3457 g_assert (!call->virtual);
3458 code = emit_save_sp_to_lmf (cfg, code);
3459 if (ins->flags & MONO_INST_HAS_METHOD)
3460 code = emit_call (cfg, code, MONO_PATCH_INFO_METHOD, call->method);
3462 code = emit_call (cfg, code, MONO_PATCH_INFO_ABS, call->fptr);
3464 code = emit_vret_token (ins, code);
3465 code = emit_move_return_value (ins, code);
/* --- indirect calls through a register --- */
3470 case OP_VOIDCALL_REG:
3472 call = (MonoCallInst*)ins;
3473 code = emit_save_sp_to_lmf (cfg, code);
3474 sparc_jmpl (code, ins->sreg1, sparc_g0, sparc_callsite);
3476 * We emit a special kind of nop in the delay slot to tell the
3477 * trampoline code that this is a virtual call, thus an unbox
3478 * trampoline might need to be called.
3481 sparc_or_imm (code, FALSE, sparc_g0, 0xca, sparc_g0);
3485 code = emit_vret_token (ins, code);
3486 code = emit_move_return_value (ins, code);
/* --- indirect calls through [basereg + offset] (vtable calls) --- */
3488 case OP_FCALL_MEMBASE:
3489 case OP_LCALL_MEMBASE:
3490 case OP_VCALL_MEMBASE:
3491 case OP_VOIDCALL_MEMBASE:
3492 case OP_CALL_MEMBASE:
3493 call = (MonoCallInst*)ins;
3494 g_assert (sparc_is_imm13 (ins->inst_offset));
3495 code = emit_save_sp_to_lmf (cfg, code);
3496 sparc_ldi_imm (code, ins->inst_basereg, ins->inst_offset, sparc_o7);
3497 sparc_jmpl (code, sparc_o7, sparc_g0, sparc_callsite);
/* Virtual-call marker nop in the delay slot (see mono_sparc_is_virtual_call). */
3499 sparc_or_imm (code, FALSE, sparc_g0, 0xca, sparc_g0);
3503 code = emit_vret_token (ins, code);
3504 code = emit_move_return_value (ins, code);
/* R4 return values are converted down to single precision in %f0. */
3507 if (cfg->method->signature->ret->type == MONO_TYPE_R4)
3508 sparc_fdtos (code, ins->sreg1, sparc_f0);
3511 sparc_fmovd (code, ins->sreg1, ins->dreg);
3513 /* FIXME: Why not use fmovd ? */
3514 sparc_fmovs (code, ins->sreg1, ins->dreg);
3515 sparc_fmovs (code, ins->sreg1 + 1, ins->dreg + 1);
3520 g_assert_not_reached ();
/* --- localloc: round up, bump %sp, return biased pointer --- */
3523 /* Keep alignment */
3524 sparc_add_imm (code, FALSE, ins->sreg1, MONO_ARCH_FRAME_ALIGNMENT - 1, ins->dreg);
3525 sparc_set (code, ~(MONO_ARCH_FRAME_ALIGNMENT - 1), sparc_o7);
3526 sparc_and (code, FALSE, ins->dreg, sparc_o7, ins->dreg);
3527 sparc_sub (code, FALSE, sparc_sp, ins->dreg, ins->dreg);
3528 /* Keep %sp valid at all times */
3529 sparc_mov_reg_reg (code, ins->dreg, sparc_sp);
3530 g_assert (sparc_is_imm13 (cfg->arch.localloc_offset));
3531 sparc_add_imm (code, FALSE, ins->dreg, MONO_SPARC_STACK_BIAS + cfg->arch.localloc_offset, ins->dreg);
3533 case OP_SPARC_LOCALLOC_IMM: {
3534 gint32 offset = ins->inst_c0;
3535 offset = ALIGN_TO (offset, MONO_ARCH_FRAME_ALIGNMENT);
3536 if (sparc_is_imm13 (offset))
3537 sparc_sub_imm (code, FALSE, sparc_sp, offset, sparc_sp);
3539 sparc_set (code, offset, sparc_o7);
3540 sparc_sub (code, FALSE, sparc_sp, sparc_o7, sparc_sp);
3542 sparc_mov_reg_reg (code, sparc_sp, ins->dreg);
3543 g_assert (sparc_is_imm13 (cfg->arch.localloc_offset));
3544 sparc_add_imm (code, FALSE, ins->dreg, MONO_SPARC_STACK_BIAS + cfg->arch.localloc_offset, ins->dreg);
3548 /* The return is done in the epilog */
3549 g_assert_not_reached ();
/* --- throw: exception object in %o0, call the throw icall --- */
3552 sparc_mov_reg_reg (code, ins->sreg1, sparc_o0);
3553 mono_add_patch_info (cfg, (guint8*)code - cfg->native_code, MONO_PATCH_INFO_INTERNAL_METHOD,
3554 (gpointer)"mono_arch_throw_exception");
/* --- exception handler blocks --- */
3557 case OP_START_HANDLER: {
3559 * The START_HANDLER instruction marks the beginning of a handler
3560 * block. It is called using a call instruction, so %o7 contains
3561 * the return address. Since the handler executes in the same stack
3562 * frame as the method itself, we can't use save/restore to save
3563 * the return address. Instead, we save it into a dedicated
3566 MonoInst *spvar = mono_find_spvar_for_region (cfg, bb->region);
3567 if (!sparc_is_imm13 (spvar->inst_offset)) {
3568 sparc_set (code, spvar->inst_offset, GP_SCRATCH_REG);
3569 sparc_sti (code, sparc_o7, spvar->inst_basereg, GP_SCRATCH_REG);
3572 sparc_sti_imm (code, sparc_o7, spvar->inst_basereg, spvar->inst_offset);
3575 case OP_ENDFILTER: {
/* Reload the saved return address and return past the call (+8). */
3576 MonoInst *spvar = mono_find_spvar_for_region (cfg, bb->region);
3577 if (!sparc_is_imm13 (spvar->inst_offset)) {
3578 sparc_set (code, spvar->inst_offset, GP_SCRATCH_REG);
3579 sparc_ldi (code, spvar->inst_basereg, GP_SCRATCH_REG, sparc_o7);
3582 sparc_ldi_imm (code, spvar->inst_basereg, spvar->inst_offset, sparc_o7);
3583 sparc_jmpl_imm (code, sparc_o7, 8, sparc_g0);
/* Filter result returned in %o0 (delay slot). */
3585 sparc_mov_reg_reg (code, ins->sreg1, sparc_o0);
3588 case CEE_ENDFINALLY: {
3589 MonoInst *spvar = mono_find_spvar_for_region (cfg, bb->region);
3590 if (!sparc_is_imm13 (spvar->inst_offset)) {
3591 sparc_set (code, spvar->inst_offset, GP_SCRATCH_REG);
3592 sparc_ldi (code, spvar->inst_basereg, GP_SCRATCH_REG, sparc_o7);
3595 sparc_ldi_imm (code, spvar->inst_basereg, spvar->inst_offset, sparc_o7);
3596 sparc_jmpl_imm (code, sparc_o7, 8, sparc_g0);
3600 case OP_CALL_HANDLER:
3601 mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_BB, ins->inst_target_bb);
3602 /* This is a jump inside the method, so call_simple works even on V9 */
3603 sparc_call_simple (code, 0);
/* Label: record its native offset for later patching. */
3607 ins->inst_c0 = (guint8*)code - cfg->native_code;
/* --- unconditional branch; elided when it falls through to next_bb --- */
3610 //g_print ("target: %p, next: %p, curr: %p, last: %p\n", ins->inst_target_bb, bb->next_bb, ins, bb->last_ins);
3611 if ((ins->inst_target_bb == bb->next_bb) && ins == bb->last_ins)
3613 if (ins->flags & MONO_INST_BRLABEL) {
3614 if (ins->inst_i0->inst_c0) {
3615 gint32 disp = (ins->inst_i0->inst_c0 - ((guint8*)code - cfg->native_code)) >> 2;
3616 g_assert (sparc_is_imm22 (disp));
3617 sparc_branch (code, 1, sparc_ba, disp);
3619 mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_LABEL, ins->inst_i0);
3620 sparc_branch (code, 1, sparc_ba, 0);
3623 if (ins->inst_target_bb->native_offset) {
3624 gint32 disp = (ins->inst_target_bb->native_offset - ((guint8*)code - cfg->native_code)) >> 2;
3625 g_assert (sparc_is_imm22 (disp));
3626 sparc_branch (code, 1, sparc_ba, disp);
3628 mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_BB, ins->inst_target_bb);
3629 sparc_branch (code, 1, sparc_ba, 0);
3635 sparc_jmp (code, ins->sreg1, sparc_g0);
/* --- setcc: conditional move on V9, branch-over sequence otherwise --- */
3643 if (v64 && (cfg->opt & MONO_OPT_CMOV)) {
3644 sparc_clr_reg (code, ins->dreg);
3645 sparc_movcc_imm (code, sparc_xcc, opcode_to_sparc_cond (ins->opcode), 1, ins->dreg);
3648 sparc_clr_reg (code, ins->dreg);
3650 sparc_branchp (code, 1, opcode_to_sparc_cond (ins->opcode), DEFAULT_ICC, 0, 2);
3652 sparc_branch (code, 1, opcode_to_sparc_cond (ins->opcode), 2);
3655 sparc_set (code, 1, ins->dreg);
3663 if (v64 && (cfg->opt & MONO_OPT_CMOV)) {
3664 sparc_clr_reg (code, ins->dreg);
3665 sparc_movcc_imm (code, sparc_icc, opcode_to_sparc_cond (ins->opcode), 1, ins->dreg);
3668 sparc_clr_reg (code, ins->dreg);
3669 sparc_branchp (code, 1, opcode_to_sparc_cond (ins->opcode), sparc_icc_short, 0, 2);
3671 sparc_set (code, 1, ins->dreg);
/* --- conditional throw-on-cc opcodes --- */
3674 case OP_COND_EXC_EQ:
3675 case OP_COND_EXC_NE_UN:
3676 case OP_COND_EXC_LT:
3677 case OP_COND_EXC_LT_UN:
3678 case OP_COND_EXC_GT:
3679 case OP_COND_EXC_GT_UN:
3680 case OP_COND_EXC_GE:
3681 case OP_COND_EXC_GE_UN:
3682 case OP_COND_EXC_LE:
3683 case OP_COND_EXC_LE_UN:
3684 case OP_COND_EXC_OV:
3685 case OP_COND_EXC_NO:
3687 case OP_COND_EXC_NC:
3688 EMIT_COND_SYSTEM_EXCEPTION (ins, opcode_to_sparc_cond (ins->opcode), ins->inst_p1);
3690 case OP_SPARC_COND_EXC_EQZ:
3691 EMIT_COND_SYSTEM_EXCEPTION_BPR (ins, brz, ins->inst_p1);
3693 case OP_SPARC_COND_EXC_GEZ:
3694 EMIT_COND_SYSTEM_EXCEPTION_BPR (ins, brgez, ins->inst_p1);
3696 case OP_SPARC_COND_EXC_GTZ:
3697 EMIT_COND_SYSTEM_EXCEPTION_BPR (ins, brgz, ins->inst_p1);
3699 case OP_SPARC_COND_EXC_LEZ:
3700 EMIT_COND_SYSTEM_EXCEPTION_BPR (ins, brlez, ins->inst_p1);
3702 case OP_SPARC_COND_EXC_LTZ:
3703 EMIT_COND_SYSTEM_EXCEPTION_BPR (ins, brlz, ins->inst_p1);
3705 case OP_SPARC_COND_EXC_NEZ:
3706 EMIT_COND_SYSTEM_EXCEPTION_BPR (ins, brnz, ins->inst_p1);
3708 case OP_COND_EXC_IOV:
3709 case OP_COND_EXC_IC:
3710 EMIT_COND_SYSTEM_EXCEPTION_GENERAL (ins, opcode_to_sparc_cond (ins->opcode), ins->inst_p1, TRUE, sparc_icc_short);
/* --- conditional branches --- */
3723 EMIT_COND_BRANCH_PREDICTED (ins, opcode_to_sparc_cond (ins->opcode), 1, 1);
3725 EMIT_COND_BRANCH (ins, opcode_to_sparc_cond (ins->opcode), 1, 1);
3739 /* Only used on V9 */
3740 EMIT_COND_BRANCH_ICC (ins, opcode_to_sparc_cond (ins->opcode), 1, 1, sparc_icc_short);
/* Branch-on-register-value forms (V9 BPr). */
3745 EMIT_COND_BRANCH_BPR (ins, brz, 1, 1, 1);
3747 case OP_SPARC_BRLEZ:
3748 EMIT_COND_BRANCH_BPR (ins, brlez, 1, 1, 1);
3751 EMIT_COND_BRANCH_BPR (ins, brlz, 1, 1, 1);
3754 EMIT_COND_BRANCH_BPR (ins, brnz, 1, 1, 1);
3757 EMIT_COND_BRANCH_BPR (ins, brgz, 1, 1, 1);
3759 case OP_SPARC_BRGEZ:
3760 EMIT_COND_BRANCH_BPR (ins, brgez, 1, 1, 1);
3763 /* floating point opcodes */
/* FP constants are loaded through a patched address in %o7. */
3765 mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_R8, ins->inst_p0);
3767 sparc_set_template (code, sparc_o7);
3769 sparc_sethi (code, 0, sparc_o7);
3771 sparc_lddf_imm (code, sparc_o7, 0, ins->dreg);
3774 mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_R4, ins->inst_p0);
3776 sparc_set_template (code, sparc_o7);
3778 sparc_sethi (code, 0, sparc_o7);
3780 sparc_ldf_imm (code, sparc_o7, 0, FP_SCRATCH_REG);
3782 /* Extend to double */
3783 sparc_fstod (code, FP_SCRATCH_REG, ins->dreg);
3785 case OP_STORER8_MEMBASE_REG:
3786 if (!sparc_is_imm13 (ins->inst_offset + 4)) {
3787 sparc_set (code, ins->inst_offset, sparc_o7);
3788 /* SPARCV9 handles misaligned fp loads/stores */
3789 if (!v64 && (ins->inst_offset % 8)) {
/* Non-dword-aligned on V8: split into two single-word fp stores. */
3791 sparc_add (code, FALSE, ins->inst_destbasereg, sparc_o7, sparc_o7);
3792 sparc_stf (code, ins->sreg1, sparc_o7, sparc_g0);
3793 sparc_stf_imm (code, ins->sreg1 + 1, sparc_o7, 4);
3795 sparc_stdf (code, ins->sreg1, ins->inst_destbasereg, sparc_o7);
3798 if (!v64 && (ins->inst_offset % 8)) {
3800 sparc_stf_imm (code, ins->sreg1, ins->inst_destbasereg, ins->inst_offset);
3801 sparc_stf_imm (code, ins->sreg1 + 1, ins->inst_destbasereg, ins->inst_offset + 4);
3803 sparc_stdf_imm (code, ins->sreg1, ins->inst_destbasereg, ins->inst_offset);
3806 case OP_LOADR8_MEMBASE:
3807 EMIT_LOAD_MEMBASE (ins, lddf);
3809 case OP_STORER4_MEMBASE_REG:
3810 /* This requires a double->single conversion */
3811 sparc_fdtos (code, ins->sreg1, FP_SCRATCH_REG);
3812 if (!sparc_is_imm13 (ins->inst_offset)) {
3813 sparc_set (code, ins->inst_offset, sparc_o7);
3814 sparc_stf (code, FP_SCRATCH_REG, ins->inst_destbasereg, sparc_o7);
3817 sparc_stf_imm (code, FP_SCRATCH_REG, ins->inst_destbasereg, ins->inst_offset);
3819 case OP_LOADR4_MEMBASE: {
3820 /* ldf needs a single precision register */
3821 int dreg = ins->dreg;
3822 ins->dreg = FP_SCRATCH_REG;
3823 EMIT_LOAD_MEMBASE (ins, ldf);
3825 /* Extend to double */
3826 sparc_fstod (code, FP_SCRATCH_REG, ins->dreg);
3831 sparc_fmovd (code, ins->sreg1, ins->dreg);
3833 sparc_fmovs (code, ins->sreg1, ins->dreg);
3834 sparc_fmovs (code, ins->sreg1 + 1, ins->dreg + 1);
/* --- int -> float conversions: bounce through a stack spill slot, since
 * int<->fp register moves must go through memory on SPARC --- */
3838 gint32 offset = mono_spillvar_offset_float (cfg, 0);
3839 if (!sparc_is_imm13 (offset))
3842 sparc_stx_imm (code, ins->sreg1, sparc_sp, offset);
3843 sparc_lddf_imm (code, sparc_sp, offset, FP_SCRATCH_REG);
3844 sparc_fxtos (code, FP_SCRATCH_REG, FP_SCRATCH_REG);
3846 sparc_st_imm (code, ins->sreg1, sparc_sp, offset);
3847 sparc_ldf_imm (code, sparc_sp, offset, FP_SCRATCH_REG);
3848 sparc_fitos (code, FP_SCRATCH_REG, FP_SCRATCH_REG);
3850 sparc_fstod (code, FP_SCRATCH_REG, ins->dreg);
3854 gint32 offset = mono_spillvar_offset_float (cfg, 0);
3855 if (!sparc_is_imm13 (offset))
3858 sparc_stx_imm (code, ins->sreg1, sparc_sp, offset);
3859 sparc_lddf_imm (code, sparc_sp, offset, FP_SCRATCH_REG);
3860 sparc_fxtod (code, FP_SCRATCH_REG, ins->dreg);
3862 sparc_st_imm (code, ins->sreg1, sparc_sp, offset);
3863 sparc_ldf_imm (code, sparc_sp, offset, FP_SCRATCH_REG);
3864 sparc_fitod (code, FP_SCRATCH_REG, ins->dreg);
/* --- float -> int conversions (also via the spill slot) --- */
3868 case OP_FCONV_TO_I1:
3869 case OP_FCONV_TO_U1:
3870 case OP_FCONV_TO_I2:
3871 case OP_FCONV_TO_U2:
3876 case OP_FCONV_TO_I4:
3877 case OP_FCONV_TO_U4: {
3878 gint32 offset = mono_spillvar_offset_float (cfg, 0);
3879 if (!sparc_is_imm13 (offset))
3881 /* FIXME: Is having the same code for all of these ok ? */
3882 sparc_fdtoi (code, ins->sreg1, FP_SCRATCH_REG);
3883 sparc_stdf_imm (code, FP_SCRATCH_REG, sparc_sp, offset);
3884 sparc_ld_imm (code, sparc_sp, offset, ins->dreg);
3887 case OP_FCONV_TO_I8:
3888 case OP_FCONV_TO_U8:
3890 g_assert_not_reached ();
3894 g_assert_not_reached ();
3896 case OP_LCONV_TO_R_UN: {
3898 g_assert_not_reached ();
/* --- checked long -> native-int conversion (32-bit pair version) --- */
3901 case OP_LCONV_TO_OVF_I: {
3902 guint32 *br [3], *label [1];
3905 * Valid ints: 0xffffffff:8000000 to 00000000:0x7f000000
3907 sparc_cmp_imm (code, ins->sreg1, 0);
3909 sparc_branch (code, 1, sparc_bneg, 0);
3913 /* ms word must be 0 */
3914 sparc_cmp_imm (code, ins->sreg2, 0);
3916 sparc_branch (code, 1, sparc_be, 0);
3921 EMIT_COND_SYSTEM_EXCEPTION (ins, sparc_ba, "OverflowException");
3924 sparc_patch (br [0], code);
3926 /* ms word must 0xfffffff */
3927 sparc_cmp_imm (code, ins->sreg2, -1);
3929 sparc_branch (code, 1, sparc_bne, 0);
3930 sparc_patch (br [2], label [0]);
3933 sparc_patch (br [1], code);
3934 if (ins->sreg1 != ins->dreg)
3935 sparc_mov_reg_reg (code, ins->sreg1, ins->dreg);
/* --- double-precision FP arithmetic --- */
3939 sparc_faddd (code, ins->sreg1, ins->sreg2, ins->dreg);
3942 sparc_fsubd (code, ins->sreg1, ins->sreg2, ins->dreg);
3945 sparc_fmuld (code, ins->sreg1, ins->sreg2, ins->dreg);
3948 sparc_fdivd (code, ins->sreg1, ins->sreg2, ins->dreg);
3952 sparc_fnegd (code, ins->sreg1, ins->dreg);
3954 /* FIXME: why don't use fnegd ? */
3955 sparc_fnegs (code, ins->sreg1, ins->dreg);
/* FP remainder: r = a - (a / b) * b.  NOTE(review): truncation semantics
 * of the intermediate division are not visible here — relevant lines are
 * missing. */
3959 sparc_fdivd (code, ins->sreg1, ins->sreg2, FP_SCRATCH_REG);
3960 sparc_fmuld (code, ins->sreg2, FP_SCRATCH_REG, FP_SCRATCH_REG);
3961 sparc_fsubd (code, ins->sreg1, FP_SCRATCH_REG, ins->dreg);
3964 sparc_fcmpd (code, ins->sreg1, ins->sreg2);
/* --- FP setcc via fcmpd + branch-over sequences --- */
3971 sparc_fcmpd (code, ins->sreg1, ins->sreg2);
3972 sparc_clr_reg (code, ins->dreg);
3973 switch (ins->opcode) {
3976 sparc_fbranch (code, 1, opcode_to_sparc_cond (ins->opcode), 4);
3978 sparc_set (code, 1, ins->dreg);
/* Unordered results also count for the *_UN variants. */
3979 sparc_fbranch (code, 1, sparc_fbu, 2);
3981 sparc_set (code, 1, ins->dreg);
3984 sparc_fbranch (code, 1, opcode_to_sparc_cond (ins->opcode), 2);
3986 sparc_set (code, 1, ins->dreg);
/* --- FP conditional branches; unordered handling per IL semantics --- */
3992 EMIT_FLOAT_COND_BRANCH (ins, opcode_to_sparc_cond (ins->opcode), 1, 1);
3995 /* clt.un + brfalse */
3997 sparc_fbranch (code, 1, sparc_fbul, 0);
4000 EMIT_FLOAT_COND_BRANCH (ins, sparc_fba, 1, 1);
4001 sparc_patch (p, (guint8*)code);
4005 /* cgt.un + brfalse */
4007 sparc_fbranch (code, 1, sparc_fbug, 0);
4010 EMIT_FLOAT_COND_BRANCH (ins, sparc_fba, 1, 1);
4011 sparc_patch (p, (guint8*)code);
4015 EMIT_FLOAT_COND_BRANCH (ins, sparc_fbne, 1, 1);
4016 EMIT_FLOAT_COND_BRANCH (ins, sparc_fbu, 1, 1);
4019 EMIT_FLOAT_COND_BRANCH (ins, sparc_fbl, 1, 1);
4020 EMIT_FLOAT_COND_BRANCH (ins, sparc_fbu, 1, 1);
4023 EMIT_FLOAT_COND_BRANCH (ins, sparc_fbg, 1, 1);
4024 EMIT_FLOAT_COND_BRANCH (ins, sparc_fbu, 1, 1);
4027 EMIT_FLOAT_COND_BRANCH (ins, sparc_fbge, 1, 1);
4028 EMIT_FLOAT_COND_BRANCH (ins, sparc_fbu, 1, 1);
4031 EMIT_FLOAT_COND_BRANCH (ins, sparc_fble, 1, 1);
4032 EMIT_FLOAT_COND_BRANCH (ins, sparc_fbu, 1, 1);
/* ckfinite: spill the double, inspect the biased exponent field (11 bits,
 * all-ones == Inf/NaN) and throw ArithmeticException if so. */
4034 case CEE_CKFINITE: {
4035 gint32 offset = mono_spillvar_offset_float (cfg, 0);
4036 if (!sparc_is_imm13 (offset))
4038 sparc_stdf_imm (code, ins->sreg1, sparc_sp, offset);
4039 sparc_lduh_imm (code, sparc_sp, offset, sparc_o7);
4040 sparc_srl_imm (code, sparc_o7, 4, sparc_o7);
4041 sparc_and_imm (code, FALSE, sparc_o7, 2047, sparc_o7);
4042 sparc_cmp_imm (code, sparc_o7, 2047);
4043 EMIT_COND_SYSTEM_EXCEPTION (ins, sparc_be, "ArithmeticException");
4045 sparc_fmovd (code, ins->sreg1, ins->dreg);
4047 sparc_fmovs (code, ins->sreg1, ins->dreg);
4048 sparc_fmovs (code, ins->sreg1 + 1, ins->dreg + 1);
4054 g_warning ("unknown opcode %s in %s()\n", mono_inst_name (ins->opcode), __FUNCTION__);
4056 g_warning ("%s:%d: unknown opcode %s\n", __FILE__, __LINE__, mono_inst_name (ins->opcode));
4058 g_assert_not_reached ();
/* Sanity check: emitted bytes must not exceed the spec's declared max. */
4061 if ((((guint8*)code) - code_start) > max_len) {
4062 g_warning ("wrong maximal instruction length of instruction %s (expected %d, got %d)",
4063 mono_inst_name (ins->opcode), max_len, ((guint8*)code) - code_start);
4064 g_assert_not_reached ();
4074 cfg->code_len = (guint8*)code - cfg->native_code;
/*
 * mono_arch_register_lowlevel_calls:
 *
 *   Register the SPARC-specific low-level helpers with the JIT icall table so
 * generated code can reference them by address.
 */
4078 mono_arch_register_lowlevel_calls (void)
4080 mono_register_jit_icall (mono_sparc_break, "mono_sparc_break", NULL, TRUE);
4081 mono_register_jit_icall (mono_arch_get_lmf_addr, "mono_arch_get_lmf_addr", NULL, TRUE);
/*
 * mono_arch_patch_code:
 *
 *   Walk the jump-info list JI and back-patch the native code of METHOD:
 * resolve each patch target, then rewrite the instruction(s) at the recorded
 * ip — either turning a set-template/call sequence into its final form, or
 * letting sparc_patch fix up the branch/set at ip.
 */
4085 mono_arch_patch_code (MonoMethod *method, MonoDomain *domain, guint8 *code, MonoJumpInfo *ji, gboolean run_cctors)
4087 MonoJumpInfo *patch_info;
4089 /* FIXME: Move part of this to arch independent code */
4090 for (patch_info = ji; patch_info; patch_info = patch_info->next) {
4091 unsigned char *ip = patch_info->ip.i + code;
4094 target = mono_resolve_patch_target (method, domain, code, patch_info, run_cctors);
4096 switch (patch_info->type) {
4097 case MONO_PATCH_INFO_CLASS_INIT: {
4098 guint32 *ip2 = (guint32*)ip;
4099 /* Might already been changed to a nop */
4101 sparc_set_template (ip2, sparc_o7);
4102 sparc_jmpl (ip2, sparc_o7, sparc_g0, sparc_o7);
4104 sparc_call_simple (ip2, 0);
4108 case MONO_PATCH_INFO_METHOD_JUMP: {
4109 guint32 *ip2 = (guint32*)ip;
4110 /* Might already been patched */
4111 sparc_set_template (ip2, sparc_o7);
/* Default path: generic instruction patching for all other patch types. */
4117 sparc_patch ((guint32*)ip, target);
4122 mono_arch_instrument_mem_needs (MonoMethod *method, int *stack, int *code)
/*
 * mono_arch_instrument_prolog:
 *   Emits tracing/profiling entry code at @p: spills the incoming argument
 *   registers (integer and, on V9, float) to their stack homes, then calls
 *   @func(method, frame_addr).  Returns the advanced code pointer.
 *   NOTE(review): numbering gaps show elided lines (e.g. the NOT_IMPLEMENTED
 *   branch after the sparc_is_imm13 checks, the call emission after 4163).
 */
4129 mono_arch_instrument_prolog (MonoCompile *cfg, void *func, void *p, gboolean enable_arguments)
4131 int i, stack, code_size;
4132 guint32 *code = (guint32*)p;
4133 MonoMethodSignature *sig = cfg->method->signature;
4136 /* Save registers to stack */
/* Spill all six %i registers to the argument area of the frame */
4137 for (i = 0; i < 6; ++i)
4138 sparc_sti_imm (code, sparc_i0 + i, sparc_fp, ARGS_OFFSET + (i * sizeof (gpointer)));
4140 cinfo = get_call_info (sig, FALSE);
4142 /* Save float regs on V9, since they are caller saved */
4143 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
4144 ArgInfo *ainfo = cinfo->args + i;
4145 gint32 stack_offset;
4147 stack_offset = ainfo->offset + ARGS_OFFSET;
4149 if (ainfo->storage == ArgInFloatReg) {
/* Offsets outside the 13-bit signed immediate range are not handled here
 * (fallback branch elided in this excerpt) */
4150 if (!sparc_is_imm13 (stack_offset))
4152 sparc_stf_imm (code, ainfo->reg, sparc_fp, stack_offset);
4154 else if (ainfo->storage == ArgInDoubleReg) {
4155 /* The offset is guaranteed to be aligned by the ABI rules */
4156 sparc_stdf_imm (code, ainfo->reg, sparc_fp, stack_offset);
/* Arguments for the tracing callback: method in %o0, biased frame ptr in %o1 */
4160 sparc_set (code, cfg->method, sparc_o0);
4161 sparc_add_imm (code, FALSE, sparc_fp, MONO_SPARC_STACK_BIAS, sparc_o1);
4163 mono_add_patch_info (cfg, (guint8*)code-cfg->native_code, MONO_PATCH_INFO_ABS, func);
4166 /* Restore float regs on V9 */
4167 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
4168 ArgInfo *ainfo = cinfo->args + i;
4169 gint32 stack_offset;
4171 stack_offset = ainfo->offset + ARGS_OFFSET;
4173 if (ainfo->storage == ArgInFloatReg) {
4174 if (!sparc_is_imm13 (stack_offset))
4176 sparc_ldf_imm (code, sparc_fp, stack_offset, ainfo->reg);
4178 else if (ainfo->storage == ArgInDoubleReg) {
4179 /* The offset is guaranteed to be aligned by the ABI rules */
4180 sparc_lddf_imm (code, sparc_fp, stack_offset, ainfo->reg);
/* Sanity check that we stayed within the reserved instrumentation space */
4184 mono_arch_instrument_mem_needs (cfg->method, &stack, &code_size);
4186 g_assert ((code - (guint32*)p) <= (code_size * 4));
/*
 * mono_arch_instrument_epilog:
 *   Emits tracing exit code at @p: classifies the method's return type into a
 *   save mode (none / one reg / reg pair / fp / struct), spills the result so
 *   it survives the call, invokes @func(method, ...), then restores the result.
 *   NOTE(review): case labels and `break`s between the save_mode assignments
 *   are elided in this excerpt (numbering gaps).
 */
4202 mono_arch_instrument_epilog (MonoCompile *cfg, void *func, void *p, gboolean enable_arguments)
4204 guint32 *code = (guint32*)p;
4205 int save_mode = SAVE_NONE;
4206 MonoMethod *method = cfg->method;
4207 int rtype = method->signature->ret->type;
4211 case MONO_TYPE_VOID:
4212 /* special case string .ctor icall */
/* NOTE(review): `strcmp(...) &&` is true when the name is NOT ".ctor";
 * presumably this should match ".ctor" (i.e. ==0) — verify against other
 * backends before changing (elided context may explain it). */
4213 if (strcmp (".ctor", method->name) && method->klass == mono_defaults.string_class)
4214 save_mode = SAVE_ONE;
4216 save_mode = SAVE_NONE;
4221 save_mode = SAVE_ONE;
4223 save_mode = SAVE_TWO;
4228 save_mode = SAVE_FP;
4230 case MONO_TYPE_VALUETYPE:
/* Enums are handled as their underlying primitive type */
4231 if (method->signature->ret->data.klass->enumtype) {
4232 rtype = method->signature->ret->data.klass->enum_basetype->type;
4235 save_mode = SAVE_STRUCT;
4238 save_mode = SAVE_ONE;
4242 /* Save the result to the stack and also put it into the output registers */
4244 switch (save_mode) {
/* SAVE_TWO: 64-bit result lives in %i0/%i1 under the V8 ABI */
4247 sparc_st_imm (code, sparc_i0, sparc_fp, 68);
4248 sparc_st_imm (code, sparc_i0, sparc_fp, 72);
4249 sparc_mov_reg_reg (code, sparc_i0, sparc_o1);
4250 sparc_mov_reg_reg (code, sparc_i1, sparc_o2);
4253 sparc_sti_imm (code, sparc_i0, sparc_fp, ARGS_OFFSET);
4254 sparc_mov_reg_reg (code, sparc_i0, sparc_o1);
/* SAVE_FP: spill %f0/%f1 pair and reload the two words into %o1/%o2 */
4258 sparc_stdf_imm (code, sparc_f0, sparc_fp, ARGS_OFFSET);
4260 sparc_stdf_imm (code, sparc_f0, sparc_fp, 72);
4261 sparc_ld_imm (code, sparc_fp, 72, sparc_o1);
4262 sparc_ld_imm (code, sparc_fp, 72 + 4, sparc_o2);
4267 sparc_mov_reg_reg (code, sparc_i0, sparc_o1);
/* SAVE_STRUCT: the struct-return pointer sits at %fp+64 in the V8 frame */
4269 sparc_ld_imm (code, sparc_fp, 64, sparc_o1);
4277 sparc_set (code, cfg->method, sparc_o0);
4279 mono_add_patch_info (cfg, (guint8*)code - cfg->native_code, MONO_PATCH_INFO_ABS, func);
4282 /* Restore result */
4284 switch (save_mode) {
4286 sparc_ld_imm (code, sparc_fp, 68, sparc_i0);
/* NOTE(review): both words reload into %i0 here (4286/4287); the second
 * would be expected to target %i1 — confirm against upstream mini-sparc.c. */
4287 sparc_ld_imm (code, sparc_fp, 72, sparc_i0);
4290 sparc_ldi_imm (code, sparc_fp, ARGS_OFFSET, sparc_i0);
4293 sparc_lddf_imm (code, sparc_fp, ARGS_OFFSET, sparc_f0);
/*
 * mono_arch_max_epilog_size:
 *   Returns an upper bound (in bytes) on the epilogue code this backend can
 *   emit for @cfg, including LMF restore, tracing calls, and one exception
 *   throw sequence per MONO_PATCH_INFO_EXC entry.
 */
4304 mono_arch_max_epilog_size (MonoCompile *cfg)
4306 int exc_count = 0, max_epilog_size = 16 + 20*4;
4307 MonoJumpInfo *patch_info;
4309 if (cfg->method->save_lmf)
4310 max_epilog_size += 128;
4312 if (mono_jit_trace_calls != NULL)
4313 max_epilog_size += 50;
4315 if (cfg->prof_options & MONO_PROFILE_ENTER_LEAVE)
4316 max_epilog_size += 50;
4318 /* count the number of exception infos */
4320 for (patch_info = cfg->patch_info; patch_info; patch_info = patch_info->next) {
4321 if (patch_info->type == MONO_PATCH_INFO_EXC)
4326 * make sure we have enough space for exceptions
/* Two budget terms per exception (the #if/#else between them is elided) */
4329 max_epilog_size += exc_count * (20 * 4);
4331 max_epilog_size += exc_count * 24;
4334 return max_epilog_size;
/*
 * mono_arch_emit_prolog:
 *   Emits the method prologue: computes the final frame size (register save
 *   area, struct-return slot, outgoing parameter area, localloc area),
 *   emits the `save` instruction, stores incoming arguments to their stack
 *   homes according to their ArgInfo storage class, sets up the LMF if
 *   required, and emits tracing entry code.  Returns the new code pointer.
 *   NOTE(review): this excerpt elides many lines (else-branches,
 *   NOT_IMPLEMENTED fallbacks, #ifdef SPARCV9 guards) — gaps in numbering.
 */
4338 mono_arch_emit_prolog (MonoCompile *cfg)
4340 MonoMethod *method = cfg->method;
4341 MonoMethodSignature *sig;
4347 cfg->code_size = 256;
4348 cfg->native_code = g_malloc (cfg->code_size);
4349 code = (guint32*)cfg->native_code;
4351 /* FIXME: Generate intermediate code instead */
/* Frame layout: locals, then register save area, struct-ret slot, param area */
4353 offset = cfg->stack_offset;
4354 offset += (16 * sizeof (gpointer)); /* register save area */
4356 offset += 4; /* struct/union return pointer */
4359 /* add parameter area size for called functions */
4360 if (cfg->param_area < (6 * sizeof (gpointer)))
4361 /* Reserve space for the first 6 arguments even if it is unused */
4362 offset += 6 * sizeof (gpointer);
4364 offset += cfg->param_area;
4366 /* align the stack size */
4367 offset = ALIGN_TO (offset, MONO_ARCH_FRAME_ALIGNMENT);
4370 * localloc'd memory is stored between the local variables (whose
4371 * size is given by cfg->stack_offset), and between the space reserved
4374 cfg->arch.localloc_offset = offset - cfg->stack_offset;
4376 cfg->stack_offset = offset;
/* Frame sizes beyond imm13 need the size materialized in a scratch register */
4378 if (!sparc_is_imm13 (- cfg->stack_offset)) {
4379 /* Can't use sparc_o7 here, since we're still in the caller's frame */
4380 sparc_set (code, (- cfg->stack_offset), GP_SCRATCH_REG);
4381 sparc_save (code, sparc_sp, GP_SCRATCH_REG, sparc_sp);
4384 sparc_save_imm (code, sparc_sp, - cfg->stack_offset, sparc_sp);
/* Debug aid: break on entry to methods matching this name (likely temporary) */
4387 if (strstr (cfg->method->name, "test_marshal_struct")) {
4388 mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_ABS, mono_sparc_break);
4389 sparc_call_simple (code, 0);
4394 sig = method->signature;
4396 cinfo = get_call_info (sig, FALSE);
4398 /* Keep in sync with emit_load_volatile_arguments */
4399 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
4400 ArgInfo *ainfo = cinfo->args + i;
4401 gint32 stack_offset;
4403 inst = cfg->varinfo [i];
4405 if (sig->hasthis && (i == 0))
4406 arg_type = &mono_defaults.object_class->byval_arg;
4408 arg_type = sig->params [i - sig->hasthis];
4410 stack_offset = ainfo->offset + ARGS_OFFSET;
4412 /* Save the split arguments so they will reside entirely on the stack */
4413 if (ainfo->storage == ArgInSplitRegStack) {
4414 /* Save the register to the stack */
4415 g_assert (inst->opcode == OP_REGOFFSET);
4416 if (!sparc_is_imm13 (stack_offset))
4418 sparc_st_imm (code, sparc_i5, inst->inst_basereg, stack_offset);
/* R8 args on V8 may be misaligned on the stack; copy to an aligned slot */
4421 if (!v64 && !arg_type->byref && (arg_type->type == MONO_TYPE_R8)) {
4422 /* Save the argument to a dword aligned stack location */
4424 * stack_offset contains the offset of the argument on the stack.
4425 * inst->inst_offset contains the dword aligned offset where the value
4428 if (ainfo->storage == ArgInIRegPair) {
4429 if (!sparc_is_imm13 (inst->inst_offset + 4))
4431 sparc_st_imm (code, sparc_i0 + ainfo->reg, inst->inst_basereg, inst->inst_offset);
4432 sparc_st_imm (code, sparc_i0 + ainfo->reg + 1, inst->inst_basereg, inst->inst_offset + 4);
4435 if (ainfo->storage == ArgInSplitRegStack) {
4437 g_assert_not_reached ();
4439 if (stack_offset != inst->inst_offset) {
4440 /* stack_offset is not dword aligned, so we need to make a copy */
4441 sparc_st_imm (code, sparc_i5, inst->inst_basereg, inst->inst_offset);
4442 sparc_ld_imm (code, sparc_fp, stack_offset + 4, sparc_o7);
4443 sparc_st_imm (code, sparc_o7, inst->inst_basereg, inst->inst_offset + 4);
4447 if (ainfo->storage == ArgOnStackPair) {
4449 g_assert_not_reached ();
4451 if (stack_offset != inst->inst_offset) {
4452 /* stack_offset is not dword aligned, so we need to make a copy */
4453 sparc_ld_imm (code, sparc_fp, stack_offset, sparc_o7);
4454 sparc_st_imm (code, sparc_o7, inst->inst_basereg, inst->inst_offset);
4455 sparc_ld_imm (code, sparc_fp, stack_offset + 4, sparc_o7);
4456 sparc_st_imm (code, sparc_o7, inst->inst_basereg, inst->inst_offset + 4);
4460 g_assert_not_reached ();
4463 if ((ainfo->storage == ArgInIReg) && (inst->opcode != OP_REGVAR)) {
4464 /* Argument in register, but need to be saved to stack */
4465 if (!sparc_is_imm13 (stack_offset))
/* Store width chosen from the slot's alignment relative to ARGS_OFFSET */
4467 if ((stack_offset - ARGS_OFFSET) & 0x1)
4468 sparc_stb_imm (code, sparc_i0 + ainfo->reg, inst->inst_basereg, stack_offset);
4470 if ((stack_offset - ARGS_OFFSET) & 0x2)
4471 sparc_sth_imm (code, sparc_i0 + ainfo->reg, inst->inst_basereg, stack_offset);
4473 if ((stack_offset - ARGS_OFFSET) & 0x4)
4474 sparc_st_imm (code, sparc_i0 + ainfo->reg, inst->inst_basereg, stack_offset);
4477 sparc_stx_imm (code, sparc_i0 + ainfo->reg, inst->inst_basereg, stack_offset);
4479 sparc_st_imm (code, sparc_i0 + ainfo->reg, inst->inst_basereg, stack_offset);
4483 if ((ainfo->storage == ArgInIRegPair) && (inst->opcode != OP_REGVAR)) {
4487 /* Argument in regpair, but need to be saved to stack */
4488 if (!sparc_is_imm13 (inst->inst_offset + 4))
4490 sparc_st_imm (code, sparc_i0 + ainfo->reg, inst->inst_basereg, inst->inst_offset);
4491 sparc_st_imm (code, sparc_i0 + ainfo->reg + 1, inst->inst_basereg, inst->inst_offset + 4);
4493 else if ((ainfo->storage == ArgInFloatReg) && (inst->opcode != OP_REGVAR)) {
4494 if (!sparc_is_imm13 (stack_offset))
4496 sparc_stf_imm (code, ainfo->reg, inst->inst_basereg, inst->inst_offset);
4498 else if ((ainfo->storage == ArgInDoubleReg) && (inst->opcode != OP_REGVAR)) {
4499 /* The offset is guaranteed to be aligned by the ABI rules */
4500 sparc_stdf_imm (code, ainfo->reg, inst->inst_basereg, inst->inst_offset);
4503 if ((ainfo->storage == ArgInFloatReg) && (inst->opcode == OP_REGVAR)) {
4504 /* Need to move into the a double precision register */
4505 sparc_fstod (code, ainfo->reg, ainfo->reg - 1);
4508 if ((ainfo->storage == ArgInSplitRegStack) || (ainfo->storage == ArgOnStack))
4509 if (inst->opcode == OP_REGVAR)
4510 /* FIXME: Load the argument into memory */
/* LMF (Last Managed Frame) setup: record ip/sp/fp/method so the runtime
 * can walk managed frames from native code */
4516 if (cfg->method->save_lmf) {
4517 gint32 lmf_offset = STACK_BIAS - cfg->arch.lmf_offset;
4520 mono_add_patch_info (cfg, (guint8*)code - cfg->native_code, MONO_PATCH_INFO_IP, NULL);
4521 sparc_set_template (code, sparc_o7);
4522 sparc_sti_imm (code, sparc_o7, sparc_fp, lmf_offset + G_STRUCT_OFFSET (MonoLMF, ip));
4524 sparc_sti_imm (code, sparc_sp, sparc_fp, lmf_offset + G_STRUCT_OFFSET (MonoLMF, sp));
4526 sparc_sti_imm (code, sparc_fp, sparc_fp, lmf_offset + G_STRUCT_OFFSET (MonoLMF, ebp));
4528 /* FIXME: add a relocation for this */
4529 sparc_set (code, cfg->method, sparc_o7);
4530 sparc_sti_imm (code, sparc_o7, sparc_fp, lmf_offset + G_STRUCT_OFFSET (MonoLMF, method));
4532 mono_add_patch_info (cfg, (guint8*)code - cfg->native_code, MONO_PATCH_INFO_INTERNAL_METHOD,
4533 (gpointer)"mono_arch_get_lmf_addr");
4536 code = (guint32*)mono_sparc_emit_save_lmf (code, lmf_offset);
4539 if (mono_jit_trace_calls != NULL && mono_trace_eval (method))
4540 code = mono_arch_instrument_prolog (cfg, mono_trace_enter_method, code, TRUE);
4542 cfg->code_len = (guint8*)code - cfg->native_code;
4544 g_assert (cfg->code_len <= cfg->code_size);
4546 return (guint8*)code;
/*
 * mono_arch_emit_epilog:
 *   Emits the method epilogue: tracing exit code, LMF restore, the V8
 *   struct-return adjustment, a `ret`/`restore` pair (folding the previous
 *   `or` into the restore when safe), and the exception-throwing stubs for
 *   every MONO_PATCH_INFO_EXC recorded during codegen.
 *   NOTE(review): numbering gaps elide lines (e.g. the `ret` emission around
 *   4575, `can_fold = 1` near 4580, branch targets after each fold).
 */
4550 mono_arch_emit_epilog (MonoCompile *cfg)
4552 MonoJumpInfo *patch_info;
4553 MonoMethod *method = cfg->method;
4557 code = (guint32*)(cfg->native_code + cfg->code_len);
4559 if (mono_jit_trace_calls != NULL && mono_trace_eval (method))
4560 code = mono_arch_instrument_epilog (cfg, mono_trace_leave_method, code, TRUE);
4562 if (cfg->method->save_lmf) {
4563 gint32 lmf_offset = STACK_BIAS - cfg->arch.lmf_offset;
4565 code = mono_sparc_emit_restore_lmf (code, lmf_offset);
4569 * The V8 ABI requires that calls to functions which return a structure
4572 if (!v64 && cfg->method->signature->pinvoke && MONO_TYPE_ISSTRUCT(cfg->method->signature->ret))
/* Return to %i7+12 instead of +8: skips the caller's unimp struct-size word */
4573 sparc_jmpl_imm (code, sparc_i7, 12, sparc_g0);
4577 /* Only fold last instruction into the restore if the exit block has an in count of 1
4578 and the previous block hasn't been optimized away since it may have an in count > 1 */
4579 if (cfg->bb_exit->in_count == 1 && cfg->bb_exit->in_bb[0]->native_offset != cfg->bb_exit->native_offset)
4582 /* Try folding last instruction into the restore */
/* Pattern: `or reg, imm, %i0` (op=2, op3=2, i=1) followed by the delay slot
 * instruction — replace both with a single restore that sets %o0 */
4583 if (can_fold && (sparc_inst_op (code [-2]) == 0x2) && (sparc_inst_op3 (code [-2]) == 0x2) && sparc_inst_imm (code [-2]) && (sparc_inst_rd (code [-2]) == sparc_i0)) {
4584 /* or reg, imm, %i0 */
4585 int reg = sparc_inst_rs1 (code [-2]);
4586 int imm = sparc_inst_imm13 (code [-2]);
4587 code [-2] = code [-1];
4589 sparc_restore_imm (code, reg, imm, sparc_o0);
/* Same fold for the register-register form: `or reg1, reg2, %i0` */
4592 if (can_fold && (sparc_inst_op (code [-2]) == 0x2) && (sparc_inst_op3 (code [-2]) == 0x2) && (!sparc_inst_imm (code [-2])) && (sparc_inst_rd (code [-2]) == sparc_i0)) {
4593 /* or reg, reg, %i0 */
4594 int reg1 = sparc_inst_rs1 (code [-2]);
4595 int reg2 = sparc_inst_rs2 (code [-2]);
4596 code [-2] = code [-1];
4598 sparc_restore (code, reg1, reg2, sparc_o0);
/* No fold possible: plain restore in the delay slot */
4601 sparc_restore_imm (code, sparc_g0, 0, sparc_g0);
4603 /* add code to raise exceptions */
4604 for (patch_info = cfg->patch_info; patch_info; patch_info = patch_info->next) {
4605 glong offset = patch_info->ip.i;
4607 switch (patch_info->type) {
4608 case MONO_PATCH_INFO_EXC:
/* Point the original branch at this stub, load exception name + throw-site
 * offset into %o0/%o1, then retarget the patch at the throw helper */
4609 sparc_patch ((guint32*)(cfg->native_code + patch_info->ip.i), code);
4610 mono_add_patch_info (cfg, (guint8*)code - cfg->native_code, MONO_PATCH_INFO_EXC_NAME, patch_info->data.target);
4611 sparc_set_template (code, sparc_o0);
4612 mono_add_patch_info (cfg, (guint8*)code - cfg->native_code, MONO_PATCH_INFO_METHOD_REL, (gpointer)offset);
4613 sparc_set_template (code, sparc_o1);
4614 patch_info->type = MONO_PATCH_INFO_INTERNAL_METHOD;
4615 patch_info->data.name = "mono_arch_throw_exception_by_name";
4616 patch_info->ip.i = (guint8*)code - cfg->native_code;
4625 cfg->code_len = (guint8*)code - cfg->native_code;
4627 g_assert (cfg->code_len < cfg->code_size);
/* Thread-local key holding the address of the current thread's LMF pointer.
 * Solaris threads (thr_*) or pthreads are selected via MONO_SPARC_THR_TLS. */
4631 gboolean lmf_addr_key_inited = FALSE;
4633 #ifdef MONO_SPARC_THR_TLS
4634 thread_key_t lmf_addr_key;
4636 pthread_key_t lmf_addr_key;
/*
 * mono_arch_get_lmf_addr:
 *   Returns the address of the calling thread's LMF pointer, read from the
 *   TLS slot installed by mono_arch_setup_jit_tls_data.
 */
4640 mono_arch_get_lmf_addr (void)
4642 /* This is perf critical so we bypass the IO layer */
4643 /* The thr_... functions seem to be somewhat faster */
4644 #ifdef MONO_SPARC_THR_TLS
4646 thr_getspecific (lmf_addr_key, &res);
4649 return pthread_getspecific (lmf_addr_key);
/*
 * mono_arch_setup_jit_tls_data:
 *   Per-thread JIT initialization: optionally installs an alternate signal
 *   stack (for handling SIGSEGV on stack overflow), lazily creates the LMF
 *   TLS key, and stores &tls->lmf in it for the current thread.
 *   NOTE(review): the lazy key creation (4681-4691) is not guarded by a lock
 *   in the visible lines — presumably callers serialize thread attach; the
 *   elided lines may contain the guard.
 */
4654 mono_arch_setup_jit_tls_data (MonoJitTlsData *tls)
4656 #ifdef MONO_ARCH_SIGSEGV_ON_ALTSTACK
4658 struct sigaltstack sa;
4663 printf ("SIGALT!\n");
4664 /* Setup an alternate signal stack */
4665 tls->signal_stack = g_malloc (SIGNAL_STACK_SIZE);
4666 tls->signal_stack_size = SIGNAL_STACK_SIZE;
/* Two variants of sigaltstack registration (the #if between them is elided) */
4669 sa.ss_sp = tls->signal_stack;
4670 sa.ss_size = SIGNAL_STACK_SIZE;
4672 g_assert (sigaltstack (&sa, NULL) == 0);
4674 sigstk.ss_sp = tls->signal_stack;
4675 sigstk.ss_size = SIGNAL_STACK_SIZE;
4676 sigstk.ss_flags = 0;
4677 g_assert (sigaltstack (&sigstk, NULL) == 0);
4681 if (!lmf_addr_key_inited) {
4684 lmf_addr_key_inited = TRUE;
4686 #ifdef MONO_SPARC_THR_TLS
4687 res = thr_keycreate (&lmf_addr_key, NULL);
4689 res = pthread_key_create (&lmf_addr_key, NULL);
4691 g_assert (res == 0);
/* Publish this thread's LMF slot address for mono_arch_get_lmf_addr */
4695 #ifdef MONO_SPARC_THR_TLS
4696 thr_setspecific (lmf_addr_key, &tls->lmf);
4698 pthread_setspecific (lmf_addr_key, &tls->lmf);
4703 mono_arch_free_jit_tls_data (MonoJitTlsData *tls)
/*
 * mono_arch_emit_this_vret_args:
 *   Adds the hidden `this` and valuetype-return-pointer arguments to a call.
 *   When a vret pointer is present it goes in %o0 (shifting `this` to %o1);
 *   on the alternate path the vret pointer is stored to the %sp+64 slot.
 *   NOTE(review): the branch structure around 4715/4721 is partly elided.
 */
4708 mono_arch_emit_this_vret_args (MonoCompile *cfg, MonoCallInst *inst, int this_reg, int this_type, int vt_reg)
4710 int this_out_reg = sparc_o0;
4715 MONO_INST_NEW (cfg, ins, OP_SETREG);
4716 ins->sreg1 = vt_reg;
4717 ins->dreg = sparc_o0;
4718 mono_bblock_add_inst (cfg->cbb, ins);
4719 this_out_reg = sparc_o1;
4721 /* Set the 'struct/union return pointer' location on the stack */
4722 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, sparc_sp, 64, vt_reg);
4726 /* add the this argument */
4727 if (this_reg != -1) {
4729 MONO_INST_NEW (cfg, this, OP_SETREG);
4730 this->type = this_type;
4731 this->sreg1 = this_reg;
4732 this->dreg = this_out_reg;
4733 mono_bblock_add_inst (cfg->cbb, this);
4739 mono_arch_get_opcode_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
 * mono_arch_get_argument_info:
 * @csig: a method signature
 * @param_count: the number of parameters to consider
 * @arg_info: an array to store the result infos
 * Gathers information on parameters such as size, alignment and
 * padding. arg_info should be large enought to hold param_count + 1 entries.
 * Returns the size of the activation frame.
4756 mono_arch_get_argument_info (MonoMethodSignature *csig, int param_count, MonoJitArgumentInfo *arg_info)
4762 cinfo = get_call_info (csig, FALSE);
/* Slot 0 describes the implicit `this` argument when present */
4764 if (csig->hasthis) {
4765 ainfo = &cinfo->args [0];
4766 arg_info [0].offset = ARGS_OFFSET - MONO_SPARC_STACK_BIAS + ainfo->offset;
/* Explicit parameters fill arg_info[1..param_count]; offsets are unbiased
 * frame offsets (stack bias removed) */
4769 for (k = 0; k < param_count; k++) {
4770 ainfo = &cinfo->args [k + csig->hasthis];
4772 arg_info [k + 1].offset = ARGS_OFFSET - MONO_SPARC_STACK_BIAS + ainfo->offset;
4773 arg_info [k + 1].size = mono_type_size (csig->params [k], &align);
4782 mono_arch_print_tree (MonoInst *tree, int arity)
4787 MonoInst* mono_arch_get_domain_intrinsic (MonoCompile* cfg)
4792 MonoInst* mono_arch_get_thread_intrinsic (MonoCompile* cfg)