2 * mini-sparc.c: Sparc backend for the Mono code generator
5 * Paolo Molaro (lupus@ximian.com)
6 * Dietmar Maurer (dietmar@ximian.com)
9 * Christopher Taylor (ct@gentoo.org)
10 * Mark Crichton (crichton@gimp.org)
11 * Zoltan Varga (vargaz@freemail.hu)
13 * (C) 2003 Ximian, Inc.
21 #include <sys/systeminfo.h>
25 #include <mono/metadata/appdomain.h>
26 #include <mono/metadata/debug-helpers.h>
27 #include <mono/metadata/tokentype.h>
28 #include <mono/utils/mono-math.h>
30 #include "mini-sparc.h"
33 #include "cpu-sparc.h"
36 * Sparc V9 means two things:
37 * - the instruction set
40 * V9 instructions are only usable if the underlying processor is 64 bit. Most Sparc
41 * processors in use are 64 bit processors. The V9 ABI is only usable if the
42 * mono executable is a 64 bit executable. So it would make sense to use the 64 bit
43 * instructions without using the 64 bit ABI.
48 * - %i0..%i<n> hold the incoming arguments, these are never written by JITted
49 * code. Unused input registers are used for global register allocation.
50 * - %l0..%l7 is used for local register allocation
51 * - %o0..%o6 is used for outgoing arguments
52 * - %o7 and %g1 is used as scratch registers in opcodes
53 * - all floating point registers are used for local register allocation except %f0.
54 * Only double precision registers are used.
56 * - fp registers %d0..%d30 are used for parameter passing, and %d32..%d62 are
57 * used for local allocation.
62 * - doubles and longs must be stored in dword aligned locations
66 * The following things are not implemented or do not work:
67 * - some fp arithmetic corner cases
68 * The following tests in mono/mini are expected to fail:
69 * - test_0_simple_double_casts
70 * This test casts (guint64)-1 to double and then back to guint64 again.
71 * Under x86, it returns 0, while under sparc it returns -1.
73 * In addition to this, the runtime requires the trunc function, or its
74 * solaris counterpart, aintl, to do some double->int conversions. If this
75 * function is not available, it is emulated somewhat, but the results can be
81 * - optimize sparc_set according to the memory model
82 * - when non-AOT compiling, compute patch targets immediately so we don't
83 * have to emit the 6 byte template.
85 * - struct arguments/returns
90 * - sparc_call_simple can't be used in a lot of places since the displacement
91 * might not fit into an imm30.
92 * - g1 can't be used in a lot of places since it is used as a scratch reg in
94 * - sparc_f0 can't be used as a scratch register on V9
95 * - the %d34..%d62 fp registers are encoded as: %dx = %f(x - 32 + 1), ie.
97 * - ldind.i4/u4 needs to sign extend/clear out upper word -> slows things down
98 * - ins->dreg can't be used as a scratch register in r4 opcodes since it might
99 * be a double precision register which has no single precision part.
100 * - passing/returning structs is hard to implement, because:
101 * - the spec is very hard to understand
102 * - it requires knowledge about the fields of structure, needs to handle
103 * nested structures etc.
107 * Possible optimizations:
108 * - delay slot scheduling
109 * - allocate large constants to registers
110 * - use %o registers for local allocation
111 * - implement unwinding through native frames
112 * - add more mul/div/rem optimizations
116 #define MONO_SPARC_THR_TLS 1
120 * There was a 64 bit bug in glib-2.2: g_bit_nth_msf (0, -1) would return 32,
121 * causing infinite loops in dominator computation. So glib-2.4 is required.
124 #if GLIB_MAJOR_VERSION == 2 && GLIB_MINOR_VERSION < 4
125 #error "glib 2.4 or later is required for 64 bit mode."
129 #define NOT_IMPLEMENTED do { g_assert_not_reached (); } while (0)
131 #define ALIGN_TO(val,align) (((val) + ((align) - 1)) & ~((align) - 1))
133 #define SIGNAL_STACK_SIZE (64 * 1024)
135 #define STACK_BIAS MONO_SPARC_STACK_BIAS
139 /* %g1 is used by sparc_set */
140 #define GP_SCRATCH_REG sparc_g4
141 /* %f0 is used for parameter passing */
142 #define FP_SCRATCH_REG sparc_f30
143 #define ARGS_OFFSET (STACK_BIAS + 128)
147 #define FP_SCRATCH_REG sparc_f0
148 #define ARGS_OFFSET 68
149 #define GP_SCRATCH_REG sparc_g1
153 /* Whether the CPU supports v9 instructions */
154 static gboolean sparcv9 = FALSE;
156 /* Whether this is a 64bit executable */
158 static gboolean v64 = TRUE;
160 static gboolean v64 = FALSE;
163 static gpointer mono_arch_get_lmf_addr (void);
166 mono_spillvar_offset_float (MonoCompile *cfg, int spillvar);
169 mono_arch_regname (int reg) {
170 static const char * rnames[] = {
171 "sparc_g0", "sparc_g1", "sparc_g2", "sparc_g3", "sparc_g4",
172 "sparc_g5", "sparc_g6", "sparc_g7", "sparc_o0", "sparc_o1",
173 "sparc_o2", "sparc_o3", "sparc_o4", "sparc_o5", "sparc_sp",
174 "sparc_call", "sparc_l0", "sparc_l1", "sparc_l2", "sparc_l3",
175 "sparc_l4", "sparc_l5", "sparc_l6", "sparc_l7", "sparc_i0",
176 "sparc_i1", "sparc_i2", "sparc_i3", "sparc_i4", "sparc_i5",
177 "sparc_fp", "sparc_retadr"
179 if (reg >= 0 && reg < 32)
185 * Initialize the cpu to execute managed code.
188 mono_arch_cpu_init (void)
191 /* make sure sparcv9 is initialized for embedded use */
192 mono_arch_cpu_optimizazions(&dummy);
196 * This function returns the optimizations supported on this cpu.
199 mono_arch_cpu_optimizazions (guint32 *exclude_mask)
207 if (!sysinfo (SI_ISALIST, buf, 1024))
208 g_assert_not_reached ();
210 /* From glibc. If the getpagesize is 8192, we're on sparc64, which
211 * (in)directly implies that we're a v9 or better.
212 * Improvements to this are greatly accepted...
213 * Also, we don't differentiate between v7 and v8. I sense SIGILL
214 * sniffing in my future.
216 if (getpagesize() == 8192)
217 strcpy (buf, "sparcv9");
219 strcpy (buf, "sparcv8");
223 * On some processors, the cmov instructions are even slower than the
226 if (strstr (buf, "sparcv9")) {
227 opts |= MONO_OPT_CMOV | MONO_OPT_FCMOV;
231 *exclude_mask |= MONO_OPT_CMOV | MONO_OPT_FCMOV;
237 mono_sparc_break (void)
242 #define flushi(addr) __asm__ __volatile__ ("iflush %0"::"r"(addr):"memory")
243 #else /* assume Sun's compiler */
244 static void flushi(void *addr)
251 void sync_instruction_memory(caddr_t addr, int len);
255 mono_arch_flush_icache (guint8 *code, gint size)
258 /* Hopefully this is optimized based on the actual CPU */
259 sync_instruction_memory (code, size);
261 guint64 *p = (guint64*)code;
262 guint64 *end = (guint64*)(code + ((size + 8) /8));
265 * FIXME: Flushing code in dword chunks is _slow_.
269 __asm__ __volatile__ ("iflush %0"::"r"(p++));
279 * Flush all register windows to memory. Every register window is saved to
280 * a 16 word area on the stack pointed to by its %sp register.
283 mono_sparc_flushw (void)
285 static guint32 start [64];
286 static int inited = 0;
288 static void (*flushw) (void);
293 sparc_save_imm (code, sparc_sp, -160, sparc_sp);
296 sparc_restore_simple (code);
298 g_assert ((code - start) < 64);
300 flushw = (gpointer)start;
309 mono_arch_flush_register_windows (void)
311 mono_sparc_flushw ();
315 mono_arch_is_inst_imm (gint64 imm)
317 return sparc_is_imm13 (imm);
321 mono_sparc_is_v9 (void) {
326 mono_sparc_is_sparc64 (void) {
338 ArgInFloatReg, /* V9 only */
339 ArgInDoubleReg /* V9 only */
344 /* This needs to be offset by %i0 or %o0 depending on caller/callee */
347 guint32 vt_offset; /* for valuetypes */
365 add_general (guint32 *gr, guint32 *stack_size, ArgInfo *ainfo, gboolean pair)
367 ainfo->offset = *stack_size;
370 if (*gr >= PARAM_REGS) {
371 ainfo->storage = ArgOnStack;
374 ainfo->storage = ArgInIReg;
379 /* Always reserve stack space for parameters passed in registers */
380 (*stack_size) += sizeof (gpointer);
383 if (*gr < PARAM_REGS - 1) {
384 /* A pair of registers */
385 ainfo->storage = ArgInIRegPair;
389 else if (*gr >= PARAM_REGS) {
390 /* A pair of stack locations */
391 ainfo->storage = ArgOnStackPair;
394 ainfo->storage = ArgInSplitRegStack;
399 (*stack_size) += 2 * sizeof (gpointer);
405 #define FLOAT_PARAM_REGS 32
408 add_float (guint32 *gr, guint32 *stack_size, ArgInfo *ainfo, gboolean single)
410 ainfo->offset = *stack_size;
413 if (*gr >= FLOAT_PARAM_REGS) {
414 ainfo->storage = ArgOnStack;
417 /* A single is passed in an even numbered fp register */
418 ainfo->storage = ArgInFloatReg;
419 ainfo->reg = *gr + 1;
424 if (*gr < FLOAT_PARAM_REGS) {
425 /* A double register */
426 ainfo->storage = ArgInDoubleReg;
431 ainfo->storage = ArgOnStack;
435 (*stack_size) += sizeof (gpointer);
443 * Obtain information about a call according to the calling convention.
444 * For V8, see the "System V ABI, Sparc Processor Supplement" Sparc V8 version
445 * document for more information.
446 * For V9, see the "Low Level System Information (64-bit psABI)" chapter in
447 * the 'Sparc Compliance Definition 2.4' document.
450 get_call_info (MonoMethodSignature *sig, gboolean is_pinvoke)
453 int n = sig->hasthis + sig->param_count;
454 guint32 stack_size = 0;
457 cinfo = g_malloc0 (sizeof (CallInfo) + (sizeof (ArgInfo) * n));
463 if (MONO_TYPE_ISSTRUCT ((sig->ret))) {
464 /* The address of the return value is passed in %o0 */
465 add_general (&gr, &stack_size, &cinfo->ret, FALSE);
466 cinfo->ret.reg += sparc_i0;
472 add_general (&gr, &stack_size, cinfo->args + 0, FALSE);
474 if ((sig->call_convention == MONO_CALL_VARARG) && (n == 0)) {
477 /* Emit the signature cookie just before the implicit arguments */
478 add_general (&gr, &stack_size, &cinfo->sig_cookie, FALSE);
481 for (i = 0; i < sig->param_count; ++i) {
482 ArgInfo *ainfo = &cinfo->args [sig->hasthis + i];
484 if ((sig->call_convention == MONO_CALL_VARARG) && (i == sig->sentinelpos)) {
487 /* Emit the signature cookie just before the implicit arguments */
488 add_general (&gr, &stack_size, &cinfo->sig_cookie, FALSE);
491 DEBUG(printf("param %d: ", i));
492 if (sig->params [i]->byref) {
493 DEBUG(printf("byref\n"));
495 add_general (&gr, &stack_size, ainfo, FALSE);
498 switch (mono_type_get_underlying_type (sig->params [i])->type) {
499 case MONO_TYPE_BOOLEAN:
502 add_general (&gr, &stack_size, ainfo, FALSE);
503 /* the value is in the ls byte */
504 ainfo->offset += sizeof (gpointer) - 1;
509 add_general (&gr, &stack_size, ainfo, FALSE);
510 /* the value is in the ls word */
511 ainfo->offset += sizeof (gpointer) - 2;
515 add_general (&gr, &stack_size, ainfo, FALSE);
516 /* the value is in the ls dword */
517 ainfo->offset += sizeof (gpointer) - 4;
522 case MONO_TYPE_CLASS:
523 case MONO_TYPE_OBJECT:
524 case MONO_TYPE_STRING:
525 case MONO_TYPE_SZARRAY:
526 case MONO_TYPE_ARRAY:
527 add_general (&gr, &stack_size, ainfo, FALSE);
529 case MONO_TYPE_VALUETYPE:
534 add_general (&gr, &stack_size, ainfo, FALSE);
536 case MONO_TYPE_TYPEDBYREF:
537 add_general (&gr, &stack_size, ainfo, FALSE);
542 add_general (&gr, &stack_size, ainfo, FALSE);
544 add_general (&gr, &stack_size, ainfo, TRUE);
549 add_float (&fr, &stack_size, ainfo, TRUE);
552 /* single precision values are passed in integer registers */
553 add_general (&gr, &stack_size, ainfo, FALSE);
558 add_float (&fr, &stack_size, ainfo, FALSE);
561 /* double precision values are passed in a pair of registers */
562 add_general (&gr, &stack_size, ainfo, TRUE);
566 g_assert_not_reached ();
570 if (!sig->pinvoke && (sig->call_convention == MONO_CALL_VARARG) && (n > 0) && (sig->sentinelpos == sig->param_count)) {
573 /* Emit the signature cookie just before the implicit arguments */
574 add_general (&gr, &stack_size, &cinfo->sig_cookie, FALSE);
579 switch (mono_type_get_underlying_type (sig->ret)->type) {
580 case MONO_TYPE_BOOLEAN:
591 case MONO_TYPE_CLASS:
592 case MONO_TYPE_OBJECT:
593 case MONO_TYPE_SZARRAY:
594 case MONO_TYPE_ARRAY:
595 case MONO_TYPE_STRING:
596 cinfo->ret.storage = ArgInIReg;
597 cinfo->ret.reg = sparc_i0;
604 cinfo->ret.storage = ArgInIReg;
605 cinfo->ret.reg = sparc_i0;
609 cinfo->ret.storage = ArgInIRegPair;
610 cinfo->ret.reg = sparc_i0;
617 cinfo->ret.storage = ArgInFReg;
618 cinfo->ret.reg = sparc_f0;
620 case MONO_TYPE_VALUETYPE:
629 cinfo->ret.storage = ArgOnStack;
631 case MONO_TYPE_TYPEDBYREF:
634 /* Same as a valuetype with size 24 */
641 cinfo->ret.storage = ArgOnStack;
646 g_error ("Can't handle as return value 0x%x", sig->ret->type);
650 cinfo->stack_usage = stack_size;
651 cinfo->reg_usage = gr;
656 is_regsize_var (MonoType *t) {
659 switch (mono_type_get_underlying_type (t)->type) {
660 case MONO_TYPE_BOOLEAN:
671 case MONO_TYPE_OBJECT:
672 case MONO_TYPE_STRING:
673 case MONO_TYPE_CLASS:
674 case MONO_TYPE_SZARRAY:
675 case MONO_TYPE_ARRAY:
677 case MONO_TYPE_VALUETYPE:
689 mono_arch_get_allocatable_int_vars (MonoCompile *cfg)
695 * FIXME: If an argument is allocated to a register, then load it from the
696 * stack in the prolog.
699 for (i = 0; i < cfg->num_varinfo; i++) {
700 MonoInst *ins = cfg->varinfo [i];
701 MonoMethodVar *vmv = MONO_VARINFO (cfg, i);
704 if (vmv->range.first_use.abs_pos >= vmv->range.last_use.abs_pos)
707 /* FIXME: Make arguments on stack allocatable to registers */
708 if (ins->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT) || (ins->opcode == OP_REGVAR) || (ins->opcode == OP_ARG))
711 if (is_regsize_var (ins->inst_vtype)) {
712 g_assert (MONO_VARINFO (cfg, i)->reg == -1);
713 g_assert (i == vmv->idx);
715 vars = mono_varlist_insert_sorted (cfg, vars, vmv, FALSE);
723 mono_arch_get_global_int_regs (MonoCompile *cfg)
727 MonoMethodSignature *sig;
730 sig = cfg->method->signature;
732 cinfo = get_call_info (sig, FALSE);
734 /* Use unused input registers */
735 for (i = cinfo->reg_usage; i < 6; ++i)
736 regs = g_list_prepend (regs, GUINT_TO_POINTER (sparc_i0 + i));
738 /* Use %l0..%l3 as global registers */
739 for (i = sparc_l0; i < sparc_l4; ++i)
740 regs = g_list_prepend (regs, GUINT_TO_POINTER (i));
748 * mono_arch_regalloc_cost:
750 * Return the cost, in number of memory references, of the action of
751 * allocating the variable VMV into a register during global register
755 mono_arch_regalloc_cost (MonoCompile *cfg, MonoMethodVar *vmv)
761 * Set var information according to the calling convention. sparc version.
762 * The locals var stuff should most likely be split in another method.
765 mono_arch_allocate_vars (MonoCompile *m)
767 MonoMethodSignature *sig;
768 MonoMethodHeader *header;
770 int i, offset, size, align, curinst;
773 header = mono_method_get_header (m->method);
775 sig = m->method->signature;
777 cinfo = get_call_info (sig, FALSE);
779 if (sig->ret->type != MONO_TYPE_VOID) {
780 switch (cinfo->ret.storage) {
784 m->ret->opcode = OP_REGVAR;
785 m->ret->inst_c0 = cinfo->ret.reg;
789 g_assert_not_reached ();
792 m->ret->opcode = OP_REGOFFSET;
793 m->ret->inst_basereg = sparc_fp;
794 m->ret->inst_offset = 64;
800 m->ret->dreg = m->ret->inst_c0;
804 * We use the ABI calling conventions for managed code as well.
805 * Exception: valuetypes are never returned in registers on V9.
806 * FIXME: Use something more optimized.
809 /* Locals are allocated backwards from %fp */
810 m->frame_reg = sparc_fp;
814 * Reserve a stack slot for holding information used during exception
817 if (header->num_clauses)
818 offset += sizeof (gpointer) * 2;
820 if (m->method->save_lmf) {
821 offset += sizeof (MonoLMF);
822 m->arch.lmf_offset = offset;
825 curinst = m->locals_start;
826 for (i = curinst; i < m->num_varinfo; ++i) {
827 inst = m->varinfo [i];
829 if (inst->opcode == OP_REGVAR) {
830 //g_print ("allocating local %d to %s\n", i, mono_arch_regname (inst->dreg));
834 if (inst->flags & MONO_INST_IS_DEAD)
837 /* inst->unused indicates native sized value types, this is used by the
838 * pinvoke wrappers when they call functions returning structure */
839 if (inst->unused && MONO_TYPE_ISSTRUCT (inst->inst_vtype) && inst->inst_vtype->type != MONO_TYPE_TYPEDBYREF)
840 size = mono_class_native_size (inst->inst_vtype->data.klass, &align);
842 size = mono_type_stack_size (inst->inst_vtype, &align);
845 * This is needed since structures containing doubles must be doubleword
847 * FIXME: Do this only if needed.
849 if (MONO_TYPE_ISSTRUCT (inst->inst_vtype))
853 * variables are accessed as negative offsets from %fp, so increase
854 * the offset before assigning it to a variable
859 offset &= ~(align - 1);
860 inst->opcode = OP_REGOFFSET;
861 inst->inst_basereg = sparc_fp;
862 inst->inst_offset = STACK_BIAS + -offset;
864 //g_print ("allocating local %d to [%s - %d]\n", i, mono_arch_regname (inst->inst_basereg), - inst->inst_offset);
867 if (sig->call_convention == MONO_CALL_VARARG) {
868 m->sig_cookie = cinfo->sig_cookie.offset + ARGS_OFFSET;
871 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
872 inst = m->varinfo [i];
873 if (inst->opcode != OP_REGVAR) {
874 ArgInfo *ainfo = &cinfo->args [i];
875 gboolean inreg = TRUE;
879 if (sig->hasthis && (i == 0))
880 arg_type = &mono_defaults.object_class->byval_arg;
882 arg_type = sig->params [i - sig->hasthis];
885 if (!arg_type->byref && ((arg_type->type == MONO_TYPE_R4)
886 || (arg_type->type == MONO_TYPE_R8)))
888 * Since float arguments are passed in integer registers, we need to
889 * save them to the stack in the prolog.
894 /* FIXME: Allocate volatile arguments to registers */
895 if (inst->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT))
898 if (MONO_TYPE_ISSTRUCT (arg_type))
899 /* FIXME: this isn't needed */
902 inst->opcode = OP_REGOFFSET;
905 storage = ArgOnStack;
907 storage = ainfo->storage;
912 inst->opcode = OP_REGVAR;
913 inst->dreg = sparc_i0 + ainfo->reg;
918 * Since float regs are volatile, we save the arguments to
919 * the stack in the prolog.
920 * FIXME: Avoid this if the method contains no calls.
924 case ArgInSplitRegStack:
925 /* Split arguments are saved to the stack in the prolog */
926 inst->opcode = OP_REGOFFSET;
927 /* in parent frame */
928 inst->inst_basereg = sparc_fp;
929 inst->inst_offset = ainfo->offset + ARGS_OFFSET;
931 if (!arg_type->byref && (arg_type->type == MONO_TYPE_R8)) {
933 * It is very hard to load doubles from non-doubleword aligned
934 * memory locations. So if the offset is misaligned, we copy the
935 * argument to a stack location in the prolog.
937 if ((inst->inst_offset - STACK_BIAS) % 8) {
938 inst->inst_basereg = sparc_fp;
942 offset &= ~(align - 1);
943 inst->inst_offset = STACK_BIAS + -offset;
952 if (MONO_TYPE_ISSTRUCT (arg_type)) {
953 /* Add a level of indirection */
955 * It would be easier to add OP_LDIND_I here, but ldind_i instructions
956 * are destructively modified in a lot of places in inssel.brg.
959 MONO_INST_NEW (m, indir, 0);
961 inst->opcode = OP_SPARC_INARG_VT;
962 inst->inst_left = indir;
968 * spillvars are stored between the normal locals and the storage reserved
972 m->stack_offset = offset;
974 /* Add a properly aligned dword for use by int<->float conversion opcodes */
976 mono_spillvar_offset_float (m, 0);
982 * take the arguments and generate the arch-specific
983 * instructions to properly call the function in call.
984 * This includes pushing, moving arguments to the right register
988 mono_arch_call_opcode (MonoCompile *cfg, MonoBasicBlock* bb, MonoCallInst *call, int is_virtual) {
990 MonoMethodSignature *sig;
994 guint32 extra_space = 0;
996 sig = call->signature;
997 n = sig->param_count + sig->hasthis;
999 cinfo = get_call_info (sig, sig->pinvoke);
1001 for (i = 0; i < n; ++i) {
1002 ainfo = cinfo->args + i;
1004 if ((sig->call_convention == MONO_CALL_VARARG) && (i == sig->sentinelpos)) {
1005 /* Emit the signature cookie just before the first implicit argument */
1007 MonoMethodSignature *tmp_sig;
1010 * mono_ArgIterator_Setup assumes the signature cookie is
1011 * passed first and all the arguments which were before it are
1012 * passed on the stack after the signature. So compensate by
1013 * passing a different signature.
1015 tmp_sig = mono_metadata_signature_dup (call->signature);
1016 tmp_sig->param_count -= call->signature->sentinelpos;
1017 tmp_sig->sentinelpos = 0;
1018 memcpy (tmp_sig->params, call->signature->params + call->signature->sentinelpos, tmp_sig->param_count * sizeof (MonoType*));
1020 /* FIXME: Add support for signature tokens to AOT */
1021 cfg->disable_aot = TRUE;
1022 /* We always pass the signature on the stack for simplicity */
1023 MONO_INST_NEW (cfg, arg, OP_SPARC_OUTARG_MEM);
1024 arg->inst_basereg = sparc_sp;
1025 arg->inst_imm = ARGS_OFFSET + cinfo->sig_cookie.offset;
1026 MONO_INST_NEW (cfg, sig_arg, OP_ICONST);
1027 sig_arg->inst_p0 = tmp_sig;
1028 arg->inst_left = sig_arg;
1029 arg->type = STACK_PTR;
1030 /* prepend, so they get reversed */
1031 arg->next = call->out_args;
1032 call->out_args = arg;
1035 if (is_virtual && i == 0) {
1036 /* the argument will be attached to the call instruction */
1037 in = call->args [i];
1039 MONO_INST_NEW (cfg, arg, OP_OUTARG);
1040 in = call->args [i];
1041 arg->cil_code = in->cil_code;
1042 arg->inst_left = in;
1043 arg->type = in->type;
1044 /* prepend, we'll need to reverse them later */
1045 arg->next = call->out_args;
1046 call->out_args = arg;
1048 if ((i >= sig->hasthis) && (MONO_TYPE_ISSTRUCT(sig->params [i - sig->hasthis]))) {
1051 guint32 offset, pad;
1059 if (sig->params [i - sig->hasthis]->type == MONO_TYPE_TYPEDBYREF) {
1060 size = sizeof (MonoTypedRef);
1061 align = sizeof (gpointer);
1065 size = mono_type_native_stack_size (&in->klass->byval_arg, &align);
1067 size = mono_type_stack_size (&in->klass->byval_arg, &align);
1070 * We use OP_OUTARG_VT to copy the valuetype to a stack location, then
1071 * use the normal OUTARG opcodes to pass the address of the location to
1074 MONO_INST_NEW (cfg, inst, OP_OUTARG_VT);
1075 inst->inst_left = in;
1077 /* The first 6 argument locations are reserved */
1078 if (cinfo->stack_usage < 6 * sizeof (gpointer))
1079 cinfo->stack_usage = 6 * sizeof (gpointer);
1081 offset = ALIGN_TO ((ARGS_OFFSET - STACK_BIAS) + cinfo->stack_usage, align);
1082 pad = offset - ((ARGS_OFFSET - STACK_BIAS) + cinfo->stack_usage);
1084 inst->inst_c1 = STACK_BIAS + offset;
1085 inst->unused = size;
1086 arg->inst_left = inst;
1088 cinfo->stack_usage += size;
1089 cinfo->stack_usage += pad;
1092 switch (ainfo->storage) {
1096 if (ainfo->storage == ArgInIRegPair)
1097 arg->opcode = OP_SPARC_OUTARG_REGPAIR;
1098 arg->unused = sparc_o0 + ainfo->reg;
1099 call->used_iregs |= 1 << ainfo->reg;
1101 if ((i >= sig->hasthis) && (sig->params [i - sig->hasthis]->type == MONO_TYPE_R8)) {
1103 * The OUTARG (freg) implementation needs an extra dword to store
1104 * the temporary value.
1110 arg->opcode = OP_SPARC_OUTARG_MEM;
1112 case ArgOnStackPair:
1113 arg->opcode = OP_SPARC_OUTARG_MEMPAIR;
1115 case ArgInSplitRegStack:
1116 arg->opcode = OP_SPARC_OUTARG_SPLIT_REG_STACK;
1117 arg->unused = sparc_o0 + ainfo->reg;
1118 call->used_iregs |= 1 << ainfo->reg;
1121 arg->opcode = OP_SPARC_OUTARG_FLOAT_REG;
1122 arg->unused = sparc_f0 + ainfo->reg;
1124 case ArgInDoubleReg:
1125 arg->opcode = OP_SPARC_OUTARG_DOUBLE_REG;
1126 arg->unused = sparc_f0 + ainfo->reg;
1132 arg->inst_basereg = sparc_sp;
1133 arg->inst_imm = ARGS_OFFSET + ainfo->offset;
1138 * Reverse the call->out_args list.
1141 MonoInst *prev = NULL, *list = call->out_args, *next;
1148 call->out_args = prev;
1150 call->stack_usage = cinfo->stack_usage + extra_space;
1151 cfg->param_area = MAX (cfg->param_area, call->stack_usage);
1152 cfg->flags |= MONO_CFG_HAS_CALLS;
1158 /* Map opcode to the sparc condition codes */
1159 static inline SparcCond
1160 opcode_to_sparc_cond (int opcode)
1182 case OP_COND_EXC_EQ:
1185 case OP_COND_EXC_NE_UN:
1192 case OP_COND_EXC_LT:
1198 case OP_COND_EXC_LT_UN:
1204 case OP_COND_EXC_GT:
1210 case OP_COND_EXC_GT_UN:
1214 case OP_COND_EXC_GE:
1218 case OP_COND_EXC_GE_UN:
1222 case OP_COND_EXC_LE:
1226 case OP_COND_EXC_LE_UN:
1228 case OP_COND_EXC_OV:
1229 case OP_COND_EXC_IOV:
1232 case OP_COND_EXC_IC:
1234 case OP_COND_EXC_NO:
1235 case OP_COND_EXC_NC:
1238 g_assert_not_reached ();
1243 #define COMPUTE_DISP(ins) \
1244 if (ins->flags & MONO_INST_BRLABEL) { \
1245 if (ins->inst_i0->inst_c0) \
1246 disp = (ins->inst_i0->inst_c0 - ((guint8*)code - cfg->native_code)) >> 2; \
1249 mono_add_patch_info (cfg, (guint8*)code - cfg->native_code, MONO_PATCH_INFO_LABEL, ins->inst_i0); \
1252 if (ins->inst_true_bb->native_offset) \
1253 disp = (ins->inst_true_bb->native_offset - ((guint8*)code - cfg->native_code)) >> 2; \
1256 mono_add_patch_info (cfg, (guint8*)code - cfg->native_code, MONO_PATCH_INFO_BB, ins->inst_true_bb); \
1261 #define DEFAULT_ICC sparc_xcc_short
1263 #define DEFAULT_ICC sparc_icc_short
1267 #define EMIT_COND_BRANCH_ICC(ins,cond,annul,filldelay,icc) \
1271 COMPUTE_DISP(ins); \
1272 predict = (disp != 0) ? 1 : 0; \
1273 g_assert (sparc_is_imm19 (disp)); \
1274 sparc_branchp (code, (annul), cond, icc, (predict), disp); \
1275 if (filldelay) sparc_nop (code); \
1277 #define EMIT_COND_BRANCH(ins,cond,annul,filldelay) EMIT_COND_BRANCH_ICC ((ins), (cond), (annul), (filldelay), (sparc_xcc_short))
1278 #define EMIT_FLOAT_COND_BRANCH(ins,cond,annul,filldelay) \
1282 COMPUTE_DISP(ins); \
1283 predict = (disp != 0) ? 1 : 0; \
1284 g_assert (sparc_is_imm19 (disp)); \
1285 sparc_fbranch (code, (annul), cond, disp); \
1286 if (filldelay) sparc_nop (code); \
1289 #define EMIT_COND_BRANCH_ICC(ins,cond,annul,filldelay,icc) g_assert_not_reached ()
1290 #define EMIT_COND_BRANCH_GENERAL(ins,bop,cond,annul,filldelay) \
1293 COMPUTE_DISP(ins); \
1294 g_assert (sparc_is_imm22 (disp)); \
1295 sparc_ ## bop (code, (annul), cond, disp); \
1296 if (filldelay) sparc_nop (code); \
1298 #define EMIT_COND_BRANCH(ins,cond,annul,filldelay) EMIT_COND_BRANCH_GENERAL((ins),branch,(cond),annul,filldelay)
1299 #define EMIT_FLOAT_COND_BRANCH(ins,cond,annul,filldelay) EMIT_COND_BRANCH_GENERAL((ins),fbranch,(cond),annul,filldelay)
1302 #define EMIT_COND_BRANCH_PREDICTED(ins,cond,annul,filldelay) \
1306 COMPUTE_DISP(ins); \
1307 predict = (disp != 0) ? 1 : 0; \
1308 g_assert (sparc_is_imm19 (disp)); \
1309 sparc_branchp (code, (annul), (cond), DEFAULT_ICC, (predict), disp); \
1310 if (filldelay) sparc_nop (code); \
1313 #define EMIT_COND_BRANCH_BPR(ins,bop,predict,annul,filldelay) \
1316 COMPUTE_DISP(ins); \
1317 g_assert (sparc_is_imm22 (disp)); \
1318 sparc_ ## bop (code, (annul), (predict), ins->sreg1, disp); \
1319 if (filldelay) sparc_nop (code); \
1322 /* emit an exception if condition is fail */
1324 * We put the exception throwing code out-of-line, at the end of the method
1326 #define EMIT_COND_SYSTEM_EXCEPTION_GENERAL(ins,cond,sexc_name,filldelay,icc) do { \
1327 mono_add_patch_info (cfg, (guint8*)(code) - (cfg)->native_code, \
1328 MONO_PATCH_INFO_EXC, sexc_name); \
1330 sparc_branchp (code, 0, (cond), (icc), 0, 0); \
1333 sparc_branch (code, 0, cond, 0); \
1335 if (filldelay) sparc_nop (code); \
1338 #define EMIT_COND_SYSTEM_EXCEPTION(ins,cond,sexc_name) EMIT_COND_SYSTEM_EXCEPTION_GENERAL(ins,cond,sexc_name,TRUE,DEFAULT_ICC)
1340 #define EMIT_COND_SYSTEM_EXCEPTION_BPR(ins,bop,sexc_name) do { \
1341 mono_add_patch_info (cfg, (guint8*)(code) - (cfg)->native_code, \
1342 MONO_PATCH_INFO_EXC, sexc_name); \
1343 sparc_ ## bop (code, FALSE, FALSE, ins->sreg1, 0); \
1347 #define EMIT_ALU_IMM(ins,op,setcc) do { \
1348 if (sparc_is_imm13 ((ins)->inst_imm)) \
1349 sparc_ ## op ## _imm (code, (setcc), (ins)->sreg1, ins->inst_imm, (ins)->dreg); \
1351 sparc_set (code, ins->inst_imm, sparc_o7); \
1352 sparc_ ## op (code, (setcc), (ins)->sreg1, sparc_o7, (ins)->dreg); \
1356 #define EMIT_LOAD_MEMBASE(ins,op) do { \
1357 if (sparc_is_imm13 (ins->inst_offset)) \
1358 sparc_ ## op ## _imm (code, ins->inst_basereg, ins->inst_offset, ins->dreg); \
1360 sparc_set (code, ins->inst_offset, sparc_o7); \
1361 sparc_ ## op (code, ins->inst_basereg, sparc_o7, ins->dreg); \
1366 #define EMIT_STORE_MEMBASE_IMM(ins,op) do { \
1368 if (ins->inst_imm == 0) \
1371 sparc_set (code, ins->inst_imm, sparc_o7); \
1374 if (!sparc_is_imm13 (ins->inst_offset)) { \
1375 sparc_set (code, ins->inst_offset, GP_SCRATCH_REG); \
1376 sparc_ ## op (code, sreg, ins->inst_destbasereg, GP_SCRATCH_REG); \
1379 sparc_ ## op ## _imm (code, sreg, ins->inst_destbasereg, ins->inst_offset); \
1382 #define EMIT_STORE_MEMBASE_REG(ins,op) do { \
1383 if (!sparc_is_imm13 (ins->inst_offset)) { \
1384 sparc_set (code, ins->inst_offset, sparc_o7); \
1385 sparc_ ## op (code, ins->sreg1, ins->inst_destbasereg, sparc_o7); \
1388 sparc_ ## op ## _imm (code, ins->sreg1, ins->inst_destbasereg, ins->inst_offset); \
1391 #define EMIT_CALL() do { \
1393 sparc_set_template (code, sparc_o7); \
1394 sparc_jmpl (code, sparc_o7, sparc_g0, sparc_o7); \
1397 sparc_call_simple (code, 0); \
1403 * A call template is 7 instructions long, so we want to avoid it if possible.
1406 emit_call (MonoCompile *cfg, guint32 *code, guint32 patch_type, gconstpointer data)
1410 /* FIXME: This only works if the target method is already compiled */
1411 if (0 && v64 && !cfg->compile_aot) {
1412 MonoJumpInfo patch_info;
1414 patch_info.type = patch_type;
1415 patch_info.data.target = data;
1417 target = mono_resolve_patch_target (cfg->method, cfg->domain, NULL, &patch_info, FALSE);
1419 /* FIXME: Add optimizations if the target is close enough */
1420 sparc_set (code, target, sparc_o7);
1421 sparc_jmpl (code, sparc_o7, sparc_g0, sparc_o7);
1425 mono_add_patch_info (cfg, (guint8*)code - cfg->native_code, patch_type, data);
1433 peephole_pass (MonoCompile *cfg, MonoBasicBlock *bb)
1435 MonoInst *ins, *last_ins = NULL;
1440 switch (ins->opcode) {
1442 /* remove unnecessary multiplication with 1 */
1443 if (ins->inst_imm == 1) {
1444 if (ins->dreg != ins->sreg1) {
1445 ins->opcode = OP_MOVE;
1447 last_ins->next = ins->next;
1454 case OP_LOAD_MEMBASE:
1455 case OP_LOADI4_MEMBASE:
1457 * OP_STORE_MEMBASE_REG reg, offset(basereg)
1458 * OP_LOAD_MEMBASE offset(basereg), reg
1460 if (last_ins && (last_ins->opcode == OP_STOREI4_MEMBASE_REG
1461 || last_ins->opcode == OP_STORE_MEMBASE_REG) &&
1462 ins->inst_basereg == last_ins->inst_destbasereg &&
1463 ins->inst_offset == last_ins->inst_offset) {
1464 if (ins->dreg == last_ins->sreg1) {
1465 last_ins->next = ins->next;
1469 //static int c = 0; printf ("MATCHX %s %d\n", cfg->method->name,c++);
1470 ins->opcode = OP_MOVE;
1471 ins->sreg1 = last_ins->sreg1;
1475 * Note: reg1 must be different from the basereg in the second load
1476 * OP_LOAD_MEMBASE offset(basereg), reg1
1477 * OP_LOAD_MEMBASE offset(basereg), reg2
1479 * OP_LOAD_MEMBASE offset(basereg), reg1
1480 * OP_MOVE reg1, reg2
1482 } if (last_ins && (last_ins->opcode == OP_LOADI4_MEMBASE
1483 || last_ins->opcode == OP_LOAD_MEMBASE) &&
1484 ins->inst_basereg != last_ins->dreg &&
1485 ins->inst_basereg == last_ins->inst_basereg &&
1486 ins->inst_offset == last_ins->inst_offset) {
1488 if (ins->dreg == last_ins->dreg) {
1489 last_ins->next = ins->next;
1493 ins->opcode = OP_MOVE;
1494 ins->sreg1 = last_ins->dreg;
1497 //g_assert_not_reached ();
1501 * OP_STORE_MEMBASE_IMM imm, offset(basereg)
1502 * OP_LOAD_MEMBASE offset(basereg), reg
1504 * OP_STORE_MEMBASE_IMM imm, offset(basereg)
1505 * OP_ICONST reg, imm
1507 } else if (last_ins && (last_ins->opcode == OP_STOREI4_MEMBASE_IMM
1508 || last_ins->opcode == OP_STORE_MEMBASE_IMM) &&
1509 ins->inst_basereg == last_ins->inst_destbasereg &&
1510 ins->inst_offset == last_ins->inst_offset) {
1511 //static int c = 0; printf ("MATCHX %s %d\n", cfg->method->name,c++);
1512 ins->opcode = OP_ICONST;
1513 ins->inst_c0 = last_ins->inst_imm;
1514 g_assert_not_reached (); // check this rule
1519 case OP_LOADU1_MEMBASE:
1520 case OP_LOADI1_MEMBASE:
1521 if (last_ins && (last_ins->opcode == OP_STOREI1_MEMBASE_REG) &&
1522 ins->inst_basereg == last_ins->inst_destbasereg &&
1523 ins->inst_offset == last_ins->inst_offset) {
1524 if (ins->dreg == last_ins->sreg1) {
1525 last_ins->next = ins->next;
1529 //static int c = 0; printf ("MATCHX %s %d\n", cfg->method->name,c++);
1530 ins->opcode = OP_MOVE;
1531 ins->sreg1 = last_ins->sreg1;
1535 case OP_LOADU2_MEMBASE:
1536 case OP_LOADI2_MEMBASE:
1537 if (last_ins && (last_ins->opcode == OP_STOREI2_MEMBASE_REG) &&
1538 ins->inst_basereg == last_ins->inst_destbasereg &&
1539 ins->inst_offset == last_ins->inst_offset) {
1540 if (ins->dreg == last_ins->sreg1) {
1541 last_ins->next = ins->next;
1545 //static int c = 0; printf ("MATCHX %s %d\n", cfg->method->name,c++);
1546 ins->opcode = OP_MOVE;
1547 ins->sreg1 = last_ins->sreg1;
1551 case OP_STOREI4_MEMBASE_IMM:
1552 /* Convert pairs of 0 stores to a dword 0 store */
1553 /* Used when initializing temporaries */
1554 /* We know sparc_fp is dword aligned */
1555 if (last_ins && (last_ins->opcode == OP_STOREI4_MEMBASE_IMM) &&
1556 (ins->inst_destbasereg == last_ins->inst_destbasereg) &&
1557 (ins->inst_destbasereg == sparc_fp) &&
1558 (ins->inst_offset < 0) &&
1559 ((ins->inst_offset % 8) == 0) &&
1560 ((ins->inst_offset == last_ins->inst_offset - 4)) &&
1561 (ins->inst_imm == 0) &&
1562 (last_ins->inst_imm == 0)) {
1564 last_ins->opcode = OP_STOREI8_MEMBASE_IMM;
1565 last_ins->inst_offset = ins->inst_offset;
1566 last_ins->next = ins->next;
1578 case OP_COND_EXC_EQ:
1579 case OP_COND_EXC_GE:
1580 case OP_COND_EXC_GT:
1581 case OP_COND_EXC_LE:
1582 case OP_COND_EXC_LT:
1583 case OP_COND_EXC_NE_UN:
1585 * Convert compare with zero+branch to BRcc
1588 * This only works in 64 bit mode, since it examines all 64
1589 * bits of the register.
1590 * Only do this if the method is small since BPr only has a 16bit
1593 if (v64 && (mono_method_get_header (cfg->method)->code_size < 10000) && last_ins &&
1594 (last_ins->opcode == OP_COMPARE_IMM) &&
1595 (last_ins->inst_imm == 0)) {
1596 MonoInst *next = ins->next;
1597 switch (ins->opcode) {
1599 ins->opcode = OP_SPARC_BRZ;
1602 ins->opcode = OP_SPARC_BRNZ;
1605 ins->opcode = OP_SPARC_BRLZ;
1608 ins->opcode = OP_SPARC_BRGZ;
1611 ins->opcode = OP_SPARC_BRGEZ;
1614 ins->opcode = OP_SPARC_BRLEZ;
1616 case OP_COND_EXC_EQ:
1617 ins->opcode = OP_SPARC_COND_EXC_EQZ;
1619 case OP_COND_EXC_GE:
1620 ins->opcode = OP_SPARC_COND_EXC_GEZ;
1622 case OP_COND_EXC_GT:
1623 ins->opcode = OP_SPARC_COND_EXC_GTZ;
1625 case OP_COND_EXC_LE:
1626 ins->opcode = OP_SPARC_COND_EXC_LEZ;
1628 case OP_COND_EXC_LT:
1629 ins->opcode = OP_SPARC_COND_EXC_LTZ;
1631 case OP_COND_EXC_NE_UN:
1632 ins->opcode = OP_SPARC_COND_EXC_NEZ;
1635 g_assert_not_reached ();
1637 ins->sreg1 = last_ins->sreg1;
1639 last_ins->next = next;
1650 if (ins->dreg == ins->sreg1) {
1652 last_ins->next = ins->next;
1657 * OP_MOVE sreg, dreg
1658 * OP_MOVE dreg, sreg
1660 if (last_ins && last_ins->opcode == OP_MOVE &&
1661 ins->sreg1 == last_ins->dreg &&
1662 ins->dreg == last_ins->sreg1) {
1663 last_ins->next = ins->next;
1672 bb->last_ins = last_ins;
/*
 * Register-allocator configuration: bitmasks of the hard registers the local
 * allocator may hand out, plus small helper macros used throughout this file.
 */
1675 /* Parameters used by the register allocator */
1677 /* Use %l4..%l7 as local registers */
1678 #define ARCH_CALLER_REGS (0xf0<<16)
1681 /* Use %d34..%d62 as the double precision floating point local registers */
1682 /* %d32 has the same encoding as %f1, so %d36%d38 == 0b1010 == 0xa */
1683 #define ARCH_CALLER_FREGS (0xaaaaaaa8)
/* NOTE(review): the two ARCH_CALLER_FREGS definitions below are presumably on
 * opposite sides of a 32/64-bit #ifdef not visible in this excerpt — confirm. */
1685 /* Use %f2..%f30 as the double precision floating point local registers */
1686 #define ARCH_CALLER_FREGS (0x55555554)
/* Executes 'a' only when verbose_level > 1; relies on a 'cfg' in scope at the
 * expansion site. */
1690 #define DEBUG(a) if (cfg->verbose_level > 1) a
/* True when hard (integer/float) register r is in the locally-allocatable set. */
1692 #define reg_is_freeable(r) ((1 << (r)) & ARCH_CALLER_REGS)
1693 #define freg_is_freeable(r) (((1) << (r)) & ARCH_CALLER_FREGS)
/* Per-opcode dest/src/clobber descriptor strings; sparc_desc presumably comes
 * from the generated cpu-sparc.h included above. */
1702 static const char*const * ins_spec = sparc_desc;
/*
 * get_ins_spec:
 * Return the register-usage descriptor string for OPCODE, falling back to the
 * CEE_ADD entry when the opcode has no descriptor of its own.
 */
1704 static inline const char*
1705 get_ins_spec (int opcode)
1707 if (ins_spec [opcode])
1708 return ins_spec [opcode];
/* Default: treat unknown opcodes like a plain binary ALU op. */
1710 return ins_spec [CEE_ADD];
/*
 * print_ins:
 * Debug helper: print instruction INS (at list position I) with its dest/src
 * registers and clobber information, as described by its spec string.
 */
1714 print_ins (int i, MonoInst *ins)
1716 const char *spec = get_ins_spec (ins->opcode);
1717 g_print ("\t%-2d %s", i, mono_inst_name (ins->opcode));
1718 if (spec [MONO_INST_DEST]) {
/* Registers >= MONO_MAX_IREGS are still-virtual; print them as R<n>. */
1719 if (ins->dreg >= MONO_MAX_IREGS)
1720 g_print (" R%d <-", ins->dreg);
/* 'b' means the register is only a base for a memory operand. */
1722 if (spec [MONO_INST_DEST] == 'b')
1723 g_print (" [%s + 0x%lx] <-", mono_arch_regname (ins->dreg), (long)ins->inst_offset);
1725 g_print (" %s <-", mono_arch_regname (ins->dreg));
1727 if (spec [MONO_INST_SRC1]) {
1728 if (ins->sreg1 >= MONO_MAX_IREGS)
1729 g_print (" R%d", ins->sreg1);
1731 if (spec [MONO_INST_SRC1] == 'b')
1732 g_print (" [%s + 0x%lx]", mono_arch_regname (ins->sreg1), (long)ins->inst_offset);
1734 g_print (" %s", mono_arch_regname (ins->sreg1));
1736 if (spec [MONO_INST_SRC2]) {
1737 if (ins->sreg2 >= MONO_MAX_IREGS)
1738 g_print (" R%d", ins->sreg2);
1740 g_print (" %s", mono_arch_regname (ins->sreg2));
1742 if (spec [MONO_INST_CLOB])
1743 g_print (" clobbers: %c", spec [MONO_INST_CLOB]);
/*
 * print_regtrack:
 * Debug helper: dump the liveness interval [born_in, last_use] recorded in T
 * for each of the NUM tracked registers.
 */
1748 print_regtrack (RegTrack *t, int num)
1754 for (i = 0; i < num; ++i) {
/* Virtual registers get a synthetic "R<n>" name; hard ones their real name. */
1757 if (i >= MONO_MAX_IREGS) {
1758 g_snprintf (buf, sizeof(buf), "R%d", i);
1761 r = mono_arch_regname (i);
1762 g_print ("liveness: %s [%d - %d]\n", r, t [i].born_in, t[i].last_use);
1766 typedef struct InstList InstList;
/*
 * inst_list_prepend:
 * Allocate a new InstList node from POOL holding DATA and prepend it to LIST;
 * used below to build the reversed instruction list for the backward pass.
 */
1774 static inline InstList*
1775 inst_list_prepend (MonoMemPool *pool, InstList *list, MonoInst *data)
1777 InstList *item = mono_mempool_alloc (pool, sizeof (InstList));
1786 #define STACK_OFFSETS_POSITIVE
/*
 * mono_spillvar_offset:
 * Return the frame offset of integer spill slot SPILLVAR, growing the
 * cfg->spill_info list (and the stack frame) on demand.
 */
1789 * returns the offset used by spillvar. It allocates a new
1790 * spill variable if necessary.
1793 mono_spillvar_offset (MonoCompile *cfg, int spillvar)
1795 MonoSpillInfo **si, *info;
1798 si = &cfg->spill_info;
/* Walk/extend the singly-linked list until the requested slot exists. */
1800 while (i <= spillvar) {
1803 *si = info = mono_mempool_alloc (cfg->mempool, sizeof (MonoSpillInfo));
/* Each integer slot is one pointer-sized word below the frame. */
1805 cfg->stack_offset += sizeof (gpointer);
1806 info->offset = - cfg->stack_offset;
/* Offsets are biased by MONO_SPARC_STACK_BIAS (sparc64 stack bias). */
1810 return MONO_SPARC_STACK_BIAS + (*si)->offset;
1816 g_assert_not_reached ();
/*
 * mono_spillvar_offset_float:
 * Float counterpart of mono_spillvar_offset: return the frame offset of
 * double spill slot SPILLVAR, allocating 8-byte aligned slots on demand.
 */
1821 mono_spillvar_offset_float (MonoCompile *cfg, int spillvar)
1823 MonoSpillInfo **si, *info;
1826 si = &cfg->spill_info_float;
1828 while (i <= spillvar) {
1831 *si = info = mono_mempool_alloc (cfg->mempool, sizeof (MonoSpillInfo));
/* Doubles need dword-aligned slots (see the alignment note at the top of
 * this file). */
1833 cfg->stack_offset += sizeof (double);
1834 cfg->stack_offset = ALIGN_TO (cfg->stack_offset, 8);
1835 info->offset = - cfg->stack_offset;
1839 return MONO_SPARC_STACK_BIAS + (*si)->offset;
1845 g_assert_not_reached ();
/*
 * get_register_force_spilling:
 * Spill the variable currently held in symbolic register REG: mark it
 * spilled, free its hard register, and insert a reload from the spill slot
 * after the current instruction.
 */
1850 * Force the spilling of the variable in the symbolic register 'reg'.
1852 G_GNUC_UNUSED static int
1853 get_register_force_spilling (MonoCompile *cfg, InstList *item, MonoInst *ins, int reg)
/* Hard register currently assigned to the symbolic register. */
1858 sel = cfg->rs->iassign [reg];
1859 /*i = cfg->rs->isymbolic [sel];
1860 g_assert (i == reg);*/
/* Negative iassign values encode "spilled" together with the slot index. */
1862 spill = ++cfg->spill_count;
1863 cfg->rs->iassign [i] = -spill - 1;
1864 mono_regstate_free_int (cfg->rs, sel);
1865 /* we need to create a spill var and insert a load to sel after the current instruction */
1866 MONO_INST_NEW (cfg, load, OP_LOAD_MEMBASE);
1868 load->inst_basereg = cfg->frame_reg;
1869 load->inst_offset = mono_spillvar_offset (cfg, spill);
/* Link the reload in after INS (item->prev->data is the next instruction in
 * program order, since the InstList is reversed). */
1871 while (ins->next != item->prev->data)
1874 load->next = ins->next;
1876 DEBUG (g_print ("SPILLED LOAD (%d at 0x%08lx(%%sp)) R%d (freed %s)\n", spill, (long)load->inst_offset, i, mono_arch_regname (sel)));
/* Re-reserve the freed hard register so the reload can target it. */
1877 i = mono_regstate_alloc_int (cfg->rs, 1 << sel);
1878 g_assert (i == sel);
/*
 * get_register_spilling:
 * Make an integer hard register available for symbolic register REG by
 * spilling one of the registers in REGMASK (excluding those used by the
 * current instruction), and insert the corresponding reload after INS.
 * Returns the freed hard register.
 */
1884 get_register_spilling (MonoCompile *cfg, InstList *item, MonoInst *ins, guint32 regmask, int reg)
1889 DEBUG (g_print ("start regmask to assign R%d: 0x%08x (R%d <- R%d R%d)\n", reg, regmask, ins->dreg, ins->sreg1, ins->sreg2));
1890 /* exclude the registers in the current instruction */
1891 if (reg != ins->sreg1 && (reg_is_freeable (ins->sreg1) || (ins->sreg1 >= MONO_MAX_IREGS && cfg->rs->iassign [ins->sreg1] >= 0))) {
1892 if (ins->sreg1 >= MONO_MAX_IREGS)
1893 regmask &= ~ (1 << cfg->rs->iassign [ins->sreg1]);
1895 regmask &= ~ (1 << ins->sreg1);
1896 DEBUG (g_print ("excluding sreg1 %s\n", mono_arch_regname (ins->sreg1)));
1898 if (reg != ins->sreg2 && (reg_is_freeable (ins->sreg2) || (ins->sreg2 >= MONO_MAX_IREGS && cfg->rs->iassign [ins->sreg2] >= 0))) {
1899 if (ins->sreg2 >= MONO_MAX_IREGS)
1900 regmask &= ~ (1 << cfg->rs->iassign [ins->sreg2]);
1902 regmask &= ~ (1 << ins->sreg2);
1903 DEBUG (g_print ("excluding sreg2 %s %d\n", mono_arch_regname (ins->sreg2), ins->sreg2));
1905 if (reg != ins->dreg && reg_is_freeable (ins->dreg)) {
1906 regmask &= ~ (1 << ins->dreg);
1907 DEBUG (g_print ("excluding dreg %s\n", mono_arch_regname (ins->dreg)));
1910 DEBUG (g_print ("available regmask: 0x%08x\n", regmask));
1911 g_assert (regmask); /* need at least a register we can free */
/* Pick the first remaining candidate (no distance heuristic yet). */
1913 /* we should track prev_use and spill the register that's farther */
1914 for (i = 0; i < MONO_MAX_IREGS; ++i) {
1915 if (regmask & (1 << i)) {
1917 DEBUG (g_print ("selected register %s has assignment %d\n", mono_arch_regname (sel), cfg->rs->iassign [sel]));
/* Mark the evicted symbolic register as spilled (negative encoding). */
1921 i = cfg->rs->isymbolic [sel];
1922 spill = ++cfg->spill_count;
1923 cfg->rs->iassign [i] = -spill - 1;
1924 mono_regstate_free_int (cfg->rs, sel);
1925 /* we need to create a spill var and insert a load to sel after the current instruction */
1926 MONO_INST_NEW (cfg, load, OP_LOAD_MEMBASE);
1928 load->inst_basereg = cfg->frame_reg;
1929 load->inst_offset = mono_spillvar_offset (cfg, spill);
1931 while (ins->next != item->prev->data)
1934 load->next = ins->next;
1936 DEBUG (g_print ("SPILLED LOAD (%d at 0x%08lx(%%sp)) R%d (freed %s)\n", spill, (long)load->inst_offset, i, mono_arch_regname (sel)));
/* Re-reserve the freed register for the caller. */
1937 i = mono_regstate_alloc_int (cfg->rs, 1 << sel);
1938 g_assert (i == sel);
/*
 * get_float_register_spilling:
 * Float counterpart of get_register_spilling: evict one double register from
 * REGMASK (excluding those used by INS), insert a reload after INS, and
 * return the freed hard register.
 */
1944 get_float_register_spilling (MonoCompile *cfg, InstList *item, MonoInst *ins, guint32 regmask, int reg)
1949 DEBUG (g_print ("start regmask to assign R%d: 0x%08x (R%d <- R%d R%d)\n", reg, regmask, ins->dreg, ins->sreg1, ins->sreg2));
1950 /* exclude the registers in the current instruction */
1951 if (reg != ins->sreg1 && (freg_is_freeable (ins->sreg1) || (ins->sreg1 >= MONO_MAX_FREGS && cfg->rs->fassign [ins->sreg1] >= 0))) {
1952 if (ins->sreg1 >= MONO_MAX_FREGS)
1953 regmask &= ~ (1 << cfg->rs->fassign [ins->sreg1]);
1955 regmask &= ~ (1 << ins->sreg1);
1956 DEBUG (g_print ("excluding sreg1 %s\n", mono_arch_regname (ins->sreg1)));
1958 if (reg != ins->sreg2 && (freg_is_freeable (ins->sreg2) || (ins->sreg2 >= MONO_MAX_FREGS && cfg->rs->fassign [ins->sreg2] >= 0))) {
1959 if (ins->sreg2 >= MONO_MAX_FREGS)
1960 regmask &= ~ (1 << cfg->rs->fassign [ins->sreg2]);
1962 regmask &= ~ (1 << ins->sreg2);
1963 DEBUG (g_print ("excluding sreg2 %s %d\n", mono_arch_regname (ins->sreg2), ins->sreg2));
1965 if (reg != ins->dreg && freg_is_freeable (ins->dreg)) {
1966 regmask &= ~ (1 << ins->dreg);
1967 DEBUG (g_print ("excluding dreg %s\n", mono_arch_regname (ins->dreg)));
1970 DEBUG (g_print ("available regmask: 0x%08x\n", regmask));
1971 g_assert (regmask); /* need at least a register we can free */
1973 /* we should track prev_use and spill the register that's farther */
1974 for (i = 0; i < MONO_MAX_FREGS; ++i) {
1975 if (regmask & (1 << i)) {
1977 DEBUG (g_print ("selected register %s has assignment %d\n", mono_arch_regname (sel), cfg->rs->fassign [sel]))
/* Mark the evicted symbolic register as spilled (negative encoding). */
1981 i = cfg->rs->fsymbolic [sel];
1982 spill = ++cfg->spill_count;
1983 cfg->rs->fassign [i] = -spill - 1;
1984 mono_regstate_free_float(cfg->rs, sel);
1985 /* we need to create a spill var and insert a load to sel after the current instruction */
1986 MONO_INST_NEW (cfg, load, OP_LOADR8_MEMBASE);
1988 load->inst_basereg = cfg->frame_reg;
1989 load->inst_offset = mono_spillvar_offset_float (cfg, spill);
1991 while (ins->next != item->prev->data)
1994 load->next = ins->next;
1996 DEBUG (g_print ("SPILLED LOAD (%d at 0x%08lx(%%sp)) R%d (freed %s)\n", spill, (long)load->inst_offset, i, mono_arch_regname (sel)));
1997 i = mono_regstate_alloc_float (cfg->rs, 1 << sel);
1998 g_assert (i == sel);
/*
 * create_copy_ins:
 * Create an OP_MOVE from SRC to DEST and link it into the instruction list
 * right after INS.
 */
2004 create_copy_ins (MonoCompile *cfg, int dest, int src, MonoInst *ins)
2007 MONO_INST_NEW (cfg, copy, OP_MOVE);
2011 copy->next = ins->next;
2014 DEBUG (g_print ("\tforced copy from %s to %s\n", mono_arch_regname (src), mono_arch_regname (dest)));
/*
 * create_copy_ins_float:
 * Float counterpart of create_copy_ins: create an OP_FMOVE from SRC to DEST
 * and link it in right after INS.
 */
2018 G_GNUC_UNUSED static MonoInst*
2019 create_copy_ins_float (MonoCompile *cfg, int dest, int src, MonoInst *ins)
2022 MONO_INST_NEW (cfg, copy, OP_FMOVE);
2026 copy->next = ins->next;
2029 DEBUG (g_print ("\tforced copy from %s to %s\n", mono_arch_regname (src), mono_arch_regname (dest)));
/*
 * create_spilled_store:
 * Create a store of hard register REG into integer spill slot SPILL; when INS
 * is non-NULL the store is linked in right after it (callers pass NULL to
 * position the store themselves via insert_before_ins).
 */
2034 create_spilled_store (MonoCompile *cfg, int spill, int reg, int prev_reg, MonoInst *ins)
2037 MONO_INST_NEW (cfg, store, OP_STORE_MEMBASE_REG);
2039 store->inst_destbasereg = cfg->frame_reg;
2040 store->inst_offset = mono_spillvar_offset (cfg, spill);
2042 store->next = ins->next;
2045 DEBUG (g_print ("SPILLED STORE (%d at 0x%08lx(%%sp)) R%d (from %s)\n", spill, (long)store->inst_offset, prev_reg, mono_arch_regname (reg)));
/*
 * create_spilled_store_float:
 * Float counterpart of create_spilled_store: store double register REG into
 * float spill slot SPILL, linked in after INS when INS is non-NULL.
 */
2050 create_spilled_store_float (MonoCompile *cfg, int spill, int reg, int prev_reg, MonoInst *ins)
2053 MONO_INST_NEW (cfg, store, OP_STORER8_MEMBASE_REG);
2055 store->inst_destbasereg = cfg->frame_reg;
2056 store->inst_offset = mono_spillvar_offset_float (cfg, spill);
2058 store->next = ins->next;
2061 DEBUG (g_print ("SPILLED STORE (%d at 0x%08lx(%%sp)) R%d (from %s)\n", spill, (long)store->inst_offset, prev_reg, mono_arch_regname (reg)));
/*
 * insert_before_ins:
 * Splice TO_INSERT into the instruction list immediately before INS.  ITEM is
 * INS's node in the reversed InstList, so item->next->data is the instruction
 * preceding INS in program order.
 */
2066 insert_before_ins (MonoInst *ins, InstList *item, MonoInst* to_insert)
2069 g_assert (item->next);
2070 prev = item->next->data;
/* Walk forward from the previous instruction until we reach INS. */
2072 while (prev->next != ins)
2074 to_insert->next = ins;
2075 prev->next = to_insert;
/* Update the reversed-list node to point at the new head of this position;
2077 * needed otherwise in the next instruction we can add an ins to the
2078 * end and that would get past this instruction.
2080 item->data = to_insert;
/*
 * alloc_int_reg:
 * Assign an integer hard register from ALLOW_MASK to symbolic register
 * SYM_REG, spilling another register if none is free, and emit the spill
 * store when the value was marked to be spilled after INS.
 */
2083 G_GNUC_UNUSED static int
2084 alloc_int_reg (MonoCompile *cfg, InstList *curinst, MonoInst *ins, int sym_reg, guint32 allow_mask)
2086 int val = cfg->rs->iassign [sym_reg];
/* Negative values mean "currently spilled"; decode the slot index. */
2090 /* the register gets spilled after this inst */
2093 val = mono_regstate_alloc_int (cfg->rs, allow_mask);
2095 val = get_register_spilling (cfg, curinst, ins, allow_mask, sym_reg);
2096 cfg->rs->iassign [sym_reg] = val;
2097 /* add option to store before the instruction for src registers */
2099 create_spilled_store (cfg, spill, val, sym_reg, ins);
2101 cfg->rs->isymbolic [val] = sym_reg;
/*
 * mono_arch_local_regalloc:
 * Local (per-basic-block) register allocator.  Phase 1 walks the instructions
 * forward collecting liveness info per virtual register; phase 2 walks the
 * reversed list assigning hard registers from ARCH_CALLER_REGS /
 * ARCH_CALLER_FREGS, spilling via the helpers above when the set runs out.
 * NOTE(review): parts of the original body are not visible in this excerpt;
 * the comments below describe only what the visible code shows.
 */
2105 /* FIXME: Strange loads from the stack in basic-float.cs:test_2_rem */
2108 * Local register allocation.
2109 * We first scan the list of instructions and we save the liveness info of
2110 * each register (when the register is first used, when it's value is set etc.).
2111 * We also reverse the list of instructions (in the InstList list) because assigning
2112 * registers backwards allows for more tricks to be used.
2115 mono_arch_local_regalloc (MonoCompile *cfg, MonoBasicBlock *bb)
2118 MonoRegState *rs = cfg->rs;
2120 RegTrack *reginfo, *reginfof;
2121 RegTrack *reginfo1, *reginfo2, *reginfod;
2122 InstList *tmp, *reversed = NULL;
2124 guint32 src1_mask, src2_mask, dest_mask;
2125 guint32 cur_iregs, cur_fregs;
2127 /* FIXME: Use caller saved regs and %i1-%2 for allocation */
/* Reset the register state for this block and allocate zeroed liveness
 * tracking tables for the integer and float virtual registers. */
2131 rs->next_vireg = bb->max_ireg;
2132 rs->next_vfreg = bb->max_freg;
2133 mono_regstate_assign (rs);
2134 reginfo = mono_mempool_alloc0 (cfg->mempool, sizeof (RegTrack) * rs->next_vireg);
2135 reginfof = mono_mempool_alloc0 (cfg->mempool, sizeof (RegTrack) * rs->next_vfreg);
2136 rs->ifree_mask = ARCH_CALLER_REGS;
2137 rs->ffree_mask = ARCH_CALLER_FREGS;
2141 DEBUG (g_print ("LOCAL regalloc: basic block: %d\n", bb->block_num));
2142 /* forward pass on the instructions to collect register liveness info */
2144 spec = ins_spec [ins->opcode];
2147 spec = ins_spec [CEE_ADD];
2149 DEBUG (print_ins (i, ins));
/* Record prev/last use for src1; an 'f' spec selects the float table. */
2151 if (spec [MONO_INST_SRC1]) {
2152 if (spec [MONO_INST_SRC1] == 'f')
2153 reginfo1 = reginfof;
2156 reginfo1 [ins->sreg1].prev_use = reginfo1 [ins->sreg1].last_use;
2157 reginfo1 [ins->sreg1].last_use = i;
2161 if (spec [MONO_INST_SRC2]) {
2162 if (spec [MONO_INST_SRC2] == 'f')
2163 reginfo2 = reginfof;
2166 reginfo2 [ins->sreg2].prev_use = reginfo2 [ins->sreg2].last_use;
2167 reginfo2 [ins->sreg2].last_use = i;
2171 if (spec [MONO_INST_DEST]) {
2172 if (spec [MONO_INST_DEST] == 'f')
2173 reginfod = reginfof;
2176 if (spec [MONO_INST_DEST] != 'b') /* it's not just a base register */
2177 reginfod [ins->dreg].killed_in = i;
2178 reginfod [ins->dreg].prev_use = reginfod [ins->dreg].last_use;
2179 reginfod [ins->dreg].last_use = i;
2180 if (reginfod [ins->dreg].born_in == 0 || reginfod [ins->dreg].born_in > i)
2181 reginfod [ins->dreg].born_in = i;
/* On 32-bit sparc a long result occupies two consecutive virtual regs. */
2182 if (!v64 && (spec [MONO_INST_DEST] == 'l')) {
2183 /* result in a regpair, the virtual register is allocated sequentially */
2184 reginfod [ins->dreg + 1].prev_use = reginfod [ins->dreg + 1].last_use;
2185 reginfod [ins->dreg + 1].last_use = i;
2186 if (reginfod [ins->dreg + 1].born_in == 0 || reginfod [ins->dreg + 1].born_in > i)
2187 reginfod [ins->dreg + 1].born_in = i;
2192 reversed = inst_list_prepend (cfg->mempool, reversed, ins);
/* Phase 2: backward pass over the reversed list, assigning hard registers. */
2197 cur_iregs = ARCH_CALLER_REGS;
2198 cur_fregs = ARCH_CALLER_FREGS;
2200 DEBUG (print_regtrack (reginfo, rs->next_vireg));
2201 DEBUG (print_regtrack (reginfof, rs->next_vfreg));
2204 int prev_dreg, prev_sreg1, prev_sreg2;
2207 spec = ins_spec [ins->opcode];
2209 spec = ins_spec [CEE_ADD];
2210 DEBUG (g_print ("processing:"));
2211 DEBUG (print_ins (i, ins));
2213 /* make the register available for allocation: FIXME add fp reg */
2214 if (ins->opcode == OP_SETREG || ins->opcode == OP_SETREGIMM) {
2215 /* Dont free register which can't be allocated */
2216 if (reg_is_freeable (ins->dreg)) {
2217 cur_iregs |= 1 << ins->dreg;
2218 DEBUG (g_print ("adding %d to cur_iregs\n", ins->dreg));
2220 } else if (ins->opcode == OP_SETFREG) {
2221 if (freg_is_freeable (ins->dreg)) {
2222 cur_fregs |= 1 << ins->dreg;
2223 DEBUG (g_print ("adding %d to cur_fregs\n", ins->dreg));
/* 'c' clobber spec marks a call: remove the regs the call uses from the
 * allocatable sets (we are walking backwards, so this protects them for
 * the instructions that precede the call). */
2225 } else if (spec [MONO_INST_CLOB] == 'c') {
2226 MonoCallInst *cinst = (MonoCallInst*)ins;
2227 DEBUG (g_print ("excluding regs 0x%lx from cur_iregs (0x%x)\n", (long)cinst->used_iregs, cur_iregs));
2228 cur_iregs &= ~cinst->used_iregs;
2229 cur_fregs &= ~cinst->used_fregs;
2230 DEBUG (g_print ("available cur_iregs: 0x%x\n", cur_iregs));
2231 /* registers used by the calling convention are excluded from
2232 * allocation: they will be selectively enabled when they are
2233 * assigned by the special SETREG opcodes.
2236 dest_mask = src1_mask = src2_mask = cur_iregs;
/* --- destination register assignment --- */
2241 /* update for use with FP regs... */
2242 if (spec [MONO_INST_DEST] == 'f') {
2243 if (ins->dreg >= MONO_MAX_FREGS) {
2244 val = rs->fassign [ins->dreg];
2245 prev_dreg = ins->dreg;
2249 /* the register gets spilled after this inst */
2252 dest_mask = cur_fregs;
2253 val = mono_regstate_alloc_float (rs, dest_mask);
2255 val = get_float_register_spilling (cfg, tmp, ins, dest_mask, ins->dreg);
2256 rs->fassign [ins->dreg] = val;
2258 create_spilled_store_float (cfg, spill, val, prev_dreg, ins);
2260 DEBUG (g_print ("\tassigned dreg %s to dest R%d\n", mono_arch_regname (val), ins->dreg));
2261 rs->fsymbolic [val] = prev_dreg;
2266 if (freg_is_freeable (ins->dreg) && prev_dreg >= 0 && (reginfo [prev_dreg].born_in >= i || !(cur_fregs & (1 << ins->dreg)))) {
2267 DEBUG (g_print ("\tfreeable %s (R%d) (born in %d)\n", mono_arch_regname (ins->dreg), prev_dreg, reginfo [prev_dreg].born_in));
2268 mono_regstate_free_float (rs, ins->dreg);
2270 } else if (ins->dreg >= MONO_MAX_IREGS) {
2271 val = rs->iassign [ins->dreg];
2272 prev_dreg = ins->dreg;
2276 /* the register gets spilled after this inst */
2279 val = mono_regstate_alloc_int (rs, dest_mask);
2281 val = get_register_spilling (cfg, tmp, ins, dest_mask, ins->dreg);
2282 rs->iassign [ins->dreg] = val;
2284 create_spilled_store (cfg, spill, val, prev_dreg, ins);
2286 DEBUG (g_print ("\tassigned dreg %s to dest R%d\n", mono_arch_regname (val), ins->dreg));
2287 rs->isymbolic [val] = prev_dreg;
/* 32-bit long result: the high word must live in the register right after
 * the low word's register. */
2289 if (!v64 && spec [MONO_INST_DEST] == 'l') {
2290 int hreg = prev_dreg + 1;
2291 val = rs->iassign [hreg];
2295 /* the register gets spilled after this inst */
2298 /* The second register must be a pair of the first */
2299 dest_mask = 1 << (rs->iassign [prev_dreg] + 1);
2300 val = mono_regstate_alloc_int (rs, dest_mask);
2302 val = get_register_spilling (cfg, tmp, ins, dest_mask, hreg);
2303 rs->iassign [hreg] = val;
2305 create_spilled_store (cfg, spill, val, hreg, ins);
2308 /* The second register must be a pair of the first */
2309 if (val != rs->iassign [prev_dreg] + 1) {
2310 dest_mask = 1 << (rs->iassign [prev_dreg] + 1);
2312 val = mono_regstate_alloc_int (rs, dest_mask);
2314 val = get_register_spilling (cfg, tmp, ins, dest_mask, hreg);
/* Already-assigned high reg is not the pair: fix up with a copy. */
2316 create_copy_ins (cfg, rs->iassign [hreg], val, ins);
2318 rs->iassign [hreg] = val;
2322 DEBUG (g_print ("\tassigned hreg %s to dest R%d\n", mono_arch_regname (val), hreg));
2323 rs->isymbolic [val] = hreg;
2325 if (reg_is_freeable (val) && hreg >= 0 && (reginfo [hreg].born_in >= i && !(cur_iregs & (1 << val)))) {
2326 DEBUG (g_print ("\tfreeable %s (R%d)\n", mono_arch_regname (val), hreg));
2327 mono_regstate_free_int (rs, val);
/* Walking backwards: once we reach the def point the register is free. */
2333 if (spec [MONO_INST_DEST] != 'f' && reg_is_freeable (ins->dreg) && prev_dreg >= 0 && (reginfo [prev_dreg].born_in >= i)) {
2334 DEBUG (g_print ("\tfreeable %s (R%d) (born in %d)\n", mono_arch_regname (ins->dreg), prev_dreg, reginfo [prev_dreg].born_in));
2335 mono_regstate_free_int (rs, ins->dreg);
/* --- first source register --- */
2341 if (spec [MONO_INST_SRC1] == 'f') {
2342 if (ins->sreg1 >= MONO_MAX_FREGS) {
2343 val = rs->fassign [ins->sreg1];
2344 prev_sreg1 = ins->sreg1;
2348 /* the register gets spilled after this inst */
2351 //g_assert (val == -1); /* source cannot be spilled */
2352 src1_mask = cur_fregs;
2353 val = mono_regstate_alloc_float (rs, src1_mask);
2355 val = get_float_register_spilling (cfg, tmp, ins, src1_mask, ins->sreg1);
2356 rs->fassign [ins->sreg1] = val;
2357 DEBUG (g_print ("\tassigned sreg1 %s to R%d\n", mono_arch_regname (val), ins->sreg1));
/* Spilled source: the store must go BEFORE this instruction. */
2359 MonoInst *store = create_spilled_store_float (cfg, spill, val, prev_sreg1, NULL);
2360 insert_before_ins (ins, tmp, store);
2363 rs->fsymbolic [val] = prev_sreg1;
2368 } else if (ins->sreg1 >= MONO_MAX_IREGS) {
2369 val = rs->iassign [ins->sreg1];
2370 prev_sreg1 = ins->sreg1;
2374 /* the register gets spilled after this inst */
/* Disabled (if (0 ...)) fast path — kept for reference, see comment. */
2377 if (0 && (ins->opcode == OP_MOVE) && reg_is_freeable (ins->dreg)) {
2379 * small optimization: the dest register is already allocated
2380 * but the src one is not: we can simply assign the same register
2381 * here and peephole will get rid of the instruction later.
2382 * This optimization may interfere with the clobbering handling:
2383 * it removes a mov operation that will be added again to handle clobbering.
2384 * There are also some other issues that should with make testjit.
2386 mono_regstate_alloc_int (rs, 1 << ins->dreg);
2387 val = rs->iassign [ins->sreg1] = ins->dreg;
2388 //g_assert (val >= 0);
2389 DEBUG (g_print ("\tfast assigned sreg1 %s to R%d\n", mono_arch_regname (val), ins->sreg1));
2391 //g_assert (val == -1); /* source cannot be spilled */
2392 val = mono_regstate_alloc_int (rs, src1_mask);
2394 val = get_register_spilling (cfg, tmp, ins, src1_mask, ins->sreg1);
2395 rs->iassign [ins->sreg1] = val;
2396 DEBUG (g_print ("\tassigned sreg1 %s to R%d\n", mono_arch_regname (val), ins->sreg1));
2399 MonoInst *store = create_spilled_store (cfg, spill, val, prev_sreg1, NULL);
2400 insert_before_ins (ins, tmp, store);
2403 rs->isymbolic [val] = prev_sreg1;
/* --- second source register --- */
2412 if (spec [MONO_INST_SRC2] == 'f') {
2413 if (ins->sreg2 >= MONO_MAX_FREGS) {
2414 val = rs->fassign [ins->sreg2];
2415 prev_sreg2 = ins->sreg2;
2419 /* the register gets spilled after this inst */
2422 src2_mask = cur_fregs;
2423 val = mono_regstate_alloc_float (rs, src2_mask);
2425 val = get_float_register_spilling (cfg, tmp, ins, src2_mask, ins->sreg2);
2426 rs->fassign [ins->sreg2] = val;
2427 DEBUG (g_print ("\tassigned sreg2 %s to R%d\n", mono_arch_regname (val), ins->sreg2));
2429 create_spilled_store_float (cfg, spill, val, prev_sreg2, ins);
2431 rs->fsymbolic [val] = prev_sreg2;
2436 } else if (ins->sreg2 >= MONO_MAX_IREGS) {
2437 val = rs->iassign [ins->sreg2];
2438 prev_sreg2 = ins->sreg2;
2442 /* the register gets spilled after this inst */
2445 val = mono_regstate_alloc_int (rs, src2_mask);
2447 val = get_register_spilling (cfg, tmp, ins, src2_mask, ins->sreg2);
2448 rs->iassign [ins->sreg2] = val;
2449 DEBUG (g_print ("\tassigned sreg2 %s to R%d\n", mono_arch_regname (val), ins->sreg2));
2451 create_spilled_store (cfg, spill, val, prev_sreg2, ins);
2453 rs->isymbolic [val] = prev_sreg2;
/* Sanity pass at call sites: caller-saved regs should be free by now. */
2459 if (spec [MONO_INST_CLOB] == 'c') {
2461 guint32 clob_mask = ARCH_CALLER_REGS;
2462 for (j = 0; j < MONO_MAX_IREGS; ++j) {
2464 if ((clob_mask & s) && !(rs->ifree_mask & s) && j != ins->sreg1) {
2465 //g_warning ("register %s busy at call site\n", mono_arch_regname (j));
2469 /*if (reg_is_freeable (ins->sreg1) && prev_sreg1 >= 0 && reginfo [prev_sreg1].born_in >= i) {
2470 DEBUG (g_print ("freeable %s\n", mono_arch_regname (ins->sreg1)));
2471 mono_regstate_free_int (rs, ins->sreg1);
2473 if (reg_is_freeable (ins->sreg2) && prev_sreg2 >= 0 && reginfo [prev_sreg2].born_in >= i) {
2474 DEBUG (g_print ("freeable %s\n", mono_arch_regname (ins->sreg2)));
2475 mono_regstate_free_int (rs, ins->sreg2);
2478 //DEBUG (print_ins (i, ins));
/*
 * sparc_patch:
 * Patch the instruction (sequence) at CODE so it references TARGET.  Decodes
 * the instruction's op/op2 fields and rewrites the appropriate displacement
 * or immediate; multi-instruction sequences (sethi+or, sethi+nop, etc.) are
 * regenerated with sparc_set.  Branch displacements are word (>>2) relative.
 */
2485 sparc_patch (guint32 *code, const gpointer target)
2488 guint32 ins = *code;
2489 guint32 op = ins >> 30;
2490 guint32 op2 = (ins >> 22) & 0x7;
2491 guint32 rd = (ins >> 25) & 0x1f;
2492 guint8* target8 = (guint8*)target;
2493 gint64 disp = (target8 - (guint8*)code) >> 2;
2496 // g_print ("patching 0x%08x (0x%08x) to point to 0x%08x\n", code, ins, target);
/* op=0/op2=2: branch with a 22-bit displacement (Bicc format). */
2498 if ((op == 0) && (op2 == 2)) {
2499 if (!sparc_is_imm22 (disp))
2502 *code = ((ins >> 22) << 22) | (disp & 0x3fffff);
/* op=0/op2=1: branch with a 19-bit displacement (BPcc format). */
2504 else if ((op == 0) && (op2 == 1)) {
2505 if (!sparc_is_imm19 (disp))
2508 *code = ((ins >> 19) << 19) | (disp & 0x7ffff);
/* op=0/op2=3: register branch (BPr) — 16-bit disp split into two fields. */
2510 else if ((op == 0) && (op2 == 3)) {
2511 if (!sparc_is_imm16 (disp))
2514 *code &= ~(0x180000 | 0x3fff);
2515 *code |= ((disp << 21) & (0x180000)) | (disp & 0x3fff);
/* op=0/op2=6: fp branch with a 22-bit displacement (FBfcc format). */
2517 else if ((op == 0) && (op2 == 6)) {
2518 if (!sparc_is_imm22 (disp))
2521 *code = ((ins >> 22) << 22) | (disp & 0x3fffff);
/* op=0/op2=4: sethi — inspect the following instruction to decide how the
 * address-materialization sequence should be rewritten. */
2523 else if ((op == 0) && (op2 == 4)) {
2524 guint32 ins2 = code [1];
2526 if (((ins2 >> 30) == 2) && (((ins2 >> 19) & 0x3f) == 2)) {
2527 /* sethi followed by or */
2529 sparc_set (p, target8, rd);
2530 while (p <= (code + 1))
2533 else if (ins2 == 0x01000000) {
2534 /* sethi followed by nop */
2536 sparc_set (p, target8, rd);
2537 while (p <= (code + 1))
2540 else if ((sparc_inst_op (ins2) == 3) && (sparc_inst_imm (ins2))) {
2541 /* sethi followed by load/store */
/* 32-bit split: high 22 bits into the sethi, low 10 into the mem op. */
2543 guint32 t = (guint32)target8;
2544 *code &= ~(0x3fffff);
2546 *(code + 1) &= ~(0x3ff);
2547 *(code + 1) |= (t & 0x3ff);
/* NOTE(review): this arm matches a longer (4-insn) sparc_set-style
 * sequence starting at %g1 — the guarding condition is partly outside
 * this excerpt; confirm against the full source. */
2551 (sparc_inst_rd (ins) == sparc_g1) &&
2552 (sparc_inst_op (c [1]) == 0) && (sparc_inst_op2 (c [1]) == 4) &&
2553 (sparc_inst_op (c [2]) == 2) && (sparc_inst_op3 (c [2]) == 2) &&
2554 (sparc_inst_op (c [3]) == 2) && (sparc_inst_op3 (c [3]) == 2))
2558 reg = sparc_inst_rd (c [1]);
2559 sparc_set (p, target8, reg);
2563 else if ((sparc_inst_op (ins2) == 2) && (sparc_inst_op3 (ins2) == 0x38) &&
2564 (sparc_inst_imm (ins2))) {
2565 /* sethi followed by jmpl */
2567 guint32 t = (guint32)target8;
2568 *code &= ~(0x3fffff);
2570 *(code + 1) &= ~(0x3ff);
2571 *(code + 1) |= (t & 0x3ff);
/* op=1: CALL — regenerate with a 30-bit word displacement. */
2577 else if (op == 01) {
2578 gint64 disp = (target8 - (guint8*)code) >> 2;
2580 if (!sparc_is_imm30 (disp))
2582 sparc_call_simple (code, target8 - (guint8*)code);
/* op=2/op3=2 with imm: arithmetic immediate — patch the 13-bit simm field. */
2584 else if ((op == 2) && (sparc_inst_op3 (ins) == 0x2) && sparc_inst_imm (ins)) {
2586 g_assert (sparc_is_imm13 (target8));
2588 *code |= (guint32)target8;
2590 else if ((sparc_inst_op (ins) == 2) && (sparc_inst_op3 (ins) == 0x7)) {
2591 /* sparc_set case 5. */
2595 reg = sparc_inst_rd (c [3]);
2596 sparc_set (p, target, reg);
2603 // g_print ("patched with 0x%08x\n", ins);
2607 * mono_sparc_emit_save_lmf:
/*
 * Emit the code necessary to push a new entry onto the LMF (Last Managed
 * Frame) stack; the entry lives at lmf_offset from %fp.  Also used by
 * trampolines.  On entry %o0 is assumed to hold the lmf_addr pointer
 * (NOTE(review): the instruction that loads %o0 is not visible here).
 */
2609 * Emit the code neccesary to push a new entry onto the lmf stack. Used by
2610 * trampolines as well.
2613 mono_sparc_emit_save_lmf (guint32 *code, guint32 lmf_offset)
/* lmf->lmf_addr = lmf_addr (in %o0) */
2616 sparc_sti_imm (code, sparc_o0, sparc_fp, lmf_offset + G_STRUCT_OFFSET (MonoLMF, lmf_addr));
2617 /* Save previous_lmf */
2618 sparc_ldi (code, sparc_o0, sparc_g0, sparc_o7);
2619 sparc_sti_imm (code, sparc_o7, sparc_fp, lmf_offset + G_STRUCT_OFFSET (MonoLMF, previous_lmf));
/* *lmf_addr = &lmf (link the new entry as the current LMF) */
2621 sparc_add_imm (code, FALSE, sparc_fp, lmf_offset, sparc_o7);
2622 sparc_sti (code, sparc_o7, sparc_o0, sparc_g0);
/*
 * mono_sparc_emit_restore_lmf:
 * Emit the code to pop the LMF entry at lmf_offset from %fp off the LMF
 * stack: *lmf->lmf_addr = lmf->previous_lmf.
 */
2628 mono_sparc_emit_restore_lmf (guint32 *code, guint32 lmf_offset)
2630 /* Load previous_lmf */
2631 sparc_ldi_imm (code, sparc_fp, lmf_offset + G_STRUCT_OFFSET (MonoLMF, previous_lmf), sparc_l0);
/* Load lmf_addr */
2633 sparc_ldi_imm (code, sparc_fp, lmf_offset + G_STRUCT_OFFSET (MonoLMF, lmf_addr), sparc_l1);
2634 /* *(lmf) = previous_lmf */
2635 sparc_sti (code, sparc_l0, sparc_l1, sparc_g0);
/*
 * emit_save_sp_to_lmf:
 * Store the current %sp into the method's LMF entry.  Must run at each call
 * site rather than once in the prolog — see the comment below.
 */
2640 emit_save_sp_to_lmf (MonoCompile *cfg, guint32 *code)
2643 * Since register windows are saved to the current value of %sp, we need to
2644 * set the sp field in the lmf before the call, not in the prolog.
2646 if (cfg->method->save_lmf) {
2647 gint32 lmf_offset = MONO_SPARC_STACK_BIAS - cfg->arch.lmf_offset;
2650 sparc_sti_imm (code, sparc_sp, sparc_fp, lmf_offset + G_STRUCT_OFFSET (MonoLMF, sp));
/*
 * emit_vret_token:
 * After a pinvoke call returning a structure, emit the 'unimp <size>'
 * token word that the SPARC ABI requires the callee to check.
 */
2657 emit_vret_token (MonoInst *ins, guint32 *code)
2659 MonoCallInst *call = (MonoCallInst*)ins;
2663 * The sparc ABI requires that calls to functions which return a structure
2664 * contain an additional unimpl instruction which is checked by the callee.
2666 if (call->signature->pinvoke && MONO_TYPE_ISSTRUCT(call->signature->ret)) {
2667 if (call->signature->ret->type == MONO_TYPE_TYPEDBYREF)
2668 size = mono_type_stack_size (call->signature->ret, NULL);
2670 size = mono_class_native_size (call->signature->ret->data.klass, NULL);
/* The low 12 bits of the unimp instruction carry the struct size. */
2671 sparc_unimp (code, size & 0xfff);
/*
 * emit_move_return_value:
 * Move a call's return value from the ABI return registers (%o0/%o1 for
 * integers, %f0/%f1 for floats) into the call instruction's dreg.
 */
2678 emit_move_return_value (MonoInst *ins, guint32 *code)
2680 /* Move return value to the target register */
2681 /* FIXME: do this in the local reg allocator */
2682 switch (ins->opcode) {
2684 case OP_VOIDCALL_REG:
2685 case OP_VOIDCALL_MEMBASE:
2689 case OP_CALL_MEMBASE:
2690 sparc_mov_reg_reg (code, sparc_o0, ins->dreg);
2694 case OP_LCALL_MEMBASE:
2696 * ins->dreg is the least significant reg due to the lreg: LCALL rule
/* NOTE(review): missing lines presumably distinguish the 64-bit (single
 * reg) and 32-bit (pair %o0/%o1) cases — confirm against full source. */
2700 sparc_mov_reg_reg (code, sparc_o0, ins->dreg);
2702 sparc_mov_reg_reg (code, sparc_o0, ins->dreg + 1);
2703 sparc_mov_reg_reg (code, sparc_o1, ins->dreg);
2708 case OP_FCALL_MEMBASE:
/* An R4 result must be widened to double after the move (%f0 holds it). */
2710 if (((MonoCallInst*)ins)->signature->ret->type == MONO_TYPE_R4) {
2711 sparc_fmovs (code, sparc_f0, ins->dreg);
2712 sparc_fstod (code, ins->dreg, ins->dreg);
2715 sparc_fmovd (code, sparc_f0, ins->dreg);
2717 sparc_fmovs (code, sparc_f0, ins->dreg);
2718 if (((MonoCallInst*)ins)->signature->ret->type == MONO_TYPE_R4)
2719 sparc_fstod (code, ins->dreg, ins->dreg);
2721 sparc_fmovs (code, sparc_f1, ins->dreg + 1);
2726 case OP_VCALL_MEMBASE:
2736 * emit_load_volatile_arguments:
/*
 * Reload volatile (stack-homed) arguments back into the original input
 * registers, mirroring the inverse of the stores done in emit_prolog.
 * Required before a tail call so the callee sees its arguments in place.
 */
2738 * Load volatile arguments from the stack to the original input registers.
2739 * Required before a tail call.
2742 emit_load_volatile_arguments (MonoCompile *cfg, guint32 *code)
2744 MonoMethod *method = cfg->method;
2745 MonoMethodSignature *sig;
2750 /* FIXME: Generate intermediate code instead */
2752 sig = method->signature;
2754 cinfo = get_call_info (sig, FALSE);
2756 /* This is the opposite of the code in emit_prolog */
2758 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
2759 ArgInfo *ainfo = cinfo->args + i;
2760 gint32 stack_offset;
2762 inst = cfg->varinfo [i];
/* Argument 0 is the implicit 'this' for instance methods. */
2764 if (sig->hasthis && (i == 0))
2765 arg_type = &mono_defaults.object_class->byval_arg;
2767 arg_type = sig->params [i - sig->hasthis];
2769 stack_offset = ainfo->offset + ARGS_OFFSET;
2770 ireg = sparc_i0 + ainfo->reg;
/* Argument split across the last in-reg (%i5) and the stack. */
2772 if (ainfo->storage == ArgInSplitRegStack) {
2773 g_assert (inst->opcode == OP_REGOFFSET);
2775 if (!sparc_is_imm13 (stack_offset))
2777 sparc_st_imm (code, inst->inst_basereg, stack_offset, sparc_i5);
/* 32-bit doubles: handled word-by-word through integer regs/stack. */
2780 if (!v64 && !arg_type->byref && (arg_type->type == MONO_TYPE_R8)) {
2781 if (ainfo->storage == ArgInIRegPair) {
2782 if (!sparc_is_imm13 (inst->inst_offset + 4))
2784 sparc_ld_imm (code, inst->inst_basereg, inst->inst_offset, ireg);
2785 sparc_ld_imm (code, inst->inst_basereg, inst->inst_offset + 4, ireg + 1);
2788 if (ainfo->storage == ArgInSplitRegStack) {
2789 if (stack_offset != inst->inst_offset) {
2790 sparc_ld_imm (code, inst->inst_basereg, inst->inst_offset, sparc_i5);
2791 sparc_ld_imm (code, inst->inst_basereg, inst->inst_offset + 4, sparc_o7);
2792 sparc_st_imm (code, sparc_o7, sparc_fp, stack_offset + 4);
2797 if (ainfo->storage == ArgOnStackPair) {
2798 if (stack_offset != inst->inst_offset) {
2799 /* stack_offset is not dword aligned, so we need to make a copy */
2800 sparc_ld_imm (code, inst->inst_basereg, inst->inst_offset, sparc_o7);
2801 sparc_st_imm (code, sparc_o7, sparc_fp, stack_offset);
2803 sparc_ld_imm (code, inst->inst_basereg, inst->inst_offset + 4, sparc_o7);
2804 sparc_st_imm (code, sparc_o7, sparc_fp, stack_offset + 4);
2809 g_assert_not_reached ();
2812 if ((ainfo->storage == ArgInIReg) && (inst->opcode != OP_REGVAR)) {
2813 /* Argument in register, but need to be saved to stack */
2814 if (!sparc_is_imm13 (stack_offset))
/* Use the load width implied by the offset's alignment. */
2816 if ((stack_offset - ARGS_OFFSET) & 0x1)
2817 /* FIXME: Is this ldsb or ldub ? */
2818 sparc_ldsb_imm (code, inst->inst_basereg, stack_offset, ireg);
2820 if ((stack_offset - ARGS_OFFSET) & 0x2)
2821 sparc_ldsh_imm (code, inst->inst_basereg, stack_offset, ireg);
2823 if ((stack_offset - ARGS_OFFSET) & 0x4)
2824 sparc_ld_imm (code, inst->inst_basereg, stack_offset, ireg);
2827 sparc_ldx_imm (code, inst->inst_basereg, stack_offset, ireg);
2829 sparc_ld_imm (code, inst->inst_basereg, stack_offset, ireg);
2832 else if ((ainfo->storage == ArgInIRegPair) && (inst->opcode != OP_REGVAR)) {
2833 /* Argument in regpair, but need to be saved to stack */
2834 if (!sparc_is_imm13 (inst->inst_offset + 4))
2836 sparc_ld_imm (code, inst->inst_basereg, inst->inst_offset, ireg);
2837 sparc_st_imm (code, inst->inst_basereg, inst->inst_offset + 4, ireg + 1);
/* Float/double register arguments: not handled here (bodies not visible). */
2839 else if ((ainfo->storage == ArgInFloatReg) && (inst->opcode != OP_REGVAR)) {
2842 else if ((ainfo->storage == ArgInDoubleReg) && (inst->opcode != OP_REGVAR)) {
2846 if ((ainfo->storage == ArgInSplitRegStack) || (ainfo->storage == ArgOnStack))
2847 if (inst->opcode == OP_REGVAR)
2848 /* FIXME: Load the argument into memory */
2858 * mono_sparc_is_virtual_call:
2860 * Determine whenever the instruction at CODE is a virtual call.
2863 mono_sparc_is_virtual_call (guint32 *code)
/* op == 0x2 / op3 == 0x38 encodes JMPL, i.e. a register-indirect call */
2870 if ((sparc_inst_op (*code) == 0x2) && (sparc_inst_op3 (*code) == 0x38)) {
2872 * Register indirect call. If it is a virtual call, then the
2873 * instruction in the delay slot is a special kind of nop.
2876 /* Construct special nop */
/* The marker is "or %g0, 0xca, %g0" — architecturally a no-op */
2877 sparc_or_imm (p, FALSE, sparc_g0, 0xca, sparc_g0);
/* Compare the delay-slot word against the freshly built marker */
2880 if (code [1] == p [0])
2888 * mono_sparc_get_vcall_slot_addr:
2890 * Determine the vtable slot used by a virtual call.
/*
 * NOTE(review): decodes the two instructions at the call site; only the
 * "ld [r1 + imm], r2; call r2" pattern is handled — anything else asserts.
 */
2893 mono_sparc_get_vcall_slot_addr (guint32 *code, gpointer *fp)
2895 guint32 ins = code [0];
2896 guint32 prev_ins = code [-1];
/* Flush register windows so the frame reads below see current values */
2898 mono_sparc_flushw ();
/* Undo the V9 stack bias to get the real frame address */
2900 fp = (gpointer*)((guint8*)fp + MONO_SPARC_STACK_BIAS);
/* op 0x2 / op3 0x38 is JMPL — a register-indirect call */
2902 if ((sparc_inst_op (ins) == 0x2) && (sparc_inst_op3 (ins) == 0x38)) {
/* Preceding instruction must be a load (op3 0 = lduw, 0xb = ldx) */
2903 if ((sparc_inst_op (prev_ins) == 0x3) && (sparc_inst_op3 (prev_ins) == 0 || sparc_inst_op3 (prev_ins) == 0xb)) {
2904 /* ld [r1 + CONST ], r2; call r2 */
2905 guint32 base = sparc_inst_rs1 (prev_ins);
2906 guint32 disp = sparc_inst_imm13 (prev_ins);
/* The register the load wrote must be the one being called through */
2909 g_assert (sparc_inst_rd (prev_ins) == sparc_inst_rs1 (ins));
/* Base must be an %o/%l/%i register so it was saved in the window area */
2911 g_assert ((base >= sparc_o0) && (base <= sparc_i7));
/* fp[base - 16]: read the saved register from the window save area
 * (assumes %l0, encoding 16, sits at fp[0] — TODO confirm layout) */
2913 base_val = fp [base - 16];
2915 return (gpointer)((guint8*)base_val + disp);
2918 g_assert_not_reached ();
2921 g_assert_not_reached ();
2927 * Some conventions used in the following code.
2928 * - The only scratch registers we have are o7 and g1. We try to
2929 * stick to o7 when we can, and use g1 when necessary.
/*
 * mono_arch_output_basic_block:
 * Emit SPARC native code for every MonoInst in basic block BB, appending to
 * cfg->native_code and growing the buffer when space runs low.
 * NOTE(review): this extract is elided/garbled — each line carries a stray
 * leading number and many structural lines (case labels, breaks, braces) are
 * missing.  Section comments below annotate only the visible lines.
 */
2933 mono_arch_output_basic_block (MonoCompile *cfg, MonoBasicBlock *bb)
2938 guint32 *code = (guint32*)(cfg->native_code + cfg->code_len);
2939 MonoInst *last_ins = NULL;
2943 if (cfg->opt & MONO_OPT_PEEPHOLE)
2944 peephole_pass (cfg, bb);
2946 if (cfg->verbose_level > 2)
2947 g_print ("Basic block %d starting at offset 0x%x\n", bb->block_num, bb->native_offset);
2949 cpos = bb->max_offset;
2951 if (cfg->prof_options & MONO_PROFILE_COVERAGE) {
2959 offset = (guint8*)code - cfg->native_code;
/* Per-opcode machine description; CEE_ADD is used as a fallback spec */
2961 spec = ins_spec [ins->opcode];
2963 spec = ins_spec [CEE_ADD];
2965 max_len = ((guint8 *)spec)[MONO_INST_LEN];
/* Grow the code buffer when fewer than max_len + 16 bytes remain */
2967 if (offset > (cfg->code_size - max_len - 16)) {
2968 cfg->code_size *= 2;
2969 cfg->native_code = g_realloc (cfg->native_code, cfg->code_size);
2970 code = (guint32*)(cfg->native_code + offset);
/* Remember where this instruction starts, for the length check at the end */
2972 code_start = (guint8*)code;
2973 // if (ins->cil_code)
2974 // g_print ("cil code\n");
2975 mono_debug_record_line_number (cfg, ins, offset);
2977 switch (ins->opcode) {
/* ---- store opcodes (immediate, then register sources) ---- */
2978 case OP_STOREI1_MEMBASE_IMM:
2979 EMIT_STORE_MEMBASE_IMM (ins, stb);
2981 case OP_STOREI2_MEMBASE_IMM:
2982 EMIT_STORE_MEMBASE_IMM (ins, sth);
2984 case OP_STORE_MEMBASE_IMM:
2985 EMIT_STORE_MEMBASE_IMM (ins, sti);
2987 case OP_STOREI4_MEMBASE_IMM:
2988 EMIT_STORE_MEMBASE_IMM (ins, st);
2990 case OP_STOREI8_MEMBASE_IMM:
2992 EMIT_STORE_MEMBASE_IMM (ins, stx);
2994 /* Only generated by peephole opts */
2995 g_assert ((ins->inst_offset % 8) == 0);
2996 g_assert (ins->inst_imm == 0);
2997 EMIT_STORE_MEMBASE_IMM (ins, stx);
3000 case OP_STOREI1_MEMBASE_REG:
3001 EMIT_STORE_MEMBASE_REG (ins, stb);
3003 case OP_STOREI2_MEMBASE_REG:
3004 EMIT_STORE_MEMBASE_REG (ins, sth);
3006 case OP_STOREI4_MEMBASE_REG:
3007 EMIT_STORE_MEMBASE_REG (ins, st);
3009 case OP_STOREI8_MEMBASE_REG:
3011 EMIT_STORE_MEMBASE_REG (ins, stx);
3013 /* Only used by OP_MEMSET */
3014 EMIT_STORE_MEMBASE_REG (ins, std);
3017 case OP_STORE_MEMBASE_REG:
3018 EMIT_STORE_MEMBASE_REG (ins, sti);
/* ---- loads from an absolute address held in inst_c0 ---- */
3022 sparc_ldx (code, ins->inst_c0, sparc_g0, ins->dreg);
3024 sparc_ld (code, ins->inst_c0, sparc_g0, ins->dreg);
3029 sparc_ldsw (code, ins->inst_c0, sparc_g0, ins->dreg);
3031 sparc_ld (code, ins->inst_c0, sparc_g0, ins->dreg);
3035 sparc_ld (code, ins->inst_c0, sparc_g0, ins->dreg);
/* Address does not fit an immediate: materialize it first */
3038 sparc_set (code, ins->inst_c0, ins->dreg);
3039 sparc_ld (code, ins->dreg, sparc_g0, ins->dreg);
/* ---- base + offset loads ---- */
3041 case OP_LOADI4_MEMBASE:
3043 EMIT_LOAD_MEMBASE (ins, ldsw);
3045 EMIT_LOAD_MEMBASE (ins, ld);
3048 case OP_LOADU4_MEMBASE:
3049 EMIT_LOAD_MEMBASE (ins, ld);
3051 case OP_LOADU1_MEMBASE:
3052 EMIT_LOAD_MEMBASE (ins, ldub);
3054 case OP_LOADI1_MEMBASE:
3055 EMIT_LOAD_MEMBASE (ins, ldsb);
3057 case OP_LOADU2_MEMBASE:
3058 EMIT_LOAD_MEMBASE (ins, lduh);
3060 case OP_LOADI2_MEMBASE:
3061 EMIT_LOAD_MEMBASE (ins, ldsh);
3063 case OP_LOAD_MEMBASE:
3065 EMIT_LOAD_MEMBASE (ins, ldx);
3067 EMIT_LOAD_MEMBASE (ins, ld);
3071 case OP_LOADI8_MEMBASE:
3072 EMIT_LOAD_MEMBASE (ins, ldx);
/* ---- sign/zero extension via shift pairs (no extb/exth on SPARC) ---- */
3076 sparc_sll_imm (code, ins->sreg1, 24, sparc_o7);
3077 sparc_sra_imm (code, sparc_o7, 24, ins->dreg);
3080 sparc_sll_imm (code, ins->sreg1, 16, sparc_o7);
3081 sparc_sra_imm (code, sparc_o7, 16, ins->dreg);
3084 sparc_and_imm (code, FALSE, ins->sreg1, 0xff, ins->dreg);
3087 sparc_sll_imm (code, ins->sreg1, 16, sparc_o7);
3088 sparc_srl_imm (code, sparc_o7, 16, ins->dreg);
/* ---- checked 64->32 conversions (V9, 64-bit condition codes) ---- */
3090 case CEE_CONV_OVF_U4:
3091 /* Only used on V9 */
3092 sparc_cmp_imm (code, ins->sreg1, 0);
3093 mono_add_patch_info (cfg, (guint8*)(code) - (cfg)->native_code,
3094 MONO_PATCH_INFO_EXC, "OverflowException");
3095 sparc_branchp (code, 0, sparc_bl, sparc_xcc_short, 0, 0);
/* Build the constant 1 << 32 for the upper-bound check */
3097 sparc_set (code, 1, sparc_o7);
3098 sparc_sllx_imm (code, sparc_o7, 32, sparc_o7);
3099 sparc_cmp (code, ins->sreg1, sparc_o7);
3100 mono_add_patch_info (cfg, (guint8*)(code) - (cfg)->native_code,
3101 MONO_PATCH_INFO_EXC, "OverflowException");
3102 sparc_branchp (code, 0, sparc_bge, sparc_xcc_short, 0, 0);
3104 sparc_mov_reg_reg (code, ins->sreg1, ins->dreg);
3106 case CEE_CONV_OVF_I4_UN:
3107 /* Only used on V9 */
3112 /* Only used on V9 */
/* srl/sra by 0 zero/sign-extend the low 32 bits on V9 */
3113 sparc_srl_imm (code, ins->sreg1, 0, ins->dreg);
3117 /* Only used on V9 */
3118 sparc_sra_imm (code, ins->sreg1, 0, ins->dreg);
/* ---- compares: set condition codes for a following branch/setcc ---- */
3123 sparc_cmp (code, ins->sreg1, ins->sreg2);
3125 case OP_COMPARE_IMM:
3126 case OP_ICOMPARE_IMM:
3127 if (sparc_is_imm13 (ins->inst_imm))
3128 sparc_cmp_imm (code, ins->sreg1, ins->inst_imm);
3130 sparc_set (code, ins->inst_imm, sparc_o7);
3131 sparc_cmp (code, ins->sreg1, sparc_o7);
3134 case OP_X86_TEST_NULL:
3135 sparc_cmp_imm (code, ins->sreg1, 0);
3139 * gdb does not like encountering 'ta 1' in the debugged code. So
3140 * instead of emitting a trap, we emit a call a C function and place a
3143 //sparc_ta (code, 1);
3144 mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_ABS, mono_sparc_break);
/* ---- integer ALU: add/sub with and without carry / cc update ---- */
3149 sparc_add (code, TRUE, ins->sreg1, ins->sreg2, ins->dreg);
3153 sparc_add (code, FALSE, ins->sreg1, ins->sreg2, ins->dreg);
3158 /* according to inssel-long32.brg, this should set cc */
3159 EMIT_ALU_IMM (ins, add, TRUE);
3163 /* according to inssel-long32.brg, this should set cc */
3164 sparc_addx (code, TRUE, ins->sreg1, ins->sreg2, ins->dreg);
3168 EMIT_ALU_IMM (ins, addx, TRUE);
3172 sparc_sub (code, TRUE, ins->sreg1, ins->sreg2, ins->dreg);
3176 sparc_sub (code, FALSE, ins->sreg1, ins->sreg2, ins->dreg);
3181 /* according to inssel-long32.brg, this should set cc */
3182 EMIT_ALU_IMM (ins, sub, TRUE);
3186 /* according to inssel-long32.brg, this should set cc */
3187 sparc_subx (code, TRUE, ins->sreg1, ins->sreg2, ins->dreg);
3191 EMIT_ALU_IMM (ins, subx, TRUE);
3195 sparc_and (code, FALSE, ins->sreg1, ins->sreg2, ins->dreg);
3199 EMIT_ALU_IMM (ins, and, FALSE);
/* ---- division: %y must hold the upper 32 bits of the dividend ---- */
3203 /* Sign extend sreg1 into %y */
3204 sparc_sra_imm (code, ins->sreg1, 31, sparc_o7);
3205 sparc_wry (code, sparc_o7, sparc_g0);
3206 sparc_sdiv (code, TRUE, ins->sreg1, ins->sreg2, ins->dreg);
/* NOTE(review): this macro takes 'code' here but 'ins' in the MUL_OVF
 * cases below — confirm the correct first argument against the macro. */
3207 EMIT_COND_SYSTEM_EXCEPTION_GENERAL (code, sparc_boverflow, "ArithmeticException", TRUE, sparc_icc_short);
3211 sparc_wry (code, sparc_g0, sparc_g0);
3212 sparc_udiv (code, FALSE, ins->sreg1, ins->sreg2, ins->dreg);
3217 /* Transform division into a shift */
3218 for (i = 1; i < 30; ++i) {
3220 if (ins->inst_imm == imm)
/* Division by 2: add the sign bit before the arithmetic shift to round
 * towards zero for negative dividends */
3226 sparc_srl_imm (code, ins->sreg1, 31, sparc_o7);
3227 sparc_add (code, FALSE, ins->sreg1, sparc_o7, ins->dreg);
3228 sparc_sra_imm (code, ins->dreg, 1, ins->dreg);
3231 /* http://compilers.iecc.com/comparch/article/93-04-079 */
3232 sparc_sra_imm (code, ins->sreg1, 31, sparc_o7);
3233 sparc_srl_imm (code, sparc_o7, 32 - i, sparc_o7);
3234 sparc_add (code, FALSE, ins->sreg1, sparc_o7, ins->dreg);
3235 sparc_sra_imm (code, ins->dreg, i, ins->dreg);
3239 /* Sign extend sreg1 into %y */
3240 sparc_sra_imm (code, ins->sreg1, 31, sparc_o7);
3241 sparc_wry (code, sparc_o7, sparc_g0);
3242 EMIT_ALU_IMM (ins, sdiv, TRUE);
3243 EMIT_COND_SYSTEM_EXCEPTION_GENERAL (code, sparc_boverflow, "ArithmeticException", TRUE, sparc_icc_short);
/* ---- remainder: rem = sreg1 - (sreg1 / sreg2) * sreg2 ---- */
3249 /* Sign extend sreg1 into %y */
3250 sparc_sra_imm (code, ins->sreg1, 31, sparc_o7);
3251 sparc_wry (code, sparc_o7, sparc_g0);
3252 sparc_sdiv (code, TRUE, ins->sreg1, ins->sreg2, sparc_o7);
3253 EMIT_COND_SYSTEM_EXCEPTION_GENERAL (code, sparc_boverflow, "ArithmeticException", TRUE, sparc_icc_short);
3254 sparc_smul (code, FALSE, ins->sreg2, sparc_o7, sparc_o7);
3255 sparc_sub (code, FALSE, ins->sreg1, sparc_o7, ins->dreg);
3259 sparc_wry (code, sparc_g0, sparc_g0);
3260 sparc_udiv (code, FALSE, ins->sreg1, ins->sreg2, sparc_o7);
3261 sparc_umul (code, FALSE, ins->sreg2, sparc_o7, sparc_o7);
3262 sparc_sub (code, FALSE, ins->sreg1, sparc_o7, ins->dreg);
3266 /* Sign extend sreg1 into %y */
3267 sparc_sra_imm (code, ins->sreg1, 31, sparc_o7);
3268 sparc_wry (code, sparc_o7, sparc_g0);
/* Immediate divisor: fall back to a scratch register when it won't fit */
3269 if (!sparc_is_imm13 (ins->inst_imm)) {
3270 sparc_set (code, ins->inst_imm, GP_SCRATCH_REG);
3271 sparc_sdiv (code, TRUE, ins->sreg1, GP_SCRATCH_REG, sparc_o7);
3272 EMIT_COND_SYSTEM_EXCEPTION_GENERAL (code, sparc_boverflow, "ArithmeticException", TRUE, sparc_icc_short);
3273 sparc_smul (code, FALSE, sparc_o7, GP_SCRATCH_REG, sparc_o7);
3276 sparc_sdiv_imm (code, TRUE, ins->sreg1, ins->inst_imm, sparc_o7);
3277 EMIT_COND_SYSTEM_EXCEPTION_GENERAL (code, sparc_boverflow, "ArithmeticException", TRUE, sparc_icc_short);
3278 sparc_smul_imm (code, FALSE, sparc_o7, ins->inst_imm, sparc_o7);
3280 sparc_sub (code, FALSE, ins->sreg1, sparc_o7, ins->dreg);
/* ---- bitwise or/xor ---- */
3284 sparc_or (code, FALSE, ins->sreg1, ins->sreg2, ins->dreg);
3288 EMIT_ALU_IMM (ins, or, FALSE);
3292 sparc_xor (code, FALSE, ins->sreg1, ins->sreg2, ins->dreg);
3296 EMIT_ALU_IMM (ins, xor, FALSE);
/* ---- shifts: 32-bit forms take a 5-bit count, 64-bit forms 6-bit ---- */
3300 sparc_sll (code, ins->sreg1, ins->sreg2, ins->dreg);
3304 if (ins->inst_imm < (1 << 5))
3305 sparc_sll_imm (code, ins->sreg1, ins->inst_imm, ins->dreg);
3307 sparc_set (code, ins->inst_imm, sparc_o7);
3308 sparc_sll (code, ins->sreg1, sparc_o7, ins->dreg);
3313 sparc_sra (code, ins->sreg1, ins->sreg2, ins->dreg);
3317 if (ins->inst_imm < (1 << 5))
3318 sparc_sra_imm (code, ins->sreg1, ins->inst_imm, ins->dreg);
3320 sparc_set (code, ins->inst_imm, sparc_o7);
3321 sparc_sra (code, ins->sreg1, sparc_o7, ins->dreg);
3325 case OP_ISHR_UN_IMM:
3326 if (ins->inst_imm < (1 << 5))
3327 sparc_srl_imm (code, ins->sreg1, ins->inst_imm, ins->dreg);
3329 sparc_set (code, ins->inst_imm, sparc_o7);
3330 sparc_srl (code, ins->sreg1, sparc_o7, ins->dreg);
3335 sparc_srl (code, ins->sreg1, ins->sreg2, ins->dreg);
3338 sparc_sllx (code, ins->sreg1, ins->sreg2, ins->dreg);
3341 if (ins->inst_imm < (1 << 6))
3342 sparc_sllx_imm (code, ins->sreg1, ins->inst_imm, ins->dreg);
3344 sparc_set (code, ins->inst_imm, sparc_o7);
3345 sparc_sllx (code, ins->sreg1, sparc_o7, ins->dreg);
3349 sparc_srax (code, ins->sreg1, ins->sreg2, ins->dreg);
3352 if (ins->inst_imm < (1 << 6))
3353 sparc_srax_imm (code, ins->sreg1, ins->inst_imm, ins->dreg);
3355 sparc_set (code, ins->inst_imm, sparc_o7);
3356 sparc_srax (code, ins->sreg1, sparc_o7, ins->dreg);
3360 sparc_srlx (code, ins->sreg1, ins->sreg2, ins->dreg);
3362 case OP_LSHR_UN_IMM:
3363 if (ins->inst_imm < (1 << 6))
3364 sparc_srlx_imm (code, ins->sreg1, ins->inst_imm, ins->dreg);
3366 sparc_set (code, ins->inst_imm, sparc_o7);
3367 sparc_srlx (code, ins->sreg1, sparc_o7, ins->dreg);
/* ---- unary not/neg, synthesized from xnor/sub ---- */
3372 /* can't use sparc_not */
3373 sparc_xnor (code, FALSE, ins->sreg1, sparc_g0, ins->dreg);
3377 /* can't use sparc_neg */
3378 sparc_sub (code, FALSE, sparc_g0, ins->sreg1, ins->dreg);
/* ---- multiplication, including overflow-checking forms ---- */
3382 sparc_smul (code, FALSE, ins->sreg1, ins->sreg2, ins->dreg);
3388 if ((ins->inst_imm == 1) && (ins->sreg1 == ins->dreg))
3391 /* Transform multiplication into a shift */
3392 for (i = 0; i < 30; ++i) {
3394 if (ins->inst_imm == imm)
3398 sparc_sll_imm (code, ins->sreg1, i, ins->dreg);
3400 EMIT_ALU_IMM (ins, smul, FALSE);
/* Signed overflow: %y (upper 32 bits) must equal the sign extension of
 * the low 32-bit result */
3405 sparc_smul (code, TRUE, ins->sreg1, ins->sreg2, ins->dreg);
3406 sparc_rdy (code, sparc_g1);
3407 sparc_sra_imm (code, ins->dreg, 31, sparc_o7);
3408 sparc_cmp (code, sparc_g1, sparc_o7);
3409 EMIT_COND_SYSTEM_EXCEPTION_GENERAL (ins, sparc_bne, "OverflowException", TRUE, sparc_icc_short);
3411 case CEE_MUL_OVF_UN:
3412 case OP_IMUL_OVF_UN:
/* Unsigned overflow: the upper 32 bits in %y must be zero */
3413 sparc_umul (code, TRUE, ins->sreg1, ins->sreg2, ins->dreg);
3414 sparc_rdy (code, sparc_o7);
3415 sparc_cmp (code, sparc_o7, sparc_g0);
3416 EMIT_COND_SYSTEM_EXCEPTION_GENERAL (ins, sparc_bne, "OverflowException", TRUE, sparc_icc_short);
/* ---- constants, patched constants, register moves ---- */
3420 sparc_set (code, ins->inst_c0, ins->dreg);
3423 sparc_set (code, ins->inst_l, ins->dreg);
/* Emit a patchable 'set' template; the real value is filled in later */
3426 mono_add_patch_info (cfg, offset, (MonoJumpInfoType)ins->inst_i1, ins->inst_p0);
3427 sparc_set_template (code, ins->dreg);
3433 if (ins->sreg1 != ins->dreg)
3434 sparc_mov_reg_reg (code, ins->sreg1, ins->dreg);
3437 /* Only used on V9 */
3438 if (ins->sreg1 != ins->dreg)
3439 sparc_fmovd (code, ins->sreg1, ins->dreg);
3441 case OP_SPARC_SETFREG_FLOAT:
3442 /* Only used on V9 */
3443 sparc_fdtos (code, ins->sreg1, ins->dreg);
/* ---- tail call (METHOD_JUMP patch): reload arg regs, jump, and restore
 * the parent frame in the delay slot ---- */
3446 if (cfg->method->save_lmf)
3449 code = emit_load_volatile_arguments (cfg, code);
3450 mono_add_patch_info (cfg, (guint8*)code - cfg->native_code, MONO_PATCH_INFO_METHOD_JUMP, ins->inst_p0);
3451 sparc_set_template (code, sparc_o7);
3452 sparc_jmpl (code, sparc_o7, sparc_g0, sparc_g0);
3453 /* Restore parent frame in delay slot */
3454 sparc_restore_imm (code, sparc_g0, 0, sparc_g0);
3457 /* ensure ins->sreg1 is not NULL */
/* A load from [sreg1 + 0] faults if sreg1 is NULL — cheap null check */
3458 sparc_ld_imm (code, ins->sreg1, 0, sparc_g0);
3461 sparc_add_imm (code, FALSE, sparc_fp, cfg->sig_cookie, sparc_o7);
3462 sparc_sti_imm (code, sparc_o7, ins->sreg1, 0);
/* ---- direct calls (patched METHOD or ABS target) ---- */
3469 call = (MonoCallInst*)ins;
3470 g_assert (!call->virtual);
3471 code = emit_save_sp_to_lmf (cfg, code);
3472 if (ins->flags & MONO_INST_HAS_METHOD)
3473 code = emit_call (cfg, code, MONO_PATCH_INFO_METHOD, call->method);
3475 code = emit_call (cfg, code, MONO_PATCH_INFO_ABS, call->fptr);
3477 code = emit_vret_token (ins, code);
3478 code = emit_move_return_value (ins, code);
/* ---- indirect calls through a register ---- */
3483 case OP_VOIDCALL_REG:
3485 call = (MonoCallInst*)ins;
3486 code = emit_save_sp_to_lmf (cfg, code);
3487 sparc_jmpl (code, ins->sreg1, sparc_g0, sparc_callsite);
3489 * We emit a special kind of nop in the delay slot to tell the
3490 * trampoline code that this is a virtual call, thus an unbox
3491 * trampoline might need to be called.
3494 sparc_or_imm (code, FALSE, sparc_g0, 0xca, sparc_g0);
3498 code = emit_vret_token (ins, code);
3499 code = emit_move_return_value (ins, code);
/* ---- indirect calls through [basereg + offset] (vtable slots) ---- */
3501 case OP_FCALL_MEMBASE:
3502 case OP_LCALL_MEMBASE:
3503 case OP_VCALL_MEMBASE:
3504 case OP_VOIDCALL_MEMBASE:
3505 case OP_CALL_MEMBASE:
3506 call = (MonoCallInst*)ins;
3507 g_assert (sparc_is_imm13 (ins->inst_offset));
3508 code = emit_save_sp_to_lmf (cfg, code);
3509 sparc_ldi_imm (code, ins->inst_basereg, ins->inst_offset, sparc_o7);
3510 sparc_jmpl (code, sparc_o7, sparc_g0, sparc_callsite);
/* Same virtual-call marker nop as above, in the delay slot */
3512 sparc_or_imm (code, FALSE, sparc_g0, 0xca, sparc_g0);
3516 code = emit_vret_token (ins, code);
3517 code = emit_move_return_value (ins, code);
/* ---- move fp value into the return register, narrowing R4 results ---- */
3520 if (cfg->method->signature->ret->type == MONO_TYPE_R4)
3521 sparc_fdtos (code, ins->sreg1, sparc_f0);
3524 sparc_fmovd (code, ins->sreg1, ins->dreg);
3526 /* FIXME: Why not use fmovd ? */
3527 sparc_fmovs (code, ins->sreg1, ins->dreg);
3528 sparc_fmovs (code, ins->sreg1 + 1, ins->dreg + 1);
3533 g_assert_not_reached ();
/* ---- localloc: round size up, move %sp down, optionally zero-init ---- */
3538 /* Keep alignment */
3539 sparc_add_imm (code, FALSE, ins->sreg1, MONO_ARCH_FRAME_ALIGNMENT - 1, ins->dreg);
3540 sparc_set (code, ~(MONO_ARCH_FRAME_ALIGNMENT - 1), sparc_o7);
3541 sparc_and (code, FALSE, ins->dreg, sparc_o7, ins->dreg);
/* Size register may alias dreg; pick a scratch to preserve the count */
3543 if ((ins->flags & MONO_INST_INIT) && (ins->sreg1 == ins->dreg)) {
3545 size_reg = sparc_g4;
3547 size_reg = sparc_g1;
3549 sparc_mov_reg_reg (code, ins->dreg, size_reg);
3552 size_reg = ins->sreg1;
3554 sparc_sub (code, FALSE, sparc_sp, ins->dreg, ins->dreg);
3555 /* Keep %sp valid at all times */
3556 sparc_mov_reg_reg (code, ins->dreg, sparc_sp);
3557 g_assert (sparc_is_imm13 (MONO_SPARC_STACK_BIAS + cfg->arch.localloc_offset));
3558 sparc_add_imm (code, FALSE, ins->dreg, MONO_SPARC_STACK_BIAS + cfg->arch.localloc_offset, ins->dreg);
3560 if (ins->flags & MONO_INST_INIT) {
3562 /* Initialize memory region */
/* Zeroing loop: store word/dword at a time, 8 bytes on V9, 4 otherwise */
3563 sparc_cmp_imm (code, size_reg, 0);
3565 sparc_branch (code, 0, sparc_be, 0);
3567 sparc_set (code, 0, sparc_o7);
3568 sparc_sub_imm (code, 0, size_reg, sparcv9 ? 8 : 4, size_reg);
3572 sparc_stx (code, sparc_g0, ins->dreg, sparc_o7);
3574 sparc_st (code, sparc_g0, ins->dreg, sparc_o7);
3575 sparc_cmp (code, sparc_o7, size_reg);
3577 sparc_branch (code, 0, sparc_bl, 0);
3578 sparc_patch (br [2], br [1]);
3580 sparc_add_imm (code, 0, sparc_o7, sparcv9 ? 8 : 4, sparc_o7);
3581 sparc_patch (br [0], code);
3585 case OP_SPARC_LOCALLOC_IMM: {
3586 gint32 offset = ins->inst_c0;
3587 offset = ALIGN_TO (offset, MONO_ARCH_FRAME_ALIGNMENT);
3588 if (sparc_is_imm13 (offset))
3589 sparc_sub_imm (code, FALSE, sparc_sp, offset, sparc_sp);
3591 sparc_set (code, offset, sparc_o7);
3592 sparc_sub (code, FALSE, sparc_sp, sparc_o7, sparc_sp);
3594 g_assert (sparc_is_imm13 (MONO_SPARC_STACK_BIAS + cfg->arch.localloc_offset));
3595 sparc_add_imm (code, FALSE, sparc_sp, MONO_SPARC_STACK_BIAS + cfg->arch.localloc_offset, ins->dreg);
3596 if ((ins->flags & MONO_INST_INIT) && (offset > 0)) {
/* Small constant sizes are zeroed with unrolled stores ... */
3602 while (i < offset) {
3604 sparc_stx_imm (code, sparc_g0, ins->dreg, i);
3608 sparc_st_imm (code, sparc_g0, ins->dreg, i);
/* ... larger ones with a count-down store loop */
3614 sparc_set (code, offset, sparc_o7);
3615 sparc_sub_imm (code, 0, sparc_o7, sparcv9 ? 8 : 4, sparc_o7);
3616 /* beginning of loop */
3619 sparc_stx (code, sparc_g0, ins->dreg, sparc_o7);
3621 sparc_st (code, sparc_g0, ins->dreg, sparc_o7);
3622 sparc_cmp_imm (code, sparc_o7, 0);
3624 sparc_branch (code, 0, sparc_bne, 0);
3626 sparc_sub_imm (code, 0, sparc_o7, sparcv9 ? 8 : 4, sparc_o7);
3627 sparc_patch (br [1], br [0]);
/* ---- returns, throws, and exception-handler support ---- */
3633 /* The return is done in the epilog */
3634 g_assert_not_reached ();
3637 sparc_mov_reg_reg (code, ins->sreg1, sparc_o0);
3638 mono_add_patch_info (cfg, (guint8*)code - cfg->native_code, MONO_PATCH_INFO_INTERNAL_METHOD,
3639 (gpointer)"mono_arch_throw_exception");
3643 sparc_mov_reg_reg (code, ins->sreg1, sparc_o0);
3644 mono_add_patch_info (cfg, (guint8*)code - cfg->native_code, MONO_PATCH_INFO_INTERNAL_METHOD,
3645 (gpointer)"mono_arch_rethrow_exception");
3648 case OP_START_HANDLER: {
3650 * The START_HANDLER instruction marks the beginning of a handler
3651 * block. It is called using a call instruction, so %o7 contains
3652 * the return address. Since the handler executes in the same stack
3653 * frame as the method itself, we can't use save/restore to save
3654 * the return address. Instead, we save it into a dedicated
3657 MonoInst *spvar = mono_find_spvar_for_region (cfg, bb->region);
3658 if (!sparc_is_imm13 (spvar->inst_offset)) {
3659 sparc_set (code, spvar->inst_offset, GP_SCRATCH_REG);
3660 sparc_sti (code, sparc_o7, spvar->inst_basereg, GP_SCRATCH_REG);
3663 sparc_sti_imm (code, sparc_o7, spvar->inst_basereg, spvar->inst_offset);
3666 case OP_ENDFILTER: {
/* Reload the saved return address and jump past the call (+8) */
3667 MonoInst *spvar = mono_find_spvar_for_region (cfg, bb->region);
3668 if (!sparc_is_imm13 (spvar->inst_offset)) {
3669 sparc_set (code, spvar->inst_offset, GP_SCRATCH_REG);
3670 sparc_ldi (code, spvar->inst_basereg, GP_SCRATCH_REG, sparc_o7);
3673 sparc_ldi_imm (code, spvar->inst_basereg, spvar->inst_offset, sparc_o7);
3674 sparc_jmpl_imm (code, sparc_o7, 8, sparc_g0);
/* Filter result is returned in %o0 (set in the delay slot) */
3676 sparc_mov_reg_reg (code, ins->sreg1, sparc_o0);
3679 case CEE_ENDFINALLY: {
3680 MonoInst *spvar = mono_find_spvar_for_region (cfg, bb->region);
3681 if (!sparc_is_imm13 (spvar->inst_offset)) {
3682 sparc_set (code, spvar->inst_offset, GP_SCRATCH_REG);
3683 sparc_ldi (code, spvar->inst_basereg, GP_SCRATCH_REG, sparc_o7);
3686 sparc_ldi_imm (code, spvar->inst_basereg, spvar->inst_offset, sparc_o7);
3687 sparc_jmpl_imm (code, sparc_o7, 8, sparc_g0);
3691 case OP_CALL_HANDLER:
3692 mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_BB, ins->inst_target_bb);
3693 /* This is a jump inside the method, so call_simple works even on V9 */
3694 sparc_call_simple (code, 0);
/* Label: just record its native offset for later branch patching */
3698 ins->inst_c0 = (guint8*)code - cfg->native_code;
/* ---- unconditional branches ---- */
3701 //g_print ("target: %p, next: %p, curr: %p, last: %p\n", ins->inst_target_bb, bb->next_bb, ins, bb->last_ins);
/* Branch to the fall-through block at the end of BB can be elided */
3702 if ((ins->inst_target_bb == bb->next_bb) && ins == bb->last_ins)
3704 if (ins->flags & MONO_INST_BRLABEL) {
/* Target already emitted: branch with a known displacement ... */
3705 if (ins->inst_i0->inst_c0) {
3706 gint32 disp = (ins->inst_i0->inst_c0 - ((guint8*)code - cfg->native_code)) >> 2;
3707 g_assert (sparc_is_imm22 (disp));
3708 sparc_branch (code, 1, sparc_ba, disp);
/* ... otherwise emit a zero-displacement branch and patch later */
3710 mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_LABEL, ins->inst_i0);
3711 sparc_branch (code, 1, sparc_ba, 0);
3714 if (ins->inst_target_bb->native_offset) {
3715 gint32 disp = (ins->inst_target_bb->native_offset - ((guint8*)code - cfg->native_code)) >> 2;
3716 g_assert (sparc_is_imm22 (disp));
3717 sparc_branch (code, 1, sparc_ba, disp);
3719 mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_BB, ins->inst_target_bb);
3720 sparc_branch (code, 1, sparc_ba, 0);
3726 sparc_jmp (code, ins->sreg1, sparc_g0);
/* ---- setcc: materialize a condition code into 0/1, preferring the V9
 * conditional move when available ---- */
3734 if (v64 && (cfg->opt & MONO_OPT_CMOV)) {
3735 sparc_clr_reg (code, ins->dreg);
3736 sparc_movcc_imm (code, sparc_xcc, opcode_to_sparc_cond (ins->opcode), 1, ins->dreg);
3739 sparc_clr_reg (code, ins->dreg);
3741 sparc_branchp (code, 1, opcode_to_sparc_cond (ins->opcode), DEFAULT_ICC, 0, 2);
3743 sparc_branch (code, 1, opcode_to_sparc_cond (ins->opcode), 2);
3746 sparc_set (code, 1, ins->dreg);
3754 if (v64 && (cfg->opt & MONO_OPT_CMOV)) {
3755 sparc_clr_reg (code, ins->dreg);
3756 sparc_movcc_imm (code, sparc_icc, opcode_to_sparc_cond (ins->opcode), 1, ins->dreg);
3759 sparc_clr_reg (code, ins->dreg);
3760 sparc_branchp (code, 1, opcode_to_sparc_cond (ins->opcode), sparc_icc_short, 0, 2);
3762 sparc_set (code, 1, ins->dreg);
/* ---- conditional exception throws ---- */
3765 case OP_COND_EXC_EQ:
3766 case OP_COND_EXC_NE_UN:
3767 case OP_COND_EXC_LT:
3768 case OP_COND_EXC_LT_UN:
3769 case OP_COND_EXC_GT:
3770 case OP_COND_EXC_GT_UN:
3771 case OP_COND_EXC_GE:
3772 case OP_COND_EXC_GE_UN:
3773 case OP_COND_EXC_LE:
3774 case OP_COND_EXC_LE_UN:
3775 case OP_COND_EXC_OV:
3776 case OP_COND_EXC_NO:
3778 case OP_COND_EXC_NC:
3779 EMIT_COND_SYSTEM_EXCEPTION (ins, opcode_to_sparc_cond (ins->opcode), ins->inst_p1);
3781 case OP_SPARC_COND_EXC_EQZ:
3782 EMIT_COND_SYSTEM_EXCEPTION_BPR (ins, brz, ins->inst_p1);
3784 case OP_SPARC_COND_EXC_GEZ:
3785 EMIT_COND_SYSTEM_EXCEPTION_BPR (ins, brgez, ins->inst_p1);
3787 case OP_SPARC_COND_EXC_GTZ:
3788 EMIT_COND_SYSTEM_EXCEPTION_BPR (ins, brgz, ins->inst_p1);
3790 case OP_SPARC_COND_EXC_LEZ:
3791 EMIT_COND_SYSTEM_EXCEPTION_BPR (ins, brlez, ins->inst_p1);
3793 case OP_SPARC_COND_EXC_LTZ:
3794 EMIT_COND_SYSTEM_EXCEPTION_BPR (ins, brlz, ins->inst_p1);
3796 case OP_SPARC_COND_EXC_NEZ:
3797 EMIT_COND_SYSTEM_EXCEPTION_BPR (ins, brnz, ins->inst_p1);
3799 case OP_COND_EXC_IOV:
3800 case OP_COND_EXC_IC:
3801 EMIT_COND_SYSTEM_EXCEPTION_GENERAL (ins, opcode_to_sparc_cond (ins->opcode), ins->inst_p1, TRUE, sparc_icc_short);
/* ---- conditional branches (predicted and register forms) ---- */
3814 EMIT_COND_BRANCH_PREDICTED (ins, opcode_to_sparc_cond (ins->opcode), 1, 1);
3816 EMIT_COND_BRANCH (ins, opcode_to_sparc_cond (ins->opcode), 1, 1);
3830 /* Only used on V9 */
3831 EMIT_COND_BRANCH_ICC (ins, opcode_to_sparc_cond (ins->opcode), 1, 1, sparc_icc_short);
3836 EMIT_COND_BRANCH_BPR (ins, brz, 1, 1, 1);
3838 case OP_SPARC_BRLEZ:
3839 EMIT_COND_BRANCH_BPR (ins, brlez, 1, 1, 1);
3842 EMIT_COND_BRANCH_BPR (ins, brlz, 1, 1, 1);
3845 EMIT_COND_BRANCH_BPR (ins, brnz, 1, 1, 1);
3848 EMIT_COND_BRANCH_BPR (ins, brgz, 1, 1, 1);
3850 case OP_SPARC_BRGEZ:
3851 EMIT_COND_BRANCH_BPR (ins, brgez, 1, 1, 1);
3854 /* floating point opcodes */
/* fp constants: emit a patchable address template, then load from it */
3856 mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_R8, ins->inst_p0);
3858 sparc_set_template (code, sparc_o7);
3860 sparc_sethi (code, 0, sparc_o7);
3862 sparc_lddf_imm (code, sparc_o7, 0, ins->dreg);
3865 mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_R4, ins->inst_p0);
3867 sparc_set_template (code, sparc_o7);
3869 sparc_sethi (code, 0, sparc_o7);
3871 sparc_ldf_imm (code, sparc_o7, 0, FP_SCRATCH_REG);
3873 /* Extend to double */
3874 sparc_fstod (code, FP_SCRATCH_REG, ins->dreg);
3876 case OP_STORER8_MEMBASE_REG:
3877 if (!sparc_is_imm13 (ins->inst_offset + 4)) {
3878 sparc_set (code, ins->inst_offset, sparc_o7);
3879 /* SPARCV9 handles misaligned fp loads/stores */
/* Pre-V9, a misaligned double is stored as two single words */
3880 if (!v64 && (ins->inst_offset % 8)) {
3882 sparc_add (code, FALSE, ins->inst_destbasereg, sparc_o7, sparc_o7);
3883 sparc_stf (code, ins->sreg1, sparc_o7, sparc_g0);
3884 sparc_stf_imm (code, ins->sreg1 + 1, sparc_o7, 4);
3886 sparc_stdf (code, ins->sreg1, ins->inst_destbasereg, sparc_o7);
3889 if (!v64 && (ins->inst_offset % 8)) {
3891 sparc_stf_imm (code, ins->sreg1, ins->inst_destbasereg, ins->inst_offset);
3892 sparc_stf_imm (code, ins->sreg1 + 1, ins->inst_destbasereg, ins->inst_offset + 4);
3894 sparc_stdf_imm (code, ins->sreg1, ins->inst_destbasereg, ins->inst_offset);
3897 case OP_LOADR8_MEMBASE:
3898 EMIT_LOAD_MEMBASE (ins, lddf);
3900 case OP_STORER4_MEMBASE_REG:
3901 /* This requires a double->single conversion */
3902 sparc_fdtos (code, ins->sreg1, FP_SCRATCH_REG);
3903 if (!sparc_is_imm13 (ins->inst_offset)) {
3904 sparc_set (code, ins->inst_offset, sparc_o7);
3905 sparc_stf (code, FP_SCRATCH_REG, ins->inst_destbasereg, sparc_o7);
3908 sparc_stf_imm (code, FP_SCRATCH_REG, ins->inst_destbasereg, ins->inst_offset);
3910 case OP_LOADR4_MEMBASE: {
3911 /* ldf needs a single precision register */
3912 int dreg = ins->dreg;
3913 ins->dreg = FP_SCRATCH_REG;
3914 EMIT_LOAD_MEMBASE (ins, ldf);
3916 /* Extend to double */
3917 sparc_fstod (code, FP_SCRATCH_REG, ins->dreg);
3922 sparc_fmovd (code, ins->sreg1, ins->dreg);
3924 sparc_fmovs (code, ins->sreg1, ins->dreg);
3925 sparc_fmovs (code, ins->sreg1 + 1, ins->dreg + 1);
/* int -> float: bounce through a stack spill slot, then convert */
3929 gint32 offset = mono_spillvar_offset_float (cfg, 0);
3930 if (!sparc_is_imm13 (offset))
3933 sparc_stx_imm (code, ins->sreg1, sparc_sp, offset);
3934 sparc_lddf_imm (code, sparc_sp, offset, FP_SCRATCH_REG);
3935 sparc_fxtos (code, FP_SCRATCH_REG, FP_SCRATCH_REG);
3937 sparc_st_imm (code, ins->sreg1, sparc_sp, offset);
3938 sparc_ldf_imm (code, sparc_sp, offset, FP_SCRATCH_REG);
3939 sparc_fitos (code, FP_SCRATCH_REG, FP_SCRATCH_REG);
3941 sparc_fstod (code, FP_SCRATCH_REG, ins->dreg);
3945 gint32 offset = mono_spillvar_offset_float (cfg, 0);
3946 if (!sparc_is_imm13 (offset))
3949 sparc_stx_imm (code, ins->sreg1, sparc_sp, offset);
3950 sparc_lddf_imm (code, sparc_sp, offset, FP_SCRATCH_REG);
3951 sparc_fxtod (code, FP_SCRATCH_REG, ins->dreg);
3953 sparc_st_imm (code, ins->sreg1, sparc_sp, offset);
3954 sparc_ldf_imm (code, sparc_sp, offset, FP_SCRATCH_REG);
3955 sparc_fitod (code, FP_SCRATCH_REG, ins->dreg);
/* float -> int: convert, spill, reload, then mask to the target width */
3959 case OP_FCONV_TO_I1:
3960 case OP_FCONV_TO_U1:
3961 case OP_FCONV_TO_I2:
3962 case OP_FCONV_TO_U2:
3967 case OP_FCONV_TO_I4:
3968 case OP_FCONV_TO_U4: {
3969 gint32 offset = mono_spillvar_offset_float (cfg, 0);
3970 if (!sparc_is_imm13 (offset))
3972 sparc_fdtoi (code, ins->sreg1, FP_SCRATCH_REG);
3973 sparc_stdf_imm (code, FP_SCRATCH_REG, sparc_sp, offset);
3974 sparc_ld_imm (code, sparc_sp, offset, ins->dreg);
3976 switch (ins->opcode) {
3977 case OP_FCONV_TO_I1:
3978 case OP_FCONV_TO_U1:
3979 sparc_and_imm (code, 0, ins->dreg, 0xff, ins->dreg);
3981 case OP_FCONV_TO_I2:
3982 case OP_FCONV_TO_U2:
3983 sparc_set (code, 0xffff, sparc_o7);
3984 sparc_and (code, 0, ins->dreg, sparc_o7, ins->dreg);
3991 case OP_FCONV_TO_I8:
3992 case OP_FCONV_TO_U8:
3994 g_assert_not_reached ();
3998 g_assert_not_reached ();
4000 case OP_LCONV_TO_R_UN: {
4002 g_assert_not_reached ();
4005 case OP_LCONV_TO_OVF_I: {
4006 guint32 *br [3], *label [1];
/* NOTE(review): range comment below looks truncated (8000000 /
 * 0x7f000000) — likely meant 0x80000000 / 0x7fffffff; confirm upstream. */
4009 * Valid ints: 0xffffffff:8000000 to 00000000:0x7f000000
4011 sparc_cmp_imm (code, ins->sreg1, 0);
4013 sparc_branch (code, 1, sparc_bneg, 0);
4017 /* ms word must be 0 */
4018 sparc_cmp_imm (code, ins->sreg2, 0);
4020 sparc_branch (code, 1, sparc_be, 0);
4025 EMIT_COND_SYSTEM_EXCEPTION (ins, sparc_ba, "OverflowException");
4028 sparc_patch (br [0], code);
4030 /* ms word must 0xfffffff */
4031 sparc_cmp_imm (code, ins->sreg2, -1);
4033 sparc_branch (code, 1, sparc_bne, 0);
4035 sparc_patch (br [2], label [0]);
4038 sparc_patch (br [1], code);
4039 if (ins->sreg1 != ins->dreg)
4040 sparc_mov_reg_reg (code, ins->sreg1, ins->dreg);
/* ---- double-precision fp arithmetic ---- */
4044 sparc_faddd (code, ins->sreg1, ins->sreg2, ins->dreg);
4047 sparc_fsubd (code, ins->sreg1, ins->sreg2, ins->dreg);
4050 sparc_fmuld (code, ins->sreg1, ins->sreg2, ins->dreg);
4053 sparc_fdivd (code, ins->sreg1, ins->sreg2, ins->dreg);
4057 sparc_fnegd (code, ins->sreg1, ins->dreg);
4059 /* FIXME: why don't use fnegd ? */
4060 sparc_fnegs (code, ins->sreg1, ins->dreg);
/* fp remainder computed as sreg1 - (sreg1 / sreg2) * sreg2 */
4064 sparc_fdivd (code, ins->sreg1, ins->sreg2, FP_SCRATCH_REG);
4065 sparc_fmuld (code, ins->sreg2, FP_SCRATCH_REG, FP_SCRATCH_REG);
4066 sparc_fsubd (code, ins->sreg1, FP_SCRATCH_REG, ins->dreg);
4069 sparc_fcmpd (code, ins->sreg1, ins->sreg2);
/* fp setcc: compare, clear dreg, and set 1 on the matching fp branch */
4076 sparc_fcmpd (code, ins->sreg1, ins->sreg2);
4077 sparc_clr_reg (code, ins->dreg);
4078 switch (ins->opcode) {
4081 sparc_fbranch (code, 1, opcode_to_sparc_cond (ins->opcode), 4);
4083 sparc_set (code, 1, ins->dreg);
/* Unordered comparisons also count as 'true' for the _UN forms */
4084 sparc_fbranch (code, 1, sparc_fbu, 2);
4086 sparc_set (code, 1, ins->dreg);
4089 sparc_fbranch (code, 1, opcode_to_sparc_cond (ins->opcode), 2);
4091 sparc_set (code, 1, ins->dreg);
/* ---- fp conditional branches ---- */
4097 EMIT_FLOAT_COND_BRANCH (ins, opcode_to_sparc_cond (ins->opcode), 1, 1);
4100 /* clt.un + brfalse */
4102 sparc_fbranch (code, 1, sparc_fbul, 0);
4105 EMIT_FLOAT_COND_BRANCH (ins, sparc_fba, 1, 1);
4106 sparc_patch (p, (guint8*)code);
4110 /* cgt.un + brfalse */
4112 sparc_fbranch (code, 1, sparc_fbug, 0);
4115 EMIT_FLOAT_COND_BRANCH (ins, sparc_fba, 1, 1);
4116 sparc_patch (p, (guint8*)code);
/* The _UN branch forms take the branch on unordered operands too */
4120 EMIT_FLOAT_COND_BRANCH (ins, sparc_fbne, 1, 1);
4121 EMIT_FLOAT_COND_BRANCH (ins, sparc_fbu, 1, 1);
4124 EMIT_FLOAT_COND_BRANCH (ins, sparc_fbl, 1, 1);
4125 EMIT_FLOAT_COND_BRANCH (ins, sparc_fbu, 1, 1);
4128 EMIT_FLOAT_COND_BRANCH (ins, sparc_fbg, 1, 1);
4129 EMIT_FLOAT_COND_BRANCH (ins, sparc_fbu, 1, 1);
4132 EMIT_FLOAT_COND_BRANCH (ins, sparc_fbge, 1, 1);
4133 EMIT_FLOAT_COND_BRANCH (ins, sparc_fbu, 1, 1);
4136 EMIT_FLOAT_COND_BRANCH (ins, sparc_fble, 1, 1);
4137 EMIT_FLOAT_COND_BRANCH (ins, sparc_fbu, 1, 1);
4139 case CEE_CKFINITE: {
/* Inspect the exponent field of the spilled double: all-ones (2047)
 * means Inf/NaN -> ArithmeticException */
4140 gint32 offset = mono_spillvar_offset_float (cfg, 0);
4141 if (!sparc_is_imm13 (offset))
4143 sparc_stdf_imm (code, ins->sreg1, sparc_sp, offset);
4144 sparc_lduh_imm (code, sparc_sp, offset, sparc_o7);
4145 sparc_srl_imm (code, sparc_o7, 4, sparc_o7);
4146 sparc_and_imm (code, FALSE, sparc_o7, 2047, sparc_o7);
4147 sparc_cmp_imm (code, sparc_o7, 2047);
4148 EMIT_COND_SYSTEM_EXCEPTION (ins, sparc_be, "ArithmeticException");
4150 sparc_fmovd (code, ins->sreg1, ins->dreg);
4152 sparc_fmovs (code, ins->sreg1, ins->dreg);
4153 sparc_fmovs (code, ins->sreg1 + 1, ins->dreg + 1);
/* ---- unknown opcode: report and abort ---- */
4159 g_warning ("unknown opcode %s in %s()\n", mono_inst_name (ins->opcode), __FUNCTION__);
4161 g_warning ("%s:%d: unknown opcode %s\n", __FILE__, __LINE__, mono_inst_name (ins->opcode));
4163 g_assert_not_reached ();
/* Sanity check: the emitted size must not exceed the spec's max_len */
4166 if ((((guint8*)code) - code_start) > max_len) {
4167 g_warning ("wrong maximal instruction length of instruction %s (expected %d, got %d)",
4168 mono_inst_name (ins->opcode), max_len, ((guint8*)code) - code_start);
4169 g_assert_not_reached ();
4179 cfg->code_len = (guint8*)code - cfg->native_code;
/*
 * mono_arch_register_lowlevel_calls:
 *
 *   Register SPARC-specific low-level helpers with the JIT icall table so
 * generated code can reference them by name. The TRUE argument marks them
 * as not needing a managed wrapper.
 */
4183 mono_arch_register_lowlevel_calls (void)
4185 mono_register_jit_icall (mono_sparc_break, "mono_sparc_break", NULL, TRUE);
4186 mono_register_jit_icall (mono_arch_get_lmf_addr, "mono_arch_get_lmf_addr", NULL, TRUE);
/*
 * mono_arch_patch_code:
 *
 *   Walk the jump-info list @ji for @method and patch each recorded code
 * location in @code with its resolved target. Patch targets are resolved
 * through mono_resolve_patch_target (); most patch kinds fall through to the
 * generic sparc_patch () at the bottom, with a few kinds handled specially.
 */
4190 mono_arch_patch_code (MonoMethod *method, MonoDomain *domain, guint8 *code, MonoJumpInfo *ji, gboolean run_cctors)
4192 MonoJumpInfo *patch_info;
4194 /* FIXME: Move part of this to arch independent code */
4195 for (patch_info = ji; patch_info; patch_info = patch_info->next) {
4196 unsigned char *ip = patch_info->ip.i + code;
4199 target = mono_resolve_patch_target (method, domain, code, patch_info, run_cctors);
4201 switch (patch_info->type) {
4202 case MONO_PATCH_INFO_NONE:
4204 case MONO_PATCH_INFO_CLASS_INIT: {
4205 guint32 *ip2 = (guint32*)ip;
/* Might already have been changed to a nop by a previous patch pass */
4208 sparc_set_template (ip2, sparc_o7);
4209 sparc_jmpl (ip2, sparc_o7, sparc_g0, sparc_o7);
4211 sparc_call_simple (ip2, 0);
4215 case MONO_PATCH_INFO_METHOD_JUMP: {
4216 guint32 *ip2 = (guint32*)ip;
/* Might already have been patched */
4218 sparc_set_template (ip2, sparc_o7);
/* Default: generic instruction patching for branches/sethi sequences */
4224 sparc_patch ((guint32*)ip, target);
/*
 * mono_arch_instrument_prolog:
 *
 *   Emit code at the start of a method which calls the tracing/profiling
 * function @func with the method and a pointer to the saved arguments.
 * @p is the current code emission pointer; the new pointer is the result.
 * Incoming registers are spilled to the stack so @func can inspect them,
 * and (on V9) caller-saved float argument registers are saved and restored
 * around the call.
 */
4229 mono_arch_instrument_prolog (MonoCompile *cfg, void *func, void *p, gboolean enable_arguments)
4232 guint32 *code = (guint32*)p;
4233 MonoMethodSignature *sig = cfg->method->signature;
/* Save the six incoming integer registers to their stack home slots */
4236 /* Save registers to stack */
4237 for (i = 0; i < 6; ++i)
4238 sparc_sti_imm (code, sparc_i0 + i, sparc_fp, ARGS_OFFSET + (i * sizeof (gpointer)));
4240 cinfo = get_call_info (sig, FALSE);
4242 /* Save float regs on V9, since they are caller saved */
4243 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
4244 ArgInfo *ainfo = cinfo->args + i;
4245 gint32 stack_offset;
4247 stack_offset = ainfo->offset + ARGS_OFFSET;
4249 if (ainfo->storage == ArgInFloatReg) {
/* stf only takes a 13-bit signed immediate offset */
4250 if (!sparc_is_imm13 (stack_offset))
4252 sparc_stf_imm (code, ainfo->reg, sparc_fp, stack_offset);
4254 else if (ainfo->storage == ArgInDoubleReg) {
4255 /* The offset is guaranteed to be aligned by the ABI rules */
4256 sparc_stdf_imm (code, ainfo->reg, sparc_fp, stack_offset);
/* Call @func (method, &args): o0 = method, o1 = biased frame pointer */
4260 sparc_set (code, cfg->method, sparc_o0);
4261 sparc_add_imm (code, FALSE, sparc_fp, MONO_SPARC_STACK_BIAS, sparc_o1);
4263 mono_add_patch_info (cfg, (guint8*)code-cfg->native_code, MONO_PATCH_INFO_ABS, func);
4266 /* Restore float regs on V9 */
4267 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
4268 ArgInfo *ainfo = cinfo->args + i;
4269 gint32 stack_offset;
4271 stack_offset = ainfo->offset + ARGS_OFFSET;
4273 if (ainfo->storage == ArgInFloatReg) {
4274 if (!sparc_is_imm13 (stack_offset))
4276 sparc_ldf_imm (code, sparc_fp, stack_offset, ainfo->reg);
4278 else if (ainfo->storage == ArgInDoubleReg) {
4279 /* The offset is guaranteed to be aligned by the ABI rules */
4280 sparc_lddf_imm (code, sparc_fp, stack_offset, ainfo->reg);
/*
 * mono_arch_instrument_epilog:
 *
 *   Emit code at the end of a method which calls the tracing/profiling
 * function @func. The method's return value is saved to the stack (and
 * copied into the outgoing registers when @enable_arguments is set) before
 * the call, then restored afterwards so the real epilog sees it intact.
 *
 * Fixes applied in review:
 * - The string-.ctor special case used `strcmp (...)` (true for every name
 *   EXCEPT ".ctor"); it must be `!strcmp (...)` as in the other backends.
 * - SAVE_TWO stored %i0 twice (fp+68 and fp+72) instead of the %i0/%i1
 *   pair, and the restore loaded fp+72 into %i0, clobbering the fp+68
 *   load; the second word is now %i1 in both directions.
 */
4298 mono_arch_instrument_epilog (MonoCompile *cfg, void *func, void *p, gboolean enable_arguments)
4300 guint32 *code = (guint32*)p;
4301 int save_mode = SAVE_NONE;
4302 MonoMethod *method = cfg->method;
/* Pick a save strategy based on the (underlying) return type */
4304 switch (mono_type_get_underlying_type (method->signature->ret)->type) {
4305 case MONO_TYPE_VOID:
4306 /* special case string .ctor icall */
4307 if (!strcmp (".ctor", method->name) && method->klass == mono_defaults.string_class)
4308 save_mode = SAVE_ONE;
4310 save_mode = SAVE_NONE;
4315 save_mode = SAVE_ONE;
4317 save_mode = SAVE_TWO;
4322 save_mode = SAVE_FP;
4324 case MONO_TYPE_VALUETYPE:
4325 save_mode = SAVE_STRUCT;
4328 save_mode = SAVE_ONE;
4332 /* Save the result to the stack and also put it into the output registers */
4334 switch (save_mode) {
/* SAVE_TWO: 64-bit result lives in the %i0/%i1 pair */
4337 sparc_st_imm (code, sparc_i0, sparc_fp, 68);
4338 sparc_st_imm (code, sparc_i1, sparc_fp, 72);
4339 sparc_mov_reg_reg (code, sparc_i0, sparc_o1);
4340 sparc_mov_reg_reg (code, sparc_i1, sparc_o2);
4343 sparc_sti_imm (code, sparc_i0, sparc_fp, ARGS_OFFSET);
4344 sparc_mov_reg_reg (code, sparc_i0, sparc_o1);
/* SAVE_FP: double result in %f0 */
4348 sparc_stdf_imm (code, sparc_f0, sparc_fp, ARGS_OFFSET);
4350 sparc_stdf_imm (code, sparc_f0, sparc_fp, 72);
4351 sparc_ld_imm (code, sparc_fp, 72, sparc_o1);
4352 sparc_ld_imm (code, sparc_fp, 72 + 4, sparc_o2);
4357 sparc_mov_reg_reg (code, sparc_i0, sparc_o1);
4359 sparc_ld_imm (code, sparc_fp, 64, sparc_o1);
/* Call @func (method, ...) */
4367 sparc_set (code, cfg->method, sparc_o0);
4369 mono_add_patch_info (cfg, (guint8*)code - cfg->native_code, MONO_PATCH_INFO_ABS, func);
4372 /* Restore result */
4374 switch (save_mode) {
4376 sparc_ld_imm (code, sparc_fp, 68, sparc_i0);
4377 sparc_ld_imm (code, sparc_fp, 72, sparc_i1);
4380 sparc_ldi_imm (code, sparc_fp, ARGS_OFFSET, sparc_i0);
4383 sparc_lddf_imm (code, sparc_fp, ARGS_OFFSET, sparc_f0);
/*
 * mono_arch_emit_prolog:
 *
 *   Emit the method prolog: compute the final frame size, emit the
 * `save` instruction that opens a new register window, and spill/copy
 * incoming arguments to their allocated locations. Also sets up the LMF
 * (Last Managed Frame) when the method requires one, and hooks the
 * call-tracing prolog when tracing is enabled.
 * Returns a pointer past the emitted code (cfg->native_code start).
 */
4394 mono_arch_emit_prolog (MonoCompile *cfg)
4396 MonoMethod *method = cfg->method;
4397 MonoMethodSignature *sig;
4403 cfg->code_size = 256;
4404 cfg->native_code = g_malloc (cfg->code_size);
4405 code = (guint32*)cfg->native_code;
4407 /* FIXME: Generate intermediate code instead */
/* Frame layout: locals, 16-word register save area, struct-return slot,
 * outgoing parameter area; aligned to MONO_ARCH_FRAME_ALIGNMENT. */
4409 offset = cfg->stack_offset;
4410 offset += (16 * sizeof (gpointer)); /* register save area */
4412 offset += 4; /* struct/union return pointer */
4415 /* add parameter area size for called functions */
4416 if (cfg->param_area < (6 * sizeof (gpointer)))
4417 /* Reserve space for the first 6 arguments even if it is unused */
4418 offset += 6 * sizeof (gpointer);
4420 offset += cfg->param_area;
4422 /* align the stack size */
4423 offset = ALIGN_TO (offset, MONO_ARCH_FRAME_ALIGNMENT);
4426 * localloc'd memory is stored between the local variables (whose
4427 * size is given by cfg->stack_offset), and between the space reserved
4430 cfg->arch.localloc_offset = offset - cfg->stack_offset;
4432 cfg->stack_offset = offset;
/* `save` takes a 13-bit signed immediate; use a scratch reg otherwise */
4434 if (!sparc_is_imm13 (- cfg->stack_offset)) {
4435 /* Can't use sparc_o7 here, since we're still in the caller's frame */
4436 sparc_set (code, (- cfg->stack_offset), GP_SCRATCH_REG);
4437 sparc_save (code, sparc_sp, GP_SCRATCH_REG, sparc_sp);
4440 sparc_save_imm (code, sparc_sp, - cfg->stack_offset, sparc_sp);
/* Debug aid: break on entry to methods whose name contains "foo" */
4443 if (strstr (cfg->method->name, "foo")) {
4444 mono_add_patch_info (cfg, (guint8*)code - cfg->native_code, MONO_PATCH_INFO_ABS, mono_sparc_break);
4445 sparc_call_simple (code, 0);
4450 sig = method->signature;
4452 cinfo = get_call_info (sig, FALSE);
4454 /* Keep in sync with emit_load_volatile_arguments */
4455 for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
4456 ArgInfo *ainfo = cinfo->args + i;
4457 gint32 stack_offset;
4459 inst = cfg->varinfo [i];
4461 if (sig->hasthis && (i == 0))
4462 arg_type = &mono_defaults.object_class->byval_arg;
4464 arg_type = sig->params [i - sig->hasthis];
4466 stack_offset = ainfo->offset + ARGS_OFFSET;
4468 /* Save the split arguments so they will reside entirely on the stack */
4469 if (ainfo->storage == ArgInSplitRegStack) {
4470 /* Save the register to the stack */
4471 g_assert (inst->opcode == OP_REGOFFSET);
4472 if (!sparc_is_imm13 (stack_offset))
4474 sparc_st_imm (code, sparc_i5, inst->inst_basereg, stack_offset);
/* V8: doubles must end up in dword-aligned slots (see header comment) */
4477 if (!v64 && !arg_type->byref && (arg_type->type == MONO_TYPE_R8)) {
4478 /* Save the argument to a dword aligned stack location */
4480 * stack_offset contains the offset of the argument on the stack.
4481 * inst->inst_offset contains the dword aligned offset where the value
4484 if (ainfo->storage == ArgInIRegPair) {
4485 if (!sparc_is_imm13 (inst->inst_offset + 4))
4487 sparc_st_imm (code, sparc_i0 + ainfo->reg, inst->inst_basereg, inst->inst_offset);
4488 sparc_st_imm (code, sparc_i0 + ainfo->reg + 1, inst->inst_basereg, inst->inst_offset + 4);
4491 if (ainfo->storage == ArgInSplitRegStack) {
4493 g_assert_not_reached ();
4495 if (stack_offset != inst->inst_offset) {
4496 /* stack_offset is not dword aligned, so we need to make a copy */
4497 sparc_st_imm (code, sparc_i5, inst->inst_basereg, inst->inst_offset);
4498 sparc_ld_imm (code, sparc_fp, stack_offset + 4, sparc_o7);
4499 sparc_st_imm (code, sparc_o7, inst->inst_basereg, inst->inst_offset + 4);
4503 if (ainfo->storage == ArgOnStackPair) {
4505 g_assert_not_reached ();
4507 if (stack_offset != inst->inst_offset) {
4508 /* stack_offset is not dword aligned, so we need to make a copy */
4509 sparc_ld_imm (code, sparc_fp, stack_offset, sparc_o7);
4510 sparc_st_imm (code, sparc_o7, inst->inst_basereg, inst->inst_offset);
4511 sparc_ld_imm (code, sparc_fp, stack_offset + 4, sparc_o7);
4512 sparc_st_imm (code, sparc_o7, inst->inst_basereg, inst->inst_offset + 4);
4516 g_assert_not_reached ();
4519 if ((ainfo->storage == ArgInIReg) && (inst->opcode != OP_REGVAR)) {
4520 /* Argument in register, but need to be saved to stack */
4521 if (!sparc_is_imm13 (stack_offset))
/* Store with the width implied by the slot's alignment within the
 * argument area (byte/half/word/doubleword). */
4523 if ((stack_offset - ARGS_OFFSET) & 0x1)
4524 sparc_stb_imm (code, sparc_i0 + ainfo->reg, inst->inst_basereg, stack_offset);
4526 if ((stack_offset - ARGS_OFFSET) & 0x2)
4527 sparc_sth_imm (code, sparc_i0 + ainfo->reg, inst->inst_basereg, stack_offset);
4529 if ((stack_offset - ARGS_OFFSET) & 0x4)
4530 sparc_st_imm (code, sparc_i0 + ainfo->reg, inst->inst_basereg, stack_offset);
4533 sparc_stx_imm (code, sparc_i0 + ainfo->reg, inst->inst_basereg, stack_offset);
4535 sparc_st_imm (code, sparc_i0 + ainfo->reg, inst->inst_basereg, stack_offset);
4539 if ((ainfo->storage == ArgInIRegPair) && (inst->opcode != OP_REGVAR)) {
4543 /* Argument in regpair, but need to be saved to stack */
4544 if (!sparc_is_imm13 (inst->inst_offset + 4))
4546 sparc_st_imm (code, sparc_i0 + ainfo->reg, inst->inst_basereg, inst->inst_offset);
4547 sparc_st_imm (code, sparc_i0 + ainfo->reg + 1, inst->inst_basereg, inst->inst_offset + 4);
4549 else if ((ainfo->storage == ArgInFloatReg) && (inst->opcode != OP_REGVAR)) {
4550 if (!sparc_is_imm13 (stack_offset))
4552 sparc_stf_imm (code, ainfo->reg, inst->inst_basereg, inst->inst_offset);
4554 else if ((ainfo->storage == ArgInDoubleReg) && (inst->opcode != OP_REGVAR)) {
4555 /* The offset is guaranteed to be aligned by the ABI rules */
4556 sparc_stdf_imm (code, ainfo->reg, inst->inst_basereg, inst->inst_offset);
4559 if ((ainfo->storage == ArgInFloatReg) && (inst->opcode == OP_REGVAR)) {
/* Widen the single-precision argument to the allocated double reg */
4560 /* Need to move into the a double precision register */
4561 sparc_fstod (code, ainfo->reg, ainfo->reg - 1);
4564 if ((ainfo->storage == ArgInSplitRegStack) || (ainfo->storage == ArgOnStack))
4565 if (inst->opcode == OP_REGVAR)
4566 /* FIXME: Load the argument into memory */
/* LMF setup: record ip/sp/fp/method so stack walks can cross this frame */
4572 if (cfg->method->save_lmf) {
4573 gint32 lmf_offset = STACK_BIAS - cfg->arch.lmf_offset;
4576 mono_add_patch_info (cfg, (guint8*)code - cfg->native_code, MONO_PATCH_INFO_IP, NULL);
4577 sparc_set_template (code, sparc_o7);
4578 sparc_sti_imm (code, sparc_o7, sparc_fp, lmf_offset + G_STRUCT_OFFSET (MonoLMF, ip));
4580 sparc_sti_imm (code, sparc_sp, sparc_fp, lmf_offset + G_STRUCT_OFFSET (MonoLMF, sp));
4582 sparc_sti_imm (code, sparc_fp, sparc_fp, lmf_offset + G_STRUCT_OFFSET (MonoLMF, ebp));
4584 /* FIXME: add a relocation for this */
4585 sparc_set (code, cfg->method, sparc_o7);
4586 sparc_sti_imm (code, sparc_o7, sparc_fp, lmf_offset + G_STRUCT_OFFSET (MonoLMF, method));
4588 mono_add_patch_info (cfg, (guint8*)code - cfg->native_code, MONO_PATCH_INFO_INTERNAL_METHOD,
4589 (gpointer)"mono_arch_get_lmf_addr");
4592 code = (guint32*)mono_sparc_emit_save_lmf (code, lmf_offset);
4595 if (mono_jit_trace_calls != NULL && mono_trace_eval (method))
4596 code = mono_arch_instrument_prolog (cfg, mono_trace_enter_method, code, TRUE);
4598 cfg->code_len = (guint8*)code - cfg->native_code;
4600 g_assert (cfg->code_len <= cfg->code_size);
4602 return (guint8*)code;
/*
 * mono_arch_emit_epilog:
 *
 *   Emit the method epilog: optional trace-leave call, LMF teardown,
 * the return (`jmpl`/`ret`) and the `restore` that closes the register
 * window. When possible the last `or`-into-%i0 instruction is folded
 * into the restore's delay-slot form to save an instruction.
 * The code buffer is grown first so emission cannot overflow it.
 */
4606 mono_arch_emit_epilog (MonoCompile *cfg)
4608 MonoMethod *method = cfg->method;
4611 int max_epilog_size = 16 + 20 * 4;
4613 if (cfg->method->save_lmf)
4614 max_epilog_size += 128;
4616 if (mono_jit_trace_calls != NULL)
4617 max_epilog_size += 50;
4619 if (cfg->prof_options & MONO_PROFILE_ENTER_LEAVE)
4620 max_epilog_size += 50;
/* Grow the buffer until the worst-case epilog fits */
4622 while (cfg->code_len + max_epilog_size > (cfg->code_size - 16)) {
4623 cfg->code_size *= 2;
4624 cfg->native_code = g_realloc (cfg->native_code, cfg->code_size);
4625 mono_jit_stats.code_reallocs++;
4628 code = (guint32*)(cfg->native_code + cfg->code_len);
4630 if (mono_jit_trace_calls != NULL && mono_trace_eval (method))
4631 code = mono_arch_instrument_epilog (cfg, mono_trace_leave_method, code, TRUE);
4633 if (cfg->method->save_lmf) {
4634 gint32 lmf_offset = STACK_BIAS - cfg->arch.lmf_offset;
4636 code = mono_sparc_emit_restore_lmf (code, lmf_offset);
4640 * The V8 ABI requires that calls to functions which return a structure
4643 if (!v64 && cfg->method->signature->pinvoke && MONO_TYPE_ISSTRUCT(cfg->method->signature->ret))
/* V8 struct-return: skip the `unimp` word after the call site (ret+12) */
4644 sparc_jmpl_imm (code, sparc_i7, 12, sparc_g0);
4648 /* Only fold last instruction into the restore if the exit block has an in count of 1
4649 and the previous block hasn't been optimized away since it may have an in count > 1 */
4650 if (cfg->bb_exit->in_count == 1 && cfg->bb_exit->in_bb[0]->native_offset != cfg->bb_exit->native_offset)
4653 /* Try folding last instruction into the restore */
/* Match `or reg, imm13, %i0` (op 0x2, op3 0x2, immediate form) */
4654 if (can_fold && (sparc_inst_op (code [-2]) == 0x2) && (sparc_inst_op3 (code [-2]) == 0x2) && sparc_inst_imm (code [-2]) && (sparc_inst_rd (code [-2]) == sparc_i0)) {
4655 /* or reg, imm, %i0 */
4656 int reg = sparc_inst_rs1 (code [-2]);
4657 int imm = sparc_inst_imm13 (code [-2]);
4658 code [-2] = code [-1];
4660 sparc_restore_imm (code, reg, imm, sparc_o0);
4663 if (can_fold && (sparc_inst_op (code [-2]) == 0x2) && (sparc_inst_op3 (code [-2]) == 0x2) && (!sparc_inst_imm (code [-2])) && (sparc_inst_rd (code [-2]) == sparc_i0)) {
4664 /* or reg, reg, %i0 */
4665 int reg1 = sparc_inst_rs1 (code [-2]);
4666 int reg2 = sparc_inst_rs2 (code [-2]);
4667 code [-2] = code [-1];
4669 sparc_restore (code, reg1, reg2, sparc_o0);
/* No fold possible: plain `restore %g0, 0, %g0` */
4672 sparc_restore_imm (code, sparc_g0, 0, sparc_g0);
4674 cfg->code_len = (guint8*)code - cfg->native_code;
4676 g_assert (cfg->code_len < cfg->code_size);
4681 mono_arch_emit_exceptions (MonoCompile *cfg)
4683 MonoJumpInfo *patch_info;
4688 MonoClass *exc_classes [16];
4689 guint8 *exc_throw_start [16], *exc_throw_end [16];
4691 /* Compute needed space */
4692 for (patch_info = cfg->patch_info; patch_info; patch_info = patch_info->next) {
4693 if (patch_info->type == MONO_PATCH_INFO_EXC)
4698 * make sure we have enough space for exceptions
4701 code_size = exc_count * (20 * 4);
4703 code_size = exc_count * 24;
4706 while (cfg->code_len + code_size > (cfg->code_size - 16)) {
4707 cfg->code_size *= 2;
4708 cfg->native_code = g_realloc (cfg->native_code, cfg->code_size);
4709 mono_jit_stats.code_reallocs++;
4712 code = (guint32*)(cfg->native_code + cfg->code_len);
4714 for (patch_info = cfg->patch_info; patch_info; patch_info = patch_info->next) {
4715 switch (patch_info->type) {
4716 case MONO_PATCH_INFO_EXC: {
4717 MonoClass *exc_class;
4718 guint32 *buf, *buf2;
4719 guint32 throw_ip, type_idx;
4722 sparc_patch ((guint32*)(cfg->native_code + patch_info->ip.i), code);
4724 exc_class = mono_class_from_name (mono_defaults.corlib, "System", patch_info->data.name);
4725 type_idx = exc_class->type_token - MONO_TOKEN_TYPE_DEF;
4726 g_assert (exc_class);
4727 throw_ip = patch_info->ip.i;
4729 /* Find a throw sequence for the same exception class */
4730 for (i = 0; i < nthrows; ++i)
4731 if (exc_classes [i] == exc_class)
4735 guint32 throw_offset = (((guint8*)exc_throw_end [i] - cfg->native_code) - throw_ip) >> 2;
4736 if (!sparc_is_imm13 (throw_offset))
4737 sparc_set32 (code, throw_offset, sparc_o1);
4739 disp = (exc_throw_start [i] - (guint8*)code) >> 2;
4740 g_assert (sparc_is_imm22 (disp));
4741 sparc_branch (code, 0, sparc_ba, disp);
4742 if (sparc_is_imm13 (throw_offset))
4743 sparc_set32 (code, throw_offset, sparc_o1);
4746 patch_info->type = MONO_PATCH_INFO_NONE;
4749 /* Emit the template for setting o1 */
4751 if (sparc_is_imm13 (((((guint8*)code - cfg->native_code) - throw_ip) >> 2) - 8))
4752 /* Can use a short form */
4755 sparc_set_template (code, sparc_o1);
4759 exc_classes [nthrows] = exc_class;
4760 exc_throw_start [nthrows] = (guint8*)code;
4764 mono_add_patch_info (cfg, (guint8*)code - cfg->native_code, MONO_PATCH_INFO_ABS, mono_sparc_break);
4768 /* first arg = type token */
4769 /* Pass the type index to reduce the size of the sparc_set */
4770 if (!sparc_is_imm13 (type_idx))
4771 sparc_set32 (code, type_idx, sparc_o0);
4773 /* second arg = offset between the throw ip and the current ip */
4774 /* On sparc, the saved ip points to the call instruction */
4775 disp = (((guint8*)code - cfg->native_code) - throw_ip) >> 2;
4776 sparc_set32 (buf, disp, sparc_o1);
4781 exc_throw_end [nthrows] = (guint8*)code;
4785 patch_info->data.name = "mono_arch_throw_corlib_exception";
4786 patch_info->type = MONO_PATCH_INFO_INTERNAL_METHOD;
4787 patch_info->ip.i = (guint8*)code - cfg->native_code;
4791 if (sparc_is_imm13 (type_idx)) {
4792 /* Put it into the delay slot */
4795 sparc_set32 (code, type_idx, sparc_o0);
4796 g_assert (code - buf == 1);
4807 cfg->code_len = (guint8*)code - cfg->native_code;
4809 g_assert (cfg->code_len < cfg->code_size);
/* TLS key holding the per-thread LMF address; created lazily in
 * mono_arch_setup_jit_tls_data (). Solaris thr_* TLS is used when
 * MONO_SPARC_THR_TLS is defined, pthreads otherwise. */
4813 gboolean lmf_addr_key_inited = FALSE;
4815 #ifdef MONO_SPARC_THR_TLS
4816 thread_key_t lmf_addr_key;
4818 pthread_key_t lmf_addr_key;
/*
 * mono_arch_get_lmf_addr:
 *
 *   Return the calling thread's LMF pointer address stored under
 * lmf_addr_key. Reads TLS directly (thr_getspecific / pthread_getspecific)
 * instead of going through the runtime's I/O layer, since this is on a
 * hot path.
 */
4822 mono_arch_get_lmf_addr (void)
4824 /* This is perf critical so we bypass the IO layer */
4825 /* The thr_... functions seem to be somewhat faster */
4826 #ifdef MONO_SPARC_THR_TLS
4828 thr_getspecific (lmf_addr_key, &res);
4831 return pthread_getspecific (lmf_addr_key);
/*
 * mono_arch_setup_jit_tls_data:
 *
 *   Per-thread JIT initialization: install an alternate signal stack
 * (when MONO_ARCH_SIGSEGV_ON_ALTSTACK is enabled) so stack-overflow
 * SIGSEGVs can be handled, create the lmf_addr TLS key on first use,
 * and store this thread's &tls->lmf under it.
 * NOTE(review): the lmf_addr_key_inited check-then-create is not
 * synchronized here; presumably callers serialize thread attach — verify.
 */
4836 mono_arch_setup_jit_tls_data (MonoJitTlsData *tls)
4838 #ifdef MONO_ARCH_SIGSEGV_ON_ALTSTACK
4840 struct sigaltstack sa;
4845 printf ("SIGALT!\n");
4846 /* Setup an alternate signal stack */
4847 tls->signal_stack = g_malloc (SIGNAL_STACK_SIZE);
4848 tls->signal_stack_size = SIGNAL_STACK_SIZE;
4851 sa.ss_sp = tls->signal_stack;
4852 sa.ss_size = SIGNAL_STACK_SIZE;
4854 g_assert (sigaltstack (&sa, NULL) == 0);
4856 sigstk.ss_sp = tls->signal_stack;
4857 sigstk.ss_size = SIGNAL_STACK_SIZE;
4858 sigstk.ss_flags = 0;
4859 g_assert (sigaltstack (&sigstk, NULL) == 0);
/* Lazily create the process-wide TLS key for the LMF address */
4863 if (!lmf_addr_key_inited) {
4866 lmf_addr_key_inited = TRUE;
4868 #ifdef MONO_SPARC_THR_TLS
4869 res = thr_keycreate (&lmf_addr_key, NULL);
4871 res = pthread_key_create (&lmf_addr_key, NULL);
4873 g_assert (res == 0);
/* Publish this thread's LMF slot address */
4877 #ifdef MONO_SPARC_THR_TLS
4878 thr_setspecific (lmf_addr_key, &tls->lmf);
4880 pthread_setspecific (lmf_addr_key, &tls->lmf);
/*
 * mono_arch_free_jit_tls_data:
 *
 *   Per-thread JIT teardown counterpart of mono_arch_setup_jit_tls_data ().
 */
4885 mono_arch_free_jit_tls_data (MonoJitTlsData *tls)
/*
 * mono_arch_emit_this_vret_args:
 *
 *   Emit the instructions which set up the `this` pointer and the
 * valuetype-return pointer for a call. By default `this` goes in %o0;
 * when a vtype return pointer is passed in a register it takes %o0 and
 * `this` shifts to %o1. Otherwise the vret pointer is stored to the
 * ABI's struct-return stack slot at %sp+64.
 */
4890 mono_arch_emit_this_vret_args (MonoCompile *cfg, MonoCallInst *inst, int this_reg, int this_type, int vt_reg)
4892 int this_out_reg = sparc_o0;
4897 MONO_INST_NEW (cfg, ins, OP_SETREG);
4898 ins->sreg1 = vt_reg;
4899 ins->dreg = sparc_o0;
4900 mono_bblock_add_inst (cfg->cbb, ins);
4901 this_out_reg = sparc_o1;
4903 /* Set the 'struct/union return pointer' location on the stack */
4904 MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, sparc_sp, 64, vt_reg);
4908 /* add the this argument */
4909 if (this_reg != -1) {
4911 MONO_INST_NEW (cfg, this, OP_SETREG);
4912 this->type = this_type;
4913 this->sreg1 = this_reg;
4914 this->dreg = this_out_reg;
4915 mono_bblock_add_inst (cfg->cbb, this);
/*
 * mono_arch_get_inst_for_method:
 *
 *   Hook for replacing well-known methods with arch-specific intrinsic
 * instructions; none are provided by this backend.
 */
4921 mono_arch_get_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
4927 * mono_arch_get_argument_info:
4928 * @csig: a method signature
4929 * @param_count: the number of parameters to consider
4930 * @arg_info: an array to store the result infos
4932 * Gathers information on parameters such as size, alignment and
 * padding. arg_info should be large enough to hold param_count + 1 entries.
4935 * Returns the size of the activation frame.
4938 mono_arch_get_argument_info (MonoMethodSignature *csig, int param_count, MonoJitArgumentInfo *arg_info)
4944 cinfo = get_call_info (csig, FALSE);
/* Slot 0 describes the implicit `this` argument, when present */
4946 if (csig->hasthis) {
4947 ainfo = &cinfo->args [0];
4948 arg_info [0].offset = ARGS_OFFSET - MONO_SPARC_STACK_BIAS + ainfo->offset;
4951 for (k = 0; k < param_count; k++) {
4952 ainfo = &cinfo->args [k + csig->hasthis];
/* Offsets are unbiased so they match what the caller sees on the stack */
4954 arg_info [k + 1].offset = ARGS_OFFSET - MONO_SPARC_STACK_BIAS + ainfo->offset;
4955 arg_info [k + 1].size = mono_type_size (csig->params [k], &align);
/*
 * mono_arch_print_tree:
 *
 *   Arch hook for printing arch-specific instruction-tree nodes; this
 * backend has none.
 */
4964 mono_arch_print_tree (MonoInst *tree, int arity)
/*
 * mono_arch_get_domain_intrinsic:
 *
 *   Arch hook for an inlined "get current AppDomain" sequence; not
 * provided by this backend.
 */
4969 MonoInst* mono_arch_get_domain_intrinsic (MonoCompile* cfg)
4974 MonoInst* mono_arch_get_thread_intrinsic (MonoCompile* cfg)