* *: Merge the Linear IR branch.
The original branch is at trunk/branches/vargaz/mini-linear-il, and
the ChangeLog file there describes all the changes done over the years.
Further documentation can be found at www.mono-project.com/Linear_IL.
svn path=/trunk/mono/; revision=108479
+2008-07-22 Zoltan Varga <vargaz@gmail.com>
+
+ * *: Merge the Linear IR branch.
+
+ The original branch is at trunk/branches/vargaz/mini-linear-il, and
+ the ChangeLog file there describes all the changes done over the years.
+ Further documentation can be found at www.mono-project.com/Linear_IL.
+
2008-07-21 Bill Holmes <billholmes54@gmail.com>
* mini-amd64.c (get_call_info): Winx64 fix for passing floats.
common_sources = \
mini.c \
+ ir-emit.h \
+ method-to-ir.c \
+ decompose.c \
mini.h \
version.h \
optflags-def.h \
declsec.h \
wapihandles.c \
branch-opts.c \
- generic-sharing.c
+ generic-sharing.c \
+ ssa2.c \
+ abcremoval2.c \
+ regalloc2.c
test_sources = \
basic-calls.cs \
mono_debugger_sources =
endif
-regtests=basic.exe basic-float.exe basic-long.exe basic-calls.exe objects.exe arrays.exe basic-math.exe exceptions.exe iltests.exe devirtualization.exe generics.exe bench.exe
+regtests=basic.exe basic-float.exe basic-long.exe basic-calls.exe objects.exe arrays.exe basic-math.exe exceptions.exe iltests.exe devirtualization.exe generics.exe
common_BURGSRC= $(srcdir)/inssel.brg $(srcdir)/inssel-float.brg
if (bb->in_count == 1) { /* Should write the code to "sum" conditions... */
in_bb = bb->in_bb [0];
- branch = mono_inst_list_last (&in_bb->ins_list);
- if (branch == NULL)
- return;
+ branch = in_bb->last_ins;
+ if (branch == NULL) return;
branch_relation = get_relation_from_branch_instruction (branch->opcode);
if ((branch_relation != MONO_ANY_RELATION) && (branch->inst_left->opcode == OP_COMPARE)) {
MonoSummarizedValue left_value;
process_inst (current_inst, area);
}
+
if (TRACE_ABC_REMOVAL) {
printf ("Processing block %d [dfn %d] done.\n", bb->block_num, bb->dfn);
}
possibly_dead_assignments = alloca (cfg->num_varinfo * sizeof (MonoInst*));
if (LOG_DEADCE) {
- printf ("BEFORE DEADCE START\n");
- mono_print_code (cfg);
- printf ("BEFORE DEADCE END\n");
+ mono_print_code (cfg, "BEFORE DEADCE START");
}
#if (MONO_APPLY_DEADCE_TO_SINGLE_METHOD)
}
if (LOG_DEADCE) {
- printf ("AFTER DEADCE START\n");
- mono_print_code (cfg);
- printf ("AFTER DEADCE END\n");
+ mono_print_code (cfg, "AFTER DEADCE");
}
}
return 1;
else
return 0;
- }
+ }
+
+ // Regression test for int -> i1 (sbyte) truncation; the expected return
+ // value is encoded in the method name: (sbyte)0x100017f == 0x7f == 127.
+ public static int test_127_iconv_to_i1 () {
+ int i = 0x100017f;
+ sbyte s = (sbyte)i;
+
+ return s;
+ }
+
+ // Regression test for int -> i2 (short) truncation:
+ // (short)0x1000180 == 0x0180 == 384, matching the name prefix.
+ public static int test_384_iconv_to_i2 () {
+ int i = 0x1000180;
+ short s = (short)i;
+
+ return s;
+ }
public static int test_15_for_loop () {
int i;
return NULL;
}
+/* Lookup tables mapping a comparison relation (as returned by
+ * mono_opcode_to_cond ()) to the matching 32/64 bit CMOV opcode.
+ * NOTE(review): entry order must stay in sync with the CompRelation
+ * enum used by mono_opcode_to_cond () — confirm against mini.h. */
+static const int int_cmov_opcodes [] = {
+ OP_CMOV_IEQ,
+ OP_CMOV_INE_UN,
+ OP_CMOV_ILE,
+ OP_CMOV_IGE,
+ OP_CMOV_ILT,
+ OP_CMOV_IGT,
+ OP_CMOV_ILE_UN,
+ OP_CMOV_IGE_UN,
+ OP_CMOV_ILT_UN,
+ OP_CMOV_IGT_UN
+};
+
+/* 64 bit variants, indexed identically to int_cmov_opcodes. */
+static const int long_cmov_opcodes [] = {
+ OP_CMOV_LEQ,
+ OP_CMOV_LNE_UN,
+ OP_CMOV_LLE,
+ OP_CMOV_LGE,
+ OP_CMOV_LLT,
+ OP_CMOV_LGT,
+ OP_CMOV_LLE_UN,
+ OP_CMOV_LGE_UN,
+ OP_CMOV_LLT_UN,
+ OP_CMOV_LGT_UN
+};
+
+/*
+ * mono_if_conversion:
+ *
+ *   Replace simple conditional-branch diamonds (cond ? a : b) and triangles
+ *   (if (cond) var = a) in the CFG with conditional-move (CMOV) instructions.
+ *   Compiles to a no-op unless MONO_ARCH_HAVE_CMOV_OPS is defined, and
+ *   returns early unless the MONO_OPT_CMOV optimization flag is enabled.
+ */
+void
+mono_if_conversion (MonoCompile *cfg)
+{
+#ifdef MONO_ARCH_HAVE_CMOV_OPS
+ MonoBasicBlock *bb;
+ gboolean changed = FALSE;
+
+ if (!(cfg->opt & MONO_OPT_CMOV))
+ return;
+
+ // FIXME: Make this work with extended bblocks
+
+ /*
+ * This pass requires somewhat optimized IR code so it should be run after
+ * local cprop/deadce. Also, it should be run before dominator computation, since
+ * it changes control flow.
+ */
+ for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
+ MonoBasicBlock *bb1, *bb2;
+
+ restart:
+ /* Look for the IR code generated from cond ? a : b
+ * which is:
+ * BB:
+ * b<cond> [BB1BB2]
+ * BB1:
+ * <var> <- <a>
+ * br BB3
+ * BB2:
+ * <var> <- <b>
+ * br BB3
+ */
+ if (!(bb->out_count == 2 && !bb->extended))
+ continue;
+
+ bb1 = bb->out_bb [0];
+ bb2 = bb->out_bb [1];
+
+ /* Diamond shape: both arms are single-entry/single-exit and meet at the same join block */
+ if (bb1->in_count == 1 && bb2->in_count == 1 && bb1->out_count == 1 && bb2->out_count == 1 && bb1->out_bb [0] == bb2->out_bb [0]) {
+ MonoInst *prev, *compare, *branch, *ins1, *ins2, *cmov, *move, *tmp;
+ gboolean simple, ret;
+ int dreg, tmp_reg;
+ CompType comp_type;
+
+ /*
+ * Check that bb1 and bb2 are 'simple' and both assign to the same
+ * variable.
+ */
+ /* FIXME: Get rid of the nops earlier */
+ ins1 = bb1->code;
+ while (ins1 && ins1->opcode == OP_NOP)
+ ins1 = ins1->next;
+ ins2 = bb2->code;
+ while (ins2 && ins2->opcode == OP_NOP)
+ ins2 = ins2->next;
+ if (!(ins1 && ins2 && ins1->dreg == ins2->dreg && ins1->dreg != -1))
+ continue;
+
+ /* 'simple' means the arm holds exactly one real instruction plus nops/branch */
+ simple = TRUE;
+ for (tmp = ins1->next; tmp; tmp = tmp->next)
+ if (!((tmp->opcode == OP_NOP) || (tmp->opcode == OP_BR)))
+ simple = FALSE;
+
+ for (tmp = ins2->next; tmp; tmp = tmp->next)
+ if (!((tmp->opcode == OP_NOP) || (tmp->opcode == OP_BR)))
+ simple = FALSE;
+
+ if (!simple)
+ continue;
+
+ /* We move ins1/ins2 before the compare so they should have no side effect */
+ if (!(MONO_INS_HAS_NO_SIDE_EFFECT (ins1) && MONO_INS_HAS_NO_SIDE_EFFECT (ins2)))
+ continue;
+
+ if (bb->last_ins && (bb->last_ins->opcode == OP_BR_REG || bb->last_ins->opcode == OP_BR))
+ continue;
+
+ /* Find the compare instruction */
+ /* FIXME: Optimize this using prev */
+ prev = NULL;
+ compare = bb->code;
+ g_assert (compare);
+ while (compare->next && !MONO_IS_COND_BRANCH_OP (compare->next)) {
+ prev = compare;
+ compare = compare->next;
+ }
+ g_assert (compare->next && MONO_IS_COND_BRANCH_OP (compare->next));
+ branch = compare->next;
+
+ /* Moving ins1/ins2 could change the comparison */
+ /* FIXME: */
+ if (!((compare->sreg1 != ins1->dreg) && (compare->sreg2 != ins1->dreg)))
+ continue;
+
+ /* FIXME: */
+ comp_type = mono_opcode_to_type (branch->opcode, compare->opcode);
+ if (!((comp_type == CMP_TYPE_I) || (comp_type == CMP_TYPE_L)))
+ continue;
+
+ /* FIXME: */
+ /* ins->type might not be set */
+ if (INS_INFO (ins1->opcode) [MONO_INST_DEST] != 'i')
+ continue;
+
+ if (cfg->verbose_level > 2) {
+ printf ("\tBranch -> CMove optimization in BB%d on\n", bb->block_num);
+ printf ("\t\t"); mono_print_ins (compare);
+ printf ("\t\t"); mono_print_ins (compare->next);
+ printf ("\t\t"); mono_print_ins (ins1);
+ printf ("\t\t"); mono_print_ins (ins2);
+ }
+
+ changed = TRUE;
+
+ //printf ("HIT!\n");
+
+ /* Assignments to the return register must remain at the end of bbs */
+ if (cfg->ret)
+ ret = ins1->dreg == cfg->ret->dreg;
+ else
+ ret = FALSE;
+
+ tmp_reg = mono_alloc_dreg (cfg, STACK_I4);
+ dreg = ins1->dreg;
+
+ /* Rewrite ins1 to emit to tmp_reg */
+ ins1->dreg = tmp_reg;
+
+ if (ret) {
+ dreg = mono_alloc_dreg (cfg, STACK_I4);
+ ins2->dreg = dreg;
+ }
+
+ /* Remove ins1/ins2 from bb1/bb2 */
+ MONO_REMOVE_INS (bb1, ins1);
+ MONO_REMOVE_INS (bb2, ins2);
+
+ /* Move ins1 and ins2 before the comparison */
+ /* ins1 comes first to avoid ins1 overwriting an argument of ins2 */
+ mono_bblock_insert_before_ins (bb, compare, ins2);
+ mono_bblock_insert_before_ins (bb, ins2, ins1);
+
+ /* Add cmov instruction: dreg <- (cond) ? tmp_reg : dreg */
+ MONO_INST_NEW (cfg, cmov, OP_NOP);
+ cmov->dreg = dreg;
+ cmov->sreg1 = dreg;
+ cmov->sreg2 = tmp_reg;
+ switch (mono_opcode_to_type (branch->opcode, compare->opcode)) {
+ case CMP_TYPE_I:
+ cmov->opcode = int_cmov_opcodes [mono_opcode_to_cond (branch->opcode)];
+ break;
+ case CMP_TYPE_L:
+ cmov->opcode = long_cmov_opcodes [mono_opcode_to_cond (branch->opcode)];
+ break;
+ default:
+ g_assert_not_reached ();
+ }
+ mono_bblock_insert_after_ins (bb, compare, cmov);
+
+ if (ret) {
+ /* Add an extra move */
+ MONO_INST_NEW (cfg, move, OP_MOVE);
+ move->dreg = cfg->ret->dreg;
+ move->sreg1 = dreg;
+ mono_bblock_insert_after_ins (bb, cmov, move);
+ }
+
+ /* Rewrite the branch: bb now jumps unconditionally to the join block */
+ branch->opcode = OP_BR;
+ branch->inst_target_bb = bb1->out_bb [0];
+ mono_link_bblock (cfg, bb, branch->inst_target_bb);
+
+ /* Reorder bblocks */
+ mono_unlink_bblock (cfg, bb, bb1);
+ mono_unlink_bblock (cfg, bb, bb2);
+ mono_unlink_bblock (cfg, bb1, bb1->out_bb [0]);
+ mono_unlink_bblock (cfg, bb2, bb2->out_bb [0]);
+ mono_remove_bblock (cfg, bb1);
+ mono_remove_bblock (cfg, bb2);
+
+ /* Merge bb and its successor if possible */
+ if ((bb->out_bb [0]->in_count == 1) && (bb->out_bb [0] != cfg->bb_exit) &&
+ (bb->region == bb->out_bb [0]->region)) {
+ mono_merge_basic_blocks (cfg, bb, bb->out_bb [0]);
+ goto restart;
+ }
+ }
+
+ /* Look for the IR code generated from if (cond) <var> <- <a>
+ * which is:
+ * BB:
+ * b<cond> [BB1BB2]
+ * BB1:
+ * <var> <- <a>
+ * br BB2
+ */
+
+ if ((bb2->in_count == 1 && bb2->out_count == 1 && bb2->out_bb [0] == bb1) ||
+ (bb1->in_count == 1 && bb1->out_count == 1 && bb1->out_bb [0] == bb2)) {
+ MonoInst *prev, *compare, *branch, *ins1, *cmov, *tmp;
+ gboolean simple;
+ int dreg, tmp_reg;
+ CompType comp_type;
+ CompRelation cond;
+ MonoBasicBlock *next_bb, *code_bb;
+
+ /* code_bb is the bblock containing code, next_bb is the successor bblock */
+ if (bb2->in_count == 1 && bb2->out_count == 1 && bb2->out_bb [0] == bb1) {
+ code_bb = bb2;
+ next_bb = bb1;
+ } else {
+ code_bb = bb1;
+ next_bb = bb2;
+ }
+
+ ins1 = code_bb->code;
+
+ if (!ins1)
+ continue;
+
+ /* Check that code_bb is simple */
+ simple = TRUE;
+ for (tmp = ins1->next; tmp; tmp = tmp->next)
+ if (!((tmp->opcode == OP_NOP) || (tmp->opcode == OP_BR)))
+ simple = FALSE;
+
+ if (!simple)
+ continue;
+
+ /* We move ins1 before the compare so it should have no side effect */
+ if (!MONO_INS_HAS_NO_SIDE_EFFECT (ins1))
+ continue;
+
+ if (bb->last_ins && bb->last_ins->opcode == OP_BR_REG)
+ continue;
+
+ /* Find the compare instruction */
+ /* FIXME: Optimize this using prev */
+ prev = NULL;
+ compare = bb->code;
+ g_assert (compare);
+ while (compare->next && !MONO_IS_COND_BRANCH_OP (compare->next)) {
+ prev = compare;
+ compare = compare->next;
+ }
+ g_assert (compare->next && MONO_IS_COND_BRANCH_OP (compare->next));
+ branch = compare->next;
+
+ /* FIXME: */
+ comp_type = mono_opcode_to_type (branch->opcode, compare->opcode);
+ if (!((comp_type == CMP_TYPE_I) || (comp_type == CMP_TYPE_L)))
+ continue;
+
+ /* FIXME: */
+ /* ins->type might not be set */
+ if (INS_INFO (ins1->opcode) [MONO_INST_DEST] != 'i')
+ continue;
+
+ /* FIXME: */
+ if (cfg->ret && ins1->dreg == cfg->ret->dreg)
+ continue;
+
+ if (cfg->verbose_level > 2) {
+ printf ("\tBranch -> CMove optimization (2) in BB%d on\n", bb->block_num);
+ printf ("\t\t"); mono_print_ins (compare);
+ printf ("\t\t"); mono_print_ins (compare->next);
+ printf ("\t\t"); mono_print_ins (ins1);
+ }
+
+ changed = TRUE;
+
+ //printf ("HIT!\n");
+
+ tmp_reg = mono_alloc_dreg (cfg, STACK_I4);
+ dreg = ins1->dreg;
+
+ /* Rewrite ins1 to emit to tmp_reg */
+ ins1->dreg = tmp_reg;
+
+ /* Remove ins1 from code_bb */
+ MONO_REMOVE_INS (code_bb, ins1);
+
+ /* Move ins1 before the comparison */
+ mono_bblock_insert_before_ins (bb, compare, ins1);
+
+ /* Add cmov instruction */
+ MONO_INST_NEW (cfg, cmov, OP_NOP);
+ cmov->dreg = dreg;
+ cmov->sreg1 = dreg;
+ cmov->sreg2 = tmp_reg;
+ cond = mono_opcode_to_cond (branch->opcode);
+ /* The cmov must fire exactly when control would have entered code_bb */
+ if (branch->inst_false_bb == code_bb)
+ cond = mono_negate_cond (cond);
+ switch (mono_opcode_to_type (branch->opcode, compare->opcode)) {
+ case CMP_TYPE_I:
+ cmov->opcode = int_cmov_opcodes [cond];
+ break;
+ case CMP_TYPE_L:
+ cmov->opcode = long_cmov_opcodes [cond];
+ break;
+ default:
+ g_assert_not_reached ();
+ }
+ mono_bblock_insert_after_ins (bb, compare, cmov);
+
+ /* Rewrite the branch */
+ branch->opcode = OP_BR;
+ branch->inst_target_bb = next_bb;
+ mono_link_bblock (cfg, bb, branch->inst_target_bb);
+
+ /* Nullify the branch at the end of code_bb */
+ if (code_bb->code) {
+ branch = code_bb->code;
+ MONO_DELETE_INS (code_bb, branch);
+ }
+
+ /* Reorder bblocks */
+ mono_unlink_bblock (cfg, bb, code_bb);
+ mono_unlink_bblock (cfg, code_bb, next_bb);
+
+ /* Merge bb and its successor if possible */
+ if ((bb->out_bb [0]->in_count == 1) && (bb->out_bb [0] != cfg->bb_exit) &&
+ (bb->region == bb->out_bb [0]->region)) {
+ mono_merge_basic_blocks (cfg, bb, bb->out_bb [0]);
+ goto restart;
+ }
+ }
+ }
+
+#if 0
+ for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
+ MonoBasicBlock *bb1, *bb2;
+ MonoInst *prev, *compare, *branch, *ins1, *ins2, *cmov, *move, *tmp;
+ gboolean simple, ret;
+ int dreg, tmp_reg;
+ CompType comp_type;
+
+ /* Look for the IR code generated from if (cond) <var> <- <a>
+ * after branch opts which is:
+ * BB:
+ * compare
+ * b<cond> [BB1]
+ * <var> <- <a>
+ * BB1:
+ */
+ if (!(bb->out_count == 1 && bb->extended && bb->code && bb->code->next && bb->code->next->next))
+ continue;
+
+ mono_print_bb (bb, "");
+
+ /* Find the compare instruction */
+ prev = NULL;
+ compare = bb->code;
+ g_assert (compare);
+ while (compare->next->next && compare->next->next != bb->last_ins) {
+ prev = compare;
+ compare = compare->next;
+ }
+ branch = compare->next;
+ if (!MONO_IS_COND_BRANCH_OP (branch))
+ continue;
+ }
+#endif
+
+ /* The CFG edits above can invalidate earlier analyses; re-run cleanups */
+ if (changed) {
+ if (cfg->opt & MONO_OPT_BRANCH)
+ mono_optimize_branches (cfg);
+ /* Merging bblocks could make some variables local */
+ mono_handle_global_vregs (cfg);
+ if (cfg->opt & (MONO_OPT_CONSPROP | MONO_OPT_COPYPROP))
+ mono_local_cprop2 (cfg);
+ mono_local_deadce (cfg);
+ }
+#endif
+}
} \
return;
-#define FOLD_BINOPA(name,op,cast) \
- case name: \
- if (inst->inst_i0->opcode != OP_ICONST) \
- return; \
- if (inst->inst_i1->opcode == OP_ICONST) { \
- inst->opcode = OP_ICONST; \
- inst->inst_c0 = (cast)inst->inst_i0->inst_c0 op (cast)inst->inst_i1->inst_c0; \
- } \
- return;
-
#define FOLD_CXX(name,op,cast) \
case name: \
if (inst->inst_i0->opcode != OP_COMPARE) \
return BRANCH_UNDEF;
}
+/* Fallback for glib versions that do not define G_MININT32. */
+#ifndef G_MININT32
+#define MYGINT32_MAX 2147483647
+#define G_MININT32 (-MYGINT32_MAX -1)
+#endif
+
+/* FOLD_* helpers: each expands to one switch case that stores the folded
+ * constant into dest->inst_c0 (or into `res` for the compare variants).
+ * The _IMM variants read the second operand from ins->inst_imm; the C
+ * variants apply a cast to both operands before the operation. */
+#define FOLD_UNOP2(name,op) \
+ case name: \
+ dest->inst_c0 = op arg1->inst_c0; \
+ break;
+
+#define FOLD_BINOP2(name, op) \
+ case name: \
+ dest->inst_c0 = arg1->inst_c0 op arg2->inst_c0; \
+ break;
+
+#define FOLD_BINOPC2(name,op,cast) \
+ case name: \
+ dest->inst_c0 = (cast)arg1->inst_c0 op (cast)arg2->inst_c0; \
+ break;
+
+#define FOLD_BINOP2_IMM(name, op) \
+ case name: \
+ dest->inst_c0 = arg1->inst_c0 op ins->inst_imm; \
+ break;
+
+#define FOLD_BINOPC2_IMM(name, op, cast) \
+ case name: \
+ dest->inst_c0 = (cast)arg1->inst_c0 op (cast)ins->inst_imm; \
+ break;
+
+/* NOTE(review): the trailing backslash after `break;` below swallows the
+ * following blank line into the macro — harmless, but confirm it is intended. */
+#define FOLD_BINOPCXX2(name,op,cast) \
+ case name: \
+ res = (cast)arg1->inst_c0 op (cast)arg2->inst_c0; \
+ break; \
+
+/* Shadows the global MONO_INST_NEW — presumably a leaner, pool-allocating
+ * version for this pass that only resets the fields used here (verify). */
+#undef MONO_INST_NEW
+#define MONO_INST_NEW(cfg,dest,op) do { \
+ (dest) = mono_mempool_alloc ((cfg)->mempool, sizeof (MonoInst)); \
+ (dest)->inst_p0 = (dest)->inst_p1 = (dest)->next = NULL; \
+ (dest)->opcode = (op); \
+ (dest)->flags = 0; \
+ (dest)->dreg = (dest)->sreg1 = (dest)->sreg2 = -1; \
+ } while (0)
+
+/* Lazily allocate DEST unless the caller asked to overwrite INS in place. */
+#define ALLOC_DEST(cfg, dest, ins) do { \
+ if (!(dest)) { \
+ MONO_INST_NEW ((cfg), (dest), -1); \
+ (dest)->dreg = (ins)->dreg; \
+ } \
+} while (0)
+
+/**
+ * mono_constant_fold_ins2:
+ *
+ * Perform constant folding on INS, using ARG1 and ARG2 as the arguments. If OVERWRITE is
+ * true, then store the result back into INS and return INS. Otherwise allocate a new ins,
+ * store the result into it and return it. If constant folding cannot be performed, return
+ * NULL.
+ */
+MonoInst*
+mono_constant_fold_ins2 (MonoCompile *cfg, MonoInst *ins, MonoInst *arg1, MonoInst *arg2, gboolean overwrite)
+{
+ MonoInst *dest = NULL;
+
+ if (overwrite)
+ dest = ins;
+
+ switch (ins->opcode) {
+ /* Commutative int ops: fold when both args are constant; otherwise, if only
+ * arg1 is constant, swap it into the _imm form so later passes can fold. */
+ case OP_IMUL:
+ case OP_IADD:
+ case OP_IAND:
+ case OP_IOR:
+ case OP_IXOR:
+ if (arg2->opcode == OP_ICONST) {
+ if (arg1->opcode == OP_ICONST) {
+ ALLOC_DEST (cfg, dest, ins);
+ switch (ins->opcode) {
+ FOLD_BINOP2 (OP_IMUL, *);
+ FOLD_BINOP2 (OP_IADD, +);
+ FOLD_BINOP2 (OP_IAND, &);
+ FOLD_BINOP2 (OP_IOR, |);
+ FOLD_BINOP2 (OP_IXOR, ^);
+ }
+ dest->opcode = OP_ICONST;
+ dest->sreg1 = dest->sreg2 = -1;
+ }
+ } else if (arg1->opcode == OP_ICONST) {
+ /*
+ * This is commutative so swap the arguments, allowing the _imm variant
+ * to be used later.
+ */
+ if (mono_op_to_op_imm (ins->opcode) != -1) {
+ ALLOC_DEST (cfg, dest, ins);
+ dest->opcode = mono_op_to_op_imm (ins->opcode);
+ dest->sreg1 = ins->sreg2;
+ dest->sreg2 = -1;
+ dest->inst_imm = arg1->inst_c0;
+ }
+ }
+ break;
+ case OP_IMUL_IMM:
+ case OP_IADD_IMM:
+ case OP_IAND_IMM:
+ case OP_IOR_IMM:
+ case OP_IXOR_IMM:
+ case OP_ISUB_IMM:
+ case OP_ISHL_IMM:
+ case OP_ISHR_IMM:
+ case OP_ISHR_UN_IMM:
+ case OP_SHL_IMM:
+ if (arg1->opcode == OP_ICONST) {
+ ALLOC_DEST (cfg, dest, ins);
+ switch (ins->opcode) {
+ FOLD_BINOP2_IMM (OP_IMUL_IMM, *);
+ FOLD_BINOP2_IMM (OP_IADD_IMM, +);
+ FOLD_BINOP2_IMM (OP_IAND_IMM, &);
+ FOLD_BINOP2_IMM (OP_IOR_IMM, |);
+ FOLD_BINOP2_IMM (OP_IXOR_IMM, ^);
+ FOLD_BINOP2_IMM (OP_ISUB_IMM, -);
+ FOLD_BINOP2_IMM (OP_ISHL_IMM, <<);
+ FOLD_BINOP2_IMM (OP_ISHR_IMM, >>);
+ FOLD_BINOPC2_IMM (OP_ISHR_UN_IMM, >>, guint32);
+ FOLD_BINOP2_IMM (OP_SHL_IMM, <<);
+ }
+ dest->opcode = OP_ICONST;
+ dest->sreg1 = dest->sreg2 = -1;
+ }
+ break;
+ /* Non-commutative int ops need both operands constant. */
+ case OP_ISUB:
+ case OP_ISHL:
+ case OP_ISHR:
+ case OP_ISHR_UN:
+ if ((arg1->opcode == OP_ICONST) && (arg2->opcode == OP_ICONST)) {
+ ALLOC_DEST (cfg, dest, ins);
+ switch (ins->opcode) {
+ FOLD_BINOP2 (OP_ISUB, -);
+ FOLD_BINOP2 (OP_ISHL, <<);
+ FOLD_BINOP2 (OP_ISHR, >>);
+ FOLD_BINOPC2 (OP_ISHR_UN, >>, guint32);
+ }
+ dest->opcode = OP_ICONST;
+ dest->sreg1 = dest->sreg2 = -1;
+ }
+ break;
+ case OP_IDIV:
+ case OP_IDIV_UN:
+ case OP_IREM:
+ case OP_IREM_UN:
+ if ((arg1->opcode == OP_ICONST) && (arg2->opcode == OP_ICONST)) {
+ /* Division by zero and G_MININT32 / -1 cannot be folded safely;
+ * leave the instruction to raise the runtime exception. */
+ if ((arg2->inst_c0 == 0) || ((arg1->inst_c0 == G_MININT32) && (arg2->inst_c0 == -1)))
+ return NULL;
+ ALLOC_DEST (cfg, dest, ins);
+ switch (ins->opcode) {
+ FOLD_BINOPC2 (OP_IDIV, /, gint32);
+ FOLD_BINOPC2 (OP_IDIV_UN, /, guint32);
+ FOLD_BINOPC2 (OP_IREM, %, gint32);
+ FOLD_BINOPC2 (OP_IREM_UN, %, guint32);
+ }
+ dest->opcode = OP_ICONST;
+ dest->sreg1 = dest->sreg2 = -1;
+ }
+ break;
+ case OP_IDIV_IMM:
+ case OP_IDIV_UN_IMM:
+ case OP_IREM_IMM:
+ case OP_IREM_UN_IMM:
+ if (arg1->opcode == OP_ICONST) {
+ if ((ins->inst_imm == 0) || ((arg1->inst_c0 == G_MININT32) && (ins->inst_imm == -1)))
+ return NULL;
+ ALLOC_DEST (cfg, dest, ins);
+ switch (ins->opcode) {
+ FOLD_BINOPC2_IMM (OP_IDIV_IMM, /, gint32);
+ FOLD_BINOPC2_IMM (OP_IDIV_UN_IMM, /, guint32);
+ FOLD_BINOPC2_IMM (OP_IREM_IMM, %, gint32);
+ FOLD_BINOPC2_IMM (OP_IREM_UN_IMM, %, guint32);
+ default:
+ g_assert_not_reached ();
+ }
+ dest->opcode = OP_ICONST;
+ dest->sreg1 = dest->sreg2 = -1;
+ }
+ break;
+ /* case OP_INEG: */
+ case OP_INOT:
+ case OP_INEG:
+ if (arg1->opcode == OP_ICONST) {
+ /* INEG sets cflags on x86, and the LNEG decomposition depends on that */
+ if ((ins->opcode == OP_INEG) && ins->next && (ins->next->opcode == OP_ADC_IMM))
+ return NULL;
+ ALLOC_DEST (cfg, dest, ins);
+ switch (ins->opcode) {
+ FOLD_UNOP2 (OP_INEG,-);
+ FOLD_UNOP2 (OP_INOT,~);
+ }
+ dest->opcode = OP_ICONST;
+ dest->sreg1 = dest->sreg2 = -1;
+ }
+ break;
+ /* A move from a constant becomes the constant itself. */
+ case OP_MOVE:
+#if SIZEOF_VOID_P == 8
+ if ((arg1->opcode == OP_ICONST) || (arg1->opcode == OP_I8CONST)) {
+#else
+ if (arg1->opcode == OP_ICONST) {
+#endif
+ ALLOC_DEST (cfg, dest, ins);
+ dest->opcode = arg1->opcode;
+ dest->sreg1 = dest->sreg2 = -1;
+ dest->inst_c0 = arg1->inst_c0;
+ }
+ break;
+ case OP_VMOVE:
+ if (arg1->opcode == OP_VZERO) {
+ ALLOC_DEST (cfg, dest, ins);
+ dest->opcode = OP_VZERO;
+ dest->sreg1 = -1;
+ }
+ break;
+ case OP_COMPARE:
+ case OP_ICOMPARE:
+ case OP_COMPARE_IMM:
+ case OP_ICOMPARE_IMM: {
+ MonoInst dummy_arg2;
+ /* The _IMM compare variants have no sreg2; synthesize a constant
+ * operand from ins->inst_imm so both shapes share the code below. */
+ if (ins->sreg2 == -1) {
+ arg2 = &dummy_arg2;
+ arg2->opcode = OP_ICONST;
+ arg2->inst_c0 = ins->inst_imm;
+ }
+
+ /* Folding a compare requires looking at the consumer (setcc/branch). */
+ if ((arg1->opcode == OP_ICONST) && (arg2->opcode == OP_ICONST) && ins->next) {
+ MonoInst *next = ins->next;
+ gboolean res = FALSE;
+
+ switch (next->opcode) {
+ case OP_CEQ:
+ case OP_ICEQ:
+ case OP_CGT:
+ case OP_ICGT:
+ case OP_CGT_UN:
+ case OP_ICGT_UN:
+ case OP_CLT:
+ case OP_ICLT:
+ case OP_CLT_UN:
+ case OP_ICLT_UN:
+ switch (next->opcode) {
+ FOLD_BINOPCXX2 (OP_CEQ,==,gint32);
+ FOLD_BINOPCXX2 (OP_ICEQ,==,gint32);
+ FOLD_BINOPCXX2 (OP_CGT,>,gint32);
+ FOLD_BINOPCXX2 (OP_ICGT,>,gint32);
+ FOLD_BINOPCXX2 (OP_CGT_UN,>,guint32);
+ FOLD_BINOPCXX2 (OP_ICGT_UN,>,guint32);
+ FOLD_BINOPCXX2 (OP_CLT,<,gint32);
+ FOLD_BINOPCXX2 (OP_ICLT,<,gint32);
+ FOLD_BINOPCXX2 (OP_CLT_UN,<,guint32);
+ FOLD_BINOPCXX2 (OP_ICLT_UN,<,guint32);
+ }
+
+ /* compare + setcc: turn the setcc into an ICONST and kill the compare */
+ if (overwrite) {
+ NULLIFY_INS (ins);
+ next->opcode = OP_ICONST;
+ next->inst_c0 = res;
+ next->sreg1 = next->sreg2 = -1;
+ } else {
+ ALLOC_DEST (cfg, dest, ins);
+ dest->opcode = OP_ICONST;
+ dest->inst_c0 = res;
+ }
+ break;
+ case OP_IBEQ:
+ case OP_IBNE_UN:
+ case OP_IBGT:
+ case OP_IBGT_UN:
+ case OP_IBGE:
+ case OP_IBGE_UN:
+ case OP_IBLT:
+ case OP_IBLT_UN:
+ case OP_IBLE:
+ case OP_IBLE_UN:
+ switch (next->opcode) {
+ FOLD_BINOPCXX2 (OP_IBEQ,==,gint32);
+ FOLD_BINOPCXX2 (OP_IBNE_UN,!=,guint32);
+ FOLD_BINOPCXX2 (OP_IBGT,>,gint32);
+ FOLD_BINOPCXX2 (OP_IBGT_UN,>,guint32);
+ FOLD_BINOPCXX2 (OP_IBGE,>=,gint32);
+ FOLD_BINOPCXX2 (OP_IBGE_UN,>=,guint32);
+ FOLD_BINOPCXX2 (OP_IBLT,<,gint32);
+ FOLD_BINOPCXX2 (OP_IBLT_UN,<,guint32);
+ FOLD_BINOPCXX2 (OP_IBLE,<=,gint32);
+ FOLD_BINOPCXX2 (OP_IBLE_UN,<=,guint32);
+ }
+
+ if (overwrite) {
+ /*
+ * Can't nullify OP_COMPARE here since the decompose long branch
+ * opcodes depend on it being executed. Also, the branch might not
+ * be eliminated after all if loop opts is disabled, for example.
+ */
+ if (res)
+ next->flags |= MONO_INST_CFOLD_TAKEN;
+ else
+ next->flags |= MONO_INST_CFOLD_NOT_TAKEN;
+ } else {
+ ALLOC_DEST (cfg, dest, ins);
+ dest->opcode = OP_ICONST;
+ dest->inst_c0 = res;
+ }
+ break;
+ case OP_NOP:
+ case OP_BR:
+ /* This happens when a conditional branch is eliminated */
+ if (next->next == NULL) {
+ /* Last ins */
+ if (overwrite)
+ NULLIFY_INS (ins);
+ }
+ break;
+ default:
+ return NULL;
+ }
+ }
+ break;
+ }
+ case OP_FMOVE:
+ if (arg1->opcode == OP_R8CONST) {
+ ALLOC_DEST (cfg, dest, ins);
+ dest->opcode = OP_R8CONST;
+ dest->sreg1 = -1;
+ dest->inst_p0 = arg1->inst_p0;
+ }
+ break;
+
+ /*
+ * TODO:
+ * conv.* opcodes.
+ * *ovf* opcodes? It's slow and hard to do in C.
+ * switch can be replaced by a simple jump
+ */
+ default:
+ return NULL;
+ }
+
+ return dest;
+}
#
break: len:2
jmp: len:120
+tailcall: len:120 clob:c
br: len:6
label: len:0
int_ble: len:8
int_ble_un: len:8
+# Linear IR opcodes
+nop: len:0
+dummy_use: len:0
+dummy_store: len:0
+not_reached: len:0
+not_null: src1:i len:0
+
+long_ceq: dest:c len:64
+long_cgt: dest:c len:64
+long_cgt_un: dest:c len:64
+long_clt: dest:c len:64
+long_clt_un: dest:c len:64
+
+int_conv_to_i1: dest:i src1:i len:4
+int_conv_to_i2: dest:i src1:i len:4
+int_conv_to_i4: dest:i src1:i len:3
+int_conv_to_i8: dest:i src1:i len:3
+int_conv_to_u4: dest:i src1:i len:3
+int_conv_to_u8: dest:i src1:i len:3
+
+int_conv_to_u: dest:i src1:i len:4
+int_conv_to_u2: dest:i src1:i len:4
+int_conv_to_u1: dest:i src1:i len:4
+int_conv_to_i: dest:i src1:i len:4
+
+cond_exc_ieq: len:8
+cond_exc_ine_un: len:8
+cond_exc_ilt: len:8
+cond_exc_ilt_un: len:8
+cond_exc_igt: len:8
+cond_exc_igt_un: len:8
+cond_exc_ige: len:8
+cond_exc_ige_un: len:8
+cond_exc_ile: len:8
+cond_exc_ile_un: len:8
+cond_exc_ino: len:8
+cond_exc_inc: len:8
+
+x86_compare_membase8_imm: src1:b len:9
+
+jump_table: dest:i len:18
+
+cmov_ieq: dest:i src1:i src2:i len:16 clob:1
+cmov_ige: dest:i src1:i src2:i len:16 clob:1
+cmov_igt: dest:i src1:i src2:i len:16 clob:1
+cmov_ile: dest:i src1:i src2:i len:16 clob:1
+cmov_ilt: dest:i src1:i src2:i len:16 clob:1
+cmov_ine_un: dest:i src1:i src2:i len:16 clob:1
+cmov_ige_un: dest:i src1:i src2:i len:16 clob:1
+cmov_igt_un: dest:i src1:i src2:i len:16 clob:1
+cmov_ile_un: dest:i src1:i src2:i len:16 clob:1
+cmov_ilt_un: dest:i src1:i src2:i len:16 clob:1
+
+cmov_leq: dest:i src1:i src2:i len:16 clob:1
+cmov_lge: dest:i src1:i src2:i len:16 clob:1
+cmov_lgt: dest:i src1:i src2:i len:16 clob:1
+cmov_lle: dest:i src1:i src2:i len:16 clob:1
+cmov_llt: dest:i src1:i src2:i len:16 clob:1
+cmov_lne_un: dest:i src1:i src2:i len:16 clob:1
+cmov_lge_un: dest:i src1:i src2:i len:16 clob:1
+cmov_lgt_un: dest:i src1:i src2:i len:16 clob:1
+cmov_lle_un: dest:i src1:i src2:i len:16 clob:1
+cmov_llt_un: dest:i src1:i src2:i len:16 clob:1
+
+long_add_imm: dest:i src1:i clob:1 len:12
+long_sub_imm: dest:i src1:i clob:1 len:12
+long_and_imm: dest:i src1:i clob:1 len:12
+long_or_imm: dest:i src1:i clob:1 len:12
+long_xor_imm: dest:i src1:i clob:1 len:12
+
+lcompare_imm: src1:i len:13
+
+amd64_compare_membase_reg: src1:b src2:i len:9
+amd64_compare_membase_imm: src1:b len:14
+amd64_compare_reg_membase: src1:i src2:b len:9
+
+amd64_add_reg_membase: dest:i src1:i src2:b clob:1 len:14
+amd64_sub_reg_membase: dest:i src1:i src2:b clob:1 len:14
+amd64_and_reg_membase: dest:i src1:i src2:b clob:1 len:14
+amd64_or_reg_membase: dest:i src1:i src2:b clob:1 len:14
+amd64_xor_reg_membase: dest:i src1:i src2:b clob:1 len:14
+
+amd64_add_membase_imm: src1:b len:16
+amd64_sub_membase_imm: src1:b len:16
+amd64_and_membase_imm: src1:b len:13
+amd64_or_membase_imm: src1:b len:13
+amd64_xor_membase_imm: src1:b len:13
+
+x86_and_membase_imm: src1:b len:12
+x86_or_membase_imm: src1:b len:12
+x86_xor_membase_imm: src1:b len:12
+
+x86_add_membase_reg: src1:b src2:i len:12
+x86_sub_membase_reg: src1:b src2:i len:12
+x86_and_membase_reg: src1:b src2:i len:12
+x86_or_membase_reg: src1:b src2:i len:12
+x86_xor_membase_reg: src1:b src2:i len:12
+x86_mul_membase_reg: src1:b src2:i len:14
+
+amd64_add_membase_reg: src1:b src2:i len:13
+amd64_sub_membase_reg: src1:b src2:i len:13
+amd64_and_membase_reg: src1:b src2:i len:13
+amd64_or_membase_reg: src1:b src2:i len:13
+amd64_xor_membase_reg: src1:b src2:i len:13
+amd64_mul_membase_reg: src1:b src2:i len:15
+
+float_conv_to_r4: dest:f src1:f
+
+vcall2: len:64 clob:c
+vcall2_reg: src1:i len:64 clob:c
+vcall2_membase: src1:b len:64 clob:c
+
+localloc_imm: dest:i len:84
+
+load_mem: dest:i len:16
+loadi8_mem: dest:i len:16
+loadi4_mem: dest:i len:16
+loadu1_mem: dest:i len:16
+loadu2_mem: dest:i len:16
cond_exc_no: len:8
cond_exc_c: len:12
cond_exc_nc: len:8
-float_beq: src1:f src2:f len:20
-float_bne_un: src1:f src2:f len:20
-float_blt: src1:f src2:f len:20
-float_blt_un: src1:f src2:f len:20
-float_bgt: src1:f src2:f len:20
-float_bgt_un: src1:f src2:f len:20
-float_bge: src1:f src2:f len:20
-float_bge_un: src1:f src2:f len:20
-float_ble: src1:f src2:f len:20
-float_ble_un: src1:f src2:f len:20
+#float_beq: src1:f src2:f len:20
+#float_bne_un: src1:f src2:f len:20
+#float_blt: src1:f src2:f len:20
+#float_blt_un: src1:f src2:f len:20
+#float_bgt: src1:f src2:f len:20
+#float_bgt_un: src1:f src2:f len:20
+#float_bge: src1:f src2:f len:20
+#float_bge_un: src1:f src2:f len:20
+#float_ble: src1:f src2:f len:20
+#float_ble_un: src1:f src2:f len:20
float_add: dest:f src1:f src2:f len:4
float_sub: dest:f src1:f src2:f len:4
float_mul: dest:f src1:f src2:f len:4
arm_rsbs_imm: dest:i src1:i len:4
arm_rsc_imm: dest:i src1:i len:4
+
+# Linear IR opcodes
+dummy_use: len:0
+dummy_store: len:0
+not_reached: len:0
+not_null: src1:i len:0
+
+int_adc: dest:i src1:i src2:i len:4
+int_addcc: dest:i src1:i src2:i len:4
+int_subcc: dest:i src1:i src2:i len:4
+int_sbb: dest:i src1:i src2:i len:4
+int_adc_imm: dest:i src1:i len:12
+int_sbb_imm: dest:i src1:i len:12
+
+int_add_imm: dest:i src1:i len:12
+int_sub_imm: dest:i src1:i len:12
+int_mul_imm: dest:i src1:i len:12
+int_div_imm: dest:i src1:i len:20
+int_div_un_imm: dest:i src1:i len:12
+int_rem_imm: dest:i src1:i len:28
+int_rem_un_imm: dest:i src1:i len:16
+int_and_imm: dest:i src1:i len:12
+int_or_imm: dest:i src1:i len:12
+int_xor_imm: dest:i src1:i len:12
+int_shl_imm: dest:i src1:i len:8
+int_shr_imm: dest:i src1:i len:8
+int_shr_un_imm: dest:i src1:i len:8
+
+int_ceq: dest:i len:12
+int_cgt: dest:i len:12
+int_cgt_un: dest:i len:12
+int_clt: dest:i len:12
+int_clt_un: dest:i len:12
+
+cond_exc_ieq: len:8
+cond_exc_ine_un: len:8
+cond_exc_ilt: len:8
+cond_exc_ilt_un: len:8
+cond_exc_igt: len:8
+cond_exc_igt_un: len:8
+cond_exc_ige: len:8
+cond_exc_ige_un: len:8
+cond_exc_ile: len:8
+cond_exc_ile_un: len:8
+cond_exc_iov: len:12
+cond_exc_ino: len:8
+cond_exc_ic: len:12
+cond_exc_inc: len:8
+
+icompare: src1:i src2:i len:4
+icompare_imm: src1:i len:12
+
+long_conv_to_ovf_i4_2: dest:i src1:i src2:i len:30
+
+vcall2: len:20 clob:c
+vcall2_reg: src1:i len:8 clob:c
+vcall2_membase: src1:b len:12 clob:c
+
+# This is different from the original JIT opcodes
+float_beq: len:20
+float_bne_un: len:20
+float_blt: len:20
+float_blt_un: len:20
+float_bgt: len:20
+float_bgt_un: len:20
+float_bge: len:20
+float_bge_un: len:20
+float_ble: len:20
+float_ble_un: len:20
ia64_loadi8_membase_inc: dest:b src1:i len:48
ia64_loadr4_membase_inc: dest:b src1:i len:48
ia64_loadr8_membase_inc: dest:b src1:i len:48
+
+# Linear IR opcodes
+nop: len:0
+dummy_use: len:0
+dummy_store: len:0
+not_reached: len:0
+not_null: src1:i len:0
+
+jump_table: dest:i len:48
+
+localloc_imm: dest:i len:92
+
+vcall2: len:80 clob:c
+vcall2_reg: src1:i len:80 clob:c
+vcall2_membase: src1:b len:80 clob:c
+
+int_conv_to_i1: dest:i src1:i len:48
+int_conv_to_u1: dest:i src1:i len:48
+int_conv_to_i2: dest:i src1:i len:48
+int_conv_to_u2: dest:i src1:i len:48
+int_conv_to_i4: dest:i src1:i len:48
+int_conv_to_u4: dest:i src1:i len:48
+int_conv_to_i8: dest:i src1:i len:48
+int_conv_to_u8: dest:i src1:i len:48
+
+long_add_imm: dest:i src1:i len:48
+long_sub_imm: dest:i src1:i len:48
+long_and_imm: dest:i src1:i len:48
+long_or_imm: dest:i src1:i len:48
+long_xor_imm: dest:i src1:i len:48
+
jmp: len:92
call: dest:a clob:c len:16
br: len:4
-beq: len:8
-bge: len:8
-bgt: len:8
-ble: len:8
-blt: len:8
-bne.un: len:8
-bge.un: len:8
-bgt.un: len:8
-ble.un: len:8
-blt.un: len:8
-add: dest:i src1:i src2:i len:4
-sub: dest:i src1:i src2:i len:4
-mul: dest:i src1:i src2:i len:4
-div: dest:i src1:i src2:i len:40
-div.un: dest:i src1:i src2:i len:16
-rem: dest:i src1:i src2:i len:48
-rem.un: dest:i src1:i src2:i len:24
-and: dest:i src1:i src2:i len:4
-or: dest:i src1:i src2:i len:4
-xor: dest:i src1:i src2:i len:4
-shl: dest:i src1:i src2:i len:4
-shr: dest:i src1:i src2:i len:4
-shr.un: dest:i src1:i src2:i len:4
-neg: dest:i src1:i len:4
-not: dest:i src1:i len:4
-conv.i1: dest:i src1:i len:4
-conv.i2: dest:i src1:i len:4
-conv.i4: dest:i src1:i len:4
-conv.r4: dest:f src1:i len:36
-conv.r8: dest:f src1:i len:36
-conv.u4: dest:i src1:i
-conv.r.un: dest:f src1:i len:32
throw: src1:i len:20
rethrow: src1:i len:20
ckfinite: src1:f
ppc_check_finite: src1:i len:16
-conv.u2: dest:i src1:i len:4
-conv.u1: dest:i src1:i len:4
-conv.i: dest:i src1:i len:4
-add.ovf: dest:i src1:i src2:i len:16
-add.ovf.un: dest:i src1:i src2:i len:16
-mul.ovf: dest:i src1:i src2:i len:16
-# this opcode is handled specially in the code generator
-mul.ovf.un: dest:i src1:i src2:i len:16
-sub.ovf: dest:i src1:i src2:i len:16
-sub.ovf.un: dest:i src1:i src2:i len:16
add_ovf_carry: dest:i src1:i src2:i len:16
sub_ovf_carry: dest:i src1:i src2:i len:16
add_ovf_un_carry: dest:i src1:i src2:i len:16
sub_ovf_un_carry: dest:i src1:i src2:i len:16
start_handler: len:16
endfinally: len:12
-conv.u: dest:i src1:i len:4
ceq: dest:i len:12
cgt: dest:i len:12
cgt.un: dest:i len:12
vcall_membase: src1:b len:12 clob:c
call_reg: dest:a src1:i len:8 clob:c
call_membase: dest:a src1:b len:12 clob:c
-iconst: dest:i len:12
+iconst: dest:i len:8
r4const: dest:f len:12
r8const: dest:f len:12
label: len:0
bigmul: len:12 dest:l src1:i src2:i
bigmul_un: len:12 dest:l src1:i src2:i
tls_get: len:8 dest:i
+
+# Linear IR opcodes
+dummy_use: len:0
+dummy_store: len:0
+not_reached: len:0
+not_null: src1:i len:0
+
+# 32 bit opcodes
+int_add: dest:i src1:i src2:i len:4
+int_sub: dest:i src1:i src2:i len:4
+int_mul: dest:i src1:i src2:i len:4
+int_div: dest:i src1:i src2:i len:40
+int_div_un: dest:i src1:i src2:i len:16
+int_rem: dest:i src1:i src2:i len:48
+int_rem_un: dest:i src1:i src2:i len:24
+int_and: dest:i src1:i src2:i len:4
+int_or: dest:i src1:i src2:i len:4
+int_xor: dest:i src1:i src2:i len:4
+int_shl: dest:i src1:i src2:i len:4
+int_shr: dest:i src1:i src2:i len:4
+int_shr_un: dest:i src1:i src2:i len:4
+int_neg: dest:i src1:i len:4
+int_not: dest:i src1:i len:4
+int_conv_to_i1: dest:i src1:i len:8
+int_conv_to_i2: dest:i src1:i len:8
+int_conv_to_i4: dest:i src1:i len:4
+int_conv_to_r4: dest:f src1:i len:36
+int_conv_to_r8: dest:f src1:i len:36
+int_conv_to_u4: dest:i src1:i
+int_conv_to_u2: dest:i src1:i len:8
+int_conv_to_u1: dest:i src1:i len:4
+int_beq: len:8
+int_bge: len:8
+int_bgt: len:8
+int_ble: len:8
+int_blt: len:8
+int_bne_un: len:8
+int_bge_un: len:8
+int_bgt_un: len:8
+int_ble_un: len:8
+int_blt_un: len:8
+int_add_ovf: dest:i src1:i src2:i len:16
+int_add_ovf_un: dest:i src1:i src2:i len:16
+int_mul_ovf: dest:i src1:i src2:i len:16
+int_mul_ovf_un: dest:i src1:i src2:i len:16
+int_sub_ovf: dest:i src1:i src2:i len:16
+int_sub_ovf_un: dest:i src1:i src2:i len:16
+
+int_adc: dest:i src1:i src2:i len:4
+int_addcc: dest:i src1:i src2:i len:4
+int_subcc: dest:i src1:i src2:i len:4
+int_sbb: dest:i src1:i src2:i len:4
+int_adc_imm: dest:i src1:i len:12
+int_sbb_imm: dest:i src1:i len:12
+
+int_add_imm: dest:i src1:i len:12
+int_sub_imm: dest:i src1:i len:12
+int_mul_imm: dest:i src1:i len:12
+int_div_imm: dest:i src1:i len:20
+int_div_un_imm: dest:i src1:i len:12
+int_rem_imm: dest:i src1:i len:28
+int_rem_un_imm: dest:i src1:i len:16
+int_and_imm: dest:i src1:i len:12
+int_or_imm: dest:i src1:i len:12
+int_xor_imm: dest:i src1:i len:12
+int_shl_imm: dest:i src1:i len:8
+int_shr_imm: dest:i src1:i len:8
+int_shr_un_imm: dest:i src1:i len:8
+
+int_ceq: dest:i len:12
+int_cgt: dest:i len:12
+int_cgt_un: dest:i len:12
+int_clt: dest:i len:12
+int_clt_un: dest:i len:12
+
+cond_exc_ieq: len:8
+cond_exc_ine_un: len:8
+cond_exc_ilt: len:8
+cond_exc_ilt_un: len:8
+cond_exc_igt: len:8
+cond_exc_igt_un: len:8
+cond_exc_ige: len:8
+cond_exc_ige_un: len:8
+cond_exc_ile: len:8
+cond_exc_ile_un: len:8
+cond_exc_iov: len:12
+cond_exc_ino: len:8
+cond_exc_ic: len:12
+cond_exc_inc: len:8
+
+icompare: src1:i src2:i len:4
+icompare_imm: src1:i len:12
+
+long_conv_to_ovf_i4_2: dest:i src1:i src2:i len:30
+
+vcall2: len:20 clob:c
+vcall2_reg: src1:i len:8 clob:c
+vcall2_membase: src1:b len:12 clob:c
+
+jump_table: dest:i len:8
+
int_neg: dest:i src1:i len:4
int_not: dest:i src1:i len:8
-int_conv_to_i1: dest:i src1:i len:26
-int_conv_to_i2: dest:i src1:i len:26
+int_conv_to_i1: dest:i src1:i len:16
+int_conv_to_i2: dest:i src1:i len:16
int_conv_to_i4: dest:i src1:i len:2
-int_conv_to_i: dest:i src1:i len:2
int_conv_to_r4: dest:f src1:i len:4
int_conv_to_r8: dest:f src1:i len:4
int_conv_to_u1: dest:i src1:i len:8
add_imm: dest:i src1:i len:18
addcc_imm: dest:i src1:i len:18
and_imm: dest:i src1:i len:16
-div_imm: dest:i src1:i src2:i len:24
-div_un_imm: dest:i src1:i src2:i len:24
+div_imm: dest:i src1:i len:24
+div_un_imm: dest:i src1:i len:24
or_imm: dest:i src1:i len:16
-rem_imm: dest:i src1:i src2:i len:24
-rem_un_imm: dest:i src1:i src2:i len:24
+rem_imm: dest:i src1:i len:24
+rem_un_imm: dest:i src1:i len:24
sbb_imm: dest:i src1:i len:18
shl_imm: dest:i src1:i len:8
shr_imm: dest:i src1:i len:8
sub_imm: dest:i src1:i len:18
subcc_imm: dest:i src1:i len:18
xor_imm: dest:i src1:i len:16
+
+# Linear IR opcodes
+dummy_use: len:0
+dummy_store: len:0
+not_reached: len:0
+not_null: src1:i len:0
+
+jump_table: dest:i len:16
+
+icompare: src1:i src2:i len:4
+icompare_imm: src1:i len:14
+
+int_ceq: dest:i len:12
+int_cgt_un: dest:i len:12
+int_cgt: dest:i len:12
+int_clt_un: dest:i len:12
+int_clt: dest:i len:12
+
+cond_exc_ic: len:8
+cond_exc_ieq: len:8
+cond_exc_ige: len:8
+cond_exc_ige_un: len:8
+cond_exc_igt: len:8
+cond_exc_igt_un: len:8
+cond_exc_ile: len:8
+cond_exc_ile_un: len:8
+cond_exc_ilt: len:8
+cond_exc_ilt_un: len:8
+cond_exc_inc: len:8
+cond_exc_ine_un: len:8
+cond_exc_ino: len:8
+cond_exc_iov: len:8
+
+int_add_imm: dest:i src1:i len:18
+int_sub_imm: dest:i src1:i len:18
+int_mul_imm: dest:i src1:i len:20
+int_div_imm: dest:i src1:i len:24
+int_div_un_imm: dest:i src1:i len:24
+int_rem_imm: dest:i src1:i len:24
+int_rem_un_imm: dest:i src1:i len:24
+int_and_imm: dest:i src1:i len:16
+int_or_imm: dest:i src1:i len:16
+int_xor_imm: dest:i src1:i len:16
+int_adc_imm: dest:i src1:i len:18
+int_sbb_imm: dest:i src1:i len:18
+int_shl_imm: dest:i src1:i len:8
+int_shr_imm: dest:i src1:i len:8
+int_shr_un_imm: dest:i src1:i len:8
+
+int_adc: dest:i src1:i src2:i len:6
+int_sbb: dest:i src1:i src2:i len:8
+int_addcc: dest:i src1:i src2:i len:6
+int_subcc: dest:i src1:i src2:i len:6
+
+long_conv_to_ovf_i4_2: dest:i src1:i src2:i len:44
+
+vcall2: len:8 clob:c
+vcall2_membase: src1:b len:12 clob:c
+vcall2_reg: src1:i len:8 clob:c
+
+s390_long_add: dest:l src1:i src2:i len:18
+s390_long_add_ovf: dest:l src1:i src2:i len:32
+s390_long_add_ovf_un: dest:l src1:i src2:i len:32
+s390_long_sub: dest:l src1:i src2:i len:18
+s390_long_sub_ovf: dest:l src1:i src2:i len:32
+s390_long_sub_ovf_un: dest:l src1:i src2:i len:32
+s390_long_neg: dest:l src1:i src2:i len:18
+
+s390_int_add_ovf: len:24 dest:i src1:i src2:i
+s390_int_add_ovf_un: len:10 dest:i src1:i src2:i
+s390_int_sub_ovf: len:24 dest:i src1:i src2:i
+s390_int_sub_ovf_un: len:10 dest:i src1:i src2:i
fmove: dest:f src1:f len:4
i8const: dest:i len:20
icompare: src1:i src2:i len:4
-icompare_imm: src1:i src2:i len:14
+icompare_imm: src1:i len:14
iconst: dest:i len:40
setret: dest:a src1:i len:4
sext_i4: dest:i src1:i len:4
zext_i4: dest:i src1:i len:4
-shl_imm: dest:i src1:i len:8
+shl_imm: dest:i src1:i len:10
shr_imm: dest:i src1:i len:10
shr_un_imm: dest:i src1:i len:8
sqrt: dest:f src1:f len:4
int_rem_un_imm: dest:d src1:i len:24
int_sbb: dest:i src1:i src2:i len:6
int_sbb_imm: dest:i src1:i len:14
-int_shl: dest:i src1:i src2:i clob:s len:10
+int_shl: dest:i src1:i src2:i clob:s len:12
int_shl_imm: dest:i src1:i len:10
-int_shr: dest:i src1:i src2:i clob:s len:10
+int_shr: dest:i src1:i src2:i clob:s len:12
int_shr_imm: dest:i src1:i len:10
-int_shr_un: dest:i src1:i src2:i clob:s len:10
+int_shr_un: dest:i src1:i src2:i clob:s len:12
int_shr_un_imm: dest:i src1:i len:10
int_subcc: dest:i src1:i src2:i len:12
int_sub: dest:i src1:i src2:i len:12
long_div: dest:i src1:i src2:i len:12
long_div_un: dest:i src1:i src2:i len:16
long_mul: dest:i src1:i src2:i len:12
-long_mul_imm: dest:i src1:i src2:i len:20
+long_mul_imm: dest:i src1:i len:20
long_mul_ovf: dest:i src1:i src2:i len:56
long_mul_ovf_un: dest:i src1:i src2:i len:64
long_and: dest:i src1:i src2:i len:8
long_conv_to_u4: dest:i src1:i len:4
long_conv_to_u8: dest:i src1:i len:4
long_conv_to_u: dest:i src1:i len:4
-long_conv_to_r_un: dest:f src1:i src2:i len:37
+long_conv_to_r_un: dest:f src1:i len:37
long_beq: len:8
long_bge_un: len:8
long_blt: len:8
long_bne_un: len:8
+# Linear IR opcodes
+dummy_use: len:0
+dummy_store: len:0
+not_reached: len:0
+not_null: src1:i len:0
+
+jump_table: dest:i len:24
+
+int_conv_to_i1: dest:i src1:i len:26
+int_conv_to_i2: dest:i src1:i len:26
+int_conv_to_i4: dest:i src1:i len:2
+int_conv_to_i: dest:i src1:i len:2
+int_conv_to_u1: dest:i src1:i len:8
+int_conv_to_u2: dest:i src1:i len:16
+int_conv_to_u4: dest:i src1:i
+int_conv_to_r_un: dest:f src1:i len:37
+
+cond_exc_ic: len:8
+cond_exc_ieq: len:8
+cond_exc_ige: len:8
+cond_exc_ige_un: len:8
+cond_exc_igt: len:8
+cond_exc_igt_un: len:8
+cond_exc_ile: len:8
+cond_exc_ile_un: len:8
+cond_exc_ilt: len:8
+cond_exc_ilt_un: len:8
+cond_exc_inc: len:8
+cond_exc_ine_un: len:8
+cond_exc_ino: len:8
+cond_exc_iov: len:8
+
+lcompare_imm: src1:i len:20
+
+long_add_imm: dest:i src1:i len:20
+
+long_ceq: dest:i len:12
+long_cgt_un: dest:i len:12
+long_cgt: dest:i len:12
+long_clt_un: dest:i len:12
+long_clt: dest:i len:12
+
+vcall2: len:22 clob:c
+vcall2_membase: src1:b len:12 clob:c
+vcall2_reg: src1:i len:8 clob:c
+
+s390_int_add_ovf: len:32 dest:i src1:i src2:i
+s390_int_add_ovf_un: len:32 dest:i src1:i src2:i
+s390_int_sub_ovf: len:32 dest:i src1:i src2:i
+s390_int_sub_ovf_un: len:32 dest:i src1:i src2:i
+
+s390_long_add_ovf: dest:i src1:i src2:i len:32
+s390_long_add_ovf_un: dest:i src1:i src2:i len:32
+s390_long_sub_ovf: dest:i src1:i src2:i len:32
+s390_long_sub_ovf_un: dest:i src1:i src2:i len:32
sparc_cond_exc_gtz: src1:i len:64
sparc_cond_exc_gez: src1:i len:64
sparc_cond_exc_lez: src1:i len:64
+
+# Linear IR opcodes
+nop: len:0
+dummy_use: len:0
+dummy_store: len:0
+not_reached: len:0
+not_null: src1:i len:0
+
+jump_table: dest:i len:64
+
+cond_exc_ieq: len:64
+cond_exc_ine_un: len:64
+cond_exc_ilt: len:64
+cond_exc_ilt_un: len:64
+cond_exc_igt: len:64
+cond_exc_igt_un: len:64
+cond_exc_ige: len:64
+cond_exc_ige_un: len:64
+cond_exc_ile: len:64
+cond_exc_ile_un: len:64
+cond_exc_iov: len:64
+cond_exc_ino: len:64
+cond_exc_ic: len:64
+cond_exc_inc: len:64
+
+long_conv_to_ovf_i4_2: dest:i src1:i src2:i len:48
+
+vcall2: len:40 clob:c
+vcall2_reg: src1:i len:64 clob:c
+vcall2_membase: src1:b len:64 clob:c
+
+localloc_imm: dest:i len:64
\ No newline at end of file
outarg_imm: len:5
setret: dest:a src1:i len:2
setlret: dest:l src1:i src2:i len:4
-checkthis: src1:b len:2
+checkthis: src1:b len:3
voidcall: len:17 clob:c
voidcall_reg: src1:i len:11 clob:c
voidcall_membase: src1:b len:16 clob:c
atomic_cas_imm_i4: src1:b src2:i dest:a len:24
memory_barrier: len:16
+# Linear IR opcodes
+nop: len:0
+dummy_use: len:0
+dummy_store: len:0
+not_reached: len:0
+not_null: src1:i len:0
+
+jump_table: dest:i len:5
+
+int_adc: dest:i src1:i src2:i len:2 clob:1
+int_addcc: dest:i src1:i src2:i len:2 clob:1
+int_subcc: dest:i src1:i src2:i len:2 clob:1
+int_sbb: dest:i src1:i src2:i len:2 clob:1
+
+int_add_imm: dest:i src1:i len:6 clob:1
+int_sub_imm: dest:i src1:i len:6 clob:1
+int_mul_imm: dest:i src1:i len:9
+int_div_imm: dest:a src1:a len:15 clob:d
+int_div_un_imm: dest:a src1:a len:15 clob:d
+int_rem_imm: dest:a src1:a len:15 clob:d
+int_rem_un_imm: dest:d src1:a len:15 clob:a
+int_and_imm: dest:i src1:i len:6 clob:1
+int_or_imm: dest:i src1:i len:6 clob:1
+int_xor_imm: dest:i src1:i len:6 clob:1
+int_shl_imm: dest:i src1:i len:6 clob:1
+int_shr_imm: dest:i src1:i len:6 clob:1
+int_shr_un_imm: dest:i src1:i len:6 clob:1
+
+int_conv_to_r_un: dest:f src1:i len:32
+
+int_ceq: dest:y len:6
+int_cgt: dest:y len:6
+int_cgt_un: dest:y len:6
+int_clt: dest:y len:6
+int_clt_un: dest:y len:6
+
+cond_exc_ieq: len:6
+cond_exc_ine_un: len:6
+cond_exc_ilt: len:6
+cond_exc_ilt_un: len:6
+cond_exc_igt: len:6
+cond_exc_igt_un: len:6
+cond_exc_ige: len:6
+cond_exc_ige_un: len:6
+cond_exc_ile: len:6
+cond_exc_ile_un: len:6
+cond_exc_iov: len:6
+cond_exc_ino: len:6
+cond_exc_ic: len:6
+cond_exc_inc: len:6
+
+icompare: src1:i src2:i len:2
+icompare_imm: src1:i len:6
+
+cmov_ieq: dest:i src1:i src2:i len:16 clob:1
+cmov_ige: dest:i src1:i src2:i len:16 clob:1
+cmov_igt: dest:i src1:i src2:i len:16 clob:1
+cmov_ile: dest:i src1:i src2:i len:16 clob:1
+cmov_ilt: dest:i src1:i src2:i len:16 clob:1
+cmov_ine_un: dest:i src1:i src2:i len:16 clob:1
+cmov_ige_un: dest:i src1:i src2:i len:16 clob:1
+cmov_igt_un: dest:i src1:i src2:i len:16 clob:1
+cmov_ile_un: dest:i src1:i src2:i len:16 clob:1
+cmov_ilt_un: dest:i src1:i src2:i len:16 clob:1
+
+long_conv_to_ovf_i4_2: dest:i src1:i src2:i len:30
+long_conv_to_r8_2: dest:f src1:i src2:i len:37
+long_conv_to_r4_2: dest:f src1:i src2:i len:64
+long_conv_to_r_un_2: dest:f src1:i src2:i len:37
+
+fmove: dest:f src1:f
+float_conv_to_r4: dest:f src1:f
+
+load_mem: dest:i len:9
+loadi4_mem: dest:i len:9
+loadu1_mem: dest:i len:9
+loadu2_mem: dest:i len:9
+
+vcall2: len:17 clob:c
+vcall2_reg: src1:i len:11 clob:c
+vcall2_membase: src1:b len:16 clob:c
+
+localloc_imm: dest:i len:120
+
+x86_add_membase_reg: src1:b src2:i len:11
+x86_sub_membase_reg: src1:b src2:i len:11
+x86_and_membase_reg: src1:b src2:i len:11
+x86_or_membase_reg: src1:b src2:i len:11
+x86_xor_membase_reg: src1:b src2:i len:11
+x86_mul_membase_reg: src1:b src2:i len:13
+
+x86_and_reg_membase: dest:i src1:i src2:b clob:1 len:6
+x86_or_reg_membase: dest:i src1:i src2:b clob:1 len:6
+x86_xor_reg_membase: dest:i src1:i src2:b clob:1 len:6
+
+x86_fxch: len:2
\ No newline at end of file
if (inst->opcode == OP_REGVAR)
var->index = inst->dreg | MONO_DEBUG_VAR_ADDRESS_MODE_REGISTER;
- else {
+ else if (inst->flags & MONO_INST_IS_DEAD) {
+ // FIXME:
+ var->index = 0 | MONO_DEBUG_VAR_ADDRESS_MODE_REGISTER;
+ } else {
/* the debug interface needs fixing to allow 0(%base) address */
var->index = inst->inst_basereg | MONO_DEBUG_VAR_ADDRESS_MODE_REGOFFSET;
var->offset = inst->inst_offset;
jit->params = g_new0 (MonoDebugVarInfo, jit->num_params);
for (i = 0; i < jit->num_locals; i++)
- write_variable (cfg->varinfo [cfg->locals_start + i], &jit->locals [i]);
+ write_variable (cfg->locals [i], &jit->locals [i]);
if (sig->hasthis) {
jit->this_var = g_new0 (MonoDebugVarInfo, 1);
- write_variable (cfg->varinfo [0], jit->this_var);
+ write_variable (cfg->args [0], jit->this_var);
}
for (i = 0; i < jit->num_params; i++)
- write_variable (cfg->varinfo [i + sig->hasthis], &jit->params [i]);
+ write_variable (cfg->args [i + sig->hasthis], &jit->params [i]);
jit->num_line_numbers = info->line_numbers->len;
jit->line_numbers = g_new0 (MonoDebugLineNumberEntry, jit->num_line_numbers);
MONO_OPT_INTRINS | \
MONO_OPT_LOOP | \
MONO_OPT_EXCEPTION | \
+ MONO_OPT_CMOV | \
MONO_OPT_GSHARED | \
MONO_OPT_AOT)
MONO_OPT_BRANCH | MONO_OPT_PEEPHOLE | MONO_OPT_LINEARS | MONO_OPT_COPYPROP | MONO_OPT_CONSPROP | MONO_OPT_DEADCE | MONO_OPT_LOOP | MONO_OPT_INLINE | MONO_OPT_INTRINS,
MONO_OPT_BRANCH | MONO_OPT_PEEPHOLE | MONO_OPT_LINEARS | MONO_OPT_COPYPROP | MONO_OPT_CONSPROP | MONO_OPT_DEADCE | MONO_OPT_LOOP | MONO_OPT_INLINE | MONO_OPT_INTRINS | MONO_OPT_SSA,
MONO_OPT_BRANCH | MONO_OPT_PEEPHOLE | MONO_OPT_LINEARS | MONO_OPT_COPYPROP | MONO_OPT_CONSPROP | MONO_OPT_DEADCE | MONO_OPT_LOOP | MONO_OPT_INLINE | MONO_OPT_INTRINS | MONO_OPT_EXCEPTION,
+ MONO_OPT_BRANCH | MONO_OPT_PEEPHOLE | MONO_OPT_LINEARS | MONO_OPT_COPYPROP | MONO_OPT_CONSPROP | MONO_OPT_DEADCE | MONO_OPT_LOOP | MONO_OPT_INLINE | MONO_OPT_INTRINS | MONO_OPT_EXCEPTION | MONO_OPT_CMOV,
MONO_OPT_BRANCH | MONO_OPT_PEEPHOLE | MONO_OPT_LINEARS | MONO_OPT_COPYPROP | MONO_OPT_CONSPROP | MONO_OPT_DEADCE | MONO_OPT_LOOP | MONO_OPT_INLINE | MONO_OPT_INTRINS | MONO_OPT_EXCEPTION | MONO_OPT_ABCREM,
MONO_OPT_BRANCH | MONO_OPT_PEEPHOLE | MONO_OPT_LINEARS | MONO_OPT_COPYPROP | MONO_OPT_CONSPROP | MONO_OPT_DEADCE | MONO_OPT_LOOP | MONO_OPT_INLINE | MONO_OPT_INTRINS | MONO_OPT_EXCEPTION | MONO_OPT_ABCREM | MONO_OPT_SSAPRE,
MONO_OPT_BRANCH | MONO_OPT_PEEPHOLE | MONO_OPT_LINEARS | MONO_OPT_COPYPROP | MONO_OPT_CONSPROP | MONO_OPT_DEADCE | MONO_OPT_LOOP | MONO_OPT_INLINE | MONO_OPT_INTRINS | MONO_OPT_ABCREM,
return 1;
}
} else if (strcmp (argv [i], "--desktop") == 0) {
-#if defined (HAVE_BOEHM_GC)
GC_dont_expand = 1;
-#endif
/* Put desktop-specific optimizations here */
} else if (strcmp (argv [i], "--server") == 0){
/* Put server-specific optimizations here */
#include "mini.h"
#include "mini-x86.h"
+#if defined(__FreeBSD__)
+#include <ucontext.h>
+#endif
+
#ifdef PLATFORM_WIN32
static void (*restore_stack) (void *);
#ifdef MONO_ARCH_USE_SIGACTION
ucontext_t *ctx = (ucontext_t*)sigctx;
- mctx->eax = UCONTEXT_REG_EAX (ctx);
- mctx->ebx = UCONTEXT_REG_EBX (ctx);
- mctx->ecx = UCONTEXT_REG_ECX (ctx);
- mctx->edx = UCONTEXT_REG_EDX (ctx);
- mctx->ebp = UCONTEXT_REG_EBP (ctx);
- mctx->esp = UCONTEXT_REG_ESP (ctx);
- mctx->esi = UCONTEXT_REG_ESI (ctx);
- mctx->edi = UCONTEXT_REG_EDI (ctx);
- mctx->eip = UCONTEXT_REG_EIP (ctx);
+#if defined(__FreeBSD__)
+ mctx->eax = ctx->uc_mcontext.mc_eax;
+ mctx->ebx = ctx->uc_mcontext.mc_ebx;
+ mctx->ecx = ctx->uc_mcontext.mc_ecx;
+ mctx->edx = ctx->uc_mcontext.mc_edx;
+ mctx->ebp = ctx->uc_mcontext.mc_ebp;
+ mctx->esp = ctx->uc_mcontext.mc_esp;
+ mctx->esi = ctx->uc_mcontext.mc_esi;
+ mctx->edi = ctx->uc_mcontext.mc_edi;
+ mctx->eip = ctx->uc_mcontext.mc_eip;
+#else
+ mctx->eax = ctx->uc_mcontext.gregs [REG_EAX];
+ mctx->ebx = ctx->uc_mcontext.gregs [REG_EBX];
+ mctx->ecx = ctx->uc_mcontext.gregs [REG_ECX];
+ mctx->edx = ctx->uc_mcontext.gregs [REG_EDX];
+ mctx->ebp = ctx->uc_mcontext.gregs [REG_EBP];
+ mctx->esp = ctx->uc_mcontext.gregs [REG_ESP];
+ mctx->esi = ctx->uc_mcontext.gregs [REG_ESI];
+ mctx->edi = ctx->uc_mcontext.gregs [REG_EDI];
+ mctx->eip = ctx->uc_mcontext.gregs [REG_EIP];
+#endif
#else
struct sigcontext *ctx = (struct sigcontext *)sigctx;
#ifdef MONO_ARCH_USE_SIGACTION
ucontext_t *ctx = (ucontext_t*)sigctx;
- UCONTEXT_REG_EAX (ctx) = mctx->eax;
- UCONTEXT_REG_EBX (ctx) = mctx->ebx;
- UCONTEXT_REG_ECX (ctx) = mctx->ecx;
- UCONTEXT_REG_EDX (ctx) = mctx->edx;
- UCONTEXT_REG_EBP (ctx) = mctx->ebp;
- UCONTEXT_REG_ESP (ctx) = mctx->esp;
- UCONTEXT_REG_ESI (ctx) = mctx->esi;
- UCONTEXT_REG_EDI (ctx) = mctx->edi;
- UCONTEXT_REG_EIP (ctx) = mctx->eip;
+#if defined(__FreeBSD__)
+ ctx->uc_mcontext.mc_eax = mctx->eax;
+ ctx->uc_mcontext.mc_ebx = mctx->ebx;
+ ctx->uc_mcontext.mc_ecx = mctx->ecx;
+ ctx->uc_mcontext.mc_edx = mctx->edx;
+ ctx->uc_mcontext.mc_ebp = mctx->ebp;
+ ctx->uc_mcontext.mc_esp = mctx->esp;
+ ctx->uc_mcontext.mc_esi = mctx->esi;
+ ctx->uc_mcontext.mc_edi = mctx->edi;
+ ctx->uc_mcontext.mc_eip = mctx->eip;
+
+#else
+ ctx->uc_mcontext.gregs [REG_EAX] = mctx->eax;
+ ctx->uc_mcontext.gregs [REG_EBX] = mctx->ebx;
+ ctx->uc_mcontext.gregs [REG_ECX] = mctx->ecx;
+ ctx->uc_mcontext.gregs [REG_EDX] = mctx->edx;
+ ctx->uc_mcontext.gregs [REG_EBP] = mctx->ebp;
+ ctx->uc_mcontext.gregs [REG_ESP] = mctx->esp;
+ ctx->uc_mcontext.gregs [REG_ESI] = mctx->esi;
+ ctx->uc_mcontext.gregs [REG_EDI] = mctx->edi;
+ ctx->uc_mcontext.gregs [REG_EIP] = mctx->eip;
+#endif
#else
struct sigcontext *ctx = (struct sigcontext *)sigctx;
{
#ifdef MONO_ARCH_USE_SIGACTION
ucontext_t *ctx = (ucontext_t*)sigctx;
- return (gpointer)UCONTEXT_REG_EIP (ctx);
+#if defined(__FreeBSD__)
+ return (gpointer)ctx->uc_mcontext.mc_eip;
+#else
+ return (gpointer)ctx->uc_mcontext.gregs [REG_EIP];
+#endif
#else
struct sigcontext *ctx = sigctx;
return (gpointer)ctx->SC_EIP;
#define MSGSTRFIELD(line) MSGSTRFIELD1(line)
#define MSGSTRFIELD1(line) str##line
static const struct msgstr_t {
-#define MINI_OP(a,b) char MSGSTRFIELD(__LINE__) [sizeof (b)];
+#define MINI_OP(a,b,dest,src1,src2) char MSGSTRFIELD(__LINE__) [sizeof (b)];
#include "mini-ops.h"
#undef MINI_OP
} opstr = {
-#define MINI_OP(a,b) b,
+#define MINI_OP(a,b,dest,src1,src2) b,
#include "mini-ops.h"
#undef MINI_OP
};
static const gint16 opidx [] = {
-#define MINI_OP(a,b) [a - OP_LOAD] = offsetof (struct msgstr_t, MSGSTRFIELD(__LINE__)),
+#define MINI_OP(a,b,dest,src1,src2) [a - OP_LOAD] = offsetof (struct msgstr_t, MSGSTRFIELD(__LINE__)),
#include "mini-ops.h"
#undef MINI_OP
};
#else
-#define MINI_OP(a,b) b,
+#define MINI_OP(a,b,dest,src1,src2) b,
/* keep in sync with the enum in mini.h */
static const char* const
opnames[] = {
}
fprintf (ofd, "\n");
fclose (ofd);
+
#ifdef __APPLE__
#ifdef __ppc64__
#define DIS_CMD "otool64 -v -t"
conv.u4
ret
}
+
+ // Test calling ldfld directly on a vtype instead of a vtype address
+ .method public static int32 test_5_call_ldfld_vtype () cil managed
+ {
+ .maxstack 16
+ .locals init (
+ valuetype Tests/TailCallStruct arg
+ )
+ ldloca 0
+ ldc.i4.2
+ stfld int32 Tests/TailCallStruct::a
+ ldloca 0
+ ldc.i4.4
+ stfld int32 Tests/TailCallStruct::b
+ ldloc.0
+ call valuetype Tests/TailCallStruct Tests::tail2 (valuetype Tests/TailCallStruct)
+ ldfld int32 Tests/TailCallStruct::a
+ ret
+ }
}
stmt: OP_OUTARG_VT (reg) {
tree->opcode = OP_X86_PUSH;
- tree->sreg1 = state->left->tree->dreg;
+ tree->sreg1 = state->left->reg1;
mono_bblock_add_inst (s->cbb, tree);
}
stmt: CEE_STIND_R8 (OP_REGVAR, freg) {
/* nothing to do: the value is already on the FP stack */
}
-
-stmt: CEE_BNE_UN (OP_COMPARE (freg, freg)) {
- MONO_EMIT_BIALU (s, tree, OP_FBNE_UN, -1, state->left->left->reg1, state->left->right->reg1);
+
+stmt: CEE_BNE_UN (fpcflags) {
+ tree->opcode = OP_FBNE_UN;
+ mono_bblock_add_inst (s->cbb, tree);
}
-stmt: CEE_BEQ (OP_COMPARE (freg, freg)) {
- MONO_EMIT_BIALU (s, tree, OP_FBEQ, -1, state->left->left->reg1, state->left->right->reg1);
+stmt: CEE_BEQ (fpcflags) {
+ tree->opcode = OP_FBEQ;
+ mono_bblock_add_inst (s->cbb, tree);
}
-stmt: CEE_BLT (OP_COMPARE (freg, freg)) {
- MONO_EMIT_BIALU (s, tree, OP_FBLT, -1, state->left->left->reg1, state->left->right->reg1);
+stmt: CEE_BLT (fpcflags) {
+ tree->opcode = OP_FBLT;
+ mono_bblock_add_inst (s->cbb, tree);
}
-stmt: CEE_BLT_UN (OP_COMPARE (freg, freg)) {
- MONO_EMIT_BIALU (s, tree, OP_FBLT_UN, -1, state->left->left->reg1, state->left->right->reg1);
+stmt: CEE_BLT_UN (fpcflags) {
+ tree->opcode = OP_FBLT_UN;
+ mono_bblock_add_inst (s->cbb, tree);
}
-stmt: CEE_BGT (OP_COMPARE (freg, freg)) {
- MONO_EMIT_BIALU (s, tree, OP_FBGT, -1, state->left->left->reg1, state->left->right->reg1);
+stmt: CEE_BGT (fpcflags) {
+ tree->opcode = OP_FBGT;
+ mono_bblock_add_inst (s->cbb, tree);
}
-stmt: CEE_BGT_UN (OP_COMPARE (freg, freg)) {
- MONO_EMIT_BIALU (s, tree, OP_FBGT_UN, -1, state->left->left->reg1, state->left->right->reg1);
+stmt: CEE_BGT_UN (fpcflags) {
+ tree->opcode = OP_FBGT_UN;
+ mono_bblock_add_inst (s->cbb, tree);
}
-stmt: CEE_BGE (OP_COMPARE (freg, freg)) {
- MONO_EMIT_BIALU (s, tree, OP_FBGE, -1, state->left->left->reg1, state->left->right->reg1);
+stmt: CEE_BGE (fpcflags) {
+ tree->opcode = OP_FBGE;
+ mono_bblock_add_inst (s->cbb, tree);
}
-stmt: CEE_BGE_UN (OP_COMPARE (freg, freg)) {
- MONO_EMIT_BIALU (s, tree, OP_FBGE_UN, -1, state->left->left->reg1, state->left->right->reg1);
+stmt: CEE_BGE_UN (fpcflags) {
+ tree->opcode = OP_FBGE_UN;
+ mono_bblock_add_inst (s->cbb, tree);
}
-stmt: CEE_BLE (OP_COMPARE (freg, freg)) {
- MONO_EMIT_BIALU (s, tree, OP_FBLE, -1, state->left->left->reg1, state->left->right->reg1);
+stmt: CEE_BLE (fpcflags) {
+ tree->opcode = OP_FBLE;
+ mono_bblock_add_inst (s->cbb, tree);
}
-stmt: CEE_BLE_UN (OP_COMPARE (freg, freg)) {
- MONO_EMIT_BIALU (s, tree, OP_FBLE_UN, -1, state->left->left->reg1, state->left->right->reg1);
+stmt: CEE_BLE_UN (fpcflags) {
+ tree->opcode = OP_FBLE_UN;
+ mono_bblock_add_inst (s->cbb, tree);
}
stmt: CEE_POP (freg) "0" {
MONO_EMIT_NEW_COND_EXC (s, C, "OverflowException");
}
-reg: OP_LONG_SHRUN_32 (reg) {
+reg: OP_LSHR_UN_32 (reg) {
MONO_EMIT_BIALU_IMM (s, tree, OP_LSHR_UN_IMM, state->reg1, state->left->reg1, 32);
}
MONO_EMIT_NEW_BIALU (s, tree->opcode, state->reg1, state->left->reg1, state->right->reg1);
}
-lreg: OP_LONG_SHRUN_32 (lreg) {
+lreg: OP_LSHR_UN_32 (lreg) {
/* just move the upper half to the lower and zero the high word */
MONO_EMIT_NEW_UNALU (s, OP_MOVE, state->reg1, state->left->reg2);
MONO_EMIT_NEW_ICONST (s, state->reg2, 0);
}
-reg: OP_LCONV_TO_I4 (OP_LONG_SHRUN_32 (lreg)),
-reg: OP_LCONV_TO_U4 (OP_LONG_SHRUN_32 (lreg)) {
+reg: OP_LCONV_TO_I4 (OP_LSHR_UN_32 (lreg)),
+reg: OP_LCONV_TO_U4 (OP_LSHR_UN_32 (lreg)) {
MONO_EMIT_NEW_UNALU (s, OP_MOVE, state->reg1, state->left->left->reg2);
}
-lreg: OP_LONG_SHRUN_32 (CEE_LDIND_I8 (base)) {
+lreg: OP_LSHR_UN_32 (CEE_LDIND_I8 (base)) {
/* just move the upper half to the lower and zero the high word */
MONO_EMIT_NEW_LOAD_MEMBASE_OP (s, OP_LOADI4_MEMBASE, state->reg1,
state->left->left->tree->inst_basereg, state->left->left->tree->inst_offset + MINI_MS_WORD_OFFSET);
MONO_EMIT_NEW_ICONST (s, state->reg2, 0);
}
-reg: OP_LCONV_TO_I4 (OP_LONG_SHRUN_32 (CEE_LDIND_I8 (base))),
-reg: OP_LCONV_TO_U4 (OP_LONG_SHRUN_32 (CEE_LDIND_I8 (base))) {
+reg: OP_LCONV_TO_I4 (OP_LSHR_UN_32 (CEE_LDIND_I8 (base))),
+reg: OP_LCONV_TO_U4 (OP_LSHR_UN_32 (CEE_LDIND_I8 (base))) {
/* just move the upper half to the lower and zero the high word */
MONO_EMIT_NEW_LOAD_MEMBASE_OP (s, OP_LOADI4_MEMBASE, state->reg1,
state->left->left->left->tree->inst_basereg,
tree->sreg1 = state->left->reg1;
} else {
tree->opcode = OP_FMOVE;
+ tree->dreg = s390_f0;
tree->sreg1 = state->left->reg1;
}
mono_bblock_add_inst (s->cbb, tree);
#
stmt: OP_START_HANDLER,
-stmt: OP_ENDFINALLY,
+stmt: OP_ENDFINALLY {
+ mono_bblock_add_inst (s->cbb, tree);
+}
+
stmt: OP_ENDFILTER (reg) {
+ tree->sreg1 = state->left->reg1;
mono_bblock_add_inst (s->cbb, tree);
}
mono_bblock_add_inst (s->cbb, tree);
}
-stmt: OP_X86_OUTARG_ALIGN_STACK {
- MONO_EMIT_NEW_BIALU_IMM (s, OP_SUB_IMM, X86_ESP, X86_ESP, tree->inst_c0);
-}
-
reg: OP_LDADDR (OP_REGOFFSET),
reg: CEE_LDOBJ (OP_REGOFFSET) {
if (state->left->tree->inst_offset) {
# override the rules in inssel-float.brg that work for machines with FP registers
-freg: OP_FCONV_TO_R8 (freg) "0" {
- /* nothing to do */
-}
-
-freg: OP_FCONV_TO_R4 (freg) "0" {
- /* fixme: nothing to do ??*/
-}
-
reg: CEE_ADD(reg, CEE_LDIND_I4 (base)) {
MonoInst *base = state->right->left->tree;
MonoInst *target_label; \
target_label = mono_mempool_alloc0 ((cfg)->mempool, sizeof (MonoInst)); \
target_label->opcode = OP_LABEL; \
- MONO_INST_LIST_ADD (&target_label->node, \
- &(targetbb)->ins_list); \
+ target_label->next = (targetbb)->code; \
+ (targetbb)->code = target_label; \
target_label->inst_c0 = (targetbb)->native_offset; \
MONO_INST_NEW ((cfg), inst, op); \
inst->inst_i0 = target_label; \
gboolean
mono_fcgt_un (double a, double b)
{
- return a > b;
+ /* "cgt.un": the unordered variant must also return TRUE when either
+ * operand is NaN, which plain '>' never does. */
+ return isunordered (a, b) || a > b;
}
gboolean
gboolean
mono_fclt_un (double a, double b)
{
- return a < b;
+ /* "clt.un": the unordered variant must also return TRUE when either
+ * operand is NaN, which plain '<' never does. */
+ return isunordered (a, b) || a < b;
}
double
{
return mono_exception_from_token_two_strings (mono_defaults.corlib, token, arg1, arg2);
}
+
+/*
+ * mono_object_castclass:
+ *
+ *   Runtime helper for the castclass opcode: returns OBJ unchanged when it
+ * is NULL or an instance of KLASS, otherwise raises InvalidCastException.
+ */
+MonoObject*
+mono_object_castclass (MonoObject *obj, MonoClass *klass)
+{
+ if (!obj)
+ return NULL;
+
+ if (mono_object_isinst (obj, klass))
+ return obj;
+
+ mono_raise_exception (mono_exception_from_name (mono_defaults.corlib,
+ "System", "InvalidCastException"));
+
+ /* presumably not reached — mono_raise_exception () throws; this return
+ * keeps the compiler happy. */
+ return NULL;
+}
MonoException *mono_create_corlib_exception_2 (guint32 token, MonoString *arg1, MonoString *arg2) MONO_INTERNAL;
+MonoObject* mono_object_castclass (MonoObject *obj, MonoClass *klass) MONO_INTERNAL;
+
#endif /* __MONO_JIT_ICALLS_H__ */
#include "mini.h"
#include <mono/metadata/debug-helpers.h>
+static void mono_linear_scan2 (MonoCompile *cfg, GList *vars, GList *regs, regmask_t *used_mask);
+
GList *
mono_varlist_insert_sorted (MonoCompile *cfg, GList *list, MonoMethodVar *mv, int sort_type)
{
{
GList *l, *a, *active = NULL;
MonoMethodVar *vmv, *amv;
- int max_regs, gains [sizeof (regmask_t) * 8];
+ int max_regs, n_regvars;
+ int gains [sizeof (regmask_t) * 8];
regmask_t used_regs = 0;
gboolean cost_driven;
+ if (vars && (((MonoMethodVar*)vars->data)->interval != NULL)) {
+ mono_linear_scan2 (cfg, vars, regs, used_mask);
+ return;
+ }
+
cost_driven = TRUE;
#ifdef DEBUG_LSCAN
gains [amv->reg] += amv->spill_costs;
}
+ n_regvars = 0;
for (l = vars; l; l = l->next) {
vmv = l->data;
if (vmv->reg >= 0) {
if ((gains [vmv->reg] > mono_arch_regalloc_cost (cfg, vmv)) && (cfg->varinfo [vmv->idx]->opcode != OP_REGVAR)) {
+ if (cfg->verbose_level > 2) {
+ if (cfg->new_ir)
+ printf ("ALLOCATED R%d(%d) TO HREG %d COST %d\n", cfg->varinfo [vmv->idx]->dreg, vmv->idx, vmv->reg, vmv->spill_costs);
+ else
+ printf ("REGVAR %d C%d R%d\n", vmv->idx, vmv->spill_costs, vmv->reg);
+ }
cfg->varinfo [vmv->idx]->opcode = OP_REGVAR;
cfg->varinfo [vmv->idx]->dreg = vmv->reg;
- if (cfg->verbose_level > 2)
- printf ("REGVAR %d C%d R%d\n", vmv->idx, vmv->spill_costs, vmv->reg);
+ n_regvars ++;
} else {
if (cfg->verbose_level > 2)
- printf ("COSTLY: %s R%d C%d C%d %s\n", mono_method_full_name (cfg->method, TRUE), vmv->idx, vmv->spill_costs, mono_arch_regalloc_cost (cfg, vmv), mono_arch_regname (vmv->reg));
+ printf ("COSTLY: R%d C%d C%d %s\n", vmv->idx, vmv->spill_costs, mono_arch_regalloc_cost (cfg, vmv), mono_arch_regname (vmv->reg));
vmv->reg = -1;
}
}
if (vmv->reg == -1) {
- if ((vmv->range.first_use.abs_pos >> 16) == (vmv->range.last_use.abs_pos >> 16)) {
- /*
- * This variables is only used in a single basic block so
- * convert it into a virtual register.
- * FIXME: This increases register pressure in the local
- * allocator, leading to the well known 'branches inside
- * basic blocks screw up the allocator' problem.
- */
-#if 0
- cfg->varinfo [vmv->idx]->opcode = OP_REGVAR;
- cfg->varinfo [vmv->idx]->dreg = mono_regstate_next_int (cfg->rs);
-#endif
- }
- else {
- if (cfg->verbose_level > 2)
- printf ("NOT REGVAR: %d\n", vmv->idx);
- }
+ if (cfg->verbose_level > 2)
+ printf ("NOT REGVAR: %d\n", vmv->idx);
}
}
+ mono_jit_stats.regvars += n_regvars;
+
/* Compute used regs */
used_regs = 0;
for (l = vars; l; l = l->next) {
g_list_free (vars);
}
+/*
+ * compare_by_interval_start_pos_func:
+ *
+ *   GCompareFunc ordering MonoMethodVar:s by the start position of their
+ * first live interval range; variables with no range at all sort last.
+ */
+static gint
+compare_by_interval_start_pos_func (gconstpointer a, gconstpointer b)
+{
+ MonoMethodVar *v1 = (MonoMethodVar*)a;
+ MonoMethodVar *v2 = (MonoMethodVar*)b;
+
+ if (v1 == v2)
+ return 0;
+ else if (v1->interval->range && v2->interval->range)
+ return v1->interval->range->from - v2->interval->range->from;
+ else if (v1->interval->range)
+ return -1;
+ else if (v2->interval->range)
+ return 1;
+ else
+ /* Neither has a range: treat them as equal.  The previous code
+ * returned 1 for both argument orders here, making the comparator
+ * non-antisymmetric, which violates the g_list_sort () contract. */
+ return 0;
+}
+
+#if 0
+#define LSCAN_DEBUG(a) do { a; } while (0)
+#else
+#define LSCAN_DEBUG(a)
+#endif
+
+/* FIXME: This is x86 only */
+/*
+ * regalloc_cost:
+ *
+ *   Cost of allocating VMV to a hardware register: arguments need an extra
+ * load from the stack into the register, other variables cost nothing.
+ * Compared against the accumulated gains[] in mono_linear_scan2 ().
+ */
+static inline guint32
+regalloc_cost (MonoCompile *cfg, MonoMethodVar *vmv)
+{
+ MonoInst *ins = cfg->varinfo [vmv->idx];
+
+ /* Load if it is an argument */
+ return (ins->opcode == OP_ARG) ? 1 : 0;
+}
+
+/*
+ * mono_linear_scan2:
+ *
+ *   Linear scan register allocation over the precomputed live intervals of
+ * VARS (vmv->interval); mono_linear_scan () dispatches here when intervals
+ * are available.  Candidate hard registers come from REGS; the registers
+ * actually used are ORed into USED_MASK.  Variables that win a register are
+ * rewritten to OP_REGVAR in cfg->varinfo.  Interval splitting is not
+ * implemented, so an interval which cannot get a register for its whole
+ * lifetime is spilled.  Debug tracing is compiled out via LSCAN_DEBUG.
+ */
+void
+mono_linear_scan2 (MonoCompile *cfg, GList *vars, GList *regs, regmask_t *used_mask)
+{
+ GList *unhandled, *active, *inactive, *l;
+ MonoMethodVar *vmv;
+ gint32 free_pos [sizeof (regmask_t) * 8];
+ gint32 gains [sizeof (regmask_t) * 8];
+ regmask_t used_regs = 0;
+ int n_regs, n_regvars, i;
+
+ for (l = vars; l; l = l->next) {
+ vmv = l->data;
+ LSCAN_DEBUG (printf ("VAR R%d %08x %08x C%d\n", cfg->varinfo [vmv->idx]->dreg, vmv->range.first_use.abs_pos,
+ vmv->range.last_use.abs_pos, vmv->spill_costs));
+ }
+
+ LSCAN_DEBUG (printf ("Linear Scan 2 for %s:\n", mono_method_full_name (cfg->method, TRUE)));
+
+ n_regs = g_list_length (regs);
+ memset (gains, 0, n_regs * sizeof (gint32));
+ unhandled = g_list_sort (g_list_copy (vars), compare_by_interval_start_pos_func);
+ active = NULL;
+ inactive = NULL;
+
+ /* Process intervals in order of increasing start position */
+ while (unhandled) {
+ MonoMethodVar *current = unhandled->data;
+ int pos, reg, max_free_pos;
+ gboolean changed;
+
+ unhandled = g_list_delete_link (unhandled, unhandled);
+
+ LSCAN_DEBUG (printf ("Processing R%d: ", cfg->varinfo [current->idx]->dreg));
+ LSCAN_DEBUG (mono_linterval_print (current->interval));
+ LSCAN_DEBUG (printf ("\n"));
+
+ if (!current->interval->range)
+ continue;
+
+ pos = current->interval->range->from;
+
+ /* Check for intervals in active which expired or inactive */
+ changed = TRUE;
+ /* FIXME: Optimize this */
+ while (changed) {
+ changed = FALSE;
+ for (l = active; l != NULL; l = l->next) {
+ MonoMethodVar *v = (MonoMethodVar*)l->data;
+
+ if (v->interval->last_range->to < pos) {
+ active = g_list_delete_link (active, l);
+ LSCAN_DEBUG (printf ("Interval R%d has expired\n", cfg->varinfo [v->idx]->dreg));
+ changed = TRUE;
+ break;
+ }
+ else if (!mono_linterval_covers (v->interval, pos)) {
+ inactive = g_list_append (inactive, v);
+ active = g_list_delete_link (active, l);
+ LSCAN_DEBUG (printf ("Interval R%d became inactive\n", cfg->varinfo [v->idx]->dreg));
+ changed = TRUE;
+ break;
+ }
+ }
+ }
+
+ /* Check for intervals in inactive which expired or active */
+ changed = TRUE;
+ /* FIXME: Optimize this */
+ while (changed) {
+ changed = FALSE;
+ for (l = inactive; l != NULL; l = l->next) {
+ MonoMethodVar *v = (MonoMethodVar*)l->data;
+
+ if (v->interval->last_range->to < pos) {
+ inactive = g_list_delete_link (inactive, l);
+ LSCAN_DEBUG (printf ("\tInterval R%d has expired\n", cfg->varinfo [v->idx]->dreg));
+ changed = TRUE;
+ break;
+ }
+ else if (mono_linterval_covers (v->interval, pos)) {
+ active = g_list_append (active, v);
+ inactive = g_list_delete_link (inactive, l);
+ LSCAN_DEBUG (printf ("\tInterval R%d became active\n", cfg->varinfo [v->idx]->dreg));
+ changed = TRUE;
+ break;
+ }
+ }
+ }
+
+ /* Find a register for the current interval */
+ for (i = 0; i < n_regs; ++i)
+ free_pos [i] = ((gint32)0x7fffffff);
+
+ for (l = active; l != NULL; l = l->next) {
+ MonoMethodVar *v = (MonoMethodVar*)l->data;
+
+ if (v->reg >= 0) {
+ free_pos [v->reg] = 0;
+ LSCAN_DEBUG (printf ("\threg %d is busy (cost %d)\n", v->reg, v->spill_costs));
+ }
+ }
+
+ for (l = inactive; l != NULL; l = l->next) {
+ MonoMethodVar *v = (MonoMethodVar*)l->data;
+ gint32 intersect_pos;
+
+ if (v->reg >= 0) {
+ intersect_pos = mono_linterval_get_intersect_pos (current->interval, v->interval);
+ if (intersect_pos != -1) {
+ free_pos [v->reg] = intersect_pos;
+ LSCAN_DEBUG (printf ("\threg %d becomes free at %d\n", v->reg, intersect_pos));
+ }
+ }
+ }
+
+ /* Pick the register which stays free the longest */
+ max_free_pos = -1;
+ reg = -1;
+ for (i = 0; i < n_regs; ++i)
+ if (free_pos [i] > max_free_pos) {
+ reg = i;
+ max_free_pos = free_pos [i];
+ }
+
+ g_assert (reg != -1);
+
+ if (free_pos [reg] >= current->interval->last_range->to) {
+ /* Register available for whole interval */
+ current->reg = reg;
+ LSCAN_DEBUG (printf ("\tAssigned hreg %d to R%d\n", reg, cfg->varinfo [current->idx]->dreg));
+
+ active = g_list_append (active, current);
+ gains [current->reg] += current->spill_costs;
+ }
+ else {
+ /*
+ * free_pos [reg] > 0 means there is a register available for parts
+ * of the interval, so splitting it is possible. This is not yet
+ * supported, so we spill in this case too.
+ */
+
+ /* Spill an interval */
+
+ /* FIXME: Optimize the selection of the interval */
+
+ if (active) {
+ GList *min_spill_pos;
+#if 0
+ /*
+ * This favors registers with big spill costs, thus larger liveness ranges,
+ * thus actually leading to worse code size.
+ */
+ guint32 min_spill_value = G_MAXINT32;
+
+ for (l = active; l != NULL; l = l->next) {
+ vmv = (MonoMethodVar*)l->data;
+
+ if (vmv->spill_costs < min_spill_value) {
+ min_spill_pos = l;
+ min_spill_value = vmv->spill_costs;
+ }
+ }
+#else
+ /* Spill either the first active or the current interval */
+ min_spill_pos = active;
+#endif
+ vmv = (MonoMethodVar*)min_spill_pos->data;
+ if (vmv->spill_costs < current->spill_costs) {
+ // if (vmv->interval->last_range->to < current->interval->last_range->to) {
+ gains [vmv->reg] -= vmv->spill_costs;
+ vmv->reg = -1;
+ LSCAN_DEBUG (printf ("\tSpilled R%d\n", cfg->varinfo [vmv->idx]->dreg));
+ active = g_list_delete_link (active, min_spill_pos);
+ }
+ else
+ LSCAN_DEBUG (printf ("\tSpilled current (cost %d)\n", current->spill_costs));
+ }
+ else
+ LSCAN_DEBUG (printf ("\tSpilled current\n"));
+ }
+ }
+
+ /* Decrease the gains by the cost of saving+restoring the register */
+ for (i = 0; i < n_regs; ++i) {
+ if (gains [i]) {
+ /* FIXME: This is x86 only */
+ gains [i] -= cfg->method->save_lmf ? 1 : 2;
+ if (gains [i] < 0)
+ gains [i] = 0;
+ }
+ }
+
+ /* Do the actual register assignment */
+ n_regvars = 0;
+ for (l = vars; l; l = l->next) {
+ vmv = l->data;
+
+ if (vmv->reg >= 0) {
+ int reg_index = vmv->reg;
+
+ /* During allocation, vmv->reg is an index into the regs list */
+ vmv->reg = GPOINTER_TO_INT (g_list_nth_data (regs, vmv->reg));
+
+ if ((gains [reg_index] > regalloc_cost (cfg, vmv)) && (cfg->varinfo [vmv->idx]->opcode != OP_REGVAR)) {
+ if (cfg->verbose_level > 2)
+ printf ("REGVAR R%d G%d C%d %s\n", cfg->varinfo [vmv->idx]->dreg, gains [reg_index], regalloc_cost (cfg, vmv), mono_arch_regname (vmv->reg));
+ cfg->varinfo [vmv->idx]->opcode = OP_REGVAR;
+ cfg->varinfo [vmv->idx]->dreg = vmv->reg;
+ n_regvars ++;
+ }
+ else {
+ if (cfg->verbose_level > 2)
+ printf ("COSTLY: %s R%d G%d C%d %s\n", mono_method_full_name (cfg->method, TRUE), cfg->varinfo [vmv->idx]->dreg, gains [reg_index], regalloc_cost (cfg, vmv), mono_arch_regname (vmv->reg));
+ vmv->reg = -1;
+ }
+ }
+ }
+
+ mono_jit_stats.regvars += n_regvars;
+
+ /* Compute used regs */
+ used_regs = 0;
+ for (l = vars; l; l = l->next) {
+ vmv = l->data;
+
+ if (vmv->reg >= 0)
+ used_regs |= 1LL << vmv->reg;
+ }
+
+ *used_mask |= used_regs;
+
+ g_list_free (active);
+ g_list_free (inactive);
+}
#define BITS_PER_CHUNK 32
#endif
+static void mono_analyze_liveness2 (MonoCompile *cfg);
+
static void
optimize_initlocals (MonoCompile *cfg);
+static void
+optimize_initlocals2 (MonoCompile *cfg);
+
/* mono_bitset_mp_new:
*
* allocates a MonoBitSet inside a memory pool
*/
static inline MonoBitSet*
-mono_bitset_mp_new (MonoMemPool *mp, guint32 max_size)
+mono_bitset_mp_new (MonoMemPool *mp, guint32 size, guint32 max_size)
{
- int size = mono_bitset_alloc_size (max_size, 0);
- gpointer mem;
-
- mem = mono_mempool_alloc0 (mp, size);
+ guint8 *mem = mono_mempool_alloc0 (mp, size);
return mono_bitset_mem_new (mem, max_size, MONO_BITSET_DONT_FREE);
}
static inline MonoBitSet*
-mono_bitset_mp_new_noinit (MonoMemPool *mp, guint32 max_size)
+mono_bitset_mp_new_noinit (MonoMemPool *mp, guint32 size, guint32 max_size)
{
- int size = mono_bitset_alloc_size (max_size, 0);
- gpointer mem;
-
- mem = mono_mempool_alloc (mp, size);
+ guint8 *mem = mono_mempool_alloc (mp, size);
return mono_bitset_mem_new (mem, max_size, MONO_BITSET_DONT_FREE);
}
static void
update_gen_kill_set (MonoCompile *cfg, MonoBasicBlock *bb, MonoInst *inst, int inst_num)
{
- int arity = mono_burg_arity [inst->opcode];
+ int arity;
int max_vars = cfg->num_varinfo;
+ arity = mono_burg_arity [inst->opcode];
if (arity)
update_gen_kill_set (cfg, bb, inst->inst_i0, inst_num);
}
static void
-update_volatile (MonoCompile *cfg, MonoBasicBlock *bb, MonoInst *inst, int inst_num)
+update_volatile (MonoCompile *cfg, MonoBasicBlock *bb, MonoInst *inst)
{
int arity = mono_burg_arity [inst->opcode];
int max_vars = cfg->num_varinfo;
if (arity)
- update_volatile (cfg, bb, inst->inst_i0, inst_num);
+ update_volatile (cfg, bb, inst->inst_i0);
if (arity > 1)
- update_volatile (cfg, bb, inst->inst_i1, inst_num);
+ update_volatile (cfg, bb, inst->inst_i1);
if (inst->ssa_op & MONO_SSA_LOAD_STORE) {
MonoLocalVariableList* affected_variables;
static void
visit_bb (MonoCompile *cfg, MonoBasicBlock *bb, GSList **visited)
{
- int i, tree_num;
- MonoInst *inst;
+ int i;
+ MonoInst *ins;
if (g_slist_find (*visited, bb))
return;
- if (cfg->aliasing_info != NULL)
- mono_aliasing_initialize_code_traversal (cfg->aliasing_info, bb);
-
- tree_num = 0;
- MONO_BB_FOR_EACH_INS (bb, inst) {
- update_volatile (cfg, bb, inst, tree_num);
- tree_num++;
+ if (cfg->new_ir) {
+ for (ins = bb->code; ins; ins = ins->next) {
+ const char *spec = INS_INFO (ins->opcode);
+ int regtype, srcindex, sreg;
+
+ if (ins->opcode == OP_NOP)
+ continue;
+
+ /* DREG */
+ regtype = spec [MONO_INST_DEST];
+ g_assert (((ins->dreg == -1) && (regtype == ' ')) || ((ins->dreg != -1) && (regtype != ' ')));
+
+ if ((ins->dreg != -1) && get_vreg_to_inst (cfg, ins->dreg)) {
+ MonoInst *var = get_vreg_to_inst (cfg, ins->dreg);
+ int idx = var->inst_c0;
+ MonoMethodVar *vi = MONO_VARINFO (cfg, idx);
+
+ cfg->varinfo [vi->idx]->flags |= MONO_INST_VOLATILE;
+ }
+
+ /* SREGS */
+ for (srcindex = 0; srcindex < 2; ++srcindex) {
+ regtype = spec [(srcindex == 0) ? MONO_INST_SRC1 : MONO_INST_SRC2];
+ sreg = srcindex == 0 ? ins->sreg1 : ins->sreg2;
+
+ g_assert (((sreg == -1) && (regtype == ' ')) || ((sreg != -1) && (regtype != ' ')));
+ if ((sreg != -1) && get_vreg_to_inst (cfg, sreg)) {
+ MonoInst *var = get_vreg_to_inst (cfg, sreg);
+ int idx = var->inst_c0;
+ MonoMethodVar *vi = MONO_VARINFO (cfg, idx);
+
+ cfg->varinfo [vi->idx]->flags |= MONO_INST_VOLATILE;
+ }
+ }
+ }
+ } else {
+ if (cfg->aliasing_info != NULL)
+ mono_aliasing_initialize_code_traversal (cfg->aliasing_info, bb);
+
+ for (ins = bb->code; ins; ins = ins->next) {
+ update_volatile (cfg, bb, ins);
+ }
}
*visited = g_slist_append (*visited, bb);
g_slist_free (visited);
}
+/*
+ * update_live_range2:
+ *
+ *   Widen VAR's global live range so that it includes ABS_POS. Positions are
+ * encoded by the callers as (bb->dfn << 16) + offset-within-bblock, so ranges
+ * order first by bblock dfn, then by instruction position.
+ */
+static inline void
+update_live_range2 (MonoMethodVar *var, int abs_pos)
+{
+ if (var->range.first_use.abs_pos > abs_pos)
+ var->range.first_use.abs_pos = abs_pos;
+
+ if (var->range.last_use.abs_pos < abs_pos)
+ var->range.last_use.abs_pos = abs_pos;
+}
+
+/*
+ * analyze_liveness_bb:
+ *
+ *   Compute the GEN and KILL bitsets of BB for the new linear IR, extend the
+ * global live ranges of every variable referenced in BB, and accumulate the
+ * variables' spill costs. A vreg use only enters GEN if the vreg was not
+ * already killed (defined) earlier in this bblock.
+ */
+static void
+analyze_liveness_bb (MonoCompile *cfg, MonoBasicBlock *bb)
+{
+ MonoInst *ins;
+ int sreg, inst_num;
+ MonoMethodVar *vars = cfg->vars;
+ guint32 abs_pos = (bb->dfn << 16);
+
+ /* inst_num advances by 2 per ins so a def can be placed one position after
+ * the uses of the same instruction (see the DREG case below). */
+ for (inst_num = 0, ins = bb->code; ins; ins = ins->next, inst_num += 2) {
+ const char *spec = INS_INFO (ins->opcode);
+
+#ifdef DEBUG_LIVENESS
+ printf ("\t"); mono_print_ins (ins);
+#endif
+
+ if (ins->opcode == OP_NOP)
+ continue;
+
+ /* Taking a variable's address counts as a use of that variable */
+ if (ins->opcode == OP_LDADDR) {
+ MonoInst *var = ins->inst_p0;
+ int idx = var->inst_c0;
+ MonoMethodVar *vi = MONO_VARINFO (cfg, idx);
+
+#ifdef DEBUG_LIVENESS
+ printf ("\tGEN: R%d(%d)\n", var->dreg, idx);
+#endif
+ update_live_range2 (&vars [idx], abs_pos + inst_num);
+ if (!mono_bitset_test_fast (bb->kill_set, idx))
+ mono_bitset_set_fast (bb->gen_set, idx);
+ vi->spill_costs += SPILL_COST_INCREMENT;
+ }
+
+ /* SREGs must come first, so MOVE r <- r is handled correctly */
+
+ /* SREG1 */
+ sreg = ins->sreg1;
+ if ((spec [MONO_INST_SRC1] != ' ') && get_vreg_to_inst (cfg, sreg)) {
+ MonoInst *var = get_vreg_to_inst (cfg, sreg);
+ int idx = var->inst_c0;
+ MonoMethodVar *vi = MONO_VARINFO (cfg, idx);
+
+#ifdef DEBUG_LIVENESS
+ printf ("\tGEN: R%d(%d)\n", sreg, idx);
+#endif
+ update_live_range2 (&vars [idx], abs_pos + inst_num);
+ if (!mono_bitset_test_fast (bb->kill_set, idx))
+ mono_bitset_set_fast (bb->gen_set, idx);
+ vi->spill_costs += SPILL_COST_INCREMENT;
+ }
+
+ /* SREG2 */
+ sreg = ins->sreg2;
+ if ((spec [MONO_INST_SRC2] != ' ') && get_vreg_to_inst (cfg, sreg)) {
+ MonoInst *var = get_vreg_to_inst (cfg, sreg);
+ int idx = var->inst_c0;
+ MonoMethodVar *vi = MONO_VARINFO (cfg, idx);
+
+#ifdef DEBUG_LIVENESS
+ printf ("\tGEN: R%d(%d)\n", sreg, idx);
+#endif
+ update_live_range2 (&vars [idx], abs_pos + inst_num);
+ if (!mono_bitset_test_fast (bb->kill_set, idx))
+ mono_bitset_set_fast (bb->gen_set, idx);
+ vi->spill_costs += SPILL_COST_INCREMENT;
+ }
+
+ /* DREG */
+ if ((spec [MONO_INST_DEST] != ' ') && get_vreg_to_inst (cfg, ins->dreg)) {
+ MonoInst *var = get_vreg_to_inst (cfg, ins->dreg);
+ int idx = var->inst_c0;
+ MonoMethodVar *vi = MONO_VARINFO (cfg, idx);
+
+ if (MONO_IS_STORE_MEMBASE (ins)) {
+ /* For stores the dreg is read, not written, so treat it as a use (GEN) */
+ update_live_range2 (&vars [idx], abs_pos + inst_num);
+ if (!mono_bitset_test_fast (bb->kill_set, idx))
+ mono_bitset_set_fast (bb->gen_set, idx);
+ vi->spill_costs += SPILL_COST_INCREMENT;
+ } else {
+#ifdef DEBUG_LIVENESS
+ printf ("\tKILL: R%d(%d)\n", ins->dreg, idx);
+#endif
+ /* Defs are recorded one position after the uses of the same ins */
+ update_live_range2 (&vars [idx], abs_pos + inst_num + 1);
+ mono_bitset_set_fast (bb->kill_set, idx);
+ vi->spill_costs += SPILL_COST_INCREMENT;
+ }
+ }
+ }
+}
+
/* generic liveness analysis code. CFG specific parts are
* in update_gen_kill_set()
*/
MonoBasicBlock **worklist;
guint32 l_end;
int bitsize;
- guint8 *mem;
#ifdef DEBUG_LIVENESS
printf ("LIVENESS %s\n", mono_method_full_name (cfg->method, TRUE));
return;
bitsize = mono_bitset_alloc_size (max_vars, 0);
- mem = mono_mempool_alloc0 (cfg->mempool, cfg->num_bblocks * bitsize * 4);
- for (i = 0; i < cfg->num_bblocks; ++i) {
- MonoBasicBlock *bb = cfg->bblocks [i];
-
- bb->gen_set = mono_bitset_mem_new (mem, max_vars, MONO_BITSET_DONT_FREE);
- mem += bitsize;
- bb->kill_set = mono_bitset_mem_new (mem, max_vars, MONO_BITSET_DONT_FREE);
- mem += bitsize;
- /* Initialized later */
- bb->live_in_set = NULL;
- bb->live_out_set = mono_bitset_mem_new (mem, max_vars, MONO_BITSET_DONT_FREE);
- mem += bitsize;
- }
for (i = 0; i < max_vars; i ++) {
MONO_VARINFO (cfg, i)->range.first_use.abs_pos = ~ 0;
MONO_VARINFO (cfg, i)->range.last_use .abs_pos = 0;
MonoInst *inst;
int tree_num;
- if (cfg->aliasing_info != NULL)
- mono_aliasing_initialize_code_traversal (cfg->aliasing_info, bb);
-
- tree_num = 0;
- MONO_BB_FOR_EACH_INS (bb, inst) {
+ bb->gen_set = mono_bitset_mp_new (cfg->mempool, bitsize, max_vars);
+ bb->kill_set = mono_bitset_mp_new (cfg->mempool, bitsize, max_vars);
+
+ if (cfg->new_ir) {
+ analyze_liveness_bb (cfg, bb);
+ } else {
+ if (cfg->aliasing_info != NULL)
+ mono_aliasing_initialize_code_traversal (cfg->aliasing_info, bb);
+
+ tree_num = 0;
+ MONO_BB_FOR_EACH_INS (bb, inst) {
#ifdef DEBUG_LIVENESS
- mono_print_tree (inst); printf ("\n");
+ mono_print_tree (inst); printf ("\n");
#endif
- update_gen_kill_set (cfg, bb, inst, tree_num);
- tree_num++;
+ update_gen_kill_set (cfg, bb, inst, tree_num);
+ tree_num ++;
+ }
}
#ifdef DEBUG_LIVENESS
worklist [l_end ++] = bb;
in_worklist [bb->dfn] = TRUE;
+
+ /* Initialized later */
+ bb->live_in_set = NULL;
+ bb->live_out_set = mono_bitset_mp_new (cfg->mempool, bitsize, max_vars);
}
out_iter = 0;
printf ("\n");
#endif
+
if (bb->out_count == 0)
continue;
}
else {
changed = FALSE;
- mono_bitset_copyto (bb->live_out_set, old_live_out_set);
+ mono_bitset_copyto_fast (bb->live_out_set, old_live_out_set);
}
for (j = 0; j < bb->out_count; j++) {
out_bb = bb->out_bb [j];
if (!out_bb->live_in_set) {
- out_bb->live_in_set = mono_bitset_mem_new (mem, max_vars, MONO_BITSET_DONT_FREE);
- mem += bitsize;
+ out_bb->live_in_set = mono_bitset_mp_new_noinit (cfg->mempool, bitsize, max_vars);
- mono_bitset_copyto (out_bb->live_out_set, out_bb->live_in_set);
- mono_bitset_sub (out_bb->live_in_set, out_bb->kill_set);
- mono_bitset_union (out_bb->live_in_set, out_bb->gen_set);
+ mono_bitset_copyto_fast (out_bb->live_out_set, out_bb->live_in_set);
+ mono_bitset_sub_fast (out_bb->live_in_set, out_bb->kill_set);
+ mono_bitset_union_fast (out_bb->live_in_set, out_bb->gen_set);
}
- mono_bitset_union (bb->live_out_set, out_bb->live_in_set);
+ mono_bitset_union_fast (bb->live_out_set, out_bb->live_in_set);
}
if (changed || !mono_bitset_equal (old_live_out_set, bb->live_out_set)) {
- if (!bb->live_in_set) {
- bb->live_in_set = mono_bitset_mem_new (mem, max_vars, MONO_BITSET_DONT_FREE);
- mem += bitsize;
- }
- mono_bitset_copyto (bb->live_out_set, bb->live_in_set);
- mono_bitset_sub (bb->live_in_set, bb->kill_set);
- mono_bitset_union (bb->live_in_set, bb->gen_set);
+ if (!bb->live_in_set)
+ bb->live_in_set = mono_bitset_mp_new_noinit (cfg->mempool, bitsize, max_vars);
+ mono_bitset_copyto_fast (bb->live_out_set, bb->live_in_set);
+ mono_bitset_sub_fast (bb->live_in_set, bb->kill_set);
+ mono_bitset_union_fast (bb->live_in_set, bb->gen_set);
for (j = 0; j < bb->in_count; j++) {
MonoBasicBlock *in_bb = bb->in_bb [j];
}
}
- //printf ("IT: %d %d %d.\n", iterations, in_iter, out_iter);
+#ifdef DEBUG_LIVENESS
+ printf ("IT: %d %d.\n", cfg->num_bblocks, out_iter);
+#endif
mono_bitset_free (old_live_out_set);
MonoBasicBlock *bb = cfg->bblocks [i];
if (!bb->live_in_set) {
- bb->live_in_set = mono_bitset_mem_new (mem, max_vars, MONO_BITSET_DONT_FREE);
- mem += bitsize;
+ bb->live_in_set = mono_bitset_mp_new (cfg->mempool, bitsize, max_vars);
- mono_bitset_copyto (bb->live_out_set, bb->live_in_set);
- mono_bitset_sub (bb->live_in_set, bb->kill_set);
- mono_bitset_union (bb->live_in_set, bb->gen_set);
+ mono_bitset_copyto_fast (bb->live_out_set, bb->live_in_set);
+ mono_bitset_sub_fast (bb->live_in_set, bb->kill_set);
+ mono_bitset_union_fast (bb->live_in_set, bb->gen_set);
}
}
- /*
- * This code can be slow for large methods so inline the calls to
- * mono_bitset_test.
- */
for (i = 0; i < cfg->num_bblocks; ++i) {
MonoBasicBlock *bb = cfg->bblocks [i];
guint32 rem, max;
+ guint32 abs_pos = (bb->dfn << 16);
+ MonoMethodVar *vars = cfg->vars;
if (!bb->live_out_set)
continue;
for (j = 0; j < max; ++j) {
gsize bits_in;
gsize bits_out;
- int k, end;
+ int k;
bits_in = mono_bitset_get_fast (bb->live_in_set, j);
bits_out = mono_bitset_get_fast (bb->live_out_set, j);
- if (j == max)
- end = (j * BITS_PER_CHUNK) + rem;
- else
- end = (j * BITS_PER_CHUNK) + BITS_PER_CHUNK;
-
k = (j * BITS_PER_CHUNK);
while ((bits_in || bits_out)) {
if (bits_in & 1)
- update_live_range (cfg, k, bb->dfn, 0);
+ update_live_range2 (&vars [k], abs_pos + 0);
if (bits_out & 1)
- update_live_range (cfg, k, bb->dfn, 0xffff);
+ update_live_range2 (&vars [k], abs_pos + 0xffff);
bits_in >>= 1;
bits_out >>= 1;
k ++;
}
#endif
- if (!cfg->disable_initlocals_opt)
- optimize_initlocals (cfg);
+ if (cfg->new_ir) {
+ if (!cfg->disable_initlocals_opt)
+ optimize_initlocals2 (cfg);
+
+ /* This improves code size by about 5% but slows down compilation too much */
+ if (cfg->compile_aot)
+ mono_analyze_liveness2 (cfg);
+ }
+ else {
+ if (!cfg->disable_initlocals_opt)
+ optimize_initlocals (cfg);
+ }
+}
+
+/**
+ * optimize_initlocals2:
+ *
+ * Try to optimize away some of the redundant initialization code inserted because of
+ * 'locals init' using the liveness information.
+ */
+static void
+optimize_initlocals2 (MonoCompile *cfg)
+{
+ MonoBitSet *used;
+ MonoInst *ins;
+ MonoBasicBlock *initlocals_bb;
+
+ used = mono_bitset_new (cfg->next_vreg + 1, 0);
+
+ mono_bitset_clear_all (used);
+ /* NOTE(review): assumes the bblock right after bb_entry holds the 'locals init' code — confirm */
+ initlocals_bb = cfg->bb_entry->next_bb;
+ /* First pass: collect every vreg read in this bblock (a store's dreg is its base register, i.e. a read) */
+ for (ins = initlocals_bb->code; ins; ins = ins->next) {
+ const char *spec = INS_INFO (ins->opcode);
+
+ if (spec [MONO_INST_SRC1] != ' ')
+ mono_bitset_set_fast (used, ins->sreg1);
+ if (spec [MONO_INST_SRC2] != ' ')
+ mono_bitset_set_fast (used, ins->sreg2);
+ if (MONO_IS_STORE_MEMBASE (ins))
+ mono_bitset_set_fast (used, ins->dreg);
+ }
+
+ /* Second pass: nullify constant defs whose variable is never read here and not live on exit */
+ for (ins = initlocals_bb->code; ins; ins = ins->next) {
+ const char *spec = INS_INFO (ins->opcode);
+
+ /* Look for statements whose dest is not used in this bblock and not live on exit. */
+ if ((spec [MONO_INST_DEST] != ' ') && !MONO_IS_STORE_MEMBASE (ins)) {
+ MonoInst *var = get_vreg_to_inst (cfg, ins->dreg);
+
+ if (var && !mono_bitset_test_fast (used, ins->dreg) && !mono_bitset_test_fast (initlocals_bb->live_out_set, var->inst_c0) && (var != cfg->ret) && !(var->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT))) {
+ //printf ("DEAD: "); mono_print_ins (ins);
+ if ((ins->opcode == OP_ICONST) || (ins->opcode == OP_I8CONST) || (ins->opcode == OP_R8CONST)) {
+ NULLIFY_INS (ins);
+ MONO_VARINFO (cfg, var->inst_c0)->spill_costs -= 1;
+ /*
+ * We should shorten the liveness interval of these vars as well, but
+ * don't have enough info to do that.
+ */
+ }
+ }
+ }
+ }
+
+ /* NOTE(review): 'used' comes from mono_bitset_new — confirm g_free is the matching release (vs mono_bitset_free) */
+ g_free (used);
+}
+
+/*
+ * mono_linterval_add_range:
+ *
+ *   Add the inclusive range [FROM, TO] to INTERVAL, keeping the range list
+ * sorted by 'from' and merging with an adjacent range where possible.
+ * Ranges are allocated from cfg->mempool, so nothing needs to be freed.
+ */
+void
+mono_linterval_add_range (MonoCompile *cfg, MonoLiveInterval *interval, int from, int to)
+{
+ MonoLiveRange2 *prev_range, *next_range, *new_range;
+
+ g_assert (to >= from);
+
+ /* Optimize for extending the first interval backwards */
+ if (G_LIKELY (interval->range && (interval->range->from > from) && (interval->range->from == to))) {
+ interval->range->from = from;
+ return;
+ }
+
+ /* Find a place in the list for the new range */
+ prev_range = NULL;
+ next_range = interval->range;
+ while ((next_range != NULL) && (next_range->from <= from)) {
+ prev_range = next_range;
+ next_range = next_range->next;
+ }
+
+ if (prev_range && prev_range->to == from) {
+ /* Merge with previous */
+ prev_range->to = to;
+ } else if (next_range && next_range->from == to) {
+ /* Merge with next */
+ next_range->from = from;
+ } else {
+ /* Insert it */
+ new_range = mono_mempool_alloc (cfg->mempool, sizeof (MonoLiveRange2));
+ new_range->from = from;
+ new_range->to = to;
+ new_range->next = NULL;
+
+ if (prev_range)
+ prev_range->next = new_range;
+ else
+ interval->range = new_range;
+ if (next_range)
+ new_range->next = next_range;
+ else
+ interval->last_range = new_range;
+ }
+
+ /* FIXME: Merge intersecting ranges */
+}
+
+/* Print INTERVAL's ranges as "[from-to] " pairs (hex), without a trailing newline */
+void
+mono_linterval_print (MonoLiveInterval *interval)
+{
+ MonoLiveRange2 *range;
+
+ for (range = interval->range; range != NULL; range = range->next)
+ printf ("[%x-%x] ", range->from, range->to);
+}
+
+/* Same as mono_linterval_print (), followed by a newline */
+void
+mono_linterval_print_nl (MonoLiveInterval *interval)
+{
+ mono_linterval_print (interval);
+ printf ("\n");
+}
+
+/**
+ * mono_linterval_covers:
+ *
+ * Return whether INTERVAL covers the position POS.
+ */
+gboolean
+mono_linterval_covers (MonoLiveInterval *interval, int pos)
+{
+ MonoLiveRange2 *range;
+
+ for (range = interval->range; range != NULL; range = range->next) {
+ if (pos >= range->from && pos <= range->to)
+ return TRUE;
+ /* The range list is sorted by 'from', so we can stop early */
+ if (range->from > pos)
+ return FALSE;
+ }
+
+ return FALSE;
+}
+
+/**
+ * mono_linterval_get_intersect_pos:
+ *
+ * Determine whether I1 and I2 intersect, and if they do, return the first
+ * point of intersection. Otherwise, return -1.
+ */
+gint32
+mono_linterval_get_intersect_pos (MonoLiveInterval *i1, MonoLiveInterval *i2)
+{
+ MonoLiveRange2 *r1, *r2;
+
+ /* FIXME: Optimize this */
+ for (r1 = i1->range; r1 != NULL; r1 = r1->next) {
+ for (r2 = i2->range; r2 != NULL; r2 = r2->next) {
+ /* NOTE(review): this test is exclusive at the endpoints while
+ * mono_linterval_covers () is inclusive — confirm the half-open
+ * treatment of range boundaries is intended here. */
+ if (r2->to > r1->from && r2->from < r1->to) {
+ if (r2->from <= r1->from)
+ return r1->from;
+ else
+ return r2->from;
+ }
+ }
+ }
+
+ return -1;
+}
+
+/**
+ * mono_linterval_split:
+ *
+ * Split INTERVAL at POS and store the newly created intervals into I1 and I2.
+ * POS becomes part of I2.
+ */
+void
+mono_linterval_split (MonoCompile *cfg, MonoLiveInterval *interval, MonoLiveInterval **i1, MonoLiveInterval **i2, int pos)
+{
+ MonoLiveRange2 *r;
+
+ g_assert (pos > interval->range->from && pos <= interval->last_range->to);
+
+ *i1 = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoLiveInterval));
+ *i2 = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoLiveInterval));
+
+ for (r = interval->range; r; r = r->next) {
+ if (pos > r->to) {
+ /* Add it to the first child */
+ mono_linterval_add_range (cfg, *i1, r->from, r->to);
+ } else if (pos > r->from && pos <= r->to) {
+ /* Split at pos */
+ mono_linterval_add_range (cfg, *i1, r->from, pos - 1);
+ mono_linterval_add_range (cfg, *i2, pos, r->to);
+ } else {
+ /* Add it to the second child */
+ mono_linterval_add_range (cfg, *i2, r->from, r->to);
+ }
+ }
+}
+
+/* Change '#if 0' to '#if 1' to enable the liveness2 tracing below */
+#if 0
+#define LIVENESS_DEBUG(a) do { a; } while (0)
+#else
+#define LIVENESS_DEBUG(a)
+#endif
+
+/*
+ * update_liveness2:
+ *
+ *   Process INS during a backwards walk over a bblock. Uses record the
+ * position of the last use of each vreg in LAST_USE; a def closes the open
+ * range by adding [def-position, last-use) to the variable's live interval.
+ * A def with no later use is either dead-code-eliminated (constant loads
+ * only) or given a minimal one-position range.
+ * NOTE(review): the 'set_volatile' parameter is not referenced in this
+ * function — confirm it can be dropped.
+ */
+static inline void
+update_liveness2 (MonoCompile *cfg, MonoInst *ins, gboolean set_volatile, int inst_num, gint32 *last_use)
+{
+ const char *spec = INS_INFO (ins->opcode);
+ int sreg;
+
+ LIVENESS_DEBUG (printf ("\t%x: ", inst_num); mono_print_ins (ins));
+
+ if (ins->opcode == OP_NOP)
+ return;
+
+ /* DREG */
+ if ((spec [MONO_INST_DEST] != ' ') && get_vreg_to_inst (cfg, ins->dreg)) {
+ MonoInst *var = get_vreg_to_inst (cfg, ins->dreg);
+ int idx = var->inst_c0;
+ MonoMethodVar *vi = MONO_VARINFO (cfg, idx);
+
+ if (MONO_IS_STORE_MEMBASE (ins)) {
+ /* A store reads its dreg (the base register), so record it as a use */
+ if (last_use [idx] == 0) {
+ LIVENESS_DEBUG (printf ("\tlast use of R%d set to %x\n", ins->dreg, inst_num));
+ last_use [idx] = inst_num;
+ }
+ } else {
+ if (last_use [idx] > 0) {
+ LIVENESS_DEBUG (printf ("\tadd range to R%d: [%x, %x)\n", ins->dreg, inst_num, last_use [idx]));
+ mono_linterval_add_range (cfg, vi->interval, inst_num, last_use [idx]);
+ last_use [idx] = 0;
+ }
+ else {
+ /* Try dead code elimination */
+ if ((var != cfg->ret) && !(var->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT)) && ((ins->opcode == OP_ICONST) || (ins->opcode == OP_I8CONST) || (ins->opcode == OP_R8CONST))) {
+ LIVENESS_DEBUG (printf ("\tdead def of R%d, eliminated\n", ins->dreg));
+ ins->opcode = OP_NOP;
+ ins->dreg = ins->sreg1 = ins->sreg2 = -1;
+ return;
+ }
+
+ LIVENESS_DEBUG (printf ("\tdead def of R%d, add range to R%d: [%x, %x]\n", ins->dreg, ins->dreg, inst_num, inst_num + 1));
+ mono_linterval_add_range (cfg, vi->interval, inst_num, inst_num + 1);
+ }
+ }
+ }
+
+ /* SREG1 */
+ sreg = ins->sreg1;
+ if ((spec [MONO_INST_SRC1] != ' ') && get_vreg_to_inst (cfg, sreg)) {
+ MonoInst *var = get_vreg_to_inst (cfg, sreg);
+ int idx = var->inst_c0;
+
+ if (last_use [idx] == 0) {
+ LIVENESS_DEBUG (printf ("\tlast use of R%d set to %x\n", sreg, inst_num));
+ last_use [idx] = inst_num;
+ }
+ }
+
+ /* SREG2 */
+ sreg = ins->sreg2;
+ if ((spec [MONO_INST_SRC2] != ' ') && get_vreg_to_inst (cfg, sreg)) {
+ MonoInst *var = get_vreg_to_inst (cfg, sreg);
+ int idx = var->inst_c0;
+
+ if (last_use [idx] == 0) {
+ LIVENESS_DEBUG (printf ("\tlast use of R%d set to %x\n", sreg, inst_num));
+ last_use [idx] = inst_num;
+ }
+ }
+}
+
+/*
+ * mono_analyze_liveness2:
+ *
+ *   A second, more precise liveness pass which builds per-variable live
+ * intervals (vi->interval, a MonoLiveInterval) instead of just first/last
+ * positions. Bblocks are processed in reverse dfn order and each bblock's
+ * instructions backwards, so new ranges are mostly prepended, which is the
+ * cheap path in mono_linterval_add_range ().
+ */
+static void
+mono_analyze_liveness2 (MonoCompile *cfg)
+{
+ int bnum, idx, i, j, nins, rem, max, max_vars, block_from, block_to, pos, reverse_len;
+ gint32 *last_use;
+ static guint32 disabled = -1;
+ MonoInst **reverse;
+
+ /* Escape hatch: setting the DISABLED env var turns this pass off */
+ if (disabled == -1)
+ disabled = getenv ("DISABLED") != NULL;
+
+ if (disabled)
+ return;
+
+ LIVENESS_DEBUG (printf ("LIVENESS 2 %s\n", mono_method_full_name (cfg->method, TRUE)));
+
+ /*
+ if (strstr (cfg->method->name, "test_") != cfg->method->name)
+ return;
+ */
+
+ max_vars = cfg->num_varinfo;
+ last_use = g_new0 (gint32, max_vars);
+
+ reverse_len = 1024;
+ reverse = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * reverse_len);
+
+ for (idx = 0; idx < max_vars; ++idx) {
+ MonoMethodVar *vi = MONO_VARINFO (cfg, idx);
+
+ vi->interval = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoLiveInterval));
+ }
+
+ /*
+ * Process bblocks in reverse order, so the addition of new live ranges
+ * to the intervals is faster.
+ */
+ for (bnum = cfg->num_bblocks - 1; bnum >= 0; --bnum) {
+ MonoBasicBlock *bb = cfg->bblocks [bnum];
+ MonoInst *ins;
+
+ /* Positions are (dfn << 16) + offset; last_use == 0 means "no use seen" */
+ block_from = (bb->dfn << 16) + 1; /* so pos > 0 */
+ if (bnum < cfg->num_bblocks - 1)
+ /* Beginning of the next bblock */
+ block_to = (cfg->bblocks [bnum + 1]->dfn << 16) + 1;
+ else
+ block_to = (bb->dfn << 16) + 0xffff;
+
+ LIVENESS_DEBUG (printf ("LIVENESS BLOCK BB%d:\n", bb->block_num));
+
+ memset (last_use, 0, max_vars * sizeof (gint32));
+
+ /* For variables in bb->live_out, set last_use to block_to */
+
+ rem = max_vars % BITS_PER_CHUNK;
+ max = ((max_vars + (BITS_PER_CHUNK -1)) / BITS_PER_CHUNK);
+ for (j = 0; j < max; ++j) {
+ gsize bits_out;
+ int k;
+
+ bits_out = mono_bitset_get_fast (bb->live_out_set, j);
+ k = (j * BITS_PER_CHUNK);
+ while (bits_out) {
+ if (bits_out & 1) {
+ LIVENESS_DEBUG (printf ("Var R%d live at exit, set last_use to %x\n", cfg->varinfo [k]->dreg, block_to));
+ last_use [k] = block_to;
+ }
+ bits_out >>= 1;
+ k ++;
+ }
+ }
+
+ /* The return variable is kept alive until the end of every bblock */
+ if (cfg->ret)
+ last_use [cfg->ret->inst_c0] = block_to;
+
+ /* Collect the instructions into 'reverse' so they can be walked backwards */
+ for (nins = 0, pos = block_from, ins = bb->code; ins; ins = ins->next, ++nins, ++pos) {
+ if (nins >= reverse_len) {
+ int new_reverse_len = reverse_len * 2;
+ MonoInst **new_reverse = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * new_reverse_len);
+ memcpy (new_reverse, reverse, sizeof (MonoInst*) * reverse_len);
+ reverse = new_reverse;
+ reverse_len = new_reverse_len;
+ }
+
+ reverse [nins] = ins;
+ }
+
+ /* Process instructions backwards */
+ for (i = nins - 1; i >= 0; --i) {
+ MonoInst *ins = (MonoInst*)reverse [i];
+
+ update_liveness2 (cfg, ins, FALSE, pos, last_use);
+
+ pos --;
+ }
+
+ for (idx = 0; idx < max_vars; ++idx) {
+ MonoMethodVar *vi = MONO_VARINFO (cfg, idx);
+
+ if (last_use [idx] != 0) {
+ /* Live at exit, not written -> live on enter */
+ LIVENESS_DEBUG (printf ("Var R%d live at enter, add range to R%d: [%x, %x)\n", cfg->varinfo [idx]->dreg, cfg->varinfo [idx]->dreg, block_from, last_use [idx]));
+ mono_linterval_add_range (cfg, vi->interval, block_from, last_use [idx]);
+ }
+ }
+ }
+
+ /*
+ * Arguments need to have their live ranges extended to the beginning of
+ * the method to account for the arg reg/memory -> global register copies
+ * in the prolog (bug #74992).
+ */
+ for (i = 0; i < max_vars; i ++) {
+ MonoMethodVar *vi = MONO_VARINFO (cfg, i);
+ if (cfg->varinfo [vi->idx]->opcode == OP_ARG)
+ mono_linterval_add_range (cfg, vi->interval, 0, 1);
+ }
+
+#if 0
+ for (idx = 0; idx < max_vars; ++idx) {
+ MonoMethodVar *vi = MONO_VARINFO (cfg, idx);
+
+ LIVENESS_DEBUG (printf ("LIVENESS R%d: ", cfg->varinfo [idx]->dreg));
+ LIVENESS_DEBUG (mono_linterval_print (vi->interval));
+ LIVENESS_DEBUG (printf ("\n"));
+ }
+#endif
+
+ g_free (last_use);
static void
g_free (used);
}
+
#include <mono/metadata/opcodes.h>
#include "mini.h"
+/* FIXME: Get rid of these */
+/* Allocate and initialize a two-source ALU instruction */
+#define NEW_BIALU(cfg,dest,op,dr,sr1,sr2) do { \
+ MONO_INST_NEW ((cfg), (dest), (op)); \
+ (dest)->dreg = (dr); \
+ (dest)->sreg1 = (sr1); \
+ (dest)->sreg2 = (sr2); \
+ } while (0)
+
+/* Same, but with an immediate second operand. NOTE(review): the immediate is
+ * stored through inst_p1 — presumably unioned with inst_imm; confirm. */
+#define NEW_BIALU_IMM(cfg,dest,op,dr,sr,imm) do { \
+ MONO_INST_NEW ((cfg), (dest), (op)); \
+ (dest)->dreg = (dr); \
+ (dest)->sreg1 = (sr); \
+ (dest)->inst_p1 = (gpointer)(gssize)(imm); \
+ } while (0)
+
+/* Default for architectures that define no extra membase opcodes */
+#ifndef MONO_ARCH_IS_OP_MEMBASE
+#define MONO_ARCH_IS_OP_MEMBASE(opcode) FALSE
+#endif
#define MONO_DEBUG_LOCAL_PROP 0
#define MONO_DEBUG_TREE_MOVER 0
static void
mono_local_cprop_bb (MonoCompile *cfg, TreeMover *tree_mover, MonoBasicBlock *bb, MonoInst **acp, int acp_size)
{
- MonoInst *tree;
+ MonoInst *tree = bb->code;
int i;
- if (MONO_INST_LIST_EMPTY (&bb->ins_list))
+ if (!tree)
return;
if (tree_mover != NULL) {
TreeMoverTreeMove *move;
/* Move the movable trees */
if (MONO_DEBUG_TREE_MOVER) {
- printf ("BEFORE TREE MOVER START\n");
- mono_print_code (cfg);
- printf ("BEFORE TREE MOVER END\n");
+ mono_print_code (cfg, "BEFORE TREE MOVER");
printf ("Applying tree mover...\n");
}
for (move = tree_mover->scheduled_moves; move != NULL; move = move->next) {
apply_tree_mover (tree_mover, move);
}
if (MONO_DEBUG_TREE_MOVER) {
- printf ("AFTER TREE MOVER START\n");
- mono_print_code (cfg);
- printf ("AFTER TREE MOVER END\n");
+ mono_print_code (cfg, "AFTER TREE MOVER");
}
/* Global cleanup of tree mover memory */
mono_mempool_destroy(tree_mover->pool);
}
}
+
+/*
+ * mono_bitset_mp_new_noinit:
+ *
+ *   Allocate a bitset of MAX_SIZE bits from MP *without* zero-initializing
+ * the backing memory (mono_mempool_alloc, not alloc0) — the caller must fill
+ * it before reading. The memory is owned by the mempool, hence DONT_FREE.
+ */
+static inline MonoBitSet*
+mono_bitset_mp_new_noinit (MonoMemPool *mp, guint32 max_size)
+{
+ int size = mono_bitset_alloc_size (max_size, 0);
+ gpointer mem;
+
+ mem = mono_mempool_alloc (mp, size);
+ return mono_bitset_mem_new (mem, max_size, MONO_BITSET_DONT_FREE);
+}
+
+/*
+ * mono_local_cprop2:
+ *
+ * A combined local copy and constant propagation pass.
+ */
+void
+mono_local_cprop2 (MonoCompile *cfg)
+{
+ MonoBasicBlock *bb;
+ MonoInst **defs;
+ gint32 *def_index;
+ int max;
+
+restart:
+
+ max = cfg->next_vreg;
+ defs = mono_mempool_alloc (cfg->mempool, sizeof (MonoInst*) * (cfg->next_vreg + 1));
+ def_index = mono_mempool_alloc (cfg->mempool, sizeof (guint32) * (cfg->next_vreg + 1));
+
+ for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
+ MonoInst *ins;
+ int ins_index;
+ int last_call_index;
+
+ /* Manually init the defs entries used by the bblock */
+ MONO_BB_FOR_EACH_INS (bb, ins) {
+ if ((ins->dreg != -1) && (ins->dreg < max)) {
+ defs [ins->dreg] = NULL;
+#if SIZEOF_VOID_P == 4
+ defs [ins->dreg + 1] = NULL;
+#endif
+ }
+ if ((ins->sreg1 != -1) && (ins->sreg1 < max)) {
+ defs [ins->sreg1] = NULL;
+#if SIZEOF_VOID_P == 4
+ defs [ins->sreg1 + 1] = NULL;
+#endif
+ }
+ if ((ins->sreg2 != -1) && (ins->sreg2 < max)) {
+ defs [ins->sreg2] = NULL;
+#if SIZEOF_VOID_P == 4
+ defs [ins->sreg2 + 1] = NULL;
+#endif
+ }
+ }
+
+ ins_index = 0;
+ last_call_index = -1;
+ MONO_BB_FOR_EACH_INS (bb, ins) {
+ const char *spec = INS_INFO (ins->opcode);
+ int regtype, srcindex, sreg;
+
+ if (ins->opcode == OP_NOP) {
+ MONO_DELETE_INS (bb, ins);
+ continue;
+ }
+
+ g_assert (ins->opcode > MONO_CEE_LAST);
+
+ /* FIXME: Optimize this */
+ if (ins->opcode == OP_LDADDR) {
+ MonoInst *var = ins->inst_p0;
+
+ defs [var->dreg] = NULL;
+ /*
+ if (!MONO_TYPE_ISSTRUCT (var->inst_vtype))
+ break;
+ */
+ }
+
+ if (MONO_IS_STORE_MEMBASE (ins)) {
+ sreg = ins->dreg;
+ regtype = 'i';
+
+ if ((regtype == 'i') && (sreg != -1) && defs [sreg]) {
+ MonoInst *def = defs [sreg];
+
+ if ((def->opcode == OP_MOVE) && (!defs [def->sreg1] || (def_index [def->sreg1] < def_index [sreg])) && !vreg_is_volatile (cfg, def->sreg1)) {
+ int vreg = def->sreg1;
+ //printf ("CCOPY: R%d -> R%d\n", sreg, vreg);
+ ins->dreg = vreg;
+ }
+ }
+ }
+
+ for (srcindex = 0; srcindex < 2; ++srcindex) {
+ MonoInst *def;
+
+ regtype = srcindex == 0 ? spec [MONO_INST_SRC1] : spec [MONO_INST_SRC2];
+ sreg = srcindex == 0 ? ins->sreg1 : ins->sreg2;
+
+ if ((regtype == ' ') || (sreg == -1) || (!defs [sreg]))
+ continue;
+
+ def = defs [sreg];
+
+ /* Copy propagation */
+ /*
+ * The first check makes sure the source of the copy did not change since
+ * the copy was made.
+ * The second check avoids volatile variables.
+ * The third check avoids copy propagating local vregs through a call,
+ * since the lvreg will be spilled
+ * The fourth check avoids copy propagating a vreg in cases where
+ * it would be eliminated anyway by reverse copy propagation later,
+ * because propagating it would create another use for it, thus making
+ * it impossible to use reverse copy propagation.
+ */
+ /* Enabling this for floats trips up the fp stack */
+ /*
+ * Enabling this for floats on amd64 seems to cause a failure in
+ * basic-math.cs, most likely because it gets rid of some r8->r4
+ * conversions.
+ */
+ if (MONO_IS_MOVE (def) &&
+ (!defs [def->sreg1] || (def_index [def->sreg1] < def_index [sreg])) &&
+ !vreg_is_volatile (cfg, def->sreg1) &&
+ /* This avoids propagating local vregs across calls */
+ ((get_vreg_to_inst (cfg, def->sreg1) || !defs [def->sreg1] || (def_index [def->sreg1] >= last_call_index) || (def->opcode == OP_VMOVE))) &&
+ !(defs [def->sreg1] && defs [def->sreg1]->next == def) &&
+ (!MONO_ARCH_USE_FPSTACK || (def->opcode != OP_FMOVE)) &&
+ (def->opcode != OP_FMOVE)) {
+ int vreg = def->sreg1;
+
+ //printf ("CCOPY: R%d -> R%d\n", sreg, vreg);
+ if (srcindex == 0)
+ ins->sreg1 = vreg;
+ else
+ ins->sreg2 = vreg;
+
+ /* Allow further iterations */
+ srcindex = -1;
+ continue;
+ }
+
+ /* Constant propagation */
+ /* FIXME: Make is_inst_imm a macro */
+ /* FIXME: Make is_inst_imm take an opcode argument */
+ /* is_inst_imm is only needed for binops */
+ if ((((def->opcode == OP_ICONST) || ((sizeof (gpointer) == 8) && (def->opcode == OP_I8CONST))) &&
+ (((srcindex == 0) && (ins->sreg2 == -1)) || mono_arch_is_inst_imm (def->inst_c0))) ||
+ (!MONO_ARCH_USE_FPSTACK && (def->opcode == OP_R8CONST))) {
+ guint32 opcode2;
+
+ /* srcindex == 1 -> binop, ins->sreg2 == -1 -> unop */
+ if ((srcindex == 1) && (ins->sreg1 != -1) && defs [ins->sreg1] && (defs [ins->sreg1]->opcode == OP_ICONST) && defs [ins->sreg2]) {
+ /* Both arguments are constants, perform cfold */
+ mono_constant_fold_ins2 (cfg, ins, defs [ins->sreg1], defs [ins->sreg2], TRUE);
+ } else if ((srcindex == 0) && (ins->sreg2 != -1) && defs [ins->sreg2]) {
+ /* Arg 1 is constant, swap arguments if possible */
+ int opcode = ins->opcode;
+ mono_constant_fold_ins2 (cfg, ins, defs [ins->sreg1], defs [ins->sreg2], TRUE);
+ if (ins->opcode != opcode) {
+ /* Allow further iterations */
+ srcindex = -1;
+ continue;
+ }
+ } else if ((srcindex == 0) && (ins->sreg2 == -1)) {
+ /* Constant unop, perform cfold */
+ mono_constant_fold_ins2 (cfg, ins, defs [ins->sreg1], NULL, TRUE);
+ }
+
+ opcode2 = mono_op_to_op_imm (ins->opcode);
+ if ((opcode2 != -1) && mono_arch_is_inst_imm (def->inst_c0) && ((srcindex == 1) || (ins->sreg2 == -1))) {
+ ins->opcode = opcode2;
+ if ((def->opcode == OP_I8CONST) && (sizeof (gpointer) == 4)) {
+ ins->inst_ls_word = def->inst_ls_word;
+ ins->inst_ms_word = def->inst_ms_word;
+ } else {
+ ins->inst_imm = def->inst_c0;
+ }
+ if (srcindex == 0)
+ ins->sreg1 = -1;
+ else
+ ins->sreg2 = -1;
+
+ if ((opcode2 == OP_VOIDCALL) || (opcode2 == OP_CALL) || (opcode2 == OP_LCALL) || (opcode2 == OP_FCALL))
+ ((MonoCallInst*)ins)->fptr = (gpointer)ins->inst_imm;
+
+ /* Allow further iterations */
+ srcindex = -1;
+ continue;
+ }
+ else {
+ /* Special cases */
+#if defined(__i386__) || defined(__x86__64__)
+ if ((ins->opcode == OP_X86_LEA) && (srcindex == 1)) {
+#if SIZEOF_VOID_P == 8
+ /* FIXME: Use OP_PADD_IMM when the new JIT is done */
+ ins->opcode = OP_LADD_IMM;
+#else
+ ins->opcode = OP_ADD_IMM;
+#endif
+ ins->inst_imm += def->inst_c0 << ins->backend.shift_amount;
+ ins->sreg2 = -1;
+ }
+#endif
+ opcode2 = mono_load_membase_to_load_mem (ins->opcode);
+ if ((srcindex == 0) && (opcode2 != -1) && mono_arch_is_inst_imm (def->inst_c0)) {
+ ins->opcode = opcode2;
+ ins->inst_imm = def->inst_c0 + ins->inst_offset;
+ ins->sreg1 = -1;
+ }
+ }
+ }
+ else if (((def->opcode == OP_ADD_IMM) || (def->opcode == OP_LADD_IMM)) && (MONO_IS_LOAD_MEMBASE (ins) || MONO_ARCH_IS_OP_MEMBASE (ins->opcode))) {
+ /* ADD_IMM is created by spill_global_vars */
+ /*
+ * We have to guarantee that def->sreg1 haven't changed since def->dreg
+ * was defined. cfg->frame_reg is assumed to remain constant.
+ */
+ if ((def->sreg1 == cfg->frame_reg) || ((def->next == ins) && (def->dreg != def->sreg1))) {
+ ins->inst_basereg = def->sreg1;
+ ins->inst_offset += def->inst_imm;
+ }
+ } else if ((ins->opcode == OP_ISUB_IMM) && (def->opcode == OP_IADD_IMM) && (def->next == ins)) {
+ ins->sreg1 = def->sreg1;
+ ins->inst_imm -= def->inst_imm;
+ } else if ((ins->opcode == OP_IADD_IMM) && (def->opcode == OP_ISUB_IMM) && (def->next == ins)) {
+ ins->sreg1 = def->sreg1;
+ ins->inst_imm -= def->inst_imm;
+ } else if (ins->opcode == OP_STOREI1_MEMBASE_REG &&
+ (def->opcode == OP_ICONV_TO_U1 || def->opcode == OP_ICONV_TO_I1 || def->opcode == OP_SEXT_I4 || (SIZEOF_VOID_P == 8 && def->opcode == OP_LCONV_TO_U1)) &&
+ (!defs [def->sreg1] || (def_index [def->sreg1] < def_index [sreg]))) {
+ /* Avoid needless sign extension */
+ ins->sreg1 = def->sreg1;
+ } else if (ins->opcode == OP_STOREI2_MEMBASE_REG &&
+ (def->opcode == OP_ICONV_TO_U2 || def->opcode == OP_ICONV_TO_I2 || def->opcode == OP_SEXT_I4 || (SIZEOF_VOID_P == 8 && def->opcode == OP_LCONV_TO_I2)) &&
+ (!defs [def->sreg1] || (def_index [def->sreg1] < def_index [sreg]))) {
+ /* Avoid needless sign extension */
+ ins->sreg1 = def->sreg1;
+ }
+ }
+
+ /* Do strength reduction here */
+ /* FIXME: Add long/float */
+ switch (ins->opcode) {
+ case OP_MOVE:
+ if (ins->dreg == ins->sreg1) {
+ MONO_DELETE_INS (bb, ins);
+ spec = INS_INFO (ins->opcode);
+ }
+ break;
+ case OP_ADD_IMM:
+ case OP_IADD_IMM:
+ case OP_SUB_IMM:
+ case OP_ISUB_IMM:
+#if SIZEOF_VOID_P == 8
+ case OP_LADD_IMM:
+ case OP_LSUB_IMM:
+#endif
+ if (ins->inst_imm == 0) {
+ ins->opcode = OP_MOVE;
+ spec = INS_INFO (ins->opcode);
+ }
+ break;
+ case OP_MUL_IMM:
+ case OP_IMUL_IMM:
+#if SIZEOF_VOID_P == 8
+ case OP_LMUL_IMM:
+#endif
+ if (ins->inst_imm == 0) {
+ ins->opcode = (ins->opcode == OP_LMUL_IMM) ? OP_I8CONST : OP_ICONST;
+ ins->inst_c0 = 0;
+ ins->sreg1 = -1;
+ } else if (ins->inst_imm == 1) {
+ ins->opcode = OP_MOVE;
+ } else if ((ins->opcode == OP_IMUL_IMM) && (ins->inst_imm == -1)) {
+ ins->opcode = OP_INEG;
+ } else if ((ins->opcode == OP_LMUL_IMM) && (ins->inst_imm == -1)) {
+ ins->opcode = OP_LNEG;
+ } else {
+ int power2 = mono_is_power_of_two (ins->inst_imm);
+ if (power2 >= 0) {
+ ins->opcode = (ins->opcode == OP_MUL_IMM) ? OP_SHL_IMM : ((ins->opcode == OP_LMUL_IMM) ? OP_LSHL_IMM : OP_ISHL_IMM);
+ ins->inst_imm = power2;
+ }
+ }
+ spec = INS_INFO (ins->opcode);
+ break;
+ case OP_IREM_UN_IMM:
+ case OP_IDIV_UN_IMM: {
+ int c = ins->inst_imm;
+ int power2 = mono_is_power_of_two (c);
+
+ if (power2 >= 0) {
+ if (ins->opcode == OP_IREM_UN_IMM) {
+ ins->opcode = OP_IAND_IMM;
+ ins->sreg2 = -1;
+ ins->inst_imm = (1 << power2) - 1;
+ } else if (ins->opcode == OP_IDIV_UN_IMM) {
+ ins->opcode = OP_ISHR_UN_IMM;
+ ins->sreg2 = -1;
+ ins->inst_imm = power2;
+ }
+ }
+ spec = INS_INFO (ins->opcode);
+ break;
+ }
+ case OP_IDIV_IMM: {
+ int c = ins->inst_imm;
+ int power2 = mono_is_power_of_two (c);
+ MonoInst *tmp1, *tmp2, *tmp3, *tmp4;
+
+ /* FIXME: Move this elsewhere cause its hard to implement it here */
+ if (power2 == 1) {
+ int r1 = mono_alloc_ireg (cfg);
+
+ NEW_BIALU_IMM (cfg, tmp1, OP_ISHR_UN_IMM, r1, ins->sreg1, 31);
+ mono_bblock_insert_after_ins (bb, ins, tmp1);
+ NEW_BIALU (cfg, tmp2, OP_IADD, r1, r1, ins->sreg1);
+ mono_bblock_insert_after_ins (bb, tmp1, tmp2);
+ NEW_BIALU_IMM (cfg, tmp3, OP_ISHR_IMM, ins->dreg, r1, 1);
+ mono_bblock_insert_after_ins (bb, tmp2, tmp3);
+
+ NULLIFY_INS (ins);
+
+ // We allocated a new vreg, so need to restart
+ goto restart;
+ } else if (power2 > 0) {
+ int r1 = mono_alloc_ireg (cfg);
+
+ NEW_BIALU_IMM (cfg, tmp1, OP_ISHR_IMM, r1, ins->sreg1, 31);
+ mono_bblock_insert_after_ins (bb, ins, tmp1);
+ NEW_BIALU_IMM (cfg, tmp2, OP_ISHR_UN_IMM, r1, r1, (32 - power2));
+ mono_bblock_insert_after_ins (bb, tmp1, tmp2);
+ NEW_BIALU (cfg, tmp3, OP_IADD, r1, r1, ins->sreg1);
+ mono_bblock_insert_after_ins (bb, tmp2, tmp3);
+ NEW_BIALU_IMM (cfg, tmp4, OP_ISHR_IMM, ins->dreg, r1, power2);
+ mono_bblock_insert_after_ins (bb, tmp3, tmp4);
+
+ NULLIFY_INS (ins);
+
+ // We allocated a new vreg, so need to restart
+ goto restart;
+ }
+ break;
+ }
+ }
+
+ if (spec [MONO_INST_DEST] != ' ') {
+ MonoInst *def = defs [ins->dreg];
+
+ if (def && (def->opcode == OP_ADD_IMM) && (def->sreg1 == cfg->frame_reg) && (MONO_IS_STORE_MEMBASE (ins))) {
+ /* ADD_IMM is created by spill_global_vars */
+ /* cfg->frame_reg is assumed to remain constant */
+ ins->inst_destbasereg = def->sreg1;
+ ins->inst_offset += def->inst_imm;
+ }
+ }
+
+ if ((spec [MONO_INST_DEST] != ' ') && !MONO_IS_STORE_MEMBASE (ins) && !vreg_is_volatile (cfg, ins->dreg)) {
+ defs [ins->dreg] = ins;
+ def_index [ins->dreg] = ins_index;
+ }
+
+ if (MONO_IS_CALL (ins))
+ last_call_index = ins_index;
+
+ ins_index ++;
+ }
+ }
+}
+
+/**
+ * mono_local_deadce:
+ *
+ * Get rid of the dead assignments to local vregs like the ones created by the
+ * copyprop pass.
+ *
+ * Works one bblock at a time: a forward pass clears the 'used'/'defined' bits
+ * of every vreg the bblock touches, then a backward pass deletes definitions
+ * whose dreg is neither read later in the bblock nor live out of it.
+ */
+void
+mono_local_deadce (MonoCompile *cfg)
+{
+	MonoBasicBlock *bb;
+	MonoInst *ins, *prev;
+	MonoBitSet *used, *defined;
+
+	//mono_print_code (cfg, "BEFORE LOCAL-DEADCE");
+
+	/*
+	 * Assignments to global vregs can't be eliminated so this pass must come
+	 * after the handle_global_vregs () pass.
+	 */
+
+	/* One bit per vreg; _noinit is safe because the forward pass below clears
+	 * every bit belonging to a vreg the bblock actually mentions. */
+	used = mono_bitset_mp_new_noinit (cfg->mempool, cfg->next_vreg + 1);
+	defined = mono_bitset_mp_new_noinit (cfg->mempool, cfg->next_vreg + 1);
+
+	/* First pass: collect liveness info */
+	for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
+		/* Manually init the defs entries used by the bblock */
+		MONO_BB_FOR_EACH_INS (bb, ins) {
+			const char *spec = INS_INFO (ins->opcode);
+
+			if (spec [MONO_INST_DEST] != ' ') {
+				mono_bitset_clear_fast (used, ins->dreg);
+				mono_bitset_clear_fast (defined, ins->dreg);
+#if SIZEOF_VOID_P == 4
+				/* Regpairs */
+				mono_bitset_clear_fast (used, ins->dreg + 1);
+				mono_bitset_clear_fast (defined, ins->dreg + 1);
+#endif
+			}
+			if (spec [MONO_INST_SRC1] != ' ') {
+				mono_bitset_clear_fast (used, ins->sreg1);
+#if SIZEOF_VOID_P == 4
+				mono_bitset_clear_fast (used, ins->sreg1 + 1);
+#endif
+			}
+			if (spec [MONO_INST_SRC2] != ' ') {
+				mono_bitset_clear_fast (used, ins->sreg2);
+#if SIZEOF_VOID_P == 4
+				mono_bitset_clear_fast (used, ins->sreg2 + 1);
+#endif
+			}
+		}
+
+		/*
+		 * Make a reverse pass over the instruction list
+		 */
+		MONO_BB_FOR_EACH_INS_REVERSE_SAFE (bb, prev, ins) {
+			const char *spec = INS_INFO (ins->opcode);
+
+			if (ins->opcode == OP_NOP) {
+				MONO_DELETE_INS (bb, ins);
+				continue;
+			}
+
+			g_assert (ins->opcode > MONO_CEE_LAST);
+
+			if (((ins->opcode == OP_MOVE) || (ins->opcode == OP_VMOVE)) && ins->prev) {
+				MonoInst *def;
+				const char *spec2;
+
+				/* Skip NOPs between the move and its potential definer */
+				def = ins->prev;
+				while (def->prev && (def->opcode == OP_NOP))
+					def = def->prev;
+				spec2 = INS_INFO (def->opcode);
+
+				/*
+				 * Perform a limited kind of reverse copy propagation, i.e.
+				 * transform B <- FOO; A <- B into A <- FOO
+				 * This isn't copyprop, nor deadce, but it can only be performed
+				 * after handle_global_vregs () has run.
+				 */
+				if (!get_vreg_to_inst (cfg, ins->sreg1) && (spec2 [MONO_INST_DEST] != ' ') && (def->dreg == ins->sreg1) && !mono_bitset_test_fast (used, ins->sreg1) && !MONO_IS_STORE_MEMBASE (def) && ((spec [MONO_INST_DEST] == 'f' && ins->sreg1 > MONO_MAX_FREGS) || (spec [MONO_INST_DEST] == 'i' && ins->sreg1 > MONO_MAX_IREGS) || (spec [MONO_INST_DEST] == 'v'))) {
+					if (cfg->verbose_level > 2) {
+						printf ("\tReverse copyprop in BB%d on ", bb->block_num);
+						mono_print_ins (ins);
+					}
+
+					def->dreg = ins->dreg;
+					MONO_DELETE_INS (bb, ins);
+					/* ins is a NOP now; refresh spec for the checks below */
+					spec = INS_INFO (ins->opcode);
+				}
+			}
+
+			/* Enabling this on x86 could screw up the fp stack */
+			if (((spec [MONO_INST_DEST] == 'i') && (ins->dreg >= MONO_MAX_IREGS)) ||
+				((spec [MONO_INST_DEST] == 'f') && (ins->dreg >= MONO_MAX_FREGS) && !MONO_ARCH_USE_FPSTACK) ||
+				(spec [MONO_INST_DEST] == 'v')) {
+				/*
+				 * Assignments to global vregs can only be eliminated if there is another
+				 * assignment to the same vreg later in the same bblock.
+				 */
+				if (!mono_bitset_test_fast (used, ins->dreg) &&
+					(!get_vreg_to_inst (cfg, ins->dreg) || (!bb->extended && !vreg_is_volatile (cfg, ins->dreg) && mono_bitset_test_fast (defined, ins->dreg))) &&
+					MONO_INS_HAS_NO_SIDE_EFFECT (ins)) {
+					/* Happens with CMOV instructions */
+					if (ins->prev && ins->prev->opcode == OP_ICOMPARE_IMM) {
+						MonoInst *prev = ins->prev;
+						/* The compare only fed the deleted instruction, drop it too */
+						MONO_DELETE_INS (bb, prev);
+					}
+					//printf ("DEADCE: "); mono_print_ins (ins);
+					MONO_DELETE_INS (bb, ins);
+					spec = INS_INFO (ins->opcode);
+				}
+
+				if (spec [MONO_INST_DEST] != ' ')
+					mono_bitset_clear_fast (used, ins->dreg);
+			}
+
+			/* Update liveness for the instructions preceding this one */
+			if (spec [MONO_INST_DEST] != ' ')
+				mono_bitset_set_fast (defined, ins->dreg);
+			if (spec [MONO_INST_SRC1] != ' ')
+				mono_bitset_set_fast (used, ins->sreg1);
+			if (spec [MONO_INST_SRC2] != ' ')
+				mono_bitset_set_fast (used, ins->sreg2);
+			if (MONO_IS_STORE_MEMBASE (ins))
+				mono_bitset_set_fast (used, ins->dreg);
+
+			if (MONO_IS_CALL (ins)) {
+				MonoCallInst *call = (MonoCallInst*)ins;
+				GSList *l;
+
+				/* Hard regs carrying outgoing call arguments must stay live */
+				if (call->out_ireg_args) {
+					for (l = call->out_ireg_args; l; l = l->next) {
+						guint32 regpair, reg;
+
+						regpair = (guint32)(gssize)(l->data);
+						reg = regpair & 0xffffff;
+
+						mono_bitset_set_fast (used, reg);
+					}
+				}
+
+				if (call->out_freg_args) {
+					for (l = call->out_freg_args; l; l = l->next) {
+						guint32 regpair, reg;
+
+						regpair = (guint32)(gssize)(l->data);
+						reg = regpair & 0xffffff;
+
+						mono_bitset_set_fast (used, reg);
+					}
+				}
+			}
+		}
+	}
+
+	//mono_print_code (cfg, "AFTER LOCAL-DEADCE");
+}
#define ALPHA_PRINT if (mini_alpha_verbose_level)
-#define NEW_INS(cfg,ins,dest,op) do { \
- (dest) = mono_mempool_alloc0 ((cfg)->mempool, sizeof (MonoInst)); \
- (dest)->opcode = (op); \
- MONO_INST_LIST_ADD_TAIL (&(dest)->node, &(ins)->node); \
+#define NEW_INS(cfg,dest,op) do { \
+ (dest) = mono_mempool_alloc0 ((cfg)->mempool, sizeof (MonoInst)); \
+ (dest)->opcode = (op); \
+ insert_after_ins (bb, last_ins, (dest)); \
} while (0)
#define NEW_ICONST(cfg,dest,val) do { \
}
}
+/*
+ * insert_after_ins:
+ *
+ *   Link TO_INSERT into BB's singly-linked instruction list right after INS,
+ * or at the head of the list when INS is NULL.
+ * NOTE(review): bb->last_ins is not updated here even when TO_INSERT becomes
+ * the new tail — the callers appear to fix up last_ins themselves; confirm.
+ */
+static void
+insert_after_ins (MonoBasicBlock *bb, MonoInst *ins, MonoInst *to_insert)
+{
+	if (ins == NULL)
+	{
+		/* Insert at the head of the bblock */
+		ins = bb->code;
+		bb->code = to_insert;
+		to_insert->next = ins;
+	}
+	else
+	{
+		to_insert->next = ins->next;
+		ins->next = to_insert;
+	}
+}
+
static void add_got_entry(MonoCompile *cfg, AlphaGotType ge_type,
AlphaGotData ge_data,
int ip, MonoJumpInfoType type, gconstpointer target)
void
mono_arch_peephole_pass_2 (MonoCompile *cfg, MonoBasicBlock *bb)
{
- MonoInst *ins, *n;
+ MonoInst *ins, *n, *last_ins = NULL;
+ ins = bb->code;
CFG_DEBUG(3) g_print ("ALPHA: PEEPHOLE_2 pass\n");
MONO_BB_FOR_EACH_INS_SAFE (bb, n, ins) {
- MonoInst *last_ins = mono_inst_list_prev (&ins->node, &bb->ins_list);
switch (ins->opcode)
{
case OP_MOVE:
break;
#endif
}
+
+ last_ins = ins;
+ ins = ins->next;
}
+
+ bb->last_ins = last_ins;
}
// Convert to opposite branch opcode
void
mono_arch_lowering_pass (MonoCompile *cfg, MonoBasicBlock *bb)
{
- MonoInst *ins, *n, *next, *temp;
+ MonoInst *ins, *n, *temp, *last_ins = NULL;
+ MonoInst *next;
+
+ ins = bb->code;
if (bb->max_vreg > cfg->rs->next_vreg)
cfg->rs->next_vreg = bb->max_vreg;
case OP_IDIV_IMM:
case OP_IREM_IMM:
case OP_MUL_IMM:
- NEW_INS (cfg, ins, temp, OP_I8CONST);
+ NEW_INS (cfg, temp, OP_I8CONST);
temp->inst_c0 = ins->inst_imm;
temp->dreg = mono_regstate_next_int (cfg->rs);
// Instead of compare+b<cond>/fcompare+b<cond>,
// Alpha has compare<cond>+br<cond>/fcompare<cond>+br<cond>
// we need to convert
- next = mono_inst_list_next (&ins->node, &bb->ins_list);
- g_assert(next);
+ next = ins->next;
cvt_cmp_branch(ins, next);
}
case OP_COMPARE_IMM:
if (!alpha_is_imm (ins->inst_imm))
{
- NEW_INS (cfg, ins, temp, OP_I8CONST);
+ NEW_INS (cfg, temp, OP_I8CONST);
temp->inst_c0 = ins->inst_imm;
temp->dreg = mono_regstate_next_int (cfg->rs);
ins->opcode = OP_COMPARE;
//continue;
}
- next = mono_inst_list_next (&ins->node, &bb->ins_list);
- g_assert(next);
+ next = ins->next;
cvt_cmp_branch(ins, next);
case OP_ICOMPARE_IMM:
if (!alpha_is_imm (ins->inst_imm))
{
- NEW_INS (cfg, ins, temp, OP_ICONST);
+ NEW_INS (cfg, temp, OP_ICONST);
temp->inst_c0 = ins->inst_imm;
temp->dreg = mono_regstate_next_int (cfg->rs);
ins->opcode = OP_ICOMPARE;
//continue;
}
- next = mono_inst_list_next (&ins->node, &bb->ins_list);
- g_assert(next);
+ next = ins->next;
cvt_cmp_branch(ins, next);
case OP_STOREI8_MEMBASE_IMM:
if (ins->inst_imm != 0)
{
- NEW_INS (cfg, ins, temp, OP_I8CONST);
+ NEW_INS (cfg, temp, OP_I8CONST);
temp->inst_c0 = ins->inst_imm;
temp->dreg = mono_regstate_next_int (cfg->rs);
ins->opcode = OP_STOREI8_MEMBASE_REG;
if (ins->inst_imm != 0)
{
MonoInst *temp;
- NEW_INS (cfg, ins, temp, OP_ICONST);
+ NEW_INS (cfg, temp, OP_ICONST);
temp->inst_c0 = ins->inst_imm;
temp->dreg = mono_regstate_next_int (cfg->rs);
ins->opcode = OP_STOREI4_MEMBASE_REG;
if (ins->inst_imm != 0 || !bwx_supported)
{
MonoInst *temp;
- NEW_INS (cfg, ins, temp, OP_ICONST);
+ NEW_INS (cfg, temp, OP_ICONST);
temp->inst_c0 = ins->inst_imm;
temp->dreg = mono_regstate_next_int (cfg->rs);
ins->opcode = OP_STOREI1_MEMBASE_REG;
if (ins->inst_imm != 0 || !bwx_supported)
{
MonoInst *temp;
- NEW_INS (cfg, ins, temp, OP_ICONST);
+ NEW_INS (cfg, temp, OP_ICONST);
temp->inst_c0 = ins->inst_imm;
temp->dreg = mono_regstate_next_int (cfg->rs);
ins->opcode = OP_STOREI2_MEMBASE_REG;
if (!alpha_is_imm(ins->inst_imm))
{
MonoInst *temp;
- NEW_INS (cfg, ins, temp, OP_ICONST);
+ NEW_INS (cfg, temp, OP_ICONST);
temp->inst_c0 = ins->inst_imm;
temp->dreg = mono_regstate_next_int (cfg->rs);
if (!alpha_is_imm(ins->inst_imm))
{
MonoInst *temp;
- NEW_INS (cfg, ins, temp, OP_ICONST);
+ NEW_INS (cfg, temp, OP_ICONST);
temp->inst_c0 = ins->inst_imm;
temp->dreg = mono_regstate_next_int (cfg->rs);
if (!alpha_is_imm(ins->inst_imm))
{
MonoInst *temp;
- NEW_INS(cfg, ins, temp, OP_ICONST);
+ NEW_INS(cfg, temp, OP_ICONST);
temp->inst_c0 = ins->inst_imm;
temp->dreg = mono_regstate_next_int(cfg->rs);
ins->sreg2 = temp->dreg;
if (!alpha_is_imm(ins->inst_imm))
{
MonoInst *temp;
- NEW_INS(cfg, ins, temp, OP_ICONST);
+ NEW_INS(cfg, temp, OP_ICONST);
temp->inst_c0 = ins->inst_imm;
temp->dreg = mono_regstate_next_int(cfg->rs);
ins->sreg2 = temp->dreg;
default:
break;
}
+
+ last_ins = ins;
+ ins = ins->next;
}
+ bb->last_ins = last_ins;
+
bb->max_vreg = cfg->rs->next_vreg;
}
MonoCallInst *call;
guint offset;
unsigned int *code = (unsigned int *)(cfg->native_code + cfg->code_len);
+ MonoInst *last_ins = NULL;
guint last_offset = 0;
int max_len, cpos;
break;
case OP_BR:
+ CFG_DEBUG(4) g_print("ALPHA_CHECK: [br] target: %p, next: %p, curr: %p, last: %p [",
+ ins->inst_target_bb, bb->next_bb, ins, bb->last_ins);
+
if (ins->flags & MONO_INST_BRLABEL)
{
if (ins->inst_i0->inst_c0)
cpos += max_len;
+ last_ins = ins;
last_offset = offset;
- }
+ }
cfg->code_len = ((char *)code) - ((char *)cfg->native_code);
}
MONO_INST_NEW (cfg, arg, OP_OUTARG);
arg->inst_left = sig_arg;
arg->type = STACK_PTR;
- MONO_INST_LIST_ADD (&arg->node, &call->out_args);
+
+ /* prepend, so they get reversed */
+ arg->next = call->out_args;
+ call->out_args = arg;
}
if (is_virtual && i == 0) {
arg->cil_code = in->cil_code;
arg->inst_left = in;
arg->type = in->type;
- MONO_INST_LIST_ADD (&arg->node, &call->out_args);
+ /* prepend, so they get reversed */
+ arg->next = call->out_args;
+ call->out_args = arg;
CFG_DEBUG(3) g_print("ALPHA: Param[%d] - ", i);
add_outarg_reg (cfg, call, set_reg, arg_storage,
dest_reg, load);
- if (&set_reg->node != call->out_args.next)
+ if (set_reg != call->out_args)
{
- MONO_INST_LIST_ADD (&set_reg->node, &call->out_args);
+ set_reg->next = call->out_args;
+ call->out_args = set_reg;
}
}
//outarg->inst_imm = 16 + ainfo->offset + (slot - 8) * 8;
outarg->dreg = ainfo->offset + (slot - 22) * 8;
- if (&outarg->node != call->out_args.next)
+ if (outarg != call->out_args)
{
- MONO_INST_LIST_ADD (&outarg->node, &call->out_args);
+ outarg->next = call->out_args;
+ call->out_args = outarg;
}
}
arg->inst_left = vtaddr;
arg->inst_right = in;
arg->type = in->type;
- MONO_INST_LIST_ADD (&arg->node, &call->out_args);
+
+ /* prepend, so they get reversed */
+ arg->next = call->out_args;
+ call->out_args = arg;
}
else
{
zero_inst->inst_p0 = 0;
arg->inst_left = zero_inst;
arg->type = STACK_PTR;
- MONO_INST_LIST_ADD (&arg->node, &call->out_args);
+ /* prepend, so they get reversed */
+ arg->next = call->out_args;
+ call->out_args = arg;
}
else
/* if the function returns a struct, the called method a
#include "inssel.h"
#include "cpu-amd64.h"
+/*
+ * Can't define this in mini-amd64.h cause that would turn on the generic code in
+ * method-to-ir.c.
+ */
+#define MONO_ARCH_IMT_REG AMD64_R11
+
static gint lmf_tls_offset = -1;
static gint lmf_addr_tls_offset = -1;
static gint appdomain_tls_offset = -1;
* UNORDERED 1 1 1
*/
+void mini_emit_memcpy2 (MonoCompile *cfg, int destreg, int doffset, int srcreg, int soffset, int size, int align);
+
const char*
mono_arch_regname (int reg)
{
else
size = mini_type_stack_size (gsctx, &klass->byval_arg, NULL);
#ifndef PLATFORM_WIN32
- if (!sig->pinvoke || (size == 0) || (size > 16)) {
+ if (!sig->pinvoke && !disable_vtypes_in_regs && ((is_return && (size == 8)) || (!is_return && (size <= 16)))) {
+ /* We pass and return vtypes of size 8 in a register */
+ } else if (!sig->pinvoke || (size == 0) || (size > 16)) {
#else
if (!sig->pinvoke) {
#endif
else
nquads = 1;
- /*
- * Implement the algorithm from section 3.2.3 of the X86_64 ABI.
- * The X87 and SSEUP stuff is left out since there are no such types in
- * the CLR.
- */
- info = mono_marshal_load_type_info (klass);
- g_assert (info);
+ if (!sig->pinvoke) {
+ /* Always pass in 1 or 2 integer registers */
+ args [0] = ARG_CLASS_INTEGER;
+ args [1] = ARG_CLASS_INTEGER;
+ /* Only the simplest cases are supported */
+ if (is_return && nquads != 1) {
+ args [0] = ARG_CLASS_MEMORY;
+ args [1] = ARG_CLASS_MEMORY;
+ }
+ } else {
+ /*
+ * Implement the algorithm from section 3.2.3 of the X86_64 ABI.
+ * The X87 and SSEUP stuff is left out since there are no such types in
+ * the CLR.
+ */
+ info = mono_marshal_load_type_info (klass);
+ g_assert (info);
#ifndef PLATFORM_WIN32
- if (info->native_size > 16) {
- ainfo->offset = *stack_size;
- *stack_size += ALIGN_TO (info->native_size, 8);
- ainfo->storage = ArgOnStack;
-
- return;
- }
-#else
- switch (info->native_size) {
- case 1: case 2: case 4: case 8:
- break;
- default:
- if (is_return) {
- ainfo->storage = ArgOnStack;
+ if (info->native_size > 16) {
ainfo->offset = *stack_size;
*stack_size += ALIGN_TO (info->native_size, 8);
- }
- else {
- ainfo->storage = ArgValuetypeAddrInIReg;
+ ainfo->storage = ArgOnStack;
- if (*gr < PARAM_REGS) {
- ainfo->pair_storage [0] = ArgInIReg;
- ainfo->pair_regs [0] = param_regs [*gr];
- (*gr) ++;
- }
- else {
- ainfo->pair_storage [0] = ArgOnStack;
+ return;
+ }
+#else
+ switch (info->native_size) {
+ case 1: case 2: case 4: case 8:
+ break;
+ default:
+ if (is_return) {
+ ainfo->storage = ArgOnStack;
ainfo->offset = *stack_size;
- *stack_size += 8;
+ *stack_size += ALIGN_TO (info->native_size, 8);
}
- }
+ else {
+ ainfo->storage = ArgValuetypeAddrInIReg;
- return;
- }
+ if (*gr < PARAM_REGS) {
+ ainfo->pair_storage [0] = ArgInIReg;
+ ainfo->pair_regs [0] = param_regs [*gr];
+ (*gr) ++;
+ }
+ else {
+ ainfo->pair_storage [0] = ArgOnStack;
+ ainfo->offset = *stack_size;
+ *stack_size += 8;
+ }
+ }
+
+ return;
+ }
#endif
- args [0] = ARG_CLASS_NO_CLASS;
- args [1] = ARG_CLASS_NO_CLASS;
- for (quad = 0; quad < nquads; ++quad) {
- int size;
- guint32 align;
- ArgumentClass class1;
+ args [0] = ARG_CLASS_NO_CLASS;
+ args [1] = ARG_CLASS_NO_CLASS;
+ for (quad = 0; quad < nquads; ++quad) {
+ int size;
+ guint32 align;
+ ArgumentClass class1;
- class1 = ARG_CLASS_NO_CLASS;
- for (i = 0; i < info->num_fields; ++i) {
- size = mono_marshal_type_size (info->fields [i].field->type,
- info->fields [i].mspec,
- &align, TRUE, klass->unicode);
- if ((info->fields [i].offset < 8) && (info->fields [i].offset + size) > 8) {
- /* Unaligned field */
- NOT_IMPLEMENTED;
- }
+ if (info->num_fields == 0)
+ class1 = ARG_CLASS_MEMORY;
+ else
+ class1 = ARG_CLASS_NO_CLASS;
+ for (i = 0; i < info->num_fields; ++i) {
+ size = mono_marshal_type_size (info->fields [i].field->type,
+ info->fields [i].mspec,
+ &align, TRUE, klass->unicode);
+ if ((info->fields [i].offset < 8) && (info->fields [i].offset + size) > 8) {
+ /* Unaligned field */
+ NOT_IMPLEMENTED;
+ }
- /* Skip fields in other quad */
- if ((quad == 0) && (info->fields [i].offset >= 8))
- continue;
- if ((quad == 1) && (info->fields [i].offset < 8))
- continue;
+ /* Skip fields in other quad */
+ if ((quad == 0) && (info->fields [i].offset >= 8))
+ continue;
+ if ((quad == 1) && (info->fields [i].offset < 8))
+ continue;
- class1 = merge_argument_class_from_type (info->fields [i].field->type, class1);
+ class1 = merge_argument_class_from_type (info->fields [i].field->type, class1);
+ }
+ g_assert (class1 != ARG_CLASS_NO_CLASS);
+ args [quad] = class1;
}
- g_assert (class1 != ARG_CLASS_NO_CLASS);
- args [quad] = class1;
}
/* Post merger cleanup */
*fr = orig_fr;
ainfo->offset = *stack_size;
- *stack_size += ALIGN_TO (info->native_size, 8);
+ if (sig->pinvoke)
+ *stack_size += ALIGN_TO (info->native_size, 8);
+ else
+ *stack_size += nquads * sizeof (gpointer);
ainfo->storage = ArgOnStack;
}
}
ArgInfo *ainfo = &cinfo->args [sig->hasthis + i];
MonoType *ptype;
-#ifdef PLATFORM_WIN32
- /* The float param registers and other param registers must be the same index on Windows x64.*/
- if (gr > fr)
- fr = gr;
- else if (fr > gr)
- gr = fr;
-#endif
-
if (!sig->pinvoke && (sig->call_convention == MONO_CALL_VARARG) && (i == sig->sentinelpos)) {
 		/* We always pass the sig cookie on the stack for simplicity */
/*
mono_arch_compute_omit_fp (cfg);
- if (cfg->arch.omit_fp)
- regs = g_list_prepend (regs, (gpointer)AMD64_RBP);
+ if (cfg->globalra) {
+ if (cfg->arch.omit_fp)
+ regs = g_list_prepend (regs, (gpointer)AMD64_RBP);
+
+ regs = g_list_prepend (regs, (gpointer)AMD64_RBX);
+ regs = g_list_prepend (regs, (gpointer)AMD64_R12);
+ regs = g_list_prepend (regs, (gpointer)AMD64_R13);
+ regs = g_list_prepend (regs, (gpointer)AMD64_R14);
+ regs = g_list_prepend (regs, (gpointer)AMD64_R15);
+
+ regs = g_list_prepend (regs, (gpointer)AMD64_R10);
+ regs = g_list_prepend (regs, (gpointer)AMD64_R9);
+ regs = g_list_prepend (regs, (gpointer)AMD64_R8);
+ regs = g_list_prepend (regs, (gpointer)AMD64_RDI);
+ regs = g_list_prepend (regs, (gpointer)AMD64_RSI);
+ regs = g_list_prepend (regs, (gpointer)AMD64_RDX);
+ regs = g_list_prepend (regs, (gpointer)AMD64_RCX);
+ regs = g_list_prepend (regs, (gpointer)AMD64_RAX);
+ } else {
+ if (cfg->arch.omit_fp)
+ regs = g_list_prepend (regs, (gpointer)AMD64_RBP);
+
+ /* We use the callee saved registers for global allocation */
+ regs = g_list_prepend (regs, (gpointer)AMD64_RBX);
+ regs = g_list_prepend (regs, (gpointer)AMD64_R12);
+ regs = g_list_prepend (regs, (gpointer)AMD64_R13);
+ regs = g_list_prepend (regs, (gpointer)AMD64_R14);
+ regs = g_list_prepend (regs, (gpointer)AMD64_R15);
+ }
- /* We use the callee saved registers for global allocation */
- regs = g_list_prepend (regs, (gpointer)AMD64_RBX);
- regs = g_list_prepend (regs, (gpointer)AMD64_R12);
- regs = g_list_prepend (regs, (gpointer)AMD64_R13);
- regs = g_list_prepend (regs, (gpointer)AMD64_R14);
- regs = g_list_prepend (regs, (gpointer)AMD64_R15);
+ return regs;
+}
+
+/*
+ * mono_arch_get_global_fp_regs:
+ *
+ *   Return the list of fp registers usable for global register allocation.
+ * NOTE(review): uses a hard-coded 16 instead of AMD64_XMM_NREG, and returns
+ * raw xmm indices while mono_arch_get_fregs_clobbered_by_call () offsets them
+ * by MONO_MAX_IREGS — presumably the two consumers expect different register
+ * numberings; confirm against regalloc2.c.
+ */
+GList*
+mono_arch_get_global_fp_regs (MonoCompile *cfg)
+{
+	GList *regs = NULL;
+	int i;
+
+	/* All XMM registers */
+	for (i = 0; i < 16; ++i)
+		regs = g_list_prepend (regs, GINT_TO_POINTER (i));
 
 	return regs;
 }
+/*
+ * mono_arch_get_iregs_clobbered_by_call:
+ *
+ *   Return the list of integer registers treated as clobbered by CALL. The
+ * list is built once and cached in a static, so CALL itself is ignored — the
+ * set is the same for every call.
+ * NOTE(review): RBX/RBP/R12-R15 are callee saved in the SysV amd64 ABI yet
+ * are included here — presumably intentional (be conservative across calls);
+ * confirm.
+ */
+GList*
+mono_arch_get_iregs_clobbered_by_call (MonoCallInst *call)
+{
+	static GList *r = NULL;
+
+	if (r == NULL) {
+		GList *regs = NULL;
+
+		regs = g_list_prepend (regs, (gpointer)AMD64_RBP);
+		regs = g_list_prepend (regs, (gpointer)AMD64_RBX);
+		regs = g_list_prepend (regs, (gpointer)AMD64_R12);
+		regs = g_list_prepend (regs, (gpointer)AMD64_R13);
+		regs = g_list_prepend (regs, (gpointer)AMD64_R14);
+		regs = g_list_prepend (regs, (gpointer)AMD64_R15);
+
+		regs = g_list_prepend (regs, (gpointer)AMD64_R10);
+		regs = g_list_prepend (regs, (gpointer)AMD64_R9);
+		regs = g_list_prepend (regs, (gpointer)AMD64_R8);
+		regs = g_list_prepend (regs, (gpointer)AMD64_RDI);
+		regs = g_list_prepend (regs, (gpointer)AMD64_RSI);
+		regs = g_list_prepend (regs, (gpointer)AMD64_RDX);
+		regs = g_list_prepend (regs, (gpointer)AMD64_RCX);
+		regs = g_list_prepend (regs, (gpointer)AMD64_RAX);
+
+		/* Publish atomically; if another thread already installed a list,
+		 * ours is abandoned (a benign one-time leak). */
+		InterlockedCompareExchangePointer ((gpointer*)&r, regs, NULL);
+	}
+
+	return r;
+}
+
+/*
+ * mono_arch_get_fregs_clobbered_by_call:
+ *
+ *   Return the list of fp registers treated as clobbered by CALL: every xmm
+ * register, encoded as MONO_MAX_IREGS + xmm index (presumably the unified
+ * int+fp numbering used by the global regalloc — confirm). Built once and
+ * cached; CALL itself is ignored since the set never varies.
+ */
+GList*
+mono_arch_get_fregs_clobbered_by_call (MonoCallInst *call)
+{
+	int i;
+	static GList *r = NULL;
+
+	if (r == NULL) {
+		GList *regs = NULL;
+
+		for (i = 0; i < AMD64_XMM_NREG; ++i)
+			regs = g_list_prepend (regs, GINT_TO_POINTER (MONO_MAX_IREGS + i));
+
+		/* Publish atomically; a racing loser's list is abandoned (one-time leak) */
+		InterlockedCompareExchangePointer ((gpointer*)&r, regs, NULL);
+	}
+
+	return r;
+}
+
/*
* mono_arch_regalloc_cost:
*
/* push+pop */
return (ins->opcode == OP_ARG) ? 1 : 2;
}
+
+/*
+ * mono_arch_fill_argument_info:
+ *
+ * Populate cfg->args, cfg->ret and cfg->vret_addr with information about the arguments
+ * of the method, as they are located on entry (register or stack slot per the
+ * calling convention).
+ */
+void
+mono_arch_fill_argument_info (MonoCompile *cfg)
+{
+	MonoMethodSignature *sig;
+	MonoMethodHeader *header;
+	MonoInst *ins;
+	int i;
+	CallInfo *cinfo;
+
+	header = mono_method_get_header (cfg->method);
+
+	sig = mono_method_signature (cfg->method);
+
+	cinfo = cfg->arch.cinfo;
+
+	/*
+	 * Contrary to mono_arch_allocate_vars (), the information should describe
+	 * where the arguments are at the beginning of the method, not where they can be
+	 * accessed during the execution of the method. The latter makes no sense for the
+	 * global register allocator, since a variable can be in more than one location.
+	 */
+	if (sig->ret->type != MONO_TYPE_VOID) {
+		switch (cinfo->ret.storage) {
+		case ArgInIReg:
+		case ArgInFloatSSEReg:
+		case ArgInDoubleSSEReg:
+			/* Vtype/typedbyref returns: the return buffer address arrives in a reg */
+			if ((MONO_TYPE_ISSTRUCT (sig->ret) && !mono_class_from_mono_type (sig->ret)->enumtype) || (sig->ret->type == MONO_TYPE_TYPEDBYREF)) {
+				cfg->vret_addr->opcode = OP_REGVAR;
+				cfg->vret_addr->inst_c0 = cinfo->ret.reg;
+			}
+			else {
+				cfg->ret->opcode = OP_REGVAR;
+				cfg->ret->inst_c0 = cinfo->ret.reg;
+			}
+			break;
+		case ArgValuetypeInReg:
+			/* basereg/offset are placeholders (-1), not real locations */
+			cfg->ret->opcode = OP_REGOFFSET;
+			cfg->ret->inst_basereg = -1;
+			cfg->ret->inst_offset = -1;
+			break;
+		default:
+			g_assert_not_reached ();
+		}
+	}
+
+	for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
+		ArgInfo *ainfo = &cinfo->args [i];
+		MonoType *arg_type;
+
+		ins = cfg->args [i];
+
+		if (sig->hasthis && (i == 0))
+			arg_type = &mono_defaults.object_class->byval_arg;
+		else
+			arg_type = sig->params [i - sig->hasthis];
+
+		switch (ainfo->storage) {
+		case ArgInIReg:
+		case ArgInFloatSSEReg:
+		case ArgInDoubleSSEReg:
+			ins->opcode = OP_REGVAR;
+			ins->inst_c0 = ainfo->reg;
+			break;
+		case ArgOnStack:
+			/* basereg/offset are placeholders (-1), not real locations */
+			ins->opcode = OP_REGOFFSET;
+			ins->inst_basereg = -1;
+			ins->inst_offset = -1;
+			break;
+		case ArgValuetypeInReg:
+			/* Dummy */
+			ins->opcode = OP_NOP;
+			break;
+		default:
+			g_assert_not_reached ();
+		}
+	}
+}
void
mono_arch_allocate_vars (MonoCompile *cfg)
{
MonoMethodSignature *sig;
MonoMethodHeader *header;
- MonoInst *inst;
+ MonoInst *ins;
int i, offset;
guint32 locals_stack_size, locals_stack_align;
gint32 *offsets;
case ArgInFloatSSEReg:
case ArgInDoubleSSEReg:
if ((MONO_TYPE_ISSTRUCT (sig->ret) && !mono_class_from_mono_type (sig->ret)->enumtype) || (sig->ret->type == MONO_TYPE_TYPEDBYREF)) {
- /* The register is volatile */
- cfg->vret_addr->opcode = OP_REGOFFSET;
- cfg->vret_addr->inst_basereg = cfg->frame_reg;
- if (cfg->arch.omit_fp) {
- cfg->vret_addr->inst_offset = offset;
- offset += 8;
+ if (cfg->globalra) {
+ cfg->vret_addr->opcode = OP_REGVAR;
+ cfg->vret_addr->inst_c0 = cinfo->ret.reg;
} else {
- offset += 8;
- cfg->vret_addr->inst_offset = -offset;
- }
- if (G_UNLIKELY (cfg->verbose_level > 1)) {
- printf ("vret_addr =");
- mono_print_ins (cfg->vret_addr);
+ /* The register is volatile */
+ cfg->vret_addr->opcode = OP_REGOFFSET;
+ cfg->vret_addr->inst_basereg = cfg->frame_reg;
+ if (cfg->arch.omit_fp) {
+ cfg->vret_addr->inst_offset = offset;
+ offset += 8;
+ } else {
+ offset += 8;
+ cfg->vret_addr->inst_offset = -offset;
+ }
+ if (G_UNLIKELY (cfg->verbose_level > 1)) {
+ printf ("vret_addr =");
+ mono_print_ins (cfg->vret_addr);
+ }
}
}
else {
default:
g_assert_not_reached ();
}
- cfg->ret->dreg = cfg->ret->inst_c0;
+ if (!cfg->globalra)
+ cfg->ret->dreg = cfg->ret->inst_c0;
}
/* Allocate locals */
- offsets = mono_allocate_stack_slots_full (cfg, cfg->arch.omit_fp ? FALSE: TRUE, &locals_stack_size, &locals_stack_align);
- if (locals_stack_align) {
- offset += (locals_stack_align - 1);
- offset &= ~(locals_stack_align - 1);
- }
- for (i = cfg->locals_start; i < cfg->num_varinfo; i++) {
- if (offsets [i] != -1) {
- MonoInst *inst = cfg->varinfo [i];
- inst->opcode = OP_REGOFFSET;
- inst->inst_basereg = cfg->frame_reg;
- if (cfg->arch.omit_fp)
- inst->inst_offset = (offset + offsets [i]);
- else
- inst->inst_offset = - (offset + offsets [i]);
- //printf ("allocated local %d to ", i); mono_print_tree_nl (inst);
+ if (!cfg->globalra) {
+ offsets = mono_allocate_stack_slots_full (cfg, cfg->arch.omit_fp ? FALSE: TRUE, &locals_stack_size, &locals_stack_align);
+ if (locals_stack_align) {
+ offset += (locals_stack_align - 1);
+ offset &= ~(locals_stack_align - 1);
+ }
+ for (i = cfg->locals_start; i < cfg->num_varinfo; i++) {
+ if (offsets [i] != -1) {
+ MonoInst *ins = cfg->varinfo [i];
+ ins->opcode = OP_REGOFFSET;
+ ins->inst_basereg = cfg->frame_reg;
+ if (cfg->arch.omit_fp)
+ ins->inst_offset = (offset + offsets [i]);
+ else
+ ins->inst_offset = - (offset + offsets [i]);
+ //printf ("allocated local %d to ", i); mono_print_tree_nl (ins);
+ }
}
+ offset += locals_stack_size;
}
- offset += locals_stack_size;
if (!sig->pinvoke && (sig->call_convention == MONO_CALL_VARARG)) {
g_assert (!cfg->arch.omit_fp);
}
for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
- inst = cfg->args [i];
- if (inst->opcode != OP_REGVAR) {
+ ins = cfg->args [i];
+ if (ins->opcode != OP_REGVAR) {
ArgInfo *ainfo = &cinfo->args [i];
gboolean inreg = TRUE;
MonoType *arg_type;
else
arg_type = sig->params [i - sig->hasthis];
+ if (cfg->globalra) {
+ /* The new allocator needs info about the original locations of the arguments */
+ switch (ainfo->storage) {
+ case ArgInIReg:
+ case ArgInFloatSSEReg:
+ case ArgInDoubleSSEReg:
+ ins->opcode = OP_REGVAR;
+ ins->inst_c0 = ainfo->reg;
+ break;
+ case ArgOnStack:
+ g_assert (!cfg->arch.omit_fp);
+ ins->opcode = OP_REGOFFSET;
+ ins->inst_basereg = cfg->frame_reg;
+ ins->inst_offset = ainfo->offset + ARGS_OFFSET;
+ break;
+ case ArgValuetypeInReg:
+ ins->opcode = OP_REGOFFSET;
+ ins->inst_basereg = cfg->frame_reg;
+ /* These arguments are saved to the stack in the prolog */
+ offset = ALIGN_TO (offset, sizeof (gpointer));
+ if (cfg->arch.omit_fp) {
+ ins->inst_offset = offset;
+ offset += (ainfo->storage == ArgValuetypeInReg) ? 2 * sizeof (gpointer) : sizeof (gpointer);
+ } else {
+ offset += (ainfo->storage == ArgValuetypeInReg) ? 2 * sizeof (gpointer) : sizeof (gpointer);
+ ins->inst_offset = - offset;
+ }
+ break;
+ default:
+ g_assert_not_reached ();
+ }
+
+ continue;
+ }
+
/* FIXME: Allocate volatile arguments to registers */
- if (inst->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT))
+ if (ins->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT))
inreg = FALSE;
/*
if ((ainfo->storage == ArgInIReg) || (ainfo->storage == ArgInFloatSSEReg) || (ainfo->storage == ArgInDoubleSSEReg) || (ainfo->storage == ArgValuetypeInReg))
inreg = FALSE;
- inst->opcode = OP_REGOFFSET;
+ ins->opcode = OP_REGOFFSET;
switch (ainfo->storage) {
case ArgInIReg:
case ArgInFloatSSEReg:
case ArgInDoubleSSEReg:
- inst->opcode = OP_REGVAR;
- inst->dreg = ainfo->reg;
+ if (inreg) {
+ ins->opcode = OP_REGVAR;
+ ins->dreg = ainfo->reg;
+ }
break;
case ArgOnStack:
g_assert (!cfg->arch.omit_fp);
- inst->opcode = OP_REGOFFSET;
- inst->inst_basereg = cfg->frame_reg;
- inst->inst_offset = ainfo->offset + ARGS_OFFSET;
+ ins->opcode = OP_REGOFFSET;
+ ins->inst_basereg = cfg->frame_reg;
+ ins->inst_offset = ainfo->offset + ARGS_OFFSET;
break;
case ArgValuetypeInReg:
break;
}
if (!inreg && (ainfo->storage != ArgOnStack)) {
- inst->opcode = OP_REGOFFSET;
- inst->inst_basereg = cfg->frame_reg;
+ ins->opcode = OP_REGOFFSET;
+ ins->inst_basereg = cfg->frame_reg;
/* These arguments are saved to the stack in the prolog */
offset = ALIGN_TO (offset, sizeof (gpointer));
if (cfg->arch.omit_fp) {
- inst->inst_offset = offset;
+ ins->inst_offset = offset;
offset += (ainfo->storage == ArgValuetypeInReg) ? 2 * sizeof (gpointer) : sizeof (gpointer);
} else {
offset += (ainfo->storage == ArgValuetypeInReg) ? 2 * sizeof (gpointer) : sizeof (gpointer);
- inst->inst_offset = - offset;
+ ins->inst_offset = - offset;
}
}
}
}
}
-/* Fixme: we need an alignment solution for enter_method and mono_arch_call_opcode,
- * currently alignment in mono_arch_call_opcode is computed without arch_get_argument_info
- */
<<<<<<< marker-free body follows
+/*
+ * add_outarg_reg2:
+ *
+ *   Emit the instruction which moves TREE's result into a fresh vreg and
+ * register that (vreg, hard reg REG) pair with CALL via
+ * mono_call_inst_add_outarg_reg (), so the register allocator places the
+ * outgoing argument correctly. STORAGE selects the move variant.
+ */
+static void
+add_outarg_reg2 (MonoCompile *cfg, MonoCallInst *call, ArgStorage storage, int reg, MonoInst *tree)
+{
+	MonoInst *ins;
+
+	switch (storage) {
+	case ArgInIReg:
+		MONO_INST_NEW (cfg, ins, OP_MOVE);
+		ins->dreg = mono_alloc_ireg (cfg);
+		ins->sreg1 = tree->dreg;
+		MONO_ADD_INS (cfg->cbb, ins);
+		mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, reg, FALSE);
+		break;
+	case ArgInFloatSSEReg:
+		/* Single precision uses the dedicated r4 xmm set opcode */
+		MONO_INST_NEW (cfg, ins, OP_AMD64_SET_XMMREG_R4);
+		ins->dreg = mono_alloc_freg (cfg);
+		ins->sreg1 = tree->dreg;
+		MONO_ADD_INS (cfg->cbb, ins);
+
+		mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, reg, TRUE);
+		break;
+	case ArgInDoubleSSEReg:
+		MONO_INST_NEW (cfg, ins, OP_FMOVE);
+		ins->dreg = mono_alloc_freg (cfg);
+		ins->sreg1 = tree->dreg;
+		MONO_ADD_INS (cfg->cbb, ins);
+
+		mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, reg, TRUE);
+
+		break;
+	default:
+		g_assert_not_reached ();
+	}
+}
static int
arg_storage_to_ldind (ArgStorage storage)
return -1;
}
+/*
+ * arg_storage_to_load_membase:
+ *
+ *   Return the LOAD_MEMBASE opcode which loads a value of the kind
+ * described by STORAGE from memory. Asserts on storage kinds which are
+ * not passed in registers.
+ */
+static int
+arg_storage_to_load_membase (ArgStorage storage)
+{
+ switch (storage) {
+ case ArgInIReg:
+ return OP_LOAD_MEMBASE;
+ case ArgInDoubleSSEReg:
+ return OP_LOADR8_MEMBASE;
+ case ArgInFloatSSEReg:
+ return OP_LOADR4_MEMBASE;
+ default:
+ g_assert_not_reached ();
+ }
+
+ /* not reached; keeps non-void return well-formed */
+ return -1;
+}
+
static void
emit_sig_cookie (MonoCompile *cfg, MonoCallInst *call, CallInfo *cinfo)
{
MONO_INST_NEW (cfg, arg, OP_OUTARG);
arg->inst_left = sig_arg;
arg->type = STACK_PTR;
- MONO_INST_LIST_ADD (&arg->node, &call->out_args);
+
+ /* prepend, so they get reversed */
+ arg->next = call->out_args;
+ call->out_args = arg;
}
/*
* instructions to properly call the function in call.
* This includes pushing, moving arguments to the right register
* etc.
- * Issue: who does the spilling if needed, and when?
*/
MonoCallInst*
mono_arch_call_opcode (MonoCompile *cfg, MonoBasicBlock* bb, MonoCallInst *call, int is_virtual) {
cinfo = get_call_info (cfg->generic_sharing_context, cfg->mempool, sig, sig->pinvoke);
+ if (cfg->method->save_lmf) {
+ MONO_INST_NEW (cfg, arg, OP_AMD64_SAVE_SP_TO_LMF);
+ arg->next = call->out_args;
+ call->out_args = arg;
+ }
+
for (i = 0; i < n; ++i) {
ainfo = cinfo->args + i;
arg->cil_code = in->cil_code;
arg->inst_left = in;
arg->type = in->type;
+ /* prepend, so they get reversed */
+ arg->next = call->out_args;
+ call->out_args = arg;
+#if 0
if (!cinfo->stack_usage)
/* Keep the assignments to the arg registers in order if possible */
MONO_INST_LIST_ADD_TAIL (&arg->node, &call->out_args);
else
MONO_INST_LIST_ADD (&arg->node, &call->out_args);
+#endif
if ((i >= sig->hasthis) && (MONO_TYPE_ISSTRUCT(sig->params [i - sig->hasthis]))) {
guint32 align;
MONO_INST_NEW (cfg, arg, OP_OUTARG);
arg->cil_code = in->cil_code;
arg->type = in->type;
- MONO_INST_LIST_ADD (&arg->node, &call->out_args);
+ /* prepend, so they get reversed */
+ arg->next = call->out_args;
+ call->out_args = arg;
add_outarg_reg (cfg, call, arg, ainfo->pair_storage [1], ainfo->pair_regs [1], load);
arg->inst_right = in;
arg->type = in->type;
- MONO_INST_LIST_ADD (&arg->node, &call->out_args);
+ /* prepend, so they get reversed */
+ arg->next = call->out_args;
+ call->out_args = arg;
}
}
else if (ainfo->storage == ArgValuetypeAddrInIReg){
load->backend.memcpy_args->align = align;
load->inst_left = (cfg)->varinfo [vtaddr->inst_c0];
load->inst_right = in->inst_i0;
- MONO_INST_LIST_ADD (&load->node, &call->out_args);
+
+ // FIXME:
+ g_assert_not_reached ();
+ //MONO_INST_LIST_ADD (&load->node, &call->out_args);
}
else {
arg->opcode = OP_OUTARG_VT;
if (cinfo->need_stack_align) {
MONO_INST_NEW (cfg, arg, OP_AMD64_OUTARG_ALIGN_STACK);
arg->inst_c0 = 8;
- MONO_INST_LIST_ADD (&arg->node, &call->out_args);
+ /* prepend, so they get reversed */
+ arg->next = call->out_args;
+ call->out_args = arg;
}
#ifdef PLATFORM_WIN32
MONO_INST_LIST_ADD_TAIL (&arg->node, &call->out_args);
#endif
+#if 0
if (cfg->method->save_lmf) {
MONO_INST_NEW (cfg, arg, OP_AMD64_SAVE_SP_TO_LMF);
MONO_INST_LIST_ADD_TAIL (&arg->node, &call->out_args);
}
+#endif
call->stack_usage = cinfo->stack_usage;
cfg->param_area = MAX (cfg->param_area, call->stack_usage);
return call;
}
+/*
+ * emit_sig_cookie2:
+ *
+ *   Linear-IR version of emit_sig_cookie: push the signature cookie for a
+ * vararg call onto the stack. The cookie is a MonoMethodSignature* that
+ * describes only the implicit (post-sentinel) arguments, so a modified
+ * copy of the call signature is built and its address pushed.
+ * Tail calls with a cookie are not supported.
+ */
+static void
+emit_sig_cookie2 (MonoCompile *cfg, MonoCallInst *call, CallInfo *cinfo)
+{
+ MonoInst *arg;
+ MonoMethodSignature *tmp_sig;
+ MonoInst *sig_arg;
+
+ if (call->tail_call)
+ NOT_IMPLEMENTED;
+
+ /* FIXME: Add support for signature tokens to AOT */
+ cfg->disable_aot = TRUE;
+
+ g_assert (cinfo->sig_cookie.storage == ArgOnStack);
+
+ /*
+ * mono_ArgIterator_Setup assumes the signature cookie is
+ * passed first and all the arguments which were before it are
+ * passed on the stack after the signature. So compensate by
+ * passing a different signature.
+ */
+ tmp_sig = mono_metadata_signature_dup (call->signature);
+ tmp_sig->param_count -= call->signature->sentinelpos;
+ tmp_sig->sentinelpos = 0;
+ memcpy (tmp_sig->params, call->signature->params + call->signature->sentinelpos, tmp_sig->param_count * sizeof (MonoType*));
+
+ /* Materialize the signature pointer as a constant ... */
+ MONO_INST_NEW (cfg, sig_arg, OP_ICONST);
+ sig_arg->dreg = mono_alloc_ireg (cfg);
+ sig_arg->inst_p0 = tmp_sig;
+ MONO_ADD_INS (cfg->cbb, sig_arg);
+
+ /* ... and push it. NOTE(review): OP_X86_PUSH is the opcode shared with
+ * the x86 backend; presumably it pushes a gpointer-sized value here. */
+ MONO_INST_NEW (cfg, arg, OP_X86_PUSH);
+ arg->sreg1 = sig_arg->dreg;
+ MONO_ADD_INS (cfg->cbb, arg);
+}
+
+/*
+ * mono_arch_emit_call:
+ *
+ *   Linear-IR replacement for mono_arch_call_opcode on amd64: emit the IR
+ * which sets up the arguments of CALL into the current basic block
+ * (cfg->cbb). Register arguments are emitted first in left-to-right order;
+ * the remaining (stack / fp-register / valuetype) arguments are then
+ * emitted right-to-left, pushing stack arguments in reverse. Also handles
+ * the vararg signature cookie, valuetype returns and the LMF save marker.
+ */
+void
+mono_arch_emit_call (MonoCompile *cfg, MonoCallInst *call)
+{
+ MonoInst *arg, *in;
+ MonoMethodSignature *sig;
+ /* NOTE(review): stack_size is initialized but never used below */
+ int i, n, stack_size;
+ CallInfo *cinfo;
+ ArgInfo *ainfo;
+
+ stack_size = 0;
+
+ sig = call->signature;
+ n = sig->param_count + sig->hasthis;
+
+ cinfo = get_call_info (cfg->generic_sharing_context, cfg->mempool, sig, sig->pinvoke);
+
+ if (cinfo->need_stack_align) {
+ /* Keep the stack 16-byte aligned at the call site */
+ MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SUB_IMM, X86_ESP, X86_ESP, 8);
+ }
+
+ /*
+ * Emit all parameters passed in registers in non-reverse order for better readability
+ * and to help the optimization in emit_prolog ().
+ */
+ for (i = 0; i < n; ++i) {
+ ainfo = cinfo->args + i;
+
+ in = call->args [i];
+
+ if (ainfo->storage == ArgInIReg)
+ add_outarg_reg2 (cfg, call, ainfo->storage, ainfo->reg, in);
+ }
+
+ /* Everything else is emitted right-to-left so stack pushes end up in
+ * the correct order. */
+ for (i = n - 1; i >= 0; --i) {
+ ainfo = cinfo->args + i;
+
+ in = call->args [i];
+
+ switch (ainfo->storage) {
+ case ArgInIReg:
+ /* Already done */
+ break;
+ case ArgInFloatSSEReg:
+ case ArgInDoubleSSEReg:
+ add_outarg_reg2 (cfg, call, ainfo->storage, ainfo->reg, in);
+ break;
+ case ArgOnStack:
+ case ArgValuetypeInReg:
+ if (ainfo->storage == ArgOnStack && call->tail_call)
+ NOT_IMPLEMENTED;
+ if ((i >= sig->hasthis) && (MONO_TYPE_ISSTRUCT(sig->params [i - sig->hasthis]))) {
+ /* Valuetype argument: emit OP_OUTARG_VT, decomposed later by
+ * mono_arch_emit_outarg_vt () using the ArgInfo saved in inst_p1 */
+ guint32 align;
+ guint32 size;
+
+ if (sig->params [i - sig->hasthis]->type == MONO_TYPE_TYPEDBYREF) {
+ size = sizeof (MonoTypedRef);
+ align = sizeof (gpointer);
+ }
+ else {
+ if (sig->pinvoke)
+ size = mono_type_native_stack_size (&in->klass->byval_arg, &align);
+ else {
+ /*
+ * Other backends use mono_type_stack_size (), but that
+ * aligns the size to 8, which is larger than the size of
+ * the source, leading to reads of invalid memory if the
+ * source is at the end of address space.
+ */
+ size = mono_class_value_size (in->klass, &align);
+ }
+ }
+ g_assert (in->klass);
+
+ if (size > 0) {
+ MONO_INST_NEW (cfg, arg, OP_OUTARG_VT);
+ arg->sreg1 = in->dreg;
+ arg->klass = in->klass;
+ arg->backend.size = size;
+ arg->inst_p0 = call;
+ arg->inst_p1 = mono_mempool_alloc (cfg->mempool, sizeof (ArgInfo));
+ memcpy (arg->inst_p1, ainfo, sizeof (ArgInfo));
+
+ MONO_ADD_INS (cfg->cbb, arg);
+ }
+ } else {
+ /* Scalar stack argument: push it; R4/R8 instead get stored to a
+ * freshly reserved 8-byte stack slot */
+ MONO_INST_NEW (cfg, arg, OP_X86_PUSH);
+ arg->sreg1 = in->dreg;
+ if (!sig->params [i - sig->hasthis]->byref) {
+ if (sig->params [i - sig->hasthis]->type == MONO_TYPE_R4) {
+ MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SUB_IMM, X86_ESP, X86_ESP, 8);
+ arg->opcode = OP_STORER4_MEMBASE_REG;
+ arg->inst_destbasereg = X86_ESP;
+ arg->inst_offset = 0;
+ } else if (sig->params [i - sig->hasthis]->type == MONO_TYPE_R8) {
+ MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SUB_IMM, X86_ESP, X86_ESP, 8);
+ arg->opcode = OP_STORER8_MEMBASE_REG;
+ arg->inst_destbasereg = X86_ESP;
+ arg->inst_offset = 0;
+ }
+ }
+ MONO_ADD_INS (cfg->cbb, arg);
+ }
+ break;
+ default:
+ g_assert_not_reached ();
+ }
+
+ if (!sig->pinvoke && (sig->call_convention == MONO_CALL_VARARG) && (i == sig->sentinelpos)) {
+ /* Emit the signature cookie just before the implicit arguments */
+ emit_sig_cookie2 (cfg, call, cinfo);
+ }
+ }
+
+ /* Handle the case where there are no implicit arguments */
+ if (!sig->pinvoke && (sig->call_convention == MONO_CALL_VARARG) && (n == sig->sentinelpos)) {
+ emit_sig_cookie2 (cfg, call, cinfo);
+ }
+
+ /* Valuetype return handling */
+ if (sig->ret && MONO_TYPE_ISSTRUCT (sig->ret)) {
+ MonoInst *vtarg;
+
+ if (cinfo->ret.storage == ArgValuetypeInReg) {
+ if (cinfo->ret.pair_storage [0] == ArgInIReg && cinfo->ret.pair_storage [1] == ArgNone) {
+ /*
+ * Tell the JIT to use a more efficient calling convention: call using
+ * OP_CALL, compute the result location after the call, and save the
+ * result there.
+ */
+ call->vret_in_reg = TRUE;
+ } else {
+ if (call->tail_call)
+ NOT_IMPLEMENTED;
+ /*
+ * The valuetype is in RAX:RDX after the call, need to be copied to
+ * the stack. Push the address here, so the call instruction can
+ * access it.
+ */
+ if (!cfg->arch.vret_addr_loc) {
+ cfg->arch.vret_addr_loc = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
+ /* Prevent it from being register allocated or optimized away */
+ ((MonoInst*)cfg->arch.vret_addr_loc)->flags |= MONO_INST_VOLATILE;
+ }
+
+ MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, ((MonoInst*)cfg->arch.vret_addr_loc)->dreg, call->vret_var->dreg);
+ }
+ }
+ else {
+ /* Hidden return-buffer pointer passed in a register */
+ MONO_INST_NEW (cfg, vtarg, OP_MOVE);
+ vtarg->sreg1 = call->vret_var->dreg;
+ vtarg->dreg = mono_alloc_preg (cfg);
+ MONO_ADD_INS (cfg->cbb, vtarg);
+
+ mono_call_inst_add_outarg_reg (cfg, call, vtarg->dreg, cinfo->ret.reg, FALSE);
+ }
+ }
+
+#ifdef PLATFORM_WIN32
+ // FIXME:
+ NOT_IMPLEMENTED;
+#endif
+
+ if (cfg->method->save_lmf) {
+ /* Marker instruction; lowered during code emission */
+ MONO_INST_NEW (cfg, arg, OP_AMD64_SAVE_SP_TO_LMF);
+ MONO_ADD_INS (cfg->cbb, arg);
+ }
+
+ call->stack_usage = cinfo->stack_usage;
+}
+
+/*
+ * mono_arch_emit_outarg_vt:
+ *
+ *   Decompose an OP_OUTARG_VT instruction INS, passing the valuetype
+ * pointed to by SRC. INS->inst_p0 holds the owning MonoCallInst and
+ * INS->inst_p1 the ArgInfo saved by mono_arch_emit_call ().
+ * Valuetypes passed in registers are loaded quad-by-quad into the proper
+ * argument registers; otherwise the value is pushed/copied onto the stack.
+ */
+void
+mono_arch_emit_outarg_vt (MonoCompile *cfg, MonoInst *ins, MonoInst *src)
+{
+ MonoInst *arg;
+ MonoCallInst *call = (MonoCallInst*)ins->inst_p0;
+ ArgInfo *ainfo = (ArgInfo*)ins->inst_p1;
+ int size = ins->backend.size;
+
+ if (ainfo->storage == ArgValuetypeInReg) {
+ MonoInst *load;
+ int part;
+
+ /* Load each 8-byte quad from [src + part*8] into the register
+ * assigned to it by get_call_info () */
+ for (part = 0; part < 2; ++part) {
+ if (ainfo->pair_storage [part] == ArgNone)
+ continue;
+
+ MONO_INST_NEW (cfg, load, arg_storage_to_load_membase (ainfo->pair_storage [part]));
+ load->inst_basereg = src->dreg;
+ load->inst_offset = part * sizeof (gpointer);
+
+ switch (ainfo->pair_storage [part]) {
+ case ArgInIReg:
+ load->dreg = mono_alloc_ireg (cfg);
+ break;
+ case ArgInDoubleSSEReg:
+ case ArgInFloatSSEReg:
+ load->dreg = mono_alloc_freg (cfg);
+ break;
+ default:
+ g_assert_not_reached ();
+ }
+ MONO_ADD_INS (cfg->cbb, load);
+
+ add_outarg_reg2 (cfg, call, ainfo->pair_storage [part], ainfo->pair_regs [part], load);
+ }
+ } else {
+ if (size == 8) {
+ /* Can't use this for < 8 since it does an 8 byte memory load */
+ MONO_INST_NEW (cfg, arg, OP_X86_PUSH_MEMBASE);
+ arg->inst_basereg = src->dreg;
+ arg->inst_offset = 0;
+ MONO_ADD_INS (cfg->cbb, arg);
+ } else if (size <= 40) {
+ /* Small valuetype: reserve an aligned slot and copy it over */
+ MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SUB_IMM, X86_ESP, X86_ESP, ALIGN_TO (size, 8));
+ mini_emit_memcpy2 (cfg, X86_ESP, 0, src->dreg, 0, size, 4);
+ } else {
+ /* Large valuetype: emit a dedicated push-object opcode */
+ MONO_INST_NEW (cfg, arg, OP_X86_PUSH_OBJ);
+ arg->inst_basereg = src->dreg;
+ arg->inst_offset = 0;
+ arg->inst_imm = size;
+ MONO_ADD_INS (cfg->cbb, arg);
+ }
+ }
+}
+
+/*
+ * mono_arch_emit_setret:
+ *
+ *   Emit the IR which moves VAL into the return-value variable (cfg->ret)
+ * of METHOD. R4 returns go through OP_AMD64_SET_XMMREG_R4 (narrowing to
+ * single precision), R8 returns through OP_FMOVE; everything else uses a
+ * plain OP_MOVE.
+ */
+void
+mono_arch_emit_setret (MonoCompile *cfg, MonoMethod *method, MonoInst *val)
+{
+ MonoType *ret = mono_type_get_underlying_type (mono_method_signature (method)->ret);
+
+ if (!ret->byref) {
+ if (ret->type == MONO_TYPE_R4) {
+ MONO_EMIT_NEW_UNALU (cfg, OP_AMD64_SET_XMMREG_R4, cfg->ret->dreg, val->dreg);
+ return;
+ } else if (ret->type == MONO_TYPE_R8) {
+ MONO_EMIT_NEW_UNALU (cfg, OP_FMOVE, cfg->ret->dreg, val->dreg);
+ return;
+ }
+ }
+
+ MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, cfg->ret->dreg, val->dreg);
+}
+
#define EMIT_COND_BRANCH(ins,cond,sign) \
if (ins->flags & MONO_INST_BRLABEL) { \
if (ins->inst_i0->inst_c0) { \
MonoInst *ins, *n;
MONO_BB_FOR_EACH_INS_SAFE (bb, n, ins) {
- MonoInst *last_ins = mono_inst_list_prev (&ins->node, &bb->ins_list);
+ MonoInst *last_ins = ins->prev;
switch (ins->opcode) {
case OP_ADD_IMM:
* propagation). These instruction sequences are very common
* in the initlocals bblock.
*/
- for (ins2 = mono_inst_list_next (&ins->node, &bb->ins_list); ins2;
- ins2 = mono_inst_list_next (&ins2->node, &bb->ins_list)) {
+ for (ins2 = ins->next; ins2; ins2 = ins2->next) {
if (((ins2->opcode == OP_STORE_MEMBASE_IMM) || (ins2->opcode == OP_STOREI4_MEMBASE_IMM) || (ins2->opcode == OP_STOREI8_MEMBASE_IMM) || (ins2->opcode == OP_STORE_MEMBASE_IMM)) && (ins2->inst_imm == 0)) {
ins2->opcode = store_membase_imm_to_store_membase_reg (ins2->opcode);
ins2->sreg1 = ins->dreg;
switch (ins->opcode) {
case OP_ICONST:
case OP_I8CONST: {
- MonoInst *next;
-
/* reg = 0 -> XOR (reg, reg) */
/* XOR sets cflags on x86, so we cant do it always */
- next = mono_inst_list_next (&ins->node, &bb->ins_list);
- if (ins->inst_c0 == 0 && (!next ||
- (next && INST_IGNORES_CFLAGS (next->opcode)))) {
+ if (ins->inst_c0 == 0 && (!ins->next || (ins->next && INST_IGNORES_CFLAGS (ins->next->opcode)))) {
ins->opcode = OP_LXOR;
ins->sreg1 = ins->dreg;
ins->sreg2 = ins->dreg;
* propagation). These instruction sequences are very common
* in the initlocals bblock.
*/
- for (ins2 = mono_inst_list_next (&ins->node, &bb->ins_list); ins2;
- ins2 = mono_inst_list_next (&ins2->node, &bb->ins_list)) {
+ for (ins2 = ins->next; ins2; ins2 = ins2->next) {
if (((ins2->opcode == OP_STORE_MEMBASE_IMM) || (ins2->opcode == OP_STOREI4_MEMBASE_IMM) || (ins2->opcode == OP_STOREI8_MEMBASE_IMM) || (ins2->opcode == OP_STORE_MEMBASE_IMM)) && (ins2->inst_imm == 0)) {
ins2->opcode = store_membase_imm_to_store_membase_reg (ins2->opcode);
ins2->sreg1 = ins->dreg;
#define NEW_INS(cfg,ins,dest,op) do { \
MONO_INST_NEW ((cfg), (dest), (op)); \
(dest)->cil_code = (ins)->cil_code; \
- MONO_INST_LIST_ADD_TAIL (&(dest)->node, &(ins)->node); \
+ mono_bblock_insert_before_ins (bb, ins, (dest)); \
} while (0)
/*
case OP_IREM_IMM:
case OP_IDIV_UN_IMM:
case OP_IREM_UN_IMM:
- mono_decompose_op_imm (cfg, ins);
+ mono_decompose_op_imm (cfg, bb, ins);
break;
case OP_COMPARE_IMM:
case OP_LCOMPARE_IMM:
if (!amd64_is_imm32 (ins->inst_imm)) {
NEW_INS (cfg, ins, temp, OP_I8CONST);
temp->inst_c0 = ins->inst_imm;
- temp->dreg = mono_regstate_next_int (cfg->rs);
+ if (cfg->globalra)
+ temp->dreg = mono_alloc_ireg (cfg);
+ else
+ temp->dreg = mono_regstate_next_int (cfg->rs);
ins->opcode = OP_COMPARE;
ins->sreg2 = temp->dreg;
}
if (!amd64_is_imm32 (ins->inst_offset)) {
NEW_INS (cfg, ins, temp, OP_I8CONST);
temp->inst_c0 = ins->inst_offset;
- temp->dreg = mono_regstate_next_int (cfg->rs);
+ if (cfg->globalra)
+ temp->dreg = mono_alloc_ireg (cfg);
+ else
+ temp->dreg = mono_regstate_next_int (cfg->rs);
ins->opcode = OP_AMD64_LOADI8_MEMINDEX;
ins->inst_indexreg = temp->dreg;
}
if (!amd64_is_imm32 (ins->inst_imm)) {
NEW_INS (cfg, ins, temp, OP_I8CONST);
temp->inst_c0 = ins->inst_imm;
- temp->dreg = mono_regstate_next_int (cfg->rs);
+ if (cfg->globalra)
+ temp->dreg = mono_alloc_ireg (cfg);
+ else
+ temp->dreg = mono_regstate_next_int (cfg->rs);
ins->opcode = OP_STOREI8_MEMBASE_REG;
ins->sreg1 = temp->dreg;
}
case OP_VCALL:
case OP_VCALL_REG:
case OP_VCALL_MEMBASE:
+ case OP_VCALL2:
+ case OP_VCALL2_REG:
+ case OP_VCALL2_MEMBASE:
cinfo = get_call_info (cfg->generic_sharing_context, cfg->mempool, ((MonoCallInst*)ins)->signature, FALSE);
if (cinfo->ret.storage == ArgValuetypeInReg) {
MonoInst *loc = cfg->arch.vret_addr_loc;
MonoCallInst *call;
guint offset;
guint8 *code = cfg->native_code + cfg->code_len;
+ MonoInst *last_ins = NULL;
guint last_offset = 0;
int max_len, cpos;
amd64_movsxd_reg_membase (code, ins->dreg, ins->dreg, 0);
break;
case OP_LOADU4_MEM:
- amd64_mov_reg_imm (code, ins->dreg, ins->inst_p0);
- amd64_mov_reg_membase (code, ins->dreg, ins->dreg, 0, 4);
+ // FIXME: Decompose this earlier
+ if (cfg->new_ir) {
+ if (amd64_is_imm32 (ins->inst_imm))
+ amd64_mov_reg_mem (code, ins->dreg, ins->inst_imm, 4);
+ else {
+ amd64_mov_reg_imm (code, ins->dreg, ins->inst_imm);
+ amd64_mov_reg_membase (code, ins->dreg, ins->dreg, 0, 4);
+ }
+ } else {
+ amd64_mov_reg_imm (code, ins->dreg, ins->inst_p0);
+ amd64_mov_reg_membase (code, ins->dreg, ins->dreg, 0, 4);
+ }
break;
case OP_LOADU1_MEM:
amd64_mov_reg_imm (code, ins->dreg, ins->inst_imm);
amd64_mov_reg_membase (code, ins->dreg, ins->inst_basereg, ins->inst_offset, 4);
break;
case OP_LOADU1_MEMBASE:
- amd64_widen_membase (code, ins->dreg, ins->inst_basereg, ins->inst_offset, FALSE, FALSE);
+ /* The cpu zero extends the result into 64 bits */
+ amd64_widen_membase_size (code, ins->dreg, ins->inst_basereg, ins->inst_offset, FALSE, FALSE, 4);
break;
case OP_LOADI1_MEMBASE:
amd64_widen_membase (code, ins->dreg, ins->inst_basereg, ins->inst_offset, TRUE, FALSE);
break;
case OP_LOADU2_MEMBASE:
- amd64_widen_membase (code, ins->dreg, ins->inst_basereg, ins->inst_offset, FALSE, TRUE);
+ /* The cpu zero extends the result into 64 bits */
+ amd64_widen_membase_size (code, ins->dreg, ins->inst_basereg, ins->inst_offset, FALSE, TRUE, 4);
break;
case OP_LOADI2_MEMBASE:
amd64_widen_membase (code, ins->dreg, ins->inst_basereg, ins->inst_offset, TRUE, TRUE);
amd64_dec_reg_size (code, ins->dreg, 4);
break;
case OP_X86_MUL_REG_MEMBASE:
+ case OP_X86_MUL_MEMBASE_REG:
amd64_imul_reg_membase_size (code, ins->sreg1, ins->sreg2, ins->inst_offset, 4);
break;
case OP_AMD64_ICOMPARE_MEMBASE_REG:
EMIT_COND_BRANCH (ins, cc_table [mono_opcode_to_cond (ins->opcode)], cc_signed_table [mono_opcode_to_cond (ins->opcode)]);
break;
+ case OP_CMOV_IEQ:
+ case OP_CMOV_IGE:
+ case OP_CMOV_IGT:
+ case OP_CMOV_ILE:
+ case OP_CMOV_ILT:
+ case OP_CMOV_INE_UN:
+ case OP_CMOV_IGE_UN:
+ case OP_CMOV_IGT_UN:
+ case OP_CMOV_ILE_UN:
+ case OP_CMOV_ILT_UN:
+ case OP_CMOV_LEQ:
+ case OP_CMOV_LGE:
+ case OP_CMOV_LGT:
+ case OP_CMOV_LLE:
+ case OP_CMOV_LLT:
+ case OP_CMOV_LNE_UN:
+ case OP_CMOV_LGE_UN:
+ case OP_CMOV_LGT_UN:
+ case OP_CMOV_LLE_UN:
+ case OP_CMOV_LLT_UN:
+ g_assert (ins->dreg == ins->sreg1);
+ /* This needs to operate on 64 bit values */
+ amd64_cmov_reg (code, cc_table [mono_opcode_to_cond (ins->opcode)], cc_signed_table [mono_opcode_to_cond (ins->opcode)], ins->dreg, ins->sreg2);
+ break;
+
case OP_LNOT:
amd64_not_reg (code, ins->sreg1);
break;
mono_add_patch_info (cfg, offset, (MonoJumpInfoType)ins->inst_i1, ins->inst_p0);
amd64_mov_reg_membase (code, ins->dreg, AMD64_RIP, 0, 8);
break;
+ case OP_JUMP_TABLE:
+ mono_add_patch_info (cfg, offset, (MonoJumpInfoType)ins->inst_i1, ins->inst_p0);
+ amd64_mov_reg_imm_size (code, ins->dreg, 0, 8);
+ break;
case OP_MOVE:
amd64_mov_reg_reg (code, ins->dreg, ins->sreg1, sizeof (gpointer));
break;
amd64_sse_movsd_reg_reg (code, ins->dreg, ins->sreg1);
break;
}
- case OP_JMP: {
+ case OP_JMP:
+ case OP_TAILCALL: {
/*
* Note: this 'frame destruction' logic is useful for tail calls, too.
* Keep in sync with the code in emit_epilog.
g_assert (!cfg->method->save_lmf);
- code = emit_load_volatile_arguments (cfg, code);
+ if (ins->opcode == OP_JMP)
+ code = emit_load_volatile_arguments (cfg, code);
if (cfg->arch.omit_fp) {
guint32 save_offset = 0;
amd64_mov_membase_reg (code, ins->sreg1, 0, AMD64_R11, 8);
break;
}
+ case OP_CALL:
case OP_FCALL:
case OP_LCALL:
case OP_VCALL:
+ case OP_VCALL2:
case OP_VOIDCALL:
- case OP_CALL:
call = (MonoCallInst*)ins;
/*
* The AMD64 ABI forces callers to know about varargs.
case OP_FCALL_REG:
case OP_LCALL_REG:
case OP_VCALL_REG:
+ case OP_VCALL2_REG:
case OP_VOIDCALL_REG:
case OP_CALL_REG:
call = (MonoCallInst*)ins;
case OP_FCALL_MEMBASE:
case OP_LCALL_MEMBASE:
case OP_VCALL_MEMBASE:
+ case OP_VCALL2_MEMBASE:
case OP_VOIDCALL_MEMBASE:
case OP_CALL_MEMBASE:
call = (MonoCallInst*)ins;
ins->sreg1 = AMD64_RAX;
}
+ if (call->method && ins->inst_offset < 0) {
+ gssize val;
+
+ /*
+ * This is a possible IMT call so save the IMT method in the proper
+ * register. We don't use the generic code in method-to-ir.c, because
+ * we need to disassemble this in get_vcall_slot_addr (), so we have to
+ * maintain control over the layout of the code.
+ * Also put the base reg in %rax to simplify find_imt_method ().
+ */
+ if (ins->sreg1 != AMD64_RAX) {
+ amd64_mov_reg_reg (code, AMD64_RAX, ins->sreg1, 8);
+ ins->sreg1 = AMD64_RAX;
+ }
+ val = (gssize)(gpointer)call->method;
+
+ // FIXME: Generics sharing
+#if 0
+ if ((((guint64)val) >> 32) == 0)
+ amd64_mov_reg_imm_size (code, MONO_ARCH_IMT_REG, val, 4);
+ else
+ amd64_mov_reg_imm_size (code, MONO_ARCH_IMT_REG, val, 8);
+#endif
+ }
+
amd64_call_membase (code, ins->sreg1, ins->inst_offset);
if (call->stack_usage && !CALLCONV_IS_STDCALL (call->signature->call_convention))
amd64_alu_reg_imm (code, X86_ADD, AMD64_RSP, call->stack_usage);
ins->inst_c0 = code - cfg->native_code;
break;
case OP_BR:
+ //g_print ("target: %p, next: %p, curr: %p, last: %p\n", ins->inst_target_bb, bb->next_bb, ins, bb->last_ins);
+ //if ((ins->inst_target_bb == bb->next_bb) && ins == bb->last_ins)
+ //break;
if (ins->flags & MONO_INST_BRLABEL) {
if (ins->inst_i0->inst_c0) {
amd64_jump_code (code, cfg->native_code + ins->inst_i0->inst_c0);
cpos += max_len;
+ last_ins = ins;
last_offset = offset;
}
max_epilog_size = get_max_epilog_size (cfg);
if (cfg->opt & MONO_OPT_BRANCH) {
for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
+ MonoInst *ins;
bb->max_offset = max_offset;
if (cfg->prof_options & MONO_PROFILE_COVERAGE)
stack_offset = ainfo->offset + ARGS_OFFSET;
+ if (cfg->globalra) {
+ /* All the other moves are done by the register allocator */
+ switch (ainfo->storage) {
+ case ArgInFloatSSEReg:
+ amd64_sse_cvtss2sd_reg_reg (code, ainfo->reg, ainfo->reg);
+ break;
+ case ArgValuetypeInReg:
+ for (quad = 0; quad < 2; quad ++) {
+ switch (ainfo->pair_storage [quad]) {
+ case ArgInIReg:
+ amd64_mov_membase_reg (code, ins->inst_basereg, ins->inst_offset + (quad * sizeof (gpointer)), ainfo->pair_regs [quad], sizeof (gpointer));
+ break;
+ case ArgInFloatSSEReg:
+ amd64_movss_membase_reg (code, ins->inst_basereg, ins->inst_offset + (quad * sizeof (gpointer)), ainfo->pair_regs [quad]);
+ break;
+ case ArgInDoubleSSEReg:
+ amd64_movsd_membase_reg (code, ins->inst_basereg, ins->inst_offset + (quad * sizeof (gpointer)), ainfo->pair_regs [quad]);
+ break;
+ case ArgNone:
+ break;
+ default:
+ g_assert_not_reached ();
+ }
+ }
+ break;
+ default:
+ break;
+ }
+
+ continue;
+ }
+
/* Save volatile arguments to the stack */
if (ins->opcode != OP_REGVAR) {
switch (ainfo->storage) {
MonoBasicBlock *first_bb = cfg->bb_entry;
MonoInst *next;
- next = mono_inst_list_first (&first_bb->ins_list);
+ next = mono_bb_first_ins (first_bb);
if (!next && first_bb->next_bb) {
first_bb = first_bb->next_bb;
- next = mono_inst_list_first (&first_bb->ins_list);
+ next = mono_bb_first_ins (first_bb);
}
if (first_bb->in_count > 1)
}
if (match) {
- next = mono_inst_list_next (&next->node, &first_bb->ins_list);
+ next = next->next;
+ //next = mono_inst_list_next (&next->node, &first_bb->ins_list);
if (!next)
break;
}
/* call OFFSET(%rip) */
disp = *(guint32*)(code + 3);
return (gpointer*)(code + disp + 7);
- }
- else if ((code [1] == 0xff) && (amd64_modrm_reg (code [2]) == 0x2) && (amd64_modrm_mod (code [2]) == 0x2)) {
+ } else if ((code [0] == 0xff) && (amd64_modrm_reg (code [1]) == 0x2) && (amd64_modrm_mod (code [1]) == 0x2) && (amd64_modrm_reg (code [2]) == X86_ESP) && (amd64_modrm_mod (code [2]) == 0) && (amd64_modrm_rm (code [2]) == X86_ESP)) {
+ /* call *[r12+disp32] */
+ if (IS_REX (code [-1]))
+ rex = code [-1];
+ reg = AMD64_RSP;
+ disp = *(gint32*)(code + 3);
+ } else if ((code [1] == 0xff) && (amd64_modrm_reg (code [2]) == 0x2) && (amd64_modrm_mod (code [2]) == 0x2)) {
/* call *[reg+disp32] */
if (IS_REX (code [0]))
rex = code [0];
disp = *(gint32*)(code + 3);
/* R10 is clobbered by the IMT thunk code */
g_assert (reg != AMD64_R10);
- }
- else if (code [2] == 0xe8) {
+ } else if (code [2] == 0xe8) {
/* call <ADDR> */
return NULL;
- }
- else if (IS_REX (code [4]) && (code [5] == 0xff) && (amd64_modrm_reg (code [6]) == 0x2) && (amd64_modrm_mod (code [6]) == 0x3)) {
+ } else if ((code [3] == 0xff) && (amd64_modrm_reg (code [4]) == 0x2) && (amd64_modrm_mod (code [4]) == 0x1) && (amd64_modrm_reg (code [5]) == X86_ESP) && (amd64_modrm_mod (code [5]) == 0) && (amd64_modrm_rm (code [5]) == X86_ESP)) {
+ /* call *[r12+disp32] */
+ if (IS_REX (code [2]))
+ rex = code [2];
+ reg = AMD64_RSP;
+ disp = *(gint8*)(code + 6);
+ } else if (IS_REX (code [4]) && (code [5] == 0xff) && (amd64_modrm_reg (code [6]) == 0x2) && (amd64_modrm_mod (code [6]) == 0x3)) {
/* call *%reg */
return NULL;
- }
- else if ((code [4] == 0xff) && (amd64_modrm_reg (code [5]) == 0x2) && (amd64_modrm_mod (code [5]) == 0x1)) {
+ } else if ((code [4] == 0xff) && (amd64_modrm_reg (code [5]) == 0x2) && (amd64_modrm_mod (code [5]) == 0x1)) {
/* call *[reg+disp8] */
if (IS_REX (code [3]))
rex = code [3];
{
return mono_arch_get_this_arg_from_call (gsctx, mono_method_signature (method), (gssize*)regs, NULL);
}
+
+/*
+ * mono_arch_emit_imt_argument:
+ *
+ *   Intentionally empty on this backend: the IMT argument is set up by the
+ * code emitted for the CALL_MEMBASE opcodes instead, so the code layout
+ * stays disassemblable by get_vcall_slot_addr ().
+ */
+void
+mono_arch_emit_imt_argument (MonoCompile *cfg, MonoCallInst *call)
+{
+	/* Done by the implementation of the CALL_MEMBASE opcodes */
+}
#endif
MonoVTable*
return ins;
}
+/*
+ * mono_arch_emit_inst_for_method:
+ *
+ *   Try to replace a call to CMETHOD with a single intrinsic instruction.
+ * Recognizes Math.Sin/Cos/Sqrt/Abs(double) (mapped to fp opcodes) and,
+ * when MONO_OPT_CMOV is enabled, Math.Min/Max on I4/U4/I8/U8 (mapped to
+ * IMIN/IMAX/LMIN/LMAX variants). Returns the emitted instruction, or NULL
+ * if the method was not recognized.
+ */
+MonoInst*
+mono_arch_emit_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
+{
+ MonoInst *ins = NULL;
+ int opcode = 0;
+
+ if (cmethod->klass == mono_defaults.math_class) {
+ if (strcmp (cmethod->name, "Sin") == 0) {
+ opcode = OP_SIN;
+ } else if (strcmp (cmethod->name, "Cos") == 0) {
+ opcode = OP_COS;
+ } else if (strcmp (cmethod->name, "Sqrt") == 0) {
+ opcode = OP_SQRT;
+ } else if (strcmp (cmethod->name, "Abs") == 0 && fsig->params [0]->type == MONO_TYPE_R8) {
+ opcode = OP_ABS;
+ }
+
+ if (opcode) {
+ MONO_INST_NEW (cfg, ins, opcode);
+ ins->type = STACK_R8;
+ ins->dreg = mono_alloc_freg (cfg);
+ ins->sreg1 = args [0]->dreg;
+ MONO_ADD_INS (cfg->cbb, ins);
+ }
+
+ opcode = 0;
+ if (cfg->opt & MONO_OPT_CMOV) {
+ /* NOTE(review): the U4 check is a plain `if`, not `else if`; behavior
+ * is the same here since the I4 and U4 cases are mutually exclusive,
+ * but an `else if` would be clearer - confirm before changing */
+ if (strcmp (cmethod->name, "Min") == 0) {
+ if (fsig->params [0]->type == MONO_TYPE_I4)
+ opcode = OP_IMIN;
+ if (fsig->params [0]->type == MONO_TYPE_U4)
+ opcode = OP_IMIN_UN;
+ else if (fsig->params [0]->type == MONO_TYPE_I8)
+ opcode = OP_LMIN;
+ else if (fsig->params [0]->type == MONO_TYPE_U8)
+ opcode = OP_LMIN_UN;
+ } else if (strcmp (cmethod->name, "Max") == 0) {
+ if (fsig->params [0]->type == MONO_TYPE_I4)
+ opcode = OP_IMAX;
+ if (fsig->params [0]->type == MONO_TYPE_U4)
+ opcode = OP_IMAX_UN;
+ else if (fsig->params [0]->type == MONO_TYPE_I8)
+ opcode = OP_LMAX;
+ else if (fsig->params [0]->type == MONO_TYPE_U8)
+ opcode = OP_LMAX_UN;
+ }
+ }
+
+ if (opcode) {
+ MONO_INST_NEW (cfg, ins, opcode);
+ /* NOTE(review): for MONO_TYPE_U4 this yields STACK_I8, not STACK_I4;
+ * looks like only I4 was considered here - confirm intended */
+ ins->type = fsig->params [0]->type == MONO_TYPE_I4 ? STACK_I4 : STACK_I8;
+ ins->dreg = mono_alloc_ireg (cfg);
+ ins->sreg1 = args [0]->dreg;
+ ins->sreg2 = args [1]->dreg;
+ MONO_ADD_INS (cfg->cbb, ins);
+ }
+
+#if 0
+ /* OP_FREM is not IEEE compatible */
+ else if (strcmp (cmethod->name, "IEEERemainder") == 0) {
+ MONO_INST_NEW (cfg, ins, OP_FREM);
+ ins->inst_i0 = args [0];
+ ins->inst_i1 = args [1];
+ }
+#endif
+ }
+
+ /*
+ * Can't implement CompareExchange methods this way since they have
+ * three arguments.
+ */
+
+ return ins;
+}
+
gboolean
mono_arch_print_tree (MonoInst *tree, int arity)
{
#define MONO_MAX_FREGS AMD64_XMM_NREG
+#define MONO_ARCH_FP_RETURN_REG AMD64_XMM0
+
/* xmm15 is reserved for use by some opcodes */
#define MONO_ARCH_CALLEE_FREGS 0xef
#define MONO_ARCH_CALLEE_SAVED_FREGS 0
*/
#define MONO_ARCH_RGCTX_REG AMD64_R10
#define MONO_ARCH_COMMON_VTABLE_TRAMPOLINE 1
+#define MONO_ARCH_HAVE_CMOV_OPS 1
#define MONO_ARCH_HAVE_NOTIFY_PENDING_EXC 1
#define MONO_ARCH_ENABLE_NORMALIZE_OPCODES 1
+#define MONO_ARCH_ENABLE_GLOBAL_RA 1
#define MONO_ARCH_AOT_SUPPORTED 1
+/* Used for optimization, not complete */
+#define MONO_ARCH_IS_OP_MEMBASE(opcode) ((opcode) == OP_X86_PUSH_MEMBASE)
+
+/*
+ * MONO_ARCH_EMIT_BOUNDS_CHECK:
+ *
+ *   Emit an array bounds check: compare the 32 bit length stored at
+ * [array_reg + offset] against index_reg and throw
+ * IndexOutOfRangeException when length <= index (unsigned compare, so a
+ * negative index also fails).
+ */
+#define MONO_ARCH_EMIT_BOUNDS_CHECK(cfg, array_reg, offset, index_reg) do { \
+            MonoInst *inst; \
+            MONO_INST_NEW ((cfg), inst, OP_AMD64_ICOMPARE_MEMBASE_REG); \
+            inst->inst_basereg = array_reg; \
+            inst->inst_offset = offset; \
+            inst->sreg2 = index_reg; \
+            MONO_ADD_INS ((cfg)->cbb, inst); \
+			MONO_EMIT_NEW_COND_EXC (cfg, LE_UN, "IndexOutOfRangeException"); \
+	} while (0)
+
void
mono_amd64_patch (unsigned char* code, gpointer target) MONO_INTERNAL;
#define MOV_LR_PC ((ARMCOND_AL << ARMCOND_SHIFT) | (1 << 24) | (0xa << 20) | (ARMREG_LR << 12) | ARMREG_PC)
#define DEBUG_IMT 0
+void mini_emit_memcpy2 (MonoCompile *cfg, int destreg, int doffset, int srcreg, int soffset, int size, int align);
+
const char*
mono_arch_regname (int reg)
{
MONO_INST_NEW (cfg, arg, OP_OUTARG);
arg->inst_imm = cinfo->sig_cookie.offset;
arg->inst_left = sig_arg;
- MONO_INST_LIST_ADD_TAIL (&arg->node, &call->out_args);
+
+ /* prepend, so they get reversed */
+ arg->next = call->out_args;
+ call->out_args = arg;
}
if (is_virtual && i == 0) {
/* the argument will be attached to the call instrucion */
arg->inst_left = in;
arg->inst_right = (MonoInst*)call;
arg->type = in->type;
- MONO_INST_LIST_ADD_TAIL (&arg->node, &call->out_args);
+ /* prepend, we'll need to reverse them later */
+ arg->next = call->out_args;
+ call->out_args = arg;
if (ainfo->regtype == RegTypeGeneral) {
arg->backend.reg3 = ainfo->reg;
call->used_iregs |= 1 << ainfo->reg;
}
}
}
+ /*
+ * Reverse the call->out_args list.
+ */
+ {
+ MonoInst *prev = NULL, *list = call->out_args, *next;
+ while (list) {
+ next = list->next;
+ list->next = prev;
+ prev = list;
+ list = next;
+ }
+ call->out_args = prev;
+ }
call->stack_usage = cinfo->stack_usage;
cfg->param_area = MAX (cfg->param_area, cinfo->stack_usage);
cfg->flags |= MONO_CFG_HAS_CALLS;
return call;
}
+void
+mono_arch_emit_call (MonoCompile *cfg, MonoCallInst *call)
+{
+ MonoInst *in, *ins;
+ MonoMethodSignature *sig;
+ int i, n;
+ CallInfo *cinfo;
+
+ sig = call->signature;
+ n = sig->param_count + sig->hasthis;
+
+ cinfo = calculate_sizes (sig, sig->pinvoke);
+
+ for (i = 0; i < n; ++i) {
+ ArgInfo *ainfo = cinfo->args + i;
+ MonoType *t;
+
+ if (i >= sig->hasthis)
+ t = sig->params [i - sig->hasthis];
+ else
+ t = &mono_defaults.int_class->byval_arg;
+ t = mono_type_get_underlying_type (t);
+
+ if ((sig->call_convention == MONO_CALL_VARARG) && (i == sig->sentinelpos)) {
+ /* FIXME: */
+ NOT_IMPLEMENTED;
+ }
+
+ in = call->args [i];
+
+ switch (ainfo->regtype) {
+ case RegTypeGeneral:
+ if (!t->byref && ((t->type == MONO_TYPE_I8) || (t->type == MONO_TYPE_U8))) {
+ MONO_INST_NEW (cfg, ins, OP_MOVE);
+ ins->dreg = mono_alloc_ireg (cfg);
+ ins->sreg1 = in->dreg + 1;
+ MONO_ADD_INS (cfg->cbb, ins);
+ mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, ainfo->reg, FALSE);
+
+ MONO_INST_NEW (cfg, ins, OP_MOVE);
+ ins->dreg = mono_alloc_ireg (cfg);
+ ins->sreg1 = in->dreg + 2;
+ MONO_ADD_INS (cfg->cbb, ins);
+ mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, ainfo->reg + 1, FALSE);
+ } else if (!t->byref && ((t->type == MONO_TYPE_R8) || (t->type == MONO_TYPE_R4))) {
+#ifndef MONO_ARCH_SOFT_FLOAT
+ int creg;
+#endif
+
+ if (ainfo->size == 4) {
+#ifdef MONO_ARCH_SOFT_FLOAT
+				/* mono_emit_call_args () has already done the r8->r4 conversion */
+ /* The converted value is in an int vreg */
+ MONO_INST_NEW (cfg, ins, OP_MOVE);
+ ins->dreg = mono_alloc_ireg (cfg);
+ ins->sreg1 = in->dreg;
+ MONO_ADD_INS (cfg->cbb, ins);
+ mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, ainfo->reg, FALSE);
+#else
+ MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORER4_MEMBASE_REG, ARMREG_SP, (cfg->param_area - 8), in->dreg);
+ creg = mono_alloc_ireg (cfg);
+ MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOAD_MEMBASE, creg, ARMREG_SP, (cfg->param_area - 8));
+ mono_call_inst_add_outarg_reg (cfg, call, creg, ainfo->reg, FALSE);
+#endif
+ } else {
+#ifdef MONO_ARCH_SOFT_FLOAT
+ MONO_INST_NEW (cfg, ins, OP_FGETLOW32);
+ ins->dreg = mono_alloc_ireg (cfg);
+ ins->sreg1 = in->dreg;
+ MONO_ADD_INS (cfg->cbb, ins);
+ mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, ainfo->reg, FALSE);
+
+ MONO_INST_NEW (cfg, ins, OP_FGETHIGH32);
+ ins->dreg = mono_alloc_ireg (cfg);
+ ins->sreg1 = in->dreg;
+ MONO_ADD_INS (cfg->cbb, ins);
+ mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, ainfo->reg + 1, FALSE);
+#else
+ MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORER8_MEMBASE_REG, ARMREG_SP, (cfg->param_area - 8), in->dreg);
+ creg = mono_alloc_ireg (cfg);
+ MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOAD_MEMBASE, creg, ARMREG_SP, (cfg->param_area - 8));
+ mono_call_inst_add_outarg_reg (cfg, call, creg, ainfo->reg, FALSE);
+ creg = mono_alloc_ireg (cfg);
+ MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOAD_MEMBASE, creg, ARMREG_SP, (cfg->param_area - 8 + 4));
+ mono_call_inst_add_outarg_reg (cfg, call, creg, ainfo->reg + 1, FALSE);
+#endif
+ }
+ cfg->flags |= MONO_CFG_HAS_FPOUT;
+ } else {
+ MONO_INST_NEW (cfg, ins, OP_MOVE);
+ ins->dreg = mono_alloc_ireg (cfg);
+ ins->sreg1 = in->dreg;
+ MONO_ADD_INS (cfg->cbb, ins);
+
+ mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, ainfo->reg, FALSE);
+ }
+ break;
+ case RegTypeStructByAddr:
+ NOT_IMPLEMENTED;
+#if 0
+		/* FIXME: where is the data allocated? */
+ arg->backend.reg3 = ainfo->reg;
+ call->used_iregs |= 1 << ainfo->reg;
+ g_assert_not_reached ();
+#endif
+ break;
+ case RegTypeStructByVal:
+ MONO_INST_NEW (cfg, ins, OP_OUTARG_VT);
+ ins->opcode = OP_OUTARG_VT;
+ ins->sreg1 = in->dreg;
+ ins->klass = in->klass;
+ ins->inst_p0 = call;
+ ins->inst_p1 = mono_mempool_alloc (cfg->mempool, sizeof (ArgInfo));
+ memcpy (ins->inst_p1, ainfo, sizeof (ArgInfo));
+ MONO_ADD_INS (cfg->cbb, ins);
+ break;
+ case RegTypeBase:
+ if (!t->byref && ((t->type == MONO_TYPE_I8) || (t->type == MONO_TYPE_U8))) {
+ MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, ARMREG_SP, ainfo->offset, in->dreg);
+ } else if (!t->byref && ((t->type == MONO_TYPE_R4) || (t->type == MONO_TYPE_R8))) {
+ if (t->type == MONO_TYPE_R8) {
+ MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORER8_MEMBASE_REG, ARMREG_SP, ainfo->offset, in->dreg);
+ } else {
+#ifdef MONO_ARCH_SOFT_FLOAT
+ MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, ARMREG_SP, ainfo->offset, in->dreg);
+#else
+ MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORER4_MEMBASE_REG, ARMREG_SP, ainfo->offset, in->dreg);
+#endif
+ }
+ } else {
+ MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, ARMREG_SP, ainfo->offset, in->dreg);
+ }
+ break;
+ case RegTypeBaseGen:
+ if (!t->byref && ((t->type == MONO_TYPE_I8) || (t->type == MONO_TYPE_U8))) {
+ MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, ARMREG_SP, ainfo->offset, (G_BYTE_ORDER == G_BIG_ENDIAN) ? in->dreg + 1 : in->dreg + 2);
+ MONO_INST_NEW (cfg, ins, OP_MOVE);
+ ins->dreg = mono_alloc_ireg (cfg);
+ ins->sreg1 = G_BYTE_ORDER == G_BIG_ENDIAN ? in->dreg + 2 : in->dreg + 1;
+ MONO_ADD_INS (cfg->cbb, ins);
+ mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, ARMREG_R3, FALSE);
+ } else if (!t->byref && (t->type == MONO_TYPE_R8)) {
+ int creg;
+
+#ifdef MONO_ARCH_SOFT_FLOAT
+ g_assert_not_reached ();
+#endif
+
+ MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORER8_MEMBASE_REG, ARMREG_SP, (cfg->param_area - 8), in->dreg);
+ creg = mono_alloc_ireg (cfg);
+ mono_call_inst_add_outarg_reg (cfg, call, creg, ARMREG_R3, FALSE);
+ MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOAD_MEMBASE, creg, ARMREG_SP, (cfg->param_area - 8));
+ creg = mono_alloc_ireg (cfg);
+ MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOAD_MEMBASE, creg, ARMREG_SP, (cfg->param_area - 4));
+ MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, ARMREG_SP, ainfo->offset, creg);
+ cfg->flags |= MONO_CFG_HAS_FPOUT;
+ } else {
+ g_assert_not_reached ();
+ }
+ break;
+ case RegTypeFP: {
+ /* FIXME: */
+ NOT_IMPLEMENTED;
+#if 0
+ arg->backend.reg3 = ainfo->reg;
+ /* FP args are passed in int regs */
+ call->used_iregs |= 1 << ainfo->reg;
+ if (ainfo->size == 8) {
+ arg->opcode = OP_OUTARG_R8;
+ call->used_iregs |= 1 << (ainfo->reg + 1);
+ } else {
+ arg->opcode = OP_OUTARG_R4;
+ }
+#endif
+ cfg->flags |= MONO_CFG_HAS_FPOUT;
+ break;
+ }
+ default:
+ g_assert_not_reached ();
+ }
+ }
+
+ if (sig->ret && MONO_TYPE_ISSTRUCT (sig->ret)) {
+ MonoInst *vtarg;
+
+ MONO_INST_NEW (cfg, vtarg, OP_MOVE);
+ vtarg->sreg1 = call->vret_var->dreg;
+ vtarg->dreg = mono_alloc_preg (cfg);
+ MONO_ADD_INS (cfg->cbb, vtarg);
+
+ mono_call_inst_add_outarg_reg (cfg, call, vtarg->dreg, cinfo->ret.reg, FALSE);
+ }
+
+ call->stack_usage = cinfo->stack_usage;
+
+ g_free (cinfo);
+}
+
+void
+mono_arch_emit_outarg_vt (MonoCompile *cfg, MonoInst *ins, MonoInst *src)
+{
+ MonoCallInst *call = (MonoCallInst*)ins->inst_p0;
+ ArgInfo *ainfo = ins->inst_p1;
+ int ovf_size = ainfo->vtsize;
+ int doffset = ainfo->offset;
+ int i, soffset, dreg;
+
+ soffset = 0;
+ for (i = 0; i < ainfo->size; ++i) {
+ dreg = mono_alloc_ireg (cfg);
+ MONO_EMIT_NEW_LOAD_MEMBASE (cfg, dreg, src->dreg, soffset);
+ mono_call_inst_add_outarg_reg (cfg, call, dreg, ainfo->reg + i, FALSE);
+ soffset += sizeof (gpointer);
+ }
+ //g_print ("vt size: %d at R%d + %d\n", doffset, vt->inst_basereg, vt->inst_offset);
+ if (ovf_size != 0)
+ mini_emit_memcpy2 (cfg, ARMREG_SP, doffset, src->dreg, soffset, ovf_size * sizeof (gpointer), 0);
+}
+
+void
+mono_arch_emit_setret (MonoCompile *cfg, MonoMethod *method, MonoInst *val)
+{
+ MonoType *ret = mono_type_get_underlying_type (mono_method_signature (method)->ret);
+
+ if (!ret->byref) {
+ if (ret->type == MONO_TYPE_I8 || ret->type == MONO_TYPE_U8) {
+ MonoInst *ins;
+
+ MONO_INST_NEW (cfg, ins, OP_SETLRET);
+ ins->sreg1 = val->dreg + 1;
+ ins->sreg2 = val->dreg + 2;
+ MONO_ADD_INS (cfg->cbb, ins);
+ return;
+ }
+#ifdef MONO_ARCH_SOFT_FLOAT
+ if (ret->type == MONO_TYPE_R8) {
+ MonoInst *ins;
+
+ MONO_INST_NEW (cfg, ins, OP_SETFRET);
+ ins->dreg = cfg->ret->dreg;
+ ins->sreg1 = val->dreg;
+ MONO_ADD_INS (cfg->cbb, ins);
+ return;
+ }
+ if (ret->type == MONO_TYPE_R4) {
+ /* Already converted to an int in method_to_ir () */
+ MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, cfg->ret->dreg, val->dreg);
+ return;
+ }
+#endif
+ }
+
+ /* FIXME: */
+ MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, cfg->ret->dreg, val->dreg);
+}
+
+gboolean
+mono_arch_is_inst_imm (gint64 imm)
+{
+ return TRUE;
+}
+
/*
* Allow tracing to work with this interface (with an optional argument)
*/
void
mono_arch_peephole_pass_2 (MonoCompile *cfg, MonoBasicBlock *bb)
{
- MonoInst *ins, *n;
+ MonoInst *ins, *n, *last_ins = NULL;
MONO_BB_FOR_EACH_INS_SAFE (bb, n, ins) {
- MonoInst *last_ins = mono_inst_list_prev (&ins->node, &bb->ins_list);
-
switch (ins->opcode) {
case OP_MUL_IMM:
+ case OP_IMUL_IMM:
+ /* Already done by an arch-independent pass */
+ if (cfg->new_ir)
+ break;
+
/* remove unnecessary multiplication with 1 */
if (ins->inst_imm == 1) {
if (ins->dreg != ins->sreg1) {
}
break;
}
+ last_ins = ins;
+ ins = ins->next;
}
+ bb->last_ins = last_ins;
}
/*
ARMCOND_LO
};
-
-#define NEW_INS(cfg,ins,dest,op) do { \
- (dest) = mono_mempool_alloc0 ((cfg)->mempool, sizeof (MonoInst)); \
- (dest)->opcode = (op); \
- MONO_INST_LIST_ADD_TAIL (&(dest)->node, &(ins)->node); \
+#define NEW_INS(cfg,dest,op) do { \
+ MONO_INST_NEW ((cfg), (dest), (op)); \
+ mono_bblock_insert_before_ins (bb, ins, (dest)); \
} while (0)
static int
return OP_IAND;
case OP_COMPARE_IMM:
return OP_COMPARE;
+ case OP_ICOMPARE_IMM:
+ return OP_ICOMPARE;
case OP_ADDCC_IMM:
return OP_ADDCC;
case OP_ADC_IMM:
void
mono_arch_lowering_pass (MonoCompile *cfg, MonoBasicBlock *bb)
{
+ MonoInst *ins, *temp, *last_ins = NULL;
int rot_amount, imm8, low_imm;
- MonoInst *ins, *temp;
/* setup the virtual reg allocator */
if (bb->max_vreg > cfg->rs->next_vreg)
cfg->rs->next_vreg = bb->max_vreg;
MONO_BB_FOR_EACH_INS (bb, ins) {
- MonoInst *last_ins;
-
loop_start:
- last_ins = mono_inst_list_prev (&ins->node, &bb->ins_list);
switch (ins->opcode) {
case OP_ADD_IMM:
case OP_SUB_IMM:
case OP_AND_IMM:
case OP_COMPARE_IMM:
+ case OP_ICOMPARE_IMM:
case OP_ADDCC_IMM:
case OP_ADC_IMM:
case OP_SUBCC_IMM:
case OP_SBB_IMM:
case OP_OR_IMM:
case OP_XOR_IMM:
+ case OP_IADD_IMM:
+ case OP_ISUB_IMM:
+ case OP_IAND_IMM:
+ case OP_IADC_IMM:
+ case OP_ISBB_IMM:
+ case OP_IOR_IMM:
+ case OP_IXOR_IMM:
if ((imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount)) < 0) {
- NEW_INS (cfg, ins, temp, OP_ICONST);
+ NEW_INS (cfg, temp, OP_ICONST);
temp->inst_c0 = ins->inst_imm;
temp->dreg = mono_regstate_next_int (cfg->rs);
ins->sreg2 = temp->dreg;
- ins->opcode = map_to_reg_reg_op (ins->opcode);
+ if (cfg->new_ir)
+ ins->opcode = mono_op_imm_to_op (ins->opcode);
+ else
+ ins->opcode = map_to_reg_reg_op (ins->opcode);
}
break;
case OP_MUL_IMM:
+ case OP_IMUL_IMM:
if (ins->inst_imm == 1) {
ins->opcode = OP_MOVE;
break;
ins->inst_imm = imm8;
break;
}
- NEW_INS (cfg, ins, temp, OP_ICONST);
+ NEW_INS (cfg, temp, OP_ICONST);
temp->inst_c0 = ins->inst_imm;
temp->dreg = mono_regstate_next_int (cfg->rs);
ins->sreg2 = temp->dreg;
ins->opcode = OP_IMUL;
break;
+ case OP_LOCALLOC_IMM:
+ NEW_INS (cfg, temp, OP_ICONST);
+ temp->inst_c0 = ins->inst_imm;
+ temp->dreg = mono_regstate_next_int (cfg->rs);
+ ins->sreg1 = temp->dreg;
+ ins->opcode = OP_LOCALLOC;
+ break;
case OP_LOAD_MEMBASE:
case OP_LOADI4_MEMBASE:
case OP_LOADU4_MEMBASE:
*/
if (arm_is_imm12 (ins->inst_offset))
break;
- NEW_INS (cfg, ins, temp, OP_ICONST);
+ NEW_INS (cfg, temp, OP_ICONST);
temp->inst_c0 = ins->inst_offset;
temp->dreg = mono_regstate_next_int (cfg->rs);
ins->sreg2 = temp->dreg;
case OP_LOADI1_MEMBASE:
if (arm_is_imm8 (ins->inst_offset))
break;
- NEW_INS (cfg, ins, temp, OP_ICONST);
+ NEW_INS (cfg, temp, OP_ICONST);
temp->inst_c0 = ins->inst_offset;
temp->dreg = mono_regstate_next_int (cfg->rs);
ins->sreg2 = temp->dreg;
break;
low_imm = ins->inst_offset & 0x1ff;
if ((imm8 = mono_arm_is_rotated_imm8 (ins->inst_offset & ~0x1ff, &rot_amount)) >= 0) {
- NEW_INS (cfg, ins, temp, OP_ADD_IMM);
+ NEW_INS (cfg, temp, OP_ADD_IMM);
temp->inst_imm = ins->inst_offset & ~0x1ff;
temp->sreg1 = ins->inst_basereg;
temp->dreg = mono_regstate_next_int (cfg->rs);
case OP_STOREI1_MEMBASE_REG:
if (arm_is_imm12 (ins->inst_offset))
break;
- NEW_INS (cfg, ins, temp, OP_ICONST);
+ NEW_INS (cfg, temp, OP_ICONST);
temp->inst_c0 = ins->inst_offset;
temp->dreg = mono_regstate_next_int (cfg->rs);
ins->sreg2 = temp->dreg;
case OP_STOREI2_MEMBASE_REG:
if (arm_is_imm8 (ins->inst_offset))
break;
- NEW_INS (cfg, ins, temp, OP_ICONST);
+ NEW_INS (cfg, temp, OP_ICONST);
temp->inst_c0 = ins->inst_offset;
temp->dreg = mono_regstate_next_int (cfg->rs);
ins->sreg2 = temp->dreg;
break;
low_imm = ins->inst_offset & 0x1ff;
if ((imm8 = mono_arm_is_rotated_imm8 (ins->inst_offset & ~ 0x1ff, &rot_amount)) >= 0 && arm_is_fpimm8 (low_imm)) {
- NEW_INS (cfg, ins, temp, OP_ADD_IMM);
+ NEW_INS (cfg, temp, OP_ADD_IMM);
temp->inst_imm = ins->inst_offset & ~0x1ff;
temp->sreg1 = ins->inst_destbasereg;
temp->dreg = mono_regstate_next_int (cfg->rs);
case OP_STOREI1_MEMBASE_IMM:
case OP_STOREI2_MEMBASE_IMM:
case OP_STOREI4_MEMBASE_IMM:
- NEW_INS (cfg, ins, temp, OP_ICONST);
+ NEW_INS (cfg, temp, OP_ICONST);
temp->inst_c0 = ins->inst_imm;
temp->dreg = mono_regstate_next_int (cfg->rs);
ins->sreg1 = temp->dreg;
ins->opcode = map_to_reg_reg_op (ins->opcode);
+ last_ins = temp;
goto loop_start; /* make it handle the possibly big ins->inst_offset */
+ case OP_FCOMPARE: {
+ gboolean swap = FALSE;
+ int reg;
+
+ /* Some fp compares require swapped operands */
+ g_assert (ins->next);
+ switch (ins->next->opcode) {
+ case OP_FBGT:
+ ins->next->opcode = OP_FBLT;
+ swap = TRUE;
+ break;
+ case OP_FBGT_UN:
+ ins->next->opcode = OP_FBLT_UN;
+ swap = TRUE;
+ break;
+ case OP_FBLE:
+ ins->next->opcode = OP_FBGE;
+ swap = TRUE;
+ break;
+ case OP_FBLE_UN:
+ ins->next->opcode = OP_FBGE_UN;
+ swap = TRUE;
+ break;
+ default:
+ break;
+ }
+ if (swap) {
+ reg = ins->sreg1;
+ ins->sreg1 = ins->sreg2;
+ ins->sreg2 = reg;
+ }
+ break;
+ }
}
+
+ last_ins = ins;
}
+ bb->last_ins = last_ins;
bb->max_vreg = cfg->rs->next_vreg;
+
}
static guchar*
MonoCallInst *call;
guint offset;
guint8 *code = cfg->native_code + cfg->code_len;
+ MonoInst *last_ins = NULL;
guint last_offset = 0;
int max_len, cpos;
int imm8, rot_amount;
ARM_SHR_IMM (code, ins->dreg, ins->dreg, 16);
break;
case OP_COMPARE:
+ case OP_ICOMPARE:
ARM_CMP_REG_REG (code, ins->sreg1, ins->sreg2);
break;
case OP_COMPARE_IMM:
+ case OP_ICOMPARE_IMM:
imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
g_assert (imm8 >= 0);
ARM_CMP_REG_IMM (code, ins->sreg1, imm8, rot_amount);
(gpointer)"mono_break");
code = emit_call_seq (cfg, code);
break;
+ case OP_NOP:
+ case OP_DUMMY_USE:
+ case OP_DUMMY_STORE:
+ case OP_NOT_REACHED:
+ case OP_NOT_NULL:
+ break;
case OP_ADDCC:
+ case OP_IADDCC:
ARM_ADDS_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
break;
case OP_IADD:
ARM_ADD_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
break;
case OP_ADC:
+ case OP_IADC:
ARM_ADCS_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
break;
case OP_ADDCC_IMM:
ARM_ADDS_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
break;
case OP_ADD_IMM:
+ case OP_IADD_IMM:
imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
g_assert (imm8 >= 0);
ARM_ADD_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
break;
case OP_ADC_IMM:
+ case OP_IADC_IMM:
imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
g_assert (imm8 >= 0);
ARM_ADCS_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
//EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_TRUE, PPC_BR_EQ, "OverflowException");
break;
case OP_SUBCC:
+ case OP_ISUBCC:
ARM_SUBS_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
break;
case OP_SUBCC_IMM:
ARM_SUB_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
break;
case OP_SBB:
+ case OP_ISBB:
ARM_SBCS_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
break;
case OP_SUB_IMM:
+ case OP_ISUB_IMM:
imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
g_assert (imm8 >= 0);
ARM_SUB_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
break;
case OP_SBB_IMM:
+ case OP_ISBB_IMM:
imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
g_assert (imm8 >= 0);
ARM_SBCS_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
ARM_AND_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
break;
case OP_AND_IMM:
+ case OP_IAND_IMM:
imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
g_assert (imm8 >= 0);
ARM_AND_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
ARM_ORR_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
break;
case OP_OR_IMM:
+ case OP_IOR_IMM:
imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
g_assert (imm8 >= 0);
ARM_ORR_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
ARM_EOR_REG_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
break;
case OP_XOR_IMM:
+ case OP_IXOR_IMM:
imm8 = mono_arm_is_rotated_imm8 (ins->inst_imm, &rot_amount);
g_assert (imm8 >= 0);
ARM_EOR_REG_IMM (code, ins->dreg, ins->sreg1, imm8, rot_amount);
ARM_SHL_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
break;
case OP_SHL_IMM:
+ case OP_ISHL_IMM:
if (ins->inst_imm)
ARM_SHL_IMM (code, ins->dreg, ins->sreg1, (ins->inst_imm & 0x1f));
else if (ins->dreg != ins->sreg1)
ARM_SAR_REG (code, ins->dreg, ins->sreg1, ins->sreg2);
break;
case OP_SHR_IMM:
+ case OP_ISHR_IMM:
if (ins->inst_imm)
ARM_SAR_IMM (code, ins->dreg, ins->sreg1, (ins->inst_imm & 0x1f));
else if (ins->dreg != ins->sreg1)
ARM_MOV_REG_REG (code, ins->dreg, ins->sreg1);
break;
case OP_SHR_UN_IMM:
+ case OP_ISHR_UN_IMM:
if (ins->inst_imm)
ARM_SHR_IMM (code, ins->dreg, ins->sreg1, (ins->inst_imm & 0x1f));
else if (ins->dreg != ins->sreg1)
case OP_FCALL:
case OP_LCALL:
case OP_VCALL:
+ case OP_VCALL2:
case OP_VOIDCALL:
case OP_CALL:
call = (MonoCallInst*)ins;
case OP_FCALL_REG:
case OP_LCALL_REG:
case OP_VCALL_REG:
+ case OP_VCALL2_REG:
case OP_VOIDCALL_REG:
case OP_CALL_REG:
code = emit_call_reg (code, ins->sreg1);
case OP_FCALL_MEMBASE:
case OP_LCALL_MEMBASE:
case OP_VCALL_MEMBASE:
+ case OP_VCALL2_MEMBASE:
case OP_VOIDCALL_MEMBASE:
case OP_CALL_MEMBASE:
g_assert (arm_is_imm12 (ins->inst_offset));
code = emit_call_seq (cfg, code);
break;
}
- case OP_START_HANDLER:
- if (arm_is_imm12 (ins->inst_left->inst_offset)) {
- ARM_STR_IMM (code, ARMREG_LR, ins->inst_left->inst_basereg, ins->inst_left->inst_offset);
+ case OP_START_HANDLER: {
+ MonoInst *spvar = mono_find_spvar_for_region (cfg, bb->region);
+
+ if (arm_is_imm12 (spvar->inst_offset)) {
+ ARM_STR_IMM (code, ARMREG_LR, spvar->inst_basereg, spvar->inst_offset);
} else {
- code = mono_arm_emit_load_imm (code, ARMREG_IP, ins->inst_left->inst_offset);
- ARM_STR_REG_REG (code, ARMREG_LR, ins->inst_left->inst_basereg, ARMREG_IP);
+ code = mono_arm_emit_load_imm (code, ARMREG_IP, spvar->inst_offset);
+ ARM_STR_REG_REG (code, ARMREG_LR, spvar->inst_basereg, ARMREG_IP);
}
break;
- case OP_ENDFILTER:
+ }
+ case OP_ENDFILTER: {
+ MonoInst *spvar = mono_find_spvar_for_region (cfg, bb->region);
+
if (ins->sreg1 != ARMREG_R0)
ARM_MOV_REG_REG (code, ARMREG_R0, ins->sreg1);
- if (arm_is_imm12 (ins->inst_left->inst_offset)) {
- ARM_LDR_IMM (code, ARMREG_IP, ins->inst_left->inst_basereg, ins->inst_left->inst_offset);
+ if (arm_is_imm12 (spvar->inst_offset)) {
+ ARM_LDR_IMM (code, ARMREG_IP, spvar->inst_basereg, spvar->inst_offset);
} else {
- g_assert (ARMREG_IP != ins->inst_left->inst_basereg);
- code = mono_arm_emit_load_imm (code, ARMREG_IP, ins->inst_left->inst_offset);
- ARM_LDR_REG_REG (code, ARMREG_IP, ins->inst_left->inst_basereg, ARMREG_IP);
+ g_assert (ARMREG_IP != spvar->inst_basereg);
+ code = mono_arm_emit_load_imm (code, ARMREG_IP, spvar->inst_offset);
+ ARM_LDR_REG_REG (code, ARMREG_IP, spvar->inst_basereg, ARMREG_IP);
}
ARM_MOV_REG_REG (code, ARMREG_PC, ARMREG_IP);
break;
- case OP_ENDFINALLY:
- if (arm_is_imm12 (ins->inst_left->inst_offset)) {
- ARM_LDR_IMM (code, ARMREG_IP, ins->inst_left->inst_basereg, ins->inst_left->inst_offset);
+ }
+ case OP_ENDFINALLY: {
+ MonoInst *spvar = mono_find_spvar_for_region (cfg, bb->region);
+
+ if (arm_is_imm12 (spvar->inst_offset)) {
+ ARM_LDR_IMM (code, ARMREG_IP, spvar->inst_basereg, spvar->inst_offset);
} else {
- g_assert (ARMREG_IP != ins->inst_left->inst_basereg);
- code = mono_arm_emit_load_imm (code, ARMREG_IP, ins->inst_left->inst_offset);
- ARM_LDR_REG_REG (code, ARMREG_IP, ins->inst_left->inst_basereg, ARMREG_IP);
+ g_assert (ARMREG_IP != spvar->inst_basereg);
+ code = mono_arm_emit_load_imm (code, ARMREG_IP, spvar->inst_offset);
+ ARM_LDR_REG_REG (code, ARMREG_IP, spvar->inst_basereg, ARMREG_IP);
}
ARM_MOV_REG_REG (code, ARMREG_PC, ARMREG_IP);
break;
+ }
case OP_CALL_HANDLER:
mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_BB, ins->inst_target_bb);
ARM_BL (code, 0);
* After follows the data.
* FIXME: add aot support.
*/
+ if (cfg->new_ir)
+ mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_SWITCH, ins->inst_p0);
max_len += 4 * GPOINTER_TO_INT (ins->klass);
if (offset > (cfg->code_size - max_len - 16)) {
cfg->code_size += max_len;
code += 4 * GPOINTER_TO_INT (ins->klass);
break;
case OP_CEQ:
+ case OP_ICEQ:
ARM_MOV_REG_IMM8_COND (code, ins->dreg, 0, ARMCOND_NE);
ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_EQ);
break;
case OP_CLT:
+ case OP_ICLT:
ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_LT);
break;
case OP_CLT_UN:
+ case OP_ICLT_UN:
ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_LO);
break;
case OP_CGT:
+ case OP_ICGT:
ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_GT);
break;
case OP_CGT_UN:
+ case OP_ICGT_UN:
ARM_MOV_REG_IMM8 (code, ins->dreg, 0);
ARM_MOV_REG_IMM8_COND (code, ins->dreg, 1, ARMCOND_HI);
break;
case OP_COND_EXC_LE_UN:
EMIT_COND_SYSTEM_EXCEPTION (ins->opcode - OP_COND_EXC_EQ, ins->inst_p1);
break;
+ case OP_COND_EXC_IEQ:
+ case OP_COND_EXC_INE_UN:
+ case OP_COND_EXC_ILT:
+ case OP_COND_EXC_ILT_UN:
+ case OP_COND_EXC_IGT:
+ case OP_COND_EXC_IGT_UN:
+ case OP_COND_EXC_IGE:
+ case OP_COND_EXC_IGE_UN:
+ case OP_COND_EXC_ILE:
+ case OP_COND_EXC_ILE_UN:
+ EMIT_COND_SYSTEM_EXCEPTION (ins->opcode - OP_COND_EXC_IEQ, ins->inst_p1);
+ break;
case OP_COND_EXC_C:
case OP_COND_EXC_OV:
case OP_COND_EXC_NC:
case OP_COND_EXC_NO:
- g_assert_not_reached ();
+ case OP_COND_EXC_IC:
+ case OP_COND_EXC_IOV:
+ case OP_COND_EXC_INC:
+ case OP_COND_EXC_INO:
+ /* FIXME: */
break;
case OP_IBEQ:
case OP_IBNE_UN:
g_assert_not_reached ();
/* Implemented as helper calls */
break;
- case OP_LCONV_TO_OVF_I: {
+ case OP_LCONV_TO_OVF_I:
+ case OP_LCONV_TO_OVF_I4_2: {
#if ARM_PORT
guint32 *negative_branch, *msword_positive_branch, *msword_negative_branch, *ovf_ex_target;
// Check if its negative
g_assert_not_reached ();
break;
case OP_FCOMPARE:
- /* each fp compare op needs to do its own */
- g_assert_not_reached ();
- //ARM_FCMP (code, ARM_FPA_CMF, ins->sreg1, ins->sreg2);
+#ifdef ARM_FPU_FPA
+ ARM_FCMP (code, ARM_FPA_CMF, ins->sreg1, ins->sreg2);
+#elif defined(ARM_FPU_VFP)
+ ARM_CMPD (code, ins->sreg1, ins->sreg2);
+#endif
break;
case OP_FCEQ:
#ifdef ARM_FPU_FPA
* V Unordered ARMCOND_VS
*/
case OP_FBEQ:
-#ifdef ARM_FPU_FPA
- ARM_FCMP (code, ARM_FPA_CMF, ins->sreg1, ins->sreg2);
-#elif defined(ARM_FPU_VFP)
- ARM_CMPD (code, ins->sreg1, ins->sreg2);
-#endif
EMIT_COND_BRANCH (ins, OP_IBEQ - OP_IBEQ);
break;
case OP_FBNE_UN:
-#ifdef ARM_FPU_FPA
- ARM_FCMP (code, ARM_FPA_CMF, ins->sreg1, ins->sreg2);
-#elif defined(ARM_FPU_VFP)
- ARM_CMPD (code, ins->sreg1, ins->sreg2);
-#endif
EMIT_COND_BRANCH (ins, OP_IBNE_UN - OP_IBEQ);
break;
case OP_FBLT:
-#ifdef ARM_FPU_FPA
- ARM_FCMP (code, ARM_FPA_CMF, ins->sreg1, ins->sreg2);
-#elif defined(ARM_FPU_VFP)
- ARM_CMPD (code, ins->sreg1, ins->sreg2);
-#endif
EMIT_COND_BRANCH_FLAGS (ins, ARMCOND_MI); /* N set */
break;
case OP_FBLT_UN:
-#ifdef ARM_FPU_FPA
- ARM_FCMP (code, ARM_FPA_CMF, ins->sreg1, ins->sreg2);
-#elif defined(ARM_FPU_VFP)
- ARM_CMPD (code, ins->sreg1, ins->sreg2);
-#endif
EMIT_COND_BRANCH_FLAGS (ins, ARMCOND_VS); /* V set */
EMIT_COND_BRANCH_FLAGS (ins, ARMCOND_MI); /* N set */
break;
case OP_FBGT:
-#ifdef ARM_FPU_FPA
- ARM_FCMP (code, ARM_FPA_CMF, ins->sreg2, ins->sreg1);
-#elif defined(ARM_FPU_VFP)
- ARM_CMPD (code, ins->sreg2, ins->sreg1);
-#endif
- EMIT_COND_BRANCH_FLAGS (ins, ARMCOND_MI); /* N set, swapped args */
- break;
case OP_FBGT_UN:
-#ifdef ARM_FPU_FPA
- ARM_FCMP (code, ARM_FPA_CMF, ins->sreg2, ins->sreg1);
-#elif defined(ARM_FPU_VFP)
- ARM_CMPD (code, ins->sreg2, ins->sreg1);
-#endif
- EMIT_COND_BRANCH_FLAGS (ins, ARMCOND_VS); /* V set */
- EMIT_COND_BRANCH_FLAGS (ins, ARMCOND_MI); /* N set, swapped args */
+ case OP_FBLE:
+ case OP_FBLE_UN:
+ g_assert_not_reached ();
break;
case OP_FBGE:
-#ifdef ARM_FPU_FPA
- ARM_FCMP (code, ARM_FPA_CMF, ins->sreg1, ins->sreg2);
-#elif defined(ARM_FPU_VFP)
- ARM_CMPD (code, ins->sreg1, ins->sreg2);
-#endif
EMIT_COND_BRANCH_FLAGS (ins, ARMCOND_CS);
break;
case OP_FBGE_UN:
-#ifdef ARM_FPU_FPA
- ARM_FCMP (code, ARM_FPA_CMF, ins->sreg1, ins->sreg2);
-#elif defined(ARM_FPU_VFP)
- ARM_CMPD (code, ins->sreg1, ins->sreg2);
-#endif
EMIT_COND_BRANCH_FLAGS (ins, ARMCOND_VS); /* V set */
EMIT_COND_BRANCH_FLAGS (ins, ARMCOND_GE);
break;
- case OP_FBLE:
-#ifdef ARM_FPU_FPA
- ARM_FCMP (code, ARM_FPA_CMF, ins->sreg2, ins->sreg1);
-#elif defined(ARM_FPU_VFP)
- ARM_CMPD (code, ins->sreg2, ins->sreg1);
-#endif
- EMIT_COND_BRANCH_FLAGS (ins, ARMCOND_CS); /* swapped */
- break;
- case OP_FBLE_UN:
-#ifdef ARM_FPU_FPA
- ARM_FCMP (code, ARM_FPA_CMF, ins->sreg2, ins->sreg1);
-#elif defined(ARM_FPU_VFP)
- ARM_CMPD (code, ins->sreg2, ins->sreg1);
-#endif
- EMIT_COND_BRANCH_FLAGS (ins, ARMCOND_VS); /* V set */
- EMIT_COND_BRANCH_FLAGS (ins, ARMCOND_GE); /* swapped */
- break;
+
case OP_CKFINITE: {
#ifdef ARM_FPU_FPA
if (ins->dreg != ins->sreg1)
cpos += max_len;
+ last_ins = ins;
last_offset = offset;
}
*/
max_offset = 0;
for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
- MonoInst *ins;
+ MonoInst *ins = bb->code;
bb->max_offset = max_offset;
if (cfg->prof_options & MONO_PROFILE_COVERAGE)
return NULL;
}
+MonoInst*
+mono_arch_emit_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
+{
+ /* FIXME: */
+ return NULL;
+}
+
gboolean
mono_arch_print_tree (MonoInst *tree, int arity)
{
#define MONO_ARCH_INST_SREG2_MASK(ins) (0)
#ifdef MONO_ARCH_SOFT_FLOAT
-#define MONO_ARCH_INST_FIXED_REG(desc) (((desc) == 'l' || (desc == 'f') || (desc == 'g'))? ARM_LSW_REG: -1)
+#define MONO_ARCH_INST_FIXED_REG(desc) (((desc) == 'l' || (desc == 'f') || (desc == 'g')) ? ARM_LSW_REG: (((desc) == 'a') ? ARMREG_R0 : -1))
#define MONO_ARCH_INST_IS_REGPAIR(desc) ((desc) == 'l' || (desc) == 'L' || (desc) == 'f' || (desc) == 'g')
#define MONO_ARCH_INST_IS_FLOAT(desc) (FALSE)
#else
-#define MONO_ARCH_INST_FIXED_REG(desc) (((desc) == 'l')? ARM_LSW_REG: -1)
+#define MONO_ARCH_INST_FIXED_REG(desc) (((desc) == 'l')? ARM_LSW_REG: (((desc) == 'a') ? ARMREG_R0 : -1))
#define MONO_ARCH_INST_IS_REGPAIR(desc) (desc == 'l' || desc == 'L')
#define MONO_ARCH_INST_IS_FLOAT(desc) ((desc == 'f') || (desc == 'g'))
#endif
#define DEBUG(a) MINI_DEBUG(cfg->verbose_level, 2, a;)
-#define use_fpstack MONO_ARCH_USE_FPSTACK
-
static inline GSList*
g_slist_append_mempool (MonoMemPool *mp, GSList *list, gpointer data)
{
}
memset (rs->isymbolic, 0, MONO_MAX_IREGS * sizeof (rs->isymbolic [0]));
- memset (rs->vassign, -1, sizeof (rs->vassign [0]) * rs->next_vreg);
memset (rs->fsymbolic, 0, MONO_MAX_FREGS * sizeof (rs->fsymbolic [0]));
}
* spill variable if necessary.
*/
static inline int
-mono_spillvar_offset_int (MonoCompile *cfg, int spillvar)
+mono_spillvar_offset (MonoCompile *cfg, int spillvar, gboolean fp)
{
MonoSpillInfo *info;
- if (G_UNLIKELY (spillvar >= cfg->spill_info_len)) {
- resize_spill_info (cfg, FALSE);
- g_assert (spillvar < cfg->spill_info_len);
+#if defined (__mips__)
+ g_assert_not_reached();
+#endif
+ if (G_UNLIKELY (spillvar >= (fp ? cfg->spill_info_float_len : cfg->spill_info_len))) {
+ while (spillvar >= (fp ? cfg->spill_info_float_len : cfg->spill_info_len))
+ resize_spill_info (cfg, fp);
}
- info = &cfg->spill_info [spillvar];
+ /*
+ * Allocate separate spill slots for fp/non-fp variables since most processors prefer it.
+ */
+ info = fp ? &cfg->spill_info_float [spillvar] : &cfg->spill_info [spillvar];
if (info->offset == -1) {
cfg->stack_offset += sizeof (gpointer) - 1;
cfg->stack_offset &= ~(sizeof (gpointer) - 1);
if (cfg->flags & MONO_CFG_HAS_SPILLUP) {
- info->offset = cfg->stack_offset;
- cfg->stack_offset += sizeof (gpointer);
- } else {
- cfg->stack_offset += sizeof (gpointer);
- info->offset = - cfg->stack_offset;
- }
- }
-
- return info->offset;
-}
-
-/*
- * returns the offset used by spillvar. It allocates a new
- * spill float variable if necessary.
- * (same as mono_spillvar_offset but for float)
- */
-static inline int
-mono_spillvar_offset_float (MonoCompile *cfg, int spillvar)
-{
- MonoSpillInfo *info;
-
- if (G_UNLIKELY (spillvar >= cfg->spill_info_float_len)) {
- resize_spill_info (cfg, TRUE);
- g_assert (spillvar < cfg->spill_info_float_len);
- }
-
- info = &cfg->spill_info_float [spillvar];
- if (info->offset == -1) {
- cfg->stack_offset += sizeof (double) - 1;
- cfg->stack_offset &= ~(sizeof (double) - 1);
-
- if (cfg->flags & MONO_CFG_HAS_SPILLUP) {
- info->offset = cfg->stack_offset;
- cfg->stack_offset += sizeof (double);
+ if (fp) {
+ cfg->stack_offset += 7;
+ cfg->stack_offset &= ~7;
+ info->offset = cfg->stack_offset;
+ cfg->stack_offset += sizeof (double);
+ } else {
+ cfg->stack_offset += sizeof (gpointer) - 1;
+ cfg->stack_offset &= ~(sizeof (gpointer) - 1);
+ info->offset = cfg->stack_offset;
+ cfg->stack_offset += sizeof (gpointer);
+ }
} else {
- cfg->stack_offset += sizeof (double);
- info->offset = - cfg->stack_offset;
+ if (fp) {
+ cfg->stack_offset += sizeof (double) - 1;
+ cfg->stack_offset &= ~(sizeof (double) - 1);
+ cfg->stack_offset += sizeof (double);
+ info->offset = - cfg->stack_offset;
+ } else {
+ cfg->stack_offset += sizeof (gpointer) - 1;
+ cfg->stack_offset &= ~(sizeof (gpointer) - 1);
+ cfg->stack_offset += sizeof (gpointer);
+ info->offset = - cfg->stack_offset;
+ }
}
}
return info->offset;
}
-static inline int
-mono_spillvar_offset (MonoCompile *cfg, int spillvar, gboolean fp)
-{
- if (fp)
- return mono_spillvar_offset_float (cfg, spillvar);
- else
- return mono_spillvar_offset_int (cfg, spillvar);
-}
-
-#if MONO_ARCH_USE_FPSTACK
-
-/*
- * Creates a store for spilled floating point items
- */
-static MonoInst*
-create_spilled_store_float (MonoCompile *cfg, int spill, int reg, MonoInst *ins)
-{
- MonoInst *store;
- MONO_INST_NEW (cfg, store, OP_STORER8_MEMBASE_REG);
- store->sreg1 = reg;
- store->inst_destbasereg = cfg->frame_reg;
- store->inst_offset = mono_spillvar_offset_float (cfg, spill);
-
- DEBUG (printf ("SPILLED FLOAT STORE (%d at 0x%08lx(%%sp)) (from %d)\n", spill, (long)store->inst_offset, reg));
- return store;
-}
-
-/*
- * Creates a load for spilled floating point items
- */
-static MonoInst*
-create_spilled_load_float (MonoCompile *cfg, int spill, int reg, MonoInst *ins)
-{
- MonoInst *load;
- MONO_INST_NEW (cfg, load, OP_LOADR8_SPILL_MEMBASE);
- load->dreg = reg;
- load->inst_basereg = cfg->frame_reg;
- load->inst_offset = mono_spillvar_offset_float (cfg, spill);
-
- DEBUG (printf ("SPILLED FLOAT LOAD (%d at 0x%08lx(%%sp)) (from %d)\n", spill, (long)load->inst_offset, reg));
- return load;
-}
-
-#endif /* MONO_ARCH_USE_FPSTACK */
-
#define regmask(reg) (((regmask_t)1) << (reg))
#define is_hard_ireg(r) ((r) >= 0 && (r) < MONO_MAX_IREGS)
#define freg_is_freeable(r) is_hard_freg ((r))
#define reg_is_freeable(r,fp) ((fp) ? freg_is_freeable ((r)) : ireg_is_freeable ((r)))
-#define is_hard_reg(r,fp) ((fp) ? ((r) < MONO_MAX_FREGS) : ((r) < MONO_MAX_IREGS))
+#define is_hard_reg(r,fp) (G_UNLIKELY (fp) ? ((r) < MONO_MAX_FREGS) : ((r) < MONO_MAX_IREGS))
#define is_soft_reg(r,fp) (!is_hard_reg((r),(fp)))
#ifdef MONO_ARCH_INST_IS_FLOAT
#define reg_is_fp(desc) (MONO_ARCH_INST_IS_FLOAT (desc))
+#define dreg_is_fp(spec) (MONO_ARCH_INST_IS_FLOAT (spec [MONO_INST_DEST]))
+#define sreg1_is_fp(spec) (MONO_ARCH_INST_IS_FLOAT (spec [MONO_INST_SRC1]))
+#define sreg2_is_fp(spec) (MONO_ARCH_INST_IS_FLOAT (spec [MONO_INST_SRC2]))
#else
#define reg_is_fp(desc) ((desc) == 'f')
+#define sreg1_is_fp(spec) (G_UNLIKELY (spec [MONO_INST_SRC1] == 'f'))
+#define sreg2_is_fp(spec) (G_UNLIKELY (spec [MONO_INST_SRC2] == 'f'))
+#define dreg_is_fp(spec) (G_UNLIKELY (spec [MONO_INST_DEST] == 'f'))
#endif
-#define dreg_is_fp(spec) (reg_is_fp (spec [MONO_INST_DEST]))
-#define sreg1_is_fp(spec) (reg_is_fp (spec [MONO_INST_SRC1]))
-#define sreg2_is_fp(spec) (reg_is_fp (spec [MONO_INST_SRC2]))
-
#define sreg1_is_fp_ins(ins) (sreg1_is_fp (ins_get_spec ((ins)->opcode)))
#define sreg2_is_fp_ins(ins) (sreg2_is_fp (ins_get_spec ((ins)->opcode)))
#define dreg_is_fp_ins(ins) (dreg_is_fp (ins_get_spec ((ins)->opcode)))
/* Not (yet) used */
//int last_use;
//int prev_use;
-#if MONO_ARCH_USE_FPSTACK
- int flags; /* used to track fp spill/load */
-#endif
regmask_t preferred_mask; /* the hreg where the register should be allocated, or 0 */
} RegTrack;
printf ("\t%-2d %s", i, mono_inst_name (ins->opcode));
else
printf (" %s", mono_inst_name (ins->opcode));
- if (!spec)
- g_error ("Unknown opcode: %s\n", mono_inst_name (ins->opcode));
+ if (spec == MONO_ARCH_CPU_SPEC) {
+ /* This is a lowered opcode */
+ if (ins->dreg != -1)
+ printf (" R%d <-", ins->dreg);
+ if (ins->sreg1 != -1)
+ printf (" R%d", ins->sreg1);
+ if (ins->sreg2 != -1)
+ printf (" R%d", ins->sreg2);
+
+ switch (ins->opcode) {
+ case OP_LBNE_UN:
+ case OP_LBEQ:
+ case OP_LBLT:
+ case OP_LBLT_UN:
+ case OP_LBGT:
+ case OP_LBGT_UN:
+ case OP_LBGE:
+ case OP_LBGE_UN:
+ case OP_LBLE:
+ case OP_LBLE_UN:
+ if (!(ins->flags & MONO_INST_BRLABEL)) {
+ if (!ins->inst_false_bb)
+ printf (" [B%d]", ins->inst_true_bb->block_num);
+ else
+ printf (" [B%dB%d]", ins->inst_true_bb->block_num, ins->inst_false_bb->block_num);
+ }
+ break;
+ case OP_PHI:
+ case OP_FPHI: {
+ int i;
+ printf (" [%d (", (int)ins->inst_c0);
+ for (i = 0; i < ins->inst_phi_args [0]; i++) {
+ if (i)
+ printf (", ");
+ printf ("R%d", ins->inst_phi_args [i + 1]);
+ }
+ printf (")]");
+ break;
+ }
+ case OP_LDADDR:
+ case OP_OUTARG_VTRETADDR:
+ printf (" R%d", ((MonoInst*)ins->inst_p0)->dreg);
+ break;
+ case OP_REGOFFSET:
+ printf (" + 0x%lx", (long)ins->inst_offset);
+ default:
+ break;
+ }
+
+ printf ("\n");
+ //g_error ("Unknown opcode: %s\n", mono_inst_name (ins->opcode));
+ return;
+ }
if (spec [MONO_INST_DEST]) {
gboolean fp = dreg_is_fp_ins (ins);
}
if (spec [MONO_INST_SRC1]) {
gboolean fp = (spec [MONO_INST_SRC1] == 'f');
- if (is_soft_reg (ins->sreg1, fp))
- printf (" R%d", ins->sreg1);
- else if (spec [MONO_INST_SRC1] == 'b')
+ if (is_soft_reg (ins->sreg1, fp)) {
+ if (spec [MONO_INST_SRC1] == 'b')
+ printf (" [R%d + 0x%lx]", ins->sreg1, (long)ins->inst_offset);
+ else
+ printf (" R%d", ins->sreg1);
+ } else if (spec [MONO_INST_SRC1] == 'b')
printf (" [%s + 0x%lx]", mono_arch_regname (ins->sreg1), (long)ins->inst_offset);
else
printf (" %s", mono_regname_full (ins->sreg1, fp));
else
printf (" %s", mono_regname_full (ins->sreg2, fp));
}
+
+ switch (ins->opcode) {
+ case OP_ICONST:
+ printf (" [%d]", (int)ins->inst_c0);
+ break;
+#if defined(__i386__) || defined(__x86_64__)
+ case OP_X86_PUSH_IMM:
+#endif
+ case OP_ICOMPARE_IMM:
+ case OP_COMPARE_IMM:
+ case OP_IADD_IMM:
+ case OP_ISUB_IMM:
+ case OP_IAND_IMM:
+ case OP_IOR_IMM:
+ case OP_IXOR_IMM:
+ printf (" [%d]", (int)ins->inst_imm);
+ break;
+ case OP_ADD_IMM:
+ case OP_LADD_IMM:
+ printf (" [%d]", (int)(gssize)ins->inst_p1);
+ break;
+ case OP_I8CONST:
+ printf (" [%lld]", (long long)ins->inst_l);
+ break;
+ case OP_R8CONST:
+ printf (" [%f]", *(double*)ins->inst_p0);
+ break;
+ case OP_R4CONST:
+ printf (" [%f]", *(float*)ins->inst_p0);
+ break;
+ case CEE_CALL:
+ case CEE_CALLVIRT:
+ case OP_CALL:
+ case OP_CALL_MEMBASE:
+ case OP_CALL_REG:
+ case OP_FCALL:
+ case OP_FCALLVIRT:
+ case OP_LCALL:
+ case OP_LCALLVIRT:
+ case OP_VCALL:
+ case OP_VCALLVIRT:
+ case OP_VCALL_REG:
+ case OP_VCALL_MEMBASE:
+ case OP_VCALL2:
+ case OP_VCALL2_REG:
+ case OP_VCALL2_MEMBASE:
+ case OP_VOIDCALL:
+ case OP_VOIDCALLVIRT: {
+ MonoCallInst *call = (MonoCallInst*)ins;
+ GSList *list;
+
+ if (ins->opcode == OP_VCALL || ins->opcode == OP_VCALL_REG || ins->opcode == OP_VCALL_MEMBASE) {
+ /*
+ * These are lowered opcodes, but they are in the .md files since the old
+ * JIT passes them to backends.
+ */
+ if (ins->dreg != -1)
+ printf (" R%d <-", ins->dreg);
+ }
+
+ if (call->method) {
+ char *full_name = mono_method_full_name (call->method, TRUE);
+ printf (" [%s]", full_name);
+ g_free (full_name);
+ } else if (call->fptr) {
+ MonoJitICallInfo *info = mono_find_jit_icall_by_addr (call->fptr);
+ if (info)
+ printf (" [%s]", info->name);
+ }
+
+ list = call->out_ireg_args;
+ while (list) {
+ guint32 regpair;
+ int reg, hreg;
+
+ regpair = (guint32)(gssize)(list->data);
+ hreg = regpair >> 24;
+ reg = regpair & 0xffffff;
+
+ printf (" [%s <- R%d]", mono_arch_regname (hreg), reg);
+
+ list = g_slist_next (list);
+ }
+ break;
+ }
+ case OP_BR:
+ case OP_CALL_HANDLER:
+ printf (" [B%d]", ins->inst_target_bb->block_num);
+ break;
+ case CEE_BNE_UN:
+ case CEE_BEQ:
+ case CEE_BLT:
+ case CEE_BLT_UN:
+ case CEE_BGT:
+ case CEE_BGT_UN:
+ case CEE_BGE:
+ case CEE_BGE_UN:
+ case CEE_BLE:
+ case CEE_BLE_UN:
+ case OP_IBNE_UN:
+ case OP_IBEQ:
+ case OP_IBLT:
+ case OP_IBLT_UN:
+ case OP_IBGT:
+ case OP_IBGT_UN:
+ case OP_IBGE:
+ case OP_IBGE_UN:
+ case OP_IBLE:
+ case OP_IBLE_UN:
+ case OP_LBNE_UN:
+ case OP_LBEQ:
+ case OP_LBLT:
+ case OP_LBLT_UN:
+ case OP_LBGT:
+ case OP_LBGT_UN:
+ case OP_LBGE:
+ case OP_LBGE_UN:
+ case OP_LBLE:
+ case OP_LBLE_UN:
+ if (!(ins->flags & MONO_INST_BRLABEL)) {
+ if (!ins->inst_false_bb)
+ printf (" [B%d]", ins->inst_true_bb->block_num);
+ else
+ printf (" [B%dB%d]", ins->inst_true_bb->block_num, ins->inst_false_bb->block_num);
+ }
+ break;
+ default:
+ break;
+ }
+
if (spec [MONO_INST_CLOB])
printf (" clobbers: %c", spec [MONO_INST_CLOB]);
printf ("\n");
}
static inline void
-insert_before_ins (MonoInst *ins, MonoInst* to_insert)
+insert_before_ins (MonoBasicBlock *bb, MonoInst *ins, MonoInst* to_insert)
{
- MONO_INST_LIST_ADD_TAIL (&to_insert->node, &ins->node);
+ /*
+ * If this function is called multiple times, the new instructions are inserted
+ * in the proper order.
+ */
+ mono_bblock_insert_before_ins (bb, ins, to_insert);
+}
+
+static inline void
+insert_after_ins (MonoBasicBlock *bb, MonoInst *ins, MonoInst **last, MonoInst* to_insert)
+{
+ /*
+ * If this function is called multiple times, the new instructions are inserted in
+ * proper order.
+ */
+ mono_bblock_insert_after_ins (bb, *last, to_insert);
+
+ *last = to_insert;
}
/*
* Force the spilling of the variable in the symbolic register 'reg'.
*/
static int
-get_register_force_spilling (MonoCompile *cfg, MonoInst *ins, MonoInstList *next, int reg, gboolean fp)
+get_register_force_spilling (MonoCompile *cfg, MonoBasicBlock *bb, MonoInst **last, MonoInst *ins, int reg, gboolean fp)
{
MonoInst *load;
int i, sel, spill;
symbolic = rs->fsymbolic;
else
symbolic = rs->isymbolic;
-
+
sel = rs->vassign [reg];
+
/*i = rs->isymbolic [sel];
g_assert (i == reg);*/
i = reg;
-
- /* vassign contains 16 bit values */
- g_assert (cfg->spill_count < (1 << 15));
-
spill = ++cfg->spill_count;
rs->vassign [i] = -spill - 1;
if (fp)
load->dreg = sel;
load->inst_basereg = cfg->frame_reg;
load->inst_offset = mono_spillvar_offset (cfg, spill, fp);
- MONO_INST_LIST_ADD_TAIL (&load->node, next);
+ insert_after_ins (bb, ins, last, load);
DEBUG (printf ("SPILLED LOAD (%d at 0x%08lx(%%ebp)) R%d (freed %s)\n", spill, (long)load->inst_offset, i, mono_regname_full (sel, fp)));
if (fp)
i = mono_regstate_alloc_float (rs, regmask (sel));
#endif
static int
-get_register_spilling (MonoCompile *cfg, MonoInst *ins, MonoInstList *next, regmask_t regmask, int reg, gboolean fp)
+get_register_spilling (MonoCompile *cfg, MonoBasicBlock *bb, MonoInst **last, MonoInst *ins, regmask_t regmask, int reg, gboolean fp)
{
MonoInst *load;
int i, sel, spill;
DEBUG (printf ("\t\tavailable regmask: 0x%08" G_GUINT64_FORMAT "\n", (guint64)regmask));
g_assert (regmask); /* need at least a register we can free */
sel = 0;
-
- /* vassign contains 16 bit values */
- g_assert (cfg->spill_count < (1 << 15));
-
/* we should track prev_use and spill the register that's farther */
if (fp) {
for (i = 0; i < MONO_MAX_FREGS; ++i) {
if (regmask & (regmask (i))) {
sel = i;
- DEBUG (printf ("\t\tselected register %s has assignment %d\n", mono_arch_fregname (sel), cfg->rs->fsymbolic [sel]));
+ DEBUG (printf ("\t\tselected register %s has assignment %d\n", mono_arch_fregname (sel), rs->fsymbolic [sel]));
break;
}
}
for (i = 0; i < MONO_MAX_IREGS; ++i) {
if (regmask & (regmask (i))) {
sel = i;
- DEBUG (printf ("\t\tselected register %s has assignment %d\n", mono_arch_regname (sel), cfg->rs->isymbolic [sel]));
+ DEBUG (printf ("\t\tselected register %s has assignment %d\n", mono_arch_regname (sel), rs->isymbolic [sel]));
break;
}
}
load->dreg = sel;
load->inst_basereg = cfg->frame_reg;
load->inst_offset = mono_spillvar_offset (cfg, spill, fp);
- MONO_INST_LIST_ADD_TAIL (&load->node, next);
+ insert_after_ins (bb, ins, last, load);
DEBUG (printf ("\tSPILLED LOAD (%d at 0x%08lx(%%ebp)) R%d (freed %s)\n", spill, (long)load->inst_offset, i, mono_regname_full (sel, fp)));
if (fp)
i = mono_regstate_alloc_float (rs, regmask (sel));
}
static void
-free_up_ireg (MonoCompile *cfg, MonoInst *ins, MonoInstList *next, int hreg)
+free_up_ireg (MonoCompile *cfg, MonoBasicBlock *bb, MonoInst **last, MonoInst *ins, int hreg)
{
if (!(cfg->rs->ifree_mask & (regmask (hreg)))) {
DEBUG (printf ("\tforced spill of R%d\n", cfg->rs->isymbolic [hreg]));
- get_register_force_spilling (cfg, ins, next, cfg->rs->isymbolic [hreg], FALSE);
+ get_register_force_spilling (cfg, bb, last, ins, cfg->rs->isymbolic [hreg], FALSE);
mono_regstate_free_int (cfg->rs, hreg);
}
}
static void
-free_up_reg (MonoCompile *cfg, MonoInst *ins, MonoInstList *next, int hreg, gboolean fp)
+free_up_reg (MonoCompile *cfg, MonoBasicBlock *bb, MonoInst **last, MonoInst *ins, int hreg, gboolean fp)
{
if (fp) {
if (!(cfg->rs->ffree_mask & (regmask (hreg)))) {
DEBUG (printf ("\tforced spill of R%d\n", cfg->rs->isymbolic [hreg]));
- get_register_force_spilling (cfg, ins, next, cfg->rs->isymbolic [hreg], fp);
+ get_register_force_spilling (cfg, bb, last, ins, cfg->rs->isymbolic [hreg], fp);
mono_regstate_free_float (cfg->rs, hreg);
}
- } else {
+ }
+ else {
if (!(cfg->rs->ifree_mask & (regmask (hreg)))) {
DEBUG (printf ("\tforced spill of R%d\n", cfg->rs->isymbolic [hreg]));
- get_register_force_spilling (cfg, ins, next, cfg->rs->isymbolic [hreg], fp);
+ get_register_force_spilling (cfg, bb, last, ins, cfg->rs->isymbolic [hreg], fp);
mono_regstate_free_int (cfg->rs, hreg);
}
}
}
static MonoInst*
-create_copy_ins (MonoCompile *cfg, int dest, int src, MonoInst *ins, const unsigned char *ip, gboolean fp)
+create_copy_ins (MonoCompile *cfg, MonoBasicBlock *bb, MonoInst **last, int dest, int src, MonoInst *ins, const unsigned char *ip, gboolean fp)
{
MonoInst *copy;
copy->sreg1 = src;
copy->cil_code = ip;
if (ins) {
- MONO_INST_LIST_ADD (©->node, &ins->node);
- copy->cil_code = ins->cil_code;
+ mono_bblock_insert_after_ins (bb, ins, copy);
+ *last = copy;
}
DEBUG (printf ("\tforced copy from %s to %s\n", mono_regname_full (src, fp), mono_regname_full (dest, fp)));
return copy;
}
static MonoInst*
-create_spilled_store (MonoCompile *cfg, int spill, int reg, int prev_reg, MonoInst *ins, gboolean fp)
+create_spilled_store (MonoCompile *cfg, MonoBasicBlock *bb, int spill, int reg, int prev_reg, MonoInst **last, MonoInst *ins, gboolean fp)
{
MonoInst *store;
MONO_INST_NEW (cfg, store, fp ? OP_STORER8_MEMBASE_REG : OP_STORE_MEMBASE_REG);
store->sreg1 = reg;
store->inst_destbasereg = cfg->frame_reg;
store->inst_offset = mono_spillvar_offset (cfg, spill, fp);
- if (ins)
- MONO_INST_LIST_ADD (&store->node, &ins->node);
-
+ if (ins) {
+ mono_bblock_insert_after_ins (bb, ins, store);
+ *last = store;
+ }
DEBUG (printf ("\tSPILLED STORE (%d at 0x%08lx(%%ebp)) R%d (from %s)\n", spill, (long)store->inst_offset, prev_reg, mono_regname_full (reg, fp)));
return store;
}
};
static inline int
-alloc_int_reg (MonoCompile *cfg, MonoInst *ins, MonoInstList *next, regmask_t dest_mask, int sym_reg, RegTrack *info)
+alloc_int_reg (MonoCompile *cfg, MonoBasicBlock *bb, MonoInst **last, MonoInst *ins, regmask_t dest_mask, int sym_reg, RegTrack *info)
{
int val;
val = mono_regstate_alloc_int (cfg->rs, dest_mask);
if (val < 0)
- val = get_register_spilling (cfg, ins, next, dest_mask, sym_reg, FALSE);
+ val = get_register_spilling (cfg, bb, last, ins, dest_mask, sym_reg, FALSE);
return val;
}
static inline int
-alloc_float_reg (MonoCompile *cfg, MonoInst *ins, MonoInstList *next, regmask_t dest_mask, int sym_reg)
+alloc_float_reg (MonoCompile *cfg, MonoBasicBlock *bb, MonoInst **last, MonoInst *ins, regmask_t dest_mask, int sym_reg)
{
int val;
val = mono_regstate_alloc_float (cfg->rs, dest_mask);
if (val < 0) {
- val = get_register_spilling (cfg, ins, next, dest_mask, sym_reg, TRUE);
+ val = get_register_spilling (cfg, bb, last, ins, dest_mask, sym_reg, TRUE);
}
return val;
}
static inline int
-alloc_reg (MonoCompile *cfg, MonoInst *ins, MonoInstList *next, regmask_t dest_mask, int sym_reg, RegTrack *info, gboolean fp)
+alloc_reg (MonoCompile *cfg, MonoBasicBlock *bb, MonoInst **last, MonoInst *ins, regmask_t dest_mask, int sym_reg, RegTrack *info, gboolean fp)
{
- if (fp)
- return alloc_float_reg (cfg, ins, next, dest_mask, sym_reg);
+ if (G_UNLIKELY (fp))
+ return alloc_float_reg (cfg, bb, last, ins, dest_mask, sym_reg);
else
- return alloc_int_reg (cfg, ins, next, dest_mask, sym_reg, info);
+ return alloc_int_reg (cfg, bb, last, ins, dest_mask, sym_reg, info);
}
static inline void
assign_reg (MonoCompile *cfg, MonoRegState *rs, int reg, int hreg, gboolean fp)
{
- if (fp) {
+ if (G_UNLIKELY (fp)) {
g_assert (reg >= MONO_MAX_FREGS);
g_assert (hreg < MONO_MAX_FREGS);
g_assert (! is_global_freg (hreg));
}
}
-static inline void
-assign_ireg (MonoCompile *cfg, MonoRegState *rs, int reg, int hreg)
-{
- assign_reg (cfg, rs, reg, hreg, FALSE);
-}
-
static gint8 desc_to_fixed_reg [256];
static gboolean desc_to_fixed_reg_inited = FALSE;
* Local register allocation.
* We first scan the list of instructions and we save the liveness info of
* each register (when the register is first used, when it's value is set etc.).
+ * We also reverse the list of instructions because assigning registers backwards allows
+ * for more tricks to be used.
*/
void
mono_local_regalloc (MonoCompile *cfg, MonoBasicBlock *bb)
{
- MonoInst *ins;
+ MonoInst *ins, *prev, *last;
+ MonoInst **tmp;
MonoRegState *rs = cfg->rs;
- int i, val, fpcount;
+ int i, val, max;
RegTrack *reginfo;
- const char const *spec;
+ const char *spec;
unsigned char spec_src1, spec_src2, spec_dest;
- GList *fspill_list = NULL;
gboolean fp;
- int fspill = 0;
#if MONO_ARCH_USE_FPSTACK
- gboolean need_fpstack = use_fpstack;
+ gboolean has_fp = FALSE;
+ int fpstack [8];
+ int sp = 0;
#endif
- if (MONO_INST_LIST_EMPTY (&bb->ins_list))
+ if (!bb->code)
return;
if (!desc_to_fixed_reg_inited) {
rs->ifree_mask = MONO_ARCH_CALLEE_REGS;
rs->ffree_mask = MONO_ARCH_CALLEE_FREGS;
- if (use_fpstack)
- rs->ffree_mask = 0xff & ~(regmask (MONO_ARCH_FPSTACK_SIZE));
+ max = rs->next_vreg;
- if (cfg->reginfo && cfg->reginfo_len < rs->next_vreg) {
+ if (cfg->reginfo && cfg->reginfo_len < max)
cfg->reginfo = NULL;
- }
+
reginfo = cfg->reginfo;
if (!reginfo) {
- cfg->reginfo_len = MAX (256, rs->next_vreg * 2);
+ cfg->reginfo_len = MAX (1024, max * 2);
reginfo = cfg->reginfo = mono_mempool_alloc (cfg->mempool, sizeof (RegTrack) * cfg->reginfo_len);
}
else
g_assert (cfg->reginfo_len >= rs->next_vreg);
- memset (reginfo, 0, rs->next_vreg * sizeof (RegTrack));
+ if (cfg->verbose_level > 1) {
+ /* print_regtrack reads the info of all variables */
+ memset (cfg->reginfo, 0, cfg->reginfo_len * sizeof (RegTrack));
+ }
+
+ if (cfg->new_ir) {
+ /*
+ * For large methods, next_vreg can be very large, so g_malloc0 time can
+ * be prohibitive. So we manually init the reginfo entries used by the
+ * bblock.
+ */
+ for (ins = bb->code; ins; ins = ins->next) {
+ spec = ins_get_spec (ins->opcode);
+
+ if ((ins->dreg != -1) && (ins->dreg < max)) {
+ memset (®info [ins->dreg], 0, sizeof (RegTrack));
+#if SIZEOF_VOID_P == 4
+ if (MONO_ARCH_INST_IS_REGPAIR (spec [MONO_INST_DEST])) {
+ /**
+ * In the new IR, the two vregs of the regpair do not alias the
+ * original long vreg. shift the vreg here so the rest of the
+ * allocator doesn't have to care about it.
+ */
+ if (cfg->new_ir)
+ ins->dreg ++;
+ memset (®info [ins->dreg + 1], 0, sizeof (RegTrack));
+ }
+#endif
+ }
+ if ((ins->sreg1 != -1) && (ins->sreg1 < max)) {
+ memset (®info [ins->sreg1], 0, sizeof (RegTrack));
+#if SIZEOF_VOID_P == 4
+ if (MONO_ARCH_INST_IS_REGPAIR (spec [MONO_INST_SRC1])) {
+ if (cfg->new_ir)
+ ins->sreg1 ++;
+ memset (®info [ins->sreg1 + 1], 0, sizeof (RegTrack));
+ }
+#endif
+ }
+ if ((ins->sreg2 != -1) && (ins->sreg2 < max)) {
+ memset (®info [ins->sreg2], 0, sizeof (RegTrack));
+#if SIZEOF_VOID_P == 4
+ if (MONO_ARCH_INST_IS_REGPAIR (spec [MONO_INST_SRC2])) {
+ if (cfg->new_ir)
+ ins->sreg2 ++;
+ memset (®info [ins->sreg2 + 1], 0, sizeof (RegTrack));
+ }
+#endif
+ }
+ }
+ }
+ else {
+ memset (reginfo, 0, max * sizeof (RegTrack));
+ }
+
+ /*if (cfg->opt & MONO_OPT_COPYPROP)
+ local_copy_prop (cfg, ins);*/
i = 1;
- fpcount = 0;
- DEBUG (printf ("\nLOCAL REGALLOC: BASIC BLOCK: %d\n", bb->block_num));
+ DEBUG (printf ("\nLOCAL REGALLOC: BASIC BLOCK %d:\n", bb->block_num));
/* forward pass on the instructions to collect register liveness info */
MONO_BB_FOR_EACH_INS (bb, ins) {
spec = ins_get_spec (ins->opcode);
DEBUG (mono_print_ins_index (i, ins));
- /*
- * TRACK FP STACK
- */
#if MONO_ARCH_USE_FPSTACK
- if (need_fpstack) {
- GList *spill;
-
- if (spec_src1 == 'f') {
- spill = g_list_first (fspill_list);
- if (spill && fpcount < MONO_ARCH_FPSTACK_SIZE) {
- reginfo [ins->sreg1].flags |= MONO_FP_NEEDS_LOAD;
- fspill_list = g_list_remove (fspill_list, spill->data);
- } else
- fpcount--;
- }
-
- if (spec [MONO_INST_SRC2] == 'f') {
- spill = g_list_first (fspill_list);
- if (spill) {
- reginfo [ins->sreg2].flags |= MONO_FP_NEEDS_LOAD;
- fspill_list = g_list_remove (fspill_list, spill->data);
- if (fpcount >= MONO_ARCH_FPSTACK_SIZE) {
- fspill++;
- fspill_list = g_list_prepend (fspill_list, GINT_TO_POINTER(fspill));
- reginfo [ins->sreg2].flags |= MONO_FP_NEEDS_LOAD_SPILL;
- }
- } else
- fpcount--;
- }
-
- if (reg_is_fp (spec_dest)) {
- if (use_fpstack && (spec [MONO_INST_CLOB] != 'm')) {
- if (fpcount >= MONO_ARCH_FPSTACK_SIZE) {
- reginfo [ins->dreg].flags |= MONO_FP_NEEDS_SPILL;
- fspill++;
- fspill_list = g_list_prepend (fspill_list, GINT_TO_POINTER(fspill));
- fpcount--;
- }
- fpcount++;
- }
- }
- }
+ if (sreg1_is_fp (spec) || sreg2_is_fp (spec) || dreg_is_fp (spec))
+ has_fp = TRUE;
#endif
- if (spec [MONO_INST_SRC1]) {
+ if (spec_src1) {
+ fp = sreg1_is_fp (spec);
+ g_assert (ins->sreg1 != -1);
+ if (cfg->new_ir && is_soft_reg (ins->sreg1, fp))
+ /* This means the vreg is not local to this bb */
+ g_assert (reginfo [ins->sreg1].born_in > 0);
+ rs->vassign [ins->sreg1] = -1;
//reginfo [ins->sreg1].prev_use = reginfo [ins->sreg1].last_use;
//reginfo [ins->sreg1].last_use = i;
- if (MONO_ARCH_INST_IS_REGPAIR (spec [MONO_INST_SRC2])) {
+ if (MONO_ARCH_INST_IS_REGPAIR (spec_src2)) {
/* The virtual register is allocated sequentially */
+ rs->vassign [ins->sreg1 + 1] = -1;
//reginfo [ins->sreg1 + 1].prev_use = reginfo [ins->sreg1 + 1].last_use;
//reginfo [ins->sreg1 + 1].last_use = i;
if (reginfo [ins->sreg1 + 1].born_in == 0 || reginfo [ins->sreg1 + 1].born_in > i)
} else {
ins->sreg1 = -1;
}
- if (spec [MONO_INST_SRC2]) {
+ if (spec_src2) {
+ fp = sreg2_is_fp (spec);
+ g_assert (ins->sreg2 != -1);
+ if (cfg->new_ir && is_soft_reg (ins->sreg2, fp))
+ /* This means the vreg is not local to this bb */
+ g_assert (reginfo [ins->sreg2].born_in > 0);
+ rs->vassign [ins->sreg2] = -1;
//reginfo [ins->sreg2].prev_use = reginfo [ins->sreg2].last_use;
//reginfo [ins->sreg2].last_use = i;
- if (MONO_ARCH_INST_IS_REGPAIR (spec [MONO_INST_SRC2])) {
+ if (MONO_ARCH_INST_IS_REGPAIR (spec_src2)) {
/* The virtual register is allocated sequentially */
+ rs->vassign [ins->sreg2 + 1] = -1;
//reginfo [ins->sreg2 + 1].prev_use = reginfo [ins->sreg2 + 1].last_use;
//reginfo [ins->sreg2 + 1].last_use = i;
if (reginfo [ins->sreg2 + 1].born_in == 0 || reginfo [ins->sreg2 + 1].born_in > i)
} else {
ins->sreg2 = -1;
}
- if (spec [MONO_INST_DEST]) {
+ if (spec_dest) {
int dest_dreg;
- if (spec [MONO_INST_DEST] != 'b') /* it's not just a base register */
+ fp = dreg_is_fp (spec);
+ if (spec_dest != 'b') /* it's not just a base register */
reginfo [ins->dreg].killed_in = i;
+ g_assert (ins->dreg != -1);
+ rs->vassign [ins->dreg] = -1;
//reginfo [ins->dreg].prev_use = reginfo [ins->dreg].last_use;
//reginfo [ins->dreg].last_use = i;
if (reginfo [ins->dreg].born_in == 0 || reginfo [ins->dreg].born_in > i)
if (dest_dreg != -1)
reginfo [ins->dreg].preferred_mask = (regmask (dest_dreg));
+#ifdef MONO_ARCH_INST_FIXED_MASK
+ reginfo [ins->dreg].preferred_mask |= MONO_ARCH_INST_FIXED_MASK (spec_dest);
+#endif
+
if (MONO_ARCH_INST_IS_REGPAIR (spec_dest)) {
/* The virtual register is allocated sequentially */
+ rs->vassign [ins->dreg + 1] = -1;
//reginfo [ins->dreg + 1].prev_use = reginfo [ins->dreg + 1].last_use;
//reginfo [ins->dreg + 1].last_use = i;
if (reginfo [ins->dreg + 1].born_in == 0 || reginfo [ins->dreg + 1].born_in > i)
reginfo [ins->dreg + 1].born_in = i;
- if (MONO_ARCH_INST_REGPAIR_REG2 (spec [MONO_INST_DEST], -1) != -1)
- reginfo [ins->dreg + 1].preferred_mask = regpair_reg2_mask (spec [MONO_INST_DEST], -1);
+ if (MONO_ARCH_INST_REGPAIR_REG2 (spec_dest, -1) != -1)
+ reginfo [ins->dreg + 1].preferred_mask = regpair_reg2_mask (spec_dest, -1);
}
} else {
ins->dreg = -1;
}
list = call->out_freg_args;
- if (!use_fpstack && list) {
+ if (list) {
while (list) {
guint32 regpair;
int reg, hreg;
hreg = regpair >> 24;
reg = regpair & 0xffffff;
- //reginfo [reg].prev_use = reginfo [reg].last_use;
- //reginfo [reg].last_use = i;
-
list = g_slist_next (list);
}
}
++i;
}
- // todo: check if we have anything left on fp stack, in verify mode?
- fspill = 0;
+ tmp = &last;
DEBUG (print_regtrack (reginfo, rs->next_vreg));
- ins = mono_inst_list_last (&bb->ins_list);
- while (ins) {
+ MONO_BB_FOR_EACH_INS_REVERSE_SAFE (bb, prev, ins) {
int prev_dreg, prev_sreg1, prev_sreg2, clob_dreg;
int dest_dreg, dest_sreg1, dest_sreg2, clob_reg;
int dreg_high, sreg1_high;
regmask_t dreg_mask, sreg1_mask, sreg2_mask, mask;
regmask_t dreg_fixed_mask, sreg1_fixed_mask, sreg2_fixed_mask;
const unsigned char *ip;
- MonoInst *prev_ins;
- MonoInstList *next;
-
- prev_ins = mono_inst_list_prev (&ins->node, &bb->ins_list);
- next = ins->node.next;
--i;
- g_assert (i >= 0);
spec = ins_get_spec (ins->opcode);
spec_src1 = spec [MONO_INST_SRC1];
spec_src2 = spec [MONO_INST_SRC2];
prev_sreg1 = -1;
dreg_high = -1;
sreg1_high = -1;
- dreg_mask = reg_is_fp (spec_dest) ? MONO_ARCH_CALLEE_FREGS : MONO_ARCH_CALLEE_REGS;
- sreg1_mask = reg_is_fp (spec_src1) ? MONO_ARCH_CALLEE_FREGS : MONO_ARCH_CALLEE_REGS;
- sreg2_mask = reg_is_fp (spec_src2) ? MONO_ARCH_CALLEE_FREGS : MONO_ARCH_CALLEE_REGS;
+ dreg_mask = dreg_is_fp (spec) ? MONO_ARCH_CALLEE_FREGS : MONO_ARCH_CALLEE_REGS;
+ sreg1_mask = sreg1_is_fp (spec) ? MONO_ARCH_CALLEE_FREGS : MONO_ARCH_CALLEE_REGS;
+ sreg2_mask = sreg2_is_fp (spec) ? MONO_ARCH_CALLEE_FREGS : MONO_ARCH_CALLEE_REGS;
DEBUG (printf ("processing:"));
DEBUG (mono_print_ins_index (i, ins));
ip = ins->cil_code;
+ last = ins;
+
/*
* FIXED REGS
*/
sreg1_fixed_mask = sreg2_fixed_mask = dreg_fixed_mask = 0;
#endif
- /*
- * TRACK FP STACK
- */
-#if MONO_ARCH_USE_FPSTACK
- if (need_fpstack && (spec [MONO_INST_CLOB] != 'm')) {
- if (reg_is_fp (spec_dest)) {
- if (reginfo [ins->dreg].flags & MONO_FP_NEEDS_SPILL) {
- GList *spill_node;
- MonoInst *store;
- spill_node = g_list_first (fspill_list);
- g_assert (spill_node);
-
- store = create_spilled_store_float (cfg, GPOINTER_TO_INT (spill_node->data), ins->dreg, ins);
- insert_before_ins (ins, store);
- fspill_list = g_list_remove (fspill_list, spill_node->data);
- fspill--;
- }
- }
-
- if (spec_src1 == 'f') {
- if (reginfo [ins->sreg1].flags & MONO_FP_NEEDS_LOAD) {
- MonoInst *load;
- MonoInst *store = NULL;
-
- if (reginfo [ins->sreg1].flags & MONO_FP_NEEDS_LOAD_SPILL) {
- GList *spill_node;
- spill_node = g_list_first (fspill_list);
- g_assert (spill_node);
-
- store = create_spilled_store_float (cfg, GPOINTER_TO_INT (spill_node->data), ins->sreg1, ins);
- fspill_list = g_list_remove (fspill_list, spill_node->data);
- }
-
- fspill++;
- fspill_list = g_list_prepend (fspill_list, GINT_TO_POINTER(fspill));
- load = create_spilled_load_float (cfg, fspill, ins->sreg1, ins);
- insert_before_ins (ins, load);
- if (store)
- insert_before_ins (load, store);
- }
- }
-
- if (spec_src2 == 'f') {
- if (reginfo [ins->sreg2].flags & MONO_FP_NEEDS_LOAD) {
- MonoInst *load;
- MonoInst *store = NULL;
-
- if (reginfo [ins->sreg2].flags & MONO_FP_NEEDS_LOAD_SPILL) {
- GList *spill_node;
-
- spill_node = g_list_first (fspill_list);
- g_assert (spill_node);
- if (spec_src1 == 'f' && (reginfo [ins->sreg2].flags & MONO_FP_NEEDS_LOAD_SPILL))
- spill_node = g_list_next (spill_node);
-
- store = create_spilled_store_float (cfg, GPOINTER_TO_INT (spill_node->data), ins->sreg2, ins);
- fspill_list = g_list_remove (fspill_list, spill_node->data);
- }
-
- fspill++;
- fspill_list = g_list_prepend (fspill_list, GINT_TO_POINTER(fspill));
- load = create_spilled_load_float (cfg, fspill, ins->sreg2, ins);
- insert_before_ins (ins, load);
- if (store)
- insert_before_ins (load, store);
- }
- }
- }
-#endif
-
/*
* TRACK FIXED SREG2
*/
if (rs->ifree_mask & (regmask (dest_sreg2))) {
if (is_global_ireg (ins->sreg2)) {
/* Argument already in hard reg, need to copy */
- MonoInst *copy = create_copy_ins (cfg, dest_sreg2, ins->sreg2, NULL, ip, FALSE);
- insert_before_ins (ins, copy);
+ MonoInst *copy = create_copy_ins (cfg, bb, tmp, dest_sreg2, ins->sreg2, NULL, ip, FALSE);
+ insert_before_ins (bb, ins, copy);
}
else {
val = rs->vassign [ins->sreg2];
g_assert_not_reached ();
} else {
/* Argument already in hard reg, need to copy */
- MonoInst *copy = create_copy_ins (cfg, dest_sreg2, val, NULL, ip, FALSE);
- insert_before_ins (ins, copy);
+ MonoInst *copy = create_copy_ins (cfg, bb, tmp, dest_sreg2, val, NULL, ip, FALSE);
+ insert_before_ins (bb, ins, copy);
}
}
} else {
- int need_spill = TRUE;
+ gboolean need_spill = TRUE;
+ gboolean need_assign = TRUE;
dreg_mask &= ~ (regmask (dest_sreg2));
sreg1_mask &= ~ (regmask (dest_sreg2));
* and then copy from this to dest_sreg2.
*/
int new_dest;
- new_dest = alloc_int_reg (cfg, ins, next, dreg_mask, ins->dreg, ®info [ins->dreg]);
+ new_dest = alloc_int_reg (cfg, bb, tmp, ins, dreg_mask, ins->dreg, ®info [ins->dreg]);
g_assert (new_dest >= 0);
DEBUG (printf ("\tchanging dreg R%d to %s from %s\n", ins->dreg, mono_arch_regname (new_dest), mono_arch_regname (dest_sreg2)));
prev_dreg = ins->dreg;
- assign_ireg (cfg, rs, ins->dreg, new_dest);
+ assign_reg (cfg, rs, ins->dreg, new_dest, FALSE);
clob_dreg = ins->dreg;
- create_copy_ins (cfg, dest_sreg2, new_dest, ins, ip, FALSE);
+ create_copy_ins (cfg, bb, tmp, dest_sreg2, new_dest, ins, ip, FALSE);
mono_regstate_free_int (rs, dest_sreg2);
need_spill = FALSE;
}
if (is_global_ireg (ins->sreg2)) {
- MonoInst *copy = create_copy_ins (cfg, dest_sreg2, ins->sreg2, NULL, ip, FALSE);
- insert_before_ins (ins, copy);
+ MonoInst *copy = create_copy_ins (cfg, bb, tmp, dest_sreg2, ins->sreg2, NULL, ip, FALSE);
+ insert_before_ins (bb, ins, copy);
+ need_assign = FALSE;
}
else {
val = rs->vassign [ins->sreg2];
if (val == dest_sreg2) {
/* sreg2 is already assigned to the correct register */
need_spill = FALSE;
- }
- else if ((val >= 0) || (val < -1)) {
- /* FIXME: sreg2 already assigned to another register */
- g_assert_not_reached ();
+ } else if (val < -1) {
+ /* sreg2 is spilled, it can be assigned to dest_sreg2 */
+ } else if (val >= 0) {
+ /* sreg2 already assigned to another register */
+ /*
+ * We couldn't emit a copy from val to dest_sreg2, because
+ * val might be spilled later while processing this
+ * instruction. So we spill sreg2 so it can be allocated to
+ * dest_sreg2.
+ */
+ DEBUG (printf ("\tforced spill of R%d\n", ins->sreg2));
+ free_up_reg (cfg, bb, tmp, ins, val, FALSE);
}
}
if (need_spill) {
DEBUG (printf ("\tforced spill of R%d\n", rs->isymbolic [dest_sreg2]));
- get_register_force_spilling (cfg, ins, next, rs->isymbolic [dest_sreg2], FALSE);
- mono_regstate_free_int (rs, dest_sreg2);
+ free_up_reg (cfg, bb, tmp, ins, dest_sreg2, FALSE);
}
- if (!is_global_ireg (ins->sreg2))
+ if (need_assign) {
+ if (rs->vassign [ins->sreg2] < -1) {
+ MonoInst *store;
+ int spill;
+
+ /* Need to emit a spill store */
+ spill = - rs->vassign [ins->sreg2] - 1;
+				store = create_spilled_store (cfg, bb, spill, dest_sreg2, ins->sreg2, tmp, NULL, FALSE);
+ insert_before_ins (bb, ins, store);
+ }
/* force-set sreg2 */
- assign_ireg (cfg, rs, ins->sreg2, dest_sreg2);
+ assign_reg (cfg, rs, ins->sreg2, dest_sreg2, FALSE);
+ }
}
ins->sreg2 = dest_sreg2;
}
/*
* TRACK DREG
*/
- fp = reg_is_fp (spec_dest);
- if (spec_dest && (!fp || (fp && !use_fpstack)) && is_soft_reg (ins->dreg, fp))
+ fp = dreg_is_fp (spec);
+ if (spec_dest && is_soft_reg (ins->dreg, fp)) {
prev_dreg = ins->dreg;
+ }
if (spec_dest == 'b') {
/*
* The dest reg is read by the instruction, not written, so
* avoid allocating sreg1/sreg2 to the same reg.
*/
- if (dest_sreg1 != -1)
+			if (dest_sreg1 != -1)
dreg_mask &= ~ (regmask (dest_sreg1));
if (dest_sreg2 != -1)
dreg_mask &= ~ (regmask (dest_sreg2));
val = rs->vassign [ins->dreg];
if (is_soft_reg (ins->dreg, fp) && (val >= 0) && (!(regmask (val) & dreg_mask))) {
/* DREG is already allocated to a register needed for sreg1 */
- get_register_force_spilling (cfg, ins, next, ins->dreg, FALSE);
+ get_register_force_spilling (cfg, bb, tmp, ins, ins->dreg, FALSE);
mono_regstate_free_int (rs, val);
}
}
if (dest_dreg != -1) {
if (rs->vassign [ins->dreg] != dest_dreg)
- free_up_ireg (cfg, ins, next, dest_dreg);
+ free_up_ireg (cfg, bb, tmp, ins, dest_dreg);
dreg2 = ins->dreg + 1;
dest_dreg2 = MONO_ARCH_INST_REGPAIR_REG2 (spec_dest, dest_dreg);
if (dest_dreg2 != -1) {
if (rs->vassign [dreg2] != dest_dreg2)
- free_up_ireg (cfg, ins, next, dest_dreg2);
+ free_up_ireg (cfg, bb, tmp, ins, dest_dreg2);
}
}
}
*/
val = mono_regstate_alloc_int (rs, dreg_fixed_mask);
if (val < 0)
- val = get_register_spilling (cfg, ins, next, dreg_fixed_mask, -1, fp);
+ val = get_register_spilling (cfg, bb, tmp, ins, dreg_fixed_mask, -1, fp);
mono_regstate_free_int (rs, val);
dest_dreg = val;
dreg_mask &= dreg_fixed_mask;
}
- if ((!fp || (fp && !use_fpstack)) && (is_soft_reg (ins->dreg, fp))) {
- if (dest_dreg != -1)
- dreg_mask = (regmask (dest_dreg));
-
+ if (is_soft_reg (ins->dreg, fp)) {
val = rs->vassign [ins->dreg];
if (val < 0) {
/* the register gets spilled after this inst */
spill = -val -1;
}
- val = alloc_reg (cfg, ins, next, dreg_mask, ins->dreg, ®info [ins->dreg], fp);
+ val = alloc_reg (cfg, bb, tmp, ins, dreg_mask, ins->dreg, ®info [ins->dreg], fp);
assign_reg (cfg, rs, ins->dreg, val, fp);
if (spill)
- create_spilled_store (cfg, spill, val, prev_dreg, ins, fp);
+ create_spilled_store (cfg, bb, spill, val, prev_dreg, tmp, ins, fp);
}
-
+
DEBUG (printf ("\tassigned dreg %s to dest R%d\n", mono_regname_full (val, fp), ins->dreg));
ins->dreg = val;
}
}
val = mono_regstate_alloc_int (rs, mask);
if (val < 0)
- val = get_register_spilling (cfg, ins, next, mask, reg2, fp);
+ val = get_register_spilling (cfg, bb, tmp, ins, mask, reg2, fp);
if (spill)
- create_spilled_store (cfg, spill, val, reg2, ins, fp);
+ create_spilled_store (cfg, bb, spill, val, reg2, tmp, ins, fp);
}
else {
if (! (mask & (regmask (val)))) {
val = mono_regstate_alloc_int (rs, mask);
if (val < 0)
- val = get_register_spilling (cfg, ins, next, mask, reg2, fp);
+ val = get_register_spilling (cfg, bb, tmp, ins, mask, reg2, fp);
/* Reallocate hreg to the correct register */
- create_copy_ins (cfg, rs->vassign [reg2], val, ins, ip, fp);
+ create_copy_ins (cfg, bb, tmp, rs->vassign [reg2], val, ins, ip, fp);
mono_regstate_free_int (rs, rs->vassign [reg2]);
}
}
}
- if ((!fp || (fp && !use_fpstack)) && prev_dreg >= 0 && is_soft_reg (prev_dreg, fp) && reginfo [prev_dreg].born_in >= i) {
+ if (prev_dreg >= 0 && is_soft_reg (prev_dreg, fp) && (spec_dest != 'b') && (cfg->new_ir || reginfo [prev_dreg].born_in >= i)) {
/*
* In theory, we could free up the hreg even if the vreg is alive,
* but branches inside bblocks force us to assign the same hreg
mono_regstate_free_float (rs, dreg);
else
mono_regstate_free_int (rs, dreg);
+ if (cfg->new_ir)
+ rs->vassign [prev_dreg] = -1;
}
if ((dest_dreg != -1) && (ins->dreg != dest_dreg)) {
/* this instruction only outputs to dest_dreg, need to copy */
- create_copy_ins (cfg, ins->dreg, dest_dreg, ins, ip, fp);
+ create_copy_ins (cfg, bb, tmp, ins->dreg, dest_dreg, ins, ip, fp);
ins->dreg = dest_dreg;
if (fp) {
if (rs->fsymbolic [dest_dreg] >= MONO_MAX_FREGS)
- free_up_reg (cfg, ins, next, dest_dreg, fp);
+ free_up_reg (cfg, bb, tmp, ins, dest_dreg, fp);
}
else {
if (rs->isymbolic [dest_dreg] >= MONO_MAX_IREGS)
- free_up_reg (cfg, ins, next, dest_dreg, fp);
+ free_up_reg (cfg, bb, tmp, ins, dest_dreg, fp);
}
}
* The dest reg is read by the instruction, not written, so
* avoid allocating sreg1/sreg2 to the same reg.
*/
- sreg1_mask &= ~ (regmask (ins->dreg));
- sreg2_mask &= ~ (regmask (ins->dreg));
+ if (!sreg1_is_fp (spec))
+ sreg1_mask &= ~ (regmask (ins->dreg));
+ if (!sreg2_is_fp (spec))
+ sreg2_mask &= ~ (regmask (ins->dreg));
}
/*
*/
if ((clob_reg != -1) && (!(rs->ifree_mask & (regmask (clob_reg))))) {
DEBUG (printf ("\tforced spill of clobbered reg R%d\n", rs->isymbolic [clob_reg]));
- get_register_force_spilling (cfg, ins, next, rs->isymbolic [clob_reg], FALSE);
+ get_register_force_spilling (cfg, bb, tmp, ins, rs->isymbolic [clob_reg], FALSE);
mono_regstate_free_int (rs, clob_reg);
}
for (j = 0; j < MONO_MAX_IREGS; ++j) {
s = regmask (j);
- if ((clob_mask & s) && !(rs->ifree_mask & s) && (j != ins->sreg1) && (j != dreg) && (j != dreg2)) {
- get_register_force_spilling (cfg, ins, next, rs->isymbolic [j], FALSE);
+ if ((clob_mask & s) && !(rs->ifree_mask & s) && (j != ins->sreg1)) {
+ if ((j != dreg) && (j != dreg2))
+ get_register_force_spilling (cfg, bb, tmp, ins, rs->isymbolic [j], FALSE);
+ else if (rs->isymbolic [j])
+ /* The hreg is assigned to the dreg of this instruction */
+ rs->vassign [rs->isymbolic [j]] = -1;
mono_regstate_free_int (rs, j);
}
}
}
- if (!use_fpstack && rs->ffree_mask != MONO_ARCH_CALLEE_FREGS) {
+ if (rs->ffree_mask != MONO_ARCH_CALLEE_FREGS) {
clob_mask = MONO_ARCH_CALLEE_FREGS;
if ((prev_dreg != -1) && reg_is_fp (spec_dest))
dreg = rs->vassign [prev_dreg];
for (j = 0; j < MONO_MAX_FREGS; ++j) {
s = regmask (j);
- if ((clob_mask & s) && !(rs->ffree_mask & s) && (j != ins->sreg1) && (j != dreg)) {
- get_register_force_spilling (cfg, ins, next, rs->fsymbolic [j], TRUE);
+ if ((clob_mask & s) && !(rs->ffree_mask & s) && (j != ins->sreg1)) {
+ if (j != dreg)
+ get_register_force_spilling (cfg, bb, tmp, ins, rs->fsymbolic [j], TRUE);
+ else if (rs->fsymbolic [j])
+ /* The hreg is assigned to the dreg of this instruction */
+ rs->vassign [rs->fsymbolic [j]] = -1;
mono_regstate_free_float (rs, j);
}
}
}
list = call->out_freg_args;
- if (list && !use_fpstack) {
+ if (list) {
while (list) {
guint32 regpair;
int reg, hreg;
/*
* TRACK SREG1
*/
- fp = reg_is_fp (spec_src1);
- if ((!fp || (fp && !use_fpstack))) {
- if (MONO_ARCH_INST_IS_REGPAIR (spec_dest) && (spec [MONO_INST_CLOB] == '1')) {
- g_assert (is_soft_reg (ins->sreg1, fp));
-
- /* To simplify things, we allocate the same regpair to sreg1 and dreg */
- if (dest_sreg1 != -1)
- g_assert (dest_sreg1 == ins->dreg);
- val = mono_regstate_alloc_int (rs, regmask (ins->dreg));
- g_assert (val >= 0);
- assign_reg (cfg, rs, ins->sreg1, val, fp);
+ fp = sreg1_is_fp (spec);
+ if (MONO_ARCH_INST_IS_REGPAIR (spec_dest) && (spec [MONO_INST_CLOB] == '1')) {
+ g_assert (is_soft_reg (ins->sreg1, fp));
- DEBUG (printf ("\tassigned sreg1-low %s to R%d\n", mono_regname_full (val, fp), ins->sreg1));
+ /* To simplify things, we allocate the same regpair to sreg1 and dreg */
+ if (dest_sreg1 != -1)
+ g_assert (dest_sreg1 == ins->dreg);
+ val = mono_regstate_alloc_int (rs, regmask (ins->dreg));
+ g_assert (val >= 0);
- g_assert ((regmask (dreg_high)) & regpair_reg2_mask (spec_src1, ins->dreg));
- val = mono_regstate_alloc_int (rs, regmask (dreg_high));
- g_assert (val >= 0);
- assign_reg (cfg, rs, ins->sreg1 + 1, val, fp);
+ if (rs->vassign [ins->sreg1] >= 0 && rs->vassign [ins->sreg1] != val)
+ // FIXME:
+ g_assert_not_reached ();
- DEBUG (printf ("\tassigned sreg1-high %s to R%d\n", mono_regname_full (val, fp), ins->sreg1 + 1));
+ assign_reg (cfg, rs, ins->sreg1, val, fp);
- /* Skip rest of this section */
- dest_sreg1 = -1;
- }
+ DEBUG (printf ("\tassigned sreg1-low %s to R%d\n", mono_regname_full (val, fp), ins->sreg1));
- if (sreg1_fixed_mask) {
- g_assert (!fp);
- if (is_global_ireg (ins->sreg1)) {
- /*
- * The argument is already in a hard reg, but that reg is
- * not usable by this instruction, so allocate a new one.
- */
- val = mono_regstate_alloc_int (rs, sreg1_fixed_mask);
- if (val < 0)
- val = get_register_spilling (cfg, ins, next, sreg1_fixed_mask, -1, fp);
- mono_regstate_free_int (rs, val);
- dest_sreg1 = val;
+ g_assert ((regmask (dreg_high)) & regpair_reg2_mask (spec_src1, ins->dreg));
+ val = mono_regstate_alloc_int (rs, regmask (dreg_high));
+ g_assert (val >= 0);
- /* Fall through to the dest_sreg1 != -1 case */
- }
- else
- sreg1_mask &= sreg1_fixed_mask;
- }
+ if (rs->vassign [ins->sreg1 + 1] >= 0 && rs->vassign [ins->sreg1 + 1] != val)
+ // FIXME:
+ g_assert_not_reached ();
- if (dest_sreg1 != -1) {
- sreg1_mask = regmask (dest_sreg1);
+ assign_reg (cfg, rs, ins->sreg1 + 1, val, fp);
- if (!(rs->ifree_mask & (regmask (dest_sreg1)))) {
- DEBUG (printf ("\tforced spill of R%d\n", rs->isymbolic [dest_sreg1]));
- get_register_force_spilling (cfg, ins, next, rs->isymbolic [dest_sreg1], FALSE);
- mono_regstate_free_int (rs, dest_sreg1);
- }
- if (is_global_ireg (ins->sreg1)) {
- /* The argument is already in a hard reg, need to copy */
- MonoInst *copy = create_copy_ins (cfg, dest_sreg1, ins->sreg1, NULL, ip, FALSE);
- insert_before_ins (ins, copy);
- ins->sreg1 = dest_sreg1;
- }
+ DEBUG (printf ("\tassigned sreg1-high %s to R%d\n", mono_regname_full (val, fp), ins->sreg1 + 1));
+
+ /* Skip rest of this section */
+ dest_sreg1 = -1;
+ }
+
+ if (sreg1_fixed_mask) {
+ g_assert (!fp);
+ if (is_global_ireg (ins->sreg1)) {
+ /*
+ * The argument is already in a hard reg, but that reg is
+ * not usable by this instruction, so allocate a new one.
+ */
+ val = mono_regstate_alloc_int (rs, sreg1_fixed_mask);
+ if (val < 0)
+ val = get_register_spilling (cfg, bb, tmp, ins, sreg1_fixed_mask, -1, fp);
+ mono_regstate_free_int (rs, val);
+ dest_sreg1 = val;
+
+ /* Fall through to the dest_sreg1 != -1 case */
}
+ else
+ sreg1_mask &= sreg1_fixed_mask;
+ }
- if (is_soft_reg (ins->sreg1, fp)) {
- val = rs->vassign [ins->sreg1];
- prev_sreg1 = ins->sreg1;
- if (val < 0) {
- int spill = 0;
- if (val < -1) {
- /* the register gets spilled after this inst */
- spill = -val -1;
- }
+ if (dest_sreg1 != -1) {
+ sreg1_mask = regmask (dest_sreg1);
- if ((ins->opcode == OP_MOVE) && !spill && !fp && (is_local_ireg (ins->dreg) && (rs->ifree_mask & (regmask (ins->dreg))))) {
- /*
- * Allocate the same hreg to sreg1 as well so the
- * peephole can get rid of the move.
- */
- sreg1_mask = regmask (ins->dreg);
- }
+ if ((rs->vassign [ins->sreg1] != dest_sreg1) && !(rs->ifree_mask & (regmask (dest_sreg1)))) {
+ DEBUG (printf ("\tforced spill of R%d\n", rs->isymbolic [dest_sreg1]));
+ get_register_force_spilling (cfg, bb, tmp, ins, rs->isymbolic [dest_sreg1], FALSE);
+ mono_regstate_free_int (rs, dest_sreg1);
+ }
+ if (is_global_ireg (ins->sreg1)) {
+ /* The argument is already in a hard reg, need to copy */
+ MonoInst *copy = create_copy_ins (cfg, bb, tmp, dest_sreg1, ins->sreg1, NULL, ip, FALSE);
+ insert_before_ins (bb, ins, copy);
+ ins->sreg1 = dest_sreg1;
+ }
+ }
- val = alloc_reg (cfg, ins, next, sreg1_mask, ins->sreg1, ®info [ins->sreg1], fp);
- assign_reg (cfg, rs, ins->sreg1, val, fp);
- DEBUG (printf ("\tassigned sreg1 %s to R%d\n", mono_regname_full (val, fp), ins->sreg1));
+ if (is_soft_reg (ins->sreg1, fp)) {
+ val = rs->vassign [ins->sreg1];
+ prev_sreg1 = ins->sreg1;
+ if (val < 0) {
+ int spill = 0;
+ if (val < -1) {
+ /* the register gets spilled after this inst */
+ spill = -val -1;
+ }
- if (spill) {
- MonoInst *store = create_spilled_store (cfg, spill, val, prev_sreg1, NULL, fp);
- /*
- * Need to insert before the instruction since it can
- * overwrite sreg1.
- */
- insert_before_ins (ins, store);
- }
+ if ((ins->opcode == OP_MOVE) && !spill && !fp && is_local_ireg (ins->dreg) && (rs->ifree_mask & (regmask (ins->dreg)))) {
+ /*
+ * Allocate the same hreg to sreg1 as well so the
+ * peephole can get rid of the move.
+ */
+ sreg1_mask = regmask (ins->dreg);
}
- else if ((dest_sreg1 != -1) && (dest_sreg1 != val)) {
- create_copy_ins (cfg, dest_sreg1, val, ins, ip, fp);
+
+ if (spec [MONO_INST_CLOB] == '1' && !dreg_is_fp (spec) && (rs->ifree_mask & (regmask (ins->dreg))))
+ /* Allocate the same reg to sreg1 to avoid a copy later */
+ sreg1_mask = regmask (ins->dreg);
+
+ val = alloc_reg (cfg, bb, tmp, ins, sreg1_mask, ins->sreg1, ®info [ins->sreg1], fp);
+ assign_reg (cfg, rs, ins->sreg1, val, fp);
+ DEBUG (printf ("\tassigned sreg1 %s to R%d\n", mono_regname_full (val, fp), ins->sreg1));
+
+ if (spill) {
+ MonoInst *store = create_spilled_store (cfg, bb, spill, val, prev_sreg1, tmp, NULL, fp);
+ /*
+ * Need to insert before the instruction since it can
+ * overwrite sreg1.
+ */
+ insert_before_ins (bb, ins, store);
}
-
- ins->sreg1 = val;
}
- else {
- prev_sreg1 = -1;
+ else if ((dest_sreg1 != -1) && (dest_sreg1 != val)) {
+ MonoInst *copy = create_copy_ins (cfg, bb, tmp, dest_sreg1, val, NULL, ip, fp);
+ insert_before_ins (bb, ins, copy);
+ sreg2_mask &= ~(regmask (dest_sreg1));
+ val = dest_sreg1;
}
- sreg2_mask &= ~(regmask (ins->sreg1));
+
+ ins->sreg1 = val;
+ }
+ else {
+ prev_sreg1 = -1;
}
+ sreg2_mask &= ~(regmask (ins->sreg1));
/* Handle the case when sreg1 is a regpair but dreg is not */
if (MONO_ARCH_INST_IS_REGPAIR (spec_src1) && (spec [MONO_INST_CLOB] != '1')) {
}
val = mono_regstate_alloc_int (rs, mask);
if (val < 0)
- val = get_register_spilling (cfg, ins, next, mask, reg2, fp);
+ val = get_register_spilling (cfg, bb, tmp, ins, mask, reg2, fp);
if (spill)
g_assert_not_reached ();
}
#if 0
val = mono_regstate_alloc_int (rs, mask);
if (val < 0)
- val = get_register_spilling (cfg, ins, next, mask, reg2, fp);
+ val = get_register_spilling (cfg, bb, tmp, ins, mask, reg2, fp);
/* Reallocate hreg to the correct register */
- create_copy_ins (cfg, rs->vassign [reg2], val, ins, ip, fp);
+ create_copy_ins (cfg, bb, tmp, rs->vassign [reg2], val, ins, ip, fp);
mono_regstate_free_int (rs, rs->vassign [reg2]);
#endif
}
/* Handle dreg==sreg1 */
- if (((reg_is_fp (spec_dest) && spec_src1 == 'f' && !use_fpstack) || spec [MONO_INST_CLOB] == '1') && ins->dreg != ins->sreg1) {
+ if (((dreg_is_fp (spec) && spec_src1 == 'f') || spec [MONO_INST_CLOB] == '1') && ins->dreg != ins->sreg1) {
MonoInst *sreg2_copy = NULL;
MonoInst *copy;
gboolean fp = (spec_src1 == 'f');
* copying sreg1 to dreg could clobber sreg2, so allocate a new
* register for it.
*/
- int reg2 = alloc_reg (cfg, ins, next, dreg_mask, ins->sreg2, NULL, fp);
+ int reg2 = alloc_reg (cfg, bb, tmp, ins, dreg_mask, ins->sreg2, NULL, fp);
DEBUG (printf ("\tneed to copy sreg2 %s to reg %s\n", mono_regname_full (ins->sreg2, fp), mono_regname_full (reg2, fp)));
- sreg2_copy = create_copy_ins (cfg, reg2, ins->sreg2, NULL, ip, fp);
+ sreg2_copy = create_copy_ins (cfg, bb, tmp, reg2, ins->sreg2, NULL, ip, fp);
prev_sreg2 = ins->sreg2 = reg2;
if (fp)
}
DEBUG (printf ("\tneed to copy sreg1 %s to dreg %s\n", mono_regname_full (ins->sreg1, fp), mono_regname_full (ins->dreg, fp)));
- copy = create_copy_ins (cfg, ins->dreg, ins->sreg1, NULL, ip, fp);
- insert_before_ins (ins, copy);
+ copy = create_copy_ins (cfg, bb, tmp, ins->dreg, ins->sreg1, NULL, ip, fp);
+ insert_before_ins (bb, ins, copy);
if (sreg2_copy)
- insert_before_ins (copy, sreg2_copy);
+ insert_before_ins (bb, copy, sreg2_copy);
/*
* Need to prevent sreg2 to be allocated to sreg1, since that
/*
* TRACK SREG2
*/
- fp = reg_is_fp (spec_src2);
+ fp = sreg2_is_fp (spec);
if (MONO_ARCH_INST_IS_REGPAIR (spec_src2))
g_assert_not_reached ();
- if ((!fp || (fp && !use_fpstack)) && (is_soft_reg (ins->sreg2, fp))) {
+ if (is_soft_reg (ins->sreg2, fp)) {
val = rs->vassign [ins->sreg2];
if (val < 0) {
/* the register gets spilled after this inst */
spill = -val -1;
}
- val = alloc_reg (cfg, ins, next, sreg2_mask, ins->sreg2, ®info [ins->sreg2], fp);
+ val = alloc_reg (cfg, bb, tmp, ins, sreg2_mask, ins->sreg2, ®info [ins->sreg2], fp);
assign_reg (cfg, rs, ins->sreg2, val, fp);
DEBUG (printf ("\tassigned sreg2 %s to R%d\n", mono_regname_full (val, fp), ins->sreg2));
if (spill) {
- MonoInst *store = create_spilled_store (cfg, spill, val, prev_sreg2, NULL, fp);
+ MonoInst *store = create_spilled_store (cfg, bb, spill, val, prev_sreg2, tmp, NULL, fp);
/*
* Need to insert before the instruction since it can
* overwrite sreg2.
*/
- insert_before_ins (ins, store);
+ insert_before_ins (bb, ins, store);
}
}
ins->sreg2 = val;
}*/
DEBUG (mono_print_ins_index (i, ins));
- ins = prev_ins;
}
- if (fspill_list)
- g_list_free (fspill_list);
+ // FIXME: Set MAX_FREGS to 8
+ // FIXME: Optimize generated code
+#if MONO_ARCH_USE_FPSTACK
+ /*
+ * Make a forward pass over the code, simulating the fp stack, making sure the
+ * arguments required by the fp opcodes are at the top of the stack.
+ */
+ if (has_fp) {
+ MonoInst *prev = NULL;
+ MonoInst *fxch;
+ int tmp;
+
+ for (ins = bb->code; ins; ins = ins->next) {
+ spec = ins_get_spec (ins->opcode);
+
+ DEBUG (printf ("processing:"));
+ DEBUG (mono_print_ins_index (0, ins));
+
+ if (ins->opcode == OP_FMOVE) {
+ /* Do it by renaming the source to the destination on the stack */
+ // FIXME: Is this correct ?
+ for (i = 0; i < sp; ++i)
+ if (fpstack [i] == ins->sreg1)
+ fpstack [i] = ins->dreg;
+ prev = ins;
+ continue;
+ }
+
+ if (sreg1_is_fp (spec) && sreg2_is_fp (spec) && (fpstack [sp - 2] != ins->sreg1)) {
+ /* Arg1 must be in %st(1) */
+ g_assert (prev);
+
+ i = 0;
+ while ((i < sp) && (fpstack [i] != ins->sreg1))
+ i ++;
+ g_assert (i < sp);
+
+ if (sp - 1 - i > 0) {
+ /* First move it to %st(0) */
+ DEBUG (printf ("\tswap %%st(0) and %%st(%d)\n", sp - 1 - i));
+
+ MONO_INST_NEW (cfg, fxch, OP_X86_FXCH);
+ fxch->inst_imm = sp - 1 - i;
+
+ prev->next = fxch;
+ fxch->next = ins;
+ prev = fxch;
+
+ tmp = fpstack [sp - 1];
+ fpstack [sp - 1] = fpstack [i];
+ fpstack [i] = tmp;
+ }
+
+ /* Then move it to %st(1) */
+ DEBUG (printf ("\tswap %%st(0) and %%st(1)\n"));
+
+ MONO_INST_NEW (cfg, fxch, OP_X86_FXCH);
+ fxch->inst_imm = 1;
+
+ prev->next = fxch;
+ fxch->next = ins;
+ prev = fxch;
+
+ tmp = fpstack [sp - 1];
+ fpstack [sp - 1] = fpstack [sp - 2];
+ fpstack [sp - 2] = tmp;
+ }
+
+ if (sreg2_is_fp (spec)) {
+ g_assert (sp > 0);
+
+ if (fpstack [sp - 1] != ins->sreg2) {
+ g_assert (prev);
+
+ i = 0;
+ while ((i < sp) && (fpstack [i] != ins->sreg2))
+ i ++;
+ g_assert (i < sp);
+
+ DEBUG (printf ("\tswap %%st(0) and %%st(%d)\n", sp - 1 - i));
+
+ MONO_INST_NEW (cfg, fxch, OP_X86_FXCH);
+ fxch->inst_imm = sp - 1 - i;
+
+ prev->next = fxch;
+ fxch->next = ins;
+ prev = fxch;
+
+ tmp = fpstack [sp - 1];
+ fpstack [sp - 1] = fpstack [i];
+ fpstack [i] = tmp;
+ }
+
+ sp --;
+ }
+
+ if (sreg1_is_fp (spec)) {
+ g_assert (sp > 0);
+
+ if (fpstack [sp - 1] != ins->sreg1) {
+ g_assert (prev);
+
+ i = 0;
+ while ((i < sp) && (fpstack [i] != ins->sreg1))
+ i ++;
+ g_assert (i < sp);
+
+ DEBUG (printf ("\tswap %%st(0) and %%st(%d)\n", sp - 1 - i));
+
+ MONO_INST_NEW (cfg, fxch, OP_X86_FXCH);
+ fxch->inst_imm = sp - 1 - i;
+
+ prev->next = fxch;
+ fxch->next = ins;
+ prev = fxch;
+
+ tmp = fpstack [sp - 1];
+ fpstack [sp - 1] = fpstack [i];
+ fpstack [i] = tmp;
+ }
+
+ sp --;
+ }
+
+ if (dreg_is_fp (spec)) {
+ g_assert (sp < 8);
+ fpstack [sp ++] = ins->dreg;
+ }
+
+ if (G_UNLIKELY (cfg->verbose_level >= 2)) {
+ printf ("\t[");
+ for (i = 0; i < sp; ++i)
+ printf ("%s%%fr%d", (i > 0) ? ", " : "", fpstack [i]);
+ printf ("]\n");
+ }
+
+ prev = ins;
+ }
+ }
+#endif
}
CompRelation
case OP_FCEQ:
case OP_COND_EXC_EQ:
case OP_COND_EXC_IEQ:
+ case OP_CMOV_IEQ:
+ case OP_CMOV_LEQ:
return CMP_EQ;
case CEE_BNE_UN:
case OP_IBNE_UN:
case OP_FBNE_UN:
case OP_COND_EXC_NE_UN:
case OP_COND_EXC_INE_UN:
+ case OP_CMOV_INE_UN:
+ case OP_CMOV_LNE_UN:
return CMP_NE;
case CEE_BLE:
case OP_IBLE:
case OP_LBLE:
case OP_FBLE:
+ case OP_CMOV_ILE:
+ case OP_CMOV_LLE:
return CMP_LE;
case CEE_BGE:
case OP_IBGE:
case OP_LBGE:
case OP_FBGE:
+ case OP_CMOV_IGE:
+ case OP_CMOV_LGE:
return CMP_GE;
case CEE_BLT:
case OP_CLT:
case OP_FCLT:
case OP_COND_EXC_LT:
case OP_COND_EXC_ILT:
+ case OP_CMOV_ILT:
+ case OP_CMOV_LLT:
return CMP_LT;
case CEE_BGT:
case OP_CGT:
case OP_FCGT:
case OP_COND_EXC_GT:
case OP_COND_EXC_IGT:
+ case OP_CMOV_IGT:
+ case OP_CMOV_LGT:
return CMP_GT;
case CEE_BLE_UN:
case OP_FBLE_UN:
case OP_COND_EXC_LE_UN:
case OP_COND_EXC_ILE_UN:
+ case OP_CMOV_ILE_UN:
+ case OP_CMOV_LLE_UN:
return CMP_LE_UN;
case CEE_BGE_UN:
case OP_IBGE_UN:
case OP_LBGE_UN:
case OP_FBGE_UN:
+ case OP_CMOV_IGE_UN:
+ case OP_CMOV_LGE_UN:
return CMP_GE_UN;
case CEE_BLT_UN:
case OP_CLT_UN:
case OP_FCLT_UN:
case OP_COND_EXC_LT_UN:
case OP_COND_EXC_ILT_UN:
+ case OP_CMOV_ILT_UN:
+ case OP_CMOV_LLT_UN:
return CMP_LT_UN;
case CEE_BGT_UN:
case OP_CGT_UN:
case OP_FBGT_UN:
case OP_COND_EXC_GT_UN:
case OP_COND_EXC_IGT_UN:
+ case OP_CMOV_IGT_UN:
+ case OP_CMOV_LGT_UN:
return CMP_GT_UN;
default:
printf ("%s\n", mono_inst_name (opcode));
}
}
+/*
+ * mono_negate_cond:
+ *
+ *   Return the logical negation of the comparison relation COND, i.e.
+ * the relation which holds exactly when COND does not:
+ * CMP_EQ <-> CMP_NE, CMP_LT <-> CMP_GE, CMP_GT <-> CMP_LE, with the
+ * unsigned relations mapping to unsigned relations.
+ */
+CompRelation
+mono_negate_cond (CompRelation cond)
+{
+	switch (cond) {
+	case CMP_EQ:
+		return CMP_NE;
+	case CMP_NE:
+		return CMP_EQ;
+	case CMP_LE:
+		return CMP_GT;
+	case CMP_GE:
+		return CMP_LT;
+	case CMP_LT:
+		return CMP_GE;
+	case CMP_GT:
+		return CMP_LE;
+	case CMP_LE_UN:
+		return CMP_GT_UN;
+	case CMP_GE_UN:
+		return CMP_LT_UN;
+	case CMP_LT_UN:
+		return CMP_GE_UN;
+	case CMP_GT_UN:
+		return CMP_LE_UN;
+	default:
+		g_assert_not_reached ();
+	}
+}
+
CompType
mono_opcode_to_type (int opcode, int cmp_opcode)
{
switch (cmp_opcode) {
case OP_ICOMPARE:
case OP_ICOMPARE_IMM:
+ case OP_LCOMPARE_IMM:
return CMP_TYPE_I;
default:
return CMP_TYPE_L;
void
mono_peephole_ins (MonoBasicBlock *bb, MonoInst *ins)
{
- MonoInst *last_ins = mono_inst_list_prev (&ins->node, &bb->ins_list);
+ MonoInst *last_ins = ins->prev;
switch (ins->opcode) {
case OP_MUL_IMM:
* OP_STORE_MEMBASE_REG reg1, offset(basereg)
* OP_MOVE reg1, reg2
*/
- if (last_ins && (last_ins->opcode == OP_STOREI4_MEMBASE_REG
- || last_ins->opcode == OP_STORE_MEMBASE_REG) &&
+ if (last_ins &&
+ (((ins->opcode == OP_LOADI4_MEMBASE) && (last_ins->opcode == OP_STOREI4_MEMBASE_REG)) ||
+ ((ins->opcode == OP_LOAD_MEMBASE) && (last_ins->opcode == OP_STORE_MEMBASE_REG))) &&
ins->inst_basereg == last_ins->inst_destbasereg &&
ins->inst_offset == last_ins->inst_offset) {
if (ins->dreg == last_ins->sreg1) {
MONO_DELETE_INS (bb, ins);
}
break;
+ case OP_NOP:
+ MONO_DELETE_INS (bb, ins);
+ break;
}
}
+
MonoJitExceptionInfo *ei = &ji->clauses [i];
gboolean filtered = FALSE;
-#ifdef __s390__
+#if defined(__s390__) || defined(__ia64__)
+ /*
+ * This is required in cases where a try block starts immediately after
+ * a call which causes an exception. Testcase: tests/exception8.cs.
+ * FIXME: Clean this up.
+ */
if (ei->try_start < MONO_CONTEXT_GET_IP (ctx) &&
#else
if (ei->try_start <= MONO_CONTEXT_GET_IP (ctx) &&
arg->inst_left = in;
arg->inst_call = call;
arg->type = in->type;
- MONO_INST_LIST_ADD_TAIL (&arg->node, &call->out_args);
+
+ /* prepend, we'll need to reverse them later */
+ arg->next = call->out_args;
+ call->out_args = arg;
switch (ainfo->storage) {
case ArgInIReg:
}
}
+ /*
+ * Reverse the call->out_args list.
+ */
+ {
+ MonoInst *prev = NULL, *list = call->out_args, *next;
+ while (list) {
+ next = list->next;
+ list->next = prev;
+ prev = list;
+ list = next;
+ }
+ call->out_args = prev;
+ }
call->stack_usage = cinfo->stack_usage;
cfg->param_area = MAX (cfg->param_area, call->stack_usage);
cfg->param_area = ALIGN_TO (cfg->param_area, MONO_ARCH_FRAME_ALIGNMENT);
DEBUG_FUNC_EXIT();
}
-#define NEW_INS(cfg,ins,dest,op) do { \
+/*
+ * insert_after_ins:
+ *
+ *   Insert TO_INSERT into BB's singly linked instruction list directly
+ * after INS. Passing INS == NULL inserts at the head of the bblock.
+ * NOTE(review): bb->last_ins is not updated here; the lowering pass
+ * below recomputes it after its loop — confirm any other caller does
+ * the same if the insertion point is the tail.
+ */
+static void
+insert_after_ins (MonoBasicBlock *bb, MonoInst *ins, MonoInst *to_insert)
+{
+	if (ins == NULL) {
+		/* Prepend: TO_INSERT becomes the first instruction of the bblock */
+		ins = bb->code;
+		bb->code = to_insert;
+		to_insert->next = ins;
+	} else {
+		to_insert->next = ins->next;
+		ins->next = to_insert;
+	}
+}
+
+/*
+ * NEW_INS: allocate a zeroed MonoInst with opcode OP from the cfg
+ * mempool and splice it in after 'last_ins' (NULL -> bblock head).
+ * Expects 'bb' and 'last_ins' to be in scope at the expansion site.
+ */
+#define NEW_INS(cfg,dest,op) do {	\
		(dest) = mono_mempool_alloc0 ((cfg)->mempool, sizeof (MonoInst));	\
		(dest)->opcode = (op);	\
-		MONO_INST_LIST_ADD_TAIL (&(dest)->node, &(ins)->node); \
+		insert_after_ins (bb, last_ins, (dest)); \
	} while (0)
static int
void
mono_arch_lowering_pass (MonoCompile *cfg, MonoBasicBlock *bb)
{
- MonoInst *ins, *next, *temp, *temp2;
+ MonoInst *ins, *next, *temp, *last_ins = NULL;
int imm;
/* setup the virtual reg allocator */
case OP_ADD_IMM:
case OP_ADDCC_IMM:
if (!hppa_check_bits (ins->inst_imm, 11)) {
- NEW_INS (cfg, ins, temp, OP_ICONST);
+ NEW_INS (cfg, temp, OP_ICONST);
temp->inst_c0 = ins->inst_imm;
temp->dreg = mono_regstate_next_int (cfg->rs);
ins->sreg2 = temp->dreg;
case OP_SUB_IMM:
case OP_SUBCC_IMM:
if (!hppa_check_bits (ins->inst_imm, 11)) {
- NEW_INS (cfg, ins, temp, OP_ICONST);
+ NEW_INS (cfg, temp, OP_ICONST);
temp->inst_c0 = ins->inst_imm;
temp->dreg = mono_regstate_next_int (cfg->rs);
ins->sreg2 = temp->dreg;
}
else {
int tmp = mono_regstate_next_int (cfg->rs);
- NEW_INS (cfg, ins, temp, OP_ICONST);
+ NEW_INS (cfg, temp, OP_ICONST);
temp->inst_c0 = ins->inst_c0;
temp->dreg = tmp;
int freg1 = mono_regstate_next_float (cfg->rs);
int freg2 = mono_regstate_next_float (cfg->rs);
- NEW_INS(cfg, ins, temp, OP_STORE_MEMBASE_REG);
+ NEW_INS(cfg, temp, OP_STORE_MEMBASE_REG);
temp->sreg1 = ins->sreg1;
temp->inst_destbasereg = hppa_sp;
temp->inst_offset = -16;
- NEW_INS(cfg, temp, temp2, OP_LOADR4_MEMBASE);
- temp2->dreg = freg1;
- temp2->inst_basereg = hppa_sp;
- temp2->inst_offset = -16;
+ NEW_INS(cfg, temp, OP_LOADR4_MEMBASE);
+ temp->dreg = freg1;
+ temp->inst_basereg = hppa_sp;
+ temp->inst_offset = -16;
- NEW_INS(cfg, temp2, temp, OP_STORE_MEMBASE_REG);
+ NEW_INS(cfg, temp, OP_STORE_MEMBASE_REG);
temp->sreg1 = ins->sreg2;
temp->inst_destbasereg = hppa_sp;
temp->inst_offset = -16;
- NEW_INS(cfg, temp, temp2, OP_LOADR4_MEMBASE);
- temp2->dreg = freg2;
- temp2->inst_basereg = hppa_sp;
- temp2->inst_offset = -16;
+ NEW_INS(cfg, temp, OP_LOADR4_MEMBASE);
+ temp->dreg = freg2;
+ temp->inst_basereg = hppa_sp;
+ temp->inst_offset = -16;
- NEW_INS (cfg, temp2, temp, OP_HPPA_XMPYU);
+ NEW_INS (cfg, temp, OP_HPPA_XMPYU);
temp->dreg = freg2;
temp->sreg1 = freg1;
temp->sreg2 = freg2;
- NEW_INS(cfg, temp, temp2, OP_HPPA_STORER4_RIGHT);
- temp2->sreg1 = freg2;
- temp2->inst_destbasereg = hppa_sp;
- temp2->inst_offset = -16;
+ NEW_INS(cfg, temp, OP_HPPA_STORER4_RIGHT);
+ temp->sreg1 = freg2;
+ temp->inst_destbasereg = hppa_sp;
+ temp->inst_offset = -16;
ins->opcode = OP_LOAD_MEMBASE;
ins->inst_basereg = hppa_sp;
default:
break;
}
+ last_ins = ins;
}
+ bb->last_ins = last_ins;
bb->max_vreg = cfg->rs->next_vreg;
}
MonoCallInst *call;
guint offset;
guint32 *code = (guint32*)(cfg->native_code + cfg->code_len);
+ MonoInst *last_ins = NULL;
int max_len, cpos;
const char *spec;
break;
case OP_BR: {
guint32 target;
+ DEBUG (printf ("target: %p, next: %p, curr: %p, last: %p\n", ins->inst_target_bb, bb->next_bb, ins, bb->last_ins));
if (ins->flags & MONO_INST_BRLABEL) {
mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_LABEL, ins->inst_i0);
} else {
}
cpos += max_len;
+
+ last_ins = ins;
}
cfg->code_len = (guint8*)code - cfg->native_code;
*/
max_offset = 0;
for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
- MonoInst *ins;
+ MonoInst *ins = bb->code;
bb->max_offset = max_offset;
if (cfg->prof_options & MONO_PROFILE_COVERAGE)
max_offset += 6;
- MONO_BB_FOR_EACH_INS (bb, ins) {
+ MONO_BB_FOR_EACH_INS (bb, ins)
max_offset += ((guint8 *)ins_get_spec (ins->opcode))[MONO_INST_LEN];
}
MonoInst *target_label; \
target_label = mono_mempool_alloc0 ((cfg)->mempool, sizeof (MonoInst)); \
target_label->opcode = OP_LABEL; \
- MONO_INST_LIST_ADD (&target_label->node, \
- &(targetbb)->ins_list); \
+ target_label->next = (targetbb)->code; \
target_label->inst_c0 = (targetbb)->native_offset; \
+ (targetbb)->code = target_label; \
inst = mono_mempool_alloc0 ((cfg)->mempool, sizeof (MonoInst)); \
g_assert (op >= CEE_BEQ && op <= CEE_BLT_UN); \
inst->opcode = OP_HPPA_BEQ + (op - CEE_BEQ); \
case MONO_TYPE_TYPEDBYREF: {
guint32 tmp_gr = 0, tmp_fr = 0, tmp_stacksize = 0;
- add_valuetype (gsctx, sig, &cinfo->ret, sig->ret, TRUE, &tmp_gr, &tmp_fr, &tmp_stacksize);
- if (cinfo->ret.storage == ArgOnStack)
- /* The caller passes the address where the value is stored */
- add_general (&gr, &stack_size, &cinfo->ret);
- if (cinfo->ret.storage == ArgInIReg)
- cinfo->ret.storage = ArgValuetypeAddrInIReg;
+ if (sig->ret->byref) {
+ /* This seems to happen with ldfld wrappers */
+ cinfo->ret.storage = ArgInIReg;
+ } else {
+ add_valuetype (gsctx, sig, &cinfo->ret, sig->ret, TRUE, &tmp_gr, &tmp_fr, &tmp_stacksize);
+ if (cinfo->ret.storage == ArgOnStack)
+ /* The caller passes the address where the value is stored */
+ add_general (&gr, &stack_size, &cinfo->ret);
+ if (cinfo->ret.storage == ArgInIReg)
+ cinfo->ret.storage = ArgValuetypeAddrInIReg;
+ }
break;
}
case MONO_TYPE_VOID:
cfg->arch.omit_fp = FALSE;
if (cfg->param_area)
cfg->arch.omit_fp = FALSE;
+ if ((sig->ret->type != MONO_TYPE_VOID) && (cinfo->ret.storage == ArgAggregate))
+ cfg->arch.omit_fp = FALSE;
for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
ArgInfo *ainfo = &cinfo->args [i];
*/
if (cfg->arch.omit_fp) {
+ cfg->flags |= MONO_CFG_HAS_SPILLUP;
cfg->frame_reg = IA64_SP;
offset = ARGS_OFFSET;
}
}
if (!inreg && (ainfo->storage != ArgOnStack)) {
+ guint32 size = 0;
+
inst->opcode = OP_REGOFFSET;
inst->inst_basereg = cfg->frame_reg;
/* These arguments are saved to the stack in the prolog */
switch (ainfo->storage) {
case ArgAggregate:
if (ainfo->atype == AggregateSingleHFA)
- offset += ainfo->nslots * 4;
+ size = ainfo->nslots * 4;
else
- offset += ainfo->nslots * 8;
+ size = ainfo->nslots * 8;
break;
default:
- offset += sizeof (gpointer);
+ size = sizeof (gpointer);
break;
}
+
offset = ALIGN_TO (offset, sizeof (gpointer));
- if (cfg->arch.omit_fp)
+
+ if (cfg->arch.omit_fp) {
inst->inst_offset = offset;
- else
+ offset += size;
+ } else {
+ offset += size;
inst->inst_offset = - offset;
+ }
}
}
}
+ /*
+ * FIXME: This doesn't work because some variables are allocated during local
+ * regalloc.
+ */
+ /*
if (cfg->arch.omit_fp && offset == 16)
offset = 0;
+ */
cfg->stack_offset = offset;
}
}
}
+/*
+ * add_outarg_reg2:
+ *
+ *   Emit a move of TREE's dreg into a freshly allocated vreg and record
+ * that vreg as an outgoing argument of CALL which the register
+ * allocator must place in hard register REG. STORAGE selects the move:
+ * ArgInIReg -> OP_MOVE (integer), ArgInFloatReg -> OP_FMOVE,
+ * ArgInFloatRegR4 -> OP_FCONV_TO_R4 (value narrowed to single
+ * precision). The move is appended to the current bblock (cfg->cbb).
+ */
+static void
+add_outarg_reg2 (MonoCompile *cfg, MonoCallInst *call, ArgStorage storage, int reg, MonoInst *tree)
+{
+	MonoInst *arg;
+
+	/* Opcode is overwritten below according to STORAGE */
+	MONO_INST_NEW (cfg, arg, OP_NOP);
+	arg->sreg1 = tree->dreg;
+
+	switch (storage) {
+	case ArgInIReg:
+		arg->opcode = OP_MOVE;
+		arg->dreg = mono_alloc_ireg (cfg);
+
+		mono_call_inst_add_outarg_reg (cfg, call, arg->dreg, reg, FALSE);
+		break;
+	case ArgInFloatReg:
+		arg->opcode = OP_FMOVE;
+		arg->dreg = mono_alloc_freg (cfg);
+
+		mono_call_inst_add_outarg_reg (cfg, call, arg->dreg, reg, TRUE);
+		break;
+	case ArgInFloatRegR4:
+		arg->opcode = OP_FCONV_TO_R4;
+		arg->dreg = mono_alloc_freg (cfg);
+
+		mono_call_inst_add_outarg_reg (cfg, call, arg->dreg, reg, TRUE);
+		break;
+	default:
+		g_assert_not_reached ();
+	}
+
+	MONO_ADD_INS (cfg->cbb, arg);
+}
+
static void
emit_sig_cookie (MonoCompile *cfg, MonoCallInst *call, CallInfo *cinfo)
{
arg->inst_imm = 16 + cinfo->sig_cookie.offset;
arg->type = STACK_PTR;
- MONO_INST_LIST_ADD (&arg->node, &call->out_args);
+ /* prepend, so they get reversed */
+ arg->next = call->out_args;
+ call->out_args = arg;
}
/*
} else {
MonoType *arg_type;
- in = call->args [i];
MONO_INST_NEW (cfg, arg, OP_OUTARG);
+ in = call->args [i];
arg->cil_code = in->cil_code;
arg->inst_left = in;
arg->type = in->type;
+ /* prepend, so they get reversed */
+ arg->next = call->out_args;
+ call->out_args = arg;
if (sig->hasthis && (i == 0))
arg_type = &mono_defaults.object_class->byval_arg;
vtaddr = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
- /* Trees can't be shared so make a copy */
- MONO_INST_NEW (cfg, arg, CEE_STIND_I);
- arg->cil_code = in->cil_code;
- arg->ssa_op = MONO_SSA_STORE;
- arg->inst_left = vtaddr;
- arg->inst_right = in;
- arg->type = in->type;
- MONO_INST_LIST_ADD_TAIL (&arg->node, &call->out_args);
-
/*
* Part of the structure is passed in registers.
*/
MONO_INST_NEW (cfg, load, load_op);
load->inst_left = load2;
- MONO_INST_NEW (cfg, set_reg, OP_OUTARG_REG);
+ if (j == 0)
+ set_reg = arg;
+ else
+ MONO_INST_NEW (cfg, set_reg, OP_OUTARG_REG);
add_outarg_reg (cfg, call, set_reg, arg_storage, dest_reg, load);
- MONO_INST_LIST_ADD_TAIL (&set_reg->node, &call->out_args);
+ if (set_reg != call->out_args) {
+ set_reg->next = call->out_args;
+ call->out_args = set_reg;
+ }
}
/*
MONO_INST_NEW (cfg, load, CEE_LDIND_I);
load->inst_left = load2;
- MONO_INST_NEW (cfg, outarg, OP_OUTARG);
+ if (j == 0)
+ outarg = arg;
+ else
+ MONO_INST_NEW (cfg, outarg, OP_OUTARG);
outarg->inst_left = load;
outarg->inst_imm = 16 + ainfo->offset + (slot - 8) * 8;
- MONO_INST_LIST_ADD_TAIL (&outarg->node, &call->out_args);
+ if (outarg != call->out_args) {
+ outarg->next = call->out_args;
+ call->out_args = outarg;
+ }
}
+
+ /* Trees can't be shared so make a copy */
+ MONO_INST_NEW (cfg, arg, CEE_STIND_I);
+ arg->cil_code = in->cil_code;
+ arg->ssa_op = MONO_SSA_STORE;
+ arg->inst_left = vtaddr;
+ arg->inst_right = in;
+ arg->type = in->type;
+
+ /* prepend, so they get reversed */
+ arg->next = call->out_args;
+ call->out_args = arg;
}
else {
MONO_INST_NEW (cfg, stack_addr, OP_REGOFFSET);
arg->opcode = OP_OUTARG_VT;
arg->inst_right = stack_addr;
- MONO_INST_LIST_ADD_TAIL (&arg->node, &call->out_args);
}
}
else {
add_outarg_reg (cfg, call, arg, ainfo->storage, cfg->arch.reg_out0 + ainfo->reg, in);
break;
case ArgInFloatReg:
- case ArgInFloatRegR4:
add_outarg_reg (cfg, call, arg, ainfo->storage, ainfo->reg, in);
break;
case ArgOnStack:
default:
g_assert_not_reached ();
}
- MONO_INST_LIST_ADD_TAIL (&arg->node, &call->out_args);
}
}
}
return call;
}
+/*
+ * emit_sig_cookie2:
+ *
+ *   Linear IR version of the vararg signature-cookie emission: materialize a
+ * trimmed copy of the call signature as an OP_ICONST and store it to the
+ * stack slot reserved for the cookie (always ArgOnStack on ia64).
+ */
+static void
+emit_sig_cookie2 (MonoCompile *cfg, MonoCallInst *call, CallInfo *cinfo)
+{
+	MonoMethodSignature *tmp_sig;
+
+	/* Emit the signature cookie just before the implicit arguments */
+	MonoInst *sig_arg;
+	/* FIXME: Add support for signature tokens to AOT */
+	cfg->disable_aot = TRUE;
+
+	g_assert (cinfo->sig_cookie.storage == ArgOnStack);
+
+	/*
+	 * mono_ArgIterator_Setup assumes the signature cookie is
+	 * passed first and all the arguments which were before it are
+	 * passed on the stack after the signature. So compensate by
+	 * passing a different signature.
+	 */
+	tmp_sig = mono_metadata_signature_dup (call->signature);
+	tmp_sig->param_count -= call->signature->sentinelpos;
+	tmp_sig->sentinelpos = 0;
+	memcpy (tmp_sig->params, call->signature->params + call->signature->sentinelpos, tmp_sig->param_count * sizeof (MonoType*));
+
+	MONO_INST_NEW (cfg, sig_arg, OP_ICONST);
+	sig_arg->dreg = mono_alloc_ireg (cfg);
+	sig_arg->inst_p0 = tmp_sig;
+	MONO_ADD_INS (cfg->cbb, sig_arg);
+
+	/* The 16 byte scratch area at sp is reserved by the ia64 ABI */
+	MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, IA64_SP, 16 + cinfo->sig_cookie.offset, sig_arg->dreg);
+}
+
+/*
+ * mono_arch_emit_call:
+ *
+ *   Linear IR call-argument marshalling for ia64: for each argument of CALL,
+ * emit the instructions which place it into its outgoing register or stack
+ * slot as computed by get_call_info (). Valuetype arguments are deferred to
+ * mono_arch_emit_outarg_vt () through an OP_OUTARG_VT instruction. Also
+ * handles vararg signature cookies and aggregate return values.
+ */
+void
+mono_arch_emit_call (MonoCompile *cfg, MonoCallInst *call)
+{
+	MonoInst *in;
+	MonoMethodSignature *sig;
+	int i, n, stack_size;
+	CallInfo *cinfo;
+	ArgInfo *ainfo;
+
+	stack_size = 0;
+
+	mono_ia64_alloc_stacked_registers (cfg);
+
+	sig = call->signature;
+	n = sig->param_count + sig->hasthis;
+
+	cinfo = get_call_info (cfg, cfg->mempool, sig, sig->pinvoke);
+
+	if (cinfo->ret.storage == ArgAggregate) {
+		MonoInst *vtarg;
+		MonoInst *local;
+
+		/*
+		 * The valuetype is in registers after the call, need to be copied
+		 * to the stack. Save the address to a local here, so the call
+		 * instruction can access it.
+		 */
+		local = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
+		local->flags |= MONO_INST_VOLATILE;
+		cfg->arch.ret_var_addr_local = local;
+
+		MONO_INST_NEW (cfg, vtarg, OP_MOVE);
+		vtarg->sreg1 = call->vret_var->dreg;
+		vtarg->dreg = local->dreg;
+		MONO_ADD_INS (cfg->cbb, vtarg);
+	}
+
+	if (cinfo->ret.storage == ArgValuetypeAddrInIReg) {
+		/* Pass the address of the return value buffer in a register */
+		add_outarg_reg2 (cfg, call, ArgInIReg, cfg->arch.reg_out0 + cinfo->ret.reg, call->vret_var);
+	}
+
+	for (i = 0; i < n; ++i) {
+		MonoType *arg_type;
+
+		ainfo = cinfo->args + i;
+
+		if (!sig->pinvoke && (sig->call_convention == MONO_CALL_VARARG) && (i == sig->sentinelpos)) {
+			/* Emit the signature cookie just before the implicit arguments */
+			emit_sig_cookie2 (cfg, call, cinfo);
+		}
+
+		in = call->args [i];
+
+		if (sig->hasthis && (i == 0))
+			arg_type = &mono_defaults.object_class->byval_arg;
+		else
+			arg_type = sig->params [i - sig->hasthis];
+
+		if ((i >= sig->hasthis) && (MONO_TYPE_ISSTRUCT(arg_type))) {
+			guint32 align;
+			guint32 size;
+
+			if (arg_type->type == MONO_TYPE_TYPEDBYREF) {
+				size = sizeof (MonoTypedRef);
+				align = sizeof (gpointer);
+			}
+			else if (sig->pinvoke)
+				size = mono_type_native_stack_size (&in->klass->byval_arg, &align);
+			else {
+				/*
+				 * Other backends use mono_type_stack_size (), but that
+				 * aligns the size to 8, which is larger than the size of
+				 * the source, leading to reads of invalid memory if the
+				 * source is at the end of address space.
+				 */
+				size = mono_class_value_size (in->klass, &align);
+			}
+
+			if (size > 0) {
+				MonoInst *arg;
+
+				/* The ArgInfo is copied so it survives until codegen */
+				MONO_INST_NEW (cfg, arg, OP_OUTARG_VT);
+				arg->sreg1 = in->dreg;
+				arg->klass = in->klass;
+				arg->backend.size = size;
+				arg->inst_p0 = call;
+				arg->inst_p1 = mono_mempool_alloc (cfg->mempool, sizeof (ArgInfo));
+				memcpy (arg->inst_p1, ainfo, sizeof (ArgInfo));
+
+				MONO_ADD_INS (cfg->cbb, arg);
+			}
+		}
+		else {
+			switch (ainfo->storage) {
+			case ArgInIReg:
+				add_outarg_reg2 (cfg, call, ainfo->storage, cfg->arch.reg_out0 + ainfo->reg, in);
+				break;
+			case ArgInFloatReg:
+			case ArgInFloatRegR4:
+				add_outarg_reg2 (cfg, call, ainfo->storage, ainfo->reg, in);
+				break;
+			case ArgOnStack:
+				/* The 16 byte scratch area at sp is reserved by the ia64 ABI */
+				if (arg_type->type == MONO_TYPE_R4 && !arg_type->byref)
+					MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORER4_MEMBASE_REG, IA64_SP, 16 + ainfo->offset, in->dreg);
+				else if (arg_type->type == MONO_TYPE_R8 && !arg_type->byref)
+					MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORER8_MEMBASE_REG, IA64_SP, 16 + ainfo->offset, in->dreg);
+				else
+					MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, IA64_SP, 16 + ainfo->offset, in->dreg);
+				break;
+			default:
+				g_assert_not_reached ();
+			}
+		}
+	}
+
+	/* Handle the case where there are no implicit arguments */
+	if (!sig->pinvoke && (sig->call_convention == MONO_CALL_VARARG) && (n == sig->sentinelpos)) {
+		emit_sig_cookie2 (cfg, call, cinfo);
+	}
+
+	call->stack_usage = cinfo->stack_usage;
+	cfg->arch.n_out_regs = MAX (cfg->arch.n_out_regs, cinfo->reg_usage);
+}
+
+/*
+ * mono_arch_emit_outarg_vt:
+ *
+ *   Emit the instructions which pass the valuetype in SRC to the call stored
+ * in INS->inst_p0, using the ArgInfo in INS->inst_p1 computed by
+ * mono_arch_emit_call (). Aggregates are split between registers and stack
+ * slots; non-aggregate storage falls back to a plain memcpy into the
+ * outgoing stack area.
+ */
+void
+mono_arch_emit_outarg_vt (MonoCompile *cfg, MonoInst *ins, MonoInst *src)
+{
+	MonoCallInst *call = (MonoCallInst*)ins->inst_p0;
+	ArgInfo *ainfo = (ArgInfo*)ins->inst_p1;
+	int size = ins->backend.size;
+
+	if (ainfo->storage == ArgAggregate) {
+		MonoInst *load, *store;
+		int i, slot;
+
+		/*
+		 * Part of the structure is passed in registers.
+		 * NOTE(review): 'slot' is assigned here but not used in this loop,
+		 * only in the stack loop below.
+		 */
+		for (i = 0; i < ainfo->nregs; ++i) {
+			slot = ainfo->reg + i;
+
+			if (ainfo->atype == AggregateSingleHFA) {
+				/* Homogeneous float aggregate: one r4 per fp register */
+				MONO_INST_NEW (cfg, load, OP_LOADR4_MEMBASE);
+				load->inst_basereg = src->dreg;
+				load->inst_offset = i * 4;
+				load->dreg = mono_alloc_freg (cfg);
+
+				mono_call_inst_add_outarg_reg (cfg, call, load->dreg, ainfo->reg + i, TRUE);
+			} else if (ainfo->atype == AggregateDoubleHFA) {
+				/* Homogeneous double aggregate: one r8 per fp register */
+				MONO_INST_NEW (cfg, load, OP_LOADR8_MEMBASE);
+				load->inst_basereg = src->dreg;
+				load->inst_offset = i * 8;
+				load->dreg = mono_alloc_freg (cfg);
+
+				mono_call_inst_add_outarg_reg (cfg, call, load->dreg, ainfo->reg + i, TRUE);
+			} else {
+				MONO_INST_NEW (cfg, load, OP_LOADI8_MEMBASE);
+				load->inst_basereg = src->dreg;
+				load->inst_offset = i * 8;
+				load->dreg = mono_alloc_ireg (cfg);
+
+				mono_call_inst_add_outarg_reg (cfg, call, load->dreg, cfg->arch.reg_out0 + ainfo->reg + i, FALSE);
+			}
+			MONO_ADD_INS (cfg->cbb, load);
+		}
+
+		/*
+		 * Part of the structure is passed on the stack.
+		 */
+		for (i = ainfo->nregs; i < ainfo->nslots; ++i) {
+			slot = ainfo->reg + i;
+
+			MONO_INST_NEW (cfg, load, OP_LOADI8_MEMBASE);
+			load->inst_basereg = src->dreg;
+			load->inst_offset = i * sizeof (gpointer);
+			load->dreg = mono_alloc_preg (cfg);
+			MONO_ADD_INS (cfg->cbb, load);
+
+			/* NOTE(review): '(slot - 8)' presumably skips the 8 register
+			 * slots when computing the stack offset — confirm against
+			 * get_call_info () */
+			MONO_INST_NEW (cfg, store, OP_STOREI8_MEMBASE_REG);
+			store->sreg1 = load->dreg;
+			store->inst_destbasereg = IA64_SP;
+			store->inst_offset = 16 + ainfo->offset + (slot - 8) * 8;
+			MONO_ADD_INS (cfg->cbb, store);
+		}
+	} else {
+		mini_emit_memcpy2 (cfg, IA64_SP, 16 + ainfo->offset, src->dreg, 0, size, 0);
+	}
+}
+
+/*
+ * mono_arch_emit_setret:
+ *
+ *   Emit the move of VAL into the return register of METHOD. Only scalar
+ * integer and float returns are handled here; other storages hit the
+ * assertion (aggregate returns are presumably handled elsewhere — see the
+ * ArgAggregate path in mono_arch_emit_call ()).
+ */
+void
+mono_arch_emit_setret (MonoCompile *cfg, MonoMethod *method, MonoInst *val)
+{
+	CallInfo *cinfo = get_call_info (cfg, cfg->mempool, mono_method_signature (method), FALSE);
+
+	switch (cinfo->ret.storage) {
+	case ArgInIReg:
+		MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, cfg->ret->dreg, val->dreg);
+		break;
+	case ArgInFloatReg:
+		MONO_EMIT_NEW_UNALU (cfg, OP_FMOVE, cfg->ret->dreg, val->dreg);
+		break;
+	default:
+		g_assert_not_reached ();
+	}
+}
+
void
mono_arch_peephole_pass_1 (MonoCompile *cfg, MonoBasicBlock *bb)
{
void
mono_arch_peephole_pass_2 (MonoCompile *cfg, MonoBasicBlock *bb)
{
- MonoInst *ins, *n;
+ MonoInst *ins, *n, *last_ins = NULL;
+ ins = bb->code;
MONO_BB_FOR_EACH_INS_SAFE (bb, n, ins) {
- MonoInst *last_ins = mono_inst_list_prev (&ins->node, &bb->ins_list);
-
switch (ins->opcode) {
case OP_MOVE:
case OP_FMOVE:
}
break;
}
+
+ last_ins = ins;
+ ins = ins->next;
}
+ bb->last_ins = last_ins;
}
int cond_to_ia64_cmp [][3] = {
return cond_to_ia64_cmp_imm [mono_opcode_to_cond (opcode)][mono_opcode_to_type (opcode, cmp_opcode)];
}
-#define NEW_INS(cfg,ins,dest,op) do { \
+#define NEW_INS(cfg,dest,op) do { \
(dest) = mono_mempool_alloc0 ((cfg)->mempool, sizeof (MonoInst)); \
(dest)->opcode = (op); \
- MONO_INST_LIST_ADD_TAIL (&(dest)->node, &(ins)->node); \
+ mono_bblock_insert_after_ins (bb, last_ins, (dest)); \
+ last_ins = (dest); \
} while (0)
/*
void
mono_arch_lowering_pass (MonoCompile *cfg, MonoBasicBlock *bb)
{
- MonoInst *ins, *n, *next, *temp, *temp2, *temp3;
+ MonoInst *ins, *n, *next, *temp, *temp2, *temp3, *last_ins = NULL;
+ ins = bb->code;
if (bb->max_vreg > cfg->rs->next_vreg)
cfg->rs->next_vreg = bb->max_vreg;
if (ins->inst_offset == 0) {
temp2 = NULL;
} else if (ia64_is_imm14 (ins->inst_offset)) {
- NEW_INS (cfg, ins, temp2, OP_ADD_IMM);
+ NEW_INS (cfg, temp2, OP_ADD_IMM);
temp2->sreg1 = ins->inst_destbasereg;
temp2->inst_imm = ins->inst_offset;
temp2->dreg = mono_regstate_next_int (cfg->rs);
}
else {
- NEW_INS (cfg, ins, temp, OP_I8CONST);
+ NEW_INS (cfg, temp, OP_I8CONST);
temp->inst_c0 = ins->inst_offset;
temp->dreg = mono_regstate_next_int (cfg->rs);
- NEW_INS (cfg, ins, temp2, OP_LADD);
+
+ NEW_INS (cfg, temp2, OP_LADD);
temp2->sreg1 = ins->inst_destbasereg;
temp2->sreg2 = temp->dreg;
temp2->dreg = mono_regstate_next_int (cfg->rs);
if (ins->inst_imm == 0)
ins->sreg1 = IA64_R0;
else {
- NEW_INS (cfg, ins, temp3, OP_I8CONST);
+ NEW_INS (cfg, temp3, OP_I8CONST);
temp3->inst_c0 = ins->inst_imm;
temp3->dreg = mono_regstate_next_int (cfg->rs);
ins->sreg1 = temp3->dreg;
break;
}
else if (ia64_is_imm14 (ins->inst_offset)) {
- NEW_INS (cfg, ins, temp2, OP_ADD_IMM);
+ NEW_INS (cfg, temp2, OP_ADD_IMM);
temp2->sreg1 = ins->inst_destbasereg;
temp2->inst_imm = ins->inst_offset;
temp2->dreg = mono_regstate_next_int (cfg->rs);
}
else {
- NEW_INS (cfg, ins, temp, OP_I8CONST);
+ NEW_INS (cfg, temp, OP_I8CONST);
temp->inst_c0 = ins->inst_offset;
temp->dreg = mono_regstate_next_int (cfg->rs);
- NEW_INS (cfg, ins, temp2, OP_LADD);
+ NEW_INS (cfg, temp2, OP_LADD);
temp2->sreg1 = ins->inst_destbasereg;
temp2->sreg2 = temp->dreg;
temp2->dreg = mono_regstate_next_int (cfg->rs);
break;
}
else if (ia64_is_imm14 (ins->inst_offset)) {
- NEW_INS (cfg, ins, temp2, OP_ADD_IMM);
+ NEW_INS (cfg, temp2, OP_ADD_IMM);
temp2->sreg1 = ins->inst_basereg;
temp2->inst_imm = ins->inst_offset;
temp2->dreg = mono_regstate_next_int (cfg->rs);
}
else {
- NEW_INS (cfg, ins, temp, OP_I8CONST);
+ NEW_INS (cfg, temp, OP_I8CONST);
temp->inst_c0 = ins->inst_offset;
temp->dreg = mono_regstate_next_int (cfg->rs);
- NEW_INS (cfg, ins, temp2, OP_LADD);
+ NEW_INS (cfg, temp2, OP_LADD);
temp2->sreg1 = ins->inst_basereg;
temp2->sreg2 = temp->dreg;
temp2->dreg = mono_regstate_next_int (cfg->rs);
if (ins->inst_imm == 0)
ins->sreg2 = IA64_R0;
else {
- NEW_INS (cfg, ins, temp, OP_I8CONST);
+ NEW_INS (cfg, temp, OP_I8CONST);
temp->inst_c0 = ins->inst_imm;
temp->dreg = mono_regstate_next_int (cfg->rs);
ins->sreg2 = temp->dreg;
break;
}
case OP_COMPARE_IMM:
- case OP_ICOMPARE_IMM: {
+ case OP_ICOMPARE_IMM:
+ case OP_LCOMPARE_IMM: {
/* Instead of compare+b<cond>, ia64 has compare<cond>+br */
gboolean imm;
CompRelation cond;
+ next = ins->next;
+
+ /* Branch opts can eliminate the branch */
+ if (!next || (!(MONO_IS_COND_BRANCH_OP (next) || MONO_IS_COND_EXC (next) || MONO_IS_SETCC (next)))) {
+ ins->opcode = OP_NOP;
+ break;
+ }
+
/*
* The compare_imm instructions have switched up arguments, and
* some of them take an imm between -127 and 128.
*/
- next = mono_inst_list_next (&ins->node, &bb->ins_list);
- g_assert(next);
+ next = ins->next;
cond = mono_opcode_to_cond (next->opcode);
if ((cond == CMP_LT) || (cond == CMP_GE))
imm = ia64_is_imm8 (ins->inst_imm - 1);
if (ins->inst_imm == 0)
ins->sreg2 = IA64_R0;
else {
- NEW_INS (cfg, ins, temp, OP_I8CONST);
+ NEW_INS (cfg, temp, OP_I8CONST);
temp->inst_c0 = ins->inst_imm;
temp->dreg = mono_regstate_next_int (cfg->rs);
ins->sreg2 = temp->dreg;
}
}
- switch (next->opcode) {
- case OP_LBEQ:
- case OP_LBNE_UN:
- case OP_LBLE:
- case OP_LBGT:
- case OP_LBLE_UN:
- case OP_LBGT_UN:
- case OP_LBGE:
- case OP_LBLT:
- case OP_LBGE_UN:
- case OP_LBLT_UN:
- case OP_IBEQ:
- case OP_IBNE_UN:
- case OP_IBLE:
- case OP_IBLT:
- case OP_IBGT:
- case OP_IBGE:
- case OP_IBLE_UN:
- case OP_IBLT_UN:
- case OP_IBGE_UN:
- case OP_IBGT_UN:
+ if (MONO_IS_COND_BRANCH_OP (next)) {
next->opcode = OP_IA64_BR_COND;
if (! (next->flags & MONO_INST_BRLABEL))
next->inst_target_bb = next->inst_true_bb;
- break;
- case OP_COND_EXC_EQ:
- case OP_COND_EXC_GT:
- case OP_COND_EXC_LT:
- case OP_COND_EXC_GT_UN:
- case OP_COND_EXC_LE_UN:
- case OP_COND_EXC_NE_UN:
- case OP_COND_EXC_LT_UN:
+ } else if (MONO_IS_COND_EXC (next)) {
next->opcode = OP_IA64_COND_EXC;
- break;
- case OP_CEQ:
- case OP_CLT:
- case OP_CGT:
- case OP_CLT_UN:
- case OP_CGT_UN:
- case OP_ICEQ:
- case OP_ICLT:
- case OP_ICGT:
- case OP_ICLT_UN:
- case OP_ICGT_UN:
+ } else if (MONO_IS_SETCC (next)) {
next->opcode = OP_IA64_CSET;
- break;
- default:
+ } else {
printf ("%s\n", mono_inst_name (next->opcode));
NOT_IMPLEMENTED;
}
case OP_FCOMPARE: {
/* Instead of compare+b<cond>, ia64 has compare<cond>+br */
- next = mono_inst_list_next (&ins->node, &bb->ins_list);
- g_assert(next);
+ next = ins->next;
+
+ /* Branch opts can eliminate the branch */
+ if (!next || (!(MONO_IS_COND_BRANCH_OP (next) || MONO_IS_COND_EXC (next) || MONO_IS_SETCC (next)))) {
+ ins->opcode = OP_NOP;
+ break;
+ }
ins->opcode = opcode_to_ia64_cmp (next->opcode, ins->opcode);
- switch (next->opcode) {
- case OP_LBEQ:
- case OP_LBNE_UN:
- case OP_LBLE:
- case OP_LBGE:
- case OP_LBLT:
- case OP_LBGT:
- case OP_LBLE_UN:
- case OP_LBGE_UN:
- case OP_LBLT_UN:
- case OP_LBGT_UN:
- case OP_IBEQ:
- case OP_IBNE_UN:
- case OP_IBLE:
- case OP_IBLT:
- case OP_IBGT:
- case OP_IBGE:
- case OP_IBLE_UN:
- case OP_IBLT_UN:
- case OP_IBGE_UN:
- case OP_IBGT_UN:
- case OP_FBEQ:
- case OP_FBNE_UN:
- case OP_FBLT:
- case OP_FBLT_UN:
- case OP_FBGT:
- case OP_FBGT_UN:
- case OP_FBGE:
- case OP_FBGE_UN:
- case OP_FBLE:
- case OP_FBLE_UN:
+
+ if (MONO_IS_COND_BRANCH_OP (next)) {
next->opcode = OP_IA64_BR_COND;
if (! (next->flags & MONO_INST_BRLABEL))
next->inst_target_bb = next->inst_true_bb;
- break;
- case OP_COND_EXC_LT:
- case OP_COND_EXC_GT:
- case OP_COND_EXC_GT_UN:
- case OP_COND_EXC_LE_UN:
+ } else if (MONO_IS_COND_EXC (next)) {
next->opcode = OP_IA64_COND_EXC;
- break;
- case OP_CEQ:
- case OP_CLT:
- case OP_CGT:
- case OP_CLT_UN:
- case OP_CGT_UN:
- case OP_ICEQ:
- case OP_ICLT:
- case OP_ICGT:
- case OP_ICLT_UN:
- case OP_ICGT_UN:
- case OP_FCEQ:
- case OP_FCLT:
- case OP_FCGT:
- case OP_FCLT_UN:
- case OP_FCGT_UN:
+ } else if (MONO_IS_SETCC (next)) {
next->opcode = OP_IA64_CSET;
- break;
- default:
+ } else {
printf ("%s\n", mono_inst_name (next->opcode));
NOT_IMPLEMENTED;
}
+
break;
}
+ case OP_FCEQ:
+ case OP_FCGT:
+ case OP_FCGT_UN:
+ case OP_FCLT:
+ case OP_FCLT_UN:
+ /* The front end removes the fcompare, so introduce it again */
+ NEW_INS (cfg, temp, opcode_to_ia64_cmp (ins->opcode, OP_FCOMPARE));
+ temp->sreg1 = ins->sreg1;
+ temp->sreg2 = ins->sreg2;
+
+ ins->opcode = OP_IA64_CSET;
+ break;
case OP_MUL_IMM:
case OP_LMUL_IMM:
case OP_IMUL_IMM: {
sum_reg = 0;
for (i = 0; i < 64; ++i) {
if (ins->inst_imm & (((gint64)1) << i)) {
- NEW_INS (cfg, ins, temp, shl_op);
+ NEW_INS (cfg, temp, shl_op);
temp->dreg = mono_regstate_next_int (cfg->rs);
temp->sreg1 = ins->sreg1;
temp->inst_imm = i;
if (sum_reg == 0)
sum_reg = temp->dreg;
else {
- NEW_INS (cfg, ins, temp2, OP_LADD);
+ NEW_INS (cfg, temp2, OP_LADD);
temp2->dreg = mono_regstate_next_int (cfg->rs);
temp2->sreg1 = sum_reg;
temp2->sreg2 = temp->dreg;
break;
}
case OP_LCONV_TO_OVF_U4:
- NEW_INS (cfg, ins, temp, OP_IA64_CMP4_LT);
+ NEW_INS (cfg, temp, OP_IA64_CMP4_LT);
temp->sreg1 = ins->sreg1;
temp->sreg2 = IA64_R0;
- NEW_INS (cfg, ins, temp, OP_IA64_COND_EXC);
+ NEW_INS (cfg, temp, OP_IA64_COND_EXC);
temp->inst_p1 = (char*)"OverflowException";
ins->opcode = OP_MOVE;
break;
case OP_LCONV_TO_OVF_I4_UN:
- NEW_INS (cfg, ins, temp, OP_ICONST);
+ NEW_INS (cfg, temp, OP_ICONST);
temp->inst_c0 = 0x7fffffff;
temp->dreg = mono_regstate_next_int (cfg->rs);
- NEW_INS (cfg, ins, temp2, OP_IA64_CMP4_GT_UN);
+ NEW_INS (cfg, temp2, OP_IA64_CMP4_GT_UN);
temp2->sreg1 = ins->sreg1;
temp2->sreg2 = temp->dreg;
- NEW_INS (cfg, ins, temp, OP_IA64_COND_EXC);
+ NEW_INS (cfg, temp, OP_IA64_COND_EXC);
temp->inst_p1 = (char*)"OverflowException";
ins->opcode = OP_MOVE;
case OP_FCONV_TO_U2:
case OP_FCONV_TO_I1:
case OP_FCONV_TO_U1:
- NEW_INS (cfg, ins, temp, OP_FCONV_TO_I8);
+ NEW_INS (cfg, temp, OP_FCONV_TO_I8);
temp->sreg1 = ins->sreg1;
temp->dreg = ins->dreg;
default:
break;
}
+ last_ins = ins;
+ ins = ins->next;
}
+ bb->last_ins = last_ins;
bb->max_vreg = cfg->rs->next_vreg;
}
ArgInfo *ainfo = cinfo->args + i;
gint32 stack_offset;
MonoType *arg_type;
+
ins = cfg->args [i];
if (sig->hasthis && (i == 0))
break;
case OP_VCALL:
case OP_VCALL_REG:
- case OP_VCALL_MEMBASE: {
+ case OP_VCALL_MEMBASE:
+ case OP_VCALL2:
+ case OP_VCALL2_REG:
+ case OP_VCALL2_MEMBASE: {
ArgStorage storage;
cinfo = get_call_info (cfg, cfg->mempool, ((MonoCallInst*)ins)->signature, FALSE);
guint offset;
Ia64CodegenState code;
guint8 *code_start = cfg->native_code + cfg->code_len;
+ MonoInst *last_ins = NULL;
guint last_offset = 0;
int max_len, cpos;
else
ia64_movl (code, ins->dreg, ins->inst_c0);
break;
+ case OP_JUMP_TABLE:
+ add_patch_info (cfg, code, (MonoJumpInfoType)ins->inst_i1, ins->inst_p0);
+ ia64_movl (code, ins->dreg, 0);
+ break;
case OP_MOVE:
ia64_mov (code, ins->dreg, ins->sreg1);
break;
ins->inst_c0 = code.buf - cfg->native_code;
break;
case OP_NOP:
+ case OP_DUMMY_USE:
+ case OP_DUMMY_STORE:
+ case OP_NOT_REACHED:
+ case OP_NOT_NULL:
break;
case OP_BR_REG:
ia64_mov_to_br (code, IA64_B6, ins->sreg1);
ia64_br_cond_reg (code, IA64_B6);
break;
- case OP_LADD:
case OP_IADD:
+ case OP_LADD:
ia64_add (code, ins->dreg, ins->sreg1, ins->sreg2);
break;
- case OP_LAND:
+ case OP_ISUB:
+ case OP_LSUB:
+ ia64_sub (code, ins->dreg, ins->sreg1, ins->sreg2);
+ break;
case OP_IAND:
+ case OP_LAND:
ia64_and (code, ins->dreg, ins->sreg1, ins->sreg2);
break;
case OP_IOR:
ia64_andcm_imm (code, ins->dreg, -1, ins->sreg1);
break;
case OP_ISHL:
+ case OP_LSHL:
ia64_shl (code, ins->dreg, ins->sreg1, ins->sreg2);
break;
case OP_ISHR:
ia64_zxt4 (code, GP_SCRATCH_REG, ins->sreg1);
ia64_shr_u (code, ins->dreg, GP_SCRATCH_REG, ins->sreg2);
break;
- case OP_LSHL:
- ia64_shl (code, ins->dreg, ins->sreg1, ins->sreg2);
- break;
case OP_LSHR_UN:
ia64_shr_u (code, ins->dreg, ins->sreg1, ins->sreg2);
break;
- case OP_LSUB:
- case OP_ISUB:
- ia64_sub (code, ins->dreg, ins->sreg1, ins->sreg2);
- break;
case OP_IADDCC:
/* p6 and p7 is set if there is signed/unsigned overflow */
break;
case OP_ADD_IMM:
case OP_IADD_IMM:
+ case OP_LADD_IMM:
ia64_adds_imm (code, ins->dreg, ins->inst_imm, ins->sreg1);
break;
case OP_IAND_IMM:
case OP_AND_IMM:
+ case OP_LAND_IMM:
ia64_and_imm (code, ins->dreg, ins->inst_imm, ins->sreg1);
break;
case OP_IOR_IMM:
+ case OP_LOR_IMM:
ia64_or_imm (code, ins->dreg, ins->inst_imm, ins->sreg1);
break;
case OP_IXOR_IMM:
+ case OP_LXOR_IMM:
ia64_xor_imm (code, ins->dreg, ins->inst_imm, ins->sreg1);
break;
case OP_SHL_IMM:
break;
case OP_STOREI8_MEMBASE_REG:
case OP_STORE_MEMBASE_REG:
+ if (ins->inst_offset != 0) {
+ /* This is generated by local regalloc */
+ if (ia64_is_imm14 (ins->inst_offset)) {
+ ia64_adds_imm (code, GP_SCRATCH_REG, ins->inst_offset, ins->inst_destbasereg);
+ } else {
+ ia64_movl (code, GP_SCRATCH_REG, ins->inst_offset);
+ ia64_add (code, GP_SCRATCH_REG, GP_SCRATCH_REG, ins->inst_destbasereg);
+ }
+ ins->inst_destbasereg = GP_SCRATCH_REG;
+ }
ia64_st8_hint (code, ins->inst_destbasereg, ins->sreg1, 0);
break;
break;
case OP_LOAD_MEMBASE:
case OP_LOADI8_MEMBASE:
+ if (ins->inst_offset != 0) {
+ /* This is generated by local regalloc */
+ if (ia64_is_imm14 (ins->inst_offset)) {
+ ia64_adds_imm (code, GP_SCRATCH_REG, ins->inst_offset, ins->inst_basereg);
+ } else {
+ ia64_movl (code, GP_SCRATCH_REG, ins->inst_offset);
+ ia64_add (code, GP_SCRATCH_REG, GP_SCRATCH_REG, ins->inst_basereg);
+ }
+ ins->inst_basereg = GP_SCRATCH_REG;
+ }
ia64_ld8 (code, ins->dreg, ins->inst_basereg);
break;
ia64_no_stop (code);
ia64_add1_pred (code, 6, ins->dreg, IA64_R0, IA64_R0);
break;
+ case OP_ICONV_TO_I1:
case OP_LCONV_TO_I1:
/* FIXME: Is this needed ? */
ia64_sxt1 (code, ins->dreg, ins->sreg1);
break;
+ case OP_ICONV_TO_I2:
case OP_LCONV_TO_I2:
/* FIXME: Is this needed ? */
ia64_sxt2 (code, ins->dreg, ins->sreg1);
/* FIXME: Is this needed ? */
ia64_sxt4 (code, ins->dreg, ins->sreg1);
break;
+ case OP_ICONV_TO_U1:
case OP_LCONV_TO_U1:
/* FIXME: Is this needed */
ia64_zxt1 (code, ins->dreg, ins->sreg1);
break;
+ case OP_ICONV_TO_U2:
case OP_LCONV_TO_U2:
/* FIXME: Is this needed */
ia64_zxt2 (code, ins->dreg, ins->sreg1);
/* FIXME: Is this needed */
ia64_zxt4 (code, ins->dreg, ins->sreg1);
break;
+ case OP_ICONV_TO_I8:
+ case OP_ICONV_TO_I:
case OP_LCONV_TO_I8:
case OP_LCONV_TO_I:
ia64_sxt4 (code, ins->dreg, ins->sreg1);
ia64_fmov (code, ins->dreg, ins->sreg1);
break;
case OP_STORER8_MEMBASE_REG:
+ if (ins->inst_offset != 0) {
+ /* This is generated by local regalloc */
+ if (ia64_is_imm14 (ins->inst_offset)) {
+ ia64_adds_imm (code, GP_SCRATCH_REG, ins->inst_offset, ins->inst_destbasereg);
+ } else {
+ ia64_movl (code, GP_SCRATCH_REG, ins->inst_offset);
+ ia64_add (code, GP_SCRATCH_REG, GP_SCRATCH_REG, ins->inst_destbasereg);
+ }
+ ins->inst_destbasereg = GP_SCRATCH_REG;
+ }
ia64_stfd_hint (code, ins->inst_destbasereg, ins->sreg1, 0);
break;
case OP_STORER4_MEMBASE_REG:
ia64_stfs_hint (code, ins->inst_destbasereg, FP_SCRATCH_REG, 0);
break;
case OP_LOADR8_MEMBASE:
+ if (ins->inst_offset != 0) {
+ /* This is generated by local regalloc */
+ if (ia64_is_imm14 (ins->inst_offset)) {
+ ia64_adds_imm (code, GP_SCRATCH_REG, ins->inst_offset, ins->inst_basereg);
+ } else {
+ ia64_movl (code, GP_SCRATCH_REG, ins->inst_offset);
+ ia64_add (code, GP_SCRATCH_REG, GP_SCRATCH_REG, ins->inst_basereg);
+ }
+ ins->inst_basereg = GP_SCRATCH_REG;
+ }
ia64_ldfd (code, ins->dreg, ins->inst_basereg);
break;
case OP_LOADR4_MEMBASE:
ia64_ldfs (code, ins->dreg, ins->inst_basereg);
ia64_fnorm_d_sf (code, ins->dreg, ins->dreg, 0);
break;
- case OP_LCONV_TO_R4:
case OP_ICONV_TO_R4:
+ case OP_LCONV_TO_R4:
ia64_setf_sig (code, ins->dreg, ins->sreg1);
ia64_fcvt_xf (code, ins->dreg, ins->dreg);
ia64_fnorm_s_sf (code, ins->dreg, ins->dreg, 0);
break;
- case OP_LCONV_TO_R8:
case OP_ICONV_TO_R8:
+ case OP_LCONV_TO_R8:
ia64_setf_sig (code, ins->dreg, ins->sreg1);
ia64_fcvt_xf (code, ins->dreg, ins->dreg);
ia64_fnorm_d_sf (code, ins->dreg, ins->dreg, 0);
case OP_FCALL:
case OP_LCALL:
case OP_VCALL:
+ case OP_VCALL2:
case OP_VOIDCALL:
case OP_CALL:
call = (MonoCallInst*)ins;
case OP_FCALL_REG:
case OP_LCALL_REG:
case OP_VCALL_REG:
+ case OP_VCALL2_REG:
case OP_VOIDCALL_REG: {
MonoCallInst *call = (MonoCallInst*)ins;
CallInfo *cinfo;
case OP_FCALL_MEMBASE:
case OP_LCALL_MEMBASE:
case OP_VCALL_MEMBASE:
+ case OP_VCALL2_MEMBASE:
case OP_VOIDCALL_MEMBASE:
case OP_CALL_MEMBASE: {
MonoCallInst *call = (MonoCallInst*)ins;
if (ia64_is_imm14 (ins->inst_offset))
ia64_adds_imm (code, IA64_R8, ins->inst_offset, ins->sreg1);
else {
+ printf ("A: %lx\n", ins->inst_offset);
ia64_movl (code, GP_SCRATCH_REG, ins->inst_offset);
ia64_add (code, IA64_R8, GP_SCRATCH_REG, ins->sreg1);
}
break;
}
+ case OP_LOCALLOC_IMM: {
+ gint32 abi_offset;
+
+ /* FIXME: Sigaltstack support */
+
+ gssize size = ins->inst_imm;
+ size = (size + (MONO_ARCH_FRAME_ALIGNMENT - 1)) & ~ (MONO_ARCH_FRAME_ALIGNMENT - 1);
+
+ if (ia64_is_adds_imm (size))
+ ia64_adds_imm (code, GP_SCRATCH_REG, size, IA64_R0);
+ else
+ ia64_movl (code, GP_SCRATCH_REG, size);
+
+ ia64_sub (code, IA64_SP, IA64_SP, GP_SCRATCH_REG);
+ ia64_mov (code, ins->dreg, IA64_SP);
+
+ /* An area at sp is reserved by the ABI for parameter passing */
+ abi_offset = - ALIGN_TO (cfg->param_area + 16, MONO_ARCH_FRAME_ALIGNMENT);
+ if (ia64_is_adds_imm (abi_offset))
+ ia64_adds_imm (code, IA64_SP, abi_offset, IA64_SP);
+ else {
+ ia64_movl (code, GP_SCRATCH_REG2, abi_offset);
+ ia64_add (code, IA64_SP, IA64_SP, GP_SCRATCH_REG2);
+ }
+
+ if (ins->flags & MONO_INST_INIT) {
+ /* Upper limit */
+ ia64_add (code, GP_SCRATCH_REG2, ins->dreg, GP_SCRATCH_REG);
+
+ ia64_codegen_set_one_ins_per_bundle (code, TRUE);
+
+ /* Init loop */
+ ia64_st8_inc_imm_hint (code, ins->dreg, IA64_R0, 8, 0);
+ ia64_cmp_lt (code, 8, 9, ins->dreg, GP_SCRATCH_REG2);
+ ia64_br_cond_pred (code, 8, -2);
+
+ ia64_codegen_set_one_ins_per_bundle (code, FALSE);
+
+ ia64_sub (code, ins->dreg, GP_SCRATCH_REG2, GP_SCRATCH_REG);
+ }
+
+ break;
+ }
case OP_TLS_GET:
ia64_adds_imm (code, ins->dreg, ins->inst_offset, IA64_TP);
ia64_ld8 (code, ins->dreg, ins->dreg);
/* Signal to endfilter that we are called by call_filter */
ia64_mov (code, GP_SCRATCH_REG, IA64_R0);
- /* Save the return address */
- ia64_adds_imm (code, GP_SCRATCH_REG2, spvar->inst_offset, cfg->frame_reg);
+ /* Branch target: */
+ if (ia64_is_imm14 (spvar->inst_offset))
+ ia64_adds_imm (code, GP_SCRATCH_REG2, spvar->inst_offset, cfg->frame_reg);
+ else {
+ ia64_movl (code, GP_SCRATCH_REG2, spvar->inst_offset);
+ ia64_add (code, GP_SCRATCH_REG2, cfg->frame_reg, GP_SCRATCH_REG2);
+ }
+
+ /* Save the return address */
ia64_st8_hint (code, GP_SCRATCH_REG2, GP_SCRATCH_REG, 0);
ia64_codegen_set_one_ins_per_bundle (code, FALSE);
MonoInst *spvar = mono_find_spvar_for_region (cfg, bb->region);
/* Load the return address */
- ia64_adds_imm (code, GP_SCRATCH_REG, spvar->inst_offset, cfg->frame_reg);
+ if (ia64_is_imm14 (spvar->inst_offset)) {
+ ia64_adds_imm (code, GP_SCRATCH_REG, spvar->inst_offset, cfg->frame_reg);
+ } else {
+ ia64_movl (code, GP_SCRATCH_REG, spvar->inst_offset);
+ ia64_add (code, GP_SCRATCH_REG, cfg->frame_reg, GP_SCRATCH_REG);
+ }
ia64_ld8_hint (code, GP_SCRATCH_REG, GP_SCRATCH_REG, 0);
/* Test caller */
cpos += max_len;
+ last_ins = ins;
last_offset = offset;
}
ArgInfo *ainfo = cinfo->args + i;
gint32 stack_offset;
MonoType *arg_type;
+
inst = cfg->args [i];
if (sig->hasthis && (i == 0))
return ins;
}
+/*
+ * mono_arch_emit_inst_for_method:
+ *
+ *   Emit ia64-specific intrinsic IR for CMETHOD, or return NULL to fall back
+ * to the generic path. Currently covers System.Threading.Interlocked
+ * Increment/Decrement/Add on I4/I8, using the immediate atomic-add opcodes
+ * where the addend is one of the constants ia64's fetchadd supports.
+ */
+MonoInst*
+mono_arch_emit_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
+{
+	MonoInst *ins = NULL;
+
+	if (cmethod->klass->image == mono_defaults.corlib &&
+		(strcmp (cmethod->klass->name_space, "System.Threading") == 0) &&
+		(strcmp (cmethod->klass->name, "Interlocked") == 0)) {
+
+		/*
+		 * We don't use the generic version in mini_emit_inst_for_method () since
+		 * ia64 has atomic_add_imm opcodes.
+		 */
+		if (strcmp (cmethod->name, "Increment") == 0) {
+			guint32 opcode;
+
+			if (fsig->params [0]->type == MONO_TYPE_I4)
+				opcode = OP_ATOMIC_ADD_IMM_NEW_I4;
+			else if (fsig->params [0]->type == MONO_TYPE_I8)
+				opcode = OP_ATOMIC_ADD_IMM_NEW_I8;
+			else
+				g_assert_not_reached ();
+			MONO_INST_NEW (cfg, ins, opcode);
+			ins->dreg = mono_alloc_preg (cfg);
+			ins->inst_imm = 1;
+			ins->inst_basereg = args [0]->dreg;
+			ins->inst_offset = 0;
+			MONO_ADD_INS (cfg->cbb, ins);
+		} else if (strcmp (cmethod->name, "Decrement") == 0) {
+			guint32 opcode;
+
+			if (fsig->params [0]->type == MONO_TYPE_I4)
+				opcode = OP_ATOMIC_ADD_IMM_NEW_I4;
+			else if (fsig->params [0]->type == MONO_TYPE_I8)
+				opcode = OP_ATOMIC_ADD_IMM_NEW_I8;
+			else
+				g_assert_not_reached ();
+			MONO_INST_NEW (cfg, ins, opcode);
+			ins->dreg = mono_alloc_preg (cfg);
+			ins->inst_imm = -1;
+			ins->inst_basereg = args [0]->dreg;
+			ins->inst_offset = 0;
+			MONO_ADD_INS (cfg->cbb, ins);
+		} else if (strcmp (cmethod->name, "Add") == 0) {
+			guint32 opcode;
+			gboolean is_imm = FALSE;
+			gint64 imm = 0;
+
+			if ((args [1]->opcode == OP_ICONST) || (args [1]->opcode == OP_I8CONST)) {
+				imm = (args [1]->opcode == OP_ICONST) ? args [1]->inst_c0 : args [1]->inst_l;
+
+				/* These are the only addends ia64's fetchadd accepts */
+				is_imm = (imm == 1 || imm == 4 || imm == 8 || imm == 16 || imm == -1 || imm == -4 || imm == -8 || imm == -16);
+			}
+
+			/*
+			 * NOTE(review): this branch uses mono_alloc_ireg () and sets
+			 * ins->type, while Increment/Decrement above use
+			 * mono_alloc_preg () and leave ins->type unset — confirm the
+			 * inconsistency is intentional.
+			 */
+			if (is_imm) {
+				if (fsig->params [0]->type == MONO_TYPE_I4)
+					opcode = OP_ATOMIC_ADD_IMM_NEW_I4;
+				else if (fsig->params [0]->type == MONO_TYPE_I8)
+					opcode = OP_ATOMIC_ADD_IMM_NEW_I8;
+				else
+					g_assert_not_reached ();
+
+				MONO_INST_NEW (cfg, ins, opcode);
+				ins->dreg = mono_alloc_ireg (cfg);
+				ins->inst_basereg = args [0]->dreg;
+				ins->inst_offset = 0;
+				ins->inst_imm = imm;
+				ins->type = (opcode == OP_ATOMIC_ADD_IMM_NEW_I4) ? STACK_I4 : STACK_I8;
+			} else {
+				if (fsig->params [0]->type == MONO_TYPE_I4)
+					opcode = OP_ATOMIC_ADD_NEW_I4;
+				else if (fsig->params [0]->type == MONO_TYPE_I8)
+					opcode = OP_ATOMIC_ADD_NEW_I8;
+				else
+					g_assert_not_reached ();
+
+				MONO_INST_NEW (cfg, ins, opcode);
+				ins->dreg = mono_alloc_ireg (cfg);
+				ins->inst_basereg = args [0]->dreg;
+				ins->inst_offset = 0;
+				ins->sreg2 = args [1]->dreg;
+				ins->type = (opcode == OP_ATOMIC_ADD_NEW_I4) ? STACK_I4 : STACK_I8;
+			}
+			MONO_ADD_INS (cfg->cbb, ins);
+		}
+	}
+
+	return ins;
+}
+
gboolean
mono_arch_print_tree (MonoInst *tree, int arity)
{
MONO_INST_NEW (cfg, arg, OP_OUTARG);
arg->inst_imm = cinfo->sig_cookie.offset;
arg->inst_left = sig_arg;
- MONO_INST_LIST_ADD_TAIL (&arg->node, &call->out_args);
+
+ /* prepend, so they get reversed */
+ arg->next = call->out_args;
+ call->out_args = arg;
}
if (is_virtual && i == 0) {
/* the argument will be attached to the call instrucion */
arg->inst_left = in;
arg->inst_call = call;
arg->type = in->type;
- MONO_INST_LIST_ADD_TAIL (&arg->node, &call->out_args);
+ /* prepend, we'll need to reverse them later */
+ arg->next = call->out_args;
+ call->out_args = arg;
if (ainfo->regtype == RegTypeGeneral) {
arg->backend.reg3 = ainfo->reg;
call->used_iregs |= 1 << ainfo->reg;
}
}
}
+ /*
+ * Reverse the call->out_args list.
+ */
+ {
+ MonoInst *prev = NULL, *list = call->out_args, *next;
+ while (list) {
+ next = list->next;
+ list->next = prev;
+ prev = list;
+ list = next;
+ }
+ call->out_args = prev;
+ }
call->stack_usage = cinfo->stack_usage;
cfg->param_area = MAX (cfg->param_area, cinfo->stack_usage);
cfg->param_area = MAX (cfg->param_area, 16); /* a0-a3 always present */
void
mono_arch_peephole_pass_2 (MonoCompile *cfg, MonoBasicBlock *bb)
{
- MonoInst *ins, *n;
+ MonoInst *ins, *n, *last_ins = NULL;
+ ins = bb->code;
MONO_BB_FOR_EACH_INS_SAFE (bb, n, ins) {
MonoInst *last_ins = mono_inst_list_prev (&ins->node, &bb->ins_list);
}
break;
}
+ last_ins = ins;
+ ins = ins->next;
+ }
+ bb->last_ins = last_ins;
+}
+
+/*
+ * inst_list_prepend:
+ *
+ *   Prepend DATA to the doubly linked list LIST, allocating the node from
+ * POOL (no explicit free needed — the mempool owns it). Returns the new
+ * list head.
+ */
+static inline InstList*
+inst_list_prepend (MonoMemPool *pool, InstList *list, MonoInst *data)
+{
+	InstList *item = mono_mempool_alloc (pool, sizeof (InstList));
+	item->data = data;
+	item->prev = NULL;
+	item->next = list;
+	if (list)
+		list->prev = item;
+	return item;
+}
+
+/*
+ * insert_after_ins:
+ *
+ *   Insert TO_INSERT into BB's singly linked instruction list right after
+ * INS; a NULL INS makes TO_INSERT the new list head.
+ */
+static void
+insert_after_ins (MonoBasicBlock *bb, MonoInst *ins, MonoInst *to_insert)
+{
+	if (ins == NULL) {
+		ins = bb->code;
+		bb->code = to_insert;
+		to_insert->next = ins;
+	} else {
+		to_insert->next = ins->next;
+		ins->next = to_insert;
	}
}
-#define NEW_INS(cfg,ins,dest,op) do { \
+#define NEW_INS(cfg,dest,op) do { \
(dest) = mono_mempool_alloc0 ((cfg)->mempool, sizeof (MonoInst)); \
(dest)->opcode = (op); \
- MONO_INST_LIST_ADD_TAIL (&(dest)->node, &(ins)->node); \
+ insert_after_ins (bb, last_ins, (dest)); \
} while (0)
static int
void
mono_arch_lowering_pass (MonoCompile *cfg, MonoBasicBlock *bb)
{
- MonoInst *ins, *next, *temp;
+ MonoInst *ins, *next, *temp, *last_ins = NULL;
int imm;
/* setup the virtual reg allocator */
case OP_ADD_IMM:
case OP_ADDCC_IMM:
if (!mips_is_imm16 (ins->inst_imm)) {
- NEW_INS (cfg, ins, temp, OP_ICONST);
+ NEW_INS (cfg, temp, OP_ICONST);
temp->inst_c0 = ins->inst_imm;
temp->dreg = mono_regstate_next_int (cfg->rs);
ins->sreg2 = temp->dreg;
#if 0
case OP_SUB_IMM:
if (!mips_is_imm16 (-ins->inst_imm)) {
- NEW_INS (cfg, ins, temp, OP_ICONST);
+ NEW_INS (cfg, temp, OP_ICONST);
temp->inst_c0 = ins->inst_imm;
temp->dreg = mono_regstate_next_int (cfg->rs);
ins->sreg2 = temp->dreg;
case OP_OR_IMM:
case OP_XOR_IMM:
if ((ins->inst_imm & 0xffff0000) && (ins->inst_imm & 0xffff)) {
- NEW_INS (cfg, ins, temp, OP_ICONST);
+ NEW_INS (cfg, temp, OP_ICONST);
temp->inst_c0 = ins->inst_imm;
temp->dreg = mono_regstate_next_int (cfg->rs);
ins->sreg2 = temp->dreg;
case OP_SBB_IMM:
case OP_SUBCC_IMM:
case OP_ADC_IMM:
- NEW_INS (cfg, ins, temp, OP_ICONST);
+ NEW_INS (cfg, temp, OP_ICONST);
temp->inst_c0 = ins->inst_imm;
temp->dreg = mono_regstate_next_int (cfg->rs);
ins->sreg2 = temp->dreg;
#endif
#if 0
case OP_COMPARE_IMM:
- next = mono_inst_list_next (&ins->node, &bb->ins_list);
- g_assert(next);
- if (compare_opcode_is_unsigned (next->opcode)) {
+ if (compare_opcode_is_unsigned (ins->next->opcode)) {
if (!ppc_is_uimm16 (ins->inst_imm)) {
- NEW_INS (cfg, ins, temp, OP_ICONST);
+ NEW_INS (cfg, temp, OP_ICONST);
temp->inst_c0 = ins->inst_imm;
temp->dreg = mono_regstate_next_int (cfg->rs);
ins->sreg2 = temp->dreg;
}
} else {
if (!ppc_is_imm16 (ins->inst_imm)) {
- NEW_INS (cfg, ins, temp, OP_ICONST);
+ NEW_INS (cfg, temp, OP_ICONST);
temp->inst_c0 = ins->inst_imm;
temp->dreg = mono_regstate_next_int (cfg->rs);
ins->sreg2 = temp->dreg;
break;
}
if (!ppc_is_imm16 (ins->inst_imm)) {
- NEW_INS (cfg, ins, temp, OP_ICONST);
+ NEW_INS (cfg, temp, OP_ICONST);
temp->inst_c0 = ins->inst_imm;
temp->dreg = mono_regstate_next_int (cfg->rs);
ins->sreg2 = temp->dreg;
*/
if (ppc_is_imm16 (ins->inst_offset))
break;
- NEW_INS (cfg, ins, temp, OP_ICONST);
+ NEW_INS (cfg, temp, OP_ICONST);
temp->inst_c0 = ins->inst_offset;
temp->dreg = mono_regstate_next_int (cfg->rs);
ins->sreg2 = temp->dreg;
ins->opcode = map_to_reg_reg_op (ins->opcode);
}
#if 0
- NEW_INS (cfg, ins, temp, OP_ICONST);
+ NEW_INS (cfg, temp, OP_ICONST);
temp->inst_c0 = ins->inst_imm;
temp->dreg = mono_regstate_next_int (cfg->rs);
ins->sreg1 = temp->dreg;
ins->opcode = map_to_reg_reg_op (ins->opcode);
+ last_ins = temp;
goto loop_start; /* make it handle the possibly big ins->inst_offset */
#endif
break;
}
+ last_ins = ins;
}
+ bb->last_ins = last_ins;
bb->max_vreg = cfg->rs->next_vreg;
}
MonoCallInst *call;
guint offset;
guint8 *code = cfg->native_code + cfg->code_len;
+ MonoInst *last_ins = NULL;
guint last_offset = 0;
int max_len, cpos;
int ins_cnt = 0;
cpos += max_len;
+ last_ins = ins;
last_offset = offset;
}
MonoInst *target_label; \
target_label = mono_mempool_alloc0 ((cfg)->mempool, sizeof (MonoInst)); \
target_label->opcode = OP_LABEL; \
- MONO_INST_LIST_ADD (&target_label->node, \
- &(targetbb)->ins_list); \
+ target_label->next = (targetbb)->code; \
target_label->inst_c0 = (targetbb)->native_offset; \
+ (targetbb)->code = target_label; \
inst = mono_mempool_alloc0 ((cfg)->mempool, sizeof (MonoInst)); \
inst->opcode = op; \
(inst)->sreg1 = sr1; \
MonoInst *target_label; \
target_label = mono_mempool_alloc0 ((cfg)->mempool, sizeof (MonoInst)); \
target_label->opcode = OP_LABEL; \
- MONO_INST_LIST_ADD (&target_label->node, \
- &(targetbb)->ins_list); \
+ target_label->next = (targetbb)->code; \
target_label->inst_c0 = (targetbb)->native_offset; \
+ (targetbb)->code = target_label; \
inst = mono_mempool_alloc0 ((cfg)->mempool, sizeof (MonoInst)); \
inst->opcode = op; \
(inst)->sreg1 = sr1; \
-MINI_OP(OP_LOAD, "load")
-MINI_OP(OP_LDADDR, "ldaddr")
-MINI_OP(OP_STORE, "store")
-MINI_OP(OP_OBJADDR, "objaddr")
-MINI_OP(OP_VTADDR, "vtaddr")
-MINI_OP(OP_PHI, "phi")
-MINI_OP(OP_RENAME, "rename")
-MINI_OP(OP_COMPARE, "compare")
-MINI_OP(OP_COMPARE_IMM, "compare_imm")
-MINI_OP(OP_FCOMPARE, "fcompare")
-MINI_OP(OP_LCOMPARE, "lcompare")
-MINI_OP(OP_ICOMPARE, "icompare")
-MINI_OP(OP_ICOMPARE_IMM, "icompare_imm")
-MINI_OP(OP_LCOMPARE_IMM, "lcompare_imm")
-MINI_OP(OP_LOCAL, "local")
-MINI_OP(OP_ARG, "arg")
-MINI_OP(OP_ARGLIST, "oparglist")
-MINI_OP(OP_OUTARG, "outarg")
-MINI_OP(OP_OUTARG_REG, "outarg_reg")
-MINI_OP(OP_OUTARG_FREG, "outarg_freg")
-MINI_OP(OP_OUTARG_FREG_R4, "outarg_freg_r4")
-MINI_OP(OP_OUTARG_IMM, "outarg_imm")
-MINI_OP(OP_OUTARG_R4, "outarg_r4")
-MINI_OP(OP_OUTARG_R8, "outarg_r8")
-MINI_OP(OP_OUTARG_VT, "outarg_vt")
-MINI_OP(OP_OUTARG_MEMBASE, "outarg_membase")
-MINI_OP(OP_INARG_VT, "inarg_vt")
-MINI_OP(OP_RETARG, "retarg")
-MINI_OP(OP_SETRET, "setret")
-MINI_OP(OP_SETFRET, "setfret")
-MINI_OP(OP_SETLRET, "setlret")
-MINI_OP(OP_LOCALLOC, "localloc")
-MINI_OP(OP_LOCALLOC_IMM, "localloc_imm")
-MINI_OP(OP_CHECK_THIS, "checkthis")
-MINI_OP(OP_CHECK_THIS_PASSTHROUGH, "checkthis_passthrough")
-MINI_OP(OP_VOIDCALL, "voidcall")
-MINI_OP(OP_VOIDCALLVIRT, "voidcallvirt")
-MINI_OP(OP_VOIDCALL_REG, "voidcall_reg")
-MINI_OP(OP_VOIDCALL_MEMBASE, "voidcall_membase")
-MINI_OP(OP_TRAMPCALL_VTABLE, "trampcall_vtable")
-MINI_OP(OP_VOIDCALL_RGCTX, "voidcall_rgctx")
-MINI_OP(OP_LCALL_RGCTX, "lcall_rgctx")
-MINI_OP(OP_VCALL_RGCTX, "vcall_rgctx")
-MINI_OP(OP_CALL_RGCTX, "call_rgctx")
-MINI_OP(OP_FCALL_RGCTX, "fcall_rgctx")
-MINI_OP(OP_VOIDCALL_REG_RGCTX, "voidcall_reg_rgctx")
-MINI_OP(OP_LCALL_REG_RGCTX, "lcall_reg_rgctx")
-MINI_OP(OP_VCALL_REG_RGCTX, "vcall_reg_rgctx")
-MINI_OP(OP_CALL_REG_RGCTX, "call_reg_rgctx")
-MINI_OP(OP_VOIDCALLVIRT_IMT, "voidcallvirt_imt")
-MINI_OP(OP_CALLVIRT_IMT, "callvirt_imt")
-MINI_OP(OP_FCALL_REG_RGCTX, "fcall_reg_rgctx")
-MINI_OP(OP_FCALL, "fcall")
-MINI_OP(OP_FCALLVIRT, "fcallvirt")
-MINI_OP(OP_FCALL_REG, "fcall_reg")
-MINI_OP(OP_FCALL_MEMBASE, "fcall_membase")
-MINI_OP(OP_FCALLVIRT_IMT, "fcallvirt_imt")
-MINI_OP(OP_LCALL, "lcall")
-MINI_OP(OP_LCALLVIRT, "lcallvirt")
-MINI_OP(OP_LCALL_REG, "lcall_reg")
-MINI_OP(OP_LCALL_MEMBASE, "lcall_membase")
-MINI_OP(OP_LCALLVIRT_IMT, "lcallvirt_imt")
-MINI_OP(OP_VCALL, "vcall")
-MINI_OP(OP_VCALLVIRT, "vcallvirt")
-MINI_OP(OP_VCALL_REG, "vcall_reg")
-MINI_OP(OP_VCALL_MEMBASE, "vcall_membase")
-MINI_OP(OP_VCALLVIRT_IMT, "vcallvirt_imt")
-MINI_OP(OP_CALL, "call")
-MINI_OP(OP_CALLVIRT, "callvirt")
-MINI_OP(OP_CALL_REG, "call_reg")
-MINI_OP(OP_CALL_MEMBASE, "call_membase")
-MINI_OP(OP_VTARG_ADDR, "vtarg_addr")
-MINI_OP(OP_TRAP, "trap")
-MINI_OP(OP_ICONST, "iconst")
-MINI_OP(OP_I8CONST, "i8const")
-MINI_OP(OP_R4CONST, "r4const")
-MINI_OP(OP_R8CONST, "r8const")
-MINI_OP(OP_REGVAR, "regvar")
-MINI_OP(OP_REG, "reg")
-MINI_OP(OP_REGOFFSET, "regoffset")
-MINI_OP(OP_LABEL, "label")
-MINI_OP(OP_SWITCH, "switch")
-MINI_OP(OP_CHECK_ARRAY_TYPE, "check_array_type")
-MINI_OP(OP_ARRAY_RANK, "array_rank")
-MINI_OP(OP_THROW, "throw")
-MINI_OP(OP_THROW_OR_NULL, "throw_or_null")
-MINI_OP(OP_RETHROW, "rethrow")
-MINI_OP(OP_REFANYTYPE, "refanytype")
-MINI_OP(OP_NEWOBJ, "newobj")
-
-MINI_OP(OP_STORE_MEMBASE_IMM,"store_membase_imm")
-MINI_OP(OP_STORE_MEMBASE_REG,"store_membase_reg")
-MINI_OP(OP_STOREI1_MEMBASE_IMM, "storei1_membase_imm")
-MINI_OP(OP_STOREI1_MEMBASE_REG, "storei1_membase_reg")
-MINI_OP(OP_STOREI2_MEMBASE_IMM, "storei2_membase_imm")
-MINI_OP(OP_STOREI2_MEMBASE_REG, "storei2_membase_reg")
-MINI_OP(OP_STOREI4_MEMBASE_IMM, "storei4_membase_imm")
-MINI_OP(OP_STOREI4_MEMBASE_REG, "storei4_membase_reg")
-MINI_OP(OP_STOREI8_MEMBASE_IMM, "storei8_membase_imm")
-MINI_OP(OP_STOREI8_MEMBASE_REG, "storei8_membase_reg")
-MINI_OP(OP_STORER4_MEMBASE_REG, "storer4_membase_reg")
-MINI_OP(OP_STORER8_MEMBASE_REG, "storer8_membase_reg")
-MINI_OP(OP_LOAD_MEMBASE, "load_membase")
-MINI_OP(OP_LOADI1_MEMBASE,"loadi1_membase")
-MINI_OP(OP_LOADU1_MEMBASE,"loadu1_membase")
-MINI_OP(OP_LOADI2_MEMBASE,"loadi2_membase")
-MINI_OP(OP_LOADU2_MEMBASE,"loadu2_membase")
-MINI_OP(OP_LOADI4_MEMBASE,"loadi4_membase")
-MINI_OP(OP_LOADU4_MEMBASE,"loadu4_membase")
-MINI_OP(OP_LOADI8_MEMBASE,"loadi8_membase")
-MINI_OP(OP_LOADR4_MEMBASE,"loadr4_membase")
-MINI_OP(OP_LOADR8_MEMBASE,"loadr8_membase")
+MINI_OP(OP_LOAD, "load", NONE, NONE, NONE)
+MINI_OP(OP_LDADDR, "ldaddr", IREG, NONE, NONE)
+MINI_OP(OP_STORE, "store", NONE, NONE, NONE)
+MINI_OP(OP_OBJADDR, "objaddr", NONE, NONE, NONE)
+MINI_OP(OP_VTADDR, "vtaddr", NONE, NONE, NONE)
+MINI_OP(OP_NOP, "nop", NONE, NONE, NONE)
+MINI_OP(OP_PHI, "phi", IREG, NONE, NONE)
+MINI_OP(OP_FPHI, "fphi", FREG, NONE, NONE)
+MINI_OP(OP_VPHI, "vphi", VREG, NONE, NONE)
+MINI_OP(OP_RENAME, "rename", NONE, NONE, NONE)
+MINI_OP(OP_COMPARE, "compare", NONE, IREG, IREG)
+MINI_OP(OP_COMPARE_IMM, "compare_imm", NONE, IREG, NONE)
+MINI_OP(OP_FCOMPARE, "fcompare", NONE, FREG, FREG)
+MINI_OP(OP_LCOMPARE, "lcompare", NONE, LREG, LREG)
+MINI_OP(OP_ICOMPARE, "icompare", NONE, IREG, IREG)
+MINI_OP(OP_ICOMPARE_IMM, "icompare_imm", NONE, IREG, NONE)
+MINI_OP(OP_LCOMPARE_IMM, "lcompare_imm", NONE, LREG, NONE)
+MINI_OP(OP_LOCAL, "local", NONE, NONE, NONE)
+MINI_OP(OP_ARG, "arg", NONE, NONE, NONE)
+MINI_OP(OP_ARGLIST, "oparglist", NONE, IREG, NONE)
+MINI_OP(OP_OUTARG, "outarg", NONE, NONE, NONE)
+MINI_OP(OP_OUTARG_REG, "outarg_reg", NONE, NONE, NONE)
+MINI_OP(OP_OUTARG_FREG, "outarg_freg", NONE, NONE, NONE)
+MINI_OP(OP_OUTARG_FREG_R4, "outarg_freg_r4", NONE, NONE, NONE)
+MINI_OP(OP_OUTARG_IMM, "outarg_imm", NONE, NONE, NONE)
+MINI_OP(OP_OUTARG_R4, "outarg_r4", NONE, NONE, NONE)
+MINI_OP(OP_OUTARG_R8, "outarg_r8", NONE, NONE, NONE)
+MINI_OP(OP_OUTARG_VT, "outarg_vt", NONE, VREG, NONE)
+MINI_OP(OP_OUTARG_VTRETADDR, "outarg_vtretaddr", IREG, NONE, NONE)
+MINI_OP(OP_OUTARG_MEMBASE, "outarg_membase", NONE, NONE, NONE)
+MINI_OP(OP_INARG_VT, "inarg_vt", NONE, NONE, NONE)
+MINI_OP(OP_RETARG, "retarg", NONE, NONE, NONE)
+MINI_OP(OP_SETRET, "setret", NONE, NONE, NONE)
+MINI_OP(OP_SETFRET, "setfret", FREG, FREG, NONE)
+MINI_OP(OP_SETLRET, "setlret", NONE, IREG, IREG)
+MINI_OP(OP_LOCALLOC, "localloc", IREG, IREG, NONE)
+MINI_OP(OP_LOCALLOC_IMM, "localloc_imm", IREG, NONE, NONE)
+MINI_OP(OP_CHECK_THIS, "checkthis", NONE, IREG, NONE)
+MINI_OP(OP_CHECK_THIS_PASSTHROUGH, "checkthis_passthrough", NONE, NONE, NONE)
+MINI_OP(OP_VOIDCALL, "voidcall", NONE, NONE, NONE)
+MINI_OP(OP_VOIDCALLVIRT, "voidcallvirt", NONE, NONE, NONE)
+MINI_OP(OP_VOIDCALL_REG, "voidcall_reg", NONE, IREG, NONE)
+MINI_OP(OP_VOIDCALL_MEMBASE, "voidcall_membase", NONE, IREG, NONE)
+MINI_OP(OP_FCALL, "fcall", FREG, NONE, NONE)
+MINI_OP(OP_FCALLVIRT, "fcallvirt", FREG, NONE, NONE)
+MINI_OP(OP_FCALL_REG, "fcall_reg", FREG, IREG, NONE)
+MINI_OP(OP_FCALL_MEMBASE, "fcall_membase", FREG, IREG, NONE)
+MINI_OP(OP_LCALL, "lcall", LREG, NONE, NONE)
+MINI_OP(OP_LCALLVIRT, "lcallvirt", LREG, NONE, NONE)
+MINI_OP(OP_LCALL_REG, "lcall_reg", LREG, IREG, NONE)
+MINI_OP(OP_LCALL_MEMBASE, "lcall_membase", LREG, IREG, NONE)
+MINI_OP(OP_VCALL, "vcall", VREG, NONE, NONE)
+MINI_OP(OP_VCALLVIRT, "vcallvirt", VREG, NONE, NONE)
+MINI_OP(OP_VCALL_REG, "vcall_reg", VREG, IREG, NONE)
+MINI_OP(OP_VCALL_MEMBASE, "vcall_membase", VREG, IREG, NONE)
+MINI_OP(OP_VOIDCALL_RGCTX, "voidcall_rgctx", NONE, NONE, NONE)
+MINI_OP(OP_LCALL_RGCTX, "lcall_rgctx", NONE, NONE, NONE)
+MINI_OP(OP_VCALL_RGCTX, "vcall_rgctx", NONE, NONE, NONE)
+MINI_OP(OP_CALL_RGCTX, "call_rgctx", NONE, NONE, NONE)
+MINI_OP(OP_FCALL_RGCTX, "fcall_rgctx", NONE, NONE, NONE)
+MINI_OP(OP_VOIDCALL_REG_RGCTX, "voidcall_reg_rgctx", NONE, NONE, NONE)
+MINI_OP(OP_LCALL_REG_RGCTX, "lcall_reg_rgctx", NONE, NONE, NONE)
+MINI_OP(OP_VCALL_REG_RGCTX, "vcall_reg_rgctx", NONE, NONE, NONE)
+MINI_OP(OP_CALL_REG_RGCTX, "call_reg_rgctx", NONE, NONE, NONE)
+MINI_OP(OP_VOIDCALLVIRT_IMT, "voidcallvirt_imt", NONE, NONE, NONE)
+MINI_OP(OP_CALLVIRT_IMT, "callvirt_imt", NONE, NONE, NONE)
+MINI_OP(OP_FCALLVIRT_IMT, "fcallvirt_imt", NONE, NONE, NONE)
+MINI_OP(OP_LCALLVIRT_IMT, "lcallvirt_imt", NONE, NONE, NONE)
+MINI_OP(OP_VCALLVIRT_IMT, "vcallvirt_imt", NONE, NONE, NONE)
+MINI_OP(OP_FCALL_REG_RGCTX, "fcall_reg_rgctx", NONE, NONE, NONE)
+
+MINI_OP(OP_TRAMPCALL_VTABLE, "trampcall_vtable", NONE, NONE, NONE)
+
+/* Represents the decomposed vcall which doesn't return a vtype anymore */
+MINI_OP(OP_VCALL2, "vcall2", NONE, NONE, NONE)
+MINI_OP(OP_VCALL2_REG, "vcall2_reg", NONE, IREG, NONE)
+MINI_OP(OP_VCALL2_MEMBASE, "vcall2_membase", NONE, IREG, NONE)
+
+MINI_OP(OP_CALL, "call", IREG, NONE, NONE)
+MINI_OP(OP_CALL_REG, "call_reg", IREG, IREG, NONE)
+MINI_OP(OP_CALL_MEMBASE, "call_membase", IREG, IREG, NONE)
+MINI_OP(OP_CALLVIRT, "callvirt", IREG, NONE, NONE)
+MINI_OP(OP_TRAP, "trap", NONE, NONE, NONE)
+MINI_OP(OP_ICONST, "iconst", IREG, NONE, NONE)
+MINI_OP(OP_I8CONST, "i8const", LREG, NONE, NONE)
+MINI_OP(OP_R4CONST, "r4const", FREG, NONE, NONE)
+MINI_OP(OP_R8CONST, "r8const", FREG, NONE, NONE)
+MINI_OP(OP_REGVAR, "regvar", NONE, NONE, NONE)
+MINI_OP(OP_REG, "reg", NONE, NONE, NONE)
+MINI_OP(OP_REGOFFSET, "regoffset", NONE, NONE, NONE)
+MINI_OP(OP_VTARG_ADDR, "vtarg_addr", NONE, NONE, NONE)
+MINI_OP(OP_LABEL, "label", NONE, NONE, NONE)
+MINI_OP(OP_SWITCH, "switch", NONE, IREG, NONE)
+MINI_OP(OP_CHECK_ARRAY_TYPE, "check_array_type", NONE, NONE, NONE)
+MINI_OP(OP_ARRAY_RANK, "array_rank", NONE, NONE, NONE)
+MINI_OP(OP_THROW, "throw", NONE, IREG, NONE)
+MINI_OP(OP_THROW_OR_NULL, "throw_or_null", NONE, NONE, NONE)
+MINI_OP(OP_RETHROW, "rethrow", NONE, IREG, NONE)
+MINI_OP(OP_REFANYTYPE, "refanytype", NONE, NONE, NONE)
+
+/* MONO_IS_STORE_MEMBASE depends on the order here */
+MINI_OP(OP_STORE_MEMBASE_REG,"store_membase_reg", IREG, IREG, NONE)
+MINI_OP(OP_STOREI1_MEMBASE_REG, "storei1_membase_reg", IREG, IREG, NONE)
+MINI_OP(OP_STOREI2_MEMBASE_REG, "storei2_membase_reg", IREG, IREG, NONE)
+MINI_OP(OP_STOREI4_MEMBASE_REG, "storei4_membase_reg", IREG, IREG, NONE)
+MINI_OP(OP_STOREI8_MEMBASE_REG, "storei8_membase_reg", IREG, IREG, NONE)
+MINI_OP(OP_STORER4_MEMBASE_REG, "storer4_membase_reg", IREG, FREG, NONE)
+MINI_OP(OP_STORER8_MEMBASE_REG, "storer8_membase_reg", IREG, FREG, NONE)
+
+MINI_OP(OP_STORE_MEMBASE_IMM,"store_membase_imm", IREG, NONE, NONE)
+MINI_OP(OP_STOREI1_MEMBASE_IMM, "storei1_membase_imm", IREG, NONE, NONE)
+MINI_OP(OP_STOREI2_MEMBASE_IMM, "storei2_membase_imm", IREG, NONE, NONE)
+MINI_OP(OP_STOREI4_MEMBASE_IMM, "storei4_membase_imm", IREG, NONE, NONE)
+MINI_OP(OP_STOREI8_MEMBASE_IMM, "storei8_membase_imm", IREG, NONE, NONE)
+MINI_OP(OP_STOREV_MEMBASE, "storev_membase", IREG, VREG, NONE)
+
+/* MONO_IS_LOAD_MEMBASE depends on the order here */
+MINI_OP(OP_LOAD_MEMBASE, "load_membase", IREG, IREG, NONE)
+MINI_OP(OP_LOADI1_MEMBASE,"loadi1_membase", IREG, IREG, NONE)
+MINI_OP(OP_LOADU1_MEMBASE,"loadu1_membase", IREG, IREG, NONE)
+MINI_OP(OP_LOADI2_MEMBASE,"loadi2_membase", IREG, IREG, NONE)
+MINI_OP(OP_LOADU2_MEMBASE,"loadu2_membase", IREG, IREG, NONE)
+MINI_OP(OP_LOADI4_MEMBASE,"loadi4_membase", IREG, IREG, NONE)
+MINI_OP(OP_LOADU4_MEMBASE,"loadu4_membase", IREG, IREG, NONE)
+MINI_OP(OP_LOADI8_MEMBASE,"loadi8_membase", IREG, IREG, NONE)
+MINI_OP(OP_LOADR4_MEMBASE,"loadr4_membase", FREG, IREG, NONE)
+MINI_OP(OP_LOADR8_MEMBASE,"loadr8_membase", FREG, IREG, NONE)
+MINI_OP(OP_LOADV_MEMBASE, "loadv_membase", VREG, IREG, NONE)
+
/* indexed loads: dreg = load at (sreg1 + sreg2)*/
-MINI_OP(OP_LOAD_MEMINDEX, "load_memindex")
-MINI_OP(OP_LOADI1_MEMINDEX,"loadi1_memindex")
-MINI_OP(OP_LOADU1_MEMINDEX,"loadu1_memindex")
-MINI_OP(OP_LOADI2_MEMINDEX,"loadi2_memindex")
-MINI_OP(OP_LOADU2_MEMINDEX,"loadu2_memindex")
-MINI_OP(OP_LOADI4_MEMINDEX,"loadi4_memindex")
-MINI_OP(OP_LOADU4_MEMINDEX,"loadu4_memindex")
-MINI_OP(OP_LOADI8_MEMINDEX,"loadi8_memindex")
-MINI_OP(OP_LOADR4_MEMINDEX,"loadr4_memindex")
-MINI_OP(OP_LOADR8_MEMINDEX,"loadr8_memindex")
+MINI_OP(OP_LOAD_MEMINDEX, "load_memindex", NONE, NONE, NONE)
+MINI_OP(OP_LOADI1_MEMINDEX,"loadi1_memindex", NONE, NONE, NONE)
+MINI_OP(OP_LOADU1_MEMINDEX,"loadu1_memindex", NONE, NONE, NONE)
+MINI_OP(OP_LOADI2_MEMINDEX,"loadi2_memindex", NONE, NONE, NONE)
+MINI_OP(OP_LOADU2_MEMINDEX,"loadu2_memindex", NONE, NONE, NONE)
+MINI_OP(OP_LOADI4_MEMINDEX,"loadi4_memindex", NONE, NONE, NONE)
+MINI_OP(OP_LOADU4_MEMINDEX,"loadu4_memindex", NONE, NONE, NONE)
+MINI_OP(OP_LOADI8_MEMINDEX,"loadi8_memindex", NONE, NONE, NONE)
+MINI_OP(OP_LOADR4_MEMINDEX,"loadr4_memindex", NONE, NONE, NONE)
+MINI_OP(OP_LOADR8_MEMINDEX,"loadr8_memindex", NONE, NONE, NONE)
/* indexed stores: store sreg1 at (destbasereg + sreg2) */
-MINI_OP(OP_STORE_MEMINDEX,"store_memindex")
-MINI_OP(OP_STOREI1_MEMINDEX,"storei1_memindex")
-MINI_OP(OP_STOREI2_MEMINDEX,"storei2_memindex")
-MINI_OP(OP_STOREI4_MEMINDEX,"storei4_memindex")
-MINI_OP(OP_STOREI8_MEMINDEX,"storei8_memindex")
-MINI_OP(OP_STORER4_MEMINDEX,"storer4_memindex")
-MINI_OP(OP_STORER8_MEMINDEX,"storer8_memindex")
-
-/* Loads from an absolute address */
-MINI_OP(OP_LOAD_MEM, "load_mem")
-MINI_OP(OP_LOADI8_MEM, "loadi8_mem")
-MINI_OP(OP_LOADI4_MEM, "loadi4_mem")
-MINI_OP(OP_LOADU1_MEM, "loadu1_mem")
-MINI_OP(OP_LOADU2_MEM, "loadu2_mem")
-MINI_OP(OP_STORE_MEM_IMM, "store_mem_imm")
-
-MINI_OP(OP_LOADR8_SPILL_MEMBASE,"loadr8_spill_membase")
-MINI_OP(OP_LOADU4_MEM,"loadu4_mem")
-MINI_OP(OP_MOVE, "move")
-MINI_OP(OP_FMOVE, "fmove")
-
-MINI_OP(OP_ADD_IMM, "add_imm")
-MINI_OP(OP_SUB_IMM, "sub_imm")
-MINI_OP(OP_MUL_IMM, "mul_imm")
-MINI_OP(OP_DIV_IMM, "div_imm")
-MINI_OP(OP_DIV_UN_IMM, "div_un_imm")
-MINI_OP(OP_REM_IMM, "rem_imm")
-MINI_OP(OP_REM_UN_IMM, "rem_un_imm")
-MINI_OP(OP_AND_IMM, "and_imm")
-MINI_OP(OP_OR_IMM, "or_imm")
-MINI_OP(OP_XOR_IMM, "xor_imm")
-MINI_OP(OP_SHL_IMM, "shl_imm")
-MINI_OP(OP_SHR_IMM, "shr_imm")
-MINI_OP(OP_SHR_UN_IMM, "shr_un_imm")
-
-MINI_OP(OP_NOP, "nop")
-MINI_OP(OP_BR, "br")
-MINI_OP(OP_JMP, "jmp")
-MINI_OP(OP_BREAK, "break")
-MINI_OP(OP_CKFINITE, "ckfinite")
-
-/* Must be in the same order as the matching CEE_ branch opcodes */
-MINI_OP(OP_CEQ, "ceq")
-MINI_OP(OP_CGT, "cgt")
-MINI_OP(OP_CGT_UN,"cgt.un")
-MINI_OP(OP_CLT, "clt")
-MINI_OP(OP_CLT_UN,"clt.un")
+/* MONO_IS_STORE_MEMINDEX depends on the order here */
+MINI_OP(OP_STORE_MEMINDEX,"store_memindex", NONE, NONE, NONE)
+MINI_OP(OP_STOREI1_MEMINDEX,"storei1_memindex", NONE, NONE, NONE)
+MINI_OP(OP_STOREI2_MEMINDEX,"storei2_memindex", NONE, NONE, NONE)
+MINI_OP(OP_STOREI4_MEMINDEX,"storei4_memindex", NONE, NONE, NONE)
+MINI_OP(OP_STOREI8_MEMINDEX,"storei8_memindex", NONE, NONE, NONE)
+MINI_OP(OP_STORER4_MEMINDEX,"storer4_memindex", NONE, NONE, NONE)
+MINI_OP(OP_STORER8_MEMINDEX,"storer8_memindex", NONE, NONE, NONE)
+
+MINI_OP(OP_LOADR8_SPILL_MEMBASE,"loadr8_spill_membase", NONE, NONE, NONE)
+MINI_OP(OP_LOAD_MEM,"load_mem", IREG, NONE, NONE)
+MINI_OP(OP_LOADU1_MEM,"loadu1_mem", IREG, NONE, NONE)
+MINI_OP(OP_LOADU2_MEM,"loadu2_mem", IREG, NONE, NONE)
+MINI_OP(OP_LOADI4_MEM,"loadi4_mem", IREG, NONE, NONE)
+MINI_OP(OP_LOADU4_MEM,"loadu4_mem", IREG, NONE, NONE)
+MINI_OP(OP_LOADI8_MEM,"loadi8_mem", IREG, NONE, NONE)
+MINI_OP(OP_STORE_MEM_IMM, "store_mem_imm", NONE, NONE, NONE)
+
+MINI_OP(OP_MOVE, "move", IREG, IREG, NONE)
+MINI_OP(OP_LMOVE, "lmove", IREG, IREG, NONE)
+MINI_OP(OP_FMOVE, "fmove", FREG, FREG, NONE)
+MINI_OP(OP_VMOVE, "vmove", VREG, VREG, NONE)
+
+MINI_OP(OP_VZERO, "vzero", VREG, NONE, NONE)
+
+MINI_OP(OP_ADD_IMM, "add_imm", IREG, IREG, NONE)
+MINI_OP(OP_SUB_IMM, "sub_imm", IREG, IREG, NONE)
+MINI_OP(OP_MUL_IMM, "mul_imm", IREG, IREG, NONE)
+MINI_OP(OP_DIV_IMM, "div_imm", IREG, IREG, NONE)
+MINI_OP(OP_DIV_UN_IMM, "div_un_imm", IREG, IREG, NONE)
+MINI_OP(OP_REM_IMM, "rem_imm", IREG, IREG, NONE)
+MINI_OP(OP_REM_UN_IMM, "rem_un_imm", IREG, IREG, NONE)
+MINI_OP(OP_AND_IMM, "and_imm", IREG, IREG, NONE)
+MINI_OP(OP_OR_IMM, "or_imm", IREG, IREG, NONE)
+MINI_OP(OP_XOR_IMM, "xor_imm", IREG, IREG, NONE)
+MINI_OP(OP_SHL_IMM, "shl_imm", IREG, IREG, NONE)
+MINI_OP(OP_SHR_IMM, "shr_imm", IREG, IREG, NONE)
+MINI_OP(OP_SHR_UN_IMM, "shr_un_imm", IREG, IREG, NONE)
+
+MINI_OP(OP_BR, "br", NONE, NONE, NONE)
+MINI_OP(OP_JMP, "jmp", NONE, NONE, NONE)
+/* Same as OP_JMP, but the passing of arguments is done similarly to calls */
+MINI_OP(OP_TAILCALL, "tailcall", NONE, NONE, NONE)
+MINI_OP(OP_BREAK, "break", NONE, NONE, NONE)
+
+MINI_OP(OP_CEQ, "ceq", IREG, NONE, NONE)
+MINI_OP(OP_CGT, "cgt", IREG, NONE, NONE)
+MINI_OP(OP_CGT_UN,"cgt.un", IREG, NONE, NONE)
+MINI_OP(OP_CLT, "clt", IREG, NONE, NONE)
+MINI_OP(OP_CLT_UN,"clt.un", IREG, NONE, NONE)
/* exceptions: must be in the same order as the matching CEE_ branch opcodes */
-MINI_OP(OP_COND_EXC_EQ, "cond_exc_eq")
-MINI_OP(OP_COND_EXC_GE, "cond_exc_ge")
-MINI_OP(OP_COND_EXC_GT, "cond_exc_gt")
-MINI_OP(OP_COND_EXC_LE, "cond_exc_le")
-MINI_OP(OP_COND_EXC_LT, "cond_exc_lt")
-MINI_OP(OP_COND_EXC_NE_UN, "cond_exc_ne_un")
-MINI_OP(OP_COND_EXC_GE_UN, "cond_exc_ge_un")
-MINI_OP(OP_COND_EXC_GT_UN, "cond_exc_gt_un")
-MINI_OP(OP_COND_EXC_LE_UN, "cond_exc_le_un")
-MINI_OP(OP_COND_EXC_LT_UN, "cond_exc_lt_un")
-
-MINI_OP(OP_COND_EXC_OV, "cond_exc_ov")
-MINI_OP(OP_COND_EXC_NO, "cond_exc_no")
-MINI_OP(OP_COND_EXC_C, "cond_exc_c")
-MINI_OP(OP_COND_EXC_NC, "cond_exc_nc")
-
-MINI_OP(OP_COND_EXC_IEQ, "cond_exc_ieq")
-MINI_OP(OP_COND_EXC_IGE, "cond_exc_ige")
-MINI_OP(OP_COND_EXC_IGT, "cond_exc_igt")
-MINI_OP(OP_COND_EXC_ILE, "cond_exc_ile")
-MINI_OP(OP_COND_EXC_ILT, "cond_exc_ilt")
-MINI_OP(OP_COND_EXC_INE_UN, "cond_exc_ine_un")
-MINI_OP(OP_COND_EXC_IGE_UN, "cond_exc_ige_un")
-MINI_OP(OP_COND_EXC_IGT_UN, "cond_exc_igt_un")
-MINI_OP(OP_COND_EXC_ILE_UN, "cond_exc_ile_un")
-MINI_OP(OP_COND_EXC_ILT_UN, "cond_exc_ilt_un")
-
-MINI_OP(OP_COND_EXC_IOV, "cond_exc_iov")
-MINI_OP(OP_COND_EXC_INO, "cond_exc_ino")
-MINI_OP(OP_COND_EXC_IC, "cond_exc_ic")
-MINI_OP(OP_COND_EXC_INC, "cond_exc_inc")
+MINI_OP(OP_COND_EXC_EQ, "cond_exc_eq", NONE, NONE, NONE)
+MINI_OP(OP_COND_EXC_GE, "cond_exc_ge", NONE, NONE, NONE)
+MINI_OP(OP_COND_EXC_GT, "cond_exc_gt", NONE, NONE, NONE)
+MINI_OP(OP_COND_EXC_LE, "cond_exc_le", NONE, NONE, NONE)
+MINI_OP(OP_COND_EXC_LT, "cond_exc_lt", NONE, NONE, NONE)
+MINI_OP(OP_COND_EXC_NE_UN, "cond_exc_ne_un", NONE, NONE, NONE)
+MINI_OP(OP_COND_EXC_GE_UN, "cond_exc_ge_un", NONE, NONE, NONE)
+MINI_OP(OP_COND_EXC_GT_UN, "cond_exc_gt_un", NONE, NONE, NONE)
+MINI_OP(OP_COND_EXC_LE_UN, "cond_exc_le_un", NONE, NONE, NONE)
+MINI_OP(OP_COND_EXC_LT_UN, "cond_exc_lt_un", NONE, NONE, NONE)
+
+MINI_OP(OP_COND_EXC_OV, "cond_exc_ov", NONE, NONE, NONE)
+MINI_OP(OP_COND_EXC_NO, "cond_exc_no", NONE, NONE, NONE)
+MINI_OP(OP_COND_EXC_C, "cond_exc_c", NONE, NONE, NONE)
+MINI_OP(OP_COND_EXC_NC, "cond_exc_nc", NONE, NONE, NONE)
+
+MINI_OP(OP_COND_EXC_IEQ, "cond_exc_ieq", NONE, NONE, NONE)
+MINI_OP(OP_COND_EXC_IGE, "cond_exc_ige", NONE, NONE, NONE)
+MINI_OP(OP_COND_EXC_IGT, "cond_exc_igt", NONE, NONE, NONE)
+MINI_OP(OP_COND_EXC_ILE, "cond_exc_ile", NONE, NONE, NONE)
+MINI_OP(OP_COND_EXC_ILT, "cond_exc_ilt", NONE, NONE, NONE)
+MINI_OP(OP_COND_EXC_INE_UN, "cond_exc_ine_un", NONE, NONE, NONE)
+MINI_OP(OP_COND_EXC_IGE_UN, "cond_exc_ige_un", NONE, NONE, NONE)
+MINI_OP(OP_COND_EXC_IGT_UN, "cond_exc_igt_un", NONE, NONE, NONE)
+MINI_OP(OP_COND_EXC_ILE_UN, "cond_exc_ile_un", NONE, NONE, NONE)
+MINI_OP(OP_COND_EXC_ILT_UN, "cond_exc_ilt_un", NONE, NONE, NONE)
+
+MINI_OP(OP_COND_EXC_IOV, "cond_exc_iov", NONE, NONE, NONE)
+MINI_OP(OP_COND_EXC_INO, "cond_exc_ino", NONE, NONE, NONE)
+MINI_OP(OP_COND_EXC_IC, "cond_exc_ic", NONE, NONE, NONE)
+MINI_OP(OP_COND_EXC_INC, "cond_exc_inc", NONE, NONE, NONE)
/* 64 bit opcodes: must be in the same order as the matching CEE_ opcodes: binops_op_map */
-MINI_OP(OP_LADD, "long_add")
-MINI_OP(OP_LSUB, "long_sub")
-MINI_OP(OP_LMUL, "long_mul")
-MINI_OP(OP_LDIV, "long_div")
-MINI_OP(OP_LDIV_UN, "long_div_un")
-MINI_OP(OP_LREM, "long_rem")
-MINI_OP(OP_LREM_UN, "long_rem_un")
-MINI_OP(OP_LAND, "long_and")
-MINI_OP(OP_LOR, "long_or")
-MINI_OP(OP_LXOR, "long_xor")
-MINI_OP(OP_LSHL, "long_shl")
-MINI_OP(OP_LSHR, "long_shr")
-MINI_OP(OP_LSHR_UN, "long_shr_un")
+MINI_OP(OP_LADD, "long_add", LREG, LREG, LREG)
+MINI_OP(OP_LSUB, "long_sub", LREG, LREG, LREG)
+MINI_OP(OP_LMUL, "long_mul", LREG, LREG, LREG)
+MINI_OP(OP_LDIV, "long_div", LREG, LREG, LREG)
+MINI_OP(OP_LDIV_UN, "long_div_un", LREG, LREG, LREG)
+MINI_OP(OP_LREM, "long_rem", LREG, LREG, LREG)
+MINI_OP(OP_LREM_UN, "long_rem_un", LREG, LREG, LREG)
+MINI_OP(OP_LAND, "long_and", LREG, LREG, LREG)
+MINI_OP(OP_LOR, "long_or", LREG, LREG, LREG)
+MINI_OP(OP_LXOR, "long_xor", LREG, LREG, LREG)
+MINI_OP(OP_LSHL, "long_shl", LREG, LREG, IREG)
+MINI_OP(OP_LSHR, "long_shr", LREG, LREG, IREG)
+MINI_OP(OP_LSHR_UN, "long_shr_un", LREG, LREG, IREG)
/* 64 bit opcodes: must be in the same order as the matching CEE_ opcodes: unops_op_map */
-MINI_OP(OP_LNEG, "long_neg")
-MINI_OP(OP_LNOT, "long_not")
-MINI_OP(OP_LCONV_TO_I1,"long_conv_to_i1")
-MINI_OP(OP_LCONV_TO_I2,"long_conv_to_i2")
-MINI_OP(OP_LCONV_TO_I4,"long_conv_to_i4")
-MINI_OP(OP_LCONV_TO_I8,"long_conv_to_i8")
-MINI_OP(OP_LCONV_TO_R4,"long_conv_to_r4")
-MINI_OP(OP_LCONV_TO_R8,"long_conv_to_r8")
-MINI_OP(OP_LCONV_TO_U4,"long_conv_to_u4")
-MINI_OP(OP_LCONV_TO_U8,"long_conv_to_u8")
-
-MINI_OP(OP_LCONV_TO_U2, "long_conv_to_u2")
-MINI_OP(OP_LCONV_TO_U1, "long_conv_to_u1")
-MINI_OP(OP_LCONV_TO_I, "long_conv_to_i")
-MINI_OP(OP_LCONV_TO_OVF_I,"long_conv_to_ovf_i")
-MINI_OP(OP_LCONV_TO_OVF_U,"long_conv_to_ovf_u")
-MINI_OP(OP_LADD_OVF, "long_add_ovf")
-MINI_OP(OP_LADD_OVF_UN, "long_add_ovf_un")
-MINI_OP(OP_LMUL_OVF, "long_mul_ovf")
-MINI_OP(OP_LMUL_OVF_UN, "long_mul_ovf_un")
-MINI_OP(OP_LSUB_OVF, "long_sub_ovf")
-MINI_OP(OP_LSUB_OVF_UN, "long_sub_ovf_un")
-
-MINI_OP(OP_LCONV_TO_OVF_I1_UN,"long_conv_to_ovf_i1_un")
-MINI_OP(OP_LCONV_TO_OVF_I2_UN,"long_conv_to_ovf_i2_un")
-MINI_OP(OP_LCONV_TO_OVF_I4_UN,"long_conv_to_ovf_i4_un")
-MINI_OP(OP_LCONV_TO_OVF_I8_UN,"long_conv_to_ovf_i8_un")
-MINI_OP(OP_LCONV_TO_OVF_U1_UN,"long_conv_to_ovf_u1_un")
-MINI_OP(OP_LCONV_TO_OVF_U2_UN,"long_conv_to_ovf_u2_un")
-MINI_OP(OP_LCONV_TO_OVF_U4_UN,"long_conv_to_ovf_u4_un")
-MINI_OP(OP_LCONV_TO_OVF_U8_UN,"long_conv_to_ovf_u8_un")
-MINI_OP(OP_LCONV_TO_OVF_I_UN, "long_conv_to_ovf_i_un")
-MINI_OP(OP_LCONV_TO_OVF_U_UN, "long_conv_to_ovf_u_un")
-
-MINI_OP(OP_LCONV_TO_OVF_I1,"long_conv_to_ovf_i1")
-MINI_OP(OP_LCONV_TO_OVF_U1,"long_conv_to_ovf_u1")
-MINI_OP(OP_LCONV_TO_OVF_I2,"long_conv_to_ovf_i2")
-MINI_OP(OP_LCONV_TO_OVF_U2,"long_conv_to_ovf_u2")
-MINI_OP(OP_LCONV_TO_OVF_I4,"long_conv_to_ovf_i4")
-MINI_OP(OP_LCONV_TO_OVF_U4,"long_conv_to_ovf_u4")
-MINI_OP(OP_LCONV_TO_OVF_I8,"long_conv_to_ovf_i8")
-MINI_OP(OP_LCONV_TO_OVF_U8,"long_conv_to_ovf_u8")
-
-MINI_OP(OP_LAND_IMM, "long_and_imm")
-MINI_OP(OP_LOR_IMM, "long_or_imm")
-MINI_OP(OP_LXOR_IMM, "long_xor_imm")
-
-MINI_OP(OP_LCEQ, "long_ceq")
-MINI_OP(OP_LCGT, "long_cgt")
-MINI_OP(OP_LCGT_UN,"long_cgt_un")
-MINI_OP(OP_LCLT, "long_clt")
-MINI_OP(OP_LCLT_UN,"long_clt_un")
-
-MINI_OP(OP_LCONV_TO_R_UN,"long_conv_to_r_un")
-MINI_OP(OP_LCONV_TO_U, "long_conv_to_u")
-MINI_OP(OP_LSHR_IMM, "long_shr_imm")
-MINI_OP(OP_LSHR_UN_IMM, "long_shr_un_imm")
-MINI_OP(OP_LSHL_IMM, "long_shl_imm")
-MINI_OP(OP_LADD_IMM, "long_add_imm")
-MINI_OP(OP_LSUB_IMM, "long_sub_imm")
-MINI_OP(OP_LMUL_IMM, "long_mul_imm")
-
-MINI_OP(OP_LBEQ, "long_beq")
-MINI_OP(OP_LBGE, "long_bge")
-MINI_OP(OP_LBGT, "long_bgt")
-MINI_OP(OP_LBLE, "long_ble")
-MINI_OP(OP_LBLT, "long_blt")
-MINI_OP(OP_LBNE_UN, "long_bne_un")
-MINI_OP(OP_LBGE_UN, "long_bge_un")
-MINI_OP(OP_LBGT_UN, "long_bgt_un")
-MINI_OP(OP_LBLE_UN, "long_ble_un")
-MINI_OP(OP_LBLT_UN, "long_blt_un")
-
-MINI_OP(OP_LONG_SHRUN_32, "long_shr_un_32")
+MINI_OP(OP_LNEG, "long_neg", LREG, LREG, NONE)
+MINI_OP(OP_LNOT, "long_not", LREG, LREG, NONE)
+MINI_OP(OP_LCONV_TO_I1,"long_conv_to_i1", LREG, LREG, NONE)
+MINI_OP(OP_LCONV_TO_I2,"long_conv_to_i2", LREG, LREG, NONE)
+MINI_OP(OP_LCONV_TO_I4,"long_conv_to_i4", LREG, LREG, NONE)
+MINI_OP(OP_LCONV_TO_I8,"long_conv_to_i8", LREG, LREG, NONE)
+MINI_OP(OP_LCONV_TO_R4,"long_conv_to_r4", FREG, LREG, NONE)
+MINI_OP(OP_LCONV_TO_R8,"long_conv_to_r8", FREG, LREG, NONE)
+MINI_OP(OP_LCONV_TO_U4,"long_conv_to_u4", LREG, LREG, NONE)
+MINI_OP(OP_LCONV_TO_U8,"long_conv_to_u8", LREG, LREG, NONE)
+
+MINI_OP(OP_LCONV_TO_U2, "long_conv_to_u2", LREG, LREG, NONE)
+MINI_OP(OP_LCONV_TO_U1, "long_conv_to_u1", LREG, LREG, NONE)
+MINI_OP(OP_LCONV_TO_I, "long_conv_to_i", LREG, LREG, NONE)
+MINI_OP(OP_LCONV_TO_OVF_I,"long_conv_to_ovf_i", LREG, LREG, NONE)
+MINI_OP(OP_LCONV_TO_OVF_U,"long_conv_to_ovf_u", LREG, LREG, NONE)
+
+MINI_OP(OP_LADD_OVF, "long_add_ovf", LREG, LREG, LREG)
+MINI_OP(OP_LADD_OVF_UN, "long_add_ovf_un", LREG, LREG, LREG)
+MINI_OP(OP_LMUL_OVF, "long_mul_ovf", LREG, LREG, LREG)
+MINI_OP(OP_LMUL_OVF_UN, "long_mul_ovf_un", LREG, LREG, LREG)
+MINI_OP(OP_LSUB_OVF, "long_sub_ovf", LREG, LREG, LREG)
+MINI_OP(OP_LSUB_OVF_UN, "long_sub_ovf_un", LREG, LREG, LREG)
+
+MINI_OP(OP_LCONV_TO_OVF_I1_UN,"long_conv_to_ovf_i1_un", LREG, LREG, NONE)
+MINI_OP(OP_LCONV_TO_OVF_I2_UN,"long_conv_to_ovf_i2_un", LREG, LREG, NONE)
+MINI_OP(OP_LCONV_TO_OVF_I4_UN,"long_conv_to_ovf_i4_un", LREG, LREG, NONE)
+MINI_OP(OP_LCONV_TO_OVF_I8_UN,"long_conv_to_ovf_i8_un", LREG, LREG, NONE)
+MINI_OP(OP_LCONV_TO_OVF_U1_UN,"long_conv_to_ovf_u1_un", LREG, LREG, NONE)
+MINI_OP(OP_LCONV_TO_OVF_U2_UN,"long_conv_to_ovf_u2_un", LREG, LREG, NONE)
+MINI_OP(OP_LCONV_TO_OVF_U4_UN,"long_conv_to_ovf_u4_un", LREG, LREG, NONE)
+MINI_OP(OP_LCONV_TO_OVF_U8_UN,"long_conv_to_ovf_u8_un", LREG, LREG, NONE)
+MINI_OP(OP_LCONV_TO_OVF_I_UN, "long_conv_to_ovf_i_un", LREG, LREG, NONE)
+MINI_OP(OP_LCONV_TO_OVF_U_UN, "long_conv_to_ovf_u_un", LREG, LREG, NONE)
+
+MINI_OP(OP_LCONV_TO_OVF_I1,"long_conv_to_ovf_i1", LREG, LREG, NONE)
+MINI_OP(OP_LCONV_TO_OVF_U1,"long_conv_to_ovf_u1", LREG, LREG, NONE)
+MINI_OP(OP_LCONV_TO_OVF_I2,"long_conv_to_ovf_i2", LREG, LREG, NONE)
+MINI_OP(OP_LCONV_TO_OVF_U2,"long_conv_to_ovf_u2", LREG, LREG, NONE)
+MINI_OP(OP_LCONV_TO_OVF_I4,"long_conv_to_ovf_i4", LREG, LREG, NONE)
+MINI_OP(OP_LCONV_TO_OVF_U4,"long_conv_to_ovf_u4", LREG, LREG, NONE)
+MINI_OP(OP_LCONV_TO_OVF_I8,"long_conv_to_ovf_i8", LREG, LREG, NONE)
+MINI_OP(OP_LCONV_TO_OVF_U8,"long_conv_to_ovf_u8", LREG, LREG, NONE)
+
+/* mono_decompose_long_opts () depends on the order here */
+MINI_OP(OP_LCEQ, "long_ceq", LREG, NONE, NONE)
+MINI_OP(OP_LCGT, "long_cgt", LREG, NONE, NONE)
+MINI_OP(OP_LCGT_UN,"long_cgt_un", LREG, NONE, NONE)
+MINI_OP(OP_LCLT, "long_clt", LREG, NONE, NONE)
+MINI_OP(OP_LCLT_UN,"long_clt_un", LREG, NONE, NONE)
+
+MINI_OP(OP_LCONV_TO_R_UN,"long_conv_to_r_un", FREG, LREG, NONE)
+MINI_OP(OP_LCONV_TO_U, "long_conv_to_u", IREG, LREG, NONE)
+
+MINI_OP(OP_LADD_IMM, "long_add_imm", LREG, LREG, NONE)
+MINI_OP(OP_LSUB_IMM, "long_sub_imm", LREG, LREG, NONE)
+MINI_OP(OP_LMUL_IMM, "long_mul_imm", LREG, LREG, NONE)
+MINI_OP(OP_LAND_IMM, "long_and_imm", LREG, LREG, NONE)
+MINI_OP(OP_LOR_IMM, "long_or_imm", LREG, LREG, NONE)
+MINI_OP(OP_LXOR_IMM, "long_xor_imm", LREG, LREG, NONE)
+MINI_OP(OP_LSHL_IMM, "long_shl_imm", LREG, LREG, NONE)
+MINI_OP(OP_LSHR_IMM, "long_shr_imm", LREG, LREG, NONE)
+MINI_OP(OP_LSHR_UN_IMM, "long_shr_un_imm", LREG, LREG, NONE)
+MINI_OP(OP_LDIV_IMM, "long_div_imm", LREG, LREG, NONE)
+MINI_OP(OP_LDIV_UN_IMM, "long_div_un_imm", LREG, LREG, NONE)
+MINI_OP(OP_LREM_IMM, "long_rem_imm", LREG, LREG, NONE)
+MINI_OP(OP_LREM_UN_IMM, "long_rem_un_imm", LREG, LREG, NONE)
+
+/* mono_decompose_long_opts () depends on the order here */
+MINI_OP(OP_LBEQ, "long_beq", NONE, NONE, NONE)
+MINI_OP(OP_LBGE, "long_bge", NONE, NONE, NONE)
+MINI_OP(OP_LBGT, "long_bgt", NONE, NONE, NONE)
+MINI_OP(OP_LBLE, "long_ble", NONE, NONE, NONE)
+MINI_OP(OP_LBLT, "long_blt", NONE, NONE, NONE)
+MINI_OP(OP_LBNE_UN, "long_bne_un", NONE, NONE, NONE)
+MINI_OP(OP_LBGE_UN, "long_bge_un", NONE, NONE, NONE)
+MINI_OP(OP_LBGT_UN, "long_bgt_un", NONE, NONE, NONE)
+MINI_OP(OP_LBLE_UN, "long_ble_un", NONE, NONE, NONE)
+MINI_OP(OP_LBLT_UN, "long_blt_un", NONE, NONE, NONE)
+
+MINI_OP(OP_LSHR_UN_32, "long_shr_un_32", NONE, NONE, NONE)
+
+/* Variants of the original opcodes which take the two parts of the long as two arguments */
+MINI_OP(OP_LCONV_TO_R8_2,"long_conv_to_r8_2", FREG, IREG, IREG)
+MINI_OP(OP_LCONV_TO_R4_2,"long_conv_to_r4_2", FREG, IREG, IREG)
+MINI_OP(OP_LCONV_TO_R_UN_2,"long_conv_to_r_un_2", FREG, IREG, IREG)
+MINI_OP(OP_LCONV_TO_OVF_I4_2,"long_conv_to_ovf_i4_2", IREG, IREG, IREG)
/* 32 bit opcodes: must be in the same order as the matching CEE_ opcodes: binops_op_map */
-MINI_OP(OP_IADD, "int_add")
-MINI_OP(OP_ISUB, "int_sub")
-MINI_OP(OP_IMUL, "int_mul")
-MINI_OP(OP_IDIV, "int_div")
-MINI_OP(OP_IDIV_UN, "int_div_un")
-MINI_OP(OP_IREM, "int_rem")
-MINI_OP(OP_IREM_UN, "int_rem_un")
-MINI_OP(OP_IAND, "int_and")
-MINI_OP(OP_IOR, "int_or")
-MINI_OP(OP_IXOR, "int_xor")
-MINI_OP(OP_ISHL, "int_shl")
-MINI_OP(OP_ISHR, "int_shr")
-MINI_OP(OP_ISHR_UN, "int_shr_un")
+MINI_OP(OP_IADD, "int_add", IREG, IREG, IREG)
+MINI_OP(OP_ISUB, "int_sub", IREG, IREG, IREG)
+MINI_OP(OP_IMUL, "int_mul", IREG, IREG, IREG)
+MINI_OP(OP_IDIV, "int_div", IREG, IREG, IREG)
+MINI_OP(OP_IDIV_UN, "int_div_un", IREG, IREG, IREG)
+MINI_OP(OP_IREM, "int_rem", IREG, IREG, IREG)
+MINI_OP(OP_IREM_UN, "int_rem_un", IREG, IREG, IREG)
+MINI_OP(OP_IAND, "int_and", IREG, IREG, IREG)
+MINI_OP(OP_IOR, "int_or", IREG, IREG, IREG)
+MINI_OP(OP_IXOR, "int_xor", IREG, IREG, IREG)
+MINI_OP(OP_ISHL, "int_shl", IREG, IREG, IREG)
+MINI_OP(OP_ISHR, "int_shr", IREG, IREG, IREG)
+MINI_OP(OP_ISHR_UN, "int_shr_un", IREG, IREG, IREG)
/* 32 bit opcodes: must be in the same order as the matching CEE_ opcodes: unops_op_map */
-MINI_OP(OP_INEG, "int_neg")
-MINI_OP(OP_INOT, "int_not")
-MINI_OP(OP_ICONV_TO_I1,"int_conv_to_i1")
-MINI_OP(OP_ICONV_TO_I2,"int_conv_to_i2")
-MINI_OP(OP_ICONV_TO_I4,"int_conv_to_i4")
-MINI_OP(OP_ICONV_TO_I8,"int_conv_to_i8")
-MINI_OP(OP_ICONV_TO_R4,"int_conv_to_r4")
-MINI_OP(OP_ICONV_TO_R8,"int_conv_to_r8")
-MINI_OP(OP_ICONV_TO_U4,"int_conv_to_u4")
-MINI_OP(OP_ICONV_TO_U8,"int_conv_to_u8")
+MINI_OP(OP_INEG, "int_neg", IREG, IREG, NONE)
+MINI_OP(OP_INOT, "int_not", IREG, IREG, NONE)
+MINI_OP(OP_ICONV_TO_I1,"int_conv_to_i1", IREG, IREG, NONE)
+MINI_OP(OP_ICONV_TO_I2,"int_conv_to_i2", IREG, IREG, NONE)
+MINI_OP(OP_ICONV_TO_I4,"int_conv_to_i4", IREG, IREG, NONE)
+MINI_OP(OP_ICONV_TO_I8,"int_conv_to_i8", IREG, IREG, NONE)
+MINI_OP(OP_ICONV_TO_R4,"int_conv_to_r4", FREG, IREG, NONE)
+MINI_OP(OP_ICONV_TO_R8,"int_conv_to_r8", FREG, IREG, NONE)
+MINI_OP(OP_ICONV_TO_U4,"int_conv_to_u4", IREG, IREG, NONE)
+MINI_OP(OP_ICONV_TO_U8,"int_conv_to_u8", LREG, IREG, NONE)
+
+MINI_OP(OP_ICONV_TO_R_UN, "int_conv_to_r_un", FREG, IREG, NONE)
+MINI_OP(OP_ICONV_TO_U, "int_conv_to_u", IREG, IREG, NONE)
/* 32 bit opcodes: must be in the same order as the matching CEE_ opcodes: ovfops_op_map */
-MINI_OP(OP_ICONV_TO_U2, "int_conv_to_u2")
-MINI_OP(OP_ICONV_TO_U1, "int_conv_to_u1")
-MINI_OP(OP_ICONV_TO_I, "int_conv_to_i")
-MINI_OP(OP_ICONV_TO_OVF_I,"int_conv_to_ovf_i")
-MINI_OP(OP_ICONV_TO_OVF_U,"int_conv_to_ovf_u")
-MINI_OP(OP_IADD_OVF, "int_add_ovf")
-MINI_OP(OP_IADD_OVF_UN, "int_add_ovf_un")
-MINI_OP(OP_IMUL_OVF, "int_mul_ovf")
-MINI_OP(OP_IMUL_OVF_UN, "int_mul_ovf_un")
-MINI_OP(OP_ISUB_OVF, "int_sub_ovf")
-MINI_OP(OP_ISUB_OVF_UN, "int_sub_ovf_un")
+MINI_OP(OP_ICONV_TO_U2, "int_conv_to_u2", IREG, IREG, NONE)
+MINI_OP(OP_ICONV_TO_U1, "int_conv_to_u1", IREG, IREG, NONE)
+MINI_OP(OP_ICONV_TO_I, "int_conv_to_i", IREG, IREG, NONE)
+MINI_OP(OP_ICONV_TO_OVF_I,"int_conv_to_ovf_i", IREG, IREG, NONE)
+MINI_OP(OP_ICONV_TO_OVF_U,"int_conv_to_ovf_u", IREG, IREG, NONE)
+MINI_OP(OP_IADD_OVF, "int_add_ovf", IREG, IREG, IREG)
+MINI_OP(OP_IADD_OVF_UN, "int_add_ovf_un", IREG, IREG, IREG)
+MINI_OP(OP_IMUL_OVF, "int_mul_ovf", IREG, IREG, IREG)
+MINI_OP(OP_IMUL_OVF_UN, "int_mul_ovf_un", IREG, IREG, IREG)
+MINI_OP(OP_ISUB_OVF, "int_sub_ovf", IREG, IREG, IREG)
+MINI_OP(OP_ISUB_OVF_UN, "int_sub_ovf_un", IREG, IREG, IREG)
/* 32 bit opcodes: must be in the same order as the matching CEE_ opcodes: ovf2ops_op_map */
-MINI_OP(OP_ICONV_TO_OVF_I1_UN,"int_conv_to_ovf_i1_un")
-MINI_OP(OP_ICONV_TO_OVF_I2_UN,"int_conv_to_ovf_i2_un")
-MINI_OP(OP_ICONV_TO_OVF_I4_UN,"int_conv_to_ovf_i4_un")
-MINI_OP(OP_ICONV_TO_OVF_I8_UN,"int_conv_to_ovf_i8_un")
-MINI_OP(OP_ICONV_TO_OVF_U1_UN,"int_conv_to_ovf_u1_un")
-MINI_OP(OP_ICONV_TO_OVF_U2_UN,"int_conv_to_ovf_u2_un")
-MINI_OP(OP_ICONV_TO_OVF_U4_UN,"int_conv_to_ovf_u4_un")
-MINI_OP(OP_ICONV_TO_OVF_U8_UN,"int_conv_to_ovf_u8_un")
-MINI_OP(OP_ICONV_TO_OVF_I_UN, "int_conv_to_ovf_i_un")
-MINI_OP(OP_ICONV_TO_OVF_U_UN, "int_conv_to_ovf_u_un")
+MINI_OP(OP_ICONV_TO_OVF_I1_UN,"int_conv_to_ovf_i1_un", IREG, IREG, NONE)
+MINI_OP(OP_ICONV_TO_OVF_I2_UN,"int_conv_to_ovf_i2_un", IREG, IREG, NONE)
+MINI_OP(OP_ICONV_TO_OVF_I4_UN,"int_conv_to_ovf_i4_un", IREG, IREG, NONE)
+MINI_OP(OP_ICONV_TO_OVF_I8_UN,"int_conv_to_ovf_i8_un", IREG, IREG, NONE)
+MINI_OP(OP_ICONV_TO_OVF_U1_UN,"int_conv_to_ovf_u1_un", IREG, IREG, NONE)
+MINI_OP(OP_ICONV_TO_OVF_U2_UN,"int_conv_to_ovf_u2_un", IREG, IREG, NONE)
+MINI_OP(OP_ICONV_TO_OVF_U4_UN,"int_conv_to_ovf_u4_un", IREG, IREG, NONE)
+MINI_OP(OP_ICONV_TO_OVF_U8_UN,"int_conv_to_ovf_u8_un", IREG, IREG, NONE)
+MINI_OP(OP_ICONV_TO_OVF_I_UN, "int_conv_to_ovf_i_un", IREG, IREG, NONE)
+MINI_OP(OP_ICONV_TO_OVF_U_UN, "int_conv_to_ovf_u_un", IREG, IREG, NONE)
/* 32 bit opcodes: must be in the same order as the matching CEE_ opcodes: ovf3ops_op_map */
-MINI_OP(OP_ICONV_TO_OVF_I1,"int_conv_to_ovf_i1")
-MINI_OP(OP_ICONV_TO_OVF_U1,"int_conv_to_ovf_u1")
-MINI_OP(OP_ICONV_TO_OVF_I2,"int_conv_to_ovf_i2")
-MINI_OP(OP_ICONV_TO_OVF_U2,"int_conv_to_ovf_u2")
-MINI_OP(OP_ICONV_TO_OVF_I4,"int_conv_to_ovf_i4")
-MINI_OP(OP_ICONV_TO_OVF_U4,"int_conv_to_ovf_u4")
-MINI_OP(OP_ICONV_TO_OVF_I8,"int_conv_to_ovf_i8")
-MINI_OP(OP_ICONV_TO_OVF_U8,"int_conv_to_ovf_u8")
-
-MINI_OP(OP_IADC, "int_adc")
-MINI_OP(OP_IADC_IMM, "int_adc_imm")
-MINI_OP(OP_ISBB, "int_sbb")
-MINI_OP(OP_ISBB_IMM, "int_sbb_imm")
-MINI_OP(OP_IADDCC, "int_addcc")
-MINI_OP(OP_ISUBCC, "int_subcc")
-
-MINI_OP(OP_IADD_IMM, "int_add_imm")
-MINI_OP(OP_ISUB_IMM, "int_sub_imm")
-MINI_OP(OP_IMUL_IMM, "int_mul_imm")
-MINI_OP(OP_IDIV_IMM, "int_div_imm")
-MINI_OP(OP_IDIV_UN_IMM, "int_div_un_imm")
-MINI_OP(OP_IREM_IMM, "int_rem_imm")
-MINI_OP(OP_IREM_UN_IMM, "int_rem_un_imm")
-MINI_OP(OP_IAND_IMM, "int_and_imm")
-MINI_OP(OP_IOR_IMM, "int_or_imm")
-MINI_OP(OP_IXOR_IMM, "int_xor_imm")
-MINI_OP(OP_ISHL_IMM, "int_shl_imm")
-MINI_OP(OP_ISHR_IMM, "int_shr_imm")
-MINI_OP(OP_ISHR_UN_IMM, "int_shr_un_imm")
-MINI_OP(OP_ICONV_TO_R_UN,"int_conv_to_r_un")
-
-MINI_OP(OP_ICEQ, "int_ceq")
-MINI_OP(OP_ICGT, "int_cgt")
-MINI_OP(OP_ICGT_UN,"int_cgt_un")
-MINI_OP(OP_ICLT, "int_clt")
-MINI_OP(OP_ICLT_UN,"int_clt_un")
-
-MINI_OP(OP_IBEQ, "int_beq")
-MINI_OP(OP_IBGE, "int_bge")
-MINI_OP(OP_IBGT, "int_bgt")
-MINI_OP(OP_IBLE, "int_ble")
-MINI_OP(OP_IBLT, "int_blt")
-MINI_OP(OP_IBNE_UN, "int_bne_un")
-MINI_OP(OP_IBGE_UN, "int_bge_un")
-MINI_OP(OP_IBGT_UN, "int_bgt_un")
-MINI_OP(OP_IBLE_UN, "int_ble_un")
-MINI_OP(OP_IBLT_UN, "int_blt_un")
-
-MINI_OP(OP_FBEQ, "float_beq")
-MINI_OP(OP_FBGE, "float_bge")
-MINI_OP(OP_FBGT, "float_bgt")
-MINI_OP(OP_FBLE, "float_ble")
-MINI_OP(OP_FBLT, "float_blt")
-MINI_OP(OP_FBNE_UN,"float_bne_un")
-MINI_OP(OP_FBGE_UN,"float_bge_un")
-MINI_OP(OP_FBGT_UN,"float_bgt_un")
-MINI_OP(OP_FBLE_UN,"float_ble_un")
-MINI_OP(OP_FBLT_UN,"float_blt_un")
+MINI_OP(OP_ICONV_TO_OVF_I1,"int_conv_to_ovf_i1", IREG, IREG, NONE)
+MINI_OP(OP_ICONV_TO_OVF_U1,"int_conv_to_ovf_u1", IREG, IREG, NONE)
+MINI_OP(OP_ICONV_TO_OVF_I2,"int_conv_to_ovf_i2", IREG, IREG, NONE)
+MINI_OP(OP_ICONV_TO_OVF_U2,"int_conv_to_ovf_u2", IREG, IREG, NONE)
+MINI_OP(OP_ICONV_TO_OVF_I4,"int_conv_to_ovf_i4", IREG, IREG, NONE)
+MINI_OP(OP_ICONV_TO_OVF_U4,"int_conv_to_ovf_u4", IREG, IREG, NONE)
+MINI_OP(OP_ICONV_TO_OVF_I8,"int_conv_to_ovf_i8", IREG, IREG, NONE)
+MINI_OP(OP_ICONV_TO_OVF_U8,"int_conv_to_ovf_u8", IREG, IREG, NONE)
+
+MINI_OP(OP_IADC, "int_adc", IREG, IREG, IREG)
+MINI_OP(OP_IADC_IMM, "int_adc_imm", IREG, IREG, NONE)
+MINI_OP(OP_ISBB, "int_sbb", IREG, IREG, IREG)
+MINI_OP(OP_ISBB_IMM, "int_sbb_imm", IREG, IREG, NONE)
+MINI_OP(OP_IADDCC, "int_addcc", IREG, IREG, IREG)
+MINI_OP(OP_ISUBCC, "int_subcc", IREG, IREG, IREG)
+
+MINI_OP(OP_IADD_IMM, "int_add_imm", IREG, IREG, NONE)
+MINI_OP(OP_ISUB_IMM, "int_sub_imm", IREG, IREG, NONE)
+MINI_OP(OP_IMUL_IMM, "int_mul_imm", IREG, IREG, NONE)
+MINI_OP(OP_IDIV_IMM, "int_div_imm", IREG, IREG, NONE)
+MINI_OP(OP_IDIV_UN_IMM, "int_div_un_imm", IREG, IREG, NONE)
+MINI_OP(OP_IREM_IMM, "int_rem_imm", IREG, IREG, NONE)
+MINI_OP(OP_IREM_UN_IMM, "int_rem_un_imm", IREG, IREG, NONE)
+MINI_OP(OP_IAND_IMM, "int_and_imm", IREG, IREG, NONE)
+MINI_OP(OP_IOR_IMM, "int_or_imm", IREG, IREG, NONE)
+MINI_OP(OP_IXOR_IMM, "int_xor_imm", IREG, IREG, NONE)
+MINI_OP(OP_ISHL_IMM, "int_shl_imm", IREG, IREG, NONE)
+MINI_OP(OP_ISHR_IMM, "int_shr_imm", IREG, IREG, NONE)
+MINI_OP(OP_ISHR_UN_IMM, "int_shr_un_imm", IREG, IREG, NONE)
+
+MINI_OP(OP_ICEQ, "int_ceq", IREG, NONE, NONE)
+MINI_OP(OP_ICGT, "int_cgt", IREG, NONE, NONE)
+MINI_OP(OP_ICGT_UN,"int_cgt_un", IREG, NONE, NONE)
+MINI_OP(OP_ICLT, "int_clt", IREG, NONE, NONE)
+MINI_OP(OP_ICLT_UN,"int_clt_un", IREG, NONE, NONE)
+
+MINI_OP(OP_IBEQ, "int_beq", NONE, NONE, NONE)
+MINI_OP(OP_IBGE, "int_bge", NONE, NONE, NONE)
+MINI_OP(OP_IBGT, "int_bgt", NONE, NONE, NONE)
+MINI_OP(OP_IBLE, "int_ble", NONE, NONE, NONE)
+MINI_OP(OP_IBLT, "int_blt", NONE, NONE, NONE)
+MINI_OP(OP_IBNE_UN, "int_bne_un", NONE, NONE, NONE)
+MINI_OP(OP_IBGE_UN, "int_bge_un", NONE, NONE, NONE)
+MINI_OP(OP_IBGT_UN, "int_bgt_un", NONE, NONE, NONE)
+MINI_OP(OP_IBLE_UN, "int_ble_un", NONE, NONE, NONE)
+MINI_OP(OP_IBLT_UN, "int_blt_un", NONE, NONE, NONE)
+
+MINI_OP(OP_FBEQ, "float_beq", NONE, NONE, NONE)
+MINI_OP(OP_FBGE, "float_bge", NONE, NONE, NONE)
+MINI_OP(OP_FBGT, "float_bgt", NONE, NONE, NONE)
+MINI_OP(OP_FBLE, "float_ble", NONE, NONE, NONE)
+MINI_OP(OP_FBLT, "float_blt", NONE, NONE, NONE)
+MINI_OP(OP_FBNE_UN, "float_bne_un", NONE, NONE, NONE)
+MINI_OP(OP_FBGE_UN, "float_bge_un", NONE, NONE, NONE)
+MINI_OP(OP_FBGT_UN, "float_bgt_un", NONE, NONE, NONE)
+MINI_OP(OP_FBLE_UN, "float_ble_un", NONE, NONE, NONE)
+MINI_OP(OP_FBLT_UN, "float_blt_un", NONE, NONE, NONE)
/* float opcodes: must be in the same order as the matching CEE_ opcodes: binops_op_map */
-MINI_OP(OP_FADD, "float_add")
-MINI_OP(OP_FSUB, "float_sub")
-MINI_OP(OP_FMUL, "float_mul")
-MINI_OP(OP_FDIV, "float_div")
-MINI_OP(OP_FDIV_UN,"float_div_un")
-MINI_OP(OP_FREM, "float_rem")
-MINI_OP(OP_FREM_UN,"float_rem_un")
+MINI_OP(OP_FADD, "float_add", FREG, FREG, FREG)
+MINI_OP(OP_FSUB, "float_sub", FREG, FREG, FREG)
+MINI_OP(OP_FMUL, "float_mul", FREG, FREG, FREG)
+MINI_OP(OP_FDIV, "float_div", FREG, FREG, FREG)
+MINI_OP(OP_FDIV_UN,"float_div_un", FREG, FREG, FREG)
+MINI_OP(OP_FREM, "float_rem", FREG, FREG, FREG)
+MINI_OP(OP_FREM_UN,"float_rem_un", FREG, FREG, FREG)
/* float opcodes: must be in the same order as the matching CEE_ opcodes: unops_op_map */
-MINI_OP(OP_FNEG, "float_neg")
-MINI_OP(OP_FNOT, "float_not")
-MINI_OP(OP_FCONV_TO_I1,"float_conv_to_i1")
-MINI_OP(OP_FCONV_TO_I2,"float_conv_to_i2")
-MINI_OP(OP_FCONV_TO_I4,"float_conv_to_i4")
-MINI_OP(OP_FCONV_TO_I8,"float_conv_to_i8")
-MINI_OP(OP_FCONV_TO_R4,"float_conv_to_r4")
-MINI_OP(OP_FCONV_TO_R8,"float_conv_to_r8")
-MINI_OP(OP_FCONV_TO_U4,"float_conv_to_u4")
-MINI_OP(OP_FCONV_TO_U8,"float_conv_to_u8")
-
-MINI_OP(OP_FCONV_TO_U2, "float_conv_to_u2")
-MINI_OP(OP_FCONV_TO_U1, "float_conv_to_u1")
-MINI_OP(OP_FCONV_TO_I, "float_conv_to_i")
-MINI_OP(OP_FCONV_TO_OVF_I,"float_conv_to_ovf_i")
-MINI_OP(OP_FCONV_TO_OVF_U,"float_conv_to_ovd_u")
-MINI_OP(OP_FADD_OVF, "float_add_ovf")
-MINI_OP(OP_FADD_OVF_UN, "float_add_ovf_un")
-MINI_OP(OP_FMUL_OVF, "float_mul_ovf")
-MINI_OP(OP_FMUL_OVF_UN, "float_mul_ovf_un")
-MINI_OP(OP_FSUB_OVF, "float_sub_ovf")
-MINI_OP(OP_FSUB_OVF_UN, "float_sub_ovf_un")
-
-MINI_OP(OP_FCONV_TO_OVF_I1_UN,"float_conv_to_ovf_i1_un")
-MINI_OP(OP_FCONV_TO_OVF_I2_UN,"float_conv_to_ovf_i2_un")
-MINI_OP(OP_FCONV_TO_OVF_I4_UN,"float_conv_to_ovf_i4_un")
-MINI_OP(OP_FCONV_TO_OVF_I8_UN,"float_conv_to_ovf_i8_un")
-MINI_OP(OP_FCONV_TO_OVF_U1_UN,"float_conv_to_ovf_u1_un")
-MINI_OP(OP_FCONV_TO_OVF_U2_UN,"float_conv_to_ovf_u2_un")
-MINI_OP(OP_FCONV_TO_OVF_U4_UN,"float_conv_to_ovf_u4_un")
-MINI_OP(OP_FCONV_TO_OVF_U8_UN,"float_conv_to_ovf_u8_un")
-MINI_OP(OP_FCONV_TO_OVF_I_UN, "float_conv_to_ovf_i_un")
-MINI_OP(OP_FCONV_TO_OVF_U_UN, "float_conv_to_ovf_u_un")
-
-MINI_OP(OP_FCONV_TO_OVF_I1,"float_conv_to_ovf_i1")
-MINI_OP(OP_FCONV_TO_OVF_U1,"float_conv_to_ovf_u1")
-MINI_OP(OP_FCONV_TO_OVF_I2,"float_conv_to_ovf_i2")
-MINI_OP(OP_FCONV_TO_OVF_U2,"float_conv_to_ovf_u2")
-MINI_OP(OP_FCONV_TO_OVF_I4,"float_conv_to_ovf_i4")
-MINI_OP(OP_FCONV_TO_OVF_U4,"float_conv_to_ovf_u4")
-MINI_OP(OP_FCONV_TO_OVF_I8,"float_conv_to_ovf_i8")
-MINI_OP(OP_FCONV_TO_OVF_U8,"float_conv_to_ovf_u8")
-
-MINI_OP(OP_FCEQ, "float_ceq")
-MINI_OP(OP_FCGT, "float_cgt")
-MINI_OP(OP_FCGT_UN,"float_cgt_un")
-MINI_OP(OP_FCLT, "float_clt")
-MINI_OP(OP_FCLT_UN,"float_clt_un")
-
-MINI_OP(OP_FCEQ_MEMBASE, "float_ceq_membase")
-MINI_OP(OP_FCGT_MEMBASE, "float_cgt_membase")
-MINI_OP(OP_FCGT_UN_MEMBASE,"float_cgt_un_membase")
-MINI_OP(OP_FCLT_MEMBASE, "float_clt_membase")
-MINI_OP(OP_FCLT_UN_MEMBASE,"float_clt_un_membase")
-
-MINI_OP(OP_FCONV_TO_U, "float_conv_to_u")
-
-MINI_OP(OP_GROUP, "group")
+MINI_OP(OP_FNEG, "float_neg", FREG, FREG, NONE)
+MINI_OP(OP_FNOT, "float_not", FREG, FREG, NONE)
+MINI_OP(OP_FCONV_TO_I1,"float_conv_to_i1", IREG, FREG, NONE)
+MINI_OP(OP_FCONV_TO_I2,"float_conv_to_i2", IREG, FREG, NONE)
+MINI_OP(OP_FCONV_TO_I4,"float_conv_to_i4", IREG, FREG, NONE)
+MINI_OP(OP_FCONV_TO_I8,"float_conv_to_i8", IREG, FREG, NONE)
+MINI_OP(OP_FCONV_TO_R4,"float_conv_to_r4", FREG, FREG, NONE)
+MINI_OP(OP_FCONV_TO_R8,"float_conv_to_r8", FREG, FREG, NONE)
+MINI_OP(OP_FCONV_TO_U4,"float_conv_to_u4", IREG, FREG, NONE)
+MINI_OP(OP_FCONV_TO_U8,"float_conv_to_u8", IREG, FREG, NONE)
+
+MINI_OP(OP_FCONV_TO_U2, "float_conv_to_u2", IREG, FREG, NONE)
+MINI_OP(OP_FCONV_TO_U1, "float_conv_to_u1", IREG, FREG, NONE)
+MINI_OP(OP_FCONV_TO_I, "float_conv_to_i", IREG, FREG, NONE)
+MINI_OP(OP_FCONV_TO_OVF_I,"float_conv_to_ovf_i", IREG, FREG, NONE)
+MINI_OP(OP_FCONV_TO_OVF_U,"float_conv_to_ovd_u", IREG, FREG, NONE)
+
+MINI_OP(OP_FADD_OVF, "float_add_ovf", FREG, FREG, FREG)
+MINI_OP(OP_FADD_OVF_UN, "float_add_ovf_un", FREG, FREG, FREG)
+MINI_OP(OP_FMUL_OVF, "float_mul_ovf", FREG, FREG, FREG)
+MINI_OP(OP_FMUL_OVF_UN, "float_mul_ovf_un", FREG, FREG, FREG)
+MINI_OP(OP_FSUB_OVF, "float_sub_ovf", FREG, FREG, FREG)
+MINI_OP(OP_FSUB_OVF_UN, "float_sub_ovf_un", FREG, FREG, FREG)
+
+MINI_OP(OP_FCONV_TO_OVF_I1_UN,"float_conv_to_ovf_i1_un", IREG, FREG, NONE)
+MINI_OP(OP_FCONV_TO_OVF_I2_UN,"float_conv_to_ovf_i2_un", IREG, FREG, NONE)
+MINI_OP(OP_FCONV_TO_OVF_I4_UN,"float_conv_to_ovf_i4_un", IREG, FREG, NONE)
+MINI_OP(OP_FCONV_TO_OVF_I8_UN,"float_conv_to_ovf_i8_un", IREG, FREG, NONE)
+MINI_OP(OP_FCONV_TO_OVF_U1_UN,"float_conv_to_ovf_u1_un", IREG, FREG, NONE)
+MINI_OP(OP_FCONV_TO_OVF_U2_UN,"float_conv_to_ovf_u2_un", IREG, FREG, NONE)
+MINI_OP(OP_FCONV_TO_OVF_U4_UN,"float_conv_to_ovf_u4_un", IREG, FREG, NONE)
+MINI_OP(OP_FCONV_TO_OVF_U8_UN,"float_conv_to_ovf_u8_un", IREG, FREG, NONE)
+MINI_OP(OP_FCONV_TO_OVF_I_UN, "float_conv_to_ovf_i_un", IREG, FREG, NONE)
+MINI_OP(OP_FCONV_TO_OVF_U_UN, "float_conv_to_ovf_u_un", IREG, FREG, NONE)
+
+MINI_OP(OP_FCONV_TO_OVF_I1,"float_conv_to_ovf_i1", IREG, FREG, NONE)
+MINI_OP(OP_FCONV_TO_OVF_U1,"float_conv_to_ovf_u1", IREG, FREG, NONE)
+MINI_OP(OP_FCONV_TO_OVF_I2,"float_conv_to_ovf_i2", IREG, FREG, NONE)
+MINI_OP(OP_FCONV_TO_OVF_U2,"float_conv_to_ovf_u2", IREG, FREG, NONE)
+MINI_OP(OP_FCONV_TO_OVF_I4,"float_conv_to_ovf_i4", IREG, FREG, NONE)
+MINI_OP(OP_FCONV_TO_OVF_U4,"float_conv_to_ovf_u4", IREG, FREG, NONE)
+MINI_OP(OP_FCONV_TO_OVF_I8,"float_conv_to_ovf_i8", IREG, FREG, NONE)
+MINI_OP(OP_FCONV_TO_OVF_U8,"float_conv_to_ovf_u8", IREG, FREG, NONE)
+
+/* These do the comparison too */
+MINI_OP(OP_FCEQ, "float_ceq", IREG, FREG, FREG)
+MINI_OP(OP_FCGT, "float_cgt", IREG, FREG, FREG)
+MINI_OP(OP_FCGT_UN,"float_cgt_un", IREG, FREG, FREG)
+MINI_OP(OP_FCLT, "float_clt", IREG, FREG, FREG)
+MINI_OP(OP_FCLT_UN,"float_clt_un", IREG, FREG, FREG)
+
+MINI_OP(OP_FCEQ_MEMBASE, "float_ceq_membase", NONE, NONE, NONE)
+MINI_OP(OP_FCGT_MEMBASE, "float_cgt_membase", NONE, NONE, NONE)
+MINI_OP(OP_FCGT_UN_MEMBASE,"float_cgt_un_membase", NONE, NONE, NONE)
+MINI_OP(OP_FCLT_MEMBASE, "float_clt_membase", NONE, NONE, NONE)
+MINI_OP(OP_FCLT_UN_MEMBASE,"float_clt_un_membase", NONE, NONE, NONE)
+
+MINI_OP(OP_FCONV_TO_U, "float_conv_to_u", NONE, NONE, NONE)
+MINI_OP(OP_CKFINITE, "ckfinite", FREG, FREG, NONE)
+
+/* Return the low 32 bits of a double vreg */
+MINI_OP(OP_FGETLOW32, "float_getlow32", IREG, FREG, NONE)
+/* Return the high 32 bits of a double vreg */
+MINI_OP(OP_FGETHIGH32, "float_gethigh32", IREG, FREG, NONE)
+
+MINI_OP(OP_GROUP, "group", NONE, NONE, NONE)
+MINI_OP(OP_JUMP_TABLE, "jump_table", IREG, NONE, NONE)
/* aot compiler */
-MINI_OP(OP_AOTCONST, "aot_const")
-MINI_OP(OP_PATCH_INFO, "patch_info")
-MINI_OP(OP_GOT_ENTRY, "got_entry")
+MINI_OP(OP_AOTCONST, "aot_const", IREG, NONE, NONE)
+MINI_OP(OP_PATCH_INFO, "patch_info", NONE, NONE, NONE)
+MINI_OP(OP_GOT_ENTRY, "got_entry", IREG, IREG, NONE)
/* used to impl unbox */
-MINI_OP(OP_UNBOXCAST , "unboxcast")
-MINI_OP(OP_UNBOXCAST_REG , "unboxcast_reg")
+MINI_OP(OP_UNBOXCAST , "unboxcast", NONE, NONE, NONE)
+MINI_OP(OP_UNBOXCAST_REG , "unboxcast_reg", NONE, NONE, NONE)
/* exception related opcodes */
-MINI_OP(OP_CALL_HANDLER , "call_handler")
-MINI_OP(OP_START_HANDLER , "start_handler")
-MINI_OP(OP_ENDFILTER, "endfilter")
-MINI_OP(OP_ENDFINALLY, "endfinally")
+MINI_OP(OP_CALL_HANDLER , "call_handler", NONE, NONE, NONE)
+MINI_OP(OP_START_HANDLER , "start_handler", NONE, NONE, NONE)
+MINI_OP(OP_ENDFILTER, "endfilter", NONE, IREG, NONE)
+MINI_OP(OP_ENDFINALLY, "endfinally", NONE, NONE, NONE)
/* inline (long)int * (long)int */
-MINI_OP(OP_BIGMUL, "bigmul")
-MINI_OP(OP_BIGMUL_UN, "bigmul_un")
-MINI_OP(OP_MIN, "min")
-MINI_OP(OP_MAX, "max")
-MINI_OP(OP_IMIN, "int_min")
-MINI_OP(OP_IMAX, "int_max")
-MINI_OP(OP_LMIN, "long_min")
-MINI_OP(OP_LMAX, "long_max")
-MINI_OP(OP_IMIN_UN, "int_min_un")
-MINI_OP(OP_IMAX_UN, "int_max_un")
-MINI_OP(OP_LMIN_UN, "long_min_un")
-MINI_OP(OP_LMAX_UN, "long_max_un")
+MINI_OP(OP_BIGMUL, "bigmul", NONE, NONE, NONE)
+MINI_OP(OP_BIGMUL_UN, "bigmul_un", NONE, NONE, NONE)
+MINI_OP(OP_IMIN_UN, "int_min_un", IREG, IREG, IREG)
+MINI_OP(OP_IMAX_UN, "int_max_un", IREG, IREG, IREG)
+MINI_OP(OP_LMIN_UN, "long_min_un", LREG, LREG, LREG)
+MINI_OP(OP_LMAX_UN, "long_max_un", LREG, LREG, LREG)
+
+MINI_OP(OP_MIN, "min", IREG, IREG, IREG)
+MINI_OP(OP_MAX, "max", IREG, IREG, IREG)
+
+MINI_OP(OP_IMIN, "int_min", IREG, IREG, IREG)
+MINI_OP(OP_IMAX, "int_max", IREG, IREG, IREG)
+MINI_OP(OP_LMIN, "long_min", LREG, LREG, LREG)
+MINI_OP(OP_LMAX, "long_max", LREG, LREG, LREG)
/* opcodes most architecture have */
-MINI_OP(OP_ADC, "adc")
-MINI_OP(OP_ADC_IMM, "adc_imm")
-MINI_OP(OP_SBB, "sbb")
-MINI_OP(OP_SBB_IMM, "sbb_imm")
-MINI_OP(OP_ADDCC, "addcc")
-MINI_OP(OP_ADDCC_IMM, "addcc_imm")
-MINI_OP(OP_SUBCC, "subcc")
-MINI_OP(OP_SUBCC_IMM, "subcc_imm")
-MINI_OP(OP_BR_REG, "br_reg")
-MINI_OP(OP_SEXT_I1, "sext_i1")
-MINI_OP(OP_SEXT_I2, "sext_i2")
-MINI_OP(OP_SEXT_I4, "sext_i4")
-MINI_OP(OP_ZEXT_I1, "zext_i1")
-MINI_OP(OP_ZEXT_I2, "zext_i2")
-MINI_OP(OP_ZEXT_I4, "zext_i4")
-MINI_OP(OP_CNE, "cne")
+MINI_OP(OP_ADC, "adc", IREG, IREG, IREG)
+MINI_OP(OP_ADC_IMM, "adc_imm", IREG, IREG, NONE)
+MINI_OP(OP_SBB, "sbb", IREG, IREG, IREG)
+MINI_OP(OP_SBB_IMM, "sbb_imm", IREG, IREG, NONE)
+MINI_OP(OP_ADDCC, "addcc", IREG, IREG, IREG)
+MINI_OP(OP_ADDCC_IMM, "addcc_imm", IREG, IREG, NONE)
+MINI_OP(OP_SUBCC, "subcc", IREG, IREG, IREG)
+MINI_OP(OP_SUBCC_IMM, "subcc_imm", IREG, IREG, NONE)
+MINI_OP(OP_BR_REG, "br_reg", NONE, IREG, NONE)
+MINI_OP(OP_SEXT_I1, "sext_i1", IREG, IREG, NONE)
+MINI_OP(OP_SEXT_I2, "sext_i2", IREG, IREG, NONE)
+MINI_OP(OP_SEXT_I4, "sext_i4", IREG, IREG, NONE)
+MINI_OP(OP_ZEXT_I1, "zext_i1", IREG, IREG, NONE)
+MINI_OP(OP_ZEXT_I2, "zext_i2", IREG, IREG, NONE)
+MINI_OP(OP_ZEXT_I4, "zext_i4", IREG, IREG, NONE)
+MINI_OP(OP_CNE, "cne", NONE, NONE, NONE)
/* to implement the upper half of long32 add and sub */
-MINI_OP(OP_ADD_OVF_CARRY, "add_ovf_carry")
-MINI_OP(OP_SUB_OVF_CARRY, "sub_ovf_carry")
-MINI_OP(OP_ADD_OVF_UN_CARRY, "add_ovf_un_carry")
-MINI_OP(OP_SUB_OVF_UN_CARRY, "sub_ovf_un_carry")
+MINI_OP(OP_ADD_OVF_CARRY, "add_ovf_carry", NONE, NONE, NONE)
+MINI_OP(OP_SUB_OVF_CARRY, "sub_ovf_carry", NONE, NONE, NONE)
+MINI_OP(OP_ADD_OVF_UN_CARRY, "add_ovf_un_carry", NONE, NONE, NONE)
+MINI_OP(OP_SUB_OVF_UN_CARRY, "sub_ovf_un_carry", NONE, NONE, NONE)
/* FP functions usually done by the CPU */
-MINI_OP(OP_SIN, "sin")
-MINI_OP(OP_COS, "cos")
-MINI_OP(OP_ABS, "abs")
-MINI_OP(OP_TAN, "tan")
-MINI_OP(OP_ATAN, "atan")
-MINI_OP(OP_SQRT, "sqrt")
+MINI_OP(OP_SIN, "sin", FREG, FREG, NONE)
+MINI_OP(OP_COS, "cos", FREG, FREG, NONE)
+MINI_OP(OP_ABS, "abs", FREG, FREG, NONE)
+MINI_OP(OP_TAN, "tan", FREG, FREG, NONE)
+MINI_OP(OP_ATAN, "atan", FREG, FREG, NONE)
+MINI_OP(OP_SQRT, "sqrt", FREG, FREG, NONE)
/* to optimize strings */
-MINI_OP(OP_GETCHR, "getchar")
-MINI_OP(OP_STR_CHAR_ADDR, "str_char_addr")
-MINI_OP(OP_STRLEN, "strlen")
-MINI_OP(OP_GETTYPE, "gettype")
-MINI_OP(OP_GETHASHCODE, "gethashcode")
-/* get adrress of element in a 2D array */
-MINI_OP(OP_LDELEMA2D, "getldelema2")
+MINI_OP(OP_GETCHR, "getchar", NONE, NONE, NONE)
+MINI_OP(OP_STR_CHAR_ADDR, "str_char_addr", NONE, NONE, NONE)
+MINI_OP(OP_STRLEN, "strlen", NONE, NONE, NONE)
+MINI_OP(OP_GETTYPE, "gettype", NONE, NONE, NONE)
+MINI_OP(OP_GETHASHCODE, "gethashcode", NONE, NONE, NONE)
+MINI_OP(OP_NEWARR, "newarr", IREG, IREG, NONE)
+MINI_OP(OP_LDLEN, "ldlen", IREG, IREG, NONE)
+MINI_OP(OP_BOUNDS_CHECK, "bounds_check", NONE, IREG, IREG)
+/* get address of element in a 2D array */
+MINI_OP(OP_LDELEMA2D, "getldelema2", NONE, NONE, NONE)
/* inlined small memcpy with constant length */
-MINI_OP(OP_MEMCPY, "memcpy")
+MINI_OP(OP_MEMCPY, "memcpy", NONE, NONE, NONE)
/* inlined small memset with constant length */
-MINI_OP(OP_MEMSET, "memset")
+MINI_OP(OP_MEMSET, "memset", NONE, NONE, NONE)
/* type check that support custom remoting types */
-MINI_OP(OP_CISINST, "cisinst")
-MINI_OP(OP_CCASTCLASS, "ccastclass")
-MINI_OP(OP_SAVE_LMF, "save_lmf")
-MINI_OP(OP_RESTORE_LMF, "restore_lmf")
+MINI_OP(OP_CISINST, "cisinst", NONE, NONE, NONE)
+MINI_OP(OP_CCASTCLASS, "ccastclass", NONE, NONE, NONE)
+MINI_OP(OP_SAVE_LMF, "save_lmf", NONE, NONE, NONE)
+MINI_OP(OP_RESTORE_LMF, "restore_lmf", NONE, NONE, NONE)
/* mkrefany/refanyval for generic sharing */
-MINI_OP(OP_MKREFANY_REGS, "mkrefany_regs")
-MINI_OP(OP_REFANYVAL_REG, "refanyval_reg")
+MINI_OP(OP_MKREFANY_REGS, "mkrefany_regs", NONE, NONE, NONE)
+MINI_OP(OP_REFANYVAL_REG, "refanyval_reg", NONE, NONE, NONE)
/* arch-dep tls access */
-MINI_OP(OP_TLS_GET, "tls_get")
+MINI_OP(OP_TLS_GET, "tls_get", IREG, NONE, NONE)
-MINI_OP(OP_LOAD_GOTADDR, "load_gotaddr")
-MINI_OP(OP_DUMMY_USE, "dummy_use")
-MINI_OP(OP_DUMMY_STORE, "dummy_store")
-MINI_OP(OP_NOT_REACHED, "not_reached")
-MINI_OP(OP_NOT_NULL, "not_null")
+MINI_OP(OP_LOAD_GOTADDR, "load_gotaddr", IREG, NONE, NONE)
+MINI_OP(OP_DUMMY_USE, "dummy_use", NONE, IREG, NONE)
+MINI_OP(OP_DUMMY_STORE, "dummy_store", NONE, NONE, NONE)
+MINI_OP(OP_NOT_REACHED, "not_reached", NONE, NONE, NONE)
+MINI_OP(OP_NOT_NULL, "not_null", NONE, IREG, NONE)
/* Atomic specific
Interlocked::Increment and Interlocked:Decrement
and atomic_add_i4 by Interlocked::Add
*/
-MINI_OP(OP_ATOMIC_ADD_I4, "atomic_add_i4")
-MINI_OP(OP_ATOMIC_ADD_NEW_I4, "atomic_add_new_i4")
-MINI_OP(OP_ATOMIC_ADD_IMM_I4, "atomic_add_imm_i4")
-MINI_OP(OP_ATOMIC_ADD_IMM_NEW_I4, "atomic_add_imm_new_i4")
-MINI_OP(OP_ATOMIC_EXCHANGE_I4, "atomic_exchange_i4")
-
-MINI_OP(OP_ATOMIC_ADD_I8, "atomic_add_i8")
-MINI_OP(OP_ATOMIC_ADD_NEW_I8, "atomic_add_new_i8")
-MINI_OP(OP_ATOMIC_ADD_IMM_I8, "atomic_add_imm_i8")
-MINI_OP(OP_ATOMIC_ADD_IMM_NEW_I8, "atomic_add_imm_new_i8")
-MINI_OP(OP_ATOMIC_EXCHANGE_I8, "atomic_exchange_i8")
-MINI_OP(OP_MEMORY_BARRIER, "memory_barrier")
+MINI_OP(OP_ATOMIC_ADD_I4, "atomic_add_i4", IREG, IREG, IREG)
+MINI_OP(OP_ATOMIC_ADD_NEW_I4, "atomic_add_new_i4", IREG, IREG, IREG)
+MINI_OP(OP_ATOMIC_ADD_IMM_I4, "atomic_add_imm_i4", IREG, IREG, NONE)
+MINI_OP(OP_ATOMIC_ADD_IMM_NEW_I4, "atomic_add_imm_new_i4", IREG, IREG, NONE)
+MINI_OP(OP_ATOMIC_EXCHANGE_I4, "atomic_exchange_i4", IREG, IREG, IREG)
+
+MINI_OP(OP_ATOMIC_ADD_I8, "atomic_add_i8", IREG, IREG, IREG)
+MINI_OP(OP_ATOMIC_ADD_NEW_I8, "atomic_add_new_i8", IREG, IREG, IREG)
+MINI_OP(OP_ATOMIC_ADD_IMM_I8, "atomic_add_imm_i8", IREG, IREG, NONE)
+MINI_OP(OP_ATOMIC_ADD_IMM_NEW_I8, "atomic_add_imm_new_i8", IREG, IREG, NONE)
+MINI_OP(OP_ATOMIC_EXCHANGE_I8, "atomic_exchange_i8", IREG, IREG, IREG)
+MINI_OP(OP_MEMORY_BARRIER, "memory_barrier", NONE, NONE, NONE)
/* CompareExchange where the value to store is a constant */
/* backend->data holds the constant value */
-MINI_OP(OP_ATOMIC_CAS_IMM_I4, "atomic_cas_imm_i4")
-MINI_OP(OP_ATOMIC_CAS_IMM_I8, "atomic_cas_imm_i8")
+MINI_OP(OP_ATOMIC_CAS_IMM_I4, "atomic_cas_imm_i4", IREG, IREG, IREG)
+MINI_OP(OP_ATOMIC_CAS_IMM_I8, "atomic_cas_imm_i8", IREG, IREG, IREG)
+
+/* Conditional move opcodes.
+ * Must be in the same order as the matching CEE_B... opcodes
+ * sreg2 will be assigned to dreg if the condition is true.
+ * sreg1 should be equal to dreg and models the fact that the instruction doesn't necessarily
+ * modify dreg. The sreg1==dreg condition could be violated by SSA, so the local
+ * register allocator or the code generator should generate a mov dreg, sreg1 before
+ * the cmov in those cases.
+ * These opcodes operate on pointer sized values.
+ */
+MINI_OP(OP_CMOV_IEQ, "cmov_ieq", IREG, IREG, IREG)
+MINI_OP(OP_CMOV_IGE, "cmov_ige", IREG, IREG, IREG)
+MINI_OP(OP_CMOV_IGT, "cmov_igt", IREG, IREG, IREG)
+MINI_OP(OP_CMOV_ILE, "cmov_ile", IREG, IREG, IREG)
+MINI_OP(OP_CMOV_ILT, "cmov_ilt", IREG, IREG, IREG)
+MINI_OP(OP_CMOV_INE_UN, "cmov_ine_un", IREG, IREG, IREG)
+MINI_OP(OP_CMOV_IGE_UN, "cmov_ige_un", IREG, IREG, IREG)
+MINI_OP(OP_CMOV_IGT_UN, "cmov_igt_un", IREG, IREG, IREG)
+MINI_OP(OP_CMOV_ILE_UN, "cmov_ile_un", IREG, IREG, IREG)
+MINI_OP(OP_CMOV_ILT_UN, "cmov_ilt_un", IREG, IREG, IREG)
+
+MINI_OP(OP_CMOV_LEQ, "cmov_leq", IREG, IREG, IREG)
+MINI_OP(OP_CMOV_LGE, "cmov_lge", IREG, IREG, IREG)
+MINI_OP(OP_CMOV_LGT, "cmov_lgt", IREG, IREG, IREG)
+MINI_OP(OP_CMOV_LLE, "cmov_lle", IREG, IREG, IREG)
+MINI_OP(OP_CMOV_LLT, "cmov_llt", IREG, IREG, IREG)
+MINI_OP(OP_CMOV_LNE_UN, "cmov_lne_un", IREG, IREG, IREG)
+MINI_OP(OP_CMOV_LGE_UN, "cmov_lge_un", IREG, IREG, IREG)
+MINI_OP(OP_CMOV_LGT_UN, "cmov_lgt_un", IREG, IREG, IREG)
+MINI_OP(OP_CMOV_LLE_UN, "cmov_lle_un", IREG, IREG, IREG)
+MINI_OP(OP_CMOV_LLT_UN, "cmov_llt_un", IREG, IREG, IREG)
/* Arch specific opcodes */
#if defined(__i386__) || defined(__x86_64__)
-MINI_OP(OP_X86_TEST_NULL, "x86_test_null")
-MINI_OP(OP_X86_COMPARE_MEMBASE_REG,"x86_compare_membase_reg")
-MINI_OP(OP_X86_COMPARE_MEMBASE_IMM,"x86_compare_membase_imm")
-MINI_OP(OP_X86_COMPARE_MEM_IMM, "x86_compare_mem_imm")
-MINI_OP(OP_X86_COMPARE_MEMBASE8_IMM,"x86_compare_membase8_imm")
-MINI_OP(OP_X86_COMPARE_REG_MEMBASE,"x86_compare_reg_membase")
-MINI_OP(OP_X86_INC_REG, "x86_inc_reg")
-MINI_OP(OP_X86_INC_MEMBASE, "x86_inc_membase")
-MINI_OP(OP_X86_DEC_REG, "x86_dec_reg")
-MINI_OP(OP_X86_DEC_MEMBASE, "x86_dec_membase")
-MINI_OP(OP_X86_ADD_MEMBASE_IMM, "x86_add_membase_imm")
-MINI_OP(OP_X86_SUB_MEMBASE_IMM, "x86_sub_membase_imm")
-MINI_OP(OP_X86_AND_MEMBASE_IMM, "x86_and_membase_imm")
-MINI_OP(OP_X86_OR_MEMBASE_IMM, "x86_or_membase_imm")
-MINI_OP(OP_X86_XOR_MEMBASE_IMM, "x86_xor_membase_imm")
-
-MINI_OP(OP_X86_ADD_MEMBASE_REG, "x86_add_membase_reg")
-MINI_OP(OP_X86_SUB_MEMBASE_REG, "x86_sub_membase_reg")
-MINI_OP(OP_X86_AND_MEMBASE_REG, "x86_and_membase_reg")
-MINI_OP(OP_X86_OR_MEMBASE_REG, "x86_or_membase_reg")
-MINI_OP(OP_X86_XOR_MEMBASE_REG, "x86_xor_membase_reg")
-MINI_OP(OP_X86_MUL_MEMBASE_REG, "x86_mul_membase_reg")
-
-MINI_OP(OP_X86_ADD_REG_MEMBASE, "x86_add_reg_membase")
-MINI_OP(OP_X86_SUB_REG_MEMBASE, "x86_sub_reg_membase")
-MINI_OP(OP_X86_MUL_REG_MEMBASE, "x86_mul_reg_membase")
-MINI_OP(OP_X86_AND_REG_MEMBASE, "x86_and_reg_membase")
-MINI_OP(OP_X86_OR_REG_MEMBASE, "x86_or_reg_membase")
-MINI_OP(OP_X86_XOR_REG_MEMBASE, "x86_xor_reg_membase")
-
-MINI_OP(OP_X86_PUSH_MEMBASE, "x86_push_membase")
-MINI_OP(OP_X86_PUSH_IMM, "x86_push_imm")
-MINI_OP(OP_X86_PUSH, "x86_push")
-MINI_OP(OP_X86_PUSH_FP, "x86_push_fp")
-MINI_OP(OP_X86_PUSH_OBJ, "x86_push_obj")
-MINI_OP(OP_X86_PUSH_GOT_ENTRY, "x86_push_got_entry")
-MINI_OP(OP_X86_LEA, "x86_lea")
-MINI_OP(OP_X86_LEA_MEMBASE, "x86_lea_membase")
-MINI_OP(OP_X86_XCHG, "x86_xchg")
-MINI_OP(OP_X86_FPOP, "x86_fpop")
-MINI_OP(OP_X86_FP_LOAD_I8, "x86_fp_load_i8")
-MINI_OP(OP_X86_FP_LOAD_I4, "x86_fp_load_i4")
-MINI_OP(OP_X86_SETEQ_MEMBASE, "x86_seteq_membase")
-MINI_OP(OP_X86_SETNE_MEMBASE, "x86_setne_membase")
-MINI_OP(OP_X86_OUTARG_ALIGN_STACK, "x86_outarg_align_stack")
-MINI_OP(OP_X86_FXCH, "x86_fxch")
+MINI_OP(OP_X86_TEST_NULL, "x86_test_null", NONE, NONE, NONE)
+MINI_OP(OP_X86_COMPARE_MEMBASE_REG,"x86_compare_membase_reg", NONE, IREG, IREG)
+MINI_OP(OP_X86_COMPARE_MEMBASE_IMM,"x86_compare_membase_imm", NONE, IREG, NONE)
+MINI_OP(OP_X86_COMPARE_MEM_IMM, "x86_compare_mem_imm", NONE, NONE, NONE)
+MINI_OP(OP_X86_COMPARE_MEMBASE8_IMM,"x86_compare_membase8_imm", NONE, IREG, NONE)
+MINI_OP(OP_X86_COMPARE_REG_MEMBASE,"x86_compare_reg_membase", NONE, IREG, IREG)
+MINI_OP(OP_X86_INC_REG, "x86_inc_reg", NONE, NONE, NONE)
+MINI_OP(OP_X86_INC_MEMBASE, "x86_inc_membase", NONE, IREG, NONE)
+MINI_OP(OP_X86_DEC_REG, "x86_dec_reg", NONE, NONE, NONE)
+MINI_OP(OP_X86_DEC_MEMBASE, "x86_dec_membase", NONE, IREG, NONE)
+MINI_OP(OP_X86_ADD_MEMBASE_IMM, "x86_add_membase_imm", NONE, IREG, NONE)
+MINI_OP(OP_X86_SUB_MEMBASE_IMM, "x86_sub_membase_imm", NONE, IREG, NONE)
+MINI_OP(OP_X86_AND_MEMBASE_IMM, "x86_and_membase_imm", NONE, IREG, NONE)
+MINI_OP(OP_X86_OR_MEMBASE_IMM, "x86_or_membase_imm", NONE, IREG, NONE)
+MINI_OP(OP_X86_XOR_MEMBASE_IMM, "x86_xor_membase_imm", NONE, IREG, NONE)
+MINI_OP(OP_X86_ADD_MEMBASE_REG, "x86_add_membase_reg", NONE, IREG, IREG)
+MINI_OP(OP_X86_SUB_MEMBASE_REG, "x86_sub_membase_reg", NONE, IREG, IREG)
+MINI_OP(OP_X86_AND_MEMBASE_REG, "x86_and_membase_reg", NONE, IREG, IREG)
+MINI_OP(OP_X86_OR_MEMBASE_REG, "x86_or_membase_reg", NONE, IREG, IREG)
+MINI_OP(OP_X86_XOR_MEMBASE_REG, "x86_xor_membase_reg", NONE, IREG, IREG)
+MINI_OP(OP_X86_MUL_MEMBASE_REG, "x86_mul_membase_reg", NONE, IREG, IREG)
+
+MINI_OP(OP_X86_ADD_REG_MEMBASE, "x86_add_reg_membase", NONE, IREG, IREG)
+MINI_OP(OP_X86_SUB_REG_MEMBASE, "x86_sub_reg_membase", NONE, IREG, IREG)
+MINI_OP(OP_X86_MUL_REG_MEMBASE, "x86_mul_reg_membase", NONE, IREG, IREG)
+MINI_OP(OP_X86_AND_REG_MEMBASE, "x86_and_reg_membase", IREG, IREG, IREG)
+MINI_OP(OP_X86_OR_REG_MEMBASE, "x86_or_reg_membase", IREG, IREG, IREG)
+MINI_OP(OP_X86_XOR_REG_MEMBASE, "x86_xor_reg_membase", IREG, IREG, IREG)
+
+MINI_OP(OP_X86_PUSH_MEMBASE, "x86_push_membase", NONE, IREG, NONE)
+MINI_OP(OP_X86_PUSH_IMM, "x86_push_imm", NONE, NONE, NONE)
+MINI_OP(OP_X86_PUSH, "x86_push", NONE, IREG, NONE)
+MINI_OP(OP_X86_PUSH_FP, "x86_push_fp", NONE, NONE, NONE)
+MINI_OP(OP_X86_PUSH_OBJ, "x86_push_obj", NONE, IREG, NONE)
+MINI_OP(OP_X86_PUSH_GOT_ENTRY, "x86_push_got_entry", NONE, IREG, NONE)
+MINI_OP(OP_X86_LEA, "x86_lea", IREG, IREG, IREG)
+MINI_OP(OP_X86_LEA_MEMBASE, "x86_lea_membase", IREG, IREG, NONE)
+MINI_OP(OP_X86_XCHG, "x86_xchg", NONE, NONE, NONE)
+MINI_OP(OP_X86_FPOP, "x86_fpop", NONE, FREG, NONE)
+MINI_OP(OP_X86_FP_LOAD_I8, "x86_fp_load_i8", NONE, NONE, NONE)
+MINI_OP(OP_X86_FP_LOAD_I4, "x86_fp_load_i4", NONE, NONE, NONE)
+MINI_OP(OP_X86_SETEQ_MEMBASE, "x86_seteq_membase", NONE, IREG, NONE)
+MINI_OP(OP_X86_SETNE_MEMBASE, "x86_setne_membase", NONE, IREG, NONE)
+MINI_OP(OP_X86_SHRD, "x86_shrd", IREG, IREG, IREG)
+MINI_OP(OP_X86_FXCH, "x86_fxch", NONE, NONE, NONE)
#endif
#if defined(__x86_64__)
-MINI_OP(OP_AMD64_TEST_NULL, "amd64_test_null")
-MINI_OP(OP_AMD64_SET_XMMREG_R4, "amd64_set_xmmreg_r4")
-MINI_OP(OP_AMD64_SET_XMMREG_R8, "amd64_set_xmmreg_r8")
-MINI_OP(OP_AMD64_OUTARG_XMMREG_R4, "amd64_outarg_xmmreg_r4")
-MINI_OP(OP_AMD64_OUTARG_XMMREG_R8, "amd64_outarg_xmmreg_r8")
-MINI_OP(OP_AMD64_ICOMPARE_MEMBASE_REG, "amd64_icompare_membase_reg")
-MINI_OP(OP_AMD64_ICOMPARE_MEMBASE_IMM, "amd64_icompare_membase_imm")
-MINI_OP(OP_AMD64_ICOMPARE_REG_MEMBASE, "amd64_icompare_reg_membase")
-MINI_OP(OP_AMD64_COMPARE_MEMBASE_REG, "amd64_compare_membase_reg")
-MINI_OP(OP_AMD64_COMPARE_MEMBASE_IMM, "amd64_compare_membase_imm")
-MINI_OP(OP_AMD64_COMPARE_REG_MEMBASE, "amd64_compare_reg_membase")
-
-MINI_OP(OP_AMD64_ADD_MEMBASE_REG, "amd64_add_membase_reg")
-MINI_OP(OP_AMD64_SUB_MEMBASE_REG, "amd64_sub_membase_reg")
-MINI_OP(OP_AMD64_AND_MEMBASE_REG, "amd64_and_membase_reg")
-MINI_OP(OP_AMD64_OR_MEMBASE_REG, "amd64_or_membase_reg")
-MINI_OP(OP_AMD64_XOR_MEMBASE_REG, "amd64_xor_membase_reg")
-MINI_OP(OP_AMD64_MUL_MEMBASE_REG, "amd64_mul_membase_reg")
-
-MINI_OP(OP_AMD64_ADD_MEMBASE_IMM, "amd64_add_membase_imm")
-MINI_OP(OP_AMD64_SUB_MEMBASE_IMM, "amd64_sub_membase_imm")
-MINI_OP(OP_AMD64_AND_MEMBASE_IMM, "amd64_and_membase_imm")
-MINI_OP(OP_AMD64_OR_MEMBASE_IMM, "amd64_or_membase_imm")
-MINI_OP(OP_AMD64_XOR_MEMBASE_IMM, "amd64_xor_membase_imm")
-MINI_OP(OP_AMD64_MUL_MEMBASE_IMM, "amd64_mul_membase_imm")
-
-MINI_OP(OP_AMD64_ADD_REG_MEMBASE, "amd64_add_reg_membase")
-MINI_OP(OP_AMD64_SUB_REG_MEMBASE, "amd64_sub_reg_membase")
-MINI_OP(OP_AMD64_AND_REG_MEMBASE, "amd64_and_reg_membase")
-MINI_OP(OP_AMD64_OR_REG_MEMBASE, "amd64_or_reg_membase")
-MINI_OP(OP_AMD64_XOR_REG_MEMBASE, "amd64_xor_reg_membase")
-MINI_OP(OP_AMD64_MUL_REG_MEMBASE, "amd64_mul_reg_membase")
-
-MINI_OP(OP_AMD64_OUTARG_ALIGN_STACK, "amd64_outarg_align_stack")
-MINI_OP(OP_AMD64_LOADI8_MEMINDEX, "amd64_loadi8_memindex")
-MINI_OP(OP_AMD64_SAVE_SP_TO_LMF, "amd64_save_sp_to_lmf")
+MINI_OP(OP_AMD64_TEST_NULL, "amd64_test_null", NONE, NONE, NONE)
+MINI_OP(OP_AMD64_SET_XMMREG_R4, "amd64_set_xmmreg_r4", FREG, FREG, NONE)
+MINI_OP(OP_AMD64_SET_XMMREG_R8, "amd64_set_xmmreg_r8", FREG, FREG, NONE)
+MINI_OP(OP_AMD64_OUTARG_XMMREG_R4, "amd64_outarg_xmmreg_r4", NONE, NONE, NONE)
+MINI_OP(OP_AMD64_OUTARG_XMMREG_R8, "amd64_outarg_xmmreg_r8", NONE, NONE, NONE)
+MINI_OP(OP_AMD64_ICOMPARE_MEMBASE_REG, "amd64_icompare_membase_reg", NONE, IREG, IREG)
+MINI_OP(OP_AMD64_ICOMPARE_MEMBASE_IMM, "amd64_icompare_membase_imm", NONE, IREG, NONE)
+MINI_OP(OP_AMD64_ICOMPARE_REG_MEMBASE, "amd64_icompare_reg_membase", NONE, IREG, IREG)
+MINI_OP(OP_AMD64_COMPARE_MEMBASE_REG, "amd64_compare_membase_reg", NONE, IREG, IREG)
+MINI_OP(OP_AMD64_COMPARE_MEMBASE_IMM, "amd64_compare_membase_imm", NONE, IREG, NONE)
+MINI_OP(OP_AMD64_COMPARE_REG_MEMBASE, "amd64_compare_reg_membase", NONE, IREG, IREG)
+
+MINI_OP(OP_AMD64_ADD_MEMBASE_REG, "amd64_add_membase_reg", NONE, IREG, IREG)
+MINI_OP(OP_AMD64_SUB_MEMBASE_REG, "amd64_sub_membase_reg", NONE, IREG, IREG)
+MINI_OP(OP_AMD64_AND_MEMBASE_REG, "amd64_and_membase_reg", NONE, IREG, IREG)
+MINI_OP(OP_AMD64_OR_MEMBASE_REG, "amd64_or_membase_reg", NONE, IREG, IREG)
+MINI_OP(OP_AMD64_XOR_MEMBASE_REG, "amd64_xor_membase_reg", NONE, IREG, IREG)
+MINI_OP(OP_AMD64_MUL_MEMBASE_REG, "amd64_mul_membase_reg", NONE, IREG, IREG)
+
+MINI_OP(OP_AMD64_ADD_MEMBASE_IMM, "amd64_add_membase_imm", NONE, IREG, NONE)
+MINI_OP(OP_AMD64_SUB_MEMBASE_IMM, "amd64_sub_membase_imm", NONE, IREG, NONE)
+MINI_OP(OP_AMD64_AND_MEMBASE_IMM, "amd64_and_membase_imm", NONE, IREG, NONE)
+MINI_OP(OP_AMD64_OR_MEMBASE_IMM, "amd64_or_membase_imm", NONE, IREG, NONE)
+MINI_OP(OP_AMD64_XOR_MEMBASE_IMM, "amd64_xor_membase_imm", NONE, IREG, NONE)
+MINI_OP(OP_AMD64_MUL_MEMBASE_IMM, "amd64_mul_membase_imm", NONE, IREG, NONE)
+
+MINI_OP(OP_AMD64_ADD_REG_MEMBASE, "amd64_add_reg_membase", NONE, IREG, IREG)
+MINI_OP(OP_AMD64_SUB_REG_MEMBASE, "amd64_sub_reg_membase", NONE, IREG, IREG)
+MINI_OP(OP_AMD64_AND_REG_MEMBASE, "amd64_and_reg_membase", IREG, IREG, IREG)
+MINI_OP(OP_AMD64_OR_REG_MEMBASE, "amd64_or_reg_membase", IREG, IREG, IREG)
+MINI_OP(OP_AMD64_XOR_REG_MEMBASE, "amd64_xor_reg_membase", IREG, IREG, IREG)
+MINI_OP(OP_AMD64_MUL_REG_MEMBASE, "amd64_mul_reg_membase", NONE, IREG, IREG)
+
+MINI_OP(OP_AMD64_OUTARG_ALIGN_STACK, "amd64_outarg_align_stack", NONE, NONE, NONE)
+MINI_OP(OP_AMD64_LOADI8_MEMINDEX, "amd64_loadi8_memindex", NONE, NONE, NONE)
+MINI_OP(OP_AMD64_SAVE_SP_TO_LMF, "amd64_save_sp_to_lmf", NONE, NONE, NONE)
#endif
#if defined(__ppc__) || defined(__powerpc__) || defined(__ppc64__)
-MINI_OP(OP_PPC_SUBFIC, "ppc_subfic")
-MINI_OP(OP_PPC_SUBFZE, "ppc_subfze")
-MINI_OP(OP_CHECK_FINITE, "ppc_check_finite")
+MINI_OP(OP_PPC_SUBFIC, "ppc_subfic", IREG, IREG, NONE)
+MINI_OP(OP_PPC_SUBFZE, "ppc_subfze", IREG, IREG, NONE)
+MINI_OP(OP_CHECK_FINITE, "ppc_check_finite", NONE, IREG, NONE)
#endif
#if defined(__arm__)
-MINI_OP(OP_ARM_RSBS_IMM, "arm_rsbs_imm")
-MINI_OP(OP_ARM_RSC_IMM, "arm_rsc_imm")
+MINI_OP(OP_ARM_RSBS_IMM, "arm_rsbs_imm", IREG, IREG, NONE)
+MINI_OP(OP_ARM_RSC_IMM, "arm_rsc_imm", IREG, IREG, NONE)
#endif
#if defined(__sparc__) || defined(sparc)
-MINI_OP(OP_SPARC_OUTARG_REGPAIR, "sparc_outarg_regpair")
-MINI_OP(OP_SPARC_OUTARG_MEM, "sparc_outarg_mem")
-MINI_OP(OP_SPARC_OUTARG_MEMPAIR, "sparc_outarg_mempair")
-MINI_OP(OP_SPARC_OUTARG_SPLIT_REG_STACK, "sparc_outarg_split_reg_stack")
-MINI_OP(OP_SPARC_OUTARG_FLOAT_REG, "sparc_outarg_float_reg")
-MINI_OP(OP_SPARC_OUTARG_DOUBLE_REG, "sparc_outarg_double_reg")
-MINI_OP(OP_SPARC_OUTARG_FLOAT, "sparc_outarg_float")
-MINI_OP(OP_SPARC_OUTARG_REGPAIR_FLOAT, "sparc_outarg_float")
-MINI_OP(OP_SPARC_SETFREG_FLOAT, "sparc_setfreg_float")
-MINI_OP(OP_SPARC_BRZ, "sparc_brz")
-MINI_OP(OP_SPARC_BRLEZ, "sparc_brlez")
-MINI_OP(OP_SPARC_BRLZ, "sparc_brlz")
-MINI_OP(OP_SPARC_BRNZ, "sparc_brnz")
-MINI_OP(OP_SPARC_BRGZ, "sparc_brgz")
-MINI_OP(OP_SPARC_BRGEZ, "sparc_brgez")
-MINI_OP(OP_SPARC_COND_EXC_EQZ, "sparc_cond_exc_eqz")
-MINI_OP(OP_SPARC_COND_EXC_GEZ, "sparc_cond_exc_gez")
-MINI_OP(OP_SPARC_COND_EXC_GTZ, "sparc_cond_exc_gtz")
-MINI_OP(OP_SPARC_COND_EXC_LEZ, "sparc_cond_exc_lez")
-MINI_OP(OP_SPARC_COND_EXC_LTZ, "sparc_cond_exc_ltz")
-MINI_OP(OP_SPARC_COND_EXC_NEZ, "sparc_cond_exc_nez")
+MINI_OP(OP_SPARC_OUTARG_REGPAIR, "sparc_outarg_regpair", NONE, NONE, NONE)
+MINI_OP(OP_SPARC_OUTARG_MEM, "sparc_outarg_mem", NONE, NONE, NONE)
+MINI_OP(OP_SPARC_OUTARG_MEMPAIR, "sparc_outarg_mempair", NONE, NONE, NONE)
+MINI_OP(OP_SPARC_OUTARG_SPLIT_REG_STACK, "sparc_outarg_split_reg_stack", NONE, NONE, NONE)
+MINI_OP(OP_SPARC_OUTARG_FLOAT_REG, "sparc_outarg_float_reg", NONE, NONE, NONE)
+MINI_OP(OP_SPARC_OUTARG_DOUBLE_REG, "sparc_outarg_double_reg", NONE, NONE, NONE)
+MINI_OP(OP_SPARC_OUTARG_FLOAT, "sparc_outarg_float", NONE, NONE, NONE)
+MINI_OP(OP_SPARC_OUTARG_REGPAIR_FLOAT, "sparc_outarg_regpair_float", NONE, NONE, NONE)
+MINI_OP(OP_SPARC_SETFREG_FLOAT, "sparc_setfreg_float", NONE, NONE, NONE)
+MINI_OP(OP_SPARC_BRZ, "sparc_brz", NONE, NONE, NONE)
+MINI_OP(OP_SPARC_BRLEZ, "sparc_brlez", NONE, NONE, NONE)
+MINI_OP(OP_SPARC_BRLZ, "sparc_brlz", NONE, NONE, NONE)
+MINI_OP(OP_SPARC_BRNZ, "sparc_brnz", NONE, NONE, NONE)
+MINI_OP(OP_SPARC_BRGZ, "sparc_brgz", NONE, NONE, NONE)
+MINI_OP(OP_SPARC_BRGEZ, "sparc_brgez", NONE, NONE, NONE)
+MINI_OP(OP_SPARC_COND_EXC_EQZ, "sparc_cond_exc_eqz", NONE, NONE, NONE)
+MINI_OP(OP_SPARC_COND_EXC_GEZ, "sparc_cond_exc_gez", NONE, NONE, NONE)
+MINI_OP(OP_SPARC_COND_EXC_GTZ, "sparc_cond_exc_gtz", NONE, NONE, NONE)
+MINI_OP(OP_SPARC_COND_EXC_LEZ, "sparc_cond_exc_lez", NONE, NONE, NONE)
+MINI_OP(OP_SPARC_COND_EXC_LTZ, "sparc_cond_exc_ltz", NONE, NONE, NONE)
+MINI_OP(OP_SPARC_COND_EXC_NEZ, "sparc_cond_exc_nez", NONE, NONE, NONE)
#endif
#if defined(__s390__) || defined(s390)
-MINI_OP(OP_S390_LOADARG, "s390_loadarg")
-MINI_OP(OP_S390_ARGREG, "s390_argreg")
-MINI_OP(OP_S390_ARGPTR, "s390_argptr")
-MINI_OP(OP_S390_STKARG, "s390_stkarg")
-MINI_OP(OP_S390_MOVE, "s390_move")
-MINI_OP(OP_S390_SETF4RET, "s390_setf4ret")
-MINI_OP(OP_S390_BKCHAIN, "s390_bkchain")
+MINI_OP(OP_S390_LOADARG, "s390_loadarg", NONE, NONE, NONE)
+MINI_OP(OP_S390_ARGREG, "s390_argreg", NONE, NONE, NONE)
+MINI_OP(OP_S390_ARGPTR, "s390_argptr", NONE, NONE, NONE)
+MINI_OP(OP_S390_STKARG, "s390_stkarg", NONE, NONE, NONE)
+MINI_OP(OP_S390_MOVE, "s390_move", IREG, IREG, NONE)
+MINI_OP(OP_S390_SETF4RET, "s390_setf4ret", FREG, FREG, NONE)
+MINI_OP(OP_S390_BKCHAIN, "s390_bkchain", NONE, NONE, NONE)
+MINI_OP(OP_S390_LADD, "s390_long_add", LREG, IREG, IREG)
+MINI_OP(OP_S390_LADD_OVF, "s390_long_add_ovf", LREG, IREG, IREG)
+MINI_OP(OP_S390_LADD_OVF_UN, "s390_long_add_ovf_un", LREG, IREG, IREG)
+MINI_OP(OP_S390_LSUB, "s390_long_sub", LREG, IREG, IREG)
+MINI_OP(OP_S390_LSUB_OVF, "s390_long_sub_ovf", LREG, IREG, IREG)
+MINI_OP(OP_S390_LSUB_OVF_UN, "s390_long_sub_ovf_un", LREG, IREG, IREG)
+MINI_OP(OP_S390_LNEG, "s390_long_neg", LREG, IREG, IREG)
+MINI_OP(OP_S390_IADD_OVF, "s390_int_add_ovf", IREG, IREG, IREG)
+MINI_OP(OP_S390_IADD_OVF_UN, "s390_int_add_ovf_un", IREG, IREG, IREG)
+MINI_OP(OP_S390_ISUB_OVF, "s390_int_sub_ovf", IREG, IREG, IREG)
+MINI_OP(OP_S390_ISUB_OVF_UN, "s390_int_sub_ovf_un", IREG, IREG, IREG)
#endif
#if defined(__ia64__)
-MINI_OP(OP_IA64_LOAD, "ia64_load")
-MINI_OP(OP_IA64_LOADI1, "ia64_loadi1")
-MINI_OP(OP_IA64_LOADU1, "ia64_loadu1")
-MINI_OP(OP_IA64_LOADI2, "ia64_loadi2")
-MINI_OP(OP_IA64_LOADU2, "ia64_loadu2")
-MINI_OP(OP_IA64_LOADI4, "ia64_loadi4")
-MINI_OP(OP_IA64_LOADU4, "ia64_loadu4")
-MINI_OP(OP_IA64_LOADI8, "ia64_loadi8")
-MINI_OP(OP_IA64_LOADU8, "ia64_loadu8")
-MINI_OP(OP_IA64_LOADR4, "ia64_loadr4")
-MINI_OP(OP_IA64_LOADR8, "ia64_loadr8")
-MINI_OP(OP_IA64_STORE, "ia64_store")
-MINI_OP(OP_IA64_STOREI1, "ia64_storei1")
-MINI_OP(OP_IA64_STOREU1, "ia64_storeu1")
-MINI_OP(OP_IA64_STOREI2, "ia64_storei2")
-MINI_OP(OP_IA64_STOREU2, "ia64_storeu2")
-MINI_OP(OP_IA64_STOREI4, "ia64_storei4")
-MINI_OP(OP_IA64_STOREU4, "ia64_storeu4")
-MINI_OP(OP_IA64_STOREI8, "ia64_storei8")
-MINI_OP(OP_IA64_STOREU8, "ia64_storeu8")
-MINI_OP(OP_IA64_STORER4, "ia64_storer4")
-MINI_OP(OP_IA64_STORER8, "ia64_storer8")
-
-MINI_OP(OP_IA64_CMP4_EQ, "ia64_cmp4_eq")
-MINI_OP(OP_IA64_CMP4_NE, "ia64_cmp4_ne")
-MINI_OP(OP_IA64_CMP4_LE, "ia64_cmp4_le")
-MINI_OP(OP_IA64_CMP4_LT, "ia64_cmp4_lt")
-MINI_OP(OP_IA64_CMP4_GE, "ia64_cmp4_ge")
-MINI_OP(OP_IA64_CMP4_GT, "ia64_cmp4_gt")
-MINI_OP(OP_IA64_CMP4_LE_UN, "ia64_cmp4_le_un")
-MINI_OP(OP_IA64_CMP4_LT_UN, "ia64_cmp4_lt_un")
-MINI_OP(OP_IA64_CMP4_GE_UN, "ia64_cmp4_ge_un")
-MINI_OP(OP_IA64_CMP4_GT_UN, "ia64_cmp4_gt_un")
-MINI_OP(OP_IA64_CMP_EQ, "ia64_cmp_eq")
-MINI_OP(OP_IA64_CMP_NE, "ia64_cmp_ne")
-MINI_OP(OP_IA64_CMP_LE, "ia64_cmp_le")
-MINI_OP(OP_IA64_CMP_LT, "ia64_cmp_lt")
-MINI_OP(OP_IA64_CMP_GE, "ia64_cmp_ge")
-MINI_OP(OP_IA64_CMP_GT, "ia64_cmp_gt")
-MINI_OP(OP_IA64_CMP_LT_UN, "ia64_cmp_lt_un")
-MINI_OP(OP_IA64_CMP_GT_UN, "ia64_cmp_gt_un")
-MINI_OP(OP_IA64_CMP_GE_UN, "ia64_cmp_ge_un")
-MINI_OP(OP_IA64_CMP_LE_UN, "ia64_cmp_le_un")
-
-MINI_OP(OP_IA64_CMP4_EQ_IMM, "ia64_cmp4_eq_imm")
-MINI_OP(OP_IA64_CMP4_NE_IMM, "ia64_cmp4_ne_imm")
-MINI_OP(OP_IA64_CMP4_LE_IMM, "ia64_cmp4_le_imm")
-MINI_OP(OP_IA64_CMP4_LT_IMM, "ia64_cmp4_lt_imm")
-MINI_OP(OP_IA64_CMP4_GE_IMM, "ia64_cmp4_ge_imm")
-MINI_OP(OP_IA64_CMP4_GT_IMM, "ia64_cmp4_gt_imm")
-MINI_OP(OP_IA64_CMP4_LE_UN_IMM, "ia64_cmp4_le_un_imm")
-MINI_OP(OP_IA64_CMP4_LT_UN_IMM, "ia64_cmp4_lt_un_imm")
-MINI_OP(OP_IA64_CMP4_GE_UN_IMM, "ia64_cmp4_ge_un_imm")
-MINI_OP(OP_IA64_CMP4_GT_UN_IMM, "ia64_cmp4_gt_un_imm")
-MINI_OP(OP_IA64_CMP_EQ_IMM, "ia64_cmp_eq_imm")
-MINI_OP(OP_IA64_CMP_NE_IMM, "ia64_cmp_ne_imm")
-MINI_OP(OP_IA64_CMP_LE_IMM, "ia64_cmp_le_imm")
-MINI_OP(OP_IA64_CMP_LT_IMM, "ia64_cmp_lt_imm")
-MINI_OP(OP_IA64_CMP_GE_IMM, "ia64_cmp_ge_imm")
-MINI_OP(OP_IA64_CMP_GT_IMM, "ia64_cmp_gt_imm")
-MINI_OP(OP_IA64_CMP_LT_UN_IMM, "ia64_cmp_lt_un_imm")
-MINI_OP(OP_IA64_CMP_GT_UN_IMM, "ia64_cmp_gt_un_imm")
-MINI_OP(OP_IA64_CMP_GE_UN_IMM, "ia64_cmp_ge_un_imm")
-MINI_OP(OP_IA64_CMP_LE_UN_IMM, "ia64_cmp_le_un_imm")
-
-MINI_OP(OP_IA64_FCMP_EQ, "ia64_fcmp_eq")
-MINI_OP(OP_IA64_FCMP_NE, "ia64_fcmp_ne")
-MINI_OP(OP_IA64_FCMP_LE, "ia64_fcmp_le")
-MINI_OP(OP_IA64_FCMP_LT, "ia64_fcmp_lt")
-MINI_OP(OP_IA64_FCMP_GE, "ia64_fcmp_ge")
-MINI_OP(OP_IA64_FCMP_GT, "ia64_fcmp_gt")
-MINI_OP(OP_IA64_FCMP_LT_UN, "ia64_fcmp_lt_un")
-MINI_OP(OP_IA64_FCMP_GT_UN, "ia64_fcmp_gt_un")
-MINI_OP(OP_IA64_FCMP_GE_UN, "ia64_fcmp_ge_un")
-MINI_OP(OP_IA64_FCMP_LE_UN, "ia64_fcmp_le_un")
-
-MINI_OP(OP_IA64_BR_COND, "ia64_br_cond")
-MINI_OP(OP_IA64_COND_EXC, "ia64_cond_exc")
-MINI_OP(OP_IA64_CSET, "ia64_cset")
-
-MINI_OP(OP_IA64_OUTARG_R4, "ia64_outarg_r4")
-MINI_OP(OP_IA64_STOREI1_MEMBASE_INC_REG, "ia64_storei1_membase_inc_reg")
-MINI_OP(OP_IA64_STOREI2_MEMBASE_INC_REG, "ia64_storei2_membase_inc_reg")
-MINI_OP(OP_IA64_STOREI4_MEMBASE_INC_REG, "ia64_storei4_membase_inc_reg")
-MINI_OP(OP_IA64_STOREI8_MEMBASE_INC_REG, "ia64_storei8_membase_inc_reg")
-MINI_OP(OP_IA64_STORER4_MEMBASE_INC_REG, "ia64_storer4_membase_inc_reg")
-MINI_OP(OP_IA64_STORER8_MEMBASE_INC_REG, "ia64_storer8_membase_inc_reg")
-MINI_OP(OP_IA64_LOADI1_MEMBASE_INC,"ia64_loadi1_membase_inc")
-MINI_OP(OP_IA64_LOADU1_MEMBASE_INC,"ia64_loadu1_membase_inc")
-MINI_OP(OP_IA64_LOADI2_MEMBASE_INC,"ia64_loadi2_membase_inc")
-MINI_OP(OP_IA64_LOADU2_MEMBASE_INC,"ia64_loadu2_membase_inc")
-MINI_OP(OP_IA64_LOADI4_MEMBASE_INC,"ia64_loadi4_membase_inc")
-MINI_OP(OP_IA64_LOADU4_MEMBASE_INC,"ia64_loadu4_membase_inc")
-MINI_OP(OP_IA64_LOADI8_MEMBASE_INC,"ia64_loadi8_membase_inc")
-MINI_OP(OP_IA64_LOADR4_MEMBASE_INC,"ia64_loadr4_membase_inc")
-MINI_OP(OP_IA64_LOADR8_MEMBASE_INC,"ia64_loadr8_membase_inc")
+MINI_OP(OP_IA64_LOAD, "ia64_load", NONE, NONE, NONE)
+MINI_OP(OP_IA64_LOADI1, "ia64_loadi1", NONE, NONE, NONE)
+MINI_OP(OP_IA64_LOADU1, "ia64_loadu1", NONE, NONE, NONE)
+MINI_OP(OP_IA64_LOADI2, "ia64_loadi2", NONE, NONE, NONE)
+MINI_OP(OP_IA64_LOADU2, "ia64_loadu2", NONE, NONE, NONE)
+MINI_OP(OP_IA64_LOADI4, "ia64_loadi4", NONE, NONE, NONE)
+MINI_OP(OP_IA64_LOADU4, "ia64_loadu4", NONE, NONE, NONE)
+MINI_OP(OP_IA64_LOADI8, "ia64_loadi8", NONE, NONE, NONE)
+MINI_OP(OP_IA64_LOADU8, "ia64_loadu8", NONE, NONE, NONE)
+MINI_OP(OP_IA64_LOADR4, "ia64_loadr4", NONE, NONE, NONE)
+MINI_OP(OP_IA64_LOADR8, "ia64_loadr8", NONE, NONE, NONE)
+MINI_OP(OP_IA64_STORE, "ia64_store", NONE, NONE, NONE)
+MINI_OP(OP_IA64_STOREI1, "ia64_storei1", NONE, NONE, NONE)
+MINI_OP(OP_IA64_STOREU1, "ia64_storeu1", NONE, NONE, NONE)
+MINI_OP(OP_IA64_STOREI2, "ia64_storei2", NONE, NONE, NONE)
+MINI_OP(OP_IA64_STOREU2, "ia64_storeu2", NONE, NONE, NONE)
+MINI_OP(OP_IA64_STOREI4, "ia64_storei4", NONE, NONE, NONE)
+MINI_OP(OP_IA64_STOREU4, "ia64_storeu4", NONE, NONE, NONE)
+MINI_OP(OP_IA64_STOREI8, "ia64_storei8", NONE, NONE, NONE)
+MINI_OP(OP_IA64_STOREU8, "ia64_storeu8", NONE, NONE, NONE)
+MINI_OP(OP_IA64_STORER4, "ia64_storer4", NONE, NONE, NONE)
+MINI_OP(OP_IA64_STORER8, "ia64_storer8", NONE, NONE, NONE)
+
+MINI_OP(OP_IA64_CMP4_EQ, "ia64_cmp4_eq", NONE, NONE, NONE)
+MINI_OP(OP_IA64_CMP4_NE, "ia64_cmp4_ne", NONE, NONE, NONE)
+MINI_OP(OP_IA64_CMP4_LE, "ia64_cmp4_le", NONE, NONE, NONE)
+MINI_OP(OP_IA64_CMP4_LT, "ia64_cmp4_lt", NONE, NONE, NONE)
+MINI_OP(OP_IA64_CMP4_GE, "ia64_cmp4_ge", NONE, NONE, NONE)
+MINI_OP(OP_IA64_CMP4_GT, "ia64_cmp4_gt", NONE, NONE, NONE)
+MINI_OP(OP_IA64_CMP4_LE_UN, "ia64_cmp4_le_un", NONE, NONE, NONE)
+MINI_OP(OP_IA64_CMP4_LT_UN, "ia64_cmp4_lt_un", NONE, NONE, NONE)
+MINI_OP(OP_IA64_CMP4_GE_UN, "ia64_cmp4_ge_un", NONE, NONE, NONE)
+MINI_OP(OP_IA64_CMP4_GT_UN, "ia64_cmp4_gt_un", NONE, NONE, NONE)
+MINI_OP(OP_IA64_CMP_EQ, "ia64_cmp_eq", NONE, NONE, NONE)
+MINI_OP(OP_IA64_CMP_NE, "ia64_cmp_ne", NONE, NONE, NONE)
+MINI_OP(OP_IA64_CMP_LE, "ia64_cmp_le", NONE, NONE, NONE)
+MINI_OP(OP_IA64_CMP_LT, "ia64_cmp_lt", NONE, NONE, NONE)
+MINI_OP(OP_IA64_CMP_GE, "ia64_cmp_ge", NONE, NONE, NONE)
+MINI_OP(OP_IA64_CMP_GT, "ia64_cmp_gt", NONE, NONE, NONE)
+MINI_OP(OP_IA64_CMP_LT_UN, "ia64_cmp_lt_un", NONE, NONE, NONE)
+MINI_OP(OP_IA64_CMP_GT_UN, "ia64_cmp_gt_un", NONE, NONE, NONE)
+MINI_OP(OP_IA64_CMP_GE_UN, "ia64_cmp_ge_un", NONE, NONE, NONE)
+MINI_OP(OP_IA64_CMP_LE_UN, "ia64_cmp_le_un", NONE, NONE, NONE)
+
+MINI_OP(OP_IA64_CMP4_EQ_IMM, "ia64_cmp4_eq_imm", NONE, NONE, NONE)
+MINI_OP(OP_IA64_CMP4_NE_IMM, "ia64_cmp4_ne_imm", NONE, NONE, NONE)
+MINI_OP(OP_IA64_CMP4_LE_IMM, "ia64_cmp4_le_imm", NONE, NONE, NONE)
+MINI_OP(OP_IA64_CMP4_LT_IMM, "ia64_cmp4_lt_imm", NONE, NONE, NONE)
+MINI_OP(OP_IA64_CMP4_GE_IMM, "ia64_cmp4_ge_imm", NONE, NONE, NONE)
+MINI_OP(OP_IA64_CMP4_GT_IMM, "ia64_cmp4_gt_imm", NONE, NONE, NONE)
+MINI_OP(OP_IA64_CMP4_LE_UN_IMM, "ia64_cmp4_le_un_imm", NONE, NONE, NONE)
+MINI_OP(OP_IA64_CMP4_LT_UN_IMM, "ia64_cmp4_lt_un_imm", NONE, NONE, NONE)
+MINI_OP(OP_IA64_CMP4_GE_UN_IMM, "ia64_cmp4_ge_un_imm", NONE, NONE, NONE)
+MINI_OP(OP_IA64_CMP4_GT_UN_IMM, "ia64_cmp4_gt_un_imm", NONE, NONE, NONE)
+MINI_OP(OP_IA64_CMP_EQ_IMM, "ia64_cmp_eq_imm", NONE, NONE, NONE)
+MINI_OP(OP_IA64_CMP_NE_IMM, "ia64_cmp_ne_imm", NONE, NONE, NONE)
+MINI_OP(OP_IA64_CMP_LE_IMM, "ia64_cmp_le_imm", NONE, NONE, NONE)
+MINI_OP(OP_IA64_CMP_LT_IMM, "ia64_cmp_lt_imm", NONE, NONE, NONE)
+MINI_OP(OP_IA64_CMP_GE_IMM, "ia64_cmp_ge_imm", NONE, NONE, NONE)
+MINI_OP(OP_IA64_CMP_GT_IMM, "ia64_cmp_gt_imm", NONE, NONE, NONE)
+MINI_OP(OP_IA64_CMP_LT_UN_IMM, "ia64_cmp_lt_un_imm", NONE, NONE, NONE)
+MINI_OP(OP_IA64_CMP_GT_UN_IMM, "ia64_cmp_gt_un_imm", NONE, NONE, NONE)
+MINI_OP(OP_IA64_CMP_GE_UN_IMM, "ia64_cmp_ge_un_imm", NONE, NONE, NONE)
+MINI_OP(OP_IA64_CMP_LE_UN_IMM, "ia64_cmp_le_un_imm", NONE, NONE, NONE)
+
+MINI_OP(OP_IA64_FCMP_EQ, "ia64_fcmp_eq", NONE, NONE, NONE)
+MINI_OP(OP_IA64_FCMP_NE, "ia64_fcmp_ne", NONE, NONE, NONE)
+MINI_OP(OP_IA64_FCMP_LE, "ia64_fcmp_le", NONE, NONE, NONE)
+MINI_OP(OP_IA64_FCMP_LT, "ia64_fcmp_lt", NONE, NONE, NONE)
+MINI_OP(OP_IA64_FCMP_GE, "ia64_fcmp_ge", NONE, NONE, NONE)
+MINI_OP(OP_IA64_FCMP_GT, "ia64_fcmp_gt", NONE, NONE, NONE)
+MINI_OP(OP_IA64_FCMP_LT_UN, "ia64_fcmp_lt_un", NONE, NONE, NONE)
+MINI_OP(OP_IA64_FCMP_GT_UN, "ia64_fcmp_gt_un", NONE, NONE, NONE)
+MINI_OP(OP_IA64_FCMP_GE_UN, "ia64_fcmp_ge_un", NONE, NONE, NONE)
+MINI_OP(OP_IA64_FCMP_LE_UN, "ia64_fcmp_le_un", NONE, NONE, NONE)
+
+MINI_OP(OP_IA64_BR_COND, "ia64_br_cond", NONE, NONE, NONE)
+MINI_OP(OP_IA64_COND_EXC, "ia64_cond_exc", NONE, NONE, NONE)
+MINI_OP(OP_IA64_CSET, "ia64_cset", NONE, NONE, NONE)
+
+MINI_OP(OP_IA64_OUTARG_R4, "ia64_outarg_r4", NONE, NONE, NONE)
+MINI_OP(OP_IA64_STOREI1_MEMBASE_INC_REG, "ia64_storei1_membase_inc_reg", NONE, NONE, NONE)
+MINI_OP(OP_IA64_STOREI2_MEMBASE_INC_REG, "ia64_storei2_membase_inc_reg", NONE, NONE, NONE)
+MINI_OP(OP_IA64_STOREI4_MEMBASE_INC_REG, "ia64_storei4_membase_inc_reg", NONE, NONE, NONE)
+MINI_OP(OP_IA64_STOREI8_MEMBASE_INC_REG, "ia64_storei8_membase_inc_reg", NONE, NONE, NONE)
+MINI_OP(OP_IA64_STORER4_MEMBASE_INC_REG, "ia64_storer4_membase_inc_reg", NONE, NONE, NONE)
+MINI_OP(OP_IA64_STORER8_MEMBASE_INC_REG, "ia64_storer8_membase_inc_reg", NONE, NONE, NONE)
+MINI_OP(OP_IA64_LOADI1_MEMBASE_INC,"ia64_loadi1_membase_inc", NONE, NONE, NONE)
+MINI_OP(OP_IA64_LOADU1_MEMBASE_INC,"ia64_loadu1_membase_inc", NONE, NONE, NONE)
+MINI_OP(OP_IA64_LOADI2_MEMBASE_INC,"ia64_loadi2_membase_inc", NONE, NONE, NONE)
+MINI_OP(OP_IA64_LOADU2_MEMBASE_INC,"ia64_loadu2_membase_inc", NONE, NONE, NONE)
+MINI_OP(OP_IA64_LOADI4_MEMBASE_INC,"ia64_loadi4_membase_inc", NONE, NONE, NONE)
+MINI_OP(OP_IA64_LOADU4_MEMBASE_INC,"ia64_loadu4_membase_inc", NONE, NONE, NONE)
+MINI_OP(OP_IA64_LOADI8_MEMBASE_INC,"ia64_loadi8_membase_inc", NONE, NONE, NONE)
+MINI_OP(OP_IA64_LOADR4_MEMBASE_INC,"ia64_loadr4_membase_inc", NONE, NONE, NONE)
+MINI_OP(OP_IA64_LOADR8_MEMBASE_INC,"ia64_loadr8_membase_inc", NONE, NONE, NONE)
#endif
#if defined(__alpha__)
MONO_INST_NEW (cfg, arg, OP_OUTARG);
arg->inst_imm = cinfo->sig_cookie.offset;
arg->inst_left = sig_arg;
- MONO_INST_LIST_ADD_TAIL (&arg->node, &call->out_args);
+ /* prepend, so they get reversed */
+ arg->next = call->out_args;
+ call->out_args = arg;
}
if (is_virtual && i == 0) {
/* the argument will be attached to the call instrucion */
arg->inst_left = in;
arg->inst_call = call;
arg->type = in->type;
- MONO_INST_LIST_ADD_TAIL (&arg->node, &call->out_args);
+ /* prepend, so they get reversed */
+ arg->next = call->out_args;
+ call->out_args = arg;
if (ainfo->regtype == RegTypeGeneral) {
arg->backend.reg3 = ainfo->reg;
call->used_iregs |= 1 << ainfo->reg;
}
}
}
+ /*
+ * Reverse the call->out_args list.
+ */
+ {
+ MonoInst *prev = NULL, *list = call->out_args, *next;
+ while (list) {
+ next = list->next;
+ list->next = prev;
+ prev = list;
+ list = next;
+ }
+ call->out_args = prev;
+ }
+
call->stack_usage = cinfo->stack_usage;
cfg->param_area = MAX (cfg->param_area, cinfo->stack_usage);
cfg->flags |= MONO_CFG_HAS_CALLS;
return call;
}
+void
+mono_arch_emit_call (MonoCompile *cfg, MonoCallInst *call)
+{
+ MonoInst *in, *ins;
+ MonoMethodSignature *sig;
+ int i, n;
+ CallInfo *cinfo;
+
+ sig = call->signature;
+ n = sig->param_count + sig->hasthis;
+
+ cinfo = calculate_sizes (sig, sig->pinvoke);
+
+ for (i = 0; i < n; ++i) {
+ ArgInfo *ainfo = cinfo->args + i;
+ MonoType *t;
+
+ if (i >= sig->hasthis)
+ t = sig->params [i - sig->hasthis];
+ else
+ t = &mono_defaults.int_class->byval_arg;
+ t = mono_type_get_underlying_type (t);
+
+ if ((sig->call_convention == MONO_CALL_VARARG) && (i == sig->sentinelpos)) {
+ /* FIXME: */
+ NOT_IMPLEMENTED;
+ }
+
+ in = call->args [i];
+
+ if (ainfo->regtype == RegTypeGeneral) {
+ if (!t->byref && ((t->type == MONO_TYPE_I8) || (t->type == MONO_TYPE_U8))) {
+ MONO_INST_NEW (cfg, ins, OP_MOVE);
+ ins->dreg = mono_alloc_ireg (cfg);
+ ins->sreg1 = in->dreg + 1;
+ MONO_ADD_INS (cfg->cbb, ins);
+ mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, ainfo->reg, FALSE);
+
+ MONO_INST_NEW (cfg, ins, OP_MOVE);
+ ins->dreg = mono_alloc_ireg (cfg);
+ ins->sreg1 = in->dreg + 2;
+ MONO_ADD_INS (cfg->cbb, ins);
+ mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, ainfo->reg + 1, FALSE);
+ } else {
+ MONO_INST_NEW (cfg, ins, OP_MOVE);
+ ins->dreg = mono_alloc_ireg (cfg);
+ ins->sreg1 = in->dreg;
+ MONO_ADD_INS (cfg->cbb, ins);
+
+ mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, ainfo->reg, FALSE);
+ }
+ } else if (ainfo->regtype == RegTypeStructByAddr) {
+ if (ainfo->offset) {
+ MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, ppc_r1, ainfo->offset, in->dreg);
+ } else {
+ MONO_INST_NEW (cfg, ins, OP_MOVE);
+ ins->dreg = mono_alloc_ireg (cfg);
+ ins->sreg1 = in->dreg;
+ MONO_ADD_INS (cfg->cbb, ins);
+
+ mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, ainfo->reg, FALSE);
+ }
+ } else if (ainfo->regtype == RegTypeStructByVal) {
+ /* this is further handled in mono_arch_emit_outarg_vt () */
+ MONO_INST_NEW (cfg, ins, OP_OUTARG_VT);
+ ins->opcode = OP_OUTARG_VT;
+ ins->sreg1 = in->dreg;
+ ins->klass = in->klass;
+ ins->inst_p0 = call;
+ ins->inst_p1 = mono_mempool_alloc (cfg->mempool, sizeof (ArgInfo));
+ memcpy (ins->inst_p1, ainfo, sizeof (ArgInfo));
+ MONO_ADD_INS (cfg->cbb, ins);
+ } else if (ainfo->regtype == RegTypeBase) {
+ if (!t->byref && ((t->type == MONO_TYPE_I8) || (t->type == MONO_TYPE_U8))) {
+ MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI8_MEMBASE_REG, ppc_r1, ainfo->offset, in->dreg);
+ } else if (!t->byref && ((t->type == MONO_TYPE_R4) || (t->type == MONO_TYPE_R8))) {
+ if (t->type == MONO_TYPE_R8)
+ MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORER8_MEMBASE_REG, ppc_r1, ainfo->offset, in->dreg);
+ else
+ MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORER4_MEMBASE_REG, ppc_r1, ainfo->offset, in->dreg);
+ } else {
+ MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, ppc_r1, ainfo->offset, in->dreg);
+ }
+ } else if (ainfo->regtype == RegTypeFP) {
+ MONO_INST_NEW (cfg, ins, OP_FMOVE);
+ ins->dreg = mono_alloc_freg (cfg);
+ ins->sreg1 = in->dreg;
+ MONO_ADD_INS (cfg->cbb, ins);
+
+ mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, ainfo->reg, TRUE);
+ cfg->flags |= MONO_CFG_HAS_FPOUT;
+ } else {
+ g_assert_not_reached ();
+ }
+ }
+
+ if (cinfo->struct_ret) {
+ MonoInst *vtarg;
+
+ MONO_INST_NEW (cfg, vtarg, OP_MOVE);
+ vtarg->sreg1 = call->vret_var->dreg;
+ vtarg->dreg = mono_alloc_preg (cfg);
+ MONO_ADD_INS (cfg->cbb, vtarg);
+
+ mono_call_inst_add_outarg_reg (cfg, call, vtarg->dreg, cinfo->struct_ret, FALSE);
+ }
+
+ call->stack_usage = cinfo->stack_usage;
+ cfg->param_area = MAX (cfg->param_area, cinfo->stack_usage);
+ cfg->flags |= MONO_CFG_HAS_CALLS;
+
+ g_free (cinfo);
+}
+
+void
+mono_arch_emit_outarg_vt (MonoCompile *cfg, MonoInst *ins, MonoInst *src)
+{
+ MonoCallInst *call = (MonoCallInst*)ins->inst_p0;
+ ArgInfo *ainfo = ins->inst_p1;
+ int ovf_size = ainfo->vtsize;
+ int doffset = ainfo->offset;
+ int i, soffset, dreg;
+
+ /* FIXME: handle darwin's 1/2 byte structs */
+ soffset = 0;
+ for (i = 0; i < ainfo->size; ++i) {
+ dreg = mono_alloc_ireg (cfg);
+ MONO_EMIT_NEW_LOAD_MEMBASE (cfg, dreg, src->dreg, soffset);
+ mono_call_inst_add_outarg_reg (cfg, call, dreg, ainfo->reg + i, FALSE);
+ soffset += sizeof (gpointer);
+ }
+ //g_print ("vt size: %d at R%d + %d\n", doffset, vt->inst_basereg, vt->inst_offset);
+ if (ovf_size != 0)
+ mini_emit_memcpy2 (cfg, ppc_r1, doffset, src->dreg, soffset, ovf_size * sizeof (gpointer), 0);
+}
+
+void
+mono_arch_emit_setret (MonoCompile *cfg, MonoMethod *method, MonoInst *val)
+{
+ MonoType *ret = mono_type_get_underlying_type (mono_method_signature (method)->ret);
+
+ if (!ret->byref) {
+ if (ret->type == MONO_TYPE_I8 || ret->type == MONO_TYPE_U8) {
+ MonoInst *ins;
+
+ MONO_INST_NEW (cfg, ins, OP_SETLRET);
+ ins->sreg1 = val->dreg + 1;
+ ins->sreg2 = val->dreg + 2;
+ MONO_ADD_INS (cfg->cbb, ins);
+ return;
+ }
+ if (ret->type == MONO_TYPE_R8 || ret->type == MONO_TYPE_R4) {
+ MONO_EMIT_NEW_UNALU (cfg, OP_FMOVE, cfg->ret->dreg, val->dreg);
+ return;
+ }
+ }
+ MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, cfg->ret->dreg, val->dreg);
+}
+
+/* FIXME: this is just a useless hint: fix the interface to include the opcode */
+gboolean
+mono_arch_is_inst_imm (gint64 imm)
+{
+ return TRUE;
+}
+
/*
* Allow tracing to work with this interface (with an optional argument)
*/
void
mono_arch_peephole_pass_2 (MonoCompile *cfg, MonoBasicBlock *bb)
{
- MonoInst *ins, *n;
+ MonoInst *ins, *n, *last_ins = NULL;
MONO_BB_FOR_EACH_INS_SAFE (bb, n, ins) {
- MonoInst *last_ins = mono_inst_list_prev (&ins->node, &bb->ins_list);
switch (ins->opcode) {
case OP_MUL_IMM:
/* remove unnecessary multiplication with 1 */
if (last_ins && (last_ins->opcode == OP_STOREI1_MEMBASE_REG) &&
ins->inst_basereg == last_ins->inst_destbasereg &&
ins->inst_offset == last_ins->inst_offset) {
- ins->opcode = (ins->opcode == OP_LOADI1_MEMBASE) ? CEE_CONV_I1 : CEE_CONV_U1;
+ ins->opcode = (ins->opcode == OP_LOADI1_MEMBASE) ? OP_ICONV_TO_I1 : OP_ICONV_TO_U1;
ins->sreg1 = last_ins->sreg1;
}
break;
if (last_ins && (last_ins->opcode == OP_STOREI2_MEMBASE_REG) &&
ins->inst_basereg == last_ins->inst_destbasereg &&
ins->inst_offset == last_ins->inst_offset) {
- ins->opcode = (ins->opcode == OP_LOADI2_MEMBASE) ? CEE_CONV_I2 : CEE_CONV_U2;
+ ins->opcode = (ins->opcode == OP_LOADI2_MEMBASE) ? OP_ICONV_TO_I2 : OP_ICONV_TO_U2;
ins->sreg1 = last_ins->sreg1;
}
break;
- case CEE_CONV_I4:
- case CEE_CONV_U4:
case OP_MOVE:
ins->opcode = OP_MOVE;
/*
}
break;
}
+ last_ins = ins;
+ ins = ins->next;
+ }
+ bb->last_ins = last_ins;
+}
+
+void
+mono_arch_decompose_opts (MonoCompile *cfg, MonoInst *ins)
+{
+ switch (ins->opcode) {
+ case OP_ICONV_TO_R4: {
+ /* FIXME: change precision for CEE_CONV_R4 */
+ static const guint64 adjust_val = 0x4330000080000000ULL;
+ int msw_reg = mono_regstate_next_int (cfg->rs);
+ int xored = mono_regstate_next_int (cfg->rs);
+ int adj_reg = mono_regstate_next_float (cfg->rs);
+ int tmp_reg = mono_regstate_next_float (cfg->rs);
+ MONO_EMIT_NEW_ICONST (cfg, msw_reg, 0x43300000);
+ MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, ppc_sp, -8, msw_reg);
+ MONO_EMIT_NEW_BIALU_IMM (cfg, OP_XOR_IMM, xored, ins->sreg1, 0x80000000);
+ MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, ppc_sp, -4, xored);
+ MONO_EMIT_NEW_LOAD_R8 (cfg, adj_reg, &adjust_val);
+ MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADR8_MEMBASE, tmp_reg, ppc_sp, -8);
+ MONO_EMIT_NEW_BIALU (cfg, OP_FSUB, ins->dreg, tmp_reg, adj_reg);
+ ins->opcode = OP_NOP;
+ break;
+ }
}
}
PPC_BR_LT
};
-#define NEW_INS(cfg,ins,dest,op) do { \
+#define NEW_INS(cfg,dest,op) do { \
(dest) = mono_mempool_alloc0 ((cfg)->mempool, sizeof (MonoInst)); \
(dest)->opcode = (op); \
- MONO_INST_LIST_ADD_TAIL (&(dest)->node, &(ins)->node); \
+ mono_bblock_insert_after_ins (bb, last_ins, (dest)); \
} while (0)
static int
{
switch (op) {
case OP_ADD_IMM:
- return CEE_ADD;
+ return OP_IADD;
case OP_SUB_IMM:
- return CEE_SUB;
+ return OP_ISUB;
case OP_AND_IMM:
- return CEE_AND;
+ return OP_IAND;
case OP_COMPARE_IMM:
return OP_COMPARE;
+ case OP_ICOMPARE_IMM:
+ return OP_ICOMPARE;
case OP_ADDCC_IMM:
- return OP_ADDCC;
+ return OP_IADDCC;
case OP_ADC_IMM:
- return OP_ADC;
+ return OP_IADC;
case OP_SUBCC_IMM:
- return OP_SUBCC;
+ return OP_ISUBCC;
case OP_SBB_IMM:
- return OP_SBB;
+ return OP_ISBB;
case OP_OR_IMM:
- return CEE_OR;
+ return OP_IOR;
case OP_XOR_IMM:
- return CEE_XOR;
+ return OP_IXOR;
case OP_MUL_IMM:
- return CEE_MUL;
+ return OP_IMUL;
case OP_LOAD_MEMBASE:
return OP_LOAD_MEMINDEX;
case OP_LOADI4_MEMBASE:
case OP_STOREI4_MEMBASE_IMM:
return OP_STOREI4_MEMBASE_REG;
}
- g_assert_not_reached ();
+ return mono_op_imm_to_op (op);
}
+//#define map_to_reg_reg_op(op) (cfg->new_ir? mono_op_imm_to_op (op): map_to_reg_reg_op (op))
+
#define compare_opcode_is_unsigned(opcode) \
(((opcode) >= CEE_BNE_UN && (opcode) <= CEE_BLT_UN) || \
+ (((opcode) >= OP_IBNE_UN && (opcode) <= OP_IBLT_UN) || \
((opcode) >= OP_COND_EXC_NE_UN && (opcode) <= OP_COND_EXC_LT_UN) || \
- ((opcode) == OP_CLT_UN || (opcode) == OP_CGT_UN))
+ ((opcode) >= OP_COND_EXC_INE_UN && (opcode) <= OP_COND_EXC_ILT_UN) || \
+ ((opcode) == OP_CLT_UN || (opcode) == OP_CGT_UN || (opcode) == OP_ICLT_UN || (opcode) == OP_ICGT_UN)))
/*
* Remove from the instruction list the instructions that can't be
* represented with very simple instructions with no register
void
mono_arch_lowering_pass (MonoCompile *cfg, MonoBasicBlock *bb)
{
- MonoInst *ins, *next, *temp;
+ MonoInst *ins, *next, *temp, *last_ins = NULL;
int imm;
/* setup the virtual reg allocator */
MONO_BB_FOR_EACH_INS (bb, ins) {
loop_start:
switch (ins->opcode) {
+ case OP_IDIV_UN_IMM:
+ case OP_IDIV_IMM:
+ case OP_IREM_IMM:
+ case OP_IREM_UN_IMM:
+ NEW_INS (cfg, temp, OP_ICONST);
+ temp->inst_c0 = ins->inst_imm;
+ temp->dreg = mono_regstate_next_int (cfg->rs);
+ ins->sreg2 = temp->dreg;
+ if (ins->opcode == OP_IDIV_IMM)
+ ins->opcode = OP_IDIV;
+ else if (ins->opcode == OP_IREM_IMM)
+ ins->opcode = OP_IREM;
+ else if (ins->opcode == OP_IDIV_UN_IMM)
+ ins->opcode = OP_IDIV_UN;
+ else if (ins->opcode == OP_IREM_UN_IMM)
+ ins->opcode = OP_IREM_UN;
+ last_ins = temp;
+ /* handle rem separately */
+ goto loop_start;
+ case OP_IREM:
+ case OP_IREM_UN: {
+ MonoInst *mul;
+ /* we change a rem dest, src1, src2 to
+ * div temp1, src1, src2
+ * mul temp2, temp1, src2
+ * sub dest, src1, temp2
+ */
+ NEW_INS (cfg, mul, OP_IMUL);
+ NEW_INS (cfg, temp, ins->opcode == OP_IREM? OP_IDIV: OP_IDIV_UN);
+ temp->sreg1 = ins->sreg1;
+ temp->sreg2 = ins->sreg2;
+ temp->dreg = mono_regstate_next_int (cfg->rs);
+ mul->sreg1 = temp->dreg;
+ mul->sreg2 = ins->sreg2;
+ mul->dreg = mono_regstate_next_int (cfg->rs);
+ ins->opcode = OP_ISUB;
+ ins->sreg2 = mul->dreg;
+ break;
+ }
+ case OP_IADD_IMM:
case OP_ADD_IMM:
case OP_ADDCC_IMM:
if (!ppc_is_imm16 (ins->inst_imm)) {
- NEW_INS (cfg, ins, temp, OP_ICONST);
+ NEW_INS (cfg, temp, OP_ICONST);
temp->inst_c0 = ins->inst_imm;
temp->dreg = mono_regstate_next_int (cfg->rs);
ins->sreg2 = temp->dreg;
ins->opcode = map_to_reg_reg_op (ins->opcode);
}
break;
+ case OP_ISUB_IMM:
case OP_SUB_IMM:
if (!ppc_is_imm16 (-ins->inst_imm)) {
- NEW_INS (cfg, ins, temp, OP_ICONST);
+ NEW_INS (cfg, temp, OP_ICONST);
temp->inst_c0 = ins->inst_imm;
temp->dreg = mono_regstate_next_int (cfg->rs);
ins->sreg2 = temp->dreg;
ins->opcode = map_to_reg_reg_op (ins->opcode);
}
break;
+ case OP_IAND_IMM:
+ case OP_IOR_IMM:
+ case OP_IXOR_IMM:
case OP_AND_IMM:
case OP_OR_IMM:
case OP_XOR_IMM:
if ((ins->inst_imm & 0xffff0000) && (ins->inst_imm & 0xffff)) {
- NEW_INS (cfg, ins, temp, OP_ICONST);
+ NEW_INS (cfg, temp, OP_ICONST);
temp->inst_c0 = ins->inst_imm;
temp->dreg = mono_regstate_next_int (cfg->rs);
ins->sreg2 = temp->dreg;
ins->opcode = map_to_reg_reg_op (ins->opcode);
}
break;
+ case OP_ISBB_IMM:
+ case OP_IADC_IMM:
case OP_SBB_IMM:
case OP_SUBCC_IMM:
case OP_ADC_IMM:
- NEW_INS (cfg, ins, temp, OP_ICONST);
+ NEW_INS (cfg, temp, OP_ICONST);
temp->inst_c0 = ins->inst_imm;
temp->dreg = mono_regstate_next_int (cfg->rs);
ins->sreg2 = temp->dreg;
ins->opcode = map_to_reg_reg_op (ins->opcode);
break;
case OP_COMPARE_IMM:
- next = mono_inst_list_next (&ins->node, &bb->ins_list);
+ case OP_ICOMPARE_IMM:
+ next = ins->next;
g_assert(next);
if (compare_opcode_is_unsigned (next->opcode)) {
if (!ppc_is_uimm16 (ins->inst_imm)) {
- NEW_INS (cfg, ins, temp, OP_ICONST);
+ NEW_INS (cfg, temp, OP_ICONST);
temp->inst_c0 = ins->inst_imm;
temp->dreg = mono_regstate_next_int (cfg->rs);
ins->sreg2 = temp->dreg;
}
} else {
if (!ppc_is_imm16 (ins->inst_imm)) {
- NEW_INS (cfg, ins, temp, OP_ICONST);
+ NEW_INS (cfg, temp, OP_ICONST);
temp->inst_c0 = ins->inst_imm;
temp->dreg = mono_regstate_next_int (cfg->rs);
ins->sreg2 = temp->dreg;
}
}
break;
+ case OP_IMUL_IMM:
case OP_MUL_IMM:
if (ins->inst_imm == 1) {
ins->opcode = OP_MOVE;
break;
}
if (!ppc_is_imm16 (ins->inst_imm)) {
- NEW_INS (cfg, ins, temp, OP_ICONST);
+ NEW_INS (cfg, temp, OP_ICONST);
temp->inst_c0 = ins->inst_imm;
temp->dreg = mono_regstate_next_int (cfg->rs);
ins->sreg2 = temp->dreg;
ins->opcode = map_to_reg_reg_op (ins->opcode);
}
break;
+ case OP_LOCALLOC_IMM:
+ NEW_INS (cfg, temp, OP_ICONST);
+ temp->inst_c0 = ins->inst_imm;
+ temp->dreg = mono_regstate_next_int (cfg->rs);
+ ins->sreg1 = temp->dreg;
+ ins->opcode = OP_LOCALLOC;
+ break;
case OP_LOAD_MEMBASE:
case OP_LOADI4_MEMBASE:
case OP_LOADU4_MEMBASE:
*/
if (ppc_is_imm16 (ins->inst_offset))
break;
- NEW_INS (cfg, ins, temp, OP_ICONST);
+ NEW_INS (cfg, temp, OP_ICONST);
temp->inst_c0 = ins->inst_offset;
temp->dreg = mono_regstate_next_int (cfg->rs);
ins->sreg2 = temp->dreg;
case OP_STOREI1_MEMBASE_IMM:
case OP_STOREI2_MEMBASE_IMM:
case OP_STOREI4_MEMBASE_IMM:
- NEW_INS (cfg, ins, temp, OP_ICONST);
+ NEW_INS (cfg, temp, OP_ICONST);
temp->inst_c0 = ins->inst_imm;
temp->dreg = mono_regstate_next_int (cfg->rs);
ins->sreg1 = temp->dreg;
ins->opcode = map_to_reg_reg_op (ins->opcode);
+ last_ins = temp;
goto loop_start; /* make it handle the possibly big ins->inst_offset */
case OP_R8CONST:
case OP_R4CONST:
- NEW_INS (cfg, ins, temp, OP_ICONST);
+ NEW_INS (cfg, temp, OP_ICONST);
temp->inst_c0 = ins->inst_p0;
temp->dreg = mono_regstate_next_int (cfg->rs);
ins->inst_basereg = temp->dreg;
ins->inst_offset = 0;
ins->opcode = ins->opcode == OP_R4CONST? OP_LOADR4_MEMBASE: OP_LOADR8_MEMBASE;
+ last_ins = temp;
/* make it handle the possibly big ins->inst_offset
* later optimize to use lis + load_membase
*/
goto loop_start;
}
+ last_ins = ins;
}
+ bb->last_ins = last_ins;
bb->max_vreg = cfg->rs->next_vreg;
}
// g_print ("patched with 0x%08x\n", ins);
}
+/*
+ * emit_move_return_value:
+ *
+ *   Fix up the destination register of a call instruction INS after the
+ * call has been emitted. For the float-call opcodes the result arrives in
+ * ppc_f1, so it is copied into ins->dreg when they differ; all other call
+ * opcodes need no fixup here. Returns the updated native code pointer.
+ */
+static guint8*
+emit_move_return_value (MonoCompile *cfg, MonoInst *ins, guint8 *code)
+{
+ switch (ins->opcode) {
+ case OP_FCALL:
+ case OP_FCALL_REG:
+ case OP_FCALL_MEMBASE:
+ /* float result is in ppc_f1; move only if the dreg differs */
+ if (ins->dreg != ppc_f1)
+ ppc_fmr (code, ins->dreg, ppc_f1);
+ break;
+ }
+
+ return code;
+}
+
void
mono_arch_output_basic_block (MonoCompile *cfg, MonoBasicBlock *bb)
{
MonoCallInst *call;
guint offset;
guint8 *code = cfg->native_code + cfg->code_len;
+ MonoInst *last_ins = NULL;
guint last_offset = 0;
int max_len, cpos;
mono_debug_record_line_number (cfg, ins, offset);
switch (ins->opcode) {
+ case OP_NOP:
+ case OP_DUMMY_USE:
+ case OP_DUMMY_STORE:
+ case OP_NOT_REACHED:
+ case OP_NOT_NULL:
+ break;
case OP_TLS_GET:
emit_tls_access (code, ins->dreg, ins->inst_offset);
break;
ppc_lbzx (code, ins->dreg, ins->sreg2, ins->inst_basereg);
ppc_extsb (code, ins->dreg, ins->dreg);
break;
- case CEE_CONV_I1:
+ case OP_ICONV_TO_I1:
ppc_extsb (code, ins->dreg, ins->sreg1);
break;
- case CEE_CONV_I2:
+ case OP_ICONV_TO_I2:
ppc_extsh (code, ins->dreg, ins->sreg1);
break;
- case CEE_CONV_U1:
+ case OP_ICONV_TO_U1:
ppc_rlwinm (code, ins->dreg, ins->sreg1, 0, 24, 31);
break;
- case CEE_CONV_U2:
+ case OP_ICONV_TO_U2:
ppc_rlwinm (code, ins->dreg, ins->sreg1, 0, 16, 31);
break;
case OP_COMPARE:
- next = mono_inst_list_next (&ins->node, &bb->ins_list);
+ case OP_ICOMPARE:
+ next = ins->next;
if (next && compare_opcode_is_unsigned (next->opcode))
ppc_cmpl (code, 0, 0, ins->sreg1, ins->sreg2);
else
ppc_cmp (code, 0, 0, ins->sreg1, ins->sreg2);
break;
case OP_COMPARE_IMM:
- next = mono_inst_list_next (&ins->node, &bb->ins_list);
+ case OP_ICOMPARE_IMM:
+ next = ins->next;
if (next && compare_opcode_is_unsigned (next->opcode)) {
if (ppc_is_uimm16 (ins->inst_imm)) {
ppc_cmpli (code, 0, 0, ins->sreg1, (ins->inst_imm & 0xffff));
ppc_break (code);
break;
case OP_ADDCC:
+ case OP_IADDCC:
ppc_addc (code, ins->dreg, ins->sreg1, ins->sreg2);
break;
- case CEE_ADD:
+ case OP_IADD:
ppc_add (code, ins->dreg, ins->sreg1, ins->sreg2);
break;
case OP_ADC:
+ case OP_IADC:
ppc_adde (code, ins->dreg, ins->sreg1, ins->sreg2);
break;
case OP_ADDCC_IMM:
}
break;
case OP_ADD_IMM:
+ case OP_IADD_IMM:
if (ppc_is_imm16 (ins->inst_imm)) {
ppc_addi (code, ins->dreg, ins->sreg1, ins->inst_imm);
} else {
g_assert_not_reached ();
}
break;
- case CEE_ADD_OVF:
+ case OP_IADD_OVF:
/* check XER [0-3] (SO, OV, CA): we can't use mcrxr
*/
ppc_addo (code, ins->dreg, ins->sreg1, ins->sreg2);
ppc_andisd (code, ppc_r0, ppc_r0, (1<<14));
EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, "OverflowException");
break;
- case CEE_ADD_OVF_UN:
+ case OP_IADD_OVF_UN:
/* check XER [0-3] (SO, OV, CA): we can't use mcrxr
*/
ppc_addco (code, ins->dreg, ins->sreg1, ins->sreg2);
ppc_andisd (code, ppc_r0, ppc_r0, (1<<13));
EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, "OverflowException");
break;
- case CEE_SUB_OVF:
+ case OP_ISUB_OVF:
/* check XER [0-3] (SO, OV, CA): we can't use mcrxr
*/
ppc_subfo (code, ins->dreg, ins->sreg2, ins->sreg1);
ppc_andisd (code, ppc_r0, ppc_r0, (1<<14));
EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, "OverflowException");
break;
- case CEE_SUB_OVF_UN:
+ case OP_ISUB_OVF_UN:
/* check XER [0-3] (SO, OV, CA): we can't use mcrxr
*/
ppc_subfc (code, ins->dreg, ins->sreg2, ins->sreg1);
EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_TRUE, PPC_BR_EQ, "OverflowException");
break;
case OP_SUBCC:
+ case OP_ISUBCC:
ppc_subfc (code, ins->dreg, ins->sreg2, ins->sreg1);
break;
- case CEE_SUB:
+ case OP_ISUB:
ppc_subf (code, ins->dreg, ins->sreg2, ins->sreg1);
break;
case OP_SBB:
+ case OP_ISBB:
ppc_subfe (code, ins->dreg, ins->sreg2, ins->sreg1);
break;
case OP_SUB_IMM:
+ case OP_ISUB_IMM:
// we add the negated value
if (ppc_is_imm16 (-ins->inst_imm))
ppc_addi (code, ins->dreg, ins->sreg1, -ins->inst_imm);
case OP_PPC_SUBFZE:
ppc_subfze (code, ins->dreg, ins->sreg1);
break;
- case CEE_AND:
+ case OP_IAND:
/* FIXME: the ppc macros as inconsistent here: put dest as the first arg! */
ppc_and (code, ins->sreg1, ins->dreg, ins->sreg2);
break;
case OP_AND_IMM:
+ case OP_IAND_IMM:
if (!(ins->inst_imm & 0xffff0000)) {
ppc_andid (code, ins->sreg1, ins->dreg, ins->inst_imm);
} else if (!(ins->inst_imm & 0xffff)) {
g_assert_not_reached ();
}
break;
- case CEE_DIV: {
+ case OP_IDIV: {
guint32 *divisor_is_m1;
/* XER format: SO, OV, CA, reserved [21 bits], count [8 bits]
*/
EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, "DivideByZeroException");
break;
}
- case CEE_DIV_UN:
+ case OP_IDIV_UN:
ppc_divwuod (code, ins->dreg, ins->sreg1, ins->sreg2);
ppc_mfspr (code, ppc_r0, ppc_xer);
ppc_andisd (code, ppc_r0, ppc_r0, (1<<14));
EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, "DivideByZeroException");
break;
case OP_DIV_IMM:
- case CEE_REM:
- case CEE_REM_UN:
+ case OP_IREM:
+ case OP_IREM_UN:
case OP_REM_IMM:
g_assert_not_reached ();
- case CEE_OR:
+ case OP_IOR:
ppc_or (code, ins->dreg, ins->sreg1, ins->sreg2);
break;
case OP_OR_IMM:
+ case OP_IOR_IMM:
if (!(ins->inst_imm & 0xffff0000)) {
ppc_ori (code, ins->sreg1, ins->dreg, ins->inst_imm);
} else if (!(ins->inst_imm & 0xffff)) {
g_assert_not_reached ();
}
break;
- case CEE_XOR:
+ case OP_IXOR:
ppc_xor (code, ins->dreg, ins->sreg1, ins->sreg2);
break;
+ case OP_IXOR_IMM:
case OP_XOR_IMM:
if (!(ins->inst_imm & 0xffff0000)) {
ppc_xori (code, ins->sreg1, ins->dreg, ins->inst_imm);
g_assert_not_reached ();
}
break;
- case CEE_SHL:
+ case OP_ISHL:
ppc_slw (code, ins->sreg1, ins->dreg, ins->sreg2);
break;
case OP_SHL_IMM:
+ case OP_ISHL_IMM:
ppc_rlwinm (code, ins->dreg, ins->sreg1, (ins->inst_imm & 0x1f), 0, (31 - (ins->inst_imm & 0x1f)));
break;
- case CEE_SHR:
+ case OP_ISHR:
ppc_sraw (code, ins->dreg, ins->sreg1, ins->sreg2);
break;
case OP_SHR_IMM:
+ case OP_ISHR_IMM:
ppc_srawi (code, ins->dreg, ins->sreg1, (ins->inst_imm & 0x1f));
break;
case OP_SHR_UN_IMM:
+ case OP_ISHR_UN_IMM:
if (ins->inst_imm)
ppc_rlwinm (code, ins->dreg, ins->sreg1, (32 - (ins->inst_imm & 0x1f)), (ins->inst_imm & 0x1f), 31);
else
ppc_mr (code, ins->dreg, ins->sreg1);
break;
- case CEE_SHR_UN:
+ case OP_ISHR_UN:
ppc_srw (code, ins->dreg, ins->sreg1, ins->sreg2);
break;
- case CEE_NOT:
+ case OP_INOT:
ppc_not (code, ins->dreg, ins->sreg1);
break;
- case CEE_NEG:
+ case OP_INEG:
ppc_neg (code, ins->dreg, ins->sreg1);
break;
- case CEE_MUL:
+ case OP_IMUL:
ppc_mullw (code, ins->dreg, ins->sreg1, ins->sreg2);
break;
+ case OP_IMUL_IMM:
case OP_MUL_IMM:
if (ppc_is_imm16 (ins->inst_imm)) {
ppc_mulli (code, ins->dreg, ins->sreg1, ins->inst_imm);
g_assert_not_reached ();
}
break;
- case CEE_MUL_OVF:
+ case OP_IMUL_OVF:
/* we annot use mcrxr, since it's not implemented on some processors
* XER format: SO, OV, CA, reserved [21 bits], count [8 bits]
*/
ppc_andisd (code, ppc_r0, ppc_r0, (1<<14));
EMIT_COND_SYSTEM_EXCEPTION_FLAGS (PPC_BR_FALSE, PPC_BR_EQ, "OverflowException");
break;
- case CEE_MUL_OVF_UN:
+ case OP_IMUL_OVF_UN:
/* we first multiply to get the high word and compare to 0
* to set the flags, then the result is discarded and then
* we multiply to get the lower * bits result
ppc_lis (code, ins->dreg, 0);
ppc_ori (code, ins->dreg, ins->dreg, 0);
break;
- case CEE_CONV_I4:
- case CEE_CONV_U4:
+ case OP_ICONV_TO_I4:
+ case OP_ICONV_TO_U4:
case OP_MOVE:
ppc_mr (code, ins->dreg, ins->sreg1);
break;
case OP_FCALL:
case OP_LCALL:
case OP_VCALL:
+ case OP_VCALL2:
case OP_VOIDCALL:
case OP_CALL:
call = (MonoCallInst*)ins;
} else {
ppc_bl (code, 0);
}
+ /* FIXME: this should be handled somewhere else in the new jit */
+ code = emit_move_return_value (cfg, ins, code);
break;
case OP_FCALL_REG:
case OP_LCALL_REG:
case OP_VCALL_REG:
+ case OP_VCALL2_REG:
case OP_VOIDCALL_REG:
case OP_CALL_REG:
ppc_mtlr (code, ins->sreg1);
ppc_blrl (code);
+ /* FIXME: this should be handled somewhere else in the new jit */
+ code = emit_move_return_value (cfg, ins, code);
break;
case OP_FCALL_MEMBASE:
case OP_LCALL_MEMBASE:
case OP_VCALL_MEMBASE:
+ case OP_VCALL2_MEMBASE:
case OP_VOIDCALL_MEMBASE:
case OP_CALL_MEMBASE:
ppc_lwz (code, ppc_r0, ins->inst_offset, ins->sreg1);
ppc_mtlr (code, ppc_r0);
ppc_blrl (code);
+ /* FIXME: this should be handled somewhere else in the new jit */
+ code = emit_move_return_value (cfg, ins, code);
break;
case OP_OUTARG:
g_assert_not_reached ();
}
break;
}
- case OP_START_HANDLER:
+ case OP_START_HANDLER: {
+ MonoInst *spvar = mono_find_spvar_for_region (cfg, bb->region);
ppc_mflr (code, ppc_r0);
- if (ppc_is_imm16 (ins->inst_left->inst_offset)) {
- ppc_stw (code, ppc_r0, ins->inst_left->inst_offset, ins->inst_left->inst_basereg);
+ if (ppc_is_imm16 (spvar->inst_offset)) {
+ ppc_stw (code, ppc_r0, spvar->inst_offset, spvar->inst_basereg);
} else {
- ppc_load (code, ppc_r11, ins->inst_left->inst_offset);
- ppc_stwx (code, ppc_r0, ppc_r11, ins->inst_left->inst_basereg);
+ ppc_load (code, ppc_r11, spvar->inst_offset);
+ ppc_stwx (code, ppc_r0, ppc_r11, spvar->inst_basereg);
}
break;
- case OP_ENDFILTER:
+ }
+ case OP_ENDFILTER: {
+ MonoInst *spvar = mono_find_spvar_for_region (cfg, bb->region);
if (ins->sreg1 != ppc_r3)
ppc_mr (code, ppc_r3, ins->sreg1);
- if (ppc_is_imm16 (ins->inst_left->inst_offset)) {
- ppc_lwz (code, ppc_r0, ins->inst_left->inst_offset, ins->inst_left->inst_basereg);
+ if (ppc_is_imm16 (spvar->inst_offset)) {
+ ppc_lwz (code, ppc_r0, spvar->inst_offset, spvar->inst_basereg);
} else {
- ppc_load (code, ppc_r11, ins->inst_left->inst_offset);
- ppc_lwzx (code, ppc_r0, ins->inst_left->inst_basereg, ppc_r11);
+ ppc_load (code, ppc_r11, spvar->inst_offset);
+ ppc_lwzx (code, ppc_r0, spvar->inst_basereg, ppc_r11);
}
ppc_mtlr (code, ppc_r0);
ppc_blr (code);
break;
- case OP_ENDFINALLY:
- ppc_lwz (code, ppc_r0, ins->inst_left->inst_offset, ins->inst_left->inst_basereg);
+ }
+ case OP_ENDFINALLY: {
+ MonoInst *spvar = mono_find_spvar_for_region (cfg, bb->region);
+ ppc_lwz (code, ppc_r0, spvar->inst_offset, spvar->inst_basereg);
ppc_mtlr (code, ppc_r0);
ppc_blr (code);
break;
+ }
case OP_CALL_HANDLER:
mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_BB, ins->inst_target_bb);
ppc_bl (code, 0);
ppc_bcctr (code, PPC_BR_ALWAYS, 0);
break;
case OP_CEQ:
+ case OP_ICEQ:
ppc_li (code, ins->dreg, 0);
ppc_bc (code, PPC_BR_FALSE, PPC_BR_EQ, 2);
ppc_li (code, ins->dreg, 1);
break;
case OP_CLT:
case OP_CLT_UN:
+ case OP_ICLT:
+ case OP_ICLT_UN:
ppc_li (code, ins->dreg, 1);
ppc_bc (code, PPC_BR_TRUE, PPC_BR_LT, 2);
ppc_li (code, ins->dreg, 0);
break;
case OP_CGT:
case OP_CGT_UN:
+ case OP_ICGT:
+ case OP_ICGT_UN:
ppc_li (code, ins->dreg, 1);
ppc_bc (code, PPC_BR_TRUE, PPC_BR_GT, 2);
ppc_li (code, ins->dreg, 0);
case OP_COND_EXC_LE_UN:
EMIT_COND_SYSTEM_EXCEPTION (ins->opcode - OP_COND_EXC_EQ, ins->inst_p1);
break;
+ case OP_COND_EXC_IEQ:
+ case OP_COND_EXC_INE_UN:
+ case OP_COND_EXC_ILT:
+ case OP_COND_EXC_ILT_UN:
+ case OP_COND_EXC_IGT:
+ case OP_COND_EXC_IGT_UN:
+ case OP_COND_EXC_IGE:
+ case OP_COND_EXC_IGE_UN:
+ case OP_COND_EXC_ILE:
+ case OP_COND_EXC_ILE_UN:
+ EMIT_COND_SYSTEM_EXCEPTION (ins->opcode - OP_COND_EXC_IEQ, ins->inst_p1);
+ break;
case OP_COND_EXC_C:
/* check XER [0-3] (SO, OV, CA): we can't use mcrxr
*/
case OP_COND_EXC_NO:
g_assert_not_reached ();
break;
- case CEE_BEQ:
- case CEE_BNE_UN:
- case CEE_BLT:
- case CEE_BLT_UN:
- case CEE_BGT:
- case CEE_BGT_UN:
- case CEE_BGE:
- case CEE_BGE_UN:
- case CEE_BLE:
- case CEE_BLE_UN:
- EMIT_COND_BRANCH (ins, ins->opcode - CEE_BEQ);
+ case OP_IBEQ:
+ case OP_IBNE_UN:
+ case OP_IBLT:
+ case OP_IBLT_UN:
+ case OP_IBGT:
+ case OP_IBGT_UN:
+ case OP_IBGE:
+ case OP_IBGE_UN:
+ case OP_IBLE:
+ case OP_IBLE_UN:
+ EMIT_COND_BRANCH (ins, ins->opcode - OP_IBEQ);
break;
/* floating point opcodes */
g_assert_not_reached ();
/* Implemented as helper calls */
break;
+ case OP_LCONV_TO_OVF_I4_2:
case OP_LCONV_TO_OVF_I: {
guint32 *negative_branch, *msword_positive_branch, *msword_negative_branch, *ovf_ex_target;
// Check if its negative
ppc_rlwinmd (code, ins->sreg1, ins->sreg1, 1, 31, 31);
EMIT_COND_SYSTEM_EXCEPTION (CEE_BEQ - CEE_BEQ, "ArithmeticException");
break;
+ case OP_JUMP_TABLE:
+ mono_add_patch_info (cfg, offset, (MonoJumpInfoType)ins->inst_i1, ins->inst_p0);
+ ppc_load (code, ins->dreg, 0x0f0f0f0f);
+ break;
}
default:
g_warning ("unknown opcode %s in %s()\n", mono_inst_name (ins->opcode), __FUNCTION__);
cpos += max_len;
+ last_ins = ins;
last_offset = offset;
}
return ins;
}
+/*
+ * mono_arch_emit_inst_for_method:
+ *
+ *   Arch hook for emitting an intrinsic instruction sequence for CMETHOD.
+ * Returning NULL tells the caller no intrinsic is available, so the method
+ * is compiled normally. No intrinsics are implemented for this backend yet.
+ */
+MonoInst*
+mono_arch_emit_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
+{
+ /* FIXME: no arch-specific intrinsics implemented yet; fall back to IL */
+ return NULL;
+}
+
gboolean
mono_arch_print_tree (MonoInst *tree, int arity)
{
#define MONO_ARCH_IMT_REG ppc_r12
#define MONO_ARCH_COMMON_VTABLE_TRAMPOLINE 1
+#define MONO_ARCH_ENABLE_NORMALIZE_OPCODES 1
+#define MONO_ARCH_NO_IOV_CHECK 1
+#define MONO_ARCH_HAVE_DECOMPOSE_OPTS 1
+
#define MONO_ARCH_USE_SIGACTION 1
#define MONO_ARCH_NEED_DIV_CHECK 1
s390_ldr (code, ins->dreg, ins->sreg1); \
}
+#define MONO_EMIT_NEW_MOVE2(cfg,dest,offset,src,imm,size) do { \
+ MonoInst *inst; \
+ int tmpr = 0; \
+ int sReg, dReg; \
+ MONO_INST_NEW (cfg, inst, OP_NOP); \
+ if (size > 256) { \
+ tmpr = mono_alloc_preg (cfg); \
+ MONO_EMIT_NEW_ICONST(cfg,tmpr,size); \
+ inst->dreg = dest; \
+ inst->inst_offset = offset; \
+ inst->sreg1 = src; \
+ inst->inst_imm = imm; \
+ inst->sreg2 = tmpr; \
+ } else { \
+ if (s390_is_uimm12(offset)) { \
+ inst->dreg = dest; \
+ inst->inst_offset = offset; \
+ } else { \
+ dReg = mono_alloc_preg (cfg); \
+ MONO_EMIT_NEW_BIALU_IMM(cfg, OP_ADD_IMM, \
+ dReg, dest, offset); \
+ inst->dreg = dReg; \
+ inst->inst_offset = 0; \
+ } \
+ if (s390_is_uimm12(imm)) { \
+ inst->sreg1 = src; \
+ inst->inst_imm = imm; \
+ } else { \
+ sReg = mono_alloc_preg (cfg); \
+ MONO_EMIT_NEW_BIALU_IMM(cfg, OP_ADD_IMM, \
+ sReg, src, imm); \
+ inst->sreg1 = sReg; \
+ inst->inst_imm = 0; \
+ } \
+ } \
+ inst->opcode = OP_S390_MOVE; \
+ inst->backend.size = size; \
+ MONO_ADD_INS (cfg->cbb, inst); \
+ } while (0)
+
+#define MONO_OUTPUT_VTR2(cfg, size, dr, sr, so) do { \
+ int reg = mono_alloc_preg (cfg); \
+ switch (size) { \
+ case 0: \
+ MONO_EMIT_NEW_ICONST(cfg, reg, 0); \
+ mono_call_inst_add_outarg_reg(cfg, call, reg, dr, FALSE); \
+ break; \
+ case 1: \
+ MONO_EMIT_NEW_LOAD_MEMBASE_OP(cfg, OP_LOADU1_MEMBASE, \
+ reg, sr, so); \
+ mono_call_inst_add_outarg_reg(cfg, call, reg, dr, FALSE); \
+ break; \
+ case 2: \
+ MONO_EMIT_NEW_LOAD_MEMBASE_OP(cfg, OP_LOADU2_MEMBASE, \
+ reg, sr, so); \
+ mono_call_inst_add_outarg_reg(cfg, call, reg, dr, FALSE); \
+ break; \
+ case 4: \
+ MONO_EMIT_NEW_LOAD_MEMBASE_OP(cfg, OP_LOAD_MEMBASE, \
+ reg, sr, so); \
+ mono_call_inst_add_outarg_reg(cfg, call, reg, dr, FALSE); \
+ break; \
+ case 8: \
+ MONO_EMIT_NEW_LOAD_MEMBASE_OP(cfg, OP_LOAD_MEMBASE, \
+ reg, sr, so); \
+ mono_call_inst_add_outarg_reg(cfg, call, reg, dr, FALSE); \
+ reg = mono_alloc_preg (cfg); \
+ MONO_EMIT_NEW_LOAD_MEMBASE_OP(cfg, OP_LOAD_MEMBASE, \
+ reg, sr, so + sizeof (guint32)); \
+ mono_call_inst_add_outarg_reg(cfg, call, reg, dr + 1, FALSE); \
+ break; \
+ } \
+} while (0)
+
+#define MONO_OUTPUT_VTS2(cfg, size, dr, dx, sr, so) do { \
+ int tmpr; \
+ switch (size) { \
+ case 0: \
+ tmpr = mono_alloc_preg (cfg); \
+ MONO_EMIT_NEW_ICONST(cfg, tmpr, 0); \
+ MONO_EMIT_NEW_STORE_MEMBASE(cfg, OP_STORE_MEMBASE_REG, \
+ dr, dx, tmpr); \
+ break; \
+ case 1: \
+ tmpr = mono_alloc_preg (cfg); \
+ MONO_EMIT_NEW_LOAD_MEMBASE_OP(cfg, OP_LOADU1_MEMBASE, \
+ tmpr, sr, so); \
+ MONO_EMIT_NEW_STORE_MEMBASE(cfg, OP_STORE_MEMBASE_REG, \
+ dr, dx, tmpr); \
+ break; \
+ case 2: \
+ tmpr = mono_alloc_preg (cfg); \
+ MONO_EMIT_NEW_LOAD_MEMBASE_OP(cfg, OP_LOADU2_MEMBASE, \
+ tmpr, sr, so); \
+ MONO_EMIT_NEW_STORE_MEMBASE(cfg, OP_STORE_MEMBASE_REG, \
+ dr, dx, tmpr); \
+ break; \
+ case 4: \
+ case 8: \
+ MONO_EMIT_NEW_MOVE2 (cfg, dr, dx, sr, so, size); \
+ break; \
+ } \
+} while (0)
+
#undef DEBUG
#define DEBUG(a) if (cfg->verbose_level > 1) a
RegTypeGeneral,
RegTypeBase,
RegTypeFP,
+ RegTypeFPR4,
RegTypeStructByVal,
+ RegTypeStructByValInFP,
RegTypeStructByAddr
} ArgStorage;
/*========================= End of Function ========================*/
-// static int lc = 0;
+//static int lc = 0;
/*------------------------------------------------------------------*/
/* */
if ((cfg->flags & MONO_CFG_HAS_ALLOCA) || header->num_clauses)
cfg->frame_reg = s390_r11;
+ /* FIXME: s390_r12 is reserved for bkchain_reg. Only reserve it if needed */
+ top = 12;
for (i = 8; i < top; ++i) {
- if (cfg->frame_reg != i)
+ if (cfg->frame_reg != i)
regs = g_list_prepend (regs, GUINT_TO_POINTER (i));
}
/*========================= End of Function ========================*/
+/*------------------------------------------------------------------*/
+/* */
+/* Name - mono_arch_is_inst_imm */
+/* */
+/*------------------------------------------------------------------*/
+
+/*
+ * Returns TRUE for every immediate: this backend accepts any IMM at IR
+ * level and relies on the lowering pass to materialize out-of-range
+ * immediates into registers.
+ */
+gboolean
+mono_arch_is_inst_imm (gint64 imm)
+{
+ /* The lowering pass will take care of it */
+ return TRUE;
+}
+
+/*========================= End of Function ========================*/
+
/*------------------------------------------------------------------*/
/* */
/* Name - add_general */
{
if (*gr > S390_LAST_ARG_REG) {
sz->stack_size = S390_ALIGN(sz->stack_size, sizeof(long));
- ainfo->reg = STK_BASE;
+ ainfo->reg = STK_BASE;
+ ainfo->offset = sz->stack_size;
+ sz->stack_size += sizeof (gpointer);
sz->parm_size += sizeof(gpointer);
sz->offStruct += sizeof(gpointer);
} else {
ainfo->reg = *gr;
+ ainfo->offset = sz->stack_size;
}
(*gr) ++;
- ainfo->offset = sz->stack_size;
ainfo->offparm = sz->offset;
sz->offset = S390_ALIGN(sz->offset+size, sizeof(long));
ainfo->size = size;
MonoGenericSharingContext *gsctx = cfg ? cfg->generic_sharing_context : NULL;
if (mp)
- cinfo = mono_mempool_alloc0 (mp, sizeof (CallInfo) + (sizeof (ArgInfo) * nParm));
+ cinfo = mono_mempool_alloc0 (mp, sizeof (CallInfo) + sizeof (ArgInfo) * nParm);
else
- cinfo = g_malloc0 (sizeof (CallInfo) + (sizeof (ArgInfo) * nParm));
+ cinfo = g_malloc0 (sizeof (CallInfo) + sizeof (ArgInfo) * nParm);
fr = 0;
gr = s390_r2;
- nParm = 0;
+ nParm = 0;
cinfo->struct_ret = 0;
- sz = &cinfo->sz;
- sz->offset = 0;
+ sz = &cinfo->sz;
+ sz->offset = 0;
sz->offStruct = S390_MINIMAL_STACK_SIZE;
sz->retStruct = 0;
sz->stack_size = S390_MINIMAL_STACK_SIZE;
(info->fields[0].field->type->type == MONO_TYPE_R4)) {
cinfo->args[nParm].size = sizeof(float);
add_float(&fr, sz, cinfo->args+nParm);
+ nParm ++;
break;
}
(info->fields[0].field->type->type == MONO_TYPE_R8)) {
cinfo->args[nParm].size = sizeof(double);
add_float(&fr, sz, cinfo->args+nParm);
+ nParm ++;
break;
}
cfg->flags |= MONO_CFG_HAS_SPILLUP;
+ sig = mono_method_signature (cfg->method);
+
+ cinfo = get_call_info (cfg, cfg->mempool, sig, sig->pinvoke);
+
/*---------------------------------------------------------*/
/* We use the frame register also for any method that has */
/* filter clauses. This way, when the handlers are called, */
cfg->frame_reg = frame_reg;
+ cfg->arch.bkchain_reg = -1;
+
if (frame_reg != STK_BASE)
cfg->used_int_regs |= 1 << frame_reg;
cinfo = get_call_info (cfg, cfg->mempool, sig, sig->pinvoke);
if (cinfo->struct_ret) {
- cfg->vret_addr->opcode = OP_REGVAR;
- cfg->vret_addr->inst_c0 = s390_r2;
+ if (!cfg->new_ir) {
+ cfg->vret_addr->opcode = OP_REGVAR;
+ cfg->vret_addr->inst_c0 = s390_r2;
+ }
} else {
switch (mono_type_get_underlying_type (sig->ret)->type) {
case MONO_TYPE_VOID:
inst->opcode = OP_REGOFFSET;
inst->inst_basereg = frame_reg;
offset += sizeof(gpointer);
- if ((sig->call_convention == MONO_CALL_VARARG) &&
+ if (!cfg->new_ir && (sig->call_convention == MONO_CALL_VARARG) &&
(!retFitsInReg (cinfo->ret.size)))
cfg->sig_cookie += cinfo->ret.size;
if (G_UNLIKELY (cfg->verbose_level > 1)) {
if (inst->opcode != OP_REGVAR) {
switch (cinfo->args[iParm].regtype) {
case RegTypeStructByAddr :
- if (cinfo->args[iParm].reg == STK_BASE) {
- inst->opcode = OP_S390_LOADARG;
- inst->inst_basereg = frame_reg;
- size = abs(cinfo->args[iParm].vtsize);
- offset = S390_ALIGN(offset, sizeof(long));
- inst->inst_offset = offset;
- inst->backend.arg_info = cinfo->args[iParm].offset;
- } else {
- inst->opcode = OP_S390_ARGREG;
- inst->inst_basereg = frame_reg;
- size = sizeof(gpointer);
- offset = S390_ALIGN(offset, size);
- inst->inst_offset = offset;
- inst->backend.arg_info = cinfo->args[iParm].offset;
- }
+ if (cfg->new_ir) {
+ MonoInst *indir;
+
+ size = sizeof (gpointer);
+
+ if (cinfo->args [iParm].reg == STK_BASE) {
+ cfg->arch.bkchain_reg = s390_r12;
+ cfg->used_int_regs |= 1 << cfg->arch.bkchain_reg;
+
+ inst->opcode = OP_REGOFFSET;
+ inst->inst_basereg = cfg->arch.bkchain_reg;
+ inst->inst_offset = cinfo->args [iParm].offset;
+ } else {
+ inst->opcode = OP_REGOFFSET;
+ inst->inst_basereg = frame_reg;
+ inst->inst_offset = S390_ALIGN(offset, sizeof (gpointer));
+ }
+
+ /* Add a level of indirection */
+ MONO_INST_NEW (cfg, indir, 0);
+ *indir = *inst;
+ inst->opcode = OP_VTARG_ADDR;
+ inst->inst_left = indir;
+ } else {
+ if (cinfo->args[iParm].reg == STK_BASE) {
+ inst->opcode = OP_S390_LOADARG;
+ inst->inst_basereg = frame_reg;
+ size = abs(cinfo->args[iParm].vtsize);
+ offset = S390_ALIGN(offset, sizeof(long));
+ inst->inst_offset = offset;
+ inst->backend.arg_info = cinfo->args[iParm].offset;
+ } else {
+ inst->opcode = OP_S390_ARGREG;
+ inst->inst_basereg = frame_reg;
+ size = sizeof(gpointer);
+ offset = S390_ALIGN(offset, size);
+ inst->inst_offset = offset;
+ inst->backend.arg_info = cinfo->args[iParm].offset;
+ }
+ }
break;
case RegTypeStructByVal :
- inst->opcode = OP_S390_ARGPTR;
- inst->inst_basereg = frame_reg;
- size = cinfo->args[iParm].size;
- offset = S390_ALIGN(offset, size);
- inst->inst_offset = offset;
- inst->backend.arg_info = cinfo->args[iParm].offset;
+ if (cfg->new_ir) {
+ size = cinfo->args[iParm].size;
+ offset = S390_ALIGN(offset, size);
+ inst->opcode = OP_REGOFFSET;
+ inst->inst_basereg = frame_reg;
+ inst->inst_offset = S390_ALIGN (offset, size);
+ } else {
+ inst->opcode = OP_S390_ARGPTR;
+ inst->inst_basereg = frame_reg;
+ size = cinfo->args[iParm].size;
+ offset = S390_ALIGN(offset, size);
+ inst->inst_offset = offset;
+ inst->backend.arg_info = cinfo->args[iParm].offset;
+ }
break;
default :
- if (cinfo->args[iParm].reg != STK_BASE) {
- inst->opcode = OP_REGOFFSET;
- inst->inst_basereg = frame_reg;
- size = (cinfo->args[iParm].size < 8
- ? sizeof(long)
- : sizeof(long long));
- offset = S390_ALIGN(offset, size);
- inst->inst_offset = offset;
- } else {
- inst->opcode = OP_S390_STKARG;
- inst->inst_basereg = frame_reg;
- size = (cinfo->args[iParm].size < 4
- ? 4 - cinfo->args[iParm].size
- : 0);
- inst->inst_offset = cinfo->args[iParm].offset +
- size;
- inst->backend.arg_info = 0;
- size = sizeof(long);
+ if (cfg->new_ir) {
+ if (cinfo->args [iParm].reg == STK_BASE) {
+ /*
+ * These arguments are in the previous frame, so we can't
+ * compute their offset from the current frame pointer right
+ * now, since cfg->stack_offset is not yet known, so dedicate a
+ * register holding the previous frame pointer.
+ */
+ cfg->arch.bkchain_reg = s390_r12;
+ cfg->used_int_regs |= 1 << cfg->arch.bkchain_reg;
+
+ inst->opcode = OP_REGOFFSET;
+ inst->inst_basereg = cfg->arch.bkchain_reg;
+ size = (cinfo->args[iParm].size < 4
+ ? 4 - cinfo->args[iParm].size
+ : 0);
+ inst->inst_offset = cinfo->args [iParm].offset + size;
+ size = sizeof (long);
+ } else {
+ inst->opcode = OP_REGOFFSET;
+ inst->inst_basereg = frame_reg;
+ size = (cinfo->args[iParm].size < 8
+ ? sizeof(long)
+ : sizeof(long long));
+ offset = S390_ALIGN(offset, size);
+ inst->inst_offset = offset;
+ }
+ } else {
+ if (cinfo->args[iParm].reg != STK_BASE) {
+ inst->opcode = OP_REGOFFSET;
+ inst->inst_basereg = frame_reg;
+ size = (cinfo->args[iParm].size < 8
+ ? sizeof(long)
+ : sizeof(long long));
+ offset = S390_ALIGN(offset, size);
+ inst->inst_offset = offset;
+ } else {
+ inst->opcode = OP_S390_STKARG;
+ inst->inst_basereg = frame_reg;
+ size = (cinfo->args[iParm].size < 4
+ ? 4 - cinfo->args[iParm].size
+ : 0);
+ inst->inst_offset = cinfo->args[iParm].offset +
+ size;
+ inst->backend.arg_info = 0;
+ size = sizeof(long);
+ }
}
}
if ((sig->call_convention == MONO_CALL_VARARG) &&
/*------------------------------------------------------*/
cfg->stack_offset = S390_ALIGN(offset, S390_STACK_ALIGNMENT);
+ /* Fix up offsets for arguments whose value is in the parent frame */
+ if (cfg->new_ir) {
+ for (iParm = sArg; iParm < eArg; ++iParm) {
+ inst = cfg->args [iParm];
+
+ if (inst->opcode == OP_S390_STKARG) {
+ inst->opcode = OP_REGOFFSET;
+ inst->inst_offset += cfg->stack_offset;
+ }
+ }
+ }
}
/*========================= End of Function ========================*/
arg->ins.cil_code = in->cil_code;
arg->ins.inst_left = in;
arg->ins.type = in->type;
- MONO_INST_LIST_ADD_TAIL (&arg->ins.node, &call->out_args);
+ /* prepend, we'll need to reverse them later */
+ arg->ins.next = call->out_args;
+ call->out_args = (MonoInst *) arg;
arg->ins.inst_right = (MonoInst *) call;
if (ainfo->regtype == RegTypeGeneral) {
arg->ins.backend.reg3 = ainfo->reg;
emit_sig_cookie (cfg, call, cinfo, ainfo->size);
}
+ /*
+ * Reverse the call->out_args list.
+ */
+ {
+ MonoInst *prev = NULL, *list = call->out_args, *next;
+ while (list) {
+ next = list->next;
+ list->next = prev;
+ prev = list;
+ list = next;
+ }
+ call->out_args = prev;
+ }
+
return call;
}
/*========================= End of Function ========================*/
+/*
+ * add_outarg_reg2:
+ *
+ *   Emit a move of TREE's value into a freshly allocated vreg and record
+ * that vreg as an outgoing argument of CALL destined for hardware register
+ * REG. STORAGE selects the move flavour: integer (OP_MOVE), double
+ * (OP_FMOVE) or OP_S390_SETF4RET for the 4-byte float case — presumably a
+ * double-to-single conversion on s390; verify against the opcode's
+ * definition.
+ */
+static void
+add_outarg_reg2 (MonoCompile *cfg, MonoCallInst *call, ArgStorage storage, int reg, MonoInst *tree)
+{
+ MonoInst *ins;
+
+ switch (storage) {
+ case RegTypeGeneral:
+ MONO_INST_NEW (cfg, ins, OP_MOVE);
+ ins->dreg = mono_alloc_ireg (cfg);
+ ins->sreg1 = tree->dreg;
+ MONO_ADD_INS (cfg->cbb, ins);
+ /* FALSE: integer register bank */
+ mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, reg, FALSE);
+ break;
+ case RegTypeFP:
+ MONO_INST_NEW (cfg, ins, OP_FMOVE);
+ ins->dreg = mono_alloc_freg (cfg);
+ ins->sreg1 = tree->dreg;
+ MONO_ADD_INS (cfg->cbb, ins);
+ /* TRUE: floating point register bank */
+ mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, reg, TRUE);
+ break;
+ case RegTypeFPR4:
+ MONO_INST_NEW (cfg, ins, OP_S390_SETF4RET);
+ ins->dreg = mono_alloc_freg (cfg);
+ ins->sreg1 = tree->dreg;
+ MONO_ADD_INS (cfg->cbb, ins);
+ mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, reg, TRUE);
+ break;
+ default:
+ g_assert_not_reached ();
+ }
+}
+
+/*
+ * emit_sig_cookie2:
+ *
+ *   Emit the vararg signature cookie for CALL: build a trimmed copy of the
+ * call signature that starts at the sentinel position, load its address
+ * into a vreg and store it at the cookie's stack slot from CINFO. The
+ * embedded runtime signature pointer makes the method non-AOT-able.
+ */
+static void
+emit_sig_cookie2 (MonoCompile *cfg, MonoCallInst *call, CallInfo *cinfo)
+{
+ MonoMethodSignature *tmpSig;
+ MonoInst *sig_arg;
+
+ /* the cookie embeds a runtime pointer, which cannot be AOT-compiled */
+ cfg->disable_aot = TRUE;
+
+ /*----------------------------------------------------------*/
+ /* mono_ArgIterator_Setup assumes the signature cookie is */
+ /* passed first and all the arguments which were before it */
+ /* passed on the stack after the signature. So compensate */
+ /* by passing a different signature. */
+ /*----------------------------------------------------------*/
+ tmpSig = mono_metadata_signature_dup (call->signature);
+ tmpSig->param_count -= call->signature->sentinelpos;
+ tmpSig->sentinelpos = 0;
+ if (tmpSig->param_count > 0)
+ memcpy (tmpSig->params,
+ call->signature->params + call->signature->sentinelpos,
+ tmpSig->param_count * sizeof(MonoType *));
+
+ MONO_INST_NEW (cfg, sig_arg, OP_ICONST);
+ sig_arg->dreg = mono_alloc_ireg (cfg);
+ sig_arg->inst_p0 = tmpSig;
+ MONO_ADD_INS (cfg->cbb, sig_arg);
+
+ /* store the cookie at its reserved slot in the outgoing arg area */
+ MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, STK_BASE,
+ cinfo->sigCookie.offset, sig_arg->dreg);
+}
+
+/*------------------------------------------------------------------*/
+/* */
+/* Name - mono_arch_emit_call */
+/* */
+/*------------------------------------------------------------------*/
+
+void
+mono_arch_emit_call (MonoCompile *cfg, MonoCallInst *call)
+{
+ MonoInst *in;
+ MonoMethodSignature *sig;
+ MonoInst *ins;
+ int i, n, lParamArea;
+ CallInfo *cinfo;
+ ArgInfo *ainfo = NULL;
+ int stackSize;
+
+ sig = call->signature;
+ n = sig->param_count + sig->hasthis;
+ DEBUG (g_print ("Call requires: %d parameters\n",n));
+
+ cinfo = get_call_info (cfg, cfg->mempool, sig, sig->pinvoke);
+
+ stackSize = cinfo->sz.stack_size + cinfo->sz.local_size + cinfo->sz.parm_size + cinfo->sz.offset;
+ call->stack_usage = MAX(stackSize, call->stack_usage);
+ lParamArea = MAX((call->stack_usage-S390_MINIMAL_STACK_SIZE-cinfo->sz.parm_size), 0);
+ cfg->param_area = MAX(((signed) cfg->param_area), lParamArea);
+ cfg->flags |= MONO_CFG_HAS_CALLS;
+
+ if (cinfo->struct_ret) {
+ MONO_INST_NEW (cfg, ins, OP_MOVE);
+ ins->sreg1 = call->vret_var->dreg;
+ ins->dreg = mono_alloc_preg (cfg);
+ MONO_ADD_INS (cfg->cbb, ins);
+ mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, cinfo->ret.reg, FALSE);
+ }
+
+ for (i = 0; i < n; ++i) {
+ ainfo = cinfo->args + i;
+ MonoType *t;
+
+ if (i >= sig->hasthis)
+ t = sig->params [i - sig->hasthis];
+ else
+ t = &mono_defaults.int_class->byval_arg;
+ t = mono_type_get_underlying_type (t);
+
+ in = call->args [i];
+
+ if ((sig->call_convention == MONO_CALL_VARARG) &&
+ (i == sig->sentinelpos)) {
+ emit_sig_cookie2 (cfg, call, cinfo);
+ }
+
+ switch (ainfo->regtype) {
+ case RegTypeGeneral:
+ if (!t->byref && (t->type == MONO_TYPE_I8 || t->type == MONO_TYPE_U8)) {
+ MONO_INST_NEW (cfg, ins, OP_MOVE);
+ ins->dreg = mono_alloc_ireg (cfg);
+ ins->sreg1 = in->dreg + 2;
+ MONO_ADD_INS (cfg->cbb, ins);
+ mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, ainfo->reg, FALSE);
+ MONO_INST_NEW (cfg, ins, OP_MOVE);
+ ins->dreg = mono_alloc_ireg (cfg);
+ ins->sreg1 = in->dreg + 1;
+ MONO_ADD_INS (cfg->cbb, ins);
+ mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, ainfo->reg + 1, FALSE);
+ } else {
+ add_outarg_reg2 (cfg, call, ainfo->regtype, ainfo->reg, in);
+ }
+ break;
+ case RegTypeFP:
+ if (MONO_TYPE_ISSTRUCT (t)) {
+ /* Valuetype passed in one fp register */
+ ainfo->regtype = RegTypeStructByValInFP;
+ /* Fall through */
+ } else {
+ if (ainfo->size == 4)
+ ainfo->regtype = RegTypeFPR4;
+ add_outarg_reg2 (cfg, call, ainfo->regtype, ainfo->reg, in);
+ break;
+ }
+ case RegTypeStructByVal:
+ case RegTypeStructByAddr: {
+ guint32 align;
+ guint32 size;
+
+ if (sig->params [i - sig->hasthis]->type == MONO_TYPE_TYPEDBYREF) {
+ size = sizeof (MonoTypedRef);
+ align = sizeof (gpointer);
+ }
+ else
+ if (sig->pinvoke)
+ size = mono_type_native_stack_size (&in->klass->byval_arg, &align);
+ else {
+ /*
+ * Other backends use mono_type_stack_size (), but that
+ * aligns the size to 8, which is larger than the size of
+ * the source, leading to reads of invalid memory if the
+ * source is at the end of address space.
+ */
+ size = mono_class_value_size (in->klass, &align);
+ }
+
+ g_assert (in->klass);
+
+ ainfo->offparm += cinfo->sz.offStruct;
+
+ MONO_INST_NEW (cfg, ins, OP_OUTARG_VT);
+ ins->sreg1 = in->dreg;
+ ins->klass = in->klass;
+ ins->backend.size = ainfo->size;
+ ins->inst_p0 = call;
+ ins->inst_p1 = mono_mempool_alloc (cfg->mempool, sizeof (ArgInfo));
+ memcpy (ins->inst_p1, ainfo, sizeof (ArgInfo));
+
+ MONO_ADD_INS (cfg->cbb, ins);
+
+ if (ainfo->regtype == RegTypeStructByAddr) {
+ /*
+ * We use OP_OUTARG_VT to copy the valuetype to a stack location, then
+ * use the normal OUTARG opcodes to pass the address of the location to
+ * the callee.
+ */
+ int treg = mono_alloc_preg (cfg);
+ MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ADD_IMM, treg,
+ STK_BASE, ainfo->offparm);
+ if (ainfo->reg == STK_BASE) {
+ /* The address is passed on the stack */
+ MONO_INST_NEW (cfg, ins, OP_STORE_MEMBASE_REG);
+ ins->inst_destbasereg = STK_BASE;
+ ins->inst_offset = ainfo->offset;
+ ins->sreg1 = treg;
+ MONO_ADD_INS (cfg->cbb, ins);
+ } else {
+ mono_call_inst_add_outarg_reg (cfg, call, treg, ainfo->reg, FALSE);
+ }
+ }
+ break;
+ }
+ case RegTypeBase:
+ if (!t->byref && t->type == MONO_TYPE_R4) {
+ MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORER4_MEMBASE_REG,
+ STK_BASE, ainfo->offset,
+ in->dreg);
+ } else if (!t->byref && (t->type == MONO_TYPE_R8)) {
+ MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORER8_MEMBASE_REG,
+ STK_BASE, ainfo->offset,
+ in->dreg);
+ } else if (!t->byref && (t->type == MONO_TYPE_I8 || t->type == MONO_TYPE_U8)) {
+ MONO_INST_NEW (cfg, ins, OP_STORE_MEMBASE_REG);
+ ins->inst_destbasereg = STK_BASE;
+ ins->inst_offset = ainfo->offset + MINI_LS_WORD_OFFSET;
+ ins->sreg1 = in->dreg + 1;
+ MONO_ADD_INS (cfg->cbb, ins);
+
+ MONO_INST_NEW (cfg, ins, OP_STORE_MEMBASE_REG);
+ ins->inst_destbasereg = STK_BASE;
+ ins->inst_offset = ainfo->offset + MINI_MS_WORD_OFFSET;
+ ins->sreg1 = in->dreg + 2;
+ MONO_ADD_INS (cfg->cbb, ins);
+ } else {
+ MONO_INST_NEW (cfg, ins, OP_STORE_MEMBASE_REG);
+ ins->inst_destbasereg = STK_BASE;
+ ins->inst_offset = ainfo->offset;
+ ins->sreg1 = in->dreg;
+ MONO_ADD_INS (cfg->cbb, ins);
+ }
+ break;
+ default:
+ g_assert_not_reached ();
+ break;
+ }
+ }
+
+ /*
+ * Handle the case where there are no implicit arguments
+ */
+ if ((sig->call_convention == MONO_CALL_VARARG) &&
+ (i == sig->sentinelpos)) {
+ emit_sig_cookie2 (cfg, call, cinfo);
+ }
+}
+
+/*========================= End of Function ========================*/
+
+/*------------------------------------------------------------------*/
+/* */
+/* Name - mono_arch_emit_outarg_vt */
+/* */
+/*------------------------------------------------------------------*/
+
+void
+mono_arch_emit_outarg_vt (MonoCompile *cfg, MonoInst *ins, MonoInst *src)
+{
+ MonoCallInst *call = (MonoCallInst*)ins->inst_p0;
+ ArgInfo *ainfo = (ArgInfo*)ins->inst_p1;
+ int size = ins->backend.size;
+
+ if (ainfo->regtype == RegTypeStructByVal) {
+ /*
+ arg->ins.sreg1 = ainfo->reg;
+ arg->ins.opcode = OP_OUTARG_VT;
+ arg->size = ainfo->size;
+ arg->offset = ainfo->offset;
+ arg->offPrm = ainfo->offparm + cinfo->sz.offStruct;
+ */
+ if (ainfo->reg != STK_BASE) {
+ MONO_OUTPUT_VTR2 (cfg, size, ainfo->reg, src->dreg, 0);
+ } else {
+ MONO_OUTPUT_VTS2 (cfg, size, ainfo->reg, ainfo->offset,
+ src->dreg, 0);
+ }
+ } else if (ainfo->regtype == RegTypeStructByValInFP) {
+ int dreg = mono_alloc_freg (cfg);
+
+ if (ainfo->size == 4) {
+ MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADR4_MEMBASE, dreg, src->dreg, 0);
+ MONO_EMIT_NEW_UNALU (cfg, OP_S390_SETF4RET, dreg, dreg);
+ } else {
+ g_assert (ainfo->size == 8);
+
+ MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADR8_MEMBASE, dreg, src->dreg, 0);
+ }
+
+ mono_call_inst_add_outarg_reg (cfg, call, dreg, ainfo->reg, TRUE);
+ } else {
+ MONO_EMIT_NEW_MOVE2 (cfg, STK_BASE, ainfo->offparm,
+ src->dreg, 0, size);
+ }
+}
+
+/*------------------------------------------------------------------*/
+/* */
+/* Name - mono_arch_emit_setret */
+/* */
+/*------------------------------------------------------------------*/
+
+void
+mono_arch_emit_setret (MonoCompile *cfg, MonoMethod *method, MonoInst *val)
+{
+ MonoType *ret = mono_type_get_underlying_type (mono_method_signature (method)->ret);
+
+ if (!ret->byref) {
+ if (ret->type == MONO_TYPE_R4) {
+ MONO_EMIT_NEW_UNALU (cfg, OP_S390_SETF4RET, s390_f0, val->dreg);
+ return;
+ } else if (ret->type == MONO_TYPE_R8) {
+ MONO_EMIT_NEW_UNALU (cfg, OP_FMOVE, s390_f0, val->dreg);
+ return;
+ } else if (ret->type == MONO_TYPE_I8 || ret->type == MONO_TYPE_U8) {
+ MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, s390_r3, val->dreg + 1);
+ MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, s390_r2, val->dreg + 2);
+ return;
+ }
+ }
+
+ MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, cfg->ret->dreg, val->dreg);
+}
+
+/*========================= End of Function ========================*/
+
/*------------------------------------------------------------------*/
/* */
/* Name - emit_sig_cookie. */
arg->ins.inst_right = (MonoInst *) call;
arg->size = argSize;
arg->offset = cinfo->sigCookie.offset;
- MONO_INST_LIST_ADD_TAIL (&arg->ins.node, &call->out_args);
+ arg->ins.next = call->out_args;
+ call->out_args = (MonoInst *) arg;
}
/*========================= End of Function ========================*/
case OP_IREM_IMM:
case OP_IDIV_UN_IMM:
case OP_IREM_UN_IMM:
- mono_decompose_op_imm (cfg, ins);
+ case OP_LOCALLOC_IMM:
+ mono_decompose_op_imm (cfg, bb, ins);
break;
default:
break;
}
}
-
- bb->max_vreg = cfg->rs->next_vreg;
}
/*========================= End of Function ========================*/
void
mono_arch_output_basic_block (MonoCompile *cfg, MonoBasicBlock *bb)
{
- MonoInst *ins, *next;
+ MonoInst *ins;
MonoCallInst *call;
guint offset;
guint8 *code = cfg->native_code + cfg->code_len;
+ MonoInst *last_ins = NULL;
guint last_offset = 0;
int max_len, cpos, src2;
s390_lr (code, ins->dreg, s390_r0);
}
break;
+
case OP_ICONV_TO_I1: {
- s390_lhi (code, s390_r0, 0x80);
- if (ins->dreg != ins->sreg1) {
+ if (ins->dreg != ins->sreg1)
s390_lr (code, ins->dreg, ins->sreg1);
- }
- s390_nr (code, s390_r0, ins->sreg1);
- s390_jz (code, 7);
- s390_lhi (code, s390_r13, -1);
- s390_sll (code, s390_r13, 0, 8);
- s390_or (code, ins->dreg, s390_r13);
+ s390_sll (code, ins->dreg, 0, 24);
+ s390_sra (code, ins->dreg, 0, 24);
}
break;
case OP_ICONV_TO_I2: {
- s390_lhi (code, s390_r0, 0x80);
- s390_sll (code, s390_r0, 0, 8);
- if (ins->dreg != ins->sreg1) {
- s390_lr (code, ins->dreg, ins->sreg1);
- }
- s390_nr (code, s390_r0, ins->sreg1);
- s390_jz (code, 7);
- s390_lhi (code, s390_r13, -1);
- s390_sll (code, s390_r13, 0, 16);
- s390_or (code, ins->dreg, s390_r13);
+ if (ins->dreg != ins->sreg1)
+ s390_lr (code, ins->dreg, ins->sreg1);
+ s390_sll (code, ins->dreg, 0, 16);
+ s390_sra (code, ins->dreg, 0, 16);
}
break;
case OP_ICONV_TO_U1: {
s390_nr (code, ins->dreg, s390_r0);
}
break;
- case OP_COMPARE: {
- next = mono_inst_list_next (&ins->node, &bb->ins_list);
+ case OP_COMPARE:
+ case OP_ICOMPARE: {
+ gboolean un = FALSE;
+ MonoInst *next = ins->next;
+
if ((next) &&
(((next->opcode >= OP_IBNE_UN) &&
(next->opcode <= OP_IBLT_UN)) ||
((next->opcode >= OP_COND_EXC_NE_UN) &&
(next->opcode <= OP_COND_EXC_LT_UN)) ||
+ ((next->opcode >= OP_COND_EXC_INE_UN) &&
+ (next->opcode <= OP_COND_EXC_ILT_UN)) ||
((next->opcode == OP_CLT_UN) ||
- (next->opcode == OP_CGT_UN))))
+ (next->opcode == OP_CGT_UN)) ||
+ ((next->opcode == OP_ICLT_UN) ||
+ (next->opcode == OP_ICGT_UN))))
+ un = TRUE;
+
+ if (un)
s390_clr (code, ins->sreg1, ins->sreg2);
else
s390_cr (code, ins->sreg1, ins->sreg2);
}
break;
- case OP_COMPARE_IMM: {
- next = mono_inst_list_next (&ins->node, &bb->ins_list);
+ case OP_COMPARE_IMM:
+ case OP_ICOMPARE_IMM: {
+ gboolean un = FALSE;
+ MonoInst *next = ins->next;
+
+ if ((next) &&
+ (((next->opcode >= OP_IBNE_UN) &&
+ (next->opcode <= OP_IBLT_UN)) ||
+ ((next->opcode >= OP_COND_EXC_NE_UN) &&
+ (next->opcode <= OP_COND_EXC_LT_UN)) ||
+ ((next->opcode >= OP_COND_EXC_INE_UN) &&
+ (next->opcode <= OP_COND_EXC_ILT_UN)) ||
+ ((next->opcode == OP_CLT_UN) ||
+ (next->opcode == OP_CGT_UN)) ||
+ ((next->opcode == OP_ICLT_UN) ||
+ (next->opcode == OP_ICGT_UN))))
+ un = TRUE;
+
if (s390_is_imm16 (ins->inst_imm)) {
s390_lhi (code, s390_r0, ins->inst_imm);
- if ((next) &&
- (((next->opcode >= OP_IBNE_UN) &&
- (next->opcode <= OP_IBLT_UN)) ||
- ((next->opcode >= OP_COND_EXC_NE_UN) &&
- (next->opcode <= OP_COND_EXC_LT_UN)) ||
- ((next->opcode == OP_CLT_UN) ||
- (next->opcode == OP_CGT_UN))))
+ if (un)
s390_clr (code, ins->sreg1, s390_r0);
else
s390_cr (code, ins->sreg1, s390_r0);
s390_basr (code, s390_r13, 0);
s390_j (code, 4);
s390_word (code, ins->inst_imm);
- if ((next) &&
- (((next->opcode >= OP_IBNE_UN) &&
- (next->opcode <= OP_IBLT_UN)) ||
- ((next->opcode >= OP_COND_EXC_NE_UN) &&
- (next->opcode <= OP_COND_EXC_LT_UN)) ||
- ((next->opcode == OP_CLT_UN) ||
- (next->opcode == OP_CGT_UN))))
+ if (un)
s390_cl (code, ins->sreg1, 0, s390_r13, 4);
else
s390_c (code, ins->sreg1, 0, s390_r13, 4);
s390_brasl (code, s390_r14, 0);
}
break;
- case OP_ADDCC: {
+ case OP_ADDCC:
+ case OP_IADDCC: {
CHECK_SRCDST_COM;
s390_alr (code, ins->dreg, src2);
}
s390_ar (code, ins->dreg, src2);
}
break;
- case OP_ADC: {
+ case OP_ADC:
+ case OP_IADC: {
CHECK_SRCDST_COM;
s390_alcr (code, ins->dreg, src2);
}
break;
- case OP_ADD_IMM: {
+ case OP_ADD_IMM:
+ case OP_IADD_IMM: {
if (ins->dreg != ins->sreg1) {
s390_lr (code, ins->dreg, ins->sreg1);
}
- next = mono_inst_list_next (&ins->node, &bb->ins_list);
- if ((next) &&
- (next->opcode == OP_ADC_IMM)) {
+ if ((ins->next) &&
+ (ins->next->opcode == OP_ADC_IMM)) {
s390_basr (code, s390_r13, 0);
s390_j (code, 4);
s390_word (code, ins->inst_imm);
}
break;
case OP_ADDCC_IMM: {
- next = mono_inst_list_next (&ins->node, &bb->ins_list);
- if ((next) &&
- (next->opcode == OP_ADC_IMM)) {
+ if ((ins->next) &&
+ (ins->next->opcode == OP_ADC_IMM)) {
s390_basr (code, s390_r13, 0);
s390_j (code, 4);
s390_word (code, ins->inst_imm);
}
}
break;
- case OP_IADD_OVF: {
+ case OP_IADD_OVF:
+ case OP_S390_IADD_OVF: {
CHECK_SRCDST_COM;
s390_ar (code, ins->dreg, src2);
EMIT_COND_SYSTEM_EXCEPTION (S390_CC_OV, "OverflowException");
}
break;
- case OP_IADD_OVF_UN: {
+ case OP_IADD_OVF_UN:
+ case OP_S390_IADD_OVF_UN: {
CHECK_SRCDST_COM;
s390_alr (code, ins->dreg, src2);
EMIT_COND_SYSTEM_EXCEPTION (S390_CC_CY, "OverflowException");
}
break;
- case OP_LADD: {
+ case OP_LADD:
+ case OP_S390_LADD: {
s390_alr (code, s390_r0, ins->sreg1);
s390_jnc (code, 4);
s390_ahi (code, s390_r1, 1);
s390_lr (code, ins->dreg+1, s390_r1);
}
break;
- case OP_LADD_OVF: {
+ case OP_LADD_OVF:
+ case OP_S390_LADD_OVF: {
short int *o[1];
s390_alr (code, s390_r0, ins->sreg1);
s390_jnc (code, 0); CODEPTR(code, o[0]);
s390_lr (code, ins->dreg+1, s390_r1);
}
break;
- case OP_LADD_OVF_UN: {
+ case OP_LADD_OVF_UN:
+ case OP_S390_LADD_OVF_UN: {
s390_alr (code, s390_r0, ins->sreg1);
s390_alcr (code, s390_r1, ins->sreg2);
EMIT_COND_SYSTEM_EXCEPTION (S390_CC_CY, "OverflowException");
EMIT_COND_SYSTEM_EXCEPTION (S390_CC_CY, "OverflowException");
}
break;
- case OP_SUBCC: {
+ case OP_SUBCC:
+ case OP_ISUBCC: {
CHECK_SRCDST_NCOM;
s390_slr (code, ins->dreg, src2);
}
s390_sr (code, ins->dreg, src2);
}
break;
- case OP_SBB: {
+ case OP_SBB:
+ case OP_ISBB: {
CHECK_SRCDST_NCOM;
s390_slbr (code, ins->dreg, src2);
}
}
}
break;
- case OP_SUB_IMM: {
+ case OP_SUB_IMM:
+ case OP_ISUB_IMM: {
if (s390_is_imm16 (-ins->inst_imm)) {
if (ins->dreg != ins->sreg1) {
s390_lr (code, ins->dreg, ins->sreg1);
}
}
break;
- case OP_ISUB_OVF: {
+ case OP_ISUB_OVF:
+ case OP_S390_ISUB_OVF: {
CHECK_SRCDST_NCOM;
s390_sr (code, ins->dreg, src2);
EMIT_COND_SYSTEM_EXCEPTION (S390_CC_OV, "OverflowException");
}
break;
- case OP_ISUB_OVF_UN: {
+ case OP_ISUB_OVF_UN:
+ case OP_S390_ISUB_OVF_UN: {
CHECK_SRCDST_NCOM;
s390_slr (code, ins->dreg, src2);
EMIT_COND_SYSTEM_EXCEPTION (S390_CC_NC, "OverflowException");
}
break;
- case OP_LSUB: {
+ case OP_LSUB:
+ case OP_S390_LSUB: {
s390_lr (code, s390_r14, ins->sreg2);
s390_slr (code, s390_r0, ins->sreg1);
s390_jnl (code, 4);
s390_lr (code, ins->dreg+1, s390_r1);
}
break;
- case OP_LSUB_OVF: {
+ case OP_LSUB_OVF:
+ case OP_S390_LSUB_OVF: {
short int *o[1];
s390_lr (code, s390_r14, ins->sreg2);
s390_slr (code, s390_r0, ins->sreg1);
s390_lr (code, ins->dreg+1, s390_r1);
}
break;
- case OP_LSUB_OVF_UN: {
+ case OP_LSUB_OVF_UN:
+ case OP_S390_LSUB_OVF_UN: {
s390_slr (code, s390_r0, ins->sreg1);
s390_slbr (code, s390_r1, ins->sreg2);
EMIT_COND_SYSTEM_EXCEPTION (S390_CC_LT, "OverflowException");
}
}
break;
- case OP_AND_IMM: {
+ case OP_AND_IMM:
+ case OP_IAND_IMM: {
if (s390_is_imm16 (ins->inst_imm)) {
s390_lhi (code, s390_r0, ins->inst_imm);
if (ins->dreg != ins->sreg1) {
}
}
break;
- case OP_OR_IMM: {
+ case OP_OR_IMM:
+ case OP_IOR_IMM: {
if (s390_is_imm16 (ins->inst_imm)) {
s390_lhi (code, s390_r0, ins->inst_imm);
if (ins->dreg != ins->sreg1) {
}
}
break;
- case OP_XOR_IMM: {
+ case OP_XOR_IMM:
+ case OP_IXOR_IMM: {
if (s390_is_imm16 (ins->inst_imm)) {
s390_lhi (code, s390_r0, ins->inst_imm);
if (ins->dreg != ins->sreg1) {
s390_sll (code, ins->dreg, src2, 0);
}
break;
- case OP_SHL_IMM: {
+ case OP_SHL_IMM:
+ case OP_ISHL_IMM: {
if (ins->sreg1 != ins->dreg) {
s390_lr (code, ins->dreg, ins->sreg1);
}
s390_sra (code, ins->dreg, src2, 0);
}
break;
- case OP_SHR_IMM: {
+ case OP_SHR_IMM:
+ case OP_ISHR_IMM: {
if (ins->sreg1 != ins->dreg) {
s390_lr (code, ins->dreg, ins->sreg1);
}
s390_sra (code, ins->dreg, 0, (ins->inst_imm & 0x1f));
}
break;
- case OP_SHR_UN_IMM: {
+ case OP_SHR_UN_IMM:
+ case OP_ISHR_UN_IMM: {
if (ins->sreg1 != ins->dreg) {
s390_lr (code, ins->dreg, ins->sreg1);
}
s390_lcr (code, ins->dreg, ins->sreg1);
}
break;
+ case OP_S390_LNEG: {
+ /* From gcc code */
+ g_assert (ins->dreg + 1 != ins->sreg1);
+ s390_lcr (code, ins->dreg + 1, ins->sreg2);
+ s390_lcr (code, ins->dreg, ins->sreg1);
+ s390_je (code, 3);
+ s390_bctr (code, ins->dreg + 1, 0);
+ }
+ break;
case OP_IMUL: {
if (ins->sreg1 == ins->dreg) {
s390_msr (code, ins->dreg, ins->sreg2);
}
}
break;
- case OP_MUL_IMM: {
+ case OP_MUL_IMM:
+ case OP_IMUL_IMM: {
if (s390_is_imm16 (ins->inst_imm)) {
s390_lhi (code, s390_r13, ins->inst_imm);
} else {
s390_l (code,ins->dreg, 0, s390_r13, 4);
}
break;
+ case OP_JUMP_TABLE: {
+ mono_add_patch_info (cfg, code - cfg->native_code,
+ (MonoJumpInfoType)ins->inst_i1, ins->inst_p0);
+ s390_basr (code, s390_r13, 0);
+ s390_j (code, 4);
+ s390_word (code, 0);
+ s390_l (code, ins->dreg, 0, s390_r13, 4);
+ }
+ break;
case OP_ICONV_TO_I4:
case OP_ICONV_TO_U4:
case OP_MOVE: {
}
break;
case OP_FCONV_TO_R4: {
+ // FIXME:
+ if (ins->dreg != ins->sreg1) {
+ s390_ldr (code, ins->dreg, ins->sreg1);
+ }
+ /*
NOT_IMPLEMENTED;
- next = mono_inst_list_next (&ins->node, &bb->ins_list);
- if ((next) &&
- (next->opcode != OP_FMOVE) &&
- (next->opcode != OP_STORER4_MEMBASE_REG))
+ if ((ins->next) &&
+ (ins->next->opcode != OP_FMOVE) &&
+ (ins->next->opcode != OP_STORER4_MEMBASE_REG))
s390_ledbr (code, ins->dreg, ins->sreg1);
+ */
}
break;
case OP_JMP: {
break;
case OP_LCALL:
case OP_VCALL:
+ case OP_VCALL2:
case OP_VOIDCALL:
case OP_CALL: {
call = (MonoCallInst*)ins;
break;
case OP_LCALL_REG:
case OP_VCALL_REG:
+ case OP_VCALL2_REG:
case OP_VOIDCALL_REG:
case OP_CALL_REG: {
s390_lr (code, s390_r1, ins->sreg1);
break;
case OP_LCALL_MEMBASE:
case OP_VCALL_MEMBASE:
+ case OP_VCALL2_MEMBASE:
case OP_VOIDCALL_MEMBASE:
case OP_FCALL_MEMBASE:
case OP_CALL_MEMBASE: {
}
break;
case OP_START_HANDLER: {
- if (s390_is_uimm12 (ins->inst_left->inst_offset)) {
+ MonoInst *spvar = mono_find_spvar_for_region (cfg, bb->region);
+ if (s390_is_uimm12 (spvar->inst_offset)) {
s390_st (code, s390_r14, 0,
- ins->inst_left->inst_basereg,
- ins->inst_left->inst_offset);
+ spvar->inst_basereg,
+ spvar->inst_offset);
} else {
s390_basr (code, s390_r13, 0);
s390_j (code, 4);
- s390_word (code, ins->inst_left->inst_offset);
+ s390_word (code, spvar->inst_offset);
s390_l (code, s390_r13, 0, s390_r13, 4);
s390_st (code, s390_r14, s390_r13,
- ins->inst_left->inst_basereg, 0);
+ spvar->inst_basereg, 0);
}
}
break;
case OP_ENDFILTER: {
+ MonoInst *spvar = mono_find_spvar_for_region (cfg, bb->region);
if (ins->sreg1 != s390_r2)
s390_lr (code, s390_r2, ins->sreg1);
- if (s390_is_uimm12 (ins->inst_left->inst_offset)) {
- s390_l (code, s390_r14, 0, ins->inst_left->inst_basereg,
- ins->inst_left->inst_offset);
+ if (s390_is_uimm12 (spvar->inst_offset)) {
+ s390_l (code, s390_r14, 0, spvar->inst_basereg,
+ spvar->inst_offset);
} else {
s390_basr (code, s390_r13, 0);
s390_j (code, 4);
- s390_word (code, ins->inst_left->inst_offset);
+ s390_word (code, spvar->inst_offset);
s390_l (code, s390_r13, 0, s390_r13, 4);
s390_l (code, s390_r14, s390_r13,
- ins->inst_left->inst_basereg, 0);
+ spvar->inst_basereg, 0);
}
s390_br (code, s390_r14);
}
break;
case OP_ENDFINALLY: {
- if (s390_is_uimm12 (ins->inst_left->inst_offset)) {
- s390_l (code, s390_r14, 0, ins->inst_left->inst_basereg,
- ins->inst_left->inst_offset);
+ MonoInst *spvar = mono_find_spvar_for_region (cfg, bb->region);
+ if (s390_is_uimm12 (spvar->inst_offset)) {
+ s390_l (code, s390_r14, 0, spvar->inst_basereg,
+ spvar->inst_offset);
} else {
s390_basr (code, s390_r13, 0);
s390_j (code, 4);
- s390_word (code, ins->inst_left->inst_offset);
+ s390_word (code, spvar->inst_offset);
s390_l (code, s390_r13, 0, s390_r13, 4);
s390_l (code, s390_r14, s390_r13,
- ins->inst_left->inst_basereg, 0);
+ spvar->inst_basereg, 0);
}
s390_br (code, s390_r14);
}
break;
case OP_LABEL: {
ins->inst_c0 = code - cfg->native_code;
+ }
+ break;
+ case OP_NOP:
+ case OP_DUMMY_USE:
+ case OP_DUMMY_STORE:
+ case OP_NOT_REACHED:
+ case OP_NOT_NULL: {
}
break;
case OP_BR:
s390_br (code, ins->sreg1);
}
break;
- case OP_CEQ: {
+ case OP_CEQ:
+ case OP_ICEQ: {
s390_lhi (code, ins->dreg, 1);
s390_jz (code, 4);
s390_lhi (code, ins->dreg, 0);
}
break;
- case OP_CLT: {
+ case OP_CLT:
+ case OP_ICLT: {
s390_lhi (code, ins->dreg, 1);
s390_jl (code, 4);
s390_lhi (code, ins->dreg, 0);
}
break;
- case OP_CLT_UN: {
+ case OP_CLT_UN:
+ case OP_ICLT_UN: {
s390_lhi (code, ins->dreg, 1);
s390_jlo (code, 4);
s390_lhi (code, ins->dreg, 0);
}
break;
- case OP_CGT: {
+ case OP_CGT:
+ case OP_ICGT: {
s390_lhi (code, ins->dreg, 1);
s390_jh (code, 4);
s390_lhi (code, ins->dreg, 0);
}
break;
- case OP_CGT_UN: {
+ case OP_CGT_UN:
+ case OP_ICGT_UN: {
s390_lhi (code, ins->dreg, 1);
s390_jho (code, 4);
s390_lhi (code, ins->dreg, 0);
}
break;
case OP_COND_EXC_EQ:
+ case OP_COND_EXC_IEQ:
EMIT_COND_SYSTEM_EXCEPTION (S390_CC_EQ, ins->inst_p1);
break;
case OP_COND_EXC_NE_UN:
+ case OP_COND_EXC_INE_UN:
EMIT_COND_SYSTEM_EXCEPTION (S390_CC_NE, ins->inst_p1);
break;
case OP_COND_EXC_LT:
+ case OP_COND_EXC_ILT:
case OP_COND_EXC_LT_UN:
+ case OP_COND_EXC_ILT_UN:
EMIT_COND_SYSTEM_EXCEPTION (S390_CC_LT, ins->inst_p1);
break;
case OP_COND_EXC_GT:
+ case OP_COND_EXC_IGT:
case OP_COND_EXC_GT_UN:
+ case OP_COND_EXC_IGT_UN:
EMIT_COND_SYSTEM_EXCEPTION (S390_CC_GT, ins->inst_p1);
break;
case OP_COND_EXC_GE:
+ case OP_COND_EXC_IGE:
case OP_COND_EXC_GE_UN:
+ case OP_COND_EXC_IGE_UN:
EMIT_COND_SYSTEM_EXCEPTION (S390_CC_GE, ins->inst_p1);
break;
case OP_COND_EXC_LE:
+ case OP_COND_EXC_ILE:
case OP_COND_EXC_LE_UN:
+ case OP_COND_EXC_ILE_UN:
EMIT_COND_SYSTEM_EXCEPTION (S390_CC_LE, ins->inst_p1);
break;
case OP_COND_EXC_OV:
+ case OP_COND_EXC_IOV:
EMIT_COND_SYSTEM_EXCEPTION (S390_CC_OV, ins->inst_p1);
break;
case OP_COND_EXC_NO:
+ case OP_COND_EXC_INO:
EMIT_COND_SYSTEM_EXCEPTION (S390_CC_NO, ins->inst_p1);
break;
case OP_COND_EXC_C:
+ case OP_COND_EXC_IC:
EMIT_COND_SYSTEM_EXCEPTION (S390_CC_CY, ins->inst_p1);
break;
case OP_COND_EXC_NC:
+ case OP_COND_EXC_INC:
EMIT_COND_SYSTEM_EXCEPTION (S390_CC_NC, ins->inst_p1);
break;
case OP_IBEQ:
g_assert_not_reached ();
/* Implemented as helper calls */
break;
- case OP_LCONV_TO_OVF_I: {
+ case OP_LCONV_TO_OVF_I:
+ case OP_LCONV_TO_OVF_I4_2: {
+ /* Valid ints: 0xffffffff:80000000 to 0x00000000:0x7fffffff */
short int *o[5];
s390_ltr (code, ins->sreg1, ins->sreg1);
cpos += max_len;
+ last_ins = ins;
last_offset = offset;
}
if (mono_jit_trace_calls != NULL && mono_trace_eval (method))
tracing = 1;
- cfg->code_size = 512;
+ cfg->code_size = 1024;
cfg->native_code = code = g_malloc (cfg->code_size);
s390_stm (code, s390_r6, s390_r14, STK_BASE, S390_REG_SAVE_OFFSET);
+ if (cfg->arch.bkchain_reg != -1)
+ s390_lr (code, cfg->arch.bkchain_reg, STK_BASE);
+
if (cfg->flags & MONO_CFG_HAS_ALLOCA) {
cfg->used_int_regs |= 1 << 11;
}
*/
max_offset = 0;
for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
- MonoInst *ins;
+ MonoInst *ins = bb->code;
bb->max_offset = max_offset;
if (cfg->prof_options & MONO_PROFILE_COVERAGE)
ArgInfo *ainfo = cinfo->args + i;
inst = cfg->args [pos];
+ if (inst->opcode == OP_VTARG_ADDR)
+ inst = inst->inst_left;
+
if (inst->opcode == OP_REGVAR) {
if (ainfo->regtype == RegTypeGeneral)
s390_lr (code, inst->dreg, ainfo->reg);
cfg->code_len = code - cfg->native_code;
+ g_assert (cfg->code_len < cfg->code_size);
+
return code;
}
/*========================= End of Function ========================*/
+/*------------------------------------------------------------------*/
+/* */
+/* Name - mono_arch_emit_inst_for_method */
+/* */
+/*------------------------------------------------------------------*/
+
+MonoInst*
+mono_arch_emit_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
+{
+ return NULL;
+}
+
+/*========================= End of Function ========================*/
+
+void
+mono_arch_decompose_opts (MonoCompile *cfg, MonoInst *ins)
+{
+ switch (ins->opcode) {
+ case OP_ISUB_OVF:
+ ins->opcode = OP_S390_ISUB_OVF;
+ break;
+ case OP_ISUB_OVF_UN:
+ ins->opcode = OP_S390_ISUB_OVF_UN;
+ break;
+ case OP_IADD_OVF:
+ ins->opcode = OP_S390_IADD_OVF;
+ break;
+ case OP_IADD_OVF_UN:
+ ins->opcode = OP_S390_IADD_OVF_UN;
+ break;
+ default:
+ break;
+ }
+}
+
+/*========================= End of Function ========================*/
+
+/*------------------------------------------------------------------*/
+/* */
+/* Name - mono_arch_decompose_long_opts */
+/* */
+/*------------------------------------------------------------------*/
+
+void
+mono_arch_decompose_long_opts (MonoCompile *cfg, MonoInst *ins)
+{
+ // The generic code seems to work for OP_LSUB fine on s390, why is a different
+ // implementation needed ? gcc also seems to use the different implementation.
+ // FIXME: What about the other OP_L opcodes below ?
+
+ switch (ins->opcode) {
+ case OP_LADD_OVF:
+ case OP_LADD_OVF_UN:
+ case OP_LSUB_OVF:
+ case OP_LSUB_OVF_UN: {
+ int opcode = 0;
+
+ switch (ins->opcode) {
+ case OP_LADD:
+ opcode = OP_S390_LADD;
+ break;
+ case OP_LADD_OVF:
+ opcode = OP_S390_LADD_OVF;
+ break;
+ case OP_LADD_OVF_UN:
+ opcode = OP_S390_LADD_OVF_UN;
+ break;
+ case OP_LSUB:
+ opcode = OP_S390_LSUB;
+ break;
+ case OP_LSUB_OVF:
+ opcode = OP_S390_LSUB_OVF;
+ break;
+ case OP_LSUB_OVF_UN:
+ opcode = OP_S390_LSUB_OVF_UN;
+ break;
+ default:
+ g_assert_not_reached ();
+ }
+
+ /* These hard regs make ssa crazy */
+ cfg->disable_ssa = TRUE;
+ MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, s390_r0, ins->sreg1 + 1);
+ MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, s390_r1, ins->sreg1 + 2);
+ MONO_EMIT_NEW_BIALU (cfg, opcode, ins->dreg, ins->sreg2 + 1, ins->sreg2 + 2);
+ NULLIFY_INS (ins);
+ break;
+ }
+ case OP_LADD_IMM: {
+ int dreg = mono_alloc_dreg (cfg, STACK_I8);
+ MONO_EMIT_NEW_I8CONST (cfg, dreg, ins->inst_l);
+ MONO_EMIT_NEW_BIALU (cfg, OP_LADD, ins->dreg, ins->sreg1, dreg);
+ NULLIFY_INS (ins);
+ break;
+ }
+ case OP_LSUB_IMM: {
+ int dreg = mono_alloc_dreg (cfg, STACK_I8);
+ MONO_EMIT_NEW_I8CONST (cfg, dreg, ins->inst_l);
+ MONO_EMIT_NEW_BIALU (cfg, OP_LSUB, ins->dreg, ins->sreg1, dreg);
+ NULLIFY_INS (ins);
+ break;
+ }
+ case OP_LNEG: {
+ MONO_EMIT_NEW_BIALU (cfg, OP_S390_LNEG, ins->dreg, ins->sreg1 + 1, ins->sreg1 + 2);
+ NULLIFY_INS (ins);
+ break;
+ }
+ case OP_ISUB_OVF:
+ ins->opcode = OP_S390_ISUB_OVF;
+ break;
+ case OP_ISUB_OVF_UN:
+ ins->opcode = OP_S390_ISUB_OVF_UN;
+ break;
+ default:
+ break;
+ }
+}
+
+/*========================= End of Function ========================*/
+
/*------------------------------------------------------------------*/
/* */
/* Name - mono_arch_print_tree */
typedef struct ucontext MonoContext;
typedef struct MonoCompileArch {
+ int bkchain_reg;
} MonoCompileArch;
typedef struct
#define MONO_ARCH_HAVE_ATOMIC_ADD 1
#define MONO_ARCH_HAVE_ATOMIC_EXCHANGE 1
#define MONO_ARCH_ENABLE_NORMALIZE_OPCODES 1
+#define MONO_ARCH_HAVE_DECOMPOSE_OPTS 1
+#define MONO_ARCH_HAVE_DECOMPOSE_LONG_OPTS 1
// #define MONO_ARCH_SIGSEGV_ON_ALTSTACK 1
// #define MONO_ARCH_SIGNAL_STACK_SIZE 65536
// #define MONO_ARCH_HAVE_THROW_CORLIB_EXCEPTION 1
/* Definitions used by mini-codegen.c */
/*===============================================*/
-/*--------------------------------------------*/
-/* use s390_r2-s390_r6 as parm registers */
-/* s390_r0, s390_r1, s390_r13 used internally */
-/* s390_r15 is the stack pointer */
-/*--------------------------------------------*/
-#define MONO_ARCH_CALLEE_REGS (0x1ffc)
+/*-----------------------------------------------------*/
+/* use s390_r2-s390_r6 as parm registers */
+/* s390_r0, s390_r1, s390_r12, s390_r13 used internally*/
+/* s390_r8..s390_r11 are used for global regalloc */
+/* s390_r15 is the stack pointer */
+/*-----------------------------------------------------*/
+#define MONO_ARCH_CALLEE_REGS (0xfc)
#define MONO_ARCH_CALLEE_SAVED_REGS 0xff80
s390_ldr (code, ins->dreg, ins->sreg1); \
}
+#define MONO_EMIT_NEW_MOVE2(cfg,dest,offset,src,imm,size) do { \
+ MonoInst *inst; \
+ int tmpr = 0; \
+ int sReg, dReg; \
+ MONO_INST_NEW (cfg, inst, OP_NOP); \
+ if (size > 256) { \
+ tmpr = mono_alloc_preg (cfg); \
+ MONO_EMIT_NEW_ICONST(cfg,tmpr,size); \
+ inst->dreg = dest; \
+ inst->inst_offset = offset; \
+ inst->sreg1 = src; \
+ inst->inst_imm = imm; \
+ inst->sreg2 = tmpr; \
+ } else { \
+ if (s390_is_uimm12(offset)) { \
+ inst->dreg = dest; \
+ inst->inst_offset = offset; \
+ } else { \
+ dReg = mono_alloc_preg (cfg); \
+ MONO_EMIT_NEW_BIALU_IMM(cfg, OP_ADD_IMM, \
+ dReg, dest, offset); \
+ inst->dreg = dReg; \
+ inst->inst_offset = 0; \
+ } \
+ if (s390_is_uimm12(imm)) { \
+ inst->sreg1 = src; \
+ inst->inst_imm = imm; \
+ } else { \
+ sReg = mono_alloc_preg (cfg); \
+ MONO_EMIT_NEW_BIALU_IMM(cfg, OP_ADD_IMM, \
+ sReg, src, imm); \
+ inst->sreg1 = sReg; \
+ inst->inst_imm = 0; \
+ } \
+ } \
+ inst->opcode = OP_S390_MOVE; \
+ inst->backend.size = size; \
+ MONO_ADD_INS (cfg->cbb, inst); \
+ } while (0)
+
+#define MONO_OUTPUT_VTR2(cfg, size, dr, sr, so) do { \
+ int reg = mono_alloc_preg (cfg); \
+ switch (size) { \
+ case 0: \
+ MONO_EMIT_NEW_ICONST(cfg, reg, 0); \
+ break; \
+ case 1: \
+ MONO_EMIT_NEW_LOAD_MEMBASE_OP(cfg, OP_LOADU1_MEMBASE, \
+ reg, sr, so); \
+ break; \
+ case 2: \
+ MONO_EMIT_NEW_LOAD_MEMBASE_OP(cfg, OP_LOADU2_MEMBASE, \
+ reg, sr, so); \
+ break; \
+ case 4: \
+ MONO_EMIT_NEW_LOAD_MEMBASE_OP(cfg, OP_LOADI4_MEMBASE, \
+ reg, sr, so); \
+ break; \
+ case 8: \
+ MONO_EMIT_NEW_LOAD_MEMBASE_OP(cfg, OP_LOADI8_MEMBASE, \
+ reg, sr, so); \
+ break; \
+ } \
+ mono_call_inst_add_outarg_reg(cfg, call, reg, dr, FALSE); \
+} while (0)
+
+#define MONO_OUTPUT_VTS2(cfg, size, dr, dx, sr, so) do { \
+ int tmpr; \
+ switch (size) { \
+ case 0: \
+ tmpr = mono_alloc_preg (cfg); \
+ MONO_EMIT_NEW_ICONST(cfg, tmpr, 0); \
+ MONO_EMIT_NEW_STORE_MEMBASE(cfg, OP_STORE_MEMBASE_REG, \
+ dr, dx, tmpr); \
+ break; \
+ case 1: \
+ tmpr = mono_alloc_preg (cfg); \
+ MONO_EMIT_NEW_LOAD_MEMBASE_OP(cfg, OP_LOADU1_MEMBASE, \
+ tmpr, sr, so); \
+ MONO_EMIT_NEW_STORE_MEMBASE(cfg, OP_STORE_MEMBASE_REG, \
+ dr, dx, tmpr); \
+ break; \
+ case 2: \
+ tmpr = mono_alloc_preg (cfg); \
+ MONO_EMIT_NEW_LOAD_MEMBASE_OP(cfg, OP_LOADU2_MEMBASE, \
+ tmpr, sr, so); \
+ MONO_EMIT_NEW_STORE_MEMBASE(cfg, OP_STORE_MEMBASE_REG, \
+ dr, dx, tmpr); \
+ break; \
+ case 4: \
+ tmpr = mono_alloc_preg (cfg); \
+ MONO_EMIT_NEW_LOAD_MEMBASE_OP(cfg, OP_LOADI4_MEMBASE, \
+ tmpr, sr, so); \
+ MONO_EMIT_NEW_STORE_MEMBASE(cfg, OP_STORE_MEMBASE_REG, \
+ dr, dx, tmpr); \
+ break; \
+ case 8: \
+ MONO_EMIT_NEW_MOVE2 (cfg, dr, dx, sr, so, size); \
+ break; \
+ } \
+} while (0)
+
#undef DEBUG
#define DEBUG(a) if (cfg->verbose_level > 1) a
MonoInst *data;
};
-enum {
+typedef enum {
RegTypeGeneral,
RegTypeBase,
RegTypeFP,
+ RegTypeFPR4,
RegTypeStructByVal,
+ RegTypeStructByValInFP,
RegTypeStructByAddr
-};
+} ArgStorage;
typedef struct {
gint32 offset; /* offset from caller's stack */
gint32 offparm; /* offset from callee's stack */
guint16 vtsize; /* in param area */
guint8 reg;
- guint8 regtype; /* See RegType* */
+ ArgStorage regtype;
guint32 size; /* Size of structure used by RegTypeStructByVal */
gint32 type; /* Data type of argument */
} ArgInfo;
static CallInfo * get_call_info (MonoCompile *, MonoMemPool *, MonoMethodSignature *, gboolean);
static guchar * emit_float_to_int (MonoCompile *, guchar *, int, int, int, gboolean);
gpointer mono_arch_get_lmf_addr (void);
-static guint8 * emit_load_volatile_registers (guint8 *, MonoCompile *);
+static guint8 * emit_load_volatile_arguments (guint8 *, MonoCompile *);
static void catch_SIGILL(int, siginfo_t *, void *);
static void emit_sig_cookie (MonoCompile *, MonoCallInst *, CallInfo *, int);
if ((cfg->flags & MONO_CFG_HAS_ALLOCA) || header->num_clauses)
cfg->frame_reg = s390_r11;
+
+ /* FIXME: s390_r12 is reserved for bkchain_reg. Only reserve it if needed */
+ top = 12;
for (i = 8; i < top; ++i) {
- if (cfg->frame_reg != i)
+ if (cfg->frame_reg != i)
regs = g_list_prepend (regs, GUINT_TO_POINTER (i));
}
(info->fields[0].field->type->type == MONO_TYPE_R4)) {
cinfo->args[nParm].size = sizeof(float);
add_float(&fr, sz, cinfo->args+nParm);
+ nParm ++;
break;
}
(info->fields[0].field->type->type == MONO_TYPE_R8)) {
cinfo->args[nParm].size = sizeof(double);
add_float(&fr, sz, cinfo->args+nParm);
+ nParm ++;
break;
}
cfg->frame_reg = frame_reg;
+ cfg->arch.bkchain_reg = -1;
+
if (frame_reg != STK_BASE)
cfg->used_int_regs |= 1 << frame_reg;
cinfo = get_call_info (cfg, cfg->mempool, sig, sig->pinvoke);
if (cinfo->struct_ret) {
- cfg->vret_addr->opcode = OP_REGVAR;
- cfg->vret_addr->inst_c0 = s390_r2;
+ if (!cfg->new_ir) {
+ cfg->vret_addr->opcode = OP_REGVAR;
+ cfg->vret_addr->inst_c0 = s390_r2;
+ }
} else {
switch (mono_type_get_underlying_type (sig->ret)->type) {
case MONO_TYPE_VOID:
inst->opcode = OP_REGOFFSET;
inst->inst_basereg = frame_reg;
offset += sizeof(gpointer);
- if ((sig->call_convention == MONO_CALL_VARARG) &&
+ if (!cfg->new_ir && (sig->call_convention == MONO_CALL_VARARG) &&
(!retFitsInReg (cinfo->ret.size)))
cfg->sig_cookie += cinfo->ret.size;
if (G_UNLIKELY (cfg->verbose_level > 1)) {
inst = cfg->args [curinst];
if (inst->opcode != OP_REGVAR) {
switch (cinfo->args[iParm].regtype) {
- case RegTypeStructByAddr :
- if (cinfo->args[iParm].reg == STK_BASE) {
- inst->opcode = OP_S390_LOADARG;
+ case RegTypeStructByAddr :
+ if (cfg->new_ir) {
+ MonoInst *indir;
+
+ size = sizeof (gpointer);
+
+ inst->opcode = OP_REGOFFSET;
inst->inst_basereg = frame_reg;
- offset = S390_ALIGN(offset, sizeof(long));
- inst->inst_offset = offset;
- size = abs(cinfo->args[iParm].vtsize);
- inst->backend.arg_info = cinfo->args[iParm].offset;
+ offset = S390_ALIGN (offset, sizeof (gpointer));
+ inst->inst_offset = offset;
+
+ /* Add a level of indirection */
+ MONO_INST_NEW (cfg, indir, 0);
+ *indir = *inst;
+ inst->opcode = OP_VTARG_ADDR;
+ inst->inst_left = indir;
} else {
- inst->opcode = OP_S390_ARGREG;
- inst->inst_basereg = frame_reg;
- size = sizeof(gpointer);
- offset = S390_ALIGN(offset, size);
- inst->inst_offset = offset;
- inst->backend.arg_info = cinfo->args[iParm].offset;
+ if (cinfo->args[iParm].reg == STK_BASE) {
+ inst->opcode = OP_S390_LOADARG;
+ inst->inst_basereg = frame_reg;
+ offset = S390_ALIGN(offset, sizeof(long));
+ inst->inst_offset = offset;
+ size = abs(cinfo->args[iParm].vtsize);
+ inst->backend.arg_info = cinfo->args[iParm].offset;
+ } else {
+ inst->opcode = OP_S390_ARGREG;
+ inst->inst_basereg = frame_reg;
+ size = sizeof(gpointer);
+ offset = S390_ALIGN(offset, size);
+ inst->inst_offset = offset;
+ inst->backend.arg_info = cinfo->args[iParm].offset;
+ }
}
- break;
- case RegTypeStructByVal :
+ break;
+ case RegTypeStructByVal :
+ if (cfg->new_ir) {
+ size = cinfo->args[iParm].size;
+ offset = S390_ALIGN(offset, size);
+ inst->opcode = OP_REGOFFSET;
+ inst->inst_basereg = frame_reg;
+ inst->inst_offset = offset;
+ } else {
inst->opcode = OP_S390_ARGPTR;
inst->inst_basereg = frame_reg;
size = cinfo->args[iParm].size;
offset = S390_ALIGN(offset, size);
inst->inst_offset = offset;
inst->backend.arg_info = cinfo->args[iParm].offset;
- break;
- default :
- if (cinfo->args[iParm].reg != STK_BASE) {
- inst->opcode = OP_REGOFFSET;
- inst->inst_basereg = frame_reg;
- size = (cinfo->args[iParm].size < 8
- ? sizeof(int)
- : sizeof(long));
- offset = S390_ALIGN(offset, size);
- inst->inst_offset = offset;
+ }
+ break;
+ default :
+ if (cfg->new_ir) {
+ if (cinfo->args [iParm].reg == STK_BASE) {
+ /*
+ * These arguments are in the previous frame, so we can't
+ * compute their offset from the current frame pointer right
+ * now, since cfg->stack_offset is not yet known, so dedicate a
+ * register holding the previous frame pointer.
+ */
+ cfg->arch.bkchain_reg = s390_r12;
+ cfg->used_int_regs |= 1 << cfg->arch.bkchain_reg;
+
+ inst->opcode = OP_REGOFFSET;
+ inst->inst_basereg = cfg->arch.bkchain_reg;
+ size = (cinfo->args[iParm].size < 8
+ ? 8 - cinfo->args[iParm].size
+ : 0);
+ inst->inst_offset = cinfo->args [iParm].offset + size;
+ size = sizeof (long);
+ } else {
+ inst->opcode = OP_REGOFFSET;
+ inst->inst_basereg = frame_reg;
+ size = (cinfo->args[iParm].size < 8
+ ? sizeof(int)
+ : sizeof(long));
+ offset = S390_ALIGN(offset, size);
+ inst->inst_offset = offset;
+ }
} else {
- inst->opcode = OP_S390_STKARG;
- inst->inst_basereg = frame_reg;
- size = ((cinfo->args[iParm].size < 8)
- ? 8 - cinfo->args[iParm].size
- : 0);
- inst->inst_offset = cinfo->args[iParm].offset +
- size;
- inst->backend.arg_info = 0;
- size = sizeof(long);
- }
+ if (cinfo->args[iParm].reg != STK_BASE) {
+ inst->opcode = OP_REGOFFSET;
+ inst->inst_basereg = frame_reg;
+ size = (cinfo->args[iParm].size < 8
+ ? sizeof(int)
+ : sizeof(long));
+ offset = S390_ALIGN(offset, size);
+ inst->inst_offset = offset;
+ } else {
+ inst->opcode = OP_S390_STKARG;
+ inst->inst_basereg = frame_reg;
+ size = ((cinfo->args[iParm].size < 8)
+ ? 8 - cinfo->args[iParm].size
+ : 0);
+ inst->inst_offset = cinfo->args[iParm].offset +
+ size;
+ inst->backend.arg_info = 0;
+ size = sizeof(long);
+ }
+ }
+ break;
}
if ((sig->call_convention == MONO_CALL_VARARG) &&
(cinfo->args[iParm].regtype != RegTypeGeneral) &&
arg->ins.cil_code = in->cil_code;
arg->ins.inst_left = in;
arg->ins.type = in->type;
- MONO_INST_LIST_ADD_TAIL (&arg->ins.node, &call->out_args);
+ /* prepend, we'll need to reverse them later */
+ arg->ins.next = call->out_args;
+ call->out_args = (MonoInst *) arg;
arg->ins.inst_right = (MonoInst *) call;
if (ainfo->regtype == RegTypeGeneral) {
arg->ins.backend.reg3 = ainfo->reg;
emit_sig_cookie (cfg, call, cinfo, sizeof(MonoType *));
}
+ /*
+ * Reverse the call->out_args list.
+ */
+ {
+ MonoInst *prev = NULL, *list = call->out_args, *next;
+ while (list) {
+ next = list->next;
+ list->next = prev;
+ prev = list;
+ list = next;
+ }
+ call->out_args = prev;
+ }
+
return call;
}
/*========================= End of Function ========================*/
+/*
+ * add_outarg_reg2: emit the move that materializes the value produced by
+ * TREE into a fresh vreg, then record that vreg as a call argument which
+ * must live in hard register REG at call time (via
+ * mono_call_inst_add_outarg_reg).
+ *   RegTypeGeneral - integer OP_MOVE into an integer vreg;
+ *   RegTypeFP      - OP_FMOVE into a float vreg;
+ *   RegTypeFPR4    - OP_S390_SETF4RET, the s390-specific opcode used here
+ *                    to pass the value as single precision.
+ * Any other storage kind is a caller bug (g_assert_not_reached).
+ */
+static void
+add_outarg_reg2 (MonoCompile *cfg, MonoCallInst *call, ArgStorage storage, int reg, MonoInst *tree)
+{
+	MonoInst *ins;
+
+	switch (storage) {
+	case RegTypeGeneral:
+		MONO_INST_NEW (cfg, ins, OP_MOVE);
+		ins->dreg = mono_alloc_ireg (cfg);
+		ins->sreg1 = tree->dreg;
+		MONO_ADD_INS (cfg->cbb, ins);
+		mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, reg, FALSE);
+		break;
+	case RegTypeFP:
+		MONO_INST_NEW (cfg, ins, OP_FMOVE);
+		ins->dreg = mono_alloc_freg (cfg);
+		ins->sreg1 = tree->dreg;
+		MONO_ADD_INS (cfg->cbb, ins);
+		mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, reg, TRUE);
+		break;
+	case RegTypeFPR4:
+		MONO_INST_NEW (cfg, ins, OP_S390_SETF4RET);
+		ins->dreg = mono_alloc_freg (cfg);
+		ins->sreg1 = tree->dreg;
+		MONO_ADD_INS (cfg->cbb, ins);
+		mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, reg, TRUE);
+		break;
+	default:
+		g_assert_not_reached ();
+	}
+}
+
+/*
+ * emit_sig_cookie2: Linear-IR version of emit_sig_cookie.  Stores the
+ * vararg signature cookie (a MonoMethodSignature* describing only the
+ * arguments after the sentinel) at its stack slot
+ * (STK_BASE + cinfo->sigCookie.offset) so mono_ArgIterator_Setup can
+ * walk the variable arguments.  Disables AOT because the duplicated
+ * signature pointer is baked into the code as an OP_ICONST.
+ * NOTE(review): tmpSig is duplicated per call site and never freed here;
+ * presumably it lives for the lifetime of the compiled method -- confirm
+ * mono_metadata_signature_dup ownership semantics.
+ */
+static void
+emit_sig_cookie2 (MonoCompile *cfg, MonoCallInst *call, CallInfo *cinfo)
+{
+	MonoMethodSignature *tmpSig;
+	MonoInst *sig_arg;
+
+	cfg->disable_aot = TRUE;
+
+	/*----------------------------------------------------------*/
+	/* mono_ArgIterator_Setup assumes the signature cookie is   */
+	/* passed first and all the arguments which were before it  */
+	/* passed on the stack after the signature. So compensate   */
+	/* by passing a different signature.                        */
+	/*----------------------------------------------------------*/
+	tmpSig = mono_metadata_signature_dup (call->signature);
+	tmpSig->param_count -= call->signature->sentinelpos;
+	tmpSig->sentinelpos = 0;
+	if (tmpSig->param_count > 0)
+		memcpy (tmpSig->params,
+			call->signature->params + call->signature->sentinelpos,
+			tmpSig->param_count * sizeof(MonoType *));
+
+	MONO_INST_NEW (cfg, sig_arg, OP_ICONST);
+	sig_arg->dreg = mono_alloc_ireg (cfg);
+	sig_arg->inst_p0 = tmpSig;
+	MONO_ADD_INS (cfg->cbb, sig_arg);
+
+	MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, STK_BASE,
+				     cinfo->sigCookie.offset, sig_arg->dreg);
+}
+
+/*------------------------------------------------------------------*/
+/* */
+/* Name - mono_arch_emit_call */
+/* */
+/*------------------------------------------------------------------*/
+
+/*
+ * mono_arch_emit_call: Linear-IR call-argument marshalling for s390x.
+ * Computes the calling-convention layout with get_call_info (), updates
+ * cfg->param_area / call->stack_usage, and for every argument emits the
+ * IR that places it in its register or stack slot:
+ *   - struct returns pass the vret buffer address in cinfo->ret.reg;
+ *   - RegTypeGeneral/RegTypeFP go through add_outarg_reg2 ();
+ *   - valuetypes are emitted as OP_OUTARG_VT and finished later by
+ *     mono_arch_emit_outarg_vt ();
+ *   - RegTypeBase arguments are stored directly at their stack offset;
+ *   - vararg calls get a signature cookie at the sentinel position
+ *     (also after the loop, for the case where every argument is fixed).
+ */
+void
+mono_arch_emit_call (MonoCompile *cfg, MonoCallInst *call)
+{
+	MonoInst *in;
+	MonoMethodSignature *sig;
+	MonoInst *ins;
+	int i, n, lParamArea;
+	CallInfo *cinfo;
+	ArgInfo *ainfo = NULL;
+	int stackSize;
+
+	sig = call->signature;
+	n = sig->param_count + sig->hasthis;
+	DEBUG (g_print ("Call requires: %d parameters\n",n));
+
+	cinfo = get_call_info (cfg, cfg->mempool, sig, sig->pinvoke);
+
+	/* Grow the recorded stack usage / parameter area to cover this call */
+	stackSize = cinfo->sz.stack_size + cinfo->sz.local_size + cinfo->sz.parm_size + cinfo->sz.offset;
+	call->stack_usage = MAX(stackSize, call->stack_usage);
+	lParamArea = MAX((call->stack_usage-S390_MINIMAL_STACK_SIZE-cinfo->sz.parm_size), 0);
+	cfg->param_area = MAX(((signed) cfg->param_area), lParamArea);
+	cfg->flags |= MONO_CFG_HAS_CALLS;
+
+	if (cinfo->struct_ret) {
+		/* Pass the address of the valuetype return buffer in a register */
+		MONO_INST_NEW (cfg, ins, OP_MOVE);
+		ins->sreg1 = call->vret_var->dreg;
+		ins->dreg = mono_alloc_preg (cfg);
+		MONO_ADD_INS (cfg->cbb, ins);
+		mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, cinfo->ret.reg, FALSE);
+	}
+
+	for (i = 0; i < n; ++i) {
+		ainfo = cinfo->args + i;
+		/* NOTE(review): declaration after a statement -- fine for C99/gcc,
+		 * not C89; matches other uses in this file. */
+		MonoType *t;
+
+		if (i >= sig->hasthis)
+			t = sig->params [i - sig->hasthis];
+		else
+			t = &mono_defaults.int_class->byval_arg;
+		t = mono_type_get_underlying_type (t);
+
+		in = call->args [i];
+
+		/* Vararg sentinel reached: emit the signature cookie first */
+		if ((sig->call_convention == MONO_CALL_VARARG) &&
+		    (i == sig->sentinelpos)) {
+			emit_sig_cookie2 (cfg, call, cinfo);
+		}
+
+		switch (ainfo->regtype) {
+		case RegTypeGeneral:
+			add_outarg_reg2 (cfg, call, ainfo->regtype, ainfo->reg, in);
+			break;
+		case RegTypeFP:
+			if (MONO_TYPE_ISSTRUCT (t)) {
+				/* Valuetype passed in one fp register */
+				ainfo->regtype = RegTypeStructByValInFP;
+				/* Fall through */
+			} else {
+				if (ainfo->size == 4)
+					ainfo->regtype = RegTypeFPR4;
+				add_outarg_reg2 (cfg, call, ainfo->regtype, ainfo->reg, in);
+				break;
+			}
+		case RegTypeStructByVal:
+		case RegTypeStructByAddr: {
+			guint32 align;
+			guint32 size;
+
+			if (sig->params [i - sig->hasthis]->type == MONO_TYPE_TYPEDBYREF) {
+				size = sizeof (MonoTypedRef);
+				align = sizeof (gpointer);
+			}
+			else
+				if (sig->pinvoke)
+					size = mono_type_native_stack_size (&in->klass->byval_arg, &align);
+				else {
+					/*
+					 * Other backends use mono_type_stack_size (), but that
+					 * aligns the size to 8, which is larger than the size of
+					 * the source, leading to reads of invalid memory if the
+					 * source is at the end of address space.
+					 */
+					size = mono_class_value_size (in->klass, &align);
+				}
+
+			g_assert (in->klass);
+
+			ainfo->offparm += cinfo->sz.offStruct;
+
+			/* Defer the actual copy to mono_arch_emit_outarg_vt (); pass it
+			 * the call and a private copy of the ArgInfo via inst_p0/p1 */
+			MONO_INST_NEW (cfg, ins, OP_OUTARG_VT);
+			ins->sreg1 = in->dreg;
+			ins->klass = in->klass;
+			ins->backend.size = ainfo->size;
+			ins->inst_p0 = call;
+			ins->inst_p1 = mono_mempool_alloc (cfg->mempool, sizeof (ArgInfo));
+			memcpy (ins->inst_p1, ainfo, sizeof (ArgInfo));
+
+			MONO_ADD_INS (cfg->cbb, ins);
+
+			if (ainfo->regtype == RegTypeStructByAddr) {
+				/*
+				 * We use OP_OUTARG_VT to copy the valuetype to a stack location, then
+				 * use the normal OUTARG opcodes to pass the address of the location to
+				 * the callee.
+				 */
+				int treg = mono_alloc_preg (cfg);
+				MONO_EMIT_NEW_BIALU_IMM (cfg, OP_ADD_IMM, treg,
+							 STK_BASE, ainfo->offparm);
+				mono_call_inst_add_outarg_reg (cfg, call, treg, ainfo->reg, FALSE);
+			}
+			break;
+		}
+		case RegTypeBase:
+			if (!t->byref && t->type == MONO_TYPE_R4) {
+				/* R4 occupies the high word of its 8-byte slot */
+				MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORER4_MEMBASE_REG,
+							     STK_BASE, ainfo->offset + 4,
+							     in->dreg);
+			} else if (!t->byref && (t->type == MONO_TYPE_R8)) {
+				MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORER8_MEMBASE_REG,
+							     STK_BASE, ainfo->offset,
+							     in->dreg);
+			} else {
+				MONO_INST_NEW (cfg, ins, OP_STORE_MEMBASE_REG);
+				ins->inst_destbasereg = STK_BASE;
+				ins->inst_offset = ainfo->offset;
+				ins->sreg1 = in->dreg;
+
+				/* This is needed by MonoTypedRef->value to point to the correct data */
+				if ((sig->call_convention == MONO_CALL_VARARG) &&
+				    (i >= sig->sentinelpos)) {
+					switch (ainfo->size) {
+					case 1:
+						ins->opcode = OP_STOREI1_MEMBASE_REG;
+						break;
+					case 2:
+						ins->opcode = OP_STOREI2_MEMBASE_REG;
+						break;
+					case 4:
+						ins->opcode = OP_STOREI4_MEMBASE_REG;
+						break;
+					default:
+						break;
+					}
+				}
+
+				MONO_ADD_INS (cfg->cbb, ins);
+			}
+			break;
+		default:
+			g_assert_not_reached ();
+			break;
+		}
+	}
+
+	/*
+	 * Handle the case where there are no implicit arguments
+	 */
+	if ((sig->call_convention == MONO_CALL_VARARG) &&
+	    (i == sig->sentinelpos)) {
+		emit_sig_cookie2 (cfg, call, cinfo);
+	}
+}
+
+/*========================= End of Function ========================*/
+
+/*------------------------------------------------------------------*/
+/* */
+/* Name - mono_arch_emit_outarg_vt */
+/* */
+/*------------------------------------------------------------------*/
+
+/*
+ * mono_arch_emit_outarg_vt: lower an OP_OUTARG_VT created by
+ * mono_arch_emit_call () into concrete copies.  The call and the saved
+ * ArgInfo were stashed in inst_p0/inst_p1 by the caller.
+ *   RegTypeStructByVal     - copied into a register (MONO_OUTPUT_VTR2)
+ *                            or a stack slot (MONO_OUTPUT_VTS2);
+ *   RegTypeStructByValInFP - the 4- or 8-byte struct is loaded into an
+ *                            fp register (narrowed via OP_S390_SETF4RET
+ *                            when 4 bytes) and registered as a reg arg;
+ *   otherwise (ByAddr)     - block-copied to [STK_BASE + offparm] with
+ *                            MONO_EMIT_NEW_MOVE2.
+ */
+void
+mono_arch_emit_outarg_vt (MonoCompile *cfg, MonoInst *ins, MonoInst *src)
+{
+	MonoCallInst *call = (MonoCallInst*)ins->inst_p0;
+	ArgInfo *ainfo = (ArgInfo*)ins->inst_p1;
+	int size = ins->backend.size;
+
+	if (ainfo->regtype == RegTypeStructByVal) {
+		/*
+			arg->ins.sreg1  = ainfo->reg;
+			arg->ins.opcode = OP_OUTARG_VT;
+			arg->size       = ainfo->size;
+			arg->offset     = ainfo->offset;
+			arg->offPrm     = ainfo->offparm + cinfo->sz.offStruct;
+		*/
+		if (ainfo->reg != STK_BASE) {
+			MONO_OUTPUT_VTR2 (cfg, size, ainfo->reg, src->dreg, 0);
+		} else {
+			MONO_OUTPUT_VTS2 (cfg, size, ainfo->reg, ainfo->offset,
+					  src->dreg, 0);
+		}
+	} else if (ainfo->regtype == RegTypeStructByValInFP) {
+		int dreg = mono_alloc_freg (cfg);
+
+		if (ainfo->size == 4) {
+			MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADR4_MEMBASE, dreg, src->dreg, 0);
+			MONO_EMIT_NEW_UNALU (cfg, OP_S390_SETF4RET, dreg, dreg);
+		} else {
+			g_assert (ainfo->size == 8);
+
+			MONO_EMIT_NEW_LOAD_MEMBASE_OP (cfg, OP_LOADR8_MEMBASE, dreg, src->dreg, 0);
+		}
+
+		mono_call_inst_add_outarg_reg (cfg, call, dreg, ainfo->reg, TRUE);
+	} else {
+		MONO_EMIT_NEW_MOVE2 (cfg, STK_BASE, ainfo->offparm,
+				     src->dreg, 0, size);
+	}
+}
+
+/*------------------------------------------------------------------*/
+/* */
+/* Name - mono_arch_emit_setret */
+/* */
+/*------------------------------------------------------------------*/
+
+/*
+ * mono_arch_emit_setret: move the method's return value VAL into the
+ * ABI return location.  Non-byref R4 values are narrowed into s390_f0
+ * with OP_S390_SETF4RET, R8 values are OP_FMOVE'd into s390_f0, and
+ * everything else goes through an integer OP_MOVE into cfg->ret->dreg.
+ */
+void
+mono_arch_emit_setret (MonoCompile *cfg, MonoMethod *method, MonoInst *val)
+{
+	MonoType *ret = mono_type_get_underlying_type (mono_method_signature (method)->ret);
+
+	if (!ret->byref) {
+		if (ret->type == MONO_TYPE_R4) {
+			MONO_EMIT_NEW_UNALU (cfg, OP_S390_SETF4RET, s390_f0, val->dreg);
+			return;
+		} else if (ret->type == MONO_TYPE_R8) {
+			MONO_EMIT_NEW_UNALU (cfg, OP_FMOVE, s390_f0, val->dreg);
+			return;
+		}
+	}
+
+	MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, cfg->ret->dreg, val->dreg);
+}
+
+/*========================= End of Function ========================*/
+
/*------------------------------------------------------------------*/
/* */
/* Name - emit_sig_cookie. */
arg->ins.inst_right = (MonoInst *) call;
arg->size = argSize;
arg->offset = cinfo->sigCookie.offset;
- MONO_INST_LIST_ADD_TAIL (&arg->ins.node, &call->out_args);
+ arg->ins.next = call->out_args;
+ call->out_args = (MonoInst *) arg;
}
/*========================= End of Function ========================*/
case OP_IREM_IMM:
case OP_IDIV_UN_IMM:
case OP_IREM_UN_IMM:
- mono_decompose_op_imm (cfg, ins);
+ case OP_LAND_IMM:
+ case OP_LOR_IMM:
+ case OP_LXOR_IMM:
+ case OP_LOCALLOC_IMM:
+ mono_decompose_op_imm (cfg, bb, ins);
+ break;
+ case OP_LADD_IMM:
+ if (!s390_is_imm16 (ins->inst_imm))
+ /* This is created by the memcpy code which ignores is_inst_imm */
+ mono_decompose_op_imm (cfg, bb, ins);
break;
default:
break;
/*========================= End of Function ========================*/
+/*
+ * is_unsigned: TRUE when NEXT (the instruction consuming the condition
+ * code of a preceding compare, may be NULL) is an unsigned conditional
+ * branch, conditional-exception or compare-and-set opcode, in which case
+ * the compare must be emitted as an unsigned (logical) comparison.
+ * Relies on the *_UN opcodes forming contiguous ranges in the opcode
+ * enumeration (e.g. OP_IBNE_UN..OP_IBLT_UN).
+ */
+static gboolean is_unsigned (MonoInst *next)
+{
+	if ((next) &&
+		(((next->opcode >= OP_IBNE_UN) &&
+		  (next->opcode <= OP_IBLT_UN)) ||
+		 ((next->opcode >= OP_LBNE_UN) &&
+		  (next->opcode <= OP_LBLT_UN)) ||
+		 ((next->opcode >= OP_COND_EXC_NE_UN) &&
+		  (next->opcode <= OP_COND_EXC_LT_UN)) ||
+		 ((next->opcode >= OP_COND_EXC_INE_UN) &&
+		  (next->opcode <= OP_COND_EXC_ILT_UN)) ||
+		 ((next->opcode == OP_CLT_UN) ||
+		  (next->opcode == OP_CGT_UN)) ||
+		 ((next->opcode == OP_ICLT_UN) ||
+		  (next->opcode == OP_ICGT_UN) ||
+		  (next->opcode == OP_LCLT_UN) ||
+		  (next->opcode == OP_LCGT_UN))))
+		return TRUE;
+	else
+		return FALSE;
+}
+
/*------------------------------------------------------------------*/
/* */
/* Name - mono_arch_output_basic_block */
void
mono_arch_output_basic_block (MonoCompile *cfg, MonoBasicBlock *bb)
{
- MonoInst *ins, *next;
+ MonoInst *ins;
MonoCallInst *call;
guint offset;
guint8 *code = cfg->native_code + cfg->code_len;
s390_ngr (code, ins->dreg, s390_r0);
}
break;
+ case OP_ICONV_TO_I1: {
+ s390_lhi (code, s390_r0, 0x80);
+ if (ins->dreg != ins->sreg1) {
+ s390_lr (code, ins->dreg, ins->sreg1);
+ }
+ s390_nr (code, s390_r0, ins->sreg1);
+ s390_jz (code, 7);
+ s390_lhi (code, s390_r13, -1);
+ s390_sll (code, s390_r13, 0, 8);
+ s390_or (code, ins->dreg, s390_r13);
+ }
+ break;
+ case OP_ICONV_TO_I2: {
+ s390_lhi (code, s390_r0, 0x80);
+ s390_sll (code, s390_r0, 0, 8);
+ if (ins->dreg != ins->sreg1) {
+ s390_lr (code, ins->dreg, ins->sreg1);
+ }
+ s390_nr (code, s390_r0, ins->sreg1);
+ s390_jz (code, 7);
+ s390_lhi (code, s390_r13, -1);
+ s390_sll (code, s390_r13, 0, 16);
+ s390_or (code, ins->dreg, s390_r13);
+ }
+ break;
+ case OP_ICONV_TO_U1: {
+ s390_lhi (code, s390_r0, 0xff);
+ if (ins->dreg != ins->sreg1) {
+ s390_lr (code, ins->dreg, ins->sreg1);
+ }
+ s390_nr (code, ins->dreg, s390_r0);
+ }
+ break;
+ case OP_ICONV_TO_U2: {
+ s390_lhi (code, s390_r0, -1);
+ s390_sll (code, s390_r0, 0, 16);
+ s390_srl (code, s390_r0, 0, 16);
+ if (ins->dreg != ins->sreg1) {
+ s390_lr (code, ins->dreg, ins->sreg1);
+ }
+ s390_nr (code, ins->dreg, s390_r0);
+ }
+ break;
case OP_COMPARE:
case OP_LCOMPARE: {
- next = mono_inst_list_next (&ins->node, &bb->ins_list);
- if ((next) && (mono_opcode_to_cond(next->opcode) >= CMP_LE_UN))
+ if (is_unsigned (ins->next))
s390_clgr (code, ins->sreg1, ins->sreg2);
else
s390_cgr (code, ins->sreg1, ins->sreg2);
}
break;
- case OP_COMPARE_IMM: {
- next = mono_inst_list_next (&ins->node, &bb->ins_list);
+ case OP_ICOMPARE: {
+ if (is_unsigned (ins->next))
+ s390_clr (code, ins->sreg1, ins->sreg2);
+ else
+ s390_cr (code, ins->sreg1, ins->sreg2);
+ }
+ break;
+ case OP_COMPARE_IMM:
+ case OP_LCOMPARE_IMM: {
if (s390_is_imm16 (ins->inst_imm)) {
s390_lghi (code, s390_r0, ins->inst_imm);
- if ((next) &&
- (mono_opcode_to_cond(next->opcode) >= CMP_LE_UN))
+ if (is_unsigned (ins->next))
s390_clgr (code, ins->sreg1, s390_r0);
else
s390_cgr (code, ins->sreg1, s390_r0);
s390_basr (code, s390_r13, 0);
s390_j (code, 6);
s390_llong(code, ins->inst_imm);
- if ((next) &&
- (mono_opcode_to_cond(next->opcode) >= CMP_LE_UN))
+ if (is_unsigned (ins->next))
s390_clg (code, ins->sreg1, 0, s390_r13, 4);
else
s390_cg (code, ins->sreg1, 0, s390_r13, 4);
}
}
break;
- case OP_ICOMPARE: {
- next = mono_inst_list_next (&ins->node, &bb->ins_list);
- if ((next) && (mono_opcode_to_cond(next->opcode) >= CMP_LE_UN))
- s390_clr (code, ins->sreg1, ins->sreg2);
- else
- s390_cr (code, ins->sreg1, ins->sreg2);
- }
- break;
case OP_ICOMPARE_IMM: {
- next = mono_inst_list_next (&ins->node, &bb->ins_list);
if (s390_is_imm16 (ins->inst_imm)) {
s390_lghi (code, s390_r0, ins->inst_imm);
- if ((next) &&
- (mono_opcode_to_cond(next->opcode) >= CMP_LE_UN))
+ if (is_unsigned (ins->next))
s390_clr (code, ins->sreg1, s390_r0);
else
s390_cr (code, ins->sreg1, s390_r0);
s390_basr (code, s390_r13, 0);
s390_j (code, 4);
s390_word (code, ins->inst_imm);
- if ((next) &&
- (mono_opcode_to_cond(next->opcode) >= CMP_LE_UN))
+ if (is_unsigned (ins->next))
s390_cl (code, ins->sreg1, 0, s390_r13, 4);
else
s390_c (code, ins->sreg1, 0, s390_r13, 4);
break;
case OP_ADDCC: {
CHECK_SRCDST_COM;
- s390_algr (code, ins->dreg, src2);
+ s390_agr (code, ins->dreg, src2);
}
break;
case OP_LADD: {
}
}
break;
+ case OP_LADD_IMM: {
+ if (ins->dreg != ins->sreg1) {
+ s390_lgr (code, ins->dreg, ins->sreg1);
+ }
+ g_assert (s390_is_imm16 (ins->inst_imm));
+ s390_aghi (code, ins->dreg, ins->inst_imm);
+ }
+ break;
case OP_ADC_IMM: {
if (ins->dreg != ins->sreg1) {
s390_lgr (code, ins->dreg, ins->sreg1);
}
}
break;
- case OP_IADD_OVF: {
+ case OP_IADD_OVF:
+ case OP_S390_IADD_OVF: {
CHECK_SRCDST_COM;
s390_ar (code, ins->dreg, src2);
EMIT_COND_SYSTEM_EXCEPTION (S390_CC_OV, "OverflowException");
s390_lgfr (code, ins->dreg, ins->dreg);
}
break;
- case OP_IADD_OVF_UN: {
+ case OP_IADD_OVF_UN:
+ case OP_S390_IADD_OVF_UN: {
CHECK_SRCDST_COM;
s390_algr (code, ins->dreg, src2);
EMIT_COND_SYSTEM_EXCEPTION (S390_CC_CY, "OverflowException");
break;
case OP_SUBCC: {
CHECK_SRCDST_NCOM;
- s390_slgr (code, ins->dreg, src2);
+ s390_sgr (code, ins->dreg, src2);
}
break;
case OP_LSUB: {
}
}
break;
+ case OP_LSUB_IMM: {
+ if (ins->dreg != ins->sreg1) {
+ s390_lgr (code, ins->dreg, ins->sreg1);
+ }
+ if (s390_is_imm16 (-ins->inst_imm)) {
+ s390_lghi (code, s390_r0, ins->inst_imm);
+ s390_slgr (code, ins->dreg, s390_r0);
+ } else {
+ s390_basr (code, s390_r13, 0);
+ s390_j (code, 6);
+ s390_llong(code, ins->inst_imm);
+ s390_slg (code, ins->dreg, 0, s390_r13, 4);
+ }
+ }
+ break;
case OP_SBB_IMM: {
if (ins->dreg != ins->sreg1) {
s390_lgr (code, ins->dreg, ins->sreg1);
}
}
break;
- case OP_LADD_OVF: {
+ case OP_LADD_OVF:
+ case OP_S390_LADD_OVF: {
CHECK_SRCDST_COM;
s390_agr (code, ins->dreg, src2);
EMIT_COND_SYSTEM_EXCEPTION (S390_CC_OV, "OverflowException");
}
break;
- case OP_LADD_OVF_UN: {
+ case OP_LADD_OVF_UN:
+ case OP_S390_LADD_OVF_UN: {
CHECK_SRCDST_COM;
s390_algr (code, ins->dreg, src2);
EMIT_COND_SYSTEM_EXCEPTION (S390_CC_CY, "OverflowException");
s390_slgf (code, ins->dreg, 0, s390_r13, 4);
}
break;
- case OP_ISUB_OVF: {
+ case OP_ISUB_OVF:
+ case OP_S390_ISUB_OVF: {
CHECK_SRCDST_NCOM;
s390_sr (code, ins->dreg, src2);
EMIT_COND_SYSTEM_EXCEPTION (S390_CC_OV, "OverflowException");
s390_lgfr (code, ins->dreg, ins->dreg);
}
break;
- case OP_ISUB_OVF_UN: {
+ case OP_ISUB_OVF_UN:
+ case OP_S390_ISUB_OVF_UN: {
CHECK_SRCDST_NCOM;
s390_slr (code, ins->dreg, src2);
EMIT_COND_SYSTEM_EXCEPTION (S390_CC_NC, "OverflowException");
s390_llgfr(code, ins->dreg, ins->dreg);
}
break;
- case OP_LSUB_OVF: {
+ case OP_LSUB_OVF:
+ case OP_S390_LSUB_OVF: {
CHECK_SRCDST_NCOM;
s390_sgr (code, ins->dreg, src2);
EMIT_COND_SYSTEM_EXCEPTION (S390_CC_OV, "OverflowException");
}
break;
- case OP_LSUB_OVF_UN: {
+ case OP_LSUB_OVF_UN:
+ case OP_S390_LSUB_OVF_UN: {
CHECK_SRCDST_NCOM;
s390_slgr (code, ins->dreg, src2);
EMIT_COND_SYSTEM_EXCEPTION (S390_CC_NC, "OverflowException");
}
break;
case OP_ISHL: {
- if (ins->sreg1 != ins->dreg) {
- s390_lgfr (code, ins->dreg, ins->sreg1);
- }
- s390_sll (code, ins->dreg, ins->sreg2, 0);
+ CHECK_SRCDST_NCOM;
+ s390_sll (code, ins->dreg, src2, 0);
}
break;
case OP_ISHL_IMM: {
}
break;
case OP_ISHR: {
- if (ins->sreg1 != ins->dreg) {
- s390_lgfr (code, ins->dreg, ins->sreg1);
- }
- s390_sra (code, ins->dreg, ins->sreg2, 0);
+ CHECK_SRCDST_NCOM;
+ s390_sra (code, ins->dreg, src2, 0);
}
break;
case OP_ISHR_IMM: {
}
break;
case OP_ISHR_UN: {
- if (ins->sreg1 != ins->dreg) {
- s390_lgfr (code, ins->dreg, ins->sreg1);
- }
- s390_srl (code, ins->dreg, ins->sreg2, 0);
+ CHECK_SRCDST_NCOM;
+ s390_srl (code, ins->dreg, src2, 0);
}
break;
case OP_INOT: {
s390_lg (code,ins->dreg, 0, s390_r13, 4);
}
break;
+ case OP_JUMP_TABLE: {
+ mono_add_patch_info (cfg, code - cfg->native_code,
+ (MonoJumpInfoType)ins->inst_i1, ins->inst_p0);
+ s390_basr (code, s390_r13, 0);
+ s390_j (code, 6);
+ s390_llong (code, 0);
+ s390_lg (code, ins->dreg, 0, s390_r13, 4);
+ }
+ break;
case OP_MOVE:
if (ins->dreg != ins->sreg1) {
s390_lgr (code, ins->dreg, ins->sreg1);
EMIT_COND_SYSTEM_EXCEPTION (S390_CC_LT, "OverflowException");
s390_lgfr (code, ins->dreg, ins->sreg1);
break;
- case OP_FMOVE: {
+ case OP_FMOVE:
+ case OP_FCONV_TO_R4: {
if (ins->dreg != ins->sreg1) {
s390_ldr (code, ins->dreg, ins->sreg1);
}
s390_ledbr (code, ins->dreg, ins->sreg1);
}
break;
- case OP_FCONV_TO_R4: {
- g_error ("Shouldn't be reached");
- next = mono_inst_list_next (&ins->node, &bb->ins_list);
- if ((next) &&
- (next->opcode != OP_FMOVE) &&
- (next->opcode != OP_STORER4_MEMBASE_REG))
- s390_ledbr (code, ins->dreg, ins->sreg1);
- }
- break;
case OP_JMP: {
if (cfg->method->save_lmf)
restoreLMF(code, cfg->frame_reg, cfg->stack_usage);
if (cfg->flags & MONO_CFG_HAS_TAIL) {
- code = emit_load_volatile_registers(code, cfg);
+ code = emit_load_volatile_arguments (code, cfg);
}
code = backUpStackPtr(cfg, code);
break;
case OP_LCALL:
case OP_VCALL:
+ case OP_VCALL2:
case OP_VOIDCALL:
case OP_CALL: {
s390_basr (code, s390_r13, 0);
break;
case OP_LCALL_REG:
case OP_VCALL_REG:
+ case OP_VCALL2_REG:
case OP_VOIDCALL_REG:
case OP_CALL_REG: {
s390_lgr (code, s390_r1, ins->sreg1);
break;
case OP_LCALL_MEMBASE:
case OP_VCALL_MEMBASE:
+ case OP_VCALL2_MEMBASE:
case OP_VOIDCALL_MEMBASE:
case OP_CALL_MEMBASE: {
s390_lg (code, s390_r1, 0, ins->sreg1, ins->inst_offset);
}
break;
case OP_START_HANDLER: {
+ MonoInst *spvar = mono_find_spvar_for_region (cfg, bb->region);
+
S390_LONG (code, stg, stg, s390_r14, 0,
- ins->inst_left->inst_basereg,
- ins->inst_left->inst_offset);
+ spvar->inst_basereg,
+ spvar->inst_offset);
}
break;
case OP_ENDFILTER: {
+ MonoInst *spvar = mono_find_spvar_for_region (cfg, bb->region);
+
if (ins->sreg1 != s390_r2)
s390_lgr(code, s390_r2, ins->sreg1);
S390_LONG (code, lg, lg, s390_r14, 0,
- ins->inst_left->inst_basereg,
- ins->inst_left->inst_offset);
+ spvar->inst_basereg,
+ spvar->inst_offset);
s390_br (code, s390_r14);
}
break;
case OP_ENDFINALLY: {
+ MonoInst *spvar = mono_find_spvar_for_region (cfg, bb->region);
+
S390_LONG (code, lg, lg, s390_r14, 0,
- ins->inst_left->inst_basereg,
- ins->inst_left->inst_offset);
+ spvar->inst_basereg,
+ spvar->inst_offset);
s390_br (code, s390_r14);
}
break;
break;
case OP_LABEL: {
ins->inst_c0 = code - cfg->native_code;
+ }
+ break;
+ case OP_NOP:
+ case OP_DUMMY_USE:
+ case OP_DUMMY_STORE:
+ case OP_NOT_REACHED:
+ case OP_NOT_NULL: {
}
break;
case OP_BR:
}
break;
case OP_CEQ:
- case OP_ICEQ: {
+ case OP_ICEQ:
+ case OP_LCEQ: {
s390_lghi(code, ins->dreg, 1);
s390_jz (code, 4);
s390_lghi(code, ins->dreg, 0);
}
break;
case OP_CLT:
- case OP_ICLT: {
+ case OP_ICLT:
+ case OP_LCLT: {
s390_lghi(code, ins->dreg, 1);
s390_jl (code, 4);
s390_lghi(code, ins->dreg, 0);
}
break;
case OP_CLT_UN:
- case OP_ICLT_UN: {
+ case OP_ICLT_UN:
+ case OP_LCLT_UN: {
s390_lghi(code, ins->dreg, 1);
s390_jlo (code, 4);
s390_lghi(code, ins->dreg, 0);
}
break;
case OP_CGT:
- case OP_ICGT: {
+ case OP_ICGT:
+ case OP_LCGT: {
s390_lghi(code, ins->dreg, 1);
s390_jh (code, 4);
s390_lghi(code, ins->dreg, 0);
}
break;
case OP_CGT_UN:
- case OP_ICGT_UN: {
+ case OP_ICGT_UN:
+ case OP_LCGT_UN: {
s390_lghi(code, ins->dreg, 1);
s390_jho (code, 4);
s390_lghi(code, ins->dreg, 0);
}
break;
case OP_COND_EXC_EQ:
+ case OP_COND_EXC_IEQ:
EMIT_COND_SYSTEM_EXCEPTION (S390_CC_EQ, ins->inst_p1);
break;
case OP_COND_EXC_NE_UN:
+ case OP_COND_EXC_INE_UN:
EMIT_COND_SYSTEM_EXCEPTION (S390_CC_NE, ins->inst_p1);
break;
case OP_COND_EXC_LT:
+ case OP_COND_EXC_ILT:
case OP_COND_EXC_LT_UN:
+ case OP_COND_EXC_ILT_UN:
EMIT_COND_SYSTEM_EXCEPTION (S390_CC_LT, ins->inst_p1);
break;
case OP_COND_EXC_GT:
+ case OP_COND_EXC_IGT:
case OP_COND_EXC_GT_UN:
+ case OP_COND_EXC_IGT_UN:
EMIT_COND_SYSTEM_EXCEPTION (S390_CC_GT, ins->inst_p1);
break;
case OP_COND_EXC_GE:
+ case OP_COND_EXC_IGE:
case OP_COND_EXC_GE_UN:
+ case OP_COND_EXC_IGE_UN:
EMIT_COND_SYSTEM_EXCEPTION (S390_CC_GE, ins->inst_p1);
break;
case OP_COND_EXC_LE:
+ case OP_COND_EXC_ILE:
case OP_COND_EXC_LE_UN:
+ case OP_COND_EXC_ILE_UN:
EMIT_COND_SYSTEM_EXCEPTION (S390_CC_LE, ins->inst_p1);
break;
case OP_COND_EXC_OV:
+ case OP_COND_EXC_IOV:
EMIT_COND_SYSTEM_EXCEPTION (S390_CC_OV, ins->inst_p1);
break;
case OP_COND_EXC_NO:
+ case OP_COND_EXC_INO:
EMIT_COND_SYSTEM_EXCEPTION (S390_CC_NO, ins->inst_p1);
break;
case OP_COND_EXC_C:
+ case OP_COND_EXC_IC:
EMIT_COND_SYSTEM_EXCEPTION (S390_CC_CY, ins->inst_p1);
break;
case OP_COND_EXC_NC:
+ case OP_COND_EXC_INC:
EMIT_COND_SYSTEM_EXCEPTION (S390_CC_NC, ins->inst_p1);
break;
case OP_LBEQ:
s390_ldebr (code, ins->dreg, s390_f15);
}
break;
+ case OP_ICONV_TO_R_UN: {
+ s390_cdfbr (code, ins->dreg, ins->sreg1);
+ s390_ltr (code, ins->sreg1, ins->sreg1);
+ s390_jnl (code, 12);
+ s390_basr (code, s390_r13, 0);
+ s390_j (code, 6);
+ s390_word (code, 0x41f00000);
+ s390_word (code, 0);
+ s390_adb (code, ins->dreg, 0, s390_r13, 4);
+ }
+ break;
case OP_LCONV_TO_R_UN: {
s390_cdgbr (code, ins->dreg, ins->sreg1);
s390_ltgr (code, ins->sreg1, ins->sreg1);
/*------------------------------------------------------------------*/
/* */
-/* Name - emit_load_volatile_registers */
+/* Name - emit_load_volatile_arguments */
/* */
/* Function - Emit the instructions to reload parameter regist- */
/* registers for use with "tail" operations. */
/*------------------------------------------------------------------*/
guint8 *
-emit_load_volatile_registers (guint8 *code, MonoCompile *cfg)
+emit_load_volatile_arguments (guint8 *code, MonoCompile *cfg)
{
MonoInst *inst;
MonoMethod *method = cfg->method;
s390_stmg (code, s390_r6, s390_r14, STK_BASE, S390_REG_SAVE_OFFSET);
+ if (cfg->arch.bkchain_reg != -1)
+ s390_lgr (code, cfg->arch.bkchain_reg, STK_BASE);
+
if (cfg->flags & MONO_CFG_HAS_ALLOCA) {
cfg->used_int_regs |= 1 << 11;
}
for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
ArgInfo *ainfo = cinfo->args + i;
inst = cfg->args [pos];
+
+ if (inst->opcode == OP_VTARG_ADDR)
+ inst = inst->inst_left;
if (inst->opcode == OP_REGVAR) {
if (ainfo->regtype == RegTypeGeneral)
/*========================= End of Function ========================*/
+/*------------------------------------------------------------------*/
+/* */
+/* Name - mono_arch_emit_inst_for_method */
+/* */
+/*------------------------------------------------------------------*/
+
+/*
+ * mono_arch_emit_inst_for_method: architecture hook for emitting
+ * intrinsic IR for well-known methods.  No intrinsics are implemented
+ * for this backend, so returning NULL makes the generic code path be
+ * used for every method.
+ */
+MonoInst*
+mono_arch_emit_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
+{
+	return NULL;
+}
+
+/*========================= End of Function ========================*/
+
+/*
+ * mono_arch_decompose_opts: called before the generic decompose pass
+ * (enabled by MONO_ARCH_HAVE_DECOMPOSE_OPTS).  The overflow-checked
+ * add/sub opcodes are renamed to their OP_S390_* aliases so the generic
+ * decomposition skips them; mono_arch_output_basic_block handles the
+ * aliases with the same code paths as the originals, using the s390
+ * condition code directly for the overflow checks.
+ */
+void
+mono_arch_decompose_opts (MonoCompile *cfg, MonoInst *ins)
+{
+	/*
+	 * Have to rename these to avoid being decomposed normally, since the normal
+	 * decomposition does not work on S390.
+	 */
+	switch (ins->opcode) {
+	case OP_ISUB_OVF:
+		ins->opcode = OP_S390_ISUB_OVF;
+		break;
+	case OP_ISUB_OVF_UN:
+		ins->opcode = OP_S390_ISUB_OVF_UN;
+		break;
+	case OP_IADD_OVF:
+		ins->opcode = OP_S390_IADD_OVF;
+		break;
+	case OP_IADD_OVF_UN:
+		ins->opcode = OP_S390_IADD_OVF_UN;
+		break;
+	case OP_LADD_OVF:
+		ins->opcode = OP_S390_LADD_OVF;
+		break;
+	case OP_LADD_OVF_UN:
+		ins->opcode = OP_S390_LADD_OVF_UN;
+		break;
+	case OP_LSUB_OVF:
+		ins->opcode = OP_S390_LSUB_OVF;
+		break;
+	case OP_LSUB_OVF_UN:
+		ins->opcode = OP_S390_LSUB_OVF_UN;
+		break;
+	default:
+		break;
+	}
+}
+
+/*========================= End of Function ========================*/
+
/*------------------------------------------------------------------*/
/* */
/* Name - mono_arch_print_tree */
/*========================= End of Function ========================*/
-/*========================= End of Function ========================*/
-
/*------------------------------------------------------------------*/
/* */
/* Name - mono_arch_get_patch_offset */
typedef struct MonoCompileArch {
gpointer litpool;
glong litsize;
+ int bkchain_reg;
} MonoCompileArch;
typedef struct
#define MONO_ARCH_HAVE_ATOMIC_ADD 1
#define MONO_ARCH_HAVE_ATOMIC_EXCHANGE 1
#define MONO_ARCH_SIGNAL_STACK_SIZE 256*1024
+#define MONO_ARCH_HAVE_DECOMPOSE_OPTS 1
#define MONO_ARCH_ENABLE_NORMALIZE_OPCODES 1
// #define MONO_ARCH_HAVE_THROW_CORLIB_EXCEPTION 1
/* Definitions used by mini-codegen.c */
/*===============================================*/
-/*--------------------------------------------*/
-/* use s390_r2-s390_r6 as parm registers */
-/* s390_r0, s390_r1, s390_r13 used internally */
-/* s390_r15 is the stack pointer */
-/*--------------------------------------------*/
-#define MONO_ARCH_CALLEE_REGS (0x1ffc)
+/*------------------------------------------------------*/
+/* use s390_r2-s390_r6 as parm registers */
+/* s390_r0, s390_r1, s390_r12, s390_r13 used internally */
+/* s390_r8..s390_r11 are used for global regalloc */
+/* s390_r15 is the stack pointer */
+/*------------------------------------------------------*/
+#define MONO_ARCH_CALLEE_REGS (0xfc)
#define MONO_ARCH_CALLEE_SAVED_REGS 0xff80
* Set var information according to the calling convention. sparc version.
* The locals var stuff should most likely be split in another method.
*/
+
void
mono_arch_allocate_vars (MonoCompile *cfg)
{
switch (cinfo->ret.storage) {
case ArgInIReg:
case ArgInFReg:
+ cfg->ret->opcode = OP_REGVAR;
+ cfg->ret->inst_c0 = cinfo->ret.reg;
+ break;
case ArgInIRegPair:
+ if (cfg->new_ir && ((sig->ret->type == MONO_TYPE_I8) || (sig->ret->type == MONO_TYPE_U8))) {
+ MonoInst *low = get_vreg_to_inst (cfg, cfg->ret->dreg + 1);
+ MonoInst *high = get_vreg_to_inst (cfg, cfg->ret->dreg + 2);
+
+ low->opcode = OP_REGVAR;
+ low->dreg = cinfo->ret.reg + 1;
+ high->opcode = OP_REGVAR;
+ high->dreg = cinfo->ret.reg;
+ }
cfg->ret->opcode = OP_REGVAR;
cfg->ret->inst_c0 = cinfo->ret.reg;
break;
#endif
/* FIXME: Allocate volatile arguments to registers */
+ /* FIXME: This makes the argument holding a vtype address into volatile */
if (inst->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT))
inreg = FALSE;
switch (storage) {
case ArgInIReg:
- case ArgInIRegPair:
inst->opcode = OP_REGVAR;
inst->dreg = sparc_i0 + ainfo->reg;
break;
+ case ArgInIRegPair:
+ if (cfg->new_ir && (inst->type == STACK_I8)) {
+ MonoInst *low = get_vreg_to_inst (cfg, inst->dreg + 1);
+ MonoInst *high = get_vreg_to_inst (cfg, inst->dreg + 2);
+
+ low->opcode = OP_REGVAR;
+ low->dreg = sparc_i0 + ainfo->reg + 1;
+ high->opcode = OP_REGVAR;
+ high->dreg = sparc_i0 + ainfo->reg;
+ }
+ inst->opcode = OP_REGVAR;
+ inst->dreg = sparc_i0 + ainfo->reg;
+ break;
case ArgInFloatReg:
case ArgInDoubleReg:
/*
MonoInst *indir;
MONO_INST_NEW (cfg, indir, 0);
*indir = *inst;
- inst->opcode = OP_VTARG_ADDR;
+ if (cfg->new_ir)
+ inst->opcode = OP_VTARG_ADDR;
+ else
+ inst->opcode = OP_SPARC_INARG_VT;
inst->inst_left = indir;
}
}
sig_arg->inst_p0 = tmp_sig;
arg->inst_left = sig_arg;
arg->type = STACK_PTR;
- MONO_INST_LIST_ADD_TAIL (&arg->node, &call->out_args);
+ /* prepend, so they get reversed */
+ arg->next = call->out_args;
+ call->out_args = arg;
}
/*
arg->cil_code = in->cil_code;
arg->inst_left = in;
arg->type = in->type;
- MONO_INST_LIST_ADD_TAIL (&arg->node, &call->out_args);
+ /* prepend, we'll need to reverse them later */
+ arg->next = call->out_args;
+ call->out_args = arg;
if ((i >= sig->hasthis) && (MONO_TYPE_ISSTRUCT(sig->params [i - sig->hasthis]))) {
MonoInst *inst;
emit_sig_cookie (cfg, call, cinfo);
}
+ /*
+ * Reverse the call->out_args list.
+ */
+ {
+ MonoInst *prev = NULL, *list = call->out_args, *next;
+ while (list) {
+ next = list->next;
+ list->next = prev;
+ prev = list;
+ list = next;
+ }
+ call->out_args = prev;
+ }
call->stack_usage = cinfo->stack_usage + extra_space;
call->out_ireg_args = NULL;
call->out_freg_args = NULL;
return call;
}
+/* FIXME: Remove these later */
+#define NEW_LOAD_MEMBASE(cfg,dest,op,dr,base,offset) do { \
+ MONO_INST_NEW ((cfg), (dest), (op)); \
+ (dest)->dreg = (dr); \
+ (dest)->inst_basereg = (base); \
+ (dest)->inst_offset = (offset); \
+ (dest)->type = STACK_I4; \
+ } while (0)
+
+#define EMIT_NEW_LOAD_MEMBASE(cfg,dest,op,dr,base,offset) do { NEW_LOAD_MEMBASE ((cfg), (dest), (op), (dr), (base), (offset)); MONO_ADD_INS ((cfg)->cbb, (dest)); } while (0)
+
+#undef MONO_EMIT_NEW_STORE_MEMBASE_IMM
+#define MONO_EMIT_NEW_STORE_MEMBASE_IMM(cfg,op,base,offset,imm) do { \
+ MonoInst *inst; \
+ MONO_INST_NEW ((cfg), (inst), (op)); \
+ inst->inst_destbasereg = base; \
+ inst->inst_offset = offset; \
+ inst->inst_p1 = (gpointer)(gssize)imm; \
+ MONO_ADD_INS ((cfg)->cbb, inst); \
+ } while (0)
+
+/*
+ * add_outarg_reg2:
+ *
+ *   Emit a move of vreg SREG into a freshly allocated vreg and record (via
+ * mono_call_inst_add_outarg_reg ()) that the value has to end up in hard
+ * register REG at call time. STORAGE selects between the integer and the
+ * float variant; any other storage kind is a caller bug.
+ */
+static void
+add_outarg_reg2 (MonoCompile *cfg, MonoCallInst *call, ArgStorage storage, int reg, guint32 sreg)
+{
+	MonoInst *arg;
+
+	MONO_INST_NEW (cfg, arg, 0);
+
+	arg->sreg1 = sreg;
+
+	switch (storage) {
+	case ArgInIReg:
+		arg->opcode = OP_MOVE;
+		arg->dreg = mono_alloc_ireg (cfg);
+
+		/* The register allocator will later pin arg->dreg to REG */
+		mono_call_inst_add_outarg_reg (cfg, call, arg->dreg, reg, FALSE);
+		break;
+	case ArgInFloatReg:
+		arg->opcode = OP_FMOVE;
+		arg->dreg = mono_alloc_freg (cfg);
+
+		mono_call_inst_add_outarg_reg (cfg, call, arg->dreg, reg, TRUE);
+		break;
+	default:
+		g_assert_not_reached ();
+	}
+
+	MONO_ADD_INS (cfg->cbb, arg);
+}
+
+/*
+ * add_outarg_load:
+ *
+ *   Load a word from SPARC_SP+OFFSET into a fresh vreg and record that it
+ * has to end up in the integer hard register REG at call time.
+ * NOTE(review): the 'opcode' and 'basereg' parameters are currently ignored
+ * (OP_LOAD_MEMBASE and sparc_sp are hard-coded) — kept for interface
+ * compatibility with the callers.
+ */
+static void
+add_outarg_load (MonoCompile *cfg, MonoCallInst *call, int opcode, int basereg, int offset, int reg)
+{
+	MonoInst *arg;
+	int dreg = mono_alloc_ireg (cfg);
+
+	/*
+	 * EMIT_NEW_LOAD_MEMBASE already appends the instruction to cfg->cbb;
+	 * adding it a second time with MONO_ADD_INS would insert the same
+	 * MonoInst into the basic block's instruction list twice and corrupt it.
+	 */
+	EMIT_NEW_LOAD_MEMBASE (cfg, arg, OP_LOAD_MEMBASE, dreg, sparc_sp, offset);
+
+	mono_call_inst_add_outarg_reg (cfg, call, dreg, reg, FALSE);
+}
+
+/*
+ * emit_pass_long:
+ *
+ *   Pass the 64 bit argument IN according to AINFO. The value lives in the
+ * vreg pair in->dreg + 1 (least significant word) / in->dreg + 2 (most
+ * significant word), matching the layout set up in mono_arch_allocate_vars ().
+ */
+static void
+emit_pass_long (MonoCompile *cfg, MonoCallInst *call, ArgInfo *ainfo, MonoInst *in)
+{
+	int offset = ARGS_OFFSET + ainfo->offset;
+
+	switch (ainfo->storage) {
+	case ArgInIRegPair:
+		/* MSW goes into the lower-numbered register */
+		add_outarg_reg2 (cfg, call, ArgInIReg, sparc_o0 + ainfo->reg + 1, in->dreg + 1);
+		add_outarg_reg2 (cfg, call, ArgInIReg, sparc_o0 + ainfo->reg, in->dreg + 2);
+		break;
+	case ArgOnStackPair:
+		/* MSW at the lower address (big endian) */
+		MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, sparc_sp, offset, in->dreg + 2);
+		MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, sparc_sp, offset + 4, in->dreg + 1);
+		break;
+	case ArgInSplitRegStack:
+		/* MSW in the last parameter register, LSW on the stack */
+		add_outarg_reg2 (cfg, call, ArgInIReg, sparc_o0 + ainfo->reg, in->dreg + 2);
+		MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, sparc_sp, offset + 4, in->dreg + 1);
+		break;
+	default:
+		g_assert_not_reached ();
+	}
+}
+
+/*
+ * emit_pass_double:
+ *
+ *   Pass the R8 argument IN according to AINFO. Since there is no direct
+ * fp->int register move on this target, values destined for integer
+ * registers are spilled to their stack slot first, then reloaded.
+ */
+static void
+emit_pass_double (MonoCompile *cfg, MonoCallInst *call, ArgInfo *ainfo, MonoInst *in)
+{
+	int offset = ARGS_OFFSET + ainfo->offset;
+
+	switch (ainfo->storage) {
+	case ArgInIRegPair:
+		/* floating-point <-> integer transfer must go through memory */
+		MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORER8_MEMBASE_REG, sparc_sp, offset, in->dreg);
+
+		/* Load into a register pair */
+		add_outarg_load (cfg, call, OP_LOADI4_MEMBASE, sparc_sp, offset, sparc_o0 + ainfo->reg);
+		add_outarg_load (cfg, call, OP_LOADI4_MEMBASE, sparc_sp, offset + 4, sparc_o0 + ainfo->reg + 1);
+		break;
+	case ArgOnStackPair:
+		/* Stack argument: the store to the slot is all that is needed */
+		MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORER8_MEMBASE_REG, sparc_sp, offset, in->dreg);
+		break;
+	case ArgInSplitRegStack:
+		/* floating-point <-> integer transfer must go through memory */
+		MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORER8_MEMBASE_REG, sparc_sp, offset, in->dreg);
+		/* Load most significant word into register; the LSW stays on the stack */
+		add_outarg_load (cfg, call, OP_LOADI4_MEMBASE, sparc_sp, offset, sparc_o0 + ainfo->reg);
+		break;
+	default:
+		g_assert_not_reached ();
+	}
+}
+
+/*
+ * emit_pass_float:
+ *
+ *   Pass the R4 argument IN according to AINFO, spilling through the stack
+ * slot when it has to end up in an integer register.
+ */
+static void
+emit_pass_float (MonoCompile *cfg, MonoCallInst *call, ArgInfo *ainfo, MonoInst *in)
+{
+	int offset = ARGS_OFFSET + ainfo->offset;
+
+	switch (ainfo->storage) {
+	case ArgInIReg:
+		/* floating-point <-> integer transfer must go through memory */
+		MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORER4_MEMBASE_REG, sparc_sp, offset, in->dreg);
+		add_outarg_load (cfg, call, OP_LOADI4_MEMBASE, sparc_sp, offset, sparc_o0 + ainfo->reg);
+		break;
+	case ArgOnStack:
+		MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORER4_MEMBASE_REG, sparc_sp, offset, in->dreg);
+		break;
+	default:
+		g_assert_not_reached ();
+	}
+}
+
+static void
+emit_pass_other (MonoCompile *cfg, MonoCallInst *call, ArgInfo *ainfo, MonoType *arg_type, MonoInst *in);
+
+/*
+ * emit_pass_vtype:
+ *
+ *   Pass the valuetype argument IN by copying it to a newly reserved stack
+ * location (via OP_OUTARG_VT) and then passing the address of that location
+ * as an ordinary pointer argument. Updates cinfo->stack_usage with the space
+ * reserved for the copy.
+ */
+static void
+emit_pass_vtype (MonoCompile *cfg, MonoCallInst *call, CallInfo *cinfo, ArgInfo *ainfo, MonoType *arg_type, MonoInst *in, gboolean pinvoke)
+{
+	MonoInst *arg;
+	guint32 align, offset, pad, size;
+
+	if (arg_type->type == MONO_TYPE_TYPEDBYREF) {
+		size = sizeof (MonoTypedRef);
+		align = sizeof (gpointer);
+	}
+	else if (pinvoke)
+		size = mono_type_native_stack_size (&in->klass->byval_arg, &align);
+	else {
+		/*
+		 * Other backends use mono_type_stack_size (), but that
+		 * aligns the size to 8, which is larger than the size of
+		 * the source, leading to reads of invalid memory if the
+		 * source is at the end of address space.
+		 */
+		size = mono_class_value_size (in->klass, &align);
+	}
+
+	/* The first 6 argument locations are reserved */
+	if (cinfo->stack_usage < 6 * sizeof (gpointer))
+		cinfo->stack_usage = 6 * sizeof (gpointer);
+
+	/* Reserve an aligned slot for the copy past the current outgoing area */
+	offset = ALIGN_TO ((ARGS_OFFSET - STACK_BIAS) + cinfo->stack_usage, align);
+	pad = offset - ((ARGS_OFFSET - STACK_BIAS) + cinfo->stack_usage);
+
+	cinfo->stack_usage += size;
+	cinfo->stack_usage += pad;
+
+	/*
+	 * We use OP_OUTARG_VT to copy the valuetype to a stack location, then
+	 * use the normal OUTARG opcodes to pass the address of the location to
+	 * the callee.
+	 */
+	if (size > 0) {
+		/* The copy itself; mono_arch_emit_outarg_vt () expands this */
+		MONO_INST_NEW (cfg, arg, OP_OUTARG_VT);
+		arg->sreg1 = in->dreg;
+		arg->klass = in->klass;
+		arg->backend.size = size;
+		arg->inst_p0 = call;
+		/* Private copy of AINFO with the offset of the reserved slot */
+		arg->inst_p1 = mono_mempool_alloc (cfg->mempool, sizeof (ArgInfo));
+		memcpy (arg->inst_p1, ainfo, sizeof (ArgInfo));
+		((ArgInfo*)(arg->inst_p1))->offset = STACK_BIAS + offset;
+		MONO_ADD_INS (cfg->cbb, arg);
+
+		/* Compute the address of the copy ... */
+		MONO_INST_NEW (cfg, arg, OP_ADD_IMM);
+		arg->dreg = mono_alloc_preg (cfg);
+		arg->sreg1 = sparc_sp;
+		arg->inst_imm = STACK_BIAS + offset;
+		MONO_ADD_INS (cfg->cbb, arg);
+
+		/* ... and pass it like a normal pointer argument */
+		emit_pass_other (cfg, call, ainfo, NULL, arg);
+	}
+}
+
+/*
+ * emit_pass_other:
+ *
+ *   Pass the word-sized argument IN according to AINFO, either in an integer
+ * register or on the stack. ARG_TYPE is unused here (emit_pass_vtype ()
+ * passes NULL for it).
+ */
+static void
+emit_pass_other (MonoCompile *cfg, MonoCallInst *call, ArgInfo *ainfo, MonoType *arg_type, MonoInst *in)
+{
+	int offset = ARGS_OFFSET + ainfo->offset;
+	int opcode;
+
+	switch (ainfo->storage) {
+	case ArgInIReg:
+		add_outarg_reg2 (cfg, call, ArgInIReg, sparc_o0 + ainfo->reg, in->dreg);
+		break;
+	case ArgOnStack:
+#ifdef SPARCV9
+		NOT_IMPLEMENTED;
+#else
+		/* Pick a store size matching the slot's alignment — presumably only
+		 * sub-word arguments end up at unaligned offsets; TODO confirm */
+		if (offset & 0x1)
+			opcode = OP_STOREI1_MEMBASE_REG;
+		else if (offset & 0x2)
+			opcode = OP_STOREI2_MEMBASE_REG;
+		else
+			opcode = OP_STOREI4_MEMBASE_REG;
+		MONO_EMIT_NEW_STORE_MEMBASE (cfg, opcode, sparc_sp, offset, in->dreg);
+#endif
+		break;
+	default:
+		g_assert_not_reached ();
+	}
+}
+
+/*
+ * emit_sig_cookie2:
+ *
+ *   Emit the signature cookie for a vararg call: store a trimmed-down copy
+ * of the call signature into its reserved stack slot.
+ */
+static void
+emit_sig_cookie2 (MonoCompile *cfg, MonoCallInst *call, CallInfo *cinfo)
+{
+	MonoMethodSignature *tmp_sig;
+
+	/*
+	 * mono_ArgIterator_Setup assumes the signature cookie is
+	 * passed first and all the arguments which were before it are
+	 * passed on the stack after the signature. So compensate by
+	 * passing a different signature.
+	 */
+	tmp_sig = mono_metadata_signature_dup (call->signature);
+	tmp_sig->param_count -= call->signature->sentinelpos;
+	tmp_sig->sentinelpos = 0;
+	memcpy (tmp_sig->params, call->signature->params + call->signature->sentinelpos, tmp_sig->param_count * sizeof (MonoType*));
+
+	/* FIXME: Add support for signature tokens to AOT */
+	cfg->disable_aot = TRUE;
+	/* We always pass the signature on the stack for simplicity */
+	MONO_EMIT_NEW_STORE_MEMBASE_IMM (cfg, OP_STORE_MEMBASE_IMM, sparc_sp, ARGS_OFFSET + cinfo->sig_cookie.offset, tmp_sig);
+}
+
+/*
+ * mono_arch_emit_call:
+ *
+ *   Emit the IR needed to pass the arguments of CALL according to this
+ * target's calling convention, dispatching each argument to the emit_pass_*
+ * helper matching its type. Also sets call->stack_usage.
+ */
+void
+mono_arch_emit_call (MonoCompile *cfg, MonoCallInst *call)
+{
+	MonoInst *in;
+	MonoMethodSignature *sig;
+	int i, n;
+	CallInfo *cinfo;
+	ArgInfo *ainfo;
+	/* NOTE(review): extra_space is never changed below, so it is always 0 */
+	guint32 extra_space = 0;
+
+	sig = call->signature;
+	n = sig->param_count + sig->hasthis;
+
+	cinfo = get_call_info (cfg, sig, sig->pinvoke);
+
+	if (sig->ret && MONO_TYPE_ISSTRUCT (sig->ret)) {
+		/* Set the 'struct/union return pointer' location on the stack */
+		MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STOREI4_MEMBASE_REG, sparc_sp, 64, call->vret_var->dreg);
+	}
+
+	for (i = 0; i < n; ++i) {
+		MonoType *arg_type;
+
+		ainfo = cinfo->args + i;
+
+		if ((sig->call_convention == MONO_CALL_VARARG) && (i == sig->sentinelpos)) {
+			/* Emit the signature cookie just before the first implicit argument */
+			emit_sig_cookie2 (cfg, call, cinfo);
+		}
+
+		in = call->args [i];
+
+		if (sig->hasthis && (i == 0))
+			arg_type = &mono_defaults.object_class->byval_arg;
+		else
+			arg_type = sig->params [i - sig->hasthis];
+
+		/* Dispatch on the argument type */
+		if ((i >= sig->hasthis) && (MONO_TYPE_ISSTRUCT(sig->params [i - sig->hasthis])))
+			emit_pass_vtype (cfg, call, cinfo, ainfo, arg_type, in, sig->pinvoke);
+		else if (!arg_type->byref && ((arg_type->type == MONO_TYPE_I8) || (arg_type->type == MONO_TYPE_U8)))
+			emit_pass_long (cfg, call, ainfo, in);
+		else if (!arg_type->byref && (arg_type->type == MONO_TYPE_R8))
+			emit_pass_double (cfg, call, ainfo, in);
+		else if (!arg_type->byref && (arg_type->type == MONO_TYPE_R4))
+			emit_pass_float (cfg, call, ainfo, in);
+		else
+			emit_pass_other (cfg, call, ainfo, arg_type, in);
+	}
+
+	/* Handle the case where there are no implicit arguments */
+	if (!sig->pinvoke && (sig->call_convention == MONO_CALL_VARARG) && (n == sig->sentinelpos)) {
+		emit_sig_cookie2 (cfg, call, cinfo);
+	}
+
+	call->stack_usage = cinfo->stack_usage + extra_space;
+
+	g_free (cinfo);
+}
+
+/*
+ * mono_arch_emit_outarg_vt:
+ *
+ *   Expand an OP_OUTARG_VT instruction: copy the valuetype pointed to by
+ * SRC into the stack slot recorded in the ArgInfo attached to INS by
+ * emit_pass_vtype ().
+ */
+void
+mono_arch_emit_outarg_vt (MonoCompile *cfg, MonoInst *ins, MonoInst *src)
+{
+	ArgInfo *ainfo = (ArgInfo*)ins->inst_p1;
+	int size = ins->backend.size;
+
+	mini_emit_memcpy2 (cfg, sparc_sp, ainfo->offset, src->dreg, 0, size, 0);
+}
+
+/*
+ * mono_arch_emit_setret:
+ *
+ *   Emit the moves which place the return value VAL into the location
+ * mandated by the calling convention.
+ */
+void
+mono_arch_emit_setret (MonoCompile *cfg, MonoMethod *method, MonoInst *val)
+{
+	CallInfo *cinfo = get_call_info (cfg, mono_method_signature (method), FALSE);
+
+	/* Assert before the first dereference, not after the switch */
+	g_assert (cinfo);
+
+	switch (cinfo->ret.storage) {
+	case ArgInIReg:
+		MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, cfg->ret->dreg, val->dreg);
+		break;
+	case ArgInIRegPair:
+		/* MSW (dreg + 2) into the lower-numbered register */
+		MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, cfg->ret->dreg, val->dreg + 2);
+		MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, cfg->ret->dreg + 1, val->dreg + 1);
+		break;
+	case ArgInFReg:
+		if (mono_method_signature (method)->ret->type == MONO_TYPE_R4)
+			MONO_EMIT_NEW_UNALU (cfg, OP_SETFRET, cfg->ret->dreg, val->dreg);
+		else
+			MONO_EMIT_NEW_UNALU (cfg, OP_FMOVE, cfg->ret->dreg, val->dreg);
+		break;
+	default:
+		g_assert_not_reached ();
+	}
+
+	/*
+	 * get_call_info () returns a g_malloc ()-ed structure (see the matching
+	 * g_free () in mono_arch_emit_call ()), so free it here too to avoid
+	 * leaking it on every compiled method.
+	 */
+	g_free (cinfo);
+}
+
int cond_to_sparc_cond [][3] = {
{sparc_be, sparc_be, sparc_fbe},
{sparc_bne, sparc_bne, 0},
void
mono_arch_peephole_pass_2 (MonoCompile *cfg, MonoBasicBlock *bb)
{
- MonoInst *ins, *n;
+ MonoInst *ins, *n, *last_ins = NULL;
+ ins = bb->code;
MONO_BB_FOR_EACH_INS_SAFE (bb, n, ins) {
- MonoInst *last_ins = mono_inst_list_prev (&ins->node, &bb->ins_list);
-
switch (ins->opcode) {
case OP_MUL_IMM:
/* remove unnecessary multiplication with 1 */
if (v64 && (mono_method_get_header (cfg->method)->code_size < 10000) && last_ins &&
(last_ins->opcode == OP_COMPARE_IMM) &&
(last_ins->inst_imm == 0)) {
+ MonoInst *next = ins->next;
switch (ins->opcode) {
case OP_IBEQ:
ins->opcode = OP_SPARC_BRZ;
default:
g_assert_not_reached ();
}
- last_ins->data = ins->data;
- last_ins->opcode = ins->opcode;
- last_ins->type = ins->type;
- last_ins->ssa_op = ins->ssa_op;
- last_ins->flags = ins->flags;
- last_ins->dreg = ins->dreg;
- last_ins->sreg2 = ins->sreg2;
- last_ins->backend = ins->backend;
- last_ins->klass = ins->klass;
- last_ins->cil_code = ins->cil_code;
+ ins->sreg1 = last_ins->sreg1;
+ *last_ins = *ins;
MONO_DELETE_INS (bb, ins);
continue;
}
}
break;
}
+ last_ins = ins;
+ ins = ins->next;
}
+ bb->last_ins = last_ins;
}
void
case OP_VCALL:
case OP_VCALL_REG:
case OP_VCALL_MEMBASE:
+ case OP_VCALL2:
+ case OP_VCALL2_REG:
+ case OP_VCALL2_MEMBASE:
break;
default:
NOT_IMPLEMENTED;
ArgInfo *ainfo = cinfo->args + i;
gint32 stack_offset;
MonoType *arg_type;
+
inst = cfg->args [i];
if (sig->hasthis && (i == 0))
MonoCallInst *call;
guint offset;
guint32 *code = (guint32*)(cfg->native_code + cfg->code_len);
+ MonoInst *last_ins = NULL;
int max_len, cpos;
const char *spec;
sparc_sll_imm (code, ins->sreg1, 16, sparc_o7);
sparc_srl_imm (code, sparc_o7, 16, ins->dreg);
break;
+ case OP_LCONV_TO_OVF_U4:
case OP_ICONV_TO_OVF_U4:
/* Only used on V9 */
sparc_cmp_imm (code, ins->sreg1, 0);
sparc_nop (code);
sparc_mov_reg_reg (code, ins->sreg1, ins->dreg);
break;
+ case OP_LCONV_TO_OVF_I4_UN:
case OP_ICONV_TO_OVF_I4_UN:
/* Only used on V9 */
NOT_IMPLEMENTED;
sparc_wry (code, sparc_g0, sparc_g0);
sparc_udiv (code, FALSE, ins->sreg1, ins->sreg2, ins->dreg);
break;
- case OP_DIV_IMM: {
+ case OP_DIV_IMM:
+ case OP_IDIV_IMM: {
int i, imm;
/* Transform division into a shift */
}
break;
}
+ case OP_IDIV_UN_IMM:
+ sparc_wry (code, sparc_g0, sparc_g0);
+ EMIT_ALU_IMM (ins, udiv, FALSE);
+ break;
case OP_IREM:
/* Sign extend sreg1 into %y */
sparc_sra_imm (code, ins->sreg1, 31, sparc_o7);
mono_add_patch_info (cfg, offset, (MonoJumpInfoType)ins->inst_i1, ins->inst_p0);
sparc_set_template (code, ins->dreg);
break;
+ case OP_JUMP_TABLE:
+ mono_add_patch_info (cfg, offset, (MonoJumpInfoType)ins->inst_i1, ins->inst_p0);
+ sparc_set_template (code, ins->dreg);
+ break;
case OP_ICONV_TO_I4:
case OP_ICONV_TO_U4:
case OP_MOVE:
case OP_FCALL:
case OP_LCALL:
case OP_VCALL:
+ case OP_VCALL2:
case OP_VOIDCALL:
case OP_CALL:
call = (MonoCallInst*)ins;
case OP_FCALL_REG:
case OP_LCALL_REG:
case OP_VCALL_REG:
+ case OP_VCALL2_REG:
case OP_VOIDCALL_REG:
case OP_CALL_REG:
call = (MonoCallInst*)ins;
case OP_FCALL_MEMBASE:
case OP_LCALL_MEMBASE:
case OP_VCALL_MEMBASE:
+ case OP_VCALL2_MEMBASE:
case OP_VOIDCALL_MEMBASE:
case OP_CALL_MEMBASE:
call = (MonoCallInst*)ins;
break;
case OP_LOCALLOC: {
guint32 size_reg;
+ gint32 offset2;
#ifdef MONO_ARCH_SIGSEGV_ON_ALTSTACK
/* Perform stack touching */
#endif
/* Keep alignment */
- sparc_add_imm (code, FALSE, ins->sreg1, MONO_ARCH_LOCALLOC_ALIGNMENT - 1, ins->dreg);
+ /* Add 4 to compensate for the rounding of localloc_offset */
+ sparc_add_imm (code, FALSE, ins->sreg1, 4 + MONO_ARCH_LOCALLOC_ALIGNMENT - 1, ins->dreg);
sparc_set (code, ~(MONO_ARCH_LOCALLOC_ALIGNMENT - 1), sparc_o7);
sparc_and (code, FALSE, ins->dreg, sparc_o7, ins->dreg);
sparc_sub (code, FALSE, sparc_sp, ins->dreg, ins->dreg);
/* Keep %sp valid at all times */
sparc_mov_reg_reg (code, ins->dreg, sparc_sp);
- g_assert (sparc_is_imm13 (MONO_SPARC_STACK_BIAS + cfg->arch.localloc_offset));
- sparc_add_imm (code, FALSE, ins->dreg, MONO_SPARC_STACK_BIAS + cfg->arch.localloc_offset, ins->dreg);
+ /* Round localloc_offset too so the result is at least 8 aligned */
+ offset2 = ALIGN_TO (cfg->arch.localloc_offset, 8);
+ g_assert (sparc_is_imm13 (MONO_SPARC_STACK_BIAS + offset2));
+ sparc_add_imm (code, FALSE, ins->dreg, MONO_SPARC_STACK_BIAS + offset2, ins->dreg);
if (ins->flags & MONO_INST_INIT) {
guint32 *br [3];
NOT_IMPLEMENTED;
#endif
- offset = ALIGN_TO (offset, MONO_ARCH_LOCALLOC_ALIGNMENT);
+ /* To compensate for the rounding of localloc_offset */
+ offset += sizeof (gpointer);
+ offset = ALIGN_TO (offset, MONO_ARCH_FRAME_ALIGNMENT);
if (sparc_is_imm13 (offset))
sparc_sub_imm (code, FALSE, sparc_sp, offset, sparc_sp);
else {
sparc_set (code, offset, sparc_o7);
sparc_sub (code, FALSE, sparc_sp, sparc_o7, sparc_sp);
}
- g_assert (sparc_is_imm13 (MONO_SPARC_STACK_BIAS + cfg->arch.localloc_offset));
- sparc_add_imm (code, FALSE, sparc_sp, MONO_SPARC_STACK_BIAS + cfg->arch.localloc_offset, ins->dreg);
+ /* Round localloc_offset too so the result is at least 8 aligned */
+ offset2 = ALIGN_TO (cfg->arch.localloc_offset, 8);
+ g_assert (sparc_is_imm13 (MONO_SPARC_STACK_BIAS + offset2));
+ sparc_add_imm (code, FALSE, sparc_sp, MONO_SPARC_STACK_BIAS + offset2, ins->dreg);
if ((ins->flags & MONO_INST_INIT) && (offset > 0)) {
guint32 *br [2];
int i;
case OP_LABEL:
ins->inst_c0 = (guint8*)code - cfg->native_code;
break;
+ case OP_NOP:
+ case OP_DUMMY_USE:
+ case OP_DUMMY_STORE:
+ case OP_NOT_REACHED:
+ case OP_NOT_NULL:
+ break;
case OP_BR:
- if ((ins->inst_target_bb == bb->next_bb) &&
- ins->node.next == &bb->ins_list)
+ //g_print ("target: %p, next: %p, curr: %p, last: %p\n", ins->inst_target_bb, bb->next_bb, ins, bb->last_ins);
+ if ((ins->inst_target_bb == bb->next_bb) && ins == bb->last_ins)
break;
if (ins->flags & MONO_INST_BRLABEL) {
if (ins->inst_i0->inst_c0) {
case OP_COND_EXC_NO:
case OP_COND_EXC_C:
case OP_COND_EXC_NC:
+ case OP_COND_EXC_IEQ:
+ case OP_COND_EXC_INE_UN:
+ case OP_COND_EXC_ILT:
+ case OP_COND_EXC_ILT_UN:
+ case OP_COND_EXC_IGT:
+ case OP_COND_EXC_IGT_UN:
+ case OP_COND_EXC_IGE:
+ case OP_COND_EXC_IGE_UN:
+ case OP_COND_EXC_ILE:
+ case OP_COND_EXC_ILE_UN:
+ case OP_COND_EXC_IOV:
+ case OP_COND_EXC_INO:
+ case OP_COND_EXC_IC:
+ case OP_COND_EXC_INC:
+#ifdef SPARCV9
+ NOT_IMPLEMENTED;
+#else
EMIT_COND_SYSTEM_EXCEPTION (ins, opcode_to_sparc_cond (ins->opcode), ins->inst_p1);
+#endif
break;
case OP_SPARC_COND_EXC_EQZ:
EMIT_COND_SYSTEM_EXCEPTION_BPR (ins, brz, ins->inst_p1);
case OP_SPARC_COND_EXC_NEZ:
EMIT_COND_SYSTEM_EXCEPTION_BPR (ins, brnz, ins->inst_p1);
break;
- case OP_COND_EXC_IOV:
- case OP_COND_EXC_IC:
- EMIT_COND_SYSTEM_EXCEPTION_GENERAL (ins, opcode_to_sparc_cond (ins->opcode), ins->inst_p1, TRUE, sparc_icc_short);
- break;
+
case OP_IBEQ:
case OP_IBNE_UN:
case OP_IBLT:
}
break;
}
- case OP_LCONV_TO_OVF_I: {
+ case OP_FCONV_TO_I8:
+ case OP_FCONV_TO_U8:
+ /* Emulated */
+ g_assert_not_reached ();
+ break;
+ case OP_FCONV_TO_R4:
+ /* FIXME: Change precision ? */
+#ifdef SPARCV9
+ sparc_fmovd (code, ins->sreg1, ins->dreg);
+#else
+ sparc_fmovs (code, ins->sreg1, ins->dreg);
+ sparc_fmovs (code, ins->sreg1 + 1, ins->dreg + 1);
+#endif
+ break;
+ case OP_LCONV_TO_R_UN: {
+ /* Emulated */
+ g_assert_not_reached ();
+ break;
+ }
+ case OP_LCONV_TO_OVF_I:
+ case OP_LCONV_TO_OVF_I4_2: {
guint32 *br [3], *label [1];
/*
}
cpos += max_len;
+
+ last_ins = ins;
}
cfg->code_len = (guint8*)code - cfg->native_code;
if (cfg->bb_exit->in_count == 1 && cfg->bb_exit->in_bb[0]->native_offset != cfg->bb_exit->native_offset)
can_fold = 1;
+ if (cfg->new_ir) {
+ /*
+ * FIXME: The last instruction might have a branch pointing into it like in
+ * int_ceq sparc_i0 <-
+ */
+ can_fold = 0;
+ }
+
/* Try folding last instruction into the restore */
if (can_fold && (sparc_inst_op (code [-2]) == 0x2) && (sparc_inst_op3 (code [-2]) == 0x2) && sparc_inst_imm (code [-2]) && (sparc_inst_rd (code [-2]) == sparc_i0)) {
/* or reg, imm, %i0 */
return ins;
}
+MonoInst*
+mono_arch_emit_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
+{
+	/* No method-level intrinsics are implemented for this backend yet. */
+	return NULL;
+}
+
/*
* mono_arch_get_argument_info:
* @csig: a method signature
guint32 reg_usage;
guint32 freg_usage;
gboolean need_stack_align;
- guint32 stack_align_amount;
ArgInfo ret;
ArgInfo sig_cookie;
ArgInfo args [1];
case ArgInIReg:
cfg->ret->opcode = OP_REGVAR;
cfg->ret->inst_c0 = cinfo->ret.reg;
+ cfg->ret->dreg = cinfo->ret.reg;
break;
case ArgNone:
case ArgOnFloatFpStack:
}
}
-/* Fixme: we need an alignment solution for enter_method and mono_arch_call_opcode,
- * currently alignment in mono_arch_call_opcode is computed without arch_get_argument_info
- */
-
static void
emit_sig_cookie (MonoCompile *cfg, MonoCallInst *call)
{
arg->inst_left = sig_arg;
arg->type = STACK_PTR;
- MONO_INST_LIST_ADD (&arg->node, &call->out_args);
+ /* prepend, so they get reversed */
+ arg->next = call->out_args;
+ call->out_args = arg;
}
/*
arg->cil_code = in->cil_code;
arg->inst_left = in;
arg->type = in->type;
- MONO_INST_LIST_ADD (&arg->node, &call->out_args);
+ /* prepend, so they get reversed */
+ arg->next = call->out_args;
+ call->out_args = arg;
if ((i >= sig->hasthis) && (MONO_TYPE_ISSTRUCT(t))) {
- guint32 size, align;
+ gint align;
+ guint32 ialign;
+ guint32 size;
if (t->type == MONO_TYPE_TYPEDBYREF) {
size = sizeof (MonoTypedRef);
}
else
if (sig->pinvoke)
- size = mono_type_native_stack_size (&in->klass->byval_arg, &align);
+ size = mono_type_native_stack_size (&in->klass->byval_arg, &ialign);
else {
int ialign;
size = mini_type_stack_size (cfg->generic_sharing_context, &in->klass->byval_arg, &ialign);
zero_inst->inst_p0 = 0;
arg->inst_left = zero_inst;
arg->type = STACK_PTR;
- MONO_INST_LIST_ADD (&arg->node, &call->out_args);
- } else {
+ /* prepend, so they get reversed */
+ arg->next = call->out_args;
+ call->out_args = arg;
+ }
+ else
/* if the function returns a struct, the called method already does a ret $0x4 */
if (sig->ret && MONO_TYPE_ISSTRUCT (sig->ret))
cinfo->stack_usage -= 4;
- }
}
-
+
call->stack_usage = cinfo->stack_usage;
#if defined(__APPLE__)
if (cinfo->need_stack_align) {
MONO_INST_NEW (cfg, arg, OP_X86_OUTARG_ALIGN_STACK);
arg->inst_c0 = cinfo->stack_align_amount;
- MONO_INST_LIST_ADD (&arg->node, &call->out_args);
+ arg->next = call->out_args;
+ call->out_args = arg;
}
#endif
return call;
}
+/*
+ * emit_sig_cookie2:
+ *
+ *   Push the signature cookie for a vararg call: a trimmed-down copy of the
+ * call signature is pushed as an immediate.
+ */
+static void
+emit_sig_cookie2 (MonoCompile *cfg, MonoCallInst *call, CallInfo *cinfo)
+{
+	MonoMethodSignature *tmp_sig;
+
+	/* FIXME: Add support for signature tokens to AOT */
+	cfg->disable_aot = TRUE;
+
+	/*
+	 * mono_ArgIterator_Setup assumes the signature cookie is
+	 * passed first and all the arguments which were before it are
+	 * passed on the stack after the signature. So compensate by
+	 * passing a different signature.
+	 */
+	tmp_sig = mono_metadata_signature_dup (call->signature);
+	tmp_sig->param_count -= call->signature->sentinelpos;
+	tmp_sig->sentinelpos = 0;
+	memcpy (tmp_sig->params, call->signature->params + call->signature->sentinelpos, tmp_sig->param_count * sizeof (MonoType*));
+
+	MONO_EMIT_NEW_BIALU_IMM (cfg, OP_X86_PUSH_IMM, -1, -1, tmp_sig);
+}
+
+/*
+ * mono_arch_emit_call:
+ *
+ *   Emit the IR needed to pass the arguments of CALL according to the x86
+ * calling convention. Arguments are pushed on the stack in reverse order;
+ * valuetype returns are handled either in registers (ArgValuetypeInReg) or
+ * through a hidden return-address argument. Sets call->stack_usage.
+ */
+void
+mono_arch_emit_call (MonoCompile *cfg, MonoCallInst *call)
+{
+	MonoInst *arg, *in;
+	MonoMethodSignature *sig;
+	int i, n;
+	CallInfo *cinfo;
+	int sentinelpos = 0;
+
+	sig = call->signature;
+	n = sig->param_count + sig->hasthis;
+
+	/* cinfo is allocated from cfg->mempool, so no explicit free is needed */
+	cinfo = get_call_info (cfg->generic_sharing_context, cfg->mempool, sig, FALSE);
+
+	if (!sig->pinvoke && (sig->call_convention == MONO_CALL_VARARG))
+		sentinelpos = sig->sentinelpos + (sig->hasthis ? 1 : 0);
+
+	if (sig->ret && MONO_TYPE_ISSTRUCT (sig->ret)) {
+		MonoInst *vtarg;
+
+		if (cinfo->ret.storage == ArgValuetypeInReg) {
+			if (cinfo->ret.pair_storage [0] == ArgInIReg && cinfo->ret.pair_storage [1] == ArgNone) {
+				/*
+				 * Tell the JIT to use a more efficient calling convention: call using
+				 * OP_CALL, compute the result location after the call, and save the
+				 * result there.
+				 */
+				call->vret_in_reg = TRUE;
+			} else {
+				/*
+				 * The valuetype is in EAX:EDX after the call, needs to be copied to
+				 * the stack. Save the address here, so the call instruction can
+				 * access it.
+				 */
+				MONO_INST_NEW (cfg, vtarg, OP_X86_PUSH);
+				vtarg->sreg1 = call->vret_var->dreg;
+				MONO_ADD_INS (cfg->cbb, vtarg);
+			}
+		}
+	}
+
+#if defined(__APPLE__)
+	/* OSX requires 16 byte stack alignment at call sites */
+	if (cinfo->need_stack_align) {
+		MONO_INST_NEW (cfg, arg, OP_SUB_IMM);
+		arg->dreg = X86_ESP;
+		arg->sreg1 = X86_ESP;
+		arg->inst_imm = cinfo->stack_align_amount;
+		MONO_ADD_INS (cfg->cbb, arg);
+	}
+#endif
+
+	/* Handle the case where there are no implicit arguments */
+	if (!sig->pinvoke && (sig->call_convention == MONO_CALL_VARARG) && (n == sentinelpos)) {
+		emit_sig_cookie2 (cfg, call, cinfo);
+	}
+
+	/* Arguments are pushed in the reverse order */
+	for (i = n - 1; i >= 0; i --) {
+		ArgInfo *ainfo = cinfo->args + i;
+		MonoType *t;
+
+		if (i >= sig->hasthis)
+			t = sig->params [i - sig->hasthis];
+		else
+			t = &mono_defaults.int_class->byval_arg;
+		t = mono_type_get_underlying_type (t);
+
+		MONO_INST_NEW (cfg, arg, OP_X86_PUSH);
+
+		in = call->args [i];
+		arg->cil_code = in->cil_code;
+		arg->sreg1 = in->dreg;
+		arg->type = in->type;
+
+		g_assert (in->dreg != -1);
+
+		if ((i >= sig->hasthis) && (MONO_TYPE_ISSTRUCT(t))) {
+			gint align;
+			guint32 ialign;
+			guint32 size;
+
+			g_assert (in->klass);
+
+			if (t->type == MONO_TYPE_TYPEDBYREF) {
+				size = sizeof (MonoTypedRef);
+				align = sizeof (gpointer);
+			}
+			else
+				if (sig->pinvoke) {
+					size = mono_type_native_stack_size (&in->klass->byval_arg, &ialign);
+					align = ialign;
+				} else {
+					size = mono_type_stack_size (&in->klass->byval_arg, &align);
+				}
+
+			/* Zero-sized valuetypes push nothing at all */
+			if (size > 0) {
+				/* Expanded later by mono_arch_emit_outarg_vt () */
+				arg->opcode = OP_OUTARG_VT;
+				arg->sreg1 = in->dreg;
+				arg->klass = in->klass;
+				arg->backend.size = size;
+
+				MONO_ADD_INS (cfg->cbb, arg);
+			}
+		}
+		else {
+			switch (ainfo->storage) {
+			case ArgOnStack:
+				arg->opcode = OP_X86_PUSH;
+				if (!t->byref) {
+					if (t->type == MONO_TYPE_R4) {
+						/* Reserve the slot and store instead of pushing */
+						MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SUB_IMM, X86_ESP, X86_ESP, 4);
+						arg->opcode = OP_STORER4_MEMBASE_REG;
+						arg->inst_destbasereg = X86_ESP;
+						arg->inst_offset = 0;
+					} else if (t->type == MONO_TYPE_R8) {
+						MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SUB_IMM, X86_ESP, X86_ESP, 8);
+						arg->opcode = OP_STORER8_MEMBASE_REG;
+						arg->inst_destbasereg = X86_ESP;
+						arg->inst_offset = 0;
+					} else if (t->type == MONO_TYPE_I8 || t->type == MONO_TYPE_U8) {
+						/* Push MSW (dreg + 2) first, then 'arg' pushes the
+						 * LSW (dreg + 1) */
+						arg->sreg1 ++;
+						MONO_EMIT_NEW_UNALU (cfg, OP_X86_PUSH, -1, in->dreg + 2);
+					}
+				}
+				break;
+			default:
+				g_assert_not_reached ();
+			}
+
+			MONO_ADD_INS (cfg->cbb, arg);
+		}
+
+		if (!sig->pinvoke && (sig->call_convention == MONO_CALL_VARARG) && (i == sentinelpos)) {
+			/* Emit the signature cookie just before the implicit arguments */
+			emit_sig_cookie2 (cfg, call, cinfo);
+		}
+	}
+
+	if (sig->ret && MONO_TYPE_ISSTRUCT (sig->ret)) {
+		MonoInst *vtarg;
+
+		if (cinfo->ret.storage == ArgValuetypeInReg) {
+			/* Already done */
+		}
+		else if (cinfo->ret.storage == ArgInIReg) {
+			NOT_IMPLEMENTED;
+			/* The return address is passed in a register */
+			MONO_INST_NEW (cfg, vtarg, OP_MOVE);
+			vtarg->sreg1 = call->inst.dreg;
+			vtarg->dreg = mono_regstate_next_int (cfg->rs);
+			MONO_ADD_INS (cfg->cbb, vtarg);
+
+			mono_call_inst_add_outarg_reg (cfg, call, vtarg->dreg, cinfo->ret.reg, FALSE);
+		} else {
+			/* Push the address where the callee stores the vtype result */
+			MonoInst *vtarg;
+			MONO_INST_NEW (cfg, vtarg, OP_X86_PUSH);
+			vtarg->type = STACK_MP;
+			vtarg->sreg1 = call->vret_var->dreg;
+			MONO_ADD_INS (cfg->cbb, vtarg);
+		}
+
+		/* if the function returns a struct, the called method already does a ret $0x4 */
+		cinfo->stack_usage -= 4;
+	}
+
+	call->stack_usage = cinfo->stack_usage;
+}
+
+/*
+ * mono_arch_emit_outarg_vt:
+ *
+ *   Expand an OP_OUTARG_VT: place the valuetype pointed to by SRC onto the
+ * stack, choosing the cheapest strategy for its size.
+ */
+void
+mono_arch_emit_outarg_vt (MonoCompile *cfg, MonoInst *ins, MonoInst *src)
+{
+	MonoInst *arg;
+	int size = ins->backend.size;
+
+	if (size <= 4) {
+		/* Small enough for a single push from memory */
+		MONO_INST_NEW (cfg, arg, OP_X86_PUSH_MEMBASE);
+		arg->sreg1 = src->dreg;
+
+		MONO_ADD_INS (cfg->cbb, arg);
+	} else if (size <= 20) {
+		/* Medium sized: reserve the space and do an inline copy */
+		MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SUB_IMM, X86_ESP, X86_ESP, ALIGN_TO (size, 4));
+		mini_emit_memcpy2 (cfg, X86_ESP, 0, src->dreg, 0, size, 4);
+	} else {
+		/* Large: emit a push-object pseudo op expanded at codegen time */
+		MONO_INST_NEW (cfg, arg, OP_X86_PUSH_OBJ);
+		arg->inst_basereg = src->dreg;
+		arg->inst_offset = 0;
+		arg->inst_imm = size;
+
+		MONO_ADD_INS (cfg->cbb, arg);
+	}
+}
+
+/*
+ * mono_arch_emit_setret:
+ *
+ *   Emit the moves placing the return value VAL into the x86 return
+ * location: EAX:EDX for 64 bit values, the fp stack for R4/R8 (no move
+ * needed), EAX otherwise.
+ */
+void
+mono_arch_emit_setret (MonoCompile *cfg, MonoMethod *method, MonoInst *val)
+{
+	MonoType *ret = mono_type_get_underlying_type (mono_method_signature (method)->ret);
+
+	if (!ret->byref) {
+		if (ret->type == MONO_TYPE_R4) {
+			/* Nothing to do */
+			return;
+		} else if (ret->type == MONO_TYPE_R8) {
+			/* Nothing to do */
+			return;
+		} else if (ret->type == MONO_TYPE_I8 || ret->type == MONO_TYPE_U8) {
+			/* LSW in EAX, MSW in EDX */
+			MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, X86_EAX, val->dreg + 1);
+			MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, X86_EDX, val->dreg + 2);
+			return;
+		}
+	}
+
+	MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, cfg->ret->dreg, val->dreg);
+}
+
/*
* Allow tracing to work with this interface (with an optional argument)
*/
MonoInst *ins, *n;
MONO_BB_FOR_EACH_INS_SAFE (bb, n, ins) {
- MonoInst *last_ins = mono_inst_list_prev (&ins->node, &bb->ins_list);
+ MonoInst *last_ins = ins->prev;
+
switch (ins->opcode) {
case OP_IADD_IMM:
case OP_ADD_IMM:
MONO_BB_FOR_EACH_INS_SAFE (bb, n, ins) {
switch (ins->opcode) {
- case OP_ICONST: {
- MonoInst *next;
-
+ case OP_ICONST:
/* reg = 0 -> XOR (reg, reg) */
/* XOR sets cflags on x86, so we cant do it always */
- next = mono_inst_list_next (&ins->node, &bb->ins_list);
- if (ins->inst_c0 == 0 && (!next ||
- (next && INST_IGNORES_CFLAGS (next->opcode)))) {
+ if (ins->inst_c0 == 0 && (!ins->next || (ins->next && INST_IGNORES_CFLAGS (ins->next->opcode)))) {
MonoInst *ins2;
ins->opcode = OP_IXOR;
* Convert succeeding STORE_MEMBASE_IMM 0 ins to STORE_MEMBASE_REG
* since it takes 3 bytes instead of 7.
*/
- for (ins2 = mono_inst_list_next (&ins->node, &bb->ins_list); ins2;
- ins2 = mono_inst_list_next (&ins2->node, &bb->ins_list)) {
+ for (ins2 = ins->next; ins2; ins2 = ins2->next) {
if ((ins2->opcode == OP_STORE_MEMBASE_IMM) && (ins2->inst_imm == 0)) {
ins2->opcode = OP_STORE_MEMBASE_REG;
ins2->sreg1 = ins->dreg;
- } else if ((ins2->opcode == OP_STOREI4_MEMBASE_IMM) && (ins2->inst_imm == 0)) {
+ }
+ else if ((ins2->opcode == OP_STOREI4_MEMBASE_IMM) && (ins2->inst_imm == 0)) {
ins2->opcode = OP_STOREI4_MEMBASE_REG;
ins2->sreg1 = ins->dreg;
- } else if ((ins2->opcode == OP_STOREI1_MEMBASE_IMM) || (ins2->opcode == OP_STOREI2_MEMBASE_IMM)) {
+ }
+ else if ((ins2->opcode == OP_STOREI1_MEMBASE_IMM) || (ins2->opcode == OP_STOREI2_MEMBASE_IMM)) {
/* Continue iteration */
- } else
+ }
+ else
break;
}
}
break;
- }
case OP_IADD_IMM:
case OP_ADD_IMM:
if ((ins->inst_imm == 1) && (ins->dreg == ins->sreg1))
}
}
+/*
+ * mono_arch_lowering_pass:
+ *
+ * Converts complex opcodes into simpler ones so that each IR instruction
+ * corresponds to one machine instruction.
+ */
void
mono_arch_lowering_pass (MonoCompile *cfg, MonoBasicBlock *bb)
{
+	MonoInst *ins, *next;
+
+	/* Make sure vregs allocated below don't clash with the ones already in bb */
+	if (bb->max_vreg > cfg->rs->next_vreg)
+		cfg->rs->next_vreg = bb->max_vreg;
+
+	/*
+	 * FIXME: Need to add more instructions, but the current machine
+	 * description can't model some parts of the composite instructions like
+	 * cdq.
+	 */
+	MONO_BB_FOR_EACH_INS_SAFE (bb, next, ins) {
+		switch (ins->opcode) {
+		case OP_IREM_IMM:
+		case OP_IDIV_IMM:
+		case OP_IDIV_UN_IMM:
+		case OP_IREM_UN_IMM:
+			/*
+			 * Keep the cases where we could generated optimized code, otherwise convert
+			 * to the non-imm variant.
+			 */
+			if ((ins->opcode == OP_IREM_IMM) && mono_is_power_of_two (ins->inst_imm) >= 0)
+				break;
+			/* Load the immediate into a vreg and use the reg-reg opcode */
+			mono_decompose_op_imm (cfg, bb, ins);
+			break;
+		default:
+			break;
+		}
+	}
+
+	bb->max_vreg = cfg->rs->next_vreg;
}
static const int
case OP_VCALL:
case OP_VCALL_REG:
case OP_VCALL_MEMBASE:
+ case OP_VCALL2:
+ case OP_VCALL2_REG:
+ case OP_VCALL2_MEMBASE:
cinfo = get_call_info (cfg->generic_sharing_context, cfg->mempool, ((MonoCallInst*)ins)->signature, FALSE);
if (cinfo->ret.storage == ArgValuetypeInReg) {
/* Pop the destination address from the stack */
x86_mov_mem_imm (code, ins->inst_p0, ins->inst_c0, 4);
break;
case OP_LOADU4_MEM:
- x86_mov_reg_imm (code, ins->dreg, ins->inst_p0);
- x86_mov_reg_membase (code, ins->dreg, ins->dreg, 0, 4);
+ if (cfg->new_ir)
+ x86_mov_reg_mem (code, ins->dreg, ins->inst_imm, 4);
+ else
+ x86_mov_reg_mem (code, ins->dreg, ins->inst_p0, 4);
break;
case OP_LOAD_MEM:
case OP_LOADI4_MEM:
x86_cdq (code);
x86_div_reg (code, ins->sreg2, TRUE);
break;
- case OP_REM_IMM:
- x86_mov_reg_imm (code, ins->sreg2, ins->inst_imm);
- x86_cdq (code);
- x86_div_reg (code, ins->sreg2, TRUE);
+ case OP_IREM_IMM: {
+ int power = mono_is_power_of_two (ins->inst_imm);
+
+ g_assert (ins->sreg1 == X86_EAX);
+ g_assert (ins->dreg == X86_EAX);
+ g_assert (power >= 0);
+
+ if (power == 1) {
+ /* Based on http://compilers.iecc.com/comparch/article/93-04-079 */
+ x86_cdq (code);
+ x86_alu_reg_imm (code, X86_AND, X86_EAX, 1);
+ /*
+			 * If the dividend is >= 0, this does nothing. If it is negative, it
+			 * transforms %eax=0 into %eax=0, and %eax=1 into %eax=-1.
+ */
+ x86_alu_reg_reg (code, X86_XOR, X86_EAX, X86_EDX);
+ x86_alu_reg_reg (code, X86_SUB, X86_EAX, X86_EDX);
+ } else {
+ /* Based on gcc code */
+
+ /* Add compensation for negative dividents */
+ x86_cdq (code);
+ x86_shift_reg_imm (code, X86_SHR, X86_EDX, 32 - power);
+ x86_alu_reg_reg (code, X86_ADD, X86_EAX, X86_EDX);
+ /* Compute remainder */
+ x86_alu_reg_imm (code, X86_AND, X86_EAX, (1 << power) - 1);
+ /* Remove compensation */
+ x86_alu_reg_reg (code, X86_SUB, X86_EAX, X86_EDX);
+ }
break;
+ }
case OP_IOR:
x86_alu_reg_reg (code, X86_OR, ins->sreg1, ins->sreg2);
break;
mono_add_patch_info (cfg, offset, (MonoJumpInfoType)ins->inst_i1, ins->inst_p0);
x86_mov_reg_imm (code, ins->dreg, 0);
break;
+ case OP_JUMP_TABLE:
+ mono_add_patch_info (cfg, offset, (MonoJumpInfoType)ins->inst_i1, ins->inst_p0);
+ x86_mov_reg_imm (code, ins->dreg, 0);
+ break;
case OP_LOAD_GOTADDR:
x86_call_imm (code, 0);
/*
case OP_FCALL:
case OP_LCALL:
case OP_VCALL:
+ case OP_VCALL2:
case OP_VOIDCALL:
case OP_CALL:
call = (MonoCallInst*)ins;
case OP_FCALL_REG:
case OP_LCALL_REG:
case OP_VCALL_REG:
+ case OP_VCALL2_REG:
case OP_VOIDCALL_REG:
case OP_CALL_REG:
call = (MonoCallInst*)ins;
case OP_FCALL_MEMBASE:
case OP_LCALL_MEMBASE:
case OP_VCALL_MEMBASE:
+ case OP_VCALL2_MEMBASE:
case OP_VOIDCALL_MEMBASE:
case OP_CALL_MEMBASE:
call = (MonoCallInst*)ins;
(gpointer)"mono_arch_rethrow_exception");
break;
}
- case OP_CALL_HANDLER:
- /* Align stack */
-#ifdef __APPLE__
- x86_alu_reg_imm (code, X86_SUB, X86_ESP, 12);
+ case OP_CALL_HANDLER:
+#if __APPLE__
+ x86_alu_reg_imm (code, X86_SUB, X86_ESP, 8);
#endif
mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_BB, ins->inst_target_bb);
x86_call_imm (code, 0);
EMIT_COND_BRANCH (ins, cc_table [mono_opcode_to_cond (ins->opcode)], cc_signed_table [mono_opcode_to_cond (ins->opcode)]);
break;
+ case OP_CMOV_IEQ:
+ case OP_CMOV_IGE:
+ case OP_CMOV_IGT:
+ case OP_CMOV_ILE:
+ case OP_CMOV_ILT:
+ case OP_CMOV_INE_UN:
+ case OP_CMOV_IGE_UN:
+ case OP_CMOV_IGT_UN:
+ case OP_CMOV_ILE_UN:
+ case OP_CMOV_ILT_UN:
+ g_assert (ins->dreg == ins->sreg1);
+ x86_cmov_reg (code, cc_table [mono_opcode_to_cond (ins->opcode)], cc_signed_table [mono_opcode_to_cond (ins->opcode)], ins->dreg, ins->sreg2);
+ break;
+
/* floating point opcodes */
case OP_R8CONST: {
double d = *(double *)ins->inst_p0;
x86_fldcw_membase (code, X86_ESP, 0);
x86_alu_reg_imm (code, X86_ADD, X86_ESP, 4);
break;
- case OP_LCONV_TO_R_UN: {
+ case OP_LCONV_TO_R8_2:
+ x86_push_reg (code, ins->sreg2);
+ x86_push_reg (code, ins->sreg1);
+ x86_fild_membase (code, X86_ESP, 0, TRUE);
+ x86_alu_reg_imm (code, X86_ADD, X86_ESP, 8);
+ break;
+ case OP_LCONV_TO_R4_2:
+ x86_push_reg (code, ins->sreg2);
+ x86_push_reg (code, ins->sreg1);
+ x86_fild_membase (code, X86_ESP, 0, TRUE);
+ /* Change precision */
+ x86_fst_membase (code, X86_ESP, 0, FALSE, TRUE);
+ x86_fld_membase (code, X86_ESP, 0, FALSE);
+ x86_alu_reg_imm (code, X86_ADD, X86_ESP, 8);
+ break;
+ case OP_LCONV_TO_R_UN:
+ case OP_LCONV_TO_R_UN_2: {
static guint8 mn[] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x80, 0x3f, 0x40 };
guint8 *br;
break;
}
- case OP_LCONV_TO_OVF_I: {
+ case OP_LCONV_TO_OVF_I:
+ case OP_LCONV_TO_OVF_I4_2: {
guint8 *br [3], *label [1];
MonoInst *tins;
/* FIXME: Add a separate key for LMF to avoid this */
x86_alu_reg_imm (code, X86_ADD, X86_EAX, G_STRUCT_OFFSET (MonoJitTlsData, lmf));
#endif
- } else {
+ }
+ else {
g_assert (!cfg->compile_aot);
x86_push_imm (code, cfg->domain);
code = emit_call (cfg, code, MONO_PATCH_INFO_INTERNAL_METHOD, (gpointer)"mono_jit_thread_attach");
{
}
+gboolean
+mono_arch_is_inst_imm (gint64 imm)
+{
+ return TRUE;
+}
+
/*
* Support for fast access to the thread-local lmf structure using the GS
* segment register on NPTL + kernel 2.6.x.
return ins;
}
+MonoInst*
+mono_arch_emit_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
+{
+ MonoInst *ins = NULL;
+ int opcode = 0;
+
+ if (cmethod->klass == mono_defaults.math_class) {
+ if (strcmp (cmethod->name, "Sin") == 0) {
+ opcode = OP_SIN;
+ } else if (strcmp (cmethod->name, "Cos") == 0) {
+ opcode = OP_COS;
+ } else if (strcmp (cmethod->name, "Tan") == 0) {
+ opcode = OP_TAN;
+ } else if (strcmp (cmethod->name, "Atan") == 0) {
+ opcode = OP_ATAN;
+ } else if (strcmp (cmethod->name, "Sqrt") == 0) {
+ opcode = OP_SQRT;
+ } else if (strcmp (cmethod->name, "Abs") == 0 && fsig->params [0]->type == MONO_TYPE_R8) {
+ opcode = OP_ABS;
+ }
+
+ if (opcode) {
+ MONO_INST_NEW (cfg, ins, opcode);
+ ins->type = STACK_R8;
+ ins->dreg = mono_alloc_freg (cfg);
+ ins->sreg1 = args [0]->dreg;
+ MONO_ADD_INS (cfg->cbb, ins);
+ }
+
+ if (cfg->opt & MONO_OPT_CMOV) {
+ int opcode = 0;
+
+ if (strcmp (cmethod->name, "Min") == 0) {
+ if (fsig->params [0]->type == MONO_TYPE_I4)
+ opcode = OP_IMIN;
+ } else if (strcmp (cmethod->name, "Max") == 0) {
+ if (fsig->params [0]->type == MONO_TYPE_I4)
+ opcode = OP_IMAX;
+ }
+
+ if (opcode) {
+ MONO_INST_NEW (cfg, ins, opcode);
+ ins->type = STACK_I4;
+ ins->dreg = mono_alloc_ireg (cfg);
+ ins->sreg1 = args [0]->dreg;
+ ins->sreg2 = args [1]->dreg;
+ MONO_ADD_INS (cfg->cbb, ins);
+ }
+ }
+
+#if 0
+ /* OP_FREM is not IEEE compatible */
+ else if (strcmp (cmethod->name, "IEEERemainder") == 0) {
+ MONO_INST_NEW (cfg, ins, OP_FREM);
+ ins->inst_i0 = args [0];
+ ins->inst_i1 = args [1];
+ }
+#endif
+ }
+
+ return ins;
+}
gboolean
mono_arch_print_tree (MonoInst *tree, int arity)
MonoInst* mono_arch_get_domain_intrinsic (MonoCompile* cfg)
{
MonoInst* ins;
-
+
+ return NULL;
+
if (appdomain_tls_offset == -1)
return NULL;
/* we should lower this size and make sure we don't call heavy stack users in the segv handler */
#define MONO_ARCH_SIGNAL_STACK_SIZE (16 * 1024)
-/* Enables OP_LSHL, OP_LSHL_IMM, OP_LSHR, OP_LSHR_IMM, OP_LSHR_UN, OP_LSHR_UN_IMM */
-#define MONO_ARCH_NO_EMULATE_LONG_SHIFT_OPS
-
#define MONO_ARCH_CPU_SPEC x86_desc
#define MONO_MAX_IREGS 8
-#define MONO_MAX_FREGS 6
+#define MONO_MAX_FREGS 8
/* Parameters used by the register allocator */
#define MONO_ARCH_CALLEE_REGS X86_CALLEE_REGS
#define MONO_ARCH_CALLEE_SAVED_REGS X86_CALLER_REGS
-#define MONO_ARCH_CALLEE_FREGS 0
+#define MONO_ARCH_CALLEE_FREGS (0xff & ~(regmask (MONO_ARCH_FPSTACK_SIZE)))
#define MONO_ARCH_CALLEE_SAVED_FREGS 0
#define MONO_ARCH_USE_FPSTACK TRUE
#define MONO_ARCH_INST_FIXED_MASK(desc) ((desc == 'y') ? (X86_BYTE_REGS) : 0)
/* RDX is clobbered by the opcode implementation before accessing sreg2 */
-#define MONO_ARCH_INST_SREG2_MASK(ins) (((ins [MONO_INST_CLOB] == 'a') || (ins [MONO_INST_CLOB] == 'd')) ? (1 << X86_EDX) : 0)
+/*
+ * Originally this contained X86_EDX for div/rem opcodes, but that led to unsolvable
+ * situations since there are only 3 usable registers for local register allocation.
+ * Instead, we handle the sreg2==edx case in the opcodes.
+ */
+#define MONO_ARCH_INST_SREG2_MASK(ins) 0
/*
* L is a generic register pair, while l means eax:rdx
#define MONO_ARCH_INST_IS_REGPAIR(desc) (desc == 'l' || desc == 'L')
#define MONO_ARCH_INST_REGPAIR_REG2(desc,hreg1) (desc == 'l' ? X86_EDX : -1)
-#if defined(__APPLE__)
-#define MONO_ARCH_FRAME_ALIGNMENT 16
-#else
#define MONO_ARCH_FRAME_ALIGNMENT 4
-#endif
/* fixme: align to 16byte instead of 32byte (we align to 32byte to get
* reproduceable results for benchmarks */
#endif
+/* Enables OP_LSHL, OP_LSHL_IMM, OP_LSHR, OP_LSHR_IMM, OP_LSHR_UN, OP_LSHR_UN_IMM */
+#define MONO_ARCH_NO_EMULATE_LONG_SHIFT_OPS
+
#define MONO_ARCH_BIGMUL_INTRINS 1
#define MONO_ARCH_NEED_DIV_CHECK 1
#define MONO_ARCH_HAVE_IS_INT_OVERFLOW 1
#define MONO_ARCH_RGCTX_REG X86_EDX
#define MONO_ARCH_ENABLE_NORMALIZE_OPCODES 1
+#define MONO_ARCH_HAVE_CMOV_OPS 1
+
#if !defined(__APPLE__)
#define MONO_ARCH_AOT_SUPPORTED 1
#endif
+/* Used for optimization, not complete */
+#define MONO_ARCH_IS_OP_MEMBASE(opcode) ((opcode) == OP_X86_PUSH_MEMBASE)
+
+#define MONO_ARCH_EMIT_BOUNDS_CHECK(cfg, array_reg, offset, index_reg) do { \
+ MonoInst *inst; \
+ MONO_INST_NEW ((cfg), inst, OP_X86_COMPARE_MEMBASE_REG); \
+ inst->inst_basereg = array_reg; \
+ inst->inst_offset = offset; \
+ inst->sreg2 = index_reg; \
+ MONO_ADD_INS ((cfg)->cbb, inst); \
+ MONO_EMIT_NEW_COND_EXC (cfg, LE_UN, "IndexOutOfRangeException"); \
+ } while (0)
+
typedef struct {
guint8 *address;
guint8 saved_byte;
static void dec_foreach (MonoInst *tree, MonoCompile *cfg);
+int mono_method_to_ir2 (MonoCompile *cfg, MonoMethod *method, MonoBasicBlock *start_bblock, MonoBasicBlock *end_bblock,
+ MonoInst *return_var, GList *dont_inline, MonoInst **inline_args,
+ guint inline_offset, gboolean is_virtual_call);
+
static int mono_method_to_ir (MonoCompile *cfg, MonoMethod *method, MonoBasicBlock *start_bblock, MonoBasicBlock *end_bblock,
int locals_offset, MonoInst *return_var, GList *dont_inline, MonoInst **inline_args,
guint inline_offset, gboolean is_virtual_call);
#endif
/* helper methods signature */
-static MonoMethodSignature *helper_sig_class_init_trampoline = NULL;
-static MonoMethodSignature *helper_sig_generic_class_init_trampoline = NULL;
-static MonoMethodSignature *helper_sig_rgctx_lazy_fetch_trampoline = NULL;
-static MonoMethodSignature *helper_sig_domain_get = NULL;
+/* FIXME: Make these static again */
+MonoMethodSignature *helper_sig_class_init_trampoline = NULL;
+MonoMethodSignature *helper_sig_domain_get = NULL;
+MonoMethodSignature *helper_sig_generic_class_init_trampoline = NULL;
+MonoMethodSignature *helper_sig_rgctx_lazy_fetch_trampoline = NULL;
static guint32 default_opt = 0;
static gboolean default_opt_set = FALSE;
static MonoCodeManager *global_codeman = NULL;
-static GHashTable *jit_icall_name_hash = NULL;
+/* FIXME: Make this static again */
+GHashTable *jit_icall_name_hash = NULL;
static MonoDebugOptions debug_options;
/* Whenever to check for pending exceptions in managed-to-native wrappers */
gboolean check_for_pending_exc = TRUE;
+/* Whether to disable passing/returning small valuetypes in registers for managed methods */
+gboolean disable_vtypes_in_regs = FALSE;
+
gboolean
mono_running_on_valgrind (void)
{
* dfn: Depth First Number
* block_num: unique ID assigned at bblock creation
*/
-#define NEW_BBLOCK(cfg,new_bb) do { \
- new_bb = mono_mempool_alloc0 ((cfg)->mempool, sizeof (MonoBasicBlock)); \
- MONO_INST_LIST_INIT (&new_bb->ins_list); \
- } while (0)
-
+#define NEW_BBLOCK(cfg) (mono_mempool_alloc0 ((cfg)->mempool, sizeof (MonoBasicBlock)))
#define ADD_BBLOCK(cfg,b) do { \
cfg->cil_offset_to_bb [(b)->cil_code - cfg->cil_start] = (b); \
(b)->block_num = cfg->num_bblocks++; \
(tblock) = cfg->cil_offset_to_bb [(ip) - cfg->cil_start]; \
if (!(tblock)) { \
if ((ip) >= end || (ip) < header->code) UNVERIFIED; \
- NEW_BBLOCK (cfg, (tblock)); \
+ (tblock) = NEW_BBLOCK (cfg); \
(tblock)->cil_code = (ip); \
ADD_BBLOCK (cfg, (tblock)); \
} \
} while (0)
#define CHECK_BBLOCK(target,ip,tblock) do { \
- if ((target) < (ip) && \
- MONO_INST_LIST_EMPTY (&(tblock)->ins_list)) { \
- bb_recheck = g_list_prepend (bb_recheck, (tblock)); \
- if (cfg->verbose_level > 2) \
- g_print ("queued block %d for check at IL%04x from IL%04x\n", (tblock)->block_num, (int)((target) - header->code), (int)((ip) - header->code)); \
- } \
+ if ((target) < (ip) && !(tblock)->code) { \
+ bb_recheck = g_list_prepend (bb_recheck, (tblock)); \
+ if (cfg->verbose_level > 2) g_print ("queued block %d for check at IL%04x from IL%04x\n", (tblock)->block_num, (int)((target) - header->code), (int)((ip) - header->code)); \
+ } \
} while (0)
#define NEW_ICONST(cfg,dest,val) do { \
*
* Unlink two basic blocks.
*/
-static void
+void
mono_unlink_bblock (MonoCompile *cfg, MonoBasicBlock *from, MonoBasicBlock* to)
{
int i, pos;
}
}
+/*
+ * mono_bblocks_linked:
+ *
+ * Return whether BB1 and BB2 are linked in the CFG.
+ */
+static gboolean
+mono_bblocks_linked (MonoBasicBlock *bb1, MonoBasicBlock *bb2)
+{
+ int i;
+
+ for (i = 0; i < bb1->out_count; ++i) {
+ if (bb1->out_bb [i] == bb2)
+ return TRUE;
+ }
+
+ return FALSE;
+}
+
/**
* mono_find_block_region:
*
int i;
array [*dfn] = start;
- /*g_print ("visit %d at %p (BB%ld)\n", *dfn, start->cil_code, start->block_num);*/
+ /* g_print ("visit %d at %p (BB%ld)\n", *dfn, start->cil_code, start->block_num); */
for (i = 0; i < start->out_count; ++i) {
if (start->out_bb [i]->dfn)
continue;
MonoInst *inst;
MonoBasicBlock *bb;
- if (!MONO_INST_LIST_EMPTY (&second->ins_list))
+ if (second->code)
return;
/*
first->out_bb = NULL;
link_bblock (cfg, first, second);
+ second->last_ins = first->last_ins;
+
/*g_print ("start search at %p for %p\n", first->cil_code, second->cil_code);*/
MONO_BB_FOR_EACH_INS (first, inst) {
- MonoInst *inst_next;
-
/*char *code = mono_disasm_code_one (NULL, cfg->method, inst->next->cil_code, NULL);
g_print ("found %p: %s", inst->next->cil_code, code);
g_free (code);*/
- if (inst->cil_code >= second->cil_code)
- continue;
-
- inst_next = mono_inst_list_next (&inst->node, &first->ins_list);
- if (!inst_next)
- break;
-
- if (inst_next->cil_code < second->cil_code)
- continue;
-
- second->ins_list.next = inst->node.next;
- second->ins_list.prev = first->ins_list.prev;
- inst->node.next = &first->ins_list;
- first->ins_list.prev = &inst->node;
-
- second->next_bb = first->next_bb;
- first->next_bb = second;
- return;
+ if (inst->cil_code < second->cil_code && inst->next->cil_code >= second->cil_code) {
+ second->code = inst->next;
+ inst->next = NULL;
+ first->last_ins = inst;
+ second->next_bb = first->next_bb;
+ first->next_bb = second;
+ return;
+ }
}
- if (MONO_INST_LIST_EMPTY (&second->ins_list)) {
+ if (!second->code) {
g_warning ("bblock split failed in %s::%s\n", cfg->method->klass->name, cfg->method->name);
//G_BREAKPOINT ();
}
return opcode;
}
+guint
+mono_type_to_store_membase (MonoCompile *cfg, MonoType *type)
+{
+ if (type->byref)
+ return OP_STORE_MEMBASE_REG;
+
+handle_enum:
+ switch (type->type) {
+ case MONO_TYPE_I1:
+ case MONO_TYPE_U1:
+ case MONO_TYPE_BOOLEAN:
+ return OP_STOREI1_MEMBASE_REG;
+ case MONO_TYPE_I2:
+ case MONO_TYPE_U2:
+ case MONO_TYPE_CHAR:
+ return OP_STOREI2_MEMBASE_REG;
+ case MONO_TYPE_I4:
+ case MONO_TYPE_U4:
+ return OP_STOREI4_MEMBASE_REG;
+ case MONO_TYPE_I:
+ case MONO_TYPE_U:
+ case MONO_TYPE_PTR:
+ case MONO_TYPE_FNPTR:
+ return OP_STORE_MEMBASE_REG;
+ case MONO_TYPE_CLASS:
+ case MONO_TYPE_STRING:
+ case MONO_TYPE_OBJECT:
+ case MONO_TYPE_SZARRAY:
+ case MONO_TYPE_ARRAY:
+ return OP_STORE_MEMBASE_REG;
+ case MONO_TYPE_I8:
+ case MONO_TYPE_U8:
+ return OP_STOREI8_MEMBASE_REG;
+ case MONO_TYPE_R4:
+ return OP_STORER4_MEMBASE_REG;
+ case MONO_TYPE_R8:
+ return OP_STORER8_MEMBASE_REG;
+ case MONO_TYPE_VALUETYPE:
+ if (type->data.klass->enumtype) {
+ type = type->data.klass->enum_basetype;
+ goto handle_enum;
+ }
+ return OP_STOREV_MEMBASE;
+ case MONO_TYPE_TYPEDBYREF:
+ return OP_STOREV_MEMBASE;
+ case MONO_TYPE_GENERICINST:
+ type = &type->data.generic_class->container_class->byval_arg;
+ goto handle_enum;
+ case MONO_TYPE_VAR:
+ case MONO_TYPE_MVAR:
+ /* FIXME: all the arguments must be references for now,
+ * later look inside cfg and see if the arg num is
+ * really a reference
+ */
+ g_assert (cfg->generic_sharing_context);
+ return OP_STORE_MEMBASE_REG;
+ default:
+ g_error ("unknown type 0x%02x in type_to_store_membase", type->type);
+ }
+ return -1;
+}
+
+guint
+mono_type_to_load_membase (MonoCompile *cfg, MonoType *type)
+{
+ if (type->byref)
+ return OP_LOAD_MEMBASE;
+
+ switch (mono_type_get_underlying_type (type)->type) {
+ case MONO_TYPE_I1:
+ return OP_LOADI1_MEMBASE;
+ case MONO_TYPE_U1:
+ case MONO_TYPE_BOOLEAN:
+ return OP_LOADU1_MEMBASE;
+ case MONO_TYPE_I2:
+ return OP_LOADI2_MEMBASE;
+ case MONO_TYPE_U2:
+ case MONO_TYPE_CHAR:
+ return OP_LOADU2_MEMBASE;
+ case MONO_TYPE_I4:
+ return OP_LOADI4_MEMBASE;
+ case MONO_TYPE_U4:
+ return OP_LOADU4_MEMBASE;
+ case MONO_TYPE_I:
+ case MONO_TYPE_U:
+ case MONO_TYPE_PTR:
+ case MONO_TYPE_FNPTR:
+ return OP_LOAD_MEMBASE;
+ case MONO_TYPE_CLASS:
+ case MONO_TYPE_STRING:
+ case MONO_TYPE_OBJECT:
+ case MONO_TYPE_SZARRAY:
+ case MONO_TYPE_ARRAY:
+ return OP_LOAD_MEMBASE;
+ case MONO_TYPE_I8:
+ case MONO_TYPE_U8:
+ return OP_LOADI8_MEMBASE;
+ case MONO_TYPE_R4:
+ return OP_LOADR4_MEMBASE;
+ case MONO_TYPE_R8:
+ return OP_LOADR8_MEMBASE;
+ case MONO_TYPE_VALUETYPE:
+ case MONO_TYPE_TYPEDBYREF:
+ return OP_LOADV_MEMBASE;
+ case MONO_TYPE_GENERICINST:
+ if (mono_type_generic_inst_is_valuetype (type))
+ return OP_LOADV_MEMBASE;
+ else
+ return OP_LOAD_MEMBASE;
+ break;
+ case MONO_TYPE_VAR:
+ case MONO_TYPE_MVAR:
+ /* FIXME: all the arguments must be references for now,
+ * later look inside cfg and see if the arg num is
+ * really a reference
+ */
+ g_assert (cfg->generic_sharing_context);
+ return OP_LOAD_MEMBASE;
+ default:
+ g_error ("unknown type 0x%02x in type_to_load_membase", type->type);
+ }
+ return -1;
+}
+
#ifdef MONO_ARCH_SOFT_FLOAT
static int
condbr_to_fp_br (int opcode)
return mono_type_to_ldind (type);
}
-static guint
+guint
mini_type_to_stind (MonoCompile* cfg, MonoType *type)
{
if (cfg->generic_sharing_context && !type->byref) {
{
switch (opcode) {
case OP_ADD_IMM:
- return OP_PADD;
+#if SIZEOF_VOID_P == 4
+ return OP_IADD;
+#else
+ return OP_LADD;
+#endif
case OP_IADD_IMM:
return OP_IADD;
case OP_LADD_IMM:
return OP_COMPARE;
case OP_ICOMPARE_IMM:
return OP_ICOMPARE;
+ case OP_LOCALLOC_IMM:
+ return OP_LOCALLOC;
default:
printf ("%s\n", mono_inst_name (opcode));
g_assert_not_reached ();
+ return -1;
}
}
* Replace the OP_.._IMM INS with its non IMM variant.
*/
void
-mono_decompose_op_imm (MonoCompile *cfg, MonoInst *ins)
+mono_decompose_op_imm (MonoCompile *cfg, MonoBasicBlock *bb, MonoInst *ins)
{
MonoInst *temp;
MONO_INST_NEW (cfg, temp, OP_ICONST);
temp->inst_c0 = ins->inst_imm;
temp->dreg = mono_regstate_next_int (cfg->rs);
- MONO_INST_LIST_ADD_TAIL (&(temp)->node, &(ins)->node);
+ mono_bblock_insert_before_ins (bb, ins, temp);
ins->opcode = mono_op_imm_to_op (ins->opcode);
- ins->sreg2 = temp->dreg;
+ if (ins->opcode == OP_LOCALLOC)
+ ins->sreg1 = temp->dreg;
+ else
+ ins->sreg2 = temp->dreg;
+
+ bb->max_vreg = MAX (bb->max_vreg, cfg->rs->next_vreg);
}
/*
return cfg->rgctx_var;
}
+static void
+set_vreg_to_inst (MonoCompile *cfg, int vreg, MonoInst *inst)
+{
+ if (vreg >= cfg->vreg_to_inst_len) {
+ MonoInst **tmp = cfg->vreg_to_inst;
+ int size = cfg->vreg_to_inst_len;
+
+ while (vreg >= cfg->vreg_to_inst_len)
+ cfg->vreg_to_inst_len = cfg->vreg_to_inst_len ? cfg->vreg_to_inst_len * 2 : 32;
+ cfg->vreg_to_inst = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoInst*) * cfg->vreg_to_inst_len);
+ if (size)
+ memcpy (cfg->vreg_to_inst, tmp, size * sizeof (MonoInst*));
+ }
+ cfg->vreg_to_inst [vreg] = inst;
+}
+
+#define mono_type_is_long(type) (!(type)->byref && ((mono_type_get_underlying_type (type)->type == MONO_TYPE_I8) || (mono_type_get_underlying_type (type)->type == MONO_TYPE_U8)))
+#define mono_type_is_float(type) (!(type)->byref && (((type)->type == MONO_TYPE_R8) || ((type)->type == MONO_TYPE_R4)))
+
MonoInst*
-mono_compile_create_var (MonoCompile *cfg, MonoType *type, int opcode)
+mono_compile_create_var_for_vreg (MonoCompile *cfg, MonoType *type, int opcode, int vreg)
{
MonoInst *inst;
int num = cfg->num_varinfo;
+ gboolean regpair;
if ((num + 1) >= cfg->varinfo_count) {
int orig_count = cfg->varinfo_count;
- cfg->varinfo_count = (cfg->varinfo_count + 2) * 2;
+ cfg->varinfo_count = cfg->varinfo_count ? (cfg->varinfo_count * 2) : 64;
cfg->varinfo = (MonoInst **)g_realloc (cfg->varinfo, sizeof (MonoInst*) * cfg->varinfo_count);
cfg->vars = (MonoMethodVar *)g_realloc (cfg->vars, sizeof (MonoMethodVar) * cfg->varinfo_count);
memset (&cfg->vars [orig_count], 0, (cfg->varinfo_count - orig_count) * sizeof (MonoMethodVar));
}
- /*g_print ("created temp %d of type 0x%x\n", num, type->type);*/
mono_jit_stats.allocate_var++;
MONO_INST_NEW (cfg, inst, opcode);
inst->inst_c0 = num;
inst->inst_vtype = type;
inst->klass = mono_class_from_mono_type (type);
+ type_to_eval_stack_type (cfg, type, inst);
/* if set to 1 the variable is native */
inst->backend.is_pinvoke = 0;
+ inst->dreg = vreg;
cfg->varinfo [num] = inst;
MONO_INIT_VARINFO (&cfg->vars [num], num);
+ if (vreg != -1)
+ set_vreg_to_inst (cfg, vreg, inst);
+
+#if SIZEOF_VOID_P == 4
+#ifdef MONO_ARCH_SOFT_FLOAT
+ regpair = mono_type_is_long (type) || mono_type_is_float (type);
+#else
+ regpair = mono_type_is_long (type);
+#endif
+#else
+ regpair = FALSE;
+#endif
+
+ if (regpair) {
+ MonoInst *tree;
+
+ /*
+ * These two cannot be allocated using create_var_for_vreg since that would
+ * put it into the cfg->varinfo array, confusing many parts of the JIT.
+ */
+
+ /*
+ * Set flags to VOLATILE so SSA skips it.
+ */
+
+ if (cfg->verbose_level >= 4) {
+ printf (" Create LVAR R%d (R%d, R%d)\n", inst->dreg, inst->dreg + 1, inst->dreg + 2);
+ }
+
+ /* Allocate a dummy MonoInst for the first vreg */
+ MONO_INST_NEW (cfg, tree, OP_LOCAL);
+ tree->dreg = inst->dreg + 1;
+ if (cfg->opt & MONO_OPT_SSA)
+ tree->flags = MONO_INST_VOLATILE;
+ tree->inst_c0 = num;
+ tree->type = STACK_I4;
+ tree->inst_vtype = &mono_defaults.int32_class->byval_arg;
+ tree->klass = mono_class_from_mono_type (tree->inst_vtype);
+
+ set_vreg_to_inst (cfg, inst->dreg + 1, tree);
+
+ /* Allocate a dummy MonoInst for the second vreg */
+ MONO_INST_NEW (cfg, tree, OP_LOCAL);
+ tree->dreg = inst->dreg + 2;
+ if (cfg->opt & MONO_OPT_SSA)
+ tree->flags = MONO_INST_VOLATILE;
+ tree->inst_c0 = num;
+ tree->type = STACK_I4;
+ tree->inst_vtype = &mono_defaults.int32_class->byval_arg;
+ tree->klass = mono_class_from_mono_type (tree->inst_vtype);
+
+ set_vreg_to_inst (cfg, inst->dreg + 2, tree);
+ }
+
cfg->num_varinfo++;
if (cfg->verbose_level > 2)
- g_print ("created temp %d of type %s\n", num, mono_type_get_name (type));
+ g_print ("created temp %d (R%d) of type %s\n", num, vreg, mono_type_get_name (type));
return inst;
}
+MonoInst*
+mono_compile_create_var (MonoCompile *cfg, MonoType *type, int opcode)
+{
+ int dreg;
+
+ if (mono_type_is_long (type))
+ dreg = mono_alloc_dreg (cfg, STACK_I8);
+#ifdef MONO_ARCH_SOFT_FLOAT
+ else if (mono_type_is_float (type))
+ dreg = mono_alloc_dreg (cfg, STACK_R8);
+#endif
+ else
+ /* All the others are unified */
+ dreg = mono_alloc_preg (cfg);
+
+ return mono_compile_create_var_for_vreg (cfg, type, opcode, dreg);
+}
+
/*
* Transform a MonoInst into a load from the variable of index var_index.
*/
return NULL;
}
+/*
+ * mono_add_ins_to_end:
+ *
+ * Same as MONO_ADD_INS, but adds INST before any branches at the end of BB.
+ */
void
mono_add_ins_to_end (MonoBasicBlock *bb, MonoInst *inst)
{
- MonoInst *last = mono_inst_list_last (&bb->ins_list);
+ int opcode;
- if (last && ((last->opcode >= CEE_BEQ &&
- last->opcode <= CEE_BLT_UN) ||
- last->opcode == OP_BR ||
- last->opcode == OP_SWITCH)) {
- MONO_INST_LIST_ADD_TAIL (&inst->node, &last->node);
- } else {
+ if (!bb->code) {
MONO_ADD_INS (bb, inst);
+ return;
+ }
+
+ switch (bb->last_ins->opcode) {
+ case OP_BR:
+ case OP_BR_REG:
+ case CEE_BEQ:
+ case CEE_BGE:
+ case CEE_BGT:
+ case CEE_BLE:
+ case CEE_BLT:
+ case CEE_BNE_UN:
+ case CEE_BGE_UN:
+ case CEE_BGT_UN:
+ case CEE_BLE_UN:
+ case CEE_BLT_UN:
+ case OP_SWITCH:
+ mono_bblock_insert_before_ins (bb, bb->last_ins, inst);
+ break;
+ default:
+ if (MONO_IS_COND_BRANCH_OP (bb->last_ins)) {
+ /* Need to insert the ins before the compare */
+ if (bb->code == bb->last_ins) {
+ mono_bblock_insert_before_ins (bb, bb->last_ins, inst);
+ return;
+ }
+
+ if (bb->code->next == bb->last_ins) {
+ /* Only two instructions */
+ opcode = bb->code->opcode;
+
+ if ((opcode == OP_COMPARE) || (opcode == OP_COMPARE_IMM) || (opcode == OP_ICOMPARE) || (opcode == OP_ICOMPARE_IMM) || (opcode == OP_FCOMPARE) || (opcode == OP_LCOMPARE) || (opcode == OP_LCOMPARE_IMM)) {
+ /* NEW IR */
+ mono_bblock_insert_before_ins (bb, bb->code, inst);
+ } else {
+ mono_bblock_insert_before_ins (bb, bb->last_ins, inst);
+ }
+ } else {
+ opcode = bb->last_ins->prev->opcode;
+
+ if ((opcode == OP_COMPARE) || (opcode == OP_COMPARE_IMM) || (opcode == OP_ICOMPARE) || (opcode == OP_ICOMPARE_IMM) || (opcode == OP_FCOMPARE) || (opcode == OP_LCOMPARE) || (opcode == OP_LCOMPARE_IMM)) {
+ /* NEW IR */
+ mono_bblock_insert_before_ins (bb, bb->last_ins->prev, inst);
+ } else {
+ mono_bblock_insert_before_ins (bb, bb->last_ins, inst);
+ }
+ }
+ }
+ else
+ MONO_ADD_INS (bb, inst);
+ break;
+ }
+}
+
+/**
+ * mono_replace_ins:
+ *
+ * Replace INS with its decomposition which is stored in a series of bblocks starting
+ * at FIRST_BB and ending at LAST_BB. On entry, PREV points to the predecessor of INS.
+ * On return, it will be set to the last ins of the decomposition.
+ */
+void
+mono_replace_ins (MonoCompile *cfg, MonoBasicBlock *bb, MonoInst *ins, MonoInst **prev, MonoBasicBlock *first_bb, MonoBasicBlock *last_bb)
+{
+ MonoInst *next = ins->next;
+
+ if (next && next->opcode == OP_NOP) {
+ /* Avoid NOPs following branches */
+ ins->next = next->next;
+ next = next->next;
+ }
+
+ if (first_bb == last_bb) {
+ /*
+ * Only one replacement bb, merge the code into
+ * the current bb.
+ */
+
+ /* Delete links between the first_bb and its successors */
+ while (first_bb->out_count)
+ mono_unlink_bblock (cfg, first_bb, first_bb->out_bb [0]);
+
+ /* Head */
+ if (*prev) {
+ (*prev)->next = first_bb->code;
+ first_bb->code->prev = (*prev);
+ } else {
+ bb->code = first_bb->code;
+ }
+
+ /* Tail */
+ last_bb->last_ins->next = next;
+ if (next)
+ next->prev = last_bb->last_ins;
+ else
+ bb->last_ins = last_bb->last_ins;
+ *prev = last_bb->last_ins;
+ } else {
+ int i, count;
+ MonoBasicBlock **tmp_bblocks, *tmp;
+ MonoInst *last;
+
+ /* Multiple BBs */
+
+ /* Set region */
+ for (tmp = first_bb; tmp; tmp = tmp->next_bb)
+ tmp->region = bb->region;
+
+ /* Split the original bb */
+ if (ins->next)
+ ins->next->prev = NULL;
+ ins->next = NULL;
+ bb->last_ins = ins;
+
+ /* Merge the second part of the original bb into the last bb */
+ if (last_bb->last_ins) {
+ last_bb->last_ins->next = next;
+ if (next)
+ next->prev = last_bb->last_ins;
+ } else {
+ last_bb->code = next;
+ }
+
+ if (next) {
+ for (last = next; last->next != NULL; last = last->next)
+ ;
+ last_bb->last_ins = last;
+ }
+
+ for (i = 0; i < bb->out_count; ++i)
+ link_bblock (cfg, last_bb, bb->out_bb [i]);
+
+ /* Merge the first (dummy) bb to the original bb */
+ if (*prev) {
+ (*prev)->next = first_bb->code;
+ first_bb->code->prev = (*prev);
+ } else {
+ bb->code = first_bb->code;
+ }
+ bb->last_ins = first_bb->last_ins;
+
+ /* Delete the links between the original bb and its successors */
+ tmp_bblocks = bb->out_bb;
+ count = bb->out_count;
+ for (i = 0; i < count; ++i)
+ mono_unlink_bblock (cfg, bb, tmp_bblocks [i]);
+
+ /* Add links between the original bb and the first_bb's successors */
+ for (i = 0; i < first_bb->out_count; ++i) {
+ MonoBasicBlock *out_bb = first_bb->out_bb [i];
+
+ link_bblock (cfg, bb, out_bb);
+ }
+ /* Delete links between the first_bb and its successors */
+ for (i = 0; i < bb->out_count; ++i) {
+ MonoBasicBlock *out_bb = bb->out_bb [i];
+
+ mono_unlink_bblock (cfg, first_bb, out_bb);
+ }
+ last_bb->next_bb = bb->next_bb;
+ bb->next_bb = first_bb->next_bb;
+
+ *prev = NULL;
}
}
MonoInst **args, int calli, int virtual, const guint8 *ip, gboolean to_end)
{
MonoCallInst *call;
- MonoInst *arg, *n;
+ MonoInst *arg;
MONO_INST_NEW_CALL (cfg, call, ret_type_to_call_opcode (sig->ret, calli, virtual, cfg->generic_sharing_context));
call = mono_arch_call_opcode (cfg, bblock, call, virtual);
type_to_eval_stack_type (cfg, sig->ret, &call->inst);
- MONO_INST_LIST_FOR_EACH_ENTRY_SAFE (arg, n, &call->out_args, node) {
+ for (arg = call->out_args; arg;) {
+ MonoInst *narg = arg->next;
+ arg->next = NULL;
if (!arg->cil_code)
arg->cil_code = ip;
if (to_end)
mono_add_ins_to_end (bblock, arg);
else
MONO_ADD_INS (bblock, arg);
+ arg = narg;
}
return call;
}
static void
mono_emulate_opcode (MonoCompile *cfg, MonoInst *tree, MonoInst **iargs, MonoJitICallInfo *info)
{
- MonoInst *ins, *temp = NULL, *store, *load;
- MonoInstList *head, *list;
+ MonoInst *ins, *temp = NULL, *store, *load, *begin;
+ MonoInst *last_arg = NULL;
int nargs;
MonoCallInst *call;
//mono_print_tree_nl (tree);
MONO_INST_NEW_CALL (cfg, call, ret_type_to_call_opcode (info->sig->ret, FALSE, FALSE, cfg->generic_sharing_context));
ins = (MonoInst*)call;
- MONO_INST_LIST_INIT (&ins->node);
call->inst.cil_code = tree->cil_code;
call->args = iargs;
temp = mono_compile_create_var (cfg, info->sig->ret, OP_LOCAL);
temp->flags |= MONO_INST_IS_TEMP;
NEW_TEMPSTORE (cfg, store, temp->inst_c0, ins);
- MONO_INST_LIST_INIT (&store->node);
/* FIXME: handle CEE_STIND_R4 */
store->cil_code = tree->cil_code;
} else {
nargs = info->sig->param_count + info->sig->hasthis;
- if (nargs) {
- MONO_INST_LIST_ADD_TAIL (&store->node,
- &call->out_args);
- list = &call->out_args;
- } else {
- list = &store->node;
- }
+ for (last_arg = call->out_args; last_arg && last_arg->next; last_arg = last_arg->next) ;
+
+ if (nargs)
+ last_arg->next = store;
+
+ if (nargs)
+ begin = call->out_args;
+ else
+ begin = store;
if (cfg->prev_ins) {
/*
* node before it is called for its children. dec_foreach needs to
* take this into account.
*/
- head = &cfg->prev_ins->node;
+ store->next = cfg->prev_ins->next;
+ cfg->prev_ins->next = begin;
} else {
- head = &cfg->cbb->ins_list;
+ store->next = cfg->cbb->code;
+ cfg->cbb->code = begin;
}
- MONO_INST_LIST_SPLICE_INIT (list, head);
-
call->fptr = mono_icall_get_wrapper (info);
if (!MONO_TYPE_IS_VOID (info->sig->ret)) {
for (i = 0; i < arity; i++)
res->params [i + 1] = &mono_defaults.int_class->byval_arg;
- res->ret = &mono_defaults.int_class->byval_arg;
+ res->ret = &mono_defaults.object_class->byval_arg;
g_hash_table_insert (sighash, GINT_TO_POINTER (arity), res);
mono_jit_unlock ();
return res;
}
-static MonoMethod*
-get_memcpy_method (void)
+MonoJitICallInfo *
+mono_get_array_new_va_icall (int rank)
{
- static MonoMethod *memcpy_method = NULL;
+ MonoMethodSignature *esig;
+ char icall_name [256];
+ char *name;
+ MonoJitICallInfo *info;
+
+ /* Need to register the icall so it gets an icall wrapper */
+ sprintf (icall_name, "ves_array_new_va_%d", rank);
+
+ mono_jit_lock ();
+ info = mono_find_jit_icall_by_name (icall_name);
+ if (info == NULL) {
+ esig = mono_get_array_new_va_signature (rank);
+ name = g_strdup (icall_name);
+ info = mono_register_jit_icall (mono_array_new_va, name, esig, FALSE);
+
+ g_hash_table_insert (jit_icall_name_hash, name, name);
+ }
+ mono_jit_unlock ();
+
+ return info;
+}
+
+static MonoMethod*
+get_memcpy_method (void)
+{
+ static MonoMethod *memcpy_method = NULL;
if (!memcpy_method) {
memcpy_method = mono_class_get_method_from_name (mono_defaults.string_class, "memcpy", 3);
if (!memcpy_method)
return obj;
}
-static MonoJitICallInfo*
-mono_get_array_new_va_icall (int rank)
-{
- char icall_name [256];
- char *name;
- MonoMethodSignature *esig;
- MonoJitICallInfo *info;
-
- /* Need to register the icall so it gets an icall wrapper */
- sprintf (icall_name, "ves_array_new_va_%d", rank);
-
- mono_jit_lock ();
- info = mono_find_jit_icall_by_name (icall_name);
- if (info == NULL) {
- esig = mono_get_array_new_va_signature (rank);
- name = g_strdup (icall_name);
- info = mono_register_jit_icall (mono_array_new_va, name, esig, FALSE);
-
- g_hash_table_insert (jit_icall_name_hash, name, name);
- }
- mono_jit_unlock ();
-
- return info;
-}
-
static int
handle_array_new (MonoCompile *cfg, MonoBasicBlock *bblock, int rank, MonoInst **sp, unsigned char *ip)
{
NEW_TEMPSTORE (cfg, store, cfg->got_var->inst_c0, get_got);
/* Add it to the start of the first bblock */
- MONO_INST_LIST_ADD (&store->node, &cfg->bb_entry->ins_list);
+ if (cfg->bb_entry->code) {
+ store->next = cfg->bb_entry->code;
+ cfg->bb_entry->code = store;
+ }
+ else
+ MONO_ADD_INS (cfg->bb_entry, store);
cfg->got_var_allocated = TRUE;
#define CODE_IS_STLOC(ip) (((ip) [0] >= CEE_STLOC_0 && (ip) [0] <= CEE_STLOC_3) || ((ip) [0] == CEE_STLOC_S))
-static gboolean
+gboolean
mini_class_is_system_array (MonoClass *klass)
{
if (klass->parent == mono_defaults.array_class)
mono_compile_create_var (cfg, cheader->locals [i], OP_LOCAL);
	/* allocate start and end blocks */
- NEW_BBLOCK (cfg, sbblock);
+ sbblock = NEW_BBLOCK (cfg);
sbblock->block_num = cfg->num_bblocks++;
sbblock->real_offset = real_offset;
- NEW_BBLOCK (cfg, ebblock);
+ ebblock = NEW_BBLOCK (cfg);
ebblock->block_num = cfg->num_bblocks++;
ebblock->real_offset = real_offset;
}
}
-static gpointer
-create_rgctx_lazy_fetch_trampoline (guint32 offset)
+gpointer
+mini_create_rgctx_lazy_fetch_trampoline (guint32 offset)
{
static gboolean inited = FALSE;
static int num_trampolines = 0;
MonoInst *rgc_ptr, guint32 slot, const unsigned char *ip)
{
MonoMethodSignature *sig = helper_sig_rgctx_lazy_fetch_trampoline;
- guint8 *tramp = create_rgctx_lazy_fetch_trampoline (slot);
+ guint8 *tramp = mini_create_rgctx_lazy_fetch_trampoline (slot);
int temp;
MonoInst *field;
return dest;
}
-static MonoObject*
-mono_object_castclass (MonoObject *obj, MonoClass *klass)
-{
- if (!obj)
- return NULL;
-
- if (mono_object_isinst (obj, klass))
- return obj;
-
- mono_raise_exception (mono_exception_from_name (mono_defaults.corlib,
- "System", "InvalidCastException"));
-
- return NULL;
-}
-
static int
emit_castclass (MonoClass *klass, guint32 token, int context_used, gboolean inst_is_castclass, MonoCompile *cfg,
MonoMethod *method, MonoInst **arg_array, MonoType **param_types, GList *dont_inline,
goto do_return;
}
-static gboolean
+gboolean
mini_assembly_can_skip_verification (MonoDomain *domain, MonoMethod *method)
{
MonoAssembly *assembly = method->klass->image->assembly;
*
* Returns true if the method is invalid.
*/
-static gboolean
+gboolean
mini_method_verify (MonoCompile *cfg, MonoMethod *method)
{
GSList *tmp, *res;
cfg->coverage_info = mono_profiler_coverage_alloc (cfg->method, header->code_size);
/* ENTRY BLOCK */
- NEW_BBLOCK (cfg, start_bblock);
- cfg->bb_entry = start_bblock;
+ cfg->bb_entry = start_bblock = NEW_BBLOCK (cfg);
start_bblock->cil_code = NULL;
start_bblock->cil_length = 0;
start_bblock->block_num = cfg->num_bblocks++;
/* EXIT BLOCK */
- NEW_BBLOCK (cfg, end_bblock);
- cfg->bb_exit = end_bblock;
+ cfg->bb_exit = end_bblock = NEW_BBLOCK (cfg);
end_bblock->cil_code = NULL;
end_bblock->cil_length = 0;
end_bblock->block_num = cfg->num_bblocks++;
}
/* FIRST CODE BLOCK */
- NEW_BBLOCK (cfg, bblock);
+ bblock = NEW_BBLOCK (cfg);
bblock->cil_code = ip;
ADD_BBLOCK (cfg, bblock);
if ((header->init_locals || (cfg->method == method && (cfg->opt & MONO_OPT_SHARED))) || cfg->compile_aot || security || pinvoke) {
/* we use a separate basic block for the initialization code */
- NEW_BBLOCK (cfg, init_localsbb);
- cfg->bb_init = init_localsbb;
+ cfg->bb_init = init_localsbb = NEW_BBLOCK (cfg);
init_localsbb->real_offset = real_offset;
start_bblock->next_bb = init_localsbb;
init_localsbb->next_bb = bblock;
*/
if ((ins->opcode == OP_LSHR_UN) && (ins->type == STACK_I8)
&& (ins->inst_right->opcode == OP_ICONST) && (ins->inst_right->inst_c0 == 32)) {
- ins->opcode = OP_LONG_SHRUN_32;
+ ins->opcode = OP_LSHR_UN_32;
/*g_print ("applied long shr speedup to %s\n", cfg->method->name);*/
ip++;
break;
typedef struct {
MonoClass *vtype;
- GList *active;
+ GList *active, *inactive;
GSList *slots;
} StackSlotInfo;
return new_list;
}
-/*
- * mono_allocate_stack_slots_full:
- *
- * Allocate stack slots for all non register allocated variables using a
- * linear scan algorithm.
- * Returns: an array of stack offsets.
- * STACK_SIZE is set to the amount of stack space needed.
- * STACK_ALIGN is set to the alignment needed by the locals area.
- */
-gint32*
-mono_allocate_stack_slots_full (MonoCompile *cfg, gboolean backward, guint32 *stack_size, guint32 *stack_align)
+static gint
+compare_by_interval_start_pos_func (gconstpointer a, gconstpointer b)
+{
+ MonoMethodVar *v1 = (MonoMethodVar*)a;
+ MonoMethodVar *v2 = (MonoMethodVar*)b;
+
+ if (v1 == v2)
+ return 0;
+ else if (v1->interval->range && v2->interval->range)
+ return v1->interval->range->from - v2->interval->range->from;
+ else if (v1->interval->range)
+ return -1;
+ else
+ return 1;
+}
+
+#if 0
+#define LSCAN_DEBUG(a) do { a; } while (0)
+#else
+#define LSCAN_DEBUG(a)
+#endif
+
+static gint32*
+mono_allocate_stack_slots_full2 (MonoCompile *cfg, gboolean backward, guint32 *stack_size, guint32 *stack_align)
{
int i, slot, offset, size;
guint32 align;
MonoMethodVar *vmv;
MonoInst *inst;
gint32 *offsets;
- GList *vars = NULL, *l;
+ GList *vars = NULL, *l, *unhandled;
StackSlotInfo *scalar_stack_slots, *vtype_stack_slots, *slot_info;
MonoType *t;
int nvtypes;
+ LSCAN_DEBUG (printf ("Allocate Stack Slots 2 for %s:\n", mono_method_full_name (cfg->method, TRUE)));
+
scalar_stack_slots = mono_mempool_alloc0 (cfg->mempool, sizeof (StackSlotInfo) * MONO_TYPE_PINNED);
vtype_stack_slots = NULL;
nvtypes = 0;
vars = g_list_prepend (vars, vmv);
}
- vars = mono_varlist_sort (cfg, vars, 0);
+ vars = g_list_sort (g_list_copy (vars), compare_by_interval_start_pos_func);
+
+ /* Sanity check */
+ /*
+ i = 0;
+ for (unhandled = vars; unhandled; unhandled = unhandled->next) {
+ MonoMethodVar *current = unhandled->data;
+
+ if (current->interval->range) {
+ g_assert (current->interval->range->from >= i);
+ i = current->interval->range->from;
+ }
+ }
+ */
+
offset = 0;
*stack_align = 0;
- for (l = vars; l; l = l->next) {
- vmv = l->data;
+ for (unhandled = vars; unhandled; unhandled = unhandled->next) {
+ MonoMethodVar *current = unhandled->data;
+
+ vmv = current;
inst = cfg->varinfo [vmv->idx];
/* inst->backend.is_pinvoke indicates native sized value types, this is used by the
}
t = mono_type_get_underlying_type (inst->inst_vtype);
- if (t->byref) {
- slot_info = &scalar_stack_slots [MONO_TYPE_I];
- } else {
- switch (t->type) {
- case MONO_TYPE_GENERICINST:
- if (!mono_type_generic_inst_is_valuetype (t)) {
- slot_info = &scalar_stack_slots [t->type];
- break;
- }
- /* Fall through */
- case MONO_TYPE_VALUETYPE:
- if (!vtype_stack_slots)
- vtype_stack_slots = mono_mempool_alloc0 (cfg->mempool, sizeof (StackSlotInfo) * 256);
- for (i = 0; i < nvtypes; ++i)
- if (t->data.klass == vtype_stack_slots [i].vtype)
- break;
- if (i < nvtypes)
- slot_info = &vtype_stack_slots [i];
- else {
- g_assert (nvtypes < 256);
- vtype_stack_slots [nvtypes].vtype = t->data.klass;
- slot_info = &vtype_stack_slots [nvtypes];
- nvtypes ++;
- }
+ switch (t->type) {
+ case MONO_TYPE_GENERICINST:
+ if (!mono_type_generic_inst_is_valuetype (t)) {
+ slot_info = &scalar_stack_slots [t->type];
break;
- case MONO_TYPE_CLASS:
- case MONO_TYPE_OBJECT:
- case MONO_TYPE_ARRAY:
- case MONO_TYPE_SZARRAY:
- case MONO_TYPE_STRING:
- case MONO_TYPE_PTR:
- case MONO_TYPE_I:
- case MONO_TYPE_U:
+ }
+ /* Fall through */
+ case MONO_TYPE_VALUETYPE:
+ if (!vtype_stack_slots)
+ vtype_stack_slots = mono_mempool_alloc0 (cfg->mempool, sizeof (StackSlotInfo) * 256);
+ for (i = 0; i < nvtypes; ++i)
+ if (t->data.klass == vtype_stack_slots [i].vtype)
+ break;
+ if (i < nvtypes)
+ slot_info = &vtype_stack_slots [i];
+ else {
+ g_assert (nvtypes < 256);
+ vtype_stack_slots [nvtypes].vtype = t->data.klass;
+ slot_info = &vtype_stack_slots [nvtypes];
+ nvtypes ++;
+ }
+ break;
+ case MONO_TYPE_CLASS:
+ case MONO_TYPE_OBJECT:
+ case MONO_TYPE_ARRAY:
+ case MONO_TYPE_SZARRAY:
+ case MONO_TYPE_STRING:
+ case MONO_TYPE_PTR:
+ case MONO_TYPE_I:
+ case MONO_TYPE_U:
#if SIZEOF_VOID_P == 4
- case MONO_TYPE_I4:
+ case MONO_TYPE_I4:
#else
- case MONO_TYPE_I8:
+ case MONO_TYPE_I8:
+ /* Share non-float stack slots of the same size */
+ slot_info = &scalar_stack_slots [MONO_TYPE_CLASS];
+ break;
#endif
- /* Share non-float stack slots of the same size */
- slot_info = &scalar_stack_slots [MONO_TYPE_CLASS];
- break;
- default:
- slot_info = &scalar_stack_slots [t->type];
- }
+ default:
+ slot_info = &scalar_stack_slots [t->type];
}
slot = 0xffffff;
if (cfg->comp_done & MONO_COMP_LIVENESS) {
+ int pos;
+ gboolean changed;
+
//printf ("START %2d %08x %08x\n", vmv->idx, vmv->range.first_use.abs_pos, vmv->range.last_use.abs_pos);
-
- /* expire old intervals in active */
- while (slot_info->active) {
- MonoMethodVar *amv = (MonoMethodVar *)slot_info->active->data;
- if (amv->range.last_use.abs_pos > vmv->range.first_use.abs_pos)
- break;
+ if (!current->interval->range) {
+ if (inst->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT))
+ pos = ~0;
+ else {
+ /* Dead */
+ inst->flags |= MONO_INST_IS_DEAD;
+ continue;
+ }
+ }
+ else
+ pos = current->interval->range->from;
+
+ LSCAN_DEBUG (printf ("process R%d ", inst->dreg));
+ if (current->interval->range)
+ LSCAN_DEBUG (mono_linterval_print (current->interval));
+ LSCAN_DEBUG (printf ("\n"));
+
+ /* Check for intervals in active which expired or inactive */
+ changed = TRUE;
+ /* FIXME: Optimize this */
+ while (changed) {
+ changed = FALSE;
+ for (l = slot_info->active; l != NULL; l = l->next) {
+ MonoMethodVar *v = (MonoMethodVar*)l->data;
+
+ if (v->interval->last_range->to < pos) {
+ slot_info->active = g_list_delete_link (slot_info->active, l);
+ slot_info->slots = g_slist_prepend_mempool (cfg->mempool, slot_info->slots, GINT_TO_POINTER (offsets [v->idx]));
+ LSCAN_DEBUG (printf ("Interval R%d has expired, adding 0x%x to slots\n", cfg->varinfo [v->idx]->dreg, offsets [v->idx]));
+ changed = TRUE;
+ break;
+ }
+ else if (!mono_linterval_covers (v->interval, pos)) {
+ slot_info->inactive = g_list_append (slot_info->inactive, v);
+ slot_info->active = g_list_delete_link (slot_info->active, l);
+ LSCAN_DEBUG (printf ("Interval R%d became inactive\n", cfg->varinfo [v->idx]->dreg));
+ changed = TRUE;
+ break;
+ }
+ }
+ }
- //printf ("EXPIR %2d %08x %08x C%d R%d\n", amv->idx, amv->range.first_use.abs_pos, amv->range.last_use.abs_pos, amv->spill_costs, amv->reg);
+ /* Check for intervals in inactive which expired or active */
+ changed = TRUE;
+ /* FIXME: Optimize this */
+ while (changed) {
+ changed = FALSE;
+ for (l = slot_info->inactive; l != NULL; l = l->next) {
+ MonoMethodVar *v = (MonoMethodVar*)l->data;
- slot_info->active = g_list_delete_link (slot_info->active, slot_info->active);
- slot_info->slots = g_slist_prepend_mempool (cfg->mempool, slot_info->slots, GINT_TO_POINTER (offsets [amv->idx]));
+ if (v->interval->last_range->to < pos) {
+ slot_info->inactive = g_list_delete_link (slot_info->inactive, l);
+ // FIXME: Enabling this seems to cause impossible to debug crashes
+ //slot_info->slots = g_slist_prepend_mempool (cfg->mempool, slot_info->slots, GINT_TO_POINTER (offsets [v->idx]));
+ LSCAN_DEBUG (printf ("Interval R%d has expired, adding 0x%x to slots\n", cfg->varinfo [v->idx]->dreg, offsets [v->idx]));
+ changed = TRUE;
+ break;
+ }
+ else if (mono_linterval_covers (v->interval, pos)) {
+ slot_info->active = g_list_append (slot_info->active, v);
+ slot_info->inactive = g_list_delete_link (slot_info->inactive, l);
+ LSCAN_DEBUG (printf ("\tInterval R%d became active\n", cfg->varinfo [v->idx]->dreg));
+ changed = TRUE;
+ break;
+ }
+ }
}
/*
slot_info->slots = slot_info->slots->next;
}
+ /* FIXME: We might want to consider the inactive intervals as well if slot_info->slots is empty */
+
slot_info->active = mono_varlist_insert_sorted (cfg, slot_info->active, vmv, TRUE);
}
}
+#if 0
{
static int count = 0;
count ++;
- /*
- if (count == atoi (getenv ("COUNT")))
+ if (count == atoi (getenv ("COUNT3")))
printf ("LAST: %s\n", mono_method_full_name (cfg->method, TRUE));
- if (count > atoi (getenv ("COUNT")))
+ if (count > atoi (getenv ("COUNT3")))
slot = 0xffffff;
else {
mono_print_tree_nl (inst);
}
- */
}
+#endif
- if (cfg->disable_reuse_stack_slots)
- slot = 0xffffff;
+ LSCAN_DEBUG (printf ("R%d %s -> 0x%x\n", inst->dreg, mono_type_full_name (t), slot));
if (slot == 0xffffff) {
/*
return offsets;
}
+/*
+ * mono_allocate_stack_slots_full:
+ *
+ * Allocate stack slots for all non register allocated variables using a
+ * linear scan algorithm.
+ * Returns: an array of stack offsets.
+ * STACK_SIZE is set to the amount of stack space needed.
+ * STACK_ALIGN is set to the alignment needed by the locals area.
+ */
gint32*
-mono_allocate_stack_slots (MonoCompile *m, guint32 *stack_size, guint32 *stack_align)
+mono_allocate_stack_slots_full (MonoCompile *cfg, gboolean backward, guint32 *stack_size, guint32 *stack_align)
{
- return mono_allocate_stack_slots_full (m, TRUE, stack_size, stack_align);
-}
+ int i, slot, offset, size;
+ guint32 align;
+ MonoMethodVar *vmv;
+ MonoInst *inst;
+ gint32 *offsets;
+ GList *vars = NULL, *l;
+ StackSlotInfo *scalar_stack_slots, *vtype_stack_slots, *slot_info;
+ MonoType *t;
+ int nvtypes;
-void
-mono_register_opcode_emulation (int opcode, const char *name, const char *sigstr, gpointer func, gboolean no_throw)
-{
- MonoJitICallInfo *info;
- MonoMethodSignature *sig = mono_create_icall_signature (sigstr);
+ if ((cfg->num_varinfo > 0) && MONO_VARINFO (cfg, 0)->interval)
+ return mono_allocate_stack_slots_full2 (cfg, backward, stack_size, stack_align);
- if (!emul_opcode_map)
- emul_opcode_map = g_new0 (MonoJitICallInfo*, OP_LAST + 1);
+ scalar_stack_slots = mono_mempool_alloc0 (cfg->mempool, sizeof (StackSlotInfo) * MONO_TYPE_PINNED);
+ vtype_stack_slots = NULL;
+ nvtypes = 0;
- g_assert (!sig->hasthis);
- g_assert (sig->param_count < 3);
+ offsets = mono_mempool_alloc (cfg->mempool, sizeof (gint32) * cfg->num_varinfo);
+ for (i = 0; i < cfg->num_varinfo; ++i)
+ offsets [i] = -1;
- info = mono_register_jit_icall (func, name, sig, no_throw);
+ for (i = cfg->locals_start; i < cfg->num_varinfo; i++) {
+ inst = cfg->varinfo [i];
+ vmv = MONO_VARINFO (cfg, i);
- emul_opcode_map [opcode] = info;
-}
+ if ((inst->flags & MONO_INST_IS_DEAD) || inst->opcode == OP_REGVAR || inst->opcode == OP_REGOFFSET)
+ continue;
-static void
-register_icall (gpointer func, const char *name, const char *sigstr, gboolean save)
-{
- MonoMethodSignature *sig;
+ vars = g_list_prepend (vars, vmv);
+ }
- if (sigstr)
- sig = mono_create_icall_signature (sigstr);
- else
+ vars = mono_varlist_sort (cfg, vars, 0);
+ offset = 0;
+ *stack_align = 0;
+ for (l = vars; l; l = l->next) {
+ vmv = l->data;
+ inst = cfg->varinfo [vmv->idx];
+
+ /* inst->backend.is_pinvoke indicates native sized value types, this is used by the
+ * pinvoke wrappers when they call functions returning structures */
+ if (inst->backend.is_pinvoke && MONO_TYPE_ISSTRUCT (inst->inst_vtype) && inst->inst_vtype->type != MONO_TYPE_TYPEDBYREF)
+ size = mono_class_native_size (inst->inst_vtype->data.klass, &align);
+ else {
+ int ialign;
+
+ size = mono_type_size (inst->inst_vtype, &ialign);
+ align = ialign;
+ }
+
+ t = mono_type_get_underlying_type (inst->inst_vtype);
+ if (t->byref) {
+ slot_info = &scalar_stack_slots [MONO_TYPE_I];
+ } else {
+ switch (t->type) {
+ case MONO_TYPE_GENERICINST:
+ if (!mono_type_generic_inst_is_valuetype (t)) {
+ slot_info = &scalar_stack_slots [t->type];
+ break;
+ }
+ /* Fall through */
+ case MONO_TYPE_VALUETYPE:
+ if (!vtype_stack_slots)
+ vtype_stack_slots = mono_mempool_alloc0 (cfg->mempool, sizeof (StackSlotInfo) * 256);
+ for (i = 0; i < nvtypes; ++i)
+ if (t->data.klass == vtype_stack_slots [i].vtype)
+ break;
+ if (i < nvtypes)
+ slot_info = &vtype_stack_slots [i];
+ else {
+ g_assert (nvtypes < 256);
+ vtype_stack_slots [nvtypes].vtype = t->data.klass;
+ slot_info = &vtype_stack_slots [nvtypes];
+ nvtypes ++;
+ }
+ break;
+ case MONO_TYPE_CLASS:
+ case MONO_TYPE_OBJECT:
+ case MONO_TYPE_ARRAY:
+ case MONO_TYPE_SZARRAY:
+ case MONO_TYPE_STRING:
+ case MONO_TYPE_PTR:
+ case MONO_TYPE_I:
+ case MONO_TYPE_U:
+#if SIZEOF_VOID_P == 4
+ case MONO_TYPE_I4:
+#else
+ case MONO_TYPE_I8:
+#endif
+ /* Share non-float stack slots of the same size */
+ slot_info = &scalar_stack_slots [MONO_TYPE_CLASS];
+ break;
+ default:
+ slot_info = &scalar_stack_slots [t->type];
+ }
+ }
+
+ slot = 0xffffff;
+ if (cfg->comp_done & MONO_COMP_LIVENESS) {
+ //printf ("START %2d %08x %08x\n", vmv->idx, vmv->range.first_use.abs_pos, vmv->range.last_use.abs_pos);
+
+ /* expire old intervals in active */
+ while (slot_info->active) {
+ MonoMethodVar *amv = (MonoMethodVar *)slot_info->active->data;
+
+ if (amv->range.last_use.abs_pos > vmv->range.first_use.abs_pos)
+ break;
+
+ //printf ("EXPIR %2d %08x %08x C%d R%d\n", amv->idx, amv->range.first_use.abs_pos, amv->range.last_use.abs_pos, amv->spill_costs, amv->reg);
+
+ slot_info->active = g_list_delete_link (slot_info->active, slot_info->active);
+ slot_info->slots = g_slist_prepend_mempool (cfg->mempool, slot_info->slots, GINT_TO_POINTER (offsets [amv->idx]));
+ }
+
+ /*
+ * This also handles the case when the variable is used in an
+ * exception region, as liveness info is not computed there.
+ */
+ /*
+ * FIXME: All valuetypes are marked as INDIRECT because of LDADDR
+ * opcodes.
+ */
+ if (! (inst->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT))) {
+ if (slot_info->slots) {
+ slot = GPOINTER_TO_INT (slot_info->slots->data);
+
+ slot_info->slots = slot_info->slots->next;
+ }
+
+ slot_info->active = mono_varlist_insert_sorted (cfg, slot_info->active, vmv, TRUE);
+ }
+ }
+
+ {
+ static int count = 0;
+ count ++;
+
+ /*
+ if (count == atoi (getenv ("COUNT")))
+ printf ("LAST: %s\n", mono_method_full_name (cfg->method, TRUE));
+ if (count > atoi (getenv ("COUNT")))
+ slot = 0xffffff;
+ else {
+ mono_print_tree_nl (inst);
+ }
+ */
+ }
+
+ if (cfg->disable_reuse_stack_slots)
+ slot = 0xffffff;
+
+ if (slot == 0xffffff) {
+ /*
+	 * Always allocate valuetypes to sizeof (gpointer) to allow more
+ * efficient copying (and to work around the fact that OP_MEMCPY
+ * and OP_MEMSET ignores alignment).
+ */
+ if (MONO_TYPE_ISSTRUCT (t))
+ align = sizeof (gpointer);
+
+ if (backward) {
+ offset += size;
+ offset += align - 1;
+ offset &= ~(align - 1);
+ slot = offset;
+ }
+ else {
+ offset += align - 1;
+ offset &= ~(align - 1);
+ slot = offset;
+ offset += size;
+ }
+
+ if (*stack_align == 0)
+ *stack_align = align;
+ }
+
+ offsets [vmv->idx] = slot;
+ }
+ g_list_free (vars);
+ for (i = 0; i < MONO_TYPE_PINNED; ++i) {
+ if (scalar_stack_slots [i].active)
+ g_list_free (scalar_stack_slots [i].active);
+ }
+ for (i = 0; i < nvtypes; ++i) {
+ if (vtype_stack_slots [i].active)
+ g_list_free (vtype_stack_slots [i].active);
+ }
+
+ mono_jit_stats.locals_stack_size += offset;
+
+ *stack_size = offset;
+ return offsets;
+}
+
+gint32*
+mono_allocate_stack_slots (MonoCompile *m, guint32 *stack_size, guint32 *stack_align)
+{
+ return mono_allocate_stack_slots_full (m, TRUE, stack_size, stack_align);
+}
+
+void
+mono_register_opcode_emulation (int opcode, const char *name, const char *sigstr, gpointer func, gboolean no_throw)
+{
+ MonoJitICallInfo *info;
+ MonoMethodSignature *sig = mono_create_icall_signature (sigstr);
+
+ if (!emul_opcode_map)
+ emul_opcode_map = g_new0 (MonoJitICallInfo*, OP_LAST + 1);
+
+ g_assert (!sig->hasthis);
+ g_assert (sig->param_count < 3);
+
+ info = mono_register_jit_icall (func, name, sig, no_throw);
+
+ emul_opcode_map [opcode] = info;
+}
+
+static void
+register_icall (gpointer func, const char *name, const char *sigstr, gboolean save)
+{
+ MonoMethodSignature *sig;
+
+ if (sigstr)
+ sig = mono_create_icall_signature (sigstr);
+ else
sig = NULL;
mono_register_jit_icall (func, name, sig, save);
int i, j;
char *code;
MonoBasicBlock *bb;
+ MonoInst *c;
g_print ("IR code for method %s\n", mono_method_full_name (cfg->method, TRUE));
for (i = 0; i < cfg->num_bblocks; ++i) {
- MonoInst *c;
-
bb = cfg->bblocks [i];
/*if (bb->cil_code) {
char* code1, *code2;
g_free (code2);
} else*/
code = g_strdup ("\n");
- g_print ("\nBB%d DFN%d (len: %d): %s", bb->block_num, i, bb->cil_length, code);
+ g_print ("\nBB%d (%d) (len: %d): %s", bb->block_num, i, bb->cil_length, code);
MONO_BB_FOR_EACH_INS (bb, c) {
- mono_print_tree (c);
- g_print ("\n");
+ if (cfg->new_ir) {
+ mono_print_ins_index (-1, c);
+ } else {
+ mono_print_tree (c);
+ g_print ("\n");
+ }
}
g_print ("\tprev:");
MONO_ADD_INS (bb, inst);
}
+void
+mono_bblock_insert_after_ins (MonoBasicBlock *bb, MonoInst *ins, MonoInst *ins_to_insert)
+{
+ if (ins == NULL) {
+ ins = bb->code;
+ bb->code = ins_to_insert;
+ ins_to_insert->next = ins;
+ if (bb->last_ins == NULL)
+ bb->last_ins = ins_to_insert;
+ } else {
+ /* Link with next */
+ ins_to_insert->next = ins->next;
+ if (ins->next)
+ ins->next->prev = ins_to_insert;
+
+ /* Link with previous */
+ ins->next = ins_to_insert;
+ ins_to_insert->prev = ins;
+
+ if (bb->last_ins == ins)
+ bb->last_ins = ins_to_insert;
+ }
+}
+
+void
+mono_bblock_insert_before_ins (MonoBasicBlock *bb, MonoInst *ins, MonoInst *ins_to_insert)
+{
+ if (ins == NULL) {
+ NOT_IMPLEMENTED;
+ ins = bb->code;
+ bb->code = ins_to_insert;
+ ins_to_insert->next = ins;
+ if (bb->last_ins == NULL)
+ bb->last_ins = ins_to_insert;
+ } else {
+ /* Link with previous */
+ if (ins->prev)
+ ins->prev->next = ins_to_insert;
+ ins_to_insert->prev = ins->prev;
+
+ /* Link with next */
+ ins->prev = ins_to_insert;
+ ins_to_insert->next = ins;
+
+ if (bb->code == ins)
+ bb->code = ins_to_insert;
+ }
+}
+
+/*
+ * mono_verify_bblock:
+ *
+ * Verify that the next and prev pointers are consistent inside the instructions in BB.
+ */
+void
+mono_verify_bblock (MonoBasicBlock *bb)
+{
+ MonoInst *ins, *prev;
+
+ prev = NULL;
+ for (ins = bb->code; ins; ins = ins->next) {
+ g_assert (ins->prev == prev);
+ prev = ins;
+ }
+ if (bb->last_ins)
+ g_assert (!bb->last_ins->next);
+}
+
+/*
+ * mono_verify_cfg:
+ *
+ * Perform consistency checks on the JIT data structures and the IR
+ */
+void
+mono_verify_cfg (MonoCompile *cfg)
+{
+ MonoBasicBlock *bb;
+
+ for (bb = cfg->bb_entry; bb; bb = bb->next_bb)
+ mono_verify_bblock (bb);
+}
+
void
mono_destroy_compile (MonoCompile *cfg)
{
g_list_free (cfg->ldstr_list);
g_hash_table_destroy (cfg->token_info_hash);
+ g_free (cfg->reverse_inst_list);
+
g_free (cfg->varinfo);
g_free (cfg->vars);
g_free (cfg->exception_message);
switch (patch_info->type) {
case MONO_PATCH_INFO_BB:
+ g_assert (patch_info->data.bb->native_offset);
target = patch_info->data.bb->native_offset + code;
break;
case MONO_PATCH_INFO_ABS:
mono_domain_unlock (domain);
}
- for (i = 0; i < patch_info->data.table->table_size; i++) {
+ for (i = 0; i < patch_info->data.table->table_size; i++)
jump_table [i] = code + GPOINTER_TO_INT (patch_info->data.table->table [i]);
- }
target = jump_table;
break;
}
bb->in_bb = NULL;
bb->out_bb = NULL;
bb->next_bb = NULL;
- MONO_INST_LIST_INIT (&bb->ins_list);
+ bb->code = bb->last_ins = NULL;
bb->cil_code = NULL;
}
static void
replace_out_block_in_code (MonoBasicBlock *bb, MonoBasicBlock *orig, MonoBasicBlock *repl) {
- MonoInst *inst;
+ MonoInst *ins;
- MONO_BB_FOR_EACH_INS (bb, inst) {
- if (inst->opcode == OP_CALL_HANDLER) {
- if (inst->inst_target_bb == orig)
- inst->inst_target_bb = repl;
- }
- }
-
- inst = mono_inst_list_last (&bb->ins_list);
- if (!inst)
- return;
-
- switch (inst->opcode) {
- case OP_BR:
- if (inst->inst_target_bb == orig)
- inst->inst_target_bb = repl;
- break;
- case OP_SWITCH: {
- int i;
- int n = GPOINTER_TO_INT (inst->klass);
- for (i = 0; i < n; i++ ) {
- if (inst->inst_many_bb [i] == orig)
- inst->inst_many_bb [i] = repl;
+ for (ins = bb->code; ins != NULL; ins = ins->next) {
+ switch (ins->opcode) {
+ case OP_BR:
+ if (ins->inst_target_bb == orig)
+ ins->inst_target_bb = repl;
+ break;
+ case OP_CALL_HANDLER:
+ if (ins->inst_target_bb == orig)
+ ins->inst_target_bb = repl;
+ break;
+ case OP_SWITCH: {
+ int i;
+ int n = GPOINTER_TO_INT (ins->klass);
+ for (i = 0; i < n; i++ ) {
+ if (ins->inst_many_bb [i] == orig)
+ ins->inst_many_bb [i] = repl;
+ }
+ break;
}
- break;
- }
- case CEE_BNE_UN:
- case CEE_BEQ:
- case CEE_BLT:
- case CEE_BLT_UN:
- case CEE_BGT:
- case CEE_BGT_UN:
- case CEE_BGE:
- case CEE_BGE_UN:
- case CEE_BLE:
- case CEE_BLE_UN:
- if (inst->inst_true_bb == orig)
- inst->inst_true_bb = repl;
- if (inst->inst_false_bb == orig)
- inst->inst_false_bb = repl;
- break;
- default:
- break;
- }
-}
-
-static void
-replace_basic_block (MonoBasicBlock *bb, MonoBasicBlock *orig, MonoBasicBlock *repl)
-{
- int i, j;
-
- for (i = 0; i < bb->out_count; i++) {
- MonoBasicBlock *ob = bb->out_bb [i];
- for (j = 0; j < ob->in_count; j++) {
- if (ob->in_bb [j] == orig) {
- ob->in_bb [j] = repl;
+ default:
+ if (MONO_IS_COND_BRANCH_OP (ins)) {
+ if (ins->inst_true_bb == orig)
+ ins->inst_true_bb = repl;
+ if (ins->inst_false_bb == orig)
+ ins->inst_false_bb = repl;
+ } else if (MONO_IS_JUMP_TABLE (ins)) {
+ int i;
+ MonoJumpInfoBBTable *table = MONO_JUMP_TABLE_FROM_INS (ins);
+ for (i = 0; i < table->table_size; i++ ) {
+ if (table->table [i] == orig)
+ table->table [i] = repl;
+ }
}
+
+ break;
}
}
-
}
/**
remove_block_if_useless (MonoCompile *cfg, MonoBasicBlock *bb, MonoBasicBlock *previous_bb) {
MonoBasicBlock *target_bb = NULL;
MonoInst *inst;
-
+
/* Do not touch handlers */
if (bb->region != -1) {
bb->not_useless = TRUE;
}
/* Do not touch BBs following a switch (they are the "default" branch) */
- inst = mono_inst_list_last (&previous_bb->ins_list);
- if (inst && inst->opcode == OP_SWITCH)
+ if ((previous_bb->last_ins != NULL) && (previous_bb->last_ins->opcode == OP_SWITCH)) {
return FALSE;
+ }
/* Do not touch BBs following the entry BB and jumping to something that is not */
	/* their "next" bb (the entry BB cannot contain the branch) */
/* Check that there is a target BB, and that bb is not an empty loop (Bug 75061) */
if ((target_bb != NULL) && (target_bb != bb)) {
- MonoInst *last_ins;
int i;
if (cfg->verbose_level > 1) {
mono_unlink_bblock (cfg, bb, target_bb);
- last_ins = mono_inst_list_last (&previous_bb->ins_list);
-
if ((previous_bb != cfg->bb_entry) &&
(previous_bb->region == bb->region) &&
- ((last_ins == NULL) ||
- ((last_ins->opcode != OP_BR) &&
- (!(MONO_IS_COND_BRANCH_OP (last_ins))) &&
- (last_ins->opcode != OP_SWITCH)))) {
+ ((previous_bb->last_ins == NULL) ||
+ ((previous_bb->last_ins->opcode != OP_BR) &&
+ (! (MONO_IS_COND_BRANCH_OP (previous_bb->last_ins))) &&
+ (previous_bb->last_ins->opcode != OP_SWITCH)))) {
for (i = 0; i < previous_bb->out_count; i++) {
if (previous_bb->out_bb [i] == target_bb) {
MonoInst *jump;
}
}
-static void
-merge_basic_blocks (MonoBasicBlock *bb, MonoBasicBlock *bbn)
+void
+mono_merge_basic_blocks (MonoCompile *cfg, MonoBasicBlock *bb, MonoBasicBlock *bbn)
{
- MonoInst *last_ins;
-
- bb->out_count = bbn->out_count;
- bb->out_bb = bbn->out_bb;
-
- replace_basic_block (bb, bbn, bb);
+ MonoInst *inst;
+ MonoBasicBlock *prev_bb;
+ int i;
- last_ins = mono_inst_list_last (&bb->ins_list);
+ bb->has_array_access |= bbn->has_array_access;
+ bb->extended |= bbn->extended;
- /* Nullify branch at the end of bb */
- if (last_ins && MONO_IS_BRANCH_OP (last_ins))
- last_ins->opcode = OP_NOP;
+ mono_unlink_bblock (cfg, bb, bbn);
+ for (i = 0; i < bbn->out_count; ++i)
+ mono_link_bblock (cfg, bb, bbn->out_bb [i]);
+ while (bbn->out_count)
+ mono_unlink_bblock (cfg, bbn, bbn->out_bb [0]);
- MONO_INST_LIST_SPLICE_TAIL_INIT (&bbn->ins_list, &bb->ins_list);
+ /* Handle the branch at the end of the bb */
+ for (inst = bb->code; inst != NULL; inst = inst->next) {
+ if (inst->opcode == OP_CALL_HANDLER) {
+ g_assert (inst->inst_target_bb == bbn);
+ NULLIFY_INS (inst);
+ }
+ if (MONO_IS_JUMP_TABLE (inst)) {
+ int i;
+ MonoJumpInfoBBTable *table = MONO_JUMP_TABLE_FROM_INS (inst);
+ for (i = 0; i < table->table_size; i++ ) {
+ /* Might be already NULL from a previous merge */
+ if (table->table [i])
+ g_assert (table->table [i] == bbn);
+ table->table [i] = NULL;
+ }
+ /* Can't nullify this as later instructions depend on it */
+ }
+ }
+ if (bb->last_ins && MONO_IS_COND_BRANCH_OP (bb->last_ins)) {
+ g_assert (bb->last_ins->inst_false_bb == bbn);
+ bb->last_ins->inst_false_bb = NULL;
+ bb->extended = TRUE;
+ } else if (bb->last_ins && MONO_IS_BRANCH_OP (bb->last_ins)) {
+ NULLIFY_INS (bb->last_ins);
+ }
- bb->next_bb = bbn->next_bb;
+ if (bb->last_ins) {
+ if (bbn->code) {
+ bb->last_ins->next = bbn->code;
+ bbn->code->prev = bb->last_ins;
+ bb->last_ins = bbn->last_ins;
+ }
+ } else {
+ bb->code = bbn->code;
+ bb->last_ins = bbn->last_ins;
+ }
+ for (prev_bb = cfg->bb_entry; prev_bb && prev_bb->next_bb != bbn; prev_bb = prev_bb->next_bb)
+ ;
+ if (prev_bb) {
+ prev_bb->next_bb = bbn->next_bb;
+ } else {
+ /* bbn might not be in the bb list yet */
+ if (bb->next_bb == bbn)
+ bb->next_bb = bbn->next_bb;
+ }
nullify_basic_block (bbn);
}
move_basic_block_to_end (MonoCompile *cfg, MonoBasicBlock *bb)
{
MonoBasicBlock *bbn, *next;
- MonoInst *last_ins;
next = bb->next_bb;
bbn->next_bb = bb;
bb->next_bb = NULL;
- last_ins = mono_inst_list_last (&bb->ins_list);
-
/* Add a branch */
- if (next && (!last_ins || (last_ins->opcode != OP_NOT_REACHED))) {
+ if (next && (!bb->last_ins || ((bb->last_ins->opcode != OP_NOT_REACHED) && (bb->last_ins->opcode != OP_BR) && (bb->last_ins->opcode != OP_BR_REG) && (!MONO_IS_COND_BRANCH_OP (bb->last_ins))))) {
MonoInst *ins;
MONO_INST_NEW (cfg, ins, OP_BR);
}
}
+/*
+ * mono_remove_bblock:
+ *
+ * Remove BB from the control flow graph
+ */
+void
+mono_remove_bblock (MonoCompile *cfg, MonoBasicBlock *bb)
+{
+ MonoBasicBlock *tmp_bb;
+
+ for (tmp_bb = cfg->bb_entry; tmp_bb && tmp_bb->next_bb != bb; tmp_bb = tmp_bb->next_bb)
+ ;
+
+ g_assert (tmp_bb);
+ tmp_bb->next_bb = bb->next_bb;
+}
+
/* checks that a and b represent the same instructions, conservatively,
* it can return FALSE also for two trees that are equal.
* FIXME: also make sure there are no side effects.
* Note that this can't be applied if the second arg is not positive...
*/
static int
-try_unsigned_compare (MonoCompile *cfg, MonoBasicBlock *bb, MonoInst *bb_last)
+try_unsigned_compare (MonoCompile *cfg, MonoBasicBlock *bb)
{
MonoBasicBlock *truet, *falset;
- MonoInst *cmp_inst = bb_last->inst_left;
+ MonoInst *cmp_inst = bb->last_ins->inst_left;
MonoInst *condb;
if (!cmp_inst->inst_right->inst_c0 == 0)
return FALSE;
- truet = bb_last->inst_true_bb;
- falset = bb_last->inst_false_bb;
+ truet = bb->last_ins->inst_true_bb;
+ falset = bb->last_ins->inst_false_bb;
if (falset->in_count != 1)
return FALSE;
- condb = mono_inst_list_last (&falset->ins_list);
+ condb = falset->last_ins;
/* target bb must have one instruction */
- if (!condb || (condb->node.next != &falset->ins_list))
+ if (!condb || (condb != falset->code))
return FALSE;
if ((((condb->opcode == CEE_BLE || condb->opcode == CEE_BLT) && (condb->inst_false_bb == truet))
|| ((condb->opcode == CEE_BGE || condb->opcode == CEE_BGT) && (condb->inst_true_bb == truet)))
return FALSE;
condb->opcode = get_unsigned_condbranch (condb->opcode);
/* change the original condbranch to just point to the new unsigned check */
- bb_last->opcode = OP_BR;
- bb_last->inst_target_bb = falset;
+ bb->last_ins->opcode = OP_BR;
+ bb->last_ins->inst_target_bb = falset;
replace_out_block (bb, truet, NULL);
replace_in_block (truet, bb, NULL);
return TRUE;
* Optimizes the branches on the Control Flow Graph
*
*/
-static void
-optimize_branches (MonoCompile *cfg)
+void
+mono_optimize_branches (MonoCompile *cfg)
{
int i, changed = FALSE;
MonoBasicBlock *bb, *bbn;
niterations = cfg->num_bblocks * 2;
else
niterations = 1000;
-
+
do {
MonoBasicBlock *previous_bb;
changed = FALSE;
/* we skip the entry block (exit is handled specially instead ) */
for (previous_bb = cfg->bb_entry, bb = cfg->bb_entry->next_bb; bb; previous_bb = bb, bb = bb->next_bb) {
- MonoInst *last_ins;
-
/* dont touch code inside exception clauses */
if (bb->region != -1)
continue;
changed = TRUE;
}
- last_ins = mono_inst_list_last (&bb->ins_list);
if (bb->out_count == 1) {
bbn = bb->out_bb [0];
/* conditional branches where true and false targets are the same can be also replaced with OP_BR */
- if (last_ins && MONO_IS_COND_BRANCH_OP (last_ins)) {
- MonoInst *pop;
- MONO_INST_NEW (cfg, pop, CEE_POP);
- pop->inst_left = last_ins->inst_left->inst_left;
- mono_add_ins_to_end (bb, pop);
- MONO_INST_NEW (cfg, pop, CEE_POP);
- pop->inst_left = last_ins->inst_left->inst_right;
- mono_add_ins_to_end (bb, pop);
- last_ins->opcode = OP_BR;
- last_ins->inst_target_bb = last_ins->inst_true_bb;
+ if (bb->last_ins && (bb->last_ins->opcode != OP_BR) && MONO_IS_COND_BRANCH_OP (bb->last_ins)) {
+ if (!cfg->new_ir) {
+ MonoInst *pop;
+ MONO_INST_NEW (cfg, pop, CEE_POP);
+ pop->inst_left = bb->last_ins->inst_left->inst_left;
+ mono_add_ins_to_end (bb, pop);
+ MONO_INST_NEW (cfg, pop, CEE_POP);
+ pop->inst_left = bb->last_ins->inst_left->inst_right;
+ mono_add_ins_to_end (bb, pop);
+ }
+ bb->last_ins->opcode = OP_BR;
+ bb->last_ins->inst_target_bb = bb->last_ins->inst_true_bb;
changed = TRUE;
if (cfg->verbose_level > 2)
g_print ("cond branch removal triggered in %d %d\n", bb->block_num, bb->out_count);
/* the block are in sequence anyway ... */
/* branches to the following block can be removed */
- if (last_ins && last_ins->opcode == OP_BR) {
- last_ins->opcode = OP_NOP;
+ if (bb->last_ins && bb->last_ins->opcode == OP_BR) {
+ bb->last_ins->opcode = OP_NOP;
changed = TRUE;
if (cfg->verbose_level > 2)
g_print ("br removal triggered %d -> %d\n", bb->block_num, bbn->block_num);
}
- if (bbn->in_count == 1) {
-
+ if (bbn->in_count == 1 && !bb->extended) {
if (bbn != cfg->bb_exit) {
if (cfg->verbose_level > 2)
g_print ("block merge triggered %d -> %d\n", bb->block_num, bbn->block_num);
- merge_basic_blocks (bb, bbn);
+ mono_merge_basic_blocks (cfg, bb, bbn);
changed = TRUE;
continue;
}
}
}
}
+
if ((bbn = bb->next_bb) && bbn->in_count == 0 && bb->region == bbn->region) {
if (cfg->verbose_level > 2) {
g_print ("nullify block triggered %d\n", bbn->block_num);
if (bb->out_count == 1) {
bbn = bb->out_bb [0];
- if (last_ins && last_ins->opcode == OP_BR) {
- MonoInst *bbn_code;
-
- bbn = last_ins->inst_target_bb;
- bbn_code = mono_inst_list_first (&bbn->ins_list);
- if (bb->region == bbn->region && bbn_code &&
- bbn_code->opcode == OP_BR &&
- bbn_code->inst_target_bb->region == bb->region) {
+ if (bb->last_ins && bb->last_ins->opcode == OP_BR) {
+ bbn = bb->last_ins->inst_target_bb;
+ if (bb->region == bbn->region && bbn->code && bbn->code->opcode == OP_BR &&
+ bbn->code->inst_target_bb->region == bb->region) {
+
if (cfg->verbose_level > 2)
- g_print ("in %s branch to branch triggered %d -> %d -> %d\n", cfg->method->name,
- bb->block_num, bbn->block_num, bbn_code->inst_target_bb->block_num);
+ g_print ("branch to branch triggered %d -> %d -> %d\n", bb->block_num, bbn->block_num, bbn->code->inst_target_bb->block_num);
replace_in_block (bbn, bb, NULL);
- replace_out_block (bb, bbn, bbn_code->inst_target_bb);
- link_bblock (cfg, bb, bbn_code->inst_target_bb);
- last_ins->inst_target_bb = bbn_code->inst_target_bb;
+ replace_out_block (bb, bbn, bbn->code->inst_target_bb);
+ link_bblock (cfg, bb, bbn->code->inst_target_bb);
+ bb->last_ins->inst_target_bb = bbn->code->inst_target_bb;
changed = TRUE;
continue;
}
}
} else if (bb->out_count == 2) {
- if (last_ins && MONO_IS_COND_BRANCH_NOFP (last_ins)) {
- int branch_result = mono_eval_cond_branch (last_ins);
+ if (bb->last_ins && MONO_IS_COND_BRANCH_NOFP (bb->last_ins)) {
+ int branch_result;
MonoBasicBlock *taken_branch_target = NULL, *untaken_branch_target = NULL;
- MonoInst *bbn_code;
+
+ if (cfg->new_ir) {
+ if (bb->last_ins->flags & MONO_INST_CFOLD_TAKEN)
+ branch_result = BRANCH_TAKEN;
+ else if (bb->last_ins->flags & MONO_INST_CFOLD_NOT_TAKEN)
+ branch_result = BRANCH_NOT_TAKEN;
+ else
+ branch_result = BRANCH_UNDEF;
+ }
+ else
+ branch_result = mono_eval_cond_branch (bb->last_ins);
if (branch_result == BRANCH_TAKEN) {
- taken_branch_target = last_ins->inst_true_bb;
- untaken_branch_target = last_ins->inst_false_bb;
+ taken_branch_target = bb->last_ins->inst_true_bb;
+ untaken_branch_target = bb->last_ins->inst_false_bb;
} else if (branch_result == BRANCH_NOT_TAKEN) {
- taken_branch_target = last_ins->inst_false_bb;
- untaken_branch_target = last_ins->inst_true_bb;
+ taken_branch_target = bb->last_ins->inst_false_bb;
+ untaken_branch_target = bb->last_ins->inst_true_bb;
}
if (taken_branch_target) {
/* if mono_eval_cond_branch () is ever taken to handle
* non-constant values to compare, issue a pop here.
*/
- last_ins->opcode = OP_BR;
- last_ins->inst_target_bb = taken_branch_target;
- mono_unlink_bblock (cfg, bb, untaken_branch_target);
+ bb->last_ins->opcode = OP_BR;
+ bb->last_ins->inst_target_bb = taken_branch_target;
+ if (!bb->extended)
+ mono_unlink_bblock (cfg, bb, untaken_branch_target);
changed = TRUE;
continue;
}
- bbn = last_ins->inst_true_bb;
- bbn_code = mono_inst_list_first (&bbn->ins_list);
- if (bb->region == bbn->region && bbn_code && bbn_code->opcode == OP_BR &&
- bbn_code->inst_target_bb->region == bb->region) {
+ bbn = bb->last_ins->inst_true_bb;
+ if (bb->region == bbn->region && bbn->code && bbn->code->opcode == OP_BR &&
+ bbn->code->inst_target_bb->region == bb->region) {
if (cfg->verbose_level > 2)
g_print ("cbranch1 to branch triggered %d -> (%d) %d (0x%02x)\n",
- bb->block_num, bbn->block_num, bbn_code->inst_target_bb->block_num,
- bbn_code->opcode);
+ bb->block_num, bbn->block_num, bbn->code->inst_target_bb->block_num,
+ bbn->code->opcode);
/*
* Unlink, then relink bblocks to avoid various
* tricky situations when the two targets of the branch
* are equal, or will become equal after the change.
*/
- mono_unlink_bblock (cfg, bb, last_ins->inst_true_bb);
- mono_unlink_bblock (cfg, bb, last_ins->inst_false_bb);
+ mono_unlink_bblock (cfg, bb, bb->last_ins->inst_true_bb);
+ mono_unlink_bblock (cfg, bb, bb->last_ins->inst_false_bb);
- last_ins->inst_true_bb = bbn_code->inst_target_bb;
+ bb->last_ins->inst_true_bb = bbn->code->inst_target_bb;
- link_bblock (cfg, bb, last_ins->inst_true_bb);
- link_bblock (cfg, bb, last_ins->inst_false_bb);
+ link_bblock (cfg, bb, bb->last_ins->inst_true_bb);
+ link_bblock (cfg, bb, bb->last_ins->inst_false_bb);
changed = TRUE;
continue;
}
- bbn = last_ins->inst_false_bb;
- bbn_code = mono_inst_list_first (&bbn->ins_list);
- if (bb->region == bbn->region && bbn_code && bbn_code->opcode == OP_BR &&
- bbn_code->inst_target_bb->region == bb->region) {
+ bbn = bb->last_ins->inst_false_bb;
+ if (bbn && bb->region == bbn->region && bbn->code && bbn->code->opcode == OP_BR &&
+ bbn->code->inst_target_bb->region == bb->region) {
if (cfg->verbose_level > 2)
g_print ("cbranch2 to branch triggered %d -> (%d) %d (0x%02x)\n",
- bb->block_num, bbn->block_num, bbn_code->inst_target_bb->block_num,
- bbn_code->opcode);
+ bb->block_num, bbn->block_num, bbn->code->inst_target_bb->block_num,
+ bbn->code->opcode);
+
+ mono_unlink_bblock (cfg, bb, bb->last_ins->inst_true_bb);
+ mono_unlink_bblock (cfg, bb, bb->last_ins->inst_false_bb);
- mono_unlink_bblock (cfg, bb, last_ins->inst_true_bb);
- mono_unlink_bblock (cfg, bb, last_ins->inst_false_bb);
+ bb->last_ins->inst_false_bb = bbn->code->inst_target_bb;
- last_ins->inst_false_bb = bbn_code->inst_target_bb;
+ link_bblock (cfg, bb, bb->last_ins->inst_true_bb);
+ link_bblock (cfg, bb, bb->last_ins->inst_false_bb);
- link_bblock (cfg, bb, last_ins->inst_true_bb);
- link_bblock (cfg, bb, last_ins->inst_false_bb);
+ changed = TRUE;
+ continue;
+ }
+ bbn = bb->last_ins->inst_false_bb;
+ /*
+ * If bb is an extended bb, it could contain an inside branch to bbn.
+ * FIXME: Enable the optimization if that is not true.
+ * If bblocks_linked () is true, then merging bb and bbn
+ * would require addition of an extra branch at the end of bbn
+ * slowing down loops.
+ */
+ if (cfg->new_ir && bbn && bb->region == bbn->region && bbn->in_count == 1 && cfg->enable_extended_bblocks && bbn != cfg->bb_exit && !bb->extended && !bbn->out_of_line && !mono_bblocks_linked (bbn, bb)) {
+ g_assert (bbn->in_bb [0] == bb);
+ if (cfg->verbose_level > 2)
+ g_print ("merge false branch target triggered BB%d -> BB%d\n", bb->block_num, bbn->block_num);
+ mono_merge_basic_blocks (cfg, bb, bbn);
changed = TRUE;
continue;
}
}
/* detect and optimize to unsigned compares checks like: if (v < 0 || v > limit */
- if (last_ins && last_ins->opcode == CEE_BLT && last_ins->inst_left->inst_right->opcode == OP_ICONST) {
- if (try_unsigned_compare (cfg, bb, last_ins)) {
- /*g_print ("applied in bb %d (->%d) %s\n", bb->block_num, last_ins->inst_target_bb->block_num, mono_method_full_name (cfg->method, TRUE));*/
+ if (bb->last_ins && bb->last_ins->opcode == CEE_BLT && !cfg->new_ir && bb->last_ins->inst_left->inst_right->opcode == OP_ICONST) {
+ if (try_unsigned_compare (cfg, bb)) {
+ /*g_print ("applied in bb %d (->%d) %s\n", bb->block_num, bb->last_ins->inst_target_bb->block_num, mono_method_full_name (cfg->method, TRUE));*/
changed = TRUE;
continue;
}
}
- if (last_ins && MONO_IS_COND_BRANCH_NOFP (last_ins)) {
- if (last_ins->inst_false_bb->out_of_line && (bb->region == last_ins->inst_false_bb->region)) {
+ if (bb->last_ins && MONO_IS_COND_BRANCH_NOFP (bb->last_ins)) {
+ if (bb->last_ins->inst_false_bb && bb->last_ins->inst_false_bb->out_of_line && (bb->region == bb->last_ins->inst_false_bb->region)) {
/* Reverse the branch */
- last_ins->opcode = reverse_branch_op (last_ins->opcode);
- bbn = last_ins->inst_false_bb;
- last_ins->inst_false_bb = last_ins->inst_true_bb;
- last_ins->inst_true_bb = bbn;
+ bb->last_ins->opcode = reverse_branch_op (bb->last_ins->opcode);
+ bbn = bb->last_ins->inst_false_bb;
+ bb->last_ins->inst_false_bb = bb->last_ins->inst_true_bb;
+ bb->last_ins->inst_true_bb = bbn;
- move_basic_block_to_end (cfg, last_ins->inst_true_bb);
+ move_basic_block_to_end (cfg, bb->last_ins->inst_true_bb);
if (cfg->verbose_level > 2)
g_print ("cbranch to throw block triggered %d.\n",
bb->block_num);
}
}
} while (changed && (niterations > 0));
-
}
static void
sig = mono_method_signature (cfg->method);
if (!MONO_TYPE_IS_VOID (sig->ret)) {
- cfg->ret = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoInst));
- cfg->ret->opcode = OP_RETARG;
- cfg->ret->inst_vtype = sig->ret;
- cfg->ret->klass = mono_class_from_mono_type (sig->ret);
+ if (cfg->new_ir) {
+ cfg->ret = mono_compile_create_var (cfg, sig->ret, OP_ARG);
+ /* Inhibit optimizations */
+ cfg->ret->flags |= MONO_INST_VOLATILE;
+ } else {
+ cfg->ret = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoInst));
+ cfg->ret->opcode = OP_RETARG;
+ cfg->ret->inst_vtype = sig->ret;
+ cfg->ret->klass = mono_class_from_mono_type (sig->ret);
+ }
}
if (cfg->verbose_level > 2)
g_print ("creating vars\n");
for (i = 0; i < sig->param_count; ++i) {
cfg->args [i + sig->hasthis] = mono_compile_create_var (cfg, sig->params [i], OP_ARG);
if (sig->params [i]->byref) {
- cfg->disable_ssa = TRUE;
+ if (!cfg->new_ir) cfg->disable_ssa = TRUE;
+ }
+ }
+
+ if (cfg->new_ir && cfg->verbose_level > 2) {
+ if (cfg->ret) {
+ printf ("\treturn : ");
+ mono_print_ins (cfg->ret);
+ }
+
+ if (sig->hasthis) {
+ printf ("\tthis: ");
+ mono_print_ins (cfg->args [0]);
+ }
+
+ for (i = 0; i < sig->param_count; ++i) {
+ printf ("\targ [%d]: ", i);
+ mono_print_ins (cfg->args [i + sig->hasthis]);
}
}
cfg->locals_start = cfg->num_varinfo;
+ cfg->locals = mono_mempool_alloc0 (cfg->mempool, header->num_locals * sizeof (MonoInst*));
if (cfg->verbose_level > 2)
g_print ("creating locals\n");
for (i = 0; i < header->num_locals; ++i)
- mono_compile_create_var (cfg, header->locals [i], OP_LOCAL);
+ cfg->locals [i] = mono_compile_create_var (cfg, header->locals [i], OP_LOCAL);
+
if (cfg->verbose_level > 2)
g_print ("locals done\n");
}
void
-mono_print_code (MonoCompile *cfg)
+mono_print_code (MonoCompile *cfg, const char* msg)
{
MonoBasicBlock *bb;
for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
- MonoInst *tree;
+ MonoInst *tree = bb->code;
- if (!MONO_INST_LIST_EMPTY (&bb->ins_list))
- g_print ("CODE BLOCK %d (nesting %d):\n",
- bb->block_num, bb->nesting);
+ if (cfg->new_ir) {
+ mono_print_bb (bb, msg);
+ } else {
+ if (!tree)
+ continue;
+
+ g_print ("%s CODE BLOCK %d (nesting %d):\n", msg, bb->block_num, bb->nesting);
- MONO_BB_FOR_EACH_INS (bb, tree) {
- mono_print_tree (tree);
- g_print ("\n");
+ MONO_BB_FOR_EACH_INS (bb, tree) {
+ mono_print_tree (tree);
+ g_print ("\n");
+ }
}
}
}
cfg->rs = mono_regstate_new ();
for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
- MonoInst *last_ins = mono_inst_list_last (&bb->ins_list);
-
- if (last_ins && MONO_IS_COND_BRANCH_OP (last_ins) &&
- bb->next_bb != last_ins->inst_false_bb) {
+ if (bb->last_ins && MONO_IS_COND_BRANCH_OP (bb->last_ins) &&
+ bb->last_ins->inst_false_bb && bb->next_bb != bb->last_ins->inst_false_bb) {
/* we are careful when inverting, since bugs like #59580
* could show up when dealing with NaNs.
*/
- if (MONO_IS_COND_BRANCH_NOFP(last_ins) && bb->next_bb == last_ins->inst_true_bb) {
- MonoBasicBlock *tmp = last_ins->inst_true_bb;
- last_ins->inst_true_bb = last_ins->inst_false_bb;
- last_ins->inst_false_bb = tmp;
+ if (MONO_IS_COND_BRANCH_NOFP(bb->last_ins) && bb->next_bb == bb->last_ins->inst_true_bb) {
+ MonoBasicBlock *tmp = bb->last_ins->inst_true_bb;
+ bb->last_ins->inst_true_bb = bb->last_ins->inst_false_bb;
+ bb->last_ins->inst_false_bb = tmp;
- last_ins->opcode = reverse_branch_op (last_ins->opcode);
+ bb->last_ins->opcode = reverse_branch_op (bb->last_ins->opcode);
} else {
- MonoInst *inst = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoInst));
- inst->opcode = OP_BR;
- inst->inst_target_bb = last_ins->inst_false_bb;
- mono_bblock_add_inst (bb, inst);
+ MonoInst *ins;
+
+ MONO_INST_NEW (cfg, ins, OP_BR);
+ ins->inst_target_bb = bb->last_ins->inst_false_bb;
+ MONO_ADD_INS (bb, ins);
}
}
}
#ifdef DEBUG_SELECTION
if (cfg->verbose_level >= 4) {
- for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
- MonoInst *tree;
- g_print ("DUMP BLOCK %d:\n", bb->block_num);
-
- MONO_BB_FOR_EACH_INS (bb, tree) {
- mono_print_tree (tree);
- g_print ("\n");
+ for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
+ MonoInst *tree;
+ g_print ("DUMP BLOCK %d:\n", bb->block_num);
+ MONO_BB_FOR_EACH_INS (bb, tree) {
+ mono_print_tree (tree);
+ g_print ("\n");
+ }
}
}
- }
#endif
for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
- MonoInst *tree, *n;
- MonoInstList head;
+ MonoInst *tree = bb->code, *next;
MBState *mbstate;
- MONO_INST_LIST_INIT (&head);
- if (MONO_INST_LIST_EMPTY (&bb->ins_list))
+ if (!tree)
continue;
- MONO_INST_LIST_SPLICE_INIT (&bb->ins_list, &head);
+ bb->code = NULL;
+ bb->last_ins = NULL;
cfg->cbb = bb;
mono_regstate_reset (cfg->rs);
if (cfg->verbose_level >= 3)
g_print ("LABEL BLOCK %d:\n", bb->block_num);
#endif
- MONO_INST_LIST_FOR_EACH_ENTRY_SAFE (tree, n, &head, node) {
+ for (; tree; tree = next) {
+ next = tree->next;
#ifdef DEBUG_SELECTION
if (cfg->verbose_level >= 3) {
mono_print_tree (tree);
}
bb->max_vreg = cfg->rs->next_vreg;
+ if (bb->last_ins)
+ bb->last_ins->next = NULL;
+
mono_mempool_empty (cfg->state_pool);
}
mono_mempool_destroy (cfg->state_pool);
/* we reuse dfn here */
/* bb->dfn = bb_count++; */
#ifdef MONO_ARCH_ENABLE_NORMALIZE_OPCODES
- mono_normalize_opcodes (cfg, bb);
+ if (!cfg->new_ir)
+ mono_normalize_opcodes (cfg, bb);
#endif
mono_arch_lowering_pass (cfg, bb);
if (cfg->opt & MONO_OPT_PEEPHOLE)
mono_arch_peephole_pass_1 (cfg, bb);
- mono_local_regalloc (cfg, bb);
+ if (!cfg->globalra)
+ mono_local_regalloc (cfg, bb);
if (cfg->opt & MONO_OPT_PEEPHOLE)
mono_arch_peephole_pass_2 (cfg, bb);
/* emit code all basic blocks */
for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
bb->native_offset = cfg->code_len;
- mono_arch_output_basic_block (cfg, bb);
+ //if ((bb == cfg->bb_entry) || !(bb->region == -1 && !bb->dfn))
+ mono_arch_output_basic_block (cfg, bb);
if (bb == cfg->bb_exit) {
cfg->epilog_begin = cfg->code_len;
mono_domain_unlock (cfg->domain);
}
- if (!cfg->compile_aot)
+ if (!cfg->compile_aot && !cfg->new_ir)
/* In the aot case, the patch already points to the correct location */
patch_info->ip.i = patch_info->ip.label->inst_c0;
for (i = 0; i < patch_info->data.table->table_size; i++) {
- table [i] = GINT_TO_POINTER (patch_info->data.table->table [i]->native_offset);
+ /* Might be NULL if the switch is eliminated */
+ if (patch_info->data.table->table [i]) {
+ g_assert (patch_info->data.table->table [i]->native_offset);
+ table [i] = GINT_TO_POINTER (patch_info->data.table->table [i]->native_offset);
+ } else {
+ table [i] = NULL;
+ }
}
patch_info->data.table->table = (MonoBasicBlock**)table;
break;
#endif
}
-
-
-static void
-remove_critical_edges (MonoCompile *cfg) {
+void
+mono_remove_critical_edges (MonoCompile *cfg)
+{
MonoBasicBlock *bb;
MonoBasicBlock *previous_bb;
if (cfg->verbose_level > 3) {
for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
- MonoInst *last_ins;
int i;
- printf ("remove_critical_edges %s, BEFORE BB%d (in:", mono_method_full_name (cfg->method, TRUE), bb->block_num);
+ printf ("remove_critical_edges, BEFORE BB%d (in:", bb->block_num);
for (i = 0; i < bb->in_count; i++) {
printf (" %d", bb->in_bb [i]->block_num);
}
printf (" %d", bb->out_bb [i]->block_num);
}
printf (")");
- last_ins = mono_inst_list_last (&bb->ins_list);
- if (last_ins) {
+ if (bb->last_ins != NULL) {
printf (" ");
- mono_print_tree (last_ins);
+ mono_print_tree (bb->last_ins);
}
printf ("\n");
}
MonoBasicBlock *in_bb = bb->in_bb [in_bb_index];
if (in_bb->out_count > 1) {
MonoBasicBlock *new_bb = mono_mempool_alloc0 ((cfg)->mempool, sizeof (MonoBasicBlock));
- MONO_INST_LIST_INIT (&new_bb->ins_list);
new_bb->block_num = cfg->num_bblocks++;
// new_bb->real_offset = bb->real_offset;
new_bb->region = bb->region;
/* Do not alter the CFG while altering the BB list */
if (previous_bb->region == bb->region) {
if (previous_bb != cfg->bb_entry) {
- MonoInst *last_ins;
/* If previous_bb "followed through" to bb, */
/* keep it linked with a OP_BR */
- last_ins = mono_inst_list_last (&previous_bb->ins_list);
- if ((last_ins == NULL) ||
- ((last_ins->opcode != OP_BR) &&
- (!(MONO_IS_COND_BRANCH_OP (last_ins))) &&
- (last_ins->opcode != OP_SWITCH))) {
+ if ((previous_bb->last_ins == NULL) ||
+ ((previous_bb->last_ins->opcode != OP_BR) &&
+ (! (MONO_IS_COND_BRANCH_OP (previous_bb->last_ins))) &&
+ (previous_bb->last_ins->opcode != OP_SWITCH))) {
int i;
/* Make sure previous_bb really falls through bb */
for (i = 0; i < previous_bb->out_count; i++) {
/* put a new BB in the middle to hold the OP_BR */
MonoInst *jump;
MonoBasicBlock *new_bb_after_entry = mono_mempool_alloc0 ((cfg)->mempool, sizeof (MonoBasicBlock));
- MONO_INST_LIST_INIT (&new_bb_after_entry->ins_list);
new_bb_after_entry->block_num = cfg->num_bblocks++;
// new_bb_after_entry->real_offset = bb->real_offset;
new_bb_after_entry->region = bb->region;
previous_bb = new_bb_after_entry;
if (cfg->verbose_level > 2) {
- printf ("remove_critical_edges %s, added helper BB%d jumping to BB%d\n", mono_method_full_name (cfg->method, TRUE), new_bb_after_entry->block_num, bb->block_num);
+ printf ("remove_critical_edges, added helper BB%d jumping to BB%d\n", new_bb_after_entry->block_num, bb->block_num);
}
}
}
replace_in_block (bb, in_bb, new_bb);
if (cfg->verbose_level > 2) {
- printf ("remove_critical_edges %s, removed critical edge from BB%d to BB%d (added BB%d)\n", mono_method_full_name (cfg->method, TRUE), in_bb->block_num, bb->block_num, new_bb->block_num);
+ printf ("remove_critical_edges, removed critical edge from BB%d to BB%d (added BB%d)\n", in_bb->block_num, bb->block_num, new_bb->block_num);
}
}
}
if (cfg->verbose_level > 3) {
for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
- MonoInst *last_ins;
int i;
- printf ("remove_critical_edges %s, AFTER BB%d (in:", mono_method_full_name (cfg->method, TRUE), bb->block_num);
+ printf ("remove_critical_edges, AFTER BB%d (in:", bb->block_num);
for (i = 0; i < bb->in_count; i++) {
printf (" %d", bb->in_bb [i]->block_num);
}
printf (" %d", bb->out_bb [i]->block_num);
}
printf (")");
- last_ins = mono_inst_list_last (&bb->ins_list);
- if (last_ins) {
+ if (bb->last_ins != NULL) {
printf (" ");
- mono_print_tree (last_ins);
+ mono_print_tree (bb->last_ins);
}
printf ("\n");
}
guint8 *ip;
MonoCompile *cfg;
MonoJitInfo *jinfo;
- int dfn = 0, i, code_size_ratio;
+ int dfn, i, code_size_ratio;
gboolean deadce_has_run = FALSE;
gboolean try_generic_shared;
MonoMethod *method_to_compile, *method_to_register;
return cfg;
}
+ if (getenv ("MONO_VERBOSE_METHOD")) {
+ if (strcmp (cfg->method->name, getenv ("MONO_VERBOSE_METHOD")) == 0)
+ cfg->verbose_level = 4;
+ }
+
ip = (guint8 *)header->code;
+ cfg->intvars = mono_mempool_alloc0 (cfg->mempool, sizeof (guint16) * STACK_MAX * header->max_stack);
+
if (cfg->verbose_level > 2) {
if (cfg->generic_sharing_context)
g_print ("converting shared method %s\n", mono_method_full_name (method, TRUE));
g_print ("converting method %s\n", mono_method_full_name (method, TRUE));
}
+ if (cfg->opt & (MONO_OPT_ABCREM | MONO_OPT_SSAPRE))
+ cfg->opt |= MONO_OPT_SSA;
+
+ {
+ static int count = 0;
+
+ count ++;
+
+ if (getenv ("MONO_COUNT")) {
+ if (count == atoi (getenv ("MONO_COUNT"))) {
+ printf ("LAST: %s\n", mono_method_full_name (method, TRUE));
+ //cfg->verbose_level = 5;
+ }
+ if (count <= atoi (getenv ("MONO_COUNT")))
+ cfg->new_ir = TRUE;
+
+ /*
+ * Passing/returning vtypes in registers in managed methods is an ABI change
+ * from the old JIT.
+ */
+ disable_vtypes_in_regs = TRUE;
+ }
+ else
+ cfg->new_ir = TRUE;
+ }
+
+ /*
+ if ((cfg->method->klass->image != mono_defaults.corlib) || (strstr (cfg->method->klass->name, "StackOverflowException") && strstr (cfg->method->name, ".ctor")) || (strstr (cfg->method->klass->name, "OutOfMemoryException") && strstr (cfg->method->name, ".ctor")))
+ cfg->globalra = TRUE;
+ */
+
+ //cfg->globalra = TRUE;
+
+ //if (!strcmp (cfg->method->klass->name, "Tests") && !cfg->method->wrapper_type)
+ // cfg->globalra = TRUE;
+
+ {
+ static int count = 0;
+ count ++;
+
+ if (getenv ("COUNT2")) {
+ if (count == atoi (getenv ("COUNT2")))
+ printf ("LAST: %s\n", mono_method_full_name (cfg->method, TRUE));
+ if (count > atoi (getenv ("COUNT2")))
+ cfg->globalra = FALSE;
+ }
+ }
+
+ if (header->clauses)
+ cfg->globalra = FALSE;
+
+ if (cfg->method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED)
+ /* The code in the prolog clobbers caller saved registers */
+ cfg->globalra = FALSE;
+
+ // FIXME: Disable globalra in case of tracing/profiling
+
+ if (cfg->method->save_lmf)
+ /* The LMF saving code might clobber caller saved registers */
+ cfg->globalra = FALSE;
+
+ // FIXME:
+ if (!strcmp (cfg->method->name, "CompareInternal"))
+ cfg->globalra = FALSE;
+
+ /*
+ if (strstr (cfg->method->name, "LoadData"))
+ cfg->new_ir = FALSE;
+ */
+
+ if (cfg->new_ir) {
+ cfg->rs = mono_regstate_new ();
+ cfg->next_vreg = cfg->rs->next_vreg;
+ }
+
+ /* FIXME: Fix SSA to handle branches inside bblocks */
+ if (cfg->opt & MONO_OPT_SSA)
+ cfg->enable_extended_bblocks = FALSE;
+
+ /*
+ * FIXME: This confuses liveness analysis because variables which are assigned after
+ * a branch inside a bblock become part of the kill set, even though the assignment
+ * might not get executed. This causes the optimize_initlocals pass to delete some
+ * assignments which are needed.
+ * Also, the mono_if_conversion pass needs to be modified to recognize the code
+ * created by this.
+ */
+ //cfg->enable_extended_bblocks = TRUE;
+
/*
* create MonoInst* which represents arguments and local variables
*/
mono_compile_create_vars (cfg);
- if ((i = mono_method_to_ir (cfg, method_to_compile, NULL, NULL, cfg->locals_start, NULL, NULL, NULL, 0, FALSE)) < 0) {
+ if (cfg->new_ir) {
+ /* SSAPRE is not supported on linear IR */
+ cfg->opt &= ~MONO_OPT_SSAPRE;
+
+ i = mono_method_to_ir2 (cfg, method_to_compile, NULL, NULL, NULL, NULL, NULL, 0, FALSE);
+ }
+ else {
+ i = mono_method_to_ir (cfg, method_to_compile, NULL, NULL, cfg->locals_start, NULL, NULL, NULL, 0, FALSE);
+ }
+
+ if (i < 0) {
if (try_generic_shared && cfg->exception_type == MONO_EXCEPTION_GENERIC_SHARING_FAILED) {
if (compile_aot) {
if (MONO_PROBE_METHOD_COMPILE_END_ENABLED ())
mono_jit_stats.basic_blocks += cfg->num_bblocks;
mono_jit_stats.max_basic_blocks = MAX (cfg->num_bblocks, mono_jit_stats.max_basic_blocks);
- if ((cfg->num_varinfo > 2000) && !cfg->compile_aot) {
- /*
- * we disable some optimizations if there are too many variables
- * because JIT time may become too expensive. The actual number needs
- * to be tweaked and eventually the non-linear algorithms should be fixed.
- */
- cfg->opt &= ~ (MONO_OPT_LINEARS | MONO_OPT_COPYPROP | MONO_OPT_CONSPROP);
- cfg->disable_ssa = TRUE;
- }
-
/*g_print ("numblocks = %d\n", cfg->num_bblocks);*/
+ if (cfg->new_ir) {
+ mono_decompose_long_opts (cfg);
+
+ /* Should be done before branch opts */
+ if (cfg->opt & (MONO_OPT_CONSPROP | MONO_OPT_COPYPROP))
+ mono_local_cprop2 (cfg);
+ }
+
if (cfg->opt & MONO_OPT_BRANCH)
- optimize_branches (cfg);
+ mono_optimize_branches (cfg);
- if (cfg->opt & MONO_OPT_SSAPRE) {
- remove_critical_edges (cfg);
+ if (cfg->new_ir) {
+ /* This must be done _before_ global reg alloc and _after_ decompose */
+ mono_handle_global_vregs (cfg);
+ mono_local_deadce (cfg);
+ mono_if_conversion (cfg);
}
+ if ((cfg->opt & MONO_OPT_SSAPRE) || cfg->globalra)
+ mono_remove_critical_edges (cfg);
+
/* Depth-first ordering on basic blocks */
cfg->bblocks = mono_mempool_alloc (cfg->mempool, sizeof (MonoBasicBlock*) * (cfg->num_bblocks + 1));
+ dfn = 0;
df_visit (cfg->bb_entry, &dfn, cfg->bblocks);
if (cfg->num_bblocks != dfn + 1) {
MonoBasicBlock *bb;
cfg->num_bblocks = dfn + 1;
- if (!header->clauses) {
- /* remove unreachable code, because the code in them may be
- * inconsistent (access to dead variables for example) */
- for (bb = cfg->bb_entry; bb;) {
- MonoBasicBlock *bbn = bb->next_bb;
-
- if (bbn && bbn->region == -1 && !bbn->dfn) {
- if (cfg->verbose_level > 1)
- g_print ("found unreachable code in BB%d\n", bbn->block_num);
- bb->next_bb = bbn->next_bb;
- nullify_basic_block (bbn);
- } else {
- bb = bb->next_bb;
- }
+ /* remove unreachable code, because the code in them may be
+ * inconsistent (access to dead variables for example) */
+ for (bb = cfg->bb_entry; bb;) {
+ MonoBasicBlock *bbn = bb->next_bb;
+
+ /*
+ * FIXME: Can't use the second case in methods with clauses, since the
+ * bblocks inside the clauses are not processed during dfn computation.
+ */
+ if ((header->clauses && (bbn && bbn->region == -1 && bbn->in_count == 0)) ||
+ (!header->clauses && (bbn && bbn->region == -1 && !bbn->dfn))) {
+ if (cfg->verbose_level > 1)
+ g_print ("found unreachable code in BB%d\n", bbn->block_num);
+ /* There may exist unreachable branches to this bb */
+ bb->next_bb = bbn->next_bb;
+ nullify_basic_block (bbn);
+ } else {
+ bb = bb->next_bb;
}
}
}
+ if (((cfg->num_varinfo > 2000) || (cfg->num_bblocks > 1000)) && !cfg->compile_aot) {
+ /*
+ * we disable some optimizations if there are too many variables
+ * because JIT time may become too expensive. The actual number needs
+ * to be tweaked and eventually the non-linear algorithms should be fixed.
+ */
+ cfg->opt &= ~ (MONO_OPT_LINEARS | MONO_OPT_COPYPROP | MONO_OPT_CONSPROP);
+ cfg->disable_ssa = TRUE;
+ }
+
if (cfg->opt & MONO_OPT_LOOP) {
mono_compile_dominator_info (cfg, MONO_COMP_DOM | MONO_COMP_IDOM);
mono_compute_natural_loops (cfg);
if (!header->num_clauses && !cfg->disable_ssa) {
mono_local_cprop (cfg);
+
#ifndef DISABLE_SSA
- mono_ssa_compute (cfg);
+ if (cfg->new_ir)
+ mono_ssa_compute2 (cfg);
+ else
+ mono_ssa_compute (cfg);
#endif
}
#else
-
- /* fixme: add all optimizations which requires SSA */
- if (cfg->opt & (MONO_OPT_SSA | MONO_OPT_ABCREM | MONO_OPT_SSAPRE)) {
+ if (cfg->opt & MONO_OPT_SSA) {
if (!(cfg->comp_done & MONO_COMP_SSA) && !header->num_clauses && !cfg->disable_ssa) {
- mono_local_cprop (cfg);
#ifndef DISABLE_SSA
- mono_ssa_compute (cfg);
+ if (!cfg->new_ir)
+ mono_local_cprop (cfg);
+ if (cfg->new_ir)
+ mono_ssa_compute2 (cfg);
+ else
+ mono_ssa_compute (cfg);
#endif
if (cfg->verbose_level >= 2) {
if ((cfg->opt & MONO_OPT_CONSPROP) || (cfg->opt & MONO_OPT_COPYPROP)) {
if (cfg->comp_done & MONO_COMP_SSA) {
#ifndef DISABLE_SSA
- mono_ssa_cprop (cfg);
+ if (cfg->new_ir)
+ mono_ssa_cprop2 (cfg);
+ else
+ mono_ssa_cprop (cfg);
#endif
} else {
- mono_local_cprop (cfg);
+ if (!cfg->new_ir)
+ mono_local_cprop (cfg);
}
}
#ifndef DISABLE_SSA
if (cfg->comp_done & MONO_COMP_SSA) {
- //mono_ssa_deadce (cfg);
-
//mono_ssa_strength_reduction (cfg);
if (cfg->opt & MONO_OPT_SSAPRE) {
mono_perform_ssapre (cfg);
//mono_local_cprop (cfg);
}
-
+
if (cfg->opt & MONO_OPT_DEADCE) {
- mono_ssa_deadce (cfg);
+ if (cfg->new_ir)
+ mono_ssa_deadce2 (cfg);
+ else
+ mono_ssa_deadce (cfg);
deadce_has_run = TRUE;
}
-
- if ((cfg->flags & MONO_CFG_HAS_LDELEMA) && (cfg->opt & MONO_OPT_ABCREM))
- mono_perform_abc_removal (cfg);
-
- mono_ssa_remove (cfg);
- if (cfg->opt & MONO_OPT_BRANCH)
- optimize_branches (cfg);
+ if (cfg->new_ir) {
+ if ((cfg->flags & (MONO_CFG_HAS_LDELEMA|MONO_CFG_HAS_CHECK_THIS)) && (cfg->opt & MONO_OPT_ABCREM))
+ mono_perform_abc_removal2 (cfg);
+ } else {
+ if ((cfg->flags & MONO_CFG_HAS_LDELEMA) && (cfg->opt & MONO_OPT_ABCREM))
+ mono_perform_abc_removal (cfg);
+ }
+
+ if (cfg->new_ir) {
+ mono_ssa_remove2 (cfg);
+ mono_local_cprop2 (cfg);
+ mono_handle_global_vregs (cfg);
+ mono_local_deadce (cfg);
+ }
+ else
+ mono_ssa_remove (cfg);
+
+ if (cfg->opt & MONO_OPT_BRANCH) {
+ MonoBasicBlock *bb;
+
+ mono_optimize_branches (cfg);
+
+ /* Have to recompute cfg->bblocks and bb->dfn */
+ if (cfg->globalra) {
+ mono_remove_critical_edges (cfg);
+
+ for (bb = cfg->bb_entry; bb; bb = bb->next_bb)
+ bb->dfn = 0;
+
+ /* Depth-first ordering on basic blocks */
+ cfg->bblocks = mono_mempool_alloc (cfg->mempool, sizeof (MonoBasicBlock*) * (cfg->num_bblocks + 1));
+
+ dfn = 0;
+ df_visit (cfg->bb_entry, &dfn, cfg->bblocks);
+ cfg->num_bblocks = dfn + 1;
+ }
+ }
}
#endif
return cfg;
}
- if (cfg->verbose_level > 4) {
- printf ("BEFORE DECOMPSE START\n");
- mono_print_code (cfg);
- printf ("BEFORE DECOMPSE END\n");
+ if (cfg->new_ir) {
+#ifdef MONO_ARCH_SOFT_FLOAT
+ mono_handle_soft_float (cfg);
+#endif
+ mono_decompose_vtype_opts (cfg);
+ if (cfg->flags & MONO_CFG_HAS_ARRAY_ACCESS)
+ mono_decompose_array_access_opts (cfg);
+ }
+
+ if (!cfg->new_ir) {
+ if (cfg->verbose_level > 4)
+ mono_print_code (cfg, "BEFORE DECOMPOSE");
+
+ decompose_pass (cfg);
}
-
- decompose_pass (cfg);
if (cfg->got_var) {
GList *regs;
*/
mono_liveness_handle_exception_clauses (cfg);
- if (cfg->opt & MONO_OPT_LINEARS) {
+ if (cfg->globalra) {
+ MonoBasicBlock *bb;
+
+ /* Have to do this before regalloc since it can create vregs */
+ for (bb = cfg->bb_entry; bb; bb = bb->next_bb)
+ mono_arch_lowering_pass (cfg, bb);
+
+ mono_global_regalloc (cfg);
+ }
+
+ if ((cfg->opt & MONO_OPT_LINEARS) && !cfg->globalra) {
GList *vars, *regs;
/* For now, compute aliasing info only if needed for deadce... */
- if ((cfg->opt & MONO_OPT_DEADCE) && (! deadce_has_run) && (header->num_clauses == 0)) {
+ if (!cfg->new_ir && (cfg->opt & MONO_OPT_DEADCE) && (! deadce_has_run) && (header->num_clauses == 0)) {
cfg->aliasing_info = mono_build_aliasing_information (cfg);
}
//print_dfn (cfg);
/* variables are allocated after decompose, since decompose could create temps */
- mono_arch_allocate_vars (cfg);
+ if (!cfg->globalra)
+ mono_arch_allocate_vars (cfg);
- if (cfg->opt & MONO_OPT_CFOLD)
+ if (!cfg->new_ir && cfg->opt & MONO_OPT_CFOLD)
mono_constant_fold (cfg);
- mini_select_instructions (cfg);
+ if (cfg->new_ir) {
+ MonoBasicBlock *bb;
+ gboolean need_local_opts;
+
+ if (!cfg->globalra) {
+ mono_spill_global_vars (cfg, &need_local_opts);
+
+ if (need_local_opts || cfg->compile_aot) {
+ /* To optimize code created by spill_global_vars */
+ mono_local_cprop2 (cfg);
+ mono_local_deadce (cfg);
+ }
+ }
+
+ /* Add branches between non-consecutive bblocks */
+ for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
+ if (bb->last_ins && MONO_IS_COND_BRANCH_OP (bb->last_ins) &&
+ bb->last_ins->inst_false_bb && bb->next_bb != bb->last_ins->inst_false_bb) {
+ /* we are careful when inverting, since bugs like #59580
+ * could show up when dealing with NaNs.
+ */
+ if (MONO_IS_COND_BRANCH_NOFP(bb->last_ins) && bb->next_bb == bb->last_ins->inst_true_bb) {
+ MonoBasicBlock *tmp = bb->last_ins->inst_true_bb;
+ bb->last_ins->inst_true_bb = bb->last_ins->inst_false_bb;
+ bb->last_ins->inst_false_bb = tmp;
+
+ bb->last_ins->opcode = reverse_branch_op (bb->last_ins->opcode);
+ } else {
+ MonoInst *inst = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoInst));
+ inst->opcode = OP_BR;
+ inst->inst_target_bb = bb->last_ins->inst_false_bb;
+ mono_bblock_add_inst (bb, inst);
+ }
+ }
+ }
+
+ if (cfg->verbose_level >= 4 && !cfg->globalra) {
+ for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
+ MonoInst *tree = bb->code;
+ g_print ("DUMP BLOCK %d:\n", bb->block_num);
+ if (!tree)
+ continue;
+ for (; tree; tree = tree->next) {
+ mono_print_ins_index (-1, tree);
+ }
+ }
+ }
+
+ /* FIXME: */
+ for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
+ bb->max_vreg = cfg->next_vreg;
+ }
+ }
+ else
+ mini_select_instructions (cfg);
mono_codegen (cfg);
if (cfg->verbose_level >= 2) {
mono_register_opcode_emulation (CEE_DIV_UN, "__emul_idiv_un", "int32 int32 int32", mono_idiv_un, FALSE);
mono_register_opcode_emulation (CEE_REM, "__emul_irem", "int32 int32 int32", mono_irem, FALSE);
mono_register_opcode_emulation (CEE_REM_UN, "__emul_irem_un", "int32 int32 int32", mono_irem_un, FALSE);
+ mono_register_opcode_emulation (OP_IDIV, "__emul_op_idiv", "int32 int32 int32", mono_idiv, FALSE);
+ mono_register_opcode_emulation (OP_IDIV_UN, "__emul_op_idiv_un", "int32 int32 int32", mono_idiv_un, FALSE);
+ mono_register_opcode_emulation (OP_IREM, "__emul_op_irem", "int32 int32 int32", mono_irem, FALSE);
+ mono_register_opcode_emulation (OP_IREM_UN, "__emul_op_irem_un", "int32 int32 int32", mono_irem_un, FALSE);
#endif
#ifdef MONO_ARCH_EMULATE_MUL_DIV
mono_register_opcode_emulation (CEE_MUL_OVF, "__emul_imul_ovf", "int32 int32 int32", mono_imul_ovf, FALSE);
mono_register_opcode_emulation (CEE_MUL_OVF_UN, "__emul_imul_ovf_un", "int32 int32 int32", mono_imul_ovf_un, FALSE);
mono_register_opcode_emulation (CEE_MUL, "__emul_imul", "int32 int32 int32", mono_imul, TRUE);
+ mono_register_opcode_emulation (OP_IMUL, "__emul_op_imul", "int32 int32 int32", mono_imul, TRUE);
+ mono_register_opcode_emulation (OP_IMUL_OVF, "__emul_op_imul_ovf", "int32 int32 int32", mono_imul_ovf, FALSE);
+ mono_register_opcode_emulation (OP_IMUL_OVF_UN, "__emul_op_imul_ovf_un", "int32 int32 int32", mono_imul_ovf_un, FALSE);
#endif
#if defined(MONO_ARCH_EMULATE_MUL_DIV) || defined(MONO_ARCH_SOFT_FLOAT)
mono_register_opcode_emulation (OP_FDIV, "__emul_fdiv", "double double double", mono_fdiv, FALSE);
#endif
#ifdef MONO_ARCH_EMULATE_CONV_R8_UN
mono_register_opcode_emulation (CEE_CONV_R_UN, "__emul_conv_r_un", "double int32", mono_conv_to_r8_un, FALSE);
+ mono_register_opcode_emulation (OP_ICONV_TO_R_UN, "__emul_iconv_to_r_un", "double int32", mono_conv_to_r8_un, FALSE);
#endif
#ifdef MONO_ARCH_EMULATE_LCONV_TO_R8
mono_register_opcode_emulation (OP_LCONV_TO_R8, "__emul_lconv_to_r8", "double long", mono_lconv_to_r8, FALSE);
mono_register_opcode_emulation (OP_FMUL, "__emul_fmul", "double double double", mono_fmul, FALSE);
mono_register_opcode_emulation (OP_FNEG, "__emul_fneg", "double double", mono_fneg, FALSE);
mono_register_opcode_emulation (CEE_CONV_R8, "__emul_conv_r8", "double int32", mono_conv_to_r8, FALSE);
+ mono_register_opcode_emulation (OP_ICONV_TO_R8, "__emul_iconv_to_r8", "double int32", mono_conv_to_r8, FALSE);
mono_register_opcode_emulation (CEE_CONV_R4, "__emul_conv_r4", "double int32", mono_conv_to_r4, FALSE);
+ mono_register_opcode_emulation (OP_ICONV_TO_R4, "__emul_iconv_to_r4", "double int32", mono_conv_to_r4, FALSE);
mono_register_opcode_emulation (OP_FCONV_TO_R4, "__emul_fconv_to_r4", "double double", mono_fconv_r4, FALSE);
mono_register_opcode_emulation (OP_FCONV_TO_I1, "__emul_fconv_to_i1", "int8 double", mono_fconv_i1, FALSE);
mono_register_opcode_emulation (OP_FCONV_TO_I2, "__emul_fconv_to_i2", "int16 double", mono_fconv_i2, FALSE);
g_print ("Allocated code size: %ld\n", mono_jit_stats.allocated_code_size);
g_print ("Inlineable methods: %ld\n", mono_jit_stats.inlineable_methods);
g_print ("Inlined methods: %ld\n", mono_jit_stats.inlined_methods);
+ g_print ("Regvars: %ld\n", mono_jit_stats.regvars);
g_print ("Locals stack size: %ld\n", mono_jit_stats.locals_stack_size);
g_print ("\nCreated object count: %ld\n", mono_stats.new_object_count);
#define MONO_INST_NEW(cfg,dest,op) do { \
(dest) = mono_mempool_alloc0 ((cfg)->mempool, sizeof (MonoInst)); \
(dest)->opcode = (op); \
+ (dest)->dreg = (dest)->sreg1 = (dest)->sreg2 = -1; \
 (dest)->cil_code = (cfg)->ip; \
} while (0)
#define MONO_INST_NEW_CALL(cfg,dest,op) do { \
(dest) = mono_mempool_alloc0 ((cfg)->mempool, sizeof (MonoCallInst)); \
(dest)->inst.opcode = (op); \
- (dest)->inst.cil_code = (cfg)->ip; \
- MONO_INST_LIST_INIT (&(dest)->out_args); \
+ (dest)->inst.dreg = (dest)->inst.sreg1 = (dest)->inst.sreg2 = -1; \
+ (dest)->inst.cil_code = (cfg)->ip; \
} while (0)
#define MONO_INST_NEW_CALL_ARG(cfg,dest,op) do { \
(dest)->ins.opcode = (op); \
} while (0)
+#define MONO_ADD_INS(b,inst) do { \
+ if ((b)->last_ins) { \
+ (b)->last_ins->next = (inst); \
+ (inst)->prev = (b)->last_ins; \
+ (b)->last_ins = (inst); \
+ } else { \
+ (b)->code = (b)->last_ins = (inst); \
+ } \
+ } while (0)
+
#define NULLIFY_INS(ins) do { \
(ins)->opcode = OP_NOP; \
(ins)->dreg = (ins)->sreg1 = (ins)->sreg2 = -1; \
(ins)->ssa_op = MONO_SSA_NOP; \
} while (0)
+/* Remove INS from BB */
+#define MONO_REMOVE_INS(bb,ins) do { \
+ if ((ins)->prev) \
+ (ins)->prev->next = (ins)->next; \
+ if ((ins)->next) \
+ (ins)->next->prev = (ins)->prev; \
+ if ((bb)->code == (ins)) \
+ (bb)->code = (ins)->next; \
+ if ((bb)->last_ins == (ins)) \
+ (bb)->last_ins = (ins)->prev; \
+ } while (0)
+
+/* Remove INS from BB and nullify it */
+#define MONO_DELETE_INS(bb,ins) do { \
+ MONO_REMOVE_INS ((bb), (ins)); \
+ NULLIFY_INS ((ins)); \
+ } while (0)
+
/*
* this is used to determine when some branch optimizations are possible: we exclude FP compares
* because they have weird semantics with NaNs.
*/
#define MONO_IS_COND_BRANCH_OP(ins) (((ins)->opcode >= CEE_BEQ && (ins)->opcode <= CEE_BLT_UN) || ((ins)->opcode >= OP_LBEQ && (ins)->opcode <= OP_LBLT_UN) || ((ins)->opcode >= OP_FBEQ && (ins)->opcode <= OP_FBLT_UN) || ((ins)->opcode >= OP_IBEQ && (ins)->opcode <= OP_IBLT_UN))
-#define MONO_IS_COND_BRANCH_NOFP(ins) (MONO_IS_COND_BRANCH_OP(ins) && (ins)->inst_left->inst_left->type != STACK_R8)
+#define MONO_IS_COND_BRANCH_NOFP(ins) (MONO_IS_COND_BRANCH_OP(ins) && !(((ins)->opcode >= OP_FBEQ) && ((ins)->opcode <= OP_FBLT_UN)) && (!(ins)->inst_left || (ins)->inst_left->inst_left->type != STACK_R8))
#define MONO_IS_BRANCH_OP(ins) (MONO_IS_COND_BRANCH_OP(ins) || ((ins)->opcode == OP_BR) || ((ins)->opcode == OP_BR_REG) || ((ins)->opcode == OP_SWITCH))
+#define MONO_IS_COND_EXC(ins) ((((ins)->opcode >= OP_COND_EXC_EQ) && ((ins)->opcode <= OP_COND_EXC_LT_UN)) || (((ins)->opcode >= OP_COND_EXC_IEQ) && ((ins)->opcode <= OP_COND_EXC_ILT_UN)))
+
+#define MONO_IS_SETCC(ins) ((((ins)->opcode >= OP_CEQ) && ((ins)->opcode <= OP_CLT_UN)) || (((ins)->opcode >= OP_ICEQ) && ((ins)->opcode <= OP_ICLT_UN)) || (((ins)->opcode >= OP_LCEQ) && ((ins)->opcode <= OP_LCLT_UN)) || (((ins)->opcode >= OP_FCEQ) && ((ins)->opcode <= OP_FCLT_UN)))
+
+#define MONO_IS_PHI(ins) (((ins)->opcode == OP_PHI) || ((ins)->opcode == OP_FPHI) || ((ins)->opcode == OP_VPHI))
+#define MONO_IS_MOVE(ins) (((ins)->opcode == OP_MOVE) || ((ins)->opcode == OP_FMOVE) || ((ins)->opcode == OP_VMOVE))
+
+#define MONO_IS_LOAD_MEMBASE(ins) (((ins)->opcode >= OP_LOAD_MEMBASE) && ((ins)->opcode <= OP_LOADV_MEMBASE))
+#define MONO_IS_STORE_MEMBASE(ins) (((ins)->opcode >= OP_STORE_MEMBASE_REG) && ((ins)->opcode <= OP_STOREV_MEMBASE))
+#define MONO_IS_STORE_MEMINDEX(ins) (((ins)->opcode >= OP_STORE_MEMINDEX) && ((ins)->opcode <= OP_STORER8_MEMINDEX))
+
+#define MONO_IS_CALL(ins) (((ins->opcode >= OP_VOIDCALL) && (ins->opcode <= OP_VOIDCALL_MEMBASE)) || ((ins->opcode >= OP_FCALL) && (ins->opcode <= OP_FCALL_MEMBASE)) || ((ins->opcode >= OP_LCALL) && (ins->opcode <= OP_LCALL_MEMBASE)) || ((ins->opcode >= OP_VCALL) && (ins->opcode <= OP_VCALL_MEMBASE)) || ((ins->opcode >= OP_CALL) && (ins->opcode <= OP_CALL_MEMBASE)) || ((ins->opcode >= OP_VCALL2) && (ins->opcode <= OP_VCALL2_MEMBASE)) || (ins->opcode == OP_TAILCALL))
+
+/* FIXME: Handle OP_GOT_ENTRY too */
+#define MONO_IS_JUMP_TABLE(ins) (((ins)->opcode == OP_JUMP_TABLE) ? TRUE : ((((ins)->opcode == OP_AOTCONST) && (ins->inst_i1 == (gpointer)MONO_PATCH_INFO_SWITCH)) ? TRUE : ((ins)->opcode == OP_SWITCH) ? TRUE : FALSE))
+
+#define MONO_JUMP_TABLE_FROM_INS(ins) (((ins)->opcode == OP_JUMP_TABLE) ? (ins)->inst_p0 : (((ins)->opcode == OP_AOTCONST) && (ins->inst_i1 == (gpointer)MONO_PATCH_INFO_SWITCH) ? (ins)->inst_p0 : (((ins)->opcode == OP_SWITCH) ? (ins)->inst_p0 : NULL)))
+
+/* FIXME: Add more instructions */
+#define MONO_INS_HAS_NO_SIDE_EFFECT(ins) (MONO_IS_MOVE (ins) || (ins->opcode == OP_ICONST) || (ins->opcode == OP_I8CONST) || (ins->opcode == OP_VZERO) || (ins->opcode == OP_ADD_IMM) || (ins->opcode == OP_R8CONST) || (ins->opcode == OP_LADD_IMM) || (ins->opcode == OP_ISUB_IMM) || (ins->opcode == OP_IADD_IMM) || (ins->opcode == OP_INEG) || (ins->opcode == OP_LNEG) || (ins->opcode == OP_ISUB) || (ins->opcode == OP_CMOV_IGE) || (ins->opcode == OP_ISHL_IMM) || (ins->opcode == OP_ISHR_IMM) || (ins->opcode == OP_ISHR_UN_IMM) || (ins->opcode == OP_IAND_IMM) || (ins->opcode == OP_ICONV_TO_U1) || (ins->opcode == OP_ICONV_TO_I1) || (ins->opcode == OP_SEXT_I4) || (ins->opcode == OP_LCONV_TO_U1) || (ins->opcode == OP_ICONV_TO_U2) || (ins->opcode == OP_ICONV_TO_I2) || (ins->opcode == OP_LCONV_TO_I2))
+
typedef struct MonoInstList MonoInstList;
typedef struct MonoInst MonoInst;
typedef struct MonoCallInst MonoCallInst;
extern MonoMethodDesc *mono_break_at_bb_method;
extern int mono_break_at_bb_bb_num;
extern gboolean check_for_pending_exc;
+extern gboolean disable_vtypes_in_regs;
extern gboolean mono_verify_all;
-struct MonoInstList {
- struct MonoInstList *next, *prev;
-};
+#define INS_INFO(opcode) (&ins_info [((opcode) - OP_START - 1) * 3])
-#define MONO_INST_LIST_INIT(list) do { \
- (list)->next = (list); \
- (list)->prev = (list); \
- } while (0)
+extern const char ins_info[];
-static inline int
-MONO_INST_LIST_EMPTY (MonoInstList *list)
-{
- return list->next == list;
-}
+#define MONO_BB_FOR_EACH_INS(bb, ins) for ((ins) = (bb)->code; (ins); (ins) = (ins)->next)
-static inline void
-__MONO_INST_LIST_ADD (MonoInstList *new, MonoInstList *prev, MonoInstList *next)
-{
- next->prev = new;
- new->next = next;
- new->prev = prev;
- prev->next = new;
-}
+#define MONO_BB_FOR_EACH_INS_SAFE(bb, n, ins) for ((ins) = (bb)->code, n = (ins) ? (ins)->next : NULL; (ins); (ins) = (n), (n) = (ins) ? (ins)->next : NULL)
+#define MONO_BB_FOR_EACH_INS_REVERSE_SAFE(bb, p, ins) for ((ins) = (bb)->last_ins, p = (ins) ? (ins)->prev : NULL; (ins); (ins) = (p), (p) = (ins) ? (ins)->prev : NULL)
-static inline void
-MONO_INST_LIST_ADD (MonoInstList *new, MonoInstList *head)
-{
- __MONO_INST_LIST_ADD (new, head, head->next);
-}
+#define mono_bb_first_ins(bb) (bb)->code
+
+#if 0
static inline void
MONO_INST_LIST_ADD_TAIL (MonoInstList *new, MonoInstList *head)
#define MONO_BB_FOR_EACH_INS_REVERSE(bb, ins) MONO_INST_LIST_FOR_EACH_ENTRY_REVERSE ((ins), &((bb)->ins_list), node)
+#endif
+
struct MonoEdge {
MonoEdge *next;
MonoBasicBlock *bb;
};
/*
- * The IR-level basic block.
+ * The IR-level extended basic block.
*
* A basic block can have multiple exits just fine, as long as the point of
* 'departure' is the last instruction in the basic block. Extended basic
* at the beginning of the block, never in the middle.
*/
struct MonoBasicBlock {
- MonoInstList ins_list;
+ MonoInst *last_ins;
/* the next basic block in the order it appears in IL */
MonoBasicBlock *next_bb;
+ /*
+ * Before instruction selection it is the first tree in the
+ * forest and the first item in the list of trees. After
+ * instruction selection it is the first instruction and the
+ * first item in the list of instructions.
+ */
+ MonoInst *code;
+
/* unique block number identification */
gint32 block_num;
guint out_of_line : 1;
/* Caches the result of uselessness calculation during optimize_branches */
guint not_useless : 1;
-
+ /* Whether the decompose_array_access_opts () pass needs to process this bblock */
+ guint has_array_access : 1;
+ /* Whether this bblock is extended, i.e. it has branches inside it */
+ guint extended : 1;
+
/* use for liveness analysis */
MonoBitSet *gen_set;
MonoBitSet *kill_set;
} MonoMemcpyArgs;
struct MonoInst {
- MonoInstList node; /* this must be the first field in this struct */
+ guint16 opcode;
+ guint8 type; /* stack type */
+ guint ssa_op : 3;
+ guint8 flags : 5;
+
+ /* used by the register allocator */
+ gint32 dreg, sreg1, sreg2;
+
+ MonoInst *next, *prev;
union {
union {
gint64 i8const;
double r8const;
} data;
- guint16 opcode;
- guint8 type; /* stack type */
- guint ssa_op : 3;
- guint8 flags : 5;
-
- /* used by the register allocator */
- gint32 dreg, sreg1, sreg2;
+
+ const unsigned char* cil_code; /* for debugging and bblock splitting */
+
/* used mostly by the backend to store additional info it may need */
union {
gint32 reg3;
gint32 arg_info;
gint32 size;
MonoMemcpyArgs *memcpy_args; /* in OP_MEMSET and OP_MEMCPY */
+ gpointer data;
gint shift_amount;
gboolean is_pinvoke; /* for variables in the unmanaged marshal format */
gboolean record_cast_details; /* For CEE_CASTCLASS */
- gpointer data;
} backend;
MonoClass *klass;
- const unsigned char* cil_code; /* for debugging and bblock splitting */
};
-static inline void
-MONO_ADD_INS (MonoBasicBlock *bb, MonoInst *inst)
-{
- MONO_INST_LIST_ADD_TAIL (&inst->node, &bb->ins_list);
-}
-
-static inline void
-MONO_DELETE_INS (MonoBasicBlock *bb, MonoInst *inst)
-{
- __MONO_INST_LIST_DEL (inst->node.prev, inst->node.next);
-}
-
-static inline MonoInst *
-mono_inst_list_first (MonoInstList *head)
-{
- if (MONO_INST_LIST_EMPTY (head))
- return NULL;
- return MONO_INST_LIST_FIRST_ENTRY (head, MonoInst, node);
-}
-
-static inline MonoInst *
-mono_inst_list_last (MonoInstList *head)
-{
- if (MONO_INST_LIST_EMPTY (head))
- return NULL;
- return MONO_INST_LIST_LAST_ENTRY (head, MonoInst, node);
-}
-
-static inline MonoInst *
-mono_inst_list_next (MonoInstList *ins, MonoInstList *head)
-{
- if (ins->next == head)
- return NULL;
- return MONO_INST_LIST_ENTRY (ins->next, MonoInst, node);
-}
-
-static inline MonoInst *
-mono_inst_list_prev (MonoInstList *ins, MonoInstList *head)
-{
- if (ins->prev == head)
- return NULL;
- return MONO_INST_LIST_ENTRY (ins->prev, MonoInst, node);
-}
-
struct MonoCallInst {
MonoInst inst;
MonoMethodSignature *signature;
MonoMethod *method;
MonoInst **args;
- MonoInstList out_args;
+ MonoInst *out_args;
+ MonoInst *vret_var;
gconstpointer fptr;
guint stack_usage;
- gboolean virtual;
+ guint virtual : 1;
+ guint tail_call : 1;
+ /*
+ * If this is true, then the call returns a vtype in a register using the same
+ * calling convention as OP_CALL.
+ */
+ guint vret_in_reg : 1;
regmask_t used_iregs;
regmask_t used_fregs;
GSList *out_ireg_args;
MONO_INST_BRLABEL = 4,
MONO_INST_NOTYPECHECK = 4,
MONO_INST_UNALIGNED = 8,
+ MONO_INST_CFOLD_TAKEN = 8, /* On branches */
+ MONO_INST_CFOLD_NOT_TAKEN = 16, /* On branches */
MONO_INST_DEFINITION_HAS_SIDE_EFFECTS = 8,
/* the address of the variable has been taken */
MONO_INST_INDIRECT = 16,
MonoPosition first_use, last_use;
} MonoLiveRange;
+typedef struct MonoLiveRange2 MonoLiveRange2;
+
+struct MonoLiveRange2 {
+ int from, to;
+ MonoLiveRange2 *next;
+};
+
+typedef struct {
+ /* List of live ranges sorted by 'from' */
+ MonoLiveRange2 *range;
+ MonoLiveRange2 *last_range;
+} MonoLiveInterval;
+
/*
* Additional information about a variable
*/
struct MonoMethodVar {
guint idx; /* inside cfg->varinfo, cfg->vars */
- guint last_name;
- MonoBitSet *dfrontier;
MonoLiveRange range; /* generated by liveness analysis */
+ MonoLiveInterval *interval; /* generated by liveness analysis */
int reg; /* != -1 if allocated into a register */
int spill_costs;
MonoBitSet *def_in; /* used by SSA */
#define MONO_BBLOCK_IS_IN_REGION(bblock, regtype) (((bblock)->region & (0xf << 4)) == (regtype))
+#define get_vreg_to_inst(cfg, vreg) ((vreg) < (cfg)->vreg_to_inst_len ? (cfg)->vreg_to_inst [(vreg)] : NULL)
+
+#define vreg_is_volatile(cfg, vreg) (G_UNLIKELY (get_vreg_to_inst ((cfg), (vreg)) && (get_vreg_to_inst ((cfg), (vreg))->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT))))
+
/*
* Control Flow Graph and compilation unit information
*/
MonoMethod *inlined_method; /* the method which is currently inlined */
MonoInst *domainvar; /* a cache for the current domain */
MonoInst *got_var; /* Global Offset Table variable */
+ MonoInst **locals;
MonoInst *rgctx_var; /* Runtime generic context variable (for static generic methods) */
MonoInst **args;
+ MonoMethod *current_method; /* The method currently processed by method_to_ir () */
+ MonoGenericContext *generic_context;
/*
* This variable represents the hidden argument holding the vtype
MonoDomain *domain;
+ guint real_offset;
+ GHashTable *cbb_hash;
+
+ /* The current virtual register number */
+ guint32 next_vreg;
+
MonoGenericSharingContext *generic_sharing_context;
unsigned char *cil_start;
gint32 sig_cookie;
guint disable_aot : 1;
guint disable_ssa : 1;
+ guint enable_extended_bblocks : 1;
guint run_cctors : 1;
guint need_lmf_area : 1;
guint compile_aot : 1;
guint got_var_allocated : 1;
guint ret_var_is_local : 1;
+ guint ret_var_set : 1;
+ guint new_ir : 1;
+ guint globalra : 1;
guint unverifiable : 1;
guint skip_visibility : 1;
guint disable_reuse_registers : 1;
guint has_got_slots : 1;
gpointer debug_info;
guint32 lmf_offset;
+ guint16 *intvars;
MonoProfileCoverageInfo *coverage_info;
GHashTable *token_info_hash;
MonoCompileArch arch;
/* Fields used by the local reg allocator */
void* reginfo;
- void* reginfof;
- int reginfo_len, reginfof_len;
+ void* reverse_inst_list;
+ int reginfo_len;
+ int reverse_inst_list_len;
+
+ /* Maps vregs to their associated MonoInst's */
+ /* vregs with an associated MonoInst are 'global' while others are 'local' */
+ MonoInst **vreg_to_inst;
+
+ /* Size of above array */
+ guint32 vreg_to_inst_len;
} MonoCompile;
typedef enum {
MONO_CFG_HAS_VARARGS = 1 << 3,
MONO_CFG_HAS_TAIL = 1 << 4,
MONO_CFG_HAS_FPOUT = 1 << 5, /* there are fp values passed in int registers */
- MONO_CFG_HAS_SPILLUP = 1 << 6 /* spill var slots are allocated from bottom to top */
+ MONO_CFG_HAS_SPILLUP = 1 << 6, /* spill var slots are allocated from bottom to top */
+ MONO_CFG_HAS_CHECK_THIS = 1 << 7,
+ MONO_CFG_HAS_ARRAY_ACCESS = 1 << 8
} MonoCompileFlags;
typedef struct {
gulong basic_blocks;
gulong max_basic_blocks;
gulong locals_stack_size;
+ gulong regvars;
gulong cas_declsec_check;
gulong cas_linkdemand_icall;
gulong cas_linkdemand_pinvoke;
#ifdef MINI_OP
#undef MINI_OP
#endif
-#define MINI_OP(a,b) a,
+#define MINI_OP(a,b,dest,src1,src2) a,
enum {
OP_START = MONO_CEE_LAST - 1,
#include "mini-ops.h"
};
#undef MINI_OP
+/* Can't use the same OP_P* defines for both JITs since that would break the burg rules */
+#if defined(NEW_IR)
+
+#if SIZEOF_VOID_P == 8
+#define OP_PCONST OP_I8CONST
+#define OP_PADD OP_LADD
+#define OP_PADD_IMM OP_LADD_IMM
+#define OP_PSUB OP_LSUB
+#define OP_PMUL OP_LMUL
+#define OP_PMUL_IMM OP_LMUL_IMM
+#define OP_PNEG OP_LNEG
+#define OP_PCONV_TO_I1 OP_LCONV_TO_I1
+#define OP_PCONV_TO_U1 OP_LCONV_TO_U1
+#define OP_PCONV_TO_I2 OP_LCONV_TO_I2
+#define OP_PCONV_TO_U2 OP_LCONV_TO_U2
+#define OP_PCONV_TO_OVF_I1_UN OP_LCONV_TO_OVF_I1_UN
+#define OP_PCONV_TO_OVF_I1 OP_LCONV_TO_OVF_I1
+#define OP_PBEQ OP_LBEQ
+#define OP_PCEQ OP_LCEQ
+#define OP_PBNE_UN OP_LBNE_UN
+#define OP_PBGE_UN OP_LBGE_UN
+#define OP_PBLT_UN OP_LBLT_UN
+#define OP_PBGE OP_LBGE
+#define OP_STOREP_MEMBASE_REG OP_STOREI8_MEMBASE_REG
+#define OP_STOREP_MEMBASE_IMM OP_STOREI8_MEMBASE_IMM
+#else
+#define OP_PCONST OP_ICONST
+#define OP_PADD OP_IADD
+#define OP_PADD_IMM OP_IADD_IMM
+#define OP_PSUB OP_ISUB
+#define OP_PMUL OP_IMUL
+#define OP_PMUL_IMM OP_IMUL_IMM
+#define OP_PNEG OP_INEG
+#define OP_PCONV_TO_U2 OP_ICONV_TO_U2
+#define OP_PCONV_TO_OVF_I1_UN OP_ICONV_TO_OVF_I1_UN
+#define OP_PCONV_TO_OVF_I1 OP_ICONV_TO_OVF_I1
+#define OP_PBEQ OP_IBEQ
+#define OP_PCEQ OP_ICEQ
+#define OP_PBNE_UN OP_IBNE_UN
+#define OP_PBGE_UN OP_IBGE_UN
+#define OP_PBLT_UN OP_IBLT_UN
+#define OP_PBGE OP_IBGE
+#define OP_STOREP_MEMBASE_REG OP_STOREI4_MEMBASE_REG
+#define OP_STOREP_MEMBASE_IMM OP_STOREI4_MEMBASE_IMM
+#endif
+
+#else
+
#if SIZEOF_VOID_P == 8
#define OP_PCONST OP_I8CONST
#define OP_PADD OP_LADD
+#define OP_PADD_IMM OP_LADD_IMM
#define OP_PNEG OP_LNEG
#define OP_PCONV_TO_I1 OP_LCONV_TO_I1
#define OP_PCONV_TO_U1 OP_LCONV_TO_U1
#define OP_PCONV_TO_U2 OP_LCONV_TO_U2
#define OP_PCONV_TO_OVF_I1_UN OP_LCONV_TO_OVF_I1_UN
#define OP_PCONV_TO_OVF_I1 OP_LCONV_TO_OVF_I1
+#define OP_PBEQ OP_LBEQ
#define OP_PCEQ CEE_CEQ
#define OP_STOREP_MEMBASE_REG OP_STOREI8_MEMBASE_REG
#define OP_STOREP_MEMBASE_IMM OP_STOREI8_MEMBASE_IMM
#else
#define OP_PCONST OP_ICONST
#define OP_PADD CEE_ADD
+#define OP_PADD2 OP_IADD
#define OP_PNEG CEE_NEG
#define OP_PCONV_TO_I1 OP_ICONV_TO_I1
#define OP_PCONV_TO_U1 OP_ICONV_TO_U1
#define OP_PCONV_TO_U2 CEE_CONV_U2
#define OP_PCONV_TO_OVF_I1_UN CEE_CONV_OVF_I1_UN
#define OP_PCONV_TO_OVF_I1 CEE_CONV_OVF_I1
+#define OP_PBEQ OP_IBEQ
#define OP_PCEQ CEE_CEQ
#define OP_STOREP_MEMBASE_REG OP_STOREI4_MEMBASE_REG
#define OP_STOREP_MEMBASE_IMM OP_STOREI4_MEMBASE_IMM
#endif
+#endif
+
typedef enum {
STACK_INV,
STACK_I4,
void mono_precompile_assemblies (void) MONO_INTERNAL;
int mono_parse_default_optimizations (const char* p);
void mono_bblock_add_inst (MonoBasicBlock *bb, MonoInst *inst) MONO_INTERNAL;
+void mono_bblock_insert_after_ins (MonoBasicBlock *bb, MonoInst *ins, MonoInst *ins_to_insert) MONO_INTERNAL;
+void mono_bblock_insert_before_ins (MonoBasicBlock *bb, MonoInst *ins, MonoInst *ins_to_insert) MONO_INTERNAL;
+void mono_verify_bblock (MonoBasicBlock *bb) MONO_INTERNAL;
+void mono_verify_cfg (MonoCompile *cfg) MONO_INTERNAL;
void mono_constant_fold (MonoCompile *cfg) MONO_INTERNAL;
void mono_constant_fold_inst (MonoInst *inst, gpointer data) MONO_INTERNAL;
+MonoInst* mono_constant_fold_ins2 (MonoCompile *cfg, MonoInst *ins, MonoInst *arg1, MonoInst *arg2, gboolean overwrite) MONO_INTERNAL;
int mono_eval_cond_branch (MonoInst *branch) MONO_INTERNAL;
int mono_is_power_of_two (guint32 val) MONO_INTERNAL;
void mono_cprop_local (MonoCompile *cfg, MonoBasicBlock *bb, MonoInst **acp, int acp_size) MONO_INTERNAL;
MonoInst* mono_compile_create_var (MonoCompile *cfg, MonoType *type, int opcode) MONO_INTERNAL;
+MonoInst* mono_compile_create_var_for_vreg (MonoCompile *cfg, MonoType *type, int opcode, int vreg) MONO_INTERNAL;
void mono_compile_make_var_load (MonoCompile *cfg, MonoInst *dest, gssize var_index) MONO_INTERNAL;
MonoInst* mono_compile_create_var_load (MonoCompile *cfg, gssize var_index) MONO_INTERNAL;
MonoInst* mono_compile_create_var_store (MonoCompile *cfg, gssize var_index, MonoInst *value) MONO_INTERNAL;
MonoType* mono_type_from_stack_type (MonoInst *ins) MONO_INTERNAL;
+guint32 mono_alloc_ireg (MonoCompile *cfg) MONO_INTERNAL;
+guint32 mono_alloc_freg (MonoCompile *cfg) MONO_INTERNAL;
+guint32 mono_alloc_preg (MonoCompile *cfg) MONO_INTERNAL;
+guint32 mono_alloc_dreg (MonoCompile *cfg, MonoStackType stack_type) MONO_INTERNAL;
+void mono_link_bblock (MonoCompile *cfg, MonoBasicBlock *from, MonoBasicBlock* to) MONO_INTERNAL;
+void mono_unlink_bblock (MonoCompile *cfg, MonoBasicBlock *from, MonoBasicBlock* to) MONO_INTERNAL;
+void mono_remove_bblock (MonoCompile *cfg, MonoBasicBlock *bb) MONO_INTERNAL;
+void mono_merge_basic_blocks (MonoCompile *cfg, MonoBasicBlock *bb, MonoBasicBlock *bbn) MONO_INTERNAL;
+void mono_optimize_branches (MonoCompile *cfg) MONO_INTERNAL;
void mono_blockset_print (MonoCompile *cfg, MonoBitSet *set, const char *name, guint idom) MONO_INTERNAL;
void mono_print_tree (MonoInst *tree) MONO_INTERNAL;
void mono_print_tree_nl (MonoInst *tree) MONO_INTERNAL;
-void mono_print_code (MonoCompile *cfg) MONO_INTERNAL;
+void mono_print_ins_index (int i, MonoInst *ins) MONO_INTERNAL;
+void mono_print_ins (MonoInst *ins) MONO_INTERNAL;
+void mono_print_bb (MonoBasicBlock *bb, const char *msg) MONO_INTERNAL;
+void mono_print_code (MonoCompile *cfg, const char *msg) MONO_INTERNAL;
void mono_print_method_from_ip (void *ip);
char *mono_pmip (void *ip);
void mono_select_instructions (MonoCompile *cfg) MONO_INTERNAL;
const char* mono_inst_name (int op);
+int mono_op_to_op_imm (int opcode) MONO_INTERNAL;
+int mono_op_imm_to_op (int opcode) MONO_INTERNAL;
+int mono_load_membase_to_load_mem (int opcode) MONO_INTERNAL;
+guint mono_type_to_load_membase (MonoCompile *cfg, MonoType *type) MONO_INTERNAL;
+guint mono_type_to_store_membase (MonoCompile *cfg, MonoType *type) MONO_INTERNAL;
+guint mini_type_to_stind (MonoCompile* cfg, MonoType *type) MONO_INTERNAL;
void mono_inst_foreach (MonoInst *tree, MonoInstFunc func, gpointer data) MONO_INTERNAL;
void mono_disassemble_code (MonoCompile *cfg, guint8 *code, int size, char *id) MONO_INTERNAL;
void mono_add_patch_info (MonoCompile *cfg, int ip, MonoJumpInfoType type, gconstpointer target) MONO_INTERNAL;
GList *mono_varlist_insert_sorted (MonoCompile *cfg, GList *list, MonoMethodVar *mv, gboolean sort_end) MONO_INTERNAL;
GList *mono_varlist_sort (MonoCompile *cfg, GList *list, int sort_type) MONO_INTERNAL;
void mono_analyze_liveness (MonoCompile *cfg) MONO_INTERNAL;
-void mono_liveness_handle_exception_clauses (MonoCompile *cfg) MONO_INTERNAL;
void mono_linear_scan (MonoCompile *cfg, GList *vars, GList *regs, regmask_t *used_mask) MONO_INTERNAL;
+void mono_global_regalloc (MonoCompile *cfg) MONO_INTERNAL;
void mono_create_jump_table (MonoCompile *cfg, MonoInst *label, MonoBasicBlock **bbs, int num_blocks) MONO_INTERNAL;
int mono_compile_assembly (MonoAssembly *ass, guint32 opts, const char *aot_options) MONO_INTERNAL;
MonoCompile *mini_method_compile (MonoMethod *method, guint32 opts, MonoDomain *domain, gboolean run_cctors, gboolean compile_aot, int parts) MONO_INTERNAL;
MonoJitICallInfo *mono_find_jit_opcode_emulation (int opcode) MONO_INTERNAL;
void mono_print_ins_index (int i, MonoInst *ins) MONO_INTERNAL;
void mono_print_ins (MonoInst *ins) MONO_INTERNAL;
+gpointer mini_create_rgctx_lazy_fetch_trampoline (guint32 offset) MONO_INTERNAL;
+gboolean mini_assembly_can_skip_verification (MonoDomain *domain, MonoMethod *method) MONO_INTERNAL;
+gboolean mini_method_verify (MonoCompile *cfg, MonoMethod *method) MONO_INTERNAL;
+
+gboolean mini_class_is_system_array (MonoClass *klass) MONO_INTERNAL;
+MonoMethodSignature *mono_get_element_address_signature (int arity) MONO_INTERNAL;
+MonoJitICallInfo *mono_get_element_address_icall (int rank) MONO_INTERNAL;
+MonoJitICallInfo *mono_get_array_new_va_icall (int rank) MONO_INTERNAL;
+
+void mono_linterval_add_range (MonoCompile *cfg, MonoLiveInterval *interval, int from, int to) MONO_INTERNAL;
+void mono_linterval_print (MonoLiveInterval *interval) MONO_INTERNAL;
+void mono_linterval_print_nl (MonoLiveInterval *interval) MONO_INTERNAL;
+gboolean mono_linterval_covers (MonoLiveInterval *interval, int pos) MONO_INTERNAL;
+gint32 mono_linterval_get_intersect_pos (MonoLiveInterval *i1, MonoLiveInterval *i2) MONO_INTERNAL;
+void mono_linterval_split (MonoCompile *cfg, MonoLiveInterval *interval, MonoLiveInterval **i1, MonoLiveInterval **i2, int pos) MONO_INTERNAL;
+void mono_liveness_handle_exception_clauses (MonoCompile *cfg) MONO_INTERNAL;
/* AOT */
void mono_aot_init (void) MONO_INTERNAL;
void mono_add_ins_to_end (MonoBasicBlock *bb, MonoInst *inst) MONO_INTERNAL;
gpointer mono_create_ftnptr (MonoDomain *domain, gpointer addr) MONO_INTERNAL;
+void mono_replace_ins (MonoCompile *cfg, MonoBasicBlock *bb, MonoInst *ins, MonoInst **prev, MonoBasicBlock *first_bb, MonoBasicBlock *last_bb);
+
int mono_find_method_opcode (MonoMethod *method) MONO_INTERNAL;
MonoJitICallInfo *mono_find_jit_icall_by_name (const char *name) MONO_INTERNAL;
MonoJitICallInfo *mono_find_jit_icall_by_addr (gconstpointer addr) MONO_INTERNAL;
gpointer mono_create_jit_trampoline_from_token (MonoImage *image, guint32 token) MONO_INTERNAL;
gpointer mono_create_jit_trampoline_in_domain (MonoDomain *domain, MonoMethod *method, gboolean add_sync_wrapper) MONO_INTERNAL;
gpointer mono_create_delegate_trampoline (MonoClass *klass) MONO_INTERNAL;
+gpointer mono_create_rgctx_lazy_fetch_trampoline (guint32 offset) MONO_INTERNAL;
MonoVTable* mono_find_class_init_trampoline_by_addr (gconstpointer addr) MONO_INTERNAL;
MonoClass* mono_find_delegate_trampoline_by_addr (gconstpointer addr) MONO_INTERNAL;
gpointer mono_magic_trampoline (gssize *regs, guint8 *code, MonoMethod *m, guint8* tramp) MONO_INTERNAL;
gint32* mono_allocate_stack_slots (MonoCompile *cfg, guint32 *stack_size, guint32 *stack_align) MONO_INTERNAL;
void mono_local_regalloc (MonoCompile *cfg, MonoBasicBlock *bb) MONO_INTERNAL;
MonoInst *mono_branch_optimize_exception_target (MonoCompile *cfg, MonoBasicBlock *bb, const char * exname) MONO_INTERNAL;
+void mono_remove_critical_edges (MonoCompile *cfg) MONO_INTERNAL;
gboolean mono_is_regsize_var (MonoType *t) MONO_INTERNAL;
+void mini_emit_memcpy2 (MonoCompile *cfg, int destreg, int doffset, int srcreg, int soffset, int size, int align) MONO_INTERNAL;
CompRelation mono_opcode_to_cond (int opcode) MONO_INTERNAL;
CompType mono_opcode_to_type (int opcode, int cmp_opcode) MONO_INTERNAL;
+CompRelation mono_negate_cond (CompRelation cond) MONO_INTERNAL;
int mono_op_imm_to_op (int opcode) MONO_INTERNAL;
-void mono_decompose_op_imm (MonoCompile *cfg, MonoInst *ins) MONO_INTERNAL;
+void mono_decompose_op_imm (MonoCompile *cfg, MonoBasicBlock *bb, MonoInst *ins) MONO_INTERNAL;
void mono_peephole_ins (MonoBasicBlock *bb, MonoInst *ins) MONO_INTERNAL;
+void mono_decompose_opcode (MonoCompile *cfg, MonoInst *ins) MONO_INTERNAL;
+void mono_decompose_long_opts (MonoCompile *cfg) MONO_INTERNAL;
+void mono_decompose_vtype_opts (MonoCompile *cfg) MONO_INTERNAL;
+void mono_decompose_array_access_opts (MonoCompile *cfg) MONO_INTERNAL;
+void mono_handle_soft_float (MonoCompile *cfg) MONO_INTERNAL;
+void mono_handle_global_vregs (MonoCompile *cfg) MONO_INTERNAL;
+void mono_spill_global_vars (MonoCompile *cfg, gboolean *need_local_opts) MONO_INTERNAL;
+void mono_if_conversion (MonoCompile *cfg) MONO_INTERNAL;
+
/* methods that must be provided by the arch-specific port */
void mono_arch_init (void) MONO_INTERNAL;
void mono_arch_cleanup (void) MONO_INTERNAL;
gpointer mono_arch_get_nullified_class_init_trampoline (guint32 *code_len) MONO_INTERNAL;
GList *mono_arch_get_allocatable_int_vars (MonoCompile *cfg) MONO_INTERNAL;
GList *mono_arch_get_global_int_regs (MonoCompile *cfg) MONO_INTERNAL;
+GList *mono_arch_get_global_fp_regs (MonoCompile *cfg) MONO_INTERNAL;
+GList *mono_arch_get_iregs_clobbered_by_call (MonoCallInst *call) MONO_INTERNAL;
+GList *mono_arch_get_fregs_clobbered_by_call (MonoCallInst *call) MONO_INTERNAL;
guint32 mono_arch_regalloc_cost (MonoCompile *cfg, MonoMethodVar *vmv) MONO_INTERNAL;
void mono_arch_patch_code (MonoMethod *method, MonoDomain *domain, guint8 *code, MonoJumpInfo *ji, gboolean run_cctors) MONO_INTERNAL;
void mono_arch_flush_icache (guint8 *code, gint size) MONO_INTERNAL;
void mono_arch_setup_jit_tls_data (MonoJitTlsData *tls) MONO_INTERNAL;
void mono_arch_free_jit_tls_data (MonoJitTlsData *tls) MONO_INTERNAL;
void mono_arch_emit_this_vret_args (MonoCompile *cfg, MonoCallInst *inst, int this_reg, int this_type, int vt_reg) MONO_INTERNAL;
+void mono_arch_fill_argument_info (MonoCompile *cfg) MONO_INTERNAL;
void mono_arch_allocate_vars (MonoCompile *m) MONO_INTERNAL;
int mono_arch_get_argument_info (MonoMethodSignature *csig, int param_count, MonoJitArgumentInfo *arg_info) MONO_INTERNAL;
gboolean mono_arch_print_tree (MonoInst *tree, int arity) MONO_INTERNAL;
+void mono_arch_emit_call (MonoCompile *cfg, MonoCallInst *call) MONO_INTERNAL;
+void mono_arch_emit_outarg_vt (MonoCompile *cfg, MonoInst *ins, MonoInst *src) MONO_INTERNAL;
+void mono_arch_emit_setret (MonoCompile *cfg, MonoMethod *method, MonoInst *val) MONO_INTERNAL;
+MonoInst *mono_arch_emit_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args) MONO_INTERNAL;
+void mono_arch_decompose_opts (MonoCompile *cfg, MonoInst *ins) MONO_INTERNAL;
+void mono_arch_decompose_long_opts (MonoCompile *cfg, MonoInst *ins) MONO_INTERNAL;
+
MonoJitInfo *mono_arch_find_jit_info (MonoDomain *domain,
MonoJitTlsData *jit_tls,
MonoJitInfo *res,
void mono_ssa_strength_reduction (MonoCompile *cfg) MONO_INTERNAL;
void mono_free_loop_info (MonoCompile *cfg) MONO_INTERNAL;
+void mono_ssa_compute2 (MonoCompile *cfg);
+void mono_ssa_remove2 (MonoCompile *cfg);
+void mono_ssa_cprop2 (MonoCompile *cfg);
+void mono_ssa_deadce2 (MonoCompile *cfg);
+
/* debugging support */
void mono_debug_init_method (MonoCompile *cfg, MonoBasicBlock *start_block,
guint32 breakpoint_id) MONO_INTERNAL;
extern void
mono_perform_abc_removal (MonoCompile *cfg) MONO_INTERNAL;
extern void
+mono_perform_abc_removal2 (MonoCompile *cfg) MONO_INTERNAL;
+extern void
mono_perform_ssapre (MonoCompile *cfg) MONO_INTERNAL;
extern void
mono_local_cprop (MonoCompile *cfg) MONO_INTERNAL;
+extern void
+mono_local_cprop2 (MonoCompile *cfg);
+extern void
+mono_local_deadce (MonoCompile *cfg);
/* CAS - stack walk */
MonoSecurityFrame* ves_icall_System_Security_SecurityFrame_GetSecurityFrame (gint32 skip) MONO_INTERNAL;
return 0;
}
+ // 128 bits
+ struct Struct3 {
+ public long i, j, k, l;
+ }
+
+ static int pass_struct3 (int i, int j, int k, int l, int m, int n, int o, int p, Struct3 s, int q) {
+ if (s.i + s.j + s.k + s.l != 10)
+ return 1;
+ else
+ return 0;
+ }
+
+ static int test_0_struct3_args () {
+ Struct3 s = new Struct3 ();
+ s.i = 1;
+ s.j = 2;
+ s.k = 3;
+ s.l = 4;
+
+ return pass_struct3 (1, 2, 3, 4, 5, 6, 7, 8, s, 9);
+ }
+
struct AStruct {
public int i;
void
mono_regstate_reset (MonoRegState *rs) {
- rs->next_vreg = MAX (MONO_MAX_IREGS, MONO_MAX_FREGS);
+ rs->next_vreg = MONO_MAX_IREGS + MONO_MAX_FREGS;
}
inline int
MINI_OP(OP_INEG, "int_neg")
MINI_OP(OP_INOT, "int_not")
-MINI_OP(OP_LONG_SHRUN_32, "long_shr_un_32")
+MINI_OP(OP_LSHR_UN_32, "long_shr_un_32")
MINI_OP(OP_FADD, "float_add")
MINI_OP(OP_FSUB, "float_sub")
/* insert phi functions */
for (i = 0; i < cfg->num_varinfo; ++i) {
set = mono_compile_iterated_dfrontier (cfg, vinfo [i].def_in);
- vinfo [i].dfrontier = set;
mono_bitset_foreach_bit (set, idx, cfg->num_bblocks) {
MonoBasicBlock *bb = cfg->bblocks [idx];
store->inst_i1 = inst;
store->klass = store->inst_i0->klass;
- MONO_INST_LIST_ADD (&store->node, &bb->ins_list);
+ store->next = bb->code;
+ bb->code = store;
+ if (!bb->last_ins)
+ bb->last_ins = bb->code;
#ifdef DEBUG_SSA
printf ("ADD PHI BB%d %s\n", cfg->bblocks [idx]->block_num, mono_method_full_name (cfg->method, TRUE));
printf ("REPLACE COPY BB%d %d %d\n", bb->block_num, idx, new_var->inst_c0);
g_assert (cfg->varinfo [mv->reg]->inst_vtype == cfg->varinfo [idx]->inst_vtype);
#endif
- inst->inst_i0 = new_var;
+ inst->inst_p0 = new_var;
} else {
is_live [mv->idx] = 1;
}
}
}
#endif
- next = mono_inst_list_next (&inst->node, &bb->ins_list);
+ next = inst->next;
if (next && next->ssa_op == MONO_SSA_STORE &&
next->inst_i0->opcode == OP_LOCAL &&
next->inst_i1->ssa_op == MONO_SSA_LOAD &&
}
store = mono_compile_create_var_store (area->cfg, current_bb->phi_variable_index, phi);
if (current_bb->phi_insertion_point != NULL) {
- MONO_INST_LIST_ADD (&store->node,
-					&current_bb->phi_insertion_point->node);
+ store->next = current_bb->phi_insertion_point->next;
+ current_bb->phi_insertion_point->next = store;
} else {
- MONO_INST_LIST_ADD (&store->node,
-					&current_bb->bb->ins_list);
+ store->next = current_bb->bb->code;
+ current_bb->bb->code = store;
}
MONO_VARINFO (area->cfg, current_bb->phi_variable_index)->def = store;
current_bb->phi_insertion_point = store;
*moved_expression = *(current_expression->occurrence);
store = mono_compile_create_var_store (area->cfg, current_expression->variable_index, moved_expression);
if (current_expression->previous_tree != NULL) {
- MONO_INST_LIST_ADD (&store->node,
-					&current_expression->previous_tree->node);
+ store->next = current_expression->previous_tree->next;
+ current_expression->previous_tree->next = store;
} else {
- MONO_INST_LIST_ADD (&store->node,
-					&current_bb->bb->ins_list);
+ store->next = current_bb->bb->code;
+ current_bb->bb->code = store;
}
MONO_VARINFO (area->cfg, current_expression->variable_index)->def = store;
mono_compile_make_var_load (area->cfg, current_expression->occurrence, current_expression->variable_index);
inserted_expression = create_expression (area, &expression_description, &prototype_occurrence);
store = mono_compile_create_var_store (area->cfg, current_bb->phi_argument_variable_index, inserted_expression);
MONO_VARINFO (area->cfg, current_bb->phi_argument_variable_index)->def = store;
- MONO_INST_LIST_INIT (&store->node);
+ store->next = NULL;
mono_add_ins_to_end (current_bb->bb, store);
area->inserted_occurrences ++;
printf ("SSAPRE STARTS PROCESSING METHOD %s\n", mono_method_full_name (cfg->method, TRUE));
}
if (area.cfg->verbose_level >= DUMP_LEVEL) {
- printf ("BEFORE SSAPRE START\n");
- mono_print_code (area.cfg);
- printf ("BEFORE SSAPRE END\n");
+ mono_print_code (area.cfg, "BEFORE SSAPRE");
}
area.first_in_queue = NULL;
}
if (area.cfg->verbose_level >= DUMP_LEVEL) {
- printf ("AFTER SSAPRE START\n");
- mono_print_code (area.cfg);
- printf ("AFTER SSAPRE END\n");
+ mono_print_code (area.cfg, "AFTER SSAPRE");
}
if (area.cfg->verbose_level >= TRACE_LEVEL) {
printf ("SSAPRE ENDS PROCESSING METHOD %s\n", mono_method_full_name (cfg->method, TRUE));
if (diff < 0)
indent_level += diff;
v = indent_level;
- while (v-- > 0) {
- printf (". ");
- }
+ printf ("[%d] ", indent_level);
if (diff > 0)
indent_level += diff;
}