} while (0)
+/* Load the runtime generic context for the current method into RGCTX.
+ * NOTE(review): 'this' stays NULL here, yet the non-static/no-method-context
+ * path calls get_runtime_generic_context with it — confirm callers guarantee
+ * that path tolerates a NULL this. */
#define GET_RGCTX(rgctx, context_used) do { \
MonoInst *this = NULL; \
- g_assert ((context_used) && !((context_used) & MONO_GENERIC_CONTEXT_USED_METHOD)); \
+ g_assert (context_used); \
GENERIC_SHARING_FAILURE_IF_VALUETYPE_METHOD(*ip); \
if (!(method->flags & METHOD_ATTRIBUTE_STATIC) && \
!((context_used) & MONO_GENERIC_CONTEXT_USED_METHOD)) \
(rgctx) = get_runtime_generic_context (cfg, method, (context_used), this, ip); \
} while (0)
+
#define MONO_CHECK_THIS(ins) (mono_method_signature (cfg->method)->hasthis && (ins)->ssa_op == MONO_SSA_LOAD && (ins)->inst_left->inst_c0 == 0)
static void setup_stat_profiler (void);
static void dec_foreach (MonoInst *tree, MonoCompile *cfg);
+int mono_method_to_ir2 (MonoCompile *cfg, MonoMethod *method, MonoBasicBlock *start_bblock, MonoBasicBlock *end_bblock,
+ MonoInst *return_var, GList *dont_inline, MonoInst **inline_args,
+ guint inline_offset, gboolean is_virtual_call);
+
static int mono_method_to_ir (MonoCompile *cfg, MonoMethod *method, MonoBasicBlock *start_bblock, MonoBasicBlock *end_bblock,
int locals_offset, MonoInst *return_var, GList *dont_inline, MonoInst **inline_args,
guint inline_offset, gboolean is_virtual_call);
#endif
/* helper methods signature */
-static MonoMethodSignature *helper_sig_class_init_trampoline = NULL;
-static MonoMethodSignature *helper_sig_generic_class_init_trampoline = NULL;
-static MonoMethodSignature *helper_sig_rgctx_lazy_fetch_trampoline = NULL;
-static MonoMethodSignature *helper_sig_domain_get = NULL;
+/* FIXME: Make these static again */
+MonoMethodSignature *helper_sig_class_init_trampoline = NULL;
+MonoMethodSignature *helper_sig_domain_get = NULL;
+MonoMethodSignature *helper_sig_generic_class_init_trampoline = NULL;
+MonoMethodSignature *helper_sig_rgctx_lazy_fetch_trampoline = NULL;
static guint32 default_opt = 0;
static gboolean default_opt_set = FALSE;
#ifndef DISABLE_AOT
gboolean mono_compile_aot = FALSE;
#endif
+/* If this is set, no code is generated dynamically; everything is taken from AOT files */
+gboolean mono_aot_only = FALSE;
+/* Whether to use IMT */
+#ifdef MONO_ARCH_HAVE_IMT
+gboolean mono_use_imt = TRUE;
+#else
+gboolean mono_use_imt = FALSE;
+#endif
MonoMethodDesc *mono_inject_async_exc_method = NULL;
int mono_inject_async_exc_pos;
MonoMethodDesc *mono_break_at_bb_method = NULL;
static MonoCodeManager *global_codeman = NULL;
-static GHashTable *jit_icall_name_hash = NULL;
+/* FIXME: Make this static again */
+GHashTable *jit_icall_name_hash = NULL;
static MonoDebugOptions debug_options;
/* Whenever to check for pending exceptions in managed-to-native wrappers */
gboolean check_for_pending_exc = TRUE;
+/* Whether to disable passing/returning small valuetypes in registers for managed methods */
+gboolean disable_vtypes_in_regs = FALSE;
+
gboolean
mono_running_on_valgrind (void)
{
{
void *ptr;
+ if (mono_aot_only)
+ g_error ("Attempting to allocate from the global code manager while running with --aot-only.\n");
+
if (!global_codeman) {
/* This can happen during startup */
global_codeman = mono_code_manager_new ();
* dfn: Depth First Number
* block_num: unique ID assigned at bblock creation
*/
-#define NEW_BBLOCK(cfg,new_bb) do { \
- new_bb = mono_mempool_alloc0 ((cfg)->mempool, sizeof (MonoBasicBlock)); \
- MONO_INST_LIST_INIT (&new_bb->ins_list); \
- } while (0)
-
+#define NEW_BBLOCK(cfg) (mono_mempool_alloc0 ((cfg)->mempool, sizeof (MonoBasicBlock)))
#define ADD_BBLOCK(cfg,b) do { \
cfg->cil_offset_to_bb [(b)->cil_code - cfg->cil_start] = (b); \
(b)->block_num = cfg->num_bblocks++; \
(tblock) = cfg->cil_offset_to_bb [(ip) - cfg->cil_start]; \
if (!(tblock)) { \
if ((ip) >= end || (ip) < header->code) UNVERIFIED; \
- NEW_BBLOCK (cfg, (tblock)); \
+ (tblock) = NEW_BBLOCK (cfg); \
(tblock)->cil_code = (ip); \
ADD_BBLOCK (cfg, (tblock)); \
} \
} while (0)
#define CHECK_BBLOCK(target,ip,tblock) do { \
- if ((target) < (ip) && \
- MONO_INST_LIST_EMPTY (&(tblock)->ins_list)) { \
- bb_recheck = g_list_prepend (bb_recheck, (tblock)); \
- if (cfg->verbose_level > 2) \
- g_print ("queued block %d for check at IL%04x from IL%04x\n", (tblock)->block_num, (int)((target) - header->code), (int)((ip) - header->code)); \
- } \
+ if ((target) < (ip) && !(tblock)->code) { \
+ bb_recheck = g_list_prepend (bb_recheck, (tblock)); \
+ if (cfg->verbose_level > 2) g_print ("queued block %d for check at IL%04x from IL%04x\n", (tblock)->block_num, (int)((target) - header->code), (int)((ip) - header->code)); \
+ } \
} while (0)
#define NEW_ICONST(cfg,dest,val) do { \
*
* Unlink two basic blocks.
*/
-static void
+void
mono_unlink_bblock (MonoCompile *cfg, MonoBasicBlock *from, MonoBasicBlock* to)
{
int i, pos;
}
}
+/*
+ * mono_bblocks_linked:
+ *
+ * Return whether BB1 and BB2 are linked in the CFG, i.e. whether BB2
+ * occurs in BB1's out-edge list. Only the BB1 -> BB2 direction is checked.
+ */
+static gboolean
+mono_bblocks_linked (MonoBasicBlock *bb1, MonoBasicBlock *bb2)
+{
+ int i;
+
+ for (i = 0; i < bb1->out_count; ++i) {
+ if (bb1->out_bb [i] == bb2)
+ return TRUE;
+ }
+
+ return FALSE;
+}
+
/**
* mono_find_block_region:
*
int i;
array [*dfn] = start;
- /*g_print ("visit %d at %p (BB%ld)\n", *dfn, start->cil_code, start->block_num);*/
+ /* g_print ("visit %d at %p (BB%ld)\n", *dfn, start->cil_code, start->block_num); */
for (i = 0; i < start->out_count; ++i) {
if (start->out_bb [i]->dfn)
continue;
MonoInst *inst;
MonoBasicBlock *bb;
- if (!MONO_INST_LIST_EMPTY (&second->ins_list))
+ if (second->code)
return;
/*
first->out_bb = NULL;
link_bblock (cfg, first, second);
+ second->last_ins = first->last_ins;
+
/*g_print ("start search at %p for %p\n", first->cil_code, second->cil_code);*/
MONO_BB_FOR_EACH_INS (first, inst) {
- MonoInst *inst_next;
-
/*char *code = mono_disasm_code_one (NULL, cfg->method, inst->next->cil_code, NULL);
g_print ("found %p: %s", inst->next->cil_code, code);
g_free (code);*/
- if (inst->cil_code >= second->cil_code)
- continue;
-
- inst_next = mono_inst_list_next (&inst->node, &first->ins_list);
- if (!inst_next)
- break;
-
- if (inst_next->cil_code < second->cil_code)
- continue;
-
- second->ins_list.next = inst->node.next;
- second->ins_list.prev = first->ins_list.prev;
- inst->node.next = &first->ins_list;
- first->ins_list.prev = &inst->node;
-
- second->next_bb = first->next_bb;
- first->next_bb = second;
- return;
+ if (inst->cil_code < second->cil_code && inst->next->cil_code >= second->cil_code) {
+ second->code = inst->next;
+ inst->next = NULL;
+ first->last_ins = inst;
+ second->next_bb = first->next_bb;
+ first->next_bb = second;
+ return;
+ }
}
- if (MONO_INST_LIST_EMPTY (&second->ins_list)) {
+ if (!second->code) {
g_warning ("bblock split failed in %s::%s\n", cfg->method->klass->name, cfg->method->name);
//G_BREAKPOINT ();
}
return opcode;
}
+/*
+ * mono_type_to_store_membase:
+ *
+ * Map TYPE to the OP_STORE*_MEMBASE_REG/OP_STOREV_MEMBASE opcode used to
+ * store a value of that type to memory. Byrefs are stored as pointers;
+ * enums and generic instances are reduced to their underlying type first.
+ * Aborts via g_error () on unknown element types.
+ */
+guint
+mono_type_to_store_membase (MonoCompile *cfg, MonoType *type)
+{
+ if (type->byref)
+ return OP_STORE_MEMBASE_REG;
+
+handle_enum:
+ switch (type->type) {
+ case MONO_TYPE_I1:
+ case MONO_TYPE_U1:
+ case MONO_TYPE_BOOLEAN:
+ return OP_STOREI1_MEMBASE_REG;
+ case MONO_TYPE_I2:
+ case MONO_TYPE_U2:
+ case MONO_TYPE_CHAR:
+ return OP_STOREI2_MEMBASE_REG;
+ case MONO_TYPE_I4:
+ case MONO_TYPE_U4:
+ return OP_STOREI4_MEMBASE_REG;
+ case MONO_TYPE_I:
+ case MONO_TYPE_U:
+ case MONO_TYPE_PTR:
+ case MONO_TYPE_FNPTR:
+ return OP_STORE_MEMBASE_REG;
+ case MONO_TYPE_CLASS:
+ case MONO_TYPE_STRING:
+ case MONO_TYPE_OBJECT:
+ case MONO_TYPE_SZARRAY:
+ case MONO_TYPE_ARRAY:
+ return OP_STORE_MEMBASE_REG;
+ case MONO_TYPE_I8:
+ case MONO_TYPE_U8:
+ return OP_STOREI8_MEMBASE_REG;
+ case MONO_TYPE_R4:
+ return OP_STORER4_MEMBASE_REG;
+ case MONO_TYPE_R8:
+ return OP_STORER8_MEMBASE_REG;
+ case MONO_TYPE_VALUETYPE:
+ if (type->data.klass->enumtype) {
+ type = type->data.klass->enum_basetype;
+ goto handle_enum;
+ }
+ return OP_STOREV_MEMBASE;
+ case MONO_TYPE_TYPEDBYREF:
+ return OP_STOREV_MEMBASE;
+ case MONO_TYPE_GENERICINST:
+ type = &type->data.generic_class->container_class->byval_arg;
+ goto handle_enum;
+ case MONO_TYPE_VAR:
+ case MONO_TYPE_MVAR:
+ /* FIXME: all the arguments must be references for now,
+ * later look inside cfg and see if the arg num is
+ * really a reference
+ */
+ g_assert (cfg->generic_sharing_context);
+ return OP_STORE_MEMBASE_REG;
+ default:
+ g_error ("unknown type 0x%02x in type_to_store_membase", type->type);
+ }
+ return -1;
+}
+
+/*
+ * mono_type_to_load_membase:
+ *
+ * Map TYPE to the OP_LOAD*_MEMBASE/OP_LOADV_MEMBASE opcode used to load a
+ * value of that type from memory. Byrefs load as pointers; underlying types
+ * are resolved via mono_type_get_underlying_type before dispatch.
+ * Aborts via g_error () on unknown element types.
+ */
+guint
+mono_type_to_load_membase (MonoCompile *cfg, MonoType *type)
+{
+ if (type->byref)
+ return OP_LOAD_MEMBASE;
+
+ switch (mono_type_get_underlying_type (type)->type) {
+ case MONO_TYPE_I1:
+ return OP_LOADI1_MEMBASE;
+ case MONO_TYPE_U1:
+ case MONO_TYPE_BOOLEAN:
+ return OP_LOADU1_MEMBASE;
+ case MONO_TYPE_I2:
+ return OP_LOADI2_MEMBASE;
+ case MONO_TYPE_U2:
+ case MONO_TYPE_CHAR:
+ return OP_LOADU2_MEMBASE;
+ case MONO_TYPE_I4:
+ return OP_LOADI4_MEMBASE;
+ case MONO_TYPE_U4:
+ return OP_LOADU4_MEMBASE;
+ case MONO_TYPE_I:
+ case MONO_TYPE_U:
+ case MONO_TYPE_PTR:
+ case MONO_TYPE_FNPTR:
+ return OP_LOAD_MEMBASE;
+ case MONO_TYPE_CLASS:
+ case MONO_TYPE_STRING:
+ case MONO_TYPE_OBJECT:
+ case MONO_TYPE_SZARRAY:
+ case MONO_TYPE_ARRAY:
+ return OP_LOAD_MEMBASE;
+ case MONO_TYPE_I8:
+ case MONO_TYPE_U8:
+ return OP_LOADI8_MEMBASE;
+ case MONO_TYPE_R4:
+ return OP_LOADR4_MEMBASE;
+ case MONO_TYPE_R8:
+ return OP_LOADR8_MEMBASE;
+ case MONO_TYPE_VALUETYPE:
+ case MONO_TYPE_TYPEDBYREF:
+ return OP_LOADV_MEMBASE;
+ case MONO_TYPE_GENERICINST:
+ if (mono_type_generic_inst_is_valuetype (type))
+ return OP_LOADV_MEMBASE;
+ else
+ return OP_LOAD_MEMBASE;
+ break;
+ case MONO_TYPE_VAR:
+ case MONO_TYPE_MVAR:
+ /* FIXME: all the arguments must be references for now,
+ * later look inside cfg and see if the arg num is
+ * really a reference
+ */
+ g_assert (cfg->generic_sharing_context);
+ return OP_LOAD_MEMBASE;
+ default:
+ g_error ("unknown type 0x%02x in type_to_load_membase", type->type);
+ }
+ return -1;
+}
+
#ifdef MONO_ARCH_SOFT_FLOAT
static int
condbr_to_fp_br (int opcode)
CEE_STIND_REF
};
+
+#ifdef MONO_ARCH_SOFT_FLOAT
+/*
+ * handle_store_float:
+ *
+ * Emit a call to the mono_fstore_r4 JIT icall with (VAL, PTR), storing an
+ * R4 value through PTR on soft-float targets.
+ */
+static void
+handle_store_float (MonoCompile *cfg, MonoBasicBlock *bblock, MonoInst *ptr, MonoInst *val, const unsigned char *ip)
+{
+ MonoInst *iargs [2];
+ iargs [0] = val;
+ iargs [1] = ptr;
+
+ mono_emit_jit_icall (cfg, bblock, mono_fstore_r4, iargs, ip);
+}
+
+/*
+ * handle_load_float:
+ *
+ * Emit a call to the mono_fload_r4 JIT icall with PTR. The returned int is
+ * fed to NEW_TEMPLOAD by the macros below, i.e. it is a temporary index.
+ */
+static int
+handle_load_float (MonoCompile *cfg, MonoBasicBlock *bblock, MonoInst *ptr, const unsigned char *ip)
+{
+ MonoInst *iargs [1];
+ iargs [0] = ptr;
+
+ return mono_emit_jit_icall (cfg, bblock, mono_fload_r4, iargs, ip);
+}
+
+/* If local IDX is a non-byref R4, replace the load with an icall load
+ * through the local's address; otherwise INS is left untouched. */
+#define LDLOC_SOFT_FLOAT(cfg,ins,idx,ip) do {\
+ if (header->locals [(idx)]->type == MONO_TYPE_R4 && !header->locals [(idx)]->byref) { \
+ int temp; \
+ NEW_LOCLOADA (cfg, (ins), (idx)); \
+ temp = handle_load_float (cfg, bblock, (ins), (ip)); \
+ NEW_TEMPLOAD (cfg, (ins), temp); \
+ } \
+ } while (0)
+/* If local IDX is a non-byref R4, store *sp through an icall and turn INS
+ * into a NOP so no ordinary store is emitted. */
+#define STLOC_SOFT_FLOAT(cfg,ins,idx,ip) do {\
+ if (header->locals [(idx)]->type == MONO_TYPE_R4 && !header->locals [(idx)]->byref) { \
+ NEW_LOCLOADA (cfg, (ins), (idx)); \
+ handle_store_float (cfg, bblock, (ins), *sp, (ip)); \
+ MONO_INST_NEW (cfg, (ins), OP_NOP); \
+ } \
+ } while (0)
+/* Argument variants of the two macros above, keyed off param_types. */
+#define LDARG_SOFT_FLOAT(cfg,ins,idx,ip) do {\
+ if (param_types [(idx)]->type == MONO_TYPE_R4 && !param_types [(idx)]->byref) { \
+ int temp; \
+ NEW_ARGLOADA (cfg, (ins), (idx)); \
+ temp = handle_load_float (cfg, bblock, (ins), (ip)); \
+ NEW_TEMPLOAD (cfg, (ins), temp); \
+ } \
+ } while (0)
+#define STARG_SOFT_FLOAT(cfg,ins,idx,ip) do {\
+ if (param_types [(idx)]->type == MONO_TYPE_R4 && !param_types [(idx)]->byref) { \
+ NEW_ARGLOADA (cfg, (ins), (idx)); \
+ handle_store_float (cfg, bblock, (ins), *sp, (ip)); \
+ MONO_INST_NEW (cfg, (ins), OP_NOP); \
+ } \
+ } while (0)
+
+/* Rewrite an R4 temp load (CEE_LDIND_R4) into an icall load. */
+#define NEW_TEMPLOAD_SOFT_FLOAT(cfg,bblock,ins,num,ip) do { \
+ if ((ins)->opcode == CEE_LDIND_R4) { \
+ int idx = (num); \
+ int temp; \
+ NEW_TEMPLOADA (cfg, (ins), (idx)); \
+ temp = handle_load_float (cfg, (bblock), (ins), ip); \
+ NEW_TEMPLOAD (cfg, (ins), (temp)); \
+ } \
+ } while (0)
+
+/* Rewrite an R4 temp store (CEE_STIND_R4) into an icall store of VAL. */
+#define NEW_TEMPSTORE_SOFT_FLOAT(cfg,bblock,ins,num,val,ip) do { \
+ if ((ins)->opcode == CEE_STIND_R4) { \
+ int idx = (num); \
+ NEW_TEMPLOADA (cfg, (ins), (idx)); \
+ handle_store_float ((cfg), (bblock), (ins), (val), (ip)); \
+ } \
+ } while (0)
+
+#else
+
+/* Hard-float targets: the soft-float rewrites are no-ops. */
+#define LDLOC_SOFT_FLOAT(cfg,ins,idx,ip)
+#define STLOC_SOFT_FLOAT(cfg,ins,idx,ip)
+#define LDARG_SOFT_FLOAT(cfg,ins,idx,ip)
+#define STARG_SOFT_FLOAT(cfg,ins,idx,ip)
+#define NEW_TEMPLOAD_SOFT_FLOAT(cfg,bblock,ins,num,ip)
+#define NEW_TEMPSTORE_SOFT_FLOAT(cfg,bblock,ins,num,val,ip)
+#endif
+
#if 0
static const char
return mono_type_to_ldind (type);
}
-static guint
+guint
mini_type_to_stind (MonoCompile* cfg, MonoType *type)
{
if (cfg->generic_sharing_context && !type->byref) {
{
switch (opcode) {
case OP_ADD_IMM:
- return OP_PADD;
+#if SIZEOF_VOID_P == 4
+ return OP_IADD;
+#else
+ return OP_LADD;
+#endif
case OP_IADD_IMM:
return OP_IADD;
case OP_LADD_IMM:
return OP_COMPARE;
case OP_ICOMPARE_IMM:
return OP_ICOMPARE;
+ case OP_LOCALLOC_IMM:
+ return OP_LOCALLOC;
default:
printf ("%s\n", mono_inst_name (opcode));
g_assert_not_reached ();
+ return -1;
}
}
* Replace the OP_.._IMM INS with its non IMM variant.
*/
void
-mono_decompose_op_imm (MonoCompile *cfg, MonoInst *ins)
+mono_decompose_op_imm (MonoCompile *cfg, MonoBasicBlock *bb, MonoInst *ins)
{
MonoInst *temp;
MONO_INST_NEW (cfg, temp, OP_ICONST);
temp->inst_c0 = ins->inst_imm;
temp->dreg = mono_regstate_next_int (cfg->rs);
- MONO_INST_LIST_ADD_TAIL (&(temp)->node, &(ins)->node);
+ mono_bblock_insert_before_ins (bb, ins, temp);
ins->opcode = mono_op_imm_to_op (ins->opcode);
- ins->sreg2 = temp->dreg;
+ if (ins->opcode == OP_LOCALLOC)
+ ins->sreg1 = temp->dreg;
+ else
+ ins->sreg2 = temp->dreg;
+
+ bb->max_vreg = MAX (bb->max_vreg, cfg->rs->next_vreg);
}
/*
return cfg->rgctx_var;
}
+/*
+ * set_vreg_to_inst:
+ *
+ * Record INST as the variable for virtual register VREG in
+ * cfg->vreg_to_inst, growing the table (doubling, starting at 32 entries)
+ * when VREG is out of range. The old entries are copied into the freshly
+ * mempool-allocated table since mempool memory cannot be realloc'd.
+ */
+static void
+set_vreg_to_inst (MonoCompile *cfg, int vreg, MonoInst *inst)
+{
+ if (vreg >= cfg->vreg_to_inst_len) {
+ MonoInst **tmp = cfg->vreg_to_inst;
+ int size = cfg->vreg_to_inst_len;
+
+ while (vreg >= cfg->vreg_to_inst_len)
+ cfg->vreg_to_inst_len = cfg->vreg_to_inst_len ? cfg->vreg_to_inst_len * 2 : 32;
+ cfg->vreg_to_inst = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoInst*) * cfg->vreg_to_inst_len);
+ if (size)
+ memcpy (cfg->vreg_to_inst, tmp, size * sizeof (MonoInst*));
+ }
+ cfg->vreg_to_inst [vreg] = inst;
+}
+
+#define mono_type_is_long(type) (!(type)->byref && ((mono_type_get_underlying_type (type)->type == MONO_TYPE_I8) || (mono_type_get_underlying_type (type)->type == MONO_TYPE_U8)))
+#define mono_type_is_float(type) (!(type)->byref && (((type)->type == MONO_TYPE_R8) || ((type)->type == MONO_TYPE_R4)))
+
+/*
+ * mono_compile_create_var_for_vreg:
+ *
+ * Create a variable of TYPE backed by virtual register VREG (-1 for none).
+ * On 32 bit targets, long — and, under soft-float, float — variables occupy
+ * a register pair, so two extra dummy OP_LOCAL entries are registered for
+ * vreg+1 and vreg+2 in vreg_to_inst without entering cfg->varinfo.
+ */
MonoInst*
-mono_compile_create_var (MonoCompile *cfg, MonoType *type, int opcode)
+mono_compile_create_var_for_vreg (MonoCompile *cfg, MonoType *type, int opcode, int vreg)
{
MonoInst *inst;
int num = cfg->num_varinfo;
+ gboolean regpair;
if ((num + 1) >= cfg->varinfo_count) {
int orig_count = cfg->varinfo_count;
- cfg->varinfo_count = (cfg->varinfo_count + 2) * 2;
+ cfg->varinfo_count = cfg->varinfo_count ? (cfg->varinfo_count * 2) : 64;
cfg->varinfo = (MonoInst **)g_realloc (cfg->varinfo, sizeof (MonoInst*) * cfg->varinfo_count);
cfg->vars = (MonoMethodVar *)g_realloc (cfg->vars, sizeof (MonoMethodVar) * cfg->varinfo_count);
memset (&cfg->vars [orig_count], 0, (cfg->varinfo_count - orig_count) * sizeof (MonoMethodVar));
}
- /*g_print ("created temp %d of type 0x%x\n", num, type->type);*/
mono_jit_stats.allocate_var++;
MONO_INST_NEW (cfg, inst, opcode);
inst->inst_c0 = num;
inst->inst_vtype = type;
inst->klass = mono_class_from_mono_type (type);
+ type_to_eval_stack_type (cfg, type, inst);
/* if set to 1 the variable is native */
inst->backend.is_pinvoke = 0;
+ inst->dreg = vreg;
cfg->varinfo [num] = inst;
MONO_INIT_VARINFO (&cfg->vars [num], num);
+ if (vreg != -1)
+ set_vreg_to_inst (cfg, vreg, inst);
+
+#if SIZEOF_VOID_P == 4
+#ifdef MONO_ARCH_SOFT_FLOAT
+ regpair = mono_type_is_long (type) || mono_type_is_float (type);
+#else
+ regpair = mono_type_is_long (type);
+#endif
+#else
+ regpair = FALSE;
+#endif
+
+ if (regpair) {
+ MonoInst *tree;
+
+ /*
+ * These two cannot be allocated using create_var_for_vreg since that would
+ * put it into the cfg->varinfo array, confusing many parts of the JIT.
+ */
+
+ /*
+ * Set flags to VOLATILE so SSA skips it.
+ */
+
+ if (cfg->verbose_level >= 4) {
+ printf (" Create LVAR R%d (R%d, R%d)\n", inst->dreg, inst->dreg + 1, inst->dreg + 2);
+ }
+
+ /* Allocate a dummy MonoInst for the first vreg */
+ MONO_INST_NEW (cfg, tree, OP_LOCAL);
+ tree->dreg = inst->dreg + 1;
+ if (cfg->opt & MONO_OPT_SSA)
+ tree->flags = MONO_INST_VOLATILE;
+ tree->inst_c0 = num;
+ tree->type = STACK_I4;
+ tree->inst_vtype = &mono_defaults.int32_class->byval_arg;
+ tree->klass = mono_class_from_mono_type (tree->inst_vtype);
+
+ set_vreg_to_inst (cfg, inst->dreg + 1, tree);
+
+ /* Allocate a dummy MonoInst for the second vreg */
+ MONO_INST_NEW (cfg, tree, OP_LOCAL);
+ tree->dreg = inst->dreg + 2;
+ if (cfg->opt & MONO_OPT_SSA)
+ tree->flags = MONO_INST_VOLATILE;
+ tree->inst_c0 = num;
+ tree->type = STACK_I4;
+ tree->inst_vtype = &mono_defaults.int32_class->byval_arg;
+ tree->klass = mono_class_from_mono_type (tree->inst_vtype);
+
+ set_vreg_to_inst (cfg, inst->dreg + 2, tree);
+ }
+
cfg->num_varinfo++;
if (cfg->verbose_level > 2)
- g_print ("created temp %d of type %s\n", num, mono_type_get_name (type));
+ g_print ("created temp %d (R%d) of type %s\n", num, vreg, mono_type_get_name (type));
return inst;
}
+/*
+ * mono_compile_create_var:
+ *
+ * Create a variable of TYPE, allocating a fresh dreg for it: a STACK_I8
+ * vreg for 64 bit types, a STACK_R8 vreg for float types on soft-float
+ * targets, and a unified pointer-sized vreg for everything else.
+ */
+MonoInst*
+mono_compile_create_var (MonoCompile *cfg, MonoType *type, int opcode)
+{
+ int dreg;
+
+ if (mono_type_is_long (type))
+ dreg = mono_alloc_dreg (cfg, STACK_I8);
+#ifdef MONO_ARCH_SOFT_FLOAT
+ else if (mono_type_is_float (type))
+ dreg = mono_alloc_dreg (cfg, STACK_R8);
+#endif
+ else
+ /* All the others are unified */
+ dreg = mono_alloc_preg (cfg);
+
+ return mono_compile_create_var_for_vreg (cfg, type, opcode, dreg);
+}
+
/*
* Transform a MonoInst into a load from the variable of index var_index.
*/
return NULL;
}
+/*
+ * mono_add_ins_to_end:
+ *
+ * Same as MONO_ADD_INS, but add INST before any branches at the end of BB.
+ * For conditional branches, INST is also placed before the compare that
+ * feeds the branch when that pairing can be recognized.
+ */
void
mono_add_ins_to_end (MonoBasicBlock *bb, MonoInst *inst)
{
- MonoInst *last = mono_inst_list_last (&bb->ins_list);
+ int opcode;
- if (last && ((last->opcode >= CEE_BEQ &&
- last->opcode <= CEE_BLT_UN) ||
- last->opcode == OP_BR ||
- last->opcode == OP_SWITCH)) {
- MONO_INST_LIST_ADD_TAIL (&inst->node, &last->node);
- } else {
+ /* Empty bb: just append. */
+ if (!bb->code) {
MONO_ADD_INS (bb, inst);
+ return;
+ }
+
+ switch (bb->last_ins->opcode) {
+ case OP_BR:
+ case OP_BR_REG:
+ case CEE_BEQ:
+ case CEE_BGE:
+ case CEE_BGT:
+ case CEE_BLE:
+ case CEE_BLT:
+ case CEE_BNE_UN:
+ case CEE_BGE_UN:
+ case CEE_BGT_UN:
+ case CEE_BLE_UN:
+ case CEE_BLT_UN:
+ case OP_SWITCH:
+ mono_bblock_insert_before_ins (bb, bb->last_ins, inst);
+ break;
+ default:
+ if (MONO_IS_COND_BRANCH_OP (bb->last_ins)) {
+ /* Need to insert the ins before the compare */
+ if (bb->code == bb->last_ins) {
+ mono_bblock_insert_before_ins (bb, bb->last_ins, inst);
+ return;
+ }
+
+ if (bb->code->next == bb->last_ins) {
+ /* Only two instructions */
+ opcode = bb->code->opcode;
+
+ if ((opcode == OP_COMPARE) || (opcode == OP_COMPARE_IMM) || (opcode == OP_ICOMPARE) || (opcode == OP_ICOMPARE_IMM) || (opcode == OP_FCOMPARE) || (opcode == OP_LCOMPARE) || (opcode == OP_LCOMPARE_IMM)) {
+ /* NEW IR */
+ mono_bblock_insert_before_ins (bb, bb->code, inst);
+ } else {
+ mono_bblock_insert_before_ins (bb, bb->last_ins, inst);
+ }
+ } else {
+ opcode = bb->last_ins->prev->opcode;
+
+ if ((opcode == OP_COMPARE) || (opcode == OP_COMPARE_IMM) || (opcode == OP_ICOMPARE) || (opcode == OP_ICOMPARE_IMM) || (opcode == OP_FCOMPARE) || (opcode == OP_LCOMPARE) || (opcode == OP_LCOMPARE_IMM)) {
+ /* NEW IR */
+ mono_bblock_insert_before_ins (bb, bb->last_ins->prev, inst);
+ } else {
+ mono_bblock_insert_before_ins (bb, bb->last_ins, inst);
+ }
+ }
+ }
+ else
+ MONO_ADD_INS (bb, inst);
+ break;
+ }
+}
+
+/**
+ * mono_replace_ins:
+ *
+ * Replace INS with its decomposition which is stored in a series of bblocks starting
+ * at FIRST_BB and ending at LAST_BB. On enter, PREV points to the predecessor of INS.
+ * On return, it will be set to the last ins of the decomposition.
+ */
+void
+mono_replace_ins (MonoCompile *cfg, MonoBasicBlock *bb, MonoInst *ins, MonoInst **prev, MonoBasicBlock *first_bb, MonoBasicBlock *last_bb)
+{
+ MonoInst *next = ins->next;
+
+ if (next && next->opcode == OP_NOP) {
+ /* Avoid NOPs following branches */
+ ins->next = next->next;
+ next = next->next;
+ }
+
+ if (first_bb == last_bb) {
+ /*
+ * Only one replacement bb, merge the code into
+ * the current bb.
+ */
+
+ /* Delete links between the first_bb and its successors */
+ while (first_bb->out_count)
+ mono_unlink_bblock (cfg, first_bb, first_bb->out_bb [0]);
+
+ /* Head */
+ if (*prev) {
+ (*prev)->next = first_bb->code;
+ first_bb->code->prev = (*prev);
+ } else {
+ bb->code = first_bb->code;
+ }
+
+ /* Tail */
+ last_bb->last_ins->next = next;
+ if (next)
+ next->prev = last_bb->last_ins;
+ else
+ bb->last_ins = last_bb->last_ins;
+ *prev = last_bb->last_ins;
+ } else {
+ int i, count;
+ MonoBasicBlock **tmp_bblocks, *tmp;
+ MonoInst *last;
+
+ /* Multiple BBs */
+
+ /* Set region */
+ for (tmp = first_bb; tmp; tmp = tmp->next_bb)
+ tmp->region = bb->region;
+
+ /* Split the original bb */
+ if (ins->next)
+ ins->next->prev = NULL;
+ ins->next = NULL;
+ bb->last_ins = ins;
+
+ /* Merge the second part of the original bb into the last bb */
+ if (last_bb->last_ins) {
+ last_bb->last_ins->next = next;
+ if (next)
+ next->prev = last_bb->last_ins;
+ } else {
+ last_bb->code = next;
+ }
+
+ if (next) {
+ for (last = next; last->next != NULL; last = last->next)
+ ;
+ last_bb->last_ins = last;
+ }
+
+ for (i = 0; i < bb->out_count; ++i)
+ link_bblock (cfg, last_bb, bb->out_bb [i]);
+
+ /* Merge the first (dummy) bb to the original bb */
+ if (*prev) {
+ (*prev)->next = first_bb->code;
+ first_bb->code->prev = (*prev);
+ } else {
+ bb->code = first_bb->code;
+ }
+ bb->last_ins = first_bb->last_ins;
+
+ /* Delete the links between the original bb and its successors */
+ tmp_bblocks = bb->out_bb;
+ count = bb->out_count;
+ for (i = 0; i < count; ++i)
+ mono_unlink_bblock (cfg, bb, tmp_bblocks [i]);
+
+ /* Add links between the original bb and the first_bb's successors */
+ for (i = 0; i < first_bb->out_count; ++i) {
+ MonoBasicBlock *out_bb = first_bb->out_bb [i];
+
+ link_bblock (cfg, bb, out_bb);
+ }
+ /* Delete links between the first_bb and its successors */
+ /* NOTE(review): this loop walks BB's out-edge list (freshly rebuilt above)
+ * while unlinking FIRST_BB from those targets — confirm this direction is
+ * intended versus iterating first_bb->out_bb. */
+ for (i = 0; i < bb->out_count; ++i) {
+ MonoBasicBlock *out_bb = bb->out_bb [i];
+
+ mono_unlink_bblock (cfg, first_bb, out_bb);
+ }
+ last_bb->next_bb = bb->next_bb;
+ bb->next_bb = first_bb->next_bb;
+
+ *prev = NULL;
}
}
MonoInst **args, int calli, int virtual, const guint8 *ip, gboolean to_end)
{
MonoCallInst *call;
- MonoInst *arg, *n;
+ MonoInst *arg;
MONO_INST_NEW_CALL (cfg, call, ret_type_to_call_opcode (sig->ret, calli, virtual, cfg->generic_sharing_context));
call = mono_arch_call_opcode (cfg, bblock, call, virtual);
type_to_eval_stack_type (cfg, sig->ret, &call->inst);
- MONO_INST_LIST_FOR_EACH_ENTRY_SAFE (arg, n, &call->out_args, node) {
+ for (arg = call->out_args; arg;) {
+ MonoInst *narg = arg->next;
+ arg->next = NULL;
if (!arg->cil_code)
arg->cil_code = ip;
if (to_end)
mono_add_ins_to_end (bblock, arg);
else
MONO_ADD_INS (bblock, arg);
+ arg = narg;
}
return call;
}
static void
mono_emulate_opcode (MonoCompile *cfg, MonoInst *tree, MonoInst **iargs, MonoJitICallInfo *info)
{
- MonoInst *ins, *temp = NULL, *store, *load;
- MonoInstList *head, *list;
+ MonoInst *ins, *temp = NULL, *store, *load, *begin;
+ MonoInst *last_arg = NULL;
int nargs;
MonoCallInst *call;
//mono_print_tree_nl (tree);
MONO_INST_NEW_CALL (cfg, call, ret_type_to_call_opcode (info->sig->ret, FALSE, FALSE, cfg->generic_sharing_context));
ins = (MonoInst*)call;
- MONO_INST_LIST_INIT (&ins->node);
call->inst.cil_code = tree->cil_code;
call->args = iargs;
temp = mono_compile_create_var (cfg, info->sig->ret, OP_LOCAL);
temp->flags |= MONO_INST_IS_TEMP;
NEW_TEMPSTORE (cfg, store, temp->inst_c0, ins);
- MONO_INST_LIST_INIT (&store->node);
/* FIXME: handle CEE_STIND_R4 */
store->cil_code = tree->cil_code;
} else {
nargs = info->sig->param_count + info->sig->hasthis;
- if (nargs) {
- MONO_INST_LIST_ADD_TAIL (&store->node,
- &call->out_args);
- list = &call->out_args;
- } else {
- list = &store->node;
- }
+ for (last_arg = call->out_args; last_arg && last_arg->next; last_arg = last_arg->next) ;
+
+ if (nargs)
+ last_arg->next = store;
+
+ if (nargs)
+ begin = call->out_args;
+ else
+ begin = store;
if (cfg->prev_ins) {
/*
* node before it is called for its children. dec_foreach needs to
* take this into account.
*/
- head = &cfg->prev_ins->node;
+ store->next = cfg->prev_ins->next;
+ cfg->prev_ins->next = begin;
} else {
- head = &cfg->cbb->ins_list;
+ store->next = cfg->cbb->code;
+ cfg->cbb->code = begin;
}
- MONO_INST_LIST_SPLICE_INIT (list, head);
-
call->fptr = mono_icall_get_wrapper (info);
if (!MONO_TYPE_IS_VOID (info->sig->ret)) {
for (i = 0; i < arity; i++)
res->params [i + 1] = &mono_defaults.int_class->byval_arg;
- res->ret = &mono_defaults.int_class->byval_arg;
+ res->ret = &mono_defaults.object_class->byval_arg;
g_hash_table_insert (sighash, GINT_TO_POINTER (arity), res);
mono_jit_unlock ();
return res;
}
-#ifdef MONO_ARCH_SOFT_FLOAT
-static void
-handle_store_float (MonoCompile *cfg, MonoBasicBlock *bblock, MonoInst *ptr, MonoInst *val, const unsigned char *ip)
-{
- MonoInst *iargs [2];
- iargs [0] = val;
- iargs [1] = ptr;
-
- mono_emit_jit_icall (cfg, bblock, mono_fstore_r4, iargs, ip);
-}
-
-static int
-handle_load_float (MonoCompile *cfg, MonoBasicBlock *bblock, MonoInst *ptr, const unsigned char *ip)
+MonoJitICallInfo *
+mono_get_array_new_va_icall (int rank)
{
- MonoInst *iargs [1];
- iargs [0] = ptr;
+ MonoMethodSignature *esig;
+ char icall_name [256];
+ char *name;
+ MonoJitICallInfo *info;
- return mono_emit_jit_icall (cfg, bblock, mono_fload_r4, iargs, ip);
-}
+ /* Need to register the icall so it gets an icall wrapper */
+ sprintf (icall_name, "ves_array_new_va_%d", rank);
-#define LDLOC_SOFT_FLOAT(cfg,ins,idx,ip) do {\
- if (header->locals [(idx)]->type == MONO_TYPE_R4 && !header->locals [(idx)]->byref) { \
- int temp; \
- NEW_LOCLOADA (cfg, (ins), (idx)); \
- temp = handle_load_float (cfg, bblock, (ins), (ip)); \
- NEW_TEMPLOAD (cfg, (ins), temp); \
- } \
- } while (0)
-#define STLOC_SOFT_FLOAT(cfg,ins,idx,ip) do {\
- if (header->locals [(idx)]->type == MONO_TYPE_R4 && !header->locals [(idx)]->byref) { \
- int temp; \
- NEW_LOCLOADA (cfg, (ins), (idx)); \
- handle_store_float (cfg, bblock, (ins), *sp, (ip)); \
- MONO_INST_NEW (cfg, (ins), OP_NOP); \
- } \
- } while (0)
-#define LDARG_SOFT_FLOAT(cfg,ins,idx,ip) do {\
- if (param_types [(idx)]->type == MONO_TYPE_R4 && !param_types [(idx)]->byref) { \
- int temp; \
- NEW_ARGLOADA (cfg, (ins), (idx)); \
- temp = handle_load_float (cfg, bblock, (ins), (ip)); \
- NEW_TEMPLOAD (cfg, (ins), temp); \
- } \
- } while (0)
-#define STARG_SOFT_FLOAT(cfg,ins,idx,ip) do {\
- if (param_types [(idx)]->type == MONO_TYPE_R4 && !param_types [(idx)]->byref) { \
- int temp; \
- NEW_ARGLOADA (cfg, (ins), (idx)); \
- handle_store_float (cfg, bblock, (ins), *sp, (ip)); \
- MONO_INST_NEW (cfg, (ins), OP_NOP); \
- } \
- } while (0)
+ mono_jit_lock ();
+ info = mono_find_jit_icall_by_name (icall_name);
+ if (info == NULL) {
+ esig = mono_get_array_new_va_signature (rank);
+ name = g_strdup (icall_name);
+ info = mono_register_jit_icall (mono_array_new_va, name, esig, FALSE);
-#define NEW_TEMPLOAD_SOFT_FLOAT(cfg,bblock,ins,num) do { \
- if ((ins)->opcode == CEE_LDIND_R4) { \
- int idx = (num); \
- int temp; \
- NEW_TEMPLOADA (cfg, (ins), (idx)); \
- temp = handle_load_float (cfg, (bblock), (ins), ip); \
- NEW_TEMPLOAD (cfg, (ins), (temp)); \
- } \
- } while (0)
+ g_hash_table_insert (jit_icall_name_hash, name, name);
+ }
+ mono_jit_unlock ();
-#else
-#define LDLOC_SOFT_FLOAT(cfg,ins,idx,ip)
-#define STLOC_SOFT_FLOAT(cfg,ins,idx,ip)
-#define LDARG_SOFT_FLOAT(cfg,ins,idx,ip)
-#define STARG_SOFT_FLOAT(cfg,ins,idx,ip)
-#define NEW_TEMPLOAD_SOFT_FLOAT(cfg,bblock,ins,num)
-#endif
+ return info;
+}
static MonoMethod*
get_memcpy_method (void)
static int
handle_array_new (MonoCompile *cfg, MonoBasicBlock *bblock, int rank, MonoInst **sp, unsigned char *ip)
{
- MonoMethodSignature *esig;
- char icall_name [256];
- char *name;
MonoJitICallInfo *info;
- /* Need to register the icall so it gets an icall wrapper */
- sprintf (icall_name, "ves_array_new_va_%d", rank);
-
- mono_jit_lock ();
- info = mono_find_jit_icall_by_name (icall_name);
- if (info == NULL) {
- esig = mono_get_array_new_va_signature (rank);
- name = g_strdup (icall_name);
- info = mono_register_jit_icall (mono_array_new_va, name, esig, FALSE);
-
- g_hash_table_insert (jit_icall_name_hash, name, name);
- }
- mono_jit_unlock ();
+ info = mono_get_array_new_va_icall (rank);
cfg->flags |= MONO_CFG_HAS_VARARGS;
NEW_TEMPSTORE (cfg, store, cfg->got_var->inst_c0, get_got);
/* Add it to the start of the first bblock */
- MONO_INST_LIST_ADD (&store->node, &cfg->bb_entry->ins_list);
+ if (cfg->bb_entry->code) {
+ store->next = cfg->bb_entry->code;
+ cfg->bb_entry->code = store;
+ }
+ else
+ MONO_ADD_INS (cfg->bb_entry, store);
cfg->got_var_allocated = TRUE;
#define CODE_IS_STLOC(ip) (((ip) [0] >= CEE_STLOC_0 && (ip) [0] <= CEE_STLOC_3) || ((ip) [0] == CEE_STLOC_S))
-static gboolean
+gboolean
mini_class_is_system_array (MonoClass *klass)
{
if (klass->parent == mono_defaults.array_class)
mono_compile_create_var (cfg, cheader->locals [i], OP_LOCAL);
/* allocate starte and end blocks */
- NEW_BBLOCK (cfg, sbblock);
+ sbblock = NEW_BBLOCK (cfg);
sbblock->block_num = cfg->num_bblocks++;
sbblock->real_offset = real_offset;
- NEW_BBLOCK (cfg, ebblock);
+ ebblock = NEW_BBLOCK (cfg);
ebblock->block_num = cfg->num_bblocks++;
ebblock->real_offset = real_offset;
if (rvar) {
NEW_TEMPLOAD (cfg, ins, rvar->inst_c0);
- NEW_TEMPLOAD_SOFT_FLOAT (cfg, ebblock, ins, rvar->inst_c0);
+ NEW_TEMPLOAD_SOFT_FLOAT (cfg, ebblock, ins, rvar->inst_c0, ip);
*sp++ = ins;
}
*last_b = ebblock;
g_free (method_code);
}
+/*
+ * set_exception_object:
+ *
+ * Abort the compilation with a caller-supplied exception object. The
+ * cfg->exception_ptr slot is registered as a GC root so EXCEPTION is not
+ * collected while the MonoCompile holds the reference.
+ */
+static void
+set_exception_object (MonoCompile *cfg, MonoException *exception)
+{
+ cfg->exception_type = MONO_EXCEPTION_OBJECT_SUPPLIED;
+ MONO_GC_REGISTER_ROOT (cfg->exception_ptr);
+ cfg->exception_ptr = exception;
+}
+
static MonoInst*
get_runtime_generic_context (MonoCompile *cfg, MonoMethod *method, int context_used, MonoInst *this, unsigned char *ip)
{
g_assert (!method->klass->valuetype);
- if (method->flags & METHOD_ATTRIBUTE_STATIC) {
+ if (context_used & MONO_GENERIC_CONTEXT_USED_METHOD) {
+ MonoInst *mrgctx_loc, *mrgctx_var;
+
+ g_assert (!this);
+ g_assert (method->is_inflated && mono_method_get_context (method)->method_inst);
+
+ mrgctx_loc = mono_get_vtable_var (cfg);
+ NEW_TEMPLOAD (cfg, mrgctx_var, mrgctx_loc->inst_c0);
+
+ return mrgctx_var;
+ } else if (method->flags & METHOD_ATTRIBUTE_STATIC) {
MonoInst *vtable_loc, *vtable_var;
+ g_assert (!this);
+
vtable_loc = mono_get_vtable_var (cfg);
NEW_TEMPLOAD (cfg, vtable_var, vtable_loc->inst_c0);
+ if (method->is_inflated && mono_method_get_context (method)->method_inst) {
+ MonoInst *mrgctx_var = vtable_var;
+
+ g_assert (G_STRUCT_OFFSET (MonoMethodRuntimeGenericContext, class_vtable) == 0);
+
+ MONO_INST_NEW (cfg, vtable_var, CEE_LDIND_I);
+ vtable_var->cil_code = ip;
+ vtable_var->inst_left = mrgctx_var;
+ vtable_var->type = STACK_PTR;
+ }
+
return vtable_var;
} else {
MonoInst *vtable;
+ g_assert (this);
+
MONO_INST_NEW (cfg, vtable, CEE_LDIND_I);
vtable->inst_left = this;
vtable->type = STACK_PTR;
}
}
-static gpointer
-create_rgctx_lazy_fetch_trampoline (guint32 offset)
+gpointer
+mini_create_rgctx_lazy_fetch_trampoline (guint32 offset)
{
static gboolean inited = FALSE;
static int num_trampolines = 0;
MonoInst *rgc_ptr, guint32 slot, const unsigned char *ip)
{
MonoMethodSignature *sig = helper_sig_rgctx_lazy_fetch_trampoline;
- guint8 *tramp = create_rgctx_lazy_fetch_trampoline (slot);
+ guint8 *tramp = mini_create_rgctx_lazy_fetch_trampoline (slot);
int temp;
MonoInst *field;
static MonoInst*
get_runtime_generic_context_ptr (MonoCompile *cfg, MonoMethod *method, int context_used, MonoBasicBlock *bblock,
- MonoClass *klass, guint32 type_token, int token_source, MonoGenericContext *generic_context, MonoInst *rgctx,
- int rgctx_type, unsigned char *ip)
+ MonoClass *klass, MonoGenericContext *generic_context, MonoInst *rgctx, int rgctx_type, unsigned char *ip)
{
guint32 slot = mono_method_lookup_or_register_other_info (method,
context_used & MONO_GENERIC_CONTEXT_USED_METHOD, &klass->byval_arg, rgctx_type, generic_context);
return get_runtime_generic_context_other_table_ptr (cfg, bblock, rgctx, slot, ip);
}
+static MonoInst*
+get_runtime_generic_context_method_rgctx (MonoCompile *cfg, MonoMethod *method, int context_used, MonoBasicBlock *bblock,
+ MonoMethod *rgctx_method, MonoGenericContext *generic_context, MonoInst *rgctx, const unsigned char *ip)
+{
+ guint32 slot = mono_method_lookup_or_register_other_info (method,
+ context_used & MONO_GENERIC_CONTEXT_USED_METHOD, rgctx_method,
+ MONO_RGCTX_INFO_METHOD_RGCTX, generic_context);
+
+ return get_runtime_generic_context_other_table_ptr (cfg, bblock, rgctx, slot, ip);
+}
+
static gboolean
generic_class_is_reference_type (MonoCompile *cfg, MonoClass *klass)
{
return dest;
}
-static MonoObject*
-mono_object_castclass (MonoObject *obj, MonoClass *klass)
-{
- if (!obj)
- return NULL;
-
- if (mono_object_isinst (obj, klass))
- return obj;
-
- mono_raise_exception (mono_exception_from_name (mono_defaults.corlib,
- "System", "InvalidCastException"));
-
- return NULL;
-}
-
static int
emit_castclass (MonoClass *klass, guint32 token, int context_used, gboolean inst_is_castclass, MonoCompile *cfg,
MonoMethod *method, MonoInst **arg_array, MonoType **param_types, GList *dont_inline,
/* klass */
GET_RGCTX (rgctx, context_used);
args [1] = get_runtime_generic_context_ptr (cfg, method, context_used, bblock, klass,
- token, MINI_TOKEN_SOURCE_CLASS, generic_context, rgctx, MONO_RGCTX_INFO_KLASS, ip);
+ generic_context, rgctx, MONO_RGCTX_INFO_KLASS, ip);
temp = mono_emit_jit_icall (cfg, bblock, mono_object_castclass, args, ip);
NEW_TEMPLOAD (cfg, *sp, temp);
goto do_return;
}
-static gboolean
+static int
+emit_unbox (MonoClass *klass, guint32 token, int context_used,
+ MonoCompile *cfg, MonoMethod *method, MonoInst **arg_array, MonoType **param_types, GList *dont_inline,
+ unsigned char *end, MonoMethodHeader *header, MonoGenericContext *generic_context,
+ MonoBasicBlock **_bblock, unsigned char **_ip, MonoInst ***_sp, int *_inline_costs, guint *_real_offset)
+{
+ MonoBasicBlock *bblock = *_bblock;
+ unsigned char *ip = *_ip;
+ MonoInst **sp = *_sp;
+ int inline_costs = *_inline_costs;
+ guint real_offset = *_real_offset;
+ int return_value = 0;
+
+ MonoInst *add, *vtoffset, *ins;
+
+ /* Needed by the code generated in inssel.brg */
+ mono_get_got_var (cfg);
+
+ if (context_used) {
+ MonoInst *rgctx, *element_class;
+
+ /* This assertion is from the unboxcast insn */
+ g_assert (klass->rank == 0);
+
+ GET_RGCTX (rgctx, context_used);
+ element_class = get_runtime_generic_context_ptr (cfg, method, context_used, bblock,
+ klass->element_class, generic_context, rgctx, MONO_RGCTX_INFO_KLASS, ip);
+
+ MONO_INST_NEW (cfg, ins, OP_UNBOXCAST_REG);
+ ins->type = STACK_OBJ;
+ ins->inst_left = *sp;
+ ins->inst_right = element_class;
+ ins->klass = klass;
+ ins->cil_code = ip;
+ } else {
+ MONO_INST_NEW (cfg, ins, OP_UNBOXCAST);
+ ins->type = STACK_OBJ;
+ ins->inst_left = *sp;
+ ins->klass = klass;
+ ins->inst_newa_class = klass;
+ ins->cil_code = ip;
+ }
+
+ MONO_INST_NEW (cfg, add, OP_PADD);
+ NEW_ICONST (cfg, vtoffset, sizeof (MonoObject));
+ add->inst_left = ins;
+ add->inst_right = vtoffset;
+ add->type = STACK_MP;
+ add->klass = klass;
+ *sp = add;
+
+do_return:
+ *_bblock = bblock;
+ *_ip = ip;
+ *_sp = sp;
+ *_inline_costs = inline_costs;
+ *_real_offset = real_offset;
+ return return_value;
+exception_exit:
+ return_value = -2;
+ goto do_return;
+}
+
+gboolean
mini_assembly_can_skip_verification (MonoDomain *domain, MonoMethod *method)
{
MonoAssembly *assembly = method->klass->image->assembly;
*
* Returns true if the method is invalid.
*/
-static gboolean
+gboolean
mini_method_verify (MonoCompile *cfg, MonoMethod *method)
{
GSList *tmp, *res;
if (!cfg->generic_sharing_context)
g_assert (!sig->has_type_parameters);
+ if (sig->generic_param_count && method->wrapper_type == MONO_WRAPPER_NONE) {
+ g_assert (method->is_inflated);
+ g_assert (mono_method_get_context (method)->method_inst);
+ }
+ if (method->is_inflated && mono_method_get_context (method)->method_inst)
+ g_assert (sig->generic_param_count);
+
if (cfg->method == method)
real_offset = 0;
else
cfg->coverage_info = mono_profiler_coverage_alloc (cfg->method, header->code_size);
/* ENTRY BLOCK */
- NEW_BBLOCK (cfg, start_bblock);
- cfg->bb_entry = start_bblock;
+ cfg->bb_entry = start_bblock = NEW_BBLOCK (cfg);
start_bblock->cil_code = NULL;
start_bblock->cil_length = 0;
start_bblock->block_num = cfg->num_bblocks++;
/* EXIT BLOCK */
- NEW_BBLOCK (cfg, end_bblock);
- cfg->bb_exit = end_bblock;
+ cfg->bb_exit = end_bblock = NEW_BBLOCK (cfg);
end_bblock->cil_code = NULL;
end_bblock->cil_length = 0;
end_bblock->block_num = cfg->num_bblocks++;
clause->data.catch_class &&
cfg->generic_sharing_context &&
mono_class_check_context_used (clause->data.catch_class)) {
+ if (mono_method_get_context (method)->method_inst)
+ GENERIC_SHARING_FAILURE (CEE_NOP);
+
/*
* In shared generic code with catch
* clauses containing type variables
* the exception handling code has to
* be able to get to the rgctx.
* Therefore we have to make sure that
- * the rgctx argument (for static
- * methods) or the "this" argument
- * (for non-static methods) are live.
+ * the vtable/mrgctx argument (for
+ * static or generic methods) or the
+ * "this" argument (for non-static
+ * methods) are live.
*/
- if (method->flags & METHOD_ATTRIBUTE_STATIC) {
+ if ((method->flags & METHOD_ATTRIBUTE_STATIC) ||
+ mini_method_get_context (method)->method_inst) {
mono_get_vtable_var (cfg);
} else {
MonoInst *this, *dummy_use;
}
/* FIRST CODE BLOCK */
- NEW_BBLOCK (cfg, bblock);
+ bblock = NEW_BBLOCK (cfg);
bblock->cil_code = ip;
ADD_BBLOCK (cfg, bblock);
if ((header->init_locals || (cfg->method == method && (cfg->opt & MONO_OPT_SHARED))) || cfg->compile_aot || security || pinvoke) {
/* we use a separate basic block for the initialization code */
- NEW_BBLOCK (cfg, init_localsbb);
- cfg->bb_init = init_localsbb;
+ cfg->bb_init = init_localsbb = NEW_BBLOCK (cfg);
init_localsbb->real_offset = real_offset;
start_bblock->next_bb = init_localsbb;
init_localsbb->next_bb = bblock;
gboolean pass_imt_from_rgctx = FALSE;
MonoInst *imt_arg = NULL;
gboolean pass_vtable = FALSE;
+ gboolean pass_mrgctx = FALSE;
MonoInst *vtable_arg = NULL;
+ gboolean check_this = FALSE;
CHECK_OPSIZE (5);
token = read32 (ip + 1);
if (!cfg->generic_sharing_context && cmethod && cmethod->klass->generic_container)
UNVERIFIED;
+ if (!cfg->generic_sharing_context && cmethod)
+ g_assert (!mono_method_check_context_used (cmethod));
+
CHECK_STACK (n);
//g_assert (!virtual || fsig->hasthis);
* context is sharable (and it's not a
* generic method).
*/
- if (sharing_enabled && context_sharable)
+ if (sharing_enabled && context_sharable &&
+ !mini_method_get_context (cmethod)->method_inst)
pass_vtable = TRUE;
}
+ if (cmethod && mini_method_get_context (cmethod) &&
+ mini_method_get_context (cmethod)->method_inst) {
+ gboolean sharing_enabled = mono_class_generic_sharing_enabled (cmethod->klass);
+ MonoGenericContext *context = mini_method_get_context (cmethod);
+ gboolean context_sharable = mono_generic_context_is_sharable (context, TRUE);
+
+ g_assert (!pass_vtable);
+
+ if (sharing_enabled && context_sharable)
+ pass_mrgctx = TRUE;
+ }
+
if (cfg->generic_sharing_context && cmethod) {
MonoGenericContext *cmethod_context = mono_method_get_context (cmethod);
context_used = mono_method_check_context_used (cmethod);
- if (context_used & MONO_GENERIC_CONTEXT_USED_METHOD)
- GENERIC_SHARING_FAILURE (*ip);
if (context_used && (cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
/* Generic method interface
calls are resolved via a
pass_imt_from_rgctx = TRUE;
}
- if (context_used &&
- (cmethod_context && cmethod_context->method_inst && cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL)) {
- GENERIC_SHARING_FAILURE (*ip);
- }
+ /*
+ * If a shared method calls another
+ * shared method then the caller must
+ * have a generic sharing context
+ * because the magic trampoline
+ * requires it. FIXME: We shouldn't
+ * have to force the vtable/mrgctx
+ * variable here. Instead there
+ * should be a flag in the cfg to
+ * request a generic sharing context.
+ */
+ if (context_used && method->flags & METHOD_ATTRIBUTE_STATIC)
+ mono_get_vtable_var (cfg);
}
if (pass_vtable) {
GET_RGCTX (rgctx, context_used);
vtable_arg = get_runtime_generic_context_ptr (cfg, method, context_used,
- bblock, cmethod->klass,
- token, MINI_TOKEN_SOURCE_METHOD, generic_context,
- rgctx, MONO_RGCTX_INFO_VTABLE, ip);
+ bblock, cmethod->klass, generic_context, rgctx, MONO_RGCTX_INFO_VTABLE, ip);
} else {
MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
}
}
+ if (pass_mrgctx) {
+ g_assert (!vtable_arg);
+
+ if (context_used) {
+ MonoInst *rgctx;
+
+ GET_RGCTX (rgctx, context_used);
+ vtable_arg = get_runtime_generic_context_method_rgctx (cfg, method,
+ context_used, bblock, cmethod, generic_context, rgctx, ip);
+ } else {
+ MonoMethodRuntimeGenericContext *mrgctx;
+
+ mrgctx = mono_method_lookup_rgctx (mono_class_vtable (cfg->domain, cmethod->klass),
+ mini_method_get_context (cmethod)->method_inst);
+
+ NEW_PCONST (cfg, vtable_arg, mrgctx);
+ }
+
+ if (!(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) ||
+ (cmethod->flags & METHOD_ATTRIBUTE_FINAL)) {
+ if (virtual)
+ check_this = TRUE;
+ virtual = 0;
+ }
+ }
+
if (pass_imt_from_rgctx) {
MonoInst *rgctx;
generic_context, rgctx, MONO_RGCTX_INFO_METHOD, ip);
}
+ if (check_this) {
+ MonoInst *check;
+
+ MONO_INST_NEW (cfg, check, OP_CHECK_THIS_PASSTHROUGH);
+ check->cil_code = ip;
+ check->inst_left = sp [0];
+ check->type = sp [0]->type;
+ sp [0] = check;
+ }
+
if (cmethod && virtual &&
(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) &&
!((cmethod->flags & METHOD_ATTRIBUTE_FINAL) &&
}
/* FIXME: runtime generic context pointer for jumps? */
+ /* FIXME: handle this for generic sharing eventually */
if ((ins_flag & MONO_INST_TAILCALL) && !cfg->generic_sharing_context && !vtable_arg && cmethod && (*ip == CEE_CALL) &&
(mono_metadata_signature_equal (mono_method_signature (method), mono_method_signature (cmethod)))) {
int i;
- GENERIC_SHARING_FAILURE (*ip);
-
/* Prevent inlining of methods with tail calls (the call stack would be altered) */
INLINE_FAILURE;
/* FIXME: This assumes the two methods has the same number and type of arguments */
handle_loaded_temps (cfg, bblock, stack_start, sp);
- if ((cfg->opt & MONO_OPT_INLINE) && cmethod &&
+ if ((cfg->opt & MONO_OPT_INLINE) && cmethod &&
(!virtual || !(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) || (cmethod->flags & METHOD_ATTRIBUTE_FINAL)) &&
mono_method_check_inlining (cfg, cmethod) &&
!g_list_find (dont_inline, cmethod)) {
they are not shared! */
if (context_used &&
(cmethod->klass->valuetype ||
- (cmethod->is_inflated && mono_method_get_context (cmethod)->method_inst) ||
+ (cmethod->is_inflated && mono_method_get_context (cmethod)->method_inst && !pass_mrgctx) ||
((cmethod->flags & METHOD_ATTRIBUTE_STATIC) &&
mono_class_generic_sharing_enabled (cmethod->klass)) ||
- (!imt_arg && !mono_method_is_generic_sharable_impl (cmethod) &&
+ (!imt_arg && !mono_method_is_generic_sharable_impl (cmethod, TRUE) &&
(!virtual || cmethod->flags & METHOD_ATTRIBUTE_FINAL ||
!(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL))))) {
MonoInst *rgctx;
temp = mono_emit_rgctx_calli_spilled (cfg, bblock, fsig, sp, addr, vtable_arg, ip);
if (temp != -1) {
NEW_TEMPLOAD (cfg, *sp, temp);
+ NEW_TEMPLOAD_SOFT_FLOAT (cfg, bblock, *sp, temp, ip);
sp++;
}
}
*/
if ((ins->opcode == OP_LSHR_UN) && (ins->type == STACK_I8)
&& (ins->inst_right->opcode == OP_ICONST) && (ins->inst_right->inst_c0 == 32)) {
- ins->opcode = OP_LONG_SHRUN_32;
+ ins->opcode = OP_LSHR_UN_32;
/*g_print ("applied long shr speedup to %s\n", cfg->method->name);*/
ip++;
break;
if (!mono_class_init (cmethod->klass))
goto load_error;
- if (cfg->generic_sharing_context) {
+ if (cfg->generic_sharing_context)
context_used = mono_method_check_context_used (cmethod);
- if (context_used & MONO_GENERIC_CONTEXT_USED_METHOD)
- GENERIC_SHARING_FAILURE (CEE_NEWOBJ);
- }
-
if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS) {
if (check_linkdemand (cfg, method, cmethod, bblock, ip))
INLINE_FAILURE;
g_assert (!context_used);
NEW_METHODCONST (cfg, *sp, cmethod);
- temp = handle_array_new (cfg, bblock, fsig->param_count, sp, ip);
+
+ if (fsig->param_count == 2)
+ /* Avoid varargs in the common case */
+ temp = mono_emit_jit_icall (cfg, bblock, mono_array_new_2, sp, ip);
+ else
+ temp = handle_array_new (cfg, bblock, fsig->param_count, sp, ip);
} else if (cmethod->string_ctor) {
g_assert (!context_used);
else
rgctx_info = MONO_RGCTX_INFO_VTABLE;
data = get_runtime_generic_context_ptr (cfg, method, context_used, bblock,
- cmethod->klass, token, MINI_TOKEN_SOURCE_METHOD, generic_context,
- rgctx, rgctx_info, ip);
+ cmethod->klass, generic_context, rgctx, rgctx_info, ip);
temp = handle_alloc_from_inst (cfg, bblock, cmethod->klass, data, FALSE, ip);
NEW_TEMPLOAD (cfg, *sp, temp);
}
} else if (context_used &&
(cmethod->klass->valuetype ||
- !mono_method_is_generic_sharable_impl (cmethod))) {
+ !mono_method_is_generic_sharable_impl (cmethod, TRUE))) {
MonoInst *rgctx, *cmethod_addr;
g_assert (!callvirt_this_arg);
if (sp [0]->type != STACK_OBJ)
UNVERIFIED;
- if (cfg->generic_sharing_context) {
+ if (cfg->generic_sharing_context)
context_used = mono_class_check_context_used (klass);
- if (context_used & MONO_GENERIC_CONTEXT_USED_METHOD)
- GENERIC_SHARING_FAILURE (CEE_ISINST);
- }
-
/* Needed by the code generated in inssel.brg */
if (!context_used)
mono_get_got_var (cfg);
/* klass */
GET_RGCTX (rgctx, context_used);
args [1] = get_runtime_generic_context_ptr (cfg, method, context_used, bblock, klass,
- token, MINI_TOKEN_SOURCE_CLASS, generic_context, rgctx, MONO_RGCTX_INFO_KLASS, ip);
+ generic_context, rgctx, MONO_RGCTX_INFO_KLASS, ip);
temp = mono_emit_jit_icall (cfg, bblock, mono_object_isinst, args, ip);
NEW_TEMPLOAD (cfg, *sp, temp);
}
break;
case CEE_UNBOX_ANY: {
- MonoInst *add, *vtoffset;
MonoInst *iargs [3];
guint32 align;
klass = mini_get_class (method, token, generic_context);
CHECK_TYPELOAD (klass);
- if (cfg->generic_sharing_context) {
+ if (cfg->generic_sharing_context)
context_used = mono_class_check_context_used (klass);
- if (context_used & MONO_GENERIC_CONTEXT_USED_METHOD)
- GENERIC_SHARING_FAILURE (CEE_UNBOX_ANY);
- }
-
if (generic_class_is_reference_type (cfg, klass)) {
switch (emit_castclass (klass, token, context_used, FALSE,
cfg, method, arg_array, param_types, dont_inline, end, header,
break;
}
- /* Needed by the code generated in inssel.brg */
- mono_get_got_var (cfg);
-
- if (context_used) {
- MonoInst *rgctx, *element_class;
-
- /* This assertion is from the
- unboxcast insn */
- g_assert (klass->rank == 0);
-
- GET_RGCTX (rgctx, context_used);
- /* FIXME: Passing token here is
- technically not correct, because we
- don't use klass but
- klass->element_class. Since it's
- only used by code for debugging the
- extensible runtime generic context
- it's not a big deal. To be correct
- we'd have to invent a new token
- source. */
- element_class = get_runtime_generic_context_ptr (cfg, method, context_used, bblock,
- klass->element_class, token, MINI_TOKEN_SOURCE_CLASS,
- generic_context, rgctx, MONO_RGCTX_INFO_KLASS, ip);
-
- MONO_INST_NEW (cfg, ins, OP_UNBOXCAST_REG);
- ins->type = STACK_OBJ;
- ins->inst_left = *sp;
- ins->inst_right = element_class;
- ins->klass = klass;
- } else {
- MONO_INST_NEW (cfg, ins, OP_UNBOXCAST);
- ins->type = STACK_OBJ;
- ins->inst_left = *sp;
- ins->klass = klass;
- ins->inst_newa_class = klass;
+ switch (emit_unbox (klass, token, context_used,
+ cfg, method, arg_array, param_types, dont_inline, end, header,
+ generic_context, &bblock, &ip, &sp, &inline_costs, &real_offset)) {
+ case 0: break;
+ case -1: goto unverified;
+ case -2: goto exception_exit;
+ default: g_assert_not_reached ();
}
-
- MONO_INST_NEW (cfg, add, OP_PADD);
- NEW_ICONST (cfg, vtoffset, sizeof (MonoObject));
- add->inst_left = ins;
- add->inst_right = vtoffset;
- add->type = STACK_MP;
- add->klass = mono_defaults.object_class;
- *sp = add;
ip += 5;
/* LDOBJ impl */
n = mono_class_value_size (klass, &align);
inline_costs += 2;
break;
}
- case CEE_UNBOX: {
- MonoInst *add, *vtoffset;
-
+ case CEE_UNBOX:
CHECK_STACK (1);
--sp;
CHECK_OPSIZE (5);
klass = mini_get_class (method, token, generic_context);
CHECK_TYPELOAD (klass);
- if (cfg->generic_sharing_context) {
+ if (cfg->generic_sharing_context)
context_used = mono_class_check_context_used (klass);
- GENERIC_SHARING_FAILURE (CEE_UNBOX);
- }
if (mono_class_is_nullable (klass)) {
- int v = handle_unbox_nullable (cfg, method, context_used, bblock, *sp, ip, klass,
- generic_context, NULL);
+ int v;
+ MonoInst *rgctx = NULL;
+
+ if (context_used)
+ GET_RGCTX (rgctx, context_used);
+ v = handle_unbox_nullable (cfg, method, context_used, bblock, *sp, ip, klass,
+ generic_context, rgctx);
NEW_TEMPLOAD (cfg, *sp, v);
sp ++;
ip += 5;
break;
}
- /* Needed by the code generated in inssel.brg */
- mono_get_got_var (cfg);
+ switch (emit_unbox (klass, token, context_used,
+ cfg, method, arg_array, param_types, dont_inline, end, header,
+ generic_context, &bblock, &ip, &sp, &inline_costs, &real_offset)) {
+ case 0: break;
+ case -1: goto unverified;
+ case -2: goto exception_exit;
+ default: g_assert_not_reached ();
+ }
- MONO_INST_NEW (cfg, ins, OP_UNBOXCAST);
- ins->type = STACK_OBJ;
- ins->inst_left = *sp;
- ins->klass = klass;
- ins->inst_newa_class = klass;
-
- MONO_INST_NEW (cfg, add, OP_PADD);
- NEW_ICONST (cfg, vtoffset, sizeof (MonoObject));
- add->inst_left = ins;
- add->inst_right = vtoffset;
- add->type = STACK_MP;
- add->klass = klass;
- *sp++ = add;
+ sp++;
ip += 5;
inline_costs += 2;
break;
- }
case CEE_CASTCLASS:
CHECK_STACK (1);
--sp;
if (sp [0]->type != STACK_OBJ)
UNVERIFIED;
- if (cfg->generic_sharing_context) {
+ if (cfg->generic_sharing_context)
context_used = mono_class_check_context_used (klass);
- if (context_used & MONO_GENERIC_CONTEXT_USED_METHOD)
- GENERIC_SHARING_FAILURE (CEE_CASTCLASS);
- }
-
switch (emit_castclass (klass, token, context_used, TRUE,
cfg, method, arg_array, param_types, dont_inline, end, header,
generic_context, &bblock, &ip, &sp, &inline_costs, &real_offset)) {
} else {
temp = mono_emit_method_call_spilled (cfg, bblock, wrapper, mono_method_signature (wrapper), iargs, ip, NULL);
NEW_TEMPLOAD (cfg, *sp, temp);
- NEW_TEMPLOAD_SOFT_FLOAT (cfg, bblock, *sp, temp);
+ NEW_TEMPLOAD_SOFT_FLOAT (cfg, bblock, *sp, temp, ip);
sp++;
}
} else {
case CEE_LDSFLDA:
case CEE_STSFLD: {
MonoClassField *field;
+ gboolean is_special_static;
gpointer addr = NULL;
CHECK_OPSIZE (5);
GENERIC_SHARING_FAILURE (*ip);
#endif
- if (cfg->generic_sharing_context) {
+ if (cfg->generic_sharing_context)
context_used = mono_class_check_context_used (klass);
- if (context_used & MONO_GENERIC_CONTEXT_USED_METHOD ||
- klass->valuetype)
- GENERIC_SHARING_FAILURE (*ip);
- }
-
g_assert (!(field->type->attrs & FIELD_ATTRIBUTE_LITERAL));
if ((*ip) == CEE_STSFLD)
handle_loaded_temps (cfg, bblock, stack_start, sp);
- /* The special_static_fields field is init'd in mono_class_vtable, so it needs
- * to be called here.
- */
- if (!(cfg->opt & MONO_OPT_SHARED)) {
- mono_class_vtable (cfg->domain, klass);
- CHECK_TYPELOAD (klass);
- }
- mono_domain_lock (cfg->domain);
- if (cfg->domain->special_static_fields)
- addr = g_hash_table_lookup (cfg->domain->special_static_fields, field);
- mono_domain_unlock (cfg->domain);
+ is_special_static = mono_class_field_is_special_static (field);
- if ((cfg->opt & MONO_OPT_SHARED) || (cfg->compile_aot && addr)) {
+ if ((cfg->opt & MONO_OPT_SHARED) ||
+ (cfg->compile_aot && is_special_static) ||
+ (context_used && is_special_static)) {
int temp;
MonoInst *iargs [2];
- MonoInst *domain_var;
g_assert (field->parent);
- /* avoid depending on undefined C behavior in sequence points */
- domain_var = mono_get_domainvar (cfg);
- NEW_TEMPLOAD (cfg, iargs [0], domain_var->inst_c0);
+ if ((cfg->opt & MONO_OPT_SHARED) || cfg->compile_aot) {
+ MonoInst *domain_var;
+ /* avoid depending on undefined C behavior in sequence points */
+ domain_var = mono_get_domainvar (cfg);
+ NEW_TEMPLOAD (cfg, iargs [0], domain_var->inst_c0);
+ } else {
+ NEW_DOMAINCONST (cfg, iargs [0]);
+ }
if (context_used) {
MonoInst *rgctx;
GET_RGCTX (rgctx, context_used);
vtable = get_runtime_generic_context_ptr (cfg, method, context_used, bblock, klass,
- token, MINI_TOKEN_SOURCE_FIELD, generic_context,
- rgctx, MONO_RGCTX_INFO_VTABLE, ip);
+ generic_context, rgctx, MONO_RGCTX_INFO_VTABLE, ip);
call = mono_emit_call_args (cfg, bblock, sig, NULL, FALSE, FALSE, ip, FALSE);
call->inst.opcode = OP_TRAMPCALL_VTABLE;
*/
GET_RGCTX (rgctx, context_used);
static_data = get_runtime_generic_context_ptr (cfg, method, context_used, bblock, klass,
- token, MINI_TOKEN_SOURCE_FIELD, generic_context,
- rgctx, MONO_RGCTX_INFO_STATIC_DATA, ip);
+ generic_context, rgctx, MONO_RGCTX_INFO_STATIC_DATA, ip);
if (field->offset == 0) {
ins = static_data;
vtable = mono_class_vtable (cfg->domain, klass);
CHECK_TYPELOAD (klass);
- if (!addr) {
+ if (!is_special_static) {
if (mini_field_access_needs_cctor_run (cfg, method, vtable) && !(g_slist_find (class_inits, vtable))) {
guint8 *tramp = mono_create_class_init_trampoline (vtable);
mono_emit_native_call (cfg, bblock, tramp,
class_inits = g_slist_prepend (class_inits, vtable);
} else {
if (cfg->run_cctors) {
+ MonoException *ex;
/* This makes so that inline cannot trigger */
/* .cctors: too many apps depend on them */
/* running with a specific order... */
if (! vtable->initialized)
INLINE_FAILURE;
- mono_runtime_class_init (vtable);
+ ex = mono_runtime_class_init_full (vtable, FALSE);
+ if (ex) {
+ set_exception_object (cfg, ex);
+ goto exception_exit;
+ }
}
}
addr = (char*)vtable->data + field->offset;
else
NEW_PCONST (cfg, ins, addr);
} else {
+ int temp;
+ MonoInst *iargs [1];
+
+ /* mono_class_vtable () initializes the
+ * special_static_fields table, so it
+ * must be called here before the
+ * hash-table lookup below.
+ */
+ if (!(cfg->opt & MONO_OPT_SHARED)) {
+ mono_class_vtable (cfg->domain, klass);
+ CHECK_TYPELOAD (klass);
+ }
+ mono_domain_lock (cfg->domain);
+ if (cfg->domain->special_static_fields)
+ addr = g_hash_table_lookup (cfg->domain->special_static_fields, field);
+ mono_domain_unlock (cfg->domain);
+
/*
* insert call to mono_threads_get_static_data (GPOINTER_TO_UINT (addr))
* This could be later optimized to do just a couple of
* memory dereferences with constant offsets.
*/
- int temp;
- MonoInst *iargs [1];
NEW_ICONST (cfg, iargs [0], GPOINTER_TO_UINT (addr));
temp = mono_emit_jit_icall (cfg, bblock, mono_get_special_static_data, iargs, ip);
NEW_TEMPLOAD (cfg, ins, temp);
MONO_ADD_INS (bblock, store);
} else {
gboolean is_const = FALSE;
- MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
+ MonoVTable *vtable = NULL;
+
+ if (!context_used)
+ vtable = mono_class_vtable (cfg->domain, klass);
CHECK_TYPELOAD (klass);
if (!context_used && !((cfg->opt & MONO_OPT_SHARED) || cfg->compile_aot) &&
klass = mini_get_class (method, token, generic_context);
CHECK_TYPELOAD (klass);
- if (cfg->generic_sharing_context) {
- context_used = mono_class_check_context_used (klass);
-
- if (context_used & MONO_GENERIC_CONTEXT_USED_METHOD)
- GENERIC_SHARING_FAILURE (*ip);
- }
+ if (cfg->generic_sharing_context)
+ context_used = mono_class_check_context_used (klass);
if (generic_class_is_reference_type (cfg, klass)) {
*sp++ = val;
else
rgctx_info = MONO_RGCTX_INFO_VTABLE;
data = get_runtime_generic_context_ptr (cfg, method, context_used, bblock, klass,
- token, MINI_TOKEN_SOURCE_CLASS, generic_context,
- rgctx, rgctx_info, ip);
+ generic_context, rgctx, rgctx_info, ip);
*sp++ = handle_box_from_inst (cfg, bblock, val, ip, klass, data);
}
klass = mini_get_class (method, token, generic_context);
CHECK_TYPELOAD (klass);
- if (cfg->generic_sharing_context) {
+ if (cfg->generic_sharing_context)
context_used = mono_class_check_context_used (klass);
- if (context_used & MONO_GENERIC_CONTEXT_USED_METHOD || klass->valuetype)
- GENERIC_SHARING_FAILURE (CEE_NEWARR);
- }
-
if (context_used) {
MonoInst *rgctx, *args [3];
int temp;
/* domain */
+ /* FIXME: what about domain-neutral code? */
NEW_DOMAINCONST (cfg, args [0]);
/* klass */
GET_RGCTX (rgctx, context_used);
args [1] = get_runtime_generic_context_ptr (cfg, method, context_used, bblock, klass,
- token, MINI_TOKEN_SOURCE_CLASS, generic_context, rgctx, MONO_RGCTX_INFO_KLASS, ip);
+ generic_context, rgctx, MONO_RGCTX_INFO_KLASS, ip);
/* array len */
args [2] = *sp;
CHECK_TYPELOAD (klass);
mono_class_init (klass);
+ /* Needed by the code generated in inssel.brg */
+ mono_get_got_var (cfg);
+
if (cfg->generic_sharing_context) {
context_used = mono_class_check_context_used (klass);
if (context_used && cfg->compile_aot)
GET_RGCTX (rgctx, context_used);
ins->inst_right = get_runtime_generic_context_ptr (cfg, method, context_used,
- bblock, klass,
- token, MINI_TOKEN_SOURCE_CLASS, generic_context,
- rgctx, MONO_RGCTX_INFO_KLASS, ip);
+ bblock, klass, generic_context, rgctx, MONO_RGCTX_INFO_KLASS, ip);
} else {
MONO_INST_NEW (cfg, ins, *ip);
ins->type = STACK_MP;
GET_RGCTX (rgctx, context_used);
klass_klass = get_runtime_generic_context_ptr (cfg, method, context_used, bblock, klass,
- token, MINI_TOKEN_SOURCE_CLASS, generic_context,
- rgctx, MONO_RGCTX_INFO_KLASS, ip);
+ generic_context, rgctx, MONO_RGCTX_INFO_KLASS, ip);
GET_RGCTX (rgctx, context_used);
klass_type = get_runtime_generic_context_ptr (cfg, method, context_used, bblock, klass,
- token, MINI_TOKEN_SOURCE_CLASS, generic_context,
- rgctx, MONO_RGCTX_INFO_TYPE, ip);
+ generic_context, rgctx, MONO_RGCTX_INFO_TYPE, ip);
NEW_TEMPLOADA (cfg, loc_load, loc->inst_c0);
context_used = mono_method_check_context_used (handle);
else
g_assert_not_reached ();
-
- if (context_used & MONO_GENERIC_CONTEXT_USED_METHOD)
- GENERIC_SHARING_FAILURE (CEE_LDTOKEN);
}
if (cfg->opt & MONO_OPT_SHARED) {
GET_RGCTX (rgctx, context_used);
ins = get_runtime_generic_context_ptr (cfg, method, context_used, bblock, tclass,
- token, MINI_TOKEN_SOURCE_CLASS, generic_context,
- rgctx, MONO_RGCTX_INFO_REFLECTION_TYPE, ip);
+ generic_context, rgctx, MONO_RGCTX_INFO_REFLECTION_TYPE, ip);
} else if (cfg->compile_aot) {
+ /*
+ * FIXME: We would have to include the context into the
+ * aot constant too (tests/generic-array-type.2.exe).
+ */
+ if (generic_context)
+ cfg->disable_aot = TRUE;
NEW_TYPE_FROM_HANDLE_CONST (cfg, ins, image, n);
} else {
NEW_PCONST (cfg, ins, mono_type_get_object (cfg->domain, handle));
if (handle_class == mono_defaults.typehandle_class) {
ins = get_runtime_generic_context_ptr (cfg, method,
context_used, bblock,
- mono_class_from_mono_type (handle), token,
- MINI_TOKEN_SOURCE_CLASS, generic_context,
+ mono_class_from_mono_type (handle), generic_context,
rgctx, MONO_RGCTX_INFO_TYPE, ip);
} else if (handle_class == mono_defaults.methodhandle_class) {
ins = get_runtime_generic_context_method (cfg, method,
token = read32 (ip + 2);
ptr = mono_method_get_wrapper_data (method, token);
- if (cfg->compile_aot && cfg->method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) {
+ if (cfg->compile_aot && (cfg->method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE || cfg->method->wrapper_type == MONO_WRAPPER_RUNTIME_INVOKE)) {
MonoMethod *wrapped = mono_marshal_method_from_wrapper (cfg->method);
if (wrapped && ptr != NULL && mono_lookup_internal_call (wrapped) == ptr) {
ip += 6;
break;
}
+
+ if ((method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) && (strstr (method->name, "__icall_wrapper_") == method->name)) {
+ MonoJitICallInfo *callinfo;
+ const char *icall_name;
+
+ icall_name = method->name + strlen ("__icall_wrapper_");
+ g_assert (icall_name);
+ callinfo = mono_find_jit_icall_by_name (icall_name);
+ g_assert (callinfo);
+
+ if (ptr == callinfo->func) {
+ /* Will be transformed into an AOTCONST later */
+ NEW_PCONST (cfg, ins, ptr);
+ *sp++ = ins;
+ ip += 6;
+ break;
+ }
+ }
+ }
+ /* FIXME: Generalize this */
+ if (cfg->compile_aot && ptr == mono_thread_interruption_request_flag ()) {
+ NEW_AOTCONST (cfg, ins, MONO_PATCH_INFO_INTERRUPTION_REQUEST_FLAG, NULL);
+ *sp++ = ins;
+ ip += 6;
+ break;
}
NEW_PCONST (cfg, ins, ptr);
*sp++ = ins;
token = read32 (ip + 2);
/* Needed by the code generated in inssel.brg */
mono_get_got_var (cfg);
+
+#ifdef __i386__
+ /*
+ * The code generated for CCASTCLASS has too much register pressure
+ * (obj+vtable+ibitmap_byte_reg+iid_reg), leading to the usual
+ * branches-inside-bblocks problem.
+ */
+ cfg->disable_aot = TRUE;
+#endif
klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
MONO_INST_NEW (cfg, ins, (ip [1] == CEE_MONO_CISINST) ? OP_CISINST : OP_CCASTCLASS);
args [1] = get_runtime_generic_context_method (cfg, method, context_used,
bblock, cmethod,
generic_context, rgctx, MONO_RGCTX_INFO_METHOD, ip);
+ temp = mono_emit_jit_icall (cfg, bblock, mono_ldvirtfn_gshared, args, ip);
} else {
NEW_METHODCONST (cfg, args [1], cmethod);
+ temp = mono_emit_jit_icall (cfg, bblock, mono_ldvirtfn, args, ip);
}
- temp = mono_emit_jit_icall (cfg, bblock, mono_ldvirtfn, args, ip);
NEW_TEMPLOAD (cfg, *sp, temp);
sp ++;
store->inst_i0 = sp [0];
store->inst_i1 = load;
} else {
- GENERIC_SHARING_FAILURE (CEE_INITOBJ);
handle_initobj (cfg, bblock, *sp, NULL, klass, stack_start, sp);
}
ip += 6;
break;
}
case CEE_SIZEOF:
- GENERIC_SHARING_FAILURE (CEE_SIZEOF);
-
CHECK_STACK_OVF (1);
CHECK_OPSIZE (6);
token = read32 (ip + 2);
wrapper = mono_marshal_get_icall_wrapper (callinfo->sig, name, callinfo->func, check_for_pending_exc);
g_free (name);
- trampoline = mono_create_ftnptr (domain, mono_create_jit_trampoline_in_domain (domain, wrapper));
+ trampoline = mono_create_ftnptr (domain, mono_create_jit_trampoline_in_domain (domain, wrapper, TRUE));
mono_register_jit_icall_wrapper (callinfo, trampoline);
callinfo->trampoline = trampoline;
typedef struct {
MonoClass *vtype;
- GList *active;
+ GList *active, *inactive;
GSList *slots;
} StackSlotInfo;
return new_list;
}
+static gint
+compare_by_interval_start_pos_func (gconstpointer a, gconstpointer b)
+{
+ MonoMethodVar *v1 = (MonoMethodVar*)a;
+ MonoMethodVar *v2 = (MonoMethodVar*)b;
+
+ if (v1 == v2)
+ return 0;
+ else if (v1->interval->range && v2->interval->range)
+ return v1->interval->range->from - v2->interval->range->from;
+ else if (v1->interval->range)
+ return -1;
+ else
+ return 1;
+}
+
+#if 0
+#define LSCAN_DEBUG(a) do { a; } while (0)
+#else
+#define LSCAN_DEBUG(a)
+#endif
+
+/*
+ * mono_allocate_stack_slots_full2:
+ *
+ * Allocate stack slots for the method's variables using their liveness
+ * intervals (linear-scan style): variables are processed in order of
+ * increasing interval start position, and slots whose intervals have
+ * expired are recycled through the per-type free lists in StackSlotInfo.
+ * Returns a mempool-allocated array mapping variable index to stack
+ * offset (-1 = no slot assigned).  The total frame size is returned in
+ * STACK_SIZE and its alignment in STACK_ALIGN; BACKWARD changes how each
+ * slot's offset relates to the space reserved for it (see below).
+ */
+static gint32*
+mono_allocate_stack_slots_full2 (MonoCompile *cfg, gboolean backward, guint32 *stack_size, guint32 *stack_align)
+{
+	int i, slot, offset, size;
+	guint32 align;
+	MonoMethodVar *vmv;
+	MonoInst *inst;
+	gint32 *offsets;
+	GList *vars = NULL, *l, *unhandled;
+	StackSlotInfo *scalar_stack_slots, *vtype_stack_slots, *slot_info;
+	MonoType *t;
+	int nvtypes;
+
+	LSCAN_DEBUG (printf ("Allocate Stack Slots 2 for %s:\n", mono_method_full_name (cfg->method, TRUE)));
+
+	scalar_stack_slots = mono_mempool_alloc0 (cfg->mempool, sizeof (StackSlotInfo) * MONO_TYPE_PINNED);
+	vtype_stack_slots = NULL;
+	nvtypes = 0;
+
+	/* -1 means "no slot assigned" for the caller */
+	offsets = mono_mempool_alloc (cfg->mempool, sizeof (gint32) * cfg->num_varinfo);
+	for (i = 0; i < cfg->num_varinfo; ++i)
+		offsets [i] = -1;
+
+	/* Collect the variables which still need a stack slot */
+	for (i = cfg->locals_start; i < cfg->num_varinfo; i++) {
+		inst = cfg->varinfo [i];
+		vmv = MONO_VARINFO (cfg, i);
+
+		if ((inst->flags & MONO_INST_IS_DEAD) || inst->opcode == OP_REGVAR || inst->opcode == OP_REGOFFSET)
+			continue;
+
+		vars = g_list_prepend (vars, vmv);
+	}
+
+	/* NOTE(review): g_list_sort sorts in place, so the g_list_copy here
+	 * leaks the original prepended list — confirm the copy is needed. */
+	vars = g_list_sort (g_list_copy (vars), compare_by_interval_start_pos_func);
+
+	/* Sanity check */
+	/*
+	i = 0;
+	for (unhandled = vars; unhandled; unhandled = unhandled->next) {
+		MonoMethodVar *current = unhandled->data;
+
+		if (current->interval->range) {
+			g_assert (current->interval->range->from >= i);
+			i = current->interval->range->from;
+		}
+	}
+	*/
+
+	offset = 0;
+	*stack_align = 0;
+	for (unhandled = vars; unhandled; unhandled = unhandled->next) {
+		MonoMethodVar *current = unhandled->data;
+
+		vmv = current;
+		inst = cfg->varinfo [vmv->idx];
+
+		/* inst->backend.is_pinvoke indicates native sized value types, this is used by the
+		* pinvoke wrappers when they call functions returning structures */
+		if (inst->backend.is_pinvoke && MONO_TYPE_ISSTRUCT (inst->inst_vtype) && inst->inst_vtype->type != MONO_TYPE_TYPEDBYREF)
+			size = mono_class_native_size (inst->inst_vtype->data.klass, &align);
+		else {
+			int ialign;
+
+			size = mono_type_size (inst->inst_vtype, &ialign);
+			align = ialign;
+		}
+
+		/* Pick the free-slot list to recycle from: one per scalar type,
+		 * plus one per distinct valuetype class. */
+		t = mono_type_get_underlying_type (inst->inst_vtype);
+		switch (t->type) {
+		case MONO_TYPE_GENERICINST:
+			if (!mono_type_generic_inst_is_valuetype (t)) {
+				slot_info = &scalar_stack_slots [t->type];
+				break;
+			}
+			/* Fall through */
+		case MONO_TYPE_VALUETYPE:
+			if (!vtype_stack_slots)
+				vtype_stack_slots = mono_mempool_alloc0 (cfg->mempool, sizeof (StackSlotInfo) * 256);
+			for (i = 0; i < nvtypes; ++i)
+				if (t->data.klass == vtype_stack_slots [i].vtype)
+					break;
+			if (i < nvtypes)
+				slot_info = &vtype_stack_slots [i];
+			else {
+				g_assert (nvtypes < 256);
+				vtype_stack_slots [nvtypes].vtype = t->data.klass;
+				slot_info = &vtype_stack_slots [nvtypes];
+				nvtypes ++;
+			}
+			break;
+		case MONO_TYPE_CLASS:
+		case MONO_TYPE_OBJECT:
+		case MONO_TYPE_ARRAY:
+		case MONO_TYPE_SZARRAY:
+		case MONO_TYPE_STRING:
+		case MONO_TYPE_PTR:
+		case MONO_TYPE_I:
+		case MONO_TYPE_U:
+		/* NOTE(review): on 32-bit targets MONO_TYPE_I4 falls through to
+		 * 'default' because the sharing code below sits inside the #else
+		 * branch — confirm the #endif placement is intended. */
+#if SIZEOF_VOID_P == 4
+		case MONO_TYPE_I4:
+#else
+		case MONO_TYPE_I8:
+			/* Share non-float stack slots of the same size */
+			slot_info = &scalar_stack_slots [MONO_TYPE_CLASS];
+			break;
+#endif
+		default:
+			slot_info = &scalar_stack_slots [t->type];
+		}
+
+		slot = 0xffffff;
+		if (cfg->comp_done & MONO_COMP_LIVENESS) {
+			int pos;
+			gboolean changed;
+
+			//printf ("START  %2d %08x %08x\n",  vmv->idx, vmv->range.first_use.abs_pos, vmv->range.last_use.abs_pos);
+
+			if (!current->interval->range) {
+				if (inst->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT))
+					pos = ~0;
+				else {
+					/* Dead */
+					inst->flags |= MONO_INST_IS_DEAD;
+					continue;
+				}
+			}
+			else
+				pos = current->interval->range->from;
+
+			LSCAN_DEBUG (printf ("process R%d ", inst->dreg));
+			if (current->interval->range)
+				LSCAN_DEBUG (mono_linterval_print (current->interval));
+			LSCAN_DEBUG (printf ("\n"));
+
+			/* Check for intervals in active which expired or inactive */
+			changed = TRUE;
+			/* FIXME: Optimize this */
+			while (changed) {
+				changed = FALSE;
+				for (l = slot_info->active; l != NULL; l = l->next) {
+					MonoMethodVar *v = (MonoMethodVar*)l->data;
+
+					if (v->interval->last_range->to < pos) {
+						/* Interval ended before pos: its slot can be reused */
+						slot_info->active = g_list_delete_link (slot_info->active, l);
+						slot_info->slots = g_slist_prepend_mempool (cfg->mempool, slot_info->slots, GINT_TO_POINTER (offsets [v->idx]));
+						LSCAN_DEBUG (printf ("Interval R%d has expired, adding 0x%x to slots\n", cfg->varinfo [v->idx]->dreg, offsets [v->idx]));
+						changed = TRUE;
+						break;
+					}
+					else if (!mono_linterval_covers (v->interval, pos)) {
+						/* Interval has a hole at pos: move it to the inactive list */
+						slot_info->inactive = g_list_append (slot_info->inactive, v);
+						slot_info->active = g_list_delete_link (slot_info->active, l);
+						LSCAN_DEBUG (printf ("Interval R%d became inactive\n", cfg->varinfo [v->idx]->dreg));
+						changed = TRUE;
+						break;
+					}
+				}
+			}
+
+			/* Check for intervals in inactive which expired or active */
+			changed = TRUE;
+			/* FIXME: Optimize this */
+			while (changed) {
+				changed = FALSE;
+				for (l = slot_info->inactive; l != NULL; l = l->next) {
+					MonoMethodVar *v = (MonoMethodVar*)l->data;
+
+					if (v->interval->last_range->to < pos) {
+						slot_info->inactive = g_list_delete_link (slot_info->inactive, l);
+						// FIXME: Enabling this seems to cause impossible to debug crashes
+						//slot_info->slots = g_slist_prepend_mempool (cfg->mempool, slot_info->slots, GINT_TO_POINTER (offsets [v->idx]));
+						LSCAN_DEBUG (printf ("Interval R%d has expired, adding 0x%x to slots\n", cfg->varinfo [v->idx]->dreg, offsets [v->idx]));
+						changed = TRUE;
+						break;
+					}
+					else if (mono_linterval_covers (v->interval, pos)) {
+						slot_info->active = g_list_append (slot_info->active, v);
+						slot_info->inactive = g_list_delete_link (slot_info->inactive, l);
+						LSCAN_DEBUG (printf ("\tInterval R%d became active\n", cfg->varinfo [v->idx]->dreg));
+						changed = TRUE;
+						break;
+					}
+				}
+			}
+
+			/*
+			 * This also handles the case when the variable is used in an
+			 * exception region, as liveness info is not computed there.
+			 */
+			/*
+			 * FIXME: All valuetypes are marked as INDIRECT because of LDADDR
+			 * opcodes.
+			 */
+			if (! (inst->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT))) {
+				if (slot_info->slots) {
+					/* Recycle a freed slot (GSList nodes are mempool-allocated, no free needed) */
+					slot = GPOINTER_TO_INT (slot_info->slots->data);
+
+					slot_info->slots = slot_info->slots->next;
+				}
+
+				/* FIXME: We might want to consider the inactive intervals as well if slot_info->slots is empty */
+
+				slot_info->active = mono_varlist_insert_sorted (cfg, slot_info->active, vmv, TRUE);
+			}
+		}
+
+#if 0
+		{
+			static int count = 0;
+			count ++;
+
+			if (count == atoi (getenv ("COUNT3")))
+				printf ("LAST: %s\n", mono_method_full_name (cfg->method, TRUE));
+			if (count > atoi (getenv ("COUNT3")))
+				slot = 0xffffff;
+			else {
+				mono_print_tree_nl (inst);
+			}
+		}
+#endif
+
+		LSCAN_DEBUG (printf ("R%d %s -> 0x%x\n", inst->dreg, mono_type_full_name (t), slot));
+
+		/* 0xffffff means no recycled slot was found: carve a new one out of the frame */
+		if (slot == 0xffffff) {
+			/*
+			 * Allways allocate valuetypes to sizeof (gpointer) to allow more
+			 * efficient copying (and to work around the fact that OP_MEMCPY
+			 * and OP_MEMSET ignores alignment).
+			 */
+			if (MONO_TYPE_ISSTRUCT (t))
+				align = sizeof (gpointer);
+
+			if (backward) {
+				offset += size;
+				offset += align - 1;
+				offset &= ~(align - 1);
+				slot = offset;
+			}
+			else {
+				offset += align - 1;
+				offset &= ~(align - 1);
+				slot = offset;
+				offset += size;
+			}
+
+			/* NOTE(review): only the first allocated slot's alignment is
+			 * recorded, not the maximum of all slots — confirm. */
+			if (*stack_align == 0)
+				*stack_align = align;
+		}
+
+		offsets [vmv->idx] = slot;
+	}
+	g_list_free (vars);
+	/* NOTE(review): the per-type inactive lists are not freed below,
+	 * only the active ones — confirm they are always empty here. */
+	for (i = 0; i < MONO_TYPE_PINNED; ++i) {
+		if (scalar_stack_slots [i].active)
+			g_list_free (scalar_stack_slots [i].active);
+	}
+	for (i = 0; i < nvtypes; ++i) {
+		if (vtype_stack_slots [i].active)
+			g_list_free (vtype_stack_slots [i].active);
+	}
+
+	mono_jit_stats.locals_stack_size += offset;
+
+	*stack_size = offset;
+	return offsets;
+}
+
/*
* mono_allocate_stack_slots_full:
*
MonoType *t;
int nvtypes;
+ if ((cfg->num_varinfo > 0) && MONO_VARINFO (cfg, 0)->interval)
+ return mono_allocate_stack_slots_full2 (cfg, backward, stack_size, stack_align);
+
scalar_stack_slots = mono_mempool_alloc0 (cfg->mempool, sizeof (StackSlotInfo) * MONO_TYPE_PINNED);
vtype_stack_slots = NULL;
nvtypes = 0;
int i, j;
char *code;
MonoBasicBlock *bb;
+ MonoInst *c;
g_print ("IR code for method %s\n", mono_method_full_name (cfg->method, TRUE));
for (i = 0; i < cfg->num_bblocks; ++i) {
- MonoInst *c;
-
bb = cfg->bblocks [i];
/*if (bb->cil_code) {
char* code1, *code2;
g_free (code2);
} else*/
code = g_strdup ("\n");
- g_print ("\nBB%d DFN%d (len: %d): %s", bb->block_num, i, bb->cil_length, code);
+ g_print ("\nBB%d (%d) (len: %d): %s", bb->block_num, i, bb->cil_length, code);
MONO_BB_FOR_EACH_INS (bb, c) {
- mono_print_tree (c);
- g_print ("\n");
+ if (cfg->new_ir) {
+ mono_print_ins_index (-1, c);
+ } else {
+ mono_print_tree (c);
+ g_print ("\n");
+ }
}
g_print ("\tprev:");
MONO_ADD_INS (bb, inst);
}
+/*
+ * mono_bblock_insert_after_ins:
+ *
+ * Insert INS_TO_INSERT into BB's instruction list after INS.
+ * If INS is NULL, INS_TO_INSERT becomes the new head of the list.
+ * Updates bb->last_ins when the insertion happens at the tail.
+ */
+void
+mono_bblock_insert_after_ins (MonoBasicBlock *bb, MonoInst *ins, MonoInst *ins_to_insert)
+{
+	if (ins == NULL) {
+		/* Insert at the head of the list */
+		ins = bb->code;
+		bb->code = ins_to_insert;
+		ins_to_insert->next = ins;
+		/* NOTE(review): unlike the branch below, the old head's prev
+		 * pointer is not updated here, which would violate the invariant
+		 * checked by mono_verify_bblock — confirm this path's callers. */
+		if (bb->last_ins == NULL)
+			bb->last_ins = ins_to_insert;
+	} else {
+		/* Link with next */
+		ins_to_insert->next = ins->next;
+		if (ins->next)
+			ins->next->prev = ins_to_insert;
+
+		/* Link with previous */
+		ins->next = ins_to_insert;
+		ins_to_insert->prev = ins;
+
+		if (bb->last_ins == ins)
+			bb->last_ins = ins_to_insert;
+	}
+}
+
+/*
+ * mono_bblock_insert_before_ins:
+ *
+ * Insert INS_TO_INSERT into BB's instruction list before INS, keeping the
+ * prev/next links and bb->code consistent.  The INS == NULL case (insert
+ * at head with no reference instruction) is not implemented and aborts
+ * via NOT_IMPLEMENTED.
+ */
+void
+mono_bblock_insert_before_ins (MonoBasicBlock *bb, MonoInst *ins, MonoInst *ins_to_insert)
+{
+	if (ins == NULL) {
+		/* Unreachable in practice: aborts before executing the code below */
+		NOT_IMPLEMENTED;
+		ins = bb->code;
+		bb->code = ins_to_insert;
+		ins_to_insert->next = ins;
+		if (bb->last_ins == NULL)
+			bb->last_ins = ins_to_insert;
+	} else {
+		/* Link with previous */
+		if (ins->prev)
+			ins->prev->next = ins_to_insert;
+		ins_to_insert->prev = ins->prev;
+
+		/* Link with next */
+		ins->prev = ins_to_insert;
+		ins_to_insert->next = ins;
+
+		/* Inserting before the head makes INS_TO_INSERT the new head */
+		if (bb->code == ins)
+			bb->code = ins_to_insert;
+	}
+}
+
+/*
+ * mono_verify_bblock:
+ *
+ * Verify that the next and prev pointers are consistent inside the
+ * instructions in BB: each instruction's prev must point at the previous
+ * instruction in the list, and bb->last_ins (when set) must have no
+ * successor.  Asserts on violation.
+ */
+void
+mono_verify_bblock (MonoBasicBlock *bb)
+{
+	MonoInst *ins, *prev;
+
+	prev = NULL;
+	for (ins = bb->code; ins; ins = ins->next) {
+		g_assert (ins->prev == prev);
+		prev = ins;
+	}
+	if (bb->last_ins)
+		g_assert (!bb->last_ins->next);
+}
+
+/*
+ * mono_verify_cfg:
+ *
+ * Perform consistency checks on the JIT data structures and the IR:
+ * currently runs mono_verify_bblock on every basic block reachable
+ * through the next_bb chain starting at cfg->bb_entry.
+ */
+void
+mono_verify_cfg (MonoCompile *cfg)
+{
+	MonoBasicBlock *bb;
+
+	for (bb = cfg->bb_entry; bb; bb = bb->next_bb)
+		mono_verify_bblock (bb);
+}
+
void
mono_destroy_compile (MonoCompile *cfg)
{
g_list_free (cfg->ldstr_list);
g_hash_table_destroy (cfg->token_info_hash);
+ g_free (cfg->reverse_inst_list);
+
g_free (cfg->varinfo);
g_free (cfg->vars);
g_free (cfg->exception_message);
cfg->patch_info = ji;
}
+/*
+ * mono_patch_info_list_prepend:
+ *
+ * Allocate a new MonoJumpInfo describing a patch of type TYPE at IL
+ * offset IP with the given TARGET, and prepend it to LIST.  Returns the
+ * new list head.  The node is allocated with g_new0, so the caller owns
+ * the resulting list and is responsible for freeing it.
+ */
+MonoJumpInfo *
+mono_patch_info_list_prepend (MonoJumpInfo *list, int ip, MonoJumpInfoType type, gconstpointer target)
+{
+	MonoJumpInfo *ji = g_new0 (MonoJumpInfo, 1);
+
+	ji->ip.i = ip;
+	ji->type = type;
+	ji->data.target = target;
+	ji->next = list;
+
+	return ji;
+}
+
void
mono_remove_patch_info (MonoCompile *cfg, int ip)
{
switch (patch_info->type) {
case MONO_PATCH_INFO_BB:
+ g_assert (patch_info->data.bb->native_offset);
target = patch_info->data.bb->native_offset + code;
break;
case MONO_PATCH_INFO_ABS:
target = code;
} else {
/* get the trampoline to the method from the domain */
- if (method && method->wrapper_type == MONO_WRAPPER_STATIC_RGCTX_INVOKE)
- target = mono_ldftn_nosync (patch_info->data.method);
- else
+ if (method && method->wrapper_type == MONO_WRAPPER_STATIC_RGCTX_INVOKE) {
+ target = mono_create_jit_trampoline_in_domain (mono_domain_get (),
+ patch_info->data.method, FALSE);
+ } else {
target = mono_create_jit_trampoline (patch_info->data.method);
+ }
}
break;
case MONO_PATCH_INFO_SWITCH: {
jump_table = mono_code_manager_reserve (mono_dynamic_code_hash_lookup (domain, method)->code_mp, sizeof (gpointer) * patch_info->data.table->table_size);
} else {
mono_domain_lock (domain);
- jump_table = mono_code_manager_reserve (domain->code_mp, sizeof (gpointer) * patch_info->data.table->table_size);
+ if (mono_aot_only)
+ jump_table = mono_mempool_alloc (domain->mp, sizeof (gpointer) * patch_info->data.table->table_size);
+ else
+ jump_table = mono_code_manager_reserve (domain->code_mp, sizeof (gpointer) * patch_info->data.table->table_size);
mono_domain_unlock (domain);
}
- for (i = 0; i < patch_info->data.table->table_size; i++) {
+ for (i = 0; i < patch_info->data.table->table_size; i++)
jump_table [i] = code + GPOINTER_TO_INT (patch_info->data.table->table [i]);
- }
target = jump_table;
break;
}
case MONO_PATCH_INFO_ICALL_ADDR:
target = mono_lookup_internal_call (patch_info->data.method);
break;
+ case MONO_PATCH_INFO_JIT_ICALL_ADDR: {
+ MonoJitICallInfo *mi = mono_find_jit_icall_by_name (patch_info->data.name);
+ if (!mi) {
+ g_warning ("unknown MONO_PATCH_INFO_JIT_ICALL_ADDR %s", patch_info->data.name);
+ g_assert_not_reached ();
+ }
+ target = mi->func;
+ break;
+ }
+ case MONO_PATCH_INFO_INTERRUPTION_REQUEST_FLAG:
+ target = mono_thread_interruption_request_flag ();
+ break;
case MONO_PATCH_INFO_BB_OVF:
case MONO_PATCH_INFO_EXC_OVF:
case MONO_PATCH_INFO_GOT_OFFSET:
bb->in_bb = NULL;
bb->out_bb = NULL;
bb->next_bb = NULL;
- MONO_INST_LIST_INIT (&bb->ins_list);
+ bb->code = bb->last_ins = NULL;
bb->cil_code = NULL;
}
static void
replace_out_block_in_code (MonoBasicBlock *bb, MonoBasicBlock *orig, MonoBasicBlock *repl) {
- MonoInst *inst;
+ MonoInst *ins;
- MONO_BB_FOR_EACH_INS (bb, inst) {
- if (inst->opcode == OP_CALL_HANDLER) {
- if (inst->inst_target_bb == orig)
- inst->inst_target_bb = repl;
- }
- }
-
- inst = mono_inst_list_last (&bb->ins_list);
- if (!inst)
- return;
-
- switch (inst->opcode) {
- case OP_BR:
- if (inst->inst_target_bb == orig)
- inst->inst_target_bb = repl;
- break;
- case OP_SWITCH: {
- int i;
- int n = GPOINTER_TO_INT (inst->klass);
- for (i = 0; i < n; i++ ) {
- if (inst->inst_many_bb [i] == orig)
- inst->inst_many_bb [i] = repl;
+ for (ins = bb->code; ins != NULL; ins = ins->next) {
+ switch (ins->opcode) {
+ case OP_BR:
+ if (ins->inst_target_bb == orig)
+ ins->inst_target_bb = repl;
+ break;
+ case OP_CALL_HANDLER:
+ if (ins->inst_target_bb == orig)
+ ins->inst_target_bb = repl;
+ break;
+ case OP_SWITCH: {
+ int i;
+ int n = GPOINTER_TO_INT (ins->klass);
+ for (i = 0; i < n; i++ ) {
+ if (ins->inst_many_bb [i] == orig)
+ ins->inst_many_bb [i] = repl;
+ }
+ break;
}
- break;
- }
- case CEE_BNE_UN:
- case CEE_BEQ:
- case CEE_BLT:
- case CEE_BLT_UN:
- case CEE_BGT:
- case CEE_BGT_UN:
- case CEE_BGE:
- case CEE_BGE_UN:
- case CEE_BLE:
- case CEE_BLE_UN:
- if (inst->inst_true_bb == orig)
- inst->inst_true_bb = repl;
- if (inst->inst_false_bb == orig)
- inst->inst_false_bb = repl;
- break;
- default:
- break;
- }
-}
-
-static void
-replace_basic_block (MonoBasicBlock *bb, MonoBasicBlock *orig, MonoBasicBlock *repl)
-{
- int i, j;
-
- for (i = 0; i < bb->out_count; i++) {
- MonoBasicBlock *ob = bb->out_bb [i];
- for (j = 0; j < ob->in_count; j++) {
- if (ob->in_bb [j] == orig) {
- ob->in_bb [j] = repl;
+ default:
+ if (MONO_IS_COND_BRANCH_OP (ins)) {
+ if (ins->inst_true_bb == orig)
+ ins->inst_true_bb = repl;
+ if (ins->inst_false_bb == orig)
+ ins->inst_false_bb = repl;
+ } else if (MONO_IS_JUMP_TABLE (ins)) {
+ int i;
+ MonoJumpInfoBBTable *table = MONO_JUMP_TABLE_FROM_INS (ins);
+ for (i = 0; i < table->table_size; i++ ) {
+ if (table->table [i] == orig)
+ table->table [i] = repl;
+ }
}
+
+ break;
}
}
-
}
/**
remove_block_if_useless (MonoCompile *cfg, MonoBasicBlock *bb, MonoBasicBlock *previous_bb) {
MonoBasicBlock *target_bb = NULL;
MonoInst *inst;
-
+
/* Do not touch handlers */
if (bb->region != -1) {
bb->not_useless = TRUE;
}
/* Do not touch BBs following a switch (they are the "default" branch) */
- inst = mono_inst_list_last (&previous_bb->ins_list);
- if (inst && inst->opcode == OP_SWITCH)
+ if ((previous_bb->last_ins != NULL) && (previous_bb->last_ins->opcode == OP_SWITCH)) {
return FALSE;
+ }
/* Do not touch BBs following the entry BB and jumping to something that is not */
/* thiry "next" bb (the entry BB cannot contain the branch) */
/* Check that there is a target BB, and that bb is not an empty loop (Bug 75061) */
if ((target_bb != NULL) && (target_bb != bb)) {
- MonoInst *last_ins;
int i;
if (cfg->verbose_level > 1) {
mono_unlink_bblock (cfg, bb, target_bb);
- last_ins = mono_inst_list_last (&previous_bb->ins_list);
-
if ((previous_bb != cfg->bb_entry) &&
(previous_bb->region == bb->region) &&
- ((last_ins == NULL) ||
- ((last_ins->opcode != OP_BR) &&
- (!(MONO_IS_COND_BRANCH_OP (last_ins))) &&
- (last_ins->opcode != OP_SWITCH)))) {
+ ((previous_bb->last_ins == NULL) ||
+ ((previous_bb->last_ins->opcode != OP_BR) &&
+ (! (MONO_IS_COND_BRANCH_OP (previous_bb->last_ins))) &&
+ (previous_bb->last_ins->opcode != OP_SWITCH)))) {
for (i = 0; i < previous_bb->out_count; i++) {
if (previous_bb->out_bb [i] == target_bb) {
MonoInst *jump;
}
}
-static void
-merge_basic_blocks (MonoBasicBlock *bb, MonoBasicBlock *bbn)
+void
+mono_merge_basic_blocks (MonoCompile *cfg, MonoBasicBlock *bb, MonoBasicBlock *bbn)
{
- MonoInst *last_ins;
-
- bb->out_count = bbn->out_count;
- bb->out_bb = bbn->out_bb;
-
- replace_basic_block (bb, bbn, bb);
+ MonoInst *inst;
+ MonoBasicBlock *prev_bb;
+ int i;
- last_ins = mono_inst_list_last (&bb->ins_list);
+ bb->has_array_access |= bbn->has_array_access;
+ bb->extended |= bbn->extended;
- /* Nullify branch at the end of bb */
- if (last_ins && MONO_IS_BRANCH_OP (last_ins))
- last_ins->opcode = OP_NOP;
+ mono_unlink_bblock (cfg, bb, bbn);
+ for (i = 0; i < bbn->out_count; ++i)
+ mono_link_bblock (cfg, bb, bbn->out_bb [i]);
+ while (bbn->out_count)
+ mono_unlink_bblock (cfg, bbn, bbn->out_bb [0]);
- MONO_INST_LIST_SPLICE_TAIL_INIT (&bbn->ins_list, &bb->ins_list);
+ /* Handle the branch at the end of the bb */
+ for (inst = bb->code; inst != NULL; inst = inst->next) {
+ if (inst->opcode == OP_CALL_HANDLER) {
+ g_assert (inst->inst_target_bb == bbn);
+ NULLIFY_INS (inst);
+ }
+ if (MONO_IS_JUMP_TABLE (inst)) {
+ int i;
+ MonoJumpInfoBBTable *table = MONO_JUMP_TABLE_FROM_INS (inst);
+ for (i = 0; i < table->table_size; i++ ) {
+ /* Might be already NULL from a previous merge */
+ if (table->table [i])
+ g_assert (table->table [i] == bbn);
+ table->table [i] = NULL;
+ }
+ /* Can't nullify this as later instructions depend on it */
+ }
+ }
+ if (bb->last_ins && MONO_IS_COND_BRANCH_OP (bb->last_ins)) {
+ g_assert (bb->last_ins->inst_false_bb == bbn);
+ bb->last_ins->inst_false_bb = NULL;
+ bb->extended = TRUE;
+ } else if (bb->last_ins && MONO_IS_BRANCH_OP (bb->last_ins)) {
+ NULLIFY_INS (bb->last_ins);
+ }
- bb->next_bb = bbn->next_bb;
+ if (bb->last_ins) {
+ if (bbn->code) {
+ bb->last_ins->next = bbn->code;
+ bbn->code->prev = bb->last_ins;
+ bb->last_ins = bbn->last_ins;
+ }
+ } else {
+ bb->code = bbn->code;
+ bb->last_ins = bbn->last_ins;
+ }
+ for (prev_bb = cfg->bb_entry; prev_bb && prev_bb->next_bb != bbn; prev_bb = prev_bb->next_bb)
+ ;
+ if (prev_bb) {
+ prev_bb->next_bb = bbn->next_bb;
+ } else {
+ /* bbn might not be in the bb list yet */
+ if (bb->next_bb == bbn)
+ bb->next_bb = bbn->next_bb;
+ }
nullify_basic_block (bbn);
}
move_basic_block_to_end (MonoCompile *cfg, MonoBasicBlock *bb)
{
MonoBasicBlock *bbn, *next;
- MonoInst *last_ins;
next = bb->next_bb;
bbn->next_bb = bb;
bb->next_bb = NULL;
- last_ins = mono_inst_list_last (&bb->ins_list);
-
/* Add a branch */
- if (next && (!last_ins || (last_ins->opcode != OP_NOT_REACHED))) {
+ if (next && (!bb->last_ins || ((bb->last_ins->opcode != OP_NOT_REACHED) && (bb->last_ins->opcode != OP_BR) && (bb->last_ins->opcode != OP_BR_REG) && (!MONO_IS_COND_BRANCH_OP (bb->last_ins))))) {
MonoInst *ins;
MONO_INST_NEW (cfg, ins, OP_BR);
}
}
+/*
+ * mono_remove_bblock:
+ *
+ * Remove BB from the control flow graph by unlinking it from the
+ * next_bb chain.  BB must have a predecessor in the chain — it cannot
+ * be cfg->bb_entry itself and must actually be on the list, otherwise
+ * the g_assert below fires.
+ */
+void
+mono_remove_bblock (MonoCompile *cfg, MonoBasicBlock *bb)
+{
+	/* Walk the chain to find the block whose next_bb is BB */
+	MonoBasicBlock *tmp_bb;
+
+	for (tmp_bb = cfg->bb_entry; tmp_bb && tmp_bb->next_bb != bb; tmp_bb = tmp_bb->next_bb)
+		;
+
+	g_assert (tmp_bb);
+	tmp_bb->next_bb = bb->next_bb;
+}
+
/* checks that a and b represent the same instructions, conservatively,
* it can return FALSE also for two trees that are equal.
* FIXME: also make sure there are no side effects.
* Note that this can't be applied if the second arg is not positive...
*/
static int
-try_unsigned_compare (MonoCompile *cfg, MonoBasicBlock *bb, MonoInst *bb_last)
+try_unsigned_compare (MonoCompile *cfg, MonoBasicBlock *bb)
{
MonoBasicBlock *truet, *falset;
- MonoInst *cmp_inst = bb_last->inst_left;
+ MonoInst *cmp_inst = bb->last_ins->inst_left;
MonoInst *condb;
if (!cmp_inst->inst_right->inst_c0 == 0)
return FALSE;
- truet = bb_last->inst_true_bb;
- falset = bb_last->inst_false_bb;
+ truet = bb->last_ins->inst_true_bb;
+ falset = bb->last_ins->inst_false_bb;
if (falset->in_count != 1)
return FALSE;
- condb = mono_inst_list_last (&falset->ins_list);
+ condb = falset->last_ins;
/* target bb must have one instruction */
- if (!condb || (condb->node.next != &falset->ins_list))
+ if (!condb || (condb != falset->code))
return FALSE;
if ((((condb->opcode == CEE_BLE || condb->opcode == CEE_BLT) && (condb->inst_false_bb == truet))
|| ((condb->opcode == CEE_BGE || condb->opcode == CEE_BGT) && (condb->inst_true_bb == truet)))
return FALSE;
condb->opcode = get_unsigned_condbranch (condb->opcode);
/* change the original condbranch to just point to the new unsigned check */
- bb_last->opcode = OP_BR;
- bb_last->inst_target_bb = falset;
+ bb->last_ins->opcode = OP_BR;
+ bb->last_ins->inst_target_bb = falset;
replace_out_block (bb, truet, NULL);
replace_in_block (truet, bb, NULL);
return TRUE;
* Optimizes the branches on the Control Flow Graph
*
*/
-static void
-optimize_branches (MonoCompile *cfg)
+void
+mono_optimize_branches (MonoCompile *cfg)
{
int i, changed = FALSE;
MonoBasicBlock *bb, *bbn;
niterations = cfg->num_bblocks * 2;
else
niterations = 1000;
-
+
do {
MonoBasicBlock *previous_bb;
changed = FALSE;
/* we skip the entry block (exit is handled specially instead ) */
for (previous_bb = cfg->bb_entry, bb = cfg->bb_entry->next_bb; bb; previous_bb = bb, bb = bb->next_bb) {
- MonoInst *last_ins;
-
/* dont touch code inside exception clauses */
if (bb->region != -1)
continue;
changed = TRUE;
}
- last_ins = mono_inst_list_last (&bb->ins_list);
if (bb->out_count == 1) {
bbn = bb->out_bb [0];
/* conditional branches where true and false targets are the same can be also replaced with OP_BR */
- if (last_ins && MONO_IS_COND_BRANCH_OP (last_ins)) {
- MonoInst *pop;
- MONO_INST_NEW (cfg, pop, CEE_POP);
- pop->inst_left = last_ins->inst_left->inst_left;
- mono_add_ins_to_end (bb, pop);
- MONO_INST_NEW (cfg, pop, CEE_POP);
- pop->inst_left = last_ins->inst_left->inst_right;
- mono_add_ins_to_end (bb, pop);
- last_ins->opcode = OP_BR;
- last_ins->inst_target_bb = last_ins->inst_true_bb;
+ if (bb->last_ins && (bb->last_ins->opcode != OP_BR) && MONO_IS_COND_BRANCH_OP (bb->last_ins)) {
+ if (!cfg->new_ir) {
+ MonoInst *pop;
+ MONO_INST_NEW (cfg, pop, CEE_POP);
+ pop->inst_left = bb->last_ins->inst_left->inst_left;
+ mono_add_ins_to_end (bb, pop);
+ MONO_INST_NEW (cfg, pop, CEE_POP);
+ pop->inst_left = bb->last_ins->inst_left->inst_right;
+ mono_add_ins_to_end (bb, pop);
+ }
+ bb->last_ins->opcode = OP_BR;
+ bb->last_ins->inst_target_bb = bb->last_ins->inst_true_bb;
changed = TRUE;
if (cfg->verbose_level > 2)
g_print ("cond branch removal triggered in %d %d\n", bb->block_num, bb->out_count);
/* the block are in sequence anyway ... */
/* branches to the following block can be removed */
- if (last_ins && last_ins->opcode == OP_BR) {
- last_ins->opcode = OP_NOP;
+ if (bb->last_ins && bb->last_ins->opcode == OP_BR) {
+ bb->last_ins->opcode = OP_NOP;
changed = TRUE;
if (cfg->verbose_level > 2)
g_print ("br removal triggered %d -> %d\n", bb->block_num, bbn->block_num);
}
- if (bbn->in_count == 1) {
-
+ if (bbn->in_count == 1 && !bb->extended) {
if (bbn != cfg->bb_exit) {
if (cfg->verbose_level > 2)
g_print ("block merge triggered %d -> %d\n", bb->block_num, bbn->block_num);
- merge_basic_blocks (bb, bbn);
+ mono_merge_basic_blocks (cfg, bb, bbn);
changed = TRUE;
continue;
}
}
}
}
+
if ((bbn = bb->next_bb) && bbn->in_count == 0 && bb->region == bbn->region) {
if (cfg->verbose_level > 2) {
g_print ("nullify block triggered %d\n", bbn->block_num);
if (bb->out_count == 1) {
bbn = bb->out_bb [0];
- if (last_ins && last_ins->opcode == OP_BR) {
- MonoInst *bbn_code;
-
- bbn = last_ins->inst_target_bb;
- bbn_code = mono_inst_list_first (&bbn->ins_list);
- if (bb->region == bbn->region && bbn_code &&
- bbn_code->opcode == OP_BR &&
- bbn_code->inst_target_bb->region == bb->region) {
+ if (bb->last_ins && bb->last_ins->opcode == OP_BR) {
+ bbn = bb->last_ins->inst_target_bb;
+ if (bb->region == bbn->region && bbn->code && bbn->code->opcode == OP_BR &&
+ bbn->code->inst_target_bb->region == bb->region) {
+
if (cfg->verbose_level > 2)
- g_print ("in %s branch to branch triggered %d -> %d -> %d\n", cfg->method->name,
- bb->block_num, bbn->block_num, bbn_code->inst_target_bb->block_num);
+ g_print ("branch to branch triggered %d -> %d -> %d\n", bb->block_num, bbn->block_num, bbn->code->inst_target_bb->block_num);
replace_in_block (bbn, bb, NULL);
- replace_out_block (bb, bbn, bbn_code->inst_target_bb);
- link_bblock (cfg, bb, bbn_code->inst_target_bb);
- last_ins->inst_target_bb = bbn_code->inst_target_bb;
+ replace_out_block (bb, bbn, bbn->code->inst_target_bb);
+ link_bblock (cfg, bb, bbn->code->inst_target_bb);
+ bb->last_ins->inst_target_bb = bbn->code->inst_target_bb;
changed = TRUE;
continue;
}
}
} else if (bb->out_count == 2) {
- if (last_ins && MONO_IS_COND_BRANCH_NOFP (last_ins)) {
- int branch_result = mono_eval_cond_branch (last_ins);
+ if (bb->last_ins && MONO_IS_COND_BRANCH_NOFP (bb->last_ins)) {
+ int branch_result;
MonoBasicBlock *taken_branch_target = NULL, *untaken_branch_target = NULL;
- MonoInst *bbn_code;
+
+ if (cfg->new_ir) {
+ if (bb->last_ins->flags & MONO_INST_CFOLD_TAKEN)
+ branch_result = BRANCH_TAKEN;
+ else if (bb->last_ins->flags & MONO_INST_CFOLD_NOT_TAKEN)
+ branch_result = BRANCH_NOT_TAKEN;
+ else
+ branch_result = BRANCH_UNDEF;
+ }
+ else
+ branch_result = mono_eval_cond_branch (bb->last_ins);
if (branch_result == BRANCH_TAKEN) {
- taken_branch_target = last_ins->inst_true_bb;
- untaken_branch_target = last_ins->inst_false_bb;
+ taken_branch_target = bb->last_ins->inst_true_bb;
+ untaken_branch_target = bb->last_ins->inst_false_bb;
} else if (branch_result == BRANCH_NOT_TAKEN) {
- taken_branch_target = last_ins->inst_false_bb;
- untaken_branch_target = last_ins->inst_true_bb;
+ taken_branch_target = bb->last_ins->inst_false_bb;
+ untaken_branch_target = bb->last_ins->inst_true_bb;
}
if (taken_branch_target) {
/* if mono_eval_cond_branch () is ever taken to handle
* non-constant values to compare, issue a pop here.
*/
- last_ins->opcode = OP_BR;
- last_ins->inst_target_bb = taken_branch_target;
- mono_unlink_bblock (cfg, bb, untaken_branch_target);
+ bb->last_ins->opcode = OP_BR;
+ bb->last_ins->inst_target_bb = taken_branch_target;
+ if (!bb->extended)
+ mono_unlink_bblock (cfg, bb, untaken_branch_target);
changed = TRUE;
continue;
}
- bbn = last_ins->inst_true_bb;
- bbn_code = mono_inst_list_first (&bbn->ins_list);
- if (bb->region == bbn->region && bbn_code && bbn_code->opcode == OP_BR &&
- bbn_code->inst_target_bb->region == bb->region) {
+ bbn = bb->last_ins->inst_true_bb;
+ if (bb->region == bbn->region && bbn->code && bbn->code->opcode == OP_BR &&
+ bbn->code->inst_target_bb->region == bb->region) {
if (cfg->verbose_level > 2)
g_print ("cbranch1 to branch triggered %d -> (%d) %d (0x%02x)\n",
- bb->block_num, bbn->block_num, bbn_code->inst_target_bb->block_num,
- bbn_code->opcode);
+ bb->block_num, bbn->block_num, bbn->code->inst_target_bb->block_num,
+ bbn->code->opcode);
/*
* Unlink, then relink bblocks to avoid various
* tricky situations when the two targets of the branch
* are equal, or will become equal after the change.
*/
- mono_unlink_bblock (cfg, bb, last_ins->inst_true_bb);
- mono_unlink_bblock (cfg, bb, last_ins->inst_false_bb);
+ mono_unlink_bblock (cfg, bb, bb->last_ins->inst_true_bb);
+ mono_unlink_bblock (cfg, bb, bb->last_ins->inst_false_bb);
- last_ins->inst_true_bb = bbn_code->inst_target_bb;
+ bb->last_ins->inst_true_bb = bbn->code->inst_target_bb;
- link_bblock (cfg, bb, last_ins->inst_true_bb);
- link_bblock (cfg, bb, last_ins->inst_false_bb);
+ link_bblock (cfg, bb, bb->last_ins->inst_true_bb);
+ link_bblock (cfg, bb, bb->last_ins->inst_false_bb);
changed = TRUE;
continue;
}
- bbn = last_ins->inst_false_bb;
- bbn_code = mono_inst_list_first (&bbn->ins_list);
- if (bb->region == bbn->region && bbn_code && bbn_code->opcode == OP_BR &&
- bbn_code->inst_target_bb->region == bb->region) {
+ bbn = bb->last_ins->inst_false_bb;
+ if (bbn && bb->region == bbn->region && bbn->code && bbn->code->opcode == OP_BR &&
+ bbn->code->inst_target_bb->region == bb->region) {
if (cfg->verbose_level > 2)
g_print ("cbranch2 to branch triggered %d -> (%d) %d (0x%02x)\n",
- bb->block_num, bbn->block_num, bbn_code->inst_target_bb->block_num,
- bbn_code->opcode);
+ bb->block_num, bbn->block_num, bbn->code->inst_target_bb->block_num,
+ bbn->code->opcode);
- mono_unlink_bblock (cfg, bb, last_ins->inst_true_bb);
- mono_unlink_bblock (cfg, bb, last_ins->inst_false_bb);
+ mono_unlink_bblock (cfg, bb, bb->last_ins->inst_true_bb);
+ mono_unlink_bblock (cfg, bb, bb->last_ins->inst_false_bb);
- last_ins->inst_false_bb = bbn_code->inst_target_bb;
+ bb->last_ins->inst_false_bb = bbn->code->inst_target_bb;
- link_bblock (cfg, bb, last_ins->inst_true_bb);
- link_bblock (cfg, bb, last_ins->inst_false_bb);
+ link_bblock (cfg, bb, bb->last_ins->inst_true_bb);
+ link_bblock (cfg, bb, bb->last_ins->inst_false_bb);
changed = TRUE;
continue;
}
+
+ bbn = bb->last_ins->inst_false_bb;
+ /*
+ * If bb is an extended bb, it could contain an inside branch to bbn.
+ * FIXME: Enable the optimization if that is not true.
+ * If bblocks_linked () is true, then merging bb and bbn
+ * would require addition of an extra branch at the end of bbn
+ * slowing down loops.
+ */
+ if (cfg->new_ir && bbn && bb->region == bbn->region && bbn->in_count == 1 && cfg->enable_extended_bblocks && bbn != cfg->bb_exit && !bb->extended && !bbn->out_of_line && !mono_bblocks_linked (bbn, bb)) {
+ g_assert (bbn->in_bb [0] == bb);
+ if (cfg->verbose_level > 2)
+ g_print ("merge false branch target triggered BB%d -> BB%d\n", bb->block_num, bbn->block_num);
+ mono_merge_basic_blocks (cfg, bb, bbn);
+ changed = TRUE;
+ continue;
+ }
}
/* detect and optimize to unsigned compares checks like: if (v < 0 || v > limit */
- if (last_ins && last_ins->opcode == CEE_BLT && last_ins->inst_left->inst_right->opcode == OP_ICONST) {
- if (try_unsigned_compare (cfg, bb, last_ins)) {
- /*g_print ("applied in bb %d (->%d) %s\n", bb->block_num, last_ins->inst_target_bb->block_num, mono_method_full_name (cfg->method, TRUE));*/
+ if (bb->last_ins && bb->last_ins->opcode == CEE_BLT && !cfg->new_ir && bb->last_ins->inst_left->inst_right->opcode == OP_ICONST) {
+ if (try_unsigned_compare (cfg, bb)) {
+ /*g_print ("applied in bb %d (->%d) %s\n", bb->block_num, bb->last_ins->inst_target_bb->block_num, mono_method_full_name (cfg->method, TRUE));*/
changed = TRUE;
continue;
}
}
- if (last_ins && MONO_IS_COND_BRANCH_NOFP (last_ins)) {
- if (last_ins->inst_false_bb->out_of_line && (bb->region == last_ins->inst_false_bb->region)) {
+ if (bb->last_ins && MONO_IS_COND_BRANCH_NOFP (bb->last_ins)) {
+ if (bb->last_ins->inst_false_bb && bb->last_ins->inst_false_bb->out_of_line && (bb->region == bb->last_ins->inst_false_bb->region)) {
/* Reverse the branch */
- last_ins->opcode = reverse_branch_op (last_ins->opcode);
- bbn = last_ins->inst_false_bb;
- last_ins->inst_false_bb = last_ins->inst_true_bb;
- last_ins->inst_true_bb = bbn;
+ bb->last_ins->opcode = reverse_branch_op (bb->last_ins->opcode);
+ bbn = bb->last_ins->inst_false_bb;
+ bb->last_ins->inst_false_bb = bb->last_ins->inst_true_bb;
+ bb->last_ins->inst_true_bb = bbn;
- move_basic_block_to_end (cfg, last_ins->inst_true_bb);
+ move_basic_block_to_end (cfg, bb->last_ins->inst_true_bb);
if (cfg->verbose_level > 2)
g_print ("cbranch to throw block triggered %d.\n",
bb->block_num);
}
}
} while (changed && (niterations > 0));
-
}
static void
sig = mono_method_signature (cfg->method);
if (!MONO_TYPE_IS_VOID (sig->ret)) {
- cfg->ret = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoInst));
- cfg->ret->opcode = OP_RETARG;
- cfg->ret->inst_vtype = sig->ret;
- cfg->ret->klass = mono_class_from_mono_type (sig->ret);
+ if (cfg->new_ir) {
+ cfg->ret = mono_compile_create_var (cfg, sig->ret, OP_ARG);
+ /* Inhibit optimizations */
+ cfg->ret->flags |= MONO_INST_VOLATILE;
+ } else {
+ cfg->ret = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoInst));
+ cfg->ret->opcode = OP_RETARG;
+ cfg->ret->inst_vtype = sig->ret;
+ cfg->ret->klass = mono_class_from_mono_type (sig->ret);
+ }
}
if (cfg->verbose_level > 2)
g_print ("creating vars\n");
for (i = 0; i < sig->param_count; ++i) {
cfg->args [i + sig->hasthis] = mono_compile_create_var (cfg, sig->params [i], OP_ARG);
if (sig->params [i]->byref) {
- cfg->disable_ssa = TRUE;
+ if (!cfg->new_ir) cfg->disable_ssa = TRUE;
+ }
+ }
+
+ if (cfg->new_ir && cfg->verbose_level > 2) {
+ if (cfg->ret) {
+ printf ("\treturn : ");
+ mono_print_ins (cfg->ret);
+ }
+
+ if (sig->hasthis) {
+ printf ("\tthis: ");
+ mono_print_ins (cfg->args [0]);
+ }
+
+ for (i = 0; i < sig->param_count; ++i) {
+ printf ("\targ [%d]: ", i);
+ mono_print_ins (cfg->args [i + sig->hasthis]);
}
}
cfg->locals_start = cfg->num_varinfo;
+ cfg->locals = mono_mempool_alloc0 (cfg->mempool, header->num_locals * sizeof (MonoInst*));
if (cfg->verbose_level > 2)
g_print ("creating locals\n");
for (i = 0; i < header->num_locals; ++i)
- mono_compile_create_var (cfg, header->locals [i], OP_LOCAL);
+ cfg->locals [i] = mono_compile_create_var (cfg, header->locals [i], OP_LOCAL);
+
if (cfg->verbose_level > 2)
g_print ("locals done\n");
}
+/*
+ * mono_print_code:
+ *
+ *   Print the IR of every basic block of CFG to stdout, prefixing each
+ * block dump with MSG. Dispatches to the linear-IR printer (mono_print_bb)
+ * when cfg->new_ir is set, and to the old tree-IR printer
+ * (mono_print_tree) otherwise.
+ */
void
-mono_print_code (MonoCompile *cfg)
+mono_print_code (MonoCompile *cfg, const char* msg)
{
MonoBasicBlock *bb;
for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
- MonoInst *tree;
+ MonoInst *tree = bb->code;
- if (!MONO_INST_LIST_EMPTY (&bb->ins_list))
- g_print ("CODE BLOCK %d (nesting %d):\n",
- bb->block_num, bb->nesting);
+ if (cfg->new_ir) {
+ mono_print_bb (bb, msg);
+ } else {
+ /* The tree-IR path skips empty bblocks entirely */
+ if (!tree)
+ continue;
+
+ g_print ("%s CODE BLOCK %d (nesting %d):\n", msg, bb->block_num, bb->nesting);
- MONO_BB_FOR_EACH_INS (bb, tree) {
- mono_print_tree (tree);
- g_print ("\n");
+ MONO_BB_FOR_EACH_INS (bb, tree) {
+ mono_print_tree (tree);
+ g_print ("\n");
+ }
}
}
}
cfg->rs = mono_regstate_new ();
for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
- MonoInst *last_ins = mono_inst_list_last (&bb->ins_list);
-
- if (last_ins && MONO_IS_COND_BRANCH_OP (last_ins) &&
- bb->next_bb != last_ins->inst_false_bb) {
+ if (bb->last_ins && MONO_IS_COND_BRANCH_OP (bb->last_ins) &&
+ bb->last_ins->inst_false_bb && bb->next_bb != bb->last_ins->inst_false_bb) {
/* we are careful when inverting, since bugs like #59580
* could show up when dealing with NaNs.
*/
- if (MONO_IS_COND_BRANCH_NOFP(last_ins) && bb->next_bb == last_ins->inst_true_bb) {
- MonoBasicBlock *tmp = last_ins->inst_true_bb;
- last_ins->inst_true_bb = last_ins->inst_false_bb;
- last_ins->inst_false_bb = tmp;
+ if (MONO_IS_COND_BRANCH_NOFP(bb->last_ins) && bb->next_bb == bb->last_ins->inst_true_bb) {
+ MonoBasicBlock *tmp = bb->last_ins->inst_true_bb;
+ bb->last_ins->inst_true_bb = bb->last_ins->inst_false_bb;
+ bb->last_ins->inst_false_bb = tmp;
- last_ins->opcode = reverse_branch_op (last_ins->opcode);
+ bb->last_ins->opcode = reverse_branch_op (bb->last_ins->opcode);
} else {
- MonoInst *inst = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoInst));
- inst->opcode = OP_BR;
- inst->inst_target_bb = last_ins->inst_false_bb;
- mono_bblock_add_inst (bb, inst);
+ MonoInst *ins;
+
+ MONO_INST_NEW (cfg, ins, OP_BR);
+ ins->inst_target_bb = bb->last_ins->inst_false_bb;
+ MONO_ADD_INS (bb, ins);
}
}
}
#ifdef DEBUG_SELECTION
if (cfg->verbose_level >= 4) {
- for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
- MonoInst *tree;
- g_print ("DUMP BLOCK %d:\n", bb->block_num);
-
- MONO_BB_FOR_EACH_INS (bb, tree) {
- mono_print_tree (tree);
- g_print ("\n");
+ for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
+ MonoInst *tree;
+ g_print ("DUMP BLOCK %d:\n", bb->block_num);
+ MONO_BB_FOR_EACH_INS (bb, tree) {
+ mono_print_tree (tree);
+ g_print ("\n");
+ }
}
}
- }
#endif
for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
- MonoInst *tree, *n;
- MonoInstList head;
+ MonoInst *tree = bb->code, *next;
MBState *mbstate;
- MONO_INST_LIST_INIT (&head);
- if (MONO_INST_LIST_EMPTY (&bb->ins_list))
+ if (!tree)
continue;
- MONO_INST_LIST_SPLICE_INIT (&bb->ins_list, &head);
+ bb->code = NULL;
+ bb->last_ins = NULL;
cfg->cbb = bb;
mono_regstate_reset (cfg->rs);
if (cfg->verbose_level >= 3)
g_print ("LABEL BLOCK %d:\n", bb->block_num);
#endif
- MONO_INST_LIST_FOR_EACH_ENTRY_SAFE (tree, n, &head, node) {
+ for (; tree; tree = next) {
+ next = tree->next;
#ifdef DEBUG_SELECTION
if (cfg->verbose_level >= 3) {
mono_print_tree (tree);
}
bb->max_vreg = cfg->rs->next_vreg;
+ if (bb->last_ins)
+ bb->last_ins->next = NULL;
+
mono_mempool_empty (cfg->state_pool);
}
mono_mempool_destroy (cfg->state_pool);
/* we reuse dfn here */
/* bb->dfn = bb_count++; */
#ifdef MONO_ARCH_ENABLE_NORMALIZE_OPCODES
- mono_normalize_opcodes (cfg, bb);
+ if (!cfg->new_ir)
+ mono_normalize_opcodes (cfg, bb);
#endif
mono_arch_lowering_pass (cfg, bb);
if (cfg->opt & MONO_OPT_PEEPHOLE)
mono_arch_peephole_pass_1 (cfg, bb);
- mono_local_regalloc (cfg, bb);
+ if (!cfg->globalra)
+ mono_local_regalloc (cfg, bb);
if (cfg->opt & MONO_OPT_PEEPHOLE)
mono_arch_peephole_pass_2 (cfg, bb);
/* emit code all basic blocks */
for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
bb->native_offset = cfg->code_len;
- mono_arch_output_basic_block (cfg, bb);
+ //if ((bb == cfg->bb_entry) || !(bb->region == -1 && !bb->dfn))
+ mono_arch_output_basic_block (cfg, bb);
if (bb == cfg->bb_exit) {
cfg->epilog_begin = cfg->code_len;
/* fixme: align to MONO_ARCH_CODE_ALIGNMENT */
if (cfg->method->dynamic) {
+ guint unwindlen = 0;
+#ifdef WIN64
+ unwindlen = mono_arch_unwindinfo_get_size (cfg->arch.unwindinfo);
+#endif
/* Allocate the code into a separate memory pool so it can be freed */
cfg->dynamic_info = g_new0 (MonoJitDynamicMethodInfo, 1);
cfg->dynamic_info->code_mp = mono_code_manager_new_dynamic ();
mono_dynamic_code_hash_insert (cfg->domain, cfg->method, cfg->dynamic_info);
mono_domain_unlock (cfg->domain);
- code = mono_code_manager_reserve (cfg->dynamic_info->code_mp, cfg->code_size);
+ code = mono_code_manager_reserve (cfg->dynamic_info->code_mp, cfg->code_size + unwindlen);
} else {
+ guint unwindlen = 0;
+#ifdef MONO_ARCH_HAVE_UNWIND_TABLE
+ unwindlen = mono_arch_unwindinfo_get_size (cfg->arch.unwindinfo);
+#endif
mono_domain_lock (cfg->domain);
- code = mono_code_manager_reserve (cfg->domain->code_mp, cfg->code_size);
+ code = mono_code_manager_reserve (cfg->domain->code_mp, cfg->code_size + unwindlen);
mono_domain_unlock (cfg->domain);
}
MonoJitICallInfo *info = mono_find_jit_icall_by_addr (patch_info->data.target);
if (info) {
//printf ("TEST %s %p\n", info->name, patch_info->data.target);
+ // FIXME: CLEAN UP THIS MESS.
if ((cfg->method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) &&
- strstr (cfg->method->name, info->name))
+ strstr (cfg->method->name, info->name)) {
/*
* This is an icall wrapper, and this is a call to the
* wrapped function.
*/
- ;
- else {
+ if (cfg->compile_aot) {
+ patch_info->type = MONO_PATCH_INFO_JIT_ICALL_ADDR;
+ patch_info->data.name = info->name;
+ }
+ } else {
/* for these array methods we currently register the same function pointer
* since it's a vararg function. But this means that mono_find_jit_icall_by_addr ()
* will return the incorrect one depending on the order they are registered.
mono_domain_unlock (cfg->domain);
}
- if (!cfg->compile_aot)
+ if (!cfg->compile_aot && !cfg->new_ir)
/* In the aot case, the patch already points to the correct location */
patch_info->ip.i = patch_info->ip.label->inst_c0;
for (i = 0; i < patch_info->data.table->table_size; i++) {
- table [i] = GINT_TO_POINTER (patch_info->data.table->table [i]->native_offset);
+ /* Might be NULL if the switch is eliminated */
+ if (patch_info->data.table->table [i]) {
+ g_assert (patch_info->data.table->table [i]->native_offset);
+ table [i] = GINT_TO_POINTER (patch_info->data.table->table [i]->native_offset);
+ } else {
+ table [i] = NULL;
+ }
}
patch_info->data.table->table = (MonoBasicBlock**)table;
break;
g_free (nm);
}
+ {
+ gboolean is_generic = FALSE;
+
+ if (cfg->method->is_inflated || mono_method_get_generic_container (cfg->method) ||
+ cfg->method->klass->generic_container || cfg->method->klass->generic_class) {
+ is_generic = TRUE;
+ }
+
+ if (cfg->generic_sharing_context)
+ g_assert (is_generic);
+ }
+
#ifdef MONO_ARCH_HAVE_SAVE_UNWIND_INFO
mono_arch_save_unwind_info (cfg);
#endif
mono_arch_flush_icache (cfg->native_code, cfg->code_len);
mono_debug_close_method (cfg);
+#ifdef MONO_ARCH_HAVE_UNWIND_TABLE
+ mono_arch_unwindinfo_install_unwind_info (&cfg->arch.unwindinfo, cfg->native_code, cfg->code_len);
+#endif
}
-
-
-static void
-remove_critical_edges (MonoCompile *cfg) {
+void
+mono_remove_critical_edges (MonoCompile *cfg)
+{
MonoBasicBlock *bb;
MonoBasicBlock *previous_bb;
if (cfg->verbose_level > 3) {
for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
- MonoInst *last_ins;
int i;
- printf ("remove_critical_edges %s, BEFORE BB%d (in:", mono_method_full_name (cfg->method, TRUE), bb->block_num);
+ printf ("remove_critical_edges, BEFORE BB%d (in:", bb->block_num);
for (i = 0; i < bb->in_count; i++) {
printf (" %d", bb->in_bb [i]->block_num);
}
printf (" %d", bb->out_bb [i]->block_num);
}
printf (")");
- last_ins = mono_inst_list_last (&bb->ins_list);
- if (last_ins) {
+ if (bb->last_ins != NULL) {
printf (" ");
- mono_print_tree (last_ins);
+ mono_print_tree (bb->last_ins);
}
printf ("\n");
}
MonoBasicBlock *in_bb = bb->in_bb [in_bb_index];
if (in_bb->out_count > 1) {
MonoBasicBlock *new_bb = mono_mempool_alloc0 ((cfg)->mempool, sizeof (MonoBasicBlock));
- MONO_INST_LIST_INIT (&new_bb->ins_list);
new_bb->block_num = cfg->num_bblocks++;
// new_bb->real_offset = bb->real_offset;
new_bb->region = bb->region;
/* Do not alter the CFG while altering the BB list */
if (previous_bb->region == bb->region) {
if (previous_bb != cfg->bb_entry) {
- MonoInst *last_ins;
/* If previous_bb "followed through" to bb, */
/* keep it linked with a OP_BR */
- last_ins = mono_inst_list_last (&previous_bb->ins_list);
- if ((last_ins == NULL) ||
- ((last_ins->opcode != OP_BR) &&
- (!(MONO_IS_COND_BRANCH_OP (last_ins))) &&
- (last_ins->opcode != OP_SWITCH))) {
+ if ((previous_bb->last_ins == NULL) ||
+ ((previous_bb->last_ins->opcode != OP_BR) &&
+ (! (MONO_IS_COND_BRANCH_OP (previous_bb->last_ins))) &&
+ (previous_bb->last_ins->opcode != OP_SWITCH))) {
int i;
/* Make sure previous_bb really falls through bb */
for (i = 0; i < previous_bb->out_count; i++) {
/* put a new BB in the middle to hold the OP_BR */
MonoInst *jump;
MonoBasicBlock *new_bb_after_entry = mono_mempool_alloc0 ((cfg)->mempool, sizeof (MonoBasicBlock));
- MONO_INST_LIST_INIT (&new_bb_after_entry->ins_list);
new_bb_after_entry->block_num = cfg->num_bblocks++;
// new_bb_after_entry->real_offset = bb->real_offset;
new_bb_after_entry->region = bb->region;
previous_bb = new_bb_after_entry;
if (cfg->verbose_level > 2) {
- printf ("remove_critical_edges %s, added helper BB%d jumping to BB%d\n", mono_method_full_name (cfg->method, TRUE), new_bb_after_entry->block_num, bb->block_num);
+ printf ("remove_critical_edges, added helper BB%d jumping to BB%d\n", new_bb_after_entry->block_num, bb->block_num);
}
}
}
replace_in_block (bb, in_bb, new_bb);
if (cfg->verbose_level > 2) {
- printf ("remove_critical_edges %s, removed critical edge from BB%d to BB%d (added BB%d)\n", mono_method_full_name (cfg->method, TRUE), in_bb->block_num, bb->block_num, new_bb->block_num);
+ printf ("remove_critical_edges, removed critical edge from BB%d to BB%d (added BB%d)\n", in_bb->block_num, bb->block_num, new_bb->block_num);
}
}
}
if (cfg->verbose_level > 3) {
for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
- MonoInst *last_ins;
int i;
- printf ("remove_critical_edges %s, AFTER BB%d (in:", mono_method_full_name (cfg->method, TRUE), bb->block_num);
+ printf ("remove_critical_edges, AFTER BB%d (in:", bb->block_num);
for (i = 0; i < bb->in_count; i++) {
printf (" %d", bb->in_bb [i]->block_num);
}
printf (" %d", bb->out_bb [i]->block_num);
}
printf (")");
- last_ins = mono_inst_list_last (&bb->ins_list);
- if (last_ins) {
+ if (bb->last_ins != NULL) {
printf (" ");
- mono_print_tree (last_ins);
+ mono_print_tree (bb->last_ins);
}
printf ("\n");
}
}
}
+/*
+ * get_object_generic_inst:
+ *
+ *   Return a generic instantiation with TYPE_ARGC type arguments, each of
+ * which is System.Object. Used below to build the canonical "all object"
+ * context under which a generic-shared method is inflated and registered.
+ */
+static MonoGenericInst*
+get_object_generic_inst (int type_argc)
+{
+ MonoType **type_argv;
+ int i;
+
+ /* Stack allocation: presumably mono_metadata_get_generic_inst () interns
+  * or copies the argument array rather than keeping this pointer —
+  * TODO confirm against its implementation */
+ type_argv = alloca (sizeof (MonoType*) * type_argc);
+
+ for (i = 0; i < type_argc; ++i)
+ type_argv [i] = &mono_defaults.object_class->byval_arg;
+
+ return mono_metadata_get_generic_inst (type_argc, type_argv);
+}
+
/*
* mini_method_compile:
* @method: the method to compile
guint8 *ip;
MonoCompile *cfg;
MonoJitInfo *jinfo;
- int dfn = 0, i, code_size_ratio;
+ int dfn, i, code_size_ratio;
gboolean deadce_has_run = FALSE;
gboolean try_generic_shared;
- MonoMethod *method_to_compile;
+ MonoMethod *method_to_compile, *method_to_register;
int generic_info_size;
mono_jit_stats.methods_compiled++;
(opts & MONO_OPT_GSHARED) && (method->is_generic || method->klass->generic_container);
else
try_generic_shared = mono_class_generic_sharing_enabled (method->klass) &&
- (opts & MONO_OPT_GSHARED) && mono_method_is_generic_sharable_impl (method);
+ (opts & MONO_OPT_GSHARED) && mono_method_is_generic_sharable_impl (method, FALSE);
if (opts & MONO_OPT_GSHARED) {
if (try_generic_shared)
declaring_method = method;
} else {
declaring_method = mono_method_get_declaring_generic_method (method);
- g_assert (method->klass->generic_class->container_class == declaring_method->klass);
+ if (method->klass->generic_class)
+ g_assert (method->klass->generic_class->container_class == declaring_method->klass);
+ else
+ g_assert (method->klass == declaring_method->klass);
}
if (declaring_method->is_generic)
cfg->opt &= ~MONO_OPT_INLINE;
cfg->opt &= ~MONO_OPT_COPYPROP;
cfg->opt &= ~MONO_OPT_CONSPROP;
+ cfg->opt &= ~MONO_OPT_GSHARED;
}
header = mono_method_get_header (method_to_compile);
return cfg;
}
+ if (getenv ("MONO_VERBOSE_METHOD")) {
+ if (strcmp (cfg->method->name, getenv ("MONO_VERBOSE_METHOD")) == 0)
+ cfg->verbose_level = 4;
+ }
+
ip = (guint8 *)header->code;
+ cfg->intvars = mono_mempool_alloc0 (cfg->mempool, sizeof (guint16) * STACK_MAX * header->max_stack);
+
if (cfg->verbose_level > 2) {
if (cfg->generic_sharing_context)
g_print ("converting shared method %s\n", mono_method_full_name (method, TRUE));
g_print ("converting method %s\n", mono_method_full_name (method, TRUE));
}
+ if (cfg->opt & (MONO_OPT_ABCREM | MONO_OPT_SSAPRE))
+ cfg->opt |= MONO_OPT_SSA;
+
+ {
+ static int count = 0;
+
+ count ++;
+
+ if (getenv ("MONO_COUNT")) {
+ if (count == atoi (getenv ("MONO_COUNT"))) {
+ printf ("LAST: %s\n", mono_method_full_name (method, TRUE));
+ //cfg->verbose_level = 5;
+ }
+ if (count <= atoi (getenv ("MONO_COUNT")))
+ cfg->new_ir = TRUE;
+
+ /*
+ * Passing/returning vtypes in registers in managed methods is an ABI change
+ * from the old JIT.
+ */
+ disable_vtypes_in_regs = TRUE;
+ }
+ else
+ cfg->new_ir = TRUE;
+ }
+
+ /*
+ if ((cfg->method->klass->image != mono_defaults.corlib) || (strstr (cfg->method->klass->name, "StackOverflowException") && strstr (cfg->method->name, ".ctor")) || (strstr (cfg->method->klass->name, "OutOfMemoryException") && strstr (cfg->method->name, ".ctor")))
+ cfg->globalra = TRUE;
+ */
+
+ //cfg->globalra = TRUE;
+
+ //if (!strcmp (cfg->method->klass->name, "Tests") && !cfg->method->wrapper_type)
+ // cfg->globalra = TRUE;
+
+ {
+ static int count = 0;
+ count ++;
+
+ if (getenv ("COUNT2")) {
+ if (count == atoi (getenv ("COUNT2")))
+ printf ("LAST: %s\n", mono_method_full_name (cfg->method, TRUE));
+ if (count > atoi (getenv ("COUNT2")))
+ cfg->globalra = FALSE;
+ }
+ }
+
+ if (header->clauses)
+ cfg->globalra = FALSE;
+
+ if (cfg->method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED)
+ /* The code in the prolog clobbers caller saved registers */
+ cfg->globalra = FALSE;
+
+ // FIXME: Disable globalra in case of tracing/profiling
+
+ if (cfg->method->save_lmf)
+ /* The LMF saving code might clobber caller saved registers */
+ cfg->globalra = FALSE;
+
+ // FIXME:
+ if (!strcmp (cfg->method->name, "CompareInternal"))
+ cfg->globalra = FALSE;
+
+ /*
+ if (strstr (cfg->method->name, "LoadData"))
+ cfg->new_ir = FALSE;
+ */
+
+ if (cfg->new_ir) {
+ cfg->rs = mono_regstate_new ();
+ cfg->next_vreg = cfg->rs->next_vreg;
+ }
+
+ /* FIXME: Fix SSA to handle branches inside bblocks */
+ if (cfg->opt & MONO_OPT_SSA)
+ cfg->enable_extended_bblocks = FALSE;
+
+ /*
+ * FIXME: This confuses liveness analysis because variables which are assigned after
+ * a branch inside a bblock become part of the kill set, even though the assignment
+ * might not get executed. This causes the optimize_initlocals pass to delete some
+ * assignments which are needed.
+ * Also, the mono_if_conversion pass needs to be modified to recognize the code
+ * created by this.
+ */
+ //cfg->enable_extended_bblocks = TRUE;
+
/*
* create MonoInst* which represents arguments and local variables
*/
mono_compile_create_vars (cfg);
- if ((i = mono_method_to_ir (cfg, method_to_compile, NULL, NULL, cfg->locals_start, NULL, NULL, NULL, 0, FALSE)) < 0) {
+ if (cfg->new_ir) {
+ /* SSAPRE is not supported on linear IR */
+ cfg->opt &= ~MONO_OPT_SSAPRE;
+
+ i = mono_method_to_ir2 (cfg, method_to_compile, NULL, NULL, NULL, NULL, NULL, 0, FALSE);
+ }
+ else {
+ i = mono_method_to_ir (cfg, method_to_compile, NULL, NULL, cfg->locals_start, NULL, NULL, NULL, 0, FALSE);
+ }
+
+ if (i < 0) {
if (try_generic_shared && cfg->exception_type == MONO_EXCEPTION_GENERIC_SHARING_FAILED) {
if (compile_aot) {
if (MONO_PROBE_METHOD_COMPILE_END_ENABLED ())
mono_jit_stats.basic_blocks += cfg->num_bblocks;
mono_jit_stats.max_basic_blocks = MAX (cfg->num_bblocks, mono_jit_stats.max_basic_blocks);
- if ((cfg->num_varinfo > 2000) && !cfg->compile_aot) {
- /*
- * we disable some optimizations if there are too many variables
- * because JIT time may become too expensive. The actual number needs
- * to be tweaked and eventually the non-linear algorithms should be fixed.
- */
- cfg->opt &= ~ (MONO_OPT_LINEARS | MONO_OPT_COPYPROP | MONO_OPT_CONSPROP);
- cfg->disable_ssa = TRUE;
- }
-
/*g_print ("numblocks = %d\n", cfg->num_bblocks);*/
+ if (cfg->new_ir) {
+ mono_decompose_long_opts (cfg);
+
+ /* Should be done before branch opts */
+ if (cfg->opt & (MONO_OPT_CONSPROP | MONO_OPT_COPYPROP))
+ mono_local_cprop2 (cfg);
+ }
+
if (cfg->opt & MONO_OPT_BRANCH)
- optimize_branches (cfg);
+ mono_optimize_branches (cfg);
- if (cfg->opt & MONO_OPT_SSAPRE) {
- remove_critical_edges (cfg);
+ if (cfg->new_ir) {
+ /* This must be done _before_ global reg alloc and _after_ decompose */
+ mono_handle_global_vregs (cfg);
+ mono_local_deadce (cfg);
+ mono_if_conversion (cfg);
}
+ if ((cfg->opt & MONO_OPT_SSAPRE) || cfg->globalra)
+ mono_remove_critical_edges (cfg);
+
/* Depth-first ordering on basic blocks */
cfg->bblocks = mono_mempool_alloc (cfg->mempool, sizeof (MonoBasicBlock*) * (cfg->num_bblocks + 1));
+ dfn = 0;
df_visit (cfg->bb_entry, &dfn, cfg->bblocks);
if (cfg->num_bblocks != dfn + 1) {
MonoBasicBlock *bb;
cfg->num_bblocks = dfn + 1;
- if (!header->clauses) {
- /* remove unreachable code, because the code in them may be
- * inconsistent (access to dead variables for example) */
- for (bb = cfg->bb_entry; bb;) {
- MonoBasicBlock *bbn = bb->next_bb;
-
- if (bbn && bbn->region == -1 && !bbn->dfn) {
- if (cfg->verbose_level > 1)
- g_print ("found unreachable code in BB%d\n", bbn->block_num);
- bb->next_bb = bbn->next_bb;
- nullify_basic_block (bbn);
- } else {
- bb = bb->next_bb;
- }
+ /* remove unreachable code, because the code in them may be
+ * inconsistent (access to dead variables for example) */
+ for (bb = cfg->bb_entry; bb;) {
+ MonoBasicBlock *bbn = bb->next_bb;
+
+ /*
+ * FIXME: Can't use the second case in methods with clauses, since the
+ * bblocks inside the clauses are not processed during dfn computation.
+ */
+ if ((header->clauses && (bbn && bbn->region == -1 && bbn->in_count == 0)) ||
+ (!header->clauses && (bbn && bbn->region == -1 && !bbn->dfn))) {
+ if (cfg->verbose_level > 1)
+ g_print ("found unreachable code in BB%d\n", bbn->block_num);
+ /* There may exist unreachable branches to this bb */
+ bb->next_bb = bbn->next_bb;
+ nullify_basic_block (bbn);
+ } else {
+ bb = bb->next_bb;
}
}
}
+ if (((cfg->num_varinfo > 2000) || (cfg->num_bblocks > 1000)) && !cfg->compile_aot) {
+ /*
+ * we disable some optimizations if there are too many variables
+ * because JIT time may become too expensive. The actual number needs
+ * to be tweaked and eventually the non-linear algorithms should be fixed.
+ */
+ cfg->opt &= ~ (MONO_OPT_LINEARS | MONO_OPT_COPYPROP | MONO_OPT_CONSPROP);
+ cfg->disable_ssa = TRUE;
+ }
+
if (cfg->opt & MONO_OPT_LOOP) {
mono_compile_dominator_info (cfg, MONO_COMP_DOM | MONO_COMP_IDOM);
mono_compute_natural_loops (cfg);
if (!header->num_clauses && !cfg->disable_ssa) {
mono_local_cprop (cfg);
+
#ifndef DISABLE_SSA
- mono_ssa_compute (cfg);
+ if (cfg->new_ir)
+ mono_ssa_compute2 (cfg);
+ else
+ mono_ssa_compute (cfg);
#endif
}
#else
-
- /* fixme: add all optimizations which requires SSA */
- if (cfg->opt & (MONO_OPT_SSA | MONO_OPT_ABCREM | MONO_OPT_SSAPRE)) {
+ if (cfg->opt & MONO_OPT_SSA) {
if (!(cfg->comp_done & MONO_COMP_SSA) && !header->num_clauses && !cfg->disable_ssa) {
- mono_local_cprop (cfg);
#ifndef DISABLE_SSA
- mono_ssa_compute (cfg);
+ if (!cfg->new_ir)
+ mono_local_cprop (cfg);
+ if (cfg->new_ir)
+ mono_ssa_compute2 (cfg);
+ else
+ mono_ssa_compute (cfg);
#endif
if (cfg->verbose_level >= 2) {
if ((cfg->opt & MONO_OPT_CONSPROP) || (cfg->opt & MONO_OPT_COPYPROP)) {
if (cfg->comp_done & MONO_COMP_SSA) {
#ifndef DISABLE_SSA
- mono_ssa_cprop (cfg);
+ if (cfg->new_ir)
+ mono_ssa_cprop2 (cfg);
+ else
+ mono_ssa_cprop (cfg);
#endif
} else {
- mono_local_cprop (cfg);
+ if (!cfg->new_ir)
+ mono_local_cprop (cfg);
}
}
#ifndef DISABLE_SSA
if (cfg->comp_done & MONO_COMP_SSA) {
- //mono_ssa_deadce (cfg);
-
//mono_ssa_strength_reduction (cfg);
if (cfg->opt & MONO_OPT_SSAPRE) {
mono_perform_ssapre (cfg);
//mono_local_cprop (cfg);
}
-
+
if (cfg->opt & MONO_OPT_DEADCE) {
- mono_ssa_deadce (cfg);
+ if (cfg->new_ir)
+ mono_ssa_deadce2 (cfg);
+ else
+ mono_ssa_deadce (cfg);
deadce_has_run = TRUE;
}
-
- if ((cfg->flags & MONO_CFG_HAS_LDELEMA) && (cfg->opt & MONO_OPT_ABCREM))
- mono_perform_abc_removal (cfg);
-
- mono_ssa_remove (cfg);
- if (cfg->opt & MONO_OPT_BRANCH)
- optimize_branches (cfg);
+ if (cfg->new_ir) {
+ if ((cfg->flags & (MONO_CFG_HAS_LDELEMA|MONO_CFG_HAS_CHECK_THIS)) && (cfg->opt & MONO_OPT_ABCREM))
+ mono_perform_abc_removal2 (cfg);
+ } else {
+ if ((cfg->flags & MONO_CFG_HAS_LDELEMA) && (cfg->opt & MONO_OPT_ABCREM))
+ mono_perform_abc_removal (cfg);
+ }
+
+ if (cfg->new_ir) {
+ mono_ssa_remove2 (cfg);
+ mono_local_cprop2 (cfg);
+ mono_handle_global_vregs (cfg);
+ mono_local_deadce (cfg);
+ }
+ else
+ mono_ssa_remove (cfg);
+
+ if (cfg->opt & MONO_OPT_BRANCH) {
+ MonoBasicBlock *bb;
+
+ mono_optimize_branches (cfg);
+
+ /* Have to recompute cfg->bblocks and bb->dfn */
+ if (cfg->globalra) {
+ mono_remove_critical_edges (cfg);
+
+ for (bb = cfg->bb_entry; bb; bb = bb->next_bb)
+ bb->dfn = 0;
+
+ /* Depth-first ordering on basic blocks */
+ cfg->bblocks = mono_mempool_alloc (cfg->mempool, sizeof (MonoBasicBlock*) * (cfg->num_bblocks + 1));
+
+ dfn = 0;
+ df_visit (cfg->bb_entry, &dfn, cfg->bblocks);
+ cfg->num_bblocks = dfn + 1;
+ }
+ }
}
#endif
return cfg;
}
- if (cfg->verbose_level > 4) {
- printf ("BEFORE DECOMPSE START\n");
- mono_print_code (cfg);
- printf ("BEFORE DECOMPSE END\n");
+ if (cfg->new_ir) {
+#ifdef MONO_ARCH_SOFT_FLOAT
+ mono_handle_soft_float (cfg);
+#endif
+ mono_decompose_vtype_opts (cfg);
+ if (cfg->flags & MONO_CFG_HAS_ARRAY_ACCESS)
+ mono_decompose_array_access_opts (cfg);
+ }
+
+ if (!cfg->new_ir) {
+ if (cfg->verbose_level > 4)
+ mono_print_code (cfg, "BEFORE DECOMPOSE");
+
+ decompose_pass (cfg);
}
-
- decompose_pass (cfg);
if (cfg->got_var) {
GList *regs;
*/
mono_liveness_handle_exception_clauses (cfg);
- if (cfg->opt & MONO_OPT_LINEARS) {
+ if (cfg->globalra) {
+ MonoBasicBlock *bb;
+
+ /* Have to do this before regalloc since it can create vregs */
+ for (bb = cfg->bb_entry; bb; bb = bb->next_bb)
+ mono_arch_lowering_pass (cfg, bb);
+
+ mono_global_regalloc (cfg);
+ }
+
+ if ((cfg->opt & MONO_OPT_LINEARS) && !cfg->globalra) {
GList *vars, *regs;
/* For now, compute aliasing info only if needed for deadce... */
- if ((cfg->opt & MONO_OPT_DEADCE) && (! deadce_has_run) && (header->num_clauses == 0)) {
+ if (!cfg->new_ir && (cfg->opt & MONO_OPT_DEADCE) && (! deadce_has_run) && (header->num_clauses == 0)) {
cfg->aliasing_info = mono_build_aliasing_information (cfg);
}
//print_dfn (cfg);
/* variables are allocated after decompose, since decompose could create temps */
- mono_arch_allocate_vars (cfg);
+ if (!cfg->globalra)
+ mono_arch_allocate_vars (cfg);
- if (cfg->opt & MONO_OPT_CFOLD)
+ if (!cfg->new_ir && cfg->opt & MONO_OPT_CFOLD)
mono_constant_fold (cfg);
- mini_select_instructions (cfg);
+ if (cfg->new_ir) {
+ MonoBasicBlock *bb;
+ gboolean need_local_opts;
+
+ if (!cfg->globalra) {
+ mono_spill_global_vars (cfg, &need_local_opts);
+
+ if (need_local_opts || cfg->compile_aot) {
+ /* To optimize code created by spill_global_vars */
+ mono_local_cprop2 (cfg);
+ mono_local_deadce (cfg);
+ }
+ }
+
+ /* Add branches between non-consecutive bblocks */
+ for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
+ if (bb->last_ins && MONO_IS_COND_BRANCH_OP (bb->last_ins) &&
+ bb->last_ins->inst_false_bb && bb->next_bb != bb->last_ins->inst_false_bb) {
+ /* we are careful when inverting, since bugs like #59580
+ * could show up when dealing with NaNs.
+ */
+ if (MONO_IS_COND_BRANCH_NOFP(bb->last_ins) && bb->next_bb == bb->last_ins->inst_true_bb) {
+ MonoBasicBlock *tmp = bb->last_ins->inst_true_bb;
+ bb->last_ins->inst_true_bb = bb->last_ins->inst_false_bb;
+ bb->last_ins->inst_false_bb = tmp;
+
+ bb->last_ins->opcode = reverse_branch_op (bb->last_ins->opcode);
+ } else {
+ MonoInst *inst = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoInst));
+ inst->opcode = OP_BR;
+ inst->inst_target_bb = bb->last_ins->inst_false_bb;
+ mono_bblock_add_inst (bb, inst);
+ }
+ }
+ }
+
+ if (cfg->verbose_level >= 4 && !cfg->globalra) {
+ for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
+ MonoInst *tree = bb->code;
+ g_print ("DUMP BLOCK %d:\n", bb->block_num);
+ if (!tree)
+ continue;
+ for (; tree; tree = tree->next) {
+ mono_print_ins_index (-1, tree);
+ }
+ }
+ }
+
+ /* FIXME: */
+ for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
+ bb->max_vreg = cfg->next_vreg;
+ }
+ }
+ else
+ mini_select_instructions (cfg);
mono_codegen (cfg);
if (cfg->verbose_level >= 2) {
mono_domain_unlock (cfg->domain);
}
- jinfo->method = method;
+ if (cfg->generic_sharing_context) {
+ MonoGenericContext object_context;
+
+ g_assert (!method_to_compile->klass->generic_class);
+ if (method_to_compile->klass->generic_container) {
+ int type_argc = method_to_compile->klass->generic_container->type_argc;
+
+ object_context.class_inst = get_object_generic_inst (type_argc);
+ } else {
+ object_context.class_inst = NULL;
+ }
+
+ if (mini_method_get_context (method_to_compile)->method_inst) {
+ int type_argc = mini_method_get_context (method_to_compile)->method_inst->type_argc;
+
+ object_context.method_inst = get_object_generic_inst (type_argc);
+ } else {
+ object_context.method_inst = NULL;
+ }
+
+ g_assert (object_context.class_inst || object_context.method_inst);
+
+ method_to_register = mono_class_inflate_generic_method (method_to_compile, &object_context);
+ } else {
+ g_assert (method == method_to_compile);
+ method_to_register = method;
+ }
+
+ jinfo->method = method_to_register;
jinfo->code_start = cfg->native_code;
jinfo->code_size = cfg->code_len;
jinfo->used_regs = cfg->used_int_regs;
jinfo->cas_inited = FALSE; /* initialization delayed at the first stalk walk using this method */
jinfo->num_clauses = header->num_clauses;
- /*
- * Static methods only get a generic JIT info if they use the
- * rgctx variable (which they are forced to if they have any
- * open catch clauses).
- */
- if (cfg->generic_sharing_context &&
- (cfg->rgctx_var || !(method_to_compile->flags & METHOD_ATTRIBUTE_STATIC))) {
+ if (cfg->generic_sharing_context) {
MonoInst *inst;
MonoGenericJitInfo *gi;
gi->generic_sharing_context = cfg->generic_sharing_context;
- if (method_to_compile->flags & METHOD_ATTRIBUTE_STATIC) {
- inst = cfg->rgctx_var;
- g_assert (inst->opcode == OP_REGOFFSET);
- } else {
- inst = cfg->args [0];
- }
+ /*
+ * Non-generic static methods only get a "this" info
+ * if they use the rgctx variable (which they are
+ * forced to if they have any open catch clauses).
+ */
+ if (cfg->rgctx_var ||
+ (!(method_to_compile->flags & METHOD_ATTRIBUTE_STATIC) &&
+ !mini_method_get_context (method_to_compile)->method_inst)) {
+ gi->has_this = 1;
+
+ if ((method_to_compile->flags & METHOD_ATTRIBUTE_STATIC) ||
+ mini_method_get_context (method_to_compile)->method_inst) {
+ inst = cfg->rgctx_var;
+ g_assert (inst->opcode == OP_REGOFFSET);
+ } else {
+ inst = cfg->args [0];
+ }
- if (inst->opcode == OP_REGVAR) {
- gi->this_in_reg = 1;
- gi->this_reg = inst->dreg;
+ if (inst->opcode == OP_REGVAR) {
+ gi->this_in_reg = 1;
+ gi->this_reg = inst->dreg;
- //g_print ("this in reg %d\n", inst->dreg);
- } else {
- g_assert (inst->opcode == OP_REGOFFSET);
+ //g_print ("this in reg %d\n", inst->dreg);
+ } else {
+ g_assert (inst->opcode == OP_REGOFFSET);
#ifdef __i386__
- g_assert (inst->inst_basereg == X86_EBP);
+ g_assert (inst->inst_basereg == X86_EBP);
#elif defined(__x86_64__)
- g_assert (inst->inst_basereg == X86_EBP || inst->inst_basereg == X86_ESP);
+ g_assert (inst->inst_basereg == X86_EBP || inst->inst_basereg == X86_ESP);
#endif
- g_assert (inst->inst_offset >= G_MININT32 && inst->inst_offset <= G_MAXINT32);
+ g_assert (inst->inst_offset >= G_MININT32 && inst->inst_offset <= G_MAXINT32);
- gi->this_in_reg = 0;
- gi->this_reg = inst->inst_basereg;
- gi->this_offset = inst->inst_offset;
+ gi->this_in_reg = 0;
+ gi->this_reg = inst->inst_basereg;
+ gi->this_offset = inst->inst_offset;
- //g_print ("this at offset %d\n", inst->inst_offset);
+ //g_print ("this at offset %d from reg %d\n", gi->this_offset, gi->this_reg);
+ }
+ } else {
+ gi->has_this = 0;
}
}
mono_arch_fixup_jinfo (cfg);
#endif
- mono_domain_lock (cfg->domain);
- mono_jit_info_table_add (cfg->domain, jinfo);
+ if (!cfg->compile_aot) {
+ mono_domain_lock (cfg->domain);
+ mono_jit_info_table_add (cfg->domain, jinfo);
- if (cfg->method->dynamic)
- mono_dynamic_code_hash_lookup (cfg->domain, cfg->method)->ji = jinfo;
- mono_domain_unlock (cfg->domain);
+ if (cfg->method->dynamic)
+ mono_dynamic_code_hash_lookup (cfg->domain, cfg->method)->ji = jinfo;
+ mono_domain_unlock (cfg->domain);
+ }
/* collect statistics */
mono_jit_stats.allocated_code_size += cfg->code_len;
code_size_ratio = cfg->code_len;
- if (code_size_ratio > mono_jit_stats.biggest_method_size) {
- mono_jit_stats.biggest_method_size = code_size_ratio;
- mono_jit_stats.biggest_method = method;
+ if (code_size_ratio > mono_jit_stats.biggest_method_size && mono_jit_stats.enabled) {
+ mono_jit_stats.biggest_method_size = code_size_ratio;
+ g_free (mono_jit_stats.biggest_method);
+ mono_jit_stats.biggest_method = g_strdup_printf ("%s::%s)", method->klass->name, method->name);
}
code_size_ratio = (code_size_ratio * 100) / mono_method_get_header (method)->code_size;
- if (code_size_ratio > mono_jit_stats.max_code_size_ratio) {
+ if (code_size_ratio > mono_jit_stats.max_code_size_ratio && mono_jit_stats.enabled) {
mono_jit_stats.max_code_size_ratio = code_size_ratio;
- mono_jit_stats.max_ratio_method = method;
+ g_free (mono_jit_stats.max_ratio_method);
+ mono_jit_stats.max_ratio_method = g_strdup_printf ("%s::%s)", method->klass->name, method->name);
}
mono_jit_stats.native_code_size += cfg->code_len;
{
MonoMethod *open_method;
- if (!mono_method_is_generic_sharable_impl (method))
+ if (!mono_method_is_generic_sharable_impl (method, FALSE))
return NULL;
open_method = mono_method_get_declaring_generic_method (method);
return mono_domain_lookup_shared_generic (domain, open_method);
}
+/*
+ * lookup_method_inner:
+ *
+ *   Look up the MonoJitInfo for METHOD in DOMAIN's jit code hash, falling
+ * back to the shared-generic-method lookup when there is no exact match.
+ * Returns NULL if neither lookup succeeds.
+ *
+ * LOCKING: Assumes domain->jit_code_hash_lock is held.
+ */
static MonoJitInfo*
-lookup_method (MonoDomain *domain, MonoMethod *method)
+lookup_method_inner (MonoDomain *domain, MonoMethod *method)
{
MonoJitInfo *ji = mono_internal_hash_table_lookup (&domain->jit_code_hash, method);
- if (ji != NULL)
+ if (ji)
return ji;
return lookup_generic_method (domain, method);
}
+static MonoJitInfo*
+lookup_method (MonoDomain *domain, MonoMethod *method)
+{
+ MonoJitInfo *info;
+
+ mono_domain_jit_code_hash_lock (domain);
+ info = lookup_method_inner (domain, method);
+ mono_domain_jit_code_hash_unlock (domain);
+
+ return info;
+}
+
static gpointer
mono_jit_compile_method_inner (MonoMethod *method, MonoDomain *target_domain, int opt)
{
return NULL;
}
+ if (mono_aot_only)
+ g_error ("Attempting to JIT compile method '%s' while running with --aot-only.\n", mono_method_full_name (method, TRUE));
+
cfg = mini_method_compile (method, opt, target_domain, TRUE, FALSE, 0);
switch (cfg->exception_type) {
mono_raise_exception ((MonoException*)exc);
}
+ case MONO_EXCEPTION_OBJECT_SUPPLIED: {
+ MonoException *exp = cfg->exception_ptr;
+ MONO_GC_UNREGISTER_ROOT (cfg->exception_ptr);
+ mono_destroy_compile (cfg);
+ mono_raise_exception (exp);
+ break;
+ }
default:
g_assert_not_reached ();
}
/* Check if some other thread already did the job. In this case, we can
discard the code this thread generated. */
- if ((info = lookup_method (target_domain, method))) {
+ mono_domain_jit_code_hash_lock (target_domain);
+
+ info = lookup_method_inner (target_domain, method);
+ if (info) {
/* We can't use a domain specific method in another domain */
if ((target_domain == mono_domain_get ()) || info->domain_neutral) {
code = info->code_start;
}
if (code == NULL) {
- mono_internal_hash_table_insert (&target_domain->jit_code_hash, method, cfg->jit_info);
+ mono_internal_hash_table_insert (&target_domain->jit_code_hash, cfg->jit_info->method, cfg->jit_info);
+ mono_domain_jit_code_hash_unlock (target_domain);
code = cfg->native_code;
- if (cfg->generic_sharing_context && mono_method_is_generic_sharable_impl (method)) {
+ if (cfg->generic_sharing_context && mono_method_is_generic_sharable_impl (method, FALSE)) {
/* g_print ("inserting method %s.%s.%s\n", method->klass->name_space, method->klass->name, method->name); */
mono_domain_register_shared_generic (target_domain,
mono_method_get_declaring_generic_method (method), cfg->jit_info);
mono_stats.generics_shared_methods++;
}
+ } else {
+ mono_domain_jit_code_hash_unlock (target_domain);
}
mono_destroy_compile (cfg);
else
target_domain = domain;
- mono_domain_lock (target_domain);
-
- if ((info = lookup_method (target_domain, method))) {
+ info = lookup_method (target_domain, method);
+ if (info) {
/* We can't use a domain specific method in another domain */
if (! ((domain != target_domain) && !info->domain_neutral)) {
MonoVTable *vtable;
- mono_domain_unlock (target_domain);
mono_jit_stats.methods_lookups++;
vtable = mono_class_vtable (domain, method->klass);
mono_runtime_class_init (vtable);
}
}
- mono_domain_unlock (target_domain);
p = mono_create_ftnptr (target_domain, mono_jit_compile_method_inner (method, target_domain, opt));
if (callinfo) {
return mono_jit_compile_method_with_opt (method, default_opt);
}
+#ifdef MONO_ARCH_HAVE_INVALIDATE_METHOD
static void
invalidated_delegate_trampoline (char *desc)
{
"See http://www.go-mono.com/delegate.html for an explanation and ways to fix this.",
desc);
}
+#endif
/*
* mono_jit_free_method:
else
target_domain = domain;
- mono_domain_lock (target_domain);
-
- if ((info = lookup_method (target_domain, method))) {
+ info = lookup_method (target_domain, method);
+ if (info) {
/* We can't use a domain specific method in another domain */
if (! ((domain != target_domain) && !info->domain_neutral)) {
- mono_domain_unlock (target_domain);
mono_jit_stats.methods_lookups++;
return info->code_start;
}
}
- mono_domain_unlock (target_domain);
-
return NULL;
}
return NULL;
}
- if ((method->flags & METHOD_ATTRIBUTE_STATIC) &&
+ if (((method->flags & METHOD_ATTRIBUTE_STATIC) ||
+ (method->is_inflated && mono_method_get_context (method)->method_inst)) &&
mono_class_generic_sharing_enabled (method->klass) &&
- mono_method_is_generic_sharable_impl (method)) {
+ mono_method_is_generic_sharable_impl (method, FALSE)) {
to_compile = mono_marshal_get_static_rgctx_invoke (method);
} else {
to_compile = method;
{
static gpointer tramp = NULL;
if (!tramp)
- tramp = mono_arch_create_specific_trampoline (MONO_FAKE_IMT_METHOD, MONO_TRAMPOLINE_GENERIC, mono_get_root_domain (), NULL);
+ tramp = mono_create_specific_trampoline (MONO_FAKE_IMT_METHOD, MONO_TRAMPOLINE_JIT, mono_get_root_domain (), NULL);
return tramp;
}
#endif
{
static gpointer tramp = NULL;
if (!tramp)
- tramp = mono_arch_create_specific_trampoline (MONO_FAKE_VTABLE_METHOD, MONO_TRAMPOLINE_GENERIC, mono_get_root_domain (), NULL);
+ tramp = mono_create_specific_trampoline (MONO_FAKE_VTABLE_METHOD, MONO_TRAMPOLINE_JIT, mono_get_root_domain (), NULL);
return tramp;
}
#endif
debug_options.break_on_unverified = TRUE;
else if (!strcmp (arg, "no-gdb-backtrace"))
debug_options.no_gdb_backtrace = TRUE;
+ else if (!strcmp (arg, "dont-free-domains"))
+ mono_dont_free_domains = TRUE;
else {
fprintf (stderr, "Invalid option for the MONO_DEBUG env variable: %s\n", arg);
- fprintf (stderr, "Available options: 'handle-sigint', 'keep-delegates', 'collect-pagefault-stats', 'break-on-unverified', 'no-gdb-backtrace'\n");
+ fprintf (stderr, "Available options: 'handle-sigint', 'keep-delegates', 'collect-pagefault-stats', 'break-on-unverified', 'no-gdb-backtrace', 'dont-free-domains'\n");
exit (1);
}
}
+
+ g_strfreev (args);
}
MonoDebugOptions *
mono_trampolines_init ();
- mono_exceptions_init ();
-
if (!g_thread_supported ())
g_thread_init (NULL);
#define JIT_INVOKE_WORKS
#ifdef JIT_INVOKE_WORKS
mono_install_runtime_invoke (mono_jit_runtime_invoke);
- mono_install_handler (mono_arch_get_throw_exception ());
#endif
mono_install_stack_walk (mono_jit_walk_stack);
mono_install_get_cached_class_info (mono_aot_get_cached_class_info);
domain = mono_init_version (filename, runtime_version);
else
domain = mono_init_from_assembly (filename, filename);
+
+ if (mono_aot_only) {
+ /* The IMT tables are very dynamic thus they are hard to AOT */
+ mono_use_imt = FALSE;
+ /* This helps catch code allocation requests */
+ mono_code_manager_set_read_only (domain->code_mp);
+ }
+
#ifdef MONO_ARCH_HAVE_IMT
- mono_install_imt_thunk_builder (mono_arch_build_imt_thunk);
- mono_install_imt_trampoline (mini_get_imt_trampoline ());
+ if (mono_use_imt) {
+ mono_install_imt_thunk_builder (mono_arch_build_imt_thunk);
+ mono_install_imt_trampoline (mini_get_imt_trampoline ());
#if MONO_ARCH_COMMON_VTABLE_TRAMPOLINE
- mono_install_vtable_trampoline (mini_get_vtable_trampoline ());
+ mono_install_vtable_trampoline (mini_get_vtable_trampoline ());
#endif
+ }
#endif
+
+ /* This must come after mono_init () in the aot-only case */
+ mono_exceptions_init ();
+ mono_install_handler (mono_get_throw_exception ());
+
mono_icall_init ();
mono_add_internal_call ("System.Diagnostics.StackFrame::get_frame_info",
register_icall (mono_jit_thread_attach, "mono_jit_thread_attach", "void", TRUE);
register_icall (mono_domain_get, "mono_domain_get", "ptr", TRUE);
- register_icall (mono_arch_get_throw_exception (), "mono_arch_throw_exception", "void object", TRUE);
- register_icall (mono_arch_get_rethrow_exception (), "mono_arch_rethrow_exception", "void object", TRUE);
- register_icall (mono_arch_get_throw_exception_by_name (), "mono_arch_throw_exception_by_name", "void ptr", TRUE);
+ register_icall (mono_get_throw_exception (), "mono_arch_throw_exception", "void object", TRUE);
+ register_icall (mono_get_rethrow_exception (), "mono_arch_rethrow_exception", "void object", TRUE);
+ register_icall (mono_get_throw_exception_by_name (), "mono_arch_throw_exception_by_name", "void ptr", TRUE);
#if MONO_ARCH_HAVE_THROW_CORLIB_EXCEPTION
- register_icall (mono_arch_get_throw_corlib_exception (), "mono_arch_throw_corlib_exception",
+ register_icall (mono_get_throw_corlib_exception (), "mono_arch_throw_corlib_exception",
"void ptr", TRUE);
#endif
register_icall (mono_thread_get_undeniable_exception, "mono_thread_get_undeniable_exception", "object", FALSE);
mono_register_opcode_emulation (CEE_DIV_UN, "__emul_idiv_un", "int32 int32 int32", mono_idiv_un, FALSE);
mono_register_opcode_emulation (CEE_REM, "__emul_irem", "int32 int32 int32", mono_irem, FALSE);
mono_register_opcode_emulation (CEE_REM_UN, "__emul_irem_un", "int32 int32 int32", mono_irem_un, FALSE);
+ mono_register_opcode_emulation (OP_IDIV, "__emul_op_idiv", "int32 int32 int32", mono_idiv, FALSE);
+ mono_register_opcode_emulation (OP_IDIV_UN, "__emul_op_idiv_un", "int32 int32 int32", mono_idiv_un, FALSE);
+ mono_register_opcode_emulation (OP_IREM, "__emul_op_irem", "int32 int32 int32", mono_irem, FALSE);
+ mono_register_opcode_emulation (OP_IREM_UN, "__emul_op_irem_un", "int32 int32 int32", mono_irem_un, FALSE);
#endif
#ifdef MONO_ARCH_EMULATE_MUL_DIV
mono_register_opcode_emulation (CEE_MUL_OVF, "__emul_imul_ovf", "int32 int32 int32", mono_imul_ovf, FALSE);
mono_register_opcode_emulation (CEE_MUL_OVF_UN, "__emul_imul_ovf_un", "int32 int32 int32", mono_imul_ovf_un, FALSE);
mono_register_opcode_emulation (CEE_MUL, "__emul_imul", "int32 int32 int32", mono_imul, TRUE);
+ mono_register_opcode_emulation (OP_IMUL, "__emul_op_imul", "int32 int32 int32", mono_imul, TRUE);
+ mono_register_opcode_emulation (OP_IMUL_OVF, "__emul_op_imul_ovf", "int32 int32 int32", mono_imul_ovf, FALSE);
+ mono_register_opcode_emulation (OP_IMUL_OVF_UN, "__emul_op_imul_ovf_un", "int32 int32 int32", mono_imul_ovf_un, FALSE);
#endif
#if defined(MONO_ARCH_EMULATE_MUL_DIV) || defined(MONO_ARCH_SOFT_FLOAT)
mono_register_opcode_emulation (OP_FDIV, "__emul_fdiv", "double double double", mono_fdiv, FALSE);
#endif
#ifdef MONO_ARCH_EMULATE_CONV_R8_UN
mono_register_opcode_emulation (CEE_CONV_R_UN, "__emul_conv_r_un", "double int32", mono_conv_to_r8_un, FALSE);
+ mono_register_opcode_emulation (OP_ICONV_TO_R_UN, "__emul_iconv_to_r_un", "double int32", mono_conv_to_r8_un, FALSE);
#endif
#ifdef MONO_ARCH_EMULATE_LCONV_TO_R8
mono_register_opcode_emulation (OP_LCONV_TO_R8, "__emul_lconv_to_r8", "double long", mono_lconv_to_r8, FALSE);
mono_register_opcode_emulation (OP_FMUL, "__emul_fmul", "double double double", mono_fmul, FALSE);
mono_register_opcode_emulation (OP_FNEG, "__emul_fneg", "double double", mono_fneg, FALSE);
mono_register_opcode_emulation (CEE_CONV_R8, "__emul_conv_r8", "double int32", mono_conv_to_r8, FALSE);
+ mono_register_opcode_emulation (OP_ICONV_TO_R8, "__emul_iconv_to_r8", "double int32", mono_conv_to_r8, FALSE);
mono_register_opcode_emulation (CEE_CONV_R4, "__emul_conv_r4", "double int32", mono_conv_to_r4, FALSE);
+ mono_register_opcode_emulation (OP_ICONV_TO_R4, "__emul_iconv_to_r4", "double int32", mono_conv_to_r4, FALSE);
mono_register_opcode_emulation (OP_FCONV_TO_R4, "__emul_fconv_to_r4", "double double", mono_fconv_r4, FALSE);
mono_register_opcode_emulation (OP_FCONV_TO_I1, "__emul_fconv_to_i1", "int8 double", mono_fconv_i1, FALSE);
mono_register_opcode_emulation (OP_FCONV_TO_I2, "__emul_fconv_to_i2", "int16 double", mono_fconv_i2, FALSE);
register_icall (mono_ldftn, "mono_ldftn", "ptr ptr", FALSE);
register_icall (mono_ldftn_nosync, "mono_ldftn_nosync", "ptr ptr", FALSE);
register_icall (mono_ldvirtfn, "mono_ldvirtfn", "ptr object ptr", FALSE);
+ register_icall (mono_ldvirtfn_gshared, "mono_ldvirtfn_gshared", "ptr object ptr", FALSE);
register_icall (mono_helper_compile_generic_method, "compile_generic_method", "ptr object ptr ptr ptr", FALSE);
register_icall (mono_helper_compile_generic_method_wo_context, "compile_generic_method_wo_context",
"ptr object ptr ptr", FALSE);
register_icall (mono_create_corlib_exception_0, "mono_create_corlib_exception_0", "object int", TRUE);
register_icall (mono_create_corlib_exception_1, "mono_create_corlib_exception_1", "object int object", TRUE);
register_icall (mono_create_corlib_exception_2, "mono_create_corlib_exception_2", "object int object object", TRUE);
+ register_icall (mono_array_new_2, "mono_array_new_2", "object ptr int int", FALSE);
#endif
#define JIT_RUNTIME_WORKS
g_print ("Analyze stack repeat: %ld\n", mono_jit_stats.analyze_stack_repeat);
g_print ("Compiled CIL code size: %ld\n", mono_jit_stats.cil_code_size);
g_print ("Native code size: %ld\n", mono_jit_stats.native_code_size);
- g_print ("Max code size ratio: %.2f (%s::%s)\n", mono_jit_stats.max_code_size_ratio/100.0,
- mono_jit_stats.max_ratio_method->klass->name, mono_jit_stats.max_ratio_method->name);
- g_print ("Biggest method: %ld (%s::%s)\n", mono_jit_stats.biggest_method_size,
- mono_jit_stats.biggest_method->klass->name, mono_jit_stats.biggest_method->name);
+ g_print ("Max code size ratio: %.2f (%s)\n", mono_jit_stats.max_code_size_ratio/100.0,
+ mono_jit_stats.max_ratio_method);
+ g_print ("Biggest method: %ld (%s)\n", mono_jit_stats.biggest_method_size,
+ mono_jit_stats.biggest_method);
g_print ("Code reallocs: %ld\n", mono_jit_stats.code_reallocs);
g_print ("Allocated code size: %ld\n", mono_jit_stats.allocated_code_size);
g_print ("Inlineable methods: %ld\n", mono_jit_stats.inlineable_methods);
g_print ("Inlined methods: %ld\n", mono_jit_stats.inlined_methods);
+ g_print ("Regvars: %ld\n", mono_jit_stats.regvars);
g_print ("Locals stack size: %ld\n", mono_jit_stats.locals_stack_size);
g_print ("\nCreated object count: %ld\n", mono_stats.new_object_count);
g_print ("Metadata pagefaults : %d\n", mono_raw_buffer_get_n_pagefaults ());
g_print ("AOT pagefaults : %d\n", mono_aot_get_n_pagefaults ());
}
+
+ g_free (mono_jit_stats.max_ratio_method);
+ mono_jit_stats.max_ratio_method = NULL;
+ g_free (mono_jit_stats.biggest_method);
+ mono_jit_stats.biggest_method = NULL;
}
}