#include <mono/metadata/security-core-clr.h>
#include <mono/metadata/verify.h>
#include <mono/metadata/verify-internals.h>
+#include <mono/metadata/mempool-internals.h>
#include <mono/utils/mono-math.h>
#include <mono/utils/mono-compiler.h>
#include <mono/utils/mono-counters.h>
if (method->klass->valuetype) \
GENERIC_SHARING_FAILURE ((opcode)); \
} while (0)
-#define GET_RGCTX(rgctx) do { \
+#define GET_RGCTX(rgctx, context_used) do { \
MonoInst *this = NULL; \
+ g_assert (context_used); \
GENERIC_SHARING_FAILURE_IF_VALUETYPE_METHOD(*ip); \
- if (!(method->flags & METHOD_ATTRIBUTE_STATIC)) \
+ if (!(method->flags & METHOD_ATTRIBUTE_STATIC) && \
+ !((context_used) & MONO_GENERIC_CONTEXT_USED_METHOD)) \
NEW_ARGLOAD (cfg, this, 0); \
- (rgctx) = get_runtime_generic_context (cfg, method, this, ip); \
+ (rgctx) = get_runtime_generic_context (cfg, method, (context_used), this, ip); \
} while (0)
+
#define MONO_CHECK_THIS(ins) (mono_method_signature (cfg->method)->hasthis && (ins)->ssa_op == MONO_SSA_LOAD && (ins)->inst_left->inst_c0 == 0)
static void setup_stat_profiler (void);
static void dec_foreach (MonoInst *tree, MonoCompile *cfg);
+int mono_method_to_ir2 (MonoCompile *cfg, MonoMethod *method, MonoBasicBlock *start_bblock, MonoBasicBlock *end_bblock,
+ MonoInst *return_var, GList *dont_inline, MonoInst **inline_args,
+ guint inline_offset, gboolean is_virtual_call);
+
static int mono_method_to_ir (MonoCompile *cfg, MonoMethod *method, MonoBasicBlock *start_bblock, MonoBasicBlock *end_bblock,
int locals_offset, MonoInst *return_var, GList *dont_inline, MonoInst **inline_args,
guint inline_offset, gboolean is_virtual_call);
#endif
/* helper methods signature */
-static MonoMethodSignature *helper_sig_class_init_trampoline = NULL;
-static MonoMethodSignature *helper_sig_generic_class_init_trampoline = NULL;
-static MonoMethodSignature *helper_sig_rgctx_lazy_fetch_trampoline = NULL;
-static MonoMethodSignature *helper_sig_domain_get = NULL;
+/* FIXME: Make these static again */
+MonoMethodSignature *helper_sig_class_init_trampoline = NULL;
+MonoMethodSignature *helper_sig_domain_get = NULL;
+MonoMethodSignature *helper_sig_generic_class_init_trampoline = NULL;
+MonoMethodSignature *helper_sig_rgctx_lazy_fetch_trampoline = NULL;
static guint32 default_opt = 0;
static gboolean default_opt_set = FALSE;
#ifndef DISABLE_AOT
gboolean mono_compile_aot = FALSE;
#endif
+/* If this is set, no code is generated dynamically, everything is taken from AOT files */
+gboolean mono_aot_only = FALSE;
+/* Whether to use IMT */
+#ifdef MONO_ARCH_HAVE_IMT
+gboolean mono_use_imt = TRUE;
+#else
+gboolean mono_use_imt = FALSE;
+#endif
MonoMethodDesc *mono_inject_async_exc_method = NULL;
int mono_inject_async_exc_pos;
MonoMethodDesc *mono_break_at_bb_method = NULL;
#define mono_jit_unlock() LeaveCriticalSection (&jit_mutex)
static CRITICAL_SECTION jit_mutex;
-static GHashTable *rgctx_lazy_fetch_trampoline_hash = NULL;
-
static MonoCodeManager *global_codeman = NULL;
-static GHashTable *jit_icall_name_hash = NULL;
+/* FIXME: Make this static again */
+GHashTable *jit_icall_name_hash = NULL;
static MonoDebugOptions debug_options;
/* Whether to check for pending exceptions in managed-to-native wrappers */
gboolean check_for_pending_exc = TRUE;
+/* Whether to disable passing/returning small valuetypes in registers for managed methods */
+gboolean disable_vtypes_in_regs = FALSE;
+
gboolean
mono_running_on_valgrind (void)
{
{
void *ptr;
+ if (mono_aot_only)
+ g_error ("Attempting to allocate from the global code manager while running with --aot-only.\n");
+
if (!global_codeman) {
/* This can happen during startup */
global_codeman = mono_code_manager_new ();
* dfn: Depth First Number
* block_num: unique ID assigned at bblock creation
*/
-#define NEW_BBLOCK(cfg,new_bb) do { \
- new_bb = mono_mempool_alloc0 ((cfg)->mempool, sizeof (MonoBasicBlock)); \
- MONO_INST_LIST_INIT (&new_bb->ins_list); \
- } while (0)
-
+#define NEW_BBLOCK(cfg) (mono_mempool_alloc0 ((cfg)->mempool, sizeof (MonoBasicBlock)))
#define ADD_BBLOCK(cfg,b) do { \
cfg->cil_offset_to_bb [(b)->cil_code - cfg->cil_start] = (b); \
(b)->block_num = cfg->num_bblocks++; \
(tblock) = cfg->cil_offset_to_bb [(ip) - cfg->cil_start]; \
if (!(tblock)) { \
if ((ip) >= end || (ip) < header->code) UNVERIFIED; \
- NEW_BBLOCK (cfg, (tblock)); \
+ (tblock) = NEW_BBLOCK (cfg); \
(tblock)->cil_code = (ip); \
ADD_BBLOCK (cfg, (tblock)); \
} \
} while (0)
#define CHECK_BBLOCK(target,ip,tblock) do { \
- if ((target) < (ip) && \
- MONO_INST_LIST_EMPTY (&(tblock)->ins_list)) { \
- bb_recheck = g_list_prepend (bb_recheck, (tblock)); \
- if (cfg->verbose_level > 2) \
- g_print ("queued block %d for check at IL%04x from IL%04x\n", (tblock)->block_num, (int)((target) - header->code), (int)((ip) - header->code)); \
- } \
+ if ((target) < (ip) && !(tblock)->code) { \
+ bb_recheck = g_list_prepend (bb_recheck, (tblock)); \
+ if (cfg->verbose_level > 2) g_print ("queued block %d for check at IL%04x from IL%04x\n", (tblock)->block_num, (int)((target) - header->code), (int)((ip) - header->code)); \
+ } \
} while (0)
#define NEW_ICONST(cfg,dest,val) do { \
(cfg)->disable_ssa = TRUE; \
} while (0)
-
#define NEW_INDLOAD(cfg,dest,addr,vtype) do { \
MONO_INST_NEW ((cfg), (dest), OP_NOP); \
(dest)->inst_left = addr; \
*sp++ = ins; \
type_from_op (ins); \
CHECK_TYPE (ins); \
+ /* Have to insert a widening op */ \
+ /* FIXME: Need to add many more cases */ \
+ if (ins->inst_i0->type == STACK_PTR && ins->inst_i1->type == STACK_I4) { \
+ MonoInst *widen; \
+ MONO_INST_NEW (cfg, widen, CEE_CONV_I); \
+ widen->inst_i0 = ins->inst_i1; \
+ ins->inst_i1 = widen; \
+ } \
} while (0)
#define ADD_UNOP(op) do { \
*
* Unlink two basic blocks.
*/
-static void
+void
mono_unlink_bblock (MonoCompile *cfg, MonoBasicBlock *from, MonoBasicBlock* to)
{
int i, pos;
}
}
+/*
+ * mono_bblocks_linked:
+ *
+ * Return whether there is a direct edge from BB1 to BB2 in the CFG,
+ * i.e. whether BB2 appears in BB1's out-edge list.
+ */
+gboolean
+mono_bblocks_linked (MonoBasicBlock *bb1, MonoBasicBlock *bb2)
+{
+	int i;
+
+	/* Linear scan is fine: a bblock typically has only 1-2 successors */
+	for (i = 0; i < bb1->out_count; ++i) {
+		if (bb1->out_bb [i] == bb2)
+			return TRUE;
+	}
+
+	return FALSE;
+}
+
/**
* mono_find_block_region:
*
int i;
array [*dfn] = start;
- /*g_print ("visit %d at %p (BB%ld)\n", *dfn, start->cil_code, start->block_num);*/
+ /* g_print ("visit %d at %p (BB%ld)\n", *dfn, start->cil_code, start->block_num); */
for (i = 0; i < start->out_count; ++i) {
if (start->out_bb [i]->dfn)
continue;
MonoInst *inst;
MonoBasicBlock *bb;
- if (!MONO_INST_LIST_EMPTY (&second->ins_list))
+ if (second->code)
return;
/*
first->out_bb = NULL;
link_bblock (cfg, first, second);
+ second->last_ins = first->last_ins;
+
/*g_print ("start search at %p for %p\n", first->cil_code, second->cil_code);*/
MONO_BB_FOR_EACH_INS (first, inst) {
- MonoInst *inst_next;
-
/*char *code = mono_disasm_code_one (NULL, cfg->method, inst->next->cil_code, NULL);
g_print ("found %p: %s", inst->next->cil_code, code);
g_free (code);*/
- if (inst->cil_code >= second->cil_code)
- continue;
-
- inst_next = mono_inst_list_next (&inst->node, &first->ins_list);
- if (!inst_next)
- break;
-
- if (inst_next->cil_code < second->cil_code)
- continue;
-
- second->ins_list.next = inst->node.next;
- second->ins_list.prev = first->ins_list.prev;
- inst->node.next = &first->ins_list;
- first->ins_list.prev = &inst->node;
-
- second->next_bb = first->next_bb;
- first->next_bb = second;
- return;
+ if (inst->cil_code < second->cil_code && inst->next->cil_code >= second->cil_code) {
+ second->code = inst->next;
+ inst->next = NULL;
+ first->last_ins = inst;
+ second->next_bb = first->next_bb;
+ first->next_bb = second;
+ return;
+ }
}
- if (MONO_INST_LIST_EMPTY (&second->ins_list)) {
+ if (!second->code) {
g_warning ("bblock split failed in %s::%s\n", cfg->method->klass->name, cfg->method->name);
//G_BREAKPOINT ();
}
}
-static guint32
-reverse_branch_op (guint32 opcode)
+guint32
+mono_reverse_branch_op (guint32 opcode)
{
static const int reverse_map [] = {
CEE_BNE_UN, CEE_BLT, CEE_BLE, CEE_BGT, CEE_BGE,
return opcode;
}
+/*
+ * mono_type_to_store_membase:
+ *
+ * Map TYPE to the OP_STORE*_MEMBASE_REG/OP_STOREV_MEMBASE opcode used to
+ * store a value of that type through a base register + displacement address.
+ * Byrefs are stored as native pointers; enums and generic instances are
+ * reduced to their underlying/container type via the handle_enum label.
+ */
+guint
+mono_type_to_store_membase (MonoCompile *cfg, MonoType *type)
+{
+	if (type->byref)
+		return OP_STORE_MEMBASE_REG;
+
+handle_enum:
+	switch (type->type) {
+	case MONO_TYPE_I1:
+	case MONO_TYPE_U1:
+	case MONO_TYPE_BOOLEAN:
+		return OP_STOREI1_MEMBASE_REG;
+	case MONO_TYPE_I2:
+	case MONO_TYPE_U2:
+	case MONO_TYPE_CHAR:
+		return OP_STOREI2_MEMBASE_REG;
+	case MONO_TYPE_I4:
+	case MONO_TYPE_U4:
+		return OP_STOREI4_MEMBASE_REG;
+	case MONO_TYPE_I:
+	case MONO_TYPE_U:
+	case MONO_TYPE_PTR:
+	case MONO_TYPE_FNPTR:
+		return OP_STORE_MEMBASE_REG;
+	case MONO_TYPE_CLASS:
+	case MONO_TYPE_STRING:
+	case MONO_TYPE_OBJECT:
+	case MONO_TYPE_SZARRAY:
+	case MONO_TYPE_ARRAY:
+		return OP_STORE_MEMBASE_REG;
+	case MONO_TYPE_I8:
+	case MONO_TYPE_U8:
+		return OP_STOREI8_MEMBASE_REG;
+	case MONO_TYPE_R4:
+		return OP_STORER4_MEMBASE_REG;
+	case MONO_TYPE_R8:
+		return OP_STORER8_MEMBASE_REG;
+	case MONO_TYPE_VALUETYPE:
+		if (type->data.klass->enumtype) {
+			/* Enums store as their underlying integral type */
+			type = type->data.klass->enum_basetype;
+			goto handle_enum;
+		}
+		return OP_STOREV_MEMBASE;
+	case MONO_TYPE_TYPEDBYREF:
+		return OP_STOREV_MEMBASE;
+	case MONO_TYPE_GENERICINST:
+		type = &type->data.generic_class->container_class->byval_arg;
+		goto handle_enum;
+	case MONO_TYPE_VAR:
+	case MONO_TYPE_MVAR:
+		/* FIXME: all the arguments must be references for now,
+		 * later look inside cfg and see if the arg num is
+		 * really a reference
+		 */
+		g_assert (cfg->generic_sharing_context);
+		return OP_STORE_MEMBASE_REG;
+	default:
+		g_error ("unknown type 0x%02x in type_to_store_membase", type->type);
+	}
+	/* Not reached: g_error () aborts */
+	return -1;
+}
+
+/*
+ * mono_type_to_load_membase:
+ *
+ * Map TYPE to the OP_LOAD*_MEMBASE/OP_LOADV_MEMBASE opcode used to load a
+ * value of that type from a base register + displacement address. Note the
+ * signed/unsigned distinction for the sub-word loads (sign vs zero extend).
+ * Unlike the store variant, this switches on the underlying type directly.
+ */
+guint
+mono_type_to_load_membase (MonoCompile *cfg, MonoType *type)
+{
+	if (type->byref)
+		return OP_LOAD_MEMBASE;
+
+	switch (mono_type_get_underlying_type (type)->type) {
+	case MONO_TYPE_I1:
+		return OP_LOADI1_MEMBASE;
+	case MONO_TYPE_U1:
+	case MONO_TYPE_BOOLEAN:
+		return OP_LOADU1_MEMBASE;
+	case MONO_TYPE_I2:
+		return OP_LOADI2_MEMBASE;
+	case MONO_TYPE_U2:
+	case MONO_TYPE_CHAR:
+		return OP_LOADU2_MEMBASE;
+	case MONO_TYPE_I4:
+		return OP_LOADI4_MEMBASE;
+	case MONO_TYPE_U4:
+		return OP_LOADU4_MEMBASE;
+	case MONO_TYPE_I:
+	case MONO_TYPE_U:
+	case MONO_TYPE_PTR:
+	case MONO_TYPE_FNPTR:
+		return OP_LOAD_MEMBASE;
+	case MONO_TYPE_CLASS:
+	case MONO_TYPE_STRING:
+	case MONO_TYPE_OBJECT:
+	case MONO_TYPE_SZARRAY:
+	case MONO_TYPE_ARRAY:
+		return OP_LOAD_MEMBASE;
+	case MONO_TYPE_I8:
+	case MONO_TYPE_U8:
+		return OP_LOADI8_MEMBASE;
+	case MONO_TYPE_R4:
+		return OP_LOADR4_MEMBASE;
+	case MONO_TYPE_R8:
+		return OP_LOADR8_MEMBASE;
+	case MONO_TYPE_VALUETYPE:
+	case MONO_TYPE_TYPEDBYREF:
+		return OP_LOADV_MEMBASE;
+	case MONO_TYPE_GENERICINST:
+		if (mono_type_generic_inst_is_valuetype (type))
+			return OP_LOADV_MEMBASE;
+		else
+			return OP_LOAD_MEMBASE;
+		break;
+	case MONO_TYPE_VAR:
+	case MONO_TYPE_MVAR:
+		/* FIXME: all the arguments must be references for now,
+		 * later look inside cfg and see if the arg num is
+		 * really a reference
+		 */
+		g_assert (cfg->generic_sharing_context);
+		return OP_LOAD_MEMBASE;
+	default:
+		g_error ("unknown type 0x%02x in type_to_load_membase", type->type);
+	}
+	/* Not reached: g_error () aborts */
+	return -1;
+}
+
#ifdef MONO_ARCH_SOFT_FLOAT
static int
condbr_to_fp_br (int opcode)
CEE_STIND_REF
};
+
+#ifdef MONO_ARCH_SOFT_FLOAT
+/*
+ * On soft-float targets R4 values cannot be loaded/stored directly; they go
+ * through the mono_fstore_r4/mono_fload_r4 JIT icalls below. The *_SOFT_FLOAT
+ * macros rewrite local/argument/temp accesses of type R4 accordingly, and
+ * expand to nothing on hard-float targets (see the #else branch).
+ */
+
+/* Store the R4 value VAL through PTR via the mono_fstore_r4 icall */
+static void
+handle_store_float (MonoCompile *cfg, MonoBasicBlock *bblock, MonoInst *ptr, MonoInst *val, const unsigned char *ip)
+{
+	MonoInst *iargs [2];
+	iargs [0] = val;
+	iargs [1] = ptr;
+
+	mono_emit_jit_icall (cfg, bblock, mono_fstore_r4, iargs, ip);
+}
+
+/* Load an R4 value from PTR via the mono_fload_r4 icall; returns the temp's index */
+static int
+handle_load_float (MonoCompile *cfg, MonoBasicBlock *bblock, MonoInst *ptr, const unsigned char *ip)
+{
+	MonoInst *iargs [1];
+	iargs [0] = ptr;
+
+	return mono_emit_jit_icall (cfg, bblock, mono_fload_r4, iargs, ip);
+}
+
+/* If local IDX is a non-byref R4, replace the load with an icall through its address */
+#define LDLOC_SOFT_FLOAT(cfg,ins,idx,ip) do {\
+	if (header->locals [(idx)]->type == MONO_TYPE_R4 && !header->locals [(idx)]->byref) {	\
+		int temp;	\
+		NEW_LOCLOADA (cfg, (ins), (idx)); \
+		temp = handle_load_float (cfg, bblock, (ins), (ip)); \
+		NEW_TEMPLOAD (cfg, (ins), temp); \
+	} \
+	} while (0)
+/* If local IDX is a non-byref R4, store *sp through its address and leave a NOP */
+#define STLOC_SOFT_FLOAT(cfg,ins,idx,ip) do {\
+	if (header->locals [(idx)]->type == MONO_TYPE_R4 && !header->locals [(idx)]->byref) {	\
+		NEW_LOCLOADA (cfg, (ins), (idx)); \
+		handle_store_float (cfg, bblock, (ins), *sp, (ip));	\
+		MONO_INST_NEW (cfg, (ins), OP_NOP); \
+	} \
+	} while (0)
+/* Same as LDLOC_SOFT_FLOAT but for method arguments (param_types []) */
+#define LDARG_SOFT_FLOAT(cfg,ins,idx,ip) do {\
+	if (param_types [(idx)]->type == MONO_TYPE_R4 && !param_types [(idx)]->byref) {	\
+		int temp;	\
+		NEW_ARGLOADA (cfg, (ins), (idx)); \
+		temp = handle_load_float (cfg, bblock, (ins), (ip)); \
+		NEW_TEMPLOAD (cfg, (ins), temp); \
+	} \
+	} while (0)
+/* Same as STLOC_SOFT_FLOAT but for method arguments (param_types []) */
+#define STARG_SOFT_FLOAT(cfg,ins,idx,ip) do {\
+	if (param_types [(idx)]->type == MONO_TYPE_R4 && !param_types [(idx)]->byref) {	\
+		NEW_ARGLOADA (cfg, (ins), (idx)); \
+		handle_store_float (cfg, bblock, (ins), *sp, (ip));	\
+		MONO_INST_NEW (cfg, (ins), OP_NOP); \
+	} \
+	} while (0)
+
+/* Rewrite a CEE_LDIND_R4 temp load into an icall-based load */
+#define NEW_TEMPLOAD_SOFT_FLOAT(cfg,bblock,ins,num,ip) do {		\
+	if ((ins)->opcode == CEE_LDIND_R4) {						\
+		int idx = (num);										\
+		int temp;												\
+		NEW_TEMPLOADA (cfg, (ins), (idx));						\
+		temp = handle_load_float (cfg, (bblock), (ins), ip);	\
+		NEW_TEMPLOAD (cfg, (ins), (temp));						\
+	}															\
+	} while (0)
+
+/* Rewrite a CEE_STIND_R4 temp store into an icall-based store */
+#define NEW_TEMPSTORE_SOFT_FLOAT(cfg,bblock,ins,num,val,ip) do {	\
+	if ((ins)->opcode == CEE_STIND_R4) {							\
+		int idx = (num);											\
+		NEW_TEMPLOADA (cfg, (ins), (idx));							\
+		handle_store_float ((cfg), (bblock), (ins), (val), (ip));	\
+	}																\
+	} while (0)
+
+#else
+
+/* Hard-float targets: the soft-float rewrites expand to nothing */
+#define LDLOC_SOFT_FLOAT(cfg,ins,idx,ip)
+#define STLOC_SOFT_FLOAT(cfg,ins,idx,ip)
+#define LDARG_SOFT_FLOAT(cfg,ins,idx,ip)
+#define STARG_SOFT_FLOAT(cfg,ins,idx,ip)
+#define NEW_TEMPLOAD_SOFT_FLOAT(cfg,bblock,ins,num,ip)
+#define NEW_TEMPSTORE_SOFT_FLOAT(cfg,bblock,ins,num,val,ip)
+#endif
+
+
#if 0
static const char
return mono_type_to_ldind (type);
}
-static guint
+guint
mini_type_to_stind (MonoCompile* cfg, MonoType *type)
{
if (cfg->generic_sharing_context && !type->byref) {
{
switch (opcode) {
case OP_ADD_IMM:
- return OP_PADD;
+#if SIZEOF_VOID_P == 4
+ return OP_IADD;
+#else
+ return OP_LADD;
+#endif
case OP_IADD_IMM:
return OP_IADD;
case OP_LADD_IMM:
return OP_ISUB;
case OP_LSUB_IMM:
return OP_LSUB;
+ case OP_IMUL_IMM:
+ return OP_IMUL;
case OP_AND_IMM:
#if SIZEOF_VOID_P == 4
return OP_IAND;
return OP_COMPARE;
case OP_ICOMPARE_IMM:
return OP_ICOMPARE;
+ case OP_LOCALLOC_IMM:
+ return OP_LOCALLOC;
default:
printf ("%s\n", mono_inst_name (opcode));
g_assert_not_reached ();
+ return -1;
}
}
* Replace the OP_.._IMM INS with its non IMM variant.
*/
void
-mono_decompose_op_imm (MonoCompile *cfg, MonoInst *ins)
+mono_decompose_op_imm (MonoCompile *cfg, MonoBasicBlock *bb, MonoInst *ins)
{
	MonoInst *temp;
	MONO_INST_NEW (cfg, temp, OP_ICONST);
	temp->inst_c0 = ins->inst_imm;
	temp->dreg = mono_regstate_next_int (cfg->rs);
-	MONO_INST_LIST_ADD_TAIL (&(temp)->node, &(ins)->node);
+	/* Materialize the immediate into a fresh int vreg right before INS */
+	mono_bblock_insert_before_ins (bb, ins, temp);
	ins->opcode = mono_op_imm_to_op (ins->opcode);
-	ins->sreg2 = temp->dreg;
+	/* OP_LOCALLOC takes its single operand in sreg1, not sreg2 */
+	if (ins->opcode == OP_LOCALLOC)
+		ins->sreg1 = temp->dreg;
+	else
+		ins->sreg2 = temp->dreg;
+
+	/* A new vreg was allocated above, keep BB's max_vreg in sync */
+	bb->max_vreg = MAX (bb->max_vreg, cfg->rs->next_vreg);
}
/*
return cfg->rgctx_var;
}
+/*
+ * set_vreg_to_inst:
+ *
+ * Record INST as the variable backing vreg VREG in cfg->vreg_to_inst,
+ * growing the table (doubling, minimum size 32) as needed.
+ */
+static void
+set_vreg_to_inst (MonoCompile *cfg, int vreg, MonoInst *inst)
+{
+	if (vreg >= cfg->vreg_to_inst_len) {
+		MonoInst **tmp = cfg->vreg_to_inst;
+		int size = cfg->vreg_to_inst_len;
+
+		while (vreg >= cfg->vreg_to_inst_len)
+			cfg->vreg_to_inst_len = cfg->vreg_to_inst_len ? cfg->vreg_to_inst_len * 2 : 32;
+		/* The old table is mempool memory, so it is abandoned, not freed */
+		cfg->vreg_to_inst = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoInst*) * cfg->vreg_to_inst_len);
+		if (size)
+			memcpy (cfg->vreg_to_inst, tmp, size * sizeof (MonoInst*));
+	}
+	cfg->vreg_to_inst [vreg] = inst;
+}
+
+#define mono_type_is_long(type) (!(type)->byref && ((mono_type_get_underlying_type (type)->type == MONO_TYPE_I8) || (mono_type_get_underlying_type (type)->type == MONO_TYPE_U8)))
+#define mono_type_is_float(type) (!(type)->byref && (((type)->type == MONO_TYPE_R8) || ((type)->type == MONO_TYPE_R4)))
+
MonoInst*
-mono_compile_create_var (MonoCompile *cfg, MonoType *type, int opcode)
+mono_compile_create_var_for_vreg (MonoCompile *cfg, MonoType *type, int opcode, int vreg)
{
MonoInst *inst;
int num = cfg->num_varinfo;
+ gboolean regpair;
if ((num + 1) >= cfg->varinfo_count) {
int orig_count = cfg->varinfo_count;
- cfg->varinfo_count = (cfg->varinfo_count + 2) * 2;
+ cfg->varinfo_count = cfg->varinfo_count ? (cfg->varinfo_count * 2) : 64;
cfg->varinfo = (MonoInst **)g_realloc (cfg->varinfo, sizeof (MonoInst*) * cfg->varinfo_count);
cfg->vars = (MonoMethodVar *)g_realloc (cfg->vars, sizeof (MonoMethodVar) * cfg->varinfo_count);
memset (&cfg->vars [orig_count], 0, (cfg->varinfo_count - orig_count) * sizeof (MonoMethodVar));
}
- /*g_print ("created temp %d of type 0x%x\n", num, type->type);*/
mono_jit_stats.allocate_var++;
MONO_INST_NEW (cfg, inst, opcode);
inst->inst_c0 = num;
inst->inst_vtype = type;
inst->klass = mono_class_from_mono_type (type);
+ type_to_eval_stack_type (cfg, type, inst);
/* if set to 1 the variable is native */
inst->backend.is_pinvoke = 0;
+ inst->dreg = vreg;
cfg->varinfo [num] = inst;
MONO_INIT_VARINFO (&cfg->vars [num], num);
+ if (vreg != -1)
+ set_vreg_to_inst (cfg, vreg, inst);
+
+#if SIZEOF_VOID_P == 4
+#ifdef MONO_ARCH_SOFT_FLOAT
+ regpair = mono_type_is_long (type) || mono_type_is_float (type);
+#else
+ regpair = mono_type_is_long (type);
+#endif
+#else
+ regpair = FALSE;
+#endif
+
+ if (regpair) {
+ MonoInst *tree;
+
+ /*
+ * These two cannot be allocated using create_var_for_vreg since that would
+ * put it into the cfg->varinfo array, confusing many parts of the JIT.
+ */
+
+ /*
+ * Set flags to VOLATILE so SSA skips it.
+ */
+
+ if (cfg->verbose_level >= 4) {
+ printf (" Create LVAR R%d (R%d, R%d)\n", inst->dreg, inst->dreg + 1, inst->dreg + 2);
+ }
+
+ /* Allocate a dummy MonoInst for the first vreg */
+ MONO_INST_NEW (cfg, tree, OP_LOCAL);
+ tree->dreg = inst->dreg + 1;
+ if (cfg->opt & MONO_OPT_SSA)
+ tree->flags = MONO_INST_VOLATILE;
+ tree->inst_c0 = num;
+ tree->type = STACK_I4;
+ tree->inst_vtype = &mono_defaults.int32_class->byval_arg;
+ tree->klass = mono_class_from_mono_type (tree->inst_vtype);
+
+ set_vreg_to_inst (cfg, inst->dreg + 1, tree);
+
+ /* Allocate a dummy MonoInst for the second vreg */
+ MONO_INST_NEW (cfg, tree, OP_LOCAL);
+ tree->dreg = inst->dreg + 2;
+ if (cfg->opt & MONO_OPT_SSA)
+ tree->flags = MONO_INST_VOLATILE;
+ tree->inst_c0 = num;
+ tree->type = STACK_I4;
+ tree->inst_vtype = &mono_defaults.int32_class->byval_arg;
+ tree->klass = mono_class_from_mono_type (tree->inst_vtype);
+
+ set_vreg_to_inst (cfg, inst->dreg + 2, tree);
+ }
+
cfg->num_varinfo++;
if (cfg->verbose_level > 2)
- g_print ("created temp %d of type %s\n", num, mono_type_get_name (type));
+ g_print ("created temp %d (R%d) of type %s\n", num, vreg, mono_type_get_name (type));
return inst;
}
+/*
+ * mono_compile_create_var:
+ *
+ * Create a new variable of TYPE, allocating a fresh virtual register for it.
+ * Longs (and, under soft float, R4/R8 as well) get a dreg allocated for the
+ * matching stack type; everything else goes through the unified preg
+ * allocator. The actual MonoInst creation is delegated to
+ * mono_compile_create_var_for_vreg ().
+ */
+MonoInst*
+mono_compile_create_var (MonoCompile *cfg, MonoType *type, int opcode)
+{
+	int dreg;
+
+	if (mono_type_is_long (type))
+		dreg = mono_alloc_dreg (cfg, STACK_I8);
+#ifdef MONO_ARCH_SOFT_FLOAT
+	else if (mono_type_is_float (type))
+		dreg = mono_alloc_dreg (cfg, STACK_R8);
+#endif
+	else
+		/* All the others are unified */
+		dreg = mono_alloc_preg (cfg);
+
+	return mono_compile_create_var_for_vreg (cfg, type, opcode, dreg);
+}
+
/*
* Transform a MonoInst into a load from the variable of index var_index.
*/
return NULL;
}
+/*
+ * mono_add_ins_to_end:
+ *
+ * Same as MONO_ADD_INS, but add INST before any branches at the end of BB.
+ * For conditional branches, INST is also inserted before the compare
+ * instruction that feeds the branch, so the compare/branch pair stays
+ * adjacent.
+ */
void
mono_add_ins_to_end (MonoBasicBlock *bb, MonoInst *inst)
{
-	MonoInst *last = mono_inst_list_last (&bb->ins_list);
+	int opcode;
-	if (last && ((last->opcode >= CEE_BEQ &&
-			last->opcode <= CEE_BLT_UN) ||
-			last->opcode == OP_BR ||
-			last->opcode == OP_SWITCH)) {
-		MONO_INST_LIST_ADD_TAIL (&inst->node, &last->node);
-	} else {
+	/* Empty bblock: just append */
+	if (!bb->code) {
		MONO_ADD_INS (bb, inst);
+		return;
	}
-}
-void
-mono_add_varcopy_to_end (MonoCompile *cfg, MonoBasicBlock *bb, int src, int dest)
+	switch (bb->last_ins->opcode) {
+	case OP_BR:
+	case OP_BR_REG:
+	case CEE_BEQ:
+	case CEE_BGE:
+	case CEE_BGT:
+	case CEE_BLE:
+	case CEE_BLT:
+	case CEE_BNE_UN:
+	case CEE_BGE_UN:
+	case CEE_BGT_UN:
+	case CEE_BLE_UN:
+	case CEE_BLT_UN:
+	case OP_SWITCH:
+		/* Unconditional/old-IR branch at the end: insert just before it */
+		mono_bblock_insert_before_ins (bb, bb->last_ins, inst);
+		break;
+	default:
+		if (MONO_IS_COND_BRANCH_OP (bb->last_ins)) {
+			/* Need to insert the ins before the compare */
+			if (bb->code == bb->last_ins) {
+				mono_bblock_insert_before_ins (bb, bb->last_ins, inst);
+				return;
+			}
+
+			if (bb->code->next == bb->last_ins) {
+				/* Only two instructions */
+				opcode = bb->code->opcode;
+
+				if ((opcode == OP_COMPARE) || (opcode == OP_COMPARE_IMM) || (opcode == OP_ICOMPARE) || (opcode == OP_ICOMPARE_IMM) || (opcode == OP_FCOMPARE) || (opcode == OP_LCOMPARE) || (opcode == OP_LCOMPARE_IMM)) {
+					/* NEW IR */
+					mono_bblock_insert_before_ins (bb, bb->code, inst);
+				} else {
+					mono_bblock_insert_before_ins (bb, bb->last_ins, inst);
+				}
+			} else {
+				opcode = bb->last_ins->prev->opcode;
+
+				if ((opcode == OP_COMPARE) || (opcode == OP_COMPARE_IMM) || (opcode == OP_ICOMPARE) || (opcode == OP_ICOMPARE_IMM) || (opcode == OP_FCOMPARE) || (opcode == OP_LCOMPARE) || (opcode == OP_LCOMPARE_IMM)) {
+					/* NEW IR */
+					mono_bblock_insert_before_ins (bb, bb->last_ins->prev, inst);
+				} else {
+					mono_bblock_insert_before_ins (bb, bb->last_ins, inst);
+				}
+			}
+		}
+		else
+			MONO_ADD_INS (bb, inst);
+		break;
+	}
+}
+
+/**
+ * mono_replace_ins:
+ *
+ * Replace INS with its decomposition which is stored in a series of bblocks starting
+ * at FIRST_BB and ending at LAST_BB. On enter, PREV points to the predecessor of INS.
+ * On return, it will be set to the last ins of the decomposition.
+ *
+ * Two cases: a single replacement bblock is spliced inline into BB; multiple
+ * bblocks split BB at INS and rewire the CFG edges around the decomposition.
+ */
+void
+mono_replace_ins (MonoCompile *cfg, MonoBasicBlock *bb, MonoInst *ins, MonoInst **prev, MonoBasicBlock *first_bb, MonoBasicBlock *last_bb)
+{
+	MonoInst *next = ins->next;
+
+	if (next && next->opcode == OP_NOP) {
+		/* Avoid NOPs following branches */
+		ins->next = next->next;
+		next = next->next;
+	}
+
+	if (first_bb == last_bb) {
+		/*
+		 * Only one replacement bb, merge the code into
+		 * the current bb.
+		 */
+
+		/* Delete links between the first_bb and its successors */
+		while (first_bb->out_count)
+			mono_unlink_bblock (cfg, first_bb, first_bb->out_bb [0]);
+
+		/* Head: splice first_bb's code in after *prev (or at bb's start) */
+		if (*prev) {
+			(*prev)->next = first_bb->code;
+			first_bb->code->prev = (*prev);
+		} else {
+			bb->code = first_bb->code;
+		}
+
+		/* Tail: reattach the instructions which followed INS */
+		last_bb->last_ins->next = next;
+		if (next)
+			next->prev = last_bb->last_ins;
+		else
+			bb->last_ins = last_bb->last_ins;
+		*prev = last_bb->last_ins;
+	} else {
+		int i, count;
+		MonoBasicBlock **tmp_bblocks, *tmp;
+		MonoInst *last;
+
+		/* Multiple BBs */
+
+		/* Set region */
+		for (tmp = first_bb; tmp; tmp = tmp->next_bb)
+			tmp->region = bb->region;
+
+		/* Split the original bb */
+		if (ins->next)
+			ins->next->prev = NULL;
+		ins->next = NULL;
+		bb->last_ins = ins;
+
+		/* Merge the second part of the original bb into the last bb */
+		if (last_bb->last_ins) {
+			last_bb->last_ins->next = next;
+			if (next)
+				next->prev = last_bb->last_ins;
+		} else {
+			last_bb->code = next;
+		}
+
+		/* Walk to the new tail to fix last_bb->last_ins */
+		if (next) {
+			for (last = next; last->next != NULL; last = last->next)
+				;
+			last_bb->last_ins = last;
+		}
+
+		for (i = 0; i < bb->out_count; ++i)
+			link_bblock (cfg, last_bb, bb->out_bb [i]);
+
+		/* Merge the first (dummy) bb to the original bb */
+		if (*prev) {
+			(*prev)->next = first_bb->code;
+			first_bb->code->prev = (*prev);
+		} else {
+			bb->code = first_bb->code;
+		}
+		bb->last_ins = first_bb->last_ins;
+
+		/* Delete the links between the original bb and its successors */
+		tmp_bblocks = bb->out_bb;
+		count = bb->out_count;
+		for (i = 0; i < count; ++i)
+			mono_unlink_bblock (cfg, bb, tmp_bblocks [i]);
+
+		/* Add links between the original bb and the first_bb's successors */
+		for (i = 0; i < first_bb->out_count; ++i) {
+			MonoBasicBlock *out_bb = first_bb->out_bb [i];
+
+			link_bblock (cfg, bb, out_bb);
+		}
+		/* Delete the links between the first_bb and its successors */
+		/* NOTE(review): iterates bb->out_bb, which was just repopulated from
+		 * first_bb's successors above — confirm this is the intended set */
+		for (i = 0; i < bb->out_count; ++i) {
+			MonoBasicBlock *out_bb = bb->out_bb [i];
+
+			mono_unlink_bblock (cfg, first_bb, out_bb);
+		}
+		/* Chain the decomposition bblocks into bb's next_bb list */
+		last_bb->next_bb = bb->next_bb;
+		bb->next_bb = first_bb->next_bb;
+
+		*prev = NULL;
+	}
+}
+
+void
+mono_add_varcopy_to_end (MonoCompile *cfg, MonoBasicBlock *bb, int src, int dest)
{
MonoInst *inst, *load;
MonoInst **args, int calli, int virtual, const guint8 *ip, gboolean to_end)
{
MonoCallInst *call;
- MonoInst *arg, *n;
+ MonoInst *arg;
MONO_INST_NEW_CALL (cfg, call, ret_type_to_call_opcode (sig->ret, calli, virtual, cfg->generic_sharing_context));
call = mono_arch_call_opcode (cfg, bblock, call, virtual);
type_to_eval_stack_type (cfg, sig->ret, &call->inst);
- MONO_INST_LIST_FOR_EACH_ENTRY_SAFE (arg, n, &call->out_args, node) {
+ for (arg = call->out_args; arg;) {
+ MonoInst *narg = arg->next;
+ arg->next = NULL;
if (!arg->cil_code)
arg->cil_code = ip;
if (to_end)
mono_add_ins_to_end (bblock, arg);
else
MONO_ADD_INS (bblock, arg);
+ arg = narg;
}
return call;
}
static MonoCallInst*
mono_emit_rgctx_method_call (MonoCompile *cfg, MonoBasicBlock *bblock, MonoMethod *method, MonoMethodSignature *sig,
- MonoInst **args, MonoInst *rgctx_arg, const guint8 *ip, MonoInst *this)
+ MonoInst **args, MonoInst *rgctx_arg, MonoInst *imt_arg, const guint8 *ip, MonoInst *this)
{
MonoCallInst *call = mono_emit_method_call_full (cfg, bblock, method, sig, args, ip, this, FALSE);
+ g_assert (!(rgctx_arg && imt_arg));
+
if (rgctx_arg) {
switch (call->inst.opcode) {
case OP_CALL: call->inst.opcode = OP_CALL_RGCTX; break;
g_assert (!call->inst.inst_right);
call->inst.inst_right = rgctx_arg;
}
+ } else if (imt_arg) {
+ switch (call->inst.opcode) {
+ case OP_CALLVIRT: call->inst.opcode = OP_CALLVIRT_IMT; break;
+ case OP_VOIDCALLVIRT: call->inst.opcode = OP_VOIDCALLVIRT_IMT; break;
+ case OP_FCALLVIRT: call->inst.opcode = OP_FCALLVIRT_IMT; break;
+ case OP_LCALLVIRT: call->inst.opcode = OP_LCALLVIRT_IMT; break;
+ case OP_VCALLVIRT: {
+ MonoInst *group;
+
+ NEW_GROUP (cfg, group, call->inst.inst_left, NULL);
+ call->inst.inst_left = group;
+ call->inst.opcode = OP_VCALLVIRT_IMT;
+ break;
+ }
+ default: g_assert_not_reached ();
+ }
+
+ if (call->inst.opcode != OP_VCALLVIRT_IMT) {
+ g_assert (!call->inst.inst_right);
+ call->inst.inst_right = imt_arg;
+ } else {
+ g_assert (!call->inst.inst_left->inst_right);
+ call->inst.inst_left->inst_right = imt_arg;
+ }
}
return call;
inline static int
mono_emit_rgctx_method_call_spilled (MonoCompile *cfg, MonoBasicBlock *bblock, MonoMethod *method,
- MonoMethodSignature *signature, MonoInst **args, MonoInst *rgctx_arg, const guint8 *ip,
- MonoInst *this)
+ MonoMethodSignature *signature, MonoInst **args, MonoInst *rgctx_arg, MonoInst *imt_arg,
+ const guint8 *ip, MonoInst *this)
{
- MonoCallInst *call = mono_emit_rgctx_method_call (cfg, bblock, method, signature, args, rgctx_arg, ip, this);
+ MonoCallInst *call = mono_emit_rgctx_method_call (cfg, bblock, method, signature, args, rgctx_arg, imt_arg, ip, this);
return mono_spill_call (cfg, bblock, call, signature, method->string_ctor, ip, FALSE);
}
static void
mono_emulate_opcode (MonoCompile *cfg, MonoInst *tree, MonoInst **iargs, MonoJitICallInfo *info)
{
- MonoInst *ins, *temp = NULL, *store, *load;
- MonoInstList *head, *list;
+ MonoInst *ins, *temp = NULL, *store, *load, *begin;
+ MonoInst *last_arg = NULL;
int nargs;
MonoCallInst *call;
//mono_print_tree_nl (tree);
MONO_INST_NEW_CALL (cfg, call, ret_type_to_call_opcode (info->sig->ret, FALSE, FALSE, cfg->generic_sharing_context));
ins = (MonoInst*)call;
- MONO_INST_LIST_INIT (&ins->node);
call->inst.cil_code = tree->cil_code;
call->args = iargs;
temp = mono_compile_create_var (cfg, info->sig->ret, OP_LOCAL);
temp->flags |= MONO_INST_IS_TEMP;
NEW_TEMPSTORE (cfg, store, temp->inst_c0, ins);
- MONO_INST_LIST_INIT (&store->node);
/* FIXME: handle CEE_STIND_R4 */
store->cil_code = tree->cil_code;
} else {
nargs = info->sig->param_count + info->sig->hasthis;
- if (nargs) {
- MONO_INST_LIST_ADD_TAIL (&store->node,
- &call->out_args);
- list = &call->out_args;
- } else {
- list = &store->node;
- }
+ for (last_arg = call->out_args; last_arg && last_arg->next; last_arg = last_arg->next) ;
+
+ if (nargs)
+ last_arg->next = store;
+
+ if (nargs)
+ begin = call->out_args;
+ else
+ begin = store;
if (cfg->prev_ins) {
/*
* node before it is called for its children. dec_foreach needs to
* take this into account.
*/
- head = &cfg->prev_ins->node;
+ store->next = cfg->prev_ins->next;
+ cfg->prev_ins->next = begin;
} else {
- head = &cfg->cbb->ins_list;
+ store->next = cfg->cbb->code;
+ cfg->cbb->code = begin;
}
- MONO_INST_LIST_SPLICE_INIT (list, head);
-
call->fptr = mono_icall_get_wrapper (info);
if (!MONO_TYPE_IS_VOID (info->sig->ret)) {
for (i = 0; i < arity; i++)
res->params [i + 1] = &mono_defaults.int_class->byval_arg;
- res->ret = &mono_defaults.int_class->byval_arg;
+ res->ret = &mono_defaults.object_class->byval_arg;
g_hash_table_insert (sighash, GINT_TO_POINTER (arity), res);
mono_jit_unlock ();
return res;
}
-#ifdef MONO_ARCH_SOFT_FLOAT
-static void
-handle_store_float (MonoCompile *cfg, MonoBasicBlock *bblock, MonoInst *ptr, MonoInst *val, const unsigned char *ip)
+/*
+ * mono_get_array_new_va_icall:
+ *
+ * Return the JIT icall info for the vararg array-new helper of the given
+ * RANK ("ves_array_new_va_<rank>"), registering it under the jit lock on
+ * first use so it gets an icall wrapper.
+ */
+MonoJitICallInfo *
+mono_get_array_new_va_icall (int rank)
 {
-	MonoInst *iargs [2];
-	iargs [0] = val;
-	iargs [1] = ptr;
+	MonoMethodSignature *esig;
+	char icall_name [256];
+	char *name;
+	MonoJitICallInfo *info;
-	mono_emit_jit_icall (cfg, bblock, mono_fstore_r4, iargs, ip);
-}
+	/* Need to register the icall so it gets an icall wrapper */
+	sprintf (icall_name, "ves_array_new_va_%d", rank);
-static int
-handle_load_float (MonoCompile *cfg, MonoBasicBlock *bblock, MonoInst *ptr, const unsigned char *ip)
-{
-	MonoInst *iargs [1];
-	iargs [0] = ptr;
+	mono_jit_lock ();
+	info = mono_find_jit_icall_by_name (icall_name);
+	if (info == NULL) {
+		esig = mono_get_array_new_va_signature (rank);
+		name = g_strdup (icall_name);
+		info = mono_register_jit_icall (mono_array_new_va, name, esig, FALSE);
-	return mono_emit_jit_icall (cfg, bblock, mono_fload_r4, iargs, ip);
-}
+		/* name doubles as key and value; the hash owns the strdup'ed string */
+		g_hash_table_insert (jit_icall_name_hash, name, name);
+	}
+	mono_jit_unlock ();
-#define LDLOC_SOFT_FLOAT(cfg,ins,idx,ip) do {\
-	if (header->locals [(idx)]->type == MONO_TYPE_R4 && !header->locals [(idx)]->byref) {	\
-		int temp;	\
-		NEW_LOCLOADA (cfg, (ins), (idx)); \
-		temp = handle_load_float (cfg, bblock, (ins), (ip)); \
-		NEW_TEMPLOAD (cfg, (ins), temp); \
-	} \
-	} while (0)
-#define STLOC_SOFT_FLOAT(cfg,ins,idx,ip) do {\
-	if (header->locals [(idx)]->type == MONO_TYPE_R4 && !header->locals [(idx)]->byref) {	\
-		int temp;	\
-		NEW_LOCLOADA (cfg, (ins), (idx)); \
-		handle_store_float (cfg, bblock, (ins), *sp, (ip));	\
-		MONO_INST_NEW (cfg, (ins), OP_NOP); \
-	} \
-	} while (0)
-#define LDARG_SOFT_FLOAT(cfg,ins,idx,ip) do {\
-	if (param_types [(idx)]->type == MONO_TYPE_R4 && !param_types [(idx)]->byref) {	\
-		int temp;	\
-		NEW_ARGLOADA (cfg, (ins), (idx)); \
-		temp = handle_load_float (cfg, bblock, (ins), (ip)); \
-		NEW_TEMPLOAD (cfg, (ins), temp); \
-	} \
-	} while (0)
-#define STARG_SOFT_FLOAT(cfg,ins,idx,ip) do {\
-	if (param_types [(idx)]->type == MONO_TYPE_R4 && !param_types [(idx)]->byref) {	\
-		NEW_ARGLOADA (cfg, (ins), (idx)); \
-		handle_store_float (cfg, bblock, (ins), *sp, (ip));	\
-		MONO_INST_NEW (cfg, (ins), OP_NOP); \
-	} \
-	} while (0)
-#else
-#define LDLOC_SOFT_FLOAT(cfg,ins,idx,ip)
-#define STLOC_SOFT_FLOAT(cfg,ins,idx,ip)
-#define LDARG_SOFT_FLOAT(cfg,ins,idx,ip)
-#define STARG_SOFT_FLOAT(cfg,ins,idx,ip)
-#endif
+	return info;
+}
static MonoMethod*
get_memcpy_method (void)
static int
handle_array_new (MonoCompile *cfg, MonoBasicBlock *bblock, int rank, MonoInst **sp, unsigned char *ip)
{
- MonoMethodSignature *esig;
- char icall_name [256];
- char *name;
MonoJitICallInfo *info;
- /* Need to register the icall so it gets an icall wrapper */
- sprintf (icall_name, "ves_array_new_va_%d", rank);
-
- mono_jit_lock ();
- info = mono_find_jit_icall_by_name (icall_name);
- if (info == NULL) {
- esig = mono_get_array_new_va_signature (rank);
- name = g_strdup (icall_name);
- info = mono_register_jit_icall (mono_array_new_va, name, esig, FALSE);
-
- g_hash_table_insert (jit_icall_name_hash, name, name);
- }
- mono_jit_unlock ();
+ info = mono_get_array_new_va_icall (rank);
cfg->flags |= MONO_CFG_HAS_VARARGS;
NEW_TEMPSTORE (cfg, store, cfg->got_var->inst_c0, get_got);
/* Add it to the start of the first bblock */
- MONO_INST_LIST_ADD (&store->node, &cfg->bb_entry->ins_list);
+ if (cfg->bb_entry->code) {
+ store->next = cfg->bb_entry->code;
+ cfg->bb_entry->code = store;
+ }
+ else
+ MONO_ADD_INS (cfg->bb_entry, store);
cfg->got_var_allocated = TRUE;
#define CODE_IS_STLOC(ip) (((ip) [0] >= CEE_STLOC_0 && (ip) [0] <= CEE_STLOC_3) || ((ip) [0] == CEE_STLOC_S))
-static gboolean
+gboolean
mini_class_is_system_array (MonoClass *klass)
{
if (klass->parent == mono_defaults.array_class)
return NULL;
}
-static int
-is_unsigned_regsize_type (MonoType *type)
-{
- switch (type->type) {
- case MONO_TYPE_U1:
- case MONO_TYPE_U2:
- case MONO_TYPE_U4:
-#if SIZEOF_VOID_P == 8
- /*case MONO_TYPE_U8: this requires different opcodes in inssel.brg */
-#endif
- return TRUE;
- default:
- return FALSE;
- }
-}
-
static MonoInst*
mini_get_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
{
store->inst_right = load;
return store;
} else if (cmethod->klass == mono_defaults.math_class) {
- if (strcmp (cmethod->name, "Min") == 0) {
- if (is_unsigned_regsize_type (fsig->params [0])) {
- MONO_INST_NEW (cfg, ins, OP_MIN);
- ins->inst_i0 = args [0];
- ins->inst_i1 = args [1];
- return ins;
- }
- } else if (strcmp (cmethod->name, "Max") == 0) {
- if (is_unsigned_regsize_type (fsig->params [0])) {
- MONO_INST_NEW (cfg, ins, OP_MAX);
- ins->inst_i0 = args [0];
- ins->inst_i1 = args [1];
- return ins;
- }
- }
+ /*
+ * There is generic, branch-based code for Min/Max, but it does not
+ * produce correct results for all inputs:
+ * http://everything2.com/?node_id=1051618
+ */
} else if (cmethod->klass->image == mono_defaults.corlib &&
(strcmp (cmethod->klass->name_space, "System.Threading") == 0) &&
(strcmp (cmethod->klass->name, "Interlocked") == 0)) {
mono_compile_create_var (cfg, cheader->locals [i], OP_LOCAL);
/* allocate starte and end blocks */
- NEW_BBLOCK (cfg, sbblock);
+ sbblock = NEW_BBLOCK (cfg);
sbblock->block_num = cfg->num_bblocks++;
sbblock->real_offset = real_offset;
- NEW_BBLOCK (cfg, ebblock);
+ ebblock = NEW_BBLOCK (cfg);
ebblock->block_num = cfg->num_bblocks++;
ebblock->real_offset = real_offset;
if (rvar) {
NEW_TEMPLOAD (cfg, ins, rvar->inst_c0);
-#ifdef MONO_ARCH_SOFT_FLOAT
- if (ins->opcode == CEE_LDIND_R4) {
- int temp;
- NEW_TEMPLOADA (cfg, ins, rvar->inst_c0);
- temp = handle_load_float (cfg, bblock, ins, ip);
- NEW_TEMPLOAD (cfg, ins, temp);
- }
-#endif
+ NEW_TEMPLOAD_SOFT_FLOAT (cfg, ebblock, ins, rvar->inst_c0, ip);
*sp++ = ins;
}
*last_b = ebblock;
g_free (method_code);
}
+/*
+ * set_exception_object:
+ *
+ *   Abort compilation of CFG with a caller-supplied exception object:
+ * tags the failure as MONO_EXCEPTION_OBJECT_SUPPLIED and stores
+ * EXCEPTION in cfg->exception_ptr.  The slot is registered as a GC
+ * root first so the exception object stays alive until reported.
+ */
+static void
+set_exception_object (MonoCompile *cfg, MonoException *exception)
+{
+ cfg->exception_type = MONO_EXCEPTION_OBJECT_SUPPLIED;
+ MONO_GC_REGISTER_ROOT (cfg->exception_ptr);
+ cfg->exception_ptr = exception;
+}
+
+/*
+ * get_runtime_generic_context:
+ *
+ *   Emit IR that loads the runtime generic context for METHOD.
+ * Three cases, selected by CONTEXT_USED and the method flags:
+ *  - a shared generic method (MONO_GENERIC_CONTEXT_USED_METHOD): the
+ *    MRGCTX is passed in and lives in the vtable variable slot;
+ *  - a static method: load the vtable variable; if the method is
+ *    inflated the slot actually holds an MRGCTX, so indirect once
+ *    more (class_vtable is asserted to be at offset 0);
+ *  - an instance method: fetch the vtable through THIS.
+ */
static MonoInst*
-get_runtime_generic_context (MonoCompile *cfg, MonoMethod *method, MonoInst *this, unsigned char *ip)
+get_runtime_generic_context (MonoCompile *cfg, MonoMethod *method, int context_used, MonoInst *this, unsigned char *ip)
{
g_assert (!method->klass->valuetype);
- if (method->flags & METHOD_ATTRIBUTE_STATIC) {
+ if (context_used & MONO_GENERIC_CONTEXT_USED_METHOD) {
+ MonoInst *mrgctx_loc, *mrgctx_var;
+
+ g_assert (!this);
+ g_assert (method->is_inflated && mono_method_get_context (method)->method_inst);
+
+ mrgctx_loc = mono_get_vtable_var (cfg);
+ NEW_TEMPLOAD (cfg, mrgctx_var, mrgctx_loc->inst_c0);
+
+ return mrgctx_var;
+ } else if (method->flags & METHOD_ATTRIBUTE_STATIC) {
MonoInst *vtable_loc, *vtable_var;
+ g_assert (!this);
+
vtable_loc = mono_get_vtable_var (cfg);
NEW_TEMPLOAD (cfg, vtable_var, vtable_loc->inst_c0);
+ if (method->is_inflated && mono_method_get_context (method)->method_inst) {
+ MonoInst *mrgctx_var = vtable_var;
+
+ g_assert (G_STRUCT_OFFSET (MonoMethodRuntimeGenericContext, class_vtable) == 0);
+
+ MONO_INST_NEW (cfg, vtable_var, CEE_LDIND_I);
+ vtable_var->cil_code = ip;
+ vtable_var->inst_left = mrgctx_var;
+ vtable_var->type = STACK_PTR;
+ }
+
return vtable_var;
} else {
MonoInst *vtable;
+ g_assert (this);
+
MONO_INST_NEW (cfg, vtable, CEE_LDIND_I);
vtable->inst_left = this;
vtable->type = STACK_PTR;
}
}
-static gpointer
-create_rgctx_lazy_fetch_trampoline (guint32 offset)
-{
- static gboolean inited = FALSE;
- static int num_trampolines = 0;
-
- gpointer tramp, ptr;
-
- mono_jit_lock ();
- if (rgctx_lazy_fetch_trampoline_hash)
- tramp = g_hash_table_lookup (rgctx_lazy_fetch_trampoline_hash, GUINT_TO_POINTER (offset));
- else
- tramp = NULL;
- mono_jit_unlock ();
- if (tramp)
- return tramp;
-
- tramp = mono_arch_create_rgctx_lazy_fetch_trampoline (offset);
- ptr = mono_create_ftnptr (mono_get_root_domain (), tramp);
-
- mono_jit_lock ();
- if (!rgctx_lazy_fetch_trampoline_hash)
- rgctx_lazy_fetch_trampoline_hash = g_hash_table_new (NULL, NULL);
- g_hash_table_insert (rgctx_lazy_fetch_trampoline_hash, GUINT_TO_POINTER (offset), ptr);
- mono_jit_unlock ();
-
- if (!inited) {
- mono_counters_register ("RGCTX num lazy fetch trampolines",
- MONO_COUNTER_GENERICS | MONO_COUNTER_INT, &num_trampolines);
- inited = TRUE;
- }
- num_trampolines++;
-
- return ptr;
-}
-
-/*
- * Generates rgc->other_infos [index].XXX if index is non-negative, or
- * rgc->extra_other_infos [-index + 1] if index is negative. XXX is
- * specified by rgctx_type;
- */
static MonoInst*
get_runtime_generic_context_other_table_ptr (MonoCompile *cfg, MonoBasicBlock *bblock,
- MonoInst *rgc_ptr, int slot, const unsigned char *ip)
+ MonoInst *rgc_ptr, guint32 slot, const unsigned char *ip)
{
MonoMethodSignature *sig = helper_sig_rgctx_lazy_fetch_trampoline;
- guint8 *tramp = create_rgctx_lazy_fetch_trampoline (slot);
+ guint8 *tramp = mono_create_rgctx_lazy_fetch_trampoline (slot);
int temp;
MonoInst *field;
}
+/*
+ * get_runtime_generic_context_ptr:
+ *
+ *   Emit IR that fetches the RGCTX_TYPE info (e.g. klass/vtable) for
+ * KLASS from the runtime generic context RGCTX: register (or look up)
+ * a slot for KLASS in METHOD's (m)rgctx template, then emit a
+ * lazy-fetch through that slot.  Replaces the old relation-based
+ * lookup which distinguished "other table" vs. icall paths.
+ */
static MonoInst*
-get_runtime_generic_context_other_ptr (MonoCompile *cfg, MonoMethod *method, MonoBasicBlock *bblock,
- MonoInst *rgc_ptr, guint32 token, int token_source, int rgctx_type, unsigned char *ip, int index)
+get_runtime_generic_context_ptr (MonoCompile *cfg, MonoMethod *method, int context_used, MonoBasicBlock *bblock,
+ MonoClass *klass, MonoGenericContext *generic_context, MonoInst *rgctx, int rgctx_type, unsigned char *ip)
{
- MonoInst *args [6];
- int temp;
- MonoInst *result;
-
- g_assert (method->wrapper_type == MONO_WRAPPER_NONE);
+ guint32 slot = mono_method_lookup_or_register_other_info (method,
+ context_used & MONO_GENERIC_CONTEXT_USED_METHOD, &klass->byval_arg, rgctx_type, generic_context);
- NEW_CLASSCONST (cfg, args [0], method->klass);
- args [1] = rgc_ptr;
- NEW_ICONST (cfg, args [2], token);
- NEW_ICONST (cfg, args [3], token_source);
- NEW_ICONST (cfg, args [4], rgctx_type);
- NEW_ICONST (cfg, args [5], index);
-
- temp = mono_emit_jit_icall (cfg, bblock, mono_helper_get_rgctx_other_ptr, args, ip);
- NEW_TEMPLOAD (cfg, result, temp);
-
- return result;
-}
-
-static MonoInst*
-get_runtime_generic_context_ptr (MonoCompile *cfg, MonoMethod *method, MonoBasicBlock *bblock,
- MonoClass *klass, guint32 type_token, int token_source, MonoGenericContext *generic_context, MonoInst *rgctx,
- int rgctx_type, unsigned char *ip)
-{
- int arg_num = -1;
- int relation = mono_class_generic_class_relation (klass, rgctx_type, method->klass, generic_context, &arg_num);
-
- switch (relation) {
- case MINI_GENERIC_CLASS_RELATION_OTHER_TABLE:
- return get_runtime_generic_context_other_table_ptr (cfg, bblock, rgctx, arg_num, ip);
- case MINI_GENERIC_CLASS_RELATION_OTHER:
- return get_runtime_generic_context_other_ptr (cfg, method, bblock, rgctx,
- type_token, token_source, rgctx_type, ip, arg_num);
- default:
- g_assert_not_reached ();
- return NULL;
- }
+ return get_runtime_generic_context_other_table_ptr (cfg, bblock, rgctx, slot, ip);
}
+/*
+ * get_runtime_generic_context_method:
+ *
+ *   Emit IR that fetches CMETHOD's RGCTX_TYPE info (e.g. its compiled
+ * code address) from RGCTX via a registered slot, using a lazy-fetch
+ * through the slot trampoline.
+ */
static MonoInst*
-get_runtime_generic_context_method (MonoCompile *cfg, MonoMethod *method, MonoBasicBlock *bblock,
+get_runtime_generic_context_method (MonoCompile *cfg, MonoMethod *method, int context_used, MonoBasicBlock *bblock,
MonoMethod *cmethod, MonoGenericContext *generic_context, MonoInst *rgctx, int rgctx_type, const unsigned char *ip)
{
- int arg_num = mono_class_lookup_or_register_other_info (method->klass, cmethod, rgctx_type, generic_context);
+ guint32 slot = mono_method_lookup_or_register_other_info (method,
+ context_used & MONO_GENERIC_CONTEXT_USED_METHOD, cmethod, rgctx_type, generic_context);
- return get_runtime_generic_context_other_table_ptr (cfg, bblock, rgctx, arg_num, ip);
+ return get_runtime_generic_context_other_table_ptr (cfg, bblock, rgctx, slot, ip);
}
+/*
+ * get_runtime_generic_context_field:
+ *
+ *   Emit IR that fetches FIELD's RGCTX_TYPE info from RGCTX via a
+ * registered slot and the lazy-fetch trampoline.
+ */
static MonoInst*
-get_runtime_generic_context_field (MonoCompile *cfg, MonoMethod *method, MonoBasicBlock *bblock,
+get_runtime_generic_context_field (MonoCompile *cfg, MonoMethod *method, int context_used, MonoBasicBlock *bblock,
MonoClassField *field, MonoGenericContext *generic_context, MonoInst *rgctx, int rgctx_type,
const unsigned char *ip)
{
- int arg_num = mono_class_lookup_or_register_other_info (method->klass, field, rgctx_type, generic_context);
+ guint32 slot = mono_method_lookup_or_register_other_info (method,
+ context_used & MONO_GENERIC_CONTEXT_USED_METHOD, field, rgctx_type, generic_context);
+
+ return get_runtime_generic_context_other_table_ptr (cfg, bblock, rgctx, slot, ip);
+}
+
+/*
+ * get_runtime_generic_context_method_rgctx:
+ *
+ *   Emit IR that fetches RGCTX_METHOD's method runtime generic
+ * context (MONO_RGCTX_INFO_METHOD_RGCTX) from RGCTX via a registered
+ * slot and the lazy-fetch trampoline.
+ */
+static MonoInst*
+get_runtime_generic_context_method_rgctx (MonoCompile *cfg, MonoMethod *method, int context_used, MonoBasicBlock *bblock,
+ MonoMethod *rgctx_method, MonoGenericContext *generic_context, MonoInst *rgctx, const unsigned char *ip)
+{
+ guint32 slot = mono_method_lookup_or_register_other_info (method,
+ context_used & MONO_GENERIC_CONTEXT_USED_METHOD, rgctx_method,
+ MONO_RGCTX_INFO_METHOD_RGCTX, generic_context);
- return get_runtime_generic_context_other_table_ptr (cfg, bblock, rgctx, arg_num, ip);
+ return get_runtime_generic_context_other_table_ptr (cfg, bblock, rgctx, slot, ip);
}
static gboolean
* is generated.
*/
static int
-handle_unbox_nullable (MonoCompile* cfg, MonoMethod *caller_method, MonoBasicBlock* bblock, MonoInst* val,
- guchar *ip, MonoClass* klass, MonoGenericContext *generic_context, MonoInst *rgctx)
+handle_unbox_nullable (MonoCompile* cfg, MonoMethod *caller_method, int context_used, MonoBasicBlock* bblock,
+ MonoInst* val, const guchar *ip, MonoClass* klass, MonoGenericContext *generic_context, MonoInst *rgctx)
{
MonoMethod* method = mono_class_get_method_from_name (klass, "Unbox", 1);
MonoMethodSignature *signature = mono_method_signature (method);
if (rgctx) {
- MonoInst *addr = get_runtime_generic_context_method (cfg, caller_method, bblock, method,
+ MonoInst *addr = get_runtime_generic_context_method (cfg, caller_method, context_used, bblock, method,
generic_context, rgctx, MONO_RGCTX_INFO_GENERIC_METHOD_CODE, ip);
return mono_emit_rgctx_calli_spilled (cfg, bblock, signature, &val, addr, NULL, ip);
}
}
+/*
+ * handle_box_nullable_from_inst:
+ *
+ *   Box VAL as a Nullable<T> when KLASS is only known through the
+ * runtime generic context: look up Nullable's "Box" method code
+ * address in RGCTX and emit an indirect (spilled) call to it.
+ * Returns the instruction holding the boxed result.
+ */
-static MonoObject*
-mono_object_castclass (MonoObject *obj, MonoClass *klass)
+static MonoInst*
+handle_box_nullable_from_inst (MonoCompile *cfg, MonoMethod *caller_method, int context_used, MonoBasicBlock *bblock,
+ MonoInst *val, const guchar *ip, MonoClass *klass, MonoGenericContext *generic_context, MonoInst *rgctx)
{
- if (!obj)
- return NULL;
-
- if (mono_object_isinst (obj, klass))
- return obj;
+ MonoMethod* method = mono_class_get_method_from_name (klass, "Box", 1);
+ MonoInst *dest, *method_addr;
+ int temp;
- mono_raise_exception (mono_exception_from_name (mono_defaults.corlib,
- "System", "InvalidCastException"));
+ g_assert (mono_class_is_nullable (klass));
- return NULL;
+ method_addr = get_runtime_generic_context_method (cfg, caller_method, context_used, bblock, method,
+ generic_context, rgctx, MONO_RGCTX_INFO_GENERIC_METHOD_CODE, ip);
+ temp = mono_emit_rgctx_calli_spilled (cfg, bblock, mono_method_signature (method), &val,
+ method_addr, NULL, ip);
+ NEW_TEMPLOAD (cfg, dest, temp);
+ return dest;
}
+/*
+ * emit_castclass: (shared-generic-code path shown in this hunk)
+ *
+ *   When CONTEXT_USED is non-zero the cast target KLASS is only known
+ * at runtime, so it is fetched from the rgctx and the cast is done by
+ * the mono_object_castclass icall.  The new exception_exit label
+ * (return value -2) lets GENERIC_SHARING_FAILURE-style macros bail
+ * out; -1 remains the unverified exit.
+ */
static int
-emit_castclass (MonoClass *klass, guint32 token, gboolean shared_access, gboolean inst_is_castclass, MonoCompile *cfg,
+emit_castclass (MonoClass *klass, guint32 token, int context_used, gboolean inst_is_castclass, MonoCompile *cfg,
MonoMethod *method, MonoInst **arg_array, MonoType **param_types, GList *dont_inline,
unsigned char *end, MonoMethodHeader *header, MonoGenericContext *generic_context,
MonoBasicBlock **_bblock, unsigned char **_ip, MonoInst ***_sp, int *_inline_costs, guint *_real_offset)
guint real_offset = *_real_offset;
int return_value = 0;
- if (shared_access) {
- MonoInst *this = NULL, *rgctx;
- MonoInst *args [2];
+ if (context_used) {
+ MonoInst *rgctx, *args [2];
int temp;
g_assert (!method->klass->valuetype);
args [0] = *sp;
/* klass */
- if (!(method->flags & METHOD_ATTRIBUTE_STATIC))
- NEW_ARGLOAD (cfg, this, 0);
- rgctx = get_runtime_generic_context (cfg, method, this, ip);
- args [1] = get_runtime_generic_context_ptr (cfg, method, bblock, klass,
- token, MINI_TOKEN_SOURCE_CLASS, generic_context, rgctx, MONO_RGCTX_INFO_KLASS, ip);
+ GET_RGCTX (rgctx, context_used);
+ args [1] = get_runtime_generic_context_ptr (cfg, method, context_used, bblock, klass,
+ generic_context, rgctx, MONO_RGCTX_INFO_KLASS, ip);
temp = mono_emit_jit_icall (cfg, bblock, mono_object_castclass, args, ip);
NEW_TEMPLOAD (cfg, *sp, temp);
*_inline_costs = inline_costs;
*_real_offset = real_offset;
return return_value;
+exception_exit:
+ return_value = -2;
+ goto do_return;
unverified:
return_value = -1;
goto do_return;
}
-static gboolean
+/*
+ * emit_unbox:
+ *
+ *   Emit the unbox sequence for KLASS: an unboxcast type check on the
+ * object on the stack, then an add of sizeof (MonoObject) to yield a
+ * pointer to the boxed value.  In shared generic code (CONTEXT_USED)
+ * the element class is fetched from the rgctx and the register form
+ * of the check (OP_UNBOXCAST_REG) is used.  Follows the same
+ * out-parameter convention as emit_castclass: 0 on success, -2 via
+ * exception_exit (reachable from the GET_RGCTX failure macros).
+ */
+static int
+emit_unbox (MonoClass *klass, guint32 token, int context_used,
+ MonoCompile *cfg, MonoMethod *method, MonoInst **arg_array, MonoType **param_types, GList *dont_inline,
+ unsigned char *end, MonoMethodHeader *header, MonoGenericContext *generic_context,
+ MonoBasicBlock **_bblock, unsigned char **_ip, MonoInst ***_sp, int *_inline_costs, guint *_real_offset)
+{
+ MonoBasicBlock *bblock = *_bblock;
+ unsigned char *ip = *_ip;
+ MonoInst **sp = *_sp;
+ int inline_costs = *_inline_costs;
+ guint real_offset = *_real_offset;
+ int return_value = 0;
+
+ MonoInst *add, *vtoffset, *ins;
+
+ /* Needed by the code generated in inssel.brg */
+ mono_get_got_var (cfg);
+
+ if (context_used) {
+ MonoInst *rgctx, *element_class;
+
+ /* This assertion is from the unboxcast insn */
+ g_assert (klass->rank == 0);
+
+ GET_RGCTX (rgctx, context_used);
+ element_class = get_runtime_generic_context_ptr (cfg, method, context_used, bblock,
+ klass->element_class, generic_context, rgctx, MONO_RGCTX_INFO_KLASS, ip);
+
+ MONO_INST_NEW (cfg, ins, OP_UNBOXCAST_REG);
+ ins->type = STACK_OBJ;
+ ins->inst_left = *sp;
+ ins->inst_right = element_class;
+ ins->klass = klass;
+ ins->cil_code = ip;
+ } else {
+ MONO_INST_NEW (cfg, ins, OP_UNBOXCAST);
+ ins->type = STACK_OBJ;
+ ins->inst_left = *sp;
+ ins->klass = klass;
+ ins->inst_newa_class = klass;
+ ins->cil_code = ip;
+ }
+
+ /* skip the MonoObject header to reach the value */
+ MONO_INST_NEW (cfg, add, OP_PADD);
+ NEW_ICONST (cfg, vtoffset, sizeof (MonoObject));
+ add->inst_left = ins;
+ add->inst_right = vtoffset;
+ add->type = STACK_MP;
+ add->klass = klass;
+ *sp = add;
+
+do_return:
+ *_bblock = bblock;
+ *_ip = ip;
+ *_sp = sp;
+ *_inline_costs = inline_costs;
+ *_real_offset = real_offset;
+ return return_value;
+exception_exit:
+ return_value = -2;
+ goto do_return;
+}
+
+gboolean
mini_assembly_can_skip_verification (MonoDomain *domain, MonoMethod *method)
{
MonoAssembly *assembly = method->klass->image->assembly;
*
* Returns true if the method is invalid.
*/
-static gboolean
+gboolean
mini_method_verify (MonoCompile *cfg, MonoMethod *method)
{
GSList *tmp, *res;
MonoDeclSecurityActions actions;
GSList *class_inits = NULL;
gboolean dont_verify, dont_verify_stloc, readonly = FALSE;
+ int context_used;
/* serialization and xdomain stuff may need access to private fields and methods */
dont_verify = method->klass->image->assembly->corlib_internal? TRUE: FALSE;
if (!cfg->generic_sharing_context)
g_assert (!sig->has_type_parameters);
+ if (sig->generic_param_count && method->wrapper_type == MONO_WRAPPER_NONE) {
+ g_assert (method->is_inflated);
+ g_assert (mono_method_get_context (method)->method_inst);
+ }
+ if (method->is_inflated && mono_method_get_context (method)->method_inst)
+ g_assert (sig->generic_param_count);
+
if (cfg->method == method)
real_offset = 0;
else
cfg->coverage_info = mono_profiler_coverage_alloc (cfg->method, header->code_size);
/* ENTRY BLOCK */
- NEW_BBLOCK (cfg, start_bblock);
- cfg->bb_entry = start_bblock;
+ cfg->bb_entry = start_bblock = NEW_BBLOCK (cfg);
start_bblock->cil_code = NULL;
start_bblock->cil_length = 0;
start_bblock->block_num = cfg->num_bblocks++;
/* EXIT BLOCK */
- NEW_BBLOCK (cfg, end_bblock);
- cfg->bb_exit = end_bblock;
+ cfg->bb_exit = end_bblock = NEW_BBLOCK (cfg);
end_bblock->cil_code = NULL;
end_bblock->cil_length = 0;
end_bblock->block_num = cfg->num_bblocks++;
clause->data.catch_class &&
cfg->generic_sharing_context &&
mono_class_check_context_used (clause->data.catch_class)) {
+ if (mono_method_get_context (method)->method_inst)
+ GENERIC_SHARING_FAILURE (CEE_NOP);
+
/*
* In shared generic code with catch
* clauses containing type variables
* the exception handling code has to
* be able to get to the rgctx.
* Therefore we have to make sure that
- * the rgctx argument (for static
- * methods) or the "this" argument
- * (for non-static methods) are live.
+ * the vtable/mrgctx argument (for
+ * static or generic methods) or the
+ * "this" argument (for non-static
+ * methods) are live.
*/
- if (method->flags & METHOD_ATTRIBUTE_STATIC) {
+ if ((method->flags & METHOD_ATTRIBUTE_STATIC) ||
+ mini_method_get_context (method)->method_inst) {
mono_get_vtable_var (cfg);
} else {
MonoInst *this, *dummy_use;
}
/* FIRST CODE BLOCK */
- NEW_BBLOCK (cfg, bblock);
+ bblock = NEW_BBLOCK (cfg);
bblock->cil_code = ip;
ADD_BBLOCK (cfg, bblock);
if ((header->init_locals || (cfg->method == method && (cfg->opt & MONO_OPT_SHARED))) || cfg->compile_aot || security || pinvoke) {
/* we use a separate basic block for the initialization code */
- NEW_BBLOCK (cfg, init_localsbb);
- cfg->bb_init = init_localsbb;
+ cfg->bb_init = init_localsbb = NEW_BBLOCK (cfg);
init_localsbb->real_offset = real_offset;
start_bblock->next_bb = init_localsbb;
init_localsbb->next_bb = bblock;
real_offset = inline_offset;
cfg->ip = ip;
+ context_used = 0;
+
if (start_new_bblock) {
bblock->cil_length = ip - bblock->cil_code;
if (start_new_bblock == 2) {
int temp, array_rank = 0;
int virtual = *ip == CEE_CALLVIRT;
gboolean no_spill;
- int context_used = 0;
+ gboolean pass_imt_from_rgctx = FALSE;
+ MonoInst *imt_arg = NULL;
gboolean pass_vtable = FALSE;
+ gboolean pass_mrgctx = FALSE;
MonoInst *vtable_arg = NULL;
+ gboolean check_this = FALSE;
CHECK_OPSIZE (5);
token = read32 (ip + 1);
}
if (mono_method_signature (cmethod)->pinvoke) {
- MonoMethod *wrapper = mono_marshal_get_native_wrapper (cmethod, check_for_pending_exc);
+ MonoMethod *wrapper = mono_marshal_get_native_wrapper (cmethod, check_for_pending_exc, FALSE);
fsig = mono_method_signature (wrapper);
} else if (constrained_call) {
fsig = mono_method_signature (cmethod);
if (!cfg->generic_sharing_context && cmethod && cmethod->klass->generic_container)
UNVERIFIED;
+ if (!cfg->generic_sharing_context && cmethod)
+ g_assert (!mono_method_check_context_used (cmethod));
+
CHECK_STACK (n);
//g_assert (!virtual || fsig->hasthis);
* generic method).
*/
if (sharing_enabled && context_sharable &&
- !mini_method_get_context (cmethod)->method_inst)
+ !(mini_method_get_context (cmethod) && mini_method_get_context (cmethod)->method_inst))
pass_vtable = TRUE;
}
+ if (cmethod && mini_method_get_context (cmethod) &&
+ mini_method_get_context (cmethod)->method_inst) {
+ gboolean sharing_enabled = mono_class_generic_sharing_enabled (cmethod->klass);
+ MonoGenericContext *context = mini_method_get_context (cmethod);
+ gboolean context_sharable = mono_generic_context_is_sharable (context, TRUE);
+
+ g_assert (!pass_vtable);
+
+ if (sharing_enabled && context_sharable)
+ pass_mrgctx = TRUE;
+ }
+
if (cfg->generic_sharing_context && cmethod) {
MonoGenericContext *cmethod_context = mono_method_get_context (cmethod);
context_used = mono_method_check_context_used (cmethod);
- if (context_used & MONO_GENERIC_CONTEXT_USED_METHOD)
- GENERIC_SHARING_FAILURE (*ip);
-
- if (context_used &&
- ((cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE) ||
- (cmethod_context && cmethod_context->method_inst && cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL))) {
- GENERIC_SHARING_FAILURE (*ip);
+ if (context_used && (cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE)) {
+ /* Generic method interface
+ calls are resolved via a
+ helper function and don't
+ need an imt. */
+ if (!cmethod_context || !cmethod_context->method_inst)
+ pass_imt_from_rgctx = TRUE;
}
+
+ /*
+ * If a shared method calls another
+ * shared method then the caller must
+ * have a generic sharing context
+ * because the magic trampoline
+ * requires it. FIXME: We shouldn't
+ * have to force the vtable/mrgctx
+ * variable here. Instead there
+ * should be a flag in the cfg to
+ * request a generic sharing context.
+ */
+ if (context_used && method->flags & METHOD_ATTRIBUTE_STATIC)
+ mono_get_vtable_var (cfg);
}
if (pass_vtable) {
if (context_used) {
MonoInst *rgctx;
- GET_RGCTX (rgctx);
- vtable_arg = get_runtime_generic_context_ptr (cfg, method, bblock, cmethod->klass,
- token, MINI_TOKEN_SOURCE_METHOD, generic_context,
- rgctx, MONO_RGCTX_INFO_VTABLE, ip);
+ GET_RGCTX (rgctx, context_used);
+ vtable_arg = get_runtime_generic_context_ptr (cfg, method, context_used,
+ bblock, cmethod->klass, generic_context, rgctx, MONO_RGCTX_INFO_VTABLE, ip);
} else {
MonoVTable *vtable = mono_class_vtable (cfg->domain, cmethod->klass);
}
}
+ if (pass_mrgctx) {
+ g_assert (!vtable_arg);
+
+ if (context_used) {
+ MonoInst *rgctx;
+
+ GET_RGCTX (rgctx, context_used);
+ vtable_arg = get_runtime_generic_context_method_rgctx (cfg, method,
+ context_used, bblock, cmethod, generic_context, rgctx, ip);
+ } else {
+ MonoMethodRuntimeGenericContext *mrgctx;
+
+ mrgctx = mono_method_lookup_rgctx (mono_class_vtable (cfg->domain, cmethod->klass),
+ mini_method_get_context (cmethod)->method_inst);
+
+ cfg->disable_aot = TRUE;
+ NEW_PCONST (cfg, vtable_arg, mrgctx);
+ }
+
+ if (!(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) ||
+ (cmethod->flags & METHOD_ATTRIBUTE_FINAL)) {
+ if (virtual)
+ check_this = TRUE;
+ virtual = 0;
+ }
+ }
+
+ if (pass_imt_from_rgctx) {
+ MonoInst *rgctx;
+
+ g_assert (!pass_vtable);
+ g_assert (cmethod);
+
+ GET_RGCTX (rgctx, context_used);
+ imt_arg = get_runtime_generic_context_method (cfg, method, context_used, bblock, cmethod,
+ generic_context, rgctx, MONO_RGCTX_INFO_METHOD, ip);
+ }
+
+ if (check_this) {
+ MonoInst *check;
+
+ MONO_INST_NEW (cfg, check, OP_CHECK_THIS_PASSTHROUGH);
+ check->cil_code = ip;
+ check->inst_left = sp [0];
+ check->type = sp [0]->type;
+ sp [0] = check;
+ }
+
if (cmethod && virtual &&
(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) &&
!((cmethod->flags & METHOD_ATTRIBUTE_FINAL) &&
/* Because of the PCONST below */
cfg->disable_aot = TRUE;
NEW_TEMPLOAD (cfg, iargs [0], this_temp->inst_c0);
- NEW_METHODCONST (cfg, iargs [1], cmethod);
- NEW_PCONST (cfg, iargs [2], mono_method_get_context (cmethod));
- NEW_TEMPLOADA (cfg, iargs [3], this_arg_temp->inst_c0);
- temp = mono_emit_jit_icall (cfg, bblock, mono_helper_compile_generic_method, iargs, ip);
+ if (context_used) {
+ MonoInst *rgctx;
+
+ GET_RGCTX (rgctx, context_used);
+ iargs [1] = get_runtime_generic_context_method (cfg, method, context_used,
+ bblock, cmethod,
+ generic_context, rgctx, MONO_RGCTX_INFO_METHOD, ip);
+ NEW_TEMPLOADA (cfg, iargs [2], this_arg_temp->inst_c0);
+ temp = mono_emit_jit_icall (cfg, bblock,
+ mono_helper_compile_generic_method, iargs, ip);
+ } else {
+ NEW_METHODCONST (cfg, iargs [1], cmethod);
+ NEW_TEMPLOADA (cfg, iargs [2], this_arg_temp->inst_c0);
+ temp = mono_emit_jit_icall (cfg, bblock, mono_helper_compile_generic_method,
+ iargs, ip);
+ }
NEW_TEMPLOAD (cfg, addr, temp);
NEW_TEMPLOAD (cfg, sp [0], this_arg_temp->inst_c0);
}
/* FIXME: runtime generic context pointer for jumps? */
+ /* FIXME: handle this for generic sharing eventually */
if ((ins_flag & MONO_INST_TAILCALL) && !cfg->generic_sharing_context && !vtable_arg && cmethod && (*ip == CEE_CALL) &&
(mono_metadata_signature_equal (mono_method_signature (method), mono_method_signature (cmethod)))) {
int i;
- GENERIC_SHARING_FAILURE (*ip);
-
/* Prevent inlining of methods with tail calls (the call stack would be altered) */
INLINE_FAILURE;
/* FIXME: This assumes the two methods has the same number and type of arguments */
handle_loaded_temps (cfg, bblock, stack_start, sp);
- if ((cfg->opt & MONO_OPT_INLINE) && cmethod &&
+ if ((cfg->opt & MONO_OPT_INLINE) && cmethod && //!check_this &&
(!virtual || !(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL) || (cmethod->flags & METHOD_ATTRIBUTE_FINAL)) &&
mono_method_check_inlining (cfg, cmethod) &&
!g_list_find (dont_inline, cmethod)) {
(cmethod->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL)) {
/* Prevent inlining of methods that call wrappers */
INLINE_FAILURE;
- cmethod = mono_marshal_get_native_wrapper (cmethod, check_for_pending_exc);
+ cmethod = mono_marshal_get_native_wrapper (cmethod, check_for_pending_exc, FALSE);
allways = TRUE;
}
else
no_spill = FALSE;
+ /* FIXME: only do this for generic methods if
+ they are not shared! */
if (context_used &&
(cmethod->klass->valuetype ||
- (cmethod->is_inflated && mono_method_get_context (cmethod)->method_inst) ||
+ (cmethod->is_inflated && mono_method_get_context (cmethod)->method_inst && !pass_mrgctx) ||
((cmethod->flags & METHOD_ATTRIBUTE_STATIC) &&
mono_class_generic_sharing_enabled (cmethod->klass)) ||
- (!mono_method_is_generic_sharable_impl (cmethod) &&
+ (!imt_arg && !mono_method_is_generic_sharable_impl (cmethod, TRUE) &&
(!virtual || cmethod->flags & METHOD_ATTRIBUTE_FINAL ||
!(cmethod->flags & METHOD_ATTRIBUTE_VIRTUAL))))) {
- MonoInst *this = NULL, *rgctx;
+ MonoInst *rgctx;
INLINE_FAILURE;
g_assert (cfg->generic_sharing_context && cmethod);
- g_assert (addr == NULL);
+ g_assert (!addr);
/*
- * We are compiling a call to a
- * generic method from shared code,
- * which means that we have to look up
- * the method in the rgctx and do an
- * indirect call.
+ * We are compiling a call to
+ * non-shared generic code from shared
+ * code, which means that we have to
+ * look up the method in the rgctx and
+ * do an indirect call.
*/
-
- GENERIC_SHARING_FAILURE_IF_VALUETYPE_METHOD (*ip);
-
- if (!(method->flags & METHOD_ATTRIBUTE_STATIC))
- NEW_ARGLOAD (cfg, this, 0);
- rgctx = get_runtime_generic_context (cfg, method, this, ip);
- addr = get_runtime_generic_context_method (cfg, method, bblock, cmethod,
+ GET_RGCTX (rgctx, context_used);
+ addr = get_runtime_generic_context_method (cfg, method, context_used, bblock, cmethod,
generic_context, rgctx, MONO_RGCTX_INFO_GENERIC_METHOD_CODE, ip);
+
}
if (addr) {
+ g_assert (!imt_arg);
+
if (*ip == CEE_CALL) {
g_assert (context_used);
} else if (*ip == CEE_CALLI) {
temp = mono_emit_rgctx_calli_spilled (cfg, bblock, fsig, sp, addr, vtable_arg, ip);
if (temp != -1) {
NEW_TEMPLOAD (cfg, *sp, temp);
+ NEW_TEMPLOAD_SOFT_FLOAT (cfg, bblock, *sp, temp, ip);
sp++;
}
}
}
} else if (no_spill) {
ins = (MonoInst*)mono_emit_rgctx_method_call (cfg, bblock, cmethod, fsig, sp,
- vtable_arg, ip, virtual ? sp [0] : NULL);
+ vtable_arg, imt_arg, ip, virtual ? sp [0] : NULL);
*sp++ = ins;
} else {
if ((temp = mono_emit_rgctx_method_call_spilled (cfg, bblock, cmethod, fsig, sp,
- vtable_arg, ip, virtual ? sp [0] : NULL)) != -1) {
+ vtable_arg, imt_arg, ip, virtual ? sp [0] : NULL)) != -1) {
MonoInst *load;
NEW_TEMPLOAD (cfg, load, temp);
//g_assert (returnvar != -1);
NEW_TEMPSTORE (cfg, store, return_var->inst_c0, *sp);
store->cil_code = sp [0]->cil_code;
- /* FIXME: handle CEE_STIND_R4 */
if (store->opcode == CEE_STOBJ) {
g_assert_not_reached ();
NEW_TEMPLOADA (cfg, store, return_var->inst_c0);
*/
if ((ins->opcode == OP_LSHR_UN) && (ins->type == STACK_I8)
&& (ins->inst_right->opcode == OP_ICONST) && (ins->inst_right->inst_c0 == 32)) {
- ins->opcode = OP_LONG_SHRUN_32;
+ ins->opcode = OP_LSHR_UN_32;
/*g_print ("applied long shr speedup to %s\n", cfg->method->name);*/
ip++;
break;
case CEE_CONV_R_UN:
CHECK_STACK (1);
ADD_UNOP (*ip);
+
+#ifdef MONO_ARCH_SOFT_FLOAT
+ /*
+ * It's rather hard to emit the soft float code during the decompose
+ * pass, so avoid it in some specific cases.
+ */
+ if (ins->opcode == OP_LCONV_TO_R4) {
+ MonoInst *conv;
+
+ ins->opcode = OP_LCONV_TO_R8;
+ ins->type = STACK_R8;
+
+ --sp;
+ *sp++ = emit_tree (cfg, bblock, ins, ip + 1);
+
+ MONO_INST_NEW (cfg, conv, CEE_CONV_R4);
+ conv->inst_left = sp [-1];
+ conv->type = STACK_R8;
+ sp [-1] = ins;
+
+ ip++;
+ break;
+ }
+#endif
+
if (mono_find_jit_opcode_emulation (ins->opcode)) {
--sp;
*sp++ = emit_tree (cfg, bblock, ins, ip + 1);
case CEE_NEWOBJ: {
MonoInst *iargs [2];
MonoMethodSignature *fsig;
+ MonoInst this_ins;
int temp;
- gboolean generic_shared = FALSE;
CHECK_OPSIZE (5);
token = read32 (ip + 1);
if (!mono_class_init (cmethod->klass))
goto load_error;
- if (cfg->generic_sharing_context) {
- int context_used = mono_method_check_context_used (cmethod);
-
- if (context_used & MONO_GENERIC_CONTEXT_USED_METHOD)
- GENERIC_SHARING_FAILURE (CEE_NEWOBJ);
-
- if (context_used)
- generic_shared = TRUE;
- }
+ if (cfg->generic_sharing_context)
+ context_used = mono_method_check_context_used (cmethod);
if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS) {
if (check_linkdemand (cfg, method, cmethod, bblock, ip))
sp [1] = sp [0];
}
+ /* check_call_signature () requires sp[0] to be set */
+ this_ins.type = STACK_OBJ;
+ sp [0] = &this_ins;
+ if (check_call_signature (cfg, fsig, sp))
+ UNVERIFIED;
+
handle_loaded_temps (cfg, bblock, stack_start, sp);
if (mini_class_is_system_array (cmethod->klass)) {
- g_assert (!generic_shared);
+ g_assert (!context_used);
NEW_METHODCONST (cfg, *sp, cmethod);
- temp = handle_array_new (cfg, bblock, fsig->param_count, sp, ip);
+
+ if (fsig->param_count == 2)
+ /* Avoid varargs in the common case */
+ temp = mono_emit_jit_icall (cfg, bblock, mono_array_new_2, sp, ip);
+ else
+ temp = handle_array_new (cfg, bblock, fsig->param_count, sp, ip);
} else if (cmethod->string_ctor) {
- g_assert (!generic_shared);
+ g_assert (!context_used);
/* we simply pass a null pointer */
NEW_PCONST (cfg, *sp, NULL);
* iargs [0] to be a boxed instance, but luckily the vcall
* will be transformed into a normal call there.
*/
- } else if (generic_shared) {
+ } else if (context_used) {
MonoInst *rgctx, *data;
int rgctx_info;
- GET_RGCTX (rgctx);
+ GET_RGCTX (rgctx, context_used);
if (cfg->opt & MONO_OPT_SHARED)
rgctx_info = MONO_RGCTX_INFO_KLASS;
else
rgctx_info = MONO_RGCTX_INFO_VTABLE;
- data = get_runtime_generic_context_ptr (cfg, method, bblock, cmethod->klass,
- token, MINI_TOKEN_SOURCE_METHOD, generic_context,
- rgctx, rgctx_info, ip);
+ data = get_runtime_generic_context_ptr (cfg, method, context_used, bblock,
+ cmethod->klass, generic_context, rgctx, rgctx_info, ip);
temp = handle_alloc_from_inst (cfg, bblock, cmethod->klass, data, FALSE, ip);
NEW_TEMPLOAD (cfg, *sp, temp);
if (cmethod->klass->marshalbyref)
callvirt_this_arg = sp [0];
- if ((cfg->opt & MONO_OPT_INLINE) && cmethod && !generic_shared &&
+ if ((cfg->opt & MONO_OPT_INLINE) && cmethod && !context_used &&
mono_method_check_inlining (cfg, cmethod) &&
!mono_class_is_subclass_of (cmethod->klass, mono_defaults.exception_class, FALSE) &&
!g_list_find (dont_inline, cmethod)) {
INLINE_FAILURE;
mono_emit_method_call_spilled (cfg, bblock, cmethod, fsig, sp, ip, callvirt_this_arg);
}
- } else if (generic_shared &&
+ } else if (context_used &&
(cmethod->klass->valuetype ||
- !mono_method_is_generic_sharable_impl (cmethod))) {
- MonoInst *this = NULL, *rgctx, *cmethod_addr;
+ !mono_method_is_generic_sharable_impl (cmethod, TRUE))) {
+ MonoInst *rgctx, *cmethod_addr;
g_assert (!callvirt_this_arg);
- GENERIC_SHARING_FAILURE_IF_VALUETYPE_METHOD (*ip);
-
- if (!(method->flags & METHOD_ATTRIBUTE_STATIC))
- NEW_ARGLOAD (cfg, this, 0);
- rgctx = get_runtime_generic_context (cfg, method, this, ip);
- cmethod_addr = get_runtime_generic_context_method (cfg, method, bblock, cmethod,
+ GET_RGCTX (rgctx, context_used);
+ cmethod_addr = get_runtime_generic_context_method (cfg, method, context_used,
+ bblock, cmethod,
generic_context, rgctx, MONO_RGCTX_INFO_GENERIC_METHOD_CODE, ip);
mono_emit_calli_spilled (cfg, bblock, fsig, sp, cmethod_addr, ip);
inline_costs += 5;
break;
}
- case CEE_ISINST: {
- gboolean shared_access = FALSE;
-
+ case CEE_ISINST:
CHECK_STACK (1);
--sp;
CHECK_OPSIZE (5);
if (sp [0]->type != STACK_OBJ)
UNVERIFIED;
- if (cfg->generic_sharing_context) {
- int context_used = mono_class_check_context_used (klass);
-
- if (context_used & MONO_GENERIC_CONTEXT_USED_METHOD)
- GENERIC_SHARING_FAILURE (CEE_ISINST);
-
- if (context_used)
- shared_access = TRUE;
- }
+ if (cfg->generic_sharing_context)
+ context_used = mono_class_check_context_used (klass);
/* Needed by the code generated in inssel.brg */
- if (!shared_access)
+ if (!context_used)
mono_get_got_var (cfg);
- if (shared_access) {
- MonoInst *this = NULL, *rgctx;
- MonoInst *args [2];
+ if (context_used) {
+ MonoInst *rgctx, *args [2];
int temp;
- GENERIC_SHARING_FAILURE_IF_VALUETYPE_METHOD (*ip);
-
/* obj */
args [0] = *sp;
/* klass */
- if (!(method->flags & METHOD_ATTRIBUTE_STATIC))
- NEW_ARGLOAD (cfg, this, 0);
- rgctx = get_runtime_generic_context (cfg, method, this, ip);
- args [1] = get_runtime_generic_context_ptr (cfg, method, bblock, klass,
- token, MINI_TOKEN_SOURCE_CLASS, generic_context, rgctx, MONO_RGCTX_INFO_KLASS, ip);
+ GET_RGCTX (rgctx, context_used);
+ args [1] = get_runtime_generic_context_ptr (cfg, method, context_used, bblock, klass,
+ generic_context, rgctx, MONO_RGCTX_INFO_KLASS, ip);
temp = mono_emit_jit_icall (cfg, bblock, mono_object_isinst, args, ip);
NEW_TEMPLOAD (cfg, *sp, temp);
ip += 5;
}
break;
- }
case CEE_UNBOX_ANY: {
- MonoInst *add, *vtoffset;
MonoInst *iargs [3];
guint32 align;
- int context_used = 0;
CHECK_STACK (1);
--sp;
klass = mini_get_class (method, token, generic_context);
CHECK_TYPELOAD (klass);
- if (cfg->generic_sharing_context) {
+ if (cfg->generic_sharing_context)
context_used = mono_class_check_context_used (klass);
- if (context_used & MONO_GENERIC_CONTEXT_USED_METHOD)
- GENERIC_SHARING_FAILURE (CEE_UNBOX_ANY);
- }
-
if (generic_class_is_reference_type (cfg, klass)) {
- if (context_used)
- GENERIC_SHARING_FAILURE_IF_VALUETYPE_METHOD (CEE_UNBOX_ANY);
switch (emit_castclass (klass, token, context_used, FALSE,
cfg, method, arg_array, param_types, dont_inline, end, header,
generic_context, &bblock, &ip, &sp, &inline_costs, &real_offset)) {
case 0: break;
case -1: goto unverified;
+ case -2: goto exception_exit;
default: g_assert_not_reached ();
}
break;
int v;
MonoInst *rgctx = NULL;
- if (context_used) {
- MonoInst *this = NULL;
-
- if (!(method->flags & METHOD_ATTRIBUTE_STATIC))
- NEW_ARGLOAD (cfg, this, 0);
- rgctx = get_runtime_generic_context (cfg, method, this, ip);
- }
+ if (context_used)
+ GET_RGCTX (rgctx, context_used);
- v = handle_unbox_nullable (cfg, method, bblock, *sp, ip, klass, generic_context, rgctx);
+ v = handle_unbox_nullable (cfg, method, context_used, bblock, *sp, ip, klass,
+ generic_context, rgctx);
NEW_TEMPLOAD (cfg, *sp, v);
sp ++;
ip += 5;
break;
}
- /* Needed by the code generated in inssel.brg */
- mono_get_got_var (cfg);
-
- if (context_used) {
- MonoInst *this = NULL, *rgctx;
- MonoInst *element_class;
-
- GENERIC_SHARING_FAILURE_IF_VALUETYPE_METHOD (CEE_UNBOX_ANY);
-
- /* This assertion is from the
- unboxcast insn */
- g_assert (klass->rank == 0);
-
- if (!(method->flags & METHOD_ATTRIBUTE_STATIC))
- NEW_ARGLOAD (cfg, this, 0);
- rgctx = get_runtime_generic_context (cfg, method, this, ip);
- /* FIXME: Passing token here is
- technically not correct, because we
- don't use klass but
- klass->element_class. Since it's
- only used by code for debugging the
- extensible runtime generic context
- it's not a big deal. To be correct
- we'd have to invent a new token
- source. */
- element_class = get_runtime_generic_context_ptr (cfg, method, bblock,
- klass->element_class, token, MINI_TOKEN_SOURCE_CLASS,
- generic_context, rgctx, MONO_RGCTX_INFO_KLASS, ip);
-
- MONO_INST_NEW (cfg, ins, OP_UNBOXCAST_REG);
- ins->type = STACK_OBJ;
- ins->inst_left = *sp;
- ins->inst_right = element_class;
- ins->klass = klass;
- } else {
- MONO_INST_NEW (cfg, ins, OP_UNBOXCAST);
- ins->type = STACK_OBJ;
- ins->inst_left = *sp;
- ins->klass = klass;
- ins->inst_newa_class = klass;
+ switch (emit_unbox (klass, token, context_used,
+ cfg, method, arg_array, param_types, dont_inline, end, header,
+ generic_context, &bblock, &ip, &sp, &inline_costs, &real_offset)) {
+ case 0: break;
+ case -1: goto unverified;
+ case -2: goto exception_exit;
+ default: g_assert_not_reached ();
}
-
- MONO_INST_NEW (cfg, add, OP_PADD);
- NEW_ICONST (cfg, vtoffset, sizeof (MonoObject));
- add->inst_left = ins;
- add->inst_right = vtoffset;
- add->type = STACK_MP;
- add->klass = mono_defaults.object_class;
- *sp = add;
ip += 5;
/* LDOBJ impl */
n = mono_class_value_size (klass, &align);
inline_costs += 2;
break;
}
- case CEE_UNBOX: {
- MonoInst *add, *vtoffset;
-
+ case CEE_UNBOX:
CHECK_STACK (1);
--sp;
CHECK_OPSIZE (5);
klass = mini_get_class (method, token, generic_context);
CHECK_TYPELOAD (klass);
- if (cfg->generic_sharing_context && mono_class_check_context_used (klass))
- GENERIC_SHARING_FAILURE (CEE_UNBOX);
+ if (cfg->generic_sharing_context)
+ context_used = mono_class_check_context_used (klass);
if (mono_class_is_nullable (klass)) {
- int v = handle_unbox_nullable (cfg, method, bblock, *sp, ip, klass, generic_context, NULL);
+ int v;
+ MonoInst *rgctx = NULL;
+
+ if (context_used)
+ GET_RGCTX (rgctx, context_used);
+ v = handle_unbox_nullable (cfg, method, context_used, bblock, *sp, ip, klass,
+ generic_context, rgctx);
NEW_TEMPLOAD (cfg, *sp, v);
sp ++;
ip += 5;
break;
}
- /* Needed by the code generated in inssel.brg */
- mono_get_got_var (cfg);
+ switch (emit_unbox (klass, token, context_used,
+ cfg, method, arg_array, param_types, dont_inline, end, header,
+ generic_context, &bblock, &ip, &sp, &inline_costs, &real_offset)) {
+ case 0: break;
+ case -1: goto unverified;
+ case -2: goto exception_exit;
+ default: g_assert_not_reached ();
+ }
- MONO_INST_NEW (cfg, ins, OP_UNBOXCAST);
- ins->type = STACK_OBJ;
- ins->inst_left = *sp;
- ins->klass = klass;
- ins->inst_newa_class = klass;
-
- MONO_INST_NEW (cfg, add, OP_PADD);
- NEW_ICONST (cfg, vtoffset, sizeof (MonoObject));
- add->inst_left = ins;
- add->inst_right = vtoffset;
- add->type = STACK_MP;
- add->klass = klass;
- *sp++ = add;
+ sp++;
ip += 5;
inline_costs += 2;
break;
- }
- case CEE_CASTCLASS: {
- gboolean shared_access = FALSE;
-
+ case CEE_CASTCLASS:
CHECK_STACK (1);
--sp;
CHECK_OPSIZE (5);
if (sp [0]->type != STACK_OBJ)
UNVERIFIED;
- if (cfg->generic_sharing_context) {
- int context_used = mono_class_check_context_used (klass);
-
- if (context_used & MONO_GENERIC_CONTEXT_USED_METHOD)
- GENERIC_SHARING_FAILURE (CEE_CASTCLASS);
-
- if (context_used) {
- shared_access = TRUE;
- GENERIC_SHARING_FAILURE_IF_VALUETYPE_METHOD (*ip);
- }
- }
+ if (cfg->generic_sharing_context)
+ context_used = mono_class_check_context_used (klass);
- switch (emit_castclass (klass, token, shared_access, TRUE,
+ switch (emit_castclass (klass, token, context_used, TRUE,
cfg, method, arg_array, param_types, dont_inline, end, header,
generic_context, &bblock, &ip, &sp, &inline_costs, &real_offset)) {
case 0: break;
case -1: goto unverified;
+ case -2: goto exception_exit;
default: g_assert_not_reached ();
}
break;
- }
case CEE_THROW:
CHECK_STACK (1);
MONO_INST_NEW (cfg, ins, OP_THROW);
} else {
temp = mono_emit_method_call_spilled (cfg, bblock, wrapper, mono_method_signature (wrapper), iargs, ip, NULL);
NEW_TEMPLOAD (cfg, *sp, temp);
+ NEW_TEMPLOAD_SOFT_FLOAT (cfg, bblock, *sp, temp, ip);
sp++;
}
} else {
case CEE_LDSFLDA:
case CEE_STSFLD: {
MonoClassField *field;
+ gboolean is_special_static;
gpointer addr = NULL;
- gboolean shared_access = FALSE;
- int relation = 0;
CHECK_OPSIZE (5);
token = read32 (ip + 1);
GENERIC_SHARING_FAILURE (*ip);
#endif
- if (cfg->generic_sharing_context) {
- int context_used = mono_class_check_context_used (klass);
-
- if (context_used & MONO_GENERIC_CONTEXT_USED_METHOD ||
- klass->valuetype)
- GENERIC_SHARING_FAILURE (*ip);
-
- if (context_used) {
- relation = mono_class_generic_class_relation (klass, MONO_RGCTX_INFO_VTABLE,
- method->klass, generic_context, NULL);
- shared_access = TRUE;
- }
- }
+ if (cfg->generic_sharing_context)
+ context_used = mono_class_check_context_used (klass);
g_assert (!(field->type->attrs & FIELD_ATTRIBUTE_LITERAL));
if ((*ip) == CEE_STSFLD)
handle_loaded_temps (cfg, bblock, stack_start, sp);
- /* The special_static_fields field is init'd in mono_class_vtable, so it needs
- * to be called here.
- */
- if (!(cfg->opt & MONO_OPT_SHARED)) {
- mono_class_vtable (cfg->domain, klass);
- CHECK_TYPELOAD (klass);
- }
- mono_domain_lock (cfg->domain);
- if (cfg->domain->special_static_fields)
- addr = g_hash_table_lookup (cfg->domain->special_static_fields, field);
- mono_domain_unlock (cfg->domain);
+ is_special_static = mono_class_field_is_special_static (field);
- if ((cfg->opt & MONO_OPT_SHARED) || (cfg->compile_aot && addr)) {
+ if ((cfg->opt & MONO_OPT_SHARED) ||
+ (cfg->compile_aot && is_special_static) ||
+ (context_used && is_special_static)) {
int temp;
MonoInst *iargs [2];
- MonoInst *domain_var;
g_assert (field->parent);
- /* avoid depending on undefined C behavior in sequence points */
- domain_var = mono_get_domainvar (cfg);
- NEW_TEMPLOAD (cfg, iargs [0], domain_var->inst_c0);
- if (shared_access) {
+ if ((cfg->opt & MONO_OPT_SHARED) || cfg->compile_aot) {
+ MonoInst *domain_var;
+ /* avoid depending on undefined C behavior in sequence points */
+ domain_var = mono_get_domainvar (cfg);
+ NEW_TEMPLOAD (cfg, iargs [0], domain_var->inst_c0);
+ } else {
+ NEW_DOMAINCONST (cfg, iargs [0]);
+ }
+ if (context_used) {
MonoInst *rgctx;
- GET_RGCTX (rgctx);
- iargs [1] = get_runtime_generic_context_field (cfg, method, bblock, field,
+ GET_RGCTX (rgctx, context_used);
+ iargs [1] = get_runtime_generic_context_field (cfg, method, context_used,
+ bblock, field,
generic_context, rgctx, MONO_RGCTX_INFO_CLASS_FIELD, ip);
} else {
NEW_FIELDCONST (cfg, iargs [1], field);
}
temp = mono_emit_jit_icall (cfg, bblock, mono_class_static_field_address, iargs, ip);
NEW_TEMPLOAD (cfg, ins, temp);
- } else if (shared_access) {
- MonoInst *this, *rgctx, *static_data;
-
- GENERIC_SHARING_FAILURE_IF_VALUETYPE_METHOD (*ip);
+ } else if (context_used) {
+ MonoInst *rgctx, *static_data;
/*
g_print ("sharing static field access in %s.%s.%s - depth %d offset %d\n",
MonoCallInst *call;
MonoInst *vtable, *rgctx;
- if (!(method->flags & METHOD_ATTRIBUTE_STATIC))
- NEW_ARGLOAD (cfg, this, 0);
- else
- this = NULL;
-
- rgctx = get_runtime_generic_context (cfg, method, this, ip);
- vtable = get_runtime_generic_context_ptr (cfg, method, bblock, klass,
- token, MINI_TOKEN_SOURCE_FIELD, generic_context,
- rgctx, MONO_RGCTX_INFO_VTABLE, ip);
+ GET_RGCTX (rgctx, context_used);
+ vtable = get_runtime_generic_context_ptr (cfg, method, context_used, bblock, klass,
+ generic_context, rgctx, MONO_RGCTX_INFO_VTABLE, ip);
call = mono_emit_call_args (cfg, bblock, sig, NULL, FALSE, FALSE, ip, FALSE);
call->inst.opcode = OP_TRAMPCALL_VTABLE;
- call->fptr = mono_get_trampoline_code (MONO_TRAMPOLINE_GENERIC_CLASS_INIT);
+ call->fptr = mono_create_generic_class_init_trampoline ();
call->inst.inst_left = vtable;
*
* super_info.static_data + field->offset
*/
-
- if (!(method->flags & METHOD_ATTRIBUTE_STATIC))
- NEW_ARGLOAD (cfg, this, 0);
- else
- this = NULL;
- rgctx = get_runtime_generic_context (cfg, method, this, ip);
- static_data = get_runtime_generic_context_ptr (cfg, method, bblock, klass,
- token, MINI_TOKEN_SOURCE_FIELD, generic_context,
- rgctx, MONO_RGCTX_INFO_STATIC_DATA, ip);
+ GET_RGCTX (rgctx, context_used);
+ static_data = get_runtime_generic_context_ptr (cfg, method, context_used, bblock, klass,
+ generic_context, rgctx, MONO_RGCTX_INFO_STATIC_DATA, ip);
if (field->offset == 0) {
ins = static_data;
vtable = mono_class_vtable (cfg->domain, klass);
CHECK_TYPELOAD (klass);
- if (!addr) {
+ if (!is_special_static) {
if (mini_field_access_needs_cctor_run (cfg, method, vtable) && !(g_slist_find (class_inits, vtable))) {
guint8 *tramp = mono_create_class_init_trampoline (vtable);
mono_emit_native_call (cfg, bblock, tramp,
class_inits = g_slist_prepend (class_inits, vtable);
} else {
if (cfg->run_cctors) {
+ MonoException *ex;
/* This makes so that inline cannot trigger */
/* .cctors: too many apps depend on them */
/* running with a specific order... */
if (! vtable->initialized)
INLINE_FAILURE;
- mono_runtime_class_init (vtable);
+ ex = mono_runtime_class_init_full (vtable, FALSE);
+ if (ex) {
+ set_exception_object (cfg, ex);
+ goto exception_exit;
+ }
}
}
addr = (char*)vtable->data + field->offset;
else
NEW_PCONST (cfg, ins, addr);
} else {
+ int temp;
+ MonoInst *iargs [1];
+
+ /* The special_static_fields
+ * field is init'd in
+ * mono_class_vtable, so it
+ * needs to be called here.
+ */
+ if (!(cfg->opt & MONO_OPT_SHARED)) {
+ mono_class_vtable (cfg->domain, klass);
+ CHECK_TYPELOAD (klass);
+ }
+ mono_domain_lock (cfg->domain);
+ if (cfg->domain->special_static_fields)
+ addr = g_hash_table_lookup (cfg->domain->special_static_fields, field);
+ mono_domain_unlock (cfg->domain);
+
/*
* insert call to mono_threads_get_static_data (GPOINTER_TO_UINT (addr))
* This could be later optimized to do just a couple of
* memory dereferences with constant offsets.
*/
- int temp;
- MonoInst *iargs [1];
NEW_ICONST (cfg, iargs [0], GPOINTER_TO_UINT (addr));
temp = mono_emit_jit_icall (cfg, bblock, mono_get_special_static_data, iargs, ip);
NEW_TEMPLOAD (cfg, ins, temp);
MONO_ADD_INS (bblock, store);
} else {
gboolean is_const = FALSE;
- MonoVTable *vtable = mono_class_vtable (cfg->domain, klass);
+ MonoVTable *vtable = NULL;
+
+ if (!context_used)
+ vtable = mono_class_vtable (cfg->domain, klass);
CHECK_TYPELOAD (klass);
- if (!shared_access && !((cfg->opt & MONO_OPT_SHARED) || cfg->compile_aot) &&
+ if (!context_used && !((cfg->opt & MONO_OPT_SHARED) || cfg->compile_aot) &&
vtable->initialized && (field->type->attrs & FIELD_ATTRIBUTE_INIT_ONLY)) {
gpointer addr = (char*)vtable->data + field->offset;
int ro_type = field->type->type;
break;
case CEE_BOX: {
MonoInst *val;
- int context_used = 0;
CHECK_STACK (1);
--sp;
klass = mini_get_class (method, token, generic_context);
CHECK_TYPELOAD (klass);
- if (cfg->generic_sharing_context) {
- context_used = mono_class_check_context_used (klass);
-
- if (context_used & MONO_GENERIC_CONTEXT_USED_METHOD)
- GENERIC_SHARING_FAILURE (*ip);
- }
+ if (cfg->generic_sharing_context)
+ context_used = mono_class_check_context_used (klass);
if (generic_class_is_reference_type (cfg, klass)) {
*sp++ = val;
if (context_used) {
MonoInst *rgctx;
- if (mono_class_is_nullable (klass)) {
- GENERIC_SHARING_FAILURE (CEE_BOX);
- } else {
+ if (mono_class_is_nullable (klass)) {
+ GET_RGCTX (rgctx, context_used);
+ *sp++ = handle_box_nullable_from_inst (cfg, method, context_used, bblock, val,
+ ip, klass, generic_context, rgctx);
+ } else {
MonoInst *data;
int rgctx_info;
- GET_RGCTX (rgctx);
+ GET_RGCTX (rgctx, context_used);
if (cfg->opt & MONO_OPT_SHARED)
rgctx_info = MONO_RGCTX_INFO_KLASS;
else
rgctx_info = MONO_RGCTX_INFO_VTABLE;
- data = get_runtime_generic_context_ptr (cfg, method, bblock, klass,
- token, MINI_TOKEN_SOURCE_CLASS, generic_context,
- rgctx, rgctx_info, ip);
+ data = get_runtime_generic_context_ptr (cfg, method, context_used, bblock, klass,
+ generic_context, rgctx, rgctx_info, ip);
*sp++ = handle_box_from_inst (cfg, bblock, val, ip, klass, data);
}
inline_costs += 1;
break;
}
- case CEE_NEWARR: {
- gboolean shared_access = FALSE;
-
+ case CEE_NEWARR:
CHECK_STACK (1);
--sp;
klass = mini_get_class (method, token, generic_context);
CHECK_TYPELOAD (klass);
- if (cfg->generic_sharing_context) {
- int context_used = mono_class_check_context_used (klass);
-
- if (context_used & MONO_GENERIC_CONTEXT_USED_METHOD || klass->valuetype)
- GENERIC_SHARING_FAILURE (CEE_NEWARR);
-
- if (context_used)
- shared_access = TRUE;
- }
+ if (cfg->generic_sharing_context)
+ context_used = mono_class_check_context_used (klass);
- if (shared_access) {
- MonoInst *this = NULL, *rgctx;
- MonoInst *args [3];
+ if (context_used) {
+ MonoInst *rgctx, *args [3];
int temp;
- GENERIC_SHARING_FAILURE_IF_VALUETYPE_METHOD (*ip);
-
/* domain */
+ /* FIXME: what about domain-neutral code? */
NEW_DOMAINCONST (cfg, args [0]);
/* klass */
- if (!(method->flags & METHOD_ATTRIBUTE_STATIC))
- NEW_ARGLOAD (cfg, this, 0);
- rgctx = get_runtime_generic_context (cfg, method, this, ip);
- args [1] = get_runtime_generic_context_ptr (cfg, method, bblock, klass,
- token, MINI_TOKEN_SOURCE_CLASS, generic_context, rgctx, MONO_RGCTX_INFO_KLASS, ip);
+ GET_RGCTX (rgctx, context_used);
+ args [1] = get_runtime_generic_context_ptr (cfg, method, context_used, bblock, klass,
+ generic_context, rgctx, MONO_RGCTX_INFO_KLASS, ip);
/* array len */
args [2] = *sp;
}
inline_costs += 1;
break;
- }
case CEE_LDLEN:
CHECK_STACK (1);
--sp;
if (sp [2]->opcode == OP_PCONST && sp [2]->inst_p0 == NULL) {
MonoInst *load;
NEW_LDELEMA (cfg, load, sp, mono_defaults.object_class);
- MONO_INST_NEW (cfg, ins, stelem_to_stind [*ip - CEE_STELEM_I]);
+ MONO_INST_NEW (cfg, ins, CEE_STIND_REF);
ins->inst_left = load;
ins->inst_right = sp [2];
MONO_ADD_INS (bblock, ins);
++ip;
break;
}
- case CEE_REFANYVAL: {
- int context_used = 0;
-
+ case CEE_REFANYVAL:
CHECK_STACK (1);
--sp;
CHECK_OPSIZE (5);
CHECK_TYPELOAD (klass);
mono_class_init (klass);
+ /* Needed by the code generated in inssel.brg */
+ mono_get_got_var (cfg);
+
if (cfg->generic_sharing_context) {
context_used = mono_class_check_context_used (klass);
if (context_used && cfg->compile_aot)
ins->inst_left = *sp;
ins->klass = klass;
- GET_RGCTX (rgctx);
- ins->inst_right = get_runtime_generic_context_ptr (cfg, method,
- bblock, klass,
- token, MINI_TOKEN_SOURCE_CLASS, generic_context,
- rgctx, MONO_RGCTX_INFO_KLASS, ip);
+ GET_RGCTX (rgctx, context_used);
+ ins->inst_right = get_runtime_generic_context_ptr (cfg, method, context_used,
+ bblock, klass, generic_context, rgctx, MONO_RGCTX_INFO_KLASS, ip);
} else {
MONO_INST_NEW (cfg, ins, *ip);
ins->type = STACK_MP;
ip += 5;
*sp++ = ins;
break;
- }
case CEE_MKREFANY: {
MonoInst *loc;
- int context_used = 0;
CHECK_STACK (1);
--sp;
if (context_used) {
MonoInst *rgctx, *klass_type, *klass_klass, *loc_load;
- GET_RGCTX (rgctx);
- klass_klass = get_runtime_generic_context_ptr (cfg, method, bblock, klass,
- token, MINI_TOKEN_SOURCE_CLASS, generic_context,
- rgctx, MONO_RGCTX_INFO_KLASS, ip);
- GET_RGCTX (rgctx);
- klass_type = get_runtime_generic_context_ptr (cfg, method, bblock, klass,
- token, MINI_TOKEN_SOURCE_CLASS, generic_context,
- rgctx, MONO_RGCTX_INFO_TYPE, ip);
+ GET_RGCTX (rgctx, context_used);
+ klass_klass = get_runtime_generic_context_ptr (cfg, method, context_used, bblock, klass,
+ generic_context, rgctx, MONO_RGCTX_INFO_KLASS, ip);
+ GET_RGCTX (rgctx, context_used);
+ klass_type = get_runtime_generic_context_ptr (cfg, method, context_used, bblock, klass,
+ generic_context, rgctx, MONO_RGCTX_INFO_TYPE, ip);
NEW_TEMPLOADA (cfg, loc_load, loc->inst_c0);
case CEE_LDTOKEN: {
gpointer handle;
MonoClass *handle_class;
- int context_used = 0;
CHECK_STACK_OVF (1);
context_used = mono_method_check_context_used (handle);
else
g_assert_not_reached ();
-
- if (context_used & MONO_GENERIC_CONTEXT_USED_METHOD)
- GENERIC_SHARING_FAILURE (CEE_LDTOKEN);
}
if (cfg->opt & MONO_OPT_SHARED) {
int temp;
MonoInst *res, *store, *addr, *vtvar, *iargs [3];
+ int method_context_used;
+
+ if (cfg->generic_sharing_context)
+ method_context_used = mono_method_check_context_used (method);
+ else
+ method_context_used = 0;
vtvar = mono_compile_create_var (cfg, &handle_class->byval_arg, OP_LOCAL);
NEW_IMAGECONST (cfg, iargs [0], image);
NEW_ICONST (cfg, iargs [1], n);
- if (cfg->generic_sharing_context) {
+ if (method_context_used) {
MonoInst *rgctx;
- GET_RGCTX (rgctx);
- iargs [2] = get_runtime_generic_context_method (cfg, method, bblock, method,
+ GET_RGCTX (rgctx, method_context_used);
+ iargs [2] = get_runtime_generic_context_method (cfg, method, method_context_used,
+ bblock, method,
generic_context, rgctx, MONO_RGCTX_INFO_METHOD, ip);
temp = mono_emit_jit_icall (cfg, bblock, mono_ldtoken_wrapper_generic_shared,
iargs, ip);
MonoClass *tclass = mono_class_from_mono_type (handle);
mono_class_init (tclass);
if (context_used) {
- MonoInst *this, *rgctx;
+ MonoInst *rgctx;
g_assert (!cfg->compile_aot);
- if (!(method->flags & METHOD_ATTRIBUTE_STATIC))
- NEW_ARGLOAD (cfg, this, 0);
- rgctx = get_runtime_generic_context (cfg, method, this, ip);
- ins = get_runtime_generic_context_ptr (cfg, method, bblock, tclass,
- token, MINI_TOKEN_SOURCE_CLASS, generic_context,
- rgctx, MONO_RGCTX_INFO_REFLECTION_TYPE, ip);
+
+ GET_RGCTX (rgctx, context_used);
+ ins = get_runtime_generic_context_ptr (cfg, method, context_used, bblock, tclass,
+ generic_context, rgctx, MONO_RGCTX_INFO_REFLECTION_TYPE, ip);
} else if (cfg->compile_aot) {
+ /*
+ * FIXME: We would have to include the context into the
+ * aot constant too (tests/generic-array-type.2.exe).
+ */
+ if (generic_context)
+ cfg->disable_aot = TRUE;
NEW_TYPE_FROM_HANDLE_CONST (cfg, ins, image, n);
} else {
NEW_PCONST (cfg, ins, mono_type_get_object (cfg->domain, handle));
} else {
MonoInst *store, *addr, *vtvar;
- GENERIC_SHARING_FAILURE (CEE_LDTOKEN);
+ if (context_used) {
+ MonoInst *rgctx;
+
+ g_assert (!cfg->compile_aot);
- if (cfg->compile_aot)
+ GET_RGCTX (rgctx, context_used);
+ if (handle_class == mono_defaults.typehandle_class) {
+ ins = get_runtime_generic_context_ptr (cfg, method,
+ context_used, bblock,
+ mono_class_from_mono_type (handle), generic_context,
+ rgctx, MONO_RGCTX_INFO_TYPE, ip);
+ } else if (handle_class == mono_defaults.methodhandle_class) {
+ ins = get_runtime_generic_context_method (cfg, method,
+ context_used, bblock, handle, generic_context,
+ rgctx, MONO_RGCTX_INFO_METHOD, ip);
+ } else if (handle_class == mono_defaults.fieldhandle_class) {
+ ins = get_runtime_generic_context_field (cfg, method,
+ context_used, bblock, handle, generic_context,
+ rgctx, MONO_RGCTX_INFO_CLASS_FIELD, ip);
+ } else {
+ g_assert_not_reached ();
+ }
+ }
+ else if (cfg->compile_aot) {
NEW_LDTOKENCONST (cfg, ins, image, n);
- else
+ } else {
NEW_PCONST (cfg, ins, handle);
+ }
vtvar = mono_compile_create_var (cfg, &handle_class->byval_arg, OP_LOCAL);
NEW_TEMPLOADA (cfg, addr, vtvar->inst_c0);
NEW_INDSTORE (cfg, store, addr, ins, &mono_defaults.int_class->byval_arg);
token = read32 (ip + 2);
ptr = mono_method_get_wrapper_data (method, token);
- if (cfg->compile_aot && cfg->method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) {
+ if (cfg->compile_aot && (cfg->method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE || cfg->method->wrapper_type == MONO_WRAPPER_RUNTIME_INVOKE)) {
MonoMethod *wrapped = mono_marshal_method_from_wrapper (cfg->method);
if (wrapped && ptr != NULL && mono_lookup_internal_call (wrapped) == ptr) {
ip += 6;
break;
}
+
+ if ((method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) && (strstr (method->name, "__icall_wrapper_") == method->name)) {
+ MonoJitICallInfo *callinfo;
+ const char *icall_name;
+
+ icall_name = method->name + strlen ("__icall_wrapper_");
+ g_assert (icall_name);
+ callinfo = mono_find_jit_icall_by_name (icall_name);
+ g_assert (callinfo);
+
+ if (ptr == callinfo->func) {
+ /* Will be transformed into an AOTCONST later */
+ NEW_PCONST (cfg, ins, ptr);
+ *sp++ = ins;
+ ip += 6;
+ break;
+ }
+ }
+ }
+ /* FIXME: Generalize this */
+ if (cfg->compile_aot && ptr == mono_thread_interruption_request_flag ()) {
+ NEW_AOTCONST (cfg, ins, MONO_PATCH_INFO_INTERRUPTION_REQUEST_FLAG, NULL);
+ *sp++ = ins;
+ ip += 6;
+ break;
}
NEW_PCONST (cfg, ins, ptr);
*sp++ = ins;
cfg->disable_aot = 1;
break;
}
+ case CEE_MONO_ICALL_ADDR: {
+ MonoMethod *cmethod;
+
+ CHECK_STACK_OVF (1);
+ CHECK_OPSIZE (6);
+ token = read32 (ip + 2);
+
+ cmethod = mono_method_get_wrapper_data (method, token);
+
+ g_assert (cfg->compile_aot);
+
+ NEW_AOTCONST (cfg, ins, MONO_PATCH_INFO_ICALL_ADDR, cmethod);
+ *sp++ = ins;
+ ip += 6;
+ break;
+ }
case CEE_MONO_VTADDR:
CHECK_STACK (1);
--sp;
token = read32 (ip + 2);
/* Needed by the code generated in inssel.brg */
mono_get_got_var (cfg);
+
+#ifdef __i386__
+ /*
+ * The code generated for CCASTCLASS has too much register pressure
+ * (obj+vtable+ibitmap_byte_reg+iid_reg), leading to the usual
+ * branches-inside-bblocks problem.
+ */
+ cfg->disable_aot = TRUE;
+#endif
klass = (MonoClass *)mono_method_get_wrapper_data (method, token);
MONO_INST_NEW (cfg, ins, (ip [1] == CEE_MONO_CISINST) ? OP_CISINST : OP_CCASTCLASS);
MonoInst *argconst;
MonoMethod *cil_method, *ctor_method;
int temp;
- gboolean is_shared;
+ gboolean is_shared = FALSE;
CHECK_STACK_OVF (1);
CHECK_OPSIZE (6);
goto load_error;
mono_class_init (cmethod->klass);
- if (cfg->generic_sharing_context && mono_method_check_context_used (cmethod))
- GENERIC_SHARING_FAILURE (CEE_LDFTN);
+ if (cfg->generic_sharing_context)
+ context_used = mono_method_check_context_used (cmethod);
- is_shared = (cmethod->flags & METHOD_ATTRIBUTE_STATIC) &&
- (cmethod->klass->generic_class || cmethod->klass->generic_container) &&
- mono_class_generic_sharing_enabled (cmethod->klass);
+ if (mono_class_generic_sharing_enabled (cmethod->klass)) {
+ if ((cmethod->flags & METHOD_ATTRIBUTE_STATIC) &&
+ (cmethod->klass->generic_class ||
+ cmethod->klass->generic_container)) {
+ is_shared = TRUE;
+ }
+ if (cmethod->is_inflated && mono_method_get_context (cmethod)->method_inst)
+ is_shared = TRUE;
+ }
cil_method = cmethod;
if (!dont_verify && !cfg->skip_visibility && !mono_method_can_access_method (method, cmethod))
#if defined(MONO_ARCH_HAVE_CREATE_DELEGATE_TRAMPOLINE) && !defined(HAVE_WRITE_BARRIERS)
/* FIXME: SGEN support */
/* FIXME: handle shared static generic methods */
- if (!is_shared && (sp > stack_start) && (ip + 6 + 5 < end) && ip_in_bb (cfg, bblock, ip + 6) && (ip [6] == CEE_NEWOBJ) && (ctor_method = mini_get_method (cfg, method, read32 (ip + 7), NULL, generic_context)) && (ctor_method->klass->parent == mono_defaults.multicastdelegate_class)) {
+ /* FIXME: handle this in shared code */
+ if (!is_shared && !context_used && (sp > stack_start) && (ip + 6 + 5 < end) && ip_in_bb (cfg, bblock, ip + 6) && (ip [6] == CEE_NEWOBJ) && (ctor_method = mini_get_method (cfg, method, read32 (ip + 7), NULL, generic_context)) && (ctor_method->klass->parent == mono_defaults.multicastdelegate_class)) {
MonoInst *target_ins;
ip += 6;
handle_loaded_temps (cfg, bblock, stack_start, sp);
- if (is_shared)
+ if (context_used) {
+ MonoInst *rgctx;
+
+ if (is_shared)
+ cmethod = mono_marshal_get_static_rgctx_invoke (cmethod);
+
+ GET_RGCTX (rgctx, context_used);
+ argconst = get_runtime_generic_context_method (cfg, method, context_used,
+ bblock, cmethod,
+ generic_context, rgctx, MONO_RGCTX_INFO_METHOD, ip);
+ } else if (is_shared) {
NEW_METHODCONST (cfg, argconst, mono_marshal_get_static_rgctx_invoke (cmethod));
- else
+ } else {
NEW_METHODCONST (cfg, argconst, cmethod);
- if (method->wrapper_type != MONO_WRAPPER_SYNCHRONIZED)
- temp = mono_emit_jit_icall (cfg, bblock, mono_ldftn, &argconst, ip);
- else
- temp = mono_emit_jit_icall (cfg, bblock, mono_ldftn_nosync, &argconst, ip);
+ }
+ temp = mono_emit_jit_icall (cfg, bblock, mono_ldftn, &argconst, ip);
NEW_TEMPLOAD (cfg, *sp, temp);
sp ++;
goto load_error;
mono_class_init (cmethod->klass);
- if (cfg->generic_sharing_context && mono_method_check_context_used (cmethod))
- GENERIC_SHARING_FAILURE (CEE_LDVIRTFTN);
+ if (cfg->generic_sharing_context)
+ context_used = mono_method_check_context_used (cmethod);
if (mono_security_get_mode () == MONO_SECURITY_MODE_CAS) {
if (check_linkdemand (cfg, method, cmethod, bblock, ip))
--sp;
args [0] = *sp;
- NEW_METHODCONST (cfg, args [1], cmethod);
- temp = mono_emit_jit_icall (cfg, bblock, mono_ldvirtfn, args, ip);
+ if (context_used) {
+ MonoInst *rgctx;
+
+ GET_RGCTX (rgctx, context_used);
+ args [1] = get_runtime_generic_context_method (cfg, method, context_used,
+ bblock, cmethod,
+ generic_context, rgctx, MONO_RGCTX_INFO_METHOD, ip);
+ temp = mono_emit_jit_icall (cfg, bblock, mono_ldvirtfn_gshared, args, ip);
+ } else {
+ NEW_METHODCONST (cfg, args [1], cmethod);
+ temp = mono_emit_jit_icall (cfg, bblock, mono_ldvirtfn, args, ip);
+ }
NEW_TEMPLOAD (cfg, *sp, temp);
sp ++;
store->inst_i0 = sp [0];
store->inst_i1 = load;
} else {
- GENERIC_SHARING_FAILURE (CEE_INITOBJ);
handle_initobj (cfg, bblock, *sp, NULL, klass, stack_start, sp);
}
ip += 6;
break;
}
case CEE_SIZEOF:
- GENERIC_SHARING_FAILURE (CEE_SIZEOF);
-
CHECK_STACK_OVF (1);
CHECK_OPSIZE (6);
token = read32 (ip + 2);
/*
 * Per-pool bookkeeping for stack slot allocation.  One pool exists per
 * scalar type (and one per distinct value type): 'vtype' is the value type
 * served by the pool (presumably unset for scalar pools, which are
 * zero-allocated — confirm), 'active'/'inactive' hold the MonoMethodVar's
 * whose liveness intervals currently cover / do not cover the scan
 * position, and 'slots' holds freed frame offsets available for reuse.
 */
typedef struct {
MonoClass *vtype;
- GList *active;
+ GList *active, *inactive;
GSList *slots;
} StackSlotInfo;
-static inline GSList*
-g_slist_prepend_mempool (MonoMemPool *mp, GSList *list,
- gpointer data)
+/*
+ * compare_by_interval_start_pos_func:
+ *
+ *   GCompareFunc ordering MonoMethodVar's by the start position of their
+ * liveness interval.  Variables without any interval range sort after the
+ * ones that have one; an entry compares equal to itself.
+ */
+static gint
+compare_by_interval_start_pos_func (gconstpointer a, gconstpointer b)
+{
+ MonoMethodVar *v1 = (MonoMethodVar*)a;
+ MonoMethodVar *v2 = (MonoMethodVar*)b;
+
+ if (v1 == v2)
+ return 0;
+
+ if (v1->interval->range) {
+ if (v2->interval->range)
+ return v1->interval->range->from - v2->interval->range->from;
+ return -1;
+ }
+
+ return 1;
+}
+
+#if 0
+#define LSCAN_DEBUG(a) do { a; } while (0)
+#else
+#define LSCAN_DEBUG(a)
+#endif
+
+/*
+ * mono_allocate_stack_slots_full2:
+ *
+ *   Allocate stack frame offsets for the variables of CFG by scanning their
+ * liveness intervals in order of increasing start position, so variables
+ * with disjoint lifetimes can reuse the same slot.  BACKWARD selects the
+ * direction in which the frame grows.  On return, *STACK_SIZE is the total
+ * size and *STACK_ALIGN the alignment of the local area.  Returns a
+ * cfg-mempool allocated array mapping variable index -> frame offset, with
+ * -1 for variables which got no slot.
+ */
+static gint32*
+mono_allocate_stack_slots_full2 (MonoCompile *cfg, gboolean backward, guint32 *stack_size, guint32 *stack_align)
{
- GSList *new_list;
+ int i, slot, offset, size;
+ guint32 align;
+ MonoMethodVar *vmv;
+ MonoInst *inst;
+ gint32 *offsets;
+ GList *vars = NULL, *l, *unhandled;
+ StackSlotInfo *scalar_stack_slots, *vtype_stack_slots, *slot_info;
+ MonoType *t;
+ int nvtypes;
+
+ LSCAN_DEBUG (printf ("Allocate Stack Slots 2 for %s:\n", mono_method_full_name (cfg->method, TRUE)));
+
+ /* One slot pool per scalar type; value type pools are created lazily below. */
+ scalar_stack_slots = mono_mempool_alloc0 (cfg->mempool, sizeof (StackSlotInfo) * MONO_TYPE_PINNED);
+ vtype_stack_slots = NULL;
+ nvtypes = 0;
+
+ offsets = mono_mempool_alloc (cfg->mempool, sizeof (gint32) * cfg->num_varinfo);
+ for (i = 0; i < cfg->num_varinfo; ++i)
+ offsets [i] = -1;
+
+ /* Collect the variables which still need a stack slot. */
+ for (i = cfg->locals_start; i < cfg->num_varinfo; i++) {
+ inst = cfg->varinfo [i];
+ vmv = MONO_VARINFO (cfg, i);
+
+ if ((inst->flags & MONO_INST_IS_DEAD) || inst->opcode == OP_REGVAR || inst->opcode == OP_REGOFFSET)
+ continue;
+
+ vars = g_list_prepend (vars, vmv);
+ }
+
+ /* NOTE(review): g_list_copy () leaks the original list — only the sorted
+ * copy is freed at the end; g_list_sort (vars, ...) would suffice. */
+ vars = g_list_sort (g_list_copy (vars), compare_by_interval_start_pos_func);
+
+ /* Sanity check */
+ /*
+ i = 0;
+ for (unhandled = vars; unhandled; unhandled = unhandled->next) {
+ MonoMethodVar *current = unhandled->data;
+
+ if (current->interval->range) {
+ g_assert (current->interval->range->from >= i);
+ i = current->interval->range->from;
+ }
+ }
+ */
+
+ offset = 0;
+ *stack_align = 0;
+ /* Linear scan over the intervals, sorted by start position. */
+ for (unhandled = vars; unhandled; unhandled = unhandled->next) {
+ MonoMethodVar *current = unhandled->data;
+
+ vmv = current;
+ inst = cfg->varinfo [vmv->idx];
+
+ /* inst->backend.is_pinvoke indicates native sized value types, this is used by the
+ * pinvoke wrappers when they call functions returning structures */
+ if (inst->backend.is_pinvoke && MONO_TYPE_ISSTRUCT (inst->inst_vtype) && inst->inst_vtype->type != MONO_TYPE_TYPEDBYREF)
+ size = mono_class_native_size (inst->inst_vtype->data.klass, &align);
+ else {
+ int ialign;
+
+ size = mono_type_size (inst->inst_vtype, &ialign);
+ align = ialign;
+ }
+
+ /* Pick the slot pool this variable's type belongs to. */
+ t = mono_type_get_underlying_type (inst->inst_vtype);
+ switch (t->type) {
+ case MONO_TYPE_GENERICINST:
+ if (!mono_type_generic_inst_is_valuetype (t)) {
+ slot_info = &scalar_stack_slots [t->type];
+ break;
+ }
+ /* Fall through */
+ case MONO_TYPE_VALUETYPE:
+ /* One pool per distinct value type class, allocated on demand. */
+ if (!vtype_stack_slots)
+ vtype_stack_slots = mono_mempool_alloc0 (cfg->mempool, sizeof (StackSlotInfo) * 256);
+ for (i = 0; i < nvtypes; ++i)
+ if (t->data.klass == vtype_stack_slots [i].vtype)
+ break;
+ if (i < nvtypes)
+ slot_info = &vtype_stack_slots [i];
+ else {
+ g_assert (nvtypes < 256);
+ vtype_stack_slots [nvtypes].vtype = t->data.klass;
+ slot_info = &vtype_stack_slots [nvtypes];
+ nvtypes ++;
+ }
+ break;
+ case MONO_TYPE_CLASS:
+ case MONO_TYPE_OBJECT:
+ case MONO_TYPE_ARRAY:
+ case MONO_TYPE_SZARRAY:
+ case MONO_TYPE_STRING:
+ case MONO_TYPE_PTR:
+ case MONO_TYPE_I:
+ case MONO_TYPE_U:
+ /* NOTE(review): on 32 bit targets the cases above fall through to
+ * 'default' (per-type pools) because the shared assignment and
+ * break sit inside the #else branch — presumably the #endif was
+ * meant to come before them, as on 64 bit; confirm upstream. */
+#if SIZEOF_VOID_P == 4
+ case MONO_TYPE_I4:
+#else
+ case MONO_TYPE_I8:
+ /* Share non-float stack slots of the same size */
+ slot_info = &scalar_stack_slots [MONO_TYPE_CLASS];
+ break;
+#endif
+ default:
+ slot_info = &scalar_stack_slots [t->type];
+ }
+
+ /* 0xffffff means "no reusable slot found yet". */
+ slot = 0xffffff;
+ if (cfg->comp_done & MONO_COMP_LIVENESS) {
+ int pos;
+ gboolean changed;
+
+ //printf ("START %2d %08x %08x\n", vmv->idx, vmv->range.first_use.abs_pos, vmv->range.last_use.abs_pos);
+
+ if (!current->interval->range) {
+ if (inst->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT))
+ pos = ~0;
+ else {
+ /* Dead */
+ inst->flags |= MONO_INST_IS_DEAD;
+ continue;
+ }
+ }
+ else
+ pos = current->interval->range->from;
+
+ LSCAN_DEBUG (printf ("process R%d ", inst->dreg));
+ if (current->interval->range)
+ LSCAN_DEBUG (mono_linterval_print (current->interval));
+ LSCAN_DEBUG (printf ("\n"));
+
+ /* Check for intervals in active which expired or inactive */
+ changed = TRUE;
+ /* FIXME: Optimize this */
+ while (changed) {
+ changed = FALSE;
+ for (l = slot_info->active; l != NULL; l = l->next) {
+ MonoMethodVar *v = (MonoMethodVar*)l->data;
+
+ if (v->interval->last_range->to < pos) {
+ /* Expired: its offset becomes reusable. */
+ slot_info->active = g_list_delete_link (slot_info->active, l);
+ slot_info->slots = g_slist_prepend_mempool (cfg->mempool, slot_info->slots, GINT_TO_POINTER (offsets [v->idx]));
+ LSCAN_DEBUG (printf ("Interval R%d has expired, adding 0x%x to slots\n", cfg->varinfo [v->idx]->dreg, offsets [v->idx]));
+ changed = TRUE;
+ break;
+ }
+ else if (!mono_linterval_covers (v->interval, pos)) {
+ slot_info->inactive = g_list_append (slot_info->inactive, v);
+ slot_info->active = g_list_delete_link (slot_info->active, l);
+ LSCAN_DEBUG (printf ("Interval R%d became inactive\n", cfg->varinfo [v->idx]->dreg));
+ changed = TRUE;
+ break;
+ }
+ }
+ }
+
+ /* Check for intervals in inactive which expired or active */
+ changed = TRUE;
+ /* FIXME: Optimize this */
+ while (changed) {
+ changed = FALSE;
+ for (l = slot_info->inactive; l != NULL; l = l->next) {
+ MonoMethodVar *v = (MonoMethodVar*)l->data;
+
+ if (v->interval->last_range->to < pos) {
+ slot_info->inactive = g_list_delete_link (slot_info->inactive, l);
+ // FIXME: Enabling this seems to cause impossible to debug crashes
+ //slot_info->slots = g_slist_prepend_mempool (cfg->mempool, slot_info->slots, GINT_TO_POINTER (offsets [v->idx]));
+ LSCAN_DEBUG (printf ("Interval R%d has expired, adding 0x%x to slots\n", cfg->varinfo [v->idx]->dreg, offsets [v->idx]));
+ changed = TRUE;
+ break;
+ }
+ else if (mono_linterval_covers (v->interval, pos)) {
+ slot_info->active = g_list_append (slot_info->active, v);
+ slot_info->inactive = g_list_delete_link (slot_info->inactive, l);
+ LSCAN_DEBUG (printf ("\tInterval R%d became active\n", cfg->varinfo [v->idx]->dreg));
+ changed = TRUE;
+ break;
+ }
+ }
+ }
+
+ /*
+ * This also handles the case when the variable is used in an
+ * exception region, as liveness info is not computed there.
+ */
+ /*
+ * FIXME: All valuetypes are marked as INDIRECT because of LDADDR
+ * opcodes.
+ */
+ if (! (inst->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT))) {
+ /* Reuse a freed slot when one is available. */
+ if (slot_info->slots) {
+ slot = GPOINTER_TO_INT (slot_info->slots->data);
+
+ slot_info->slots = slot_info->slots->next;
+ }
+
+ /* FIXME: We might want to consider the inactive intervals as well if slot_info->slots is empty */
+
+ slot_info->active = mono_varlist_insert_sorted (cfg, slot_info->active, vmv, TRUE);
+ }
+ }
+
+#if 0
+ {
+ static int count = 0;
+ count ++;
+
+ if (count == atoi (getenv ("COUNT3")))
+ printf ("LAST: %s\n", mono_method_full_name (cfg->method, TRUE));
+ if (count > atoi (getenv ("COUNT3")))
+ slot = 0xffffff;
+ else {
+ mono_print_tree_nl (inst);
+ }
+ }
+#endif
+
+ LSCAN_DEBUG (printf ("R%d %s -> 0x%x\n", inst->dreg, mono_type_full_name (t), slot));
+
+ /* No reusable slot: carve a fresh one out of the frame. */
+ if (slot == 0xffffff) {
+ /*
+ * Always allocate valuetypes to sizeof (gpointer) to allow more
+ * efficient copying (and to work around the fact that OP_MEMCPY
+ * and OP_MEMSET ignores alignment).
+ */
+ if (MONO_TYPE_ISSTRUCT (t))
+ align = sizeof (gpointer);
+
+ if (backward) {
+ offset += size;
+ offset += align - 1;
+ offset &= ~(align - 1);
+ slot = offset;
+ }
+ else {
+ offset += align - 1;
+ offset &= ~(align - 1);
+ slot = offset;
+ offset += size;
+ }
+
+ /* NOTE(review): only the first allocation's alignment is recorded. */
+ if (*stack_align == 0)
+ *stack_align = align;
+ }
+
+ offsets [vmv->idx] = slot;
+ }
+ g_list_free (vars);
+ /* NOTE(review): the per-pool 'inactive' lists built above are never
+ * freed here — possible leak; confirm. */
+ for (i = 0; i < MONO_TYPE_PINNED; ++i) {
+ if (scalar_stack_slots [i].active)
+ g_list_free (scalar_stack_slots [i].active);
+ }
+ for (i = 0; i < nvtypes; ++i) {
+ if (vtype_stack_slots [i].active)
+ g_list_free (vtype_stack_slots [i].active);
+ }
- new_list = mono_mempool_alloc (mp, sizeof (GSList));
- new_list->data = data;
- new_list->next = list;
+ mono_jit_stats.locals_stack_size += offset;
- return new_list;
+ *stack_size = offset;
+ return offsets;
}
/*
MonoType *t;
int nvtypes;
+ if ((cfg->num_varinfo > 0) && MONO_VARINFO (cfg, 0)->interval)
+ return mono_allocate_stack_slots_full2 (cfg, backward, stack_size, stack_align);
+
scalar_stack_slots = mono_mempool_alloc0 (cfg->mempool, sizeof (StackSlotInfo) * MONO_TYPE_PINNED);
vtype_stack_slots = NULL;
nvtypes = 0;
int i, j;
char *code;
MonoBasicBlock *bb;
+ MonoInst *c;
g_print ("IR code for method %s\n", mono_method_full_name (cfg->method, TRUE));
for (i = 0; i < cfg->num_bblocks; ++i) {
- MonoInst *c;
-
bb = cfg->bblocks [i];
/*if (bb->cil_code) {
char* code1, *code2;
g_free (code2);
} else*/
code = g_strdup ("\n");
- g_print ("\nBB%d DFN%d (len: %d): %s", bb->block_num, i, bb->cil_length, code);
+ g_print ("\nBB%d (%d) (len: %d): %s", bb->block_num, i, bb->cil_length, code);
MONO_BB_FOR_EACH_INS (bb, c) {
- mono_print_tree (c);
- g_print ("\n");
+ if (cfg->new_ir) {
+ mono_print_ins_index (-1, c);
+ } else {
+ mono_print_tree (c);
+ g_print ("\n");
+ }
}
g_print ("\tprev:");
MONO_ADD_INS (bb, inst);
}
+/*
+ * mono_bblock_insert_after_ins:
+ *
+ *   Insert INS_TO_INSERT into BB's instruction list right after INS.
+ * If INS is NULL, INS_TO_INSERT becomes the new list head.  bb->last_ins
+ * is updated when the insertion point was the last instruction (or the
+ * list was empty).
+ * NOTE(review): the head-insert path assumes ins_to_insert->prev is
+ * already NULL (e.g. a freshly allocated instruction) — confirm callers.
+ */
+void
+mono_bblock_insert_after_ins (MonoBasicBlock *bb, MonoInst *ins, MonoInst *ins_to_insert)
+{
+ if (ins == NULL) {
+ /* Insert at the head; the old head (possibly NULL) becomes the successor. */
+ ins = bb->code;
+ bb->code = ins_to_insert;
+
+ /* Link with next */
+ ins_to_insert->next = ins;
+ if (ins)
+ ins->prev = ins_to_insert;
+
+ if (bb->last_ins == NULL)
+ bb->last_ins = ins_to_insert;
+ } else {
+ /* Link with next */
+ ins_to_insert->next = ins->next;
+ if (ins->next)
+ ins->next->prev = ins_to_insert;
+
+ /* Link with previous */
+ ins->next = ins_to_insert;
+ ins_to_insert->prev = ins;
+
+ /* Inserting after the tail moves the tail. */
+ if (bb->last_ins == ins)
+ bb->last_ins = ins_to_insert;
+ }
+}
+
+/*
+ * mono_bblock_insert_before_ins:
+ *
+ *   Insert INS_TO_INSERT into BB's instruction list right before INS.
+ * The INS == NULL case hits NOT_IMPLEMENTED.
+ */
+void
+mono_bblock_insert_before_ins (MonoBasicBlock *bb, MonoInst *ins, MonoInst *ins_to_insert)
+{
+ if (ins == NULL) {
+ /* NOTE(review): the code below NOT_IMPLEMENTED is unreachable and,
+ * unlike mono_bblock_insert_after_ins, never fixes up ->prev links. */
+ NOT_IMPLEMENTED;
+ ins = bb->code;
+ bb->code = ins_to_insert;
+ ins_to_insert->next = ins;
+ if (bb->last_ins == NULL)
+ bb->last_ins = ins_to_insert;
+ } else {
+ /* Link with previous */
+ if (ins->prev)
+ ins->prev->next = ins_to_insert;
+ ins_to_insert->prev = ins->prev;
+
+ /* Link with next */
+ ins->prev = ins_to_insert;
+ ins_to_insert->next = ins;
+
+ /* Inserting before the head makes ins_to_insert the new head. */
+ if (bb->code == ins)
+ bb->code = ins_to_insert;
+ }
+}
+
+/*
+ * mono_verify_bblock:
+ *
+ * Verify that the next and prev pointers are consistent inside the instructions in BB.
+ */
+void
+mono_verify_bblock (MonoBasicBlock *bb)
+{
+ MonoInst *cur = bb->code;
+ MonoInst *last_seen = NULL;
+
+ /* Walk forward, checking each back pointer against the node we came from. */
+ while (cur) {
+ g_assert (cur->prev == last_seen);
+ last_seen = cur;
+ cur = cur->next;
+ }
+ /* The recorded last instruction must really be the tail of the list. */
+ if (bb->last_ins)
+ g_assert (!bb->last_ins->next);
+}
+
+/*
+ * mono_verify_cfg:
+ *
+ * Perform consistency checks on the JIT data structures and the IR
+ */
+void
+mono_verify_cfg (MonoCompile *cfg)
+{
+ /* Run the per-bblock consistency check over every bblock of the method. */
+ MonoBasicBlock *bb = cfg->bb_entry;
+
+ while (bb) {
+ mono_verify_bblock (bb);
+ bb = bb->next_bb;
+ }
+}
+
void
mono_destroy_compile (MonoCompile *cfg)
{
mono_mempool_destroy (cfg->mempool);
g_list_free (cfg->ldstr_list);
g_hash_table_destroy (cfg->token_info_hash);
+ if (cfg->abs_patches)
+ g_hash_table_destroy (cfg->abs_patches);
g_free (cfg->varinfo);
g_free (cfg->vars);
cfg->patch_info = ji;
}
+/*
+ * mono_patch_info_list_prepend:
+ *
+ *   Allocate a new patch info entry (with g_new0, so the caller owns it —
+ * unlike the mempool-based duplication helpers) describing TARGET at IP,
+ * and push it onto the front of LIST.  Returns the new list head.
+ */
+MonoJumpInfo *
+mono_patch_info_list_prepend (MonoJumpInfo *list, int ip, MonoJumpInfoType type, gconstpointer target)
+{
+ MonoJumpInfo *entry = g_new0 (MonoJumpInfo, 1);
+
+ entry->type = type;
+ entry->ip.i = ip;
+ entry->data.target = target;
+ entry->next = list;
+
+ return entry;
+}
+
void
mono_remove_patch_info (MonoCompile *cfg, int ip)
{
res->data.table = mono_mempool_alloc (mp, sizeof (MonoJumpInfoBBTable));
memcpy (res->data.table, patch_info->data.table, sizeof (MonoJumpInfoBBTable));
break;
+ case MONO_PATCH_INFO_RGCTX_FETCH:
+ res->data.rgctx_entry = mono_mempool_alloc (mp, sizeof (MonoJumpInfoRgctxEntry));
+ memcpy (res->data.rgctx_entry, patch_info->data.rgctx_entry, sizeof (MonoJumpInfoRgctxEntry));
+ res->data.rgctx_entry->data = mono_patch_info_dup_mp (mp, res->data.rgctx_entry->data);
+ break;
default:
break;
}
case MONO_PATCH_INFO_CLASS:
case MONO_PATCH_INFO_IID:
case MONO_PATCH_INFO_ADJUSTED_IID:
- return (ji->type << 8) | (gssize)ji->data.klass;
- case MONO_PATCH_INFO_FIELD:
- case MONO_PATCH_INFO_SFLDA:
- return (ji->type << 8) | (gssize)ji->data.field;
+ case MONO_PATCH_INFO_CLASS_INIT:
case MONO_PATCH_INFO_METHODCONST:
case MONO_PATCH_INFO_METHOD:
case MONO_PATCH_INFO_METHOD_JUMP:
- return (ji->type << 8) | (gssize)ji->data.method;
case MONO_PATCH_INFO_IMAGE:
- return (ji->type << 8) | (gssize)ji->data.image;
+ case MONO_PATCH_INFO_INTERNAL_METHOD:
+ case MONO_PATCH_INFO_JIT_ICALL_ADDR:
+ case MONO_PATCH_INFO_FIELD:
+ case MONO_PATCH_INFO_SFLDA:
+ return (ji->type << 8) | (gssize)ji->data.target;
default:
return (ji->type << 8);
}
return 0;
break;
default:
- if (ji1->data.name != ji2->data.name)
+ if (ji1->data.target != ji2->data.target)
return 0;
break;
}
switch (patch_info->type) {
case MONO_PATCH_INFO_BB:
+ g_assert (patch_info->data.bb->native_offset);
target = patch_info->data.bb->native_offset + code;
break;
case MONO_PATCH_INFO_ABS:
target = mono_icall_get_wrapper (mi);
break;
}
- case MONO_PATCH_INFO_METHOD_JUMP: {
- GSList *list;
-
- /* get the trampoline to the method from the domain */
- target = mono_create_jump_trampoline (domain, patch_info->data.method, TRUE);
- if (!domain->jump_target_hash)
- domain->jump_target_hash = g_hash_table_new (NULL, NULL);
- list = g_hash_table_lookup (domain->jump_target_hash, patch_info->data.method);
- list = g_slist_prepend (list, ip);
- g_hash_table_insert (domain->jump_target_hash, patch_info->data.method, list);
+ case MONO_PATCH_INFO_METHOD_JUMP:
+ target = mono_create_jump_trampoline (domain, patch_info->data.method, FALSE);
break;
- }
case MONO_PATCH_INFO_METHOD:
if (patch_info->data.method == method) {
target = code;
} else {
/* get the trampoline to the method from the domain */
- if (method && method->wrapper_type == MONO_WRAPPER_STATIC_RGCTX_INVOKE)
- target = mono_ldftn_nosync (patch_info->data.method);
- else
+ if (method && method->wrapper_type == MONO_WRAPPER_STATIC_RGCTX_INVOKE) {
+ target = mono_create_jit_trampoline_in_domain (mono_domain_get (),
+ patch_info->data.method);
+ } else {
target = mono_create_jit_trampoline (patch_info->data.method);
+ }
}
break;
case MONO_PATCH_INFO_SWITCH: {
jump_table = mono_code_manager_reserve (mono_dynamic_code_hash_lookup (domain, method)->code_mp, sizeof (gpointer) * patch_info->data.table->table_size);
} else {
mono_domain_lock (domain);
- jump_table = mono_code_manager_reserve (domain->code_mp, sizeof (gpointer) * patch_info->data.table->table_size);
+ if (mono_aot_only)
+ jump_table = mono_mempool_alloc (domain->mp, sizeof (gpointer) * patch_info->data.table->table_size);
+ else
+ jump_table = mono_code_manager_reserve (domain->code_mp, sizeof (gpointer) * patch_info->data.table->table_size);
mono_domain_unlock (domain);
}
- for (i = 0; i < patch_info->data.table->table_size; i++) {
+ for (i = 0; i < patch_info->data.table->table_size; i++)
jump_table [i] = code + GPOINTER_TO_INT (patch_info->data.table->table [i]);
- }
target = jump_table;
break;
}
break;
case MONO_PATCH_INFO_ICALL_ADDR:
target = mono_lookup_internal_call (patch_info->data.method);
+ /* run_cctors == 0 -> AOT */
+ if (!target && run_cctors)
+ g_error ("Unregistered icall '%s'\n", mono_method_full_name (patch_info->data.method, TRUE));
+ break;
+ case MONO_PATCH_INFO_JIT_ICALL_ADDR: {
+ MonoJitICallInfo *mi = mono_find_jit_icall_by_name (patch_info->data.name);
+ if (!mi) {
+ g_warning ("unknown MONO_PATCH_INFO_JIT_ICALL_ADDR %s", patch_info->data.name);
+ g_assert_not_reached ();
+ }
+ target = mi->func;
+ break;
+ }
+ case MONO_PATCH_INFO_INTERRUPTION_REQUEST_FLAG:
+ target = mono_thread_interruption_request_flag ();
+ break;
+ case MONO_PATCH_INFO_METHOD_RGCTX:
+ target = mono_method_lookup_rgctx (mono_class_vtable (domain, patch_info->data.method->klass), mini_method_get_context (patch_info->data.method)->method_inst);
break;
case MONO_PATCH_INFO_BB_OVF:
case MONO_PATCH_INFO_EXC_OVF:
case MONO_PATCH_INFO_GOT_OFFSET:
case MONO_PATCH_INFO_NONE:
break;
+ case MONO_PATCH_INFO_RGCTX_FETCH: {
+ MonoJumpInfoRgctxEntry *entry = patch_info->data.rgctx_entry;
+ guint32 slot = -1;
+
+ switch (entry->data->type) {
+ case MONO_PATCH_INFO_CLASS:
+ slot = mono_method_lookup_or_register_other_info (entry->method, entry->in_mrgctx, &entry->data->data.klass->byval_arg, entry->info_type, mono_method_get_context (entry->method));
+ break;
+ case MONO_PATCH_INFO_METHOD:
+ case MONO_PATCH_INFO_METHODCONST:
+ slot = mono_method_lookup_or_register_other_info (entry->method, entry->in_mrgctx, entry->data->data.method, entry->info_type, mono_method_get_context (entry->method));
+ break;
+ case MONO_PATCH_INFO_FIELD:
+ slot = mono_method_lookup_or_register_other_info (entry->method, entry->in_mrgctx, entry->data->data.field, entry->info_type, mono_method_get_context (entry->method));
+ break;
+ default:
+ g_assert_not_reached ();
+ break;
+ }
+
+ target = mono_create_rgctx_lazy_fetch_trampoline (slot);
+ break;
+ }
+ case MONO_PATCH_INFO_GENERIC_CLASS_INIT:
+ target = mono_create_generic_class_init_trampoline ();
+ break;
default:
g_assert_not_reached ();
}
}
}
-static void
-nullify_basic_block (MonoBasicBlock *bb)
-{
- bb->in_count = 0;
- bb->out_count = 0;
- bb->in_bb = NULL;
- bb->out_bb = NULL;
- bb->next_bb = NULL;
- MONO_INST_LIST_INIT (&bb->ins_list);
- bb->cil_code = NULL;
-}
-
-static void
-replace_out_block (MonoBasicBlock *bb, MonoBasicBlock *orig, MonoBasicBlock *repl)
-{
- int i;
-
- for (i = 0; i < bb->out_count; i++) {
- MonoBasicBlock *ob = bb->out_bb [i];
- if (ob == orig) {
- if (!repl) {
- if (bb->out_count > 1) {
- bb->out_bb [i] = bb->out_bb [bb->out_count - 1];
- }
- bb->out_count--;
- } else {
- bb->out_bb [i] = repl;
- }
- }
- }
-}
-
-static void
-replace_in_block (MonoBasicBlock *bb, MonoBasicBlock *orig, MonoBasicBlock *repl)
-{
- int i;
-
- for (i = 0; i < bb->in_count; i++) {
- MonoBasicBlock *ib = bb->in_bb [i];
- if (ib == orig) {
- if (!repl) {
- if (bb->in_count > 1) {
- bb->in_bb [i] = bb->in_bb [bb->in_count - 1];
- }
- bb->in_count--;
- } else {
- bb->in_bb [i] = repl;
- }
- }
- }
-}
-
-static void
-replace_out_block_in_code (MonoBasicBlock *bb, MonoBasicBlock *orig, MonoBasicBlock *repl) {
- MonoInst *inst;
-
- MONO_BB_FOR_EACH_INS (bb, inst) {
- if (inst->opcode == OP_CALL_HANDLER) {
- if (inst->inst_target_bb == orig)
- inst->inst_target_bb = repl;
- }
- }
-
- inst = mono_inst_list_last (&bb->ins_list);
- if (!inst)
- return;
-
- switch (inst->opcode) {
- case OP_BR:
- if (inst->inst_target_bb == orig)
- inst->inst_target_bb = repl;
- break;
- case OP_SWITCH: {
- int i;
- int n = GPOINTER_TO_INT (inst->klass);
- for (i = 0; i < n; i++ ) {
- if (inst->inst_many_bb [i] == orig)
- inst->inst_many_bb [i] = repl;
- }
- break;
- }
- case CEE_BNE_UN:
- case CEE_BEQ:
- case CEE_BLT:
- case CEE_BLT_UN:
- case CEE_BGT:
- case CEE_BGT_UN:
- case CEE_BGE:
- case CEE_BGE_UN:
- case CEE_BLE:
- case CEE_BLE_UN:
- if (inst->inst_true_bb == orig)
- inst->inst_true_bb = repl;
- if (inst->inst_false_bb == orig)
- inst->inst_false_bb = repl;
- break;
- default:
- break;
- }
-}
-
-static void
-replace_basic_block (MonoBasicBlock *bb, MonoBasicBlock *orig, MonoBasicBlock *repl)
-{
- int i, j;
-
- for (i = 0; i < bb->out_count; i++) {
- MonoBasicBlock *ob = bb->out_bb [i];
- for (j = 0; j < ob->in_count; j++) {
- if (ob->in_bb [j] == orig) {
- ob->in_bb [j] = repl;
- }
- }
- }
-
-}
-
-/**
- * Check if a bb is useless (is just made of NOPs and ends with an
- * unconditional branch, or nothing).
- * If it is so, unlink it from the CFG and nullify it, and return TRUE.
- * Otherwise, return FALSE;
- */
-static gboolean
-remove_block_if_useless (MonoCompile *cfg, MonoBasicBlock *bb, MonoBasicBlock *previous_bb) {
- MonoBasicBlock *target_bb = NULL;
- MonoInst *inst;
-
- /* Do not touch handlers */
- if (bb->region != -1) {
- bb->not_useless = TRUE;
- return FALSE;
- }
-
- MONO_BB_FOR_EACH_INS (bb, inst) {
- switch (inst->opcode) {
- case OP_NOP:
- break;
- case OP_BR:
- target_bb = inst->inst_target_bb;
- break;
- default:
- bb->not_useless = TRUE;
- return FALSE;
- }
- }
-
- if (target_bb == NULL) {
- if ((bb->out_count == 1) && (bb->out_bb [0] == bb->next_bb)) {
- target_bb = bb->next_bb;
- } else {
- /* Do not touch empty BBs that do not "fall through" to their next BB (like the exit BB) */
- return FALSE;
- }
- }
-
- /* Do not touch BBs following a switch (they are the "default" branch) */
- inst = mono_inst_list_last (&previous_bb->ins_list);
- if (inst && inst->opcode == OP_SWITCH)
- return FALSE;
-
- /* Do not touch BBs following the entry BB and jumping to something that is not */
- /* thiry "next" bb (the entry BB cannot contain the branch) */
- if ((previous_bb == cfg->bb_entry) && (bb->next_bb != target_bb)) {
- return FALSE;
- }
-
- /*
- * Do not touch BBs following a try block as the code in
- * mini_method_compile needs them to compute the length of the try block.
- */
- if (MONO_BBLOCK_IS_IN_REGION (previous_bb, MONO_REGION_TRY))
- return FALSE;
-
- /* Check that there is a target BB, and that bb is not an empty loop (Bug 75061) */
- if ((target_bb != NULL) && (target_bb != bb)) {
- MonoInst *last_ins;
- int i;
-
- if (cfg->verbose_level > 1) {
- printf ("remove_block_if_useless, removed BB%d\n", bb->block_num);
- }
-
- /* unlink_bblock () modifies the bb->in_bb array so can't use a for loop here */
- while (bb->in_count) {
- MonoBasicBlock *in_bb = bb->in_bb [0];
- mono_unlink_bblock (cfg, in_bb, bb);
- link_bblock (cfg, in_bb, target_bb);
- replace_out_block_in_code (in_bb, bb, target_bb);
- }
-
- mono_unlink_bblock (cfg, bb, target_bb);
-
- last_ins = mono_inst_list_last (&previous_bb->ins_list);
-
- if ((previous_bb != cfg->bb_entry) &&
- (previous_bb->region == bb->region) &&
- ((last_ins == NULL) ||
- ((last_ins->opcode != OP_BR) &&
- (!(MONO_IS_COND_BRANCH_OP (last_ins))) &&
- (last_ins->opcode != OP_SWITCH)))) {
- for (i = 0; i < previous_bb->out_count; i++) {
- if (previous_bb->out_bb [i] == target_bb) {
- MonoInst *jump;
- MONO_INST_NEW (cfg, jump, OP_BR);
- MONO_ADD_INS (previous_bb, jump);
- jump->cil_code = previous_bb->cil_code;
- jump->inst_target_bb = target_bb;
- break;
- }
- }
- }
-
- previous_bb->next_bb = bb->next_bb;
- nullify_basic_block (bb);
-
- return TRUE;
- } else {
- return FALSE;
- }
-}
-
-static void
-merge_basic_blocks (MonoBasicBlock *bb, MonoBasicBlock *bbn)
-{
- MonoInst *last_ins;
-
- bb->out_count = bbn->out_count;
- bb->out_bb = bbn->out_bb;
-
- replace_basic_block (bb, bbn, bb);
-
- last_ins = mono_inst_list_last (&bb->ins_list);
-
- /* Nullify branch at the end of bb */
- if (last_ins && MONO_IS_BRANCH_OP (last_ins))
- last_ins->opcode = OP_NOP;
-
- MONO_INST_LIST_SPLICE_TAIL_INIT (&bbn->ins_list, &bb->ins_list);
-
- bb->next_bb = bbn->next_bb;
- nullify_basic_block (bbn);
-}
-
-static void
-move_basic_block_to_end (MonoCompile *cfg, MonoBasicBlock *bb)
-{
- MonoBasicBlock *bbn, *next;
- MonoInst *last_ins;
-
- next = bb->next_bb;
-
- /* Find the previous */
- for (bbn = cfg->bb_entry; bbn->next_bb && bbn->next_bb != bb; bbn = bbn->next_bb)
- ;
- if (bbn->next_bb) {
- bbn->next_bb = bb->next_bb;
- }
-
- /* Find the last */
- for (bbn = cfg->bb_entry; bbn->next_bb; bbn = bbn->next_bb)
- ;
- bbn->next_bb = bb;
- bb->next_bb = NULL;
-
- last_ins = mono_inst_list_last (&bb->ins_list);
-
- /* Add a branch */
- if (next && (!last_ins || (last_ins->opcode != OP_NOT_REACHED))) {
- MonoInst *ins;
-
- MONO_INST_NEW (cfg, ins, OP_BR);
- MONO_ADD_INS (bb, ins);
- link_bblock (cfg, bb, next);
- ins->inst_target_bb = next;
- }
-}
-
-/* checks that a and b represent the same instructions, conservatively,
- * it can return FALSE also for two trees that are equal.
- * FIXME: also make sure there are no side effects.
- */
-static int
-same_trees (MonoInst *a, MonoInst *b)
-{
- int arity;
- if (a->opcode != b->opcode)
- return FALSE;
- arity = mono_burg_arity [a->opcode];
- if (arity == 1) {
- if (a->ssa_op == b->ssa_op && a->ssa_op == MONO_SSA_LOAD && a->inst_i0 == b->inst_i0)
- return TRUE;
- return same_trees (a->inst_left, b->inst_left);
- } else if (arity == 2) {
- return same_trees (a->inst_left, b->inst_left) && same_trees (a->inst_right, b->inst_right);
- } else if (arity == 0) {
- switch (a->opcode) {
- case OP_ICONST:
- return a->inst_c0 == b->inst_c0;
- default:
- return FALSE;
- }
- }
- return FALSE;
-}
-
-static int
-get_unsigned_condbranch (int opcode)
-{
- switch (opcode) {
- case CEE_BLE: return CEE_BLE_UN;
- case CEE_BLT: return CEE_BLT_UN;
- case CEE_BGE: return CEE_BGE_UN;
- case CEE_BGT: return CEE_BGT_UN;
- }
- g_assert_not_reached ();
- return 0;
-}
-
-static int
-tree_is_unsigned (MonoInst* ins) {
- switch (ins->opcode) {
- case OP_ICONST:
- return (int)ins->inst_c0 >= 0;
- /* array lengths are positive as are string sizes */
- case CEE_LDLEN:
- case OP_STRLEN:
- return TRUE;
- case CEE_CONV_U1:
- case CEE_CONV_U2:
- case CEE_CONV_U4:
- case CEE_CONV_OVF_U1:
- case CEE_CONV_OVF_U2:
- case CEE_CONV_OVF_U4:
- return TRUE;
- case CEE_LDIND_U1:
- case CEE_LDIND_U2:
- case CEE_LDIND_U4:
- return TRUE;
- default:
- return FALSE;
- }
-}
-
-/* check if an unsigned compare can be used instead of two signed compares
- * for (val < 0 || val > limit) conditionals.
- * Returns TRUE if the optimization has been applied.
- * Note that this can't be applied if the second arg is not positive...
- */
-static int
-try_unsigned_compare (MonoCompile *cfg, MonoBasicBlock *bb, MonoInst *bb_last)
-{
- MonoBasicBlock *truet, *falset;
- MonoInst *cmp_inst = bb_last->inst_left;
- MonoInst *condb;
- if (!cmp_inst->inst_right->inst_c0 == 0)
- return FALSE;
- truet = bb_last->inst_true_bb;
- falset = bb_last->inst_false_bb;
- if (falset->in_count != 1)
- return FALSE;
- condb = mono_inst_list_last (&falset->ins_list);
- /* target bb must have one instruction */
- if (!condb || (condb->node.next != &falset->ins_list))
- return FALSE;
- if ((((condb->opcode == CEE_BLE || condb->opcode == CEE_BLT) && (condb->inst_false_bb == truet))
- || ((condb->opcode == CEE_BGE || condb->opcode == CEE_BGT) && (condb->inst_true_bb == truet)))
- && same_trees (cmp_inst->inst_left, condb->inst_left->inst_left)) {
- if (!tree_is_unsigned (condb->inst_left->inst_right))
- return FALSE;
- condb->opcode = get_unsigned_condbranch (condb->opcode);
- /* change the original condbranch to just point to the new unsigned check */
- bb_last->opcode = OP_BR;
- bb_last->inst_target_bb = falset;
- replace_out_block (bb, truet, NULL);
- replace_in_block (truet, bb, NULL);
- return TRUE;
- }
- return FALSE;
-}
-
-/*
- * Optimizes the branches on the Control Flow Graph
- *
- */
-static void
-optimize_branches (MonoCompile *cfg)
-{
- int i, changed = FALSE;
- MonoBasicBlock *bb, *bbn;
- guint32 niterations;
-
- /*
- * Some crazy loops could cause the code below to go into an infinite
- * loop, see bug #53003 for an example. To prevent this, we put an upper
- * bound on the number of iterations.
- */
- if (cfg->num_bblocks > 1000)
- niterations = cfg->num_bblocks * 2;
- else
- niterations = 1000;
-
- do {
- MonoBasicBlock *previous_bb;
- changed = FALSE;
- niterations --;
-
- /* we skip the entry block (exit is handled specially instead ) */
- for (previous_bb = cfg->bb_entry, bb = cfg->bb_entry->next_bb; bb; previous_bb = bb, bb = bb->next_bb) {
- MonoInst *last_ins;
-
- /* dont touch code inside exception clauses */
- if (bb->region != -1)
- continue;
-
- if (!bb->not_useless && remove_block_if_useless (cfg, bb, previous_bb)) {
- changed = TRUE;
- continue;
- }
-
- if ((bbn = bb->next_bb) && bbn->in_count == 0 && bb->region == bbn->region) {
- if (cfg->verbose_level > 2)
- g_print ("nullify block triggered %d\n", bbn->block_num);
-
- bb->next_bb = bbn->next_bb;
-
- for (i = 0; i < bbn->out_count; i++)
- replace_in_block (bbn->out_bb [i], bbn, NULL);
-
- nullify_basic_block (bbn);
- changed = TRUE;
- }
-
- last_ins = mono_inst_list_last (&bb->ins_list);
- if (bb->out_count == 1) {
- bbn = bb->out_bb [0];
-
- /* conditional branches where true and false targets are the same can be also replaced with OP_BR */
- if (last_ins && MONO_IS_COND_BRANCH_OP (last_ins)) {
- MonoInst *pop;
- MONO_INST_NEW (cfg, pop, CEE_POP);
- pop->inst_left = last_ins->inst_left->inst_left;
- mono_add_ins_to_end (bb, pop);
- MONO_INST_NEW (cfg, pop, CEE_POP);
- pop->inst_left = last_ins->inst_left->inst_right;
- mono_add_ins_to_end (bb, pop);
- last_ins->opcode = OP_BR;
- last_ins->inst_target_bb = last_ins->inst_true_bb;
- changed = TRUE;
- if (cfg->verbose_level > 2)
- g_print ("cond branch removal triggered in %d %d\n", bb->block_num, bb->out_count);
- }
-
- if (bb->region == bbn->region && bb->next_bb == bbn) {
- /* the block are in sequence anyway ... */
-
- /* branches to the following block can be removed */
- if (last_ins && last_ins->opcode == OP_BR) {
- last_ins->opcode = OP_NOP;
- changed = TRUE;
- if (cfg->verbose_level > 2)
- g_print ("br removal triggered %d -> %d\n", bb->block_num, bbn->block_num);
- }
-
- if (bbn->in_count == 1) {
-
- if (bbn != cfg->bb_exit) {
- if (cfg->verbose_level > 2)
- g_print ("block merge triggered %d -> %d\n", bb->block_num, bbn->block_num);
- merge_basic_blocks (bb, bbn);
- changed = TRUE;
- continue;
- }
-
- //mono_print_bb_code (bb);
- }
- }
- }
- if ((bbn = bb->next_bb) && bbn->in_count == 0 && bb->region == bbn->region) {
- if (cfg->verbose_level > 2) {
- g_print ("nullify block triggered %d\n", bbn->block_num);
- }
- bb->next_bb = bbn->next_bb;
-
- for (i = 0; i < bbn->out_count; i++)
- replace_in_block (bbn->out_bb [i], bbn, NULL);
-
- nullify_basic_block (bbn);
- changed = TRUE;
- continue;
- }
-
- if (bb->out_count == 1) {
- bbn = bb->out_bb [0];
-
- if (last_ins && last_ins->opcode == OP_BR) {
- MonoInst *bbn_code;
-
- bbn = last_ins->inst_target_bb;
- bbn_code = mono_inst_list_first (&bbn->ins_list);
- if (bb->region == bbn->region && bbn_code &&
- bbn_code->opcode == OP_BR &&
- bbn_code->inst_target_bb->region == bb->region) {
- if (cfg->verbose_level > 2)
- g_print ("in %s branch to branch triggered %d -> %d -> %d\n", cfg->method->name,
- bb->block_num, bbn->block_num, bbn_code->inst_target_bb->block_num);
-
- replace_in_block (bbn, bb, NULL);
- replace_out_block (bb, bbn, bbn_code->inst_target_bb);
- link_bblock (cfg, bb, bbn_code->inst_target_bb);
- last_ins->inst_target_bb = bbn_code->inst_target_bb;
- changed = TRUE;
- continue;
- }
- }
- } else if (bb->out_count == 2) {
- if (last_ins && MONO_IS_COND_BRANCH_NOFP (last_ins)) {
- int branch_result = mono_eval_cond_branch (last_ins);
- MonoBasicBlock *taken_branch_target = NULL, *untaken_branch_target = NULL;
- MonoInst *bbn_code;
-
- if (branch_result == BRANCH_TAKEN) {
- taken_branch_target = last_ins->inst_true_bb;
- untaken_branch_target = last_ins->inst_false_bb;
- } else if (branch_result == BRANCH_NOT_TAKEN) {
- taken_branch_target = last_ins->inst_false_bb;
- untaken_branch_target = last_ins->inst_true_bb;
- }
- if (taken_branch_target) {
- /* if mono_eval_cond_branch () is ever taken to handle
- * non-constant values to compare, issue a pop here.
- */
- last_ins->opcode = OP_BR;
- last_ins->inst_target_bb = taken_branch_target;
- mono_unlink_bblock (cfg, bb, untaken_branch_target);
- changed = TRUE;
- continue;
- }
- bbn = last_ins->inst_true_bb;
- bbn_code = mono_inst_list_first (&bbn->ins_list);
- if (bb->region == bbn->region && bbn_code && bbn_code->opcode == OP_BR &&
- bbn_code->inst_target_bb->region == bb->region) {
- if (cfg->verbose_level > 2)
- g_print ("cbranch1 to branch triggered %d -> (%d) %d (0x%02x)\n",
- bb->block_num, bbn->block_num, bbn_code->inst_target_bb->block_num,
- bbn_code->opcode);
-
- /*
- * Unlink, then relink bblocks to avoid various
- * tricky situations when the two targets of the branch
- * are equal, or will become equal after the change.
- */
- mono_unlink_bblock (cfg, bb, last_ins->inst_true_bb);
- mono_unlink_bblock (cfg, bb, last_ins->inst_false_bb);
-
- last_ins->inst_true_bb = bbn_code->inst_target_bb;
-
- link_bblock (cfg, bb, last_ins->inst_true_bb);
- link_bblock (cfg, bb, last_ins->inst_false_bb);
-
- changed = TRUE;
- continue;
- }
-
- bbn = last_ins->inst_false_bb;
- bbn_code = mono_inst_list_first (&bbn->ins_list);
- if (bb->region == bbn->region && bbn_code && bbn_code->opcode == OP_BR &&
- bbn_code->inst_target_bb->region == bb->region) {
- if (cfg->verbose_level > 2)
- g_print ("cbranch2 to branch triggered %d -> (%d) %d (0x%02x)\n",
- bb->block_num, bbn->block_num, bbn_code->inst_target_bb->block_num,
- bbn_code->opcode);
-
- mono_unlink_bblock (cfg, bb, last_ins->inst_true_bb);
- mono_unlink_bblock (cfg, bb, last_ins->inst_false_bb);
-
- last_ins->inst_false_bb = bbn_code->inst_target_bb;
-
- link_bblock (cfg, bb, last_ins->inst_true_bb);
- link_bblock (cfg, bb, last_ins->inst_false_bb);
-
- changed = TRUE;
- continue;
- }
- }
-
- /* detect and optimize to unsigned compares checks like: if (v < 0 || v > limit */
- if (last_ins && last_ins->opcode == CEE_BLT && last_ins->inst_left->inst_right->opcode == OP_ICONST) {
- if (try_unsigned_compare (cfg, bb, last_ins)) {
- /*g_print ("applied in bb %d (->%d) %s\n", bb->block_num, last_ins->inst_target_bb->block_num, mono_method_full_name (cfg->method, TRUE));*/
- changed = TRUE;
- continue;
- }
- }
-
- if (last_ins && MONO_IS_COND_BRANCH_NOFP (last_ins)) {
- if (last_ins->inst_false_bb->out_of_line && (bb->region == last_ins->inst_false_bb->region)) {
- /* Reverse the branch */
- last_ins->opcode = reverse_branch_op (last_ins->opcode);
- bbn = last_ins->inst_false_bb;
- last_ins->inst_false_bb = last_ins->inst_true_bb;
- last_ins->inst_true_bb = bbn;
-
- move_basic_block_to_end (cfg, last_ins->inst_true_bb);
- if (cfg->verbose_level > 2)
- g_print ("cbranch to throw block triggered %d.\n",
- bb->block_num);
- }
- }
- }
- }
- } while (changed && (niterations > 0));
-
-}
-
static void
mono_compile_create_vars (MonoCompile *cfg)
{
sig = mono_method_signature (cfg->method);
if (!MONO_TYPE_IS_VOID (sig->ret)) {
- cfg->ret = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoInst));
- cfg->ret->opcode = OP_RETARG;
- cfg->ret->inst_vtype = sig->ret;
- cfg->ret->klass = mono_class_from_mono_type (sig->ret);
+ if (cfg->new_ir) {
+ cfg->ret = mono_compile_create_var (cfg, sig->ret, OP_ARG);
+ /* Inhibit optimizations */
+ cfg->ret->flags |= MONO_INST_VOLATILE;
+ } else {
+ cfg->ret = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoInst));
+ cfg->ret->opcode = OP_RETARG;
+ cfg->ret->inst_vtype = sig->ret;
+ cfg->ret->klass = mono_class_from_mono_type (sig->ret);
+ }
}
if (cfg->verbose_level > 2)
g_print ("creating vars\n");
for (i = 0; i < sig->param_count; ++i) {
cfg->args [i + sig->hasthis] = mono_compile_create_var (cfg, sig->params [i], OP_ARG);
if (sig->params [i]->byref) {
- cfg->disable_ssa = TRUE;
+ if (!cfg->new_ir) cfg->disable_ssa = TRUE;
+ }
+ }
+
+ if (cfg->new_ir && cfg->verbose_level > 2) {
+ if (cfg->ret) {
+ printf ("\treturn : ");
+ mono_print_ins (cfg->ret);
+ }
+
+ if (sig->hasthis) {
+ printf ("\tthis: ");
+ mono_print_ins (cfg->args [0]);
+ }
+
+ for (i = 0; i < sig->param_count; ++i) {
+ printf ("\targ [%d]: ", i);
+ mono_print_ins (cfg->args [i + sig->hasthis]);
}
}
cfg->locals_start = cfg->num_varinfo;
+ cfg->locals = mono_mempool_alloc0 (cfg->mempool, header->num_locals * sizeof (MonoInst*));
if (cfg->verbose_level > 2)
g_print ("creating locals\n");
for (i = 0; i < header->num_locals; ++i)
- mono_compile_create_var (cfg, header->locals [i], OP_LOCAL);
+ cfg->locals [i] = mono_compile_create_var (cfg, header->locals [i], OP_LOCAL);
+
if (cfg->verbose_level > 2)
g_print ("locals done\n");
}
void
-mono_print_code (MonoCompile *cfg)
+mono_print_code (MonoCompile *cfg, const char* msg)
{
MonoBasicBlock *bb;
for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
- MonoInst *tree;
+ MonoInst *tree = bb->code;
- if (!MONO_INST_LIST_EMPTY (&bb->ins_list))
- g_print ("CODE BLOCK %d (nesting %d):\n",
- bb->block_num, bb->nesting);
+ if (cfg->new_ir) {
+ mono_print_bb (bb, msg);
+ } else {
+ if (!tree)
+ continue;
+
+ g_print ("%s CODE BLOCK %d (nesting %d):\n", msg, bb->block_num, bb->nesting);
- MONO_BB_FOR_EACH_INS (bb, tree) {
- mono_print_tree (tree);
- g_print ("\n");
+ MONO_BB_FOR_EACH_INS (bb, tree) {
+ mono_print_tree (tree);
+ g_print ("\n");
+ }
}
}
}
cfg->rs = mono_regstate_new ();
for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
- MonoInst *last_ins = mono_inst_list_last (&bb->ins_list);
-
- if (last_ins && MONO_IS_COND_BRANCH_OP (last_ins) &&
- bb->next_bb != last_ins->inst_false_bb) {
+ if (bb->last_ins && MONO_IS_COND_BRANCH_OP (bb->last_ins) &&
+ bb->last_ins->inst_false_bb && bb->next_bb != bb->last_ins->inst_false_bb) {
/* we are careful when inverting, since bugs like #59580
* could show up when dealing with NaNs.
*/
- if (MONO_IS_COND_BRANCH_NOFP(last_ins) && bb->next_bb == last_ins->inst_true_bb) {
- MonoBasicBlock *tmp = last_ins->inst_true_bb;
- last_ins->inst_true_bb = last_ins->inst_false_bb;
- last_ins->inst_false_bb = tmp;
+ if (MONO_IS_COND_BRANCH_NOFP(bb->last_ins) && bb->next_bb == bb->last_ins->inst_true_bb) {
+ MonoBasicBlock *tmp = bb->last_ins->inst_true_bb;
+ bb->last_ins->inst_true_bb = bb->last_ins->inst_false_bb;
+ bb->last_ins->inst_false_bb = tmp;
- last_ins->opcode = reverse_branch_op (last_ins->opcode);
+ bb->last_ins->opcode = mono_reverse_branch_op (bb->last_ins->opcode);
} else {
- MonoInst *inst = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoInst));
- inst->opcode = OP_BR;
- inst->inst_target_bb = last_ins->inst_false_bb;
- mono_bblock_add_inst (bb, inst);
+ MonoInst *ins;
+
+ MONO_INST_NEW (cfg, ins, OP_BR);
+ ins->inst_target_bb = bb->last_ins->inst_false_bb;
+ MONO_ADD_INS (bb, ins);
}
}
}
#ifdef DEBUG_SELECTION
if (cfg->verbose_level >= 4) {
- for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
- MonoInst *tree;
- g_print ("DUMP BLOCK %d:\n", bb->block_num);
-
- MONO_BB_FOR_EACH_INS (bb, tree) {
- mono_print_tree (tree);
- g_print ("\n");
+ for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
+ MonoInst *tree;
+ g_print ("DUMP BLOCK %d:\n", bb->block_num);
+ MONO_BB_FOR_EACH_INS (bb, tree) {
+ mono_print_tree (tree);
+ g_print ("\n");
+ }
}
}
- }
#endif
for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
- MonoInst *tree, *n;
- MonoInstList head;
+ MonoInst *tree = bb->code, *next;
MBState *mbstate;
- MONO_INST_LIST_INIT (&head);
- if (MONO_INST_LIST_EMPTY (&bb->ins_list))
+ if (!tree)
continue;
- MONO_INST_LIST_SPLICE_INIT (&bb->ins_list, &head);
+ bb->code = NULL;
+ bb->last_ins = NULL;
cfg->cbb = bb;
mono_regstate_reset (cfg->rs);
if (cfg->verbose_level >= 3)
g_print ("LABEL BLOCK %d:\n", bb->block_num);
#endif
- MONO_INST_LIST_FOR_EACH_ENTRY_SAFE (tree, n, &head, node) {
+ for (; tree; tree = next) {
+ next = tree->next;
#ifdef DEBUG_SELECTION
if (cfg->verbose_level >= 3) {
mono_print_tree (tree);
}
bb->max_vreg = cfg->rs->next_vreg;
+ if (bb->last_ins)
+ bb->last_ins->next = NULL;
+
mono_mempool_empty (cfg->state_pool);
}
mono_mempool_destroy (cfg->state_pool);
/* we reuse dfn here */
/* bb->dfn = bb_count++; */
#ifdef MONO_ARCH_ENABLE_NORMALIZE_OPCODES
- mono_normalize_opcodes (cfg, bb);
+ if (!cfg->new_ir)
+ mono_normalize_opcodes (cfg, bb);
#endif
mono_arch_lowering_pass (cfg, bb);
if (cfg->opt & MONO_OPT_PEEPHOLE)
mono_arch_peephole_pass_1 (cfg, bb);
- mono_local_regalloc (cfg, bb);
+ if (!cfg->globalra)
+ mono_local_regalloc (cfg, bb);
if (cfg->opt & MONO_OPT_PEEPHOLE)
mono_arch_peephole_pass_2 (cfg, bb);
/* emit code all basic blocks */
for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
bb->native_offset = cfg->code_len;
- mono_arch_output_basic_block (cfg, bb);
+ //if ((bb == cfg->bb_entry) || !(bb->region == -1 && !bb->dfn))
+ mono_arch_output_basic_block (cfg, bb);
if (bb == cfg->bb_exit) {
cfg->epilog_begin = cfg->code_len;
/* fixme: align to MONO_ARCH_CODE_ALIGNMENT */
if (cfg->method->dynamic) {
+ guint unwindlen = 0;
+#ifdef MONO_ARCH_HAVE_UNWIND_TABLE
+ unwindlen = mono_arch_unwindinfo_get_size (cfg->arch.unwindinfo);
+#endif
/* Allocate the code into a separate memory pool so it can be freed */
cfg->dynamic_info = g_new0 (MonoJitDynamicMethodInfo, 1);
cfg->dynamic_info->code_mp = mono_code_manager_new_dynamic ();
mono_dynamic_code_hash_insert (cfg->domain, cfg->method, cfg->dynamic_info);
mono_domain_unlock (cfg->domain);
- code = mono_code_manager_reserve (cfg->dynamic_info->code_mp, cfg->code_size);
+ code = mono_code_manager_reserve (cfg->dynamic_info->code_mp, cfg->code_size + unwindlen);
} else {
+ guint unwindlen = 0;
+#ifdef MONO_ARCH_HAVE_UNWIND_TABLE
+ unwindlen = mono_arch_unwindinfo_get_size (cfg->arch.unwindinfo);
+#endif
mono_domain_lock (cfg->domain);
- code = mono_code_manager_reserve (cfg->domain->code_mp, cfg->code_size);
+ code = mono_code_manager_reserve (cfg->domain->code_mp, cfg->code_size + unwindlen);
mono_domain_unlock (cfg->domain);
}
switch (patch_info->type) {
case MONO_PATCH_INFO_ABS: {
MonoJitICallInfo *info = mono_find_jit_icall_by_addr (patch_info->data.target);
+
+ /*
+ * Change patches of type MONO_PATCH_INFO_ABS into patches describing the
+ * absolute address.
+ */
if (info) {
//printf ("TEST %s %p\n", info->name, patch_info->data.target);
+ // FIXME: CLEAN UP THIS MESS.
if ((cfg->method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) &&
- strstr (cfg->method->name, info->name))
+ strstr (cfg->method->name, info->name)) {
/*
* This is an icall wrapper, and this is a call to the
* wrapped function.
*/
- ;
- else {
+ if (cfg->compile_aot) {
+ patch_info->type = MONO_PATCH_INFO_JIT_ICALL_ADDR;
+ patch_info->data.name = info->name;
+ }
+ } else {
/* for these array methods we currently register the same function pointer
* since it's a vararg function. But this means that mono_find_jit_icall_by_addr ()
* will return the incorrect one depending on the order they are registered.
}
}
}
- else {
+
+ if (patch_info->type == MONO_PATCH_INFO_ABS && !cfg->new_ir) {
MonoVTable *vtable = mono_find_class_init_trampoline_by_addr (patch_info->data.target);
if (vtable) {
patch_info->type = MONO_PATCH_INFO_CLASS_INIT;
patch_info->data.klass = vtable->klass;
- } else {
- MonoClass *klass = mono_find_delegate_trampoline_by_addr (patch_info->data.target);
- if (klass) {
- patch_info->type = MONO_PATCH_INFO_DELEGATE_TRAMPOLINE;
- patch_info->data.klass = klass;
+ }
+ }
+
+ if (patch_info->type == MONO_PATCH_INFO_ABS) {
+ MonoClass *klass = mono_find_delegate_trampoline_by_addr (patch_info->data.target);
+ if (klass) {
+ patch_info->type = MONO_PATCH_INFO_DELEGATE_TRAMPOLINE;
+ patch_info->data.klass = klass;
+ }
+ }
+
+ if (patch_info->type == MONO_PATCH_INFO_ABS) {
+ if (cfg->abs_patches) {
+ MonoJumpInfo *abs_ji = g_hash_table_lookup (cfg->abs_patches, patch_info->data.target);
+ if (abs_ji) {
+ patch_info->type = abs_ji->type;
+ patch_info->data.target = abs_ji->data.target;
}
}
}
+
break;
}
case MONO_PATCH_INFO_SWITCH: {
mono_domain_unlock (cfg->domain);
}
- if (!cfg->compile_aot)
+ if (!cfg->compile_aot && !cfg->new_ir)
/* In the aot case, the patch already points to the correct location */
patch_info->ip.i = patch_info->ip.label->inst_c0;
for (i = 0; i < patch_info->data.table->table_size; i++) {
- table [i] = GINT_TO_POINTER (patch_info->data.table->table [i]->native_offset);
+ /* Might be NULL if the switch is eliminated */
+ if (patch_info->data.table->table [i]) {
+ g_assert (patch_info->data.table->table [i]->native_offset);
+ table [i] = GINT_TO_POINTER (patch_info->data.table->table [i]->native_offset);
+ } else {
+ table [i] = NULL;
+ }
}
patch_info->data.table->table = (MonoBasicBlock**)table;
break;
}
+ case MONO_PATCH_INFO_METHOD_JUMP: {
+ GSList *list;
+ MonoDomain *domain = cfg->domain;
+ unsigned char *ip = cfg->native_code + patch_info->ip.i;
+
+ mono_domain_lock (domain);
+ if (!domain->jump_target_hash)
+ domain->jump_target_hash = g_hash_table_new (NULL, NULL);
+ list = g_hash_table_lookup (domain->jump_target_hash, patch_info->data.method);
+ list = g_slist_prepend (list, ip);
+ g_hash_table_insert (domain->jump_target_hash, patch_info->data.method, list);
+ mono_domain_unlock (domain);
+ break;
+ }
default:
/* do nothing */
break;
g_free (nm);
}
-#ifdef MONO_ARCH_HAVE_SAVE_UNWIND_INFO
- mono_arch_save_unwind_info (cfg);
-#endif
-
- mono_arch_patch_code (cfg->method, cfg->domain, cfg->native_code, cfg->patch_info, cfg->run_cctors);
-
- if (cfg->method->dynamic) {
- mono_code_manager_commit (cfg->dynamic_info->code_mp, cfg->native_code, cfg->code_size, cfg->code_len);
- } else {
- mono_domain_lock (cfg->domain);
- mono_code_manager_commit (cfg->domain->code_mp, cfg->native_code, cfg->code_size, cfg->code_len);
- mono_domain_unlock (cfg->domain);
- }
-
- mono_arch_flush_icache (cfg->native_code, cfg->code_len);
-
- mono_debug_close_method (cfg);
-}
-
-
-
-static void
-remove_critical_edges (MonoCompile *cfg) {
- MonoBasicBlock *bb;
- MonoBasicBlock *previous_bb;
-
- if (cfg->verbose_level > 3) {
- for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
- MonoInst *last_ins;
- int i;
- printf ("remove_critical_edges %s, BEFORE BB%d (in:", mono_method_full_name (cfg->method, TRUE), bb->block_num);
- for (i = 0; i < bb->in_count; i++) {
- printf (" %d", bb->in_bb [i]->block_num);
- }
- printf (") (out:");
- for (i = 0; i < bb->out_count; i++) {
- printf (" %d", bb->out_bb [i]->block_num);
- }
- printf (")");
- last_ins = mono_inst_list_last (&bb->ins_list);
- if (last_ins) {
- printf (" ");
- mono_print_tree (last_ins);
- }
- printf ("\n");
- }
- }
-
- for (previous_bb = cfg->bb_entry, bb = previous_bb->next_bb; bb != NULL; previous_bb = previous_bb->next_bb, bb = bb->next_bb) {
- if (bb->in_count > 1) {
- int in_bb_index;
- for (in_bb_index = 0; in_bb_index < bb->in_count; in_bb_index++) {
- MonoBasicBlock *in_bb = bb->in_bb [in_bb_index];
- if (in_bb->out_count > 1) {
- MonoBasicBlock *new_bb = mono_mempool_alloc0 ((cfg)->mempool, sizeof (MonoBasicBlock));
- MONO_INST_LIST_INIT (&new_bb->ins_list);
- new_bb->block_num = cfg->num_bblocks++;
-// new_bb->real_offset = bb->real_offset;
- new_bb->region = bb->region;
-
- /* Do not alter the CFG while altering the BB list */
- if (previous_bb->region == bb->region) {
- if (previous_bb != cfg->bb_entry) {
- MonoInst *last_ins;
- /* If previous_bb "followed through" to bb, */
- /* keep it linked with a OP_BR */
- last_ins = mono_inst_list_last (&previous_bb->ins_list);
- if ((last_ins == NULL) ||
- ((last_ins->opcode != OP_BR) &&
- (!(MONO_IS_COND_BRANCH_OP (last_ins))) &&
- (last_ins->opcode != OP_SWITCH))) {
- int i;
- /* Make sure previous_bb really falls through bb */
- for (i = 0; i < previous_bb->out_count; i++) {
- if (previous_bb->out_bb [i] == bb) {
- MonoInst *jump;
- MONO_INST_NEW (cfg, jump, OP_BR);
- MONO_ADD_INS (previous_bb, jump);
- jump->cil_code = previous_bb->cil_code;
- jump->inst_target_bb = bb;
- break;
- }
- }
- }
- } else {
- /* We cannot add any inst to the entry BB, so we must */
- /* put a new BB in the middle to hold the OP_BR */
- MonoInst *jump;
- MonoBasicBlock *new_bb_after_entry = mono_mempool_alloc0 ((cfg)->mempool, sizeof (MonoBasicBlock));
- MONO_INST_LIST_INIT (&new_bb_after_entry->ins_list);
- new_bb_after_entry->block_num = cfg->num_bblocks++;
-// new_bb_after_entry->real_offset = bb->real_offset;
- new_bb_after_entry->region = bb->region;
-
- MONO_INST_NEW (cfg, jump, OP_BR);
- MONO_ADD_INS (new_bb_after_entry, jump);
- jump->cil_code = bb->cil_code;
- jump->inst_target_bb = bb;
-
- previous_bb->next_bb = new_bb_after_entry;
- previous_bb = new_bb_after_entry;
-
- if (cfg->verbose_level > 2) {
- printf ("remove_critical_edges %s, added helper BB%d jumping to BB%d\n", mono_method_full_name (cfg->method, TRUE), new_bb_after_entry->block_num, bb->block_num);
- }
- }
- }
-
- /* Insert new_bb in the BB list */
- previous_bb->next_bb = new_bb;
- new_bb->next_bb = bb;
- previous_bb = new_bb;
-
- /* Setup in_bb and out_bb */
- new_bb->in_bb = mono_mempool_alloc ((cfg)->mempool, sizeof (MonoBasicBlock*));
- new_bb->in_bb [0] = in_bb;
- new_bb->in_count = 1;
- new_bb->out_bb = mono_mempool_alloc ((cfg)->mempool, sizeof (MonoBasicBlock*));
- new_bb->out_bb [0] = bb;
- new_bb->out_count = 1;
-
- /* Relink in_bb and bb to (from) new_bb */
- replace_out_block (in_bb, bb, new_bb);
- replace_out_block_in_code (in_bb, bb, new_bb);
- replace_in_block (bb, in_bb, new_bb);
-
- if (cfg->verbose_level > 2) {
- printf ("remove_critical_edges %s, removed critical edge from BB%d to BB%d (added BB%d)\n", mono_method_full_name (cfg->method, TRUE), in_bb->block_num, bb->block_num, new_bb->block_num);
- }
- }
- }
+ {
+ gboolean is_generic = FALSE;
+
+ if (cfg->method->is_inflated || mono_method_get_generic_container (cfg->method) ||
+ cfg->method->klass->generic_container || cfg->method->klass->generic_class) {
+ is_generic = TRUE;
}
+
+ if (cfg->generic_sharing_context)
+ g_assert (is_generic);
}
+
+#ifdef MONO_ARCH_HAVE_SAVE_UNWIND_INFO
+ mono_arch_save_unwind_info (cfg);
+#endif
- if (cfg->verbose_level > 3) {
- for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
- MonoInst *last_ins;
- int i;
- printf ("remove_critical_edges %s, AFTER BB%d (in:", mono_method_full_name (cfg->method, TRUE), bb->block_num);
- for (i = 0; i < bb->in_count; i++) {
- printf (" %d", bb->in_bb [i]->block_num);
- }
- printf (") (out:");
- for (i = 0; i < bb->out_count; i++) {
- printf (" %d", bb->out_bb [i]->block_num);
- }
- printf (")");
- last_ins = mono_inst_list_last (&bb->ins_list);
- if (last_ins) {
- printf (" ");
- mono_print_tree (last_ins);
- }
- printf ("\n");
- }
+ mono_arch_patch_code (cfg->method, cfg->domain, cfg->native_code, cfg->patch_info, cfg->run_cctors);
+
+ if (cfg->method->dynamic) {
+ mono_code_manager_commit (cfg->dynamic_info->code_mp, cfg->native_code, cfg->code_size, cfg->code_len);
+ } else {
+ mono_domain_lock (cfg->domain);
+ mono_code_manager_commit (cfg->domain->code_mp, cfg->native_code, cfg->code_size, cfg->code_len);
+ mono_domain_unlock (cfg->domain);
}
+
+ mono_arch_flush_icache (cfg->native_code, cfg->code_len);
+
+ mono_debug_close_method (cfg);
+#ifdef MONO_ARCH_HAVE_UNWIND_TABLE
+ mono_arch_unwindinfo_install_unwind_info (&cfg->arch.unwindinfo, cfg->native_code, cfg->code_len);
+#endif
+}
+
+/*
+ * get_object_generic_inst:
+ *
+ *   Return a generic instantiation with TYPE_ARGC type arguments, each of
+ * which is System.Object. Used when compiling a shared version of a generic
+ * method, where every reference type argument is represented by object.
+ *
+ * NOTE(review): the type_argv array lives on the stack (alloca) and is only
+ * valid for the duration of this call — assumes mono_metadata_get_generic_inst
+ * copies/interns its input rather than keeping the pointer; confirm against
+ * the metadata API.
+ */
+static MonoGenericInst*
+get_object_generic_inst (int type_argc)
+{
+	MonoType **type_argv;
+	int i;
+
+	/* stack allocation is fine here: type_argc is small (method arity) */
+	type_argv = alloca (sizeof (MonoType*) * type_argc);
+
+	for (i = 0; i < type_argc; ++i)
+		type_argv [i] = &mono_defaults.object_class->byval_arg;
+
+	return mono_metadata_get_generic_inst (type_argc, type_argv);
+}
/*
guint8 *ip;
MonoCompile *cfg;
MonoJitInfo *jinfo;
- int dfn = 0, i, code_size_ratio;
+ int dfn, i, code_size_ratio;
gboolean deadce_has_run = FALSE;
gboolean try_generic_shared;
- MonoMethod *method_to_compile;
+ MonoMethod *method_to_compile, *method_to_register;
int generic_info_size;
mono_jit_stats.methods_compiled++;
(opts & MONO_OPT_GSHARED) && (method->is_generic || method->klass->generic_container);
else
try_generic_shared = mono_class_generic_sharing_enabled (method->klass) &&
- (opts & MONO_OPT_GSHARED) && mono_method_is_generic_sharable_impl (method);
+ (opts & MONO_OPT_GSHARED) && mono_method_is_generic_sharable_impl (method, FALSE);
if (opts & MONO_OPT_GSHARED) {
if (try_generic_shared)
declaring_method = method;
} else {
declaring_method = mono_method_get_declaring_generic_method (method);
- g_assert (method->klass->generic_class->container_class == declaring_method->klass);
+ if (method->klass->generic_class)
+ g_assert (method->klass->generic_class->container_class == declaring_method->klass);
+ else
+ g_assert (method->klass == declaring_method->klass);
}
if (declaring_method->is_generic)
cfg->verbose_level = mini_verbose;
cfg->compile_aot = compile_aot;
cfg->skip_visibility = method->skip_visibility;
+ cfg->orig_method = method;
if (try_generic_shared)
cfg->generic_sharing_context = (MonoGenericSharingContext*)&cfg->generic_sharing_context;
cfg->token_info_hash = g_hash_table_new (NULL, NULL);
+ if (cfg->compile_aot && !try_generic_shared && (method->is_generic || method->klass->generic_container)) {
+ cfg->exception_type = MONO_EXCEPTION_GENERIC_SHARING_FAILED;
+ return cfg;
+ }
+
/* The debugger has no liveness information, so avoid sharing registers/stack slots */
if (mono_debug_using_mono_debugger () || debug_options.mdb_optimizations) {
cfg->disable_reuse_registers = TRUE;
cfg->opt &= ~MONO_OPT_INLINE;
cfg->opt &= ~MONO_OPT_COPYPROP;
cfg->opt &= ~MONO_OPT_CONSPROP;
+ cfg->opt &= ~MONO_OPT_GSHARED;
}
header = mono_method_get_header (method_to_compile);
return cfg;
}
+ if (getenv ("MONO_VERBOSE_METHOD")) {
+ if (strcmp (cfg->method->name, getenv ("MONO_VERBOSE_METHOD")) == 0)
+ cfg->verbose_level = 4;
+ }
+
ip = (guint8 *)header->code;
+ cfg->intvars = mono_mempool_alloc0 (cfg->mempool, sizeof (guint16) * STACK_MAX * header->max_stack);
+
if (cfg->verbose_level > 2) {
if (cfg->generic_sharing_context)
g_print ("converting shared method %s\n", mono_method_full_name (method, TRUE));
g_print ("converting method %s\n", mono_method_full_name (method, TRUE));
}
+ if (cfg->opt & (MONO_OPT_ABCREM | MONO_OPT_SSAPRE))
+ cfg->opt |= MONO_OPT_SSA;
+
+ {
+ static int count = 0;
+
+ count ++;
+
+ if (getenv ("MONO_COUNT")) {
+ if (count == atoi (getenv ("MONO_COUNT"))) {
+ printf ("LAST: %s\n", mono_method_full_name (method, TRUE));
+ //cfg->verbose_level = 5;
+ }
+ if (count <= atoi (getenv ("MONO_COUNT")))
+ cfg->new_ir = TRUE;
+
+ /*
+ * Passing/returning vtypes in registers in managed methods is an ABI change
+ * from the old JIT.
+ */
+ disable_vtypes_in_regs = TRUE;
+ }
+ else
+ cfg->new_ir = TRUE;
+ }
+
+ /*
+ if ((cfg->method->klass->image != mono_defaults.corlib) || (strstr (cfg->method->klass->name, "StackOverflowException") && strstr (cfg->method->name, ".ctor")) || (strstr (cfg->method->klass->name, "OutOfMemoryException") && strstr (cfg->method->name, ".ctor")))
+ cfg->globalra = TRUE;
+ */
+
+ //cfg->globalra = TRUE;
+
+ //if (!strcmp (cfg->method->klass->name, "Tests") && !cfg->method->wrapper_type)
+ // cfg->globalra = TRUE;
+
+ {
+ static int count = 0;
+ count ++;
+
+ if (getenv ("COUNT2")) {
+ if (count == atoi (getenv ("COUNT2")))
+ printf ("LAST: %s\n", mono_method_full_name (cfg->method, TRUE));
+ if (count > atoi (getenv ("COUNT2")))
+ cfg->globalra = FALSE;
+ }
+ }
+
+ if (header->clauses)
+ cfg->globalra = FALSE;
+
+ if (cfg->method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED)
+ /* The code in the prolog clobbers caller saved registers */
+ cfg->globalra = FALSE;
+
+ // FIXME: Disable globalra in case of tracing/profiling
+
+ if (cfg->method->save_lmf)
+ /* The LMF saving code might clobber caller saved registers */
+ cfg->globalra = FALSE;
+
+ // FIXME:
+ if (!strcmp (cfg->method->name, "CompareInternal"))
+ cfg->globalra = FALSE;
+
+ /*
+ if (strstr (cfg->method->name, "LoadData"))
+ cfg->new_ir = FALSE;
+ */
+
+ if (cfg->new_ir) {
+ cfg->rs = mono_regstate_new ();
+ cfg->next_vreg = cfg->rs->next_vreg;
+ }
+
+ /* FIXME: Fix SSA to handle branches inside bblocks */
+ if (cfg->opt & MONO_OPT_SSA)
+ cfg->enable_extended_bblocks = FALSE;
+
+ /*
+ * FIXME: This confuses liveness analysis because variables which are assigned after
+ * a branch inside a bblock become part of the kill set, even though the assignment
+ * might not get executed. This causes the optimize_initlocals pass to delete some
+ * assignments which are needed.
+ * Also, the mono_if_conversion pass needs to be modified to recognize the code
+ * created by this.
+ */
+ //cfg->enable_extended_bblocks = TRUE;
+
/*
* create MonoInst* which represents arguments and local variables
*/
mono_compile_create_vars (cfg);
- if ((i = mono_method_to_ir (cfg, method_to_compile, NULL, NULL, cfg->locals_start, NULL, NULL, NULL, 0, FALSE)) < 0) {
+ if (cfg->new_ir) {
+ /* SSAPRE is not supported on linear IR */
+ cfg->opt &= ~MONO_OPT_SSAPRE;
+
+ i = mono_method_to_ir2 (cfg, method_to_compile, NULL, NULL, NULL, NULL, NULL, 0, FALSE);
+ }
+ else {
+ i = mono_method_to_ir (cfg, method_to_compile, NULL, NULL, cfg->locals_start, NULL, NULL, NULL, 0, FALSE);
+ }
+
+ if (i < 0) {
if (try_generic_shared && cfg->exception_type == MONO_EXCEPTION_GENERIC_SHARING_FAILED) {
if (compile_aot) {
if (MONO_PROBE_METHOD_COMPILE_END_ENABLED ())
mono_jit_stats.basic_blocks += cfg->num_bblocks;
mono_jit_stats.max_basic_blocks = MAX (cfg->num_bblocks, mono_jit_stats.max_basic_blocks);
- if ((cfg->num_varinfo > 2000) && !cfg->compile_aot) {
- /*
- * we disable some optimizations if there are too many variables
- * because JIT time may become too expensive. The actual number needs
- * to be tweaked and eventually the non-linear algorithms should be fixed.
- */
- cfg->opt &= ~ (MONO_OPT_LINEARS | MONO_OPT_COPYPROP | MONO_OPT_CONSPROP);
- cfg->disable_ssa = TRUE;
- }
-
/*g_print ("numblocks = %d\n", cfg->num_bblocks);*/
+ if (cfg->new_ir) {
+ mono_decompose_long_opts (cfg);
+
+ /* Should be done before branch opts */
+ if (cfg->opt & (MONO_OPT_CONSPROP | MONO_OPT_COPYPROP))
+ mono_local_cprop2 (cfg);
+ }
+
if (cfg->opt & MONO_OPT_BRANCH)
- optimize_branches (cfg);
+ mono_optimize_branches (cfg);
- if (cfg->opt & MONO_OPT_SSAPRE) {
- remove_critical_edges (cfg);
+ if (cfg->new_ir) {
+ /* This must be done _before_ global reg alloc and _after_ decompose */
+ mono_handle_global_vregs (cfg);
+ mono_local_deadce (cfg);
+ mono_if_conversion (cfg);
}
+ if ((cfg->opt & MONO_OPT_SSAPRE) || cfg->globalra)
+ mono_remove_critical_edges (cfg);
+
/* Depth-first ordering on basic blocks */
cfg->bblocks = mono_mempool_alloc (cfg->mempool, sizeof (MonoBasicBlock*) * (cfg->num_bblocks + 1));
+ dfn = 0;
df_visit (cfg->bb_entry, &dfn, cfg->bblocks);
if (cfg->num_bblocks != dfn + 1) {
MonoBasicBlock *bb;
cfg->num_bblocks = dfn + 1;
- if (!header->clauses) {
- /* remove unreachable code, because the code in them may be
- * inconsistent (access to dead variables for example) */
- for (bb = cfg->bb_entry; bb;) {
- MonoBasicBlock *bbn = bb->next_bb;
-
- if (bbn && bbn->region == -1 && !bbn->dfn) {
- if (cfg->verbose_level > 1)
- g_print ("found unreachable code in BB%d\n", bbn->block_num);
- bb->next_bb = bbn->next_bb;
- nullify_basic_block (bbn);
- } else {
- bb = bb->next_bb;
- }
+ /* remove unreachable code, because the code in them may be
+ * inconsistent (access to dead variables for example) */
+ for (bb = cfg->bb_entry; bb;) {
+ MonoBasicBlock *bbn = bb->next_bb;
+
+ /*
+ * FIXME: Can't use the second case in methods with clauses, since the
+ * bblocks inside the clauses are not processed during dfn computation.
+ */
+ if ((header->clauses && (bbn && bbn->region == -1 && bbn->in_count == 0)) ||
+ (!header->clauses && (bbn && bbn->region == -1 && !bbn->dfn))) {
+ if (cfg->verbose_level > 1)
+ g_print ("found unreachable code in BB%d\n", bbn->block_num);
+ /* There may exist unreachable branches to this bb */
+ bb->next_bb = bbn->next_bb;
+ mono_nullify_basic_block (bbn);
+ } else {
+ bb = bb->next_bb;
}
}
}
+ if (((cfg->num_varinfo > 2000) || (cfg->num_bblocks > 1000)) && !cfg->compile_aot) {
+ /*
+ * we disable some optimizations if there are too many variables
+ * because JIT time may become too expensive. The actual number needs
+ * to be tweaked and eventually the non-linear algorithms should be fixed.
+ */
+ cfg->opt &= ~ (MONO_OPT_LINEARS | MONO_OPT_COPYPROP | MONO_OPT_CONSPROP);
+ cfg->disable_ssa = TRUE;
+ }
+
if (cfg->opt & MONO_OPT_LOOP) {
mono_compile_dominator_info (cfg, MONO_COMP_DOM | MONO_COMP_IDOM);
mono_compute_natural_loops (cfg);
if (!header->num_clauses && !cfg->disable_ssa) {
mono_local_cprop (cfg);
+
#ifndef DISABLE_SSA
- mono_ssa_compute (cfg);
+ if (cfg->new_ir)
+ mono_ssa_compute2 (cfg);
+ else
+ mono_ssa_compute (cfg);
#endif
}
#else
-
- /* fixme: add all optimizations which requires SSA */
- if (cfg->opt & (MONO_OPT_SSA | MONO_OPT_ABCREM | MONO_OPT_SSAPRE)) {
+ if (cfg->opt & MONO_OPT_SSA) {
if (!(cfg->comp_done & MONO_COMP_SSA) && !header->num_clauses && !cfg->disable_ssa) {
- mono_local_cprop (cfg);
#ifndef DISABLE_SSA
- mono_ssa_compute (cfg);
+ if (!cfg->new_ir)
+ mono_local_cprop (cfg);
+ if (cfg->new_ir)
+ mono_ssa_compute2 (cfg);
+ else
+ mono_ssa_compute (cfg);
#endif
if (cfg->verbose_level >= 2) {
if ((cfg->opt & MONO_OPT_CONSPROP) || (cfg->opt & MONO_OPT_COPYPROP)) {
if (cfg->comp_done & MONO_COMP_SSA) {
#ifndef DISABLE_SSA
- mono_ssa_cprop (cfg);
+ if (cfg->new_ir)
+ mono_ssa_cprop2 (cfg);
+ else
+ mono_ssa_cprop (cfg);
#endif
} else {
- mono_local_cprop (cfg);
+ if (!cfg->new_ir)
+ mono_local_cprop (cfg);
}
}
#ifndef DISABLE_SSA
if (cfg->comp_done & MONO_COMP_SSA) {
- //mono_ssa_deadce (cfg);
-
//mono_ssa_strength_reduction (cfg);
if (cfg->opt & MONO_OPT_SSAPRE) {
mono_perform_ssapre (cfg);
//mono_local_cprop (cfg);
}
-
+
if (cfg->opt & MONO_OPT_DEADCE) {
- mono_ssa_deadce (cfg);
+ if (cfg->new_ir)
+ mono_ssa_deadce2 (cfg);
+ else
+ mono_ssa_deadce (cfg);
deadce_has_run = TRUE;
}
-
- if ((cfg->flags & MONO_CFG_HAS_LDELEMA) && (cfg->opt & MONO_OPT_ABCREM))
- mono_perform_abc_removal (cfg);
-
- mono_ssa_remove (cfg);
- if (cfg->opt & MONO_OPT_BRANCH)
- optimize_branches (cfg);
+ if (cfg->new_ir) {
+ if ((cfg->flags & (MONO_CFG_HAS_LDELEMA|MONO_CFG_HAS_CHECK_THIS)) && (cfg->opt & MONO_OPT_ABCREM))
+ mono_perform_abc_removal2 (cfg);
+ } else {
+ if ((cfg->flags & MONO_CFG_HAS_LDELEMA) && (cfg->opt & MONO_OPT_ABCREM))
+ mono_perform_abc_removal (cfg);
+ }
+
+ if (cfg->new_ir) {
+ mono_ssa_remove2 (cfg);
+ mono_local_cprop2 (cfg);
+ mono_handle_global_vregs (cfg);
+ mono_local_deadce (cfg);
+ }
+ else
+ mono_ssa_remove (cfg);
+
+ if (cfg->opt & MONO_OPT_BRANCH) {
+ MonoBasicBlock *bb;
+
+ mono_optimize_branches (cfg);
+
+ /* Have to recompute cfg->bblocks and bb->dfn */
+ if (cfg->globalra) {
+ mono_remove_critical_edges (cfg);
+
+ for (bb = cfg->bb_entry; bb; bb = bb->next_bb)
+ bb->dfn = 0;
+
+ /* Depth-first ordering on basic blocks */
+ cfg->bblocks = mono_mempool_alloc (cfg->mempool, sizeof (MonoBasicBlock*) * (cfg->num_bblocks + 1));
+
+ dfn = 0;
+ df_visit (cfg->bb_entry, &dfn, cfg->bblocks);
+ cfg->num_bblocks = dfn + 1;
+ }
+ }
}
#endif
return cfg;
}
- if (cfg->verbose_level > 4) {
- printf ("BEFORE DECOMPSE START\n");
- mono_print_code (cfg);
- printf ("BEFORE DECOMPSE END\n");
+ if (cfg->new_ir) {
+#ifdef MONO_ARCH_SOFT_FLOAT
+ mono_handle_soft_float (cfg);
+#endif
+ mono_decompose_vtype_opts (cfg);
+ if (cfg->flags & MONO_CFG_HAS_ARRAY_ACCESS)
+ mono_decompose_array_access_opts (cfg);
+ }
+
+ if (!cfg->new_ir) {
+ if (cfg->verbose_level > 4)
+ mono_print_code (cfg, "BEFORE DECOMPOSE");
+
+ decompose_pass (cfg);
}
-
- decompose_pass (cfg);
if (cfg->got_var) {
GList *regs;
*/
mono_liveness_handle_exception_clauses (cfg);
- if (cfg->opt & MONO_OPT_LINEARS) {
+ if (cfg->globalra) {
+ MonoBasicBlock *bb;
+
+ /* Have to do this before regalloc since it can create vregs */
+ for (bb = cfg->bb_entry; bb; bb = bb->next_bb)
+ mono_arch_lowering_pass (cfg, bb);
+
+ mono_global_regalloc (cfg);
+ }
+
+ if ((cfg->opt & MONO_OPT_LINEARS) && !cfg->globalra) {
GList *vars, *regs;
/* For now, compute aliasing info only if needed for deadce... */
- if ((cfg->opt & MONO_OPT_DEADCE) && (! deadce_has_run) && (header->num_clauses == 0)) {
+ if (!cfg->new_ir && (cfg->opt & MONO_OPT_DEADCE) && (! deadce_has_run) && (header->num_clauses == 0)) {
cfg->aliasing_info = mono_build_aliasing_information (cfg);
}
//print_dfn (cfg);
/* variables are allocated after decompose, since decompose could create temps */
- mono_arch_allocate_vars (cfg);
+ if (!cfg->globalra)
+ mono_arch_allocate_vars (cfg);
- if (cfg->opt & MONO_OPT_CFOLD)
+ if (!cfg->new_ir && cfg->opt & MONO_OPT_CFOLD)
mono_constant_fold (cfg);
- mini_select_instructions (cfg);
+ if (cfg->new_ir) {
+ MonoBasicBlock *bb;
+ gboolean need_local_opts;
+
+ if (!cfg->globalra) {
+ mono_spill_global_vars (cfg, &need_local_opts);
+
+ if (need_local_opts || cfg->compile_aot) {
+ /* To optimize code created by spill_global_vars */
+ mono_local_cprop2 (cfg);
+ mono_local_deadce (cfg);
+ }
+ }
+
+ /* Add branches between non-consecutive bblocks */
+ for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
+ if (bb->last_ins && MONO_IS_COND_BRANCH_OP (bb->last_ins) &&
+ bb->last_ins->inst_false_bb && bb->next_bb != bb->last_ins->inst_false_bb) {
+ /* we are careful when inverting, since bugs like #59580
+ * could show up when dealing with NaNs.
+ */
+ if (MONO_IS_COND_BRANCH_NOFP(bb->last_ins) && bb->next_bb == bb->last_ins->inst_true_bb) {
+ MonoBasicBlock *tmp = bb->last_ins->inst_true_bb;
+ bb->last_ins->inst_true_bb = bb->last_ins->inst_false_bb;
+ bb->last_ins->inst_false_bb = tmp;
+
+ bb->last_ins->opcode = mono_reverse_branch_op (bb->last_ins->opcode);
+ } else {
+ MonoInst *inst = mono_mempool_alloc0 (cfg->mempool, sizeof (MonoInst));
+ inst->opcode = OP_BR;
+ inst->inst_target_bb = bb->last_ins->inst_false_bb;
+ mono_bblock_add_inst (bb, inst);
+ }
+ }
+ }
+
+ if (cfg->verbose_level >= 4 && !cfg->globalra) {
+ for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
+ MonoInst *tree = bb->code;
+ g_print ("DUMP BLOCK %d:\n", bb->block_num);
+ if (!tree)
+ continue;
+ for (; tree; tree = tree->next) {
+ mono_print_ins_index (-1, tree);
+ }
+ }
+ }
+
+ /* FIXME: */
+ for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
+ bb->max_vreg = cfg->next_vreg;
+ }
+ }
+ else
+ mini_select_instructions (cfg);
mono_codegen (cfg);
if (cfg->verbose_level >= 2) {
mono_domain_unlock (cfg->domain);
}
- jinfo->method = method;
+ if (cfg->generic_sharing_context) {
+ MonoGenericContext object_context;
+
+ g_assert (!method_to_compile->klass->generic_class);
+ if (method_to_compile->klass->generic_container) {
+ int type_argc = method_to_compile->klass->generic_container->type_argc;
+
+ object_context.class_inst = get_object_generic_inst (type_argc);
+ } else {
+ object_context.class_inst = NULL;
+ }
+
+ if (mini_method_get_context (method_to_compile)->method_inst) {
+ int type_argc = mini_method_get_context (method_to_compile)->method_inst->type_argc;
+
+ object_context.method_inst = get_object_generic_inst (type_argc);
+ } else {
+ object_context.method_inst = NULL;
+ }
+
+ g_assert (object_context.class_inst || object_context.method_inst);
+
+ method_to_register = mono_class_inflate_generic_method (method_to_compile, &object_context);
+ } else {
+ g_assert (method == method_to_compile);
+ method_to_register = method;
+ }
+
+ jinfo->method = method_to_register;
jinfo->code_start = cfg->native_code;
jinfo->code_size = cfg->code_len;
jinfo->used_regs = cfg->used_int_regs;
jinfo->cas_inited = FALSE; /* initialization delayed at the first stack walk using this method */
jinfo->num_clauses = header->num_clauses;
- /*
- * Static methods only get a generic JIT info if they use the
- * rgctx variable (which they are forced to if they have any
- * open catch clauses).
- */
- if (cfg->generic_sharing_context &&
- (cfg->rgctx_var || !(method_to_compile->flags & METHOD_ATTRIBUTE_STATIC))) {
+ if (cfg->generic_sharing_context) {
MonoInst *inst;
MonoGenericJitInfo *gi;
gi->generic_sharing_context = cfg->generic_sharing_context;
- if (method_to_compile->flags & METHOD_ATTRIBUTE_STATIC) {
- inst = cfg->rgctx_var;
- g_assert (inst->opcode == OP_REGOFFSET);
- } else {
- inst = cfg->args [0];
- }
+ /*
+ * Non-generic static methods only get a "this" info
+ * if they use the rgctx variable (which they are
+ * forced to if they have any open catch clauses).
+ */
+ if (cfg->rgctx_var ||
+ (!(method_to_compile->flags & METHOD_ATTRIBUTE_STATIC) &&
+ !mini_method_get_context (method_to_compile)->method_inst)) {
+ gi->has_this = 1;
+
+ if ((method_to_compile->flags & METHOD_ATTRIBUTE_STATIC) ||
+ mini_method_get_context (method_to_compile)->method_inst) {
+ inst = cfg->rgctx_var;
+ g_assert (inst->opcode == OP_REGOFFSET);
+ } else {
+ inst = cfg->args [0];
+ }
- if (inst->opcode == OP_REGVAR) {
- gi->this_in_reg = 1;
- gi->this_reg = inst->dreg;
+ if (inst->opcode == OP_REGVAR) {
+ gi->this_in_reg = 1;
+ gi->this_reg = inst->dreg;
- //g_print ("this in reg %d\n", inst->dreg);
- } else {
- g_assert (inst->opcode == OP_REGOFFSET);
+ //g_print ("this in reg %d\n", inst->dreg);
+ } else {
+ g_assert (inst->opcode == OP_REGOFFSET);
#ifdef __i386__
- g_assert (inst->inst_basereg == X86_EBP);
+ g_assert (inst->inst_basereg == X86_EBP);
#elif defined(__x86_64__)
- g_assert (inst->inst_basereg == X86_EBP || inst->inst_basereg == X86_ESP);
+ g_assert (inst->inst_basereg == X86_EBP || inst->inst_basereg == X86_ESP);
#endif
- g_assert (inst->inst_offset >= G_MININT32 && inst->inst_offset <= G_MAXINT32);
+ g_assert (inst->inst_offset >= G_MININT32 && inst->inst_offset <= G_MAXINT32);
- gi->this_in_reg = 0;
- gi->this_reg = inst->inst_basereg;
- gi->this_offset = inst->inst_offset;
+ gi->this_in_reg = 0;
+ gi->this_reg = inst->inst_basereg;
+ gi->this_offset = inst->inst_offset;
- //g_print ("this at offset %d\n", inst->inst_offset);
+ //g_print ("this at offset %d from reg %d\n", gi->this_offset, gi->this_reg);
+ }
+ } else {
+ gi->has_this = 0;
}
}
mono_arch_fixup_jinfo (cfg);
#endif
- mono_domain_lock (cfg->domain);
- mono_jit_info_table_add (cfg->domain, jinfo);
+ if (!cfg->compile_aot) {
+ mono_domain_lock (cfg->domain);
+ mono_jit_info_table_add (cfg->domain, jinfo);
- if (cfg->method->dynamic)
- mono_dynamic_code_hash_lookup (cfg->domain, cfg->method)->ji = jinfo;
- mono_domain_unlock (cfg->domain);
+ if (cfg->method->dynamic)
+ mono_dynamic_code_hash_lookup (cfg->domain, cfg->method)->ji = jinfo;
+ mono_domain_unlock (cfg->domain);
+ }
/* collect statistics */
+ mono_perfcounters->jit_methods++;
+ mono_perfcounters->jit_bytes += header->code_size;
mono_jit_stats.allocated_code_size += cfg->code_len;
code_size_ratio = cfg->code_len;
- if (code_size_ratio > mono_jit_stats.biggest_method_size) {
- mono_jit_stats.biggest_method_size = code_size_ratio;
- mono_jit_stats.biggest_method = method;
+ if (code_size_ratio > mono_jit_stats.biggest_method_size && mono_jit_stats.enabled) {
+ mono_jit_stats.biggest_method_size = code_size_ratio;
+ g_free (mono_jit_stats.biggest_method);
+ mono_jit_stats.biggest_method = g_strdup_printf ("%s::%s)", method->klass->name, method->name);
}
code_size_ratio = (code_size_ratio * 100) / mono_method_get_header (method)->code_size;
- if (code_size_ratio > mono_jit_stats.max_code_size_ratio) {
+ if (code_size_ratio > mono_jit_stats.max_code_size_ratio && mono_jit_stats.enabled) {
mono_jit_stats.max_code_size_ratio = code_size_ratio;
- mono_jit_stats.max_ratio_method = method;
+ g_free (mono_jit_stats.max_ratio_method);
+ mono_jit_stats.max_ratio_method = g_strdup_printf ("%s::%s)", method->klass->name, method->name);
}
mono_jit_stats.native_code_size += cfg->code_len;
{
MonoMethod *open_method;
- if (!mono_method_is_generic_sharable_impl (method))
+ if (!mono_method_is_generic_sharable_impl (method, FALSE))
return NULL;
open_method = mono_method_get_declaring_generic_method (method);
return mono_domain_lookup_shared_generic (domain, open_method);
}
+/*
+ * LOCKING: Assumes domain->jit_code_hash_lock is held.
+ */
static MonoJitInfo*
-lookup_method (MonoDomain *domain, MonoMethod *method)
+lookup_method_inner (MonoDomain *domain, MonoMethod *method)
{
MonoJitInfo *ji = mono_internal_hash_table_lookup (&domain->jit_code_hash, method);
-	if (ji != NULL)
+	if (ji)
return ji;
+	/* Not found directly: fall back to a shared instance of the declaring generic method, if any */
return lookup_generic_method (domain, method);
}
+/*
+ * lookup_method:
+ *
+ *   Thread-safe wrapper around lookup_method_inner (): takes and releases
+ * the domain's jit_code_hash lock around the lookup.
+ */
+static MonoJitInfo*
+lookup_method (MonoDomain *domain, MonoMethod *method)
+{
+	MonoJitInfo *info;
+
+	mono_domain_jit_code_hash_lock (domain);
+	info = lookup_method_inner (domain, method);
+	mono_domain_jit_code_hash_unlock (domain);
+
+	return info;
+}
+
static gpointer
mono_jit_compile_method_inner (MonoMethod *method, MonoDomain *target_domain, int opt)
{
else
mono_lookup_pinvoke_call (method, NULL, NULL);
}
- nm = mono_marshal_get_native_wrapper (method, check_for_pending_exc);
- return mono_get_addr_from_ftnptr (mono_compile_method (nm));
+ nm = mono_marshal_get_native_wrapper (method, check_for_pending_exc, FALSE);
+ return mono_get_addr_from_ftnptr (mono_compile_method (nm));
- //if (mono_debug_format != MONO_DEBUG_FORMAT_NONE)
- //mono_debug_add_wrapper (method, nm);
+ //if (mono_debug_format != MONO_DEBUG_FORMAT_NONE)
+ //mono_debug_add_wrapper (method, nm);
} else if ((method->iflags & METHOD_IMPL_ATTRIBUTE_RUNTIME)) {
const char *name = method->name;
MonoMethod *nm;
return NULL;
}
+ if (mono_aot_only)
+ g_error ("Attempting to JIT compile method '%s' while running with --aot-only.\n", mono_method_full_name (method, TRUE));
+
cfg = mini_method_compile (method, opt, target_domain, TRUE, FALSE, 0);
switch (cfg->exception_type) {
- case MONO_EXCEPTION_NONE: break;
+ case MONO_EXCEPTION_NONE:
+ break;
case MONO_EXCEPTION_TYPE_LOAD:
case MONO_EXCEPTION_MISSING_FIELD:
case MONO_EXCEPTION_MISSING_METHOD:
- case MONO_EXCEPTION_FILE_NOT_FOUND: {
+ case MONO_EXCEPTION_FILE_NOT_FOUND:
+ case MONO_EXCEPTION_BAD_IMAGE: {
/* Throw a type load exception if needed */
MonoLoaderError *error = mono_loader_get_last_error ();
MonoException *ex;
ex = mono_exception_from_name_msg (mono_defaults.corlib, "System", "TypeLoadException", cfg->exception_message);
else if (cfg->exception_type == MONO_EXCEPTION_FILE_NOT_FOUND)
ex = mono_exception_from_name_msg (mono_defaults.corlib, "System", "FileNotFoundException", cfg->exception_message);
+ else if (cfg->exception_type == MONO_EXCEPTION_BAD_IMAGE)
+ ex = mono_get_exception_bad_image_format (cfg->exception_message);
else
g_assert_not_reached ();
}
mono_raise_exception ((MonoException*)exc);
}
+ case MONO_EXCEPTION_OBJECT_SUPPLIED: {
+ MonoException *exp = cfg->exception_ptr;
+ MONO_GC_UNREGISTER_ROOT (cfg->exception_ptr);
+ mono_destroy_compile (cfg);
+ mono_raise_exception (exp);
+ break;
+ }
default:
g_assert_not_reached ();
}
/* Check if some other thread already did the job. In this case, we can
discard the code this thread generated. */
- if ((info = lookup_method (target_domain, method))) {
+ mono_domain_jit_code_hash_lock (target_domain);
+
+ info = lookup_method_inner (target_domain, method);
+ if (info) {
/* We can't use a domain specific method in another domain */
if ((target_domain == mono_domain_get ()) || info->domain_neutral) {
code = info->code_start;
}
if (code == NULL) {
- mono_internal_hash_table_insert (&target_domain->jit_code_hash, method, cfg->jit_info);
+ mono_internal_hash_table_insert (&target_domain->jit_code_hash, cfg->jit_info->method, cfg->jit_info);
+ mono_domain_jit_code_hash_unlock (target_domain);
code = cfg->native_code;
- if (cfg->generic_sharing_context && mono_method_is_generic_sharable_impl (method)) {
+ if (cfg->generic_sharing_context && mono_method_is_generic_sharable_impl (method, FALSE)) {
/* g_print ("inserting method %s.%s.%s\n", method->klass->name_space, method->klass->name, method->name); */
mono_domain_register_shared_generic (target_domain,
mono_method_get_declaring_generic_method (method), cfg->jit_info);
mono_stats.generics_shared_methods++;
}
+ } else {
+ mono_domain_jit_code_hash_unlock (target_domain);
}
mono_destroy_compile (cfg);
else
target_domain = domain;
- mono_domain_lock (target_domain);
-
- if ((info = lookup_method (target_domain, method))) {
+ info = lookup_method (target_domain, method);
+ if (info) {
/* We can't use a domain specific method in another domain */
if (! ((domain != target_domain) && !info->domain_neutral)) {
MonoVTable *vtable;
- mono_domain_unlock (target_domain);
mono_jit_stats.methods_lookups++;
vtable = mono_class_vtable (domain, method->klass);
mono_runtime_class_init (vtable);
}
}
- mono_domain_unlock (target_domain);
p = mono_create_ftnptr (target_domain, mono_jit_compile_method_inner (method, target_domain, opt));
if (callinfo) {
return mono_jit_compile_method_with_opt (method, default_opt);
}
+#ifdef MONO_ARCH_HAVE_INVALIDATE_METHOD
static void
invalidated_delegate_trampoline (char *desc)
{
"See http://www.go-mono.com/delegate.html for an explanation and ways to fix this.",
desc);
}
+#endif
/*
* mono_jit_free_method:
else
target_domain = domain;
- mono_domain_lock (target_domain);
-
- if ((info = lookup_method (target_domain, method))) {
+ info = lookup_method (target_domain, method);
+ if (info) {
/* We can't use a domain specific method in another domain */
if (! ((domain != target_domain) && !info->domain_neutral)) {
- mono_domain_unlock (target_domain);
mono_jit_stats.methods_lookups++;
return info->code_start;
}
}
- mono_domain_unlock (target_domain);
-
return NULL;
}
return NULL;
}
- if ((method->flags & METHOD_ATTRIBUTE_STATIC) &&
+ if (((method->flags & METHOD_ATTRIBUTE_STATIC) ||
+ (method->is_inflated && mono_method_get_context (method)->method_inst)) &&
mono_class_generic_sharing_enabled (method->klass) &&
- mono_method_is_generic_sharable_impl (method)) {
+ mono_method_is_generic_sharable_impl (method, FALSE)) {
to_compile = mono_marshal_get_static_rgctx_invoke (method);
} else {
to_compile = method;
MonoException *exc = NULL;
#endif
MonoJitInfo *ji;
-
-#ifdef MONO_ARCH_SIGSEGV_ON_ALTSTACK
MonoJitTlsData *jit_tls = TlsGetValue (mono_jit_tls_id);
-#endif
+
GET_CONTEXT;
#ifdef MONO_ARCH_USE_SIGACTION
}
#endif
+	/* The thread might not be registered with the runtime */
+ if (!mono_domain_get () || !jit_tls)
+ mono_handle_native_sigsegv (SIGSEGV, ctx);
+
ji = mono_jit_info_table_find (mono_domain_get (), mono_arch_ip_from_context (ctx));
#ifdef MONO_ARCH_SIGSEGV_ON_ALTSTACK
MonoJitInfo res;
MonoContext new_mono_context;
int native_offset;
- ji = mono_arch_find_jit_info (domain, jit_tls, &res, NULL, &mono_context,
+ ji = mono_find_jit_info (domain, jit_tls, &res, NULL, &mono_context,
&new_mono_context, NULL, &lmf, &native_offset, NULL);
while ((ji != NULL) && (current_frame_index <= call_chain_depth)) {
ips [current_frame_index] = MONO_CONTEXT_GET_IP (&new_mono_context);
current_frame_index ++;
mono_context = new_mono_context;
- ji = mono_arch_find_jit_info (domain, jit_tls, &res, NULL, &mono_context,
+ ji = mono_find_jit_info (domain, jit_tls, &res, NULL, &mono_context,
&new_mono_context, NULL, &lmf, &native_offset, NULL);
}
}
{
static gpointer tramp = NULL;
if (!tramp)
- tramp = mono_arch_create_specific_trampoline (MONO_FAKE_IMT_METHOD, MONO_TRAMPOLINE_GENERIC, mono_get_root_domain (), NULL);
+ tramp = mono_create_specific_trampoline (MONO_FAKE_IMT_METHOD, MONO_TRAMPOLINE_JIT, mono_get_root_domain (), NULL);
return tramp;
}
#endif
{
static gpointer tramp = NULL;
if (!tramp)
- tramp = mono_arch_create_specific_trampoline (MONO_FAKE_VTABLE_METHOD, MONO_TRAMPOLINE_GENERIC, mono_get_root_domain (), NULL);
+ tramp = mono_create_specific_trampoline (MONO_FAKE_VTABLE_METHOD, MONO_TRAMPOLINE_JIT, mono_get_root_domain (), NULL);
return tramp;
}
#endif
debug_options.break_on_unverified = TRUE;
else if (!strcmp (arg, "no-gdb-backtrace"))
debug_options.no_gdb_backtrace = TRUE;
+ else if (!strcmp (arg, "dont-free-domains"))
+ mono_dont_free_domains = TRUE;
else {
fprintf (stderr, "Invalid option for the MONO_DEBUG env variable: %s\n", arg);
- fprintf (stderr, "Available options: 'handle-sigint', 'keep-delegates', 'collect-pagefault-stats', 'break-on-unverified', 'no-gdb-backtrace'\n");
+ fprintf (stderr, "Available options: 'handle-sigint', 'keep-delegates', 'collect-pagefault-stats', 'break-on-unverified', 'no-gdb-backtrace', 'dont-free-domains'\n");
exit (1);
}
}
+
+ g_strfreev (args);
}
MonoDebugOptions *
{
return &debug_options;
}
+
+/*
+ * mini_create_jit_domain_info:
+ *
+ *   Allocate the JIT-specific per-domain data and attach it to the domain.
+ * Installed below as the domain creation hook.
+ */
+static void
+mini_create_jit_domain_info (MonoDomain *domain)
+{
+	MonoJitDomainInfo *info = g_new0 (MonoJitDomainInfo, 1);
+
+	domain->runtime_info = info;
+}
+
+/*
+ * delete_jump_list:
+ *
+ *   GHFunc used with g_hash_table_foreach to free each GSList stored as a
+ * value in jump_target_got_slot_hash; key and user_data are unused.
+ */
+static void
+delete_jump_list (gpointer key, gpointer value, gpointer user_data)
+{
+	g_slist_free (value);
+}
+
+/*
+ * mini_free_jit_domain_info:
+ *
+ *   Free the JIT-specific per-domain data. Installed below as the domain
+ * free hook. The GSList values in jump_target_got_slot_hash are freed
+ * before the table itself is destroyed.
+ */
+static void
+mini_free_jit_domain_info (MonoDomain *domain)
+{
+	MonoJitDomainInfo *info = jit_domain_info (domain);
+
+	if (info->jump_target_got_slot_hash) {
+		g_hash_table_foreach (info->jump_target_got_slot_hash, delete_jump_list, NULL);
+		g_hash_table_destroy (info->jump_target_got_slot_hash);
+	}
+	/* info and domain->runtime_info are the same pointer (set in mini_create_jit_domain_info) */
+	g_free (domain->runtime_info);
+}
MonoDomain *
mini_init (const char *filename, const char *runtime_version)
mono_trampolines_init ();
- mono_exceptions_init ();
-
if (!g_thread_supported ())
g_thread_init (NULL);
mono_install_jump_trampoline (mono_create_jump_trampoline);
mono_install_remoting_trampoline (mono_jit_create_remoting_trampoline);
mono_install_delegate_trampoline (mono_create_delegate_trampoline);
+ mono_install_create_domain_hook (mini_create_jit_domain_info);
+ mono_install_free_domain_hook (mini_free_jit_domain_info);
#endif
#define JIT_INVOKE_WORKS
#ifdef JIT_INVOKE_WORKS
mono_install_runtime_invoke (mono_jit_runtime_invoke);
- mono_install_handler (mono_arch_get_throw_exception ());
#endif
mono_install_stack_walk (mono_jit_walk_stack);
mono_install_get_cached_class_info (mono_aot_get_cached_class_info);
domain = mono_init_version (filename, runtime_version);
else
domain = mono_init_from_assembly (filename, filename);
+
+ if (mono_aot_only) {
+		/* The IMT tables are very dynamic, thus they are hard to AOT */
+ mono_use_imt = FALSE;
+ /* This helps catch code allocation requests */
+ mono_code_manager_set_read_only (domain->code_mp);
+ }
+
#ifdef MONO_ARCH_HAVE_IMT
- mono_install_imt_thunk_builder (mono_arch_build_imt_thunk);
- mono_install_imt_trampoline (mini_get_imt_trampoline ());
+ if (mono_use_imt) {
+ mono_install_imt_thunk_builder (mono_arch_build_imt_thunk);
+ mono_install_imt_trampoline (mini_get_imt_trampoline ());
#if MONO_ARCH_COMMON_VTABLE_TRAMPOLINE
- mono_install_vtable_trampoline (mini_get_vtable_trampoline ());
+ mono_install_vtable_trampoline (mini_get_vtable_trampoline ());
#endif
+ }
#endif
+
+ /* This must come after mono_init () in the aot-only case */
+ mono_exceptions_init ();
+ mono_install_handler (mono_get_throw_exception ());
+
mono_icall_init ();
mono_add_internal_call ("System.Diagnostics.StackFrame::get_frame_info",
register_icall (mono_jit_thread_attach, "mono_jit_thread_attach", "void", TRUE);
register_icall (mono_domain_get, "mono_domain_get", "ptr", TRUE);
- register_icall (mono_arch_get_throw_exception (), "mono_arch_throw_exception", "void object", TRUE);
- register_icall (mono_arch_get_rethrow_exception (), "mono_arch_rethrow_exception", "void object", TRUE);
- register_icall (mono_arch_get_throw_exception_by_name (), "mono_arch_throw_exception_by_name", "void ptr", TRUE);
+ register_icall (mono_get_throw_exception (), "mono_arch_throw_exception", "void object", TRUE);
+ register_icall (mono_get_rethrow_exception (), "mono_arch_rethrow_exception", "void object", TRUE);
+ register_icall (mono_get_throw_exception_by_name (), "mono_arch_throw_exception_by_name", "void ptr", TRUE);
#if MONO_ARCH_HAVE_THROW_CORLIB_EXCEPTION
- register_icall (mono_arch_get_throw_corlib_exception (), "mono_arch_throw_corlib_exception",
+ register_icall (mono_get_throw_corlib_exception (), "mono_arch_throw_corlib_exception",
"void ptr", TRUE);
#endif
register_icall (mono_thread_get_undeniable_exception, "mono_thread_get_undeniable_exception", "object", FALSE);
mono_register_opcode_emulation (CEE_DIV_UN, "__emul_idiv_un", "int32 int32 int32", mono_idiv_un, FALSE);
mono_register_opcode_emulation (CEE_REM, "__emul_irem", "int32 int32 int32", mono_irem, FALSE);
mono_register_opcode_emulation (CEE_REM_UN, "__emul_irem_un", "int32 int32 int32", mono_irem_un, FALSE);
+ mono_register_opcode_emulation (OP_IDIV, "__emul_op_idiv", "int32 int32 int32", mono_idiv, FALSE);
+ mono_register_opcode_emulation (OP_IDIV_UN, "__emul_op_idiv_un", "int32 int32 int32", mono_idiv_un, FALSE);
+ mono_register_opcode_emulation (OP_IREM, "__emul_op_irem", "int32 int32 int32", mono_irem, FALSE);
+ mono_register_opcode_emulation (OP_IREM_UN, "__emul_op_irem_un", "int32 int32 int32", mono_irem_un, FALSE);
#endif
#ifdef MONO_ARCH_EMULATE_MUL_DIV
mono_register_opcode_emulation (CEE_MUL_OVF, "__emul_imul_ovf", "int32 int32 int32", mono_imul_ovf, FALSE);
mono_register_opcode_emulation (CEE_MUL_OVF_UN, "__emul_imul_ovf_un", "int32 int32 int32", mono_imul_ovf_un, FALSE);
mono_register_opcode_emulation (CEE_MUL, "__emul_imul", "int32 int32 int32", mono_imul, TRUE);
+ mono_register_opcode_emulation (OP_IMUL, "__emul_op_imul", "int32 int32 int32", mono_imul, TRUE);
+ mono_register_opcode_emulation (OP_IMUL_OVF, "__emul_op_imul_ovf", "int32 int32 int32", mono_imul_ovf, FALSE);
+ mono_register_opcode_emulation (OP_IMUL_OVF_UN, "__emul_op_imul_ovf_un", "int32 int32 int32", mono_imul_ovf_un, FALSE);
#endif
#if defined(MONO_ARCH_EMULATE_MUL_DIV) || defined(MONO_ARCH_SOFT_FLOAT)
mono_register_opcode_emulation (OP_FDIV, "__emul_fdiv", "double double double", mono_fdiv, FALSE);
#endif
#ifdef MONO_ARCH_EMULATE_CONV_R8_UN
mono_register_opcode_emulation (CEE_CONV_R_UN, "__emul_conv_r_un", "double int32", mono_conv_to_r8_un, FALSE);
+ mono_register_opcode_emulation (OP_ICONV_TO_R_UN, "__emul_iconv_to_r_un", "double int32", mono_conv_to_r8_un, FALSE);
#endif
#ifdef MONO_ARCH_EMULATE_LCONV_TO_R8
mono_register_opcode_emulation (OP_LCONV_TO_R8, "__emul_lconv_to_r8", "double long", mono_lconv_to_r8, FALSE);
mono_register_opcode_emulation (OP_FMUL, "__emul_fmul", "double double double", mono_fmul, FALSE);
mono_register_opcode_emulation (OP_FNEG, "__emul_fneg", "double double", mono_fneg, FALSE);
mono_register_opcode_emulation (CEE_CONV_R8, "__emul_conv_r8", "double int32", mono_conv_to_r8, FALSE);
+ mono_register_opcode_emulation (OP_ICONV_TO_R8, "__emul_iconv_to_r8", "double int32", mono_conv_to_r8, FALSE);
mono_register_opcode_emulation (CEE_CONV_R4, "__emul_conv_r4", "double int32", mono_conv_to_r4, FALSE);
+ mono_register_opcode_emulation (OP_ICONV_TO_R4, "__emul_iconv_to_r4", "double int32", mono_conv_to_r4, FALSE);
mono_register_opcode_emulation (OP_FCONV_TO_R4, "__emul_fconv_to_r4", "double double", mono_fconv_r4, FALSE);
mono_register_opcode_emulation (OP_FCONV_TO_I1, "__emul_fconv_to_i1", "int8 double", mono_fconv_i1, FALSE);
mono_register_opcode_emulation (OP_FCONV_TO_I2, "__emul_fconv_to_i2", "int16 double", mono_fconv_i2, FALSE);
register_icall (mono_array_new_specific, "mono_array_new_specific", "object ptr int32", FALSE);
register_icall (mono_runtime_class_init, "mono_runtime_class_init", "void ptr", FALSE);
register_icall (mono_ldftn, "mono_ldftn", "ptr ptr", FALSE);
- register_icall (mono_ldftn_nosync, "mono_ldftn_nosync", "ptr ptr", FALSE);
register_icall (mono_ldvirtfn, "mono_ldvirtfn", "ptr object ptr", FALSE);
- register_icall (mono_helper_compile_generic_method, "compile_generic_method", "ptr object ptr ptr ptr", FALSE);
+ register_icall (mono_ldvirtfn_gshared, "mono_ldvirtfn_gshared", "ptr object ptr", FALSE);
+ register_icall (mono_helper_compile_generic_method, "compile_generic_method", "ptr object ptr ptr", FALSE);
register_icall (mono_helper_ldstr, "helper_ldstr", "object ptr int", FALSE);
register_icall (mono_helper_ldstr_mscorlib, "helper_ldstr_mscorlib", "object int", FALSE);
register_icall (mono_helper_newobj_mscorlib, "helper_newobj_mscorlib", "object int", FALSE);
register_icall (mono_value_copy, "mono_value_copy", "void ptr ptr ptr", FALSE);
- register_icall (mono_helper_get_rgctx_other_ptr, "get_rgctx_other_ptr", "ptr ptr ptr int32 int32 int32 int32", FALSE);
register_icall (mono_object_castclass, "mono_object_castclass", "object object ptr", FALSE);
register_icall (mono_break, "mono_break", NULL, TRUE);
register_icall (mono_create_corlib_exception_0, "mono_create_corlib_exception_0", "object int", TRUE);
register_icall (mono_create_corlib_exception_1, "mono_create_corlib_exception_1", "object int object", TRUE);
register_icall (mono_create_corlib_exception_2, "mono_create_corlib_exception_2", "object int object object", TRUE);
+ register_icall (mono_array_new_1, "mono_array_new_1", "object ptr int", FALSE);
+ register_icall (mono_array_new_2, "mono_array_new_2", "object ptr int int", FALSE);
#endif
#define JIT_RUNTIME_WORKS
mono_thread_attach (domain);
+ mono_profiler_runtime_initialized ();
+
MONO_PROBE_VES_INIT_END ();
return domain;
g_print ("Analyze stack repeat: %ld\n", mono_jit_stats.analyze_stack_repeat);
g_print ("Compiled CIL code size: %ld\n", mono_jit_stats.cil_code_size);
g_print ("Native code size: %ld\n", mono_jit_stats.native_code_size);
- g_print ("Max code size ratio: %.2f (%s::%s)\n", mono_jit_stats.max_code_size_ratio/100.0,
- mono_jit_stats.max_ratio_method->klass->name, mono_jit_stats.max_ratio_method->name);
- g_print ("Biggest method: %ld (%s::%s)\n", mono_jit_stats.biggest_method_size,
- mono_jit_stats.biggest_method->klass->name, mono_jit_stats.biggest_method->name);
+ g_print ("Max code size ratio: %.2f (%s)\n", mono_jit_stats.max_code_size_ratio/100.0,
+ mono_jit_stats.max_ratio_method);
+ g_print ("Biggest method: %ld (%s)\n", mono_jit_stats.biggest_method_size,
+ mono_jit_stats.biggest_method);
g_print ("Code reallocs: %ld\n", mono_jit_stats.code_reallocs);
g_print ("Allocated code size: %ld\n", mono_jit_stats.allocated_code_size);
g_print ("Inlineable methods: %ld\n", mono_jit_stats.inlineable_methods);
g_print ("Inlined methods: %ld\n", mono_jit_stats.inlined_methods);
+ g_print ("Regvars: %ld\n", mono_jit_stats.regvars);
g_print ("Locals stack size: %ld\n", mono_jit_stats.locals_stack_size);
g_print ("\nCreated object count: %ld\n", mono_stats.new_object_count);
g_print ("Metadata pagefaults : %d\n", mono_raw_buffer_get_n_pagefaults ());
g_print ("AOT pagefaults : %d\n", mono_aot_get_n_pagefaults ());
}
+
+ g_free (mono_jit_stats.max_ratio_method);
+ mono_jit_stats.max_ratio_method = NULL;
+ g_free (mono_jit_stats.biggest_method);
+ mono_jit_stats.biggest_method = NULL;
}
}