MonoInst *
mono_get_got_var (MonoCompile *cfg)
{
- if (!cfg->compile_aot || !cfg->backend->need_got_var)
+ if (!cfg->compile_aot || !cfg->backend->need_got_var || cfg->llvm_only)
return NULL;
if (!cfg->got_var) {
cfg->got_var = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
}
static void
-emit_instrumentation_call (MonoCompile *cfg, void *func)
+emit_instrumentation_call (MonoCompile *cfg, void *func, gboolean entry)
{
MonoInst *iargs [1];
if (cfg->method != cfg->current_method)
return;
- if (cfg->prof_options & MONO_PROFILE_ENTER_LEAVE) {
+ if (mono_profiler_should_instrument_method (cfg->method, entry)) {
EMIT_NEW_METHODCONST (cfg, iargs [0], cfg->method);
mono_emit_jit_icall (cfg, func, iargs);
}
tail = FALSE;
if (tail) {
- emit_instrumentation_call (cfg, mono_profiler_method_leave);
+ emit_instrumentation_call (cfg, mono_profiler_raise_method_leave, FALSE);
MONO_INST_NEW_CALL (cfg, call, OP_TAILCALL);
} else
g_assert (ctx);
g_assert (ctx->method_inst);
g_assert (ctx->method_inst->type_argc == 1);
- MonoType *t = mini_get_underlying_type (ctx->method_inst->type_argv [0]);
- MonoClass *klass = mono_class_from_mono_type (t);
+ MonoType *arg_type = ctx->method_inst->type_argv [0];
+ MonoType *t;
+ MonoClass *klass;
ins = NULL;
+ /* Resolve the argument class as far as possible so we can handle common cases fast */
+ t = mini_get_underlying_type (arg_type);
+ klass = mono_class_from_mono_type (t);
mono_class_init (klass);
if (MONO_TYPE_IS_REFERENCE (t))
EMIT_NEW_ICONST (cfg, ins, 1);
else {
g_assert (cfg->gshared);
- int context_used = mini_class_check_context_used (cfg, klass);
+ /* Have to use the original argument class here */
+ MonoClass *arg_class = mono_class_from_mono_type (arg_type);
+ int context_used = mini_class_check_context_used (cfg, arg_class);
/* This returns 1 or 2 */
- MonoInst *info = mini_emit_get_rgctx_klass (cfg, context_used, klass, MONO_RGCTX_INFO_CLASS_IS_REF_OR_CONTAINS_REFS);
+ MonoInst *info = mini_emit_get_rgctx_klass (cfg, context_used, arg_class, MONO_RGCTX_INFO_CLASS_IS_REF_OR_CONTAINS_REFS);
int dreg = alloc_ireg (cfg);
EMIT_NEW_BIALU_IMM (cfg, ins, OP_ISUB_IMM, dreg, info->dreg, 1);
}
{
if (method->klass == mono_defaults.string_class) {
/* managed string allocation support */
- if (strcmp (method->name, "InternalAllocateStr") == 0 && !(mono_profiler_events & MONO_PROFILE_ALLOCATIONS) && !(cfg->opt & MONO_OPT_SHARED)) {
+ if (strcmp (method->name, "InternalAllocateStr") == 0 && !(cfg->opt & MONO_OPT_SHARED)) {
MonoInst *iargs [2];
MonoVTable *vtable = mono_class_vtable (cfg->domain, method->klass);
MonoMethod *managed_alloc = NULL;
cfg->dont_inline = g_list_prepend (cfg->dont_inline, method);
if (cfg->method == method) {
- if (cfg->prof_options & MONO_PROFILE_INS_COVERAGE)
- cfg->coverage_info = mono_profiler_coverage_alloc (cfg->method, header->code_size);
+ cfg->coverage_info = mono_profiler_coverage_alloc (cfg->method, header->code_size);
/* ENTRY BLOCK */
NEW_BBLOCK (cfg, start_bblock);
tblock->real_offset = clause->handler_offset;
tblock->flags |= BB_EXCEPTION_HANDLER;
+ if (clause->flags == MONO_EXCEPTION_CLAUSE_FINALLY)
+ mono_create_exvar_for_offset (cfg, clause->handler_offset);
/*
* Linking the try block with the EH block hinders inlining as we won't be able to
* merge the bblocks from inlining and produce an artificial hole for no good reason.
CHECK_STACK_OVF (1);
n = ip [1];
CHECK_LOCAL (n);
- EMIT_NEW_LOCLOAD (cfg, ins, n);
+ if ((ip [2] == CEE_LDFLD) && ip_in_bb (cfg, cfg->cbb, ip + 2) && MONO_TYPE_ISSTRUCT (header->locals [n])) {
+ /* Avoid loading a struct just to load one of its fields */
+ EMIT_NEW_LOCLOADA (cfg, ins, n);
+ } else {
+ EMIT_NEW_LOCLOAD (cfg, ins, n);
+ }
*sp++ = ins;
ip += 2;
break;
if (cfg->gshared && mono_method_check_context_used (cmethod))
GENERIC_SHARING_FAILURE (CEE_JMP);
- emit_instrumentation_call (cfg, mono_profiler_method_leave);
+ emit_instrumentation_call (cfg, mono_profiler_raise_method_leave, FALSE);
fsig = mono_method_signature (cmethod);
n = fsig->param_count + fsig->hasthis;
/* Handle tail calls similarly to normal calls */
tail_call = TRUE;
} else {
- emit_instrumentation_call (cfg, mono_profiler_method_leave);
+ emit_instrumentation_call (cfg, mono_profiler_raise_method_leave, FALSE);
MONO_INST_NEW_CALL (cfg, call, OP_JMP);
call->tail_call = TRUE;
cfg->ret_var_set = TRUE;
}
} else {
- emit_instrumentation_call (cfg, mono_profiler_method_leave);
+ emit_instrumentation_call (cfg, mono_profiler_raise_method_leave, FALSE);
if (cfg->lmf_var && cfg->cbb->in_count && !cfg->llvm_only)
emit_pop_lmf (cfg);
if ((handlers = mono_find_final_block (cfg, ip, target, MONO_EXCEPTION_CLAUSE_FINALLY))) {
GList *tmp;
- MonoExceptionClause *clause;
for (tmp = handlers; tmp; tmp = tmp->next) {
- clause = (MonoExceptionClause *)tmp->data;
+ MonoExceptionClause *clause = (MonoExceptionClause *)tmp->data;
+ MonoInst *abort_exc = (MonoInst *)mono_find_exvar_for_offset (cfg, clause->handler_offset);
+ MonoBasicBlock *dont_throw;
+
tblock = cfg->cil_offset_to_bb [clause->handler_offset];
g_assert (tblock);
link_bblock (cfg, cfg->cbb, tblock);
+
+ MONO_EMIT_NEW_PCONST (cfg, abort_exc->dreg, 0);
+
MONO_INST_NEW (cfg, ins, OP_CALL_HANDLER);
ins->inst_target_bb = tblock;
ins->inst_eh_block = clause;
MONO_ADD_INS (cfg->cbb, ins);
cfg->cbb->has_call_handler = 1;
+
+ /* Throw exception if exvar is set */
+ /* FIXME Do we need this for calls from catch/filter ? */
+ NEW_BBLOCK (cfg, dont_throw);
+ MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, abort_exc->dreg, 0);
+ MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, dont_throw);
+ mono_emit_jit_icall (cfg, mono_thread_self_abort, NULL);
+ cfg->cbb->clause_hole = clause;
+
+ MONO_START_BB (cfg, dont_throw);
+ cfg->cbb->clause_hole = clause;
+
if (COMPILE_LLVM (cfg)) {
MonoBasicBlock *target_bb;
case CEE_MONO_LDPTR_CARD_TABLE:
case CEE_MONO_LDPTR_NURSERY_START:
case CEE_MONO_LDPTR_NURSERY_BITS:
- case CEE_MONO_LDPTR_INT_REQ_FLAG: {
+ case CEE_MONO_LDPTR_INT_REQ_FLAG:
+ case CEE_MONO_LDPTR_PROFILER_ALLOCATION_COUNT: {
CHECK_STACK_OVF (1);
switch (ip [1]) {
case CEE_MONO_LDPTR_INT_REQ_FLAG:
ins = mini_emit_runtime_constant (cfg, MONO_PATCH_INFO_INTERRUPTION_REQUEST_FLAG, NULL);
break;
+ case CEE_MONO_LDPTR_PROFILER_ALLOCATION_COUNT:
+ ins = mini_emit_runtime_constant (cfg, MONO_PATCH_INFO_PROFILER_ALLOCATION_COUNT, NULL);
+ break;
default:
g_assert_not_reached ();
break;
CHECK_OPSIZE (4);
n = read16 (ip + 2);
CHECK_LOCAL (n);
- EMIT_NEW_LOCLOAD (cfg, ins, n);
+ if ((ip [4] == CEE_LDFLD) && ip_in_bb (cfg, cfg->cbb, ip + 4) && header->locals [n]->type == MONO_TYPE_VALUETYPE) {
+ /* Avoid loading a struct just to load one of its fields */
+ EMIT_NEW_LOCLOADA (cfg, ins, n);
+ } else {
+ EMIT_NEW_LOCLOAD (cfg, ins, n);
+ }
*sp++ = ins;
ip += 4;
break;
}
cfg->cbb = init_localsbb;
- emit_instrumentation_call (cfg, mono_profiler_method_enter);
+ emit_instrumentation_call (cfg, mono_profiler_raise_method_enter, TRUE);
if (seq_points) {
MonoBasicBlock *bb;