return -1;
}
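+/*
+ * ip_in_finally_clause:
+ *
+ *   Return whether the IL offset OFFSET is inside a finally or fault
+ * handler of CFG's method.
+ */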
+static gboolean
+ip_in_finally_clause (MonoCompile *cfg, int offset)
+{
+ MonoMethodHeader *header = cfg->header;
+ MonoExceptionClause *clause;
+ int i;
+
+ for (i = 0; i < header->num_clauses; ++i) {
+ clause = &header->clauses [i];
+ if (clause->flags != MONO_EXCEPTION_CLAUSE_FINALLY && clause->flags != MONO_EXCEPTION_CLAUSE_FAULT)
+ continue;
+
+ if (MONO_OFFSET_IN_HANDLER (clause, offset))
+ return TRUE;
+ }
+ return FALSE;
+}
+
static GList*
mono_find_final_block (MonoCompile *cfg, unsigned char *ip, unsigned char *target, int type)
{
if (target->byref) {
/* FIXME: check that the pointed to types match */
if (arg->type == STACK_MP) {
- if (cfg->verbose_level) printf ("ok\n");
/* This is needed to handle gshared types + ldaddr. We lower the types so we can handle enums and other typedef-like types. */
MonoClass *target_class_lowered = mono_class_from_mono_type (mini_get_underlying_type (&mono_class_from_mono_type (target)->byval_arg));
MonoClass *source_class_lowered = mono_class_from_mono_type (mini_get_underlying_type (&arg->klass->byval_arg));
RGCTX. */
addr = emit_get_rgctx_method (cfg, context_used, method,
MONO_RGCTX_INFO_GENERIC_METHOD_CODE);
- if (cfg->llvm_only && cfg->gsharedvt) {
+ if (cfg->llvm_only) {
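+ /* Remember the signature; the AOT compiler can use it to generate gsharedvt out wrappers for this indirect call */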
+ cfg->signatures = g_slist_prepend_mempool (cfg->mempool, cfg->signatures, mono_method_signature (method));
return emit_llvmonly_calli (cfg, mono_method_signature (method), &val, addr);
} else {
rgctx = emit_get_rgctx (cfg, cfg->current_method, context_used);
if (opcode && fsig->param_count == 1) {
MONO_INST_NEW (cfg, ins, opcode);
ins->type = STACK_R8;
- ins->dreg = mono_alloc_freg (cfg);
+ ins->dreg = mono_alloc_dreg (cfg, ins->type);
ins->sreg1 = args [0]->dreg;
MONO_ADD_INS (cfg->cbb, ins);
}
if (opcode && fsig->param_count == 2) {
MONO_INST_NEW (cfg, ins, opcode);
ins->type = fsig->params [0]->type == MONO_TYPE_I4 ? STACK_I4 : STACK_I8;
- ins->dreg = mono_alloc_ireg (cfg);
+ ins->dreg = mono_alloc_dreg (cfg, ins->type);
ins->sreg1 = args [0]->dreg;
ins->sreg2 = args [1]->dreg;
MONO_ADD_INS (cfg->cbb, ins);
!strcmp (cmethod->klass->image->assembly->aname.name, "monotouch")) &&
!strcmp (cmethod->klass->name_space, "XamCore.ObjCRuntime") &&
!strcmp (cmethod->klass->name, "Selector")) ||
- (!strcmp (cmethod->klass->image->assembly->aname.name, "Xamarin.iOS") &&
+ ((!strcmp (cmethod->klass->image->assembly->aname.name, "Xamarin.iOS") ||
+ !strcmp (cmethod->klass->image->assembly->aname.name, "Xamarin.Mac")) &&
!strcmp (cmethod->klass->name_space, "ObjCRuntime") &&
!strcmp (cmethod->klass->name, "Selector"))
) {
- if (cfg->backend->have_objc_get_selector &&
+ if ((cfg->backend->have_objc_get_selector || cfg->compile_llvm) &&
!strcmp (cmethod->name, "GetHandle") && fsig->param_count == 1 &&
(args [0]->opcode == OP_GOT_ENTRY || args [0]->opcode == OP_AOTCONST) &&
- cfg->compile_aot && !cfg->llvm_only) {
+ cfg->compile_aot) {
MonoInst *pi;
MonoJumpInfoToken *ji;
char *s;
- // FIXME: llvmonly
-
- cfg->exception_message = g_strdup ("GetHandle");
- cfg->disable_llvm = TRUE;
-
if (args [0]->opcode == OP_GOT_ENTRY) {
pi = (MonoInst *)args [0]->inst_p1;
g_assert (pi->opcode == OP_PATCH_INFO);
if (!header) {
mono_cfg_set_exception (cfg, MONO_EXCEPTION_MONO_ERROR);
goto exception_exit;
+ } else {
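+ /* Queue the header so it is freed when the cfg is destroyed */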
+ cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header);
}
+
generic_container = mono_method_get_generic_container (method);
sig = mono_method_signature (method);
num_args = sig->hasthis + sig->param_count;
for (i = 0; i < n; ++i)
EMIT_NEW_ARGLOAD (cfg, call->args [i], i);
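+ /* Pass the current method's valuetype return address on to the call */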
+ if (mini_type_is_vtype (mini_get_underlying_type (call->signature->ret)))
+ call->vret_var = cfg->vret_addr;
+
mono_arch_emit_call (cfg, call);
cfg->param_area = MAX(cfg->param_area, call->stack_usage);
MONO_ADD_INS (cfg->cbb, (MonoInst*)call);
inline_costs += 10 * num_calls++;
+ /*
+ * Synchronized wrappers.
+ * It's hard to determine where to replace a method with its synchronized
+ * wrapper without causing an infinite recursion. The current solution is
+ * to add the synchronized wrapper in the trampolines, and to
+ * change the called method to a dummy wrapper, and resolve that wrapper
+ * to the real method in mono_jit_compile_method ().
+ */
+ if (cfg->method->wrapper_type == MONO_WRAPPER_SYNCHRONIZED) {
+ MonoMethod *orig = mono_marshal_method_from_wrapper (cfg->method);
+ if (cmethod == orig || (cmethod->is_inflated && mono_method_get_declaring_generic_method (cmethod) == orig))
+ cmethod = mono_marshal_get_synchronized_inner_wrapper (cmethod);
+ }
+
/*
* Making generic calls out of gsharedvt methods.
* This needs to be used for all generic calls, not just ones with a gsharedvt signature, to avoid
}
}
- /*
- * Synchronized wrappers.
- * Its hard to determine where to replace a method with its synchronized
- * wrapper without causing an infinite recursion. The current solution is
- * to add the synchronized wrapper in the trampolines, and to
- * change the called method to a dummy wrapper, and resolve that wrapper
- * to the real method in mono_jit_compile_method ().
- */
- if (cfg->method->wrapper_type == MONO_WRAPPER_SYNCHRONIZED) {
- MonoMethod *orig = mono_marshal_method_from_wrapper (cfg->method);
- if (cmethod == orig || (cmethod->is_inflated && mono_method_get_declaring_generic_method (cmethod) == orig))
- cmethod = mono_marshal_get_synchronized_inner_wrapper (cmethod);
- }
-
/*
* Virtual calls in llvm-only mode.
*/
MONO_EMIT_NULL_CHECK (cfg, sp [0]->dreg);
+ if (ins_flag & MONO_INST_VOLATILE) {
+ /* Volatile stores have release semantics, see 12.6.7 in ECMA-335 */
+ emit_memory_barrier (cfg, MONO_MEMORY_BARRIER_REL);
+ }
+
if (mini_is_gsharedvt_klass (klass)) {
MonoInst *offset_ins;
INLINE_FAILURE ("throw");
break;
case CEE_ENDFINALLY:
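+ /* endfinally is only valid inside a finally or fault handler (ECMA-335) */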
+ if (!ip_in_finally_clause (cfg, ip - header->code))
+ UNVERIFIED;
/* mono_save_seq_point_info () depends on this */
if (sp != stack_start)
emit_seq_point (cfg, method, ip, FALSE, FALSE);
ip += 4;
inline_costs += 1;
break;
- case CEE_LOCALLOC:
+ case CEE_LOCALLOC: {
CHECK_STACK (1);
+ MonoBasicBlock *non_zero_bb, *end_bb;
+ int alloc_ptr = alloc_preg (cfg);
--sp;
if (sp != stack_start)
UNVERIFIED;
*/
INLINE_FAILURE("localloc");
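+ /* Branch around the allocation: a zero size produces a NULL result */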
+ NEW_BBLOCK (cfg, non_zero_bb);
+ NEW_BBLOCK (cfg, end_bb);
+
+ /* if size != zero */
+ MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, sp [0]->dreg, 0);
+ MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBNE_UN, non_zero_bb);
+
+ /* size is zero, so result is NULL */
+ MONO_EMIT_NEW_PCONST (cfg, alloc_ptr, NULL);
+ MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
+
+ MONO_START_BB (cfg, non_zero_bb);
MONO_INST_NEW (cfg, ins, OP_LOCALLOC);
- ins->dreg = alloc_preg (cfg);
+ ins->dreg = alloc_ptr;
ins->sreg1 = sp [0]->dreg;
ins->type = STACK_PTR;
MONO_ADD_INS (cfg->cbb, ins);
if (init_locals)
ins->flags |= MONO_INST_INIT;
+ MONO_START_BB (cfg, end_bb);
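+ /* alloc_ptr is assigned on both paths; copy it into a fresh vreg to be pushed on the stack */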
+ EMIT_NEW_UNALU (cfg, ins, OP_MOVE, alloc_preg (cfg), alloc_ptr);
+ ins->type = STACK_PTR;
+
*sp++ = ins;
ip += 2;
break;
+ }
case CEE_ENDFILTER: {
MonoExceptionClause *clause, *nearest;
int cc;
g_slist_free (class_inits);
mono_basic_block_free (original_bb);
cfg->dont_inline = g_list_remove (cfg->dont_inline, method);
- cfg->headers_to_free = g_slist_prepend_mempool (cfg->mempool, cfg->headers_to_free, header);
if (cfg->exception_type)
return -1;
else
g_free (live_range_end_bb);
}
-static void mono_decompose_typecheck (MonoCompile *cfg, MonoBasicBlock *bb, MonoInst *ins)
+static void
+mono_decompose_typecheck (MonoCompile *cfg, MonoBasicBlock *bb, MonoInst *ins)
{
MonoInst *ret, *move, *source;
MonoClass *klass = ins->klass;
mono_replace_ins (cfg, bb, ins, &prev, first_bb, cfg->cbb);
}
-void mono_decompose_typechecks (MonoCompile *cfg)
+void
+mono_decompose_typechecks (MonoCompile *cfg)
{
for (MonoBasicBlock *bb = cfg->bb_entry; bb; bb = bb->next_bb) {
- MonoInst *c;
- MONO_BB_FOR_EACH_INS (bb, c) {
- switch (c->opcode) {
+ MonoInst *ins;
+ MONO_BB_FOR_EACH_INS (bb, ins) {
+ switch (ins->opcode) {
case OP_ISINST:
case OP_CASTCLASS:
- mono_decompose_typecheck (cfg, bb, c);
+ mono_decompose_typecheck (cfg, bb, ins);
break;
}
}