*out_arg = out_wrapper_arg;
}
+ if (!gsharedvt && generic_virtual) {
+ /* Record this instantiation as a (code, arg) descriptor so an IMT thunk can be built for this slot */
+ // FIXME: This wastes memory since add_generic_virtual_invocation ignores it in a lot of cases
+ gpointer *ftndesc = mono_domain_alloc0 (mono_domain_get (), 2 * sizeof (gpointer));
+ ftndesc [0] = addr;
+ ftndesc [1] = *out_arg;
+
+ mono_method_add_generic_virtual_invocation (mono_domain_get (),
+ vt, vt->vtable + slot,
+ generic_virtual, ftndesc);
+ }
+
return addr;
}
return resolve_vcall (this_obj->vtable, slot, imt_method, out_rgctx_arg, TRUE);
}
+/*
+ * mono_resolve_generic_virtual_call:
+ *
+ * Resolve a generic virtual call. This returns an ftndesc.
+ * This function is called on a slowpath, so it doesn't need to be fast.
+ * THIS_OBJ is the receiver, SLOT is the vtable slot index holding the generic
+ * method definition, and IMT_METHOD is the inflated generic method being called.
+ * The returned descriptor is a 2 element array allocated from the domain:
+ * [0] = code address, [1] = extra argument for the call.
+ */
+gpointer
+mono_resolve_generic_virtual_call (MonoObject *this_obj, int slot, MonoMethod *imt_method)
+{
+ MonoMethod *m, *generic_virtual = NULL;
+ gpointer addr, compiled_method;
+ gboolean need_unbox_tramp = FALSE;
+ MonoError error;
+ MonoGenericContext context = { NULL, NULL };
+ MonoMethod *declaring;
+ gpointer out_arg = NULL;
+ MonoVTable *vt = this_obj->vtable;
+
+ /* The vtable slot contains the generic method definition */
+ m = mono_class_get_vtable_entry (vt->klass, slot);
+
+ g_assert (is_generic_method_definition (m));
+
+ if (m->is_inflated)
+ declaring = mono_method_get_declaring_generic_method (m);
+ else
+ declaring = m;
+
+ /* Build the inflation context: class args come from the receiver's class... */
+ if (m->klass->generic_class)
+ context.class_inst = m->klass->generic_class->context.class_inst;
+ else
+ g_assert (!m->klass->generic_container);
+
+ /* ... and method args come from the inflated imt method */
+ generic_virtual = imt_method;
+ g_assert (generic_virtual);
+ g_assert (generic_virtual->is_inflated);
+ context.method_inst = ((MonoMethodInflated*)generic_virtual)->context.method_inst;
+
+ m = mono_class_inflate_generic_method_checked (declaring, &context, &error);
+ g_assert (mono_error_ok (&error));
+
+ /* Valuetype receivers need an unbox trampoline in front of the method */
+ if (vt->klass->valuetype)
+ need_unbox_tramp = TRUE;
+
+ // FIXME: This can throw exceptions
+ addr = compiled_method = mono_compile_method (m);
+ g_assert (addr);
+
+ addr = mini_add_method_wrappers_llvmonly (m, addr, FALSE, need_unbox_tramp, &out_arg);
+
+ /*
+ * This wastes memory but the memory usage is bounded since
+ * mono_method_add_generic_virtual_invocation () eventually builds an imt thunk for
+ * this vtable slot so we are not called any more for this instantiation.
+ */
+ gpointer *ftndesc = mono_domain_alloc0 (mono_domain_get (), 2 * sizeof (gpointer));
+ ftndesc [0] = addr;
+ ftndesc [1] = out_arg;
+
+ mono_method_add_generic_virtual_invocation (mono_domain_get (),
+ vt, vt->vtable + slot,
+ generic_virtual, ftndesc);
+ return ftndesc;
+}
+
gpointer
mono_init_vtable_slot_vt (MonoVTable *vtable, int slot)
{
gpointer mono_resolve_vcall_gsharedvt (MonoObject *this_obj, int imt_slot, MonoMethod *imt_method, gpointer *out_arg);
+gpointer mono_resolve_generic_virtual_call (MonoObject *this_obj, int slot, MonoMethod *imt_method);
+
gpointer mono_init_vtable_slot_vt (MonoVTable *vtable, int slot);
gpointer mono_init_vtable_slot (MonoObject *this_obj, int slot);
MonoInst *call_target, *ins;
int arg_reg;
gboolean is_iface = cmethod->klass->flags & TYPE_ATTRIBUTE_INTERFACE;
+ gboolean is_gsharedvt = cfg->gsharedvt && mini_is_gsharedvt_variable_signature (fsig);
guint32 slot;
MONO_EMIT_NULL_CHECK (cfg, sp [0]->dreg);
else
slot = mono_method_get_vtable_index (cmethod);
- if (!fsig->generic_param_count && !is_iface && !imt_arg && !(cfg->gsharedvt && mini_is_gsharedvt_variable_signature (fsig))) {
+ if (!fsig->generic_param_count && !is_iface && !imt_arg && !is_gsharedvt) {
/*
* The simplest case, a normal virtual call.
*/
return emit_extra_arg_calli (cfg, fsig, sp, arg_reg, call_target);
}
- if (!fsig->generic_param_count && is_iface && !imt_arg && !(cfg->gsharedvt && mini_is_gsharedvt_variable_signature (fsig))) {
+ if (!fsig->generic_param_count && is_iface && !imt_arg && !is_gsharedvt) {
/*
* A simple interface call
*
return emit_llvmonly_calli (cfg, fsig, sp, ftndesc_ins);
}
+ if (fsig->generic_param_count && !is_iface && !is_gsharedvt) {
+ /*
+ * This is similar to the interface case, the vtable slot points to an imt thunk which is
+ * dynamically extended as more instantiations are discovered.
+ */
+ int this_reg = sp [0]->dreg;
+ int vtable_reg = alloc_preg (cfg);
+ int slot_reg = alloc_preg (cfg);
+ int addr_reg = alloc_preg (cfg);
+ /* NOTE(review): shadows the outer arg_reg declaration — confirm this is intentional */
+ int arg_reg = alloc_preg (cfg);
+ int ftndesc_reg = alloc_preg (cfg);
+ int offset;
+ MonoInst *thunk_addr_ins, *thunk_arg_ins, *ftndesc_ins;
+ MonoBasicBlock *slowpath_bb, *end_bb;
+
+ NEW_BBLOCK (cfg, slowpath_bb);
+ NEW_BBLOCK (cfg, end_bb);
+
+ MONO_EMIT_NEW_LOAD_MEMBASE (cfg, vtable_reg, this_reg, MONO_STRUCT_OFFSET (MonoObject, vtable));
+ offset = MONO_STRUCT_OFFSET (MonoVTable, vtable) + (slot * SIZEOF_VOID_P);
+
+ /* Load the imt slot, which contains a function descriptor. */
+ MONO_EMIT_NEW_LOAD_MEMBASE (cfg, slot_reg, vtable_reg, offset);
+
+ /* These slots are not initialized, so fall back to the slow path until they are initialized */
+ /* That happens when mono_method_add_generic_virtual_invocation () creates an IMT thunk */
+ MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, slot_reg, 0);
+ MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, slowpath_bb);
+
+ /* Fastpath */
+ /* Same as with iface calls */
+ /* The slot holds a 2 element descriptor: [0] = thunk address, [1] = thunk argument */
+ EMIT_NEW_LOAD_MEMBASE (cfg, thunk_addr_ins, OP_LOAD_MEMBASE, addr_reg, slot_reg, 0);
+ EMIT_NEW_LOAD_MEMBASE (cfg, thunk_arg_ins, OP_LOAD_MEMBASE, arg_reg, slot_reg, SIZEOF_VOID_P);
+ icall_args [0] = thunk_arg_ins;
+ icall_args [1] = emit_get_rgctx_method (cfg, context_used,
+ cmethod, MONO_RGCTX_INFO_METHOD);
+ ftndesc_ins = mono_emit_calli (cfg, helper_sig_llvmonly_imt_thunk, icall_args, thunk_addr_ins, NULL, NULL);
+ ftndesc_ins->dreg = ftndesc_reg;
+ /*
+ * Unlike normal iface calls, these imt thunks can return NULL, i.e. when they are passed an instantiation
+ * they don't know about yet. Fall back to the slowpath in that case.
+ */
+ MONO_EMIT_NEW_BIALU_IMM (cfg, OP_COMPARE_IMM, -1, ftndesc_reg, 0);
+ MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_PBEQ, slowpath_bb);
+
+ MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
+
+ /* Slowpath */
+ MONO_START_BB (cfg, slowpath_bb);
+ icall_args [0] = sp [0];
+ EMIT_NEW_ICONST (cfg, icall_args [1], slot);
+ imt_arg = emit_get_rgctx_method (cfg, context_used,
+ cmethod, MONO_RGCTX_INFO_METHOD);
+ icall_args [2] = imt_arg;
+ /* Resolve at runtime; returns an ftndesc to call (see mono_resolve_generic_virtual_call) */
+ ftndesc_ins = mono_emit_jit_icall (cfg, mono_resolve_generic_virtual_call, icall_args);
+ ftndesc_ins->dreg = ftndesc_reg;
+ MONO_EMIT_NEW_BRANCH_BLOCK (cfg, OP_BR, end_bb);
+
+ /* Common case */
+ MONO_START_BB (cfg, end_bb);
+ return emit_llvmonly_calli (cfg, fsig, sp, ftndesc_ins);
+ }
+
// FIXME: Optimize this
icall_args [0] = sp [0];
return arg [5];
}
+/*
+ * A version of the imt thunk used for generic virtual methods.
+ * Unlike a normal imt thunk, it's possible that IMT_METHOD is not found
+ * in the search table. The original JIT code had a 'fallback' trampoline it could
+ * call, but we can't do that, so we just return NULL, and the compiled code
+ * will handle it.
+ */
+static gpointer
+mono_llvmonly_generic_virtual_imt_thunk (gpointer *arg, MonoMethod *imt_method)
+{
+ gpointer *entry;
+
+ /* ARG is a NULL key terminated array of (key, value) pairs */
+ for (entry = arg; entry [0]; entry += 2) {
+ if (entry [0] == imt_method)
+ return entry [1];
+ }
+ /* Not found, the caller falls back to the slowpath */
+ return NULL;
+}
+
static gpointer
mono_llvmonly_get_imt_thunk (MonoVTable *vtable, MonoDomain *domain, MonoIMTCheckItem **imt_entries, int count, gpointer fail_tramp)
{
gpointer *buf;
gpointer *res;
int i, index, real_count;
+ gboolean virtual_generic = FALSE;
/*
* Create an array which is passed to the imt thunk functions.
for (i = 0; i < count; ++i) {
MonoIMTCheckItem *item = imt_entries [i];
- if (item->has_target_code)
- continue;
-
if (item->is_equals)
real_count ++;
+ if (item->has_target_code)
+ virtual_generic = TRUE;
}
/*
for (i = 0; i < count; ++i) {
MonoIMTCheckItem *item = imt_entries [i];
- if (!item->is_equals || item->has_target_code)
+ if (!item->is_equals)
continue;
g_assert (item->key);
- g_assert (!item->has_target_code);
- g_assert (vtable->vtable [item->value.vtable_slot]);
-
buf [(index * 2)] = item->key;
- buf [(index * 2) + 1] = vtable->vtable [item->value.vtable_slot];
+ if (item->has_target_code)
+ buf [(index * 2) + 1] = item->value.target_code;
+ else
+ buf [(index * 2) + 1] = vtable->vtable [item->value.vtable_slot];
index ++;
}
buf [(index * 2)] = NULL;
* It will by called by JITted code.
*/
res = (void **)mono_domain_alloc (domain, 2 * sizeof (gpointer));
- // FIXME: Add more special cases
switch (real_count) {
case 1:
res [0] = mono_llvmonly_imt_thunk_1;
res [0] = mono_llvmonly_imt_thunk;
break;
}
+ if (virtual_generic)
+ res [0] = mono_llvmonly_generic_virtual_imt_thunk;
res [1] = buf;
return res;
register_icall_no_wrapper (mono_resolve_vcall, "mono_resolve_vcall", "ptr object int ptr ptr");
register_icall_no_wrapper (mono_resolve_iface_call_gsharedvt, "mono_resolve_iface_call_gsharedvt", "ptr object int ptr ptr");
register_icall_no_wrapper (mono_resolve_vcall_gsharedvt, "mono_resolve_vcall_gsharedvt", "ptr object int ptr ptr");
+ register_icall_no_wrapper (mono_resolve_generic_virtual_call, "mono_resolve_generic_virtual_call", "ptr object int ptr");
/* This needs a wrapper so it can have a preserveall cconv */
register_icall (mono_init_vtable_slot, "mono_init_vtable_slot", "ptr object int", FALSE);
register_icall (mono_init_delegate, "mono_init_delegate", "void object object ptr", TRUE);