return mono_arch_get_unbox_trampoline (gsctx, m, addr);
}
+#ifdef MONO_ARCH_HAVE_STATIC_RGCTX_TRAMPOLINE
+/*
+ * mono_create_static_rgctx_trampoline:
+ *
+ * Return a static rgctx trampoline for M which branches to ADDR which should
+ * point to the compiled code of M.
+ *
+ * Static rgctx trampolines are used when a shared generic method which doesn't
+ * have a this argument is called indirectly, i.e. from code which can't pass in
+ * the rgctx argument. The trampoline sets the rgctx argument and jumps to the
+ * method's code. These trampolines are similar to the unbox trampolines: they
+ * perform the same task as the static rgctx wrappers, but they are smaller/faster,
+ * and can be made to work with full AOT.
+ */
+gpointer
+mono_create_static_rgctx_trampoline (MonoMethod *m, gpointer addr)
+{
+ gpointer ctx;
+ gpointer res;
+ MonoDomain *domain;
+
+ /* Pick the rgctx value the trampoline will pass: the method rgctx for
+  * methods with their own method instantiation, the class vtable otherwise. */
+ if (mini_method_get_context (m)->method_inst)
+ ctx = mono_method_lookup_rgctx (mono_class_vtable (mono_domain_get (), m->klass), mini_method_get_context (m)->method_inst);
+ else
+ ctx = mono_class_vtable (mono_domain_get (), m->klass);
+
+ /* In full-AOT mode trampolines can't be generated at runtime */
+ if (mono_aot_only)
+ return mono_aot_get_static_rgctx_trampoline (ctx, addr);
+
+ domain = mono_domain_get ();
+
+ /* Cache lookup: one trampoline per method per domain */
+ mono_domain_lock (domain);
+ res = g_hash_table_lookup (domain_jit_info (domain)->static_rgctx_trampoline_hash,
+ m);
+ mono_domain_unlock (domain);
+ if (res)
+ return res;
+
+ /* Created outside the lock; a concurrent creator may race with us */
+ res = mono_arch_get_static_rgctx_trampoline (m, ctx, addr);
+
+ mono_domain_lock (domain);
+ /* Duplicates inserted while we didn't hold the lock are OK */
+ g_hash_table_insert (domain_jit_info (domain)->static_rgctx_trampoline_hash, m, res);
+ mono_domain_unlock (domain);
+
+ return res;
+}
+#endif
+
+/*
+ * mono_get_vcall_slot_addr:
+ *
+ * Return the address of the vtable slot read by the virtual call instruction
+ * at CODE, computed as the vtable/base pointer returned by
+ * mono_arch_get_vcall_slot () plus the decoded displacement. Returns NULL if
+ * the call site can not be decoded.
+ */
+gpointer*
+mono_get_vcall_slot_addr (guint8* code, gpointer *regs)
+{
+ gpointer vt;
+ int displacement;
+ vt = mono_arch_get_vcall_slot (code, regs, &displacement);
+ if (!vt)
+ return NULL;
+ return (gpointer*)((char*)vt + displacement);
+}
+
#ifdef MONO_ARCH_HAVE_IMT
static gpointer*
mono_vtable_build_imt_slot (vt, mono_method_get_imt_slot (imt_method));
if (impl_method) {
- MonoMethod *impl = mono_class_get_vtable_entry (vt->klass, interface_offset + imt_method->slot);
+ MonoMethod *impl;
if (imt_method->is_inflated && ((MonoMethodInflated*)imt_method)->context.method_inst) {
MonoGenericContext context = { NULL, NULL };
/*
* Generic virtual method, imt_method contains the inflated interface
- * method, need to get the infated impl method.
+ * method, need to get the inflated impl method.
*/
+ /* imt_method->slot might not be set */
+ impl = mono_class_get_vtable_entry (vt->klass, interface_offset + mono_method_get_declaring_generic_method (imt_method)->slot);
+
if (impl->klass->generic_class)
context.class_inst = impl->klass->generic_class->context.class_inst;
context.method_inst = ((MonoMethodInflated*)imt_method)->context.method_inst;
impl = mono_class_inflate_generic_method (impl, &context);
+ } else {
+ impl = mono_class_get_vtable_entry (vt->klass, interface_offset + imt_method->slot);
}
*impl_method = impl;
MonoMethod *declaring = NULL;
MonoMethod *generic_virtual = NULL;
int context_used;
+ gboolean proxy = FALSE;
+#ifdef MONO_ARCH_HAVE_STATIC_RGCTX_TRAMPOLINE
+ gboolean need_rgctx_tramp = FALSE;
+#endif
-#if MONO_ARCH_COMMON_VTABLE_TRAMPOLINE
if (m == MONO_FAKE_VTABLE_METHOD) {
int displacement;
MonoVTable *vt = mono_arch_get_vcall_slot (code, (gpointer*)regs, &displacement);
/* Avoid loading metadata or creating a generic vtable if possible */
addr = mono_aot_get_method_from_vt_slot (mono_domain_get (), vt, displacement);
if (addr && !vt->klass->valuetype) {
- vtable_slot = mono_arch_get_vcall_slot_addr (code, (gpointer*)regs);
+ vtable_slot = mono_get_vcall_slot_addr (code, (gpointer*)regs);
if (mono_aot_is_got_entry (code, (guint8*)vtable_slot) || mono_domain_owns_vtable_slot (mono_domain_get (), vtable_slot)) {
*vtable_slot = mono_get_addr_from_ftnptr (addr);
}
/*g_print ("vtable with disp %d at %p\n", displacement, code);*/
}
}
-#endif
+
/* this is the IMT trampoline */
#ifdef MONO_ARCH_HAVE_IMT
if (m == MONO_FAKE_IMT_METHOD) {
MonoMethod *impl_method;
+ MonoGenericSharingContext *gsctx;
+ MonoObject *this_arg;
+
/* we get the interface method because mono_convert_imt_slot_to_vtable_slot ()
* needs the signature to be able to find the this argument
*/
m = mono_arch_find_imt_method ((gpointer*)regs, code);
- vtable_slot = mono_arch_get_vcall_slot_addr (code, (gpointer*)regs);
+ vtable_slot = mono_get_vcall_slot_addr (code, (gpointer*)regs);
g_assert (vtable_slot);
- vtable_slot = mono_convert_imt_slot_to_vtable_slot (vtable_slot, (gpointer*)regs, code, m, &impl_method);
- /* mono_convert_imt_slot_to_vtable_slot () also gives us the method that is supposed
- * to be called, so we compile it and go ahead as usual.
- */
- /*g_print ("imt found method %p (%s) at %p\n", impl_method, impl_method->name, code);*/
- if (m->is_inflated && ((MonoMethodInflated*)m)->context.method_inst) {
- /* Generic virtual method */
- generic_virtual = m;
- m = impl_method;
- m = mono_marshal_get_static_rgctx_invoke (m);
+
+ gsctx = mono_get_generic_context_from_code (code);
+ this_arg = mono_arch_find_this_argument ((gpointer*)regs, m, gsctx);
+
+ if (this_arg->vtable->klass == mono_defaults.transparent_proxy_class) {
+ /* Use the slow path for now */
+ proxy = TRUE;
+ m = mono_object_get_virtual_method (this_arg, m);
} else {
- m = impl_method;
+ vtable_slot = mono_convert_imt_slot_to_vtable_slot (vtable_slot, (gpointer*)regs, code, m, &impl_method);
+ /* mono_convert_imt_slot_to_vtable_slot () also gives us the method that is supposed
+ * to be called, so we compile it and go ahead as usual.
+ */
+ /*g_print ("imt found method %p (%s) at %p\n", impl_method, impl_method->name, code);*/
+ if (m->is_inflated && ((MonoMethodInflated*)m)->context.method_inst) {
+ /* Generic virtual method */
+ generic_virtual = m;
+ m = impl_method;
+#ifdef MONO_ARCH_HAVE_STATIC_RGCTX_TRAMPOLINE
+ need_rgctx_tramp = TRUE;
+#else
+ m = mono_marshal_get_static_rgctx_invoke (m);
+#endif
+ } else {
+ m = impl_method;
+ }
}
}
#endif
}
m = mono_class_inflate_generic_method (declaring, &context);
+#ifdef MONO_ARCH_HAVE_STATIC_RGCTX_TRAMPOLINE
+ need_rgctx_tramp = TRUE;
+#else
/* FIXME: only do this if the method is sharable */
m = mono_marshal_get_static_rgctx_invoke (m);
+#endif
} else if ((context_used = mono_method_check_context_used (m))) {
MonoClass *klass = NULL;
MonoMethod *actual_method = NULL;
mono_get_generic_context_from_code (code));
vt = this_argument->vtable;
- vtable_slot = mono_arch_get_vcall_slot_addr (code, (gpointer*)regs);
+ vtable_slot = mono_get_vcall_slot_addr (code, (gpointer*)regs);
g_assert (this_argument->vtable->klass->inited);
//mono_class_init (this_argument->vtable->klass);
m = mono_marshal_get_synchronized_wrapper (m);
}
+ /* Calls made through delegates on platforms without delegate trampolines */
+ if (!code && mono_method_needs_static_rgctx_invoke (m, FALSE))
+ m = mono_marshal_get_static_rgctx_invoke (m);
+
addr = mono_compile_method (m);
g_assert (addr);
mono_debugger_trampoline_compiled (m, addr);
+#ifdef MONO_ARCH_HAVE_STATIC_RGCTX_TRAMPOLINE
+ if (need_rgctx_tramp)
+ addr = mono_create_static_rgctx_trampoline (m, addr);
+#endif
+
if (generic_virtual) {
int displacement;
MonoVTable *vt = mono_arch_get_vcall_slot (code, (gpointer*)regs, &displacement);
- vtable_slot = mono_arch_get_vcall_slot_addr (code, (gpointer*)regs);
+ vtable_slot = mono_get_vcall_slot_addr (code, (gpointer*)regs);
g_assert (vtable_slot);
if (vt->klass->valuetype)
return addr;
}
- vtable_slot = mono_arch_get_vcall_slot_addr (code, (gpointer*)regs);
+ vtable_slot = mono_get_vcall_slot_addr (code, (gpointer*)regs);
if (vtable_slot) {
if (m->klass->valuetype)
addr = get_unbox_trampoline (mono_get_generic_context_from_code (code), m, addr);
-
g_assert (*vtable_slot);
- if (mono_aot_is_got_entry (code, (guint8*)vtable_slot) || mono_domain_owns_vtable_slot (mono_domain_get (), vtable_slot)) {
+ if (!proxy && (mono_aot_is_got_entry (code, (guint8*)vtable_slot) || mono_domain_owns_vtable_slot (mono_domain_get (), vtable_slot))) {
#ifdef MONO_ARCH_HAVE_IMT
vtable_slot = mono_convert_imt_slot_to_vtable_slot (vtable_slot, (gpointer*)regs, code, m, NULL);
#endif
return addr;
}
+
+#ifdef ENABLE_LLVM
+/*
+ * mono_llvm_vcall_trampoline:
+ *
+ * This trampoline handles virtual calls when using LLVM.
+ */
+static gpointer
+mono_llvm_vcall_trampoline (gssize *regs, guint8 *code, MonoMethod *m, guint8 *tramp)
+{
+ MonoObject *this;
+ gpointer addr;
+ MonoVTable *vt;
+ gpointer *vtable_slot;
+ /* No transparent-proxy detection here yet, so this stays FALSE and slot
+  * patching below is always attempted (mirrors the flag in mono_magic_trampoline ()) */
+ gboolean proxy = FALSE;
+
+ /*
+  * We have the method which is called, we need to obtain the vtable slot without
+  * disassembly which is impossible with LLVM.
+  * So we use the this argument.
+  */
+ this = mono_arch_get_this_arg_from_call (NULL, mono_method_signature (m), regs, code);
+ g_assert (this);
+
+ vt = this->vtable;
+
+ /* This is a simplified version of mono_magic_trampoline () */
+ /* FIXME: Avoid code duplication */
+
+ if (m->iflags & METHOD_IMPL_ATTRIBUTE_SYNCHRONIZED) {
+ MonoJitInfo *ji;
+
+ if (code)
+ ji = mono_jit_info_table_find (mono_domain_get (), (char*)code);
+ else
+ ji = NULL;
+
+ /* Avoid recursion */
+ if (!(ji && ji->method->wrapper_type == MONO_WRAPPER_SYNCHRONIZED))
+ m = mono_marshal_get_synchronized_wrapper (m);
+ }
+
+ addr = mono_compile_method (m);
+ g_assert (addr);
+
+ /* Valuetype methods called virtually need the unbox trampoline in front */
+ if (m->klass->valuetype)
+ addr = get_unbox_trampoline (mono_get_generic_context_from_code (code), m, addr);
+
+ vtable_slot = &(vt->vtable [mono_method_get_vtable_slot (m)]);
+ g_assert (*vtable_slot);
+
+ /* Patch the slot so later calls skip the trampoline, but only if the
+  * calling domain is allowed to write to it */
+ if (!proxy && (mono_aot_is_got_entry (code, (guint8*)vtable_slot) || mono_domain_owns_vtable_slot (mono_domain_get (), vtable_slot))) {
+#ifdef MONO_ARCH_HAVE_IMT
+ vtable_slot = mono_convert_imt_slot_to_vtable_slot (vtable_slot, (gpointer*)regs, code, m, NULL);
+#endif
+ *vtable_slot = mono_get_addr_from_ftnptr (addr);
+ }
+
+ mono_debugger_trampoline_compiled (m, addr);
+
+ return addr;
+}
+#endif
gpointer
mono_generic_virtual_remoting_trampoline (gssize *regs, guint8 *code, MonoMethod *m, guint8 *tramp)
{
MonoGenericContext context = { NULL, NULL };
- MonoMethod *declaring;
+ MonoMethod *imt_method, *declaring;
gpointer addr;
g_assert (m->is_generic);
g_assert (!m->klass->generic_container);
#ifdef MONO_ARCH_HAVE_IMT
- context.method_inst = (MonoGenericInst*)mono_arch_find_imt_method ((gpointer*)regs, code);
+ imt_method = mono_arch_find_imt_method ((gpointer*)regs, code);
+ if (imt_method->is_inflated)
+ context.method_inst = ((MonoMethodInflated*)imt_method)->context.method_inst;
#endif
m = mono_class_inflate_generic_method (declaring, &context);
m = mono_marshal_get_remoting_invoke_with_check (m);
return mono_magic_trampoline (regs, code, method, tramp);
}
- vtable_slot = mono_arch_get_vcall_slot_addr (code, (gpointer*)regs);
+ vtable_slot = mono_get_vcall_slot_addr (code, (gpointer*)regs);
g_assert (!vtable_slot);
/* This is a normal call through a PLT entry */
MonoMethod *m;
MonoMethod *method = NULL;
gboolean multicast, callvirt;
+#ifdef MONO_ARCH_HAVE_STATIC_RGCTX_TRAMPOLINE
+ gboolean need_rgctx_tramp = FALSE;
+#endif
MonoMethod *invoke = tramp_data [0];
guint8 *impl_this = tramp_data [1];
guint8 *impl_nothis = tramp_data [2];
if (method && method->iflags & METHOD_IMPL_ATTRIBUTE_SYNCHRONIZED)
method = mono_marshal_get_synchronized_wrapper (method);
+ if (method && mono_method_needs_static_rgctx_invoke (method, FALSE)) {
+#ifdef MONO_ARCH_HAVE_STATIC_RGCTX_TRAMPOLINE
+ need_rgctx_tramp = TRUE;
+#else
+ method = mono_marshal_get_static_rgctx_invoke (method);
+#endif
+ }
+
/*
* If the called address is a trampoline, replace it with the compiled method so
* further calls don't have to go through the trampoline.
}
}
+#ifdef MONO_ARCH_HAVE_STATIC_RGCTX_TRAMPOLINE
+ if (need_rgctx_tramp)
+ delegate->method_ptr = mono_create_static_rgctx_trampoline (method, delegate->method_ptr);
+#endif
+
multicast = ((MonoMulticastDelegate*)delegate)->prev != NULL;
if (!multicast && !callvirt) {
- code = delegate->target ? impl_this : impl_nothis;
+ if (method && (method->flags & METHOD_ATTRIBUTE_STATIC) && mono_method_signature (method)->param_count == mono_method_signature (invoke)->param_count + 1)
+ /* Closed static delegate */
+ code = impl_this;
+ else
+ code = delegate->target ? impl_this : impl_nothis;
if (code) {
delegate->invoke_impl = mono_get_addr_from_ftnptr (code);
return mono_monitor_enter_trampoline;
case MONO_TRAMPOLINE_MONITOR_EXIT:
return mono_monitor_exit_trampoline;
+#ifdef ENABLE_LLVM
+ case MONO_TRAMPOLINE_LLVM_VCALL:
+ return mono_llvm_vcall_trampoline;
+#endif
default:
g_assert_not_reached ();
return NULL;
mono_trampoline_code [MONO_TRAMPOLINE_GENERIC_VIRTUAL_REMOTING] = mono_arch_create_trampoline_code (MONO_TRAMPOLINE_GENERIC_VIRTUAL_REMOTING);
mono_trampoline_code [MONO_TRAMPOLINE_MONITOR_ENTER] = mono_arch_create_trampoline_code (MONO_TRAMPOLINE_MONITOR_ENTER);
mono_trampoline_code [MONO_TRAMPOLINE_MONITOR_EXIT] = mono_arch_create_trampoline_code (MONO_TRAMPOLINE_MONITOR_EXIT);
+#ifdef ENABLE_LLVM
+ mono_trampoline_code [MONO_TRAMPOLINE_LLVM_VCALL] = mono_arch_create_trampoline_code (MONO_TRAMPOLINE_LLVM_VCALL);
+#endif
}
void
gpointer code;
guint32 code_size = 0;
- code = mono_jit_find_compiled_method (domain, method);
- if (code)
+ code = mono_jit_find_compiled_method_with_jit_info (domain, method, &ji);
+ /*
+ * We cannot recover the correct type of a shared generic
+ * method from its native code address, so we use the
+ * trampoline instead.
+ */
+ if (code && !ji->has_generic_jit_info)
return code;
mono_domain_lock (domain);
tramp_data = mono_domain_alloc (domain, sizeof (gpointer) * 3);
tramp_data [0] = invoke;
- if (mono_aot_only) {
- tramp_data [1] = NULL;
- tramp_data [2] = NULL;
- } else {
- tramp_data [1] = mono_arch_get_delegate_invoke_impl (mono_method_signature (invoke), TRUE);
- tramp_data [2] = mono_arch_get_delegate_invoke_impl (mono_method_signature (invoke), FALSE);
- }
+ tramp_data [1] = mono_arch_get_delegate_invoke_impl (mono_method_signature (invoke), TRUE);
+ tramp_data [2] = mono_arch_get_delegate_invoke_impl (mono_method_signature (invoke), FALSE);
ptr = mono_create_specific_trampoline (tramp_data, MONO_TRAMPOLINE_DELEGATE, mono_domain_get (), &code_size);
g_assert (code_size);
#endif
return code;
}
+
+#ifdef ENABLE_LLVM
+/*
+ * mono_create_llvm_vcall_trampoline:
+ *
+ * LLVM emits code for virtual calls which mono_get_vcall_slot is unable to
+ * decode, i.e. only the final branch address is available:
+ * mov <offset>(%rax), %rax
+ * <random code inserted by instruction scheduling>
+ * call *%rax
+ *
+ * To work around this problem, we don't use the common vtable trampoline when
+ * llvm is enabled. Instead, we use one trampoline per method.
+ */
+gpointer
+mono_create_llvm_vcall_trampoline (MonoMethod *method)
+{
+ MonoDomain *domain;
+ gpointer res;
+
+ domain = mono_domain_get ();
+
+ /* One specific trampoline per method, cached per domain */
+ mono_domain_lock (domain);
+ res = g_hash_table_lookup (domain_jit_info (domain)->llvm_vcall_trampoline_hash, method);
+ mono_domain_unlock (domain);
+ if (res)
+ return res;
+
+ /* Created outside the lock; a racing thread may create a duplicate,
+  * which is harmless (same behaviour, last insert wins) */
+ res = mono_create_specific_trampoline (method, MONO_TRAMPOLINE_LLVM_VCALL, domain, NULL);
+
+ mono_domain_lock (domain);
+ g_hash_table_insert (domain_jit_info (domain)->llvm_vcall_trampoline_hash, method, res);
+ mono_domain_unlock (domain);
+
+ return res;
+}
+#endif
MonoVTable*
mono_find_class_init_trampoline_by_addr (gconstpointer addr)