* Copyright 2003 Ximian, Inc.
* Copyright 2003-2011 Novell Inc.
* Copyright 2011 Xamarin Inc.
+ * Licensed under the MIT license. See LICENSE file in the project root for full license information.
*/
#include "mini.h"
#include <string.h>
#include <mono/utils/mono-counters.h>
#include <mono/utils/mono-mmap.h>
#include <mono/utils/mono-memory-model.h>
-#include <mono/utils/mono-hwcap-x86.h>
+#include <mono/utils/mono-hwcap.h>
#include <mono/utils/mono-threads.h>
#include "trace.h"
#ifdef TARGET_WIN32
/* Under windows, the default pinvoke calling convention is stdcall */
-#define CALLCONV_IS_STDCALL(sig) ((((sig)->call_convention) == MONO_CALL_STDCALL) || ((sig)->pinvoke && ((sig)->call_convention) == MONO_CALL_DEFAULT) || ((sig)->pinvoke && ((sig)->call_convention) == MONO_CALL_THISCALL))
+#define CALLCONV_IS_STDCALL(sig) ((sig)->pinvoke && ((sig)->call_convention == MONO_CALL_STDCALL || (sig)->call_convention == MONO_CALL_DEFAULT || (sig)->call_convention == MONO_CALL_THISCALL))
#else
-#define CALLCONV_IS_STDCALL(sig) (((sig)->call_convention) == MONO_CALL_STDCALL || ((sig)->pinvoke && ((sig)->call_convention) == MONO_CALL_THISCALL))
+#define CALLCONV_IS_STDCALL(sig) ((sig)->pinvoke && ((sig)->call_convention == MONO_CALL_STDCALL || (sig)->call_convention == MONO_CALL_THISCALL))
#endif
#define X86_IS_CALLEE_SAVED_REG(reg) (((reg) == X86_EBX) || ((reg) == X86_EDI) || ((reg) == X86_ESI))
static guint8*
emit_load_aotconst (guint8 *start, guint8 *code, MonoCompile *cfg, MonoJumpInfo **ji, int dreg, int tramp_type, gconstpointer target);
-#ifdef __native_client_codegen__
-
-/* Default alignment for Native Client is 32-byte. */
-gint8 nacl_align_byte = -32; /* signed version of 0xe0 */
-
-/* mono_arch_nacl_pad: Add pad bytes of alignment instructions at code, */
-/* Check that alignment doesn't cross an alignment boundary. */
-guint8 *
-mono_arch_nacl_pad (guint8 *code, int pad)
-{
- const int kMaxPadding = 7; /* see x86-codegen.h: x86_padding() */
-
- if (pad == 0) return code;
- /* assertion: alignment cannot cross a block boundary */
- g_assert(((uintptr_t)code & (~kNaClAlignmentMask)) ==
- (((uintptr_t)code + pad - 1) & (~kNaClAlignmentMask)));
- while (pad >= kMaxPadding) {
- x86_padding (code, kMaxPadding);
- pad -= kMaxPadding;
- }
- if (pad != 0) x86_padding (code, pad);
- return code;
-}
-
-guint8 *
-mono_arch_nacl_skip_nops (guint8 *code)
-{
- x86_skip_nops (code);
- return code;
-}
-
-#endif /* __native_client_codegen__ */
-
const char*
mono_arch_regname (int reg)
{
x86_patch (code, (unsigned char*)target);
}
-typedef enum {
- ArgInIReg,
- ArgInFloatSSEReg,
- ArgInDoubleSSEReg,
- ArgOnStack,
- ArgValuetypeInReg,
- ArgOnFloatFpStack,
- ArgOnDoubleFpStack,
- /* gsharedvt argument passed by addr */
- ArgGSharedVt,
- ArgNone
-} ArgStorage;
-
-typedef struct {
- gint16 offset;
- gint8 reg;
- ArgStorage storage;
- int nslots;
- gboolean is_pair;
-
- /* Only if storage == ArgValuetypeInReg */
- ArgStorage pair_storage [2];
- gint8 pair_regs [2];
-} ArgInfo;
-
-typedef struct {
- int nargs;
- guint32 stack_usage;
- guint32 reg_usage;
- guint32 freg_usage;
- gboolean need_stack_align;
- guint32 stack_align_amount;
- gboolean vtype_retaddr;
- /* The index of the vret arg in the argument list */
- int vret_arg_index;
- int vret_arg_offset;
- /* Argument space popped by the callee */
- int callee_stack_pop;
- ArgInfo ret;
- ArgInfo sig_cookie;
- ArgInfo args [1];
-} CallInfo;
-
#define FLOAT_PARAM_REGS 0
static const guint32 thiscall_param_regs [] = { X86_ECX, X86_NREG };
klass = mono_class_from_mono_type (type);
size = mini_type_stack_size_full (&klass->byval_arg, NULL, sig->pinvoke);
+#if defined(TARGET_WIN32)
+	/*
+	 * Standard C and C++ don't allow empty structs; an empty struct will always have a size of 1 byte.
+	 * GCC has an extension that allows empty structs, https://gcc.gnu.org/onlinedocs/gcc/Empty-Structures.html.
+	 * This causes a dilemma, since a runtime built with a non-GCC compiler will not be compatible with
+	 * GCC-built C libraries, and vice versa. On platforms where an empty struct has a size of 1 byte
+	 * it must be represented in the call and cannot be dropped.
+	 */
+ if (size == 0 && MONO_TYPE_ISSTRUCT (type) && sig->pinvoke) {
+		/* Empty structs (1 byte size) need to be represented in a stack slot */
+ ainfo->pass_empty_struct = TRUE;
+ size = 1;
+ }
+#endif
+
#ifdef SMALL_STRUCTS_IN_REGS
if (sig->pinvoke && is_return) {
MonoMarshalType *info;
- /*
- * the exact rules are not very well documented, the code below seems to work with the
- * code generated by gcc 3.3.3 -mno-cygwin.
- */
info = mono_marshal_load_type_info (klass);
g_assert (info);
ainfo->pair_storage [0] = ainfo->pair_storage [1] = ArgNone;
+ /* Ignore empty struct return value, if used. */
+ if (info->num_fields == 0 && ainfo->pass_empty_struct) {
+ ainfo->storage = ArgValuetypeInReg;
+ return;
+ }
+
+ /*
+ * Windows x86 ABI for returning structs of size 4 or 8 bytes (regardless of type) dictates that
+ * values are passed in EDX:EAX register pairs, https://msdn.microsoft.com/en-us/library/984x0h58.aspx.
+ * This is different compared to for example float or double return types (not in struct) that will be returned
+ * in ST(0), https://msdn.microsoft.com/en-us/library/ha59cbfz.aspx.
+ *
+	 * Apple's OSX x86 ABI for returning structs of size 4 or 8 bytes uses a slightly different approach.
+ * If a struct includes only one scalar value, it will be handled with the same rules as scalar values.
+ * This means that structs with one float or double will be returned in ST(0). For more details,
+ * https://developer.apple.com/library/mac/documentation/DeveloperTools/Conceptual/LowLevelABI/130-IA-32_Function_Calling_Conventions/IA32.html.
+ */
+#if !defined(TARGET_WIN32)
+
/* Special case structs with only a float member */
if (info->num_fields == 1) {
int ftype = mini_get_underlying_type (info->fields [0].field->type)->type;
return;
}
}
+#endif
+
if ((info->native_size == 1) || (info->native_size == 2) || (info->native_size == 4) || (info->native_size == 8)) {
ainfo->storage = ArgValuetypeInReg;
ainfo->pair_storage [0] = ArgInIReg;
* For x86 ELF, see the "System V Application Binary Interface Intel386
* Architecture Processor Supplment, Fourth Edition" document for more
* information.
- * For x86 win32, see ???.
+ * For x86 win32, see https://msdn.microsoft.com/en-us/library/984x0h58.aspx.
*/
static CallInfo*
get_call_info_internal (CallInfo *cinfo, MonoMethodSignature *sig)
if (cinfo->vtype_retaddr) {
/* if the function returns a struct on stack, the called method already does a ret $0x4 */
cinfo->callee_stack_pop = 4;
- } else if (CALLCONV_IS_STDCALL (sig) && sig->pinvoke) {
+ } else if (CALLCONV_IS_STDCALL (sig)) {
/* Have to compensate for the stack space popped by the native callee */
cinfo->callee_stack_pop = stack_size;
}
mono_aot_register_jit_icall ("mono_x86_throw_exception", mono_x86_throw_exception);
mono_aot_register_jit_icall ("mono_x86_throw_corlib_exception", mono_x86_throw_corlib_exception);
-#if defined(ENABLE_GSHAREDVT)
+#if defined(MONO_ARCH_GSHAREDVT_SUPPORTED)
mono_aot_register_jit_icall ("mono_x86_start_gsharedvt_call", mono_x86_start_gsharedvt_call);
#endif
}
guint32
mono_arch_cpu_optimizations (guint32 *exclude_mask)
{
-#if !defined(__native_client__)
guint32 opts = 0;
*exclude_mask = 0;
#endif
return opts;
-#else
- return MONO_OPT_CMOV | MONO_OPT_FCMOV | MONO_OPT_SSE2;
-#endif
}
/*
header = cfg->header;
sig = mono_method_signature (cfg->method);
- cinfo = get_call_info (cfg->mempool, sig);
+ if (!cfg->arch.cinfo)
+ cfg->arch.cinfo = get_call_info (cfg->mempool, sig);
+ cinfo = (CallInfo *)cfg->arch.cinfo;
cfg->frame_reg = X86_EBP;
offset = 0;
if (inst->opcode != OP_REGVAR) {
inst->opcode = OP_REGOFFSET;
inst->inst_basereg = X86_EBP;
+ inst->inst_offset = ainfo->offset + ARGS_OFFSET;
}
- inst->inst_offset = ainfo->offset + ARGS_OFFSET;
}
cfg->stack_offset = offset;
sig = mono_method_signature (cfg->method);
- cinfo = get_call_info (cfg->mempool, sig);
+ if (!cfg->arch.cinfo)
+ cfg->arch.cinfo = get_call_info (cfg->mempool, sig);
+ cinfo = (CallInfo *)cfg->arch.cinfo;
+
sig_ret = mini_get_underlying_type (sig->ret);
if (cinfo->ret.storage == ArgValuetypeInReg)
sentinelpos = sig->sentinelpos + (sig->hasthis ? 1 : 0);
if (sig_ret && MONO_TYPE_ISSTRUCT (sig_ret)) {
- if (cinfo->ret.storage == ArgValuetypeInReg) {
+ if (cinfo->ret.storage == ArgValuetypeInReg && cinfo->ret.pair_storage[0] != ArgNone ) {
/*
* Tell the JIT to use a more efficient calling convention: call using
* OP_CALL, compute the result location after the call, and save the
size = mini_type_stack_size_full (&in->klass->byval_arg, &align, sig->pinvoke);
}
- if (size > 0) {
+ if (size > 0 || ainfo->pass_empty_struct) {
arg->opcode = OP_OUTARG_VT;
arg->sreg1 = in->dreg;
arg->klass = in->klass;
MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, X86_ESP, ainfo->offset, src->dreg);
} else if (size <= 4) {
int dreg = mono_alloc_ireg (cfg);
- MONO_EMIT_NEW_LOAD_MEMBASE (cfg, dreg, src->dreg, 0);
+ if (ainfo->pass_empty_struct) {
+				// Pass an empty struct value as 0 on platforms that represent empty structs as 1 byte.
+ MONO_EMIT_NEW_ICONST (cfg, dreg, 0);
+ } else {
+ MONO_EMIT_NEW_LOAD_MEMBASE (cfg, dreg, src->dreg, 0);
+ }
MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, X86_ESP, ainfo->offset, dreg);
} else if (size <= 20) {
mini_emit_memcpy (cfg, X86_ESP, ainfo->offset, src->dreg, 0, size, 4);
This is required for code patching to be safe on SMP machines.
*/
pad_size = (guint32)(code + 1 - cfg->native_code) & 0x3;
-#ifndef __native_client_codegen__
if (needs_paddings && pad_size)
x86_padding (code, 4 - pad_size);
-#endif
mono_add_patch_info (cfg, code - cfg->native_code, patch_type, data);
x86_call_code (code, 0);
bb->native_offset = cfg->code_len;
}
}
-#ifdef __native_client_codegen__
- {
- /* For Native Client, all indirect call/jump targets must be */
- /* 32-byte aligned. Exception handler blocks are jumped to */
- /* indirectly as well. */
- gboolean bb_needs_alignment = (bb->flags & BB_INDIRECT_JUMP_TARGET) ||
- (bb->flags & BB_EXCEPTION_HANDLER);
-
- /* if ((cfg->code_len & kNaClAlignmentMask) != 0) { */
- if ( bb_needs_alignment && ((cfg->code_len & kNaClAlignmentMask) != 0)) {
- int pad = kNaClAlignment - (cfg->code_len & kNaClAlignmentMask);
- if (pad != kNaClAlignment) code = mono_arch_nacl_pad(code, pad);
- cfg->code_len += pad;
- bb->native_offset = cfg->code_len;
- }
- }
-#endif /* __native_client_codegen__ */
+
if (cfg->verbose_level > 2)
g_print ("Basic block %d starting at offset 0x%x\n", bb->block_num, bb->native_offset);
max_len = ((guint8 *)ins_get_spec (ins->opcode))[MONO_INST_LEN];
-#define EXTRA_CODE_SPACE (NACL_SIZE (16, 16 + kNaClAlignment))
+#define EXTRA_CODE_SPACE (16)
if (G_UNLIKELY (offset > (cfg->code_size - max_len - EXTRA_CODE_SPACE))) {
cfg->code_size *= 2;
break;
case OP_IDIV:
case OP_IREM:
-#if defined( __native_client_codegen__ )
- x86_alu_reg_imm (code, X86_CMP, ins->sreg2, 0);
- EMIT_COND_SYSTEM_EXCEPTION (X86_CC_EQ, TRUE, "DivideByZeroException");
-#endif
/*
* The code is the same for div/rem, the allocator will allocate dreg
* to RAX/RDX as appropriate.
break;
case OP_IDIV_UN:
case OP_IREM_UN:
-#if defined( __native_client_codegen__ )
- x86_alu_reg_imm (code, X86_CMP, ins->sreg2, 0);
- EMIT_COND_SYSTEM_EXCEPTION (X86_CC_EQ, TRUE, "DivideByZeroException");
-#endif
if (ins->sreg2 == X86_EDX) {
x86_push_reg (code, ins->sreg2);
x86_alu_reg_reg (code, X86_XOR, X86_EDX, X86_EDX);
}
break;
case OP_DIV_IMM:
-#if defined( __native_client_codegen__ )
- if (ins->inst_imm == 0) {
- mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_EXC, "DivideByZeroException");
- x86_jump32 (code, 0);
- break;
- }
-#endif
x86_mov_reg_imm (code, ins->sreg2, ins->inst_imm);
x86_cdq (code);
x86_div_reg (code, ins->sreg2, TRUE);
break;
}
case OP_GC_SAFE_POINT: {
- const char *polling_func = NULL;
- int compare_val = 0;
guint8 *br [1];
-#if defined(__native_client_codegen__) && defined(__native_client_gc__)
- polling_func = "mono_nacl_gc";
- compare_val = 0xFFFFFFFF;
-#else
g_assert (mono_threads_is_coop_enabled ());
- polling_func = "mono_threads_state_poll";
- compare_val = 1;
-#endif
- x86_test_membase_imm (code, ins->sreg1, 0, compare_val);
+ x86_test_membase_imm (code, ins->sreg1, 0, 1);
br[0] = code; x86_branch8 (code, X86_CC_EQ, 0, FALSE);
- code = emit_call (cfg, code, MONO_PATCH_INFO_INTERNAL_METHOD, polling_func);
+ code = emit_call (cfg, code, MONO_PATCH_INFO_INTERNAL_METHOD, "mono_threads_state_poll");
x86_patch (br [0], code);
break;
}
if (G_UNLIKELY ((code - cfg->native_code - offset) > max_len)) {
-#ifndef __native_client_codegen__
g_warning ("wrong maximal instruction length of instruction %s (expected %d, got %d)",
mono_inst_name (ins->opcode), max_len, code - cfg->native_code - offset);
g_assert_not_reached ();
-#endif /* __native_client_codegen__ */
}
cpos += max_len;
case MONO_PATCH_INFO_LABEL:
case MONO_PATCH_INFO_RGCTX_FETCH:
case MONO_PATCH_INFO_JIT_ICALL_ADDR:
-#if defined(__native_client_codegen__) && defined(__native_client__)
- if (nacl_is_code_address (code)) {
- /* For tail calls, code is patched after being installed */
- /* but not through the normal "patch callsite" method. */
- unsigned char buf[kNaClAlignment];
- unsigned char *aligned_code = (uintptr_t)code & ~kNaClAlignmentMask;
- unsigned char *_target = target;
- int ret;
- /* All patch targets modified in x86_patch */
- /* are IP relative. */
- _target = _target + (uintptr_t)buf - (uintptr_t)aligned_code;
- memcpy (buf, aligned_code, kNaClAlignment);
- /* Patch a temp buffer of bundle size, */
- /* then install to actual location. */
- x86_patch (buf + ((uintptr_t)code - (uintptr_t)aligned_code), _target);
- ret = nacl_dyncode_modify (aligned_code, buf, kNaClAlignment);
- g_assert (ret == 0);
- }
- else {
- x86_patch (ip, (unsigned char*)target);
- }
-#else
x86_patch (ip, (unsigned char*)target);
-#endif
break;
case MONO_PATCH_INFO_NONE:
break;
}
default: {
guint32 offset = mono_arch_get_patch_offset (ip);
-#if !defined(__native_client__)
*((gconstpointer *)(ip + offset)) = target;
-#else
- *((gconstpointer *)(ip + offset)) = nacl_modify_patch_target (target);
-#endif
break;
}
}
MonoBasicBlock *bb;
MonoMethodSignature *sig;
MonoInst *inst;
+ CallInfo *cinfo;
+ ArgInfo *ainfo;
int alloc_size, pos, max_offset, i, cfa_offset;
guint8 *code;
gboolean need_stack_frame;
-#ifdef __native_client_codegen__
- guint alignment_check;
-#endif
cfg->code_size = MAX (cfg->header->code_size * 4, 10240);
if (cfg->prof_options & MONO_PROFILE_ENTER_LEAVE)
cfg->code_size += 512;
-#if defined(__default_codegen__)
code = cfg->native_code = g_malloc (cfg->code_size);
-#elif defined(__native_client_codegen__)
- /* native_code_alloc is not 32-byte aligned, native_code is. */
- cfg->code_size = NACL_BUNDLE_ALIGN_UP (cfg->code_size);
- cfg->native_code_alloc = g_malloc (cfg->code_size + kNaClAlignment);
-
- /* Align native_code to next nearest kNaclAlignment byte. */
- cfg->native_code = (guint)cfg->native_code_alloc + kNaClAlignment;
- cfg->native_code = (guint)cfg->native_code & ~kNaClAlignmentMask;
-
- code = cfg->native_code;
-
- alignment_check = (guint)cfg->native_code & kNaClAlignmentMask;
- g_assert(alignment_check == 0);
-#endif
#if 0
{
/* max alignment for loops */
if ((cfg->opt & MONO_OPT_LOOP) && bb_is_loop_start (bb))
max_offset += LOOP_ALIGNMENT;
-#ifdef __native_client_codegen__
- /* max alignment for native client */
- if (bb->flags & BB_INDIRECT_JUMP_TARGET || bb->flags & BB_EXCEPTION_HANDLER)
- max_offset += kNaClAlignment;
-#endif
MONO_BB_FOR_EACH_INS (bb, ins) {
if (ins->opcode == OP_LABEL)
ins->inst_c1 = max_offset;
-#ifdef __native_client_codegen__
- switch (ins->opcode)
- {
- case OP_FCALL:
- case OP_LCALL:
- case OP_VCALL:
- case OP_VCALL2:
- case OP_VOIDCALL:
- case OP_CALL:
- case OP_FCALL_REG:
- case OP_LCALL_REG:
- case OP_VCALL_REG:
- case OP_VCALL2_REG:
- case OP_VOIDCALL_REG:
- case OP_CALL_REG:
- case OP_FCALL_MEMBASE:
- case OP_LCALL_MEMBASE:
- case OP_VCALL_MEMBASE:
- case OP_VCALL2_MEMBASE:
- case OP_VOIDCALL_MEMBASE:
- case OP_CALL_MEMBASE:
- max_offset += kNaClAlignment;
- break;
- default:
- max_offset += ((guint8 *)ins_get_spec (ins->opcode))[MONO_INST_LEN] - 1;
- break;
- }
-#endif /* __native_client_codegen__ */
max_offset += ((guint8 *)ins_get_spec (ins->opcode))[MONO_INST_LEN];
}
}
sig = mono_method_signature (method);
pos = 0;
+ cinfo = (CallInfo *)cfg->arch.cinfo;
+
for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
inst = cfg->args [pos];
+ ainfo = &cinfo->args [pos];
if (inst->opcode == OP_REGVAR) {
g_assert (need_stack_frame);
- x86_mov_reg_membase (code, inst->dreg, X86_EBP, inst->inst_offset, 4);
+ x86_mov_reg_membase (code, inst->dreg, X86_EBP, ainfo->offset + ARGS_OFFSET, 4);
if (cfg->verbose_level > 2)
g_print ("Argument %d assigned to register %s\n", pos, mono_arch_regname (inst->dreg));
}
}
/* Load returned vtypes into registers if needed */
- cinfo = get_call_info (cfg->mempool, sig);
+ cinfo = (CallInfo *)cfg->arch.cinfo;
if (cinfo->ret.storage == ArgValuetypeInReg) {
for (quad = 0; quad < 2; quad ++) {
switch (cinfo->ret.pair_storage [quad]) {
guint32 size;
/* Compute size of code following the push <OFFSET> */
-#if defined(__default_codegen__)
size = 5 + 5;
-#elif defined(__native_client_codegen__)
- code = mono_nacl_align (code);
- size = kNaClAlignment;
-#endif
+
/*This is aligned to 16 bytes by the callee. This way we save a few bytes here.*/
if ((code - cfg->native_code) - throw_ip < 126 - size) {
//[1 + 5] x86_jump_mem(inst,mem)
#define CMP_SIZE 6
-#if defined(__default_codegen__)
#define BR_SMALL_SIZE 2
#define BR_LARGE_SIZE 5
-#elif defined(__native_client_codegen__)
-/* I suspect the size calculation below is actually incorrect. */
-/* TODO: fix the calculation that uses these sizes. */
-#define BR_SMALL_SIZE 16
-#define BR_LARGE_SIZE 12
-#endif /*__native_client_codegen__*/
#define JUMP_IMM_SIZE 6
#define ENABLE_WRONG_METHOD_CHECK 0
#define DEBUG_IMT 0
}
size += item->chunk_size;
}
-#if defined(__native_client__) && defined(__native_client_codegen__)
- /* In Native Client, we don't re-use thunks, allocate from the */
- /* normal code manager paths. */
- size = NACL_BUNDLE_ALIGN_UP (size);
- code = mono_domain_code_reserve (domain, size);
-#else
if (fail_tramp)
code = mono_method_alloc_generic_virtual_thunk (domain, size);
else
code = mono_domain_code_reserve (domain, size);
-#endif
start = code;
unwind_ops = mono_arch_get_cie_program ();
g_free (buff);
}
- nacl_domain_code_validate (domain, &start, size, &code);
mono_profiler_code_buffer_new (start, code - start, MONO_PROFILER_CODE_BUFFER_IMT_TRAMPOLINE, NULL);
mono_tramp_info_register (mono_tramp_info_create (NULL, start, code - start, NULL, unwind_ops), domain);
} else {
int i = 0;
/* 8 for mov_reg and jump, plus 8 for each parameter */
-#ifdef __native_client_codegen__
- /* TODO: calculate this size correctly */
- code_reserve = 13 + (param_count * 8) + 2 * kNaClAlignment;
-#else
code_reserve = 8 + (param_count * 8);
-#endif /* __native_client_codegen__ */
/*
* The stack contains:
* <args in reverse order>
g_assert ((code - start) < code_reserve);
}
- nacl_global_codeman_validate (&start, code_reserve, &code);
-
if (has_target) {
*info = mono_tramp_info_create ("delegate_invoke_impl_has_target", start, code - start, NULL, unwind_ops);
} else {
x86_jump_membase (code, X86_EAX, offset);
mono_profiler_code_buffer_new (start, code - start, MONO_PROFILER_CODE_BUFFER_DELEGATE_INVOKE, NULL);
- if (load_imt_reg)
- tramp_name = g_strdup_printf ("delegate_virtual_invoke_imt_%d", - offset / sizeof (gpointer));
- else
- tramp_name = g_strdup_printf ("delegate_virtual_invoke_%d", offset / sizeof (gpointer));
+ tramp_name = mono_get_delegate_virtual_invoke_impl_name (load_imt_reg, offset);
*info = mono_tramp_info_create (tramp_name, start, code - start, NULL, unwind_ops);
g_free (tramp_name);
}
}
-#if defined(ENABLE_GSHAREDVT)
-
-#include "../../../mono-extensions/mono/mini/mini-x86-gsharedvt.c"
-
-#endif /* !MONOTOUCH */
+/*
+ * mono_arch_get_call_info:
+ *
+ *   Public arch entry point exposing the file-local get_call_info ().
+ * Computes the x86 calling-convention layout (argument/return storage,
+ * stack usage) for SIG, allocating the CallInfo from MP. Caller must
+ * not free the result separately; its lifetime is tied to MP.
+ */
+CallInfo*
+mono_arch_get_call_info (MonoMemPool *mp, MonoMethodSignature *sig)
+{
+	return get_call_info (mp, sig);
+}