#define mono_mini_arch_unlock() mono_mutex_unlock (&mini_arch_mutex)
static mono_mutex_t mini_arch_mutex;
-MonoBreakpointInfo
-mono_breakpoint_info [MONO_BREAKPOINT_ARRAY_SIZE];
-
-/*
- * The code generated for sequence points reads from this location, which is
- * made read-only when single stepping is enabled.
- */
-static gpointer ss_trigger_page;
-
-/* Enabled breakpoints read from this trigger page */
-static gpointer bp_trigger_page;
-
-/* The size of the breakpoint sequence */
-static int breakpoint_size;
-
-/* The size of the breakpoint instruction causing the actual fault */
-static int breakpoint_fault_size;
-
-/* The size of the single step instruction causing the actual fault */
-static int single_step_fault_size;
-
/* The single step trampoline */
static gpointer ss_trampoline;
+/* The breakpoint trampoline */
+static gpointer bp_trampoline;
+
/* Offset between fp and the first argument in the callee */
#define ARGS_OFFSET 16
#define GP_SCRATCH_REG AMD64_R11
return code [0] == 0xe8;
}
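+/* Like amd64_is_imm32 (), but returns FALSE when the single_imm_size debug option forces 8 byte immediates */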
+static inline gboolean
+amd64_use_imm32 (gint64 val)
+{
+	if (mini_get_debug_options ()->single_imm_size)
+ return FALSE;
+
+ return amd64_is_imm32 (val);
+}
+
#ifdef __native_client_codegen__
/* Keep track of instruction "depth", that is, the level of sub-instruction */
ArgOnStack,
ArgValuetypeInReg,
ArgValuetypeAddrInIReg,
+ /* gsharedvt argument passed by addr */
+ ArgGSharedVtInReg,
+ ArgGSharedVtOnStack,
ArgNone /* only in pair_storage */
} ArgStorage;
guint32 reg_usage;
guint32 freg_usage;
gboolean need_stack_align;
- gboolean vtype_retaddr;
/* The index of the vret arg in the argument list */
int vret_arg_index;
ArgInfo ret;
} ArgumentClass;
static ArgumentClass
-merge_argument_class_from_type (MonoGenericSharingContext *gsctx, MonoType *type, ArgumentClass class1)
+merge_argument_class_from_type (MonoType *type, ArgumentClass class1)
{
ArgumentClass class2 = ARG_CLASS_NO_CLASS;
MonoType *ptype;
- ptype = mini_type_get_underlying_type (gsctx, type);
+ ptype = mini_get_underlying_type (type);
switch (ptype->type) {
case MONO_TYPE_I1:
case MONO_TYPE_U1:
for (i = 0; i < info->num_fields; ++i) {
class2 = class1;
- class2 = merge_argument_class_from_type (gsctx, info->fields [i].field->type, class2);
+ class2 = merge_argument_class_from_type (info->fields [i].field->type, class2);
}
break;
}
}
static void
-add_valuetype (MonoGenericSharingContext *gsctx, MonoMethodSignature *sig, ArgInfo *ainfo, MonoType *type,
+add_valuetype (MonoMethodSignature *sig, ArgInfo *ainfo, MonoType *type,
gboolean is_return,
guint32 *gr, guint32 *fr, guint32 *stack_size)
{
MonoMarshalType *info = NULL;
MonoMarshalField *fields = NULL;
MonoClass *klass;
- MonoGenericSharingContext tmp_gsctx;
gboolean pass_on_stack = FALSE;
-
- /*
- * The gsctx currently contains no data, it is only used for checking whenever
- * open types are allowed, some callers like mono_arch_get_argument_info ()
- * don't pass it to us, so work around that.
- */
- if (!gsctx)
- gsctx = &tmp_gsctx;
klass = mono_class_from_mono_type (type);
- size = mini_type_stack_size_full (gsctx, &klass->byval_arg, NULL, sig->pinvoke);
+ size = mini_type_stack_size_full (&klass->byval_arg, NULL, sig->pinvoke);
#ifndef TARGET_WIN32
if (!sig->pinvoke && ((is_return && (size == 8)) || (!is_return && (size <= 16)))) {
/* We pass and return vtypes of size 8 in a register */
}
}
+#ifndef TARGET_WIN32
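+	/* Empty structs take up no registers or stack space */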
+ if (size == 0) {
+ ainfo->storage = ArgValuetypeInReg;
+ ainfo->pair_storage [0] = ainfo->pair_storage [1] = ArgNone;
+ return;
+ }
+#endif
+
if (pass_on_stack) {
		/* Always pass in memory */
ainfo->offset = *stack_size;
*stack_size += ALIGN_TO (size, 8);
- ainfo->storage = ArgOnStack;
+ ainfo->storage = is_return ? ArgValuetypeAddrInIReg : ArgOnStack;
g_free (fields);
return;
* the CLR.
*/
g_assert (info);
- g_assert (fields);
+
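+	/* No fields: nothing needs to be passed in registers or on the stack */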
+ if (!fields) {
+ ainfo->storage = ArgValuetypeInReg;
+ ainfo->pair_storage [0] = ainfo->pair_storage [1] = ArgNone;
+ return;
+ }
#ifndef TARGET_WIN32
if (info->native_size > 16) {
ainfo->offset = *stack_size;
*stack_size += ALIGN_TO (info->native_size, 8);
- ainfo->storage = ArgOnStack;
+ ainfo->storage = is_return ? ArgValuetypeAddrInIReg : ArgOnStack;
g_free (fields);
return;
break;
default:
if (is_return) {
- ainfo->storage = ArgOnStack;
+ ainfo->storage = ArgValuetypeAddrInIReg;
ainfo->offset = *stack_size;
*stack_size += ALIGN_TO (info->native_size, 8);
}
/* (8 is size of quad) */
quadsize [quad] = fields [i].offset + size - (quad * 8);
- class1 = merge_argument_class_from_type (gsctx, fields [i].field->type, class1);
+ class1 = merge_argument_class_from_type (fields [i].field->type, class1);
}
g_assert (class1 != ARG_CLASS_NO_CLASS);
args [quad] = class1;
*stack_size += ALIGN_TO (info->native_size, 8);
else
*stack_size += nquads * sizeof(mgreg_t);
- ainfo->storage = ArgOnStack;
+ ainfo->storage = is_return ? ArgValuetypeAddrInIReg : ArgOnStack;
}
}
}
* Draft Version 0.23" document for more information.
*/
static CallInfo*
-get_call_info (MonoGenericSharingContext *gsctx, MonoMemPool *mp, MonoMethodSignature *sig)
+get_call_info (MonoMemPool *mp, MonoMethodSignature *sig)
{
guint32 i, gr, fr, pstart;
MonoType *ret_type;
#endif
/* return value */
- ret_type = mini_type_get_underlying_type (gsctx, sig->ret);
+ ret_type = mini_get_underlying_type (sig->ret);
switch (ret_type->type) {
case MONO_TYPE_I1:
case MONO_TYPE_U1:
cinfo->ret.reg = AMD64_RAX;
break;
}
+ if (mini_is_gsharedvt_type (ret_type)) {
+ cinfo->ret.storage = ArgValuetypeAddrInIReg;
+ break;
+ }
/* fall through */
#if defined( __native_client_codegen__ )
case MONO_TYPE_TYPEDBYREF:
case MONO_TYPE_VALUETYPE: {
guint32 tmp_gr = 0, tmp_fr = 0, tmp_stacksize = 0;
- add_valuetype (gsctx, sig, &cinfo->ret, ret_type, TRUE, &tmp_gr, &tmp_fr, &tmp_stacksize);
- if (cinfo->ret.storage == ArgOnStack) {
- cinfo->vtype_retaddr = TRUE;
- /* The caller passes the address where the value is stored */
- }
+ add_valuetype (sig, &cinfo->ret, ret_type, TRUE, &tmp_gr, &tmp_fr, &tmp_stacksize);
+ g_assert (cinfo->ret.storage != ArgInIReg);
break;
}
#if !defined( __native_client_codegen__ )
case MONO_TYPE_TYPEDBYREF:
/* Same as a valuetype with size 24 */
- cinfo->vtype_retaddr = TRUE;
+ cinfo->ret.storage = ArgValuetypeAddrInIReg;
break;
#endif
+ case MONO_TYPE_VAR:
+ case MONO_TYPE_MVAR:
+ g_assert (mini_is_gsharedvt_type (ret_type));
+ cinfo->ret.storage = ArgValuetypeAddrInIReg;
+ break;
case MONO_TYPE_VOID:
break;
default:
* are sometimes made using calli without sig->hasthis set, like in the delegate
* invoke wrappers.
*/
- if (cinfo->vtype_retaddr && !is_pinvoke && (sig->hasthis || (sig->param_count > 0 && MONO_TYPE_IS_REFERENCE (mini_type_get_underlying_type (gsctx, sig->params [0]))))) {
+ if (cinfo->ret.storage == ArgValuetypeAddrInIReg && !is_pinvoke && (sig->hasthis || (sig->param_count > 0 && MONO_TYPE_IS_REFERENCE (mini_get_underlying_type (sig->params [0]))))) {
if (sig->hasthis) {
add_general (&gr, &stack_size, cinfo->args + 0);
} else {
pstart = 1;
}
add_general (&gr, &stack_size, &cinfo->ret);
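+		/* add_general () overwrites the storage set above, restore it */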
+ cinfo->ret.storage = ArgValuetypeAddrInIReg;
cinfo->vret_arg_index = 1;
} else {
/* this */
if (sig->hasthis)
add_general (&gr, &stack_size, cinfo->args + 0);
- if (cinfo->vtype_retaddr)
+ if (cinfo->ret.storage == ArgValuetypeAddrInIReg) {
add_general (&gr, &stack_size, &cinfo->ret);
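+			/* Restore the storage overwritten by add_general () */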
+ cinfo->ret.storage = ArgValuetypeAddrInIReg;
+ }
}
if (!sig->pinvoke && (sig->call_convention == MONO_CALL_VARARG) && (n == 0)) {
add_general (&gr, &stack_size, &cinfo->sig_cookie);
}
- ptype = mini_type_get_underlying_type (gsctx, sig->params [i]);
+ ptype = mini_get_underlying_type (sig->params [i]);
switch (ptype->type) {
case MONO_TYPE_I1:
case MONO_TYPE_U1:
add_general (&gr, &stack_size, ainfo);
break;
}
+ if (mini_is_gsharedvt_type (ptype)) {
+ /* gsharedvt arguments are passed by ref */
+ add_general (&gr, &stack_size, ainfo);
+ if (ainfo->storage == ArgInIReg)
+ ainfo->storage = ArgGSharedVtInReg;
+ else
+ ainfo->storage = ArgGSharedVtOnStack;
+ break;
+ }
/* fall through */
case MONO_TYPE_VALUETYPE:
case MONO_TYPE_TYPEDBYREF:
- add_valuetype (gsctx, sig, ainfo, sig->params [i], FALSE, &gr, &fr, &stack_size);
+ add_valuetype (sig, ainfo, sig->params [i], FALSE, &gr, &fr, &stack_size);
break;
case MONO_TYPE_U8:
case MONO_TYPE_R8:
add_float (&fr, &stack_size, ainfo, TRUE);
break;
+ case MONO_TYPE_VAR:
+ case MONO_TYPE_MVAR:
+ /* gsharedvt arguments are passed by ref */
+ g_assert (mini_is_gsharedvt_type (ptype));
+ add_general (&gr, &stack_size, ainfo);
+ if (ainfo->storage == ArgInIReg)
+ ainfo->storage = ArgGSharedVtInReg;
+ else
+ ainfo->storage = ArgGSharedVtOnStack;
+ break;
default:
g_assert_not_reached ();
}
* Returns the size of the argument area on the stack.
*/
int
-mono_arch_get_argument_info (MonoGenericSharingContext *gsctx, MonoMethodSignature *csig, int param_count, MonoJitArgumentInfo *arg_info)
+mono_arch_get_argument_info (MonoMethodSignature *csig, int param_count, MonoJitArgumentInfo *arg_info)
{
int k;
- CallInfo *cinfo = get_call_info (NULL, NULL, csig);
+ CallInfo *cinfo = get_call_info (NULL, csig);
guint32 args_size = cinfo->stack_usage;
/* The arguments are saved to a stack area in mono_arch_instrument_prolog */
gboolean res;
MonoType *callee_ret;
- c1 = get_call_info (NULL, NULL, caller_sig);
- c2 = get_call_info (NULL, NULL, callee_sig);
+ c1 = get_call_info (NULL, caller_sig);
+ c2 = get_call_info (NULL, callee_sig);
res = c1->stack_usage >= c2->stack_usage;
- callee_ret = mini_get_underlying_type (cfg, callee_sig->ret);
+ callee_ret = mini_get_underlying_type (callee_sig->ret);
if (callee_ret && MONO_TYPE_ISSTRUCT (callee_ret) && c2->ret.storage != ArgValuetypeInReg)
/* An address on the callee's stack is passed as the first argument */
res = FALSE;
void
mono_arch_init (void)
{
- int flags;
-
mono_mutex_init_recursive (&mini_arch_mutex);
#if defined(__native_client_codegen__)
mono_native_tls_alloc (&nacl_instruction_depth, NULL);
mono_native_tls_alloc (&nacl_legacy_prefix_tag, NULL);
#endif
-#ifdef MONO_ARCH_NOMAP32BIT
- flags = MONO_MMAP_READ;
- /* amd64_mov_reg_imm () + amd64_mov_reg_membase () */
- breakpoint_size = 13;
- breakpoint_fault_size = 3;
-#else
- flags = MONO_MMAP_READ|MONO_MMAP_32BIT;
- /* amd64_mov_reg_mem () */
- breakpoint_size = 8;
- breakpoint_fault_size = 8;
-#endif
-
- /* amd64_alu_membase_imm_size (code, X86_CMP, AMD64_R11, 0, 0, 4); */
- single_step_fault_size = 4;
-
- ss_trigger_page = mono_valloc (NULL, mono_pagesize (), flags);
- bp_trigger_page = mono_valloc (NULL, mono_pagesize (), flags);
- mono_mprotect (bp_trigger_page, mono_pagesize (), 0);
-
mono_aot_register_jit_icall ("mono_amd64_throw_exception", mono_amd64_throw_exception);
mono_aot_register_jit_icall ("mono_amd64_throw_corlib_exception", mono_amd64_throw_corlib_exception);
mono_aot_register_jit_icall ("mono_amd64_resume_unwind", mono_amd64_resume_unwind);
mono_aot_register_jit_icall ("mono_amd64_get_original_ip", mono_amd64_get_original_ip);
+
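+	/* bp_trampoline is called indirectly by JITted code through bp_tramp_var */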
+ if (!mono_aot_only)
+ bp_trampoline = mini_get_breakpoint_trampoline ();
}
/*
sig = mono_method_signature (cfg->method);
if (!cfg->arch.cinfo)
- cfg->arch.cinfo = get_call_info (cfg->generic_sharing_context, cfg->mempool, sig);
+ cfg->arch.cinfo = get_call_info (cfg->mempool, sig);
cinfo = cfg->arch.cinfo;
/*
sig = mono_method_signature (cfg->method);
cinfo = cfg->arch.cinfo;
- sig_ret = mini_get_underlying_type (cfg, sig->ret);
+ sig_ret = mini_get_underlying_type (sig->ret);
/*
* Contrary to mono_arch_allocate_vars (), the information should describe
case ArgInIReg:
case ArgInFloatSSEReg:
case ArgInDoubleSSEReg:
- if ((MONO_TYPE_ISSTRUCT (sig_ret) && !mono_class_from_mono_type (sig_ret)->enumtype) || ((sig_ret->type == MONO_TYPE_TYPEDBYREF) && cinfo->vtype_retaddr)) {
+ if ((MONO_TYPE_ISSTRUCT (sig_ret) && !mono_class_from_mono_type (sig_ret)->enumtype) || ((sig_ret->type == MONO_TYPE_TYPEDBYREF) && cinfo->ret.storage == ArgValuetypeAddrInIReg)) {
cfg->vret_addr->opcode = OP_REGVAR;
cfg->vret_addr->inst_c0 = cinfo->ret.reg;
}
sig = mono_method_signature (cfg->method);
cinfo = cfg->arch.cinfo;
- sig_ret = mini_get_underlying_type (cfg, sig->ret);
+ sig_ret = mini_get_underlying_type (sig->ret);
mono_arch_compute_omit_fp (cfg);
case ArgInIReg:
case ArgInFloatSSEReg:
case ArgInDoubleSSEReg:
- if ((MONO_TYPE_ISSTRUCT (sig_ret) && !mono_class_from_mono_type (sig_ret)->enumtype) || ((sig_ret->type == MONO_TYPE_TYPEDBYREF) && cinfo->vtype_retaddr)) {
- /* The register is volatile */
- cfg->vret_addr->opcode = OP_REGOFFSET;
- cfg->vret_addr->inst_basereg = cfg->frame_reg;
- if (cfg->arch.omit_fp) {
- cfg->vret_addr->inst_offset = offset;
- offset += 8;
- } else {
- offset += 8;
- cfg->vret_addr->inst_offset = -offset;
- }
- if (G_UNLIKELY (cfg->verbose_level > 1)) {
- printf ("vret_addr =");
- mono_print_ins (cfg->vret_addr);
- }
+ cfg->ret->opcode = OP_REGVAR;
+ cfg->ret->inst_c0 = cinfo->ret.reg;
+ break;
+ case ArgValuetypeAddrInIReg:
+ /* The register is volatile */
+ cfg->vret_addr->opcode = OP_REGOFFSET;
+ cfg->vret_addr->inst_basereg = cfg->frame_reg;
+ if (cfg->arch.omit_fp) {
+ cfg->vret_addr->inst_offset = offset;
+ offset += 8;
+ } else {
+ offset += 8;
+ cfg->vret_addr->inst_offset = -offset;
}
- else {
- cfg->ret->opcode = OP_REGVAR;
- cfg->ret->inst_c0 = cinfo->ret.reg;
+ if (G_UNLIKELY (cfg->verbose_level > 1)) {
+ printf ("vret_addr =");
+ mono_print_ins (cfg->vret_addr);
}
break;
case ArgValuetypeInReg:
* are volatile across calls.
* FIXME: Optimize this.
*/
- if ((ainfo->storage == ArgInIReg) || (ainfo->storage == ArgInFloatSSEReg) || (ainfo->storage == ArgInDoubleSSEReg) || (ainfo->storage == ArgValuetypeInReg))
+ if ((ainfo->storage == ArgInIReg) || (ainfo->storage == ArgInFloatSSEReg) || (ainfo->storage == ArgInDoubleSSEReg) || (ainfo->storage == ArgValuetypeInReg) || (ainfo->storage == ArgGSharedVtInReg))
inreg = FALSE;
ins->opcode = OP_REGOFFSET;
case ArgInIReg:
case ArgInFloatSSEReg:
case ArgInDoubleSSEReg:
+ case ArgGSharedVtInReg:
if (inreg) {
ins->opcode = OP_REGVAR;
ins->dreg = ainfo->reg;
}
break;
case ArgOnStack:
+ case ArgGSharedVtOnStack:
g_assert (!cfg->arch.omit_fp);
ins->opcode = OP_REGOFFSET;
ins->inst_basereg = cfg->frame_reg;
sig = mono_method_signature (cfg->method);
if (!cfg->arch.cinfo)
- cfg->arch.cinfo = get_call_info (cfg->generic_sharing_context, cfg->mempool, sig);
+ cfg->arch.cinfo = get_call_info (cfg->mempool, sig);
cinfo = cfg->arch.cinfo;
if (cinfo->ret.storage == ArgValuetypeInReg)
cfg->ret_var_is_local = TRUE;
- sig_ret = mini_get_underlying_type (cfg, sig->ret);
- if ((cinfo->ret.storage != ArgValuetypeInReg) && MONO_TYPE_ISSTRUCT (sig_ret)) {
+ sig_ret = mini_get_underlying_type (sig->ret);
+ if (cinfo->ret.storage == ArgValuetypeAddrInIReg) {
cfg->vret_addr = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_ARG);
if (G_UNLIKELY (cfg->verbose_level > 1)) {
printf ("vret_addr = ");
MonoInst *ins = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
ins->flags |= MONO_INST_VOLATILE;
cfg->arch.seq_point_info_var = ins;
-
- ins = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
- ins->flags |= MONO_INST_VOLATILE;
- cfg->arch.ss_tramp_var = ins;
}
+ ins = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
+ ins->flags |= MONO_INST_VOLATILE;
+ cfg->arch.ss_tramp_var = ins;
- ins = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
+ ins = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
ins->flags |= MONO_INST_VOLATILE;
- cfg->arch.ss_trigger_page_var = ins;
+ cfg->arch.bp_tramp_var = ins;
}
if (cfg->method->save_lmf)
return LLVMArgInIReg;
case ArgNone:
return LLVMArgNone;
+ case ArgGSharedVtInReg:
+ case ArgGSharedVtOnStack:
+ return LLVMArgGSharedVt;
default:
g_assert_not_reached ();
return LLVMArgNone;
MonoType *t, *sig_ret;
n = sig->param_count + sig->hasthis;
- sig_ret = mini_get_underlying_type (cfg, sig->ret);
+ sig_ret = mini_get_underlying_type (sig->ret);
- cinfo = get_call_info (cfg->generic_sharing_context, cfg->mempool, sig);
+ cinfo = get_call_info (cfg->mempool, sig);
linfo = mono_mempool_alloc0 (cfg->mempool, sizeof (LLVMCallInfo) + (sizeof (LLVMArgInfo) * n));
linfo->ret.pair_storage [j] = arg_storage_to_llvm_arg_storage (cfg, cinfo->ret.pair_storage [j]);
}
- if (MONO_TYPE_ISSTRUCT (sig_ret) && cinfo->ret.storage == ArgInIReg) {
+ if (cinfo->ret.storage == ArgValuetypeAddrInIReg) {
/* Vtype returned using a hidden argument */
linfo->ret.storage = LLVMArgVtypeRetAddr;
linfo->vret_arg_index = cinfo->vret_arg_index;
for (j = 0; j < 2; ++j)
linfo->args [i].pair_storage [j] = arg_storage_to_llvm_arg_storage (cfg, ainfo->pair_storage [j]);
break;
+ case ArgGSharedVtInReg:
+ case ArgGSharedVtOnStack:
+ linfo->args [i].storage = LLVMArgGSharedVt;
+ break;
default:
cfg->exception_message = g_strdup ("ainfo->storage");
cfg->disable_llvm = TRUE;
sig = call->signature;
n = sig->param_count + sig->hasthis;
- cinfo = get_call_info (cfg->generic_sharing_context, cfg->mempool, sig);
+ cinfo = get_call_info (cfg->mempool, sig);
sig_ret = sig->ret;
else
t = sig->params [i - sig->hasthis];
- t = mini_get_underlying_type (cfg, t);
+ t = mini_get_underlying_type (t);
if (ainfo->storage == ArgOnStack && !MONO_TYPE_ISSTRUCT (t) && !call->tail_call) {
if (!t->byref) {
if (t->type == MONO_TYPE_R4)
}
for (i = n - 1; i >= 0; --i) {
+ MonoType *t;
+
ainfo = cinfo->args + i;
in = call->args [i];
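+		/* The underlying argument type is needed by the vtype handling below */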
+ if (sig->hasthis && i == 0)
+ t = &mono_defaults.object_class->byval_arg;
+ else
+ t = sig->params [i - sig->hasthis];
+ t = mini_get_underlying_type (t);
+
switch (ainfo->storage) {
case ArgInIReg:
/* Already done */
case ArgOnStack:
case ArgValuetypeInReg:
case ArgValuetypeAddrInIReg:
+ case ArgGSharedVtInReg:
+ case ArgGSharedVtOnStack: {
+ if (ainfo->storage == ArgOnStack && !MONO_TYPE_ISSTRUCT (t) && !call->tail_call)
+ /* Already emitted above */
+ break;
if (ainfo->storage == ArgOnStack && call->tail_call) {
MonoInst *call_inst = (MonoInst*)call;
cfg->args [i]->flags |= MONO_INST_VOLATILE;
EMIT_NEW_ARGSTORE (cfg, call_inst, i, in);
- } else if ((i >= sig->hasthis) && (MONO_TYPE_ISSTRUCT(sig->params [i - sig->hasthis]))) {
- guint32 align;
- guint32 size;
+ break;
+ }
- if (sig->params [i - sig->hasthis]->type == MONO_TYPE_TYPEDBYREF) {
- size = sizeof (MonoTypedRef);
- align = sizeof (gpointer);
- }
+ guint32 align;
+ guint32 size;
+
+ if (t->type == MONO_TYPE_TYPEDBYREF) {
+ size = sizeof (MonoTypedRef);
+ align = sizeof (gpointer);
+ }
+ else {
+ if (sig->pinvoke)
+ size = mono_type_native_stack_size (t, &align);
else {
- if (sig->pinvoke)
- size = mono_type_native_stack_size (&in->klass->byval_arg, &align);
- else {
- /*
- * Other backends use mono_type_stack_size (), but that
- * aligns the size to 8, which is larger than the size of
- * the source, leading to reads of invalid memory if the
- * source is at the end of address space.
- */
- size = mono_class_value_size (in->klass, &align);
- }
+ /*
+ * Other backends use mono_type_stack_size (), but that
+ * aligns the size to 8, which is larger than the size of
+ * the source, leading to reads of invalid memory if the
+ * source is at the end of address space.
+ */
+ size = mono_class_value_size (mono_class_from_mono_type (t), &align);
}
- g_assert (in->klass);
+ }
- if (ainfo->storage == ArgOnStack && size >= 10000) {
- /* Avoid asserts in emit_memcpy () */
- cfg->exception_type = MONO_EXCEPTION_INVALID_PROGRAM;
- cfg->exception_message = g_strdup_printf ("Passing an argument of size '%d'.", size);
- /* Continue normally */
- }
+ if (size >= 10000) {
+ /* Avoid asserts in emit_memcpy () */
+ cfg->exception_type = MONO_EXCEPTION_INVALID_PROGRAM;
+ cfg->exception_message = g_strdup_printf ("Passing an argument of size '%d'.", size);
+ /* Continue normally */
+ }
- if (size > 0) {
- MONO_INST_NEW (cfg, arg, OP_OUTARG_VT);
- arg->sreg1 = in->dreg;
- arg->klass = in->klass;
- arg->backend.size = size;
- arg->inst_p0 = call;
- arg->inst_p1 = mono_mempool_alloc (cfg->mempool, sizeof (ArgInfo));
- memcpy (arg->inst_p1, ainfo, sizeof (ArgInfo));
+ if (size > 0) {
+ MONO_INST_NEW (cfg, arg, OP_OUTARG_VT);
+ arg->sreg1 = in->dreg;
+ arg->klass = mono_class_from_mono_type (t);
+ arg->backend.size = size;
+ arg->inst_p0 = call;
+ arg->inst_p1 = mono_mempool_alloc (cfg->mempool, sizeof (ArgInfo));
+ memcpy (arg->inst_p1, ainfo, sizeof (ArgInfo));
- MONO_ADD_INS (cfg->cbb, arg);
- }
+ MONO_ADD_INS (cfg->cbb, arg);
}
break;
+ }
default:
g_assert_not_reached ();
}
if (!sig->pinvoke && (sig->call_convention == MONO_CALL_VARARG) && (n == sig->sentinelpos))
emit_sig_cookie (cfg, call, cinfo);
- sig_ret = mini_get_underlying_type (cfg, sig->ret);
- if (sig_ret && MONO_TYPE_ISSTRUCT (sig_ret)) {
- MonoInst *vtarg;
-
- if (cinfo->ret.storage == ArgValuetypeInReg) {
- if (cinfo->ret.pair_storage [0] == ArgInIReg && cinfo->ret.pair_storage [1] == ArgNone) {
- /*
- * Tell the JIT to use a more efficient calling convention: call using
- * OP_CALL, compute the result location after the call, and save the
- * result there.
- */
- call->vret_in_reg = TRUE;
- /*
- * Nullify the instruction computing the vret addr to enable
- * future optimizations.
- */
- if (call->vret_var)
- NULLIFY_INS (call->vret_var);
- } else {
- if (call->tail_call)
- NOT_IMPLEMENTED;
- /*
- * The valuetype is in RAX:RDX after the call, need to be copied to
- * the stack. Push the address here, so the call instruction can
- * access it.
- */
- if (!cfg->arch.vret_addr_loc) {
- cfg->arch.vret_addr_loc = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
- /* Prevent it from being register allocated or optimized away */
- ((MonoInst*)cfg->arch.vret_addr_loc)->flags |= MONO_INST_VOLATILE;
- }
-
- MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, ((MonoInst*)cfg->arch.vret_addr_loc)->dreg, call->vret_var->dreg);
+ switch (cinfo->ret.storage) {
+ case ArgValuetypeInReg:
+ if (cinfo->ret.pair_storage [0] == ArgInIReg && cinfo->ret.pair_storage [1] == ArgNone) {
+ /*
+ * Tell the JIT to use a more efficient calling convention: call using
+ * OP_CALL, compute the result location after the call, and save the
+ * result there.
+ */
+ call->vret_in_reg = TRUE;
+ /*
+ * Nullify the instruction computing the vret addr to enable
+ * future optimizations.
+ */
+ if (call->vret_var)
+ NULLIFY_INS (call->vret_var);
+ } else {
+ if (call->tail_call)
+ NOT_IMPLEMENTED;
+ /*
+ * The valuetype is in RAX:RDX after the call, need to be copied to
+ * the stack. Push the address here, so the call instruction can
+ * access it.
+ */
+ if (!cfg->arch.vret_addr_loc) {
+ cfg->arch.vret_addr_loc = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
+ /* Prevent it from being register allocated or optimized away */
+ ((MonoInst*)cfg->arch.vret_addr_loc)->flags |= MONO_INST_VOLATILE;
}
- }
- else {
- MONO_INST_NEW (cfg, vtarg, OP_MOVE);
- vtarg->sreg1 = call->vret_var->dreg;
- vtarg->dreg = mono_alloc_preg (cfg);
- MONO_ADD_INS (cfg->cbb, vtarg);
- mono_call_inst_add_outarg_reg (cfg, call, vtarg->dreg, cinfo->ret.reg, FALSE);
+ MONO_EMIT_NEW_UNALU (cfg, OP_MOVE, ((MonoInst*)cfg->arch.vret_addr_loc)->dreg, call->vret_var->dreg);
}
+ break;
+ case ArgValuetypeAddrInIReg: {
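+		/* The return value is passed by reference, pass its address in cinfo->ret.reg */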
+ MonoInst *vtarg;
+ MONO_INST_NEW (cfg, vtarg, OP_MOVE);
+ vtarg->sreg1 = call->vret_var->dreg;
+ vtarg->dreg = mono_alloc_preg (cfg);
+ MONO_ADD_INS (cfg->cbb, vtarg);
+
+ mono_call_inst_add_outarg_reg (cfg, call, vtarg->dreg, cinfo->ret.reg, FALSE);
+ break;
+ }
+ default:
+ break;
}
if (cfg->method->save_lmf) {
ArgInfo *ainfo = (ArgInfo*)ins->inst_p1;
int size = ins->backend.size;
- if (ainfo->storage == ArgValuetypeInReg) {
+ switch (ainfo->storage) {
+ case ArgValuetypeInReg: {
MonoInst *load;
int part;
add_outarg_reg (cfg, call, ainfo->pair_storage [part], ainfo->pair_regs [part], load);
}
- } else if (ainfo->storage == ArgValuetypeAddrInIReg) {
+ break;
+ }
+ case ArgValuetypeAddrInIReg: {
MonoInst *vtaddr, *load;
vtaddr = mono_compile_create_var (cfg, &ins->klass->byval_arg, OP_LOCAL);
} else {
MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, AMD64_RSP, ainfo->offset, load->dreg);
}
- } else {
+ break;
+ }
+ case ArgGSharedVtInReg:
+ /* Pass by addr */
+ mono_call_inst_add_outarg_reg (cfg, call, src->dreg, ainfo->reg, FALSE);
+ break;
+ case ArgGSharedVtOnStack:
+ g_assert_not_reached ();
+ break;
+ default:
if (size == 8) {
int dreg = mono_alloc_ireg (cfg);
void
mono_arch_emit_setret (MonoCompile *cfg, MonoMethod *method, MonoInst *val)
{
- MonoType *ret = mini_get_underlying_type (cfg, mono_method_signature (method)->ret);
+ MonoType *ret = mini_get_underlying_type (mono_method_signature (method)->ret);
if (ret->type == MONO_TYPE_R4) {
if (COMPILE_LLVM (cfg))
ArchDynCallInfo *info;
CallInfo *cinfo;
- cinfo = get_call_info (NULL, NULL, sig);
+ cinfo = get_call_info (NULL, sig);
if (!dyn_call_supported (sig, cinfo)) {
g_free (cinfo);
pindex = 1;
}
- if (dinfo->cinfo->vtype_retaddr)
+ if (dinfo->cinfo->ret.storage == ArgValuetypeAddrInIReg)
p->regs [greg ++] = PTR_TO_GREG(ret);
for (i = pindex; i < sig->param_count; i++) {
- MonoType *t = mini_type_get_underlying_type (NULL, sig->params [i]);
+ MonoType *t = mini_get_underlying_type (sig->params [i]);
gpointer *arg = args [arg_index ++];
if (t->byref) {
MonoMethodSignature *sig = dinfo->sig;
guint8 *ret = ((DynCallArgs*)buf)->ret;
mgreg_t res = ((DynCallArgs*)buf)->res;
- MonoType *sig_ret = mini_type_get_underlying_type (NULL, sig->ret);
+ MonoType *sig_ret = mini_get_underlying_type (sig->ret);
switch (sig_ret->type) {
case MONO_TYPE_VOID:
/* Fall through */
}
case MONO_TYPE_VALUETYPE:
- if (dinfo->cinfo->vtype_retaddr) {
+ if (dinfo->cinfo->ret.storage == ArgValuetypeAddrInIReg) {
/* Nothing to do */
} else {
ArgInfo *ainfo = &dinfo->cinfo->ret;
break;
case OP_COMPARE_IMM:
case OP_LCOMPARE_IMM:
- if (!amd64_is_imm32 (ins->inst_imm)) {
+ if (!amd64_use_imm32 (ins->inst_imm)) {
NEW_INS (cfg, ins, temp, OP_I8CONST);
temp->inst_c0 = ins->inst_imm;
temp->dreg = mono_alloc_ireg (cfg);
#ifndef __native_client_codegen__
/* Don't generate memindex opcodes (to simplify */
/* read sandboxing) */
- if (!amd64_is_imm32 (ins->inst_offset)) {
+ if (!amd64_use_imm32 (ins->inst_offset)) {
NEW_INS (cfg, ins, temp, OP_I8CONST);
temp->inst_c0 = ins->inst_offset;
temp->dreg = mono_alloc_ireg (cfg);
case OP_STORE_MEMBASE_IMM:
#endif
case OP_STOREI8_MEMBASE_IMM:
- if (!amd64_is_imm32 (ins->inst_imm)) {
+ if (!amd64_use_imm32 (ins->inst_imm)) {
NEW_INS (cfg, ins, temp, OP_I8CONST);
temp->inst_c0 = ins->inst_imm;
temp->dreg = mono_alloc_ireg (cfg);
case OP_FCALL:
case OP_FCALL_REG:
case OP_FCALL_MEMBASE: {
- MonoType *rtype = mini_get_underlying_type (cfg, ((MonoCallInst*)ins)->signature->ret);
+ MonoType *rtype = mini_get_underlying_type (((MonoCallInst*)ins)->signature->ret);
if (rtype->type == MONO_TYPE_R4) {
amd64_sse_cvtss2sd_reg_reg (code, ins->dreg, AMD64_XMM0);
}
case OP_VCALL2:
case OP_VCALL2_REG:
case OP_VCALL2_MEMBASE:
- cinfo = get_call_info (cfg->generic_sharing_context, cfg->mempool, ((MonoCallInst*)ins)->signature);
+ cinfo = get_call_info (cfg->mempool, ((MonoCallInst*)ins)->signature);
if (cinfo->ret.storage == ArgValuetypeInReg) {
MonoInst *loc = cfg->arch.vret_addr_loc;
#ifdef TARGET_MACH
static gboolean have_tls_get = FALSE;
static gboolean inited = FALSE;
- guint8 *ins;
if (inited)
return have_tls_get;
- ins = (guint8*)pthread_getspecific;
+#if MONO_HAVE_FAST_TLS
+ guint8 *ins = (guint8*)pthread_getspecific;
/*
* We're looking for these two instructions:
ins [8] == 0x00 &&
ins [9] == 0xc3;
- inited = TRUE;
-
tls_gs_offset = ins[5];
+#endif
+
+ inited = TRUE;
return have_tls_get;
#elif defined(TARGET_ANDROID)
#endif
case OP_LOADI8_MEM:
// FIXME: Decompose this earlier
- if (amd64_is_imm32 (ins->inst_imm))
+ if (amd64_use_imm32 (ins->inst_imm))
amd64_mov_reg_mem (code, ins->dreg, ins->inst_imm, 8);
else {
- amd64_mov_reg_imm (code, ins->dreg, ins->inst_imm);
+			amd64_mov_reg_imm_size (code, ins->dreg, ins->inst_imm, sizeof (gpointer));
amd64_mov_reg_membase (code, ins->dreg, ins->dreg, 0, 8);
}
break;
break;
case OP_LOADU4_MEM:
// FIXME: Decompose this earlier
- if (amd64_is_imm32 (ins->inst_imm))
+ if (amd64_use_imm32 (ins->inst_imm))
amd64_mov_reg_mem (code, ins->dreg, ins->inst_imm, 4);
else {
- amd64_mov_reg_imm (code, ins->dreg, ins->inst_imm);
+			amd64_mov_reg_imm_size (code, ins->dreg, ins->inst_imm, sizeof (gpointer));
amd64_mov_reg_membase (code, ins->dreg, ins->dreg, 0, 4);
}
break;
mono_add_seq_point (cfg, bb, ins, code - cfg->native_code);
break;
case OP_SEQ_POINT: {
- int i;
-
if (ins->flags & MONO_INST_SINGLE_STEP_LOC) {
- if (cfg->compile_aot) {
- MonoInst *var = cfg->arch.ss_tramp_var;
- guint8 *label;
-
- /* Load ss_tramp_var */
- amd64_mov_reg_membase (code, AMD64_R11, var->inst_basereg, var->inst_offset, 8);
- /* Load the trampoline address */
- amd64_mov_reg_membase (code, AMD64_R11, AMD64_R11, 0, 8);
- /* Call it if it is non-null */
- amd64_test_reg_reg (code, AMD64_R11, AMD64_R11);
- label = code;
- amd64_branch8 (code, X86_CC_Z, 0, FALSE);
- amd64_call_reg (code, AMD64_R11);
- amd64_patch (label, code);
- } else {
- /*
- * Read from the single stepping trigger page. This will cause a
- * SIGSEGV when single stepping is enabled.
- * We do this _before_ the breakpoint, so single stepping after
- * a breakpoint is hit will step to the next IL offset.
- */
- MonoInst *var = cfg->arch.ss_trigger_page_var;
+ MonoInst *var = cfg->arch.ss_tramp_var;
+ guint8 *label;
- amd64_mov_reg_membase (code, AMD64_R11, var->inst_basereg, var->inst_offset, 8);
- amd64_alu_membase_imm_size (code, X86_CMP, AMD64_R11, 0, 0, 4);
- }
+ /* Load ss_tramp_var */
+ /* This is equal to &ss_trampoline */
+ amd64_mov_reg_membase (code, AMD64_R11, var->inst_basereg, var->inst_offset, 8);
+ /* Load the trampoline address */
+ amd64_mov_reg_membase (code, AMD64_R11, AMD64_R11, 0, 8);
+ /* Call it if it is non-null */
+ amd64_test_reg_reg (code, AMD64_R11, AMD64_R11);
+ label = code;
+ amd64_branch8 (code, X86_CC_Z, 0, FALSE);
+ amd64_call_reg (code, AMD64_R11);
+ amd64_patch (label, code);
}
/*
amd64_call_reg (code, AMD64_R11);
amd64_patch (label, code);
} else {
- /*
- * A placeholder for a possible breakpoint inserted by
- * mono_arch_set_breakpoint ().
+ MonoInst *var = cfg->arch.bp_tramp_var;
+ guint8 *label;
+
+ /*
+	 * Emit a test+branch against a constant; the constant will be overwritten
+ * by mono_arch_set_breakpoint () to cause the test to fail.
*/
- for (i = 0; i < breakpoint_size; ++i)
- x86_nop (code);
+ amd64_mov_reg_imm (code, AMD64_R11, 0);
+ amd64_test_reg_reg (code, AMD64_R11, AMD64_R11);
+ label = code;
+ amd64_branch8 (code, X86_CC_Z, 0, FALSE);
+
+ g_assert (var);
+ g_assert (var->opcode == OP_REGOFFSET);
+ /* Load bp_tramp_var */
+ /* This is equal to &bp_trampoline */
+ amd64_mov_reg_membase (code, AMD64_R11, var->inst_basereg, var->inst_offset, 8);
+ /* Call the trampoline */
+ amd64_call_membase (code, AMD64_R11, 0);
+ amd64_patch (label, code);
}
/*
* Add an additional nop so skipping the bp doesn't cause the ip to point
case OP_ICONST:
case OP_I8CONST:
- if ((((guint64)ins->inst_c0) >> 32) == 0)
+		if ((((guint64)ins->inst_c0) >> 32) == 0 && !mini_get_debug_options ()->single_imm_size)
amd64_mov_reg_imm_size (code, ins->dreg, ins->inst_c0, 4);
else
amd64_mov_reg_imm_size (code, ins->dreg, ins->inst_c0, 8);
jump = code;
amd64_branch8 (code, X86_CC_NZ, -1, 1);
- code = emit_call (cfg, code, MONO_PATCH_INFO_JIT_ICALL_ADDR, "specific_trampoline_generic_class_init", FALSE);
+ code = emit_call (cfg, code, MONO_PATCH_INFO_INTERNAL_METHOD, "mono_generic_class_init", FALSE);
ins->flags |= MONO_INST_GC_CALLSITE;
ins->backend.pc_offset = code - cfg->native_code;
amd64_mov_reg_membase (code, AMD64_R11, AMD64_R11, MONO_STRUCT_OFFSET (SeqPointInfo, ss_tramp_addr), 8);
amd64_mov_membase_reg (code, ins->inst_basereg, ins->inst_offset, AMD64_R11, 8);
} else {
- /* Initialize ss_trigger_page_var */
- ins = cfg->arch.ss_trigger_page_var;
+ /* Initialize ss_tramp_var */
+ ins = cfg->arch.ss_tramp_var;
+ g_assert (ins->opcode == OP_REGOFFSET);
+
+ amd64_mov_reg_imm (code, AMD64_R11, (guint64)&ss_trampoline);
+ amd64_mov_membase_reg (code, ins->inst_basereg, ins->inst_offset, AMD64_R11, 8);
+ /* Initialize bp_tramp_var */
+ ins = cfg->arch.bp_tramp_var;
g_assert (ins->opcode == OP_REGOFFSET);
- amd64_mov_reg_imm (code, AMD64_R11, (guint64)ss_trigger_page);
+ amd64_mov_reg_imm (code, AMD64_R11, (guint64)&bp_trampoline);
amd64_mov_membase_reg (code, ins->inst_basereg, ins->inst_offset, AMD64_R11, 8);
}
}
guchar *code = p;
int save_mode = SAVE_NONE;
MonoMethod *method = cfg->method;
- MonoType *ret_type = mini_get_underlying_type (cfg, mono_method_signature (method)->ret);
+ MonoType *ret_type = mini_get_underlying_type (mono_method_signature (method)->ret);
int i;
switch (ret_type->type) {
gboolean
mono_arch_is_inst_imm (gint64 imm)
{
- return amd64_is_imm32 (imm);
+ return amd64_use_imm32 (imm);
}
/*
#define MAX_ARCH_DELEGATE_PARAMS 10
static gpointer
-get_delegate_invoke_impl (gboolean has_target, guint32 param_count, guint32 *code_len)
+get_delegate_invoke_impl (MonoTrampInfo **info, gboolean has_target, guint32 param_count)
{
guint8 *code, *start;
+ GSList *unwind_ops = NULL;
int i;
+ unwind_ops = mono_arch_get_cie_program ();
+
if (has_target) {
start = code = mono_global_codeman_reserve (64);
nacl_global_codeman_validate (&start, 64, &code);
mono_arch_flush_icache (start, code - start);
- if (code_len)
- *code_len = code - start;
+ if (has_target) {
+ *info = mono_tramp_info_create ("delegate_invoke_impl_has_target", start, code - start, NULL, unwind_ops);
+ } else {
+ char *name = g_strdup_printf ("delegate_invoke_impl_target_%d", param_count);
+ *info = mono_tramp_info_create (name, start, code - start, NULL, unwind_ops);
+ g_free (name);
+ }
if (mono_jit_map_is_enabled ()) {
char *buff;
#define MAX_VIRTUAL_DELEGATE_OFFSET 32
static gpointer
-get_delegate_virtual_invoke_impl (gboolean load_imt_reg, int offset, guint32 *code_len)
+get_delegate_virtual_invoke_impl (MonoTrampInfo **info, gboolean load_imt_reg, int offset)
{
guint8 *code, *start;
int size = 20;
+ char *tramp_name;
+ GSList *unwind_ops;
- if (offset / sizeof (gpointer) > MAX_VIRTUAL_DELEGATE_OFFSET)
+ if (offset / (int)sizeof (gpointer) > MAX_VIRTUAL_DELEGATE_OFFSET)
return NULL;
start = code = mono_global_codeman_reserve (size);
+ unwind_ops = mono_arch_get_cie_program ();
+
/* Replace the this argument with the target */
amd64_mov_reg_reg (code, AMD64_RAX, AMD64_ARG_REG1, 8);
amd64_mov_reg_membase (code, AMD64_ARG_REG1, AMD64_RAX, MONO_STRUCT_OFFSET (MonoDelegate, target), 8);
amd64_jump_membase (code, AMD64_RAX, offset);
mono_profiler_code_buffer_new (start, code - start, MONO_PROFILER_CODE_BUFFER_DELEGATE_INVOKE, NULL);
- if (code_len)
- *code_len = code - start;
+ if (load_imt_reg)
+ tramp_name = g_strdup_printf ("delegate_virtual_invoke_imt_%d", - offset / sizeof (gpointer));
+ else
+ tramp_name = g_strdup_printf ("delegate_virtual_invoke_%d", offset / sizeof (gpointer));
+ *info = mono_tramp_info_create (tramp_name, start, code - start, NULL, unwind_ops);
+ g_free (tramp_name);
return start;
}
mono_arch_get_delegate_invoke_impls (void)
{
GSList *res = NULL;
- guint8 *code;
- guint32 code_len;
+ MonoTrampInfo *info;
int i;
- char *tramp_name;
- code = get_delegate_invoke_impl (TRUE, 0, &code_len);
- res = g_slist_prepend (res, mono_tramp_info_create ("delegate_invoke_impl_has_target", code, code_len, NULL, NULL));
+ get_delegate_invoke_impl (&info, TRUE, 0);
+ res = g_slist_prepend (res, info);
- for (i = 0; i < MAX_ARCH_DELEGATE_PARAMS; ++i) {
- code = get_delegate_invoke_impl (FALSE, i, &code_len);
- tramp_name = g_strdup_printf ("delegate_invoke_impl_target_%d", i);
- res = g_slist_prepend (res, mono_tramp_info_create (tramp_name, code, code_len, NULL, NULL));
- g_free (tramp_name);
+ for (i = 0; i <= MAX_ARCH_DELEGATE_PARAMS; ++i) {
+ get_delegate_invoke_impl (&info, FALSE, i);
+ res = g_slist_prepend (res, info);
}
- for (i = 0; i < MAX_VIRTUAL_DELEGATE_OFFSET; ++i) {
- code = get_delegate_virtual_invoke_impl (TRUE, i * SIZEOF_VOID_P, &code_len);
- tramp_name = g_strdup_printf ("delegate_virtual_invoke_imt_%d", i);
- res = g_slist_prepend (res, mono_tramp_info_create (tramp_name, code, code_len, NULL, NULL));
- g_free (tramp_name);
+ for (i = 0; i <= MAX_VIRTUAL_DELEGATE_OFFSET; ++i) {
+ get_delegate_virtual_invoke_impl (&info, TRUE, - i * SIZEOF_VOID_P);
+ res = g_slist_prepend (res, info);
- code = get_delegate_virtual_invoke_impl (FALSE, i * SIZEOF_VOID_P, &code_len);
- tramp_name = g_strdup_printf ("delegate_virtual_invoke_%d", i);
- res = g_slist_prepend (res, mono_tramp_info_create (tramp_name, code, code_len, NULL, NULL));
- g_free (tramp_name);
+ get_delegate_virtual_invoke_impl (&info, FALSE, i * SIZEOF_VOID_P);
+ res = g_slist_prepend (res, info);
}
return res;
return NULL;
/* FIXME: Support more cases */
- if (MONO_TYPE_ISSTRUCT (mini_replace_type (sig->ret)))
+ if (MONO_TYPE_ISSTRUCT (mini_get_underlying_type (sig->ret)))
return NULL;
if (has_target) {
if (cached)
return cached;
- if (mono_aot_only)
+ if (mono_aot_only) {
start = mono_aot_get_trampoline ("delegate_invoke_impl_has_target");
- else
- start = get_delegate_invoke_impl (TRUE, 0, NULL);
+ } else {
+ MonoTrampInfo *info;
+ start = get_delegate_invoke_impl (&info, TRUE, 0);
+ mono_tramp_info_register (info, NULL);
+ }
mono_memory_barrier ();
start = mono_aot_get_trampoline (name);
g_free (name);
} else {
- start = get_delegate_invoke_impl (FALSE, sig->param_count, NULL);
+ MonoTrampInfo *info;
+ start = get_delegate_invoke_impl (&info, FALSE, sig->param_count);
+ mono_tramp_info_register (info, NULL);
}
mono_memory_barrier ();
gpointer
mono_arch_get_delegate_virtual_invoke_impl (MonoMethodSignature *sig, MonoMethod *method, int offset, gboolean load_imt_reg)
{
- return get_delegate_virtual_invoke_impl (load_imt_reg, offset, NULL);
+ MonoTrampInfo *info;
+ gpointer code;
+
+ code = get_delegate_virtual_invoke_impl (&info, load_imt_reg, offset);
+ if (code)
+ mono_tramp_info_register (info, NULL);
+ return code;
}
void
int size = 0;
guint8 *code, *start;
gboolean vtable_is_32bit = ((gsize)(vtable) == (gsize)(int)(gsize)(vtable));
+ GSList *unwind_ops;
for (i = 0; i < count; ++i) {
MonoIMTCheckItem *item = imt_entries [i];
if (item->is_equals) {
if (item->check_target_idx) {
if (!item->compare_done) {
- if (amd64_is_imm32 (item->key))
+ if (amd64_use_imm32 ((gint64)item->key))
item->chunk_size += CMP_SIZE;
else
item->chunk_size += MOV_REG_IMM_SIZE + CMP_REG_REG_SIZE;
}
}
} else {
- if (amd64_is_imm32 (item->key))
+ if (amd64_use_imm32 ((gint64)item->key))
item->chunk_size += CMP_SIZE;
else
item->chunk_size += MOV_REG_IMM_SIZE + CMP_REG_REG_SIZE;
code = mono_domain_code_reserve (domain, size);
#endif
start = code;
+
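+	/* Collect unwind info so the trampoline can be registered below */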
+ unwind_ops = mono_arch_get_cie_program ();
+
for (i = 0; i < count; ++i) {
MonoIMTCheckItem *item = imt_entries [i];
item->code_target = code;
if (item->check_target_idx || fail_case) {
if (!item->compare_done || fail_case) {
- if (amd64_is_imm32 (item->key))
+ if (amd64_use_imm32 ((gint64)item->key))
amd64_alu_reg_imm_size (code, X86_CMP, MONO_ARCH_IMT_REG, (guint32)(gssize)item->key, sizeof(gpointer));
else {
- amd64_mov_reg_imm (code, MONO_ARCH_IMT_SCRATCH_REG, item->key);
+ amd64_mov_reg_imm_size (code, MONO_ARCH_IMT_SCRATCH_REG, item->key, sizeof(gpointer));
amd64_alu_reg_reg (code, X86_CMP, MONO_ARCH_IMT_REG, MONO_ARCH_IMT_SCRATCH_REG);
}
}
#endif
}
} else {
- if (amd64_is_imm32 (item->key))
+ if (amd64_use_imm32 ((gint64)item->key))
amd64_alu_reg_imm_size (code, X86_CMP, MONO_ARCH_IMT_REG, (guint32)(gssize)item->key, sizeof (gpointer));
else {
- amd64_mov_reg_imm (code, MONO_ARCH_IMT_SCRATCH_REG, item->key);
+ amd64_mov_reg_imm_size (code, MONO_ARCH_IMT_SCRATCH_REG, item->key, sizeof (gpointer));
amd64_alu_reg_reg (code, X86_CMP, MONO_ARCH_IMT_REG, MONO_ARCH_IMT_SCRATCH_REG);
}
item->jmp_code = code;
nacl_domain_code_validate(domain, &start, size, &code);
mono_profiler_code_buffer_new (start, code - start, MONO_PROFILER_CODE_BUFFER_IMT_TRAMPOLINE, NULL);
+ mono_tramp_info_register (mono_tramp_info_create (NULL, start, code - start, NULL, unwind_ops), domain);
+
return start;
}
mono_arch_set_breakpoint (MonoJitInfo *ji, guint8 *ip)
{
guint8 *code = ip;
- guint8 *orig_code = code;
if (ji->from_aot) {
guint32 native_offset = ip - (guint8*)ji->code_start;
g_assert (info->bp_addrs [native_offset] == 0);
info->bp_addrs [native_offset] = mini_get_breakpoint_trampoline ();
} else {
- /*
- * In production, we will use int3 (has to fix the size in the md
- * file). But that could confuse gdb, so during development, we emit a SIGSEGV
- * instead.
- */
- g_assert (code [0] == 0x90);
- if (breakpoint_size == 8) {
- amd64_mov_reg_mem (code, AMD64_R11, (guint64)bp_trigger_page, 4);
- } else {
- amd64_mov_reg_imm_size (code, AMD64_R11, (guint64)bp_trigger_page, 8);
- amd64_mov_reg_membase (code, AMD64_R11, AMD64_R11, 0, 4);
- }
-
- g_assert (code - orig_code == breakpoint_size);
+ /* ip points to a mov r11, 0 */
+ g_assert (code [0] == 0x41);
+ g_assert (code [1] == 0xbb);
+ amd64_mov_reg_imm (code, AMD64_R11, 1);
}
}
mono_arch_clear_breakpoint (MonoJitInfo *ji, guint8 *ip)
{
guint8 *code = ip;
- int i;
if (ji->from_aot) {
guint32 native_offset = ip - (guint8*)ji->code_start;
info->bp_addrs [native_offset] = NULL;
} else {
- for (i = 0; i < breakpoint_size; ++i)
- x86_nop (code);
+ amd64_mov_reg_imm (code, AMD64_R11, 0);
}
}
gboolean
mono_arch_is_breakpoint_event (void *info, void *sigctx)
{
-#ifdef HOST_WIN32
- EXCEPTION_RECORD* einfo = ((EXCEPTION_POINTERS*)info)->ExceptionRecord;
- if (einfo->ExceptionCode == EXCEPTION_ACCESS_VIOLATION && (gpointer)einfo->ExceptionInformation [1] == bp_trigger_page)
- return TRUE;
- else
- return FALSE;
-#else
- siginfo_t* sinfo = (siginfo_t*) info;
- /* Sometimes the address is off by 4 */
- if (sinfo->si_addr >= bp_trigger_page && (guint8*)sinfo->si_addr <= (guint8*)bp_trigger_page + 128)
- return TRUE;
- else
- return FALSE;
-#endif
+ /* We use soft breakpoints on amd64 */
+ return FALSE;
}
/*
void
mono_arch_skip_breakpoint (MonoContext *ctx, MonoJitInfo *ji)
{
- if (ji->from_aot) {
- /* The breakpoint instruction is a call */
- } else {
- MONO_CONTEXT_SET_IP (ctx, (guint8*)MONO_CONTEXT_GET_IP (ctx) + breakpoint_fault_size);
- }
+ g_assert_not_reached ();
}
/*
void
mono_arch_start_single_stepping (void)
{
- mono_mprotect (ss_trigger_page, mono_pagesize (), 0);
ss_trampoline = mini_get_single_step_trampoline ();
}
void
mono_arch_stop_single_stepping (void)
{
- mono_mprotect (ss_trigger_page, mono_pagesize (), MONO_MMAP_READ);
ss_trampoline = NULL;
}
gboolean
mono_arch_is_single_step_event (void *info, void *sigctx)
{
-#ifdef HOST_WIN32
- EXCEPTION_RECORD* einfo = ((EXCEPTION_POINTERS*)info)->ExceptionRecord;
- if (einfo->ExceptionCode == EXCEPTION_ACCESS_VIOLATION && (gpointer)einfo->ExceptionInformation [1] == ss_trigger_page)
- return TRUE;
- else
- return FALSE;
-#else
- siginfo_t* sinfo = (siginfo_t*) info;
- /* Sometimes the address is off by 4 */
- if (sinfo->si_addr >= ss_trigger_page && (guint8*)sinfo->si_addr <= (guint8*)ss_trigger_page + 128)
- return TRUE;
- else
- return FALSE;
-#endif
+ /* We use soft breakpoints on amd64 */
+ return FALSE;
}
/*
void
mono_arch_skip_single_step (MonoContext *ctx)
{
- MONO_CONTEXT_SET_IP (ctx, (guint8*)MONO_CONTEXT_GET_IP (ctx) + single_step_fault_size);
+ g_assert_not_reached ();
}
/*
return FALSE;
}
}
+
+#if defined(ENABLE_GSHAREDVT)
+
+#include "../../../mono-extensions/mono/mini/mini-amd64-gsharedvt.c"
+
+#endif /* ENABLE_GSHAREDVT */