#include <mono/metadata/threads.h>
#include <mono/metadata/profiler-private.h>
#include <mono/metadata/mono-debug.h>
-#include <mono/metadata/gc-internal.h>
+#include <mono/metadata/gc-internals.h>
#include <mono/utils/mono-math.h>
#include <mono/utils/mono-counters.h>
#include <mono/utils/mono-mmap.h>
#endif
#endif
+/* The single step trampoline */
+static gpointer ss_trampoline;
+
+/* The breakpoint trampoline */
+static gpointer bp_trampoline;
+
/* This mutex protects architecture specific caches */
#define mono_mini_arch_lock() mono_mutex_lock (&mini_arch_mutex)
#define mono_mini_arch_unlock() mono_mutex_unlock (&mini_arch_mutex)
#define X86_IS_CALLEE_SAVED_REG(reg) (((reg) == X86_EBX) || ((reg) == X86_EDI) || ((reg) == X86_ESI))
-MonoBreakpointInfo
-mono_breakpoint_info [MONO_BREAKPOINT_ARRAY_SIZE];
+#define OP_SEQ_POINT_BP_OFFSET 7
static guint8*
emit_load_aotconst (guint8 *start, guint8 *code, MonoCompile *cfg, MonoJumpInfo **ji, int dreg, int tramp_type, gconstpointer target);
#endif /* __native_client_codegen__ */
-/*
- * The code generated for sequence points reads from this location, which is
- * made read-only when single stepping is enabled.
- */
-static gpointer ss_trigger_page;
-
-/* Enabled breakpoints read from this trigger page */
-static gpointer bp_trigger_page;
-
const char*
mono_arch_regname (int reg)
{
static void
-add_valuetype (MonoGenericSharingContext *gsctx, MonoMethodSignature *sig, ArgInfo *ainfo, MonoType *type,
+add_valuetype (MonoMethodSignature *sig, ArgInfo *ainfo, MonoType *type,
gboolean is_return,
guint32 *gr, const guint32 *param_regs, guint32 *fr, guint32 *stack_size)
{
MonoClass *klass;
klass = mono_class_from_mono_type (type);
- size = mini_type_stack_size_full (gsctx, &klass->byval_arg, NULL, sig->pinvoke);
+ size = mini_type_stack_size_full (&klass->byval_arg, NULL, sig->pinvoke);
#ifdef SMALL_STRUCTS_IN_REGS
if (sig->pinvoke && is_return) {
/* Special case structs with only a float member */
if (info->num_fields == 1) {
- int ftype = mini_type_get_underlying_type (gsctx, info->fields [0].field->type)->type;
+ int ftype = mini_get_underlying_type (info->fields [0].field->type)->type;
if ((info->native_size == 8) && (ftype == MONO_TYPE_R8)) {
ainfo->storage = ArgValuetypeInReg;
ainfo->pair_storage [0] = ArgOnDoubleFpStack;
* For x86 win32, see ???.
*/
static CallInfo*
-get_call_info_internal (MonoGenericSharingContext *gsctx, CallInfo *cinfo, MonoMethodSignature *sig)
+get_call_info_internal (CallInfo *cinfo, MonoMethodSignature *sig)
{
guint32 i, gr, fr, pstart;
const guint32 *param_regs;
/* return value */
{
- ret_type = mini_type_get_underlying_type (gsctx, sig->ret);
+ ret_type = mini_get_underlying_type (sig->ret);
switch (ret_type->type) {
case MONO_TYPE_I1:
case MONO_TYPE_U1:
cinfo->ret.reg = X86_EAX;
break;
}
- if (mini_is_gsharedvt_type_gsctx (gsctx, ret_type)) {
+ if (mini_is_gsharedvt_type (ret_type)) {
cinfo->ret.storage = ArgOnStack;
cinfo->vtype_retaddr = TRUE;
break;
case MONO_TYPE_TYPEDBYREF: {
guint32 tmp_gr = 0, tmp_fr = 0, tmp_stacksize = 0;
- add_valuetype (gsctx, sig, &cinfo->ret, ret_type, TRUE, &tmp_gr, NULL, &tmp_fr, &tmp_stacksize);
+ add_valuetype (sig, &cinfo->ret, ret_type, TRUE, &tmp_gr, NULL, &tmp_fr, &tmp_stacksize);
if (cinfo->ret.storage == ArgOnStack) {
cinfo->vtype_retaddr = TRUE;
/* The caller passes the address where the value is stored */
}
case MONO_TYPE_VAR:
case MONO_TYPE_MVAR:
- g_assert (mini_is_gsharedvt_type_gsctx (gsctx, ret_type));
+ g_assert (mini_is_gsharedvt_type (ret_type));
cinfo->ret.storage = ArgOnStack;
cinfo->vtype_retaddr = TRUE;
break;
* are sometimes made using calli without sig->hasthis set, like in the delegate
* invoke wrappers.
*/
- if (cinfo->vtype_retaddr && !is_pinvoke && (sig->hasthis || (sig->param_count > 0 && MONO_TYPE_IS_REFERENCE (mini_type_get_underlying_type (gsctx, sig->params [0]))))) {
+ if (cinfo->vtype_retaddr && !is_pinvoke && (sig->hasthis || (sig->param_count > 0 && MONO_TYPE_IS_REFERENCE (mini_get_underlying_type (sig->params [0]))))) {
if (sig->hasthis) {
add_general (&gr, param_regs, &stack_size, cinfo->args + 0);
} else {
add_general (&gr, param_regs, &stack_size, ainfo);
continue;
}
- ptype = mini_type_get_underlying_type (gsctx, sig->params [i]);
+ ptype = mini_get_underlying_type (sig->params [i]);
switch (ptype->type) {
case MONO_TYPE_I1:
case MONO_TYPE_U1:
add_general (&gr, param_regs, &stack_size, ainfo);
break;
}
- if (mini_is_gsharedvt_type_gsctx (gsctx, ptype)) {
+ if (mini_is_gsharedvt_type (ptype)) {
/* gsharedvt arguments are passed by ref */
add_general (&gr, param_regs, &stack_size, ainfo);
g_assert (ainfo->storage == ArgOnStack);
/* Fall through */
case MONO_TYPE_VALUETYPE:
case MONO_TYPE_TYPEDBYREF:
- add_valuetype (gsctx, sig, ainfo, ptype, FALSE, &gr, param_regs, &fr, &stack_size);
+ add_valuetype (sig, ainfo, ptype, FALSE, &gr, param_regs, &fr, &stack_size);
break;
case MONO_TYPE_U8:
case MONO_TYPE_I8:
case MONO_TYPE_VAR:
case MONO_TYPE_MVAR:
/* gsharedvt arguments are passed by ref */
- g_assert (mini_is_gsharedvt_type_gsctx (gsctx, ptype));
+ g_assert (mini_is_gsharedvt_type (ptype));
add_general (&gr, param_regs, &stack_size, ainfo);
g_assert (ainfo->storage == ArgOnStack);
ainfo->storage = ArgGSharedVt;
}
static CallInfo*
-get_call_info (MonoGenericSharingContext *gsctx, MonoMemPool *mp, MonoMethodSignature *sig)
+get_call_info (MonoMemPool *mp, MonoMethodSignature *sig)
{
int n = sig->hasthis + sig->param_count;
CallInfo *cinfo;
else
cinfo = g_malloc0 (sizeof (CallInfo) + (sizeof (ArgInfo) * n));
- return get_call_info_internal (gsctx, cinfo, sig);
+ return get_call_info_internal (cinfo, sig);
}
/*
*
* Returns the size of the argument area on the stack.
* This should be signal safe, since it is called from
- * mono_arch_find_jit_info ().
+ * mono_arch_unwind_frame ().
* FIXME: The metadata calls might not be signal safe.
*/
int
-mono_arch_get_argument_info (MonoGenericSharingContext *gsctx, MonoMethodSignature *csig, int param_count, MonoJitArgumentInfo *arg_info)
+mono_arch_get_argument_info (MonoMethodSignature *csig, int param_count, MonoJitArgumentInfo *arg_info)
{
int len, k, args_size = 0;
int size, pad;
cinfo = (CallInfo*)g_newa (guint8*, len);
memset (cinfo, 0, len);
- cinfo = get_call_info_internal (gsctx, cinfo, csig);
+ cinfo = get_call_info_internal (cinfo, csig);
arg_info [0].offset = offset;
arg_info [0].size = args_size;
for (k = 0; k < param_count; k++) {
- size = mini_type_stack_size_full (NULL, csig->params [k], &align, csig->pinvoke);
+ size = mini_type_stack_size_full (csig->params [k], &align, csig->pinvoke);
/* ignore alignment for now */
align = 1;
/* OP_TAILCALL doesn't work with AOT */
return FALSE;
- c1 = get_call_info (NULL, NULL, caller_sig);
- c2 = get_call_info (NULL, NULL, callee_sig);
+ c1 = get_call_info (NULL, caller_sig);
+ c2 = get_call_info (NULL, callee_sig);
/*
* Tail calls with more callee stack usage than the caller cannot be supported, since
* the extra stack space would be left on the stack after the tail call.
*/
res = c1->stack_usage >= c2->stack_usage;
- callee_ret = mini_get_underlying_type (cfg, callee_sig->ret);
+ callee_ret = mini_get_underlying_type (callee_sig->ret);
if (callee_ret && MONO_TYPE_ISSTRUCT (callee_ret) && c2->ret.storage != ArgValuetypeInReg)
/* An address on the callee's stack is passed as the first argument */
res = FALSE;
{
mono_mutex_init_recursive (&mini_arch_mutex);
- ss_trigger_page = mono_valloc (NULL, mono_pagesize (), MONO_MMAP_READ);
- bp_trigger_page = mono_valloc (NULL, mono_pagesize (), MONO_MMAP_READ|MONO_MMAP_32BIT);
- mono_mprotect (bp_trigger_page, mono_pagesize (), 0);
+ if (!mono_aot_only)
+ bp_trampoline = mini_get_breakpoint_trampoline ();
mono_aot_register_jit_icall ("mono_x86_throw_exception", mono_x86_throw_exception);
mono_aot_register_jit_icall ("mono_x86_throw_corlib_exception", mono_x86_throw_corlib_exception);
/*
 * mono_arch_cleanup:
 *
 *   Arch-specific teardown. With the switch to soft breakpoints the
 * ss/bp trigger pages are no longer allocated in mono_arch_init (see the
 * corresponding removal there), so the only remaining work is destroying
 * the architecture cache mutex.
 */
void
mono_arch_cleanup (void)
{
-	if (ss_trigger_page)
-		mono_vfree (ss_trigger_page, mono_pagesize ());
-	if (bp_trigger_page)
-		mono_vfree (bp_trigger_page, mono_pagesize ());
	mono_mutex_destroy (&mini_arch_mutex);
}
header = cfg->header;
sig = mono_method_signature (cfg->method);
- cinfo = get_call_info (cfg->generic_sharing_context, cfg->mempool, sig);
+ cinfo = get_call_info (cfg->mempool, sig);
cfg->frame_reg = X86_EBP;
offset = 0;
sig = mono_method_signature (cfg->method);
- cinfo = get_call_info (cfg->generic_sharing_context, cfg->mempool, sig);
- sig_ret = mini_get_underlying_type (cfg, sig->ret);
+ cinfo = get_call_info (cfg->mempool, sig);
+ sig_ret = mini_get_underlying_type (sig->ret);
if (cinfo->ret.storage == ArgValuetypeInReg)
cfg->ret_var_is_local = TRUE;
- if ((cinfo->ret.storage != ArgValuetypeInReg) && (MONO_TYPE_ISSTRUCT (sig_ret) || mini_is_gsharedvt_variable_type (cfg, sig_ret))) {
+ if ((cinfo->ret.storage != ArgValuetypeInReg) && (MONO_TYPE_ISSTRUCT (sig_ret) || mini_is_gsharedvt_variable_type (sig_ret))) {
cfg->vret_addr = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_ARG);
}
+ if (cfg->gen_sdb_seq_points) {
+ MonoInst *ins;
+
+ ins = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
+ ins->flags |= MONO_INST_VOLATILE;
+ cfg->arch.ss_tramp_var = ins;
+
+ ins = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
+ ins->flags |= MONO_INST_VOLATILE;
+ cfg->arch.bp_tramp_var = ins;
+ }
+
if (cfg->method->save_lmf) {
cfg->create_lmf_var = TRUE;
cfg->lmf_ir = TRUE;
MonoType *t;
for (; start_arg < sig->param_count; ++start_arg) {
- t = mini_replace_type (sig->params [start_arg]);
+ t = mini_get_underlying_type (sig->params [start_arg]);
if (!t->byref && t->type == MONO_TYPE_R8) {
fp_space += sizeof (double);
*fp_arg_setup = start_arg;
n = sig->param_count + sig->hasthis;
- cinfo = get_call_info (cfg->generic_sharing_context, cfg->mempool, sig);
+ cinfo = get_call_info (cfg->mempool, sig);
sig_ret = sig->ret;
linfo = mono_mempool_alloc0 (cfg->mempool, sizeof (LLVMCallInfo) + (sizeof (LLVMArgInfo) * n));
*/
}
- if (mini_type_is_vtype (cfg, sig_ret) && cinfo->ret.storage == ArgInIReg) {
+ if (mini_type_is_vtype (sig_ret) && cinfo->ret.storage == ArgInIReg) {
/* Vtype returned using a hidden argument */
linfo->ret.storage = LLVMArgVtypeRetAddr;
linfo->vret_arg_index = cinfo->vret_arg_index;
}
- if (mini_type_is_vtype (cfg, sig_ret) && cinfo->ret.storage != ArgInIReg) {
+ if (mini_type_is_vtype (sig_ret) && cinfo->ret.storage != ArgInIReg) {
// FIXME:
cfg->exception_message = g_strdup ("vtype ret in call");
cfg->disable_llvm = TRUE;
switch (ainfo->storage) {
case ArgInIReg:
- linfo->args [i].storage = LLVMArgInIReg;
+ linfo->args [i].storage = LLVMArgNormal;
break;
case ArgInDoubleSSEReg:
case ArgInFloatSSEReg:
- linfo->args [i].storage = LLVMArgInFPReg;
+ linfo->args [i].storage = LLVMArgNormal;
break;
case ArgOnStack:
- if (mini_type_is_vtype (cfg, t)) {
+ if (mini_type_is_vtype (t)) {
if (mono_class_value_size (mono_class_from_mono_type (t), NULL) == 0)
/* LLVM seems to allocate argument space for empty structures too */
linfo->args [i].storage = LLVMArgNone;
else
linfo->args [i].storage = LLVMArgVtypeByVal;
} else {
- linfo->args [i].storage = LLVMArgInIReg;
- if (t->byref) {
- if (t->type == MONO_TYPE_R4)
- linfo->args [i].storage = LLVMArgInFPReg;
- else if (t->type == MONO_TYPE_R8)
- linfo->args [i].storage = LLVMArgInFPReg;
- }
+ linfo->args [i].storage = LLVMArgNormal;
}
break;
case ArgValuetypeInReg:
sig = call->signature;
n = sig->param_count + sig->hasthis;
- sig_ret = mini_get_underlying_type (cfg, sig->ret);
+ sig_ret = mini_get_underlying_type (sig->ret);
- cinfo = get_call_info (cfg->generic_sharing_context, cfg->mempool, sig);
+ cinfo = get_call_info (cfg->mempool, sig);
call->call_info = cinfo;
if (!sig->pinvoke && (sig->call_convention == MONO_CALL_VARARG))
else
t = &mono_defaults.int_class->byval_arg;
orig_type = t;
- t = mini_type_get_underlying_type (cfg->generic_sharing_context, t);
+ t = mini_get_underlying_type (t);
MONO_INST_NEW (cfg, arg, OP_X86_PUSH);
align = sizeof (gpointer);
}
else {
- size = mini_type_stack_size_full (cfg->generic_sharing_context, &in->klass->byval_arg, &align, sig->pinvoke);
+ size = mini_type_stack_size_full (&in->klass->byval_arg, &align, sig->pinvoke);
}
if (size > 0) {
mono_call_inst_add_outarg_reg (cfg, call, dreg, ainfo->reg, FALSE);
}
else {
- if (cfg->gsharedvt && mini_is_gsharedvt_klass (cfg, ins->klass)) {
+ if (cfg->gsharedvt && mini_is_gsharedvt_klass (ins->klass)) {
/* Pass by addr */
MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, X86_ESP, ainfo->offset, src->dreg);
} else if (size <= 4) {
void
mono_arch_emit_setret (MonoCompile *cfg, MonoMethod *method, MonoInst *val)
{
- MonoType *ret = mini_type_get_underlying_type (cfg->generic_sharing_context, mono_method_signature (method)->ret);
+ MonoType *ret = mini_get_underlying_type (mono_method_signature (method)->ret);
if (!ret->byref) {
if (ret->type == MONO_TYPE_R4) {
guchar *code = p;
int arg_size = 0, stack_usage = 0, save_mode = SAVE_NONE;
MonoMethod *method = cfg->method;
- MonoType *ret_type = mini_type_get_underlying_type (cfg->generic_sharing_context, mono_method_signature (method)->ret);
+ MonoType *ret_type = mini_get_underlying_type (mono_method_signature (method)->ret);
switch (ret_type->type) {
case MONO_TYPE_VOID:
#ifdef TARGET_MACH
static gboolean have_tls_get = FALSE;
static gboolean inited = FALSE;
- guint32 *ins;
if (inited)
return have_tls_get;
#ifdef MONO_HAVE_FAST_TLS
+ guint32 *ins;
+
ins = (guint32*)pthread_getspecific;
/*
* We're looking for these two instructions:
if (cfg->compile_aot)
NOT_IMPLEMENTED;
+ /* Have to use ecx as a temp reg since this can occur after OP_SETRET */
+
/*
- * Read from the single stepping trigger page. This will cause a
- * SIGSEGV when single stepping is enabled.
* We do this _before_ the breakpoint, so single stepping after
* a breakpoint is hit will step to the next IL offset.
*/
- if (ins->flags & MONO_INST_SINGLE_STEP_LOC)
- x86_alu_reg_mem (code, X86_CMP, X86_EAX, (guint32)ss_trigger_page);
+ if (ins->flags & MONO_INST_SINGLE_STEP_LOC) {
+ MonoInst *var = cfg->arch.ss_tramp_var;
+ guint8 *br [1];
+
+ g_assert (var);
+ g_assert (var->opcode == OP_REGOFFSET);
+ /* Load ss_tramp_var */
+ /* This is equal to &ss_trampoline */
+ x86_mov_reg_membase (code, X86_ECX, var->inst_basereg, var->inst_offset, sizeof (mgreg_t));
+ x86_alu_membase_imm (code, X86_CMP, X86_ECX, 0, 0);
+ br[0] = code; x86_branch8 (code, X86_CC_EQ, 0, FALSE);
+ x86_call_membase (code, X86_ECX, 0);
+ x86_patch (br [0], code);
+ }
+
+ /*
+ * Many parts of sdb depend on the ip after the single step trampoline call to be equal to the seq point offset.
+ * This means we have to put the loading of bp_tramp_var after the offset.
+ */
mono_add_seq_point (cfg, bb, ins, code - cfg->native_code);
+ MonoInst *var = cfg->arch.bp_tramp_var;
+
+ g_assert (var);
+ g_assert (var->opcode == OP_REGOFFSET);
+ /* Load the address of the bp trampoline */
+ /* This needs to be constant size */
+ guint8 *start = code;
+ x86_mov_reg_membase (code, X86_ECX, var->inst_basereg, var->inst_offset, 4);
+ if (code < start + OP_SEQ_POINT_BP_OFFSET) {
+ int size = start + OP_SEQ_POINT_BP_OFFSET - code;
+ x86_padding (code, size);
+ }
/*
* A placeholder for a possible breakpoint inserted by
* mono_arch_set_breakpoint ().
*/
- for (i = 0; i < 6; ++i)
+ for (i = 0; i < 2; ++i)
x86_nop (code);
/*
* Add an additional nop so skipping the bp doesn't cause the ip to point
int compare_val = 0;
guint8 *br [1];
-#if defined (USE_COOP_GC)
- polling_func = "mono_threads_state_poll";
- compare_val = 1;
-#elif defined(__native_client_codegen__) && defined(__native_client_gc__)
+#if defined(__native_client_codegen__) && defined(__native_client_gc__)
polling_func = "mono_nacl_gc";
compare_val = 0xFFFFFFFF;
+#else
+ g_assert (mono_threads_is_coop_enabled ());
+ polling_func = "mono_threads_state_poll";
+ compare_val = 1;
#endif
- if (!polling_func)
- break;
x86_test_membase_imm (code, ins->sreg1, 0, compare_val);
br[0] = code; x86_branch8 (code, X86_CC_EQ, 0, FALSE);
case MONO_PATCH_INFO_IP:
*((gconstpointer *)(ip)) = target;
break;
- case MONO_PATCH_INFO_CLASS_INIT: {
- guint8 *code = ip;
- /* Might already been changed to a nop */
- x86_call_code (code, 0);
- x86_patch (ip, (unsigned char*)target);
- break;
- }
case MONO_PATCH_INFO_ABS:
case MONO_PATCH_INFO_METHOD:
case MONO_PATCH_INFO_METHOD_JUMP:
if (mono_jit_trace_calls != NULL && mono_trace_eval (method))
code = mono_arch_instrument_prolog (cfg, mono_trace_enter_method, code, TRUE);
+ {
+ MonoInst *ins;
+
+ if (cfg->arch.ss_tramp_var) {
+ /* Initialize ss_tramp_var */
+ ins = cfg->arch.ss_tramp_var;
+ g_assert (ins->opcode == OP_REGOFFSET);
+
+ g_assert (!cfg->compile_aot);
+ x86_mov_membase_imm (code, ins->inst_basereg, ins->inst_offset, (guint32)&ss_trampoline, 4);
+ }
+
+ if (cfg->arch.bp_tramp_var) {
+ /* Initialize bp_tramp_var */
+ ins = cfg->arch.bp_tramp_var;
+ g_assert (ins->opcode == OP_REGOFFSET);
+
+ g_assert (!cfg->compile_aot);
+ x86_mov_membase_imm (code, ins->inst_basereg, ins->inst_offset, (guint32)&bp_trampoline, 4);
+ }
+ }
+
/* load arguments allocated to register from the stack */
sig = mono_method_signature (method);
pos = 0;
gboolean supported = FALSE;
if (cfg->compile_aot) {
-#if MONO_HAVE_FAST_TLS
+#if defined(MONO_HAVE_FAST_TLS)
supported = TRUE;
#endif
} else if (mono_get_jit_tls_offset () != -1) {
}
/* Load returned vtypes into registers if needed */
- cinfo = get_call_info (cfg->generic_sharing_context, cfg->mempool, sig);
+ cinfo = get_call_info (cfg->mempool, sig);
if (cinfo->ret.storage == ArgValuetypeInReg) {
for (quad = 0; quad < 2; quad ++) {
switch (cinfo->ret.pair_storage [quad]) {
if (CALLCONV_IS_STDCALL (sig)) {
MonoJitArgumentInfo *arg_info = alloca (sizeof (MonoJitArgumentInfo) * (sig->param_count + 1));
- stack_to_pop = mono_arch_get_argument_info (NULL, sig, sig->param_count, arg_info);
+ stack_to_pop = mono_arch_get_argument_info (sig, sig->param_count, arg_info);
} else if (cinfo->callee_stack_pop)
stack_to_pop = cinfo->callee_stack_pop;
else
int i;
int size = 0;
guint8 *code, *start;
+ GSList *unwind_ops;
for (i = 0; i < count; ++i) {
MonoIMTCheckItem *item = imt_entries [i];
code = mono_domain_code_reserve (domain, size);
#endif
start = code;
+
+ unwind_ops = mono_arch_get_cie_program ();
+
for (i = 0; i < count; ++i) {
MonoIMTCheckItem *item = imt_entries [i];
item->code_target = code;
nacl_domain_code_validate (domain, &start, size, &code);
mono_profiler_code_buffer_new (start, code - start, MONO_PROFILER_CODE_BUFFER_IMT_TRAMPOLINE, NULL);
+ mono_tramp_info_register (mono_tramp_info_create (NULL, start, code - start, NULL, unwind_ops), domain);
+
return start;
}
* call.
*/
/*
 * mono_x86_get_this_arg_offset:
 *
 *   Return the stack offset of the 'this' argument for a delegate invoke
 * call; on x86 it is always the first slot, hence constantly 0. The
 * unused gsctx parameter is dropped as part of the gsctx removal.
 */
guint32
-mono_x86_get_this_arg_offset (MonoGenericSharingContext *gsctx, MonoMethodSignature *sig)
+mono_x86_get_this_arg_offset (MonoMethodSignature *sig)
{
	return 0;
}
#define MAX_ARCH_DELEGATE_PARAMS 10
static gpointer
-get_delegate_invoke_impl (gboolean has_target, guint32 param_count, guint32 *code_len)
+get_delegate_invoke_impl (MonoTrampInfo **info, gboolean has_target, guint32 param_count)
{
guint8 *code, *start;
int code_reserve = 64;
+ GSList *unwind_ops;
+
+ unwind_ops = mono_arch_get_cie_program ();
/*
* The stack contains:
nacl_global_codeman_validate (&start, code_reserve, &code);
- if (code_len)
- *code_len = code - start;
+ if (has_target) {
+ *info = mono_tramp_info_create ("delegate_invoke_impl_has_target", start, code - start, NULL, unwind_ops);
+ } else {
+ char *name = g_strdup_printf ("delegate_invoke_impl_target_%d", param_count);
+ *info = mono_tramp_info_create (name, start, code - start, NULL, unwind_ops);
+ g_free (name);
+ }
if (mono_jit_map_is_enabled ()) {
char *buff;
#define MAX_VIRTUAL_DELEGATE_OFFSET 32
static gpointer
-get_delegate_virtual_invoke_impl (gboolean load_imt_reg, int offset, guint32 *code_size)
+get_delegate_virtual_invoke_impl (MonoTrampInfo **info, gboolean load_imt_reg, int offset)
{
guint8 *code, *start;
int size = 24;
+ char *tramp_name;
+ GSList *unwind_ops;
+
+ if (offset / (int)sizeof (gpointer) > MAX_VIRTUAL_DELEGATE_OFFSET)
+ return NULL;
/*
* The stack contains:
*/
start = code = mono_global_codeman_reserve (size);
+ unwind_ops = mono_arch_get_cie_program ();
+
/* Replace the this argument with the target */
x86_mov_reg_membase (code, X86_EAX, X86_ESP, 4, 4);
x86_mov_reg_membase (code, X86_ECX, X86_EAX, MONO_STRUCT_OFFSET (MonoDelegate, target), 4);
x86_jump_membase (code, X86_EAX, offset);
mono_profiler_code_buffer_new (start, code - start, MONO_PROFILER_CODE_BUFFER_DELEGATE_INVOKE, NULL);
- if (code_size)
- *code_size = code - start;
+ if (load_imt_reg)
+ tramp_name = g_strdup_printf ("delegate_virtual_invoke_imt_%d", - offset / sizeof (gpointer));
+ else
+ tramp_name = g_strdup_printf ("delegate_virtual_invoke_%d", offset / sizeof (gpointer));
+ *info = mono_tramp_info_create (tramp_name, start, code - start, NULL, unwind_ops);
+ g_free (tramp_name);
+
return start;
}
mono_arch_get_delegate_invoke_impls (void)
{
GSList *res = NULL;
- guint8 *code;
- guint32 code_len;
+ MonoTrampInfo *info;
int i;
- char *tramp_name;
- code = get_delegate_invoke_impl (TRUE, 0, &code_len);
- res = g_slist_prepend (res, mono_tramp_info_create ("delegate_invoke_impl_has_target", code, code_len, NULL, NULL));
+ get_delegate_invoke_impl (&info, TRUE, 0);
+ res = g_slist_prepend (res, info);
- for (i = 0; i < MAX_ARCH_DELEGATE_PARAMS; ++i) {
- code = get_delegate_invoke_impl (FALSE, i, &code_len);
- tramp_name = g_strdup_printf ("delegate_invoke_impl_target_%d", i);
- res = g_slist_prepend (res, mono_tramp_info_create (tramp_name, code, code_len, NULL, NULL));
- g_free (tramp_name);
+ for (i = 0; i <= MAX_ARCH_DELEGATE_PARAMS; ++i) {
+ get_delegate_invoke_impl (&info, FALSE, i);
+ res = g_slist_prepend (res, info);
}
- for (i = 0; i < MAX_VIRTUAL_DELEGATE_OFFSET; ++i) {
- code = get_delegate_virtual_invoke_impl (TRUE, i * SIZEOF_VOID_P, &code_len);
- tramp_name = g_strdup_printf ("delegate_virtual_invoke_imt_%d", i);
- res = g_slist_prepend (res, mono_tramp_info_create (tramp_name, code, code_len, NULL, NULL));
- g_free (tramp_name);
+ for (i = 0; i <= MAX_VIRTUAL_DELEGATE_OFFSET; ++i) {
+ get_delegate_virtual_invoke_impl (&info, TRUE, - i * SIZEOF_VOID_P);
+ res = g_slist_prepend (res, info);
- code = get_delegate_virtual_invoke_impl (FALSE, i * SIZEOF_VOID_P, &code_len);
- tramp_name = g_strdup_printf ("delegate_virtual_invoke_%d", i);
- res = g_slist_prepend (res, mono_tramp_info_create (tramp_name, code, code_len, NULL, NULL));
- g_free (tramp_name);
+ get_delegate_virtual_invoke_impl (&info, FALSE, i * SIZEOF_VOID_P);
+ res = g_slist_prepend (res, info);
}
return res;
if (cached)
return cached;
- if (mono_aot_only)
+ if (mono_aot_only) {
start = mono_aot_get_trampoline ("delegate_invoke_impl_has_target");
- else
- start = get_delegate_invoke_impl (TRUE, 0, NULL);
+ } else {
+ MonoTrampInfo *info;
+ start = get_delegate_invoke_impl (&info, TRUE, 0);
+ mono_tramp_info_register (info, NULL);
+ }
mono_memory_barrier ();
start = mono_aot_get_trampoline (name);
g_free (name);
} else {
- start = get_delegate_invoke_impl (FALSE, sig->param_count, NULL);
+ MonoTrampInfo *info;
+ start = get_delegate_invoke_impl (&info, FALSE, sig->param_count);
+ mono_tramp_info_register (info, NULL);
}
mono_memory_barrier ();
/*
 * mono_arch_get_delegate_virtual_invoke_impl:
 *
 *   Public wrapper around get_delegate_virtual_invoke_impl (). The helper
 * now returns a MonoTrampInfo instead of a raw code size; register it so
 * the trampoline shows up in unwind/debug tables. The helper can return
 * NULL (offset beyond MAX_VIRTUAL_DELEGATE_OFFSET), in which case nothing
 * is registered and NULL is propagated to the caller.
 */
gpointer
mono_arch_get_delegate_virtual_invoke_impl (MonoMethodSignature *sig, MonoMethod *method, int offset, gboolean load_imt_reg)
{
-	return get_delegate_virtual_invoke_impl (load_imt_reg, offset, NULL);
+	MonoTrampInfo *info;
+	gpointer code;
+
+	code = get_delegate_virtual_invoke_impl (&info, load_imt_reg, offset);
+	if (code)
+		mono_tramp_info_register (info, NULL);
+	return code;
}
mgreg_t
return tramps;
}
-
-#if __APPLE__
-#define DBG_SIGNAL SIGBUS
-#else
-#define DBG_SIGNAL SIGSEGV
-#endif
-
/* Soft Debug support */
#ifdef MONO_ARCH_SOFT_DEBUG_SUPPORTED
/*
 * mono_arch_set_breakpoint:
 *
 *   Enable the breakpoint at the sequence point at IP. The seq point code
 * emits a fixed-size (OP_SEQ_POINT_BP_OFFSET bytes) prefix which loads the
 * contents of bp_tramp_var (== &bp_trampoline) into ECX, followed by a
 * 2-byte nop placeholder. Patch that placeholder into an indirect
 * 'call [ecx]', which transfers control to the breakpoint trampoline.
 * Replaces the old trigger-page based scheme (the removed SIGSEGV code).
 */
void
mono_arch_set_breakpoint (MonoJitInfo *ji, guint8 *ip)
{
-	guint8 *code = ip;
+	guint8 *code = ip + OP_SEQ_POINT_BP_OFFSET;
-	/*
-	 * In production, we will use int3 (has to fix the size in the md
-	 * file). But that could confuse gdb, so during development, we emit a SIGSEGV
-	 * instead.
-	 */
	/* The placeholder must still be the nop emitted by OP_SEQ_POINT */
	g_assert (code [0] == 0x90);
-	x86_alu_reg_mem (code, X86_CMP, X86_EAX, (guint32)bp_trigger_page);
+	x86_call_membase (code, X86_ECX, 0);
}
/*
/*
 * mono_arch_clear_breakpoint:
 *
 *   Disable the breakpoint at the sequence point at IP by overwriting the
 * 2-byte 'call [ecx]' (BREAKPOINT_SIZE is now 2) with nops, restoring the
 * placeholder state emitted by OP_SEQ_POINT.
 */
void
mono_arch_clear_breakpoint (MonoJitInfo *ji, guint8 *ip)
{
-	guint8 *code = ip;
+	guint8 *code = ip + OP_SEQ_POINT_BP_OFFSET;
	int i;
-	for (i = 0; i < 6; ++i)
+	for (i = 0; i < 2; ++i)
		x86_nop (code);
}
/*
 * mono_arch_start_single_stepping:
 *
 *   Enable single stepping by publishing the single-step trampoline.
 * JITted seq point code loads ss_tramp_var (== &ss_trampoline) and calls
 * through it only when non-NULL, so setting it here activates stepping
 * without mprotect tricks on a trigger page.
 */
void
mono_arch_start_single_stepping (void)
{
-	mono_mprotect (ss_trigger_page, mono_pagesize (), 0);
+	ss_trampoline = mini_get_single_step_trampoline ();
}
/*
/*
 * mono_arch_stop_single_stepping:
 *
 *   Disable single stepping: clearing ss_trampoline makes the NULL check
 * in the seq point code skip the trampoline call.
 */
void
mono_arch_stop_single_stepping (void)
{
-	mono_mprotect (ss_trigger_page, mono_pagesize (), MONO_MMAP_READ);
+	ss_trampoline = NULL;
}
/*
/*
 * mono_arch_is_single_step_event:
 *
 *   With soft breakpoints, single stepping is implemented by calling a
 * trampoline from the seq point code rather than by faulting on a
 * read-protected trigger page, so no signal is ever a single-step event.
 * The old SIGSEGV/SIGBUS trigger-page detection (both the win32 and the
 * siginfo_t variants) is removed.
 */
gboolean
mono_arch_is_single_step_event (void *info, void *sigctx)
{
-#ifdef TARGET_WIN32
-	EXCEPTION_RECORD* einfo = ((EXCEPTION_POINTERS*)info)->ExceptionRecord;	/* Sometimes the address is off by 4 */
-
-	if (((gpointer)einfo->ExceptionInformation[1] >= ss_trigger_page && (guint8*)einfo->ExceptionInformation[1] <= (guint8*)ss_trigger_page + 128))
-		return TRUE;
-	else
-		return FALSE;
-#else
-	siginfo_t* sinfo = (siginfo_t*) info;
-	/* Sometimes the address is off by 4 */
-	if (sinfo->si_signo == DBG_SIGNAL && (sinfo->si_addr >= ss_trigger_page && (guint8*)sinfo->si_addr <= (guint8*)ss_trigger_page + 128))
-		return TRUE;
-	else
-		return FALSE;
-#endif
+	/* We use soft breakpoints */
+	return FALSE;
}
/*
 * mono_arch_is_breakpoint_event:
 *
 *   Breakpoints are now a patched 'call [ecx]' to bp_trampoline, not a
 * faulting read of bp_trigger_page, so no signal is ever a breakpoint
 * event. The old trigger-page address-range checks are removed.
 */
gboolean
mono_arch_is_breakpoint_event (void *info, void *sigctx)
{
-#ifdef TARGET_WIN32
-	EXCEPTION_RECORD* einfo = ((EXCEPTION_POINTERS*)info)->ExceptionRecord;	/* Sometimes the address is off by 4 */
-	if (((gpointer)einfo->ExceptionInformation[1] >= bp_trigger_page && (guint8*)einfo->ExceptionInformation[1] <= (guint8*)bp_trigger_page + 128))
-		return TRUE;
-	else
-		return FALSE;
-#else
-	siginfo_t* sinfo = (siginfo_t*)info;
-	/* Sometimes the address is off by 4 */
-	if (sinfo->si_signo == DBG_SIGNAL && (sinfo->si_addr >= bp_trigger_page && (guint8*)sinfo->si_addr <= (guint8*)bp_trigger_page + 128))
-		return TRUE;
-	else
-		return FALSE;
-#endif
+	/* We use soft breakpoints */
+	return FALSE;
}
-#define BREAKPOINT_SIZE 6
+#define BREAKPOINT_SIZE 2
/*
* mono_arch_skip_breakpoint:
/*
 * mono_arch_skip_breakpoint:
 *
 *   With soft breakpoints the debugger never needs to advance the IP past
 * a faulting instruction (execution resumes normally after the trampoline
 * call returns), so reaching this is a bug.
 */
void
mono_arch_skip_breakpoint (MonoContext *ctx, MonoJitInfo *ji)
{
-	MONO_CONTEXT_SET_IP (ctx, (guint8*)MONO_CONTEXT_GET_IP (ctx) + BREAKPOINT_SIZE);
+	g_assert_not_reached ();
}
/*
/*
 * mono_arch_skip_single_step:
 *
 *   Same as mono_arch_skip_breakpoint: single stepping no longer faults,
 * so there is no instruction to skip; reaching this is a bug.
 */
void
mono_arch_skip_single_step (MonoContext *ctx)
{
-	MONO_CONTEXT_SET_IP (ctx, (guint8*)MONO_CONTEXT_GET_IP (ctx) + 6);
+	g_assert_not_reached ();
}
/*