#include <mono/utils/mono-memory-model.h>
#include <mono/utils/mono-tls.h>
#include <mono/utils/mono-hwcap-x86.h>
+#include <mono/utils/mono-threads.h>
#include "trace.h"
#include "ir-emit.h"
#define IS_REX(inst) (((inst) >= 0x40) && ((inst) <= 0x4f))
-#ifdef HOST_WIN32
+#ifdef TARGET_WIN32
/* Under windows, the calling convention is never stdcall */
#define CALLCONV_IS_STDCALL(call_conv) (FALSE)
#else
/* The size of the single step instruction causing the actual fault */
static int single_step_fault_size;
+/* The single step trampoline */
+static gpointer ss_trampoline;
+
/* Offset between fp and the first argument in the callee */
#define ARGS_OFFSET 16
#define GP_SCRATCH_REG AMD64_R11
#define DEBUG(a) if (cfg->verbose_level > 1) a
-#ifdef HOST_WIN32
+#ifdef TARGET_WIN32
static AMD64_Reg_No param_regs [] = { AMD64_RCX, AMD64_RDX, AMD64_R8, AMD64_R9 };
static AMD64_Reg_No return_regs [] = { AMD64_RAX, AMD64_RDX };
}
}
-#ifdef HOST_WIN32
+#ifdef TARGET_WIN32
#define FLOAT_PARAM_REGS 4
#else
#define FLOAT_PARAM_REGS 8
ptype = mini_type_get_underlying_type (gsctx, type);
switch (ptype->type) {
- case MONO_TYPE_BOOLEAN:
- case MONO_TYPE_CHAR:
case MONO_TYPE_I1:
case MONO_TYPE_U1:
case MONO_TYPE_I2:
break;
case MONO_TYPE_R4:
case MONO_TYPE_R8:
-#ifdef HOST_WIN32
+#ifdef TARGET_WIN32
class2 = ARG_CLASS_INTEGER;
#else
class2 = ARG_CLASS_SSE;
klass = mono_class_from_mono_type (type);
size = mini_type_stack_size_full (gsctx, &klass->byval_arg, NULL, sig->pinvoke);
-#ifndef HOST_WIN32
+#ifndef TARGET_WIN32
if (!sig->pinvoke && ((is_return && (size == 8)) || (!is_return && (size <= 16)))) {
/* We pass and return vtypes of size 8 in a register */
} else if (!sig->pinvoke || (size == 0) || (size > 16)) {
g_assert (info);
g_assert (fields);
-#ifndef HOST_WIN32
+#ifndef TARGET_WIN32
if (info->native_size > 16) {
ainfo->offset = *stack_size;
*stack_size += ALIGN_TO (info->native_size, 8);
gr = 0;
fr = 0;
-#ifdef HOST_WIN32
+#ifdef TARGET_WIN32
/* Reserve space where the callee can save the argument registers */
stack_size = 4 * sizeof (mgreg_t);
#endif
/* return value */
- {
- ret_type = mini_type_get_underlying_type (gsctx, sig->ret);
- switch (ret_type->type) {
- case MONO_TYPE_BOOLEAN:
- case MONO_TYPE_I1:
- case MONO_TYPE_U1:
- case MONO_TYPE_I2:
- case MONO_TYPE_U2:
- case MONO_TYPE_CHAR:
- case MONO_TYPE_I4:
- case MONO_TYPE_U4:
- case MONO_TYPE_I:
- case MONO_TYPE_U:
- case MONO_TYPE_PTR:
- case MONO_TYPE_FNPTR:
- case MONO_TYPE_CLASS:
- case MONO_TYPE_OBJECT:
- case MONO_TYPE_SZARRAY:
- case MONO_TYPE_ARRAY:
- case MONO_TYPE_STRING:
- cinfo->ret.storage = ArgInIReg;
- cinfo->ret.reg = AMD64_RAX;
- break;
- case MONO_TYPE_U8:
- case MONO_TYPE_I8:
+ ret_type = mini_type_get_underlying_type (gsctx, sig->ret);
+ switch (ret_type->type) {
+ case MONO_TYPE_I1:
+ case MONO_TYPE_U1:
+ case MONO_TYPE_I2:
+ case MONO_TYPE_U2:
+ case MONO_TYPE_I4:
+ case MONO_TYPE_U4:
+ case MONO_TYPE_I:
+ case MONO_TYPE_U:
+ case MONO_TYPE_PTR:
+ case MONO_TYPE_FNPTR:
+ case MONO_TYPE_CLASS:
+ case MONO_TYPE_OBJECT:
+ case MONO_TYPE_SZARRAY:
+ case MONO_TYPE_ARRAY:
+ case MONO_TYPE_STRING:
+ cinfo->ret.storage = ArgInIReg;
+ cinfo->ret.reg = AMD64_RAX;
+ break;
+ case MONO_TYPE_U8:
+ case MONO_TYPE_I8:
+ cinfo->ret.storage = ArgInIReg;
+ cinfo->ret.reg = AMD64_RAX;
+ break;
+ case MONO_TYPE_R4:
+ cinfo->ret.storage = ArgInFloatSSEReg;
+ cinfo->ret.reg = AMD64_XMM0;
+ break;
+ case MONO_TYPE_R8:
+ cinfo->ret.storage = ArgInDoubleSSEReg;
+ cinfo->ret.reg = AMD64_XMM0;
+ break;
+ case MONO_TYPE_GENERICINST:
+ if (!mono_type_generic_inst_is_valuetype (ret_type)) {
cinfo->ret.storage = ArgInIReg;
cinfo->ret.reg = AMD64_RAX;
break;
- case MONO_TYPE_R4:
- cinfo->ret.storage = ArgInFloatSSEReg;
- cinfo->ret.reg = AMD64_XMM0;
- break;
- case MONO_TYPE_R8:
- cinfo->ret.storage = ArgInDoubleSSEReg;
- cinfo->ret.reg = AMD64_XMM0;
- break;
- case MONO_TYPE_GENERICINST:
- if (!mono_type_generic_inst_is_valuetype (ret_type)) {
- cinfo->ret.storage = ArgInIReg;
- cinfo->ret.reg = AMD64_RAX;
- break;
- }
- /* fall through */
+ }
+ /* fall through */
#if defined( __native_client_codegen__ )
- case MONO_TYPE_TYPEDBYREF:
+ case MONO_TYPE_TYPEDBYREF:
#endif
- case MONO_TYPE_VALUETYPE: {
- guint32 tmp_gr = 0, tmp_fr = 0, tmp_stacksize = 0;
+ case MONO_TYPE_VALUETYPE: {
+ guint32 tmp_gr = 0, tmp_fr = 0, tmp_stacksize = 0;
- add_valuetype (gsctx, sig, &cinfo->ret, ret_type, TRUE, &tmp_gr, &tmp_fr, &tmp_stacksize);
- if (cinfo->ret.storage == ArgOnStack) {
- cinfo->vtype_retaddr = TRUE;
- /* The caller passes the address where the value is stored */
- }
- break;
+ add_valuetype (gsctx, sig, &cinfo->ret, ret_type, TRUE, &tmp_gr, &tmp_fr, &tmp_stacksize);
+ if (cinfo->ret.storage == ArgOnStack) {
+ cinfo->vtype_retaddr = TRUE;
+ /* The caller passes the address where the value is stored */
}
+ break;
+ }
#if !defined( __native_client_codegen__ )
- case MONO_TYPE_TYPEDBYREF:
- /* Same as a valuetype with size 24 */
- cinfo->vtype_retaddr = TRUE;
- break;
+ case MONO_TYPE_TYPEDBYREF:
+ /* Same as a valuetype with size 24 */
+ cinfo->vtype_retaddr = TRUE;
+ break;
#endif
- case MONO_TYPE_VOID:
- break;
- default:
- g_error ("Can't handle as return value 0x%x", ret_type->type);
- }
+ case MONO_TYPE_VOID:
+ break;
+ default:
+ g_error ("Can't handle as return value 0x%x", ret_type->type);
}
pstart = 0;
ArgInfo *ainfo = &cinfo->args [sig->hasthis + i];
MonoType *ptype;
-#ifdef HOST_WIN32
+#ifdef TARGET_WIN32
/* The float param registers and other param registers must be the same index on Windows x64.*/
if (gr > fr)
fr = gr;
ptype = mini_type_get_underlying_type (gsctx, sig->params [i]);
switch (ptype->type) {
- case MONO_TYPE_BOOLEAN:
case MONO_TYPE_I1:
case MONO_TYPE_U1:
add_general (&gr, &stack_size, ainfo);
break;
case MONO_TYPE_I2:
case MONO_TYPE_U2:
- case MONO_TYPE_CHAR:
add_general (&gr, &stack_size, ainfo);
break;
case MONO_TYPE_I4:
c1 = get_call_info (NULL, NULL, caller_sig);
c2 = get_call_info (NULL, NULL, callee_sig);
res = c1->stack_usage >= c2->stack_usage;
- callee_ret = mini_replace_type (callee_sig->ret);
+ callee_ret = mini_get_underlying_type (cfg, callee_sig->ret);
if (callee_ret && MONO_TYPE_ISSTRUCT (callee_ret) && c2->ret.storage != ArgValuetypeInReg)
/* An address on the callee's stack is passed as the first argument */
res = FALSE;
mono_arch_compute_omit_fp (cfg);
- if (cfg->globalra) {
- if (cfg->arch.omit_fp)
- regs = g_list_prepend (regs, (gpointer)AMD64_RBP);
-
- regs = g_list_prepend (regs, (gpointer)AMD64_RBX);
- regs = g_list_prepend (regs, (gpointer)AMD64_R12);
- regs = g_list_prepend (regs, (gpointer)AMD64_R13);
- regs = g_list_prepend (regs, (gpointer)AMD64_R14);
-#ifndef __native_client_codegen__
- regs = g_list_prepend (regs, (gpointer)AMD64_R15);
-#endif
-
- regs = g_list_prepend (regs, (gpointer)AMD64_R10);
- regs = g_list_prepend (regs, (gpointer)AMD64_R9);
- regs = g_list_prepend (regs, (gpointer)AMD64_R8);
- regs = g_list_prepend (regs, (gpointer)AMD64_RDI);
- regs = g_list_prepend (regs, (gpointer)AMD64_RSI);
- regs = g_list_prepend (regs, (gpointer)AMD64_RDX);
- regs = g_list_prepend (regs, (gpointer)AMD64_RCX);
- regs = g_list_prepend (regs, (gpointer)AMD64_RAX);
- } else {
- if (cfg->arch.omit_fp)
- regs = g_list_prepend (regs, (gpointer)AMD64_RBP);
+ if (cfg->arch.omit_fp)
+ regs = g_list_prepend (regs, (gpointer)AMD64_RBP);
- /* We use the callee saved registers for global allocation */
- regs = g_list_prepend (regs, (gpointer)AMD64_RBX);
- regs = g_list_prepend (regs, (gpointer)AMD64_R12);
- regs = g_list_prepend (regs, (gpointer)AMD64_R13);
- regs = g_list_prepend (regs, (gpointer)AMD64_R14);
+ /* We use the callee saved registers for global allocation */
+ regs = g_list_prepend (regs, (gpointer)AMD64_RBX);
+ regs = g_list_prepend (regs, (gpointer)AMD64_R12);
+ regs = g_list_prepend (regs, (gpointer)AMD64_R13);
+ regs = g_list_prepend (regs, (gpointer)AMD64_R14);
#ifndef __native_client_codegen__
- regs = g_list_prepend (regs, (gpointer)AMD64_R15);
+ regs = g_list_prepend (regs, (gpointer)AMD64_R15);
#endif
-#ifdef HOST_WIN32
- regs = g_list_prepend (regs, (gpointer)AMD64_RDI);
- regs = g_list_prepend (regs, (gpointer)AMD64_RSI);
+#ifdef TARGET_WIN32
+ regs = g_list_prepend (regs, (gpointer)AMD64_RDI);
+ regs = g_list_prepend (regs, (gpointer)AMD64_RSI);
#endif
- }
return regs;
}
{
MonoType *sig_ret;
MonoMethodSignature *sig;
- MonoMethodHeader *header;
MonoInst *ins;
int i;
CallInfo *cinfo;
- header = cfg->header;
-
sig = mono_method_signature (cfg->method);
cinfo = cfg->arch.cinfo;
- sig_ret = mini_replace_type (sig->ret);
+ sig_ret = mini_get_underlying_type (cfg, sig->ret);
/*
* Contrary to mono_arch_allocate_vars (), the information should describe
for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
ArgInfo *ainfo = &cinfo->args [i];
- MonoType *arg_type;
ins = cfg->args [i];
- if (sig->hasthis && (i == 0))
- arg_type = &mono_defaults.object_class->byval_arg;
- else
- arg_type = sig->params [i - sig->hasthis];
-
switch (ainfo->storage) {
case ArgInIReg:
case ArgInFloatSSEReg:
{
MonoType *sig_ret;
MonoMethodSignature *sig;
- MonoMethodHeader *header;
MonoInst *ins;
int i, offset;
guint32 locals_stack_size, locals_stack_align;
gint32 *offsets;
CallInfo *cinfo;
- header = cfg->header;
-
sig = mono_method_signature (cfg->method);
cinfo = cfg->arch.cinfo;
- sig_ret = mini_replace_type (sig->ret);
+ sig_ret = mini_get_underlying_type (cfg, sig->ret);
mono_arch_compute_omit_fp (cfg);
case ArgInFloatSSEReg:
case ArgInDoubleSSEReg:
if ((MONO_TYPE_ISSTRUCT (sig_ret) && !mono_class_from_mono_type (sig_ret)->enumtype) || ((sig_ret->type == MONO_TYPE_TYPEDBYREF) && cinfo->vtype_retaddr)) {
- if (cfg->globalra) {
- cfg->vret_addr->opcode = OP_REGVAR;
- cfg->vret_addr->inst_c0 = cinfo->ret.reg;
+ /* The register is volatile */
+ cfg->vret_addr->opcode = OP_REGOFFSET;
+ cfg->vret_addr->inst_basereg = cfg->frame_reg;
+ if (cfg->arch.omit_fp) {
+ cfg->vret_addr->inst_offset = offset;
+ offset += 8;
} else {
- /* The register is volatile */
- cfg->vret_addr->opcode = OP_REGOFFSET;
- cfg->vret_addr->inst_basereg = cfg->frame_reg;
- if (cfg->arch.omit_fp) {
- cfg->vret_addr->inst_offset = offset;
- offset += 8;
- } else {
- offset += 8;
- cfg->vret_addr->inst_offset = -offset;
- }
- if (G_UNLIKELY (cfg->verbose_level > 1)) {
- printf ("vret_addr =");
- mono_print_ins (cfg->vret_addr);
- }
+ offset += 8;
+ cfg->vret_addr->inst_offset = -offset;
+ }
+ if (G_UNLIKELY (cfg->verbose_level > 1)) {
+ printf ("vret_addr =");
+ mono_print_ins (cfg->vret_addr);
}
}
else {
default:
g_assert_not_reached ();
}
- if (!cfg->globalra)
- cfg->ret->dreg = cfg->ret->inst_c0;
+ cfg->ret->dreg = cfg->ret->inst_c0;
}
/* Allocate locals */
- if (!cfg->globalra) {
- offsets = mono_allocate_stack_slots (cfg, cfg->arch.omit_fp ? FALSE: TRUE, &locals_stack_size, &locals_stack_align);
- if (locals_stack_size > MONO_ARCH_MAX_FRAME_SIZE) {
- char *mname = mono_method_full_name (cfg->method, TRUE);
- cfg->exception_type = MONO_EXCEPTION_INVALID_PROGRAM;
- cfg->exception_message = g_strdup_printf ("Method %s stack is too big.", mname);
- g_free (mname);
- return;
- }
+ offsets = mono_allocate_stack_slots (cfg, cfg->arch.omit_fp ? FALSE: TRUE, &locals_stack_size, &locals_stack_align);
+ if (locals_stack_size > MONO_ARCH_MAX_FRAME_SIZE) {
+ char *mname = mono_method_full_name (cfg->method, TRUE);
+ cfg->exception_type = MONO_EXCEPTION_INVALID_PROGRAM;
+ cfg->exception_message = g_strdup_printf ("Method %s stack is too big.", mname);
+ g_free (mname);
+ return;
+ }
- if (locals_stack_align) {
- offset += (locals_stack_align - 1);
- offset &= ~(locals_stack_align - 1);
- }
- if (cfg->arch.omit_fp) {
- cfg->locals_min_stack_offset = offset;
- cfg->locals_max_stack_offset = offset + locals_stack_size;
- } else {
- cfg->locals_min_stack_offset = - (offset + locals_stack_size);
- cfg->locals_max_stack_offset = - offset;
- }
+ if (locals_stack_align) {
+ offset += (locals_stack_align - 1);
+ offset &= ~(locals_stack_align - 1);
+ }
+ if (cfg->arch.omit_fp) {
+ cfg->locals_min_stack_offset = offset;
+ cfg->locals_max_stack_offset = offset + locals_stack_size;
+ } else {
+ cfg->locals_min_stack_offset = - (offset + locals_stack_size);
+ cfg->locals_max_stack_offset = - offset;
+ }
- for (i = cfg->locals_start; i < cfg->num_varinfo; i++) {
- if (offsets [i] != -1) {
- MonoInst *ins = cfg->varinfo [i];
- ins->opcode = OP_REGOFFSET;
- ins->inst_basereg = cfg->frame_reg;
- if (cfg->arch.omit_fp)
- ins->inst_offset = (offset + offsets [i]);
- else
- ins->inst_offset = - (offset + offsets [i]);
- //printf ("allocated local %d to ", i); mono_print_tree_nl (ins);
- }
+ for (i = cfg->locals_start; i < cfg->num_varinfo; i++) {
+ if (offsets [i] != -1) {
+ MonoInst *ins = cfg->varinfo [i];
+ ins->opcode = OP_REGOFFSET;
+ ins->inst_basereg = cfg->frame_reg;
+ if (cfg->arch.omit_fp)
+ ins->inst_offset = (offset + offsets [i]);
+ else
+ ins->inst_offset = - (offset + offsets [i]);
+ //printf ("allocated local %d to ", i); mono_print_tree_nl (ins);
}
- offset += locals_stack_size;
}
+ offset += locals_stack_size;
if (!sig->pinvoke && (sig->call_convention == MONO_CALL_VARARG)) {
g_assert (!cfg->arch.omit_fp);
if (ins->opcode != OP_REGVAR) {
ArgInfo *ainfo = &cinfo->args [i];
gboolean inreg = TRUE;
- MonoType *arg_type;
-
- if (sig->hasthis && (i == 0))
- arg_type = &mono_defaults.object_class->byval_arg;
- else
- arg_type = sig->params [i - sig->hasthis];
-
- if (cfg->globalra) {
- /* The new allocator needs info about the original locations of the arguments */
- switch (ainfo->storage) {
- case ArgInIReg:
- case ArgInFloatSSEReg:
- case ArgInDoubleSSEReg:
- ins->opcode = OP_REGVAR;
- ins->inst_c0 = ainfo->reg;
- break;
- case ArgOnStack:
- g_assert (!cfg->arch.omit_fp);
- ins->opcode = OP_REGOFFSET;
- ins->inst_basereg = cfg->frame_reg;
- ins->inst_offset = ainfo->offset + ARGS_OFFSET;
- break;
- case ArgValuetypeInReg:
- ins->opcode = OP_REGOFFSET;
- ins->inst_basereg = cfg->frame_reg;
- /* These arguments are saved to the stack in the prolog */
- offset = ALIGN_TO (offset, sizeof(mgreg_t));
- if (cfg->arch.omit_fp) {
- ins->inst_offset = offset;
- offset += (ainfo->storage == ArgValuetypeInReg) ? ainfo->nregs * sizeof (mgreg_t) : sizeof (mgreg_t);
- } else {
- offset += (ainfo->storage == ArgValuetypeInReg) ? ainfo->nregs * sizeof (mgreg_t) : sizeof (mgreg_t);
- ins->inst_offset = - offset;
- }
- break;
- default:
- g_assert_not_reached ();
- }
-
- continue;
- }
/* FIXME: Allocate volatile arguments to registers */
if (ins->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT))
if (cinfo->ret.storage == ArgValuetypeInReg)
cfg->ret_var_is_local = TRUE;
- sig_ret = mini_replace_type (sig->ret);
+ sig_ret = mini_get_underlying_type (cfg, sig->ret);
if ((cinfo->ret.storage != ArgValuetypeInReg) && MONO_TYPE_ISSTRUCT (sig_ret)) {
cfg->vret_addr = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_ARG);
if (G_UNLIKELY (cfg->verbose_level > 1)) {
}
}
- if (cfg->gen_seq_points_debug_data) {
+ if (cfg->gen_sdb_seq_points) {
MonoInst *ins;
if (cfg->compile_aot) {
MonoInst *ins = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
ins->flags |= MONO_INST_VOLATILE;
cfg->arch.seq_point_info_var = ins;
+
+ ins = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
+ ins->flags |= MONO_INST_VOLATILE;
+ cfg->arch.ss_tramp_var = ins;
}
ins = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
if (cfg->method->save_lmf) {
cfg->lmf_ir = TRUE;
-#if !defined(HOST_WIN32)
+#if !defined(TARGET_WIN32)
if (mono_get_lmf_tls_offset () != -1 && !optimize_for_xen)
cfg->lmf_ir_mono_lmf = TRUE;
#endif
MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, AMD64_RSP, cinfo->sig_cookie.offset, sig_reg);
}
+#ifdef ENABLE_LLVM
static inline LLVMArgStorage
arg_storage_to_llvm_arg_storage (MonoCompile *cfg, ArgStorage storage)
{
}
}
-#ifdef ENABLE_LLVM
LLVMCallInfo*
mono_arch_get_llvm_call_info (MonoCompile *cfg, MonoMethodSignature *sig)
{
MonoType *t, *sig_ret;
n = sig->param_count + sig->hasthis;
- sig_ret = mini_replace_type (sig->ret);
+ sig_ret = mini_get_underlying_type (cfg, sig->ret);
cinfo = get_call_info (cfg->generic_sharing_context, cfg->mempool, sig);
MonoInst *arg, *in;
MonoMethodSignature *sig;
MonoType *sig_ret;
- int i, n, stack_size;
+ int i, n;
CallInfo *cinfo;
ArgInfo *ainfo;
- stack_size = 0;
-
sig = call->signature;
n = sig->param_count + sig->hasthis;
else
t = sig->params [i - sig->hasthis];
+ t = mini_get_underlying_type (cfg, t);
if (ainfo->storage == ArgOnStack && !MONO_TYPE_ISSTRUCT (t) && !call->tail_call) {
if (!t->byref) {
if (t->type == MONO_TYPE_R4)
if (!sig->pinvoke && (sig->call_convention == MONO_CALL_VARARG) && (n == sig->sentinelpos))
emit_sig_cookie (cfg, call, cinfo);
- sig_ret = mini_replace_type (sig->ret);
+ sig_ret = mini_get_underlying_type (cfg, sig->ret);
if (sig_ret && MONO_TYPE_ISSTRUCT (sig_ret)) {
MonoInst *vtarg;
void
mono_arch_emit_setret (MonoCompile *cfg, MonoMethod *method, MonoInst *val)
{
- MonoType *ret = mini_replace_type (mono_method_signature (method)->ret);
+ MonoType *ret = mini_get_underlying_type (cfg, mono_method_signature (method)->ret);
if (ret->type == MONO_TYPE_R4) {
if (COMPILE_LLVM (cfg))
p->regs [greg ++] = PTR_TO_GREG(ret);
for (i = pindex; i < sig->param_count; i++) {
- MonoType *t = mono_type_get_underlying_type (sig->params [i]);
+ MonoType *t = mini_type_get_underlying_type (NULL, sig->params [i]);
gpointer *arg = args [arg_index ++];
if (t->byref) {
p->regs [greg ++] = *(guint64*)(arg);
break;
#endif
- case MONO_TYPE_BOOLEAN:
case MONO_TYPE_U1:
p->regs [greg ++] = *(guint8*)(arg);
break;
p->regs [greg ++] = *(gint16*)(arg);
break;
case MONO_TYPE_U2:
- case MONO_TYPE_CHAR:
p->regs [greg ++] = *(guint16*)(arg);
break;
case MONO_TYPE_I4:
MonoMethodSignature *sig = dinfo->sig;
guint8 *ret = ((DynCallArgs*)buf)->ret;
mgreg_t res = ((DynCallArgs*)buf)->res;
- MonoType *sig_ret = mono_type_get_underlying_type (sig->ret);
+ MonoType *sig_ret = mini_type_get_underlying_type (NULL, sig->ret);
switch (sig_ret->type) {
case MONO_TYPE_VOID:
*(gint8*)ret = res;
break;
case MONO_TYPE_U1:
- case MONO_TYPE_BOOLEAN:
*(guint8*)ret = res;
break;
case MONO_TYPE_I2:
*(gint16*)ret = res;
break;
case MONO_TYPE_U2:
- case MONO_TYPE_CHAR:
*(guint16*)ret = res;
break;
case MONO_TYPE_I4:
static inline guint8*
emit_call (MonoCompile *cfg, guint8 *code, guint32 patch_type, gconstpointer data, gboolean win64_adjust_stack)
{
-#ifdef HOST_WIN32
+#ifdef TARGET_WIN32
if (win64_adjust_stack)
amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, 32);
#endif
code = emit_call_body (cfg, code, patch_type, data);
-#ifdef HOST_WIN32
+#ifdef TARGET_WIN32
if (win64_adjust_stack)
amd64_alu_reg_imm (code, X86_ADD, AMD64_RSP, 32);
#endif
int sreg = tree->sreg1;
int need_touch = FALSE;
-#if defined(HOST_WIN32)
+#if defined(TARGET_WIN32)
need_touch = TRUE;
#elif defined(MONO_ARCH_SIGSEGV_ON_ALTSTACK)
if (!tree->flags & MONO_INST_INIT)
break;
case OP_FCALL:
case OP_FCALL_REG:
- case OP_FCALL_MEMBASE:
- if (((MonoCallInst*)ins)->signature->ret->type == MONO_TYPE_R4) {
+ case OP_FCALL_MEMBASE: {
+ MonoType *rtype = mini_get_underlying_type (cfg, ((MonoCallInst*)ins)->signature->ret);
+ if (rtype->type == MONO_TYPE_R4) {
amd64_sse_cvtss2sd_reg_reg (code, ins->dreg, AMD64_XMM0);
}
else {
amd64_sse_movsd_reg_reg (code, ins->dreg, AMD64_XMM0);
}
break;
+ }
case OP_RCALL:
case OP_RCALL_REG:
case OP_RCALL_MEMBASE:
guint8*
mono_amd64_emit_tls_get (guint8* code, int dreg, int tls_offset)
{
-#ifdef HOST_WIN32
+#ifdef TARGET_WIN32
if (tls_offset < 64) {
x86_prefix (code, X86_GS_PREFIX);
amd64_mov_reg_mem (code, dreg, (tls_offset * 8) + 0x1480, 8);
static guint8*
amd64_emit_tls_set (guint8 *code, int sreg, int tls_offset)
{
-#ifdef HOST_WIN32
+#ifdef TARGET_WIN32
g_assert_not_reached ();
#elif defined(__APPLE__)
x86_prefix (code, X86_GS_PREFIX);
amd64_emit_tls_set_reg (guint8 *code, int sreg, int offset_reg)
{
/* offset_reg contains a value translated by mono_arch_translate_tls_offset () */
-#ifdef HOST_WIN32
+#ifdef TARGET_WIN32
g_assert_not_reached ();
#elif defined(__APPLE__)
x86_prefix (code, X86_GS_PREFIX);
MonoCallInst *call;
guint offset;
guint8 *code = cfg->native_code + cfg->code_len;
- MonoInst *last_ins = NULL;
- guint last_offset = 0;
int max_len;
/* Fix max_offset estimate for each successor bb */
if (cfg->verbose_level > 2)
g_print ("Basic block %d starting at offset 0x%x\n", bb->block_num, bb->native_offset);
- if (cfg->prof_options & MONO_PROFILE_COVERAGE) {
+ if ((cfg->prof_options & MONO_PROFILE_COVERAGE) && cfg->coverage_info) {
MonoProfileCoverageInfo *cov = cfg->coverage_info;
g_assert (!cfg->compile_aot);
case OP_SEQ_POINT: {
int i;
- /*
- * Read from the single stepping trigger page. This will cause a
- * SIGSEGV when single stepping is enabled.
- * We do this _before_ the breakpoint, so single stepping after
- * a breakpoint is hit will step to the next IL offset.
- */
if (ins->flags & MONO_INST_SINGLE_STEP_LOC) {
- MonoInst *var = cfg->arch.ss_trigger_page_var;
+ if (cfg->compile_aot) {
+ MonoInst *var = cfg->arch.ss_tramp_var;
+ guint8 *label;
+
+ /* Load ss_tramp_var */
+ amd64_mov_reg_membase (code, AMD64_R11, var->inst_basereg, var->inst_offset, 8);
+ /* Load the trampoline address */
+ amd64_mov_reg_membase (code, AMD64_R11, AMD64_R11, 0, 8);
+ /* Call it if it is non-null */
+ amd64_test_reg_reg (code, AMD64_R11, AMD64_R11);
+ label = code;
+ amd64_branch8 (code, X86_CC_Z, 0, FALSE);
+ amd64_call_reg (code, AMD64_R11);
+ amd64_patch (label, code);
+ } else {
+ /*
+ * Read from the single stepping trigger page. This will cause a
+ * SIGSEGV when single stepping is enabled.
+ * We do this _before_ the breakpoint, so single stepping after
+ * a breakpoint is hit will step to the next IL offset.
+ */
+ MonoInst *var = cfg->arch.ss_trigger_page_var;
- amd64_mov_reg_membase (code, AMD64_R11, var->inst_basereg, var->inst_offset, 8);
- amd64_alu_membase_imm_size (code, X86_CMP, AMD64_R11, 0, 0, 4);
+ amd64_mov_reg_membase (code, AMD64_R11, var->inst_basereg, var->inst_offset, 8);
+ amd64_alu_membase_imm_size (code, X86_CMP, AMD64_R11, 0, 0, 4);
+ }
}
/*
guint32 offset = code - cfg->native_code;
guint32 val;
MonoInst *info_var = cfg->arch.seq_point_info_var;
+ guint8 *label;
/* Load info var */
amd64_mov_reg_membase (code, AMD64_R11, info_var->inst_basereg, info_var->inst_offset, 8);
val = ((offset) * sizeof (guint8*)) + MONO_STRUCT_OFFSET (SeqPointInfo, bp_addrs);
- /* Load the info->bp_addrs [offset], which is either a valid address or the address of a trigger page */
+ /* Load the info->bp_addrs [offset], which is either NULL or the address of the breakpoint trampoline */
amd64_mov_reg_membase (code, AMD64_R11, AMD64_R11, val, 8);
- amd64_mov_reg_membase (code, AMD64_R11, AMD64_R11, 0, 8);
+ amd64_test_reg_reg (code, AMD64_R11, AMD64_R11);
+ label = code;
+ amd64_branch8 (code, X86_CC_Z, 0, FALSE);
+ /* Call the trampoline */
+ amd64_call_reg (code, AMD64_R11);
+ amd64_patch (label, code);
} else {
/*
* A placeholder for a possible breakpoint inserted by
amd64_shift_reg (code, X86_SAR, ins->dreg);
break;
case OP_SHR_IMM:
- g_assert (amd64_is_imm32 (ins->inst_imm));
- amd64_shift_reg_imm_size (code, X86_SAR, ins->dreg, ins->inst_imm, 4);
- break;
case OP_LSHR_IMM:
g_assert (amd64_is_imm32 (ins->inst_imm));
amd64_shift_reg_imm (code, X86_SAR, ins->dreg, ins->inst_imm);
amd64_shift_reg (code, X86_SHR, ins->dreg);
break;
case OP_SHL_IMM:
- g_assert (amd64_is_imm32 (ins->inst_imm));
- amd64_shift_reg_imm_size (code, X86_SHL, ins->dreg, ins->inst_imm, 4);
- break;
case OP_LSHL_IMM:
g_assert (amd64_is_imm32 (ins->inst_imm));
amd64_shift_reg_imm (code, X86_SHL, ins->dreg, ins->inst_imm);
amd64_pop_reg (code, AMD64_RDI);
break;
}
+ case OP_GENERIC_CLASS_INIT: {
+ static int byte_offset = -1;
+ static guint8 bitmask;
+ guint8 *jump;
+
+ g_assert (ins->sreg1 == MONO_AMD64_ARG_REG1);
+
+ if (byte_offset < 0)
+ mono_marshal_find_bitfield_offset (MonoVTable, initialized, &byte_offset, &bitmask);
+
+ amd64_test_membase_imm_size (code, ins->sreg1, byte_offset, bitmask, 1);
+ jump = code;
+ amd64_branch8 (code, X86_CC_NZ, -1, 1);
+
+ code = emit_call (cfg, code, MONO_PATCH_INFO_JIT_ICALL_ADDR, "specific_trampoline_generic_class_init", FALSE);
+ ins->flags |= MONO_INST_GC_CALLSITE;
+ ins->backend.pc_offset = code - cfg->native_code;
+
+ x86_patch (jump, code);
+ break;
+ }
+
case OP_X86_LEA:
amd64_lea_memindex (code, ins->dreg, ins->sreg1, ins->inst_imm, ins->sreg2, ins->backend.shift_amount);
break;
amd64_ret (code);
break;
}
-
+ case OP_GET_EX_OBJ:
+ if (ins->dreg != AMD64_RAX)
+ amd64_mov_reg_reg (code, ins->dreg, AMD64_RAX, sizeof (gpointer));
+ break;
case OP_LABEL:
ins->inst_c0 = code - cfg->native_code;
break;
MONO_VARINFO (cfg, ins->inst_c0)->live_range_end = code - cfg->native_code;
break;
}
- case OP_NACL_GC_SAFE_POINT: {
-#if defined(__native_client_codegen__) && defined(__native_client_gc__)
- if (cfg->compile_aot)
- code = emit_call (cfg, code, MONO_PATCH_INFO_ABS, (gpointer)mono_nacl_gc, TRUE);
- else {
- guint8 *br [1];
+ case OP_GC_SAFE_POINT: {
+ const char *polling_func = NULL;
+ int compare_val = 0;
+ guint8 *br [1];
- amd64_mov_reg_imm_size (code, AMD64_R11, (gpointer)&__nacl_thread_suspension_needed, 4);
- amd64_test_membase_imm_size (code, AMD64_R11, 0, 0xFFFFFFFF, 4);
- br[0] = code; x86_branch8 (code, X86_CC_EQ, 0, FALSE);
- code = emit_call (cfg, code, MONO_PATCH_INFO_ABS, (gpointer)mono_nacl_gc, TRUE);
- amd64_patch (br[0], code);
- }
+#if defined (USE_COOP_GC)
+ polling_func = "mono_threads_state_poll";
+ compare_val = 1;
+#elif defined(__native_client_codegen__) && defined(__native_client_gc__)
+ polling_func = "mono_nacl_gc";
+ compare_val = 0xFFFFFFFF;
#endif
+ if (!polling_func)
+ break;
+
+ amd64_test_membase_imm_size (code, ins->sreg1, 0, compare_val, 4);
+ br[0] = code; x86_branch8 (code, X86_CC_EQ, 0, FALSE);
+ code = emit_call (cfg, code, MONO_PATCH_INFO_INTERNAL_METHOD, polling_func, FALSE);
+ amd64_patch (br[0], code);
break;
}
+
case OP_GC_LIVENESS_DEF:
case OP_GC_LIVENESS_USE:
case OP_GC_PARAM_SLOT_LIVENESS_DEF:
g_assert_not_reached ();
#endif
}
-
- last_ins = ins;
- last_offset = offset;
}
cfg->code_len = code - cfg->native_code;
}
void
-mono_arch_patch_code (MonoMethod *method, MonoDomain *domain, guint8 *code, MonoJumpInfo *ji, MonoCodeManager *dyn_code_mp, gboolean run_cctors)
+mono_arch_patch_code_new (MonoCompile *cfg, MonoDomain *domain, guint8 *code, MonoJumpInfo *ji, gpointer target)
{
- MonoJumpInfo *patch_info;
- gboolean compile_aot = !run_cctors;
-
- for (patch_info = ji; patch_info; patch_info = patch_info->next) {
- unsigned char *ip = patch_info->ip.i + code;
- unsigned char *target;
+ unsigned char *ip = ji->ip.i + code;
- if (compile_aot) {
- switch (patch_info->type) {
- case MONO_PATCH_INFO_BB:
- case MONO_PATCH_INFO_LABEL:
+ /*
+	 * Debug code to help track down problems where the target of a near call
+	 * is not valid.
+ */
+ if (amd64_is_near_call (ip)) {
+ gint64 disp = (guint8*)target - (guint8*)ip;
+
+ if (!amd64_is_imm32 (disp)) {
+ printf ("TYPE: %d\n", ji->type);
+ switch (ji->type) {
+ case MONO_PATCH_INFO_INTERNAL_METHOD:
+ printf ("V: %s\n", ji->data.name);
+ break;
+ case MONO_PATCH_INFO_METHOD_JUMP:
+ case MONO_PATCH_INFO_METHOD:
+ printf ("V: %s\n", ji->data.method->name);
break;
default:
- /* No need to patch these */
- continue;
- }
- }
-
- target = mono_resolve_patch_target (method, domain, code, patch_info, run_cctors);
-
- switch (patch_info->type) {
- case MONO_PATCH_INFO_NONE:
- continue;
- case MONO_PATCH_INFO_METHOD_REL:
- case MONO_PATCH_INFO_R8:
- case MONO_PATCH_INFO_R4:
- g_assert_not_reached ();
- continue;
- case MONO_PATCH_INFO_BB:
- break;
- default:
- break;
- }
-
- /*
- * Debug code to help track down problems where the target of a near call is
- * is not valid.
- */
- if (amd64_is_near_call (ip)) {
- gint64 disp = (guint8*)target - (guint8*)ip;
-
- if (!amd64_is_imm32 (disp)) {
- printf ("TYPE: %d\n", patch_info->type);
- switch (patch_info->type) {
- case MONO_PATCH_INFO_INTERNAL_METHOD:
- printf ("V: %s\n", patch_info->data.name);
- break;
- case MONO_PATCH_INFO_METHOD_JUMP:
- case MONO_PATCH_INFO_METHOD:
- printf ("V: %s\n", patch_info->data.method->name);
- break;
- default:
- break;
- }
+ break;
}
}
-
- amd64_patch (ip, (gpointer)target);
}
+
+ amd64_patch (ip, (gpointer)target);
}
#ifndef DISABLE_JIT
mono_emit_unwind_op_def_cfa_offset (cfg, code, cfa_offset);
mono_emit_unwind_op_offset (cfg, code, AMD64_RBP, - cfa_offset);
async_exc_point (code);
-#ifdef HOST_WIN32
+#ifdef TARGET_WIN32
mono_arch_unwindinfo_add_push_nonvol (&cfg->arch.unwindinfo, cfg->native_code, code, AMD64_RBP);
#endif
/* These are handled automatically by the stack marking code */
amd64_mov_reg_reg (code, AMD64_RBP, AMD64_RSP, sizeof(mgreg_t));
mono_emit_unwind_op_def_cfa_reg (cfg, code, AMD64_RBP);
async_exc_point (code);
-#ifdef HOST_WIN32
+#ifdef TARGET_WIN32
mono_arch_unwindinfo_add_set_fpreg (&cfg->arch.unwindinfo, cfg->native_code, code, AMD64_RBP);
#endif
}
/* Allocate stack frame */
if (alloc_size) {
/* See mono_emit_stack_alloc */
-#if defined(HOST_WIN32) || defined(MONO_ARCH_SIGSEGV_ON_ALTSTACK)
+#if defined(TARGET_WIN32) || defined(MONO_ARCH_SIGSEGV_ON_ALTSTACK)
guint32 remaining_size = alloc_size;
/*FIXME handle unbounded code expansion, we should use a loop in case of more than X interactions*/
guint32 required_code_size = ((remaining_size / 0x1000) + 1) * 10; /*10 is the max size of amd64_alu_reg_imm + amd64_test_membase_reg*/
mono_emit_unwind_op_def_cfa_offset (cfg, code, cfa_offset);
}
async_exc_point (code);
-#ifdef HOST_WIN32
+#ifdef TARGET_WIN32
if (cfg->arch.omit_fp)
mono_arch_unwindinfo_add_alloc_stack (&cfg->arch.unwindinfo, cfg->native_code, code, 0x1000);
#endif
mono_emit_unwind_op_def_cfa_offset (cfg, code, cfa_offset);
async_exc_point (code);
}
-#ifdef HOST_WIN32
+#ifdef TARGET_WIN32
if (cfg->arch.omit_fp)
mono_arch_unwindinfo_add_alloc_stack (&cfg->arch.unwindinfo, cfg->native_code, code, remaining_size);
#endif
/* Keep this in sync with emit_load_volatile_arguments */
for (i = 0; i < sig->param_count + sig->hasthis; ++i) {
ArgInfo *ainfo = cinfo->args + i;
- gint32 stack_offset;
- MonoType *arg_type;
ins = cfg->args [i];
/* Unused arguments */
continue;
- if (sig->hasthis && (i == 0))
- arg_type = &mono_defaults.object_class->byval_arg;
- else
- arg_type = sig->params [i - sig->hasthis];
-
- stack_offset = ainfo->offset + ARGS_OFFSET;
-
- if (cfg->globalra) {
- /* All the other moves are done by the register allocator */
- switch (ainfo->storage) {
- case ArgInFloatSSEReg:
- amd64_sse_cvtss2sd_reg_reg (code, ainfo->reg, ainfo->reg);
- break;
- case ArgValuetypeInReg:
- for (quad = 0; quad < 2; quad ++) {
- switch (ainfo->pair_storage [quad]) {
- case ArgInIReg:
- amd64_mov_membase_reg (code, ins->inst_basereg, ins->inst_offset + (quad * sizeof(mgreg_t)), ainfo->pair_regs [quad], sizeof(mgreg_t));
- break;
- case ArgInFloatSSEReg:
- amd64_movss_membase_reg (code, ins->inst_basereg, ins->inst_offset + (quad * sizeof(mgreg_t)), ainfo->pair_regs [quad]);
- break;
- case ArgInDoubleSSEReg:
- amd64_movsd_membase_reg (code, ins->inst_basereg, ins->inst_offset + (quad * sizeof(mgreg_t)), ainfo->pair_regs [quad]);
- break;
- case ArgNone:
- break;
- default:
- g_assert_not_reached ();
- }
- }
- break;
- default:
- break;
- }
-
- continue;
- }
-
/* Save volatile arguments to the stack */
if (ins->opcode != OP_REGVAR) {
switch (ainfo->storage) {
}
}
- if (cfg->gen_seq_points_debug_data) {
+ if (cfg->gen_sdb_seq_points) {
MonoInst *info_var = cfg->arch.seq_point_info_var;
/* Initialize seq_point_info_var */
amd64_mov_membase_reg (code, info_var->inst_basereg, info_var->inst_offset, AMD64_R11, 8);
}
- /* Initialize ss_trigger_page_var */
- ins = cfg->arch.ss_trigger_page_var;
-
- g_assert (ins->opcode == OP_REGOFFSET);
-
if (cfg->compile_aot) {
+ /* Initialize ss_tramp_var */
+ ins = cfg->arch.ss_tramp_var;
+ g_assert (ins->opcode == OP_REGOFFSET);
+
amd64_mov_reg_membase (code, AMD64_R11, info_var->inst_basereg, info_var->inst_offset, 8);
- amd64_mov_reg_membase (code, AMD64_R11, AMD64_R11, MONO_STRUCT_OFFSET (SeqPointInfo, ss_trigger_page), 8);
+ amd64_mov_reg_membase (code, AMD64_R11, AMD64_R11, MONO_STRUCT_OFFSET (SeqPointInfo, ss_tramp_addr), 8);
+ amd64_mov_membase_reg (code, ins->inst_basereg, ins->inst_offset, AMD64_R11, 8);
} else {
+ /* Initialize ss_trigger_page_var */
+ ins = cfg->arch.ss_trigger_page_var;
+
+ g_assert (ins->opcode == OP_REGOFFSET);
+
amd64_mov_reg_imm (code, AMD64_R11, (guint64)ss_trigger_page);
+ amd64_mov_membase_reg (code, ins->inst_basereg, ins->inst_offset, AMD64_R11, 8);
}
- amd64_mov_membase_reg (code, ins->inst_basereg, ins->inst_offset, AMD64_R11, 8);
}
cfg->code_len = code - cfg->native_code;
mono_arch_emit_epilog (MonoCompile *cfg)
{
MonoMethod *method = cfg->method;
- int quad, pos, i;
+ int quad, i;
guint8 *code;
int max_epilog_size;
CallInfo *cinfo;
code = mono_arch_instrument_epilog (cfg, mono_trace_leave_method, code, TRUE);
/* the code restoring the registers must be kept in sync with OP_TAILCALL */
- pos = 0;
if (method->save_lmf) {
/* check if we need to restore protection of the stack after a stack overflow */
mono_arch_instrument_prolog (MonoCompile *cfg, void *func, void *p, gboolean enable_arguments)
{
guchar *code = p;
- CallInfo *cinfo = NULL;
MonoMethodSignature *sig;
MonoInst *inst;
int i, n, stack_area = 0;
/* Allocate a new area on the stack and save arguments there */
sig = mono_method_signature (cfg->method);
- cinfo = get_call_info (cfg->generic_sharing_context, cfg->mempool, sig);
-
n = sig->param_count + sig->hasthis;
stack_area = ALIGN_TO (n * 8, 16);
guchar *code = p;
int save_mode = SAVE_NONE;
MonoMethod *method = cfg->method;
- MonoType *ret_type = mini_replace_type (mono_method_signature (method)->ret);
+ MonoType *ret_type = mini_get_underlying_type (cfg, mono_method_signature (method)->ret);
int i;
switch (ret_type->type) {
mono_sigctx_to_monoctx (sigctx, &ctx);
- rip = (guint8*)ctx.rip;
+ rip = (guint8*)ctx.gregs [AMD64_RIP];
if (IS_REX (rip [0])) {
reg = amd64_rex_b (rip [0]);
/* idiv REG */
reg += x86_modrm_rm (rip [1]);
- switch (reg) {
- case AMD64_RAX:
- value = ctx.rax;
- break;
- case AMD64_RBX:
- value = ctx.rbx;
- break;
- case AMD64_RCX:
- value = ctx.rcx;
- break;
- case AMD64_RDX:
- value = ctx.rdx;
- break;
- case AMD64_RBP:
- value = ctx.rbp;
- break;
- case AMD64_RSP:
- value = ctx.rsp;
- break;
- case AMD64_RSI:
- value = ctx.rsi;
- break;
- case AMD64_RDI:
- value = ctx.rdi;
- break;
- case AMD64_R12:
- value = ctx.r12;
- break;
- case AMD64_R13:
- value = ctx.r13;
- break;
- case AMD64_R14:
- value = ctx.r14;
- break;
- case AMD64_R15:
- value = ctx.r15;
- break;
- default:
- g_assert_not_reached ();
- reg = -1;
- }
+ value = ctx.gregs [reg];
if (value == -1)
return TRUE;
gboolean
mono_breakpoint_clean_code (guint8 *method_start, guint8 *code, int offset, guint8 *buf, int size)
{
- int i;
- gboolean can_write = TRUE;
/*
* If method_start is non-NULL we need to perform bound checks, since we access memory
* at code - offset we could go before the start of the method and end up in a different
memset (buf, 0, size);
memcpy (buf + offset - diff, method_start, diff + size - offset);
}
- code -= offset;
- for (i = 0; i < MONO_BREAKPOINT_ARRAY_SIZE; ++i) {
- int idx = mono_breakpoint_info_index [i];
- guint8 *ptr;
- if (idx < 1)
- continue;
- ptr = mono_breakpoint_info [idx].address;
- if (ptr >= code && ptr < code + size) {
- guint8 saved_byte = mono_breakpoint_info [idx].saved_byte;
- can_write = FALSE;
- /*g_print ("patching %p with 0x%02x (was: 0x%02x)\n", ptr, saved_byte, buf [ptr - code]);*/
- buf [ptr - code] = saved_byte;
- }
- }
- return can_write;
+ return TRUE;
}
#if defined(__native_client_codegen__)
/* We have to shift the arguments left */
amd64_mov_reg_reg (code, AMD64_RAX, AMD64_ARG_REG1, 8);
for (i = 0; i < param_count; ++i) {
-#ifdef HOST_WIN32
+#ifdef TARGET_WIN32
if (i < 3)
amd64_mov_reg_reg (code, param_regs [i], param_regs [i + 1], 8);
else
}
nacl_global_codeman_validate (&start, 64, &code);
+ mono_arch_flush_icache (start, code - start);
if (code_len)
*code_len = code - start;
return start;
}
+/* Highest vtable slot index (in pointer-sized units) for which a virtual
+ * delegate invoke thunk is emitted; larger offsets return NULL and the
+ * caller must fall back to another invocation path. */
+#define MAX_VIRTUAL_DELEGATE_OFFSET 32
+
+/*
+ * get_delegate_virtual_invoke_impl:
+ *
+ *   Emit a small native thunk for invoking a virtual method through a
+ * delegate: replace the 'this' argument (ARG_REG1) with the delegate's
+ * target object, optionally load the IMT register from the delegate's
+ * method, then tail-jump through the target object's vtable at OFFSET.
+ *
+ * LOAD_IMT_REG: if TRUE, load MONO_ARCH_IMT_REG from MonoDelegate.method.
+ * OFFSET: byte offset of the vtable slot to jump through.
+ * CODE_LEN: if non-NULL, receives the number of bytes of code emitted.
+ *
+ * Returns the start address of the emitted thunk, or NULL when OFFSET is
+ * beyond MAX_VIRTUAL_DELEGATE_OFFSET pointer-sized slots.
+ */
+static gpointer
+get_delegate_virtual_invoke_impl (gboolean load_imt_reg, int offset, guint32 *code_len)
+{
+ guint8 *code, *start;
+ /* NOTE(review): worst case with load_imt_reg set looks like it can emit
+  * more than 20 bytes (three reg/membase moves plus the indirect jump) —
+  * confirm the reserve size covers the largest encoding. */
+ int size = 20;
+
+ if (offset / sizeof (gpointer) > MAX_VIRTUAL_DELEGATE_OFFSET)
+ return NULL;
+
+ start = code = mono_global_codeman_reserve (size);
+
+ /* Replace the this argument with the target */
+ amd64_mov_reg_reg (code, AMD64_RAX, AMD64_ARG_REG1, 8);
+ amd64_mov_reg_membase (code, AMD64_ARG_REG1, AMD64_RAX, MONO_STRUCT_OFFSET (MonoDelegate, target), 8);
+
+ if (load_imt_reg) {
+ /* Load the IMT reg */
+ amd64_mov_reg_membase (code, MONO_ARCH_IMT_REG, AMD64_RAX, MONO_STRUCT_OFFSET (MonoDelegate, method), 8);
+ }
+
+ /* Load the vtable */
+ amd64_mov_reg_membase (code, AMD64_RAX, AMD64_ARG_REG1, MONO_STRUCT_OFFSET (MonoObject, vtable), 8);
+ amd64_jump_membase (code, AMD64_RAX, offset);
+ mono_profiler_code_buffer_new (start, code - start, MONO_PROFILER_CODE_BUFFER_DELEGATE_INVOKE, NULL);
+
+ /* Report the emitted size to callers that cache per-offset thunks. */
+ if (code_len)
+ *code_len = code - start;
+
+ return start;
+}
+
/*
* mono_arch_get_delegate_invoke_impls:
*
g_free (tramp_name);
}
+ for (i = 0; i < MAX_VIRTUAL_DELEGATE_OFFSET; ++i) {
+ code = get_delegate_virtual_invoke_impl (TRUE, i * SIZEOF_VOID_P, &code_len);
+ tramp_name = g_strdup_printf ("delegate_virtual_invoke_imt_%d", i);
+ res = g_slist_prepend (res, mono_tramp_info_create (tramp_name, code, code_len, NULL, NULL));
+ g_free (tramp_name);
+
+ code = get_delegate_virtual_invoke_impl (FALSE, i * SIZEOF_VOID_P, &code_len);
+ tramp_name = g_strdup_printf ("delegate_virtual_invoke_%d", i);
+ res = g_slist_prepend (res, mono_tramp_info_create (tramp_name, code, code_len, NULL, NULL));
+ g_free (tramp_name);
+ }
+
return res;
}
gpointer
mono_arch_get_delegate_virtual_invoke_impl (MonoMethodSignature *sig, MonoMethod *method, int offset, gboolean load_imt_reg)
{
- guint8 *code, *start;
- int size = 20;
-
- start = code = mono_global_codeman_reserve (size);
-
- /* Replace the this argument with the target */
- amd64_mov_reg_reg (code, AMD64_RAX, AMD64_ARG_REG1, 8);
- amd64_mov_reg_membase (code, AMD64_ARG_REG1, AMD64_RAX, MONO_STRUCT_OFFSET (MonoDelegate, target), 8);
-
- if (load_imt_reg) {
- /* Load the IMT reg */
- amd64_mov_reg_membase (code, MONO_ARCH_IMT_REG, AMD64_RAX, MONO_STRUCT_OFFSET (MonoDelegate, method), 8);
- }
-
- /* Load the vtable */
- amd64_mov_reg_membase (code, AMD64_RAX, AMD64_ARG_REG1, MONO_STRUCT_OFFSET (MonoObject, vtable), 8);
- amd64_jump_membase (code, AMD64_RAX, offset);
- mono_profiler_code_buffer_new (start, code - start, MONO_PROFILER_CODE_BUFFER_DELEGATE_INVOKE, NULL);
-
- return start;
+ return get_delegate_virtual_invoke_impl (load_imt_reg, offset, NULL);
}
void
return 0;
}
-#define _CTX_REG(ctx,fld,i) ((&ctx->fld)[i])
-
mgreg_t
mono_arch_context_get_int_reg (MonoContext *ctx, int reg)
{
- switch (reg) {
- case AMD64_RCX: return ctx->rcx;
- case AMD64_RDX: return ctx->rdx;
- case AMD64_RBX: return ctx->rbx;
- case AMD64_RBP: return ctx->rbp;
- case AMD64_RSP: return ctx->rsp;
- default:
- return _CTX_REG (ctx, rax, reg);
- }
+ return ctx->gregs [reg];
}
void
mono_arch_context_set_int_reg (MonoContext *ctx, int reg, mgreg_t val)
{
- switch (reg) {
- case AMD64_RCX:
- ctx->rcx = val;
- break;
- case AMD64_RDX:
- ctx->rdx = val;
- break;
- case AMD64_RBX:
- ctx->rbx = val;
- break;
- case AMD64_RBP:
- ctx->rbp = val;
- break;
- case AMD64_RSP:
- ctx->rsp = val;
- break;
- default:
- _CTX_REG (ctx, rax, reg) = val;
- }
+ ctx->gregs [reg] = val;
}
gpointer
SeqPointInfo *info = mono_arch_get_seq_point_info (mono_domain_get (), ji->code_start);
g_assert (info->bp_addrs [native_offset] == 0);
- info->bp_addrs [native_offset] = bp_trigger_page;
+ info->bp_addrs [native_offset] = mini_get_breakpoint_trampoline ();
} else {
/*
* In production, we will use int3 (has to fix the size in the md
guint32 native_offset = ip - (guint8*)ji->code_start;
SeqPointInfo *info = mono_arch_get_seq_point_info (mono_domain_get (), ji->code_start);
- g_assert (info->bp_addrs [native_offset] == 0);
- info->bp_addrs [native_offset] = info;
+ info->bp_addrs [native_offset] = NULL;
} else {
for (i = 0; i < breakpoint_size; ++i)
x86_nop (code);
mono_arch_skip_breakpoint (MonoContext *ctx, MonoJitInfo *ji)
{
if (ji->from_aot) {
- /* amd64_mov_reg_membase (code, AMD64_R11, AMD64_R11, 0, 8) */
- MONO_CONTEXT_SET_IP (ctx, (guint8*)MONO_CONTEXT_GET_IP (ctx) + 3);
+ /* The breakpoint instruction is a call */
} else {
MONO_CONTEXT_SET_IP (ctx, (guint8*)MONO_CONTEXT_GET_IP (ctx) + breakpoint_fault_size);
}
mono_arch_start_single_stepping (void)
{
mono_mprotect (ss_trigger_page, mono_pagesize (), 0);
+ ss_trampoline = mini_get_single_step_trampoline ();
}
/*
mono_arch_stop_single_stepping (void)
{
mono_mprotect (ss_trigger_page, mono_pagesize (), MONO_MMAP_READ);
+ ss_trampoline = NULL;
}
/*
{
SeqPointInfo *info;
MonoJitInfo *ji;
- int i;
// FIXME: Add a free function
// FIXME: Optimize the size
info = g_malloc0 (sizeof (SeqPointInfo) + (ji->code_size * sizeof (gpointer)));
- info->ss_trigger_page = ss_trigger_page;
- info->bp_trigger_page = bp_trigger_page;
- /* Initialize to a valid address */
- for (i = 0; i < ji->code_size; ++i)
- info->bp_addrs [i] = info;
+ info->ss_tramp_addr = &ss_trampoline;
mono_domain_lock (domain);
g_hash_table_insert (domain_jit_info (domain)->arch_seq_points,