* Zoltan Varga (vargaz@gmail.com)
*
* (C) 2003 Ximian, Inc.
+ * Copyright 2003-2011 Novell, Inc (http://www.novell.com)
+ * Copyright 2011 Xamarin, Inc (http://www.xamarin.com)
*/
#include "mini.h"
#include <string.h>
MonoBreakpointInfo
mono_breakpoint_info [MONO_BREAKPOINT_ARRAY_SIZE];
+/* Structure used by the sequence points in AOTed code */
+typedef struct {
+ gpointer ss_trigger_page;
+ gpointer bp_trigger_page;
+ gpointer bp_addrs [MONO_ZERO_LEN_ARRAY];
+} SeqPointInfo;
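+/*
+ * Each bp_addrs [native_offset] entry holds either a plain readable address
+ * (no breakpoint; see mono_arch_get_seq_point_info ()) or bp_trigger_page.
+ * The code emitted for OP_SEQ_POINT dereferences the entry, so it faults
+ * exactly when mono_arch_set_breakpoint () has armed that offset.
+ */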
+
/*
* The code generated for sequence points reads from this location, which is
* made read-only when single stepping is enabled.
return "unknown";
}
-G_GNUC_UNUSED static void
-break_count (void)
-{
-}
-
-G_GNUC_UNUSED static gboolean
-debug_count (void)
-{
- static int count = 0;
- count ++;
-
- if (!getenv ("COUNT"))
- return TRUE;
-
- if (count == atoi (getenv ("COUNT"))) {
- break_count ();
- }
-
- if (count > atoi (getenv ("COUNT"))) {
- return FALSE;
- }
-
- return TRUE;
-}
-
static gboolean
debug_omit_fp (void)
{
#if 0
- return debug_count ();
+ return mono_debug_count ();
#else
return TRUE;
#endif
break;
}
/* fall through */
+#if defined( __native_client_codegen__ )
+ case MONO_TYPE_TYPEDBYREF:
+#endif
case MONO_TYPE_VALUETYPE: {
guint32 tmp_gr = 0, tmp_fr = 0, tmp_stacksize = 0;
}
break;
}
+#if !defined( __native_client_codegen__ )
case MONO_TYPE_TYPEDBYREF:
/* Same as a valuetype with size 24 */
cinfo->vtype_retaddr = TRUE;
break;
+#endif
case MONO_TYPE_VOID:
break;
default:
add_valuetype (gsctx, sig, ainfo, sig->params [i], FALSE, &gr, &fr, &stack_size);
break;
case MONO_TYPE_TYPEDBYREF:
-#ifdef HOST_WIN32
+#if defined( HOST_WIN32 ) || defined( __native_client_codegen__ )
add_valuetype (gsctx, sig, ainfo, sig->params [i], FALSE, &gr, &fr, &stack_size);
#else
stack_size += sizeof (MonoTypedRef);
* Returns the size of the argument area on the stack.
*/
int
-mono_arch_get_argument_info (MonoMethodSignature *csig, int param_count, MonoJitArgumentInfo *arg_info)
+mono_arch_get_argument_info (MonoGenericSharingContext *gsctx, MonoMethodSignature *csig, int param_count, MonoJitArgumentInfo *arg_info)
{
int k;
CallInfo *cinfo = get_call_info (NULL, NULL, csig);
InitializeCriticalSection (&mini_arch_mutex);
#if defined(__native_client_codegen__)
- mono_native_tls_alloc (nacl_instruction_depth, NULL);
+ mono_native_tls_alloc (&nacl_instruction_depth, NULL);
mono_native_tls_set_value (nacl_instruction_depth, (gpointer)0);
- mono_native_tls_alloc (nacl_rex_tag, NULL);
- mono_native_tls_alloc (nacl_legacy_prefix_tag, NULL);
+ mono_native_tls_alloc (&nacl_rex_tag, NULL);
+ mono_native_tls_alloc (&nacl_legacy_prefix_tag, NULL);
#endif
#ifdef MONO_ARCH_NOMAP32BIT
/* amd64_mov_reg_imm () + amd64_mov_reg_membase () */
breakpoint_size = 13;
breakpoint_fault_size = 3;
- /* amd64_alu_membase_imm_size (code, X86_CMP, AMD64_R11, 0, 0, 4); */
- single_step_fault_size = 5;
#else
flags = MONO_MMAP_READ|MONO_MMAP_32BIT;
/* amd64_mov_reg_mem () */
breakpoint_size = 8;
breakpoint_fault_size = 8;
- single_step_fault_size = 8;
#endif
+ /* amd64_alu_membase_imm_size (code, X86_CMP, AMD64_R11, 0, 0, 4); */
+ single_step_fault_size = 4;
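+ /* The cmp encodes to 4 bytes; mono_arch_skip_single_step () advances rip by this much */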
+
ss_trigger_page = mono_valloc (NULL, mono_pagesize (), flags);
bp_trigger_page = mono_valloc (NULL, mono_pagesize (), flags);
mono_mprotect (bp_trigger_page, mono_pagesize (), 0);
* This function returns the optimizations supported on this cpu.
*/
guint32
-mono_arch_cpu_optimizazions (guint32 *exclude_mask)
+mono_arch_cpu_optimizations (guint32 *exclude_mask)
{
int eax, ebx, ecx, edx;
guint32 opts = 0;
case ArgInIReg:
case ArgInFloatSSEReg:
case ArgInDoubleSSEReg:
- if ((MONO_TYPE_ISSTRUCT (sig->ret) && !mono_class_from_mono_type (sig->ret)->enumtype) || (sig->ret->type == MONO_TYPE_TYPEDBYREF)) {
+ if ((MONO_TYPE_ISSTRUCT (sig->ret) && !mono_class_from_mono_type (sig->ret)->enumtype) || ((sig->ret->type == MONO_TYPE_TYPEDBYREF) && cinfo->vtype_retaddr)) {
cfg->vret_addr->opcode = OP_REGVAR;
cfg->vret_addr->inst_c0 = cinfo->ret.reg;
}
} else {
if (cfg->arch.omit_fp)
cfg->arch.reg_save_area_offset = offset;
- /* Reserve space for caller saved registers */
+ /* Reserve space for callee saved registers */
for (i = 0; i < AMD64_NREG; ++i)
if (AMD64_IS_CALLEE_SAVED_REG (i) && (cfg->used_int_regs & (1 << i))) {
offset += sizeof(mgreg_t);
case ArgInIReg:
case ArgInFloatSSEReg:
case ArgInDoubleSSEReg:
- if ((MONO_TYPE_ISSTRUCT (sig->ret) && !mono_class_from_mono_type (sig->ret)->enumtype) || (sig->ret->type == MONO_TYPE_TYPEDBYREF)) {
+ if ((MONO_TYPE_ISSTRUCT (sig->ret) && !mono_class_from_mono_type (sig->ret)->enumtype) || ((sig->ret->type == MONO_TYPE_TYPEDBYREF) && cinfo->vtype_retaddr)) {
if (cfg->globalra) {
cfg->vret_addr->opcode = OP_REGVAR;
cfg->vret_addr->inst_c0 = cinfo->ret.reg;
cfg->ret->inst_basereg = cfg->frame_reg;
if (cfg->arch.omit_fp) {
cfg->ret->inst_offset = offset;
- offset += 16;
+ offset += cinfo->ret.pair_storage [1] == ArgNone ? 8 : 16;
} else {
- offset += 16;
+ offset += cinfo->ret.pair_storage [1] == ArgNone ? 8 : 16;
cfg->ret->inst_offset = - offset;
}
break;
if (cfg->gen_seq_points) {
MonoInst *ins;
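+ /* In AOT mode, also create a var holding the per-method SeqPointInfo; the prolog loads it from a GOT slot */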
+ if (cfg->compile_aot) {
+ MonoInst *ins = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
+ ins->flags |= MONO_INST_VOLATILE;
+ cfg->arch.seq_point_info_var = ins;
+ }
+
ins = mono_compile_create_var (cfg, &mono_defaults.int_class->byval_arg, OP_LOCAL);
ins->flags |= MONO_INST_VOLATILE;
cfg->arch.ss_trigger_page_var = ins;
lmf_var->flags |= MONO_INST_LMF;
cfg->arch.lmf_var = lmf_var;
}
+
+#ifndef MONO_AMD64_NO_PUSHES
+ cfg->arch_eh_jit_info = 1;
+#endif
}
static void
{
MonoInst *arg;
MonoMethodSignature *tmp_sig;
- MonoInst *sig_arg;
+ int sig_reg;
if (call->tail_call)
NOT_IMPLEMENTED;
- /* FIXME: Add support for signature tokens to AOT */
- cfg->disable_aot = TRUE;
-
g_assert (cinfo->sig_cookie.storage == ArgOnStack);
/*
tmp_sig->sentinelpos = 0;
memcpy (tmp_sig->params, call->signature->params + call->signature->sentinelpos, tmp_sig->param_count * sizeof (MonoType*));
- MONO_INST_NEW (cfg, sig_arg, OP_ICONST);
- sig_arg->dreg = mono_alloc_ireg (cfg);
- sig_arg->inst_p0 = tmp_sig;
- MONO_ADD_INS (cfg->cbb, sig_arg);
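+ /* Emit the cookie as a signature constant, which the AOT compiler can patch (the raw OP_ICONST pointer above could not be AOTed) */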
+ sig_reg = mono_alloc_ireg (cfg);
+ MONO_EMIT_NEW_SIGNATURECONST (cfg, sig_reg, tmp_sig);
if (cfg->arch.no_pushes) {
- MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, AMD64_RSP, cinfo->sig_cookie.offset, sig_arg->dreg);
+ MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, AMD64_RSP, cinfo->sig_cookie.offset, sig_reg);
} else {
MONO_INST_NEW (cfg, arg, OP_X86_PUSH);
- arg->sreg1 = sig_arg->dreg;
+ arg->sreg1 = sig_reg;
MONO_ADD_INS (cfg->cbb, arg);
}
}
}
}
else {
- if (cfg->abs_patches && g_hash_table_lookup (cfg->abs_patches, data)) {
- /*
- * This is not really an optimization, but required because the
- * generic class init trampolines use R11 to pass the vtable.
- */
- near_call = TRUE;
+ MonoJumpInfo *jinfo = NULL;
+
+ if (cfg->abs_patches)
+ jinfo = g_hash_table_lookup (cfg->abs_patches, data);
+ if (jinfo) {
+ if (jinfo->type == MONO_PATCH_INFO_JIT_ICALL_ADDR) {
+ if ((((guint64)data) >> 32) == 0)
+ near_call = TRUE;
+ no_patch = TRUE;
+ } else {
+ /*
+ * This is not really an optimization, but required because the
+ * generic class init trampolines use R11 to pass the vtable.
+ */
+ near_call = TRUE;
+ }
} else {
MonoJitICallInfo *info = mono_find_jit_icall_by_addr (data);
if (info) {
- if ((cfg->method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) &&
- strstr (cfg->method->name, info->name)) {
- /* A call to the wrapped function */
- if ((((guint64)data) >> 32) == 0)
- near_call = TRUE;
- no_patch = TRUE;
- }
- else if (info->func == info->wrapper) {
+ if (info->func == info->wrapper) {
/* No wrapper */
if ((((guint64)info->func) >> 32) == 0)
near_call = TRUE;
#ifdef MONO_ARCH_NOMAP32BIT
near_call = FALSE;
#endif
-
+#if defined(__native_client__)
+ /* Always use near_call == TRUE for Native Client */
+ near_call = TRUE;
+#endif
/* The 64bit XEN kernel does not honour the MAP_32BIT flag. (#522894) */
if (optimize_for_xen)
near_call = FALSE;
#ifdef __APPLE__
static gboolean have_tls_get = FALSE;
static gboolean inited = FALSE;
+ guint8 *ins;
if (inited)
return have_tls_get;
- guint8 *ins = (guint8*)pthread_getspecific;
+ ins = (guint8*)pthread_getspecific;
/*
* We're looking for these two instructions:
#define bb_is_loop_start(bb) ((bb)->loop_body_start && (bb)->nesting)
#ifndef DISABLE_JIT
-
-#if defined(__native_client__) || defined(__native_client_codegen__)
-void mono_nacl_gc()
-{
-#ifdef __native_client_gc__
- __nacl_suspend_thread_if_needed();
-#endif
-}
-#endif
-
void
mono_arch_output_basic_block (MonoCompile *cfg, MonoBasicBlock *bb)
{
amd64_alu_reg_reg (code, X86_CMP, ins->sreg1, ins->sreg2);
break;
case OP_COMPARE_IMM:
+#if defined(__mono_ilp32__)
+ /* Comparison of pointer immediates should be 4 bytes to avoid sign-extend problems */
+ g_assert (amd64_is_imm32 (ins->inst_imm));
+ amd64_alu_reg_imm_size (code, X86_CMP, ins->sreg1, ins->inst_imm, 4);
+ break;
+#endif
case OP_LCOMPARE_IMM:
g_assert (amd64_is_imm32 (ins->inst_imm));
amd64_alu_reg_imm (code, X86_CMP, ins->sreg1, ins->inst_imm);
case OP_SEQ_POINT: {
int i;
- if (cfg->compile_aot)
- NOT_IMPLEMENTED;
-
/*
* Read from the single stepping trigger page. This will cause a
* SIGSEGV when single stepping is enabled.
* a breakpoint is hit will step to the next IL offset.
*/
if (ins->flags & MONO_INST_SINGLE_STEP_LOC) {
- if (((guint64)ss_trigger_page >> 32) == 0)
- amd64_mov_reg_mem (code, AMD64_R11, (guint64)ss_trigger_page, 4);
- else {
- MonoInst *var = cfg->arch.ss_trigger_page_var;
- amd64_mov_reg_membase (code, AMD64_R11, var->inst_basereg, var->inst_offset, 8);
- amd64_alu_membase_imm_size (code, X86_CMP, AMD64_R11, 0, 0, 4);
- }
+ MonoInst *var = cfg->arch.ss_trigger_page_var;
+ amd64_mov_reg_membase (code, AMD64_R11, var->inst_basereg, var->inst_offset, 8);
+ amd64_alu_membase_imm_size (code, X86_CMP, AMD64_R11, 0, 0, 4);
}
/*
* This is the address which is saved in seq points,
- * get_ip_for_single_step () / get_ip_for_breakpoint () needs to compute this
- * from the address of the instruction causing the fault.
*/
mono_add_seq_point (cfg, bb, ins, code - cfg->native_code);
- /*
- * A placeholder for a possible breakpoint inserted by
- * mono_arch_set_breakpoint ().
+ if (cfg->compile_aot) {
+ guint32 offset = code - cfg->native_code;
+ guint32 val;
+ MonoInst *info_var = cfg->arch.seq_point_info_var;
+
+ /* Load info var */
+ amd64_mov_reg_membase (code, AMD64_R11, info_var->inst_basereg, info_var->inst_offset, 8);
+ val = ((offset) * sizeof (guint8*)) + G_STRUCT_OFFSET (SeqPointInfo, bp_addrs);
+ /* Load the info->bp_addrs [offset], which is either a valid address or the address of a trigger page */
+ amd64_mov_reg_membase (code, AMD64_R11, AMD64_R11, val, 8);
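+ /* Dereference the entry: this faults only if the slot holds bp_trigger_page */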
+ amd64_mov_reg_membase (code, AMD64_R11, AMD64_R11, 0, 8);
+ } else {
+ /*
+ * A placeholder for a possible breakpoint inserted by
+ * mono_arch_set_breakpoint ().
+ */
+ for (i = 0; i < breakpoint_size; ++i)
+ x86_nop (code);
+ }
+ /*
+ * Add an additional nop so skipping the bp doesn't cause the ip to point
+ * to another IL offset.
*/
- for (i = 0; i < breakpoint_size; ++i)
- x86_nop (code);
+ x86_nop (code);
break;
}
case OP_ADDCC:
+ case OP_LADDCC:
case OP_LADD:
amd64_alu_reg_reg (code, X86_ADD, ins->sreg1, ins->sreg2);
break;
amd64_alu_reg_imm (code, X86_ADC, ins->dreg, ins->inst_imm);
break;
case OP_SUBCC:
+ case OP_LSUBCC:
case OP_LSUB:
amd64_alu_reg_reg (code, X86_SUB, ins->sreg1, ins->sreg2);
break;
}
case OP_LDIV:
case OP_LREM:
+#if defined( __native_client_codegen__ )
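+ /* NaCl: emit an explicit divisor check, since the sandbox does not let the runtime turn the hardware divide fault into an exception */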
+ amd64_alu_reg_imm (code, X86_CMP, ins->sreg2, 0);
+ EMIT_COND_SYSTEM_EXCEPTION (X86_CC_EQ, TRUE, "DivideByZeroException");
+#endif
/* Regalloc magic makes the div/rem cases the same */
if (ins->sreg2 == AMD64_RDX) {
amd64_mov_membase_reg (code, AMD64_RSP, -8, AMD64_RDX, 8);
break;
case OP_LDIV_UN:
case OP_LREM_UN:
+#if defined( __native_client_codegen__ )
+ amd64_alu_reg_imm (code, X86_CMP, ins->sreg2, 0);
+ EMIT_COND_SYSTEM_EXCEPTION (X86_CC_EQ, TRUE, "DivideByZeroException");
+#endif
if (ins->sreg2 == AMD64_RDX) {
amd64_mov_membase_reg (code, AMD64_RSP, -8, AMD64_RDX, 8);
amd64_alu_reg_reg (code, X86_XOR, AMD64_RDX, AMD64_RDX);
break;
case OP_IDIV:
case OP_IREM:
+#if defined( __native_client_codegen__ )
+ amd64_alu_reg_imm (code, X86_CMP, ins->sreg2, 0);
+ EMIT_COND_SYSTEM_EXCEPTION (X86_CC_EQ, TRUE, "DivideByZeroException");
+#endif
if (ins->sreg2 == AMD64_RDX) {
amd64_mov_membase_reg (code, AMD64_RSP, -8, AMD64_RDX, 8);
amd64_cdq_size (code, 4);
break;
case OP_IDIV_UN:
case OP_IREM_UN:
+#if defined( __native_client_codegen__ )
+ amd64_alu_reg_imm_size (code, X86_CMP, ins->sreg2, 0, 4);
+ EMIT_COND_SYSTEM_EXCEPTION (X86_CC_EQ, TRUE, "DivideByZeroException");
+#endif
if (ins->sreg2 == AMD64_RDX) {
amd64_mov_membase_reg (code, AMD64_RSP, -8, AMD64_RDX, 8);
amd64_alu_reg_reg (code, X86_XOR, AMD64_RDX, AMD64_RDX);
/* FIXME: no tracing support... */
if (cfg->prof_options & MONO_PROFILE_ENTER_LEAVE)
- code = mono_arch_instrument_epilog_full (cfg, mono_profiler_method_leave, code, FALSE, FALSE);
+ code = mono_arch_instrument_epilog_full (cfg, mono_profiler_method_leave, code, FALSE, TRUE);
g_assert (!cfg->method->save_lmf);
* done:
*/
- if (value != AMD64_RDX)
- amd64_mov_reg_reg (code, AMD64_RDX, value, 8);
- amd64_shift_reg_imm (code, X86_SHR, AMD64_RDX, nursery_shift);
- if (shifted_nursery_start >> 31) {
- /*
- * The value we need to compare against is 64 bits, so we need
- * another spare register. We use RBX, which we save and
- * restore.
- */
- amd64_mov_membase_reg (code, AMD64_RSP, -8, AMD64_RBX, 8);
- amd64_mov_reg_imm (code, AMD64_RBX, shifted_nursery_start);
- amd64_alu_reg_reg (code, X86_CMP, AMD64_RDX, AMD64_RBX);
- amd64_mov_reg_membase (code, AMD64_RBX, AMD64_RSP, -8, 8);
- } else {
- amd64_alu_reg_imm (code, X86_CMP, AMD64_RDX, shifted_nursery_start);
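+ /* Only emit the nursery check when the GC requests it; otherwise fall through and mark the card unconditionally */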
+ if (mono_gc_card_table_nursery_check ()) {
+ if (value != AMD64_RDX)
+ amd64_mov_reg_reg (code, AMD64_RDX, value, 8);
+ amd64_shift_reg_imm (code, X86_SHR, AMD64_RDX, nursery_shift);
+ if (shifted_nursery_start >> 31) {
+ /*
+ * The value we need to compare against is 64 bits, so we need
+ * another spare register. We use RBX, which we save and
+ * restore.
+ */
+ amd64_mov_membase_reg (code, AMD64_RSP, -8, AMD64_RBX, 8);
+ amd64_mov_reg_imm (code, AMD64_RBX, shifted_nursery_start);
+ amd64_alu_reg_reg (code, X86_CMP, AMD64_RDX, AMD64_RBX);
+ amd64_mov_reg_membase (code, AMD64_RBX, AMD64_RSP, -8, 8);
+ } else {
+ amd64_alu_reg_imm (code, X86_CMP, AMD64_RDX, shifted_nursery_start);
+ }
+ br = code; x86_branch8 (code, X86_CC_NE, -1, FALSE);
}
- br = code; x86_branch8 (code, X86_CC_NE, -1, FALSE);
amd64_mov_reg_reg (code, AMD64_RDX, ptr, 8);
amd64_shift_reg_imm (code, X86_SHR, AMD64_RDX, card_table_shift);
if (card_table_mask)
amd64_alu_reg_membase (code, X86_ADD, AMD64_RDX, AMD64_RIP, 0);
amd64_mov_membase_imm (code, AMD64_RDX, 0, 1, 1);
- x86_patch (br, code);
+ if (mono_gc_card_table_nursery_check ())
+ x86_patch (br, code);
break;
}
#ifdef MONO_ARCH_SIMD_INTRINSICS
break;
}
case OP_NACL_GC_SAFE_POINT: {
-#if defined(__native_client_codegen__)
- code = emit_call (cfg, code, MONO_PATCH_INFO_ABS, (gpointer)mono_nacl_gc, TRUE);
+#if defined(__native_client_codegen__) && defined(__native_client_gc__)
+ if (cfg->compile_aot)
+ code = emit_call (cfg, code, MONO_PATCH_INFO_ABS, (gpointer)mono_nacl_gc, TRUE);
+ else {
+ guint8 *br [1];
+
+ amd64_mov_reg_imm_size (code, AMD64_R11, (gpointer)&__nacl_thread_suspension_needed, 4);
+ amd64_test_membase_imm_size (code, AMD64_R11, 0, 0xFFFFFFFF, 4);
+ br[0] = code; x86_branch8 (code, X86_CC_EQ, 0, FALSE);
+ code = emit_call (cfg, code, MONO_PATCH_INFO_ABS, (gpointer)mono_nacl_gc, TRUE);
+ amd64_patch (br[0], code);
+ }
#endif
break;
}
(cfg->rgctx_var->inst_basereg == AMD64_RBP || cfg->rgctx_var->inst_basereg == AMD64_RSP));
amd64_mov_membase_reg (code, cfg->rgctx_var->inst_basereg, cfg->rgctx_var->inst_offset, MONO_ARCH_RGCTX_REG, sizeof(gpointer));
+
+ mono_add_var_location (cfg, cfg->rgctx_var, TRUE, MONO_ARCH_RGCTX_REG, 0, 0, code - cfg->native_code);
+ mono_add_var_location (cfg, cfg->rgctx_var, FALSE, cfg->rgctx_var->inst_basereg, cfg->rgctx_var->inst_offset, code - cfg->native_code, 0);
}
/* compute max_length in order to use short forward jumps */
size = 8;
*/
amd64_mov_membase_reg (code, ins->inst_basereg, ins->inst_offset, ainfo->reg, size);
+
+ /*
+ * Save the original location of 'this',
+ * get_generic_info_from_stack_frame () needs this to properly look up
+ * the argument value during the handling of async exceptions.
+ */
+ if (ins == cfg->args [0]) {
+ mono_add_var_location (cfg, ins, TRUE, ainfo->reg, 0, 0, code - cfg->native_code);
+ mono_add_var_location (cfg, ins, FALSE, ins->inst_basereg, ins->inst_offset, code - cfg->native_code, 0);
+ }
break;
}
case ArgInFloatSSEReg:
default:
g_assert_not_reached ();
}
- }
- }
-
- /* Might need to attach the thread to the JIT or change the domain for the callback */
- if (method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED) {
- guint64 domain = (guint64)cfg->domain;
-
- args_clobbered = TRUE;
- /*
- * The call might clobber argument registers, but they are already
- * saved to the stack/global regs.
- */
- if (appdomain_tls_offset != -1 && lmf_tls_offset != -1) {
- guint8 *buf, *no_domain_branch;
-
- code = mono_amd64_emit_tls_get (code, AMD64_RAX, appdomain_tls_offset);
- if (cfg->compile_aot) {
- /* AOT code is only used in the root domain */
- amd64_mov_reg_imm (code, AMD64_ARG_REG1, 0);
- } else {
- if ((domain >> 32) == 0)
- amd64_mov_reg_imm_size (code, AMD64_ARG_REG1, domain, 4);
- else
- amd64_mov_reg_imm_size (code, AMD64_ARG_REG1, domain, 8);
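+ /* As in the stack-slot case above, record both locations of 'this' for async stack walks */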
+ if (ins == cfg->args [0]) {
+ mono_add_var_location (cfg, ins, TRUE, ainfo->reg, 0, 0, code - cfg->native_code);
+ mono_add_var_location (cfg, ins, TRUE, ins->dreg, 0, code - cfg->native_code, 0);
}
- amd64_alu_reg_reg (code, X86_CMP, AMD64_RAX, AMD64_ARG_REG1);
- no_domain_branch = code;
- x86_branch8 (code, X86_CC_NE, 0, 0);
- code = mono_amd64_emit_tls_get ( code, AMD64_RAX, lmf_addr_tls_offset);
- amd64_test_reg_reg (code, AMD64_RAX, AMD64_RAX);
- buf = code;
- x86_branch8 (code, X86_CC_NE, 0, 0);
- amd64_patch (no_domain_branch, code);
- code = emit_call (cfg, code, MONO_PATCH_INFO_INTERNAL_METHOD,
- (gpointer)"mono_jit_thread_attach", TRUE);
- amd64_patch (buf, code);
-#ifdef HOST_WIN32
- /* The TLS key actually contains a pointer to the MonoJitTlsData structure */
- /* FIXME: Add a separate key for LMF to avoid this */
- amd64_alu_reg_imm (code, X86_ADD, AMD64_RAX, G_STRUCT_OFFSET (MonoJitTlsData, lmf));
-#endif
- } else {
- g_assert (!cfg->compile_aot);
- if (cfg->compile_aot) {
- /* AOT code is only used in the root domain */
- amd64_mov_reg_imm (code, AMD64_ARG_REG1, 0);
- } else {
- if ((domain >> 32) == 0)
- amd64_mov_reg_imm_size (code, AMD64_ARG_REG1, domain, 4);
- else
- amd64_mov_reg_imm_size (code, AMD64_ARG_REG1, domain, 8);
- }
- code = emit_call (cfg, code, MONO_PATCH_INFO_INTERNAL_METHOD,
- (gpointer)"mono_jit_thread_attach", TRUE);
}
}
}
}
- /* Initialize ss_trigger_page_var */
- if (cfg->arch.ss_trigger_page_var) {
- MonoInst *var = cfg->arch.ss_trigger_page_var;
- g_assert (!cfg->compile_aot);
- g_assert (var->opcode == OP_REGOFFSET);
- amd64_mov_reg_imm (code, AMD64_R11, (guint64)ss_trigger_page);
- amd64_mov_membase_reg (code, var->inst_basereg, var->inst_offset, AMD64_R11, 8);
+ if (cfg->gen_seq_points) {
+ MonoInst *info_var = cfg->arch.seq_point_info_var;
+ /* Initialize seq_point_info_var */
+ if (cfg->compile_aot) {
+ /* Initialize the variable from a GOT slot */
+ /* Same as OP_AOTCONST */
+ mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_SEQ_POINT_INFO, cfg->method);
+ amd64_mov_reg_membase (code, AMD64_R11, AMD64_RIP, 0, sizeof(gpointer));
+ g_assert (info_var->opcode == OP_REGOFFSET);
+ amd64_mov_membase_reg (code, info_var->inst_basereg, info_var->inst_offset, AMD64_R11, 8);
+ }
+
+ /* Initialize ss_trigger_page_var */
+ ins = cfg->arch.ss_trigger_page_var;
+ g_assert (ins->opcode == OP_REGOFFSET);
+
+ if (cfg->compile_aot) {
+ amd64_mov_reg_membase (code, AMD64_R11, info_var->inst_basereg, info_var->inst_offset, 8);
+ amd64_mov_reg_membase (code, AMD64_R11, AMD64_R11, G_STRUCT_OFFSET (SeqPointInfo, ss_trigger_page), 8);
+ } else {
+ amd64_mov_reg_imm (code, AMD64_R11, (guint64)ss_trigger_page);
+ }
+ amd64_mov_membase_reg (code, ins->inst_basereg, ins->inst_offset, AMD64_R11, 8);
}
cfg->code_len = code - cfg->native_code;
int save_mode = SAVE_NONE;
MonoMethod *method = cfg->method;
MonoType *ret_type = mini_type_get_underlying_type (NULL, mono_method_signature (method)->ret);
+ int i;
switch (ret_type->type) {
case MONO_TYPE_VOID:
amd64_mov_reg_imm (code, AMD64_RAX, 0);
if (preserve_argument_registers) {
- amd64_push_reg (code, MONO_AMD64_ARG_REG1);
- amd64_push_reg (code, MONO_AMD64_ARG_REG2);
+ for (i = 0; i < PARAM_REGS; ++i)
+ amd64_push_reg (code, param_regs [i]);
}
mono_add_patch_info (cfg, code-cfg->native_code, MONO_PATCH_INFO_METHODCONST, method);
code = emit_call (cfg, code, MONO_PATCH_INFO_ABS, (gpointer)func, TRUE);
if (preserve_argument_registers) {
- amd64_pop_reg (code, MONO_AMD64_ARG_REG2);
- amd64_pop_reg (code, MONO_AMD64_ARG_REG1);
+ for (i = PARAM_REGS - 1; i >= 0; --i)
+ amd64_pop_reg (code, param_regs [i]);
}
/* Restore result */
return start;
}
-
-/*
- * Support for fast access to the thread-local lmf structure using the GS
- * segment register on NPTL + kernel 2.6.x.
- */
-
-static gboolean tls_offset_inited = FALSE;
-
void
-mono_arch_setup_jit_tls_data (MonoJitTlsData *tls)
+mono_arch_finish_init (void)
{
- if (!tls_offset_inited) {
#ifdef HOST_WIN32
- /*
- * We need to init this multiple times, since when we are first called, the key might not
- * be initialized yet.
- */
- appdomain_tls_offset = mono_domain_get_tls_key ();
- lmf_tls_offset = mono_get_jit_tls_key ();
- lmf_addr_tls_offset = mono_get_jit_tls_key ();
-
- /* Only 64 tls entries can be accessed using inline code */
- if (appdomain_tls_offset >= 64)
- appdomain_tls_offset = -1;
- if (lmf_tls_offset >= 64)
- lmf_tls_offset = -1;
+ /*
+ * We need to init this multiple times, since when we are first called, the key might not
+ * be initialized yet.
+ */
+ appdomain_tls_offset = mono_domain_get_tls_key ();
+ lmf_tls_offset = mono_get_jit_tls_key ();
+ lmf_addr_tls_offset = mono_get_jit_tls_key ();
+
+ /* Only 64 tls entries can be accessed using inline code */
+ if (appdomain_tls_offset >= 64)
+ appdomain_tls_offset = -1;
+ if (lmf_tls_offset >= 64)
+ lmf_tls_offset = -1;
+ if (lmf_addr_tls_offset >= 64)
+ lmf_addr_tls_offset = -1;
#else
- tls_offset_inited = TRUE;
#ifdef MONO_XEN_OPT
- optimize_for_xen = access ("/proc/xen", F_OK) == 0;
+ optimize_for_xen = access ("/proc/xen", F_OK) == 0;
#endif
- appdomain_tls_offset = mono_domain_get_tls_offset ();
- lmf_tls_offset = mono_get_lmf_tls_offset ();
- lmf_addr_tls_offset = mono_get_lmf_addr_tls_offset ();
+ appdomain_tls_offset = mono_domain_get_tls_offset ();
+ lmf_tls_offset = mono_get_lmf_tls_offset ();
+ lmf_addr_tls_offset = mono_get_lmf_addr_tls_offset ();
#endif
- }
}
void
if (item->check_target_idx || fail_case) {
if (!item->compare_done || fail_case) {
if (amd64_is_imm32 (item->key))
- amd64_alu_reg_imm (code, X86_CMP, MONO_ARCH_IMT_REG, (guint32)(gssize)item->key);
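+ /* Use an explicit pointer-sized compare: gpointer is only 4 bytes under NaCl/ILP32 */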
+ amd64_alu_reg_imm_size (code, X86_CMP, MONO_ARCH_IMT_REG, (guint32)(gssize)item->key, sizeof(gpointer));
else {
amd64_mov_reg_imm (code, MONO_ARCH_IMT_SCRATCH_REG, item->key);
amd64_alu_reg_reg (code, X86_CMP, MONO_ARCH_IMT_REG, MONO_ARCH_IMT_SCRATCH_REG);
/* enable the commented code to assert on wrong method */
#if 0
if (amd64_is_imm32 (item->key))
- amd64_alu_reg_imm (code, X86_CMP, MONO_ARCH_IMT_REG, (guint32)(gssize)item->key);
+ amd64_alu_reg_imm_size (code, X86_CMP, MONO_ARCH_IMT_REG, (guint32)(gssize)item->key, sizeof(gpointer));
else {
amd64_mov_reg_imm (code, MONO_ARCH_IMT_SCRATCH_REG, item->key);
amd64_alu_reg_reg (code, X86_CMP, MONO_ARCH_IMT_REG, MONO_ARCH_IMT_SCRATCH_REG);
}
} else {
if (amd64_is_imm32 (item->key))
- amd64_alu_reg_imm (code, X86_CMP, MONO_ARCH_IMT_REG, (guint32)(gssize)item->key);
+ amd64_alu_reg_imm_size (code, X86_CMP, MONO_ARCH_IMT_REG, (guint32)(gssize)item->key, sizeof (gpointer));
else {
amd64_mov_reg_imm (code, MONO_ARCH_IMT_SCRATCH_REG, item->key);
amd64_alu_reg_reg (code, X86_CMP, MONO_ARCH_IMT_REG, MONO_ARCH_IMT_SCRATCH_REG);
guint8 *code = ip;
guint8 *orig_code = code;
- /*
- * In production, we will use int3 (has to fix the size in the md
- * file). But that could confuse gdb, so during development, we emit a SIGSEGV
- * instead.
- */
- g_assert (code [0] == 0x90);
- if (breakpoint_size == 8) {
- amd64_mov_reg_mem (code, AMD64_R11, (guint64)bp_trigger_page, 4);
+ if (ji->from_aot) {
+ guint32 native_offset = ip - (guint8*)ji->code_start;
+ SeqPointInfo *info = mono_arch_get_seq_point_info (mono_domain_get (), ji->code_start);
+
+ /* When no breakpoint is set, the slot holds a plain readable address (see mono_arch_get_seq_point_info ()) */
+ g_assert (info->bp_addrs [native_offset] != bp_trigger_page);
+ info->bp_addrs [native_offset] = bp_trigger_page;
} else {
- amd64_mov_reg_imm_size (code, AMD64_R11, (guint64)bp_trigger_page, 8);
- amd64_mov_reg_membase (code, AMD64_R11, AMD64_R11, 0, 4);
- }
+ /*
+ * In production we will use int3 (this requires fixing the breakpoint
+ * size in the cpu description file). But int3 could confuse gdb, so
+ * during development we emit a SIGSEGV instead.
+ */
+ g_assert (code [0] == 0x90);
+ if (breakpoint_size == 8) {
+ amd64_mov_reg_mem (code, AMD64_R11, (guint64)bp_trigger_page, 4);
+ } else {
+ amd64_mov_reg_imm_size (code, AMD64_R11, (guint64)bp_trigger_page, 8);
+ amd64_mov_reg_membase (code, AMD64_R11, AMD64_R11, 0, 4);
+ }
- g_assert (code - orig_code == breakpoint_size);
+ g_assert (code - orig_code == breakpoint_size);
+ }
}
/*
guint8 *code = ip;
int i;
- for (i = 0; i < breakpoint_size; ++i)
- x86_nop (code);
+ if (ji->from_aot) {
+ guint32 native_offset = ip - (guint8*)ji->code_start;
+ SeqPointInfo *info = mono_arch_get_seq_point_info (mono_domain_get (), ji->code_start);
+
+ g_assert (info->bp_addrs [native_offset] == bp_trigger_page);
+ info->bp_addrs [native_offset] = info;
+ } else {
+ for (i = 0; i < breakpoint_size; ++i)
+ x86_nop (code);
+ }
}
gboolean
#endif
}
-/*
- * mono_arch_get_ip_for_breakpoint:
- *
- * Convert the ip in CTX to the address where a breakpoint was placed.
- */
-guint8*
-mono_arch_get_ip_for_breakpoint (MonoJitInfo *ji, MonoContext *ctx)
-{
- guint8 *ip = MONO_CONTEXT_GET_IP (ctx);
-
- /* ip points to the instruction causing the fault */
- ip -= (breakpoint_size - breakpoint_fault_size);
-
- return ip;
-}
-
/*
* mono_arch_skip_breakpoint:
*
* we resume, the instruction is not executed again.
*/
void
-mono_arch_skip_breakpoint (MonoContext *ctx)
+mono_arch_skip_breakpoint (MonoContext *ctx, MonoJitInfo *ji)
{
- MONO_CONTEXT_SET_IP (ctx, (guint8*)MONO_CONTEXT_GET_IP (ctx) + breakpoint_fault_size);
+ if (ji->from_aot) {
+ /* amd64_mov_reg_membase (code, AMD64_R11, AMD64_R11, 0, 8) */
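+ /* That load is 3 bytes long; resume execution right after it */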
+ MONO_CONTEXT_SET_IP (ctx, (guint8*)MONO_CONTEXT_GET_IP (ctx) + 3);
+ } else {
+ MONO_CONTEXT_SET_IP (ctx, (guint8*)MONO_CONTEXT_GET_IP (ctx) + breakpoint_fault_size);
+ }
}
/*
#endif
}
-/*
- * mono_arch_get_ip_for_single_step:
- *
- * Convert the ip in CTX to the address stored in seq_points.
- */
-guint8*
-mono_arch_get_ip_for_single_step (MonoJitInfo *ji, MonoContext *ctx)
-{
- guint8 *ip = MONO_CONTEXT_GET_IP (ctx);
-
- ip += single_step_fault_size;
-
- return ip;
-}
-
/*
* mono_arch_skip_single_step:
*
gpointer
mono_arch_get_seq_point_info (MonoDomain *domain, guint8 *code)
{
- NOT_IMPLEMENTED;
- return NULL;
+ SeqPointInfo *info;
+ MonoJitInfo *ji;
+ int i;
+
+ // FIXME: Add a free function
+
+ mono_domain_lock (domain);
+ info = g_hash_table_lookup (domain_jit_info (domain)->arch_seq_points,
+ code);
+ mono_domain_unlock (domain);
+
+ if (!info) {
+ ji = mono_jit_info_table_find (domain, (char*)code);
+ g_assert (ji);
+
+ // FIXME: Optimize the size
+ info = g_malloc0 (sizeof (SeqPointInfo) + (ji->code_size * sizeof (gpointer)));
+
+ info->ss_trigger_page = ss_trigger_page;
+ info->bp_trigger_page = bp_trigger_page;
+ /* Initialize to a valid address */
+ for (i = 0; i < ji->code_size; ++i)
+ info->bp_addrs [i] = info;
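+ /* The OP_SEQ_POINT code dereferences these entries, so they must always point at readable memory; 'info' itself is such an address */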
+
+ mono_domain_lock (domain);
+ g_hash_table_insert (domain_jit_info (domain)->arch_seq_points,
+ code, info);
+ mono_domain_unlock (domain);
+ }
+
+ return info;
}
#endif