#include <mono/metadata/threads.h>
#include <mono/metadata/profiler-private.h>
#include <mono/metadata/mono-debug.h>
+#include <mono/metadata/gc-internal.h>
#include <mono/utils/mono-math.h>
#include <mono/utils/mono-counters.h>
#include <mono/utils/mono-mmap.h>
#define CALLCONV_IS_STDCALL(sig) (((sig)->call_convention) == MONO_CALL_STDCALL)
#endif
+#define X86_IS_CALLEE_SAVED_REG(reg) (((reg) == X86_EBX) || ((reg) == X86_EDI) || ((reg) == X86_ESI))
+
MonoBreakpointInfo
mono_breakpoint_info [MONO_BREAKPOINT_ARRAY_SIZE];
+
+#ifdef __native_client_codegen__
+const guint kNaClAlignment = kNaClAlignmentX86;
+const guint kNaClAlignmentMask = kNaClAlignmentMaskX86;
+
+/* Default alignment for Native Client is 32 bytes. */
+gint8 nacl_align_byte = -32; /* signed version of 0xe0 */
+
+/* mono_arch_nacl_pad: add 'pad' bytes of alignment instructions at 'code', */
+/* asserting that the padding does not cross an alignment boundary. */
+guint8 *
+mono_arch_nacl_pad (guint8 *code, int pad)
+{
+ const int kMaxPadding = 7; /* see x86-codegen.h: x86_padding() */
+
+ if (pad == 0) return code;
+ /* assertion: alignment cannot cross a block boundary */
+ g_assert(((uintptr_t)code & (~kNaClAlignmentMask)) ==
+ (((uintptr_t)code + pad - 1) & (~kNaClAlignmentMask)));
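+ /* Emit the padding in chunks, since x86_padding () emits at most kMaxPadding bytes at a time. */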
+ while (pad >= kMaxPadding) {
+ x86_padding (code, kMaxPadding);
+ pad -= kMaxPadding;
+ }
+ if (pad != 0) x86_padding (code, pad);
+ return code;
+}
+
+guint8 *
+mono_arch_nacl_skip_nops (guint8 *code)
+{
+ x86_skip_nops (code);
+ return code;
+}
+
+#endif /* __native_client_codegen__ */
+
/*
* The code generated for sequence points reads from this location, which is
* made read-only when single stepping is enabled.
guint32 freg_usage;
gboolean need_stack_align;
guint32 stack_align_amount;
+ gboolean vtype_retaddr;
+ /* The index of the vret arg in the argument list */
+ int vret_arg_index;
ArgInfo ret;
ArgInfo sig_cookie;
ArgInfo args [1];
* For x86 win32, see ???.
*/
static CallInfo*
-get_call_info_internal (MonoGenericSharingContext *gsctx, CallInfo *cinfo, MonoMethodSignature *sig, gboolean is_pinvoke)
+get_call_info_internal (MonoGenericSharingContext *gsctx, CallInfo *cinfo, MonoMethodSignature *sig)
{
- guint32 i, gr, fr;
+ guint32 i, gr, fr, pstart;
MonoType *ret_type;
int n = sig->hasthis + sig->param_count;
guint32 stack_size = 0;
+ gboolean is_pinvoke = sig->pinvoke;
gr = 0;
fr = 0;
guint32 tmp_gr = 0, tmp_fr = 0, tmp_stacksize = 0;
add_valuetype (gsctx, sig, &cinfo->ret, sig->ret, TRUE, &tmp_gr, &tmp_fr, &tmp_stacksize);
- if (cinfo->ret.storage == ArgOnStack)
+ if (cinfo->ret.storage == ArgOnStack) {
+ cinfo->vtype_retaddr = TRUE;
/* The caller passes the address where the value is stored */
- add_general (&gr, &stack_size, &cinfo->ret);
+ }
break;
}
case MONO_TYPE_TYPEDBYREF:
- /* Same as a valuetype with size 24 */
- add_general (&gr, &stack_size, &cinfo->ret);
- ;
+ /* Same as a valuetype with size 12 */
+ cinfo->vtype_retaddr = TRUE;
break;
case MONO_TYPE_VOID:
cinfo->ret.storage = ArgNone;
}
}
- /* this */
- if (sig->hasthis)
- add_general (&gr, &stack_size, cinfo->args + 0);
+ pstart = 0;
+ /*
+ * To simplify get_this_arg_reg () and LLVM integration, emit the vret arg after
+ * the first argument, allowing 'this' to be always passed in the first arg reg.
+ * Also do this if the first argument is a reference type, since virtual calls
+ * are sometimes made using calli without sig->hasthis set, like in the delegate
+ * invoke wrappers.
+ */
+ if (cinfo->vtype_retaddr && !is_pinvoke && (sig->hasthis || (sig->param_count > 0 && MONO_TYPE_IS_REFERENCE (mini_type_get_underlying_type (gsctx, sig->params [0]))))) {
+ if (sig->hasthis) {
+ add_general (&gr, &stack_size, cinfo->args + 0);
+ } else {
+ add_general (&gr, &stack_size, &cinfo->args [sig->hasthis + 0]);
+ pstart = 1;
+ }
+ add_general (&gr, &stack_size, &cinfo->ret);
+ cinfo->vret_arg_index = 1;
+ } else {
+ /* this */
+ if (sig->hasthis)
+ add_general (&gr, &stack_size, cinfo->args + 0);
+
+ if (cinfo->vtype_retaddr)
+ add_general (&gr, &stack_size, &cinfo->ret);
+ }
if (!sig->pinvoke && (sig->call_convention == MONO_CALL_VARARG) && (n == 0)) {
gr = PARAM_REGS;
add_general (&gr, &stack_size, &cinfo->sig_cookie);
}
- for (i = 0; i < sig->param_count; ++i) {
+ for (i = pstart; i < sig->param_count; ++i) {
ArgInfo *ainfo = &cinfo->args [sig->hasthis + i];
MonoType *ptype;
}
static CallInfo*
-get_call_info (MonoGenericSharingContext *gsctx, MonoMemPool *mp, MonoMethodSignature *sig, gboolean is_pinvoke)
+get_call_info (MonoGenericSharingContext *gsctx, MonoMemPool *mp, MonoMethodSignature *sig)
{
int n = sig->hasthis + sig->param_count;
CallInfo *cinfo;
else
cinfo = g_malloc0 (sizeof (CallInfo) + (sizeof (ArgInfo) * n));
- return get_call_info_internal (gsctx, cinfo, sig, is_pinvoke);
+ return get_call_info_internal (gsctx, cinfo, sig);
}
/*
*
* Returns the size of the argument area on the stack.
* This should be signal safe, since it is called from
- * mono_arch_find_jit_info_ext ().
+ * mono_arch_find_jit_info ().
* FIXME: The metadata calls might not be signal safe.
*/
int
mono_arch_get_argument_info (MonoMethodSignature *csig, int param_count, MonoJitArgumentInfo *arg_info)
{
- int k, args_size = 0;
+ int len, k, args_size = 0;
int size, pad;
guint32 align;
int offset = 8;
CallInfo *cinfo;
/* Avoid g_malloc as it is not signal safe */
- cinfo = (CallInfo*)g_newa (guint8*, sizeof (CallInfo) + (sizeof (ArgInfo) * (csig->param_count + 1)));
+ len = sizeof (CallInfo) + (sizeof (ArgInfo) * (csig->param_count + 1));
+ cinfo = (CallInfo*)g_newa (guint8*, len);
+ memset (cinfo, 0, len);
+
- cinfo = get_call_info_internal (NULL, cinfo, csig, FALSE);
+ cinfo = get_call_info_internal (NULL, cinfo, csig);
+ arg_info [0].offset = offset;
- if (MONO_TYPE_ISSTRUCT (csig->ret) && (cinfo->ret.storage == ArgOnStack)) {
+ if (cinfo->vtype_retaddr && cinfo->vret_arg_index == 0) {
args_size += sizeof (gpointer);
offset += 4;
}
- arg_info [0].offset = offset;
-
if (csig->hasthis) {
args_size += sizeof (gpointer);
offset += 4;
}
+ if (cinfo->vtype_retaddr && cinfo->vret_arg_index == 1 && csig->hasthis) {
+ /* Emitted after this */
+ args_size += sizeof (gpointer);
+ offset += 4;
+ }
+
arg_info [0].size = args_size;
for (k = 0; k < param_count; k++) {
offset += pad;
arg_info [k + 1].offset = offset;
offset += size;
+
+ if (k == 0 && cinfo->vtype_retaddr && cinfo->vret_arg_index == 1 && !csig->hasthis) {
+ /* Emitted after the first arg */
+ args_size += sizeof (gpointer);
+ offset += 4;
+ }
}
if (mono_do_x86_stack_align && !CALLCONV_IS_STDCALL (csig))
return args_size;
}
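+/*
+ * mono_x86_tail_call_supported:
+ *
+ * A tail call reuses the caller's argument area, so it is only possible when
+ * that area is at least as large as the callee's, and when the callee does
+ * not return a vtype through a hidden pointer argument.
+ */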
+gboolean
+mono_x86_tail_call_supported (MonoMethodSignature *caller_sig, MonoMethodSignature *callee_sig)
+{
+ CallInfo *c1, *c2;
+ gboolean res;
+
+ c1 = get_call_info (NULL, NULL, caller_sig);
+ c2 = get_call_info (NULL, NULL, callee_sig);
+ res = c1->stack_usage >= c2->stack_usage;
+ if (callee_sig->ret && MONO_TYPE_ISSTRUCT (callee_sig->ret) && c2->ret.storage != ArgValuetypeInReg)
+ /* An address on the callee's stack is passed as the first argument */
+ res = FALSE;
+
+ g_free (c1);
+ g_free (c2);
+
+ return res;
+}
+
static const guchar cpuid_impl [] = {
0x55, /* push %ebp */
0x89, 0xe5, /* mov %esp,%ebp */
static int
cpuid (int id, int* p_eax, int* p_ebx, int* p_ecx, int* p_edx)
{
+#if defined(__native_client__)
+ /* Taken from the non-NaCl path below; the bug described in that */
+ /* comment only applies to the non-static case. */
+ __asm__ __volatile__ ("cpuid"
+ : "=a" (*p_eax), "=b" (*p_ebx), "=c" (*p_ecx), "=d" (*p_edx)
+ : "a" (id));
+ return 1;
+#else
int have_cpuid = 0;
#ifndef _MSC_VER
__asm__ __volatile__ (
return 1;
}
return 0;
+#endif
}
/*
guint32
mono_arch_cpu_optimizazions (guint32 *exclude_mask)
{
+#if !defined(__native_client__)
int eax, ebx, ecx, edx;
guint32 opts = 0;
#endif
}
return opts;
+#else
+ return MONO_OPT_CMOV | MONO_OPT_FCMOV | MONO_OPT_SSE2;
+#endif
}
/*
header = cfg->header;
sig = mono_method_signature (cfg->method);
- cinfo = get_call_info (cfg->generic_sharing_context, cfg->mempool, sig, FALSE);
+ cinfo = get_call_info (cfg->generic_sharing_context, cfg->mempool, sig);
cfg->frame_reg = X86_EBP;
offset = 0;
offset += (locals_stack_align - 1);
offset &= ~(locals_stack_align - 1);
}
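+ /* Record the frame range occupied by locals; used later when computing GC maps. */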
+ cfg->locals_min_stack_offset = - (offset + locals_stack_size);
+ cfg->locals_max_stack_offset = - offset;
/*
* EBP is at alignment 8 % MONO_ARCH_FRAME_ALIGNMENT, so if we
* have locals larger than 8 bytes we need to make sure that
sig = mono_method_signature (cfg->method);
- cinfo = get_call_info (cfg->generic_sharing_context, cfg->mempool, sig, FALSE);
+ cinfo = get_call_info (cfg->generic_sharing_context, cfg->mempool, sig);
if (cinfo->ret.storage == ArgValuetypeInReg)
cfg->ret_var_is_local = TRUE;
n = sig->param_count + sig->hasthis;
- cinfo = get_call_info (cfg->generic_sharing_context, cfg->mempool, sig, sig->pinvoke);
+ cinfo = get_call_info (cfg->generic_sharing_context, cfg->mempool, sig);
linfo = mono_mempool_alloc0 (cfg->mempool, sizeof (LLVMCallInfo) + (sizeof (LLVMArgInfo) * n));
if (MONO_TYPE_ISSTRUCT (sig->ret) && cinfo->ret.storage == ArgInIReg) {
/* Vtype returned using a hidden argument */
linfo->ret.storage = LLVMArgVtypeRetAddr;
+ linfo->vret_arg_index = cinfo->vret_arg_index;
}
if (MONO_TYPE_ISSTRUCT (sig->ret) && cinfo->ret.storage != ArgInIReg) {
sig = call->signature;
n = sig->param_count + sig->hasthis;
- cinfo = get_call_info (cfg->generic_sharing_context, cfg->mempool, sig, FALSE);
+ cinfo = get_call_info (cfg->generic_sharing_context, cfg->mempool, sig);
if (!sig->pinvoke && (sig->call_convention == MONO_CALL_VARARG))
sentinelpos = sig->sentinelpos + (sig->hasthis ? 1 : 0);
ArgInfo *ainfo = cinfo->args + i;
MonoType *t;
+ if (cinfo->vtype_retaddr && cinfo->vret_arg_index == 1 && i == 0) {
+ /* Push the vret arg before the first argument */
+ MonoInst *vtarg;
+ MONO_INST_NEW (cfg, vtarg, OP_X86_PUSH);
+ vtarg->type = STACK_MP;
+ vtarg->sreg1 = call->vret_var->dreg;
+ MONO_ADD_INS (cfg->cbb, vtarg);
+ }
+
if (i >= sig->hasthis)
t = sig->params [i - sig->hasthis];
else
MONO_ADD_INS (cfg->cbb, vtarg);
mono_call_inst_add_outarg_reg (cfg, call, vtarg->dreg, cinfo->ret.reg, FALSE);
- } else {
+ } else if (cinfo->vtype_retaddr && cinfo->vret_arg_index == 0) {
MonoInst *vtarg;
MONO_INST_NEW (cfg, vtarg, OP_X86_PUSH);
vtarg->type = STACK_MP;
sig = mono_method_signature (method);
- cinfo = get_call_info (cfg->generic_sharing_context, cfg->mempool, sig, FALSE);
+ cinfo = get_call_info (cfg->generic_sharing_context, cfg->mempool, sig);
/* This is the opposite of the code in emit_prolog */
x86_pop_reg (code, X86_EDX); \
x86_pop_reg (code, X86_EAX);
+/* REAL_PRINT_REG does not appear to be used, and was not adapted to work with Native Client. */
+#ifdef __native_client_codegen__
+#define REAL_PRINT_REG(text, reg) g_assert_not_reached()
+#endif
+
/* benchmark and set based on cpu */
#define LOOP_ALIGNMENT 8
#define bb_is_loop_start(bb) ((bb)->loop_body_start && (bb)->nesting)
#ifndef DISABLE_JIT
+#if defined(__native_client__) || defined(__native_client_codegen__)
+void
+mono_nacl_gc (void)
+{
+#ifdef __native_client_gc__
+ __nacl_suspend_thread_if_needed ();
+#endif
+}
+#endif
+
void
mono_arch_output_basic_block (MonoCompile *cfg, MonoBasicBlock *bb)
{
bb->native_offset = cfg->code_len;
}
}
-
+#ifdef __native_client_codegen__
+ {
+ /* For Native Client, all indirect call/jump targets must be */
+ /* 32-byte aligned. Exception handler blocks are jumped to */
+ /* indirectly as well. */
+ gboolean bb_needs_alignment = (bb->flags & BB_INDIRECT_JUMP_TARGET) ||
+ (bb->flags & BB_EXCEPTION_HANDLER);
+
+ if (bb_needs_alignment && ((cfg->code_len & kNaClAlignmentMask) != 0)) {
+ int pad = kNaClAlignment - (cfg->code_len & kNaClAlignmentMask);
+ if (pad != kNaClAlignment) code = mono_arch_nacl_pad (code, pad);
+ cfg->code_len += pad;
+ bb->native_offset = cfg->code_len;
+ }
+ }
+#endif /* __native_client_codegen__ */
if (cfg->verbose_level > 2)
g_print ("Basic block %d starting at offset 0x%x\n", bb->block_num, bb->native_offset);
mono_debug_open_block (cfg, bb, offset);
+ if (mono_break_at_bb_method && mono_method_desc_full_match (mono_break_at_bb_method, cfg->method) && bb->block_num == mono_break_at_bb_bb_num)
+ x86_breakpoint (code);
+
MONO_BB_FOR_EACH_INS (bb, ins) {
offset = code - cfg->native_code;
max_len = ((guint8 *)ins_get_spec (ins->opcode))[MONO_INST_LEN];
- if (G_UNLIKELY (offset > (cfg->code_size - max_len - 16))) {
+#define EXTRA_CODE_SPACE (NACL_SIZE (16, 16 + kNaClAlignment))
+
+ if (G_UNLIKELY (offset > (cfg->code_size - max_len - EXTRA_CODE_SPACE))) {
cfg->code_size *= 2;
- cfg->native_code = g_realloc (cfg->native_code, cfg->code_size);
+ cfg->native_code = mono_realloc_native_code (cfg);
code = cfg->native_code + offset;
mono_jit_stats.code_reallocs++;
}
cfg->disable_aot = TRUE;
break;
}
+ case OP_TAILCALL: {
+ MonoCallInst *call = (MonoCallInst*)ins;
+ int pos = 0, i;
+
+ /* FIXME: no tracing support... */
+ if (cfg->prof_options & MONO_PROFILE_ENTER_LEAVE)
+ code = mono_arch_instrument_epilog (cfg, mono_profiler_method_leave, code, FALSE);
+ /* reset offset to make max_len work */
+ offset = code - cfg->native_code;
+
+ g_assert (!cfg->method->save_lmf);
+
+ //code = emit_load_volatile_arguments (cfg, code);
+
+ /* restore callee saved registers */
+ for (i = 0; i < X86_NREG; ++i)
+ if (X86_IS_CALLEE_SAVED_REG (i) && cfg->used_int_regs & (1 << i))
+ pos -= 4;
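+ /* pos is now the (negative) EBP offset of the lowest saved register. */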
+ if (cfg->used_int_regs & (1 << X86_ESI)) {
+ x86_mov_reg_membase (code, X86_ESI, X86_EBP, pos, 4);
+ pos += 4;
+ }
+ if (cfg->used_int_regs & (1 << X86_EDI)) {
+ x86_mov_reg_membase (code, X86_EDI, X86_EBP, pos, 4);
+ pos += 4;
+ }
+ if (cfg->used_int_regs & (1 << X86_EBX)) {
+ x86_mov_reg_membase (code, X86_EBX, X86_EBP, pos, 4);
+ pos += 4;
+ }
+
+ /* Copy arguments on the stack to our argument area */
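+ /* EBP+8 skips over the saved EBP and the return address in the caller frame. */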
+ for (i = 0; i < call->stack_usage; i += 4) {
+ x86_mov_reg_membase (code, X86_EAX, X86_ESP, i, 4);
+ x86_mov_membase_reg (code, X86_EBP, 8 + i, X86_EAX, 4);
+ }
+
+ /* restore ESP/EBP */
+ x86_leave (code);
+ offset = code - cfg->native_code;
+ mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_METHOD_JUMP, ins->inst_p0);
+ x86_jump32 (code, 0);
+
+ cfg->disable_aot = TRUE;
+ break;
+ }
case OP_CHECK_THIS:
/* ensure ins->sreg1 is not NULL
* note that cmp DWORD PTR [eax], eax is one byte shorter than
code = emit_call (cfg, code, MONO_PATCH_INFO_METHOD, call->method);
else
code = emit_call (cfg, code, MONO_PATCH_INFO_ABS, call->fptr);
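+ /* Record the call site so the GC map code can describe live stack slots at this EIP. */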
+ ins->flags |= MONO_INST_GC_CALLSITE;
+ ins->backend.pc_offset = code - cfg->native_code;
if (call->stack_usage && !CALLCONV_IS_STDCALL (call->signature)) {
/* a pop is one byte, while an add reg, imm is 3. So if there are 4 or 8
* bytes to pop, we want to use pops. GCC does this (note it won't happen
case OP_CALL_REG:
call = (MonoCallInst*)ins;
x86_call_reg (code, ins->sreg1);
+ ins->flags |= MONO_INST_GC_CALLSITE;
+ ins->backend.pc_offset = code - cfg->native_code;
if (call->stack_usage && !CALLCONV_IS_STDCALL (call->signature)) {
if (call->stack_usage == 4)
x86_pop_reg (code, X86_ECX);
case OP_CALL_MEMBASE:
call = (MonoCallInst*)ins;
- /*
- * Emit a few nops to simplify get_vcall_slot ().
- */
- x86_nop (code);
- x86_nop (code);
- x86_nop (code);
-
x86_call_membase (code, ins->sreg1, ins->inst_offset);
+ ins->flags |= MONO_INST_GC_CALLSITE;
+ ins->backend.pc_offset = code - cfg->native_code;
if (call->stack_usage && !CALLCONV_IS_STDCALL (call->signature)) {
if (call->stack_usage == 4)
x86_pop_reg (code, X86_ECX);
x86_push_reg (code, ins->sreg1);
code = emit_call (cfg, code, MONO_PATCH_INFO_INTERNAL_METHOD,
(gpointer)"mono_arch_throw_exception");
+ ins->flags |= MONO_INST_GC_CALLSITE;
+ ins->backend.pc_offset = code - cfg->native_code;
break;
}
case OP_RETHROW: {
x86_push_reg (code, ins->sreg1);
code = emit_call (cfg, code, MONO_PATCH_INFO_INTERNAL_METHOD,
(gpointer)"mono_arch_rethrow_exception");
+ ins->flags |= MONO_INST_GC_CALLSITE;
+ ins->backend.pc_offset = code - cfg->native_code;
break;
}
case OP_CALL_HANDLER:
break;
}
case OP_MEMORY_BARRIER: {
- /* Not needed on x86 */
+ /* http://blogs.sun.com/dave/resource/NHM-Pipeline-Blog-V2.txt */
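+ /* A dummy locked add to the top of the stack acts as a full fence and is reportedly cheaper than mfence (see the link above). */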
+ x86_prefix (code, X86_LOCK_PREFIX);
+ x86_alu_membase_imm (code, X86_ADD, X86_ESP, 0, 0);
break;
}
case OP_ATOMIC_ADD_I4: {
break;
}
case OP_ATOMIC_CAS_I4: {
+ g_assert (ins->dreg == X86_EAX);
g_assert (ins->sreg3 == X86_EAX);
g_assert (ins->sreg1 != X86_EAX);
g_assert (ins->sreg1 != ins->sreg2);
x86_prefix (code, X86_LOCK_PREFIX);
x86_cmpxchg_membase_reg (code, ins->sreg1, ins->inst_offset, ins->sreg2);
- if (ins->dreg != X86_EAX)
- x86_mov_reg_reg (code, ins->dreg, X86_EAX, 4);
break;
}
+ case OP_CARD_TABLE_WBARRIER: {
+ int ptr = ins->sreg1;
+ int value = ins->sreg2;
+ guchar *br;
+ int nursery_shift, card_table_shift;
+ gpointer card_table_mask;
+ size_t nursery_size;
+ gulong card_table = (gulong)mono_gc_get_card_table (&card_table_shift, &card_table_mask);
+ gulong nursery_start = (gulong)mono_gc_get_nursery (&nursery_shift, &nursery_size);
+ /*
+ * We need one register we can clobber, we choose EDX and make sreg1
+ * fixed EAX to work around limitations in the local register allocator.
+ * sreg2 might get allocated to EDX, but that is not a problem since
+ * we use it before clobbering EDX.
+ */
+ g_assert (ins->sreg1 == X86_EAX);
+
+ /*
+ * This is the code we produce:
+ *
+ * edx = value
+ * edx >>= nursery_shift
+ * cmp edx, (nursery_start >> nursery_shift)
+ * jne done
+ * edx = ptr
+ * edx >>= card_table_shift
+ * card_table[edx] = 1
+ * done:
+ */
+
+ if (value != X86_EDX)
+ x86_mov_reg_reg (code, X86_EDX, value, 4);
+ x86_shift_reg_imm (code, X86_SHR, X86_EDX, nursery_shift);
+ x86_alu_reg_imm (code, X86_CMP, X86_EDX, nursery_start >> nursery_shift);
+ br = code; x86_branch8 (code, X86_CC_NE, -1, FALSE);
+ x86_mov_reg_reg (code, X86_EDX, ptr, 4);
+ x86_shift_reg_imm (code, X86_SHR, X86_EDX, card_table_shift);
+ if (card_table_mask)
+ x86_alu_reg_imm (code, X86_AND, X86_EDX, (int)card_table_mask);
+ x86_mov_membase_imm (code, X86_EDX, card_table, 1, 1);
+ x86_patch (br, code);
break;
}
#ifdef MONO_ARCH_SIMD_INTRINSICS
MONO_VARINFO (cfg, ins->inst_c0)->live_range_end = code - cfg->native_code;
break;
}
+ case OP_NACL_GC_SAFE_POINT: {
+#if defined(__native_client_codegen__)
+ code = emit_call (cfg, code, MONO_PATCH_INFO_ABS, (gpointer)mono_nacl_gc);
+#endif
+ break;
+ }
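+ /* These pseudo ops emit no code; they only record native offsets for the GC maps. */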
+ case OP_GC_LIVENESS_DEF:
+ case OP_GC_LIVENESS_USE:
+ case OP_GC_PARAM_SLOT_LIVENESS_DEF:
+ ins->backend.pc_offset = code - cfg->native_code;
+ break;
+ case OP_GC_SPILL_SLOT_LIVENESS_DEF:
+ ins->backend.pc_offset = code - cfg->native_code;
+ bb->spill_slot_defs = g_slist_prepend_mempool (cfg->mempool, bb->spill_slot_defs, ins);
+ break;
default:
g_warning ("unknown opcode %s\n", mono_inst_name (ins->opcode));
g_assert_not_reached ();
}
if (G_UNLIKELY ((code - cfg->native_code - offset) > max_len)) {
+#ifndef __native_client_codegen__
g_warning ("wrong maximal instruction length of instruction %s (expected %d, got %d)",
- mono_inst_name (ins->opcode), max_len, code - cfg->native_code - offset);
+ mono_inst_name (ins->opcode), max_len, code - cfg->native_code - offset);
g_assert_not_reached ();
+#endif /* __native_client_codegen__ */
}
cpos += max_len;
case MONO_PATCH_INFO_GENERIC_CLASS_INIT:
case MONO_PATCH_INFO_MONITOR_ENTER:
case MONO_PATCH_INFO_MONITOR_EXIT:
+#if defined(__native_client_codegen__) && defined(__native_client__)
+ if (nacl_is_code_address (code)) {
+ /* For tail calls, code is patched after being installed */
+ /* but not through the normal "patch callsite" method. */
+ unsigned char buf[kNaClAlignment];
+ unsigned char *aligned_code = (unsigned char *)((uintptr_t)code & ~kNaClAlignmentMask);
+ unsigned char *_target = target;
+ int ret;
+ /* All patch targets modified in x86_patch */
+ /* are IP relative. */
+ _target = _target + (uintptr_t)buf - (uintptr_t)aligned_code;
+ memcpy (buf, aligned_code, kNaClAlignment);
+ /* Patch a temp buffer of bundle size, */
+ /* then install to actual location. */
+ x86_patch (buf + ((uintptr_t)code - (uintptr_t)aligned_code), _target);
+ ret = nacl_dyncode_modify (aligned_code, buf, kNaClAlignment);
+ g_assert (ret == 0);
+ }
+ else {
+ x86_patch (ip, target);
+ }
+#else
x86_patch (ip, target);
+#endif
break;
case MONO_PATCH_INFO_NONE:
break;
+ case MONO_PATCH_INFO_R4:
+ case MONO_PATCH_INFO_R8: {
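+ /* Data patches: unlike the default case below, these must not go through nacl_modify_patch_target (). */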
+ guint32 offset = mono_arch_get_patch_offset (ip);
+ *((gconstpointer *)(ip + offset)) = target;
+ break;
+ }
default: {
guint32 offset = mono_arch_get_patch_offset (ip);
+#if !defined(__native_client__)
*((gconstpointer *)(ip + offset)) = target;
+#else
+ *((gconstpointer *)(ip + offset)) = nacl_modify_patch_target (target);
+#endif
break;
}
}
int alloc_size, pos, max_offset, i, cfa_offset;
guint8 *code;
gboolean need_stack_frame;
+#ifdef __native_client_codegen__
+ guint alignment_check;
+#endif
cfg->code_size = MAX (cfg->header->code_size * 4, 10240);
if (cfg->prof_options & MONO_PROFILE_ENTER_LEAVE)
cfg->code_size += 512;
+#if defined(__default_codegen__)
code = cfg->native_code = g_malloc (cfg->code_size);
+#elif defined(__native_client_codegen__)
+ /* native_code_alloc is not 32-byte aligned, native_code is. */
+ cfg->native_code_alloc = g_malloc (cfg->code_size + kNaClAlignment);
+
+ /* Round native_code up to the next kNaClAlignment boundary. */
+ cfg->native_code = (gpointer)((guint)cfg->native_code_alloc + kNaClAlignment);
+ cfg->native_code = (gpointer)((guint)cfg->native_code & ~kNaClAlignmentMask);
+
+ code = cfg->native_code;
+
+ alignment_check = (guint)cfg->native_code & kNaClAlignmentMask;
+ g_assert(alignment_check == 0);
+#endif
/* Offset between RSP and the CFA */
cfa_offset = 0;
mono_emit_unwind_op_offset (cfg, code, X86_EBP, - cfa_offset);
x86_mov_reg_reg (code, X86_EBP, X86_ESP, 4);
mono_emit_unwind_op_def_cfa_reg (cfg, code, X86_EBP);
+ } else {
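+ /* No frame pointer; locals will be addressed relative to ESP. */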
+ cfg->frame_reg = X86_ESP;
}
alloc_size = cfg->stack_offset;
if (G_UNLIKELY (required_code_size >= (cfg->code_size - offset))) {
while (required_code_size >= (cfg->code_size - offset))
cfg->code_size *= 2;
- cfg->native_code = g_realloc (cfg->native_code, cfg->code_size);
+ cfg->native_code = mono_realloc_native_code (cfg);
code = cfg->native_code + offset;
mono_jit_stats.code_reallocs++;
}
/* max alignment for loops */
if ((cfg->opt & MONO_OPT_LOOP) && bb_is_loop_start (bb))
max_offset += LOOP_ALIGNMENT;
-
+#ifdef __native_client_codegen__
+ /* max alignment for native client */
+ max_offset += kNaClAlignment;
+#endif
MONO_BB_FOR_EACH_INS (bb, ins) {
if (ins->opcode == OP_LABEL)
ins->inst_c1 = max_offset;
-
+#ifdef __native_client_codegen__
+ {
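+ /* Worst case: the instruction gets padded to the start of the next bundle so it does not straddle a boundary. */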
+ int space_in_block = kNaClAlignment -
+ ((max_offset + cfg->code_len) & kNaClAlignmentMask);
+ int max_len = ((guint8 *)ins_get_spec (ins->opcode))[MONO_INST_LEN];
+ if (space_in_block < max_len && max_len < kNaClAlignment) {
+ max_offset += space_in_block;
+ }
+ }
+#endif /* __native_client_codegen__ */
max_offset += ((guint8 *)ins_get_spec (ins->opcode))[MONO_INST_LEN];
}
}
while (cfg->code_len + max_epilog_size > (cfg->code_size - 16)) {
cfg->code_size *= 2;
- cfg->native_code = g_realloc (cfg->native_code, cfg->code_size);
+ cfg->native_code = mono_realloc_native_code (cfg);
mono_jit_stats.code_reallocs++;
}
}
/* Load returned vtypes into registers if needed */
- cinfo = get_call_info (cfg->generic_sharing_context, cfg->mempool, sig, FALSE);
+ cinfo = get_call_info (cfg->generic_sharing_context, cfg->mempool, sig);
if (cinfo->ret.storage == ArgValuetypeInReg) {
for (quad = 0; quad < 2; quad ++) {
switch (cinfo->ret.pair_storage [quad]) {
while (cfg->code_len + code_size > (cfg->code_size - 16)) {
cfg->code_size *= 2;
- cfg->native_code = g_realloc (cfg->native_code, cfg->code_size);
+ cfg->native_code = mono_realloc_native_code (cfg);
mono_jit_stats.code_reallocs++;
}
guint32 size;
/* Compute size of code following the push <OFFSET> */
+#if defined(__default_codegen__)
size = 5 + 5;
-
+#elif defined(__native_client_codegen__)
+ code = mono_nacl_align (code);
+ size = kNaClAlignment;
+#endif
/*This is aligned to 16 bytes by the callee. This way we save a few bytes here.*/
if ((code - cfg->native_code) - throw_ip < 126 - size) {
//[1 + 5] x86_jump_mem(inst,mem)
#define CMP_SIZE 6
+#if defined(__default_codegen__)
#define BR_SMALL_SIZE 2
#define BR_LARGE_SIZE 5
+#elif defined(__native_client_codegen__)
+/* I suspect the size calculation below is actually incorrect. */
+/* TODO: fix the calculation that uses these sizes. */
+#define BR_SMALL_SIZE 16
+#define BR_LARGE_SIZE 12
+#endif /*__native_client_codegen__*/
#define JUMP_IMM_SIZE 6
#define ENABLE_WRONG_METHOD_CHECK 0
#define DEBUG_IMT 0
}
size += item->chunk_size;
}
+#if defined(__native_client__) && defined(__native_client_codegen__)
+ /* In Native Client we don't re-use thunks; allocate from the */
+ /* normal code manager paths instead. */
+ code = mono_domain_code_reserve (domain, size);
+#else
if (fail_tramp)
code = mono_method_alloc_generic_virtual_thunk (domain, size);
else
code = mono_domain_code_reserve (domain, size);
+#endif
start = code;
for (i = 0; i < count; ++i) {
MonoIMTCheckItem *item = imt_entries [i];
g_free (buff);
}
#endif
+ if (mono_jit_map_is_enabled ()) {
+ char *buff;
+ if (vtable)
+ buff = g_strdup_printf ("imt_%s_%s_entries_%d", vtable->klass->name_space, vtable->klass->name, count);
+ else
+ buff = g_strdup_printf ("imt_thunk_entries_%d", count);
+ mono_emit_jit_tramp (start, code - start, buff);
+ g_free (buff);
+ }
+
+ nacl_domain_code_validate (domain, &start, size, &code);
return start;
}
return (MonoVTable*) regs [MONO_ARCH_RGCTX_REG];
}
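+/*
+ * mono_arch_get_cie_program:
+ *
+ * Describe the unwind state at function entry: the CFA is ESP+4 and the
+ * return address is saved at CFA-4 (recorded under the pseudo-register
+ * index X86_NREG).
+ */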
+GSList*
+mono_arch_get_cie_program (void)
+{
+ GSList *l = NULL;
+
+ mono_add_unwind_op_def_cfa (l, (guint8*)NULL, (guint8*)NULL, X86_ESP, 4);
+ mono_add_unwind_op_offset (l, (guint8*)NULL, (guint8*)NULL, X86_NREG, -4);
+
+ return l;
+}
+
MonoInst*
mono_arch_emit_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
{
return can_write;
}
-gpointer
-mono_arch_get_vcall_slot (guint8 *code, mgreg_t *regs, int *displacement)
-{
- guint8 buf [8];
- guint8 reg = 0;
- gint32 disp = 0;
-
- mono_breakpoint_clean_code (NULL, code, 8, buf, sizeof (buf));
- code = buf + 8;
-
- *displacement = 0;
-
- code -= 6;
-
- /*
- * A given byte sequence can match more than case here, so we have to be
- * really careful about the ordering of the cases. Longer sequences
- * come first.
- * There are two types of calls:
- * - direct calls: 0xff address_byte 8/32 bits displacement
- * - indirect calls: nop nop nop <call>
- * The nops make sure we don't confuse the instruction preceeding an indirect
- * call with a direct call.
- */
- if ((code [1] != 0xe8) && (code [3] == 0xff) && ((code [4] & 0x18) == 0x10) && ((code [4] >> 6) == 1)) {
- reg = code [4] & 0x07;
- disp = (signed char)code [5];
- } else if ((code [0] == 0xff) && ((code [1] & 0x18) == 0x10) && ((code [1] >> 6) == 2)) {
- reg = code [1] & 0x07;
- disp = *((gint32*)(code + 2));
- } else if ((code [1] == 0xe8)) {
- return NULL;
- } else if ((code [4] == 0xff) && (((code [5] >> 6) & 0x3) == 0) && (((code [5] >> 3) & 0x7) == 2)) {
- /*
- * This is a interface call
- * 8b 40 30 mov 0x30(%eax),%eax
- * ff 10 call *(%eax)
- */
- disp = 0;
- reg = code [5] & 0x07;
- }
- else
- return NULL;
-
- *displacement = disp;
- return (gpointer)regs [reg];
-}
-
/*
* mono_x86_get_this_arg_offset:
*
guint32
mono_x86_get_this_arg_offset (MonoGenericSharingContext *gsctx, MonoMethodSignature *sig)
{
- CallInfo *cinfo = NULL;
- int offset;
-
- if (MONO_TYPE_ISSTRUCT (sig->ret)) {
- cinfo = get_call_info (gsctx, NULL, sig, FALSE);
-
- offset = cinfo->args [0].offset;
- } else {
- offset = 0;
- }
-
- return offset;
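+ /* 'this' is always passed first now; the vret arg, if any, follows it. */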
+ return 0;
}
gpointer
-mono_arch_get_this_arg_from_call (MonoGenericSharingContext *gsctx, MonoMethodSignature *sig,
- mgreg_t *regs, guint8 *code)
+mono_arch_get_this_arg_from_call (mgreg_t *regs, guint8 *code)
{
guint32 esp = regs [X86_ESP];
CallInfo *cinfo = NULL;
gpointer res;
int offset;
- /*
- * Avoid expensive calls to get_generic_context_from_code () + get_call_info
- * if possible.
- */
- if (MONO_TYPE_ISSTRUCT (sig->ret)) {
- if (!gsctx && code)
- gsctx = mono_get_generic_context_from_code (code);
- cinfo = get_call_info (gsctx, NULL, sig, FALSE);
-
- offset = cinfo->args [0].offset;
- } else {
- offset = 0;
- }
+ offset = 0;
/*
* The stack looks like:
* <other args>
* <this=delegate>
- * <possible vtype return address>
* <return addr>
* <4 pointers pushed by mono_arch_create_trampoline_code ()>
*/
get_delegate_invoke_impl (gboolean has_target, guint32 param_count, guint32 *code_len)
{
guint8 *code, *start;
+ int code_reserve = 64;
/*
* The stack contains:
*/
if (has_target) {
- start = code = mono_global_codeman_reserve (64);
+ start = code = mono_global_codeman_reserve (code_reserve);
/* Replace the this argument with the target */
x86_mov_reg_membase (code, X86_EAX, X86_ESP, 4, 4);
x86_mov_membase_reg (code, X86_ESP, 4, X86_ECX, 4);
x86_jump_membase (code, X86_EAX, G_STRUCT_OFFSET (MonoDelegate, method_ptr));
- g_assert ((code - start) < 64);
+ g_assert ((code - start) < code_reserve);
} else {
int i = 0;
/* 8 for mov_reg and jump, plus 8 for each parameter */
- int code_reserve = 8 + (param_count * 8);
-
+#ifdef __native_client_codegen__
+ /* TODO: calculate this size correctly */
+ code_reserve = 13 + (param_count * 8) + 2 * kNaClAlignment;
+#else
+ code_reserve = 8 + (param_count * 8);
+#endif /* __native_client_codegen__ */
/*
* The stack contains:
* <args in reverse order>
g_assert ((code - start) < code_reserve);
}
+ nacl_global_codeman_validate (&start, code_reserve, &code);
mono_debug_add_delegate_trampoline (start, code - start);
if (code_len)
*code_len = code - start;
+ if (mono_jit_map_is_enabled ()) {
+ char *buff;
+ if (has_target)
+ buff = (char*)"delegate_invoke_has_target";
+ else
+ buff = g_strdup_printf ("delegate_invoke_no_target_%d", param_count);
+ mono_emit_jit_tramp (start, code - start, buff);
+ if (!has_target)
+ g_free (buff);
+ }
+
return start;
}