#define CALLCONV_IS_STDCALL(sig) (((sig)->call_convention) == MONO_CALL_STDCALL)
#endif
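+/* EBX, ESI and EDI are the callee-saved integer registers on x86 (EBP is handled separately as the frame pointer) */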
+#define X86_IS_CALLEE_SAVED_REG(reg) (((reg) == X86_EBX) || ((reg) == X86_EDI) || ((reg) == X86_ESI))
+
MonoBreakpointInfo
mono_breakpoint_info [MONO_BREAKPOINT_ARRAY_SIZE];
-static gpointer
-mono_realloc_native_code (MonoCompile *cfg)
-{
-#ifdef __native_client_codegen__
- guint old_padding;
- gpointer native_code;
- guint alignment_check;
-
- /* Save the old alignment offset so we can re-align after the realloc. */
- old_padding = (guint)(cfg->native_code - cfg->native_code_alloc);
-
- cfg->native_code_alloc = g_realloc (cfg->native_code_alloc,
- cfg->code_size + kNaClAlignment);
-
- /* Align native_code to next nearest kNaClAlignment byte. */
- native_code = (guint)cfg->native_code_alloc + kNaClAlignment;
- native_code = (guint)native_code & ~kNaClAlignmentMask;
-
- /* Shift the data to be 32-byte aligned again. */
- memmove (native_code, cfg->native_code_alloc + old_padding, cfg->code_size);
-
- alignment_check = (guint)native_code & kNaClAlignmentMask;
- g_assert (alignment_check == 0);
- return native_code;
-#else
- return g_realloc (cfg->native_code, cfg->code_size);
-#endif
-}
#ifdef __native_client_codegen__
+const guint kNaClAlignment = kNaClAlignmentX86;
+const guint kNaClAlignmentMask = kNaClAlignmentMaskX86;
+
+/* Default alignment for Native Client is 32-byte. */
+gint8 nacl_align_byte = -32; /* signed version of 0xe0 */
/* mono_arch_nacl_pad: Add pad bytes of alignment instructions at code, */
/* checking that the padding does not cross an alignment boundary. */
* For x86 win32, see ???.
*/
static CallInfo*
-get_call_info_internal (MonoGenericSharingContext *gsctx, CallInfo *cinfo, MonoMethodSignature *sig, gboolean is_pinvoke)
+get_call_info_internal (MonoGenericSharingContext *gsctx, CallInfo *cinfo, MonoMethodSignature *sig)
{
guint32 i, gr, fr, pstart;
MonoType *ret_type;
int n = sig->hasthis + sig->param_count;
guint32 stack_size = 0;
+ gboolean is_pinvoke = sig->pinvoke;
gr = 0;
fr = 0;
}
static CallInfo*
-get_call_info (MonoGenericSharingContext *gsctx, MonoMemPool *mp, MonoMethodSignature *sig, gboolean is_pinvoke)
+get_call_info (MonoGenericSharingContext *gsctx, MonoMemPool *mp, MonoMethodSignature *sig)
{
int n = sig->hasthis + sig->param_count;
CallInfo *cinfo;
else
cinfo = g_malloc0 (sizeof (CallInfo) + (sizeof (ArgInfo) * n));
- return get_call_info_internal (gsctx, cinfo, sig, is_pinvoke);
+ return get_call_info_internal (gsctx, cinfo, sig);
}
/*
cinfo = (CallInfo*)g_newa (guint8*, len);
memset (cinfo, 0, len);
- cinfo = get_call_info_internal (NULL, cinfo, csig, FALSE);
+ cinfo = get_call_info_internal (NULL, cinfo, csig);
arg_info [0].offset = offset;
return args_size;
}
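+/*
+ * mono_x86_tail_call_supported:
+ *
+ *   A tail call reuses the caller's stack frame, so it is only possible when
+ * the callee's arguments fit into the stack space the caller has already
+ * allocated, and when a value-type return does not require a hidden address
+ * argument.
+ */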
+gboolean
+mono_x86_tail_call_supported (MonoMethodSignature *caller_sig, MonoMethodSignature *callee_sig)
+{
+ CallInfo *c1, *c2;
+ gboolean res;
+
+ c1 = get_call_info (NULL, NULL, caller_sig);
+ c2 = get_call_info (NULL, NULL, callee_sig);
+ res = c1->stack_usage >= c2->stack_usage;
+ if (callee_sig->ret && MONO_TYPE_ISSTRUCT (callee_sig->ret) && c2->ret.storage != ArgValuetypeInReg)
+ /* An address on the caller's stack is passed as the first argument */
+ res = FALSE;
+
+ g_free (c1);
+ g_free (c2);
+
+ return res;
+}
+
static const guchar cpuid_impl [] = {
0x55, /* push %ebp */
0x89, 0xe5, /* mov %esp,%ebp */
header = cfg->header;
sig = mono_method_signature (cfg->method);
- cinfo = get_call_info (cfg->generic_sharing_context, cfg->mempool, sig, FALSE);
+ cinfo = get_call_info (cfg->generic_sharing_context, cfg->mempool, sig);
cfg->frame_reg = X86_EBP;
offset = 0;
offset += (locals_stack_align - 1);
offset &= ~(locals_stack_align - 1);
}
+ cfg->locals_min_stack_offset = - (offset + locals_stack_size);
+ cfg->locals_max_stack_offset = - offset;
/*
* EBP is at alignment 8 % MONO_ARCH_FRAME_ALIGNMENT, so if we
* have locals larger than 8 bytes we need to make sure that
sig = mono_method_signature (cfg->method);
- cinfo = get_call_info (cfg->generic_sharing_context, cfg->mempool, sig, FALSE);
+ cinfo = get_call_info (cfg->generic_sharing_context, cfg->mempool, sig);
if (cinfo->ret.storage == ArgValuetypeInReg)
cfg->ret_var_is_local = TRUE;
n = sig->param_count + sig->hasthis;
- cinfo = get_call_info (cfg->generic_sharing_context, cfg->mempool, sig, sig->pinvoke);
+ cinfo = get_call_info (cfg->generic_sharing_context, cfg->mempool, sig);
linfo = mono_mempool_alloc0 (cfg->mempool, sizeof (LLVMCallInfo) + (sizeof (LLVMArgInfo) * n));
sig = call->signature;
n = sig->param_count + sig->hasthis;
- cinfo = get_call_info (cfg->generic_sharing_context, cfg->mempool, sig, FALSE);
+ cinfo = get_call_info (cfg->generic_sharing_context, cfg->mempool, sig);
if (!sig->pinvoke && (sig->call_convention == MONO_CALL_VARARG))
sentinelpos = sig->sentinelpos + (sig->hasthis ? 1 : 0);
sig = mono_method_signature (method);
- cinfo = get_call_info (cfg->generic_sharing_context, cfg->mempool, sig, FALSE);
+ cinfo = get_call_info (cfg->generic_sharing_context, cfg->mempool, sig);
/* This is the opposite of the code in emit_prolog */
#ifndef DISABLE_JIT
+#if defined(__native_client__) || defined(__native_client_codegen__)
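+/* mono_nacl_gc: poll for a thread suspension requested by the Native Client GC */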
+void
+mono_nacl_gc (void)
+{
+#ifdef __native_client_gc__
+ __nacl_suspend_thread_if_needed();
+#endif
+}
+#endif
+
void
mono_arch_output_basic_block (MonoCompile *cfg, MonoBasicBlock *bb)
{
cfg->disable_aot = TRUE;
break;
}
+ case OP_TAILCALL: {
+ MonoCallInst *call = (MonoCallInst*)ins;
+ int pos = 0, i;
+
+ /* FIXME: no tracing support... */
+ if (cfg->prof_options & MONO_PROFILE_ENTER_LEAVE)
+ code = mono_arch_instrument_epilog (cfg, mono_profiler_method_leave, code, FALSE);
+ /* reset offset to make max_len work */
+ offset = code - cfg->native_code;
+
+ g_assert (!cfg->method->save_lmf);
+
+ //code = emit_load_volatile_arguments (cfg, code);
+
+ /* restore callee saved registers */
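+ /* first compute the (negative) offset of the saved-register area relative to EBP */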
+ for (i = 0; i < X86_NREG; ++i)
+ if (X86_IS_CALLEE_SAVED_REG (i) && cfg->used_int_regs & (1 << i))
+ pos -= 4;
+ if (cfg->used_int_regs & (1 << X86_ESI)) {
+ x86_mov_reg_membase (code, X86_ESI, X86_EBP, pos, 4);
+ pos += 4;
+ }
+ if (cfg->used_int_regs & (1 << X86_EDI)) {
+ x86_mov_reg_membase (code, X86_EDI, X86_EBP, pos, 4);
+ pos += 4;
+ }
+ if (cfg->used_int_regs & (1 << X86_EBX)) {
+ x86_mov_reg_membase (code, X86_EBX, X86_EBP, pos, 4);
+ pos += 4;
+ }
+
+ /* Copy arguments on the stack to our argument area */
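+ /* EBP+8 skips the saved EBP and the return address in the caller's frame */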
+ for (i = 0; i < call->stack_usage; i += 4) {
+ x86_mov_reg_membase (code, X86_EAX, X86_ESP, i, 4);
+ x86_mov_membase_reg (code, X86_EBP, 8 + i, X86_EAX, 4);
+ }
+
+ /* restore ESP/EBP */
+ x86_leave (code);
+ offset = code - cfg->native_code;
+ mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_METHOD_JUMP, ins->inst_p0);
+ x86_jump32 (code, 0);
+
+ cfg->disable_aot = TRUE;
+ break;
+ }
case OP_CHECK_THIS:
/* ensure ins->sreg1 is not NULL
* note that cmp DWORD PTR [eax], eax is one byte shorter than
code = emit_call (cfg, code, MONO_PATCH_INFO_METHOD, call->method);
else
code = emit_call (cfg, code, MONO_PATCH_INFO_ABS, call->fptr);
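+ /* record the return-address offset so precise GC stack maps can describe this callsite */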
+ ins->flags |= MONO_INST_GC_CALLSITE;
+ ins->backend.pc_offset = code - cfg->native_code;
if (call->stack_usage && !CALLCONV_IS_STDCALL (call->signature)) {
/* a pop is one byte, while an add reg, imm is 3. So if there are 4 or 8
* bytes to pop, we want to use pops. GCC does this (note it won't happen
case OP_CALL_REG:
call = (MonoCallInst*)ins;
x86_call_reg (code, ins->sreg1);
+ ins->flags |= MONO_INST_GC_CALLSITE;
+ ins->backend.pc_offset = code - cfg->native_code;
if (call->stack_usage && !CALLCONV_IS_STDCALL (call->signature)) {
if (call->stack_usage == 4)
x86_pop_reg (code, X86_ECX);
call = (MonoCallInst*)ins;
x86_call_membase (code, ins->sreg1, ins->inst_offset);
+ ins->flags |= MONO_INST_GC_CALLSITE;
+ ins->backend.pc_offset = code - cfg->native_code;
if (call->stack_usage && !CALLCONV_IS_STDCALL (call->signature)) {
if (call->stack_usage == 4)
x86_pop_reg (code, X86_ECX);
x86_push_reg (code, ins->sreg1);
code = emit_call (cfg, code, MONO_PATCH_INFO_INTERNAL_METHOD,
(gpointer)"mono_arch_throw_exception");
+ ins->flags |= MONO_INST_GC_CALLSITE;
+ ins->backend.pc_offset = code - cfg->native_code;
break;
}
case OP_RETHROW: {
x86_push_reg (code, ins->sreg1);
code = emit_call (cfg, code, MONO_PATCH_INFO_INTERNAL_METHOD,
(gpointer)"mono_arch_rethrow_exception");
+ ins->flags |= MONO_INST_GC_CALLSITE;
+ ins->backend.pc_offset = code - cfg->native_code;
break;
}
case OP_CALL_HANDLER:
x86_cmpxchg_membase_reg (code, ins->sreg1, ins->inst_offset, ins->sreg2);
break;
}
-#ifdef HAVE_SGEN_GC
case OP_CARD_TABLE_WBARRIER: {
int ptr = ins->sreg1;
int value = ins->sreg2;
x86_patch (br, code);
break;
}
-#endif
#ifdef MONO_ARCH_SIMD_INTRINSICS
case OP_ADDPS:
x86_sse_alu_ps_reg_reg (code, X86_SSE_ADD, ins->sreg1, ins->sreg2);
MONO_VARINFO (cfg, ins->inst_c0)->live_range_end = code - cfg->native_code;
break;
}
+ case OP_NACL_GC_SAFE_POINT: {
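+ /* threads in NaCl poll for pending GC suspensions at safe points instead of being suspended asynchronously */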
+#if defined(__native_client_codegen__)
+ code = emit_call (cfg, code, MONO_PATCH_INFO_ABS, (gpointer)mono_nacl_gc);
+#endif
+ break;
+ }
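+ /* the OP_GC_* pseudo ops emit no code; they only record native offsets consumed later when GC maps are built */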
+ case OP_GC_LIVENESS_DEF:
+ case OP_GC_LIVENESS_USE:
+ case OP_GC_PARAM_SLOT_LIVENESS_DEF:
+ ins->backend.pc_offset = code - cfg->native_code;
+ break;
+ case OP_GC_SPILL_SLOT_LIVENESS_DEF:
+ ins->backend.pc_offset = code - cfg->native_code;
+ bb->spill_slot_defs = g_slist_prepend_mempool (cfg->mempool, bb->spill_slot_defs, ins);
+ break;
default:
g_warning ("unknown opcode %s\n", mono_inst_name (ins->opcode));
g_assert_not_reached ();
case MONO_PATCH_INFO_GENERIC_CLASS_INIT:
case MONO_PATCH_INFO_MONITOR_ENTER:
case MONO_PATCH_INFO_MONITOR_EXIT:
+#if defined(__native_client_codegen__) && defined(__native_client__)
+ if (nacl_is_code_address (code)) {
+ /* For tail calls, code is patched after being installed */
+ /* but not through the normal "patch callsite" method. */
+ unsigned char buf[kNaClAlignment];
+ unsigned char *aligned_code = (unsigned char *)((uintptr_t)code & ~kNaClAlignmentMask);
+ unsigned char *_target = target;
+ int ret;
+ /* All patch targets modified in x86_patch */
+ /* are IP relative. */
+ _target = _target + (uintptr_t)buf - (uintptr_t)aligned_code;
+ memcpy (buf, aligned_code, kNaClAlignment);
+ /* Patch a temp buffer of bundle size, */
+ /* then install to actual location. */
+ x86_patch (buf + ((uintptr_t)code - (uintptr_t)aligned_code), _target);
+ ret = nacl_dyncode_modify (aligned_code, buf, kNaClAlignment);
+ g_assert (ret == 0);
+ }
+ else {
+ x86_patch (ip, target);
+ }
+#else
x86_patch (ip, target);
+#endif
break;
case MONO_PATCH_INFO_NONE:
break;
+ case MONO_PATCH_INFO_R4:
+ case MONO_PATCH_INFO_R8: {
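+ /* fp constants are data references, not code, so the absolute pointer is patched directly */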
+ guint32 offset = mono_arch_get_patch_offset (ip);
+ *((gconstpointer *)(ip + offset)) = target;
+ break;
+ }
default: {
guint32 offset = mono_arch_get_patch_offset (ip);
+#if !defined(__native_client__)
*((gconstpointer *)(ip + offset)) = target;
+#else
+ *((gconstpointer *)(ip + offset)) = nacl_modify_patch_target (target);
+#endif
break;
}
}
if (cfg->prof_options & MONO_PROFILE_ENTER_LEAVE)
cfg->code_size += 512;
-#ifdef __native_client_codegen__
+#if defined(__default_codegen__)
+ code = cfg->native_code = g_malloc (cfg->code_size);
+#elif defined(__native_client_codegen__)
/* native_code_alloc is not 32-byte aligned, native_code is. */
cfg->native_code_alloc = g_malloc (cfg->code_size + kNaClAlignment);
alignment_check = (guint)cfg->native_code & kNaClAlignmentMask;
g_assert(alignment_check == 0);
-#else
- code = cfg->native_code = g_malloc (cfg->code_size);
#endif
/* Offset between RSP and the CFA */
mono_emit_unwind_op_offset (cfg, code, X86_EBP, - cfa_offset);
x86_mov_reg_reg (code, X86_EBP, X86_ESP, 4);
mono_emit_unwind_op_def_cfa_reg (cfg, code, X86_EBP);
+ } else {
+ cfg->frame_reg = X86_ESP;
}
alloc_size = cfg->stack_offset;
}
/* Load returned vtypes into registers if needed */
- cinfo = get_call_info (cfg->generic_sharing_context, cfg->mempool, sig, FALSE);
+ cinfo = get_call_info (cfg->generic_sharing_context, cfg->mempool, sig);
if (cinfo->ret.storage == ArgValuetypeInReg) {
for (quad = 0; quad < 2; quad ++) {
switch (cinfo->ret.pair_storage [quad]) {
guint32 size;
/* Compute size of code following the push <OFFSET> */
-#ifdef __native_client_codegen__
+#if defined(__default_codegen__)
+ size = 5 + 5;
+#elif defined(__native_client_codegen__)
code = mono_nacl_align (code);
size = kNaClAlignment;
-#else
- size = 5 + 5;
#endif
/*This is aligned to 16 bytes by the callee. This way we save a few bytes here.*/
//[1 + 5] x86_jump_mem(inst,mem)
#define CMP_SIZE 6
-#ifdef __native_client_codegen__
-/* These constants should be coming from cpu-x86.md */
+#if defined(__default_codegen__)
+#define BR_SMALL_SIZE 2
+#define BR_LARGE_SIZE 5
+#elif defined(__native_client_codegen__)
/* I suspect the size calculation below is actually incorrect. */
-/* TODO: fix the calculation that uses these sizes. */
+/* TODO: fix the calculation that uses these sizes. */
#define BR_SMALL_SIZE 16
#define BR_LARGE_SIZE 12
-#else
-#define BR_SMALL_SIZE 2
-#define BR_LARGE_SIZE 5
-#endif /* __native_client_codegen__ */
+#endif /* __native_client_codegen__ */
#define JUMP_IMM_SIZE 6
#define ENABLE_WRONG_METHOD_CHECK 0
#define DEBUG_IMT 0
int size = 0;
guint8 *code, *start;
-#ifdef __native_client_codegen__
- /* g_print("mono_arch_build_imt_thunk needs to be aligned.\n"); */
-#endif
for (i = 0; i < count; ++i) {
MonoIMTCheckItem *item = imt_entries [i];
if (item->is_equals) {
}
size += item->chunk_size;
}
+#if defined(__native_client__) && defined(__native_client_codegen__)
+ /* In Native Client, we don't re-use thunks; allocate from the */
+ /* normal code manager paths. */
+ code = mono_domain_code_reserve (domain, size);
+#else
if (fail_tramp)
code = mono_method_alloc_generic_virtual_thunk (domain, size);
else
code = mono_domain_code_reserve (domain, size);
+#endif
start = code;
for (i = 0; i < count; ++i) {
MonoIMTCheckItem *item = imt_entries [i];
g_free (buff);
}
+ nacl_domain_code_validate (domain, &start, size, &code);
+
return start;
}
get_delegate_invoke_impl (gboolean has_target, guint32 param_count, guint32 *code_len)
{
guint8 *code, *start;
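+ /* default reservation, enough for the has_target case; recomputed below when arguments must be copied */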
+ int code_reserve = 64;
/*
* The stack contains:
*/
if (has_target) {
- start = code = mono_global_codeman_reserve (64);
+ start = code = mono_global_codeman_reserve (code_reserve);
/* Replace the this argument with the target */
x86_mov_reg_membase (code, X86_EAX, X86_ESP, 4, 4);
x86_mov_membase_reg (code, X86_ESP, 4, X86_ECX, 4);
x86_jump_membase (code, X86_EAX, G_STRUCT_OFFSET (MonoDelegate, method_ptr));
- g_assert ((code - start) < 64);
+ g_assert ((code - start) < code_reserve);
} else {
int i = 0;
/* 8 for mov_reg and jump, plus 8 for each parameter */
#ifdef __native_client_codegen__
/* TODO: calculate this size correctly */
- int code_reserve = 13 + (param_count * 8) + 2 * kNaClAlignment;
+ code_reserve = 13 + (param_count * 8) + 2 * kNaClAlignment;
#else
- int code_reserve = 8 + (param_count * 8);
+ code_reserve = 8 + (param_count * 8);
#endif /* __native_client_codegen__ */
/*
* The stack contains:
g_assert ((code - start) < code_reserve);
}
+ nacl_global_codeman_validate (&start, code_reserve, &code);
mono_debug_add_delegate_trampoline (start, code - start);
if (code_len)