}
}
+/*
+ * mono_x86_patch:
+ *
+ *   Out-of-line entry point for the x86_patch () codegen macro: patch the
+ * branch/call instruction at CODE so that it transfers to TARGET.
+ * Exists so callers outside this file can patch code without pulling in
+ * the codegen macro headers.
+ */
+void
+mono_x86_patch (unsigned char* code, gpointer target)
+{
+ x86_patch (code, (unsigned char*)target);
+}
typedef enum {
ArgInIReg,
cinfo->ret.storage = ArgOnDoubleFpStack;
break;
case MONO_TYPE_GENERICINST:
- if (!mono_type_generic_inst_is_valuetype (sig->ret)) {
+ if (!mono_type_generic_inst_is_valuetype (ret_type)) {
cinfo->ret.storage = ArgInIReg;
cinfo->ret.reg = X86_EAX;
break;
add_general (&gr, &stack_size, ainfo);
break;
case MONO_TYPE_GENERICINST:
- if (!mono_type_generic_inst_is_valuetype (sig->params [i])) {
+ if (!mono_type_generic_inst_is_valuetype (ptype)) {
add_general (&gr, &stack_size, ainfo);
break;
}
ss_trigger_page = mono_valloc (NULL, mono_pagesize (), MONO_MMAP_READ);
bp_trigger_page = mono_valloc (NULL, mono_pagesize (), MONO_MMAP_READ|MONO_MMAP_32BIT);
mono_mprotect (bp_trigger_page, mono_pagesize (), 0);
+
+ mono_aot_register_jit_icall ("mono_x86_throw_exception", mono_x86_throw_exception);
+ mono_aot_register_jit_icall ("mono_x86_throw_corlib_exception", mono_x86_throw_corlib_exception);
}
/*
guint32 opts = 0;
*exclude_mask = 0;
+
+ if (mono_aot_only)
+ /* The cpuid function allocates from the global codeman */
+ return opts;
+
/* Feature Flags function, flags returned in EDX. */
if (cpuid (1, &eax, &ebx, &ecx, &edx)) {
if (edx & (1 << 15)) {
int eax, ebx, ecx, edx;
guint32 sse_opts = 0;
+ if (mono_aot_only)
+ /* The cpuid function allocates from the global codeman */
+ return sse_opts;
+
if (cpuid (1, &eax, &ebx, &ecx, &edx)) {
if (edx & (1 << 25))
- sse_opts |= 1 << SIMD_VERSION_SSE1;
+ sse_opts |= SIMD_VERSION_SSE1;
if (edx & (1 << 26))
- sse_opts |= 1 << SIMD_VERSION_SSE2;
+ sse_opts |= SIMD_VERSION_SSE2;
if (ecx & (1 << 0))
- sse_opts |= 1 << SIMD_VERSION_SSE3;
+ sse_opts |= SIMD_VERSION_SSE3;
if (ecx & (1 << 9))
- sse_opts |= 1 << SIMD_VERSION_SSSE3;
+ sse_opts |= SIMD_VERSION_SSSE3;
if (ecx & (1 << 19))
- sse_opts |= 1 << SIMD_VERSION_SSE41;
+ sse_opts |= SIMD_VERSION_SSE41;
if (ecx & (1 << 20))
- sse_opts |= 1 << SIMD_VERSION_SSE42;
+ sse_opts |= SIMD_VERSION_SSE42;
}
/* Yes, all this needs to be done to check for sse4a.
if ((((unsigned int) eax) >= 0x80000001) && (ebx == 0x68747541) && (ecx == 0x444D4163) && (edx == 0x69746E65)) {
cpuid (0x80000001, &eax, &ebx, &ecx, &edx);
if (ecx & (1 << 6))
- sse_opts |= 1 << SIMD_VERSION_SSE4a;
+ sse_opts |= SIMD_VERSION_SSE4a;
}
}
if (cfg->arch.need_stack_frame_inited)
return cfg->arch.need_stack_frame;
- header = mono_method_get_header (cfg->method);
+ header = cfg->header;
sig = mono_method_signature (cfg->method);
if (cfg->disable_omit_fp)
gint32 *offsets;
CallInfo *cinfo;
- header = mono_method_get_header (cfg->method);
+ header = cfg->header;
sig = mono_method_signature (cfg->method);
cinfo = get_call_info (cfg->generic_sharing_context, cfg->mempool, sig, FALSE);
int i, n;
CallInfo *cinfo;
ArgInfo *ainfo;
- int j;
LLVMCallInfo *linfo;
+ MonoType *t;
n = sig->param_count + sig->hasthis;
for (i = 0; i < n; ++i) {
ainfo = cinfo->args + i;
+ if (i >= sig->hasthis)
+ t = sig->params [i - sig->hasthis];
+ else
+ t = &mono_defaults.int_class->byval_arg;
+
linfo->args [i].storage = LLVMArgNone;
switch (ainfo->storage) {
linfo->args [i].storage = LLVMArgInFPReg;
break;
case ArgOnStack:
- if ((i >= sig->hasthis) && (MONO_TYPE_ISSTRUCT(sig->params [i - sig->hasthis]))) {
- linfo->args [i].storage = LLVMArgVtypeByVal;
+ if (MONO_TYPE_ISSTRUCT (t)) {
+ if (mono_class_value_size (mono_class_from_mono_type (t), NULL) == 0)
+ /* LLVM seems to allocate argument space for empty structures too */
+ linfo->args [i].storage = LLVMArgNone;
+ else
+ linfo->args [i].storage = LLVMArgVtypeByVal;
} else {
linfo->args [i].storage = LLVMArgInIReg;
- if (!sig->params [i - sig->hasthis]->byref) {
- if (sig->params [i - sig->hasthis]->type == MONO_TYPE_R4) {
+ if (t->byref) {
+ if (t->type == MONO_TYPE_R4)
linfo->args [i].storage = LLVMArgInFPReg;
- } else if (sig->params [i - sig->hasthis]->type == MONO_TYPE_R8) {
+ else if (t->type == MONO_TYPE_R8)
linfo->args [i].storage = LLVMArgInFPReg;
- }
}
}
break;
guchar *code = p;
int arg_size = 0, stack_usage = 0, save_mode = SAVE_NONE;
MonoMethod *method = cfg->method;
-
- switch (mini_type_get_underlying_type (cfg->generic_sharing_context, mono_method_signature (method)->ret)->type) {
+ MonoType *ret_type = mini_type_get_underlying_type (cfg->generic_sharing_context, mono_method_signature (method)->ret);
+
+ switch (ret_type->type) {
case MONO_TYPE_VOID:
/* special case string .ctor icall */
if (strcmp (".ctor", method->name) && method->klass == mono_defaults.string_class) {
stack_usage = enable_arguments ? 16 : 8;
break;
case MONO_TYPE_GENERICINST:
- if (!mono_type_generic_inst_is_valuetype (mono_method_signature (method)->ret)) {
+ if (!mono_type_generic_inst_is_valuetype (ret_type)) {
save_mode = SAVE_EAX;
stack_usage = enable_arguments ? 8 : 4;
break;
return code;
}
+/*
+ * mono_x86_have_tls_get:
+ *
+ *   Return whether the fast inline TLS read emitted by
+ * mono_x86_emit_tls_get () is usable.  On OSX that sequence hardcodes the
+ * %gs-relative layout used by pthread_getspecific (), so we verify that the
+ * libc implementation still begins with the exact 12-byte instruction
+ * sequence we expect; on other platforms we assume TRUE.
+ */
+gboolean
+mono_x86_have_tls_get (void)
+{
+#ifdef __APPLE__
+ guint32 *ins = (guint32*)pthread_getspecific;
+ /*
+ * We're looking for these two instructions:
+ *
+ * mov 0x4(%esp),%eax
+ * mov %gs:0x48(,%eax,4),%eax
+ */
+ return ins [0] == 0x0424448b && ins [1] == 0x85048b65 && ins [2] == 0x00000048;
+#else
+ return TRUE;
+#endif
+}
+
/*
* mono_x86_emit_tls_get:
* @code: buffer to store code to
guint8*
mono_x86_emit_tls_get (guint8* code, int dreg, int tls_offset)
{
-#ifdef TARGET_WIN32
+#if defined(__APPLE__)
+ x86_prefix (code, X86_GS_PREFIX);
+ x86_mov_reg_mem (code, dreg, 0x48 + tls_offset * 4, 4);
+#elif defined(TARGET_WIN32)
/*
* See the Under the Hood article in the May 1996 issue of Microsoft Systems
* Journal and/or a disassembly of the TlsGet () function.
case OP_NOT_NULL:
break;
case OP_SEQ_POINT: {
- int i, il_offset;
+ int i;
if (cfg->compile_aot)
NOT_IMPLEMENTED;
if (ins->flags & MONO_INST_SINGLE_STEP_LOC)
x86_alu_reg_mem (code, X86_CMP, X86_EAX, (guint32)ss_trigger_page);
- il_offset = ins->inst_imm;
+ mono_add_seq_point (cfg, bb, ins, code - cfg->native_code);
- if (!cfg->seq_points)
- cfg->seq_points = g_ptr_array_new ();
- g_ptr_array_add (cfg->seq_points, GUINT_TO_POINTER (il_offset));
- g_ptr_array_add (cfg->seq_points, GUINT_TO_POINTER (code - cfg->native_code));
/*
* A placeholder for a possible breakpoint inserted by
* mono_arch_set_breakpoint ().
x86_mov_reg_imm (code, ins->dreg, 0);
break;
case OP_LOAD_GOTADDR:
- x86_call_imm (code, 0);
- /*
- * The patch needs to point to the pop, since the GOT offset needs
- * to be added to that address.
- */
- mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_GOT_OFFSET, NULL);
- x86_pop_reg (code, ins->dreg);
- x86_alu_reg_imm (code, X86_ADD, ins->dreg, 0xf0f0f0f0);
+ g_assert (ins->dreg == MONO_ARCH_GOT_REG);
+ code = mono_arch_emit_load_got_addr (cfg->native_code, code, cfg, NULL);
break;
case OP_GOT_ENTRY:
mono_add_patch_info (cfg, offset, (MonoJumpInfoType)ins->inst_right->inst_i1, ins->inst_right->inst_p0);
x86_alu_reg_imm (code, X86_SUB, X86_ESP, MONO_ARCH_FRAME_ALIGNMENT - 4);
mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_BB, ins->inst_target_bb);
x86_call_imm (code, 0);
+ mono_cfg_add_try_hole (cfg, ins->inst_eh_block, code, bb);
x86_alu_reg_imm (code, X86_ADD, X86_ESP, MONO_ARCH_FRAME_ALIGNMENT - 4);
break;
case OP_START_HANDLER: {
guint8 *code;
gboolean need_stack_frame;
- cfg->code_size = MAX (mono_method_get_header (method)->code_size * 4, 10240);
+ cfg->code_size = MAX (cfg->header->code_size * 4, 10240);
if (cfg->prof_options & MONO_PROFILE_ENTER_LEAVE)
cfg->code_size += 512;
#endif
}
else {
- g_assert (!cfg->compile_aot);
- x86_push_imm (code, cfg->domain);
+ if (cfg->compile_aot) {
+ /*
+ * This goes before the saving of callee saved regs, so save the got reg
+ * ourselves.
+ */
+ x86_push_reg (code, MONO_ARCH_GOT_REG);
+ code = mono_arch_emit_load_got_addr (cfg->native_code, code, cfg, NULL);
+ x86_push_imm (code, 0);
+ } else {
+ x86_push_imm (code, cfg->domain);
+ }
code = emit_call (cfg, code, MONO_PATCH_INFO_INTERNAL_METHOD, (gpointer)"mono_jit_thread_attach");
x86_alu_reg_imm (code, X86_ADD, X86_ESP, 4);
+ if (cfg->compile_aot)
+ x86_pop_reg (code, MONO_ARCH_GOT_REG);
}
}
if (method->save_lmf) {
pos += sizeof (MonoLMF);
- if (cfg->compile_aot)
- cfg->disable_aot = TRUE;
-
/* save the current IP */
- mono_add_patch_info (cfg, code + 1 - cfg->native_code, MONO_PATCH_INFO_IP, NULL);
- x86_push_imm_template (code);
+ if (cfg->compile_aot) {
+ /* This pushes the current ip */
+ x86_call_imm (code, 0);
+ } else {
+ mono_add_patch_info (cfg, code + 1 - cfg->native_code, MONO_PATCH_INFO_IP, NULL);
+ x86_push_imm_template (code);
+ }
cfa_offset += sizeof (gpointer);
/* save all caller saved regs */
x86_alu_reg_imm (code, X86_ADD, X86_EAX, G_STRUCT_OFFSET (MonoJitTlsData, lmf));
#endif
} else {
+ if (cfg->compile_aot)
+ code = mono_arch_emit_load_got_addr (cfg->native_code, code, cfg, NULL);
code = emit_call (cfg, code, MONO_PATCH_INFO_INTERNAL_METHOD, (gpointer)"mono_get_lmf_addr");
}
#define BR_LARGE_SIZE 5
#define JUMP_IMM_SIZE 6
#define ENABLE_WRONG_METHOD_CHECK 0
+#define DEBUG_IMT 0
static int
imt_branch_distance (MonoIMTCheckItem **imt_entries, int start, int target)
if (!fail_tramp)
mono_stats.imt_thunks_size += code - start;
g_assert (code - start <= size);
+
+#if DEBUG_IMT
+ {
+ char *buff = g_strdup_printf ("thunk_for_class_%s_%s_entries_%d", vtable->klass->name_space, vtable->klass->name, count);
+ mono_disassemble_code (NULL, (guint8*)start, code - start, buff);
+ g_free (buff);
+ }
+#endif
+
return start;
}
{
return (MonoMethod*) regs [MONO_ARCH_IMT_REG];
}
-
-MonoObject*
-mono_arch_find_this_argument (mgreg_t *regs, MonoMethod *method, MonoGenericSharingContext *gsctx)
-{
- MonoMethodSignature *sig = mono_method_signature (method);
- CallInfo *cinfo = get_call_info (gsctx, NULL, sig, FALSE);
- int this_argument_offset;
- MonoObject *this_argument;
-
- /*
- * this is the offset of the this arg from esp as saved at the start of
- * mono_arch_create_trampoline_code () in tramp-x86.c.
- */
- this_argument_offset = 5;
- if (MONO_TYPE_ISSTRUCT (sig->ret) && (cinfo->ret.storage == ArgOnStack))
- this_argument_offset++;
-
- this_argument = * (MonoObject**) (((guint8*) regs [X86_ESP]) + this_argument_offset * sizeof (gpointer));
-
- g_free (cinfo);
- return this_argument;
-}
#endif
MonoVTable*
#define MAX_ARCH_DELEGATE_PARAMS 10
-gpointer
-mono_arch_get_delegate_invoke_impl (MonoMethodSignature *sig, gboolean has_target)
+static gpointer
+get_delegate_invoke_impl (gboolean has_target, guint32 param_count, guint32 *code_len)
{
guint8 *code, *start;
- if (sig->param_count > MAX_ARCH_DELEGATE_PARAMS)
- return NULL;
-
- /* FIXME: Support more cases */
- if (MONO_TYPE_ISSTRUCT (sig->ret))
- return NULL;
-
/*
* The stack contains:
* <delegate>
*/
if (has_target) {
- static guint8* cached = NULL;
- if (cached)
- return cached;
-
start = code = mono_global_codeman_reserve (64);
/* Replace the this argument with the target */
x86_jump_membase (code, X86_EAX, G_STRUCT_OFFSET (MonoDelegate, method_ptr));
g_assert ((code - start) < 64);
-
- mono_debug_add_delegate_trampoline (start, code - start);
-
- mono_memory_barrier ();
-
- cached = start;
} else {
- static guint8* cache [MAX_ARCH_DELEGATE_PARAMS + 1] = {NULL};
int i = 0;
/* 8 for mov_reg and jump, plus 8 for each parameter */
- int code_reserve = 8 + (sig->param_count * 8);
-
- for (i = 0; i < sig->param_count; ++i)
- if (!mono_is_regsize_var (sig->params [i]))
- return NULL;
-
- code = cache [sig->param_count];
- if (code)
- return code;
+ int code_reserve = 8 + (param_count * 8);
/*
* The stack contains:
x86_mov_reg_membase (code, X86_ECX, X86_ESP, 4, 4);
/* move args up */
- for (i = 0; i < sig->param_count; ++i) {
+ for (i = 0; i < param_count; ++i) {
x86_mov_reg_membase (code, X86_EAX, X86_ESP, (i+2)*4, 4);
x86_mov_membase_reg (code, X86_ESP, (i+1)*4, X86_EAX, 4);
}
x86_jump_membase (code, X86_ECX, G_STRUCT_OFFSET (MonoDelegate, method_ptr));
g_assert ((code - start) < code_reserve);
+ }
+
+ mono_debug_add_delegate_trampoline (start, code - start);
+
+ if (code_len)
+ *code_len = code - start;
+
+ return start;
+}
+
+/*
+ * mono_arch_get_delegate_invoke_impls:
+ *
+ *   Return a list of MonoTrampInfo structures describing every delegate
+ * invoke implementation, so the AOT compiler can precompile them.  The
+ * trampoline names must match the ones looked up by
+ * mono_arch_get_delegate_invoke_impl () in full-aot mode.
+ */
+GSList*
+mono_arch_get_delegate_invoke_impls (void)
+{
+ GSList *res = NULL;
+ guint8 *code;
+ guint32 code_len;
+ int i;
+
+ code = get_delegate_invoke_impl (TRUE, 0, &code_len);
+ res = g_slist_prepend (res, mono_tramp_info_create (g_strdup ("delegate_invoke_impl_has_target"), code, code_len, NULL, NULL));
+
+ /*
+ * mono_arch_get_delegate_invoke_impl () accepts signatures with up to and
+ * including MAX_ARCH_DELEGATE_PARAMS parameters (it only rejects '>'), so
+ * MAX_ARCH_DELEGATE_PARAMS + 1 variants have to be generated here;
+ * otherwise the aot lookup of "delegate_invoke_impl_target_10" fails.
+ */
+ for (i = 0; i <= MAX_ARCH_DELEGATE_PARAMS; ++i) {
+ code = get_delegate_invoke_impl (FALSE, i, &code_len);
+ res = g_slist_prepend (res, mono_tramp_info_create (g_strdup_printf ("delegate_invoke_impl_target_%d", i), code, code_len, NULL, NULL));
+ }
+
+ return res;
+}
+
+gpointer
+mono_arch_get_delegate_invoke_impl (MonoMethodSignature *sig, gboolean has_target)
+{
+ guint8 *code, *start;
+
+ if (sig->param_count > MAX_ARCH_DELEGATE_PARAMS)
+ return NULL;
+
+ /* FIXME: Support more cases */
+ if (MONO_TYPE_ISSTRUCT (sig->ret))
+ return NULL;
+
+ /*
+ * The stack contains:
+ * <delegate>
+ * <return addr>
+ */
+
+ if (has_target) {
+ static guint8* cached = NULL;
+ if (cached)
+ return cached;
+
+ if (mono_aot_only)
+ start = mono_aot_get_trampoline ("delegate_invoke_impl_has_target");
+ else
+ start = get_delegate_invoke_impl (TRUE, 0, NULL);
+
+ mono_memory_barrier ();
+
+ cached = start;
+ } else {
+ static guint8* cache [MAX_ARCH_DELEGATE_PARAMS + 1] = {NULL};
+ int i = 0;
+
+ for (i = 0; i < sig->param_count; ++i)
+ if (!mono_is_regsize_var (sig->params [i]))
+ return NULL;
- mono_debug_add_delegate_trampoline (start, code - start);
+ code = cache [sig->param_count];
+ if (code)
+ return code;
+
+ if (mono_aot_only) {
+ char *name = g_strdup_printf ("delegate_invoke_impl_target_%d", sig->param_count);
+ start = mono_aot_get_trampoline (name);
+ g_free (name);
+ } else {
+ start = get_delegate_invoke_impl (FALSE, sig->param_count, NULL);
+ }
mono_memory_barrier ();
#endif /* MONO_ARCH_SIMD_INTRINSICS */
}
+/*MONO_ARCH_HAVE_HANDLER_BLOCK_GUARD*/
+gpointer
+mono_arch_install_handler_block_guard (MonoJitInfo *ji, MonoJitExceptionInfo *clause, MonoContext *ctx, gpointer new_value)
+{
+ int offset;
+ gpointer *sp, old_value;
+ char *bp;
+ const unsigned char *handler;
+
+ /*Decode the first instruction to figure out where did we store the spvar*/
+ /*Our jit MUST generate the following:
+ mov %esp, -?(%ebp)
+ Which is encoded as: 0x89 mod_rm.
+ mod_rm (esp, ebp, imm) which can be: (imm will never be zero)
+ mod (reg + imm8): 01 reg(esp): 100 rm(ebp): 101 -> 01100101 (0x65)
+ mod (reg + imm32): 10 reg(esp): 100 rm(ebp): 101 -> 10100101 (0xA5)
+ */
+ handler = clause->handler_start;
+
+ if (*handler != 0x89)
+ return NULL;
+
+ ++handler;
+
+ if (*handler == 0x65)
+ offset = *(signed char*)(handler + 1);
+ else if (*handler == 0xA5)
+ offset = *(int*)(handler + 1);
+ else
+ return NULL;
+
+ /*Load the spvar*/
+ bp = MONO_CONTEXT_GET_BP (ctx);
+ sp = *(gpointer*)(bp + offset);
+
+ old_value = *sp;
+ if (old_value < ji->code_start || (char*)old_value > ((char*)ji->code_start + ji->code_size))
+ return old_value;
+
+ *sp = new_value;
+
+ return old_value;
+}
+
+/*
+ * mono_arch_emit_load_got_addr:
+ *
+ *   Emit code to load the got address.
+ * On x86, the result is placed into EBX (MONO_ARCH_GOT_REG).
+ * When CFG is non-NULL the GOT-offset patch is recorded on it (JIT case);
+ * otherwise it is prepended to the JI patch list (AOT compiler case).
+ */
+guint8*
+mono_arch_emit_load_got_addr (guint8 *start, guint8 *code, MonoCompile *cfg, MonoJumpInfo **ji)
+{
+ /* call with a 0 displacement just pushes the address of the next insn */
+ x86_call_imm (code, 0);
+ /*
+ * The patch needs to point to the pop, since the GOT offset needs
+ * to be added to that address.
+ */
+ if (cfg)
+ mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_GOT_OFFSET, NULL);
+ else
+ *ji = mono_patch_info_list_prepend (*ji, code - start, MONO_PATCH_INFO_GOT_OFFSET, NULL);
+ x86_pop_reg (code, MONO_ARCH_GOT_REG);
+ /* 0xf0f0f0f0 is a placeholder immediate, replaced when the patch is applied */
+ x86_alu_reg_imm (code, X86_ADD, MONO_ARCH_GOT_REG, 0xf0f0f0f0);
+
+ return code;
+}
+
+/*
+ * mono_arch_emit_load_aotconst:
+ *
+ *   Emit code to load the contents of the GOT slot identified by TRAMP_TYPE and
+ * TARGET from the mscorlib GOT in full-aot code.
+ * On x86, the GOT address is assumed to be in EBX (MONO_ARCH_GOT_REG), and the
+ * result is placed into EAX.  The patch is recorded on the JI list for the AOT
+ * compiler to resolve.
+ */
+guint8*
+mono_arch_emit_load_aotconst (guint8 *start, guint8 *code, MonoJumpInfo **ji, int tramp_type, gconstpointer target)
+{
+ /* Load the mscorlib got address */
+ x86_mov_reg_membase (code, X86_EAX, MONO_ARCH_GOT_REG, sizeof (gpointer), 4);
+ *ji = mono_patch_info_list_prepend (*ji, code - start, tramp_type, target);
+ /* arch_emit_got_access () patches this */
+ x86_mov_reg_membase (code, X86_EAX, X86_EAX, 0xf0f0f0f0, 4);
+
+ return code;
+}
+
#if __APPLE__
#define DBG_SIGNAL SIGBUS
#else