* Dietmar Maurer (dietmar@ximian.com)
*
* Copyright 2002-2003 Ximian, Inc.
- * Coprygith 2003-2010 Novell, Inc.
+ * Copyright 2003-2010 Novell, Inc.
*/
#define MONO_LLVM_IN_MINI 1
static gpointer mono_jit_compile_method_with_opt (MonoMethod *method, guint32 opt, MonoException **ex);
-#ifdef __native_client_codegen__
-/* Default alignment for Native Client is 32-byte. */
-guint8 nacl_align_byte = 0xe0;
-#endif
static guint32 default_opt = 0;
static gboolean default_opt_set = FALSE;
gboolean mono_dont_free_global_codeman;
+gpointer
+mono_realloc_native_code (MonoCompile *cfg)
+{
+#if defined(__default_codegen__)
+	return g_realloc (cfg->native_code, cfg->code_size);
+#elif defined(__native_client_codegen__)
+	guint old_padding;
+	gpointer native_code;
+	guint alignment_check;
+
+	/* Save the old alignment offset so we can re-align after the realloc. */
+	old_padding = (guint)(cfg->native_code - cfg->native_code_alloc);
+
+	/* Over-allocate by kNaClAlignment so we can always pad up to the
+	 * next bundle boundary. */
+	cfg->native_code_alloc = g_realloc (cfg->native_code_alloc,
+					    cfg->code_size + kNaClAlignment);
+
+	/* Align native_code to the next kNaClAlignment boundary. Do the
+	 * arithmetic in gsize (pointer-sized), not guint: a guint cast
+	 * would truncate the address on 64-bit targets. */
+	native_code = (gpointer)(((gsize)cfg->native_code_alloc + kNaClAlignment) & ~(gsize)kNaClAlignmentMask);
+
+	/* Shift the code so it is kNaClAlignment-byte aligned again. */
+	memmove (native_code, cfg->native_code_alloc + old_padding, cfg->code_size);
+
+	alignment_check = (guint)((gsize)native_code & kNaClAlignmentMask);
+	g_assert (alignment_check == 0);
+	return native_code;
+#else
+	g_assert_not_reached ();
+	return cfg->native_code;
+#endif
+}
+
#ifdef __native_client_codegen__
/* Prevent instructions from straddling a 32-byte alignment boundary. */
* output. Unlike mono_pmip which returns a string, this routine
* prints the value on the standard output.
*/
+#ifdef __GNUC__
+/* Prevent the linker from optimizing this away in embedding setups to help debugging */
+ __attribute__((used))
+#endif
void
mono_print_method_from_ip (void *ip)
{
}
}
+#if defined(__native_client_codegen__) && defined(__native_client__)
+/* Given the temporary buffer (allocated by mono_global_codeman_reserve) into
+ * which we are generating code, return a pointer to the destination in the
+ * dynamic code segment into which the code will be copied when
+ * mono_global_codeman_commit is called.
+ * LOCKING: Acquires the jit lock.
+ */
+void*
+nacl_global_codeman_get_dest (void *data)
+{
+	void *result;
+
+	/* The code manager is shared, so resolve the destination under the jit lock. */
+	mono_jit_lock ();
+	result = nacl_code_manager_get_code_dest (global_codeman, data);
+	mono_jit_unlock ();
+
+	return result;
+}
+
+/*
+ * Thread-safe wrapper around mono_code_manager_commit for the global code
+ * manager; DATA, SIZE and NEWSIZE are passed through unchanged.
+ * LOCKING: Acquires the jit lock.
+ */
+void
+mono_global_codeman_commit (void *data, int size, int newsize)
+{
+ mono_jit_lock ();
+ mono_code_manager_commit (global_codeman, data, size, newsize);
+ mono_jit_unlock ();
+}
+
+/*
+ * Convenience function which calls mono_global_codeman_commit to validate and
+ * copy the code. The caller sets *buf_base and *buf_size to the start and size
+ * of the buffer (allocated by mono_global_codeman_reserve), and *code_end to
+ * the byte after the last instruction byte. On return, *buf_base will point to
+ * the start of the copied code in the code segment, and *code_end will point after
+ * the end of the copied code.
+ */
+void
+nacl_global_codeman_validate (guint8 **buf_base, int buf_size, guint8 **code_end)
+{
+ /* Destination of *buf_base in the dynamic code segment. */
+ guint8 *tmp = nacl_global_codeman_get_dest (*buf_base);
+ /* newsize = number of instruction bytes actually emitted into the buffer. */
+ mono_global_codeman_commit (*buf_base, buf_size, *code_end - *buf_base);
+ /* Re-point the caller's pointers at the committed copy. */
+ *code_end = tmp + (*code_end - *buf_base);
+ *buf_base = tmp;
+}
+#else
+/* no-op versions of Native Client functions */
+void*
+nacl_global_codeman_get_dest (void *data)
+{
+ /* Without Native Client the buffer is the final location; return it as is. */
+ return data;
+}
+
+void
+mono_global_codeman_commit (void *data, int size, int newsize)
+{
+ /* No-op: there is no separate validation/copy step outside Native Client. */
+}
+
+void
+nacl_global_codeman_validate (guint8 **buf_base, int buf_size, guint8 **code_end)
+{
+ /* No-op: *buf_base and *code_end already point at the final code. */
+}
+
+#endif /* __native_client__ */
+
/**
* mono_create_unwind_op:
*
return mono_compile_create_var_for_vreg (cfg, type, opcode, dreg);
}
+/*
+ * Transform a MonoInst into a load from the variable of index var_index.
+ *
+ * @cfg: compile context owning the variable table (cfg->varinfo).
+ * @dest: instruction to (re)initialize; any previous contents are cleared.
+ * @var_index: index into cfg->varinfo of the variable to load from.
+ */
+void
+mono_compile_make_var_load (MonoCompile *cfg, MonoInst *dest, gssize var_index) {
+ memset (dest, 0, sizeof (MonoInst));
+ dest->inst_i0 = cfg->varinfo [var_index];
+ dest->opcode = mini_type_to_ldind (cfg, dest->inst_i0->inst_vtype);
+ type_to_eval_stack_type (cfg, dest->inst_i0->inst_vtype, dest);
+ dest->klass = dest->inst_i0->klass;
+}
+
+#endif
+
void
mono_mark_vreg_as_ref (MonoCompile *cfg, int vreg)
{
cfg->vreg_is_mp [vreg] = TRUE;
}
-/*
- * Transform a MonoInst into a load from the variable of index var_index.
- */
-void
-mono_compile_make_var_load (MonoCompile *cfg, MonoInst *dest, gssize var_index) {
- memset (dest, 0, sizeof (MonoInst));
- dest->inst_i0 = cfg->varinfo [var_index];
- dest->opcode = mini_type_to_ldind (cfg, dest->inst_i0->inst_vtype);
- type_to_eval_stack_type (cfg, dest->inst_i0->inst_vtype, dest);
- dest->klass = dest->inst_i0->klass;
-}
-
-#endif
-
static MonoType*
type_from_stack_type (MonoInst *ins) {
switch (ins->type) {
case MONO_TYPE_PTR:
case MONO_TYPE_I:
case MONO_TYPE_U:
-#if SIZEOF_REGISTER == 4
+#if SIZEOF_VOID_P == 4
case MONO_TYPE_I4:
#else
case MONO_TYPE_I8:
vars = mono_varlist_sort (cfg, vars, 0);
offset = 0;
- *stack_align = sizeof (gpointer);
+ *stack_align = sizeof(mgreg_t);
for (l = vars; l; l = l->next) {
vmv = l->data;
inst = cfg->varinfo [vmv->idx];
case MONO_TYPE_PTR:
case MONO_TYPE_I:
case MONO_TYPE_U:
-#if SIZEOF_REGISTER == 4
+#if SIZEOF_VOID_P == 4
case MONO_TYPE_I4:
#else
case MONO_TYPE_I8:
{
if (ins == NULL) {
ins = bb->code;
+ if (ins)
+ ins->prev = ins_to_insert;
bb->code = ins_to_insert;
ins_to_insert->next = ins;
if (bb->last_ins == NULL)
if ((jit_tls = TlsGetValue (mono_jit_tls_id)))
return jit_tls->lmf;
-
- g_assert_not_reached ();
+ /*
+ * We do not assert here because this function can be called from
+ * mini-gc.c on a thread that has not executed any managed code, yet
+ * (the thread object allocation can trigger a collection).
+ */
return NULL;
#endif
}
switch (ji->type) {
case MONO_PATCH_INFO_RVA:
case MONO_PATCH_INFO_LDSTR:
- case MONO_PATCH_INFO_TYPE_FROM_HANDLE:
case MONO_PATCH_INFO_LDTOKEN:
case MONO_PATCH_INFO_DECLSEC:
return (ji->type << 8) | ji->data.token->token;
+ case MONO_PATCH_INFO_TYPE_FROM_HANDLE:
+ return (ji->type << 8) | ji->data.token->token | (ji->data.token->has_context ? (gsize)ji->data.token->context.class_inst : 0);
case MONO_PATCH_INFO_INTERNAL_METHOD:
return (ji->type << 8) | g_str_hash (ji->data.name);
case MONO_PATCH_INFO_VTABLE:
target = patch_info->data.inst->inst_c0 + code;
break;
case MONO_PATCH_INFO_IP:
+#if defined(__native_client__) && defined(__native_client_codegen__)
+ /* Need to transform to the destination address, it's */
+ /* emitted as an immediate in the code. */
+ target = nacl_inverse_modify_patch_target(ip);
+#else
target = ip;
+#endif
break;
case MONO_PATCH_INFO_METHOD_REL:
target = code + patch_info->data.offset;
}
case MONO_PATCH_INFO_METHOD_JUMP:
target = mono_create_jump_trampoline (domain, patch_info->data.method, FALSE);
+#if defined(__native_client__) && defined(__native_client_codegen__)
+#if defined(TARGET_AMD64)
+ /* This target is an absolute address, not relative to the */
+ /* current code being emitted on AMD64. */
+ target = nacl_inverse_modify_patch_target(target);
+#endif
+#endif
break;
case MONO_PATCH_INFO_METHOD:
if (patch_info->data.method == method) {
gpointer *jump_table;
int i;
+#if defined(__native_client__) && defined(__native_client_codegen__)
+ /* This memory will leak, but we don't care if we're */
+ /* not deleting JIT'd methods anyway */
+ jump_table = g_malloc0 (sizeof(gpointer) * patch_info->data.table->table_size);
+#else
if (method && method->dynamic) {
jump_table = mono_code_manager_reserve (mono_dynamic_code_hash_lookup (domain, method)->code_mp, sizeof (gpointer) * patch_info->data.table->table_size);
} else {
jump_table = mono_domain_code_reserve (domain, sizeof (gpointer) * patch_info->data.table->table_size);
}
}
+#endif
- for (i = 0; i < patch_info->data.table->table_size; i++)
+ for (i = 0; i < patch_info->data.table->table_size; i++) {
+#if defined(__native_client__) && defined(__native_client_codegen__)
+ /* 'code' is relative to the current code blob, we */
+ /* need to do this transform on it to make the */
+ /* pointers in this table absolute */
+ jump_table [i] = nacl_inverse_modify_patch_target (code) + GPOINTER_TO_INT (patch_info->data.table->table [i]);
+#else
jump_table [i] = code + GPOINTER_TO_INT (patch_info->data.table->table [i]);
+#endif
+ }
+
+#if defined(__native_client__) && defined(__native_client_codegen__)
+ /* jump_table is in the data section, we need to transform */
+ /* it here so when it gets modified in amd64_patch it will */
+ /* then point back to the absolute data address */
+ target = nacl_inverse_modify_patch_target (jump_table);
+#else
target = jump_table;
+#endif
break;
}
case MONO_PATCH_INFO_METHODCONST:
target = mono_gc_get_card_table (&card_table_shift_bits, &card_table_mask);
break;
}
+ case MONO_PATCH_INFO_CASTCLASS_CACHE: {
+ target = mono_domain_alloc0 (domain, sizeof (gpointer));
+ break;
+ }
default:
g_assert_not_reached ();
}
}
case MONO_PATCH_INFO_SWITCH: {
gpointer *table;
+#if defined(__native_client__) && defined(__native_client_codegen__)
+ /* This memory will leak. */
+ /* TODO: can we free this when */
+ /* making the final jump table? */
+ table = g_malloc0 (sizeof(gpointer) * patch_info->data.table->table_size);
+#else
if (cfg->method->dynamic) {
table = mono_code_manager_reserve (cfg->dynamic_info->code_mp, sizeof (gpointer) * patch_info->data.table->table_size);
} else {
table = mono_domain_code_reserve (cfg->domain, sizeof (gpointer) * patch_info->data.table->table_size);
}
+#endif
for (i = 0; i < patch_info->data.table->table_size; i++) {
/* Might be NULL if the switch is eliminated */
GSList *list;
MonoDomain *domain = cfg->domain;
unsigned char *ip = cfg->native_code + patch_info->ip.i;
+#if defined(__native_client__) && defined(__native_client_codegen__)
+ /* When this jump target gets evaluated, the method */
+ /* will be installed in the dynamic code section, */
+ /* not at the location of cfg->native_code. */
+ ip = nacl_inverse_modify_patch_target (cfg->native_code) + patch_info->ip.i;
+#endif
mono_domain_lock (domain);
if (!domain_jit_info (domain)->jump_target_hash)
MonoBasicBlock *bb;
int max_epilog_size;
guint8 *code;
+ MonoDomain *code_domain;
+
+ if (mono_using_xdebug)
+ /*
+ * Recent gdb versions have trouble processing symbol files containing
+ * overlapping address ranges, so allocate all code from the code manager
+ * of the root domain. (#666152).
+ */
+ code_domain = mono_get_root_domain ();
+ else
+ code_domain = cfg->domain;
+
+#if defined(__native_client_codegen__) && defined(__native_client__)
+ void *code_dest;
+
+ /* This keeps patch targets from being transformed during
+ * ordinary method compilation, for local branches and jumps.
+ */
+ nacl_allow_target_modification (FALSE);
+#endif
for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
cfg->spill_count = 0;
}
}
+#ifdef __native_client_codegen__
+ mono_nacl_fix_patches (cfg->native_code, cfg->patch_info);
+#endif
mono_arch_emit_exceptions (cfg);
max_epilog_size = 0;
mono_dynamic_code_hash_insert (cfg->domain, cfg->method, cfg->dynamic_info);
mono_domain_unlock (cfg->domain);
- code = mono_code_manager_reserve (cfg->dynamic_info->code_mp, cfg->code_size + unwindlen);
+ if (mono_using_xdebug)
+ /* See the comment for cfg->code_domain */
+ code = mono_domain_code_reserve (code_domain, cfg->code_size + unwindlen);
+ else
+ code = mono_code_manager_reserve (cfg->dynamic_info->code_mp, cfg->code_size + unwindlen);
} else {
guint unwindlen = 0;
#ifdef MONO_ARCH_HAVE_UNWIND_TABLE
unwindlen = mono_arch_unwindinfo_get_size (cfg->arch.unwindinfo);
#endif
- code = mono_domain_code_reserve (cfg->domain, cfg->code_size + unwindlen);
+ code = mono_domain_code_reserve (code_domain, cfg->code_size + unwindlen);
}
+#if defined(__native_client_codegen__) && defined(__native_client__)
+ nacl_allow_target_modification (TRUE);
+#endif
memcpy (code, cfg->native_code, cfg->code_len);
-#ifdef __native_client_codegen__
+#if defined(__default_codegen__)
+ g_free (cfg->native_code);
+#elif defined(__native_client_codegen__)
if (cfg->native_code_alloc) {
g_free (cfg->native_code_alloc);
cfg->native_code_alloc = 0;
else if (cfg->native_code) {
g_free (cfg->native_code);
}
-#else
- g_free (cfg->native_code);
-#endif
+#endif /* __native_client_codegen__ */
cfg->native_code = code;
code = cfg->native_code + cfg->code_len;
#ifdef MONO_ARCH_HAVE_SAVE_UNWIND_INFO
mono_arch_save_unwind_info (cfg);
#endif
-
-#ifdef __native_client_codegen__
+
+#if defined(__native_client_codegen__) && defined(__native_client__)
+ if (!cfg->compile_aot) {
+ if (cfg->method->dynamic) {
+ code_dest = nacl_code_manager_get_code_dest(cfg->dynamic_info->code_mp, cfg->native_code);
+ } else {
+ code_dest = nacl_domain_get_code_dest(cfg->domain, cfg->native_code);
+ }
+ }
+#endif
+
+#if defined(__native_client_codegen__)
mono_nacl_fix_patches (cfg->native_code, cfg->patch_info);
#endif
mono_arch_patch_code (cfg->method, cfg->domain, cfg->native_code, cfg->patch_info, cfg->run_cctors);
if (cfg->method->dynamic) {
- mono_code_manager_commit (cfg->dynamic_info->code_mp, cfg->native_code, cfg->code_size, cfg->code_len);
+ if (mono_using_xdebug)
+ mono_domain_code_commit (code_domain, cfg->native_code, cfg->code_size, cfg->code_len);
+ else
+ mono_code_manager_commit (cfg->dynamic_info->code_mp, cfg->native_code, cfg->code_size, cfg->code_len);
} else {
- mono_domain_code_commit (cfg->domain, cfg->native_code, cfg->code_size, cfg->code_len);
+ mono_domain_code_commit (code_domain, cfg->native_code, cfg->code_size, cfg->code_len);
}
+#if defined(__native_client_codegen__) && defined(__native_client__)
+ cfg->native_code = code_dest;
+#endif
mono_profiler_code_buffer_new (cfg->native_code, cfg->code_len, MONO_PROFILER_CODE_BUFFER_METHOD, cfg->method);
mono_arch_flush_icache (cfg->native_code, cfg->code_len);
}
#endif
- /* The hard-guard page has been hit: there is not much we can do anymore
- * Print a hopefully clear message and abort.
- */
if (jit_tls->stack_size &&
ABS ((guint8*)fault_addr - ((guint8*)jit_tls->end_of_stack - jit_tls->stack_size)) < 8192 * sizeof (gpointer)) {
- const char *method;
- /* we don't do much now, but we can warn the user with a useful message */
- fprintf (stderr, "Stack overflow: IP: %p, fault addr: %p\n", mono_arch_ip_from_context (ctx), (gpointer)info->si_addr);
- if (ji && ji->method)
- method = mono_method_full_name (ji->method, TRUE);
- else
- method = "Unmanaged";
- fprintf (stderr, "At %s\n", method);
- _exit (1);
+ /*
+ * The hard-guard page has been hit: there is not much we can do anymore
+ * Print a hopefully clear message and abort.
+ */
+ mono_handle_hard_stack_ovf (jit_tls, ji, ctx, (guint8*)info->si_addr);
+ g_assert_not_reached ();
} else {
/* The original handler might not like that it is executed on an altstack... */
if (!ji && mono_chain_signal (SIG_HANDLER_PARAMS))
debug_options.no_gdb_backtrace = TRUE;
else if (!strcmp (arg, "suspend-on-sigsegv"))
debug_options.suspend_on_sigsegv = TRUE;
+ else if (!strcmp (arg, "suspend-on-unhandled"))
+ debug_options.suspend_on_unhandled = TRUE;
else if (!strcmp (arg, "dont-free-domains"))
mono_dont_free_domains = TRUE;
else if (!strcmp (arg, "dyn-runtime-invoke"))
debug_options.better_cast_details = TRUE;
else {
fprintf (stderr, "Invalid option for the MONO_DEBUG env variable: %s\n", arg);
- fprintf (stderr, "Available options: 'handle-sigint', 'keep-delegates', 'reverse-pinvoke-exceptions', 'collect-pagefault-stats', 'break-on-unverified', 'no-gdb-backtrace', 'dont-free-domains', 'suspend-on-sigsegv', 'dyn-runtime-invoke', 'gdb', 'explicit-null-checks', 'init-stacks'\n");
+ fprintf (stderr, "Available options: 'handle-sigint', 'keep-delegates', 'reverse-pinvoke-exceptions', 'collect-pagefault-stats', 'break-on-unverified', 'no-gdb-backtrace', 'dont-free-domains', 'suspend-on-sigsegv', 'suspend-on-unhandled', 'dyn-runtime-invoke', 'gdb', 'explicit-null-checks', 'init-stacks'\n");
exit (1);
}
}
callbacks.create_ftnptr = mini_create_ftnptr;
callbacks.get_addr_from_ftnptr = mini_get_addr_from_ftnptr;
callbacks.get_runtime_build_info = mono_get_runtime_build_info;
+ callbacks.set_cast_details = mono_set_cast_details;
#ifdef MONO_ARCH_HAVE_IMT
if (mono_use_imt) {
mono_unwind_init ();
- mini_gc_init ();
-
if (getenv ("MONO_XDEBUG")) {
char *xdebug_opts = getenv ("MONO_XDEBUG");
mono_xdebug_init (xdebug_opts);
register_icall (mono_load_remote_field_new, "mono_load_remote_field_new", "object object ptr ptr", FALSE);
register_icall (mono_store_remote_field_new, "mono_store_remote_field_new", "void object ptr ptr object", FALSE);
+#if defined(__native_client__) || defined(__native_client_codegen__)
+ register_icall (mono_nacl_gc, "mono_nacl_gc", "void", TRUE);
+#endif
/*
* NOTE, NOTE, NOTE, NOTE:
* when adding emulation for some opcodes, remember to also add a dummy
mono_register_opcode_emulation (OP_LCONV_TO_R_UN, "__emul_lconv_to_r8_un", "double long", mono_lconv_to_r8_un, FALSE);
#endif
#ifdef MONO_ARCH_EMULATE_FREM
+#if defined(__default_codegen__)
mono_register_opcode_emulation (OP_FREM, "__emul_frem", "double double double", fmod, FALSE);
+#elif defined(__native_client_codegen__)
+ mono_register_opcode_emulation (OP_FREM, "__emul_frem", "double double double", mono_fmod, FALSE);
+#endif
#endif
#ifdef MONO_ARCH_SOFT_FLOAT