amd64_mov_reg_membase (code, AMD64_R12, AMD64_R11, G_STRUCT_OFFSET (MonoContext, r12), 8);
amd64_mov_reg_membase (code, AMD64_R13, AMD64_R11, G_STRUCT_OFFSET (MonoContext, r13), 8);
amd64_mov_reg_membase (code, AMD64_R14, AMD64_R11, G_STRUCT_OFFSET (MonoContext, r14), 8);
+#if !defined(__native_client_codegen__)
amd64_mov_reg_membase (code, AMD64_R15, AMD64_R11, G_STRUCT_OFFSET (MonoContext, r15), 8);
+#endif
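/*
 * Under the NaCl x86-64 sandbox R15 is reserved as the sandbox base
 * register (RZP) and must never be written by generated code, which is
 * why the R15 restore is compiled out here and in the similar blocks
 * below.
 */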
if (mono_running_on_valgrind ()) {
/* Prevent 'Address 0x... is just below the stack ptr.' errors */
/* jump to the saved IP */
amd64_jump_reg (code, AMD64_R11);
+ nacl_global_codeman_validate(&start, 256, &code);
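/*
 * A sketch of the assumed contract of nacl_global_codeman_validate ():
 * outside NaCl builds it is a no-op; under NaCl it submits the bytes
 * between the buffer base and the current end-of-code pointer to the
 * validator, and may update both pointers (hence the double indirection)
 * if the code is moved into the validated region.
 */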
+
mono_arch_flush_icache (start, code - start);
if (info)
guint32 pos;
MonoJumpInfo *ji = NULL;
GSList *unwind_ops = NULL;
+ const guint kMaxCodeSize = NACL_SIZE (128, 256);
- start = code = mono_global_codeman_reserve (128);
+ start = code = mono_global_codeman_reserve (kMaxCodeSize);
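/*
 * NACL_SIZE (a, b) is assumed to expand to its second argument under
 * __native_client_codegen__ and to the first otherwise, e.g.:
 *
 *   #if defined(__native_client_codegen__)
 *   #define NACL_SIZE(a, b) (b)
 *   #else
 *   #define NACL_SIZE(a, b) (a)
 *   #endif
 *
 * NaCl's 32-byte instruction bundles roughly double the emitted
 * trampoline size, hence the larger reservations throughout this patch.
 */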
/* call_filter (MonoContext *ctx, unsigned long eip) */
code = start;
amd64_mov_reg_membase (code, AMD64_R12, AMD64_ARG_REG1, G_STRUCT_OFFSET (MonoContext, r12), 8);
amd64_mov_reg_membase (code, AMD64_R13, AMD64_ARG_REG1, G_STRUCT_OFFSET (MonoContext, r13), 8);
amd64_mov_reg_membase (code, AMD64_R14, AMD64_ARG_REG1, G_STRUCT_OFFSET (MonoContext, r14), 8);
+#if !defined(__native_client_codegen__)
amd64_mov_reg_membase (code, AMD64_R15, AMD64_ARG_REG1, G_STRUCT_OFFSET (MonoContext, r15), 8);
+#endif
#ifdef TARGET_WIN32
amd64_mov_reg_membase (code, AMD64_RDI, AMD64_ARG_REG1, G_STRUCT_OFFSET (MonoContext, rdi), 8);
amd64_mov_reg_membase (code, AMD64_RSI, AMD64_ARG_REG1, G_STRUCT_OFFSET (MonoContext, rsi), 8);
amd64_leave (code);
amd64_ret (code);
- g_assert ((code - start) < 128);
+ g_assert ((code - start) < kMaxCodeSize);
+
+ nacl_global_codeman_validate(&start, kMaxCodeSize, &code);
mono_arch_flush_icache (start, code - start);
guint8 *code;
MonoJumpInfo *ji = NULL;
GSList *unwind_ops = NULL;
- int i, buf_size, stack_size, arg_offsets [16], regs_offset;
+ int i, stack_size, arg_offsets [16], regs_offset;
+ const guint kMaxCodeSize = NACL_SIZE (256, 512);
- buf_size = 256;
- start = code = mono_global_codeman_reserve (buf_size);
+ start = code = mono_global_codeman_reserve (kMaxCodeSize);
/* The stack is unaligned on entry */
stack_size = 192 + 8;
*/
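/*
 * Alignment arithmetic: the trampoline is entered via a call, so
 * RSP % 16 == 8 on entry; subtracting the 200-byte frame (200 % 16 == 8)
 * realigns RSP to 16 bytes for the call into the C throw function.
 */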
arg_offsets [0] = 0;
- arg_offsets [1] = sizeof (gpointer);
- arg_offsets [2] = sizeof (gpointer) * 2;
- arg_offsets [3] = sizeof (gpointer) * 3;
- regs_offset = sizeof (gpointer) * 4;
+ arg_offsets [1] = sizeof(mgreg_t);
+ arg_offsets [2] = sizeof(mgreg_t) * 2;
+ arg_offsets [3] = sizeof(mgreg_t) * 3;
+ regs_offset = sizeof(mgreg_t) * 4;
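/*
 * Resulting layout, assuming the 8-byte mgreg_t of amd64: [rsp+0 ..
 * rsp+24] hold the four stack arguments for the throw function, and the
 * saved register area starts at regs_offset == 32, e.g. the RSP slot
 * lives at 32 + AMD64_RSP * 8.
 */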
/* Save registers */
for (i = 0; i < AMD64_NREG; ++i)
if (i != AMD64_RSP)
- amd64_mov_membase_reg (code, AMD64_RSP, regs_offset + (i * sizeof (gpointer)), i, 8);
+ amd64_mov_membase_reg (code, AMD64_RSP, regs_offset + (i * sizeof(mgreg_t)), i, sizeof(mgreg_t));
/* Save RSP */
- amd64_lea_membase (code, AMD64_RAX, AMD64_RSP, stack_size + sizeof (gpointer));
- amd64_mov_membase_reg (code, AMD64_RSP, regs_offset + (AMD64_RSP * sizeof (gpointer)), X86_EAX, 8);
+ amd64_lea_membase (code, AMD64_RAX, AMD64_RSP, stack_size + sizeof(mgreg_t));
+ amd64_mov_membase_reg (code, AMD64_RSP, regs_offset + (AMD64_RSP * sizeof(mgreg_t)), X86_EAX, sizeof(mgreg_t));
/* Set arg1 == regs */
amd64_lea_membase (code, AMD64_RAX, AMD64_RSP, regs_offset);
- amd64_mov_membase_reg (code, AMD64_RSP, arg_offsets [0], AMD64_RAX, 8);
+ amd64_mov_membase_reg (code, AMD64_RSP, arg_offsets [0], AMD64_RAX, sizeof(mgreg_t));
/* Set arg2 == eip */
if (llvm_abs)
amd64_alu_reg_reg (code, X86_XOR, AMD64_RAX, AMD64_RAX);
else
- amd64_mov_reg_membase (code, AMD64_RAX, AMD64_RSP, stack_size, 8);
- amd64_mov_membase_reg (code, AMD64_RSP, arg_offsets [1], AMD64_RAX, 8);
+ amd64_mov_reg_membase (code, AMD64_RAX, AMD64_RSP, stack_size, sizeof(mgreg_t));
+ amd64_mov_membase_reg (code, AMD64_RSP, arg_offsets [1], AMD64_RAX, sizeof(mgreg_t));
/* Set arg3 == exc/ex_token_index */
if (resume_unwind)
- amd64_mov_membase_imm (code, AMD64_RSP, arg_offsets [2], 0, 8);
+ amd64_mov_membase_imm (code, AMD64_RSP, arg_offsets [2], 0, sizeof(mgreg_t));
else
- amd64_mov_membase_reg (code, AMD64_RSP, arg_offsets [2], AMD64_ARG_REG1, 8);
+ amd64_mov_membase_reg (code, AMD64_RSP, arg_offsets [2], AMD64_ARG_REG1, sizeof(mgreg_t));
/* Set arg4 == rethrow/pc offset */
if (resume_unwind) {
- amd64_mov_membase_imm (code, AMD64_RSP, arg_offsets [3], 0, 8);
+ amd64_mov_membase_imm (code, AMD64_RSP, arg_offsets [3], 0, sizeof(mgreg_t));
} else if (corlib) {
- amd64_mov_membase_reg (code, AMD64_RSP, arg_offsets [3], AMD64_ARG_REG2, 8);
+ amd64_mov_membase_reg (code, AMD64_RSP, arg_offsets [3], AMD64_ARG_REG2, sizeof(mgreg_t));
if (llvm_abs)
/*
 * The caller is LLVM code which passes the absolute address, not a pc
 * offset, so compensate by passing 0 as 'rip' and passing the negated
 * absolute address as the pc offset.
 */
amd64_neg_membase (code, AMD64_RSP, arg_offsets [3]);
} else {
- amd64_mov_membase_imm (code, AMD64_RSP, arg_offsets [3], rethrow, 8);
+ amd64_mov_membase_imm (code, AMD64_RSP, arg_offsets [3], rethrow, sizeof(mgreg_t));
}
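/*
 * In the AOT case below, the icall is recorded by name in the patch info
 * and the RIP-relative load is fixed up at image load time to fetch the
 * resolved address out of a GOT-style slot into R11; the non-AOT case can
 * embed the address as an immediate instead.
 */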
if (aot) {
- ji = mono_patch_info_list_prepend (ji, code - start, MONO_PATCH_INFO_JIT_ICALL_ADDR, corlib ? (llvm_abs ? "mono_amd64_throw_corlib_exception_abs" : "mono_amd64_throw_corlib_exception") : "mono_amd64_throw_exception");
+ ji = mono_patch_info_list_prepend (ji, code - start, MONO_PATCH_INFO_JIT_ICALL_ADDR, corlib ? "mono_amd64_throw_corlib_exception" : "mono_amd64_throw_exception");
amd64_mov_reg_membase (code, AMD64_R11, AMD64_RIP, 0, 8);
} else {
amd64_mov_reg_imm (code, AMD64_R11, resume_unwind ? ((gpointer)mono_amd64_resume_unwind) : (corlib ? (gpointer)mono_amd64_throw_corlib_exception : (gpointer)mono_amd64_throw_exception));
mono_arch_flush_icache (start, code - start);
- g_assert ((code - start) < buf_size);
+ g_assert ((code - start) < kMaxCodeSize);
+
+ nacl_global_codeman_validate(&start, kMaxCodeSize, &code);
if (info)
*info = mono_tramp_info_create (g_strdup (tramp_name), start, code - start, ji, unwind_ops);
gboolean
mono_arch_find_jit_info (MonoDomain *domain, MonoJitTlsData *jit_tls,
MonoJitInfo *ji, MonoContext *ctx,
- MonoContext *new_ctx, MonoLMF **lmf,
+ MonoContext *new_ctx, MonoLMF **lmf,
+ mgreg_t **save_locations,
StackFrameInfo *frame)
{
gpointer ip = MONO_CONTEXT_GET_IP (ctx);
*new_ctx = *ctx;
if (ji != NULL) {
- gssize regs [MONO_MAX_IREGS + 1];
+ mgreg_t regs [MONO_MAX_IREGS + 1];
guint8 *cfa;
guint32 unwind_info_len;
guint8 *unwind_info;
unwind_info = mono_aot_get_unwind_info (ji, &unwind_info_len);
else
unwind_info = mono_get_cached_unwind_info (ji->used_regs, &unwind_info_len);
+
+ frame->unwind_info = unwind_info;
+ frame->unwind_info_len = unwind_info_len;
regs [AMD64_RAX] = new_ctx->rax;
regs [AMD64_RBX] = new_ctx->rbx;
mono_unwind_frame (unwind_info, unwind_info_len, ji->code_start,
(guint8*)ji->code_start + ji->code_size,
- ip, regs, MONO_MAX_IREGS + 1, &cfa);
+ ip, regs, MONO_MAX_IREGS + 1,
+ save_locations, MONO_MAX_IREGS, &cfa);
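/*
 * save_locations is an out array assumed to receive, for each of the
 * first MONO_MAX_IREGS registers, the address of the stack slot the
 * register was saved at (or NULL), letting callers such as the soft
 * debugger change register values of already-unwound frames in place.
 */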
new_ctx->rax = regs [AMD64_RAX];
new_ctx->rbx = regs [AMD64_RBX];
new_ctx->r15 = regs [AMD64_R15];
/* The CFA becomes the new SP value */
- new_ctx->rsp = (gssize)cfa;
+ new_ctx->rsp = (mgreg_t)cfa;
/* Adjust IP */
new_ctx->rip --;
* The rsp field is set just before the call which transitioned to native
* code. Obtain the rip from the stack.
*/
- rip = *(guint64*)((*lmf)->rsp - sizeof (gpointer));
+ rip = *(guint64*)((*lmf)->rsp - sizeof(mgreg_t));
}
ji = mini_jit_info_table_find (domain, (gpointer)rip, NULL);
- g_assert (ji);
+ /*
+ * FIXME: ji == NULL can happen when a managed-to-native wrapper is interrupted
+ * in the soft debugger suspend code, since (*lmf)->rsp no longer points to the
+ * return address.
+ */
+ //g_assert (ji);
+ if (!ji)
+ return FALSE;
/* Adjust IP */
rip --;
void
mono_arch_sigctx_to_monoctx (void *sigctx, MonoContext *mctx)
{
+#if defined(__native_client_codegen__) || defined(__native_client__)
+ printf("WARNING: mono_arch_sigctx_to_monoctx() called!\n");
+#endif
+
#if defined(MONO_ARCH_USE_SIGACTION)
ucontext_t *ctx = (ucontext_t*)sigctx;
void
mono_arch_monoctx_to_sigctx (MonoContext *mctx, void *sigctx)
{
+#if defined(__native_client__) || defined(__native_client_codegen__)
+ printf("WARNING: mono_arch_monoctx_to_sigctx() called!\n");
+#endif
+
#if defined(MONO_ARCH_USE_SIGACTION)
ucontext_t *ctx = (ucontext_t*)sigctx;
gpointer throw_trampoline;
MonoJumpInfo *ji = NULL;
GSList *unwind_ops = NULL;
+ const guint kMaxCodeSize = NACL_SIZE (128, 256);
- start = code = mono_global_codeman_reserve (128);
+ start = code = mono_global_codeman_reserve (kMaxCodeSize);
/* We are in the frame of a managed method after a call */
/*
/* Return to original code */
amd64_jump_reg (code, AMD64_R11);
- g_assert ((code - start) < 128);
+ g_assert ((code - start) < kMaxCodeSize);
+
+ nacl_global_codeman_validate(&start, kMaxCodeSize, &code);
if (info)
*info = mono_tramp_info_create (g_strdup_printf ("throw_pending_exception"), start, code - start, ji, unwind_ops);
*(gpointer*)(lmf->rsp - 8) = throw_pending_exception;
}
+GSList*
+mono_amd64_get_exception_trampolines (gboolean aot)
+{
+ MonoTrampInfo *info;
+ GSList *tramps = NULL;
+
+ mono_arch_get_throw_pending_exception (&info, aot);
+ tramps = g_slist_prepend (tramps, info);
+
+ /* LLVM needs different throw trampolines */
+ get_throw_trampoline (&info, FALSE, TRUE, FALSE, FALSE, "llvm_throw_corlib_exception_trampoline", aot);
+ tramps = g_slist_prepend (tramps, info);
+
+ get_throw_trampoline (&info, FALSE, TRUE, TRUE, FALSE, "llvm_throw_corlib_exception_abs_trampoline", aot);
+ tramps = g_slist_prepend (tramps, info);
+
+ get_throw_trampoline (&info, FALSE, TRUE, TRUE, TRUE, "llvm_resume_unwind_trampoline", FALSE);
+ tramps = g_slist_prepend (tramps, info);
+
+ return tramps;
+}
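/*
 * This list is consumed in two places: mono_arch_exceptions_init () below
 * registers the trampolines for the JIT case, and the AOT compiler is
 * expected to emit them into the image so that the
 * mono_aot_get_trampoline () lookups above can resolve them at runtime.
 */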
+
void
mono_arch_exceptions_init (void)
{
- guint8 *tramp;
+ GSList *tramps, *l;
+ gpointer tramp;
if (mono_aot_only) {
throw_pending_exception = mono_aot_get_trampoline ("throw_pending_exception");
+ tramp = mono_aot_get_trampoline ("llvm_throw_corlib_exception_trampoline");
+ mono_register_jit_icall (tramp, "llvm_throw_corlib_exception_trampoline", NULL, TRUE);
+ tramp = mono_aot_get_trampoline ("llvm_throw_corlib_exception_abs_trampoline");
+ mono_register_jit_icall (tramp, "llvm_throw_corlib_exception_abs_trampoline", NULL, TRUE);
+ tramp = mono_aot_get_trampoline ("llvm_resume_unwind_trampoline");
+ mono_register_jit_icall (tramp, "llvm_resume_unwind_trampoline", NULL, TRUE);
} else {
/* Call this to avoid initialization races */
throw_pending_exception = mono_arch_get_throw_pending_exception (NULL, FALSE);
- /* LLVM needs different throw trampolines */
- tramp = get_throw_trampoline (NULL, FALSE, TRUE, FALSE, FALSE, "llvm_throw_corlib_exception_trampoline", FALSE);
- mono_register_jit_icall (tramp, "llvm_throw_corlib_exception_trampoline", NULL, TRUE);
-
- tramp = get_throw_trampoline (NULL, FALSE, TRUE, TRUE, FALSE, "llvm_throw_corlib_exception_abs_trampoline", FALSE);
- mono_register_jit_icall (tramp, "llvm_throw_corlib_exception_abs_trampoline", NULL, TRUE);
+ tramps = mono_amd64_get_exception_trampolines (FALSE);
+ for (l = tramps; l; l = l->next) {
+ MonoTrampInfo *info = l->data;
- tramp = get_throw_trampoline (NULL, FALSE, TRUE, TRUE, TRUE, "llvm_resume_unwind_trampoline", FALSE);
- mono_register_jit_icall (tramp, "llvm_resume_unwind_trampoline", NULL, TRUE);
+ mono_register_jit_icall (info->code, g_strdup (info->name), NULL, TRUE);
+ mono_save_trampoline_xdebug_info (info);
+ mono_tramp_info_free (info);
+ }
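/*
 * The name is g_strdup ()ed above because mono_tramp_info_free () frees
 * info->name, while the registered icall needs its copy of the string to
 * stay alive.
 */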
+ g_slist_free (tramps);
}
}
static guint8* saved = NULL;
guint8 *code, *start;
int cont_reg = AMD64_R9; /* register usable on both call conventions */
+ const guint kMaxCodeSize = NACL_SIZE (64, 128);
+
if (saved)
return (MonoContinuationRestore)saved;
- code = start = mono_global_codeman_reserve (64);
+ code = start = mono_global_codeman_reserve (kMaxCodeSize);
/* the signature is: restore (MonoContinuation *cont, int state, MonoLMF **lmf_addr) */
/* cont is in AMD64_ARG_REG1 ($rcx or $rdi)
* state is in AMD64_ARG_REG2 ($rdx or $rsi)
amd64_mov_reg_membase (code, AMD64_R12, AMD64_RCX, G_STRUCT_OFFSET (MonoLMF, r12), 8);
amd64_mov_reg_membase (code, AMD64_R13, AMD64_RCX, G_STRUCT_OFFSET (MonoLMF, r13), 8);
amd64_mov_reg_membase (code, AMD64_R14, AMD64_RCX, G_STRUCT_OFFSET (MonoLMF, r14), 8);
+#if !defined(__native_client_codegen__)
amd64_mov_reg_membase (code, AMD64_R15, AMD64_RCX, G_STRUCT_OFFSET (MonoLMF, r15), 8);
+#endif
#ifdef TARGET_WIN32
amd64_mov_reg_membase (code, AMD64_RDI, AMD64_RCX, G_STRUCT_OFFSET (MonoLMF, rdi), 8);
amd64_mov_reg_membase (code, AMD64_RSI, AMD64_RCX, G_STRUCT_OFFSET (MonoLMF, rsi), 8);
/* state is already in rax */
amd64_jump_membase (code, cont_reg, G_STRUCT_OFFSET (MonoContinuation, return_ip));
- g_assert ((code - start) <= 64);
+ g_assert ((code - start) <= kMaxCodeSize);
+
+ nacl_global_codeman_validate(&start, kMaxCodeSize, &code);
+
saved = start;
return (MonoContinuationRestore)saved;
}
#endif
+/*
+ * mono_arch_setup_resume_sighandler_ctx:
+ *
+ * Setup CTX so execution continues at FUNC.
+ */
+void
+mono_arch_setup_resume_sighandler_ctx (MonoContext *ctx, gpointer func)
+{
+ /*
+ * When resuming from a signal handler, the stack should be misaligned, just like right after
+ * a call.
+ */
+ if ((((guint64)MONO_CONTEXT_GET_SP (ctx)) % 16) == 0)
+ MONO_CONTEXT_SET_SP (ctx, (guint64)MONO_CONTEXT_GET_SP (ctx) - 8);
+ MONO_CONTEXT_SET_IP (ctx, func);
+}
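/*
 * Worked example: a context interrupted with SP == 0x7fffffff0000
 * (16-byte aligned) resumes FUNC with SP == 0x7ffffffefff8, reproducing
 * the SP % 16 == 8 state the amd64 ABI establishes at function entry
 * right after a call.
 */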