/*
 * tramp-amd64.c: JIT trampoline code for amd64
 *
 * Authors:
 *   Dietmar Maurer (dietmar@ximian.com)
 *   Zoltan Varga (vargaz@gmail.com)
 *
 * (C) 2001 Ximian, Inc.
 * Copyright 2003-2011 Novell, Inc (http://www.novell.com)
 * Copyright 2011 Xamarin, Inc (http://www.xamarin.com)
 */
#include <config.h>
#include <glib.h>

#include <mono/metadata/abi-details.h>
#include <mono/metadata/appdomain.h>
#include <mono/metadata/marshal.h>
#include <mono/metadata/tabledefs.h>
#include <mono/metadata/mono-debug-debugger.h>
#include <mono/metadata/profiler-private.h>
#include <mono/metadata/gc-internals.h>
#include <mono/arch/amd64/amd64-codegen.h>

#include <mono/utils/memcheck.h>

#include "mini.h"
#include "mini-amd64.h"
#include "debugger-agent.h"

#if defined(__native_client_codegen__) && defined(__native_client__)
#include <nacl/nacl_dyncode.h>
#endif
#define ALIGN_TO(val,align) ((((guint64)val) + ((align) - 1)) & ~((align) - 1))

#define IS_REX(inst) (((inst) >= 0x40) && ((inst) <= 0x4f))
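/*
 * 0x40-0x4f are the single-byte REX prefixes; the low four bits are the
 * REX.W/R/X/B flags. IS_REX is used when inspecting previously generated
 * code, to recognize a prefix byte before looking at the opcode proper.
 */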
/*
 * mono_arch_get_unbox_trampoline:
 * @m: method pointer
 * @addr: pointer to native code for @m
 *
 * When value type methods are called through the vtable, we need to unbox the
 * 'this' argument. This method returns a pointer to a trampoline which does
 * the unboxing before calling the method.
 */
gpointer
mono_arch_get_unbox_trampoline (MonoMethod *m, gpointer addr)
{
	guint8 *code, *start;
	GSList *unwind_ops;
	int this_reg, size = NACL_SIZE (20, 32);

	MonoDomain *domain = mono_domain_get ();

	this_reg = mono_arch_get_this_arg_reg (NULL);

	start = code = (guint8 *)mono_domain_code_reserve (domain, size);

	unwind_ops = mono_arch_get_cie_program ();

	amd64_alu_reg_imm (code, X86_ADD, this_reg, sizeof (MonoObject));
	/* FIXME: Optimize this */
	amd64_mov_reg_imm (code, AMD64_RAX, addr);
	amd64_jump_reg (code, AMD64_RAX);
	g_assert ((code - start) < size);

	nacl_domain_code_validate (domain, &start, size, &code);

	mono_arch_flush_icache (start, code - start);
	mono_profiler_code_buffer_new (start, code - start, MONO_PROFILER_CODE_BUFFER_UNBOX_TRAMPOLINE, m);

	mono_tramp_info_register (mono_tramp_info_create (NULL, start, code - start, NULL, unwind_ops), domain);

	return start;
}
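/*
 * Roughly, assuming the default codegen and the SysV ABI where 'this'
 * arrives in %rdi, the generated trampoline is just:
 *
 *   add $sizeof(MonoObject), %rdi   ; skip the object header to reach the unboxed payload
 *   mov $addr, %rax
 *   jmp *%rax
 */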
/*
 * mono_arch_get_static_rgctx_trampoline:
 *
 * Create a trampoline which sets RGCTX_REG to MRGCTX, then jumps to ADDR.
 */
gpointer
mono_arch_get_static_rgctx_trampoline (MonoMethod *m, MonoMethodRuntimeGenericContext *mrgctx, gpointer addr)
{
	guint8 *code, *start;
	int buf_len;
	GSList *unwind_ops;

	MonoDomain *domain = mono_domain_get ();

#ifdef MONO_ARCH_NOMAP32BIT
	buf_len = 32;
#else
	/* AOTed code could still have a non-32 bit address */
	if ((((guint64)addr) >> 32) == 0)
		buf_len = NACL_SIZE (16, 32);
	else
		buf_len = NACL_SIZE (30, 32);
#endif

	start = code = (guint8 *)mono_domain_code_reserve (domain, buf_len);

	unwind_ops = mono_arch_get_cie_program ();

	amd64_mov_reg_imm (code, MONO_ARCH_RGCTX_REG, mrgctx);
	amd64_jump_code (code, addr);
	g_assert ((code - start) < buf_len);

	nacl_domain_code_validate (domain, &start, buf_len, &code);
	mono_arch_flush_icache (start, code - start);
	mono_profiler_code_buffer_new (start, code - start, MONO_PROFILER_CODE_BUFFER_GENERICS_TRAMPOLINE, NULL);

	mono_tramp_info_register (mono_tramp_info_create (NULL, start, code - start, NULL, unwind_ops), domain);

	return start;
}
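/*
 * A sketch of the emitted code:
 *
 *   mov $mrgctx, %r10    ; MONO_ARCH_RGCTX_REG (r10 on amd64)
 *   jmp <addr>           ; rel32 jump, or via a scratch register when out of range
 *
 * which is why a 32-bit ADDR gets away with the smaller buffer size above.
 */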
#ifdef _WIN64
// Workaround lack of Valgrind support for 64-bit Windows
#define VALGRIND_DISCARD_TRANSLATIONS(...)
#endif
/*
 * mono_arch_patch_callsite:
 *
 * Patch the callsite whose address is given by ORIG_CODE so that it calls ADDR. ORIG_CODE
 * points to the pc right after the call.
 */
void
mono_arch_patch_callsite (guint8 *method_start, guint8 *orig_code, guint8 *addr)
{
#if defined(__default_codegen__)
	guint8 buf [16];
	guint8 *code;
	gboolean can_write = mono_breakpoint_clean_code (method_start, orig_code, 14, buf, sizeof (buf));

	code = buf + 14;

	/* mov 64-bit imm into r11 (followed by call reg?) or direct call */
	if (((code [-13] == 0x49) && (code [-12] == 0xbb)) || (code [-5] == 0xe8)) {
		if (code [-5] != 0xe8) {
			if (can_write) {
				InterlockedExchangePointer ((gpointer*)(orig_code - 11), addr);
				VALGRIND_DISCARD_TRANSLATIONS (orig_code - 11, sizeof (gpointer));
			}
		} else {
			gboolean disp_32bit = ((((gint64)addr - (gint64)orig_code)) < (1 << 30)) && ((((gint64)addr - (gint64)orig_code)) > -(1 << 30));

			if ((((guint64)(addr)) >> 32) != 0 && !disp_32bit) {
				/*
				 * This might happen with LLVM or when calling AOTed code. Create a thunk.
				 */
				guint8 *thunk_start, *thunk_code;

				thunk_start = thunk_code = (guint8 *)mono_domain_code_reserve (mono_domain_get (), 32);
				amd64_jump_membase (thunk_code, AMD64_RIP, 0);
				*(guint64*)thunk_code = (guint64)addr;
				addr = thunk_start;
				g_assert ((((guint64)(addr)) >> 32) == 0);
				mono_arch_flush_icache (thunk_start, thunk_code - thunk_start);
				mono_profiler_code_buffer_new (thunk_start, thunk_code - thunk_start, MONO_PROFILER_CODE_BUFFER_HELPER, NULL);
			}
			if (can_write) {
				InterlockedExchange ((gint32*)(orig_code - 4), ((gint64)addr - (gint64)orig_code));
				VALGRIND_DISCARD_TRANSLATIONS (orig_code - 5, 4);
			}
		}
	}
	else if ((code [-7] == 0x41) && (code [-6] == 0xff) && (code [-5] == 0x15)) {
		/* call *<OFFSET>(%rip) */
		gpointer *got_entry = (gpointer*)((guint8*)orig_code + (*(guint32*)(orig_code - 4)));

		if (can_write) {
			InterlockedExchangePointer (got_entry, addr);
			VALGRIND_DISCARD_TRANSLATIONS (orig_code - 5, sizeof (gpointer));
		}
	}
#elif defined(__native_client__)
	/* These are essentially the same 2 cases as above, modified for NaCl */

	/* Target must be bundle-aligned */
	g_assert (((guint32)addr & kNaClAlignmentMask) == 0);
	/* Return target must be bundle-aligned */
	g_assert (((guint32)orig_code & kNaClAlignmentMask) == 0);

	if (orig_code[-5] == 0xe8) {
		/* Direct call */
		int ret;
		gint32 offset = (gint32)addr - (gint32)orig_code;
		guint8 buf[sizeof(gint32)];
		*((gint32*)(buf)) = offset;
		ret = nacl_dyncode_modify (orig_code - sizeof(gint32), buf, sizeof(gint32));
		g_assert (ret == 0);
	}
	else if (is_nacl_call_reg_sequence (orig_code - 10) && orig_code[-16] == 0x41 && orig_code[-15] == 0xbb) {
		int ret;
		guint8 buf[sizeof(gint32)];
		*((gint32 *)(buf)) = (gint32)addr;
		/* orig_code[-14] is the start of the immediate. */
		ret = nacl_dyncode_modify (orig_code - 14, buf, sizeof(gint32));
		g_assert (ret == 0);
	}
	else {
		g_assert_not_reached ();
	}
#endif
}
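/*
 * Summary of the default-codegen callsite shapes handled above, looking
 * backwards from ORIG_CODE (byte patterns per the checks in the code):
 *
 *   49 bb <imm64> ...    mov $target, %r11 (followed by a call through %r11)  -> swap the 8-byte immediate
 *   e8 <rel32>           call target                                          -> swap the 4-byte displacement
 *   41 ff 15 <rel32>     call *<OFFSET>(%rip)                                 -> swap the GOT entry it loads from
 */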
guint8 *
mono_arch_create_llvm_native_thunk (MonoDomain *domain, guint8 *addr)
{
	/*
	 * The caller is LLVM code and the call displacement might exceed 32 bits. We can't determine the caller address, so
	 * we add a thunk every time.
	 * Since the caller is also allocated using the domain code manager, hopefully the displacement will fit into 32 bits.
	 * FIXME: Avoid this if possible if !MONO_ARCH_NOMAP32BIT and ADDR is 32 bits.
	 */
	guint8 *thunk_start, *thunk_code;

	thunk_start = thunk_code = (guint8 *)mono_domain_code_reserve (mono_domain_get (), 32);
	amd64_jump_membase (thunk_code, AMD64_RIP, 0);
	*(guint64*)thunk_code = (guint64)addr;
	addr = thunk_start;
	mono_arch_flush_icache (thunk_start, thunk_code - thunk_start);
	mono_profiler_code_buffer_new (thunk_start, thunk_code - thunk_start, MONO_PROFILER_CODE_BUFFER_HELPER, NULL);
	return addr;
}
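/*
 * The thunk emitted above occupies 14 of the 32 reserved bytes:
 *
 *   ff 25 00 00 00 00    jmp *0(%rip)    ; jump through the quadword that follows
 *   <8-byte target address>
 *
 * amd64_jump_membase () with AMD64_RIP and offset 0 produces the RIP-relative
 * jump, and the 64-bit target is stored immediately after it.
 */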
void
mono_arch_patch_plt_entry (guint8 *code, gpointer *got, mgreg_t *regs, guint8 *addr)
{
	gint32 disp;
	gpointer *plt_jump_table_entry;

#if defined(__default_codegen__)
	/* A PLT entry: jmp *<DISP>(%rip) */
	g_assert (code [0] == 0xff);
	g_assert (code [1] == 0x25);

	disp = *(gint32*)(code + 2);

	plt_jump_table_entry = (gpointer*)(code + 6 + disp);
#elif defined(__native_client_codegen__)
	/* A PLT entry: */
	/* mov <DISP>(%rip), %r11d */

	/* Verify the 'mov' */
	g_assert (code [0] == 0x45);
	g_assert (code [1] == 0x8b);
	g_assert (code [2] == 0x1d);

	disp = *(gint32*)(code + 3);

	/* 7 = 3 (mov opcode) + 4 (disp) */
	/* This needs to resolve to the target of the RIP-relative offset */
	plt_jump_table_entry = (gpointer*)(code + 7 + disp);

#endif /* __native_client_codegen__ */

	InterlockedExchangePointer (plt_jump_table_entry, addr);
}
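/*
 * RIP-relative operands resolve against the address of the *next*
 * instruction, hence "code + 6 + disp" above (2 opcode bytes + 4
 * displacement bytes) and "code + 7 + disp" in the NaCl variant.
 */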
static void
stack_unaligned (MonoTrampolineType tramp_type)
{
	printf ("%d\n", tramp_type);
	g_assert_not_reached ();
}
guint8 *
mono_arch_create_generic_trampoline (MonoTrampolineType tramp_type, MonoTrampInfo **info, gboolean aot)
{
	char *tramp_name;
	guint8 *buf, *code, *tramp, *br [2], *r11_save_code, *after_r11_save_code, *br_ex_check;
	int i, lmf_offset, offset, res_offset, arg_offset, rax_offset, ex_offset, tramp_offset, ctx_offset, saved_regs_offset;
	int r11_save_offset, saved_fpregs_offset, rbp_offset, framesize, orig_rsp_to_rbp_offset, cfa_offset;
	gboolean has_caller;
	GSList *unwind_ops = NULL;
	MonoJumpInfo *ji = NULL;
	const guint kMaxCodeSize = NACL_SIZE (630, 630*2);

#if defined(__native_client_codegen__)
	const guint kNaClTrampOffset = 17;
#endif

	if (tramp_type == MONO_TRAMPOLINE_JUMP || tramp_type == MONO_TRAMPOLINE_HANDLER_BLOCK_GUARD)
		has_caller = FALSE;
	else
		has_caller = TRUE;

	code = buf = (guint8 *)mono_global_codeman_reserve (kMaxCodeSize);
	/* Compute stack frame size and offsets */
	offset = 0;
	rbp_offset = -offset;

	offset += sizeof(mgreg_t);
	rax_offset = -offset;

	offset += sizeof(mgreg_t);
	ex_offset = -offset;

	offset += sizeof(mgreg_t);
	r11_save_offset = -offset;

	offset += sizeof(mgreg_t);
	tramp_offset = -offset;

	offset += sizeof(gpointer);
	arg_offset = -offset;

	offset += sizeof(mgreg_t);
	res_offset = -offset;

	offset += sizeof (MonoContext);
	ctx_offset = -offset;
	saved_regs_offset = ctx_offset + MONO_STRUCT_OFFSET (MonoContext, gregs);
	saved_fpregs_offset = ctx_offset + MONO_STRUCT_OFFSET (MonoContext, fregs);

	offset += sizeof (MonoLMFTramp);
	lmf_offset = -offset;

#ifdef TARGET_WIN32
	/* Reserve space where the callee can save the argument registers */
	offset += 4 * sizeof (mgreg_t);
#endif

	framesize = ALIGN_TO (offset, MONO_ARCH_FRAME_ALIGNMENT);
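	/*
	 * A sketch of the resulting frame, offsets relative to %rbp after the
	 * prologue below (sizeof(mgreg_t) == 8 on amd64):
	 *
	 *   [rbp+0]   saved %rbp             (rbp_offset)
	 *   [rbp-8]   rax save slot          (rax_offset)
	 *   [rbp-16]  exception slot         (ex_offset)
	 *   [rbp-24]  r11 save slot          (r11_save_offset)
	 *   [rbp-32]  trampoline address     (tramp_offset)
	 *   [rbp-40]  trampoline argument    (arg_offset)
	 *   [rbp-48]  result slot            (res_offset)
	 *   below     MonoContext (ctx_offset), then MonoLMFTramp (lmf_offset)
	 */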
	// CFA = sp + 16 (the trampoline address is on the stack)
	cfa_offset = 16;
	mono_add_unwind_op_def_cfa (unwind_ops, code, buf, AMD64_RSP, 16);
	// IP saved at CFA - 8
	mono_add_unwind_op_offset (unwind_ops, code, buf, AMD64_RIP, -8);

	orig_rsp_to_rbp_offset = 0;
	r11_save_code = code;
	/* Reserve space for the mov_membase_reg to save R11 */
	amd64_padding (code, 5);
	after_r11_save_code = code;

	/* Pop the return address off the stack */
	amd64_pop_reg (code, AMD64_R11);
	orig_rsp_to_rbp_offset += sizeof(mgreg_t);

	cfa_offset -= sizeof(mgreg_t);
	mono_add_unwind_op_def_cfa_offset (unwind_ops, code, buf, cfa_offset);

	/*
	 * Allocate a new stack frame
	 */
	amd64_push_reg (code, AMD64_RBP);
	cfa_offset += sizeof(mgreg_t);
	mono_add_unwind_op_def_cfa_offset (unwind_ops, code, buf, cfa_offset);
	mono_add_unwind_op_offset (unwind_ops, code, buf, AMD64_RBP, - cfa_offset);

	orig_rsp_to_rbp_offset -= sizeof(mgreg_t);
	amd64_mov_reg_reg (code, AMD64_RBP, AMD64_RSP, sizeof(mgreg_t));
	mono_add_unwind_op_def_cfa_reg (unwind_ops, code, buf, AMD64_RBP);
	amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, framesize);
	/* Compute the trampoline address from the return address */
	if (aot) {
#if defined(__default_codegen__)
		/* 7 = length of call *<offset>(rip) */
		amd64_alu_reg_imm (code, X86_SUB, AMD64_R11, 7);
#elif defined(__native_client_codegen__)
		amd64_alu_reg_imm (code, X86_SUB, AMD64_R11, kNaClTrampOffset);
#endif
	} else {
		/* 5 = length of amd64_call_membase () */
		amd64_alu_reg_imm (code, X86_SUB, AMD64_R11, 5);
	}

	amd64_mov_membase_reg (code, AMD64_RBP, tramp_offset, AMD64_R11, sizeof(gpointer));
	/* Save all registers */
	for (i = 0; i < AMD64_NREG; ++i) {
		if (i == AMD64_RBP) {
			/* RAX is already saved */
			amd64_mov_reg_membase (code, AMD64_RAX, AMD64_RBP, rbp_offset, sizeof(mgreg_t));
			amd64_mov_membase_reg (code, AMD64_RBP, saved_regs_offset + (i * sizeof(mgreg_t)), AMD64_RAX, sizeof(mgreg_t));
		} else if (i == AMD64_RIP) {
			if (has_caller)
				amd64_mov_reg_membase (code, AMD64_R11, AMD64_RBP, 8, sizeof(gpointer));
			else
				amd64_mov_reg_imm (code, AMD64_R11, 0);
			amd64_mov_membase_reg (code, AMD64_RBP, saved_regs_offset + (i * sizeof(mgreg_t)), AMD64_R11, sizeof(mgreg_t));
		} else if (i == AMD64_RSP) {
			amd64_mov_reg_reg (code, AMD64_R11, AMD64_RSP, sizeof(mgreg_t));
			amd64_alu_reg_imm (code, X86_ADD, AMD64_R11, framesize + 16);
			amd64_mov_membase_reg (code, AMD64_RBP, saved_regs_offset + (i * sizeof(mgreg_t)), AMD64_R11, sizeof(mgreg_t));
		} else if (i != AMD64_R11) {
			amd64_mov_membase_reg (code, AMD64_RBP, saved_regs_offset + (i * sizeof(mgreg_t)), i, sizeof(mgreg_t));
		} else {
			/* We have to save R11 right at the start of
			   the trampoline code because it's used as a
			   scratch register */
			/* This happens before the frame is set up, so it goes into the redzone */
			amd64_mov_membase_reg (r11_save_code, AMD64_RSP, r11_save_offset + orig_rsp_to_rbp_offset, i, sizeof(mgreg_t));
			g_assert (r11_save_code == after_r11_save_code);

			/* Copy from the save slot into the register array slot */
			amd64_mov_reg_membase (code, i, AMD64_RSP, r11_save_offset + orig_rsp_to_rbp_offset, sizeof(mgreg_t));
			amd64_mov_membase_reg (code, AMD64_RBP, saved_regs_offset + (i * sizeof(mgreg_t)), i, sizeof(mgreg_t));
		}
		/* cfa = rbp + cfa_offset */
		mono_add_unwind_op_offset (unwind_ops, code, buf, i, - cfa_offset + saved_regs_offset + (i * sizeof (mgreg_t)));
	}
	for (i = 0; i < 8; ++i)
		amd64_movsd_membase_reg (code, AMD64_RBP, saved_fpregs_offset + (i * sizeof(mgreg_t)), i);
	/* Check that the stack is aligned */
#if defined(__default_codegen__)
	amd64_mov_reg_reg (code, AMD64_R11, AMD64_RSP, sizeof (mgreg_t));
	amd64_alu_reg_imm (code, X86_AND, AMD64_R11, 15);
	amd64_alu_reg_imm (code, X86_CMP, AMD64_R11, 0);
	br [0] = code;
	amd64_branch_disp (code, X86_CC_Z, 0, FALSE);
	if (aot) {
		amd64_mov_reg_imm (code, AMD64_R11, 0);
		amd64_mov_reg_membase (code, AMD64_R11, AMD64_R11, 0, 8);
	} else {
		amd64_mov_reg_imm (code, MONO_AMD64_ARG_REG1, tramp_type);
		amd64_mov_reg_imm (code, AMD64_R11, stack_unaligned);
		amd64_call_reg (code, AMD64_R11);
	}
	mono_amd64_patch (br [0], code);
	//amd64_breakpoint (code);
#endif
	if (tramp_type != MONO_TRAMPOLINE_HANDLER_BLOCK_GUARD) {
		/* Obtain the trampoline argument which is encoded in the instruction stream */
		if (aot) {
			/* Load the GOT offset */
			amd64_mov_reg_membase (code, AMD64_R11, AMD64_RBP, tramp_offset, sizeof(gpointer));
#if defined(__default_codegen__)
			/*
			 * r11 points to a call *<offset>(%rip) instruction, load the
			 * pc-relative offset from the instruction itself.
			 */
			amd64_mov_reg_membase (code, AMD64_RAX, AMD64_R11, 3, 4);
			/* 7 is the length of the call, 8 is the offset to the next got slot */
			amd64_alu_reg_imm_size (code, X86_ADD, AMD64_RAX, 7 + sizeof (gpointer), sizeof(gpointer));
#elif defined(__native_client_codegen__)
			/* The arg is hidden in a "push imm32" instruction, */
			/* add one to skip the opcode.                      */
			amd64_mov_reg_membase (code, AMD64_RAX, AMD64_R11, kNaClTrampOffset+1, 4);
#endif
			/* Compute the address of the GOT slot */
			amd64_alu_reg_reg_size (code, X86_ADD, AMD64_R11, AMD64_RAX, sizeof(gpointer));
			/* Load the value */
			amd64_mov_reg_membase (code, AMD64_R11, AMD64_R11, 0, sizeof(gpointer));
		} else {
			amd64_mov_reg_membase (code, AMD64_R11, AMD64_RBP, tramp_offset, sizeof(gpointer));
#if defined(__default_codegen__)
			/* Load the size marker byte following the call instruction */
			amd64_mov_reg_membase (code, AMD64_RAX, AMD64_R11, 5, 1);
			amd64_widen_reg (code, AMD64_RAX, AMD64_RAX, TRUE, FALSE);
			amd64_alu_reg_imm_size (code, X86_CMP, AMD64_RAX, 4, 1);
			br [0] = code;
			x86_branch8 (code, X86_CC_NE, 6, FALSE);
			/* 32 bit immediate */
			amd64_mov_reg_membase (code, AMD64_R11, AMD64_R11, 6, 4);
			br [1] = code;
			x86_jump8 (code, 10);
			/* 64 bit immediate */
			mono_amd64_patch (br [0], code);
			amd64_mov_reg_membase (code, AMD64_R11, AMD64_R11, 6, 8);
			mono_amd64_patch (br [1], code);
#elif defined(__native_client_codegen__)
			/* All args are 32-bit pointers in NaCl */
			amd64_mov_reg_membase (code, AMD64_R11, AMD64_R11, 6, 4);
#endif
		}
		amd64_mov_membase_reg (code, AMD64_RBP, arg_offset, AMD64_R11, sizeof(gpointer));
	} else {
		amd64_mov_reg_membase (code, AMD64_R11, AMD64_RBP, saved_regs_offset + (MONO_AMD64_ARG_REG1 * sizeof(mgreg_t)), sizeof(mgreg_t));
		amd64_mov_membase_reg (code, AMD64_RBP, arg_offset, AMD64_R11, sizeof(gpointer));
	}
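	/*
	 * The decoding above mirrors what mono_arch_create_specific_trampoline ()
	 * emits: a 5-byte call, then a one-byte size marker (0x4 or 0x8), then the
	 * 32- or 64-bit argument, so the marker is read at offset 5 from the call
	 * site and the immediate at offset 6.
	 */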
	/* Save ip */
	if (has_caller)
		amd64_mov_reg_membase (code, AMD64_R11, AMD64_RBP, 8, sizeof(gpointer));
	else
		amd64_mov_reg_imm (code, AMD64_R11, 0);
	amd64_mov_membase_reg (code, AMD64_RBP, lmf_offset + MONO_STRUCT_OFFSET (MonoLMF, rip), AMD64_R11, sizeof(mgreg_t));
	/* Save sp */
	amd64_mov_reg_reg (code, AMD64_R11, AMD64_RSP, sizeof(mgreg_t));
	amd64_alu_reg_imm (code, X86_ADD, AMD64_R11, framesize + 16);
	amd64_mov_membase_reg (code, AMD64_RBP, lmf_offset + MONO_STRUCT_OFFSET (MonoLMF, rsp), AMD64_R11, sizeof(mgreg_t));
	/* Save pointer to context */
	amd64_lea_membase (code, AMD64_R11, AMD64_RBP, ctx_offset);
	amd64_mov_membase_reg (code, AMD64_RBP, lmf_offset + MONO_STRUCT_OFFSET (MonoLMFTramp, ctx), AMD64_R11, sizeof(mgreg_t));

	if (aot) {
		code = mono_arch_emit_load_aotconst (buf, code, &ji, MONO_PATCH_INFO_JIT_ICALL_ADDR, "mono_get_lmf_addr");
	} else {
		amd64_mov_reg_imm (code, AMD64_R11, mono_get_lmf_addr);
	}
	amd64_call_reg (code, AMD64_R11);

	/* Save lmf_addr */
	amd64_mov_membase_reg (code, AMD64_RBP, lmf_offset + MONO_STRUCT_OFFSET (MonoLMFTramp, lmf_addr), AMD64_RAX, sizeof(gpointer));
	/* Save previous_lmf */
	/* Set the lowest bit to signal that this LMF has the ip field set */
	/* Set the third lowest bit to signal that this is a MonoLMFTramp structure */
	amd64_mov_reg_membase (code, AMD64_R11, AMD64_RAX, 0, sizeof(gpointer));
	amd64_alu_reg_imm_size (code, X86_ADD, AMD64_R11, 0x5, sizeof(gpointer));
	amd64_mov_membase_reg (code, AMD64_RBP, lmf_offset + MONO_STRUCT_OFFSET (MonoLMF, previous_lmf), AMD64_R11, sizeof(gpointer));
	/* Set new lmf */
	amd64_lea_membase (code, AMD64_R11, AMD64_RBP, lmf_offset);
	amd64_mov_membase_reg (code, AMD64_RAX, 0, AMD64_R11, sizeof(gpointer));
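	/*
	 * previous_lmf is stored as a tagged pointer: adding 0x5 sets bit 0 (this
	 * LMF has the ip field set) and bit 2 (this is a MonoLMFTramp), matching
	 * the comments above. The restore code below subtracts 0x5 again before
	 * writing the value back through lmf_addr.
	 */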
	/* Arg1 is the pointer to the saved registers */
	amd64_lea_membase (code, AMD64_ARG_REG1, AMD64_RBP, saved_regs_offset);

	/* Arg2 is the address of the calling code */
	if (has_caller)
		amd64_mov_reg_membase (code, AMD64_ARG_REG2, AMD64_RBP, 8, sizeof(gpointer));
	else
		amd64_mov_reg_imm (code, AMD64_ARG_REG2, 0);

	/* Arg3 is the method/vtable ptr */
	amd64_mov_reg_membase (code, AMD64_ARG_REG3, AMD64_RBP, arg_offset, sizeof(gpointer));

	/* Arg4 is the trampoline address */
	amd64_mov_reg_membase (code, AMD64_ARG_REG4, AMD64_RBP, tramp_offset, sizeof(gpointer));

	if (aot) {
		char *icall_name = g_strdup_printf ("trampoline_func_%d", tramp_type);
		code = mono_arch_emit_load_aotconst (buf, code, &ji, MONO_PATCH_INFO_JIT_ICALL_ADDR, icall_name);
	} else {
		tramp = (guint8*)mono_get_trampoline_func (tramp_type);
		amd64_mov_reg_imm (code, AMD64_R11, tramp);
	}
	amd64_call_reg (code, AMD64_R11);
	amd64_mov_membase_reg (code, AMD64_RBP, res_offset, AMD64_RAX, sizeof(mgreg_t));
	/* Restore LMF */
	amd64_mov_reg_membase (code, AMD64_RCX, AMD64_RBP, lmf_offset + MONO_STRUCT_OFFSET (MonoLMF, previous_lmf), sizeof(gpointer));
	amd64_alu_reg_imm_size (code, X86_SUB, AMD64_RCX, 0x5, sizeof(gpointer));
	amd64_mov_reg_membase (code, AMD64_R11, AMD64_RBP, lmf_offset + MONO_STRUCT_OFFSET (MonoLMFTramp, lmf_addr), sizeof(gpointer));
	amd64_mov_membase_reg (code, AMD64_R11, 0, AMD64_RCX, sizeof(gpointer));

	/*
	 * Save rax to the stack, after the leave instruction, this will become part of
	 * the red zone.
	 */
	amd64_mov_reg_membase (code, AMD64_RAX, AMD64_RBP, res_offset, sizeof(mgreg_t));
	amd64_mov_membase_reg (code, AMD64_RBP, rax_offset, AMD64_RAX, sizeof(mgreg_t));
	/* Check for thread interruption */
	/* This is not perf critical code so no need to check the interrupt flag */
	/*
	 * Have to call the _force_ variant, since there could be a protected wrapper on the top of the stack.
	 */
	if (aot) {
		code = mono_arch_emit_load_aotconst (buf, code, &ji, MONO_PATCH_INFO_JIT_ICALL_ADDR, "mono_thread_force_interruption_checkpoint_noraise");
	} else {
		amd64_mov_reg_imm (code, AMD64_R11, (guint8*)mono_thread_force_interruption_checkpoint_noraise);
	}
	amd64_call_reg (code, AMD64_R11);

	amd64_test_reg_reg (code, AMD64_RAX, AMD64_RAX);
	br_ex_check = code;
	amd64_branch8 (code, X86_CC_Z, -1, 1);
	/*
	 * Exception case:
	 * We have an exception we want to throw in the caller's frame, so pop
	 * the trampoline frame and throw from the caller.
	 */
	amd64_leave (code);
	/* We are in the parent frame, the exception is in rax */
	/*
	 * EH is initialized after trampolines, so get the address of the variable
	 * which contains throw_exception, and load it from there.
	 */
	if (aot) {
		/* Not really a jit icall */
		code = mono_arch_emit_load_aotconst (buf, code, &ji, MONO_PATCH_INFO_JIT_ICALL_ADDR, "throw_exception_addr");
	} else {
		amd64_mov_reg_imm (code, AMD64_R11, (guint8*)mono_get_throw_exception_addr ());
	}
	amd64_mov_reg_membase (code, AMD64_R11, AMD64_R11, 0, sizeof(gpointer));
	amd64_mov_reg_reg (code, AMD64_ARG_REG1, AMD64_RAX, sizeof(mgreg_t));
	/*
	 * We still have the original return value on the top of the stack, so the
	 * throw trampoline will use that as the throw site.
	 */
	amd64_jump_reg (code, AMD64_R11);

	/* Normal case */
	mono_amd64_patch (br_ex_check, code);
	/* Restore argument registers, r10 (imt method/rgctx)
	   and rax (needed for direct calls to C vararg functions). */
	for (i = 0; i < AMD64_NREG; ++i)
		if (AMD64_IS_ARGUMENT_REG (i) || i == AMD64_R10 || i == AMD64_RAX)
			amd64_mov_reg_membase (code, i, AMD64_RBP, saved_regs_offset + (i * sizeof(mgreg_t)), sizeof(mgreg_t));
	for (i = 0; i < 8; ++i)
		amd64_movsd_reg_membase (code, i, AMD64_RBP, saved_fpregs_offset + (i * sizeof(mgreg_t)));
	/* Restore stack */
	amd64_leave (code);
	cfa_offset -= sizeof (mgreg_t);
	mono_add_unwind_op_def_cfa (unwind_ops, code, buf, AMD64_RSP, cfa_offset);

	if (MONO_TRAMPOLINE_TYPE_MUST_RETURN (tramp_type)) {
		/* Load result */
		amd64_mov_reg_membase (code, AMD64_RAX, AMD64_RSP, rax_offset - sizeof(mgreg_t), sizeof(mgreg_t));
		amd64_ret (code);
	} else {
		/* call the compiled method using the saved rax */
		amd64_jump_membase (code, AMD64_RSP, rax_offset - sizeof(mgreg_t));
	}
	g_assert ((code - buf) <= kMaxCodeSize);

	nacl_global_codeman_validate (&buf, kMaxCodeSize, &code);

	mono_arch_flush_icache (buf, code - buf);
	mono_profiler_code_buffer_new (buf, code - buf, MONO_PROFILER_CODE_BUFFER_HELPER, NULL);

	tramp_name = mono_get_generic_trampoline_name (tramp_type);
	*info = mono_tramp_info_create (tramp_name, buf, code - buf, ji, unwind_ops);
	g_free (tramp_name);

	return buf;
}
gpointer
mono_arch_create_specific_trampoline (gpointer arg1, MonoTrampolineType tramp_type, MonoDomain *domain, guint32 *code_len)
{
	guint8 *code, *buf, *tramp;
	int size;
	gboolean far_addr = FALSE;

	tramp = mono_get_trampoline_code (tramp_type);

#if defined(__default_codegen__)
	if ((((guint64)arg1) >> 32) == 0)
		size = 5 + 1 + 4;
	else
		size = 5 + 1 + 8;

	code = buf = (guint8 *)mono_domain_code_reserve_align (domain, size, 1);

	if (((gint64)tramp - (gint64)code) >> 31 != 0 && ((gint64)tramp - (gint64)code) >> 31 != -1) {
#ifndef MONO_ARCH_NOMAP32BIT
		g_assert_not_reached ();
#endif
		far_addr = TRUE;
		size += 16;
		code = buf = (guint8 *)mono_domain_code_reserve_align (domain, size, 1);
	}
#elif defined(__native_client_codegen__)
	size = 5 + 1 + 4;
	/* Aligning the call site below could */
	/* add up to kNaClAlignment-1 bytes   */
	size += (kNaClAlignment-1);
	size = NACL_BUNDLE_ALIGN_UP (size);
	code = buf = mono_domain_code_reserve_align (domain, size, kNaClAlignment);
#endif

	if (far_addr) {
		amd64_mov_reg_imm (code, AMD64_R11, tramp);
		amd64_call_reg (code, AMD64_R11);
	} else {
		amd64_call_code (code, tramp);
	}
	/* The trampoline code will obtain the argument from the instruction stream */
#if defined(__default_codegen__)
	if ((((guint64)arg1) >> 32) == 0) {
		*code = 0x4;
		*(guint32*)(code + 1) = (gint64)arg1;
		code += 5;
	} else {
		*code = 0x8;
		*(guint64*)(code + 1) = (gint64)arg1;
		code += 9;
	}
#elif defined(__native_client_codegen__)
	/* For NaCl, all tramp args are 32-bit because they're pointers */
	*code = 0x68; /* push imm32 */
	*(guint32*)(code + 1) = (gint32)arg1;
	code += 5;
#endif
	g_assert ((code - buf) <= size);

	if (code_len)
		*code_len = size;

	nacl_domain_code_validate (domain, &buf, size, &code);

	mono_arch_flush_icache (buf, size);
	mono_profiler_code_buffer_new (buf, code - buf, MONO_PROFILER_CODE_BUFFER_SPECIFIC_TRAMPOLINE, mono_get_generic_trampoline_simple_name (tramp_type));

	return buf;
}
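/*
 * So, for the default codegen, a specific trampoline is one of (a sketch,
 * sizes per the computation above):
 *
 *   e8 <rel32>  04 <imm32>    10 bytes, 32-bit argument
 *   e8 <rel32>  08 <imm64>    14 bytes, 64-bit argument
 *
 * with a mov %r11/call *%r11 pair replacing the e8 call when the generic
 * trampoline is out of rel32 range. The generic trampoline reads the marker
 * byte and the immediate back out of this instruction stream.
 */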
gpointer
mono_arch_create_rgctx_lazy_fetch_trampoline (guint32 slot, MonoTrampInfo **info, gboolean aot)
{
	guint8 *tramp;
	guint8 *code, *buf;
	guint8 **rgctx_null_jumps;
	int tramp_size;
	int depth, index;
	int i;
	gboolean mrgctx;
	MonoJumpInfo *ji = NULL;
	GSList *unwind_ops;

	mrgctx = MONO_RGCTX_SLOT_IS_MRGCTX (slot);
	index = MONO_RGCTX_SLOT_INDEX (slot);
	if (mrgctx)
		index += MONO_SIZEOF_METHOD_RUNTIME_GENERIC_CONTEXT / sizeof (gpointer);
	for (depth = 0; ; ++depth) {
		int size = mono_class_rgctx_get_array_size (depth, mrgctx);

		if (index < size - 1)
			break;
		index -= size - 1;
	}

	tramp_size = NACL_SIZE (64 + 8 * depth, 128 + 8 * depth);

	code = buf = (guint8 *)mono_global_codeman_reserve (tramp_size);

	unwind_ops = mono_arch_get_cie_program ();

	rgctx_null_jumps = (guint8 **)g_malloc (sizeof (guint8*) * (depth + 2));

	if (mrgctx) {
		/* get mrgctx ptr */
		amd64_mov_reg_reg (code, AMD64_RAX, AMD64_ARG_REG1, 8);
	} else {
		/* load rgctx ptr from vtable */
		amd64_mov_reg_membase (code, AMD64_RAX, AMD64_ARG_REG1, MONO_STRUCT_OFFSET (MonoVTable, runtime_generic_context), sizeof(gpointer));
		/* is the rgctx ptr null? */
		amd64_test_reg_reg (code, AMD64_RAX, AMD64_RAX);
		/* if yes, jump to actual trampoline */
		rgctx_null_jumps [0] = code;
		amd64_branch8 (code, X86_CC_Z, -1, 1);
	}

	for (i = 0; i < depth; ++i) {
		/* load ptr to next array */
		if (mrgctx && i == 0)
			amd64_mov_reg_membase (code, AMD64_RAX, AMD64_RAX, MONO_SIZEOF_METHOD_RUNTIME_GENERIC_CONTEXT, sizeof(gpointer));
		else
			amd64_mov_reg_membase (code, AMD64_RAX, AMD64_RAX, 0, sizeof(gpointer));
		/* is the ptr null? */
		amd64_test_reg_reg (code, AMD64_RAX, AMD64_RAX);
		/* if yes, jump to actual trampoline */
		rgctx_null_jumps [i + 1] = code;
		amd64_branch8 (code, X86_CC_Z, -1, 1);
	}

	/* fetch slot */
	amd64_mov_reg_membase (code, AMD64_RAX, AMD64_RAX, sizeof (gpointer) * (index + 1), sizeof(gpointer));
	/* is the slot null? */
	amd64_test_reg_reg (code, AMD64_RAX, AMD64_RAX);
	/* if yes, jump to actual trampoline */
	rgctx_null_jumps [depth + 1] = code;
	amd64_branch8 (code, X86_CC_Z, -1, 1);
	/* otherwise return */
	amd64_ret (code);

	for (i = mrgctx ? 1 : 0; i <= depth + 1; ++i)
		mono_amd64_patch (rgctx_null_jumps [i], code);

	g_free (rgctx_null_jumps);

	/* move the rgctx pointer to the VTABLE register */
	amd64_mov_reg_reg (code, MONO_ARCH_VTABLE_REG, AMD64_ARG_REG1, sizeof(gpointer));

	if (aot) {
		code = mono_arch_emit_load_aotconst (buf, code, &ji, MONO_PATCH_INFO_JIT_ICALL_ADDR, g_strdup_printf ("specific_trampoline_lazy_fetch_%u", slot));
		amd64_jump_reg (code, AMD64_R11);
	} else {
		tramp = (guint8 *)mono_arch_create_specific_trampoline (GUINT_TO_POINTER (slot), MONO_TRAMPOLINE_RGCTX_LAZY_FETCH, mono_get_root_domain (), NULL);

		/* jump to the actual trampoline */
		amd64_jump_code (code, tramp);
	}

	nacl_global_codeman_validate (&buf, tramp_size, &code);
	mono_arch_flush_icache (buf, code - buf);
	mono_profiler_code_buffer_new (buf, code - buf, MONO_PROFILER_CODE_BUFFER_GENERICS_TRAMPOLINE, NULL);

	g_assert (code - buf <= tramp_size);

	char *name = mono_get_rgctx_fetch_trampoline_name (slot);
	*info = mono_tramp_info_create (name, buf, code - buf, ji, unwind_ops);
	g_free (name);

	return buf;
}
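/*
 * For example, with mrgctx == FALSE and depth == 1 the fast path emitted
 * above is, schematically:
 *
 *   mov  vtable->runtime_generic_context, %rax
 *   test %rax, %rax; jz slow
 *   mov  (%rax), %rax                  ; descend to the next array
 *   test %rax, %rax; jz slow
 *   mov  8*(index+1)(%rax), %rax       ; the slot itself
 *   test %rax, %rax; jz slow
 *   ret
 * slow:
 *   mov  %arg1, %vtable_reg
 *   jmp  <lazy fetch trampoline for SLOT>
 */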
void
mono_arch_invalidate_method (MonoJitInfo *ji, void *func, gpointer func_arg)
{
	/* FIXME: This is not thread safe */
	guint8 *code = (guint8 *)ji->code_start;

	amd64_mov_reg_imm (code, AMD64_ARG_REG1, func_arg);
	amd64_mov_reg_imm (code, AMD64_R11, func);

	x86_push_imm (code, (guint64)func_arg);
	amd64_call_reg (code, AMD64_R11);
}
static void
handler_block_trampoline_helper (gpointer *ptr)
{
	MonoJitTlsData *jit_tls = (MonoJitTlsData *)mono_native_tls_get_value (mono_jit_tls_id);

	*ptr = jit_tls->handler_block_return_address;
}
gpointer
mono_arch_create_handler_block_trampoline (MonoTrampInfo **info, gboolean aot)
{
	guint8 *tramp = mono_get_trampoline_code (MONO_TRAMPOLINE_HANDLER_BLOCK_GUARD);
	guint8 *code, *buf;
	int tramp_size = 64;
	MonoJumpInfo *ji = NULL;
	GSList *unwind_ops;

	code = buf = (guint8 *)mono_global_codeman_reserve (tramp_size);

	unwind_ops = mono_arch_get_cie_program ();

	/*
	 * This trampoline restores the call chain of the handler block then jumps into the code that deals with it.
	 */
	if (mono_get_jit_tls_offset () != -1) {
		code = mono_amd64_emit_tls_get (code, MONO_AMD64_ARG_REG1, mono_get_jit_tls_offset ());
		amd64_mov_reg_membase (code, MONO_AMD64_ARG_REG1, MONO_AMD64_ARG_REG1, MONO_STRUCT_OFFSET (MonoJitTlsData, handler_block_return_address), 8);
		/* Simulate a call */
		amd64_push_reg (code, AMD64_RAX);
		mono_add_unwind_op_def_cfa_offset (unwind_ops, code, buf, 16);
		amd64_jump_code (code, tramp);
	} else {
		/* Slow path uses a C helper */
		amd64_mov_reg_reg (code, MONO_AMD64_ARG_REG1, AMD64_RSP, 8);
		amd64_mov_reg_imm (code, AMD64_RAX, tramp);
		amd64_push_reg (code, AMD64_RAX);
		mono_add_unwind_op_def_cfa_offset (unwind_ops, code, buf, 16);
		amd64_push_reg (code, AMD64_RAX);
		mono_add_unwind_op_def_cfa_offset (unwind_ops, code, buf, 24);
		amd64_jump_code (code, handler_block_trampoline_helper);
	}

	mono_arch_flush_icache (buf, code - buf);
	mono_profiler_code_buffer_new (buf, code - buf, MONO_PROFILER_CODE_BUFFER_HELPER, NULL);
	g_assert (code - buf <= tramp_size);

	*info = mono_tramp_info_create ("handler_block_trampoline", buf, code - buf, ji, unwind_ops);

	return buf;
}
/*
 * mono_arch_get_call_target:
 *
 * Return the address called by the code before CODE if it exists.
 */
guint8*
mono_arch_get_call_target (guint8 *code)
{
	if (code [-5] == 0xe8) {
		gint32 disp = *(gint32*)(code - 4);
		guint8 *target = code + disp;

		return target;
	} else {
		return NULL;
	}
}
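/*
 * A direct amd64 call is "e8 <rel32>", with the displacement relative to the
 * end of the instruction, which is exactly CODE here; hence target =
 * code + disp above.
 */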
/*
 * mono_arch_get_plt_info_offset:
 *
 * Return the PLT info offset belonging to the plt entry PLT_ENTRY.
 */
guint32
mono_arch_get_plt_info_offset (guint8 *plt_entry, mgreg_t *regs, guint8 *code)
{
#if defined(__native_client__) || defined(__native_client_codegen__)
	/* 18 = 3 (mov opcode) + 4 (disp) + 10 (nacljmp) + 1 (push opcode) */
	/* See aot-compiler.c arch_emit_plt_entry for details. */
	return *(guint32*)(plt_entry + 18);
#else
	return *(guint32*)(plt_entry + 6);
#endif
}
/*
 * mono_arch_create_sdb_trampoline:
 *
 * Return a trampoline which captures the current context, passes it to
 * debugger_agent_single_step_from_context ()/debugger_agent_breakpoint_from_context (),
 * then restores the (potentially changed) context.
 */
guint8*
mono_arch_create_sdb_trampoline (gboolean single_step, MonoTrampInfo **info, gboolean aot)
{
	int tramp_size = 256;
	int i, framesize, ctx_offset, cfa_offset, gregs_offset;
	guint8 *code, *buf;
	GSList *unwind_ops = NULL;
	MonoJumpInfo *ji = NULL;

	code = buf = (guint8 *)mono_global_codeman_reserve (tramp_size);

	framesize = 0;
#ifdef TARGET_WIN32
	/* Reserve space where the callee can save the argument registers */
	framesize += 4 * sizeof (mgreg_t);
#endif

	ctx_offset = framesize;
	framesize += sizeof (MonoContext);

	framesize = ALIGN_TO (framesize, MONO_ARCH_FRAME_ALIGNMENT);

	// CFA = sp + 8
	cfa_offset = 8;
	mono_add_unwind_op_def_cfa (unwind_ops, code, buf, AMD64_RSP, 8);
	// IP saved at CFA - 8
	mono_add_unwind_op_offset (unwind_ops, code, buf, AMD64_RIP, -cfa_offset);

	amd64_push_reg (code, AMD64_RBP);
	cfa_offset += sizeof(mgreg_t);
	mono_add_unwind_op_def_cfa_offset (unwind_ops, code, buf, cfa_offset);
	mono_add_unwind_op_offset (unwind_ops, code, buf, AMD64_RBP, - cfa_offset);

	amd64_mov_reg_reg (code, AMD64_RBP, AMD64_RSP, sizeof(mgreg_t));
	mono_add_unwind_op_def_cfa_reg (unwind_ops, code, buf, AMD64_RBP);
	amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, framesize);

	gregs_offset = ctx_offset + MONO_STRUCT_OFFSET (MonoContext, gregs);

	/* Initialize a MonoContext structure on the stack */
	for (i = 0; i < AMD64_NREG; ++i) {
		if (i != AMD64_RIP && i != AMD64_RSP && i != AMD64_RBP)
			amd64_mov_membase_reg (code, AMD64_RSP, gregs_offset + (i * sizeof (mgreg_t)), i, sizeof (mgreg_t));
	}
	amd64_mov_reg_membase (code, AMD64_R11, AMD64_RBP, 0, sizeof (mgreg_t));
	amd64_mov_membase_reg (code, AMD64_RSP, gregs_offset + (AMD64_RBP * sizeof (mgreg_t)), AMD64_R11, sizeof (mgreg_t));
	amd64_lea_membase (code, AMD64_R11, AMD64_RBP, 2 * sizeof (mgreg_t));
	amd64_mov_membase_reg (code, AMD64_RSP, gregs_offset + (AMD64_RSP * sizeof (mgreg_t)), AMD64_R11, sizeof (mgreg_t));
	amd64_mov_reg_membase (code, AMD64_R11, AMD64_RBP, sizeof (mgreg_t), sizeof (mgreg_t));
	amd64_mov_membase_reg (code, AMD64_RSP, gregs_offset + (AMD64_RIP * sizeof (mgreg_t)), AMD64_R11, sizeof (mgreg_t));
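	/*
	 * At this point [%rbp] holds the interrupted code's %rbp, [%rbp + 8] its
	 * return address (the interrupted %rip), and %rbp + 16 is the value %rsp
	 * had before the call into the trampoline; the three fix-ups above store
	 * exactly these into the MonoContext.
	 */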
	/* Call the single step/breakpoint function in sdb */
	amd64_lea_membase (code, AMD64_ARG_REG1, AMD64_RSP, ctx_offset);

	if (aot) {
		if (single_step)
			code = mono_arch_emit_load_aotconst (buf, code, &ji, MONO_PATCH_INFO_JIT_ICALL_ADDR, "debugger_agent_single_step_from_context");
		else
			code = mono_arch_emit_load_aotconst (buf, code, &ji, MONO_PATCH_INFO_JIT_ICALL_ADDR, "debugger_agent_breakpoint_from_context");
	} else {
		if (single_step)
			amd64_mov_reg_imm (code, AMD64_R11, debugger_agent_single_step_from_context);
		else
			amd64_mov_reg_imm (code, AMD64_R11, debugger_agent_breakpoint_from_context);
	}
	amd64_call_reg (code, AMD64_R11);

	/* Restore registers from ctx */
	for (i = 0; i < AMD64_NREG; ++i) {
		if (i != AMD64_RIP && i != AMD64_RSP && i != AMD64_RBP)
			amd64_mov_reg_membase (code, i, AMD64_RSP, gregs_offset + (i * sizeof (mgreg_t)), sizeof (mgreg_t));
	}
	amd64_mov_reg_membase (code, AMD64_R11, AMD64_RSP, gregs_offset + (AMD64_RBP * sizeof (mgreg_t)), sizeof (mgreg_t));
	amd64_mov_membase_reg (code, AMD64_RBP, 0, AMD64_R11, sizeof (mgreg_t));
	amd64_mov_reg_membase (code, AMD64_R11, AMD64_RSP, gregs_offset + (AMD64_RIP * sizeof (mgreg_t)), sizeof (mgreg_t));
	amd64_mov_membase_reg (code, AMD64_RBP, sizeof (mgreg_t), AMD64_R11, sizeof (mgreg_t));

	amd64_leave (code);
	cfa_offset -= sizeof (mgreg_t);
	mono_add_unwind_op_def_cfa (unwind_ops, code, buf, AMD64_RSP, cfa_offset);
	amd64_ret (code);

	mono_arch_flush_icache (buf, code - buf);
	mono_profiler_code_buffer_new (buf, code - buf, MONO_PROFILER_CODE_BUFFER_HELPER, NULL);
	g_assert (code - buf <= tramp_size);

	const char *tramp_name = single_step ? "sdb_single_step_trampoline" : "sdb_breakpoint_trampoline";
	*info = mono_tramp_info_create (tramp_name, buf, code - buf, ji, unwind_ops);

	return buf;
}
#if defined(ENABLE_GSHAREDVT) && defined(MONO_ARCH_GSHAREDVT_SUPPORTED)

#include "../../../mono-extensions/mono/mini/tramp-amd64-gsharedvt.c"

#else

gpointer
mono_amd64_start_gsharedvt_call (GSharedVtCallInfo *info, gpointer *caller, gpointer *callee, gpointer mrgctx_reg)
{
	g_assert_not_reached ();
	return NULL;
}

gpointer
mono_arch_get_gsharedvt_arg_trampoline (MonoDomain *domain, gpointer arg, gpointer addr)
{
	g_assert_not_reached ();
	return NULL;
}

guint8*
mono_arch_get_gsharedvt_trampoline (MonoTrampInfo **info, gboolean aot)
{
	g_assert_not_reached ();
	return NULL;
}

#endif /* !ENABLE_GSHAREDVT */