/*
 * tramp-amd64.c: JIT trampoline code for amd64
 *
 * Authors:
 *   Dietmar Maurer (dietmar@ximian.com)
 *   Zoltan Varga (vargaz@gmail.com)
 *
 * (C) 2001 Ximian, Inc.
 */
#include <config.h>
#include <glib.h>

#include <mono/metadata/appdomain.h>
#include <mono/metadata/marshal.h>
#include <mono/metadata/tabledefs.h>
#include <mono/metadata/mono-debug-debugger.h>
#include <mono/arch/amd64/amd64-codegen.h>

#ifdef HAVE_VALGRIND_MEMCHECK_H
#include <valgrind/memcheck.h>
#endif

#include "mini.h"
#include "mini-amd64.h"
#define IS_REX(inst) (((inst) >= 0x40) && ((inst) <= 0x4f))

static guint8* nullified_class_init_trampoline;
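/*
 * nullified_class_init_trampoline points at a do-nothing trampoline (a bare
 * ret, created at the end of mono_arch_create_trampoline_code () below).
 * Class init call sites are rewritten to call it once the class has been
 * initialized, so later calls return immediately.
 */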
/*
 * mono_arch_get_unbox_trampoline:
 * @m: method pointer
 * @addr: pointer to native code for @m
 *
 * When value type methods are called through the vtable, the `this' argument
 * needs to be unboxed first. This function returns a pointer to a trampoline
 * which unboxes `this' and then calls @addr.
 */
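/*
 * Illustrative only: assuming this_reg turns out to be RDI, the generated
 * trampoline is just
 *
 *     add $sizeof(MonoObject), %rdi   ; skip the object header
 *     mov $<addr>, %rax
 *     jmp *%rax
 */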
gpointer
mono_arch_get_unbox_trampoline (MonoMethod *m, gpointer addr)
{
	guint8 *code, *start;
	int this_reg;
	MonoDomain *domain = mono_domain_get ();

	this_reg = mono_arch_get_this_arg_reg (mono_method_signature (m), NULL);

	mono_domain_lock (domain);
	start = code = mono_code_manager_reserve (domain->code_mp, 20);
	mono_domain_unlock (domain);

	amd64_alu_reg_imm (code, X86_ADD, this_reg, sizeof (MonoObject));
	/* FIXME: Optimize this */
	amd64_mov_reg_imm (code, AMD64_RAX, addr);
	amd64_jump_reg (code, AMD64_RAX);
	g_assert ((code - start) < 20);

	mono_arch_flush_icache (start, code - start);

	return start;
}
/*
 * mono_arch_patch_callsite:
 *
 * Patch the callsite whose address is given by ORIG_CODE so that it calls ADDR.
 * ORIG_CODE points to the pc right after the call.
 */
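/*
 * Illustrative sketch of the call sequences recognized below; offsets are
 * relative to ORIG_CODE, which points just past the call:
 *
 *     49 bb <imm64>; 41 ff d3    mov $<target>, %r11; call *%r11  (13 bytes)
 *     e8 <rel32>                 call <target>                    (5 bytes)
 *     41 ff 15 <disp32>          call *<OFFSET>(%rip)             (7 bytes)
 *
 * The first form is patched by overwriting the 64 bit immediate at
 * ORIG_CODE - 11, the second by overwriting the displacement at
 * ORIG_CODE - 4, and the third by overwriting the GOT entry it loads from.
 */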
void
mono_arch_patch_callsite (guint8 *method_start, guint8 *orig_code, guint8 *addr)
{
	guint8 buf [16];
	guint8 *code;
	gboolean can_write = mono_breakpoint_clean_code (method_start, orig_code, 14, buf, sizeof (buf));

	/* Point CODE just past the copied bytes so the negative indices below
	   address the instructions preceding ORIG_CODE */
	code = buf + 14;

	if (((code [-13] == 0x49) && (code [-12] == 0xbb)) || (code [-5] == 0xe8)) {
		if (code [-5] != 0xe8) {
			/* mov $<ADDR>, %r11; call *%r11: patch the 64 bit immediate */
			if (can_write) {
				InterlockedExchangePointer ((gpointer*)(orig_code - 11), addr);
#ifdef HAVE_VALGRIND_MEMCHECK_H
				VALGRIND_DISCARD_TRANSLATIONS (orig_code - 11, sizeof (gpointer));
#endif
			}
		} else {
			/* call <DISP>: both the call site and the target have to fit into 32 bits */
			if ((((guint64)(addr)) >> 32) != 0) {
				/* Print some diagnostics */
				MonoJitInfo *ji = mono_jit_info_table_find (mono_domain_get (), (char*)orig_code);
				if (ji)
					fprintf (stderr, "At %s, offset 0x%zx\n", mono_method_full_name (ji->method, TRUE), (guint8*)orig_code - (guint8*)ji->code_start);
				fprintf (stderr, "Addr: %p\n", addr);
				ji = mono_jit_info_table_find (mono_domain_get (), (char*)addr);
				if (ji)
					fprintf (stderr, "Callee: %s\n", mono_method_full_name (ji->method, TRUE));
				g_assert_not_reached ();
			}
			g_assert ((((guint64)(orig_code)) >> 32) == 0);
			if (can_write) {
				InterlockedExchange ((gint32*)(orig_code - 4), ((gint64)addr - (gint64)orig_code));
#ifdef HAVE_VALGRIND_MEMCHECK_H
				VALGRIND_DISCARD_TRANSLATIONS (orig_code - 5, 4);
#endif
			}
		}
	}
	else if ((code [-7] == 0x41) && (code [-6] == 0xff) && (code [-5] == 0x15)) {
		/* call *<OFFSET>(%rip) */
		gpointer *got_entry = (gpointer*)((guint8*)orig_code + (*(guint32*)(orig_code - 4)));
		if (can_write) {
			InterlockedExchangePointer (got_entry, addr);
#ifdef HAVE_VALGRIND_MEMCHECK_H
			VALGRIND_DISCARD_TRANSLATIONS (orig_code - 5, sizeof (gpointer));
#endif
		}
	}
}
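/*
 * Illustrative only: a PLT entry starts with the 6 byte sequence
 *
 *     ff 25 <disp32>    jmp *<DISP>(%rip)
 *
 * Since RIP already points past the instruction when the displacement is
 * applied, the jump table slot holding the target lives at code + 6 + DISP.
 */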
void
mono_arch_patch_plt_entry (guint8 *code, guint8 *addr)
{
	gint32 disp;
	gpointer *plt_jump_table_entry;

	/* A PLT entry: jmp *<DISP>(%rip) */
	g_assert (code [0] == 0xff);
	g_assert (code [1] == 0x25);

	disp = *(gint32*)(code + 2);

	plt_jump_table_entry = (gpointer*)(code + 6 + disp);

	InterlockedExchangePointer (plt_jump_table_entry, addr);
}
void
mono_arch_nullify_class_init_trampoline (guint8 *code, gssize *regs)
{
	guint8 buf [16];
	gboolean can_write = mono_breakpoint_clean_code (NULL, code, 7, buf, sizeof (buf));

	if (!can_write)
		return;

	code -= 3;
	/*
	 * A given byte sequence can match more than one case here, so we have to be
	 * really careful about the ordering of the cases. Longer sequences
	 * come first.
	 */
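	/*
	 * Illustrative only: CODE points 3 bytes before the return address of
	 * the class init call, so the recognized sequences line up like this:
	 *
	 *     41 ff 15 <disp32>          call *<OFFSET>(%rip)  -> code [-4] == 0x41
	 *     e8 <rel32>                 call <TARGET>         -> code [-2] == 0xe8
	 *     49 bb <imm64>; 41 ff d3    mov/call through %r11 -> code [0] == 0x41
	 */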
	if ((code [-4] == 0x41) && (code [-3] == 0xff) && (code [-2] == 0x15)) {
		gpointer *vtable_slot;

		/* call *<OFFSET>(%rip) */
		vtable_slot = mono_arch_get_vcall_slot_addr (code + 3, (gpointer*)regs);
		g_assert (vtable_slot);

		*vtable_slot = nullified_class_init_trampoline;
	} else if (code [-2] == 0xe8) {
		/* call <TARGET>: nop out the whole 5 byte call */
		guint8 *buf = code - 2;

		buf [0] = 0x66;
		buf [1] = 0x66;
		buf [2] = 0x90;
		buf [3] = 0x66;
		buf [4] = 0x90;
	} else if ((code [0] == 0x41) && (code [1] == 0xff)) {
		/* call <REG> */
		/* happens on machines without MAP_32BIT like freebsd */
		/* amd64_set_reg_template is 10 bytes long */
		guint8* buf = code - 10;

		/* FIXME: Make this thread safe */
		/* Padding code suggested by the AMD64 Opt Manual */
		buf [0] = 0x66;
		buf [1] = 0x66;
		buf [2] = 0x66;
		buf [3] = 0x90;
		buf [4] = 0x66;
		buf [5] = 0x66;
		buf [6] = 0x66;
		buf [7] = 0x90;
		buf [8] = 0x66;
		buf [9] = 0x66;
		buf [10] = 0x66;
		buf [11] = 0x90;
		buf [12] = 0x90;
	} else if (code [0] == 0x90 || code [0] == 0xeb || code [0] == 0x66) {
		/* Already changed by another thread */
		;
	} else {
		printf ("Invalid trampoline sequence: %x %x %x %x %x %x %x\n", code [0], code [1], code [2], code [3],
			code [4], code [5], code [6]);
		g_assert_not_reached ();
	}
}
void
mono_arch_nullify_plt_entry (guint8 *code)
{
	mono_arch_patch_plt_entry (code, nullified_class_init_trampoline);
}
guchar*
mono_arch_create_trampoline_code (MonoTrampolineType tramp_type)
{
	guint8 *buf, *code, *tramp, *br [2], *r11_save_code, *after_r11_save_code;
	int i, lmf_offset, offset, res_offset, arg_offset, tramp_offset, saved_regs_offset;
	int saved_fpregs_offset, rbp_offset, framesize, orig_rsp_to_rbp_offset;
	gboolean has_caller;

	if (tramp_type == MONO_TRAMPOLINE_JUMP)
		has_caller = FALSE;
	else
		has_caller = TRUE;
	code = buf = mono_global_codeman_reserve (524);

	framesize = 524 + sizeof (MonoLMF);
	framesize = (framesize + (MONO_ARCH_FRAME_ALIGNMENT - 1)) & ~ (MONO_ARCH_FRAME_ALIGNMENT - 1);
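	/* E.g. with a MONO_ARCH_FRAME_ALIGNMENT of 16 this rounds the frame
	   size up to the next multiple of 16, as the ABI requires at call sites */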
	if (tramp_type == MONO_TRAMPOLINE_GENERIC_CLASS_INIT) {
		static int byte_offset = -1;
		static guint8 bitmask;

		guint8 *jump;

		if (byte_offset < 0)
			mono_marshal_find_bitfield_offset (MonoVTable, initialized, &byte_offset, &bitmask);

		amd64_test_membase_imm_size (code, MONO_ARCH_VTABLE_REG, byte_offset, bitmask, 1);
		/* Jump to the trampoline body if the class is not yet initialized */
		jump = code;
		amd64_branch8 (code, X86_CC_Z, -1, 1);

		/* The class is already initialized: return straight to the caller */
		amd64_ret (code);

		x86_patch (jump, code);
	}
	orig_rsp_to_rbp_offset = 0;
	r11_save_code = code;
	/* Reserve 5 bytes for the mov_membase_reg to save R11 */
	code += 5;
	after_r11_save_code = code;
	/*
	 * The generic class init trampoline is called directly by
	 * JITted code, there is no specific trampoline. The lazy
	 * fetch trampolines behave like generic class init
	 * trampolines.
	 */
	if (tramp_type != MONO_TRAMPOLINE_GENERIC_CLASS_INIT &&
			tramp_type != MONO_TRAMPOLINE_RGCTX_LAZY_FETCH) {
		/* Pop the return address off the stack */
		amd64_pop_reg (code, AMD64_R11);
		orig_rsp_to_rbp_offset += 8;
	}
	/*
	 * Allocate a new stack frame
	 */
	amd64_push_reg (code, AMD64_RBP);
	orig_rsp_to_rbp_offset -= 8;
	amd64_mov_reg_reg (code, AMD64_RBP, AMD64_RSP, 8);
	amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, framesize);
	offset = 0;
	rbp_offset = - offset;

	offset += 8;
	tramp_offset = - offset;

	offset += 8;
	arg_offset = - offset;
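	/*
	 * Illustrative frame layout at this point (all offsets relative to the
	 * new RBP, growing downwards):
	 *
	 *     rbp_offset        (0)   : saved RBP of the caller
	 *     tramp_offset      (-8)  : address of the specific trampoline
	 *     arg_offset        (-16) : trampoline argument
	 *     res_offset        (-24) : result of the C trampoline function
	 *     saved_regs_offset       : AMD64_NREG general purpose registers
	 *     saved_fpregs_offset     : 8 floating point registers
	 *     lmf_offset              : a MonoLMF structure
	 */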
	if (tramp_type != MONO_TRAMPOLINE_GENERIC_CLASS_INIT &&
			tramp_type != MONO_TRAMPOLINE_RGCTX_LAZY_FETCH) {
		/* Compute the trampoline address from the return address */
		/* 5 = length of the call instruction at the start of the specific trampoline */
		amd64_alu_reg_imm (code, X86_SUB, AMD64_R11, 5);
		amd64_mov_membase_reg (code, AMD64_RBP, tramp_offset, AMD64_R11, 8);
	} else {
		amd64_mov_membase_imm (code, AMD64_RBP, tramp_offset, 0, 8);
	}
	offset += 8;
	res_offset = - offset;
	/* Save all registers */

	offset += AMD64_NREG * 8;
	saved_regs_offset = - offset;
	for (i = 0; i < AMD64_NREG; ++i) {
		if (i == AMD64_RBP) {
			/* RAX was already saved on a previous iteration, so it is
			   free to be clobbered while reloading the caller's RBP */
			amd64_mov_reg_membase (code, AMD64_RAX, AMD64_RBP, rbp_offset, 8);
			amd64_mov_membase_reg (code, AMD64_RBP, saved_regs_offset + (i * 8), AMD64_RAX, 8);
		} else if (i != AMD64_R11) {
			amd64_mov_membase_reg (code, AMD64_RBP, saved_regs_offset + (i * 8), i, 8);
		} else {
			/* We have to save R11 right at the start of
			   the trampoline code because it's used as a
			   scratch register */
			amd64_mov_membase_reg (r11_save_code, AMD64_RSP, saved_regs_offset + orig_rsp_to_rbp_offset + (i * 8), i, 8);
			g_assert (r11_save_code == after_r11_save_code);
		}
	}
	offset += 8 * 8;
	saved_fpregs_offset = - offset;
	for (i = 0; i < 8; ++i)
		amd64_movsd_membase_reg (code, AMD64_RBP, saved_fpregs_offset + (i * 8), i);
	if (tramp_type != MONO_TRAMPOLINE_GENERIC_CLASS_INIT &&
			tramp_type != MONO_TRAMPOLINE_RGCTX_LAZY_FETCH) {
		/* Obtain the trampoline argument which is encoded in the instruction stream */
		amd64_mov_reg_membase (code, AMD64_R11, AMD64_RBP, tramp_offset, 8);
		amd64_mov_reg_membase (code, AMD64_RAX, AMD64_R11, 5, 1);
		amd64_widen_reg (code, AMD64_RAX, AMD64_RAX, TRUE, FALSE);
		amd64_alu_reg_imm_size (code, X86_CMP, AMD64_RAX, 4, 1);
		br [0] = code;
		x86_branch8 (code, X86_CC_NE, 6, FALSE);
		/* 32 bit immediate */
		amd64_mov_reg_membase (code, AMD64_R11, AMD64_R11, 6, 4);
		br [1] = code;
		x86_jump8 (code, 10);
		/* 64 bit immediate */
		mono_amd64_patch (br [0], code);
		amd64_mov_reg_membase (code, AMD64_R11, AMD64_R11, 6, 8);
		mono_amd64_patch (br [1], code);
		amd64_mov_membase_reg (code, AMD64_RBP, arg_offset, AMD64_R11, 8);
	} else {
		amd64_mov_membase_reg (code, AMD64_RBP, arg_offset, MONO_ARCH_VTABLE_REG, 8);
	}
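	/*
	 * Illustrative only: the code above parses the layout emitted by
	 * mono_arch_create_specific_trampoline () below:
	 *
	 *     e8 <rel32>     call <generic trampoline>      (5 bytes)
	 *     04 | 08        size of the inline argument    (1 byte)
	 *     <arg>          32 or 64 bit argument
	 *
	 * [tramp + 5] is therefore the size byte and [tramp + 6] the argument.
	 */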
	/* Save LMF begin */

	offset += sizeof (MonoLMF);
	lmf_offset = - offset;
	/* Save ip */
	if (has_caller)
		amd64_mov_reg_membase (code, AMD64_R11, AMD64_RBP, 8, 8);
	else
		amd64_mov_reg_imm (code, AMD64_R11, 0);
	amd64_mov_membase_reg (code, AMD64_RBP, lmf_offset + G_STRUCT_OFFSET (MonoLMF, rip), AMD64_R11, 8);
	/* Save fp */
	amd64_mov_reg_membase (code, AMD64_R11, AMD64_RSP, framesize, 8);
	amd64_mov_membase_reg (code, AMD64_RBP, lmf_offset + G_STRUCT_OFFSET (MonoLMF, rbp), AMD64_R11, 8);
	/* Save sp */
	amd64_mov_reg_reg (code, AMD64_R11, AMD64_RSP, 8);
	amd64_alu_reg_imm (code, X86_ADD, AMD64_R11, framesize + 16);
	amd64_mov_membase_reg (code, AMD64_RBP, lmf_offset + G_STRUCT_OFFSET (MonoLMF, rsp), AMD64_R11, 8);
	/* Save method */
	if (tramp_type == MONO_TRAMPOLINE_GENERIC || tramp_type == MONO_TRAMPOLINE_JUMP) {
		amd64_mov_reg_membase (code, AMD64_R11, AMD64_RBP, arg_offset, 8);
		amd64_mov_membase_reg (code, AMD64_RBP, lmf_offset + G_STRUCT_OFFSET (MonoLMF, method), AMD64_R11, 8);
	} else {
		amd64_mov_membase_imm (code, AMD64_RBP, lmf_offset + G_STRUCT_OFFSET (MonoLMF, method), 0, 8);
	}
	/* Save callee saved regs */
#ifdef PLATFORM_WIN32
	amd64_mov_membase_reg (code, AMD64_RBP, lmf_offset + G_STRUCT_OFFSET (MonoLMF, rdi), AMD64_RDI, 8);
	amd64_mov_membase_reg (code, AMD64_RBP, lmf_offset + G_STRUCT_OFFSET (MonoLMF, rsi), AMD64_RSI, 8);
#endif
	amd64_mov_membase_reg (code, AMD64_RBP, lmf_offset + G_STRUCT_OFFSET (MonoLMF, rbx), AMD64_RBX, 8);
	amd64_mov_membase_reg (code, AMD64_RBP, lmf_offset + G_STRUCT_OFFSET (MonoLMF, r12), AMD64_R12, 8);
	amd64_mov_membase_reg (code, AMD64_RBP, lmf_offset + G_STRUCT_OFFSET (MonoLMF, r13), AMD64_R13, 8);
	amd64_mov_membase_reg (code, AMD64_RBP, lmf_offset + G_STRUCT_OFFSET (MonoLMF, r14), AMD64_R14, 8);
	amd64_mov_membase_reg (code, AMD64_RBP, lmf_offset + G_STRUCT_OFFSET (MonoLMF, r15), AMD64_R15, 8);
	amd64_mov_reg_imm (code, AMD64_R11, mono_get_lmf_addr);
	amd64_call_reg (code, AMD64_R11);
	/* Save lmf_addr */
	amd64_mov_membase_reg (code, AMD64_RBP, lmf_offset + G_STRUCT_OFFSET (MonoLMF, lmf_addr), AMD64_RAX, 8);
	/* Save previous_lmf */
	/* Set the lowest bit to 1 to signal that this LMF has the ip field set */
	amd64_mov_reg_membase (code, AMD64_R11, AMD64_RAX, 0, 8);
	amd64_alu_reg_imm_size (code, X86_ADD, AMD64_R11, 1, 8);
	amd64_mov_membase_reg (code, AMD64_RBP, lmf_offset + G_STRUCT_OFFSET (MonoLMF, previous_lmf), AMD64_R11, 8);
	/* Set new lmf */
	amd64_lea_membase (code, AMD64_R11, AMD64_RBP, lmf_offset);
	amd64_mov_membase_reg (code, AMD64_RAX, 0, AMD64_R11, 8);

	/* Save LMF end */
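	/*
	 * At this point the new MonoLMF is linked into the thread's LMF list:
	 * previous_lmf holds the old list head (with its low bit set, see above)
	 * and *lmf_addr points at our LMF, so stack walks started from the C
	 * trampoline function can unwind through this frame.
	 */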
	/* Arg1 is the pointer to the saved registers */
	amd64_lea_membase (code, AMD64_ARG_REG1, AMD64_RBP, saved_regs_offset);

	/* Arg2 is the address of the calling code */
	if (has_caller)
		amd64_mov_reg_membase (code, AMD64_ARG_REG2, AMD64_RBP, 8, 8);
	else
		amd64_mov_reg_imm (code, AMD64_ARG_REG2, 0);

	/* Arg3 is the method/vtable ptr */
	amd64_mov_reg_membase (code, AMD64_ARG_REG3, AMD64_RBP, arg_offset, 8);

	/* Arg4 is the trampoline address */
	amd64_mov_reg_membase (code, AMD64_ARG_REG4, AMD64_RBP, tramp_offset, 8);
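	/*
	 * The four arguments set up above match the signature of the C
	 * trampoline functions, which is roughly (see mono_get_trampoline_func ()):
	 *
	 *     gpointer func (gssize *regs, guint8 *code, gpointer arg, guint8 *tramp)
	 */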
	tramp = (guint8*)mono_get_trampoline_func (tramp_type);
	amd64_mov_reg_imm (code, AMD64_RAX, tramp);
	amd64_call_reg (code, AMD64_RAX);
	/* Check for thread interruption */
	/* This is not perf critical code so no need to check the interrupt flag */
	/*
	 * Have to call the _force_ variant, since there could be a protected wrapper on the top of the stack.
	 */
	/* Preserve the result (the address of the compiled method) across the call */
	amd64_mov_membase_reg (code, AMD64_RBP, res_offset, AMD64_RAX, 8);
	amd64_mov_reg_imm (code, AMD64_RAX, (guint8*)mono_thread_force_interruption_checkpoint);
	amd64_call_reg (code, AMD64_RAX);
	amd64_mov_reg_membase (code, AMD64_RAX, AMD64_RBP, res_offset, 8);
	/* Restore LMF */
	amd64_mov_reg_membase (code, AMD64_RCX, AMD64_RBP, lmf_offset + G_STRUCT_OFFSET (MonoLMF, previous_lmf), 8);
	/* Clear the low bit which was set when the LMF was pushed */
	amd64_alu_reg_imm_size (code, X86_SUB, AMD64_RCX, 1, 8);
	amd64_mov_reg_membase (code, AMD64_R11, AMD64_RBP, lmf_offset + G_STRUCT_OFFSET (MonoLMF, lmf_addr), 8);
	amd64_mov_membase_reg (code, AMD64_R11, 0, AMD64_RCX, 8);
	/* Restore argument registers, r10 (needed to pass rgctx to
	   static shared generic methods) and r11 (imt register for
	   interface calls) */
	for (i = 0; i < AMD64_NREG; ++i)
		if (AMD64_IS_ARGUMENT_REG (i) || i == AMD64_R10 || i == AMD64_R11)
			amd64_mov_reg_membase (code, i, AMD64_RBP, saved_regs_offset + (i * 8), 8);

	for (i = 0; i < 8; ++i)
		amd64_movsd_reg_membase (code, i, AMD64_RBP, saved_fpregs_offset + (i * 8));
	/* Restore stack */
	amd64_leave (code);

	if (tramp_type == MONO_TRAMPOLINE_CLASS_INIT ||
			tramp_type == MONO_TRAMPOLINE_GENERIC_CLASS_INIT ||
			tramp_type == MONO_TRAMPOLINE_RGCTX_LAZY_FETCH)
		amd64_ret (code);
	else
		/* call the compiled method */
		amd64_jump_reg (code, AMD64_RAX);

	g_assert ((code - buf) <= 524);

	mono_arch_flush_icache (buf, code - buf);
	if (tramp_type == MONO_TRAMPOLINE_CLASS_INIT) {
		/* Initialize the nullified class init trampoline used in the AOT case */
		nullified_class_init_trampoline = code = mono_global_codeman_reserve (16);

		x86_ret (code);
	}

	return buf;
}
gpointer
mono_arch_create_specific_trampoline (gpointer arg1, MonoTrampolineType tramp_type, MonoDomain *domain, guint32 *code_len)
{
	guint8 *code, *buf, *tramp;
	int size;

	tramp = mono_get_trampoline_code (tramp_type);
	/* 5 byte call, 1 size byte, then a 32 or 64 bit inline argument */
	if ((((guint64)arg1) >> 32) == 0)
		size = 5 + 1 + 4;
	else
		size = 5 + 1 + 8;
	mono_domain_lock (domain);
	code = buf = mono_code_manager_reserve_align (domain->code_mp, size, 1);
	mono_domain_unlock (domain);
	amd64_call_code (code, tramp);
	/* The trampoline code will obtain the argument from the instruction stream */
	if ((((guint64)arg1) >> 32) == 0) {
		*code = 0x4;
		*(guint32*)(code + 1) = (gint64)arg1;
		code += 5;
	} else {
		*code = 0x8;
		*(guint64*)(code + 1) = (gint64)arg1;
		code += 9;
	}
	g_assert ((code - buf) <= size);

	if (code_len)
		*code_len = size;

	mono_arch_flush_icache (buf, size);

	return buf;
}
gpointer
mono_arch_create_rgctx_lazy_fetch_trampoline (guint32 slot)
{
	guint8 *tramp = mono_get_trampoline_code (MONO_TRAMPOLINE_RGCTX_LAZY_FETCH);
	guint8 *code, *buf;
	guint8 **rgctx_null_jumps;
	int tramp_size;
	int depth, index;
	int i;
	index = slot;
	for (depth = 0; ; ++depth) {
		int size = mono_class_rgctx_get_array_size (depth, FALSE);

		if (index < size - 1)
			break;

		index -= size - 1;
	}
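	/*
	 * Illustrative note: the rgctx is a chain of arrays in which slot 0
	 * points to the next array, so a flat slot number is translated into a
	 * (depth, index) pair, each level providing size - 1 usable slots.
	 * The fetch below accordingly reads entry index + 1 at the final depth.
	 */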
	tramp_size = 32 + 8 * depth;

	code = buf = mono_global_codeman_reserve (tramp_size);

	rgctx_null_jumps = g_malloc (sizeof (guint8*) * (depth + 2));
	/* load rgctx ptr from vtable */
	amd64_mov_reg_membase (buf, AMD64_RAX, AMD64_ARG_REG1, G_STRUCT_OFFSET (MonoVTable, runtime_generic_context), 8);
	/* is the rgctx ptr null? */
	amd64_test_reg_reg (buf, AMD64_RAX, AMD64_RAX);
	/* if yes, jump to actual trampoline */
	rgctx_null_jumps [0] = buf;
	amd64_branch8 (buf, X86_CC_Z, -1, 1);
	for (i = 0; i < depth; ++i) {
		/* load ptr to next array */
		amd64_mov_reg_membase (buf, AMD64_RAX, AMD64_RAX, 0, 8);
		/* is the ptr null? */
		amd64_test_reg_reg (buf, AMD64_RAX, AMD64_RAX);
		/* if yes, jump to actual trampoline */
		rgctx_null_jumps [i + 1] = buf;
		amd64_branch8 (buf, X86_CC_Z, -1, 1);
	}
	/* fetch slot */
	amd64_mov_reg_membase (buf, AMD64_RAX, AMD64_RAX, sizeof (gpointer) * (index + 1), 8);
	/* is the slot null? */
	amd64_test_reg_reg (buf, AMD64_RAX, AMD64_RAX);
	/* if yes, jump to actual trampoline */
	rgctx_null_jumps [depth + 1] = buf;
	amd64_branch8 (buf, X86_CC_Z, -1, 1);
	/* otherwise return */
	amd64_ret (buf);
	for (i = 0; i <= depth + 1; ++i)
		x86_patch (rgctx_null_jumps [i], buf);

	g_free (rgctx_null_jumps);
	/* move the rgctx pointer to the VTABLE register */
	amd64_mov_reg_reg (buf, MONO_ARCH_VTABLE_REG, AMD64_ARG_REG1, 8);
	/* store the slot number in RAX */
	amd64_mov_reg_imm (buf, AMD64_RAX, slot);
	/* jump to the actual trampoline */
	amd64_jump_code (buf, tramp);
	mono_arch_flush_icache (code, buf - code);

	g_assert (buf - code <= tramp_size);

	return code;
}
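/*
 * The slow path above leaves the slot number in RAX; the generic
 * RGCTX_LAZY_FETCH trampoline saves all registers, and the function below
 * digs the slot number back out of the saved register array.
 */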
guint32
mono_arch_get_rgctx_lazy_fetch_offset (gpointer *regs)
{
	return (guint32)(gulong)(regs [AMD64_RAX]);
}
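/*
 * Overwrite the start of the JITted method so that any future call to the
 * invalidated method ends up calling FUNC with FUNC_ARG instead.
 */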
void
mono_arch_invalidate_method (MonoJitInfo *ji, void *func, gpointer func_arg)
{
	/* FIXME: This is not thread safe */
	guint8 *code = ji->code_start;

	amd64_mov_reg_imm (code, AMD64_ARG_REG1, func_arg);
	amd64_mov_reg_imm (code, AMD64_R11, func);

	x86_push_imm (code, (guint64)func_arg);
	amd64_call_reg (code, AMD64_R11);
}