/*
 * tramp-amd64.c: JIT trampoline code for amd64
 *
 *   Dietmar Maurer (dietmar@ximian.com)
 *   Zoltan Varga (vargaz@gmail.com)
 *
 * (C) 2001 Ximian, Inc.
 */
#include <mono/metadata/appdomain.h>
#include <mono/metadata/marshal.h>
#include <mono/metadata/tabledefs.h>
#include <mono/metadata/mono-debug-debugger.h>
#include <mono/arch/amd64/amd64-codegen.h>

#ifdef HAVE_VALGRIND_MEMCHECK_H
#include <valgrind/memcheck.h>
#endif

#include "mini-amd64.h"
#define IS_REX(inst) (((inst) >= 0x40) && ((inst) <= 0x4f))

static guint8* nullified_class_init_trampoline;
/*
 * mono_arch_get_unbox_trampoline:
 * @gsctx: the generic sharing context
 * @m: method pointer
 * @addr: pointer to native code for @m
 *
 * When value type methods are called through the vtable, we need to unbox the
 * 'this' argument. This method returns a pointer to a trampoline which does
 * the unboxing before calling the method.
 */
gpointer
mono_arch_get_unbox_trampoline (MonoGenericSharingContext *gsctx, MonoMethod *m, gpointer addr)
{
	guint8 *code, *start;
	int this_reg;

	MonoDomain *domain = mono_domain_get ();

	this_reg = mono_arch_get_this_arg_reg (mono_method_signature (m), gsctx, NULL);

	mono_domain_lock (domain);
	start = code = mono_code_manager_reserve (domain->code_mp, 20);
	mono_domain_unlock (domain);

	amd64_alu_reg_imm (code, X86_ADD, this_reg, sizeof (MonoObject));
	/* FIXME: Optimize this */
	amd64_mov_reg_imm (code, AMD64_RAX, addr);
	amd64_jump_reg (code, AMD64_RAX);
	g_assert ((code - start) < 20);

	mono_arch_flush_icache (start, code - start);

	return start;
}
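
/*
 * The emitted trampoline is tiny (a sketch; the exact encoding depends on
 * which register holds 'this'):
 *
 *   add <this_reg>, sizeof (MonoObject)   ; skip the object header
 *   mov rax, <addr>                       ; 64 bit immediate
 *   jmp rax
 */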
/*
 * mono_arch_patch_callsite:
 *
 * Patch the callsite whose address is given by ORIG_CODE so it calls ADDR. ORIG_CODE
 * points to the pc right after the call.
 */
void
mono_arch_patch_callsite (guint8 *method_start, guint8 *orig_code, guint8 *addr)
{
	guint8 buf [16];
	guint8 *code;
	gboolean can_write = mono_breakpoint_clean_code (method_start, orig_code, 14, buf, sizeof (buf));

	code = buf + 14;

	if (((code [-13] == 0x49) && (code [-12] == 0xbb)) || (code [-5] == 0xe8)) {
		if (code [-5] != 0xe8) {
			if (can_write) {
				InterlockedExchangePointer ((gpointer*)(orig_code - 11), addr);
#ifdef HAVE_VALGRIND_MEMCHECK_H
				VALGRIND_DISCARD_TRANSLATIONS (orig_code - 11, sizeof (gpointer));
#endif
			}
		} else {
			if ((((guint64)(addr)) >> 32) != 0) {
				/* Print some diagnostics */
				MonoJitInfo *ji = mono_jit_info_table_find (mono_domain_get (), (char*)orig_code);
				if (ji)
					fprintf (stderr, "At %s, offset 0x%zx\n", mono_method_full_name (ji->method, TRUE), (guint8*)orig_code - (guint8*)ji->code_start);
				fprintf (stderr, "Addr: %p\n", addr);
				ji = mono_jit_info_table_find (mono_domain_get (), (char*)addr);
				if (ji)
					fprintf (stderr, "Callee: %s\n", mono_method_full_name (ji->method, TRUE));
				g_assert_not_reached ();
			}
			g_assert ((((guint64)(orig_code)) >> 32) == 0);
			if (can_write) {
				InterlockedExchange ((gint32*)(orig_code - 4), ((gint64)addr - (gint64)orig_code));
#ifdef HAVE_VALGRIND_MEMCHECK_H
				VALGRIND_DISCARD_TRANSLATIONS (orig_code - 5, 4);
#endif
			}
		}
	}
	else if ((code [-7] == 0x41) && (code [-6] == 0xff) && (code [-5] == 0x15)) {
		/* call *<OFFSET>(%rip) */
		gpointer *got_entry = (gpointer*)((guint8*)orig_code + (*(guint32*)(orig_code - 4)));
		if (can_write) {
			InterlockedExchangePointer (got_entry, addr);
#ifdef HAVE_VALGRIND_MEMCHECK_H
			VALGRIND_DISCARD_TRANSLATIONS (orig_code - 5, sizeof (gpointer));
#endif
		}
	}
}
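
/*
 * Callsite shapes recognized above (offsets relative to ORIG_CODE, which
 * points right after the call):
 *
 *   [-13] 49 bb <imm64>       mov r11, imm64
 *   [ -3] 41 ff d3            call r11         -> patch the imm64 at [-11]
 *
 *   [ -5] e8 <rel32>          call rel32       -> patch the rel32 at [-4]
 *
 *   [ -7] 41 ff 15 <disp32>   call *<disp>(%rip)
 *                                              -> patch the GOT entry the
 *                                                 displacement points to
 */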
void
mono_arch_patch_plt_entry (guint8 *code, guint8 *addr)
{
	gint32 disp;
	gpointer *plt_jump_table_entry;

	/* A PLT entry: jmp *<DISP>(%rip) */
	g_assert (code [0] == 0xff);
	g_assert (code [1] == 0x25);

	disp = *(gint32*)(code + 2);

	plt_jump_table_entry = (gpointer*)(code + 6 + disp);

	InterlockedExchangePointer (plt_jump_table_entry, addr);
}
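
/*
 * A PLT entry as assumed above is a single rip-relative jump:
 *
 *   ff 25 <disp32>    jmp *<disp32>(%rip)
 *
 * Since %rip points at the next instruction (code + 6) when the displacement
 * is applied, the jump table slot holding the target is at code + 6 + disp,
 * and patching that one slot atomically retargets the entry.
 */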
void
mono_arch_nullify_class_init_trampoline (guint8 *code, gssize *regs)
{
	guint8 buf [16];
	gboolean can_write = mono_breakpoint_clean_code (NULL, code, 7, buf, sizeof (buf));

	if (!can_write)
		return;

	code -= 3;

	/*
	 * A given byte sequence can match more than one case here, so we have to
	 * be really careful about the ordering of the cases. Longer sequences
	 * come first.
	 */
	if ((code [-4] == 0x41) && (code [-3] == 0xff) && (code [-2] == 0x15)) {
		gpointer *vtable_slot;

		/* call *<OFFSET>(%rip) */
		vtable_slot = mono_arch_get_vcall_slot_addr (code + 3, (gpointer*)regs);
		g_assert (vtable_slot);

		*vtable_slot = nullified_class_init_trampoline;
	} else if (code [-2] == 0xe8) {
		/* call <TARGET> */
		guint8 *buf = code - 2;

		/* Nop out the 5 byte call with a 3 byte and a 2 byte nop */
		buf [0] = 0x66;
		buf [1] = 0x66;
		buf [2] = 0x90;
		buf [3] = 0x66;
		buf [4] = 0x90;
	} else if ((code [0] == 0x41) && (code [1] == 0xff)) {
		/* call <REG> */
		/* happens on machines without MAP_32BIT like FreeBSD */
		/* amd64_set_reg_template is 10 bytes long */
		guint8* buf = code - 10;

		/* FIXME: Make this thread safe */
		/* Padding code suggested by the AMD64 Opt Manual */
		buf [0] = 0x66;
		buf [1] = 0x66;
		buf [2] = 0x66;
		buf [3] = 0x90;
		buf [4] = 0x66;
		buf [5] = 0x66;
		buf [6] = 0x66;
		buf [7] = 0x90;
		buf [8] = 0x66;
		buf [9] = 0x66;
		buf [10] = 0x66;
		buf [11] = 0x90;
		buf [12] = 0x90;
	} else if (code [0] == 0x90 || code [0] == 0xeb || code [0] == 0x66) {
		/* Already changed by another thread */
		;
	} else {
		printf ("Invalid trampoline sequence: %x %x %x %x %x %x %x\n", code [0], code [1], code [2], code [3],
			code [4], code [5], code [6]);
		g_assert_not_reached ();
	}
}
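
/*
 * Shapes handled above, relative to the adjusted CODE pointer:
 *
 *   41 ff 15 <disp32>         call *<disp>(%rip) -> redirect the vtable/GOT
 *                                                   slot at the trampoline
 *   e8 <rel32>                call rel32         -> nop out the call itself
 *   mov r11, imm64; call r11  (no MAP_32BIT)     -> nop out the whole sequence
 *
 * A leading 0x90/0xeb/0x66 means another thread already nullified the site.
 */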
void
mono_arch_nullify_plt_entry (guint8 *code)
{
	mono_arch_patch_plt_entry (code, nullified_class_init_trampoline);
}
guint8 *
mono_arch_create_trampoline_code (MonoTrampolineType tramp_type)
{
	guint8 *buf, *code, *tramp, *br [2], *r11_save_code, *after_r11_save_code;
	int i, lmf_offset, offset, res_offset, arg_offset, tramp_offset, saved_regs_offset;
	int saved_fpregs_offset, rbp_offset, framesize, orig_rsp_to_rbp_offset;
	gboolean has_caller;

	if (tramp_type == MONO_TRAMPOLINE_JUMP)
		has_caller = FALSE;
	else
		has_caller = TRUE;

	code = buf = mono_global_codeman_reserve (524);

	framesize = 524 + sizeof (MonoLMF);
	framesize = (framesize + (MONO_ARCH_FRAME_ALIGNMENT - 1)) & ~ (MONO_ARCH_FRAME_ALIGNMENT - 1);
	if (tramp_type == MONO_TRAMPOLINE_GENERIC_CLASS_INIT) {
		static int byte_offset = -1;
		static guint8 bitmask;
		guint8 *jump;

		if (byte_offset < 0)
			mono_marshal_find_bitfield_offset (MonoVTable, initialized, &byte_offset, &bitmask);

		amd64_test_membase_imm_size (code, MONO_ARCH_VTABLE_REG, byte_offset, bitmask, 1);
		jump = code;
		amd64_branch8 (code, X86_CC_Z, -1, 1);

		amd64_ret (code);

		x86_patch (jump, code);
	}
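
	/*
	 * Fast path: the generic class init trampoline is entered with the
	 * MonoVTable in MONO_ARCH_VTABLE_REG; the test above checks its
	 * 'initialized' bit and returns straight away when the bit is already
	 * set, so only the uninitialized case pays for the frame setup below.
	 */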
	orig_rsp_to_rbp_offset = 0;
	r11_save_code = code;
	/* Reserve 5 bytes for the mov_membase_reg to save R11 */
	code += 5;
	after_r11_save_code = code;

	/*
	 * The generic class init trampoline is called directly by
	 * JITted code; there is no specific trampoline. The lazy
	 * fetch trampolines behave like generic class init
	 * trampolines in this respect, so only the other trampoline
	 * types have a return address to pop here.
	 */
	if (tramp_type != MONO_TRAMPOLINE_GENERIC_CLASS_INIT &&
			tramp_type != MONO_TRAMPOLINE_RGCTX_LAZY_FETCH) {
		/* Pop the return address off the stack */
		amd64_pop_reg (code, AMD64_R11);
		orig_rsp_to_rbp_offset += 8;
	}
	/*
	 * Allocate a new stack frame
	 */
	amd64_push_reg (code, AMD64_RBP);
	orig_rsp_to_rbp_offset -= 8;
	amd64_mov_reg_reg (code, AMD64_RBP, AMD64_RSP, 8);
	amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, framesize);

	/* Compute stack offsets relative to the new frame */
	offset = 0;
	rbp_offset = - offset;

	offset += 8;
	tramp_offset = - offset;

	offset += 8;
	arg_offset = - offset;

	if (tramp_type != MONO_TRAMPOLINE_GENERIC_CLASS_INIT &&
			tramp_type != MONO_TRAMPOLINE_RGCTX_LAZY_FETCH) {
		/* Compute the trampoline address from the return address */
		/* 5 = length of the call instruction in the specific trampoline */
		amd64_alu_reg_imm (code, X86_SUB, AMD64_R11, 5);
		amd64_mov_membase_reg (code, AMD64_RBP, tramp_offset, AMD64_R11, 8);
	} else {
		amd64_mov_membase_imm (code, AMD64_RBP, tramp_offset, 0, 8);
	}
	offset += 8;
	res_offset = - offset;

	/* Save all registers */

	offset += AMD64_NREG * 8;
	saved_regs_offset = - offset;
	for (i = 0; i < AMD64_NREG; ++i) {
		if (i == AMD64_RBP) {
			/* RAX is already saved */
			amd64_mov_reg_membase (code, AMD64_RAX, AMD64_RBP, rbp_offset, 8);
			amd64_mov_membase_reg (code, AMD64_RBP, saved_regs_offset + (i * 8), AMD64_RAX, 8);
		} else if (i != AMD64_R11) {
			amd64_mov_membase_reg (code, AMD64_RBP, saved_regs_offset + (i * 8), i, 8);
		} else {
			/* We have to save R11 right at the start of
			   the trampoline code because it's used as a
			   scratch register */
			amd64_mov_membase_reg (r11_save_code, AMD64_RSP, saved_regs_offset + orig_rsp_to_rbp_offset + (i * 8), i, 8);
			g_assert (r11_save_code == after_r11_save_code);
		}
	}

	offset += 8 * 8;
	saved_fpregs_offset = - offset;
	for (i = 0; i < 8; ++i)
		amd64_movsd_membase_reg (code, AMD64_RBP, saved_fpregs_offset + (i * 8), i);
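
	/*
	 * Sketch of the frame built above (offsets grow downwards from RBP):
	 *
	 *   [rbp +  0]   caller's saved RBP
	 *   [rbp -  8]   tramp_offset: address of the specific trampoline, or 0
	 *   [rbp - 16]   arg_offset: the trampoline argument
	 *   [rbp - 24]   res_offset: result of the C trampoline function
	 *   below that   saved_regs_offset: all AMD64_NREG integer registers
	 *   below that   saved_fpregs_offset: xmm0-xmm7
	 *   below that   lmf_offset: a MonoLMF (filled in next)
	 */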
	if (tramp_type != MONO_TRAMPOLINE_GENERIC_CLASS_INIT &&
			tramp_type != MONO_TRAMPOLINE_RGCTX_LAZY_FETCH) {
		/* Obtain the trampoline argument which is encoded in the instruction stream */
		amd64_mov_reg_membase (code, AMD64_R11, AMD64_RBP, tramp_offset, 8);
		amd64_mov_reg_membase (code, AMD64_RAX, AMD64_R11, 5, 1);
		amd64_widen_reg (code, AMD64_RAX, AMD64_RAX, TRUE, FALSE);
		amd64_alu_reg_imm_size (code, X86_CMP, AMD64_RAX, 4, 1);
		br [0] = code;
		x86_branch8 (code, X86_CC_NE, 6, FALSE);
		/* 32 bit immediate */
		amd64_mov_reg_membase (code, AMD64_R11, AMD64_R11, 6, 4);
		br [1] = code;
		x86_jump8 (code, 10);
		/* 64 bit immediate */
		mono_amd64_patch (br [0], code);
		amd64_mov_reg_membase (code, AMD64_R11, AMD64_R11, 6, 8);
		mono_amd64_patch (br [1], code);
		amd64_mov_membase_reg (code, AMD64_RBP, arg_offset, AMD64_R11, 8);
	} else {
		amd64_mov_membase_reg (code, AMD64_RBP, arg_offset, MONO_ARCH_VTABLE_REG, 8);
	}
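
	/*
	 * Encoding of a specific trampoline, which R11 points to here (see
	 * mono_arch_create_specific_trampoline below):
	 *
	 *   e8 <rel32>         call <generic trampoline>   ; 5 bytes
	 *   04 or 08           size of the argument        ; at [r11 + 5]
	 *   <imm32 or imm64>   the argument itself         ; at [r11 + 6]
	 */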
	offset += sizeof (MonoLMF);
	lmf_offset = - offset;

	/* Save ip */
	if (has_caller)
		amd64_mov_reg_membase (code, AMD64_R11, AMD64_RBP, 8, 8);
	else
		amd64_mov_reg_imm (code, AMD64_R11, 0);
	amd64_mov_membase_reg (code, AMD64_RBP, lmf_offset + G_STRUCT_OFFSET (MonoLMF, rip), AMD64_R11, 8);
	/* Save fp */
	amd64_mov_reg_membase (code, AMD64_R11, AMD64_RSP, framesize, 8);
	amd64_mov_membase_reg (code, AMD64_RBP, lmf_offset + G_STRUCT_OFFSET (MonoLMF, rbp), AMD64_R11, 8);
	/* Save sp */
	amd64_mov_reg_reg (code, AMD64_R11, AMD64_RSP, 8);
	amd64_alu_reg_imm (code, X86_ADD, AMD64_R11, framesize + 16);
	amd64_mov_membase_reg (code, AMD64_RBP, lmf_offset + G_STRUCT_OFFSET (MonoLMF, rsp), AMD64_R11, 8);
	/* Save method */
	if (tramp_type == MONO_TRAMPOLINE_GENERIC || tramp_type == MONO_TRAMPOLINE_JUMP) {
		amd64_mov_reg_membase (code, AMD64_R11, AMD64_RBP, arg_offset, 8);
		amd64_mov_membase_reg (code, AMD64_RBP, lmf_offset + G_STRUCT_OFFSET (MonoLMF, method), AMD64_R11, 8);
	} else {
		amd64_mov_membase_imm (code, AMD64_RBP, lmf_offset + G_STRUCT_OFFSET (MonoLMF, method), 0, 8);
	}
	/* Save callee saved regs */
#ifdef PLATFORM_WIN32
	amd64_mov_membase_reg (code, AMD64_RBP, lmf_offset + G_STRUCT_OFFSET (MonoLMF, rdi), AMD64_RDI, 8);
	amd64_mov_membase_reg (code, AMD64_RBP, lmf_offset + G_STRUCT_OFFSET (MonoLMF, rsi), AMD64_RSI, 8);
#endif
	amd64_mov_membase_reg (code, AMD64_RBP, lmf_offset + G_STRUCT_OFFSET (MonoLMF, rbx), AMD64_RBX, 8);
	amd64_mov_membase_reg (code, AMD64_RBP, lmf_offset + G_STRUCT_OFFSET (MonoLMF, r12), AMD64_R12, 8);
	amd64_mov_membase_reg (code, AMD64_RBP, lmf_offset + G_STRUCT_OFFSET (MonoLMF, r13), AMD64_R13, 8);
	amd64_mov_membase_reg (code, AMD64_RBP, lmf_offset + G_STRUCT_OFFSET (MonoLMF, r14), AMD64_R14, 8);
	amd64_mov_membase_reg (code, AMD64_RBP, lmf_offset + G_STRUCT_OFFSET (MonoLMF, r15), AMD64_R15, 8);

	amd64_mov_reg_imm (code, AMD64_R11, mono_get_lmf_addr);
	amd64_call_reg (code, AMD64_R11);

	/* Save lmf_addr */
	amd64_mov_membase_reg (code, AMD64_RBP, lmf_offset + G_STRUCT_OFFSET (MonoLMF, lmf_addr), AMD64_RAX, 8);
	/* Save previous_lmf */
	/* Set the lowest bit to 1 to signal that this LMF has the ip field set */
	amd64_mov_reg_membase (code, AMD64_R11, AMD64_RAX, 0, 8);
	amd64_alu_reg_imm_size (code, X86_ADD, AMD64_R11, 1, 8);
	amd64_mov_membase_reg (code, AMD64_RBP, lmf_offset + G_STRUCT_OFFSET (MonoLMF, previous_lmf), AMD64_R11, 8);
	/* Set new lmf */
	amd64_lea_membase (code, AMD64_R11, AMD64_RBP, lmf_offset);
	amd64_mov_membase_reg (code, AMD64_RAX, 0, AMD64_R11, 8);
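
	/*
	 * The above links the new LMF (Last Managed Frame) onto the thread's
	 * LMF list; in C it is roughly:
	 *
	 *   MonoLMF **lmf_addr = mono_get_lmf_addr ();
	 *   lmf->lmf_addr = lmf_addr;
	 *   lmf->previous_lmf = (gpointer)((guint64)*lmf_addr + 1); // bit 0: ip is set
	 *   *lmf_addr = lmf;
	 */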
	/* Arg1 is the pointer to the saved registers */
	amd64_lea_membase (code, AMD64_ARG_REG1, AMD64_RBP, saved_regs_offset);

	/* Arg2 is the address of the calling code */
	if (has_caller)
		amd64_mov_reg_membase (code, AMD64_ARG_REG2, AMD64_RBP, 8, 8);
	else
		amd64_mov_reg_imm (code, AMD64_ARG_REG2, 0);

	/* Arg3 is the method/vtable ptr */
	amd64_mov_reg_membase (code, AMD64_ARG_REG3, AMD64_RBP, arg_offset, 8);

	/* Arg4 is the trampoline address */
	amd64_mov_reg_membase (code, AMD64_ARG_REG4, AMD64_RBP, tramp_offset, 8);

	tramp = (guint8*)mono_get_trampoline_func (tramp_type);
	amd64_mov_reg_imm (code, AMD64_RAX, tramp);
	amd64_call_reg (code, AMD64_RAX);
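
	/*
	 * The C function invoked here is the one returned by
	 * mono_get_trampoline_func (); modulo exact typedef names its signature
	 * matches the four argument registers loaded above:
	 *
	 *   gpointer tramp_func (gssize *regs, guint8 *code, gpointer arg, guint8 *tramp)
	 *
	 * and it returns the address to transfer control to in RAX.
	 */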
	/* Check for thread interruption */
	/* This is not perf critical code so no need to check the interrupt flag */
	/*
	 * Have to call the _force_ variant, since there could be a protected wrapper on the top of the stack.
	 */
	amd64_mov_membase_reg (code, AMD64_RBP, res_offset, AMD64_RAX, 8);
	amd64_mov_reg_imm (code, AMD64_RAX, (guint8*)mono_thread_force_interruption_checkpoint);
	amd64_call_reg (code, AMD64_RAX);
	amd64_mov_reg_membase (code, AMD64_RAX, AMD64_RBP, res_offset, 8);

	/* Restore LMF */
	amd64_mov_reg_membase (code, AMD64_RCX, AMD64_RBP, lmf_offset + G_STRUCT_OFFSET (MonoLMF, previous_lmf), 8);
	/* Clear the 'ip is set' bit added when the LMF was pushed */
	amd64_alu_reg_imm_size (code, X86_SUB, AMD64_RCX, 1, 8);
	amd64_mov_reg_membase (code, AMD64_R11, AMD64_RBP, lmf_offset + G_STRUCT_OFFSET (MonoLMF, lmf_addr), 8);
	amd64_mov_membase_reg (code, AMD64_R11, 0, AMD64_RCX, 8);
	/*
	 * Restore argument registers, r10 (needed to pass rgctx to
	 * static shared generic methods) and r11 (imt register for
	 * interface calls).
	 */
	for (i = 0; i < AMD64_NREG; ++i)
		if (AMD64_IS_ARGUMENT_REG (i) || i == AMD64_R10 || i == AMD64_R11)
			amd64_mov_reg_membase (code, i, AMD64_RBP, saved_regs_offset + (i * 8), 8);

	for (i = 0; i < 8; ++i)
		amd64_movsd_reg_membase (code, i, AMD64_RBP, saved_fpregs_offset + (i * 8));

	/* Restore the stack and RBP */
	amd64_leave (code);

	if (tramp_type == MONO_TRAMPOLINE_CLASS_INIT ||
			tramp_type == MONO_TRAMPOLINE_GENERIC_CLASS_INIT ||
			tramp_type == MONO_TRAMPOLINE_RGCTX_LAZY_FETCH)
		amd64_ret (code);
	else
		/* call the compiled method */
		amd64_jump_reg (code, AMD64_RAX);

	g_assert ((code - buf) <= 524);
	mono_arch_flush_icache (buf, code - buf);

	if (tramp_type == MONO_TRAMPOLINE_CLASS_INIT) {
		/* Initialize the nullified class init trampoline used in the AOT case */
		nullified_class_init_trampoline = code = mono_global_codeman_reserve (16);
		amd64_ret (code);
	}

	return buf;
}
gpointer
mono_arch_create_specific_trampoline (gpointer arg1, MonoTrampolineType tramp_type, MonoDomain *domain, guint32 *code_len)
{
	guint8 *code, *buf, *tramp;
	int size;

	tramp = mono_get_trampoline_code (tramp_type);

	if ((((guint64)arg1) >> 32) == 0)
		size = 5 + 1 + 4;
	else
		size = 5 + 1 + 8;

	mono_domain_lock (domain);
	code = buf = mono_code_manager_reserve_align (domain->code_mp, size, 1);
	mono_domain_unlock (domain);

	amd64_call_code (code, tramp);
	/* The trampoline code will obtain the argument from the instruction stream */
	if ((((guint64)arg1) >> 32) == 0) {
		*code = 0x4;
		*(guint32*)(code + 1) = (gint64)arg1;
		code += 5;
	} else {
		*code = 0x8;
		*(guint64*)(code + 1) = (gint64)arg1;
		code += 9;
	}

	g_assert ((code - buf) <= size);

	if (code_len)
		*code_len = size;

	mono_arch_flush_icache (buf, size);

	return buf;
}
gpointer
mono_arch_create_rgctx_lazy_fetch_trampoline (guint32 slot)
{
	guint8 *tramp = mono_get_trampoline_code (MONO_TRAMPOLINE_RGCTX_LAZY_FETCH);
	guint8 *code, *buf;
	guint8 **rgctx_null_jumps;
	int tramp_size;
	int depth, index;
	int i;
	gboolean mrgctx;

	mrgctx = MONO_RGCTX_SLOT_IS_MRGCTX (slot);
	index = MONO_RGCTX_SLOT_INDEX (slot);
	if (mrgctx)
		index += sizeof (MonoMethodRuntimeGenericContext) / sizeof (gpointer);
	for (depth = 0; ; ++depth) {
		int size = mono_class_rgctx_get_array_size (depth, mrgctx);

		if (index < size - 1)
			break;
		index -= size - 1;
	}

	tramp_size = 36 + 8 * depth;
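
	/*
	 * The loop above decomposes the flat slot number into (depth, index):
	 * with per-level array size N(d) = mono_class_rgctx_get_array_size (d, mrgctx),
	 * the slot first consumes the N(0) - 1 payload entries of level 0 (slot 0
	 * of each level links to the next one), then the N(1) - 1 entries of
	 * level 1, and so on. 'depth' is the number of links to follow and
	 * 'index' the position in the final array.
	 */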
	code = buf = mono_global_codeman_reserve (tramp_size);

	rgctx_null_jumps = g_malloc (sizeof (guint8*) * (depth + 2));

	if (mrgctx) {
		/* get mrgctx ptr */
		amd64_mov_reg_reg (buf, AMD64_RAX, AMD64_ARG_REG1, 8);
	} else {
		/* load rgctx ptr from vtable */
		amd64_mov_reg_membase (buf, AMD64_RAX, AMD64_ARG_REG1, G_STRUCT_OFFSET (MonoVTable, runtime_generic_context), 8);
		/* is the rgctx ptr null? */
		amd64_test_reg_reg (buf, AMD64_RAX, AMD64_RAX);
		/* if yes, jump to actual trampoline */
		rgctx_null_jumps [0] = buf;
		amd64_branch8 (buf, X86_CC_Z, -1, 1);
	}
	for (i = 0; i < depth; ++i) {
		/* load ptr to next array */
		if (mrgctx && i == 0)
			amd64_mov_reg_membase (buf, AMD64_RAX, AMD64_RAX, sizeof (MonoMethodRuntimeGenericContext), 8);
		else
			amd64_mov_reg_membase (buf, AMD64_RAX, AMD64_RAX, 0, 8);
		/* is the ptr null? */
		amd64_test_reg_reg (buf, AMD64_RAX, AMD64_RAX);
		/* if yes, jump to actual trampoline */
		rgctx_null_jumps [i + 1] = buf;
		amd64_branch8 (buf, X86_CC_Z, -1, 1);
	}

	/* fetch slot */
	amd64_mov_reg_membase (buf, AMD64_RAX, AMD64_RAX, sizeof (gpointer) * (index + 1), 8);
	/* is the slot null? */
	amd64_test_reg_reg (buf, AMD64_RAX, AMD64_RAX);
	/* if yes, jump to actual trampoline */
	rgctx_null_jumps [depth + 1] = buf;
	amd64_branch8 (buf, X86_CC_Z, -1, 1);
	/* otherwise return */
	amd64_ret (buf);

	for (i = mrgctx ? 1 : 0; i <= depth + 1; ++i)
		x86_patch (rgctx_null_jumps [i], buf);
	g_free (rgctx_null_jumps);

	/* move the rgctx pointer to the VTABLE register */
	amd64_mov_reg_reg (buf, MONO_ARCH_VTABLE_REG, AMD64_ARG_REG1, 8);
	/* store the slot in RAX */
	amd64_mov_reg_imm (buf, AMD64_RAX, slot);
	/* jump to the actual trampoline */
	amd64_jump_code (buf, tramp);

	mono_arch_flush_icache (code, buf - code);

	g_assert (buf - code <= tramp_size);

	return code;
}
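
/*
 * Roughly, the generated fast path is equivalent to this C (a sketch; the
 * first link of a method rgctx lives after the MonoMethodRuntimeGenericContext
 * header, which the adjusted 'index' accounts for):
 *
 *   gpointer *p = mrgctx ? (gpointer*)arg : (gpointer*)vtable->runtime_generic_context;
 *   if (!p) goto slow;
 *   for (i = 0; i < depth; ++i)
 *       if (!(p = (gpointer*)*p)) goto slow;   // slot 0 links to the next level
 *   if (!p [index + 1]) goto slow;
 *   return p [index + 1];
 *
 * slow: jump to the generic RGCTX_LAZY_FETCH trampoline with the original
 * argument in MONO_ARCH_VTABLE_REG and the slot number in RAX.
 */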
guint32
mono_arch_get_rgctx_lazy_fetch_offset (gpointer *regs)
{
	/* The lazy fetch trampoline stores the slot number in RAX (see above) */
	return (guint32)(gulong)(regs [AMD64_RAX]);
}
void
mono_arch_invalidate_method (MonoJitInfo *ji, void *func, gpointer func_arg)
{
	/* FIXME: This is not thread safe */
	guint8 *code = ji->code_start;

	amd64_mov_reg_imm (code, AMD64_ARG_REG1, func_arg);
	amd64_mov_reg_imm (code, AMD64_R11, func);

	x86_push_imm (code, (guint64)func_arg);
	amd64_call_reg (code, AMD64_R11);
}
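
/*
 * Invalidation works by overwriting the method's prologue in place: any
 * later call into the method now loads func_arg into the first argument
 * register and transfers control to func instead of executing the original
 * code.
 */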