/*
 * tramp-amd64.c: JIT trampoline code for amd64
 *
 * Authors:
 *   Dietmar Maurer (dietmar@ximian.com)
 *   Zoltan Varga (vargaz@gmail.com)
 *
 * (C) 2001 Ximian, Inc.
 */
#include <config.h>
#include <glib.h>

#include <mono/metadata/appdomain.h>
#include <mono/metadata/marshal.h>
#include <mono/metadata/tabledefs.h>
#include <mono/metadata/mono-debug-debugger.h>
#include <mono/arch/amd64/amd64-codegen.h>

#ifdef HAVE_VALGRIND_MEMCHECK_H
#include <valgrind/memcheck.h>
#endif

#include "mini.h"
#include "mini-amd64.h"
#define IS_REX(inst) (((inst) >= 0x40) && ((inst) <= 0x4f))

static guint8* nullified_class_init_trampoline;
/*
 * mono_arch_get_unbox_trampoline:
 * @gsctx: the generic sharing context
 * @m: method pointer
 * @addr: pointer to native code for @m
 *
 * When value type methods are called through the vtable we need to unbox the
 * 'this' argument. This method returns a pointer to a trampoline which does
 * unboxing before calling the method.
 */
gpointer
mono_arch_get_unbox_trampoline (MonoGenericSharingContext *gsctx, MonoMethod *m, gpointer addr)
{
	guint8 *code, *start;
	int this_reg;

	MonoDomain *domain = mono_domain_get ();

	this_reg = mono_arch_get_this_arg_reg (mono_method_signature (m), gsctx, NULL);

	mono_domain_lock (domain);
	start = code = mono_code_manager_reserve (domain->code_mp, 20);
	mono_domain_unlock (domain);

	/* Skip the MonoObject header so the callee sees the unboxed value */
	amd64_alu_reg_imm (code, X86_ADD, this_reg, sizeof (MonoObject));
	/* FIXME: Optimize this */
	amd64_mov_reg_imm (code, AMD64_RAX, addr);
	amd64_jump_reg (code, AMD64_RAX);
	g_assert ((code - start) < 20);

	mono_arch_flush_icache (start, code - start);

	return start;
}
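
/*
 * A sketch of the emitted stub (the exact encoding depends on which register
 * carries the 'this' argument):
 *
 *   add $sizeof(MonoObject), %this_reg   ; skip the object header
 *   mov $addr, %rax
 *   jmp *%rax
 */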
/*
 * mono_arch_patch_callsite:
 *
 * Patch the callsite whose address is given by ORIG_CODE so it calls ADDR. ORIG_CODE
 * points to the pc right after the call.
 */
void
mono_arch_patch_callsite (guint8 *method_start, guint8 *orig_code, guint8 *addr)
{
	guint8 buf [16];
	guint8 *code;
	gboolean can_write = mono_breakpoint_clean_code (method_start, orig_code, 14, buf, sizeof (buf));

	code = buf + 14;
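
	/*
	 * Two call sequences can precede orig_code, matching the byte checks
	 * below (offsets are relative to orig_code):
	 *
	 *   -13: 49 bb <imm64>   movabs $target, %r11   (10 bytes)
	 *    -3: 41 ff d3        call *%r11             (3 bytes)
	 * or
	 *    -5: e8 <rel32>      call <target>          (5 bytes)
	 */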
	if (((code [-13] == 0x49) && (code [-12] == 0xbb)) || (code [-5] == 0xe8)) {
		if (code [-5] != 0xe8) {
			/* movabs into %r11 followed by an indirect call: patch the 64 bit immediate */
			if (can_write) {
				InterlockedExchangePointer ((gpointer*)(orig_code - 11), addr);
#ifdef HAVE_VALGRIND_MEMCHECK_H
				VALGRIND_DISCARD_TRANSLATIONS (orig_code - 11, sizeof (gpointer));
#endif
			}
		} else {
			/* A direct call: the new target must be reachable with a 32 bit displacement */
			if ((((guint64)(addr)) >> 32) != 0) {
				/* Print some diagnostics */
				MonoJitInfo *ji = mono_jit_info_table_find (mono_domain_get (), (char*)orig_code);
				if (ji)
					fprintf (stderr, "At %s, offset 0x%zx\n", mono_method_full_name (ji->method, TRUE), (guint8*)orig_code - (guint8*)ji->code_start);
				fprintf (stderr, "Addr: %p\n", addr);
				ji = mono_jit_info_table_find (mono_domain_get (), (char*)addr);
				if (ji)
					fprintf (stderr, "Callee: %s\n", mono_method_full_name (ji->method, TRUE));
				g_assert_not_reached ();
			}
			g_assert ((((guint64)(orig_code)) >> 32) == 0);
			if (can_write) {
				InterlockedExchange ((gint32*)(orig_code - 4), ((gint64)addr - (gint64)orig_code));
#ifdef HAVE_VALGRIND_MEMCHECK_H
				VALGRIND_DISCARD_TRANSLATIONS (orig_code - 5, 4);
#endif
			}
		}
	}
	else if ((code [-7] == 0x41) && (code [-6] == 0xff) && (code [-5] == 0x15)) {
		/* call *<OFFSET>(%rip) */
		gpointer *got_entry = (gpointer*)((guint8*)orig_code + (*(guint32*)(orig_code - 4)));
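		/*
		 * The rel32 at orig_code - 4 is relative to the end of the call
		 * instruction, i.e. orig_code itself, so orig_code + rel32 is the
		 * address of the GOT slot holding the target.
		 */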
		if (can_write) {
			InterlockedExchangePointer (got_entry, addr);
#ifdef HAVE_VALGRIND_MEMCHECK_H
			VALGRIND_DISCARD_TRANSLATIONS (orig_code - 5, sizeof (gpointer));
#endif
		}
	}
}
void
mono_arch_patch_plt_entry (guint8 *code, guint8 *addr)
{
	gint32 disp;
	gpointer *plt_jump_table_entry;

	/* A PLT entry: jmp *<DISP>(%rip) */
	g_assert (code [0] == 0xff);
	g_assert (code [1] == 0x25);
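
	/*
	 * Byte layout of the entry (6 bytes):
	 *
	 *   ff 25 <disp32>    jmp *<disp32>(%rip)
	 *
	 * The displacement is relative to the end of the instruction, so the
	 * jump table slot lives at code + 6 + disp.
	 */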
	disp = *(gint32*)(code + 2);

	plt_jump_table_entry = (gpointer*)(code + 6 + disp);

	InterlockedExchangePointer (plt_jump_table_entry, addr);
}
void
mono_arch_nullify_class_init_trampoline (guint8 *code, gssize *regs)
{
	guint8 buf [16];
	gboolean can_write = mono_breakpoint_clean_code (NULL, code, 7, buf, sizeof (buf));

	if (!can_write)
		return;

	code -= 3;

	/*
	 * A given byte sequence can match more than one case here, so we have to be
	 * really careful about the ordering of the cases. Longer sequences should be
	 * checked first.
	 */
	if ((code [-4] == 0x41) && (code [-3] == 0xff) && (code [-2] == 0x15)) {
		gpointer *vtable_slot;

		/* call *<OFFSET>(%rip) */
		vtable_slot = mono_arch_get_vcall_slot_addr (code + 3, (gpointer*)regs);
		g_assert (vtable_slot);

		*vtable_slot = nullified_class_init_trampoline;
	} else if (code [-2] == 0xe8) {
		/* call <TARGET> */
		guint8 *buf = code - 2;

		/* Overwrite the 5 byte call with a 3 + 2 byte nop sequence */
		buf [0] = 0x66; buf [1] = 0x66; buf [2] = 0x90;
		buf [3] = 0x66; buf [4] = 0x90;
	} else if ((code [0] == 0x41) && (code [1] == 0xff)) {
		/* call <REG> */
		/* happens on machines without MAP_32BIT like freebsd */
		/* amd64_set_reg_template is 10 bytes long */
		guint8* buf = code - 10;

		/* FIXME: Make this thread safe */
		/* Padding code suggested by the AMD64 Opt Manual */
		buf [0] = 0x66; buf [1] = 0x66; buf [2] = 0x66; buf [3] = 0x90;
		buf [4] = 0x66; buf [5] = 0x66; buf [6] = 0x66; buf [7] = 0x90;
		buf [8] = 0x66; buf [9] = 0x66; buf [10] = 0x90;
		buf [11] = 0x66; buf [12] = 0x90;
	} else if (code [0] == 0x90 || code [0] == 0xeb || code [0] == 0x66) {
		/* Already changed by another thread */
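		/* 0x90 is a nop, 0xeb a short jmp, and 0x66 the operand size prefix
		   starting the multi byte nops written above, so any of these first
		   bytes indicates the site was rewritten already. */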
	} else {
		printf ("Invalid trampoline sequence: %x %x %x %x %x %x %x\n", code [0], code [1], code [2], code [3],
			code [4], code [5], code [6]);
		g_assert_not_reached ();
	}
}
void
mono_arch_nullify_plt_entry (guint8 *code)
{
	if (mono_aot_only && !nullified_class_init_trampoline)
		nullified_class_init_trampoline = mono_aot_get_named_code ("nullified_class_init_trampoline");

	mono_arch_patch_plt_entry (code, nullified_class_init_trampoline);
}
guchar*
mono_arch_create_trampoline_code (MonoTrampolineType tramp_type)
{
	guint32 code_size;
	MonoJumpInfo *ji;

	return mono_arch_create_trampoline_code_full (tramp_type, &code_size, &ji, FALSE);
}
guchar*
mono_arch_create_trampoline_code_full (MonoTrampolineType tramp_type, guint32 *code_size, MonoJumpInfo **ji, gboolean aot)
{
	guint8 *buf, *code, *tramp, *br [2], *r11_save_code, *after_r11_save_code;
	int i, lmf_offset, offset, res_offset, arg_offset, tramp_offset, saved_regs_offset;
	int saved_fpregs_offset, rbp_offset, framesize, orig_rsp_to_rbp_offset;
	gboolean has_caller;

	if (tramp_type == MONO_TRAMPOLINE_JUMP)
		has_caller = FALSE;
	else
		has_caller = TRUE;

	code = buf = mono_global_codeman_reserve (524);

	*ji = NULL;

	framesize = 524 + sizeof (MonoLMF);
	framesize = (framesize + (MONO_ARCH_FRAME_ALIGNMENT - 1)) & ~ (MONO_ARCH_FRAME_ALIGNMENT - 1);
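	/* framesize is now rounded up to a multiple of MONO_ARCH_FRAME_ALIGNMENT:
	   adding alignment - 1 and masking clears the low bits, e.g. with 16 byte
	   alignment the low four bits. */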
	if (tramp_type == MONO_TRAMPOLINE_GENERIC_CLASS_INIT) {
		static int byte_offset = -1;
		static guint8 bitmask;
		guint8 *jump;

		if (byte_offset < 0)
			mono_marshal_find_bitfield_offset (MonoVTable, initialized, &byte_offset, &bitmask);

		/* Check the initialized bit of the vtable and bail out early if it is set */
		amd64_test_membase_imm_size (code, MONO_AMD64_ARG_REG1, byte_offset, bitmask, 1);
		jump = code;
		amd64_branch8 (code, X86_CC_Z, -1, 1);

		amd64_ret (code);

		x86_patch (jump, code);
	}
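
	/*
	 * Fast path sketch for the generic class init trampoline (register names
	 * assume the SysV ABI, where MONO_AMD64_ARG_REG1 is %rdi):
	 *
	 *   testb $bitmask, byte_offset(%rdi)   ; vtable->initialized set?
	 *   jz    slow                          ; not yet: fall into the trampoline
	 *   ret                                 ; already initialized: return at once
	 * slow:
	 *   ...full trampoline below...
	 */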
	orig_rsp_to_rbp_offset = 0;
	r11_save_code = code;
	/* Reserve 5 bytes for the mov_membase_reg to save R11 */
	code += 5;
	after_r11_save_code = code;
	/*
	 * The generic class init trampoline is called directly by
	 * JITted code, there is no specific trampoline.
	 */
	if (tramp_type != MONO_TRAMPOLINE_GENERIC_CLASS_INIT) {
		/* Pop the return address off the stack */
		amd64_pop_reg (code, AMD64_R11);
		orig_rsp_to_rbp_offset += 8;
	}

	/*
	 * Allocate a new stack frame
	 */
	amd64_push_reg (code, AMD64_RBP);
	orig_rsp_to_rbp_offset -= 8;
	amd64_mov_reg_reg (code, AMD64_RBP, AMD64_RSP, 8);
	amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, framesize);
	offset = 0;
	rbp_offset = - offset;

	offset += 8;
	tramp_offset = - offset;

	offset += 8;
	arg_offset = - offset;
	if (tramp_type != MONO_TRAMPOLINE_GENERIC_CLASS_INIT) {
		/* Compute the trampoline address from the return address saved in R11 */
		if (aot) {
			/* 7 = length of call *<offset>(rip) */
			amd64_alu_reg_imm (code, X86_SUB, AMD64_R11, 7);
		} else {
			/* 5 = length of the call emitted by amd64_call_code () in specific trampolines */
			amd64_alu_reg_imm (code, X86_SUB, AMD64_R11, 5);
		}
		amd64_mov_membase_reg (code, AMD64_RBP, tramp_offset, AMD64_R11, 8);
	} else {
		amd64_mov_membase_imm (code, AMD64_RBP, tramp_offset, 0, 8);
	}
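
	/* The start address of the specific trampoline is kept at tramp_offset so
	   the argument embedded after its call instruction can be read back below,
	   and so it can be passed to the C trampoline function as Arg4. */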
	offset += 8;
	res_offset = - offset;
	/* Save all registers */

	offset += AMD64_NREG * 8;
	saved_regs_offset = - offset;
	for (i = 0; i < AMD64_NREG; ++i) {
		if (i == AMD64_RBP) {
			/* RAX was already saved in an earlier iteration, so it can be
			   used as scratch to fetch the RBP value pushed in the prologue */
			amd64_mov_reg_membase (code, AMD64_RAX, AMD64_RBP, rbp_offset, 8);
			amd64_mov_membase_reg (code, AMD64_RBP, saved_regs_offset + (i * 8), AMD64_RAX, 8);
		} else if (i != AMD64_R11) {
			amd64_mov_membase_reg (code, AMD64_RBP, saved_regs_offset + (i * 8), i, 8);
		} else {
			/* We have to save R11 right at the start of
			   the trampoline code because it's used as a
			   scratch register */
			amd64_mov_membase_reg (r11_save_code, AMD64_RSP, saved_regs_offset + orig_rsp_to_rbp_offset + (i * 8), i, 8);
			g_assert (r11_save_code == after_r11_save_code);
		}
	}
	offset += 8 * 8;
	saved_fpregs_offset = - offset;
	for (i = 0; i < 8; ++i)
		amd64_movsd_membase_reg (code, AMD64_RBP, saved_fpregs_offset + (i * 8), i);
	if (tramp_type != MONO_TRAMPOLINE_GENERIC_CLASS_INIT) {
		/* Obtain the trampoline argument which is encoded in the instruction stream */
		if (aot) {
			/* Load the GOT offset */
			amd64_mov_reg_membase (code, AMD64_R11, AMD64_RBP, tramp_offset, 8);
			amd64_mov_reg_membase (code, AMD64_RAX, AMD64_R11, 7, 4);
			/* Compute the address of the GOT slot */
			amd64_alu_reg_reg_size (code, X86_ADD, AMD64_R11, AMD64_RAX, 8);
			/* Load the value */
			amd64_mov_reg_membase (code, AMD64_R11, AMD64_R11, 0, 8);
		} else {
			amd64_mov_reg_membase (code, AMD64_R11, AMD64_RBP, tramp_offset, 8);
			/* Load the size byte which follows the 5 byte call */
			amd64_mov_reg_membase (code, AMD64_RAX, AMD64_R11, 5, 1);
			amd64_widen_reg (code, AMD64_RAX, AMD64_RAX, TRUE, FALSE);
			amd64_alu_reg_imm_size (code, X86_CMP, AMD64_RAX, 4, 1);
			br [0] = code;
			x86_branch8 (code, X86_CC_NE, 6, FALSE);
			/* 32 bit immediate */
			amd64_mov_reg_membase (code, AMD64_R11, AMD64_R11, 6, 4);
			br [1] = code;
			x86_jump8 (code, 10);
			/* 64 bit immediate */
			mono_amd64_patch (br [0], code);
			amd64_mov_reg_membase (code, AMD64_R11, AMD64_R11, 6, 8);
			mono_amd64_patch (br [1], code);
		}
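
		/*
		 * Layout of a JIT-compiled specific trampoline, which the non-AOT
		 * path above decodes (see mono_arch_create_specific_trampoline below):
		 *
		 *   e8 <rel32>        call <generic trampoline>   (5 bytes)
		 *   04 | 08           size of the argument        (1 byte, at [tramp + 5])
		 *   <imm32 | imm64>   the argument itself         (at [tramp + 6])
		 */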
		amd64_mov_membase_reg (code, AMD64_RBP, arg_offset, AMD64_R11, 8);
	} else {
		/* For the generic class init trampoline the argument is the vtable passed in ARG_REG1 */
		amd64_mov_reg_membase (code, AMD64_R11, AMD64_RBP, saved_regs_offset + (MONO_AMD64_ARG_REG1 * 8), 8);
		amd64_mov_membase_reg (code, AMD64_RBP, arg_offset, AMD64_R11, 8);
	}
	/* Setup LMF */
	offset += sizeof (MonoLMF);
	lmf_offset = - offset;

	/* Save ip */
	if (has_caller)
		amd64_mov_reg_membase (code, AMD64_R11, AMD64_RBP, 8, 8);
	else
		amd64_mov_reg_imm (code, AMD64_R11, 0);
	amd64_mov_membase_reg (code, AMD64_RBP, lmf_offset + G_STRUCT_OFFSET (MonoLMF, rip), AMD64_R11, 8);
	/* Save fp */
	amd64_mov_reg_membase (code, AMD64_R11, AMD64_RSP, framesize, 8);
	amd64_mov_membase_reg (code, AMD64_RBP, lmf_offset + G_STRUCT_OFFSET (MonoLMF, rbp), AMD64_R11, 8);
	/* Save sp, computing the value it had before the trampoline frame was set up */
	amd64_mov_reg_reg (code, AMD64_R11, AMD64_RSP, 8);
	amd64_alu_reg_imm (code, X86_ADD, AMD64_R11, framesize + 16);
	amd64_mov_membase_reg (code, AMD64_RBP, lmf_offset + G_STRUCT_OFFSET (MonoLMF, rsp), AMD64_R11, 8);
	/* Save method */
	if (tramp_type == MONO_TRAMPOLINE_JIT || tramp_type == MONO_TRAMPOLINE_JUMP) {
		amd64_mov_reg_membase (code, AMD64_R11, AMD64_RBP, arg_offset, 8);
		amd64_mov_membase_reg (code, AMD64_RBP, lmf_offset + G_STRUCT_OFFSET (MonoLMF, method), AMD64_R11, 8);
	} else {
		amd64_mov_membase_imm (code, AMD64_RBP, lmf_offset + G_STRUCT_OFFSET (MonoLMF, method), 0, 8);
	}
	/* Save callee saved regs */
#ifdef PLATFORM_WIN32
	amd64_mov_membase_reg (code, AMD64_RBP, lmf_offset + G_STRUCT_OFFSET (MonoLMF, rdi), AMD64_RDI, 8);
	amd64_mov_membase_reg (code, AMD64_RBP, lmf_offset + G_STRUCT_OFFSET (MonoLMF, rsi), AMD64_RSI, 8);
#endif
	amd64_mov_membase_reg (code, AMD64_RBP, lmf_offset + G_STRUCT_OFFSET (MonoLMF, rbx), AMD64_RBX, 8);
	amd64_mov_membase_reg (code, AMD64_RBP, lmf_offset + G_STRUCT_OFFSET (MonoLMF, r12), AMD64_R12, 8);
	amd64_mov_membase_reg (code, AMD64_RBP, lmf_offset + G_STRUCT_OFFSET (MonoLMF, r13), AMD64_R13, 8);
	amd64_mov_membase_reg (code, AMD64_RBP, lmf_offset + G_STRUCT_OFFSET (MonoLMF, r14), AMD64_R14, 8);
	amd64_mov_membase_reg (code, AMD64_RBP, lmf_offset + G_STRUCT_OFFSET (MonoLMF, r15), AMD64_R15, 8);
	if (aot) {
		*ji = mono_patch_info_list_prepend (*ji, code - buf, MONO_PATCH_INFO_JIT_ICALL_ADDR, "mono_get_lmf_addr");
		amd64_mov_reg_membase (code, AMD64_R11, AMD64_RIP, 0, 8);
	} else {
		amd64_mov_reg_imm (code, AMD64_R11, mono_get_lmf_addr);
	}
	amd64_call_reg (code, AMD64_R11);
	/* Save lmf_addr */
	amd64_mov_membase_reg (code, AMD64_RBP, lmf_offset + G_STRUCT_OFFSET (MonoLMF, lmf_addr), AMD64_RAX, 8);
	/* Save previous_lmf */
	/* Set the lowest bit to 1 to signal that this LMF has the ip field set */
	amd64_mov_reg_membase (code, AMD64_R11, AMD64_RAX, 0, 8);
	amd64_alu_reg_imm_size (code, X86_ADD, AMD64_R11, 1, 8);
	amd64_mov_membase_reg (code, AMD64_RBP, lmf_offset + G_STRUCT_OFFSET (MonoLMF, previous_lmf), AMD64_R11, 8);
	/* Set new lmf */
	amd64_lea_membase (code, AMD64_R11, AMD64_RBP, lmf_offset);
	amd64_mov_membase_reg (code, AMD64_RAX, 0, AMD64_R11, 8);
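
	/*
	 * The LMF is now linked in: *lmf_addr points at the LMF on our stack, and
	 * its previous_lmf field (tagged in bit 0) points at the LMF it displaced,
	 * so the chain can be restored before returning (see below).
	 */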
	/* Arg1 is the pointer to the saved registers */
	amd64_lea_membase (code, AMD64_ARG_REG1, AMD64_RBP, saved_regs_offset);

	/* Arg2 is the address of the calling code */
	if (has_caller)
		amd64_mov_reg_membase (code, AMD64_ARG_REG2, AMD64_RBP, 8, 8);
	else
		amd64_mov_reg_imm (code, AMD64_ARG_REG2, 0);

	/* Arg3 is the method/vtable ptr */
	amd64_mov_reg_membase (code, AMD64_ARG_REG3, AMD64_RBP, arg_offset, 8);

	/* Arg4 is the trampoline address */
	amd64_mov_reg_membase (code, AMD64_ARG_REG4, AMD64_RBP, tramp_offset, 8);
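
	/*
	 * These four arguments match the common shape the C trampoline functions
	 * are assumed to share, roughly:
	 *
	 *   gpointer tramp_func (gssize *regs, guint8 *code, gpointer arg, guint8 *tramp)
	 *
	 * (a sketch; the exact parameter types vary per trampoline type).
	 */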
	if (aot) {
		char *icall_name = g_strdup_printf ("trampoline_func_%d", tramp_type);
		*ji = mono_patch_info_list_prepend (*ji, code - buf, MONO_PATCH_INFO_JIT_ICALL_ADDR, icall_name);
		amd64_mov_reg_membase (code, AMD64_RAX, AMD64_RIP, 0, 8);
	} else {
		tramp = (guint8*)mono_get_trampoline_func (tramp_type);
		amd64_mov_reg_imm (code, AMD64_RAX, tramp);
	}
	amd64_call_reg (code, AMD64_RAX);
	/* Check for thread interruption */
	/* This is not perf critical code so no need to check the interrupt flag */
	/*
	 * Have to call the _force_ variant, since there could be a protected wrapper on the top of the stack.
	 */
	amd64_mov_membase_reg (code, AMD64_RBP, res_offset, AMD64_RAX, 8);
	if (aot) {
		*ji = mono_patch_info_list_prepend (*ji, code - buf, MONO_PATCH_INFO_JIT_ICALL_ADDR, "mono_thread_force_interruption_checkpoint");
		amd64_mov_reg_membase (code, AMD64_RAX, AMD64_RIP, 0, 8);
	} else {
		amd64_mov_reg_imm (code, AMD64_RAX, (guint8*)mono_thread_force_interruption_checkpoint);
	}
	amd64_call_reg (code, AMD64_RAX);
	amd64_mov_reg_membase (code, AMD64_RAX, AMD64_RBP, res_offset, 8);
	/* Restore LMF */
	amd64_mov_reg_membase (code, AMD64_RCX, AMD64_RBP, lmf_offset + G_STRUCT_OFFSET (MonoLMF, previous_lmf), 8);
	/* Clear the tag bit set above */
	amd64_alu_reg_imm_size (code, X86_SUB, AMD64_RCX, 1, 8);
	amd64_mov_reg_membase (code, AMD64_R11, AMD64_RBP, lmf_offset + G_STRUCT_OFFSET (MonoLMF, lmf_addr), 8);
	amd64_mov_membase_reg (code, AMD64_R11, 0, AMD64_RCX, 8);
	/*
	 * Restore argument registers, r10 (needed to pass rgctx to
	 * static shared generic methods) and r11 (imt register for
	 * interface calls).
	 */
	for (i = 0; i < AMD64_NREG; ++i)
		if (AMD64_IS_ARGUMENT_REG (i) || i == AMD64_R10 || i == AMD64_R11)
			amd64_mov_reg_membase (code, i, AMD64_RBP, saved_regs_offset + (i * 8), 8);
	/*
	 * FIXME: When using aot-only, the called code might be a C vararg function
	 * which uses %rax as well.
	 * We could restore it, but we would have to use another register to store the
	 * target address, and we don't have any left.
	 * Also, the default AOT plt trampolines overwrite 'rax'.
	 */

	for (i = 0; i < 8; ++i)
		amd64_movsd_reg_membase (code, i, AMD64_RBP, saved_fpregs_offset + (i * 8));
	/* Restore stack */
	amd64_leave (code);

	if (tramp_type == MONO_TRAMPOLINE_CLASS_INIT ||
			tramp_type == MONO_TRAMPOLINE_GENERIC_CLASS_INIT ||
			tramp_type == MONO_TRAMPOLINE_RGCTX_LAZY_FETCH)
		amd64_ret (code);
	else
		/* call the compiled method */
		amd64_jump_reg (code, AMD64_RAX);
	g_assert ((code - buf) <= 524);

	mono_arch_flush_icache (buf, code - buf);

	*code_size = code - buf;

	if (tramp_type == MONO_TRAMPOLINE_CLASS_INIT) {
		guint32 code_len;

		/* Initialize the nullified class init trampoline used in the AOT case */
		nullified_class_init_trampoline = mono_arch_get_nullified_class_init_trampoline (&code_len);
	}

	return buf;
}
gpointer
mono_arch_get_nullified_class_init_trampoline (guint32 *code_len)
{
	guint8 *code, *buf;

	code = buf = mono_global_codeman_reserve (16);

	/* A nullified class init trampoline simply returns */
	amd64_ret (code);

	mono_arch_flush_icache (buf, code - buf);

	*code_len = code - buf;

	return buf;
}
gpointer
mono_arch_create_specific_trampoline (gpointer arg1, MonoTrampolineType tramp_type, MonoDomain *domain, guint32 *code_len)
{
	guint8 *code, *buf, *tramp;
	int size;

	tramp = mono_get_trampoline_code (tramp_type);

	if ((((guint64)arg1) >> 32) == 0)
		size = 5 + 1 + 4;
	else
		size = 5 + 1 + 8;

	mono_domain_lock (domain);
	code = buf = mono_code_manager_reserve_align (domain->code_mp, size, 1);
	mono_domain_unlock (domain);

	amd64_call_code (code, tramp);
	/* The trampoline code will obtain the argument from the instruction stream */
	if ((((guint64)arg1) >> 32) == 0) {
		/* A one byte size marker followed by the immediate */
		*code = 0x4;
		*(guint32*)(code + 1) = (gint64)arg1;
		code += 5;
	} else {
		*code = 0x8;
		*(guint64*)(code + 1) = (gint64)arg1;
		code += 9;
	}
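
	/*
	 * For example, with a 32 bit arg1 the finished trampoline is 10 bytes:
	 *
	 *   e8 xx xx xx xx   call <generic trampoline>
	 *   04               argument size marker
	 *   yy yy yy yy      arg1
	 */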
	g_assert ((code - buf) <= size);

	if (code_len)
		*code_len = size;

	mono_arch_flush_icache (buf, size);

	return buf;
}
gpointer
mono_arch_create_rgctx_lazy_fetch_trampoline (guint32 slot)
{
	guint8 *tramp;
	guint8 *code, *buf;
	guint8 **rgctx_null_jumps;
	int tramp_size;
	int depth, index;
	int i;
	gboolean mrgctx;

	mrgctx = MONO_RGCTX_SLOT_IS_MRGCTX (slot);
	index = MONO_RGCTX_SLOT_INDEX (slot);
	if (mrgctx)
		index += sizeof (MonoMethodRuntimeGenericContext) / sizeof (gpointer);
	for (depth = 0; ; ++depth) {
		int size = mono_class_rgctx_get_array_size (depth, mrgctx);

		if (index < size - 1)
			break;
		index -= size - 1;
	}
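
	/*
	 * The rgctx is stored as a linked list of arrays: slot 0 of each array
	 * links to the next array, so an array of size 'size' holds size - 1
	 * usable slots. The loop above converts the flat slot number into a
	 * (depth, index) pair: follow 'depth' links, then load slot 'index + 1'.
	 */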
	tramp_size = 36 + 8 * depth;

	code = buf = mono_global_codeman_reserve (tramp_size);

	rgctx_null_jumps = g_malloc (sizeof (guint8*) * (depth + 2));
	if (mrgctx) {
		/* get mrgctx ptr */
		amd64_mov_reg_reg (buf, AMD64_RAX, AMD64_ARG_REG1, 8);
	} else {
		/* load rgctx ptr from vtable */
		amd64_mov_reg_membase (buf, AMD64_RAX, AMD64_ARG_REG1, G_STRUCT_OFFSET (MonoVTable, runtime_generic_context), 8);
		/* is the rgctx ptr null? */
		amd64_test_reg_reg (buf, AMD64_RAX, AMD64_RAX);
		/* if yes, jump to actual trampoline */
		rgctx_null_jumps [0] = buf;
		amd64_branch8 (buf, X86_CC_Z, -1, 1);
	}
	for (i = 0; i < depth; ++i) {
		/* load ptr to next array */
		if (mrgctx && i == 0)
			amd64_mov_reg_membase (buf, AMD64_RAX, AMD64_RAX, sizeof (MonoMethodRuntimeGenericContext), 8);
		else
			amd64_mov_reg_membase (buf, AMD64_RAX, AMD64_RAX, 0, 8);
		/* is the ptr null? */
		amd64_test_reg_reg (buf, AMD64_RAX, AMD64_RAX);
		/* if yes, jump to actual trampoline */
		rgctx_null_jumps [i + 1] = buf;
		amd64_branch8 (buf, X86_CC_Z, -1, 1);
	}
	/* fetch slot */
	amd64_mov_reg_membase (buf, AMD64_RAX, AMD64_RAX, sizeof (gpointer) * (index + 1), 8);
	/* is the slot null? */
	amd64_test_reg_reg (buf, AMD64_RAX, AMD64_RAX);
	/* if yes, jump to actual trampoline */
	rgctx_null_jumps [depth + 1] = buf;
	amd64_branch8 (buf, X86_CC_Z, -1, 1);
	/* otherwise return */
	amd64_ret (buf);
	for (i = mrgctx ? 1 : 0; i <= depth + 1; ++i)
		x86_patch (rgctx_null_jumps [i], buf);

	g_free (rgctx_null_jumps);
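
	/*
	 * Sketch of the finished stub for depth == 1 and a vtable rgctx (any null
	 * pointer on the way falls through to the slow path):
	 *
	 *         mov  vtable->runtime_generic_context(%arg1), %rax
	 *         test %rax, %rax ; jz slow
	 *         mov  0(%rax), %rax                ; follow link to next array
	 *         test %rax, %rax ; jz slow
	 *         mov  8*(index+1)(%rax), %rax      ; load the slot
	 *         test %rax, %rax ; jz slow
	 *         ret
	 *   slow: mov  %arg1, %vtable_reg
	 *         jmp  <rgctx lazy fetch specific trampoline>
	 */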
	/* move the rgctx pointer to the VTABLE register */
	amd64_mov_reg_reg (buf, MONO_ARCH_VTABLE_REG, AMD64_ARG_REG1, 8);

	tramp = mono_arch_create_specific_trampoline (GUINT_TO_POINTER (slot), MONO_TRAMPOLINE_RGCTX_LAZY_FETCH, mono_get_root_domain (), NULL);

	/* jump to the actual trampoline */
	amd64_jump_code (buf, tramp);

	mono_arch_flush_icache (code, buf - code);

	g_assert (buf - code <= tramp_size);

	return code;
}
void
mono_arch_invalidate_method (MonoJitInfo *ji, void *func, gpointer func_arg)
{
	/* FIXME: This is not thread safe */
	guint8 *code = ji->code_start;

	amd64_mov_reg_imm (code, AMD64_ARG_REG1, func_arg);
	amd64_mov_reg_imm (code, AMD64_R11, func);

	x86_push_imm (code, (guint64)func_arg);
	amd64_call_reg (code, AMD64_R11);
}