/*
 * tramp-amd64.c: JIT trampoline code for amd64
 *
 * Authors:
 *   Dietmar Maurer (dietmar@ximian.com)
 *   Zoltan Varga (vargaz@gmail.com)
 *
 * (C) 2001 Ximian, Inc.
 * Copyright 2003-2011 Novell, Inc (http://www.novell.com)
 * Copyright 2011 Xamarin, Inc (http://www.xamarin.com)
 */
#include <config.h>
#include <glib.h>

#include <mono/metadata/abi-details.h>
#include <mono/metadata/appdomain.h>
#include <mono/metadata/marshal.h>
#include <mono/metadata/tabledefs.h>
#include <mono/metadata/mono-debug-debugger.h>
#include <mono/metadata/monitor.h>
#include <mono/metadata/profiler-private.h>
#include <mono/metadata/gc-internal.h>
#include <mono/arch/amd64/amd64-codegen.h>

#include <mono/utils/memcheck.h>

#include "mini.h"
#include "mini-amd64.h"

#if defined(__native_client_codegen__) && defined(__native_client__)
#include <nacl/nacl_dyncode.h>
#endif

#define IS_REX(inst) (((inst) >= 0x40) && ((inst) <= 0x4f))

/*
 * mono_arch_get_unbox_trampoline:
 * @m: method pointer
 * @addr: pointer to native code for @m
 *
 * When value type methods are called through the vtable, we need to unbox the
 * this argument. This method returns a pointer to a trampoline which does the
 * unboxing before calling the method.
 */
gpointer
mono_arch_get_unbox_trampoline (MonoMethod *m, gpointer addr)
{
	guint8 *code, *start;
	int this_reg, size = NACL_SIZE (20, 32);

	MonoDomain *domain = mono_domain_get ();

	this_reg = mono_arch_get_this_arg_reg (NULL);

	start = code = mono_domain_code_reserve (domain, size);

	amd64_alu_reg_imm (code, X86_ADD, this_reg, sizeof (MonoObject));
	/* FIXME: Optimize this */
	amd64_mov_reg_imm (code, AMD64_RAX, addr);
	amd64_jump_reg (code, AMD64_RAX);
	g_assert ((code - start) < size);

	nacl_domain_code_validate (domain, &start, size, &code);

	mono_arch_flush_icache (start, code - start);
	mono_profiler_code_buffer_new (start, code - start, MONO_PROFILER_CODE_BUFFER_UNBOX_TRAMPOLINE, m);

	return start;
}
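
/*
 * For reference, a rough sketch of the sequence emitted above (default
 * codegen; the exact encoding comes from the amd64-codegen.h macros):
 *
 *   add <this_reg>, sizeof (MonoObject)   ; skip the object header
 *   mov rax, <addr>                       ; 64-bit immediate
 *   jmp rax
 */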

/*
 * mono_arch_get_static_rgctx_trampoline:
 *
 *   Create a trampoline which sets RGCTX_REG to MRGCTX, then jumps to ADDR.
 */
gpointer
mono_arch_get_static_rgctx_trampoline (MonoMethod *m, MonoMethodRuntimeGenericContext *mrgctx, gpointer addr)
{
	guint8 *code, *start;
	int buf_len;

	MonoDomain *domain = mono_domain_get ();

#ifdef MONO_ARCH_NOMAP32BIT
	buf_len = 32;
#else
	/* AOTed code could still have a non-32 bit address */
	if ((((guint64)addr) >> 32) == 0)
		buf_len = NACL_SIZE (16, 32);
	else
		buf_len = NACL_SIZE (30, 32);
#endif

	start = code = mono_domain_code_reserve (domain, buf_len);

	amd64_mov_reg_imm (code, MONO_ARCH_RGCTX_REG, mrgctx);
	amd64_jump_code (code, addr);
	g_assert ((code - start) < buf_len);

	nacl_domain_code_validate (domain, &start, buf_len, &code);
	mono_arch_flush_icache (start, code - start);
	mono_profiler_code_buffer_new (start, code - start, MONO_PROFILER_CODE_BUFFER_GENERICS_TRAMPOLINE, NULL);

	return start;
}
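
/*
 * A sketch of the static rgctx trampoline (sizes differ depending on whether
 * MRGCTX and ADDR fit in 32 bits, hence the buf_len logic above):
 *
 *   mov <MONO_ARCH_RGCTX_REG>, <mrgctx>   ; 32- or 64-bit immediate
 *   jmp <addr>                            ; rel32 jump, or through a register
 */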

gpointer
mono_arch_get_llvm_imt_trampoline (MonoDomain *domain, MonoMethod *m, int vt_offset)
{
	guint8 *code, *start;
	int buf_len;
	int this_reg;

	buf_len = 32;

	start = code = mono_domain_code_reserve (domain, buf_len);

	this_reg = mono_arch_get_this_arg_reg (NULL);

	/* Set the IMT argument */
	amd64_mov_reg_imm (code, MONO_ARCH_IMT_REG, m);
	/* Load vtable address */
	amd64_mov_reg_membase (code, AMD64_RAX, this_reg, 0, 8);
	amd64_jump_membase (code, AMD64_RAX, vt_offset);

	g_assert ((code - start) < buf_len);

	nacl_domain_code_validate (domain, &start, buf_len, &code);

	mono_arch_flush_icache (start, code - start);
	mono_profiler_code_buffer_new (start, code - start, MONO_PROFILER_CODE_BUFFER_IMT_TRAMPOLINE, NULL);

	return start;
}
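
/*
 * The resulting sequence, roughly (a sketch, not the literal encoding):
 *
 *   mov <MONO_ARCH_IMT_REG>, <m>   ; materialize the IMT method
 *   mov rax, [<this_reg>]          ; load the vtable from `this`
 *   jmp [rax + <vt_offset>]        ; tail-jump through the vtable slot
 */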

/*
 * mono_arch_patch_callsite:
 *
 *   Patch the callsite whose address is given by ORIG_CODE so it calls ADDR. ORIG_CODE
 * points to the pc right after the call.
 */
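/*
 * For reference, the call-site shapes recognized below (a summary of the byte
 * checks in the code, not a list of all amd64 call encodings):
 *
 *   49 bb <imm64>          mov r11, <imm64>
 *   41 ff d3               call r11
 *
 *   e8 <rel32>             call <rel32>            (32-bit displacement)
 *
 *   41 ff 15 <disp32>      call *<disp32>(%rip)    (call through a GOT slot)
 */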
void
mono_arch_patch_callsite (guint8 *method_start, guint8 *orig_code, guint8 *addr)
{
#if defined(__default_codegen__)
	guint8 *code;
	guint8 buf [16];
	gboolean can_write = mono_breakpoint_clean_code (method_start, orig_code, 14, buf, sizeof (buf));

	code = buf + 14;

	/* mov 64-bit imm into r11 (followed by call reg?) or direct call */
	if (((code [-13] == 0x49) && (code [-12] == 0xbb)) || (code [-5] == 0xe8)) {
		if (code [-5] != 0xe8) {
			if (can_write) {
				InterlockedExchangePointer ((gpointer*)(orig_code - 11), addr);
				VALGRIND_DISCARD_TRANSLATIONS (orig_code - 11, sizeof (gpointer));
			}
		} else {
			gboolean disp_32bit = ((((gint64)addr - (gint64)orig_code)) < (1 << 30)) && ((((gint64)addr - (gint64)orig_code)) > -(1 << 30));

			if ((((guint64)(addr)) >> 32) != 0 && !disp_32bit) {
#ifdef MONO_ARCH_NOMAP32BIT
				/* Print some diagnostics */
				MonoJitInfo *ji = mono_jit_info_table_find (mono_domain_get (), (char*)orig_code);
				if (ji)
					fprintf (stderr, "At %s, offset 0x%zx\n", mono_method_full_name (jinfo_get_method (ji), TRUE), (guint8*)orig_code - (guint8*)ji->code_start);
				fprintf (stderr, "Addr: %p\n", addr);
				ji = mono_jit_info_table_find (mono_domain_get (), (char*)addr);
				if (ji)
					fprintf (stderr, "Callee: %s\n", mono_method_full_name (jinfo_get_method (ji), TRUE));
				g_assert_not_reached ();
#else
				/*
				 * This might happen when calling AOTed code. Create a thunk.
				 */
				guint8 *thunk_start, *thunk_code;

				thunk_start = thunk_code = mono_domain_code_reserve (mono_domain_get (), 32);
				amd64_jump_membase (thunk_code, AMD64_RIP, 0);
				*(guint64*)thunk_code = (guint64)addr;
				addr = thunk_start;
				g_assert ((((guint64)(addr)) >> 32) == 0);
				mono_arch_flush_icache (thunk_start, thunk_code - thunk_start);
				mono_profiler_code_buffer_new (thunk_start, thunk_code - thunk_start, MONO_PROFILER_CODE_BUFFER_HELPER, NULL);
#endif
			}
			if (can_write) {
				InterlockedExchange ((gint32*)(orig_code - 4), ((gint64)addr - (gint64)orig_code));
				VALGRIND_DISCARD_TRANSLATIONS (orig_code - 5, 4);
			}
		}
	}
	else if ((code [-7] == 0x41) && (code [-6] == 0xff) && (code [-5] == 0x15)) {
		/* call *<OFFSET>(%rip) */
		gpointer *got_entry = (gpointer*)((guint8*)orig_code + (*(guint32*)(orig_code - 4)));
		if (can_write) {
			InterlockedExchangePointer (got_entry, addr);
			VALGRIND_DISCARD_TRANSLATIONS (orig_code - 5, sizeof (gpointer));
		}
	}
#elif defined(__native_client__)
	/* These are essentially the same 2 cases as above, modified for NaCl */

	/* Target must be bundle-aligned */
	g_assert (((guint32)addr & kNaClAlignmentMask) == 0);
	/* Return target must be bundle-aligned */
	g_assert (((guint32)orig_code & kNaClAlignmentMask) == 0);

	if (orig_code[-5] == 0xe8) {
		/* Direct call */
		int ret;
		gint32 offset = (gint32)addr - (gint32)orig_code;
		guint8 buf[sizeof(gint32)];
		*((gint32*)(buf)) = offset;
		ret = nacl_dyncode_modify (orig_code - sizeof(gint32), buf, sizeof(gint32));
		g_assert (ret == 0);
	}
	else if (is_nacl_call_reg_sequence (orig_code - 10) && orig_code[-16] == 0x41 && orig_code[-15] == 0xbb) {
		int ret;
		guint8 buf[sizeof(gint32)];
		*((gint32 *)(buf)) = (gint32)(gsize)addr;
		/* orig_code[-14] is the start of the immediate. */
		ret = nacl_dyncode_modify (orig_code - 14, buf, sizeof(gint32));
		g_assert (ret == 0);
	}
	else {
		g_assert_not_reached ();
	}
#endif
}

guint8*
mono_arch_create_llvm_native_thunk (MonoDomain *domain, guint8 *addr)
{
	/*
	 * The caller is LLVM code and the call displacement might exceed 32 bits. We can't determine the caller address, so
	 * we add a thunk every time.
	 * Since the caller is also allocated using the domain code manager, hopefully the displacement will fit into 32 bits.
	 * FIXME: Avoid this if possible if !MONO_ARCH_NOMAP32BIT and ADDR is 32 bits.
	 */
	guint8 *thunk_start, *thunk_code;

	thunk_start = thunk_code = mono_domain_code_reserve (mono_domain_get (), 32);
	amd64_jump_membase (thunk_code, AMD64_RIP, 0);
	*(guint64*)thunk_code = (guint64)addr;

	mono_arch_flush_icache (thunk_start, thunk_code - thunk_start);
	mono_profiler_code_buffer_new (thunk_start, thunk_code - thunk_start, MONO_PROFILER_CODE_BUFFER_HELPER, NULL);
	return thunk_start;
}
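
/*
 * The thunk layout, for reference: a single rip-relative jump whose
 * displacement is 0, so the 64-bit target stored immediately after the
 * instruction is what gets jumped through:
 *
 *   ff 25 00 00 00 00      jmp *0(%rip)
 *   <addr>                 8-byte absolute target
 */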

void
mono_arch_patch_plt_entry (guint8 *code, gpointer *got, mgreg_t *regs, guint8 *addr)
{
	gint32 disp;
	gpointer *plt_jump_table_entry;

#if defined(__default_codegen__)
	/* A PLT entry: jmp *<DISP>(%rip) */
	g_assert (code [0] == 0xff);
	g_assert (code [1] == 0x25);

	disp = *(gint32*)(code + 2);

	plt_jump_table_entry = (gpointer*)(code + 6 + disp);
#elif defined(__native_client_codegen__)
	/* A PLT entry:            */
	/* mov <DISP>(%rip), %r11d */

	/* Verify the 'mov' */
	g_assert (code [0] == 0x45);
	g_assert (code [1] == 0x8b);
	g_assert (code [2] == 0x1d);

	disp = *(gint32*)(code + 3);

	/* 7 = 3 (mov opcode) + 4 (disp) */
	/* This needs to resolve to the target of the RIP-relative offset */
	plt_jump_table_entry = (gpointer*)(code + 7 + disp);

#endif /* __native_client_codegen__ */

	InterlockedExchangePointer (plt_jump_table_entry, addr);
}
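
/*
 * Note on the displacement arithmetic above: rip-relative operands are
 * relative to the address of the *next* instruction, so the jump table entry
 * sits at code + <instruction length> + disp (6 bytes for jmp *<disp32>(%rip),
 * 7 bytes for the NaCl mov).
 */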

static gpointer
get_vcall_slot (guint8 *code, mgreg_t *regs, int *displacement)
{
	guint8 buf [10];
	gint32 disp;
	MonoJitInfo *ji = NULL;

#ifdef ENABLE_LLVM
	/* code - 9 might be before the start of the method */
	/* FIXME: Avoid this expensive call somehow */
	ji = mono_jit_info_table_find (mono_domain_get (), (char*)code);
#endif

	mono_breakpoint_clean_code (ji ? ji->code_start : NULL, code, 9, buf, sizeof (buf));
	code = buf + 9;

	*displacement = 0;

	code -= 7;

	if ((code [0] == 0x41) && (code [1] == 0xff) && (code [2] == 0x15)) {
		/* call OFFSET(%rip) */
		/* Dead code, kept for reference: the assert above makes it unreachable */
		g_assert_not_reached ();
		*displacement = *(guint32*)(code + 3);
		return (gpointer*)(code + disp + 7);
	} else {
		g_assert_not_reached ();
		return NULL;
	}
}

static gpointer*
get_vcall_slot_addr (guint8* code, mgreg_t *regs)
{
	gpointer vt;
	int displacement;

	vt = get_vcall_slot (code, regs, &displacement);
	if (!vt)
		return NULL;
	return (gpointer*)((char*)vt + displacement);
}

void
mono_arch_nullify_class_init_trampoline (guint8 *code, mgreg_t *regs)
{
	guint8 buf [16];
	MonoJitInfo *ji = NULL;
	gboolean can_write;
	gpointer tramp = mini_get_nullified_class_init_trampoline ();

	if (mono_use_llvm) {
		/* code - 7 might be before the start of the method */
		/* FIXME: Avoid this expensive call somehow */
		ji = mono_jit_info_table_find (mono_domain_get (), (char*)code);
	}

	can_write = mono_breakpoint_clean_code (ji ? ji->code_start : NULL, code, 7, buf, sizeof (buf));
	if (!can_write)
		return;

	/*
	 * A given byte sequence can match more than one case here, so we have to be
	 * really careful about the ordering of the cases. Longer sequences
	 * come first.
	 */
	if ((buf [0] == 0x41) && (buf [1] == 0xff) && (buf [2] == 0x15)) {
		gpointer *vtable_slot;

		/* call *<OFFSET>(%rip) */
		vtable_slot = get_vcall_slot_addr (code, regs);
		g_assert (vtable_slot);

		*vtable_slot = tramp;
	} else if (buf [2] == 0xe8) {
		/* call <TARGET> */
		//guint8 *buf = code - 2;

		/*
		 * It would be better to replace the call with nops, but that doesn't seem
		 * to work on SMP machines even when the whole call is inside a cache line.
		 * Patching the call address seems to work.
		 */
		/*
		buf [0] = 0x66;
		buf [1] = 0x66;
		buf [2] = 0x90;
		buf [3] = 0x66;
		buf [4] = 0x90;
		*/

		mono_arch_patch_callsite (code - 5, code, tramp);
	} else if ((buf [5] == 0xff) && x86_modrm_mod (buf [6]) == 3 && x86_modrm_reg (buf [6]) == 2) {
		/* call *<reg> */
		/* Generated by the LLVM JIT or on platforms without MAP_32BIT set */
		mono_arch_patch_callsite (code - 13, code, tramp);
	} else if (buf [4] == 0x90 || buf [5] == 0xeb || buf [6] == 0x66) {
		/* Already changed by another thread */
		;
	} else {
		printf ("Invalid trampoline sequence: %x %x %x %x %x %x %x\n", buf [0], buf [1], buf [2], buf [3],
			buf [4], buf [5], buf [6]);
		g_assert_not_reached ();
	}
}

static void
stack_unaligned (MonoTrampolineType tramp_type)
{
	printf ("%d\n", tramp_type);
	g_assert_not_reached ();
}

guchar*
mono_arch_create_generic_trampoline (MonoTrampolineType tramp_type, MonoTrampInfo **info, gboolean aot)
{
	guint8 *buf, *code, *tramp, *br [2], *r11_save_code, *after_r11_save_code;
	int i, lmf_offset, offset, res_offset, arg_offset, rax_offset, tramp_offset, saved_regs_offset;
	int saved_fpregs_offset, rbp_offset, framesize, orig_rsp_to_rbp_offset, cfa_offset;
	gboolean has_caller;
	char *tramp_name;
	GSList *unwind_ops = NULL;
	MonoJumpInfo *ji = NULL;
	const guint kMaxCodeSize = NACL_SIZE (600, 600*2);
#if defined(__native_client_codegen__)
	const guint kNaClTrampOffset = 17;
#endif

	if (tramp_type == MONO_TRAMPOLINE_JUMP || tramp_type == MONO_TRAMPOLINE_HANDLER_BLOCK_GUARD)
		has_caller = FALSE;
	else
		has_caller = TRUE;

	code = buf = mono_global_codeman_reserve (kMaxCodeSize);

	framesize = kMaxCodeSize + sizeof (MonoLMFTramp);
	framesize = (framesize + (MONO_ARCH_FRAME_ALIGNMENT - 1)) & ~ (MONO_ARCH_FRAME_ALIGNMENT - 1);

	orig_rsp_to_rbp_offset = 0;
	r11_save_code = code;
	/* Reserve 5 bytes for the mov_membase_reg to save R11 */
	amd64_padding (code, 5);
	after_r11_save_code = code;

	// CFA = sp + 16 (the trampoline address is on the stack)
	cfa_offset = 16;
	mono_add_unwind_op_def_cfa (unwind_ops, code, buf, AMD64_RSP, 16);
	// IP saved at CFA - 8
	mono_add_unwind_op_offset (unwind_ops, code, buf, AMD64_RIP, -8);

	/* Pop the return address off the stack */
	amd64_pop_reg (code, AMD64_R11);
	orig_rsp_to_rbp_offset += sizeof(mgreg_t);

	cfa_offset -= sizeof(mgreg_t);
	mono_add_unwind_op_def_cfa_offset (unwind_ops, code, buf, cfa_offset);

	/*
	 * Allocate a new stack frame
	 */
	amd64_push_reg (code, AMD64_RBP);
	cfa_offset += sizeof(mgreg_t);
	mono_add_unwind_op_def_cfa_offset (unwind_ops, code, buf, cfa_offset);
	mono_add_unwind_op_offset (unwind_ops, code, buf, AMD64_RBP, - cfa_offset);

	orig_rsp_to_rbp_offset -= sizeof(mgreg_t);
	amd64_mov_reg_reg (code, AMD64_RBP, AMD64_RSP, sizeof(mgreg_t));
	mono_add_unwind_op_def_cfa_reg (unwind_ops, code, buf, AMD64_RBP);
	amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, framesize);

	/* Compute the offsets of the stack slots used below */
	offset = 0;
	rbp_offset = - offset;

	offset += sizeof(mgreg_t);
	rax_offset = - offset;

	offset += sizeof(mgreg_t);
	tramp_offset = - offset;

	offset += sizeof(gpointer);
	arg_offset = - offset;

	/* Compute the trampoline address from the return address saved in R11 */
	if (aot) {
#if defined(__default_codegen__)
		/* 7 = length of call *<offset>(rip) */
		amd64_alu_reg_imm (code, X86_SUB, AMD64_R11, 7);
#elif defined(__native_client_codegen__)
		amd64_alu_reg_imm (code, X86_SUB, AMD64_R11, kNaClTrampOffset);
#endif
	} else {
		/* 5 = length of amd64_call_membase () */
		amd64_alu_reg_imm (code, X86_SUB, AMD64_R11, 5);
	}
	amd64_mov_membase_reg (code, AMD64_RBP, tramp_offset, AMD64_R11, sizeof(gpointer));

	offset += sizeof(mgreg_t);
	res_offset = - offset;

	/* Save all registers */
	offset += AMD64_NREG * sizeof(mgreg_t);
	saved_regs_offset = - offset;
	for (i = 0; i < AMD64_NREG; ++i) {
		if (i == AMD64_RBP) {
			/* RAX is already saved */
			amd64_mov_reg_membase (code, AMD64_RAX, AMD64_RBP, rbp_offset, sizeof(mgreg_t));
			amd64_mov_membase_reg (code, AMD64_RBP, saved_regs_offset + (i * sizeof(mgreg_t)), AMD64_RAX, sizeof(mgreg_t));
		} else if (i != AMD64_R11) {
			amd64_mov_membase_reg (code, AMD64_RBP, saved_regs_offset + (i * sizeof(mgreg_t)), i, sizeof(mgreg_t));
		} else {
			/* We have to save R11 right at the start of
			   the trampoline code because it's used as a
			   scratch register */
			amd64_mov_membase_reg (r11_save_code, AMD64_RSP, saved_regs_offset + orig_rsp_to_rbp_offset + (i * sizeof(mgreg_t)), i, sizeof(mgreg_t));
			g_assert (r11_save_code == after_r11_save_code);
		}
	}

	offset += 8 * sizeof(mgreg_t);
	saved_fpregs_offset = - offset;
	for (i = 0; i < 8; ++i)
		amd64_movsd_membase_reg (code, AMD64_RBP, saved_fpregs_offset + (i * sizeof(mgreg_t)), i);

	/* Check that the stack is aligned */
#if defined(__default_codegen__)
	amd64_mov_reg_reg (code, AMD64_R11, AMD64_RSP, sizeof (mgreg_t));
	amd64_alu_reg_imm (code, X86_AND, AMD64_R11, 15);
	amd64_alu_reg_imm (code, X86_CMP, AMD64_R11, 0);
	br [0] = code;
	amd64_branch_disp (code, X86_CC_Z, 0, FALSE);
	if (aot) {
		/* Dereference a NULL pointer to trigger a fault */
		amd64_mov_reg_imm (code, AMD64_R11, 0);
		amd64_mov_reg_membase (code, AMD64_R11, AMD64_R11, 0, 8);
	} else {
		amd64_mov_reg_imm (code, MONO_AMD64_ARG_REG1, tramp_type);
		amd64_mov_reg_imm (code, AMD64_R11, stack_unaligned);
		amd64_call_reg (code, AMD64_R11);
	}
	mono_amd64_patch (br [0], code);
	//amd64_breakpoint (code);
#endif

	if (tramp_type != MONO_TRAMPOLINE_GENERIC_CLASS_INIT &&
		tramp_type != MONO_TRAMPOLINE_MONITOR_ENTER &&
		tramp_type != MONO_TRAMPOLINE_MONITOR_EXIT &&
		tramp_type != MONO_TRAMPOLINE_HANDLER_BLOCK_GUARD) {
		/* Obtain the trampoline argument which is encoded in the instruction stream */
		if (aot) {
			/* Load the GOT offset */
			amd64_mov_reg_membase (code, AMD64_R11, AMD64_RBP, tramp_offset, sizeof(gpointer));
#if defined(__default_codegen__)
			/*
			 * r11 points to a call *<offset>(%rip) instruction, load the
			 * pc-relative offset from the instruction itself.
			 */
			amd64_mov_reg_membase (code, AMD64_RAX, AMD64_R11, 3, 4);
			/* 7 is the length of the call, 8 is the offset to the next got slot */
			amd64_alu_reg_imm_size (code, X86_ADD, AMD64_RAX, 7 + sizeof (gpointer), sizeof(gpointer));
#elif defined(__native_client_codegen__)
			/* The arg is hidden in a "push imm32" instruction, */
			/* add one to skip the opcode.                      */
			amd64_mov_reg_membase (code, AMD64_RAX, AMD64_R11, kNaClTrampOffset+1, 4);
#endif
			/* Compute the address of the GOT slot */
			amd64_alu_reg_reg_size (code, X86_ADD, AMD64_R11, AMD64_RAX, sizeof(gpointer));
			/* Load the value */
			amd64_mov_reg_membase (code, AMD64_R11, AMD64_R11, 0, sizeof(gpointer));
		} else {
			amd64_mov_reg_membase (code, AMD64_R11, AMD64_RBP, tramp_offset, sizeof(gpointer));
#if defined(__default_codegen__)
			/* The size of the argument (4 or 8) is stored in the byte following the call */
			amd64_mov_reg_membase (code, AMD64_RAX, AMD64_R11, 5, 1);
			amd64_widen_reg (code, AMD64_RAX, AMD64_RAX, TRUE, FALSE);
			amd64_alu_reg_imm_size (code, X86_CMP, AMD64_RAX, 4, 1);
			br [0] = code;
			x86_branch8 (code, X86_CC_NE, 6, FALSE);
			/* 32 bit immediate */
			amd64_mov_reg_membase (code, AMD64_R11, AMD64_R11, 6, 4);
			br [1] = code;
			x86_jump8 (code, 10);
			/* 64 bit immediate */
			mono_amd64_patch (br [0], code);
			amd64_mov_reg_membase (code, AMD64_R11, AMD64_R11, 6, 8);
			mono_amd64_patch (br [1], code);
#elif defined(__native_client_codegen__)
			/* All args are 32-bit pointers in NaCl */
			amd64_mov_reg_membase (code, AMD64_R11, AMD64_R11, 6, 4);
#endif
		}
		amd64_mov_membase_reg (code, AMD64_RBP, arg_offset, AMD64_R11, sizeof(gpointer));
	} else {
		amd64_mov_reg_membase (code, AMD64_R11, AMD64_RBP, saved_regs_offset + (MONO_AMD64_ARG_REG1 * sizeof(mgreg_t)), sizeof(mgreg_t));
		amd64_mov_membase_reg (code, AMD64_RBP, arg_offset, AMD64_R11, sizeof(gpointer));
	}

	/* Save the LMF */

	offset += sizeof (MonoLMFTramp);
	lmf_offset = - offset;

	/* Save ip */
	if (has_caller)
		amd64_mov_reg_membase (code, AMD64_R11, AMD64_RBP, 8, sizeof(gpointer));
	else
		amd64_mov_reg_imm (code, AMD64_R11, 0);
	amd64_mov_membase_reg (code, AMD64_RBP, lmf_offset + MONO_STRUCT_OFFSET (MonoLMF, rip), AMD64_R11, sizeof(mgreg_t));
	/* Save fp */
	amd64_mov_reg_membase (code, AMD64_R11, AMD64_RSP, framesize, sizeof(mgreg_t));
	amd64_mov_membase_reg (code, AMD64_RBP, lmf_offset + MONO_STRUCT_OFFSET (MonoLMF, rbp), AMD64_R11, sizeof(mgreg_t));
	/* Save sp */
	amd64_mov_reg_reg (code, AMD64_R11, AMD64_RSP, sizeof(mgreg_t));
	amd64_alu_reg_imm (code, X86_ADD, AMD64_R11, framesize + 16);
	amd64_mov_membase_reg (code, AMD64_RBP, lmf_offset + MONO_STRUCT_OFFSET (MonoLMF, rsp), AMD64_R11, sizeof(mgreg_t));
	/* Save pointer to registers */
	amd64_lea_membase (code, AMD64_R11, AMD64_RBP, saved_regs_offset);
	amd64_mov_membase_reg (code, AMD64_RBP, lmf_offset + MONO_STRUCT_OFFSET (MonoLMFTramp, regs), AMD64_R11, sizeof(mgreg_t));

	if (aot) {
		code = mono_arch_emit_load_aotconst (buf, code, &ji, MONO_PATCH_INFO_JIT_ICALL_ADDR, "mono_get_lmf_addr");
	} else {
		amd64_mov_reg_imm (code, AMD64_R11, mono_get_lmf_addr);
	}
	amd64_call_reg (code, AMD64_R11);

	/* Save lmf_addr */
	amd64_mov_membase_reg (code, AMD64_RBP, lmf_offset + MONO_STRUCT_OFFSET (MonoLMFTramp, lmf_addr), AMD64_RAX, sizeof(gpointer));
	/* Save previous_lmf */
	/* Set the lowest bit to signal that this LMF has the ip field set */
	/* Set the third lowest bit to signal that this is a MonoLMFTramp structure */
	amd64_mov_reg_membase (code, AMD64_R11, AMD64_RAX, 0, sizeof(gpointer));
	amd64_alu_reg_imm_size (code, X86_ADD, AMD64_R11, 0x5, sizeof(gpointer));
	amd64_mov_membase_reg (code, AMD64_RBP, lmf_offset + MONO_STRUCT_OFFSET (MonoLMF, previous_lmf), AMD64_R11, sizeof(gpointer));
	/* Set new lmf */
	amd64_lea_membase (code, AMD64_R11, AMD64_RBP, lmf_offset);
	amd64_mov_membase_reg (code, AMD64_RAX, 0, AMD64_R11, sizeof(gpointer));
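
	/*
	 * Note on the 0x5 above: previous_lmf is a tagged pointer, and since LMF
	 * structures are at least 8-byte aligned the low bits are free. Adding
	 * 0x5 sets bit 0 ("ip field is set") and bit 2 ("this is a MonoLMFTramp"),
	 * matching the two comments above; the restore path below subtracts it.
	 */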

	/* Call the C trampoline function */

	/* Arg1 is the pointer to the saved registers */
	amd64_lea_membase (code, AMD64_ARG_REG1, AMD64_RBP, saved_regs_offset);

	/* Arg2 is the address of the calling code */
	if (has_caller)
		amd64_mov_reg_membase (code, AMD64_ARG_REG2, AMD64_RBP, 8, sizeof(gpointer));
	else
		amd64_mov_reg_imm (code, AMD64_ARG_REG2, 0);

	/* Arg3 is the method/vtable ptr */
	amd64_mov_reg_membase (code, AMD64_ARG_REG3, AMD64_RBP, arg_offset, sizeof(gpointer));

	/* Arg4 is the trampoline address */
	amd64_mov_reg_membase (code, AMD64_ARG_REG4, AMD64_RBP, tramp_offset, sizeof(gpointer));

	if (aot) {
		char *icall_name = g_strdup_printf ("trampoline_func_%d", tramp_type);
		code = mono_arch_emit_load_aotconst (buf, code, &ji, MONO_PATCH_INFO_JIT_ICALL_ADDR, icall_name);
	} else {
		tramp = (guint8*)mono_get_trampoline_func (tramp_type);
		amd64_mov_reg_imm (code, AMD64_R11, tramp);
	}
	amd64_call_reg (code, AMD64_R11);

	/* Check for thread interruption */
	/* This is not perf critical code so no need to check the interrupt flag */
	/*
	 * Have to call the _force_ variant, since there could be a protected wrapper on the top of the stack.
	 */
	amd64_mov_membase_reg (code, AMD64_RBP, res_offset, AMD64_RAX, sizeof(mgreg_t));
	if (aot) {
		code = mono_arch_emit_load_aotconst (buf, code, &ji, MONO_PATCH_INFO_JIT_ICALL_ADDR, "mono_thread_force_interruption_checkpoint");
	} else {
		amd64_mov_reg_imm (code, AMD64_R11, (guint8*)mono_thread_force_interruption_checkpoint);
	}
	amd64_call_reg (code, AMD64_R11);

	amd64_mov_reg_membase (code, AMD64_RAX, AMD64_RBP, res_offset, sizeof(mgreg_t));

	/* Restore LMF */
	amd64_mov_reg_membase (code, AMD64_RCX, AMD64_RBP, lmf_offset + MONO_STRUCT_OFFSET (MonoLMF, previous_lmf), sizeof(gpointer));
	amd64_alu_reg_imm_size (code, X86_SUB, AMD64_RCX, 0x5, sizeof(gpointer));
	amd64_mov_reg_membase (code, AMD64_R11, AMD64_RBP, lmf_offset + MONO_STRUCT_OFFSET (MonoLMFTramp, lmf_addr), sizeof(gpointer));
	amd64_mov_membase_reg (code, AMD64_R11, 0, AMD64_RCX, sizeof(gpointer));

	/*
	 * Save rax to the stack, after the leave instruction, this will become part of
	 * the red zone.
	 */
	amd64_mov_membase_reg (code, AMD64_RBP, rax_offset, AMD64_RAX, sizeof(mgreg_t));

	/* Restore argument registers, r10 (imt method/rgctx)
	   and rax (needed for direct calls to C vararg functions). */
	for (i = 0; i < AMD64_NREG; ++i)
		if (AMD64_IS_ARGUMENT_REG (i) || i == AMD64_R10 || i == AMD64_RAX)
			amd64_mov_reg_membase (code, i, AMD64_RBP, saved_regs_offset + (i * sizeof(mgreg_t)), sizeof(mgreg_t));
	for (i = 0; i < 8; ++i)
		amd64_movsd_reg_membase (code, i, AMD64_RBP, saved_fpregs_offset + (i * sizeof(mgreg_t)));

	/* Restore stack */
	amd64_leave (code);

	if (MONO_TRAMPOLINE_TYPE_MUST_RETURN (tramp_type)) {
		/* Load the result saved in the red zone */
		amd64_mov_reg_membase (code, AMD64_RAX, AMD64_RSP, rax_offset - sizeof(mgreg_t), sizeof(mgreg_t));
		amd64_ret (code);
	} else {
		/* call the compiled method using the saved rax */
		amd64_jump_membase (code, AMD64_RSP, rax_offset - sizeof(mgreg_t));
	}

	g_assert ((code - buf) <= kMaxCodeSize);

	nacl_global_codeman_validate (&buf, kMaxCodeSize, &code);

	mono_arch_flush_icache (buf, code - buf);
	mono_profiler_code_buffer_new (buf, code - buf, MONO_PROFILER_CODE_BUFFER_HELPER, NULL);

	tramp_name = mono_get_generic_trampoline_name (tramp_type);
	*info = mono_tramp_info_create (tramp_name, buf, code - buf, ji, unwind_ops);
	g_free (tramp_name);

	return buf;
}
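
/*
 * Summary of the trampoline frame built above, for reference (all offsets
 * are negative, relative to the RBP established in the prologue, in the
 * order they are computed):
 *
 *   rbp_offset           the caller's RBP, pushed in the prologue
 *   rax_offset           RAX spill used for the final ret/tail call
 *   tramp_offset         address of the specific trampoline
 *   arg_offset           trampoline argument decoded from the call site
 *   res_offset           value returned by the C trampoline function
 *   saved_regs_offset    all AMD64_NREG general purpose registers
 *   saved_fpregs_offset  the first 8 xmm registers
 *   lmf_offset           the MonoLMFTramp structure
 */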

gpointer
mono_arch_get_nullified_class_init_trampoline (MonoTrampInfo **info)
{
	guint8 *code, *buf;
	int size = NACL_SIZE (16, 32);

	code = buf = mono_global_codeman_reserve (size);

	amd64_ret (code);

	nacl_global_codeman_validate (&buf, size, &code);

	mono_arch_flush_icache (buf, code - buf);
	mono_profiler_code_buffer_new (buf, code - buf, MONO_PROFILER_CODE_BUFFER_HELPER, NULL);

	if (info)
		*info = mono_tramp_info_create ("nullified_class_init_trampoline", buf, code - buf, NULL, NULL);

	return buf;
}

gpointer
mono_arch_create_specific_trampoline (gpointer arg1, MonoTrampolineType tramp_type, MonoDomain *domain, guint32 *code_len)
{
	guint8 *code, *buf, *tramp;
	int size;
	gboolean far_addr = FALSE;

	tramp = mono_get_trampoline_code (tramp_type);

#if defined(__default_codegen__)
	if ((((guint64)arg1) >> 32) == 0)
		size = 5 + 1 + 4;
	else
		size = 5 + 1 + 8;

	code = buf = mono_domain_code_reserve_align (domain, size, 1);

	if (((gint64)tramp - (gint64)code) >> 31 != 0 && ((gint64)tramp - (gint64)code) >> 31 != -1) {
#ifndef MONO_ARCH_NOMAP32BIT
		g_assert_not_reached ();
#endif
		far_addr = TRUE;
		size += 16;
		code = buf = mono_domain_code_reserve_align (domain, size, 1);
	}
#elif defined(__native_client_codegen__)
	size = 5 + 1 + 4;
	/* Aligning the call site below could */
	/* add up to kNaClAlignment-1 bytes   */
	size += (kNaClAlignment-1);
	size = NACL_BUNDLE_ALIGN_UP (size);
	buf = mono_domain_code_reserve_align (domain, size, kNaClAlignment);
	code = buf;
#endif

	if (far_addr) {
		amd64_mov_reg_imm (code, AMD64_R11, tramp);
		amd64_call_reg (code, AMD64_R11);
	} else {
		amd64_call_code (code, tramp);
	}
	/* The trampoline code will obtain the argument from the instruction stream */
#if defined(__default_codegen__)
	if ((((guint64)arg1) >> 32) == 0) {
		*code = 0x4;
		*(guint32*)(code + 1) = (gint64)arg1;
		code += 5;
	} else {
		*code = 0x8;
		*(guint64*)(code + 1) = (gint64)arg1;
		code += 9;
	}
#elif defined(__native_client_codegen__)
	/* For NaCl, all tramp args are 32-bit because they're pointers */
	*code = 0x68; /* push imm32 */
	*(guint32*)(code + 1) = (gint32)arg1;
	code += 5;
#endif

	g_assert ((code - buf) <= size);

	if (code_len)
		*code_len = size;

	nacl_domain_code_validate (domain, &buf, size, &code);

	mono_arch_flush_icache (buf, size);
	mono_profiler_code_buffer_new (buf, code - buf, MONO_PROFILER_CODE_BUFFER_SPECIFIC_TRAMPOLINE, mono_get_generic_trampoline_simple_name (tramp_type));

	return buf;
}
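
/*
 * Layout of a specific trampoline, for reference (default codegen): a call
 * into the generic trampoline, followed by the argument embedded directly in
 * the instruction stream, which the generic trampoline reads back through
 * the return address:
 *
 *   e8 <rel32>      call <generic trampoline>   (mov r11/call r11 when far)
 *   04 | 08         marker byte giving the argument size
 *   <arg1>          4- or 8-byte argument
 */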

gpointer
mono_arch_create_rgctx_lazy_fetch_trampoline (guint32 slot, MonoTrampInfo **info, gboolean aot)
{
	guint8 *tramp;
	guint8 *code, *buf;
	guint8 **rgctx_null_jumps;
	int tramp_size;
	int depth, index;
	int i;
	gboolean mrgctx;
	MonoJumpInfo *ji = NULL;
	GSList *unwind_ops = NULL;

	mrgctx = MONO_RGCTX_SLOT_IS_MRGCTX (slot);
	index = MONO_RGCTX_SLOT_INDEX (slot);
	if (mrgctx)
		index += MONO_SIZEOF_METHOD_RUNTIME_GENERIC_CONTEXT / sizeof (gpointer);
	for (depth = 0; ; ++depth) {
		int size = mono_class_rgctx_get_array_size (depth, mrgctx);

		if (index < size - 1)
			break;
		index -= size - 1;
	}

	tramp_size = NACL_SIZE (64 + 8 * depth, 128 + 8 * depth);

	code = buf = mono_global_codeman_reserve (tramp_size);

	unwind_ops = mono_arch_get_cie_program ();

	rgctx_null_jumps = g_malloc (sizeof (guint8*) * (depth + 2));

	if (mrgctx) {
		/* get mrgctx ptr */
		amd64_mov_reg_reg (code, AMD64_RAX, AMD64_ARG_REG1, 8);
	} else {
		/* load rgctx ptr from vtable */
		amd64_mov_reg_membase (code, AMD64_RAX, AMD64_ARG_REG1, MONO_STRUCT_OFFSET (MonoVTable, runtime_generic_context), sizeof(gpointer));
		/* is the rgctx ptr null? */
		amd64_test_reg_reg (code, AMD64_RAX, AMD64_RAX);
		/* if yes, jump to actual trampoline */
		rgctx_null_jumps [0] = code;
		amd64_branch8 (code, X86_CC_Z, -1, 1);
	}

	for (i = 0; i < depth; ++i) {
		/* load ptr to next array */
		if (mrgctx && i == 0)
			amd64_mov_reg_membase (code, AMD64_RAX, AMD64_RAX, MONO_SIZEOF_METHOD_RUNTIME_GENERIC_CONTEXT, sizeof(gpointer));
		else
			amd64_mov_reg_membase (code, AMD64_RAX, AMD64_RAX, 0, sizeof(gpointer));
		/* is the ptr null? */
		amd64_test_reg_reg (code, AMD64_RAX, AMD64_RAX);
		/* if yes, jump to actual trampoline */
		rgctx_null_jumps [i + 1] = code;
		amd64_branch8 (code, X86_CC_Z, -1, 1);
	}

	/* fetch slot */
	amd64_mov_reg_membase (code, AMD64_RAX, AMD64_RAX, sizeof (gpointer) * (index + 1), sizeof(gpointer));
	/* is the slot null? */
	amd64_test_reg_reg (code, AMD64_RAX, AMD64_RAX);
	/* if yes, jump to actual trampoline */
	rgctx_null_jumps [depth + 1] = code;
	amd64_branch8 (code, X86_CC_Z, -1, 1);
	/* otherwise return */
	amd64_ret (code);

	for (i = mrgctx ? 1 : 0; i <= depth + 1; ++i)
		mono_amd64_patch (rgctx_null_jumps [i], code);

	g_free (rgctx_null_jumps);

	/* move the rgctx pointer to the VTABLE register */
	amd64_mov_reg_reg (code, MONO_ARCH_VTABLE_REG, AMD64_ARG_REG1, sizeof(gpointer));

	if (aot) {
		code = mono_arch_emit_load_aotconst (buf, code, &ji, MONO_PATCH_INFO_JIT_ICALL_ADDR, g_strdup_printf ("specific_trampoline_lazy_fetch_%u", slot));
		amd64_jump_reg (code, AMD64_R11);
	} else {
		tramp = mono_arch_create_specific_trampoline (GUINT_TO_POINTER (slot), MONO_TRAMPOLINE_RGCTX_LAZY_FETCH, mono_get_root_domain (), NULL);

		/* jump to the actual trampoline */
		amd64_jump_code (code, tramp);
	}

	nacl_global_codeman_validate (&buf, tramp_size, &code);
	mono_arch_flush_icache (buf, code - buf);
	mono_profiler_code_buffer_new (buf, code - buf, MONO_PROFILER_CODE_BUFFER_GENERICS_TRAMPOLINE, NULL);

	g_assert (code - buf <= tramp_size);

	if (info) {
		char *name = mono_get_rgctx_fetch_trampoline_name (slot);
		*info = mono_tramp_info_create (name, buf, code - buf, ji, unwind_ops);
		g_free (name);
	}

	return buf;
}
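
/*
 * The fast path emitted above, summarized as pseudocode: starting from the
 * (M)RGCTX pointer, walk `depth` levels of arrays, then read slot
 * `index + 1`; any NULL along the way branches to the slow-path trampoline,
 * which fills the slot:
 *
 *   rax = vtable->runtime_generic_context    (or the mrgctx itself)
 *   rax = rax[...]                           repeated once per depth level
 *   rax = rax[index + 1]
 *   if (rax != NULL) return rax
 */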

gpointer
mono_arch_create_generic_class_init_trampoline (MonoTrampInfo **info, gboolean aot)
{
	guint8 *tramp;
	guint8 *code, *buf;
	static int byte_offset = -1;
	static guint8 bitmask;
	guint8 *jump;
	int tramp_size;
	GSList *unwind_ops = NULL;
	MonoJumpInfo *ji = NULL;

	tramp_size = 64;

	code = buf = mono_global_codeman_reserve (tramp_size);

	if (byte_offset < 0)
		mono_marshal_find_bitfield_offset (MonoVTable, initialized, &byte_offset, &bitmask);

	amd64_test_membase_imm_size (code, MONO_AMD64_ARG_REG1, byte_offset, bitmask, 1);
	jump = code;
	amd64_branch8 (code, X86_CC_Z, -1, 1);

	amd64_ret (code);

	x86_patch (jump, code);

	if (aot) {
		code = mono_arch_emit_load_aotconst (buf, code, &ji, MONO_PATCH_INFO_JIT_ICALL_ADDR, "specific_trampoline_generic_class_init");
		amd64_jump_reg (code, AMD64_R11);
	} else {
		tramp = mono_arch_create_specific_trampoline (NULL, MONO_TRAMPOLINE_GENERIC_CLASS_INIT, mono_get_root_domain (), NULL);

		/* jump to the actual trampoline */
		amd64_jump_code (code, tramp);
	}

	nacl_global_codeman_validate (&buf, tramp_size, &code);

	mono_arch_flush_icache (buf, code - buf);
	mono_profiler_code_buffer_new (buf, code - buf, MONO_PROFILER_CODE_BUFFER_HELPER, NULL);

	g_assert (code - buf <= tramp_size);

	if (info)
		*info = mono_tramp_info_create ("generic_class_init_trampoline", buf, code - buf, ji, unwind_ops);

	return buf;
}

gpointer
mono_arch_create_monitor_enter_trampoline (MonoTrampInfo **info, gboolean aot)
{
	guint8 *tramp;
	guint8 *code, *buf;
	guint8 *jump_obj_null, *jump_sync_null, *jump_cmpxchg_failed, *jump_other_owner, *jump_tid, *jump_sync_thin_hash = NULL;
	int tramp_size;
	int owner_offset, nest_offset, dummy;
	MonoJumpInfo *ji = NULL;
	GSList *unwind_ops = NULL;
	int obj_reg = MONO_AMD64_ARG_REG1;
	int sync_reg = MONO_AMD64_ARG_REG2;
	int tid_reg = MONO_AMD64_ARG_REG3;

	g_assert (MONO_ARCH_MONITOR_OBJECT_REG == obj_reg);

	mono_monitor_threads_sync_members_offset (&owner_offset, &nest_offset, &dummy);
	g_assert (MONO_THREADS_SYNC_MEMBER_SIZE (owner_offset) == sizeof (gpointer));
	g_assert (MONO_THREADS_SYNC_MEMBER_SIZE (nest_offset) == sizeof (guint32));
	owner_offset = MONO_THREADS_SYNC_MEMBER_OFFSET (owner_offset);
	nest_offset = MONO_THREADS_SYNC_MEMBER_OFFSET (nest_offset);

	tramp_size = 96;

	code = buf = mono_global_codeman_reserve (tramp_size);

	unwind_ops = mono_arch_get_cie_program ();

	if (!aot && mono_thread_get_tls_offset () != -1) {
		/* MonoObject* obj is in obj_reg */
		/* is obj null? */
		amd64_test_reg_reg (code, obj_reg, obj_reg);
		/* if yes, jump to actual trampoline */
		jump_obj_null = code;
		amd64_branch8 (code, X86_CC_Z, -1, 1);

		/* load obj->synchronization to sync_reg */
		amd64_mov_reg_membase (code, sync_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, synchronisation), 8);

		if (mono_gc_is_moving ()) {
			/* if bit zero is set it's a thin hash */
			/* FIXME use testb encoding */
			amd64_test_reg_imm (code, sync_reg, 0x01);
			jump_sync_thin_hash = code;
			amd64_branch8 (code, X86_CC_NE, -1, 1);

			/* clear bits used by the gc */
			amd64_alu_reg_imm (code, X86_AND, sync_reg, ~0x3);
		}

		/* is synchronization null? */
		amd64_test_reg_reg (code, sync_reg, sync_reg);
		/* if yes, jump to actual trampoline */
		jump_sync_null = code;
		amd64_branch8 (code, X86_CC_Z, -1, 1);

		/* load MonoInternalThread* into tid_reg */
		code = mono_amd64_emit_tls_get (code, tid_reg, mono_thread_get_tls_offset ());
		/* load TID into tid_reg */
		amd64_mov_reg_membase (code, tid_reg, tid_reg, MONO_STRUCT_OFFSET (MonoInternalThread, tid), 8);

		/* is synchronization->owner null? */
		amd64_alu_membase_imm_size (code, X86_CMP, sync_reg, owner_offset, 0, 8);
		/* if not, jump to next case */
		jump_tid = code;
		amd64_branch8 (code, X86_CC_NZ, -1, 1);

		/* if yes, try a compare-exchange with the TID */
		g_assert (tid_reg != X86_EAX);
		/* zero RAX */
		amd64_alu_reg_reg (code, X86_XOR, AMD64_RAX, AMD64_RAX);
		/* compare and exchange */
		amd64_prefix (code, X86_LOCK_PREFIX);
		amd64_cmpxchg_membase_reg_size (code, sync_reg, owner_offset, tid_reg, 8);
		/* if not successful, jump to actual trampoline */
		jump_cmpxchg_failed = code;
		amd64_branch8 (code, X86_CC_NZ, -1, 1);
		/* if successful, return */
		amd64_ret (code);

		/* next case: synchronization->owner is not null */
		x86_patch (jump_tid, code);
		/* is synchronization->owner == TID? */
		amd64_alu_membase_reg_size (code, X86_CMP, sync_reg, owner_offset, tid_reg, 8);
		/* if not, jump to actual trampoline */
		jump_other_owner = code;
		amd64_branch8 (code, X86_CC_NZ, -1, 1);
		/* if yes, increment nest */
		amd64_inc_membase_size (code, sync_reg, nest_offset, 4);
		/* return */
		amd64_ret (code);

		x86_patch (jump_obj_null, code);
		if (jump_sync_thin_hash)
			x86_patch (jump_sync_thin_hash, code);
		x86_patch (jump_sync_null, code);
		x86_patch (jump_cmpxchg_failed, code);
		x86_patch (jump_other_owner, code);
	}

	/* jump to the actual trampoline */
	if (MONO_AMD64_ARG_REG1 != obj_reg)
		amd64_mov_reg_reg (code, MONO_AMD64_ARG_REG1, obj_reg, sizeof (mgreg_t));

	if (aot) {
		code = mono_arch_emit_load_aotconst (buf, code, &ji, MONO_PATCH_INFO_JIT_ICALL_ADDR, "specific_trampoline_monitor_enter");
		amd64_jump_reg (code, AMD64_R11);
	} else {
		tramp = mono_arch_create_specific_trampoline (NULL, MONO_TRAMPOLINE_MONITOR_ENTER, mono_get_root_domain (), NULL);

		/* jump to the actual trampoline */
		amd64_jump_code (code, tramp);
	}

	nacl_global_codeman_validate (&buf, tramp_size, &code);

	mono_arch_flush_icache (buf, code - buf);
	mono_profiler_code_buffer_new (buf, code - buf, MONO_PROFILER_CODE_BUFFER_MONITOR, NULL);
	g_assert (code - buf <= tramp_size);

	if (info)
		*info = mono_tramp_info_create ("monitor_enter_trampoline", buf, code - buf, ji, unwind_ops);

	return buf;
}
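
/*
 * The fast path above, summarized as pseudocode (a sketch of the emitted
 * checks, not literal code); every uncommon case bails out to the slow-path
 * trampoline:
 *
 *   if (!obj) goto tramp;
 *   sync = obj->synchronisation;          // thin-hash/GC bits handled above
 *   if (!sync) goto tramp;
 *   if (sync->owner == NULL) {
 *       if (!cmpxchg (&sync->owner, NULL, tid)) goto tramp;
 *       return;
 *   }
 *   if (sync->owner == tid) { sync->nest++; return; }
 *   goto tramp;
 */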

gpointer
mono_arch_create_monitor_exit_trampoline (MonoTrampInfo **info, gboolean aot)
{
	guint8 *tramp;
	guint8 *code, *buf;
	guint8 *jump_obj_null, *jump_have_waiters, *jump_sync_null, *jump_not_owned, *jump_sync_thin_hash = NULL;
	guint8 *jump_next;
	int tramp_size;
	int owner_offset, nest_offset, entry_count_offset;
	MonoJumpInfo *ji = NULL;
	GSList *unwind_ops = NULL;
	int obj_reg = MONO_AMD64_ARG_REG1;
	int sync_reg = MONO_AMD64_ARG_REG2;

	g_assert (obj_reg == MONO_ARCH_MONITOR_OBJECT_REG);

	mono_monitor_threads_sync_members_offset (&owner_offset, &nest_offset, &entry_count_offset);
	g_assert (MONO_THREADS_SYNC_MEMBER_SIZE (owner_offset) == sizeof (gpointer));
	g_assert (MONO_THREADS_SYNC_MEMBER_SIZE (nest_offset) == sizeof (guint32));
	g_assert (MONO_THREADS_SYNC_MEMBER_SIZE (entry_count_offset) == sizeof (gint32));
	owner_offset = MONO_THREADS_SYNC_MEMBER_OFFSET (owner_offset);
	nest_offset = MONO_THREADS_SYNC_MEMBER_OFFSET (nest_offset);
	entry_count_offset = MONO_THREADS_SYNC_MEMBER_OFFSET (entry_count_offset);

	tramp_size = 112;

	code = buf = mono_global_codeman_reserve (tramp_size);

	unwind_ops = mono_arch_get_cie_program ();

	if (!aot && mono_thread_get_tls_offset () != -1) {
		/* MonoObject* obj is in obj_reg */
		/* is obj null? */
		amd64_test_reg_reg (code, obj_reg, obj_reg);
		/* if yes, jump to actual trampoline */
		jump_obj_null = code;
		amd64_branch8 (code, X86_CC_Z, -1, 1);

		/* load obj->synchronization to sync_reg */
		amd64_mov_reg_membase (code, sync_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, synchronisation), 8);

		if (mono_gc_is_moving ()) {
			/* if bit zero is set it's a thin hash */
			/* FIXME use testb encoding */
			amd64_test_reg_imm (code, sync_reg, 0x01);
			jump_sync_thin_hash = code;
			amd64_branch8 (code, X86_CC_NE, -1, 1);

			/* clear bits used by the gc */
			amd64_alu_reg_imm (code, X86_AND, sync_reg, ~0x3);
		}

		/* is synchronization null? */
		amd64_test_reg_reg (code, sync_reg, sync_reg);
		/* if yes, jump to actual trampoline */
		jump_sync_null = code;
		amd64_branch8 (code, X86_CC_Z, -1, 1);

		/* next case: synchronization is not null */
		/* load MonoInternalThread* into RAX */
		code = mono_amd64_emit_tls_get (code, AMD64_RAX, mono_thread_get_tls_offset ());
		/* load TID into RAX */
		amd64_mov_reg_membase (code, AMD64_RAX, AMD64_RAX, MONO_STRUCT_OFFSET (MonoInternalThread, tid), 8);
		/* is synchronization->owner == TID? */
		amd64_alu_membase_reg_size (code, X86_CMP, sync_reg, owner_offset, AMD64_RAX, 8);
		/* if not, jump to actual trampoline */
		jump_not_owned = code;
		amd64_branch8 (code, X86_CC_NZ, -1, 1);

		/* next case: synchronization->owner == TID */
		/* is synchronization->nest == 1? */
		amd64_alu_membase_imm_size (code, X86_CMP, sync_reg, nest_offset, 1, 4);
		/* if not, jump to next case */
		jump_next = code;
		amd64_branch8 (code, X86_CC_NZ, -1, 1);
		/* if yes, is synchronization->entry_count zero? */
		amd64_alu_membase_imm_size (code, X86_CMP, sync_reg, entry_count_offset, 0, 4);
		/* if not, jump to actual trampoline */
		jump_have_waiters = code;
		amd64_branch8 (code, X86_CC_NZ, -1, 1);
		/* if yes, set synchronization->owner to null and return */
		amd64_mov_membase_imm (code, sync_reg, owner_offset, 0, 8);
		amd64_ret (code);

		/* next case: synchronization->nest is not 1 */
		x86_patch (jump_next, code);
		/* decrease synchronization->nest and return */
		amd64_dec_membase_size (code, sync_reg, nest_offset, 4);
		amd64_ret (code);

		x86_patch (jump_obj_null, code);
		x86_patch (jump_have_waiters, code);
		x86_patch (jump_not_owned, code);
		x86_patch (jump_sync_null, code);
		/* make sure the thin-hash case also falls through to the slow path */
		if (jump_sync_thin_hash)
			x86_patch (jump_sync_thin_hash, code);
	}

	/* jump to the actual trampoline */
	if (MONO_AMD64_ARG_REG1 != obj_reg)
		amd64_mov_reg_reg (code, MONO_AMD64_ARG_REG1, obj_reg, sizeof (mgreg_t));

	if (aot) {
		code = mono_arch_emit_load_aotconst (buf, code, &ji, MONO_PATCH_INFO_JIT_ICALL_ADDR, "specific_trampoline_monitor_exit");
		amd64_jump_reg (code, AMD64_R11);
	} else {
		tramp = mono_arch_create_specific_trampoline (NULL, MONO_TRAMPOLINE_MONITOR_EXIT, mono_get_root_domain (), NULL);
		amd64_jump_code (code, tramp);
	}

	nacl_global_codeman_validate (&buf, tramp_size, &code);

	mono_arch_flush_icache (buf, code - buf);
	mono_profiler_code_buffer_new (buf, code - buf, MONO_PROFILER_CODE_BUFFER_MONITOR, NULL);
	g_assert (code - buf <= tramp_size);

	if (info)
		*info = mono_tramp_info_create ("monitor_exit_trampoline", buf, code - buf, ji, unwind_ops);

	return buf;
}

void
mono_arch_invalidate_method (MonoJitInfo *ji, void *func, gpointer func_arg)
{
	/* FIXME: This is not thread safe */
	guint8 *code = ji->code_start;

	amd64_mov_reg_imm (code, AMD64_ARG_REG1, func_arg);
	amd64_mov_reg_imm (code, AMD64_R11, func);

	x86_push_imm (code, (guint64)func_arg);
	amd64_call_reg (code, AMD64_R11);
}

static void
handler_block_trampoline_helper (gpointer *ptr)
{
	MonoJitTlsData *jit_tls = mono_native_tls_get_value (mono_jit_tls_id);
	*ptr = jit_tls->handler_block_return_address;
}

gpointer
mono_arch_create_handler_block_trampoline (MonoTrampInfo **info, gboolean aot)
{
	guint8 *tramp = mono_get_trampoline_code (MONO_TRAMPOLINE_HANDLER_BLOCK_GUARD);
	guint8 *code, *buf;
	int tramp_size = 64;
	MonoJumpInfo *ji = NULL;
	GSList *unwind_ops = NULL;

	g_assert (!aot);

	code = buf = mono_global_codeman_reserve (tramp_size);

	/*
	 * This trampoline restores the call chain of the handler block then jumps into the code that deals with it.
	 */
	if (mono_get_jit_tls_offset () != -1) {
		code = mono_amd64_emit_tls_get (code, MONO_AMD64_ARG_REG1, mono_get_jit_tls_offset ());
		amd64_mov_reg_membase (code, MONO_AMD64_ARG_REG1, MONO_AMD64_ARG_REG1, MONO_STRUCT_OFFSET (MonoJitTlsData, handler_block_return_address), 8);
		/* Simulate a call */
		amd64_push_reg (code, AMD64_RAX);
		amd64_jump_code (code, tramp);
	} else {
		/* Slow path uses a C helper */
		amd64_mov_reg_reg (code, MONO_AMD64_ARG_REG1, AMD64_RSP, 8);
		amd64_mov_reg_imm (code, AMD64_RAX, tramp);
		amd64_push_reg (code, AMD64_RAX);
		amd64_push_reg (code, AMD64_RAX);
		amd64_jump_code (code, handler_block_trampoline_helper);
	}

	mono_arch_flush_icache (buf, code - buf);
	mono_profiler_code_buffer_new (buf, code - buf, MONO_PROFILER_CODE_BUFFER_HELPER, NULL);
	g_assert (code - buf <= tramp_size);

	if (info)
		*info = mono_tramp_info_create ("handler_block_trampoline", buf, code - buf, ji, unwind_ops);

	return buf;
}
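
/*
 * Note on the "Simulate a call" pushes above: the generic trampoline expects
 * to find a return address on the stack (it pops it to locate the specific
 * trampoline), so both paths push a slot before jumping to it; the slow path
 * additionally pushes the guard trampoline address so that returning from
 * handler_block_trampoline_helper () lands in it. This is a reading of the
 * code above rather than an authoritative description.
 */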

/*
 * mono_arch_get_call_target:
 *
 *   Return the address called by the code before CODE if it exists.
 */
guint8*
mono_arch_get_call_target (guint8 *code)
{
	if (code [-5] == 0xe8) {
		gint32 disp = *(gint32*)(code - 4);
		guint8 *target = code + disp;

		return target;
	} else {
		return NULL;
	}
}

/*
 * mono_arch_get_plt_info_offset:
 *
 *   Return the PLT info offset belonging to the plt entry PLT_ENTRY.
 */
guint32
mono_arch_get_plt_info_offset (guint8 *plt_entry, mgreg_t *regs, guint8 *code)
{
#if defined(__native_client__) || defined(__native_client_codegen__)
	/* 18 = 3 (mov opcode) + 4 (disp) + 10 (nacljmp) + 1 (push opcode) */
	/* See aot-compiler.c arch_emit_plt_entry for details. */
	return *(guint32*)(plt_entry + 18);
#else
	return *(guint32*)(plt_entry + 6);
#endif
}