 * tramp-amd64.c: JIT trampoline code for amd64
 * Dietmar Maurer (dietmar@ximian.com)
 * Zoltan Varga (vargaz@gmail.com)
 * (C) 2001 Ximian, Inc.
 * Copyright 2003-2011 Novell, Inc (http://www.novell.com)
 * Copyright 2011 Xamarin, Inc (http://www.xamarin.com)
#include <mono/metadata/abi-details.h>
#include <mono/metadata/appdomain.h>
#include <mono/metadata/marshal.h>
#include <mono/metadata/tabledefs.h>
#include <mono/metadata/mono-debug-debugger.h>
#include <mono/metadata/monitor.h>
#include <mono/metadata/profiler-private.h>
#include <mono/metadata/gc-internal.h>
#include <mono/arch/amd64/amd64-codegen.h>
#include <mono/utils/memcheck.h>
#include "mini-amd64.h"
#include "debugger-agent.h"
#if defined(__native_client_codegen__) && defined(__native_client__)
#include <nacl/nacl_dyncode.h>
#define ALIGN_TO(val,align) ((((guint64)val) + ((align) - 1)) & ~((align) - 1))
#define IS_REX(inst) (((inst) >= 0x40) && ((inst) <= 0x4f))
 * mono_arch_get_unbox_trampoline:
 * @addr: pointer to native code for @m
 * When value type methods are called through the vtable, we need to unbox the
 * 'this' argument. This method returns a pointer to a trampoline which does
 * unboxing before calling the method.
mono_arch_get_unbox_trampoline (MonoMethod *m, gpointer addr)
	int this_reg, size = NACL_SIZE (20, 32);
	MonoDomain *domain = mono_domain_get ();
	this_reg = mono_arch_get_this_arg_reg (NULL);
	start = code = mono_domain_code_reserve (domain, size);
	amd64_alu_reg_imm (code, X86_ADD, this_reg, sizeof (MonoObject));
	/* FIXME: Optimize this */
	amd64_mov_reg_imm (code, AMD64_RAX, addr);
	amd64_jump_reg (code, AMD64_RAX);
	g_assert ((code - start) < size);
	nacl_domain_code_validate (domain, &start, size, &code);
	mono_arch_flush_icache (start, code - start);
	mono_profiler_code_buffer_new (start, code - start, MONO_PROFILER_CODE_BUFFER_UNBOX_TRAMPOLINE, m);
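	/*
	 * For reference, a sketch of the sequence emitted above (the exact
	 * encoding depends on which register holds 'this'):
	 *
	 *     add $sizeof(MonoObject), %<this_reg>   ; skip the object header
	 *     mov $addr, %rax
	 *     jmp *%rax
	 */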
 * mono_arch_get_static_rgctx_trampoline:
 * Create a trampoline which sets RGCTX_REG to MRGCTX, then jumps to ADDR.
mono_arch_get_static_rgctx_trampoline (MonoMethod *m, MonoMethodRuntimeGenericContext *mrgctx, gpointer addr)
	MonoDomain *domain = mono_domain_get ();
#ifdef MONO_ARCH_NOMAP32BIT
	/* AOTed code could still have a non-32-bit address */
	if ((((guint64)addr) >> 32) == 0)
		buf_len = NACL_SIZE (16, 32);
		buf_len = NACL_SIZE (30, 32);
	start = code = mono_domain_code_reserve (domain, buf_len);
	amd64_mov_reg_imm (code, MONO_ARCH_RGCTX_REG, mrgctx);
	amd64_jump_code (code, addr);
	g_assert ((code - start) < buf_len);
	nacl_domain_code_validate (domain, &start, buf_len, &code);
	mono_arch_flush_icache (start, code - start);
	mono_profiler_code_buffer_new (start, code - start, MONO_PROFILER_CODE_BUFFER_GENERICS_TRAMPOLINE, NULL);
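	/*
	 * Sketch of the emitted code, assuming MONO_ARCH_RGCTX_REG maps to %r10
	 * (which is what mini-amd64.h defines at the time of writing):
	 *
	 *     mov $mrgctx, %r10
	 *     jmp <addr>        ; rel32 jump when ADDR is within +/-2GB,
	 *                       ; otherwise an indirect jump through a register
	 */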
mono_arch_get_llvm_imt_trampoline (MonoDomain *domain, MonoMethod *m, int vt_offset)
	guint8 *code, *start;
	start = code = mono_domain_code_reserve (domain, buf_len);
	this_reg = mono_arch_get_this_arg_reg (NULL);
	amd64_mov_reg_imm (code, MONO_ARCH_IMT_REG, m);
	/* Load vtable address */
	amd64_mov_reg_membase (code, AMD64_RAX, this_reg, 0, 8);
	amd64_jump_membase (code, AMD64_RAX, vt_offset);
	g_assert ((code - start) < buf_len);
	nacl_domain_code_validate (domain, &start, buf_len, &code);
	mono_arch_flush_icache (start, code - start);
	mono_profiler_code_buffer_new (start, code - start, MONO_PROFILER_CODE_BUFFER_IMT_TRAMPOLINE, NULL);
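	/*
	 * The IMT trampoline above is roughly:
	 *
	 *     mov $m, %<imt_reg>         ; pass the method as the IMT argument
	 *     mov (%<this_reg>), %rax    ; load the vtable
	 *     jmp *vt_offset(%rax)       ; tail call through the vtable slot
	 */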
 * mono_arch_patch_callsite:
 * Patch the callsite whose address is given by ORIG_CODE so it calls ADDR. ORIG_CODE
 * points to the pc right after the call.
mono_arch_patch_callsite (guint8 *method_start, guint8 *orig_code, guint8 *addr)
#if defined(__default_codegen__)
	gboolean can_write = mono_breakpoint_clean_code (method_start, orig_code, 14, buf, sizeof (buf));
	/* mov 64-bit imm into r11 (followed by call reg) or direct call */
	if (((code [-13] == 0x49) && (code [-12] == 0xbb)) || (code [-5] == 0xe8)) {
		if (code [-5] != 0xe8) {
			InterlockedExchangePointer ((gpointer*)(orig_code - 11), addr);
			VALGRIND_DISCARD_TRANSLATIONS (orig_code - 11, sizeof (gpointer));
		} else {
			gboolean disp_32bit = ((((gint64)addr - (gint64)orig_code)) < (1 << 30)) && ((((gint64)addr - (gint64)orig_code)) > -(1 << 30));
			if ((((guint64)(addr)) >> 32) != 0 && !disp_32bit) {
				/*
				 * This might happen with LLVM or when calling AOTed code. Create a thunk.
				 */
				guint8 *thunk_start, *thunk_code;
				thunk_start = thunk_code = mono_domain_code_reserve (mono_domain_get (), 32);
				amd64_jump_membase (thunk_code, AMD64_RIP, 0);
				*(guint64*)thunk_code = (guint64)addr;
				addr = thunk_start;
				g_assert ((((guint64)(addr)) >> 32) == 0);
				mono_arch_flush_icache (thunk_start, thunk_code - thunk_start);
				mono_profiler_code_buffer_new (thunk_start, thunk_code - thunk_start, MONO_PROFILER_CODE_BUFFER_HELPER, NULL);
			}
			InterlockedExchange ((gint32*)(orig_code - 4), ((gint64)addr - (gint64)orig_code));
			VALGRIND_DISCARD_TRANSLATIONS (orig_code - 5, 4);
		}
	}
	else if ((code [-7] == 0x41) && (code [-6] == 0xff) && (code [-5] == 0x15)) {
		/* call *<OFFSET>(%rip) */
		gpointer *got_entry = (gpointer*)((guint8*)orig_code + (*(guint32*)(orig_code - 4)));
		InterlockedExchangePointer (got_entry, addr);
		VALGRIND_DISCARD_TRANSLATIONS (orig_code - 5, sizeof (gpointer));
	}
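	/*
	 * The call-site shapes handled above, as byte sequences ending at
	 * ORIG_CODE (a sketch; only the tested bytes are certain):
	 *
	 *     49 bb <imm64>       mov $target, %r11   ; patch imm64 at orig_code - 11
	 *     41 ff d3            call *%r11          ; (typical follow-up)
	 *
	 *     e8 <rel32>          call <target>       ; patch rel32 at orig_code - 4
	 *
	 *     41 ff 15 <rel32>    call *<disp>(%rip)  ; patch the GOT slot instead
	 */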
#elif defined(__native_client__)
	/* These are essentially the same two cases as above, modified for NaCl */
	/* Target must be bundle-aligned */
	g_assert (((guint32)addr & kNaClAlignmentMask) == 0);
	/* Return target must be bundle-aligned */
	g_assert (((guint32)orig_code & kNaClAlignmentMask) == 0);
	if (orig_code [-5] == 0xe8) {
		/* Direct call */
		gint32 offset = (gint32)addr - (gint32)orig_code;
		guint8 buf [sizeof (gint32)];
		*((gint32*)(buf)) = offset;
		ret = nacl_dyncode_modify (orig_code - sizeof (gint32), buf, sizeof (gint32));
	} else if (is_nacl_call_reg_sequence (orig_code - 10) && orig_code [-16] == 0x41 && orig_code [-15] == 0xbb) {
		guint8 buf [sizeof (gint32)];
		*((gint32*)(buf)) = (gint32)addr;
		/* orig_code [-14] is the start of the immediate. */
		ret = nacl_dyncode_modify (orig_code - 14, buf, sizeof (gint32));
	} else {
		g_assert_not_reached ();
	}
mono_arch_create_llvm_native_thunk (MonoDomain *domain, guint8 *addr)
	/*
	 * The caller is LLVM code and the call displacement might exceed 32 bits. We can't determine the caller address, so
	 * we add a thunk every time.
	 * Since the caller is also allocated using the domain code manager, hopefully the displacement will fit into 32 bits.
	 * FIXME: Avoid this if possible if !MONO_ARCH_NOMAP32BIT and ADDR is 32 bits.
	 */
	guint8 *thunk_start, *thunk_code;
	thunk_start = thunk_code = mono_domain_code_reserve (mono_domain_get (), 32);
	amd64_jump_membase (thunk_code, AMD64_RIP, 0);
	*(guint64*)thunk_code = (guint64)addr;
	mono_arch_flush_icache (thunk_start, thunk_code - thunk_start);
	mono_profiler_code_buffer_new (thunk_start, thunk_code - thunk_start, MONO_PROFILER_CODE_BUFFER_HELPER, NULL);
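	/*
	 * Thunk layout (the same shape is created in mono_arch_patch_callsite ()
	 * above):
	 *
	 *     ff 25 00 00 00 00    jmp *0(%rip)   ; jump through the slot below
	 *     <imm64>                             ; absolute address of ADDR
	 */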
mono_arch_patch_plt_entry (guint8 *code, gpointer *got, mgreg_t *regs, guint8 *addr)
	gint32 disp;
	gpointer *plt_jump_table_entry;
#if defined(__default_codegen__)
	/* A PLT entry: jmp *<DISP>(%rip) */
	g_assert (code [0] == 0xff);
	g_assert (code [1] == 0x25);
	disp = *(gint32*)(code + 2);
	plt_jump_table_entry = (gpointer*)(code + 6 + disp);
#elif defined(__native_client_codegen__)
	/* mov <DISP>(%rip), %r11d */
	/* Verify the 'mov' */
	g_assert (code [0] == 0x45);
	g_assert (code [1] == 0x8b);
	g_assert (code [2] == 0x1d);
	disp = *(gint32*)(code + 3);
	/* 7 = 3 (mov opcode) + 4 (disp) */
	/* This needs to resolve to the target of the RIP-relative offset */
	plt_jump_table_entry = (gpointer*)(code + 7 + disp);
#endif /* __native_client_codegen__ */
	InterlockedExchangePointer (plt_jump_table_entry, addr);
get_vcall_slot (guint8 *code, mgreg_t *regs, int *displacement)
	MonoJitInfo *ji = NULL;
	/* code - 9 might be before the start of the method */
	/* FIXME: Avoid this expensive call somehow */
	ji = mono_jit_info_table_find (mono_domain_get (), (char*)code);
	mono_breakpoint_clean_code (ji ? ji->code_start : NULL, code, 9, buf, sizeof (buf));
	if ((code [0] == 0x41) && (code [1] == 0xff) && (code [2] == 0x15)) {
		/* call OFFSET(%rip) */
		g_assert_not_reached ();
		*displacement = *(guint32*)(code + 3);
		return (gpointer*)(code + *displacement + 7);
	g_assert_not_reached ();
get_vcall_slot_addr (guint8 *code, mgreg_t *regs)
	vt = get_vcall_slot (code, regs, &displacement);
	return (gpointer*)((char*)vt + displacement);
mono_arch_nullify_class_init_trampoline (guint8 *code, mgreg_t *regs)
	MonoJitInfo *ji = NULL;
	gpointer tramp = mini_get_nullified_class_init_trampoline ();
	/* code - 7 might be before the start of the method */
	/* FIXME: Avoid this expensive call somehow */
	ji = mono_jit_info_table_find (mono_domain_get (), (char*)code);
	can_write = mono_breakpoint_clean_code (ji ? ji->code_start : NULL, code, 7, buf, sizeof (buf));
	/*
	 * A given byte sequence can match more than one case here, so we have to be
	 * really careful about the ordering of the cases. Longer sequences should be
	 * checked first.
	 */
	if ((buf [0] == 0x41) && (buf [1] == 0xff) && (buf [2] == 0x15)) {
		gpointer *vtable_slot;
		/* call *<OFFSET>(%rip) */
		vtable_slot = get_vcall_slot_addr (code, regs);
		g_assert (vtable_slot);
		*vtable_slot = tramp;
	} else if (buf [2] == 0xe8) {
		//guint8 *buf = code - 2;
		/*
		 * It would be better to replace the call with nops, but that doesn't seem
		 * to work on SMP machines even when the whole call is inside a cache line.
		 * Patching the call address seems to work.
		 */
		mono_arch_patch_callsite (code - 5, code, tramp);
	} else if ((buf [5] == 0xff) && x86_modrm_mod (buf [6]) == 3 && x86_modrm_reg (buf [6]) == 2) {
		/* Generated by the LLVM JIT or on platforms without MAP_32BIT set */
		mono_arch_patch_callsite (code - 13, code, tramp);
	} else if (buf [4] == 0x90 || buf [5] == 0xeb || buf [6] == 0x66) {
		/* Already changed by another thread */
		;
	} else {
		printf ("Invalid trampoline sequence: %x %x %x %x %x %x %x\n", buf [0], buf [1], buf [2], buf [3],
			buf [4], buf [5], buf [6]);
		g_assert_not_reached ();
	}
stack_unaligned (MonoTrampolineType tramp_type)
	printf ("%d\n", tramp_type);
	g_assert_not_reached ();
mono_arch_create_generic_trampoline (MonoTrampolineType tramp_type, MonoTrampInfo **info, gboolean aot)
	guint8 *buf, *code, *tramp, *br [2], *r11_save_code, *after_r11_save_code;
	int i, lmf_offset, offset, res_offset, arg_offset, rax_offset, tramp_offset, saved_regs_offset;
	int saved_fpregs_offset, rbp_offset, framesize, orig_rsp_to_rbp_offset, cfa_offset;
	gboolean has_caller;
	GSList *unwind_ops = NULL;
	MonoJumpInfo *ji = NULL;
	const guint kMaxCodeSize = NACL_SIZE (600, 600*2);
#if defined(__native_client_codegen__)
	const guint kNaClTrampOffset = 17;
	if (tramp_type == MONO_TRAMPOLINE_JUMP || tramp_type == MONO_TRAMPOLINE_HANDLER_BLOCK_GUARD)
		has_caller = FALSE;
	else
		has_caller = TRUE;
	code = buf = mono_global_codeman_reserve (kMaxCodeSize);
	framesize = kMaxCodeSize + sizeof (MonoLMFTramp);
	framesize = (framesize + (MONO_ARCH_FRAME_ALIGNMENT - 1)) & ~ (MONO_ARCH_FRAME_ALIGNMENT - 1);
	orig_rsp_to_rbp_offset = 0;
	r11_save_code = code;
	/* Reserve 5 bytes for the mov_membase_reg to save R11 */
	amd64_padding (code, 5);
	after_r11_save_code = code;
	// CFA = sp + 16 (the trampoline address is on the stack)
	cfa_offset = 16;
	mono_add_unwind_op_def_cfa (unwind_ops, code, buf, AMD64_RSP, 16);
	// IP saved at CFA - 8
	mono_add_unwind_op_offset (unwind_ops, code, buf, AMD64_RIP, -8);
	/* Pop the return address off the stack */
	amd64_pop_reg (code, AMD64_R11);
	orig_rsp_to_rbp_offset += sizeof (mgreg_t);
	cfa_offset -= sizeof (mgreg_t);
	mono_add_unwind_op_def_cfa_offset (unwind_ops, code, buf, cfa_offset);
	/*
	 * Allocate a new stack frame
	 */
	amd64_push_reg (code, AMD64_RBP);
	cfa_offset += sizeof (mgreg_t);
	mono_add_unwind_op_def_cfa_offset (unwind_ops, code, buf, cfa_offset);
	mono_add_unwind_op_offset (unwind_ops, code, buf, AMD64_RBP, - cfa_offset);
	orig_rsp_to_rbp_offset -= sizeof (mgreg_t);
	amd64_mov_reg_reg (code, AMD64_RBP, AMD64_RSP, sizeof (mgreg_t));
	mono_add_unwind_op_def_cfa_reg (unwind_ops, code, buf, AMD64_RBP);
	amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, framesize);
	/* Compute frame offsets relative to %rbp */
	offset = 0;
	rbp_offset = - offset;
	offset += sizeof (mgreg_t);
	rax_offset = - offset;
	offset += sizeof (mgreg_t);
	tramp_offset = - offset;
	offset += sizeof (gpointer);
	arg_offset = - offset;
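	/*
	 * Rough frame layout at this point (all offsets negative, relative to
	 * %rbp): the old %rbp pushed by the prologue sits at rbp_offset (0),
	 * followed below by rax_offset (a result slot read back from the red
	 * zone after the epilogue), tramp_offset (the call-site address computed
	 * below) and arg_offset (the trampoline argument); res_offset, the saved
	 * general/fp registers and the MonoLMFTramp are carved out further down
	 * as 'offset' grows.
	 */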
	/* Compute the trampoline address from the return address */
	if (aot) {
#if defined(__default_codegen__)
		/* 7 = length of call *<offset>(rip) */
		amd64_alu_reg_imm (code, X86_SUB, AMD64_R11, 7);
#elif defined(__native_client_codegen__)
		amd64_alu_reg_imm (code, X86_SUB, AMD64_R11, kNaClTrampOffset);
#endif
	} else {
		/* 5 = length of amd64_call_membase () */
		amd64_alu_reg_imm (code, X86_SUB, AMD64_R11, 5);
	}
	amd64_mov_membase_reg (code, AMD64_RBP, tramp_offset, AMD64_R11, sizeof (gpointer));
	offset += sizeof (mgreg_t);
	res_offset = - offset;
	/* Save all registers */
	offset += AMD64_NREG * sizeof (mgreg_t);
	saved_regs_offset = - offset;
	for (i = 0; i < AMD64_NREG; ++i) {
		if (i == AMD64_RBP) {
			/* RAX is already saved (it comes first in the loop), so it can be used as scratch */
			amd64_mov_reg_membase (code, AMD64_RAX, AMD64_RBP, rbp_offset, sizeof (mgreg_t));
			amd64_mov_membase_reg (code, AMD64_RBP, saved_regs_offset + (i * sizeof (mgreg_t)), AMD64_RAX, sizeof (mgreg_t));
		} else if (i != AMD64_R11) {
			amd64_mov_membase_reg (code, AMD64_RBP, saved_regs_offset + (i * sizeof (mgreg_t)), i, sizeof (mgreg_t));
		} else {
			/* We have to save R11 right at the start of
			   the trampoline code because it's used as a
			   scratch register */
			amd64_mov_membase_reg (r11_save_code, AMD64_RSP, saved_regs_offset + orig_rsp_to_rbp_offset + (i * sizeof (mgreg_t)), i, sizeof (mgreg_t));
			g_assert (r11_save_code == after_r11_save_code);
		}
	}
	offset += 8 * sizeof (mgreg_t);
	saved_fpregs_offset = - offset;
	for (i = 0; i < 8; ++i)
		amd64_movsd_membase_reg (code, AMD64_RBP, saved_fpregs_offset + (i * sizeof (mgreg_t)), i);
	/* Check that the stack is aligned */
#if defined(__default_codegen__)
	amd64_mov_reg_reg (code, AMD64_R11, AMD64_RSP, sizeof (mgreg_t));
	amd64_alu_reg_imm (code, X86_AND, AMD64_R11, 15);
	amd64_alu_reg_imm (code, X86_CMP, AMD64_R11, 0);
	br [0] = code;
	amd64_branch_disp (code, X86_CC_Z, 0, FALSE);
	/* Misaligned: force a fault by loading from address 0, then call stack_unaligned () as a fallback */
	amd64_mov_reg_imm (code, AMD64_R11, 0);
	amd64_mov_reg_membase (code, AMD64_R11, AMD64_R11, 0, 8);
	amd64_mov_reg_imm (code, MONO_AMD64_ARG_REG1, tramp_type);
	amd64_mov_reg_imm (code, AMD64_R11, stack_unaligned);
	amd64_call_reg (code, AMD64_R11);
	mono_amd64_patch (br [0], code);
	//amd64_breakpoint (code);
	if (tramp_type != MONO_TRAMPOLINE_GENERIC_CLASS_INIT &&
			tramp_type != MONO_TRAMPOLINE_MONITOR_ENTER &&
			tramp_type != MONO_TRAMPOLINE_MONITOR_ENTER_V4 &&
			tramp_type != MONO_TRAMPOLINE_MONITOR_EXIT &&
			tramp_type != MONO_TRAMPOLINE_HANDLER_BLOCK_GUARD) {
		/* Obtain the trampoline argument which is encoded in the instruction stream */
		if (aot) {
			/* Load the GOT offset */
			amd64_mov_reg_membase (code, AMD64_R11, AMD64_RBP, tramp_offset, sizeof (gpointer));
#if defined(__default_codegen__)
			/*
			 * r11 points to a call *<offset>(%rip) instruction, load the
			 * pc-relative offset from the instruction itself.
			 */
			amd64_mov_reg_membase (code, AMD64_RAX, AMD64_R11, 3, 4);
			/* 7 is the length of the call, 8 is the offset to the next GOT slot */
			amd64_alu_reg_imm_size (code, X86_ADD, AMD64_RAX, 7 + sizeof (gpointer), sizeof (gpointer));
#elif defined(__native_client_codegen__)
			/* The arg is hidden in a "push imm32" instruction, */
			/* add one to skip the opcode. */
			amd64_mov_reg_membase (code, AMD64_RAX, AMD64_R11, kNaClTrampOffset + 1, 4);
#endif
			/* Compute the address of the GOT slot */
			amd64_alu_reg_reg_size (code, X86_ADD, AMD64_R11, AMD64_RAX, sizeof (gpointer));
			/* Load the value */
			amd64_mov_reg_membase (code, AMD64_R11, AMD64_R11, 0, sizeof (gpointer));
		} else {
			amd64_mov_reg_membase (code, AMD64_R11, AMD64_RBP, tramp_offset, sizeof (gpointer));
#if defined(__default_codegen__)
			/* Peek at the size byte that follows the 5-byte call */
			amd64_mov_reg_membase (code, AMD64_RAX, AMD64_R11, 5, 1);
			amd64_widen_reg (code, AMD64_RAX, AMD64_RAX, TRUE, FALSE);
			amd64_alu_reg_imm_size (code, X86_CMP, AMD64_RAX, 4, 1);
			br [0] = code;
			x86_branch8 (code, X86_CC_NE, 6, FALSE);
			/* 32 bit immediate */
			amd64_mov_reg_membase (code, AMD64_R11, AMD64_R11, 6, 4);
			br [1] = code;
			x86_jump8 (code, 10);
			/* 64 bit immediate */
			mono_amd64_patch (br [0], code);
			amd64_mov_reg_membase (code, AMD64_R11, AMD64_R11, 6, 8);
			mono_amd64_patch (br [1], code);
#elif defined(__native_client_codegen__)
			/* All args are 32-bit pointers in NaCl */
			amd64_mov_reg_membase (code, AMD64_R11, AMD64_R11, 6, 4);
#endif
		}
		amd64_mov_membase_reg (code, AMD64_RBP, arg_offset, AMD64_R11, sizeof (gpointer));
	} else {
		amd64_mov_reg_membase (code, AMD64_R11, AMD64_RBP, saved_regs_offset + (MONO_AMD64_ARG_REG1 * sizeof (mgreg_t)), sizeof (mgreg_t));
		amd64_mov_membase_reg (code, AMD64_RBP, arg_offset, AMD64_R11, sizeof (gpointer));
	}
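	/*
	 * For reference, the call sites decoded above look roughly like this
	 * (see mono_arch_create_specific_trampoline () below). AOT case:
	 *
	 *     ff 15 <rel32>    call *<disp32>(%rip)   ; through GOT slot n,
	 *                                             ; got [n + 1] holds the argument
	 *
	 * JIT case:
	 *
	 *     e8 <rel32>       call <generic trampoline>   ; 5 bytes
	 *     04|08            size byte, compared with 4 above
	 *     <imm32|imm64>    the argument itself
	 */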
	offset += sizeof (MonoLMFTramp);
	lmf_offset = - offset;
	/* Save ip */
	if (has_caller)
		amd64_mov_reg_membase (code, AMD64_R11, AMD64_RBP, 8, sizeof (gpointer));
	else
		amd64_mov_reg_imm (code, AMD64_R11, 0);
	amd64_mov_membase_reg (code, AMD64_RBP, lmf_offset + MONO_STRUCT_OFFSET (MonoLMF, rip), AMD64_R11, sizeof (mgreg_t));
	/* Save rbp */
	amd64_mov_reg_membase (code, AMD64_R11, AMD64_RSP, framesize, sizeof (mgreg_t));
	amd64_mov_membase_reg (code, AMD64_RBP, lmf_offset + MONO_STRUCT_OFFSET (MonoLMF, rbp), AMD64_R11, sizeof (mgreg_t));
	/* Save rsp */
	amd64_mov_reg_reg (code, AMD64_R11, AMD64_RSP, sizeof (mgreg_t));
	amd64_alu_reg_imm (code, X86_ADD, AMD64_R11, framesize + 16);
	amd64_mov_membase_reg (code, AMD64_RBP, lmf_offset + MONO_STRUCT_OFFSET (MonoLMF, rsp), AMD64_R11, sizeof (mgreg_t));
	/* Save pointer to registers */
	amd64_lea_membase (code, AMD64_R11, AMD64_RBP, saved_regs_offset);
	amd64_mov_membase_reg (code, AMD64_RBP, lmf_offset + MONO_STRUCT_OFFSET (MonoLMFTramp, regs), AMD64_R11, sizeof (mgreg_t));
	if (aot)
		code = mono_arch_emit_load_aotconst (buf, code, &ji, MONO_PATCH_INFO_JIT_ICALL_ADDR, "mono_get_lmf_addr");
	else
		amd64_mov_reg_imm (code, AMD64_R11, mono_get_lmf_addr);
	amd64_call_reg (code, AMD64_R11);
	/* Save lmf_addr */
	amd64_mov_membase_reg (code, AMD64_RBP, lmf_offset + MONO_STRUCT_OFFSET (MonoLMFTramp, lmf_addr), AMD64_RAX, sizeof (gpointer));
	/* Save previous_lmf */
	/* Set the lowest bit to signal that this LMF has the ip field set */
	/* Set the third lowest bit to signal that this is a MonoLMFTramp structure */
	amd64_mov_reg_membase (code, AMD64_R11, AMD64_RAX, 0, sizeof (gpointer));
	amd64_alu_reg_imm_size (code, X86_ADD, AMD64_R11, 0x5, sizeof (gpointer));
	amd64_mov_membase_reg (code, AMD64_RBP, lmf_offset + MONO_STRUCT_OFFSET (MonoLMF, previous_lmf), AMD64_R11, sizeof (gpointer));
	/* Set new lmf */
	amd64_lea_membase (code, AMD64_R11, AMD64_RBP, lmf_offset);
	amd64_mov_membase_reg (code, AMD64_RAX, 0, AMD64_R11, sizeof (gpointer));
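	/*
	 * previous_lmf tag bits, as set above: bit 0 marks an LMF whose ip field
	 * is valid, bit 2 marks a MonoLMFTramp; 0x5 sets both. The unwinder is
	 * expected to mask these bits off when following the LMF chain, and the
	 * 0x5 is subtracted again below when the LMF is popped.
	 */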
	/* Arg1 is the pointer to the saved registers */
	amd64_lea_membase (code, AMD64_ARG_REG1, AMD64_RBP, saved_regs_offset);
	/* Arg2 is the address of the calling code */
	if (has_caller)
		amd64_mov_reg_membase (code, AMD64_ARG_REG2, AMD64_RBP, 8, sizeof (gpointer));
	else
		amd64_mov_reg_imm (code, AMD64_ARG_REG2, 0);
	/* Arg3 is the method/vtable ptr */
	amd64_mov_reg_membase (code, AMD64_ARG_REG3, AMD64_RBP, arg_offset, sizeof (gpointer));
	/* Arg4 is the trampoline address */
	amd64_mov_reg_membase (code, AMD64_ARG_REG4, AMD64_RBP, tramp_offset, sizeof (gpointer));
	if (aot) {
		char *icall_name = g_strdup_printf ("trampoline_func_%d", tramp_type);
		code = mono_arch_emit_load_aotconst (buf, code, &ji, MONO_PATCH_INFO_JIT_ICALL_ADDR, icall_name);
	} else {
		tramp = (guint8*)mono_get_trampoline_func (tramp_type);
		amd64_mov_reg_imm (code, AMD64_R11, tramp);
	}
	amd64_call_reg (code, AMD64_R11);
	/* Check for thread interruption */
	/* This is not perf critical code so no need to check the interrupt flag */
	/*
	 * Have to call the _force_ variant, since there could be a protected wrapper on the top of the stack.
	 */
	amd64_mov_membase_reg (code, AMD64_RBP, res_offset, AMD64_RAX, sizeof (mgreg_t));
	if (aot)
		code = mono_arch_emit_load_aotconst (buf, code, &ji, MONO_PATCH_INFO_JIT_ICALL_ADDR, "mono_thread_force_interruption_checkpoint");
	else
		amd64_mov_reg_imm (code, AMD64_R11, (guint8*)mono_thread_force_interruption_checkpoint);
	amd64_call_reg (code, AMD64_R11);
	amd64_mov_reg_membase (code, AMD64_RAX, AMD64_RBP, res_offset, sizeof (mgreg_t));
	/* Restore LMF */
	amd64_mov_reg_membase (code, AMD64_RCX, AMD64_RBP, lmf_offset + MONO_STRUCT_OFFSET (MonoLMF, previous_lmf), sizeof (gpointer));
	amd64_alu_reg_imm_size (code, X86_SUB, AMD64_RCX, 0x5, sizeof (gpointer));
	amd64_mov_reg_membase (code, AMD64_R11, AMD64_RBP, lmf_offset + MONO_STRUCT_OFFSET (MonoLMFTramp, lmf_addr), sizeof (gpointer));
	amd64_mov_membase_reg (code, AMD64_R11, 0, AMD64_RCX, sizeof (gpointer));
	/*
	 * Save rax to the stack, after the leave instruction, this will become part of
	 * the red zone.
	 */
	amd64_mov_membase_reg (code, AMD64_RBP, rax_offset, AMD64_RAX, sizeof (mgreg_t));
	/* Restore argument registers, r10 (imt method/rgctx)
	   and rax (needed for direct calls to C vararg functions). */
	for (i = 0; i < AMD64_NREG; ++i)
		if (AMD64_IS_ARGUMENT_REG (i) || i == AMD64_R10 || i == AMD64_RAX)
			amd64_mov_reg_membase (code, i, AMD64_RBP, saved_regs_offset + (i * sizeof (mgreg_t)), sizeof (mgreg_t));
	for (i = 0; i < 8; ++i)
		amd64_movsd_reg_membase (code, i, AMD64_RBP, saved_fpregs_offset + (i * sizeof (mgreg_t)));
	if (MONO_TRAMPOLINE_TYPE_MUST_RETURN (tramp_type)) {
		/* Load the result */
		amd64_mov_reg_membase (code, AMD64_RAX, AMD64_RSP, rax_offset - sizeof (mgreg_t), sizeof (mgreg_t));
	} else {
		/* call the compiled method using the saved rax */
		amd64_jump_membase (code, AMD64_RSP, rax_offset - sizeof (mgreg_t));
	}
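	/*
	 * Note on the two accesses above: once the frame is torn down by the
	 * 'leave' mentioned in the earlier comment, %rsp sits 8 bytes above the
	 * old %rbp, so the %rbp-relative rax_offset becomes the %rsp-relative
	 * 'rax_offset - sizeof (mgreg_t)'. That slot lies below %rsp, i.e. in
	 * the red zone, which is why the earlier store survives the epilogue.
	 */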
	g_assert ((code - buf) <= kMaxCodeSize);
	nacl_global_codeman_validate (&buf, kMaxCodeSize, &code);
	mono_arch_flush_icache (buf, code - buf);
	mono_profiler_code_buffer_new (buf, code - buf, MONO_PROFILER_CODE_BUFFER_HELPER, NULL);
	tramp_name = mono_get_generic_trampoline_name (tramp_type);
	*info = mono_tramp_info_create (tramp_name, buf, code - buf, ji, unwind_ops);
mono_arch_get_nullified_class_init_trampoline (MonoTrampInfo **info)
	int size = NACL_SIZE (16, 32);
	code = buf = mono_global_codeman_reserve (size);
	nacl_global_codeman_validate (&buf, size, &code);
	mono_arch_flush_icache (buf, code - buf);
	mono_profiler_code_buffer_new (buf, code - buf, MONO_PROFILER_CODE_BUFFER_HELPER, NULL);
	*info = mono_tramp_info_create ("nullified_class_init_trampoline", buf, code - buf, NULL, NULL);
mono_arch_create_specific_trampoline (gpointer arg1, MonoTrampolineType tramp_type, MonoDomain *domain, guint32 *code_len)
	guint8 *code, *buf, *tramp;
	gboolean far_addr = FALSE;
	tramp = mono_get_trampoline_code (tramp_type);
#if defined(__default_codegen__)
	/* 5 (call) + 1 (size byte) + 4 or 8 (immediate) */
	if ((((guint64)arg1) >> 32) == 0)
		size = 5 + 1 + 4;
	else
		size = 5 + 1 + 8;
	code = buf = mono_domain_code_reserve_align (domain, size, 1);
	if (((gint64)tramp - (gint64)code) >> 31 != 0 && ((gint64)tramp - (gint64)code) >> 31 != -1) {
#ifndef MONO_ARCH_NOMAP32BIT
		g_assert_not_reached ();
#endif
		far_addr = TRUE;
		size += 16;
		code = buf = mono_domain_code_reserve_align (domain, size, 1);
	}
#elif defined(__native_client_codegen__)
	/* Aligning the call site below could */
	/* add up to kNaClAlignment-1 bytes */
	size += (kNaClAlignment - 1);
	size = NACL_BUNDLE_ALIGN_UP (size);
	buf = mono_domain_code_reserve_align (domain, size, kNaClAlignment);
#endif
	if (far_addr) {
		amd64_mov_reg_imm (code, AMD64_R11, tramp);
		amd64_call_reg (code, AMD64_R11);
	} else {
		amd64_call_code (code, tramp);
	}
	/* The trampoline code will obtain the argument from the instruction stream */
#if defined(__default_codegen__)
	if ((((guint64)arg1) >> 32) == 0) {
		*code = 0x4;
		*(guint32*)(code + 1) = (gint64)arg1;
		code += 5;
	} else {
		*code = 0x8;
		*(guint64*)(code + 1) = (gint64)arg1;
		code += 9;
	}
#elif defined(__native_client_codegen__)
	/* For NaCl, all tramp args are 32-bit because they're pointers */
	*code = 0x68; /* push imm32 */
	*(guint32*)(code + 1) = (gint32)arg1;
#endif
	g_assert ((code - buf) <= size);
	nacl_domain_code_validate (domain, &buf, size, &code);
	mono_arch_flush_icache (buf, size);
	mono_profiler_code_buffer_new (buf, code - buf, MONO_PROFILER_CODE_BUFFER_SPECIFIC_TRAMPOLINE, mono_get_generic_trampoline_simple_name (tramp_type));
mono_arch_create_rgctx_lazy_fetch_trampoline (guint32 slot, MonoTrampInfo **info, gboolean aot)
	guint8 **rgctx_null_jumps;
	MonoJumpInfo *ji = NULL;
	GSList *unwind_ops = NULL;
	mrgctx = MONO_RGCTX_SLOT_IS_MRGCTX (slot);
	index = MONO_RGCTX_SLOT_INDEX (slot);
	if (mrgctx)
		index += MONO_SIZEOF_METHOD_RUNTIME_GENERIC_CONTEXT / sizeof (gpointer);
	for (depth = 0; ; ++depth) {
		int size = mono_class_rgctx_get_array_size (depth, mrgctx);
		if (index < size - 1)
			break;
		index -= size - 1;
	}
	tramp_size = NACL_SIZE (64 + 8 * depth, 128 + 8 * depth);
	code = buf = mono_global_codeman_reserve (tramp_size);
	unwind_ops = mono_arch_get_cie_program ();
	rgctx_null_jumps = g_malloc (sizeof (guint8*) * (depth + 2));
	if (mrgctx) {
		/* get mrgctx ptr */
		amd64_mov_reg_reg (code, AMD64_RAX, AMD64_ARG_REG1, 8);
	} else {
		/* load rgctx ptr from vtable */
		amd64_mov_reg_membase (code, AMD64_RAX, AMD64_ARG_REG1, MONO_STRUCT_OFFSET (MonoVTable, runtime_generic_context), sizeof (gpointer));
		/* is the rgctx ptr null? */
		amd64_test_reg_reg (code, AMD64_RAX, AMD64_RAX);
		/* if yes, jump to actual trampoline */
		rgctx_null_jumps [0] = code;
		amd64_branch8 (code, X86_CC_Z, -1, 1);
	}
	for (i = 0; i < depth; ++i) {
		/* load ptr to next array */
		if (mrgctx && i == 0)
			amd64_mov_reg_membase (code, AMD64_RAX, AMD64_RAX, MONO_SIZEOF_METHOD_RUNTIME_GENERIC_CONTEXT, sizeof (gpointer));
		else
			amd64_mov_reg_membase (code, AMD64_RAX, AMD64_RAX, 0, sizeof (gpointer));
		/* is the ptr null? */
		amd64_test_reg_reg (code, AMD64_RAX, AMD64_RAX);
		/* if yes, jump to actual trampoline */
		rgctx_null_jumps [i + 1] = code;
		amd64_branch8 (code, X86_CC_Z, -1, 1);
	}
	/* fetch slot */
	amd64_mov_reg_membase (code, AMD64_RAX, AMD64_RAX, sizeof (gpointer) * (index + 1), sizeof (gpointer));
	/* is the slot null? */
	amd64_test_reg_reg (code, AMD64_RAX, AMD64_RAX);
	/* if yes, jump to actual trampoline */
	rgctx_null_jumps [depth + 1] = code;
	amd64_branch8 (code, X86_CC_Z, -1, 1);
	/* otherwise return */
	amd64_ret (code);
	for (i = mrgctx ? 1 : 0; i <= depth + 1; ++i)
		mono_amd64_patch (rgctx_null_jumps [i], code);
	g_free (rgctx_null_jumps);
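	/*
	 * The fast path built above is, in rough pseudo-C:
	 *
	 *     rgctx = mrgctx ? arg : vtable->runtime_generic_context;  // NULL -> slow path
	 *     for (i = 0; i < depth; ++i)
	 *         rgctx = next array (the first hop skips the MRGCTX header);  // NULL -> slow path
	 *     res = rgctx [index + 1];                                 // NULL -> slow path
	 *     return res;
	 */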
	/* move the rgctx pointer to the VTABLE register */
	amd64_mov_reg_reg (code, MONO_ARCH_VTABLE_REG, AMD64_ARG_REG1, sizeof (gpointer));
	if (aot) {
		code = mono_arch_emit_load_aotconst (buf, code, &ji, MONO_PATCH_INFO_JIT_ICALL_ADDR, g_strdup_printf ("specific_trampoline_lazy_fetch_%u", slot));
		amd64_jump_reg (code, AMD64_R11);
	} else {
		tramp = mono_arch_create_specific_trampoline (GUINT_TO_POINTER (slot), MONO_TRAMPOLINE_RGCTX_LAZY_FETCH, mono_get_root_domain (), NULL);
		/* jump to the actual trampoline */
		amd64_jump_code (code, tramp);
	}
	nacl_global_codeman_validate (&buf, tramp_size, &code);
	mono_arch_flush_icache (buf, code - buf);
	mono_profiler_code_buffer_new (buf, code - buf, MONO_PROFILER_CODE_BUFFER_GENERICS_TRAMPOLINE, NULL);
	g_assert (code - buf <= tramp_size);
	char *name = mono_get_rgctx_fetch_trampoline_name (slot);
	*info = mono_tramp_info_create (name, buf, code - buf, ji, unwind_ops);
mono_arch_create_generic_class_init_trampoline (MonoTrampInfo **info, gboolean aot)
	GSList *unwind_ops = NULL;
	MonoJumpInfo *ji = NULL;
	code = buf = mono_global_codeman_reserve (tramp_size);
	/* Not used on amd64 */
	amd64_breakpoint (code);
	nacl_global_codeman_validate (&buf, tramp_size, &code);
	mono_arch_flush_icache (buf, code - buf);
	mono_profiler_code_buffer_new (buf, code - buf, MONO_PROFILER_CODE_BUFFER_HELPER, NULL);
	g_assert (code - buf <= tramp_size);
	*info = mono_tramp_info_create ("generic_class_init_trampoline", buf, code - buf, ji, unwind_ops);
#ifdef MONO_ARCH_MONITOR_OBJECT_REG
mono_arch_create_monitor_enter_trampoline (MonoTrampInfo **info, gboolean is_v4, gboolean aot)
	guint8 *jump_obj_null, *jump_sync_null, *jump_cmpxchg_failed, *jump_other_owner, *jump_tid, *jump_sync_thin_hash = NULL;
	guint8 *jump_lock_taken_true = NULL;
	int status_offset, nest_offset;
	MonoJumpInfo *ji = NULL;
	GSList *unwind_ops = NULL;
	int obj_reg = MONO_AMD64_ARG_REG1;
	int lock_taken_reg = MONO_AMD64_ARG_REG2;
	int sync_reg = MONO_AMD64_ARG_REG3;
	int tid_reg = MONO_AMD64_ARG_REG4;
	int status_reg = AMD64_RAX;
	g_assert (MONO_ARCH_MONITOR_OBJECT_REG == obj_reg);
#ifdef MONO_ARCH_MONITOR_LOCK_TAKEN_REG
	g_assert (MONO_ARCH_MONITOR_LOCK_TAKEN_REG == lock_taken_reg);
	mono_monitor_threads_sync_members_offset (&status_offset, &nest_offset);
	g_assert (MONO_THREADS_SYNC_MEMBER_SIZE (status_offset) == sizeof (guint32));
	g_assert (MONO_THREADS_SYNC_MEMBER_SIZE (nest_offset) == sizeof (guint32));
	status_offset = MONO_THREADS_SYNC_MEMBER_OFFSET (status_offset);
	nest_offset = MONO_THREADS_SYNC_MEMBER_OFFSET (nest_offset);
	code = buf = mono_global_codeman_reserve (tramp_size);
	unwind_ops = mono_arch_get_cie_program ();
	if (!aot && mono_thread_get_tls_offset () != -1) {
		/* MonoObject* obj is in obj_reg */
		/* is obj null? */
		amd64_test_reg_reg (code, obj_reg, obj_reg);
		/* if yes, jump to actual trampoline */
		jump_obj_null = code;
		amd64_branch8 (code, X86_CC_Z, -1, 1);
		if (is_v4) {
			amd64_test_membase_imm (code, lock_taken_reg, 0, 1);
			/* if *lock_taken is 1, jump to actual trampoline */
			jump_lock_taken_true = code;
			x86_branch8 (code, X86_CC_NZ, -1, 1);
		}
		/* load obj->synchronization to sync_reg */
		amd64_mov_reg_membase (code, sync_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, synchronisation), 8);
		if (mono_gc_is_moving ()) {
			/* if bit zero is set it's a thin hash */
			/* FIXME: use testb encoding */
			amd64_test_reg_imm (code, sync_reg, 0x01);
			jump_sync_thin_hash = code;
			amd64_branch8 (code, X86_CC_NE, -1, 1);
			/* clear bits used by the gc */
			amd64_alu_reg_imm (code, X86_AND, sync_reg, ~0x3);
		}
		/* is synchronization null? */
		amd64_test_reg_reg (code, sync_reg, sync_reg);
		/* if yes, jump to actual trampoline */
		jump_sync_null = code;
		amd64_branch8 (code, X86_CC_Z, -1, 1);
		/* load MonoInternalThread* into tid_reg */
		code = mono_amd64_emit_tls_get (code, tid_reg, mono_thread_get_tls_offset ());
		/* load TID into tid_reg */
		amd64_mov_reg_membase (code, tid_reg, tid_reg, MONO_STRUCT_OFFSET (MonoInternalThread, small_id), 4);
		/* is synchronization->owner free? */
		amd64_mov_reg_membase (code, status_reg, sync_reg, status_offset, 4);
		amd64_test_reg_imm_size (code, status_reg, OWNER_MASK, 4);
		/* if not, jump to next case */
		jump_tid = code;
		amd64_branch8 (code, X86_CC_NZ, -1, 1);
		/* if yes, try a compare-exchange with the TID */
		g_assert (tid_reg != X86_EAX);
		/* Form new status in tid_reg */
		amd64_alu_reg_reg_size (code, X86_OR, tid_reg, status_reg, 4);
		/* compare and exchange */
		amd64_prefix (code, X86_LOCK_PREFIX);
		amd64_cmpxchg_membase_reg_size (code, sync_reg, status_offset, tid_reg, 4);
		/* if not successful, jump to actual trampoline */
		jump_cmpxchg_failed = code;
		amd64_branch8 (code, X86_CC_NZ, -1, 1);
		/* if successful, return */
		if (is_v4)
			amd64_mov_membase_imm (code, lock_taken_reg, 0, 1, 1);
		amd64_ret (code);
		/* next case: synchronization->owner is not null */
		x86_patch (jump_tid, code);
		/* is synchronization->owner == TID? */
		amd64_alu_reg_imm_size (code, X86_AND, status_reg, OWNER_MASK, 4);
		amd64_alu_reg_reg_size (code, X86_CMP, status_reg, tid_reg, 4);
		/* if not, jump to actual trampoline */
		jump_other_owner = code;
		amd64_branch8 (code, X86_CC_NZ, -1, 1);
		/* if yes, increment nest */
		amd64_inc_membase_size (code, sync_reg, nest_offset, 4);
		/* return */
		if (is_v4)
			amd64_mov_membase_imm (code, lock_taken_reg, 0, 1, 1);
		amd64_ret (code);
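		/*
		 * Summary of the fast path above, in rough pseudo-C:
		 *
		 *     if (!obj || (is_v4 && *lock_taken)) goto slow;
		 *     sync = obj->synchronisation;       // thin hash or NULL -> slow
		 *     if (!(sync->status & OWNER_MASK)) {
		 *         // install our TID atomically; on failure -> slow
		 *         cmpxchg (&sync->status, status, status | tid); return;
		 *     }
		 *     if ((sync->status & OWNER_MASK) == tid) { sync->nest++; return; }
		 *     goto slow;
		 */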
		x86_patch (jump_obj_null, code);
		if (jump_sync_thin_hash)
			x86_patch (jump_sync_thin_hash, code);
		x86_patch (jump_sync_null, code);
		x86_patch (jump_cmpxchg_failed, code);
		x86_patch (jump_other_owner, code);
		if (is_v4)
			x86_patch (jump_lock_taken_true, code);
	}
	/* jump to the actual trampoline */
	if (MONO_AMD64_ARG_REG1 != obj_reg)
		amd64_mov_reg_reg (code, MONO_AMD64_ARG_REG1, obj_reg, sizeof (mgreg_t));
	if (aot) {
		if (is_v4)
			code = mono_arch_emit_load_aotconst (buf, code, &ji, MONO_PATCH_INFO_JIT_ICALL_ADDR, "specific_trampoline_monitor_enter_v4");
		else
			code = mono_arch_emit_load_aotconst (buf, code, &ji, MONO_PATCH_INFO_JIT_ICALL_ADDR, "specific_trampoline_monitor_enter");
		amd64_jump_reg (code, AMD64_R11);
	} else {
		if (is_v4)
			tramp = mono_arch_create_specific_trampoline (NULL, MONO_TRAMPOLINE_MONITOR_ENTER_V4, mono_get_root_domain (), NULL);
		else
			tramp = mono_arch_create_specific_trampoline (NULL, MONO_TRAMPOLINE_MONITOR_ENTER, mono_get_root_domain (), NULL);
		/* jump to the actual trampoline */
		amd64_jump_code (code, tramp);
	}
	nacl_global_codeman_validate (&buf, tramp_size, &code);
	mono_arch_flush_icache (buf, code - buf);
	mono_profiler_code_buffer_new (buf, code - buf, MONO_PROFILER_CODE_BUFFER_MONITOR, NULL);
	g_assert (code - buf <= tramp_size);
	if (is_v4)
		*info = mono_tramp_info_create ("monitor_enter_v4_trampoline", buf, code - buf, ji, unwind_ops);
	else
		*info = mono_tramp_info_create ("monitor_enter_trampoline", buf, code - buf, ji, unwind_ops);
mono_arch_create_monitor_exit_trampoline (MonoTrampInfo **info, gboolean aot)
	guint8 *jump_obj_null, *jump_have_waiters, *jump_sync_null, *jump_not_owned, *jump_cmpxchg_failed;
	guint8 *jump_next, *jump_sync_thin_hash = NULL;
	int status_offset, nest_offset;
	MonoJumpInfo *ji = NULL;
	GSList *unwind_ops = NULL;
	int obj_reg = MONO_AMD64_ARG_REG1;
	int sync_reg = MONO_AMD64_ARG_REG2;
	int status_reg = MONO_AMD64_ARG_REG3;
	g_assert (obj_reg == MONO_ARCH_MONITOR_OBJECT_REG);
	mono_monitor_threads_sync_members_offset (&status_offset, &nest_offset);
	g_assert (MONO_THREADS_SYNC_MEMBER_SIZE (status_offset) == sizeof (guint32));
	g_assert (MONO_THREADS_SYNC_MEMBER_SIZE (nest_offset) == sizeof (guint32));
	status_offset = MONO_THREADS_SYNC_MEMBER_OFFSET (status_offset);
	nest_offset = MONO_THREADS_SYNC_MEMBER_OFFSET (nest_offset);
	code = buf = mono_global_codeman_reserve (tramp_size);
	unwind_ops = mono_arch_get_cie_program ();
	if (!aot && mono_thread_get_tls_offset () != -1) {
		/* MonoObject* obj is in obj_reg */
		/* is obj null? */
		amd64_test_reg_reg (code, obj_reg, obj_reg);
		/* if yes, jump to actual trampoline */
		jump_obj_null = code;
		amd64_branch8 (code, X86_CC_Z, -1, 1);
		/* load obj->synchronization to sync_reg */
		amd64_mov_reg_membase (code, sync_reg, obj_reg, MONO_STRUCT_OFFSET (MonoObject, synchronisation), 8);
		if (mono_gc_is_moving ()) {
			/* if bit zero is set it's a thin hash */
			/* FIXME: use testb encoding */
			amd64_test_reg_imm (code, sync_reg, 0x01);
			jump_sync_thin_hash = code;
			amd64_branch8 (code, X86_CC_NE, -1, 1);
			/* clear bits used by the gc */
			amd64_alu_reg_imm (code, X86_AND, sync_reg, ~0x3);
		}
		/* is synchronization null? */
		amd64_test_reg_reg (code, sync_reg, sync_reg);
		/* if yes, jump to actual trampoline */
		jump_sync_null = code;
		amd64_branch8 (code, X86_CC_Z, -1, 1);
		/* next case: synchronization is not null */
		/* load MonoInternalThread* into RAX */
		code = mono_amd64_emit_tls_get (code, AMD64_RAX, mono_thread_get_tls_offset ());
		/* load TID into RAX */
		amd64_mov_reg_membase (code, AMD64_RAX, AMD64_RAX, MONO_STRUCT_OFFSET (MonoInternalThread, small_id), 4);
		/* is synchronization->owner == TID? */
		amd64_mov_reg_membase (code, status_reg, sync_reg, status_offset, 4);
		amd64_alu_reg_reg_size (code, X86_XOR, AMD64_RAX, status_reg, 4);
		amd64_test_reg_imm_size (code, AMD64_RAX, OWNER_MASK, 4);
		/* if not, jump to actual trampoline */
		jump_not_owned = code;
		amd64_branch8 (code, X86_CC_NZ, -1, 1);
		/* next case: synchronization->owner == TID */
		/* is synchronization->nest == 1? */
		amd64_alu_membase_imm_size (code, X86_CMP, sync_reg, nest_offset, 1, 4);
		/* if not, jump to next case */
		jump_next = code;
		amd64_branch8 (code, X86_CC_NZ, -1, 1);
		/* if yes, check whether there are waiters */
		amd64_test_reg_imm_size (code, status_reg, ENTRY_COUNT_WAITERS, 4);
		/* if there are, jump to actual trampoline */
		jump_have_waiters = code;
		amd64_branch8 (code, X86_CC_NZ, -1, 1);
		/* otherwise, try to set synchronization->owner to null and return */
		g_assert (status_reg != AMD64_RAX);
		/* old status in RAX */
		amd64_mov_reg_reg (code, AMD64_RAX, status_reg, 4);
		/* form new status */
		amd64_alu_reg_imm_size (code, X86_AND, status_reg, ENTRY_COUNT_MASK, 4);
		/* compare and exchange */
		amd64_prefix (code, X86_LOCK_PREFIX);
		amd64_cmpxchg_membase_reg_size (code, sync_reg, status_offset, status_reg, 4);
		/* if not successful, jump to actual trampoline */
		jump_cmpxchg_failed = code;
		amd64_branch8 (code, X86_CC_NZ, -1, 1);
		/* if successful, return */
		amd64_ret (code);
		/* next case: synchronization->nest is not 1 */
		x86_patch (jump_next, code);
		/* decrease synchronization->nest and return */
		amd64_dec_membase_size (code, sync_reg, nest_offset, 4);
		amd64_ret (code);
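		/*
		 * In rough pseudo-C, the fast path above is:
		 *
		 *     if (!obj) goto slow;
		 *     sync = obj->synchronisation;                // thin hash or NULL -> slow
		 *     if ((sync->status & OWNER_MASK) != tid) goto slow;
		 *     if (sync->nest == 1) {
		 *         if (sync->status & ENTRY_COUNT_WAITERS) goto slow;
		 *         cmpxchg the status with the owner bits cleared;  // failure -> slow
		 *     } else
		 *         sync->nest--;
		 */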
		if (jump_sync_thin_hash)
			x86_patch (jump_sync_thin_hash, code);
		x86_patch (jump_obj_null, code);
		x86_patch (jump_have_waiters, code);
		x86_patch (jump_not_owned, code);
		x86_patch (jump_cmpxchg_failed, code);
		x86_patch (jump_sync_null, code);
	}
	/* jump to the actual trampoline */
	if (MONO_AMD64_ARG_REG1 != obj_reg)
		amd64_mov_reg_reg (code, MONO_AMD64_ARG_REG1, obj_reg, sizeof (mgreg_t));
	if (aot) {
		code = mono_arch_emit_load_aotconst (buf, code, &ji, MONO_PATCH_INFO_JIT_ICALL_ADDR, "specific_trampoline_monitor_exit");
		amd64_jump_reg (code, AMD64_R11);
	} else {
		tramp = mono_arch_create_specific_trampoline (NULL, MONO_TRAMPOLINE_MONITOR_EXIT, mono_get_root_domain (), NULL);
		amd64_jump_code (code, tramp);
	}
	nacl_global_codeman_validate (&buf, tramp_size, &code);
	mono_arch_flush_icache (buf, code - buf);
	mono_profiler_code_buffer_new (buf, code - buf, MONO_PROFILER_CODE_BUFFER_MONITOR, NULL);
	g_assert (code - buf <= tramp_size);
	*info = mono_tramp_info_create ("monitor_exit_trampoline", buf, code - buf, ji, unwind_ops);
mono_arch_create_monitor_enter_trampoline (MonoTrampInfo **info, gboolean is_v4, gboolean aot)
	g_assert_not_reached ();
mono_arch_create_monitor_exit_trampoline (MonoTrampInfo **info, gboolean aot)
	g_assert_not_reached ();
mono_arch_invalidate_method (MonoJitInfo *ji, void *func, gpointer func_arg)
	/* FIXME: This is not thread safe */
	guint8 *code = ji->code_start;
	amd64_mov_reg_imm (code, AMD64_ARG_REG1, func_arg);
	amd64_mov_reg_imm (code, AMD64_R11, func);
	x86_push_imm (code, (guint64)func_arg);
	amd64_call_reg (code, AMD64_R11);
handler_block_trampoline_helper (gpointer *ptr)
	MonoJitTlsData *jit_tls = mono_native_tls_get_value (mono_jit_tls_id);
	*ptr = jit_tls->handler_block_return_address;
mono_arch_create_handler_block_trampoline (MonoTrampInfo **info, gboolean aot)
	guint8 *tramp = mono_get_trampoline_code (MONO_TRAMPOLINE_HANDLER_BLOCK_GUARD);
	int tramp_size = 64;
	MonoJumpInfo *ji = NULL;
	GSList *unwind_ops = NULL;
	code = buf = mono_global_codeman_reserve (tramp_size);
	/*
	 * This trampoline restores the call chain of the handler block, then jumps into the code that deals with it.
	 */
	if (mono_get_jit_tls_offset () != -1) {
		code = mono_amd64_emit_tls_get (code, MONO_AMD64_ARG_REG1, mono_get_jit_tls_offset ());
		amd64_mov_reg_membase (code, MONO_AMD64_ARG_REG1, MONO_AMD64_ARG_REG1, MONO_STRUCT_OFFSET (MonoJitTlsData, handler_block_return_address), 8);
		/* Simulate a call */
		amd64_push_reg (code, AMD64_RAX);
		amd64_jump_code (code, tramp);
	} else {
		/* Slow path uses a C helper */
		amd64_mov_reg_reg (code, MONO_AMD64_ARG_REG1, AMD64_RSP, 8);
		amd64_mov_reg_imm (code, AMD64_RAX, tramp);
		amd64_push_reg (code, AMD64_RAX);
		amd64_push_reg (code, AMD64_RAX);
		amd64_jump_code (code, handler_block_trampoline_helper);
	}
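	/*
	 * A rough sketch of the slow path: handler_block_trampoline_helper ()
	 * receives a pointer to the stack slot it must rewrite with the saved
	 * handler_block_return_address, and since it is entered via a jump, the
	 * pushed 'tramp' value on top of the stack doubles as its return
	 * address, so returning from the helper continues in the handler block
	 * guard trampoline.
	 */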
	mono_arch_flush_icache (buf, code - buf);
	mono_profiler_code_buffer_new (buf, code - buf, MONO_PROFILER_CODE_BUFFER_HELPER, NULL);
	g_assert (code - buf <= tramp_size);
	*info = mono_tramp_info_create ("handler_block_trampoline", buf, code - buf, ji, unwind_ops);
 * mono_arch_get_call_target:
 * Return the address called by the code before CODE, if it exists.
mono_arch_get_call_target (guint8 *code)
	if (code [-5] == 0xe8) {
		gint32 disp = *(gint32*)(code - 4);
		guint8 *target = code + disp;
 * mono_arch_get_plt_info_offset:
 * Return the PLT info offset belonging to the PLT entry PLT_ENTRY.
mono_arch_get_plt_info_offset (guint8 *plt_entry, mgreg_t *regs, guint8 *code)
#if defined(__native_client__) || defined(__native_client_codegen__)
	/* 18 = 3 (mov opcode) + 4 (disp) + 10 (nacljmp) + 1 (push opcode) */
	/* See aot-compiler.c arch_emit_plt_entry for details. */
	return *(guint32*)(plt_entry + 18);
#else
	return *(guint32*)(plt_entry + 6);
#endif
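/*
 * Default-codegen PLT entry, for reference (this is the shape verified in
 * mono_arch_patch_plt_entry () above):
 *
 *     ff 25 <rel32>    jmp *<disp>(%rip)   ; 6 bytes
 *     <guint32>        PLT info offset, read by the non-NaCl branch
 */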
 * mono_arch_create_sdb_trampoline:
 * Return a trampoline which captures the current context, passes it to
 * debugger_agent_single_step_from_context ()/debugger_agent_breakpoint_from_context (),
 * then restores the (potentially changed) context.
mono_arch_create_sdb_trampoline (gboolean single_step, MonoTrampInfo **info, gboolean aot)
	int tramp_size = 256;
	int framesize, ctx_offset, cfa_offset;
	GSList *unwind_ops = NULL;
	MonoJumpInfo *ji = NULL;
	code = buf = mono_global_codeman_reserve (tramp_size);
	framesize = sizeof (MonoContext);
	framesize = ALIGN_TO (framesize, MONO_ARCH_FRAME_ALIGNMENT);
	// CFA = sp + 8
	cfa_offset = 8;
	mono_add_unwind_op_def_cfa (unwind_ops, code, buf, AMD64_RSP, 8);
	// IP saved at CFA - 8
	mono_add_unwind_op_offset (unwind_ops, code, buf, AMD64_RIP, -cfa_offset);
	amd64_push_reg (code, AMD64_RBP);
	cfa_offset += sizeof (mgreg_t);
	mono_add_unwind_op_def_cfa_offset (unwind_ops, code, buf, cfa_offset);
	mono_add_unwind_op_offset (unwind_ops, code, buf, AMD64_RBP, - cfa_offset);
	amd64_mov_reg_reg (code, AMD64_RBP, AMD64_RSP, sizeof (mgreg_t));
	mono_add_unwind_op_def_cfa_reg (unwind_ops, code, buf, AMD64_RBP);
	amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, framesize);
	/* Initialize a MonoContext structure on the stack */
	amd64_mov_membase_reg (code, AMD64_RSP, ctx_offset + MONO_STRUCT_OFFSET (MonoContext, rax), AMD64_RAX, sizeof (mgreg_t));
	amd64_mov_membase_reg (code, AMD64_RSP, ctx_offset + MONO_STRUCT_OFFSET (MonoContext, rbx), AMD64_RBX, sizeof (mgreg_t));
	amd64_mov_membase_reg (code, AMD64_RSP, ctx_offset + MONO_STRUCT_OFFSET (MonoContext, rcx), AMD64_RCX, sizeof (mgreg_t));
	amd64_mov_membase_reg (code, AMD64_RSP, ctx_offset + MONO_STRUCT_OFFSET (MonoContext, rdx), AMD64_RDX, sizeof (mgreg_t));
	amd64_mov_membase_reg (code, AMD64_RSP, ctx_offset + MONO_STRUCT_OFFSET (MonoContext, rsi), AMD64_RSI, sizeof (mgreg_t));
	amd64_mov_membase_reg (code, AMD64_RSP, ctx_offset + MONO_STRUCT_OFFSET (MonoContext, rdi), AMD64_RDI, sizeof (mgreg_t));
	amd64_mov_membase_reg (code, AMD64_RSP, ctx_offset + MONO_STRUCT_OFFSET (MonoContext, r8), AMD64_R8, sizeof (mgreg_t));
	amd64_mov_membase_reg (code, AMD64_RSP, ctx_offset + MONO_STRUCT_OFFSET (MonoContext, r9), AMD64_R9, sizeof (mgreg_t));
	amd64_mov_membase_reg (code, AMD64_RSP, ctx_offset + MONO_STRUCT_OFFSET (MonoContext, r10), AMD64_R10, sizeof (mgreg_t));
	amd64_mov_membase_reg (code, AMD64_RSP, ctx_offset + MONO_STRUCT_OFFSET (MonoContext, r11), AMD64_R11, sizeof (mgreg_t));
	amd64_mov_membase_reg (code, AMD64_RSP, ctx_offset + MONO_STRUCT_OFFSET (MonoContext, r12), AMD64_R12, sizeof (mgreg_t));
	amd64_mov_membase_reg (code, AMD64_RSP, ctx_offset + MONO_STRUCT_OFFSET (MonoContext, r13), AMD64_R13, sizeof (mgreg_t));
	amd64_mov_membase_reg (code, AMD64_RSP, ctx_offset + MONO_STRUCT_OFFSET (MonoContext, r14), AMD64_R14, sizeof (mgreg_t));
	amd64_mov_membase_reg (code, AMD64_RSP, ctx_offset + MONO_STRUCT_OFFSET (MonoContext, r15), AMD64_R15, sizeof (mgreg_t));
	amd64_mov_reg_membase (code, AMD64_R11, AMD64_RBP, 0, sizeof (mgreg_t));
	amd64_mov_membase_reg (code, AMD64_RSP, ctx_offset + MONO_STRUCT_OFFSET (MonoContext, rbp), AMD64_R11, sizeof (mgreg_t));
	amd64_lea_membase (code, AMD64_R11, AMD64_RBP, 2 * sizeof (mgreg_t));
	amd64_mov_membase_reg (code, AMD64_RSP, ctx_offset + MONO_STRUCT_OFFSET (MonoContext, rsp), AMD64_R11, sizeof (mgreg_t));
	amd64_mov_reg_membase (code, AMD64_R11, AMD64_RBP, sizeof (mgreg_t), sizeof (mgreg_t));
	amd64_mov_membase_reg (code, AMD64_RSP, ctx_offset + MONO_STRUCT_OFFSET (MonoContext, rip), AMD64_R11, sizeof (mgreg_t));
	/* Call the single step/breakpoint function in sdb */
	amd64_lea_membase (code, AMD64_ARG_REG1, AMD64_RSP, ctx_offset);
	if (aot) {
		if (single_step)
			code = mono_arch_emit_load_aotconst (buf, code, &ji, MONO_PATCH_INFO_JIT_ICALL_ADDR, "debugger_agent_single_step_from_context");
		else
			code = mono_arch_emit_load_aotconst (buf, code, &ji, MONO_PATCH_INFO_JIT_ICALL_ADDR, "debugger_agent_breakpoint_from_context");
		amd64_call_reg (code, AMD64_R11);
	} else {
		if (single_step)
			amd64_call_code (code, debugger_agent_single_step_from_context);
		else
			amd64_call_code (code, debugger_agent_breakpoint_from_context);
	}
	/* Restore registers from ctx */
	amd64_mov_reg_membase (code, AMD64_RAX, AMD64_RSP, ctx_offset + MONO_STRUCT_OFFSET (MonoContext, rax), sizeof (mgreg_t));
	amd64_mov_reg_membase (code, AMD64_RBX, AMD64_RSP, ctx_offset + MONO_STRUCT_OFFSET (MonoContext, rbx), sizeof (mgreg_t));
	amd64_mov_reg_membase (code, AMD64_RCX, AMD64_RSP, ctx_offset + MONO_STRUCT_OFFSET (MonoContext, rcx), sizeof (mgreg_t));
	amd64_mov_reg_membase (code, AMD64_RDX, AMD64_RSP, ctx_offset + MONO_STRUCT_OFFSET (MonoContext, rdx), sizeof (mgreg_t));
	amd64_mov_reg_membase (code, AMD64_RSI, AMD64_RSP, ctx_offset + MONO_STRUCT_OFFSET (MonoContext, rsi), sizeof (mgreg_t));
	amd64_mov_reg_membase (code, AMD64_RDI, AMD64_RSP, ctx_offset + MONO_STRUCT_OFFSET (MonoContext, rdi), sizeof (mgreg_t));
	amd64_mov_reg_membase (code, AMD64_R8, AMD64_RSP, ctx_offset + MONO_STRUCT_OFFSET (MonoContext, r8), sizeof (mgreg_t));
	amd64_mov_reg_membase (code, AMD64_R9, AMD64_RSP, ctx_offset + MONO_STRUCT_OFFSET (MonoContext, r9), sizeof (mgreg_t));
	amd64_mov_reg_membase (code, AMD64_R10, AMD64_RSP, ctx_offset + MONO_STRUCT_OFFSET (MonoContext, r10), sizeof (mgreg_t));
	amd64_mov_reg_membase (code, AMD64_R11, AMD64_RSP, ctx_offset + MONO_STRUCT_OFFSET (MonoContext, r11), sizeof (mgreg_t));
	amd64_mov_reg_membase (code, AMD64_R12, AMD64_RSP, ctx_offset + MONO_STRUCT_OFFSET (MonoContext, r12), sizeof (mgreg_t));
	amd64_mov_reg_membase (code, AMD64_R13, AMD64_RSP, ctx_offset + MONO_STRUCT_OFFSET (MonoContext, r13), sizeof (mgreg_t));
	amd64_mov_reg_membase (code, AMD64_R14, AMD64_RSP, ctx_offset + MONO_STRUCT_OFFSET (MonoContext, r14), sizeof (mgreg_t));
	amd64_mov_reg_membase (code, AMD64_R15, AMD64_RSP, ctx_offset + MONO_STRUCT_OFFSET (MonoContext, r15), sizeof (mgreg_t));
	amd64_mov_reg_membase (code, AMD64_R11, AMD64_RSP, ctx_offset + MONO_STRUCT_OFFSET (MonoContext, rbp), sizeof (mgreg_t));
	amd64_mov_membase_reg (code, AMD64_RBP, 0, AMD64_R11, sizeof (mgreg_t));
	amd64_mov_reg_membase (code, AMD64_R11, AMD64_RSP, ctx_offset + MONO_STRUCT_OFFSET (MonoContext, rip), sizeof (mgreg_t));
	amd64_mov_membase_reg (code, AMD64_RBP, sizeof (mgreg_t), AMD64_R11, sizeof (mgreg_t));
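	/*
	 * The two stores above write ctx->rbp and ctx->rip back into the
	 * trampoline's own frame (the saved %rbp slot and the return-address
	 * slot), so the function epilogue resumes execution at the possibly
	 * modified context.
	 */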
	mono_arch_flush_icache (buf, code - buf);
	g_assert (code - buf <= tramp_size);
	const char *tramp_name = single_step ? "sdb_single_step_trampoline" : "sdb_breakpoint_trampoline";
	*info = mono_tramp_info_create (tramp_name, buf, code - buf, ji, unwind_ops);