2 * tramp-amd64.c: JIT trampoline code for amd64
5 * Dietmar Maurer (dietmar@ximian.com)
6 * Zoltan Varga (vargaz@gmail.com)
8 * (C) 2001 Ximian, Inc.
9 * Copyright 2003-2011 Novell, Inc (http://www.novell.com)
10 * Copyright 2011 Xamarin, Inc (http://www.xamarin.com)
11 * Licensed under the MIT license. See LICENSE file in the project root for full license information.
17 #include <mono/metadata/abi-details.h>
18 #include <mono/metadata/appdomain.h>
19 #include <mono/metadata/marshal.h>
20 #include <mono/metadata/tabledefs.h>
21 #include <mono/metadata/profiler-private.h>
22 #include <mono/metadata/gc-internals.h>
23 #include <mono/arch/amd64/amd64-codegen.h>
25 #include <mono/utils/memcheck.h>
28 #include "mini-amd64.h"
29 #include "debugger-agent.h"
/* Round VAL up to the next multiple of ALIGN (ALIGN must be a power of two). */
31 #define ALIGN_TO(val,align) ((((guint64)val) + ((align) - 1)) & ~((align) - 1))
/* TRUE if INST is an amd64 REX prefix byte (0x40-0x4f). */
33 #define IS_REX(inst) (((inst) >= 0x40) && ((inst) <= 0x4f))
37 * mono_arch_get_unbox_trampoline:
39 * @addr: pointer to native code for @m
41 * when value type methods are called through the vtable we need to unbox the
42 * this argument. This method returns a pointer to a trampoline which does
43 * unboxing before calling the method
46 mono_arch_get_unbox_trampoline (MonoMethod *m, gpointer addr)
/*
 * Emits a tiny trampoline that adds sizeof (MonoObject) to the 'this' argument
 * register (skipping the object header, i.e. unboxing the receiver) and then
 * tail-jumps to ADDR through RAX.
 * NOTE(review): some lines of this function appear elided in this extraction.
 */
50 int this_reg, size = 20;
52 MonoDomain *domain = mono_domain_get ();
54 this_reg = mono_arch_get_this_arg_reg (NULL);
/* Reserve SIZE bytes of executable memory from the domain code manager */
56 start = code = (guint8 *)mono_domain_code_reserve (domain, size);
58 unwind_ops = mono_arch_get_cie_program ();
/* Advance 'this' past the MonoObject header to the value-type payload */
60 amd64_alu_reg_imm (code, X86_ADD, this_reg, sizeof (MonoObject));
61 /* FIXME: Optimize this */
62 amd64_mov_reg_imm (code, AMD64_RAX, addr);
63 amd64_jump_reg (code, AMD64_RAX);
64 g_assert ((code - start) < size);
/* Make the freshly emitted bytes visible to the instruction fetch unit */
66 mono_arch_flush_icache (start, code - start);
67 mono_profiler_code_buffer_new (start, code - start, MONO_PROFILER_CODE_BUFFER_UNBOX_TRAMPOLINE, m);
69 mono_tramp_info_register (mono_tramp_info_create (NULL, start, code - start, NULL, unwind_ops), domain);
75 * mono_arch_get_static_rgctx_trampoline:
77 * Create a trampoline which sets RGCTX_REG to MRGCTX, then jumps to ADDR.
80 mono_arch_get_static_rgctx_trampoline (MonoMethod *m, MonoMethodRuntimeGenericContext *mrgctx, gpointer addr)
/*
 * Emits: mov RGCTX_REG, mrgctx; jmp addr.
 * NOTE(review): the buf_len computation and part of the #ifdef arms are elided
 * in this extraction — the branch below appears to size the buffer depending
 * on whether ADDR fits in 32 bits.
 */
86 MonoDomain *domain = mono_domain_get ();
88 #ifdef MONO_ARCH_NOMAP32BIT
91 /* AOTed code could still have a non-32 bit address */
92 if ((((guint64)addr) >> 32) == 0)
98 start = code = (guint8 *)mono_domain_code_reserve (domain, buf_len);
100 unwind_ops = mono_arch_get_cie_program ();
/* Load the method RGCTX into the dedicated register, then tail-jump to ADDR */
102 amd64_mov_reg_imm (code, MONO_ARCH_RGCTX_REG, mrgctx);
103 amd64_jump_code (code, addr);
104 g_assert ((code - start) < buf_len);
106 mono_arch_flush_icache (start, code - start);
107 mono_profiler_code_buffer_new (start, code - start, MONO_PROFILER_CODE_BUFFER_GENERICS_TRAMPOLINE, NULL);
109 mono_tramp_info_register (mono_tramp_info_create (NULL, start, code - start, NULL, unwind_ops), domain);
113 #endif /* !DISABLE_JIT */
116 // Workaround lack of Valgrind support for 64-bit Windows
/* No-op stub: discards nothing when Valgrind's client requests are unavailable */
117 #define VALGRIND_DISCARD_TRANSLATIONS(...)
121 * mono_arch_patch_callsite:
123 * Patch the callsite whose address is given by ORIG_CODE so it calls ADDR. ORIG_CODE
124 * points to the pc right after the call.
127 mono_arch_patch_callsite (guint8 *method_start, guint8 *orig_code, guint8 *addr)
/*
 * Patches the call site ending at ORIG_CODE so that it calls ADDR.
 * Recognizes three encodings: mov r11, imm64 + call r11; call rel32; and
 * call *<disp>(%rip) through a GOT slot. All patches are done with atomic
 * pointer/word exchanges so concurrently executing threads see either the
 * old or the new target, never a torn write.
 * NOTE(review): several lines (declarations, braces) are elided in this extraction.
 */
131 gboolean can_write = mono_breakpoint_clean_code (method_start, orig_code, 14, buf, sizeof (buf));
135 /* mov 64-bit imm into r11 (followed by call reg?) or direct call*/
136 if (((code [-13] == 0x49) && (code [-12] == 0xbb)) || (code [-5] == 0xe8)) {
137 if (code [-5] != 0xe8) {
/* mov r11, imm64 form: atomically overwrite the 8-byte immediate */
139 InterlockedExchangePointer ((gpointer*)(orig_code - 11), addr);
140 VALGRIND_DISCARD_TRANSLATIONS (orig_code - 11, sizeof (gpointer));
/* call rel32 form: the new displacement must fit in a signed 32-bit field */
143 gboolean disp_32bit = ((((gint64)addr - (gint64)orig_code)) < (1 << 30)) && ((((gint64)addr - (gint64)orig_code)) > -(1 << 30));
145 if ((((guint64)(addr)) >> 32) != 0 && !disp_32bit) {
147 * This might happen with LLVM or when calling AOTed code. Create a thunk.
149 guint8 *thunk_start, *thunk_code;
/* Thunk layout: jmp *0(%rip) followed immediately by the 64-bit target */
151 thunk_start = thunk_code = (guint8 *)mono_domain_code_reserve (mono_domain_get (), 32);
152 amd64_jump_membase (thunk_code, AMD64_RIP, 0);
153 *(guint64*)thunk_code = (guint64)addr;
155 g_assert ((((guint64)(addr)) >> 32) == 0);
156 mono_arch_flush_icache (thunk_start, thunk_code - thunk_start);
157 mono_profiler_code_buffer_new (thunk_start, thunk_code - thunk_start, MONO_PROFILER_CODE_BUFFER_HELPER, NULL);
/* Atomically rewrite the 32-bit displacement of the call rel32 */
160 InterlockedExchange ((gint32*)(orig_code - 4), ((gint64)addr - (gint64)orig_code));
161 VALGRIND_DISCARD_TRANSLATIONS (orig_code - 5, 4);
165 else if ((code [-7] == 0x41) && (code [-6] == 0xff) && (code [-5] == 0x15)) {
166 /* call *<OFFSET>(%rip) */
167 gpointer *got_entry = (gpointer*)((guint8*)orig_code + (*(guint32*)(orig_code - 4)));
/* Indirect call: redirect by swapping the GOT slot, not the code */
169 InterlockedExchangePointer (got_entry, addr);
170 VALGRIND_DISCARD_TRANSLATIONS (orig_code - 5, sizeof (gpointer));
177 mono_arch_create_llvm_native_thunk (MonoDomain *domain, guint8 *addr)
180 * The caller is LLVM code and the call displacement might exceed 32 bits. We can't determine the caller address, so
181 * we add a thunk every time.
182 * Since the caller is also allocated using the domain code manager, hopefully the displacement will fit into 32 bits.
183 * FIXME: Avoid this if possible if !MONO_ARCH_NOMAP32BIT and ADDR is 32 bits.
185 guint8 *thunk_start, *thunk_code;
/* Thunk layout: jmp *0(%rip) followed immediately by the 64-bit target address */
187 thunk_start = thunk_code = (guint8 *)mono_domain_code_reserve (mono_domain_get (), 32);
188 amd64_jump_membase (thunk_code, AMD64_RIP, 0);
189 *(guint64*)thunk_code = (guint64)addr;
191 mono_arch_flush_icache (thunk_start, thunk_code - thunk_start);
192 mono_profiler_code_buffer_new (thunk_start, thunk_code - thunk_start, MONO_PROFILER_CODE_BUFFER_HELPER, NULL);
195 #endif /* !DISABLE_JIT */
198 mono_arch_patch_plt_entry (guint8 *code, gpointer *got, mgreg_t *regs, guint8 *addr)
/*
 * Redirects a PLT entry (jmp *<disp>(%rip)) to ADDR by atomically
 * updating the jump-table slot the entry dispatches through.
 */
201 gpointer *plt_jump_table_entry;
203 /* A PLT entry: jmp *<DISP>(%rip) */
204 g_assert (code [0] == 0xff);
205 g_assert (code [1] == 0x25);
/* The rip-relative displacement is encoded after the 2-byte opcode */
207 disp = *(gint32*)(code + 2);
/* rip at dispatch time is code + 6 (the full instruction length) */
209 plt_jump_table_entry = (gpointer*)(code + 6 + disp);
211 InterlockedExchangePointer (plt_jump_table_entry, addr);
216 stack_unaligned (MonoTrampolineType tramp_type)
/* Diagnostic helper invoked by the generic trampoline's stack-alignment
 * check; prints the offending trampoline type and aborts. Never returns. */
218 printf ("%d\n", tramp_type);
219 g_assert_not_reached ();
223 mono_arch_create_generic_trampoline (MonoTrampolineType tramp_type, MonoTrampInfo **info, gboolean aot)
/*
 * Emits the shared (generic) trampoline for TRAMP_TYPE. At run time it:
 *  - saves the full register state into a MonoContext on its stack frame,
 *  - pushes a MonoLMFTramp onto the LMF list so the stack is walkable,
 *  - decodes the trampoline-specific argument from the instruction stream
 *    of the calling specific trampoline (or from the GOT when AOTed),
 *  - calls the C trampoline function for TRAMP_TYPE,
 *  - pops the LMF, restores registers and jumps to (or returns) the result.
 * NOTE(review): a number of lines (braces, ex_offset setup, br[] assignments)
 * are elided in this extraction; comments below only describe what is visible.
 */
226 guint8 *buf, *code, *tramp, *br [2], *r11_save_code, *after_r11_save_code, *br_ex_check;
227 int i, lmf_offset, offset, res_offset, arg_offset, rax_offset, ex_offset, tramp_offset, ctx_offset, saved_regs_offset;
228 int r11_save_offset, saved_fpregs_offset, rbp_offset, framesize, orig_rsp_to_rbp_offset, cfa_offset;
230 GSList *unwind_ops = NULL;
231 MonoJumpInfo *ji = NULL;
232 const guint kMaxCodeSize = 630;
234 if (tramp_type == MONO_TRAMPOLINE_JUMP || tramp_type == MONO_TRAMPOLINE_HANDLER_BLOCK_GUARD)
239 code = buf = (guint8 *)mono_global_codeman_reserve (kMaxCodeSize);
241 /* Compute stack frame size and offsets */
/* All offsets below are negative displacements from RBP inside the frame */
243 rbp_offset = -offset;
245 offset += sizeof(mgreg_t);
246 rax_offset = -offset;
248 offset += sizeof(mgreg_t);
251 offset += sizeof(mgreg_t);
252 r11_save_offset = -offset;
254 offset += sizeof(mgreg_t);
255 tramp_offset = -offset;
257 offset += sizeof(gpointer);
258 arg_offset = -offset;
260 offset += sizeof(mgreg_t);
261 res_offset = -offset;
263 offset += sizeof (MonoContext);
264 ctx_offset = -offset;
265 saved_regs_offset = ctx_offset + MONO_STRUCT_OFFSET (MonoContext, gregs);
266 saved_fpregs_offset = ctx_offset + MONO_STRUCT_OFFSET (MonoContext, fregs);
268 offset += sizeof (MonoLMFTramp);
269 lmf_offset = -offset;
272 /* Reserve space where the callee can save the argument registers */
273 offset += 4 * sizeof (mgreg_t);
276 framesize = ALIGN_TO (offset, MONO_ARCH_FRAME_ALIGNMENT);
278 // CFA = sp + 16 (the trampoline address is on the stack)
280 mono_add_unwind_op_def_cfa (unwind_ops, code, buf, AMD64_RSP, 16);
281 // IP saved at CFA - 8
282 mono_add_unwind_op_offset (unwind_ops, code, buf, AMD64_RIP, -8);
284 orig_rsp_to_rbp_offset = 0;
285 r11_save_code = code;
286 /* Reserve space for the mov_membase_reg to save R11 */
288 after_r11_save_code = code;
290 /* Pop the return address off the stack */
291 amd64_pop_reg (code, AMD64_R11);
292 orig_rsp_to_rbp_offset += sizeof(mgreg_t);
294 cfa_offset -= sizeof(mgreg_t);
295 mono_add_unwind_op_def_cfa_offset (unwind_ops, code, buf, cfa_offset);
298 * Allocate a new stack frame
300 amd64_push_reg (code, AMD64_RBP);
301 cfa_offset += sizeof(mgreg_t);
302 mono_add_unwind_op_def_cfa_offset (unwind_ops, code, buf, cfa_offset);
303 mono_add_unwind_op_offset (unwind_ops, code, buf, AMD64_RBP, - cfa_offset);
305 orig_rsp_to_rbp_offset -= sizeof(mgreg_t);
306 amd64_mov_reg_reg (code, AMD64_RBP, AMD64_RSP, sizeof(mgreg_t));
307 mono_add_unwind_op_def_cfa_reg (unwind_ops, code, buf, AMD64_RBP);
308 amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, framesize);
310 /* Compute the trampoline address from the return address */
312 /* 7 = length of call *<offset>(rip) */
313 amd64_alu_reg_imm (code, X86_SUB, AMD64_R11, 7);
315 /* 5 = length of amd64_call_membase () */
316 amd64_alu_reg_imm (code, X86_SUB, AMD64_R11, 5);
/* Stash the computed caller trampoline address for later argument decoding */
318 amd64_mov_membase_reg (code, AMD64_RBP, tramp_offset, AMD64_R11, sizeof(gpointer));
320 /* Save all registers */
321 for (i = 0; i < AMD64_NREG; ++i) {
322 if (i == AMD64_RBP) {
323 /* RAX is already saved */
324 amd64_mov_reg_membase (code, AMD64_RAX, AMD64_RBP, rbp_offset, sizeof(mgreg_t));
325 amd64_mov_membase_reg (code, AMD64_RBP, saved_regs_offset + (i * sizeof(mgreg_t)), AMD64_RAX, sizeof(mgreg_t));
326 } else if (i == AMD64_RIP) {
328 amd64_mov_reg_membase (code, AMD64_R11, AMD64_RBP, 8, sizeof(gpointer));
330 amd64_mov_reg_imm (code, AMD64_R11, 0);
331 amd64_mov_membase_reg (code, AMD64_RBP, saved_regs_offset + (i * sizeof(mgreg_t)), AMD64_R11, sizeof(mgreg_t));
332 } else if (i == AMD64_RSP) {
/* Reconstruct the caller's RSP: undo our frame and the two pushed words */
333 amd64_mov_reg_reg (code, AMD64_R11, AMD64_RSP, sizeof(mgreg_t));
334 amd64_alu_reg_imm (code, X86_ADD, AMD64_R11, framesize + 16);
335 amd64_mov_membase_reg (code, AMD64_RBP, saved_regs_offset + (i * sizeof(mgreg_t)), AMD64_R11, sizeof(mgreg_t));
336 } else if (i != AMD64_R11) {
337 amd64_mov_membase_reg (code, AMD64_RBP, saved_regs_offset + (i * sizeof(mgreg_t)), i, sizeof(mgreg_t));
339 /* We have to save R11 right at the start of
340 the trampoline code because it's used as a
342 /* This happens before the frame is set up, so it goes into the redzone */
343 amd64_mov_membase_reg (r11_save_code, AMD64_RSP, r11_save_offset + orig_rsp_to_rbp_offset, i, sizeof(mgreg_t));
344 g_assert (r11_save_code == after_r11_save_code);
346 /* Copy from the save slot into the register array slot */
347 amd64_mov_reg_membase (code, i, AMD64_RSP, r11_save_offset + orig_rsp_to_rbp_offset, sizeof(mgreg_t));
348 amd64_mov_membase_reg (code, AMD64_RBP, saved_regs_offset + (i * sizeof(mgreg_t)), i, sizeof(mgreg_t));
350 /* cfa = rbp + cfa_offset */
351 mono_add_unwind_op_offset (unwind_ops, code, buf, i, - cfa_offset + saved_regs_offset + (i * sizeof (mgreg_t)));
/* Save the first 8 XMM registers into the context's fregs array */
353 for (i = 0; i < 8; ++i)
354 amd64_movsd_membase_reg (code, AMD64_RBP, saved_fpregs_offset + (i * sizeof(mgreg_t)), i);
356 /* Check that the stack is aligned */
357 amd64_mov_reg_reg (code, AMD64_R11, AMD64_RSP, sizeof (mgreg_t));
358 amd64_alu_reg_imm (code, X86_AND, AMD64_R11, 15);
359 amd64_alu_reg_imm (code, X86_CMP, AMD64_R11, 0);
361 amd64_branch_disp (code, X86_CC_Z, 0, FALSE);
/* Misaligned: force a crash (load from NULL), then report via stack_unaligned */
363 amd64_mov_reg_imm (code, AMD64_R11, 0);
364 amd64_mov_reg_membase (code, AMD64_R11, AMD64_R11, 0, 8);
366 amd64_mov_reg_imm (code, MONO_AMD64_ARG_REG1, tramp_type);
367 amd64_mov_reg_imm (code, AMD64_R11, stack_unaligned);
368 amd64_call_reg (code, AMD64_R11);
370 mono_amd64_patch (br [0], code);
371 //amd64_breakpoint (code);
373 if (tramp_type != MONO_TRAMPOLINE_HANDLER_BLOCK_GUARD) {
374 /* Obtain the trampoline argument which is encoded in the instruction stream */
376 /* Load the GOT offset */
377 amd64_mov_reg_membase (code, AMD64_R11, AMD64_RBP, tramp_offset, sizeof(gpointer));
379 * r11 points to a call *<offset>(%rip) instruction, load the
380 * pc-relative offset from the instruction itself.
382 amd64_mov_reg_membase (code, AMD64_RAX, AMD64_R11, 3, 4);
383 /* 7 is the length of the call, 8 is the offset to the next got slot */
384 amd64_alu_reg_imm_size (code, X86_ADD, AMD64_RAX, 7 + sizeof (gpointer), sizeof(gpointer));
385 /* Compute the address of the GOT slot */
386 amd64_alu_reg_reg_size (code, X86_ADD, AMD64_R11, AMD64_RAX, sizeof(gpointer));
/* Load the argument itself from the GOT slot */
388 amd64_mov_reg_membase (code, AMD64_R11, AMD64_R11, 0, sizeof(gpointer));
/* Non-AOT: the specific trampoline encodes the argument inline; byte 5 of the
 * mov selects between a 32-bit (0xb8+?) and a 64-bit immediate encoding */
390 amd64_mov_reg_membase (code, AMD64_R11, AMD64_RBP, tramp_offset, sizeof(gpointer));
391 amd64_mov_reg_membase (code, AMD64_RAX, AMD64_R11, 5, 1);
392 amd64_widen_reg (code, AMD64_RAX, AMD64_RAX, TRUE, FALSE);
393 amd64_alu_reg_imm_size (code, X86_CMP, AMD64_RAX, 4, 1);
395 x86_branch8 (code, X86_CC_NE, 6, FALSE);
396 /* 32 bit immediate */
397 amd64_mov_reg_membase (code, AMD64_R11, AMD64_R11, 6, 4);
399 x86_jump8 (code, 10);
400 /* 64 bit immediate */
401 mono_amd64_patch (br [0], code);
402 amd64_mov_reg_membase (code, AMD64_R11, AMD64_R11, 6, 8);
403 mono_amd64_patch (br [1], code);
405 amd64_mov_membase_reg (code, AMD64_RBP, arg_offset, AMD64_R11, sizeof(gpointer));
/* HANDLER_BLOCK_GUARD: the argument is the first integer argument register */
407 amd64_mov_reg_membase (code, AMD64_R11, AMD64_RBP, saved_regs_offset + (MONO_AMD64_ARG_REG1 * sizeof(mgreg_t)), sizeof(mgreg_t));
408 amd64_mov_membase_reg (code, AMD64_RBP, arg_offset, AMD64_R11, sizeof(gpointer));
/* Set up the MonoLMFTramp: rip, rsp and a pointer to the saved context */
415 amd64_mov_reg_membase (code, AMD64_R11, AMD64_RBP, 8, sizeof(gpointer));
417 amd64_mov_reg_imm (code, AMD64_R11, 0);
418 amd64_mov_membase_reg (code, AMD64_RBP, lmf_offset + MONO_STRUCT_OFFSET (MonoLMF, rip), AMD64_R11, sizeof(mgreg_t));
420 amd64_mov_reg_reg (code, AMD64_R11, AMD64_RSP, sizeof(mgreg_t));
421 amd64_alu_reg_imm (code, X86_ADD, AMD64_R11, framesize + 16);
422 amd64_mov_membase_reg (code, AMD64_RBP, lmf_offset + MONO_STRUCT_OFFSET (MonoLMF, rsp), AMD64_R11, sizeof(mgreg_t));
423 /* Save pointer to context */
424 amd64_lea_membase (code, AMD64_R11, AMD64_RBP, ctx_offset);
425 amd64_mov_membase_reg (code, AMD64_RBP, lmf_offset + MONO_STRUCT_OFFSET (MonoLMFTramp, ctx), AMD64_R11, sizeof(mgreg_t));
/* Get the LMF address: via the GOT when AOTed, otherwise as an immediate */
428 code = mono_arch_emit_load_aotconst (buf, code, &ji, MONO_PATCH_INFO_JIT_ICALL_ADDR, "mono_get_lmf_addr");
430 amd64_mov_reg_imm (code, AMD64_R11, mono_get_lmf_addr);
432 amd64_call_reg (code, AMD64_R11);
/* Link the new LMF into the per-thread LMF list */
435 amd64_mov_membase_reg (code, AMD64_RBP, lmf_offset + MONO_STRUCT_OFFSET (MonoLMFTramp, lmf_addr), AMD64_RAX, sizeof(gpointer));
436 /* Save previous_lmf */
437 /* Set the lowest bit to signal that this LMF has the ip field set */
438 /* Set the third lowest bit to signal that this is a MonoLMFTramp structure */
439 amd64_mov_reg_membase (code, AMD64_R11, AMD64_RAX, 0, sizeof(gpointer));
440 amd64_alu_reg_imm_size (code, X86_ADD, AMD64_R11, 0x5, sizeof(gpointer));
441 amd64_mov_membase_reg (code, AMD64_RBP, lmf_offset + MONO_STRUCT_OFFSET (MonoLMF, previous_lmf), AMD64_R11, sizeof(gpointer));
443 amd64_lea_membase (code, AMD64_R11, AMD64_RBP, lmf_offset);
444 amd64_mov_membase_reg (code, AMD64_RAX, 0, AMD64_R11, sizeof(gpointer));
448 /* Arg1 is the pointer to the saved registers */
449 amd64_lea_membase (code, AMD64_ARG_REG1, AMD64_RBP, saved_regs_offset);
451 /* Arg2 is the address of the calling code */
453 amd64_mov_reg_membase (code, AMD64_ARG_REG2, AMD64_RBP, 8, sizeof(gpointer));
455 amd64_mov_reg_imm (code, AMD64_ARG_REG2, 0);
457 /* Arg3 is the method/vtable ptr */
458 amd64_mov_reg_membase (code, AMD64_ARG_REG3, AMD64_RBP, arg_offset, sizeof(gpointer));
460 /* Arg4 is the trampoline address */
461 amd64_mov_reg_membase (code, AMD64_ARG_REG4, AMD64_RBP, tramp_offset, sizeof(gpointer));
/* Call the C trampoline function for this trampoline type */
464 char *icall_name = g_strdup_printf ("trampoline_func_%d", tramp_type);
465 code = mono_arch_emit_load_aotconst (buf, code, &ji, MONO_PATCH_INFO_JIT_ICALL_ADDR, icall_name);
467 tramp = (guint8*)mono_get_trampoline_func (tramp_type);
468 amd64_mov_reg_imm (code, AMD64_R11, tramp);
470 amd64_call_reg (code, AMD64_R11);
471 amd64_mov_membase_reg (code, AMD64_RBP, res_offset, AMD64_RAX, sizeof(mgreg_t));
/* Pop our LMF: undo the tag bits and restore *lmf_addr = previous_lmf */
474 amd64_mov_reg_membase (code, AMD64_RCX, AMD64_RBP, lmf_offset + MONO_STRUCT_OFFSET (MonoLMF, previous_lmf), sizeof(gpointer));
475 amd64_alu_reg_imm_size (code, X86_SUB, AMD64_RCX, 0x5, sizeof(gpointer));
476 amd64_mov_reg_membase (code, AMD64_R11, AMD64_RBP, lmf_offset + MONO_STRUCT_OFFSET (MonoLMFTramp, lmf_addr), sizeof(gpointer));
477 amd64_mov_membase_reg (code, AMD64_R11, 0, AMD64_RCX, sizeof(gpointer));
480 * Save rax to the stack, after the leave instruction, this will become part of
483 amd64_mov_reg_membase (code, AMD64_RAX, AMD64_RBP, res_offset, sizeof(mgreg_t));
484 amd64_mov_membase_reg (code, AMD64_RBP, rax_offset, AMD64_RAX, sizeof(mgreg_t));
486 /* Check for thread interruption */
487 /* This is not perf critical code so no need to check the interrupt flag */
489 * Have to call the _force_ variant, since there could be a protected wrapper on the top of the stack.
492 code = mono_arch_emit_load_aotconst (buf, code, &ji, MONO_PATCH_INFO_JIT_ICALL_ADDR, "mono_thread_force_interruption_checkpoint_noraise");
494 amd64_mov_reg_imm (code, AMD64_R11, (guint8*)mono_thread_force_interruption_checkpoint_noraise);
496 amd64_call_reg (code, AMD64_R11);
/* Non-NULL return value means an exception must be thrown in the caller */
498 amd64_test_reg_reg (code, AMD64_RAX, AMD64_RAX);
500 amd64_branch8 (code, X86_CC_Z, -1, 1);
504 * We have an exception we want to throw in the caller's frame, so pop
505 * the trampoline frame and throw from the caller.
508 /* We are in the parent frame, the exception is in rax */
510 * EH is initialized after trampolines, so get the address of the variable
511 * which contains throw_exception, and load it from there.
514 /* Not really a jit icall */
515 code = mono_arch_emit_load_aotconst (buf, code, &ji, MONO_PATCH_INFO_JIT_ICALL_ADDR, "throw_exception_addr");
517 amd64_mov_reg_imm (code, AMD64_R11, (guint8*)mono_get_throw_exception_addr ());
519 amd64_mov_reg_membase (code, AMD64_R11, AMD64_R11, 0, sizeof(gpointer));
520 amd64_mov_reg_reg (code, AMD64_ARG_REG1, AMD64_RAX, sizeof(mgreg_t));
522 * We still have the original return value on the top of the stack, so the
523 * throw trampoline will use that as the throw site.
525 amd64_jump_reg (code, AMD64_R11);
528 mono_amd64_patch (br_ex_check, code);
530 /* Restore argument registers, r10 (imt method/rgctx)
531 and rax (needed for direct calls to C vararg functions). */
532 for (i = 0; i < AMD64_NREG; ++i)
533 if (AMD64_IS_ARGUMENT_REG (i) || i == AMD64_R10 || i == AMD64_RAX)
534 amd64_mov_reg_membase (code, i, AMD64_RBP, saved_regs_offset + (i * sizeof(mgreg_t)), sizeof(mgreg_t));
535 for (i = 0; i < 8; ++i)
536 amd64_movsd_reg_membase (code, i, AMD64_RBP, saved_fpregs_offset + (i * sizeof(mgreg_t)));
/* Epilogue: tear the frame down; the trampoline result sits below RSP */
540 cfa_offset -= sizeof (mgreg_t);
541 mono_add_unwind_op_def_cfa (unwind_ops, code, buf, AMD64_RSP, cfa_offset);
543 if (MONO_TRAMPOLINE_TYPE_MUST_RETURN (tramp_type)) {
545 amd64_mov_reg_membase (code, AMD64_RAX, AMD64_RSP, rax_offset - sizeof(mgreg_t), sizeof(mgreg_t));
548 /* call the compiled method using the saved rax */
549 amd64_jump_membase (code, AMD64_RSP, rax_offset - sizeof(mgreg_t));
552 g_assert ((code - buf) <= kMaxCodeSize);
554 mono_arch_flush_icache (buf, code - buf);
555 mono_profiler_code_buffer_new (buf, code - buf, MONO_PROFILER_CODE_BUFFER_HELPER, NULL);
557 tramp_name = mono_get_generic_trampoline_name (tramp_type);
558 *info = mono_tramp_info_create (tramp_name, buf, code - buf, ji, unwind_ops);
565 mono_arch_create_specific_trampoline (gpointer arg1, MonoTrampolineType tramp_type, MonoDomain *domain, guint32 *code_len)
/*
 * Emits a specific trampoline: a call to the generic trampoline for
 * TRAMP_TYPE followed by ARG1 encoded inline in the instruction stream
 * (32-bit when ARG1 fits, 64-bit otherwise); the generic trampoline
 * decodes it from there.
 * NOTE(review): the size computation and several branch bodies are elided
 * in this extraction.
 */
567 guint8 *code, *buf, *tramp;
569 gboolean far_addr = FALSE;
571 tramp = mono_get_trampoline_code (tramp_type);
573 if ((((guint64)arg1) >> 32) == 0)
578 code = buf = (guint8 *)mono_domain_code_reserve_align (domain, size, 1);
/* Direct call rel32 only works when TRAMP is within +/-2GB of the call site */
580 if (((gint64)tramp - (gint64)code) >> 31 != 0 && ((gint64)tramp - (gint64)code) >> 31 != -1) {
581 #ifndef MONO_ARCH_NOMAP32BIT
582 g_assert_not_reached ();
586 code = buf = (guint8 *)mono_domain_code_reserve_align (domain, size, 1);
/* Far target: call indirectly through R11 */
590 amd64_mov_reg_imm (code, AMD64_R11, tramp);
591 amd64_call_reg (code, AMD64_R11);
593 amd64_call_code (code, tramp);
595 /* The trampoline code will obtain the argument from the instruction stream */
596 if ((((guint64)arg1) >> 32) == 0) {
598 *(guint32*)(code + 1) = (gint64)arg1;
602 *(guint64*)(code + 1) = (gint64)arg1;
606 g_assert ((code - buf) <= size);
611 mono_arch_flush_icache (buf, size);
612 mono_profiler_code_buffer_new (buf, code - buf, MONO_PROFILER_CODE_BUFFER_SPECIFIC_TRAMPOLINE, mono_get_generic_trampoline_simple_name (tramp_type));
618 mono_arch_create_rgctx_lazy_fetch_trampoline (guint32 slot, MonoTrampInfo **info, gboolean aot)
/*
 * Emits the lazy-fetch trampoline for rgctx SLOT: it walks the (m)rgctx
 * array chain in RAX, returning the slot's value when it is already
 * initialized, and falls through to the slow-path RGCTX_LAZY_FETCH
 * trampoline when any pointer along the chain is still NULL.
 * NOTE(review): some lines (declarations, return paths, braces) are elided
 * in this extraction.
 */
622 guint8 **rgctx_null_jumps;
627 MonoJumpInfo *ji = NULL;
/* Decode slot encoding: method-rgctx vs class-vtable rgctx, and array index */
630 mrgctx = MONO_RGCTX_SLOT_IS_MRGCTX (slot);
631 index = MONO_RGCTX_SLOT_INDEX (slot);
633 index += MONO_SIZEOF_METHOD_RUNTIME_GENERIC_CONTEXT / sizeof (gpointer);
/* Find how many array hops are needed to reach INDEX */
634 for (depth = 0; ; ++depth) {
635 int size = mono_class_rgctx_get_array_size (depth, mrgctx);
637 if (index < size - 1)
642 tramp_size = 64 + 8 * depth;
644 code = buf = (guint8 *)mono_global_codeman_reserve (tramp_size);
646 unwind_ops = mono_arch_get_cie_program ();
/* One forward branch per possible NULL along the chain, plus entry and slot */
648 rgctx_null_jumps = (guint8 **)g_malloc (sizeof (guint8*) * (depth + 2));
652 amd64_mov_reg_reg (code, AMD64_RAX, AMD64_ARG_REG1, 8);
654 /* load rgctx ptr from vtable */
655 amd64_mov_reg_membase (code, AMD64_RAX, AMD64_ARG_REG1, MONO_STRUCT_OFFSET (MonoVTable, runtime_generic_context), sizeof(gpointer));
656 /* is the rgctx ptr null? */
657 amd64_test_reg_reg (code, AMD64_RAX, AMD64_RAX);
658 /* if yes, jump to actual trampoline */
659 rgctx_null_jumps [0] = code;
660 amd64_branch8 (code, X86_CC_Z, -1, 1);
663 for (i = 0; i < depth; ++i) {
664 /* load ptr to next array */
665 if (mrgctx && i == 0)
666 amd64_mov_reg_membase (code, AMD64_RAX, AMD64_RAX, MONO_SIZEOF_METHOD_RUNTIME_GENERIC_CONTEXT, sizeof(gpointer));
668 amd64_mov_reg_membase (code, AMD64_RAX, AMD64_RAX, 0, sizeof(gpointer));
669 /* is the ptr null? */
670 amd64_test_reg_reg (code, AMD64_RAX, AMD64_RAX);
671 /* if yes, jump to actual trampoline */
672 rgctx_null_jumps [i + 1] = code;
673 amd64_branch8 (code, X86_CC_Z, -1, 1);
/* Load the requested slot (+1 skips the next-array pointer at element 0) */
677 amd64_mov_reg_membase (code, AMD64_RAX, AMD64_RAX, sizeof (gpointer) * (index + 1), sizeof(gpointer));
678 /* is the slot null? */
679 amd64_test_reg_reg (code, AMD64_RAX, AMD64_RAX);
680 /* if yes, jump to actual trampoline */
681 rgctx_null_jumps [depth + 1] = code;
682 amd64_branch8 (code, X86_CC_Z, -1, 1);
683 /* otherwise return */
/* Slow path: patch all NULL-check branches to land here */
686 for (i = mrgctx ? 1 : 0; i <= depth + 1; ++i)
687 mono_amd64_patch (rgctx_null_jumps [i], code);
689 g_free (rgctx_null_jumps);
691 /* move the rgctx pointer to the VTABLE register */
692 amd64_mov_reg_reg (code, MONO_ARCH_VTABLE_REG, AMD64_ARG_REG1, sizeof(gpointer));
695 code = mono_arch_emit_load_aotconst (buf, code, &ji, MONO_PATCH_INFO_JIT_ICALL_ADDR, g_strdup_printf ("specific_trampoline_lazy_fetch_%u", slot));
696 amd64_jump_reg (code, AMD64_R11);
698 tramp = (guint8 *)mono_arch_create_specific_trampoline (GUINT_TO_POINTER (slot), MONO_TRAMPOLINE_RGCTX_LAZY_FETCH, mono_get_root_domain (), NULL);
700 /* jump to the actual trampoline */
701 amd64_jump_code (code, tramp);
704 mono_arch_flush_icache (buf, code - buf);
705 mono_profiler_code_buffer_new (buf, code - buf, MONO_PROFILER_CODE_BUFFER_GENERICS_TRAMPOLINE, NULL);
707 g_assert (code - buf <= tramp_size);
709 char *name = mono_get_rgctx_fetch_trampoline_name (slot);
710 *info = mono_tramp_info_create (name, buf, code - buf, ji, unwind_ops);
717 mono_arch_create_general_rgctx_lazy_fetch_trampoline (MonoTrampInfo **info, gboolean aot)
/*
 * Emits the general (slot-independent) rgctx fetch trampoline used when a
 * per-slot trampoline is not available.
 */
721 MonoJumpInfo *ji = NULL;
727 code = buf = (guint8 *)mono_global_codeman_reserve (tramp_size);
729 unwind_ops = mono_arch_get_cie_program ();
731 // FIXME: Currently, we always go to the slow path.
732 /* This receives a <slot, trampoline> in the rgctx arg reg. */
733 /* Load trampoline addr */
734 amd64_mov_reg_membase (code, AMD64_R11, MONO_ARCH_RGCTX_REG, 8, 8);
735 /* move the rgctx pointer to the VTABLE register */
736 amd64_mov_reg_reg (code, MONO_ARCH_VTABLE_REG, AMD64_ARG_REG1, sizeof(gpointer));
737 /* Jump to the trampoline */
738 amd64_jump_reg (code, AMD64_R11);
740 mono_arch_flush_icache (buf, code - buf);
741 mono_profiler_code_buffer_new (buf, code - buf, MONO_PROFILER_CODE_BUFFER_GENERICS_TRAMPOLINE, NULL);
743 g_assert (code - buf <= tramp_size);
746 *info = mono_tramp_info_create ("rgctx_fetch_trampoline_general", buf, code - buf, ji, unwind_ops);
752 mono_arch_invalidate_method (MonoJitInfo *ji, void *func, gpointer func_arg)
/*
 * Overwrites the start of JI's compiled code so any future call into it
 * invokes FUNC (with FUNC_ARG in the first argument register) instead.
 */
754 /* FIXME: This is not thread safe */
755 guint8 *code = (guint8 *)ji->code_start;
757 amd64_mov_reg_imm (code, AMD64_ARG_REG1, func_arg);
758 amd64_mov_reg_imm (code, AMD64_R11, func);
760 x86_push_imm (code, (guint64)func_arg);
761 amd64_call_reg (code, AMD64_R11);
763 #endif /* !DISABLE_JIT */
766 mono_amd64_handler_block_trampoline_helper (void)
/* Returns the handler-block return address previously stashed in this
 * thread's JIT TLS; used by the handler block guard trampoline below. */
768 MonoJitTlsData *jit_tls = (MonoJitTlsData *)mono_tls_get_jit_tls ();
769 return jit_tls->handler_block_return_address;
774 mono_arch_create_handler_block_trampoline (MonoTrampInfo **info, gboolean aot)
/*
 * Emits the trampoline run after a finally block guarded by
 * MONO_TRAMPOLINE_HANDLER_BLOCK_GUARD: it fetches the real return address
 * from JIT TLS, notifies the trampoline function, then resumes at that
 * address.
 * NOTE(review): the aot/non-aot #ifdef arms are interleaved below; some
 * lines (declarations, braces) are elided in this extraction.
 */
778 MonoJumpInfo *ji = NULL;
781 code = buf = (guint8 *)mono_global_codeman_reserve (tramp_size);
783 unwind_ops = mono_arch_get_cie_program ();
786 * This trampoline restores the call chain of the handler block then jumps into the code that deals with it.
787 * We get here from the ret emitted by CEE_ENDFINALLY.
788 * The stack is misaligned.
790 /* Align the stack before the call to mono_amd64_handler_block_trampoline_helper() */
792 /* Also make room for the "register parameter stack area" as specified by the Windows x64 ABI (4 64-bit registers) */
793 amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, 8 + 4 * 8);
795 amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, 8);
798 code = mono_arch_emit_load_aotconst (buf, code, &ji, MONO_PATCH_INFO_JIT_ICALL_ADDR, "mono_amd64_handler_block_trampoline_helper");
799 amd64_call_reg (code, AMD64_R11);
801 amd64_mov_reg_imm (code, AMD64_RAX, mono_amd64_handler_block_trampoline_helper);
802 amd64_call_reg (code, AMD64_RAX);
804 /* Undo stack alignment */
806 amd64_alu_reg_imm (code, X86_ADD, AMD64_RSP, 8 + 4 * 8);
808 amd64_alu_reg_imm (code, X86_ADD, AMD64_RSP, 8);
810 /* Save the result to the stack */
811 amd64_push_reg (code, AMD64_RAX);
813 /* Make room for the "register parameter stack area" as specified by the Windows x64 ABI (4 64-bit registers) */
814 amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, 4 * 8);
/* Resolve the trampoline function for HANDLER_BLOCK_GUARD (GOT or immediate) */
817 char *name = g_strdup_printf ("trampoline_func_%d", MONO_TRAMPOLINE_HANDLER_BLOCK_GUARD);
818 code = mono_arch_emit_load_aotconst (buf, code, &ji, MONO_PATCH_INFO_JIT_ICALL_ADDR, name);
819 amd64_mov_reg_reg (code, AMD64_RAX, AMD64_R11, 8);
821 amd64_mov_reg_imm (code, AMD64_RAX, mono_get_trampoline_func (MONO_TRAMPOLINE_HANDLER_BLOCK_GUARD));
823 /* The stack is aligned */
824 amd64_call_reg (code, AMD64_RAX);
826 amd64_alu_reg_imm (code, X86_ADD, AMD64_RSP, 4 * 8);
828 /* Load return address */
829 amd64_pop_reg (code, AMD64_RAX);
830 /* The stack is misaligned, that's what the code we branch to expects */
831 amd64_jump_reg (code, AMD64_RAX);
833 mono_arch_flush_icache (buf, code - buf);
834 mono_profiler_code_buffer_new (buf, code - buf, MONO_PROFILER_CODE_BUFFER_HELPER, NULL);
835 g_assert (code - buf <= tramp_size);
837 *info = mono_tramp_info_create ("handler_block_trampoline", buf, code - buf, ji, unwind_ops);
841 #endif /* !DISABLE_JIT */
844 * mono_arch_get_call_target:
846 * Return the address called by the code before CODE if exists.
849 mono_arch_get_call_target (guint8 *code)
/* If the instruction ending at CODE is a call rel32 (opcode 0xe8), compute
 * its absolute target from the 32-bit displacement; other encodings are
 * handled in lines elided from this extraction. */
851 if (code [-5] == 0xe8) {
852 gint32 disp = *(gint32*)(code - 4);
853 guint8 *target = code + disp;
862 * mono_arch_get_plt_info_offset:
864 * Return the PLT info offset belonging to the plt entry PLT_ENTRY.
867 mono_arch_get_plt_info_offset (guint8 *plt_entry, mgreg_t *regs, guint8 *code)
/* The info offset is stored as a 32-bit word immediately after the 6-byte
 * jmp *<disp>(%rip) instruction that starts every PLT entry (see
 * mono_arch_patch_plt_entry above). */
869 return *(guint32*)(plt_entry + 6);
874 * mono_arch_create_sdb_trampoline:
876 * Return a trampoline which captures the current context, passes it to
877 * debugger_agent_single_step_from_context ()/debugger_agent_breakpoint_from_context (),
878 * then restores the (potentially changed) context.
881 mono_arch_create_sdb_trampoline (gboolean single_step, MonoTrampInfo **info, gboolean aot)
/*
 * Emits the debugger (sdb) trampoline: captures the caller's register state
 * into a MonoContext on the stack, calls
 * debugger_agent_single_step_from_context () or
 * debugger_agent_breakpoint_from_context () with it, then restores the
 * (possibly modified) context and returns to the caller.
 * NOTE(review): some lines (declarations, braces, #ifdef arms) are elided
 * in this extraction.
 */
883 int tramp_size = 512;
884 int i, framesize, ctx_offset, cfa_offset, gregs_offset;
886 GSList *unwind_ops = NULL;
887 MonoJumpInfo *ji = NULL;
889 code = buf = (guint8 *)mono_global_codeman_reserve (tramp_size);
893 /* Reserve space where the callee can save the argument registers */
894 framesize += 4 * sizeof (mgreg_t);
897 ctx_offset = framesize;
898 framesize += sizeof (MonoContext);
900 framesize = ALIGN_TO (framesize, MONO_ARCH_FRAME_ALIGNMENT);
/* Standard prologue with matching DWARF unwind info */
904 mono_add_unwind_op_def_cfa (unwind_ops, code, buf, AMD64_RSP, 8);
905 // IP saved at CFA - 8
906 mono_add_unwind_op_offset (unwind_ops, code, buf, AMD64_RIP, -cfa_offset);
908 amd64_push_reg (code, AMD64_RBP);
909 cfa_offset += sizeof(mgreg_t);
910 mono_add_unwind_op_def_cfa_offset (unwind_ops, code, buf, cfa_offset);
911 mono_add_unwind_op_offset (unwind_ops, code, buf, AMD64_RBP, - cfa_offset);
913 amd64_mov_reg_reg (code, AMD64_RBP, AMD64_RSP, sizeof(mgreg_t));
914 mono_add_unwind_op_def_cfa_reg (unwind_ops, code, buf, AMD64_RBP);
915 amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, framesize);
917 gregs_offset = ctx_offset + MONO_STRUCT_OFFSET (MonoContext, gregs);
919 /* Initialize a MonoContext structure on the stack */
920 for (i = 0; i < AMD64_NREG; ++i) {
921 if (i != AMD64_RIP && i != AMD64_RSP && i != AMD64_RBP)
922 amd64_mov_membase_reg (code, AMD64_RSP, gregs_offset + (i * sizeof (mgreg_t)), i, sizeof (mgreg_t));
/* Caller's RBP is the saved frame pointer at [rbp + 0] */
924 amd64_mov_reg_membase (code, AMD64_R11, AMD64_RBP, 0, sizeof (mgreg_t));
925 amd64_mov_membase_reg (code, AMD64_RSP, gregs_offset + (AMD64_RBP * sizeof (mgreg_t)), AMD64_R11, sizeof (mgreg_t));
/* Caller's RSP: just above the saved rbp + return address pair */
926 amd64_lea_membase (code, AMD64_R11, AMD64_RBP, 2 * sizeof (mgreg_t));
927 amd64_mov_membase_reg (code, AMD64_RSP, gregs_offset + (AMD64_RSP * sizeof (mgreg_t)), AMD64_R11, sizeof (mgreg_t));
/* Caller's RIP is the return address at [rbp + 8] */
928 amd64_mov_reg_membase (code, AMD64_R11, AMD64_RBP, sizeof (mgreg_t), sizeof (mgreg_t));
929 amd64_mov_membase_reg (code, AMD64_RSP, gregs_offset + (AMD64_RIP * sizeof (mgreg_t)), AMD64_R11, sizeof (mgreg_t));
931 /* Call the single step/breakpoint function in sdb */
932 amd64_lea_membase (code, AMD64_ARG_REG1, AMD64_RSP, ctx_offset);
936 code = mono_arch_emit_load_aotconst (buf, code, &ji, MONO_PATCH_INFO_JIT_ICALL_ADDR, "debugger_agent_single_step_from_context");
938 code = mono_arch_emit_load_aotconst (buf, code, &ji, MONO_PATCH_INFO_JIT_ICALL_ADDR, "debugger_agent_breakpoint_from_context");
941 amd64_mov_reg_imm (code, AMD64_R11, debugger_agent_single_step_from_context);
943 amd64_mov_reg_imm (code, AMD64_R11, debugger_agent_breakpoint_from_context);
945 amd64_call_reg (code, AMD64_R11);
947 /* Restore registers from ctx */
948 for (i = 0; i < AMD64_NREG; ++i) {
949 if (i != AMD64_RIP && i != AMD64_RSP && i != AMD64_RBP)
950 amd64_mov_reg_membase (code, i, AMD64_RSP, gregs_offset + (i * sizeof (mgreg_t)), sizeof (mgreg_t));
/* Write back possibly-updated caller RBP and RIP into the frame slots */
952 amd64_mov_reg_membase (code, AMD64_R11, AMD64_RSP, gregs_offset + (AMD64_RBP * sizeof (mgreg_t)), sizeof (mgreg_t));
953 amd64_mov_membase_reg (code, AMD64_RBP, 0, AMD64_R11, sizeof (mgreg_t));
954 amd64_mov_reg_membase (code, AMD64_R11, AMD64_RSP, gregs_offset + (AMD64_RIP * sizeof (mgreg_t)), sizeof (mgreg_t));
955 amd64_mov_membase_reg (code, AMD64_RBP, sizeof (mgreg_t), AMD64_R11, sizeof (mgreg_t));
958 cfa_offset -= sizeof (mgreg_t);
959 mono_add_unwind_op_def_cfa (unwind_ops, code, buf, AMD64_RSP, cfa_offset);
/* Fix: flush the emitted range starting at BUF, not at the end pointer CODE
 * (all other trampolines in this file flush (buf, code - buf)) */
962 mono_arch_flush_icache (buf, code - buf);
963 mono_profiler_code_buffer_new (buf, code - buf, MONO_PROFILER_CODE_BUFFER_HELPER, NULL);
964 g_assert (code - buf <= tramp_size);
966 const char *tramp_name = single_step ? "sdb_single_step_trampoline" : "sdb_breakpoint_trampoline";
967 *info = mono_tramp_info_create (tramp_name, buf, code - buf, ji, unwind_ops);
973 * mono_arch_get_enter_icall_trampoline:
975 * A trampoline that handles the transition from interpreter into native world.
976 * It requires setting up a descriptor (MethodArguments) that describes the
977 * required arguments passed to the callee.
/*
 * Emits the interpreter-to-native transition trampoline:
 *   - loads up to fregs_num FP arguments from MethodArguments->fregs into XMM0-XMM2,
 *   - loads up to gregs_num integer arguments from MethodArguments->iregs into the
 *     SysV parameter registers (overflow goes into outgoing stack slots),
 *   - calls the target address (ARG_REG1),
 *   - stores the integer (RAX) or FP (XMM0) return value through MethodArguments->retval.
 * NOTE(review): the membase offsets 0/8/16/24/0x20/0x28 hard-code the MethodArguments
 * layout (presumably ilen, iregs, flen, fregs, retval, is_float_ret — the existing
 * "TODO: struct offset" comments confirm these are unchecked magic numbers); verify
 * against the struct definition before changing either side.
 */
980 mono_arch_get_enter_icall_trampoline (MonoTrampInfo **info)
982 #ifdef ENABLE_INTERPRETER
983 const int gregs_num = 8;
984 const int fregs_num = 3;
985 guint8 *start = NULL, *code, *label_gexits [gregs_num], *label_fexits [fregs_num], *label_leave_tramp [3], *label_is_float_ret;
986 MonoJumpInfo *ji = NULL;
987 GSList *unwind_ops = NULL;
988 static int farg_regs[] = {AMD64_XMM0, AMD64_XMM1, AMD64_XMM2};
989 int i, framesize = 0, off_rbp, off_methodargs, off_targetaddr;
/* 256 bytes of executable memory for the emitted code; size asserted nowhere
   here — NOTE(review): confirm the emitted code cannot exceed 256 bytes. */
991 start = code = (guint8 *) mono_global_codeman_reserve (256);
/* Frame layout: negative offsets from RBP for the two saved incoming
   arguments, then room for integer args that overflow the param registers. */
993 off_rbp = -framesize;
995 framesize += sizeof (mgreg_t);
996 off_methodargs = -framesize;
998 framesize += sizeof (mgreg_t);
999 off_targetaddr = -framesize;
/* Outgoing stack slots for integer args beyond the PARAM_REGS registers. */
1001 framesize += (gregs_num - PARAM_REGS) * sizeof (mgreg_t);
/* Standard prologue: push RBP, establish frame, reserve aligned frame. */
1003 amd64_push_reg (code, AMD64_RBP);
1004 amd64_mov_reg_reg (code, AMD64_RBP, AMD64_RSP, sizeof (mgreg_t));
1005 amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, ALIGN_TO (framesize, MONO_ARCH_FRAME_ALIGNMENT));
1007 /* save MethodArguments* onto stack */
1008 amd64_mov_membase_reg (code, AMD64_RBP, off_methodargs, AMD64_ARG_REG2, sizeof (mgreg_t));
1010 /* save target address on stack */
1011 amd64_mov_membase_reg (code, AMD64_RBP, off_targetaddr, AMD64_ARG_REG1, sizeof (mgreg_t));
1013 /* load pointer to MethodArguments* into R11 */
/* NOTE(review): literal 8 here is sizeof (mgreg_t) — spell it out like the
   identical move at line 1035 for consistency. */
1014 amd64_mov_reg_reg (code, AMD64_R11, AMD64_ARG_REG2, 8);
1016 /* move flen into RAX */ // TODO: struct offset
1017 amd64_mov_reg_membase (code, AMD64_RAX, AMD64_R11, 16, sizeof (mgreg_t));
1018 /* load pointer to fregs into R11 */ // TODO: struct offset
1019 amd64_mov_reg_membase (code, AMD64_R11, AMD64_R11, 24, sizeof (mgreg_t));
/* Unrolled FP-argument loads: RAX counts down from flen; as soon as it hits
   zero, branch forward past the remaining loads (patched below). */
1021 for (i = 0; i < fregs_num; ++i) {
1022 amd64_test_reg_reg (code, AMD64_RAX, AMD64_RAX);
1023 label_fexits [i] = code;
1024 x86_branch8 (code, X86_CC_Z, 0, FALSE);
1026 amd64_sse_movsd_reg_membase (code, farg_regs [i], AMD64_R11, i * sizeof (double));
1027 amd64_dec_reg_size (code, AMD64_RAX, 1);
/* Patch every early-exit branch to land here, after all FP loads. */
1030 for (i = 0; i < fregs_num; i++) {
1031 x86_patch (label_fexits [i], code);
1034 /* load pointer to MethodArguments* into R11 */
1035 amd64_mov_reg_reg (code, AMD64_R11, AMD64_ARG_REG2, sizeof (mgreg_t));
1036 /* move ilen into RAX */ // TODO: struct offset
1037 amd64_mov_reg_membase (code, AMD64_RAX, AMD64_R11, 0, sizeof (mgreg_t));
/* Unrolled integer-argument loads, same countdown scheme as the FP loop.
   The first PARAM_REGS args go in registers, the rest into the outgoing
   stack area reserved in the prologue. */
1039 int stack_offset = 0;
1040 for (i = 0; i < gregs_num; i++) {
1041 amd64_test_reg_reg (code, AMD64_RAX, AMD64_RAX);
1042 label_gexits [i] = code;
/* rel32 branch (vs. rel8 in the FP loop) — presumably because the remaining
   unrolled iterations can exceed the ±127 byte rel8 range; confirm before
   shrinking. */
1043 x86_branch32 (code, X86_CC_Z, 0, FALSE);
1045 /* load pointer to MethodArguments* into R11 */
1046 amd64_mov_reg_membase (code, AMD64_R11, AMD64_RBP, off_methodargs, sizeof (mgreg_t));
1047 /* load pointer to iregs into R11 */ // TODO: struct offset
1048 amd64_mov_reg_membase (code, AMD64_R11, AMD64_R11, 8, sizeof (mgreg_t));
1050 if (i < PARAM_REGS) {
1051 amd64_mov_reg_membase (code, param_regs [i], AMD64_R11, i * sizeof (mgreg_t), sizeof (mgreg_t));
1053 amd64_mov_reg_membase (code, AMD64_R11, AMD64_R11, i * sizeof (mgreg_t), sizeof (mgreg_t));
1054 amd64_mov_membase_reg (code, AMD64_RSP, stack_offset, AMD64_R11, sizeof (mgreg_t));
1055 stack_offset += sizeof (mgreg_t);
1057 amd64_dec_reg_size (code, AMD64_RAX, 1);
/* Patch the integer-load early exits to land here. */
1060 for (i = 0; i < gregs_num; i++) {
1061 x86_patch (label_gexits [i], code);
1064 /* load target addr */
1065 amd64_mov_reg_membase (code, AMD64_R11, AMD64_RBP, off_targetaddr, sizeof (mgreg_t));
1067 /* call into native function */
1068 amd64_call_reg (code, AMD64_R11);
1070 /* load MethodArguments */
1071 amd64_mov_reg_membase (code, AMD64_R11, AMD64_RBP, off_methodargs, sizeof (mgreg_t));
1073 /* load is_float_ret */ // TODO: struct offset
1074 amd64_mov_reg_membase (code, AMD64_R11, AMD64_R11, 0x28, sizeof (mgreg_t));
1076 /* check if a float return value is expected */
1077 amd64_test_reg_reg (code, AMD64_R11, AMD64_R11);
1079 label_is_float_ret = code;
1080 x86_branch8 (code, X86_CC_NZ, 0, FALSE);
/* --- integer return path: store RAX through retval if it is non-NULL --- */
1085 /* load MethodArguments */
1086 amd64_mov_reg_membase (code, AMD64_R11, AMD64_RBP, off_methodargs, sizeof (mgreg_t));
1087 /* load retval */ // TODO: struct offset
1088 amd64_mov_reg_membase (code, AMD64_R11, AMD64_R11, 0x20, sizeof (mgreg_t));
/* retval == NULL means the caller does not want the result back. */
1090 amd64_test_reg_reg (code, AMD64_R11, AMD64_R11);
1091 label_leave_tramp [0] = code;
1092 x86_branch8 (code, X86_CC_Z, 0, FALSE);
1094 amd64_mov_membase_reg (code, AMD64_R11, 0, AMD64_RAX, sizeof (mgreg_t));
1096 label_leave_tramp [1] = code;
1097 x86_jump8 (code, 0);
/* --- float return path: store XMM0 through retval if it is non-NULL --- */
1102 x86_patch (label_is_float_ret, code);
1103 /* load MethodArguments */
1104 amd64_mov_reg_membase (code, AMD64_R11, AMD64_RBP, off_methodargs, sizeof (mgreg_t));
1105 /* load retval */ // TODO: struct offset
1106 amd64_mov_reg_membase (code, AMD64_R11, AMD64_R11, 0x20, sizeof (mgreg_t));
1108 amd64_test_reg_reg (code, AMD64_R11, AMD64_R11);
1109 label_leave_tramp [2] = code;
1110 x86_branch8 (code, X86_CC_Z, 0, FALSE);
1112 amd64_sse_movsd_membase_reg (code, AMD64_R11, 0, AMD64_XMM0);
/* All three exit branches converge here for the epilogue. */
1114 for (i = 0; i < 3; i++)
1115 x86_patch (label_leave_tramp [i], code);
/* Epilogue: release the frame and restore RBP. */
1117 amd64_alu_reg_imm (code, X86_ADD, AMD64_RSP, ALIGN_TO (framesize, MONO_ARCH_FRAME_ALIGNMENT));
1118 amd64_pop_reg (code, AMD64_RBP);
1121 mono_arch_flush_icache (start, code - start);
/* NOTE(review): EXCEPTION_HANDLING looks like the wrong profiler buffer
   category for an icall trampoline (cf. UNBOX_TRAMPOLINE / HELPER used
   elsewhere in this file) — confirm intent. */
1122 mono_profiler_code_buffer_new (start, code - start, MONO_PROFILER_CODE_BUFFER_EXCEPTION_HANDLING, NULL);
1125 *info = mono_tramp_info_create ("enter_icall_trampoline", start, code - start, ji, unwind_ops);
/* Without the interpreter this entry point must never be reached. */
1129 g_assert_not_reached ();
1131 #endif /* ENABLE_INTERPRETER */
1133 #endif /* !DISABLE_JIT */
/* DISABLE_JIT stub: unbox trampolines cannot be requested without the JIT. */
1137 mono_arch_get_unbox_trampoline (MonoMethod *m, gpointer addr)
1139 g_assert_not_reached ();
/* DISABLE_JIT stub: static rgctx trampolines cannot be requested without the JIT. */
1144 mono_arch_get_static_rgctx_trampoline (MonoMethod *m, MonoMethodRuntimeGenericContext *mrgctx, gpointer addr)
1146 g_assert_not_reached ();
/* DISABLE_JIT stub: rgctx lazy-fetch trampolines cannot be created without the JIT. */
1151 mono_arch_create_rgctx_lazy_fetch_trampoline (guint32 slot, MonoTrampInfo **info, gboolean aot)
1153 g_assert_not_reached ();
/* DISABLE_JIT stub: generic trampolines cannot be created without the JIT. */
1158 mono_arch_create_generic_trampoline (MonoTrampolineType tramp_type, MonoTrampInfo **info, gboolean aot)
1160 g_assert_not_reached ();
/* DISABLE_JIT stub: specific trampolines cannot be created without the JIT. */
1165 mono_arch_create_specific_trampoline (gpointer arg1, MonoTrampolineType tramp_type, MonoDomain *domain, guint32 *code_len)
1167 g_assert_not_reached ();
/* DISABLE_JIT stub: the general rgctx lazy-fetch trampoline cannot be created without the JIT. */
1172 mono_arch_create_general_rgctx_lazy_fetch_trampoline (MonoTrampInfo **info, gboolean aot)
1174 g_assert_not_reached ();
/* DISABLE_JIT stub: handler-block trampolines cannot be created without the JIT. */
1179 mono_arch_create_handler_block_trampoline (MonoTrampInfo **info, gboolean aot)
1181 g_assert_not_reached ();
/* DISABLE_JIT stub: there is no JIT-compiled method to invalidate. */
1186 mono_arch_invalidate_method (MonoJitInfo *ji, void *func, gpointer func_arg)
1188 g_assert_not_reached ();
/* DISABLE_JIT stub: debugger (sdb) trampolines cannot be created without the JIT. */
1193 mono_arch_create_sdb_trampoline (gboolean single_step, MonoTrampInfo **info, gboolean aot)
1195 g_assert_not_reached ();
/* DISABLE_JIT stub: the interpreter enter-icall trampoline cannot be created without the JIT. */
1200 mono_arch_get_enter_icall_trampoline (MonoTrampInfo **info)
1202 g_assert_not_reached ();
1205 #endif /* DISABLE_JIT */