2 * tramp-x86.c: JIT trampoline code for x86
5 * Dietmar Maurer (dietmar@ximian.com)
7 * (C) 2001 Ximian, Inc.
13 #include <mono/metadata/abi-details.h>
14 #include <mono/metadata/appdomain.h>
15 #include <mono/metadata/metadata-internals.h>
16 #include <mono/metadata/marshal.h>
17 #include <mono/metadata/tabledefs.h>
18 #include <mono/metadata/profiler-private.h>
19 #include <mono/metadata/gc-internals.h>
20 #include <mono/arch/x86/x86-codegen.h>
22 #include <mono/utils/memcheck.h>
26 #include "debugger-agent.h"
27 #include "jit-icalls.h"
29 #define ALIGN_TO(val,align) ((((guint64)val) + ((align) - 1)) & ~((align) - 1))
32 * mono_arch_get_unbox_trampoline:
34 * @addr: pointer to native code for @m
36 * when value type methods are called through the vtable we need to unbox the
37 * this argument. This method returns a pointer to a trampoline which does
38 * unboxing before calling the method
41 mono_arch_get_unbox_trampoline (MonoMethod *m, gpointer addr)
/* 'this' is the first stack argument (esp + 4 after the call); 16 bytes is ample for one add + jmp. */
44 int this_pos = 4, size = 16;
45 MonoDomain *domain = mono_domain_get ();
/* Reserve executable memory from the domain's code manager. */
48 start = code = mono_domain_code_reserve (domain, size);
50 unwind_ops = mono_arch_get_cie_program ();
/* Advance 'this' past the MonoObject header so it points at the raw value-type data. */
52 x86_alu_membase_imm (code, X86_ADD, X86_ESP, this_pos, sizeof (MonoObject));
/* Tail-jump to the method body; the caller's return address is reused unchanged. */
53 x86_jump_code (code, addr);
54 g_assert ((code - start) < size);
/* Publish the new buffer to profilers and register its unwind info. */
56 mono_profiler_code_buffer_new (start, code - start, MONO_PROFILER_CODE_BUFFER_UNBOX_TRAMPOLINE, m);
58 mono_tramp_info_register (mono_tramp_info_create (NULL, start, code - start, NULL, unwind_ops), domain);
/*
 * Create a tiny trampoline that loads MRGCTX into the RGCTX register and
 * tail-jumps to ADDR. Used to pass a static rgctx argument to generic code.
 */
64 mono_arch_get_static_rgctx_trampoline (MonoMethod *m, MonoMethodRuntimeGenericContext *mrgctx, gpointer addr)
70 MonoDomain *domain = mono_domain_get ();
/* buf_len is declared on elided lines; only a mov-imm + jmp are emitted below. */
74 start = code = mono_domain_code_reserve (domain, buf_len);
76 unwind_ops = mono_arch_get_cie_program ();
/* Materialize the rgctx pointer in MONO_ARCH_RGCTX_REG, then tail-jump to the target. */
78 x86_mov_reg_imm (code, MONO_ARCH_RGCTX_REG, mrgctx);
79 x86_jump_code (code, addr);
80 g_assert ((code - start) <= buf_len);
82 mono_arch_flush_icache (start, code - start);
83 mono_profiler_code_buffer_new (start, code - start, MONO_PROFILER_CODE_BUFFER_GENERICS_TRAMPOLINE, NULL);
85 mono_tramp_info_register (mono_tramp_info_create (NULL, start, code - start, NULL, unwind_ops), domain);
/*
 * Patch the call site whose return address is ORIG_CODE so it calls ADDR
 * instead of a trampoline. Must be safe against concurrent execution, hence
 * the single atomic 32-bit exchange of the displacement.
 */
91 mono_arch_patch_callsite (guint8 *method_start, guint8 *orig_code, guint8 *addr)
/* Obtain a debugger-breakpoint-free copy of the bytes before the return address. */
95 gboolean can_write = mono_breakpoint_clean_code (method_start, orig_code, 8, buf, sizeof (buf));
99 /* go to the start of the call instruction
101 * address_byte = (m << 6) | (o << 3) | reg
102 * call opcode: 0xff address_byte displacement
/* NOTE(review): elided lines presumably rewind code/orig_code to the call opcode — confirm against full source. */
108 if (code [1] == 0xe8) {
/* Direct call (0xe8 rel32): atomically rewrite the 4-byte relative displacement. */
110 InterlockedExchange ((gint32*)(orig_code + 2), (guint)addr - ((guint)orig_code + 1) - 5);
112 /* Tell valgrind to recompile the patched code */
113 VALGRIND_DISCARD_TRANSLATIONS (orig_code + 2, 4);
115 } else if (code [1] == 0xe9) {
116 /* A PLT entry: jmp <DISP> */
118 InterlockedExchange ((gint32*)(orig_code + 2), (guint)addr - ((guint)orig_code + 1) - 5);
/* Any other byte pattern here is unexpected: dump the bytes and abort. */
120 printf ("Invalid trampoline sequence: %x %x %x %x %x %x %x\n", code [0], code [1], code [2], code [3],
121 code [4], code [5], code [6]);
122 g_assert_not_reached ();
/*
 * Patch a PLT entry to dispatch to ADDR by rewriting the GOT slot the entry
 * jumps through, rather than the code itself.
 */
127 mono_arch_patch_plt_entry (guint8 *code, gpointer *got, mgreg_t *regs, guint8 *addr)
131 /* Patch the jump table entry used by the plt entry */
133 /* A PLT entry: jmp *<DISP>(%ebx) */
/* Verify the expected encoding: 0xff /4 with disp32 off %ebx (ModRM 0xa3). */
134 g_assert (code [0] == 0xff);
135 g_assert (code [1] == 0xa3);
/* The 32-bit displacement after the ModRM byte is the GOT offset. */
137 offset = *(guint32*)(code + 2);
/* The GOT base lives in the register the saved context designates (ebx on x86). */
139 got = (gpointer*)(gsize) regs [MONO_ARCH_GOT_REG];
140 *(guint8**)((guint8*)got + offset) = addr;
/*
 * Decode the indirect call instruction at CODE and return the base pointer
 * (taken from the saved registers) of the vtable slot it called through;
 * the slot's byte offset is returned in *DISPLACEMENT.
 */
144 get_vcall_slot (guint8 *code, mgreg_t *regs, int *displacement)
146 const int kBufSize = 8;
/* Read a breakpoint-free copy of the instruction bytes. */
151 mono_breakpoint_clean_code (NULL, code, kBufSize, buf, kBufSize);
/* Match "call *<disp32>(%reg)": opcode 0xff, ModRM with reg-field /2 (call) and mod = 2 (disp32). */
156 if ((code [0] == 0xff) && ((code [1] & 0x18) == 0x10) && ((code [1] >> 6) == 2)) {
157 reg = code [1] & 0x07;
158 disp = *((gint32*)(code + 2));
/* Any other encoding is unexpected at a vcall site. */
160 g_assert_not_reached ();
/* Return the base register's saved value; caller adds *displacement to locate the slot. */
164 *displacement = disp;
165 return (gpointer)regs [reg];
/* Convenience wrapper: return the address of the vtable slot used by the call at CODE. */
169 get_vcall_slot_addr (guint8* code, mgreg_t *regs)
173 vt = get_vcall_slot (code, regs, &displacement);
/* Slot address = base pointer + decoded displacement. */
176 return (gpointer*)((char*)vt + displacement);
/*
 * Create the generic trampoline for TRAMP_TYPE: saves the full register state
 * and an LMF frame, calls the C trampoline function, then either returns the
 * result, jumps to the resolved code, or rethrows a pending exception in the
 * caller's frame. Supports both JIT and AOT modes.
 */
180 mono_arch_create_generic_trampoline (MonoTrampolineType tramp_type, MonoTrampInfo **info, gboolean aot)
183 guint8 *buf, *code, *tramp, *br_ex_check;
184 GSList *unwind_ops = NULL;
185 MonoJumpInfo *ji = NULL;
186 int i, offset, frame_size, regarray_offset, lmf_offset, caller_ip_offset, arg_offset;
187 int cfa_offset; /* cfa = cfa_reg + cfa_offset */
189 code = buf = mono_global_codeman_reserve (256);
191 /* Note that there is a single argument to the trampoline
192 * and it is stored at: esp + pushed_args * sizeof (gpointer)
193 * the ret address is at: esp + (pushed_args + 1) * sizeof (gpointer)
196 /* Compute frame offsets relative to the frame pointer %ebp */
197 arg_offset = sizeof (mgreg_t);
198 caller_ip_offset = 2 * sizeof (mgreg_t);
/* Frame layout (ebp-relative, growing down): MonoLMF, then the X86_NREG register array. */
200 offset += sizeof (MonoLMF);
201 lmf_offset = -offset;
202 offset += X86_NREG * sizeof (mgreg_t);
203 regarray_offset = -offset;
/* Outgoing argument area for the 4-arg C trampoline call. */
205 offset += 4 * sizeof (mgreg_t);
206 frame_size = ALIGN_TO (offset, MONO_ARCH_FRAME_ALIGNMENT);
208 /* ret addr and arg are on the stack */
209 cfa_offset = 2 * sizeof (mgreg_t);
210 mono_add_unwind_op_def_cfa (unwind_ops, code, buf, X86_ESP, cfa_offset);
211 // IP saved at CFA - 4
212 mono_add_unwind_op_offset (unwind_ops, code, buf, X86_NREG, -4);
/* Standard prologue: push ebp, ebp = esp, with matching unwind ops. */
215 x86_push_reg (code, X86_EBP);
216 cfa_offset += sizeof (mgreg_t);
217 mono_add_unwind_op_def_cfa_offset (unwind_ops, code, buf, cfa_offset);
218 mono_add_unwind_op_offset (unwind_ops, code, buf, X86_EBP, -cfa_offset);
220 x86_mov_reg_reg (code, X86_EBP, X86_ESP, sizeof (mgreg_t));
221 mono_add_unwind_op_def_cfa_reg (unwind_ops, code, buf, X86_EBP);
223 /* There are three words on the stack, adding + 4 aligns the stack to 16, which is needed on osx */
224 x86_alu_reg_imm (code, X86_SUB, X86_ESP, frame_size + sizeof (mgreg_t));
226 /* Save all registers */
227 for (i = X86_EAX; i <= X86_EDI; ++i) {
231 /* Save original ebp */
232 /* EAX is already saved */
233 x86_mov_reg_membase (code, X86_EAX, X86_EBP, 0, sizeof (mgreg_t));
235 } else if (i == X86_ESP) {
236 /* Save original esp */
237 /* EAX is already saved */
238 x86_mov_reg_reg (code, X86_EAX, X86_EBP, sizeof (mgreg_t));
239 /* Saved ebp + trampoline arg + return addr */
240 x86_alu_reg_imm (code, X86_ADD, X86_EAX, 3 * sizeof (mgreg_t));
/* Store the (possibly reconstructed) register value into the regarray slot. */
243 x86_mov_membase_reg (code, X86_EBP, regarray_offset + (i * sizeof (mgreg_t)), reg, sizeof (mgreg_t));
/* Fill in the LMF: eip is 0 for JUMP trampolines (no meaningful caller ip). */
248 if (tramp_type == MONO_TRAMPOLINE_JUMP) {
249 x86_mov_membase_imm (code, X86_EBP, lmf_offset + G_STRUCT_OFFSET (MonoLMF, eip), 0, sizeof (mgreg_t));
251 x86_mov_reg_membase (code, X86_EAX, X86_EBP, caller_ip_offset, sizeof (mgreg_t));
252 x86_mov_membase_reg (code, X86_EBP, lmf_offset + G_STRUCT_OFFSET (MonoLMF, eip), X86_EAX, sizeof (mgreg_t));
/* lmf->method: for JIT/JUMP trampolines the trampoline argument is the MonoMethod. */
255 if ((tramp_type == MONO_TRAMPOLINE_JIT) || (tramp_type == MONO_TRAMPOLINE_JUMP)) {
256 x86_mov_reg_membase (code, X86_EAX, X86_EBP, arg_offset, sizeof (mgreg_t));
257 x86_mov_membase_reg (code, X86_EBP, lmf_offset + G_STRUCT_OFFSET (MonoLMF, method), X86_EAX, sizeof (mgreg_t));
259 x86_mov_membase_imm (code, X86_EBP, lmf_offset + G_STRUCT_OFFSET (MonoLMF, method), 0, sizeof (mgreg_t));
/* lmf->esp from the saved register array. */
262 x86_mov_reg_membase (code, X86_EAX, X86_EBP, regarray_offset + (X86_ESP * sizeof (mgreg_t)), sizeof (mgreg_t));
263 x86_mov_membase_reg (code, X86_EBP, lmf_offset + G_STRUCT_OFFSET (MonoLMF, esp), X86_EAX, sizeof (mgreg_t));
264 /* callee save registers */
265 x86_mov_reg_membase (code, X86_EAX, X86_EBP, regarray_offset + (X86_EBX * sizeof (mgreg_t)), sizeof (mgreg_t));
266 x86_mov_membase_reg (code, X86_EBP, lmf_offset + G_STRUCT_OFFSET (MonoLMF, ebx), X86_EAX, sizeof (mgreg_t));
267 x86_mov_reg_membase (code, X86_EAX, X86_EBP, regarray_offset + (X86_EDI * sizeof (mgreg_t)), sizeof (mgreg_t));
268 x86_mov_membase_reg (code, X86_EBP, lmf_offset + G_STRUCT_OFFSET (MonoLMF, edi), X86_EAX, sizeof (mgreg_t));
269 x86_mov_reg_membase (code, X86_EAX, X86_EBP, regarray_offset + (X86_ESI * sizeof (mgreg_t)), sizeof (mgreg_t));
270 x86_mov_membase_reg (code, X86_EBP, lmf_offset + G_STRUCT_OFFSET (MonoLMF, esi), X86_EAX, sizeof (mgreg_t));
271 x86_mov_reg_membase (code, X86_EAX, X86_EBP, regarray_offset + (X86_EBP * sizeof (mgreg_t)), sizeof (mgreg_t));
272 x86_mov_membase_reg (code, X86_EBP, lmf_offset + G_STRUCT_OFFSET (MonoLMF, ebp), X86_EAX, sizeof (mgreg_t));
275 /* get the address of lmf for the current thread */
/* AOT path loads mono_get_lmf_addr through an aotconst; JIT path calls it directly. */
277 code = mono_arch_emit_load_aotconst (buf, code, &ji, MONO_PATCH_INFO_JIT_ICALL_ADDR, "mono_get_lmf_addr");
278 x86_call_reg (code, X86_EAX);
280 x86_call_code (code, mono_get_lmf_addr);
282 /* lmf->lmf_addr = lmf_addr (%eax) */
283 x86_mov_membase_reg (code, X86_EBP, lmf_offset + G_STRUCT_OFFSET (MonoLMF, lmf_addr), X86_EAX, sizeof (mgreg_t));
284 /* lmf->previous_lmf = *(lmf_addr) */
285 x86_mov_reg_membase (code, X86_ECX, X86_EAX, 0, sizeof (mgreg_t));
286 /* Signal to mono_arch_unwind_frame () that this is a trampoline frame */
287 x86_alu_reg_imm (code, X86_ADD, X86_ECX, 1);
288 x86_mov_membase_reg (code, X86_EBP, lmf_offset + G_STRUCT_OFFSET (MonoLMF, previous_lmf), X86_ECX, sizeof (mgreg_t));
289 /* *lmf_addr = lmf */
290 x86_lea_membase (code, X86_ECX, X86_EBP, lmf_offset);
291 x86_mov_membase_reg (code, X86_EAX, 0, X86_ECX, sizeof (mgreg_t));
293 /* Call trampoline function */
294 /* Arg 1 - registers */
295 x86_lea_membase (code, X86_EAX, X86_EBP, regarray_offset);
296 x86_mov_membase_reg (code, X86_ESP, (0 * sizeof (mgreg_t)), X86_EAX, sizeof (mgreg_t));
297 /* Arg2 - calling code */
298 if (tramp_type == MONO_TRAMPOLINE_JUMP) {
299 x86_mov_membase_imm (code, X86_ESP, (1 * sizeof (mgreg_t)), 0, sizeof (mgreg_t));
301 x86_mov_reg_membase (code, X86_EAX, X86_EBP, caller_ip_offset, sizeof (mgreg_t));
302 x86_mov_membase_reg (code, X86_ESP, (1 * sizeof (mgreg_t)), X86_EAX, sizeof (mgreg_t));
304 /* Arg3 - trampoline argument */
305 x86_mov_reg_membase (code, X86_EAX, X86_EBP, arg_offset, sizeof (mgreg_t));
306 x86_mov_membase_reg (code, X86_ESP, (2 * sizeof (mgreg_t)), X86_EAX, sizeof (mgreg_t));
307 /* Arg4 - trampoline address */
309 x86_mov_membase_imm (code, X86_ESP, (3 * sizeof (mgreg_t)), 0, sizeof (mgreg_t));
312 /* check the stack is aligned after the ret ip is pushed */
/* Debug-only alignment check: esp & 15 must be 0; hit int3 otherwise. */
314 x86_mov_reg_reg (code, X86_EDX, X86_ESP, 4);
315 x86_alu_reg_imm (code, X86_AND, X86_EDX, 15);
316 x86_alu_reg_imm (code, X86_CMP, X86_EDX, 0);
317 x86_branch_disp (code, X86_CC_Z, 3, FALSE);
318 x86_breakpoint (code);
/* AOT: resolve the per-type trampoline function by name; JIT: call it directly. */
323 char *icall_name = g_strdup_printf ("trampoline_func_%d", tramp_type);
324 code = mono_arch_emit_load_aotconst (buf, code, &ji, MONO_PATCH_INFO_JIT_ICALL_ADDR, icall_name);
325 x86_call_reg (code, X86_EAX);
327 tramp = (guint8*)mono_get_trampoline_func (tramp_type);
328 x86_call_code (code, tramp);
332 * Overwrite the trampoline argument with the address we need to jump to,
335 x86_mov_membase_reg (code, X86_EBP, arg_offset, X86_EAX, 4);
/* Pop the LMF: restore *lmf_addr = previous_lmf (undoing the +1 tag). */
338 x86_mov_reg_membase (code, X86_EAX, X86_EBP, lmf_offset + G_STRUCT_OFFSET (MonoLMF, lmf_addr), sizeof (mgreg_t));
339 x86_mov_reg_membase (code, X86_ECX, X86_EBP, lmf_offset + G_STRUCT_OFFSET (MonoLMF, previous_lmf), sizeof (mgreg_t));
340 x86_alu_reg_imm (code, X86_SUB, X86_ECX, 1);
341 x86_mov_membase_reg (code, X86_EAX, 0, X86_ECX, sizeof (mgreg_t));
343 /* Check for interruptions */
345 code = mono_arch_emit_load_aotconst (buf, code, &ji, MONO_PATCH_INFO_JIT_ICALL_ADDR, "mono_thread_force_interruption_checkpoint_noraise");
346 x86_call_reg (code, X86_EAX);
348 x86_call_code (code, (guint8*)mono_thread_force_interruption_checkpoint_noraise);
/* Non-NULL result means a pending exception object to throw. */
351 x86_test_reg_reg (code, X86_EAX, X86_EAX);
/* Forward branch over the throw path; patched at br_ex_check below. */
353 x86_branch8 (code, X86_CC_Z, -1, 1);
357 * We have an exception we want to throw in the caller's frame, so pop
358 * the trampoline frame and throw from the caller.
362 * The exception is in eax.
363 * We are calling the throw trampoline used by OP_THROW, so we have to setup the
364 * stack to look the same.
365 * The stack contains the ret addr, and the trampoline argument, the throw trampoline
366 * expects it to contain the ret addr and the exception. It also needs to be aligned
367 * after the exception is pushed.
/* Align the stack. */
370 x86_push_reg (code, X86_EAX);
371 /* Push the exception */
372 x86_push_reg (code, X86_EAX);
373 //x86_breakpoint (code);
374 /* Push the original return value */
375 x86_push_membase (code, X86_ESP, 3 * 4);
377 * EH is initialized after trampolines, so get the address of the variable
378 * which contains throw_exception, and load it from there.
381 /* Not really a jit icall */
382 code = mono_arch_emit_load_aotconst (buf, code, &ji, MONO_PATCH_INFO_JIT_ICALL_ADDR, "throw_exception_addr");
384 x86_mov_reg_imm (code, X86_ECX, (guint8*)mono_get_throw_exception_addr ());
386 x86_mov_reg_membase (code, X86_ECX, X86_ECX, 0, sizeof(gpointer));
387 x86_jump_reg (code, X86_ECX);
/* No-exception path resumes here. */
390 mono_x86_patch (br_ex_check, code);
392 /* Restore registers */
393 for (i = X86_EAX; i <= X86_EDI; ++i) {
394 if (i == X86_ESP || i == X86_EBP)
/* eax holds the trampoline result unless this trampoline type must preserve it. */
396 if (i == X86_EAX && !((tramp_type == MONO_TRAMPOLINE_RESTORE_STACK_PROT) || (tramp_type == MONO_TRAMPOLINE_AOT_PLT)))
398 x86_mov_reg_membase (code, i, X86_EBP, regarray_offset + (i * 4), 4);
/* Epilogue: tear down the frame, updating unwind state accordingly. */
403 cfa_offset -= sizeof (mgreg_t);
404 mono_add_unwind_op_def_cfa (unwind_ops, code, buf, X86_ESP, cfa_offset);
405 mono_add_unwind_op_same_value (unwind_ops, code, buf, X86_EBP);
407 if (MONO_TRAMPOLINE_TYPE_MUST_RETURN (tramp_type)) {
408 /* Load the value returned by the trampoline */
409 x86_mov_reg_membase (code, X86_EAX, X86_ESP, 0, 4);
410 /* The trampoline returns normally, pop the trampoline argument */
411 x86_alu_reg_imm (code, X86_ADD, X86_ESP, 4);
412 cfa_offset -= sizeof (mgreg_t);
413 mono_add_unwind_op_def_cfa_offset (unwind_ops, code, buf, cfa_offset);
416 /* The trampoline argument is at the top of the stack, and it contains the address we need to branch to */
417 if (tramp_type == MONO_TRAMPOLINE_HANDLER_BLOCK_GUARD) {
418 x86_pop_reg (code, X86_EAX);
419 cfa_offset -= sizeof (mgreg_t);
420 mono_add_unwind_op_def_cfa_offset (unwind_ops, code, buf, cfa_offset);
/* NOTE(review): the extra +8 adjustment presumably discards words pushed by the handler-block path — confirm. */
421 x86_alu_reg_imm (code, X86_ADD, X86_ESP, 0x8);
422 x86_jump_reg (code, X86_EAX);
428 g_assert ((code - buf) <= 256);
429 mono_profiler_code_buffer_new (buf, code - buf, MONO_PROFILER_CODE_BUFFER_HELPER, NULL);
431 tramp_name = mono_get_generic_trampoline_name (tramp_type);
432 *info = mono_tramp_info_create (tramp_name, buf, code - buf, ji, unwind_ops);
/* push imm32 (5 bytes) + jmp rel32 (5 bytes). */
438 #define TRAMPOLINE_SIZE 10
/*
 * Create a specific trampoline: pushes ARG1 and jumps to the generic
 * trampoline for TRAMP_TYPE. Optionally returns the emitted size in CODE_LEN.
 */
441 mono_arch_create_specific_trampoline (gpointer arg1, MonoTrampolineType tramp_type, MonoDomain *domain, guint32 *code_len)
443 guint8 *code, *buf, *tramp;
445 tramp = mono_get_trampoline_code (tramp_type);
447 code = buf = mono_domain_code_reserve_align (domain, TRAMPOLINE_SIZE, 4);
/* The pushed argument doubles as the trampoline argument read by the generic trampoline. */
449 x86_push_imm (buf, arg1);
450 x86_jump_code (buf, tramp);
451 g_assert ((buf - code) <= TRAMPOLINE_SIZE);
453 mono_arch_flush_icache (code, buf - code);
454 mono_profiler_code_buffer_new (code, buf - code, MONO_PROFILER_CODE_BUFFER_SPECIFIC_TRAMPOLINE, mono_get_generic_trampoline_simple_name (tramp_type));
/* code_len may be NULL in some call sites (guard is on elided lines — confirm). */
457 *code_len = buf - code;
/*
 * Create the lazy-fetch trampoline for rgctx SLOT: walk the rgctx array chain
 * inline and return the slot value if present, otherwise fall through to the
 * slow-path trampoline that fills the slot.
 */
463 mono_arch_create_rgctx_lazy_fetch_trampoline (guint32 slot, MonoTrampInfo **info, gboolean aot)
467 guint8 **rgctx_null_jumps;
472 MonoJumpInfo *ji = NULL;
473 GSList *unwind_ops = NULL;
475 unwind_ops = mono_arch_get_cie_program ();
/* Decode the packed slot: whether it is an MRGCTX slot and its index. */
477 mrgctx = MONO_RGCTX_SLOT_IS_MRGCTX (slot);
478 index = MONO_RGCTX_SLOT_INDEX (slot);
480 index += MONO_SIZEOF_METHOD_RUNTIME_GENERIC_CONTEXT / sizeof (gpointer);
/* Determine how many chained arrays must be traversed to reach the index. */
481 for (depth = 0; ; ++depth) {
482 int size = mono_class_rgctx_get_array_size (depth, mrgctx);
484 if (index < size - 1)
/* Code size scales with depth; AOT needs extra room for the aotconst load. */
489 tramp_size = (aot ? 64 : 36) + 6 * depth;
491 code = buf = mono_global_codeman_reserve (tramp_size);
/* One forward-branch fixup per null check: entry 0, one per depth level, plus the slot check. */
493 rgctx_null_jumps = g_malloc (sizeof (guint8*) * (depth + 2));
495 /* load vtable/mrgctx ptr */
496 x86_mov_reg_membase (code, X86_EAX, X86_ESP, 4, 4);
498 /* load rgctx ptr from vtable */
499 x86_mov_reg_membase (code, X86_EAX, X86_EAX, MONO_STRUCT_OFFSET (MonoVTable, runtime_generic_context), 4);
500 /* is the rgctx ptr null? */
501 x86_test_reg_reg (code, X86_EAX, X86_EAX);
502 /* if yes, jump to actual trampoline */
503 rgctx_null_jumps [0] = code;
504 x86_branch8 (code, X86_CC_Z, -1, 1);
507 for (i = 0; i < depth; ++i) {
508 /* load ptr to next array */
/* In the MRGCTX case the first link is embedded after the fixed-size header. */
509 if (mrgctx && i == 0)
510 x86_mov_reg_membase (code, X86_EAX, X86_EAX, MONO_SIZEOF_METHOD_RUNTIME_GENERIC_CONTEXT, 4);
512 x86_mov_reg_membase (code, X86_EAX, X86_EAX, 0, 4);
513 /* is the ptr null? */
514 x86_test_reg_reg (code, X86_EAX, X86_EAX);
515 /* if yes, jump to actual trampoline */
516 rgctx_null_jumps [i + 1] = code;
517 x86_branch8 (code, X86_CC_Z, -1, 1);
/* Fetch the slot itself (+1 skips the array's link word). */
521 x86_mov_reg_membase (code, X86_EAX, X86_EAX, sizeof (gpointer) * (index + 1), 4);
522 /* is the slot null? */
523 x86_test_reg_reg (code, X86_EAX, X86_EAX);
524 /* if yes, jump to actual trampoline */
525 rgctx_null_jumps [depth + 1] = code;
526 x86_branch8 (code, X86_CC_Z, -1, 1);
527 /* otherwise return */
/* Patch all null-check branches to land on the slow path below. */
530 for (i = mrgctx ? 1 : 0; i <= depth + 1; ++i)
531 x86_patch (rgctx_null_jumps [i], code);
533 g_free (rgctx_null_jumps);
/* Slow path: reload vtable/mrgctx into the vtable register and tail-call the fetch trampoline. */
535 x86_mov_reg_membase (code, MONO_ARCH_VTABLE_REG, X86_ESP, 4, 4);
538 code = mono_arch_emit_load_aotconst (buf, code, &ji, MONO_PATCH_INFO_JIT_ICALL_ADDR, g_strdup_printf ("specific_trampoline_lazy_fetch_%u", slot));
539 x86_jump_reg (code, X86_EAX);
541 tramp = mono_arch_create_specific_trampoline (GUINT_TO_POINTER (slot), MONO_TRAMPOLINE_RGCTX_LAZY_FETCH, mono_get_root_domain (), NULL);
543 /* jump to the actual trampoline */
544 x86_jump_code (code, tramp);
547 mono_arch_flush_icache (buf, code - buf);
548 mono_profiler_code_buffer_new (buf, code - buf, MONO_PROFILER_CODE_BUFFER_GENERICS_TRAMPOLINE, NULL);
550 g_assert (code - buf <= tramp_size);
552 char *name = mono_get_rgctx_fetch_trampoline_name (slot);
553 *info = mono_tramp_info_create (name, buf, code - buf, ji, unwind_ops);
560 * mono_arch_create_general_rgctx_lazy_fetch_trampoline:
562 * This is a general variant of the rgctx fetch trampolines. It receives a pointer to gpointer[2] in the rgctx reg. The first entry contains the slot, the second
563 * the trampoline to call if the slot is not filled.
566 mono_arch_create_general_rgctx_lazy_fetch_trampoline (MonoTrampInfo **info, gboolean aot)
570 MonoJumpInfo *ji = NULL;
571 GSList *unwind_ops = NULL;
575 unwind_ops = mono_arch_get_cie_program ();
579 code = buf = mono_global_codeman_reserve (tramp_size);
581 // FIXME: Currently, we always go to the slow path.
583 /* Load trampoline addr */
/* rgctx reg points at gpointer[2]; entry [1] (offset 4) is the slow-path trampoline. */
584 x86_mov_reg_membase (code, X86_EAX, MONO_ARCH_RGCTX_REG, 4, 4);
585 /* Load mrgctx/vtable */
586 x86_mov_reg_membase (code, MONO_ARCH_VTABLE_REG, X86_ESP, 4, 4);
/* Tail-jump into the slow path with the vtable register set up. */
588 x86_jump_reg (code, X86_EAX);
590 mono_arch_flush_icache (buf, code - buf);
591 mono_profiler_code_buffer_new (buf, code - buf, MONO_PROFILER_CODE_BUFFER_GENERICS_TRAMPOLINE, NULL);
593 g_assert (code - buf <= tramp_size);
595 *info = mono_tramp_info_create ("rgctx_fetch_trampoline_general", buf, code - buf, ji, unwind_ops);
/*
 * Overwrite the start of a compiled method with "push func_arg; call func",
 * redirecting future invocations (used to invalidate stale code).
 */
601 mono_arch_invalidate_method (MonoJitInfo *ji, void *func, gpointer func_arg)
603 /* FIXME: This is not thread safe */
604 guint8 *code = ji->code_start;
606 x86_push_imm (code, func_arg);
607 x86_call_code (code, (guint8*)func);
/* C helper: fetch the resume address stashed in this thread's JIT TLS by the handler-block machinery. */
611 handler_block_trampoline_helper (void)
613 MonoJitTlsData *jit_tls = mono_tls_get_jit_tls ();
614 return jit_tls->handler_block_return_address;
/*
 * Create the handler-block guard trampoline: recovers the interrupted
 * handler's return address via a C helper, then simulates a call into the
 * generic HANDLER_BLOCK_GUARD trampoline.
 */
618 mono_arch_create_handler_block_trampoline (MonoTrampInfo **info, gboolean aot)
620 guint8 *tramp = mono_get_trampoline_code (MONO_TRAMPOLINE_HANDLER_BLOCK_GUARD);
623 MonoJumpInfo *ji = NULL;
625 GSList *unwind_ops = NULL;
629 code = buf = mono_global_codeman_reserve (tramp_size);
631 unwind_ops = mono_arch_get_cie_program ();
632 cfa_offset = sizeof (mgreg_t);
634 This trampoline restore the call chain of the handler block then jumps into the code that deals with it.
638 * We are in a method frame after the call emitted by OP_CALL_HANDLER.
641 /*Slow path uses a c helper*/
/* eax = handler_block_return_address for the current thread. */
642 x86_call_code (code, handler_block_trampoline_helper)
643 /* Simulate a call */
644 /*Fix stack alignment*/
645 x86_alu_reg_imm (code, X86_SUB, X86_ESP, 0x4);
646 cfa_offset += sizeof (mgreg_t);
647 mono_add_unwind_op_def_cfa_offset (unwind_ops, code, buf, cfa_offset);
649 /* This is the address the trampoline will return to */
650 x86_push_reg (code, X86_EAX);
651 cfa_offset += sizeof (mgreg_t);
652 mono_add_unwind_op_def_cfa_offset (unwind_ops, code, buf, cfa_offset);
654 /* Dummy trampoline argument, since we call the generic trampoline directly */
655 x86_push_imm (code, 0);
656 cfa_offset += sizeof (mgreg_t);
657 mono_add_unwind_op_def_cfa_offset (unwind_ops, code, buf, cfa_offset);
658 x86_jump_code (code, tramp);
660 mono_arch_flush_icache (buf, code - buf);
661 mono_profiler_code_buffer_new (buf, code - buf, MONO_PROFILER_CODE_BUFFER_HELPER, NULL);
662 g_assert (code - buf <= tramp_size);
664 *info = mono_tramp_info_create ("handler_block_trampoline", buf, code - buf, ji, unwind_ops);
/*
 * Given the address immediately after a call instruction, return the call's
 * target if it is a direct call (0xe8 rel32); the rel32 is relative to CODE.
 */
670 mono_arch_get_call_target (guint8 *code)
672 if (code [-5] == 0xe8) {
673 gint32 disp = *(gint32*)(code - 4);
674 guint8 *target = code + disp;
/* Read the PLT info offset stored 6 bytes into the entry, right after the jmp instruction. */
683 mono_arch_get_plt_info_offset (guint8 *plt_entry, mgreg_t *regs, guint8 *code)
685 return *(guint32*)(plt_entry + 6);
689 * mono_arch_get_gsharedvt_arg_trampoline:
691 * Return a trampoline which passes ARG to the gsharedvt in/out trampoline ADDR.
694 mono_arch_get_gsharedvt_arg_trampoline (MonoDomain *domain, gpointer arg, gpointer addr)
696 guint8 *code, *start;
703 start = code = mono_domain_code_reserve (domain, buf_len);
705 unwind_ops = mono_arch_get_cie_program ();
/* Pass ARG in eax, then tail-jump to the gsharedvt in/out trampoline. */
707 x86_mov_reg_imm (code, X86_EAX, arg);
708 x86_jump_code (code, addr);
709 g_assert ((code - start) <= buf_len);
711 mono_arch_flush_icache (start, code - start);
712 mono_profiler_code_buffer_new (start, code - start, MONO_PROFILER_CODE_BUFFER_GENERICS_TRAMPOLINE, NULL);
714 mono_tramp_info_register (mono_tramp_info_create (NULL, start, code - start, NULL, unwind_ops), domain);
720 * mono_arch_create_sdb_trampoline:
722 * Return a trampoline which captures the current context, passes it to
723 * debugger_agent_single_step_from_context ()/debugger_agent_breakpoint_from_context (),
724 * then restores the (potentially changed) context.
727 mono_arch_create_sdb_trampoline (gboolean single_step, MonoTrampInfo **info, gboolean aot)
729 int tramp_size = 256;
730 int framesize, ctx_offset, cfa_offset;
732 GSList *unwind_ops = NULL;
733 MonoJumpInfo *ji = NULL;
735 code = buf = mono_global_codeman_reserve (tramp_size);
/* Frame: one outgoing argument word, then an 8-aligned MonoContext, all frame-aligned. */
740 framesize += sizeof (mgreg_t);
742 framesize = ALIGN_TO (framesize, 8);
743 ctx_offset = framesize;
744 framesize += sizeof (MonoContext);
746 framesize = ALIGN_TO (framesize, MONO_ARCH_FRAME_ALIGNMENT);
750 mono_add_unwind_op_def_cfa (unwind_ops, code, buf, X86_ESP, 4);
751 // IP saved at CFA - 4
752 mono_add_unwind_op_offset (unwind_ops, code, buf, X86_NREG, -cfa_offset);
/* Standard prologue with unwind info. */
754 x86_push_reg (code, X86_EBP);
755 cfa_offset += sizeof(mgreg_t);
756 mono_add_unwind_op_def_cfa_offset (unwind_ops, code, buf, cfa_offset);
757 mono_add_unwind_op_offset (unwind_ops, code, buf, X86_EBP, - cfa_offset);
759 x86_mov_reg_reg (code, X86_EBP, X86_ESP, sizeof(mgreg_t));
760 mono_add_unwind_op_def_cfa_reg (unwind_ops, code, buf, X86_EBP);
761 /* The + 8 makes the stack aligned */
762 x86_alu_reg_imm (code, X86_SUB, X86_ESP, framesize + 8);
764 /* Initialize a MonoContext structure on the stack */
765 x86_mov_membase_reg (code, X86_ESP, ctx_offset + G_STRUCT_OFFSET (MonoContext, eax), X86_EAX, sizeof (mgreg_t));
766 x86_mov_membase_reg (code, X86_ESP, ctx_offset + G_STRUCT_OFFSET (MonoContext, ebx), X86_EBX, sizeof (mgreg_t));
767 x86_mov_membase_reg (code, X86_ESP, ctx_offset + G_STRUCT_OFFSET (MonoContext, ecx), X86_ECX, sizeof (mgreg_t));
768 x86_mov_membase_reg (code, X86_ESP, ctx_offset + G_STRUCT_OFFSET (MonoContext, edx), X86_EDX, sizeof (mgreg_t));
/* Caller's ebp was spilled at [ebp+0] by the prologue. */
769 x86_mov_reg_membase (code, X86_EAX, X86_EBP, 0, sizeof (mgreg_t));
770 x86_mov_membase_reg (code, X86_ESP, ctx_offset + G_STRUCT_OFFSET (MonoContext, ebp), X86_EAX, sizeof (mgreg_t));
/* Reconstruct the caller's esp: current ebp plus what the entry sequence pushed. */
771 x86_mov_reg_reg (code, X86_EAX, X86_EBP, sizeof (mgreg_t));
772 x86_alu_reg_imm (code, X86_ADD, X86_EAX, cfa_offset);
773 x86_mov_membase_reg (code, X86_ESP, ctx_offset + G_STRUCT_OFFSET (MonoContext, esp), X86_ESP, sizeof (mgreg_t));
774 x86_mov_membase_reg (code, X86_ESP, ctx_offset + G_STRUCT_OFFSET (MonoContext, esi), X86_ESI, sizeof (mgreg_t));
775 x86_mov_membase_reg (code, X86_ESP, ctx_offset + G_STRUCT_OFFSET (MonoContext, edi), X86_EDI, sizeof (mgreg_t));
/* Caller's eip is the return address at [ebp+4]. */
776 x86_mov_reg_membase (code, X86_EAX, X86_EBP, 4, sizeof (mgreg_t));
777 x86_mov_membase_reg (code, X86_ESP, ctx_offset + G_STRUCT_OFFSET (MonoContext, eip), X86_EAX, sizeof (mgreg_t));
779 /* Call the single step/breakpoint function in sdb */
780 x86_lea_membase (code, X86_EAX, X86_ESP, ctx_offset);
781 x86_mov_membase_reg (code, X86_ESP, 0, X86_EAX, sizeof (mgreg_t));
/* AOT path presumably unimplemented — trap if reached (branch context elided). */
784 x86_breakpoint (code);
787 x86_call_code (code, debugger_agent_single_step_from_context);
789 x86_call_code (code, debugger_agent_breakpoint_from_context);
792 /* Restore registers from ctx */
793 /* Overwrite the saved ebp */
794 x86_mov_reg_membase (code, X86_EAX, X86_ESP, ctx_offset + G_STRUCT_OFFSET (MonoContext, ebp), sizeof (mgreg_t));
795 x86_mov_membase_reg (code, X86_EBP, 0, X86_EAX, sizeof (mgreg_t));
796 /* Overwrite saved eip */
797 x86_mov_reg_membase (code, X86_EAX, X86_ESP, ctx_offset + G_STRUCT_OFFSET (MonoContext, eip), sizeof (mgreg_t));
798 x86_mov_membase_reg (code, X86_EBP, 4, X86_EAX, sizeof (mgreg_t));
799 x86_mov_reg_membase (code, X86_EAX, X86_ESP, ctx_offset + G_STRUCT_OFFSET (MonoContext, eax), sizeof (mgreg_t));
800 x86_mov_reg_membase (code, X86_EBX, X86_ESP, ctx_offset + G_STRUCT_OFFSET (MonoContext, ebx), sizeof (mgreg_t));
801 x86_mov_reg_membase (code, X86_ECX, X86_ESP, ctx_offset + G_STRUCT_OFFSET (MonoContext, ecx), sizeof (mgreg_t));
802 x86_mov_reg_membase (code, X86_EDX, X86_ESP, ctx_offset + G_STRUCT_OFFSET (MonoContext, edx), sizeof (mgreg_t));
803 x86_mov_reg_membase (code, X86_ESI, X86_ESP, ctx_offset + G_STRUCT_OFFSET (MonoContext, esi), sizeof (mgreg_t));
804 x86_mov_reg_membase (code, X86_EDI, X86_ESP, ctx_offset + G_STRUCT_OFFSET (MonoContext, edi), sizeof (mgreg_t));
/* Epilogue unwind bookkeeping. */
807 cfa_offset -= sizeof (mgreg_t);
808 mono_add_unwind_op_def_cfa (unwind_ops, code, buf, X86_ESP, cfa_offset);
811 mono_arch_flush_icache (code, code - buf);
812 g_assert (code - buf <= tramp_size);
814 const char *tramp_name = single_step ? "sdb_single_step_trampoline" : "sdb_breakpoint_trampoline";
815 *info = mono_tramp_info_create (tramp_name, buf, code - buf, ji, unwind_ops);