/*
 * tramp-x86.c: JIT trampoline code for x86
 *
 * Authors:
 *   Dietmar Maurer (dietmar@ximian.com)
 *
 * (C) 2001 Ximian, Inc.
 */
#include <config.h>
#include <glib.h>

#include <mono/metadata/appdomain.h>
#include <mono/metadata/metadata-internals.h>
#include <mono/metadata/marshal.h>
#include <mono/metadata/tabledefs.h>
#include <mono/metadata/mono-debug.h>
#include <mono/metadata/mono-debug-debugger.h>
#include <mono/arch/x86/x86-codegen.h>

#ifdef HAVE_VALGRIND_MEMCHECK_H
#include <valgrind/memcheck.h>
#endif

#include "mini.h"
#include "mini-x86.h"
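
/*
 * A trampoline that does nothing but return, created in
 * mono_arch_create_trampoline_code () below. Once a class is initialized,
 * class init call sites in JITted code are nopped out directly, while calls
 * made from AOT code get this trampoline stored into their vtable slot
 * instead.
 */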
static guint8* nullified_class_init_trampoline;

/*
 * mono_arch_get_unbox_trampoline:
 * @m: method pointer
 * @addr: pointer to native code for @m
 *
 * When value type methods are called through the vtable, we need to unbox
 * the 'this' argument. This method returns a pointer to a trampoline which
 * does the unboxing before calling the method.
 */
gpointer
mono_arch_get_unbox_trampoline (MonoMethod *m, gpointer addr)
{
	guint8 *code, *start;
	int this_pos = 4;
	MonoDomain *domain = mono_domain_get ();

	if (!mono_method_signature (m)->ret->byref && MONO_TYPE_ISSTRUCT (mono_method_signature (m)->ret))
		this_pos = 8;

	mono_domain_lock (domain);
	start = code = mono_code_manager_reserve (domain->code_mp, 16);
	mono_domain_unlock (domain);
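
	/* The trampoline is just two instructions: an add that advances the
	 * 'this' argument on the stack past the MonoObject header (the vtable
	 * and synchronisation fields) to the start of the value type data,
	 * followed by a tail jump to the method's native code. */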
	x86_alu_membase_imm (code, X86_ADD, X86_ESP, this_pos, sizeof (MonoObject));
	x86_jump_code (code, addr);
	g_assert ((code - start) < 16);

	return start;
}

void
mono_arch_patch_callsite (guint8 *method_start, guint8 *orig_code, guint8 *addr)
{
	guint8 *code;
	guint8 buf [8];
	gboolean can_write = mono_breakpoint_clean_code (method_start, orig_code, 8, buf, sizeof (buf));

	code = buf + 8;

	if (mono_running_on_valgrind ())
		can_write = FALSE;

	/* go to the start of the call instruction
	 *
	 * address_byte = (m << 6) | (o << 3) | reg
	 * call opcode: 0xff address_byte displacement
	 * 0xff m=1,o=2 imm8
	 * 0xff m=2,o=2 imm32
	 */
	code -= 6;
	orig_code -= 6;
	if (code [1] == 0xe8) {
		if (can_write) {
			InterlockedExchange ((gint32*)(orig_code + 2), (guint)addr - ((guint)orig_code + 1) - 5);
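			/* The rewritten rel32 is relative to the end of the 5 byte
			 * call: the call opcode sits at orig_code + 1 and its
			 * displacement at orig_code + 2, so the new value is addr
			 * minus the address of the following instruction,
			 * (orig_code + 1) + 5. */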

#ifdef HAVE_VALGRIND_MEMCHECK_H
			/* Tell valgrind to recompile the patched code */
			//VALGRIND_DISCARD_TRANSLATIONS (code + 2, code + 6);
#endif
		}
	} else if (code [1] == 0xe9) {
		/* A PLT entry: jmp <DISP> */
		if (can_write)
			InterlockedExchange ((gint32*)(orig_code + 2), (guint)addr - ((guint)orig_code + 1) - 5);
	} else {
		printf ("Invalid trampoline sequence: %x %x %x %x %x %x %x\n", code [0], code [1], code [2], code [3],
			code [4], code [5], code [6]);
		g_assert_not_reached ();
	}
}

void
mono_arch_patch_plt_entry (guint8 *code, guint8 *addr)
{
	/* A PLT entry: jmp <DISP> */
	g_assert (code [0] == 0xe9);

	if (!mono_running_on_valgrind ())
		InterlockedExchange ((gint32*)(code + 1), (guint)addr - (guint)code - 5);
}

void
mono_arch_nullify_class_init_trampoline (guint8 *code, gssize *regs)
{
	guint8 buf [16];
	gboolean can_write = mono_breakpoint_clean_code (NULL, code, 6, buf, sizeof (buf));

	if (!can_write)
		return;

	code -= 5;
	if (code [0] == 0xe8) {
		if (!mono_running_on_valgrind ()) {
			guint32 ops;
			/*
			 * Thread safe code patching using the algorithm from the paper
			 * 'Practicing JUDO: Java Under Dynamic Optimizations'
			 */
			/*
			 * First atomically change the first 2 bytes of the call to a
			 * spinning jump:
			 * loop: jmp loop
			 */
			ops = 0xfeeb;
			InterlockedExchange ((gint32*)code, ops);

			/* Then change the other bytes to a nop */
			code [2] = 0x90;
			code [3] = 0x90;
			code [4] = 0x90;

			/* Then atomically change the first 4 bytes to a nop as well */
			ops = 0x90909090;
			InterlockedExchange ((gint32*)code, ops);
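
			/* Byte-level view of the patch (the call site is 5 bytes
			 * and the first exchange writes 4; 'eb fe' is jmp -2, a
			 * self loop that parks any thread arriving mid-patch):
			 *   e8 xx xx xx xx    original call <disp32>
			 *   eb fe 00 00 xx    after the first exchange
			 *   eb fe 90 90 90    tail replaced by nops
			 *   90 90 90 90 90    after the final exchange
			 */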
#ifdef HAVE_VALGRIND_MEMCHECK_H
			/* FIXME: the calltree skin trips on the self modifying code above */
			/* Tell valgrind to recompile the patched code */
			//VALGRIND_DISCARD_TRANSLATIONS (code, code + 8);
#endif
		}
	} else if (code [0] == 0x90 || code [0] == 0xeb) {
		/* Already changed by another thread */
		;
	} else if ((code [-1] == 0xff) && (x86_modrm_reg (code [0]) == 0x2)) {
		/* call *<OFFSET>(<REG>) -> Call made from AOT code */
		gpointer *vtable_slot;

		vtable_slot = mono_arch_get_vcall_slot_addr (code + 5, (gpointer*)regs);
		g_assert (vtable_slot);

		*vtable_slot = nullified_class_init_trampoline;
	} else {
		printf ("Invalid trampoline sequence: %x %x %x %x %x %x %x\n", code [0], code [1], code [2], code [3],
			code [4], code [5], code [6]);
		g_assert_not_reached ();
	}
}

void
mono_arch_nullify_plt_entry (guint8 *code)
{
	if (!mono_running_on_valgrind ()) {
		guint32 ops;

		/* First atomically change the first 2 bytes to a spinning jump */
		ops = 0xfeeb;
		InterlockedExchange ((gint32*)code, ops);

		/* Then change the other bytes to a nop */
		code [2] = 0x90;
		code [3] = 0x90;
		code [4] = 0x90;

		/* Finally change the first byte to a ret, so callers of the
		 * nullified entry return immediately */
		ops = 0xc3;
		InterlockedExchange ((gint32*)code, ops);
	}
}

guchar*
mono_arch_create_trampoline_code (MonoTrampolineType tramp_type)
{
	guint8 *buf, *code, *tramp;
	int pushed_args, pushed_args_caller_saved;

	code = buf = mono_global_codeman_reserve (256);

	/* Note that there is a single argument to the trampoline
	 * and it is stored at: esp + pushed_args * sizeof (gpointer)
	 * the ret address is at: esp + (pushed_args + 1) * sizeof (gpointer)
	 */

	/* If this is a generic class init the argument is not on the
	 * stack yet but in MONO_ARCH_VTABLE_REG. We first check
	 * whether the vtable is already initialized, in which case we
	 * just return. Otherwise we push it and continue.
	 */
	if (tramp_type == MONO_TRAMPOLINE_GENERIC_CLASS_INIT) {
		static int byte_offset = -1;
		static guint8 bitmask;

		guint8 *jump;

		if (byte_offset < 0)
			mono_marshal_find_bitfield_offset (MonoVTable, initialized, &byte_offset, &bitmask);

		/* Test the 'initialized' bit; if it is set, just return */
		x86_test_membase_imm (buf, MONO_ARCH_VTABLE_REG, byte_offset, bitmask);
		jump = buf;
		x86_branch8 (buf, X86_CC_Z, -1, 1);

		x86_ret (buf);

		x86_patch (jump, buf);
		x86_push_reg (buf, MONO_ARCH_VTABLE_REG);
	}

	/* Put all registers into an array on the stack
	 * If this code is changed, make sure to update the offset value in
	 * mono_arch_find_this_argument () in mini-x86.c.
	 */
	x86_push_reg (buf, X86_EDI);
	x86_push_reg (buf, X86_ESI);
	x86_push_reg (buf, X86_EBP);
	x86_push_reg (buf, X86_ESP);
	x86_push_reg (buf, X86_EBX);
	x86_push_reg (buf, X86_EDX);
	x86_push_reg (buf, X86_ECX);
	x86_push_reg (buf, X86_EAX);
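
	/* Pushing in descending register-number order (X86_EDI is 7, X86_EAX
	 * is 0 in x86-codegen.h) leaves a gpointer array whose index matches
	 * the X86_* register numbers, with the saved EAX at the lowest
	 * address. This is what lets code like
	 * mono_arch_get_rgctx_lazy_fetch_offset () below index the saved
	 * registers directly. */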

	pushed_args_caller_saved = pushed_args = 8;

	/* Align stack on apple */
	x86_alu_reg_imm (buf, X86_SUB, X86_ESP, 4);

	pushed_args ++;

	/* save LMF begin */

	/* save the IP (caller ip) */
	if (tramp_type == MONO_TRAMPOLINE_JUMP)
		x86_push_imm (buf, 0);
	else
		x86_push_membase (buf, X86_ESP, (pushed_args + 1) * sizeof (gpointer));

	pushed_args++;

	x86_push_reg (buf, X86_EBP);
	x86_push_reg (buf, X86_ESI);
	x86_push_reg (buf, X86_EDI);
	x86_push_reg (buf, X86_EBX);

	pushed_args += 4;

	/* save ESP */
	x86_push_reg (buf, X86_ESP);
	/* Adjust the saved ESP value so it points to the previous frame */
	x86_alu_membase_imm (buf, X86_ADD, X86_ESP, 0, (pushed_args + 2) * 4);

	pushed_args ++;

	/* save method info */
	if ((tramp_type == MONO_TRAMPOLINE_GENERIC) || (tramp_type == MONO_TRAMPOLINE_JUMP))
		x86_push_membase (buf, X86_ESP, pushed_args * sizeof (gpointer));
	else
		x86_push_imm (buf, 0);

	pushed_args++;

	/* On apple, the stack is correctly aligned to 16 bytes because pushed_args is
	 * 16 and there is the extra trampoline arg + the return ip pushed by call
	 * FIXME: Note that if an exception happens while some args are pushed
	 * on the stack, the stack will be misaligned.
	 */
	g_assert (pushed_args == 16);
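
	/* The tally at this point: 8 saved registers + 1 alignment slot +
	 * 1 caller IP + 4 callee-saved registers + 1 saved ESP +
	 * 1 method info = 16 stack slots. */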

	/* get the address of lmf for the current thread */
	x86_call_code (buf, mono_get_lmf_addr);
	/* push lmf */
	x86_push_reg (buf, X86_EAX);
	/* push *lmf (previous_lmf) */
	x86_push_membase (buf, X86_EAX, 0);
	/* Signal to mono_arch_find_jit_info () that this is a trampoline frame */
	x86_alu_membase_imm (buf, X86_ADD, X86_ESP, 0, 1);
	/* *(lmf) = ESP */
	x86_mov_membase_reg (buf, X86_EAX, 0, X86_ESP, 4);
	/* save LMF end */

	pushed_args += 2;
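
	/* The 'add 1' above sets the low bit of the saved previous_lmf value
	 * as the trampoline-frame marker the unwinder looks for; it is
	 * stripped again by the 'sub 1' when the LMF is popped below. */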

	/* starting the call sequence */

	/* FIXME: Push the trampoline address */
	x86_push_imm (buf, 0);

	pushed_args++;

	/* push the method info */
	x86_push_membase (buf, X86_ESP, pushed_args * sizeof (gpointer));

	pushed_args++;

	/* push the return address onto the stack */
	if (tramp_type == MONO_TRAMPOLINE_JUMP)
		x86_push_imm (buf, 0);
	else
		x86_push_membase (buf, X86_ESP, (pushed_args + 1) * sizeof (gpointer));
	pushed_args++;

	/* push the address of the register array */
	x86_lea_membase (buf, X86_EAX, X86_ESP, (pushed_args - 8) * sizeof (gpointer));
	x86_push_reg (buf, X86_EAX);

	pushed_args++;

#ifdef __APPLE__
	/* check the stack is aligned after the ret ip is pushed */
	/*x86_mov_reg_reg (buf, X86_EDX, X86_ESP, 4);
	x86_alu_reg_imm (buf, X86_AND, X86_EDX, 15);
	x86_alu_reg_imm (buf, X86_CMP, X86_EDX, 0);
	x86_branch_disp (buf, X86_CC_Z, 3, FALSE);
	x86_breakpoint (buf);*/
#endif

	tramp = (guint8*)mono_get_trampoline_func (tramp_type);
	x86_call_code (buf, tramp);

	x86_alu_reg_imm (buf, X86_ADD, X86_ESP, 4*4);

	pushed_args -= 4;

	/* Check for thread interruption */
	/* This is not perf critical code so no need to check the interrupt flag */
	x86_push_reg (buf, X86_EAX);
	x86_call_code (buf, (guint8*)mono_thread_interruption_checkpoint);
	x86_pop_reg (buf, X86_EAX);

	/* restore LMF start */

	/* ebx = previous_lmf */
	x86_pop_reg (buf, X86_EBX);
	pushed_args--;
	x86_alu_reg_imm (buf, X86_SUB, X86_EBX, 1);

	/* edi = lmf */
	x86_pop_reg (buf, X86_EDI);
	pushed_args--;

	/* *(lmf) = previous_lmf */
	x86_mov_membase_reg (buf, X86_EDI, 0, X86_EBX, 4);

	/* discard method info */
	x86_pop_reg (buf, X86_ESI);
	pushed_args--;

	/* discard ESP */
	x86_pop_reg (buf, X86_ESI);
	pushed_args--;

	/* restore callee saved regs */
	x86_pop_reg (buf, X86_EBX);
	x86_pop_reg (buf, X86_EDI);
	x86_pop_reg (buf, X86_ESI);
	x86_pop_reg (buf, X86_EBP);

	pushed_args -= 4;

	/* discard saved IP */
	x86_alu_reg_imm (buf, X86_ADD, X86_ESP, 4);
	pushed_args--;

	/* restore LMF end */

	/* Restore caller saved registers */
	x86_mov_reg_membase (buf, X86_ECX, X86_ESP, (pushed_args - pushed_args_caller_saved + X86_ECX) * 4, 4);
	x86_mov_reg_membase (buf, X86_EDX, X86_ESP, (pushed_args - pushed_args_caller_saved + X86_EDX) * 4, 4);

	/* Pop saved reg array + stack align + method ptr */
	x86_alu_reg_imm (buf, X86_ADD, X86_ESP, 10 * 4);

	pushed_args -= 10;

	/* We've popped one more stack item than we've pushed (the
	   method ptr argument), so we must end up at -1. */
	g_assert (pushed_args == -1);

	if (tramp_type == MONO_TRAMPOLINE_CLASS_INIT ||
			tramp_type == MONO_TRAMPOLINE_GENERIC_CLASS_INIT ||
			tramp_type == MONO_TRAMPOLINE_RGCTX_LAZY_FETCH)
		x86_ret (buf);
	else
		/* call the compiled method */
		x86_jump_reg (buf, X86_EAX);

	g_assert ((buf - code) <= 256);

	if (tramp_type == MONO_TRAMPOLINE_CLASS_INIT) {
		/* Initialize the nullified class init trampoline used in the AOT case */
		nullified_class_init_trampoline = buf = mono_global_codeman_reserve (16);

		x86_ret (buf);
	}

	return code;
}

#define TRAMPOLINE_SIZE 10

gpointer
mono_arch_create_specific_trampoline (gpointer arg1, MonoTrampolineType tramp_type, MonoDomain *domain, guint32 *code_len)
{
	guint8 *code, *buf, *tramp;

	tramp = mono_get_trampoline_code (tramp_type);

	mono_domain_lock (domain);
	code = buf = mono_code_manager_reserve_align (domain->code_mp, TRAMPOLINE_SIZE, 4);
	mono_domain_unlock (domain);

	x86_push_imm (buf, arg1);
	x86_jump_code (buf, tramp);
	g_assert ((buf - code) <= TRAMPOLINE_SIZE);
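
	/* The whole specific trampoline is "push <arg1>; jmp <generic
	 * trampoline>": a 5 byte push imm32 plus a 5 byte jmp rel32, which
	 * is where TRAMPOLINE_SIZE = 10 comes from. */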

	mono_arch_flush_icache (code, buf - code);

	if (code_len)
		*code_len = buf - code;

	return code;
}

gpointer
mono_arch_create_rgctx_lazy_fetch_trampoline (guint32 slot)
{
	guint8 *tramp = mono_get_trampoline_code (MONO_TRAMPOLINE_RGCTX_LAZY_FETCH);
	guint8 *code, *buf;
	guint8 **rgctx_null_jumps;
	int tramp_size;
	int depth, index;
	int i;

	index = slot;
	for (depth = 0; ; ++depth) {
		int size = mono_class_rgctx_get_array_size (depth);

		if (index < size - 1)
			break;
		index -= size - 1;
	}

	tramp_size = 36 + 6 * depth;
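
	/* A rough size budget, checked by the assert at the end: each depth
	 * level below emits a 2 byte mov, a 2 byte test and a 2 byte jz
	 * (6 bytes), and the fixed part covers the initial loads, the slot
	 * fetch, the stack shuffle and the final jump. */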

	code = buf = mono_global_codeman_reserve (tramp_size);

	rgctx_null_jumps = g_malloc (sizeof (guint8*) * (depth + 2));

	/* load vtable ptr */
	x86_mov_reg_membase (buf, X86_EAX, X86_ESP, 4, 4);
	/* load rgctx ptr from vtable */
	x86_mov_reg_membase (buf, X86_EAX, X86_EAX, G_STRUCT_OFFSET (MonoVTable, runtime_generic_context), 4);
	/* is the rgctx ptr null? */
	x86_test_reg_reg (buf, X86_EAX, X86_EAX);
	/* if yes, jump to actual trampoline */
	rgctx_null_jumps [0] = buf;
	x86_branch8 (buf, X86_CC_Z, -1, 1);

	for (i = 0; i < depth; ++i) {
		/* load ptr to next array */
		x86_mov_reg_membase (buf, X86_EAX, X86_EAX, 0, 4);
		/* is the ptr null? */
		x86_test_reg_reg (buf, X86_EAX, X86_EAX);
		/* if yes, jump to actual trampoline */
		rgctx_null_jumps [i + 1] = buf;
		x86_branch8 (buf, X86_CC_Z, -1, 1);
	}

	/* fetch slot */
	x86_mov_reg_membase (buf, X86_EAX, X86_EAX, sizeof (gpointer) * (index + 1), 4);
	/* is the slot null? */
	x86_test_reg_reg (buf, X86_EAX, X86_EAX);
	/* if yes, jump to actual trampoline */
	rgctx_null_jumps [depth + 1] = buf;
	x86_branch8 (buf, X86_CC_Z, -1, 1);
	/* otherwise return */
	x86_ret (buf);

	for (i = 0; i <= depth + 1; ++i)
		x86_patch (rgctx_null_jumps [i], buf);

	g_free (rgctx_null_jumps);

	/*
	 * our stack looks like this (tos on top):
	 *
	 * | ret addr   |
	 * | vtable ptr |
	 * | ...        |
	 *
	 * the trampoline code expects it to look like this:
	 *
	 * | vtable ptr |
	 * | ret addr   |
	 * | ...        |
	 *
	 * whereas our caller expects to still have one argument on
	 * the stack when we return, so we transform the stack into
	 * this:
	 *
	 * | vtable ptr |
	 * | ret addr   |
	 * | dummy      |
	 * | ...        |
	 *
	 * which actually only requires us to push the vtable ptr, and
	 * the "old" vtable ptr becomes the dummy.
	 */

	x86_push_membase (buf, X86_ESP, 4);

	x86_mov_reg_imm (buf, X86_EAX, slot);
	x86_jump_code (buf, tramp);

	mono_arch_flush_icache (code, buf - code);

	g_assert (buf - code <= tramp_size);

	return code;
}
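
/* The lazy fetch trampoline above loads the slot number into EAX just
 * before jumping to the generic trampoline code, and the generic code
 * saves all registers into an array on the stack, so the slot number can
 * be recovered from the saved EAX entry. */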
guint32
mono_arch_get_rgctx_lazy_fetch_offset (gpointer *regs)
{
	return (guint32)(regs [X86_EAX]);
}

void
mono_arch_invalidate_method (MonoJitInfo *ji, void *func, gpointer func_arg)
{
	/* FIXME: This is not thread safe */
	guint8 *code = ji->code_start;

	x86_push_imm (code, func_arg);
	x86_call_code (code, (guint8*)func);
}