2 * tramp-x86.c: JIT trampoline code for x86
5 * Dietmar Maurer (dietmar@ximian.com)
7 * (C) 2001 Ximian, Inc.
13 #include <mono/metadata/appdomain.h>
14 #include <mono/metadata/metadata-internals.h>
15 #include <mono/metadata/marshal.h>
16 #include <mono/metadata/tabledefs.h>
17 #include <mono/metadata/mono-debug.h>
18 #include <mono/metadata/mono-debug-debugger.h>
19 #include <mono/metadata/monitor.h>
20 #include <mono/metadata/gc-internal.h>
21 #include <mono/arch/x86/x86-codegen.h>
23 #include <mono/utils/memcheck.h>
29 * mono_arch_get_unbox_trampoline:
31 * @addr: pointer to native code for @m
33 * When value type methods are called through the vtable, we need to unbox the
34 * 'this' argument. This method returns a pointer to a trampoline which does
35 * the unboxing before calling the method.
38 mono_arch_get_unbox_trampoline (MonoMethod *m, gpointer addr)
41 int this_pos = 4, size = NACL_SIZE(16, 32);
42 MonoDomain *domain = mono_domain_get ();
44 start = code = mono_domain_code_reserve (domain, size);
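/* advance the boxed 'this' pointer on the stack past the MonoObject header so the callee sees the unboxed value */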
46 x86_alu_membase_imm (code, X86_ADD, X86_ESP, this_pos, sizeof (MonoObject));
47 x86_jump_code (code, addr);
48 g_assert ((code - start) < size);
50 nacl_domain_code_validate (domain, &start, size, &code);
56 mono_arch_get_static_rgctx_trampoline (MonoMethod *m, MonoMethodRuntimeGenericContext *mrgctx, gpointer addr)
61 MonoDomain *domain = mono_domain_get ();
63 buf_len = NACL_SIZE (10, 32);
65 start = code = mono_domain_code_reserve (domain, buf_len);
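/* load the static rgctx argument into the RGCTX register and tail-jump to the method */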
67 x86_mov_reg_imm (code, MONO_ARCH_RGCTX_REG, mrgctx);
68 x86_jump_code (code, addr);
69 g_assert ((code - start) <= buf_len);
71 nacl_domain_code_validate (domain, &start, buf_len, &code);
72 mono_arch_flush_icache (start, code - start);
78 mono_arch_get_llvm_imt_trampoline (MonoDomain *domain, MonoMethod *m, int vt_offset)
86 start = code = mono_domain_code_reserve (domain, buf_len);
88 this_offset = mono_x86_get_this_arg_offset (NULL, mono_method_signature (m));
91 x86_mov_reg_imm (code, MONO_ARCH_IMT_REG, m);
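/* Load the 'this' argument from the stack (the extra 4 skips the return address) */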
93 x86_mov_reg_membase (code, X86_EAX, X86_ESP, this_offset + 4, 4);
94 /* Load vtable address */
95 x86_mov_reg_membase (code, X86_EAX, X86_EAX, 0, 4);
96 x86_jump_membase (code, X86_EAX, vt_offset);
98 g_assert ((code - start) < buf_len);
100 nacl_domain_code_validate (domain, &start, buf_len, &code);
102 mono_arch_flush_icache (start, code - start);
108 mono_arch_patch_callsite (guint8 *method_start, guint8 *orig_code, guint8 *addr)
110 #if defined(__default_codegen__)
113 gboolean can_write = mono_breakpoint_clean_code (method_start, orig_code, 8, buf, sizeof (buf));
117 /* go to the start of the call instruction
119 * address_byte = (m << 6) | (o << 3) | reg
120 * call opcode: 0xff address_byte displacement
126 if (code [1] == 0xe8) {
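/* A direct call: atomically rewrite the 32-bit displacement so it targets addr */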
128 InterlockedExchange ((gint32*)(orig_code + 2), (guint)addr - ((guint)orig_code + 1) - 5);
130 /* Tell valgrind to recompile the patched code */
131 VALGRIND_DISCARD_TRANSLATIONS (orig_code + 2, 4);
133 } else if (code [1] == 0xe9) {
134 /* A PLT entry: jmp <DISP> */
136 InterlockedExchange ((gint32*)(orig_code + 2), (guint)addr - ((guint)orig_code + 1) - 5);
138 printf ("Invalid trampoline sequence: %x %x %x %x %x %x %x\n", code [0], code [1], code [2], code [3],
139 code [4], code [5], code [6]);
140 g_assert_not_reached ();
142 #elif defined(__native_client__)
143 /* Target must be bundle-aligned */
144 g_assert (((guint32)addr & kNaClAlignmentMask) == 0);
146 /* 0xe8 = call <DISP>, 0xe9 = jump <DISP> */
147 if ((orig_code [-5] == 0xe8) || (orig_code [-6] == 0xe9)) {
149 gint32 offset = (gint32)addr - (gint32)orig_code;
150 guint8 buf[sizeof(gint32)];
151 *((gint32*)(buf)) = offset;
152 ret = nacl_dyncode_modify (orig_code - sizeof(gint32), buf, sizeof(gint32));
155 printf ("Invalid trampoline sequence %p: %02x %02x %02x %02x %02x\n", orig_code, orig_code [-5], orig_code [-4], orig_code [-3], orig_code [-2], orig_code[-1]);
156 g_assert_not_reached ();
162 mono_arch_patch_plt_entry (guint8 *code, gpointer *got, mgreg_t *regs, guint8 *addr)
166 /* Patch the jump table entry used by the plt entry */
168 #if defined(__native_client_codegen__) || defined(__native_client__)
169 /* for both compiler and runtime */
171 /* mov <DISP>(%ebx), %ecx */
172 /* and 0xffffffe0, %ecx */
174 g_assert (code [0] == 0x8b);
175 g_assert (code [1] == 0x8b);
177 offset = *(guint32*)(code + 2);
178 #elif defined(__default_codegen__)
179 /* A PLT entry: jmp *<DISP>(%ebx) */
180 g_assert (code [0] == 0xff);
181 g_assert (code [1] == 0xa3);
183 offset = *(guint32*)(code + 2);
184 #endif /* __native_client_codegen__ */
186 got = (gpointer*)(gsize) regs [MONO_ARCH_GOT_REG];
187 *(guint8**)((guint8*)got + offset) = addr;
191 get_vcall_slot (guint8 *code, mgreg_t *regs, int *displacement)
193 const int kBufSize = NACL_SIZE (8, 16);
198 mono_breakpoint_clean_code (NULL, code, kBufSize, buf, kBufSize);
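/* call *<disp32>(reg): opcode 0xff, ModRM with mod == 2 and the /2 reg field */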
203 if ((code [0] == 0xff) && ((code [1] & 0x18) == 0x10) && ((code [1] >> 6) == 2)) {
204 reg = code [1] & 0x07;
205 disp = *((gint32*)(code + 2));
206 #if defined(__native_client_codegen__) || defined(__native_client__)
207 } else if ((code[1] == 0x83) && (code[2] == 0xe1) && (code[4] == 0xff) &&
208 (code[5] == 0xd1) && (code[-5] == 0x8b)) {
209 disp = *((gint32*)(code - 3));
210 reg = code[-4] & 0x07;
211 } else if ((code[-2] == 0x8b) && (code[1] == 0x83) && (code[4] == 0xff)) {
212 reg = code[-1] & 0x07;
213 disp = (signed char)code[0];
216 g_assert_not_reached ();
220 *displacement = disp;
221 return (gpointer)regs [reg];
225 get_vcall_slot_addr (guint8* code, mgreg_t *regs)
229 vt = get_vcall_slot (code, regs, &displacement);
232 return (gpointer*)((char*)vt + displacement);
236 mono_arch_nullify_class_init_trampoline (guint8 *code, mgreg_t *regs)
239 gboolean can_write = mono_breakpoint_clean_code (NULL, code, 6, buf, sizeof (buf));
240 gpointer tramp = mini_get_nullified_class_init_trampoline ();
246 if (code [0] == 0xe8) {
247 #if defined(__default_codegen__)
248 if (!mono_running_on_valgrind ()) {
251 * Thread safe code patching using the algorithm from the paper
252 * 'Practicing JUDO: Java Under Dynamic Optimizations'
255 * First atomically change the first 2 bytes of the call to a
259 InterlockedExchange ((gint32*)code, ops);
261 /* Then change the other bytes to a nop */
266 /* Then atomically change the first 4 bytes to a nop as well */
268 InterlockedExchange ((gint32*)code, ops);
269 /* FIXME: the calltree skin trips on the self modifying code above */
271 /* Tell valgrind to recompile the patched code */
272 //VALGRIND_DISCARD_TRANSLATIONS (code, 8);
274 #elif defined(__native_client_codegen__)
275 mono_arch_patch_callsite (code, code + 5, tramp);
277 } else if (code [0] == 0x90 || code [0] == 0xeb) {
278 /* Already changed by another thread */
280 } else if ((code [-1] == 0xff) && (x86_modrm_reg (code [0]) == 0x2)) {
281 /* call *<OFFSET>(<REG>) -> Call made from AOT code */
282 gpointer *vtable_slot;
284 vtable_slot = get_vcall_slot_addr (code + 5, regs);
285 g_assert (vtable_slot);
287 *vtable_slot = tramp;
289 printf ("Invalid trampoline sequence: %x %x %x %x %x %x %x\n", code [0], code [1], code [2], code [3],
290 code [4], code [5], code [6]);
291 g_assert_not_reached ();
296 mono_arch_create_generic_trampoline (MonoTrampolineType tramp_type, MonoTrampInfo **info, gboolean aot)
299 guint8 *buf, *code, *tramp;
300 int pushed_args, pushed_args_caller_saved;
301 GSList *unwind_ops = NULL;
302 MonoJumpInfo *ji = NULL;
304 unwind_ops = mono_arch_get_cie_program ();
306 code = buf = mono_global_codeman_reserve (256);
308 /* Note that there is a single argument to the trampoline
309 * and it is stored at: esp + pushed_args * sizeof (gpointer)
310 * the ret address is at: esp + (pushed_args + 1) * sizeof (gpointer)
313 /* Put all registers into an array on the stack
314 * If this code is changed, make sure to update the offset value in
315 * mono_arch_get_this_arg_from_call () in mini-x86.c.
317 x86_push_reg (code, X86_EDI);
318 x86_push_reg (code, X86_ESI);
319 x86_push_reg (code, X86_EBP);
320 x86_push_reg (code, X86_ESP);
321 x86_push_reg (code, X86_EBX);
322 x86_push_reg (code, X86_EDX);
323 x86_push_reg (code, X86_ECX);
324 x86_push_reg (code, X86_EAX);
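/* The pushes are in reverse register-number order, so the saved array can be indexed directly with X86_EAX..X86_EDI */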
326 pushed_args_caller_saved = pushed_args = 8;
328 /* Align stack on apple */
329 x86_alu_reg_imm (code, X86_SUB, X86_ESP, 4);
335 /* save the IP (caller ip) */
336 if (tramp_type == MONO_TRAMPOLINE_JUMP)
337 x86_push_imm (code, 0);
339 x86_push_membase (code, X86_ESP, (pushed_args + 1) * sizeof (gpointer));
343 x86_push_reg (code, X86_EBP);
344 x86_push_reg (code, X86_ESI);
345 x86_push_reg (code, X86_EDI);
346 x86_push_reg (code, X86_EBX);
351 x86_push_reg (code, X86_ESP);
352 /* Adjust ESP so it points to the previous frame */
353 x86_alu_membase_imm (code, X86_ADD, X86_ESP, 0, (pushed_args + 2) * 4);
357 /* save method info */
358 if ((tramp_type == MONO_TRAMPOLINE_JIT) || (tramp_type == MONO_TRAMPOLINE_JUMP))
359 x86_push_membase (code, X86_ESP, pushed_args * sizeof (gpointer));
361 x86_push_imm (code, 0);
365 /* On apple, the stack is correctly aligned to 16 bytes because pushed_args is
366 * 16 and there is the extra trampoline arg + the return ip pushed by call
367 * FIXME: Note that if an exception happens while some args are pushed
368 * on the stack, the stack will be misaligned.
370 g_assert (pushed_args == 16);
372 /* get the address of lmf for the current thread */
374 code = mono_arch_emit_load_aotconst (buf, code, &ji, MONO_PATCH_INFO_JIT_ICALL_ADDR, "mono_get_lmf_addr");
375 x86_call_reg (code, X86_EAX);
377 x86_call_code (code, mono_get_lmf_addr);
380 x86_push_reg (code, X86_EAX);
381 /* push *lmf (previous_lmf) */
382 x86_push_membase (code, X86_EAX, 0);
383 /* Signal to mono_arch_find_jit_info () that this is a trampoline frame */
384 x86_alu_membase_imm (code, X86_ADD, X86_ESP, 0, 1);
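/* *lmf_addr = ESP, installing the LMF that was just built on the stack */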
386 x86_mov_membase_reg (code, X86_EAX, 0, X86_ESP, 4);
391 /* starting the call sequence */
393 /* FIXME: Push the trampoline address */
394 x86_push_imm (code, 0);
398 /* push the method info */
399 x86_push_membase (code, X86_ESP, pushed_args * sizeof (gpointer));
403 /* push the return address onto the stack */
404 if (tramp_type == MONO_TRAMPOLINE_JUMP)
405 x86_push_imm (code, 0);
407 x86_push_membase (code, X86_ESP, (pushed_args + 1) * sizeof (gpointer));
409 /* push the address of the register array */
410 x86_lea_membase (code, X86_EAX, X86_ESP, (pushed_args - 8) * sizeof (gpointer));
411 x86_push_reg (code, X86_EAX);
416 /* check the stack is aligned after the ret ip is pushed */
417 /*x86_mov_reg_reg (buf, X86_EDX, X86_ESP, 4);
418 x86_alu_reg_imm (buf, X86_AND, X86_EDX, 15);
419 x86_alu_reg_imm (buf, X86_CMP, X86_EDX, 0);
420 x86_branch_disp (buf, X86_CC_Z, 3, FALSE);
421 x86_breakpoint (buf);*/
424 mono_add_unwind_op_def_cfa (unwind_ops, code, buf, X86_ESP, ((pushed_args + 2) * 4));
427 char *icall_name = g_strdup_printf ("trampoline_func_%d", tramp_type);
428 code = mono_arch_emit_load_aotconst (buf, code, &ji, MONO_PATCH_INFO_JIT_ICALL_ADDR, icall_name);
429 x86_call_reg (code, X86_EAX);
431 tramp = (guint8*)mono_get_trampoline_func (tramp_type);
432 x86_call_code (code, tramp);
435 x86_alu_reg_imm (code, X86_ADD, X86_ESP, 4*4);
439 /* Check for thread interruption */
440 /* This is not perf critical code so no need to check the interrupt flag */
441 /* Align the stack on osx */
442 x86_alu_reg_imm (code, X86_SUB, X86_ESP, 3 * 4);
443 x86_push_reg (code, X86_EAX);
445 code = mono_arch_emit_load_aotconst (buf, code, &ji, MONO_PATCH_INFO_JIT_ICALL_ADDR, "mono_thread_force_interruption_checkpoint");
446 x86_call_reg (code, X86_EAX);
448 x86_call_code (code, (guint8*)mono_thread_force_interruption_checkpoint);
450 x86_pop_reg (code, X86_EAX);
451 x86_alu_reg_imm (code, X86_ADD, X86_ESP, 3 * 4);
455 /* ebx = previous_lmf */
456 x86_pop_reg (code, X86_EBX);
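/* clear the marker bit that tagged this as a trampoline frame */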
458 x86_alu_reg_imm (code, X86_SUB, X86_EBX, 1);
461 x86_pop_reg (code, X86_EDI);
464 /* *(lmf) = previous_lmf */
465 x86_mov_membase_reg (code, X86_EDI, 0, X86_EBX, 4);
467 /* discard method info */
468 x86_pop_reg (code, X86_ESI);
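/* discard the saved ESP */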
472 x86_pop_reg (code, X86_ESI);
475 /* restore caller saved regs */
476 x86_pop_reg (code, X86_EBX);
477 x86_pop_reg (code, X86_EDI);
478 x86_pop_reg (code, X86_ESI);
479 x86_pop_reg (code, X86_EBP);
483 /* discard saved IP */
484 x86_alu_reg_imm (code, X86_ADD, X86_ESP, 4);
487 /* restore LMF end */
489 if (!MONO_TRAMPOLINE_TYPE_MUST_RETURN (tramp_type)) {
491 * Overwrite the method ptr with the address we need to jump to,
494 x86_mov_membase_reg (code, X86_ESP, pushed_args * sizeof (gpointer), X86_EAX, 4);
497 /* Restore caller saved registers */
498 x86_mov_reg_membase (code, X86_ECX, X86_ESP, (pushed_args - pushed_args_caller_saved + X86_ECX) * 4, 4);
499 x86_mov_reg_membase (code, X86_EDX, X86_ESP, (pushed_args - pushed_args_caller_saved + X86_EDX) * 4, 4);
500 if ((tramp_type == MONO_TRAMPOLINE_RESTORE_STACK_PROT) || (tramp_type == MONO_TRAMPOLINE_AOT_PLT))
501 x86_mov_reg_membase (code, X86_EAX, X86_ESP, (pushed_args - pushed_args_caller_saved + X86_EAX) * 4, 4);
503 if (!MONO_TRAMPOLINE_TYPE_MUST_RETURN (tramp_type)) {
504 /* Pop saved reg array + stack align */
505 x86_alu_reg_imm (code, X86_ADD, X86_ESP, 9 * 4);
507 g_assert (pushed_args == 0);
509 /* Pop saved reg array + stack align + method ptr */
510 x86_alu_reg_imm (code, X86_ADD, X86_ESP, 10 * 4);
513 /* We've popped one more stack item than we've pushed (the
514 method ptr argument), so we must end up at -1. */
515 g_assert (pushed_args == -1);
518 /* Block guard trampolines are called with the stack aligned but must exit with the stack unaligned. */
519 if (tramp_type == MONO_TRAMPOLINE_HANDLER_BLOCK_GUARD) {
520 x86_pop_reg (code, X86_EAX);
521 x86_alu_reg_imm (code, X86_ADD, X86_ESP, 0x8);
522 x86_jump_reg (code, X86_EAX);
527 nacl_global_codeman_validate (&buf, 256, &code);
528 g_assert ((code - buf) <= 256);
531 tramp_name = mono_get_generic_trampoline_name (tramp_type);
532 *info = mono_tramp_info_create (tramp_name, buf, code - buf, ji, unwind_ops);
540 mono_arch_get_nullified_class_init_trampoline (MonoTrampInfo **info)
543 int tramp_size = NACL_SIZE (16, kNaClAlignment);
545 code = buf = mono_global_codeman_reserve (tramp_size);
548 nacl_global_codeman_validate (&buf, tramp_size, &code);
550 mono_arch_flush_icache (buf, code - buf);
553 *info = mono_tramp_info_create ("nullified_class_init_trampoline", buf, code - buf, NULL, NULL);
558 #define TRAMPOLINE_SIZE 10
561 mono_arch_create_specific_trampoline (gpointer arg1, MonoTrampolineType tramp_type, MonoDomain *domain, guint32 *code_len)
563 guint8 *code, *buf, *tramp;
565 tramp = mono_get_trampoline_code (tramp_type);
567 code = buf = mono_domain_code_reserve_align (domain, TRAMPOLINE_SIZE, NACL_SIZE (4, kNaClAlignment));
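/* push the trampoline argument and jump to the generic trampoline */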
569 x86_push_imm (buf, arg1);
570 x86_jump_code (buf, tramp);
571 g_assert ((buf - code) <= TRAMPOLINE_SIZE);
573 nacl_domain_code_validate (domain, &code, NACL_SIZE (4, kNaClAlignment), &buf);
575 mono_arch_flush_icache (code, buf - code);
578 *code_len = buf - code;
584 mono_arch_create_rgctx_lazy_fetch_trampoline (guint32 slot, MonoTrampInfo **info, gboolean aot)
588 guint8 **rgctx_null_jumps;
593 MonoJumpInfo *ji = NULL;
594 GSList *unwind_ops = NULL;
596 unwind_ops = mono_arch_get_cie_program ();
598 mrgctx = MONO_RGCTX_SLOT_IS_MRGCTX (slot);
599 index = MONO_RGCTX_SLOT_INDEX (slot);
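/* for an MRGCTX, the slots are preceded by the MonoMethodRuntimeGenericContext header, so skip past it */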
601 index += MONO_SIZEOF_METHOD_RUNTIME_GENERIC_CONTEXT / sizeof (gpointer);
602 for (depth = 0; ; ++depth) {
603 int size = mono_class_rgctx_get_array_size (depth, mrgctx);
605 if (index < size - 1)
610 #if defined(__default_codegen__)
611 tramp_size = (aot ? 64 : 36) + 6 * depth;
612 #elif defined(__native_client_codegen__)
613 tramp_size = (aot ? 64 : 36) + 2 * kNaClAlignment +
614 6 * (depth + kNaClAlignment);
617 code = buf = mono_global_codeman_reserve (tramp_size);
619 rgctx_null_jumps = g_malloc (sizeof (guint8*) * (depth + 2));
621 /* load vtable/mrgctx ptr */
622 x86_mov_reg_membase (code, X86_EAX, X86_ESP, 4, 4);
624 /* load rgctx ptr from vtable */
625 x86_mov_reg_membase (code, X86_EAX, X86_EAX, G_STRUCT_OFFSET (MonoVTable, runtime_generic_context), 4);
626 /* is the rgctx ptr null? */
627 x86_test_reg_reg (code, X86_EAX, X86_EAX);
628 /* if yes, jump to actual trampoline */
629 rgctx_null_jumps [0] = code;
630 x86_branch8 (code, X86_CC_Z, -1, 1);
633 for (i = 0; i < depth; ++i) {
634 /* load ptr to next array */
635 if (mrgctx && i == 0)
636 x86_mov_reg_membase (code, X86_EAX, X86_EAX, MONO_SIZEOF_METHOD_RUNTIME_GENERIC_CONTEXT, 4);
638 x86_mov_reg_membase (code, X86_EAX, X86_EAX, 0, 4);
639 /* is the ptr null? */
640 x86_test_reg_reg (code, X86_EAX, X86_EAX);
641 /* if yes, jump to actual trampoline */
642 rgctx_null_jumps [i + 1] = code;
643 x86_branch8 (code, X86_CC_Z, -1, 1);
647 x86_mov_reg_membase (code, X86_EAX, X86_EAX, sizeof (gpointer) * (index + 1), 4);
648 /* is the slot null? */
649 x86_test_reg_reg (code, X86_EAX, X86_EAX);
650 /* if yes, jump to actual trampoline */
651 rgctx_null_jumps [depth + 1] = code;
652 x86_branch8 (code, X86_CC_Z, -1, 1);
653 /* otherwise return */
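/* patch the null-check branches to jump here, the slow path; in the mrgctx case the first check is never emitted, hence the loop starts at 1 */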
656 for (i = mrgctx ? 1 : 0; i <= depth + 1; ++i)
657 x86_patch (rgctx_null_jumps [i], code);
659 g_free (rgctx_null_jumps);
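/* slow path: reload the vtable/mrgctx argument into the register expected by the specific fetch trampoline */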
661 x86_mov_reg_membase (code, MONO_ARCH_VTABLE_REG, X86_ESP, 4, 4);
664 code = mono_arch_emit_load_aotconst (buf, code, &ji, MONO_PATCH_INFO_JIT_ICALL_ADDR, g_strdup_printf ("specific_trampoline_lazy_fetch_%u", slot));
665 x86_jump_reg (code, X86_EAX);
667 tramp = mono_arch_create_specific_trampoline (GUINT_TO_POINTER (slot), MONO_TRAMPOLINE_RGCTX_LAZY_FETCH, mono_get_root_domain (), NULL);
669 /* jump to the actual trampoline */
670 x86_jump_code (code, tramp);
673 nacl_global_codeman_validate (&buf, tramp_size, &code);
674 mono_arch_flush_icache (buf, code - buf);
676 g_assert (code - buf <= tramp_size);
679 char *name = mono_get_rgctx_fetch_trampoline_name (slot);
680 *info = mono_tramp_info_create (name, buf, code - buf, ji, unwind_ops);
688 * mono_arch_create_general_rgctx_lazy_fetch_trampoline:
690 * This is a general variant of the rgctx fetch trampolines. It receives a pointer to gpointer[2] in the rgctx reg. The first entry contains the slot, the second
691 * the trampoline to call if the slot is not filled.
694 mono_arch_create_general_rgctx_lazy_fetch_trampoline (MonoTrampInfo **info, gboolean aot)
698 MonoJumpInfo *ji = NULL;
699 GSList *unwind_ops = NULL;
703 unwind_ops = mono_arch_get_cie_program ();
707 code = buf = mono_global_codeman_reserve (tramp_size);
709 // FIXME: Currently, we always go to the slow path.
711 /* Load trampoline addr */
712 x86_mov_reg_membase (code, X86_EAX, MONO_ARCH_RGCTX_REG, 4, 4);
713 /* Load mrgctx/vtable */
714 x86_mov_reg_membase (code, MONO_ARCH_VTABLE_REG, X86_ESP, 4, 4);
716 x86_jump_reg (code, X86_EAX);
718 nacl_global_codeman_validate (&buf, tramp_size, &code);
719 mono_arch_flush_icache (buf, code - buf);
721 g_assert (code - buf <= tramp_size);
724 *info = mono_tramp_info_create ("rgctx_fetch_trampoline_general", buf, code - buf, ji, unwind_ops);
730 mono_arch_create_generic_class_init_trampoline (MonoTrampInfo **info, gboolean aot)
734 static int byte_offset = -1;
735 static guint8 bitmask;
738 GSList *unwind_ops = NULL;
739 MonoJumpInfo *ji = NULL;
743 code = buf = mono_global_codeman_reserve (tramp_size);
745 unwind_ops = mono_arch_get_cie_program ();
748 mono_marshal_find_bitfield_offset (MonoVTable, initialized, &byte_offset, &bitmask);
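/* fast path: if the vtable's initialized bit is already set, return immediately */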
750 x86_test_membase_imm (code, MONO_ARCH_VTABLE_REG, byte_offset, bitmask);
752 x86_branch8 (code, X86_CC_Z, -1, 1);
756 x86_patch (jump, code);
758 /* Push the vtable so the stack is the same as in a specific trampoline */
759 x86_push_reg (code, MONO_ARCH_VTABLE_REG);
762 code = mono_arch_emit_load_aotconst (buf, code, &ji, MONO_PATCH_INFO_JIT_ICALL_ADDR, "generic_trampoline_generic_class_init");
763 x86_jump_reg (code, X86_EAX);
765 tramp = mono_get_trampoline_code (MONO_TRAMPOLINE_GENERIC_CLASS_INIT);
767 /* jump to the actual trampoline */
768 x86_jump_code (code, tramp);
771 mono_arch_flush_icache (buf, code - buf);
773 g_assert (code - buf <= tramp_size);
774 #ifdef __native_client_codegen__
775 g_assert (code - buf <= kNaClAlignment);
778 nacl_global_codeman_validate (&buf, tramp_size, &code);
781 *info = mono_tramp_info_create ("generic_class_init_trampoline", buf, code - buf, ji, unwind_ops);
786 #ifdef MONO_ARCH_MONITOR_OBJECT_REG
788 * The code produced by this trampoline is equivalent to this:
791 * if (obj->synchronisation) {
792 * if (obj->synchronisation->owner == 0) {
793 * if (cmpxch (&obj->synchronisation->owner, TID, 0) == 0)
796 * if (obj->synchronisation->owner == TID) {
797 * ++obj->synchronisation->nest;
802 * return full_monitor_enter ();
806 mono_arch_create_monitor_enter_trampoline (MonoTrampInfo **info, gboolean aot)
808 guint8 *tramp = mono_get_trampoline_code (MONO_TRAMPOLINE_MONITOR_ENTER);
810 guint8 *jump_obj_null, *jump_sync_null, *jump_other_owner, *jump_cmpxchg_failed, *jump_tid, *jump_sync_thin_hash = NULL;
812 int owner_offset, nest_offset, dummy;
813 MonoJumpInfo *ji = NULL;
814 GSList *unwind_ops = NULL;
816 g_assert (MONO_ARCH_MONITOR_OBJECT_REG == X86_EAX);
818 mono_monitor_threads_sync_members_offset (&owner_offset, &nest_offset, &dummy);
819 g_assert (MONO_THREADS_SYNC_MEMBER_SIZE (owner_offset) == sizeof (gpointer));
820 g_assert (MONO_THREADS_SYNC_MEMBER_SIZE (nest_offset) == sizeof (guint32));
821 owner_offset = MONO_THREADS_SYNC_MEMBER_OFFSET (owner_offset);
822 nest_offset = MONO_THREADS_SYNC_MEMBER_OFFSET (nest_offset);
824 tramp_size = NACL_SIZE (96, 128);
826 code = buf = mono_global_codeman_reserve (tramp_size);
828 if (mono_thread_get_tls_offset () != -1) {
829 /* MonoObject* obj is in EAX */
831 x86_test_reg_reg (code, X86_EAX, X86_EAX);
832 /* if yes, jump to actual trampoline */
833 jump_obj_null = code;
834 x86_branch8 (code, X86_CC_Z, -1, 1);
836 /* load obj->synchronization to ECX */
837 x86_mov_reg_membase (code, X86_ECX, X86_EAX, G_STRUCT_OFFSET (MonoObject, synchronisation), 4);
839 if (mono_gc_is_moving ()) {
840 /* if bit zero is set it's a thin hash */
841 /* FIXME: use testb encoding */
842 x86_test_reg_imm (code, X86_ECX, 0x01);
843 jump_sync_thin_hash = code;
844 x86_branch8 (code, X86_CC_NE, -1, 1);
846 /* clear bits used by the GC */
847 x86_alu_reg_imm (code, X86_AND, X86_ECX, ~0x3);
850 /* is synchronization null? */
851 x86_test_reg_reg (code, X86_ECX, X86_ECX);
853 /* if yes, jump to actual trampoline */
854 jump_sync_null = code;
855 x86_branch8 (code, X86_CC_Z, -1, 1);
857 /* load MonoInternalThread* into EDX */
859 /* load_aotconst () puts the result into EAX */
860 x86_mov_reg_reg (code, X86_EDX, X86_EAX, sizeof (mgreg_t));
861 code = mono_arch_emit_load_aotconst (buf, code, &ji, MONO_PATCH_INFO_TLS_OFFSET, GINT_TO_POINTER (TLS_KEY_THREAD));
862 code = mono_x86_emit_tls_get_reg (code, X86_EAX, X86_EAX);
863 x86_xchg_reg_reg (code, X86_EAX, X86_EDX, sizeof (mgreg_t));
865 code = mono_x86_emit_tls_get (code, X86_EDX, mono_thread_get_tls_offset ());
867 /* load TID into EDX */
868 x86_mov_reg_membase (code, X86_EDX, X86_EDX, G_STRUCT_OFFSET (MonoInternalThread, tid), 4);
870 /* is synchronization->owner null? */
871 x86_alu_membase_imm (code, X86_CMP, X86_ECX, owner_offset, 0);
872 /* if not, jump to next case */
874 x86_branch8 (code, X86_CC_NZ, -1, 1);
876 /* if yes, try a compare-exchange with the TID */
877 /* free up register EAX, needed for the zero */
878 x86_push_reg (code, X86_EAX);
880 x86_alu_reg_reg (code, X86_XOR, X86_EAX, X86_EAX);
881 /* compare and exchange */
882 x86_prefix (code, X86_LOCK_PREFIX);
883 x86_cmpxchg_membase_reg (code, X86_ECX, owner_offset, X86_EDX);
884 /* if not successful, jump to actual trampoline */
885 jump_cmpxchg_failed = code;
886 x86_branch8 (code, X86_CC_NZ, -1, 1);
887 /* if successful, pop and return */
888 x86_pop_reg (code, X86_EAX);
891 /* next case: synchronization->owner is not null */
892 x86_patch (jump_tid, code);
893 /* is synchronization->owner == TID? */
894 x86_alu_membase_reg (code, X86_CMP, X86_ECX, owner_offset, X86_EDX);
895 /* if not, jump to actual trampoline */
896 jump_other_owner = code;
897 x86_branch8 (code, X86_CC_NZ, -1, 1);
898 /* if yes, increment nest */
899 x86_inc_membase (code, X86_ECX, nest_offset);
904 x86_patch (jump_obj_null, code);
905 if (jump_sync_thin_hash)
906 x86_patch (jump_sync_thin_hash, code);
907 x86_patch (jump_sync_null, code);
908 x86_patch (jump_other_owner, code);
909 x86_push_reg (code, X86_EAX);
910 /* jump to the actual trampoline */
911 x86_patch (jump_cmpxchg_failed, code);
913 /* We are calling the generic trampoline directly; the argument is pushed
914 * on the stack just as a specific trampoline would push it.
916 code = mono_arch_emit_load_aotconst (buf, code, &ji, MONO_PATCH_INFO_JIT_ICALL_ADDR, "generic_trampoline_monitor_enter");
917 x86_jump_reg (code, X86_EAX);
919 x86_jump_code (code, tramp);
922 /* push obj and jump to the actual trampoline */
923 x86_push_reg (code, X86_EAX);
925 code = mono_arch_emit_load_aotconst (buf, code, &ji, MONO_PATCH_INFO_JIT_ICALL_ADDR, "generic_trampoline_monitor_enter");
926 x86_jump_reg (code, X86_EAX);
928 x86_jump_code (code, tramp);
932 mono_arch_flush_icache (buf, code - buf);
933 g_assert (code - buf <= tramp_size);
935 nacl_global_codeman_validate (&buf, tramp_size, &code);
938 *info = mono_tramp_info_create ("monitor_enter_trampoline", buf, code - buf, ji, unwind_ops);
944 mono_arch_create_monitor_exit_trampoline (MonoTrampInfo **info, gboolean aot)
946 guint8 *tramp = mono_get_trampoline_code (MONO_TRAMPOLINE_MONITOR_EXIT);
948 guint8 *jump_obj_null, *jump_have_waiters, *jump_sync_null, *jump_not_owned, *jump_sync_thin_hash = NULL;
951 int owner_offset, nest_offset, entry_count_offset;
952 MonoJumpInfo *ji = NULL;
953 GSList *unwind_ops = NULL;
955 g_assert (MONO_ARCH_MONITOR_OBJECT_REG == X86_EAX);
957 mono_monitor_threads_sync_members_offset (&owner_offset, &nest_offset, &entry_count_offset);
958 g_assert (MONO_THREADS_SYNC_MEMBER_SIZE (owner_offset) == sizeof (gpointer));
959 g_assert (MONO_THREADS_SYNC_MEMBER_SIZE (nest_offset) == sizeof (guint32));
960 g_assert (MONO_THREADS_SYNC_MEMBER_SIZE (entry_count_offset) == sizeof (gint32));
961 owner_offset = MONO_THREADS_SYNC_MEMBER_OFFSET (owner_offset);
962 nest_offset = MONO_THREADS_SYNC_MEMBER_OFFSET (nest_offset);
963 entry_count_offset = MONO_THREADS_SYNC_MEMBER_OFFSET (entry_count_offset);
965 tramp_size = NACL_SIZE (96, 128);
967 code = buf = mono_global_codeman_reserve (tramp_size);
969 if (mono_thread_get_tls_offset () != -1) {
970 /* MonoObject* obj is in EAX */
972 x86_test_reg_reg (code, X86_EAX, X86_EAX);
973 /* if yes, jump to actual trampoline */
974 jump_obj_null = code;
975 x86_branch8 (code, X86_CC_Z, -1, 1);
977 /* load obj->synchronization to ECX */
978 x86_mov_reg_membase (code, X86_ECX, X86_EAX, G_STRUCT_OFFSET (MonoObject, synchronisation), 4);
980 if (mono_gc_is_moving ()) {
981 /* if bit zero is set it's a thin hash */
982 /* FIXME: use testb encoding */
983 x86_test_reg_imm (code, X86_ECX, 0x01);
984 jump_sync_thin_hash = code;
985 x86_branch8 (code, X86_CC_NE, -1, 1);
987 /* clear bits used by the GC */
988 x86_alu_reg_imm (code, X86_AND, X86_ECX, ~0x3);
991 /* is synchronization null? */
992 x86_test_reg_reg (code, X86_ECX, X86_ECX);
993 /* if yes, jump to actual trampoline */
994 jump_sync_null = code;
995 x86_branch8 (code, X86_CC_Z, -1, 1);
997 /* next case: synchronization is not null */
998 /* load MonoInternalThread* into EDX */
1000 /* load_aotconst () puts the result into EAX */
1001 x86_mov_reg_reg (code, X86_EDX, X86_EAX, sizeof (mgreg_t));
1002 code = mono_arch_emit_load_aotconst (buf, code, &ji, MONO_PATCH_INFO_TLS_OFFSET, GINT_TO_POINTER (TLS_KEY_THREAD));
1003 code = mono_x86_emit_tls_get_reg (code, X86_EAX, X86_EAX);
1004 x86_xchg_reg_reg (code, X86_EAX, X86_EDX, sizeof (mgreg_t));
1006 code = mono_x86_emit_tls_get (code, X86_EDX, mono_thread_get_tls_offset ());
1008 /* load TID into EDX */
1009 x86_mov_reg_membase (code, X86_EDX, X86_EDX, G_STRUCT_OFFSET (MonoInternalThread, tid), 4);
1010 /* is synchronization->owner == TID */
1011 x86_alu_membase_reg (code, X86_CMP, X86_ECX, owner_offset, X86_EDX);
1012 /* if no, jump to actual trampoline */
1013 jump_not_owned = code;
1014 x86_branch8 (code, X86_CC_NZ, -1, 1);
1016 /* next case: synchronization->owner == TID */
1017 /* is synchronization->nest == 1 */
1018 x86_alu_membase_imm (code, X86_CMP, X86_ECX, nest_offset, 1);
1019 /* if not, jump to next case */
1021 x86_branch8 (code, X86_CC_NZ, -1, 1);
1022 /* if yes, is synchronization->entry_count zero? */
1023 x86_alu_membase_imm (code, X86_CMP, X86_ECX, entry_count_offset, 0);
1024 /* if not, jump to actual trampoline */
1025 jump_have_waiters = code;
1026 x86_branch8 (code, X86_CC_NZ, -1, 1);
1027 /* if yes, set synchronization->owner to null and return */
1028 x86_mov_membase_imm (code, X86_ECX, owner_offset, 0, 4);
1031 /* next case: synchronization->nest is not 1 */
1032 x86_patch (jump_next, code);
1033 /* decrease synchronization->nest and return */
1034 x86_dec_membase (code, X86_ECX, nest_offset);
1037 /* push obj and jump to the actual trampoline */
1038 x86_patch (jump_obj_null, code);
1039 if (jump_sync_thin_hash)
1040 x86_patch (jump_sync_thin_hash, code);
1041 x86_patch (jump_have_waiters, code);
1042 x86_patch (jump_not_owned, code);
1043 x86_patch (jump_sync_null, code);
1046 /* push obj and jump to the actual trampoline */
1047 x86_push_reg (code, X86_EAX);
1049 code = mono_arch_emit_load_aotconst (buf, code, &ji, MONO_PATCH_INFO_JIT_ICALL_ADDR, "generic_trampoline_monitor_exit");
1050 x86_jump_reg (code, X86_EAX);
1052 x86_jump_code (code, tramp);
1055 nacl_global_codeman_validate (&buf, tramp_size, &code);
1057 mono_arch_flush_icache (buf, code - buf);
1058 g_assert (code - buf <= tramp_size);
1061 *info = mono_tramp_info_create ("monitor_exit_trampoline", buf, code - buf, ji, unwind_ops);
1069 mono_arch_create_monitor_enter_trampoline (MonoTrampInfo **info, gboolean aot)
1071 g_assert_not_reached ();
1076 mono_arch_create_monitor_exit_trampoline (MonoTrampInfo **info, gboolean aot)
1078 g_assert_not_reached ();
1085 mono_arch_invalidate_method (MonoJitInfo *ji, void *func, gpointer func_arg)
1087 /* FIXME: This is not thread safe */
1088 guint8 *code = ji->code_start;
1090 x86_push_imm (code, func_arg);
1091 x86_call_code (code, (guint8*)func);
1095 handler_block_trampoline_helper (void)
1097 MonoJitTlsData *jit_tls = mono_native_tls_get_value (mono_jit_tls_id);
1098 return jit_tls->handler_block_return_address;
1102 mono_arch_create_handler_block_trampoline (MonoTrampInfo **info, gboolean aot)
1104 guint8 *tramp = mono_get_trampoline_code (MONO_TRAMPOLINE_HANDLER_BLOCK_GUARD);
1106 int tramp_size = 64;
1107 MonoJumpInfo *ji = NULL;
1108 GSList *unwind_ops = NULL;
1112 code = buf = mono_global_codeman_reserve (tramp_size);
1115 This trampoline restores the call chain of the handler block, then jumps into the code that deals with it.
1119 * We are in a method frame after the call emitted by OP_CALL_HANDLER.
1122 if (mono_get_jit_tls_offset () != -1) {
1123 code = mono_x86_emit_tls_get (code, X86_EAX, mono_get_jit_tls_offset ());
1124 x86_mov_reg_membase (code, X86_EAX, X86_EAX, G_STRUCT_OFFSET (MonoJitTlsData, handler_block_return_address), 4);
1126 /* The slow path uses a C helper */
1127 x86_call_code (code, handler_block_trampoline_helper);
1129 /* Simulate a call */
1130 /* Fix stack alignment */
1131 x86_alu_reg_imm (code, X86_SUB, X86_ESP, 0x4);
1132 /* This is the address the trampoline will return to */
1133 x86_push_reg (code, X86_EAX);
1134 /* Dummy trampoline argument, since we call the generic trampoline directly */
1135 x86_push_imm (code, 0);
1136 x86_jump_code (code, tramp);
1138 nacl_global_codeman_validate (&buf, tramp_size, &code);
1140 mono_arch_flush_icache (buf, code - buf);
1141 g_assert (code - buf <= tramp_size);
1144 *info = mono_tramp_info_create ("handler_block_trampoline", buf, code - buf, ji, unwind_ops);
1150 mono_arch_get_call_target (guint8 *code)
1152 if (code [-5] == 0xe8) {
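/* a 5-byte call: the displacement is relative to the return address, which is code itself */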
1153 gint32 disp = *(gint32*)(code - 4);
1154 guint8 *target = code + disp;
1163 mono_arch_get_plt_info_offset (guint8 *plt_entry, mgreg_t *regs, guint8 *code)
1165 return *(guint32*)(plt_entry + NACL_SIZE (6, 12));
1169 * mono_arch_get_gsharedvt_arg_trampoline:
1171 * Return a trampoline which passes ARG to the gsharedvt in/out trampoline ADDR.
1174 mono_arch_get_gsharedvt_arg_trampoline (MonoDomain *domain, gpointer arg, gpointer addr)
1176 guint8 *code, *start;
1181 start = code = mono_domain_code_reserve (domain, buf_len);
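/* load the gsharedvt arg into EAX and tail-jump to the in/out trampoline */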
1183 x86_mov_reg_imm (code, X86_EAX, arg);
1184 x86_jump_code (code, addr);
1185 g_assert ((code - start) <= buf_len);
1187 nacl_domain_code_validate (domain, &start, buf_len, &code);
1188 mono_arch_flush_icache (start, code - start);
1193 #if defined(ENABLE_GSHAREDVT)
1195 #include "../../../mono-extensions/mono/mini/tramp-x86-gsharedvt.c"
1200 mono_arch_get_gsharedvt_trampoline (MonoTrampInfo **info, gboolean aot)
1207 #endif /* !MONOTOUCH */