+ amd64_mov_reg_reg (code, MONO_ARCH_VTABLE_REG, AMD64_ARG_REG1, sizeof(gpointer));
+
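+ /* transfer control to the RGCTX lazy fetch trampoline for this slot: in AOT mode its address is loaded into R11 from an AOT constant, otherwise a specific trampoline is created and jumped to directly */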
+ if (aot) {
+ code = mono_arch_emit_load_aotconst (buf, code, &ji, MONO_PATCH_INFO_JIT_ICALL_ADDR, g_strdup_printf ("specific_trampoline_lazy_fetch_%u", slot));
+ amd64_jump_reg (code, AMD64_R11);
+ } else {
+ tramp = mono_arch_create_specific_trampoline (GUINT_TO_POINTER (slot), MONO_TRAMPOLINE_RGCTX_LAZY_FETCH, mono_get_root_domain (), NULL);
+
+ /* jump to the actual trampoline */
+ amd64_jump_code (code, tramp);
+ }
+
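+ /* validate the generated code for NaCl builds and flush the instruction cache before returning it */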
+ nacl_global_codeman_validate (&buf, tramp_size, &code);
+ mono_arch_flush_icache (buf, code - buf);
+
+ g_assert (code - buf <= tramp_size);
+
+ if (info)
+ *info = mono_tramp_info_create (mono_get_rgctx_fetch_trampoline_name (slot), buf, code - buf, ji, unwind_ops);
+
+ return buf;
+}
+
+gpointer
+mono_arch_create_generic_class_init_trampoline (MonoTrampInfo **info, gboolean aot)
+{
+ guint8 *tramp;
+ guint8 *code, *buf;
+ static int byte_offset = -1;
+ static guint8 bitmask;
+ guint8 *jump;
+ int tramp_size;
+ GSList *unwind_ops = NULL;
+ MonoJumpInfo *ji = NULL;
+
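+ /* upper bound on the size of the emitted code, checked by the g_assert below */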
+ tramp_size = 64;
+
+ code = buf = mono_global_codeman_reserve (tramp_size);
+
+ if (byte_offset < 0)
+ mono_marshal_find_bitfield_offset (MonoVTable, initialized, &byte_offset, &bitmask);
+
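+ /* fast path: test the 'initialized' bit of the vtable passed in the first argument register; if it is already set just return, otherwise jump to the slow path emitted below */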
+ amd64_test_membase_imm_size (code, MONO_AMD64_ARG_REG1, byte_offset, bitmask, 1);
+ jump = code;
+ amd64_branch8 (code, X86_CC_Z, -1, 1);
+
+ amd64_ret (code);
+
+ x86_patch (jump, code);
+
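+ /* vtable not initialized: tail-jump to the trampoline that runs the class initializer */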
+ if (aot) {
+ code = mono_arch_emit_load_aotconst (buf, code, &ji, MONO_PATCH_INFO_JIT_ICALL_ADDR, "specific_trampoline_generic_class_init");
+ amd64_jump_reg (code, AMD64_R11);
+ } else {
+ tramp = mono_arch_create_specific_trampoline (NULL, MONO_TRAMPOLINE_GENERIC_CLASS_INIT, mono_get_root_domain (), NULL);
+
+ /* jump to the actual trampoline */
+ amd64_jump_code (code, tramp);
+ }
+
+ nacl_global_codeman_validate (&buf, tramp_size, &code);
+
+ mono_arch_flush_icache (buf, code - buf);
+
+ g_assert (code - buf <= tramp_size);
+
+ if (info)
+ *info = mono_tramp_info_create (g_strdup ("generic_class_init_trampoline"), buf, code - buf, ji, unwind_ops);
+
+ return buf;
+}
+
+#ifdef MONO_ARCH_MONITOR_OBJECT_REG
+
+gpointer
+mono_arch_create_monitor_enter_trampoline (MonoTrampInfo **info, gboolean aot)
+{
+ guint8 *tramp;
+ guint8 *code, *buf;
+ guint8 *jump_obj_null, *jump_sync_null, *jump_cmpxchg_failed, *jump_other_owner, *jump_tid, *jump_sync_thin_hash = NULL;
+ int tramp_size;
+ int owner_offset, nest_offset, dummy;
+ MonoJumpInfo *ji = NULL;
+ GSList *unwind_ops = NULL;
+
+ g_assert (MONO_ARCH_MONITOR_OBJECT_REG == AMD64_RDI);
+
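+ /* resolve the offsets of the owner and nest fields inside MonoThreadsSync so the fast path can access them directly */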
+ mono_monitor_threads_sync_members_offset (&owner_offset, &nest_offset, &dummy);
+ g_assert (MONO_THREADS_SYNC_MEMBER_SIZE (owner_offset) == sizeof (gpointer));
+ g_assert (MONO_THREADS_SYNC_MEMBER_SIZE (nest_offset) == sizeof (guint32));
+ owner_offset = MONO_THREADS_SYNC_MEMBER_OFFSET (owner_offset);
+ nest_offset = MONO_THREADS_SYNC_MEMBER_OFFSET (nest_offset);
+
+ tramp_size = 96;
+
+ code = buf = mono_global_codeman_reserve (tramp_size);
+
+ unwind_ops = mono_arch_get_cie_program ();
+
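+ /* the inlined fast path reads the current thread's tid through TLS, so it is only emitted when the thread TLS offset is known */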
+ if (mono_thread_get_tls_offset () != -1) {
+ /* MonoObject* obj is in RDI */
+ /* is obj null? */
+ amd64_test_reg_reg (code, AMD64_RDI, AMD64_RDI);
+ /* if yes, jump to actual trampoline */
+ jump_obj_null = code;
+ amd64_branch8 (code, X86_CC_Z, -1, 1);
+
+ /* load obj->synchronisation into RCX */
+ amd64_mov_reg_membase (code, AMD64_RCX, AMD64_RDI, G_STRUCT_OFFSET (MonoObject, synchronisation), 8);
+
+ if (mono_gc_is_moving ()) {
+ /* if bit zero is set it's a thin hash */
+ /* FIXME: use testb encoding */
+ amd64_test_reg_imm (code, AMD64_RCX, 0x01);
+ jump_sync_thin_hash = code;
+ amd64_branch8 (code, X86_CC_NE, -1, 1);
+
+ /* clear the bits used by the GC */
+ amd64_alu_reg_imm (code, X86_AND, AMD64_RCX, ~0x3);
+ }
+
+ /* is synchronization null? */
+ amd64_test_reg_reg (code, AMD64_RCX, AMD64_RCX);
+ /* if yes, jump to actual trampoline */
+ jump_sync_null = code;
+ amd64_branch8 (code, X86_CC_Z, -1, 1);
+
+ /* load MonoInternalThread* into RDX */
+ code = mono_amd64_emit_tls_get (code, AMD64_RDX, mono_thread_get_tls_offset ());
+ /* load TID into RDX */
+ amd64_mov_reg_membase (code, AMD64_RDX, AMD64_RDX, G_STRUCT_OFFSET (MonoInternalThread, tid), 8);
+
+ /* is synchronization->owner null? */
+ amd64_alu_membase_imm_size (code, X86_CMP, AMD64_RCX, owner_offset, 0, 8);
+ /* if not, jump to next case */
+ jump_tid = code;
+ amd64_branch8 (code, X86_CC_NZ, -1, 1);
+
+ /* if yes, try a compare-exchange with the TID */
+ /* zero RAX */
+ amd64_alu_reg_reg (code, X86_XOR, AMD64_RAX, AMD64_RAX);
+ /* compare and exchange */
+ amd64_prefix (code, X86_LOCK_PREFIX);
+ amd64_cmpxchg_membase_reg_size (code, AMD64_RCX, owner_offset, AMD64_RDX, 8);
+ /* if not successful, jump to actual trampoline */
+ jump_cmpxchg_failed = code;
+ amd64_branch8 (code, X86_CC_NZ, -1, 1);
+ /* if successful, return */
+ amd64_ret (code);
+
+ /* next case: synchronization->owner is not null */
+ x86_patch (jump_tid, code);
+ /* is synchronization->owner == TID? */
+ amd64_alu_membase_reg_size (code, X86_CMP, AMD64_RCX, owner_offset, AMD64_RDX, 8);
+ /* if not, jump to actual trampoline */
+ jump_other_owner = code;
+ amd64_branch8 (code, X86_CC_NZ, -1, 1);
+ /* if yes, increment nest */
+ amd64_inc_membase_size (code, AMD64_RCX, nest_offset, 4);
+ /* return */
+ amd64_ret (code);
+
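+ /* slow-path cases: patch every forward branch taken above to land on the code emitted next */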
+ x86_patch (jump_obj_null, code);
+ if (jump_sync_thin_hash)
+ x86_patch (jump_sync_thin_hash, code);
+ x86_patch (jump_sync_null, code);
+ x86_patch (jump_cmpxchg_failed, code);
+ x86_patch (jump_other_owner, code);
+ }
+