Make MonoThreadCleanupFunc take an InternalThread instead of a Thread.
diff --git a/mono/mini/mini-amd64.c b/mono/mini/mini-amd64.c
index d063e7958b632ea0b39b8e3cca503e1266ff794d..2dcd2d0e9d42a54c83208a55d130f65304b6d892 100644
--- a/mono/mini/mini-amd64.c
+++ b/mono/mini/mini-amd64.c
 #include "cpu-amd64.h"
 #include "debugger-agent.h"
 
-/* 
- * Can't define this in mini-amd64.h cause that would turn on the generic code in
- * method-to-ir.c.
- */
-#define MONO_ARCH_IMT_REG AMD64_R11
-
 static gint lmf_tls_offset = -1;
 static gint lmf_addr_tls_offset = -1;
 static gint appdomain_tls_offset = -1;
@@ -718,7 +712,7 @@ get_call_info (MonoGenericSharingContext *gsctx, MonoMemPool *mp, MonoMethodSign
         * are sometimes made using calli without sig->hasthis set, like in the delegate
         * invoke wrappers.
         */
-       if (cinfo->vtype_retaddr && !is_pinvoke && (sig->hasthis || (sig->param_count > 0 && MONO_TYPE_IS_REFERENCE (sig->params [0])))) {
+       if (cinfo->vtype_retaddr && !is_pinvoke && (sig->hasthis || (sig->param_count > 0 && MONO_TYPE_IS_REFERENCE (mini_type_get_underlying_type (gsctx, sig->params [0]))))) {
                if (sig->hasthis) {
                        add_general (&gr, &stack_size, cinfo->args + 0);
                } else {
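
This hunk decides whether the hidden valuetype-return pointer is passed after the this/first argument, so the classification of sig->params [0] has to hold up under generic sharing: the raw type can be a type variable (or an enum), which MONO_TYPE_IS_REFERENCE would misclassify, and mini_type_get_underlying_type resolves it through the gsctx first. A minimal sketch of the idea, using hypothetical simplified types standing in for MonoType:

    #include <stdbool.h>

    /* Hypothetical stand-ins for MonoType/MonoTypeEnum, only to illustrate
       why the raw parameter type must be resolved before classification. */
    typedef enum { TAG_VAR, TAG_ENUM, TAG_OBJECT, TAG_INT32 } Tag;

    typedef struct Type {
            Tag tag;
            const struct Type *under; /* VAR: its instantiation; ENUM: its base type */
    } Type;

    static const Type *
    underlying (const Type *t)
    {
            /* Strip type-variable and enum wrappers, as
               mini_type_get_underlying_type does via the gsctx. */
            while ((t->tag == TAG_VAR || t->tag == TAG_ENUM) && t->under)
                    t = t->under;
            return t;
    }

    static bool
    first_param_is_reference (const Type *raw)
    {
            /* The old check classified raw->tag directly, so a shared T
               instantiated to a reference type reported false; resolving
               first gives the right answer. */
            return underlying (raw)->tag == TAG_OBJECT;
    }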
@@ -6908,41 +6902,6 @@ mono_breakpoint_clean_code (guint8 *method_start, guint8 *code, int offset, guin
        return can_write;
 }
 
-gpointer
-mono_arch_get_vcall_slot (guint8 *code, mgreg_t *regs, int *displacement)
-{
-       guint8 buf [10];
-       gint32 disp;
-       MonoJitInfo *ji = NULL;
-
-#ifdef ENABLE_LLVM
-       /* code - 9 might be before the start of the method */
-       /* FIXME: Avoid this expensive call somehow */
-       ji = mono_jit_info_table_find (mono_domain_get (), (char*)code);
-#endif
-
-       mono_breakpoint_clean_code (ji ? ji->code_start : NULL, code, 9, buf, sizeof (buf));
-       code = buf + 9;
-
-       *displacement = 0;
-
-       code -= 7;
-
-       /*
-        * This function is no longer used, the only caller is
-        * mono_arch_nullify_class_init_trampoline ().
-        */
-       if ((code [0] == 0x41) && (code [1] == 0xff) && (code [2] == 0x15)) {
-               /* call OFFSET(%rip) */
-               g_assert_not_reached ();
-               *displacement = *(guint32*)(code + 3);
-               return (gpointer*)(code + disp + 7);
-       } else {
-               g_assert_not_reached ();
-               return NULL;
-       }
-}
-
 int
 mono_arch_get_this_arg_reg (MonoMethodSignature *sig, MonoGenericSharingContext *gsctx, guint8 *code)
 {
@@ -7217,25 +7176,24 @@ mono_arch_build_imt_thunk (MonoVTable *vtable, MonoDomain *domain, MonoIMTCheckI
                                        if (amd64_is_imm32 (item->key))
                                                amd64_alu_reg_imm (code, X86_CMP, MONO_ARCH_IMT_REG, (guint32)(gssize)item->key);
                                        else {
-                                               amd64_mov_reg_imm (code, AMD64_R10, item->key);
-                                               amd64_alu_reg_reg (code, X86_CMP, MONO_ARCH_IMT_REG, AMD64_R10);
+                                               amd64_mov_reg_imm (code, AMD64_R11, item->key);
+                                               amd64_alu_reg_reg (code, X86_CMP, MONO_ARCH_IMT_REG, AMD64_R11);
                                        }
                                }
                                item->jmp_code = code;
                                amd64_branch8 (code, X86_CC_NE, 0, FALSE);
-                               /* See the comment below about R10 */
                                if (item->has_target_code) {
-                                       amd64_mov_reg_imm (code, AMD64_R10, item->value.target_code);
-                                       amd64_jump_reg (code, AMD64_R10);
+                                       amd64_mov_reg_imm (code, AMD64_R11, item->value.target_code);
+                                       amd64_jump_reg (code, AMD64_R11);
                                } else {
-                                       amd64_mov_reg_imm (code, AMD64_R10, & (vtable->vtable [item->value.vtable_slot]));
-                                       amd64_jump_membase (code, AMD64_R10, 0);
+                                       amd64_mov_reg_imm (code, AMD64_R11, & (vtable->vtable [item->value.vtable_slot]));
+                                       amd64_jump_membase (code, AMD64_R11, 0);
                                }
 
                                if (fail_case) {
                                        amd64_patch (item->jmp_code, code);
-                                       amd64_mov_reg_imm (code, AMD64_R10, fail_tramp);
-                                       amd64_jump_reg (code, AMD64_R10);
+                                       amd64_mov_reg_imm (code, AMD64_R11, fail_tramp);
+                                       amd64_jump_reg (code, AMD64_R11);
                                        item->jmp_code = NULL;
                                }
                        } else {
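
Two things are going on in this hunk. First, the key compare is split because CMP can only encode a sign-extended 32-bit immediate, so keys outside that range must be materialized in a register. Second, the scratch register changes from R10 to R11 throughout: the MONO_ARCH_IMT_REG define deleted at the top of this diff named R11, and moving it out of mini-amd64.c (presumably redefining it in mini-amd64.h) evidently reassigns the IMT register away from R11, so R11 becomes the free scratch while R10, which the deleted comment below notes must stay intact for calls needing a runtime generic context, is no longer touched. A sketch of the immediate-width test, mirroring what amd64_is_imm32 in amd64-codegen.h decides:

    #include <glib.h>

    /* A 64-bit constant is usable as an x86-64 immediate only if it is
       the sign extension of its low 32 bits. */
    static gboolean
    fits_in_imm32 (gint64 val)
    {
            return val >= -(((gint64)1) << 31) && val <= ((((gint64)1) << 31) - 1);
    }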
@@ -7244,33 +7202,27 @@ mono_arch_build_imt_thunk (MonoVTable *vtable, MonoDomain *domain, MonoIMTCheckI
                                if (amd64_is_imm32 (item->key))
                                        amd64_alu_reg_imm (code, X86_CMP, MONO_ARCH_IMT_REG, (guint32)(gssize)item->key);
                                else {
-                                       amd64_mov_reg_imm (code, AMD64_R10, item->key);
-                                       amd64_alu_reg_reg (code, X86_CMP, MONO_ARCH_IMT_REG, AMD64_R10);
+                                       amd64_mov_reg_imm (code, AMD64_R11, item->key);
+                                       amd64_alu_reg_reg (code, X86_CMP, MONO_ARCH_IMT_REG, AMD64_R11);
                                }
                                item->jmp_code = code;
                                amd64_branch8 (code, X86_CC_NE, 0, FALSE);
-                               /* See the comment below about R10 */
-                               amd64_mov_reg_imm (code, AMD64_R10, & (vtable->vtable [item->value.vtable_slot]));
-                               amd64_jump_membase (code, AMD64_R10, 0);
+                               amd64_mov_reg_imm (code, AMD64_R11, & (vtable->vtable [item->value.vtable_slot]));
+                               amd64_jump_membase (code, AMD64_R11, 0);
                                amd64_patch (item->jmp_code, code);
                                amd64_breakpoint (code);
                                item->jmp_code = NULL;
 #else
-                               /* We're using R10 here because R11
-                                  needs to be preserved.  R10 needs
-                                  to be preserved for calls which
-                                  require a runtime generic context,
-                                  but interface calls don't. */
-                               amd64_mov_reg_imm (code, AMD64_R10, & (vtable->vtable [item->value.vtable_slot]));
-                               amd64_jump_membase (code, AMD64_R10, 0);
+                               amd64_mov_reg_imm (code, AMD64_R11, & (vtable->vtable [item->value.vtable_slot]));
+                               amd64_jump_membase (code, AMD64_R11, 0);
 #endif
                        }
                } else {
                        if (amd64_is_imm32 (item->key))
                                amd64_alu_reg_imm (code, X86_CMP, MONO_ARCH_IMT_REG, (guint32)(gssize)item->key);
                        else {
-                               amd64_mov_reg_imm (code, AMD64_R10, item->key);
-                               amd64_alu_reg_reg (code, X86_CMP, MONO_ARCH_IMT_REG, AMD64_R10);
+                               amd64_mov_reg_imm (code, AMD64_R11, item->key);
+                               amd64_alu_reg_reg (code, X86_CMP, MONO_ARCH_IMT_REG, AMD64_R11);
                        }
                        item->jmp_code = code;
                        if (x86_is_imm8 (imt_branch_distance (imt_entries, i, item->check_target_idx)))
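
Past the end of this hunk, each non-leaf entry's jne is emitted with a placeholder displacement and backpatched once the next range check's address is known; the x86_is_imm8 test on imt_branch_distance picks the short rel8 form when the estimated span fits in a signed byte. A hedged sketch of that emit-then-patch idiom, with hypothetical helpers standing in for the real amd64_branch8/amd64_patch:

    #include <stdint.h>
    #include <stddef.h>

    /* Emit "jne rel8" with a zero placeholder; return the cursor past
       the instruction so the displacement can be patched later. */
    static uint8_t *
    emit_jne_rel8 (uint8_t *code)
    {
            *code++ = 0x75; /* jne rel8 opcode */
            *code++ = 0x00; /* displacement, not known yet */
            return code;
    }

    /* Backpatch once the target is known. rel8 is relative to the end
       of the branch and only reaches -128..127; longer spans need the
       rel32 form, which is what the distance check above decides. */
    static void
    patch_rel8 (uint8_t *branch_end, uint8_t *target)
    {
            ptrdiff_t disp = target - branch_end;
            branch_end [-1] = (uint8_t) disp;
    }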