Contributed under the terms of the MIT/X11 license by
[mono.git] / mono / mini / mini-amd64.c
index 14cfd0a403bd0041a139fe94dcac7ae213e1eb2a..2547707016f8c4547216f8dd0f5c6f575ebe31f8 100644 (file)
@@ -131,6 +131,13 @@ mono_arch_fregname (int reg)
                return "unknown";
 }
 
+/* TODO: Figure out a way of telling this and the one above apart if things get confusing. */
+const char *
+mono_arch_xregname (int reg)
+{
+       return mono_arch_fregname (reg);
+}
+
 G_GNUC_UNUSED static void
 break_count (void)
 {
@@ -389,15 +396,16 @@ merge_argument_class_from_type (MonoType *type, ArgumentClass class1)
 
 static void
 add_valuetype (MonoGenericSharingContext *gsctx, MonoMethodSignature *sig, ArgInfo *ainfo, MonoType *type,
-              gboolean is_return,
-              guint32 *gr, guint32 *fr, guint32 *stack_size)
+                          gboolean is_return,
+                          guint32 *gr, guint32 *fr, guint32 *stack_size)
 {
        guint32 size, quad, nquads, i;
        ArgumentClass args [2];
        MonoMarshalType *info = NULL;
        MonoClass *klass;
        MonoGenericSharingContext tmp_gsctx;
-
+       gboolean pass_on_stack = FALSE;
+       
        /* 
         * The gsctx currently contains no data, it is only used for checking whether
         * open types are allowed, some callers like mono_arch_get_argument_info ()
@@ -412,9 +420,15 @@ add_valuetype (MonoGenericSharingContext *gsctx, MonoMethodSignature *sig, ArgIn
        if (!sig->pinvoke && !disable_vtypes_in_regs && ((is_return && (size == 8)) || (!is_return && (size <= 16)))) {
                /* We pass and return vtypes of size 8 in a register */
        } else if (!sig->pinvoke || (size == 0) || (size > 16)) {
+               pass_on_stack = TRUE;
+       }
 #else
        if (!sig->pinvoke) {
+               pass_on_stack = TRUE;
+       }
 #endif
+
+       if (pass_on_stack) {
               /* Always pass in memory */
                ainfo->offset = *stack_size;
                *stack_size += ALIGN_TO (size, 8);
@@ -909,6 +923,36 @@ mono_arch_cpu_optimizazions (guint32 *exclude_mask)
        return opts;
 }
 
+/*
+ * This function detects the SSE versions supported by the CPU, using the
+ * CPUID leaf 1 feature bits in EDX (SSE1/SSE2) and ECX (SSE3 and later).
+ *
+ * Returns a bitmask corresponding to all supported versions.
+ *
+ * TODO: detect other versions like SSE4a.
+ */
+guint32
+mono_arch_cpu_enumerate_simd_versions (void)
+{
+       int eax, ebx, ecx, edx;
+       guint32 sse_opts = 0;
+
+       if (cpuid (1, &eax, &ebx, &ecx, &edx)) {
+               if (edx & (1 << 25))
+                       sse_opts |= 1 << SIMD_VERSION_SSE1;
+               if (edx & (1 << 26))
+                       sse_opts |= 1 << SIMD_VERSION_SSE2;
+               if (ecx & (1 << 0))
+                       sse_opts |= 1 << SIMD_VERSION_SSE3;
+               if (ecx & (1 << 9))
+                       sse_opts |= 1 << SIMD_VERSION_SSSE3;
+               if (ecx & (1 << 19))
+                       sse_opts |= 1 << SIMD_VERSION_SSE41;
+               if (ecx & (1 << 20))
+                       sse_opts |= 1 << SIMD_VERSION_SSE42;
+       }
+       return sse_opts;        
+}
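
For orientation, a minimal sketch of how a consumer might gate SSE4.1-only code on this bitmask; the SIMD_VERSION_* constants are the ones used above, but the helper itself is hypothetical and not part of this patch:

	/* hypothetical helper, not part of the patch */
	static gboolean
	editor_has_sse41 (void)
	{
		return (mono_arch_cpu_enumerate_simd_versions () & (1 << SIMD_VERSION_SSE41)) != 0;
	}
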
+
 GList *
 mono_arch_get_allocatable_int_vars (MonoCompile *cfg)
 {
@@ -1241,7 +1285,18 @@ mono_arch_allocate_vars (MonoCompile *cfg)
 
        /*
         * We use the ABI calling conventions for managed code as well.
-        * Exception: valuetypes are never passed or returned in registers.
+        * Exception: valuetypes are only sometimes passed or returned in registers.
+        */
+
+       /*
+        * The stack looks like this:
+        * <incoming arguments passed on the stack>
+        * <return value>
+        * <lmf/caller saved registers>
+        * <locals>
+        * <spill area>
+        * <localloc area>  -> grows dynamically
+        * <params area>
         */
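
To make the layout concrete, a hedged walk-through of how the code below accumulates offsets when omit_fp and save_lmf are set; the variable names are illustrative, only the MonoLMF sizing comes from the patch:

	/* illustrative only, not part of the patch */
	guint32 offset = 0;              /* after whatever precedes (e.g. return value) */
	guint32 lmf_offset = offset;     /* <lmf/caller saved registers> */
	offset += sizeof (MonoLMF);      /* <locals> start after the LMF */
	/* spill, localloc and param areas follow as the frame is laid out */
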
 
        if (cfg->arch.omit_fp) {
@@ -1256,8 +1311,6 @@ mono_arch_allocate_vars (MonoCompile *cfg)
 
        if (cfg->method->save_lmf) {
                /* Reserve stack space for saving LMF */
-               /* mono_arch_find_jit_info () expects to find the LMF at a fixed offset */
-               g_assert (offset == 0);
                if (cfg->arch.omit_fp) {
                        cfg->arch.lmf_offset = offset;
                        offset += sizeof (MonoLMF);
@@ -1507,6 +1560,19 @@ mono_arch_create_vars (MonoCompile *cfg)
                        mono_print_ins (cfg->vret_addr);
                }
        }
+
+#ifdef MONO_AMD64_NO_PUSHES
+       /*
+        * When this is set, we pass arguments on the stack by moves into a
+        * bigger, preallocated stack frame, instead of by pushes.
+        * Pushes complicate exception handling because the arguments on the stack have
+        * to be popped each time a frame is unwound. They also make fp elimination
+        * impossible.
+        * FIXME: This doesn't work inside filter/finally clauses, since those execute
+        * on a new frame which doesn't include a param area.
+        */
+       cfg->arch.no_pushes = TRUE;
+#endif
 }
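
The effect of no_pushes on a single outgoing stack argument, sketched as the code each strategy ends up emitting (illustrative; the real offset comes from the ArgInfo computed by get_call_info ()):

	/* illustrative only, not part of the patch:
	 *   pushes:     push %rax              ; RSP moves per argument
	 *   no_pushes:  mov %rax, 0x10(%rsp)   ; RSP fixed, 0x10 = ainfo->offset
	 * With a fixed RSP the unwinder never has to pop outgoing arguments,
	 * which is also what makes fp elimination possible.
	 */
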
 
 static void
@@ -1592,11 +1658,117 @@ emit_sig_cookie (MonoCompile *cfg, MonoCallInst *call, CallInfo *cinfo)
        sig_arg->inst_p0 = tmp_sig;
        MONO_ADD_INS (cfg->cbb, sig_arg);
 
-       MONO_INST_NEW (cfg, arg, OP_X86_PUSH);
-       arg->sreg1 = sig_arg->dreg;
-       MONO_ADD_INS (cfg->cbb, arg);
+       if (cfg->arch.no_pushes) {
+               MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, AMD64_RSP, cinfo->sig_cookie.offset, sig_arg->dreg);
+       } else {
+               MONO_INST_NEW (cfg, arg, OP_X86_PUSH);
+               arg->sreg1 = sig_arg->dreg;
+               MONO_ADD_INS (cfg->cbb, arg);
+       }
+}
+
+static inline LLVMArgStorage
+arg_storage_to_llvm_arg_storage (MonoCompile *cfg, ArgStorage storage)
+{
+       switch (storage) {
+       case ArgInIReg:
+               return LLVMArgInIReg;
+       case ArgNone:
+               return LLVMArgNone;
+       default:
+               g_assert_not_reached ();
+               return LLVMArgNone;
+       }
 }
 
+#ifdef ENABLE_LLVM
+LLVMCallInfo*
+mono_arch_get_llvm_call_info (MonoCompile *cfg, MonoMethodSignature *sig)
+{
+       int i, n;
+       CallInfo *cinfo;
+       ArgInfo *ainfo;
+       int j;
+       LLVMCallInfo *linfo;
+
+       n = sig->param_count + sig->hasthis;
+
+       cinfo = get_call_info (cfg->generic_sharing_context, cfg->mempool, sig, sig->pinvoke);
+
+       linfo = mono_mempool_alloc0 (cfg->mempool, sizeof (LLVMCallInfo) + (sizeof (LLVMArgInfo) * n));
+
+       /*
+        * LLVM always uses the native ABI while we use our own ABI; the
+        * only difference is the handling of vtypes:
+        * - we only pass/receive them in registers in some cases, and only 
+        *   in 1 or 2 integer registers.
+        */
+       if (cinfo->ret.storage == ArgValuetypeInReg) {
+               if (sig->pinvoke) {
+                       cfg->exception_message = g_strdup ("pinvoke + vtypes");
+                       cfg->disable_llvm = TRUE;
+                       return linfo;
+               }
+
+               linfo->ret.storage = LLVMArgVtypeInReg;
+               for (j = 0; j < 2; ++j)
+                       linfo->ret.pair_storage [j] = arg_storage_to_llvm_arg_storage (cfg, cinfo->ret.pair_storage [j]);
+       }
+
+       if (MONO_TYPE_ISSTRUCT (sig->ret) && cinfo->ret.storage == ArgInIReg) {
+               /* Vtype returned using a hidden argument */
+               linfo->ret.storage = LLVMArgVtypeRetAddr;
+       }
+
+       for (i = 0; i < n; ++i) {
+               ainfo = cinfo->args + i;
+
+               linfo->args [i].storage = LLVMArgNone;
+
+               switch (ainfo->storage) {
+               case ArgInIReg:
+                       linfo->args [i].storage = LLVMArgInIReg;
+                       break;
+               case ArgInDoubleSSEReg:
+               case ArgInFloatSSEReg:
+                       linfo->args [i].storage = LLVMArgInFPReg;
+                       break;
+               case ArgOnStack:
+                       if ((i >= sig->hasthis) && (MONO_TYPE_ISSTRUCT(sig->params [i - sig->hasthis]))) {
+                               linfo->args [i].storage = LLVMArgVtypeByVal;
+                       } else {
+                               linfo->args [i].storage = LLVMArgInIReg;
+                               if (!sig->params [i - sig->hasthis]->byref) {
+                                       if (sig->params [i - sig->hasthis]->type == MONO_TYPE_R4) {
+                                               linfo->args [i].storage = LLVMArgInFPReg;
+                                       } else if (sig->params [i - sig->hasthis]->type == MONO_TYPE_R8) {
+                                               linfo->args [i].storage = LLVMArgInFPReg;
+                                       }
+                               }
+                       }
+                       break;
+               case ArgValuetypeInReg:
+                       if (sig->pinvoke) {
+                               cfg->exception_message = g_strdup ("pinvoke + vtypes");
+                               cfg->disable_llvm = TRUE;
+                               return linfo;
+                       }
+
+                       linfo->args [i].storage = LLVMArgVtypeInReg;
+                       for (j = 0; j < 2; ++j)
+                               linfo->args [i].pair_storage [j] = arg_storage_to_llvm_arg_storage (cfg, ainfo->pair_storage [j]);
+                       break;
+               default:
+                       cfg->exception_message = g_strdup ("ainfo->storage");
+                       cfg->disable_llvm = TRUE;
+                       break;
+               }
+       }
+
+       return linfo;
+}
+#endif
+
 void
 mono_arch_emit_call (MonoCompile *cfg, MonoCallInst *call)
 {
@@ -1614,52 +1786,45 @@ mono_arch_emit_call (MonoCompile *cfg, MonoCallInst *call)
        cinfo = get_call_info (cfg->generic_sharing_context, cfg->mempool, sig, sig->pinvoke);
 
        if (COMPILE_LLVM (cfg)) {
-               for (i = 0; i < n; ++i) {
-                       MonoInst *ins;
+               /* We shouldn't be called in the llvm case */
+               cfg->disable_llvm = TRUE;
+               return;
+       }
+
+       if (cinfo->need_stack_align) {
+               if (!cfg->arch.no_pushes)
+                       MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SUB_IMM, X86_ESP, X86_ESP, 8);
+       }
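
A note on the adjustment above: the AMD64 ABI requires RSP to be 16-byte aligned at the call instruction, so when the pushed slots would leave it misaligned an extra 8-byte sub restores the alignment; with no_pushes the frame is sized up front and no dynamic fixup is needed.
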
 
+       /* 
+        * Emit all arguments which are passed on the stack to prevent register
+        * allocation problems.
+        */
+       if (cfg->arch.no_pushes) {
+               for (i = 0; i < n; ++i) {
+                       MonoType *t;
                        ainfo = cinfo->args + i;
 
                        in = call->args [i];
 
-                       /* Simply remember the arguments */
-                       switch (ainfo->storage) {
-                       case ArgInIReg:
-                               MONO_INST_NEW (cfg, ins, OP_MOVE);
-                               ins->dreg = mono_alloc_ireg (cfg);
-                               ins->sreg1 = in->dreg;
-                               break;
-                       case ArgInDoubleSSEReg:
-                       case ArgInFloatSSEReg:
-                               MONO_INST_NEW (cfg, ins, OP_FMOVE);
-                               ins->dreg = mono_alloc_freg (cfg);
-                               ins->sreg1 = in->dreg;
-                               break;
-                       case ArgOnStack:
-                               if ((i >= sig->hasthis) && (MONO_TYPE_ISSTRUCT(sig->params [i - sig->hasthis]))) {
-                                       cfg->exception_message = g_strdup ("vtype argument");
-                                       cfg->disable_llvm = TRUE;
+                       if (sig->hasthis && i == 0)
+                               t = &mono_defaults.object_class->byval_arg;
+                       else
+                               t = sig->params [i - sig->hasthis];
+
+                       if (ainfo->storage == ArgOnStack && !MONO_TYPE_ISSTRUCT (t) && !call->tail_call) {
+                               if (!t->byref) {
+                                       if (t->type == MONO_TYPE_R4)
+                                               MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORER4_MEMBASE_REG, AMD64_RSP, ainfo->offset, in->dreg);
+                                       else if (t->type == MONO_TYPE_R8)
+                                               MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORER8_MEMBASE_REG, AMD64_RSP, ainfo->offset, in->dreg);
+                                       else
+                                               MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, AMD64_RSP, ainfo->offset, in->dreg);
                                } else {
-                                       MONO_INST_NEW (cfg, ins, OP_MOVE);
-                                       ins->dreg = mono_alloc_ireg (cfg);
-                                       ins->sreg1 = in->dreg;
+                                       MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, AMD64_RSP, ainfo->offset, in->dreg);
                                }
-                               break;
-                       default:
-                               cfg->exception_message = g_strdup ("ainfo->storage");
-                               cfg->disable_llvm = TRUE;
-                               return;
-                       }
-
-                       if (!cfg->disable_llvm) {
-                               MONO_ADD_INS (cfg->cbb, ins);
-                               mono_call_inst_add_outarg_reg (cfg, call, ins->dreg, 0, FALSE);
                        }
                }
-               return;
-       }
-
-       if (cinfo->need_stack_align) {
-               MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SUB_IMM, X86_ESP, X86_ESP, 8);
        }
 
        /*
@@ -1730,22 +1895,26 @@ mono_arch_emit_call (MonoCompile *cfg, MonoCallInst *call)
                                        MONO_ADD_INS (cfg->cbb, arg);
                                }
                        } else {
-                               MONO_INST_NEW (cfg, arg, OP_X86_PUSH);
-                               arg->sreg1 = in->dreg;
-                               if (!sig->params [i - sig->hasthis]->byref) {
-                                       if (sig->params [i - sig->hasthis]->type == MONO_TYPE_R4) {
-                                               MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SUB_IMM, X86_ESP, X86_ESP, 8);
-                                               arg->opcode = OP_STORER4_MEMBASE_REG;
-                                               arg->inst_destbasereg = X86_ESP;
-                                               arg->inst_offset = 0;
-                                       } else if (sig->params [i - sig->hasthis]->type == MONO_TYPE_R8) {
-                                               MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SUB_IMM, X86_ESP, X86_ESP, 8);
-                                               arg->opcode = OP_STORER8_MEMBASE_REG;
-                                               arg->inst_destbasereg = X86_ESP;
-                                               arg->inst_offset = 0;
+                               if (cfg->arch.no_pushes) {
+                                       /* Already done */
+                               } else {
+                                       MONO_INST_NEW (cfg, arg, OP_X86_PUSH);
+                                       arg->sreg1 = in->dreg;
+                                       if (!sig->params [i - sig->hasthis]->byref) {
+                                               if (sig->params [i - sig->hasthis]->type == MONO_TYPE_R4) {
+                                                       MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SUB_IMM, X86_ESP, X86_ESP, 8);
+                                                       arg->opcode = OP_STORER4_MEMBASE_REG;
+                                                       arg->inst_destbasereg = X86_ESP;
+                                                       arg->inst_offset = 0;
+                                               } else if (sig->params [i - sig->hasthis]->type == MONO_TYPE_R8) {
+                                                       MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SUB_IMM, X86_ESP, X86_ESP, 8);
+                                                       arg->opcode = OP_STORER8_MEMBASE_REG;
+                                                       arg->inst_destbasereg = X86_ESP;
+                                                       arg->inst_offset = 0;
+                                               }
                                        }
+                                       MONO_ADD_INS (cfg->cbb, arg);
                                }
-                               MONO_ADD_INS (cfg->cbb, arg);
                        }
                        break;
                default:
@@ -1858,6 +2027,8 @@ mono_arch_emit_outarg_vt (MonoCompile *cfg, MonoInst *ins, MonoInst *src)
                MonoInst *vtaddr, *load;
                vtaddr = mono_compile_create_var (cfg, &ins->klass->byval_arg, OP_LOCAL);
                
+               g_assert (!cfg->arch.no_pushes);
+
                MONO_INST_NEW (cfg, load, OP_LDADDR);
                load->inst_p0 = vtaddr;
                vtaddr->flags |= MONO_INST_INDIRECT;
@@ -1881,20 +2052,36 @@ mono_arch_emit_outarg_vt (MonoCompile *cfg, MonoInst *ins, MonoInst *src)
                }
        } else {
                if (size == 8) {
-                       /* Can't use this for < 8 since it does an 8 byte memory load */
-                       MONO_INST_NEW (cfg, arg, OP_X86_PUSH_MEMBASE);
-                       arg->inst_basereg = src->dreg;
-                       arg->inst_offset = 0;
-                       MONO_ADD_INS (cfg->cbb, arg);
+                       if (cfg->arch.no_pushes) {
+                               int dreg = mono_alloc_ireg (cfg);
+
+                               MONO_EMIT_NEW_LOAD_MEMBASE (cfg, dreg, src->dreg, 0);
+                               MONO_EMIT_NEW_STORE_MEMBASE (cfg, OP_STORE_MEMBASE_REG, AMD64_RSP, ainfo->offset, dreg);
+                       } else {
+                               /* Can't use this for < 8 since it does an 8 byte memory load */
+                               MONO_INST_NEW (cfg, arg, OP_X86_PUSH_MEMBASE);
+                               arg->inst_basereg = src->dreg;
+                               arg->inst_offset = 0;
+                               MONO_ADD_INS (cfg->cbb, arg);
+                       }
                } else if (size <= 40) {
-                       MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SUB_IMM, X86_ESP, X86_ESP, ALIGN_TO (size, 8));
-                       mini_emit_memcpy (cfg, X86_ESP, 0, src->dreg, 0, size, 4);
+                       if (cfg->arch.no_pushes) {
+                               mini_emit_memcpy (cfg, AMD64_RSP, ainfo->offset, src->dreg, 0, size, 4);
+                       } else {
+                               MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SUB_IMM, X86_ESP, X86_ESP, ALIGN_TO (size, 8));
+                               mini_emit_memcpy (cfg, X86_ESP, 0, src->dreg, 0, size, 4);
+                       }
                } else {
-                       MONO_INST_NEW (cfg, arg, OP_X86_PUSH_OBJ);
-                       arg->inst_basereg = src->dreg;
-                       arg->inst_offset = 0;
-                       arg->inst_imm = size;
-                       MONO_ADD_INS (cfg->cbb, arg);
+                       if (cfg->arch.no_pushes) {
+                               // FIXME: Code growth
+                               mini_emit_memcpy (cfg, AMD64_RSP, ainfo->offset, src->dreg, 0, size, 4);
+                       } else {
+                               MONO_INST_NEW (cfg, arg, OP_X86_PUSH_OBJ);
+                               arg->inst_basereg = src->dreg;
+                               arg->inst_offset = 0;
+                               arg->inst_imm = size;
+                               MONO_ADD_INS (cfg->cbb, arg);
+                       }
                }
        }
 }
@@ -1906,7 +2093,10 @@ mono_arch_emit_setret (MonoCompile *cfg, MonoMethod *method, MonoInst *val)
 
        if (!ret->byref) {
                if (ret->type == MONO_TYPE_R4) {
-                       MONO_EMIT_NEW_UNALU (cfg, OP_AMD64_SET_XMMREG_R4, cfg->ret->dreg, val->dreg);
+                       if (COMPILE_LLVM (cfg))
+                               MONO_EMIT_NEW_UNALU (cfg, OP_FMOVE, cfg->ret->dreg, val->dreg);
+                       else
+                               MONO_EMIT_NEW_UNALU (cfg, OP_AMD64_SET_XMMREG_R4, cfg->ret->dreg, val->dreg);
                        return;
                } else if (ret->type == MONO_TYPE_R8) {
                        MONO_EMIT_NEW_UNALU (cfg, OP_FMOVE, cfg->ret->dreg, val->dreg);
@@ -1918,28 +2108,15 @@ mono_arch_emit_setret (MonoCompile *cfg, MonoMethod *method, MonoInst *val)
 }
 
 #define EMIT_COND_BRANCH(ins,cond,sign) \
-if (ins->flags & MONO_INST_BRLABEL) { \
-        if (ins->inst_i0->inst_c0) { \
-               x86_branch (code, cond, cfg->native_code + ins->inst_i0->inst_c0, sign); \
-        } else { \
-               mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_LABEL, ins->inst_i0); \
-               if ((cfg->opt & MONO_OPT_BRANCH) && \
-                    x86_is_imm8 (ins->inst_i0->inst_c1 - cpos)) \
-                       x86_branch8 (code, cond, 0, sign); \
-                else \
-                       x86_branch32 (code, cond, 0, sign); \
-        } \
-} else { \
         if (ins->inst_true_bb->native_offset) { \
                x86_branch (code, cond, cfg->native_code + ins->inst_true_bb->native_offset, sign); \
         } else { \
                mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_BB, ins->inst_true_bb); \
                if ((cfg->opt & MONO_OPT_BRANCH) && \
-                    x86_is_imm8 (ins->inst_true_bb->max_offset - cpos)) \
+            x86_is_imm8 (ins->inst_true_bb->max_offset - offset)) \
                        x86_branch8 (code, cond, 0, sign); \
                 else \
                        x86_branch32 (code, cond, 0, sign); \
-        } \
 }
 
 /* emit an exception if condition is fail */
@@ -2341,6 +2518,30 @@ mono_arch_lowering_pass (MonoCompile *cfg, MonoBasicBlock *bb)
                                ins->sreg1 = temp->dreg;
                        }
                        break;
+#ifdef MONO_ARCH_SIMD_INTRINSICS
+               case OP_EXPAND_I1: {
+                               int temp_reg1 = mono_alloc_ireg (cfg);
+                               int temp_reg2 = mono_alloc_ireg (cfg);
+                               int original_reg = ins->sreg1;
+
+                               NEW_INS (cfg, ins, temp, OP_ICONV_TO_U1);
+                               temp->sreg1 = original_reg;
+                               temp->dreg = temp_reg1;
+
+                               NEW_INS (cfg, ins, temp, OP_SHL_IMM);
+                               temp->sreg1 = temp_reg1;
+                               temp->dreg = temp_reg2;
+                               temp->inst_imm = 8;
+
+                               NEW_INS (cfg, ins, temp, OP_LOR);
+                               temp->sreg1 = temp->dreg = temp_reg2;
+                               temp->sreg2 = temp_reg1;
+
+                               ins->opcode = OP_EXPAND_I2;
+                               ins->sreg1 = temp_reg2;
+                       }
+                       break;
+#endif
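
The lowering above reduces byte broadcast to the existing 16-bit path: the byte is zero-extended, duplicated into both halves of a 16-bit value, and handed to OP_EXPAND_I2 to splat across the vector. A scalar sketch of the duplication step (hypothetical helper, not in the patch):

	static guint16
	editor_widen_byte (guint8 x)
	{
		/* 0xAB -> 0xABAB; OP_EXPAND_I2 then broadcasts it to every lane */
		return (guint16) x | ((guint16) x << 8);
	}
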
                default:
                        break;
                }
@@ -2384,7 +2585,7 @@ emit_float_to_int (MonoCompile *cfg, guchar *code, int dreg, int sreg, int size,
 }
 
 static unsigned char*
-mono_emit_stack_alloc (guchar *code, MonoInst* tree)
+mono_emit_stack_alloc (MonoCompile *cfg, guchar *code, MonoInst* tree)
 {
        int sreg = tree->sreg1;
        int need_touch = FALSE;
@@ -2456,6 +2657,8 @@ mono_emit_stack_alloc (guchar *code, MonoInst* tree)
                amd64_alu_reg_reg (code, X86_XOR, AMD64_RAX, AMD64_RAX);
                                
                amd64_lea_membase (code, AMD64_RDI, AMD64_RSP, offset);
+               if (cfg->param_area && cfg->arch.no_pushes)
+                       amd64_alu_reg_imm (code, X86_ADD, AMD64_RDI, cfg->param_area);
                amd64_cld (code);
                amd64_prefix (code, X86_REP_PREFIX);
                amd64_stosl (code);
@@ -2598,7 +2801,17 @@ mono_arch_output_basic_block (MonoCompile *cfg, MonoBasicBlock *bb)
        guint8 *code = cfg->native_code + cfg->code_len;
        MonoInst *last_ins = NULL;
        guint last_offset = 0;
-       int max_len, cpos;
+       int max_len;
+
+       /* Fix max_offset estimate for each successor bb */
+       if (cfg->opt & MONO_OPT_BRANCH) {
+               int current_offset = cfg->code_len;
+               MonoBasicBlock *current_bb;
+               for (current_bb = bb; current_bb != NULL; current_bb = current_bb->next_bb) {
+                       current_bb->max_offset = current_offset;
+                       current_offset += current_bb->max_length;
+               }
+       }
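
Context for the recomputation above: max_offset feeds the x86_is_imm8 () checks in EMIT_COND_BRANCH above and OP_BR below, which pick between 8-bit and 32-bit branch encodings. Since max_length is an upper bound, the estimate can only err on the large side, costing a longer encoding but never an out-of-range branch.
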
 
        if (cfg->opt & MONO_OPT_LOOP) {
                int pad, align = LOOP_ALIGNMENT;
@@ -2615,12 +2828,9 @@ mono_arch_output_basic_block (MonoCompile *cfg, MonoBasicBlock *bb)
        if (cfg->verbose_level > 2)
                g_print ("Basic block %d starting at offset 0x%x\n", bb->block_num, bb->native_offset);
 
-       cpos = bb->max_offset;
-
        if (cfg->prof_options & MONO_PROFILE_COVERAGE) {
                MonoProfileCoverageInfo *cov = cfg->coverage_info;
                g_assert (!cfg->compile_aot);
-               cpos += 6;
 
                cov->data [bb->dfn].cil_code = bb->cil_code;
                amd64_mov_reg_imm (code, AMD64_R11, (guint64)&cov->data [bb->dfn].count);
@@ -3398,7 +3608,7 @@ mono_arch_output_basic_block (MonoCompile *cfg, MonoBasicBlock *bb)
 
                        /* FIXME: no tracing support... */
                        if (cfg->prof_options & MONO_PROFILE_ENTER_LEAVE)
-                               code = mono_arch_instrument_epilog (cfg, mono_profiler_method_leave, code, FALSE);
+                               code = mono_arch_instrument_epilog_full (cfg, mono_profiler_method_leave, code, FALSE, FALSE);
 
                        g_assert (!cfg->method->save_lmf);
 
@@ -3480,7 +3690,7 @@ mono_arch_output_basic_block (MonoCompile *cfg, MonoBasicBlock *bb)
                                code = emit_call (cfg, code, MONO_PATCH_INFO_METHOD, call->method, FALSE);
                        else
                                code = emit_call (cfg, code, MONO_PATCH_INFO_ABS, call->fptr, FALSE);
-                       if (call->stack_usage && !CALLCONV_IS_STDCALL (call->signature->call_convention))
+                       if (call->stack_usage && !CALLCONV_IS_STDCALL (call->signature->call_convention) && !cfg->arch.no_pushes)
                                amd64_alu_reg_imm (code, X86_ADD, AMD64_RSP, call->stack_usage);
                        code = emit_move_return_value (cfg, ins, code);
                        break;
@@ -3528,7 +3738,7 @@ mono_arch_output_basic_block (MonoCompile *cfg, MonoBasicBlock *bb)
                        }
 
                        amd64_call_reg (code, ins->sreg1);
-                       if (call->stack_usage && !CALLCONV_IS_STDCALL (call->signature->call_convention))
+                       if (call->stack_usage && !CALLCONV_IS_STDCALL (call->signature->call_convention) && !cfg->arch.no_pushes)
                                amd64_alu_reg_imm (code, X86_ADD, AMD64_RSP, call->stack_usage);
                        code = emit_move_return_value (cfg, ins, code);
                        break;
@@ -3557,7 +3767,7 @@ mono_arch_output_basic_block (MonoCompile *cfg, MonoBasicBlock *bb)
                        amd64_nop (code);
 
                        amd64_call_membase (code, ins->sreg1, ins->inst_offset);
-                       if (call->stack_usage && !CALLCONV_IS_STDCALL (call->signature->call_convention))
+                       if (call->stack_usage && !CALLCONV_IS_STDCALL (call->signature->call_convention) && !cfg->arch.no_pushes)
                                amd64_alu_reg_imm (code, X86_ADD, AMD64_RSP, call->stack_usage);
                        code = emit_move_return_value (cfg, ins, code);
                        break;
@@ -3565,17 +3775,23 @@ mono_arch_output_basic_block (MonoCompile *cfg, MonoBasicBlock *bb)
                        amd64_mov_membase_reg (code, cfg->frame_reg, cfg->arch.lmf_offset + G_STRUCT_OFFSET (MonoLMF, rsp), AMD64_RSP, 8);
                        break;
                case OP_X86_PUSH:
+                       g_assert (!cfg->arch.no_pushes);
                        amd64_push_reg (code, ins->sreg1);
                        break;
                case OP_X86_PUSH_IMM:
+                       g_assert (!cfg->arch.no_pushes);
                        g_assert (amd64_is_imm32 (ins->inst_imm));
                        amd64_push_imm (code, ins->inst_imm);
                        break;
                case OP_X86_PUSH_MEMBASE:
+                       g_assert (!cfg->arch.no_pushes);
                        amd64_push_membase (code, ins->inst_basereg, ins->inst_offset);
                        break;
                case OP_X86_PUSH_OBJ: {
                        int size = ALIGN_TO (ins->inst_imm, 8);
+
+                       g_assert (!cfg->arch.no_pushes);
+
                        amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, size);
                        amd64_push_reg (code, AMD64_RDI);
                        amd64_push_reg (code, AMD64_RSI);
@@ -3607,8 +3823,10 @@ mono_arch_output_basic_block (MonoCompile *cfg, MonoBasicBlock *bb)
                        /* keep alignment */
                        amd64_alu_reg_imm (code, X86_ADD, ins->sreg1, MONO_ARCH_FRAME_ALIGNMENT - 1);
                        amd64_alu_reg_imm (code, X86_AND, ins->sreg1, ~(MONO_ARCH_FRAME_ALIGNMENT - 1));
-                       code = mono_emit_stack_alloc (code, ins);
+                       code = mono_emit_stack_alloc (cfg, code, ins);
                        amd64_mov_reg_reg (code, ins->dreg, AMD64_RSP, 8);
+                       if (cfg->param_area && cfg->arch.no_pushes)
+                               amd64_alu_reg_imm (code, X86_ADD, ins->dreg, cfg->param_area);
                        break;
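
On the param_area adjustment above (mirrored in OP_LOCALLOC_IMM below): with no_pushes the bottom cfg->param_area bytes of the frame are reserved for outgoing call arguments, so localloc'ed memory has to be handed out above that region while RSP itself keeps pointing below it.
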
                case OP_LOCALLOC_IMM: {
                        guint32 size = ins->inst_imm;
@@ -3628,13 +3846,15 @@ mono_arch_output_basic_block (MonoCompile *cfg, MonoBasicBlock *bb)
                                        amd64_mov_reg_imm (code, ins->dreg, size);
                                        ins->sreg1 = ins->dreg;
 
-                                       code = mono_emit_stack_alloc (code, ins);
+                                       code = mono_emit_stack_alloc (cfg, code, ins);
                                        amd64_mov_reg_reg (code, ins->dreg, AMD64_RSP, 8);
                                }
                        } else {
                                amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, size);
                                amd64_mov_reg_reg (code, ins->dreg, AMD64_RSP, 8);
                        }
+                       if (cfg->param_area && cfg->arch.no_pushes)
+                               amd64_alu_reg_imm (code, X86_ADD, ins->dreg, cfg->param_area);
                        break;
                }
                case OP_THROW: {
@@ -3660,6 +3880,12 @@ mono_arch_output_basic_block (MonoCompile *cfg, MonoBasicBlock *bb)
                case OP_START_HANDLER: {
                        MonoInst *spvar = mono_find_spvar_for_region (cfg, bb->region);
                        amd64_mov_membase_reg (code, spvar->inst_basereg, spvar->inst_offset, AMD64_RSP, 8);
+
+                       if ((MONO_BBLOCK_IS_IN_REGION (bb, MONO_REGION_FINALLY) ||
+                                MONO_BBLOCK_IS_IN_REGION (bb, MONO_REGION_FILTER)) &&
+                               cfg->param_area && cfg->arch.no_pushes) {
+                               amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, ALIGN_TO (cfg->param_area, MONO_ARCH_FRAME_ALIGNMENT));
+                       }
                        break;
                }
                case OP_ENDFINALLY: {
@@ -3683,28 +3909,15 @@ mono_arch_output_basic_block (MonoCompile *cfg, MonoBasicBlock *bb)
                        //g_print ("target: %p, next: %p, curr: %p, last: %p\n", ins->inst_target_bb, bb->next_bb, ins, bb->last_ins);
                        //if ((ins->inst_target_bb == bb->next_bb) && ins == bb->last_ins)
                        //break;
-                       if (ins->flags & MONO_INST_BRLABEL) {
-                               if (ins->inst_i0->inst_c0) {
-                                       amd64_jump_code (code, cfg->native_code + ins->inst_i0->inst_c0);
-                               } else {
-                                       mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_LABEL, ins->inst_i0);
-                                       if ((cfg->opt & MONO_OPT_BRANCH) &&
-                                           x86_is_imm8 (ins->inst_i0->inst_c1 - cpos))
-                                               x86_jump8 (code, 0);
-                                       else 
-                                               x86_jump32 (code, 0);
-                               }
-                       } else {
                                if (ins->inst_target_bb->native_offset) {
                                        amd64_jump_code (code, cfg->native_code + ins->inst_target_bb->native_offset); 
                                } else {
                                        mono_add_patch_info (cfg, offset, MONO_PATCH_INFO_BB, ins->inst_target_bb);
                                        if ((cfg->opt & MONO_OPT_BRANCH) &&
-                                           x86_is_imm8 (ins->inst_target_bb->max_offset - cpos))
+                                           x86_is_imm8 (ins->inst_target_bb->max_offset - offset))
                                                x86_jump8 (code, 0);
                                        else 
                                                x86_jump32 (code, 0);
-                               } 
                        }
                        break;
                case OP_BR_REG:
@@ -4299,6 +4512,581 @@ mono_arch_output_basic_block (MonoCompile *cfg, MonoBasicBlock *bb)
                                amd64_mov_reg_reg (code, ins->dreg, AMD64_RAX, size);
                        break;
                }
+#ifdef MONO_ARCH_SIMD_INTRINSICS
+               /* TODO: Some of these IR opcodes are marked as no-clobber when in fact they do clobber. */
+               case OP_ADDPS:
+                       amd64_sse_addps_reg_reg (code, ins->sreg1, ins->sreg2);
+                       break;
+               case OP_DIVPS:
+                       amd64_sse_divps_reg_reg (code, ins->sreg1, ins->sreg2);
+                       break;
+               case OP_MULPS:
+                       amd64_sse_mulps_reg_reg (code, ins->sreg1, ins->sreg2);
+                       break;
+               case OP_SUBPS:
+                       amd64_sse_subps_reg_reg (code, ins->sreg1, ins->sreg2);
+                       break;
+               case OP_MAXPS:
+                       amd64_sse_maxps_reg_reg (code, ins->sreg1, ins->sreg2);
+                       break;
+               case OP_MINPS:
+                       amd64_sse_minps_reg_reg (code, ins->sreg1, ins->sreg2);
+                       break;
+               case OP_COMPPS:
+                       g_assert (ins->inst_c0 >= 0 && ins->inst_c0 <= 7);
+                       amd64_sse_cmpps_reg_reg_imm (code, ins->sreg1, ins->sreg2, ins->inst_c0);
+                       break;
+               case OP_ANDPS:
+                       amd64_sse_andps_reg_reg (code, ins->sreg1, ins->sreg2);
+                       break;
+               case OP_ANDNPS:
+                       amd64_sse_andnps_reg_reg (code, ins->sreg1, ins->sreg2);
+                       break;
+               case OP_ORPS:
+                       amd64_sse_orps_reg_reg (code, ins->sreg1, ins->sreg2);
+                       break;
+               case OP_XORPS:
+                       amd64_sse_xorps_reg_reg (code, ins->sreg1, ins->sreg2);
+                       break;
+               case OP_SQRTPS:
+                       amd64_sse_sqrtps_reg_reg (code, ins->dreg, ins->sreg1);
+                       break;
+               case OP_RSQRTPS:
+                       amd64_sse_rsqrtps_reg_reg (code, ins->dreg, ins->sreg1);
+                       break;
+               case OP_RCPPS:
+                       amd64_sse_rcpps_reg_reg (code, ins->dreg, ins->sreg1);
+                       break;
+               case OP_ADDSUBPS:
+                       amd64_sse_addsubps_reg_reg (code, ins->sreg1, ins->sreg2);
+                       break;
+               case OP_HADDPS:
+                       amd64_sse_haddps_reg_reg (code, ins->sreg1, ins->sreg2);
+                       break;
+               case OP_HSUBPS:
+                       amd64_sse_hsubps_reg_reg (code, ins->sreg1, ins->sreg2);
+                       break;
+               case OP_DUPPS_HIGH:
+                       amd64_sse_movshdup_reg_reg (code, ins->dreg, ins->sreg1);
+                       break;
+               case OP_DUPPS_LOW:
+                       amd64_sse_movsldup_reg_reg (code, ins->dreg, ins->sreg1);
+                       break;
+
+               case OP_PSHUFLEW_HIGH:
+                       g_assert (ins->inst_c0 >= 0 && ins->inst_c0 <= 0xFF);
+                       amd64_sse_pshufhw_reg_reg_imm (code, ins->dreg, ins->sreg1, ins->inst_c0);
+                       break;
+               case OP_PSHUFLEW_LOW:
+                       g_assert (ins->inst_c0 >= 0 && ins->inst_c0 <= 0xFF);
+                       amd64_sse_pshuflw_reg_reg_imm (code, ins->dreg, ins->sreg1, ins->inst_c0);
+                       break;
+               case OP_PSHUFLED:
+                       g_assert (ins->inst_c0 >= 0 && ins->inst_c0 <= 0xFF);
+                       amd64_sse_pshufd_reg_reg_imm (code, ins->dreg, ins->sreg1, ins->inst_c0);
+                       break;
+
+               case OP_ADDPD:
+                       amd64_sse_addpd_reg_reg (code, ins->sreg1, ins->sreg2);
+                       break;
+               case OP_DIVPD:
+                       amd64_sse_divpd_reg_reg (code, ins->sreg1, ins->sreg2);
+                       break;
+               case OP_MULPD:
+                       amd64_sse_mulpd_reg_reg (code, ins->sreg1, ins->sreg2);
+                       break;
+               case OP_SUBPD:
+                       amd64_sse_subpd_reg_reg (code, ins->sreg1, ins->sreg2);
+                       break;
+               case OP_MAXPD:
+                       amd64_sse_maxpd_reg_reg (code, ins->sreg1, ins->sreg2);
+                       break;
+               case OP_MINPD:
+                       amd64_sse_minpd_reg_reg (code, ins->sreg1, ins->sreg2);
+                       break;
+               case OP_COMPPD:
+                       g_assert (ins->inst_c0 >= 0 && ins->inst_c0 <= 7);
+                       amd64_sse_cmppd_reg_reg_imm (code, ins->sreg1, ins->sreg2, ins->inst_c0);
+                       break;
+               case OP_ANDPD:
+                       amd64_sse_andpd_reg_reg (code, ins->sreg1, ins->sreg2);
+                       break;
+               case OP_ANDNPD:
+                       amd64_sse_andnpd_reg_reg (code, ins->sreg1, ins->sreg2);
+                       break;
+               case OP_ORPD:
+                       amd64_sse_orpd_reg_reg (code, ins->sreg1, ins->sreg2);
+                       break;
+               case OP_XORPD:
+                       amd64_sse_xorpd_reg_reg (code, ins->sreg1, ins->sreg2);
+                       break;
+               /* TODO: This op is in the AMD64 manual but has not been implemented.
+               case OP_SQRTPD:
+                       amd64_sse_sqrtpd_reg_reg (code, ins->dreg, ins->sreg1);
+                       break;
+               */
+               case OP_ADDSUBPD:
+                       amd64_sse_addsubpd_reg_reg (code, ins->sreg1, ins->sreg2);
+                       break;
+               case OP_HADDPD:
+                       amd64_sse_haddpd_reg_reg (code, ins->sreg1, ins->sreg2);
+                       break;
+               case OP_HSUBPD:
+                       amd64_sse_hsubpd_reg_reg (code, ins->sreg1, ins->sreg2);
+                       break;
+               case OP_DUPPD:
+                       amd64_sse_movddup_reg_reg (code, ins->dreg, ins->sreg1);
+                       break;
+
+               case OP_EXTRACT_MASK:
+                       amd64_sse_pmovmskb_reg_reg (code, ins->dreg, ins->sreg1);
+                       break;
+
+               case OP_PAND:
+                       amd64_sse_pand_reg_reg (code, ins->sreg1, ins->sreg2);
+                       break;
+               case OP_POR:
+                       amd64_sse_por_reg_reg (code, ins->sreg1, ins->sreg2);
+                       break;
+               case OP_PXOR:
+                       amd64_sse_pxor_reg_reg (code, ins->sreg1, ins->sreg2);
+                       break;
+
+               case OP_PADDB:
+                       amd64_sse_paddb_reg_reg (code, ins->sreg1, ins->sreg2);
+                       break;
+               case OP_PADDW:
+                       amd64_sse_paddw_reg_reg (code, ins->sreg1, ins->sreg2);
+                       break;
+               case OP_PADDD:
+                       amd64_sse_paddd_reg_reg (code, ins->sreg1, ins->sreg2);
+                       break;
+               case OP_PADDQ:
+                       amd64_sse_paddq_reg_reg (code, ins->sreg1, ins->sreg2);
+                       break;
+
+               case OP_PSUBB:
+                       amd64_sse_psubb_reg_reg (code, ins->sreg1, ins->sreg2);
+                       break;
+               case OP_PSUBW:
+                       amd64_sse_psubw_reg_reg (code, ins->sreg1, ins->sreg2);
+                       break;
+               case OP_PSUBD:
+                       amd64_sse_psubd_reg_reg (code, ins->sreg1, ins->sreg2);
+                       break;
+               case OP_PSUBQ:
+                       amd64_sse_psubq_reg_reg (code, ins->sreg1, ins->sreg2);
+                       break;
+
+               case OP_PMAXB_UN:
+                       amd64_sse_pmaxub_reg_reg (code, ins->sreg1, ins->sreg2);
+                       break;
+               case OP_PMAXW_UN:
+                       amd64_sse_pmaxuw_reg_reg (code, ins->sreg1, ins->sreg2);
+                       break;
+               case OP_PMAXD_UN:
+                       amd64_sse_pmaxud_reg_reg (code, ins->sreg1, ins->sreg2);
+                       break;
+               
+               case OP_PMAXB:
+                       amd64_sse_pmaxsb_reg_reg (code, ins->sreg1, ins->sreg2);
+                       break;
+               case OP_PMAXW:
+                       amd64_sse_pmaxsw_reg_reg (code, ins->sreg1, ins->sreg2);
+                       break;
+               case OP_PMAXD:
+                       amd64_sse_pmaxsd_reg_reg (code, ins->sreg1, ins->sreg2);
+                       break;
+
+               case OP_PAVGB_UN:
+                       amd64_sse_pavgb_reg_reg (code, ins->sreg1, ins->sreg2);
+                       break;
+               case OP_PAVGW_UN:
+                       amd64_sse_pavgw_reg_reg (code, ins->sreg1, ins->sreg2);
+                       break;
+
+               case OP_PMINB_UN:
+                       amd64_sse_pminub_reg_reg (code, ins->sreg1, ins->sreg2);
+                       break;
+               case OP_PMINW_UN:
+                       amd64_sse_pminuw_reg_reg (code, ins->sreg1, ins->sreg2);
+                       break;
+               case OP_PMIND_UN:
+                       amd64_sse_pminud_reg_reg (code, ins->sreg1, ins->sreg2);
+                       break;
+
+               case OP_PMINB:
+                       amd64_sse_pminsb_reg_reg (code, ins->sreg1, ins->sreg2);
+                       break;
+               case OP_PMINW:
+                       amd64_sse_pminsw_reg_reg (code, ins->sreg1, ins->sreg2);
+                       break;
+               case OP_PMIND:
+                       amd64_sse_pminsd_reg_reg (code, ins->sreg1, ins->sreg2);
+                       break;
+
+               case OP_PCMPEQB:
+                       amd64_sse_pcmpeqb_reg_reg (code, ins->sreg1, ins->sreg2);
+                       break;
+               case OP_PCMPEQW:
+                       amd64_sse_pcmpeqw_reg_reg (code, ins->sreg1, ins->sreg2);
+                       break;
+               case OP_PCMPEQD:
+                       amd64_sse_pcmpeqd_reg_reg (code, ins->sreg1, ins->sreg2);
+                       break;
+               case OP_PCMPEQQ:
+                       amd64_sse_pcmpeqq_reg_reg (code, ins->sreg1, ins->sreg2);
+                       break;
+
+               case OP_PCMPGTB:
+                       amd64_sse_pcmpgtb_reg_reg (code, ins->sreg1, ins->sreg2);
+                       break;
+               case OP_PCMPGTW:
+                       amd64_sse_pcmpgtw_reg_reg (code, ins->sreg1, ins->sreg2);
+                       break;
+               case OP_PCMPGTD:
+                       amd64_sse_pcmpgtd_reg_reg (code, ins->sreg1, ins->sreg2);
+                       break;
+               case OP_PCMPGTQ:
+                       amd64_sse_pcmpgtq_reg_reg (code, ins->sreg1, ins->sreg2);
+                       break;
+
+               case OP_PSUM_ABS_DIFF:
+                       amd64_sse_psadbw_reg_reg (code, ins->sreg1, ins->sreg2);
+                       break;
+
+               case OP_UNPACK_LOWB:
+                       amd64_sse_punpcklbw_reg_reg (code, ins->sreg1, ins->sreg2);
+                       break;
+               case OP_UNPACK_LOWW:
+                       amd64_sse_punpcklwd_reg_reg (code, ins->sreg1, ins->sreg2);
+                       break;
+               case OP_UNPACK_LOWD:
+                       amd64_sse_punpckldq_reg_reg (code, ins->sreg1, ins->sreg2);
+                       break;
+               case OP_UNPACK_LOWQ:
+                       amd64_sse_punpcklqdq_reg_reg (code, ins->sreg1, ins->sreg2);
+                       break;
+               case OP_UNPACK_LOWPS:
+                       amd64_sse_unpcklps_reg_reg (code, ins->sreg1, ins->sreg2);
+                       break;
+               case OP_UNPACK_LOWPD:
+                       amd64_sse_unpcklpd_reg_reg (code, ins->sreg1, ins->sreg2);
+                       break;
+
+               case OP_UNPACK_HIGHB:
+                       amd64_sse_punpckhbw_reg_reg (code, ins->sreg1, ins->sreg2);
+                       break;
+               case OP_UNPACK_HIGHW:
+                       amd64_sse_punpckhwd_reg_reg (code, ins->sreg1, ins->sreg2);
+                       break;
+               case OP_UNPACK_HIGHD:
+                       amd64_sse_punpckhdq_reg_reg (code, ins->sreg1, ins->sreg2);
+                       break;
+               case OP_UNPACK_HIGHQ:
+                       amd64_sse_punpckhqdq_reg_reg (code, ins->sreg1, ins->sreg2);
+                       break;
+               case OP_UNPACK_HIGHPS:
+                       amd64_sse_unpckhps_reg_reg (code, ins->sreg1, ins->sreg2);
+                       break;
+               case OP_UNPACK_HIGHPD:
+                       amd64_sse_unpckhpd_reg_reg (code, ins->sreg1, ins->sreg2);
+                       break;
+
+               case OP_PACKW:
+                       amd64_sse_packsswb_reg_reg (code, ins->sreg1, ins->sreg2);
+                       break;
+               case OP_PACKD:
+                       amd64_sse_packssdw_reg_reg (code, ins->sreg1, ins->sreg2);
+                       break;
+               case OP_PACKW_UN:
+                       amd64_sse_packuswb_reg_reg (code, ins->sreg1, ins->sreg2);
+                       break;
+               case OP_PACKD_UN:
+                       amd64_sse_packusdw_reg_reg (code, ins->sreg1, ins->sreg2);
+                       break;
+
+               case OP_PADDB_SAT_UN:
+                       amd64_sse_paddusb_reg_reg (code, ins->sreg1, ins->sreg2);
+                       break;
+               case OP_PSUBB_SAT_UN:
+                       amd64_sse_psubusb_reg_reg (code, ins->sreg1, ins->sreg2);
+                       break;
+               case OP_PADDW_SAT_UN:
+                       amd64_sse_paddusw_reg_reg (code, ins->sreg1, ins->sreg2);
+                       break;
+               case OP_PSUBW_SAT_UN:
+                       amd64_sse_psubusw_reg_reg (code, ins->sreg1, ins->sreg2);
+                       break;
+
+               case OP_PADDB_SAT:
+                       amd64_sse_paddsb_reg_reg (code, ins->sreg1, ins->sreg2);
+                       break;
+               case OP_PSUBB_SAT:
+                       amd64_sse_psubsb_reg_reg (code, ins->sreg1, ins->sreg2);
+                       break;
+               case OP_PADDW_SAT:
+                       amd64_sse_paddsw_reg_reg (code, ins->sreg1, ins->sreg2);
+                       break;
+               case OP_PSUBW_SAT:
+                       amd64_sse_psubsw_reg_reg (code, ins->sreg1, ins->sreg2);
+                       break;
+                       
+               case OP_PMULW:
+                       amd64_sse_pmullw_reg_reg (code, ins->sreg1, ins->sreg2);
+                       break;
+               case OP_PMULD:
+                       amd64_sse_pmulld_reg_reg (code, ins->sreg1, ins->sreg2);
+                       break;
+               case OP_PMULQ:
+                       amd64_sse_pmuludq_reg_reg (code, ins->sreg1, ins->sreg2);
+                       break;
+               case OP_PMULW_HIGH_UN:
+                       amd64_sse_pmulhuw_reg_reg (code, ins->sreg1, ins->sreg2);
+                       break;
+               case OP_PMULW_HIGH:
+                       amd64_sse_pmulhw_reg_reg (code, ins->sreg1, ins->sreg2);
+                       break;
+
+               case OP_PSHRW:
+                       amd64_sse_psrlw_reg_imm (code, ins->dreg, ins->inst_imm);
+                       break;
+               case OP_PSHRW_REG:
+                       amd64_sse_psrlw_reg_reg (code, ins->dreg, ins->sreg2);
+                       break;
+
+               case OP_PSARW:
+                       amd64_sse_psraw_reg_imm (code, ins->dreg, ins->inst_imm);
+                       break;
+               case OP_PSARW_REG:
+                       amd64_sse_psraw_reg_reg (code, ins->dreg, ins->sreg2);
+                       break;
+
+               case OP_PSHLW:
+                       amd64_sse_psllw_reg_imm (code, ins->dreg, ins->inst_imm);
+                       break;
+               case OP_PSHLW_REG:
+                       amd64_sse_psllw_reg_reg (code, ins->dreg, ins->sreg2);
+                       break;
+
+               case OP_PSHRD:
+                       amd64_sse_psrld_reg_imm (code, ins->dreg, ins->inst_imm);
+                       break;
+               case OP_PSHRD_REG:
+                       amd64_sse_psrld_reg_reg (code, ins->dreg, ins->sreg2);
+                       break;
+
+               case OP_PSARD:
+                       amd64_sse_psrad_reg_imm (code, ins->dreg, ins->inst_imm);
+                       break;
+               case OP_PSARD_REG:
+                       amd64_sse_psrad_reg_reg (code, ins->dreg, ins->sreg2);
+                       break;
+
+               case OP_PSHLD:
+                       amd64_sse_pslld_reg_imm (code, ins->dreg, ins->inst_imm);
+                       break;
+               case OP_PSHLD_REG:
+                       amd64_sse_pslld_reg_reg (code, ins->dreg, ins->sreg2);
+                       break;
+
+               case OP_PSHRQ:
+                       amd64_sse_psrlq_reg_imm (code, ins->dreg, ins->inst_imm);
+                       break;
+               case OP_PSHRQ_REG:
+                       amd64_sse_psrlq_reg_reg (code, ins->dreg, ins->sreg2);
+                       break;
+               
+               /* TODO: This is part of the SSE spec but has not been added yet
+               case OP_PSARQ:
+                       amd64_sse_psraq_reg_imm (code, ins->dreg, ins->inst_imm);
+                       break;
+               case OP_PSARQ_REG:
+                       amd64_sse_psraq_reg_reg (code, ins->dreg, ins->sreg2);
+                       break;  
+               */
+       
+               case OP_PSHLQ:
+                       amd64_sse_psllq_reg_imm (code, ins->dreg, ins->inst_imm);
+                       break;
+               case OP_PSHLQ_REG:
+                       amd64_sse_psllq_reg_reg (code, ins->dreg, ins->sreg2);
+                       break;  
+
+               case OP_ICONV_TO_X:
+                       amd64_movd_xreg_reg_size (code, ins->dreg, ins->sreg1, 4);
+                       break;
+               case OP_EXTRACT_I4:
+                       amd64_movd_reg_xreg_size (code, ins->dreg, ins->sreg1, 4);
+                       break;
+               case OP_EXTRACT_I8:
+                       if (ins->inst_c0) {
+                               amd64_movhlps_reg_reg (code, AMD64_XMM15, ins->sreg1);
+                               amd64_movd_reg_xreg_size (code, ins->dreg, AMD64_XMM15, 8);
+                       } else {
+                               amd64_movd_reg_xreg_size (code, ins->dreg, ins->sreg1, 8);
+                       }
+                       break;
+               case OP_EXTRACT_I1:
+               case OP_EXTRACT_U1:
+                       amd64_movd_reg_xreg_size (code, ins->dreg, ins->sreg1, 4);
+                       if (ins->inst_c0)
+                               amd64_shift_reg_imm (code, X86_SHR, ins->dreg, ins->inst_c0 * 8);
+                       amd64_widen_reg (code, ins->dreg, ins->dreg, ins->opcode == OP_EXTRACT_I1, FALSE);
+                       break;
+               case OP_EXTRACT_I2:
+               case OP_EXTRACT_U2:
+                       /*amd64_movd_reg_xreg_size (code, ins->dreg, ins->sreg1, 4);
+                       if (ins->inst_c0)
+                               amd64_shift_reg_imm_size (code, X86_SHR, ins->dreg, 16, 4);*/
+                       amd64_sse_pextrw_reg_reg_imm (code, ins->dreg, ins->sreg1, ins->inst_c0);
+                       amd64_widen_reg_size (code, ins->dreg, ins->dreg, ins->opcode == OP_EXTRACT_I2, TRUE, 4);
+                       break;
+               case OP_EXTRACT_R8:
+                       if (ins->inst_c0)
+                               amd64_movhlps_reg_reg (code, ins->dreg, ins->sreg1);
+                       else
+                               amd64_sse_movsd_reg_reg (code, ins->dreg, ins->sreg1);
+                       break;
+               case OP_INSERT_I2:
+                       amd64_sse_pinsrw_reg_reg_imm (code, ins->sreg1, ins->sreg2, ins->inst_c0);
+                       break;
+               case OP_EXTRACTX_U2:
+                       amd64_sse_pextrw_reg_reg_imm (code, ins->dreg, ins->sreg1, ins->inst_c0);
+                       break;
+               case OP_INSERTX_U1_SLOW:
+                       /* sreg1 is the extracted ireg (scratch)
+                        * sreg2 is the ireg to be inserted (scratch)
+                        * dreg is the xreg to receive the value */
+
+                       /*clear the bits from the extracted word*/
+                       amd64_alu_reg_imm (code, X86_AND, ins->sreg1, ins->inst_c0 & 1 ? 0x00FF : 0xFF00);
+                       /*shift the value to insert if needed*/
+                       if (ins->inst_c0 & 1)
+                               amd64_shift_reg_imm_size (code, X86_SHL, ins->sreg2, 8, 4);
+                       /*join them together*/
+                       amd64_alu_reg_reg (code, X86_OR, ins->sreg1, ins->sreg2);
+                       amd64_sse_pinsrw_reg_reg_imm (code, ins->dreg, ins->sreg1, ins->inst_c0 / 2);
+                       break;
+               case OP_INSERTX_I4_SLOW:
+                       amd64_sse_pinsrw_reg_reg_imm (code, ins->dreg, ins->sreg2, ins->inst_c0 * 2);
+                       amd64_shift_reg_imm (code, X86_SHR, ins->sreg2, 16);
+                       amd64_sse_pinsrw_reg_reg_imm (code, ins->dreg, ins->sreg2, ins->inst_c0 * 2 + 1);
+                       break;
+               case OP_INSERTX_I8_SLOW:
+                       amd64_movd_xreg_reg_size(code, AMD64_XMM15, ins->sreg2, 8);
+                       if (ins->inst_c0)
+                               amd64_movlhps_reg_reg (code, ins->dreg, AMD64_XMM15);
+                       else
+                               amd64_sse_movsd_reg_reg (code, ins->dreg, AMD64_XMM15);
+                       break;
+
+               case OP_INSERTX_R4_SLOW:
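+                       /* cvtsd2ss only writes lane 0, so swap the target lane into lane 0,
+                          convert there, and swap back; each shuffle mask exchanges lane 0
+                          with the target lane and is therefore its own inverse */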
+                       switch (ins->inst_c0) {
+                       case 0:
+                               amd64_sse_cvtsd2ss_reg_reg (code, ins->dreg, ins->sreg2);
+                               break;
+                       case 1:
+                               amd64_sse_pshufd_reg_reg_imm (code, ins->dreg, ins->dreg, mono_simd_shuffle_mask(1, 0, 2, 3));
+                               amd64_sse_cvtsd2ss_reg_reg (code, ins->dreg, ins->sreg2);
+                               amd64_sse_pshufd_reg_reg_imm (code, ins->dreg, ins->dreg, mono_simd_shuffle_mask(1, 0, 2, 3));
+                               break;
+                       case 2:
+                               amd64_sse_pshufd_reg_reg_imm (code, ins->dreg, ins->dreg, mono_simd_shuffle_mask(2, 1, 0, 3));
+                               amd64_sse_cvtsd2ss_reg_reg (code, ins->dreg, ins->sreg2);
+                               amd64_sse_pshufd_reg_reg_imm (code, ins->dreg, ins->dreg, mono_simd_shuffle_mask(2, 1, 0, 3));
+                               break;
+                       case 3:
+                               amd64_sse_pshufd_reg_reg_imm (code, ins->dreg, ins->dreg, mono_simd_shuffle_mask(3, 1, 2, 0));
+                               amd64_sse_cvtsd2ss_reg_reg (code, ins->dreg, ins->sreg2);
+                               amd64_sse_pshufd_reg_reg_imm (code, ins->dreg, ins->dreg, mono_simd_shuffle_mask(3, 1, 2, 0));
+                               break;
+                       }
+                       break;
+               case OP_INSERTX_R8_SLOW:
+                       if (ins->inst_c0)
+                               amd64_movlhps_reg_reg (code, ins->dreg, ins->sreg2);
+                       else
+                               amd64_sse_movsd_reg_reg (code, ins->dreg, ins->sreg2);
+                       break;
+               case OP_STOREX_MEMBASE_REG:
+               case OP_STOREX_MEMBASE:
+                       amd64_sse_movups_membase_reg (code, ins->dreg, ins->inst_offset, ins->sreg1);
+                       break;
+               case OP_LOADX_MEMBASE:
+                       amd64_sse_movups_reg_membase (code, ins->dreg, ins->sreg1, ins->inst_offset);
+                       break;
+               case OP_LOADX_ALIGNED_MEMBASE:
+                       amd64_sse_movaps_reg_membase (code, ins->dreg, ins->sreg1, ins->inst_offset);
+                       break;
+               case OP_STOREX_ALIGNED_MEMBASE_REG:
+                       amd64_sse_movaps_membase_reg (code, ins->dreg, ins->inst_offset, ins->sreg1);
+                       break;
+               case OP_STOREX_NTA_MEMBASE_REG:
+                       amd64_sse_movntps_reg_membase (code, ins->dreg, ins->sreg1, ins->inst_offset);
+                       break;
+               case OP_PREFETCH_MEMBASE:
+                       amd64_sse_prefetch_reg_membase (code, ins->backend.arg_info, ins->sreg1, ins->inst_offset);
+                       break;
+
+               case OP_XMOVE:
+                       /* FIXME: the peephole pass should have eliminated this */
+                       if (ins->dreg != ins->sreg1)
+                               amd64_sse_movaps_reg_reg (code, ins->dreg, ins->sreg1);
+                       break;          
+               case OP_XZERO:
+                       amd64_sse_pxor_reg_reg (code, ins->dreg, ins->dreg);
+                       break;
+               case OP_ICONV_TO_R8_RAW:
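+                       /* reinterpret the low 32 bits as a float, then widen it to a double */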
+                       amd64_movd_xreg_reg_size (code, ins->dreg, ins->sreg1, 4);
+                       amd64_sse_cvtss2sd_reg_reg (code, ins->dreg, ins->dreg);
+                       break;
+
+               case OP_FCONV_TO_R8_X:
+                       amd64_sse_movsd_reg_reg (code, ins->dreg, ins->sreg1);
+                       break;
+
+               case OP_XCONV_R8_TO_I4:
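+                       /* truncating double-to-int32 convert, then narrow per the original fconv opcode */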
+                       amd64_sse_cvttsd2si_reg_xreg_size (code, ins->dreg, ins->sreg1, 4);
+                       switch (ins->backend.source_opcode) {
+                       case OP_FCONV_TO_I1:
+                               amd64_widen_reg (code, ins->dreg, ins->dreg, TRUE, FALSE);
+                               break;
+                       case OP_FCONV_TO_U1:
+                               amd64_widen_reg (code, ins->dreg, ins->dreg, FALSE, FALSE);
+                               break;
+                       case OP_FCONV_TO_I2:
+                               amd64_widen_reg (code, ins->dreg, ins->dreg, TRUE, TRUE);
+                               break;
+                       case OP_FCONV_TO_U2:
+                               amd64_widen_reg (code, ins->dreg, ins->dreg, FALSE, TRUE);
+                               break;
+                       }                       
+                       break;
+
+               case OP_EXPAND_I2:
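+                       /* fill both halves of dword 0 with the value, then broadcast it to all four dwords */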
+                       amd64_sse_pinsrw_reg_reg_imm (code, ins->dreg, ins->sreg1, 0);
+                       amd64_sse_pinsrw_reg_reg_imm (code, ins->dreg, ins->sreg1, 1);
+                       amd64_sse_pshufd_reg_reg_imm (code, ins->dreg, ins->dreg, 0);
+                       break;
+               case OP_EXPAND_I4:
+                       amd64_movd_xreg_reg_size (code, ins->dreg, ins->sreg1, 4);
+                       amd64_sse_pshufd_reg_reg_imm (code, ins->dreg, ins->dreg, 0);
+                       break;
+               case OP_EXPAND_I8:
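+                       /* shuffle mask 0x44 selects dwords 0,1,0,1, duplicating the low qword into the high one */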
+                       amd64_movd_xreg_reg_size (code, ins->dreg, ins->sreg1, 8);
+                       amd64_sse_pshufd_reg_reg_imm (code, ins->dreg, ins->dreg, 0x44);
+                       break;
+               case OP_EXPAND_R4:
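+                       /* the source fp vreg holds a double (hence the cvtsd2ss); narrow to
+                          single precision first, then broadcast lane 0 */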
+                       amd64_sse_movsd_reg_reg (code, ins->dreg, ins->sreg1);
+                       amd64_sse_cvtsd2ss_reg_reg (code, ins->dreg, ins->dreg);
+                       amd64_sse_pshufd_reg_reg_imm (code, ins->dreg, ins->dreg, 0);
+                       break;
+               case OP_EXPAND_R8:
+                       amd64_sse_movsd_reg_reg (code, ins->dreg, ins->sreg1);
+                       amd64_sse_pshufd_reg_reg_imm (code, ins->dreg, ins->dreg, 0x44);
+                       break;
+#endif
                case OP_LIVERANGE_START: {
                        if (cfg->verbose_level > 1)
                                printf ("R%d START=0x%x\n", MONO_VARINFO (cfg, ins->inst_c0)->vreg, (int)(code - cfg->native_code));
@@ -4322,8 +5110,6 @@ mono_arch_output_basic_block (MonoCompile *cfg, MonoBasicBlock *bb)
                        g_assert_not_reached ();
                }
               
-               cpos += max_len;
-
                last_ins = ins;
                last_offset = offset;
        }
@@ -4443,7 +5229,7 @@ mono_arch_emit_prolog (MonoCompile *cfg)
        MonoBasicBlock *bb;
        MonoMethodSignature *sig;
        MonoInst *ins;
-       int alloc_size, pos, max_offset, i, cfa_offset, quad, max_epilog_size;
+       int alloc_size, pos, i, cfa_offset, quad, max_epilog_size;
        guint8 *code;
        CallInfo *cinfo;
        gint32 lmf_offset = cfg->arch.lmf_offset;
@@ -4517,6 +5303,15 @@ mono_arch_emit_prolog (MonoCompile *cfg)
                        }
        }
 
+       /* The param area is always at offset 0 from sp */
+       /* This needs to be allocated here, since it has to come after the spill area */
+       if (cfg->arch.no_pushes && cfg->param_area) {
+               if (cfg->arch.omit_fp)
+                       // FIXME:
+                       g_assert_not_reached ();
+               cfg->stack_offset += ALIGN_TO (cfg->param_area, sizeof (gpointer));
+       }
+
        if (cfg->arch.omit_fp) {
                /* 
                 * On enter, the stack is misaligned by the pushing of the return
@@ -4646,30 +5441,28 @@ mono_arch_emit_prolog (MonoCompile *cfg)
                amd64_mov_membase_reg (code, cfg->rgctx_var->inst_basereg, cfg->rgctx_var->inst_offset, MONO_ARCH_RGCTX_REG, 8);
        }
 
-       /* compute max_offset in order to use short forward jumps */
-       max_offset = 0;
+       /* compute max_length in order to use short forward jumps */
        max_epilog_size = get_max_epilog_size (cfg);
        if (cfg->opt & MONO_OPT_BRANCH) {
                for (bb = cfg->bb_entry; bb; bb = bb->next_bb) {
                        MonoInst *ins;
-                       bb->max_offset = max_offset;
+                       int max_length = 0;
 
                        if (cfg->prof_options & MONO_PROFILE_COVERAGE)
-                               max_offset += 6;
+                               max_length += 6;
                        /* max alignment for loops */
                        if ((cfg->opt & MONO_OPT_LOOP) && bb_is_loop_start (bb))
-                               max_offset += LOOP_ALIGNMENT;
+                               max_length += LOOP_ALIGNMENT;
 
                        MONO_BB_FOR_EACH_INS (bb, ins) {
-                               if (ins->opcode == OP_LABEL)
-                                       ins->inst_c1 = max_offset;
-                               
-                               max_offset += ((guint8 *)ins_get_spec (ins->opcode))[MONO_INST_LEN];
+                               max_length += ((guint8 *)ins_get_spec (ins->opcode))[MONO_INST_LEN];
                        }
 
-                       if (mono_jit_trace_calls && bb == cfg->bb_exit)
-                               /* The tracing code can be quite large */
-                               max_offset += max_epilog_size;
+                       /* Take prolog and epilog instrumentation into account */
+                       if (bb == cfg->bb_entry || bb == cfg->bb_exit)
+                               max_length += max_epilog_size;
+                       
+                       bb->max_length = max_length;
                }
        }
 
@@ -4816,10 +5609,15 @@ mono_arch_emit_prolog (MonoCompile *cfg)
                        guint8 *buf, *no_domain_branch;
 
                        code = mono_amd64_emit_tls_get (code, AMD64_RAX, appdomain_tls_offset);
-                       if ((domain >> 32) == 0)
-                               amd64_mov_reg_imm_size (code, AMD64_ARG_REG1, domain, 4);
-                       else
-                               amd64_mov_reg_imm_size (code, AMD64_ARG_REG1, domain, 8);
+                       if (cfg->compile_aot) {
+                               /* AOT code is only used in the root domain */
+                               amd64_mov_reg_imm (code, AMD64_ARG_REG1, 0);
+                       } else {
+                               if ((domain >> 32) == 0)
+                                       amd64_mov_reg_imm_size (code, AMD64_ARG_REG1, domain, 4);
+                               else
+                                       amd64_mov_reg_imm_size (code, AMD64_ARG_REG1, domain, 8);
+                       }
                        amd64_alu_reg_reg (code, X86_CMP, AMD64_RAX, AMD64_ARG_REG1);
                        no_domain_branch = code;
                        x86_branch8 (code, X86_CC_NE, 0, 0);
@@ -4838,10 +5636,15 @@ mono_arch_emit_prolog (MonoCompile *cfg)
 #endif
                } else {
                        g_assert (!cfg->compile_aot);
-                       if ((domain >> 32) == 0)
-                               amd64_mov_reg_imm_size (code, AMD64_ARG_REG1, domain, 4);
-                       else
-                               amd64_mov_reg_imm_size (code, AMD64_ARG_REG1, domain, 8);
+                       if (cfg->compile_aot) {
+                               /* AOT code is only used in the root domain */
+                               amd64_mov_reg_imm (code, AMD64_ARG_REG1, 0);
+                       } else {
+                               if ((domain >> 32) == 0)
+                                       amd64_mov_reg_imm_size (code, AMD64_ARG_REG1, domain, 4);
+                               else
+                                       amd64_mov_reg_imm_size (code, AMD64_ARG_REG1, domain, 8);
+                       }
                        code = emit_call (cfg, code, MONO_PATCH_INFO_INTERNAL_METHOD,
                                          (gpointer)"mono_jit_thread_attach", TRUE);
                }
@@ -4850,8 +5653,8 @@ mono_arch_emit_prolog (MonoCompile *cfg)
        if (method->save_lmf) {
                if ((lmf_tls_offset != -1) && !optimize_for_xen) {
                        /*
-                        * Optimized version which uses the mono_lmf TLS variable instead of indirection
-                        * through the mono_lmf_addr TLS variable.
+                        * Optimized version which uses the mono_lmf TLS variable instead of 
+                        * indirection through the mono_lmf_addr TLS variable.
                         */
                        /* %rax = previous_lmf */
                        x86_prefix (code, X86_FS_PREFIX);
@@ -5337,7 +6140,7 @@ enum {
 };
 
 void*
-mono_arch_instrument_epilog (MonoCompile *cfg, void *func, void *p, gboolean enable_arguments)
+mono_arch_instrument_epilog_full (MonoCompile *cfg, void *func, void *p, gboolean enable_arguments, gboolean preserve_argument_registers)
 {
        guchar *code = p;
        int save_mode = SAVE_NONE;
@@ -5410,10 +6213,20 @@ mono_arch_instrument_epilog (MonoCompile *cfg, void *func, void *p, gboolean ena
        else
                amd64_mov_reg_imm (code, AMD64_RAX, 0);
 
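+       /* The first two argument registers are caller-save, so keep them alive across the call */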
+       if (preserve_argument_registers) {
+               amd64_push_reg (code, MONO_AMD64_ARG_REG1);
+               amd64_push_reg (code, MONO_AMD64_ARG_REG2);
+       }
+
        mono_add_patch_info (cfg, code-cfg->native_code, MONO_PATCH_INFO_METHODCONST, method);
        amd64_set_reg_template (code, AMD64_ARG_REG1);
        code = emit_call (cfg, code, MONO_PATCH_INFO_ABS, (gpointer)func, TRUE);
 
+       if (preserve_argument_registers) {
+               amd64_pop_reg (code, MONO_AMD64_ARG_REG2);
+               amd64_pop_reg (code, MONO_AMD64_ARG_REG1);
+       }
+
        /* Restore result */
        switch (save_mode) {
        case SAVE_EAX:
@@ -5580,7 +6393,7 @@ mono_breakpoint_clean_code (guint8 *method_start, guint8 *code, int offset, guin
 }
 
 gpointer
-mono_arch_get_vcall_slot (guint8 *code, gpointer *regs, int *displacement)
+mono_arch_get_vcall_slot (guint8 *code, mgreg_t *regs, int *displacement)
 {
        guint8 buf [10];
        guint32 reg;
@@ -5661,18 +6474,7 @@ mono_arch_get_vcall_slot (guint8 *code, gpointer *regs, int *displacement)
        g_assert (reg != AMD64_R11);
 
        *displacement = disp;
-       return regs [reg];
-}
-
-gpointer*
-mono_arch_get_vcall_slot_addr (guint8* code, gpointer *regs)
-{
-       gpointer vt;
-       int displacement;
-       vt = mono_arch_get_vcall_slot (code, regs, &displacement);
-       if (!vt)
-               return NULL;
-       return (gpointer*)((char*)vt + displacement);
+       return (gpointer)regs [reg];
 }
 
 int
@@ -5697,13 +6499,85 @@ mono_arch_get_this_arg_reg (MonoMethodSignature *sig, MonoGenericSharingContext
 }
 
 gpointer
-mono_arch_get_this_arg_from_call (MonoGenericSharingContext *gsctx, MonoMethodSignature *sig, gssize *regs, guint8 *code)
+mono_arch_get_this_arg_from_call (MonoGenericSharingContext *gsctx, MonoMethodSignature *sig, mgreg_t *regs, guint8 *code)
 {
        return (gpointer)regs [mono_arch_get_this_arg_reg (sig, gsctx, code)];
 }
 
 #define MAX_ARCH_DELEGATE_PARAMS 10
 
+static gpointer
+get_delegate_invoke_impl (gboolean has_target, guint32 param_count, guint32 *code_len)
+{
+       guint8 *code, *start;
+       int i;
+
+       if (has_target) {
+               start = code = mono_global_codeman_reserve (64);
+
+               /* Replace the this argument with the target */
+               amd64_mov_reg_reg (code, AMD64_RAX, AMD64_ARG_REG1, 8);
+               amd64_mov_reg_membase (code, AMD64_ARG_REG1, AMD64_RAX, G_STRUCT_OFFSET (MonoDelegate, target), 8);
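+               /* tail jump to the delegate's method_ptr; the target object is now the this argument */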
+               amd64_jump_membase (code, AMD64_RAX, G_STRUCT_OFFSET (MonoDelegate, method_ptr));
+
+               g_assert ((code - start) < 64);
+       } else {
+               start = code = mono_global_codeman_reserve (64);
+
+               if (param_count == 0) {
+                       amd64_jump_membase (code, AMD64_ARG_REG1, G_STRUCT_OFFSET (MonoDelegate, method_ptr));
+               } else {
+                       /* We have to shift the arguments left */
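+                       /* Arg 0 (the delegate) is dropped, so every managed arg moves down one
+                          register slot; on Win64 only four args are passed in registers, so the
+                          new fourth register arg is loaded from the first stack arg at rsp + 0x28
+                          (return address + 32-byte shadow space) */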
+                       amd64_mov_reg_reg (code, AMD64_RAX, AMD64_ARG_REG1, 8);
+                       for (i = 0; i < param_count; ++i) {
+#ifdef PLATFORM_WIN32
+                               if (i < 3)
+                                       amd64_mov_reg_reg (code, param_regs [i], param_regs [i + 1], 8);
+                               else
+                                       amd64_mov_reg_membase (code, param_regs [i], AMD64_RSP, 0x28, 8);
+#else
+                               amd64_mov_reg_reg (code, param_regs [i], param_regs [i + 1], 8);
+#endif
+                       }
+
+                       amd64_jump_membase (code, AMD64_RAX, G_STRUCT_OFFSET (MonoDelegate, method_ptr));
+               }
+               g_assert ((code - start) < 64);
+       }
+
+       mono_debug_add_delegate_trampoline (start, code - start);
+
+       if (code_len)
+               *code_len = code - start;
+
+       return start;
+}
+
+/*
+ * mono_arch_get_delegate_invoke_impls:
+ *
+ *   Return a list of MonoAotTrampInfo structures for the delegate invoke impl
+ * trampolines.
+ */
+GSList*
+mono_arch_get_delegate_invoke_impls (void)
+{
+       GSList *res = NULL;
+       guint8 *code;
+       guint32 code_len;
+       int i;
+
+       code = get_delegate_invoke_impl (TRUE, 0, &code_len);
+       res = g_slist_prepend (res, mono_aot_tramp_info_create (g_strdup ("delegate_invoke_impl_has_target"), code, code_len));
+
+       for (i = 0; i < MAX_ARCH_DELEGATE_PARAMS; ++i) {
+               code = get_delegate_invoke_impl (FALSE, i, &code_len);
+               res = g_slist_prepend (res, mono_aot_tramp_info_create (g_strdup_printf ("delegate_invoke_impl_target_%d", i), code, code_len));
+       }
+
+       return res;
+}
+
 gpointer
 mono_arch_get_delegate_invoke_impl (MonoMethodSignature *sig, gboolean has_target)
 {
@@ -5723,16 +6597,10 @@ mono_arch_get_delegate_invoke_impl (MonoMethodSignature *sig, gboolean has_targe
                if (cached)
                        return cached;
 
-               start = code = mono_global_codeman_reserve (64);
-
-               /* Replace the this argument with the target */
-               amd64_mov_reg_reg (code, AMD64_RAX, AMD64_ARG_REG1, 8);
-               amd64_mov_reg_membase (code, AMD64_ARG_REG1, AMD64_RAX, G_STRUCT_OFFSET (MonoDelegate, target), 8);
-               amd64_jump_membase (code, AMD64_RAX, G_STRUCT_OFFSET (MonoDelegate, method_ptr));
-
-               g_assert ((code - start) < 64);
-
-               mono_debug_add_delegate_trampoline (start, code - start);
+               if (mono_aot_only)
+                       start = mono_aot_get_named_code ("delegate_invoke_impl_has_target");
+               else
+                       start = get_delegate_invoke_impl (TRUE, 0, NULL);
 
                mono_memory_barrier ();
 
@@ -5749,29 +6617,13 @@ mono_arch_get_delegate_invoke_impl (MonoMethodSignature *sig, gboolean has_targe
                if (code)
                        return code;
 
-               start = code = mono_global_codeman_reserve (64);
-
-               if (sig->param_count == 0) {
-                       amd64_jump_membase (code, AMD64_ARG_REG1, G_STRUCT_OFFSET (MonoDelegate, method_ptr));
+               if (mono_aot_only) {
+                       char *name = g_strdup_printf ("delegate_invoke_impl_target_%d", sig->param_count);
+                       start = mono_aot_get_named_code (name);
+                       g_free (name);
                } else {
-                       /* We have to shift the arguments left */
-                       amd64_mov_reg_reg (code, AMD64_RAX, AMD64_ARG_REG1, 8);
-                       for (i = 0; i < sig->param_count; ++i) {
-#ifdef PLATFORM_WIN32
-                               if (i < 3)
-                                       amd64_mov_reg_reg (code, param_regs [i], param_regs [i + 1], 8);
-                               else
-                                       amd64_mov_reg_membase (code, param_regs [i], AMD64_RSP, 0x28, 8);
-#else
-                               amd64_mov_reg_reg (code, param_regs [i], param_regs [i + 1], 8);
-#endif
-                       }
-
-                       amd64_jump_membase (code, AMD64_RAX, G_STRUCT_OFFSET (MonoDelegate, method_ptr));
+                       start = get_delegate_invoke_impl (FALSE, sig->param_count, NULL);
                }
-               g_assert ((code - start) < 64);
-
-               mono_debug_add_delegate_trampoline (start, code - start);
 
                mono_memory_barrier ();
 
@@ -5999,20 +6851,20 @@ mono_arch_build_imt_thunk (MonoVTable *vtable, MonoDomain *domain, MonoIMTCheckI
 }
 
 MonoMethod*
-mono_arch_find_imt_method (gpointer *regs, guint8 *code)
+mono_arch_find_imt_method (mgreg_t *regs, guint8 *code)
 {
-       return regs [MONO_ARCH_IMT_REG];
+       return (MonoMethod*)regs [MONO_ARCH_IMT_REG];
 }
 
 MonoObject*
-mono_arch_find_this_argument (gpointer *regs, MonoMethod *method, MonoGenericSharingContext *gsctx)
+mono_arch_find_this_argument (mgreg_t *regs, MonoMethod *method, MonoGenericSharingContext *gsctx)
 {
-       return mono_arch_get_this_arg_from_call (gsctx, mono_method_signature (method), (gssize*)regs, NULL);
+       return mono_arch_get_this_arg_from_call (gsctx, mono_method_signature (method), regs, NULL);
 }
 #endif
 
 MonoVTable*
-mono_arch_find_static_call_vtable (gpointer *regs, guint8 *code)
+mono_arch_find_static_call_vtable (mgreg_t *regs, guint8 *code)
 {
        return (MonoVTable*) regs [MONO_ARCH_RGCTX_REG];
 }