* Makefile: Build the make-map.exe in Mono.Unix.Native; add /nowarn:0618 to
[mono.git] / mono / mini / mini-amd64.c
index becdbf48917ad70d427a6e31692be2c7607065fa..1fc3db7e8f27fab3953cbb55d285d51908d07560 100644 (file)
@@ -29,13 +29,10 @@ static gint lmf_tls_offset = -1;
 static gint appdomain_tls_offset = -1;
 static gint thread_tls_offset = -1;
 
-/* Use SSE2 instructions for fp arithmetic */
-static gboolean use_sse2 = FALSE;
+static gboolean use_sse2 = !MONO_ARCH_USE_FPSTACK;
 
-/* xmm15 is reserved for use by some opcodes */
-#define AMD64_CALLEE_FREGS 0xef
-
-#define FPSTACK_SIZE 6
+const char * const amd64_desc [OP_LAST];
+static const char*const * ins_spec = amd64_desc;
 
 #define ALIGN_TO(val,align) ((((guint64)val) + ((align) - 1)) & ~((align) - 1))
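Editorial aside, not part of the patch: ALIGN_TO above is the standard round-up-to-a-power-of-two idiom. A minimal sketch of the same computation (using glib's guint64 like the surrounding code), with a couple of worked values:

	/* Rounds val up to the next multiple of align; align must be a power of two. */
	static guint64
	align_to (guint64 val, guint64 align)
	{
		return (val + align - 1) & ~(align - 1);
	}
	/* align_to (13, 8) == 16, align_to (16, 8) == 16, align_to (0, 8) == 0 */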
 
@@ -48,8 +45,6 @@ static gboolean use_sse2 = FALSE;
 #define CALLCONV_IS_STDCALL(call_conv) ((call_conv) == MONO_CALL_STDCALL)
 #endif
 
-#define SIGNAL_STACK_SIZE (64 * 1024)
-
 #define ARGS_OFFSET 16
 #define GP_SCRATCH_REG AMD64_R11
 
@@ -61,12 +56,13 @@ static gboolean use_sse2 = FALSE;
  */
 
 /*
- * FIXME: 
- * - Use xmm registers instead of the x87 stack
- * - Allocate arguments to global registers
- * - implement emulated opcodes
- * - (all archs) do not store trampoline addresses in method->info since they
- *   are domain specific.   
+ * Floating point comparison results:
+ *                  ZF PF CF
+ * A > B            0  0  0
+ * A < B            0  0  1
+ * A = B            1  0  0
+ * UNORDERED        1  1  1
  */
 
 #define NOT_IMPLEMENTED g_assert_not_reached ()
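Editorial aside, not part of the patch: the comparison table above can be read as a small decision procedure. A minimal sketch assuming only the flag values listed there (the enum and function names are made up for illustration):

	enum { FLT_GREATER, FLT_LESS, FLT_EQUAL, FLT_UNORDERED };

	/* Classify a float compare from the ZF/PF/CF values in the table above. */
	static int
	classify_fp_compare (int zf, int pf, int cf)
	{
		if (pf)			/* PF is only set in the unordered case */
			return FLT_UNORDERED;
		if (zf)			/* ZF set, PF clear -> equal */
			return FLT_EQUAL;
		return cf ? FLT_LESS : FLT_GREATER;	/* CF distinguishes A < B from A > B */
	}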
@@ -99,7 +95,7 @@ static const char * xmmregs [] = {
        "xmm9", "xmm10", "xmm11", "xmm12", "xmm13", "xmm14", "xmm15"
 };
 
-static const char*
+const char*
 mono_arch_fregname (int reg)
 {
        if (reg < AMD64_XMM_NREG)
@@ -108,15 +104,6 @@ mono_arch_fregname (int reg)
                return "unknown";
 }
 
-static const char*
-mono_amd64_regname (int reg, gboolean fp)
-{
-       if (fp)
-               return mono_arch_fregname (reg);
-       else
-               return mono_arch_regname (reg);
-}
-
 static inline void 
 amd64_patch (unsigned char* code, gpointer target)
 {
@@ -124,10 +111,24 @@ amd64_patch (unsigned char* code, gpointer target)
        if ((code [0] >= 0x40) && (code [0] <= 0x4f))
                code += 1;
 
-       if (code [0] == 0xbb) {
+       if ((code [0] & 0xf8) == 0xb8) {
                /* amd64_set_reg_template */
                *(guint64*)(code + 1) = (guint64)target;
        }
+       else if (code [0] == 0x8b) {
+               /* mov 0(%rip), %dreg */
+               *(guint32*)(code + 2) = (guint32)(guint64)target - 7;
+       }
+       else if ((code [0] == 0xff) && (code [1] == 0x15)) {
+               /* call *<OFFSET>(%rip) */
+               *(guint32*)(code + 2) = ((guint32)(guint64)target) - 7;
+       }
+       else if ((code [0] == 0xe8)) {
+               /* call <DISP> */
+               gint64 disp = (guint8*)target - (guint8*)code;
+               g_assert (amd64_is_imm32 (disp));
+               x86_patch (code, (unsigned char*)target);
+       }
        else
                x86_patch (code, (unsigned char*)target);
 }
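Editorial aside, not part of the patch: the 0xe8 case above patches a rel32 call. The displacement a `call <DISP>` actually encodes is relative to the end of the 5-byte instruction; a sketch of the corresponding range check (the helper name is made up, amd64_is_imm32 is the existing macro from amd64-codegen.h):

	/* TRUE if target is reachable from code with a 5-byte call rel32. */
	static gboolean
	call_target_is_near (guint8 *code, gconstpointer target)
	{
		gint64 disp = (guint8*)target - (code + 5);	/* rel32 is relative to the next instruction */
		return amd64_is_imm32 (disp);
	}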
@@ -225,7 +226,7 @@ typedef enum ArgumentClass {
 static ArgumentClass
 merge_argument_class_from_type (MonoType *type, ArgumentClass class1)
 {
-       ArgumentClass class2;
+       ArgumentClass class2 = ARG_CLASS_NO_CLASS;
        MonoType *ptype;
 
        ptype = mono_type_get_underlying_type (type);
@@ -452,6 +453,7 @@ get_call_info (MonoMethodSignature *sig, gboolean is_pinvoke)
                case MONO_TYPE_I:
                case MONO_TYPE_U:
                case MONO_TYPE_PTR:
+               case MONO_TYPE_FNPTR:
                case MONO_TYPE_CLASS:
                case MONO_TYPE_OBJECT:
                case MONO_TYPE_SZARRAY:
@@ -546,6 +548,7 @@ get_call_info (MonoMethodSignature *sig, gboolean is_pinvoke)
                case MONO_TYPE_I:
                case MONO_TYPE_U:
                case MONO_TYPE_PTR:
+               case MONO_TYPE_FNPTR:
                case MONO_TYPE_CLASS:
                case MONO_TYPE_OBJECT:
                case MONO_TYPE_STRING:
@@ -649,8 +652,6 @@ mono_arch_cpu_init (void)
        fpcw |= X86_FPCW_PREC_DOUBLE;
        __asm__  __volatile__ ("fldcw %0\n": : "m" (fpcw));
        __asm__  __volatile__ ("fnstcw %0\n": "=m" (fpcw));
-
-       mono_amd64_exceptions_init ();
 }
 
 /*
@@ -679,6 +680,12 @@ mono_arch_cpu_optimizazions (guint32 *exclude_mask)
        return opts;
 }
 
+gboolean
+mono_amd64_is_sse2 (void)
+{
+       return use_sse2;
+}
+
 static gboolean
 is_regsize_var (MonoType *t) {
        if (t->byref)
@@ -690,6 +697,7 @@ is_regsize_var (MonoType *t) {
        case MONO_TYPE_I:
        case MONO_TYPE_U:
        case MONO_TYPE_PTR:
+       case MONO_TYPE_FNPTR:
                return TRUE;
        case MONO_TYPE_OBJECT:
        case MONO_TYPE_STRING:
@@ -779,12 +787,14 @@ mono_arch_allocate_vars (MonoCompile *m)
        MonoMethodSignature *sig;
        MonoMethodHeader *header;
        MonoInst *inst;
-       int i, offset, size, align, curinst;
+       int i, offset;
+       guint32 locals_stack_size, locals_stack_align;
+       gint32 *offsets;
        CallInfo *cinfo;
 
-       header = ((MonoMethodNormal *)m->method)->header;
+       header = mono_method_get_header (m->method);
 
-       sig = m->method->signature;
+       sig = mono_method_signature (m->method);
 
        cinfo = get_call_info (sig, FALSE);
 
@@ -829,42 +839,36 @@ mono_arch_allocate_vars (MonoCompile *m)
                                m->ret->inst_c0 = cinfo->ret.reg;
                        }
                        break;
+               case ArgValuetypeInReg:
+                       /* Allocate a local to hold the result, the epilog will copy it to the correct place */
+                       offset += 16;
+                       m->ret->opcode = OP_REGOFFSET;
+                       m->ret->inst_basereg = AMD64_RBP;
+                       m->ret->inst_offset = - offset;
+                       break;
                default:
                        g_assert_not_reached ();
                }
                m->ret->dreg = m->ret->inst_c0;
        }
 
-       curinst = m->locals_start;
-       for (i = curinst; i < m->num_varinfo; ++i) {
-               inst = m->varinfo [i];
-
-               if (inst->opcode == OP_REGVAR) {
-                       //g_print ("allocating local %d to %s\n", i, mono_arch_regname (inst->dreg));
-                       continue;
+       /* Allocate locals */
+       offsets = mono_allocate_stack_slots (m, &locals_stack_size, &locals_stack_align);
+       if (locals_stack_align) {
+               offset += (locals_stack_align - 1);
+               offset &= ~(locals_stack_align - 1);
+       }
+       for (i = m->locals_start; i < m->num_varinfo; i++) {
+               if (offsets [i] != -1) {
+                       MonoInst *inst = m->varinfo [i];
+                       inst->opcode = OP_REGOFFSET;
+                       inst->inst_basereg = AMD64_RBP;
+                       inst->inst_offset = - (offset + offsets [i]);
+                       //printf ("allocated local %d to ", i); mono_print_tree_nl (inst);
                }
-
-               /* inst->unused indicates native sized value types, this is used by the
-               * pinvoke wrappers when they call functions returning structure */
-               if (inst->unused && MONO_TYPE_ISSTRUCT (inst->inst_vtype) && inst->inst_vtype->type != MONO_TYPE_TYPEDBYREF)
-                       size = mono_class_native_size (mono_class_from_mono_type (inst->inst_vtype), &align);
-               else
-                       size = mono_type_stack_size (inst->inst_vtype, &align);
-
-               /*
-                * variables are accessed as negative offsets from %fp, so increase
-                * the offset before assigning it to a variable
-                */
-               offset += size;
-
-               offset += align - 1;
-               offset &= ~(align - 1);
-               inst->opcode = OP_REGOFFSET;
-               inst->inst_basereg = AMD64_RBP;
-               inst->inst_offset = - offset;
-
-               //g_print ("allocating local %d to [%s - %d]\n", i, mono_arch_regname (inst->inst_basereg), - inst->inst_offset);
        }
+       g_free (offsets);
+       offset += locals_stack_size;
 
        if (!sig->pinvoke && (sig->call_convention == MONO_CALL_VARARG)) {
                g_assert (cinfo->sig_cookie.storage == ArgOnStack);
@@ -892,7 +896,7 @@ mono_arch_allocate_vars (MonoCompile *m)
                         * are volatile across calls.
                         * FIXME: Optimize this.
                         */
-                       if ((ainfo->storage == ArgInIReg) || (ainfo->storage == ArgInFloatSSEReg) || (ainfo->storage == ArgInDoubleSSEReg))
+                       if ((ainfo->storage == ArgInIReg) || (ainfo->storage == ArgInFloatSSEReg) || (ainfo->storage == ArgInDoubleSSEReg) || (ainfo->storage == ArgValuetypeInReg))
                                inreg = FALSE;
 
                        inst->opcode = OP_REGOFFSET;
@@ -909,6 +913,8 @@ mono_arch_allocate_vars (MonoCompile *m)
                                inst->inst_basereg = AMD64_RBP;
                                inst->inst_offset = ainfo->offset + ARGS_OFFSET;
                                break;
+                       case ArgValuetypeInReg:
+                               break;
                        default:
                                NOT_IMPLEMENTED;
                        }
@@ -917,7 +923,10 @@ mono_arch_allocate_vars (MonoCompile *m)
                                inst->opcode = OP_REGOFFSET;
                                inst->inst_basereg = AMD64_RBP;
                                /* These arguments are saved to the stack in the prolog */
-                               offset += 8;
+                               if (ainfo->storage == ArgValuetypeInReg)
+                                       offset += 2 * sizeof (gpointer);
+                               else
+                                       offset += sizeof (gpointer);
                                inst->inst_offset = - offset;
                        }
                }
@@ -928,6 +937,22 @@ mono_arch_allocate_vars (MonoCompile *m)
        g_free (cinfo);
 }
 
+void
+mono_arch_create_vars (MonoCompile *cfg)
+{
+       MonoMethodSignature *sig;
+       CallInfo *cinfo;
+
+       sig = mono_method_signature (cfg->method);
+
+       cinfo = get_call_info (sig, FALSE);
+
+       if (cinfo->ret.storage == ArgValuetypeInReg)
+               cfg->ret_var_is_local = TRUE;
+
+       g_free (cinfo);
+}
+
 static void
 add_outarg_reg (MonoCompile *cfg, MonoCallInst *call, MonoInst *arg, ArgStorage storage, int reg, MonoInst *tree)
 {
@@ -1081,6 +1106,7 @@ mono_arch_call_opcode (MonoCompile *cfg, MonoBasicBlock* bb, MonoCallInst *call,
 
                                                /* Reg1 */
                                                MONO_INST_NEW (cfg, load, CEE_LDIND_I);
+                                               load->ssa_op = MONO_SSA_LOAD;
                                                load->inst_i0 = (cfg)->varinfo [vtaddr->inst_c0];
 
                                                NEW_ICONST (cfg, offset_ins, 0);
@@ -1095,6 +1121,7 @@ mono_arch_call_opcode (MonoCompile *cfg, MonoBasicBlock* bb, MonoCallInst *call,
 
                                                /* Reg2 */
                                                MONO_INST_NEW (cfg, load, CEE_LDIND_I);
+                                               load->ssa_op = MONO_SSA_LOAD;
                                                load->inst_i0 = (cfg)->varinfo [vtaddr->inst_c0];
 
                                                NEW_ICONST (cfg, offset_ins, 8);
@@ -1117,6 +1144,7 @@ mono_arch_call_opcode (MonoCompile *cfg, MonoBasicBlock* bb, MonoCallInst *call,
                                                /* Prepend a copy inst */
                                                MONO_INST_NEW (cfg, arg, CEE_STIND_I);
                                                arg->cil_code = in->cil_code;
+                                               arg->ssa_op = MONO_SSA_STORE;
                                                arg->inst_left = vtaddr;
                                                arg->inst_right = in;
                                                arg->type = in->type;
@@ -1213,29 +1241,97 @@ if (ins->flags & MONO_INST_BRLABEL) { \
        amd64_fnstsw (code); \
 } while (0); 
 
-/*
- * Emitting a call and patching it later is expensive on amd64, so try to
- * determine the patch target immediately, and emit more efficient code if
- * possible.
- */
+#define EMIT_SSE2_FPFUNC(code, op, dreg, sreg1) do { \
+    amd64_movsd_membase_reg (code, AMD64_RSP, -8, (sreg1)); \
+       amd64_fld_membase (code, AMD64_RSP, -8, TRUE); \
+       amd64_ ##op (code); \
+       amd64_fst_membase (code, AMD64_RSP, -8, TRUE, TRUE); \
+       amd64_movsd_reg_membase (code, (dreg), AMD64_RSP, -8); \
+} while (0);
+
 static guint8*
 emit_call (MonoCompile *cfg, guint8 *code, guint32 patch_type, gconstpointer data)
 {
-       /* FIXME: */
        mono_add_patch_info (cfg, code - cfg->native_code, patch_type, data);
-       amd64_set_reg_template (code, GP_SCRATCH_REG);
-       amd64_call_reg (code, GP_SCRATCH_REG);
+
+       if (cfg->compile_aot) {
+               amd64_call_membase (code, AMD64_RIP, 0);
+       }
+       else {
+               gboolean near_call = FALSE;
+
+               /*
+                * Indirect calls are expensive, so try to make a near call if possible.
+                * The caller's memory is allocated by the code manager, so it is
+                * guaranteed to be within a 32 bit offset.
+                */
+
+               if (patch_type != MONO_PATCH_INFO_ABS) {
+                       /* The target is in memory allocated using the code manager */
+                       near_call = TRUE;
+
+                       if ((patch_type == MONO_PATCH_INFO_METHOD) || (patch_type == MONO_PATCH_INFO_METHOD_JUMP)) {
+                               if (((MonoMethod*)data)->klass->image->assembly->aot_module)
+                                       /* The callee might be an AOT method */
+                                       near_call = FALSE;
+                       }
+
+                       if (patch_type == MONO_PATCH_INFO_INTERNAL_METHOD) {
+                               /* 
+                                * The call might go directly to a native function without
+                                * the wrapper.
+                                */
+                               MonoJitICallInfo *mi = mono_find_jit_icall_by_name (data);
+                               if (mi) {
+                                       gconstpointer target = mono_icall_get_wrapper (mi);
+                                       if ((((guint64)target) >> 32) != 0)
+                                               near_call = FALSE;
+                               }
+                       }
+               }
+               else {
+                       if (mono_find_class_init_trampoline_by_addr (data))
+                               near_call = TRUE;
+                       else {
+                               MonoJitICallInfo *info = mono_find_jit_icall_by_addr (data);
+                               if (info) {
+                                       if ((cfg->method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) && 
+                                               strstr (cfg->method->name, info->name)) {
+                                               /* A call to the wrapped function */
+                                               if ((((guint64)data) >> 32) == 0)
+                                                       near_call = TRUE;
+                                       }
+                                       else if (info->func == info->wrapper) {
+                                               /* No wrapper */
+                                               if ((((guint64)info->func) >> 32) == 0)
+                                                       near_call = TRUE;
+                                       }
+                                       else
+                                               near_call = TRUE;
+                               }
+                               else if ((((guint64)data) >> 32) == 0)
+                                       near_call = TRUE;
+                       }
+               }
+
+               if (cfg->method->dynamic)
+                       /* These methods are allocated using malloc */
+                       near_call = FALSE;
+
+               if (near_call) {
+                       amd64_call_code (code, 0);
+               }
+               else {
+                       amd64_set_reg_template (code, GP_SCRATCH_REG);
+                       amd64_call_reg (code, GP_SCRATCH_REG);
+               }
+       }
 
        return code;
 }
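Editorial aside, not part of the patch: the payoff of the near-call heuristic in emit_call above is code size. A sketch of the two shapes it can emit (sizes are the standard x86-64 encodings, shown for illustration only):

	/*
	 *   near call  (5 bytes, later fixed up via the 0xe8 case in amd64_patch):
	 *       e8 xx xx xx xx                    call   rel32
	 *
	 *   far call through the scratch register (13 bytes):
	 *       49 bb xx xx xx xx xx xx xx xx     mov    $imm64, %r11
	 *       41 ff d3                          call   *%r11
	 */
	static int
	emitted_call_size (gboolean near_call)
	{
		return near_call ? 5 : 13;
	}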
 
-#define EMIT_CALL() do { \
-    amd64_set_reg_template (code, GP_SCRATCH_REG); \
-    amd64_call_reg (code, GP_SCRATCH_REG); \
-} while (0);
-
 /* FIXME: Add more instructions */
-#define INST_IGNORES_CFLAGS(ins) (((ins)->opcode == CEE_BR) || ((ins)->opcode == OP_STORE_MEMBASE_IMM) || ((ins)->opcode == OP_STOREI8_MEMBASE_REG))
+#define INST_IGNORES_CFLAGS(ins) (((ins)->opcode == CEE_BR) || ((ins)->opcode == OP_STORE_MEMBASE_IMM) || ((ins)->opcode == OP_STOREI8_MEMBASE_REG) || ((ins)->opcode == OP_MOVE) || ((ins)->opcode == OP_ICONST) || ((ins)->opcode == OP_I8CONST) || ((ins)->opcode == OP_LOAD_MEMBASE))
 
 static void
 peephole_pass (MonoCompile *cfg, MonoBasicBlock *bb)
@@ -1250,7 +1346,7 @@ peephole_pass (MonoCompile *cfg, MonoBasicBlock *bb)
                case OP_I8CONST:
                        /* reg = 0 -> XOR (reg, reg) */
                        /* XOR sets cflags on x86, so we cant do it always */
-                       if (ins->inst_c0 == 0 && ins->next && INST_IGNORES_CFLAGS (ins->next)) {
+                       if (ins->inst_c0 == 0 && (ins->next && INST_IGNORES_CFLAGS (ins->next))) {
                                ins->opcode = CEE_XOR;
                                ins->sreg1 = ins->dreg;
                                ins->sreg2 = ins->dreg;
@@ -1280,7 +1376,7 @@ peephole_pass (MonoCompile *cfg, MonoBasicBlock *bb)
                        if (!ins->inst_imm)
                                ins->opcode = OP_X86_TEST_NULL;
                        break;
-               case OP_X86_COMPARE_MEMBASE_IMM:
+               case OP_AMD64_ICOMPARE_MEMBASE_IMM:
                        /* 
                         * OP_STORE_MEMBASE_REG reg, offset(basereg)
                         * OP_X86_COMPARE_MEMBASE_IMM offset(basereg), imm
@@ -1293,7 +1389,7 @@ peephole_pass (MonoCompile *cfg, MonoBasicBlock *bb)
                        if (last_ins && (last_ins->opcode == OP_STOREI4_MEMBASE_REG) &&
                            ins->inst_basereg == last_ins->inst_destbasereg &&
                            ins->inst_offset == last_ins->inst_offset) {
-                                       ins->opcode = OP_COMPARE_IMM;
+                                       ins->opcode = OP_ICOMPARE_IMM;
                                        ins->sreg1 = last_ins->sreg1;
 
                                        /* check if we can remove cmp reg,0 with test null */
@@ -1458,6 +1554,114 @@ peephole_pass (MonoCompile *cfg, MonoBasicBlock *bb)
        bb->last_ins = last_ins;
 }
 
+static void
+insert_after_ins (MonoBasicBlock *bb, MonoInst *ins, MonoInst *to_insert)
+{
+       if (ins == NULL) {
+               ins = bb->code;
+               bb->code = to_insert;
+               to_insert->next = ins;
+       }
+       else {
+               to_insert->next = ins->next;
+               ins->next = to_insert;
+       }
+}
+
+#define NEW_INS(cfg,dest,op) do {      \
+               (dest) = mono_mempool_alloc0 ((cfg)->mempool, sizeof (MonoInst));       \
+               (dest)->opcode = (op);  \
+        insert_after_ins (bb, last_ins, (dest)); \
+       } while (0)
+
+/*
+ * mono_arch_lowering_pass:
+ *
+ *  Converts complex opcodes into simpler ones so that each IR instruction
+ * corresponds to one machine instruction.
+ */
+static void
+mono_arch_lowering_pass (MonoCompile *cfg, MonoBasicBlock *bb)
+{
+       MonoInst *ins, *temp, *last_ins = NULL;
+       ins = bb->code;
+
+       if (bb->max_ireg > cfg->rs->next_vireg)
+               cfg->rs->next_vireg = bb->max_ireg;
+       if (bb->max_freg > cfg->rs->next_vfreg)
+               cfg->rs->next_vfreg = bb->max_freg;
+
+       /*
+        * FIXME: Need to add more instructions, but the current machine 
+        * description can't model some parts of the composite instructions like
+        * cdq.
+        */
+       while (ins) {
+               switch (ins->opcode) {
+               case OP_DIV_IMM:
+               case OP_REM_IMM:
+               case OP_IDIV_IMM:
+               case OP_IREM_IMM:
+                       NEW_INS (cfg, temp, OP_ICONST);
+                       temp->inst_c0 = ins->inst_imm;
+                       temp->dreg = mono_regstate_next_int (cfg->rs);
+                       switch (ins->opcode) {
+                       case OP_DIV_IMM:
+                               ins->opcode = OP_LDIV;
+                               break;
+                       case OP_REM_IMM:
+                               ins->opcode = OP_LREM;
+                               break;
+                       case OP_IDIV_IMM:
+                               ins->opcode = OP_IDIV;
+                               break;
+                       case OP_IREM_IMM:
+                               ins->opcode = OP_IREM;
+                               break;
+                       }
+                       ins->sreg2 = temp->dreg;
+                       break;
+               case OP_COMPARE_IMM:
+                       if (!amd64_is_imm32 (ins->inst_imm)) {
+                               NEW_INS (cfg, temp, OP_I8CONST);
+                               temp->inst_c0 = ins->inst_imm;
+                               temp->dreg = mono_regstate_next_int (cfg->rs);
+                               ins->opcode = OP_COMPARE;
+                               ins->sreg2 = temp->dreg;
+                       }
+                       break;
+               case OP_LOAD_MEMBASE:
+               case OP_LOADI8_MEMBASE:
+                       if (!amd64_is_imm32 (ins->inst_offset)) {
+                               NEW_INS (cfg, temp, OP_I8CONST);
+                               temp->inst_c0 = ins->inst_offset;
+                               temp->dreg = mono_regstate_next_int (cfg->rs);
+                               ins->opcode = OP_AMD64_LOADI8_MEMINDEX;
+                               ins->inst_indexreg = temp->dreg;
+                       }
+                       break;
+               case OP_STORE_MEMBASE_IMM:
+               case OP_STOREI8_MEMBASE_IMM:
+                       if (!amd64_is_imm32 (ins->inst_imm)) {
+                               NEW_INS (cfg, temp, OP_I8CONST);
+                               temp->inst_c0 = ins->inst_imm;
+                               temp->dreg = mono_regstate_next_int (cfg->rs);
+                               ins->opcode = OP_STOREI8_MEMBASE_REG;
+                               ins->sreg1 = temp->dreg;
+                       }
+                       break;
+               default:
+                       break;
+               }
+               last_ins = ins;
+               ins = ins->next;
+       }
+       bb->last_ins = last_ins;
+
+       bb->max_ireg = cfg->rs->next_vireg;
+       bb->max_freg = cfg->rs->next_vfreg;
+}
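Editorial aside, not part of the patch: a worked before/after trace of what the lowering pass does to a compare against an immediate that does not fit in 32 bits (opcode names are the ones used above; the virtual register numbers are made up):

	/*
	 *   before:   OP_COMPARE_IMM    sreg1=R10             inst_imm=0x123456789
	 *
	 *   after:    OP_I8CONST        dreg=R57              inst_c0=0x123456789
	 *             OP_COMPARE        sreg1=R10  sreg2=R57
	 *
	 * The 64-bit immediate is materialized into a fresh vreg so the remaining
	 * instruction maps 1:1 onto a cmp reg,reg machine instruction.
	 */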
+
 static const int 
 branch_cc_table [] = {
        X86_CC_EQ, X86_CC_GE, X86_CC_GT, X86_CC_LE, X86_CC_LT,
@@ -1500,1453 +1704,145 @@ opcode_to_x86_cond (int opcode)
        return -1;
 }
 
+/*#include "cprop.c"*/
+
 /*
- * returns the offset used by spillvar. It allocates a new
- * spill variable if necessary. 
+ * Local register allocation.
+ * We first scan the list of instructions and we save the liveness info of
+ * each register (when the register is first used, when its value is set, etc.).
+ * We also reverse the list of instructions (in the InstList list) because assigning
+ * registers backwards allows for more tricks to be used.
  */
-static int
-mono_spillvar_offset (MonoCompile *cfg, int spillvar)
+void
+mono_arch_local_regalloc (MonoCompile *cfg, MonoBasicBlock *bb)
 {
-       MonoSpillInfo **si, *info;
-       int i = 0;
-
-       si = &cfg->spill_info; 
-       
-       while (i <= spillvar) {
-
-               if (!*si) {
-                       *si = info = mono_mempool_alloc (cfg->mempool, sizeof (MonoSpillInfo));
-                       info->next = NULL;
-                       cfg->stack_offset += sizeof (gpointer);
-                       info->offset = - cfg->stack_offset;
-               }
-
-               if (i == spillvar)
-                       return (*si)->offset;
+       if (!bb->code)
+               return;
 
-               i++;
-               si = &(*si)->next;
-       }
+       mono_arch_lowering_pass (cfg, bb);
 
-       g_assert_not_reached ();
-       return 0;
+       mono_local_regalloc (cfg, bb);
 }
 
-/*
- * returns the offset used by spillvar. It allocates a new
- * spill float variable if necessary. 
- * (same as mono_spillvar_offset but for float)
- */
-static int
-mono_spillvar_offset_float (MonoCompile *cfg, int spillvar)
+static unsigned char*
+emit_float_to_int (MonoCompile *cfg, guchar *code, int dreg, int sreg, int size, gboolean is_signed)
 {
-       MonoSpillInfo **si, *info;
-       int i = 0;
-
-       si = &cfg->spill_info_float; 
-       
-       while (i <= spillvar) {
-
-               if (!*si) {
-                       *si = info = mono_mempool_alloc (cfg->mempool, sizeof (MonoSpillInfo));
-                       info->next = NULL;
-                       cfg->stack_offset += sizeof (double);
-                       info->offset = - cfg->stack_offset;
-               }
-
-               if (i == spillvar)
-                       return (*si)->offset;
-
-               i++;
-               si = &(*si)->next;
+       if (use_sse2) {
+               amd64_sse_cvttsd2si_reg_reg (code, dreg, sreg);
+       }
+       else {
+               amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, 16);
+               x86_fnstcw_membase(code, AMD64_RSP, 0);
+               amd64_mov_reg_membase (code, dreg, AMD64_RSP, 0, 2);
+               amd64_alu_reg_imm (code, X86_OR, dreg, 0xc00);
+               amd64_mov_membase_reg (code, AMD64_RSP, 2, dreg, 2);
+               amd64_fldcw_membase (code, AMD64_RSP, 2);
+               amd64_push_reg (code, AMD64_RAX); // SP = SP - 8
+               amd64_fist_pop_membase (code, AMD64_RSP, 0, size == 8);
+               amd64_pop_reg (code, dreg);
+               amd64_fldcw_membase (code, AMD64_RSP, 0);
+               amd64_alu_reg_imm (code, X86_ADD, AMD64_RSP, 16);
        }
 
-       g_assert_not_reached ();
-       return 0;
-}
-
-/*
- * Creates a store for spilled floating point items
- */
-static MonoInst*
-create_spilled_store_float (MonoCompile *cfg, int spill, int reg, MonoInst *ins)
-{
-       MonoInst *store;
-       MONO_INST_NEW (cfg, store, OP_STORER8_MEMBASE_REG);
-       store->sreg1 = reg;
-       store->inst_destbasereg = AMD64_RBP;
-       store->inst_offset = mono_spillvar_offset_float (cfg, spill);
-
-       DEBUG (g_print ("SPILLED FLOAT STORE (%d at 0x%08lx(%%sp)) (from %d)\n", spill, (long)store->inst_offset, reg));
-       return store;
+       if (size == 1)
+               amd64_widen_reg (code, dreg, dreg, is_signed, FALSE);
+       else if (size == 2)
+               amd64_widen_reg (code, dreg, dreg, is_signed, TRUE);
+       return code;
 }
 
-/*
- * Creates a load for spilled floating point items 
- */
-static MonoInst*
-create_spilled_load_float (MonoCompile *cfg, int spill, int reg, MonoInst *ins)
+static unsigned char*
+mono_emit_stack_alloc (guchar *code, MonoInst* tree)
 {
-       MonoInst *load;
-       MONO_INST_NEW (cfg, load, OP_LOADR8_SPILL_MEMBASE);
-       load->dreg = reg;
-       load->inst_basereg = AMD64_RBP;
-       load->inst_offset = mono_spillvar_offset_float (cfg, spill);
-
-       DEBUG (g_print ("SPILLED FLOAT LOAD (%d at 0x%08lx(%%sp)) (from %d)\n", spill, (long)load->inst_offset, reg));
-       return load;
-}
-
-#define ireg_is_freeable(r) ((r) >= 0 && (r) <= 7 && AMD64_IS_CALLEE_REG ((r)))
-#define freg_is_freeable(r) ((r) >= 0 && (r) <= AMD64_XMM_NREG)
-
-#define reg_is_freeable(r,fp) ((fp) ? freg_is_freeable ((r)) : ireg_is_freeable ((r)))
-#define reg_is_hard(r,fp) ((fp) ? ((r) < MONO_MAX_FREGS) : ((r) < MONO_MAX_IREGS))
-#define reg_is_soft(r,fp) (!reg_is_hard((r),(fp)))
-#define rassign(cfg,reg,fp) ((fp) ? (cfg)->rs->fassign [(reg)] : (cfg)->rs->iassign [(reg)])
-#define sreg1_is_fp(ins) (ins_spec [(ins)->opcode] [MONO_INST_SRC1] == 'f')
-#define sreg2_is_fp(ins) (ins_spec [(ins)->opcode] [MONO_INST_SRC2] == 'f')
-#define dreg_is_fp(ins)  (ins_spec [(ins)->opcode] [MONO_INST_DEST] == 'f')
+       int sreg = tree->sreg1;
+       int need_touch = FALSE;
 
-typedef struct {
-       int born_in;
-       int killed_in;
-       int last_use;
-       int prev_use;
-       int flags;              /* used to track fp spill/load */
-} RegTrack;
+#if defined(PLATFORM_WIN32) || defined(MONO_ARCH_SIGSEGV_ON_ALTSTACK)
+       if (!(tree->flags & MONO_INST_INIT))
+               need_touch = TRUE;
+#endif
 
-static const char*const * ins_spec = amd64_desc;
+       if (need_touch) {
+               guint8* br[5];
 
-static void
-print_ins (int i, MonoInst *ins)
-{
-       const char *spec = ins_spec [ins->opcode];
-       g_print ("\t%-2d %s", i, mono_inst_name (ins->opcode));
-       if (!spec)
-               g_error ("Unknown opcode: %s\n", mono_inst_name (ins->opcode));
-       if (spec [MONO_INST_DEST]) {
-               gboolean fp = (spec [MONO_INST_DEST] == 'f');
-               if (reg_is_soft (ins->dreg, fp))
-                       g_print (" R%d <-", ins->dreg);
-               else
-                       g_print (" %s <-", mono_amd64_regname (ins->dreg, fp));
-       }
-       if (spec [MONO_INST_SRC1]) {
-               gboolean fp = (spec [MONO_INST_SRC1] == 'f');
-               if (reg_is_soft (ins->sreg1, fp))
-                       g_print (" R%d", ins->sreg1);
-               else
-                       g_print (" %s", mono_amd64_regname (ins->sreg1, fp));
-       }
-       if (spec [MONO_INST_SRC2]) {
-               gboolean fp = (spec [MONO_INST_SRC2] == 'f');
-               if (reg_is_soft (ins->sreg2, fp))
-                       g_print (" R%d", ins->sreg2);
-               else
-                       g_print (" %s", mono_amd64_regname (ins->sreg2, fp));
+               /*
+                * Under Windows:
+                * If the requested stack size is larger than one page,
+                * perform a stack-touch operation.
+                */
+               /*
+                * Generate stack probe code.
+                * Under Windows, it is necessary to allocate one page at a time,
+                * "touching" the stack after each successful sub-allocation. This is
+                * because of the way stack growth is implemented - there is a
+                * guard page before the lowest stack page that is currently committed.
+                * The stack normally grows sequentially, so the OS traps access to the
+                * guard page and commits more pages when needed.
+                */
+               amd64_test_reg_imm (code, sreg, ~0xFFF);
+               br[0] = code; x86_branch8 (code, X86_CC_Z, 0, FALSE);
+
+               br[2] = code; /* loop */
+               amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, 0x1000);
+               amd64_test_membase_reg (code, AMD64_RSP, 0, AMD64_RSP);
+               amd64_alu_reg_imm (code, X86_SUB, sreg, 0x1000);
+               amd64_alu_reg_imm (code, X86_CMP, sreg, 0x1000);
+               br[3] = code; x86_branch8 (code, X86_CC_AE, 0, FALSE);
+               amd64_patch (br[3], br[2]);
+               amd64_test_reg_reg (code, sreg, sreg);
+               br[4] = code; x86_branch8 (code, X86_CC_Z, 0, FALSE);
+               amd64_alu_reg_reg (code, X86_SUB, AMD64_RSP, sreg);
+
+               br[1] = code; x86_jump8 (code, 0);
+
+               amd64_patch (br[0], code);
+               amd64_alu_reg_reg (code, X86_SUB, AMD64_RSP, sreg);
+               amd64_patch (br[1], code);
+               amd64_patch (br[4], code);
        }
-       if (spec [MONO_INST_CLOB])
-               g_print (" clobbers: %c", spec [MONO_INST_CLOB]);
-       g_print ("\n");
-}
+       else
+               amd64_alu_reg_reg (code, X86_SUB, AMD64_RSP, tree->sreg1);
 
-static void
-print_regtrack (RegTrack *t, int num)
-{
-       int i;
-       char buf [32];
-       const char *r;
-       
-       for (i = 0; i < num; ++i) {
-               if (!t [i].born_in)
-                       continue;
-               if (i >= MONO_MAX_IREGS) {
-                       g_snprintf (buf, sizeof(buf), "R%d", i);
-                       r = buf;
-               } else
-                       r = mono_arch_regname (i);
-               g_print ("liveness: %s [%d - %d]\n", r, t [i].born_in, t[i].last_use);
+       if (tree->flags & MONO_INST_INIT) {
+               int offset = 0;
+               if (tree->dreg != AMD64_RAX && sreg != AMD64_RAX) {
+                       amd64_push_reg (code, AMD64_RAX);
+                       offset += 8;
+               }
+               if (tree->dreg != AMD64_RCX && sreg != AMD64_RCX) {
+                       amd64_push_reg (code, AMD64_RCX);
+                       offset += 8;
+               }
+               if (tree->dreg != AMD64_RDI && sreg != AMD64_RDI) {
+                       amd64_push_reg (code, AMD64_RDI);
+                       offset += 8;
+               }
+               
+               amd64_shift_reg_imm (code, X86_SHR, sreg, 4);
+               if (sreg != AMD64_RCX)
+                       amd64_mov_reg_reg (code, AMD64_RCX, sreg, 8);
+               amd64_alu_reg_reg (code, X86_XOR, AMD64_RAX, AMD64_RAX);
+                               
+               amd64_lea_membase (code, AMD64_RDI, AMD64_RSP, offset);
+               amd64_cld (code);
+               amd64_prefix (code, X86_REP_PREFIX);
+               amd64_stosl (code);
+               
+               if (tree->dreg != AMD64_RDI && sreg != AMD64_RDI)
+                       amd64_pop_reg (code, AMD64_RDI);
+               if (tree->dreg != AMD64_RCX && sreg != AMD64_RCX)
+                       amd64_pop_reg (code, AMD64_RCX);
+               if (tree->dreg != AMD64_RAX && sreg != AMD64_RAX)
+                       amd64_pop_reg (code, AMD64_RAX);
        }
-}
-
-typedef struct InstList InstList;
-
-struct InstList {
-       InstList *prev;
-       InstList *next;
-       MonoInst *data;
-};
-
-static inline InstList*
-inst_list_prepend (MonoMemPool *pool, InstList *list, MonoInst *data)
-{
-       InstList *item = mono_mempool_alloc (pool, sizeof (InstList));
-       item->data = data;
-       item->prev = NULL;
-       item->next = list;
-       if (list)
-               list->prev = item;
-       return item;
-}
-
-/*
- * Force the spilling of the variable in the symbolic register 'reg'.
- */
-static int
-get_register_force_spilling (MonoCompile *cfg, InstList *item, MonoInst *ins, int reg, gboolean fp)
-{
-       MonoInst *load;
-       int i, sel, spill;
-       int *assign, *symbolic;
-
-       if (fp) {
-               assign = cfg->rs->fassign;
-               symbolic = cfg->rs->fsymbolic;
-       }
-       else {
-               assign = cfg->rs->iassign;
-               symbolic = cfg->rs->isymbolic;
-       }       
-       
-       sel = assign [reg];
-       /*i = cfg->rs->isymbolic [sel];
-       g_assert (i == reg);*/
-       i = reg;
-       spill = ++cfg->spill_count;
-       assign [i] = -spill - 1;
-       if (fp)
-               mono_regstate_free_float (cfg->rs, sel);
-       else
-               mono_regstate_free_int (cfg->rs, sel);
-       /* we need to create a spill var and insert a load to sel after the current instruction */
-       if (fp)
-               MONO_INST_NEW (cfg, load, OP_LOADR8_MEMBASE);
-       else
-               MONO_INST_NEW (cfg, load, OP_LOAD_MEMBASE);
-       load->dreg = sel;
-       load->inst_basereg = AMD64_RBP;
-       load->inst_offset = mono_spillvar_offset (cfg, spill);
-       if (item->prev) {
-               while (ins->next != item->prev->data)
-                       ins = ins->next;
-       }
-       load->next = ins->next;
-       ins->next = load;
-       DEBUG (g_print ("SPILLED LOAD (%d at 0x%08lx(%%ebp)) R%d (freed %s)\n", spill, (long)load->inst_offset, i, mono_amd64_regname (sel, fp)));
-       if (fp)
-               i = mono_regstate_alloc_float (cfg->rs, 1 << sel);
-       else
-               i = mono_regstate_alloc_int (cfg->rs, 1 << sel);
-       g_assert (i == sel);
-
-       return sel;
-}
-
-static int
-get_register_spilling (MonoCompile *cfg, InstList *item, MonoInst *ins, guint32 regmask, int reg, gboolean fp)
-{
-       MonoInst *load;
-       int i, sel, spill;
-       int *assign, *symbolic;
-
-       if (fp) {
-               assign = cfg->rs->fassign;
-               symbolic = cfg->rs->fsymbolic;
-       }
-       else {
-               assign = cfg->rs->iassign;
-               symbolic = cfg->rs->isymbolic;
-       }
-
-       DEBUG (g_print ("\tstart regmask to assign R%d: 0x%08x (R%d <- R%d R%d)\n", reg, regmask, ins->dreg, ins->sreg1, ins->sreg2));
-       /* exclude the registers in the current instruction */
-       if ((sreg1_is_fp (ins) == fp) && (reg != ins->sreg1) && (reg_is_freeable (ins->sreg1, fp) || (reg_is_soft (ins->sreg1, fp) && rassign (cfg, ins->sreg1, fp) >= 0))) {
-               if (reg_is_soft (ins->sreg1, fp))
-                       regmask &= ~ (1 << rassign (cfg, ins->sreg1, fp));
-               else
-                       regmask &= ~ (1 << ins->sreg1);
-               DEBUG (g_print ("\t\texcluding sreg1 %s\n", mono_amd64_regname (ins->sreg1, fp)));
-       }
-       if ((sreg2_is_fp (ins) == fp) && (reg != ins->sreg2) && (reg_is_freeable (ins->sreg2, fp) || (reg_is_soft (ins->sreg2, fp) && rassign (cfg, ins->sreg2, fp) >= 0))) {
-               if (reg_is_soft (ins->sreg2, fp))
-                       regmask &= ~ (1 << rassign (cfg, ins->sreg2, fp));
-               else
-                       regmask &= ~ (1 << ins->sreg2);
-               DEBUG (g_print ("\t\texcluding sreg2 %s %d\n", mono_amd64_regname (ins->sreg2, fp), ins->sreg2));
-       }
-       if ((dreg_is_fp (ins) == fp) && (reg != ins->dreg) && reg_is_freeable (ins->dreg, fp)) {
-               regmask &= ~ (1 << ins->dreg);
-               DEBUG (g_print ("\t\texcluding dreg %s\n", mono_amd64_regname (ins->dreg, fp)));
-       }
-
-       DEBUG (g_print ("\t\tavailable regmask: 0x%08x\n", regmask));
-       g_assert (regmask); /* need at least a register we can free */
-       sel = -1;
-       /* we should track prev_use and spill the register that's farther */
-       if (fp) {
-               for (i = 0; i < MONO_MAX_FREGS; ++i) {
-                       if (regmask & (1 << i)) {
-                               sel = i;
-                               DEBUG (g_print ("\t\tselected register %s has assignment %d\n", mono_arch_fregname (sel), cfg->rs->fassign [sel]));
-                               break;
-                       }
-               }
-
-               i = cfg->rs->fsymbolic [sel];
-               spill = ++cfg->spill_count;
-               cfg->rs->fassign [i] = -spill - 1;
-               mono_regstate_free_float (cfg->rs, sel);
-       }
-       else {
-               for (i = 0; i < MONO_MAX_IREGS; ++i) {
-                       if (regmask & (1 << i)) {
-                               sel = i;
-                               DEBUG (g_print ("\t\tselected register %s has assignment %d\n", mono_arch_regname (sel), cfg->rs->iassign [sel]));
-                               break;
-                       }
-               }
-
-               i = cfg->rs->isymbolic [sel];
-               spill = ++cfg->spill_count;
-               cfg->rs->iassign [i] = -spill - 1;
-               mono_regstate_free_int (cfg->rs, sel);
-       }
-
-       /* we need to create a spill var and insert a load to sel after the current instruction */
-       MONO_INST_NEW (cfg, load, fp ? OP_LOADR8_MEMBASE : OP_LOAD_MEMBASE);
-       load->dreg = sel;
-       load->inst_basereg = AMD64_RBP;
-       load->inst_offset = mono_spillvar_offset (cfg, spill);
-       if (item->prev) {
-               while (ins->next != item->prev->data)
-                       ins = ins->next;
-       }
-       load->next = ins->next;
-       ins->next = load;
-       DEBUG (g_print ("\tSPILLED LOAD (%d at 0x%08lx(%%ebp)) R%d (freed %s)\n", spill, (long)load->inst_offset, i, mono_amd64_regname (sel, fp)));
-       if (fp)
-               i = mono_regstate_alloc_float (cfg->rs, 1 << sel);
-       else
-               i = mono_regstate_alloc_int (cfg->rs, 1 << sel);
-       g_assert (i == sel);
-       
-       return sel;
-}
-
-static MonoInst*
-create_copy_ins (MonoCompile *cfg, int dest, int src, MonoInst *ins, gboolean fp)
-{
-       MonoInst *copy;
-
-       if (fp)
-               MONO_INST_NEW (cfg, copy, OP_FMOVE);
-       else
-               MONO_INST_NEW (cfg, copy, OP_MOVE);
-
-       copy->dreg = dest;
-       copy->sreg1 = src;
-       if (ins) {
-               copy->next = ins->next;
-               ins->next = copy;
-       }
-       DEBUG (g_print ("\tforced copy from %s to %s\n", mono_arch_regname (src), mono_arch_regname (dest)));
-       return copy;
-}
-
-static MonoInst*
-create_spilled_store (MonoCompile *cfg, int spill, int reg, int prev_reg, MonoInst *ins, gboolean fp)
-{
-       MonoInst *store;
-       MONO_INST_NEW (cfg, store, fp ? OP_STORER8_MEMBASE_REG : OP_STORE_MEMBASE_REG);
-       store->sreg1 = reg;
-       store->inst_destbasereg = AMD64_RBP;
-       store->inst_offset = mono_spillvar_offset (cfg, spill);
-       if (ins) {
-               store->next = ins->next;
-               ins->next = store;
-       }
-       DEBUG (g_print ("\tSPILLED STORE (%d at 0x%08lx(%%ebp)) R%d (from %s)\n", spill, (long)store->inst_offset, prev_reg, mono_amd64_regname (reg, fp)));
-       return store;
-}
-
-static void
-insert_before_ins (MonoInst *ins, InstList *item, MonoInst* to_insert)
-{
-       MonoInst *prev;
-       if (item->next) {
-               prev = item->next->data;
-
-               while (prev->next != ins)
-                       prev = prev->next;
-               to_insert->next = ins;
-               prev->next = to_insert;
-       } else {
-               to_insert->next = ins;
-       }
-       /* 
-        * needed otherwise in the next instruction we can add an ins to the 
-        * end and that would get past this instruction.
-        */
-       item->data = to_insert; 
-}
-
-/* flags used in reginfo->flags */
-enum {
-       MONO_X86_FP_NEEDS_LOAD_SPILL    = 1 << 0,
-       MONO_X86_FP_NEEDS_SPILL                 = 1 << 1,
-       MONO_X86_FP_NEEDS_LOAD                  = 1 << 2,
-       MONO_X86_REG_NOT_ECX                    = 1 << 3,
-       MONO_X86_REG_EAX                                = 1 << 4,
-       MONO_X86_REG_EDX                                = 1 << 5,
-       MONO_X86_REG_ECX                                = 1 << 6
-};
-
-static int
-mono_amd64_alloc_int_reg (MonoCompile *cfg, InstList *tmp, MonoInst *ins, guint32 dest_mask, int sym_reg, int flags)
-{
-       int val;
-       int test_mask = dest_mask;
-
-       if (flags & MONO_X86_REG_EAX)
-               test_mask &= (1 << AMD64_RAX);
-       else if (flags & MONO_X86_REG_EDX)
-               test_mask &= (1 << AMD64_RDX);
-       else if (flags & MONO_X86_REG_ECX)
-               test_mask &= (1 << AMD64_RCX);
-       else if (flags & MONO_X86_REG_NOT_ECX)
-               test_mask &= ~ (1 << AMD64_RCX);
-
-       val = mono_regstate_alloc_int (cfg->rs, test_mask);
-       if (val >= 0 && test_mask != dest_mask)
-               DEBUG(g_print ("\tUsed flag to allocate reg %s for R%u\n", mono_arch_regname (val), sym_reg));
-
-       if (val < 0 && (flags & MONO_X86_REG_NOT_ECX)) {
-               DEBUG(g_print ("\tFailed to allocate flag suggested mask (%u) but exluding ECX\n", test_mask));
-               val = mono_regstate_alloc_int (cfg->rs, (dest_mask & (~1 << AMD64_RCX)));
-       }
-
-       if (val < 0) {
-               val = mono_regstate_alloc_int (cfg->rs, dest_mask);
-               if (val < 0)
-                       val = get_register_spilling (cfg, tmp, ins, dest_mask, sym_reg, FALSE);
-       }
-
-       return val;
-}
-
-static int
-mono_amd64_alloc_float_reg (MonoCompile *cfg, InstList *tmp, MonoInst *ins, guint32 dest_mask, int sym_reg)
-{
-       int val;
-
-       val = mono_regstate_alloc_float (cfg->rs, dest_mask);
-
-       if (val < 0) {
-               val = get_register_spilling (cfg, tmp, ins, dest_mask, sym_reg, TRUE);
-       }
-
-       return val;
-}
-
-
-/*#include "cprop.c"*/
-
-/*
- * Local register allocation.
- * We first scan the list of instructions and we save the liveness info of
- * each register (when the register is first used, when it's value is set etc.).
- * We also reverse the list of instructions (in the InstList list) because assigning
- * registers backwards allows for more tricks to be used.
- */
-void
-mono_arch_local_regalloc (MonoCompile *cfg, MonoBasicBlock *bb)
-{
-       MonoInst *ins;
-       MonoRegState *rs = cfg->rs;
-       int i, val, fpcount;
-       RegTrack *reginfo, *reginfof;
-       RegTrack *reginfo1, *reginfo2, *reginfod;
-       InstList *tmp, *reversed = NULL;
-       const char *spec;
-       guint32 src1_mask, src2_mask, dest_mask;
-       GList *fspill_list = NULL;
-       int fspill = 0;
-
-       if (!bb->code)
-               return;
-       rs->next_vireg = bb->max_ireg;
-       rs->next_vfreg = bb->max_freg;
-       mono_regstate_assign (rs);
-       reginfo = g_malloc0 (sizeof (RegTrack) * rs->next_vireg);
-       reginfof = g_malloc0 (sizeof (RegTrack) * rs->next_vfreg);
-       rs->ifree_mask = AMD64_CALLEE_REGS;
-       rs->ffree_mask = AMD64_CALLEE_FREGS;
-
-       if (!use_sse2)
-               /* The fp stack is 6 entries deep */
-               rs->ffree_mask = 0x3f;
-
-       ins = bb->code;
-
-       /*if (cfg->opt & MONO_OPT_COPYPROP)
-               local_copy_prop (cfg, ins);*/
-
-       i = 1;
-       fpcount = 0;
-       DEBUG (g_print ("LOCAL regalloc: basic block: %d\n", bb->block_num));
-       /* forward pass on the instructions to collect register liveness info */
-       while (ins) {
-               spec = ins_spec [ins->opcode];
-               
-               DEBUG (print_ins (i, ins));
-
-               if (spec [MONO_INST_SRC1]) {
-                       if (spec [MONO_INST_SRC1] == 'f') {
-                               reginfo1 = reginfof;
-
-                               if (!use_sse2) {
-                                       GList *spill;
-
-                                       spill = g_list_first (fspill_list);
-                                       if (spill && fpcount < FPSTACK_SIZE) {
-                                               reginfo1 [ins->sreg1].flags |= MONO_X86_FP_NEEDS_LOAD;
-                                               fspill_list = g_list_remove (fspill_list, spill->data);
-                                       } else
-                                               fpcount--;
-                               }
-                       }
-                       else
-                               reginfo1 = reginfo;
-                       reginfo1 [ins->sreg1].prev_use = reginfo1 [ins->sreg1].last_use;
-                       reginfo1 [ins->sreg1].last_use = i;
-                       if (spec [MONO_INST_SRC1] == 'L') {
-                               /* The virtual register is allocated sequentially */
-                               reginfo1 [ins->sreg1 + 1].prev_use = reginfo1 [ins->sreg1 + 1].last_use;
-                               reginfo1 [ins->sreg1 + 1].last_use = i;
-                               if (reginfo1 [ins->sreg1 + 1].born_in == 0 || reginfo1 [ins->sreg1 + 1].born_in > i)
-                                       reginfo1 [ins->sreg1 + 1].born_in = i;
-
-                               reginfo1 [ins->sreg1].flags |= MONO_X86_REG_EAX;
-                               reginfo1 [ins->sreg1 + 1].flags |= MONO_X86_REG_EDX;
-                       }
-               } else {
-                       ins->sreg1 = -1;
-               }
-               if (spec [MONO_INST_SRC2]) {
-                       if (spec [MONO_INST_SRC2] == 'f') {
-                               reginfo2 = reginfof;
-
-                               if (!use_sse2) {
-                                       GList *spill;
-
-                                       spill = g_list_first (fspill_list);
-                                       if (spill) {
-                                               reginfo2 [ins->sreg2].flags |= MONO_X86_FP_NEEDS_LOAD;
-                                               fspill_list = g_list_remove (fspill_list, spill->data);
-                                               if (fpcount >= FPSTACK_SIZE) {
-                                                       fspill++;
-                                                       fspill_list = g_list_prepend (fspill_list, GINT_TO_POINTER(fspill));
-                                                       reginfo2 [ins->sreg2].flags |= MONO_X86_FP_NEEDS_LOAD_SPILL;
-                                               }
-                                       } else
-                                               fpcount--;
-                               }
-                       }
-                       else
-                               reginfo2 = reginfo;
-                       reginfo2 [ins->sreg2].prev_use = reginfo2 [ins->sreg2].last_use;
-                       reginfo2 [ins->sreg2].last_use = i;
-                       if (spec [MONO_INST_SRC2] == 'L') {
-                               /* The virtual register is allocated sequentially */
-                               reginfo2 [ins->sreg2 + 1].prev_use = reginfo2 [ins->sreg2 + 1].last_use;
-                               reginfo2 [ins->sreg2 + 1].last_use = i;
-                               if (reginfo2 [ins->sreg2 + 1].born_in == 0 || reginfo2 [ins->sreg2 + 1].born_in > i)
-                                       reginfo2 [ins->sreg2 + 1].born_in = i;
-                       }
-                       if (spec [MONO_INST_CLOB] == 's') {
-                               reginfo2 [ins->sreg1].flags |= MONO_X86_REG_NOT_ECX;
-                               reginfo2 [ins->sreg2].flags |= MONO_X86_REG_ECX;
-                       }
-               } else {
-                       ins->sreg2 = -1;
-               }
-               if (spec [MONO_INST_DEST]) {
-                       if (spec [MONO_INST_DEST] == 'f') {
-                               reginfod = reginfof;
-                               if (!use_sse2 && (spec [MONO_INST_CLOB] != 'm')) {
-                                       if (fpcount >= FPSTACK_SIZE) {
-                                               reginfod [ins->dreg].flags |= MONO_X86_FP_NEEDS_SPILL;
-                                               fspill++;
-                                               fspill_list = g_list_prepend (fspill_list, GINT_TO_POINTER(fspill));
-                                               fpcount--;
-                                       }
-                                       fpcount++;
-                               }
-                       }
-                       else
-                               reginfod = reginfo;
-                       if (spec [MONO_INST_DEST] != 'b') /* it's not just a base register */
-                               reginfod [ins->dreg].killed_in = i;
-                       reginfod [ins->dreg].prev_use = reginfod [ins->dreg].last_use;
-                       reginfod [ins->dreg].last_use = i;
-                       if (reginfod [ins->dreg].born_in == 0 || reginfod [ins->dreg].born_in > i)
-                               reginfod [ins->dreg].born_in = i;
-                       if (spec [MONO_INST_DEST] == 'l' || spec [MONO_INST_DEST] == 'L') {
-                               /* The virtual register is allocated sequentially */
-                               reginfod [ins->dreg + 1].prev_use = reginfod [ins->dreg + 1].last_use;
-                               reginfod [ins->dreg + 1].last_use = i;
-                               if (reginfod [ins->dreg + 1].born_in == 0 || reginfod [ins->dreg + 1].born_in > i)
-                                       reginfod [ins->dreg + 1].born_in = i;
-
-                               reginfod [ins->dreg].flags |= MONO_X86_REG_EAX;
-                               reginfod [ins->dreg + 1].flags |= MONO_X86_REG_EDX;
-                       }
-               } else {
-                       ins->dreg = -1;
-               }
-
-               if (spec [MONO_INST_CLOB] == 'c') {
-                       /* A call instruction implicitly uses all registers in call->out_ireg_args */
-
-                       MonoCallInst *call = (MonoCallInst*)ins;
-                       GSList *list;
-
-                       list = call->out_ireg_args;
-                       if (list) {
-                               while (list) {
-                                       guint64 regpair;
-                                       int reg, hreg;
-
-                                       regpair = (guint64) (list->data);
-                                       hreg = regpair >> 32;
-                                       reg = regpair & 0xffffffff;
-
-                                       reginfo [reg].prev_use = reginfo [reg].last_use;
-                                       reginfo [reg].last_use = i;
-
-                                       list = g_slist_next (list);
-                               }
-                       }
-
-                       list = call->out_freg_args;
-                       if (use_sse2 && list) {
-                               while (list) {
-                                       guint64 regpair;
-                                       int reg, hreg;
-
-                                       regpair = (guint64) (list->data);
-                                       hreg = regpair >> 32;
-                                       reg = regpair & 0xffffffff;
-
-                                       reginfof [reg].prev_use = reginfof [reg].last_use;
-                                       reginfof [reg].last_use = i;
-
-                                       list = g_slist_next (list);
-                               }
-                       }
-               }
-
-               reversed = inst_list_prepend (cfg->mempool, reversed, ins);
-               ++i;
-               ins = ins->next;
-       }
-
-       // todo: check if we have anything left on fp stack, in verify mode?
-       fspill = 0;
-
-       DEBUG (print_regtrack (reginfo, rs->next_vireg));
-       DEBUG (print_regtrack (reginfof, rs->next_vfreg));
-       tmp = reversed;
-       while (tmp) {
-               int prev_dreg, prev_sreg1, prev_sreg2, clob_dreg;
-               dest_mask = src1_mask = src2_mask = AMD64_CALLEE_REGS;
-               --i;
-               ins = tmp->data;
-               spec = ins_spec [ins->opcode];
-               prev_dreg = -1;
-               clob_dreg = -1;
-               DEBUG (g_print ("processing:"));
-               DEBUG (print_ins (i, ins));
-               if (spec [MONO_INST_CLOB] == 's') {
-                       if (rs->ifree_mask & (1 << AMD64_RCX)) {
-                               DEBUG (g_print ("\tshortcut assignment of R%d to ECX\n", ins->sreg2));
-                               if (ins->sreg2 < MONO_MAX_IREGS) {
-                                       /* Argument already in hard reg, need to copy */
-                                       MonoInst *copy = create_copy_ins (cfg, AMD64_RCX, ins->sreg2, NULL, FALSE);
-                                       insert_before_ins (ins, tmp, copy);
-                               }
-                               rs->iassign [ins->sreg2] = AMD64_RCX;
-                               rs->isymbolic [AMD64_RCX] = ins->sreg2;
-                               ins->sreg2 = AMD64_RCX;
-                               rs->ifree_mask &= ~ (1 << AMD64_RCX);
-                       } else {
-                               int need_ecx_spill = TRUE;
-                               /* 
-                                * we first check if src1/dreg is already assigned a register
-                                * and then we force a spill of the var assigned to ECX.
-                                */
-                               /* the destination register can't be ECX */
-                               dest_mask &= ~ (1 << AMD64_RCX);
-                               src1_mask &= ~ (1 << AMD64_RCX);
-                               val = rs->iassign [ins->dreg];
-                               /* 
-                                * the destination register is already assigned to ECX:
-                                * we need to allocate another register for it and then
-                                * copy from this to ECX.
-                                */
-                               if (val == AMD64_RCX && ins->dreg != ins->sreg2) {
-                                       int new_dest;
-                                       new_dest = mono_amd64_alloc_int_reg (cfg, tmp, ins, dest_mask, ins->dreg, reginfo [ins->dreg].flags);
-                                       g_assert (new_dest >= 0);
-                                       DEBUG (g_print ("\tclob:s changing dreg R%d to %s from ECX\n", ins->dreg, mono_arch_regname (new_dest)));
-
-                                       rs->isymbolic [new_dest] = ins->dreg;
-                                       rs->iassign [ins->dreg] = new_dest;
-                                       clob_dreg = ins->dreg;
-                                       ins->dreg = new_dest;
-                                       create_copy_ins (cfg, AMD64_RCX, new_dest, ins, FALSE);
-                                       need_ecx_spill = FALSE;
-                                       /*DEBUG (g_print ("\tforced spill of R%d\n", ins->dreg));
-                                       val = get_register_force_spilling (cfg, tmp, ins, ins->dreg);
-                                       rs->iassign [ins->dreg] = val;
-                                       rs->isymbolic [val] = prev_dreg;
-                                       ins->dreg = val;*/
-                               }
-                               val = rs->iassign [ins->sreg2];
-                               if (val >= 0 && val != AMD64_RCX) {
-                                       MonoInst *move = create_copy_ins (cfg, AMD64_RCX, val, NULL, FALSE);
-                                       DEBUG (g_print ("\tmoved arg from R%d (%d) to ECX\n", val, ins->sreg2));
-                                       move->next = ins;
-                                       g_assert_not_reached ();
-                                       /* FIXME: where is move connected to the instruction list? */
-                                       //tmp->prev->data->next = move;
-                               }
-                               else 
-                                       if (val == AMD64_RCX) {
-                                               if (ins->sreg2 < MONO_MAX_IREGS) {
-                                                       /* sreg2 is already assigned to a hard reg, need to copy */
-                                                       MonoInst *copy = create_copy_ins (cfg, AMD64_RCX, ins->sreg2, NULL, FALSE);
-                                                       insert_before_ins (ins, tmp, copy);
-                                               }
-                                               need_ecx_spill = FALSE;
-                                       }
-                               if (need_ecx_spill && !(rs->ifree_mask & (1 << AMD64_RCX))) {
-                                       DEBUG (g_print ("\tforced spill of R%d\n", rs->isymbolic [AMD64_RCX]));
-                                       get_register_force_spilling (cfg, tmp, ins, rs->isymbolic [AMD64_RCX], FALSE);
-                                       mono_regstate_free_int (rs, AMD64_RCX);
-                               }
-                               /* force-set sreg2 */
-                               rs->iassign [ins->sreg2] = AMD64_RCX;
-                               rs->isymbolic [AMD64_RCX] = ins->sreg2;
-                               ins->sreg2 = AMD64_RCX;
-                               rs->ifree_mask &= ~ (1 << AMD64_RCX);
-                       }
-               } else if (spec [MONO_INST_CLOB] == 'd') { /* division */
-                       int dest_reg = AMD64_RAX;
-                       int clob_reg = AMD64_RDX;
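-                       /* idiv/div leave the quotient in RAX and the remainder in RDX; a 'd' dest spec means the remainder is the result */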
-                       if (spec [MONO_INST_DEST] == 'd') {
-                               dest_reg = AMD64_RDX; /* remainder */
-                               clob_reg = AMD64_RAX;
-                       }
-                       val = rs->iassign [ins->dreg];
-                       if (0 && val >= 0 && val != dest_reg && !(rs->ifree_mask & (1 << dest_reg))) {
-                               DEBUG (g_print ("\tforced spill of R%d\n", rs->isymbolic [dest_reg]));
-                               get_register_force_spilling (cfg, tmp, ins, rs->isymbolic [dest_reg], FALSE);
-                               mono_regstate_free_int (rs, dest_reg);
-                       }
-                       if (val < 0) {
-                               if (val < -1) {
-                                       /* the register gets spilled after this inst */
-                                       int spill = -val -1;
-                                       dest_mask = 1 << clob_reg;
-                                       prev_dreg = ins->dreg;
-                                       val = mono_regstate_alloc_int (rs, dest_mask);
-                                       if (val < 0)
-                                               val = get_register_spilling (cfg, tmp, ins, dest_mask, ins->dreg, FALSE);
-                                       rs->iassign [ins->dreg] = val;
-                                       if (spill)
-                                               create_spilled_store (cfg, spill, val, prev_dreg, ins, FALSE);
-                                       DEBUG (g_print ("\tassigned dreg %s to dest R%d\n", mono_arch_regname (val), ins->dreg));
-                                       rs->isymbolic [val] = prev_dreg;
-                                       ins->dreg = val;
-                                       if (val != dest_reg) { /* force a copy */
-                                               create_copy_ins (cfg, val, dest_reg, ins, FALSE);
-                                       }
-                               } else {
-                                       DEBUG (g_print ("\tshortcut assignment of R%d to %s\n", ins->dreg, mono_arch_regname (dest_reg)));
-                                       prev_dreg = ins->dreg;
-                                       rs->iassign [ins->dreg] = dest_reg;
-                                       rs->isymbolic [dest_reg] = ins->dreg;
-                                       ins->dreg = dest_reg;
-                                       rs->ifree_mask &= ~ (1 << dest_reg);
-                               }
-                       } else {
-                               //DEBUG (g_print ("dest reg in div assigned: %s\n", mono_arch_regname (val)));
-                               if (val != dest_reg) { /* force a copy */
-                                       create_copy_ins (cfg, val, dest_reg, ins, FALSE);
-                                       if (!(rs->ifree_mask & (1 << dest_reg)) && rs->isymbolic [dest_reg] >= MONO_MAX_IREGS) {
-                                               DEBUG (g_print ("\tforced spill of R%d\n", rs->isymbolic [dest_reg]));
-                                               get_register_force_spilling (cfg, tmp, ins, rs->isymbolic [dest_reg], FALSE);
-                                               mono_regstate_free_int (rs, dest_reg);
-                                       }
-                               }
-                       }
-                       if (!(rs->ifree_mask & (1 << clob_reg)) && (clob_reg != val) && (rs->isymbolic [clob_reg] >= 8)) {
-                               DEBUG (g_print ("\tforced spill of clobbered reg R%d\n", rs->isymbolic [clob_reg]));
-                               get_register_force_spilling (cfg, tmp, ins, rs->isymbolic [clob_reg], FALSE);
-                               mono_regstate_free_int (rs, clob_reg);
-                       }
-                       src1_mask = 1 << AMD64_RAX;
-                       src2_mask = 1 << AMD64_RCX;
-               }
-               if (spec [MONO_INST_DEST] == 'l') {
-                       int hreg;
-                       val = rs->iassign [ins->dreg];
-                       /* check the special case where dreg has been moved from ECX (shift clobbering) */
-                       if (spec [MONO_INST_CLOB] == 's' && clob_dreg != -1)
-                               hreg = clob_dreg + 1;
-                       else
-                               hreg = ins->dreg + 1;
-
-                       /* base prev_dreg on fixed hreg, handle clob case */
-                       val = hreg - 1;
-
-                       if (val != rs->isymbolic [AMD64_RAX] && !(rs->ifree_mask & (1 << AMD64_RAX))) {
-                               DEBUG (g_print ("\t(long-low) forced spill of R%d\n", rs->isymbolic [AMD64_RAX]));
-                               get_register_force_spilling (cfg, tmp, ins, rs->isymbolic [AMD64_RAX], FALSE);
-                               mono_regstate_free_int (rs, AMD64_RAX);
-                       }
-                       if (hreg != rs->isymbolic [AMD64_RDX] && !(rs->ifree_mask & (1 << AMD64_RDX))) {
-                               DEBUG (g_print ("\t(long-high) forced spill of R%d\n", rs->isymbolic [AMD64_RDX]));
-                               get_register_force_spilling (cfg, tmp, ins, rs->isymbolic [AMD64_RDX], FALSE);
-                               mono_regstate_free_int (rs, AMD64_RDX);
-                       }
-               }
-
-               /*
-                * TRACK DREG
-                */
-               if (spec [MONO_INST_DEST] == 'f') {
-                       if (use_sse2) {
-                               /* Allocate an XMM reg the same way as an int reg */
-                               if (reg_is_soft (ins->dreg, TRUE)) {
-                                       val = rs->fassign [ins->dreg];
-                                       prev_dreg = ins->dreg;
-                                       
-                                       if (val < 0) {
-                                               int spill = 0;
-                                               if (val < -1) {
-                                                       /* the register gets spilled after this inst */
-                                                       spill = -val -1;
-                                               }
-                                               val = mono_amd64_alloc_float_reg (cfg, tmp, ins, AMD64_CALLEE_FREGS, ins->dreg);
-                                               rs->fassign [ins->dreg] = val;
-                                               if (spill)
-                                                       create_spilled_store (cfg, spill, val, prev_dreg, ins, TRUE);
-                                       }
-                                       DEBUG (g_print ("\tassigned dreg %s to dest R%d\n", mono_amd64_regname (val, TRUE), ins->dreg));
-                                       rs->fsymbolic [val] = prev_dreg;
-                                       ins->dreg = val;
-                               }
-                       }
-                       else if (spec [MONO_INST_CLOB] != 'm') {
-                               if (reginfof [ins->dreg].flags & MONO_X86_FP_NEEDS_SPILL) {
-                                       GList *spill_node;
-                                       MonoInst *store;
-                                       spill_node = g_list_first (fspill_list);
-                                       g_assert (spill_node);
-
-                                       store = create_spilled_store_float (cfg, GPOINTER_TO_INT (spill_node->data), ins->dreg, ins);
-                                       insert_before_ins (ins, tmp, store);
-                                       fspill_list = g_list_remove (fspill_list, spill_node->data);
-                                       fspill--;
-                               }
-                       }
-               } else if (spec [MONO_INST_DEST] == 'L') {
-                       int hreg;
-                       val = rs->iassign [ins->dreg];
-                       /* check the special case where dreg has been moved from ECX (shift clobbering) */
-                       if (spec [MONO_INST_CLOB] == 's' && clob_dreg != -1)
-                               hreg = clob_dreg + 1;
-                       else
-                               hreg = ins->dreg + 1;
-
-                       /* base prev_dreg on fixed hreg, handle clob case */
-                       prev_dreg = hreg - 1;
-
-                       if (val < 0) {
-                               int spill = 0;
-                               if (val < -1) {
-                                       /* the register gets spilled after this inst */
-                                       spill = -val -1;
-                               }
-                               val = mono_amd64_alloc_int_reg (cfg, tmp, ins, dest_mask, ins->dreg, reginfo [ins->dreg].flags);
-                               rs->iassign [ins->dreg] = val;
-                               if (spill)
-                                       create_spilled_store (cfg, spill, val, prev_dreg, ins, FALSE);
-                       }
-
-                       DEBUG (g_print ("\tassigned dreg (long) %s to dest R%d\n", mono_arch_regname (val), hreg - 1));
-                       rs->isymbolic [val] = hreg - 1;
-                       ins->dreg = val;
-                       
-                       val = rs->iassign [hreg];
-                       if (val < 0) {
-                               int spill = 0;
-                               if (val < -1) {
-                                       /* the register gets spilled after this inst */
-                                       spill = -val -1;
-                               }
-                               val = mono_amd64_alloc_int_reg (cfg, tmp, ins, dest_mask, hreg, reginfo [hreg].flags);
-                               rs->iassign [hreg] = val;
-                               if (spill)
-                                       create_spilled_store (cfg, spill, val, hreg, ins, FALSE);
-                       }
-
-                       DEBUG (g_print ("\tassigned hreg (long-high) %s to dest R%d\n", mono_arch_regname (val), hreg));
-                       rs->isymbolic [val] = hreg;
-                       /* save the register allocated for the high word in ins->unused */
-                       ins->unused = val;
-
-                       /* check if we can free our long reg */
-                       if (reg_is_freeable (val, FALSE) && hreg >= 0 && reginfo [hreg].born_in >= i) {
-                               DEBUG (g_print ("\tfreeable %s (R%d) (born in %d)\n", mono_arch_regname (val), hreg, reginfo [hreg].born_in));
-                               mono_regstate_free_int (rs, val);
-                       }
-               }
-               else if (ins->dreg >= MONO_MAX_IREGS) {
-                       int hreg;
-                       val = rs->iassign [ins->dreg];
-                       if (spec [MONO_INST_DEST] == 'l') {
-                               /* check the special case where dreg has been moved from ECX (shift clobbering) */
-                               if (spec [MONO_INST_CLOB] == 's' && clob_dreg != -1)
-                                       hreg = clob_dreg + 1;
-                               else
-                                       hreg = ins->dreg + 1;
-
-                               /* base prev_dreg on fixed hreg, handle clob case */
-                               prev_dreg = hreg - 1;
-                       } else
-                               prev_dreg = ins->dreg;
-
-                       if (val < 0) {
-                               int spill = 0;
-                               if (val < -1) {
-                                       /* the register gets spilled after this inst */
-                                       spill = -val -1;
-                               }
-                               val = mono_amd64_alloc_int_reg (cfg, tmp, ins, dest_mask, ins->dreg, reginfo [ins->dreg].flags);
-                               rs->iassign [ins->dreg] = val;
-                               if (spill)
-                                       create_spilled_store (cfg, spill, val, prev_dreg, ins, FALSE);
-                       }
-                       DEBUG (g_print ("\tassigned dreg %s to dest R%d\n", mono_arch_regname (val), ins->dreg));
-                       rs->isymbolic [val] = prev_dreg;
-                       ins->dreg = val;
-                       /* handle cases where lreg needs to be eax:edx */
-                       if (spec [MONO_INST_DEST] == 'l') {
-                               /* check the special case where dreg has been moved from ECX (shift clobbering) */
-                               int hreg = prev_dreg + 1;
-                               val = rs->iassign [hreg];
-                               if (val < 0) {
-                                       int spill = 0;
-                                       if (val < -1) {
-                                               /* the register gets spilled after this inst */
-                                               spill = -val -1;
-                                       }
-                                       val = mono_amd64_alloc_int_reg (cfg, tmp, ins, dest_mask, hreg, reginfo [hreg].flags);
-                                       rs->iassign [hreg] = val;
-                                       if (spill)
-                                               create_spilled_store (cfg, spill, val, hreg, ins, FALSE);
-                               }
-                               DEBUG (g_print ("\tassigned hreg %s to dest R%d\n", mono_arch_regname (val), hreg));
-                               rs->isymbolic [val] = hreg;
-                               if (ins->dreg == AMD64_RAX) {
-                                       if (val != AMD64_RDX)
-                                               create_copy_ins (cfg, val, AMD64_RDX, ins, FALSE);
-                               } else if (ins->dreg == AMD64_RDX) {
-                                       if (val == AMD64_RAX) {
-                                               /* swap */
-                                               g_assert_not_reached ();
-                                       } else {
-                                               /* two forced copies */
-                                               create_copy_ins (cfg, val, AMD64_RDX, ins, FALSE);
-                                               create_copy_ins (cfg, ins->dreg, AMD64_RAX, ins, FALSE);
-                                       }
-                               } else {
-                                       if (val == AMD64_RDX) {
-                                               create_copy_ins (cfg, ins->dreg, AMD64_RAX, ins, FALSE);
-                                       } else {
-                                               /* two forced copies */
-                                               create_copy_ins (cfg, val, AMD64_RDX, ins, FALSE);
-                                               create_copy_ins (cfg, ins->dreg, AMD64_RAX, ins, FALSE);
-                                       }
-                               }
-                               if (reg_is_freeable (val, FALSE) && hreg >= 0 && reginfo [hreg].born_in >= i) {
-                                       DEBUG (g_print ("\tfreeable %s (R%d)\n", mono_arch_regname (val), hreg));
-                                       mono_regstate_free_int (rs, val);
-                               }
-                       } else if (spec [MONO_INST_DEST] == 'a' && ins->dreg != AMD64_RAX && spec [MONO_INST_CLOB] != 'd') {
-                               /* this instruction only outputs to EAX, need to copy */
-                               create_copy_ins (cfg, ins->dreg, AMD64_RAX, ins, FALSE);
-                       } else if (spec [MONO_INST_DEST] == 'd' && ins->dreg != AMD64_RDX && spec [MONO_INST_CLOB] != 'd') {
-                               create_copy_ins (cfg, ins->dreg, AMD64_RDX, ins, FALSE);
-                       }
-               }
-
-               if (use_sse2 && spec [MONO_INST_DEST] == 'f' && reg_is_freeable (ins->dreg, TRUE) && prev_dreg >= 0 && reginfof [prev_dreg].born_in >= i) {
-                       DEBUG (g_print ("\tfreeable %s (R%d) (born in %d)\n", mono_arch_fregname (ins->dreg), prev_dreg, reginfof [prev_dreg].born_in));
-                       mono_regstate_free_float (rs, ins->dreg);
-               }
-               if (spec [MONO_INST_DEST] != 'f' && reg_is_freeable (ins->dreg, FALSE) && prev_dreg >= 0 && reginfo [prev_dreg].born_in >= i) {
-                       DEBUG (g_print ("\tfreeable %s (R%d) (born in %d)\n", mono_arch_regname (ins->dreg), prev_dreg, reginfo [prev_dreg].born_in));
-                       mono_regstate_free_int (rs, ins->dreg);
-               }
-
-               /* put src1 in EAX if it needs to be */
-               if (spec [MONO_INST_SRC1] == 'a') {
-                       if (!(rs->ifree_mask & (1 << AMD64_RAX))) {
-                               DEBUG (g_print ("\tforced spill of R%d\n", rs->isymbolic [AMD64_RAX]));
-                               get_register_force_spilling (cfg, tmp, ins, rs->isymbolic [AMD64_RAX], FALSE);
-                               mono_regstate_free_int (rs, AMD64_RAX);
-                       }
-                       if (ins->sreg1 < MONO_MAX_IREGS) {
-                               /* The argument is already in a hard reg, need to copy */
-                               MonoInst *copy = create_copy_ins (cfg, AMD64_RAX, ins->sreg1, NULL, FALSE);
-                               insert_before_ins (ins, tmp, copy);
-                       }
-                       /* force-set sreg1 */
-                       rs->iassign [ins->sreg1] = AMD64_RAX;
-                       rs->isymbolic [AMD64_RAX] = ins->sreg1;
-                       ins->sreg1 = AMD64_RAX;
-                       rs->ifree_mask &= ~ (1 << AMD64_RAX);
-               }
-
-               /*
-                * TRACK SREG1
-                */
-               if (spec [MONO_INST_SRC1] == 'f') {
-                       if (use_sse2) {
-                               if (reg_is_soft (ins->sreg1, TRUE)) {
-                                       val = rs->fassign [ins->sreg1];
-                                       prev_sreg1 = ins->sreg1;
-                                       if (val < 0) {
-                                               int spill = 0;
-                                               if (val < -1) {
-                                                       /* the register gets spilled after this inst */
-                                                       spill = -val -1;
-                                               }
-                                               val = mono_amd64_alloc_float_reg (cfg, tmp, ins, AMD64_CALLEE_FREGS, ins->sreg1);
-                                               rs->fassign [ins->sreg1] = val;
-                                               DEBUG (g_print ("\tassigned sreg1 %s to R%d\n", mono_arch_fregname (val), ins->sreg1));
-                                               if (spill) {
-                                                       MonoInst *store = create_spilled_store (cfg, spill, val, prev_sreg1, NULL, TRUE);
-                                                       insert_before_ins (ins, tmp, store);
-                                               }
-                                       }
-                                       rs->fsymbolic [val] = prev_sreg1;
-                                       ins->sreg1 = val;
-                               } else {
-                                       prev_sreg1 = -1;
-                               }
-                       }
-                       else
-                               if (reginfof [ins->sreg1].flags & MONO_X86_FP_NEEDS_LOAD) {
-                               MonoInst *load;
-                               MonoInst *store = NULL;
-
-                               if (reginfof [ins->sreg1].flags & MONO_X86_FP_NEEDS_LOAD_SPILL) {
-                                       GList *spill_node;
-                                       spill_node = g_list_first (fspill_list);
-                                       g_assert (spill_node);
-
-                                       store = create_spilled_store_float (cfg, GPOINTER_TO_INT (spill_node->data), ins->sreg1, ins);          
-                                       fspill_list = g_list_remove (fspill_list, spill_node->data);
-                               }
-
-                               fspill++;
-                               fspill_list = g_list_prepend (fspill_list, GINT_TO_POINTER(fspill));
-                               load = create_spilled_load_float (cfg, fspill, ins->sreg1, ins);
-                               insert_before_ins (ins, tmp, load);
-                               if (store) 
-                                       insert_before_ins (load, tmp, store);
-                       }
-               } else if ((spec [MONO_INST_DEST] == 'L') && (spec [MONO_INST_SRC1] == 'L')) {
-                       /* force source to be same as dest */
-                       rs->iassign [ins->sreg1] = ins->dreg;
-                       rs->iassign [ins->sreg1 + 1] = ins->unused;
-
-                       DEBUG (g_print ("\tassigned sreg1 (long) %s to sreg1 R%d\n", mono_arch_regname (ins->dreg), ins->sreg1));
-                       DEBUG (g_print ("\tassigned sreg1 (long-high) %s to sreg1 R%d\n", mono_arch_regname (ins->unused), ins->sreg1 + 1));
-
-                       ins->sreg1 = ins->dreg;
-                       /* 
-                        * No need to save the reg, since we know that src1 == dest in these cases
-                        * ins->inst_c0 = ins->unused;
-                        */
-
-                       /* make sure that we remove them from free mask */
-                       rs->ifree_mask &= ~ (1 << ins->dreg);
-                       rs->ifree_mask &= ~ (1 << ins->unused);
-               }
-               else if (ins->sreg1 >= MONO_MAX_IREGS) {
-                       val = rs->iassign [ins->sreg1];
-                       prev_sreg1 = ins->sreg1;
-                       if (val < 0) {
-                               int spill = 0;
-                               if (val < -1) {
-                                       /* the register gets spilled after this inst */
-                                       spill = -val -1;
-                               }
-                               if (0 && (ins->opcode == OP_MOVE)) {
-                                       /* 
-                                        * small optimization: the dest register is already allocated
-                                        * but the src one is not: we can simply assign the same register
-                                        * here and peephole will get rid of the instruction later.
-                                        * This optimization may interfere with the clobbering handling:
-                                        * it removes a mov operation that will be added again to handle clobbering.
-                                        * There are also some other issues that show up with 'make testjit'.
-                                        */
-                                       mono_regstate_alloc_int (rs, 1 << ins->dreg);
-                                       val = rs->iassign [ins->sreg1] = ins->dreg;
-                                       //g_assert (val >= 0);
-                                       DEBUG (g_print ("\tfast assigned sreg1 %s to R%d\n", mono_arch_regname (val), ins->sreg1));
-                               } else {
-                                       //g_assert (val == -1); /* source cannot be spilled */
-                                       val = mono_amd64_alloc_int_reg (cfg, tmp, ins, src1_mask, ins->sreg1, reginfo [ins->sreg1].flags);
-                                       rs->iassign [ins->sreg1] = val;
-                                       DEBUG (g_print ("\tassigned sreg1 %s to R%d\n", mono_arch_regname (val), ins->sreg1));
-                               }
-                               if (spill) {
-                                       MonoInst *store = create_spilled_store (cfg, spill, val, prev_sreg1, NULL, FALSE);
-                                       insert_before_ins (ins, tmp, store);
-                               }
-                       }
-                       rs->isymbolic [val] = prev_sreg1;
-                       ins->sreg1 = val;
-               } else {
-                       prev_sreg1 = -1;
-               }
-
-               /* handle clobbering of sreg1 */
-               if (((spec [MONO_INST_DEST] == 'f' && spec [MONO_INST_SRC1] == 'f' && use_sse2) || spec [MONO_INST_CLOB] == '1' || spec [MONO_INST_CLOB] == 's') && ins->dreg != ins->sreg1) {
-                       MonoInst *sreg2_copy = NULL;
-
-                       gboolean fp = (spec [MONO_INST_SRC1] == 'f');
-
-                       if (ins->dreg == ins->sreg2) {
-                               /* 
-                                * copying sreg1 to dreg could clobber sreg2, so allocate a new
-                                * register for it.
-                                */
-                               int reg2 = 0;
-
-                               if (fp)
-                                       reg2 = mono_amd64_alloc_float_reg (cfg, tmp, ins, AMD64_CALLEE_FREGS, ins->sreg2);
-                               else
-                                       reg2 = mono_amd64_alloc_int_reg (cfg, tmp, ins, dest_mask, ins->sreg2, 0);
-
-                               DEBUG (g_print ("\tneed to copy sreg2 %s to reg %s\n", mono_amd64_regname (ins->sreg2, fp), mono_amd64_regname (reg2, fp)));
-                               sreg2_copy = create_copy_ins (cfg, reg2, ins->sreg2, NULL, fp);
-                               prev_sreg2 = ins->sreg2 = reg2;
-                       }
-
-                       MonoInst *copy = create_copy_ins (cfg, ins->dreg, ins->sreg1, NULL, fp);
-                       DEBUG (g_print ("\tneed to copy sreg1 %s to dreg %s\n", mono_amd64_regname (ins->sreg1, fp), mono_amd64_regname (ins->dreg, fp)));
-                       insert_before_ins (ins, tmp, copy);
-
-                       if (sreg2_copy)
-                               insert_before_ins (copy, tmp, sreg2_copy);
-
-                       /* we set sreg1 to dest as well */
-                       prev_sreg1 = ins->sreg1 = ins->dreg;
-                       src2_mask &= ~ (1 << ins->dreg);
-               }
-
-               /*
-                * TRACK SREG2
-                */
-               if (spec [MONO_INST_SRC2] == 'f') {
-                       if (use_sse2) {
-                               if (reg_is_soft (ins->sreg2, TRUE)) {
-                                       val = rs->fassign [ins->sreg2];
-                                       prev_sreg2 = ins->sreg2;
-                                       if (val < 0) {
-                                               int spill = 0;
-                                               if (val < -1) {
-                                                       /* the register gets spilled after this inst */
-                                                       spill = -val -1;
-                                               }
-                                               val = mono_amd64_alloc_float_reg (cfg, tmp, ins, AMD64_CALLEE_FREGS, ins->sreg2);
-                                               rs->fassign [ins->sreg2] = val;
-                                               DEBUG (g_print ("\tassigned sreg2 %s to R%d\n", mono_arch_fregname (val), ins->sreg2));
-                                               if (spill)
-                                                       create_spilled_store (cfg, spill, val, prev_sreg2, ins, TRUE);
-                                       }
-                                       rs->fsymbolic [val] = prev_sreg2;
-                                       ins->sreg2 = val;
-                               } else {
-                                       prev_sreg2 = -1;
-                               }
-                       }
-                       else
-                       if (reginfof [ins->sreg2].flags & MONO_X86_FP_NEEDS_LOAD) {
-                               MonoInst *load;
-                               MonoInst *store = NULL;
-
-                               if (reginfof [ins->sreg2].flags & MONO_X86_FP_NEEDS_LOAD_SPILL) {
-                                       GList *spill_node;
-
-                                       spill_node = g_list_first (fspill_list);
-                                       g_assert (spill_node);
-                                       if (spec [MONO_INST_SRC1] == 'f' && (reginfof [ins->sreg1].flags & MONO_X86_FP_NEEDS_LOAD_SPILL))
-                                               spill_node = g_list_next (spill_node);
-       
-                                       store = create_spilled_store_float (cfg, GPOINTER_TO_INT (spill_node->data), ins->sreg2, ins);
-                                       fspill_list = g_list_remove (fspill_list, spill_node->data);
-                               } 
-                               
-                               fspill++;
-                               fspill_list = g_list_prepend (fspill_list, GINT_TO_POINTER(fspill));
-                               load = create_spilled_load_float (cfg, fspill, ins->sreg2, ins);
-                               insert_before_ins (ins, tmp, load);
-                               if (store) 
-                                       insert_before_ins (load, tmp, store);
-                       }
-               } 
-               else if (ins->sreg2 >= MONO_MAX_IREGS) {
-                       val = rs->iassign [ins->sreg2];
-                       prev_sreg2 = ins->sreg2;
-                       if (val < 0) {
-                               int spill = 0;
-                               if (val < -1) {
-                                       /* the register gets spilled after this inst */
-                                       spill = -val -1;
-                               }
-                               val = mono_amd64_alloc_int_reg (cfg, tmp, ins, src2_mask, ins->sreg2, reginfo [ins->sreg2].flags);
-                               rs->iassign [ins->sreg2] = val;
-                               DEBUG (g_print ("\tassigned sreg2 %s to R%d\n", mono_arch_regname (val), ins->sreg2));
-                               if (spill)
-                                       create_spilled_store (cfg, spill, val, prev_sreg2, ins, FALSE);
-                       }
-                       rs->isymbolic [val] = prev_sreg2;
-                       ins->sreg2 = val;
-                       if (spec [MONO_INST_CLOB] == 's' && ins->sreg2 != AMD64_RCX) {
-                               DEBUG (g_print ("\tassigned sreg2 %s to R%d, but ECX is needed (R%d)\n", mono_arch_regname (val), ins->sreg2, rs->iassign [AMD64_RCX]));
-                       }
-               } else {
-                       prev_sreg2 = -1;
-               }
-
-               if (spec [MONO_INST_CLOB] == 'c') {
-                       int j, s;
-                       MonoCallInst *call = (MonoCallInst*)ins;
-                       GSList *list;
-                       guint32 clob_mask = AMD64_CALLEE_REGS;
-
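-                       /* force-spill every allocatable register that is still live at the call site, except sreg1 (which may hold the call target) */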
-                       for (j = 0; j < MONO_MAX_IREGS; ++j) {
-                               s = 1 << j;
-                               if ((clob_mask & s) && !(rs->ifree_mask & s) && j != ins->sreg1) {
-                                       get_register_force_spilling (cfg, tmp, ins, rs->isymbolic [j], FALSE);
-                                       mono_regstate_free_int (rs, j);
-                                       //g_warning ("register %s busy at call site\n", mono_arch_regname (j));
-                               }
-                       }
-
-                       if (use_sse2) {
-                               clob_mask = AMD64_CALLEE_FREGS;
-
-                               for (j = 0; j < MONO_MAX_FREGS; ++j) {
-                                       s = 1 << j;
-                                       if ((clob_mask & s) && !(rs->ffree_mask & s) && j != ins->sreg1) {
-                                               get_register_force_spilling (cfg, tmp, ins, rs->fsymbolic [j], TRUE);
-                                               mono_regstate_free_float (rs, j);
-                                               //g_warning ("register %s busy at call site\n", mono_arch_regname (j));
-                                       }
-                               }
-                       }
-
-                       /* 
-                        * Assign all registers in call->out_reg_args to the proper 
-                        * argument registers.
-                        */
-
-                       list = call->out_ireg_args;
-                       if (list) {
-                               while (list) {
-                                       guint64 regpair;
-                                       int reg, hreg;
-
-                                       regpair = (guint64) (list->data);
-                                       hreg = regpair >> 32;
-                                       reg = regpair & 0xffffffff;
-
-                                       rs->iassign [reg] = hreg;
-                                       rs->isymbolic [hreg] = reg;
-                                       rs->ifree_mask &= ~ (1 << hreg);
-
-                                       list = g_slist_next (list);
-                               }
-                               g_slist_free (call->out_ireg_args);
-                       }
-
-                       list = call->out_freg_args;
-                       if (list && use_sse2) {
-                               while (list) {
-                                       guint64 regpair;
-                                       int reg, hreg;
-
-                                       regpair = (guint64) (list->data);
-                                       hreg = regpair >> 32;
-                                       reg = regpair & 0xffffffff;
-
-                                       rs->fassign [reg] = hreg;
-                                       rs->fsymbolic [hreg] = reg;
-                                       rs->ffree_mask &= ~ (1 << hreg);
-
-                                       list = g_slist_next (list);
-                               }
-                       }
-                       if (call->out_freg_args)
-                               g_slist_free (call->out_freg_args);
-               }
-
-               /*if (reg_is_freeable (ins->sreg1) && prev_sreg1 >= 0 && reginfo [prev_sreg1].born_in >= i) {
-                       DEBUG (g_print ("freeable %s\n", mono_arch_regname (ins->sreg1)));
-                       mono_regstate_free_int (rs, ins->sreg1);
-               }
-               if (reg_is_freeable (ins->sreg2) && prev_sreg2 >= 0 && reginfo [prev_sreg2].born_in >= i) {
-                       DEBUG (g_print ("freeable %s\n", mono_arch_regname (ins->sreg2)));
-                       mono_regstate_free_int (rs, ins->sreg2);
-               }*/
-       
-               DEBUG (print_ins (i, ins));
-               /* this may result from an insert_before call */
-               if (!tmp->next)
-                       bb->code = tmp->data;
-               tmp = tmp->next;
-       }
-
-       g_free (reginfo);
-       g_free (reginfof);
-       g_list_free (fspill_list);
-}
-
-static unsigned char*
-emit_float_to_int (MonoCompile *cfg, guchar *code, int dreg, int sreg, int size, gboolean is_signed)
-{
-       if (use_sse2) {
-               amd64_sse_cvtsd2si_reg_reg (code, dreg, sreg);
-       }
-       else {
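-                       /* x87 path: save the FPU control word, set the rounding mode to truncate (OR in 0xc00),
-                          round-trip the value through the stack with fistp, then restore the original control word */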
-               amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, 8);
-               x86_fnstcw_membase(code, AMD64_RSP, 0);
-               amd64_mov_reg_membase (code, dreg, AMD64_RSP, 0, 2);
-               amd64_alu_reg_imm (code, X86_OR, dreg, 0xc00);
-               amd64_mov_membase_reg (code, AMD64_RSP, 2, dreg, 2);
-               amd64_fldcw_membase (code, AMD64_RSP, 2);
-               amd64_push_reg (code, AMD64_RAX); // SP = SP - 8
-               amd64_fist_pop_membase (code, AMD64_RSP, 0, size == 8);
-               amd64_pop_reg (code, dreg);
-               amd64_fldcw_membase (code, AMD64_RSP, 0);
-               amd64_alu_reg_imm (code, X86_ADD, AMD64_RSP, 8);
-       }
-
-       if (size == 1)
-               amd64_widen_reg (code, dreg, dreg, is_signed, FALSE);
-       else if (size == 2)
-               amd64_widen_reg (code, dreg, dreg, is_signed, TRUE);
-       return code;
-}
-
-static unsigned char*
-mono_emit_stack_alloc (guchar *code, MonoInst* tree)
-{
-       int sreg = tree->sreg1;
-#ifdef PLATFORM_WIN32
-       guint8* br[5];
-
-       NOT_IMPLEMENTED;
-
-       /*
-        * Under Windows:
-        * If the requested stack size is larger than one page,
-        * perform a stack-touch operation.
-        */
-       /*
-        * Generate stack probe code.
-        * Under Windows, it is necessary to allocate one page at a time,
-        * "touching" the stack after each successful sub-allocation. This is
-        * because of the way stack growth is implemented - there is a
-        * guard page before the lowest stack page that is currently committed.
-        * The stack normally grows sequentially, so the OS traps access to the
-        * guard page and commits more pages when needed.
-        */
-       amd64_test_reg_imm (code, sreg, ~0xFFF);
-       br[0] = code; x86_branch8 (code, X86_CC_Z, 0, FALSE);
-
-       br[2] = code; /* loop */
-       amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, 0x1000);
-       amd64_test_membase_reg (code, AMD64_RSP, 0, AMD64_RSP);
-       amd64_alu_reg_imm (code, X86_SUB, sreg, 0x1000);
-       amd64_alu_reg_imm (code, X86_CMP, sreg, 0x1000);
-       br[3] = code; x86_branch8 (code, X86_CC_AE, 0, FALSE);
-       amd64_patch (br[3], br[2]);
-       amd64_test_reg_reg (code, sreg, sreg);
-       br[4] = code; x86_branch8 (code, X86_CC_Z, 0, FALSE);
-       amd64_alu_reg_reg (code, X86_SUB, AMD64_RSP, sreg);
-
-       br[1] = code; x86_jump8 (code, 0);
-
-       amd64_patch (br[0], code);
-       amd64_alu_reg_reg (code, X86_SUB, AMD64_RSP, sreg);
-       amd64_patch (br[1], code);
-       amd64_patch (br[4], code);
-#else /* PLATFORM_WIN32 */
-       amd64_alu_reg_reg (code, X86_SUB, AMD64_RSP, tree->sreg1);
-#endif
-       if (tree->flags & MONO_INST_INIT) {
-               int offset = 0;
-               if (tree->dreg != AMD64_RAX && sreg != AMD64_RAX) {
-                       amd64_push_reg (code, AMD64_RAX);
-                       offset += 8;
-               }
-               if (tree->dreg != AMD64_RCX && sreg != AMD64_RCX) {
-                       amd64_push_reg (code, AMD64_RCX);
-                       offset += 8;
-               }
-               if (tree->dreg != AMD64_RDI && sreg != AMD64_RDI) {
-                       amd64_push_reg (code, AMD64_RDI);
-                       offset += 8;
-               }
-               
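-               /* zero-initialize the allocated area: RAX = 0, RCX = iteration count, RDI = start of the block, then rep stos */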
-               amd64_shift_reg_imm (code, X86_SHR, sreg, 4);
-               if (sreg != AMD64_RCX)
-                       amd64_mov_reg_reg (code, AMD64_RCX, sreg, 8);
-               amd64_alu_reg_reg (code, X86_XOR, AMD64_RAX, AMD64_RAX);
-                               
-               amd64_lea_membase (code, AMD64_RDI, AMD64_RSP, offset);
-               amd64_cld (code);
-               amd64_prefix (code, X86_REP_PREFIX);
-               amd64_stosl (code);
-               
-               if (tree->dreg != AMD64_RDI && sreg != AMD64_RDI)
-                       amd64_pop_reg (code, AMD64_RDI);
-               if (tree->dreg != AMD64_RCX && sreg != AMD64_RCX)
-                       amd64_pop_reg (code, AMD64_RCX);
-               if (tree->dreg != AMD64_RAX && sreg != AMD64_RAX)
-                       amd64_pop_reg (code, AMD64_RAX);
-       }
-       return code;
+       return code;
 }
 
 static guint8*
 emit_move_return_value (MonoCompile *cfg, MonoInst *ins, guint8 *code)
 {
        CallInfo *cinfo;
-       guint32 offset, quad;
+       guint32 quad;
 
        /* Move return value to the target register */
        /* FIXME: do this in the local reg allocator */
@@ -2957,20 +1853,18 @@ emit_move_return_value (MonoCompile *cfg, MonoInst *ins, guint8 *code)
        case OP_LCALL:
        case OP_LCALL_REG:
        case OP_LCALL_MEMBASE:
-               if (ins->dreg != AMD64_RAX)
-                       amd64_mov_reg_reg (code, ins->dreg, AMD64_RAX, 8);
+               g_assert (ins->dreg == AMD64_RAX);
                break;
        case OP_FCALL:
        case OP_FCALL_REG:
        case OP_FCALL_MEMBASE:
-               /* FIXME: optimize this */
-               offset = mono_spillvar_offset_float (cfg, 0);
                if (((MonoCallInst*)ins)->signature->ret->type == MONO_TYPE_R4) {
                        if (use_sse2)
                                amd64_sse_cvtss2sd_reg_reg (code, ins->dreg, AMD64_XMM0);
                        else {
-                               amd64_movss_membase_reg (code, AMD64_RBP, offset, AMD64_XMM0);
-                               amd64_fld_membase (code, AMD64_RBP, offset, FALSE);
+                               /* FIXME: optimize this */
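+                               /* there is no direct SSE <-> x87 move, so bounce the value through memory just below the stack pointer */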
+                               amd64_movss_membase_reg (code, AMD64_RSP, -8, AMD64_XMM0);
+                               amd64_fld_membase (code, AMD64_RSP, -8, FALSE);
                        }
                }
                else {
@@ -2979,8 +1873,9 @@ emit_move_return_value (MonoCompile *cfg, MonoInst *ins, guint8 *code)
                                        amd64_sse_movsd_reg_reg (code, ins->dreg, AMD64_XMM0);
                        }
                        else {
-                               amd64_movsd_membase_reg (code, AMD64_RBP, offset, AMD64_XMM0);
-                               amd64_fld_membase (code, AMD64_RBP, offset, TRUE);
+                               /* FIXME: optimize this */
+                               amd64_movsd_membase_reg (code, AMD64_RSP, -8, AMD64_XMM0);
+                               amd64_fld_membase (code, AMD64_RSP, -8, TRUE);
                        }
                }
                break;
@@ -3011,6 +1906,7 @@ emit_move_return_value (MonoCompile *cfg, MonoInst *ins, guint8 *code)
                                }
                        }
                }
+               g_free (cinfo);
                break;
        }
 
@@ -3034,7 +1930,7 @@ emit_load_volatile_arguments (MonoCompile *cfg, guint8 *code)
 
        /* FIXME: Generate intermediate code instead */
 
-       sig = method->signature;
+       sig = mono_method_signature (method);
 
        cinfo = get_call_info (sig, FALSE);
        
@@ -3128,7 +2024,7 @@ mono_arch_output_basic_block (MonoCompile *cfg, MonoBasicBlock *bb)
 
        if (cfg->prof_options & MONO_PROFILE_COVERAGE) {
                MonoProfileCoverageInfo *cov = cfg->coverage_info;
-               g_assert (!mono_compile_aot);
+               g_assert (!cfg->compile_aot);
                cpos += 6;
 
                cov->data [bb->dfn].cil_code = bb->cil_code;
@@ -3164,15 +2060,12 @@ mono_arch_output_basic_block (MonoCompile *cfg, MonoBasicBlock *bb)
                        amd64_set_membase (code, X86_CC_EQ, ins->inst_basereg, ins->inst_offset, TRUE);
                        break;
                case OP_STOREI1_MEMBASE_IMM:
-                       g_assert (amd64_is_imm32 (ins->inst_imm));
                        amd64_mov_membase_imm (code, ins->inst_destbasereg, ins->inst_offset, ins->inst_imm, 1);
                        break;
                case OP_STOREI2_MEMBASE_IMM:
-                       g_assert (amd64_is_imm32 (ins->inst_imm));
                        amd64_mov_membase_imm (code, ins->inst_destbasereg, ins->inst_offset, ins->inst_imm, 2);
                        break;
                case OP_STOREI4_MEMBASE_IMM:
-                       g_assert (amd64_is_imm32 (ins->inst_imm));
                        amd64_mov_membase_imm (code, ins->inst_destbasereg, ins->inst_offset, ins->inst_imm, 4);
                        break;
                case OP_STOREI1_MEMBASE_REG:
@@ -3190,12 +2083,8 @@ mono_arch_output_basic_block (MonoCompile *cfg, MonoBasicBlock *bb)
                        break;
                case OP_STORE_MEMBASE_IMM:
                case OP_STOREI8_MEMBASE_IMM:
-                       if (amd64_is_imm32 (ins->inst_imm))
-                               amd64_mov_membase_imm (code, ins->inst_destbasereg, ins->inst_offset, ins->inst_imm, 8);
-                       else {
-                               amd64_mov_reg_imm (code, GP_SCRATCH_REG, ins->inst_imm);
-                               amd64_mov_membase_reg (code, ins->inst_destbasereg, ins->inst_offset, GP_SCRATCH_REG, 8);
-                       }
+                       g_assert (amd64_is_imm32 (ins->inst_imm));
+                       amd64_mov_membase_imm (code, ins->inst_destbasereg, ins->inst_offset, ins->inst_imm, 8);
                        break;
                case CEE_LDIND_I:
                        amd64_mov_reg_mem (code, ins->dreg, (gssize)ins->inst_p0, sizeof (gpointer));
@@ -3212,13 +2101,8 @@ mono_arch_output_basic_block (MonoCompile *cfg, MonoBasicBlock *bb)
                        break;
                case OP_LOAD_MEMBASE:
                case OP_LOADI8_MEMBASE:
-                       if (amd64_is_imm32 (ins->inst_offset)) {
-                               amd64_mov_reg_membase (code, ins->dreg, ins->inst_basereg, ins->inst_offset, sizeof (gpointer));
-                       }
-                       else {
-                               amd64_mov_reg_imm_size (code, GP_SCRATCH_REG, ins->inst_offset, 8);
-                               amd64_mov_reg_memindex_size (code, ins->dreg, ins->inst_basereg, 0, GP_SCRATCH_REG, 0, 8);
-                       }
+                       g_assert (amd64_is_imm32 (ins->inst_offset));
+                       amd64_mov_reg_membase (code, ins->dreg, ins->inst_basereg, ins->inst_offset, sizeof (gpointer));
                        break;
                case OP_LOADI4_MEMBASE:
                        amd64_movsxd_reg_membase (code, ins->dreg, ins->inst_basereg, ins->inst_offset);
@@ -3238,6 +2122,9 @@ mono_arch_output_basic_block (MonoCompile *cfg, MonoBasicBlock *bb)
                case OP_LOADI2_MEMBASE:
                        amd64_widen_membase (code, ins->dreg, ins->inst_basereg, ins->inst_offset, TRUE, TRUE);
                        break;
+               case OP_AMD64_LOADI8_MEMINDEX:
+                       amd64_mov_reg_memindex_size (code, ins->dreg, ins->inst_basereg, 0, ins->inst_indexreg, 0, 8);
+                       break;
                case CEE_CONV_I1:
                        amd64_widen_reg (code, ins->dreg, ins->sreg1, TRUE, FALSE);
                        break;
@@ -3264,19 +2151,8 @@ mono_arch_output_basic_block (MonoCompile *cfg, MonoBasicBlock *bb)
                        amd64_alu_reg_reg (code, X86_CMP, ins->sreg1, ins->sreg2);
                        break;
                case OP_COMPARE_IMM:
-                       if (!amd64_is_imm32 (ins->inst_imm)) {
-                               amd64_mov_reg_imm (code, AMD64_R11, ins->inst_imm);
-                               amd64_alu_reg_reg (code, X86_CMP, ins->sreg1, AMD64_R11);
-                       } else {
-                               amd64_alu_reg_imm (code, X86_CMP, ins->sreg1, ins->inst_imm);
-                       }
-                       break;
-               case OP_X86_COMPARE_MEMBASE_REG:
-                       amd64_alu_membase_reg (code, X86_CMP, ins->inst_basereg, ins->inst_offset, ins->sreg2);
-                       break;
-               case OP_X86_COMPARE_MEMBASE_IMM:
                        g_assert (amd64_is_imm32 (ins->inst_imm));
-                       amd64_alu_membase_imm (code, X86_CMP, ins->inst_basereg, ins->inst_offset, ins->inst_imm);
+                       amd64_alu_reg_imm (code, X86_CMP, ins->sreg1, ins->inst_imm);
                        break;
                case OP_X86_COMPARE_REG_MEMBASE:
                        amd64_alu_reg_membase (code, X86_CMP, ins->sreg1, ins->sreg2, ins->inst_offset);
@@ -3328,7 +2204,6 @@ mono_arch_output_basic_block (MonoCompile *cfg, MonoBasicBlock *bb)
                case CEE_BREAK:
                        amd64_breakpoint (code);
                        break;
-
                case OP_ADDCC:
                case CEE_ADD:
                        amd64_alu_reg_reg (code, X86_ADD, ins->sreg1, ins->sreg2);
@@ -3367,38 +2242,36 @@ mono_arch_output_basic_block (MonoCompile *cfg, MonoBasicBlock *bb)
                        amd64_alu_reg_imm (code, X86_AND, ins->sreg1, ins->inst_imm);
                        break;
                case CEE_MUL:
+               case OP_LMUL:
                        amd64_imul_reg_reg (code, ins->sreg1, ins->sreg2);
                        break;
                case OP_MUL_IMM:
+               case OP_LMUL_IMM:
                        amd64_imul_reg_reg_imm (code, ins->dreg, ins->sreg1, ins->inst_imm);
                        break;
                case CEE_DIV:
+               case OP_LDIV:
                        amd64_cdq (code);
                        amd64_div_reg (code, ins->sreg2, TRUE);
                        break;
                case CEE_DIV_UN:
+               case OP_LDIV_UN:
                        amd64_alu_reg_reg (code, X86_XOR, AMD64_RDX, AMD64_RDX);
                        amd64_div_reg (code, ins->sreg2, FALSE);
                        break;
-               case OP_DIV_IMM:
-                       g_assert (amd64_is_imm32 (ins->inst_imm));
-                       amd64_mov_reg_imm (code, ins->sreg2, ins->inst_imm);
-                       amd64_cdq (code);
-                       amd64_div_reg (code, ins->sreg2, TRUE);
-                       break;
                case CEE_REM:
+               case OP_LREM:
                        amd64_cdq (code);
                        amd64_div_reg (code, ins->sreg2, TRUE);
                        break;
                case CEE_REM_UN:
+               case OP_LREM_UN:
                        amd64_alu_reg_reg (code, X86_XOR, AMD64_RDX, AMD64_RDX);
                        amd64_div_reg (code, ins->sreg2, FALSE);
                        break;
-               case OP_REM_IMM:
-                       g_assert (amd64_is_imm32 (ins->inst_imm));
-                       amd64_mov_reg_imm (code, ins->sreg2, ins->inst_imm);
-                       amd64_cdq (code);
-                       amd64_div_reg (code, ins->sreg2, TRUE);
+               case OP_LMUL_OVF:
+                       amd64_imul_reg_reg (code, ins->sreg1, ins->sreg2);
+                       EMIT_COND_SYSTEM_EXCEPTION (X86_CC_O, FALSE, "OverflowException");
                        break;
                case CEE_OR:
                        amd64_alu_reg_reg (code, X86_OR, ins->sreg1, ins->sreg2);
@@ -3538,9 +2411,11 @@ mono_arch_output_basic_block (MonoCompile *cfg, MonoBasicBlock *bb)
                        amd64_imul_reg_reg_size (code, ins->sreg1, ins->sreg2, 4);
                        EMIT_COND_SYSTEM_EXCEPTION (X86_CC_O, FALSE, "OverflowException");
                        break;
-               case OP_IMUL_OVF_UN: {
+               case OP_IMUL_OVF_UN:
+               case OP_LMUL_OVF_UN: {
                        /* the mul operation and the exception check should most likely be split */
                        int non_eax_reg, saved_eax = FALSE, saved_edx = FALSE;
+                       int size = (ins->opcode == OP_IMUL_OVF_UN) ? 4 : 8;
                        /*g_assert (ins->sreg2 == X86_EAX);
                        g_assert (ins->dreg == X86_EAX);*/
                        if (ins->sreg2 == X86_EAX) {
@@ -3553,7 +2428,7 @@ mono_arch_output_basic_block (MonoCompile *cfg, MonoBasicBlock *bb)
                                        saved_eax = TRUE;
                                        amd64_push_reg (code, X86_EAX);
                                }
-                               amd64_mov_reg_reg (code, X86_EAX, ins->sreg1, 4);
+                               amd64_mov_reg_reg (code, X86_EAX, ins->sreg1, size);
                                non_eax_reg = ins->sreg2;
                        }
                        if (ins->dreg == X86_EDX) {
@@ -3561,14 +2436,14 @@ mono_arch_output_basic_block (MonoCompile *cfg, MonoBasicBlock *bb)
                                        saved_eax = TRUE;
                                        amd64_push_reg (code, X86_EAX);
                                }
-                       } else if (ins->dreg != X86_EAX) {
+                       } else {
                                saved_edx = TRUE;
                                amd64_push_reg (code, X86_EDX);
                        }
-                       amd64_mul_reg_size (code, non_eax_reg, FALSE, 4);
+                       amd64_mul_reg_size (code, non_eax_reg, FALSE, size);
                        /* save before the check since pop and mov don't change the flags */
                        if (ins->dreg != X86_EAX)
-                               amd64_mov_reg_reg (code, ins->dreg, X86_EAX, 4);
+                               amd64_mov_reg_reg (code, ins->dreg, X86_EAX, size);
                        if (saved_edx)
                                amd64_pop_reg (code, X86_EDX);
                        if (saved_eax)
@@ -3578,38 +2453,26 @@ mono_arch_output_basic_block (MonoCompile *cfg, MonoBasicBlock *bb)
                }
                case OP_IDIV:
                        amd64_cdq_size (code, 4);
-                       amd64_div_reg_size (code, ins->sreg2, 4, TRUE);
+                       amd64_div_reg_size (code, ins->sreg2, TRUE, 4);
                        break;
                case OP_IDIV_UN:
                        amd64_alu_reg_reg (code, X86_XOR, AMD64_RDX, AMD64_RDX);
-                       amd64_div_reg_size (code, ins->sreg2, 4, FALSE);
-                       break;
-               case OP_IDIV_IMM:
-                       amd64_mov_reg_imm (code, ins->sreg2, ins->inst_imm);
-                       amd64_cdq_size (code, 4);
-                       amd64_div_reg_size (code, ins->sreg2, 4, TRUE);
+                       amd64_div_reg_size (code, ins->sreg2, FALSE, 4);
                        break;
                case OP_IREM:
                        amd64_cdq_size (code, 4);
-                       amd64_div_reg_size (code, ins->sreg2, 4, TRUE);
+                       amd64_div_reg_size (code, ins->sreg2, TRUE, 4);
                        break;
                case OP_IREM_UN:
                        amd64_alu_reg_reg (code, X86_XOR, AMD64_RDX, AMD64_RDX);
-                       amd64_div_reg_size (code, ins->sreg2, 4, FALSE);
-                       break;
-               case OP_IREM_IMM:
-                       amd64_mov_reg_imm (code, ins->sreg2, ins->inst_imm);
-                       amd64_cdq_size (code, 4);
-                       amd64_div_reg_size (code, ins->sreg2, 4, TRUE);
+                       amd64_div_reg_size (code, ins->sreg2, FALSE, 4);
                        break;
-
                case OP_ICOMPARE:
                        amd64_alu_reg_reg_size (code, X86_CMP, ins->sreg1, ins->sreg2, 4);
                        break;
                case OP_ICOMPARE_IMM:
                        amd64_alu_reg_imm_size (code, X86_CMP, ins->sreg1, ins->inst_imm, 4);
                        break;
-
                case OP_IBEQ:
                case OP_IBLT:
                case OP_IBGT:
@@ -3644,6 +2507,9 @@ mono_arch_output_basic_block (MonoCompile *cfg, MonoBasicBlock *bb)
                case OP_SEXT_I2:
                        amd64_widen_reg (code, ins->dreg, ins->sreg1, TRUE, TRUE);
                        break;
+               case OP_SEXT_I4:
+                       amd64_movsxd_reg_reg (code, ins->dreg, ins->sreg1);
+                       break;
                case OP_ICONST:
                case OP_I8CONST:
                        if ((((guint64)ins->inst_c0) >> 32) == 0)
@@ -3653,12 +2519,11 @@ mono_arch_output_basic_block (MonoCompile *cfg, MonoBasicBlock *bb)
                        break;
                case OP_AOTCONST:
                        mono_add_patch_info (cfg, offset, (MonoJumpInfoType)ins->inst_i1, ins->inst_p0);
-                       amd64_set_reg_template (code, ins->dreg);
+                       amd64_mov_reg_membase (code, ins->dreg, AMD64_RIP, 0, 8);
                        break;
                case CEE_CONV_I4:
                case CEE_CONV_U4:
                case OP_MOVE:
-               case OP_SETREG:
                        amd64_mov_reg_reg (code, ins->dreg, ins->sreg1, sizeof (gpointer));
                        break;
                case OP_AMD64_SET_XMMREG_R4: {
@@ -3715,7 +2580,10 @@ mono_arch_output_basic_block (MonoCompile *cfg, MonoBasicBlock *bb)
                        amd64_leave (code);
                        offset = code - cfg->native_code;
                        mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_METHOD_JUMP, ins->inst_p0);
-                       amd64_set_reg_template (code, AMD64_R11);
+                       if (cfg->compile_aot)
+                               amd64_mov_reg_membase (code, AMD64_R11, AMD64_RIP, 0, 8);
+                       else
+                               amd64_set_reg_template (code, AMD64_R11);
                        amd64_jump_reg (code, AMD64_R11);
                        break;
                }
@@ -3739,6 +2607,22 @@ mono_arch_output_basic_block (MonoCompile *cfg, MonoBasicBlock *bb)
                         */
                        if ((call->signature->call_convention == MONO_CALL_VARARG) && (call->signature->pinvoke))
                                amd64_alu_reg_reg (code, X86_XOR, AMD64_RAX, AMD64_RAX);
+                       else if ((cfg->method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) && (cfg->method->klass->image != mono_defaults.corlib)) {
+                               /* 
+                                * Since the unmanaged calling convention doesn't contain a 
+                                * 'vararg' entry, we have to treat every pinvoke call as a
+                                * potential vararg call.
+                                */
+                               guint32 nregs, i;
+                               nregs = 0;
+                               for (i = 0; i < AMD64_XMM_NREG; ++i)
+                                       if (call->used_fregs & (1 << i))
+                                               nregs ++;
+                               if (!nregs)
+                                       amd64_alu_reg_reg (code, X86_XOR, AMD64_RAX, AMD64_RAX);
+                               else
+                                       amd64_mov_reg_imm (code, AMD64_RAX, nregs);
+                       }
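Background for the xmm-counting block above: the SysV AMD64 calling convention requires the caller of a variadic function to pass, in %al, an upper bound on the number of vector registers holding arguments, and since the unmanaged signature carries no vararg hint the wrapper has to assume any pinvoke may need it. A C compiler does the same thing implicitly; a rough sketch of what gcc typically emits (illustration only):

#include <stdio.h>

int main (void)
{
        printf ("%d\n", 42);    /* no SSE args -> typically: xorl %eax,%eax ; call printf */
        printf ("%f\n", 3.14);  /* one SSE arg -> typically: movl $1,%eax   ; call printf */
        return 0;
}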
 
                        if (ins->flags & MONO_INST_HAS_METHOD)
                                code = emit_call (cfg, code, MONO_PATCH_INFO_METHOD, call->method);
@@ -3783,8 +2667,12 @@ mono_arch_output_basic_block (MonoCompile *cfg, MonoBasicBlock *bb)
                        call = (MonoCallInst*)ins;
 
                        if (AMD64_IS_ARGUMENT_REG (ins->sreg1)) {
-                               amd64_mov_reg_reg (code, AMD64_R11, ins->sreg1, 8);
-                               ins->sreg1 = AMD64_R11;
+                               /* 
+                                * Can't use R11 because it is clobbered by the trampoline 
+                                * code, and the reg value is needed by get_vcall_slot_addr.
+                                */
+                               amd64_mov_reg_reg (code, AMD64_RAX, ins->sreg1, 8);
+                               ins->sreg1 = AMD64_RAX;
                        }
 
                        amd64_call_membase (code, ins->sreg1, ins->inst_offset);
@@ -3846,6 +2734,12 @@ mono_arch_output_basic_block (MonoCompile *cfg, MonoBasicBlock *bb)
                                             (gpointer)"mono_arch_throw_exception");
                        break;
                }
+               case OP_RETHROW: {
+                       amd64_mov_reg_reg (code, AMD64_RDI, ins->sreg1, 8);
+                       code = emit_call (cfg, code, MONO_PATCH_INFO_INTERNAL_METHOD, 
+                                            (gpointer)"mono_arch_rethrow_exception");
+                       break;
+               }
                case OP_CALL_HANDLER: 
                        /* Align stack */
                        amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, 8);
@@ -4027,7 +2921,7 @@ mono_arch_output_basic_block (MonoCompile *cfg, MonoBasicBlock *bb)
                case CEE_CONV_R4: /* FIXME: change precision */
                case CEE_CONV_R8:
                        if (use_sse2)
-                               amd64_sse_cvtsi2sd_reg_reg (code, ins->dreg, ins->sreg1);
+                               amd64_sse_cvtsi2sd_reg_reg_size (code, ins->dreg, ins->sreg1, 4);
                        else {
                                amd64_push_reg (code, ins->sreg1);
                                amd64_fild_membase (code, AMD64_RSP, 0, FALSE);
@@ -4188,28 +3082,37 @@ mono_arch_output_basic_block (MonoCompile *cfg, MonoBasicBlock *bb)
                                amd64_push_reg (code, AMD64_R11);
                                amd64_push_reg (code, AMD64_R11);
                                amd64_sse_xorpd_reg_membase (code, ins->dreg, AMD64_RSP, 0);
+                               amd64_alu_reg_imm (code, X86_ADD, AMD64_RSP, 16);
                        }
                        else
                                amd64_fchs (code);
                        break;          
                case OP_SIN:
-                       if (use_sse2)
-                               g_assert_not_reached ();
-                       amd64_fsin (code);
-                       amd64_fldz (code);
-                       amd64_fp_op_reg (code, X86_FADD, 1, TRUE);
+                       if (use_sse2) {
+                               EMIT_SSE2_FPFUNC (code, fsin, ins->dreg, ins->sreg1);
+                       }
+                       else {
+                               amd64_fsin (code);
+                               amd64_fldz (code);
+                               amd64_fp_op_reg (code, X86_FADD, 1, TRUE);
+                       }
                        break;          
                case OP_COS:
-                       if (use_sse2)
-                               g_assert_not_reached ();
-                       amd64_fcos (code);
-                       amd64_fldz (code);
-                       amd64_fp_op_reg (code, X86_FADD, 1, TRUE);
+                       if (use_sse2) {
+                               EMIT_SSE2_FPFUNC (code, fcos, ins->dreg, ins->sreg1);
+                       }
+                       else {
+                               amd64_fcos (code);
+                               amd64_fldz (code);
+                               amd64_fp_op_reg (code, X86_FADD, 1, TRUE);
+                       }
                        break;          
                case OP_ABS:
-                       if (use_sse2)
-                               g_assert_not_reached ();
-                       amd64_fabs (code);
+                       if (use_sse2) {
+                               EMIT_SSE2_FPFUNC (code, fabs, ins->dreg, ins->sreg1);
+                       }
+                       else
+                               amd64_fabs (code);
                        break;          
                case OP_TAN: {
                        /* 
@@ -4257,9 +3160,11 @@ mono_arch_output_basic_block (MonoCompile *cfg, MonoBasicBlock *bb)
                        amd64_fp_op_reg (code, X86_FADD, 1, TRUE);
                        break;          
                case OP_SQRT:
-                       if (use_sse2)
-                               g_assert_not_reached ();
-                       amd64_fsqrt (code);
+                       if (use_sse2) {
+                               EMIT_SSE2_FPFUNC (code, fsqrt, ins->dreg, ins->sreg1);
+                       }
+                       else
+                               amd64_fsqrt (code);
                        break;          
                case OP_X86_FPOP:
                        if (!use_sse2)
@@ -4293,7 +3198,11 @@ mono_arch_output_basic_block (MonoCompile *cfg, MonoBasicBlock *bb)
                }
                case OP_FCOMPARE:
                        if (use_sse2) {
-                               amd64_sse_comisd_reg_reg (code, ins->sreg1, ins->sreg2);
+                               /* 
+                                * The two arguments are swapped because the fbranch instructions
+                                * depend on this ordering for the non-SSE case to work.
+                                */
+                               amd64_sse_comisd_reg_reg (code, ins->sreg2, ins->sreg1);
                                break;
                        }
                        if (cfg->opt & MONO_OPT_FCMOV) {
@@ -4345,7 +3254,7 @@ mono_arch_output_basic_block (MonoCompile *cfg, MonoBasicBlock *bb)
                                 */
                                amd64_alu_reg_reg (code, X86_XOR, ins->dreg, ins->dreg);
                                if (use_sse2)
-                                       amd64_sse_comisd_reg_reg (code, ins->sreg1, ins->sreg2);
+                                       amd64_sse_comisd_reg_reg (code, ins->sreg2, ins->sreg1);
                                else {
                                        amd64_fcomip (code, 1);
                                        amd64_fstp (code, 0);
@@ -4396,7 +3305,7 @@ mono_arch_output_basic_block (MonoCompile *cfg, MonoBasicBlock *bb)
                                guchar *unordered_check;
                                amd64_alu_reg_reg (code, X86_XOR, ins->dreg, ins->dreg);
                                if (use_sse2)
-                                       amd64_sse_comisd_reg_reg (code, ins->sreg1, ins->sreg2);
+                                       amd64_sse_comisd_reg_reg (code, ins->sreg2, ins->sreg1);
                                else {
                                        amd64_fcomip (code, 1);
                                        amd64_fstp (code, 0);
@@ -4428,12 +3337,63 @@ mono_arch_output_basic_block (MonoCompile *cfg, MonoBasicBlock *bb)
 
                                amd64_patch (end_jump, code);
                        }
-                       amd64_set_reg (code, X86_CC_EQ, ins->dreg, TRUE);
-                       amd64_widen_reg (code, ins->dreg, ins->dreg, FALSE, FALSE);
-
-                       if (ins->dreg != AMD64_RAX) 
-                               amd64_pop_reg (code, AMD64_RAX);
+                       amd64_set_reg (code, X86_CC_EQ, ins->dreg, TRUE);
+                       amd64_widen_reg (code, ins->dreg, ins->dreg, FALSE, FALSE);
+
+                       if (ins->dreg != AMD64_RAX) 
+                               amd64_pop_reg (code, AMD64_RAX);
+                       break;
+               case OP_FCLT_MEMBASE:
+               case OP_FCGT_MEMBASE:
+               case OP_FCLT_UN_MEMBASE:
+               case OP_FCGT_UN_MEMBASE:
+               case OP_FCEQ_MEMBASE: {
+                       guchar *unordered_check, *jump_to_end;
+                       int x86_cond;
+                       g_assert (use_sse2);
+
+                       amd64_alu_reg_reg (code, X86_XOR, ins->dreg, ins->dreg);
+                       amd64_sse_comisd_reg_membase (code, ins->sreg1, ins->sreg2, ins->inst_offset);
+
+                       switch (ins->opcode) {
+                       case OP_FCEQ_MEMBASE:
+                               x86_cond = X86_CC_EQ;
+                               break;
+                       case OP_FCLT_MEMBASE:
+                       case OP_FCLT_UN_MEMBASE:
+                               x86_cond = X86_CC_LT;
+                               break;
+                       case OP_FCGT_MEMBASE:
+                       case OP_FCGT_UN_MEMBASE:
+                               x86_cond = X86_CC_GT;
+                               break;
+                       default:
+                               g_assert_not_reached ();
+                       }
+
+                       unordered_check = code;
+                       x86_branch8 (code, X86_CC_P, 0, FALSE);
+                       amd64_set_reg (code, x86_cond, ins->dreg, FALSE);
+
+                       switch (ins->opcode) {
+                       case OP_FCEQ_MEMBASE:
+                       case OP_FCLT_MEMBASE:
+                       case OP_FCGT_MEMBASE:
+                               amd64_patch (unordered_check, code);
+                               break;
+                       case OP_FCLT_UN_MEMBASE:
+                       case OP_FCGT_UN_MEMBASE:
+                               jump_to_end = code;
+                               x86_jump8 (code, 0);
+                               amd64_patch (unordered_check, code);
+                               amd64_inc_reg (code, ins->dreg);
+                               amd64_patch (jump_to_end, code);
+                               break;
+                       default:
+                               break;
+                       }
                        break;
+               }
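The X86_CC_P branch in the block above handles the unordered case: COMISD sets the parity flag when either operand is NaN, and the ordered compares must then yield 0 while the _UN variants must yield 1. A small C sketch of the semantics being reproduced (assumes NAN from <math.h>):

#include <assert.h>
#include <math.h>

int main (void)
{
        double a = NAN;

        /* ordered compare: false when unordered, so the parity branch skips the setcc */
        assert (!(a < 1.0));            /* FCLT_MEMBASE    -> 0 */
        /* "unordered" variant: true when unordered, so the parity path does inc_reg */
        assert (!(a >= 1.0));           /* FCLT_UN_MEMBASE -> 1 */
        return 0;
}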
                case OP_FBEQ:
                        if (use_sse2 || (cfg->opt & MONO_OPT_FCMOV)) {
                                guchar *jump = code;
@@ -4559,7 +3519,7 @@ mono_arch_output_basic_block (MonoCompile *cfg, MonoBasicBlock *bb)
                case CEE_CKFINITE: {
                        if (use_sse2) {
                                /* Transfer value to the fp stack */
-                               amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, 8);
+                               amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, 16);
                                amd64_movsd_membase_reg (code, AMD64_RSP, 0, ins->sreg1);
                                amd64_fld_membase (code, AMD64_RSP, 0, TRUE);
                        }
@@ -4573,13 +3533,105 @@ mono_arch_output_basic_block (MonoCompile *cfg, MonoBasicBlock *bb)
                                amd64_fstp (code, 0);
                        }                               
                        EMIT_COND_SYSTEM_EXCEPTION (X86_CC_EQ, FALSE, "ArithmeticException");
+                       if (use_sse2)
+                               amd64_alu_reg_imm (code, X86_ADD, AMD64_RSP, 16);
                        break;
                }
-               case OP_X86_TLS_GET: {
+               case OP_TLS_GET: {
                        x86_prefix (code, X86_FS_PREFIX);
                        amd64_mov_reg_mem (code, ins->dreg, ins->inst_offset, 8);
                        break;
                }
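OP_TLS_GET reads a pointer-sized slot at a fixed offset from the thread's TLS base, which is addressed through the FS segment on amd64 Linux. A minimal sketch of the same access pattern using GCC inline assembly (hypothetical helper, illustration only):

static void*
tls_get (long offset)
{
        void *val;

        /* load 8 bytes at %fs:offset, i.e. relative to the TLS base */
        __asm__ ("movq %%fs:(%1), %0" : "=r" (val) : "r" (offset));
        return val;
}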
+               case OP_MEMORY_BARRIER: {
+                       /* Not needed on amd64 */
+                       break;
+               }
+               case OP_ATOMIC_ADD_I4:
+               case OP_ATOMIC_ADD_I8: {
+                       int dreg = ins->dreg;
+                       guint32 size = (ins->opcode == OP_ATOMIC_ADD_I4) ? 4 : 8;
+
+                       if (dreg == ins->inst_basereg)
+                               dreg = AMD64_R11;
+                       
+                       if (dreg != ins->sreg2)
+                               amd64_mov_reg_reg (code, dreg, ins->sreg2, size);
+
+                       x86_prefix (code, X86_LOCK_PREFIX);
+                       amd64_xadd_membase_reg (code, ins->inst_basereg, ins->inst_offset, dreg, size);
+
+                       if (dreg != ins->dreg)
+                               amd64_mov_reg_reg (code, ins->dreg, dreg, size);
+
+                       break;
+               }
+               case OP_ATOMIC_ADD_NEW_I4:
+               case OP_ATOMIC_ADD_NEW_I8: {
+                       int dreg = ins->dreg;
+                       guint32 size = (ins->opcode == OP_ATOMIC_ADD_NEW_I4) ? 4 : 8;
+
+                       if ((dreg == ins->sreg2) || (dreg == ins->inst_basereg))
+                               dreg = AMD64_R11;
+
+                       amd64_mov_reg_reg (code, dreg, ins->sreg2, size);
+                       amd64_prefix (code, X86_LOCK_PREFIX);
+                       amd64_xadd_membase_reg (code, ins->inst_basereg, ins->inst_offset, dreg, size);
+                       /* dreg now contains the old value; add sreg2 to produce the new value */
+                       amd64_alu_reg_reg_size (code, X86_ADD, dreg, ins->sreg2, size);
+                       
+                       if (ins->dreg != dreg)
+                               amd64_mov_reg_reg (code, ins->dreg, dreg, size);
+
+                       break;
+               }
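The only difference between the two atomic-add families is the return value: LOCK XADD leaves the value the location held before the add in the source register, so the _NEW variants add sreg2 once more to obtain the post-add value. A minimal C sketch, assuming GCC's __sync builtins:

#include <stdio.h>

static long counter;

int main (void)
{
        long delta = 5;

        /* OP_ATOMIC_ADD: LOCK XADD returns the previous value ... */
        long old_value = __sync_fetch_and_add (&counter, delta);
        /* OP_ATOMIC_ADD_NEW: ... so the new value takes one extra add,
         * which is what the ADD emitted after the XADD does */
        long new_value = old_value + delta;

        printf ("%ld -> %ld\n", old_value, new_value);
        return 0;
}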
+               case OP_ATOMIC_EXCHANGE_I4:
+               case OP_ATOMIC_EXCHANGE_I8: {
+                       guchar *br[2];
+                       int sreg2 = ins->sreg2;
+                       int breg = ins->inst_basereg;
+                       guint32 size = (ins->opcode == OP_ATOMIC_EXCHANGE_I4) ? 4 : 8;
+
+                       /* 
+                        * See http://msdn.microsoft.com/msdnmag/issues/0700/Win32/ for
+                        * an explanation of how this works.
+                        */
+
+                       /* cmpxchg uses eax as the comparand, so we need to make sure we can use it;
+                        * this is a hack to overcome limits in the x86 reg allocator
+                        * (req: dreg == eax and sreg2 != eax and breg != eax)
+                        */
+                       if (ins->dreg != AMD64_RAX)
+                               amd64_push_reg (code, AMD64_RAX);
+                       
+                       /* We need the EAX reg for the cmpxchg */
+                       if (ins->sreg2 == AMD64_RAX) {
+                               amd64_push_reg (code, AMD64_RDX);
+                               amd64_mov_reg_reg (code, AMD64_RDX, AMD64_RAX, size);
+                               sreg2 = AMD64_RDX;
+                       }
+
+                       if (breg == AMD64_RAX) {
+                               amd64_mov_reg_reg (code, AMD64_R11, AMD64_RAX, size);
+                               breg = AMD64_R11;
+                       }
+
+                       amd64_mov_reg_membase (code, AMD64_RAX, breg, ins->inst_offset, size);
+
+                       br [0] = code; amd64_prefix (code, X86_LOCK_PREFIX);
+                       amd64_cmpxchg_membase_reg_size (code, breg, ins->inst_offset, sreg2, size);
+                       br [1] = code; amd64_branch8 (code, X86_CC_NE, -1, FALSE);
+                       amd64_patch (br [1], br [0]);
+
+                       if (ins->dreg != AMD64_RAX) {
+                               amd64_mov_reg_reg (code, ins->dreg, AMD64_RAX, size);
+                               amd64_pop_reg (code, AMD64_RAX);
+                       }
+
+                       if (ins->sreg2 != sreg2)
+                               amd64_pop_reg (code, AMD64_RDX);
+
+                       break;
+               }
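The sequence emitted above amounts to the classic CMPXCHG retry idiom for an atomic exchange: RAX holds the expected old value, LOCK CMPXCHG stores the new one only if memory still matches, and the loop retries otherwise. A minimal C equivalent, assuming GCC's __sync_val_compare_and_swap (which compiles down to LOCK CMPXCHG):

static long
atomic_exchange (volatile long *ptr, long val)
{
        long old;

        do {
                old = *ptr;
                /* retry if another thread changed *ptr between the load
                 * and the compare-and-swap */
        } while (__sync_val_compare_and_swap (ptr, old, val) != old);

        return old;
}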
                default:
                        g_warning ("unknown opcode %s in %s()\n", mono_inst_name (ins->opcode), __FUNCTION__);
                        g_assert_not_reached ();
@@ -4611,6 +3663,7 @@ void
 mono_arch_patch_code (MonoMethod *method, MonoDomain *domain, guint8 *code, MonoJumpInfo *ji, gboolean run_cctors)
 {
        MonoJumpInfo *patch_info;
+       gboolean compile_aot = !run_cctors;
 
        for (patch_info = ji; patch_info; patch_info = patch_info->next) {
                unsigned char *ip = patch_info->ip.i + code;
@@ -4618,45 +3671,32 @@ mono_arch_patch_code (MonoMethod *method, MonoDomain *domain, guint8 *code, Mono
 
                target = mono_resolve_patch_target (method, domain, code, patch_info, run_cctors);
 
+               if (compile_aot) {
+                       switch (patch_info->type) {
+                       case MONO_PATCH_INFO_BB:
+                       case MONO_PATCH_INFO_LABEL:
+                               break;
+                       default:
+                               /* No need to patch these */
+                               continue;
+                       }
+               }
+
                switch (patch_info->type) {
-               case MONO_PATCH_INFO_METHOD_REL:
-               case MONO_PATCH_INFO_METHOD_JUMP:
-                       *((gconstpointer *)(ip + 2)) = target;
-                       continue;
-               case MONO_PATCH_INFO_SWITCH: {
-                       *((gconstpointer *)(ip + 2)) = target;
+               case MONO_PATCH_INFO_NONE:
                        continue;
-               }
-               case MONO_PATCH_INFO_IID:
-                       *((guint32 *)(ip + 2)) = (guint32)(guint64)target;
-                       continue;                       
                case MONO_PATCH_INFO_CLASS_INIT: {
-                       /* FIXME: Might already been changed to a nop */
-                       *((gconstpointer *)(ip + 2)) = target;
-                       continue;
+                       /* Might already have been changed to a nop */
+                       guint8* ip2 = ip;
+                       amd64_call_code (ip2, 0);
+                       break;
                }
+               case MONO_PATCH_INFO_METHOD_REL:
                case MONO_PATCH_INFO_R8:
                case MONO_PATCH_INFO_R4:
                        g_assert_not_reached ();
                        continue;
-               case MONO_PATCH_INFO_METHODCONST:
-               case MONO_PATCH_INFO_CLASS:
-               case MONO_PATCH_INFO_IMAGE:
-               case MONO_PATCH_INFO_FIELD:
-               case MONO_PATCH_INFO_VTABLE:
-               case MONO_PATCH_INFO_SFLDA:
-               case MONO_PATCH_INFO_EXC_NAME:
-               case MONO_PATCH_INFO_LDSTR:
-               case MONO_PATCH_INFO_TYPE_FROM_HANDLE:
-               case MONO_PATCH_INFO_LDTOKEN:
-               case MONO_PATCH_INFO_IP:
-                       *((gconstpointer *)(ip + 2)) = target;
-                       continue;
-               case MONO_PATCH_INFO_METHOD:
-                       *((gconstpointer *)(ip + 2)) = target;
-                       continue;
-               case MONO_PATCH_INFO_ABS:
-               case MONO_PATCH_INFO_INTERNAL_METHOD:
+               case MONO_PATCH_INFO_BB:
                        break;
                default:
                        break;
@@ -4672,7 +3712,7 @@ mono_arch_emit_prolog (MonoCompile *cfg)
        MonoBasicBlock *bb;
        MonoMethodSignature *sig;
        MonoInst *inst;
-       int alloc_size, pos, max_offset, i;
+       int alloc_size, pos, max_offset, i, quad;
        guint8 *code;
        CallInfo *cinfo;
 
@@ -4697,12 +3737,13 @@ mono_arch_emit_prolog (MonoCompile *cfg)
        pos = 0;
 
        if (method->save_lmf) {
+               gint32 lmf_offset;
 
                pos = ALIGN_TO (pos + sizeof (MonoLMF), 16);
 
                amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, pos);
 
-               gint32 lmf_offset = - cfg->arch.lmf_offset;
+               lmf_offset = - cfg->arch.lmf_offset;
 
                /* Save ip */
                amd64_lea_membase (code, AMD64_R11, AMD64_RIP, 0);
@@ -4736,7 +3777,7 @@ mono_arch_emit_prolog (MonoCompile *cfg)
 
        if (alloc_size) {
                /* See mono_emit_stack_alloc */
-#ifdef PLATFORM_WIN32
+#if defined(PLATFORM_WIN32) || defined(MONO_ARCH_SIGSEGV_ON_ALTSTACK)
                guint32 remaining_size = alloc_size;
                while (remaining_size >= 0x1000) {
                        amd64_alu_reg_imm (code, X86_SUB, AMD64_RSP, 0x1000);
@@ -4773,7 +3814,7 @@ mono_arch_emit_prolog (MonoCompile *cfg)
                }
        }
 
-       sig = method->signature;
+       sig = mono_method_signature (method);
        pos = 0;
 
        cinfo = get_call_info (sig, FALSE);
@@ -4825,6 +3866,25 @@ mono_arch_emit_prolog (MonoCompile *cfg)
                        case ArgInDoubleSSEReg:
                                amd64_movsd_membase_reg (code, inst->inst_basereg, inst->inst_offset, ainfo->reg);
                                break;
+                       case ArgValuetypeInReg:
+                               for (quad = 0; quad < 2; quad ++) {
+                                       switch (ainfo->pair_storage [quad]) {
+                                       case ArgInIReg:
+                                               amd64_mov_membase_reg (code, inst->inst_basereg, inst->inst_offset + (quad * sizeof (gpointer)), ainfo->pair_regs [quad], sizeof (gpointer));
+                                               break;
+                                       case ArgInFloatSSEReg:
+                                               amd64_movss_membase_reg (code, inst->inst_basereg, inst->inst_offset + (quad * sizeof (gpointer)), ainfo->pair_regs [quad]);
+                                               break;
+                                       case ArgInDoubleSSEReg:
+                                               amd64_movsd_membase_reg (code, inst->inst_basereg, inst->inst_offset + (quad * sizeof (gpointer)), ainfo->pair_regs [quad]);
+                                               break;
+                                       case ArgNone:
+                                               break;
+                                       default:
+                                               g_assert_not_reached ();
+                                       }
+                               }
+                               break;
                        default:
                                break;
                        }
@@ -4846,6 +3906,8 @@ mono_arch_emit_prolog (MonoCompile *cfg)
        }
 
        if (method->save_lmf) {
+               gint32 lmf_offset;
+
                if (lmf_tls_offset != -1) {
                        /* Load lmf quickly using the FS register */
                        x86_prefix (code, X86_FS_PREFIX);
@@ -4861,7 +3923,7 @@ mono_arch_emit_prolog (MonoCompile *cfg)
                                                                 (gpointer)"mono_get_lmf_addr");                
                }
 
-               gint32 lmf_offset = - cfg->arch.lmf_offset;
+               lmf_offset = - cfg->arch.lmf_offset;
 
                /* Save lmf_addr */
                amd64_mov_membase_reg (code, AMD64_RBP, lmf_offset + G_STRUCT_OFFSET (MonoLMF, lmf_addr), AMD64_RAX, 8);
@@ -4889,10 +3951,28 @@ mono_arch_emit_prolog (MonoCompile *cfg)
 void
 mono_arch_emit_epilog (MonoCompile *cfg)
 {
-       MonoJumpInfo *patch_info;
        MonoMethod *method = cfg->method;
-       int pos, i;
+       int quad, pos, i;
        guint8 *code;
+       int max_epilog_size = 16;
+       CallInfo *cinfo;
+       
+       if (cfg->method->save_lmf)
+               max_epilog_size += 256;
+       
+       if (mono_jit_trace_calls != NULL)
+               max_epilog_size += 50;
+
+       if (cfg->prof_options & MONO_PROFILE_ENTER_LEAVE)
+               max_epilog_size += 50;
+
+       max_epilog_size += (AMD64_NREG * 2);
+
+       while (cfg->code_len + max_epilog_size > (cfg->code_size - 16)) {
+               cfg->code_size *= 2;
+               cfg->native_code = g_realloc (cfg->native_code, cfg->code_size);
+               mono_jit_stats.code_reallocs++;
+       }
 
        code = cfg->native_code + cfg->code_len;
 
@@ -4952,25 +4032,126 @@ mono_arch_emit_epilog (MonoCompile *cfg)
                }
        }
 
+       /* Load returned vtypes into registers if needed */
+       cinfo = get_call_info (mono_method_signature (method), FALSE);
+       if (cinfo->ret.storage == ArgValuetypeInReg) {
+               ArgInfo *ainfo = &cinfo->ret;
+               MonoInst *inst = cfg->ret;
+
+               for (quad = 0; quad < 2; quad ++) {
+                       switch (ainfo->pair_storage [quad]) {
+                       case ArgInIReg:
+                               amd64_mov_reg_membase (code, ainfo->pair_regs [quad], inst->inst_basereg, inst->inst_offset + (quad * sizeof (gpointer)), sizeof (gpointer));
+                               break;
+                       case ArgInFloatSSEReg:
+                               amd64_movss_reg_membase (code, ainfo->pair_regs [quad], inst->inst_basereg, inst->inst_offset + (quad * sizeof (gpointer)));
+                               break;
+                       case ArgInDoubleSSEReg:
+                               amd64_movsd_reg_membase (code, ainfo->pair_regs [quad], inst->inst_basereg, inst->inst_offset + (quad * sizeof (gpointer)));
+                               break;
+                       case ArgNone:
+                               break;
+                       default:
+                               g_assert_not_reached ();
+                       }
+               }
+       }
+       g_free (cinfo);
+
        amd64_leave (code);
        amd64_ret (code);
 
+       cfg->code_len = code - cfg->native_code;
+
+       g_assert (cfg->code_len < cfg->code_size);
+
+}
+
+void
+mono_arch_emit_exceptions (MonoCompile *cfg)
+{
+       MonoJumpInfo *patch_info;
+       int nthrows, i;
+       guint8 *code;
+       MonoClass *exc_classes [16];
+       guint8 *exc_throw_start [16], *exc_throw_end [16];
+       guint32 code_size = 0;
+
+       /* Compute needed space */
+       for (patch_info = cfg->patch_info; patch_info; patch_info = patch_info->next) {
+               if (patch_info->type == MONO_PATCH_INFO_EXC)
+                       code_size += 40;
+               if (patch_info->type == MONO_PATCH_INFO_R8)
+                       code_size += 8 + 7; /* sizeof (double) + alignment */
+               if (patch_info->type == MONO_PATCH_INFO_R4)
+                       code_size += 4 + 7; /* sizeof (float) + alignment */
+       }
+
+       while (cfg->code_len + code_size > (cfg->code_size - 16)) {
+               cfg->code_size *= 2;
+               cfg->native_code = g_realloc (cfg->native_code, cfg->code_size);
+               mono_jit_stats.code_reallocs++;
+       }
+
+       code = cfg->native_code + cfg->code_len;
+
        /* add code to raise exceptions */
+       nthrows = 0;
        for (patch_info = cfg->patch_info; patch_info; patch_info = patch_info->next) {
                switch (patch_info->type) {
                case MONO_PATCH_INFO_EXC: {
-                       guint64 offset;
+                       MonoClass *exc_class;
+                       guint8 *buf, *buf2;
+                       guint32 throw_ip;
 
                        amd64_patch (patch_info->ip.i + cfg->native_code, code);
-                       mono_add_patch_info (cfg, code - cfg->native_code, MONO_PATCH_INFO_EXC_NAME, patch_info->data.target);
-                       amd64_set_reg_template (code, AMD64_RDI);
-                       /* 7 is the length of the lea */
-                       offset = (((guint64)code + 7) - (guint64)cfg->native_code) - (guint64)patch_info->ip.i;
-                       amd64_lea_membase (code, AMD64_RSI, AMD64_RIP, - offset);
-                       patch_info->type = MONO_PATCH_INFO_INTERNAL_METHOD;
-                       patch_info->data.name = "mono_arch_throw_exception_by_name";
-                       patch_info->ip.i = code - cfg->native_code;
-                       EMIT_CALL ();
+
+                       exc_class = mono_class_from_name (mono_defaults.corlib, "System", patch_info->data.name);
+                       g_assert (exc_class);
+                       throw_ip = patch_info->ip.i;
+
+                       //x86_breakpoint (code);
+                       /* Find a throw sequence for the same exception class */
+                       for (i = 0; i < nthrows; ++i)
+                               if (exc_classes [i] == exc_class)
+                                       break;
+                       if (i < nthrows) {
+                               amd64_mov_reg_imm (code, AMD64_RSI, (exc_throw_end [i] - cfg->native_code) - throw_ip);
+                               x86_jump_code (code, exc_throw_start [i]);
+                               patch_info->type = MONO_PATCH_INFO_NONE;
+                       }
+                       else {
+                               buf = code;
+                               amd64_mov_reg_imm_size (code, AMD64_RSI, 0xf0f0f0f0, 4);
+                               buf2 = code;
+
+                               if (nthrows < 16) {
+                                       exc_classes [nthrows] = exc_class;
+                                       exc_throw_start [nthrows] = code;
+                               }
+
+                               amd64_mov_reg_imm (code, AMD64_RDI, exc_class->type_token);
+                               patch_info->data.name = "mono_arch_throw_corlib_exception";
+                               patch_info->type = MONO_PATCH_INFO_INTERNAL_METHOD;
+                               patch_info->ip.i = code - cfg->native_code;
+
+                               if (cfg->compile_aot) {
+                                       amd64_mov_reg_membase (code, GP_SCRATCH_REG, AMD64_RIP, 0, 8);
+                                       amd64_call_reg (code, GP_SCRATCH_REG);
+                               } else {
+                                       /* The callee is in memory allocated using the code manager */
+                                       amd64_call_code (code, 0);
+                               }
+
+                               amd64_mov_reg_imm (buf, AMD64_RSI, (code - cfg->native_code) - throw_ip);
+                               while (buf < buf2)
+                                       x86_nop (buf);
+
+                               if (nthrows < 16) {
+                                       exc_throw_end [nthrows] = code;
+                                       nthrows ++;
+                               }
+                       }
                        break;
                }
                default:
@@ -4985,9 +4166,11 @@ mono_arch_emit_epilog (MonoCompile *cfg)
 
                switch (patch_info->type) {
                case MONO_PATCH_INFO_R8: {
+                       guint8 *pos;
+
                        code = (guint8*)ALIGN_TO (code, 8);
 
-                       guint8* pos = cfg->native_code + patch_info->ip.i;
+                       pos = cfg->native_code + patch_info->ip.i;
 
                        *(double*)code = *(double*)patch_info->data.target;
 
@@ -5001,9 +4184,11 @@ mono_arch_emit_epilog (MonoCompile *cfg)
                        break;
                }
                case MONO_PATCH_INFO_R4: {
+                       guint8 *pos;
+
                        code = (guint8*)ALIGN_TO (code, 8);
 
-                       guint8* pos = cfg->native_code + patch_info->ip.i;
+                       pos = cfg->native_code + patch_info->ip.i;
 
                        *(float*)code = *(float*)patch_info->data.target;
 
@@ -5039,27 +4224,11 @@ mono_arch_emit_epilog (MonoCompile *cfg)
 
 }
 
-/*
- * Allow tracing to work with this interface (with an optional argument)
- */
-
-/*
- * This may be needed on some archs or for debugging support.
- */
-void
-mono_arch_instrument_mem_needs (MonoMethod *method, int *stack, int *code)
-{
-       /* no stack room needed now (may be needed for FASTCALL-trace support) */
-       *stack = 0;
-       /* split prolog-epilog requirements? */
-       *code = 50; /* max bytes needed: check this number */
-}
-
 void*
 mono_arch_instrument_prolog (MonoCompile *cfg, void *func, void *p, gboolean enable_arguments)
 {
        guchar *code = p;
-       CallInfo *cinfo;
+       CallInfo *cinfo = NULL;
        MonoMethodSignature *sig;
        MonoInst *inst;
        int i, n, stack_area = 0;
@@ -5068,7 +4237,7 @@ mono_arch_instrument_prolog (MonoCompile *cfg, void *func, void *p, gboolean ena
 
        if (enable_arguments) {
                /* Allocate a new area on the stack and save arguments there */
-               sig = cfg->method->signature;
+               sig = mono_method_signature (cfg->method);
 
                cinfo = get_call_info (sig, FALSE);
 
@@ -5118,7 +4287,7 @@ mono_arch_instrument_epilog (MonoCompile *cfg, void *func, void *p, gboolean ena
        guchar *code = p;
        int save_mode = SAVE_NONE;
        MonoMethod *method = cfg->method;
-       int rtype = mono_type_get_underlying_type (method->signature->ret)->type;
+       int rtype = mono_type_get_underlying_type (mono_method_signature (method)->ret)->type;
        
        switch (rtype) {
        case MONO_TYPE_VOID:
@@ -5207,42 +4376,10 @@ mono_arch_instrument_epilog (MonoCompile *cfg, void *func, void *p, gboolean ena
        return code;
 }
 
-int
-mono_arch_max_epilog_size (MonoCompile *cfg)
-{
-       int max_epilog_size = 16;
-       MonoJumpInfo *patch_info;
-       
-       if (cfg->method->save_lmf)
-               max_epilog_size += 256;
-       
-       if (mono_jit_trace_calls != NULL)
-               max_epilog_size += 50;
-
-       if (cfg->prof_options & MONO_PROFILE_ENTER_LEAVE)
-               max_epilog_size += 50;
-
-       max_epilog_size += (AMD64_NREG * 2);
-
-       /* 
-        * make sure we have enough space for exceptions
-        */
-       for (patch_info = cfg->patch_info; patch_info; patch_info = patch_info->next) {
-               if (patch_info->type == MONO_PATCH_INFO_EXC)
-                       max_epilog_size += 40;
-               if (patch_info->type == MONO_PATCH_INFO_R8)
-                       max_epilog_size += 8 + 7; /* sizeof (double) + alignment */
-               if (patch_info->type == MONO_PATCH_INFO_R4)
-                       max_epilog_size += 4 + 7; /* sizeof (float) + alignment */
-       }
-
-       return max_epilog_size;
-}
-
 void
 mono_arch_flush_icache (guint8 *code, gint size)
 {
-       /* not needed */
+       /* Not needed */
 }
 
 void
@@ -5269,7 +4406,7 @@ static int reg_to_ucontext_reg [] = {
  * integer overflow.
  */
 gboolean
-mono_arch_is_int_overflow (void *sigctx)
+mono_arch_is_int_overflow (void *sigctx, void *info)
 {
        ucontext_t *ctx = (ucontext_t*)sigctx;
        guint8* rip;
@@ -5278,7 +4415,7 @@ mono_arch_is_int_overflow (void *sigctx)
        rip = (guint8*)ctx->uc_mcontext.gregs [REG_RIP];
 
        if (IS_REX (rip [0])) {
-               reg = amd64_rex_r (rip [0]);
+               reg = amd64_rex_b (rip [0]);
                rip ++;
        }
        else
@@ -5295,8 +4432,14 @@ mono_arch_is_int_overflow (void *sigctx)
        return FALSE;
 }
 
+guint32
+mono_arch_get_patch_offset (guint8 *code)
+{
+       return 3;
+}
+
 gpointer*
-mono_amd64_get_vcall_slot_addr (guint8* code, guint64 *regs)
+mono_arch_get_vcall_slot_addr (guint8* code, gpointer *regs)
 {
        guint32 reg;
        guint32 disp;
@@ -5309,31 +4452,51 @@ mono_amd64_get_vcall_slot_addr (guint8* code, guint64 *regs)
         * 0xff m=1,o=2 imm8
         * 0xff m=2,o=2 imm32
         */
-       code -= 6;
+       code -= 7;
 
-       if (IS_REX (code [3]) && (code [4] == 0xff) && (amd64_modrm_reg (code [5]) == 0x2) && (amd64_modrm_mod (code [5]) == 0x3)) {
-               /* call *%reg */
-               return NULL;
+       /* 
+        * A given byte sequence can match more than one case here, so we have to be
+        * really careful about the ordering of the cases. Longer sequences
+        * come first.
+        */
+       if ((code [0] == 0x41) && (code [1] == 0xff) && (code [2] == 0x15)) {
+               /* call OFFSET(%rip) */
+               disp = *(guint32*)(code + 3);
+               return (gpointer*)(code + disp + 7);
        }
-       else if ((code [0] == 0xff) && (amd64_modrm_reg (code [1]) == 0x2) && (amd64_modrm_mod (code [1]) == 0x2)) {
+       else if ((code [1] == 0xff) && (amd64_modrm_reg (code [2]) == 0x2) && (amd64_modrm_mod (code [2]) == 0x2)) {
                /* call *[reg+disp32] */
-               reg = amd64_modrm_rm (code [1]);
-               disp = *(guint32*)(code + 2);
+               if (IS_REX (code [0]))
+                       rex = code [0];
+               reg = amd64_modrm_rm (code [2]);
+               disp = *(guint32*)(code + 3);
                //printf ("B: [%%r%d+0x%x]\n", reg, disp);
        }
-       else if ((code [3] == 0xff) && (amd64_modrm_reg (code [4]) == 0x2) && (amd64_modrm_mod (code [4]) == 0x1)) {
+       else if (code [2] == 0xe8) {
+               /* call <ADDR> */
+               return NULL;
+       }
+       else if (IS_REX (code [4]) && (code [5] == 0xff) && (amd64_modrm_reg (code [6]) == 0x2) && (amd64_modrm_mod (code [6]) == 0x3)) {
+               /* call *%reg */
+               return NULL;
+       }
+       else if ((code [4] == 0xff) && (amd64_modrm_reg (code [5]) == 0x2) && (amd64_modrm_mod (code [5]) == 0x1)) {
                /* call *[reg+disp8] */
-               reg = amd64_modrm_rm (code [4]);
-               disp = *(guint8*)(code + 5);
+               if (IS_REX (code [3]))
+                       rex = code [3];
+               reg = amd64_modrm_rm (code [5]);
+               disp = *(guint8*)(code + 6);
                //printf ("B: [%%r%d+0x%x]\n", reg, disp);
        }
-       else if ((code [4] == 0xff) && (amd64_modrm_reg (code [5]) == 0x2) && (amd64_modrm_mod (code [5]) == 0x0)) {
+       else if ((code [5] == 0xff) && (amd64_modrm_reg (code [6]) == 0x2) && (amd64_modrm_mod (code [6]) == 0x0)) {
                        /*
                         * This is an interface call; we should check that the cases above can't catch it earlier.
                         * 8b 40 30   mov    0x30(%eax),%eax
                         * ff 10      call   *(%eax)
                         */
-               reg = amd64_modrm_rm (code [5]);
+               if (IS_REX (code [4]))
+                       rex = code [4];
+               reg = amd64_modrm_rm (code [6]);
                disp = 0;
        }
        else
@@ -5341,140 +4504,64 @@ mono_amd64_get_vcall_slot_addr (guint8* code, guint64 *regs)
 
        reg += amd64_rex_b (rex);
 
-       /* FIXME: */
-       return (gpointer)((regs [reg]) + disp);
+       /* R11 is clobbered by the trampoline code */
+       g_assert (reg != AMD64_R11);
+
+       return (gpointer)(((guint64)(regs [reg])) + disp);
 }
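For reference, these are the call encodings that mono_arch_get_vcall_slot_addr distinguishes (an indirect call is opcode 0xff with ModRM reg field 2, optionally preceded by a REX prefix); a sketch of the byte patterns, checked longest-first as the comment above notes:

/*
 *   41 ff 15 <disp32>    call *disp32(%rip)    RIP-relative (AOT case, handled first)
 *   4x ff 9r <disp32>    call *disp32(%reg)    ModRM mod=2
 *   e8 <rel32>           call <rel32>          direct call, no vcall slot
 *   4x ff dr             call *%reg            ModRM mod=3, no vcall slot
 *   4x ff 5r <disp8>     call *disp8(%reg)     ModRM mod=1
 *   4x ff 1r             call *(%reg)          ModRM mod=0
 */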
 
-/*
- * Support for fast access to the thread-local lmf structure using the GS
- * segment register on NPTL + kernel 2.6.x.
- */
+gpointer*
+mono_arch_get_delegate_method_ptr_addr (guint8* code, gpointer *regs)
+{
+       guint32 reg;
+       guint32 disp;
 
-static gboolean tls_offset_inited = FALSE;
+       code -= 10;
 
-/* code should be simply return <tls var>; */
-static int 
-read_tls_offset_from_method (void* method)
-{
-       guint8 *code = (guint8*)method;
+       if (IS_REX (code [0]) && (code [1] == 0x8b) && (code [3] == 0x48) && (code [4] == 0x8b) && (code [5] == 0x40) && (code [7] == 0x48) && (code [8] == 0xff) && (code [9] == 0xd0)) {
+               /* mov REG, %rax; mov <OFFSET>(%rax), %rax; call *%rax */
+               reg = amd64_rex_b (code [0]) + amd64_modrm_rm (code [2]);
+               disp = code [6];
 
-       /* 
-        * Determine the offset of mono_lfm_addr inside the TLS structures
-        * by disassembling the function above.
-        */
-       /* This is generated by gcc 3.3.2 */
-       if ((code [0] == 0x55) && (code [1] == 0x48) && (code [2] == 0x89) &&
-               (code [3] == 0xe5) && (code [4] == 0x64) && (code [5] == 0x48) &&
-               (code [6] == 0x8b) && (code [7] == 0x04) && (code [8] == 0x25) &&
-               (code [9] == 0x00) && (code [10] == 0x00) && (code [11] == 0x00) &&
-               (code [12] == 0x0) && (code [13] == 0x48) && (code [14] == 0x8b) &&
-               (code [15] == 0x80)) {
-               return *(gint32*)&(code [16]);
-       } else if
-               /* This is generated by gcc-3.3.2 with -O=2 */
-               /* mov fs:0, %rax ; mov <offset>(%rax), %rax ; retq */
-               ((code [0] == 0x64) && (code [1] == 0x48) && (code [2] == 0x8b) &&
-                (code [3] == 0x04) && (code [4] == 0x25) &&
-                (code [9] == 0x48) && (code [10] == 0x8b) && (code [11] == 0x80) &&
-                (code [16] == 0xc3)) {
-                       return *(gint32*)&(code [12]);
-       } else if 
-               /* This is generated by gcc-3.4.1 */
-               ((code [0] == 0x55) && (code [1] == 0x48) && (code [2] == 0x89) &&
-                (code [3] == 0xe5) && (code [4] == 0x64) && (code [5] == 0x48) &&
-                (code [6] == 0x8b) && (code [7] == 0x04) && (code [8] == 0x25) &&
-                (code [13] == 0xc9) && (code [14] == 0xc3)) {
-                       return *(gint32*)&(code [9]);
-       } else if
-               /* This is generated by gcc-3.4.1 with -O=2 */
-               ((code [0] == 0x64) && (code [1] == 0x48) && (code [2] == 0x8b) &&
-                (code [3] == 0x04) && (code [4] == 0x25)) {
-               return *(gint32*)&(code [5]);
+               if (reg == AMD64_RAX)
+                       return NULL;
+               else
+                       return (gpointer*)(((guint64)(regs [reg])) + disp);
        }
 
-       return -1;
+       return NULL;
 }
 
+/*
+ * Support for fast access to the thread-local lmf structure using the GS
+ * segment register on NPTL + kernel 2.6.x.
+ */
+
+static gboolean tls_offset_inited = FALSE;
+
 void
 mono_arch_setup_jit_tls_data (MonoJitTlsData *tls)
 {
-#ifdef MONO_ARCH_SIGSEGV_ON_ALTSTACK
-       pthread_t self = pthread_self();
-       pthread_attr_t attr;
-       void *staddr = NULL;
-       size_t stsize = 0;
-       struct sigaltstack sa;
-#endif
-
        if (!tls_offset_inited) {
                tls_offset_inited = TRUE;
 
-               lmf_tls_offset = read_tls_offset_from_method (mono_get_lmf_addr);
-               appdomain_tls_offset = read_tls_offset_from_method (mono_domain_get);
-               //thread_tls_offset = read_tls_offset_from_method (mono_thread_current);
+               appdomain_tls_offset = mono_domain_get_tls_offset ();
+               lmf_tls_offset = mono_get_lmf_tls_offset ();
+               thread_tls_offset = mono_thread_get_tls_offset ();
        }               
-
-#ifdef MONO_ARCH_SIGSEGV_ON_ALTSTACK
-
-       /* Determine stack boundaries */
-       if (!mono_running_on_valgrind ()) {
-#ifdef HAVE_PTHREAD_GETATTR_NP
-               pthread_getattr_np( self, &attr );
-#else
-#ifdef HAVE_PTHREAD_ATTR_GET_NP
-               pthread_attr_get_np( self, &attr );
-#elif defined(sun)
-               pthread_attr_init( &attr );
-               pthread_attr_getstacksize( &attr, &stsize );
-#else
-#error "Not implemented"
-#endif
-#endif
-#ifndef sun
-               pthread_attr_getstack( &attr, &staddr, &stsize );
-#endif
-       }
-
-       /* 
-        * staddr seems to be wrong for the main thread, so we keep the value in
-        * tls->end_of_stack
-        */
-       tls->stack_size = stsize;
-
-       /* Setup an alternate signal stack */
-       tls->signal_stack = g_malloc (SIGNAL_STACK_SIZE);
-       tls->signal_stack_size = SIGNAL_STACK_SIZE;
-
-       sa.ss_sp = tls->signal_stack;
-       sa.ss_size = SIGNAL_STACK_SIZE;
-       sa.ss_flags = SS_ONSTACK;
-       sigaltstack (&sa, NULL);
-#endif
 }
 
 void
 mono_arch_free_jit_tls_data (MonoJitTlsData *tls)
 {
-#ifdef MONO_ARCH_SIGSEGV_ON_ALTSTACK
-       struct sigaltstack sa;
-
-       sa.ss_sp = tls->signal_stack;
-       sa.ss_size = SIGNAL_STACK_SIZE;
-       sa.ss_flags = SS_DISABLE;
-       sigaltstack  (&sa, NULL);
-
-       if (tls->signal_stack)
-               g_free (tls->signal_stack);
-#endif
 }
 
 void
 mono_arch_emit_this_vret_args (MonoCompile *cfg, MonoCallInst *inst, int this_reg, int this_type, int vt_reg)
 {
+       MonoCallInst *call = (MonoCallInst*)inst;
        int out_reg = param_regs [0];
 
-       /* FIXME: RDI and RSI might get clobbered */
-
        if (vt_reg != -1) {
                CallInfo * cinfo = get_call_info (inst->signature, FALSE);
                MonoInst *vtarg;
@@ -5493,11 +4580,14 @@ mono_arch_emit_this_vret_args (MonoCompile *cfg, MonoCallInst *inst, int this_re
                        MONO_EMIT_NEW_BIALU_IMM (cfg, OP_SUB_IMM, X86_ESP, X86_ESP, 8);
                }
                else {
-                       MONO_INST_NEW (cfg, vtarg, OP_SETREG);
+                       MONO_INST_NEW (cfg, vtarg, OP_MOVE);
                        vtarg->sreg1 = vt_reg;
-                       vtarg->dreg = out_reg;
-                       out_reg = param_regs [1];
+                       vtarg->dreg = mono_regstate_next_int (cfg->rs);
                        mono_bblock_add_inst (cfg->cbb, vtarg);
+
+                       mono_call_inst_add_outarg_reg (call, vtarg->dreg, out_reg, FALSE);
+
+                       out_reg = param_regs [1];
                }
 
                g_free (cinfo);
@@ -5506,46 +4596,136 @@ mono_arch_emit_this_vret_args (MonoCompile *cfg, MonoCallInst *inst, int this_re
        /* add the this argument */
        if (this_reg != -1) {
                MonoInst *this;
-               MONO_INST_NEW (cfg, this, OP_SETREG);
+               MONO_INST_NEW (cfg, this, OP_MOVE);
                this->type = this_type;
                this->sreg1 = this_reg;
-               this->dreg = out_reg;
+               this->dreg = mono_regstate_next_int (cfg->rs);
                mono_bblock_add_inst (cfg->cbb, this);
+
+               mono_call_inst_add_outarg_reg (call, this->dreg, out_reg, FALSE);
        }
 }
 
-gint
-mono_arch_get_opcode_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
+MonoInst*
+mono_arch_get_inst_for_method (MonoCompile *cfg, MonoMethod *cmethod, MonoMethodSignature *fsig, MonoInst **args)
 {
-       if (use_sse2)
-               return -1;
+       MonoInst *ins = NULL;
 
        if (cmethod->klass == mono_defaults.math_class) {
-               if (strcmp (cmethod->name, "Sin") == 0)
-                       return OP_SIN;
-               else if (strcmp (cmethod->name, "Cos") == 0)
-                       return OP_COS;
-               else if (strcmp (cmethod->name, "Tan") == 0)
-                       return OP_TAN;
-               else if (strcmp (cmethod->name, "Atan") == 0)
-                       return OP_ATAN;
-               else if (strcmp (cmethod->name, "Sqrt") == 0)
-                       return OP_SQRT;
-               else if (strcmp (cmethod->name, "Abs") == 0 && fsig->params [0]->type == MONO_TYPE_R8)
-                       return OP_ABS;
+               if (strcmp (cmethod->name, "Sin") == 0) {
+                       MONO_INST_NEW (cfg, ins, OP_SIN);
+                       ins->inst_i0 = args [0];
+               } else if (strcmp (cmethod->name, "Cos") == 0) {
+                       MONO_INST_NEW (cfg, ins, OP_COS);
+                       ins->inst_i0 = args [0];
+               } else if (strcmp (cmethod->name, "Tan") == 0) {
+                       if (use_sse2)
+                               return ins;
+                       MONO_INST_NEW (cfg, ins, OP_TAN);
+                       ins->inst_i0 = args [0];
+               } else if (strcmp (cmethod->name, "Atan") == 0) {
+                       if (use_sse2)
+                               return ins;
+                       MONO_INST_NEW (cfg, ins, OP_ATAN);
+                       ins->inst_i0 = args [0];
+               } else if (strcmp (cmethod->name, "Sqrt") == 0) {
+                       MONO_INST_NEW (cfg, ins, OP_SQRT);
+                       ins->inst_i0 = args [0];
+               } else if (strcmp (cmethod->name, "Abs") == 0 && fsig->params [0]->type == MONO_TYPE_R8) {
+                       MONO_INST_NEW (cfg, ins, OP_ABS);
+                       ins->inst_i0 = args [0];
+               }
 #if 0
                /* OP_FREM is not IEEE compatible */
-               else if (strcmp (cmethod->name, "IEEERemainder") == 0)
-                       return OP_FREM;
+               else if (strcmp (cmethod->name, "IEEERemainder") == 0) {
+                       MONO_INST_NEW (cfg, ins, OP_FREM);
+                       ins->inst_i0 = args [0];
+                       ins->inst_i1 = args [1];
+               }
 #endif
-               else
-                       return -1;
-       } else {
-               return -1;
+       } else if (cmethod->klass == mono_defaults.thread_class &&
+                          strcmp (cmethod->name, "MemoryBarrier") == 0) {
+               MONO_INST_NEW (cfg, ins, OP_MEMORY_BARRIER);
+       } else if (cmethod->klass->image == mono_defaults.corlib &&
+                          (strcmp (cmethod->klass->name_space, "System.Threading") == 0) &&
+                          (strcmp (cmethod->klass->name, "Interlocked") == 0)) {
+
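+               /* Increment/Decrement/Add map to atomic add opcodes, Exchange to an atomic exchange. */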
+               if (strcmp (cmethod->name, "Increment") == 0) {
+                       MonoInst *ins_iconst;
+                       guint32 opcode;
+
+                       if (fsig->params [0]->type == MONO_TYPE_I4)
+                               opcode = OP_ATOMIC_ADD_NEW_I4;
+                       else if (fsig->params [0]->type == MONO_TYPE_I8)
+                               opcode = OP_ATOMIC_ADD_NEW_I8;
+                       else
+                               g_assert_not_reached ();
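+                       /* The ADD_NEW opcodes are expected to return the updated value, matching Increment's result. */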
+                       MONO_INST_NEW (cfg, ins, opcode);
+                       MONO_INST_NEW (cfg, ins_iconst, OP_ICONST);
+                       ins_iconst->inst_c0 = 1;
+
+                       ins->inst_i0 = args [0];
+                       ins->inst_i1 = ins_iconst;
+               } else if (strcmp (cmethod->name, "Decrement") == 0) {
+                       MonoInst *ins_iconst;
+                       guint32 opcode;
+
+                       if (fsig->params [0]->type == MONO_TYPE_I4)
+                               opcode = OP_ATOMIC_ADD_NEW_I4;
+                       else if (fsig->params [0]->type == MONO_TYPE_I8)
+                               opcode = OP_ATOMIC_ADD_NEW_I8;
+                       else
+                               g_assert_not_reached ();
+                       MONO_INST_NEW (cfg, ins, opcode);
+                       MONO_INST_NEW (cfg, ins_iconst, OP_ICONST);
+                       ins_iconst->inst_c0 = -1;
+
+                       ins->inst_i0 = args [0];
+                       ins->inst_i1 = ins_iconst;
+               } else if (strcmp (cmethod->name, "Add") == 0) {
+                       guint32 opcode;
+
+                       if (fsig->params [0]->type == MONO_TYPE_I4)
+                               opcode = OP_ATOMIC_ADD_I4;
+                       else if (fsig->params [0]->type == MONO_TYPE_I8)
+                               opcode = OP_ATOMIC_ADD_I8;
+                       else
+                               g_assert_not_reached ();
+
+                       MONO_INST_NEW (cfg, ins, opcode);
+
+                       ins->inst_i0 = args [0];
+                       ins->inst_i1 = args [1];
+               } else if (strcmp (cmethod->name, "Exchange") == 0) {
+                       guint32 opcode;
+
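+                       /* Object references and native ints are pointer-sized (8 bytes) on amd64, so they share the 64-bit exchange. */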
+                       if (fsig->params [0]->type == MONO_TYPE_I4)
+                               opcode = OP_ATOMIC_EXCHANGE_I4;
+                       else if ((fsig->params [0]->type == MONO_TYPE_I8) ||
+                                        (fsig->params [0]->type == MONO_TYPE_I) ||
+                                        (fsig->params [0]->type == MONO_TYPE_OBJECT))
+                               opcode = OP_ATOMIC_EXCHANGE_I8;
+                       else
+                               return NULL;
+
+                       MONO_INST_NEW (cfg, ins, opcode);
+
+                       ins->inst_i0 = args [0];
+                       ins->inst_i1 = args [1];
+               } else if (strcmp (cmethod->name, "Read") == 0 && (fsig->params [0]->type == MONO_TYPE_I8)) {
+                       /* 64 bit reads are already atomic */
+                       MONO_INST_NEW (cfg, ins, CEE_LDIND_I8);
+                       ins->inst_i0 = args [0];
+               }
+
+               /* 
+                * Can't implement CompareExchange methods this way since they have
+                * three arguments.
+                */
        }
-       return -1;
-}
 
+       return ins;
+}
 
 gboolean
 mono_arch_print_tree (MonoInst *tree, int arity)
@@ -5560,7 +4740,7 @@ MonoInst* mono_arch_get_domain_intrinsic (MonoCompile* cfg)
        if (appdomain_tls_offset == -1)
                return NULL;
        
-       MONO_INST_NEW (cfg, ins, OP_X86_TLS_GET);
+       MONO_INST_NEW (cfg, ins, OP_TLS_GET);
        ins->inst_offset = appdomain_tls_offset;
        return ins;
 }
@@ -5572,7 +4752,7 @@ MonoInst* mono_arch_get_thread_intrinsic (MonoCompile* cfg)
        if (thread_tls_offset == -1)
                return NULL;
        
-       MONO_INST_NEW (cfg, ins, OP_X86_TLS_GET);
+       MONO_INST_NEW (cfg, ins, OP_TLS_GET);
        ins->inst_offset = thread_tls_offset;
        return ins;
 }